Diffstat (limited to 'drivers'); each entry lists the file mode, the file path, and the number of lines changed
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/ac.c19
-rw-r--r--drivers/acpi/acpi_lpss.c13
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/acpica/acglobal.h2
-rw-r--r--drivers/acpi/acpica/hwesleep.c8
-rw-r--r--drivers/acpi/acpica/hwsleep.c11
-rw-r--r--drivers/acpi/acpica/hwxfsleep.c7
-rw-r--r--drivers/acpi/acpica/utosi.c1
-rw-r--r--drivers/acpi/apei/einj.c15
-rw-r--r--drivers/acpi/apei/hest.c5
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/cppc_acpi.c43
-rw-r--r--drivers/acpi/dock.c8
-rw-r--r--drivers/acpi/glue.c66
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/power.c104
-rw-r--r--drivers/acpi/pptt.c67
-rw-r--r--drivers/acpi/prmt.c35
-rw-r--r--drivers/acpi/processor_idle.c3
-rw-r--r--drivers/acpi/resource.c56
-rw-r--r--drivers/acpi/scan.c7
-rw-r--r--drivers/acpi/sleep.c10
-rw-r--r--drivers/acpi/tables.c3
-rw-r--r--drivers/amba/bus.c100
-rw-r--r--drivers/android/binder.c27
-rw-r--r--drivers/android/binder_internal.h4
-rw-r--r--drivers/ata/ahci.c13
-rw-r--r--drivers/ata/libata-core.c59
-rw-r--r--drivers/ata/libata-scsi.c52
-rw-r--r--drivers/ata/pata_ali.c4
-rw-r--r--drivers/ata/pata_amd.c2
-rw-r--r--drivers/ata/pata_optidma.c4
-rw-r--r--drivers/ata/pata_radisys.c4
-rw-r--r--drivers/ata/sata_mv.c4
-rw-r--r--drivers/base/arch_topology.c15
-rw-r--r--drivers/base/power/main.c14
-rw-r--r--drivers/base/power/power.h7
-rw-r--r--drivers/base/power/runtime.c6
-rw-r--r--drivers/base/power/wakeirq.c101
-rw-r--r--drivers/base/property.c63
-rw-r--r--drivers/base/regmap/regcache-rbtree.c7
-rw-r--r--drivers/base/regmap/regmap-mdio.c6
-rw-r--r--drivers/base/regmap/regmap-spi.c36
-rw-r--r--drivers/base/topology.c10
-rw-r--r--drivers/bcma/main.c2
-rw-r--r--drivers/block/Kconfig26
-rw-r--r--drivers/block/Makefile1
-rw-r--r--drivers/block/amiflop.c9
-rw-r--r--drivers/block/aoe/aoeblk.c19
-rw-r--r--drivers/block/ataflop.c110
-rw-r--r--drivers/block/brd.c12
-rw-r--r--drivers/block/cryptoloop.c206
-rw-r--r--drivers/block/drbd/drbd_int.h5
-rw-r--r--drivers/block/drbd/drbd_main.c6
-rw-r--r--drivers/block/drbd/drbd_req.c3
-rw-r--r--drivers/block/floppy.c35
-rw-r--r--drivers/block/loop.c420
-rw-r--r--drivers/block/loop.h30
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c6
-rw-r--r--drivers/block/n64cart.c24
-rw-r--r--drivers/block/nbd.c176
-rw-r--r--drivers/block/null_blk/main.c195
-rw-r--r--drivers/block/null_blk/null_blk.h6
-rw-r--r--drivers/block/paride/pcd.c312
-rw-r--r--drivers/block/paride/pd.c148
-rw-r--r--drivers/block/paride/pf.c236
-rw-r--r--drivers/block/pktcdvd.c20
-rw-r--r--drivers/block/ps3vram.c6
-rw-r--r--drivers/block/rbd.c8
-rw-r--r--drivers/block/rnbd/rnbd-clt.c15
-rw-r--r--drivers/block/rnbd/rnbd-proto.h2
-rw-r--r--drivers/block/rsxx/core.c4
-rw-r--r--drivers/block/rsxx/dev.c19
-rw-r--r--drivers/block/swim.c36
-rw-r--r--drivers/block/swim3.c5
-rw-r--r--drivers/block/sx8.c15
-rw-r--r--drivers/block/virtio_blk.c194
-rw-r--r--drivers/block/xen-blkback/xenbus.c2
-rw-r--r--drivers/block/xen-blkfront.c9
-rw-r--r--drivers/block/zram/zram_drv.c10
-rw-r--r--drivers/bluetooth/btintel.c239
-rw-r--r--drivers/bluetooth/btintel.h11
-rw-r--r--drivers/bluetooth/btmrvl_main.c6
-rw-r--r--drivers/bluetooth/btmtkuart.c13
-rw-r--r--drivers/bluetooth/btrsi.c1
-rw-r--r--drivers/bluetooth/btrtl.c26
-rw-r--r--drivers/bluetooth/btusb.c64
-rw-r--r--drivers/bluetooth/hci_h5.c35
-rw-r--r--drivers/bluetooth/hci_ldisc.c3
-rw-r--r--drivers/bluetooth/hci_qca.c5
-rw-r--r--drivers/bluetooth/hci_vhci.c122
-rw-r--r--drivers/bus/Kconfig2
-rw-r--r--drivers/bus/brcmstb_gisb.c7
-rw-r--r--drivers/bus/fsl-mc/Makefile3
-rw-r--r--drivers/bus/fsl-mc/fsl-mc-private.h39
-rw-r--r--drivers/bus/fsl-mc/obj-api.c103
-rw-r--r--drivers/bus/sun50i-de2.c7
-rw-r--r--drivers/bus/ti-sysc.c276
-rw-r--r--drivers/cdrom/cdrom.c63
-rw-r--r--drivers/cdrom/gdrom.c7
-rw-r--r--drivers/char/hw_random/Kconfig12
-rw-r--r--drivers/char/hw_random/ixp4xx-rng.c4
-rw-r--r--drivers/char/hw_random/meson-rng.c5
-rw-r--r--drivers/char/hw_random/mtk-rng.c9
-rw-r--r--drivers/char/hw_random/s390-trng.c4
-rw-r--r--drivers/char/hw_random/virtio-rng.c86
-rw-r--r--drivers/char/ipmi/Kconfig11
-rw-r--r--drivers/char/ipmi/Makefile1
-rw-r--r--drivers/char/ipmi/bt-bmc.c69
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ipmb.c539
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c330
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c8
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/char/ipmi/ipmi_watchdog.c25
-rw-r--r--drivers/char/ipmi/kcs_bmc_serio.c4
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c9
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm2-space.c3
-rw-r--r--drivers/char/tpm/tpm_tis_core.c26
-rw-r--r--drivers/char/tpm/tpm_tis_core.h4
-rw-r--r--drivers/char/tpm/tpm_tis_spi_main.c1
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/clk/at91/at91rm9200.c2
-rw-r--r--drivers/clk/at91/at91sam9260.c2
-rw-r--r--drivers/clk/at91/at91sam9g45.c2
-rw-r--r--drivers/clk/at91/at91sam9n12.c2
-rw-r--r--drivers/clk/at91/at91sam9rl.c2
-rw-r--r--drivers/clk/at91/at91sam9x5.c2
-rw-r--r--drivers/clk/at91/clk-generated.c46
-rw-r--r--drivers/clk/at91/clk-main.c66
-rw-r--r--drivers/clk/at91/clk-master.c463
-rw-r--r--drivers/clk/at91/clk-peripheral.c40
-rw-r--r--drivers/clk/at91/clk-pll.c39
-rw-r--r--drivers/clk/at91/clk-programmable.c29
-rw-r--r--drivers/clk/at91/clk-sam9x60-pll.c174
-rw-r--r--drivers/clk/at91/clk-system.c20
-rw-r--r--drivers/clk/at91/clk-usb.c27
-rw-r--r--drivers/clk/at91/clk-utmi.c39
-rw-r--r--drivers/clk/at91/dt-compat.c2
-rw-r--r--drivers/clk/at91/pmc.c178
-rw-r--r--drivers/clk/at91/pmc.h29
-rw-r--r--drivers/clk/at91/sam9x60.c6
-rw-r--r--drivers/clk/at91/sama5d2.c2
-rw-r--r--drivers/clk/at91/sama5d3.c2
-rw-r--r--drivers/clk/at91/sama5d4.c2
-rw-r--r--drivers/clk/at91/sama7g5.c29
-rw-r--r--drivers/clk/clk-composite.c77
-rw-r--r--drivers/clk/clk.c5
-rw-r--r--drivers/clk/imx/Kconfig7
-rw-r--r--drivers/clk/imx/Makefile2
-rw-r--r--drivers/clk/imx/clk-composite-7ulp.c88
-rw-r--r--drivers/clk/imx/clk-composite-8m.c4
-rw-r--r--drivers/clk/imx/clk-imx6ul.c9
-rw-r--r--drivers/clk/imx/clk-imx7ulp.c20
-rw-r--r--drivers/clk/imx/clk-imx8ulp.c569
-rw-r--r--drivers/clk/imx/clk-pfdv2.c23
-rw-r--r--drivers/clk/imx/clk-pllv4.c35
-rw-r--r--drivers/clk/imx/clk.h457
-rw-r--r--drivers/clk/mediatek/Kconfig28
-rw-r--r--drivers/clk/mediatek/Makefile8
-rw-r--r--drivers/clk/mediatek/clk-apmixed.c3
-rw-r--r--drivers/clk/mediatek/clk-cpumux.c3
-rw-r--r--drivers/clk/mediatek/clk-gate.c8
-rw-r--r--drivers/clk/mediatek/clk-mt6779-aud.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-cam.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-img.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-ipe.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mfg.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-mm.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-vdec.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779-venc.c4
-rw-r--r--drivers/clk/mediatek/clk-mt6779.c2
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apmixedsys.c145
-rw-r--r--drivers/clk/mediatek/clk-mt8195-apusys_pll.c92
-rw-r--r--drivers/clk/mediatek/clk-mt8195-cam.c142
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ccu.c50
-rw-r--r--drivers/clk/mediatek/clk-mt8195-img.c96
-rw-r--r--drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c68
-rw-r--r--drivers/clk/mediatek/clk-mt8195-infra_ao.c206
-rw-r--r--drivers/clk/mediatek/clk-mt8195-ipe.c51
-rw-r--r--drivers/clk/mediatek/clk-mt8195-mfg.c47
-rw-r--r--drivers/clk/mediatek/clk-mt8195-peri_ao.c62
-rw-r--r--drivers/clk/mediatek/clk-mt8195-scp_adsp.c47
-rw-r--r--drivers/clk/mediatek/clk-mt8195-topckgen.c1273
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdec.c104
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo0.c123
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vdo1.c140
-rw-r--r--drivers/clk/mediatek/clk-mt8195-venc.c69
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp0.c110
-rw-r--r--drivers/clk/mediatek/clk-mt8195-vpp1.c108
-rw-r--r--drivers/clk/mediatek/clk-mt8195-wpe.c143
-rw-r--r--drivers/clk/mediatek/clk-mtk.c29
-rw-r--r--drivers/clk/mediatek/clk-mtk.h1
-rw-r--r--drivers/clk/mediatek/clk-mux.c6
-rw-r--r--drivers/clk/mediatek/clk-pll.c6
-rw-r--r--drivers/clk/mediatek/reset.c2
-rw-r--r--drivers/clk/meson/meson8b.c163
-rw-r--r--drivers/clk/meson/meson8b.h26
-rw-r--r--drivers/clk/mvebu/ap-cpu-clk.c14
-rw-r--r--drivers/clk/qcom/Kconfig43
-rw-r--r--drivers/clk/qcom/Makefile3
-rw-r--r--drivers/clk/qcom/a53-pll.c4
-rw-r--r--drivers/clk/qcom/camcc-sc7280.c2484
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c135
-rw-r--r--drivers/clk/qcom/common.c8
-rw-r--r--drivers/clk/qcom/dispcc-sm8250.c27
-rw-r--r--drivers/clk/qcom/gcc-msm8953.c1
-rw-r--r--drivers/clk/qcom/gcc-msm8994.c1384
-rw-r--r--drivers/clk/qcom/gcc-msm8998.c705
-rw-r--r--drivers/clk/qcom/gcc-qcm2290.c3044
-rw-r--r--drivers/clk/qcom/gcc-sc7280.c85
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c80
-rw-r--r--drivers/clk/qcom/gdsc.c51
-rw-r--r--drivers/clk/qcom/gdsc.h2
-rw-r--r--drivers/clk/qcom/gpucc-msm8998.c13
-rw-r--r--drivers/clk/qcom/gpucc-sdm660.c15
-rw-r--r--drivers/clk/qcom/kpss-xcc.c4
-rw-r--r--drivers/clk/qcom/lpasscc-sc7280.c216
-rw-r--r--drivers/clk/qcom/mmcc-msm8998.c183
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c75
-rw-r--r--drivers/clk/qcom/videocc-sm8250.c27
-rw-r--r--drivers/clk/renesas/r8a7795-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a7796-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a77965-cpg-mssr.c1
-rw-r--r--drivers/clk/renesas/r8a779a0-cpg-mssr.c191
-rw-r--r--drivers/clk/renesas/r9a07g044-cpg.c83
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.c83
-rw-r--r--drivers/clk/renesas/rcar-cpg-lib.h7
-rw-r--r--drivers/clk/renesas/rcar-gen3-cpg.c89
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.c212
-rw-r--r--drivers/clk/renesas/rzg2l-cpg.h45
-rw-r--r--drivers/clk/rockchip/clk-rk3399.c17
-rw-r--r--drivers/clk/rockchip/clk-rk3568.c2
-rw-r--r--drivers/clk/samsung/Kconfig30
-rw-r--r--drivers/clk/samsung/Makefile1
-rw-r--r--drivers/clk/samsung/clk-cpu.c18
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c4
-rw-r--r--drivers/clk/samsung/clk-exynos4412-isp.c4
-rw-r--r--drivers/clk/samsung/clk-exynos5433.c124
-rw-r--r--drivers/clk/samsung/clk-exynos850.c835
-rw-r--r--drivers/clk/samsung/clk-pll.c196
-rw-r--r--drivers/clk/samsung/clk-pll.h2
-rw-r--r--drivers/clk/samsung/clk-s5pv210-audss.c4
-rw-r--r--drivers/clk/samsung/clk.c2
-rw-r--r--drivers/clk/samsung/clk.h26
-rw-r--r--drivers/clk/sunxi-ng/Kconfig1
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun4i-a10.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a100.c3
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-a64.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h6.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun50i-h616.c4
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun5i.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun6i-a31.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a23.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a33.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-a83t.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-de2.c6
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-h3.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-r40.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun8i-v3s.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c8
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-sun9i-a80.c7
-rw-r--r--drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c2
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.c96
-rw-r--r--drivers/clk/sunxi-ng/ccu_common.h6
-rw-r--r--drivers/clk/sunxi-ng/ccu_mux.h1
-rw-r--r--drivers/clk/sunxi/clk-mod0.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0.c4
-rw-r--r--drivers/clk/sunxi/clk-sun6i-ar100.c4
-rw-r--r--drivers/clk/sunxi/clk-sun8i-apb0.c4
-rw-r--r--drivers/clk/ux500/Makefile3
-rw-r--r--drivers/clk/ux500/prcc.h19
-rw-r--r--drivers/clk/ux500/reset-prcc.c181
-rw-r--r--drivers/clk/ux500/reset-prcc.h23
-rw-r--r--drivers/clk/ux500/u8500_of_clk.c30
-rw-r--r--drivers/clk/versatile/Kconfig3
-rw-r--r--drivers/clk/versatile/Makefile2
-rw-r--r--drivers/clk/versatile/clk-icst.c3
-rw-r--r--drivers/clocksource/Kconfig3
-rw-r--r--drivers/clocksource/arc_timer.c6
-rw-r--r--drivers/clocksource/arm_arch_timer.c243
-rw-r--r--drivers/clocksource/timer-riscv.c9
-rw-r--r--drivers/cpufreq/acpi-cpufreq.c3
-rw-r--r--drivers/cpufreq/amd_freq_sensitivity.c3
-rw-r--r--drivers/cpufreq/cppc_cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c6
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c16
-rw-r--r--drivers/cpufreq/intel_pstate.c120
-rw-r--r--drivers/cpufreq/mediatek-cpufreq-hw.c2
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c4
-rw-r--r--drivers/cpufreq/s3c2440-cpufreq.c2
-rw-r--r--drivers/cpufreq/s5pv210-cpufreq.c2
-rw-r--r--drivers/cpufreq/tegra186-cpufreq.c4
-rw-r--r--drivers/cpufreq/tegra194-cpufreq.c8
-rw-r--r--drivers/cpuidle/Kconfig.arm3
-rw-r--r--drivers/cpuidle/cpuidle-qcom-spm.c318
-rw-r--r--drivers/cpuidle/cpuidle-tegra.c3
-rw-r--r--drivers/cpuidle/sysfs.c5
-rw-r--r--drivers/crypto/caam/caampkc.c19
-rw-r--r--drivers/crypto/caam/regs.h3
-rw-r--r--drivers/crypto/ccp/ccp-dev-v3.c5
-rw-r--r--drivers/crypto/ccp/ccp-dev-v5.c5
-rw-r--r--drivers/crypto/ccp/sev-dev.c2
-rw-r--r--drivers/crypto/ccree/cc_driver.c3
-rw-r--r--drivers/crypto/chelsio/chcr_crypto.h14
-rw-r--r--drivers/crypto/hisilicon/qm.c74
-rw-r--r--drivers/crypto/hisilicon/zip/zip_main.c2
-rw-r--r--drivers/crypto/img-hash.c7
-rw-r--r--drivers/crypto/keembay/Kconfig19
-rw-r--r--drivers/crypto/keembay/Makefile2
-rw-r--r--drivers/crypto/keembay/keembay-ocs-ecc.c1017
-rw-r--r--drivers/crypto/marvell/cesa/cesa.c1
-rw-r--r--drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c1
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c35
-rw-r--r--drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h10
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c89
-rw-r--r--drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h13
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c87
-rw-r--r--drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h12
-rw-r--r--drivers/crypto/qat/qat_common/adf_accel_devices.h29
-rw-r--r--drivers/crypto/qat/qat_common/adf_common_drv.h9
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.c98
-rw-r--r--drivers/crypto/qat/qat_common/adf_gen2_hw_data.h27
-rw-r--r--drivers/crypto/qat/qat_common/adf_init.c5
-rw-r--r--drivers/crypto/qat/qat_common/adf_isr.c190
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.c238
-rw-r--r--drivers/crypto/qat/qat_common/adf_pf2vf_msg.h9
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf2pf_msg.c4
-rw-r--r--drivers/crypto/qat/qat_common/adf_vf_isr.c30
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c123
-rw-r--r--drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h14
-rw-r--r--drivers/crypto/s5p-sss.c2
-rw-r--r--drivers/crypto/sa2ul.c13
-rw-r--r--drivers/cxl/cxl.h61
-rw-r--r--drivers/devfreq/devfreq.c2
-rw-r--r--drivers/devfreq/event/exynos-ppmu.c12
-rw-r--r--drivers/dma-buf/Makefile2
-rw-r--r--drivers/dma-buf/dma-buf.c153
-rw-r--r--drivers/dma-buf/dma-fence.c13
-rw-r--r--drivers/dma-buf/dma-resv.c442
-rw-r--r--drivers/dma-buf/heaps/system_heap.c5
-rw-r--r--drivers/dma-buf/seqno-fence.c71
-rw-r--r--drivers/dma/pxa_dma.c3
-rw-r--r--drivers/edac/al_mc_edac.c12
-rw-r--r--drivers/edac/amd64_edac.c22
-rw-r--r--drivers/edac/edac_mc.c42
-rw-r--r--drivers/edac/edac_mc_sysfs.c8
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/edac/ti_edac.c7
-rw-r--r--drivers/firewire/core-cdev.c32
-rw-r--r--drivers/firewire/net.c14
-rw-r--r--drivers/firmware/Kconfig1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/arm_ffa/driver.c53
-rw-r--r--drivers/firmware/cirrus/Kconfig5
-rw-r--r--drivers/firmware/cirrus/Makefile3
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c3109
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/firmware/psci/psci_checker.c2
-rw-r--r--drivers/firmware/qcom_scm.c6
-rw-r--r--drivers/firmware/tegra/bpmp-debugfs.c26
-rw-r--r--drivers/firmware/tegra/bpmp-tegra210.c7
-rw-r--r--drivers/firmware/xilinx/zynqmp.c17
-rw-r--r--drivers/gpio/gpio-amdpt.c4
-rw-r--r--drivers/gpio/gpio-mlxbf2.c5
-rw-r--r--drivers/gpio/gpio-xgs-iproc.c2
-rw-r--r--drivers/gpu/drm/Kconfig11
-rw-r--r--drivers/gpu/drm/Makefile1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/aldebaran.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c143
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c256
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c122
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_df.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c871
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c669
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_job.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c35
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c57
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c755
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c394
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h25
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h19
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c175
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c119
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c191
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c10
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c30
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_0.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/athub_v2_1.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/df_v3_6.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c403
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c227
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c91
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c136
-rw-r--r--drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c40
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mca_v3_0.c9
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c73
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_ih.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c55
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c66
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.c382
-rw-r--r--drivers/gpu/drm/amd/amdgpu/nv.h12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v10_0.c22
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v11_0.c93
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v12_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/psp_v13_0.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c100
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c59
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.c346
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc15.h5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ta_ras_if.h51
-rw-r--r--drivers/gpu/drm/amd/amdgpu/umc_v6_7.c34
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c11
-rw-r--r--drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c43
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c50
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v2_0.c23
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c32
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v4_0.c52
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c28
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c44
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c62
-rw-r--r--drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c51
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c79
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c267
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_migrate.c153
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c32
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c35
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c109
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c24
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.c192
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_svm.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c19
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c1024
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h103
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c34
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c16
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c71
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c6
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c10
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c102
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table2.c14
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c55
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile9
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c258
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c40
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c381
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link.c1107
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c26
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c1822
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c962
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c496
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c361
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c165
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stat.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h302
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dsc.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_link.h38
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_stream.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_abm.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_audio.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_aux.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c22
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c164
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c128
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c49
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c57
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/Makefile36
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c84
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h37
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c316
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h83
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c107
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c150
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c630
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h46
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c131
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c209
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c125
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h86
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c72
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c203
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h74
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c1307
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h24
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c18
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c73
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c17
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c43
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c200
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/Makefile26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c263
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/Makefile4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c92
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h126
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c173
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h115
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c383
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h52
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c136
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c616
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h222
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c752
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h241
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c113
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c345
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c87
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h162
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_cp_psp.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dm_helpers.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c (renamed from drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c)2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h (renamed from drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h)6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c236
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c156
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c132
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c390
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h42
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c166
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c256
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c246
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h (renamed from drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h)0
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c291
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h94
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/Makefile29
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c195
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c259
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/core_types.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h45
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h99
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/abm.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h32
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h14
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h97
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h87
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h7
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/link_hwss.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h19
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/Makefile10
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c374
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h34
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c25
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/irq/irq_service.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/os_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dmub/dmub_srv.h10
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h275
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c12
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h2
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c21
-rw-r--r--drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c16
-rw-r--r--drivers/gpu/drm/amd/display/include/bios_parser_types.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_asic_id.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/dal_types.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h17
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h1
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_defs.h12
-rw-r--r--drivers/gpu/drm/amd/display/include/grph_object_id.h8
-rw-r--r--drivers/gpu/drm/amd/display/include/i2caux_interface.h3
-rw-r--r--drivers/gpu/drm/amd/display/include/link_service_types.h86
-rw-r--r--drivers/gpu/drm/amd/display/include/logger_types.h6
-rw-r--r--drivers/gpu/drm/amd/display/modules/color/color_gamma.c32
-rw-r--r--drivers/gpu/drm/amd/display/modules/freesync/freesync.c15
-rw-r--r--drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c6
-rw-r--r--drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h2
-rw-r--r--drivers/gpu/drm/amd/include/amd_shared.h5
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h32
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h37
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h6193
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h22091
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h8
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h5
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h132
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h151
-rwxr-xr-xdrivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h952
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h355
-rw-r--r--drivers/gpu/drm/amd/include/atombios.h2
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h4
-rw-r--r--drivers/gpu/drm/amd/include/soc15_hw_ip.h2
-rw-r--r--drivers/gpu/drm/amd/pm/amdgpu_pm.c22
-rw-r--r--drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v11_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/inc/smu_v13_0.h4
-rw-r--r--drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h4
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c135
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c14
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c17
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c64
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c119
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c117
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c96
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c6
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c33
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c2
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/ast/ast_drv.h2
-rw-r--r--drivers/gpu/drm/ast/ast_mm.c27
-rw-r--r--drivers/gpu/drm/ast/ast_mode.c18
-rw-r--r--drivers/gpu/drm/bridge/adv7511/adv7511_cec.c15
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c27
-rw-r--r--drivers/gpu/drm/bridge/cdns-dsi.c4
-rw-r--r--drivers/gpu/drm/bridge/ite-it66121.c21
-rw-r--r--drivers/gpu/drm/bridge/panel.c37
-rw-r--r--drivers/gpu/drm/bridge/parade-ps8640.c292
-rw-r--r--drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c6
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi86.c25
-rw-r--r--drivers/gpu/drm/drm_bridge.c78
-rw-r--r--drivers/gpu/drm/drm_cache.c4
-rw-r--r--drivers/gpu/drm/drm_connector.c83
-rw-r--r--drivers/gpu/drm/drm_crtc_internal.h2
-rw-r--r--drivers/gpu/drm/drm_dp_helper.c42
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c42
-rw-r--r--drivers/gpu/drm/drm_edid.c367
-rw-r--r--drivers/gpu/drm/drm_format_helper.c88
-rw-r--r--drivers/gpu/drm/drm_fourcc.c3
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c23
-rw-r--r--drivers/gpu/drm/drm_gem_vram_helper.c1
-rw-r--r--drivers/gpu/drm/drm_ioctl.c21
-rw-r--r--drivers/gpu/drm/drm_kms_helper_common.c11
-rw-r--r--drivers/gpu/drm/drm_lease.c39
-rw-r--r--drivers/gpu/drm/drm_mipi_dsi.c81
-rw-r--r--drivers/gpu/drm/drm_modeset_lock.c2
-rw-r--r--drivers/gpu/drm/drm_of.c3
-rw-r--r--drivers/gpu/drm/drm_panel_orientation_quirks.c61
-rw-r--r--drivers/gpu/drm/drm_probe_helper.c119
-rw-r--r--drivers/gpu/drm/drm_property.c9
-rw-r--r--drivers/gpu/drm/drm_sysfs.c87
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.c1
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c4
-rw-r--r--drivers/gpu/drm/gma500/backlight.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c24
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_dp.c12
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c22
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c16
-rw-r--r--drivers/gpu/drm/gma500/gem.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_device.c2
-rw-r--r--drivers/gpu/drm/gma500/gma_display.c14
-rw-r--r--drivers/gpu/drm/gma500/gtt.c18
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c10
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c12
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c11
-rw-r--r--drivers/gpu/drm/gma500/mmu.c12
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c8
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c20
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c18
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c14
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c2
-rw-r--r--drivers/gpu/drm/gma500/opregion.c14
-rw-r--r--drivers/gpu/drm/gma500/power.c20
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c16
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c147
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h24
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c31
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c10
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c26
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c2
-rw-r--r--drivers/gpu/drm/gud/Kconfig2
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c6
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h12
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c6
-rw-r--r--drivers/gpu/drm/i915/Kconfig12
-rw-r--r--drivers/gpu/drm/i915/Makefile36
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c90
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c2
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c165
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_acpi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_atomic_plane.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c43
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c1776
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.h52
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c420
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c348
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_color.c140
-rw-r--r--drivers/gpu/drm/i915/display/intel_combo_phy.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_connector.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c535
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.h7
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c672
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h23
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c2509
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h47
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c127
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.h10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c780
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c12
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_hdcp.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.c467
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_link_training.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c49
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpio_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c674
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c46
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c239
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h19
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.c437
-rw-r--r--drivers/gpu/drm/i915/display/intel_drrs.h36
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.c16
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsi_vbt.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dvo.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c606
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c274
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c292
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c321
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c70
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c20
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_lvds.c33
-rw-r--r--drivers/gpu/drm/i915/display/intel_opregion.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.c1833
-rw-r--r--drivers/gpu/drm/i915/display/intel_panel.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.c283
-rw-r--r--drivers/gpu/drm/i915/display/intel_plane_initial.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c59
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c476
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_sdvo.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.c225
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_phy.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c290
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.h6
-rw-r--r--drivers/gpu/drm/i915/display/intel_tv.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c77
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h6
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c58
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c53
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi_pll.c25
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_busy.c57
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.c514
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context.h19
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_context_types.h58
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_create.c75
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c9
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c823
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_internal.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.c33
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_lmem.h4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.h29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_types.h57
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.c91
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_pm.h1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.c70
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_region.h37
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c29
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.c201
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm.h14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c206
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h26
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_userptr.c8
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gemfs.c22
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/huge_pages.c48
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c29
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c36
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c190
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/mock_context.c5
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_engines.h14
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt.c47
-rw-r--r--drivers/gpu/drm/i915/gt/debugfs_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.c7
-rw-r--r--drivers/gpu/drm/i915/gt/gen8_ppgtt.h4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.c61
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context.h56
-rw-r--r--drivers/gpu/drm/i915/gt/intel_context_types.h153
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine.h19
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c150
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c36
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.h39
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_types.h31
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c17
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt.c52
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gpu_commands.h22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.c104
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_debugfs.h (renamed from drivers/gpu/drm/i915/gt/debugfs_gt.h)18
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c (renamed from drivers/gpu/drm/i915/gt/debugfs_engines.c)10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm.h14
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c (renamed from drivers/gpu/drm/i915/gt/debugfs_gt_pm.c)197
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h20
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_types.h12
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.c9
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gtt.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c93
-rw-r--r--drivers/gpu/drm/i915/gt/intel_migrate.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.c176
-rw-r--r--drivers/gpu/drm/i915/gt/intel_mocs.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ppgtt.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_region_lmem.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring.c3
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c22
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.h1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.c65
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c10
-rw-r--r--drivers/gpu/drm/i915/gt/intel_timeline.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c262
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.h2
-rw-r--r--drivers/gpu/drm/i915/gt/mock_engine.c2
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c4
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_execlists.c28
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_hangcheck.c10
-rw-r--r--drivers/gpu/drm/i915/gt/selftest_workarounds.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h1
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c39
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.h119
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c28
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c60
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c18
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c13
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h34
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c8
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c2298
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc.c14
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c6
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c93
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h9
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c127
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c179
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c17
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c4
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c2
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c45
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.h8
-rw-r--r--drivers/gpu/drm/i915/i915_config.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c286
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c17
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h168
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_ww.h25
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c42
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c94
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h51
-rw-r--r--drivers/gpu/drm/i915/i915_module.c4
-rw-r--r--drivers/gpu/drm/i915/i915_params.h2
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c14
-rw-r--r--drivers/gpu/drm/i915/i915_pci.h12
-rw-r--r--drivers/gpu/drm/i915/i915_query.c5
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h180
-rw-r--r--drivers/gpu/drm/i915/i915_request.c157
-rw-r--r--drivers/gpu/drm/i915/i915_request.h49
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h19
-rw-r--r--drivers/gpu/drm/i915/i915_ttm_buddy_manager.c20
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h14
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c21
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h13
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h7
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h1
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c36
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c12
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.h4
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c235
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h26
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c307
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.c73
-rw-r--r--drivers/gpu/drm/i915/intel_sbi.h23
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c577
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c447
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.h20
-rw-r--r--drivers/gpu/drm/i915/intel_wakeref.h12
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c299
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.h64
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c141
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c78
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h21
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c101
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.h32
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.c46
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_pm.h24
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.c175
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_session.h15
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.c172
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee.h17
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h36
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_types.h83
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem_gtt.c8
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_vma.c4
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c12
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_uncore.c34
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_region.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.c266
-rw-r--r--drivers/gpu/drm/i915/vlv_sideband.h (renamed from drivers/gpu/drm/i915/intel_sideband.h)34
-rw-r--r--drivers/gpu/drm/kmb/kmb_crtc.c41
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.c2
-rw-r--r--drivers/gpu/drm/kmb/kmb_drv.h10
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.c25
-rw-r--r--drivers/gpu/drm/kmb/kmb_dsi.h2
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.c43
-rw-r--r--drivers/gpu/drm/kmb/kmb_plane.h6
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c9
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c28
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c4
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c4
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c5
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c3
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c4
-rw-r--r--drivers/gpu/drm/mga/mga_ioc32.c27
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mm.c35
-rw-r--r--drivers/gpu/drm/msm/Kconfig6
-rw-r--r--drivers/gpu/drm/msm/Makefile1
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_debugfs.c6
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c10
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c7
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c256
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c147
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h19
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c39
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c267
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h92
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c56
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c8
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h3
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c70
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h13
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c4
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c89
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c18
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c12
-rw-r--r--drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c8
-rw-r--r--drivers/gpu/drm/msm/dp/dp_catalog.c64
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.c294
-rw-r--r--drivers/gpu/drm/msm/dp/dp_debug.h4
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.c143
-rw-r--r--drivers/gpu/drm/msm/dp/dp_display.h2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_drm.c13
-rw-r--r--drivers/gpu/drm/msm/dp/dp_panel.c2
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.c138
-rw-r--r--drivers/gpu/drm/msm/dp/dp_parser.h14
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h2
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c147
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c66
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c2
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.h1
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp_ctrl.c5
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c20
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c24
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy.c33
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c21
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c33
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h31
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c8
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h5
-rw-r--r--drivers/gpu/drm/msm/msm_gem_shrinker.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c35
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h11
-rw-r--r--drivers/gpu/drm/msm/msm_gpu_devfreq.c41
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h3
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c12
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c1
-rw-r--r--drivers/gpu/drm/mxsfb/mxsfb_drv.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_svm.c2
-rw-r--r--drivers/gpu/drm/omapdrm/Kconfig3
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c2
-rw-r--r--drivers/gpu/drm/panel/Kconfig27
-rw-r--r--drivers/gpu/drm/panel/Makefile2
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c743
-rw-r--r--drivers/gpu/drm/panel/panel-edp.c1896
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9881c.c12
-rw-r--r--drivers/gpu/drm/panel/panel-orisetech-otm8009a.c85
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6d27a1.c320
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c1098
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c10
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c33
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c48
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.h5
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c42
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_release.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c1
-rw-r--r--drivers/gpu/drm/r128/ati_pcigart.c11
-rw-r--r--drivers/gpu/drm/radeon/atombios.h2
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c15
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.c9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_crtc.h11
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.c108
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_drv.h26
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c12
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_group.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.c50
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_kms.h7
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_regs.h9
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_vsp.c36
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c4
-rw-r--r--drivers/gpu/drm/rockchip/Kconfig1
-rw-r--r--drivers/gpu/drm/rockchip/analogix_dp-rockchip.c2
-rw-r--r--drivers/gpu/drm/rockchip/cdn-dp-core.c4
-rw-r--r--drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c3
-rw-r--r--drivers/gpu/drm/rockchip/inno_hdmi.c4
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.c14
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_drv.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c33
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c26
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_vop_reg.c2
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c140
-rw-r--r--drivers/gpu/drm/scheduler/sched_fence.c62
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c199
-rw-r--r--drivers/gpu/drm/selftests/test-drm_damage_helper.c1
-rw-r--r--drivers/gpu/drm/shmobile/shmob_drm_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_hqvdp.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c7
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_backend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_frontend.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tv.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c18
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_csc.h4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c21
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c4
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_tcon_top.c4
-rw-r--r--drivers/gpu/drm/tegra/fb.c2
-rw-r--r--drivers/gpu/drm/tegra/plane.c2
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_drv.c4
-rw-r--r--drivers/gpu/drm/tiny/Kconfig4
-rw-r--r--drivers/gpu/drm/tiny/bochs.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c71
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c22
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c15
-rw-r--r--drivers/gpu/drm/ttm/ttm_device.c48
-rw-r--r--drivers/gpu/drm/ttm/ttm_module.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_pool.c42
-rw-r--r--drivers/gpu/drm/ttm/ttm_range_manager.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_resource.c49
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c69
-rw-r--r--drivers/gpu/drm/udl/Kconfig1
-rw-r--r--drivers/gpu/drm/v3d/Kconfig2
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.c15
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h30
-rw-r--r--drivers/gpu/drm/v3d/v3d_gem.c473
-rw-r--r--drivers/gpu/drm/v3d/v3d_sched.c44
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.c5
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_drv.h1
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_ttm.c17
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c15
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c28
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c5
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c342
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c44
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_fence.c30
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c195
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c26
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c3
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_prime.c32
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c27
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vram.c61
-rw-r--r--drivers/gpu/drm/vmwgfx/ttm_memory.c1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_bo.c15
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_msg.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c10
-rw-r--r--drivers/gpu/drm/zte/Kconfig10
-rw-r--r--drivers/gpu/drm/zte/Makefile10
-rw-r--r--drivers/gpu/drm/zte/zx_common_regs.h28
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.c184
-rw-r--r--drivers/gpu/drm/zte/zx_drm_drv.h34
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi.c760
-rw-r--r--drivers/gpu/drm/zte/zx_hdmi_regs.h66
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c537
-rw-r--r--drivers/gpu/drm/zte/zx_plane.h26
-rw-r--r--drivers/gpu/drm/zte/zx_plane_regs.h120
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc.c400
-rw-r--r--drivers/gpu/drm/zte/zx_tvenc_regs.h27
-rw-r--r--drivers/gpu/drm/zte/zx_vga.c527
-rw-r--r--drivers/gpu/drm/zte/zx_vga_regs.h33
-rw-r--r--drivers/gpu/drm/zte/zx_vou.c921
-rw-r--r--drivers/gpu/drm/zte/zx_vou.h64
-rw-r--r--drivers/gpu/drm/zte/zx_vou_regs.h212
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c31
-rw-r--r--drivers/hid/hid-cp2112.c14
-rw-r--r--drivers/hid/hid-roccat-kone.c2
-rw-r--r--drivers/hid/hid-roccat-kone.h12
-rw-r--r--drivers/hid/surface-hid/surface_hid.c4
-rw-r--r--drivers/hsi/clients/ssi_protocol.c4
-rw-r--r--drivers/hv/Kconfig1
-rw-r--r--drivers/hv/channel.c72
-rw-r--r--drivers/hv/channel_mgmt.c34
-rw-r--r--drivers/hv/connection.c101
-rw-r--r--drivers/hv/hv.c82
-rw-r--r--drivers/hv/hv_common.c12
-rw-r--r--drivers/hv/hyperv_vmbus.h3
-rw-r--r--drivers/hv/ring_buffer.c57
-rw-r--r--drivers/hwmon/Kconfig15
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/abituguru3.c6
-rw-r--r--drivers/hwmon/acpi_power_meter.c13
-rw-r--r--drivers/hwmon/ad7414.c4
-rw-r--r--drivers/hwmon/ad7418.c6
-rw-r--r--drivers/hwmon/adm1021.c4
-rw-r--r--drivers/hwmon/adm1025.c4
-rw-r--r--drivers/hwmon/adm1026.c4
-rw-r--r--drivers/hwmon/adm1029.c4
-rw-r--r--drivers/hwmon/adm1031.c6
-rw-r--r--drivers/hwmon/adt7310.c3
-rw-r--r--drivers/hwmon/adt7410.c3
-rw-r--r--drivers/hwmon/adt7x10.c3
-rw-r--r--drivers/hwmon/adt7x10.h2
-rw-r--r--drivers/hwmon/amc6821.c8
-rw-r--r--drivers/hwmon/applesmc.c2
-rw-r--r--drivers/hwmon/asb100.c4
-rw-r--r--drivers/hwmon/asc7621.c4
-rw-r--r--drivers/hwmon/atxp1.c10
-rw-r--r--drivers/hwmon/coretemp.c2
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c103
-rw-r--r--drivers/hwmon/dme1737.c4
-rw-r--r--drivers/hwmon/ds1621.c4
-rw-r--r--drivers/hwmon/ds620.c4
-rw-r--r--drivers/hwmon/emc6w201.c4
-rw-r--r--drivers/hwmon/f71805f.c4
-rw-r--r--drivers/hwmon/f71882fg.c4
-rw-r--r--drivers/hwmon/f75375s.c4
-rw-r--r--drivers/hwmon/fschmd.c4
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gl518sm.c4
-rw-r--r--drivers/hwmon/gl520sm.c4
-rw-r--r--drivers/hwmon/hwmon.c6
-rw-r--r--drivers/hwmon/i5500_temp.c114
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c4
-rw-r--r--drivers/hwmon/it87.c12
-rw-r--r--drivers/hwmon/lineage-pem.c2
-rw-r--r--drivers/hwmon/lm63.c6
-rw-r--r--drivers/hwmon/lm77.c4
-rw-r--r--drivers/hwmon/lm78.c4
-rw-r--r--drivers/hwmon/lm80.c6
-rw-r--r--drivers/hwmon/lm83.c4
-rw-r--r--drivers/hwmon/lm85.c4
-rw-r--r--drivers/hwmon/lm87.c4
-rw-r--r--drivers/hwmon/lm90.c75
-rw-r--r--drivers/hwmon/lm92.c4
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/ltc4151.c2
-rw-r--r--drivers/hwmon/ltc4215.c2
-rw-r--r--drivers/hwmon/ltc4261.c4
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1619.c4
-rw-r--r--drivers/hwmon/max1668.c4
-rw-r--r--drivers/hwmon/max31722.c8
-rw-r--r--drivers/hwmon/max6620.c514
-rw-r--r--drivers/hwmon/max6639.c4
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/mlxreg-fan.c138
-rw-r--r--drivers/hwmon/nct6683.c3
-rw-r--r--drivers/hwmon/nct6775.c717
-rw-r--r--drivers/hwmon/nct7802.c131
-rw-r--r--drivers/hwmon/pc87360.c4
-rw-r--r--drivers/hwmon/pmbus/ibm-cffps.c23
-rw-r--r--drivers/hwmon/pmbus/lm25066.c88
-rw-r--r--drivers/hwmon/raspberrypi-hwmon.c2
-rw-r--r--drivers/hwmon/sch5636.c4
-rw-r--r--drivers/hwmon/sht21.c4
-rw-r--r--drivers/hwmon/sis5595.c4
-rw-r--r--drivers/hwmon/smm665.c2
-rw-r--r--drivers/hwmon/smsc47b397.c4
-rw-r--r--drivers/hwmon/smsc47m192.c4
-rw-r--r--drivers/hwmon/thmc50.c4
-rw-r--r--drivers/hwmon/tmp103.c105
-rw-r--r--drivers/hwmon/tmp401.c31
-rw-r--r--drivers/hwmon/tmp421.c186
-rw-r--r--drivers/hwmon/via686a.c4
-rw-r--r--drivers/hwmon/vt1211.c4
-rw-r--r--drivers/hwmon/vt8231.c4
-rw-r--r--drivers/hwmon/w83627ehf.c8
-rw-r--r--drivers/hwmon/w83627hf.c6
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83791d.c4
-rw-r--r--drivers/hwmon/w83792d.c6
-rw-r--r--drivers/hwmon/w83793.c6
-rw-r--r--drivers/hwmon/w83795.c6
-rw-r--r--drivers/hwmon/w83l785ts.c4
-rw-r--r--drivers/hwmon/w83l786ng.c4
-rw-r--r--drivers/hwmon/xgene-hwmon.c35
-rw-r--r--drivers/i2c/busses/i2c-virtio.c56
-rw-r--r--drivers/i2c/busses/i2c-xgene-slimpro.c33
-rw-r--r--drivers/idle/intel_idle.c13
-rw-r--r--drivers/infiniband/core/cma.c34
-rw-r--r--drivers/infiniband/core/cma_priv.h11
-rw-r--r--drivers/infiniband/core/counters.c40
-rw-r--r--drivers/infiniband/core/device.c1
-rw-r--r--drivers/infiniband/core/iwpm_util.c2
-rw-r--r--drivers/infiniband/core/nldev.c278
-rw-r--r--drivers/infiniband/core/rw.c66
-rw-r--r--drivers/infiniband/core/sa_query.c6
-rw-r--r--drivers/infiniband/core/sysfs.c58
-rw-r--r--drivers/infiniband/core/umem_dmabuf.c51
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c3
-rw-r--r--drivers/infiniband/core/verbs.c49
-rw-r--r--drivers/infiniband/hw/bnxt_re/bnxt_re.h19
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.c380
-rw-r--r--drivers/infiniband/hw/bnxt_re/hw_counters.h30
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.c45
-rw-r--r--drivers/infiniband/hw/bnxt_re/ib_verbs.h1
-rw-r--r--drivers/infiniband/hw/bnxt_re/main.c16
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_fp.c15
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.c6
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_rcfw.h2
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.c22
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_res.h10
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.c57
-rw-r--r--drivers/infiniband/hw/bnxt_re/qplib_sp.h33
-rw-r--r--drivers/infiniband/hw/bnxt_re/roce_hsi.h85
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c1
-rw-r--r--drivers/infiniband/hw/cxgb4/provider.c22
-rw-r--r--drivers/infiniband/hw/efa/efa.h23
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_cmds_defs.h100
-rw-r--r--drivers/infiniband/hw/efa/efa_admin_defs.h41
-rw-r--r--drivers/infiniband/hw/efa/efa_com.c164
-rw-r--r--drivers/infiniband/hw/efa/efa_com.h38
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.c35
-rw-r--r--drivers/infiniband/hw/efa/efa_com_cmd.h10
-rw-r--r--drivers/infiniband/hw/efa/efa_main.c182
-rw-r--r--drivers/infiniband/hw/efa/efa_regs_defs.h7
-rw-r--r--drivers/infiniband/hw/efa/efa_verbs.c213
-rw-r--r--drivers/infiniband/hw/hfi1/Kconfig4
-rw-r--r--drivers/infiniband/hw/hfi1/chip.c3
-rw-r--r--drivers/infiniband/hw/hfi1/driver.c3
-rw-r--r--drivers/infiniband/hw/hfi1/efivar.c10
-rw-r--r--drivers/infiniband/hw/hfi1/init.c3
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib.h76
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_main.c2
-rw-r--r--drivers/infiniband/hw/hfi1/ipoib_tx.c314
-rw-r--r--drivers/infiniband/hw/hfi1/pio.c9
-rw-r--r--drivers/infiniband/hw/hfi1/trace_tx.h71
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c5
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c53
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_device.h26
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v1.c10
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_hw_v2.c32
-rw-r--r--drivers/infiniband/hw/hns/hns_roce_main.c142
-rw-r--r--drivers/infiniband/hw/irdma/cm.h12
-rw-r--r--drivers/infiniband/hw/irdma/ctrl.c43
-rw-r--r--drivers/infiniband/hw/irdma/hw.c7
-rw-r--r--drivers/infiniband/hw/irdma/main.h5
-rw-r--r--drivers/infiniband/hw/irdma/osdep.h1
-rw-r--r--drivers/infiniband/hw/irdma/protos.h2
-rw-r--r--drivers/infiniband/hw/irdma/trace_cm.h8
-rw-r--r--drivers/infiniband/hw/irdma/type.h3
-rw-r--r--drivers/infiniband/hw/irdma/uk.c105
-rw-r--r--drivers/infiniband/hw/irdma/user.h32
-rw-r--r--drivers/infiniband/hw/irdma/utils.c49
-rw-r--r--drivers/infiniband/hw/irdma/verbs.c154
-rw-r--r--drivers/infiniband/hw/irdma/ws.c13
-rw-r--r--drivers/infiniband/hw/mlx4/alias_GUID.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c46
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h2
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c6
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.c26
-rw-r--r--drivers/infiniband/hw/mlx5/cmd.h2
-rw-r--r--drivers/infiniband/hw/mlx5/counters.c283
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c13
-rw-r--r--drivers/infiniband/hw/mlx5/devx.h2
-rw-r--r--drivers/infiniband/hw/mlx5/fs.c187
-rw-r--r--drivers/infiniband/hw/mlx5/main.c55
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h59
-rw-r--r--drivers/infiniband/hw/mlx5/mr.c111
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c79
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/infiniband/hw/mlx5/wr.c10
-rw-r--r--drivers/infiniband/hw/qedr/main.c3
-rw-r--r--drivers/infiniband/hw/qedr/qedr.h1
-rw-r--r--drivers/infiniband/hw/qedr/qedr_iw_cm.c2
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c30
-rw-r--r--drivers/infiniband/hw/qedr/verbs.h1
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c5
-rw-r--r--drivers/infiniband/hw/qib/qib_user_sdma.c33
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.c2
-rw-r--r--drivers/infiniband/hw/usnic/usnic_fwd.h2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_av.c20
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c57
-rw-r--r--drivers/infiniband/sw/rxe/rxe_cq.c28
-rw-r--r--drivers/infiniband/sw/rxe/rxe_hw_counters.c42
-rw-r--r--drivers/infiniband/sw/rxe/rxe_loc.h2
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mr.c267
-rw-r--r--drivers/infiniband/sw/rxe/rxe_mw.c36
-rw-r--r--drivers/infiniband/sw/rxe/rxe_opcode.h6
-rw-r--r--drivers/infiniband/sw/rxe/rxe_param.h34
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.c41
-rw-r--r--drivers/infiniband/sw/rxe/rxe_pool.h15
-rw-r--r--drivers/infiniband/sw/rxe/rxe_qp.c16
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.c30
-rw-r--r--drivers/infiniband/sw/rxe/rxe_queue.h292
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c65
-rw-r--r--drivers/infiniband/sw/rxe/rxe_resp.c50
-rw-r--r--drivers/infiniband/sw/rxe/rxe_srq.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.c139
-rw-r--r--drivers/infiniband/sw/rxe/rxe_verbs.h60
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ib.c9
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c18
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Kconfig4
-rw-r--r--drivers/infiniband/ulp/opa_vnic/Makefile3
-rw-r--r--drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c7
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c49
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c11
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-clt.h13
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-pri.h2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c2
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.c6
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs-srv.h3
-rw-r--r--drivers/infiniband/ulp/rtrs/rtrs.c31
-rw-r--r--drivers/input/misc/axp20x-pek.c26
-rw-r--r--drivers/iommu/amd/init.c16
-rw-r--r--drivers/iommu/amd/iommu.c3
-rw-r--r--drivers/iommu/amd/iommu_v2.c3
-rw-r--r--drivers/iommu/iommu.c3
-rw-r--r--drivers/irqchip/Kconfig25
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-apple-aic.c20
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c13
-rw-r--r--drivers/irqchip/irq-aspeed-vic.c2
-rw-r--r--drivers/irqchip/irq-ativic32.c22
-rw-r--r--drivers/irqchip/irq-atmel-aic.c2
-rw-r--r--drivers/irqchip/irq-atmel-aic5.c2
-rw-r--r--drivers/irqchip/irq-bcm2835.c2
-rw-r--r--drivers/irqchip/irq-bcm2836.c2
-rw-r--r--drivers/irqchip/irq-bcm6345-l1.c6
-rw-r--r--drivers/irqchip/irq-bcm7038-l1.c47
-rw-r--r--drivers/irqchip/irq-bcm7120-l2.c21
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c16
-rw-r--r--drivers/irqchip/irq-clps711x.c8
-rw-r--r--drivers/irqchip/irq-csky-apb-intc.c2
-rw-r--r--drivers/irqchip/irq-csky-mpintc.c4
-rw-r--r--drivers/irqchip/irq-davinci-aintc.c2
-rw-r--r--drivers/irqchip/irq-davinci-cp-intc.c2
-rw-r--r--drivers/irqchip/irq-digicolor.c2
-rw-r--r--drivers/irqchip/irq-dw-apb-ictl.c2
-rw-r--r--drivers/irqchip/irq-ftintc010.c2
-rw-r--r--drivers/irqchip/irq-gic-v3.c4
-rw-r--r--drivers/irqchip/irq-gic.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-ixp4xx.c4
-rw-r--r--drivers/irqchip/irq-lpc32xx.c2
-rw-r--r--drivers/irqchip/irq-mchp-eic.c280
-rw-r--r--drivers/irqchip/irq-meson-gpio.c15
-rw-r--r--drivers/irqchip/irq-mips-gic.c37
-rw-r--r--drivers/irqchip/irq-mmp.c4
-rw-r--r--drivers/irqchip/irq-mvebu-icu.c4
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c4
-rw-r--r--drivers/irqchip/irq-mxs.c2
-rw-r--r--drivers/irqchip/irq-nvic.c17
-rw-r--r--drivers/irqchip/irq-omap-intc.c2
-rw-r--r--drivers/irqchip/irq-or1k-pic.c2
-rw-r--r--drivers/irqchip/irq-orion.c4
-rw-r--r--drivers/irqchip/irq-rda-intc.c2
-rw-r--r--drivers/irqchip/irq-riscv-intc.c2
-rw-r--r--drivers/irqchip/irq-sa11x0.c4
-rw-r--r--drivers/irqchip/irq-stm32-exti.c4
-rw-r--r--drivers/irqchip/irq-sun4i.c2
-rw-r--r--drivers/irqchip/irq-ti-sci-inta.c4
-rw-r--r--drivers/irqchip/irq-ts4800.c4
-rw-r--r--drivers/irqchip/irq-versatile-fpga.c2
-rw-r--r--drivers/irqchip/irq-vic.c2
-rw-r--r--drivers/irqchip/irq-vt8500.c2
-rw-r--r--drivers/irqchip/irq-wpcm450-aic.c2
-rw-r--r--drivers/irqchip/irq-zevio.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcpci.c8
-rw-r--r--drivers/leds/led-class-flash.c2
-rw-r--r--drivers/leds/led-triggers.c41
-rw-r--r--drivers/leds/trigger/Kconfig1
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/mailbox/Kconfig12
-rw-r--r--drivers/mailbox/Makefile2
-rw-r--r--drivers/mailbox/apple-mailbox.c384
-rw-r--r--drivers/mailbox/bcm2835-mailbox.c4
-rw-r--r--drivers/mailbox/hi3660-mailbox.c4
-rw-r--r--drivers/mailbox/hi6220-mailbox.c7
-rw-r--r--drivers/mailbox/imx-mailbox.c124
-rw-r--r--drivers/mailbox/mailbox-altera.c5
-rw-r--r--drivers/mailbox/mailbox-sti.c4
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c4
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c15
-rw-r--r--drivers/mailbox/omap-mailbox.c4
-rw-r--r--drivers/mailbox/pcc.c598
-rw-r--r--drivers/mailbox/platform_mhu.c4
-rw-r--r--drivers/mailbox/qcom-apcs-ipc-mailbox.c31
-rw-r--r--drivers/mailbox/stm32-ipcc.c4
-rw-r--r--drivers/mailbox/sun6i-msgbox.c9
-rw-r--r--drivers/md/bcache/bcache.h6
-rw-r--r--drivers/md/bcache/bcache_ondisk.h445
-rw-r--r--drivers/md/bcache/bset.h2
-rw-r--r--drivers/md/bcache/btree.c2
-rw-r--r--drivers/md/bcache/debug.c15
-rw-r--r--drivers/md/bcache/features.c2
-rw-r--r--drivers/md/bcache/features.h3
-rw-r--r--drivers/md/bcache/io.c16
-rw-r--r--drivers/md/bcache/request.c19
-rw-r--r--drivers/md/bcache/request.h4
-rw-r--r--drivers/md/bcache/super.c91
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/bcache/sysfs.h18
-rw-r--r--drivers/md/bcache/util.h29
-rw-r--r--drivers/md/bcache/writeback.c2
-rw-r--r--drivers/md/dm-bio-record.h1
-rw-r--r--drivers/md/dm-bufio.c2
-rw-r--r--drivers/md/dm-cache-metadata.c2
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-clone-target.c2
-rw-r--r--drivers/md/dm-core.h4
-rw-r--r--drivers/md/dm-crypt.c1
-rw-r--r--drivers/md/dm-dust.c5
-rw-r--r--drivers/md/dm-ebs-target.c2
-rw-r--r--drivers/md/dm-era-target.c2
-rw-r--r--drivers/md/dm-exception-store.h2
-rw-r--r--drivers/md/dm-flakey.c3
-rw-r--r--drivers/md/dm-ima.c1
-rw-r--r--drivers/md/dm-integrity.c6
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log-writes.c4
-rw-r--r--drivers/md/dm-log.c2
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-ps-historical-service-time.c1
-rw-r--r--drivers/md/dm-raid.c6
-rw-r--r--drivers/md/dm-rq.c1
-rw-r--r--drivers/md/dm-switch.c2
-rw-r--r--drivers/md/dm-table.c172
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-thin.c2
-rw-r--r--drivers/md/dm-verity-target.c4
-rw-r--r--drivers/md/dm-writecache.c2
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c42
-rw-r--r--drivers/md/md.c130
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid10.c2
-rw-r--r--drivers/md/raid5.c7
-rw-r--r--drivers/media/cec/Kconfig4
-rw-r--r--drivers/media/cec/core/cec-pin.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec-g12a.c4
-rw-r--r--drivers/media/cec/platform/meson/ao-cec.c4
-rw-r--r--drivers/media/cec/platform/s5p/s5p_cec.c4
-rw-r--r--drivers/media/cec/platform/sti/stih-cec.c4
-rw-r--r--drivers/media/cec/platform/stm32/stm32-cec.c4
-rw-r--r--drivers/media/common/siano/smscoreapi.c7
-rw-r--r--drivers/media/common/videobuf2/videobuf2-core.c149
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-contig.c198
-rw-r--r--drivers/media/common/videobuf2/videobuf2-dma-sg.c39
-rw-r--r--drivers/media/common/videobuf2/videobuf2-v4l2.c59
-rw-r--r--drivers/media/common/videobuf2/videobuf2-vmalloc.c30
-rw-r--r--drivers/media/dvb-core/dvb_net.c8
-rw-r--r--drivers/media/dvb-core/dvb_vb2.c2
-rw-r--r--drivers/media/dvb-frontends/cxd2099.c9
-rw-r--r--drivers/media/dvb-frontends/cxd2099.h9
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_priv.h2
-rw-r--r--drivers/media/dvb-frontends/mb86a20s.c4
-rw-r--r--drivers/media/dvb-frontends/mn88443x.c18
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.c9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx.h9
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_defs.h4
-rw-r--r--drivers/media/dvb-frontends/mxl5xx_regs.h10
-rw-r--r--drivers/media/dvb-frontends/mxl692.c9
-rw-r--r--drivers/media/dvb-frontends/mxl692.h9
-rw-r--r--drivers/media/dvb-frontends/mxl692_defs.h9
-rw-r--r--drivers/media/dvb-frontends/rtl2832_sdr.c5
-rw-r--r--drivers/media/dvb-frontends/stv0910.c9
-rw-r--r--drivers/media/dvb-frontends/stv0910.h9
-rw-r--r--drivers/media/dvb-frontends/stv6111.c9
-rw-r--r--drivers/media/dvb-frontends/stv6111.h9
-rw-r--r--drivers/media/firewire/firedtv-avc.c14
-rw-r--r--drivers/media/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/i2c/Kconfig27
-rw-r--r--drivers/media/i2c/Makefile2
-rw-r--r--drivers/media/i2c/adv7604.c15
-rw-r--r--drivers/media/i2c/dw9714.c14
-rw-r--r--drivers/media/i2c/hi846.c2190
-rw-r--r--drivers/media/i2c/imx258.c12
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c1
-rw-r--r--drivers/media/i2c/max9286.c17
-rw-r--r--drivers/media/i2c/mt9p031.c80
-rw-r--r--drivers/media/i2c/ov13858.c11
-rw-r--r--drivers/media/i2c/ov13b10.c1491
-rw-r--r--drivers/media/i2c/ov5670.c11
-rw-r--r--drivers/media/i2c/ov8856.c83
-rw-r--r--drivers/media/i2c/st-mipid02.c22
-rw-r--r--drivers/media/i2c/tda1997x.c131
-rw-r--r--drivers/media/i2c/tda1997x_regs.h3
-rw-r--r--drivers/media/i2c/video-i2c.c21
-rw-r--r--drivers/media/mc/Kconfig8
-rw-r--r--drivers/media/pci/cobalt/cobalt-driver.c4
-rw-r--r--drivers/media/pci/cx18/cx18-driver.c2
-rw-r--r--drivers/media/pci/cx18/cx18-ioctl.c4
-rw-r--r--drivers/media/pci/cx18/cx18-queue.c13
-rw-r--r--drivers/media/pci/cx18/cx18-streams.c24
-rw-r--r--drivers/media/pci/cx23885/cx23885-alsa.c3
-rw-r--r--drivers/media/pci/ddbridge/ddbridge-main.c4
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.c60
-rw-r--r--drivers/media/pci/intel/ipu3/cio2-bridge.h9
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2-main.c274
-rw-r--r--drivers/media/pci/intel/ipu3/ipu3-cio2.h4
-rw-r--r--drivers/media/pci/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/pci/ivtv/ivtv-ioctl.c8
-rw-r--r--drivers/media/pci/ivtv/ivtv-queue.c18
-rw-r--r--drivers/media/pci/ivtv/ivtv-streams.c22
-rw-r--r--drivers/media/pci/ivtv/ivtv-udma.c19
-rw-r--r--drivers/media/pci/ivtv/ivtv-yuv.c10
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c12
-rw-r--r--drivers/media/pci/netup_unidvb/netup_unidvb_core.c29
-rw-r--r--drivers/media/pci/pluto2/pluto2.c20
-rw-r--r--drivers/media/pci/pt1/pt1.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c53
-rw-r--r--drivers/media/pci/saa7134/saa7134-dvb.c29
-rw-r--r--drivers/media/pci/saa7134/saa7134.h1
-rw-r--r--drivers/media/pci/saa7164/saa7164-api.c2
-rw-r--r--drivers/media/pci/tw5864/tw5864-core.c2
-rw-r--r--drivers/media/platform/Kconfig20
-rw-r--r--drivers/media/platform/Makefile1
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-core.c311
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.c23
-rw-r--r--drivers/media/platform/allegro-dvt/allegro-mail.h10
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.c74
-rw-r--r--drivers/media/platform/allegro-dvt/nal-h264.h200
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.c202
-rw-r--r--drivers/media/platform/allegro-dvt/nal-hevc.h189
-rw-r--r--drivers/media/platform/am437x/am437x-vpfe.c23
-rw-r--r--drivers/media/platform/aspeed-video.c133
-rw-r--r--drivers/media/platform/atmel/atmel-isc-base.c29
-rw-r--r--drivers/media/platform/atmel/atmel-isc.h2
-rw-r--r--drivers/media/platform/atmel/atmel-isi.c17
-rw-r--r--drivers/media/platform/atmel/atmel-sama5d2-isc.c54
-rw-r--r--drivers/media/platform/atmel/atmel-sama7g5-isc.c37
-rw-r--r--drivers/media/platform/cadence/cdns-csi2rx.c18
-rw-r--r--drivers/media/platform/cadence/cdns-csi2tx.c4
-rw-r--r--drivers/media/platform/coda/imx-vdoa.c3
-rw-r--r--drivers/media/platform/davinci/vpbe_venc.c9
-rw-r--r--drivers/media/platform/davinci/vpif.c5
-rw-r--r--drivers/media/platform/davinci/vpif_capture.c21
-rw-r--r--drivers/media/platform/davinci/vpss.c10
-rw-r--r--drivers/media/platform/exynos-gsc/gsc-core.c3
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c20
-rw-r--r--drivers/media/platform/exynos4-is/mipi-csis.c4
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.c109
-rw-r--r--drivers/media/platform/imx-jpeg/mxc-jpeg.h2
-rw-r--r--drivers/media/platform/imx-pxp.c4
-rw-r--r--drivers/media/platform/marvell-ccic/cafe-driver.c9
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c10
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c6
-rw-r--r--drivers/media/platform/meson/ge2d/ge2d.c10
-rw-r--r--drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c4
-rw-r--r--drivers/media/platform/mtk-vcodec/Makefile3
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c820
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h27
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c65
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c628
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c360
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h59
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c148
-rw-r--r--drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c75
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c774
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.c3
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_drv_if.h1
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h23
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.c43
-rw-r--r--drivers/media/platform/mtk-vcodec/vdec_vpu_if.h5
-rw-r--r--drivers/media/platform/mtk-vpu/mtk_vpu.c5
-rw-r--r--drivers/media/platform/mx2_emmaprp.c4
-rw-r--r--drivers/media/platform/omap/omap_vout.c18
-rw-r--r--drivers/media/platform/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/platform/omap/omap_voutdef.h2
-rw-r--r--drivers/media/platform/omap3isp/isp.c21
-rw-r--r--drivers/media/platform/pxa_camera.c26
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-170.c9
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-1.c28
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-7.c18
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe-4-8.c17
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.c4
-rw-r--r--drivers/media/platform/qcom/camss/camss-vfe.h2
-rw-r--r--drivers/media/platform/qcom/camss/camss.c18
-rw-r--r--drivers/media/platform/qcom/venus/core.c135
-rw-r--r--drivers/media/platform/qcom/venus/core.h9
-rw-r--r--drivers/media/platform/qcom/venus/firmware.c42
-rw-r--r--drivers/media/platform/qcom/venus/helpers.c81
-rw-r--r--drivers/media/platform/qcom/venus/helpers.h4
-rw-r--r--drivers/media/platform/qcom/venus/hfi.c48
-rw-r--r--drivers/media/platform/qcom/venus/hfi_cmds.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_helper.h14
-rw-r--r--drivers/media/platform/qcom/venus/hfi_msgs.c7
-rw-r--r--drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.c13
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform.h2
-rw-r--r--drivers/media/platform/qcom/venus/hfi_platform_v6.c6
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus.c4
-rw-r--r--drivers/media/platform/qcom/venus/hfi_venus_io.h2
-rw-r--r--drivers/media/platform/qcom/venus/pm_helpers.c13
-rw-r--r--drivers/media/platform/qcom/venus/vdec.c67
-rw-r--r--drivers/media/platform/qcom/venus/venc.c116
-rw-r--r--drivers/media/platform/rcar-isp.c515
-rw-r--r--drivers/media/platform/rcar-vin/rcar-core.c959
-rw-r--r--drivers/media/platform/rcar-vin/rcar-csi2.c241
-rw-r--r--drivers/media/platform/rcar-vin/rcar-dma.c40
-rw-r--r--drivers/media/platform/rcar-vin/rcar-v4l2.c25
-rw-r--r--drivers/media/platform/rcar-vin/rcar-vin.h25
-rw-r--r--drivers/media/platform/rcar_drif.c17
-rw-r--r--drivers/media/platform/rcar_fdp1.c4
-rw-r--r--drivers/media/platform/rcar_jpu.c4
-rw-r--r--drivers/media/platform/renesas-ceu.c33
-rw-r--r--drivers/media/platform/rockchip/rga/rga.c5
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c9
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-common.h44
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c98
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c29
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-params.c557
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h406
-rw-r--r--drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c107
-rw-r--r--drivers/media/platform/s3c-camif/camif-core.c6
-rw-r--r--drivers/media/platform/s5p-g2d/g2d.c4
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c5
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c9
-rw-r--r--drivers/media/platform/sti/bdisp/bdisp-v4l2.c3
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c1
-rw-r--r--drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c1
-rw-r--r--drivers/media/platform/sti/hva/hva-hw.c4
-rw-r--r--drivers/media/platform/stm32/stm32-dcmi.c37
-rw-r--r--drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c16
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c33
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h2
-rw-r--r--drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c8
-rw-r--r--drivers/media/platform/sunxi/sun8i-di/sun8i-di.c4
-rw-r--r--drivers/media/platform/ti-vpe/cal.c16
-rw-r--r--drivers/media/platform/via-camera.c6
-rw-r--r--drivers/media/platform/video-mux.c17
-rw-r--r--drivers/media/platform/vsp1/vsp1_drm.c8
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c18
-rw-r--r--drivers/media/platform/vsp1/vsp1_regs.h11
-rw-r--r--drivers/media/platform/vsp1/vsp1_wpf.c2
-rw-r--r--drivers/media/platform/xilinx/xilinx-vip.c4
-rw-r--r--drivers/media/platform/xilinx/xilinx-vipp.c17
-rw-r--r--drivers/media/radio/radio-wl1273.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-i2c.c2
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c2
-rw-r--r--drivers/media/rc/Kconfig8
-rw-r--r--drivers/media/rc/Makefile1
-rw-r--r--drivers/media/rc/img-ir/img-ir-core.c4
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/ir-hix5hd2.c4
-rw-r--r--drivers/media/rc/ir_toy.c63
-rw-r--r--drivers/media/rc/ite-cir.c2
-rw-r--r--drivers/media/rc/mceusb.c2
-rw-r--r--drivers/media/rc/meson-ir-tx.c1
-rw-r--r--drivers/media/rc/meson-ir.c4
-rw-r--r--drivers/media/rc/mtk-cir.c4
-rw-r--r--drivers/media/rc/sir_ir.c438
-rw-r--r--drivers/media/rc/st_rc.c5
-rw-r--r--drivers/media/rc/streamzap.c1
-rw-r--r--drivers/media/rc/sunxi-cir.c4
-rw-r--r--drivers/media/spi/cxd2880-spi.c2
-rw-r--r--drivers/media/test-drivers/vidtv/vidtv_bridge.c4
-rw-r--r--drivers/media/test-drivers/vim2m.c5
-rw-r--r--drivers/media/test-drivers/vimc/vimc-scaler.c366
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.c341
-rw-r--r--drivers/media/test-drivers/vivid/vivid-cec.h9
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.c52
-rw-r--r--drivers/media/test-drivers/vivid/vivid-core.h23
-rw-r--r--drivers/media/tuners/mxl5007t.c9
-rw-r--r--drivers/media/tuners/tuner-types.c4
-rw-r--r--drivers/media/usb/airspy/airspy.c5
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c16
-rw-r--r--drivers/media/usb/dvb-usb/az6027.c1
-rw-r--r--drivers/media/usb/dvb-usb/dibusb-common.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-cards.c12
-rw-r--r--drivers/media/usb/em28xx/em28xx-core.c5
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-mi1320.c87
-rw-r--r--drivers/media/usb/gspca/gl860/gl860-ov9655.c169
-rw-r--r--drivers/media/usb/gspca/gspca.c2
-rw-r--r--drivers/media/usb/gspca/m5602/m5602_ov7660.h1
-rw-r--r--drivers/media/usb/gspca/sn9c20x.c22
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-ctrl.c25
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c4
-rw-r--r--drivers/media/usb/stkwebcam/stk-webcam.c11
-rw-r--r--drivers/media/usb/tm6000/tm6000-video.c3
-rw-r--r--drivers/media/usb/ttusb-dec/ttusb_dec.c10
-rw-r--r--drivers/media/usb/uvc/uvc_ctrl.c260
-rw-r--r--drivers/media/usb/uvc/uvc_driver.c16
-rw-r--r--drivers/media/usb/uvc/uvc_metadata.c2
-rw-r--r--drivers/media/usb/uvc/uvc_v4l2.c103
-rw-r--r--drivers/media/usb/uvc/uvc_video.c5
-rw-r--r--drivers/media/usb/uvc/uvcvideo.h17
-rw-r--r--drivers/media/v4l2-core/v4l2-async.c168
-rw-r--r--drivers/media/v4l2-core/v4l2-common.c3
-rw-r--r--drivers/media/v4l2-core/v4l2-compat-ioctl32.c9
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-core.c6
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls-defs.c5
-rw-r--r--drivers/media/v4l2-core/v4l2-fwnode.c83
-rw-r--r--drivers/media/v4l2-core/v4l2-ioctl.c77
-rw-r--r--drivers/memory/Kconfig5
-rw-r--r--drivers/memory/fsl_ifc.c13
-rw-r--r--drivers/memory/jedec_ddr.h47
-rw-r--r--drivers/memory/jedec_ddr_data.c41
-rw-r--r--drivers/memory/mtk-smi.c596
-rw-r--r--drivers/memory/of_memory.c87
-rw-r--r--drivers/memory/of_memory.h9
-rw-r--r--drivers/memory/renesas-rpc-if.c159
-rw-r--r--drivers/memory/samsung/Kconfig13
-rw-r--r--drivers/memory/tegra/Kconfig1
-rw-r--r--drivers/memory/tegra/mc.c25
-rw-r--r--drivers/memory/tegra/tegra186-emc.c5
-rw-r--r--drivers/memory/tegra/tegra20-emc.c200
-rw-r--r--drivers/memory/tegra/tegra210-emc-cc-r21021.c2
-rw-r--r--drivers/memory/tegra/tegra210-emc-core.c6
-rw-r--r--drivers/memory/tegra/tegra30-emc.c4
-rw-r--r--drivers/memstick/core/ms_block.c8
-rw-r--r--drivers/memstick/core/mspro_block.c6
-rw-r--r--drivers/memstick/host/jmb38x_ms.c5
-rw-r--r--drivers/memstick/host/r592.c8
-rw-r--r--drivers/message/fusion/mptlan.c2
-rw-r--r--drivers/misc/mei/Kconfig2
-rw-r--r--drivers/misc/mei/Makefile1
-rw-r--r--drivers/misc/mei/pxp/Kconfig13
-rw-r--r--drivers/misc/mei/pxp/Makefile7
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.c229
-rw-r--r--drivers/misc/mei/pxp/mei_pxp.h18
-rw-r--r--drivers/misc/sgi-xp/xpnet.c9
-rw-r--r--drivers/mmc/core/block.c27
-rw-r--r--drivers/mmc/core/crypto.c11
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/mmc_ops.h1
-rw-r--r--drivers/mmc/core/sd.c1
-rw-r--r--drivers/mmc/core/slot-gpio.c42
-rw-r--r--drivers/mmc/host/Kconfig10
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/cqhci-core.c7
-rw-r--r--drivers/mmc/host/cqhci-crypto.c33
-rw-r--r--drivers/mmc/host/dw_mmc-exynos.c26
-rw-r--r--drivers/mmc/host/dw_mmc.c42
-rw-r--r--drivers/mmc/host/mmci.c4
-rw-r--r--drivers/mmc/host/moxart-mmc.c29
-rw-r--r--drivers/mmc/host/mtk-sd.c137
-rw-r--r--drivers/mmc/host/mxs-mmc.c10
-rw-r--r--drivers/mmc/host/omap_hsmmc.c12
-rw-r--r--drivers/mmc/host/sdhci-acpi.c14
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c33
-rw-r--r--drivers/mmc/host/sdhci-of-arasan.c29
-rw-r--r--drivers/mmc/host/sdhci-omap.c322
-rw-r--r--drivers/mmc/host/sdhci-pci-core.c159
-rw-r--r--drivers/mmc/host/sdhci-pci-data.c6
-rw-r--r--drivers/mmc/host/sdhci-pci-o2micro.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.h5
-rw-r--r--drivers/mmc/host/sdhci-s3c.c1
-rw-r--r--drivers/mmc/host/sdhci-sprd.c13
-rw-r--r--drivers/mmc/host/sdhci.c48
-rw-r--r--drivers/mmc/host/sdhci.h2
-rw-r--r--drivers/mmc/host/tmio_mmc_core.c17
-rw-r--r--drivers/mmc/host/vub300.c18
-rw-r--r--drivers/mtd/mtd_blkdevs.c6
-rw-r--r--drivers/mtd/mtdsuper.c1
-rw-r--r--drivers/net/Kconfig18
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/amt.c3296
-rw-r--r--drivers/net/appletalk/cops.c2
-rw-r--r--drivers/net/appletalk/ltpc.c3
-rw-r--r--drivers/net/arcnet/arc-rimi.c5
-rw-r--r--drivers/net/arcnet/arcdevice.h5
-rw-r--r--drivers/net/arcnet/com20020-isa.c2
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/arcnet/com20020.c4
-rw-r--r--drivers/net/arcnet/com20020_cs.c2
-rw-r--r--drivers/net/arcnet/com90io.c2
-rw-r--r--drivers/net/arcnet/com90xx.c3
-rw-r--r--drivers/net/bareudp.c7
-rw-r--r--drivers/net/bonding/bond_alb.c28
-rw-r--r--drivers/net/bonding/bond_main.c4
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/dev/bittiming.c30
-rw-r--r--drivers/net/can/dev/netlink.c221
-rw-r--r--drivers/net/can/flexcan.c68
-rw-r--r--drivers/net/can/janz-ican3.c2
-rw-r--r--drivers/net/can/m_can/m_can_platform.c14
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c6
-rw-r--r--drivers/net/can/rcar/Kconfig4
-rw-r--r--drivers/net/can/rcar/rcar_can.c20
-rw-r--r--drivers/net/can/sja1000/peak_pci.c9
-rw-r--r--drivers/net/can/usb/etas_es58x/es581_4.h2
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.c7
-rw-r--r--drivers/net/can/usb/etas_es58x/es58x_fd.h2
-rw-r--r--drivers/net/can/usb/gs_usb.c12
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.c13
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_core.h1
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_fd.c17
-rw-r--r--drivers/net/can/xilinx_can.c7
-rw-r--r--drivers/net/dsa/Kconfig1
-rw-r--r--drivers/net/dsa/Makefile2
-rw-r--r--drivers/net/dsa/b53/b53_common.c101
-rw-r--r--drivers/net/dsa/b53/b53_priv.h2
-rw-r--r--drivers/net/dsa/bcm_sf2.c12
-rw-r--r--drivers/net/dsa/hirschmann/hellcreek.c6
-rw-r--r--drivers/net/dsa/lantiq_gswip.c44
-rw-r--r--drivers/net/dsa/microchip/ksz8795.c8
-rw-r--r--drivers/net/dsa/mt7530.c8
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c5
-rw-r--r--drivers/net/dsa/ocelot/felix.c4
-rw-r--r--drivers/net/dsa/ocelot/felix_vsc9959.c8
-rw-r--r--drivers/net/dsa/ocelot/seville_vsc9953.c8
-rw-r--r--drivers/net/dsa/qca/ar9331.c10
-rw-r--r--drivers/net/dsa/qca8k.c435
-rw-r--r--drivers/net/dsa/qca8k.h35
-rw-r--r--drivers/net/dsa/realtek-smi-core.c4
-rw-r--r--drivers/net/dsa/realtek-smi-core.h4
-rw-r--r--drivers/net/dsa/rtl8365mb.c1982
-rw-r--r--drivers/net/dsa/rtl8366.c96
-rw-r--r--drivers/net/dsa/rtl8366rb.c301
-rw-r--r--drivers/net/dsa/sja1105/sja1105.h29
-rw-r--r--drivers/net/dsa/sja1105/sja1105_clocking.c35
-rw-r--r--drivers/net/dsa/sja1105/sja1105_dynamic_config.c91
-rw-r--r--drivers/net/dsa/sja1105/sja1105_main.c144
-rw-r--r--drivers/net/dsa/sja1105/sja1105_vl.c15
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x.c8
-rw-r--r--drivers/net/dsa/xrs700x/xrs700x_mdio.c12
-rw-r--r--drivers/net/ethernet/3com/3c509.c2
-rw-r--r--drivers/net/ethernet/3com/3c515.c5
-rw-r--r--drivers/net/ethernet/3com/3c574_cs.c11
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c10
-rw-r--r--drivers/net/ethernet/3com/3c59x.c4
-rw-r--r--drivers/net/ethernet/8390/apne.c3
-rw-r--r--drivers/net/ethernet/8390/ax88796.c12
-rw-r--r--drivers/net/ethernet/8390/axnet_cs.c7
-rw-r--r--drivers/net/ethernet/8390/mcf8390.c3
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c2
-rw-r--r--drivers/net/ethernet/8390/pcnet_cs.c22
-rw-r--r--drivers/net/ethernet/8390/stnic.c5
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c3
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/actions/owl-emac.c6
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c8
-rw-r--r--drivers/net/ethernet/agere/et131x.c4
-rw-r--r--drivers/net/ethernet/alacritech/slicoss.c4
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/alteon/acenic.c20
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c2
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c6
-rw-r--r--drivers/net/ethernet/amd/atarilance.c4
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c2
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c5
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c15
-rw-r--r--drivers/net/ethernet/amd/sun3lance.c4
-rw-r--r--drivers/net/ethernet/amd/sunlance.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-drv.c4
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c8
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-main.c2
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c20
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe.h2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/mac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_hw.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_main.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c2
-rw-r--r--drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c2
-rw-r--r--drivers/net/ethernet/apple/bmac.c15
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h6
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_macsec.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c2
-rw-r--r--drivers/net/ethernet/arc/Kconfig4
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/arc/emac_mdio.c9
-rw-r--r--drivers/net/ethernet/asix/Kconfig35
-rw-r--r--drivers/net/ethernet/asix/Makefile6
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.c239
-rw-r--r--drivers/net/ethernet/asix/ax88796c_ioctl.h26
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.c1164
-rw-r--r--drivers/net/ethernet/asix/ax88796c_main.h568
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.c115
-rw-r--r--drivers/net/ethernet/asix/ax88796c_spi.h69
-rw-r--r--drivers/net/ethernet/atheros/ag71xx.c12
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c12
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c10
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c2
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c4
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c2
-rw-r--r--drivers/net/ethernet/broadcom/b44.c12
-rw-r--r--drivers/net/ethernet/broadcom/bcm4908_enet.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c6
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-bcma.c37
-rw-r--r--drivers/net/ethernet/broadcom/bgmac-platform.c2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c22
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c9
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c283
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h113
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c444
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h51
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h14
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c785
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h27
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c400
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h46
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h155
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c87
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h10
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c202
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c60
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/macb.h7
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c42
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c13
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c8
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_core.c3
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c40
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_vf_main.c4
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c2
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic_main.c5
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c15
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/gmac.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/pm3393.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/subr.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/vsc7326.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/t3_hw.c4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/xgmac.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c8
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c2
-rw-r--r--drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h2
-rw-r--r--drivers/net/ethernet/cirrus/cs89x0.c13
-rw-r--r--drivers/net/ethernet/cirrus/ep93xx_eth.c2
-rw-r--r--drivers/net/ethernet/cirrus/mac89x0.c2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_ethtool.c4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c9
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/cortina/gemini.c6
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c15
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c35
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c9
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c45
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c11
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/xircom_cb.c4
-rw-r--r--drivers/net/ethernet/dlink/dl2k.c5
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c8
-rw-r--r--drivers/net/ethernet/ec_bhf.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c7
-rw-r--r--drivers/net/ethernet/ethoc.c28
-rw-r--r--drivers/net/ethernet/ezchip/Kconfig2
-rw-r--r--drivers/net/ethernet/ezchip/nps_enet.c4
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c9
-rw-r--r--drivers/net/ethernet/fealnx.c8
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c6
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c21
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c24
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h7
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c58
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c2
-rw-r--r--drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.c332
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc.h4
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ethtool.c2
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_hw.h6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_pf.c37
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_ptp.c6
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_qos.c18
-rw-r--r--drivers/net/ethernet/freescale/enetc/enetc_vf.c16
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c7
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c4
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_dtsec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_memac.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.c8
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_tgec.h2
-rw-r--r--drivers/net/ethernet/freescale/fman/mac.h2
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c14
-rw-r--r--drivers/net/ethernet/google/gve/gve.h52
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.c61
-rw-r--r--drivers/net/ethernet/google/gve/gve_adminq.h15
-rw-r--r--drivers/net/ethernet/google/gve/gve_desc.h13
-rw-r--r--drivers/net/ethernet/google/gve/gve_ethtool.c7
-rw-r--r--drivers/net/ethernet/google/gve/gve_main.c109
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx.c413
-rw-r--r--drivers/net/ethernet/google/gve/gve_rx_dqo.c68
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx.c117
-rw-r--r--drivers/net/ethernet/google/gve/gve_tx_dqo.c84
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.c37
-rw-r--r--drivers/net/ethernet/google/gve/gve_utils.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hip04_eth.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hisi_femac.c6
-rw-r--r--drivers/net/ethernet/hisilicon/hix5hd2_gmac.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hnae.h4
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c7
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h5
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns/hns_enet.c4
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.c21
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hnae3.h12
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c215
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.c41
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3_enet.h10
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c1
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c9
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c33
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c19
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h6
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c591
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h35
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c18
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c13
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.c4
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_devlink.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_ethtool.c10
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c13
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c12
-rw-r--r--drivers/net/ethernet/i825xx/sun3_82586.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c4
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c14
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c46
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c666
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.h10
-rw-r--r--drivers/net/ethernet/intel/Kconfig14
-rw-r--r--drivers/net/ethernet/intel/e100.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h5
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c31
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.h3
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c50
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_netdev.c2
-rw-r--r--drivers/net/ethernet/intel/fm10k/fm10k_pci.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e.h2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_xsk.c52
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf.h48
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_main.c238
-rw-r--r--drivers/net/ethernet/intel/iavf/iavf_virtchnl.c6
-rw-r--r--drivers/net/ethernet/intel/ice/Makefile5
-rw-r--r--drivers/net/ethernet/intel/ice/ice.h215
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h94
-rw-r--r--drivers/net/ethernet/intel/ice/ice_arfs.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c121
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c131
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c225
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.h18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c216
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.h32
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c192
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devids.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.c259
-rw-r--r--drivers/net/ethernet/intel/ice/ice_devlink.h8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.c655
-rw-r--r--drivers/net/ethernet/intel/ice/ice_eswitch.h83
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c236
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fdir.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.c307
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_pipe.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_flex_type.h17
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.c80
-rw-r--r--drivers/net/ethernet/intel/ice/ice_fltr.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lag.c18
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h43
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c864
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h38
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c1645
-rw-r--r--drivers/net/ethernet/intel/ice/ice_protocol_type.h204
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.c375
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp.h24
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.c151
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ptp_hw.h22
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.c389
-rw-r--r--drivers/net/ethernet/intel/ice/ice_repr.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.c197
-rw-r--r--drivers/net/ethernet/intel/ice/ice_sched.h9
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.c2830
-rw-r--r--drivers/net/ethernet/intel/ice/ice_switch.h152
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.c1369
-rw-r--r--drivers/net/ethernet/intel/ice/ice_tc_lib.h162
-rw-r--r--drivers/net/ethernet/intel/ice/ice_trace.h28
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c326
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h147
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c102
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.h14
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h19
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c447
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h74
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c158
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.h20
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c27
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_base.c8
-rw-r--r--drivers/net/ethernet/intel/igc/igc_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igc/igc_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igc/igc_main.c5
-rw-r--r--drivers/net/ethernet/intel/igc/igc_ptp.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.c2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_hw.h2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h23
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c54
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c2
-rw-r--r--drivers/net/ethernet/jme.c4
-rw-r--r--drivers/net/ethernet/korina.c4
-rw-r--r--drivers/net/ethernet/lantiq_etop.c21
-rw-r--r--drivers/net/ethernet/lantiq_xrx200.c74
-rw-r--r--drivers/net/ethernet/litex/Kconfig2
-rw-r--r--drivers/net/ethernet/litex/litex_liteeth.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c75
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c117
-rw-r--r--drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/cgx.c11
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/common.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h5
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/mbox.h138
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc.h20
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h958
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/ptp.h1
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.c17
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rpm.h3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.c76
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu.h19
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c13
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c601
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c266
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c225
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c96
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c3
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h4
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/Makefile6
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c2
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c52
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h18
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c21
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c43
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c234
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c133
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c273
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h16
-rw-r--r--drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c8
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera.h69
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.c35
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_devlink.h4
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.c219
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_ethtool.h6
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.c1064
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_hw.h47
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_main.c161
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_pci.c114
-rw-r--r--drivers/net/ethernet/marvell/prestera/prestera_rxtx.c7
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c21
-rw-r--r--drivers/net/ethernet/marvell/skge.c6
-rw-r--r--drivers/net/ethernet/marvell/sky2.c99
-rw-r--r--drivers/net/ethernet/mediatek/mtk_eth_soc.c2
-rw-r--r--drivers/net/ethernet/mediatek/mtk_star_emac.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Makefile8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/dev.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/devlink.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en.h87
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/fs.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.c163
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/params.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.c102
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/qos.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c134
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.c50
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rss.h7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c457
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h65
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c42
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c35
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/tir.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/trap.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_common.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c19
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs.c32
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c427
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rx.c668
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c92
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_stats.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c589
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.h11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tx.c22
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eq.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c293
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.h18
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c126
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.h12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/health.c147
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c30
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.c)106
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag.h)9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c)17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h (renamed from drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h)2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c611
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c162
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h41
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c88
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h24
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c36
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c23
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h58
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h173
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h40
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c27
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c212
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c28
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c272
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h52
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/uar.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c21
-rw-r--r--drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxfw/mlxfw.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.c90
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.c372
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/core_env.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/item.h56
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/minimal.c66
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/reg.h357
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/resources.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.c390
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c15
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c432
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c583
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c662
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h9
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c11
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c15
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_common.c14
-rw-r--r--drivers/net/ethernet/micrel/ks8851_par.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851_spi.c4
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c16
-rw-r--r--drivers/net/ethernet/microchip/enc28j60.c7
-rw-r--r--drivers/net/ethernet/microchip/encx24j600.c7
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.c39
-rw-r--r--drivers/net/ethernet/microchip/lan743x_main.h3
-rw-r--r--drivers/net/ethernet/microchip/lan743x_ptp.c91
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_main.c4
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c6
-rw-r--r--drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c7
-rw-r--r--drivers/net/ethernet/microsoft/mana/gdma_main.c155
-rw-r--r--drivers/net/ethernet/microsoft/mana/hw_channel.c75
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana.h4
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_en.c96
-rw-r--r--drivers/net/ethernet/microsoft/mana/mana_ethtool.c3
-rw-r--r--drivers/net/ethernet/moxa/moxart_ether.c2
-rw-r--r--drivers/net/ethernet/mscc/Kconfig2
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c327
-rw-r--r--drivers/net/ethernet/mscc/ocelot.h1
-rw-r--r--drivers/net/ethernet/mscc/ocelot_flower.c125
-rw-r--r--drivers/net/ethernet/mscc/ocelot_mrp.c8
-rw-r--r--drivers/net/ethernet/mscc/ocelot_net.c24
-rw-r--r--drivers/net/ethernet/mscc/ocelot_vsc7514.c10
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c11
-rw-r--r--drivers/net/ethernet/neterion/s2io.c6
-rw-r--r--drivers/net/ethernet/neterion/s2io.h2
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/main.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/abm/qdisc.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c16
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/offload.c17
-rw-r--r--drivers/net/ethernet/netronome/nfp/devlink_param.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/action.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/cmsg.h2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_asm.c4
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_common.c8
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_main.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_net_repr.c3
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c2
-rw-r--r--drivers/net/ethernet/ni/nixge.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c51
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c15
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c4
-rw-r--r--drivers/net/ethernet/packetengines/hamachi.c5
-rw-r--r--drivers/net/ethernet/packetengines/yellowfin.c6
-rw-r--r--drivers/net/ethernet/pasemi/pasemi_mac.c4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic.h8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_debugfs.c48
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c1
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.h4
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_devlink.c10
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_ethtool.c41
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.c264
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_lif.h49
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_main.c92
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_phc.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c241
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h2
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_stats.c121
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_txrx.c14
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c8
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h44
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.c16
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_cxt.h143
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h1491
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.h11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c1389
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c126
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev_api.h347
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_devlink.c12
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_fcoe.c25
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hsi.h12339
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_hw.h222
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c405
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.c98
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_init_ops.h60
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_int.h286
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h500
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iscsi.h9
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_iwarp.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c43
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h135
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c167
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.h131
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c23
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c66
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.h765
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h2474
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ooo.c20
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ptp.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.c26
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_rdma.h7
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_reg_addr.h95
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_roce.c1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_selftest.h30
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp.h223
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sp_commands.c10
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_spq.c63
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c201
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.h138
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c13
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h311
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_filter.c53
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_main.c21
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c4
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac-mac.c2
-rw-r--r--drivers/net/ethernet/qualcomm/emac/emac.c5
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_uart.c2
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c2
-rw-r--r--drivers/net/ethernet/rdc/r6040.c24
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c7
-rw-r--r--drivers/net/ethernet/realtek/atp.c4
-rw-r--r--drivers/net/ethernet/realtek/r8169.h2
-rw-r--r--drivers/net/ethernet/realtek/r8169_main.c45
-rw-r--r--drivers/net/ethernet/realtek/r8169_phy_config.c59
-rw-r--r--drivers/net/ethernet/renesas/ravb.h52
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c728
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c18
-rw-r--r--drivers/net/ethernet/rocker/rocker_main.c10
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h2
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c3
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c9
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef100_nic.c2
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_sriov.h6
-rw-r--r--drivers/net/ethernet/sfc/efx.c2
-rw-r--r--drivers/net/ethernet/sfc/efx_common.c4
-rw-r--r--drivers/net/ethernet/sfc/ethtool_common.c10
-rw-r--r--drivers/net/ethernet/sfc/falcon/efx.c6
-rw-r--r--drivers/net/ethernet/sfc/mcdi_port_common.c37
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.c4
-rw-r--r--drivers/net/ethernet/sfc/siena_sriov.h2
-rw-r--r--drivers/net/ethernet/sgi/ioc3-eth.c4
-rw-r--r--drivers/net/ethernet/sgi/meth.c2
-rw-r--r--drivers/net/ethernet/silan/sc92031.c14
-rw-r--r--drivers/net/ethernet/sis/sis190.c10
-rw-r--r--drivers/net/ethernet/sis/sis900.c19
-rw-r--r--drivers/net/ethernet/smsc/epic100.c4
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c15
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c4
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c22
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c26
-rw-r--r--drivers/net/ethernet/socionext/netsec.c46
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c17
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c16
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c8
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/ldmvsw.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c46
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c6
-rw-r--r--drivers/net/ethernet/sun/sungem.c15
-rw-r--r--drivers/net/ethernet/sun/sunhme.c23
-rw-r--r--drivers/net/ethernet/sun/sunqe.c4
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c4
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-common.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac-net.c2
-rw-r--r--drivers/net/ethernet/synopsys/dwc-xlgmac.h2
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c8
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-ethtool.c2
-rw-r--r--drivers/net/ethernet/ti/am65-cpsw-nuss.c26
-rw-r--r--drivers/net/ethernet/ti/cpmac.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c6
-rw-r--r--drivers/net/ethernet/ti/cpsw_new.c17
-rw-r--r--drivers/net/ethernet/ti/cpts.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c8
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c8
-rw-r--r--drivers/net/ethernet/ti/tlan.c14
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/spider_net.c2
-rw-r--r--drivers/net/ethernet/toshiba/tc35815.c11
-rw-r--r--drivers/net/ethernet/via/via-rhine.c4
-rw-r--r--drivers/net/ethernet/via/via-velocity.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100-spi.c4
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c11
-rw-r--r--drivers/net/ethernet/wiznet/w5100.h2
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c4
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c10
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c11
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c14
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c7
-rw-r--r--drivers/net/fddi/defxx.c12
-rw-r--r--drivers/net/fddi/defza.c2
-rw-r--r--drivers/net/fddi/skfp/h/smc.h2
-rw-r--r--drivers/net/fddi/skfp/skfddi.c9
-rw-r--r--drivers/net/fddi/skfp/smtinit.c4
-rw-r--r--drivers/net/fjes/fjes_hw.c3
-rw-r--r--drivers/net/fjes/fjes_hw.h2
-rw-r--r--drivers/net/fjes/fjes_main.c14
-rw-r--r--drivers/net/gtp.c2
-rw-r--r--drivers/net/hamradio/6pack.c6
-rw-r--r--drivers/net/hamradio/baycom_epp.c10
-rw-r--r--drivers/net/hamradio/bpqether.c7
-rw-r--r--drivers/net/hamradio/dmascc.c5
-rw-r--r--drivers/net/hamradio/hdlcdrv.c4
-rw-r--r--drivers/net/hamradio/mkiss.c6
-rw-r--r--drivers/net/hamradio/scc.c7
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hippi/rrunner.c6
-rw-r--r--drivers/net/hyperv/hyperv_net.h5
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/hyperv/netvsc_drv.c6
-rw-r--r--drivers/net/ieee802154/ca8210.c2
-rw-r--r--drivers/net/ifb.c5
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c4
-rw-r--r--drivers/net/ipvlan/ipvtap.c2
-rw-r--r--drivers/net/macsec.c4
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/macvtap.c2
-rw-r--r--drivers/net/net_failover.c3
-rw-r--r--drivers/net/netdevsim/bus.c155
-rw-r--r--drivers/net/netdevsim/dev.c204
-rw-r--r--drivers/net/netdevsim/ethtool.c28
-rw-r--r--drivers/net/netdevsim/health.c32
-rw-r--r--drivers/net/netdevsim/netdev.c72
-rw-r--r--drivers/net/netdevsim/netdevsim.h57
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/pcs/pcs-xpcs.c2
-rw-r--r--drivers/net/phy/at803x.c778
-rw-r--r--drivers/net/phy/bcm7xxx.c203
-rw-r--r--drivers/net/phy/broadcom.c106
-rw-r--r--drivers/net/phy/dp83867.c23
-rw-r--r--drivers/net/phy/dp83869.c4
-rw-r--r--drivers/net/phy/marvell10g.c107
-rw-r--r--drivers/net/phy/mdio_bus.c28
-rw-r--r--drivers/net/phy/micrel.c107
-rw-r--r--drivers/net/phy/microchip_t1.c239
-rw-r--r--drivers/net/phy/mscc/mscc_main.c2
-rw-r--r--drivers/net/phy/phy-c45.c35
-rw-r--r--drivers/net/phy/phy.c140
-rw-r--r--drivers/net/phy/phy_device.c10
-rw-r--r--drivers/net/phy/phylink.c142
-rw-r--r--drivers/net/phy/realtek.c8
-rw-r--r--drivers/net/phy/sfp-bus.c2
-rw-r--r--drivers/net/plip/plip.c8
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/rionet.c14
-rw-r--r--drivers/net/sb1000.c12
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/thunderbolt.c8
-rw-r--r--drivers/net/usb/Kconfig1
-rw-r--r--drivers/net/usb/aqc111.c4
-rw-r--r--drivers/net/usb/asix_common.c2
-rw-r--r--drivers/net/usb/asix_devices.c2
-rw-r--r--drivers/net/usb/ax88172a.c2
-rw-r--r--drivers/net/usb/ax88179_178a.c12
-rw-r--r--drivers/net/usb/catc.c24
-rw-r--r--drivers/net/usb/cdc-phonet.c4
-rw-r--r--drivers/net/usb/ch9200.c4
-rw-r--r--drivers/net/usb/cx82310_eth.c5
-rw-r--r--drivers/net/usb/dm9601.c7
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/kalmia.c2
-rw-r--r--drivers/net/usb/kaweth.c3
-rw-r--r--drivers/net/usb/lan78xx.c10
-rw-r--r--drivers/net/usb/mcs7830.c9
-rw-r--r--drivers/net/usb/pegasus.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c7
-rw-r--r--drivers/net/usb/r8152.c4
-rw-r--r--drivers/net/usb/rndis_host.c2
-rw-r--r--drivers/net/usb/rtl8150.c4
-rw-r--r--drivers/net/usb/sierra_net.c6
-rw-r--r--drivers/net/usb/smsc75xx.c9
-rw-r--r--drivers/net/usb/smsc95xx.c9
-rw-r--r--drivers/net/usb/sr9700.c9
-rw-r--r--drivers/net/usb/sr9800.c7
-rw-r--r--drivers/net/usb/usbnet.c11
-rw-r--r--drivers/net/virtio_net.c54
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c9
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c10
-rw-r--r--drivers/net/vrf.c32
-rw-r--r--drivers/net/wan/hdlc_fr.c4
-rw-r--r--drivers/net/wan/lapbether.c2
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/bmi.h10
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.c11
-rw-r--r--drivers/net/wireless/ath/ath10k/coredump.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/htt.h7
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c45
-rw-r--r--drivers/net/wireless/ath/ath10k/qmi.c3
-rw-r--r--drivers/net/wireless/ath/ath10k/sdio.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c77
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.h5
-rw-r--r--drivers/net/wireless/ath/ath10k/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.c4
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/core.c73
-rw-r--r--drivers/net/wireless/ath/ath11k/core.h49
-rw-r--r--drivers/net/wireless/ath/ath11k/dbring.c16
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.c27
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs.h4
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c4344
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h226
-rw-r--r--drivers/net/wireless/ath/ath11k/debugfs_sta.c8
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.c14
-rw-r--r--drivers/net/wireless/ath/ath11k/dp.h9
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_rx.c282
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.c36
-rw-r--r--drivers/net/wireless/ath/ath11k/dp_tx.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_desc.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/hal_rx.c6
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.c56
-rw-r--r--drivers/net/wireless/ath/ath11k/hw.h24
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.c1445
-rw-r--r--drivers/net/wireless/ath/ath11k/mac.h3
-rw-r--r--drivers/net/wireless/ath/ath11k/pci.c45
-rw-r--r--drivers/net/wireless/ath/ath11k/peer.c11
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.c349
-rw-r--r--drivers/net/wireless/ath/ath11k/qmi.h18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.c18
-rw-r--r--drivers/net/wireless/ath/ath11k/reg.h2
-rw-r--r--drivers/net/wireless/ath/ath11k/spectral.c42
-rw-r--r--drivers/net/wireless/ath/ath11k/trace.h11
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.c162
-rw-r--r--drivers/net/wireless/ath/ath11k/wmi.h107
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c9
-rw-r--r--drivers/net/wireless/ath/ath6kl/usb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c105
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c57
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c4
-rw-r--r--drivers/net/wireless/ath/dfs_pattern_detector.c10
-rw-r--r--drivers/net/wireless/ath/spectral_common.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/debug.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c49
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h38
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c55
-rw-r--r--drivers/net/wireless/ath/wcn36xx/pmc.c13
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c189
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c147
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.h3
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h7
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c6
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c2
-rw-r--r--drivers/net/wireless/atmel/atmel.c19
-rw-r--r--drivers/net/wireless/broadcom/b43/phy_g.c2
-rw-r--r--drivers/net/wireless/broadcom/b43legacy/radio.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c12
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c6
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c10
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c4
-rw-r--r--drivers/net/wireless/cisco/airo.c27
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2100.c4
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.c12
-rw-r--r--drivers/net/wireless/intel/ipw2x00/ipw2200.h2
-rw-r--r--drivers/net/wireless/intel/iwlegacy/3945-mac.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/4965-mac.c1
-rw-r--r--drivers/net/wireless/intel/iwlegacy/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/1000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/2000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/22000.c35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/5000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/cfg/6000.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/agn.h11
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/commands.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/dev.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/devices.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/led.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/lib.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/main.c7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/power.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rs.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/rxon.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/scan.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/sta.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tt.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/tx.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/dvm/ucode.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.c150
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/acpi.h43
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/d3.h45
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h57
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/debug.h35
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/location.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/mac.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h23
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/phy.h6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/power.h55
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rs.h234
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/rx.h31
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/sta.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/api/tx.h52
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dbg.c46
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/dump.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/error-dump.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/file.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.c58
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/img.h12
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/init.c6
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/paging.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/pnvm.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/rs.c252
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/runtime.h7
-rw-r--r--drivers/net/wireless/intel/iwlwifi/fw/uefi.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-config.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-csr.h8
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c228
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h2
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.c24
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-debug.h26
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-drv.h3
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.c50
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-io.h5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-prph.h36
-rw-r--r--drivers/net/wireless/intel/iwlwifi/iwl-trans.h30
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/d3.c362
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c19
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c15
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/fw.c106
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c44
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c269
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/mvm.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/nvm.c5
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/ops.c194
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/power.c28
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c16
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.c182
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rs.h17
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rx.c39
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c119
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/scan.c10
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/tx.c117
-rw-r--r--drivers/net/wireless/intel/iwlwifi/mvm/utils.c54
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c4
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/drv.c306
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/rx.c9
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c38
-rw-r--r--drivers/net/wireless/intel/iwlwifi/pcie/trans.c90
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_hw.c5
-rw-r--r--drivers/net/wireless/intersil/hostap/hostap_main.c4
-rw-r--r--drivers/net/wireless/intersil/orinoco/main.c2
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c163
-rw-r--r--drivers/net/wireless/marvell/libertas/cmd.c5
-rw-r--r--drivers/net/wireless/marvell/libertas/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/libertas/main.c4
-rw-r--r--drivers/net/wireless/marvell/libertas/mesh.c18
-rw-r--r--drivers/net/wireless/marvell/libertas_tf/if_usb.c2
-rw-r--r--drivers/net/wireless/marvell/mwifiex/11n.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cfg80211.c384
-rw-r--r--drivers/net/wireless/marvell/mwifiex/cmdevt.c21
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.c22
-rw-r--r--drivers/net/wireless/marvell/mwifiex/main.h1
-rw-r--r--drivers/net/wireless/marvell/mwifiex/pcie.c36
-rw-r--r--drivers/net/wireless/marvell/mwifiex/sta_cmd.c4
-rw-r--r--drivers/net/wireless/marvell/mwifiex/uap_event.c3
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c16
-rw-r--r--drivers/net/wireless/marvell/mwl8k.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/debugfs.c22
-rw-r--r--drivers/net/wireless/mediatek/mt76/eeprom.c19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mac80211.c242
-rw-r--r--drivers/net/wireless/mediatek/mt76/mcu.c8
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76.h126
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/mac.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/main.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7603/pci.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/Makefile2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c29
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/init.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mac.c62
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/main.c14
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mcu.c90
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h20
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio.c296
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c357
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h38
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x0/pci.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mac.c15
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c12
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x02_util.c3
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt76x2/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c542
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/init.c170
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.c652
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mac.h11
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/main.c366
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.c1192
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mcu.h128
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mmio.c6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h161
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/pci.c5
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/regs.h166
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.c23
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7915/testmode.h6
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Kconfig19
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/Makefile7
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c99
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/dma.c74
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c100
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/init.c96
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.c776
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mac.h32
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/main.c328
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.c448
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mcu.h63
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h179
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci.c66
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c348
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c115
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/regs.h58
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio.c317
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c220
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c135
-rw-r--r--drivers/net/wireless/mediatek/mt76/mt7921/testmode.c197
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.c303
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio.h (renamed from drivers/net/wireless/mediatek/mt76/mt7615/sdio.h)33
-rw-r--r--drivers/net/wireless/mediatek/mt76/sdio_txrx.c (renamed from drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c)134
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.c4
-rw-r--r--drivers/net/wireless/mediatek/mt76/testmode.h7
-rw-r--r--drivers/net/wireless/mediatek/mt76/tx.c84
-rw-r--r--drivers/net/wireless/mediatek/mt76/usb.c2
-rw-r--r--drivers/net/wireless/mediatek/mt76/util.h10
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/dma.c2
-rw-r--r--drivers/net/wireless/microchip/wilc1000/cfg80211.c11
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.c31
-rw-r--r--drivers/net/wireless/microchip/wilc1000/hif.h1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.c14
-rw-r--r--drivers/net/wireless/microchip/wilc1000/netdev.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/sdio.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/spi.c91
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.c134
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan.h5
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_cfg.c1
-rw-r--r--drivers/net/wireless/microchip/wilc1000/wlan_if.h7
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/core.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c2
-rw-r--r--drivers/net/wireless/ralink/rt2x00/rt2800usb.c1
-rw-r--r--drivers/net/wireless/ray_cs.c2
-rw-r--r--drivers/net/wireless/realtek/Kconfig1
-rw-r--r--drivers/net/wireless/realtek/Makefile1
-rw-r--r--drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c14
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c6
-rw-r--r--drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c1
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c2
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/debug.h1
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.c54
-rw-r--r--drivers/net/wireless/realtek/rtw88/fw.h24
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.c22
-rw-r--r--drivers/net/wireless/realtek/rtw88/main.h49
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.c119
-rw-r--r--drivers/net/wireless/realtek/rtw88/phy.h2
-rw-r--r--drivers/net/wireless/realtek/rtw88/reg.h6
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.c753
-rw-r--r--drivers/net/wireless/realtek/rtw88/regd.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8821c.c19
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.c46
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822b.h8
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.c47
-rw-r--r--drivers/net/wireless/realtek/rtw88/rtw8822c.h3
-rw-r--r--drivers/net/wireless/realtek/rtw89/Kconfig50
-rw-r--r--drivers/net/wireless/realtek/rtw89/Makefile25
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.c695
-rw-r--r--drivers/net/wireless/realtek/rtw89/cam.h165
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.c5716
-rw-r--r--drivers/net/wireless/realtek/rtw89/coex.h181
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.c2502
-rw-r--r--drivers/net/wireless/realtek/rtw89/core.h3384
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.c2489
-rw-r--r--drivers/net/wireless/realtek/rtw89/debug.h77
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.c188
-rw-r--r--drivers/net/wireless/realtek/rtw89/efuse.h13
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.c1641
-rw-r--r--drivers/net/wireless/realtek/rtw89/fw.h1378
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.c3836
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac.h860
-rw-r--r--drivers/net/wireless/realtek/rtw89/mac80211.c676
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.c3060
-rw-r--r--drivers/net/wireless/realtek/rtw89/pci.h630
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.c2868
-rw-r--r--drivers/net/wireless/realtek/rtw89/phy.h311
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.c150
-rw-r--r--drivers/net/wireless/realtek/rtw89/ps.h16
-rw-r--r--drivers/net/wireless/realtek/rtw89/reg.h2159
-rw-r--r--drivers/net/wireless/realtek/rtw89/regd.c353
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.c2036
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a.h109
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c3911
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h24
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c1607
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h133
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.c48725
-rw-r--r--drivers/net/wireless/realtek/rtw89/rtw8852a_table.h28
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.c190
-rw-r--r--drivers/net/wireless/realtek/rtw89/sar.h26
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.c491
-rw-r--r--drivers/net/wireless/realtek/rtw89/ser.h15
-rw-r--r--drivers/net/wireless/realtek/rtw89/txrx.h358
-rw-r--r--drivers/net/wireless/realtek/rtw89/util.h17
-rw-r--r--drivers/net/wireless/rndis_wlan.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_core.c2
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_hal.c10
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mac80211.c74
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_main.c17
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_mgmt.c24
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio.c5
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb.c7
-rw-r--r--drivers/net/wireless/rsi/rsi_hal.h11
-rw-r--r--drivers/net/wireless/rsi/rsi_main.h15
-rw-r--r--drivers/net/wireless/st/cw1200/bh.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/spi.c9
-rw-r--r--drivers/net/wireless/wl3501_cs.c3
-rw-r--r--drivers/net/wireless/zydas/zd1201.c9
-rw-r--r--drivers/net/wireless/zydas/zd1211rw/zd_usb.c1
-rw-r--r--drivers/net/wwan/Kconfig1
-rw-r--r--drivers/net/wwan/iosm/Makefile5
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c6
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h1
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.c125
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_coredump.h59
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.c321
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_devlink.h205
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.c594
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_flash.h229
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.c107
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem.h18
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.c317
-rw-r--r--drivers/net/wwan/iosm/iosm_ipc_imem_ops.h49
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c2
-rw-r--r--drivers/net/xen-netfront.c12
-rw-r--r--drivers/nfc/fdp/i2c.c1
-rw-r--r--drivers/nfc/microread/i2c.c4
-rw-r--r--drivers/nfc/microread/mei.c6
-rw-r--r--drivers/nfc/nfcmrvl/fw_dnld.c4
-rw-r--r--drivers/nfc/pn533/i2c.c6
-rw-r--r--drivers/nfc/pn533/pn533.c6
-rw-r--r--drivers/nfc/pn533/pn533.h4
-rw-r--r--drivers/nfc/pn533/uart.c4
-rw-r--r--drivers/nfc/pn533/usb.c2
-rw-r--r--drivers/nfc/pn544/mei.c8
-rw-r--r--drivers/nfc/port100.c4
-rw-r--r--drivers/nfc/s3fwrn5/firmware.c29
-rw-r--r--drivers/nfc/s3fwrn5/nci.c18
-rw-r--r--drivers/nfc/st-nci/i2c.c4
-rw-r--r--drivers/nfc/st-nci/ndlc.c4
-rw-r--r--drivers/nfc/st-nci/se.c6
-rw-r--r--drivers/nfc/st-nci/spi.c4
-rw-r--r--drivers/nfc/st21nfca/i2c.c4
-rw-r--r--drivers/nfc/st21nfca/se.c4
-rw-r--r--drivers/nfc/st95hf/core.c6
-rw-r--r--drivers/nfc/trf7970a.c8
-rw-r--r--drivers/nvdimm/blk.c5
-rw-r--r--drivers/nvdimm/btt.c5
-rw-r--r--drivers/nvdimm/core.c1
-rw-r--r--drivers/nvdimm/pmem.c36
-rw-r--r--drivers/nvme/host/core.c140
-rw-r--r--drivers/nvme/host/fabrics.c6
-rw-r--r--drivers/nvme/host/fabrics.h8
-rw-r--r--drivers/nvme/host/fc.c34
-rw-r--r--drivers/nvme/host/multipath.c54
-rw-r--r--drivers/nvme/host/nvme.h19
-rw-r--r--drivers/nvme/host/pci.c58
-rw-r--r--drivers/nvme/host/rdma.c28
-rw-r--r--drivers/nvme/host/tcp.c29
-rw-r--r--drivers/nvme/host/zns.c2
-rw-r--r--drivers/nvme/target/admin-cmd.c18
-rw-r--r--drivers/nvme/target/configfs.c41
-rw-r--r--drivers/nvme/target/core.c18
-rw-r--r--drivers/nvme/target/discovery.c19
-rw-r--r--drivers/nvme/target/fabrics-cmd.c3
-rw-r--r--drivers/nvme/target/io-cmd-bdev.c5
-rw-r--r--drivers/nvme/target/io-cmd-file.c4
-rw-r--r--drivers/nvme/target/loop.c6
-rw-r--r--drivers/nvme/target/nvmet.h6
-rw-r--r--drivers/nvme/target/rdma.c31
-rw-r--r--drivers/nvme/target/tcp.c23
-rw-r--r--drivers/of/Kconfig4
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/base.c22
-rw-r--r--drivers/of/fdt.c52
-rw-r--r--drivers/of/kobj.c4
-rw-r--r--drivers/of/of_net.c145
-rw-r--r--drivers/of/of_numa.c2
-rw-r--r--drivers/of/of_private.h10
-rw-r--r--drivers/of/of_reserved_mem.c2
-rw-r--r--drivers/of/platform.c7
-rw-r--r--drivers/of/unittest-data/Makefile8
-rw-r--r--drivers/of/unittest-data/tests-interrupts.dtsi19
-rw-r--r--drivers/of/unittest.c24
-rw-r--r--drivers/pci/pci-acpi.c74
-rw-r--r--drivers/pci/pci-mid.c37
-rw-r--r--drivers/pci/pci.c156
-rw-r--r--drivers/pci/pci.h96
-rw-r--r--drivers/pcmcia/db1xxx_ss.c1
-rw-r--r--drivers/pcmcia/pcmcia_cis.c5
-rw-r--r--drivers/perf/Kconfig12
-rw-r--r--drivers/perf/hisilicon/hisi_uncore_pa_pmu.c2
-rw-r--r--drivers/perf/qcom_l2_pmu.c7
-rw-r--r--drivers/perf/thunderx2_pmu.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns-usb3.c2
-rw-r--r--drivers/phy/broadcom/phy-bcm-ns2-pcie.c6
-rw-r--r--drivers/pinctrl/bcm/pinctrl-ns.c29
-rw-r--r--drivers/pinctrl/pinctrl-amd.c31
-rw-r--r--drivers/pinctrl/stm32/pinctrl-stm32.c4
-rw-r--r--drivers/platform/mellanox/Kconfig12
-rw-r--r--drivers/platform/mellanox/Makefile1
-rw-r--r--drivers/platform/mellanox/mlxreg-hotplug.c123
-rw-r--r--drivers/platform/mellanox/mlxreg-io.c2
-rw-r--r--drivers/platform/mellanox/mlxreg-lc.c906
-rw-r--r--drivers/platform/surface/surface3-wmi.c9
-rw-r--r--drivers/platform/surface/surface3_power.c3
-rw-r--r--drivers/platform/surface/surface_aggregator_registry.c66
-rw-r--r--drivers/platform/surface/surface_gpe.c13
-rw-r--r--drivers/platform/x86/Kconfig29
-rw-r--r--drivers/platform/x86/Makefile4
-rw-r--r--drivers/platform/x86/acer-wmi.c14
-rw-r--r--drivers/platform/x86/amd-pmc.c152
-rw-r--r--drivers/platform/x86/asus-wmi.c12
-rw-r--r--drivers/platform/x86/barco-p50-gpio.c436
-rw-r--r--drivers/platform/x86/dell/dell-wmi-base.c76
-rw-r--r--drivers/platform/x86/hp-wmi.c337
-rw-r--r--drivers/platform/x86/i2c-multi-instantiate.c31
-rw-r--r--drivers/platform/x86/ideapad-laptop.c35
-rw-r--r--drivers/platform/x86/intel/Kconfig16
-rw-r--r--drivers/platform/x86/intel/Makefile1
-rw-r--r--drivers/platform/x86/intel/int0002_vgpio.c14
-rw-r--r--drivers/platform/x86/intel/ishtp_eclite.c701
-rw-r--r--drivers/platform/x86/lg-laptop.c11
-rw-r--r--drivers/platform/x86/mlx-platform.c1958
-rw-r--r--drivers/platform/x86/nvidia-wmi-ec-backlight.c213
-rw-r--r--drivers/platform/x86/panasonic-laptop.c18
-rw-r--r--drivers/platform/x86/sony-laptop.c46
-rw-r--r--drivers/platform/x86/system76_acpi.c427
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c195
-rw-r--r--drivers/platform/x86/touchscreen_dmi.c25
-rw-r--r--drivers/platform/x86/wmi.c375
-rw-r--r--drivers/pnp/system.c2
-rw-r--r--drivers/powercap/dtpm.c78
-rw-r--r--drivers/powercap/dtpm_cpu.c228
-rw-r--r--drivers/ptp/idt8a340_reg.h720
-rw-r--r--drivers/ptp/ptp_clock.c16
-rw-r--r--drivers/ptp/ptp_clockmatrix.c1452
-rw-r--r--drivers/ptp/ptp_clockmatrix.h109
-rw-r--r--drivers/ptp/ptp_kvm_x86.c4
-rw-r--r--drivers/ptp/ptp_ocp.c1316
-rw-r--r--drivers/regulator/Kconfig15
-rw-r--r--drivers/regulator/Makefile1
-rw-r--r--drivers/regulator/bd71815-regulator.c4
-rw-r--r--drivers/regulator/core.c14
-rw-r--r--drivers/regulator/dummy.c3
-rw-r--r--drivers/regulator/lp872x.c52
-rw-r--r--drivers/regulator/max8973-regulator.c4
-rw-r--r--drivers/regulator/pwm-regulator.c12
-rw-r--r--drivers/regulator/qcom-rpmh-regulator.c32
-rw-r--r--drivers/regulator/qcom_smd-regulator.c49
-rw-r--r--drivers/regulator/rtq6752-regulator.c18
-rw-r--r--drivers/regulator/s5m8767.c21
-rw-r--r--drivers/regulator/sy7636a-regulator.c2
-rw-r--r--drivers/regulator/ti-abb-regulator.c31
-rw-r--r--drivers/regulator/tps62360-regulator.c59
-rw-r--r--drivers/regulator/tps80031-regulator.c753
-rw-r--r--drivers/regulator/uniphier-regulator.c4
-rw-r--r--drivers/regulator/vqmmc-ipq4019-regulator.c4
-rw-r--r--drivers/reset/Kconfig8
-rw-r--r--drivers/reset/reset-brcmstb-rescal.c2
-rw-r--r--drivers/reset/reset-microchip-sparx5.c40
-rw-r--r--drivers/reset/reset-socfpga.c26
-rw-r--r--drivers/reset/reset-uniphier-glue.c4
-rw-r--r--drivers/reset/reset-uniphier.c27
-rw-r--r--drivers/reset/tegra/reset-bpmp.c9
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/s390/block/dasd.c9
-rw-r--r--drivers/s390/block/dasd_3990_erp.c6
-rw-r--r--drivers/s390/block/dasd_eckd.c294
-rw-r--r--drivers/s390/block/dasd_eckd.h13
-rw-r--r--drivers/s390/block/dasd_erp.c8
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/block/dasd_int.h11
-rw-r--r--drivers/s390/block/dasd_ioctl.c4
-rw-r--r--drivers/s390/block/dcssblk.c7
-rw-r--r--drivers/s390/cio/qdio_setup.c34
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c136
-rw-r--r--drivers/s390/cio/vfio_ccw_ops.c142
-rw-r--r--drivers/s390/cio/vfio_ccw_private.h5
-rw-r--r--drivers/s390/crypto/vfio_ap_ops.c2
-rw-r--r--drivers/s390/net/ctcm_fsms.c60
-rw-r--r--drivers/s390/net/ctcm_main.c38
-rw-r--r--drivers/s390/net/ctcm_mpc.c8
-rw-r--r--drivers/s390/net/fsm.c2
-rw-r--r--drivers/s390/net/ism_drv.c2
-rw-r--r--drivers/s390/net/lcs.c123
-rw-r--r--drivers/s390/net/netiucv.c104
-rw-r--r--drivers/s390/net/qeth_core.h4
-rw-r--r--drivers/s390/net/qeth_core_main.c63
-rw-r--r--drivers/s390/net/qeth_l2_main.c33
-rw-r--r--drivers/s390/net/qeth_l3_main.c15
-rw-r--r--drivers/scsi/aic94xx/aic94xx_sds.c6
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c1
-rw-r--r--drivers/scsi/hosts.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvfc.c3
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c3
-rw-r--r--drivers/scsi/lpfc/lpfc.h1
-rw-r--r--drivers/scsi/mpi3mr/mpi3mr_os.c2
-rw-r--r--drivers/scsi/mpt3sas/mpt3sas_scsih.c9
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.c8
-rw-r--r--drivers/scsi/qedf/drv_fcoe_fw_funcs.h2
-rw-r--r--drivers/scsi/qedf/qedf.h4
-rw-r--r--drivers/scsi/qedf/qedf_els.c2
-rw-r--r--drivers/scsi/qedf/qedf_io.c12
-rw-r--r--drivers/scsi/qedf/qedf_main.c10
-rw-r--r--drivers/scsi/qedi/qedi_debugfs.c4
-rw-r--r--drivers/scsi/qedi/qedi_fw.c40
-rw-r--r--drivers/scsi/qedi/qedi_fw_api.c22
-rw-r--r--drivers/scsi/qedi/qedi_fw_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_iscsi.h2
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_nvme.c15
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c14
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h4
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_bsg.c6
-rw-r--r--drivers/scsi/scsi_debug.c10
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_ioctl.c8
-rw-r--r--drivers/scsi/scsi_lib.c32
-rw-r--r--drivers/scsi/scsi_scan.c1
-rw-r--r--drivers/scsi/scsi_sysfs.c9
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c2
-rw-r--r--drivers/scsi/sd.c128
-rw-r--r--drivers/scsi/sd.h1
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c11
-rw-r--r--drivers/scsi/sr.c5
-rw-r--r--drivers/scsi/st.c7
-rw-r--r--drivers/scsi/storvsc_drv.c32
-rw-r--r--drivers/scsi/ufs/ufs-exynos.c6
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.c32
-rw-r--r--drivers/scsi/ufs/ufshcd-crypto.h9
-rw-r--r--drivers/scsi/ufs/ufshcd-pci.c33
-rw-r--r--drivers/scsi/ufs/ufshcd.c29
-rw-r--r--drivers/scsi/ufs/ufshcd.h6
-rw-r--r--drivers/scsi/ufs/ufshpb.c287
-rw-r--r--drivers/scsi/ufs/ufshpb.h2
-rw-r--r--drivers/scsi/virtio_scsi.c2
-rw-r--r--drivers/soc/amlogic/meson-canvas.c4
-rw-r--r--drivers/soc/amlogic/meson-clk-measure.c4
-rw-r--r--drivers/soc/amlogic/meson-gx-socinfo.c1
-rw-r--r--drivers/soc/aspeed/Kconfig10
-rw-r--r--drivers/soc/aspeed/Makefile9
-rw-r--r--drivers/soc/aspeed/aspeed-uart-routing.c603
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm-pmb.c4
-rw-r--r--drivers/soc/bcm/bcm63xx/bcm63xx-power.c4
-rw-r--r--drivers/soc/bcm/brcmstb/biuctrl.c2
-rw-r--r--drivers/soc/fsl/Kconfig1
-rw-r--r--drivers/soc/fsl/dpio/dpio-cmd.h3
-rw-r--r--drivers/soc/fsl/dpio/dpio-driver.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio-service.c119
-rw-r--r--drivers/soc/fsl/dpio/dpio.c1
-rw-r--r--drivers/soc/fsl/dpio/dpio.h2
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.c66
-rw-r--r--drivers/soc/fsl/dpio/qbman-portal.h13
-rw-r--r--drivers/soc/fsl/guts.c4
-rw-r--r--drivers/soc/fsl/rcpm.c7
-rw-r--r--drivers/soc/imx/Kconfig1
-rw-r--r--drivers/soc/imx/Makefile1
-rw-r--r--drivers/soc/imx/gpcv2.c134
-rw-r--r--drivers/soc/imx/imx8m-blk-ctrl.c523
-rw-r--r--drivers/soc/mediatek/mt8192-mmsys.h76
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.c79
-rw-r--r--drivers/soc/mediatek/mtk-mmsys.h2
-rw-r--r--drivers/soc/mediatek/mtk-mutex.c35
-rw-r--r--drivers/soc/qcom/Kconfig21
-rw-r--r--drivers/soc/qcom/Makefile2
-rw-r--r--drivers/soc/qcom/apr.c289
-rw-r--r--drivers/soc/qcom/cpr.c4
-rw-r--r--drivers/soc/qcom/llcc-qcom.c18
-rw-r--r--drivers/soc/qcom/ocmem.c4
-rw-r--r--drivers/soc/qcom/pdr_interface.c12
-rw-r--r--drivers/soc/qcom/qcom-geni-se.c4
-rw-r--r--drivers/soc/qcom/qcom_aoss.c165
-rw-r--r--drivers/soc/qcom/qcom_gsbi.c4
-rw-r--r--drivers/soc/qcom/qcom_stats.c277
-rw-r--r--drivers/soc/qcom/rpmh-rsc.c4
-rw-r--r--drivers/soc/qcom/rpmhpd.c36
-rw-r--r--drivers/soc/qcom/rpmpd.c24
-rw-r--r--drivers/soc/qcom/smd-rpm.c2
-rw-r--r--drivers/soc/qcom/smem.c57
-rw-r--r--drivers/soc/qcom/smp2p.c154
-rw-r--r--drivers/soc/qcom/socinfo.c18
-rw-r--r--drivers/soc/qcom/spm.c279
-rw-r--r--drivers/soc/renesas/Kconfig7
-rw-r--r--drivers/soc/renesas/renesas-soc.c7
-rw-r--r--drivers/soc/samsung/Kconfig5
-rw-r--r--drivers/soc/samsung/Makefile3
-rw-r--r--drivers/soc/samsung/exynos-chipid.c94
-rw-r--r--drivers/soc/samsung/exynos5422-asv.c1
-rw-r--r--drivers/soc/samsung/pm_domains.c1
-rw-r--r--drivers/soc/sunxi/sunxi_sram.c4
-rw-r--r--drivers/soc/tegra/Makefile1
-rw-r--r--drivers/soc/tegra/ari-tegra186.c80
-rw-r--r--drivers/soc/tegra/pmc.c28
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/atmel-quadspi.c2
-rw-r--r--drivers/spi/spi-altera-dfl.c2
-rw-r--r--drivers/spi/spi-altera-platform.c2
-rw-r--r--drivers/spi/spi-amd.c113
-rw-r--r--drivers/spi/spi-at91-usart.c27
-rw-r--r--drivers/spi/spi-bcm-qspi.c193
-rw-r--r--drivers/spi/spi-cadence-quadspi.c214
-rw-r--r--drivers/spi/spi-cadence-xspi.c642
-rw-r--r--drivers/spi/spi-fsi.c121
-rw-r--r--drivers/spi/spi-geni-qcom.c254
-rw-r--r--drivers/spi/spi-ingenic.c482
-rw-r--r--drivers/spi/spi-mtk-nor.c2
-rw-r--r--drivers/spi/spi-orion.c1
-rw-r--r--drivers/spi/spi-pl022.c5
-rw-r--r--drivers/spi/spi-rpc-if.c4
-rw-r--r--drivers/spi/spi-rspi.c1
-rw-r--r--drivers/spi/spi-sh-msiof.c1
-rw-r--r--drivers/spi/spi-stm32-qspi.c2
-rw-r--r--drivers/spi/spi-tegra20-slink.c6
-rw-r--r--drivers/spi/spi-tegra210-quad.c4
-rw-r--r--drivers/spi/spi-tle62x0.c2
-rw-r--r--drivers/spi/spi.c278
-rw-r--r--drivers/staging/axis-fifo/axis-fifo.c88
-rw-r--r--drivers/staging/fbtft/fbtft-core.c11
-rw-r--r--drivers/staging/fbtft/fbtft.h8
-rw-r--r--drivers/staging/fieldbus/anybuss/host.c8
-rw-r--r--drivers/staging/gdm724x/gdm_lte.c4
-rw-r--r--drivers/staging/ks7010/Kconfig3
-rw-r--r--drivers/staging/ks7010/ks_hostif.c2
-rw-r--r--drivers/staging/ks7010/ks_wlan_net.c4
-rw-r--r--drivers/staging/media/atomisp/i2c/atomisp-lm3554.c37
-rw-r--r--drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c2
-rw-r--r--drivers/staging/media/atomisp/pci/atomisp_csi2.c70
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c12
-rw-r--r--drivers/staging/media/hantro/hantro_g1_h264_dec.c2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_regs.h2
-rw-r--r--drivers/staging/media/hantro/hantro_g1_vp8_dec.c3
-rw-r--r--drivers/staging/media/hantro/hantro_g2_hevc_dec.c52
-rw-r--r--drivers/staging/media/hantro/hantro_hevc.c21
-rw-r--r--drivers/staging/media/hantro/hantro_hw.h4
-rw-r--r--drivers/staging/media/imx/TODO5
-rw-r--r--drivers/staging/media/imx/imx-media-csi.c23
-rw-r--r--drivers/staging/media/imx/imx-media-dev-common.c9
-rw-r--r--drivers/staging/media/imx/imx-media-dev.c6
-rw-r--r--drivers/staging/media/imx/imx-media-of.c6
-rw-r--r--drivers/staging/media/imx/imx6-mipi-csi2.c17
-rw-r--r--drivers/staging/media/imx/imx7-media-csi.c24
-rw-r--r--drivers/staging/media/imx/imx7-mipi-csis.c16
-rw-r--r--drivers/staging/media/imx/imx8mq-mipi-csi2.c16
-rw-r--r--drivers/staging/media/ipu3/include/uapi/intel-ipu3.h7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.c7
-rw-r--r--drivers/staging/media/ipu3/ipu3-css-fw.h2
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.c19
-rw-r--r--drivers/staging/media/ipu3/ipu3-css.h1
-rw-r--r--drivers/staging/media/ipu3/ipu3-v4l2.c13
-rw-r--r--drivers/staging/media/ipu3/ipu3.h12
-rw-r--r--drivers/staging/media/meson/vdec/esparser.h6
-rw-r--r--drivers/staging/media/meson/vdec/vdec.c7
-rw-r--r--drivers/staging/media/meson/vdec/vdec.h16
-rw-r--r--drivers/staging/media/meson/vdec/vdec_helpers.h3
-rw-r--r--drivers/staging/media/rkvdec/rkvdec-h264.c5
-rw-r--r--drivers/staging/media/rkvdec/rkvdec.c44
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.c56
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_dec.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h264.c113
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_h265.c100
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_hw.c2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_regs.h2
-rw-r--r--drivers/staging/media/sunxi/cedrus/cedrus_video.c7
-rw-r--r--drivers/staging/media/tegra-video/vi.c17
-rw-r--r--drivers/staging/most/dim2/Makefile2
-rw-r--r--drivers/staging/most/dim2/dim2.c115
-rw-r--r--drivers/staging/most/dim2/sysfs.c49
-rw-r--r--drivers/staging/most/dim2/sysfs.h11
-rw-r--r--drivers/staging/most/net/net.c2
-rw-r--r--drivers/staging/mt7621-dma/hsdma-mt7621.c6
-rw-r--r--drivers/staging/mt7621-dts/gbpc1.dts3
-rw-r--r--drivers/staging/mt7621-dts/gbpc2.dts1
-rw-r--r--drivers/staging/mt7621-dts/mt7621.dtsi74
-rw-r--r--drivers/staging/mt7621-pci/pci-mt7621.c2
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/pi433/pi433_if.c18
-rw-r--r--drivers/staging/pi433/pi433_if.h23
-rw-r--r--drivers/staging/qlge/qlge_main.c30
-rw-r--r--drivers/staging/qlge/qlge_mpi.c2
-rw-r--r--drivers/staging/r8188eu/Kconfig10
-rw-r--r--drivers/staging/r8188eu/Makefile155
-rw-r--r--drivers/staging/r8188eu/core/rtw_ap.c607
-rw-r--r--drivers/staging/r8188eu/core/rtw_br_ext.c3
-rw-r--r--drivers/staging/r8188eu/core/rtw_cmd.c618
-rw-r--r--drivers/staging/r8188eu/core/rtw_debug.c904
-rw-r--r--drivers/staging/r8188eu/core/rtw_efuse.c582
-rw-r--r--drivers/staging/r8188eu/core/rtw_ieee80211.c339
-rw-r--r--drivers/staging/r8188eu/core/rtw_io.c299
-rw-r--r--drivers/staging/r8188eu/core/rtw_ioctl_set.c397
-rw-r--r--drivers/staging/r8188eu/core/rtw_iol.c34
-rw-r--r--drivers/staging/r8188eu/core/rtw_led.c1189
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme.c126
-rw-r--r--drivers/staging/r8188eu/core/rtw_mlme_ext.c386
-rw-r--r--drivers/staging/r8188eu/core/rtw_mp.c935
-rw-r--r--drivers/staging/r8188eu/core/rtw_mp_ioctl.c1170
-rw-r--r--drivers/staging/r8188eu/core/rtw_p2p.c43
-rw-r--r--drivers/staging/r8188eu/core/rtw_pwrctrl.c140
-rw-r--r--drivers/staging/r8188eu/core/rtw_recv.c116
-rw-r--r--drivers/staging/r8188eu/core/rtw_rf.c17
-rw-r--r--drivers/staging/r8188eu/core/rtw_security.c197
-rw-r--r--drivers/staging/r8188eu/core/rtw_sreset.c62
-rw-r--r--drivers/staging/r8188eu/core/rtw_sta_mgt.c34
-rw-r--r--drivers/staging/r8188eu/core/rtw_wlan_util.c157
-rw-r--r--drivers/staging/r8188eu/core/rtw_xmit.c121
-rw-r--r--drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c22
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c32
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c10
-rw-r--r--drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c15
-rw-r--r--drivers/staging/r8188eu/hal/HalPhyRf_8188e.c171
-rw-r--r--drivers/staging/r8188eu/hal/hal_com.c26
-rw-r--r--drivers/staging/r8188eu/hal/hal_intf.c391
-rw-r--r--drivers/staging/r8188eu/hal/odm.c1188
-rw-r--r--drivers/staging/r8188eu/hal/odm_HWConfig.c393
-rw-r--r--drivers/staging/r8188eu/hal/odm_RTL8188E.c31
-rw-r--r--drivers/staging/r8188eu/hal/odm_RegConfig8188E.c8
-rw-r--r--drivers/staging/r8188eu/hal/odm_interface.c85
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_cmd.c48
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_dm.c93
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_hal_init.c310
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_mp.c798
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_phycfg.c215
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rf6052.c226
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c2
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188e_sreset.c27
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_recv.c4
-rw-r--r--drivers/staging/r8188eu/hal/rtl8188eu_xmit.c60
-rw-r--r--drivers/staging/r8188eu/hal/usb_halinit.c328
-rw-r--r--drivers/staging/r8188eu/hal/usb_ops_linux.c256
-rw-r--r--drivers/staging/r8188eu/include/Hal8188EPhyCfg.h91
-rw-r--r--drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h2
-rw-r--r--drivers/staging/r8188eu/include/HalHWImg8188E_FW.h16
-rw-r--r--drivers/staging/r8188eu/include/HalVerDef.h70
-rw-r--r--drivers/staging/r8188eu/include/drv_types.h37
-rw-r--r--drivers/staging/r8188eu/include/hal_intf.h312
-rw-r--r--drivers/staging/r8188eu/include/ieee80211.h77
-rw-r--r--drivers/staging/r8188eu/include/ioctl_cfg80211.h2
-rw-r--r--drivers/staging/r8188eu/include/mp_custom_oid.h333
-rw-r--r--drivers/staging/r8188eu/include/odm.h457
-rw-r--r--drivers/staging/r8188eu/include/odm_HWConfig.h11
-rw-r--r--drivers/staging/r8188eu/include/odm_RTL8188E.h2
-rw-r--r--drivers/staging/r8188eu/include/odm_RegConfig8188E.h3
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11AC.h29
-rw-r--r--drivers/staging/r8188eu/include/odm_RegDefine11N.h112
-rw-r--r--drivers/staging/r8188eu/include/odm_interface.h88
-rw-r--r--drivers/staging/r8188eu/include/odm_precomp.h22
-rw-r--r--drivers/staging/r8188eu/include/odm_reg.h89
-rw-r--r--drivers/staging/r8188eu/include/odm_types.h24
-rw-r--r--drivers/staging/r8188eu/include/osdep_intf.h5
-rw-r--r--drivers/staging/r8188eu/include/osdep_service.h42
-rw-r--r--drivers/staging/r8188eu/include/recv_osdep.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_cmd.h16
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_dm.h13
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_hal.h102
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_led.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_recv.h2
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_rf.h1
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_spec.h4
-rw-r--r--drivers/staging/r8188eu/include/rtl8188e_sreset.h2
-rw-r--r--drivers/staging/r8188eu/include/rtw_ap.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_br_ext.h3
-rw-r--r--drivers/staging/r8188eu/include/rtw_cmd.h27
-rw-r--r--drivers/staging/r8188eu/include/rtw_debug.h156
-rw-r--r--drivers/staging/r8188eu/include/rtw_eeprom.h57
-rw-r--r--drivers/staging/r8188eu/include/rtw_efuse.h21
-rw-r--r--drivers/staging/r8188eu/include/rtw_io.h87
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_rtl.h63
-rw-r--r--drivers/staging/r8188eu/include/rtw_ioctl_set.h8
-rw-r--r--drivers/staging/r8188eu/include/rtw_iol.h5
-rw-r--r--drivers/staging/r8188eu/include/rtw_led.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme.h11
-rw-r--r--drivers/staging/r8188eu/include/rtw_mlme_ext.h14
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp.h474
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp_ioctl.h242
-rw-r--r--drivers/staging/r8188eu/include/rtw_mp_phy_regdef.h1063
-rw-r--r--drivers/staging/r8188eu/include/rtw_p2p.h1
-rw-r--r--drivers/staging/r8188eu/include/rtw_pwrctrl.h130
-rw-r--r--drivers/staging/r8188eu/include/rtw_recv.h6
-rw-r--r--drivers/staging/r8188eu/include/rtw_rf.h12
-rw-r--r--drivers/staging/r8188eu/include/rtw_security.h20
-rw-r--r--drivers/staging/r8188eu/include/rtw_sreset.h34
-rw-r--r--drivers/staging/r8188eu/include/rtw_xmit.h6
-rw-r--r--drivers/staging/r8188eu/include/sta_info.h7
-rw-r--r--drivers/staging/r8188eu/include/usb_ops.h5
-rw-r--r--drivers/staging/r8188eu/include/usb_ops_linux.h8
-rw-r--r--drivers/staging/r8188eu/include/usb_osintf.h5
-rw-r--r--drivers/staging/r8188eu/include/wifi.h52
-rw-r--r--drivers/staging/r8188eu/include/xmit_osdep.h2
-rw-r--r--drivers/staging/r8188eu/os_dep/ioctl_linux.c2247
-rw-r--r--drivers/staging/r8188eu/os_dep/mlme_linux.c6
-rw-r--r--drivers/staging/r8188eu/os_dep/os_intfs.c399
-rw-r--r--drivers/staging/r8188eu/os_dep/osdep_service.c82
-rw-r--r--drivers/staging/r8188eu/os_dep/recv_linux.c14
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_intf.c285
-rw-r--r--drivers/staging/r8188eu/os_dep/usb_ops_linux.c40
-rw-r--r--drivers/staging/r8188eu/os_dep/xmit_linux.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c7
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c2
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c9
-rw-r--r--drivers/staging/rtl8192u/r8192U.h3
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c36
-rw-r--r--drivers/staging/rtl8712/ieee80211.h4
-rw-r--r--drivers/staging/rtl8712/os_intfs.c9
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.h2
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.h10
-rw-r--r--drivers/staging/rtl8712/usb_intf.c6
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723bs/Kconfig1
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c23
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_cmd.c210
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ioctl_set.c4
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c24
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme_ext.c79
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_recv.c22
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_security.c6
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_sta_mgt.c48
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_xmit.c49
-rw-r--r--drivers/staging/rtl8723bs/hal/odm_DIG.c2
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c12
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c6
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c2
-rw-r--r--drivers/staging/rtl8723bs/include/ieee80211.h6
-rw-r--r--drivers/staging/rtl8723bs/include/osdep_service.h2
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c26
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c34
-rw-r--r--drivers/staging/rtl8723bs/os_dep/os_intfs.c8
-rw-r--r--drivers/staging/rtl8723bs/os_dep/osdep_service.c11
-rw-r--r--drivers/staging/rts5208/ms.c42
-rw-r--r--drivers/staging/rts5208/rtsx.c2
-rw-r--r--drivers/staging/rts5208/rtsx_card.c8
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c16
-rw-r--r--drivers/staging/rts5208/rtsx_scsi.c106
-rw-r--r--drivers/staging/rts5208/rtsx_transport.c6
-rw-r--r--drivers/staging/rts5208/sd.c68
-rw-r--r--drivers/staging/rts5208/xd.c48
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c5
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c298
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h52
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c20
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h4
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c771
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h107
-rw-r--r--drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c182
-rw-r--r--drivers/staging/vt6655/baseband.c74
-rw-r--r--drivers/staging/vt6655/baseband.h2
-rw-r--r--drivers/staging/vt6655/card.c98
-rw-r--r--drivers/staging/vt6655/channel.c12
-rw-r--r--drivers/staging/vt6655/device.h10
-rw-r--r--drivers/staging/vt6655/device_main.c162
-rw-r--r--drivers/staging/vt6655/dpc.c2
-rw-r--r--drivers/staging/vt6655/key.c2
-rw-r--r--drivers/staging/vt6655/mac.c50
-rw-r--r--drivers/staging/vt6655/mac.h6
-rw-r--r--drivers/staging/vt6655/power.c24
-rw-r--r--drivers/staging/vt6655/rf.c140
-rw-r--r--drivers/staging/vt6655/rf.h2
-rw-r--r--drivers/staging/vt6655/rxtx.c64
-rw-r--r--drivers/staging/wfx/bh.c37
-rw-r--r--drivers/staging/wfx/bh.h4
-rw-r--r--drivers/staging/wfx/bus_sdio.c25
-rw-r--r--drivers/staging/wfx/bus_spi.c22
-rw-r--r--drivers/staging/wfx/data_rx.c7
-rw-r--r--drivers/staging/wfx/data_rx.h4
-rw-r--r--drivers/staging/wfx/data_tx.c87
-rw-r--r--drivers/staging/wfx/data_tx.h6
-rw-r--r--drivers/staging/wfx/debug.c56
-rw-r--r--drivers/staging/wfx/debug.h2
-rw-r--r--drivers/staging/wfx/fwio.c26
-rw-r--r--drivers/staging/wfx/fwio.h2
-rw-r--r--drivers/staging/wfx/hif_api_cmd.h14
-rw-r--r--drivers/staging/wfx/hif_api_general.h25
-rw-r--r--drivers/staging/wfx/hif_api_mib.h85
-rw-r--r--drivers/staging/wfx/hif_rx.c23
-rw-r--r--drivers/staging/wfx/hif_rx.h3
-rw-r--r--drivers/staging/wfx/hif_tx.c60
-rw-r--r--drivers/staging/wfx/hif_tx.h6
-rw-r--r--drivers/staging/wfx/hif_tx_mib.c14
-rw-r--r--drivers/staging/wfx/hif_tx_mib.h2
-rw-r--r--drivers/staging/wfx/hwio.c6
-rw-r--r--drivers/staging/wfx/hwio.h20
-rw-r--r--drivers/staging/wfx/key.c30
-rw-r--r--drivers/staging/wfx/key.h4
-rw-r--r--drivers/staging/wfx/main.c37
-rw-r--r--drivers/staging/wfx/main.h3
-rw-r--r--drivers/staging/wfx/queue.c43
-rw-r--r--drivers/staging/wfx/queue.h6
-rw-r--r--drivers/staging/wfx/scan.c51
-rw-r--r--drivers/staging/wfx/scan.h4
-rw-r--r--drivers/staging/wfx/sta.c118
-rw-r--r--drivers/staging/wfx/sta.h8
-rw-r--r--drivers/staging/wfx/traces.h2
-rw-r--r--drivers/staging/wfx/wfx.h14
-rw-r--r--drivers/staging/wlan-ng/hfa384x.h2
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c24
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c2
-rw-r--r--drivers/staging/wlan-ng/p80211conv.h2
-rw-r--r--drivers/staging/wlan-ng/p80211hdr.h2
-rw-r--r--drivers/staging/wlan-ng/p80211ioctl.h2
-rw-r--r--drivers/staging/wlan-ng/p80211mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/p80211msg.h2
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h2
-rw-r--r--drivers/staging/wlan-ng/p80211req.c2
-rw-r--r--drivers/staging/wlan-ng/p80211req.h2
-rw-r--r--drivers/staging/wlan-ng/p80211types.h2
-rw-r--r--drivers/staging/wlan-ng/p80211wep.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.c2
-rw-r--r--drivers/staging/wlan-ng/prism2mgmt.h2
-rw-r--r--drivers/staging/wlan-ng/prism2mib.c2
-rw-r--r--drivers/staging/wlan-ng/prism2sta.c6
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c3
-rw-r--r--drivers/target/target_core_file.c5
-rw-r--r--drivers/target/target_core_iblock.c6
-rw-r--r--drivers/target/target_core_pscsi.c7
-rw-r--r--drivers/tee/optee/Makefile5
-rw-r--r--drivers/tee/optee/call.c445
-rw-r--r--drivers/tee/optee/core.c719
-rw-r--r--drivers/tee/optee/ffa_abi.c911
-rw-r--r--drivers/tee/optee/optee_ffa.h153
-rw-r--r--drivers/tee/optee/optee_msg.h27
-rw-r--r--drivers/tee/optee/optee_private.h157
-rw-r--r--drivers/tee/optee/rpc.c237
-rw-r--r--drivers/tee/optee/shm_pool.c101
-rw-r--r--drivers/tee/optee/shm_pool.h14
-rw-r--r--drivers/tee/optee/smc_abi.c1362
-rw-r--r--drivers/thermal/gov_user_space.c9
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3400_thermal.c9
-rw-r--r--drivers/thermal/intel/int340x_thermal/int3401_thermal.c8
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.c36
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device.h1
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c18
-rw-r--r--drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c8
-rw-r--r--drivers/thermal/intel/intel_powerclamp.c8
-rw-r--r--drivers/thermal/qcom/Kconfig2
-rw-r--r--drivers/thermal/qcom/qcom-spmi-adc-tm5.c41
-rw-r--r--drivers/thermal/qcom/tsens.c29
-rw-r--r--drivers/thermal/rcar_gen3_thermal.c113
-rw-r--r--drivers/thermal/rockchip_thermal.c2
-rw-r--r--drivers/thermal/thermal_core.c22
-rw-r--r--drivers/thermal/thermal_mmio.c2
-rw-r--r--drivers/thermal/thermal_netlink.c11
-rw-r--r--drivers/thermal/thermal_netlink.h8
-rw-r--r--drivers/thermal/thermal_sysfs.c3
-rw-r--r--drivers/thermal/uniphier_thermal.c4
-rw-r--r--drivers/thunderbolt/ctl.c2
-rw-r--r--drivers/thunderbolt/xdomain.c2
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/uio/uio_hv_generic.c18
-rw-r--r--drivers/usb/atm/usbatm.c4
-rw-r--r--drivers/usb/chipidea/core.c23
-rw-r--r--drivers/usb/chipidea/udc.c8
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/devio.c144
-rw-r--r--drivers/usb/core/hcd.c6
-rw-r--r--drivers/usb/dwc2/core.h19
-rw-r--r--drivers/usb/dwc2/debugfs.c4
-rw-r--r--drivers/usb/dwc2/drd.c24
-rw-r--r--drivers/usb/dwc2/gadget.c1
-rw-r--r--drivers/usb/dwc2/hcd.c12
-rw-r--r--drivers/usb/dwc2/params.c75
-rw-r--r--drivers/usb/dwc3/Kconfig7
-rw-r--r--drivers/usb/dwc3/core.c29
-rw-r--r--drivers/usb/dwc3/core.h25
-rw-r--r--drivers/usb/dwc3/gadget.c14
-rw-r--r--drivers/usb/gadget/configfs.c26
-rw-r--r--drivers/usb/gadget/epautoconf.c2
-rw-r--r--drivers/usb/gadget/function/f_fs.c2
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c97
-rw-r--r--drivers/usb/gadget/function/f_phonet.c5
-rw-r--r--drivers/usb/gadget/function/f_uac1.c1
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/f_uvc.c8
-rw-r--r--drivers/usb/gadget/function/u_audio.c96
-rw-r--r--drivers/usb/gadget/function/u_audio.h10
-rw-r--r--drivers/usb/gadget/function/u_ether.c4
-rw-r--r--drivers/usb/gadget/function/u_uac2.h1
-rw-r--r--drivers/usb/gadget/function/uvc.h6
-rw-r--r--drivers/usb/gadget/function/uvc_queue.c2
-rw-r--r--drivers/usb/gadget/function/uvc_v4l2.c52
-rw-r--r--drivers/usb/gadget/function/uvc_video.c71
-rw-r--r--drivers/usb/gadget/function/uvc_video.h2
-rw-r--r--drivers/usb/gadget/legacy/hid.c4
-rw-r--r--drivers/usb/gadget/legacy/inode.c7
-rw-r--r--drivers/usb/gadget/udc/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/amd5536udc.h1
-rw-r--r--drivers/usb/gadget/udc/core.c10
-rw-r--r--drivers/usb/gadget/udc/goku_udc.c6
-rw-r--r--drivers/usb/gadget/udc/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/udc/snps_udc_plat.c5
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c25
-rw-r--r--drivers/usb/host/Kconfig6
-rw-r--r--drivers/usb/host/ehci-atmel.c8
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c11
-rw-r--r--drivers/usb/host/ehci-mem.c3
-rw-r--r--drivers/usb/host/ehci-mv.c2
-rw-r--r--drivers/usb/host/ehci-platform.c6
-rw-r--r--drivers/usb/host/ehci.h1
-rw-r--r--drivers/usb/host/fotg210-hcd.c5
-rw-r--r--drivers/usb/host/max3421-hcd.c25
-rw-r--r--drivers/usb/host/ohci-hcd.c3
-rw-r--r--drivers/usb/host/ohci-hub.c3
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/xhci-mtk-sch.c2
-rw-r--r--drivers/usb/host/xhci-mtk.c2
-rw-r--r--drivers/usb/host/xhci-pci.c16
-rw-r--r--drivers/usb/misc/iowarrior.c8
-rw-r--r--drivers/usb/mtu3/mtu3_plat.c2
-rw-r--r--drivers/usb/musb/Kconfig2
-rw-r--r--drivers/usb/musb/mediatek.c1
-rw-r--r--drivers/usb/musb/musb_gadget.c4
-rw-r--r--drivers/usb/musb/sunxi.c8
-rw-r--r--drivers/usb/musb/tusb6010.c5
-rw-r--r--drivers/usb/phy/phy-tahvo.c4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c198
-rw-r--r--drivers/usb/serial/ch341.c85
-rw-r--r--drivers/usb/serial/cp210x.c109
-rw-r--r--drivers/usb/serial/f81232.c96
-rw-r--r--drivers/usb/serial/ftdi_sio.c53
-rw-r--r--drivers/usb/serial/keyspan.c15
-rw-r--r--drivers/usb/serial/keyspan_pda.c67
-rw-r--r--drivers/usb/serial/kl5kusb105.c115
-rw-r--r--drivers/usb/serial/usb-serial.c59
-rw-r--r--drivers/usb/storage/unusual_devs.h10
-rw-r--r--drivers/usb/typec/Kconfig4
-rw-r--r--drivers/usb/typec/altmodes/Kconfig1
-rw-r--r--drivers/usb/typec/altmodes/displayport.c58
-rw-r--r--drivers/usb/typec/hd3ss3220.c8
-rw-r--r--drivers/usb/typec/tcpm/tcpci.c2
-rw-r--r--drivers/usb/typec/tipd/core.c223
-rw-r--r--drivers/usb/typec/tipd/tps6598x.h12
-rw-r--r--drivers/usb/typec/tipd/trace.h23
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c337
-rw-r--r--drivers/usb/typec/ucsi/ucsi.h3
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c2
-rw-r--r--drivers/usb/usb-skeleton.c2
-rw-r--r--drivers/vdpa/Kconfig8
-rw-r--r--drivers/vdpa/Makefile1
-rw-r--r--drivers/vdpa/alibaba/Makefile3
-rw-r--r--drivers/vdpa/alibaba/eni_vdpa.c553
-rw-r--r--drivers/vdpa/ifcvf/ifcvf_main.c3
-rw-r--r--drivers/vdpa/mlx5/core/mlx5_vdpa.h10
-rw-r--r--drivers/vdpa/mlx5/core/mr.c8
-rw-r--r--drivers/vdpa/mlx5/core/resources.c13
-rw-r--r--drivers/vdpa/mlx5/net/mlx5_vnet.c204
-rw-r--r--drivers/vdpa/vdpa.c261
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_blk.c3
-rw-r--r--drivers/vdpa/vdpa_sim/vdpa_sim_net.c38
-rw-r--r--drivers/vdpa/vdpa_user/vduse_dev.c32
-rw-r--r--drivers/vdpa/virtio_pci/vp_vdpa.c12
-rw-r--r--drivers/vfio/fsl-mc/vfio_fsl_mc.c62
-rw-r--r--drivers/vfio/mdev/mdev_driver.c45
-rw-r--r--drivers/vfio/mdev/vfio_mdev.c2
-rw-r--r--drivers/vfio/pci/vfio_pci_core.c13
-rw-r--r--drivers/vfio/pci/vfio_pci_igd.c234
-rw-r--r--drivers/vfio/platform/vfio_platform_common.c13
-rw-r--r--drivers/vfio/vfio.c622
-rw-r--r--drivers/vfio/vfio.h72
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c6
-rw-r--r--drivers/vfio/vfio_iommu_type1.c256
-rw-r--r--drivers/vhost/vdpa.c3
-rw-r--r--drivers/video/fbdev/efifb.c21
-rw-r--r--drivers/virtio/Kconfig10
-rw-r--r--drivers/virtio/Makefile1
-rw-r--r--drivers/virtio/virtio_pci_common.c58
-rw-r--r--drivers/virtio/virtio_pci_common.h16
-rw-r--r--drivers/virtio/virtio_pci_legacy.c106
-rw-r--r--drivers/virtio/virtio_pci_legacy_dev.c220
-rw-r--r--drivers/virtio/virtio_pci_modern.c6
-rw-r--r--drivers/virtio/virtio_ring.c92
-rw-r--r--drivers/virtio/virtio_vdpa.c19
-rw-r--r--drivers/watchdog/iTCO_wdt.c12
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c2
-rw-r--r--drivers/watchdog/mtk_wdt.c6
-rw-r--r--drivers/watchdog/omap_wdt.c6
-rw-r--r--drivers/watchdog/sbsa_gwdt.c5
4033 files changed, 315287 insertions(+), 99068 deletions(-)
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 1da360c51d66..cdbdf68bd98f 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -71,7 +71,7 @@ config ACPI_DEBUGGER
if ACPI_DEBUGGER
config ACPI_DEBUGGER_USER
- tristate "Userspace debugger accessiblity"
+ tristate "Userspace debugger accessibility"
depends on DEBUG_FS
help
Export /sys/kernel/debug/acpi/acpidbg for userspace utilities
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index b0cb662233f1..81aff651a0d4 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -61,6 +61,7 @@ static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
static int ac_sleep_before_get_state_ms;
static int ac_check_pmic = 1;
+static int ac_only;
static struct acpi_driver acpi_ac_driver = {
.name = "ac",
@@ -93,6 +94,11 @@ static int acpi_ac_get_state(struct acpi_ac *ac)
if (!ac)
return -EINVAL;
+ if (ac_only) {
+ ac->state = 1;
+ return 0;
+ }
+
status = acpi_evaluate_integer(ac->device->handle, "_PSR", NULL,
&ac->state);
if (ACPI_FAILURE(status)) {
@@ -200,6 +206,12 @@ static int __init ac_do_not_check_pmic_quirk(const struct dmi_system_id *d)
return 0;
}
+static int __init ac_only_quirk(const struct dmi_system_id *d)
+{
+ ac_only = 1;
+ return 0;
+}
+
/* Please keep this list alphabetically sorted */
static const struct dmi_system_id ac_dmi_table[] __initconst = {
{
@@ -210,6 +222,13 @@ static const struct dmi_system_id ac_dmi_table[] __initconst = {
},
},
{
+ /* Kodlix GK45 returning incorrect state */
+ .callback = ac_only_quirk,
+ .matches = {
+ DMI_MATCH(DMI_PRODUCT_NAME, "GK45"),
+ },
+ },
+ {
/* Lenovo Ideapad Miix 320, AXP288 PMIC, separate fuel-gauge */
.callback = ac_do_not_check_pmic_quirk,
.matches = {
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 30b1f511c2af..bcae0f03572b 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -712,14 +712,13 @@ static void __lpss_reg_write(u32 val, struct lpss_private_data *pdata,
static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
{
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct lpss_private_data *pdata;
unsigned long flags;
int ret;
- ret = acpi_bus_get_device(ACPI_HANDLE(dev), &adev);
- if (WARN_ON(ret))
- return ret;
+ if (WARN_ON(!adev))
+ return -ENODEV;
spin_lock_irqsave(&dev->power.lock, flags);
if (pm_runtime_suspended(dev)) {
@@ -732,6 +731,7 @@ static int lpss_reg_read(struct device *dev, unsigned int reg, u32 *val)
goto out;
}
*val = __lpss_reg_read(pdata, reg);
+ ret = 0;
out:
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -750,7 +750,7 @@ static ssize_t lpss_ltr_show(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- return snprintf(buf, PAGE_SIZE, "%08x\n", ltr_value);
+ return sysfs_emit(buf, "%08x\n", ltr_value);
}
static ssize_t lpss_ltr_mode_show(struct device *dev,
@@ -1266,7 +1266,8 @@ static int acpi_lpss_platform_notify(struct notifier_block *nb,
if (!id || !id->driver_data)
return 0;
- if (acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ adev = ACPI_COMPANION(&pdev->dev);
+ if (!adev)
return 0;
pdata = acpi_driver_data(adev);
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 8f2dc176bb41..ffdcfcd4a10d 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -156,8 +156,6 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"BRI0A49"}, /* Boca Complete Ofc Communicator 14.4 Data-FAX */
{"BRI1400"}, /* Boca Research 33,600 ACF Modem */
{"BRI3400"}, /* Boca 33.6 Kbps Internal FD34FSVD */
- {"BRI0A49"}, /* Boca 33.6 Kbps Internal FD34FSVD */
- {"BDP3336"}, /* Best Data Products Inc. Smart One 336F PnP Modem */
{"CPI4050"}, /* Computer Peripherals Inc. EuroViVa CommCenter-33.6 SP PnP */
{"CTL3001"}, /* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
{"CTL3011"}, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index d41b810e367c..4366d36ef119 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -226,6 +226,8 @@ extern struct acpi_bit_register_info
acpi_gbl_bit_register_info[ACPI_NUM_BITREG];
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a);
ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_a_s0);
+ACPI_GLOBAL(u8, acpi_gbl_sleep_type_b_s0);
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/hwesleep.c b/drivers/acpi/acpica/hwesleep.c
index 803402aefaeb..808fdf54aeeb 100644
--- a/drivers/acpi/acpica/hwesleep.c
+++ b/drivers/acpi/acpica/hwesleep.c
@@ -147,17 +147,13 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state)
acpi_status acpi_hw_extended_wake_prep(u8 sleep_state)
{
- acpi_status status;
u8 sleep_type_value;
ACPI_FUNCTION_TRACE(hw_extended_wake_prep);
- status = acpi_get_sleep_type_data(ACPI_STATE_S0,
- &acpi_gbl_sleep_type_a,
- &acpi_gbl_sleep_type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) {
sleep_type_value =
- ((acpi_gbl_sleep_type_a << ACPI_X_SLEEP_TYPE_POSITION) &
+ ((acpi_gbl_sleep_type_a_s0 << ACPI_X_SLEEP_TYPE_POSITION) &
ACPI_X_SLEEP_TYPE_MASK);
(void)acpi_write((u64)(sleep_type_value | ACPI_X_SLEEP_ENABLE),
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 14baa13bf848..34a3825f25d3 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -179,7 +179,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
{
- acpi_status status;
+ acpi_status status = AE_OK;
struct acpi_bit_register_info *sleep_type_reg_info;
struct acpi_bit_register_info *sleep_enable_reg_info;
u32 pm1a_control;
@@ -192,10 +192,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
* This is unclear from the ACPI Spec, but it is required
* by some machines.
*/
- status = acpi_get_sleep_type_data(ACPI_STATE_S0,
- &acpi_gbl_sleep_type_a,
- &acpi_gbl_sleep_type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_gbl_sleep_type_a_s0 != ACPI_SLEEP_TYPE_INVALID) {
sleep_type_reg_info =
acpi_hw_get_bit_register_info(ACPI_BITREG_SLEEP_TYPE);
sleep_enable_reg_info =
@@ -216,9 +213,9 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state)
/* Insert the SLP_TYP bits */
- pm1a_control |= (acpi_gbl_sleep_type_a <<
+ pm1a_control |= (acpi_gbl_sleep_type_a_s0 <<
sleep_type_reg_info->bit_position);
- pm1b_control |= (acpi_gbl_sleep_type_b <<
+ pm1b_control |= (acpi_gbl_sleep_type_b_s0 <<
sleep_type_reg_info->bit_position);
/* Write the control registers and ignore any errors */
diff --git a/drivers/acpi/acpica/hwxfsleep.c b/drivers/acpi/acpica/hwxfsleep.c
index 89b12afed564..e4cde23a2906 100644
--- a/drivers/acpi/acpica/hwxfsleep.c
+++ b/drivers/acpi/acpica/hwxfsleep.c
@@ -217,6 +217,13 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
return_ACPI_STATUS(status);
}
+ status = acpi_get_sleep_type_data(ACPI_STATE_S0,
+ &acpi_gbl_sleep_type_a_s0,
+ &acpi_gbl_sleep_type_b_s0);
+ if (ACPI_FAILURE(status)) {
+ acpi_gbl_sleep_type_a_s0 = ACPI_SLEEP_TYPE_INVALID;
+ }
+
/* Execute the _PTS method (Prepare To Sleep) */
arg_list.count = 1;
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 7b8e8bf1e824..8afa1ccaf12e 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -73,6 +73,7 @@ static struct acpi_interface_info acpi_default_supported_interfaces[] = {
{"Windows 2018", NULL, 0, ACPI_OSI_WIN_10_RS4}, /* Windows 10 version 1803 - Added 11/2018 */
{"Windows 2018.2", NULL, 0, ACPI_OSI_WIN_10_RS5}, /* Windows 10 version 1809 - Added 11/2018 */
{"Windows 2019", NULL, 0, ACPI_OSI_WIN_10_19H1}, /* Windows 10 version 1903 - Added 08/2019 */
+ {"Windows 2020", NULL, 0, ACPI_OSI_WIN_10_20H1}, /* Windows 10 version 2004 - Added 08/2021 */
/* Feature Group Strings */
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 2882450c443e..edb2622fd35f 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -28,9 +28,10 @@
#undef pr_fmt
#define pr_fmt(fmt) "EINJ: " fmt
-#define SPIN_UNIT 100 /* 100ns */
-/* Firmware should respond within 1 milliseconds */
-#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
+#define SLEEP_UNIT_MIN 1000 /* 1ms */
+#define SLEEP_UNIT_MAX 5000 /* 5ms */
+/* Firmware should respond within 1 second */
+#define FIRMWARE_TIMEOUT (1 * USEC_PER_SEC)
#define ACPI5_VENDOR_BIT BIT(31)
#define MEM_ERROR_MASK (ACPI_EINJ_MEMORY_CORRECTABLE | \
ACPI_EINJ_MEMORY_UNCORRECTABLE | \
@@ -171,13 +172,13 @@ static int einj_get_available_error_type(u32 *type)
static int einj_timedout(u64 *t)
{
- if ((s64)*t < SPIN_UNIT) {
+ if ((s64)*t < SLEEP_UNIT_MIN) {
pr_warn(FW_WARN "Firmware does not respond in time\n");
return 1;
}
- *t -= SPIN_UNIT;
- ndelay(SPIN_UNIT);
- touch_nmi_watchdog();
+ *t -= SLEEP_UNIT_MIN;
+ usleep_range(SLEEP_UNIT_MIN, SLEEP_UNIT_MAX);
+
return 0;
}
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 277f00b288d1..0edc1ed47673 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -86,7 +86,9 @@ static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
return len;
};
-int apei_hest_parse(apei_hest_func_t func, void *data)
+typedef int (*apei_hest_func_t)(struct acpi_hest_header *hest_hdr, void *data);
+
+static int apei_hest_parse(apei_hest_func_t func, void *data)
{
struct acpi_hest_header *hest_hdr;
int i, rc, len;
@@ -121,7 +123,6 @@ int apei_hest_parse(apei_hest_func_t func, void *data)
return 0;
}
-EXPORT_SYMBOL_GPL(apei_hest_parse);
/*
* Check if firmware advertises firmware first mode. We need FF bit to be set
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index dae91f906cea..8afa85d6eb6a 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -169,7 +169,7 @@ static int acpi_battery_is_charged(struct acpi_battery *battery)
return 1;
/* fallback to using design values for broken batteries */
- if (battery->design_capacity == battery->capacity_now)
+ if (battery->design_capacity <= battery->capacity_now)
return 1;
/* we don't do any sort of metric based on percentages */
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index bd482108310c..a85c351589be 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -43,7 +43,7 @@
#include <acpi/cppc_acpi.h>
struct cppc_pcc_data {
- struct mbox_chan *pcc_channel;
+ struct pcc_mbox_chan *pcc_channel;
void __iomem *pcc_comm_addr;
bool pcc_channel_acquired;
unsigned int deadline_us;
@@ -295,7 +295,7 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
pcc_ss_data->platform_owns_pcc = true;
/* Ring doorbell */
- ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
+ ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
if (ret < 0) {
pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
pcc_ss_id, cmd, ret);
@@ -308,10 +308,10 @@ static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
if (pcc_ss_data->pcc_mrtt)
pcc_ss_data->last_cmd_cmpl_time = ktime_get();
- if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
- mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
+ if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
+ mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
else
- mbox_client_txdone(pcc_ss_data->pcc_channel, ret);
+ mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);
end:
if (cmd == CMD_WRITE) {
@@ -493,46 +493,33 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
static int register_pcc_channel(int pcc_ss_idx)
{
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
u64 usecs_lat;
if (pcc_ss_idx >= 0) {
- pcc_data[pcc_ss_idx]->pcc_channel =
- pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
+ pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);
- if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
+ if (IS_ERR(pcc_chan)) {
pr_err("Failed to find PCC channel for subspace %d\n",
pcc_ss_idx);
return -ENODEV;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
-
- if (!cppc_ss) {
- pr_err("No PCC subspace found for %d CPPC\n",
- pcc_ss_idx);
- return -ENODEV;
- }
-
+ pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
/*
* cppc_ss->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- usecs_lat = NUM_RETRIES * cppc_ss->latency;
+ usecs_lat = NUM_RETRIES * pcc_chan->latency;
pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
- pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
- pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
- pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;
+ pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
+ pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
+ pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;
pcc_data[pcc_ss_idx]->pcc_comm_addr =
- acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
+ acpi_os_ioremap(pcc_chan->shmem_base_addr,
+ pcc_chan->shmem_size);
if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
pr_err("Failed to ioremap PCC comm region mem for %d\n",
pcc_ss_idx);
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 7cf92158008f..c8e9b962e18c 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -492,7 +492,7 @@ static ssize_t docked_show(struct device *dev,
struct acpi_device *adev = NULL;
acpi_bus_get_device(dock_station->handle, &adev);
- return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
+ return sysfs_emit(buf, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR_RO(docked);
@@ -504,7 +504,7 @@ static ssize_t flags_show(struct device *dev,
{
struct dock_station *dock_station = dev->platform_data;
- return snprintf(buf, PAGE_SIZE, "%d\n", dock_station->flags);
+ return sysfs_emit(buf, "%d\n", dock_station->flags);
}
static DEVICE_ATTR_RO(flags);
@@ -543,7 +543,7 @@ static ssize_t uid_show(struct device *dev,
if (ACPI_FAILURE(status))
return 0;
- return snprintf(buf, PAGE_SIZE, "%llx\n", lbuf);
+ return sysfs_emit(buf, "%llx\n", lbuf);
}
static DEVICE_ATTR_RO(uid);
@@ -562,7 +562,7 @@ static ssize_t type_show(struct device *dev,
else
type = "unknown";
- return snprintf(buf, PAGE_SIZE, "%s\n", type);
+ return sysfs_emit(buf, "%s\n", type);
}
static DEVICE_ATTR_RO(type);
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 7a33a6d985f8..7cd0009e7ff3 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -17,6 +17,8 @@
#include <linux/rwsem.h>
#include <linux/acpi.h>
#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/pci-acpi.h>
#include <linux/platform_device.h>
#include "internal.h"
@@ -111,13 +113,10 @@ struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
return NULL;
list_for_each_entry(adev, &parent->children, node) {
- unsigned long long addr;
- acpi_status status;
+ acpi_bus_address addr = acpi_device_adr(adev);
int score;
- status = acpi_evaluate_integer(adev->handle, METHOD_NAME__ADR,
- NULL, &addr);
- if (ACPI_FAILURE(status) || addr != address)
+ if (!adev->pnp.type.bus_address || addr != address)
continue;
if (!ret) {
@@ -287,12 +286,13 @@ EXPORT_SYMBOL_GPL(acpi_unbind_one);
void acpi_device_notify(struct device *dev)
{
- struct acpi_bus_type *type = acpi_get_bus_type(dev);
struct acpi_device *adev;
int ret;
ret = acpi_bind_one(dev, NULL);
if (ret) {
+ struct acpi_bus_type *type = acpi_get_bus_type(dev);
+
if (!type)
goto err;
@@ -304,17 +304,26 @@ void acpi_device_notify(struct device *dev)
ret = acpi_bind_one(dev, adev);
if (ret)
goto err;
- }
- adev = ACPI_COMPANION(dev);
- if (dev_is_platform(dev))
- acpi_configure_pmsi_domain(dev);
+ if (type->setup) {
+ type->setup(dev);
+ goto done;
+ }
+ } else {
+ adev = ACPI_COMPANION(dev);
+
+ if (dev_is_pci(dev)) {
+ pci_acpi_setup(dev, adev);
+ goto done;
+ } else if (dev_is_platform(dev)) {
+ acpi_configure_pmsi_domain(dev);
+ }
+ }
- if (type && type->setup)
- type->setup(dev);
- else if (adev->handler && adev->handler->bind)
+ if (adev->handler && adev->handler->bind)
adev->handler->bind(dev);
+done:
acpi_handle_debug(ACPI_HANDLE(dev), "Bound to device %s\n",
dev_name(dev));
@@ -327,16 +336,39 @@ err:
void acpi_device_notify_remove(struct device *dev)
{
struct acpi_device *adev = ACPI_COMPANION(dev);
- struct acpi_bus_type *type;
if (!adev)
return;
- type = acpi_get_bus_type(dev);
- if (type && type->cleanup)
- type->cleanup(dev);
+ if (dev_is_pci(dev))
+ pci_acpi_cleanup(dev, adev);
else if (adev->handler && adev->handler->unbind)
adev->handler->unbind(dev);
acpi_unbind_one(dev);
}
+
+int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used)
+{
+ struct acpi_device *adev = to_acpi_device(dev);
+
+ /*
+ * Skip device objects with device IDs, because they may be in use even
+ * if they are not companions of any physical device objects.
+ */
+ if (adev->pnp.type.hardware_id)
+ return 0;
+
+ mutex_lock(&adev->physical_node_lock);
+
+ /*
+ * Device objects without device IDs are not in use if they have no
+ * corresponding physical device objects.
+ */
+ if (list_empty(&adev->physical_node_list))
+ acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
+
+ mutex_unlock(&adev->physical_node_lock);
+
+ return 0;
+}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index d91b560e8867..8fbdc172864b 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -117,6 +117,7 @@ bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
int acpi_bus_register_early_device(int type);
+int acpi_dev_turn_off_if_unused(struct device *dev, void *not_used);
/* --------------------------------------------------------------------------
Device Matching and Notification
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index b9863e22b952..112256154880 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -52,7 +52,6 @@ struct acpi_power_resource {
u32 order;
unsigned int ref_count;
u8 state;
- bool wakeup_enabled;
struct mutex resource_lock;
struct list_head dependents;
};
@@ -615,20 +614,19 @@ int acpi_power_wakeup_list_init(struct list_head *list, int *system_level_p)
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
- int result;
u8 state;
mutex_lock(&resource->resource_lock);
- result = acpi_power_get_state(resource, &state);
- if (result) {
- mutex_unlock(&resource->resource_lock);
- return result;
- }
- if (state == ACPI_POWER_RESOURCE_STATE_ON) {
- resource->ref_count++;
- resource->wakeup_enabled = true;
- }
+ /*
+ * Make sure that the power resource state and its reference
+ * counter value are consistent with each other.
+ */
+ if (!resource->ref_count &&
+ !acpi_power_get_state(resource, &state) &&
+ state == ACPI_POWER_RESOURCE_STATE_ON)
+ __acpi_power_off(resource);
+
if (system_level > resource->system_level)
system_level = resource->system_level;
@@ -711,7 +709,6 @@ int acpi_device_sleep_wake(struct acpi_device *dev,
*/
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
{
- struct acpi_power_resource_entry *entry;
int err = 0;
if (!dev || !dev->wakeup.flags.valid)
@@ -722,33 +719,22 @@ int acpi_enable_wakeup_device_power(struct acpi_device *dev, int sleep_state)
if (dev->wakeup.prepare_count++)
goto out;
- list_for_each_entry(entry, &dev->wakeup.resources, node) {
- struct acpi_power_resource *resource = entry->resource;
-
- mutex_lock(&resource->resource_lock);
-
- if (!resource->wakeup_enabled) {
- err = acpi_power_on_unlocked(resource);
- if (!err)
- resource->wakeup_enabled = true;
- }
-
- mutex_unlock(&resource->resource_lock);
-
- if (err) {
- dev_err(&dev->dev,
- "Cannot turn wakeup power resources on\n");
- dev->wakeup.flags.valid = 0;
- goto out;
- }
+ err = acpi_power_on_list(&dev->wakeup.resources);
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn on wakeup power resources\n");
+ dev->wakeup.flags.valid = 0;
+ goto out;
}
+
/*
* Passing 3 as the third argument below means the device may be
* put into arbitrary power state afterward.
*/
err = acpi_device_sleep_wake(dev, 1, sleep_state, 3);
- if (err)
+ if (err) {
+ acpi_power_off_list(&dev->wakeup.resources);
dev->wakeup.prepare_count = 0;
+ }
out:
mutex_unlock(&acpi_device_lock);
@@ -771,39 +757,33 @@ int acpi_disable_wakeup_device_power(struct acpi_device *dev)
mutex_lock(&acpi_device_lock);
- if (--dev->wakeup.prepare_count > 0)
+ if (dev->wakeup.prepare_count > 1) {
+ dev->wakeup.prepare_count--;
goto out;
+ }
- /*
- * Executing the code below even if prepare_count is already zero when
- * the function is called may be useful, for example for initialisation.
- */
- if (dev->wakeup.prepare_count < 0)
- dev->wakeup.prepare_count = 0;
+ /* Do nothing if wakeup power has not been enabled for this device. */
+ if (!dev->wakeup.prepare_count)
+ goto out;
err = acpi_device_sleep_wake(dev, 0, 0, 0);
if (err)
goto out;
+ /*
+ * All of the power resources in the list need to be turned off even if
+ * there are errors.
+ */
list_for_each_entry(entry, &dev->wakeup.resources, node) {
- struct acpi_power_resource *resource = entry->resource;
-
- mutex_lock(&resource->resource_lock);
-
- if (resource->wakeup_enabled) {
- err = acpi_power_off_unlocked(resource);
- if (!err)
- resource->wakeup_enabled = false;
- }
-
- mutex_unlock(&resource->resource_lock);
+ int ret;
- if (err) {
- dev_err(&dev->dev,
- "Cannot turn wakeup power resources off\n");
- dev->wakeup.flags.valid = 0;
- break;
- }
+ ret = acpi_power_off(entry->resource);
+ if (ret && !err)
+ err = ret;
+ }
+ if (err) {
+ dev_err(&dev->dev, "Cannot turn off wakeup power resources\n");
+ dev->wakeup.flags.valid = 0;
}
out:
@@ -943,6 +923,7 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
union acpi_object acpi_object;
struct acpi_buffer buffer = { sizeof(acpi_object), &acpi_object };
acpi_status status;
+ u8 state_dummy;
int result;
acpi_bus_get_device(handle, &device);
@@ -971,6 +952,10 @@ struct acpi_device *acpi_add_power_resource(acpi_handle handle)
resource->order = acpi_object.power_resource.resource_order;
resource->state = ACPI_POWER_RESOURCE_STATE_UNKNOWN;
+ /* Get the initial state or just flip it on if that fails. */
+ if (acpi_power_get_state(resource, &state_dummy))
+ __acpi_power_on(resource);
+
pr_info("%s [%s]\n", acpi_device_name(device), acpi_device_bid(device));
device->flags.match_driver = true;
@@ -1035,13 +1020,8 @@ void acpi_turn_off_unused_power_resources(void)
list_for_each_entry_reverse(resource, &acpi_power_resource_list, list_node) {
mutex_lock(&resource->resource_lock);
- /*
- * Turn off power resources in an unknown state too, because the
- * platform firmware on some system expects the OS to turn off
- * power resources without any users unconditionally.
- */
if (!resource->ref_count &&
- resource->state != ACPI_POWER_RESOURCE_STATE_OFF) {
+ resource->state == ACPI_POWER_RESOURCE_STATE_ON) {
acpi_handle_debug(resource->device.handle, "Turning OFF\n");
__acpi_power_off(resource);
}
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index fe69dc518f31..701f61c01359 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -747,6 +747,73 @@ int find_acpi_cpu_topology_package(unsigned int cpu)
}
/**
+ * find_acpi_cpu_topology_cluster() - Determine a unique CPU cluster value
+ * @cpu: Kernel logical CPU number
+ *
+ * Determine a topology unique cluster ID for the given CPU/thread.
+ * This ID can then be used to group peers, which will have matching ids.
+ *
+ * The cluster, if present, is the level of topology above CPUs. In a
+ * multi-thread CPU, it will be the level above the CPU, not the thread.
+ * It may not exist in single CPU systems. In simple multi-CPU systems,
+ * it may be equal to the package topology level.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found
+ * or there is no topology level above the CPU.
+ * Otherwise returns a value which represents the cluster for this CPU.
+ */
+
+int find_acpi_cpu_topology_cluster(unsigned int cpu)
+{
+ struct acpi_table_header *table;
+ acpi_status status;
+ struct acpi_pptt_processor *cpu_node, *cluster_node;
+ u32 acpi_cpu_id;
+ int retval;
+ int is_thread;
+
+ status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+ if (ACPI_FAILURE(status)) {
+ acpi_pptt_warn_missing();
+ return -ENOENT;
+ }
+
+ acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+ cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+ if (cpu_node == NULL || !cpu_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+
+ is_thread = cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD;
+ cluster_node = fetch_pptt_node(table, cpu_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ if (is_thread) {
+ if (!cluster_node->parent) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ cluster_node = fetch_pptt_node(table, cluster_node->parent);
+ if (cluster_node == NULL) {
+ retval = -ENOENT;
+ goto put_table;
+ }
+ }
+ if (cluster_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
+ retval = cluster_node->acpi_processor_id;
+ else
+ retval = ACPI_PTR_DIFF(cluster_node, table);
+
+put_table:
+ acpi_put_table(table);
+
+ return retval;
+}
+
+/**
* find_acpi_cpu_topology_hetero_id() - Get a core architecture tag
* @cpu: Kernel logical CPU number
*
diff --git a/drivers/acpi/prmt.c b/drivers/acpi/prmt.c
index 89c22bc55057..4d3a219c67f8 100644
--- a/drivers/acpi/prmt.c
+++ b/drivers/acpi/prmt.c
@@ -49,7 +49,6 @@ struct prm_context_buffer {
};
#pragma pack()
-
static LIST_HEAD(prm_module_list);
struct prm_handler_info {
@@ -73,7 +72,6 @@ struct prm_module_info {
struct prm_handler_info handlers[];
};
-
static u64 efi_pa_va_lookup(u64 pa)
{
efi_memory_desc_t *md;
@@ -88,7 +86,6 @@ static u64 efi_pa_va_lookup(u64 pa)
return 0;
}
-
#define get_first_handler(a) ((struct acpi_prmt_handler_info *) ((char *) (a) + a->handler_info_offset))
#define get_next_handler(a) ((struct acpi_prmt_handler_info *) (sizeof(struct acpi_prmt_handler_info) + (char *) a))
@@ -99,7 +96,7 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
struct acpi_prmt_handler_info *handler_info;
struct prm_handler_info *th;
struct prm_module_info *tm;
- u64 mmio_count = 0;
+ u64 *mmio_count;
u64 cur_handler = 0;
u32 module_info_size = 0;
u64 mmio_range_size = 0;
@@ -108,6 +105,8 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
module_info = (struct acpi_prmt_module_info *) header;
module_info_size = struct_size(tm, handlers, module_info->handler_info_count);
tm = kmalloc(module_info_size, GFP_KERNEL);
+ if (!tm)
+ goto parse_prmt_out1;
guid_copy(&tm->guid, (guid_t *) module_info->module_guid);
tm->major_rev = module_info->major_rev;
@@ -120,14 +119,24 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
* Each module is associated with a list of addr
* ranges that it can use during the service
*/
- mmio_count = *(u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
- mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
+ mmio_count = (u64 *) memremap(module_info->mmio_list_pointer, 8, MEMREMAP_WB);
+ if (!mmio_count)
+ goto parse_prmt_out2;
+
+ mmio_range_size = struct_size(tm->mmio_info, addr_ranges, *mmio_count);
tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ if (!tm->mmio_info)
+ goto parse_prmt_out3;
+
temp_mmio = memremap(module_info->mmio_list_pointer, mmio_range_size, MEMREMAP_WB);
+ if (!temp_mmio)
+ goto parse_prmt_out4;
memmove(tm->mmio_info, temp_mmio, mmio_range_size);
} else {
- mmio_range_size = struct_size(tm->mmio_info, addr_ranges, mmio_count);
- tm->mmio_info = kmalloc(mmio_range_size, GFP_KERNEL);
+ tm->mmio_info = kmalloc(sizeof(*tm->mmio_info), GFP_KERNEL);
+ if (!tm->mmio_info)
+ goto parse_prmt_out2;
+
tm->mmio_info->mmio_count = 0;
}
@@ -145,6 +154,15 @@ acpi_parse_prmt(union acpi_subtable_headers *header, const unsigned long end)
} while (++cur_handler < tm->handler_count && (handler_info = get_next_handler(handler_info)));
return 0;
+
+parse_prmt_out4:
+ kfree(tm->mmio_info);
+parse_prmt_out3:
+ memunmap(mmio_count);
+parse_prmt_out2:
+ kfree(tm);
+parse_prmt_out1:
+ return -ENOMEM;
}
#define GET_MODULE 0
@@ -171,7 +189,6 @@ static void *find_guid_info(const guid_t *guid, u8 mode)
return NULL;
}
-
static struct prm_module_info *find_prm_module(const guid_t *guid)
{
return (struct prm_module_info *)find_guid_info(guid, GET_MODULE);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f37fba9e5ba0..76ef1bcc8848 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -789,7 +789,8 @@ static int acpi_processor_setup_cstates(struct acpi_processor *pr)
state->enter = acpi_idle_enter;
state->flags = 0;
- if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
+ if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2 ||
+ cx->type == ACPI_STATE_C3) {
state->enter_dead = acpi_idle_play_dead;
drv->safe_state_index = count;
}
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index ee78a210c606..3c25ce8c95ba 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -16,6 +16,7 @@
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/dmi.h>
#ifdef CONFIG_X86
#define valid_IRQ(i) (((i) != 0) && ((i) != 2))
@@ -380,9 +381,58 @@ unsigned int acpi_dev_get_irq_type(int triggering, int polarity)
}
EXPORT_SYMBOL_GPL(acpi_dev_get_irq_type);
+static const struct dmi_system_id medion_laptop[] = {
+ {
+ .ident = "MEDION P15651",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M15T"),
+ },
+ },
+ {
+ .ident = "MEDION S17405",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "MEDION"),
+ DMI_MATCH(DMI_BOARD_NAME, "M17T"),
+ },
+ },
+ { }
+};
+
+struct irq_override_cmp {
+ const struct dmi_system_id *system;
+ unsigned char irq;
+ unsigned char triggering;
+ unsigned char polarity;
+ unsigned char shareable;
+};
+
+static const struct irq_override_cmp skip_override_table[] = {
+ { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+};
+
+static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
+ u8 shareable)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
+ const struct irq_override_cmp *entry = &skip_override_table[i];
+
+ if (dmi_check_system(entry->system) &&
+ entry->irq == gsi &&
+ entry->triggering == triggering &&
+ entry->polarity == polarity &&
+ entry->shareable == shareable)
+ return false;
+ }
+
+ return true;
+}
+
static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
u8 triggering, u8 polarity, u8 shareable,
- bool legacy)
+ bool check_override)
{
int irq, p, t;
@@ -401,7 +451,9 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
* using extended IRQ descriptors we take the IRQ configuration
* from _CRS directly.
*/
- if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
+ if (check_override &&
+ acpi_dev_irq_override(gsi, triggering, polarity, shareable) &&
+ !acpi_get_override_irq(gsi, &t, &p)) {
u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
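To show how the quirk table above is meant to grow, a sketch of a second entry; the vendor and board strings are invented for illustration and are not part of this change:

static const struct dmi_system_id example_laptop[] = {
	{
		.ident = "EXAMPLE Laptop X1",	/* hypothetical machine */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE"),
			DMI_MATCH(DMI_BOARD_NAME, "X1"),
		},
	},
	{ }
};

static const struct irq_override_cmp skip_override_table[] = {
	{ medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
	/* skip the override for GSI 1, level triggered, active low, exclusive */
	{ example_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
};

acpi_dev_irq_override() only returns false when both the DMI match and the exact descriptor (GSI, trigger, polarity, sharing) match, so unrelated machines keep the legacy override behaviour.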
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 5b54c80b9d32..dce2c291b982 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -608,6 +608,7 @@ struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
{
return handle_to_device(handle, get_acpi_device);
}
+EXPORT_SYMBOL_GPL(acpi_bus_get_acpi_device);
static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
{
@@ -2559,6 +2560,12 @@ int __init acpi_scan_init(void)
}
}
+ /*
+ * Make sure that power management resources are not blocked by ACPI
+ * device objects with no users.
+ */
+ bus_for_each_dev(&acpi_bus_type, NULL, NULL, acpi_dev_turn_off_if_unused);
+
acpi_turn_off_unused_power_resources();
acpi_scan_initialized = true;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 3023224515ab..eaa47753b758 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -815,14 +815,18 @@ void __weak acpi_s2idle_setup(void)
static void acpi_sleep_suspend_setup(void)
{
+ bool suspend_ops_needed = false;
int i;
for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
- if (acpi_sleep_state_supported(i))
+ if (acpi_sleep_state_supported(i)) {
sleep_states[i] = 1;
+ suspend_ops_needed = true;
+ }
- suspend_set_ops(old_suspend_ordering ?
- &acpi_suspend_ops_old : &acpi_suspend_ops);
+ if (suspend_ops_needed)
+ suspend_set_ops(old_suspend_ordering ?
+ &acpi_suspend_ops_old : &acpi_suspend_ops);
acpi_s2idle_setup();
}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index f9383736fa0f..71419eb16e09 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -21,6 +21,7 @@
#include <linux/earlycpio.h>
#include <linux/initrd.h>
#include <linux/security.h>
+#include <linux/kmemleak.h>
#include "internal.h"
#ifdef CONFIG_ACPI_CUSTOM_DSDT
@@ -601,6 +602,8 @@ void __init acpi_table_upgrade(void)
*/
arch_reserve_mem_area(acpi_tables_addr, all_tables_size);
+ kmemleak_ignore_phys(acpi_tables_addr);
+
/*
* early_ioremap only can remap 256k one time. If we map all
* tables one time, we will hit the limit. Need to map chunks
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 962041148482..720aa6cdd402 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -19,6 +19,7 @@
#include <linux/clk/clk-conf.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
+#include <linux/of_irq.h>
#include <asm/irq.h>
@@ -371,14 +372,37 @@ static void amba_device_release(struct device *dev)
kfree(d);
}
+static int of_amba_device_decode_irq(struct amba_device *dev)
+{
+ struct device_node *node = dev->dev.of_node;
+ int i, irq = 0;
+
+ if (IS_ENABLED(CONFIG_OF_IRQ) && node) {
+ /* Decode the IRQs */
+ for (i = 0; i < AMBA_NR_IRQS; i++) {
+ irq = of_irq_get(node, i);
+ if (irq < 0) {
+ if (irq == -EPROBE_DEFER)
+ return irq;
+ irq = 0;
+ }
+
+ dev->irq[i] = irq;
+ }
+ }
+
+ return 0;
+}
+
static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
int i, ret;
- WARN_ON(dev->irq[0] == (unsigned int)-1);
- WARN_ON(dev->irq[1] == (unsigned int)-1);
+ ret = of_amba_device_decode_irq(dev);
+ if (ret)
+ goto err_out;
ret = request_resource(parent, &dev->res);
if (ret)
@@ -579,78 +603,6 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
}
EXPORT_SYMBOL_GPL(amba_device_add);
-static struct amba_device *
-amba_aphb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid, u64 dma_mask,
- struct resource *resbase)
-{
- struct amba_device *dev;
- int ret;
-
- dev = amba_device_alloc(name, base, size);
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- dev->dev.coherent_dma_mask = dma_mask;
- dev->irq[0] = irq1;
- dev->irq[1] = irq2;
- dev->periphid = periphid;
- dev->dev.platform_data = pdata;
- dev->dev.parent = parent;
-
- ret = amba_device_add(dev, resbase);
- if (ret) {
- amba_device_put(dev);
- return ERR_PTR(ret);
- }
-
- return dev;
-}
-
-struct amba_device *
-amba_apb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, 0, &iomem_resource);
-}
-EXPORT_SYMBOL_GPL(amba_apb_device_add);
-
-struct amba_device *
-amba_ahb_device_add(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1, int irq2,
- void *pdata, unsigned int periphid)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, ~0ULL, &iomem_resource);
-}
-EXPORT_SYMBOL_GPL(amba_ahb_device_add);
-
-struct amba_device *
-amba_apb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, 0, resbase);
-}
-EXPORT_SYMBOL_GPL(amba_apb_device_add_res);
-
-struct amba_device *
-amba_ahb_device_add_res(struct device *parent, const char *name,
- resource_size_t base, size_t size, int irq1,
- int irq2, void *pdata, unsigned int periphid,
- struct resource *resbase)
-{
- return amba_aphb_device_add(parent, name, base, size, irq1, irq2, pdata,
- periphid, ~0ULL, resbase);
-}
-EXPORT_SYMBOL_GPL(amba_ahb_device_add_res);
-
-
static void amba_device_initialize(struct amba_device *dev, const char *name)
{
device_initialize(&dev->dev);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index fe4c3b49eec1..49fb74196d02 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2055,7 +2055,7 @@ static int binder_translate_binder(struct flat_binder_object *fp,
ret = -EINVAL;
goto done;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
ret = -EPERM;
goto done;
}
@@ -2101,7 +2101,7 @@ static int binder_translate_handle(struct flat_binder_object *fp,
proc->pid, thread->pid, fp->handle);
return -EINVAL;
}
- if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
+ if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
ret = -EPERM;
goto done;
}
@@ -2189,7 +2189,7 @@ static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
ret = -EBADF;
goto err_fget;
}
- ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
+ ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
if (ret < 0) {
ret = -EPERM;
goto err_security;
@@ -2594,8 +2594,8 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_invalid_target_handle;
}
- if (security_binder_transaction(proc->tsk,
- target_proc->tsk) < 0) {
+ if (security_binder_transaction(proc->cred,
+ target_proc->cred) < 0) {
return_error = BR_FAILED_REPLY;
return_error_param = -EPERM;
return_error_line = __LINE__;
@@ -2710,7 +2710,7 @@ static void binder_transaction(struct binder_proc *proc,
t->from = thread;
else
t->from = NULL;
- t->sender_euid = task_euid(proc->tsk);
+ t->sender_euid = proc->cred->euid;
t->to_proc = target_proc;
t->to_thread = target_thread;
t->code = tr->code;
@@ -2721,16 +2721,7 @@ static void binder_transaction(struct binder_proc *proc,
u32 secid;
size_t added_size;
- /*
- * Arguably this should be the task's subjective LSM secid but
- * we can't reliably access the subjective creds of a task
- * other than our own so we must use the objective creds, which
- * are safe to access. The downside is that if a task is
- * temporarily overriding it's creds it will not be reflected
- * here; however, it isn't clear that binder would handle that
- * case well anyway.
- */
- security_task_getsecid_obj(proc->tsk, &secid);
+ security_cred_getsecid(proc->cred, &secid);
ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
if (ret) {
return_error = BR_FAILED_REPLY;
@@ -4353,6 +4344,7 @@ static void binder_free_proc(struct binder_proc *proc)
}
binder_alloc_deferred_release(&proc->alloc);
put_task_struct(proc->tsk);
+ put_cred(proc->cred);
binder_stats_deleted(BINDER_STAT_PROC);
kfree(proc);
}
@@ -4564,7 +4556,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp,
ret = -EBUSY;
goto out;
}
- ret = security_binder_set_context_mgr(proc->tsk);
+ ret = security_binder_set_context_mgr(proc->cred);
if (ret < 0)
goto out;
if (uid_valid(context->binder_context_mgr_uid)) {
@@ -5055,6 +5047,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
spin_lock_init(&proc->outer_lock);
get_task_struct(current->group_leader);
proc->tsk = current->group_leader;
+ proc->cred = get_cred(filp->f_cred);
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->freeze_wait);
proc->default_priority = task_nice(current);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 402c4d4362a8..d6b6b8cb7346 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -364,6 +364,9 @@ struct binder_ref {
* (invariant after initialized)
* @tsk task_struct for group_leader of process
* (invariant after initialized)
+ * @cred struct cred associated with the `struct file`
+ * in binder_open()
+ * (invariant after initialized)
* @deferred_work_node: element for binder_deferred_list
* (protected by binder_deferred_lock)
* @deferred_work: bitmap of deferred work to perform
@@ -426,6 +429,7 @@ struct binder_proc {
struct list_head waiting_threads;
int pid;
struct task_struct *tsk;
+ const struct cred *cred;
struct hlist_node deferred_work_node;
int deferred_work;
int outstanding_txns;
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 186cbf90c8ea..d60f34718b5d 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -258,7 +258,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x2683), board_ahci }, /* ESB2 */
{ PCI_VDEVICE(INTEL, 0x27c6), board_ahci }, /* ICH7-M DH */
{ PCI_VDEVICE(INTEL, 0x2821), board_ahci }, /* ICH8 */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8 */
+ { PCI_VDEVICE(INTEL, 0x2822), board_ahci_nosntf }, /* ICH8/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2824), board_ahci }, /* ICH8 */
{ PCI_VDEVICE(INTEL, 0x2829), board_ahci }, /* ICH8M */
{ PCI_VDEVICE(INTEL, 0x282a), board_ahci }, /* ICH8M */
@@ -316,7 +316,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1d02), board_ahci }, /* PBG AHCI */
{ PCI_VDEVICE(INTEL, 0x1d04), board_ahci }, /* PBG RAID */
{ PCI_VDEVICE(INTEL, 0x1d06), board_ahci }, /* PBG RAID */
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG RAID */
+ { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* PBG/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x2323), board_ahci }, /* DH89xxCC AHCI */
{ PCI_VDEVICE(INTEL, 0x1e02), board_ahci }, /* Panther Point AHCI */
{ PCI_VDEVICE(INTEL, 0x1e03), board_ahci_mobile }, /* Panther M AHCI */
@@ -358,8 +358,8 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
{ PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
+ { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg/Lewisburg AHCI*/
+ { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg/Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0x43d4), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d5), board_ahci }, /* Rocket Lake PCH-H RAID */
{ PCI_VDEVICE(INTEL, 0x43d6), board_ahci }, /* Rocket Lake PCH-H RAID */
@@ -394,10 +394,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0xa106), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci_mobile }, /* Sunrise M RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
- { PCI_VDEVICE(INTEL, 0x2822), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Lewisburg AHCI*/
- { PCI_VDEVICE(INTEL, 0x2826), board_ahci }, /* Lewisburg RAID*/
- { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa182), board_ahci }, /* Lewisburg AHCI*/
{ PCI_VDEVICE(INTEL, 0xa186), board_ahci }, /* Lewisburg RAID*/
{ PCI_VDEVICE(INTEL, 0xa1d2), board_ahci }, /* Lewisburg RAID*/
@@ -592,6 +588,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
{ PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci }, /* ASM1061R */
{ PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci }, /* ASM1062R */
+ { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci }, /* ASM1062+JMB575 */
/*
* Samsung SSDs found on some macbooks. NCQ times out if MSI is
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index eed65311b5d1..3018ca84a3d8 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2007,7 +2007,7 @@ unsigned int ata_read_log_page(struct ata_device *dev, u8 log,
retry:
ata_tf_init(dev, &tf);
- if (dev->dma_mode && ata_id_has_read_log_dma_ext(dev->id) &&
+ if (ata_dma_enabled(dev) && ata_id_has_read_log_dma_ext(dev->id) &&
!(dev->horkage & ATA_HORKAGE_NO_DMA_LOG)) {
tf.command = ATA_CMD_READ_LOG_DMA_EXT;
tf.protocol = ATA_PROT_DMA;
@@ -2459,18 +2459,70 @@ static void ata_dev_config_devslp(struct ata_device *dev)
}
}
+static void ata_dev_config_cpr(struct ata_device *dev)
+{
+ unsigned int err_mask;
+ size_t buf_len;
+ int i, nr_cpr = 0;
+ struct ata_cpr_log *cpr_log = NULL;
+ u8 *desc, *buf = NULL;
+
+ if (!ata_identify_page_supported(dev,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES))
+ goto out;
+
+ /*
+ * Read IDENTIFY DEVICE data log, page 0x47
+ * (concurrent positioning ranges). We can have at most 255 32B range
+ * descriptors plus a 64B header.
+ */
+ buf_len = (64 + 255 * 32 + 511) & ~511;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ goto out;
+
+ err_mask = ata_read_log_page(dev, ATA_LOG_IDENTIFY_DEVICE,
+ ATA_LOG_CONCURRENT_POSITIONING_RANGES,
+ buf, buf_len >> 9);
+ if (err_mask)
+ goto out;
+
+ nr_cpr = buf[0];
+ if (!nr_cpr)
+ goto out;
+
+ cpr_log = kzalloc(struct_size(cpr_log, cpr, nr_cpr), GFP_KERNEL);
+ if (!cpr_log)
+ goto out;
+
+ cpr_log->nr_cpr = nr_cpr;
+ desc = &buf[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ cpr_log->cpr[i].num = desc[0];
+ cpr_log->cpr[i].num_storage_elements = desc[1];
+ cpr_log->cpr[i].start_lba = get_unaligned_le64(&desc[8]);
+ cpr_log->cpr[i].num_lbas = get_unaligned_le64(&desc[16]);
+ }
+
+out:
+ swap(dev->cpr_log, cpr_log);
+ kfree(cpr_log);
+ kfree(buf);
+}
+
static void ata_dev_print_features(struct ata_device *dev)
{
if (!(dev->flags & ATA_DFLAG_FEATURES_MASK))
return;
ata_dev_info(dev,
- "Features:%s%s%s%s%s\n",
+ "Features:%s%s%s%s%s%s\n",
dev->flags & ATA_DFLAG_TRUSTED ? " Trust" : "",
dev->flags & ATA_DFLAG_DA ? " Dev-Attention" : "",
dev->flags & ATA_DFLAG_DEVSLP ? " Dev-Sleep" : "",
dev->flags & ATA_DFLAG_NCQ_SEND_RECV ? " NCQ-sndrcv" : "",
- dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "");
+ dev->flags & ATA_DFLAG_NCQ_PRIO ? " NCQ-prio" : "",
+ dev->cpr_log ? " CPR" : "");
}
/**
@@ -2634,6 +2686,7 @@ int ata_dev_configure(struct ata_device *dev)
ata_dev_config_sense_reporting(dev);
ata_dev_config_zac(dev);
ata_dev_config_trusted(dev);
+ ata_dev_config_cpr(dev);
dev->cdb_len = 32;
if (ata_msg_drv(ap) && print_info)
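For reference, the buffer sizing in ata_dev_config_cpr() works out as follows: a 64-byte header plus at most 255 * 32 = 8160 bytes of descriptors is 8224 bytes, and rounding up with (x + 511) & ~511 gives 8704 bytes, so buf_len >> 9 asks ata_read_log_page() for 17 sectors.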
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 1fb4611f7eeb..8a6b7b913d64 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1895,7 +1895,7 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
*/
static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
{
- int num_pages;
+ int i, num_pages = 0;
static const u8 pages[] = {
0x00, /* page 0x00, this page */
0x80, /* page 0x80, unit serial no page */
@@ -1905,13 +1905,17 @@ static unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf)
0xb1, /* page 0xb1, block device characteristics page */
0xb2, /* page 0xb2, thin provisioning page */
0xb6, /* page 0xb6, zoned block device characteristics */
+ 0xb9, /* page 0xb9, concurrent positioning ranges */
};
- num_pages = sizeof(pages);
- if (!(args->dev->flags & ATA_DFLAG_ZAC))
- num_pages--;
+ for (i = 0; i < sizeof(pages); i++) {
+ if (pages[i] == 0xb6 &&
+ !(args->dev->flags & ATA_DFLAG_ZAC))
+ continue;
+ rbuf[num_pages + 4] = pages[i];
+ num_pages++;
+ }
rbuf[3] = num_pages; /* number of supported VPD pages */
- memcpy(rbuf + 4, pages, num_pages);
return 0;
}
@@ -2121,6 +2125,26 @@ static unsigned int ata_scsiop_inq_b6(struct ata_scsi_args *args, u8 *rbuf)
return 0;
}
+static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf)
+{
+ struct ata_cpr_log *cpr_log = args->dev->cpr_log;
+ u8 *desc = &rbuf[64];
+ int i;
+
+ /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */
+ rbuf[1] = 0xb9;
+ put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]);
+
+ for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) {
+ desc[0] = cpr_log->cpr[i].num;
+ desc[1] = cpr_log->cpr[i].num_storage_elements;
+ put_unaligned_be64(cpr_log->cpr[i].start_lba, &desc[8]);
+ put_unaligned_be64(cpr_log->cpr[i].num_lbas, &desc[16]);
+ }
+
+ return 0;
+}
+
/**
* modecpy - Prepare response for MODE SENSE
* @dest: output buffer
@@ -2981,7 +3005,7 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
ata_qc_set_pc_nbytes(qc);
/* We may not issue DMA commands if no DMA mode is set */
- if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) {
+ if (tf->protocol == ATA_PROT_DMA && !ata_dma_enabled(dev)) {
fp = 1;
goto invalid_fld;
}
@@ -3131,7 +3155,7 @@ static unsigned int ata_scsi_write_same_xlat(struct ata_queued_cmd *qc)
u8 unmap = cdb[1] & 0x8;
/* we may not issue DMA commands if no DMA mode is set */
- if (unlikely(!dev->dma_mode))
+ if (unlikely(!ata_dma_enabled(dev)))
goto invalid_opcode;
/*
@@ -4120,11 +4144,17 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b2);
break;
case 0xb6:
- if (dev->flags & ATA_DFLAG_ZAC) {
+ if (dev->flags & ATA_DFLAG_ZAC)
ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b6);
- break;
- }
- fallthrough;
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
+ case 0xb9:
+ if (dev->cpr_log)
+ ata_scsi_rbuf_fill(&args, ata_scsiop_inq_b9);
+ else
+ ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
+ break;
default:
ata_scsi_set_invalid_field(dev, cmd, 2, 0xff);
break;
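For reference, the Concurrent Positioning Ranges VPD page built by ata_scsiop_inq_b9() follows the usual SPC layout: byte 0 holds the peripheral qualifier/device type, byte 1 the page code (B9h), and bytes 2-3 the 16-bit PAGE LENGTH (total size minus 4), which is why the length value is written at offset 2; the 32-byte range descriptors start at offset 64, mirroring the on-drive log page parsed in ata_dev_config_cpr().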
diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
index 557ecf466102..b7ff63ed3bbb 100644
--- a/drivers/ata/pata_ali.c
+++ b/drivers/ata/pata_ali.c
@@ -215,7 +215,7 @@ static void ali_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
- if (pair->dma_mode) {
+ if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
@@ -264,7 +264,7 @@ static void ali_set_dmamode(struct ata_port *ap, struct ata_device *adev)
struct ata_timing p;
ata_timing_compute(pair, pair->pio_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
- if (pair->dma_mode) {
+ if (ata_dma_enabled(pair)) {
ata_timing_compute(pair, pair->dma_mode, &p, T, 1);
ata_timing_merge(&p, &t, &t, ATA_TIMING_SETUP|ATA_TIMING_8BIT);
}
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index c8acba162d02..154748cfcc79 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -66,7 +66,7 @@ static void timing_setup(struct ata_port *ap, struct ata_device *adev, int offse
if (peer) {
/* This may be over conservative */
- if (peer->dma_mode) {
+ if (ata_dma_enabled(peer)) {
ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
ata_timing_merge(&apeer, &at, &at, ATA_TIMING_8BIT);
}
diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
index f6278d9de348..ad1090b90e52 100644
--- a/drivers/ata/pata_optidma.c
+++ b/drivers/ata/pata_optidma.c
@@ -153,7 +153,7 @@ static void optidma_mode_setup(struct ata_port *ap, struct ata_device *adev, u8
if (pair) {
u8 pair_addr;
/* Hardware constraint */
- if (pair->dma_mode)
+ if (ata_dma_enabled(pair))
pair_addr = 0;
else
pair_addr = addr_timing[pci_clock][pair->pio_mode - XFER_PIO_0];
@@ -301,7 +301,7 @@ static u8 optidma_make_bits43(struct ata_device *adev)
};
if (!ata_dev_enabled(adev))
return 0;
- if (adev->dma_mode)
+ if (ata_dma_enabled(adev))
return adev->dma_mode - XFER_MW_DMA_0;
return bits43[adev->pio_mode - XFER_PIO_0];
}
diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
index 8fde4a86401b..3aca8fe3fdb6 100644
--- a/drivers/ata/pata_radisys.c
+++ b/drivers/ata/pata_radisys.c
@@ -172,8 +172,8 @@ static unsigned int radisys_qc_issue(struct ata_queued_cmd *qc)
if (adev != ap->private_data) {
/* UDMA timing is not shared */
- if (adev->dma_mode < XFER_UDMA_0) {
- if (adev->dma_mode)
+ if (adev->dma_mode < XFER_UDMA_0 || !ata_dma_enabled(adev)) {
+ if (ata_dma_enabled(adev))
radisys_set_dmamode(ap, adev);
else if (adev->pio_mode)
radisys_set_piomode(ap, adev);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9d86203e1e7a..c53633d47bfb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3896,8 +3896,8 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
break;
default:
- dev_err(host->dev, "BUG: invalid board index %u\n", board_idx);
- return 1;
+ dev_alert(host->dev, "BUG: invalid board index %u\n", board_idx);
+ return -EINVAL;
}
hpriv->hp_flags = hp_flags;
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 43407665918f..fc0836f460fb 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -600,6 +600,11 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
return core_mask;
}
+const struct cpumask *cpu_clustergroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].cluster_sibling;
+}
+
void update_siblings_masks(unsigned int cpuid)
{
struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
@@ -617,6 +622,12 @@ void update_siblings_masks(unsigned int cpuid)
if (cpuid_topo->package_id != cpu_topo->package_id)
continue;
+ if (cpuid_topo->cluster_id == cpu_topo->cluster_id &&
+ cpuid_topo->cluster_id != -1) {
+ cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling);
+ cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling);
+ }
+
cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
@@ -635,6 +646,9 @@ static void clear_cpu_topology(int cpu)
cpumask_clear(&cpu_topo->llc_sibling);
cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+ cpumask_clear(&cpu_topo->cluster_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling);
+
cpumask_clear(&cpu_topo->core_sibling);
cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
cpumask_clear(&cpu_topo->thread_sibling);
@@ -650,6 +664,7 @@ void __init reset_cpu_topology(void)
cpu_topo->thread_id = -1;
cpu_topo->core_id = -1;
+ cpu_topo->cluster_id = -1;
cpu_topo->package_id = -1;
cpu_topo->llc_id = -1;
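With cluster_sibling populated as above, cpu_clustergroup_mask() can back a cluster scheduling level. A hedged sketch, assuming the CONFIG_SCHED_CLUSTER and cpu_cluster_flags() plumbing added elsewhere in this series; the table name is made up and the real sched-domain wiring lives outside this diff:

#if defined(CONFIG_SCHED_CLUSTER) && defined(CONFIG_SCHED_MC)
static struct sched_domain_topology_level example_topology[] = {
	{ cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};
#endif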
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index cbea78e79f3d..ac4dde8fdb8b 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,7 +32,6 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
-#include <linux/cpuidle.h>
#include <linux/devfreq.h>
#include <linux/timer.h>
@@ -747,8 +746,6 @@ void dpm_resume_noirq(pm_message_t state)
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
-
- cpuidle_resume();
}
/**
@@ -1051,7 +1048,7 @@ static void device_complete(struct device *dev, pm_message_t state)
const char *info = NULL;
if (dev->power.syscore)
- return;
+ goto out;
device_lock(dev);
@@ -1081,6 +1078,7 @@ static void device_complete(struct device *dev, pm_message_t state)
device_unlock(dev);
+out:
pm_runtime_put(dev);
}
@@ -1336,8 +1334,6 @@ int dpm_suspend_noirq(pm_message_t state)
{
int ret;
- cpuidle_pause();
-
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
@@ -1794,9 +1790,6 @@ static int device_prepare(struct device *dev, pm_message_t state)
int (*callback)(struct device *) = NULL;
int ret = 0;
- if (dev->power.syscore)
- return 0;
-
/*
* If a device's parent goes into runtime suspend at the wrong time,
* it won't be possible to resume the device. To prevent this we
@@ -1805,6 +1798,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
*/
pm_runtime_get_noresume(dev);
+ if (dev->power.syscore)
+ return 0;
+
device_lock(dev);
dev->power.wakeup_path = false;
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h
index 54292cdd7808..0eb7f02b3ad5 100644
--- a/drivers/base/power/power.h
+++ b/drivers/base/power/power.h
@@ -25,8 +25,10 @@ extern u64 pm_runtime_active_time(struct device *dev);
#define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0)
#define WAKE_IRQ_DEDICATED_MANAGED BIT(1)
+#define WAKE_IRQ_DEDICATED_REVERSE BIT(2)
#define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \
- WAKE_IRQ_DEDICATED_MANAGED)
+ WAKE_IRQ_DEDICATED_MANAGED | \
+ WAKE_IRQ_DEDICATED_REVERSE)
struct wake_irq {
struct device *dev;
@@ -39,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq);
extern void dev_pm_enable_wake_irq_check(struct device *dev,
bool can_change_status);
-extern void dev_pm_disable_wake_irq_check(struct device *dev);
+extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable);
+extern void dev_pm_enable_wake_irq_complete(struct device *dev);
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index ec94049442b9..d504cd4ab3cb 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -645,6 +645,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
if (retval)
goto fail;
+ dev_pm_enable_wake_irq_complete(dev);
+
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -690,7 +692,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
return retval;
fail:
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, true);
__update_runtime_status(dev, RPM_ACTIVE);
dev->power.deferred_resume = false;
wake_up_all(&dev->power.wait_queue);
@@ -873,7 +875,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
callback = RPM_GET_CALLBACK(dev, runtime_resume);
- dev_pm_disable_wake_irq_check(dev);
+ dev_pm_disable_wake_irq_check(dev, false);
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index b91a3a9bf9f6..0004db4a9d3b 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq)
return IRQ_HANDLED;
}
-/**
- * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
- * @dev: Device entry
- * @irq: Device wake-up interrupt
- *
- * Unless your hardware has separate wake-up interrupts in addition
- * to the device IO interrupts, you don't need this.
- *
- * Sets up a threaded interrupt handler for a device that has
- * a dedicated wake-up interrupt in addition to the device IO
- * interrupt.
- *
- * The interrupt starts disabled, and needs to be managed for
- * the device by the bus code or the device driver using
- * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq()
- * functions.
- */
-int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag)
{
struct wake_irq *wirq;
int err;
@@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
if (err)
goto err_free_irq;
- wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED;
+ wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag;
return err;
@@ -210,9 +193,58 @@ err_free:
return err;
}
+
+/**
+ * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has
+ * a dedicated wake-up interrupt in addition to the device IO
+ * interrupt.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, 0);
+}
EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq);
/**
+ * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt
+ * with reverse enable ordering
+ * @dev: Device entry
+ * @irq: Device wake-up interrupt
+ *
+ * Unless your hardware has separate wake-up interrupts in addition
+ * to the device IO interrupts, you don't need this.
+ *
+ * Sets up a threaded interrupt handler for a device that has a dedicated
+ * wake-up interrupt in addition to the device IO interrupt. It sets
+ * the WAKE_IRQ_DEDICATED_REVERSE status flag to tell rpm_suspend() to
+ * enable the dedicated wake-up interrupt only after running the runtime
+ * suspend callback for @dev.
+ *
+ * The interrupt starts disabled, and needs to be managed for
+ * the device by the bus code or the device driver using
+ * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*()
+ * functions.
+ */
+int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq)
+{
+ return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE);
+}
+EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse);
+
+/**
* dev_pm_enable_wake_irq - Enable device wake-up interrupt
* @dev: Device
*
@@ -282,28 +314,55 @@ void dev_pm_enable_wake_irq_check(struct device *dev,
return;
enable:
- enable_irq(wirq->irq);
+ if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ enable_irq(wirq->irq);
}
/**
* dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt
* @dev: Device
+ * @cond_disable: if set, leave the IRQ enabled when WAKE_IRQ_DEDICATED_REVERSE is set
*
* Disables wake-up interrupt conditionally based on status.
* Should be only called from rpm_suspend() and rpm_resume() path.
*/
-void dev_pm_disable_wake_irq_check(struct device *dev)
+void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable)
{
struct wake_irq *wirq = dev->power.wakeirq;
if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
return;
+ if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE))
+ return;
+
if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED)
disable_irq_nosync(wirq->irq);
}
/**
+ * dev_pm_enable_wake_irq_complete - enable a wake IRQ not enabled earlier
+ * @dev: Device using the wake IRQ
+ *
+ * Enable the wake IRQ conditionally based on its status; mainly used when the
+ * wake IRQ should only be enabled after running ->runtime_suspend(), which is
+ * indicated by WAKE_IRQ_DEDICATED_REVERSE.
+ *
+ * Should only be called from the rpm_suspend() path.
+ */
+void dev_pm_enable_wake_irq_complete(struct device *dev)
+{
+ struct wake_irq *wirq = dev->power.wakeirq;
+
+ if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK))
+ return;
+
+ if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED &&
+ wirq->status & WAKE_IRQ_DEDICATED_REVERSE)
+ enable_irq(wirq->irq);
+}
+
+/**
* dev_pm_arm_wake_irq - Arm device wake-up
* @wirq: Device wake-up interrupt
*
diff --git a/drivers/base/property.c b/drivers/base/property.c
index 453918eb7390..f1f35b48ab8b 100644
--- a/drivers/base/property.c
+++ b/drivers/base/property.c
@@ -15,7 +15,6 @@
#include <linux/of_graph.h>
#include <linux/of_irq.h>
#include <linux/property.h>
-#include <linux/etherdevice.h>
#include <linux/phy.h>
struct fwnode_handle *dev_fwnode(struct device *dev)
@@ -935,68 +934,6 @@ int device_get_phy_mode(struct device *dev)
}
EXPORT_SYMBOL_GPL(device_get_phy_mode);
-static void *fwnode_get_mac_addr(struct fwnode_handle *fwnode,
- const char *name, char *addr,
- int alen)
-{
- int ret = fwnode_property_read_u8_array(fwnode, name, addr, alen);
-
- if (ret == 0 && alen == ETH_ALEN && is_valid_ether_addr(addr))
- return addr;
- return NULL;
-}
-
-/**
- * fwnode_get_mac_address - Get the MAC from the firmware node
- * @fwnode: Pointer to the firmware node
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- *
- * Search the firmware node for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the firmware tables, but were not updated by the firmware. For
- * example, the DTS could define 'mac-address' and 'local-mac-address', with
- * zero MAC addresses. Some older U-Boots only initialized 'local-mac-address'.
- * In this case, the real MAC is in 'local-mac-address', and 'mac-address'
- * exists but is all zeros.
-*/
-void *fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr, int alen)
-{
- char *res;
-
- res = fwnode_get_mac_addr(fwnode, "mac-address", addr, alen);
- if (res)
- return res;
-
- res = fwnode_get_mac_addr(fwnode, "local-mac-address", addr, alen);
- if (res)
- return res;
-
- return fwnode_get_mac_addr(fwnode, "address", addr, alen);
-}
-EXPORT_SYMBOL(fwnode_get_mac_address);
-
-/**
- * device_get_mac_address - Get the MAC for a given device
- * @dev: Pointer to the device
- * @addr: Address of buffer to store the MAC in
- * @alen: Length of the buffer pointed to by addr, should be ETH_ALEN
- */
-void *device_get_mac_address(struct device *dev, char *addr, int alen)
-{
- return fwnode_get_mac_address(dev_fwnode(dev), addr, alen);
-}
-EXPORT_SYMBOL(device_get_mac_address);
-
/**
* fwnode_irq_get - Get IRQ directly from a fwnode
* @fwnode: Pointer to the firmware node
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index cfa29dc89bbf..fabf87058d80 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -281,14 +281,14 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
if (!blk)
return -ENOMEM;
+ rbnode->block = blk;
+
if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
present = krealloc(rbnode->cache_present,
BITS_TO_LONGS(blklen) * sizeof(*present),
GFP_KERNEL);
- if (!present) {
- kfree(blk);
+ if (!present)
return -ENOMEM;
- }
memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
(BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
@@ -305,7 +305,6 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
}
/* update the rbnode block, its size and the base register */
- rbnode->block = blk;
rbnode->blklen = blklen;
rbnode->base_reg = base_reg;
rbnode->cache_present = present;
diff --git a/drivers/base/regmap/regmap-mdio.c b/drivers/base/regmap/regmap-mdio.c
index 6a20201299f5..f7293040a2b1 100644
--- a/drivers/base/regmap/regmap-mdio.c
+++ b/drivers/base/regmap/regmap-mdio.c
@@ -14,7 +14,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int
{
int ret;
- ret = mdiobus_read(mdio_dev->bus, mdio_dev->addr, reg);
+ ret = mdiodev_read(mdio_dev, reg);
if (ret < 0)
return ret;
@@ -24,7 +24,7 @@ static int regmap_mdio_read(struct mdio_device *mdio_dev, u32 reg, unsigned int
static int regmap_mdio_write(struct mdio_device *mdio_dev, u32 reg, unsigned int val)
{
- return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+ return mdiodev_write(mdio_dev, reg, val);
}
static int regmap_mdio_c22_read(void *context, unsigned int reg, unsigned int *val)
@@ -44,7 +44,7 @@ static int regmap_mdio_c22_write(void *context, unsigned int reg, unsigned int v
if (unlikely(reg & ~REGNUM_C22_MASK))
return -ENXIO;
- return mdiobus_write(mdio_dev->bus, mdio_dev->addr, reg, val);
+ return mdiodev_write(mdio_dev, reg, val);
}
static const struct regmap_bus regmap_mdio_c22_bus = {
diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c
index c1894e93c378..719323bc6c7f 100644
--- a/drivers/base/regmap/regmap-spi.c
+++ b/drivers/base/regmap/regmap-spi.c
@@ -109,13 +109,37 @@ static const struct regmap_bus regmap_spi = {
.val_format_endian_default = REGMAP_ENDIAN_BIG,
};
+static const struct regmap_bus *regmap_get_spi_bus(struct spi_device *spi,
+ const struct regmap_config *config)
+{
+ size_t max_size = spi_max_transfer_size(spi);
+ struct regmap_bus *bus;
+
+ if (max_size != SIZE_MAX) {
+ bus = kmemdup(&regmap_spi, sizeof(*bus), GFP_KERNEL);
+ if (!bus)
+ return ERR_PTR(-ENOMEM);
+
+ bus->free_on_exit = true;
+ bus->max_raw_read = max_size;
+ bus->max_raw_write = max_size;
+ return bus;
+ }
+
+ return &regmap_spi;
+}
+
struct regmap *__regmap_init_spi(struct spi_device *spi,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__regmap_init_spi);
@@ -124,8 +148,12 @@ struct regmap *__devm_regmap_init_spi(struct spi_device *spi,
struct lock_class_key *lock_key,
const char *lock_name)
{
- return __devm_regmap_init(&spi->dev, &regmap_spi, &spi->dev, config,
- lock_key, lock_name);
+ const struct regmap_bus *bus = regmap_get_spi_bus(spi, config);
+
+ if (IS_ERR(bus))
+ return ERR_CAST(bus);
+
+ return __devm_regmap_init(&spi->dev, bus, &spi->dev, config, lock_key, lock_name);
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi);
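No driver changes are needed to pick up the clamping above. A minimal sketch, with a made-up register layout, of a driver whose raw accesses are now bounded by spi_max_transfer_size():

#include <linux/regmap.h>
#include <linux/spi/spi.h>

static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
	.max_register = 0x7f,
};

static int example_spi_probe(struct spi_device *spi)
{
	struct regmap *map;

	map = devm_regmap_init_spi(spi, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/*
	 * Raw reads and writes larger than the controller limit can now be
	 * chunked by the regmap core via max_raw_read/max_raw_write rather
	 * than being handed to the SPI layer in one oversized transfer.
	 */
	return 0;
}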
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index 43c0940643f5..8f2b641d0b8c 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -48,6 +48,9 @@ static DEVICE_ATTR_RO(physical_package_id);
define_id_show_func(die_id);
static DEVICE_ATTR_RO(die_id);
+define_id_show_func(cluster_id);
+static DEVICE_ATTR_RO(cluster_id);
+
define_id_show_func(core_id);
static DEVICE_ATTR_RO(core_id);
@@ -63,6 +66,10 @@ define_siblings_read_func(core_siblings, core_cpumask);
static BIN_ATTR_RO(core_siblings, 0);
static BIN_ATTR_RO(core_siblings_list, 0);
+define_siblings_read_func(cluster_cpus, cluster_cpumask);
+static BIN_ATTR_RO(cluster_cpus, 0);
+static BIN_ATTR_RO(cluster_cpus_list, 0);
+
define_siblings_read_func(die_cpus, die_cpumask);
static BIN_ATTR_RO(die_cpus, 0);
static BIN_ATTR_RO(die_cpus_list, 0);
@@ -94,6 +101,8 @@ static struct bin_attribute *bin_attrs[] = {
&bin_attr_thread_siblings_list,
&bin_attr_core_siblings,
&bin_attr_core_siblings_list,
+ &bin_attr_cluster_cpus,
+ &bin_attr_cluster_cpus_list,
&bin_attr_die_cpus,
&bin_attr_die_cpus_list,
&bin_attr_package_cpus,
@@ -112,6 +121,7 @@ static struct bin_attribute *bin_attrs[] = {
static struct attribute *default_attrs[] = {
&dev_attr_physical_package_id.attr,
&dev_attr_die_id.attr,
+ &dev_attr_cluster_id.attr,
&dev_attr_core_id.attr,
#ifdef CONFIG_SCHED_BOOK
&dev_attr_book_id.attr,
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index c6d6ba0d00b1..8e7ca3e4c8c4 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -20,7 +20,7 @@ MODULE_DESCRIPTION("Broadcom's specific AMBA driver");
MODULE_LICENSE("GPL");
/* contains the number the next bus should get. */
-static unsigned int bcma_bus_next_num = 0;
+static unsigned int bcma_bus_next_num;
/* bcma_buses_mutex locks the bcma_bus_next_num */
static DEFINE_MUTEX(bcma_buses_mutex);
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index ab3e37aa1830..2a51dfb09c8f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -180,14 +180,6 @@ config BLK_DEV_LOOP
bits of, say, a sound file). This is also safe if the file resides
on a remote file server.
- There are several ways of encrypting disks. Some of these require
- kernel patches. The vanilla kernel offers the cryptoloop option
- and a Device Mapper target (which is superior, as it supports all
- file systems). If you want to use the cryptoloop, say Y to both
- LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
- or later) version of util-linux. Additionally, be aware that
- the cryptoloop is not safe for storing journaled filesystems.
-
Note that this loop device has nothing to do with the loopback
device used for network connections from the machine to itself.
@@ -211,21 +203,6 @@ config BLK_DEV_LOOP_MIN_COUNT
is used, it can be set to 0, since needed loop devices can be
dynamically allocated with the /dev/loop-control interface.
-config BLK_DEV_CRYPTOLOOP
- tristate "Cryptoloop Support (DEPRECATED)"
- select CRYPTO
- select CRYPTO_CBC
- depends on BLK_DEV_LOOP
- help
- Say Y here if you want to be able to use the ciphers that are
- provided by the CryptoAPI as loop transformation. This might be
- used as hard disk encryption.
-
- WARNING: This device is not safe for journaled file systems like
- ext3 or Reiserfs. Please use the Device Mapper crypto module
- instead, which can be configured to be on-disk compatible with the
- cryptoloop device. cryptoloop support will be removed in Linux 5.16.
-
source "drivers/block/drbd/Kconfig"
config BLK_DEV_NBD
@@ -304,8 +281,8 @@ config BLK_DEV_RAM_SIZE
config CDROM_PKTCDVD
tristate "Packet writing on CD/DVD media (DEPRECATED)"
depends on !UML
+ depends on SCSI
select CDROM
- select SCSI_COMMON
help
Note: This driver is deprecated and will be removed from the
kernel in the near future!
@@ -394,6 +371,7 @@ config XEN_BLKDEV_BACKEND
config VIRTIO_BLK
tristate "Virtio block driver"
depends on VIRTIO
+ select SG_POOL
help
This is the virtual block driver for virtio. It can be used with
QEMU based VMMs (like KVM or Xen). Say Y or M.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index bc68817ef496..11a74f17c9ad 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -24,7 +24,6 @@ obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
-obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8b1714021498..bf5c124c5452 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -61,10 +61,10 @@
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/blk-mq.h>
-#include <linux/elevator.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@@ -1780,6 +1780,7 @@ static const struct blk_mq_ops amiflop_mq_ops = {
static int fd_alloc_disk(int drive, int system)
{
struct gendisk *disk;
+ int err;
disk = blk_mq_alloc_disk(&unit[drive].tag_set, NULL);
if (IS_ERR(disk))
@@ -1798,8 +1799,10 @@ static int fd_alloc_disk(int drive, int system)
set_capacity(disk, 880 * 2);
unit[drive].gendisk[system] = disk;
- add_disk(disk);
- return 0;
+ err = add_disk(disk);
+ if (err)
+ blk_cleanup_disk(disk);
+ return err;
}
static int fd_alloc_drive(int drive)
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 06b360f7123a..52484bcdedb9 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -37,8 +37,7 @@ static ssize_t aoedisk_show_state(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE,
- "%s%s\n",
+ return sysfs_emit(page, "%s%s\n",
(d->flags & DEVFL_UP) ? "up" : "down",
(d->flags & DEVFL_KICKME) ? ",kickme" :
(d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : "");
@@ -52,8 +51,8 @@ static ssize_t aoedisk_show_mac(struct device *dev,
struct aoetgt *t = d->targets[0];
if (t == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
- return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
+ return sysfs_emit(page, "none\n");
+ return sysfs_emit(page, "%pm\n", t->addr);
}
static ssize_t aoedisk_show_netif(struct device *dev,
struct device_attribute *attr, char *page)
@@ -85,7 +84,7 @@ static ssize_t aoedisk_show_netif(struct device *dev,
ne = nd;
nd = nds;
if (*nd == NULL)
- return snprintf(page, PAGE_SIZE, "none\n");
+ return sysfs_emit(page, "none\n");
for (p = page; nd < ne; nd++)
p += scnprintf(p, PAGE_SIZE - (p-page), "%s%s",
p == page ? "" : ",", (*nd)->name);
@@ -99,7 +98,7 @@ static ssize_t aoedisk_show_fwver(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
+ return sysfs_emit(page, "0x%04x\n", (unsigned int) d->fw_ver);
}
static ssize_t aoedisk_show_payload(struct device *dev,
struct device_attribute *attr, char *page)
@@ -107,7 +106,7 @@ static ssize_t aoedisk_show_payload(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct aoedev *d = disk->private_data;
- return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
+ return sysfs_emit(page, "%lu\n", d->maxbcnt);
}
static int aoedisk_debugfs_show(struct seq_file *s, void *ignored)
@@ -417,7 +416,9 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
- device_add_disk(NULL, gd, aoe_attr_groups);
+ err = device_add_disk(NULL, gd, aoe_attr_groups);
+ if (err)
+ goto out_disk_cleanup;
aoedisk_add_debugfs(d);
spin_lock_irqsave(&d->lock, flags);
@@ -426,6 +427,8 @@ aoeblk_gdalloc(void *vp)
spin_unlock_irqrestore(&d->lock, flags);
return;
+out_disk_cleanup:
+ blk_cleanup_disk(gd);
err_tagset:
blk_mq_free_tag_set(set);
err_mempool:
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index a093644ac39f..d14bdc3589b2 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -68,6 +68,7 @@
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/wait.h>
@@ -298,6 +299,7 @@ static struct atari_floppy_struct {
disk change detection) */
int flags; /* flags */
struct gendisk *disk[NUM_DISK_MINORS];
+ bool registered[NUM_DISK_MINORS];
int ref;
int type;
struct blk_mq_tag_set tag_set;
@@ -456,10 +458,20 @@ static DEFINE_TIMER(fd_timer, check_change);
static void fd_end_request_cur(blk_status_t err)
{
+ DPRINT(("fd_end_request_cur(), bytes %d of %d\n",
+ blk_rq_cur_bytes(fd_request),
+ blk_rq_bytes(fd_request)));
+
if (!blk_update_request(fd_request, err,
blk_rq_cur_bytes(fd_request))) {
+ DPRINT(("calling __blk_mq_end_request()\n"));
__blk_mq_end_request(fd_request, err);
fd_request = NULL;
+ } else {
+ /* requeue rest of request */
+ DPRINT(("calling blk_mq_requeue_request()\n"));
+ blk_mq_requeue_request(fd_request, true);
+ fd_request = NULL;
}
}
@@ -653,9 +665,6 @@ static inline void copy_buffer(void *from, void *to)
*p2++ = *p1++;
}
-
-
-
/* General Interrupt Handling */
static void (*FloppyIRQHandler)( int status ) = NULL;
@@ -700,12 +709,21 @@ static void fd_error( void )
if (fd_request->error_count >= MAX_ERRORS) {
printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
fd_end_request_cur(BLK_STS_IOERR);
+ finish_fdc();
+ return;
}
else if (fd_request->error_count == RECALIBRATE_ERRORS) {
printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
if (SelectedDrive != -1)
SUD.track = -1;
}
+ /* need to re-run request to recalibrate */
+ atari_disable_irq( IRQ_MFP_FDC );
+
+ setup_req_params( SelectedDrive );
+ do_fd_action( SelectedDrive );
+
+ atari_enable_irq( IRQ_MFP_FDC );
}
@@ -732,8 +750,10 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
if (type) {
type--;
if (type >= NUM_DISK_MINORS ||
- minor2disktype[type].drive_types > DriveType)
+ minor2disktype[type].drive_types > DriveType) {
+ finish_fdc();
return -EINVAL;
+ }
}
q = unit[drive].disk[type]->queue;
@@ -751,6 +771,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
}
if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
+ finish_fdc();
ret = -EINVAL;
goto out;
}
@@ -791,6 +812,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
wait_for_completion(&format_wait);
+ finish_fdc();
ret = FormatError ? -EIO : 0;
out:
blk_mq_unquiesce_queue(q);
@@ -825,6 +847,7 @@ static void do_fd_action( int drive )
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
return;
}
}
@@ -1229,6 +1252,7 @@ static void fd_rwsec_done1(int status)
else {
/* all sectors finished */
fd_end_request_cur(BLK_STS_OK);
+ finish_fdc();
}
return;
@@ -1350,7 +1374,7 @@ static void fd_times_out(struct timer_list *unused)
static void finish_fdc( void )
{
- if (!NeedSeek) {
+ if (!NeedSeek || !stdma_is_locked_by(floppy_irq)) {
finish_fdc_done( 0 );
}
else {
@@ -1385,7 +1409,8 @@ static void finish_fdc_done( int dummy )
start_motor_off_timer();
local_irq_save(flags);
- stdma_release();
+ if (stdma_is_locked_by(floppy_irq))
+ stdma_release();
local_irq_restore(flags);
DPRINT(("finish_fdc() finished\n"));
@@ -1435,8 +1460,7 @@ static int floppy_revalidate(struct gendisk *disk)
unsigned int drive = p - unit;
if (test_bit(drive, &changed_floppies) ||
- test_bit(drive, &fake_change) ||
- p->disktype == 0) {
+ test_bit(drive, &fake_change) || !p->disktype) {
if (UD.flags & FTD_MSG)
printk(KERN_ERR "floppy: clear format %p!\n", UDT);
BufferDrive = -1;
@@ -1475,15 +1499,6 @@ static void setup_req_params( int drive )
ReqTrack, ReqSector, (unsigned long)ReqData ));
}
-static void ataflop_commit_rqs(struct blk_mq_hw_ctx *hctx)
-{
- spin_lock_irq(&ataflop_lock);
- atari_disable_irq(IRQ_MFP_FDC);
- finish_fdc();
- atari_enable_irq(IRQ_MFP_FDC);
- spin_unlock_irq(&ataflop_lock);
-}
-
static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -1491,6 +1506,10 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
int drive = floppy - unit;
int type = floppy->type;
+ DPRINT(("Queue request: drive %d type %d sectors %d of %d last %d\n",
+ drive, type, blk_rq_cur_sectors(bd->rq),
+ blk_rq_sectors(bd->rq), bd->last));
+
spin_lock_irq(&ataflop_lock);
if (fd_request) {
spin_unlock_irq(&ataflop_lock);
@@ -1511,6 +1530,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
/* drive not connected */
printk(KERN_ERR "Unknown Device: fd%d\n", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
@@ -1527,11 +1547,13 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
if (--type >= NUM_DISK_MINORS) {
printk(KERN_WARNING "fd%d: invalid disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
if (minor2disktype[type].drive_types > DriveType) {
printk(KERN_WARNING "fd%d: unsupported disk format", drive );
fd_end_request_cur(BLK_STS_IOERR);
+ stdma_release();
goto out;
}
type = minor2disktype[type].index;
@@ -1550,8 +1572,6 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
setup_req_params( drive );
do_fd_action( drive );
- if (bd->last)
- finish_fdc();
atari_enable_irq( IRQ_MFP_FDC );
out:
@@ -1634,6 +1654,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* what if type > 0 here? Overwrite specified entry ? */
if (type) {
/* refuse to re-set a predefined type for now */
+ finish_fdc();
return -EINVAL;
}
@@ -1701,8 +1722,10 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
/* sanity check */
if (setprm.track != dtp->blocks/dtp->spt/2 ||
- setprm.head != 2)
+ setprm.head != 2) {
+ finish_fdc();
return -EINVAL;
+ }
UDT = dtp;
set_capacity(disk, UDT->blocks);
@@ -1962,7 +1985,6 @@ static const struct block_device_operations floppy_fops = {
static const struct blk_mq_ops ataflop_mq_ops = {
.queue_rq = ataflop_queue_rq,
- .commit_rqs = ataflop_commit_rqs,
};
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
@@ -2000,12 +2022,28 @@ static void ataflop_probe(dev_t dev)
return;
mutex_lock(&ataflop_probe_lock);
if (!unit[drive].disk[type]) {
- if (ataflop_alloc_disk(drive, type) == 0)
+ if (ataflop_alloc_disk(drive, type) == 0) {
add_disk(unit[drive].disk[type]);
+ unit[drive].registered[type] = true;
+ }
}
mutex_unlock(&ataflop_probe_lock);
}
+static void atari_cleanup_floppy_disk(struct atari_floppy_struct *fs)
+{
+ int type;
+
+ for (type = 0; type < NUM_DISK_MINORS; type++) {
+ if (!fs->disk[type])
+ continue;
+ if (fs->registered[type])
+ del_gendisk(fs->disk[type]);
+ blk_cleanup_disk(fs->disk[type]);
+ }
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int __init atari_floppy_init (void)
{
int i;
@@ -2064,7 +2102,10 @@ static int __init atari_floppy_init (void)
for (i = 0; i < FD_MAX_UNITS; i++) {
unit[i].track = -1;
unit[i].flags = 0;
- add_disk(unit[i].disk[0]);
+ ret = add_disk(unit[i].disk[0]);
+ if (ret)
+ goto err_out_dma;
+ unit[i].registered[0] = true;
}
printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
@@ -2074,12 +2115,11 @@ static int __init atari_floppy_init (void)
return 0;
+err_out_dma:
+ atari_stram_free(DMABuffer);
err:
- while (--i >= 0) {
- blk_cleanup_queue(unit[i].disk[0]->queue);
- put_disk(unit[i].disk[0]);
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ while (--i >= 0)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
out_unlock:
@@ -2128,18 +2168,10 @@ __setup("floppy=", atari_floppy_setup);
static void __exit atari_floppy_exit(void)
{
- int i, type;
+ int i;
- for (i = 0; i < FD_MAX_UNITS; i++) {
- for (type = 0; type < NUM_DISK_MINORS; type++) {
- if (!unit[i].disk[type])
- continue;
- del_gendisk(unit[i].disk[type]);
- blk_cleanup_queue(unit[i].disk[type]->queue);
- put_disk(unit[i].disk[type]);
- }
- blk_mq_free_tag_set(&unit[i].tag_set);
- }
+ for (i = 0; i < FD_MAX_UNITS; i++)
+ atari_cleanup_floppy_disk(&unit[i]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
del_timer_sync(&fd_timer);
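The ataflop changes above add a registered[] array so that teardown only calls del_gendisk() on minors whose add_disk() actually succeeded. A sketch of that bookkeeping, assuming the post-5.15 API where add_disk() returns an error; struct my_unit and MY_MINORS are hypothetical.

        #include <linux/genhd.h>

        #define MY_MINORS 8

        struct my_unit {
                struct gendisk *disk[MY_MINORS];
                bool registered[MY_MINORS];
        };

        static int my_register_minor(struct my_unit *u, int type)
        {
                int err = add_disk(u->disk[type]);

                if (!err)
                        u->registered[type] = true;     /* safe to del_gendisk() later */
                return err;
        }

        static void my_cleanup_unit(struct my_unit *u)
        {
                int type;

                for (type = 0; type < MY_MINORS; type++) {
                        if (!u->disk[type])
                                continue;
                        if (u->registered[type])
                                del_gendisk(u->disk[type]);
                        blk_cleanup_disk(u->disk[type]);
                }
        }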
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 530b31240203..aa0472718dce 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -282,7 +282,7 @@ out:
return err;
}
-static blk_qc_t brd_submit_bio(struct bio *bio)
+static void brd_submit_bio(struct bio *bio)
{
struct brd_device *brd = bio->bi_bdev->bd_disk->private_data;
sector_t sector = bio->bi_iter.bi_sector;
@@ -299,16 +299,14 @@ static blk_qc_t brd_submit_bio(struct bio *bio)
err = brd_do_bvec(brd, bvec.bv_page, len, bvec.bv_offset,
bio_op(bio), sector);
- if (err)
- goto io_error;
+ if (err) {
+ bio_io_error(bio);
+ return;
+ }
sector += len >> SECTOR_SHIFT;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static int brd_rw_page(struct block_device *bdev, sector_t sector,
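brd is one of several drivers in this series (n64cart, null_blk, drbd) converted to the new ->submit_bio() signature that returns void instead of a blk_qc_t cookie. A minimal sketch of what such a method looks like after the change; process_segment() is a hypothetical helper standing in for the driver's per-segment work.

        #include <linux/bio.h>
        #include <linux/blkdev.h>

        static int process_segment(struct bio_vec *bv);         /* hypothetical */

        static void sketch_submit_bio(struct bio *bio)
        {
                struct bio_vec bvec;
                struct bvec_iter iter;

                bio_for_each_segment(bvec, bio, iter) {
                        if (process_segment(&bvec)) {
                                /* completion is signalled on the bio itself */
                                bio_io_error(bio);
                                return;
                        }
                }
                bio_endio(bio);         /* no BLK_QC_T_NONE to return anymore */
        }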
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
deleted file mode 100644
index f0a91faa43a8..000000000000
--- a/drivers/block/cryptoloop.c
+++ /dev/null
@@ -1,206 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- Linux loop encryption enabling module
-
- Copyright (C) 2002 Herbert Valerio Riedel <hvr@gnu.org>
- Copyright (C) 2003 Fruhwirth Clemens <clemens@endorphin.org>
-
- */
-
-#include <linux/module.h>
-
-#include <crypto/skcipher.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-#include <linux/uaccess.h>
-#include "loop.h"
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
-MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
-
-#define LOOP_IV_SECTOR_BITS 9
-#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
-
-static int
-cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- int err = -EINVAL;
- int cipher_len;
- int mode_len;
- char cms[LO_NAME_SIZE]; /* cipher-mode string */
- char *mode;
- char *cmsp = cms; /* c-m string pointer */
- struct crypto_sync_skcipher *tfm;
-
- /* encryption breaks for non sector aligned offsets */
-
- if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
- goto out;
-
- strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
- cms[LO_NAME_SIZE - 1] = 0;
-
- cipher_len = strcspn(cmsp, "-");
-
- mode = cmsp + cipher_len;
- mode_len = 0;
- if (*mode) {
- mode++;
- mode_len = strcspn(mode, "-");
- }
-
- if (!mode_len) {
- mode = "cbc";
- mode_len = 3;
- }
-
- if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
- return -EINVAL;
-
- memmove(cms, mode, mode_len);
- cmsp = cms + mode_len;
- *cmsp++ = '(';
- memcpy(cmsp, info->lo_crypt_name, cipher_len);
- cmsp += cipher_len;
- *cmsp++ = ')';
- *cmsp = 0;
-
- tfm = crypto_alloc_sync_skcipher(cms, 0, 0);
- if (IS_ERR(tfm))
- return PTR_ERR(tfm);
-
- err = crypto_sync_skcipher_setkey(tfm, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
-
- if (err != 0)
- goto out_free_tfm;
-
- lo->key_data = tfm;
- return 0;
-
- out_free_tfm:
- crypto_free_sync_skcipher(tfm);
-
- out:
- return err;
-}
-
-
-typedef int (*encdec_cbc_t)(struct skcipher_request *req);
-
-static int
-cryptoloop_transfer(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t IV)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
- struct scatterlist sg_out;
- struct scatterlist sg_in;
-
- encdec_cbc_t encdecfunc;
- struct page *in_page, *out_page;
- unsigned in_offs, out_offs;
- int err;
-
- skcipher_request_set_sync_tfm(req, tfm);
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
- NULL, NULL);
-
- sg_init_table(&sg_out, 1);
- sg_init_table(&sg_in, 1);
-
- if (cmd == READ) {
- in_page = raw_page;
- in_offs = raw_off;
- out_page = loop_page;
- out_offs = loop_off;
- encdecfunc = crypto_skcipher_decrypt;
- } else {
- in_page = loop_page;
- in_offs = loop_off;
- out_page = raw_page;
- out_offs = raw_off;
- encdecfunc = crypto_skcipher_encrypt;
- }
-
- while (size > 0) {
- const int sz = min(size, LOOP_IV_SECTOR_SIZE);
- u32 iv[4] = { 0, };
- iv[0] = cpu_to_le32(IV & 0xffffffff);
-
- sg_set_page(&sg_in, in_page, sz, in_offs);
- sg_set_page(&sg_out, out_page, sz, out_offs);
-
- skcipher_request_set_crypt(req, &sg_in, &sg_out, sz, iv);
- err = encdecfunc(req);
- if (err)
- goto out;
-
- IV++;
- size -= sz;
- in_offs += sz;
- out_offs += sz;
- }
-
- err = 0;
-
-out:
- skcipher_request_zero(req);
- return err;
-}
-
-static int
-cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
-{
- return -EINVAL;
-}
-
-static int
-cryptoloop_release(struct loop_device *lo)
-{
- struct crypto_sync_skcipher *tfm = lo->key_data;
- if (tfm != NULL) {
- crypto_free_sync_skcipher(tfm);
- lo->key_data = NULL;
- return 0;
- }
- printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
- return -EINVAL;
-}
-
-static struct loop_func_table cryptoloop_funcs = {
- .number = LO_CRYPT_CRYPTOAPI,
- .init = cryptoloop_init,
- .ioctl = cryptoloop_ioctl,
- .transfer = cryptoloop_transfer,
- .release = cryptoloop_release,
- .owner = THIS_MODULE
-};
-
-static int __init
-init_cryptoloop(void)
-{
- int rc = loop_register_transfer(&cryptoloop_funcs);
-
- if (rc)
- printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
- else
- pr_warn("the cryptoloop driver has been deprecated and will be removed in in Linux 5.16\n");
- return rc;
-}
-
-static void __exit
-cleanup_cryptoloop(void)
-{
- if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
- printk(KERN_ERR
- "cryptoloop: loop_unregister_transfer failed\n");
-}
-
-module_init(init_cryptoloop);
-module_exit(cleanup_cryptoloop);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 5d9181382ce1..f27d5b0f9a0b 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1448,7 +1448,7 @@ extern void conn_free_crypto(struct drbd_connection *connection);
/* drbd_req */
extern void do_submit(struct work_struct *ws);
extern void __drbd_make_request(struct drbd_device *, struct bio *);
-extern blk_qc_t drbd_submit_bio(struct bio *bio);
+void drbd_submit_bio(struct bio *bio);
extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
extern int is_valid_ar_handle(struct drbd_request *, sector_t);
@@ -1826,8 +1826,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
/* Returns the number of 512 byte sectors of the device */
static inline sector_t drbd_get_capacity(struct block_device *bdev)
{
- /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
- return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
+ return bdev ? bdev_nr_sectors(bdev) : 0;
}
/**
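The drbd_get_capacity() cleanup above uses the bdev_nr_sectors() helper. For reference, a sketch under the assumption that the helper reports the device size in 512-byte sectors, exactly like the open-coded form it replaces:

        #include <linux/blkdev.h>

        static sector_t sketch_capacity(struct block_device *bdev)
        {
                /* equivalent to i_size_read(bdev->bd_inode) >> SECTOR_SHIFT */
                return bdev ? bdev_nr_sectors(bdev) : 0;
        }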
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 55234a558e98..19db80a1e409 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -2794,7 +2794,9 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
goto out_idr_remove_vol;
}
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
/* inherit the connection state */
device->state.conn = first_connection(resource)->cstate;
@@ -2808,6 +2810,8 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig
drbd_debugfs_device_add(device);
return NO_ERROR;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_idr_remove_vol:
idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 5ca233644d70..3235532ae077 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1596,7 +1596,7 @@ void do_submit(struct work_struct *ws)
}
}
-blk_qc_t drbd_submit_bio(struct bio *bio)
+void drbd_submit_bio(struct bio *bio)
{
struct drbd_device *device = bio->bi_bdev->bd_disk->private_data;
@@ -1609,7 +1609,6 @@ blk_qc_t drbd_submit_bio(struct bio *bio)
inc_ap_bio(device);
__drbd_make_request(device, bio);
- return BLK_QC_T_NONE;
}
static bool net_timeout_reached(struct drbd_request *net_req,
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index fef79ea52e3e..3873e789478e 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -184,6 +184,7 @@ static int print_unex = 1;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
@@ -4478,6 +4479,7 @@ static const struct blk_mq_ops floppy_mq_ops = {
};
static struct platform_device floppy_device[N_DRIVE];
+static bool registered[N_DRIVE];
static bool floppy_available(int drive)
{
@@ -4693,8 +4695,12 @@ static int __init do_floppy_init(void)
if (err)
goto out_remove_drives;
- device_add_disk(&floppy_device[drive].dev, disks[drive][0],
- NULL);
+ registered[drive] = true;
+
+ err = device_add_disk(&floppy_device[drive].dev,
+ disks[drive][0], NULL);
+ if (err)
+ goto out_remove_drives;
}
return 0;
@@ -4703,7 +4709,8 @@ out_remove_drives:
while (drive--) {
if (floppy_available(drive)) {
del_gendisk(disks[drive][0]);
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
}
out_release_dma:
@@ -4946,30 +4953,14 @@ static void __exit floppy_module_exit(void)
if (disks[drive][i])
del_gendisk(disks[drive][i]);
}
- platform_device_unregister(&floppy_device[drive]);
+ if (registered[drive])
+ platform_device_unregister(&floppy_device[drive]);
}
for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
if (disks[drive][i])
- blk_cleanup_queue(disks[drive][i]->queue);
+ blk_cleanup_disk(disks[drive][i]);
}
blk_mq_free_tag_set(&tag_sets[drive]);
-
- /*
- * These disks have not called add_disk(). Don't put down
- * queue reference in put_disk().
- */
- if (!(allowed_drive_mask & (1 << drive)) ||
- fdc_state[FDC(drive)].version == FDC_NONE) {
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- disks[drive][i]->queue = NULL;
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
- if (disks[drive][i])
- put_disk(disks[drive][i]);
- }
}
cancel_delayed_work_sync(&fd_timeout);
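The floppy exit path above collapses the old blk_cleanup_queue() plus put_disk() pair into blk_cleanup_disk(), which in this kernel era tears down the queue and drops the gendisk reference in one call. A compact sketch of that teardown shape, with hypothetical names:

        #include <linux/genhd.h>
        #include <linux/blk-mq.h>

        static void sketch_free_drive(struct gendisk **disks, int n,
                                      struct blk_mq_tag_set *set)
        {
                int i;

                for (i = 0; i < n; i++)
                        if (disks[i])
                                blk_cleanup_disk(disks[i]);     /* queue + disk ref */
                blk_mq_free_tag_set(set);
        }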
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 7bf4686af774..3c09a33fa1c7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -133,58 +133,6 @@ static void loop_global_unlock(struct loop_device *lo, bool global)
static int max_part;
static int part_shift;
-static int transfer_xor(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block)
-{
- char *raw_buf = kmap_atomic(raw_page) + raw_off;
- char *loop_buf = kmap_atomic(loop_page) + loop_off;
- char *in, *out, *key;
- int i, keysize;
-
- if (cmd == READ) {
- in = raw_buf;
- out = loop_buf;
- } else {
- in = loop_buf;
- out = raw_buf;
- }
-
- key = lo->lo_encrypt_key;
- keysize = lo->lo_encrypt_key_size;
- for (i = 0; i < size; i++)
- *out++ = *in++ ^ key[(i & 511) % keysize];
-
- kunmap_atomic(loop_buf);
- kunmap_atomic(raw_buf);
- cond_resched();
- return 0;
-}
-
-static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
-{
- if (unlikely(info->lo_encrypt_key_size <= 0))
- return -EINVAL;
- return 0;
-}
-
-static struct loop_func_table none_funcs = {
- .number = LO_CRYPT_NONE,
-};
-
-static struct loop_func_table xor_funcs = {
- .number = LO_CRYPT_XOR,
- .transfer = transfer_xor,
- .init = xor_init
-};
-
-/* xfer_funcs[0] is special - its release function is never called */
-static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
- &none_funcs,
- &xor_funcs
-};
-
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
loff_t loopsize;
@@ -228,8 +176,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
/*
* We support direct I/O only if lo_offset is aligned with the
* logical I/O size of backing device, and the logical block
- * size of loop is bigger than the backing device's and the loop
- * needn't transform transfer.
+ * size of loop is bigger than the backing device's.
*
* TODO: the above condition may be loosed in the future, and
* direct I/O may be switched runtime at that time because most
@@ -238,8 +185,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
if (dio) {
if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
!(lo->lo_offset & dio_align) &&
- mapping->a_ops->direct_IO &&
- !lo->transfer)
+ mapping->a_ops->direct_IO)
use_dio = true;
else
use_dio = false;
@@ -273,19 +219,6 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
}
/**
- * loop_validate_block_size() - validates the passed in block size
- * @bsize: size to validate
- */
-static int
-loop_validate_block_size(unsigned short bsize)
-{
- if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
- return -EINVAL;
-
- return 0;
-}
-
-/**
* loop_set_size() - sets device size and notifies userspace
* @lo: struct loop_device to set the size for
* @size: new size of the loop device
@@ -299,24 +232,6 @@ static void loop_set_size(struct loop_device *lo, loff_t size)
kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}
-static inline int
-lo_do_transfer(struct loop_device *lo, int cmd,
- struct page *rpage, unsigned roffs,
- struct page *lpage, unsigned loffs,
- int size, sector_t rblock)
-{
- int ret;
-
- ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
- if (likely(!ret))
- return 0;
-
- printk_ratelimited(KERN_ERR
- "loop: Transfer error at byte offset %llu, length %i.\n",
- (unsigned long long)rblock << 9, size);
- return ret;
-}
-
static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
struct iov_iter i;
@@ -356,41 +271,6 @@ static int lo_write_simple(struct loop_device *lo, struct request *rq,
return ret;
}
-/*
- * This is the slow, transforming version that needs to double buffer the
- * data as it cannot do the transformations in place without having direct
- * access to the destination pages of the backing file.
- */
-static int lo_write_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct page *page;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
- bvec.bv_offset, bvec.bv_len, pos >> 9);
- if (unlikely(ret))
- break;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
- ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
- if (ret < 0)
- break;
- }
-
- __free_page(page);
- return ret;
-}
-
static int lo_read_simple(struct loop_device *lo, struct request *rq,
loff_t pos)
{
@@ -420,64 +300,12 @@ static int lo_read_simple(struct loop_device *lo, struct request *rq,
return 0;
}
-static int lo_read_transfer(struct loop_device *lo, struct request *rq,
- loff_t pos)
-{
- struct bio_vec bvec, b;
- struct req_iterator iter;
- struct iov_iter i;
- struct page *page;
- ssize_t len;
- int ret = 0;
-
- page = alloc_page(GFP_NOIO);
- if (unlikely(!page))
- return -ENOMEM;
-
- rq_for_each_segment(bvec, rq, iter) {
- loff_t offset = pos;
-
- b.bv_page = page;
- b.bv_offset = 0;
- b.bv_len = bvec.bv_len;
-
- iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
- len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
- if (len < 0) {
- ret = len;
- goto out_free_page;
- }
-
- ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
- bvec.bv_offset, len, offset >> 9);
- if (ret)
- goto out_free_page;
-
- flush_dcache_page(bvec.bv_page);
-
- if (len != bvec.bv_len) {
- struct bio *bio;
-
- __rq_for_each_bio(bio, rq)
- zero_fill_bio(bio);
- break;
- }
- }
-
- ret = 0;
-out_free_page:
- __free_page(page);
- return ret;
-}
-
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
int mode)
{
/*
* We use fallocate to manipulate the space mappings used by the image
- * a.k.a. discard/zerorange. However we do not support this if
- * encryption is enabled, because it may give an attacker useful
- * information.
+ * a.k.a. discard/zerorange.
*/
struct file *file = lo->lo_backing_file;
struct request_queue *q = lo->lo_queue;
@@ -554,7 +382,7 @@ static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
blk_mq_complete_request(rq);
}
-static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void lo_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);
@@ -627,7 +455,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
lo_rw_aio_do_completion(cmd);
if (ret != -EIOCBQUEUED)
- cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
+ lo_rw_aio_complete(&cmd->iocb, ret);
return 0;
}
@@ -660,16 +488,12 @@ static int do_req_filebacked(struct loop_device *lo, struct request *rq)
case REQ_OP_DISCARD:
return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
case REQ_OP_WRITE:
- if (lo->transfer)
- return lo_write_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, WRITE);
else
return lo_write_simple(lo, rq, pos);
case REQ_OP_READ:
- if (lo->transfer)
- return lo_read_transfer(lo, rq, pos);
- else if (cmd->use_aio)
+ if (cmd->use_aio)
return lo_rw_aio(lo, cmd, pos, READ);
else
return lo_read_simple(lo, rq, pos);
@@ -934,7 +758,7 @@ static void loop_config_discard(struct loop_device *lo)
* not blkdev_issue_discard(). This maintains consistent behavior with
* file-backed loop devices: discarded regions read back as zero.
*/
- if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
+ if (S_ISBLK(inode->i_mode)) {
struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));
max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
@@ -943,11 +767,9 @@ static void loop_config_discard(struct loop_device *lo)
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do not support discard if
- * encryption is enabled, because it may give an attacker
- * useful information.
+ * image a.k.a. discard.
*/
- } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
+ } else if (!file->f_op->fallocate) {
max_discard_sectors = 0;
granularity = 0;
@@ -1084,43 +906,6 @@ static void loop_update_rotational(struct loop_device *lo)
blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
-static int
-loop_release_xfer(struct loop_device *lo)
-{
- int err = 0;
- struct loop_func_table *xfer = lo->lo_encryption;
-
- if (xfer) {
- if (xfer->release)
- err = xfer->release(lo);
- lo->transfer = NULL;
- lo->lo_encryption = NULL;
- module_put(xfer->owner);
- }
- return err;
-}
-
-static int
-loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
- const struct loop_info64 *i)
-{
- int err = 0;
-
- if (xfer) {
- struct module *owner = xfer->owner;
-
- if (!try_module_get(owner))
- return -EINVAL;
- if (xfer->init)
- err = xfer->init(lo, i);
- if (err)
- module_put(owner);
- else
- lo->lo_encryption = xfer;
- }
- return err;
-}
-
/**
* loop_set_status_from_info - configure device from loop_info
* @lo: struct loop_device to configure
@@ -1133,55 +918,27 @@ static int
loop_set_status_from_info(struct loop_device *lo,
const struct loop_info64 *info)
{
- int err;
- struct loop_func_table *xfer;
- kuid_t uid = current_uid();
-
if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
return -EINVAL;
- err = loop_release_xfer(lo);
- if (err)
- return err;
-
- if (info->lo_encrypt_type) {
- unsigned int type = info->lo_encrypt_type;
-
- if (type >= MAX_LO_CRYPT)
- return -EINVAL;
- xfer = xfer_funcs[type];
- if (xfer == NULL)
- return -EINVAL;
- } else
- xfer = NULL;
-
- err = loop_init_xfer(lo, xfer, info);
- if (err)
- return err;
+ switch (info->lo_encrypt_type) {
+ case LO_CRYPT_NONE:
+ break;
+ case LO_CRYPT_XOR:
+ pr_warn("support for the xor transformation has been removed.\n");
+ return -EINVAL;
+ case LO_CRYPT_CRYPTOAPI:
+ pr_warn("support for cryptoloop has been removed. Use dm-crypt instead.\n");
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
lo->lo_offset = info->lo_offset;
lo->lo_sizelimit = info->lo_sizelimit;
memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
- memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
lo->lo_file_name[LO_NAME_SIZE-1] = 0;
- lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
-
- if (!xfer)
- xfer = &none_funcs;
- lo->transfer = xfer->transfer;
- lo->ioctl = xfer->ioctl;
-
lo->lo_flags = info->lo_flags;
-
- lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
- lo->lo_init[0] = info->lo_init[0];
- lo->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_key_size) {
- memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
- info->lo_encrypt_key_size);
- lo->lo_key_owner = uid;
- }
-
return 0;
}
@@ -1236,7 +993,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
}
if (config->block_size) {
- error = loop_validate_block_size(config->block_size);
+ error = blk_validate_block_size(config->block_size);
if (error)
goto out_unlock;
}
@@ -1329,7 +1086,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
{
struct file *filp = NULL;
gfp_t gfp = lo->old_gfp_mask;
- struct block_device *bdev = lo->lo_device;
int err = 0;
bool partscan = false;
int lo_number;
@@ -1381,36 +1137,23 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
lo->lo_backing_file = NULL;
spin_unlock_irq(&lo->lo_lock);
- loop_release_xfer(lo);
- lo->transfer = NULL;
- lo->ioctl = NULL;
lo->lo_device = NULL;
- lo->lo_encryption = NULL;
lo->lo_offset = 0;
lo->lo_sizelimit = 0;
- lo->lo_encrypt_key_size = 0;
- memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
- memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
blk_queue_logical_block_size(lo->lo_queue, 512);
blk_queue_physical_block_size(lo->lo_queue, 512);
blk_queue_io_min(lo->lo_queue, 512);
- if (bdev) {
- invalidate_bdev(bdev);
- bdev->bd_inode->i_mapping->wb_err = 0;
- }
- set_capacity(lo->lo_disk, 0);
+ invalidate_disk(lo->lo_disk);
loop_sysfs_exit(lo);
- if (bdev) {
- /* let user-space know about this change */
- kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
- }
+ /* let user-space know about this change */
+ kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
mapping_set_gfp_mask(filp->f_mapping, gfp);
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
blk_mq_unfreeze_queue(lo->lo_queue);
- partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
+ partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
lo_number = lo->lo_number;
disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE);
out_unlock:
@@ -1498,7 +1241,6 @@ static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
int err;
- kuid_t uid = current_uid();
int prev_lo_flags;
bool partscan = false;
bool size_changed = false;
@@ -1506,12 +1248,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
err = mutex_lock_killable(&lo->lo_mutex);
if (err)
return err;
- if (lo->lo_encrypt_key_size &&
- !uid_eq(lo->lo_key_owner, uid) &&
- !capable(CAP_SYS_ADMIN)) {
- err = -EPERM;
- goto out_unlock;
- }
if (lo->lo_state != Lo_bound) {
err = -ENXIO;
goto out_unlock;
@@ -1597,14 +1333,6 @@ loop_get_status(struct loop_device *lo, struct loop_info64 *info)
info->lo_sizelimit = lo->lo_sizelimit;
info->lo_flags = lo->lo_flags;
memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
- info->lo_encrypt_type =
- lo->lo_encryption ? lo->lo_encryption->number : 0;
- if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
- info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
- memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
- lo->lo_encrypt_key_size);
- }
/* Drop lo_mutex while we call into the filesystem. */
path = lo->lo_backing_file->f_path;
@@ -1630,16 +1358,8 @@ loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
info64->lo_rdevice = info->lo_rdevice;
info64->lo_offset = info->lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info->lo_encrypt_type;
- info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
info64->lo_flags = info->lo_flags;
- info64->lo_init[0] = info->lo_init[0];
- info64->lo_init[1] = info->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
}
static int
@@ -1651,16 +1371,8 @@ loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
info->lo_inode = info64->lo_inode;
info->lo_rdevice = info64->lo_rdevice;
info->lo_offset = info64->lo_offset;
- info->lo_encrypt_type = info64->lo_encrypt_type;
- info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
info->lo_flags = info64->lo_flags;
- info->lo_init[0] = info64->lo_init[0];
- info->lo_init[1] = info64->lo_init[1];
- if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info->lo_device != info64->lo_device ||
@@ -1759,7 +1471,7 @@ static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
if (lo->lo_state != Lo_bound)
return -ENXIO;
- err = loop_validate_block_size(arg);
+ err = blk_validate_block_size(arg);
if (err)
return err;
@@ -1809,7 +1521,7 @@ static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
err = loop_set_block_size(lo, arg);
break;
default:
- err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+ err = -EINVAL;
}
mutex_unlock(&lo->lo_mutex);
return err;
@@ -1885,7 +1597,6 @@ struct compat_loop_info {
compat_ulong_t lo_inode; /* ioctl r/o */
compat_dev_t lo_rdevice; /* ioctl r/o */
compat_int_t lo_offset;
- compat_int_t lo_encrypt_type;
compat_int_t lo_encrypt_key_size; /* ioctl w/o */
compat_int_t lo_flags; /* ioctl r/o */
char lo_name[LO_NAME_SIZE];
@@ -1914,16 +1625,8 @@ loop_info64_from_compat(const struct compat_loop_info __user *arg,
info64->lo_rdevice = info.lo_rdevice;
info64->lo_offset = info.lo_offset;
info64->lo_sizelimit = 0;
- info64->lo_encrypt_type = info.lo_encrypt_type;
- info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
info64->lo_flags = info.lo_flags;
- info64->lo_init[0] = info.lo_init[0];
- info64->lo_init[1] = info.lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
- else
- memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
- memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
return 0;
}
@@ -1943,24 +1646,14 @@ loop_info64_to_compat(const struct loop_info64 *info64,
info.lo_inode = info64->lo_inode;
info.lo_rdevice = info64->lo_rdevice;
info.lo_offset = info64->lo_offset;
- info.lo_encrypt_type = info64->lo_encrypt_type;
- info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
info.lo_flags = info64->lo_flags;
- info.lo_init[0] = info64->lo_init[0];
- info.lo_init[1] = info64->lo_init[1];
- if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
- memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
- else
- memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
- memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+ memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
/* error in case values were truncated */
if (info.lo_device != info64->lo_device ||
info.lo_rdevice != info64->lo_rdevice ||
info.lo_inode != info64->lo_inode ||
- info.lo_offset != info64->lo_offset ||
- info.lo_init[0] != info64->lo_init[0] ||
- info.lo_init[1] != info64->lo_init[1])
+ info.lo_offset != info64->lo_offset)
return -EOVERFLOW;
if (copy_to_user(arg, &info, sizeof(info)))
@@ -2101,43 +1794,6 @@ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
-int loop_register_transfer(struct loop_func_table *funcs)
-{
- unsigned int n = funcs->number;
-
- if (n >= MAX_LO_CRYPT || xfer_funcs[n])
- return -EINVAL;
- xfer_funcs[n] = funcs;
- return 0;
-}
-
-int loop_unregister_transfer(int number)
-{
- unsigned int n = number;
- struct loop_func_table *xfer;
-
- if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
- return -EINVAL;
- /*
- * This function is called from only cleanup_cryptoloop().
- * Given that each loop device that has a transfer enabled holds a
- * reference to the module implementing it we should never get here
- * with a transfer that is set (unless forced module unloading is
- * requested). Thus, check module's refcount and warn if this is
- * not a clean unloading.
- */
-#ifdef CONFIG_MODULE_UNLOAD
- if (xfer->owner && module_refcount(xfer->owner) != -1)
- pr_err("Danger! Unregistering an in use transfer function.\n");
-#endif
-
- xfer_funcs[n] = NULL;
- return 0;
-}
-
-EXPORT_SYMBOL(loop_register_transfer);
-EXPORT_SYMBOL(loop_unregister_transfer);
-
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -2394,13 +2050,19 @@ static int loop_add(int i)
disk->event_flags = DISK_EVENT_FLAG_UEVENT;
sprintf(disk->disk_name, "loop%d", i);
/* Make this loop device reachable from pathname. */
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
+
/* Show this loop device. */
mutex_lock(&loop_ctl_mutex);
lo->idr_visible = true;
mutex_unlock(&loop_ctl_mutex);
+
return i;
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
out_cleanup_tags:
blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
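The loop changes above drop the driver-local loop_validate_block_size() in favour of the generic blk_validate_block_size() helper (512 bytes to PAGE_SIZE, power of two). A sketch of how a driver might apply a user-supplied block size with it; sketch_set_block_size() is a hypothetical wrapper:

        #include <linux/blkdev.h>

        static int sketch_set_block_size(struct request_queue *q, unsigned int bsize)
        {
                int err = blk_validate_block_size(bsize);

                if (err)
                        return err;     /* -EINVAL for an unsupported size */

                blk_queue_logical_block_size(q, bsize);
                return 0;
        }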
diff --git a/drivers/block/loop.h b/drivers/block/loop.h
index 04c88dd6eabd..082d4b6bfc6a 100644
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -32,23 +32,10 @@ struct loop_device {
loff_t lo_offset;
loff_t lo_sizelimit;
int lo_flags;
- int (*transfer)(struct loop_device *, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
char lo_file_name[LO_NAME_SIZE];
- char lo_crypt_name[LO_NAME_SIZE];
- char lo_encrypt_key[LO_KEY_SIZE];
- int lo_encrypt_key_size;
- struct loop_func_table *lo_encryption;
- __u32 lo_init[2];
- kuid_t lo_key_owner; /* Who set the key */
- int (*ioctl)(struct loop_device *, int cmd,
- unsigned long arg);
struct file * lo_backing_file;
struct block_device *lo_device;
- void *key_data;
gfp_t old_gfp_mask;
@@ -82,21 +69,4 @@ struct loop_cmd {
struct cgroup_subsys_state *memcg_css;
};
-/* Support for loadable transfer modules */
-struct loop_func_table {
- int number; /* filter type */
- int (*transfer)(struct loop_device *lo, int cmd,
- struct page *raw_page, unsigned raw_off,
- struct page *loop_page, unsigned loop_off,
- int size, sector_t real_block);
- int (*init)(struct loop_device *, const struct loop_info64 *);
- /* release is called from loop_unregister_transfer or clr_fd */
- int (*release)(struct loop_device *);
- int (*ioctl)(struct loop_device *, int cmd, unsigned long arg);
- struct module *owner;
-};
-
-int loop_register_transfer(struct loop_func_table *funcs);
-int loop_unregister_transfer(int number);
-
#endif
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 901855717cb5..c91b9010c1a6 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3633,7 +3633,9 @@ skip_create_disk:
set_capacity(dd->disk, capacity);
/* Enable the block device and add it to /dev */
- device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ rv = device_add_disk(&dd->pdev->dev, dd->disk, mtip_disk_attr_groups);
+ if (rv)
+ goto read_capacity_error;
if (dd->mtip_svc_handler) {
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
@@ -4061,7 +4063,6 @@ block_initialize_err:
msi_initialize_err:
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
@@ -4119,7 +4120,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
mtip_block_remove(dd);
if (dd->isr_workq) {
- flush_workqueue(dd->isr_workq);
destroy_workqueue(dd->isr_workq);
drop_cpu(dd->work[0].cpu_binding);
drop_cpu(dd->work[1].cpu_binding);
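The two mtip32xx hunks above remove flush_workqueue() calls that immediately precede destroy_workqueue(); destroy_workqueue() already drains pending work itself, so the explicit flush was redundant. A sketch with a hypothetical workqueue pointer:

        #include <linux/workqueue.h>

        static void sketch_teardown_wq(struct workqueue_struct *wq)
        {
                if (wq)
                        destroy_workqueue(wq);  /* drains remaining work, then frees */
        }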
diff --git a/drivers/block/n64cart.c b/drivers/block/n64cart.c
index 26798da661bd..78282f01f581 100644
--- a/drivers/block/n64cart.c
+++ b/drivers/block/n64cart.c
@@ -84,7 +84,7 @@ static bool n64cart_do_bvec(struct device *dev, struct bio_vec *bv, u32 pos)
return true;
}
-static blk_qc_t n64cart_submit_bio(struct bio *bio)
+static void n64cart_submit_bio(struct bio *bio)
{
struct bio_vec bvec;
struct bvec_iter iter;
@@ -92,16 +92,14 @@ static blk_qc_t n64cart_submit_bio(struct bio *bio)
u32 pos = bio->bi_iter.bi_sector << SECTOR_SHIFT;
bio_for_each_segment(bvec, bio, iter) {
- if (!n64cart_do_bvec(dev, &bvec, pos))
- goto io_error;
+ if (!n64cart_do_bvec(dev, &bvec, pos)) {
+ bio_io_error(bio);
+ return;
+ }
pos += bvec.bv_len;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
-io_error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static const struct block_device_operations n64cart_fops = {
@@ -117,6 +115,7 @@ static const struct block_device_operations n64cart_fops = {
static int __init n64cart_probe(struct platform_device *pdev)
{
struct gendisk *disk;
+ int err = -ENOMEM;
if (!start || !size) {
pr_err("start or size not specified\n");
@@ -134,7 +133,7 @@ static int __init n64cart_probe(struct platform_device *pdev)
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- return -ENOMEM;
+ goto out;
disk->first_minor = 0;
disk->flags = GENHD_FL_NO_PART_SCAN;
@@ -149,11 +148,18 @@ static int __init n64cart_probe(struct platform_device *pdev)
blk_queue_physical_block_size(disk->queue, 4096);
blk_queue_logical_block_size(disk->queue, 4096);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_cleanup_disk;
pr_info("n64cart: %u kb disk\n", size / 1024);
return 0;
+
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out:
+ return err;
}
static struct platform_driver n64cart_driver = {
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 1183f7872b71..b47b2a87ae8f 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -122,15 +122,21 @@ struct nbd_device {
struct work_struct remove_work;
struct list_head list;
- struct task_struct *task_recv;
struct task_struct *task_setup;
unsigned long flags;
+ pid_t pid; /* pid of nbd-client, if attached */
char *backend;
};
#define NBD_CMD_REQUEUED 1
+/*
+ * This flag will be set if nbd_queue_rq() succeeds, and will be checked and
+ * cleared in completion. Both setting and clearing of the flag are protected
+ * by cmd->lock.
+ */
+#define NBD_CMD_INFLIGHT 2
struct nbd_cmd {
struct nbd_device *nbd;
@@ -217,7 +223,7 @@ static ssize_t pid_show(struct device *dev,
struct gendisk *disk = dev_to_disk(dev);
struct nbd_device *nbd = (struct nbd_device *)disk->private_data;
- return sprintf(buf, "%d\n", task_pid_nr(nbd->task_recv));
+ return sprintf(buf, "%d\n", nbd->pid);
}
static const struct device_attribute pid_attr = {
@@ -310,26 +316,19 @@ static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
nsock->sent = 0;
}
-static void nbd_size_clear(struct nbd_device *nbd)
-{
- if (nbd->config->bytesize) {
- set_capacity(nbd->disk, 0);
- kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
- }
-}
-
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
loff_t blksize)
{
if (!blksize)
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
- if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
+
+ if (blk_validate_block_size(blksize))
return -EINVAL;
nbd->config->bytesize = bytesize;
nbd->config->blksize_bits = __ffs(blksize);
- if (!nbd->task_recv)
+ if (!nbd->pid)
return 0;
if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
@@ -405,6 +404,11 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
if (!mutex_trylock(&cmd->lock))
return BLK_EH_RESET_TIMER;
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return BLK_EH_DONE;
+ }
+
if (!refcount_inc_not_zero(&nbd->config_refs)) {
cmd->status = BLK_STS_TIMEOUT;
mutex_unlock(&cmd->lock);
@@ -484,7 +488,8 @@ done:
}
/*
- * Send or receive packet.
+ * Send or receive packet. Returns a positive value on success and a
+ * negative value on failure; it never returns 0.
*/
static int sock_xmit(struct nbd_device *nbd, int index, int send,
struct iov_iter *iter, int msg_flags, int *sent)
@@ -610,7 +615,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
result = sock_xmit(nbd, index, 1, &from,
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
trace_nbd_header_sent(req, handle);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* If we haven't sent anything we can just return BUSY,
* however if we have sent something we need to make
@@ -654,7 +659,7 @@ send_pages:
skip = 0;
}
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
- if (result <= 0) {
+ if (result < 0) {
if (was_interrupted(result)) {
/* We've already sent the header, we
* have no choice but to set pending and
@@ -688,38 +693,45 @@ out:
return 0;
}
-/* NULL returned = something went wrong, inform userspace */
-static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
+static int nbd_read_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
{
- struct nbd_config *config = nbd->config;
- int result;
- struct nbd_reply reply;
- struct nbd_cmd *cmd;
- struct request *req = NULL;
- u64 handle;
- u16 hwq;
- u32 tag;
- struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
+ struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
struct iov_iter to;
- int ret = 0;
+ int result;
- reply.magic = 0;
- iov_iter_kvec(&to, READ, &iov, 1, sizeof(reply));
+ reply->magic = 0;
+ iov_iter_kvec(&to, READ, &iov, 1, sizeof(*reply));
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
- if (!nbd_disconnected(config))
+ if (result < 0) {
+ if (!nbd_disconnected(nbd->config))
dev_err(disk_to_dev(nbd->disk),
"Receive control failed (result %d)\n", result);
- return ERR_PTR(result);
+ return result;
}
- if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
+ if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
- (unsigned long)ntohl(reply.magic));
- return ERR_PTR(-EPROTO);
+ (unsigned long)ntohl(reply->magic));
+ return -EPROTO;
}
- memcpy(&handle, reply.handle, sizeof(handle));
+ return 0;
+}
+
+/* NULL returned = something went wrong, inform userspace */
+static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
+ struct nbd_reply *reply)
+{
+ int result;
+ struct nbd_cmd *cmd;
+ struct request *req = NULL;
+ u64 handle;
+ u16 hwq;
+ u32 tag;
+ int ret = 0;
+
+ memcpy(&handle, reply->handle, sizeof(handle));
tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
@@ -734,6 +746,16 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
cmd = blk_mq_rq_to_pdu(req);
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
+ tag, cmd->status, cmd->flags);
+ ret = -ENOENT;
+ goto out;
+ }
+ if (cmd->index != index) {
+ dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
+ tag, index, cmd->index);
+ }
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
@@ -752,9 +774,9 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
ret = -ENOENT;
goto out;
}
- if (ntohl(reply.error)) {
+ if (ntohl(reply->error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
- ntohl(reply.error));
+ ntohl(reply->error));
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -763,11 +785,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (rq_data_dir(req) != WRITE) {
struct req_iterator iter;
struct bio_vec bvec;
+ struct iov_iter to;
rq_for_each_segment(bvec, req, iter) {
iov_iter_bvec(&to, READ, &bvec, 1, bvec.bv_len);
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
- if (result <= 0) {
+ if (result < 0) {
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
result);
/*
@@ -776,7 +799,7 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
* and let the timeout stuff handle resubmitting
* this request onto another connection.
*/
- if (nbd_disconnected(config)) {
+ if (nbd_disconnected(nbd->config)) {
cmd->status = BLK_STS_IOERR;
goto out;
}
@@ -800,24 +823,46 @@ static void recv_work(struct work_struct *work)
work);
struct nbd_device *nbd = args->nbd;
struct nbd_config *config = nbd->config;
+ struct request_queue *q = nbd->disk->queue;
+ struct nbd_sock *nsock;
struct nbd_cmd *cmd;
struct request *rq;
while (1) {
- cmd = nbd_read_stat(nbd, args->index);
- if (IS_ERR(cmd)) {
- struct nbd_sock *nsock = config->socks[args->index];
+ struct nbd_reply reply;
- mutex_lock(&nsock->tx_lock);
- nbd_mark_nsock_dead(nbd, nsock, 1);
- mutex_unlock(&nsock->tx_lock);
+ if (nbd_read_reply(nbd, args->index, &reply))
+ break;
+
+ /*
+ * Grab .q_usage_counter so the request pool won't go away; then no
+ * request use-after-free is possible during nbd_handle_reply(). If
+ * the queue is frozen, there won't be any inflight requests, so we
+ * don't need to handle the incoming garbage message.
+ */
+ if (!percpu_ref_tryget(&q->q_usage_counter)) {
+ dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
+ __func__);
+ break;
+ }
+
+ cmd = nbd_handle_reply(nbd, args->index, &reply);
+ if (IS_ERR(cmd)) {
+ percpu_ref_put(&q->q_usage_counter);
break;
}
rq = blk_mq_rq_from_pdu(cmd);
if (likely(!blk_should_fake_timeout(rq->q)))
blk_mq_complete_request(rq);
+ percpu_ref_put(&q->q_usage_counter);
}
+
+ nsock = config->socks[args->index];
+ mutex_lock(&nsock->tx_lock);
+ nbd_mark_nsock_dead(nbd, nsock, 1);
+ mutex_unlock(&nsock->tx_lock);
+
nbd_config_put(nbd);
atomic_dec(&config->recv_threads);
wake_up(&config->recv_wq);
@@ -833,6 +878,10 @@ static bool nbd_clear_req(struct request *req, void *data, bool reserved)
return true;
mutex_lock(&cmd->lock);
+ if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
+ mutex_unlock(&cmd->lock);
+ return true;
+ }
cmd->status = BLK_STS_IOERR;
mutex_unlock(&cmd->lock);
@@ -914,7 +963,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
if (!refcount_inc_not_zero(&nbd->config_refs)) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Socks array is empty\n");
- blk_mq_start_request(req);
return -EINVAL;
}
config = nbd->config;
@@ -923,7 +971,6 @@ static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Attempted send on invalid socket\n");
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EINVAL;
}
cmd->status = BLK_STS_OK;
@@ -947,7 +994,6 @@ again:
*/
sock_shutdown(nbd);
nbd_config_put(nbd);
- blk_mq_start_request(req);
return -EIO;
}
goto again;
@@ -969,7 +1015,13 @@ again:
* returns EAGAIN can be retried on a different socket.
*/
ret = nbd_send_cmd(nbd, cmd, index);
- if (ret == -EAGAIN) {
+ /*
+ * Access to this flag is protected by cmd->lock, so it's safe to set
+ * the flag after nbd_send_cmd() succeeds in sending the request to the
+ * server.
+ */
+ if (!ret)
+ __set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
+ else if (ret == -EAGAIN) {
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
@@ -1206,7 +1258,7 @@ static void send_disconnects(struct nbd_device *nbd)
iov_iter_kvec(&from, WRITE, &iov, 1, sizeof(request));
mutex_lock(&nsock->tx_lock);
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
- if (ret <= 0)
+ if (ret < 0)
dev_err(disk_to_dev(nbd->disk),
"Send disconnect failed %d\n", ret);
mutex_unlock(&nsock->tx_lock);
@@ -1237,11 +1289,13 @@ static void nbd_config_put(struct nbd_device *nbd)
&nbd->config_lock)) {
struct nbd_config *config = nbd->config;
nbd_dev_dbg_close(nbd);
- nbd_size_clear(nbd);
+ invalidate_disk(nbd->disk);
+ if (nbd->config->bytesize)
+ kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
&config->runtime_flags))
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
- nbd->task_recv = NULL;
+ nbd->pid = 0;
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
&config->runtime_flags)) {
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
@@ -1282,7 +1336,7 @@ static int nbd_start_device(struct nbd_device *nbd)
int num_connections = config->num_connections;
int error = 0, i;
- if (nbd->task_recv)
+ if (nbd->pid)
return -EBUSY;
if (!config->socks)
return -EINVAL;
@@ -1301,7 +1355,7 @@ static int nbd_start_device(struct nbd_device *nbd)
}
blk_mq_update_nr_hw_queues(&nbd->tag_set, config->num_connections);
- nbd->task_recv = current;
+ nbd->pid = task_pid_nr(current);
nbd_parse_flags(nbd);
@@ -1557,8 +1611,8 @@ static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
{
struct nbd_device *nbd = s->private;
- if (nbd->task_recv)
- seq_printf(s, "recv: %d\n", task_pid_nr(nbd->task_recv));
+ if (nbd->pid)
+ seq_printf(s, "recv: %d\n", nbd->pid);
return 0;
}
@@ -1762,7 +1816,9 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
disk->fops = &nbd_fops;
disk->private_data = nbd;
sprintf(disk->disk_name, "nbd%d", index);
- add_disk(disk);
+ err = add_disk(disk);
+ if (err)
+ goto out_err_disk;
/*
* Now publish the device.
@@ -1771,6 +1827,8 @@ static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
nbd_total_devices++;
return nbd;
+out_err_disk:
+ blk_cleanup_disk(disk);
out_free_idr:
mutex_lock(&nbd_index_mutex);
idr_remove(&nbd_index_idr, index);
@@ -2135,7 +2193,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
mutex_lock(&nbd->config_lock);
config = nbd->config;
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
- !nbd->task_recv) {
+ !nbd->pid) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
ret = -EINVAL;
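The nbd changes above introduce NBD_CMD_INFLIGHT so that exactly one path (reply, timeout or queue clearing) completes a command: the bit is set under cmd->lock once the request hit the wire, and a completer only proceeds if it can clear the bit. A self-contained sketch of that ownership handshake; struct sketch_cmd and the bit number are hypothetical:

        #include <linux/bitops.h>
        #include <linux/mutex.h>

        #define SKETCH_CMD_INFLIGHT 2

        struct sketch_cmd {
                struct mutex lock;      /* must be mutex_init()ed by its owner */
                unsigned long flags;
        };

        /* Submission side: mark the command inflight once it was sent. */
        static void sketch_mark_inflight(struct sketch_cmd *cmd)
        {
                mutex_lock(&cmd->lock);
                __set_bit(SKETCH_CMD_INFLIGHT, &cmd->flags);
                mutex_unlock(&cmd->lock);
        }

        /* Completion side: only the caller that clears the bit may complete. */
        static bool sketch_try_complete(struct sketch_cmd *cmd)
        {
                bool owned;

                mutex_lock(&cmd->lock);
                owned = __test_and_clear_bit(SKETCH_CMD_INFLIGHT, &cmd->flags);
                mutex_unlock(&cmd->lock);
                return owned;
        }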
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 187d779c8ca0..323af5c9c802 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -92,6 +92,10 @@ static int g_submit_queues = 1;
module_param_named(submit_queues, g_submit_queues, int, 0444);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
+static int g_poll_queues = 1;
+module_param_named(poll_queues, g_poll_queues, int, 0444);
+MODULE_PARM_DESC(poll_queues, "Number of IOPOLL submission queues");
+
static int g_home_node = NUMA_NO_NODE;
module_param_named(home_node, g_home_node, int, 0444);
MODULE_PARM_DESC(home_node, "Home node for the device");
@@ -324,29 +328,69 @@ nullb_device_##NAME##_store(struct config_item *item, const char *page, \
} \
CONFIGFS_ATTR(nullb_device_, NAME);
-static int nullb_apply_submit_queues(struct nullb_device *dev,
- unsigned int submit_queues)
+static int nullb_update_nr_hw_queues(struct nullb_device *dev,
+ unsigned int submit_queues,
+ unsigned int poll_queues)
+
{
- struct nullb *nullb = dev->nullb;
struct blk_mq_tag_set *set;
+ int ret, nr_hw_queues;
- if (!nullb)
+ if (!dev->nullb)
return 0;
/*
+ * Make sure at least one queue exists for each of submit and poll.
+ */
+ if (!submit_queues || !poll_queues)
+ return -EINVAL;
+
+ /*
* Make sure that null_init_hctx() does not access nullb->queues[] past
* the end of that array.
*/
- if (submit_queues > nr_cpu_ids)
+ if (submit_queues > nr_cpu_ids || poll_queues > g_poll_queues)
return -EINVAL;
- set = nullb->tag_set;
- blk_mq_update_nr_hw_queues(set, submit_queues);
- return set->nr_hw_queues == submit_queues ? 0 : -ENOMEM;
+
+ /*
+ * Keep previous and new queue numbers in nullb_device for reference in
+ * the callback function null_map_queues().
+ */
+ dev->prev_submit_queues = dev->submit_queues;
+ dev->prev_poll_queues = dev->poll_queues;
+ dev->submit_queues = submit_queues;
+ dev->poll_queues = poll_queues;
+
+ set = dev->nullb->tag_set;
+ nr_hw_queues = submit_queues + poll_queues;
+ blk_mq_update_nr_hw_queues(set, nr_hw_queues);
+ ret = set->nr_hw_queues == nr_hw_queues ? 0 : -ENOMEM;
+
+ if (ret) {
+ /* on error, revert the queue numbers */
+ dev->submit_queues = dev->prev_submit_queues;
+ dev->poll_queues = dev->prev_poll_queues;
+ }
+
+ return ret;
+}
+
+static int nullb_apply_submit_queues(struct nullb_device *dev,
+ unsigned int submit_queues)
+{
+ return nullb_update_nr_hw_queues(dev, submit_queues, dev->poll_queues);
+}
+
+static int nullb_apply_poll_queues(struct nullb_device *dev,
+ unsigned int poll_queues)
+{
+ return nullb_update_nr_hw_queues(dev, dev->submit_queues, poll_queues);
}
NULLB_DEVICE_ATTR(size, ulong, NULL);
NULLB_DEVICE_ATTR(completion_nsec, ulong, NULL);
NULLB_DEVICE_ATTR(submit_queues, uint, nullb_apply_submit_queues);
+NULLB_DEVICE_ATTR(poll_queues, uint, nullb_apply_poll_queues);
NULLB_DEVICE_ATTR(home_node, uint, NULL);
NULLB_DEVICE_ATTR(queue_mode, uint, NULL);
NULLB_DEVICE_ATTR(blocksize, uint, NULL);
@@ -466,6 +510,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
&nullb_device_attr_size,
&nullb_device_attr_completion_nsec,
&nullb_device_attr_submit_queues,
+ &nullb_device_attr_poll_queues,
&nullb_device_attr_home_node,
&nullb_device_attr_queue_mode,
&nullb_device_attr_blocksize,
@@ -593,6 +638,9 @@ static struct nullb_device *null_alloc_dev(void)
dev->size = g_gb * 1024;
dev->completion_nsec = g_completion_nsec;
dev->submit_queues = g_submit_queues;
+ dev->prev_submit_queues = g_submit_queues;
+ dev->poll_queues = g_poll_queues;
+ dev->prev_poll_queues = g_poll_queues;
dev->home_node = g_home_node;
dev->queue_mode = g_queue_mode;
dev->blocksize = g_bs;
@@ -1422,7 +1470,7 @@ static struct nullb_queue *nullb_to_queue(struct nullb *nullb)
return &nullb->queues[index];
}
-static blk_qc_t null_submit_bio(struct bio *bio)
+static void null_submit_bio(struct bio *bio)
{
sector_t sector = bio->bi_iter.bi_sector;
sector_t nr_sectors = bio_sectors(bio);
@@ -1434,7 +1482,6 @@ static blk_qc_t null_submit_bio(struct bio *bio)
cmd->bio = bio;
null_handle_cmd(cmd, sector, nr_sectors, bio_op(bio));
- return BLK_QC_T_NONE;
}
static bool should_timeout_request(struct request *rq)
@@ -1455,12 +1502,100 @@ static bool should_requeue_request(struct request *rq)
return false;
}
+static int null_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nullb *nullb = set->driver_data;
+ int i, qoff;
+ unsigned int submit_queues = g_submit_queues;
+ unsigned int poll_queues = g_poll_queues;
+
+ if (nullb) {
+ struct nullb_device *dev = nullb->dev;
+
+ /*
+		 * Refer to nr_hw_queues of the tag set to check whether the
+		 * expected number of hardware queues was prepared. If the
+		 * block layer failed to prepare them, fall back to the
+		 * previous numbers of submit and poll queues to map queues.
+ */
+ if (set->nr_hw_queues ==
+ dev->submit_queues + dev->poll_queues) {
+ submit_queues = dev->submit_queues;
+ poll_queues = dev->poll_queues;
+ } else if (set->nr_hw_queues ==
+ dev->prev_submit_queues + dev->prev_poll_queues) {
+ submit_queues = dev->prev_submit_queues;
+ poll_queues = dev->prev_poll_queues;
+ } else {
+ pr_warn("tag set has unexpected nr_hw_queues: %d\n",
+ set->nr_hw_queues);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0, qoff = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ switch (i) {
+ case HCTX_TYPE_DEFAULT:
+ map->nr_queues = submit_queues;
+ break;
+ case HCTX_TYPE_READ:
+ map->nr_queues = 0;
+ continue;
+ case HCTX_TYPE_POLL:
+ map->nr_queues = poll_queues;
+ break;
+ }
+ map->queue_offset = qoff;
+ qoff += map->nr_queues;
+ blk_mq_map_queues(map);
+ }
+
+ return 0;
+}
+
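
The qoff bookkeeping is easiest to see with concrete numbers: with 4 submit and 2 poll queues, HCTX_TYPE_DEFAULT covers hardware queues 0-3, HCTX_TYPE_READ gets none, and HCTX_TYPE_POLL covers 4-5. A self-contained sketch of just that arithmetic (illustrative enum and struct names, not kernel code):

#include <stdio.h>

enum { TYPE_DEFAULT, TYPE_READ, TYPE_POLL, NR_TYPES };

struct queue_map {
	unsigned int nr_queues;
	unsigned int queue_offset;
};

int main(void)
{
	unsigned int submit_queues = 4, poll_queues = 2;
	struct queue_map map[NR_TYPES] = { { 0, 0 } };
	unsigned int i, qoff = 0;

	for (i = 0; i < NR_TYPES; i++) {
		switch (i) {
		case TYPE_DEFAULT:
			map[i].nr_queues = submit_queues;
			break;
		case TYPE_READ:
			map[i].nr_queues = 0;	/* no dedicated read queues */
			continue;
		case TYPE_POLL:
			map[i].nr_queues = poll_queues;
			break;
		}
		map[i].queue_offset = qoff;	/* first hw queue of this type */
		qoff += map[i].nr_queues;
	}

	for (i = 0; i < NR_TYPES; i++)
		printf("map %u: %u queues starting at hw queue %u\n",
		       i, map[i].nr_queues, map[i].queue_offset);
	return 0;
}

This matches the total null_init_tag_set() asks for when poll queues are enabled (nr_hw_queues = submit + poll, nr_maps = 3).
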
+static int null_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
+{
+ struct nullb_queue *nq = hctx->driver_data;
+ LIST_HEAD(list);
+ int nr = 0;
+
+ spin_lock(&nq->poll_lock);
+ list_splice_init(&nq->poll_list, &list);
+ spin_unlock(&nq->poll_lock);
+
+ while (!list_empty(&list)) {
+ struct nullb_cmd *cmd;
+ struct request *req;
+
+ req = list_first_entry(&list, struct request, queuelist);
+ list_del_init(&req->queuelist);
+ cmd = blk_mq_rq_to_pdu(req);
+ cmd->error = null_process_cmd(cmd, req_op(req), blk_rq_pos(req),
+ blk_rq_sectors(req));
+ end_cmd(cmd);
+ nr++;
+ }
+
+ return nr;
+}
+
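
null_poll() uses the classic splice-under-lock pattern: steal the whole pending list while holding poll_lock, then complete the stolen requests with the lock dropped. A runnable userspace sketch of the same pattern, with a pthread mutex and a hand-rolled singly linked list standing in for the kernel primitives:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct req {
	int id;
	struct req *next;
};

static struct req *pending;		/* protected by pending_lock */
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

static void submit(int id)
{
	struct req *r = malloc(sizeof(*r));

	if (!r)
		return;
	r->id = id;
	pthread_mutex_lock(&pending_lock);
	r->next = pending;		/* queue_rq side: park the request */
	pending = r;
	pthread_mutex_unlock(&pending_lock);
}

static int poll_once(void)
{
	struct req *list, *r;
	int nr = 0;

	pthread_mutex_lock(&pending_lock);
	list = pending;			/* splice: take the whole list */
	pending = NULL;
	pthread_mutex_unlock(&pending_lock);

	while ((r = list)) {		/* complete without the lock held */
		list = r->next;
		printf("completed request %d\n", r->id);
		free(r);
		nr++;
	}
	return nr;
}

int main(void)
{
	submit(1);
	submit(2);
	printf("polled %d requests\n", poll_once());
	return 0;
}

Completing outside the lock keeps the hold time short; it is also why the poll branch added to null_timeout_rq() below takes poll_lock before unlinking a timed-out request.
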
static enum blk_eh_timer_return null_timeout_rq(struct request *rq, bool res)
{
+ struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
pr_info("rq %p timed out\n", rq);
+ if (hctx->type == HCTX_TYPE_POLL) {
+ struct nullb_queue *nq = hctx->driver_data;
+
+ spin_lock(&nq->poll_lock);
+ list_del_init(&rq->queuelist);
+ spin_unlock(&nq->poll_lock);
+ }
+
/*
* If the device is marked as blocking (i.e. memory backed or zoned
* device), the submission path may be blocked waiting for resources
@@ -1481,10 +1616,11 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nullb_queue *nq = hctx->driver_data;
sector_t nr_sectors = blk_rq_sectors(bd->rq);
sector_t sector = blk_rq_pos(bd->rq);
+ const bool is_poll = hctx->type == HCTX_TYPE_POLL;
might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING);
- if (nq->dev->irqmode == NULL_IRQ_TIMER) {
+ if (!is_poll && nq->dev->irqmode == NULL_IRQ_TIMER) {
hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
cmd->timer.function = null_cmd_timer_expired;
}
@@ -1508,6 +1644,13 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
return BLK_STS_OK;
}
}
+
+ if (is_poll) {
+ spin_lock(&nq->poll_lock);
+ list_add_tail(&bd->rq->queuelist, &nq->poll_list);
+ spin_unlock(&nq->poll_lock);
+ return BLK_STS_OK;
+ }
if (cmd->fake_timeout)
return BLK_STS_OK;
@@ -1543,6 +1686,8 @@ static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
init_waitqueue_head(&nq->wait);
nq->queue_depth = nullb->queue_depth;
nq->dev = nullb->dev;
+ INIT_LIST_HEAD(&nq->poll_list);
+ spin_lock_init(&nq->poll_lock);
}
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *driver_data,
@@ -1568,6 +1713,8 @@ static const struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.complete = null_complete_rq,
.timeout = null_timeout_rq,
+ .poll = null_poll,
+ .map_queues = null_map_queues,
.init_hctx = null_init_hctx,
.exit_hctx = null_exit_hctx,
};
@@ -1664,13 +1811,17 @@ static int setup_commands(struct nullb_queue *nq)
static int setup_queues(struct nullb *nullb)
{
- nullb->queues = kcalloc(nr_cpu_ids, sizeof(struct nullb_queue),
+ int nqueues = nr_cpu_ids;
+
+ if (g_poll_queues)
+ nqueues += g_poll_queues;
+
+ nullb->queues = kcalloc(nqueues, sizeof(struct nullb_queue),
GFP_KERNEL);
if (!nullb->queues)
return -ENOMEM;
nullb->queue_depth = nullb->dev->hw_queue_depth;
-
return 0;
}
@@ -1722,9 +1873,14 @@ static int null_gendisk_register(struct nullb *nullb)
static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
{
+ int poll_queues;
+
set->ops = &null_mq_ops;
set->nr_hw_queues = nullb ? nullb->dev->submit_queues :
g_submit_queues;
+ poll_queues = nullb ? nullb->dev->poll_queues : g_poll_queues;
+ if (poll_queues)
+ set->nr_hw_queues += poll_queues;
set->queue_depth = nullb ? nullb->dev->hw_queue_depth :
g_hw_queue_depth;
set->numa_node = nullb ? nullb->dev->home_node : g_home_node;
@@ -1734,7 +1890,11 @@ static int null_init_tag_set(struct nullb *nullb, struct blk_mq_tag_set *set)
set->flags |= BLK_MQ_F_NO_SCHED;
if (g_shared_tag_bitmap)
set->flags |= BLK_MQ_F_TAG_HCTX_SHARED;
- set->driver_data = NULL;
+ set->driver_data = nullb;
+ if (g_poll_queues)
+ set->nr_maps = 3;
+ else
+ set->nr_maps = 1;
if ((nullb && nullb->dev->blocking) || g_blocking)
set->flags |= BLK_MQ_F_BLOCKING;
@@ -1754,6 +1914,13 @@ static int null_validate_conf(struct nullb_device *dev)
dev->submit_queues = nr_cpu_ids;
else if (dev->submit_queues == 0)
dev->submit_queues = 1;
+ dev->prev_submit_queues = dev->submit_queues;
+
+ if (dev->poll_queues > g_poll_queues)
+ dev->poll_queues = g_poll_queues;
+ else if (dev->poll_queues == 0)
+ dev->poll_queues = 1;
+ dev->prev_poll_queues = dev->poll_queues;
dev->queue_mode = min_t(unsigned int, dev->queue_mode, NULL_Q_MQ);
dev->irqmode = min_t(unsigned int, dev->irqmode, NULL_IRQ_TIMER);
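
Note how the module parameter acts as a hard ceiling for per-device configfs settings: nullb_update_nr_hw_queues() rejects poll_queues values above g_poll_queues, null_validate_conf() clamps the device value to at most g_poll_queues (and bumps a zero to one), and setup_queues() sizes nullb->queues for nr_cpu_ids plus g_poll_queues entries, so a device can never be configured beyond what was reserved at module load time.
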
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 64bef125d1df..78eb56b0ca55 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -32,6 +32,9 @@ struct nullb_queue {
struct nullb_device *dev;
unsigned int requeue_selection;
+ struct list_head poll_list;
+ spinlock_t poll_lock;
+
struct nullb_cmd *cmds;
};
@@ -83,6 +86,9 @@ struct nullb_device {
unsigned int zone_max_open; /* max number of open zones */
unsigned int zone_max_active; /* max number of active zones */
unsigned int submit_queues; /* number of submission queues */
+ unsigned int prev_submit_queues; /* number of submission queues before change */
+ unsigned int poll_queues; /* number of IOPOLL submission queues */
+ unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
unsigned int home_node; /* home node for the device */
unsigned int queue_mode; /* block interface */
unsigned int blocksize; /* block size */
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index f9cdd11f02f5..f6b1d63e96e1 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -183,8 +183,6 @@ static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
static int pcd_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc);
-static int pcd_detect(void);
-static void pcd_probe_capabilities(void);
static void do_pcd_read_drq(void);
static blk_status_t pcd_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd);
@@ -302,53 +300,6 @@ static const struct blk_mq_ops pcd_mq_ops = {
.queue_rq = pcd_queue_rq,
};
-static void pcd_init_units(void)
-{
- struct pcd_unit *cd;
- int unit;
-
- pcd_drive_count = 0;
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&cd->tag_set, cd);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&cd->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&cd->rq_list);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- cd->disk = disk;
- cd->pi = &cd->pia;
- cd->present = 0;
- cd->last_sense = 0;
- cd->changed = 1;
- cd->drive = (*drives[unit])[D_SLV];
- if ((*drives[unit])[D_PRT])
- pcd_drive_count++;
-
- cd->name = &cd->info.name[0];
- snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
- cd->info.ops = &pcd_dops;
- cd->info.handle = cd;
- cd->info.speed = 0;
- cd->info.capacity = 1;
- cd->info.mask = 0;
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, cd->name); /* umm... */
- disk->fops = &pcd_bdops;
- disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- }
-}
-
static int pcd_open(struct cdrom_device_info *cdi, int purpose)
{
struct pcd_unit *cd = cdi->handle;
@@ -630,10 +581,11 @@ static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
return CDS_DISC_OK;
}
-static int pcd_identify(struct pcd_unit *cd, char *id)
+static int pcd_identify(struct pcd_unit *cd)
{
- int k, s;
char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+ char id[18];
+ int k, s;
pcd_bufblk = -1;
@@ -661,108 +613,47 @@ static int pcd_identify(struct pcd_unit *cd, char *id)
}
/*
- * returns 0, with id set if drive is detected
- * -1, if drive detection failed
+ * Returns 0 if a drive is detected, otherwise an error code.
*/
-static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
+static int pcd_probe(struct pcd_unit *cd, int ms)
{
if (ms == -1) {
for (cd->drive = 0; cd->drive <= 1; cd->drive++)
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
} else {
cd->drive = ms;
- if (!pcd_reset(cd) && !pcd_identify(cd, id))
+ if (!pcd_reset(cd) && !pcd_identify(cd))
return 0;
}
- return -1;
+ return -ENODEV;
}
-static void pcd_probe_capabilities(void)
+static int pcd_probe_capabilities(struct pcd_unit *cd)
{
- int unit, r;
- char buffer[32];
char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
- struct pcd_unit *cd;
-
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->present)
- continue;
- r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
- if (r)
- continue;
- /* we should now have the cap page */
- if ((buffer[11] & 1) == 0)
- cd->info.mask |= CDC_CD_R;
- if ((buffer[11] & 2) == 0)
- cd->info.mask |= CDC_CD_RW;
- if ((buffer[12] & 1) == 0)
- cd->info.mask |= CDC_PLAY_AUDIO;
- if ((buffer[14] & 1) == 0)
- cd->info.mask |= CDC_LOCK;
- if ((buffer[14] & 8) == 0)
- cd->info.mask |= CDC_OPEN_TRAY;
- if ((buffer[14] >> 6) == 0)
- cd->info.mask |= CDC_CLOSE_TRAY;
- }
-}
-
-static int pcd_detect(void)
-{
- char id[18];
- int k, unit;
- struct pcd_unit *cd;
+ char buffer[32];
+ int ret;
- printk("%s: %s version %s, major %d, nice %d\n",
- name, name, PCD_VERSION, major, nice);
+ ret = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
+ if (ret)
+ return ret;
+
+ /* we should now have the cap page */
+ if ((buffer[11] & 1) == 0)
+ cd->info.mask |= CDC_CD_R;
+ if ((buffer[11] & 2) == 0)
+ cd->info.mask |= CDC_CD_RW;
+ if ((buffer[12] & 1) == 0)
+ cd->info.mask |= CDC_PLAY_AUDIO;
+ if ((buffer[14] & 1) == 0)
+ cd->info.mask |= CDC_LOCK;
+ if ((buffer[14] & 8) == 0)
+ cd->info.mask |= CDC_OPEN_TRAY;
+ if ((buffer[14] >> 6) == 0)
+ cd->info.mask |= CDC_CLOSE_TRAY;
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
-
- k = 0;
- if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- cd = pcd;
- if (cd->disk && pi_init(cd->pi, 1, -1, -1, -1, -1, -1,
- pcd_buffer, PI_PCD, verbose, cd->name)) {
- if (!pcd_probe(cd, -1, id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- } else {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (!cd->disk)
- continue;
- if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pcd_buffer, PI_PCD, verbose, cd->name))
- continue;
- if (!pcd_probe(cd, conf[D_SLV], id)) {
- cd->present = 1;
- k++;
- } else
- pi_release(cd->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No CD-ROM drive found\n", name);
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
- blk_cleanup_disk(cd->disk);
- blk_mq_free_tag_set(&cd->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return 0;
}
/* I/O request processing */
@@ -999,43 +890,130 @@ static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
return 0;
}
+static int pcd_init_unit(struct pcd_unit *cd, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&cd->tag_set, &pcd_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&cd->tag_set, cd);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+
+ INIT_LIST_HEAD(&cd->rq_list);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+ cd->disk = disk;
+ cd->pi = &cd->pia;
+ cd->present = 0;
+ cd->last_sense = 0;
+ cd->changed = 1;
+ cd->drive = (*drives[cd - pcd])[D_SLV];
+
+ cd->name = &cd->info.name[0];
+	snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, (int)(cd - pcd));
+ cd->info.ops = &pcd_dops;
+ cd->info.handle = cd;
+ cd->info.speed = 0;
+ cd->info.capacity = 1;
+ cd->info.mask = 0;
+ disk->major = major;
+	disk->first_minor = cd - pcd;
+ disk->minors = 1;
+ strcpy(disk->disk_name, cd->name); /* umm... */
+ disk->fops = &pcd_bdops;
+ disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+
+ if (!pi_init(cd->pi, autoprobe, port, mode, unit, protocol, delay,
+ pcd_buffer, PI_PCD, verbose, cd->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pcd_probe(cd, ms);
+ if (ret)
+ goto out_pi_release;
+
+ cd->present = 1;
+ pcd_probe_capabilities(cd);
+ ret = register_cdrom(cd->disk, &cd->info);
+ if (ret)
+ goto out_pi_release;
+ ret = add_disk(cd->disk);
+ if (ret)
+ goto out_unreg_cdrom;
+ return 0;
+
+out_unreg_cdrom:
+ unregister_cdrom(&cd->info);
+out_pi_release:
+ pi_release(cd->pi);
+out_free_disk:
+ blk_cleanup_disk(cd->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&cd->tag_set);
+ return ret;
+}
+
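
pcd_init_unit() unwinds strictly in reverse order of acquisition: tag set, then disk, then the parallel-port handle, then cdrom registration, with one goto label per resource. A small self-contained sketch of that style, using plain allocations as stand-ins for the real resources (struct unit, init_unit() and release_unit() are invented names):

#include <stdio.h>
#include <stdlib.h>

struct unit {
	void *tag_set;	/* stand-in for the blk-mq tag set */
	void *disk;	/* stand-in for the gendisk */
	void *port;	/* stand-in for the parallel-port handle */
};

static int init_unit(struct unit *u)
{
	u->tag_set = malloc(32);
	if (!u->tag_set)
		return -1;

	u->disk = malloc(32);
	if (!u->disk)
		goto out_free_tag_set;

	u->port = malloc(32);
	if (!u->port)
		goto out_free_disk;

	/* probing and registration would happen here */
	return 0;	/* success: all resources stay acquired */

out_free_disk:
	free(u->disk);
out_free_tag_set:
	free(u->tag_set);
	return -1;
}

static void release_unit(struct unit *u)
{
	/* teardown mirrors the error labels, again in reverse order */
	free(u->port);
	free(u->disk);
	free(u->tag_set);
}

int main(void)
{
	struct unit u = { 0 };

	if (init_unit(&u) == 0) {
		puts("unit initialized");
		release_unit(&u);
	}
	return 0;
}

The same shape repeats in pd_probe_drive() and pf_init_unit() further down.
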
static int __init pcd_init(void)
{
- struct pcd_unit *cd;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pcd_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pcd_detect())
- return -ENODEV;
+ pr_info("%s: %s version %s, major %d, nice %d\n",
+ name, name, PCD_VERSION, major, nice);
- /* get the atapi capabilities page */
- pcd_probe_capabilities();
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
+ }
- if (register_blkdev(major, name)) {
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
- continue;
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ if ((*drives[unit])[D_PRT])
+ pcd_drive_count++;
+ }
+
+ if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pcd_init_unit(pcd, 1, -1, -1, -1, -1, -1, -1))
+ found++;
+ } else {
+ for (unit = 0; unit < PCD_UNITS; unit++) {
+ struct pcd_unit *cd = &pcd[unit];
+ int *conf = *drives[unit];
- blk_cleanup_queue(cd->disk->queue);
- blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
+ if (!conf[D_PRT])
+ continue;
+ if (!pcd_init_unit(cd, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ conf[D_SLV]))
+ found++;
}
- return -EBUSY;
}
- for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (cd->present) {
- register_cdrom(cd->disk, &cd->info);
- cd->disk->private_data = cd;
- add_disk(cd->disk);
- }
+ if (!found) {
+ pr_info("%s: No CD-ROM drive found\n", name);
+ goto out_unregister_pi_driver;
}
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pcd_exit(void)
@@ -1044,20 +1022,18 @@ static void __exit pcd_exit(void)
int unit;
for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
- if (!cd->disk)
+ if (!cd->present)
continue;
- if (cd->present) {
- del_gendisk(cd->disk);
- pi_release(cd->pi);
- unregister_cdrom(&cd->info);
- }
- blk_cleanup_queue(cd->disk->queue);
+ unregister_cdrom(&cd->info);
+ del_gendisk(cd->disk);
+ pi_release(cd->pi);
+ blk_cleanup_disk(cd->disk);
+
blk_mq_free_tag_set(&cd->tag_set);
- put_disk(cd->disk);
}
- unregister_blkdev(major, name);
pi_unregister_driver(par_drv);
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 675327df6aff..fba865058a17 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -775,14 +775,14 @@ static int pd_special_command(struct pd_unit *disk,
struct request *rq;
struct pd_req *req;
- rq = blk_get_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
+ rq = blk_mq_alloc_request(disk->gd->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = blk_mq_rq_to_pdu(rq);
req->func = func;
blk_execute_rq(disk->gd, rq, 0);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return 0;
}
@@ -875,9 +875,27 @@ static const struct blk_mq_ops pd_mq_ops = {
.queue_rq = pd_queue_rq,
};
-static void pd_probe_drive(struct pd_unit *disk)
+static int pd_probe_drive(struct pd_unit *disk, int autoprobe, int port,
+ int mode, int unit, int protocol, int delay)
{
+ int index = disk - pd;
+ int *parm = *drives[index];
struct gendisk *p;
+ int ret;
+
+ disk->pi = &disk->pia;
+ disk->access = 0;
+ disk->changed = 1;
+ disk->capacity = 0;
+ disk->drive = parm[D_SLV];
+ snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a' + index);
+ disk->alt_geom = parm[D_GEO];
+ disk->standby = parm[D_SBY];
+ INIT_LIST_HEAD(&disk->rq_list);
+
+ if (!pi_init(disk->pi, autoprobe, port, mode, unit, protocol, delay,
+ pd_scratch, PI_PD, verbose, disk->name))
+ return -ENXIO;
memset(&disk->tag_set, 0, sizeof(disk->tag_set));
disk->tag_set.ops = &pd_mq_ops;
@@ -887,14 +905,14 @@ static void pd_probe_drive(struct pd_unit *disk)
disk->tag_set.queue_depth = 2;
disk->tag_set.numa_node = NUMA_NO_NODE;
disk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
-
- if (blk_mq_alloc_tag_set(&disk->tag_set))
- return;
+ ret = blk_mq_alloc_tag_set(&disk->tag_set);
+ if (ret)
+ goto pi_release;
p = blk_mq_alloc_disk(&disk->tag_set, disk);
if (IS_ERR(p)) {
- blk_mq_free_tag_set(&disk->tag_set);
- return;
+ ret = PTR_ERR(p);
+ goto free_tag_set;
}
disk->gd = p;
@@ -905,102 +923,88 @@ static void pd_probe_drive(struct pd_unit *disk)
p->minors = 1 << PD_BITS;
p->events = DISK_EVENT_MEDIA_CHANGE;
p->private_data = disk;
-
blk_queue_max_hw_sectors(p->queue, cluster);
blk_queue_bounce_limit(p->queue, BLK_BOUNCE_HIGH);
if (disk->drive == -1) {
- for (disk->drive = 0; disk->drive <= 1; disk->drive++)
- if (pd_special_command(disk, pd_identify) == 0)
- return;
- } else if (pd_special_command(disk, pd_identify) == 0)
- return;
- disk->gd = NULL;
+ for (disk->drive = 0; disk->drive <= 1; disk->drive++) {
+ ret = pd_special_command(disk, pd_identify);
+ if (ret == 0)
+ break;
+ }
+ } else {
+ ret = pd_special_command(disk, pd_identify);
+ }
+ if (ret)
+ goto put_disk;
+ set_capacity(disk->gd, disk->capacity);
+ ret = add_disk(disk->gd);
+ if (ret)
+ goto cleanup_disk;
+ return 0;
+cleanup_disk:
+ blk_cleanup_disk(disk->gd);
+put_disk:
put_disk(p);
+ disk->gd = NULL;
+free_tag_set:
+ blk_mq_free_tag_set(&disk->tag_set);
+pi_release:
+ pi_release(disk->pi);
+ return ret;
}
-static int pd_detect(void)
+static int __init pd_init(void)
{
int found = 0, unit, pd_drive_count = 0;
struct pd_unit *disk;
- for (unit = 0; unit < PD_UNITS; unit++) {
- int *parm = *drives[unit];
- struct pd_unit *disk = pd + unit;
- disk->pi = &disk->pia;
- disk->access = 0;
- disk->changed = 1;
- disk->capacity = 0;
- disk->drive = parm[D_SLV];
- snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
- disk->alt_geom = parm[D_GEO];
- disk->standby = parm[D_SBY];
- if (parm[D_PRT])
- pd_drive_count++;
- INIT_LIST_HEAD(&disk->rq_list);
- }
+ if (disable)
+ return -ENODEV;
+
+ if (register_blkdev(major, name))
+ return -ENODEV;
+
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PD_VERSION, major, cluster, nice);
par_drv = pi_register_driver(name);
if (!par_drv) {
pr_err("failed to register %s driver\n", name);
- return -1;
+ goto out_unregister_blkdev;
}
- if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
- disk = pd;
- if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
- PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
+ for (unit = 0; unit < PD_UNITS; unit++) {
+ int *parm = *drives[unit];
+ if (parm[D_PRT])
+ pd_drive_count++;
+ }
+
+ if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+ if (!pd_probe_drive(pd, 1, -1, -1, -1, -1, -1))
+ found++;
} else {
for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
int *parm = *drives[unit];
if (!parm[D_PRT])
continue;
- if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
- parm[D_UNI], parm[D_PRO], parm[D_DLY],
- pd_scratch, PI_PD, verbose, disk->name)) {
- pd_probe_drive(disk);
- if (!disk->gd)
- pi_release(disk->pi);
- }
- }
- }
- for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
- if (disk->gd) {
- set_capacity(disk->gd, disk->capacity);
- add_disk(disk->gd);
- found = 1;
+ if (!pd_probe_drive(disk, 0, parm[D_PRT], parm[D_MOD],
+ parm[D_UNI], parm[D_PRO], parm[D_DLY]))
+ found++;
}
}
if (!found) {
printk("%s: no valid drive found\n", name);
- pi_unregister_driver(par_drv);
+ goto out_pi_unregister_driver;
}
- return found;
-}
-
-static int __init pd_init(void)
-{
- if (disable)
- goto out1;
-
- if (register_blkdev(major, name))
- goto out1;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PD_VERSION, major, cluster, nice);
- if (!pd_detect())
- goto out2;
return 0;
-out2:
+out_pi_unregister_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
unregister_blkdev(major, name);
-out1:
return -ENODEV;
}
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index d5b9c88ba76f..bf8d0ef41a0a 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -214,7 +214,6 @@ static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static void pf_release(struct gendisk *disk, fmode_t mode);
-static int pf_detect(void);
static void do_pf_read(void);
static void do_pf_read_start(void);
static void do_pf_write(void);
@@ -285,45 +284,6 @@ static const struct blk_mq_ops pf_mq_ops = {
.queue_rq = pf_queue_rq,
};
-static void __init pf_init_units(void)
-{
- struct pf_unit *pf;
- int unit;
-
- pf_drive_count = 0;
- for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
- struct gendisk *disk;
-
- if (blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
- BLK_MQ_F_SHOULD_MERGE))
- continue;
-
- disk = blk_mq_alloc_disk(&pf->tag_set, pf);
- if (IS_ERR(disk)) {
- blk_mq_free_tag_set(&pf->tag_set);
- continue;
- }
-
- INIT_LIST_HEAD(&pf->rq_list);
- blk_queue_max_segments(disk->queue, cluster);
- blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
- pf->disk = disk;
- pf->pi = &pf->pia;
- pf->media_status = PF_NM;
- pf->drive = (*drives[unit])[D_SLV];
- pf->lun = (*drives[unit])[D_LUN];
- snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
- disk->major = major;
- disk->first_minor = unit;
- disk->minors = 1;
- strcpy(disk->disk_name, pf->name);
- disk->fops = &pf_fops;
- disk->events = DISK_EVENT_MEDIA_CHANGE;
- if (!(*drives[unit])[D_PRT])
- pf_drive_count++;
- }
-}
-
static int pf_open(struct block_device *bdev, fmode_t mode)
{
struct pf_unit *pf = bdev->bd_disk->private_data;
@@ -691,9 +651,9 @@ static int pf_identify(struct pf_unit *pf)
return 0;
}
-/* returns 0, with id set if drive is detected
- -1, if drive detection failed
-*/
+/*
+ * Returns 0 if a drive is detected, otherwise an error code.
+ */
static int pf_probe(struct pf_unit *pf)
{
if (pf->drive == -1) {
@@ -715,60 +675,7 @@ static int pf_probe(struct pf_unit *pf)
if (!pf_identify(pf))
return 0;
}
- return -1;
-}
-
-static int pf_detect(void)
-{
- struct pf_unit *pf = units;
- int k, unit;
-
- printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
- name, name, PF_VERSION, major, cluster, nice);
-
- par_drv = pi_register_driver(name);
- if (!par_drv) {
- pr_err("failed to register %s driver\n", name);
- return -1;
- }
- k = 0;
- if (pf_drive_count == 0) {
- if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
- verbose, pf->name)) {
- if (!pf_probe(pf) && pf->disk) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
-
- } else
- for (unit = 0; unit < PF_UNITS; unit++, pf++) {
- int *conf = *drives[unit];
- if (!conf[D_PRT])
- continue;
- if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
- conf[D_UNI], conf[D_PRO], conf[D_DLY],
- pf_scratch, PI_PF, verbose, pf->name)) {
- if (pf->disk && !pf_probe(pf)) {
- pf->present = 1;
- k++;
- } else
- pi_release(pf->pi);
- }
- }
- if (k)
- return 0;
-
- printk("%s: No ATAPI disk detected\n", name);
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_disk(pf->disk);
- blk_mq_free_tag_set(&pf->tag_set);
- }
- pi_unregister_driver(par_drv);
- return -1;
+ return -ENODEV;
}
/* The i/o request engine */
@@ -1014,61 +921,134 @@ static void do_pf_write_done(void)
next_request(0);
}
+static int __init pf_init_unit(struct pf_unit *pf, bool autoprobe, int port,
+ int mode, int unit, int protocol, int delay, int ms)
+{
+ struct gendisk *disk;
+ int ret;
+
+ ret = blk_mq_alloc_sq_tag_set(&pf->tag_set, &pf_mq_ops, 1,
+ BLK_MQ_F_SHOULD_MERGE);
+ if (ret)
+ return ret;
+
+ disk = blk_mq_alloc_disk(&pf->tag_set, pf);
+ if (IS_ERR(disk)) {
+ ret = PTR_ERR(disk);
+ goto out_free_tag_set;
+ }
+ disk->major = major;
+ disk->first_minor = pf - units;
+ disk->minors = 1;
+	snprintf(pf->name, PF_NAMELEN, "%s%d", name, disk->first_minor);
+	strcpy(disk->disk_name, pf->name);
+ disk->fops = &pf_fops;
+ disk->events = DISK_EVENT_MEDIA_CHANGE;
+ disk->private_data = pf;
+
+ blk_queue_max_segments(disk->queue, cluster);
+ blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
+
+ INIT_LIST_HEAD(&pf->rq_list);
+ pf->disk = disk;
+ pf->pi = &pf->pia;
+ pf->media_status = PF_NM;
+ pf->drive = (*drives[disk->first_minor])[D_SLV];
+ pf->lun = (*drives[disk->first_minor])[D_LUN];
+
+ if (!pi_init(pf->pi, autoprobe, port, mode, unit, protocol, delay,
+ pf_scratch, PI_PF, verbose, pf->name)) {
+ ret = -ENODEV;
+ goto out_free_disk;
+ }
+ ret = pf_probe(pf);
+ if (ret)
+ goto out_pi_release;
+
+ ret = add_disk(disk);
+ if (ret)
+ goto out_pi_release;
+ pf->present = 1;
+ return 0;
+
+out_pi_release:
+ pi_release(pf->pi);
+out_free_disk:
+ blk_cleanup_disk(pf->disk);
+out_free_tag_set:
+ blk_mq_free_tag_set(&pf->tag_set);
+ return ret;
+}
+
static int __init pf_init(void)
{ /* preliminary initialisation */
struct pf_unit *pf;
- int unit;
+ int found = 0, unit;
if (disable)
return -EINVAL;
- pf_init_units();
+ if (register_blkdev(major, name))
+ return -EBUSY;
- if (pf_detect())
- return -ENODEV;
- pf_busy = 0;
+ printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+ name, name, PF_VERSION, major, cluster, nice);
- if (register_blkdev(major, name)) {
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
- continue;
- blk_cleanup_queue(pf->disk->queue);
- blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
- }
- return -EBUSY;
+ par_drv = pi_register_driver(name);
+ if (!par_drv) {
+ pr_err("failed to register %s driver\n", name);
+ goto out_unregister_blkdev;
}
- for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- struct gendisk *disk = pf->disk;
+ for (unit = 0; unit < PF_UNITS; unit++) {
+ if (!(*drives[unit])[D_PRT])
+ pf_drive_count++;
+ }
- if (!pf->present)
- continue;
- disk->private_data = pf;
- add_disk(disk);
+ pf = units;
+ if (pf_drive_count == 0) {
+		if (!pf_init_unit(pf, 1, -1, -1, -1, -1, -1, verbose))
+ found++;
+ } else {
+ for (unit = 0; unit < PF_UNITS; unit++, pf++) {
+ int *conf = *drives[unit];
+ if (!conf[D_PRT])
+ continue;
+			if (!pf_init_unit(pf, 0, conf[D_PRT], conf[D_MOD],
+ conf[D_UNI], conf[D_PRO], conf[D_DLY],
+ verbose))
+ found++;
+ }
+ }
+ if (!found) {
+ printk("%s: No ATAPI disk detected\n", name);
+ goto out_unregister_pi_driver;
}
+ pf_busy = 0;
return 0;
+
+out_unregister_pi_driver:
+ pi_unregister_driver(par_drv);
+out_unregister_blkdev:
+ unregister_blkdev(major, name);
+ return -ENODEV;
}
static void __exit pf_exit(void)
{
struct pf_unit *pf;
int unit;
- unregister_blkdev(major, name);
+
for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
- if (!pf->disk)
+ if (!pf->present)
continue;
-
- if (pf->present)
- del_gendisk(pf->disk);
-
- blk_cleanup_queue(pf->disk->queue);
+ del_gendisk(pf->disk);
+ blk_cleanup_disk(pf->disk);
blk_mq_free_tag_set(&pf->tag_set);
- put_disk(pf->disk);
-
- if (pf->present)
- pi_release(pf->pi);
+ pi_release(pf->pi);
}
+
+ unregister_blkdev(major, name);
}
MODULE_LICENSE("GPL");
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 0f26b2510a75..b53f648302c1 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -703,7 +703,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
struct request *rq;
int ret = 0;
- rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
+ rq = scsi_alloc_request(q, (cgc->data_direction == CGC_DATA_WRITE) ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -726,7 +726,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
if (scsi_req(rq)->result)
ret = -EIO;
out:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
@@ -2400,7 +2400,7 @@ static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
}
}
-static blk_qc_t pkt_submit_bio(struct bio *bio)
+static void pkt_submit_bio(struct bio *bio)
{
struct pktcdvd_device *pd;
char b[BDEVNAME_SIZE];
@@ -2423,7 +2423,7 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
*/
if (bio_data_dir(bio) == READ) {
pkt_make_request_read(pd, bio);
- return BLK_QC_T_NONE;
+ return;
}
if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
@@ -2455,10 +2455,9 @@ static blk_qc_t pkt_submit_bio(struct bio *bio)
pkt_make_request_write(bio->bi_bdev->bd_disk->queue, split);
} while (split != bio);
- return BLK_QC_T_NONE;
+ return;
end_io:
bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void pkt_init_queue(struct pktcdvd_device *pd)
@@ -2537,6 +2536,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
int i;
char b[BDEVNAME_SIZE];
struct block_device *bdev;
+ struct scsi_device *sdev;
if (pd->pkt_dev == dev) {
pkt_err(pd, "recursive setup not allowed\n");
@@ -2560,10 +2560,12 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_NDELAY, NULL);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
- if (!blk_queue_scsi_passthrough(bdev_get_queue(bdev))) {
+ sdev = scsi_device_from_queue(bdev->bd_disk->queue);
+ if (!sdev) {
blkdev_put(bdev, FMODE_READ | FMODE_NDELAY);
return -EINVAL;
}
+ put_device(&sdev->sdev_gendev);
/* This is safe, since we have a reference from open(). */
__module_get(THIS_MODULE);
@@ -2729,7 +2731,9 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev)
/* inherit events of the host device */
disk->events = pd->bdev->bd_disk->events;
- add_disk(disk);
+ ret = add_disk(disk);
+ if (ret)
+ goto out_mem2;
pkt_sysfs_dev_new(pd);
pkt_debugfs_dev_new(pd);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index c7b19e128b03..d1ebf193cb9a 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -578,7 +578,7 @@ out:
return next;
}
-static blk_qc_t ps3vram_submit_bio(struct bio *bio)
+static void ps3vram_submit_bio(struct bio *bio)
{
struct ps3_system_bus_device *dev = bio->bi_bdev->bd_disk->private_data;
struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
@@ -594,13 +594,11 @@ static blk_qc_t ps3vram_submit_bio(struct bio *bio)
spin_unlock_irq(&priv->lock);
if (busy)
- return BLK_QC_T_NONE;
+ return;
do {
bio = ps3vram_do_bio(dev, bio);
} while (bio);
-
- return BLK_QC_T_NONE;
}
static const struct block_device_operations ps3vram_fops = {
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index e65c9d706f6f..953fa134cd3d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -836,7 +836,7 @@ struct rbd_options {
u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};
-#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
+#define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
#define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
#define RBD_READ_ONLY_DEFAULT false
@@ -7054,7 +7054,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
if (rc)
goto err_out_image_lock;
- device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
+ if (rc)
+ goto err_out_cleanup_disk;
spin_lock(&rbd_dev_list_lock);
list_add_tail(&rbd_dev->node, &rbd_dev_list);
@@ -7068,6 +7070,8 @@ out:
module_put(THIS_MODULE);
return rc;
+err_out_cleanup_disk:
+ rbd_free_disk(rbd_dev);
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
rbd_dev_device_release(rbd_dev);
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c
index bd4a41afbbfc..2df0657cdf00 100644
--- a/drivers/block/rnbd/rnbd-clt.c
+++ b/drivers/block/rnbd/rnbd-clt.c
@@ -1176,7 +1176,7 @@ static blk_status_t rnbd_queue_rq(struct blk_mq_hw_ctx *hctx,
return ret;
}
-static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int rnbd_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct rnbd_queue *q = hctx->driver_data;
struct rnbd_clt_dev *dev = q->dev;
@@ -1384,8 +1384,10 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
}
-static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
+static int rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
{
+ int err;
+
dev->gd->major = rnbd_client_major;
dev->gd->first_minor = idx << RNBD_PART_BITS;
dev->gd->minors = 1 << RNBD_PART_BITS;
@@ -1410,7 +1412,11 @@ static void rnbd_clt_setup_gen_disk(struct rnbd_clt_dev *dev, int idx)
if (!dev->rotational)
blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->queue);
- add_disk(dev->gd);
+ err = add_disk(dev->gd);
+ if (err)
+ blk_cleanup_disk(dev->gd);
+
+ return err;
}
static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
@@ -1426,8 +1432,7 @@ static int rnbd_client_setup_device(struct rnbd_clt_dev *dev)
rnbd_init_mq_hw_queues(dev);
setup_request_queue(dev);
- rnbd_clt_setup_gen_disk(dev, idx);
- return 0;
+ return rnbd_clt_setup_gen_disk(dev, idx);
}
static struct rnbd_clt_dev *init_dev(struct rnbd_clt_session *sess,
diff --git a/drivers/block/rnbd/rnbd-proto.h b/drivers/block/rnbd/rnbd-proto.h
index c1bc5c0fef71..de5d5a8df81d 100644
--- a/drivers/block/rnbd/rnbd-proto.h
+++ b/drivers/block/rnbd/rnbd-proto.h
@@ -10,7 +10,7 @@
#define RNBD_PROTO_H
#include <linux/types.h>
-#include <linux/blkdev.h>
+#include <linux/blk-mq.h>
#include <linux/limits.h>
#include <linux/inet.h>
#include <linux/in.h>
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 83636714b8d7..8d9d69f5dfbc 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -935,7 +935,9 @@ static int rsxx_pci_probe(struct pci_dev *dev,
card->size8 = 0;
}
- rsxx_attach_dev(card);
+ st = rsxx_attach_dev(card);
+ if (st)
+ goto failed_create_dev;
/************* Setup Debugfs *************/
rsxx_debugfs_dev_new(card);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 1cc40b0ea761..dd33f1bdf3b8 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -50,7 +50,7 @@ struct rsxx_bio_meta {
static struct kmem_cache *bio_meta_pool;
-static blk_qc_t rsxx_submit_bio(struct bio *bio);
+static void rsxx_submit_bio(struct bio *bio);
/*----------------- Block Device Operations -----------------*/
static int rsxx_blkdev_ioctl(struct block_device *bdev,
@@ -120,7 +120,7 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
}
}
-static blk_qc_t rsxx_submit_bio(struct bio *bio)
+static void rsxx_submit_bio(struct bio *bio)
{
struct rsxx_cardinfo *card = bio->bi_bdev->bd_disk->private_data;
struct rsxx_bio_meta *bio_meta;
@@ -169,7 +169,7 @@ static blk_qc_t rsxx_submit_bio(struct bio *bio)
if (st)
goto queue_err;
- return BLK_QC_T_NONE;
+ return;
queue_err:
kmem_cache_free(bio_meta_pool, bio_meta);
@@ -177,7 +177,6 @@ req_err:
if (st)
bio->bi_status = st;
bio_endio(bio);
- return BLK_QC_T_NONE;
}
/*----------------- Device Setup -------------------*/
@@ -192,6 +191,8 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
+ int err = 0;
+
mutex_lock(&card->dev_lock);
/* The block device requires the stripe size from the config. */
@@ -200,13 +201,17 @@ int rsxx_attach_dev(struct rsxx_cardinfo *card)
set_capacity(card->gendisk, card->size8 >> 9);
else
set_capacity(card->gendisk, 0);
- device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
- card->bdev_attached = 1;
+ err = device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
+ if (err == 0)
+ card->bdev_attached = 1;
}
mutex_unlock(&card->dev_lock);
- return 0;
+ if (err)
+ blk_cleanup_disk(card->gendisk);
+
+ return err;
}
void rsxx_detach_dev(struct rsxx_cardinfo *card)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index 7ccc8d2a41bc..821594cd1315 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -16,6 +16,7 @@
#include <linux/fd.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/hdreg.h>
#include <linux/kernel.h>
@@ -184,6 +185,7 @@ struct floppy_state {
int track;
int ref_count;
+ bool registered;
struct gendisk *disk;
struct blk_mq_tag_set tag_set;
@@ -771,6 +773,20 @@ static const struct blk_mq_ops swim_mq_ops = {
.queue_rq = swim_queue_rq,
};
+static void swim_cleanup_floppy_disk(struct floppy_state *fs)
+{
+ struct gendisk *disk = fs->disk;
+
+ if (!disk)
+ return;
+
+ if (fs->registered)
+ del_gendisk(fs->disk);
+
+ blk_cleanup_disk(disk);
+ blk_mq_free_tag_set(&fs->tag_set);
+}
+
static int swim_floppy_init(struct swim_priv *swd)
{
int err;
@@ -827,7 +843,10 @@ static int swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
set_capacity(swd->unit[drive].disk, 2880);
- add_disk(swd->unit[drive].disk);
+ err = add_disk(swd->unit[drive].disk);
+ if (err)
+ goto exit_put_disks;
+ swd->unit[drive].registered = true;
}
return 0;
@@ -835,12 +854,7 @@ static int swim_floppy_init(struct swim_priv *swd)
exit_put_disks:
unregister_blkdev(FLOPPY_MAJOR, "fd");
do {
- struct gendisk *disk = swd->unit[drive].disk;
-
- if (!disk)
- continue;
- blk_cleanup_disk(disk);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
} while (drive--);
return err;
}
@@ -909,12 +923,8 @@ static int swim_remove(struct platform_device *dev)
int drive;
struct resource *res;
- for (drive = 0; drive < swd->floppy_count; drive++) {
- del_gendisk(swd->unit[drive].disk);
- blk_cleanup_queue(swd->unit[drive].disk->queue);
- blk_mq_free_tag_set(&swd->unit[drive].tag_set);
- put_disk(swd->unit[drive].disk);
- }
+ for (drive = 0; drive < swd->floppy_count; drive++)
+ swim_cleanup_floppy_disk(&swd->unit[drive]);
unregister_blkdev(FLOPPY_MAJOR, "fd");
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
index 965af0a3e95b..4b91c9aa5892 100644
--- a/drivers/block/swim3.c
+++ b/drivers/block/swim3.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
+#include <linux/major.h>
#include <asm/io.h>
#include <asm/dbdma.h>
#include <asm/prom.h>
@@ -1229,7 +1230,9 @@ static int swim3_attach(struct macio_dev *mdev,
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", floppy_count);
set_capacity(disk, 2880);
- add_disk(disk);
+ rc = add_disk(disk);
+ if (rc)
+ goto out_cleanup_disk;
disks[floppy_count++] = disk;
return 0;
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index 420cd952ddc4..d1676fe0da1a 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -297,6 +297,7 @@ struct carm_host {
struct work_struct fsm_task;
+ int probe_err;
struct completion probe_comp;
};
@@ -1181,8 +1182,11 @@ static void carm_fsm_task (struct work_struct *work)
struct gendisk *disk = port->disk;
set_capacity(disk, port->capacity);
- add_disk(disk);
- activated++;
+ host->probe_err = add_disk(disk);
+ if (!host->probe_err)
+ activated++;
+ else
+ break;
}
printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
@@ -1192,11 +1196,9 @@ static void carm_fsm_task (struct work_struct *work)
reschedule = 1;
break;
}
-
case HST_PROBE_FINISHED:
complete(&host->probe_comp);
break;
-
case HST_ERROR:
/* FIXME: TODO */
break;
@@ -1507,7 +1509,12 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_irq;
DPRINTK("waiting for probe_comp\n");
+ host->probe_err = -ENODEV;
wait_for_completion(&host->probe_comp);
+ if (host->probe_err) {
+ rc = host->probe_err;
+ goto err_out_free_irq;
+ }
printk(KERN_INFO "%s: pci %s, ports %d, io %llx, irq %u, major %d\n",
host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 303caf2d17d0..97bf051a50ce 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -24,6 +24,19 @@
/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768
+#ifdef CONFIG_ARCH_NO_SG_CHAIN
+#define VIRTIO_BLK_INLINE_SG_CNT 0
+#else
+#define VIRTIO_BLK_INLINE_SG_CNT 2
+#endif
+
+static unsigned int num_request_queues;
+module_param(num_request_queues, uint, 0644);
+MODULE_PARM_DESC(num_request_queues,
+		 "Limit the number of request queues to use per block device. "
+		 "0 for no limit. "
+		 "Values > nr_cpu_ids are truncated to nr_cpu_ids.");
+
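
The parameter is applied later in init_vq(), where the final count becomes min(min_not_zero(num_request_queues, nr_cpu_ids), num_vqs reported by the device). A self-contained sketch of that clamping, with min_not_zero() re-implemented locally and example numbers that are purely illustrative:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

/* Same semantics as the kernel's min_not_zero(): the smaller of the two
 * values, ignoring one that is zero. */
static unsigned int min_not_zero(unsigned int a, unsigned int b)
{
	return a == 0 ? b : (b == 0 ? a : min_u(a, b));
}

int main(void)
{
	unsigned int nr_cpu_ids = 8, device_num_vqs = 16;
	unsigned int limits[] = { 0, 4, 64 };

	for (int i = 0; i < 3; i++) {
		unsigned int num_vqs = min_u(min_not_zero(limits[i], nr_cpu_ids),
					     device_num_vqs);

		printf("num_request_queues=%u -> num_vqs=%u\n",
		       limits[i], num_vqs);
	}
	return 0;
}

With the default of 0 the cap is effectively nr_cpu_ids, and values above nr_cpu_ids are truncated to it, which is what the parameter description promises.
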
static int major;
static DEFINE_IDA(vd_index_ida);
@@ -77,6 +90,7 @@ struct virtio_blk {
struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
u8 status;
+ struct sg_table sg_table;
struct scatterlist sg[];
};
@@ -162,12 +176,93 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
return 0;
}
-static inline void virtblk_request_done(struct request *req)
+static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+ if (blk_rq_nr_phys_segments(req))
+ sg_free_table_chained(&vbr->sg_table,
+ VIRTIO_BLK_INLINE_SG_CNT);
+}
+
+static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
+ struct virtblk_req *vbr)
+{
+ int err;
+
+ if (!blk_rq_nr_phys_segments(req))
+ return 0;
+
+ vbr->sg_table.sgl = vbr->sg;
+ err = sg_alloc_table_chained(&vbr->sg_table,
+ blk_rq_nr_phys_segments(req),
+ vbr->sg_table.sgl,
+ VIRTIO_BLK_INLINE_SG_CNT);
+ if (unlikely(err))
+ return -ENOMEM;
+ return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
+}
+
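
The map/unmap pair relies on the chained scatterlist idea: up to VIRTIO_BLK_INLINE_SG_CNT entries live inline in the per-request pdu, and larger requests spill into an extra allocation that has to be released on completion or on any error path. A self-contained model of that shape (struct fake_req and its helpers are made up; the real code uses sg_alloc_table_chained()/sg_free_table_chained()):

#include <stdio.h>
#include <stdlib.h>

#define INLINE_SG_CNT 2

struct fake_req {
	int inline_sg[INLINE_SG_CNT];	/* embedded in the request pdu */
	int *sgl;			/* points at inline_sg or heap memory */
	int nents;
};

static int map_data(struct fake_req *req, int nr_segments)
{
	if (nr_segments <= INLINE_SG_CNT) {
		req->sgl = req->inline_sg;	/* no allocation needed */
	} else {
		req->sgl = calloc(nr_segments, sizeof(*req->sgl));
		if (!req->sgl)
			return -1;
	}
	req->nents = nr_segments;
	return 0;
}

static void unmap_data(struct fake_req *req)
{
	if (req->sgl && req->sgl != req->inline_sg)
		free(req->sgl);		/* only the spilled part is freed */
	req->sgl = NULL;
	req->nents = 0;
}

int main(void)
{
	struct fake_req small = { .sgl = NULL }, big = { .sgl = NULL };

	if (map_data(&small, 2) || map_data(&big, 17))
		return 1;
	printf("small uses inline storage: %s\n",
	       small.sgl == small.inline_sg ? "yes" : "no");
	printf("big uses inline storage:   %s\n",
	       big.sgl == big.inline_sg ? "yes" : "no");
	unmap_data(&small);
	unmap_data(&big);
	return 0;
}

Sizing cmd_size for only the inline entries, instead of the full sg_elems array, is what shrinks the per-request allocation in virtblk_probe() further down.
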
+static void virtblk_cleanup_cmd(struct request *req)
+{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
kfree(bvec_virt(&req->special_vec));
+}
+
+static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
+ struct request *req,
+ struct virtblk_req *vbr)
+{
+ bool unmap = false;
+ u32 type;
+
+ vbr->out_hdr.sector = 0;
+
+ switch (req_op(req)) {
+ case REQ_OP_READ:
+ type = VIRTIO_BLK_T_IN;
+ vbr->out_hdr.sector = cpu_to_virtio64(vdev,
+ blk_rq_pos(req));
+ break;
+ case REQ_OP_WRITE:
+ type = VIRTIO_BLK_T_OUT;
+ vbr->out_hdr.sector = cpu_to_virtio64(vdev,
+ blk_rq_pos(req));
+ break;
+ case REQ_OP_FLUSH:
+ type = VIRTIO_BLK_T_FLUSH;
+ break;
+ case REQ_OP_DISCARD:
+ type = VIRTIO_BLK_T_DISCARD;
+ break;
+ case REQ_OP_WRITE_ZEROES:
+ type = VIRTIO_BLK_T_WRITE_ZEROES;
+ unmap = !(req->cmd_flags & REQ_NOUNMAP);
+ break;
+ case REQ_OP_DRV_IN:
+ type = VIRTIO_BLK_T_GET_ID;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return BLK_STS_IOERR;
+ }
+
+ vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
+ vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
+
+ if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
+ if (virtblk_setup_discard_write_zeroes(req, unmap))
+ return BLK_STS_RESOURCE;
+ }
+
+ return 0;
+}
+
+static inline void virtblk_request_done(struct request *req)
+{
+ struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
+
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
blk_mq_end_request(req, virtblk_result(vbr));
}
@@ -223,59 +318,26 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
unsigned long flags;
unsigned int num;
int qid = hctx->queue_num;
- int err;
bool notify = false;
- bool unmap = false;
- u32 type;
+ blk_status_t status;
+ int err;
BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);
- switch (req_op(req)) {
- case REQ_OP_READ:
- case REQ_OP_WRITE:
- type = 0;
- break;
- case REQ_OP_FLUSH:
- type = VIRTIO_BLK_T_FLUSH;
- break;
- case REQ_OP_DISCARD:
- type = VIRTIO_BLK_T_DISCARD;
- break;
- case REQ_OP_WRITE_ZEROES:
- type = VIRTIO_BLK_T_WRITE_ZEROES;
- unmap = !(req->cmd_flags & REQ_NOUNMAP);
- break;
- case REQ_OP_DRV_IN:
- type = VIRTIO_BLK_T_GET_ID;
- break;
- default:
- WARN_ON_ONCE(1);
- return BLK_STS_IOERR;
- }
-
- vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
- vbr->out_hdr.sector = type ?
- 0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
- vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));
+ status = virtblk_setup_cmd(vblk->vdev, req, vbr);
+ if (unlikely(status))
+ return status;
blk_mq_start_request(req);
- if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
- err = virtblk_setup_discard_write_zeroes(req, unmap);
- if (err)
- return BLK_STS_RESOURCE;
- }
-
- num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
- if (num) {
- if (rq_data_dir(req) == WRITE)
- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
- else
- vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
+ num = virtblk_map_data(hctx, req, vbr);
+ if (unlikely(num < 0)) {
+ virtblk_cleanup_cmd(req);
+ return BLK_STS_RESOURCE;
}
spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
- err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
+ err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg_table.sgl, num);
if (err) {
virtqueue_kick(vblk->vqs[qid].vq);
/* Don't stop the queue if -ENOMEM: we may have failed to
@@ -284,6 +346,8 @@ static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
if (err == -ENOSPC)
blk_mq_stop_hw_queue(hctx);
spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
+ virtblk_unmap_data(req, vbr);
+ virtblk_cleanup_cmd(req);
switch (err) {
case -ENOSPC:
return BLK_STS_DEV_RESOURCE;
@@ -312,7 +376,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
struct request *req;
int err;
- req = blk_get_request(q, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -323,7 +387,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
blk_execute_rq(vblk->disk, req, false);
err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
@@ -497,8 +561,14 @@ static int init_vq(struct virtio_blk *vblk)
&num_vqs);
if (err)
num_vqs = 1;
+ if (!err && !num_vqs) {
+ dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
+ return -EINVAL;
+ }
- num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);
+ num_vqs = min_t(unsigned int,
+ min_not_zero(num_request_queues, nr_cpu_ids),
+ num_vqs);
vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
if (!vblk->vqs)
@@ -624,7 +694,7 @@ cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
u8 writeback = virtblk_get_cache_mode(vblk->vdev);
BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
- return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
+ return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}
static DEVICE_ATTR_RW(cache_type);
@@ -660,16 +730,6 @@ static const struct attribute_group *virtblk_attr_groups[] = {
NULL,
};
-static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
- unsigned int hctx_idx, unsigned int numa_node)
-{
- struct virtio_blk *vblk = set->driver_data;
- struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);
-
- sg_init_table(vbr->sg, vblk->sg_elems);
- return 0;
-}
-
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
struct virtio_blk *vblk = set->driver_data;
@@ -682,7 +742,6 @@ static const struct blk_mq_ops virtio_mq_ops = {
.queue_rq = virtio_queue_rq,
.commit_rqs = virtio_commit_rqs,
.complete = virtblk_request_done,
- .init_request = virtblk_init_request,
.map_queues = virtblk_map_queues,
};
@@ -762,7 +821,7 @@ static int virtblk_probe(struct virtio_device *vdev)
vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
vblk->tag_set.cmd_size =
sizeof(struct virtblk_req) +
- sizeof(struct scatterlist) * sg_elems;
+ sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
vblk->tag_set.driver_data = vblk;
vblk->tag_set.nr_hw_queues = vblk->num_vqs;
@@ -815,9 +874,17 @@ static int virtblk_probe(struct virtio_device *vdev)
err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
struct virtio_blk_config, blk_size,
&blk_size);
- if (!err)
+ if (!err) {
+ err = blk_validate_block_size(blk_size);
+ if (err) {
+ dev_err(&vdev->dev,
+ "virtio_blk: invalid block size: 0x%x\n",
+ blk_size);
+ goto out_cleanup_disk;
+ }
+
blk_queue_logical_block_size(q, blk_size);
- else
+ } else
blk_size = queue_logical_block_size(q);
/* Use topology information if available */
@@ -982,6 +1049,7 @@ static struct virtio_driver virtio_blk = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 33eba3df4dd9..914587aabca0 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -98,7 +98,7 @@ static void xen_update_blkif_status(struct xen_blkif *blkif)
return;
}
- err = filemap_write_and_wait(blkif->vbd.bdev->bd_inode->i_mapping);
+ err = sync_blockdev(blkif->vbd.bdev);
if (err) {
xenbus_dev_error(blkif->be->dev, err, "block flush");
return;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 72902104f111..8e3983e456f3 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -42,6 +42,7 @@
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
@@ -2385,7 +2386,13 @@ static void blkfront_connect(struct blkfront_info *info)
for_each_rinfo(info, rinfo, i)
kick_pending_request_queues(rinfo);
- device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ err = device_add_disk(&info->xbdev->dev, info->gd, NULL);
+ if (err) {
+ blk_cleanup_disk(info->gd);
+ blk_mq_free_tag_set(&info->tag_set);
+ info->rq = NULL;
+ goto fail;
+ }
info->is_ready = 1;
return;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fcaf2750f68f..a68297fb51a2 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1598,22 +1598,18 @@ static void __zram_make_request(struct zram *zram, struct bio *bio)
/*
* Handler function for all zram I/O requests.
*/
-static blk_qc_t zram_submit_bio(struct bio *bio)
+static void zram_submit_bio(struct bio *bio)
{
struct zram *zram = bio->bi_bdev->bd_disk->private_data;
if (!valid_io_request(zram, bio->bi_iter.bi_sector,
bio->bi_iter.bi_size)) {
atomic64_inc(&zram->stats.invalid_io);
- goto error;
+ bio_io_error(bio);
+ return;
}
__zram_make_request(zram, bio);
- return BLK_QC_T_NONE;
-
-error:
- bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static void zram_slot_free_notify(struct block_device *bdev,
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c
index f1705b46fc88..9359bff47296 100644
--- a/drivers/bluetooth/btintel.c
+++ b/drivers/bluetooth/btintel.c
@@ -1037,8 +1037,9 @@ static bool btintel_firmware_version(struct hci_dev *hdev,
params = (void *)(fw_ptr + sizeof(*cmd));
- bt_dev_info(hdev, "Boot Address: 0x%x",
- le32_to_cpu(params->boot_addr));
+ *boot_addr = le32_to_cpu(params->boot_addr);
+
+ bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr);
bt_dev_info(hdev, "Firmware Version: %u-%u.%u",
params->fw_build_num, params->fw_build_ww,
@@ -1071,9 +1072,6 @@ int btintel_download_firmware(struct hci_dev *hdev,
/* Skip version checking */
break;
default:
- /* Skip reading firmware file version in bootloader mode */
- if (ver->fw_variant == 0x06)
- break;
/* Skip download if firmware has the same version */
if (btintel_firmware_version(hdev, ver->fw_build_num,
@@ -1114,19 +1112,16 @@ static int btintel_download_fw_tlv(struct hci_dev *hdev,
int err;
u32 css_header_ver;
- /* Skip reading firmware file version in bootloader mode */
- if (ver->img_type != 0x01) {
- /* Skip download if firmware has the same version */
- if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
- ver->min_fw_build_cw,
- ver->min_fw_build_yy,
- fw, boot_param)) {
- bt_dev_info(hdev, "Firmware already loaded");
- /* Return -EALREADY to indicate that firmware has
- * already been loaded.
- */
- return -EALREADY;
- }
+ /* Skip download if firmware has the same version */
+ if (btintel_firmware_version(hdev, ver->min_fw_build_nn,
+ ver->min_fw_build_cw,
+ ver->min_fw_build_yy,
+ fw, boot_param)) {
+ bt_dev_info(hdev, "Firmware already loaded");
+ /* Return -EALREADY to indicate that firmware has
+ * already been loaded.
+ */
+ return -EALREADY;
}
/* The firmware variant determines if the device is in bootloader
@@ -1285,12 +1280,16 @@ static int btintel_read_debug_features(struct hci_dev *hdev,
static int btintel_set_debug_features(struct hci_dev *hdev,
const struct intel_debug_features *features)
{
- u8 mask[11] = { 0x0a, 0x92, 0x02, 0x07, 0x00, 0x00, 0x00, 0x00,
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00 };
+ u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 };
+ u8 trace_enable = 0x02;
struct sk_buff *skb;
- if (!features)
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
return -EINVAL;
+ }
if (!(features->page1[0] & 0x3f)) {
bt_dev_info(hdev, "Telemetry exception format not supported");
@@ -1303,11 +1302,95 @@ static int btintel_set_debug_features(struct hci_dev *hdev,
PTR_ERR(skb));
return PTR_ERR(skb);
}
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+
+ bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
+ return 0;
+}
+
+static int btintel_reset_debug_features(struct hci_dev *hdev,
+ const struct intel_debug_features *features)
+{
+ u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00 };
+ u8 trace_enable = 0x00;
+ struct sk_buff *skb;
+
+ if (!features) {
+ bt_dev_warn(hdev, "Debug features not read");
+ return -EINVAL;
+ }
+
+ if (!(features->page1[0] & 0x3f)) {
+ bt_dev_info(hdev, "Telemetry exception format not supported");
+ return 0;
+ }
+
+ /* Should stop the trace before writing ddc event mask. */
+ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+ kfree_skb(skb);
+ skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
kfree_skb(skb);
+
+ bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x",
+ trace_enable, mask[3]);
+
return 0;
}
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ struct intel_debug_features features;
+ int err;
+
+ bt_dev_dbg(hdev, "enable %d", enable);
+
+ /* Read the Intel supported features and, if the new exception formats
+ * are supported, load the additional DDC config to enable them.
+ */
+ err = btintel_read_debug_features(hdev, &features);
+ if (err)
+ return err;
+
+ /* Set or reset the debug features. */
+ if (enable)
+ err = btintel_set_debug_features(hdev, &features);
+ else
+ err = btintel_reset_debug_features(hdev, &features);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(btintel_set_quality_report);
+
static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev,
struct intel_version *ver)
{
@@ -1893,7 +1976,6 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
BT_DBG("%s", hdev->name);
@@ -1934,14 +2016,7 @@ static int btintel_bootloader_setup(struct hci_dev *hdev,
btintel_load_ddc_config(hdev, ddcname);
}
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version(hdev, &new_ver);
@@ -2083,13 +2158,102 @@ done:
return err;
}
+static int btintel_get_codec_config_data(struct hci_dev *hdev,
+ __u8 link, struct bt_codec *codec,
+ __u8 *ven_len, __u8 **ven_data)
+{
+ int err = 0;
+
+ if (!ven_data || !ven_len)
+ return -EINVAL;
+
+ *ven_len = 0;
+ *ven_data = NULL;
+
+ if (link != ESCO_LINK) {
+ bt_dev_err(hdev, "Invalid link type(%u)", link);
+ return -EINVAL;
+ }
+
+ *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL);
+ if (!*ven_data) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ /* supports only CVSD and mSBC offload codecs */
+ switch (codec->id) {
+ case 0x02:
+ **ven_data = 0x00;
+ break;
+ case 0x05:
+ **ven_data = 0x01;
+ break;
+ default:
+ err = -EINVAL;
+ bt_dev_err(hdev, "Invalid codec id(%u)", codec->id);
+ goto error;
+ }
+ /* codec and its capabilities are pre-defined to ids
+ * preset id = 0x00 represents CVSD codec with sampling rate 8K
+ * preset id = 0x01 represents mSBC codec with sampling rate 16K
+ */
+ *ven_len = sizeof(__u8);
+ return err;
+
+error:
+ kfree(*ven_data);
+ *ven_data = NULL;
+ return err;
+}
+
+static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id)
+{
+ /* Intel uses 1 as data path id for all the usecases */
+ *data_path_id = 1;
+ return 0;
+}
+
+static int btintel_configure_offload(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+ int err = 0;
+ struct intel_offload_use_cases *use_cases;
+
+ skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ bt_dev_err(hdev, "Reading offload use cases failed (%ld)",
+ PTR_ERR(skb));
+ return PTR_ERR(skb);
+ }
+
+ if (skb->len < sizeof(*use_cases)) {
+ err = -EIO;
+ goto error;
+ }
+
+ use_cases = (void *)skb->data;
+
+ if (use_cases->status) {
+ err = -bt_to_errno(skb->data[0]);
+ goto error;
+ }
+
+ if (use_cases->preset[0] & 0x03) {
+ hdev->get_data_path_id = btintel_get_data_path_id;
+ hdev->get_codec_config_data = btintel_get_codec_config_data;
+ }
+error:
+ kfree_skb(skb);
+ return err;
+}
+
static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
struct intel_version_tlv *ver)
{
u32 boot_param;
char ddcname[64];
int err;
- struct intel_debug_features features;
struct intel_version_tlv new_ver;
bt_dev_dbg(hdev, "");
@@ -2125,14 +2289,10 @@ static int btintel_bootloader_setup_tlv(struct hci_dev *hdev,
*/
btintel_load_ddc_config(hdev, ddcname);
- /* Read the Intel supported features and if new exception formats
- * supported, need to load the additional DDC config to enable.
- */
- err = btintel_read_debug_features(hdev, &features);
- if (!err) {
- /* Set DDC mask for available debug features */
- btintel_set_debug_features(hdev, &features);
- }
+ /* Read supported use cases and set callbacks to fetch datapath id */
+ btintel_configure_offload(hdev);
+
+ hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
/* Read the Intel version information after loading the FW */
err = btintel_read_version_tlv(hdev, &new_ver);
@@ -2232,6 +2392,9 @@ static int btintel_setup_combined(struct hci_dev *hdev)
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
+ /* Set up the quality report callback for Intel devices */
+ hdev->set_quality_report = btintel_set_quality_report;
+
/* For Legacy device, check the HW platform value and size */
if (skb->len == sizeof(ver) && skb->data[1] == 0x37) {
bt_dev_dbg(hdev, "Read the legacy Intel version information");
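For orientation, the setup paths above now only clear HCI_QUALITY_REPORT and leave the debug-feature programming to the new btintel_set_quality_report() hook, which is expected to be invoked from core code when the feature is toggled. A minimal, hypothetical core-side caller is sketched below; the helper name and the exact flag handling are assumptions for illustration, not taken from this series.

/* Illustrative sketch only: driving the vendor hook from a core-side toggle. */
static int example_toggle_quality_report(struct hci_dev *hdev, bool enable)
{
	int err;

	if (!hdev->set_quality_report)
		return -EOPNOTSUPP;

	err = hdev->set_quality_report(hdev, enable);
	if (err)
		return err;

	if (enable)
		hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
	else
		hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);

	return 0;
}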
diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h
index aa64072bbe68..e500c0d7a729 100644
--- a/drivers/bluetooth/btintel.h
+++ b/drivers/bluetooth/btintel.h
@@ -132,6 +132,11 @@ struct intel_debug_features {
__u8 page1[16];
} __packed;
+struct intel_offload_use_cases {
+ __u8 status;
+ __u8 preset[8];
+} __packed;
+
#define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8))
#define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16))
#define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff)
@@ -204,6 +209,7 @@ int btintel_configure_setup(struct hci_dev *hdev);
void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len);
void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len);
+int btintel_set_quality_report(struct hci_dev *hdev, bool enable);
#else
static inline int btintel_check_bdaddr(struct hci_dev *hdev)
@@ -294,4 +300,9 @@ static inline void btintel_secure_send_result(struct hci_dev *hdev,
const void *ptr, unsigned int len)
{
}
+
+static inline int btintel_set_quality_report(struct hci_dev *hdev, bool enable)
+{
+ return -ENODEV;
+}
#endif
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 8b9d78ce6bb2..5ccbe4d459d0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -587,12 +587,12 @@ static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
return 0;
}
-static bool btmrvl_prevent_wake(struct hci_dev *hdev)
+static bool btmrvl_wakeup(struct hci_dev *hdev)
{
struct btmrvl_private *priv = hci_get_drvdata(hdev);
struct btmrvl_sdio_card *card = priv->btmrvl_dev.card;
- return !device_may_wakeup(&card->func->dev);
+ return device_may_wakeup(&card->func->dev);
}
/*
@@ -696,7 +696,7 @@ int btmrvl_register_hdev(struct btmrvl_private *priv)
hdev->send = btmrvl_send_frame;
hdev->setup = btmrvl_setup;
hdev->set_bdaddr = btmrvl_set_bdaddr;
- hdev->prevent_wake = btmrvl_prevent_wake;
+ hdev->wakeup = btmrvl_wakeup;
SET_HCIDEV_DEV(hdev, &card->func->dev);
hdev->dev_type = priv->btmrvl_dev.dev_type;
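The rename from prevent_wake to wakeup inverts the callback's meaning: the driver now reports whether its transport may wake the host, instead of whether wakeup should be prevented. A hedged sketch of how a caller might consult the hook follows; the helper name and the "no hook means wakeup-capable" default are assumptions for illustration.

/* Illustrative sketch only: consulting the renamed ->wakeup hook. */
static bool example_hdev_may_wakeup(struct hci_dev *hdev)
{
	/* Assume controllers without the hook can wake the host. */
	if (!hdev->wakeup)
		return true;

	return hdev->wakeup(hdev);
}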
diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c
index e9d91d7c0db4..9ba22b13b4fa 100644
--- a/drivers/bluetooth/btmtkuart.c
+++ b/drivers/bluetooth/btmtkuart.c
@@ -158,8 +158,10 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
int err;
hlen = sizeof(*hdr) + wmt_params->dlen;
- if (hlen > 255)
- return -EINVAL;
+ if (hlen > 255) {
+ err = -EINVAL;
+ goto err_free_skb;
+ }
hdr = (struct mtk_wmt_hdr *)&wc;
hdr->dir = 1;
@@ -173,7 +175,7 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
err = __hci_cmd_send(hdev, 0xfc6f, hlen, &wc);
if (err < 0) {
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
/* The vendor specific WMT commands are all answered by a vendor
@@ -190,13 +192,14 @@ static int mtk_hci_wmt_sync(struct hci_dev *hdev,
if (err == -EINTR) {
bt_dev_err(hdev, "Execution of wmt command interrupted");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return err;
+ goto err_free_skb;
}
if (err) {
bt_dev_err(hdev, "Execution of wmt command timed out");
clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state);
- return -ETIMEDOUT;
+ err = -ETIMEDOUT;
+ goto err_free_skb;
}
/* Parse and handle the return WMT event */
diff --git a/drivers/bluetooth/btrsi.c b/drivers/bluetooth/btrsi.c
index 8646b6dd11e9..634cf8f5ed2d 100644
--- a/drivers/bluetooth/btrsi.c
+++ b/drivers/bluetooth/btrsi.c
@@ -19,7 +19,6 @@
#include <net/bluetooth/hci_core.h>
#include <asm/unaligned.h>
#include <net/rsi_91x.h>
-#include <net/genetlink.h>
#define RSI_DMA_ALIGN 8
#define RSI_FRAME_DESC_SIZE 16
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 1f8afa0244d8..c2bdd1e6060e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -59,6 +59,7 @@ struct id_table {
__u8 hci_bus;
bool config_needed;
bool has_rom_version;
+ bool has_msft_ext;
char *fw_name;
char *cfg_name;
};
@@ -121,6 +122,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8821c_fw.bin",
.cfg_name = "rtl_bt/rtl8821c_config" },
@@ -135,6 +137,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8761b_fw.bin",
.cfg_name = "rtl_bt/rtl8761b_config" },
@@ -149,6 +152,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cs_fw.bin",
.cfg_name = "rtl_bt/rtl8822cs_config" },
@@ -156,6 +160,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822cu_fw.bin",
.cfg_name = "rtl_bt/rtl8822cu_config" },
@@ -163,6 +168,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB),
.config_needed = true,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8822b_fw.bin",
.cfg_name = "rtl_bt/rtl8822b_config" },
@@ -170,6 +176,7 @@ static const struct id_table ic_id_table[] = {
{ IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB),
.config_needed = false,
.has_rom_version = true,
+ .has_msft_ext = true,
.fw_name = "rtl_bt/rtl8852au_fw.bin",
.cfg_name = "rtl_bt/rtl8852au_config" },
};
@@ -594,8 +601,10 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
- if (resp->hci_ver == 0x8 && le16_to_cpu(resp->hci_rev) == 0x826c &&
- resp->lmp_ver == 0x8 && le16_to_cpu(resp->lmp_subver) == 0xa99e)
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
+
+ if (!btrtl_dev->ic_info)
btrtl_dev->drop_fw = true;
if (btrtl_dev->drop_fw) {
@@ -634,13 +643,13 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev,
hci_ver = resp->hci_ver;
hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
+
+ btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
+ hdev->bus);
}
out_free:
kfree_skb(skb);
- btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver,
- hdev->bus);
-
if (!btrtl_dev->ic_info) {
rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
@@ -684,12 +693,8 @@ out_free:
/* The following chips support the Microsoft vendor extension,
* therefore set the corresponding VsMsftOpCode.
*/
- switch (lmp_subver) {
- case RTL_ROM_LMP_8822B:
- case RTL_ROM_LMP_8852A:
+ if (btrtl_dev->ic_info->has_msft_ext)
hci_set_msft_opcode(hdev, 0xFCF0);
- break;
- }
return btrtl_dev;
@@ -746,6 +751,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev)
case CHIP_ID_8852A:
set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
break;
default:
rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60d2fce59a71..75c83768c257 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -384,6 +384,12 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852AE Bluetooth devices */
{ USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+ { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
/* Realtek Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
@@ -410,6 +416,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
+ { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK |
+ BTUSB_WIDEBAND_SPEECH |
+ BTUSB_VALID_LE_STATES },
{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |
BTUSB_WIDEBAND_SPEECH |
BTUSB_VALID_LE_STATES },
@@ -433,6 +442,10 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8761B Bluetooth devices */
+ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |
+ BTUSB_WIDEBAND_SPEECH },
+
/* Additional Realtek 8761BU Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -451,10 +464,6 @@ static const struct usb_device_id blacklist_table[] = {
/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
- /* Bluetooth component of Realtek 8852AE device */
- { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |
- BTUSB_WIDEBAND_SPEECH },
-
{ USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK |
@@ -652,11 +661,33 @@ static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
+ struct gpio_desc *reset_gpio = data->reset_gpio;
int err;
if (++data->cmd_timeout_cnt < 5)
return;
+ if (reset_gpio) {
+ bt_dev_err(hdev, "Reset qca device via bt_en gpio");
+
+ /* Toggle the hard reset line. The qca bt device is going to
+ * yank itself off the USB and then replug. The cleanup is handled
+ * correctly on the way out (standard USB disconnect), and the new
+ * device is detected cleanly and bound to the driver again like
+ * it should be.
+ */
+ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
+ bt_dev_err(hdev, "last reset failed? Not resetting again");
+ return;
+ }
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ msleep(200);
+ gpiod_set_value_cansleep(reset_gpio, 1);
+
+ return;
+ }
+
bt_dev_err(hdev, "Multiple cmd timeouts seen. Resetting usb device.");
/* This is not an unbalanced PM reference since the device will reset */
err = usb_autopm_get_interface(data->intf);
@@ -2200,6 +2231,23 @@ struct btmtk_section_map {
};
} __packed;
+static int btusb_set_bdaddr_mtk(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+ struct sk_buff *skb;
+ long ret;
+
+ skb = __hci_cmd_sync(hdev, 0xfc1a, sizeof(bdaddr), bdaddr, HCI_INIT_TIMEOUT);
+ if (IS_ERR(skb)) {
+ ret = PTR_ERR(skb);
+ bt_dev_err(hdev, "changing Mediatek device address failed (%ld)",
+ ret);
+ return ret;
+ }
+ kfree_skb(skb);
+
+ return 0;
+}
+
static void btusb_mtk_wmt_recv(struct urb *urb)
{
struct hci_dev *hdev = urb->context;
@@ -2804,6 +2852,7 @@ static int btusb_mtk_setup(struct hci_dev *hdev)
case 0x7668:
fwname = FIRMWARE_MT7668;
break;
+ case 0x7922:
case 0x7961:
snprintf(fw_bin_name, sizeof(fw_bin_name),
"mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
@@ -3591,11 +3640,11 @@ static void btusb_check_needs_reset_resume(struct usb_interface *intf)
interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME;
}
-static bool btusb_prevent_wake(struct hci_dev *hdev)
+static bool btusb_wakeup(struct hci_dev *hdev)
{
struct btusb_data *data = hci_get_drvdata(hdev);
- return !device_may_wakeup(&data->udev->dev);
+ return device_may_wakeup(&data->udev->dev);
}
static int btusb_shutdown_qca(struct hci_dev *hdev)
@@ -3752,7 +3801,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->flush = btusb_flush;
hdev->send = btusb_send_frame;
hdev->notify = btusb_notify;
- hdev->prevent_wake = btusb_prevent_wake;
+ hdev->wakeup = btusb_wakeup;
#ifdef CONFIG_PM
err = btusb_config_oob_wake(hdev);
@@ -3819,6 +3868,7 @@ static int btusb_probe(struct usb_interface *intf,
hdev->shutdown = btusb_mtk_shutdown;
hdev->manufacturer = 70;
hdev->cmd_timeout = btusb_mtk_cmd_timeout;
+ hdev->set_bdaddr = btusb_set_bdaddr_mtk;
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
data->recv_acl = btusb_recv_acl_mtk;
}
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 0c0dedece59c..34286ffe0568 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -587,9 +587,11 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
count -= processed;
}
- pm_runtime_get(&hu->serdev->dev);
- pm_runtime_mark_last_busy(&hu->serdev->dev);
- pm_runtime_put_autosuspend(&hu->serdev->dev);
+ if (hu->serdev) {
+ pm_runtime_get(&hu->serdev->dev);
+ pm_runtime_mark_last_busy(&hu->serdev->dev);
+ pm_runtime_put_autosuspend(&hu->serdev->dev);
+ }
return 0;
}
@@ -814,7 +816,6 @@ static int h5_serdev_probe(struct serdev_device *serdev)
struct device *dev = &serdev->dev;
struct h5 *h5;
const struct h5_device_data *data;
- int err;
h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL);
if (!h5)
@@ -846,6 +847,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
h5->vnd = data->vnd;
}
+ if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
+ set_bit(H5_WAKEUP_DISABLE, &h5->flags);
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
if (IS_ERR(h5->enable_gpio))
@@ -856,14 +859,7 @@ static int h5_serdev_probe(struct serdev_device *serdev)
if (IS_ERR(h5->device_wake_gpio))
return PTR_ERR(h5->device_wake_gpio);
- err = hci_uart_register_device(&h5->serdev_hu, &h5p);
- if (err)
- return err;
-
- if (data->driver_info & H5_INFO_WAKEUP_DISABLE)
- set_bit(H5_WAKEUP_DISABLE, &h5->flags);
-
- return 0;
+ return hci_uart_register_device(&h5->serdev_hu, &h5p);
}
static void h5_serdev_remove(struct serdev_device *serdev)
@@ -962,11 +958,13 @@ static void h5_btrtl_open(struct h5 *h5)
serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN);
serdev_device_set_baudrate(h5->hu->serdev, 115200);
- pm_runtime_set_active(&h5->hu->serdev->dev);
- pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
- pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
- SUSPEND_TIMEOUT_MS);
- pm_runtime_enable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) {
+ pm_runtime_set_active(&h5->hu->serdev->dev);
+ pm_runtime_use_autosuspend(&h5->hu->serdev->dev);
+ pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev,
+ SUSPEND_TIMEOUT_MS);
+ pm_runtime_enable(&h5->hu->serdev->dev);
+ }
/* The controller needs up to 500ms to wakeup */
gpiod_set_value_cansleep(h5->enable_gpio, 1);
@@ -976,7 +974,8 @@ static void h5_btrtl_open(struct h5 *h5)
static void h5_btrtl_close(struct h5 *h5)
{
- pm_runtime_disable(&h5->hu->serdev->dev);
+ if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags))
+ pm_runtime_disable(&h5->hu->serdev->dev);
gpiod_set_value_cansleep(h5->device_wake_gpio, 0);
gpiod_set_value_cansleep(h5->enable_gpio, 0);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 5ed2cfa7da1d..5e32e4d5367a 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -479,6 +479,9 @@ static int hci_uart_tty_open(struct tty_struct *tty)
BT_DBG("tty %p", tty);
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
/* Error if the tty has no write op instead of leaving an exploitable
* hole
*/
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 53deea2eb7b4..dd768a8ed7cb 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -1577,7 +1577,7 @@ static void qca_cmd_timeout(struct hci_dev *hdev)
mutex_unlock(&qca->hci_memdump_lock);
}
-static bool qca_prevent_wake(struct hci_dev *hdev)
+static bool qca_wakeup(struct hci_dev *hdev)
{
struct hci_uart *hu = hci_get_drvdata(hdev);
bool wakeup;
@@ -1730,6 +1730,7 @@ retry:
if (qca_is_wcn399x(soc_type) ||
qca_is_wcn6750(soc_type)) {
set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
+ hci_set_aosp_capable(hdev);
ret = qca_read_soc_version(hdev, &ver, soc_type);
if (ret)
@@ -1764,7 +1765,7 @@ retry:
qca_debugfs_init(hdev);
hu->hdev->hw_error = qca_hw_error;
hu->hdev->cmd_timeout = qca_cmd_timeout;
- hu->hdev->prevent_wake = qca_prevent_wake;
+ hu->hdev->wakeup = qca_wakeup;
} else if (ret == -ENOENT) {
/* No patch/nvm-config found, run with original fw/config */
set_bit(QCA_ROM_FW, &qca->flags);
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 8ab26dec5f6e..b45db0db347c 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -21,6 +21,7 @@
#include <linux/skbuff.h>
#include <linux/miscdevice.h>
+#include <linux/debugfs.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
@@ -37,6 +38,9 @@ struct vhci_data {
struct mutex open_mutex;
struct delayed_work open_timeout;
+
+ bool suspended;
+ bool wakeup;
};
static int vhci_open_dev(struct hci_dev *hdev)
@@ -73,6 +77,115 @@ static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
return 0;
}
+static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id)
+{
+ *data_path_id = 0;
+ return 0;
+}
+
+static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type,
+ struct bt_codec *codec, __u8 *vnd_len,
+ __u8 **vnd_data)
+{
+ if (type != ESCO_LINK)
+ return -EINVAL;
+
+ *vnd_len = 0;
+ *vnd_data = NULL;
+ return 0;
+}
+
+static bool vhci_wakeup(struct hci_dev *hdev)
+{
+ struct vhci_data *data = hci_get_drvdata(hdev);
+
+ return data->wakeup;
+}
+
+static ssize_t force_suspend_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->suspended ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_suspend_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->suspended == enable)
+ return -EALREADY;
+
+ if (enable)
+ err = hci_suspend_dev(data->hdev);
+ else
+ err = hci_resume_dev(data->hdev);
+
+ if (err)
+ return err;
+
+ data->suspended = enable;
+
+ return count;
+}
+
+static const struct file_operations force_suspend_fops = {
+ .open = simple_open,
+ .read = force_suspend_read,
+ .write = force_suspend_write,
+ .llseek = default_llseek,
+};
+
+static ssize_t force_wakeup_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ char buf[3];
+
+ buf[0] = data->wakeup ? 'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_wakeup_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct vhci_data *data = file->private_data;
+ bool enable;
+ int err;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (data->wakeup == enable)
+ return -EALREADY;
+
+ data->wakeup = enable;
+
+ return count;
+}
+
+static const struct file_operations force_wakeup_fops = {
+ .open = simple_open,
+ .read = force_wakeup_read,
+ .write = force_wakeup_write,
+ .llseek = default_llseek,
+};
+
static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
{
struct hci_dev *hdev;
@@ -112,6 +225,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
hdev->send = vhci_send_frame;
+ hdev->get_data_path_id = vhci_get_data_path_id;
+ hdev->get_codec_config_data = vhci_get_codec_config_data;
+ hdev->wakeup = vhci_wakeup;
/* bit 6 is for external configuration */
if (opcode & 0x40)
@@ -129,6 +245,12 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode)
return -EBUSY;
}
+ debugfs_create_file("force_suspend", 0644, hdev->debugfs, data,
+ &force_suspend_fops);
+
+ debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data,
+ &force_wakeup_fops);
+
hci_skb_pkt_type(skb) = HCI_VENDOR_PKT;
skb_put_u8(skb, 0xff);
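The two new debugfs entries parse input with kstrtobool_from_user(), so the usual 'Y'/'N' or '1'/'0' values work, which lets a test drive suspend/resume and wakeup capability of the virtual controller from userspace. The helper below is an illustrative sketch; the /sys/kernel/debug/bluetooth/hci0 path is assumed from the standard hdev debugfs location and will differ per controller index.

/* Illustrative userspace helper for the vhci force_suspend/force_wakeup files. */
#include <fcntl.h>
#include <unistd.h>

static int write_debugfs_bool(const char *path, int enable)
{
	char val = enable ? 'Y' : 'N';
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, &val, 1) != 1) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Example: mark the controller wakeup-capable, then suspend and resume it. */
	write_debugfs_bool("/sys/kernel/debug/bluetooth/hci0/force_wakeup", 1);
	write_debugfs_bool("/sys/kernel/debug/bluetooth/hci0/force_suspend", 1);
	write_debugfs_bool("/sys/kernel/debug/bluetooth/hci0/force_suspend", 0);
	return 0;
}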
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index a4cf3d692dc3..3c68e174a113 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -30,7 +30,7 @@ config ARM_INTEGRATOR_LM
found on the ARM Integrator AP (Application Platform)
config BRCMSTB_GISB_ARB
- bool "Broadcom STB GISB bus arbiter"
+ tristate "Broadcom STB GISB bus arbiter"
depends on ARM || ARM64 || MIPS
default ARCH_BRCMSTB || BMIPS_GENERIC
help
diff --git a/drivers/bus/brcmstb_gisb.c b/drivers/bus/brcmstb_gisb.c
index 6551286a60cc..4c2f7d61cb9b 100644
--- a/drivers/bus/brcmstb_gisb.c
+++ b/drivers/bus/brcmstb_gisb.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2021 Broadcom
*/
#include <linux/init.h>
@@ -536,6 +536,7 @@ static struct platform_driver brcmstb_gisb_arb_driver = {
.name = "brcm-gisb-arb",
.of_match_table = brcmstb_gisb_arb_of_match,
.pm = &brcmstb_gisb_arb_pm_ops,
+ .suppress_bind_attrs = true,
},
};
@@ -546,3 +547,7 @@ static int __init brcm_gisb_driver_init(void)
}
module_init(brcm_gisb_driver_init);
+
+MODULE_AUTHOR("Broadcom");
+MODULE_DESCRIPTION("Broadcom STB GISB arbiter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/fsl-mc/Makefile b/drivers/bus/fsl-mc/Makefile
index 4ae292a30e53..892946245527 100644
--- a/drivers/bus/fsl-mc/Makefile
+++ b/drivers/bus/fsl-mc/Makefile
@@ -15,7 +15,8 @@ mc-bus-driver-objs := fsl-mc-bus.o \
dprc-driver.o \
fsl-mc-allocator.o \
fsl-mc-msi.o \
- dpmcp.o
+ dpmcp.o \
+ obj-api.o
# MC userspace support
obj-$(CONFIG_FSL_MC_UAPI_SUPPORT) += fsl-mc-uapi.o
diff --git a/drivers/bus/fsl-mc/fsl-mc-private.h b/drivers/bus/fsl-mc/fsl-mc-private.h
index 1958fa065360..b3520ea1b9f4 100644
--- a/drivers/bus/fsl-mc/fsl-mc-private.h
+++ b/drivers/bus/fsl-mc/fsl-mc-private.h
@@ -48,7 +48,6 @@ struct dpmng_rsp_get_version {
/* DPMCP command IDs */
#define DPMCP_CMDID_CLOSE DPMCP_CMD(0x800)
-#define DPMCP_CMDID_OPEN DPMCP_CMD(0x80b)
#define DPMCP_CMDID_RESET DPMCP_CMD(0x005)
struct dpmcp_cmd_open {
@@ -91,7 +90,6 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
/* DPRC command IDs */
#define DPRC_CMDID_CLOSE DPRC_CMD(0x800)
-#define DPRC_CMDID_OPEN DPRC_CMD(0x805)
#define DPRC_CMDID_GET_API_VERSION DPRC_CMD(0xa05)
#define DPRC_CMDID_GET_ATTR DPRC_CMD(0x004)
@@ -453,7 +451,6 @@ int dprc_get_connection(struct fsl_mc_io *mc_io,
/* Command IDs */
#define DPBP_CMDID_CLOSE DPBP_CMD(0x800)
-#define DPBP_CMDID_OPEN DPBP_CMD(0x804)
#define DPBP_CMDID_ENABLE DPBP_CMD(0x002)
#define DPBP_CMDID_DISABLE DPBP_CMD(0x003)
@@ -492,7 +489,6 @@ struct dpbp_rsp_get_attributes {
/* Command IDs */
#define DPCON_CMDID_CLOSE DPCON_CMD(0x800)
-#define DPCON_CMDID_OPEN DPCON_CMD(0x808)
#define DPCON_CMDID_ENABLE DPCON_CMD(0x002)
#define DPCON_CMDID_DISABLE DPCON_CMD(0x003)
@@ -524,6 +520,41 @@ struct dpcon_cmd_set_notification {
__le64 user_ctx;
};
+/*
+ * Generic FSL MC API
+ */
+
+/* generic command versioning */
+#define OBJ_CMD_BASE_VERSION 1
+#define OBJ_CMD_ID_OFFSET 4
+
+#define OBJ_CMD(id) (((id) << OBJ_CMD_ID_OFFSET) | OBJ_CMD_BASE_VERSION)
+
+/* open command codes */
+#define DPRTC_CMDID_OPEN OBJ_CMD(0x810)
+#define DPNI_CMDID_OPEN OBJ_CMD(0x801)
+#define DPSW_CMDID_OPEN OBJ_CMD(0x802)
+#define DPIO_CMDID_OPEN OBJ_CMD(0x803)
+#define DPBP_CMDID_OPEN OBJ_CMD(0x804)
+#define DPRC_CMDID_OPEN OBJ_CMD(0x805)
+#define DPDMUX_CMDID_OPEN OBJ_CMD(0x806)
+#define DPCI_CMDID_OPEN OBJ_CMD(0x807)
+#define DPCON_CMDID_OPEN OBJ_CMD(0x808)
+#define DPSECI_CMDID_OPEN OBJ_CMD(0x809)
+#define DPAIOP_CMDID_OPEN OBJ_CMD(0x80a)
+#define DPMCP_CMDID_OPEN OBJ_CMD(0x80b)
+#define DPMAC_CMDID_OPEN OBJ_CMD(0x80c)
+#define DPDCEI_CMDID_OPEN OBJ_CMD(0x80d)
+#define DPDMAI_CMDID_OPEN OBJ_CMD(0x80e)
+#define DPDBG_CMDID_OPEN OBJ_CMD(0x80f)
+
+/* Generic object command IDs */
+#define OBJ_CMDID_CLOSE OBJ_CMD(0x800)
+#define OBJ_CMDID_RESET OBJ_CMD(0x005)
+
+struct fsl_mc_obj_cmd_open {
+ __le32 obj_id;
+};
/**
* struct fsl_mc_resource_pool - Pool of MC resources of a given
diff --git a/drivers/bus/fsl-mc/obj-api.c b/drivers/bus/fsl-mc/obj-api.c
new file mode 100644
index 000000000000..06c1dd84e38d
--- /dev/null
+++ b/drivers/bus/fsl-mc/obj-api.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
+/*
+ * Copyright 2021 NXP
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fsl/mc.h>
+
+#include "fsl-mc-private.h"
+
+static int fsl_mc_get_open_cmd_id(const char *type)
+{
+ static const struct {
+ int cmd_id;
+ const char *type;
+ } dev_ids[] = {
+ { DPRTC_CMDID_OPEN, "dprtc" },
+ { DPRC_CMDID_OPEN, "dprc" },
+ { DPNI_CMDID_OPEN, "dpni" },
+ { DPIO_CMDID_OPEN, "dpio" },
+ { DPSW_CMDID_OPEN, "dpsw" },
+ { DPBP_CMDID_OPEN, "dpbp" },
+ { DPCON_CMDID_OPEN, "dpcon" },
+ { DPMCP_CMDID_OPEN, "dpmcp" },
+ { DPMAC_CMDID_OPEN, "dpmac" },
+ { DPSECI_CMDID_OPEN, "dpseci" },
+ { DPDMUX_CMDID_OPEN, "dpdmux" },
+ { DPDCEI_CMDID_OPEN, "dpdcei" },
+ { DPAIOP_CMDID_OPEN, "dpaiop" },
+ { DPCI_CMDID_OPEN, "dpci" },
+ { DPDMAI_CMDID_OPEN, "dpdmai" },
+ { DPDBG_CMDID_OPEN, "dpdbg" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; dev_ids[i].type; i++)
+ if (!strcmp(dev_ids[i].type, type))
+ return dev_ids[i].cmd_id;
+
+ return -1;
+}
+
+int fsl_mc_obj_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int obj_id,
+ char *obj_type,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct fsl_mc_obj_cmd_open *cmd_params;
+ int err = 0;
+ int cmd_id = fsl_mc_get_open_cmd_id(obj_type);
+
+ if (cmd_id == -1)
+ return -ENODEV;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(cmd_id, cmd_flags, 0);
+ cmd_params = (struct fsl_mc_obj_cmd_open *)cmd.params;
+ cmd_params->obj_id = cpu_to_le32(obj_id);
+
+ /* send command to mc */
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_open);
+
+int fsl_mc_obj_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_close);
+
+int fsl_mc_obj_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(OBJ_CMDID_RESET, cmd_flags,
+ token);
+
+ /* send command to mc */
+ return mc_send_command(mc_io, &cmd);
+}
+EXPORT_SYMBOL_GPL(fsl_mc_obj_reset);
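With the per-object DPxx_CMDID_OPEN values folded into one table, an object driver can open, reset and close any MC object through the three exported helpers. The sketch below shows the intended call pattern for a dpbp object; the zero cmd_flags value and the -EIO fallback on close failure are illustrative assumptions, not requirements of this API.

/* Illustrative sketch only: open/reset/close an MC object via the generic API. */
static int example_reset_dpbp(struct fsl_mc_io *mc_io, int obj_id)
{
	u16 token;
	int err;

	err = fsl_mc_obj_open(mc_io, 0, obj_id, "dpbp", &token);
	if (err)
		return err;

	err = fsl_mc_obj_reset(mc_io, 0, token);

	/* Close regardless of the reset result; report the first failure. */
	if (fsl_mc_obj_close(mc_io, 0, token) && !err)
		err = -EIO;

	return err;
}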
diff --git a/drivers/bus/sun50i-de2.c b/drivers/bus/sun50i-de2.c
index 672518741f86..414f29cdedf0 100644
--- a/drivers/bus/sun50i-de2.c
+++ b/drivers/bus/sun50i-de2.c
@@ -15,10 +15,9 @@ static int sun50i_de2_bus_probe(struct platform_device *pdev)
int ret;
ret = sunxi_sram_claim(&pdev->dev);
- if (ret) {
- dev_err(&pdev->dev, "Error couldn't map SRAM to device\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "Couldn't map SRAM to device\n");
of_platform_populate(np, NULL, NULL, &pdev->dev);
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 6a8b7fb5be58..54c0ee6dda30 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -6,6 +6,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clkdev.h>
+#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
@@ -17,6 +18,7 @@
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
+#include <linux/timekeeping.h>
#include <linux/iopoll.h>
#include <linux/platform_data/ti-sysc.h>
@@ -51,11 +53,18 @@ struct sysc_address {
struct list_head node;
};
+struct sysc_module {
+ struct sysc *ddata;
+ struct list_head node;
+};
+
struct sysc_soc_info {
unsigned long general_purpose:1;
enum sysc_soc soc;
- struct mutex list_lock; /* disabled modules list lock */
+ struct mutex list_lock; /* disabled and restored modules list lock */
struct list_head disabled_modules;
+ struct list_head restored_modules;
+ struct notifier_block nb;
};
enum sysc_clocks {
@@ -131,6 +140,7 @@ struct sysc {
struct ti_sysc_cookie cookie;
const char *name;
u32 revision;
+ u32 sysconfig;
unsigned int reserved:1;
unsigned int enabled:1;
unsigned int needs_resume:1;
@@ -147,6 +157,7 @@ struct sysc {
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
bool is_child);
+static int sysc_reset(struct sysc *ddata);
static void sysc_write(struct sysc *ddata, int offset, u32 value)
{
@@ -223,37 +234,77 @@ static u32 sysc_read_sysstatus(struct sysc *ddata)
return sysc_read(ddata, offset);
}
-/* Poll on reset status */
-static int sysc_wait_softreset(struct sysc *ddata)
+static int sysc_poll_reset_sysstatus(struct sysc *ddata)
{
- u32 sysc_mask, syss_done, rstval;
- int syss_offset, error = 0;
-
- if (ddata->cap->regbits->srst_shift < 0)
- return 0;
-
- syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- sysc_mask = BIT(ddata->cap->regbits->srst_shift);
+ int error, retries;
+ u32 syss_done, rstval;
if (ddata->cfg.quirks & SYSS_QUIRK_RESETDONE_INVERTED)
syss_done = 0;
else
syss_done = ddata->cfg.syss_mask;
- if (syss_offset >= 0) {
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysstatus, ddata,
rstval, (rstval & ddata->cfg.syss_mask) ==
syss_done, 100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysstatus(ddata);
+ if ((rstval & ddata->cfg.syss_mask) == syss_done)
+ return 0;
+ udelay(2); /* Account for udelay flakeyness */
+ }
+ error = -ETIMEDOUT;
+ }
+
+ return error;
+}
+
+static int sysc_poll_reset_sysconfig(struct sysc *ddata)
+{
+ int error, retries;
+ u32 sysc_mask, rstval;
+
+ sysc_mask = BIT(ddata->cap->regbits->srst_shift);
- } else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS) {
+ if (likely(!timekeeping_suspended)) {
error = readx_poll_timeout_atomic(sysc_read_sysconfig, ddata,
rstval, !(rstval & sysc_mask),
100, MAX_MODULE_SOFTRESET_WAIT);
+ } else {
+ retries = MAX_MODULE_SOFTRESET_WAIT;
+ while (retries--) {
+ rstval = sysc_read_sysconfig(ddata);
+ if (!(rstval & sysc_mask))
+ return 0;
+ udelay(2); /* Account for udelay flakeyness */
+ }
+ error = -ETIMEDOUT;
}
return error;
}
+/* Poll on reset status */
+static int sysc_wait_softreset(struct sysc *ddata)
+{
+ int syss_offset, error = 0;
+
+ if (ddata->cap->regbits->srst_shift < 0)
+ return 0;
+
+ syss_offset = ddata->offsets[SYSC_SYSSTATUS];
+
+ if (syss_offset >= 0)
+ error = sysc_poll_reset_sysstatus(ddata);
+ else if (ddata->cfg.quirks & SYSC_QUIRK_RESET_STATUS)
+ error = sysc_poll_reset_sysconfig(ddata);
+
+ return error;
+}
+
static int sysc_add_named_clock_from_child(struct sysc *ddata,
const char *name,
const char *optfck_name)
@@ -1094,7 +1145,8 @@ set_midle:
best_mode = fls(ddata->cfg.midlemodes) - 1;
if (best_mode > SYSC_IDLE_MASK) {
dev_err(dev, "%s: invalid midlemode\n", __func__);
- return -EINVAL;
+ error = -EINVAL;
+ goto save_context;
}
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_MSTANDBY)
@@ -1112,13 +1164,16 @@ set_autoidle:
sysc_write_sysconfig(ddata, reg);
}
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ error = 0;
+
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
if (ddata->module_enable_quirk)
ddata->module_enable_quirk(ddata);
- return 0;
+ return error;
}
static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
@@ -1175,8 +1230,10 @@ static int sysc_disable_module(struct device *dev)
set_sidle:
/* Set SIDLE mode */
idlemodes = ddata->cfg.sidlemodes;
- if (!idlemodes || regbits->sidle_shift < 0)
- return 0;
+ if (!idlemodes || regbits->sidle_shift < 0) {
+ ret = 0;
+ goto save_context;
+ }
if (ddata->cfg.quirks & SYSC_QUIRK_SWSUP_SIDLE) {
best_mode = SYSC_IDLE_FORCE;
@@ -1184,7 +1241,8 @@ set_sidle:
ret = sysc_best_idle_mode(idlemodes, &best_mode);
if (ret) {
dev_err(dev, "%s: invalid sidlemode\n", __func__);
- return ret;
+ ret = -EINVAL;
+ goto save_context;
}
}
@@ -1195,10 +1253,13 @@ set_sidle:
reg |= 1 << regbits->autoidle_shift;
sysc_write_sysconfig(ddata, reg);
- /* Flush posted write */
- sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ ret = 0;
- return 0;
+save_context:
+ /* Save context and flush posted write */
+ ddata->sysconfig = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+
+ return ret;
}
static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
@@ -1336,13 +1397,40 @@ err_allow_idle:
return error;
}
+/*
+ * Checks if device context was lost. Assumes the sysconfig register value
+ * after lost context is different from the configured value. Only works for
+ * enabled devices.
+ *
+ * Eventually we may want to also add support for using the context lost
+ * registers that some SoCs have.
+ */
+static int sysc_check_context(struct sysc *ddata)
+{
+ u32 reg;
+
+ if (!ddata->enabled)
+ return -ENODATA;
+
+ reg = sysc_read(ddata, ddata->offsets[SYSC_SYSCONFIG]);
+ if (reg == ddata->sysconfig)
+ return 0;
+
+ return -EACCES;
+}
+
static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
{
struct device *dev = ddata->dev;
int error;
- /* Disable target module if it is enabled */
if (ddata->enabled) {
+ /* Nothing to do if enabled and context not lost */
+ error = sysc_check_context(ddata);
+ if (!error)
+ return 0;
+
+ /* Disable target module if it is enabled */
error = sysc_runtime_suspend(dev);
if (error)
dev_warn(dev, "reinit suspend failed: %i\n", error);
@@ -1353,6 +1441,15 @@ static int sysc_reinit_module(struct sysc *ddata, bool leave_enabled)
if (error)
dev_warn(dev, "reinit resume failed: %i\n", error);
+ /* Some modules like am335x gpmc need reset and restore of sysconfig */
+ if (ddata->cfg.quirks & SYSC_QUIRK_RESET_ON_CTX_LOST) {
+ error = sysc_reset(ddata);
+ if (error)
+ dev_warn(dev, "reinit reset failed: %i\n", error);
+
+ sysc_write_sysconfig(ddata, ddata->sysconfig);
+ }
+
if (leave_enabled)
return error;
@@ -1442,10 +1539,6 @@ struct sysc_revision_quirk {
static const struct sysc_revision_quirk sysc_revision_quirks[] = {
/* These drivers need to be fixed to not use pm_runtime_irq_safe() */
- SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
- SYSC_QUIRK_LEGACY_IDLE | SYSC_QUIRK_OPT_CLKS_IN_RESET),
- SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff,
- SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000046, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_LEGACY_IDLE),
SYSC_QUIRK("uart", 0, 0x50, 0x54, 0x58, 0x00000052, 0xffffffff,
@@ -1479,7 +1572,10 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK_CLKDM_NOAUTO),
SYSC_QUIRK("dwc3", 0x488c0000, 0, 0x10, -ENODEV, 0x500a0200, 0xffffffff,
SYSC_QUIRK_CLKDM_NOAUTO),
+ SYSC_QUIRK("gpio", 0, 0, 0x10, 0x114, 0x50600801, 0xffff00ff,
+ SYSC_QUIRK_OPT_CLKS_IN_RESET),
SYSC_QUIRK("gpmc", 0, 0, 0x10, 0x14, 0x00000060, 0xffffffff,
+ SYSC_QUIRK_REINIT_ON_CTX_LOST | SYSC_QUIRK_RESET_ON_CTX_LOST |
SYSC_QUIRK_GPMC_DEBUG),
SYSC_QUIRK("hdmi", 0, 0, 0x10, -ENODEV, 0x50030200, 0xffffffff,
SYSC_QUIRK_OPT_CLKS_NEEDED),
@@ -1515,10 +1611,11 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("usb_host_hs", 0, 0, 0x10, -ENODEV, 0x50700101, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
SYSC_QUIRK("usb_otg_hs", 0, 0x400, 0x404, 0x408, 0x00000050,
- 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY),
+ 0xffffffff, SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
+ SYSC_MODULE_QUIRK_OTG),
SYSC_QUIRK("usb_otg_hs", 0, 0, 0x10, -ENODEV, 0x4ea2080d, 0xffffffff,
SYSC_QUIRK_SWSUP_SIDLE | SYSC_QUIRK_SWSUP_MSTANDBY |
- SYSC_QUIRK_REINIT_ON_RESUME),
+ SYSC_QUIRK_REINIT_ON_CTX_LOST),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
/* PRUSS on am3, am4 and am5 */
@@ -1583,6 +1680,7 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("sdio", 0, 0, 0x10, -ENODEV, 0x40202301, 0xffff0ff0, 0),
SYSC_QUIRK("sdio", 0, 0x2fc, 0x110, 0x114, 0x31010000, 0xffffffff, 0),
SYSC_QUIRK("sdma", 0, 0, 0x2c, 0x28, 0x00010900, 0xffffffff, 0),
+ SYSC_QUIRK("sham", 0, 0x100, 0x110, 0x114, 0x40000c03, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40000902, 0xffffffff, 0),
SYSC_QUIRK("slimbus", 0, 0, 0x10, -ENODEV, 0x40002903, 0xffffffff, 0),
SYSC_QUIRK("smartreflex", 0, -ENODEV, 0x24, -ENODEV, 0x00000000, 0xffffffff, 0),
@@ -1874,6 +1972,22 @@ static void sysc_module_lock_quirk_rtc(struct sysc *ddata)
sysc_quirk_rtc(ddata, true);
}
+/* OTG omap2430 glue layer up to omap4 needs OTG_FORCESTDBY configured */
+static void sysc_module_enable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+
+ sysc_write(ddata, offset, 0);
+}
+
+static void sysc_module_disable_quirk_otg(struct sysc *ddata)
+{
+ int offset = 0x414; /* OTG_FORCESTDBY */
+ u32 val = BIT(0); /* ENABLEFORCE */
+
+ sysc_write(ddata, offset, val);
+}
+
/* 36xx SGX needs a quirk for to bypass OCP IPG interrupt logic */
static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
{
@@ -1956,6 +2070,11 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_OTG) {
+ ddata->module_enable_quirk = sysc_module_enable_quirk_otg;
+ ddata->module_disable_quirk = sysc_module_disable_quirk_otg;
+ }
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
@@ -2401,6 +2520,78 @@ static struct dev_pm_domain sysc_child_pm_domain = {
}
};
+/* Caller needs to take list_lock if ever used outside of cpu_pm */
+static void sysc_reinit_modules(struct sysc_soc_info *soc)
+{
+ struct sysc_module *module;
+ struct list_head *pos;
+ struct sysc *ddata;
+
+ list_for_each(pos, &sysc_soc->restored_modules) {
+ module = list_entry(pos, struct sysc_module, node);
+ ddata = module->ddata;
+ sysc_reinit_module(ddata, ddata->enabled);
+ }
+}
+
+/**
+ * sysc_context_notifier - optionally reset and restore module after idle
+ * @nb: notifier block
+ * @cmd: unused
+ * @v: unused
+ *
+ * Some interconnect target modules need to be restored, or reset and restored
+ * on the CPU_PM CPU_CLUSTER_PM_EXIT notifier. This is needed at least for am335x
+ * OTG and GPMC target modules even if the modules are unused.
+ */
+static int sysc_context_notifier(struct notifier_block *nb, unsigned long cmd,
+ void *v)
+{
+ struct sysc_soc_info *soc;
+
+ soc = container_of(nb, struct sysc_soc_info, nb);
+
+ switch (cmd) {
+ case CPU_CLUSTER_PM_ENTER:
+ break;
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
+ case CPU_CLUSTER_PM_EXIT:
+ sysc_reinit_modules(soc);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+/**
+ * sysc_add_restored - optionally add reset and restore quirk handling
+ * @ddata: device data
+ */
+static void sysc_add_restored(struct sysc *ddata)
+{
+ struct sysc_module *restored_module;
+
+ restored_module = kzalloc(sizeof(*restored_module), GFP_KERNEL);
+ if (!restored_module)
+ return;
+
+ restored_module->ddata = ddata;
+
+ mutex_lock(&sysc_soc->list_lock);
+
+ list_add(&restored_module->node, &sysc_soc->restored_modules);
+
+ if (sysc_soc->nb.notifier_call)
+ goto out_unlock;
+
+ sysc_soc->nb.notifier_call = sysc_context_notifier;
+ cpu_pm_register_notifier(&sysc_soc->nb);
+
+out_unlock:
+ mutex_unlock(&sysc_soc->list_lock);
+}
+
/**
* sysc_legacy_idle_quirk - handle children in omap_device compatible way
* @ddata: device driver data
@@ -2900,12 +3091,14 @@ static int sysc_add_disabled(unsigned long base)
}
/*
- * One time init to detect the booted SoC and disable unavailable features.
+ * One time init to detect the booted SoC, disable unavailable features
+ * and initialize list for optional cpu_pm notifier.
+ *
* Note that we initialize static data shared across all ti-sysc instances
* so ddata is only used for SoC type. This can be called from module_init
* once we no longer need to rely on platform data.
*/
-static int sysc_init_soc(struct sysc *ddata)
+static int sysc_init_static_data(struct sysc *ddata)
{
const struct soc_device_attribute *match;
struct ti_sysc_platform_data *pdata;
@@ -2921,6 +3114,7 @@ static int sysc_init_soc(struct sysc *ddata)
mutex_init(&sysc_soc->list_lock);
INIT_LIST_HEAD(&sysc_soc->disabled_modules);
+ INIT_LIST_HEAD(&sysc_soc->restored_modules);
sysc_soc->general_purpose = true;
pdata = dev_get_platdata(ddata->dev);
@@ -2985,15 +3179,24 @@ static int sysc_init_soc(struct sysc *ddata)
return 0;
}
-static void sysc_cleanup_soc(void)
+static void sysc_cleanup_static_data(void)
{
+ struct sysc_module *restored_module;
struct sysc_address *disabled_module;
struct list_head *pos, *tmp;
if (!sysc_soc)
return;
+ if (sysc_soc->nb.notifier_call)
+ cpu_pm_unregister_notifier(&sysc_soc->nb);
+
mutex_lock(&sysc_soc->list_lock);
+ list_for_each_safe(pos, tmp, &sysc_soc->restored_modules) {
+ restored_module = list_entry(pos, struct sysc_module, node);
+ list_del(pos);
+ kfree(restored_module);
+ }
list_for_each_safe(pos, tmp, &sysc_soc->disabled_modules) {
disabled_module = list_entry(pos, struct sysc_address, node);
list_del(pos);
@@ -3061,7 +3264,7 @@ static int sysc_probe(struct platform_device *pdev)
ddata->dev = &pdev->dev;
platform_set_drvdata(pdev, ddata);
- error = sysc_init_soc(ddata);
+ error = sysc_init_static_data(ddata);
if (error)
return error;
@@ -3159,6 +3362,9 @@ static int sysc_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
}
+ if (ddata->cfg.quirks & SYSC_QUIRK_REINIT_ON_CTX_LOST)
+ sysc_add_restored(ddata);
+
return 0;
err:
@@ -3240,7 +3446,7 @@ static void __exit sysc_exit(void)
{
bus_unregister_notifier(&platform_bus_type, &sysc_nb);
platform_driver_unregister(&sysc_driver);
- sysc_cleanup_soc();
+ sysc_cleanup_static_data();
}
module_exit(sysc_exit);
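The context-loss handling above hinges on the kernel's CPU PM notifier chain: ti-sysc registers a single notifier and walks its restored-modules list when the cluster leaves a low-power state. The standalone sketch below shows just that registration pattern in isolation; the function names and the pr_debug placeholder restore action are illustrative assumptions.

/* Illustrative sketch only: the cpu_pm notifier pattern relied on above. */
#include <linux/cpu_pm.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_ctx_notifier(struct notifier_block *nb, unsigned long cmd,
				void *v)
{
	if (cmd == CPU_CLUSTER_PM_EXIT)
		pr_debug("cluster woke up, restore module context here\n");

	return NOTIFY_OK;
}

static struct notifier_block example_ctx_nb = {
	.notifier_call = example_ctx_notifier,
};

static int example_register(void)
{
	return cpu_pm_register_notifier(&example_ctx_nb);
}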
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index bd2e5b1560f5..9877e413fce3 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -344,6 +344,12 @@ static void cdrom_sysctl_register(void);
static LIST_HEAD(cdrom_list);
+static void signal_media_change(struct cdrom_device_info *cdi)
+{
+ cdi->mc_flags = 0x3; /* set media changed bits, on both queues */
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
+}
+
int cdrom_dummy_generic_packet(struct cdrom_device_info *cdi,
struct packet_command *cgc)
{
@@ -616,6 +622,7 @@ int register_cdrom(struct gendisk *disk, struct cdrom_device_info *cdi)
ENSURE(cdo, generic_packet, CDC_GENERIC_PACKET);
cdi->mc_flags = 0;
cdi->options = CDO_USE_FFLAGS;
+ cdi->last_media_change_ms = ktime_to_ms(ktime_get());
if (autoclose == 1 && CDROM_CAN(CDC_CLOSE_TRAY))
cdi->options |= (int) CDO_AUTO_CLOSE;
@@ -864,7 +871,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
{
struct packet_command cgc;
char buffer[32];
- int ret, mmc3_profile;
+ int mmc3_profile;
init_cdrom_command(&cgc, buffer, sizeof(buffer), CGC_DATA_READ);
@@ -874,7 +881,7 @@ static void cdrom_mmc3_profile(struct cdrom_device_info *cdi)
cgc.cmd[8] = sizeof(buffer); /* Allocation Length */
cgc.quiet = 1;
- if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+ if (cdi->ops->generic_packet(cdi, &cgc))
mmc3_profile = 0xffff;
else
mmc3_profile = (buffer[6] << 8) | buffer[7];
@@ -1421,8 +1428,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
cdi->ops->check_events(cdi, 0, slot);
if (slot == CDSL_NONE) {
- /* set media changed bits, on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
return cdrom_load_unload(cdi, -1);
}
@@ -1455,7 +1461,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
slot = curslot;
/* set media changed bits on both queues */
- cdi->mc_flags = 0x3;
+ signal_media_change(cdi);
if ((ret = cdrom_load_unload(cdi, slot)))
return ret;
@@ -1521,7 +1527,7 @@ int media_changed(struct cdrom_device_info *cdi, int queue)
cdi->ioctl_events = 0;
if (changed) {
- cdi->mc_flags = 0x3; /* set bit on both queues */
+ signal_media_change(cdi);
ret |= 1;
cdi->media_written = 0;
}
@@ -2336,6 +2342,49 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
return ret;
}
+/*
+ * Media change detection with timing information.
+ *
+ * arg is a pointer to a cdrom_timed_media_change_info struct.
+ * arg->last_media_change may be set by calling code to signal
+ * the timestamp (in ms) of the last known media change (by the caller).
+ * Upon successful return, the ioctl will set arg->last_media_change
+ * to the latest media change timestamp known by the kernel/driver
+ * and set MEDIA_CHANGED_FLAG in arg->media_flags if that timestamp
+ * is more recent than the timestamp supplied by the caller.
+ */
+static int cdrom_ioctl_timed_media_change(struct cdrom_device_info *cdi,
+ unsigned long arg)
+{
+ int ret;
+ struct cdrom_timed_media_change_info __user *info;
+ struct cdrom_timed_media_change_info tmp_info;
+
+ if (!CDROM_CAN(CDC_MEDIA_CHANGED))
+ return -ENOSYS;
+
+ info = (struct cdrom_timed_media_change_info __user *)arg;
+ cd_dbg(CD_DO_IOCTL, "entering CDROM_TIMED_MEDIA_CHANGE\n");
+
+ ret = cdrom_ioctl_media_changed(cdi, CDSL_CURRENT);
+ if (ret < 0)
+ return ret;
+
+ if (copy_from_user(&tmp_info, info, sizeof(tmp_info)) != 0)
+ return -EFAULT;
+
+ tmp_info.media_flags = 0;
+ if (tmp_info.last_media_change - cdi->last_media_change_ms < 0)
+ tmp_info.media_flags |= MEDIA_CHANGED_FLAG;
+
+ tmp_info.last_media_change = cdi->last_media_change_ms;
+
+ if (copy_to_user(info, &tmp_info, sizeof(*info)) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
static int cdrom_ioctl_set_options(struct cdrom_device_info *cdi,
unsigned long arg)
{
@@ -3313,6 +3362,8 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
return cdrom_ioctl_eject_sw(cdi, arg);
case CDROM_MEDIA_CHANGED:
return cdrom_ioctl_media_changed(cdi, arg);
+ case CDROM_TIMED_MEDIA_CHANGE:
+ return cdrom_ioctl_timed_media_change(cdi, arg);
case CDROM_SET_OPTIONS:
return cdrom_ioctl_set_options(cdi, arg);
case CDROM_CLEAR_OPTIONS:
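From userspace, the new CDROM_TIMED_MEDIA_CHANGE ioctl lets a poller keep its own timestamp and ask whether the media changed since then. The sketch below assumes a <linux/cdrom.h> recent enough to carry the new ioctl number, struct cdrom_timed_media_change_info and MEDIA_CHANGED_FLAG; the device path is only an example.

/* Illustrative userspace sketch: one-shot query of the timed media change ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cdrom.h>

int main(void)
{
	struct cdrom_timed_media_change_info info;
	int fd = open("/dev/cdrom", O_RDONLY | O_NONBLOCK);

	if (fd < 0)
		return 1;

	/* Pass the timestamp from the previous call; 0 on the first call. */
	memset(&info, 0, sizeof(info));
	if (ioctl(fd, CDROM_TIMED_MEDIA_CHANGE, &info) == 0)
		printf("last change %lld ms, changed since caller's stamp: %s\n",
		       (long long)info.last_media_change,
		       (info.media_flags & MEDIA_CHANGED_FLAG) ? "yes" : "no");

	close(fd);
	return 0;
}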
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 8e1fe75af93f..d50cc1fd34d5 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -805,9 +805,14 @@ static int probe_gdrom(struct platform_device *devptr)
err = -ENOMEM;
goto probe_fail_free_irqs;
}
- add_disk(gd.disk);
+ err = add_disk(gd.disk);
+ if (err)
+ goto probe_fail_add_disk;
+
return 0;
+probe_fail_add_disk:
+ kfree(gd.toc);
probe_fail_free_irqs:
free_irq(HW_EVENT_GDROM_DMA, &gd);
free_irq(HW_EVENT_GDROM_CMD, &gd);
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 239eca4d6805..814b3d0ca7b7 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -63,7 +63,7 @@ config HW_RANDOM_AMD
config HW_RANDOM_ATMEL
tristate "Atmel Random Number Generator support"
- depends on ARCH_AT91 && HAVE_CLK && OF
+ depends on (ARCH_AT91 || COMPILE_TEST) && HAVE_CLK && OF
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -87,7 +87,7 @@ config HW_RANDOM_BA431
config HW_RANDOM_BCM2835
tristate "Broadcom BCM2835/BCM63xx Random Number Generator support"
depends on ARCH_BCM2835 || ARCH_BCM_NSP || ARCH_BCM_5301X || \
- ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC
+ ARCH_BCM_63XX || BCM63XX || BMIPS_GENERIC || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -100,7 +100,7 @@ config HW_RANDOM_BCM2835
config HW_RANDOM_IPROC_RNG200
tristate "Broadcom iProc/STB RNG200 support"
- depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB
+ depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the RNG200
@@ -165,7 +165,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3
+ depends on ARCH_OMAP16XX || ARCH_OMAP2PLUS || ARCH_MVEBU || ARCH_K3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -179,7 +179,7 @@ config HW_RANDOM_OMAP
config HW_RANDOM_OMAP3_ROM
tristate "OMAP3 ROM Random Number Generator support"
- depends on ARCH_OMAP3
+ depends on ARCH_OMAP3 || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
@@ -298,7 +298,7 @@ config HW_RANDOM_INGENIC_TRNG
config HW_RANDOM_NOMADIK
tristate "ST-Ericsson Nomadik Random Number Generator support"
- depends on ARCH_NOMADIK
+ depends on ARCH_NOMADIK || COMPILE_TEST
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/hw_random/ixp4xx-rng.c b/drivers/char/hw_random/ixp4xx-rng.c
index 188854dd16a9..7df5e9f7519d 100644
--- a/drivers/char/hw_random/ixp4xx-rng.c
+++ b/drivers/char/hw_random/ixp4xx-rng.c
@@ -42,13 +42,11 @@ static int ixp4xx_rng_probe(struct platform_device *pdev)
{
void __iomem * rng_base;
struct device *dev = &pdev->dev;
- struct resource *res;
if (!cpu_is_ixp46x()) /* includes IXP455 */
return -ENOSYS;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rng_base = devm_ioremap_resource(dev, res);
+ rng_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rng_base))
return PTR_ERR(rng_base);
diff --git a/drivers/char/hw_random/meson-rng.c b/drivers/char/hw_random/meson-rng.c
index e446236e81f2..8bb30282ca46 100644
--- a/drivers/char/hw_random/meson-rng.c
+++ b/drivers/char/hw_random/meson-rng.c
@@ -54,9 +54,10 @@ static int meson_rng_probe(struct platform_device *pdev)
if (IS_ERR(data->base))
return PTR_ERR(data->base);
- data->core_clk = devm_clk_get(dev, "core");
+ data->core_clk = devm_clk_get_optional(dev, "core");
if (IS_ERR(data->core_clk))
- data->core_clk = NULL;
+ return dev_err_probe(dev, PTR_ERR(data->core_clk),
+ "Failed to get core clock\n");
if (data->core_clk) {
ret = clk_prepare_enable(data->core_clk);
diff --git a/drivers/char/hw_random/mtk-rng.c b/drivers/char/hw_random/mtk-rng.c
index 8ad7b515a51b..6c00ea008555 100644
--- a/drivers/char/hw_random/mtk-rng.c
+++ b/drivers/char/hw_random/mtk-rng.c
@@ -166,8 +166,13 @@ static int mtk_rng_runtime_resume(struct device *dev)
return mtk_rng_init(&priv->rng);
}
-static UNIVERSAL_DEV_PM_OPS(mtk_rng_pm_ops, mtk_rng_runtime_suspend,
- mtk_rng_runtime_resume, NULL);
+static const struct dev_pm_ops mtk_rng_pm_ops = {
+ SET_RUNTIME_PM_OPS(mtk_rng_runtime_suspend,
+ mtk_rng_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
#define MTK_RNG_PM_OPS (&mtk_rng_pm_ops)
#else /* CONFIG_PM */
#define MTK_RNG_PM_OPS NULL
diff --git a/drivers/char/hw_random/s390-trng.c b/drivers/char/hw_random/s390-trng.c
index 7c673afd7241..2beaa35c0d74 100644
--- a/drivers/char/hw_random/s390-trng.c
+++ b/drivers/char/hw_random/s390-trng.c
@@ -111,7 +111,7 @@ static ssize_t trng_counter_show(struct device *dev,
#if IS_ENABLED(CONFIG_ARCH_RANDOM)
u64 arch_counter = atomic64_read(&s390_arch_random_counter);
- return snprintf(buf, PAGE_SIZE,
+ return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"arch: %llu\n"
@@ -119,7 +119,7 @@ static ssize_t trng_counter_show(struct device *dev,
dev_counter, hwrng_counter, arch_counter,
dev_counter + hwrng_counter + arch_counter);
#else
- return snprintf(buf, PAGE_SIZE,
+ return sysfs_emit(buf,
"trng: %llu\n"
"hwrng: %llu\n"
"total: %llu\n",
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index a90001e02bf7..0a7dde135db1 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -18,13 +18,20 @@ static DEFINE_IDA(rng_index_ida);
struct virtrng_info {
struct hwrng hwrng;
struct virtqueue *vq;
- struct completion have_data;
char name[25];
- unsigned int data_avail;
int index;
- bool busy;
bool hwrng_register_done;
bool hwrng_removed;
+ /* data transfer */
+ struct completion have_data;
+ unsigned int data_avail;
+ unsigned int data_idx;
+ /* minimal size returned by rng_buffer_size() */
+#if SMP_CACHE_BYTES < 32
+ u8 data[32];
+#else
+ u8 data[SMP_CACHE_BYTES];
+#endif
};
static void random_recv_done(struct virtqueue *vq)
@@ -35,54 +42,88 @@ static void random_recv_done(struct virtqueue *vq)
if (!virtqueue_get_buf(vi->vq, &vi->data_avail))
return;
+ vi->data_idx = 0;
+
complete(&vi->have_data);
}
-/* The host will fill any buffer we give it with sweet, sweet randomness. */
-static void register_buffer(struct virtrng_info *vi, u8 *buf, size_t size)
+static void request_entropy(struct virtrng_info *vi)
{
struct scatterlist sg;
- sg_init_one(&sg, buf, size);
+ reinit_completion(&vi->have_data);
+ vi->data_avail = 0;
+ vi->data_idx = 0;
+
+ sg_init_one(&sg, vi->data, sizeof(vi->data));
/* There should always be room for one buffer. */
- virtqueue_add_inbuf(vi->vq, &sg, 1, buf, GFP_KERNEL);
+ virtqueue_add_inbuf(vi->vq, &sg, 1, vi->data, GFP_KERNEL);
virtqueue_kick(vi->vq);
}
+static unsigned int copy_data(struct virtrng_info *vi, void *buf,
+ unsigned int size)
+{
+ size = min_t(unsigned int, size, vi->data_avail);
+ memcpy(buf, vi->data + vi->data_idx, size);
+ vi->data_idx += size;
+ vi->data_avail -= size;
+ if (vi->data_avail == 0)
+ request_entropy(vi);
+ return size;
+}
+
static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
{
int ret;
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
+ unsigned int chunk;
+ size_t read;
if (vi->hwrng_removed)
return -ENODEV;
- if (!vi->busy) {
- vi->busy = true;
- reinit_completion(&vi->have_data);
- register_buffer(vi, buf, size);
+ read = 0;
+
+ /* copy available data */
+ if (vi->data_avail) {
+ chunk = copy_data(vi, buf, size);
+ size -= chunk;
+ read += chunk;
}
if (!wait)
- return 0;
-
- ret = wait_for_completion_killable(&vi->have_data);
- if (ret < 0)
- return ret;
+ return read;
+
+ /* We have already copied available entropy,
+ * so either size is 0 or data_avail is 0
+ */
+ while (size != 0) {
+ /* data_avail is 0 but a request is pending */
+ ret = wait_for_completion_killable(&vi->have_data);
+ if (ret < 0)
+ return ret;
+ /* if vi->data_avail is 0, we have been interrupted
+		 * by a cleanup, but the buffer stays in the queue
+ */
+ if (vi->data_avail == 0)
+ return read;
- vi->busy = false;
+ chunk = copy_data(vi, buf + read, size);
+ size -= chunk;
+ read += chunk;
+ }
- return vi->data_avail;
+ return read;
}
static void virtio_cleanup(struct hwrng *rng)
{
struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
- if (vi->busy)
- wait_for_completion(&vi->have_data);
+ complete(&vi->have_data);
}
static int probe_common(struct virtio_device *vdev)
@@ -118,6 +159,9 @@ static int probe_common(struct virtio_device *vdev)
goto err_find;
}
+ /* we always have a pending entropy request */
+ request_entropy(vi);
+
return 0;
err_find:
@@ -133,9 +177,9 @@ static void remove_common(struct virtio_device *vdev)
vi->hwrng_removed = true;
vi->data_avail = 0;
+ vi->data_idx = 0;
complete(&vi->have_data);
vdev->config->reset(vdev);
- vi->busy = false;
if (vi->hwrng_register_done)
hwrng_unregister(&vi->hwrng);
vdev->config->del_vqs(vdev);
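
In short, the virtio-rng rework above serves reads from a driver-owned buffer that is refilled asynchronously whenever it drains. A simplified restatement of that copy step (the struct fields and request_entropy() follow the patch; the helper name is illustrative and locking/virtqueue details are omitted):

static size_t copy_cached_entropy(struct virtrng_info *vi, void *buf, size_t size)
{
	/* Hand out no more than what the host has already returned. */
	size_t n = min_t(size_t, size, vi->data_avail);

	memcpy(buf, vi->data + vi->data_idx, n);
	vi->data_idx += n;
	vi->data_avail -= n;

	/* Once the cache is empty, queue the buffer for a refill. */
	if (!vi->data_avail)
		request_entropy(vi);

	return n;
}
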
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index 249b31197eea..b061e6b513ed 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -69,12 +69,21 @@ config IPMI_SI
config IPMI_SSIF
tristate 'IPMI SMBus handler (SSIF)'
- select I2C
+ depends on I2C
help
Provides a driver for a SMBus interface to a BMC, meaning that you
have a driver that must be accessed over an I2C bus instead of a
standard interface. This module requires I2C support.
+config IPMI_IPMB
+ tristate 'IPMI IPMB interface'
+ depends on I2C && I2C_SLAVE
+ help
+	  Provides a driver for a system that sits directly on the IPMB bus.
+ It supports normal system interface messages to a BMC on the IPMB
+ bus, and it also supports direct messaging on the bus using
+ IPMB direct messages. This module requires I2C support.
+
config IPMI_POWERNV
depends on PPC_POWERNV
tristate 'POWERNV (OPAL firmware) IPMI interface'
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 84f47d18007f..7ce790efad92 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_IPMI_SI) += ipmi_si.o
obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o
obj-$(CONFIG_IPMI_PLAT_DATA) += ipmi_plat_data.o
obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
+obj-$(CONFIG_IPMI_IPMB) += ipmi_ipmb.o
obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c
index 6e3d247b55d1..7450904e330a 100644
--- a/drivers/char/ipmi/bt-bmc.c
+++ b/drivers/char/ipmi/bt-bmc.c
@@ -8,13 +8,11 @@
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
-#include <linux/mfd/syscon.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
-#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/timer.h>
@@ -59,8 +57,7 @@
struct bt_bmc {
struct device dev;
struct miscdevice miscdev;
- struct regmap *map;
- int offset;
+ void __iomem *base;
int irq;
wait_queue_head_t queue;
struct timer_list poll_timer;
@@ -69,29 +66,14 @@ struct bt_bmc {
static atomic_t open_count = ATOMIC_INIT(0);
-static const struct regmap_config bt_regmap_cfg = {
- .reg_bits = 32,
- .val_bits = 32,
- .reg_stride = 4,
-};
-
static u8 bt_inb(struct bt_bmc *bt_bmc, int reg)
{
- uint32_t val = 0;
- int rc;
-
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + reg, &val);
- WARN(rc != 0, "regmap_read() failed: %d\n", rc);
-
- return rc == 0 ? (u8) val : 0;
+ return readb(bt_bmc->base + reg);
}
static void bt_outb(struct bt_bmc *bt_bmc, u8 data, int reg)
{
- int rc;
-
- rc = regmap_write(bt_bmc->map, bt_bmc->offset + reg, data);
- WARN(rc != 0, "regmap_write() failed: %d\n", rc);
+ writeb(data, bt_bmc->base + reg);
}
static void clr_rd_ptr(struct bt_bmc *bt_bmc)
@@ -376,18 +358,15 @@ static irqreturn_t bt_bmc_irq(int irq, void *arg)
{
struct bt_bmc *bt_bmc = arg;
u32 reg;
- int rc;
- rc = regmap_read(bt_bmc->map, bt_bmc->offset + BT_CR2, &reg);
- if (rc)
- return IRQ_NONE;
+ reg = readl(bt_bmc->base + BT_CR2);
reg &= BT_CR2_IRQ_H2B | BT_CR2_IRQ_HBUSY;
if (!reg)
return IRQ_NONE;
/* ack pending IRQs */
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR2, reg);
+ writel(reg, bt_bmc->base + BT_CR2);
wake_up(&bt_bmc->queue);
return IRQ_HANDLED;
@@ -398,6 +377,7 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
{
struct device *dev = &pdev->dev;
int rc;
+ u32 reg;
bt_bmc->irq = platform_get_irq_optional(pdev, 0);
if (bt_bmc->irq < 0)
@@ -417,11 +397,11 @@ static int bt_bmc_config_irq(struct bt_bmc *bt_bmc,
* will be cleared (along with B2H) when we can write the next
* message to the BT buffer
*/
- rc = regmap_update_bits(bt_bmc->map, bt_bmc->offset + BT_CR1,
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY),
- (BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY));
+ reg = readl(bt_bmc->base + BT_CR1);
+ reg |= BT_CR1_IRQ_H2B | BT_CR1_IRQ_HBUSY;
+ writel(reg, bt_bmc->base + BT_CR1);
- return rc;
+ return 0;
}
static int bt_bmc_probe(struct platform_device *pdev)
@@ -439,25 +419,9 @@ static int bt_bmc_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, bt_bmc);
- bt_bmc->map = syscon_node_to_regmap(pdev->dev.parent->of_node);
- if (IS_ERR(bt_bmc->map)) {
- void __iomem *base;
-
- /*
- * Assume it's not the MFD-based devicetree description, in
- * which case generate a regmap ourselves
- */
- base = devm_platform_ioremap_resource(pdev, 0);
- if (IS_ERR(base))
- return PTR_ERR(base);
-
- bt_bmc->map = devm_regmap_init_mmio(dev, base, &bt_regmap_cfg);
- bt_bmc->offset = 0;
- } else {
- rc = of_property_read_u32(dev->of_node, "reg", &bt_bmc->offset);
- if (rc)
- return rc;
- }
+ bt_bmc->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(bt_bmc->base))
+ return PTR_ERR(bt_bmc->base);
mutex_init(&bt_bmc->mutex);
init_waitqueue_head(&bt_bmc->queue);
@@ -483,12 +447,12 @@ static int bt_bmc_probe(struct platform_device *pdev)
add_timer(&bt_bmc->poll_timer);
}
- regmap_write(bt_bmc->map, bt_bmc->offset + BT_CR0,
- (BT_IO_BASE << BT_CR0_IO_BASE) |
+ writel((BT_IO_BASE << BT_CR0_IO_BASE) |
(BT_IRQ << BT_CR0_IRQ) |
BT_CR0_EN_CLR_SLV_RDP |
BT_CR0_EN_CLR_SLV_WRP |
- BT_CR0_ENABLE_IBT);
+ BT_CR0_ENABLE_IBT,
+ bt_bmc->base + BT_CR0);
clr_b_busy(bt_bmc);
@@ -508,6 +472,7 @@ static int bt_bmc_remove(struct platform_device *pdev)
static const struct of_device_id bt_bmc_match[] = {
{ .compatible = "aspeed,ast2400-ibt-bmc" },
{ .compatible = "aspeed,ast2500-ibt-bmc" },
+ { .compatible = "aspeed,ast2600-ibt-bmc" },
{ },
};
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 3dd1d5abb298..d160fa4c73fe 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -247,11 +247,13 @@ static int handle_recv(struct ipmi_file_private *priv,
if (msg->msg.data_len > 0) {
if (rsp->msg.data_len < msg->msg.data_len) {
- rv2 = -EMSGSIZE;
- if (trunc)
+ if (trunc) {
+ rv2 = -EMSGSIZE;
msg->msg.data_len = rsp->msg.data_len;
- else
+ } else {
+ rv = -EMSGSIZE;
goto recv_putback_on_err;
+ }
}
if (copy_to_user(rsp->msg.data,
diff --git a/drivers/char/ipmi/ipmi_ipmb.c b/drivers/char/ipmi/ipmi_ipmb.c
new file mode 100644
index 000000000000..ba0c2d2c6bbe
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ipmb.c
@@ -0,0 +1,539 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Driver to talk to a remote management controller on IPMB.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/semaphore.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/ipmi_msgdefs.h>
+#include <linux/ipmi_smi.h>
+
+#define DEVICE_NAME "ipmi-ipmb"
+
+static int bmcaddr = 0x20;
+module_param(bmcaddr, int, 0644);
+MODULE_PARM_DESC(bmcaddr, "Address to use for BMC.");
+
+static unsigned int retry_time_ms = 250;
+module_param(retry_time_ms, uint, 0644);
+MODULE_PARM_DESC(retry_time_ms, "Time between retries, in milliseconds.");
+
+static unsigned int max_retries = 1;
+module_param(max_retries, uint, 0644);
+MODULE_PARM_DESC(max_retries, "Max resends of a command before timing out.");
+
+/* Add room for the two slave addresses, two checksums, and rqSeq. */
+#define IPMB_MAX_MSG_LEN (IPMI_MAX_MSG_LENGTH + 5)
+
+struct ipmi_ipmb_dev {
+ struct ipmi_smi *intf;
+ struct i2c_client *client;
+
+ struct ipmi_smi_handlers handlers;
+
+ bool ready;
+
+ u8 curr_seq;
+
+ u8 bmcaddr;
+ u32 retry_time_ms;
+ u32 max_retries;
+
+ struct ipmi_smi_msg *next_msg;
+ struct ipmi_smi_msg *working_msg;
+
+ /* Transmit thread. */
+ struct task_struct *thread;
+ struct semaphore wake_thread;
+ struct semaphore got_rsp;
+ spinlock_t lock;
+ bool stopping;
+
+ u8 xmitmsg[IPMB_MAX_MSG_LEN];
+ unsigned int xmitlen;
+
+ u8 rcvmsg[IPMB_MAX_MSG_LEN];
+ unsigned int rcvlen;
+ bool overrun;
+};
+
+static bool valid_ipmb(struct ipmi_ipmb_dev *iidev)
+{
+ u8 *msg = iidev->rcvmsg;
+ u8 netfn;
+
+ if (iidev->overrun)
+ return false;
+
+ /* Minimum message size. */
+ if (iidev->rcvlen < 7)
+ return false;
+
+ /* Is it a response? */
+ netfn = msg[1] >> 2;
+ if (netfn & 1) {
+ /* Response messages have an added completion code. */
+ if (iidev->rcvlen < 8)
+ return false;
+ }
+
+ if (ipmb_checksum(msg, 3) != 0)
+ return false;
+ if (ipmb_checksum(msg + 3, iidev->rcvlen - 3) != 0)
+ return false;
+
+ return true;
+}
+
+static void ipmi_ipmb_check_msg_done(struct ipmi_ipmb_dev *iidev)
+{
+ struct ipmi_smi_msg *imsg = NULL;
+ u8 *msg = iidev->rcvmsg;
+ bool is_cmd;
+ unsigned long flags;
+
+ if (iidev->rcvlen == 0)
+ return;
+ if (!valid_ipmb(iidev))
+ goto done;
+
+ is_cmd = ((msg[1] >> 2) & 1) == 0;
+
+ if (is_cmd) {
+ /* Ignore commands until we are up. */
+ if (!iidev->ready)
+ goto done;
+
+ /* It's a command, allocate a message for it. */
+ imsg = ipmi_alloc_smi_msg();
+ if (!imsg)
+ goto done;
+ imsg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ imsg->data_size = 0;
+ } else {
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->working_msg) {
+ u8 seq = msg[4] >> 2;
+ bool xmit_rsp = (iidev->working_msg->data[0] >> 2) & 1;
+
+ /*
+ * Responses should carry the sequence we sent
+ * them with. If it's a transmitted response,
+ * ignore it. And if the message hasn't been
+ * transmitted, ignore it.
+ */
+ if (!xmit_rsp && seq == iidev->curr_seq) {
+ iidev->curr_seq = (iidev->curr_seq + 1) & 0x3f;
+
+ imsg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ }
+
+ if (!imsg)
+ goto done;
+
+ if (imsg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Keep the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 3, iidev->rcvlen - 4);
+ imsg->rsp_size = iidev->rcvlen - 3;
+ } else {
+ imsg->rsp[0] = msg[1]; /* NetFn/LUN */
+ /*
+ * Skip the source address, rqSeq. Drop the trailing
+ * checksum.
+ */
+ memcpy(imsg->rsp + 1, msg + 5, iidev->rcvlen - 6);
+ imsg->rsp_size = iidev->rcvlen - 5;
+ }
+ ipmi_smi_msg_received(iidev->intf, imsg);
+ if (!is_cmd)
+ up(&iidev->got_rsp);
+
+done:
+ iidev->overrun = false;
+ iidev->rcvlen = 0;
+}
+
+/*
+ * The IPMB protocol only supports i2c writes so there is no need to
+ * support I2C_SLAVE_READ* events, except to know if the other end has
+ * issued a read without going to stop mode.
+ */
+static int ipmi_ipmb_slave_cb(struct i2c_client *client,
+ enum i2c_slave_event event, u8 *val)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ switch (event) {
+ case I2C_SLAVE_WRITE_REQUESTED:
+ ipmi_ipmb_check_msg_done(iidev);
+ /*
+ * First byte is the slave address, to ease the checksum
+ * calculation.
+ */
+ iidev->rcvmsg[0] = client->addr << 1;
+ iidev->rcvlen = 1;
+ break;
+
+ case I2C_SLAVE_WRITE_RECEIVED:
+ if (iidev->rcvlen >= sizeof(iidev->rcvmsg))
+ iidev->overrun = true;
+ else
+ iidev->rcvmsg[iidev->rcvlen++] = *val;
+ break;
+
+ case I2C_SLAVE_READ_REQUESTED:
+ case I2C_SLAVE_STOP:
+ ipmi_ipmb_check_msg_done(iidev);
+ break;
+
+ case I2C_SLAVE_READ_PROCESSED:
+ break;
+ }
+
+ return 0;
+}
+
+static void ipmi_ipmb_send_response(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg, u8 cc)
+{
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+		 * It's a response being sent, we need to return a
+ * response response. Fake a send msg command
+ * response with channel 0. This will always be ipmb
+ * direct.
+ */
+ msg->data[0] = (IPMI_NETFN_APP_REQUEST | 1) << 2;
+ msg->data[3] = IPMI_SEND_MSG_CMD;
+ msg->data[4] = cc;
+ msg->data_size = 5;
+ }
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = msg->data[2];
+ msg->rsp[3] = msg->data[3];
+ msg->rsp[4] = cc;
+ msg->rsp_size = 5;
+ } else {
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = cc;
+ msg->rsp_size = 3;
+ }
+ ipmi_smi_msg_received(iidev->intf, msg);
+}
+
+static void ipmi_ipmb_format_for_xmit(struct ipmi_ipmb_dev *iidev,
+ struct ipmi_smi_msg *msg)
+{
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ iidev->xmitmsg[0] = msg->data[1];
+ iidev->xmitmsg[1] = msg->data[0];
+ memcpy(iidev->xmitmsg + 4, msg->data + 2, msg->data_size - 2);
+ iidev->xmitlen = msg->data_size + 2;
+ } else {
+ iidev->xmitmsg[0] = iidev->bmcaddr;
+ iidev->xmitmsg[1] = msg->data[0];
+ iidev->xmitmsg[4] = 0;
+ memcpy(iidev->xmitmsg + 5, msg->data + 1, msg->data_size - 1);
+ iidev->xmitlen = msg->data_size + 4;
+ }
+ iidev->xmitmsg[3] = iidev->client->addr << 1;
+ if (((msg->data[0] >> 2) & 1) == 0)
+ /* If it's a command, put in our own sequence number. */
+ iidev->xmitmsg[4] = ((iidev->xmitmsg[4] & 0x03) |
+ (iidev->curr_seq << 2));
+
+ /* Now add on the final checksums. */
+ iidev->xmitmsg[2] = ipmb_checksum(iidev->xmitmsg, 2);
+ iidev->xmitmsg[iidev->xmitlen] =
+ ipmb_checksum(iidev->xmitmsg + 3, iidev->xmitlen - 3);
+ iidev->xmitlen++;
+}
+
+static int ipmi_ipmb_thread(void *data)
+{
+ struct ipmi_ipmb_dev *iidev = data;
+
+ while (!kthread_should_stop()) {
+ long ret;
+ struct i2c_msg i2c_msg;
+ struct ipmi_smi_msg *msg = NULL;
+ unsigned long flags;
+ unsigned int retries = 0;
+
+ /* Wait for a message to send */
+ ret = down_interruptible(&iidev->wake_thread);
+ if (iidev->stopping)
+ break;
+ if (ret)
+ continue;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ if (iidev->next_msg) {
+ msg = iidev->next_msg;
+ iidev->next_msg = NULL;
+ }
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ if (!msg)
+ continue;
+
+ ipmi_ipmb_format_for_xmit(iidev, msg);
+
+retry:
+ i2c_msg.len = iidev->xmitlen - 1;
+ if (i2c_msg.len > 32) {
+ ipmi_ipmb_send_response(iidev, msg,
+ IPMI_REQ_LEN_EXCEEDED_ERR);
+ continue;
+ }
+
+ i2c_msg.addr = iidev->xmitmsg[0] >> 1;
+ i2c_msg.flags = 0;
+ i2c_msg.buf = iidev->xmitmsg + 1;
+
+ /* Rely on i2c_transfer for a barrier. */
+ iidev->working_msg = msg;
+
+ ret = i2c_transfer(iidev->client->adapter, &i2c_msg, 1);
+
+ if ((msg->data[0] >> 2) & 1) {
+ /*
+ * It's a response, nothing will be returned
+ * by the other end.
+ */
+
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg,
+ ret < 0 ? IPMI_BUS_ERR : 0);
+ continue;
+ }
+ if (ret < 0) {
+ iidev->working_msg = NULL;
+ ipmi_ipmb_send_response(iidev, msg, IPMI_BUS_ERR);
+ continue;
+ }
+
+ /* A command was sent, wait for its response. */
+ ret = down_timeout(&iidev->got_rsp,
+ msecs_to_jiffies(iidev->retry_time_ms));
+
+ /*
+ * Grab the message if we can. If the handler hasn't
+ * already handled it, the message will still be there.
+ */
+ spin_lock_irqsave(&iidev->lock, flags);
+ msg = iidev->working_msg;
+ iidev->working_msg = NULL;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ if (!msg && ret) {
+ /*
+ * If working_msg is not set and we timed out,
+			 * that means the message was grabbed by
+ * check_msg_done before we could grab it
+ * here. Wait again for check_msg_done to up
+ * the semaphore.
+ */
+ down(&iidev->got_rsp);
+ } else if (msg && ++retries <= iidev->max_retries) {
+ spin_lock_irqsave(&iidev->lock, flags);
+ iidev->working_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+ goto retry;
+ }
+
+ if (msg)
+ ipmi_ipmb_send_response(iidev, msg, IPMI_TIMEOUT_ERR);
+ }
+
+ if (iidev->next_msg)
+ /* Return an unspecified error. */
+ ipmi_ipmb_send_response(iidev, iidev->next_msg, 0xff);
+
+ return 0;
+}
+
+static int ipmi_ipmb_start_processing(void *send_info,
+ struct ipmi_smi *new_intf)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ iidev->intf = new_intf;
+ iidev->ready = true;
+ return 0;
+}
+
+static void ipmi_ipmb_stop_thread(struct ipmi_ipmb_dev *iidev)
+{
+ if (iidev->thread) {
+ struct task_struct *t = iidev->thread;
+
+ iidev->thread = NULL;
+ iidev->stopping = true;
+ up(&iidev->wake_thread);
+ up(&iidev->got_rsp);
+ kthread_stop(t);
+ }
+}
+
+static void ipmi_ipmb_shutdown(void *send_info)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+
+ ipmi_ipmb_stop_thread(iidev);
+}
+
+static void ipmi_ipmb_sender(void *send_info,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_ipmb_dev *iidev = send_info;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iidev->lock, flags);
+ BUG_ON(iidev->next_msg);
+
+ iidev->next_msg = msg;
+ spin_unlock_irqrestore(&iidev->lock, flags);
+
+ up(&iidev->wake_thread);
+}
+
+static void ipmi_ipmb_request_events(void *send_info)
+{
+ /* We don't fetch events here. */
+}
+
+static int ipmi_ipmb_remove(struct i2c_client *client)
+{
+ struct ipmi_ipmb_dev *iidev = i2c_get_clientdata(client);
+
+ if (iidev->client) {
+ iidev->client = NULL;
+ i2c_slave_unregister(client);
+ }
+ ipmi_ipmb_stop_thread(iidev);
+
+ return 0;
+}
+
+static int ipmi_ipmb_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ipmi_ipmb_dev *iidev;
+ int rv;
+
+ iidev = devm_kzalloc(&client->dev, sizeof(*iidev), GFP_KERNEL);
+ if (!iidev)
+ return -ENOMEM;
+
+ if (of_property_read_u8(dev->of_node, "bmcaddr", &iidev->bmcaddr) != 0)
+ iidev->bmcaddr = bmcaddr;
+ if (iidev->bmcaddr == 0 || iidev->bmcaddr & 1) {
+ /* Can't have the write bit set. */
+ dev_notice(&client->dev,
+ "Invalid bmc address value %2.2x\n", iidev->bmcaddr);
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "retry-time",
+ &iidev->retry_time_ms) != 0)
+ iidev->retry_time_ms = retry_time_ms;
+
+ if (of_property_read_u32(dev->of_node, "max-retries",
+ &iidev->max_retries) != 0)
+ iidev->max_retries = max_retries;
+
+ i2c_set_clientdata(client, iidev);
+ client->flags |= I2C_CLIENT_SLAVE;
+
+ rv = i2c_slave_register(client, ipmi_ipmb_slave_cb);
+ if (rv)
+ return rv;
+
+ iidev->client = client;
+
+ iidev->handlers.flags = IPMI_SMI_CAN_HANDLE_IPMB_DIRECT;
+ iidev->handlers.start_processing = ipmi_ipmb_start_processing;
+ iidev->handlers.shutdown = ipmi_ipmb_shutdown;
+ iidev->handlers.sender = ipmi_ipmb_sender;
+ iidev->handlers.request_events = ipmi_ipmb_request_events;
+
+ spin_lock_init(&iidev->lock);
+ sema_init(&iidev->wake_thread, 0);
+ sema_init(&iidev->got_rsp, 0);
+
+ iidev->thread = kthread_run(ipmi_ipmb_thread, iidev,
+ "kipmb%4.4x", client->addr);
+ if (IS_ERR(iidev->thread)) {
+ rv = PTR_ERR(iidev->thread);
+ dev_notice(&client->dev,
+ "Could not start kernel thread: error %d\n", rv);
+ goto out_err;
+ }
+
+ rv = ipmi_register_smi(&iidev->handlers,
+ iidev,
+ &client->dev,
+ iidev->bmcaddr);
+ if (rv)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ ipmi_ipmb_remove(client);
+ return rv;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_ipmi_ipmb_match[] = {
+ { .type = "ipmi", .compatible = DEVICE_NAME },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_ipmi_ipmb_match);
+#else
+#define of_ipmi_ipmb_match NULL
+#endif
+
+static const struct i2c_device_id ipmi_ipmb_id[] = {
+ { DEVICE_NAME, 0 },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ipmi_ipmb_id);
+
+static struct i2c_driver ipmi_ipmb_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = DEVICE_NAME,
+ .of_match_table = of_ipmi_ipmb_match,
+ },
+ .probe = ipmi_ipmb_probe,
+ .remove = ipmi_ipmb_remove,
+ .id_table = ipmi_ipmb_id,
+};
+module_i2c_driver(ipmi_ipmb_driver);
+
+MODULE_AUTHOR("Corey Minyard");
+MODULE_DESCRIPTION("IPMI IPMB driver");
+MODULE_LICENSE("GPL v2");
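
For reference, the on-wire IPMB framing this driver assembles in ipmi_ipmb_format_for_xmit() protects both the two-byte header and the body with two's-complement checksums, which is what valid_ipmb() verifies on receive (and why ipmb_checksum() is exported from ipmi_msghandler.c further below). A worked example and a minimal standalone check (the helper name is hypothetical):

/*
 * Example: header bytes { rsAddr = 0x20, netFn/rsLUN = 0x18 }
 *
 *   csum = -(0x20 + 0x18) & 0xff = 0xc8
 *   0x20 + 0x18 + 0xc8 = 0x100  ->  0x00, so the block sums to zero
 */
static bool ipmb_block_ok(const u8 *blk, unsigned int len)
{
	u8 sum = 0;
	unsigned int i;

	for (i = 0; i < len; i++)	/* len includes the trailing checksum */
		sum += blk[i];

	return sum == 0;		/* same condition as ipmb_checksum() == 0 */
}
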
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index e96cb5c4f97a..deed355422f4 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -653,6 +653,11 @@ static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
+static int is_ipmb_direct_addr(struct ipmi_addr *addr)
+{
+ return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
+}
+
static void free_recv_msg_list(struct list_head *q)
{
struct ipmi_recv_msg *msg, *msg2;
@@ -805,6 +810,17 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
&& (ipmb_addr1->lun == ipmb_addr2->lun));
}
+ if (is_ipmb_direct_addr(addr1)) {
+ struct ipmi_ipmb_direct_addr *daddr1
+ = (struct ipmi_ipmb_direct_addr *) addr1;
+ struct ipmi_ipmb_direct_addr *daddr2
+ = (struct ipmi_ipmb_direct_addr *) addr2;
+
+ return daddr1->slave_addr == daddr2->slave_addr &&
+ daddr1->rq_lun == daddr2->rq_lun &&
+ daddr1->rs_lun == daddr2->rs_lun;
+ }
+
if (is_lan_addr(addr1)) {
struct ipmi_lan_addr *lan_addr1
= (struct ipmi_lan_addr *) addr1;
@@ -843,6 +859,23 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len)
return 0;
}
+ if (is_ipmb_direct_addr(addr)) {
+ struct ipmi_ipmb_direct_addr *daddr = (void *) addr;
+
+ if (addr->channel != 0)
+ return -EINVAL;
+ if (len < sizeof(struct ipmi_ipmb_direct_addr))
+ return -EINVAL;
+
+ if (daddr->slave_addr & 0x01)
+ return -EINVAL;
+ if (daddr->rq_lun >= 4)
+ return -EINVAL;
+ if (daddr->rs_lun >= 4)
+ return -EINVAL;
+ return 0;
+ }
+
if (is_lan_addr(addr)) {
if (len < sizeof(struct ipmi_lan_addr))
return -EINVAL;
@@ -862,6 +895,9 @@ unsigned int ipmi_addr_length(int addr_type)
|| (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
return sizeof(struct ipmi_ipmb_addr);
+ if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
+ return sizeof(struct ipmi_ipmb_direct_addr);
+
if (addr_type == IPMI_LAN_ADDR_TYPE)
return sizeof(struct ipmi_lan_addr);
@@ -1710,7 +1746,7 @@ int ipmi_unregister_for_cmd(struct ipmi_user *user,
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
-static unsigned char
+unsigned char
ipmb_checksum(unsigned char *data, int size)
{
unsigned char csum = 0;
@@ -1720,6 +1756,7 @@ ipmb_checksum(unsigned char *data, int size)
return -csum;
}
+EXPORT_SYMBOL(ipmb_checksum);
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
struct kernel_ipmi_msg *msg,
@@ -2051,6 +2088,58 @@ out_err:
return rv;
}
+static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
+ struct ipmi_addr *addr,
+ long msgid,
+ struct kernel_ipmi_msg *msg,
+ struct ipmi_smi_msg *smi_msg,
+ struct ipmi_recv_msg *recv_msg,
+ unsigned char source_lun)
+{
+ struct ipmi_ipmb_direct_addr *daddr;
+ bool is_cmd = !(recv_msg->msg.netfn & 0x1);
+
+ if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
+ return -EAFNOSUPPORT;
+
+ /* Responses must have a completion code. */
+ if (!is_cmd && msg->data_len < 1) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EMSGSIZE;
+ }
+
+ daddr = (struct ipmi_ipmb_direct_addr *) addr;
+ if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
+ ipmi_inc_stat(intf, sent_invalid_commands);
+ return -EINVAL;
+ }
+
+ smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
+ smi_msg->msgid = msgid;
+
+ if (is_cmd) {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
+ } else {
+ smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
+ smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
+ }
+ smi_msg->data[1] = daddr->slave_addr;
+ smi_msg->data[3] = msg->cmd;
+
+ memcpy(smi_msg->data + 4, msg->data, msg->data_len);
+ smi_msg->data_size = msg->data_len + 4;
+
+ smi_msg->user_data = recv_msg;
+
+ return 0;
+}
+
static int i_ipmi_req_lan(struct ipmi_smi *intf,
struct ipmi_addr *addr,
long msgid,
@@ -2240,6 +2329,9 @@ static int i_ipmi_request(struct ipmi_user *user,
rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
source_address, source_lun,
retries, retry_time_ms);
+ } else if (is_ipmb_direct_addr(addr)) {
+ rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
+ recv_msg, source_lun);
} else if (is_lan_addr(addr)) {
rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
source_lun, retries, retry_time_ms);
@@ -2369,6 +2461,13 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
return;
}
+ if (msg->msg.data[0]) {
+ dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
+ msg->msg.data[0]);
+ intf->bmc->dyn_id_set = 0;
+ goto out;
+ }
+
rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
if (rv) {
@@ -2384,7 +2483,7 @@ static void bmc_device_id_handler(struct ipmi_smi *intf,
smp_wmb();
intf->bmc->dyn_id_set = 1;
}
-
+out:
wake_up(&intf->waitq);
}
@@ -2617,7 +2716,7 @@ static ssize_t device_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", id.device_id);
+ return sysfs_emit(buf, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);
@@ -2633,7 +2732,7 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
+ return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);
@@ -2648,7 +2747,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
if (rv)
return rv;
- return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
+ return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);
@@ -2664,7 +2763,7 @@ static ssize_t firmware_revision_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
+ return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);
@@ -2681,7 +2780,7 @@ static ssize_t ipmi_version_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "%u.%u\n",
+ return sysfs_emit(buf, "%u.%u\n",
ipmi_version_major(&id),
ipmi_version_minor(&id));
}
@@ -2699,7 +2798,7 @@ static ssize_t add_dev_support_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
+ return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
NULL);
@@ -2716,7 +2815,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
+ return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);
@@ -2732,7 +2831,7 @@ static ssize_t product_id_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
+ return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);
@@ -2748,7 +2847,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
if (rv)
return rv;
- return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
+ return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
id.aux_firmware_revision[3],
id.aux_firmware_revision[2],
id.aux_firmware_revision[1],
@@ -2770,7 +2869,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
if (!guid_set)
return -ENOENT;
- return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
+ return sysfs_emit(buf, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);
@@ -3794,6 +3893,123 @@ static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
return rv;
}
+static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct cmd_rcvr *rcvr;
+ int rv = 0;
+ struct ipmi_user *user = NULL;
+ struct ipmi_ipmb_direct_addr *daddr;
+ struct ipmi_recv_msg *recv_msg;
+ unsigned char netfn = msg->rsp[0] >> 2;
+ unsigned char cmd = msg->rsp[3];
+
+ rcu_read_lock();
+ /* We always use channel 0 for direct messages. */
+ rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
+ if (rcvr) {
+ user = rcvr->user;
+ kref_get(&user->refcount);
+ } else
+ user = NULL;
+ rcu_read_unlock();
+
+ if (user == NULL) {
+ /* We didn't find a user, deliver an error response. */
+ ipmi_inc_stat(intf, unhandled_commands);
+
+ msg->data[0] = ((netfn + 1) << 2) | (msg->rsp[4] & 0x3);
+ msg->data[1] = msg->rsp[2];
+ msg->data[2] = msg->rsp[4] & ~0x3;
+ msg->data[3] = cmd;
+ msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
+ msg->data_size = 5;
+
+ rcu_read_lock();
+ if (!intf->in_shutdown) {
+ smi_send(intf, intf->handlers, msg, 0);
+ /*
+ * We used the message, so return the value
+ * that causes it to not be freed or
+ * queued.
+ */
+ rv = -1;
+ }
+ rcu_read_unlock();
+ } else {
+ recv_msg = ipmi_alloc_recv_msg();
+ if (!recv_msg) {
+ /*
+ * We couldn't allocate memory for the
+ * message, so requeue it for handling
+ * later.
+ */
+ rv = 1;
+ kref_put(&user->refcount, free_user);
+ } else {
+ /* Extract the source address from the data. */
+ daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rs_lun = msg->rsp[0] & 3;
+ daddr->rq_lun = msg->rsp[2] & 3;
+
+ /*
+ * Extract the rest of the message information
+ * from the IPMB header.
+ */
+ recv_msg->user = user;
+ recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
+ recv_msg->msgid = (msg->rsp[2] >> 2);
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ recv_msg->msg.data = recv_msg->msg_data;
+
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ memcpy(recv_msg->msg_data, msg->rsp + 4,
+ msg->rsp_size - 4);
+ if (deliver_response(intf, recv_msg))
+ ipmi_inc_stat(intf, unhandled_commands);
+ else
+ ipmi_inc_stat(intf, handled_commands);
+ }
+ }
+
+ return rv;
+}
+
+static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
+ struct ipmi_smi_msg *msg)
+{
+ struct ipmi_recv_msg *recv_msg;
+ struct ipmi_ipmb_direct_addr *daddr;
+
+ recv_msg = (struct ipmi_recv_msg *) msg->user_data;
+ if (recv_msg == NULL) {
+ dev_warn(intf->si_dev,
+ "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
+ return 0;
+ }
+
+ recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
+ recv_msg->msgid = msg->msgid;
+ daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
+ daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
+ daddr->channel = 0;
+ daddr->slave_addr = msg->rsp[1];
+ daddr->rq_lun = msg->rsp[0] & 3;
+ daddr->rs_lun = msg->rsp[2] & 3;
+ recv_msg->msg.netfn = msg->rsp[0] >> 2;
+ recv_msg->msg.cmd = msg->rsp[3];
+ memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
+ recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg.data_len = msg->rsp_size - 4;
+ deliver_local_response(intf, recv_msg);
+
+ return 0;
+}
+
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
@@ -4219,18 +4435,40 @@ static int handle_bmc_rsp(struct ipmi_smi *intf,
static int handle_one_recv_msg(struct ipmi_smi *intf,
struct ipmi_smi_msg *msg)
{
- int requeue;
+ int requeue = 0;
int chan;
+ unsigned char cc;
+ bool is_cmd = !((msg->rsp[0] >> 2) & 1);
pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
- if ((msg->data_size >= 2)
+ if (msg->rsp_size < 2) {
+ /* Message is too small to be correct. */
+ dev_warn(intf->si_dev,
+ "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
+ (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
+
+return_unspecified:
+ /* Generate an error response for the message. */
+ msg->rsp[0] = msg->data[0] | (1 << 2);
+ msg->rsp[1] = msg->data[1];
+ msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
+ msg->rsp_size = 3;
+ } else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ /* commands must have at least 3 bytes, responses 4. */
+ if (is_cmd && (msg->rsp_size < 3)) {
+ ipmi_inc_stat(intf, invalid_commands);
+ goto out;
+ }
+ if (!is_cmd && (msg->rsp_size < 4))
+ goto return_unspecified;
+ } else if ((msg->data_size >= 2)
&& (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
&& (msg->data[1] == IPMI_SEND_MSG_CMD)
&& (msg->user_data == NULL)) {
if (intf->in_shutdown)
- goto free_msg;
+ goto out;
/*
* This is the local response to a command send, start
@@ -4265,21 +4503,6 @@ static int handle_one_recv_msg(struct ipmi_smi *intf,
} else
/* The message was sent, start the timer. */
intf_start_seq_timer(intf, msg->msgid);
-free_msg:
- requeue = 0;
- goto out;
-
- } else if (msg->rsp_size < 2) {
- /* Message is too small to be correct. */
- dev_warn(intf->si_dev,
- "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
- (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
-
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
|| (msg->rsp[1] != msg->data[1])) {
/*
@@ -4291,39 +4514,46 @@ free_msg:
(msg->data[0] >> 2) | 1, msg->data[1],
msg->rsp[0] >> 2, msg->rsp[1]);
- /* Generate an error response for the message. */
- msg->rsp[0] = msg->data[0] | (1 << 2);
- msg->rsp[1] = msg->data[1];
- msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
- msg->rsp_size = 3;
+ goto return_unspecified;
}
- if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
- && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
- && (msg->user_data != NULL)) {
+ if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
+ if ((msg->data[0] >> 2) & 1) {
+ /* It's a response to a sent response. */
+ chan = 0;
+ cc = msg->rsp[4];
+ goto process_response_response;
+ }
+ if (is_cmd)
+ requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
+ else
+ requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
+ } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
+ && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
+ && (msg->user_data != NULL)) {
/*
* It's a response to a response we sent. For this we
* deliver a send message response to the user.
*/
- struct ipmi_recv_msg *recv_msg = msg->user_data;
-
- requeue = 0;
- if (msg->rsp_size < 2)
- /* Message is too small to be correct. */
- goto out;
+ struct ipmi_recv_msg *recv_msg;
chan = msg->data[2] & 0x0f;
if (chan >= IPMI_MAX_CHANNELS)
/* Invalid channel number */
goto out;
+ cc = msg->rsp[2];
+process_response_response:
+ recv_msg = msg->user_data;
+
+ requeue = 0;
if (!recv_msg)
goto out;
recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
recv_msg->msg.data = recv_msg->msg_data;
+ recv_msg->msg_data[0] = cc;
recv_msg->msg.data_len = 1;
- recv_msg->msg_data[0] = msg->rsp[2];
deliver_local_response(intf, recv_msg);
} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
&& (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
@@ -4789,7 +5019,9 @@ static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
atomic_dec(&smi_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
@@ -4808,7 +5040,9 @@ EXPORT_SYMBOL(ipmi_alloc_smi_msg);
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
atomic_dec(&recv_msg_inuse_count);
- kfree(msg);
+ /* Try to keep as much stuff out of the panic path as possible. */
+ if (!oops_in_progress)
+ kfree(msg);
}
static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
@@ -4826,7 +5060,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
- if (msg->user)
+ if (msg->user && !oops_in_progress)
kref_put(&msg->user->refcount, free_user);
msg->done(msg);
}
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 6f3272b58ced..64dedb3ef8ec 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1603,7 +1603,7 @@ static ssize_t name##_show(struct device *dev, \
{ \
struct smi_info *smi_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
+ return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
} \
static DEVICE_ATTR_RO(name)
@@ -1613,7 +1613,7 @@ static ssize_t type_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
+ return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR_RO(type);
@@ -1624,7 +1624,7 @@ static ssize_t interrupts_enabled_show(struct device *dev,
struct smi_info *smi_info = dev_get_drvdata(dev);
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
- return snprintf(buf, 10, "%d\n", enabled);
+ return sysfs_emit(buf, "%d\n", enabled);
}
static DEVICE_ATTR_RO(interrupts_enabled);
@@ -1646,7 +1646,7 @@ static ssize_t params_show(struct device *dev,
{
struct smi_info *smi_info = dev_get_drvdata(dev);
- return snprintf(buf, 200,
+ return sysfs_emit(buf,
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
si_to_str[smi_info->io.si_type],
addr_space_to_str[smi_info->io.addr_space],
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 20d5af92966d..0c62e578749e 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1190,7 +1190,7 @@ static ssize_t ipmi_##name##_show(struct device *dev, \
{ \
struct ssif_info *ssif_info = dev_get_drvdata(dev); \
\
- return snprintf(buf, 10, "%u\n", ssif_get_stat(ssif_info, name));\
+ return sysfs_emit(buf, "%u\n", ssif_get_stat(ssif_info, name));\
} \
static DEVICE_ATTR(name, S_IRUGO, ipmi_##name##_show, NULL)
@@ -1198,7 +1198,7 @@ static ssize_t ipmi_type_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, 10, "ssif\n");
+ return sysfs_emit(buf, "ssif\n");
}
static DEVICE_ATTR(type, S_IRUGO, ipmi_type_show, NULL);
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index e4ff3b50de7f..883b4a341012 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -342,13 +342,17 @@ static atomic_t msg_tofree = ATOMIC_INIT(0);
static DECLARE_COMPLETION(msg_wait);
static void msg_free_smi(struct ipmi_smi_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
static void msg_free_recv(struct ipmi_recv_msg *msg)
{
- if (atomic_dec_and_test(&msg_tofree))
- complete(&msg_wait);
+ if (atomic_dec_and_test(&msg_tofree)) {
+ if (!oops_in_progress)
+ complete(&msg_wait);
+ }
}
static struct ipmi_smi_msg smi_msg = {
.done = msg_free_smi
@@ -434,8 +438,10 @@ static int _ipmi_set_timeout(int do_heartbeat)
rv = __ipmi_set_timeout(&smi_msg,
&recv_msg,
&send_heartbeat_now);
- if (rv)
+ if (rv) {
+ atomic_set(&msg_tofree, 0);
return rv;
+ }
wait_for_completion(&msg_wait);
@@ -497,7 +503,7 @@ static void panic_halt_ipmi_heartbeat(void)
msg.cmd = IPMI_WDOG_RESET_TIMER;
msg.data = NULL;
msg.data_len = 0;
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = ipmi_request_supply_msgs(watchdog_user,
(struct ipmi_addr *) &addr,
0,
@@ -507,7 +513,7 @@ static void panic_halt_ipmi_heartbeat(void)
&panic_halt_heartbeat_recv_msg,
1);
if (rv)
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
}
static struct ipmi_smi_msg panic_halt_smi_msg = {
@@ -531,12 +537,12 @@ static void panic_halt_ipmi_set_timeout(void)
/* Wait for the messages to be free. */
while (atomic_read(&panic_done_count) != 0)
ipmi_poll_interface(watchdog_user);
- atomic_inc(&panic_done_count);
+ atomic_add(2, &panic_done_count);
rv = __ipmi_set_timeout(&panic_halt_smi_msg,
&panic_halt_recv_msg,
&send_heartbeat_now);
if (rv) {
- atomic_dec(&panic_done_count);
+ atomic_sub(2, &panic_done_count);
pr_warn("Unable to extend the watchdog timeout\n");
} else {
if (send_heartbeat_now)
@@ -580,6 +586,7 @@ restart:
&recv_msg,
1);
if (rv) {
+ atomic_set(&msg_tofree, 0);
pr_warn("heartbeat send failure: %d\n", rv);
return rv;
}
diff --git a/drivers/char/ipmi/kcs_bmc_serio.c b/drivers/char/ipmi/kcs_bmc_serio.c
index 7948cabde50b..7e2067628a6c 100644
--- a/drivers/char/ipmi/kcs_bmc_serio.c
+++ b/drivers/char/ipmi/kcs_bmc_serio.c
@@ -73,10 +73,12 @@ static int kcs_bmc_serio_add_device(struct kcs_bmc_device *kcs_bmc)
struct serio *port;
priv = devm_kzalloc(kcs_bmc->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
/* Use kzalloc() as the allocation is cleaned up with kfree() via serio_unregister_port() */
port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (!(priv && port))
+ if (!port)
return -ENOMEM;
port->id.type = SERIO_8042;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 8f1bce0b4fe5..adaec8fd4b16 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -116,8 +116,9 @@ struct cm4000_dev {
wait_queue_head_t atrq; /* wait for ATR valid */
wait_queue_head_t readq; /* used by write to wake blk.read */
- /* warning: do not move this fields.
+ /* warning: do not move this struct group.
* initialising to zero depends on it - see ZERO_DEV below. */
+ struct_group(init,
unsigned char atr_csum;
unsigned char atr_len_retry;
unsigned short atr_len;
@@ -140,12 +141,10 @@ struct cm4000_dev {
struct timer_list timer; /* used to keep monitor running */
int monitor_running;
+ );
};
-#define ZERO_DEV(dev) \
- memset(&dev->atr_csum,0, \
- sizeof(struct cm4000_dev) - \
- offsetof(struct cm4000_dev, atr_csum))
+#define ZERO_DEV(dev) memset(&((dev)->init), 0, sizeof((dev)->init))
static struct pcmcia_device *dev_table[CM4000_MAX_DEV];
static struct class *cmm_class;
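
The cm4000_cs.c change above relies on struct_group() from <linux/stddef.h>, which wraps a run of members in an anonymous struct/union so they can be cleared as one unit while remaining addressable by their old names. A small illustrative (hypothetical) use of the same pattern:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>	/* memset() */

struct example_dev {
	int id;				/* preserved across a reset */
	struct_group(state,		/* members reset together */
		unsigned int count;
		unsigned char flags;
	);
};

static void example_reset(struct example_dev *d)
{
	/* Clears count and flags only; no offsetof() arithmetic needed. */
	memset(&d->state, 0, sizeof(d->state));
	/* d->count and d->flags are still accessed directly as before. */
}
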
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index d6ba644f6b00..4a5516406c22 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -76,7 +76,7 @@ config TCG_TIS_SPI_CR50
config TCG_TIS_SYNQUACER
tristate "TPM Interface Specification 1.2 Interface / TPM 2.0 FIFO Interface (MMIO - SynQuacer)"
- depends on ARCH_SYNQUACER
+ depends on ARCH_SYNQUACER || COMPILE_TEST
select TCG_TIS_CORE
help
If you have a TPM security chip that is compliant with the
diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c
index 784b8b3cb903..97e916856cf3 100644
--- a/drivers/char/tpm/tpm2-space.c
+++ b/drivers/char/tpm/tpm2-space.c
@@ -455,6 +455,9 @@ static int tpm2_map_response_body(struct tpm_chip *chip, u32 cc, u8 *rsp,
if (be32_to_cpu(data->capability) != TPM2_CAP_HANDLES)
return 0;
+ if (be32_to_cpu(data->count) > (UINT_MAX - TPM_HEADER_SIZE - 9) / 4)
+ return -EFAULT;
+
if (len != TPM_HEADER_SIZE + 9 + 4 * be32_to_cpu(data->count))
return -EFAULT;
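
For context, the added bound guards the 32-bit length computation that follows (worked example, assuming the usual 10-byte TPM_HEADER_SIZE):

	expected = TPM_HEADER_SIZE + 9 + 4 * count = 19 + 4 * count
	count = 0x40000000  =>  4 * count wraps to 0, expected = 19

so a hostile 19-byte response would satisfy the equality check while the handle walk reads far past the buffer; rejecting count > (UINT_MAX - 19) / 4 up front keeps the multiplication from overflowing.
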
diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c
index 69579efb247b..b2659a4c4016 100644
--- a/drivers/char/tpm/tpm_tis_core.c
+++ b/drivers/char/tpm/tpm_tis_core.c
@@ -48,6 +48,7 @@ static int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask,
unsigned long timeout, wait_queue_head_t *queue,
bool check_cancel)
{
+ struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev);
unsigned long stop;
long rc;
u8 status;
@@ -80,8 +81,8 @@ again:
}
} else {
do {
- usleep_range(TPM_TIMEOUT_USECS_MIN,
- TPM_TIMEOUT_USECS_MAX);
+ usleep_range(priv->timeout_min,
+ priv->timeout_max);
status = chip->ops->status(chip);
if ((status & mask) == mask)
return 0;
@@ -945,7 +946,22 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
chip->timeout_b = msecs_to_jiffies(TIS_TIMEOUT_B_MAX);
chip->timeout_c = msecs_to_jiffies(TIS_TIMEOUT_C_MAX);
chip->timeout_d = msecs_to_jiffies(TIS_TIMEOUT_D_MAX);
+ priv->timeout_min = TPM_TIMEOUT_USECS_MIN;
+ priv->timeout_max = TPM_TIMEOUT_USECS_MAX;
priv->phy_ops = phy_ops;
+
+ rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
+ if (rc < 0)
+ goto out_err;
+
+ priv->manufacturer_id = vendor;
+
+ if (priv->manufacturer_id == TPM_VID_ATML &&
+ !(chip->flags & TPM_CHIP_FLAG_TPM2)) {
+ priv->timeout_min = TIS_TIMEOUT_MIN_ATML;
+ priv->timeout_max = TIS_TIMEOUT_MAX_ATML;
+ }
+
dev_set_drvdata(&chip->dev, priv);
if (is_bsw()) {
@@ -988,12 +1004,6 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
if (rc)
goto out_err;
- rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
- if (rc < 0)
- goto out_err;
-
- priv->manufacturer_id = vendor;
-
rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
if (rc < 0)
goto out_err;
diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h
index b2a3c6c72882..3be24f221e32 100644
--- a/drivers/char/tpm/tpm_tis_core.h
+++ b/drivers/char/tpm/tpm_tis_core.h
@@ -54,6 +54,8 @@ enum tis_defaults {
TIS_MEM_LEN = 0x5000,
TIS_SHORT_TIMEOUT = 750, /* ms */
TIS_LONG_TIMEOUT = 2000, /* 2 sec */
+ TIS_TIMEOUT_MIN_ATML = 14700, /* usecs */
+ TIS_TIMEOUT_MAX_ATML = 15000, /* usecs */
};
/* Some timeout values are needed before it is known whether the chip is
@@ -98,6 +100,8 @@ struct tpm_tis_data {
wait_queue_head_t read_queue;
const struct tpm_tis_phy_ops *phy_ops;
unsigned short rng_quality;
+ unsigned int timeout_min; /* usecs */
+ unsigned int timeout_max; /* usecs */
};
struct tpm_tis_phy_ops {
diff --git a/drivers/char/tpm/tpm_tis_spi_main.c b/drivers/char/tpm/tpm_tis_spi_main.c
index 54584b4b00d1..aaa59a00eeae 100644
--- a/drivers/char/tpm/tpm_tis_spi_main.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
@@ -267,6 +267,7 @@ static const struct spi_device_id tpm_tis_spi_id[] = {
{ "st33htpm-spi", (unsigned long)tpm_tis_spi_probe },
{ "slb9670", (unsigned long)tpm_tis_spi_probe },
{ "tpm_tis_spi", (unsigned long)tpm_tis_spi_probe },
+ { "tpm_tis-spi", (unsigned long)tpm_tis_spi_probe },
{ "cr50", (unsigned long)cr50_spi_probe },
{}
};
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 7eaf303a7a86..660c5c388c29 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -28,6 +28,7 @@
#include "../tty/hvc/hvc_console.h"
#define is_rproc_enabled IS_ENABLED(CONFIG_REMOTEPROC)
+#define VIRTCONS_MAX_PORTS 0x8000
/*
* This is a global struct for storing common data for all the devices
@@ -2036,6 +2037,14 @@ static int virtcons_probe(struct virtio_device *vdev)
virtio_cread_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT,
struct virtio_console_config, max_nr_ports,
&portdev->max_nr_ports) == 0) {
+ if (portdev->max_nr_ports == 0 ||
+ portdev->max_nr_ports > VIRTCONS_MAX_PORTS) {
+ dev_err(&vdev->dev,
+				"Invalid max_nr_ports %d",
+ portdev->max_nr_ports);
+ err = -EINVAL;
+ goto free;
+ }
multiport = true;
}
diff --git a/drivers/clk/at91/at91rm9200.c b/drivers/clk/at91/at91rm9200.c
index 428a6f4b9ebc..fff4fdda974f 100644
--- a/drivers/clk/at91/at91rm9200.c
+++ b/drivers/clk/at91/at91rm9200.c
@@ -152,7 +152,7 @@ static void __init at91rm9200_pmc_setup(struct device_node *np)
"masterck_pres",
&at91rm9200_master_layout,
&rm9200_mck_characteristics,
- &rm9200_mck_lock, CLK_SET_RATE_GATE);
+ &rm9200_mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9260.c b/drivers/clk/at91/at91sam9260.c
index b29843bea278..79802f864ee5 100644
--- a/drivers/clk/at91/at91sam9260.c
+++ b/drivers/clk/at91/at91sam9260.c
@@ -429,7 +429,7 @@ static void __init at91sam926x_pmc_setup(struct device_node *np,
&at91rm9200_master_layout,
data->mck_characteristics,
&at91sam9260_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9g45.c b/drivers/clk/at91/at91sam9g45.c
index 15da0dfe3ef2..7ed984f8058c 100644
--- a/drivers/clk/at91/at91sam9g45.c
+++ b/drivers/clk/at91/at91sam9g45.c
@@ -164,7 +164,7 @@ static void __init at91sam9g45_pmc_setup(struct device_node *np)
&at91rm9200_master_layout,
&mck_characteristics,
&at91sam9g45_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9n12.c b/drivers/clk/at91/at91sam9n12.c
index 7fe435f4b46b..63cc58944b00 100644
--- a/drivers/clk/at91/at91sam9n12.c
+++ b/drivers/clk/at91/at91sam9n12.c
@@ -191,7 +191,7 @@ static void __init at91sam9n12_pmc_setup(struct device_node *np)
&at91sam9x5_master_layout,
&mck_characteristics,
&at91sam9n12_mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9rl.c b/drivers/clk/at91/at91sam9rl.c
index ecbabf5162bd..4d4faf6c61d8 100644
--- a/drivers/clk/at91/at91sam9rl.c
+++ b/drivers/clk/at91/at91sam9rl.c
@@ -132,7 +132,7 @@ static void __init at91sam9rl_pmc_setup(struct device_node *np)
"masterck_pres",
&at91rm9200_master_layout,
&sam9rl_mck_characteristics,
- &sam9rl_mck_lock, CLK_SET_RATE_GATE);
+ &sam9rl_mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/at91sam9x5.c b/drivers/clk/at91/at91sam9x5.c
index 5cce48c64ea2..bd8007b4f3e0 100644
--- a/drivers/clk/at91/at91sam9x5.c
+++ b/drivers/clk/at91/at91sam9x5.c
@@ -210,7 +210,7 @@ static void __init at91sam9x5_pmc_setup(struct device_node *np,
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/clk-generated.c b/drivers/clk/at91/clk-generated.c
index b656d25a9767..23cc8297ec4c 100644
--- a/drivers/clk/at91/clk-generated.c
+++ b/drivers/clk/at91/clk-generated.c
@@ -27,6 +27,7 @@ struct clk_generated {
u32 id;
u32 gckdiv;
const struct clk_pcr_layout *layout;
+ struct at91_clk_pms pms;
u8 parent_id;
int chg_pid;
};
@@ -34,25 +35,35 @@ struct clk_generated {
#define to_clk_generated(hw) \
container_of(hw, struct clk_generated, hw)
-static int clk_generated_enable(struct clk_hw *hw)
+static int clk_generated_set(struct clk_generated *gck, int status)
{
- struct clk_generated *gck = to_clk_generated(hw);
unsigned long flags;
-
- pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
- __func__, gck->gckdiv, gck->parent_id);
+ unsigned int enable = status ? AT91_PMC_PCR_GCKEN : 0;
spin_lock_irqsave(gck->lock, flags);
regmap_write(gck->regmap, gck->layout->offset,
(gck->id & gck->layout->pid_mask));
regmap_update_bits(gck->regmap, gck->layout->offset,
AT91_PMC_PCR_GCKDIV_MASK | gck->layout->gckcss_mask |
- gck->layout->cmd | AT91_PMC_PCR_GCKEN,
+ gck->layout->cmd | enable,
field_prep(gck->layout->gckcss_mask, gck->parent_id) |
gck->layout->cmd |
FIELD_PREP(AT91_PMC_PCR_GCKDIV_MASK, gck->gckdiv) |
- AT91_PMC_PCR_GCKEN);
+ enable);
spin_unlock_irqrestore(gck->lock, flags);
+
+ return 0;
+}
+
+static int clk_generated_enable(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n",
+ __func__, gck->gckdiv, gck->parent_id);
+
+ clk_generated_set(gck, 1);
+
return 0;
}
@@ -245,6 +256,23 @@ static int clk_generated_set_rate(struct clk_hw *hw,
return 0;
}
+static int clk_generated_save_context(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ gck->pms.status = clk_generated_is_enabled(&gck->hw);
+
+ return 0;
+}
+
+static void clk_generated_restore_context(struct clk_hw *hw)
+{
+ struct clk_generated *gck = to_clk_generated(hw);
+
+ if (gck->pms.status)
+ clk_generated_set(gck, gck->pms.status);
+}
+
static const struct clk_ops generated_ops = {
.enable = clk_generated_enable,
.disable = clk_generated_disable,
@@ -254,6 +282,8 @@ static const struct clk_ops generated_ops = {
.get_parent = clk_generated_get_parent,
.set_parent = clk_generated_set_parent,
.set_rate = clk_generated_set_rate,
+ .save_context = clk_generated_save_context,
+ .restore_context = clk_generated_restore_context,
};
/**
@@ -320,8 +350,6 @@ at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock,
if (ret) {
kfree(gck);
hw = ERR_PTR(ret);
- } else {
- pmc_register_id(id);
}
return hw;
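
The save_context/restore_context callbacks added throughout these clk-at91 files are exercised via clk_save_context() and clk_restore_context(), which the clock core provides for platforms to call around a deep power-down. A minimal sketch of such a hook (illustrative function names, not part of this series):

#include <linux/clk.h>
#include <linux/device.h>

static int example_soc_suspend_noirq(struct device *dev)
{
	/* Snapshot clock state (e.g. GCK enables) before power is cut. */
	return clk_save_context();
}

static int example_soc_resume_noirq(struct device *dev)
{
	/* Reprogram clocks that lost their state in backup mode. */
	clk_restore_context();
	return 0;
}
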
diff --git a/drivers/clk/at91/clk-main.c b/drivers/clk/at91/clk-main.c
index cfae2f59df66..8601b27c1ae0 100644
--- a/drivers/clk/at91/clk-main.c
+++ b/drivers/clk/at91/clk-main.c
@@ -28,6 +28,7 @@
struct clk_main_osc {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
};
#define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw)
@@ -37,6 +38,7 @@ struct clk_main_rc_osc {
struct regmap *regmap;
unsigned long frequency;
unsigned long accuracy;
+ struct at91_clk_pms pms;
};
#define to_clk_main_rc_osc(hw) container_of(hw, struct clk_main_rc_osc, hw)
@@ -51,6 +53,7 @@ struct clk_rm9200_main {
struct clk_sam9x5_main {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u8 parent;
};
@@ -120,10 +123,29 @@ static int clk_main_osc_is_prepared(struct clk_hw *hw)
return (status & AT91_PMC_MOSCS) && clk_main_parent_select(tmp);
}
+static int clk_main_osc_save_context(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+
+ osc->pms.status = clk_main_osc_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_main_osc_restore_context(struct clk_hw *hw)
+{
+ struct clk_main_osc *osc = to_clk_main_osc(hw);
+
+ if (osc->pms.status)
+ clk_main_osc_prepare(hw);
+}
+
static const struct clk_ops main_osc_ops = {
.prepare = clk_main_osc_prepare,
.unprepare = clk_main_osc_unprepare,
.is_prepared = clk_main_osc_is_prepared,
+ .save_context = clk_main_osc_save_context,
+ .restore_context = clk_main_osc_restore_context,
};
struct clk_hw * __init
@@ -240,12 +262,31 @@ static unsigned long clk_main_rc_osc_recalc_accuracy(struct clk_hw *hw,
return osc->accuracy;
}
+static int clk_main_rc_osc_save_context(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ osc->pms.status = clk_main_rc_osc_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_main_rc_osc_restore_context(struct clk_hw *hw)
+{
+ struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw);
+
+ if (osc->pms.status)
+ clk_main_rc_osc_prepare(hw);
+}
+
static const struct clk_ops main_rc_osc_ops = {
.prepare = clk_main_rc_osc_prepare,
.unprepare = clk_main_rc_osc_unprepare,
.is_prepared = clk_main_rc_osc_is_prepared,
.recalc_rate = clk_main_rc_osc_recalc_rate,
.recalc_accuracy = clk_main_rc_osc_recalc_accuracy,
+ .save_context = clk_main_rc_osc_save_context,
+ .restore_context = clk_main_rc_osc_restore_context,
};
struct clk_hw * __init
@@ -465,12 +506,37 @@ static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw)
return clk_main_parent_select(status);
}
+static int clk_sam9x5_main_save_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+
+ clkmain->pms.status = clk_main_rc_osc_is_prepared(&clkmain->hw);
+ clkmain->pms.parent = clk_sam9x5_main_get_parent(&clkmain->hw);
+
+ return 0;
+}
+
+static void clk_sam9x5_main_restore_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw);
+ int ret;
+
+ ret = clk_sam9x5_main_set_parent(hw, clkmain->pms.parent);
+ if (ret)
+ return;
+
+ if (clkmain->pms.status)
+ clk_sam9x5_main_prepare(hw);
+}
+
static const struct clk_ops sam9x5_main_ops = {
.prepare = clk_sam9x5_main_prepare,
.is_prepared = clk_sam9x5_main_is_prepared,
.recalc_rate = clk_sam9x5_main_recalc_rate,
.set_parent = clk_sam9x5_main_set_parent,
.get_parent = clk_sam9x5_main_get_parent,
+ .save_context = clk_sam9x5_main_save_context,
+ .restore_context = clk_sam9x5_main_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index a80427980bf7..b2d0a7f4f7f9 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -5,6 +5,7 @@
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
+#include <linux/clk.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
#include <linux/mfd/syscon.h>
@@ -17,15 +18,7 @@
#define MASTER_DIV_SHIFT 8
#define MASTER_DIV_MASK 0x7
-#define PMC_MCR 0x30
-#define PMC_MCR_ID_MSK GENMASK(3, 0)
-#define PMC_MCR_CMD BIT(7)
-#define PMC_MCR_DIV GENMASK(10, 8)
-#define PMC_MCR_CSS GENMASK(20, 16)
#define PMC_MCR_CSS_SHIFT (16)
-#define PMC_MCR_EN BIT(28)
-
-#define PMC_MCR_ID(x) ((x) & PMC_MCR_ID_MSK)
#define MASTER_MAX_ID 4
@@ -37,14 +30,19 @@ struct clk_master {
spinlock_t *lock;
const struct clk_master_layout *layout;
const struct clk_master_characteristics *characteristics;
+ struct at91_clk_pms pms;
u32 *mux_table;
u32 mckr;
int chg_pid;
u8 id;
u8 parent;
u8 div;
+ u32 safe_div;
};
+/* MCK div reference to be used by notifier. */
+static struct clk_master *master_div;
+
static inline bool clk_master_ready(struct clk_master *master)
{
unsigned int bit = master->id ? AT91_PMC_MCKXRDY : AT91_PMC_MCKRDY;
@@ -112,97 +110,244 @@ static unsigned long clk_master_div_recalc_rate(struct clk_hw *hw,
return rate;
}
+static int clk_master_div_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ unsigned long flags;
+ unsigned int mckr, div;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ mckr &= master->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ div = master->characteristics->divisors[div];
+
+ master->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ master->pms.rate = DIV_ROUND_CLOSEST(master->pms.parent_rate, div);
+
+ return 0;
+}
+
+static void clk_master_div_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int mckr;
+ u8 div;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ mckr &= master->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ div = master->characteristics->divisors[div];
+
+ if (div != DIV_ROUND_CLOSEST(master->pms.parent_rate, master->pms.rate))
+ pr_warn("MCKR DIV not configured properly by firmware!\n");
+}
+
static const struct clk_ops master_div_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_div_recalc_rate,
+ .save_context = clk_master_div_save_context,
+ .restore_context = clk_master_div_restore_context,
};
-static int clk_master_div_set_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long parent_rate)
+/* This function must be called with lock acquired. */
+static int clk_master_div_set(struct clk_master *master,
+ unsigned long parent_rate, int div)
{
- struct clk_master *master = to_clk_master(hw);
const struct clk_master_characteristics *characteristics =
master->characteristics;
- unsigned long flags;
- int div, i;
-
- div = DIV_ROUND_CLOSEST(parent_rate, rate);
- if (div > ARRAY_SIZE(characteristics->divisors))
- return -EINVAL;
+ unsigned long rate = parent_rate;
+ unsigned int max_div = 0, div_index = 0, max_div_index = 0;
+ unsigned int i, mckr, tmp;
+ int ret;
for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
if (!characteristics->divisors[i])
break;
- if (div == characteristics->divisors[i]) {
- div = i;
- break;
+ if (div == characteristics->divisors[i])
+ div_index = i;
+
+ if (max_div < characteristics->divisors[i]) {
+ max_div = characteristics->divisors[i];
+ max_div_index = i;
}
}
- if (i == ARRAY_SIZE(characteristics->divisors))
- return -EINVAL;
+ if (div > max_div)
+ div_index = max_div_index;
+
+ ret = regmap_read(master->regmap, master->layout->offset, &mckr);
+ if (ret)
+ return ret;
+
+ mckr &= master->layout->mask;
+ tmp = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ if (tmp == div_index)
+ return 0;
+
+ rate /= characteristics->divisors[div_index];
+ if (rate < characteristics->output.min)
+ pr_warn("master clk div is underclocked");
+ else if (rate > characteristics->output.max)
+ pr_warn("master clk div is overclocked");
+
+ mckr &= ~(MASTER_DIV_MASK << MASTER_DIV_SHIFT);
+ mckr |= (div_index << MASTER_DIV_SHIFT);
+ ret = regmap_write(master->regmap, master->layout->offset, mckr);
+ if (ret)
+ return ret;
- spin_lock_irqsave(master->lock, flags);
- regmap_update_bits(master->regmap, master->layout->offset,
- (MASTER_DIV_MASK << MASTER_DIV_SHIFT),
- (div << MASTER_DIV_SHIFT));
while (!clk_master_ready(master))
cpu_relax();
- spin_unlock_irqrestore(master->lock, flags);
+
+ master->div = characteristics->divisors[div_index];
return 0;
}
-static int clk_master_div_determine_rate(struct clk_hw *hw,
- struct clk_rate_request *req)
+static unsigned long clk_master_div_recalc_rate_chg(struct clk_hw *hw,
+ unsigned long parent_rate)
{
struct clk_master *master = to_clk_master(hw);
+
+ return DIV_ROUND_CLOSEST_ULL(parent_rate, master->div);
+}
+
+static void clk_master_div_restore_context_chg(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(master->lock, flags);
+ ret = clk_master_div_set(master, master->pms.parent_rate,
+ DIV_ROUND_CLOSEST(master->pms.parent_rate,
+ master->pms.rate));
+ spin_unlock_irqrestore(master->lock, flags);
+ if (ret)
+ pr_warn("Failed to restore MCK DIV clock\n");
+}
+
+static const struct clk_ops master_div_ops_chg = {
+ .prepare = clk_master_prepare,
+ .is_prepared = clk_master_is_prepared,
+ .recalc_rate = clk_master_div_recalc_rate_chg,
+ .save_context = clk_master_div_save_context,
+ .restore_context = clk_master_div_restore_context_chg,
+};
+
+static int clk_master_div_notifier_fn(struct notifier_block *notifier,
+ unsigned long code, void *data)
+{
const struct clk_master_characteristics *characteristics =
- master->characteristics;
- struct clk_hw *parent;
- unsigned long parent_rate, tmp_rate, best_rate = 0;
- int i, best_diff = INT_MIN, tmp_diff;
+ master_div->characteristics;
+ struct clk_notifier_data *cnd = data;
+ unsigned long flags, new_parent_rate, new_rate;
+ unsigned int mckr, div, new_div = 0;
+ int ret, i;
+ long tmp_diff;
+ long best_diff = -1;
+
+ spin_lock_irqsave(master_div->lock, flags);
+ switch (code) {
+ case PRE_RATE_CHANGE:
+ /*
+ * We want to avoid any overclocking of the MCK DIV domain. To do
+ * this we set a safe divider (underclocking is not a concern, as
+ * we can go as low as 32 kHz). The relationship between this
+ * clock and its parents is as follows:
+ *
+ * FRAC PLL -> DIV PLL -> MCK DIV
+ *
+ * With a proper safe divider we should be fine even with the FRAC
+ * PLL at its maximum rate.
+ */
+ ret = regmap_read(master_div->regmap, master_div->layout->offset,
+ &mckr);
+ if (ret) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
- parent = clk_hw_get_parent(hw);
- if (!parent)
- return -EINVAL;
+ mckr &= master_div->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+
+ /* Switch to safe divider. */
+ clk_master_div_set(master_div,
+ cnd->old_rate * characteristics->divisors[div],
+ master_div->safe_div);
+ break;
+
+ case POST_RATE_CHANGE:
+ /*
+ * At this point we want to restore the MCK DIV domain to its
+ * maximum allowed rate.
+ */
+ ret = regmap_read(master_div->regmap, master_div->layout->offset,
+ &mckr);
+ if (ret) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
- parent_rate = clk_hw_get_rate(parent);
- if (!parent_rate)
- return -EINVAL;
+ mckr &= master_div->layout->mask;
+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ new_parent_rate = cnd->new_rate * characteristics->divisors[div];
- for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
- if (!characteristics->divisors[i])
- break;
+ for (i = 0; i < ARRAY_SIZE(characteristics->divisors); i++) {
+ if (!characteristics->divisors[i])
+ break;
+
+ new_rate = DIV_ROUND_CLOSEST_ULL(new_parent_rate,
+ characteristics->divisors[i]);
- tmp_rate = DIV_ROUND_CLOSEST_ULL(parent_rate,
- characteristics->divisors[i]);
- tmp_diff = abs(tmp_rate - req->rate);
+ tmp_diff = characteristics->output.max - new_rate;
+ if (tmp_diff < 0)
+ continue;
- if (!best_rate || best_diff > tmp_diff) {
- best_diff = tmp_diff;
- best_rate = tmp_rate;
+ if (best_diff < 0 || best_diff > tmp_diff) {
+ new_div = characteristics->divisors[i];
+ best_diff = tmp_diff;
+ }
+
+ if (!tmp_diff)
+ break;
}
- if (!best_diff)
- break;
+ if (!new_div) {
+ ret = NOTIFY_STOP_MASK;
+ goto unlock;
+ }
+
+ /* Update the div to preserve MCK DIV clock rate. */
+ clk_master_div_set(master_div, new_parent_rate,
+ new_div);
+
+ ret = NOTIFY_OK;
+ break;
+
+ default:
+ ret = NOTIFY_DONE;
+ break;
}
- req->best_parent_rate = best_rate;
- req->best_parent_hw = parent;
- req->rate = best_rate;
+unlock:
+ spin_unlock_irqrestore(master_div->lock, flags);
- return 0;
+ return ret;
}
-static const struct clk_ops master_div_ops_chg = {
- .prepare = clk_master_prepare,
- .is_prepared = clk_master_is_prepared,
- .recalc_rate = clk_master_div_recalc_rate,
- .determine_rate = clk_master_div_determine_rate,
- .set_rate = clk_master_div_set_rate,
+static struct notifier_block clk_master_div_notifier = {
+ .notifier_call = clk_master_div_notifier_fn,
};
static void clk_sama7g5_master_best_diff(struct clk_rate_request *req,
@@ -272,7 +417,8 @@ static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct clk_master *master = to_clk_master(hw);
unsigned long flags;
- unsigned int pres;
+ unsigned int pres, mckr, tmp;
+ int ret;
pres = DIV_ROUND_CLOSEST(parent_rate, rate);
if (pres > MASTER_PRES_MAX)
@@ -280,19 +426,31 @@ static int clk_master_pres_set_rate(struct clk_hw *hw, unsigned long rate,
else if (pres == 3)
pres = MASTER_PRES_MAX;
- else
+ else if (pres)
pres = ffs(pres) - 1;
spin_lock_irqsave(master->lock, flags);
- regmap_update_bits(master->regmap, master->layout->offset,
- (MASTER_PRES_MASK << master->layout->pres_shift),
- (pres << master->layout->pres_shift));
+ ret = regmap_read(master->regmap, master->layout->offset, &mckr);
+ if (ret)
+ goto unlock;
+
+ mckr &= master->layout->mask;
+ tmp = (mckr >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == tmp)
+ goto unlock;
+
+ mckr &= ~(MASTER_PRES_MASK << master->layout->pres_shift);
+ mckr |= (pres << master->layout->pres_shift);
+ ret = regmap_write(master->regmap, master->layout->offset, mckr);
+ if (ret)
+ goto unlock;
while (!clk_master_ready(master))
cpu_relax();
+unlock:
spin_unlock_irqrestore(master->lock, flags);
- return 0;
+ return ret;
}
static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
@@ -308,8 +466,9 @@ static unsigned long clk_master_pres_recalc_rate(struct clk_hw *hw,
regmap_read(master->regmap, master->layout->offset, &val);
spin_unlock_irqrestore(master->lock, flags);
+ val &= master->layout->mask;
pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
- if (pres == 3 && characteristics->have_div3_pres)
+ if (pres == MASTER_PRES_MAX && characteristics->have_div3_pres)
pres = 3;
else
pres = (1 << pres);
@@ -327,14 +486,73 @@ static u8 clk_master_pres_get_parent(struct clk_hw *hw)
regmap_read(master->regmap, master->layout->offset, &mckr);
spin_unlock_irqrestore(master->lock, flags);
+ mckr &= master->layout->mask;
+
return mckr & AT91_PMC_CSS;
}
+static int clk_master_pres_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ val &= master->layout->mask;
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ master->pms.parent = val & AT91_PMC_CSS;
+ master->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ master->pms.rate = DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres);
+
+ return 0;
+}
+
+static void clk_master_pres_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+ unsigned long flags;
+ unsigned int val, pres;
+
+ spin_lock_irqsave(master->lock, flags);
+ regmap_read(master->regmap, master->layout->offset, &val);
+ spin_unlock_irqrestore(master->lock, flags);
+
+ val &= master->layout->mask;
+ pres = (val >> master->layout->pres_shift) & MASTER_PRES_MASK;
+ if (pres == MASTER_PRES_MAX && master->characteristics->have_div3_pres)
+ pres = 3;
+ else
+ pres = (1 << pres);
+
+ if (master->pms.rate !=
+ DIV_ROUND_CLOSEST_ULL(master->pms.parent_rate, pres) ||
+ (master->pms.parent != (val & AT91_PMC_CSS)))
+ pr_warn("MCKR PRES was not configured properly by firmware!\n");
+}
+
+static void clk_master_pres_restore_context_chg(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ clk_master_pres_set_rate(hw, master->pms.rate, master->pms.parent_rate);
+}
+
static const struct clk_ops master_pres_ops = {
.prepare = clk_master_prepare,
.is_prepared = clk_master_is_prepared,
.recalc_rate = clk_master_pres_recalc_rate,
.get_parent = clk_master_pres_get_parent,
+ .save_context = clk_master_pres_save_context,
+ .restore_context = clk_master_pres_restore_context,
};
static const struct clk_ops master_pres_ops_chg = {
@@ -344,6 +562,8 @@ static const struct clk_ops master_pres_ops_chg = {
.recalc_rate = clk_master_pres_recalc_rate,
.get_parent = clk_master_pres_get_parent,
.set_rate = clk_master_pres_set_rate,
+ .save_context = clk_master_pres_save_context,
+ .restore_context = clk_master_pres_restore_context_chg,
};
static struct clk_hw * __init
@@ -358,6 +578,8 @@ at91_clk_register_master_internal(struct regmap *regmap,
struct clk_master *master;
struct clk_init_data init;
struct clk_hw *hw;
+ unsigned int mckr;
+ unsigned long irqflags;
int ret;
if (!name || !num_parents || !parent_names || !lock)
@@ -380,6 +602,16 @@ at91_clk_register_master_internal(struct regmap *regmap,
master->chg_pid = chg_pid;
master->lock = lock;
+ if (ops == &master_div_ops_chg) {
+ spin_lock_irqsave(master->lock, irqflags);
+ regmap_read(master->regmap, master->layout->offset, &mckr);
+ spin_unlock_irqrestore(master->lock, irqflags);
+
+ mckr &= layout->mask;
+ mckr = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK;
+ master->div = characteristics->divisors[mckr];
+ }
+
hw = &master->hw;
ret = clk_hw_register(NULL, &master->hw);
if (ret) {
@@ -416,19 +648,29 @@ at91_clk_register_master_div(struct regmap *regmap,
const char *name, const char *parent_name,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
- spinlock_t *lock, u32 flags)
+ spinlock_t *lock, u32 flags, u32 safe_div)
{
const struct clk_ops *ops;
+ struct clk_hw *hw;
if (flags & CLK_SET_RATE_GATE)
ops = &master_div_ops;
else
ops = &master_div_ops_chg;
- return at91_clk_register_master_internal(regmap, name, 1,
- &parent_name, layout,
- characteristics, ops,
- lock, flags, -EINVAL);
+ hw = at91_clk_register_master_internal(regmap, name, 1,
+ &parent_name, layout,
+ characteristics, ops,
+ lock, flags, -EINVAL);
+
+ if (!IS_ERR(hw) && safe_div) {
+ master_div = to_clk_master(hw);
+ master_div->safe_div = safe_div;
+ clk_notifier_register(hw->clk,
+ &clk_master_div_notifier);
+ }
+
+ return hw;
}
static unsigned long
@@ -539,30 +781,40 @@ static int clk_sama7g5_master_set_parent(struct clk_hw *hw, u8 index)
return 0;
}
-static int clk_sama7g5_master_enable(struct clk_hw *hw)
+static void clk_sama7g5_master_set(struct clk_master *master,
+ unsigned int status)
{
- struct clk_master *master = to_clk_master(hw);
unsigned long flags;
unsigned int val, cparent;
+ unsigned int enable = status ? AT91_PMC_MCR_V2_EN : 0;
+ unsigned int parent = master->parent << PMC_MCR_CSS_SHIFT;
+ unsigned int div = master->div << MASTER_DIV_SHIFT;
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, PMC_MCR_ID(master->id));
- regmap_read(master->regmap, PMC_MCR, &val);
- regmap_update_bits(master->regmap, PMC_MCR,
- PMC_MCR_EN | PMC_MCR_CSS | PMC_MCR_DIV |
- PMC_MCR_CMD | PMC_MCR_ID_MSK,
- PMC_MCR_EN | (master->parent << PMC_MCR_CSS_SHIFT) |
- (master->div << MASTER_DIV_SHIFT) |
- PMC_MCR_CMD | PMC_MCR_ID(master->id));
+ regmap_write(master->regmap, AT91_PMC_MCR_V2,
+ AT91_PMC_MCR_V2_ID(master->id));
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
+ regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
+ enable | AT91_PMC_MCR_V2_CSS | AT91_PMC_MCR_V2_DIV |
+ AT91_PMC_MCR_V2_CMD | AT91_PMC_MCR_V2_ID_MSK,
+ enable | parent | div | AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID(master->id));
- cparent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
+ cparent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
/* Wait here only if parent is being changed. */
while ((cparent != master->parent) && !clk_master_ready(master))
cpu_relax();
spin_unlock_irqrestore(master->lock, flags);
+}
+
+static int clk_sama7g5_master_enable(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ clk_sama7g5_master_set(master, 1);
return 0;
}
@@ -574,10 +826,12 @@ static void clk_sama7g5_master_disable(struct clk_hw *hw)
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_update_bits(master->regmap, PMC_MCR,
- PMC_MCR_EN | PMC_MCR_CMD | PMC_MCR_ID_MSK,
- PMC_MCR_CMD | PMC_MCR_ID(master->id));
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_update_bits(master->regmap, AT91_PMC_MCR_V2,
+ AT91_PMC_MCR_V2_EN | AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID_MSK,
+ AT91_PMC_MCR_V2_CMD |
+ AT91_PMC_MCR_V2_ID(master->id));
spin_unlock_irqrestore(master->lock, flags);
}
@@ -590,12 +844,12 @@ static int clk_sama7g5_master_is_enabled(struct clk_hw *hw)
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_read(master->regmap, PMC_MCR, &val);
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
spin_unlock_irqrestore(master->lock, flags);
- return !!(val & PMC_MCR_EN);
+ return !!(val & AT91_PMC_MCR_V2_EN);
}
static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
@@ -610,7 +864,7 @@ static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
if (div == 3)
div = MASTER_PRES_MAX;
- else
+ else if (div)
div = ffs(div) - 1;
spin_lock_irqsave(master->lock, flags);
@@ -620,6 +874,23 @@ static int clk_sama7g5_master_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_sama7g5_master_save_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ master->pms.status = clk_sama7g5_master_is_enabled(hw);
+
+ return 0;
+}
+
+static void clk_sama7g5_master_restore_context(struct clk_hw *hw)
+{
+ struct clk_master *master = to_clk_master(hw);
+
+ if (master->pms.status)
+ clk_sama7g5_master_set(master, master->pms.status);
+}
+
static const struct clk_ops sama7g5_master_ops = {
.enable = clk_sama7g5_master_enable,
.disable = clk_sama7g5_master_disable,
@@ -629,6 +900,8 @@ static const struct clk_ops sama7g5_master_ops = {
.set_rate = clk_sama7g5_master_set_rate,
.get_parent = clk_sama7g5_master_get_parent,
.set_parent = clk_sama7g5_master_set_parent,
+ .save_context = clk_sama7g5_master_save_context,
+ .restore_context = clk_sama7g5_master_restore_context,
};
struct clk_hw * __init
@@ -672,10 +945,10 @@ at91_clk_sama7g5_register_master(struct regmap *regmap,
master->mux_table = mux_table;
spin_lock_irqsave(master->lock, flags);
- regmap_write(master->regmap, PMC_MCR, master->id);
- regmap_read(master->regmap, PMC_MCR, &val);
- master->parent = (val & PMC_MCR_CSS) >> PMC_MCR_CSS_SHIFT;
- master->div = (val & PMC_MCR_DIV) >> MASTER_DIV_SHIFT;
+ regmap_write(master->regmap, AT91_PMC_MCR_V2, master->id);
+ regmap_read(master->regmap, AT91_PMC_MCR_V2, &val);
+ master->parent = (val & AT91_PMC_MCR_V2_CSS) >> PMC_MCR_CSS_SHIFT;
+ master->div = (val & AT91_PMC_MCR_V2_DIV) >> MASTER_DIV_SHIFT;
spin_unlock_irqrestore(master->lock, flags);
hw = &master->hw;
diff --git a/drivers/clk/at91/clk-peripheral.c b/drivers/clk/at91/clk-peripheral.c
index 7a27ba8e0577..e14fa5ac734c 100644
--- a/drivers/clk/at91/clk-peripheral.c
+++ b/drivers/clk/at91/clk-peripheral.c
@@ -37,6 +37,7 @@ struct clk_sam9x5_peripheral {
u32 id;
u32 div;
const struct clk_pcr_layout *layout;
+ struct at91_clk_pms pms;
bool auto_div;
int chg_pid;
};
@@ -155,10 +156,11 @@ static void clk_sam9x5_peripheral_autodiv(struct clk_sam9x5_peripheral *periph)
periph->div = shift;
}
-static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+static int clk_sam9x5_peripheral_set(struct clk_sam9x5_peripheral *periph,
+ unsigned int status)
{
- struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
unsigned long flags;
+ unsigned int enable = status ? AT91_PMC_PCR_EN : 0;
if (periph->id < PERIPHERAL_ID_MIN)
return 0;
@@ -168,15 +170,21 @@ static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
(periph->id & periph->layout->pid_mask));
regmap_update_bits(periph->regmap, periph->layout->offset,
periph->layout->div_mask | periph->layout->cmd |
- AT91_PMC_PCR_EN,
+ enable,
field_prep(periph->layout->div_mask, periph->div) |
- periph->layout->cmd |
- AT91_PMC_PCR_EN);
+ periph->layout->cmd | enable);
spin_unlock_irqrestore(periph->lock, flags);
return 0;
}
+static int clk_sam9x5_peripheral_enable(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ return clk_sam9x5_peripheral_set(periph, 1);
+}
+
static void clk_sam9x5_peripheral_disable(struct clk_hw *hw)
{
struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
@@ -393,6 +401,23 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
return -EINVAL;
}
+static int clk_sam9x5_peripheral_save_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ periph->pms.status = clk_sam9x5_peripheral_is_enabled(hw);
+
+ return 0;
+}
+
+static void clk_sam9x5_peripheral_restore_context(struct clk_hw *hw)
+{
+ struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw);
+
+ if (periph->pms.status)
+ clk_sam9x5_peripheral_set(periph, periph->pms.status);
+}
+
static const struct clk_ops sam9x5_peripheral_ops = {
.enable = clk_sam9x5_peripheral_enable,
.disable = clk_sam9x5_peripheral_disable,
@@ -400,6 +425,8 @@ static const struct clk_ops sam9x5_peripheral_ops = {
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
.round_rate = clk_sam9x5_peripheral_round_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
+ .save_context = clk_sam9x5_peripheral_save_context,
+ .restore_context = clk_sam9x5_peripheral_restore_context,
};
static const struct clk_ops sam9x5_peripheral_chg_ops = {
@@ -409,6 +436,8 @@ static const struct clk_ops sam9x5_peripheral_chg_ops = {
.recalc_rate = clk_sam9x5_peripheral_recalc_rate,
.determine_rate = clk_sam9x5_peripheral_determine_rate,
.set_rate = clk_sam9x5_peripheral_set_rate,
+ .save_context = clk_sam9x5_peripheral_save_context,
+ .restore_context = clk_sam9x5_peripheral_restore_context,
};
struct clk_hw * __init
@@ -460,7 +489,6 @@ at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock,
hw = ERR_PTR(ret);
} else {
clk_sam9x5_peripheral_autodiv(periph);
- pmc_register_id(id);
}
return hw;
diff --git a/drivers/clk/at91/clk-pll.c b/drivers/clk/at91/clk-pll.c
index 6ed986d3eee0..249d6a53cedf 100644
--- a/drivers/clk/at91/clk-pll.c
+++ b/drivers/clk/at91/clk-pll.c
@@ -40,6 +40,7 @@ struct clk_pll {
u16 mul;
const struct clk_pll_layout *layout;
const struct clk_pll_characteristics *characteristics;
+ struct at91_clk_pms pms;
};
static inline bool clk_pll_ready(struct regmap *regmap, int id)
@@ -260,6 +261,42 @@ static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_pll_save_context(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ pll->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ pll->pms.rate = clk_pll_recalc_rate(&pll->hw, pll->pms.parent_rate);
+ pll->pms.status = clk_pll_ready(pll->regmap, PLL_REG(pll->id));
+
+ return 0;
+}
+
+static void clk_pll_restore_context(struct clk_hw *hw)
+{
+ struct clk_pll *pll = to_clk_pll(hw);
+ unsigned long calc_rate;
+ unsigned int pllr, pllr_out, pllr_count;
+ u8 out = 0;
+
+ if (pll->characteristics->out)
+ out = pll->characteristics->out[pll->range];
+
+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr);
+
+ calc_rate = (pll->pms.parent_rate / PLL_DIV(pllr)) *
+ (PLL_MUL(pllr, pll->layout) + 1);
+ pllr_count = (pllr >> PLL_COUNT_SHIFT) & PLL_MAX_COUNT;
+ pllr_out = (pllr >> PLL_OUT_SHIFT) & out;
+
+ if (pll->pms.rate != calc_rate ||
+ pll->pms.status != clk_pll_ready(pll->regmap, PLL_REG(pll->id)) ||
+ pllr_count != PLL_MAX_COUNT ||
+ (out && pllr_out != out))
+ pr_warn("PLLAR was not configured properly by firmware\n");
+}
+
static const struct clk_ops pll_ops = {
.prepare = clk_pll_prepare,
.unprepare = clk_pll_unprepare,
@@ -267,6 +304,8 @@ static const struct clk_ops pll_ops = {
.recalc_rate = clk_pll_recalc_rate,
.round_rate = clk_pll_round_rate,
.set_rate = clk_pll_set_rate,
+ .save_context = clk_pll_save_context,
+ .restore_context = clk_pll_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c
index fcf8f6a1c2c6..6c4b259d31d3 100644
--- a/drivers/clk/at91/clk-programmable.c
+++ b/drivers/clk/at91/clk-programmable.c
@@ -24,6 +24,7 @@ struct clk_programmable {
u32 *mux_table;
u8 id;
const struct clk_programmable_layout *layout;
+ struct at91_clk_pms pms;
};
#define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw)
@@ -177,12 +178,38 @@ static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int clk_programmable_save_context(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ prog->pms.parent = clk_programmable_get_parent(hw);
+ prog->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ prog->pms.rate = clk_programmable_recalc_rate(hw, prog->pms.parent_rate);
+
+ return 0;
+}
+
+static void clk_programmable_restore_context(struct clk_hw *hw)
+{
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ int ret;
+
+ ret = clk_programmable_set_parent(hw, prog->pms.parent);
+ if (ret)
+ return;
+
+ clk_programmable_set_rate(hw, prog->pms.rate, prog->pms.parent_rate);
+}
+
static const struct clk_ops programmable_ops = {
.recalc_rate = clk_programmable_recalc_rate,
.determine_rate = clk_programmable_determine_rate,
.get_parent = clk_programmable_get_parent,
.set_parent = clk_programmable_set_parent,
.set_rate = clk_programmable_set_rate,
+ .save_context = clk_programmable_save_context,
+ .restore_context = clk_programmable_restore_context,
};
struct clk_hw * __init
@@ -221,8 +248,6 @@ at91_clk_register_programmable(struct regmap *regmap,
if (ret) {
kfree(prog);
hw = ERR_PTR(ret);
- } else {
- pmc_register_pck(id);
}
return hw;
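For mux-plus-divider clocks such as the programmable clock above (and the USB clock later in this patch), the saved state also covers the parent selection and the rates, and the restore order matters: the parent is re-applied first, and the rate only if reparenting succeeded. A minimal sketch of that ordering, with foo_* stubs standing in for the driver's real set_parent/set_rate callbacks (not part of this patch):

#include <linux/clk-provider.h>
#include "pmc.h"	/* struct at91_clk_pms, added by this series */

/* Hypothetical stand-ins for the driver's mux/divider programming. */
static int foo_clk_set_parent(struct clk_hw *hw, u8 index)
{
	return 0;	/* real drivers program the mux here */
}

static int foo_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	return 0;	/* real drivers program the divider here */
}

static void foo_clk_restore_context(struct clk_hw *hw,
				    const struct at91_clk_pms *pms)
{
	int ret;

	/* Reparent first: the saved rate is relative to the saved parent. */
	ret = foo_clk_set_parent(hw, pms->parent);
	if (ret)
		return;

	foo_clk_set_rate(hw, pms->rate, pms->parent_rate);
}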
diff --git a/drivers/clk/at91/clk-sam9x60-pll.c b/drivers/clk/at91/clk-sam9x60-pll.c
index 34e3ab13741a..d757003004cb 100644
--- a/drivers/clk/at91/clk-sam9x60-pll.c
+++ b/drivers/clk/at91/clk-sam9x60-pll.c
@@ -5,6 +5,7 @@
*/
#include <linux/bitfield.h>
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
@@ -38,19 +39,24 @@ struct sam9x60_pll_core {
struct sam9x60_frac {
struct sam9x60_pll_core core;
+ struct at91_clk_pms pms;
u32 frac;
u16 mul;
};
struct sam9x60_div {
struct sam9x60_pll_core core;
+ struct at91_clk_pms pms;
u8 div;
+ u8 safe_div;
};
#define to_sam9x60_pll_core(hw) container_of(hw, struct sam9x60_pll_core, hw)
#define to_sam9x60_frac(core) container_of(core, struct sam9x60_frac, core)
#define to_sam9x60_div(core) container_of(core, struct sam9x60_div, core)
+static struct sam9x60_div *notifier_div;
+
static inline bool sam9x60_pll_ready(struct regmap *regmap, int id)
{
unsigned int status;
@@ -71,13 +77,12 @@ static unsigned long sam9x60_frac_pll_recalc_rate(struct clk_hw *hw,
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
- return (parent_rate * (frac->mul + 1) +
- ((u64)parent_rate * frac->frac >> 22));
+ return parent_rate * (frac->mul + 1) +
+ DIV_ROUND_CLOSEST_ULL((u64)parent_rate * frac->frac, (1 << 22));
}
-static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+static int sam9x60_frac_pll_set(struct sam9x60_pll_core *core)
{
- struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_frac *frac = to_sam9x60_frac(core);
struct regmap *regmap = core->regmap;
unsigned int val, cfrac, cmul;
@@ -141,6 +146,13 @@ unlock:
return 0;
}
+static int sam9x60_frac_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+
+ return sam9x60_frac_pll_set(core);
+}
+
static void sam9x60_frac_pll_unprepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -280,6 +292,25 @@ unlock:
return ret;
}
+static int sam9x60_frac_pll_save_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+
+ frac->pms.status = sam9x60_pll_ready(core->regmap, core->id);
+
+ return 0;
+}
+
+static void sam9x60_frac_pll_restore_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_frac *frac = to_sam9x60_frac(core);
+
+ if (frac->pms.status)
+ sam9x60_frac_pll_set(core);
+}
+
static const struct clk_ops sam9x60_frac_pll_ops = {
.prepare = sam9x60_frac_pll_prepare,
.unprepare = sam9x60_frac_pll_unprepare,
@@ -287,6 +318,8 @@ static const struct clk_ops sam9x60_frac_pll_ops = {
.recalc_rate = sam9x60_frac_pll_recalc_rate,
.round_rate = sam9x60_frac_pll_round_rate,
.set_rate = sam9x60_frac_pll_set_rate,
+ .save_context = sam9x60_frac_pll_save_context,
+ .restore_context = sam9x60_frac_pll_restore_context,
};
static const struct clk_ops sam9x60_frac_pll_ops_chg = {
@@ -296,11 +329,32 @@ static const struct clk_ops sam9x60_frac_pll_ops_chg = {
.recalc_rate = sam9x60_frac_pll_recalc_rate,
.round_rate = sam9x60_frac_pll_round_rate,
.set_rate = sam9x60_frac_pll_set_rate_chg,
+ .save_context = sam9x60_frac_pll_save_context,
+ .restore_context = sam9x60_frac_pll_restore_context,
};
-static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+/* This function should be called with spinlock acquired. */
+static void sam9x60_div_pll_set_div(struct sam9x60_pll_core *core, u32 div,
+ bool enable)
+{
+ struct regmap *regmap = core->regmap;
+ u32 ena_msk = enable ? core->layout->endiv_mask : 0;
+ u32 ena_val = enable ? (1 << core->layout->endiv_shift) : 0;
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
+ core->layout->div_mask | ena_msk,
+ (div << core->layout->div_shift) | ena_val);
+
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
+ AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
+ AT91_PMC_PLL_UPDT_UPDATE | core->id);
+
+ while (!sam9x60_pll_ready(regmap, core->id))
+ cpu_relax();
+}
+
+static int sam9x60_div_pll_set(struct sam9x60_pll_core *core)
{
- struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
struct sam9x60_div *div = to_sam9x60_div(core);
struct regmap *regmap = core->regmap;
unsigned long flags;
@@ -316,17 +370,7 @@ static int sam9x60_div_pll_prepare(struct clk_hw *hw)
if (!!(val & core->layout->endiv_mask) && cdiv == div->div)
goto unlock;
- regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
- core->layout->div_mask | core->layout->endiv_mask,
- (div->div << core->layout->div_shift) |
- (1 << core->layout->endiv_shift));
-
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
-
- while (!sam9x60_pll_ready(regmap, core->id))
- cpu_relax();
+ sam9x60_div_pll_set_div(core, div->div, 1);
unlock:
spin_unlock_irqrestore(core->lock, flags);
@@ -334,6 +378,13 @@ unlock:
return 0;
}
+static int sam9x60_div_pll_prepare(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+
+ return sam9x60_div_pll_set(core);
+}
+
static void sam9x60_div_pll_unprepare(struct clk_hw *hw)
{
struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
@@ -465,16 +516,7 @@ static int sam9x60_div_pll_set_rate_chg(struct clk_hw *hw, unsigned long rate,
if (cdiv == div->div)
goto unlock;
- regmap_update_bits(regmap, AT91_PMC_PLL_CTRL0,
- core->layout->div_mask,
- (div->div << core->layout->div_shift));
-
- regmap_update_bits(regmap, AT91_PMC_PLL_UPDT,
- AT91_PMC_PLL_UPDT_UPDATE | AT91_PMC_PLL_UPDT_ID_MSK,
- AT91_PMC_PLL_UPDT_UPDATE | core->id);
-
- while (!sam9x60_pll_ready(regmap, core->id))
- cpu_relax();
+ sam9x60_div_pll_set_div(core, div->div, 0);
unlock:
spin_unlock_irqrestore(core->lock, irqflags);
@@ -482,6 +524,67 @@ unlock:
return 0;
}
+static int sam9x60_div_pll_save_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ div->pms.status = sam9x60_div_pll_is_prepared(hw);
+
+ return 0;
+}
+
+static void sam9x60_div_pll_restore_context(struct clk_hw *hw)
+{
+ struct sam9x60_pll_core *core = to_sam9x60_pll_core(hw);
+ struct sam9x60_div *div = to_sam9x60_div(core);
+
+ if (div->pms.status)
+ sam9x60_div_pll_set(core);
+}
+
+static int sam9x60_div_pll_notifier_fn(struct notifier_block *notifier,
+ unsigned long code, void *data)
+{
+ struct sam9x60_div *div = notifier_div;
+ struct sam9x60_pll_core core = div->core;
+ struct regmap *regmap = core.regmap;
+ unsigned long irqflags;
+ u32 val, cdiv;
+ int ret = NOTIFY_DONE;
+
+ if (code != PRE_RATE_CHANGE)
+ return ret;
+
+ /*
+ * We switch to the safe divider to avoid overclocking the other
+ * domains fed by us while the frac PLL (our parent) is changed.
+ */
+ div->div = div->safe_div;
+
+ spin_lock_irqsave(core.lock, irqflags);
+ regmap_update_bits(regmap, AT91_PMC_PLL_UPDT, AT91_PMC_PLL_UPDT_ID_MSK,
+ core.id);
+ regmap_read(regmap, AT91_PMC_PLL_CTRL0, &val);
+ cdiv = (val & core.layout->div_mask) >> core.layout->div_shift;
+
+ /* Stop if nothing changed. */
+ if (cdiv == div->safe_div)
+ goto unlock;
+
+ sam9x60_div_pll_set_div(&core, div->div, 0);
+ ret = NOTIFY_OK;
+
+unlock:
+ spin_unlock_irqrestore(core.lock, irqflags);
+
+ return ret;
+}
+
+static struct notifier_block sam9x60_div_pll_notifier = {
+ .notifier_call = sam9x60_div_pll_notifier_fn,
+};
+
static const struct clk_ops sam9x60_div_pll_ops = {
.prepare = sam9x60_div_pll_prepare,
.unprepare = sam9x60_div_pll_unprepare,
@@ -489,6 +592,8 @@ static const struct clk_ops sam9x60_div_pll_ops = {
.recalc_rate = sam9x60_div_pll_recalc_rate,
.round_rate = sam9x60_div_pll_round_rate,
.set_rate = sam9x60_div_pll_set_rate,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
};
static const struct clk_ops sam9x60_div_pll_ops_chg = {
@@ -498,6 +603,8 @@ static const struct clk_ops sam9x60_div_pll_ops_chg = {
.recalc_rate = sam9x60_div_pll_recalc_rate,
.round_rate = sam9x60_div_pll_round_rate,
.set_rate = sam9x60_div_pll_set_rate_chg,
+ .save_context = sam9x60_div_pll_save_context,
+ .restore_context = sam9x60_div_pll_restore_context,
};
struct clk_hw * __init
@@ -587,7 +694,8 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, u32 flags)
+ const struct clk_pll_layout *layout, u32 flags,
+ u32 safe_div)
{
struct sam9x60_div *div;
struct clk_hw *hw;
@@ -596,9 +704,13 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
unsigned int val;
int ret;
- if (id > PLL_MAX_ID || !lock)
+ /* We only support one changeable PLL. */
+ if (id > PLL_MAX_ID || !lock || (safe_div && notifier_div))
return ERR_PTR(-EINVAL);
+ if (safe_div >= PLL_DIV_MAX)
+ safe_div = PLL_DIV_MAX - 1;
+
div = kzalloc(sizeof(*div), GFP_KERNEL);
if (!div)
return ERR_PTR(-ENOMEM);
@@ -618,6 +730,7 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
div->core.layout = layout;
div->core.regmap = regmap;
div->core.lock = lock;
+ div->safe_div = safe_div;
spin_lock_irqsave(div->core.lock, irqflags);
@@ -633,6 +746,9 @@ sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
if (ret) {
kfree(div);
hw = ERR_PTR(ret);
+ } else if (div->safe_div) {
+ notifier_div = div;
+ clk_notifier_register(hw->clk, &sam9x60_div_pll_notifier);
}
return hw;
diff --git a/drivers/clk/at91/clk-system.c b/drivers/clk/at91/clk-system.c
index f83ec0de86c3..80720fd1a9cf 100644
--- a/drivers/clk/at91/clk-system.c
+++ b/drivers/clk/at91/clk-system.c
@@ -20,6 +20,7 @@
struct clk_system {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u8 id;
};
@@ -77,10 +78,29 @@ static int clk_system_is_prepared(struct clk_hw *hw)
return !!(status & (1 << sys->id));
}
+static int clk_system_save_context(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+
+ sys->pms.status = clk_system_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_system_restore_context(struct clk_hw *hw)
+{
+ struct clk_system *sys = to_clk_system(hw);
+
+ if (sys->pms.status)
+ clk_system_prepare(&sys->hw);
+}
+
static const struct clk_ops system_ops = {
.prepare = clk_system_prepare,
.unprepare = clk_system_unprepare,
.is_prepared = clk_system_is_prepared,
+ .save_context = clk_system_save_context,
+ .restore_context = clk_system_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 31d5c45e30d7..b0696a928aa9 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -24,6 +24,7 @@
struct at91sam9x5_clk_usb {
struct clk_hw hw;
struct regmap *regmap;
+ struct at91_clk_pms pms;
u32 usbs_mask;
u8 num_parents;
};
@@ -148,12 +149,38 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
+static int at91sam9x5_usb_save_context(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+ usb->pms.parent = at91sam9x5_clk_usb_get_parent(hw);
+ usb->pms.parent_rate = clk_hw_get_rate(parent_hw);
+ usb->pms.rate = at91sam9x5_clk_usb_recalc_rate(hw, usb->pms.parent_rate);
+
+ return 0;
+}
+
+static void at91sam9x5_usb_restore_context(struct clk_hw *hw)
+{
+ struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
+ int ret;
+
+ ret = at91sam9x5_clk_usb_set_parent(hw, usb->pms.parent);
+ if (ret)
+ return;
+
+ at91sam9x5_clk_usb_set_rate(hw, usb->pms.rate, usb->pms.parent_rate);
+}
+
static const struct clk_ops at91sam9x5_usb_ops = {
.recalc_rate = at91sam9x5_clk_usb_recalc_rate,
.determine_rate = at91sam9x5_clk_usb_determine_rate,
.get_parent = at91sam9x5_clk_usb_get_parent,
.set_parent = at91sam9x5_clk_usb_set_parent,
.set_rate = at91sam9x5_clk_usb_set_rate,
+ .save_context = at91sam9x5_usb_save_context,
+ .restore_context = at91sam9x5_usb_restore_context,
};
static int at91sam9n12_clk_usb_enable(struct clk_hw *hw)
diff --git a/drivers/clk/at91/clk-utmi.c b/drivers/clk/at91/clk-utmi.c
index df9f3fc3b6a6..a22c10d9a1b9 100644
--- a/drivers/clk/at91/clk-utmi.c
+++ b/drivers/clk/at91/clk-utmi.c
@@ -23,6 +23,7 @@ struct clk_utmi {
struct clk_hw hw;
struct regmap *regmap_pmc;
struct regmap *regmap_sfr;
+ struct at91_clk_pms pms;
};
#define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw)
@@ -113,11 +114,30 @@ static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw,
return UTMI_RATE;
}
+static int clk_utmi_save_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ utmi->pms.status = clk_utmi_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_utmi_restore_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ if (utmi->pms.status)
+ clk_utmi_prepare(hw);
+}
+
static const struct clk_ops utmi_ops = {
.prepare = clk_utmi_prepare,
.unprepare = clk_utmi_unprepare,
.is_prepared = clk_utmi_is_prepared,
.recalc_rate = clk_utmi_recalc_rate,
+ .save_context = clk_utmi_save_context,
+ .restore_context = clk_utmi_restore_context,
};
static struct clk_hw * __init
@@ -232,10 +252,29 @@ static int clk_utmi_sama7g5_is_prepared(struct clk_hw *hw)
return 0;
}
+static int clk_utmi_sama7g5_save_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ utmi->pms.status = clk_utmi_sama7g5_is_prepared(hw);
+
+ return 0;
+}
+
+static void clk_utmi_sama7g5_restore_context(struct clk_hw *hw)
+{
+ struct clk_utmi *utmi = to_clk_utmi(hw);
+
+ if (utmi->pms.status)
+ clk_utmi_sama7g5_prepare(hw);
+}
+
static const struct clk_ops sama7g5_utmi_ops = {
.prepare = clk_utmi_sama7g5_prepare,
.is_prepared = clk_utmi_sama7g5_is_prepared,
.recalc_rate = clk_utmi_recalc_rate,
+ .save_context = clk_utmi_sama7g5_save_context,
+ .restore_context = clk_utmi_sama7g5_restore_context,
};
struct clk_hw * __init
diff --git a/drivers/clk/at91/dt-compat.c b/drivers/clk/at91/dt-compat.c
index a97b99c2dc12..ca2dbb65b9df 100644
--- a/drivers/clk/at91/dt-compat.c
+++ b/drivers/clk/at91/dt-compat.c
@@ -399,7 +399,7 @@ of_at91_clk_master_setup(struct device_node *np,
hw = at91_clk_register_master_div(regmap, name, "masterck_pres",
layout, characteristics,
- &mck_lock, CLK_SET_RATE_GATE);
+ &mck_lock, CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto out_free_characteristics;
diff --git a/drivers/clk/at91/pmc.c b/drivers/clk/at91/pmc.c
index 20ee9dccee78..5aa9c1f1c886 100644
--- a/drivers/clk/at91/pmc.c
+++ b/drivers/clk/at91/pmc.c
@@ -3,10 +3,12 @@
* Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com>
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>
#include <linux/clk/at91_pmc.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
@@ -14,8 +16,6 @@
#include <asm/proc-fns.h>
-#include <dt-bindings/clock/at91.h>
-
#include "pmc.h"
#define PMC_MAX_IDS 128
@@ -111,151 +111,46 @@ struct pmc_data *pmc_data_allocate(unsigned int ncore, unsigned int nsystem,
}
#ifdef CONFIG_PM
-static struct regmap *pmcreg;
-static u8 registered_ids[PMC_MAX_IDS];
-static u8 registered_pcks[PMC_MAX_PCKS];
+/* Address in SECURAM that says whether we suspend to backup mode. */
+static void __iomem *at91_pmc_backup_suspend;
-static struct
+static int at91_pmc_suspend(void)
{
- u32 scsr;
- u32 pcsr0;
- u32 uckr;
- u32 mor;
- u32 mcfr;
- u32 pllar;
- u32 mckr;
- u32 usb;
- u32 imr;
- u32 pcsr1;
- u32 pcr[PMC_MAX_IDS];
- u32 audio_pll0;
- u32 audio_pll1;
- u32 pckr[PMC_MAX_PCKS];
-} pmc_cache;
+ unsigned int backup;
-/*
- * As Peripheral ID 0 is invalid on AT91 chips, the identifier is stored
- * without alteration in the table, and 0 is for unused clocks.
- */
-void pmc_register_id(u8 id)
-{
- int i;
-
- for (i = 0; i < PMC_MAX_IDS; i++) {
- if (registered_ids[i] == 0) {
- registered_ids[i] = id;
- break;
- }
- if (registered_ids[i] == id)
- break;
- }
-}
+ if (!at91_pmc_backup_suspend)
+ return 0;
-/*
- * As Programmable Clock 0 is valid on AT91 chips, there is an offset
- * of 1 between the stored value and the real clock ID.
- */
-void pmc_register_pck(u8 pck)
-{
- int i;
-
- for (i = 0; i < PMC_MAX_PCKS; i++) {
- if (registered_pcks[i] == 0) {
- registered_pcks[i] = pck + 1;
- break;
- }
- if (registered_pcks[i] == (pck + 1))
- break;
- }
-}
-
-static int pmc_suspend(void)
-{
- int i;
- u8 num;
-
- regmap_read(pmcreg, AT91_PMC_SCSR, &pmc_cache.scsr);
- regmap_read(pmcreg, AT91_PMC_PCSR, &pmc_cache.pcsr0);
- regmap_read(pmcreg, AT91_CKGR_UCKR, &pmc_cache.uckr);
- regmap_read(pmcreg, AT91_CKGR_MOR, &pmc_cache.mor);
- regmap_read(pmcreg, AT91_CKGR_MCFR, &pmc_cache.mcfr);
- regmap_read(pmcreg, AT91_CKGR_PLLAR, &pmc_cache.pllar);
- regmap_read(pmcreg, AT91_PMC_MCKR, &pmc_cache.mckr);
- regmap_read(pmcreg, AT91_PMC_USB, &pmc_cache.usb);
- regmap_read(pmcreg, AT91_PMC_IMR, &pmc_cache.imr);
- regmap_read(pmcreg, AT91_PMC_PCSR1, &pmc_cache.pcsr1);
-
- for (i = 0; registered_ids[i]; i++) {
- regmap_write(pmcreg, AT91_PMC_PCR,
- (registered_ids[i] & AT91_PMC_PCR_PID_MASK));
- regmap_read(pmcreg, AT91_PMC_PCR,
- &pmc_cache.pcr[registered_ids[i]]);
- }
- for (i = 0; registered_pcks[i]; i++) {
- num = registered_pcks[i] - 1;
- regmap_read(pmcreg, AT91_PMC_PCKR(num), &pmc_cache.pckr[num]);
- }
+ backup = readl_relaxed(at91_pmc_backup_suspend);
+ if (!backup)
+ return 0;
- return 0;
+ return clk_save_context();
}
-static bool pmc_ready(unsigned int mask)
+static void at91_pmc_resume(void)
{
- unsigned int status;
+ unsigned int backup;
- regmap_read(pmcreg, AT91_PMC_SR, &status);
+ if (!at91_pmc_backup_suspend)
+ return;
- return ((status & mask) == mask) ? 1 : 0;
-}
+ backup = readl_relaxed(at91_pmc_backup_suspend);
+ if (!backup)
+ return;
-static void pmc_resume(void)
-{
- int i;
- u8 num;
- u32 tmp;
- u32 mask = AT91_PMC_MCKRDY | AT91_PMC_LOCKA;
-
- regmap_read(pmcreg, AT91_PMC_MCKR, &tmp);
- if (pmc_cache.mckr != tmp)
- pr_warn("MCKR was not configured properly by the firmware\n");
- regmap_read(pmcreg, AT91_CKGR_PLLAR, &tmp);
- if (pmc_cache.pllar != tmp)
- pr_warn("PLLAR was not configured properly by the firmware\n");
-
- regmap_write(pmcreg, AT91_PMC_SCER, pmc_cache.scsr);
- regmap_write(pmcreg, AT91_PMC_PCER, pmc_cache.pcsr0);
- regmap_write(pmcreg, AT91_CKGR_UCKR, pmc_cache.uckr);
- regmap_write(pmcreg, AT91_CKGR_MOR, pmc_cache.mor);
- regmap_write(pmcreg, AT91_CKGR_MCFR, pmc_cache.mcfr);
- regmap_write(pmcreg, AT91_PMC_USB, pmc_cache.usb);
- regmap_write(pmcreg, AT91_PMC_IMR, pmc_cache.imr);
- regmap_write(pmcreg, AT91_PMC_PCER1, pmc_cache.pcsr1);
-
- for (i = 0; registered_ids[i]; i++) {
- regmap_write(pmcreg, AT91_PMC_PCR,
- pmc_cache.pcr[registered_ids[i]] |
- AT91_PMC_PCR_CMD);
- }
- for (i = 0; registered_pcks[i]; i++) {
- num = registered_pcks[i] - 1;
- regmap_write(pmcreg, AT91_PMC_PCKR(num), pmc_cache.pckr[num]);
- }
-
- if (pmc_cache.uckr & AT91_PMC_UPLLEN)
- mask |= AT91_PMC_LOCKU;
-
- while (!pmc_ready(mask))
- cpu_relax();
+ clk_restore_context();
}
static struct syscore_ops pmc_syscore_ops = {
- .suspend = pmc_suspend,
- .resume = pmc_resume,
+ .suspend = at91_pmc_suspend,
+ .resume = at91_pmc_resume,
};
-static const struct of_device_id sama5d2_pmc_dt_ids[] = {
+static const struct of_device_id pmc_dt_ids[] = {
{ .compatible = "atmel,sama5d2-pmc" },
+ { .compatible = "microchip,sama7g5-pmc", },
{ /* sentinel */ }
};
@@ -263,14 +158,31 @@ static int __init pmc_register_ops(void)
{
struct device_node *np;
- np = of_find_matching_node(NULL, sama5d2_pmc_dt_ids);
+ np = of_find_matching_node(NULL, pmc_dt_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
+ of_node_put(np);
+
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d2-securam");
if (!np)
return -ENODEV;
- pmcreg = device_node_to_regmap(np);
+ if (!of_device_is_available(np)) {
+ of_node_put(np);
+ return -ENODEV;
+ }
of_node_put(np);
- if (IS_ERR(pmcreg))
- return PTR_ERR(pmcreg);
+
+ at91_pmc_backup_suspend = of_iomap(np, 0);
+ if (!at91_pmc_backup_suspend) {
+ pr_warn("%s(): unable to map securam\n", __func__);
+ return -ENOMEM;
+ }
register_syscore_ops(&pmc_syscore_ops);
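With the per-clock callbacks in place, the PMC power-management path above reduces to the generic clk context API: clk_save_context() walks every registered clock and calls its .save_context op, and clk_restore_context() does the reverse on resume. The shape of such a syscore hookup, sketched for a hypothetical platform that keeps a "deep state pending" flag in on-chip RAM (the flag and its mapping are illustrative assumptions, not part of this patch):

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>

/* Hypothetical: mapped on-chip RAM word telling us a deep state is entered. */
static void __iomem *foo_deep_sleep_flag;

static int foo_clk_syscore_suspend(void)
{
	/* Only save clock state when the hardware will actually lose it. */
	if (!foo_deep_sleep_flag || !readl_relaxed(foo_deep_sleep_flag))
		return 0;

	return clk_save_context();
}

static void foo_clk_syscore_resume(void)
{
	if (!foo_deep_sleep_flag || !readl_relaxed(foo_deep_sleep_flag))
		return;

	clk_restore_context();
}

static struct syscore_ops foo_clk_syscore_ops = {
	.suspend = foo_clk_syscore_suspend,
	.resume = foo_clk_syscore_resume,
};

/* register_syscore_ops(&foo_clk_syscore_ops); from an initcall. */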
diff --git a/drivers/clk/at91/pmc.h b/drivers/clk/at91/pmc.h
index a49076c804a9..3a1bf6194c28 100644
--- a/drivers/clk/at91/pmc.h
+++ b/drivers/clk/at91/pmc.h
@@ -13,6 +13,8 @@
#include <linux/regmap.h>
#include <linux/spinlock.h>
+#include <dt-bindings/clock/at91.h>
+
extern spinlock_t pmc_pcr_lock;
struct pmc_data {
@@ -98,6 +100,20 @@ struct clk_pcr_layout {
u32 pid_mask;
};
+/**
+ * struct at91_clk_pms - Power management state for AT91 clock
+ * @rate: clock rate
+ * @parent_rate: clock parent rate
+ * @status: clock status (enabled or disabled)
+ * @parent: clock parent index
+ */
+struct at91_clk_pms {
+ unsigned long rate;
+ unsigned long parent_rate;
+ unsigned int status;
+ unsigned int parent;
+};
+
#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1))
#define field_prep(_mask, _val) (((_val) << (ffs(_mask) - 1)) & (_mask))
@@ -166,7 +182,7 @@ at91_clk_register_master_div(struct regmap *regmap, const char *name,
const char *parent_names,
const struct clk_master_layout *layout,
const struct clk_master_characteristics *characteristics,
- spinlock_t *lock, u32 flags);
+ spinlock_t *lock, u32 flags, u32 safe_div);
struct clk_hw * __init
at91_clk_sama7g5_register_master(struct regmap *regmap,
@@ -198,7 +214,8 @@ struct clk_hw * __init
sam9x60_clk_register_div_pll(struct regmap *regmap, spinlock_t *lock,
const char *name, const char *parent_name, u8 id,
const struct clk_pll_characteristics *characteristics,
- const struct clk_pll_layout *layout, u32 flags);
+ const struct clk_pll_layout *layout, u32 flags,
+ u32 safe_div);
struct clk_hw * __init
sam9x60_clk_register_frac_pll(struct regmap *regmap, spinlock_t *lock,
@@ -248,12 +265,4 @@ struct clk_hw * __init
at91_clk_sama7g5_register_utmi(struct regmap *regmap, const char *name,
const char *parent_name);
-#ifdef CONFIG_PM
-void pmc_register_id(u8 id);
-void pmc_register_pck(u8 pck);
-#else
-static inline void pmc_register_id(u8 id) {}
-static inline void pmc_register_pck(u8 pck) {}
-#endif
-
#endif /* __PMC_H_ */
diff --git a/drivers/clk/at91/sam9x60.c b/drivers/clk/at91/sam9x60.c
index 5f6fa89571b7..5c264185f261 100644
--- a/drivers/clk/at91/sam9x60.c
+++ b/drivers/clk/at91/sam9x60.c
@@ -242,7 +242,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
* This feeds CPU. It should not
* be disabled.
*/
- CLK_IS_CRITICAL | CLK_SET_RATE_GATE);
+ CLK_IS_CRITICAL | CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
@@ -260,7 +260,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
&pll_div_layout,
CLK_SET_RATE_GATE |
CLK_SET_PARENT_GATE |
- CLK_SET_RATE_PARENT);
+ CLK_SET_RATE_PARENT, 0);
if (IS_ERR(hw))
goto err_free;
@@ -279,7 +279,7 @@ static void __init sam9x60_pmc_setup(struct device_node *np)
hw = at91_clk_register_master_div(regmap, "masterck_div",
"masterck_pres", &sam9x60_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d2.c b/drivers/clk/at91/sama5d2.c
index 3d1f78176c3e..d027294a0089 100644
--- a/drivers/clk/at91/sama5d2.c
+++ b/drivers/clk/at91/sama5d2.c
@@ -249,7 +249,7 @@ static void __init sama5d2_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d3.c b/drivers/clk/at91/sama5d3.c
index d376257807d2..339d0f382ff0 100644
--- a/drivers/clk/at91/sama5d3.c
+++ b/drivers/clk/at91/sama5d3.c
@@ -184,7 +184,7 @@ static void __init sama5d3_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama5d4.c b/drivers/clk/at91/sama5d4.c
index 5cbaac68da44..4af75b1e39e9 100644
--- a/drivers/clk/at91/sama5d4.c
+++ b/drivers/clk/at91/sama5d4.c
@@ -199,7 +199,7 @@ static void __init sama5d4_pmc_setup(struct device_node *np)
"masterck_pres",
&at91sam9x5_master_layout,
&mck_characteristics, &mck_lock,
- CLK_SET_RATE_GATE);
+ CLK_SET_RATE_GATE, 0);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/at91/sama7g5.c b/drivers/clk/at91/sama7g5.c
index cf8c079aa086..369dfafabbca 100644
--- a/drivers/clk/at91/sama7g5.c
+++ b/drivers/clk/at91/sama7g5.c
@@ -127,6 +127,8 @@ static const struct clk_pll_characteristics pll_characteristics = {
* @t: clock type
* @f: clock flags
* @eid: export index in sama7g5->chws[] array
+ * @safe_div: intermediate divider that needs to be set on PRE_RATE_CHANGE
+ * notification
*/
static const struct {
const char *n;
@@ -136,6 +138,7 @@ static const struct {
unsigned long f;
u8 t;
u8 eid;
+ u8 safe_div;
} sama7g5_plls[][PLL_ID_MAX] = {
[PLL_ID_CPU] = {
{ .n = "cpupll_fracck",
@@ -156,7 +159,12 @@ static const struct {
.t = PLL_TYPE_DIV,
/* This feeds CPU. It should not be disabled. */
.f = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .eid = PMC_CPUPLL, },
+ .eid = PMC_CPUPLL,
+ /*
+ * A safe div of 15 should be safe even when switching between 1 GHz
+ * and 90 MHz (the frac PLL might go up to 1.2 GHz).
+ */
+ .safe_div = 15, },
},
[PLL_ID_SYS] = {
@@ -377,6 +385,7 @@ static const struct {
u8 id;
} sama7g5_periphck[] = {
{ .n = "pioA_clk", .p = "mck0", .id = 11, },
+ { .n = "securam_clk", .p = "mck0", .id = 18, },
{ .n = "sfr_clk", .p = "mck1", .id = 19, },
{ .n = "hsmc_clk", .p = "mck1", .id = 21, },
{ .n = "xdmac0_clk", .p = "mck1", .id = 22, },
@@ -841,7 +850,7 @@ static const struct {
/* MCK0 characteristics. */
static const struct clk_master_characteristics mck0_characteristics = {
- .output = { .min = 50000000, .max = 200000000 },
+ .output = { .min = 32768, .max = 200000000 },
.divisors = { 1, 2, 4, 3, 5 },
.have_div3_pres = 1,
};
@@ -966,7 +975,8 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
sama7g5_plls[i][j].p, i,
sama7g5_plls[i][j].c,
sama7g5_plls[i][j].l,
- sama7g5_plls[i][j].f);
+ sama7g5_plls[i][j].f,
+ sama7g5_plls[i][j].safe_div);
break;
default:
@@ -982,18 +992,9 @@ static void __init sama7g5_pmc_setup(struct device_node *np)
}
parent_names[0] = "cpupll_divpmcck";
- hw = at91_clk_register_master_pres(regmap, "cpuck", 1, parent_names,
- &mck0_layout, &mck0_characteristics,
- &pmc_mck0_lock,
- CLK_SET_RATE_PARENT, 0);
- if (IS_ERR(hw))
- goto err_free;
-
- sama7g5_pmc->chws[PMC_CPU] = hw;
-
- hw = at91_clk_register_master_div(regmap, "mck0", "cpuck",
+ hw = at91_clk_register_master_div(regmap, "mck0", "cpupll_divpmcck",
&mck0_layout, &mck0_characteristics,
- &pmc_mck0_lock, 0);
+ &pmc_mck0_lock, CLK_GET_RATE_NOCACHE, 5);
if (IS_ERR(hw))
goto err_free;
diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
index 0506046a5f4b..c04ae0e7e4b4 100644
--- a/drivers/clk/clk-composite.c
+++ b/drivers/clk/clk-composite.c
@@ -42,6 +42,29 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
return rate_ops->recalc_rate(rate_hw, parent_rate);
}
+static int clk_composite_determine_rate_for_parent(struct clk_hw *rate_hw,
+ struct clk_rate_request *req,
+ struct clk_hw *parent_hw,
+ const struct clk_ops *rate_ops)
+{
+ long rate;
+
+ req->best_parent_hw = parent_hw;
+ req->best_parent_rate = clk_hw_get_rate(parent_hw);
+
+ if (rate_ops->determine_rate)
+ return rate_ops->determine_rate(rate_hw, req);
+
+ rate = rate_ops->round_rate(rate_hw, req->rate,
+ &req->best_parent_rate);
+ if (rate < 0)
+ return rate;
+
+ req->rate = rate;
+
+ return 0;
+}
+
static int clk_composite_determine_rate(struct clk_hw *hw,
struct clk_rate_request *req)
{
@@ -51,54 +74,56 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
struct clk_hw *rate_hw = composite->rate_hw;
struct clk_hw *mux_hw = composite->mux_hw;
struct clk_hw *parent;
- unsigned long parent_rate;
- long tmp_rate, best_rate = 0;
unsigned long rate_diff;
unsigned long best_rate_diff = ULONG_MAX;
- long rate;
- int i;
+ unsigned long best_rate = 0;
+ int i, ret;
- if (rate_hw && rate_ops && rate_ops->determine_rate) {
- __clk_hw_set_clk(rate_hw, hw);
- return rate_ops->determine_rate(rate_hw, req);
- } else if (rate_hw && rate_ops && rate_ops->round_rate &&
- mux_hw && mux_ops && mux_ops->set_parent) {
+ if (rate_hw && rate_ops &&
+ (rate_ops->determine_rate || rate_ops->round_rate) &&
+ mux_hw && mux_ops && mux_ops->set_parent) {
req->best_parent_hw = NULL;
if (clk_hw_get_flags(hw) & CLK_SET_RATE_NO_REPARENT) {
+ struct clk_rate_request tmp_req = *req;
+
parent = clk_hw_get_parent(mux_hw);
- req->best_parent_hw = parent;
- req->best_parent_rate = clk_hw_get_rate(parent);
- rate = rate_ops->round_rate(rate_hw, req->rate,
- &req->best_parent_rate);
- if (rate < 0)
- return rate;
+ ret = clk_composite_determine_rate_for_parent(rate_hw,
+ &tmp_req,
+ parent,
+ rate_ops);
+ if (ret)
+ return ret;
+
+ req->rate = tmp_req.rate;
+ req->best_parent_rate = tmp_req.best_parent_rate;
- req->rate = rate;
return 0;
}
for (i = 0; i < clk_hw_get_num_parents(mux_hw); i++) {
+ struct clk_rate_request tmp_req = *req;
+
parent = clk_hw_get_parent_by_index(mux_hw, i);
if (!parent)
continue;
- parent_rate = clk_hw_get_rate(parent);
-
- tmp_rate = rate_ops->round_rate(rate_hw, req->rate,
- &parent_rate);
- if (tmp_rate < 0)
+ ret = clk_composite_determine_rate_for_parent(rate_hw,
+ &tmp_req,
+ parent,
+ rate_ops);
+ if (ret)
continue;
- rate_diff = abs(req->rate - tmp_rate);
+ rate_diff = abs(req->rate - tmp_req.rate);
if (!rate_diff || !req->best_parent_hw
|| best_rate_diff > rate_diff) {
req->best_parent_hw = parent;
- req->best_parent_rate = parent_rate;
+ req->best_parent_rate = tmp_req.best_parent_rate;
best_rate_diff = rate_diff;
- best_rate = tmp_rate;
+ best_rate = tmp_req.rate;
}
if (!rate_diff)
@@ -107,6 +132,9 @@ static int clk_composite_determine_rate(struct clk_hw *hw,
req->rate = best_rate;
return 0;
+ } else if (rate_hw && rate_ops && rate_ops->determine_rate) {
+ __clk_hw_set_clk(rate_hw, hw);
+ return rate_ops->determine_rate(rate_hw, req);
} else if (mux_hw && mux_ops && mux_ops->determine_rate) {
__clk_hw_set_clk(mux_hw, hw);
return mux_ops->determine_rate(mux_hw, req);
@@ -362,6 +390,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
return ERR_CAST(hw);
return hw->clk;
}
+EXPORT_SYMBOL_GPL(clk_register_composite);
struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
const struct clk_parent_data *parent_data,
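
With the rework above, a composite clock whose rate ops implement only .round_rate can still have its parent picked through the mux, since every candidate parent now goes through clk_composite_determine_rate_for_parent(). A minimal registration sketch; my_mux_ops and my_div_ops are placeholder op tables, not existing kernel symbols:

#include <linux/clk-provider.h>

static struct clk_hw *register_my_composite(const char * const *parents,
					    int num_parents,
					    struct clk_hw *mux_hw,
					    const struct clk_ops *my_mux_ops,
					    struct clk_hw *div_hw,
					    const struct clk_ops *my_div_ops)
{
	/*
	 * my_div_ops provides .recalc_rate/.round_rate/.set_rate only and
	 * my_mux_ops provides .get_parent/.set_parent; neither needs a
	 * .determine_rate for parent selection to work.
	 */
	return clk_hw_register_composite(NULL, "my_composite",
					 parents, num_parents,
					 mux_hw, my_mux_ops,
					 div_hw, my_div_ops,
					 NULL, NULL,	/* no gate */
					 0);
}
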
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 65508eb89ec9..f467d63bbf1e 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -3108,7 +3108,10 @@ static int clk_rate_get(void *data, u64 *val)
{
struct clk_core *core = data;
- *val = core->rate;
+ clk_prepare_lock();
+ *val = clk_core_get_rate_recalc(core);
+ clk_prepare_unlock();
+
return 0;
}
diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig
index 47d9ec3abd2f..c08edbd04d22 100644
--- a/drivers/clk/imx/Kconfig
+++ b/drivers/clk/imx/Kconfig
@@ -98,3 +98,10 @@ config CLK_IMX8QXP
select MXC_CLK_SCU
help
Build the driver for IMX8QXP SCU based clocks.
+
+config CLK_IMX8ULP
+ tristate "IMX8ULP CCM Clock Driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ select MXC_CLK
+ help
+	  Build the driver for i.MX8ULP CCM based clocks.
diff --git a/drivers/clk/imx/Makefile b/drivers/clk/imx/Makefile
index c24a2acbfa56..b5e040026dfb 100644
--- a/drivers/clk/imx/Makefile
+++ b/drivers/clk/imx/Makefile
@@ -31,6 +31,8 @@ clk-imx-scu-$(CONFIG_CLK_IMX8QXP) += clk-scu.o clk-imx8qxp.o \
clk-imx8qxp-rsrc.o clk-imx8qm-rsrc.o
clk-imx-lpcg-scu-$(CONFIG_CLK_IMX8QXP) += clk-lpcg-scu.o clk-imx8qxp-lpcg.o
+obj-$(CONFIG_CLK_IMX8ULP) += clk-imx8ulp.o
+
obj-$(CONFIG_CLK_IMX1) += clk-imx1.o
obj-$(CONFIG_CLK_IMX25) += clk-imx25.o
obj-$(CONFIG_CLK_IMX27) += clk-imx27.o
diff --git a/drivers/clk/imx/clk-composite-7ulp.c b/drivers/clk/imx/clk-composite-7ulp.c
index d85ba78abbb1..4eedd45dbaa8 100644
--- a/drivers/clk/imx/clk-composite-7ulp.c
+++ b/drivers/clk/imx/clk-composite-7ulp.c
@@ -8,6 +8,7 @@
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
+#include <linux/io.h>
#include <linux/slab.h>
#include "../clk-fractional-divider.h"
@@ -23,17 +24,61 @@
#define PCG_PCD_WIDTH 3
#define PCG_PCD_MASK 0x7
-struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
+#define SW_RST BIT(28)
+
+static int pcc_gate_enable(struct clk_hw *hw)
+{
+ struct clk_gate *gate = to_clk_gate(hw);
+ unsigned long flags;
+ u32 val;
+ int ret;
+
+ ret = clk_gate_ops.enable(hw);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(gate->lock, flags);
+ /*
+	 * Release the SW reset for the peripherals associated
+	 * with this PCC clock.
+ */
+ val = readl(gate->reg);
+ val |= SW_RST;
+ writel(val, gate->reg);
+
+ spin_unlock_irqrestore(gate->lock, flags);
+
+ return 0;
+}
+
+static void pcc_gate_disable(struct clk_hw *hw)
+{
+ clk_gate_ops.disable(hw);
+}
+
+static int pcc_gate_is_enabled(struct clk_hw *hw)
+{
+ return clk_gate_ops.is_enabled(hw);
+}
+
+static const struct clk_ops pcc_gate_ops = {
+ .enable = pcc_gate_enable,
+ .disable = pcc_gate_disable,
+ .is_enabled = pcc_gate_is_enabled,
+};
+
+static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, bool mux_present,
bool rate_present, bool gate_present,
- void __iomem *reg)
+ void __iomem *reg, bool has_swrst)
{
struct clk_hw *mux_hw = NULL, *fd_hw = NULL, *gate_hw = NULL;
struct clk_fractional_divider *fd = NULL;
struct clk_gate *gate = NULL;
struct clk_mux *mux = NULL;
struct clk_hw *hw;
+ u32 val;
if (mux_present) {
mux = kzalloc(sizeof(*mux), GFP_KERNEL);
@@ -43,6 +88,8 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
mux->reg = reg;
mux->shift = PCG_PCS_SHIFT;
mux->mask = PCG_PCS_MASK;
+ if (has_swrst)
+ mux->lock = &imx_ccm_lock;
}
if (rate_present) {
@@ -60,6 +107,8 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
fd->nwidth = PCG_PCD_WIDTH;
fd->nmask = PCG_PCD_MASK;
fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED;
+ if (has_swrst)
+ fd->lock = &imx_ccm_lock;
}
if (gate_present) {
@@ -72,13 +121,27 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
gate_hw = &gate->hw;
gate->reg = reg;
gate->bit_idx = PCG_CGC_SHIFT;
+ if (has_swrst)
+ gate->lock = &imx_ccm_lock;
+ /*
+	 * Make sure the clock is gated during clock tree initialization.
+	 * The HW only allows parent/rate changes while the clock is gated,
+	 * but clocks may have been left enabled by the bootloader, so the
+	 * HW state would not match the clock tree's prepare count: the
+	 * clock core would allow a parent/rate change (prepare count is
+	 * zero) while the HW silently rejects it because the clock is enabled.
+ */
+ val = readl_relaxed(reg);
+ val &= ~(1 << PCG_CGC_SHIFT);
+ writel_relaxed(val, reg);
}
hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
mux_hw, &clk_mux_ops, fd_hw,
&clk_fractional_divider_ops, gate_hw,
- &clk_gate_ops, CLK_SET_RATE_GATE |
- CLK_SET_PARENT_GATE);
+ has_swrst ? &pcc_gate_ops : &clk_gate_ops, CLK_SET_RATE_GATE |
+ CLK_SET_PARENT_GATE | CLK_SET_RATE_NO_REPARENT);
if (IS_ERR(hw)) {
kfree(mux);
kfree(fd);
@@ -87,3 +150,20 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
return hw;
}
+
+struct clk_hw *imx7ulp_clk_hw_composite(const char *name, const char * const *parent_names,
+ int num_parents, bool mux_present, bool rate_present,
+ bool gate_present, void __iomem *reg)
+{
+ return imx_ulp_clk_hw_composite(name, parent_names, num_parents, mux_present, rate_present,
+ gate_present, reg, false);
+}
+
+struct clk_hw *imx8ulp_clk_hw_composite(const char *name, const char * const *parent_names,
+ int num_parents, bool mux_present, bool rate_present,
+ bool gate_present, void __iomem *reg, bool has_swrst)
+{
+ return imx_ulp_clk_hw_composite(name, parent_names, num_parents, mux_present, rate_present,
+ gate_present, reg, has_swrst);
+}
+EXPORT_SYMBOL_GPL(imx8ulp_clk_hw_composite);
diff --git a/drivers/clk/imx/clk-composite-8m.c b/drivers/clk/imx/clk-composite-8m.c
index 04e728538cef..2dfd6149e528 100644
--- a/drivers/clk/imx/clk-composite-8m.c
+++ b/drivers/clk/imx/clk-composite-8m.c
@@ -171,7 +171,7 @@ static const struct clk_ops imx8m_clk_composite_mux_ops = {
.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
-struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents, void __iomem *reg,
u32 composite_flags,
@@ -246,4 +246,4 @@ fail:
kfree(mux);
return ERR_CAST(hw);
}
-EXPORT_SYMBOL_GPL(imx8m_clk_hw_composite_flags);
+EXPORT_SYMBOL_GPL(__imx8m_clk_hw_composite);
diff --git a/drivers/clk/imx/clk-imx6ul.c b/drivers/clk/imx/clk-imx6ul.c
index 5dbb6a937732..520b100bff4b 100644
--- a/drivers/clk/imx/clk-imx6ul.c
+++ b/drivers/clk/imx/clk-imx6ul.c
@@ -161,7 +161,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_PLL5_BYPASS] = imx_clk_hw_mux_flags("pll5_bypass", base + 0xa0, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_PLL6_BYPASS] = imx_clk_hw_mux_flags("pll6_bypass", base + 0xe0, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_PLL7_BYPASS] = imx_clk_hw_mux_flags("pll7_bypass", base + 0x20, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
- hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux_flags("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels), CLK_SET_RATE_PARENT);
/* Do not bypass PLLs initially */
clk_set_parent(hws[IMX6UL_PLL1_BYPASS]->clk, hws[IMX6UL_CLK_PLL1]->clk);
@@ -270,6 +269,7 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_ECSPI_SEL] = imx_clk_hw_mux("ecspi_sel", base + 0x38, 18, 1, ecspi_sels, ARRAY_SIZE(ecspi_sels));
hws[IMX6UL_CLK_LCDIF_PRE_SEL] = imx_clk_hw_mux_flags("lcdif_pre_sel", base + 0x38, 15, 3, lcdif_pre_sels, ARRAY_SIZE(lcdif_pre_sels), CLK_SET_RATE_PARENT);
hws[IMX6UL_CLK_LCDIF_SEL] = imx_clk_hw_mux("lcdif_sel", base + 0x38, 9, 3, lcdif_sels, ARRAY_SIZE(lcdif_sels));
+ hws[IMX6UL_CLK_CSI_SEL] = imx_clk_hw_mux("csi_sel", base + 0x3c, 9, 2, csi_sels, ARRAY_SIZE(csi_sels));
hws[IMX6UL_CLK_LDB_DI0_DIV_SEL] = imx_clk_hw_mux("ldb_di0", base + 0x20, 10, 1, ldb_di0_div_sels, ARRAY_SIZE(ldb_di0_div_sels));
hws[IMX6UL_CLK_LDB_DI1_DIV_SEL] = imx_clk_hw_mux("ldb_di1", base + 0x20, 11, 1, ldb_di1_div_sels, ARRAY_SIZE(ldb_di1_div_sels));
@@ -380,7 +380,6 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6ULL_CLK_ESAI_IPG] = imx_clk_hw_gate2_shared("esai_ipg", "ahb", base + 0x70, 0, &share_count_esai);
hws[IMX6ULL_CLK_ESAI_MEM] = imx_clk_hw_gate2_shared("esai_mem", "ahb", base + 0x70, 0, &share_count_esai);
}
- hws[IMX6UL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x70, 2);
hws[IMX6UL_CLK_I2C1] = imx_clk_hw_gate2("i2c1", "perclk", base + 0x70, 6);
hws[IMX6UL_CLK_I2C2] = imx_clk_hw_gate2("i2c2", "perclk", base + 0x70, 8);
hws[IMX6UL_CLK_I2C3] = imx_clk_hw_gate2("i2c3", "perclk", base + 0x70, 10);
@@ -391,6 +390,12 @@ static void __init imx6ul_clocks_init(struct device_node *ccm_node)
hws[IMX6UL_CLK_PXP] = imx_clk_hw_gate2("pxp", "axi", base + 0x70, 30);
/* CCGR3 */
+ /*
+ * Although the imx6ull reference manual lists CCGR2 as the csi clk
+ * gate register, tests have shown that it is actually the CCGR3
+ * register bit 0/1, same as for the imx6ul.
+ */
+ hws[IMX6UL_CLK_CSI] = imx_clk_hw_gate2("csi", "csi_podf", base + 0x74, 0);
hws[IMX6UL_CLK_UART5_IPG] = imx_clk_hw_gate2("uart5_ipg", "ipg", base + 0x74, 2);
hws[IMX6UL_CLK_UART5_SERIAL] = imx_clk_hw_gate2("uart5_serial", "uart_podf", base + 0x74, 2);
if (clk_on_imx6ul()) {
diff --git a/drivers/clk/imx/clk-imx7ulp.c b/drivers/clk/imx/clk-imx7ulp.c
index 779e09105da7..b6e45e77ee39 100644
--- a/drivers/clk/imx/clk-imx7ulp.c
+++ b/drivers/clk/imx/clk-imx7ulp.c
@@ -78,20 +78,20 @@ static void __init imx7ulp_clk_scg1_init(struct device_node *np)
hws[IMX7ULP_CLK_SPLL_PRE_DIV] = imx_clk_hw_divider_flags("spll_pre_div", "spll_pre_sel", base + 0x608, 8, 3, CLK_SET_RATE_GATE);
/* name parent_name base */
- hws[IMX7ULP_CLK_APLL] = imx_clk_hw_pllv4("apll", "apll_pre_div", base + 0x500);
- hws[IMX7ULP_CLK_SPLL] = imx_clk_hw_pllv4("spll", "spll_pre_div", base + 0x600);
+ hws[IMX7ULP_CLK_APLL] = imx_clk_hw_pllv4(IMX_PLLV4_IMX7ULP, "apll", "apll_pre_div", base + 0x500);
+ hws[IMX7ULP_CLK_SPLL] = imx_clk_hw_pllv4(IMX_PLLV4_IMX7ULP, "spll", "spll_pre_div", base + 0x600);
/* APLL PFDs */
- hws[IMX7ULP_CLK_APLL_PFD0] = imx_clk_hw_pfdv2("apll_pfd0", "apll", base + 0x50c, 0);
- hws[IMX7ULP_CLK_APLL_PFD1] = imx_clk_hw_pfdv2("apll_pfd1", "apll", base + 0x50c, 1);
- hws[IMX7ULP_CLK_APLL_PFD2] = imx_clk_hw_pfdv2("apll_pfd2", "apll", base + 0x50c, 2);
- hws[IMX7ULP_CLK_APLL_PFD3] = imx_clk_hw_pfdv2("apll_pfd3", "apll", base + 0x50c, 3);
+ hws[IMX7ULP_CLK_APLL_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd0", "apll", base + 0x50c, 0);
+ hws[IMX7ULP_CLK_APLL_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd1", "apll", base + 0x50c, 1);
+ hws[IMX7ULP_CLK_APLL_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd2", "apll", base + 0x50c, 2);
+ hws[IMX7ULP_CLK_APLL_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "apll_pfd3", "apll", base + 0x50c, 3);
/* SPLL PFDs */
- hws[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_hw_pfdv2("spll_pfd0", "spll", base + 0x60C, 0);
- hws[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_hw_pfdv2("spll_pfd1", "spll", base + 0x60C, 1);
- hws[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_hw_pfdv2("spll_pfd2", "spll", base + 0x60C, 2);
- hws[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_hw_pfdv2("spll_pfd3", "spll", base + 0x60C, 3);
+ hws[IMX7ULP_CLK_SPLL_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd0", "spll", base + 0x60C, 0);
+ hws[IMX7ULP_CLK_SPLL_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd1", "spll", base + 0x60C, 1);
+ hws[IMX7ULP_CLK_SPLL_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd2", "spll", base + 0x60C, 2);
+ hws[IMX7ULP_CLK_SPLL_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX7ULP, "spll_pfd3", "spll", base + 0x60C, 3);
/* PLL Mux */
hws[IMX7ULP_CLK_APLL_PFD_SEL] = imx_clk_hw_mux_flags("apll_pfd_sel", base + 0x508, 14, 2, apll_pfd_sels, ARRAY_SIZE(apll_pfd_sels), CLK_SET_RATE_PARENT | CLK_SET_PARENT_GATE);
diff --git a/drivers/clk/imx/clk-imx8ulp.c b/drivers/clk/imx/clk-imx8ulp.c
new file mode 100644
index 000000000000..6699437e17b8
--- /dev/null
+++ b/drivers/clk/imx/clk-imx8ulp.c
@@ -0,0 +1,569 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <dt-bindings/clock/imx8ulp-clock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/slab.h>
+
+#include "clk.h"
+
+static const char * const pll_pre_sels[] = { "sosc", "frosc", };
+static const char * const a35_sels[] = { "frosc", "spll2", "sosc", "lvds", };
+static const char * const nic_sels[] = { "frosc", "spll3_pfd0", "sosc", "lvds", };
+static const char * const pcc3_periph_bus_sels[] = { "dummy", "lposc", "sosc_div2",
+ "frosc_div2", "xbar_divbus", "spll3_pfd1_div1",
+ "spll3_pfd0_div2", "spll3_pfd0_div1", };
+static const char * const pcc4_periph_bus_sels[] = { "dummy", "dummy", "lposc",
+ "sosc_div2", "frosc_div2", "xbar_divbus",
+ "spll3_vcodiv", "spll3_pfd0_div1", };
+static const char * const pcc4_periph_plat_sels[] = { "dummy", "sosc_div1", "frosc_div1",
+ "spll3_pfd3_div2", "spll3_pfd3_div1",
+ "spll3_pfd2_div2", "spll3_pfd2_div1",
+ "spll3_pfd1_div2", };
+static const char * const pcc5_periph_bus_sels[] = { "dummy", "dummy", "lposc",
+ "sosc_div2", "frosc_div2", "lpav_bus_clk",
+ "pll4_vcodiv", "pll4_pfd3_div1", };
+static const char * const pcc5_periph_plat_sels[] = { "dummy", "pll4_pfd3_div2", "pll4_pfd2_div2",
+ "pll4_pfd2_div1", "pll4_pfd1_div2",
+ "pll4_pfd1_div1", "pll4_pfd0_div2",
+ "pll4_pfd0_div1", };
+static const char * const hifi_sels[] = { "frosc", "pll4", "pll4_pfd0", "sosc",
+ "lvds", "dummy", "dummy", "dummy", };
+static const char * const ddr_sels[] = { "frosc", "pll4_pfd1", "sosc", "lvds",
+ "pll4", "pll4", "pll4", "pll4", };
+static const char * const lpav_sels[] = { "frosc", "pll4_pfd1", "sosc", "lvds", };
+static const char * const sai45_sels[] = { "spll3_pfd1_div1", "aud_clk1", "aud_clk2", "sosc", };
+static const char * const sai67_sels[] = { "spll1_pfd2_div", "spll3_pfd1_div1", "aud_clk0", "aud_clk1", "aud_clk2", "sosc", "dummy", "dummy", };
+static const char * const aud_clk1_sels[] = { "ext_aud_mclk2", "sai4_rx_bclk", "sai4_tx_bclk", "sai5_rx_bclk", "sai5_tx_bclk", "dummy", "dummy", "dummy", };
+static const char * const aud_clk2_sels[] = { "ext_aud_mclk3", "sai6_rx_bclk", "sai6_tx_bclk", "sai7_rx_bclk", "sai7_tx_bclk", "spdif_rx", "dummy", "dummy", };
+static const char * const enet_ts_sels[] = { "ext_rmii_clk", "ext_ts_clk", "rosc", "ext_aud_mclk", "sosc", "dummy", "dummy", "dummy"};
+static const char * const xbar_divbus[] = { "xbar_divbus" };
+static const char * const nic_per_divplat[] = { "nic_per_divplat" };
+static const char * const lpav_axi_div[] = { "lpav_axi_div" };
+static const char * const lpav_bus_div[] = { "lpav_bus_div" };
+
+struct pcc_reset_dev {
+ void __iomem *base;
+ struct reset_controller_dev rcdev;
+ const u32 *resets;
+ /* Set to imx_ccm_lock to protect register access shared with clock control */
+ spinlock_t *lock;
+};
+
+#define PCC_SW_RST BIT(28)
+#define to_pcc_reset_dev(_rcdev) container_of(_rcdev, struct pcc_reset_dev, rcdev)
+
+static const u32 pcc3_resets[] = {
+ 0xa8, 0xac, 0xc8, 0xcc, 0xd0,
+ 0xd4, 0xd8, 0xdc, 0xe0, 0xe4,
+ 0xe8, 0xec, 0xf0
+};
+
+static const u32 pcc4_resets[] = {
+ 0x4, 0x8, 0xc, 0x10, 0x14,
+ 0x18, 0x1c, 0x20, 0x24, 0x34,
+ 0x38, 0x3c, 0x40, 0x44, 0x48,
+ 0x4c, 0x54
+};
+
+static const u32 pcc5_resets[] = {
+ 0xa0, 0xa4, 0xa8, 0xac, 0xb0,
+ 0xb4, 0xbc, 0xc0, 0xc8, 0xcc,
+ 0xd0, 0xf0, 0xf4, 0xf8
+};
+
+static int imx8ulp_pcc_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct pcc_reset_dev *pcc_reset = to_pcc_reset_dev(rcdev);
+ u32 offset = pcc_reset->resets[id];
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(pcc_reset->lock, flags);
+
+ val = readl(pcc_reset->base + offset);
+ val &= ~PCC_SW_RST;
+ writel(val, pcc_reset->base + offset);
+
+ spin_unlock_irqrestore(pcc_reset->lock, flags);
+
+ return 0;
+}
+
+static int imx8ulp_pcc_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ struct pcc_reset_dev *pcc_reset = to_pcc_reset_dev(rcdev);
+ u32 offset = pcc_reset->resets[id];
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(pcc_reset->lock, flags);
+
+ val = readl(pcc_reset->base + offset);
+ val |= PCC_SW_RST;
+ writel(val, pcc_reset->base + offset);
+
+ spin_unlock_irqrestore(pcc_reset->lock, flags);
+
+ return 0;
+}
+
+static const struct reset_control_ops imx8ulp_pcc_reset_ops = {
+ .assert = imx8ulp_pcc_assert,
+ .deassert = imx8ulp_pcc_deassert,
+};
+
+static int imx8ulp_pcc_reset_init(struct platform_device *pdev, void __iomem *base,
+ const u32 *resets, unsigned int nr_resets)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct pcc_reset_dev *pcc_reset;
+
+ pcc_reset = devm_kzalloc(dev, sizeof(*pcc_reset), GFP_KERNEL);
+ if (!pcc_reset)
+ return -ENOMEM;
+
+ pcc_reset->base = base;
+ pcc_reset->lock = &imx_ccm_lock;
+ pcc_reset->resets = resets;
+ pcc_reset->rcdev.owner = THIS_MODULE;
+ pcc_reset->rcdev.nr_resets = nr_resets;
+ pcc_reset->rcdev.ops = &imx8ulp_pcc_reset_ops;
+ pcc_reset->rcdev.of_node = np;
+
+ return devm_reset_controller_register(dev, &pcc_reset->rcdev);
+}
+
+static int imx8ulp_clk_cgc1_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_CGC1_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_CGC1_END;
+ clks = clk_data->hws;
+
+ clks[IMX8ULP_CLK_DUMMY] = imx_clk_hw_fixed("dummy", 0);
+
+ /* CGC1 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_SPLL2_PRE_SEL] = imx_clk_hw_mux_flags("spll2_pre_sel", base + 0x510, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_SPLL3_PRE_SEL] = imx_clk_hw_mux_flags("spll3_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+
+ clks[IMX8ULP_CLK_SPLL2] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll2", "spll2_pre_sel", base + 0x500);
+ clks[IMX8ULP_CLK_SPLL3] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "spll3", "spll3_pre_sel", base + 0x600);
+ clks[IMX8ULP_CLK_SPLL3_VCODIV] = imx_clk_hw_divider("spll3_vcodiv", "spll3", base + 0x604, 0, 6);
+
+ clks[IMX8ULP_CLK_SPLL3_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd0", "spll3_vcodiv", base + 0x614, 0);
+ clks[IMX8ULP_CLK_SPLL3_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd1", "spll3_vcodiv", base + 0x614, 1);
+ clks[IMX8ULP_CLK_SPLL3_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd2", "spll3_vcodiv", base + 0x614, 2);
+ clks[IMX8ULP_CLK_SPLL3_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "spll3_pfd3", "spll3_vcodiv", base + 0x614, 3);
+
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd0_div1_gate", "spll3_pfd0", base + 0x608, 7);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd0_div2_gate", "spll3_pfd0", base + 0x608, 15);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd1_div1_gate", "spll3_pfd1", base + 0x608, 23);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd1_div2_gate", "spll3_pfd1", base + 0x608, 31);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd2_div1_gate", "spll3_pfd2", base + 0x60c, 7);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd2_div2_gate", "spll3_pfd2", base + 0x60c, 15);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV1_GATE] = imx_clk_hw_gate_dis("spll3_pfd3_div1_gate", "spll3_pfd3", base + 0x60c, 23);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV2_GATE] = imx_clk_hw_gate_dis("spll3_pfd3_div2_gate", "spll3_pfd3", base + 0x60c, 31);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV1] = imx_clk_hw_divider("spll3_pfd0_div1", "spll3_pfd0_div1_gate", base + 0x608, 0, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD0_DIV2] = imx_clk_hw_divider("spll3_pfd0_div2", "spll3_pfd0_div2_gate", base + 0x608, 8, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV1] = imx_clk_hw_divider("spll3_pfd1_div1", "spll3_pfd1_div1_gate", base + 0x608, 16, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD1_DIV2] = imx_clk_hw_divider("spll3_pfd1_div2", "spll3_pfd1_div2_gate", base + 0x608, 24, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV1] = imx_clk_hw_divider("spll3_pfd2_div1", "spll3_pfd2_div1_gate", base + 0x60c, 0, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD2_DIV2] = imx_clk_hw_divider("spll3_pfd2_div2", "spll3_pfd2_div2_gate", base + 0x60c, 8, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV1] = imx_clk_hw_divider("spll3_pfd3_div1", "spll3_pfd3_div1_gate", base + 0x60c, 16, 6);
+ clks[IMX8ULP_CLK_SPLL3_PFD3_DIV2] = imx_clk_hw_divider("spll3_pfd3_div2", "spll3_pfd3_div2_gate", base + 0x60c, 24, 6);
+
+ clks[IMX8ULP_CLK_A35_SEL] = imx_clk_hw_mux2("a35_sel", base + 0x14, 28, 2, a35_sels, ARRAY_SIZE(a35_sels));
+ clks[IMX8ULP_CLK_A35_DIV] = imx_clk_hw_divider_flags("a35_div", "a35_sel", base + 0x14, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_NIC_SEL] = imx_clk_hw_mux2("nic_sel", base + 0x34, 28, 2, nic_sels, ARRAY_SIZE(nic_sels));
+ clks[IMX8ULP_CLK_NIC_AD_DIVPLAT] = imx_clk_hw_divider_flags("nic_ad_divplat", "nic_sel", base + 0x34, 21, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_NIC_PER_DIVPLAT] = imx_clk_hw_divider_flags("nic_per_divplat", "nic_ad_divplat", base + 0x34, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_AD_DIVPLAT] = imx_clk_hw_divider_flags("xbar_ad_divplat", "nic_ad_divplat", base + 0x38, 14, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_DIVBUS] = imx_clk_hw_divider_flags("xbar_divbus", "nic_ad_divplat", base + 0x38, 7, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_XBAR_AD_SLOW] = imx_clk_hw_divider_flags("xbar_ad_slow", "nic_ad_divplat", base + 0x38, 0, 6, CLK_SET_RATE_PARENT | CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("sosc_div1_gate", "sosc", base + 0x108, 7);
+ clks[IMX8ULP_CLK_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("sosc_div2_gate", "sosc", base + 0x108, 15);
+ clks[IMX8ULP_CLK_SOSC_DIV3_GATE] = imx_clk_hw_gate_dis("sosc_div3_gate", "sosc", base + 0x108, 23);
+ clks[IMX8ULP_CLK_SOSC_DIV1] = imx_clk_hw_divider("sosc_div1", "sosc_div1_gate", base + 0x108, 0, 6);
+ clks[IMX8ULP_CLK_SOSC_DIV2] = imx_clk_hw_divider("sosc_div2", "sosc_div2_gate", base + 0x108, 8, 6);
+ clks[IMX8ULP_CLK_SOSC_DIV3] = imx_clk_hw_divider("sosc_div3", "sosc_div3_gate", base + 0x108, 16, 6);
+
+ clks[IMX8ULP_CLK_FROSC_DIV1_GATE] = imx_clk_hw_gate_dis("frosc_div1_gate", "frosc", base + 0x208, 7);
+ clks[IMX8ULP_CLK_FROSC_DIV2_GATE] = imx_clk_hw_gate_dis("frosc_div2_gate", "frosc", base + 0x208, 15);
+ clks[IMX8ULP_CLK_FROSC_DIV3_GATE] = imx_clk_hw_gate_dis("frosc_div3_gate", "frosc", base + 0x208, 23);
+ clks[IMX8ULP_CLK_FROSC_DIV1] = imx_clk_hw_divider("frosc_div1", "frosc_div1_gate", base + 0x208, 0, 6);
+ clks[IMX8ULP_CLK_FROSC_DIV2] = imx_clk_hw_divider("frosc_div2", "frosc_div2_gate", base + 0x208, 8, 6);
+ clks[IMX8ULP_CLK_FROSC_DIV3] = imx_clk_hw_divider("frosc_div3", "frosc_div3_gate", base + 0x208, 16, 6);
+ clks[IMX8ULP_CLK_AUD_CLK1] = imx_clk_hw_mux2("aud_clk1", base + 0x900, 0, 3, aud_clk1_sels, ARRAY_SIZE(aud_clk1_sels));
+ clks[IMX8ULP_CLK_SAI4_SEL] = imx_clk_hw_mux2("sai4_sel", base + 0x904, 0, 2, sai45_sels, ARRAY_SIZE(sai45_sels));
+ clks[IMX8ULP_CLK_SAI5_SEL] = imx_clk_hw_mux2("sai5_sel", base + 0x904, 8, 2, sai45_sels, ARRAY_SIZE(sai45_sels));
+ clks[IMX8ULP_CLK_ENET_TS_SEL] = imx_clk_hw_mux2("enet_ts", base + 0x700, 24, 3, enet_ts_sels, ARRAY_SIZE(enet_ts_sels));
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+static int imx8ulp_clk_cgc2_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_CGC2_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_CGC2_END;
+ clks = clk_data->hws;
+
+ /* CGC2 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_PLL4_PRE_SEL] = imx_clk_hw_mux_flags("pll4_pre_sel", base + 0x610, 0, 1, pll_pre_sels, ARRAY_SIZE(pll_pre_sels), CLK_SET_PARENT_GATE);
+
+ clks[IMX8ULP_CLK_PLL4] = imx_clk_hw_pllv4(IMX_PLLV4_IMX8ULP, "pll4", "pll4_pre_sel", base + 0x600);
+ clks[IMX8ULP_CLK_PLL4_VCODIV] = imx_clk_hw_divider("pll4_vcodiv", "pll4", base + 0x604, 0, 6);
+
+ clks[IMX8ULP_CLK_HIFI_SEL] = imx_clk_hw_mux_flags("hifi_sel", base + 0x14, 28, 3, hifi_sels, ARRAY_SIZE(hifi_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_HIFI_DIVCORE] = imx_clk_hw_divider("hifi_core_div", "hifi_sel", base + 0x14, 21, 6);
+ clks[IMX8ULP_CLK_HIFI_DIVPLAT] = imx_clk_hw_divider("hifi_plat_div", "hifi_core_div", base + 0x14, 14, 6);
+
+ clks[IMX8ULP_CLK_DDR_SEL] = imx_clk_hw_mux_flags("ddr_sel", base + 0x40, 28, 3, ddr_sels, ARRAY_SIZE(ddr_sels), CLK_SET_PARENT_GATE);
+ clks[IMX8ULP_CLK_DDR_DIV] = imx_clk_hw_divider_flags("ddr_div", "ddr_sel", base + 0x40, 21, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_AXI_SEL] = imx_clk_hw_mux("lpav_sel", base + 0x3c, 28, 2, lpav_sels, ARRAY_SIZE(lpav_sels));
+ clks[IMX8ULP_CLK_LPAV_AXI_DIV] = imx_clk_hw_divider_flags("lpav_axi_div", "lpav_sel", base + 0x3c, 21, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_AHB_DIV] = imx_clk_hw_divider_flags("lpav_ahb_div", "lpav_axi_div", base + 0x3c, 14, 6, CLK_IS_CRITICAL);
+ clks[IMX8ULP_CLK_LPAV_BUS_DIV] = imx_clk_hw_divider_flags("lpav_bus_div", "lpav_axi_div", base + 0x3c, 7, 6, CLK_IS_CRITICAL);
+
+ clks[IMX8ULP_CLK_PLL4_PFD0] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd0", "pll4_vcodiv", base + 0x614, 0);
+ clks[IMX8ULP_CLK_PLL4_PFD1] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd1", "pll4_vcodiv", base + 0x614, 1);
+ clks[IMX8ULP_CLK_PLL4_PFD2] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd2", "pll4_vcodiv", base + 0x614, 2);
+ clks[IMX8ULP_CLK_PLL4_PFD3] = imx_clk_hw_pfdv2(IMX_PFDV2_IMX8ULP, "pll4_pfd3", "pll4_vcodiv", base + 0x614, 3);
+
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd0_div1_gate", "pll4_pfd0", base + 0x608, 7);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd0_div2_gate", "pll4_pfd0", base + 0x608, 15);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd1_div1_gate", "pll4_pfd1", base + 0x608, 23);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd1_div2_gate", "pll4_pfd1", base + 0x608, 31);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd2_div1_gate", "pll4_pfd2", base + 0x60c, 7);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd2_div2_gate", "pll4_pfd2", base + 0x60c, 15);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV1_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div1_gate", "pll4_pfd3", base + 0x60c, 23);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV2_GATE] = imx_clk_hw_gate_dis("pll4_pfd3_div2_gate", "pll4_pfd3", base + 0x60c, 31);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV1] = imx_clk_hw_divider("pll4_pfd0_div1", "pll4_pfd0_div1_gate", base + 0x608, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD0_DIV2] = imx_clk_hw_divider("pll4_pfd0_div2", "pll4_pfd0_div2_gate", base + 0x608, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV1] = imx_clk_hw_divider("pll4_pfd1_div1", "pll4_pfd1_div1_gate", base + 0x608, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD1_DIV2] = imx_clk_hw_divider("pll4_pfd1_div2", "pll4_pfd1_div2_gate", base + 0x608, 24, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV1] = imx_clk_hw_divider("pll4_pfd2_div1", "pll4_pfd2_div1_gate", base + 0x60c, 0, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD2_DIV2] = imx_clk_hw_divider("pll4_pfd2_div2", "pll4_pfd2_div2_gate", base + 0x60c, 8, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV1] = imx_clk_hw_divider("pll4_pfd3_div1", "pll4_pfd3_div1_gate", base + 0x60c, 16, 6);
+ clks[IMX8ULP_CLK_PLL4_PFD3_DIV2] = imx_clk_hw_divider("pll4_pfd3_div2", "pll4_pfd3_div2_gate", base + 0x60c, 24, 6);
+
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV1_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div1_gate", "sosc", base + 0x108, 7);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV2_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div2_gate", "sosc", base + 0x108, 15);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV3_GATE] = imx_clk_hw_gate_dis("cgc2_sosc_div3_gate", "sosc", base + 0x108, 23);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV1] = imx_clk_hw_divider("cgc2_sosc_div1", "cgc2_sosc_div1_gate", base + 0x108, 0, 6);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV2] = imx_clk_hw_divider("cgc2_sosc_div2", "cgc2_sosc_div2_gate", base + 0x108, 8, 6);
+ clks[IMX8ULP_CLK_CGC2_SOSC_DIV3] = imx_clk_hw_divider("cgc2_sosc_div3", "cgc2_sosc_div3_gate", base + 0x108, 16, 6);
+
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV1_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div1_gate", "frosc", base + 0x208, 7);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV2_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div2_gate", "frosc", base + 0x208, 15);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV3_GATE] = imx_clk_hw_gate_dis("cgc2_frosc_div3_gate", "frosc", base + 0x208, 23);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV1] = imx_clk_hw_divider("cgc2_frosc_div1", "cgc2_frosc_div1_gate", base + 0x208, 0, 6);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV2] = imx_clk_hw_divider("cgc2_frosc_div2", "cgc2_frosc_div2_gate", base + 0x208, 8, 6);
+ clks[IMX8ULP_CLK_CGC2_FROSC_DIV3] = imx_clk_hw_divider("cgc2_frosc_div3", "cgc2_frosc_div3_gate", base + 0x208, 16, 6);
+ clks[IMX8ULP_CLK_AUD_CLK2] = imx_clk_hw_mux2("aud_clk2", base + 0x900, 0, 3, aud_clk2_sels, ARRAY_SIZE(aud_clk2_sels));
+ clks[IMX8ULP_CLK_SAI6_SEL] = imx_clk_hw_mux2("sai6_sel", base + 0x904, 0, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_SAI7_SEL] = imx_clk_hw_mux2("sai7_sel", base + 0x904, 8, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_SPDIF_SEL] = imx_clk_hw_mux2("spdif_sel", base + 0x910, 0, 3, sai67_sels, ARRAY_SIZE(sai67_sels));
+ clks[IMX8ULP_CLK_DSI_PHY_REF] = imx_clk_hw_fixed("dsi_phy_ref", 24000000);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+}
+
+static int imx8ulp_clk_pcc3_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC3_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC3_END;
+ clks = clk_data->hws;
+
+ /* PCC3 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_WDOG3] = imx8ulp_clk_hw_composite("wdog3", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xa8, 1);
+ clks[IMX8ULP_CLK_WDOG4] = imx8ulp_clk_hw_composite("wdog4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xac, 1);
+ clks[IMX8ULP_CLK_LPIT1] = imx8ulp_clk_hw_composite("lpit1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xc8, 1);
+ clks[IMX8ULP_CLK_TPM4] = imx8ulp_clk_hw_composite("tpm4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xcc, 1);
+ clks[IMX8ULP_CLK_TPM5] = imx8ulp_clk_hw_composite("tpm5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd0, 1);
+ clks[IMX8ULP_CLK_FLEXIO1] = imx8ulp_clk_hw_composite("flexio1", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd4, 1);
+ clks[IMX8ULP_CLK_I3C2] = imx8ulp_clk_hw_composite("i3c2", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xd8, 1);
+ clks[IMX8ULP_CLK_LPI2C4] = imx8ulp_clk_hw_composite("lpi2c4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xdc, 1);
+ clks[IMX8ULP_CLK_LPI2C5] = imx8ulp_clk_hw_composite("lpi2c5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe0, 1);
+ clks[IMX8ULP_CLK_LPUART4] = imx8ulp_clk_hw_composite("lpuart4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe4, 1);
+ clks[IMX8ULP_CLK_LPUART5] = imx8ulp_clk_hw_composite("lpuart5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xe8, 1);
+ clks[IMX8ULP_CLK_LPSPI4] = imx8ulp_clk_hw_composite("lpspi4", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xec, 1);
+ clks[IMX8ULP_CLK_LPSPI5] = imx8ulp_clk_hw_composite("lpspi5", pcc3_periph_bus_sels, ARRAY_SIZE(pcc3_periph_bus_sels), true, true, true, base + 0xf0, 1);
+
+ clks[IMX8ULP_CLK_DMA1_MP] = imx_clk_hw_gate("pcc_dma1_mp", "xbar_ad_divplat", base + 0x4, 30);
+ clks[IMX8ULP_CLK_DMA1_CH0] = imx_clk_hw_gate("pcc_dma1_ch0", "xbar_ad_divplat", base + 0x8, 30);
+ clks[IMX8ULP_CLK_DMA1_CH1] = imx_clk_hw_gate("pcc_dma1_ch1", "xbar_ad_divplat", base + 0xc, 30);
+ clks[IMX8ULP_CLK_DMA1_CH2] = imx_clk_hw_gate("pcc_dma1_ch2", "xbar_ad_divplat", base + 0x10, 30);
+ clks[IMX8ULP_CLK_DMA1_CH3] = imx_clk_hw_gate("pcc_dma1_ch3", "xbar_ad_divplat", base + 0x14, 30);
+ clks[IMX8ULP_CLK_DMA1_CH4] = imx_clk_hw_gate("pcc_dma1_ch4", "xbar_ad_divplat", base + 0x18, 30);
+ clks[IMX8ULP_CLK_DMA1_CH5] = imx_clk_hw_gate("pcc_dma1_ch5", "xbar_ad_divplat", base + 0x1c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH6] = imx_clk_hw_gate("pcc_dma1_ch6", "xbar_ad_divplat", base + 0x20, 30);
+ clks[IMX8ULP_CLK_DMA1_CH7] = imx_clk_hw_gate("pcc_dma1_ch7", "xbar_ad_divplat", base + 0x24, 30);
+ clks[IMX8ULP_CLK_DMA1_CH8] = imx_clk_hw_gate("pcc_dma1_ch8", "xbar_ad_divplat", base + 0x28, 30);
+ clks[IMX8ULP_CLK_DMA1_CH9] = imx_clk_hw_gate("pcc_dma1_ch9", "xbar_ad_divplat", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH10] = imx_clk_hw_gate("pcc_dma1_ch10", "xbar_ad_divplat", base + 0x30, 30);
+ clks[IMX8ULP_CLK_DMA1_CH11] = imx_clk_hw_gate("pcc_dma1_ch11", "xbar_ad_divplat", base + 0x34, 30);
+ clks[IMX8ULP_CLK_DMA1_CH12] = imx_clk_hw_gate("pcc_dma1_ch12", "xbar_ad_divplat", base + 0x38, 30);
+ clks[IMX8ULP_CLK_DMA1_CH13] = imx_clk_hw_gate("pcc_dma1_ch13", "xbar_ad_divplat", base + 0x3c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH14] = imx_clk_hw_gate("pcc_dma1_ch14", "xbar_ad_divplat", base + 0x40, 30);
+ clks[IMX8ULP_CLK_DMA1_CH15] = imx_clk_hw_gate("pcc_dma1_ch15", "xbar_ad_divplat", base + 0x44, 30);
+ clks[IMX8ULP_CLK_DMA1_CH16] = imx_clk_hw_gate("pcc_dma1_ch16", "xbar_ad_divplat", base + 0x48, 30);
+ clks[IMX8ULP_CLK_DMA1_CH17] = imx_clk_hw_gate("pcc_dma1_ch17", "xbar_ad_divplat", base + 0x4c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH18] = imx_clk_hw_gate("pcc_dma1_ch18", "xbar_ad_divplat", base + 0x50, 30);
+ clks[IMX8ULP_CLK_DMA1_CH19] = imx_clk_hw_gate("pcc_dma1_ch19", "xbar_ad_divplat", base + 0x54, 30);
+ clks[IMX8ULP_CLK_DMA1_CH20] = imx_clk_hw_gate("pcc_dma1_ch20", "xbar_ad_divplat", base + 0x58, 30);
+ clks[IMX8ULP_CLK_DMA1_CH21] = imx_clk_hw_gate("pcc_dma1_ch21", "xbar_ad_divplat", base + 0x5c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH22] = imx_clk_hw_gate("pcc_dma1_ch22", "xbar_ad_divplat", base + 0x60, 30);
+ clks[IMX8ULP_CLK_DMA1_CH23] = imx_clk_hw_gate("pcc_dma1_ch23", "xbar_ad_divplat", base + 0x64, 30);
+ clks[IMX8ULP_CLK_DMA1_CH24] = imx_clk_hw_gate("pcc_dma1_ch24", "xbar_ad_divplat", base + 0x68, 30);
+ clks[IMX8ULP_CLK_DMA1_CH25] = imx_clk_hw_gate("pcc_dma1_ch25", "xbar_ad_divplat", base + 0x6c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH26] = imx_clk_hw_gate("pcc_dma1_ch26", "xbar_ad_divplat", base + 0x70, 30);
+ clks[IMX8ULP_CLK_DMA1_CH27] = imx_clk_hw_gate("pcc_dma1_ch27", "xbar_ad_divplat", base + 0x74, 30);
+ clks[IMX8ULP_CLK_DMA1_CH28] = imx_clk_hw_gate("pcc_dma1_ch28", "xbar_ad_divplat", base + 0x78, 30);
+ clks[IMX8ULP_CLK_DMA1_CH29] = imx_clk_hw_gate("pcc_dma1_ch29", "xbar_ad_divplat", base + 0x7c, 30);
+ clks[IMX8ULP_CLK_DMA1_CH30] = imx_clk_hw_gate("pcc_dma1_ch30", "xbar_ad_divplat", base + 0x80, 30);
+ clks[IMX8ULP_CLK_DMA1_CH31] = imx_clk_hw_gate("pcc_dma1_ch31", "xbar_ad_divplat", base + 0x84, 30);
+ clks[IMX8ULP_CLK_MU0_B] = imx_clk_hw_gate("mu0_b", "xbar_ad_divplat", base + 0x88, 30);
+ clks[IMX8ULP_CLK_MU3_A] = imx_clk_hw_gate("mu3_a", "xbar_ad_divplat", base + 0x8c, 30);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ imx_register_uart_clocks(1);
+
+ /* register the pcc3 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc3_resets, ARRAY_SIZE(pcc3_resets));
+}
+
+static int imx8ulp_clk_pcc4_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC4_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC4_END;
+ clks = clk_data->hws;
+
+ /* PCC4 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_FLEXSPI2] = imx8ulp_clk_hw_composite("flexspi2", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, true, true, base + 0x4, 1);
+ clks[IMX8ULP_CLK_TPM6] = imx8ulp_clk_hw_composite("tpm6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x8, 1);
+ clks[IMX8ULP_CLK_TPM7] = imx8ulp_clk_hw_composite("tpm7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0xc, 1);
+ clks[IMX8ULP_CLK_LPI2C6] = imx8ulp_clk_hw_composite("lpi2c6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x10, 1);
+ clks[IMX8ULP_CLK_LPI2C7] = imx8ulp_clk_hw_composite("lpi2c7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x14, 1);
+ clks[IMX8ULP_CLK_LPUART6] = imx8ulp_clk_hw_composite("lpuart6", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x18, 1);
+ clks[IMX8ULP_CLK_LPUART7] = imx8ulp_clk_hw_composite("lpuart7", pcc4_periph_bus_sels, ARRAY_SIZE(pcc4_periph_bus_sels), true, true, true, base + 0x1c, 1);
+ clks[IMX8ULP_CLK_SAI4] = imx8ulp_clk_hw_composite("sai4", xbar_divbus, 1, false, false, true, base + 0x20, 1); /* sai ipg, NOT from sai sel */
+ clks[IMX8ULP_CLK_SAI5] = imx8ulp_clk_hw_composite("sai5", xbar_divbus, 1, false, false, true, base + 0x24, 1); /* sai ipg */
+ clks[IMX8ULP_CLK_PCTLE] = imx_clk_hw_gate("pctle", "xbar_divbus", base + 0x28, 30);
+ clks[IMX8ULP_CLK_PCTLF] = imx_clk_hw_gate("pctlf", "xbar_divbus", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_USDHC0] = imx8ulp_clk_hw_composite("usdhc0", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x34, 1);
+ clks[IMX8ULP_CLK_USDHC1] = imx8ulp_clk_hw_composite("usdhc1", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x38, 1);
+ clks[IMX8ULP_CLK_USDHC2] = imx8ulp_clk_hw_composite("usdhc2", pcc4_periph_plat_sels, ARRAY_SIZE(pcc4_periph_plat_sels), true, false, true, base + 0x3c, 1);
+ clks[IMX8ULP_CLK_USB0] = imx8ulp_clk_hw_composite("usb0", nic_per_divplat, 1, false, false, true, base + 0x40, 1);
+ clks[IMX8ULP_CLK_USB0_PHY] = imx8ulp_clk_hw_composite("usb0_phy", xbar_divbus, 1, false, false, true, base + 0x44, 1);
+ clks[IMX8ULP_CLK_USB1] = imx8ulp_clk_hw_composite("usb1", nic_per_divplat, 1, false, false, true, base + 0x48, 1);
+ clks[IMX8ULP_CLK_USB1_PHY] = imx8ulp_clk_hw_composite("usb1_phy", xbar_divbus, 1, false, false, true, base + 0x4c, 1);
+ clks[IMX8ULP_CLK_USB_XBAR] = imx_clk_hw_gate("usb_xbar", "xbar_divbus", base + 0x50, 30);
+ clks[IMX8ULP_CLK_ENET] = imx8ulp_clk_hw_composite("enet", nic_per_divplat, 1, false, false, true, base + 0x54, 1);
+ clks[IMX8ULP_CLK_RGPIOE] = imx_clk_hw_gate("rgpioe", "nic_per_divplat", base + 0x78, 30);
+ clks[IMX8ULP_CLK_RGPIOF] = imx_clk_hw_gate("rgpiof", "nic_per_divplat", base + 0x7c, 30);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ /* register the pcc4 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc4_resets, ARRAY_SIZE(pcc4_resets));
+}
+
+static int imx8ulp_clk_pcc5_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk_hw_onecell_data *clk_data;
+ struct clk_hw **clks;
+ void __iomem *base;
+ int ret;
+
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, IMX8ULP_CLK_PCC5_END),
+ GFP_KERNEL);
+ if (!clk_data)
+ return -ENOMEM;
+
+ clk_data->num = IMX8ULP_CLK_PCC5_END;
+ clks = clk_data->hws;
+
+ /* PCC5 */
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (WARN_ON(IS_ERR(base)))
+ return PTR_ERR(base);
+
+ clks[IMX8ULP_CLK_DMA2_MP] = imx_clk_hw_gate("pcc_dma2_mp", "lpav_axi_div", base + 0x0, 30);
+ clks[IMX8ULP_CLK_DMA2_CH0] = imx_clk_hw_gate("pcc_dma2_ch0", "lpav_axi_div", base + 0x4, 30);
+ clks[IMX8ULP_CLK_DMA2_CH1] = imx_clk_hw_gate("pcc_dma2_ch1", "lpav_axi_div", base + 0x8, 30);
+ clks[IMX8ULP_CLK_DMA2_CH2] = imx_clk_hw_gate("pcc_dma2_ch2", "lpav_axi_div", base + 0xc, 30);
+ clks[IMX8ULP_CLK_DMA2_CH3] = imx_clk_hw_gate("pcc_dma2_ch3", "lpav_axi_div", base + 0x10, 30);
+ clks[IMX8ULP_CLK_DMA2_CH4] = imx_clk_hw_gate("pcc_dma2_ch4", "lpav_axi_div", base + 0x14, 30);
+ clks[IMX8ULP_CLK_DMA2_CH5] = imx_clk_hw_gate("pcc_dma2_ch5", "lpav_axi_div", base + 0x18, 30);
+ clks[IMX8ULP_CLK_DMA2_CH6] = imx_clk_hw_gate("pcc_dma2_ch6", "lpav_axi_div", base + 0x1c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH7] = imx_clk_hw_gate("pcc_dma2_ch7", "lpav_axi_div", base + 0x20, 30);
+ clks[IMX8ULP_CLK_DMA2_CH8] = imx_clk_hw_gate("pcc_dma2_ch8", "lpav_axi_div", base + 0x24, 30);
+ clks[IMX8ULP_CLK_DMA2_CH9] = imx_clk_hw_gate("pcc_dma2_ch9", "lpav_axi_div", base + 0x28, 30);
+ clks[IMX8ULP_CLK_DMA2_CH10] = imx_clk_hw_gate("pcc_dma2_ch10", "lpav_axi_div", base + 0x2c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH11] = imx_clk_hw_gate("pcc_dma2_ch11", "lpav_axi_div", base + 0x30, 30);
+ clks[IMX8ULP_CLK_DMA2_CH12] = imx_clk_hw_gate("pcc_dma2_ch12", "lpav_axi_div", base + 0x34, 30);
+ clks[IMX8ULP_CLK_DMA2_CH13] = imx_clk_hw_gate("pcc_dma2_ch13", "lpav_axi_div", base + 0x38, 30);
+ clks[IMX8ULP_CLK_DMA2_CH14] = imx_clk_hw_gate("pcc_dma2_ch14", "lpav_axi_div", base + 0x3c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH15] = imx_clk_hw_gate("pcc_dma2_ch15", "lpav_axi_div", base + 0x40, 30);
+ clks[IMX8ULP_CLK_DMA2_CH16] = imx_clk_hw_gate("pcc_dma2_ch16", "lpav_axi_div", base + 0x44, 30);
+ clks[IMX8ULP_CLK_DMA2_CH17] = imx_clk_hw_gate("pcc_dma2_ch17", "lpav_axi_div", base + 0x48, 30);
+ clks[IMX8ULP_CLK_DMA2_CH18] = imx_clk_hw_gate("pcc_dma2_ch18", "lpav_axi_div", base + 0x4c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH19] = imx_clk_hw_gate("pcc_dma2_ch19", "lpav_axi_div", base + 0x50, 30);
+ clks[IMX8ULP_CLK_DMA2_CH20] = imx_clk_hw_gate("pcc_dma2_ch20", "lpav_axi_div", base + 0x54, 30);
+ clks[IMX8ULP_CLK_DMA2_CH21] = imx_clk_hw_gate("pcc_dma2_ch21", "lpav_axi_div", base + 0x58, 30);
+ clks[IMX8ULP_CLK_DMA2_CH22] = imx_clk_hw_gate("pcc_dma2_ch22", "lpav_axi_div", base + 0x5c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH23] = imx_clk_hw_gate("pcc_dma2_ch23", "lpav_axi_div", base + 0x60, 30);
+ clks[IMX8ULP_CLK_DMA2_CH24] = imx_clk_hw_gate("pcc_dma2_ch24", "lpav_axi_div", base + 0x64, 30);
+ clks[IMX8ULP_CLK_DMA2_CH25] = imx_clk_hw_gate("pcc_dma2_ch25", "lpav_axi_div", base + 0x68, 30);
+ clks[IMX8ULP_CLK_DMA2_CH26] = imx_clk_hw_gate("pcc_dma2_ch26", "lpav_axi_div", base + 0x6c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH27] = imx_clk_hw_gate("pcc_dma2_ch27", "lpav_axi_div", base + 0x70, 30);
+ clks[IMX8ULP_CLK_DMA2_CH28] = imx_clk_hw_gate("pcc_dma2_ch28", "lpav_axi_div", base + 0x74, 30);
+ clks[IMX8ULP_CLK_DMA2_CH29] = imx_clk_hw_gate("pcc_dma2_ch29", "lpav_axi_div", base + 0x78, 30);
+ clks[IMX8ULP_CLK_DMA2_CH30] = imx_clk_hw_gate("pcc_dma2_ch30", "lpav_axi_div", base + 0x7c, 30);
+ clks[IMX8ULP_CLK_DMA2_CH31] = imx_clk_hw_gate("pcc_dma2_ch31", "lpav_axi_div", base + 0x80, 30);
+
+ clks[IMX8ULP_CLK_AVD_SIM] = imx_clk_hw_gate("avd_sim", "lpav_bus_div", base + 0x94, 30);
+ clks[IMX8ULP_CLK_TPM8] = imx8ulp_clk_hw_composite("tpm8", pcc5_periph_bus_sels, ARRAY_SIZE(pcc5_periph_bus_sels), true, true, true, base + 0xa0, 1);
+ clks[IMX8ULP_CLK_MU2_B] = imx_clk_hw_gate("mu2_b", "lpav_bus_div", base + 0x84, 30);
+ clks[IMX8ULP_CLK_MU3_B] = imx_clk_hw_gate("mu3_b", "lpav_bus_div", base + 0x88, 30);
+ clks[IMX8ULP_CLK_SAI6] = imx8ulp_clk_hw_composite("sai6", lpav_bus_div, 1, false, false, true, base + 0xa4, 1);
+ clks[IMX8ULP_CLK_SAI7] = imx8ulp_clk_hw_composite("sai7", lpav_bus_div, 1, false, false, true, base + 0xa8, 1);
+ clks[IMX8ULP_CLK_SPDIF] = imx8ulp_clk_hw_composite("spdif", lpav_bus_div, 1, false, false, true, base + 0xac, 1);
+ clks[IMX8ULP_CLK_ISI] = imx8ulp_clk_hw_composite("isi", lpav_axi_div, 1, false, false, true, base + 0xb0, 1);
+ clks[IMX8ULP_CLK_CSI_REGS] = imx8ulp_clk_hw_composite("csi_regs", lpav_bus_div, 1, false, false, true, base + 0xb4, 1);
+ clks[IMX8ULP_CLK_CSI] = imx8ulp_clk_hw_composite("csi", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xbc, 1);
+ clks[IMX8ULP_CLK_DSI] = imx8ulp_clk_hw_composite("dsi", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xc0, 1);
+ clks[IMX8ULP_CLK_WDOG5] = imx8ulp_clk_hw_composite("wdog5", pcc5_periph_bus_sels, ARRAY_SIZE(pcc5_periph_bus_sels), true, true, true, base + 0xc8, 1);
+ clks[IMX8ULP_CLK_EPDC] = imx8ulp_clk_hw_composite("epdc", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xcc, 1);
+ clks[IMX8ULP_CLK_PXP] = imx8ulp_clk_hw_composite("pxp", lpav_axi_div, 1, false, false, true, base + 0xd0, 1);
+ clks[IMX8ULP_CLK_GPU2D] = imx8ulp_clk_hw_composite("gpu2d", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf0, 1);
+ clks[IMX8ULP_CLK_GPU3D] = imx8ulp_clk_hw_composite("gpu3d", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf4, 1);
+ clks[IMX8ULP_CLK_DC_NANO] = imx8ulp_clk_hw_composite("dc_nano", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0xf8, 1);
+ clks[IMX8ULP_CLK_CSI_CLK_UI] = imx8ulp_clk_hw_composite("csi_clk_ui", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0x10c, 1);
+ clks[IMX8ULP_CLK_CSI_CLK_ESC] = imx8ulp_clk_hw_composite("csi_clk_esc", pcc5_periph_plat_sels, ARRAY_SIZE(pcc5_periph_plat_sels), true, true, true, base + 0x110, 1);
+ clks[IMX8ULP_CLK_RGPIOD] = imx_clk_hw_gate("rgpiod", "lpav_axi_div", base + 0x114, 30);
+ clks[IMX8ULP_CLK_DSI_TX_ESC] = imx_clk_hw_fixed_factor("mipi_dsi_tx_esc", "dsi", 1, 4);
+
+ imx_check_clk_hws(clks, clk_data->num);
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
+ if (ret)
+ return ret;
+
+ /* register the pcc5 reset controller */
+ return imx8ulp_pcc_reset_init(pdev, base, pcc5_resets, ARRAY_SIZE(pcc5_resets));
+}
+
+static int imx8ulp_clk_probe(struct platform_device *pdev)
+{
+ int (*probe)(struct platform_device *pdev);
+
+ probe = of_device_get_match_data(&pdev->dev);
+
+ if (probe)
+ return probe(pdev);
+
+ return 0;
+}
+
+static const struct of_device_id imx8ulp_clk_dt_ids[] = {
+ { .compatible = "fsl,imx8ulp-pcc3", .data = imx8ulp_clk_pcc3_init },
+ { .compatible = "fsl,imx8ulp-pcc4", .data = imx8ulp_clk_pcc4_init },
+ { .compatible = "fsl,imx8ulp-pcc5", .data = imx8ulp_clk_pcc5_init },
+ { .compatible = "fsl,imx8ulp-cgc2", .data = imx8ulp_clk_cgc2_init },
+ { .compatible = "fsl,imx8ulp-cgc1", .data = imx8ulp_clk_cgc1_init },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, imx8ulp_clk_dt_ids);
+
+static struct platform_driver imx8ulp_clk_driver = {
+ .probe = imx8ulp_clk_probe,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .of_match_table = imx8ulp_clk_dt_ids,
+ },
+};
+module_platform_driver(imx8ulp_clk_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX8ULP clock driver");
+MODULE_LICENSE("GPL v2");
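
Each PCC block above is also exposed as a reset controller: assert clears PCC_SW_RST (holding the peripheral in reset) and deassert sets it again, mirroring what pcc_gate_enable() does on the clock side. A minimal consumer sketch with the generic reset API; the device and the single anonymous reset line are illustrative:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/reset.h>

static int my_periph_hw_reset(struct device *dev)
{
	struct reset_control *rst;
	int ret;

	/* Resolves the "resets" phandle pointing at the PCC node. */
	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);	/* clears PCC_SW_RST */
	if (ret)
		return ret;

	return reset_control_deassert(rst);	/* sets PCC_SW_RST again */
}
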
diff --git a/drivers/clk/imx/clk-pfdv2.c b/drivers/clk/imx/clk-pfdv2.c
index 6b744c84278e..6ca53a960eb7 100644
--- a/drivers/clk/imx/clk-pfdv2.c
+++ b/drivers/clk/imx/clk-pfdv2.c
@@ -161,8 +161,17 @@ static int clk_pfdv2_set_rate(struct clk_hw *hw, unsigned long rate,
if (!rate)
return -EINVAL;
- /* PFD can NOT change rate without gating */
- WARN_ON(clk_pfdv2_is_enabled(hw));
+ /*
+	 * A PFD cannot change rate without being gated.
+	 * PFDs may be enabled in HW by default without any Linux
+	 * consumer, so the enable count is 0 and CLK_SET_RATE_GATE
+	 * cannot block the set_rate op (notably for assigned-clock
+	 * properties). To keep things simple, just disable the PFD
+	 * if it is enabled in HW but not in SW.
+ */
+ if (clk_pfdv2_is_enabled(hw))
+ clk_pfdv2_disable(hw);
tmp = tmp * 18 + rate / 2;
do_div(tmp, rate);
@@ -191,8 +200,8 @@ static const struct clk_ops clk_pfdv2_ops = {
.is_enabled = clk_pfdv2_is_enabled,
};
-struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
- void __iomem *reg, u8 idx)
+struct clk_hw *imx_clk_hw_pfdv2(enum imx_pfdv2_type type, const char *name,
+ const char *parent_name, void __iomem *reg, u8 idx)
{
struct clk_init_data init;
struct clk_pfdv2 *pfd;
@@ -214,7 +223,10 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
init.ops = &clk_pfdv2_ops;
init.parent_names = &parent_name;
init.num_parents = 1;
- init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+ if (type == IMX_PFDV2_IMX7ULP)
+ init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT;
+ else
+ init.flags = CLK_SET_RATE_GATE;
pfd->hw.init = &init;
@@ -227,3 +239,4 @@ struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pfdv2);
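
The reasoning in the comment above turns on how CLK_SET_RATE_GATE is enforced: the clk core only refuses a rate change once a consumer has prepared the clock, so a PFD left running by the ROM or bootloader (prepare count still zero in Linux) is not protected, which is why clk_pfdv2_set_rate() now gates the hardware itself. A small consumer-side sketch of the behaviour the flag does cover; the rates are arbitrary:

#include <linux/clk.h>

static int demo_set_rate_gate(struct clk *pfd)
{
	int ret;

	/*
	 * Allowed: nothing has prepared the PFD yet, even if the bootloader
	 * left the hardware gate open.
	 */
	ret = clk_set_rate(pfd, 392727272);
	if (ret)
		return ret;

	ret = clk_prepare_enable(pfd);
	if (ret)
		return ret;

	/* Refused by the core while the prepare count is non-zero. */
	ret = clk_set_rate(pfd, 297000000);

	clk_disable_unprepare(pfd);
	return ret;
}
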
diff --git a/drivers/clk/imx/clk-pllv4.c b/drivers/clk/imx/clk-pllv4.c
index 8ec703f27417..6e7e34571fc8 100644
--- a/drivers/clk/imx/clk-pllv4.c
+++ b/drivers/clk/imx/clk-pllv4.c
@@ -23,14 +23,17 @@
/* PLL Configuration Register (xPLLCFG) */
#define PLL_CFG_OFFSET 0x08
+#define IMX8ULP_PLL_CFG_OFFSET 0x10
#define BP_PLL_MULT 16
#define BM_PLL_MULT (0x7f << 16)
/* PLL Numerator Register (xPLLNUM) */
#define PLL_NUM_OFFSET 0x10
+#define IMX8ULP_PLL_NUM_OFFSET 0x1c
/* PLL Denominator Register (xPLLDENOM) */
#define PLL_DENOM_OFFSET 0x14
+#define IMX8ULP_PLL_DENOM_OFFSET 0x18
#define MAX_MFD 0x3fffffff
#define DEFAULT_MFD 1000000
@@ -38,6 +41,9 @@
struct clk_pllv4 {
struct clk_hw hw;
void __iomem *base;
+ u32 cfg_offset;
+ u32 num_offset;
+ u32 denom_offset;
};
/* Valid PLL MULT Table */
@@ -72,12 +78,12 @@ static unsigned long clk_pllv4_recalc_rate(struct clk_hw *hw,
u32 mult, mfn, mfd;
u64 temp64;
- mult = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ mult = readl_relaxed(pll->base + pll->cfg_offset);
mult &= BM_PLL_MULT;
mult >>= BP_PLL_MULT;
- mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
- mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
+ mfn = readl_relaxed(pll->base + pll->num_offset);
+ mfd = readl_relaxed(pll->base + pll->denom_offset);
temp64 = parent_rate;
temp64 *= mfn;
do_div(temp64, mfd);
@@ -165,13 +171,13 @@ static int clk_pllv4_set_rate(struct clk_hw *hw, unsigned long rate,
do_div(temp64, parent_rate);
mfn = temp64;
- val = readl_relaxed(pll->base + PLL_CFG_OFFSET);
+ val = readl_relaxed(pll->base + pll->cfg_offset);
val &= ~BM_PLL_MULT;
val |= mult << BP_PLL_MULT;
- writel_relaxed(val, pll->base + PLL_CFG_OFFSET);
+ writel_relaxed(val, pll->base + pll->cfg_offset);
- writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
- writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
+ writel_relaxed(mfn, pll->base + pll->num_offset);
+ writel_relaxed(mfd, pll->base + pll->denom_offset);
return 0;
}
@@ -207,8 +213,8 @@ static const struct clk_ops clk_pllv4_ops = {
.is_prepared = clk_pllv4_is_prepared,
};
-struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
- void __iomem *base)
+struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ const char *parent_name, void __iomem *base)
{
struct clk_pllv4 *pll;
struct clk_hw *hw;
@@ -221,6 +227,16 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
pll->base = base;
+ if (type == IMX_PLLV4_IMX8ULP) {
+ pll->cfg_offset = IMX8ULP_PLL_CFG_OFFSET;
+ pll->num_offset = IMX8ULP_PLL_NUM_OFFSET;
+ pll->denom_offset = IMX8ULP_PLL_DENOM_OFFSET;
+ } else {
+ pll->cfg_offset = PLL_CFG_OFFSET;
+ pll->num_offset = PLL_NUM_OFFSET;
+ pll->denom_offset = PLL_DENOM_OFFSET;
+ }
+
init.name = name;
init.ops = &clk_pllv4_ops;
init.parent_names = &parent_name;
@@ -238,3 +254,4 @@ struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
return hw;
}
+EXPORT_SYMBOL_GPL(imx_clk_hw_pllv4);
diff --git a/drivers/clk/imx/clk.h b/drivers/clk/imx/clk.h
index e144f983fd8c..819949973db1 100644
--- a/drivers/clk/imx/clk.h
+++ b/drivers/clk/imx/clk.h
@@ -42,6 +42,16 @@ enum imx_pll14xx_type {
PLL_1443X,
};
+enum imx_pllv4_type {
+ IMX_PLLV4_IMX7ULP,
+ IMX_PLLV4_IMX8ULP,
+};
+
+enum imx_pfdv2_type {
+ IMX_PFDV2_IMX7ULP,
+ IMX_PFDV2_IMX8ULP,
+};
+
/* NOTE: Rate table should be kept sorted in descending order. */
struct imx_pll14xx_rate_table {
unsigned int rate;
@@ -88,9 +98,6 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
#define imx_clk_divider(name, parent, reg, shift, width) \
to_clk(imx_clk_hw_divider(name, parent, reg, shift, width))
-#define imx_clk_divider2(name, parent, reg, shift, width) \
- to_clk(imx_clk_hw_divider2(name, parent, reg, shift, width))
-
#define imx_clk_divider_flags(name, parent, reg, shift, width, flags) \
to_clk(imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags))
@@ -103,40 +110,93 @@ extern struct imx_pll14xx_clk imx_1443x_dram_pll;
#define imx_clk_gate2(name, parent, reg, shift) \
to_clk(imx_clk_hw_gate2(name, parent, reg, shift))
+#define imx_clk_gate2_cgr(name, parent, reg, shift, cgr_val) \
+ to_clk(__imx_clk_hw_gate2(name, parent, reg, shift, cgr_val, 0, NULL))
+
#define imx_clk_gate2_flags(name, parent, reg, shift, flags) \
to_clk(imx_clk_hw_gate2_flags(name, parent, reg, shift, flags))
-#define imx_clk_gate2_shared2(name, parent, reg, shift, share_count) \
- to_clk(imx_clk_hw_gate2_shared2(name, parent, reg, shift, share_count))
-
-#define imx_clk_gate3(name, parent, reg, shift) \
- to_clk(imx_clk_hw_gate3(name, parent, reg, shift))
-
-#define imx_clk_gate4(name, parent, reg, shift) \
- to_clk(imx_clk_hw_gate4(name, parent, reg, shift))
-
#define imx_clk_mux(name, reg, shift, width, parents, num_parents) \
to_clk(imx_clk_hw_mux(name, reg, shift, width, parents, num_parents))
+#define imx_clk_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
+ to_clk(imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags))
+
+#define imx_clk_mux2_flags(name, reg, shift, width, parents, num_parents, flags) \
+ to_clk(imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, flags))
+
#define imx_clk_pllv1(type, name, parent, base) \
to_clk(imx_clk_hw_pllv1(type, name, parent, base))
#define imx_clk_pllv2(name, parent, base) \
to_clk(imx_clk_hw_pllv2(name, parent, base))
-#define imx_clk_frac_pll(name, parent_name, base) \
- to_clk(imx_clk_hw_frac_pll(name, parent_name, base))
+
+#define imx_clk_hw_gate(name, parent, reg, shift) \
+ imx_clk_hw_gate_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate2(name, parent, reg, shift) \
+ imx_clk_hw_gate2_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate_dis(name, parent, reg, shift) \
+ imx_clk_hw_gate_dis_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate_dis_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags, CLK_GATE_SET_TO_DISABLE)
+
+#define imx_clk_hw_gate_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags, 0)
+
+#define imx_clk_hw_gate2_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, flags, NULL)
+
+#define imx_clk_hw_gate2_shared(name, parent, reg, shift, shared_count) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, 0, shared_count)
-#define imx_clk_sscg_pll(name, parent_names, num_parents, parent,\
- bypass1, bypass2, base, flags) \
- to_clk(imx_clk_hw_sscg_pll(name, parent_names, num_parents, parent,\
- bypass1, bypass2, base, flags))
+#define imx_clk_hw_gate2_shared2(name, parent, reg, shift, shared_count) \
+ __imx_clk_hw_gate2(name, parent, reg, shift, 0x3, CLK_OPS_PARENT_ENABLE, shared_count)
-struct clk *imx_clk_pll14xx(const char *name, const char *parent_name,
- void __iomem *base, const struct imx_pll14xx_clk *pll_clk);
+#define imx_clk_hw_gate3(name, parent, reg, shift) \
+ imx_clk_hw_gate3_flags(name, parent, reg, shift, 0)
-#define imx_clk_pll14xx(name, parent_name, base, pll_clk) \
- to_clk(imx_clk_hw_pll14xx(name, parent_name, base, pll_clk))
+#define imx_clk_hw_gate3_flags(name, parent, reg, shift, flags) \
+ __imx_clk_hw_gate(name, parent, reg, shift, flags | CLK_OPS_PARENT_ENABLE, 0)
+
+#define imx_clk_hw_gate4(name, parent, reg, shift) \
+ imx_clk_hw_gate4_flags(name, parent, reg, shift, 0)
+
+#define imx_clk_hw_gate4_flags(name, parent, reg, shift, flags) \
+ imx_clk_hw_gate2_flags(name, parent, reg, shift, flags | CLK_OPS_PARENT_ENABLE)
+
+#define imx_clk_hw_mux2(name, reg, shift, width, parents, num_parents) \
+ imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, 0)
+
+#define imx_clk_hw_mux(name, reg, shift, width, parents, num_parents) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, 0, 0)
+
+#define imx_clk_hw_mux_flags(name, reg, shift, width, parents, num_parents, flags) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, flags, 0)
+
+#define imx_clk_hw_mux_ldb(name, reg, shift, width, parents, num_parents) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, CLK_SET_RATE_PARENT, CLK_MUX_READ_ONLY)
+
+#define imx_clk_hw_mux2_flags(name, reg, shift, width, parents, num_parents, flags) \
+ __imx_clk_hw_mux(name, reg, shift, width, parents, num_parents, flags | CLK_OPS_PARENT_ENABLE, 0)
+
+#define imx_clk_hw_divider(name, parent, reg, shift, width) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, CLK_SET_RATE_PARENT)
+
+#define imx_clk_hw_divider2(name, parent, reg, shift, width) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, \
+ CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE)
+
+#define imx_clk_hw_divider_flags(name, parent, reg, shift, width, flags) \
+ __imx_clk_hw_divider(name, parent, reg, shift, width, flags)
+
+#define imx_clk_hw_pll14xx(name, parent_name, base, pll_clk) \
+ imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk)
struct clk_hw *imx_dev_clk_hw_pll14xx(struct device *dev, const char *name,
const char *parent_name, void __iomem *base,
@@ -191,8 +251,8 @@ struct clk_hw *imx_clk_hw_pllv3(enum imx_pllv3_type type, const char *name,
.kdiv = (_k), \
}
-struct clk_hw *imx_clk_hw_pllv4(const char *name, const char *parent_name,
- void __iomem *base);
+struct clk_hw *imx_clk_hw_pllv4(enum imx_pllv4_type type, const char *name,
+ const char *parent_name, void __iomem *base);
struct clk_hw *clk_hw_register_gate2(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
@@ -215,8 +275,8 @@ struct clk_hw *imx_clk_hw_gate_exclusive(const char *name, const char *parent,
struct clk_hw *imx_clk_hw_pfd(const char *name, const char *parent_name,
void __iomem *reg, u8 idx);
-struct clk_hw *imx_clk_hw_pfdv2(const char *name, const char *parent_name,
- void __iomem *reg, u8 idx);
+struct clk_hw *imx_clk_hw_pfdv2(enum imx_pfdv2_type type, const char *name,
+ const char *parent_name, void __iomem *reg, u8 idx);
struct clk_hw *imx_clk_hw_busy_divider(const char *name, const char *parent_name,
void __iomem *reg, u8 shift, u8 width,
@@ -232,6 +292,12 @@ struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
bool rate_present, bool gate_present,
void __iomem *reg);
+struct clk_hw *imx8ulp_clk_hw_composite(const char *name,
+ const char * const *parent_names,
+ int num_parents, bool mux_present,
+ bool rate_present, bool gate_present,
+ void __iomem *reg, bool has_swrst);
+
struct clk_hw *imx_clk_hw_fixup_divider(const char *name, const char *parent,
void __iomem *reg, u8 shift, u8 width,
void (*fixup)(u32 *val));
@@ -247,27 +313,11 @@ static inline struct clk *to_clk(struct clk_hw *hw)
return hw->clk;
}
-static inline struct clk_hw *imx_clk_hw_pll14xx(const char *name, const char *parent_name,
- void __iomem *base,
- const struct imx_pll14xx_clk *pll_clk)
-{
- return imx_dev_clk_hw_pll14xx(NULL, name, parent_name, base, pll_clk);
-}
-
static inline struct clk_hw *imx_clk_hw_fixed(const char *name, int rate)
{
return clk_hw_register_fixed_rate(NULL, name, NULL, 0, rate);
}
-static inline struct clk_hw *imx_clk_hw_mux_ldb(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_RATE_PARENT, reg,
- shift, width, CLK_MUX_READ_ONLY, &imx_ccm_lock);
-}
-
static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
const char *parent, unsigned int mult, unsigned int div)
{
@@ -275,16 +325,7 @@ static inline struct clk_hw *imx_clk_hw_fixed_factor(const char *name,
CLK_SET_RATE_PARENT, mult, div);
}
-static inline struct clk_hw *imx_clk_hw_divider(const char *name,
- const char *parent,
- void __iomem *reg, u8 shift,
- u8 width)
-{
- return clk_hw_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
+static inline struct clk_hw *__imx_clk_hw_divider(const char *name,
const char *parent,
void __iomem *reg, u8 shift,
u8 width, unsigned long flags)
@@ -293,237 +334,31 @@ static inline struct clk_hw *imx_clk_hw_divider_flags(const char *name,
reg, shift, width, 0, &imx_ccm_lock);
}
-static inline struct clk_hw *imx_clk_hw_divider2(const char *name, const char *parent,
- void __iomem *reg, u8 shift, u8 width)
-{
- return clk_hw_register_divider(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_divider2_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift, u8 width,
- unsigned long flags)
-{
- return clk_register_divider(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
-{
- return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_gate(struct device *dev, const char *name,
- const char *parent, void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(dev, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_dis(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate_dis_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
+static inline struct clk_hw *__imx_clk_hw_gate(const char *name, const char *parent,
+ void __iomem *reg, u8 shift,
+ unsigned long flags,
+ unsigned long clk_gate_flags)
{
return clk_hw_register_gate(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, CLK_GATE_SET_TO_DISABLE, &imx_ccm_lock);
+ shift, clk_gate_flags, &imx_ccm_lock);
}
-static inline struct clk_hw *imx_clk_hw_gate2(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_flags(const char *name, const char *parent,
- void __iomem *reg, u8 shift, unsigned long flags)
+static inline struct clk_hw *__imx_clk_hw_gate2(const char *name, const char *parent,
+ void __iomem *reg, u8 shift, u8 cgr_val,
+ unsigned long flags,
+ unsigned int *share_count)
{
return clk_hw_register_gate2(NULL, name, parent, flags | CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_shared(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, 0x3, 0x3, 0, &imx_ccm_lock, share_count);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate2_shared2(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x3, 0x3, 0,
- &imx_ccm_lock, share_count);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_gate_shared(struct device *dev,
- const char *name, const char *parent,
- void __iomem *reg, u8 shift,
- unsigned int *share_count)
-{
- return clk_hw_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT |
- CLK_OPS_PARENT_ENABLE, reg, shift, 0x1,
- 0x1, 0, &imx_ccm_lock, share_count);
-}
-
-static inline struct clk *imx_clk_gate2_cgr(const char *name,
- const char *parent, void __iomem *reg, u8 shift, u8 cgr_val)
-{
- return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
- shift, cgr_val, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate3(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate3_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned long flags)
-{
- return clk_hw_register_gate(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0, &imx_ccm_lock);
-}
-
-#define imx_clk_gate3_flags(name, parent, reg, shift, flags) \
- to_clk(imx_clk_hw_gate3_flags(name, parent, reg, shift, flags))
-
-static inline struct clk_hw *imx_clk_hw_gate4(const char *name, const char *parent,
- void __iomem *reg, u8 shift)
-{
- return clk_hw_register_gate2(NULL, name, parent,
- CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-static inline struct clk_hw *imx_clk_hw_gate4_flags(const char *name,
- const char *parent, void __iomem *reg, u8 shift,
- unsigned long flags)
-{
- return clk_hw_register_gate2(NULL, name, parent,
- flags | CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, 0x3, 0x3, 0, &imx_ccm_lock, NULL);
-}
-
-#define imx_clk_gate4_flags(name, parent, reg, shift, flags) \
- to_clk(imx_clk_hw_gate4_flags(name, parent, reg, shift, flags))
-
-static inline struct clk_hw *imx_clk_hw_mux(const char *name, void __iomem *reg,
- u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT, reg, shift,
- width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_mux(struct device *dev,
- const char *name, void __iomem *reg, u8 shift,
- u8 width, const char * const *parents, int num_parents)
-{
- return clk_hw_register_mux(dev, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_SET_PARENT_GATE,
- reg, shift, width, 0, &imx_ccm_lock);
+ shift, cgr_val, 0x3, 0, &imx_ccm_lock, share_count);
}
-static inline struct clk *imx_clk_mux2(const char *name, void __iomem *reg,
+static inline struct clk_hw *__imx_clk_hw_mux(const char *name, void __iomem *reg,
u8 shift, u8 width, const char * const *parents,
- int num_parents)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux2(const char *name, void __iomem *reg,
- u8 shift, u8 width,
- const char * const *parents,
- int num_parents)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- CLK_SET_RATE_NO_REPARENT |
- CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_mux_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents, int num_parents,
- unsigned long flags)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT, reg, shift, width, 0,
- &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents,
- int num_parents, unsigned long flags)
-{
- return clk_hw_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk *imx_clk_mux2_flags(const char *name,
- void __iomem *reg, u8 shift, u8 width,
- const char * const *parents,
- int num_parents, unsigned long flags)
-{
- return clk_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_clk_hw_mux_flags(const char *name,
- void __iomem *reg, u8 shift,
- u8 width,
- const char * const *parents,
- int num_parents,
- unsigned long flags)
+ int num_parents, unsigned long flags, unsigned long clk_mux_flags)
{
return clk_hw_register_mux(NULL, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT,
- reg, shift, width, 0, &imx_ccm_lock);
-}
-
-static inline struct clk_hw *imx_dev_clk_hw_mux_flags(struct device *dev,
- const char *name,
- void __iomem *reg, u8 shift,
- u8 width,
- const char * const *parents,
- int num_parents,
- unsigned long flags)
-{
- return clk_hw_register_mux(dev, name, parents, num_parents,
- flags | CLK_SET_RATE_NO_REPARENT,
- reg, shift, width, 0, &imx_ccm_lock);
+ flags | CLK_SET_RATE_NO_REPARENT, reg, shift,
+ width, clk_mux_flags, &imx_ccm_lock);
}
struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
@@ -534,65 +369,55 @@ struct clk_hw *imx_clk_hw_cpu(const char *name, const char *parent_name,
#define IMX_COMPOSITE_BUS BIT(1)
#define IMX_COMPOSITE_FW_MANAGED BIT(2)
-struct clk_hw *imx8m_clk_hw_composite_flags(const char *name,
+#define IMX_COMPOSITE_CLK_FLAGS_DEFAULT \
+ (CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+#define IMX_COMPOSITE_CLK_FLAGS_CRITICAL \
+ (IMX_COMPOSITE_CLK_FLAGS_DEFAULT | CLK_IS_CRITICAL)
+#define IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE \
+ (IMX_COMPOSITE_CLK_FLAGS_DEFAULT | CLK_GET_RATE_NOCACHE)
+#define IMX_COMPOSITE_CLK_FLAGS_CRITICAL_GET_RATE_NO_CACHE \
+ (IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE | CLK_IS_CRITICAL)
+
+struct clk_hw *__imx8m_clk_hw_composite(const char *name,
const char * const *parent_names,
int num_parents,
void __iomem *reg,
u32 composite_flags,
unsigned long flags);
+#define _imx8m_clk_hw_composite(name, parent_names, reg, composite_flags, flags) \
+ __imx8m_clk_hw_composite(name, parent_names, \
+ ARRAY_SIZE(parent_names), reg, composite_flags, flags)
+
+#define imx8m_clk_hw_composite(name, parent_names, reg) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
+
+#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
+
#define imx8m_clk_hw_composite_bus(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_BUS, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
#define imx8m_clk_hw_composite_bus_critical(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_BUS, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE | CLK_IS_CRITICAL)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_BUS, IMX_COMPOSITE_CLK_FLAGS_CRITICAL)
#define imx8m_clk_hw_composite_core(name, parent_names, reg) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, \
- IMX_COMPOSITE_CORE, \
- CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
-
-#define imx8m_clk_composite_flags(name, parent_names, num_parents, reg, \
- flags) \
- to_clk(imx8m_clk_hw_composite_flags(name, parent_names, \
- num_parents, reg, 0, flags))
-
-#define __imx8m_clk_hw_composite(name, parent_names, reg, flags) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, 0, \
- flags | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
-
-#define __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, flags) \
- imx8m_clk_hw_composite_flags(name, parent_names, \
- ARRAY_SIZE(parent_names), reg, IMX_COMPOSITE_FW_MANAGED, \
- flags | CLK_GET_RATE_NOCACHE | CLK_SET_RATE_NO_REPARENT | CLK_OPS_PARENT_ENABLE)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_CORE, IMX_COMPOSITE_CLK_FLAGS_DEFAULT)
#define imx8m_clk_hw_fw_managed_composite(name, parent_names, reg) \
- __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, 0)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_FW_MANAGED, \
+ IMX_COMPOSITE_CLK_FLAGS_GET_RATE_NO_CACHE)
#define imx8m_clk_hw_fw_managed_composite_critical(name, parent_names, reg) \
- __imx8m_clk_hw_fw_managed_composite(name, parent_names, reg, CLK_IS_CRITICAL)
-
-#define __imx8m_clk_composite(name, parent_names, reg, flags) \
- to_clk(__imx8m_clk_hw_composite(name, parent_names, reg, flags))
-
-#define imx8m_clk_hw_composite(name, parent_names, reg) \
- __imx8m_clk_hw_composite(name, parent_names, reg, 0)
-
-#define imx8m_clk_composite(name, parent_names, reg) \
- __imx8m_clk_composite(name, parent_names, reg, 0)
-
-#define imx8m_clk_hw_composite_critical(name, parent_names, reg) \
- __imx8m_clk_hw_composite(name, parent_names, reg, CLK_IS_CRITICAL)
-
-#define imx8m_clk_composite_critical(name, parent_names, reg) \
- __imx8m_clk_composite(name, parent_names, reg, CLK_IS_CRITICAL)
+ _imx8m_clk_hw_composite(name, parent_names, reg, \
+ IMX_COMPOSITE_FW_MANAGED, \
+ IMX_COMPOSITE_CLK_FLAGS_CRITICAL_GET_RATE_NO_CACHE)
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
unsigned long flags, void __iomem *reg, u8 shift, u8 width,
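To illustrate the consolidation above (this snippet is not part of the patch), the old per-variant inline helpers now reduce to the two private building blocks; for example, a gate4 registration is just __imx_clk_hw_gate2() with a cgr_val of 0x3 and CLK_OPS_PARENT_ENABLE set.

	/* Sketch only: "uart_gate", "per" and the bit position are made up. */
	static struct clk_hw *example_gate4(void __iomem *reg)
	{
		/*
		 * Expands through imx_clk_hw_gate4_flags() and
		 * imx_clk_hw_gate2_flags() to:
		 *   __imx_clk_hw_gate2("uart_gate", "per", reg, 5, 0x3,
		 *                      CLK_OPS_PARENT_ENABLE, NULL);
		 */
		return imx_clk_hw_gate4("uart_gate", "per", reg, 5);
	}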
diff --git a/drivers/clk/mediatek/Kconfig b/drivers/clk/mediatek/Kconfig
index 439b7c8d0d07..3ce6fb04d8ff 100644
--- a/drivers/clk/mediatek/Kconfig
+++ b/drivers/clk/mediatek/Kconfig
@@ -6,7 +6,7 @@ menu "Clock driver for MediaTek SoC"
depends on ARCH_MEDIATEK || COMPILE_TEST
config COMMON_CLK_MEDIATEK
- bool
+ tristate
select RESET_CONTROLLER
help
MediaTek SoCs' clock support.
@@ -204,7 +204,7 @@ config COMMON_CLK_MT6765_MIPI2BSYS
This driver supports MediaTek MT6765 mipi2bsys clocks.
config COMMON_CLK_MT6779
- bool "Clock driver for MediaTek MT6779"
+ tristate "Clock driver for MediaTek MT6779"
depends on (ARCH_MEDIATEK && ARM64) || COMPILE_TEST
select COMMON_CLK_MEDIATEK
default ARCH_MEDIATEK && ARM64
@@ -212,49 +212,49 @@ config COMMON_CLK_MT6779
This driver supports MediaTek MT6779 basic clocks.
config COMMON_CLK_MT6779_MMSYS
- bool "Clock driver for MediaTek MT6779 mmsys"
+ tristate "Clock driver for MediaTek MT6779 mmsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 mmsys clocks.
config COMMON_CLK_MT6779_IMGSYS
- bool "Clock driver for MediaTek MT6779 imgsys"
+ tristate "Clock driver for MediaTek MT6779 imgsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 imgsys clocks.
config COMMON_CLK_MT6779_IPESYS
- bool "Clock driver for MediaTek MT6779 ipesys"
+ tristate "Clock driver for MediaTek MT6779 ipesys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 ipesys clocks.
config COMMON_CLK_MT6779_CAMSYS
- bool "Clock driver for MediaTek MT6779 camsys"
+ tristate "Clock driver for MediaTek MT6779 camsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 camsys clocks.
config COMMON_CLK_MT6779_VDECSYS
- bool "Clock driver for MediaTek MT6779 vdecsys"
+ tristate "Clock driver for MediaTek MT6779 vdecsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 vdecsys clocks.
config COMMON_CLK_MT6779_VENCSYS
- bool "Clock driver for MediaTek MT6779 vencsys"
+ tristate "Clock driver for MediaTek MT6779 vencsys"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 vencsys clocks.
config COMMON_CLK_MT6779_MFGCFG
- bool "Clock driver for MediaTek MT6779 mfgcfg"
+ tristate "Clock driver for MediaTek MT6779 mfgcfg"
depends on COMMON_CLK_MT6779
help
This driver supports MediaTek MT6779 mfgcfg clocks.
config COMMON_CLK_MT6779_AUDSYS
- bool "Clock driver for Mediatek MT6779 audsys"
+ tristate "Clock driver for Mediatek MT6779 audsys"
depends on COMMON_CLK_MT6779
help
This driver supports Mediatek MT6779 audsys clocks.
@@ -575,6 +575,14 @@ config COMMON_CLK_MT8192_VENCSYS
help
This driver supports MediaTek MT8192 vencsys clocks.
+config COMMON_CLK_MT8195
+ bool "Clock driver for MediaTek MT8195"
+ depends on ARM64 || COMPILE_TEST
+ select COMMON_CLK_MEDIATEK
+ default ARCH_MEDIATEK
+ help
+ This driver supports MediaTek MT8195 clocks.
+
config COMMON_CLK_MT8516
bool "Clock driver for MediaTek MT8516"
depends on ARCH_MEDIATEK || COMPILE_TEST
diff --git a/drivers/clk/mediatek/Makefile b/drivers/clk/mediatek/Makefile
index 15bc045f0b71..dc96038a0155 100644
--- a/drivers/clk/mediatek/Makefile
+++ b/drivers/clk/mediatek/Makefile
@@ -80,5 +80,13 @@ obj-$(CONFIG_COMMON_CLK_MT8192_MSDC) += clk-mt8192-msdc.o
obj-$(CONFIG_COMMON_CLK_MT8192_SCP_ADSP) += clk-mt8192-scp_adsp.o
obj-$(CONFIG_COMMON_CLK_MT8192_VDECSYS) += clk-mt8192-vdec.o
obj-$(CONFIG_COMMON_CLK_MT8192_VENCSYS) += clk-mt8192-venc.o
+obj-$(CONFIG_COMMON_CLK_MT8195) += clk-mt8195-apmixedsys.o clk-mt8195-topckgen.o \
+ clk-mt8195-peri_ao.o clk-mt8195-infra_ao.o \
+ clk-mt8195-cam.o clk-mt8195-ccu.o clk-mt8195-img.o \
+ clk-mt8195-ipe.o clk-mt8195-mfg.o clk-mt8195-scp_adsp.o \
+ clk-mt8195-vdec.o clk-mt8195-vdo0.o clk-mt8195-vdo1.o \
+ clk-mt8195-venc.o clk-mt8195-vpp0.o clk-mt8195-vpp1.o \
+ clk-mt8195-wpe.o clk-mt8195-imp_iic_wrap.o \
+ clk-mt8195-apusys_pll.o
obj-$(CONFIG_COMMON_CLK_MT8516) += clk-mt8516.o
obj-$(CONFIG_COMMON_CLK_MT8516_AUDSYS) += clk-mt8516-aud.o
diff --git a/drivers/clk/mediatek/clk-apmixed.c b/drivers/clk/mediatek/clk-apmixed.c
index 258d128370f2..caa9119413f1 100644
--- a/drivers/clk/mediatek/clk-apmixed.c
+++ b/drivers/clk/mediatek/clk-apmixed.c
@@ -5,6 +5,7 @@
*/
#include <linux/delay.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
@@ -97,3 +98,5 @@ struct clk * __init mtk_clk_register_ref2usb_tx(const char *name,
return clk;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-cpumux.c b/drivers/clk/mediatek/clk-cpumux.c
index 61eeae4e60fb..e188018bc906 100644
--- a/drivers/clk/mediatek/clk-cpumux.c
+++ b/drivers/clk/mediatek/clk-cpumux.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include "clk-mtk.h"
@@ -106,3 +107,5 @@ int mtk_clk_register_cpumuxes(struct device_node *node,
return 0;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-gate.c b/drivers/clk/mediatek/clk-gate.c
index a35cf0b22150..b02d2f74dd0d 100644
--- a/drivers/clk/mediatek/clk-gate.c
+++ b/drivers/clk/mediatek/clk-gate.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clkdev.h>
+#include <linux/module.h>
#include "clk-mtk.h"
#include "clk-gate.h"
@@ -122,24 +123,28 @@ const struct clk_ops mtk_clk_gate_ops_setclr = {
.enable = mtk_cg_enable,
.disable = mtk_cg_disable,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr);
const struct clk_ops mtk_clk_gate_ops_setclr_inv = {
.is_enabled = mtk_cg_bit_is_set,
.enable = mtk_cg_enable_inv,
.disable = mtk_cg_disable_inv,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_setclr_inv);
const struct clk_ops mtk_clk_gate_ops_no_setclr = {
.is_enabled = mtk_cg_bit_is_cleared,
.enable = mtk_cg_enable_no_setclr,
.disable = mtk_cg_disable_no_setclr,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr);
const struct clk_ops mtk_clk_gate_ops_no_setclr_inv = {
.is_enabled = mtk_cg_bit_is_set,
.enable = mtk_cg_enable_inv_no_setclr,
.disable = mtk_cg_disable_inv_no_setclr,
};
+EXPORT_SYMBOL_GPL(mtk_clk_gate_ops_no_setclr_inv);
struct clk *mtk_clk_register_gate(
const char *name,
@@ -181,3 +186,6 @@ struct clk *mtk_clk_register_gate(
return clk;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gate);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-aud.c b/drivers/clk/mediatek/clk-mt6779-aud.c
index 11b209f95e25..9e889e4c361a 100644
--- a/drivers/clk/mediatek/clk-mt6779-aud.c
+++ b/drivers/clk/mediatek/clk-mt6779-aud.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -114,4 +115,5 @@ static struct platform_driver clk_mt6779_aud_drv = {
},
};
-builtin_platform_driver(clk_mt6779_aud_drv);
+module_platform_driver(clk_mt6779_aud_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-cam.c b/drivers/clk/mediatek/clk-mt6779-cam.c
index 244d4208b7fb..7f07a2a139ac 100644
--- a/drivers/clk/mediatek/clk-mt6779-cam.c
+++ b/drivers/clk/mediatek/clk-mt6779-cam.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -63,4 +64,5 @@ static struct platform_driver clk_mt6779_cam_drv = {
},
};
-builtin_platform_driver(clk_mt6779_cam_drv);
+module_platform_driver(clk_mt6779_cam_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-img.c b/drivers/clk/mediatek/clk-mt6779-img.c
index 26292a45c613..f0961fa1a286 100644
--- a/drivers/clk/mediatek/clk-mt6779-img.c
+++ b/drivers/clk/mediatek/clk-mt6779-img.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -55,4 +56,5 @@ static struct platform_driver clk_mt6779_img_drv = {
},
};
-builtin_platform_driver(clk_mt6779_img_drv);
+module_platform_driver(clk_mt6779_img_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-ipe.c b/drivers/clk/mediatek/clk-mt6779-ipe.c
index bb519075639c..8c6f3e154bf3 100644
--- a/drivers/clk/mediatek/clk-mt6779-ipe.c
+++ b/drivers/clk/mediatek/clk-mt6779-ipe.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -57,4 +58,5 @@ static struct platform_driver clk_mt6779_ipe_drv = {
},
};
-builtin_platform_driver(clk_mt6779_ipe_drv);
+module_platform_driver(clk_mt6779_ipe_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mfg.c b/drivers/clk/mediatek/clk-mt6779-mfg.c
index c6ee2a89c070..9f3372886e6b 100644
--- a/drivers/clk/mediatek/clk-mt6779-mfg.c
+++ b/drivers/clk/mediatek/clk-mt6779-mfg.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -52,4 +53,5 @@ static struct platform_driver clk_mt6779_mfg_drv = {
},
};
-builtin_platform_driver(clk_mt6779_mfg_drv);
+module_platform_driver(clk_mt6779_mfg_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-mm.c b/drivers/clk/mediatek/clk-mt6779-mm.c
index 059c1a41ac7a..33946e647122 100644
--- a/drivers/clk/mediatek/clk-mt6779-mm.c
+++ b/drivers/clk/mediatek/clk-mt6779-mm.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
#include <dt-bindings/clock/mt6779-clk.h>
@@ -105,4 +106,5 @@ static struct platform_driver clk_mt6779_mm_drv = {
},
};
-builtin_platform_driver(clk_mt6779_mm_drv);
+module_platform_driver(clk_mt6779_mm_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-vdec.c b/drivers/clk/mediatek/clk-mt6779-vdec.c
index 1900da2586a1..f4358844c2e0 100644
--- a/drivers/clk/mediatek/clk-mt6779-vdec.c
+++ b/drivers/clk/mediatek/clk-mt6779-vdec.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -64,4 +65,5 @@ static struct platform_driver clk_mt6779_vdec_drv = {
},
};
-builtin_platform_driver(clk_mt6779_vdec_drv);
+module_platform_driver(clk_mt6779_vdec_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779-venc.c b/drivers/clk/mediatek/clk-mt6779-venc.c
index b41d1f859edc..ff67084af5aa 100644
--- a/drivers/clk/mediatek/clk-mt6779-venc.c
+++ b/drivers/clk/mediatek/clk-mt6779-venc.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/clk-provider.h>
#include <linux/platform_device.h>
@@ -55,4 +56,5 @@ static struct platform_driver clk_mt6779_venc_drv = {
},
};
-builtin_platform_driver(clk_mt6779_venc_drv);
+module_platform_driver(clk_mt6779_venc_drv);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt6779.c b/drivers/clk/mediatek/clk-mt6779.c
index 6e0d3a166729..9825385c9f94 100644
--- a/drivers/clk/mediatek/clk-mt6779.c
+++ b/drivers/clk/mediatek/clk-mt6779.c
@@ -4,6 +4,7 @@
* Author: Wendell Lin <wendell.lin@mediatek.com>
*/
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -1314,3 +1315,4 @@ static int __init clk_mt6779_init(void)
}
arch_initcall(clk_mt6779_init);
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mt8195-apmixedsys.c b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
new file mode 100644
index 000000000000..6156ceeed71e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-apmixedsys.c
@@ -0,0 +1,145 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs apmixed_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_APMIXED(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &apmixed_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate apmixed_clks[] = {
+ GATE_APMIXED(CLK_APMIXED_PLL_SSUSB26M, "pll_ssusb26m", "clk26m", 1),
+};
+
+#define MT8195_PLL_FMAX (3800UL * MHZ)
+#define MT8195_PLL_FMIN (1500UL * MHZ)
+#define MT8195_INTEGER_BITS 8
+
+#define PLL(_id, _name, _reg, _pwr_reg, _en_mask, _flags, \
+ _rst_bar_mask, _pcwbits, _pd_reg, _pd_shift, \
+ _tuner_reg, _tuner_en_reg, _tuner_en_bit, \
+ _pcw_reg, _pcw_shift, _pcw_chg_reg, \
+ _en_reg, _pll_en_bit) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = _en_mask, \
+ .flags = _flags, \
+ .rst_bar_mask = _rst_bar_mask, \
+ .fmax = MT8195_PLL_FMAX, \
+ .fmin = MT8195_PLL_FMIN, \
+ .pcwbits = _pcwbits, \
+ .pcwibits = MT8195_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = _pd_shift, \
+ .tuner_reg = _tuner_reg, \
+ .tuner_en_reg = _tuner_en_reg, \
+ .tuner_en_bit = _tuner_en_bit, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = _pcw_shift, \
+ .pcw_chg_reg = _pcw_chg_reg, \
+ .en_reg = _en_reg, \
+ .pll_en_bit = _pll_en_bit, \
+ }
+
+static const struct mtk_pll_data plls[] = {
+ PLL(CLK_APMIXED_NNAPLL, "nnapll", 0x0390, 0x03a0, 0,
+ 0, 0, 22, 0x0398, 24, 0, 0, 0, 0x0398, 0, 0x0398, 0, 9),
+ PLL(CLK_APMIXED_RESPLL, "respll", 0x0190, 0x0320, 0,
+ 0, 0, 22, 0x0198, 24, 0, 0, 0, 0x0198, 0, 0x0198, 0, 9),
+ PLL(CLK_APMIXED_ETHPLL, "ethpll", 0x0360, 0x0370, 0,
+ 0, 0, 22, 0x0368, 24, 0, 0, 0, 0x0368, 0, 0x0368, 0, 9),
+ PLL(CLK_APMIXED_MSDCPLL, "msdcpll", 0x0710, 0x0720, 0,
+ 0, 0, 22, 0x0718, 24, 0, 0, 0, 0x0718, 0, 0x0718, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL1, "tvdpll1", 0x00a0, 0x00b0, 0,
+ 0, 0, 22, 0x00a8, 24, 0, 0, 0, 0x00a8, 0, 0x00a8, 0, 9),
+ PLL(CLK_APMIXED_TVDPLL2, "tvdpll2", 0x00c0, 0x00d0, 0,
+ 0, 0, 22, 0x00c8, 24, 0, 0, 0, 0x00c8, 0, 0x00c8, 0, 9),
+ PLL(CLK_APMIXED_MMPLL, "mmpll", 0x00e0, 0x00f0, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x00e8, 24, 0, 0, 0, 0x00e8, 0, 0x00e8, 0, 9),
+ PLL(CLK_APMIXED_MAINPLL, "mainpll", 0x01d0, 0x01e0, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x01d8, 24, 0, 0, 0, 0x01d8, 0, 0x01d8, 0, 9),
+ PLL(CLK_APMIXED_VDECPLL, "vdecpll", 0x0890, 0x08a0, 0,
+ 0, 0, 22, 0x0898, 24, 0, 0, 0, 0x0898, 0, 0x0898, 0, 9),
+ PLL(CLK_APMIXED_IMGPLL, "imgpll", 0x0100, 0x0110, 0,
+ 0, 0, 22, 0x0108, 24, 0, 0, 0, 0x0108, 0, 0x0108, 0, 9),
+ PLL(CLK_APMIXED_UNIVPLL, "univpll", 0x01f0, 0x0700, 0xff000000,
+ HAVE_RST_BAR, BIT(23), 22, 0x01f8, 24, 0, 0, 0, 0x01f8, 0, 0x01f8, 0, 9),
+ PLL(CLK_APMIXED_HDMIPLL1, "hdmipll1", 0x08c0, 0x08d0, 0,
+ 0, 0, 22, 0x08c8, 24, 0, 0, 0, 0x08c8, 0, 0x08c8, 0, 9),
+ PLL(CLK_APMIXED_HDMIPLL2, "hdmipll2", 0x0870, 0x0880, 0,
+ 0, 0, 22, 0x0878, 24, 0, 0, 0, 0x0878, 0, 0x0878, 0, 9),
+ PLL(CLK_APMIXED_HDMIRX_APLL, "hdmirx_apll", 0x08e0, 0x0dd4, 0,
+ 0, 0, 32, 0x08e8, 24, 0, 0, 0, 0x08ec, 0, 0x08e8, 0, 9),
+ PLL(CLK_APMIXED_USB1PLL, "usb1pll", 0x01a0, 0x01b0, 0,
+ 0, 0, 22, 0x01a8, 24, 0, 0, 0, 0x01a8, 0, 0x01a8, 0, 9),
+ PLL(CLK_APMIXED_ADSPPLL, "adsppll", 0x07e0, 0x07f0, 0,
+ 0, 0, 22, 0x07e8, 24, 0, 0, 0, 0x07e8, 0, 0x07e8, 0, 9),
+ PLL(CLK_APMIXED_APLL1, "apll1", 0x07c0, 0x0dc0, 0,
+ 0, 0, 32, 0x07c8, 24, 0x0470, 0x0000, 12, 0x07cc, 0, 0x07c8, 0, 9),
+ PLL(CLK_APMIXED_APLL2, "apll2", 0x0780, 0x0dc4, 0,
+ 0, 0, 32, 0x0788, 24, 0x0474, 0x0000, 13, 0x078c, 0, 0x0788, 0, 9),
+ PLL(CLK_APMIXED_APLL3, "apll3", 0x0760, 0x0dc8, 0,
+ 0, 0, 32, 0x0768, 24, 0x0478, 0x0000, 14, 0x076c, 0, 0x0768, 0, 9),
+ PLL(CLK_APMIXED_APLL4, "apll4", 0x0740, 0x0dcc, 0,
+ 0, 0, 32, 0x0748, 24, 0x047C, 0x0000, 15, 0x074c, 0, 0x0748, 0, 9),
+ PLL(CLK_APMIXED_APLL5, "apll5", 0x07a0, 0x0dd0, 0x100000,
+ 0, 0, 32, 0x07a8, 24, 0x0480, 0x0000, 16, 0x07ac, 0, 0x07a8, 0, 9),
+ PLL(CLK_APMIXED_MFGPLL, "mfgpll", 0x0340, 0x0350, 0,
+ 0, 0, 22, 0x0348, 24, 0, 0, 0, 0x0348, 0, 0x0348, 0, 9),
+ PLL(CLK_APMIXED_DGIPLL, "dgipll", 0x0150, 0x0160, 0,
+ 0, 0, 22, 0x0158, 24, 0, 0, 0, 0x0158, 0, 0x0158, 0, 9),
+};
+
+static const struct of_device_id of_match_clk_mt8195_apmixed[] = {
+ { .compatible = "mediatek,mt8195-apmixedsys", },
+ {}
+};
+
+static int clk_mt8195_apmixed_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APMIXED_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, plls, ARRAY_SIZE(plls), clk_data);
+ r = mtk_clk_register_gates(node, apmixed_clks, ARRAY_SIZE(apmixed_clks), clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_apmixed_data;
+
+ return r;
+
+free_apmixed_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_apmixed_drv = {
+ .probe = clk_mt8195_apmixed_probe,
+ .driver = {
+ .name = "clk-mt8195-apmixed",
+ .of_match_table = of_match_clk_mt8195_apmixed,
+ },
+};
+builtin_platform_driver(clk_mt8195_apmixed_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-apusys_pll.c b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
new file mode 100644
index 000000000000..f1c84186346e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-apusys_pll.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+#define MT8195_PLL_FMAX (3800UL * MHZ)
+#define MT8195_PLL_FMIN (1500UL * MHZ)
+#define MT8195_INTEGER_BITS (8)
+#define MT8195_PCW_BITS (22)
+#define MT8195_POSDIV_SHIFT (24)
+#define MT8195_PLL_EN_BIT (0)
+#define MT8195_PCW_SHIFT (0)
+
+/*
+ * The "en_reg" and "pcw_chg_reg" fields are standard offset register compared
+ * with "reg" field, so set zero to imply it.
+ * No tuner control in apu pll, so set "tuner_XXX" as zero to imply it.
+ * No rst or post divider enable in apu pll, so set "rst_bar_mask" and "en_mask"
+ * as zero to imply it.
+ */
+#define PLL(_id, _name, _reg, _pwr_reg, _pd_reg, _pcw_reg) { \
+ .id = _id, \
+ .name = _name, \
+ .reg = _reg, \
+ .pwr_reg = _pwr_reg, \
+ .en_mask = 0, \
+ .flags = 0, \
+ .rst_bar_mask = 0, \
+ .fmax = MT8195_PLL_FMAX, \
+ .fmin = MT8195_PLL_FMIN, \
+ .pcwbits = MT8195_PCW_BITS, \
+ .pcwibits = MT8195_INTEGER_BITS, \
+ .pd_reg = _pd_reg, \
+ .pd_shift = MT8195_POSDIV_SHIFT, \
+ .tuner_reg = 0, \
+ .tuner_en_reg = 0, \
+ .tuner_en_bit = 0, \
+ .pcw_reg = _pcw_reg, \
+ .pcw_shift = MT8195_PCW_SHIFT, \
+ .pcw_chg_reg = 0, \
+ .en_reg = 0, \
+ .pll_en_bit = MT8195_PLL_EN_BIT, \
+ }
+
+static const struct mtk_pll_data apusys_plls[] = {
+ PLL(CLK_APUSYS_PLL_APUPLL, "apusys_pll_apupll", 0x008, 0x014, 0x00c, 0x00c),
+ PLL(CLK_APUSYS_PLL_NPUPLL, "apusys_pll_npupll", 0x018, 0x024, 0x01c, 0x01c),
+ PLL(CLK_APUSYS_PLL_APUPLL1, "apusys_pll_apupll1", 0x028, 0x034, 0x02c, 0x02c),
+ PLL(CLK_APUSYS_PLL_APUPLL2, "apusys_pll_apupll2", 0x038, 0x044, 0x03c, 0x03c),
+};
+
+static int clk_mt8195_apusys_pll_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_APUSYS_PLL_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ mtk_clk_register_plls(node, apusys_plls, ARRAY_SIZE(apusys_plls), clk_data);
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_apusys_pll_data;
+
+ return r;
+
+free_apusys_pll_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static const struct of_device_id of_match_clk_mt8195_apusys_pll[] = {
+ { .compatible = "mediatek,mt8195-apusys_pll", },
+ {}
+};
+
+static struct platform_driver clk_mt8195_apusys_pll_drv = {
+ .probe = clk_mt8195_apusys_pll_probe,
+ .driver = {
+ .name = "clk-mt8195-apusys_pll",
+ .of_match_table = of_match_clk_mt8195_apusys_pll,
+ },
+};
+builtin_platform_driver(clk_mt8195_apusys_pll_drv);
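For readability (not part of the patch), the first entry of apusys_plls[] expands to the following mtk_pll_data initializer once the PLL() macro above is applied; every value is taken from the macro and the defines in this file.

	/* PLL(CLK_APUSYS_PLL_APUPLL, "apusys_pll_apupll", 0x008, 0x014, 0x00c, 0x00c) */
	{
		.id = CLK_APUSYS_PLL_APUPLL,
		.name = "apusys_pll_apupll",
		.reg = 0x008,
		.pwr_reg = 0x014,
		.en_mask = 0,				/* no post-divider enable */
		.flags = 0,
		.rst_bar_mask = 0,			/* no reset bar */
		.fmax = MT8195_PLL_FMAX,		/* 3800 MHz */
		.fmin = MT8195_PLL_FMIN,		/* 1500 MHz */
		.pcwbits = MT8195_PCW_BITS,		/* 22 */
		.pcwibits = MT8195_INTEGER_BITS,	/* 8 */
		.pd_reg = 0x00c,
		.pd_shift = MT8195_POSDIV_SHIFT,	/* 24 */
		.tuner_reg = 0,				/* no tuner control */
		.tuner_en_reg = 0,
		.tuner_en_bit = 0,
		.pcw_reg = 0x00c,
		.pcw_shift = MT8195_PCW_SHIFT,		/* 0 */
		.pcw_chg_reg = 0,			/* standard offset from .reg */
		.en_reg = 0,				/* standard offset from .reg */
		.pll_en_bit = MT8195_PLL_EN_BIT,	/* 0 */
	},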
diff --git a/drivers/clk/mediatek/clk-mt8195-cam.c b/drivers/clk/mediatek/clk-mt8195-cam.c
new file mode 100644
index 000000000000..3d261fc3848e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-cam.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs cam_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CAM(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &cam_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate cam_clks[] = {
+ GATE_CAM(CLK_CAM_LARB13, "cam_larb13", "top_cam", 0),
+ GATE_CAM(CLK_CAM_LARB14, "cam_larb14", "top_cam", 1),
+ GATE_CAM(CLK_CAM_MAIN_CAM, "cam_main_cam", "top_cam", 3),
+ GATE_CAM(CLK_CAM_MAIN_CAMTG, "cam_main_camtg", "top_cam", 4),
+ GATE_CAM(CLK_CAM_SENINF, "cam_seninf", "top_cam", 5),
+ GATE_CAM(CLK_CAM_GCAMSVA, "cam_gcamsva", "top_cam", 6),
+ GATE_CAM(CLK_CAM_GCAMSVB, "cam_gcamsvb", "top_cam", 7),
+ GATE_CAM(CLK_CAM_GCAMSVC, "cam_gcamsvc", "top_cam", 8),
+ GATE_CAM(CLK_CAM_SCAMSA, "cam_scamsa", "top_cam", 9),
+ GATE_CAM(CLK_CAM_SCAMSB, "cam_scamsb", "top_cam", 10),
+ GATE_CAM(CLK_CAM_CAMSV_TOP, "cam_camsv_top", "top_cam", 11),
+ GATE_CAM(CLK_CAM_CAMSV_CQ, "cam_camsv_cq", "top_cam", 12),
+ GATE_CAM(CLK_CAM_ADL, "cam_adl", "top_cam", 16),
+ GATE_CAM(CLK_CAM_ASG, "cam_asg", "top_cam", 17),
+ GATE_CAM(CLK_CAM_PDA, "cam_pda", "top_cam", 18),
+ GATE_CAM(CLK_CAM_FAKE_ENG, "cam_fake_eng", "top_cam", 19),
+ GATE_CAM(CLK_CAM_MAIN_MRAW0, "cam_main_mraw0", "top_cam", 20),
+ GATE_CAM(CLK_CAM_MAIN_MRAW1, "cam_main_mraw1", "top_cam", 21),
+ GATE_CAM(CLK_CAM_MAIN_MRAW2, "cam_main_mraw2", "top_cam", 22),
+ GATE_CAM(CLK_CAM_MAIN_MRAW3, "cam_main_mraw3", "top_cam", 23),
+ GATE_CAM(CLK_CAM_CAM2MM0_GALS, "cam_cam2mm0_gals", "top_cam", 24),
+ GATE_CAM(CLK_CAM_CAM2MM1_GALS, "cam_cam2mm1_gals", "top_cam", 25),
+ GATE_CAM(CLK_CAM_CAM2SYS_GALS, "cam_cam2sys_gals", "top_cam", 26),
+};
+
+static const struct mtk_gate cam_mraw_clks[] = {
+ GATE_CAM(CLK_CAM_MRAW_LARBX, "cam_mraw_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_MRAW_CAMTG, "cam_mraw_camtg", "top_cam", 2),
+ GATE_CAM(CLK_CAM_MRAW_MRAW0, "cam_mraw_mraw0", "top_cam", 3),
+ GATE_CAM(CLK_CAM_MRAW_MRAW1, "cam_mraw_mraw1", "top_cam", 4),
+ GATE_CAM(CLK_CAM_MRAW_MRAW2, "cam_mraw_mraw2", "top_cam", 5),
+ GATE_CAM(CLK_CAM_MRAW_MRAW3, "cam_mraw_mraw3", "top_cam", 6),
+};
+
+static const struct mtk_gate cam_rawa_clks[] = {
+ GATE_CAM(CLK_CAM_RAWA_LARBX, "cam_rawa_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWA_CAM, "cam_rawa_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWA_CAMTG, "cam_rawa_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_rawb_clks[] = {
+ GATE_CAM(CLK_CAM_RAWB_LARBX, "cam_rawb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_RAWB_CAM, "cam_rawb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_RAWB_CAMTG, "cam_rawb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuva_clks[] = {
+ GATE_CAM(CLK_CAM_YUVA_LARBX, "cam_yuva_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVA_CAM, "cam_yuva_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVA_CAMTG, "cam_yuva_camtg", "top_cam", 2),
+};
+
+static const struct mtk_gate cam_yuvb_clks[] = {
+ GATE_CAM(CLK_CAM_YUVB_LARBX, "cam_yuvb_larbx", "top_cam", 0),
+ GATE_CAM(CLK_CAM_YUVB_CAM, "cam_yuvb_cam", "top_cam", 1),
+ GATE_CAM(CLK_CAM_YUVB_CAMTG, "cam_yuvb_camtg", "top_cam", 2),
+};
+
+static const struct mtk_clk_desc cam_desc = {
+ .clks = cam_clks,
+ .num_clks = ARRAY_SIZE(cam_clks),
+};
+
+static const struct mtk_clk_desc cam_mraw_desc = {
+ .clks = cam_mraw_clks,
+ .num_clks = ARRAY_SIZE(cam_mraw_clks),
+};
+
+static const struct mtk_clk_desc cam_rawa_desc = {
+ .clks = cam_rawa_clks,
+ .num_clks = ARRAY_SIZE(cam_rawa_clks),
+};
+
+static const struct mtk_clk_desc cam_rawb_desc = {
+ .clks = cam_rawb_clks,
+ .num_clks = ARRAY_SIZE(cam_rawb_clks),
+};
+
+static const struct mtk_clk_desc cam_yuva_desc = {
+ .clks = cam_yuva_clks,
+ .num_clks = ARRAY_SIZE(cam_yuva_clks),
+};
+
+static const struct mtk_clk_desc cam_yuvb_desc = {
+ .clks = cam_yuvb_clks,
+ .num_clks = ARRAY_SIZE(cam_yuvb_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_cam[] = {
+ {
+ .compatible = "mediatek,mt8195-camsys",
+ .data = &cam_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_mraw",
+ .data = &cam_mraw_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_rawa",
+ .data = &cam_rawa_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_rawb",
+ .data = &cam_rawb_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_yuva",
+ .data = &cam_yuva_desc,
+ }, {
+ .compatible = "mediatek,mt8195-camsys_yuvb",
+ .data = &cam_yuvb_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_cam_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-cam",
+ .of_match_table = of_match_clk_mt8195_cam,
+ },
+};
+builtin_platform_driver(clk_mt8195_cam_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-ccu.c b/drivers/clk/mediatek/clk-mt8195-ccu.c
new file mode 100644
index 000000000000..f846f1d73605
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-ccu.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs ccu_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_CCU(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ccu_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate ccu_clks[] = {
+ GATE_CCU(CLK_CCU_LARB18, "ccu_larb18", "top_ccu", 0),
+ GATE_CCU(CLK_CCU_AHB, "ccu_ahb", "top_ccu", 1),
+ GATE_CCU(CLK_CCU_CCU0, "ccu_ccu0", "top_ccu", 2),
+ GATE_CCU(CLK_CCU_CCU1, "ccu_ccu1", "top_ccu", 3),
+};
+
+static const struct mtk_clk_desc ccu_desc = {
+ .clks = ccu_clks,
+ .num_clks = ARRAY_SIZE(ccu_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_ccu[] = {
+ {
+ .compatible = "mediatek,mt8195-ccusys",
+ .data = &ccu_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_ccu_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-ccu",
+ .of_match_table = of_match_clk_mt8195_ccu,
+ },
+};
+builtin_platform_driver(clk_mt8195_ccu_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-img.c b/drivers/clk/mediatek/clk-mt8195-img.c
new file mode 100644
index 000000000000..22b52a8f15fe
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-img.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs img_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IMG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &img_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate img_clks[] = {
+ GATE_IMG(CLK_IMG_LARB9, "img_larb9", "top_img", 0),
+ GATE_IMG(CLK_IMG_TRAW0, "img_traw0", "top_img", 1),
+ GATE_IMG(CLK_IMG_TRAW1, "img_traw1", "top_img", 2),
+ GATE_IMG(CLK_IMG_TRAW2, "img_traw2", "top_img", 3),
+ GATE_IMG(CLK_IMG_TRAW3, "img_traw3", "top_img", 4),
+ GATE_IMG(CLK_IMG_DIP0, "img_dip0", "top_img", 8),
+ GATE_IMG(CLK_IMG_WPE0, "img_wpe0", "top_img", 9),
+ GATE_IMG(CLK_IMG_IPE, "img_ipe", "top_img", 10),
+ GATE_IMG(CLK_IMG_DIP1, "img_dip1", "top_img", 11),
+ GATE_IMG(CLK_IMG_WPE1, "img_wpe1", "top_img", 12),
+ GATE_IMG(CLK_IMG_GALS, "img_gals", "top_img", 31),
+};
+
+static const struct mtk_gate img1_dip_top_clks[] = {
+ GATE_IMG(CLK_IMG1_DIP_TOP_LARB10, "img1_dip_top_larb10", "top_img", 0),
+ GATE_IMG(CLK_IMG1_DIP_TOP_DIP_TOP, "img1_dip_top_dip_top", "top_img", 1),
+};
+
+static const struct mtk_gate img1_dip_nr_clks[] = {
+ GATE_IMG(CLK_IMG1_DIP_NR_RESERVE, "img1_dip_nr_reserve", "top_img", 0),
+ GATE_IMG(CLK_IMG1_DIP_NR_DIP_NR, "img1_dip_nr_dip_nr", "top_img", 1),
+};
+
+static const struct mtk_gate img1_wpe_clks[] = {
+ GATE_IMG(CLK_IMG1_WPE_LARB11, "img1_wpe_larb11", "top_img", 0),
+ GATE_IMG(CLK_IMG1_WPE_WPE, "img1_wpe_wpe", "top_img", 1),
+};
+
+static const struct mtk_clk_desc img_desc = {
+ .clks = img_clks,
+ .num_clks = ARRAY_SIZE(img_clks),
+};
+
+static const struct mtk_clk_desc img1_dip_top_desc = {
+ .clks = img1_dip_top_clks,
+ .num_clks = ARRAY_SIZE(img1_dip_top_clks),
+};
+
+static const struct mtk_clk_desc img1_dip_nr_desc = {
+ .clks = img1_dip_nr_clks,
+ .num_clks = ARRAY_SIZE(img1_dip_nr_clks),
+};
+
+static const struct mtk_clk_desc img1_wpe_desc = {
+ .clks = img1_wpe_clks,
+ .num_clks = ARRAY_SIZE(img1_wpe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_img[] = {
+ {
+ .compatible = "mediatek,mt8195-imgsys",
+ .data = &img_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_dip_top",
+ .data = &img1_dip_top_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_dip_nr",
+ .data = &img1_dip_nr_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imgsys1_wpe",
+ .data = &img1_wpe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_img_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-img",
+ .of_match_table = of_match_clk_mt8195_img,
+ },
+};
+builtin_platform_driver(clk_mt8195_img_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
new file mode 100644
index 000000000000..0e2ac0a30aa0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-imp_iic_wrap.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs imp_iic_wrap_cg_regs = {
+ .set_ofs = 0xe08,
+ .clr_ofs = 0xe04,
+ .sta_ofs = 0xe00,
+};
+
+#define GATE_IMP_IIC_WRAP(_id, _name, _parent, _shift) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &imp_iic_wrap_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, CLK_OPS_PARENT_ENABLE)
+
+static const struct mtk_gate imp_iic_wrap_s_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C5, "imp_iic_wrap_s_i2c5", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C6, "imp_iic_wrap_s_i2c6", "top_i2c", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_S_I2C7, "imp_iic_wrap_s_i2c7", "top_i2c", 2),
+};
+
+static const struct mtk_gate imp_iic_wrap_w_clks[] = {
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C0, "imp_iic_wrap_w_i2c0", "top_i2c", 0),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C1, "imp_iic_wrap_w_i2c1", "top_i2c", 1),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C2, "imp_iic_wrap_w_i2c2", "top_i2c", 2),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C3, "imp_iic_wrap_w_i2c3", "top_i2c", 3),
+ GATE_IMP_IIC_WRAP(CLK_IMP_IIC_WRAP_W_I2C4, "imp_iic_wrap_w_i2c4", "top_i2c", 4),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_s_desc = {
+ .clks = imp_iic_wrap_s_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_s_clks),
+};
+
+static const struct mtk_clk_desc imp_iic_wrap_w_desc = {
+ .clks = imp_iic_wrap_w_clks,
+ .num_clks = ARRAY_SIZE(imp_iic_wrap_w_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_imp_iic_wrap[] = {
+ {
+ .compatible = "mediatek,mt8195-imp_iic_wrap_s",
+ .data = &imp_iic_wrap_s_desc,
+ }, {
+ .compatible = "mediatek,mt8195-imp_iic_wrap_w",
+ .data = &imp_iic_wrap_w_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_imp_iic_wrap_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-imp_iic_wrap",
+ .of_match_table = of_match_clk_mt8195_imp_iic_wrap,
+ },
+};
+builtin_platform_driver(clk_mt8195_imp_iic_wrap_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-infra_ao.c b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
new file mode 100644
index 000000000000..5f9b69967459
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-infra_ao.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs infra_ao0_cg_regs = {
+ .set_ofs = 0x80,
+ .clr_ofs = 0x84,
+ .sta_ofs = 0x90,
+};
+
+static const struct mtk_gate_regs infra_ao1_cg_regs = {
+ .set_ofs = 0x88,
+ .clr_ofs = 0x8c,
+ .sta_ofs = 0x94,
+};
+
+static const struct mtk_gate_regs infra_ao2_cg_regs = {
+ .set_ofs = 0xa4,
+ .clr_ofs = 0xa8,
+ .sta_ofs = 0xac,
+};
+
+static const struct mtk_gate_regs infra_ao3_cg_regs = {
+ .set_ofs = 0xc0,
+ .clr_ofs = 0xc4,
+ .sta_ofs = 0xc8,
+};
+
+static const struct mtk_gate_regs infra_ao4_cg_regs = {
+ .set_ofs = 0xe0,
+ .clr_ofs = 0xe4,
+ .sta_ofs = 0xe8,
+};
+
+#define GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO0(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao1_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO1(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO1_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &infra_ao2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao3_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO3(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO3_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &infra_ao4_cg_regs, _shift, \
+ &mtk_clk_gate_ops_setclr, _flag)
+
+#define GATE_INFRA_AO4(_id, _name, _parent, _shift) \
+ GATE_INFRA_AO4_FLAGS(_id, _name, _parent, _shift, 0)
+
+static const struct mtk_gate infra_ao_clks[] = {
+ /* INFRA_AO0 */
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_TMR, "infra_ao_pmic_tmr", "top_pwrap_ulposc", 0),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_AP, "infra_ao_pmic_ap", "top_pwrap_ulposc", 1),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_MD, "infra_ao_pmic_md", "top_pwrap_ulposc", 2),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PMIC_CONN, "infra_ao_pmic_conn", "top_pwrap_ulposc", 3),
+	/* infra_ao_sej is the main clock for the secure engine with JTAG support */
+ GATE_INFRA_AO0_FLAGS(CLK_INFRA_AO_SEJ, "infra_ao_sej", "top_axi", 5, CLK_IS_CRITICAL),
+ GATE_INFRA_AO0(CLK_INFRA_AO_APXGPT, "infra_ao_apxgpt", "top_axi", 6),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE, "infra_ao_gce", "top_axi", 8),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE2, "infra_ao_gce2", "top_axi", 9),
+ GATE_INFRA_AO0(CLK_INFRA_AO_THERM, "infra_ao_therm", "top_axi", 10),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM_H, "infra_ao_pwm_h", "top_axi", 15),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM1, "infra_ao_pwm1", "top_pwm", 16),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM2, "infra_ao_pwm2", "top_pwm", 17),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM3, "infra_ao_pwm3", "top_pwm", 18),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM4, "infra_ao_pwm4", "top_pwm", 19),
+ GATE_INFRA_AO0(CLK_INFRA_AO_PWM, "infra_ao_pwm", "top_pwm", 21),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART0, "infra_ao_uart0", "top_uart", 22),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART1, "infra_ao_uart1", "top_uart", 23),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART2, "infra_ao_uart2", "top_uart", 24),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART3, "infra_ao_uart3", "top_uart", 25),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART4, "infra_ao_uart4", "top_uart", 26),
+ GATE_INFRA_AO0(CLK_INFRA_AO_GCE_26M, "infra_ao_gce_26m", "clk26m", 27),
+ GATE_INFRA_AO0(CLK_INFRA_AO_CQ_DMA_FPC, "infra_ao_cq_dma_fpc", "fpc", 28),
+ GATE_INFRA_AO0(CLK_INFRA_AO_UART5, "infra_ao_uart5", "top_uart", 29),
+ /* INFRA_AO1 */
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_26M, "infra_ao_hdmi_26m", "clk26m", 0),
+ GATE_INFRA_AO1(CLK_INFRA_AO_SPI0, "infra_ao_spi0", "top_spi", 1),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0, "infra_ao_msdc0", "top_msdc50_0_hclk", 2),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1, "infra_ao_msdc1", "top_axi", 4),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CG1_MSDC2, "infra_ao_cg1_msdc2", "top_axi", 5),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC0_SRC, "infra_ao_msdc0_src", "top_msdc50_0", 6),
+ GATE_INFRA_AO1(CLK_INFRA_AO_TRNG, "infra_ao_trng", "top_axi", 9),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUXADC, "infra_ao_auxadc", "clk26m", 10),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CPUM, "infra_ao_cpum", "top_axi", 11),
+ GATE_INFRA_AO1(CLK_INFRA_AO_HDMI_32K, "infra_ao_hdmi_32k", "clk32k", 12),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_H, "infra_ao_cec_66m_h", "top_axi", 13),
+ GATE_INFRA_AO1(CLK_INFRA_AO_IRRX, "infra_ao_irrx", "top_axi", 14),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_26M, "infra_ao_pcie_tl_26m", "clk26m", 15),
+ GATE_INFRA_AO1(CLK_INFRA_AO_MSDC1_SRC, "infra_ao_msdc1_src", "top_msdc30_1", 16),
+ GATE_INFRA_AO1(CLK_INFRA_AO_CEC_66M_B, "infra_ao_cec_66m_b", "top_axi", 17),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_96M, "infra_ao_pcie_tl_96m", "top_tl", 18),
+ /* infra_ao_device_apc is for the device access permission control module */
+ GATE_INFRA_AO1_FLAGS(CLK_INFRA_AO_DEVICE_APC, "infra_ao_device_apc", "top_axi", 20,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO1(CLK_INFRA_AO_ECC_66M_H, "infra_ao_ecc_66m_h", "top_axi", 23),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DEBUGSYS, "infra_ao_debugsys", "top_axi", 24),
+ GATE_INFRA_AO1(CLK_INFRA_AO_AUDIO, "infra_ao_audio", "top_axi", 25),
+ GATE_INFRA_AO1(CLK_INFRA_AO_PCIE_TL_32K, "infra_ao_pcie_tl_32k", "clk32k", 26),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DBG_TRACE, "infra_ao_dbg_trace", "top_axi", 29),
+ GATE_INFRA_AO1(CLK_INFRA_AO_DRAMC_F26M, "infra_ao_dramc_f26m", "clk26m", 31),
+ /* INFRA_AO2 */
+ GATE_INFRA_AO2(CLK_INFRA_AO_IRTX, "infra_ao_irtx", "top_axi", 0),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB, "infra_ao_ssusb", "top_usb_top", 1),
+ GATE_INFRA_AO2(CLK_INFRA_AO_DISP_PWM, "infra_ao_disp_pwm", "top_disp_pwm0", 2),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CLDMA_B, "infra_ao_cldma_b", "top_axi", 3),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AUDIO_26M_B, "infra_ao_audio_26m_b", "clk26m", 4),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI1, "infra_ao_spi1", "top_spi", 6),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI2, "infra_ao_spi2", "top_spi", 9),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI3, "infra_ao_spi3", "top_spi", 10),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_SYS, "infra_ao_unipro_sys", "top_ufs", 11),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UNIPRO_TICK, "infra_ao_unipro_tick", "top_ufs_tick1us", 12),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UFS_MP_SAP_B, "infra_ao_ufs_mp_sap_b", "top_ufs_mp_sap_cfg", 13),
+ GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU, "infra_ao_pwrmcu", "top_pwrmcu", 15),
+ GATE_INFRA_AO2(CLK_INFRA_AO_PWRMCU_BUS_H, "infra_ao_pwrmcu_bus_h", "top_axi", 17),
+ GATE_INFRA_AO2(CLK_INFRA_AO_APDMA_B, "infra_ao_apdma_b", "top_axi", 18),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI4, "infra_ao_spi4", "top_spi", 25),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SPI5, "infra_ao_spi5", "top_spi", 26),
+ GATE_INFRA_AO2(CLK_INFRA_AO_CQ_DMA, "infra_ao_cq_dma", "top_axi", 27),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AES_UFSFDE, "infra_ao_aes_ufsfde", "top_ufs", 28),
+ GATE_INFRA_AO2(CLK_INFRA_AO_AES, "infra_ao_aes", "top_aes_ufsfde", 29),
+ GATE_INFRA_AO2(CLK_INFRA_AO_UFS_TICK, "infra_ao_ufs_tick", "top_ufs_tick1us", 30),
+ GATE_INFRA_AO2(CLK_INFRA_AO_SSUSB_XHCI, "infra_ao_ssusb_xhci", "top_ssusb_xhci", 31),
+ /* INFRA_AO3 */
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC0_SELF, "infra_ao_msdc0f", "top_msdc50_0", 0),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC1_SELF, "infra_ao_msdc1f", "top_msdc50_0", 1),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MSDC2_SELF, "infra_ao_msdc2f", "top_msdc50_0", 2),
+ GATE_INFRA_AO3(CLK_INFRA_AO_I2S_DMA, "infra_ao_i2s_dma", "top_axi", 5),
+ GATE_INFRA_AO3(CLK_INFRA_AO_AP_MSDC0, "infra_ao_ap_msdc0", "top_msdc50_0", 7),
+ GATE_INFRA_AO3(CLK_INFRA_AO_MD_MSDC0, "infra_ao_md_msdc0", "top_msdc50_0", 8),
+ GATE_INFRA_AO3(CLK_INFRA_AO_CG3_MSDC2, "infra_ao_cg3_msdc2", "top_msdc30_2", 9),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU, "infra_ao_gcpu", "top_gcpu", 10),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_PERI_26M, "infra_ao_pcie_peri_26m", "clk26m", 15),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_66M_B, "infra_ao_gcpu_66m_b", "top_axi", 16),
+ GATE_INFRA_AO3(CLK_INFRA_AO_GCPU_133M_B, "infra_ao_gcpu_133m_b", "top_axi", 17),
+ GATE_INFRA_AO3(CLK_INFRA_AO_DISP_PWM1, "infra_ao_disp_pwm1", "top_disp_pwm1", 20),
+ GATE_INFRA_AO3(CLK_INFRA_AO_FBIST2FPC, "infra_ao_fbist2fpc", "top_msdc50_0", 24),
+ /* infra_ao_device_apc_sync is for the device access permission control module */
+ GATE_INFRA_AO3_FLAGS(CLK_INFRA_AO_DEVICE_APC_SYNC, "infra_ao_device_apc_sync", "top_axi", 25,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO3(CLK_INFRA_AO_PCIE_P1_PERI_26M, "infra_ao_pcie_p1_peri_26m", "clk26m", 26),
+ GATE_INFRA_AO3(CLK_INFRA_AO_SPIS0, "infra_ao_spis0", "top_spis", 28),
+ GATE_INFRA_AO3(CLK_INFRA_AO_SPIS1, "infra_ao_spis1", "top_spis", 29),
+ /* INFRA_AO4 */
+ /* infra_ao_133m_m_peri and infra_ao_66m_m_peri are the main peripheral bus clocks */
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_133M_M_PERI, "infra_ao_133m_m_peri", "top_axi", 0,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO4_FLAGS(CLK_INFRA_AO_66M_M_PERI, "infra_ao_66m_m_peri", "top_axi", 1,
+ CLK_IS_CRITICAL),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_PL_P_250M_P0, "infra_ao_pcie_pl_p_250m_p0", "pextp_pipe", 7),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_PL_P_250M_P1, "infra_ao_pcie_pl_p_250m_p1",
+ "ssusb_u3phy_p1_p_p0", 8),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PCIE_P1_TL_96M, "infra_ao_pcie_p1_tl_96m", "top_tl_p1", 17),
+ GATE_INFRA_AO4(CLK_INFRA_AO_AES_MSDCFDE_0P, "infra_ao_aes_msdcfde_0p", "top_aes_msdcfde", 18),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_TX_SYMBOL, "infra_ao_ufs_tx_symbol", "ufs_tx_symbol", 22),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_RX_SYMBOL, "infra_ao_ufs_rx_symbol", "ufs_rx_symbol", 23),
+ GATE_INFRA_AO4(CLK_INFRA_AO_UFS_RX_SYMBOL1, "infra_ao_ufs_rx_symbol1", "ufs_rx_symbol1", 24),
+ GATE_INFRA_AO4(CLK_INFRA_AO_PERI_UFS_MEM_SUB, "infra_ao_peri_ufs_mem_sub", "mem_466m", 31),
+};
+
+static const struct mtk_clk_desc infra_ao_desc = {
+ .clks = infra_ao_clks,
+ .num_clks = ARRAY_SIZE(infra_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_infra_ao[] = {
+ {
+ .compatible = "mediatek,mt8195-infracfg_ao",
+ .data = &infra_ao_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_infra_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-infra_ao",
+ .of_match_table = of_match_clk_mt8195_infra_ao,
+ },
+};
+builtin_platform_driver(clk_mt8195_infra_ao_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-ipe.c b/drivers/clk/mediatek/clk-mt8195-ipe.c
new file mode 100644
index 000000000000..fc1d42b6ac84
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-ipe.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
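+/*
+ * Set, clear and status all share offset 0x0: the IPE gates live in a single
+ * read/modify/write register, hence the no_setclr gate ops below.
+ */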
+static const struct mtk_gate_regs ipe_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_IPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &ipe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate ipe_clks[] = {
+ GATE_IPE(CLK_IPE_DPE, "ipe_dpe", "top_ipe", 0),
+ GATE_IPE(CLK_IPE_FDVT, "ipe_fdvt", "top_ipe", 1),
+ GATE_IPE(CLK_IPE_ME, "ipe_me", "top_ipe", 2),
+ GATE_IPE(CLK_IPE_TOP, "ipe_top", "top_ipe", 3),
+ GATE_IPE(CLK_IPE_SMI_LARB12, "ipe_smi_larb12", "top_ipe", 4),
+};
+
+static const struct mtk_clk_desc ipe_desc = {
+ .clks = ipe_clks,
+ .num_clks = ARRAY_SIZE(ipe_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_ipe[] = {
+ {
+ .compatible = "mediatek,mt8195-ipesys",
+ .data = &ipe_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_ipe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-ipe",
+ .of_match_table = of_match_clk_mt8195_ipe,
+ },
+};
+builtin_platform_driver(clk_mt8195_ipe_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-mfg.c b/drivers/clk/mediatek/clk-mt8195-mfg.c
new file mode 100644
index 000000000000..aca6d9c0837c
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-mfg.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs mfg_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_MFG(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &mfg_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate mfg_clks[] = {
+ GATE_MFG(CLK_MFG_BG3D, "mfg_bg3d", "top_mfg_core_tmp", 0),
+};
+
+static const struct mtk_clk_desc mfg_desc = {
+ .clks = mfg_clks,
+ .num_clks = ARRAY_SIZE(mfg_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_mfg[] = {
+ {
+ .compatible = "mediatek,mt8195-mfgcfg",
+ .data = &mfg_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_mfg_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-mfg",
+ .of_match_table = of_match_clk_mt8195_mfg,
+ },
+};
+builtin_platform_driver(clk_mt8195_mfg_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-peri_ao.c b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
new file mode 100644
index 000000000000..907a92b22de8
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-peri_ao.c
@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs peri_ao_cg_regs = {
+ .set_ofs = 0x10,
+ .clr_ofs = 0x14,
+ .sta_ofs = 0x18,
+};
+
+#define GATE_PERI_AO(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &peri_ao_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate peri_ao_clks[] = {
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET, "peri_ao_ethernet", "top_axi", 0),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_BUS, "peri_ao_ethernet_bus", "top_axi", 1),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_BUS, "peri_ao_flashif_bus", "top_axi", 3),
+ GATE_PERI_AO(CLK_PERI_AO_FLASHIF_FLASH, "peri_ao_flashif_flash", "top_spinor", 5),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_1P_BUS, "peri_ao_ssusb_1p_bus", "top_usb_top_1p", 7),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_1P_XHCI, "peri_ao_ssusb_1p_xhci", "top_ssusb_xhci_1p", 8),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_BUS, "peri_ao_ssusb_2p_bus", "top_usb_top_2p", 9),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_2P_XHCI, "peri_ao_ssusb_2p_xhci", "top_ssusb_xhci_2p", 10),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_BUS, "peri_ao_ssusb_3p_bus", "top_usb_top_3p", 11),
+ GATE_PERI_AO(CLK_PERI_AO_SSUSB_3P_XHCI, "peri_ao_ssusb_3p_xhci", "top_ssusb_xhci_3p", 12),
+ GATE_PERI_AO(CLK_PERI_AO_SPINFI, "peri_ao_spinfi", "top_spinfi_bclk", 15),
+ GATE_PERI_AO(CLK_PERI_AO_ETHERNET_MAC, "peri_ao_ethernet_mac", "top_snps_eth_250m", 16),
+ GATE_PERI_AO(CLK_PERI_AO_NFI_H, "peri_ao_nfi_h", "top_axi", 19),
+ GATE_PERI_AO(CLK_PERI_AO_FNFI1X, "peri_ao_fnfi1x", "top_nfi1x", 20),
+ GATE_PERI_AO(CLK_PERI_AO_PCIE_P0_MEM, "peri_ao_pcie_p0_mem", "mem_466m", 24),
+ GATE_PERI_AO(CLK_PERI_AO_PCIE_P1_MEM, "peri_ao_pcie_p1_mem", "mem_466m", 25),
+};
+
+static const struct mtk_clk_desc peri_ao_desc = {
+ .clks = peri_ao_clks,
+ .num_clks = ARRAY_SIZE(peri_ao_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_peri_ao[] = {
+ {
+ .compatible = "mediatek,mt8195-pericfg_ao",
+ .data = &peri_ao_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_peri_ao_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-peri_ao",
+ .of_match_table = of_match_clk_mt8195_peri_ao,
+ },
+};
+builtin_platform_driver(clk_mt8195_peri_ao_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-scp_adsp.c b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
new file mode 100644
index 000000000000..26b4846c5894
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-scp_adsp.c
@@ -0,0 +1,47 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
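+/*
+ * Set, clear and status all share offset 0x180: the gate sits in a single
+ * read/modify/write register, hence the no_setclr gate ops below.
+ */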
+static const struct mtk_gate_regs scp_adsp_cg_regs = {
+ .set_ofs = 0x180,
+ .clr_ofs = 0x180,
+ .sta_ofs = 0x180,
+};
+
+#define GATE_SCP_ADSP(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &scp_adsp_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr)
+
+static const struct mtk_gate scp_adsp_clks[] = {
+ GATE_SCP_ADSP(CLK_SCP_ADSP_AUDIODSP, "scp_adsp_audiodsp", "top_adsp", 0),
+};
+
+static const struct mtk_clk_desc scp_adsp_desc = {
+ .clks = scp_adsp_clks,
+ .num_clks = ARRAY_SIZE(scp_adsp_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_scp_adsp[] = {
+ {
+ .compatible = "mediatek,mt8195-scp_adsp",
+ .data = &scp_adsp_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_scp_adsp_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-scp_adsp",
+ .of_match_table = of_match_clk_mt8195_scp_adsp,
+ },
+};
+builtin_platform_driver(clk_mt8195_scp_adsp_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c
new file mode 100644
index 000000000000..3e2aba9c40bb
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c
@@ -0,0 +1,1273 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+#include "clk-mux.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+static DEFINE_SPINLOCK(mt8195_clk_lock);
+
+static const struct mtk_fixed_clk top_fixed_clks[] = {
+ FIXED_CLK(CLK_TOP_IN_DGI, "in_dgi", NULL, 165000000),
+ FIXED_CLK(CLK_TOP_ULPOSC1, "ulposc1", NULL, 248000000),
+ FIXED_CLK(CLK_TOP_ULPOSC2, "ulposc2", NULL, 326000000),
+ FIXED_CLK(CLK_TOP_MEM_466M, "mem_466m", NULL, 533000000),
+ FIXED_CLK(CLK_TOP_MPHONE_SLAVE_B, "mphone_slave_b", NULL, 49152000),
+ FIXED_CLK(CLK_TOP_PEXTP_PIPE, "pextp_pipe", NULL, 250000000),
+ FIXED_CLK(CLK_TOP_UFS_RX_SYMBOL, "ufs_rx_symbol", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_UFS_TX_SYMBOL, "ufs_tx_symbol", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_SSUSB_U3PHY_P1_P_P0, "ssusb_u3phy_p1_p_p0", NULL, 131000000),
+ FIXED_CLK(CLK_TOP_UFS_RX_SYMBOL1, "ufs_rx_symbol1", NULL, 166000000),
+ FIXED_CLK(CLK_TOP_FPC, "fpc", NULL, 50000000),
+ FIXED_CLK(CLK_TOP_HDMIRX_P, "hdmirx_p", NULL, 594000000),
+};
+
+static const struct mtk_fixed_factor top_divs[] = {
+ FACTOR(CLK_TOP_CLK26M_D2, "clk26m_d2", "clk26m", 1, 2),
+ FACTOR(CLK_TOP_CLK26M_D52, "clk26m_d52", "clk26m", 1, 52),
+ FACTOR(CLK_TOP_IN_DGI_D2, "in_dgi_d2", "in_dgi", 1, 2),
+ FACTOR(CLK_TOP_IN_DGI_D4, "in_dgi_d4", "in_dgi", 1, 4),
+ FACTOR(CLK_TOP_IN_DGI_D6, "in_dgi_d6", "in_dgi", 1, 6),
+ FACTOR(CLK_TOP_IN_DGI_D8, "in_dgi_d8", "in_dgi", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D3, "mainpll_d3", "mainpll", 1, 3),
+ FACTOR(CLK_TOP_MAINPLL_D4, "mainpll_d4", "mainpll", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D2, "mainpll_d4_d2", "mainpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D4_D4, "mainpll_d4_d4", "mainpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D4_D8, "mainpll_d4_d8", "mainpll_d4", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D5, "mainpll_d5", "mainpll", 1, 5),
+ FACTOR(CLK_TOP_MAINPLL_D5_D2, "mainpll_d5_d2", "mainpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D5_D4, "mainpll_d5_d4", "mainpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D5_D8, "mainpll_d5_d8", "mainpll_d5", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D6, "mainpll_d6", "mainpll", 1, 6),
+ FACTOR(CLK_TOP_MAINPLL_D6_D2, "mainpll_d6_d2", "mainpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D6_D4, "mainpll_d6_d4", "mainpll_d6", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D6_D8, "mainpll_d6_d8", "mainpll_d6", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D7, "mainpll_d7", "mainpll", 1, 7),
+ FACTOR(CLK_TOP_MAINPLL_D7_D2, "mainpll_d7_d2", "mainpll_d7", 1, 2),
+ FACTOR(CLK_TOP_MAINPLL_D7_D4, "mainpll_d7_d4", "mainpll_d7", 1, 4),
+ FACTOR(CLK_TOP_MAINPLL_D7_D8, "mainpll_d7_d8", "mainpll_d7", 1, 8),
+ FACTOR(CLK_TOP_MAINPLL_D9, "mainpll_d9", "mainpll", 1, 9),
+ FACTOR(CLK_TOP_UNIVPLL_D2, "univpll_d2", "univpll", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D3, "univpll_d3", "univpll", 1, 3),
+ FACTOR(CLK_TOP_UNIVPLL_D4, "univpll_d4", "univpll", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D2, "univpll_d4_d2", "univpll_d4", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D4, "univpll_d4_d4", "univpll_d4", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D4_D8, "univpll_d4_d8", "univpll_d4", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D5, "univpll_d5", "univpll", 1, 5),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D2, "univpll_d5_d2", "univpll_d5", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D4, "univpll_d5_d4", "univpll_d5", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D5_D8, "univpll_d5_d8", "univpll_d5", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6, "univpll_d6", "univpll", 1, 6),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D2, "univpll_d6_d2", "univpll_d6", 1, 2),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D4, "univpll_d6_d4", "univpll_d6", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D8, "univpll_d6_d8", "univpll_d6", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_D6_D16, "univpll_d6_d16", "univpll_d6", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_D7, "univpll_d7", "univpll", 1, 7),
+ FACTOR(CLK_TOP_UNIVPLL_192M, "univpll_192m", "univpll", 1, 13),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D4, "univpll_192m_d4", "univpll_192m", 1, 4),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D8, "univpll_192m_d8", "univpll_192m", 1, 8),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D16, "univpll_192m_d16", "univpll_192m", 1, 16),
+ FACTOR(CLK_TOP_UNIVPLL_192M_D32, "univpll_192m_d32", "univpll_192m", 1, 32),
+ FACTOR(CLK_TOP_APLL1_D3, "apll1_d3", "apll1", 1, 3),
+ FACTOR(CLK_TOP_APLL1_D4, "apll1_d4", "apll1", 1, 4),
+ FACTOR(CLK_TOP_APLL2_D3, "apll2_d3", "apll2", 1, 3),
+ FACTOR(CLK_TOP_APLL2_D4, "apll2_d4", "apll2", 1, 4),
+ FACTOR(CLK_TOP_APLL3_D4, "apll3_d4", "apll3", 1, 4),
+ FACTOR(CLK_TOP_APLL4_D4, "apll4_d4", "apll4", 1, 4),
+ FACTOR(CLK_TOP_APLL5_D4, "apll5_d4", "apll5", 1, 4),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D3, "hdmirx_apll_d3", "hdmirx_apll", 1, 3),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D4, "hdmirx_apll_d4", "hdmirx_apll", 1, 4),
+ FACTOR(CLK_TOP_HDMIRX_APLL_D6, "hdmirx_apll_d6", "hdmirx_apll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D4, "mmpll_d4", "mmpll", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D4_D2, "mmpll_d4_d2", "mmpll_d4", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D4_D4, "mmpll_d4_d4", "mmpll_d4", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D5, "mmpll_d5", "mmpll", 1, 5),
+ FACTOR(CLK_TOP_MMPLL_D5_D2, "mmpll_d5_d2", "mmpll_d5", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D5_D4, "mmpll_d5_d4", "mmpll_d5", 1, 4),
+ FACTOR(CLK_TOP_MMPLL_D6, "mmpll_d6", "mmpll", 1, 6),
+ FACTOR(CLK_TOP_MMPLL_D6_D2, "mmpll_d6_d2", "mmpll_d6", 1, 2),
+ FACTOR(CLK_TOP_MMPLL_D7, "mmpll_d7", "mmpll", 1, 7),
+ FACTOR(CLK_TOP_MMPLL_D9, "mmpll_d9", "mmpll", 1, 9),
+ FACTOR(CLK_TOP_TVDPLL1_D2, "tvdpll1_d2", "tvdpll1", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL1_D4, "tvdpll1_d4", "tvdpll1", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL1_D8, "tvdpll1_d8", "tvdpll1", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL1_D16, "tvdpll1_d16", "tvdpll1", 1, 16),
+ FACTOR(CLK_TOP_TVDPLL2_D2, "tvdpll2_d2", "tvdpll2", 1, 2),
+ FACTOR(CLK_TOP_TVDPLL2_D4, "tvdpll2_d4", "tvdpll2", 1, 4),
+ FACTOR(CLK_TOP_TVDPLL2_D8, "tvdpll2_d8", "tvdpll2", 1, 8),
+ FACTOR(CLK_TOP_TVDPLL2_D16, "tvdpll2_d16", "tvdpll2", 1, 16),
+ FACTOR(CLK_TOP_MSDCPLL_D2, "msdcpll_d2", "msdcpll", 1, 2),
+ FACTOR(CLK_TOP_MSDCPLL_D4, "msdcpll_d4", "msdcpll", 1, 4),
+ FACTOR(CLK_TOP_MSDCPLL_D16, "msdcpll_d16", "msdcpll", 1, 16),
+ FACTOR(CLK_TOP_ETHPLL_D2, "ethpll_d2", "ethpll", 1, 2),
+ FACTOR(CLK_TOP_ETHPLL_D8, "ethpll_d8", "ethpll", 1, 8),
+ FACTOR(CLK_TOP_ETHPLL_D10, "ethpll_d10", "ethpll", 1, 10),
+ FACTOR(CLK_TOP_DGIPLL_D2, "dgipll_d2", "dgipll", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D2, "ulposc1_d2", "ulposc1", 1, 2),
+ FACTOR(CLK_TOP_ULPOSC1_D4, "ulposc1_d4", "ulposc1", 1, 4),
+ FACTOR(CLK_TOP_ULPOSC1_D7, "ulposc1_d7", "ulposc1", 1, 7),
+ FACTOR(CLK_TOP_ULPOSC1_D8, "ulposc1_d8", "ulposc1", 1, 8),
+ FACTOR(CLK_TOP_ULPOSC1_D10, "ulposc1_d10", "ulposc1", 1, 10),
+ FACTOR(CLK_TOP_ULPOSC1_D16, "ulposc1_d16", "ulposc1", 1, 16),
+ FACTOR(CLK_TOP_ADSPPLL_D2, "adsppll_d2", "adsppll", 1, 2),
+ FACTOR(CLK_TOP_ADSPPLL_D4, "adsppll_d4", "adsppll", 1, 4),
+ FACTOR(CLK_TOP_ADSPPLL_D8, "adsppll_d8", "adsppll", 1, 8),
+};
+
+static const char * const axi_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "ulposc1_d4"
+};
+
+static const char * const spm_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "mainpll_d7_d4",
+ "clk32k"
+};
+
+static const char * const scp_parents[] = {
+ "clk26m",
+ "univpll_d4",
+ "mainpll_d6",
+ "univpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "mainpll_d6_d2"
+};
+
+static const char * const bus_aximem_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6"
+};
+
+static const char * const vpp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ethdr_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d4",
+ "mmpll_d5_d4",
+ "tvdpll1",
+ "tvdpll2",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const ipe_parents[] = {
+ "clk26m",
+ "imgpll",
+ "mainpll_d4",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const cam_parents[] = {
+ "clk26m",
+ "mainpll_d4",
+ "mmpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "imgpll"
+};
+
+static const char * const ccu_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d5",
+ "mainpll_d6",
+ "mmpll_d6",
+ "mmpll_d7",
+ "univpll_d4_d2",
+ "univpll_d7"
+};
+
+static const char * const img_parents[] = {
+ "clk26m",
+ "imgpll",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const camtm_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d6_d4"
+};
+
+static const char * const dsp_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp1_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d4_d2",
+ "univpll_d5",
+ "mmpll_d5",
+ "univpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const dsp2_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "mainpll_d4",
+ "univpll_d4",
+ "mmpll_d4",
+ "mainpll_d3",
+ "univpll_d3"
+};
+
+static const char * const ipu_if_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d5",
+ "univpll_d4",
+ "mmpll_d4"
+};
+
+static const char * const mfg_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "univpll_d6",
+ "univpll_d7"
+};
+
+static const char * const camtg_parents[] = {
+ "clk26m",
+ "univpll_192m_d8",
+ "univpll_d6_d8",
+ "univpll_192m_d4",
+ "univpll_d6_d16",
+ "clk26m_d2",
+ "univpll_192m_d16",
+ "univpll_192m_d32"
+};
+
+static const char * const uart_parents[] = {
+ "clk26m",
+ "univpll_d6_d8"
+};
+
+static const char * const spi_parents[] = {
+ "clk26m",
+ "mainpll_d5_d4",
+ "mainpll_d6_d4",
+ "msdcpll_d4",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d4_d4",
+ "univpll_d5_d4"
+};
+
+static const char * const spis_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "univpll_d6_d2",
+ "univpll_d4_d4",
+ "univpll_d6_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const msdc50_0_h_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const msdc50_0_parents[] = {
+ "clk26m",
+ "msdcpll",
+ "msdcpll_d2",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const msdc30_parents[] = {
+ "clk26m",
+ "univpll_d6_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7_d2",
+ "msdcpll_d2"
+};
+
+static const char * const intdir_parents[] = {
+ "clk26m",
+ "univpll_d6",
+ "mainpll_d4",
+ "univpll_d4"
+};
+
+static const char * const aud_intbus_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d7_d4"
+};
+
+static const char * const audio_h_parents[] = {
+ "clk26m",
+ "univpll_d7",
+ "apll1",
+ "apll2"
+};
+
+static const char * const pwrap_ulposc_parents[] = {
+ "ulposc1_d10",
+ "clk26m",
+ "ulposc1_d4",
+ "ulposc1_d7",
+ "ulposc1_d8",
+ "ulposc1_d16",
+ "mainpll_d4_d8",
+ "univpll_d5_d8"
+};
+
+static const char * const atb_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const pwrmcu_parents[] = {
+ "clk26m",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2",
+ "mainpll_d5_d2",
+ "mainpll_d9",
+ "mainpll_d4_d2"
+};
+
+static const char * const dp_parents[] = {
+ "clk26m",
+ "tvdpll1_d2",
+ "tvdpll2_d2",
+ "tvdpll1_d4",
+ "tvdpll2_d4",
+ "tvdpll1_d8",
+ "tvdpll2_d8",
+ "tvdpll1_d16",
+ "tvdpll2_d16"
+};
+
+static const char * const disp_pwm_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "ulposc1_d2",
+ "ulposc1_d4",
+ "ulposc1_d16"
+};
+
+static const char * const usb_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d2"
+};
+
+static const char * const i2c_parents[] = {
+ "clk26m",
+ "mainpll_d4_d8",
+ "univpll_d5_d4"
+};
+
+static const char * const seninf_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "univpll_d6_d2",
+ "univpll_d4_d2",
+ "univpll_d7",
+ "univpll_d6",
+ "mmpll_d6",
+ "univpll_d5"
+};
+
+static const char * const gcpu_parents[] = {
+ "clk26m",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mmpll_d5_d2",
+ "univpll_d5_d2"
+};
+
+static const char * const dxcc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8"
+};
+
+static const char * const dpmaif_parents[] = {
+ "clk26m",
+ "univpll_d4_d4",
+ "mainpll_d6",
+ "mainpll_d4_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const aes_fde_parents[] = {
+ "clk26m",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "mainpll_d4_d4",
+ "univpll_d4_d2",
+ "univpll_d6"
+};
+
+static const char * const ufs_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d4_d8",
+ "univpll_d4_d4",
+ "mainpll_d6_d2",
+ "univpll_d6_d2",
+ "msdcpll_d2"
+};
+
+static const char * const ufs_tick1us_parents[] = {
+ "clk26m_d52",
+ "clk26m"
+};
+
+static const char * const ufs_mp_sap_parents[] = {
+ "clk26m",
+ "msdcpll_d16"
+};
+
+static const char * const venc_parents[] = {
+ "clk26m",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "univpll_d6",
+ "mmpll_d6",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d4_d4",
+ "mainpll_d4",
+ "univpll_d4",
+ "univpll_d5",
+ "univpll_d5_d2",
+ "mainpll_d5"
+};
+
+static const char * const vdec_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d5",
+ "mmpll_d6",
+ "mmpll_d5",
+ "vdecpll",
+ "univpll_d4",
+ "mmpll_d4",
+ "univpll_d6_d2",
+ "mmpll_d9",
+ "univpll_d6",
+ "univpll_d5",
+ "mainpll_d4"
+};
+
+static const char * const pwm_parents[] = {
+ "clk26m",
+ "univpll_d4_d8"
+};
+
+static const char * const mcupm_parents[] = {
+ "clk26m",
+ "mainpll_d6_d2",
+ "mainpll_d7_d4",
+};
+
+static const char * const spmi_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "ulposc1_d8",
+ "ulposc1_d10",
+ "ulposc1_d16",
+ "ulposc1_d7",
+ "clk32k",
+ "mainpll_d7_d8",
+ "mainpll_d6_d8",
+ "mainpll_d5_d8"
+};
+
+static const char * const dvfsrc_parents[] = {
+ "clk26m",
+ "ulposc1_d10",
+ "univpll_d6_d8",
+ "msdcpll_d16"
+};
+
+static const char * const tl_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "mainpll_d4_d4"
+};
+
+static const char * const dsi_occ_parents[] = {
+ "clk26m",
+ "mainpll_d6_d2",
+ "univpll_d5_d2",
+ "univpll_d4_d2"
+};
+
+static const char * const wpe_vpp_parents[] = {
+ "clk26m",
+ "mainpll_d5_d2",
+ "mmpll_d6_d2",
+ "univpll_d5_d2",
+ "mainpll_d4_d2",
+ "univpll_d4_d2",
+ "mmpll_d4_d2",
+ "mainpll_d6",
+ "mmpll_d7",
+ "univpll_d6",
+ "mainpll_d5",
+ "univpll_d5",
+ "mainpll_d4",
+ "tvdpll1",
+ "univpll_d4"
+};
+
+static const char * const hdcp_parents[] = {
+ "clk26m",
+ "univpll_d4_d8",
+ "mainpll_d5_d8",
+ "univpll_d6_d4"
+};
+
+static const char * const hdcp_24m_parents[] = {
+ "clk26m",
+ "univpll_192m_d4",
+ "univpll_192m_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const hd20_dacr_ref_parents[] = {
+ "clk26m",
+ "univpll_d4_d2",
+ "univpll_d4_d4",
+ "univpll_d4_d8"
+};
+
+static const char * const hd20_hdcp_c_parents[] = {
+ "clk26m",
+ "msdcpll_d4",
+ "univpll_d4_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const hdmi_xtal_parents[] = {
+ "clk26m",
+ "clk26m_d2"
+};
+
+static const char * const hdmi_apb_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "msdcpll_d2"
+};
+
+static const char * const snps_eth_250m_parents[] = {
+ "clk26m",
+ "ethpll_d2"
+};
+
+static const char * const snps_eth_62p4m_ptp_parents[] = {
+ "apll2_d3",
+ "apll1_d3",
+ "clk26m",
+ "ethpll_d8"
+};
+
+static const char * const snps_eth_50m_rmii_parents[] = {
+ "clk26m",
+ "ethpll_d10"
+};
+
+static const char * const dgi_out_parents[] = {
+ "clk26m",
+ "dgipll",
+ "dgipll_d2",
+ "in_dgi",
+ "in_dgi_d2",
+ "mmpll_d4_d4"
+};
+
+static const char * const nna_parents[] = {
+ "clk26m",
+ "nnapll",
+ "univpll_d4",
+ "mainpll_d4",
+ "univpll_d5",
+ "mmpll_d6",
+ "univpll_d6",
+ "mainpll_d6",
+ "mmpll_d4_d2",
+ "univpll_d4_d2",
+ "mainpll_d4_d2",
+ "mmpll_d6_d2"
+};
+
+static const char * const adsp_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d6",
+ "mainpll_d5_d2",
+ "univpll_d4_d4",
+ "univpll_d4",
+ "univpll_d6",
+ "ulposc1",
+ "adsppll",
+ "adsppll_d2",
+ "adsppll_d4",
+ "adsppll_d8"
+};
+
+static const char * const asm_parents[] = {
+ "clk26m",
+ "univpll_d6_d4",
+ "univpll_d6_d2",
+ "mainpll_d5_d2"
+};
+
+static const char * const apll1_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const apll2_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const apll3_parents[] = {
+ "clk26m",
+ "apll3_d4"
+};
+
+static const char * const apll4_parents[] = {
+ "clk26m",
+ "apll4_d4"
+};
+
+static const char * const apll5_parents[] = {
+ "clk26m",
+ "apll5_d4"
+};
+
+static const char * const i2s_parents[] = {
+ "clk26m",
+ "apll1",
+ "apll2",
+ "apll3",
+ "apll4",
+ "apll5",
+ "hdmirx_apll"
+};
+
+static const char * const a1sys_hp_parents[] = {
+ "clk26m",
+ "apll1_d4"
+};
+
+static const char * const a2sys_parents[] = {
+ "clk26m",
+ "apll2_d4"
+};
+
+static const char * const a3sys_parents[] = {
+ "clk26m",
+ "apll3_d4",
+ "apll4_d4",
+ "apll5_d4",
+ "hdmirx_apll_d3",
+ "hdmirx_apll_d4",
+ "hdmirx_apll_d6"
+};
+
+static const char * const spinfi_b_parents[] = {
+ "clk26m",
+ "univpll_d6_d8",
+ "univpll_d5_d8",
+ "mainpll_d4_d8",
+ "mainpll_d7_d4",
+ "mainpll_d6_d4",
+ "univpll_d6_d4",
+ "univpll_d5_d4"
+};
+
+static const char * const nfi1x_parents[] = {
+ "clk26m",
+ "univpll_d5_d4",
+ "mainpll_d7_d4",
+ "mainpll_d6_d4",
+ "univpll_d6_d4",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d6_d2"
+};
+
+static const char * const ecc_parents[] = {
+ "clk26m",
+ "mainpll_d4_d4",
+ "mainpll_d5_d2",
+ "mainpll_d4_d2",
+ "mainpll_d6",
+ "univpll_d6"
+};
+
+static const char * const audio_local_bus_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d4_d4",
+ "mainpll_d7_d2",
+ "mainpll_d4_d2",
+ "mainpll_d5_d2",
+ "mainpll_d6_d2",
+ "mainpll_d7",
+ "univpll_d6",
+ "ulposc1",
+ "ulposc1_d4",
+ "ulposc1_d2"
+};
+
+static const char * const spinor_parents[] = {
+ "clk26m",
+ "clk26m_d2",
+ "mainpll_d7_d8",
+ "univpll_d6_d8"
+};
+
+static const char * const dvio_dgi_ref_parents[] = {
+ "clk26m",
+ "in_dgi",
+ "in_dgi_d2",
+ "in_dgi_d4",
+ "in_dgi_d6",
+ "in_dgi_d8",
+ "mmpll_d4_d4"
+};
+
+static const char * const ulposc_parents[] = {
+ "ulposc1",
+ "ethpll_d2",
+ "mainpll_d4_d2",
+ "ethpll_d10"
+};
+
+static const char * const ulposc_core_parents[] = {
+ "ulposc2",
+ "univpll_d7",
+ "mainpll_d6",
+ "ethpll_d10"
+};
+
+static const char * const srck_parents[] = {
+ "ulposc1_d10",
+ "clk26m"
+};
+
+static const char * const mfg_fast_parents[] = {
+ "top_mfg_core_tmp",
+ "mfgpll"
+};
+
+static const struct mtk_mux top_mtk_muxes[] = {
+ /*
+ * CLK_CFG_0
+ * top_axi and top_bus_aximem are bus clocks and should not be disabled by Linux.
+ * top_spm and top_scp are the main clocks of the always-on co-processor.
+ */
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_AXI, "top_axi",
+ axi_parents, 0x020, 0x024, 0x028, 0, 3, 7, 0x04, 0, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SPM, "top_spm",
+ spm_parents, 0x020, 0x024, 0x028, 8, 2, 15, 0x04, 1, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SCP, "top_scp",
+ scp_parents, 0x020, 0x024, 0x028, 16, 3, 23, 0x04, 2, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_BUS_AXIMEM, "top_bus_aximem",
+ bus_aximem_parents, 0x020, 0x024, 0x028, 24, 3, 31, 0x04, 3, CLK_IS_CRITICAL),
+ /* CLK_CFG_1 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VPP, "top_vpp",
+ vpp_parents, 0x02C, 0x030, 0x034, 0, 4, 7, 0x04, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ETHDR, "top_ethdr",
+ ethdr_parents, 0x02C, 0x030, 0x034, 8, 4, 15, 0x04, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPE, "top_ipe",
+ ipe_parents, 0x02C, 0x030, 0x034, 16, 4, 23, 0x04, 6),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAM, "top_cam",
+ cam_parents, 0x02C, 0x030, 0x034, 24, 4, 31, 0x04, 7),
+ /* CLK_CFG_2 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CCU, "top_ccu",
+ ccu_parents, 0x038, 0x03C, 0x040, 0, 4, 7, 0x04, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IMG, "top_img",
+ img_parents, 0x038, 0x03C, 0x040, 8, 4, 15, 0x04, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTM, "top_camtm",
+ camtm_parents, 0x038, 0x03C, 0x040, 16, 2, 23, 0x04, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP, "top_dsp",
+ dsp_parents, 0x038, 0x03C, 0x040, 24, 3, 31, 0x04, 11),
+ /* CLK_CFG_3 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP1, "top_dsp1",
+ dsp1_parents, 0x044, 0x048, 0x04C, 0, 3, 7, 0x04, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP2, "top_dsp2",
+ dsp1_parents, 0x044, 0x048, 0x04C, 8, 3, 15, 0x04, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP3, "top_dsp3",
+ dsp1_parents, 0x044, 0x048, 0x04C, 16, 3, 23, 0x04, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP4, "top_dsp4",
+ dsp2_parents, 0x044, 0x048, 0x04C, 24, 3, 31, 0x04, 15),
+ /* CLK_CFG_4 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP5, "top_dsp5",
+ dsp2_parents, 0x050, 0x054, 0x058, 0, 3, 7, 0x04, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP6, "top_dsp6",
+ dsp2_parents, 0x050, 0x054, 0x058, 8, 3, 15, 0x04, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSP7, "top_dsp7",
+ dsp_parents, 0x050, 0x054, 0x058, 16, 3, 23, 0x04, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_IPU_IF, "top_ipu_if",
+ ipu_if_parents, 0x050, 0x054, 0x058, 24, 3, 31, 0x04, 19),
+ /* CLK_CFG_5 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MFG_CORE_TMP, "top_mfg_core_tmp",
+ mfg_parents, 0x05C, 0x060, 0x064, 0, 2, 7, 0x04, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG, "top_camtg",
+ camtg_parents, 0x05C, 0x060, 0x064, 8, 3, 15, 0x04, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG2, "top_camtg2",
+ camtg_parents, 0x05C, 0x060, 0x064, 16, 3, 23, 0x04, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG3, "top_camtg3",
+ camtg_parents, 0x05C, 0x060, 0x064, 24, 3, 31, 0x04, 23),
+ /* CLK_CFG_6 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG4, "top_camtg4",
+ camtg_parents, 0x068, 0x06C, 0x070, 0, 3, 7, 0x04, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_CAMTG5, "top_camtg5",
+ camtg_parents, 0x068, 0x06C, 0x070, 8, 3, 15, 0x04, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UART, "top_uart",
+ uart_parents, 0x068, 0x06C, 0x070, 16, 1, 23, 0x04, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPI, "top_spi",
+ spi_parents, 0x068, 0x06C, 0x070, 24, 3, 31, 0x04, 27),
+ /* CLK_CFG_7 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPIS, "top_spis",
+ spis_parents, 0x074, 0x078, 0x07C, 0, 3, 7, 0x04, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0_HCLK, "top_msdc50_0_hclk",
+ msdc50_0_h_parents, 0x074, 0x078, 0x07C, 8, 2, 15, 0x04, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC50_0, "top_msdc50_0",
+ msdc50_0_parents, 0x074, 0x078, 0x07C, 16, 3, 23, 0x04, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_1, "top_msdc30_1",
+ msdc30_parents, 0x074, 0x078, 0x07C, 24, 3, 31, 0x04, 31),
+ /* CLK_CFG_8 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_MSDC30_2, "top_msdc30_2",
+ msdc30_parents, 0x080, 0x084, 0x088, 0, 3, 7, 0x08, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_INTDIR, "top_intdir",
+ intdir_parents, 0x080, 0x084, 0x088, 8, 2, 15, 0x08, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_INTBUS, "top_aud_intbus",
+ aud_intbus_parents, 0x080, 0x084, 0x088, 16, 2, 23, 0x08, 2),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_H, "top_audio_h",
+ audio_h_parents, 0x080, 0x084, 0x088, 24, 2, 31, 0x08, 3),
+ /*
+ * CLK_CFG_9
+ * top_pwrmcu is the main clock of another co-processor and should not
+ * be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWRAP_ULPOSC, "top_pwrap_ulposc",
+ pwrap_ulposc_parents, 0x08C, 0x090, 0x094, 0, 3, 7, 0x08, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ATB, "top_atb",
+ atb_parents, 0x08C, 0x090, 0x094, 8, 2, 15, 0x08, 5),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_PWRMCU, "top_pwrmcu",
+ pwrmcu_parents, 0x08C, 0x090, 0x094, 16, 3, 23, 0x08, 6, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DP, "top_dp",
+ dp_parents, 0x08C, 0x090, 0x094, 24, 4, 31, 0x08, 7),
+ /* CLK_CFG_10 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_EDP, "top_edp",
+ dp_parents, 0x098, 0x09C, 0x0A0, 0, 4, 7, 0x08, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPI, "top_dpi",
+ dp_parents, 0x098, 0x09C, 0x0A0, 8, 4, 15, 0x08, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM0, "top_disp_pwm0",
+ disp_pwm_parents, 0x098, 0x09C, 0x0A0, 16, 3, 23, 0x08, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DISP_PWM1, "top_disp_pwm1",
+ disp_pwm_parents, 0x098, 0x09C, 0x0A0, 24, 3, 31, 0x08, 11),
+ /* CLK_CFG_11 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP, "top_usb_top",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 0, 2, 7, 0x08, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI, "top_ssusb_xhci",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 8, 2, 15, 0x08, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_1P, "top_usb_top_1p",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 16, 2, 23, 0x08, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_1P, "top_ssusb_xhci_1p",
+ usb_parents, 0x0A4, 0x0A8, 0x0AC, 24, 2, 31, 0x08, 15),
+ /* CLK_CFG_12 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_2P, "top_usb_top_2p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 0, 2, 7, 0x08, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_2P, "top_ssusb_xhci_2p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 8, 2, 15, 0x08, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_USB_TOP_3P, "top_usb_top_3p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 16, 2, 23, 0x08, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SSUSB_XHCI_3P, "top_ssusb_xhci_3p",
+ usb_parents, 0x0B0, 0x0B4, 0x0B8, 24, 2, 31, 0x08, 19),
+ /* CLK_CFG_13 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2C, "top_i2c",
+ i2c_parents, 0x0BC, 0x0C0, 0x0C4, 0, 2, 7, 0x08, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF, "top_seninf",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 8, 3, 15, 0x08, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF1, "top_seninf1",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 16, 3, 23, 0x08, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF2, "top_seninf2",
+ seninf_parents, 0x0BC, 0x0C0, 0x0C4, 24, 3, 31, 0x08, 23),
+ /* CLK_CFG_14 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SENINF3, "top_seninf3",
+ seninf_parents, 0x0C8, 0x0CC, 0x0D0, 0, 3, 7, 0x08, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_GCPU, "top_gcpu",
+ gcpu_parents, 0x0C8, 0x0CC, 0x0D0, 8, 3, 15, 0x08, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DXCC, "top_dxcc",
+ dxcc_parents, 0x0C8, 0x0CC, 0x0D0, 16, 2, 23, 0x08, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPMAIF_MAIN, "top_dpmaif_main",
+ dpmaif_parents, 0x0C8, 0x0CC, 0x0D0, 24, 3, 31, 0x08, 27),
+ /* CLK_CFG_15 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_UFSFDE, "top_aes_ufsfde",
+ aes_fde_parents, 0x0D4, 0x0D8, 0x0DC, 0, 3, 7, 0x08, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS, "top_ufs",
+ ufs_parents, 0x0D4, 0x0D8, 0x0DC, 8, 3, 15, 0x08, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS_TICK1US, "top_ufs_tick1us",
+ ufs_tick1us_parents, 0x0D4, 0x0D8, 0x0DC, 16, 1, 23, 0x08, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_UFS_MP_SAP_CFG, "top_ufs_mp_sap_cfg",
+ ufs_mp_sap_parents, 0x0D4, 0x0D8, 0x0DC, 24, 1, 31, 0x08, 31),
+ /*
+ * CLK_CFG_16
+ * top_mcupm is the main clock of another co-processor and should not
+ * be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VENC, "top_venc",
+ venc_parents, 0x0E0, 0x0E4, 0x0E8, 0, 4, 7, 0x0C, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_VDEC, "top_vdec",
+ vdec_parents, 0x0E0, 0x0E4, 0x0E8, 8, 4, 15, 0x0C, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_PWM, "top_pwm",
+ pwm_parents, 0x0E0, 0x0E4, 0x0E8, 16, 1, 23, 0x0C, 2),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_MCUPM, "top_mcupm",
+ mcupm_parents, 0x0E0, 0x0E4, 0x0E8, 24, 2, 31, 0x0C, 3, CLK_IS_CRITICAL),
+ /*
+ * CLK_CFG_17
+ * top_dvfsrc is for internal DVFS usage and should not be handled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_P_MST, "top_spmi_p_mst",
+ spmi_parents, 0x0EC, 0x0F0, 0x0F4, 0, 4, 7, 0x0C, 4),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPMI_M_MST, "top_spmi_m_mst",
+ spmi_parents, 0x0EC, 0x0F0, 0x0F4, 8, 4, 15, 0x0C, 5),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_DVFSRC, "top_dvfsrc",
+ dvfsrc_parents, 0x0EC, 0x0F0, 0x0F4, 16, 2, 23, 0x0C, 6, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL, "top_tl",
+ tl_parents, 0x0EC, 0x0F0, 0x0F4, 24, 2, 31, 0x0C, 7),
+ /* CLK_CFG_18 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_TL_P1, "top_tl_p1",
+ tl_parents, 0x0F8, 0x0FC, 0x0100, 0, 2, 7, 0x0C, 8),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AES_MSDCFDE, "top_aes_msdcfde",
+ aes_fde_parents, 0x0F8, 0x0FC, 0x0100, 8, 3, 15, 0x0C, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DSI_OCC, "top_dsi_occ",
+ dsi_occ_parents, 0x0F8, 0x0FC, 0x0100, 16, 2, 23, 0x0C, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_WPE_VPP, "top_wpe_vpp",
+ wpe_vpp_parents, 0x0F8, 0x0FC, 0x0100, 24, 4, 31, 0x0C, 11),
+ /* CLK_CFG_19 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP, "top_hdcp",
+ hdcp_parents, 0x0104, 0x0108, 0x010C, 0, 2, 7, 0x0C, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDCP_24M, "top_hdcp_24m",
+ hdcp_24m_parents, 0x0104, 0x0108, 0x010C, 8, 2, 15, 0x0C, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HD20_DACR_REF_CLK, "top_hd20_dacr_ref_clk",
+ hd20_dacr_ref_parents, 0x0104, 0x0108, 0x010C, 16, 2, 23, 0x0C, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HD20_HDCP_CCLK, "top_hd20_hdcp_cclk",
+ hd20_hdcp_c_parents, 0x0104, 0x0108, 0x010C, 24, 2, 31, 0x0C, 15),
+ /* CLK_CFG_20 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDMI_XTAL, "top_hdmi_xtal",
+ hdmi_xtal_parents, 0x0110, 0x0114, 0x0118, 0, 1, 7, 0x0C, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_HDMI_APB, "top_hdmi_apb",
+ hdmi_apb_parents, 0x0110, 0x0114, 0x0118, 8, 2, 15, 0x0C, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_250M, "top_snps_eth_250m",
+ snps_eth_250m_parents, 0x0110, 0x0114, 0x0118, 16, 1, 23, 0x0C, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_62P4M_PTP, "top_snps_eth_62p4m_ptp",
+ snps_eth_62p4m_ptp_parents, 0x0110, 0x0114, 0x0118, 24, 2, 31, 0x0C, 19),
+ /* CLK_CFG_21 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SNPS_ETH_50M_RMII, "snps_eth_50m_rmii",
+ snps_eth_50m_rmii_parents, 0x011C, 0x0120, 0x0124, 0, 1, 7, 0x0C, 20),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DGI_OUT, "top_dgi_out",
+ dgi_out_parents, 0x011C, 0x0120, 0x0124, 8, 3, 15, 0x0C, 21),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA0, "top_nna0",
+ nna_parents, 0x011C, 0x0120, 0x0124, 16, 4, 23, 0x0C, 22),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NNA1, "top_nna1",
+ nna_parents, 0x011C, 0x0120, 0x0124, 24, 4, 31, 0x0C, 23),
+ /* CLK_CFG_22 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ADSP, "top_adsp",
+ adsp_parents, 0x0128, 0x012C, 0x0130, 0, 4, 7, 0x0C, 24),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_H, "top_asm_h",
+ asm_parents, 0x0128, 0x012C, 0x0130, 8, 2, 15, 0x0C, 25),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_M, "top_asm_m",
+ asm_parents, 0x0128, 0x012C, 0x0130, 16, 2, 23, 0x0C, 26),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ASM_L, "top_asm_l",
+ asm_parents, 0x0128, 0x012C, 0x0130, 24, 2, 31, 0x0C, 27),
+ /* CLK_CFG_23 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL1, "top_apll1",
+ apll1_parents, 0x0134, 0x0138, 0x013C, 0, 1, 7, 0x0C, 28),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL2, "top_apll2",
+ apll2_parents, 0x0134, 0x0138, 0x013C, 8, 1, 15, 0x0C, 29),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL3, "top_apll3",
+ apll3_parents, 0x0134, 0x0138, 0x013C, 16, 1, 23, 0x0C, 30),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL4, "top_apll4",
+ apll4_parents, 0x0134, 0x0138, 0x013C, 24, 1, 31, 0x0C, 31),
+ /*
+ * CLK_CFG_24
+ * i2so4_mck is not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_APLL5, "top_apll5",
+ apll5_parents, 0x0140, 0x0144, 0x0148, 0, 1, 7, 0x010, 0),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO1_MCK, "top_i2so1_mck",
+ i2s_parents, 0x0140, 0x0144, 0x0148, 8, 3, 15, 0x010, 1),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SO2_MCK, "top_i2so2_mck",
+ i2s_parents, 0x0140, 0x0144, 0x0148, 16, 3, 23, 0x010, 2),
+ /*
+ * CLK_CFG_25
+ * i2so5_mck and i2si4_mck are not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI1_MCK, "top_i2si1_mck",
+ i2s_parents, 0x014C, 0x0150, 0x0154, 8, 3, 15, 0x010, 5),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_I2SI2_MCK, "top_i2si2_mck",
+ i2s_parents, 0x014C, 0x0150, 0x0154, 16, 3, 23, 0x010, 6),
+ /*
+ * CLK_CFG_26
+ * i2si5_mck is not used in MT8195.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DPTX_MCK, "top_dptx_mck",
+ i2s_parents, 0x0158, 0x015C, 0x0160, 8, 3, 15, 0x010, 9),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUD_IEC_CLK, "top_aud_iec_clk",
+ i2s_parents, 0x0158, 0x015C, 0x0160, 16, 3, 23, 0x010, 10),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A1SYS_HP, "top_a1sys_hp",
+ a1sys_hp_parents, 0x0158, 0x015C, 0x0160, 24, 1, 31, 0x010, 11),
+ /* CLK_CFG_27 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A2SYS_HF, "top_a2sys_hf",
+ a2sys_parents, 0x0164, 0x0168, 0x016C, 0, 1, 7, 0x010, 12),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A3SYS_HF, "top_a3sys_hf",
+ a3sys_parents, 0x0164, 0x0168, 0x016C, 8, 3, 15, 0x010, 13),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_A4SYS_HF, "top_a4sys_hf",
+ a3sys_parents, 0x0164, 0x0168, 0x016C, 16, 3, 23, 0x010, 14),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINFI_BCLK, "top_spinfi_bclk",
+ spinfi_b_parents, 0x0164, 0x0168, 0x016C, 24, 3, 31, 0x010, 15),
+ /* CLK_CFG_28 */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_NFI1X, "top_nfi1x",
+ nfi1x_parents, 0x0170, 0x0174, 0x0178, 0, 3, 7, 0x010, 16),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_ECC, "top_ecc",
+ ecc_parents, 0x0170, 0x0174, 0x0178, 8, 3, 15, 0x010, 17),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_AUDIO_LOCAL_BUS, "top_audio_local_bus",
+ audio_local_bus_parents, 0x0170, 0x0174, 0x0178, 16, 4, 23, 0x010, 18),
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_SPINOR, "top_spinor",
+ spinor_parents, 0x0170, 0x0174, 0x0178, 24, 2, 31, 0x010, 19),
+ /*
+ * CLK_CFG_29
+ * top_ulposc/top_ulposc_core/top_srck are clock sources of the always-on
+ * co-processor and should not be disabled by Linux.
+ */
+ MUX_GATE_CLR_SET_UPD(CLK_TOP_DVIO_DGI_REF, "top_dvio_dgi_ref",
+ dvio_dgi_ref_parents, 0x017C, 0x0180, 0x0184, 0, 3, 7, 0x010, 20),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_ULPOSC, "top_ulposc",
+ ulposc_parents, 0x017C, 0x0180, 0x0184, 8, 2, 15, 0x010, 21, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_ULPOSC_CORE, "top_ulposc_core",
+ ulposc_core_parents, 0x017C, 0x0180, 0x0184, 16, 2, 23, 0x010, 22, CLK_IS_CRITICAL),
+ MUX_GATE_CLR_SET_UPD_FLAGS(CLK_TOP_SRCK, "top_srck",
+ srck_parents, 0x017C, 0x0180, 0x0184, 24, 1, 31, 0x010, 23, CLK_IS_CRITICAL),
+ /*
+ * The clocks in CLK_CFG_30 ~ 37 are backup clock sources and do not need
+ * to be handled by Linux.
+ */
+};
+
+static struct mtk_composite top_muxes[] = {
+ /* CLK_MISC_CFG_3 */
+ MUX(CLK_TOP_MFG_CK_FAST_REF, "mfg_ck_fast_ref", mfg_fast_parents, 0x0250, 8, 1),
+};
+
+static const struct mtk_composite top_adj_divs[] = {
+ DIV_GATE(CLK_TOP_APLL12_DIV0, "apll12_div0", "top_i2si1_mck", 0x0320, 0, 0x0328, 8, 0),
+ DIV_GATE(CLK_TOP_APLL12_DIV1, "apll12_div1", "top_i2si2_mck", 0x0320, 1, 0x0328, 8, 8),
+ DIV_GATE(CLK_TOP_APLL12_DIV2, "apll12_div2", "top_i2so1_mck", 0x0320, 2, 0x0328, 8, 16),
+ DIV_GATE(CLK_TOP_APLL12_DIV3, "apll12_div3", "top_i2so2_mck", 0x0320, 3, 0x0328, 8, 24),
+ DIV_GATE(CLK_TOP_APLL12_DIV4, "apll12_div4", "top_aud_iec_clk", 0x0320, 4, 0x0334, 8, 0),
+ /* apll12_div5 ~ 8 are not used in MT8195. */
+ DIV_GATE(CLK_TOP_APLL12_DIV9, "apll12_div9", "top_dptx_mck", 0x0320, 9, 0x0338, 8, 8),
+};
+
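+/*
+ * The TOP0/TOP1 gates each use a single read/modify/write register (set,
+ * clear and status share one offset); the _inv ops handle the inverted
+ * bit polarity.
+ */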
+static const struct mtk_gate_regs top0_cg_regs = {
+ .set_ofs = 0x238,
+ .clr_ofs = 0x238,
+ .sta_ofs = 0x238,
+};
+
+static const struct mtk_gate_regs top1_cg_regs = {
+ .set_ofs = 0x250,
+ .clr_ofs = 0x250,
+ .sta_ofs = 0x250,
+};
+
+#define GATE_TOP0_FLAGS(_id, _name, _parent, _shift, _flag) \
+ GATE_MTK_FLAGS(_id, _name, _parent, &top0_cg_regs, _shift, \
+ &mtk_clk_gate_ops_no_setclr_inv, _flag)
+
+#define GATE_TOP0(_id, _name, _parent, _shift) \
+ GATE_TOP0_FLAGS(_id, _name, _parent, _shift, 0)
+
+#define GATE_TOP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &top1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate top_clks[] = {
+ /* TOP0 */
+ GATE_TOP0(CLK_TOP_CFG_VPP0, "cfg_vpp0", "top_vpp", 0),
+ GATE_TOP0(CLK_TOP_CFG_VPP1, "cfg_vpp1", "top_vpp", 1),
+ GATE_TOP0(CLK_TOP_CFG_VDO0, "cfg_vdo0", "top_vpp", 2),
+ GATE_TOP0(CLK_TOP_CFG_VDO1, "cfg_vdo1", "top_vpp", 3),
+ GATE_TOP0(CLK_TOP_CFG_UNIPLL_SES, "cfg_unipll_ses", "univpll_d2", 4),
+ GATE_TOP0(CLK_TOP_CFG_26M_VPP0, "cfg_26m_vpp0", "clk26m", 5),
+ GATE_TOP0(CLK_TOP_CFG_26M_VPP1, "cfg_26m_vpp1", "clk26m", 6),
+ GATE_TOP0(CLK_TOP_CFG_26M_AUD, "cfg_26m_aud", "clk26m", 9),
+ /*
+ * cfg_axi_east, cfg_axi_east_north, cfg_axi_north and cfg_axi_south
+ * are peripheral bus clock branches.
+ */
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_EAST, "cfg_axi_east", "top_axi", 10, CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_EAST_NORTH, "cfg_axi_east_north", "top_axi", 11,
+ CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_NORTH, "cfg_axi_north", "top_axi", 12, CLK_IS_CRITICAL),
+ GATE_TOP0_FLAGS(CLK_TOP_CFG_AXI_SOUTH, "cfg_axi_south", "top_axi", 13, CLK_IS_CRITICAL),
+ GATE_TOP0(CLK_TOP_CFG_EXT_TEST, "cfg_ext_test", "msdcpll_d2", 15),
+ /* TOP1 */
+ GATE_TOP1(CLK_TOP_SSUSB_REF, "ssusb_ref", "clk26m", 0),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_REF, "ssusb_phy_ref", "clk26m", 1),
+ GATE_TOP1(CLK_TOP_SSUSB_P1_REF, "ssusb_p1_ref", "clk26m", 2),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P1_REF, "ssusb_phy_p1_ref", "clk26m", 3),
+ GATE_TOP1(CLK_TOP_SSUSB_P2_REF, "ssusb_p2_ref", "clk26m", 4),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P2_REF, "ssusb_phy_p2_ref", "clk26m", 5),
+ GATE_TOP1(CLK_TOP_SSUSB_P3_REF, "ssusb_p3_ref", "clk26m", 6),
+ GATE_TOP1(CLK_TOP_SSUSB_PHY_P3_REF, "ssusb_phy_p3_ref", "clk26m", 7),
+};
+
+static const struct of_device_id of_match_clk_mt8195_topck[] = {
+ { .compatible = "mediatek,mt8195-topckgen", },
+ {}
+};
+
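+/*
+ * topckgen needs a dedicated probe: besides the gates it also registers
+ * fixed clocks, fixed factors, muxes and composite dividers, which the
+ * generic mtk_clk_simple_probe() helper does not cover.
+ */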
+static int clk_mt8195_topck_probe(struct platform_device *pdev)
+{
+ struct clk_onecell_data *top_clk_data;
+ struct device_node *node = pdev->dev.of_node;
+ int r;
+ void __iomem *base;
+
+ top_clk_data = mtk_alloc_clk_data(CLK_TOP_NR_CLK);
+ if (!top_clk_data)
+ return -ENOMEM;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base)) {
+ r = PTR_ERR(base);
+ goto free_top_data;
+ }
+
+ mtk_clk_register_fixed_clks(top_fixed_clks, ARRAY_SIZE(top_fixed_clks),
+ top_clk_data);
+ mtk_clk_register_factors(top_divs, ARRAY_SIZE(top_divs), top_clk_data);
+ mtk_clk_register_muxes(top_mtk_muxes, ARRAY_SIZE(top_mtk_muxes), node,
+ &mt8195_clk_lock, top_clk_data);
+ mtk_clk_register_composites(top_muxes, ARRAY_SIZE(top_muxes), base,
+ &mt8195_clk_lock, top_clk_data);
+ mtk_clk_register_composites(top_adj_divs, ARRAY_SIZE(top_adj_divs), base,
+ &mt8195_clk_lock, top_clk_data);
+ r = mtk_clk_register_gates(node, top_clks, ARRAY_SIZE(top_clks), top_clk_data);
+ if (r)
+ goto free_top_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, top_clk_data);
+ if (r)
+ goto free_top_data;
+
+ return r;
+
+free_top_data:
+ mtk_free_clk_data(top_clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_topck_drv = {
+ .probe = clk_mt8195_topck_probe,
+ .driver = {
+ .name = "clk-mt8195-topck",
+ .of_match_table = of_match_clk_mt8195_topck,
+ },
+};
+builtin_platform_driver(clk_mt8195_topck_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdec.c b/drivers/clk/mediatek/clk-mt8195-vdec.c
new file mode 100644
index 000000000000..a1df04f42a90
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdec.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
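+/*
+ * Each VDEC gate bank pairs a set register (also read back for status) with
+ * a clear register; the setclr_inv ops handle the inverted bit polarity.
+ */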
+static const struct mtk_gate_regs vdec0_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x4,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs vdec1_cg_regs = {
+ .set_ofs = 0x200,
+ .clr_ofs = 0x204,
+ .sta_ofs = 0x200,
+};
+
+static const struct mtk_gate_regs vdec2_cg_regs = {
+ .set_ofs = 0x8,
+ .clr_ofs = 0xc,
+ .sta_ofs = 0x8,
+};
+
+#define GATE_VDEC0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec0_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec1_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+#define GATE_VDEC2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdec2_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate vdec_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_VDEC, "vdec_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_LAT, "vdec_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_LARB1, "vdec_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_gate vdec_core1_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_CORE1_VDEC, "vdec_core1_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_CORE1_LAT, "vdec_core1_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_CORE1_LARB1, "vdec_core1_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_gate vdec_soc_clks[] = {
+ /* VDEC0 */
+ GATE_VDEC0(CLK_VDEC_SOC_VDEC, "vdec_soc_vdec", "top_vdec", 0),
+ /* VDEC1 */
+ GATE_VDEC1(CLK_VDEC_SOC_LAT, "vdec_soc_lat", "top_vdec", 0),
+ /* VDEC2 */
+ GATE_VDEC2(CLK_VDEC_SOC_LARB1, "vdec_soc_larb1", "top_vdec", 0),
+};
+
+static const struct mtk_clk_desc vdec_desc = {
+ .clks = vdec_clks,
+ .num_clks = ARRAY_SIZE(vdec_clks),
+};
+
+static const struct mtk_clk_desc vdec_core1_desc = {
+ .clks = vdec_core1_clks,
+ .num_clks = ARRAY_SIZE(vdec_core1_clks),
+};
+
+static const struct mtk_clk_desc vdec_soc_desc = {
+ .clks = vdec_soc_clks,
+ .num_clks = ARRAY_SIZE(vdec_soc_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vdec[] = {
+ {
+ .compatible = "mediatek,mt8195-vdecsys",
+ .data = &vdec_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vdecsys_core1",
+ .data = &vdec_core1_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vdecsys_soc",
+ .data = &vdec_soc_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vdec_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vdec",
+ .of_match_table = of_match_clk_mt8195_vdec,
+ },
+};
+builtin_platform_driver(clk_mt8195_vdec_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo0.c b/drivers/clk/mediatek/clk-mt8195-vdo0.c
new file mode 100644
index 000000000000..f7ff7618c714
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdo0.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vdo0_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo0_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+static const struct mtk_gate_regs vdo0_2_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+#define GATE_VDO0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vdo0_clks[] = {
+ /* VDO0_0 */
+ GATE_VDO0_0(CLK_VDO0_DISP_OVL0, "vdo0_disp_ovl0", "top_vpp", 0),
+ GATE_VDO0_0(CLK_VDO0_DISP_COLOR0, "vdo0_disp_color0", "top_vpp", 2),
+ GATE_VDO0_0(CLK_VDO0_DISP_COLOR1, "vdo0_disp_color1", "top_vpp", 3),
+ GATE_VDO0_0(CLK_VDO0_DISP_CCORR0, "vdo0_disp_ccorr0", "top_vpp", 4),
+ GATE_VDO0_0(CLK_VDO0_DISP_CCORR1, "vdo0_disp_ccorr1", "top_vpp", 5),
+ GATE_VDO0_0(CLK_VDO0_DISP_AAL0, "vdo0_disp_aal0", "top_vpp", 6),
+ GATE_VDO0_0(CLK_VDO0_DISP_AAL1, "vdo0_disp_aal1", "top_vpp", 7),
+ GATE_VDO0_0(CLK_VDO0_DISP_GAMMA0, "vdo0_disp_gamma0", "top_vpp", 8),
+ GATE_VDO0_0(CLK_VDO0_DISP_GAMMA1, "vdo0_disp_gamma1", "top_vpp", 9),
+ GATE_VDO0_0(CLK_VDO0_DISP_DITHER0, "vdo0_disp_dither0", "top_vpp", 10),
+ GATE_VDO0_0(CLK_VDO0_DISP_DITHER1, "vdo0_disp_dither1", "top_vpp", 11),
+ GATE_VDO0_0(CLK_VDO0_DISP_OVL1, "vdo0_disp_ovl1", "top_vpp", 16),
+ GATE_VDO0_0(CLK_VDO0_DISP_WDMA0, "vdo0_disp_wdma0", "top_vpp", 17),
+ GATE_VDO0_0(CLK_VDO0_DISP_WDMA1, "vdo0_disp_wdma1", "top_vpp", 18),
+ GATE_VDO0_0(CLK_VDO0_DISP_RDMA0, "vdo0_disp_rdma0", "top_vpp", 19),
+ GATE_VDO0_0(CLK_VDO0_DISP_RDMA1, "vdo0_disp_rdma1", "top_vpp", 20),
+ GATE_VDO0_0(CLK_VDO0_DSI0, "vdo0_dsi0", "top_vpp", 21),
+ GATE_VDO0_0(CLK_VDO0_DSI1, "vdo0_dsi1", "top_vpp", 22),
+ GATE_VDO0_0(CLK_VDO0_DSC_WRAP0, "vdo0_dsc_wrap0", "top_vpp", 23),
+ GATE_VDO0_0(CLK_VDO0_VPP_MERGE0, "vdo0_vpp_merge0", "top_vpp", 24),
+ GATE_VDO0_0(CLK_VDO0_DP_INTF0, "vdo0_dp_intf0", "top_vpp", 25),
+ GATE_VDO0_0(CLK_VDO0_DISP_MUTEX0, "vdo0_disp_mutex0", "top_vpp", 26),
+ GATE_VDO0_0(CLK_VDO0_DISP_IL_ROT0, "vdo0_disp_il_rot0", "top_vpp", 27),
+ GATE_VDO0_0(CLK_VDO0_APB_BUS, "vdo0_apb_bus", "top_vpp", 28),
+ GATE_VDO0_0(CLK_VDO0_FAKE_ENG0, "vdo0_fake_eng0", "top_vpp", 29),
+ GATE_VDO0_0(CLK_VDO0_FAKE_ENG1, "vdo0_fake_eng1", "top_vpp", 30),
+ /* VDO0_1 */
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC0, "vdo0_dl_async0", "top_vpp", 0),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC1, "vdo0_dl_async1", "top_vpp", 1),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC2, "vdo0_dl_async2", "top_vpp", 2),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC3, "vdo0_dl_async3", "top_vpp", 3),
+ GATE_VDO0_1(CLK_VDO0_DL_ASYNC4, "vdo0_dl_async4", "top_vpp", 4),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR0, "vdo0_disp_monitor0", "top_vpp", 5),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR1, "vdo0_disp_monitor1", "top_vpp", 6),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR2, "vdo0_disp_monitor2", "top_vpp", 7),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR3, "vdo0_disp_monitor3", "top_vpp", 8),
+ GATE_VDO0_1(CLK_VDO0_DISP_MONITOR4, "vdo0_disp_monitor4", "top_vpp", 9),
+ GATE_VDO0_1(CLK_VDO0_SMI_GALS, "vdo0_smi_gals", "top_vpp", 10),
+ GATE_VDO0_1(CLK_VDO0_SMI_COMMON, "vdo0_smi_common", "top_vpp", 11),
+ GATE_VDO0_1(CLK_VDO0_SMI_EMI, "vdo0_smi_emi", "top_vpp", 12),
+ GATE_VDO0_1(CLK_VDO0_SMI_IOMMU, "vdo0_smi_iommu", "top_vpp", 13),
+ GATE_VDO0_1(CLK_VDO0_SMI_LARB, "vdo0_smi_larb", "top_vpp", 14),
+ GATE_VDO0_1(CLK_VDO0_SMI_RSI, "vdo0_smi_rsi", "top_vpp", 15),
+ /* VDO0_2 */
+ GATE_VDO0_2(CLK_VDO0_DSI0_DSI, "vdo0_dsi0_dsi", "top_dsi_occ", 0),
+ GATE_VDO0_2(CLK_VDO0_DSI1_DSI, "vdo0_dsi1_dsi", "top_dsi_occ", 8),
+ GATE_VDO0_2(CLK_VDO0_DP_INTF0_DP_INTF, "vdo0_dp_intf0_dp_intf", "top_edp", 16),
+};
+
+static int clk_mt8195_vdo0_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDO0_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, vdo0_clks, ARRAY_SIZE(vdo0_clks), clk_data);
+ if (r)
+ goto free_vdo0_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_vdo0_data;
+
+ return r;
+
+free_vdo0_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_vdo0_drv = {
+ .probe = clk_mt8195_vdo0_probe,
+ .driver = {
+ .name = "clk-mt8195-vdo0",
+ },
+};
+builtin_platform_driver(clk_mt8195_vdo0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vdo1.c b/drivers/clk/mediatek/clk-mt8195-vdo1.c
new file mode 100644
index 000000000000..03df8eae8838
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vdo1.c
@@ -0,0 +1,140 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vdo1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vdo1_1_cg_regs = {
+ .set_ofs = 0x124,
+ .clr_ofs = 0x128,
+ .sta_ofs = 0x120,
+};
+
+static const struct mtk_gate_regs vdo1_2_cg_regs = {
+ .set_ofs = 0x134,
+ .clr_ofs = 0x138,
+ .sta_ofs = 0x130,
+};
+
+static const struct mtk_gate_regs vdo1_3_cg_regs = {
+ .set_ofs = 0x144,
+ .clr_ofs = 0x148,
+ .sta_ofs = 0x140,
+};
+
+#define GATE_VDO1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VDO1_3(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vdo1_3_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vdo1_clks[] = {
+ /* VDO1_0 */
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB2, "vdo1_smi_larb2", "top_vpp", 0),
+ GATE_VDO1_0(CLK_VDO1_SMI_LARB3, "vdo1_smi_larb3", "top_vpp", 1),
+ GATE_VDO1_0(CLK_VDO1_GALS, "vdo1_gals", "top_vpp", 2),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG0, "vdo1_fake_eng0", "top_vpp", 3),
+ GATE_VDO1_0(CLK_VDO1_FAKE_ENG, "vdo1_fake_eng", "top_vpp", 4),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA0, "vdo1_mdp_rdma0", "top_vpp", 5),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA1, "vdo1_mdp_rdma1", "top_vpp", 6),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA2, "vdo1_mdp_rdma2", "top_vpp", 7),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA3, "vdo1_mdp_rdma3", "top_vpp", 8),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE0, "vdo1_vpp_merge0", "top_vpp", 9),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE1, "vdo1_vpp_merge1", "top_vpp", 10),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE2, "vdo1_vpp_merge2", "top_vpp", 11),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE3, "vdo1_vpp_merge3", "top_vpp", 12),
+ GATE_VDO1_0(CLK_VDO1_VPP_MERGE4, "vdo1_vpp_merge4", "top_vpp", 13),
+ GATE_VDO1_0(CLK_VDO1_VPP2_TO_VDO1_DL_ASYNC, "vdo1_vpp2_to_vdo1_dl_async", "top_vpp", 14),
+ GATE_VDO1_0(CLK_VDO1_VPP3_TO_VDO1_DL_ASYNC, "vdo1_vpp3_to_vdo1_dl_async", "top_vpp", 15),
+ GATE_VDO1_0(CLK_VDO1_DISP_MUTEX, "vdo1_disp_mutex", "top_vpp", 16),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA4, "vdo1_mdp_rdma4", "top_vpp", 17),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA5, "vdo1_mdp_rdma5", "top_vpp", 18),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA6, "vdo1_mdp_rdma6", "top_vpp", 19),
+ GATE_VDO1_0(CLK_VDO1_MDP_RDMA7, "vdo1_mdp_rdma7", "top_vpp", 20),
+ GATE_VDO1_0(CLK_VDO1_DP_INTF0_MM, "vdo1_dp_intf0_mm", "top_vpp", 21),
+ GATE_VDO1_0(CLK_VDO1_DPI0_MM, "vdo1_dpi0_mm", "top_vpp", 22),
+ GATE_VDO1_0(CLK_VDO1_DPI1_MM, "vdo1_dpi1_mm", "top_vpp", 23),
+ GATE_VDO1_0(CLK_VDO1_DISP_MONITOR, "vdo1_disp_monitor", "top_vpp", 24),
+ GATE_VDO1_0(CLK_VDO1_MERGE0_DL_ASYNC, "vdo1_merge0_dl_async", "top_vpp", 25),
+ GATE_VDO1_0(CLK_VDO1_MERGE1_DL_ASYNC, "vdo1_merge1_dl_async", "top_vpp", 26),
+ GATE_VDO1_0(CLK_VDO1_MERGE2_DL_ASYNC, "vdo1_merge2_dl_async", "top_vpp", 27),
+ GATE_VDO1_0(CLK_VDO1_MERGE3_DL_ASYNC, "vdo1_merge3_dl_async", "top_vpp", 28),
+ GATE_VDO1_0(CLK_VDO1_MERGE4_DL_ASYNC, "vdo1_merge4_dl_async", "top_vpp", 29),
+ GATE_VDO1_0(CLK_VDO1_VDO0_DSC_TO_VDO1_DL_ASYNC, "vdo1_vdo0_dsc_to_vdo1_dl_async",
+ "top_vpp", 30),
+ GATE_VDO1_0(CLK_VDO1_VDO0_MERGE_TO_VDO1_DL_ASYNC, "vdo1_vdo0_merge_to_vdo1_dl_async",
+ "top_vpp", 31),
+ /* VDO1_1 */
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE0, "vdo1_hdr_vdo_fe0", "top_vpp", 0),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE0, "vdo1_hdr_gfx_fe0", "top_vpp", 1),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_BE, "vdo1_hdr_vdo_be", "top_vpp", 2),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE1, "vdo1_hdr_vdo_fe1", "top_vpp", 16),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE1, "vdo1_hdr_gfx_fe1", "top_vpp", 17),
+ GATE_VDO1_1(CLK_VDO1_DISP_MIXER, "vdo1_disp_mixer", "top_vpp", 18),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE0_DL_ASYNC, "vdo1_hdr_vdo_fe0_dl_async", "top_vpp", 19),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_FE1_DL_ASYNC, "vdo1_hdr_vdo_fe1_dl_async", "top_vpp", 20),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE0_DL_ASYNC, "vdo1_hdr_gfx_fe0_dl_async", "top_vpp", 21),
+ GATE_VDO1_1(CLK_VDO1_HDR_GFX_FE1_DL_ASYNC, "vdo1_hdr_gfx_fe1_dl_async", "top_vpp", 22),
+ GATE_VDO1_1(CLK_VDO1_HDR_VDO_BE_DL_ASYNC, "vdo1_hdr_vdo_be_dl_async", "top_vpp", 23),
+ /* VDO1_2 */
+ GATE_VDO1_2(CLK_VDO1_DPI0, "vdo1_dpi0", "top_vpp", 0),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI0, "vdo1_disp_monitor_dpi0", "top_vpp", 1),
+ GATE_VDO1_2(CLK_VDO1_DPI1, "vdo1_dpi1", "top_vpp", 8),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPI1, "vdo1_disp_monitor_dpi1", "top_vpp", 9),
+ GATE_VDO1_2(CLK_VDO1_DPINTF, "vdo1_dpintf", "top_vpp", 16),
+ GATE_VDO1_2(CLK_VDO1_DISP_MONITOR_DPINTF, "vdo1_disp_monitor_dpintf", "top_vpp", 17),
+ /* VDO1_3 */
+ GATE_VDO1_3(CLK_VDO1_26M_SLOW, "vdo1_26m_slow", "clk26m", 8),
+};
+
+static int clk_mt8195_vdo1_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->parent->of_node;
+ struct clk_onecell_data *clk_data;
+ int r;
+
+ clk_data = mtk_alloc_clk_data(CLK_VDO1_NR_CLK);
+ if (!clk_data)
+ return -ENOMEM;
+
+ r = mtk_clk_register_gates(node, vdo1_clks, ARRAY_SIZE(vdo1_clks), clk_data);
+ if (r)
+ goto free_vdo1_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_vdo1_data;
+
+ return r;
+
+free_vdo1_data:
+ mtk_free_clk_data(clk_data);
+ return r;
+}
+
+static struct platform_driver clk_mt8195_vdo1_drv = {
+ .probe = clk_mt8195_vdo1_probe,
+ .driver = {
+ .name = "clk-mt8195-vdo1",
+ },
+};
+builtin_platform_driver(clk_mt8195_vdo1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-venc.c b/drivers/clk/mediatek/clk-mt8195-venc.c
new file mode 100644
index 000000000000..7339851a0856
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-venc.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs venc_cg_regs = {
+ .set_ofs = 0x4,
+ .clr_ofs = 0x8,
+ .sta_ofs = 0x0,
+};
+
+#define GATE_VENC(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &venc_cg_regs, _shift, &mtk_clk_gate_ops_setclr_inv)
+
+static const struct mtk_gate venc_clks[] = {
+ GATE_VENC(CLK_VENC_LARB, "venc_larb", "top_venc", 0),
+ GATE_VENC(CLK_VENC_VENC, "venc_venc", "top_venc", 4),
+ GATE_VENC(CLK_VENC_JPGENC, "venc_jpgenc", "top_venc", 8),
+ GATE_VENC(CLK_VENC_JPGDEC, "venc_jpgdec", "top_venc", 12),
+ GATE_VENC(CLK_VENC_JPGDEC_C1, "venc_jpgdec_c1", "top_venc", 16),
+ GATE_VENC(CLK_VENC_GALS, "venc_gals", "top_venc", 28),
+};
+
+static const struct mtk_gate venc_core1_clks[] = {
+ GATE_VENC(CLK_VENC_CORE1_LARB, "venc_core1_larb", "top_venc", 0),
+ GATE_VENC(CLK_VENC_CORE1_VENC, "venc_core1_venc", "top_venc", 4),
+ GATE_VENC(CLK_VENC_CORE1_JPGENC, "venc_core1_jpgenc", "top_venc", 8),
+ GATE_VENC(CLK_VENC_CORE1_JPGDEC, "venc_core1_jpgdec", "top_venc", 12),
+ GATE_VENC(CLK_VENC_CORE1_JPGDEC_C1, "venc_core1_jpgdec_c1", "top_venc", 16),
+ GATE_VENC(CLK_VENC_CORE1_GALS, "venc_core1_gals", "top_venc", 28),
+};
+
+static const struct mtk_clk_desc venc_desc = {
+ .clks = venc_clks,
+ .num_clks = ARRAY_SIZE(venc_clks),
+};
+
+static const struct mtk_clk_desc venc_core1_desc = {
+ .clks = venc_core1_clks,
+ .num_clks = ARRAY_SIZE(venc_core1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_venc[] = {
+ {
+ .compatible = "mediatek,mt8195-vencsys",
+ .data = &venc_desc,
+ }, {
+ .compatible = "mediatek,mt8195-vencsys_core1",
+ .data = &venc_core1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_venc_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-venc",
+ .of_match_table = of_match_clk_mt8195_venc,
+ },
+};
+builtin_platform_driver(clk_mt8195_venc_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp0.c b/drivers/clk/mediatek/clk-mt8195-vpp0.c
new file mode 100644
index 000000000000..c3241466a8d0
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vpp0.c
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vpp0_0_cg_regs = {
+ .set_ofs = 0x24,
+ .clr_ofs = 0x28,
+ .sta_ofs = 0x20,
+};
+
+static const struct mtk_gate_regs vpp0_1_cg_regs = {
+ .set_ofs = 0x30,
+ .clr_ofs = 0x34,
+ .sta_ofs = 0x2c,
+};
+
+static const struct mtk_gate_regs vpp0_2_cg_regs = {
+ .set_ofs = 0x3c,
+ .clr_ofs = 0x40,
+ .sta_ofs = 0x38,
+};
+
+#define GATE_VPP0_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP0_2(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp0_2_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp0_clks[] = {
+ /* VPP0_0 */
+ GATE_VPP0_0(CLK_VPP0_MDP_FG, "vpp0_mdp_fg", "top_vpp", 1),
+ GATE_VPP0_0(CLK_VPP0_STITCH, "vpp0_stitch", "top_vpp", 2),
+ GATE_VPP0_0(CLK_VPP0_PADDING, "vpp0_padding", "top_vpp", 7),
+ GATE_VPP0_0(CLK_VPP0_MDP_TCC, "vpp0_mdp_tcc", "top_vpp", 8),
+ GATE_VPP0_0(CLK_VPP0_WARP0_ASYNC_TX, "vpp0_warp0_async_tx", "top_vpp", 10),
+ GATE_VPP0_0(CLK_VPP0_WARP1_ASYNC_TX, "vpp0_warp1_async_tx", "top_vpp", 11),
+ GATE_VPP0_0(CLK_VPP0_MUTEX, "vpp0_mutex", "top_vpp", 13),
+ GATE_VPP0_0(CLK_VPP0_VPP02VPP1_RELAY, "vpp0_vpp02vpp1_relay", "top_vpp", 14),
+ GATE_VPP0_0(CLK_VPP0_VPP12VPP0_ASYNC, "vpp0_vpp12vpp0_async", "top_vpp", 15),
+ GATE_VPP0_0(CLK_VPP0_MMSYSRAM_TOP, "vpp0_mmsysram_top", "top_vpp", 16),
+ GATE_VPP0_0(CLK_VPP0_MDP_AAL, "vpp0_mdp_aal", "top_vpp", 17),
+ GATE_VPP0_0(CLK_VPP0_MDP_RSZ, "vpp0_mdp_rsz", "top_vpp", 18),
+ /* VPP0_1 */
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON, "vpp0_smi_common", "top_vpp", 0),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB0, "vpp0_gals_vdo0_larb0", "top_vpp", 1),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_LARB1, "vpp0_gals_vdo0_larb1", "top_vpp", 2),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS, "vpp0_gals_vencsys", "top_vpp", 3),
+ GATE_VPP0_1(CLK_VPP0_GALS_VENCSYS_CORE1, "vpp0_gals_vencsys_core1", "top_vpp", 4),
+ GATE_VPP0_1(CLK_VPP0_GALS_INFRA, "vpp0_gals_infra", "top_vpp", 5),
+ GATE_VPP0_1(CLK_VPP0_GALS_CAMSYS, "vpp0_gals_camsys", "top_vpp", 6),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB5, "vpp0_gals_vpp1_larb5", "top_vpp", 7),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_LARB6, "vpp0_gals_vpp1_larb6", "top_vpp", 8),
+ GATE_VPP0_1(CLK_VPP0_SMI_REORDER, "vpp0_smi_reorder", "top_vpp", 9),
+ GATE_VPP0_1(CLK_VPP0_SMI_IOMMU, "vpp0_smi_iommu", "top_vpp", 10),
+ GATE_VPP0_1(CLK_VPP0_GALS_IMGSYS_CAMSYS, "vpp0_gals_imgsys_camsys", "top_vpp", 11),
+ GATE_VPP0_1(CLK_VPP0_MDP_RDMA, "vpp0_mdp_rdma", "top_vpp", 12),
+ GATE_VPP0_1(CLK_VPP0_MDP_WROT, "vpp0_mdp_wrot", "top_vpp", 13),
+ GATE_VPP0_1(CLK_VPP0_GALS_EMI0_EMI1, "vpp0_gals_emi0_emi1", "top_vpp", 16),
+ GATE_VPP0_1(CLK_VPP0_SMI_SUB_COMMON_REORDER, "vpp0_smi_sub_common_reorder", "top_vpp", 17),
+ GATE_VPP0_1(CLK_VPP0_SMI_RSI, "vpp0_smi_rsi", "top_vpp", 18),
+ GATE_VPP0_1(CLK_VPP0_SMI_COMMON_LARB4, "vpp0_smi_common_larb4", "top_vpp", 19),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDEC_VDEC_CORE1, "vpp0_gals_vdec_vdec_core1", "top_vpp", 20),
+ GATE_VPP0_1(CLK_VPP0_GALS_VPP1_WPE, "vpp0_gals_vpp1_wpe", "top_vpp", 21),
+ GATE_VPP0_1(CLK_VPP0_GALS_VDO0_VDO1_VENCSYS_CORE1, "vpp0_gals_vdo0_vdo1_vencsys_core1",
+ "top_vpp", 22),
+ GATE_VPP0_1(CLK_VPP0_FAKE_ENG, "vpp0_fake_eng", "top_vpp", 23),
+ GATE_VPP0_1(CLK_VPP0_MDP_HDR, "vpp0_mdp_hdr", "top_vpp", 24),
+ GATE_VPP0_1(CLK_VPP0_MDP_TDSHP, "vpp0_mdp_tdshp", "top_vpp", 25),
+ GATE_VPP0_1(CLK_VPP0_MDP_COLOR, "vpp0_mdp_color", "top_vpp", 26),
+ GATE_VPP0_1(CLK_VPP0_MDP_OVL, "vpp0_mdp_ovl", "top_vpp", 27),
+ /* VPP0_2 */
+ GATE_VPP0_2(CLK_VPP0_WARP0_RELAY, "vpp0_warp0_relay", "top_wpe_vpp", 0),
+ GATE_VPP0_2(CLK_VPP0_WARP0_MDP_DL_ASYNC, "vpp0_warp0_mdp_dl_async", "top_wpe_vpp", 1),
+ GATE_VPP0_2(CLK_VPP0_WARP1_RELAY, "vpp0_warp1_relay", "top_wpe_vpp", 2),
+ GATE_VPP0_2(CLK_VPP0_WARP1_MDP_DL_ASYNC, "vpp0_warp1_mdp_dl_async", "top_wpe_vpp", 3),
+};
+
+static const struct mtk_clk_desc vpp0_desc = {
+ .clks = vpp0_clks,
+ .num_clks = ARRAY_SIZE(vpp0_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vpp0[] = {
+ {
+ .compatible = "mediatek,mt8195-vppsys0",
+ .data = &vpp0_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vpp0_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vpp0",
+ .of_match_table = of_match_clk_mt8195_vpp0,
+ },
+};
+builtin_platform_driver(clk_mt8195_vpp0_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-vpp1.c b/drivers/clk/mediatek/clk-mt8195-vpp1.c
new file mode 100644
index 000000000000..ce0b9a40a179
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-vpp1.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs vpp1_0_cg_regs = {
+ .set_ofs = 0x104,
+ .clr_ofs = 0x108,
+ .sta_ofs = 0x100,
+};
+
+static const struct mtk_gate_regs vpp1_1_cg_regs = {
+ .set_ofs = 0x114,
+ .clr_ofs = 0x118,
+ .sta_ofs = 0x110,
+};
+
+#define GATE_VPP1_0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_0_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+#define GATE_VPP1_1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &vpp1_1_cg_regs, _shift, &mtk_clk_gate_ops_setclr)
+
+static const struct mtk_gate vpp1_clks[] = {
+ /* VPP1_0 */
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_OVL, "vpp1_svpp1_mdp_ovl", "top_vpp", 0),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TCC, "vpp1_svpp1_mdp_tcc", "top_vpp", 1),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_WROT, "vpp1_svpp1_mdp_wrot", "top_vpp", 2),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_VPP_PAD, "vpp1_svpp1_vpp_pad", "top_vpp", 3),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_WROT, "vpp1_svpp2_mdp_wrot", "top_vpp", 4),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_PAD, "vpp1_svpp2_vpp_pad", "top_vpp", 5),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_WROT, "vpp1_svpp3_mdp_wrot", "top_vpp", 6),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_PAD, "vpp1_svpp3_vpp_pad", "top_vpp", 7),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RDMA, "vpp1_svpp1_mdp_rdma", "top_vpp", 8),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_FG, "vpp1_svpp1_mdp_fg", "top_vpp", 9),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_RDMA, "vpp1_svpp2_mdp_rdma", "top_vpp", 10),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_FG, "vpp1_svpp2_mdp_fg", "top_vpp", 11),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_RDMA, "vpp1_svpp3_mdp_rdma", "top_vpp", 12),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_FG, "vpp1_svpp3_mdp_fg", "top_vpp", 13),
+ GATE_VPP1_0(CLK_VPP1_VPP_SPLIT, "vpp1_vpp_split", "top_vpp", 14),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VDO0_DL_RELAY, "vpp1_svpp2_vdo0_dl_relay", "top_vpp", 15),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_TDSHP, "vpp1_svpp1_mdp_tdshp", "top_vpp", 16),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_COLOR, "vpp1_svpp1_mdp_color", "top_vpp", 17),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VDO1_DL_RELAY, "vpp1_svpp3_vdo1_dl_relay", "top_vpp", 18),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_VPP_MERGE, "vpp1_svpp2_vpp_merge", "top_vpp", 19),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_COLOR, "vpp1_svpp2_mdp_color", "top_vpp", 20),
+ GATE_VPP1_0(CLK_VPP1_VPPSYS1_GALS, "vpp1_vppsys1_gals", "top_vpp", 21),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_VPP_MERGE, "vpp1_svpp3_vpp_merge", "top_vpp", 22),
+ GATE_VPP1_0(CLK_VPP1_SVPP3_MDP_COLOR, "vpp1_svpp3_mdp_color", "top_vpp", 23),
+ GATE_VPP1_0(CLK_VPP1_VPPSYS1_LARB, "vpp1_vppsys1_larb", "top_vpp", 24),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_RSZ, "vpp1_svpp1_mdp_rsz", "top_vpp", 25),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_HDR, "vpp1_svpp1_mdp_hdr", "top_vpp", 26),
+ GATE_VPP1_0(CLK_VPP1_SVPP1_MDP_AAL, "vpp1_svpp1_mdp_aal", "top_vpp", 27),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_HDR, "vpp1_svpp2_mdp_hdr", "top_vpp", 28),
+ GATE_VPP1_0(CLK_VPP1_SVPP2_MDP_AAL, "vpp1_svpp2_mdp_aal", "top_vpp", 29),
+ GATE_VPP1_0(CLK_VPP1_DL_ASYNC, "vpp1_dl_async", "top_vpp", 30),
+ GATE_VPP1_0(CLK_VPP1_LARB5_FAKE_ENG, "vpp1_larb5_fake_eng", "top_vpp", 31),
+ /* VPP1_1 */
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_HDR, "vpp1_svpp3_mdp_hdr", "top_vpp", 0),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_AAL, "vpp1_svpp3_mdp_aal", "top_vpp", 1),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_VDO1_DL_RELAY, "vpp1_svpp2_vdo1_dl_relay", "top_vpp", 2),
+ GATE_VPP1_1(CLK_VPP1_LARB6_FAKE_ENG, "vpp1_larb6_fake_eng", "top_vpp", 3),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_RSZ, "vpp1_svpp2_mdp_rsz", "top_vpp", 4),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_RSZ, "vpp1_svpp3_mdp_rsz", "top_vpp", 5),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_VDO0_DL_RELAY, "vpp1_svpp3_vdo0_dl_relay", "top_vpp", 6),
+ GATE_VPP1_1(CLK_VPP1_DISP_MUTEX, "vpp1_disp_mutex", "top_vpp", 7),
+ GATE_VPP1_1(CLK_VPP1_SVPP2_MDP_TDSHP, "vpp1_svpp2_mdp_tdshp", "top_vpp", 8),
+ GATE_VPP1_1(CLK_VPP1_SVPP3_MDP_TDSHP, "vpp1_svpp3_mdp_tdshp", "top_vpp", 9),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL1_RELAY, "vpp1_vpp0_dl1_relay", "top_vpp", 10),
+ GATE_VPP1_1(CLK_VPP1_HDMI_META, "vpp1_hdmi_meta", "hdmirx_p", 11),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_HDMI, "vpp1_vpp_split_hdmi", "hdmirx_p", 12),
+ GATE_VPP1_1(CLK_VPP1_DGI_IN, "vpp1_dgi_in", "in_dgi", 13),
+ GATE_VPP1_1(CLK_VPP1_DGI_OUT, "vpp1_dgi_out", "top_dgi_out", 14),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_DGI, "vpp1_vpp_split_dgi", "top_dgi_out", 15),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL_ASYNC, "vpp1_vpp0_dl_async", "top_vpp", 16),
+ GATE_VPP1_1(CLK_VPP1_VPP0_DL_RELAY, "vpp1_vpp0_dl_relay", "top_vpp", 17),
+ GATE_VPP1_1(CLK_VPP1_VPP_SPLIT_26M, "vpp1_vpp_split_26m", "clk26m", 26),
+};
+
+static const struct mtk_clk_desc vpp1_desc = {
+ .clks = vpp1_clks,
+ .num_clks = ARRAY_SIZE(vpp1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_vpp1[] = {
+ {
+ .compatible = "mediatek,mt8195-vppsys1",
+ .data = &vpp1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_vpp1_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-vpp1",
+ .of_match_table = of_match_clk_mt8195_vpp1,
+ },
+};
+builtin_platform_driver(clk_mt8195_vpp1_drv);
diff --git a/drivers/clk/mediatek/clk-mt8195-wpe.c b/drivers/clk/mediatek/clk-mt8195-wpe.c
new file mode 100644
index 000000000000..274d60838d8e
--- /dev/null
+++ b/drivers/clk/mediatek/clk-mt8195-wpe.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (c) 2021 MediaTek Inc.
+// Author: Chun-Jie Chen <chun-jie.chen@mediatek.com>
+
+#include "clk-gate.h"
+#include "clk-mtk.h"
+
+#include <dt-bindings/clock/mt8195-clk.h>
+#include <linux/clk-provider.h>
+#include <linux/platform_device.h>
+
+static const struct mtk_gate_regs wpe_cg_regs = {
+ .set_ofs = 0x0,
+ .clr_ofs = 0x0,
+ .sta_ofs = 0x0,
+};
+
+static const struct mtk_gate_regs wpe_vpp0_cg_regs = {
+ .set_ofs = 0x58,
+ .clr_ofs = 0x58,
+ .sta_ofs = 0x58,
+};
+
+static const struct mtk_gate_regs wpe_vpp1_cg_regs = {
+ .set_ofs = 0x5c,
+ .clr_ofs = 0x5c,
+ .sta_ofs = 0x5c,
+};
+
+#define GATE_WPE(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP0(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp0_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+#define GATE_WPE_VPP1(_id, _name, _parent, _shift) \
+ GATE_MTK(_id, _name, _parent, &wpe_vpp1_cg_regs, _shift, &mtk_clk_gate_ops_no_setclr_inv)
+
+static const struct mtk_gate wpe_clks[] = {
+ GATE_WPE(CLK_WPE_VPP0, "wpe_vpp0", "top_wpe_vpp", 16),
+ GATE_WPE(CLK_WPE_VPP1, "wpe_vpp1", "top_wpe_vpp", 17),
+ GATE_WPE(CLK_WPE_SMI_LARB7, "wpe_smi_larb7", "top_wpe_vpp", 18),
+ GATE_WPE(CLK_WPE_SMI_LARB8, "wpe_smi_larb8", "top_wpe_vpp", 19),
+ GATE_WPE(CLK_WPE_EVENT_TX, "wpe_event_tx", "top_wpe_vpp", 20),
+ GATE_WPE(CLK_WPE_SMI_LARB7_P, "wpe_smi_larb7_p", "top_wpe_vpp", 24),
+ GATE_WPE(CLK_WPE_SMI_LARB8_P, "wpe_smi_larb8_p", "top_wpe_vpp", 25),
+};
+
+static const struct mtk_gate wpe_vpp0_clks[] = {
+ /* WPE_VPP0 */
+ GATE_WPE_VPP0(CLK_WPE_VPP0_VGEN, "wpe_vpp0_vgen", "top_img", 0),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_EXT, "wpe_vpp0_ext", "top_img", 1),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_VFC, "wpe_vpp0_vfc", "top_img", 2),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH0_TOP, "wpe_vpp0_cach0_top", "top_img", 3),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH0_DMA, "wpe_vpp0_cach0_dma", "top_img", 4),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH1_TOP, "wpe_vpp0_cach1_top", "top_img", 5),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH1_DMA, "wpe_vpp0_cach1_dma", "top_img", 6),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH2_TOP, "wpe_vpp0_cach2_top", "top_img", 7),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH2_DMA, "wpe_vpp0_cach2_dma", "top_img", 8),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH3_TOP, "wpe_vpp0_cach3_top", "top_img", 9),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_CACH3_DMA, "wpe_vpp0_cach3_dma", "top_img", 10),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_PSP, "wpe_vpp0_psp", "top_img", 11),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_PSP2, "wpe_vpp0_psp2", "top_img", 12),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_SYNC, "wpe_vpp0_sync", "top_img", 13),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_C24, "wpe_vpp0_c24", "top_img", 14),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_MDP_CROP, "wpe_vpp0_mdp_crop", "top_img", 15),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_ISP_CROP, "wpe_vpp0_isp_crop", "top_img", 16),
+ GATE_WPE_VPP0(CLK_WPE_VPP0_TOP, "wpe_vpp0_top", "top_img", 17),
+ /* WPE_VPP1 */
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VECI, "wpe_vpp0_veci", "top_img", 0),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VEC2I, "wpe_vpp0_vec2i", "top_img", 1),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_VEC3I, "wpe_vpp0_vec3i", "top_img", 2),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_WPEO, "wpe_vpp0_wpeo", "top_img", 3),
+ GATE_WPE_VPP1(CLK_WPE_VPP0_MSKO, "wpe_vpp0_msko", "top_img", 4),
+};
+
+static const struct mtk_gate wpe_vpp1_clks[] = {
+ /* WPE_VPP0 */
+ GATE_WPE_VPP0(CLK_WPE_VPP1_VGEN, "wpe_vpp1_vgen", "top_img", 0),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_EXT, "wpe_vpp1_ext", "top_img", 1),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_VFC, "wpe_vpp1_vfc", "top_img", 2),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH0_TOP, "wpe_vpp1_cach0_top", "top_img", 3),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH0_DMA, "wpe_vpp1_cach0_dma", "top_img", 4),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH1_TOP, "wpe_vpp1_cach1_top", "top_img", 5),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH1_DMA, "wpe_vpp1_cach1_dma", "top_img", 6),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH2_TOP, "wpe_vpp1_cach2_top", "top_img", 7),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH2_DMA, "wpe_vpp1_cach2_dma", "top_img", 8),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH3_TOP, "wpe_vpp1_cach3_top", "top_img", 9),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_CACH3_DMA, "wpe_vpp1_cach3_dma", "top_img", 10),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_PSP, "wpe_vpp1_psp", "top_img", 11),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_PSP2, "wpe_vpp1_psp2", "top_img", 12),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_SYNC, "wpe_vpp1_sync", "top_img", 13),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_C24, "wpe_vpp1_c24", "top_img", 14),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_MDP_CROP, "wpe_vpp1_mdp_crop", "top_img", 15),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_ISP_CROP, "wpe_vpp1_isp_crop", "top_img", 16),
+ GATE_WPE_VPP0(CLK_WPE_VPP1_TOP, "wpe_vpp1_top", "top_img", 17),
+ /* WPE_VPP1 */
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VECI, "wpe_vpp1_veci", "top_img", 0),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VEC2I, "wpe_vpp1_vec2i", "top_img", 1),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_VEC3I, "wpe_vpp1_vec3i", "top_img", 2),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_WPEO, "wpe_vpp1_wpeo", "top_img", 3),
+ GATE_WPE_VPP1(CLK_WPE_VPP1_MSKO, "wpe_vpp1_msko", "top_img", 4),
+};
+
+static const struct mtk_clk_desc wpe_desc = {
+ .clks = wpe_clks,
+ .num_clks = ARRAY_SIZE(wpe_clks),
+};
+
+static const struct mtk_clk_desc wpe_vpp0_desc = {
+ .clks = wpe_vpp0_clks,
+ .num_clks = ARRAY_SIZE(wpe_vpp0_clks),
+};
+
+static const struct mtk_clk_desc wpe_vpp1_desc = {
+ .clks = wpe_vpp1_clks,
+ .num_clks = ARRAY_SIZE(wpe_vpp1_clks),
+};
+
+static const struct of_device_id of_match_clk_mt8195_wpe[] = {
+ {
+ .compatible = "mediatek,mt8195-wpesys",
+ .data = &wpe_desc,
+ }, {
+ .compatible = "mediatek,mt8195-wpesys_vpp0",
+ .data = &wpe_vpp0_desc,
+ }, {
+ .compatible = "mediatek,mt8195-wpesys_vpp1",
+ .data = &wpe_vpp1_desc,
+ }, {
+ /* sentinel */
+ }
+};
+
+static struct platform_driver clk_mt8195_wpe_drv = {
+ .probe = mtk_clk_simple_probe,
+ .driver = {
+ .name = "clk-mt8195-wpe",
+ .of_match_table = of_match_clk_mt8195_wpe,
+ },
+};
+builtin_platform_driver(clk_mt8195_wpe_drv);
diff --git a/drivers/clk/mediatek/clk-mtk.c b/drivers/clk/mediatek/clk-mtk.c
index 4b6096c44d74..8d5791b3f460 100644
--- a/drivers/clk/mediatek/clk-mtk.c
+++ b/drivers/clk/mediatek/clk-mtk.c
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/clkdev.h>
+#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/device.h>
#include <linux/of_device.h>
@@ -42,6 +43,16 @@ err_out:
return NULL;
}
+EXPORT_SYMBOL_GPL(mtk_alloc_clk_data);
+
+void mtk_free_clk_data(struct clk_onecell_data *clk_data)
+{
+ if (!clk_data)
+ return;
+
+ kfree(clk_data->clks);
+ kfree(clk_data);
+}
void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
int num, struct clk_onecell_data *clk_data)
@@ -68,6 +79,7 @@ void mtk_clk_register_fixed_clks(const struct mtk_fixed_clk *clks,
clk_data->clks[rc->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_fixed_clks);
void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
int num, struct clk_onecell_data *clk_data)
@@ -94,6 +106,7 @@ void mtk_clk_register_factors(const struct mtk_fixed_factor *clks,
clk_data->clks[ff->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_factors);
int mtk_clk_register_gates_with_dev(struct device_node *node,
const struct mtk_gate *clks,
@@ -146,6 +159,7 @@ int mtk_clk_register_gates(struct device_node *node,
return mtk_clk_register_gates_with_dev(node,
clks, num, clk_data, NULL);
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_gates);
struct clk *mtk_clk_register_composite(const struct mtk_composite *mc,
void __iomem *base, spinlock_t *lock)
@@ -259,6 +273,7 @@ void mtk_clk_register_composites(const struct mtk_composite *mcs,
clk_data->clks[mc->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_composites);
void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
int num, void __iomem *base, spinlock_t *lock,
@@ -305,7 +320,17 @@ int mtk_clk_simple_probe(struct platform_device *pdev)
r = mtk_clk_register_gates(node, mcd->clks, mcd->num_clks, clk_data);
if (r)
- return r;
+ goto free_data;
+
+ r = of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ if (r)
+ goto free_data;
- return of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
+ return r;
+
+free_data:
+ mtk_free_clk_data(clk_data);
+ return r;
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-mtk.h b/drivers/clk/mediatek/clk-mtk.h
index 7de41c3b3206..0ff289d93452 100644
--- a/drivers/clk/mediatek/clk-mtk.h
+++ b/drivers/clk/mediatek/clk-mtk.h
@@ -202,6 +202,7 @@ void mtk_clk_register_dividers(const struct mtk_clk_divider *mcds,
struct clk_onecell_data *clk_data);
struct clk_onecell_data *mtk_alloc_clk_data(unsigned int clk_num);
+void mtk_free_clk_data(struct clk_onecell_data *clk_data);
#define HAVE_RST_BAR BIT(0)
#define PLL_AO BIT(1)
diff --git a/drivers/clk/mediatek/clk-mux.c b/drivers/clk/mediatek/clk-mux.c
index 855b0a1f7eb9..6d3a50eb7d6f 100644
--- a/drivers/clk/mediatek/clk-mux.c
+++ b/drivers/clk/mediatek/clk-mux.c
@@ -8,6 +8,7 @@
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include "clk-mtk.h"
#include "clk-mux.h"
@@ -120,6 +121,7 @@ const struct clk_ops mtk_mux_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
+EXPORT_SYMBOL_GPL(mtk_mux_clr_set_upd_ops);
const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.enable = mtk_clk_mux_enable_setclr,
@@ -128,6 +130,7 @@ const struct clk_ops mtk_mux_gate_clr_set_upd_ops = {
.get_parent = mtk_clk_mux_get_parent,
.set_parent = mtk_clk_mux_set_parent_setclr_lock,
};
+EXPORT_SYMBOL_GPL(mtk_mux_gate_clr_set_upd_ops);
static struct clk *mtk_clk_register_mux(const struct mtk_mux *mux,
struct regmap *regmap,
@@ -195,3 +198,6 @@ int mtk_clk_register_muxes(const struct mtk_mux *muxes,
return 0;
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_muxes);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/clk-pll.c b/drivers/clk/mediatek/clk-pll.c
index 7fb001a4e7d8..60d7ffa0b924 100644
--- a/drivers/clk/mediatek/clk-pll.c
+++ b/drivers/clk/mediatek/clk-pll.c
@@ -7,6 +7,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/slab.h>
#include <linux/clkdev.h>
#include <linux/delay.h>
@@ -332,7 +333,7 @@ static struct clk *mtk_clk_register_pll(const struct mtk_pll_data *data,
pll->pcw_chg_addr = pll->base_addr + REG_CON1;
if (data->tuner_reg)
pll->tuner_addr = base + data->tuner_reg;
- if (data->tuner_en_reg)
+ if (data->tuner_en_reg || data->tuner_en_bit)
pll->tuner_en_addr = base + data->tuner_en_reg;
if (data->en_reg)
pll->en_addr = base + data->en_reg;
@@ -385,3 +386,6 @@ void mtk_clk_register_plls(struct device_node *node,
clk_data->clks[pll->id] = clk;
}
}
+EXPORT_SYMBOL_GPL(mtk_clk_register_plls);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/mediatek/reset.c b/drivers/clk/mediatek/reset.c
index e562dc3c10a4..ffe464ce7ff8 100644
--- a/drivers/clk/mediatek/reset.c
+++ b/drivers/clk/mediatek/reset.c
@@ -137,3 +137,5 @@ void mtk_register_reset_controller_set_clr(struct device_node *np,
mtk_register_reset_controller_common(np, num_regs, regofs,
&mtk_reset_ops_set_clr);
}
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/clk/meson/meson8b.c b/drivers/clk/meson/meson8b.c
index a844d35b553a..cd0f5bae24d4 100644
--- a/drivers/clk/meson/meson8b.c
+++ b/drivers/clk/meson/meson8b.c
@@ -118,6 +118,56 @@ static struct clk_regmap meson8b_fixed_pll = {
},
};
+static struct clk_fixed_factor hdmi_pll_dco_in = {
+ .mult = 2,
+ .div = 1,
+ .hw.init = &(struct clk_init_data){
+ .name = "hdmi_pll_dco_in",
+ .ops = &clk_fixed_factor_ops,
+ .parent_data = &(const struct clk_parent_data) {
+ .fw_name = "xtal",
+ .index = -1,
+ },
+ .num_parents = 1,
+ },
+};
+
+/*
+ * Taken from the vendor driver for the 2970/2975MHz rates (the two only differ
+ * in the FRAC part in HHI_VID_PLL_CNTL2), where these values are identical for
+ * Meson8, Meson8b and Meson8m2. This doubles the input (or output; it is not
+ * clear which, but the result is the same) clock. The vendor driver additionally
+ * has the following comment about it: "optimise HPLL VCO 2.97GHz performance".
+ */
+static const struct reg_sequence meson8b_hdmi_pll_init_regs[] = {
+ { .reg = HHI_VID_PLL_CNTL2, .def = 0x69c84000 },
+ { .reg = HHI_VID_PLL_CNTL3, .def = 0x8a46c023 },
+ { .reg = HHI_VID_PLL_CNTL4, .def = 0x4123b100 },
+ { .reg = HHI_VID_PLL_CNTL5, .def = 0x00012385 },
+ { .reg = HHI_VID2_PLL_CNTL2, .def = 0x0430a800 },
+};
+
+static const struct pll_params_table hdmi_pll_params_table[] = {
+ PLL_PARAMS(40, 1),
+ PLL_PARAMS(42, 1),
+ PLL_PARAMS(44, 1),
+ PLL_PARAMS(45, 1),
+ PLL_PARAMS(49, 1),
+ PLL_PARAMS(52, 1),
+ PLL_PARAMS(54, 1),
+ PLL_PARAMS(56, 1),
+ PLL_PARAMS(59, 1),
+ PLL_PARAMS(60, 1),
+ PLL_PARAMS(61, 1),
+ PLL_PARAMS(62, 1),
+ PLL_PARAMS(64, 1),
+ PLL_PARAMS(66, 1),
+ PLL_PARAMS(68, 1),
+ PLL_PARAMS(71, 1),
+ PLL_PARAMS(82, 1),
+ { /* sentinel */ }
+};
+
static struct clk_regmap meson8b_hdmi_pll_dco = {
.data = &(struct meson_clk_pll_data){
.en = {
@@ -150,15 +200,16 @@ static struct clk_regmap meson8b_hdmi_pll_dco = {
.shift = 29,
.width = 1,
},
+ .table = hdmi_pll_params_table,
+ .init_regs = meson8b_hdmi_pll_init_regs,
+ .init_count = ARRAY_SIZE(meson8b_hdmi_pll_init_regs),
},
.hw.init = &(struct clk_init_data){
/* sometimes also called "HPLL" or "HPLL PLL" */
.name = "hdmi_pll_dco",
- .ops = &meson_clk_pll_ro_ops,
- .parent_data = &(const struct clk_parent_data) {
- .fw_name = "xtal",
- .name = "xtal",
- .index = -1,
+ .ops = &meson_clk_pll_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &hdmi_pll_dco_in.hw
},
.num_parents = 1,
},
@@ -173,7 +224,7 @@ static struct clk_regmap meson8b_hdmi_pll_lvds_out = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_lvds_out",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_pll_dco.hw
},
@@ -191,7 +242,7 @@ static struct clk_regmap meson8b_hdmi_pll_hdmi_out = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_pll_hdmi_out",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_pll_dco.hw
},
@@ -1045,6 +1096,23 @@ static struct clk_regmap meson8b_l2_dram_clk_gate = {
},
};
+/* also called LVDS_CLK_EN */
+static struct clk_regmap meson8b_vid_pll_lvds_en = {
+ .data = &(struct clk_regmap_gate_data){
+ .offset = HHI_VID_DIVIDER_CNTL,
+ .bit_idx = 11,
+ },
+ .hw.init = &(struct clk_init_data){
+ .name = "vid_pll_lvds_en",
+ .ops = &clk_regmap_gate_ops,
+ .parent_hws = (const struct clk_hw *[]) {
+ &meson8b_hdmi_pll_lvds_out.hw
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ },
+};
+
static struct clk_regmap meson8b_vid_pll_in_sel = {
.data = &(struct clk_regmap_mux_data){
.offset = HHI_VID_DIVIDER_CNTL,
@@ -1053,7 +1121,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/*
* TODO: depending on the SoC there is also a second parent:
* Meson8: unknown
@@ -1061,7 +1129,7 @@ static struct clk_regmap meson8b_vid_pll_in_sel = {
* Meson8m2: vid2_pll
*/
.parent_hws = (const struct clk_hw *[]) {
- &meson8b_hdmi_pll_lvds_out.hw
+ &meson8b_vid_pll_lvds_en.hw
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1075,7 +1143,7 @@ static struct clk_regmap meson8b_vid_pll_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_in_sel.hw
},
@@ -1092,7 +1160,7 @@ static struct clk_regmap meson8b_vid_pll_pre_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_pre_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_in_en.hw
},
@@ -1109,7 +1177,7 @@ static struct clk_regmap meson8b_vid_pll_post_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_post_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_pre_div.hw
},
@@ -1126,7 +1194,7 @@ static struct clk_regmap meson8b_vid_pll = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
/* TODO: parent 0x2 is vid_pll_pre_div_mult7_div2 */
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll_pre_div.hw,
@@ -1145,7 +1213,7 @@ static struct clk_regmap meson8b_vid_pll_final_div = {
},
.hw.init = &(struct clk_init_data){
.name = "vid_pll_final_div",
- .ops = &clk_regmap_divider_ro_ops,
+ .ops = &clk_regmap_divider_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vid_pll.hw
},
@@ -1172,10 +1240,10 @@ static struct clk_regmap meson8b_vclk_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1186,7 +1254,7 @@ static struct clk_regmap meson8b_vclk_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_in_sel.hw
},
@@ -1202,7 +1270,7 @@ static struct clk_regmap meson8b_vclk_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_in_en.hw
},
@@ -1218,7 +1286,7 @@ static struct clk_regmap meson8b_vclk_div1_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div1_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_en.hw
},
@@ -1248,7 +1316,7 @@ static struct clk_regmap meson8b_vclk_div2_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div2_div.hw
},
@@ -1278,7 +1346,7 @@ static struct clk_regmap meson8b_vclk_div4_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div4_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div4_div.hw
},
@@ -1308,7 +1376,7 @@ static struct clk_regmap meson8b_vclk_div6_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div6_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div6_div.hw
},
@@ -1338,7 +1406,7 @@ static struct clk_regmap meson8b_vclk_div12_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk_div12_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk_div12_div.hw
},
@@ -1355,10 +1423,10 @@ static struct clk_regmap meson8b_vclk2_in_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_mux_parent_hws),
- .flags = CLK_SET_RATE_PARENT,
+ .flags = CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT,
},
};
@@ -1369,7 +1437,7 @@ static struct clk_regmap meson8b_vclk2_clk_in_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_in_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_in_sel.hw
},
@@ -1385,7 +1453,7 @@ static struct clk_regmap meson8b_vclk2_clk_en = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_clk_in_en.hw
},
@@ -1401,7 +1469,7 @@ static struct clk_regmap meson8b_vclk2_div1_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div1_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_clk_en.hw
},
@@ -1431,7 +1499,7 @@ static struct clk_regmap meson8b_vclk2_div2_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div2_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div2_div.hw
},
@@ -1461,7 +1529,7 @@ static struct clk_regmap meson8b_vclk2_div4_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div4_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div4_div.hw
},
@@ -1491,7 +1559,7 @@ static struct clk_regmap meson8b_vclk2_div6_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div6_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div6_div.hw
},
@@ -1521,7 +1589,7 @@ static struct clk_regmap meson8b_vclk2_div12_div_gate = {
},
.hw.init = &(struct clk_init_data){
.name = "vclk2_div12_en",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_vclk2_div12_div.hw
},
@@ -1546,7 +1614,7 @@ static struct clk_regmap meson8b_cts_enct_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enct_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1560,7 +1628,7 @@ static struct clk_regmap meson8b_cts_enct = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enct",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_enct_sel.hw
},
@@ -1577,7 +1645,7 @@ static struct clk_regmap meson8b_cts_encp_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1591,7 +1659,7 @@ static struct clk_regmap meson8b_cts_encp = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encp",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_encp_sel.hw
},
@@ -1608,7 +1676,7 @@ static struct clk_regmap meson8b_cts_enci_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1622,7 +1690,7 @@ static struct clk_regmap meson8b_cts_enci = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_enci",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_enci_sel.hw
},
@@ -1639,7 +1707,7 @@ static struct clk_regmap meson8b_hdmi_tx_pixel_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1653,7 +1721,7 @@ static struct clk_regmap meson8b_hdmi_tx_pixel = {
},
.hw.init = &(struct clk_init_data){
.name = "hdmi_tx_pixel",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_hdmi_tx_pixel_sel.hw
},
@@ -1678,7 +1746,7 @@ static struct clk_regmap meson8b_cts_encl_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk2_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1692,7 +1760,7 @@ static struct clk_regmap meson8b_cts_encl = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_encl",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_encl_sel.hw
},
@@ -1709,7 +1777,7 @@ static struct clk_regmap meson8b_cts_vdac0_sel = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0_sel",
- .ops = &clk_regmap_mux_ro_ops,
+ .ops = &clk_regmap_mux_ops,
.parent_hws = meson8b_vclk2_enc_mux_parent_hws,
.num_parents = ARRAY_SIZE(meson8b_vclk2_enc_mux_parent_hws),
.flags = CLK_SET_RATE_PARENT,
@@ -1723,7 +1791,7 @@ static struct clk_regmap meson8b_cts_vdac0 = {
},
.hw.init = &(struct clk_init_data){
.name = "cts_vdac0",
- .ops = &clk_regmap_gate_ro_ops,
+ .ops = &clk_regmap_gate_ops,
.parent_hws = (const struct clk_hw *[]) {
&meson8b_cts_vdac0_sel.hw
},
@@ -2905,6 +2973,8 @@ static struct clk_hw_onecell_data meson8_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3122,6 +3192,8 @@ static struct clk_hw_onecell_data meson8b_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3341,6 +3413,8 @@ static struct clk_hw_onecell_data meson8m2_hw_onecell_data = {
[CLKID_CTS_MCLK_I958_DIV] = &meson8b_cts_mclk_i958_div.hw,
[CLKID_CTS_MCLK_I958] = &meson8b_cts_mclk_i958.hw,
[CLKID_CTS_I958] = &meson8b_cts_i958.hw,
+ [CLKID_VID_PLL_LVDS_EN] = &meson8b_vid_pll_lvds_en.hw,
+ [CLKID_HDMI_PLL_DCO_IN] = &hdmi_pll_dco_in.hw,
[CLK_NR_CLKS] = NULL,
},
.num = CLK_NR_CLKS,
@@ -3539,6 +3613,7 @@ static struct clk_regmap *const meson8b_clk_regmaps[] = {
&meson8b_cts_mclk_i958_div,
&meson8b_cts_mclk_i958,
&meson8b_cts_i958,
+ &meson8b_vid_pll_lvds_en,
};
static const struct meson8b_clk_reset_line {
diff --git a/drivers/clk/meson/meson8b.h b/drivers/clk/meson/meson8b.h
index b1a5074cf148..ce62ed47cbfc 100644
--- a/drivers/clk/meson/meson8b.h
+++ b/drivers/clk/meson/meson8b.h
@@ -51,6 +51,16 @@
#define HHI_SYS_PLL_CNTL 0x300 /* 0xc0 offset in data sheet */
#define HHI_VID_PLL_CNTL 0x320 /* 0xc8 offset in data sheet */
#define HHI_VID_PLL_CNTL2 0x324 /* 0xc9 offset in data sheet */
+#define HHI_VID_PLL_CNTL3 0x328 /* 0xca offset in data sheet */
+#define HHI_VID_PLL_CNTL4 0x32c /* 0xcb offset in data sheet */
+#define HHI_VID_PLL_CNTL5 0x330 /* 0xcc offset in data sheet */
+#define HHI_VID_PLL_CNTL6 0x334 /* 0xcd offset in data sheet */
+#define HHI_VID2_PLL_CNTL 0x380 /* 0xe0 offset in data sheet */
+#define HHI_VID2_PLL_CNTL2 0x384 /* 0xe1 offset in data sheet */
+#define HHI_VID2_PLL_CNTL3 0x388 /* 0xe2 offset in data sheet */
+#define HHI_VID2_PLL_CNTL4 0x38c /* 0xe3 offset in data sheet */
+#define HHI_VID2_PLL_CNTL5 0x390 /* 0xe4 offset in data sheet */
+#define HHI_VID2_PLL_CNTL6 0x394 /* 0xe5 offset in data sheet */
/*
* MPLL register offset taken from the S905 datasheet. Vendor kernel source
@@ -107,14 +117,11 @@
#define CLKID_PERIPH_SEL 125
#define CLKID_AXI_SEL 127
#define CLKID_L2_DRAM_SEL 129
-#define CLKID_HDMI_PLL_LVDS_OUT 131
-#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_HDMI_PLL_LVDS_OUT 131
#define CLKID_VID_PLL_IN_SEL 133
#define CLKID_VID_PLL_IN_EN 134
#define CLKID_VID_PLL_PRE_DIV 135
#define CLKID_VID_PLL_POST_DIV 136
-#define CLKID_VID_PLL_FINAL_DIV 137
-#define CLKID_VCLK_IN_SEL 138
#define CLKID_VCLK_IN_EN 139
#define CLKID_VCLK_DIV1 140
#define CLKID_VCLK_DIV2_DIV 141
@@ -125,7 +132,6 @@
#define CLKID_VCLK_DIV6 146
#define CLKID_VCLK_DIV12_DIV 147
#define CLKID_VCLK_DIV12 148
-#define CLKID_VCLK2_IN_SEL 149
#define CLKID_VCLK2_IN_EN 150
#define CLKID_VCLK2_DIV1 151
#define CLKID_VCLK2_DIV2_DIV 152
@@ -137,17 +143,11 @@
#define CLKID_VCLK2_DIV12_DIV 158
#define CLKID_VCLK2_DIV12 159
#define CLKID_CTS_ENCT_SEL 160
-#define CLKID_CTS_ENCT 161
#define CLKID_CTS_ENCP_SEL 162
-#define CLKID_CTS_ENCP 163
#define CLKID_CTS_ENCI_SEL 164
-#define CLKID_CTS_ENCI 165
#define CLKID_HDMI_TX_PIXEL_SEL 166
-#define CLKID_HDMI_TX_PIXEL 167
#define CLKID_CTS_ENCL_SEL 168
-#define CLKID_CTS_ENCL 169
#define CLKID_CTS_VDAC0_SEL 170
-#define CLKID_CTS_VDAC0 171
#define CLKID_HDMI_SYS_SEL 172
#define CLKID_HDMI_SYS_DIV 173
#define CLKID_MALI_0_SEL 175
@@ -182,8 +182,10 @@
#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_VCLK_EN 214
#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217
-#define CLK_NR_CLKS 216
+#define CLK_NR_CLKS 218
/*
* include the CLKID and RESETID that have
diff --git a/drivers/clk/mvebu/ap-cpu-clk.c b/drivers/clk/mvebu/ap-cpu-clk.c
index 08ba59ec3fb1..71bdd7c3ff03 100644
--- a/drivers/clk/mvebu/ap-cpu-clk.c
+++ b/drivers/clk/mvebu/ap-cpu-clk.c
@@ -256,12 +256,15 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
int cpu, err;
err = of_property_read_u32(dn, "reg", &cpu);
- if (WARN_ON(err))
+ if (WARN_ON(err)) {
+ of_node_put(dn);
return err;
+ }
/* If cpu2 or cpu3 is enabled */
if (cpu & APN806_CLUSTER_NUM_MASK) {
nclusters = 2;
+ of_node_put(dn);
break;
}
}
@@ -288,8 +291,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
int cpu, err;
err = of_property_read_u32(dn, "reg", &cpu);
- if (WARN_ON(err))
+ if (WARN_ON(err)) {
+ of_node_put(dn);
return err;
+ }
cluster_index = cpu & APN806_CLUSTER_NUM_MASK;
cluster_index >>= APN806_CLUSTER_NUM_OFFSET;
@@ -301,6 +306,7 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
parent = of_clk_get(np, cluster_index);
if (IS_ERR(parent)) {
dev_err(dev, "Could not get the clock parent\n");
+ of_node_put(dn);
return -EINVAL;
}
parent_name = __clk_get_name(parent);
@@ -319,8 +325,10 @@ static int ap_cpu_clock_probe(struct platform_device *pdev)
init.parent_names = &parent_name;
ret = devm_clk_hw_register(dev, &ap_cpu_clk[cluster_index].hw);
- if (ret)
+ if (ret) {
+ of_node_put(dn);
return ret;
+ }
ap_cpu_data->hws[cluster_index] = &ap_cpu_clk[cluster_index].hw;
}
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index 9ef007b3cf9b..74efc82127e1 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -324,6 +324,14 @@ config MSM_MMCC_8998
Say Y if you want to support multimedia devices such as display,
graphics, video encode/decode, camera, etc.
+config QCM_GCC_2290
+ tristate "QCM2290 Global Clock Controller"
+ select QCOM_GDSC
+ help
+ Support for the global clock controller on QCM2290 devices.
+ Say Y if you want to use multimedia devices or peripheral
+ devices such as UART, SPI, I2C, USB, SD/eMMC, etc.
+
config QCS_GCC_404
tristate "QCS404 Global Clock Controller"
help
@@ -340,6 +348,15 @@ config SC_CAMCC_7180
Say Y if you want to support camera devices and functionality such as
capturing pictures.
+config SC_CAMCC_7280
+ tristate "SC7280 Camera Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the camera clock controller on Qualcomm Technologies, Inc.
+ SC7280 devices.
+ Say Y if you want to support camera devices and functionality such as
+ capturing pictures.
+
config SC_DISPCC_7180
tristate "SC7180 Display Clock Controller"
select SC_GCC_7180
@@ -385,15 +402,6 @@ config SC_GCC_8180X
Say Y if you want to use peripheral devices such as UART, SPI,
I2C, USB, UFS, SDCC, etc.
-config SC_LPASS_CORECC_7180
- tristate "SC7180 LPASS Core Clock Controller"
- select SC_GCC_7180
- help
- Support for the LPASS(Low Power Audio Subsystem) core clock controller
- on SC7180 devices.
- Say Y if you want to use LPASS clocks and power domains of the LPASS
- core clock controller.
-
config SC_GPUCC_7180
tristate "SC7180 Graphics Clock Controller"
select SC_GCC_7180
@@ -410,6 +418,23 @@ config SC_GPUCC_7280
Say Y if you want to support graphics controller devices and
functionality such as 3D graphics.
+config SC_LPASSCC_7280
+ tristate "SC7280 Low Power Audio Subsystem (LPASS) Clock Controller"
+ select SC_GCC_7280
+ help
+ Support for the LPASS clock controller on SC7280 devices.
+ Say Y if you want to use the LPASS branch clocks of the LPASS clock
+ controller to reset the LPASS subsystem.
+
+config SC_LPASS_CORECC_7180
+ tristate "SC7180 LPASS Core Clock Controller"
+ select SC_GCC_7180
+ help
+ Support for the LPASS (Low Power Audio Subsystem) core clock controller
+ on SC7180 devices.
+ Say Y if you want to use LPASS clocks and power domains of the LPASS
+ core clock controller.
+
config SC_MSS_7180
tristate "SC7180 Modem Clock Controller"
select SC_GCC_7180
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 9825ef843f4a..1718c34d3551 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -54,10 +54,12 @@ obj-$(CONFIG_QCOM_CLK_APCS_SDX55) += apcs-sdx55.o
obj-$(CONFIG_QCOM_CLK_RPM) += clk-rpm.o
obj-$(CONFIG_QCOM_CLK_RPMH) += clk-rpmh.o
obj-$(CONFIG_QCOM_CLK_SMD_RPM) += clk-smd-rpm.o
+obj-$(CONFIG_QCM_GCC_2290) += gcc-qcm2290.o
obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o
obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o
obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o
obj-$(CONFIG_SC_CAMCC_7180) += camcc-sc7180.o
+obj-$(CONFIG_SC_CAMCC_7280) += camcc-sc7280.o
obj-$(CONFIG_SC_DISPCC_7180) += dispcc-sc7180.o
obj-$(CONFIG_SC_DISPCC_7280) += dispcc-sc7280.o
obj-$(CONFIG_SC_GCC_7180) += gcc-sc7180.o
@@ -65,6 +67,7 @@ obj-$(CONFIG_SC_GCC_7280) += gcc-sc7280.o
obj-$(CONFIG_SC_GCC_8180X) += gcc-sc8180x.o
obj-$(CONFIG_SC_GPUCC_7180) += gpucc-sc7180.o
obj-$(CONFIG_SC_GPUCC_7280) += gpucc-sc7280.o
+obj-$(CONFIG_SC_LPASSCC_7280) += lpasscc-sc7280.o
obj-$(CONFIG_SC_LPASS_CORECC_7180) += lpasscorecc-sc7180.o
obj-$(CONFIG_SC_MSS_7180) += mss-sc7180.o
obj-$(CONFIG_SC_VIDEOCC_7180) += videocc-sc7180.o
diff --git a/drivers/clk/qcom/a53-pll.c b/drivers/clk/qcom/a53-pll.c
index 9e6decb9c26f..329d2c5356d8 100644
--- a/drivers/clk/qcom/a53-pll.c
+++ b/drivers/clk/qcom/a53-pll.c
@@ -90,7 +90,6 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct regmap *regmap;
- struct resource *res;
struct clk_pll *pll;
void __iomem *base;
struct clk_init_data init = { };
@@ -100,8 +99,7 @@ static int qcom_a53pll_probe(struct platform_device *pdev)
if (!pll)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
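
The a53-pll.c hunk above folds platform_get_resource() plus devm_ioremap_resource() into the single devm_platform_ioremap_resource() helper, which looks up the IORESOURCE_MEM resource at the given index and ioremaps it in one call. A minimal sketch of the resulting probe shape (example_probe() and its surrounding driver are hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch only: shows the one-call MMIO mapping the hunk above switches to. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... program registers through base ... */
	return 0;
}
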
diff --git a/drivers/clk/qcom/camcc-sc7280.c b/drivers/clk/qcom/camcc-sc7280.c
new file mode 100644
index 000000000000..ec163ea769f5
--- /dev/null
+++ b/drivers/clk/qcom/camcc-sc7280.c
@@ -0,0 +1,2484 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,camcc-sc7280.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_CAM_CC_PLL0_OUT_EVEN,
+ P_CAM_CC_PLL0_OUT_MAIN,
+ P_CAM_CC_PLL0_OUT_ODD,
+ P_CAM_CC_PLL1_OUT_EVEN,
+ P_CAM_CC_PLL2_OUT_AUX2,
+ P_CAM_CC_PLL2_OUT_EARLY,
+ P_CAM_CC_PLL3_OUT_EVEN,
+ P_CAM_CC_PLL4_OUT_EVEN,
+ P_CAM_CC_PLL5_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_EVEN,
+ P_CAM_CC_PLL6_OUT_MAIN,
+ P_CAM_CC_PLL6_OUT_ODD,
+ P_SLEEP_CLK,
+};
+
+static struct pll_vco lucid_vco[] = {
+ { 249600000, 2000000000, 0 },
+};
+
+static struct pll_vco zonda_vco[] = {
+ { 595200000UL, 3600000000UL, 0 },
+};
+
+/* 1200MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll0_config = {
+ .l = 0x3E,
+ .alpha = 0x8000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00003101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll0 = {
+ .offset = 0x0,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_even = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll0_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll0_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll0_out_odd = {
+ .offset = 0x0,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll0_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll0_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll0_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll0.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 600MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll1_config = {
+ .l = 0x1F,
+ .alpha = 0x4000,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll1 = {
+ .offset = 0x1000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll1_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll1_out_even = {
+ .offset = 0x1000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll1_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll1_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll1_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll1.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 1440MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll2_config = {
+ .l = 0x4B,
+ .alpha = 0x0,
+ .config_ctl_val = 0x08200800,
+ .config_ctl_hi_val = 0x05022011,
+ .config_ctl_hi1_val = 0x08000000,
+ .user_ctl_val = 0x00000301,
+};
+
+static struct clk_alpha_pll cam_cc_pll2 = {
+ .offset = 0x2000,
+ .vco_table = zonda_vco,
+ .num_vco = ARRAY_SIZE(zonda_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_zonda_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll2_out_aux2[] = {
+ { 0x3, 4 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll2_out_aux2 = {
+ .offset = 0x2000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll2_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll2_out_aux2),
+ .width = 2,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_ZONDA],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll2_out_aux2",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll2.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_zonda_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll3_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll3 = {
+ .offset = 0x3000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll3_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll3_out_even = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll3_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll3_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll3_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll3.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll4_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll4 = {
+ .offset = 0x4000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll4_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll4_out_even = {
+ .offset = 0x4000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll4_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll4_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll4_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll4.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 760MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll5_config = {
+ .l = 0x27,
+ .alpha = 0x9555,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00000101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll5 = {
+ .offset = 0x5000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll5_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll5_out_even = {
+ .offset = 0x5000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll5_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll5_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll5_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll5.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+/* 960MHz Configuration */
+static const struct alpha_pll_config cam_cc_pll6_config = {
+ .l = 0x32,
+ .alpha = 0x0,
+ .config_ctl_val = 0x20485699,
+ .config_ctl_hi_val = 0x00002261,
+ .config_ctl_hi1_val = 0x329A299C,
+ .user_ctl_val = 0x00003101,
+ .user_ctl_hi_val = 0x00000805,
+ .user_ctl_hi1_val = 0x00000000,
+};
+
+static struct clk_alpha_pll cam_cc_pll6 = {
+ .offset = 0x6000,
+ .vco_table = lucid_vco,
+ .num_vco = ARRAY_SIZE(lucid_vco),
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr = {
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_lucid_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_even[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_even = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_cam_cc_pll6_out_even,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_even),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6_out_even",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct clk_div_table post_div_table_cam_cc_pll6_out_odd[] = {
+ { 0x3, 3 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv cam_cc_pll6_out_odd = {
+ .offset = 0x6000,
+ .post_div_shift = 12,
+ .post_div_table = post_div_table_cam_cc_pll6_out_odd,
+ .num_post_div = ARRAY_SIZE(post_div_table_cam_cc_pll6_out_odd),
+ .width = 4,
+ .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_pll6_out_odd",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_pll6.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_lucid_ops,
+ },
+};
+
+static const struct parent_map cam_cc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_MAIN, 4 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL2_OUT_AUX2, 3 },
+ { P_CAM_CC_PLL2_OUT_EARLY, 5 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll2_out_aux2.clkr.hw },
+ { .hw = &cam_cc_pll2.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL0_OUT_MAIN, 1 },
+ { P_CAM_CC_PLL0_OUT_EVEN, 2 },
+ { P_CAM_CC_PLL0_OUT_ODD, 3 },
+ { P_CAM_CC_PLL6_OUT_EVEN, 5 },
+ { P_CAM_CC_PLL6_OUT_ODD, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll0.clkr.hw },
+ { .hw = &cam_cc_pll0_out_even.clkr.hw },
+ { .hw = &cam_cc_pll0_out_odd.clkr.hw },
+ { .hw = &cam_cc_pll6_out_even.clkr.hw },
+ { .hw = &cam_cc_pll6_out_odd.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL3_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll3_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL4_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll4_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL5_OUT_EVEN, 6 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll5_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_CAM_CC_PLL1_OUT_EVEN, 4 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &cam_cc_pll1_out_even.clkr.hw },
+};
+
+static const struct parent_map cam_cc_parent_map_8[] = {
+ { P_SLEEP_CLK, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_8[] = {
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map cam_cc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+};
+
+static const struct clk_parent_data cam_cc_parent_data_9_ao[] = {
+ { .fw_name = "bi_tcxo_ao" },
+};
+
+static const struct freq_tbl ftbl_cam_cc_bps_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_ODD, 4, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_ODD, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_bps_clk_src = {
+ .cmd_rcgr = 0x7010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_camnoc_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_CAM_CC_PLL0_OUT_EVEN, 4, 0, 0),
+ F(240000000, P_CAM_CC_PLL6_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_camnoc_axi_clk_src = {
+ .cmd_rcgr = 0xc124,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_camnoc_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cci_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_CAM_CC_PLL0_OUT_EVEN, 16, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cci_0_clk_src = {
+ .cmd_rcgr = 0xc0e0,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_cci_1_clk_src = {
+ .cmd_rcgr = 0xc0fc,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_cci_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_cphy_rx_clk_src = {
+ .cmd_rcgr = 0xa064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cphy_rx_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0xe0ac,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0xe0d0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi2phytimer_clk_src = {
+ .cmd_rcgr = 0xe0f4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi3phytimer_clk_src = {
+ .cmd_rcgr = 0xe11c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_csi4phytimer_clk_src = {
+ .cmd_rcgr = 0xe140,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi4phytimer_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_fast_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(50000000, P_CAM_CC_PLL0_OUT_EVEN, 12, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(200000000, P_CAM_CC_PLL0_OUT_EVEN, 3, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_MAIN, 4, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_fast_ahb_clk_src = {
+ .cmd_rcgr = 0x703c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_fast_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_fast_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_icp_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_icp_clk_src = {
+ .cmd_rcgr = 0xc0b8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_icp_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL3_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_clk_src = {
+ .cmd_rcgr = 0xa010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_4,
+ .freq_tbl = ftbl_cam_cc_ife_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk_src",
+ .parent_data = cam_cc_parent_data_4,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_4),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_1_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL4_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_1_clk_src = {
+ .cmd_rcgr = 0xb010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_5,
+ .freq_tbl = ftbl_cam_cc_ife_1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk_src",
+ .parent_data = cam_cc_parent_data_5,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_5),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(75000000, P_CAM_CC_PLL0_OUT_EVEN, 8, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_EVEN, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_0_csid_clk_src = {
+ .cmd_rcgr = 0xa03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_1_csid_clk_src = {
+ .cmd_rcgr = 0xb03c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(380000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(510000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(637000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ F(760000000, P_CAM_CC_PLL5_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_2_clk_src = {
+ .cmd_rcgr = 0xb07c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_6,
+ .freq_tbl = ftbl_cam_cc_ife_2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_clk_src",
+ .parent_data = cam_cc_parent_data_6,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_2_csid_clk_src = {
+ .cmd_rcgr = 0xb0a8,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ife_lite_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_ODD, 1, 0, 0),
+ F(480000000, P_CAM_CC_PLL6_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL0_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_clk_src = {
+ .cmd_rcgr = 0xc004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_0_csid_clk_src = {
+ .cmd_rcgr = 0xc020,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_clk_src = {
+ .cmd_rcgr = 0xc048,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_ife_lite_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_ife_lite_1_csid_clk_src = {
+ .cmd_rcgr = 0xc064,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_1,
+ .freq_tbl = ftbl_cam_cc_ife_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_csid_clk_src",
+ .parent_data = cam_cc_parent_data_1,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_1),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_ipe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(300000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(430000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(520000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ F(600000000, P_CAM_CC_PLL1_OUT_EVEN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_ipe_0_clk_src = {
+ .cmd_rcgr = 0x8010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_7,
+ .freq_tbl = ftbl_cam_cc_ipe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk_src",
+ .parent_data = cam_cc_parent_data_7,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_7),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_jpeg_clk_src = {
+ .cmd_rcgr = 0xc08c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_bps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_lrme_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_CAM_CC_PLL0_OUT_EVEN, 6, 0, 0),
+ F(240000000, P_CAM_CC_PLL6_OUT_EVEN, 2, 0, 0),
+ F(300000000, P_CAM_CC_PLL0_OUT_EVEN, 2, 0, 0),
+ F(320000000, P_CAM_CC_PLL6_OUT_ODD, 1, 0, 0),
+ F(400000000, P_CAM_CC_PLL0_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_lrme_clk_src = {
+ .cmd_rcgr = 0xc150,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_3,
+ .freq_tbl = ftbl_cam_cc_lrme_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk_src",
+ .parent_data = cam_cc_parent_data_3,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_3),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_mclk0_clk_src[] = {
+ F(19200000, P_CAM_CC_PLL2_OUT_EARLY, 1, 1, 75),
+ F(24000000, P_CAM_CC_PLL2_OUT_EARLY, 10, 1, 6),
+ F(34285714, P_CAM_CC_PLL2_OUT_EARLY, 2, 1, 21),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_mclk0_clk_src = {
+ .cmd_rcgr = 0xe000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk1_clk_src = {
+ .cmd_rcgr = 0xe01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk2_clk_src = {
+ .cmd_rcgr = 0xe038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk3_clk_src = {
+ .cmd_rcgr = 0xe054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk4_clk_src = {
+ .cmd_rcgr = 0xe070,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static struct clk_rcg2 cam_cc_mclk5_clk_src = {
+ .cmd_rcgr = 0xe08c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_2,
+ .freq_tbl = ftbl_cam_cc_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk5_clk_src",
+ .parent_data = cam_cc_parent_data_2,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_2),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_sleep_clk_src[] = {
+ F(32000, P_SLEEP_CLK, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_sleep_clk_src = {
+ .cmd_rcgr = 0xc1c0,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_8,
+ .freq_tbl = ftbl_cam_cc_sleep_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sleep_clk_src",
+ .parent_data = cam_cc_parent_data_8,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_slow_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(80000000, P_CAM_CC_PLL0_OUT_EVEN, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_slow_ahb_clk_src = {
+ .cmd_rcgr = 0x7058,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_0,
+ .freq_tbl = ftbl_cam_cc_slow_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_slow_ahb_clk_src",
+ .parent_data = cam_cc_parent_data_0,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_0),
+ .ops = &clk_rcg2_shared_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_cam_cc_xo_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 cam_cc_xo_clk_src = {
+ .cmd_rcgr = 0xc1a4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = cam_cc_parent_map_9,
+ .freq_tbl = ftbl_cam_cc_xo_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "cam_cc_xo_clk_src",
+ .parent_data = cam_cc_parent_data_9_ao,
+ .num_parents = ARRAY_SIZE(cam_cc_parent_data_9_ao),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch cam_cc_bps_ahb_clk = {
+ .halt_reg = 0x7070,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7070,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_areg_clk = {
+ .halt_reg = 0x7054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_axi_clk = {
+ .halt_reg = 0x7038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_bps_clk = {
+ .halt_reg = 0x7028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x7028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_bps_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_bps_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_axi_clk = {
+ .halt_reg = 0xc140,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc140,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_camnoc_dcd_xo_clk = {
+ .halt_reg = 0xc148,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc148,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_camnoc_dcd_xo_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_0_clk = {
+ .halt_reg = 0xc0f8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0f8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cci_1_clk = {
+ .halt_reg = 0xc114,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc114,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cci_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cci_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_core_ahb_clk = {
+ .halt_reg = 0xc1a0,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0xc1a0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_core_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_cpas_ahb_clk = {
+ .halt_reg = 0xc11c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc11c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_cpas_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi0phytimer_clk = {
+ .halt_reg = 0xe0c4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0c4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi0phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi0phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi1phytimer_clk = {
+ .halt_reg = 0xe0e8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0e8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi1phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi1phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi2phytimer_clk = {
+ .halt_reg = 0xe10c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe10c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi2phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi2phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi3phytimer_clk = {
+ .halt_reg = 0xe134,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe134,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi3phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi3phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csi4phytimer_clk = {
+ .halt_reg = 0xe158,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe158,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csi4phytimer_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_csi4phytimer_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy0_clk = {
+ .halt_reg = 0xe0c8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0c8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy1_clk = {
+ .halt_reg = 0xe0ec,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0ec,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy2_clk = {
+ .halt_reg = 0xe110,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe110,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy3_clk = {
+ .halt_reg = 0xe138,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe138,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_csiphy4_clk = {
+ .halt_reg = 0xe15c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe15c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_csiphy4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_gdsc_clk = {
+ .halt_reg = 0xc1bc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc1bc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_gdsc_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_xo_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_ahb_clk = {
+ .halt_reg = 0xc0d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_icp_clk = {
+ .halt_reg = 0xc0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_icp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_icp_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_axi_clk = {
+ .halt_reg = 0xa080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_clk = {
+ .halt_reg = 0xa028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_cphy_rx_clk = {
+ .halt_reg = 0xa07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_csid_clk = {
+ .halt_reg = 0xa054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_0_dsp_clk = {
+ .halt_reg = 0xa038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xa038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_0_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_axi_clk = {
+ .halt_reg = 0xb068,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb068,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_clk = {
+ .halt_reg = 0xb028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_cphy_rx_clk = {
+ .halt_reg = 0xb064,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb064,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_csid_clk = {
+ .halt_reg = 0xb054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_1_dsp_clk = {
+ .halt_reg = 0xb038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_1_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_axi_clk = {
+ .halt_reg = 0xb0d4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0d4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_clk = {
+ .halt_reg = 0xb094,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb094,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_cphy_rx_clk = {
+ .halt_reg = 0xb0d0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0d0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_csid_clk = {
+ .halt_reg = 0xb0c0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0c0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_2_dsp_clk = {
+ .halt_reg = 0xb0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xb0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_2_dsp_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_clk = {
+ .halt_reg = 0xc01c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc01c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_cphy_rx_clk = {
+ .halt_reg = 0xc040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_0_csid_clk = {
+ .halt_reg = 0xc038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_0_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_0_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_clk = {
+ .halt_reg = 0xc060,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc060,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_cphy_rx_clk = {
+ .halt_reg = 0xc084,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_cphy_rx_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_cphy_rx_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ife_lite_1_csid_clk = {
+ .halt_reg = 0xc07c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc07c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ife_lite_1_csid_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ife_lite_1_csid_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_ahb_clk = {
+ .halt_reg = 0x8040,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8040,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_ahb_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_slow_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_areg_clk = {
+ .halt_reg = 0x803c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x803c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_areg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_fast_ahb_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_axi_clk = {
+ .halt_reg = 0x8038,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8038,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_axi_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_camnoc_axi_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_ipe_0_clk = {
+ .halt_reg = 0x8028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x8028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_ipe_0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_ipe_0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_jpeg_clk = {
+ .halt_reg = 0xc0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_jpeg_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_jpeg_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_lrme_clk = {
+ .halt_reg = 0xc168,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc168,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_lrme_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_lrme_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk0_clk = {
+ .halt_reg = 0xe018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk0_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk0_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk1_clk = {
+ .halt_reg = 0xe034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk1_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk1_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk2_clk = {
+ .halt_reg = 0xe050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk2_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk2_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk3_clk = {
+ .halt_reg = 0xe06c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe06c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk3_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk3_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk4_clk = {
+ .halt_reg = 0xe088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk4_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk4_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_mclk5_clk = {
+ .halt_reg = 0xe0a4,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xe0a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_mclk5_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_mclk5_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch cam_cc_sleep_clk = {
+ .halt_reg = 0xc1d8,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0xc1d8,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "cam_cc_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .hw = &cam_cc_sleep_clk_src.clkr.hw,
+ },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc cam_cc_titan_top_gdsc = {
+ .gdscr = 0xc194,
+ .pd = {
+ .name = "cam_cc_titan_top_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_bps_gdsc = {
+ .gdscr = 0x7004,
+ .pd = {
+ .name = "cam_cc_bps_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_0_gdsc = {
+ .gdscr = 0xa004,
+ .pd = {
+ .name = "cam_cc_ife_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_1_gdsc = {
+ .gdscr = 0xb004,
+ .pd = {
+ .name = "cam_cc_ife_1_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ife_2_gdsc = {
+ .gdscr = 0xb070,
+ .pd = {
+ .name = "cam_cc_ife_2_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = RETAIN_FF_ENABLE,
+};
+
+static struct gdsc cam_cc_ipe_0_gdsc = {
+ .gdscr = 0x8004,
+ .pd = {
+ .name = "cam_cc_ipe_0_gdsc",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL | RETAIN_FF_ENABLE,
+};
+
+static struct clk_regmap *cam_cc_sc7280_clocks[] = {
+ [CAM_CC_BPS_AHB_CLK] = &cam_cc_bps_ahb_clk.clkr,
+ [CAM_CC_BPS_AREG_CLK] = &cam_cc_bps_areg_clk.clkr,
+ [CAM_CC_BPS_AXI_CLK] = &cam_cc_bps_axi_clk.clkr,
+ [CAM_CC_BPS_CLK] = &cam_cc_bps_clk.clkr,
+ [CAM_CC_BPS_CLK_SRC] = &cam_cc_bps_clk_src.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK] = &cam_cc_camnoc_axi_clk.clkr,
+ [CAM_CC_CAMNOC_AXI_CLK_SRC] = &cam_cc_camnoc_axi_clk_src.clkr,
+ [CAM_CC_CAMNOC_DCD_XO_CLK] = &cam_cc_camnoc_dcd_xo_clk.clkr,
+ [CAM_CC_CCI_0_CLK] = &cam_cc_cci_0_clk.clkr,
+ [CAM_CC_CCI_0_CLK_SRC] = &cam_cc_cci_0_clk_src.clkr,
+ [CAM_CC_CCI_1_CLK] = &cam_cc_cci_1_clk.clkr,
+ [CAM_CC_CCI_1_CLK_SRC] = &cam_cc_cci_1_clk_src.clkr,
+ [CAM_CC_CORE_AHB_CLK] = &cam_cc_core_ahb_clk.clkr,
+ [CAM_CC_CPAS_AHB_CLK] = &cam_cc_cpas_ahb_clk.clkr,
+ [CAM_CC_CPHY_RX_CLK_SRC] = &cam_cc_cphy_rx_clk_src.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK] = &cam_cc_csi0phytimer_clk.clkr,
+ [CAM_CC_CSI0PHYTIMER_CLK_SRC] = &cam_cc_csi0phytimer_clk_src.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK] = &cam_cc_csi1phytimer_clk.clkr,
+ [CAM_CC_CSI1PHYTIMER_CLK_SRC] = &cam_cc_csi1phytimer_clk_src.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK] = &cam_cc_csi2phytimer_clk.clkr,
+ [CAM_CC_CSI2PHYTIMER_CLK_SRC] = &cam_cc_csi2phytimer_clk_src.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK] = &cam_cc_csi3phytimer_clk.clkr,
+ [CAM_CC_CSI3PHYTIMER_CLK_SRC] = &cam_cc_csi3phytimer_clk_src.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK] = &cam_cc_csi4phytimer_clk.clkr,
+ [CAM_CC_CSI4PHYTIMER_CLK_SRC] = &cam_cc_csi4phytimer_clk_src.clkr,
+ [CAM_CC_CSIPHY0_CLK] = &cam_cc_csiphy0_clk.clkr,
+ [CAM_CC_CSIPHY1_CLK] = &cam_cc_csiphy1_clk.clkr,
+ [CAM_CC_CSIPHY2_CLK] = &cam_cc_csiphy2_clk.clkr,
+ [CAM_CC_CSIPHY3_CLK] = &cam_cc_csiphy3_clk.clkr,
+ [CAM_CC_CSIPHY4_CLK] = &cam_cc_csiphy4_clk.clkr,
+ [CAM_CC_FAST_AHB_CLK_SRC] = &cam_cc_fast_ahb_clk_src.clkr,
+ [CAM_CC_GDSC_CLK] = &cam_cc_gdsc_clk.clkr,
+ [CAM_CC_ICP_AHB_CLK] = &cam_cc_icp_ahb_clk.clkr,
+ [CAM_CC_ICP_CLK] = &cam_cc_icp_clk.clkr,
+ [CAM_CC_ICP_CLK_SRC] = &cam_cc_icp_clk_src.clkr,
+ [CAM_CC_IFE_0_AXI_CLK] = &cam_cc_ife_0_axi_clk.clkr,
+ [CAM_CC_IFE_0_CLK] = &cam_cc_ife_0_clk.clkr,
+ [CAM_CC_IFE_0_CLK_SRC] = &cam_cc_ife_0_clk_src.clkr,
+ [CAM_CC_IFE_0_CPHY_RX_CLK] = &cam_cc_ife_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK] = &cam_cc_ife_0_csid_clk.clkr,
+ [CAM_CC_IFE_0_CSID_CLK_SRC] = &cam_cc_ife_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_0_DSP_CLK] = &cam_cc_ife_0_dsp_clk.clkr,
+ [CAM_CC_IFE_1_AXI_CLK] = &cam_cc_ife_1_axi_clk.clkr,
+ [CAM_CC_IFE_1_CLK] = &cam_cc_ife_1_clk.clkr,
+ [CAM_CC_IFE_1_CLK_SRC] = &cam_cc_ife_1_clk_src.clkr,
+ [CAM_CC_IFE_1_CPHY_RX_CLK] = &cam_cc_ife_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK] = &cam_cc_ife_1_csid_clk.clkr,
+ [CAM_CC_IFE_1_CSID_CLK_SRC] = &cam_cc_ife_1_csid_clk_src.clkr,
+ [CAM_CC_IFE_1_DSP_CLK] = &cam_cc_ife_1_dsp_clk.clkr,
+ [CAM_CC_IFE_2_AXI_CLK] = &cam_cc_ife_2_axi_clk.clkr,
+ [CAM_CC_IFE_2_CLK] = &cam_cc_ife_2_clk.clkr,
+ [CAM_CC_IFE_2_CLK_SRC] = &cam_cc_ife_2_clk_src.clkr,
+ [CAM_CC_IFE_2_CPHY_RX_CLK] = &cam_cc_ife_2_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK] = &cam_cc_ife_2_csid_clk.clkr,
+ [CAM_CC_IFE_2_CSID_CLK_SRC] = &cam_cc_ife_2_csid_clk_src.clkr,
+ [CAM_CC_IFE_2_DSP_CLK] = &cam_cc_ife_2_dsp_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK] = &cam_cc_ife_lite_0_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CLK_SRC] = &cam_cc_ife_lite_0_clk_src.clkr,
+ [CAM_CC_IFE_LITE_0_CPHY_RX_CLK] = &cam_cc_ife_lite_0_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK] = &cam_cc_ife_lite_0_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_0_CSID_CLK_SRC] = &cam_cc_ife_lite_0_csid_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CLK] = &cam_cc_ife_lite_1_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CLK_SRC] = &cam_cc_ife_lite_1_clk_src.clkr,
+ [CAM_CC_IFE_LITE_1_CPHY_RX_CLK] = &cam_cc_ife_lite_1_cphy_rx_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK] = &cam_cc_ife_lite_1_csid_clk.clkr,
+ [CAM_CC_IFE_LITE_1_CSID_CLK_SRC] = &cam_cc_ife_lite_1_csid_clk_src.clkr,
+ [CAM_CC_IPE_0_AHB_CLK] = &cam_cc_ipe_0_ahb_clk.clkr,
+ [CAM_CC_IPE_0_AREG_CLK] = &cam_cc_ipe_0_areg_clk.clkr,
+ [CAM_CC_IPE_0_AXI_CLK] = &cam_cc_ipe_0_axi_clk.clkr,
+ [CAM_CC_IPE_0_CLK] = &cam_cc_ipe_0_clk.clkr,
+ [CAM_CC_IPE_0_CLK_SRC] = &cam_cc_ipe_0_clk_src.clkr,
+ [CAM_CC_JPEG_CLK] = &cam_cc_jpeg_clk.clkr,
+ [CAM_CC_JPEG_CLK_SRC] = &cam_cc_jpeg_clk_src.clkr,
+ [CAM_CC_LRME_CLK] = &cam_cc_lrme_clk.clkr,
+ [CAM_CC_LRME_CLK_SRC] = &cam_cc_lrme_clk_src.clkr,
+ [CAM_CC_MCLK0_CLK] = &cam_cc_mclk0_clk.clkr,
+ [CAM_CC_MCLK0_CLK_SRC] = &cam_cc_mclk0_clk_src.clkr,
+ [CAM_CC_MCLK1_CLK] = &cam_cc_mclk1_clk.clkr,
+ [CAM_CC_MCLK1_CLK_SRC] = &cam_cc_mclk1_clk_src.clkr,
+ [CAM_CC_MCLK2_CLK] = &cam_cc_mclk2_clk.clkr,
+ [CAM_CC_MCLK2_CLK_SRC] = &cam_cc_mclk2_clk_src.clkr,
+ [CAM_CC_MCLK3_CLK] = &cam_cc_mclk3_clk.clkr,
+ [CAM_CC_MCLK3_CLK_SRC] = &cam_cc_mclk3_clk_src.clkr,
+ [CAM_CC_MCLK4_CLK] = &cam_cc_mclk4_clk.clkr,
+ [CAM_CC_MCLK4_CLK_SRC] = &cam_cc_mclk4_clk_src.clkr,
+ [CAM_CC_MCLK5_CLK] = &cam_cc_mclk5_clk.clkr,
+ [CAM_CC_MCLK5_CLK_SRC] = &cam_cc_mclk5_clk_src.clkr,
+ [CAM_CC_PLL0] = &cam_cc_pll0.clkr,
+ [CAM_CC_PLL0_OUT_EVEN] = &cam_cc_pll0_out_even.clkr,
+ [CAM_CC_PLL0_OUT_ODD] = &cam_cc_pll0_out_odd.clkr,
+ [CAM_CC_PLL1] = &cam_cc_pll1.clkr,
+ [CAM_CC_PLL1_OUT_EVEN] = &cam_cc_pll1_out_even.clkr,
+ [CAM_CC_PLL2] = &cam_cc_pll2.clkr,
+ [CAM_CC_PLL2_OUT_AUX] = &cam_cc_pll2_out_aux.clkr,
+ [CAM_CC_PLL2_OUT_AUX2] = &cam_cc_pll2_out_aux2.clkr,
+ [CAM_CC_PLL3] = &cam_cc_pll3.clkr,
+ [CAM_CC_PLL3_OUT_EVEN] = &cam_cc_pll3_out_even.clkr,
+ [CAM_CC_PLL4] = &cam_cc_pll4.clkr,
+ [CAM_CC_PLL4_OUT_EVEN] = &cam_cc_pll4_out_even.clkr,
+ [CAM_CC_PLL5] = &cam_cc_pll5.clkr,
+ [CAM_CC_PLL5_OUT_EVEN] = &cam_cc_pll5_out_even.clkr,
+ [CAM_CC_PLL6] = &cam_cc_pll6.clkr,
+ [CAM_CC_PLL6_OUT_EVEN] = &cam_cc_pll6_out_even.clkr,
+ [CAM_CC_PLL6_OUT_ODD] = &cam_cc_pll6_out_odd.clkr,
+ [CAM_CC_SLEEP_CLK] = &cam_cc_sleep_clk.clkr,
+ [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr,
+ [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr,
+ [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr,
+};
+
+static struct gdsc *cam_cc_sc7280_gdscs[] = {
+ [CAM_CC_TITAN_TOP_GDSC] = &cam_cc_titan_top_gdsc,
+ [CAM_CC_BPS_GDSC] = &cam_cc_bps_gdsc,
+ [CAM_CC_IFE_0_GDSC] = &cam_cc_ife_0_gdsc,
+ [CAM_CC_IFE_1_GDSC] = &cam_cc_ife_1_gdsc,
+ [CAM_CC_IFE_2_GDSC] = &cam_cc_ife_2_gdsc,
+ [CAM_CC_IPE_0_GDSC] = &cam_cc_ipe_0_gdsc,
+};
+
+static const struct regmap_config cam_cc_sc7280_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf00c,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc cam_cc_sc7280_desc = {
+ .config = &cam_cc_sc7280_regmap_config,
+ .clks = cam_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(cam_cc_sc7280_clocks),
+ .gdscs = cam_cc_sc7280_gdscs,
+ .num_gdscs = ARRAY_SIZE(cam_cc_sc7280_gdscs),
+};
+
+static const struct of_device_id cam_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-camcc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cam_cc_sc7280_match_table);
+
+static int cam_cc_sc7280_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+
+ regmap = qcom_cc_map(pdev, &cam_cc_sc7280_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ clk_lucid_pll_configure(&cam_cc_pll0, regmap, &cam_cc_pll0_config);
+ clk_lucid_pll_configure(&cam_cc_pll1, regmap, &cam_cc_pll1_config);
+ clk_zonda_pll_configure(&cam_cc_pll2, regmap, &cam_cc_pll2_config);
+ clk_lucid_pll_configure(&cam_cc_pll3, regmap, &cam_cc_pll3_config);
+ clk_lucid_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config);
+ clk_lucid_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config);
+ clk_lucid_pll_configure(&cam_cc_pll6, regmap, &cam_cc_pll6_config);
+
+ return qcom_cc_really_probe(pdev, &cam_cc_sc7280_desc, regmap);
+}
+
+static struct platform_driver cam_cc_sc7280_driver = {
+ .probe = cam_cc_sc7280_probe,
+ .driver = {
+ .name = "cam_cc-sc7280",
+ .of_match_table = cam_cc_sc7280_match_table,
+ },
+};
+
+static int __init cam_cc_sc7280_init(void)
+{
+ return platform_driver_register(&cam_cc_sc7280_driver);
+}
+subsys_initcall(cam_cc_sc7280_init);
+
+static void __exit cam_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&cam_cc_sc7280_driver);
+}
+module_exit(cam_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI CAM_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 66d7807ee38e..5776d85a1e5c 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -118,14 +118,15 @@
__DEFINE_CLK_SMD_RPM(_platform, _name, _active, type, r_id, \
0, QCOM_RPM_SMD_KEY_STATE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id) \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER(_platform, _name, _active, r_id, r) \
__DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
QCOM_RPM_KEY_SOFTWARE_ENABLE)
-#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, r_id) \
+#define DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(_platform, _name, _active, \
+ r_id, r) \
__DEFINE_CLK_SMD_RPM_BRANCH(_platform, _name, _active, \
- QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, 1000, \
+ QCOM_SMD_RPM_CLK_BUF_A, r_id, 0, r, \
QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY)
#define to_clk_smd_rpm(_hw) container_of(_hw, struct clk_smd_rpm, hw)
@@ -195,6 +196,10 @@ static int clk_smd_rpm_set_rate_active(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
+ /* Buffered clock needs a binary value */
+ if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
+ req.value = cpu_to_le32(!!req.value);
+
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_ACTIVE_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -209,6 +214,10 @@ static int clk_smd_rpm_set_rate_sleep(struct clk_smd_rpm *r,
.value = cpu_to_le32(DIV_ROUND_UP(rate, 1000)), /* to kHz */
};
+ /* Buffered clock needs a binary value */
+ if (r->rpm_res_type == QCOM_SMD_RPM_CLK_BUF_A)
+ req.value = cpu_to_le32(!!req.value);
+
return qcom_rpm_smd_write(r->rpm, QCOM_SMD_RPM_SLEEP_STATE,
r->rpm_res_type, r->rpm_clk_id, &req,
sizeof(req));
@@ -416,20 +425,21 @@ static const struct clk_ops clk_smd_rpm_ops = {
static const struct clk_ops clk_smd_rpm_branch_ops = {
.prepare = clk_smd_rpm_prepare,
.unprepare = clk_smd_rpm_unprepare,
+ .recalc_rate = clk_smd_rpm_recalc_rate,
};
DEFINE_CLK_SMD_RPM(msm8916, pcnoc_clk, pcnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8916, snoc_clk, snoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8916, bimc_clk, bimc_a_clk, QCOM_SMD_RPM_MEM_CLK, 0);
DEFINE_CLK_SMD_RPM_QDSS(msm8916, qdss_clk, qdss_a_clk, QCOM_SMD_RPM_MISC_CLK, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk1, bb_clk1_a, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, bb_clk2, bb_clk2_a, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk1, rf_clk1_a, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8916, rf_clk2, rf_clk2_a, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk1_pin, bb_clk1_a_pin, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, bb_clk2_pin, bb_clk2_a_pin, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk1_pin, rf_clk1_a_pin, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8916, rf_clk2_pin, rf_clk2_a_pin, 5, 19200000);
static struct clk_smd_rpm *msm8916_clks[] = {
[RPM_SMD_PCNOC_CLK] = &msm8916_pcnoc_clk,
@@ -503,19 +513,19 @@ DEFINE_CLK_SMD_RPM(msm8974, cnoc_clk, cnoc_a_clk, QCOM_SMD_RPM_BUS_CLK, 2);
DEFINE_CLK_SMD_RPM(msm8974, mmssnoc_ahb_clk, mmssnoc_ahb_a_clk, QCOM_SMD_RPM_BUS_CLK, 3);
DEFINE_CLK_SMD_RPM(msm8974, gfx3d_clk_src, gfx3d_a_clk_src, QCOM_SMD_RPM_MEM_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8974, ocmemgx_clk, ocmemgx_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d0, cxo_d0_a, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_d1, cxo_d1_a, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a0, cxo_a0_a, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a1, cxo_a1_a, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, cxo_a2, cxo_a2_a, 6, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, diff_clk, diff_a_clk, 7, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk1, div_a_clk1, 11, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8974, div_clk2, div_a_clk2, 12, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d0_pin, cxo_d0_a_pin, 1, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_d1_pin, cxo_d1_a_pin, 2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a0_pin, cxo_a0_a_pin, 4, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a1_pin, cxo_a1_a_pin, 5, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8974, cxo_a2_pin, cxo_a2_a_pin, 6, 19200000);
static struct clk_smd_rpm *msm8974_clks[] = {
[RPM_SMD_PNOC_CLK] = &msm8916_pcnoc_clk,
@@ -603,8 +613,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8976 = {
.num_clks = ARRAY_SIZE(msm8976_clks),
};
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, div_clk3, div_clk3_a, 13, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8992, ln_bb_clk, ln_bb_a_clk, 8, 19200000);
DEFINE_CLK_SMD_RPM(msm8992, ce1_clk, ce1_a_clk, QCOM_SMD_RPM_CE_CLK, 0);
DEFINE_CLK_SMD_RPM(msm8992, ce2_clk, ce2_a_clk, QCOM_SMD_RPM_CE_CLK, 1);
@@ -782,7 +792,7 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8996 = {
DEFINE_CLK_SMD_RPM(qcs404, bimc_gpu_clk, bimc_gpu_a_clk, QCOM_SMD_RPM_MEM_CLK, 2);
DEFINE_CLK_SMD_RPM(qcs404, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(qcs404, ln_bb_clk_pin, ln_bb_clk_a_pin, 8, 19200000);
static struct clk_smd_rpm *qcs404_clks[] = {
[RPM_SMD_QDSS_CLK] = &msm8916_qdss_clk,
@@ -811,13 +821,13 @@ static const struct rpm_smd_clk_desc rpm_clk_qcs404 = {
};
DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, ln_bb_clk3_pin, ln_bb_clk3_a_pin,
- 3);
+ 3, 19200000);
DEFINE_CLK_SMD_RPM(msm8998, aggre1_noc_clk, aggre1_noc_a_clk,
QCOM_SMD_RPM_AGGR_CLK, 1);
DEFINE_CLK_SMD_RPM(msm8998, aggre2_noc_clk, aggre2_noc_a_clk,
QCOM_SMD_RPM_AGGR_CLK, 2);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(msm8998, rf_clk3, rf_clk3_a, 6, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(msm8998, rf_clk3_pin, rf_clk3_a_pin, 6, 19200000);
static struct clk_smd_rpm *msm8998_clks[] = {
[RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
[RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
@@ -864,8 +874,8 @@ static const struct rpm_smd_clk_desc rpm_clk_msm8998 = {
DEFINE_CLK_SMD_RPM_BRANCH(sdm660, bi_tcxo, bi_tcxo_a, QCOM_SMD_RPM_MISC_CLK, 0,
19200000);
-DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3);
-DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(sdm660, ln_bb_clk3, ln_bb_clk3_a, 3, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER_PINCTRL(sdm660, ln_bb_clk3_pin, ln_bb_clk3_pin_a, 3, 19200000);
static struct clk_smd_rpm *sdm660_clks[] = {
[RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
@@ -1067,6 +1077,64 @@ static const struct rpm_smd_clk_desc rpm_clk_sm6115 = {
.num_clks = ARRAY_SIZE(sm6115_clks),
};
+/* QCM2290 */
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, ln_bb_clk2, ln_bb_clk2_a, 0x2, 19200000);
+DEFINE_CLK_SMD_RPM_XO_BUFFER(qcm2290, rf_clk3, rf_clk3_a, 6, 38400000);
+
+DEFINE_CLK_SMD_RPM(qcm2290, qpic_clk, qpic_a_clk, QCOM_SMD_RPM_QPIC_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, hwkm_clk, hwkm_a_clk, QCOM_SMD_RPM_HWKM_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, pka_clk, pka_a_clk, QCOM_SMD_RPM_PKA_CLK, 0);
+DEFINE_CLK_SMD_RPM(qcm2290, cpuss_gnoc_clk, cpuss_gnoc_a_clk,
+ QCOM_SMD_RPM_MEM_CLK, 1);
+DEFINE_CLK_SMD_RPM(qcm2290, bimc_gpu_clk, bimc_gpu_a_clk,
+ QCOM_SMD_RPM_MEM_CLK, 2);
+
+static struct clk_smd_rpm *qcm2290_clks[] = {
+ [RPM_SMD_XO_CLK_SRC] = &sdm660_bi_tcxo,
+ [RPM_SMD_XO_A_CLK_SRC] = &sdm660_bi_tcxo_a,
+ [RPM_SMD_SNOC_CLK] = &sm6125_snoc_clk,
+ [RPM_SMD_SNOC_A_CLK] = &sm6125_snoc_a_clk,
+ [RPM_SMD_BIMC_CLK] = &msm8916_bimc_clk,
+ [RPM_SMD_BIMC_A_CLK] = &msm8916_bimc_a_clk,
+ [RPM_SMD_QDSS_CLK] = &sm6125_qdss_clk,
+ [RPM_SMD_QDSS_A_CLK] = &sm6125_qdss_a_clk,
+ [RPM_SMD_LN_BB_CLK2] = &qcm2290_ln_bb_clk2,
+ [RPM_SMD_LN_BB_CLK2_A] = &qcm2290_ln_bb_clk2_a,
+ [RPM_SMD_RF_CLK3] = &qcm2290_rf_clk3,
+ [RPM_SMD_RF_CLK3_A] = &qcm2290_rf_clk3_a,
+ [RPM_SMD_CNOC_CLK] = &sm6125_cnoc_clk,
+ [RPM_SMD_CNOC_A_CLK] = &sm6125_cnoc_a_clk,
+ [RPM_SMD_IPA_CLK] = &msm8976_ipa_clk,
+ [RPM_SMD_IPA_A_CLK] = &msm8976_ipa_a_clk,
+ [RPM_SMD_QUP_CLK] = &sm6125_qup_clk,
+ [RPM_SMD_QUP_A_CLK] = &sm6125_qup_a_clk,
+ [RPM_SMD_MMRT_CLK] = &sm6125_mmrt_clk,
+ [RPM_SMD_MMRT_A_CLK] = &sm6125_mmrt_a_clk,
+ [RPM_SMD_MMNRT_CLK] = &sm6125_mmnrt_clk,
+ [RPM_SMD_MMNRT_A_CLK] = &sm6125_mmnrt_a_clk,
+ [RPM_SMD_SNOC_PERIPH_CLK] = &sm6125_snoc_periph_clk,
+ [RPM_SMD_SNOC_PERIPH_A_CLK] = &sm6125_snoc_periph_a_clk,
+ [RPM_SMD_SNOC_LPASS_CLK] = &sm6125_snoc_lpass_clk,
+ [RPM_SMD_SNOC_LPASS_A_CLK] = &sm6125_snoc_lpass_a_clk,
+ [RPM_SMD_CE1_CLK] = &msm8992_ce1_clk,
+ [RPM_SMD_CE1_A_CLK] = &msm8992_ce1_a_clk,
+ [RPM_SMD_QPIC_CLK] = &qcm2290_qpic_clk,
+ [RPM_SMD_QPIC_CLK_A] = &qcm2290_qpic_a_clk,
+ [RPM_SMD_HWKM_CLK] = &qcm2290_hwkm_clk,
+ [RPM_SMD_HWKM_A_CLK] = &qcm2290_hwkm_a_clk,
+ [RPM_SMD_PKA_CLK] = &qcm2290_pka_clk,
+ [RPM_SMD_PKA_A_CLK] = &qcm2290_pka_a_clk,
+ [RPM_SMD_BIMC_GPU_CLK] = &qcm2290_bimc_gpu_clk,
+ [RPM_SMD_BIMC_GPU_A_CLK] = &qcm2290_bimc_gpu_a_clk,
+ [RPM_SMD_CPUSS_GNOC_CLK] = &qcm2290_cpuss_gnoc_clk,
+ [RPM_SMD_CPUSS_GNOC_A_CLK] = &qcm2290_cpuss_gnoc_a_clk,
+};
+
+static const struct rpm_smd_clk_desc rpm_clk_qcm2290 = {
+ .clks = qcm2290_clks,
+ .num_clks = ARRAY_SIZE(qcm2290_clks),
+};
+
static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-mdm9607", .data = &rpm_clk_mdm9607 },
{ .compatible = "qcom,rpmcc-msm8226", .data = &rpm_clk_msm8974 },
@@ -1079,6 +1147,7 @@ static const struct of_device_id rpm_smd_clk_match_table[] = {
{ .compatible = "qcom,rpmcc-msm8994", .data = &rpm_clk_msm8994 },
{ .compatible = "qcom,rpmcc-msm8996", .data = &rpm_clk_msm8996 },
{ .compatible = "qcom,rpmcc-msm8998", .data = &rpm_clk_msm8998 },
+ { .compatible = "qcom,rpmcc-qcm2290", .data = &rpm_clk_qcm2290 },
{ .compatible = "qcom,rpmcc-qcs404", .data = &rpm_clk_qcs404 },
{ .compatible = "qcom,rpmcc-sdm660", .data = &rpm_clk_sdm660 },
{ .compatible = "qcom,rpmcc-sm6115", .data = &rpm_clk_sm6115 },
diff --git a/drivers/clk/qcom/common.c b/drivers/clk/qcom/common.c
index 60d2a78d1395..0932e019dd12 100644
--- a/drivers/clk/qcom/common.c
+++ b/drivers/clk/qcom/common.c
@@ -73,11 +73,9 @@ struct regmap *
qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc)
{
void __iomem *base;
- struct resource *res;
struct device *dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return ERR_CAST(base);
@@ -313,11 +311,9 @@ int qcom_cc_probe_by_index(struct platform_device *pdev, int index,
const struct qcom_cc_desc *desc)
{
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, index);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(base))
return -ENOMEM;
diff --git a/drivers/clk/qcom/dispcc-sm8250.c b/drivers/clk/qcom/dispcc-sm8250.c
index bf9ffe1a1cf4..566fdfa0a15b 100644
--- a/drivers/clk/qcom/dispcc-sm8250.c
+++ b/drivers/clk/qcom/dispcc-sm8250.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/reset-controller.h>
@@ -1228,13 +1229,31 @@ static const struct of_device_id disp_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, disp_cc_sm8250_match_table);
+static void disp_cc_sm8250_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
static int disp_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, disp_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &disp_cc_sm8250_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
/* note: trion == lucid, except for the prepare() op */
BUILD_BUG_ON(CLK_ALPHA_PLL_TYPE_TRION != CLK_ALPHA_PLL_TYPE_LUCID);
@@ -1259,7 +1278,11 @@ static int disp_cc_sm8250_probe(struct platform_device *pdev)
/* DISP_CC_XO_CLK always-on */
regmap_update_bits(regmap, 0x605c, BIT(0), BIT(0));
- return qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &disp_cc_sm8250_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static struct platform_driver disp_cc_sm8250_driver = {
diff --git a/drivers/clk/qcom/gcc-msm8953.c b/drivers/clk/qcom/gcc-msm8953.c
index 49513f1366ff..8aafa6591e84 100644
--- a/drivers/clk/qcom/gcc-msm8953.c
+++ b/drivers/clk/qcom/gcc-msm8953.c
@@ -4230,7 +4230,6 @@ static struct platform_driver gcc_msm8953_driver = {
.driver = {
.name = "gcc-msm8953",
.of_match_table = gcc_msm8953_match_table,
- .owner = THIS_MODULE,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8994.c b/drivers/clk/qcom/gcc-msm8994.c
index 144d2ba7a9be..702a9bdc0559 100644
--- a/drivers/clk/qcom/gcc-msm8994.c
+++ b/drivers/clk/qcom/gcc-msm8994.c
@@ -8,6 +8,7 @@
#include <linux/ctype.h>
#include <linux/io.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/regmap.h>
@@ -28,50 +29,17 @@ enum {
P_GPLL4,
};
-static const struct parent_map gcc_xo_gpll0_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
-};
-
-static const char * const gcc_xo_gpll0[] = {
- "xo",
- "gpll0",
-};
-
-static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
- { P_XO, 0 },
- { P_GPLL0, 1 },
- { P_GPLL4, 5 },
-};
-
-static const char * const gcc_xo_gpll0_gpll4[] = {
- "xo",
- "gpll0",
- "gpll4",
-};
-
-static struct clk_fixed_factor xo = {
- .mult = 1,
- .div = 1,
- .hw.init = &(struct clk_init_data)
- {
- .name = "xo",
- .parent_names = (const char *[]) { "xo_board" },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-};
-
static struct clk_alpha_pll gpll0_early = {
- .offset = 0x00000,
+ .offset = 0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gpll0_early",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -79,10 +47,9 @@ static struct clk_alpha_pll gpll0_early = {
};
static struct clk_alpha_pll_postdiv gpll0 = {
- .offset = 0x00000,
+ .offset = 0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gpll0",
.parent_names = (const char *[]) { "gpll0_early" },
.num_parents = 1,
@@ -96,10 +63,11 @@ static struct clk_alpha_pll gpll4_early = {
.clkr = {
.enable_reg = 0x1480,
.enable_mask = BIT(4),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gpll4_early",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_ops,
},
@@ -109,8 +77,7 @@ static struct clk_alpha_pll gpll4_early = {
static struct clk_alpha_pll_postdiv gpll4 = {
.offset = 0x1dc0,
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_DEFAULT],
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gpll4",
.parent_names = (const char *[]) { "gpll4_early" },
.num_parents = 1,
@@ -118,6 +85,64 @@ static struct clk_alpha_pll_postdiv gpll4 = {
},
};
+static const struct parent_map gcc_xo_gpll0_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+};
+
+static const struct parent_map gcc_xo_gpll0_gpll4_map[] = {
+ { P_XO, 0 },
+ { P_GPLL0, 1 },
+ { P_GPLL4, 5 },
+};
+
+static const struct clk_parent_data gcc_xo_gpll0_gpll4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static struct clk_rcg2 system_noc_clk_src = {
+ .cmd_rcgr = 0x0120,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "system_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 config_noc_clk_src = {
+ .cmd_rcgr = 0x0150,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "config_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 periph_noc_clk_src = {
+ .cmd_rcgr = 0x0190,
+ .hid_width = 5,
+ .parent_map = gcc_xo_gpll0_map,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "periph_noc_clk_src",
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
static struct freq_tbl ftbl_ufs_axi_clk_src[] = {
F(50000000, P_GPLL0, 12, 0, 0),
F(100000000, P_GPLL0, 6, 0, 0),
@@ -134,11 +159,10 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_ufs_axi_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -155,11 +179,10 @@ static struct clk_rcg2 usb30_master_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb30_master_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -175,16 +198,15 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
-static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
+static struct freq_tbl ftbl_blsp1_qup1_spi_apps_clk_src[] = {
F(960000, P_XO, 10, 1, 2),
F(4800000, P_XO, 4, 0, 0),
F(9600000, P_XO, 2, 0, 0),
@@ -197,17 +219,27 @@ static struct freq_tbl ftbl_blspqup_spi_apps_clk_src[] = {
{ }
};
+static struct freq_tbl ftbl_blsp1_qup_spi_apps_clk_src_8992[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.cmd_rcgr = 0x064c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -217,26 +249,37 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup2_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(46150000, P_GPLL0, 13, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.cmd_rcgr = 0x06cc,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -246,26 +289,37 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup3_4_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.cmd_rcgr = 0x074c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -275,11 +329,10 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -289,12 +342,11 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -304,26 +356,37 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup5_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(40000000, P_GPLL0, 15, 0, 0),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.cmd_rcgr = 0x084c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup5_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -333,26 +396,37 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp1_qup6_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(27906976, P_GPLL0, 1, 2, 43),
+ F(41380000, P_GPLL0, 15, 0, 0),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.cmd_rcgr = 0x08cc,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp1_qup6_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -382,11 +456,10 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -397,11 +470,10 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -412,11 +484,10 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -427,11 +498,10 @@ static struct clk_rcg2 blsp1_uart4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -442,11 +512,10 @@ static struct clk_rcg2 blsp1_uart5_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -457,11 +526,10 @@ static struct clk_rcg2 blsp1_uart6_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -471,26 +539,37 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup1_2_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.cmd_rcgr = 0x098c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup1_2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -500,11 +579,10 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -514,26 +592,37 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup1_2_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup3_4_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(42860000, P_GPLL0, 14, 0, 0),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.cmd_rcgr = 0x0aa0,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -543,12 +632,11 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -558,11 +646,10 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -572,12 +659,11 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup3_4_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -587,11 +673,10 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -601,12 +686,12 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ /* BLSP1 QUP1 and BLSP2 QUP5 use the same freqs */
+ .freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -616,26 +701,37 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_i2c_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_i2c_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
+static struct freq_tbl ftbl_blsp2_qup6_spi_apps_clk_src[] = {
+ F(960000, P_XO, 10, 1, 2),
+ F(4800000, P_XO, 4, 0, 0),
+ F(9600000, P_XO, 2, 0, 0),
+ F(15000000, P_GPLL0, 10, 1, 4),
+ F(19200000, P_XO, 1, 0, 0),
+ F(24000000, P_GPLL0, 12.5, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(44440000, P_GPLL0, 13.5, 0, 0),
+ F(48000000, P_GPLL0, 12.5, 0, 0),
+ { }
+};
+
static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
.cmd_rcgr = 0x0c0c,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
- .freq_tbl = ftbl_blspqup_spi_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .freq_tbl = ftbl_blsp2_qup6_spi_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_spi_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -646,11 +742,10 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -661,11 +756,10 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -676,11 +770,10 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -691,11 +784,10 @@ static struct clk_rcg2 blsp2_uart4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -706,11 +798,10 @@ static struct clk_rcg2 blsp2_uart5_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart5_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -721,11 +812,10 @@ static struct clk_rcg2 blsp2_uart6_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_blsp_uart_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart6_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -743,11 +833,10 @@ static struct clk_rcg2 gp1_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp1_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -765,11 +854,10 @@ static struct clk_rcg2 gp2_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp2_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -787,11 +875,10 @@ static struct clk_rcg2 gp3_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_gp3_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -806,10 +893,11 @@ static struct clk_rcg2 pcie_0_aux_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_pcie_0_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -824,10 +912,11 @@ static struct clk_rcg2 pcie_0_pipe_clk_src = {
.cmd_rcgr = 0x1adc,
.hid_width = 5,
.freq_tbl = ftbl_pcie_pipe_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_0_pipe_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -843,10 +932,11 @@ static struct clk_rcg2 pcie_1_aux_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_pcie_1_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -856,10 +946,11 @@ static struct clk_rcg2 pcie_1_pipe_clk_src = {
.cmd_rcgr = 0x1b5c,
.hid_width = 5,
.freq_tbl = ftbl_pcie_pipe_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pcie_1_pipe_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -875,11 +966,10 @@ static struct clk_rcg2 pdm2_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_pdm2_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -896,17 +986,28 @@ static struct freq_tbl ftbl_sdcc1_apps_clk_src[] = {
{ }
};
+static struct freq_tbl ftbl_sdcc1_apps_clk_src_8992[] = {
+ F(144000, P_XO, 16, 3, 25),
+ F(400000, P_XO, 12, 1, 4),
+ F(20000000, P_GPLL0, 15, 1, 2),
+ F(25000000, P_GPLL0, 12, 1, 2),
+ F(50000000, P_GPLL0, 12, 0, 0),
+ F(100000000, P_GPLL0, 6, 0, 0),
+ F(172000000, P_GPLL4, 2, 0, 0),
+ F(344000000, P_GPLL4, 1, 0, 0),
+ { }
+};
+
static struct clk_rcg2 sdcc1_apps_clk_src = {
.cmd_rcgr = 0x04d0,
.mnd_width = 8,
.hid_width = 5,
.parent_map = gcc_xo_gpll0_gpll4_map,
.freq_tbl = ftbl_sdcc1_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
- .parent_names = gcc_xo_gpll0_gpll4,
- .num_parents = 3,
+ .parent_data = gcc_xo_gpll0_gpll4,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0_gpll4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -928,11 +1029,10 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -943,11 +1043,10 @@ static struct clk_rcg2 sdcc3_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc3_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -958,11 +1057,10 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_sdcc2_4_apps_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "sdcc4_apps_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -977,10 +1075,11 @@ static struct clk_rcg2 tsif_ref_clk_src = {
.mnd_width = 8,
.hid_width = 5,
.freq_tbl = ftbl_tsif_ref_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -997,11 +1096,10 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb30_mock_utmi_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,10 +1113,11 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.cmd_rcgr = 0x1414,
.hid_width = 5,
.freq_tbl = ftbl_usb3_phy_aux_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = (const char *[]) { "xo" },
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "xo",
+ },
.num_parents = 1,
.ops = &clk_rcg2_ops,
},
@@ -1034,11 +1133,10 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
.hid_width = 5,
.parent_map = gcc_xo_gpll0_map,
.freq_tbl = ftbl_usb_hs_system_clk_src,
- .clkr.hw.init = &(struct clk_init_data)
- {
+ .clkr.hw.init = &(struct clk_init_data){
.name = "usb_hs_system_clk_src",
- .parent_names = gcc_xo_gpll0,
- .num_parents = 2,
+ .parent_data = gcc_xo_gpll0,
+ .num_parents = ARRAY_SIZE(gcc_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1049,9 +1147,10 @@ static struct clk_branch gcc_blsp1_ahb_clk = {
.clkr = {
.enable_reg = 0x1484,
.enable_mask = BIT(17),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1062,12 +1161,9 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0648,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup1_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup1_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1080,12 +1176,9 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0644,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup1_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup1_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1098,12 +1191,9 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x06c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup2_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup2_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1116,12 +1206,9 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.clkr = {
.enable_reg = 0x06c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup2_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup2_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1134,12 +1221,9 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0748,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup3_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup3_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1152,12 +1236,9 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0744,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup3_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup3_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1170,12 +1251,9 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x07c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup4_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup4_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1188,12 +1266,9 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.clkr = {
.enable_reg = 0x07c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup4_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup4_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1206,12 +1281,9 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0848,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup5_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup5_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1224,12 +1296,9 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0844,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup5_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup5_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1242,12 +1311,9 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x08c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup6_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup6_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1260,12 +1326,9 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.clkr = {
.enable_reg = 0x08c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_qup6_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_qup6_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1278,12 +1341,9 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.clkr = {
.enable_reg = 0x0684,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1296,12 +1356,9 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.clkr = {
.enable_reg = 0x0704,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1314,12 +1371,9 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.clkr = {
.enable_reg = 0x0784,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1332,12 +1386,9 @@ static struct clk_branch gcc_blsp1_uart4_apps_clk = {
.clkr = {
.enable_reg = 0x0804,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart4_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1350,12 +1401,9 @@ static struct clk_branch gcc_blsp1_uart5_apps_clk = {
.clkr = {
.enable_reg = 0x0884,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart5_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart5_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart5_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1368,12 +1416,9 @@ static struct clk_branch gcc_blsp1_uart6_apps_clk = {
.clkr = {
.enable_reg = 0x0904,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart6_apps_clk",
- .parent_names = (const char *[]) {
- "blsp1_uart6_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp1_uart6_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1387,9 +1432,10 @@ static struct clk_branch gcc_blsp2_ahb_clk = {
.clkr = {
.enable_reg = 0x1484,
.enable_mask = BIT(15),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1400,12 +1446,9 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0988,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup1_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup1_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1418,12 +1461,9 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0984,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup1_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup1_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1436,12 +1476,9 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0a08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup2_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup2_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1454,12 +1491,9 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0a04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup2_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup2_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1472,12 +1506,9 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0a88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup3_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup3_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1490,12 +1521,9 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0a84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup3_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup3_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1508,12 +1536,9 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0b08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup4_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup4_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1526,12 +1551,9 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0b04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup4_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup4_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1544,12 +1566,9 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0b88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup5_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup5_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1562,12 +1581,9 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0b84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup5_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup5_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1580,12 +1596,9 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
.clkr = {
.enable_reg = 0x0c08,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_i2c_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup6_i2c_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup6_i2c_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1598,12 +1611,9 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
.clkr = {
.enable_reg = 0x0c04,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_spi_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_qup6_spi_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_qup6_spi_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1616,12 +1626,9 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.clkr = {
.enable_reg = 0x09c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1634,12 +1641,9 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.clkr = {
.enable_reg = 0x0a44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1652,12 +1656,9 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
.clkr = {
.enable_reg = 0x0ac4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart3_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1670,12 +1671,9 @@ static struct clk_branch gcc_blsp2_uart4_apps_clk = {
.clkr = {
.enable_reg = 0x0b44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart4_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1688,12 +1686,9 @@ static struct clk_branch gcc_blsp2_uart5_apps_clk = {
.clkr = {
.enable_reg = 0x0bc4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart5_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart5_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart5_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1706,12 +1701,9 @@ static struct clk_branch gcc_blsp2_uart6_apps_clk = {
.clkr = {
.enable_reg = 0x0c44,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart6_apps_clk",
- .parent_names = (const char *[]) {
- "blsp2_uart6_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &blsp2_uart6_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1724,12 +1716,9 @@ static struct clk_branch gcc_gp1_clk = {
.clkr = {
.enable_reg = 0x1900,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]) {
- "gp1_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp1_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1742,12 +1731,9 @@ static struct clk_branch gcc_gp2_clk = {
.clkr = {
.enable_reg = 0x1940,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]) {
- "gp2_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp2_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1760,12 +1746,9 @@ static struct clk_branch gcc_gp3_clk = {
.clkr = {
.enable_reg = 0x1980,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]) {
- "gp3_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &gp3_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1778,9 +1761,10 @@ static struct clk_branch gcc_lpass_q6_axi_clk = {
.clkr = {
.enable_reg = 0x0280,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_lpass_q6_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1791,9 +1775,10 @@ static struct clk_branch gcc_mss_q6_bimc_axi_clk = {
.clkr = {
.enable_reg = 0x0284,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_mss_q6_bimc_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1804,12 +1789,9 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.clkr = {
.enable_reg = 0x1ad4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]) {
- "pcie_0_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_0_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1822,9 +1804,11 @@ static struct clk_branch gcc_pcie_0_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x1ad0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1835,9 +1819,11 @@ static struct clk_branch gcc_pcie_0_mstr_axi_clk = {
.clkr = {
.enable_reg = 0x1acc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_mstr_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1849,12 +1835,9 @@ static struct clk_branch gcc_pcie_0_pipe_clk = {
.clkr = {
.enable_reg = 0x1ad8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_pipe_clk",
- .parent_names = (const char *[]) {
- "pcie_0_pipe_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_0_pipe_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1868,9 +1851,11 @@ static struct clk_branch gcc_pcie_0_slv_axi_clk = {
.clkr = {
.enable_reg = 0x1ac8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_slv_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1881,12 +1866,9 @@ static struct clk_branch gcc_pcie_1_aux_clk = {
.clkr = {
.enable_reg = 0x1b54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_aux_clk",
- .parent_names = (const char *[]) {
- "pcie_1_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_1_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1899,9 +1881,11 @@ static struct clk_branch gcc_pcie_1_cfg_ahb_clk = {
.clkr = {
.enable_reg = 0x1b54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_cfg_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1912,9 +1896,11 @@ static struct clk_branch gcc_pcie_1_mstr_axi_clk = {
.clkr = {
.enable_reg = 0x1b50,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_mstr_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1926,12 +1912,9 @@ static struct clk_branch gcc_pcie_1_pipe_clk = {
.clkr = {
.enable_reg = 0x1b58,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_pipe_clk",
- .parent_names = (const char *[]) {
- "pcie_1_pipe_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pcie_1_pipe_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1944,9 +1927,11 @@ static struct clk_branch gcc_pcie_1_slv_axi_clk = {
.clkr = {
.enable_reg = 0x1b48,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pcie_1_slv_axi_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -1957,12 +1942,9 @@ static struct clk_branch gcc_pdm2_clk = {
.clkr = {
.enable_reg = 0x0ccc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]) {
- "pdm2_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &pdm2_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -1975,9 +1957,10 @@ static struct clk_branch gcc_pdm_ahb_clk = {
.clkr = {
.enable_reg = 0x0cc4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_pdm_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -1988,12 +1971,9 @@ static struct clk_branch gcc_sdcc1_apps_clk = {
.clkr = {
.enable_reg = 0x04c4,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc1_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc1_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2006,13 +1986,11 @@ static struct clk_branch gcc_sdcc1_ahb_clk = {
.clkr = {
.enable_reg = 0x04c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc1_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2023,13 +2001,11 @@ static struct clk_branch gcc_sdcc2_ahb_clk = {
.clkr = {
.enable_reg = 0x0508,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2040,12 +2016,9 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.clkr = {
.enable_reg = 0x0504,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc2_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc2_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2058,13 +2031,11 @@ static struct clk_branch gcc_sdcc3_ahb_clk = {
.clkr = {
.enable_reg = 0x0548,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2075,12 +2046,9 @@ static struct clk_branch gcc_sdcc3_apps_clk = {
.clkr = {
.enable_reg = 0x0544,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc3_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc3_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc3_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2093,13 +2061,11 @@ static struct clk_branch gcc_sdcc4_ahb_clk = {
.clkr = {
.enable_reg = 0x0588,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_ahb_clk",
- .parent_names = (const char *[]){
- "periph_noc_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
.num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
},
},
@@ -2110,12 +2076,9 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.clkr = {
.enable_reg = 0x0584,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_names = (const char *[]) {
- "sdcc4_apps_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &sdcc4_apps_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2128,12 +2091,9 @@ static struct clk_branch gcc_sys_noc_ufs_axi_clk = {
.clkr = {
.enable_reg = 0x1d7c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_ufs_axi_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2146,12 +2106,9 @@ static struct clk_branch gcc_sys_noc_usb3_axi_clk = {
.clkr = {
.enable_reg = 0x03fc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_sys_noc_usb3_axi_clk",
- .parent_names = (const char *[]) {
- "usb30_master_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_master_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2164,9 +2121,10 @@ static struct clk_branch gcc_tsif_ahb_clk = {
.clkr = {
.enable_reg = 0x0d84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2177,12 +2135,9 @@ static struct clk_branch gcc_tsif_ref_clk = {
.clkr = {
.enable_reg = 0x0d88,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk",
- .parent_names = (const char *[]) {
- "tsif_ref_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &tsif_ref_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2195,9 +2150,10 @@ static struct clk_branch gcc_ufs_ahb_clk = {
.clkr = {
.enable_reg = 0x1d4c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2208,12 +2164,9 @@ static struct clk_branch gcc_ufs_axi_clk = {
.clkr = {
.enable_reg = 0x1d48,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2226,12 +2179,9 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
.clkr = {
.enable_reg = 0x1d54,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_cfg_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2245,9 +2195,10 @@ static struct clk_branch gcc_ufs_rx_symbol_0_clk = {
.clkr = {
.enable_reg = 0x1d60,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2259,9 +2210,10 @@ static struct clk_branch gcc_ufs_rx_symbol_1_clk = {
.clkr = {
.enable_reg = 0x1d64,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_rx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2272,12 +2224,9 @@ static struct clk_branch gcc_ufs_tx_cfg_clk = {
.clkr = {
.enable_reg = 0x1d50,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_cfg_clk",
- .parent_names = (const char *[]) {
- "ufs_axi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &ufs_axi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2291,9 +2240,10 @@ static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.clkr = {
.enable_reg = 0x1d58,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_0_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2305,9 +2255,10 @@ static struct clk_branch gcc_ufs_tx_symbol_1_clk = {
.clkr = {
.enable_reg = 0x1d5c,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_ufs_tx_symbol_1_clk",
+ .parent_hws = (const struct clk_hw *[]){ &system_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2318,9 +2269,13 @@ static struct clk_branch gcc_usb2_hs_phy_sleep_clk = {
.clkr = {
.enable_reg = 0x04ac,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb2_hs_phy_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep",
+ .name = "sleep"
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2331,12 +2286,9 @@ static struct clk_branch gcc_usb30_master_clk = {
.clkr = {
.enable_reg = 0x03c8,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]) {
- "usb30_master_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_master_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2349,12 +2301,9 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.clkr = {
.enable_reg = 0x03d0,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]) {
- "usb30_mock_utmi_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb30_mock_utmi_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2367,9 +2316,13 @@ static struct clk_branch gcc_usb30_sleep_clk = {
.clkr = {
.enable_reg = 0x03cc,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb30_sleep_clk",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "sleep",
+ .name = "sleep"
+ },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2380,12 +2333,9 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.clkr = {
.enable_reg = 0x1408,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]) {
- "usb3_phy_aux_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb3_phy_aux_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2393,14 +2343,28 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};

+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x140c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x140c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_branch gcc_usb_hs_ahb_clk = {
.halt_reg = 0x0488,
.clkr = {
.enable_reg = 0x0488,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
.ops = &clk_branch2_ops,
},
},
@@ -2411,12 +2375,9 @@ static struct clk_branch gcc_usb_hs_system_clk = {
.clkr = {
.enable_reg = 0x0484,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_hs_system_clk",
- .parent_names = (const char *[]) {
- "usb_hs_system_clk_src",
- },
+ .parent_hws = (const struct clk_hw *[]){ &usb_hs_system_clk_src.clkr.hw },
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
.ops = &clk_branch2_ops,
@@ -2429,20 +2390,123 @@ static struct clk_branch gcc_usb_phy_cfg_ahb2phy_clk = {
.clkr = {
.enable_reg = 0x1a84,
.enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data)
- {
+ .hw.init = &(struct clk_init_data){
.name = "gcc_usb_phy_cfg_ahb2phy_clk",
.ops = &clk_branch2_ops,
},
},
};

-static struct gdsc pcie_gdsc = {
- .gdscr = 0x1e18,
- .pd = {
- .name = "pcie",
+static struct clk_branch gpll0_out_mmsscc = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_mmsscc",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
},
- .pwrsts = PWRSTS_OFF_ON,
+ },
+};
+
+static struct clk_branch gpll0_out_msscc = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_msscc",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch pcie_0_phy_ldo = {
+ .halt_reg = 0x1e00,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E00,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_0_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch pcie_1_phy_ldo = {
+ .halt_reg = 0x1e04,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E04,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "pcie_1_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch ufs_phy_ldo = {
+ .halt_reg = 0x1e0c,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E0C,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "ufs_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch usb_ss_phy_ldo = {
+ .halt_reg = 0x1e08,
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x1E08,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "usb_ss_phy_ldo",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x0e04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x0e04,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &config_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_prng_ahb_clk = {
+ .halt_reg = 0x0d04,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x1484,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_prng_ahb_clk",
+ .parent_hws = (const struct clk_hw *[]){ &periph_noc_clk_src.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_branch2_ops,
+ },
+ },
};

static struct gdsc pcie_0_gdsc = {
@@ -2482,6 +2546,9 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GPLL0] = &gpll0.clkr,
[GPLL4_EARLY] = &gpll4_early.clkr,
[GPLL4] = &gpll4.clkr,
+ [CONFIG_NOC_CLK_SRC] = &config_noc_clk_src.clkr,
+ [PERIPH_NOC_CLK_SRC] = &periph_noc_clk_src.clkr,
+ [SYSTEM_NOC_CLK_SRC] = &system_noc_clk_src.clkr,
[UFS_AXI_CLK_SRC] = &ufs_axi_clk_src.clkr,
[USB30_MASTER_CLK_SRC] = &usb30_master_clk_src.clkr,
[BLSP1_QUP1_I2C_APPS_CLK_SRC] = &blsp1_qup1_i2c_apps_clk_src.clkr,
@@ -2616,13 +2683,23 @@ static struct clk_regmap *gcc_msm8994_clocks[] = {
[GCC_USB30_MOCK_UTMI_CLK] = &gcc_usb30_mock_utmi_clk.clkr,
[GCC_USB30_SLEEP_CLK] = &gcc_usb30_sleep_clk.clkr,
[GCC_USB3_PHY_AUX_CLK] = &gcc_usb3_phy_aux_clk.clkr,
+ [GCC_USB3_PHY_PIPE_CLK] = &gcc_usb3_phy_pipe_clk.clkr,
[GCC_USB_HS_AHB_CLK] = &gcc_usb_hs_ahb_clk.clkr,
[GCC_USB_HS_SYSTEM_CLK] = &gcc_usb_hs_system_clk.clkr,
[GCC_USB_PHY_CFG_AHB2PHY_CLK] = &gcc_usb_phy_cfg_ahb2phy_clk.clkr,
+ [GPLL0_OUT_MMSSCC] = &gpll0_out_mmsscc.clkr,
+ [GPLL0_OUT_MSSCC] = &gpll0_out_msscc.clkr,
+ [PCIE_0_PHY_LDO] = &pcie_0_phy_ldo.clkr,
+ [PCIE_1_PHY_LDO] = &pcie_1_phy_ldo.clkr,
+ [UFS_PHY_LDO] = &ufs_phy_ldo.clkr,
+ [USB_SS_PHY_LDO] = &usb_ss_phy_ldo.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_PRNG_AHB_CLK] = &gcc_prng_ahb_clk.clkr,
};

static struct gdsc *gcc_msm8994_gdscs[] = {
- [PCIE_GDSC] = &pcie_gdsc,
+ /* This GDSC does not exist, but ABI has to remain intact */
+ [PCIE_GDSC] = NULL,
[PCIE_0_GDSC] = &pcie_0_gdsc,
[PCIE_1_GDSC] = &pcie_1_gdsc,
[USB30_GDSC] = &usb30_gdsc,
@@ -2632,6 +2709,7 @@ static struct gdsc *gcc_msm8994_gdscs[] = {
static const struct qcom_reset_map gcc_msm8994_resets[] = {
[USB3_PHY_RESET] = { 0x1400 },
[USB3PHY_PHY_RESET] = { 0x1404 },
+ [MSS_RESET] = { 0x1680 },
[PCIE_PHY_0_RESET] = { 0x1b18 },
[PCIE_PHY_1_RESET] = { 0x1b98 },
[QUSB2_PHY_RESET] = { 0x04b8 },
@@ -2656,19 +2734,57 @@ static const struct qcom_cc_desc gcc_msm8994_desc = {
};

static const struct of_device_id gcc_msm8994_match_table[] = {
- { .compatible = "qcom,gcc-msm8994" },
+ { .compatible = "qcom,gcc-msm8992" },
+ { .compatible = "qcom,gcc-msm8994" }, /* V2 and V2.1 */
{}
};
MODULE_DEVICE_TABLE(of, gcc_msm8994_match_table);

static int gcc_msm8994_probe(struct platform_device *pdev)
{
- struct device *dev = &pdev->dev;
- struct clk *clk;
-
- clk = devm_clk_register(dev, &xo.hw);
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (of_device_is_compatible(pdev->dev.of_node, "qcom,gcc-msm8992")) {
+ /* MSM8992 features fewer clocks and some have different freq tables */
+ gcc_msm8994_desc.clks[UFS_AXI_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[GCC_LPASS_Q6_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[UFS_PHY_LDO] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_AHB_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_CFG_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_SYMBOL_0_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_RX_SYMBOL_1_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_CFG_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_SYMBOL_0_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_UFS_TX_SYMBOL_1_CLK] = NULL;
+
+ sdcc1_apps_clk_src.freq_tbl = ftbl_sdcc1_apps_clk_src_8992;
+ blsp1_qup1_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup2_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup3_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup4_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup5_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp1_qup6_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup1_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup2_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup3_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup4_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup5_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+ blsp2_qup6_i2c_apps_clk_src.freq_tbl = ftbl_blsp1_qup_spi_apps_clk_src_8992;
+
+ /*
+ * Some 8992 boards might *possibly* use
+ * PCIe1 clocks and controller, but it's not
+ * standard and they should be disabled otherwise.
+ */
+ gcc_msm8994_desc.clks[PCIE_1_AUX_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[PCIE_1_PIPE_CLK_SRC] = NULL;
+ gcc_msm8994_desc.clks[PCIE_1_PHY_LDO] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_AUX_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_CFG_AHB_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_MSTR_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_PIPE_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_PCIE_1_SLV_AXI_CLK] = NULL;
+ gcc_msm8994_desc.clks[GCC_SYS_NOC_UFS_AXI_CLK] = NULL;
+ }

return qcom_cc_probe(pdev, &gcc_msm8994_desc);
}
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index 050c91af888e..407e2c5caea4 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -25,109 +25,6 @@
#include "reset.h"
#include "gdsc.h"

-enum {
- P_AUD_REF_CLK,
- P_CORE_BI_PLL_TEST_SE,
- P_GPLL0_OUT_MAIN,
- P_GPLL4_OUT_MAIN,
- P_PLL0_EARLY_DIV_CLK_SRC,
- P_SLEEP_CLK,
- P_XO,
-};
-
-static const struct parent_map gcc_parent_map_0[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_0[] = {
- "xo",
- "gpll0_out_main",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_1[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_1[] = {
- "xo",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_2[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_SLEEP_CLK, 5 },
- { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_2[] = {
- "xo",
- "gpll0_out_main",
- "core_pi_sleep_clk",
- "gpll0_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_3[] = {
- { P_XO, 0 },
- { P_SLEEP_CLK, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_3[] = {
- "xo",
- "core_pi_sleep_clk",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_4[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_GPLL4_OUT_MAIN, 5 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_4[] = {
- "xo",
- "gpll0_out_main",
- "gpll4_out_main",
- "core_bi_pll_test_se",
-};
-
-static const struct parent_map gcc_parent_map_5[] = {
- { P_XO, 0 },
- { P_GPLL0_OUT_MAIN, 1 },
- { P_AUD_REF_CLK, 2 },
- { P_CORE_BI_PLL_TEST_SE, 7 },
-};
-
-static const char * const gcc_parent_names_5[] = {
- "xo",
- "gpll0_out_main",
- "aud_ref_clk",
- "core_bi_pll_test_se",
-};
-
-static struct clk_fixed_factor xo = {
- .mult = 1,
- .div = 1,
- .hw.init = &(struct clk_init_data){
- .name = "xo",
- .parent_names = (const char *[]){ "xo_board" },
- .num_parents = 1,
- .ops = &clk_fixed_factor_ops,
- },
-};
-
static struct pll_vco fabia_vco[] = {
{ 250000000, 2000000000, 0 },
{ 125000000, 1000000000, 1 },
@@ -143,7 +40,9 @@ static struct clk_alpha_pll gpll0 = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gpll0",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -155,7 +54,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_even",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -166,7 +67,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_main",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -177,7 +80,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_odd",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -188,7 +93,9 @@ static struct clk_alpha_pll_postdiv gpll0_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll0_out_test",
- .parent_names = (const char *[]){ "gpll0" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll0.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -204,7 +111,9 @@ static struct clk_alpha_pll gpll1 = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gpll1",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -216,7 +125,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_even",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -227,7 +138,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_main",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -238,7 +151,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_odd",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -249,7 +164,9 @@ static struct clk_alpha_pll_postdiv gpll1_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll1_out_test",
- .parent_names = (const char *[]){ "gpll1" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll1.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -265,7 +182,9 @@ static struct clk_alpha_pll gpll2 = {
.enable_mask = BIT(2),
.hw.init = &(struct clk_init_data){
.name = "gpll2",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -277,7 +196,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_even",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -288,7 +209,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_main",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -299,7 +222,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_odd",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -310,7 +235,9 @@ static struct clk_alpha_pll_postdiv gpll2_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll2_out_test",
- .parent_names = (const char *[]){ "gpll2" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll2.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -326,7 +253,9 @@ static struct clk_alpha_pll gpll3 = {
.enable_mask = BIT(3),
.hw.init = &(struct clk_init_data){
.name = "gpll3",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -338,7 +267,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_even",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -349,7 +280,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_main",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -360,7 +293,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_odd",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -371,7 +306,9 @@ static struct clk_alpha_pll_postdiv gpll3_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll3_out_test",
- .parent_names = (const char *[]){ "gpll3" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll3.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -387,7 +324,9 @@ static struct clk_alpha_pll gpll4 = {
.enable_mask = BIT(4),
.hw.init = &(struct clk_init_data){
.name = "gpll4",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
}
@@ -399,7 +338,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_even = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_even",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -410,7 +351,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_main = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_main",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -421,7 +364,9 @@ static struct clk_alpha_pll_postdiv gpll4_out_odd = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_odd",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
@@ -432,12 +377,106 @@ static struct clk_alpha_pll_postdiv gpll4_out_test = {
.regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_FABIA],
.clkr.hw.init = &(struct clk_init_data){
.name = "gpll4_out_test",
- .parent_names = (const char *[]){ "gpll4" },
+ .parent_hws = (const struct clk_hw*[]) {
+ &gpll4.clkr.hw,
+ },
.num_parents = 1,
.ops = &clk_alpha_pll_postdiv_fabia_ops,
},
};

+enum {
+ P_AUD_REF_CLK,
+ P_CORE_BI_PLL_TEST_SE,
+ P_GPLL0_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_PLL0_EARLY_DIV_CLK_SRC,
+ P_SLEEP_CLK,
+ P_XO,
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_0[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_1[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_SLEEP_CLK, 5 },
+ { P_PLL0_EARLY_DIV_CLK_SRC, 6 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_2[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "sleep_clk" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_XO, 0 },
+ { P_SLEEP_CLK, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_3[] = {
+ { .fw_name = "xo" },
+ { .fw_name = "sleep_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_4[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .hw = &gpll4_out_main.clkr.hw },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_XO, 0 },
+ { P_GPLL0_OUT_MAIN, 1 },
+ { P_AUD_REF_CLK, 2 },
+ { P_CORE_BI_PLL_TEST_SE, 7 },
+};
+
+static const struct clk_parent_data gcc_parent_data_5[] = {
+ { .fw_name = "xo" },
+ { .hw = &gpll0_out_main.clkr.hw },
+ { .fw_name = "aud_ref_clk" },
+ { .fw_name = "core_bi_pll_test_se" },
+};
+
static const struct freq_tbl ftbl_blsp1_qup1_i2c_apps_clk_src[] = {
F(19200000, P_XO, 1, 0, 0),
F(50000000, P_GPLL0_OUT_MAIN, 12, 0, 0),
@@ -452,8 +491,8 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -477,8 +516,8 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -491,8 +530,8 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -505,8 +544,8 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -519,8 +558,8 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -533,8 +572,8 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -547,8 +586,8 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -561,8 +600,8 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -575,8 +614,8 @@ static struct clk_rcg2 blsp1_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -589,8 +628,8 @@ static struct clk_rcg2 blsp1_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup5_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -603,8 +642,8 @@ static struct clk_rcg2 blsp1_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -617,8 +656,8 @@ static struct clk_rcg2 blsp1_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup6_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -650,8 +689,8 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -664,8 +703,8 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -678,8 +717,8 @@ static struct clk_rcg2 blsp1_uart3_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart3_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -692,8 +731,8 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -706,8 +745,8 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -720,8 +759,8 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -734,8 +773,8 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -748,8 +787,8 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -762,8 +801,8 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -776,8 +815,8 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -790,8 +829,8 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -804,8 +843,8 @@ static struct clk_rcg2 blsp2_qup5_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -818,8 +857,8 @@ static struct clk_rcg2 blsp2_qup5_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup5_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -832,8 +871,8 @@ static struct clk_rcg2 blsp2_qup6_i2c_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_i2c_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_i2c_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -846,8 +885,8 @@ static struct clk_rcg2 blsp2_qup6_spi_apps_clk_src = {
.freq_tbl = ftbl_blsp1_qup1_spi_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup6_spi_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -860,8 +899,8 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -874,8 +913,8 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -888,8 +927,8 @@ static struct clk_rcg2 blsp2_uart3_apps_clk_src = {
.freq_tbl = ftbl_blsp1_uart1_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart3_apps_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -909,8 +948,8 @@ static struct clk_rcg2 gp1_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -923,8 +962,8 @@ static struct clk_rcg2 gp2_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -937,8 +976,8 @@ static struct clk_rcg2 gp3_clk_src = {
.freq_tbl = ftbl_gp1_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
- .parent_names = gcc_parent_names_2,
- .num_parents = 5,
+ .parent_data = gcc_parent_data_2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_2),
.ops = &clk_rcg2_ops,
},
};
@@ -958,8 +997,8 @@ static struct clk_rcg2 hmss_ahb_clk_src = {
.freq_tbl = ftbl_hmss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_ahb_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -977,8 +1016,8 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -996,8 +1035,8 @@ static struct clk_rcg2 pcie_aux_clk_src = {
.freq_tbl = ftbl_pcie_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pcie_aux_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1015,8 +1054,8 @@ static struct clk_rcg2 pdm2_clk_src = {
.freq_tbl = ftbl_pdm2_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -1040,8 +1079,8 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.freq_tbl = ftbl_sdcc2_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
- .parent_names = gcc_parent_names_4,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1064,8 +1103,8 @@ static struct clk_rcg2 sdcc4_apps_clk_src = {
.freq_tbl = ftbl_sdcc4_apps_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc4_apps_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -1083,8 +1122,8 @@ static struct clk_rcg2 tsif_ref_clk_src = {
.freq_tbl = ftbl_tsif_ref_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "tsif_ref_clk_src",
- .parent_names = gcc_parent_names_5,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_5,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_5),
.ops = &clk_rcg2_ops,
},
};
@@ -1104,8 +1143,8 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.freq_tbl = ftbl_ufs_axi_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1125,8 +1164,8 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
.freq_tbl = ftbl_ufs_unipro_core_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_unipro_core_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1147,8 +1186,8 @@ static struct clk_rcg2 usb30_master_clk_src = {
.freq_tbl = ftbl_usb30_master_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1161,8 +1200,8 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.freq_tbl = ftbl_hmss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
- .parent_names = gcc_parent_names_0,
- .num_parents = 4,
+ .parent_data = gcc_parent_data_0,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -1180,8 +1219,8 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.freq_tbl = ftbl_usb3_phy_aux_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
- .parent_names = gcc_parent_names_3,
- .num_parents = 3,
+ .parent_data = gcc_parent_data_3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_3),
.ops = &clk_rcg2_ops,
},
};
@@ -1207,8 +1246,8 @@ static struct clk_branch gcc_aggre1_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre1_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1225,8 +1264,8 @@ static struct clk_branch gcc_aggre1_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_aggre1_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1348,8 +1387,8 @@ static struct clk_branch gcc_mmss_gpll0_clk = {
.enable_mask = BIT(1),
.hw.init = &(struct clk_init_data){
.name = "gcc_mmss_gpll0_clk",
- .parent_names = (const char *[]){
- "gpll0_out_main",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gpll0_out_main.clkr.hw,
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -1390,8 +1429,8 @@ static struct clk_branch gcc_blsp1_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1408,8 +1447,8 @@ static struct clk_branch gcc_blsp1_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1426,8 +1465,8 @@ static struct clk_branch gcc_blsp1_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1444,8 +1483,8 @@ static struct clk_branch gcc_blsp1_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1462,8 +1501,8 @@ static struct clk_branch gcc_blsp1_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1480,8 +1519,8 @@ static struct clk_branch gcc_blsp1_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1498,8 +1537,8 @@ static struct clk_branch gcc_blsp1_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1516,8 +1555,8 @@ static struct clk_branch gcc_blsp1_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1534,8 +1573,8 @@ static struct clk_branch gcc_blsp1_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1552,8 +1591,8 @@ static struct clk_branch gcc_blsp1_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1570,8 +1609,8 @@ static struct clk_branch gcc_blsp1_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1588,8 +1627,8 @@ static struct clk_branch gcc_blsp1_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1619,8 +1658,8 @@ static struct clk_branch gcc_blsp1_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1637,8 +1676,8 @@ static struct clk_branch gcc_blsp1_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1655,8 +1694,8 @@ static struct clk_branch gcc_blsp1_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp1_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp1_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp1_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1686,8 +1725,8 @@ static struct clk_branch gcc_blsp2_qup1_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1704,8 +1743,8 @@ static struct clk_branch gcc_blsp2_qup1_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup1_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup1_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup1_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1722,8 +1761,8 @@ static struct clk_branch gcc_blsp2_qup2_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1740,8 +1779,8 @@ static struct clk_branch gcc_blsp2_qup2_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup2_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup2_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup2_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1758,8 +1797,8 @@ static struct clk_branch gcc_blsp2_qup3_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1776,8 +1815,8 @@ static struct clk_branch gcc_blsp2_qup3_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup3_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup3_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup3_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1794,8 +1833,8 @@ static struct clk_branch gcc_blsp2_qup4_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1812,8 +1851,8 @@ static struct clk_branch gcc_blsp2_qup4_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup4_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup4_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup4_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1830,8 +1869,8 @@ static struct clk_branch gcc_blsp2_qup5_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup5_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1848,8 +1887,8 @@ static struct clk_branch gcc_blsp2_qup5_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup5_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup5_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup5_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1866,8 +1905,8 @@ static struct clk_branch gcc_blsp2_qup6_i2c_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_i2c_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_i2c_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup6_i2c_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1884,8 +1923,8 @@ static struct clk_branch gcc_blsp2_qup6_spi_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_qup6_spi_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_qup6_spi_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_qup6_spi_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1915,8 +1954,8 @@ static struct clk_branch gcc_blsp2_uart1_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart1_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart1_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart1_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1933,8 +1972,8 @@ static struct clk_branch gcc_blsp2_uart2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart2_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1951,8 +1990,8 @@ static struct clk_branch gcc_blsp2_uart3_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_blsp2_uart3_apps_clk",
- .parent_names = (const char *[]){
- "blsp2_uart3_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &blsp2_uart3_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1969,8 +2008,8 @@ static struct clk_branch gcc_cfg_noc_usb3_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_cfg_noc_usb3_axi_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -1987,8 +2026,8 @@ static struct clk_branch gcc_gp1_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp1_clk",
- .parent_names = (const char *[]){
- "gp1_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp1_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2005,8 +2044,8 @@ static struct clk_branch gcc_gp2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp2_clk",
- .parent_names = (const char *[]){
- "gp2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2023,8 +2062,8 @@ static struct clk_branch gcc_gp3_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_gp3_clk",
- .parent_names = (const char *[]){
- "gp3_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &gp3_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2112,8 +2151,8 @@ static struct clk_branch gcc_hmss_ahb_clk = {
.enable_mask = BIT(21),
.hw.init = &(struct clk_init_data){
.name = "gcc_hmss_ahb_clk",
- .parent_names = (const char *[]){
- "hmss_ahb_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &hmss_ahb_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2143,8 +2182,8 @@ static struct clk_branch gcc_hmss_rbcpr_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hmss_rbcpr_clk",
- .parent_names = (const char *[]){
- "hmss_rbcpr_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &hmss_rbcpr_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2179,8 +2218,8 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
.freq_tbl = ftbl_hmss_gpll0_clk_src,
.clkr.hw.init = &(struct clk_init_data) {
.name = "hmss_gpll0_clk_src",
- .parent_names = gcc_parent_names_1,
- .num_parents = ARRAY_SIZE(gcc_parent_names_1),
+ .parent_data = gcc_parent_data_1,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_1),
.ops = &clk_rcg2_ops,
},
};
@@ -2264,8 +2303,8 @@ static struct clk_branch gcc_pcie_0_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_0_aux_clk",
- .parent_names = (const char *[]){
- "pcie_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2334,8 +2373,8 @@ static struct clk_branch gcc_pcie_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_phy_aux_clk",
- .parent_names = (const char *[]){
- "pcie_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pcie_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2352,8 +2391,8 @@ static struct clk_branch gcc_pdm2_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pdm2_clk",
- .parent_names = (const char *[]){
- "pdm2_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &pdm2_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2422,8 +2461,8 @@ static struct clk_branch gcc_sdcc2_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc2_apps_clk",
- .parent_names = (const char *[]){
- "sdcc2_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc2_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2453,8 +2492,8 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_sdcc4_apps_clk",
- .parent_names = (const char *[]){
- "sdcc4_apps_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &sdcc4_apps_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2497,8 +2536,8 @@ static struct clk_branch gcc_tsif_ref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_tsif_ref_clk",
- .parent_names = (const char *[]){
- "tsif_ref_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &tsif_ref_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2528,8 +2567,8 @@ static struct clk_branch gcc_ufs_axi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_axi_clk",
- .parent_names = (const char *[]){
- "ufs_axi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_axi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2611,8 +2650,8 @@ static struct clk_branch gcc_ufs_unipro_core_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_unipro_core_clk",
- .parent_names = (const char *[]){
- "ufs_unipro_core_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &ufs_unipro_core_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2629,8 +2668,8 @@ static struct clk_branch gcc_usb30_master_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_master_clk",
- .parent_names = (const char *[]){
- "usb30_master_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_master_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2647,8 +2686,8 @@ static struct clk_branch gcc_usb30_mock_utmi_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb30_mock_utmi_clk",
- .parent_names = (const char *[]){
- "usb30_mock_utmi_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb30_mock_utmi_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2678,8 +2717,8 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_aux_clk",
- .parent_names = (const char *[]){
- "usb3_phy_aux_clk_src",
+ .parent_hws = (const struct clk_hw *[]) {
+ &usb3_phy_aux_clk_src.clkr.hw,
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
@@ -2721,7 +2760,9 @@ static struct clk_branch gcc_hdmi_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_hdmi_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2735,7 +2776,9 @@ static struct clk_branch gcc_ufs_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_ufs_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2749,7 +2792,9 @@ static struct clk_branch gcc_usb3_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2763,7 +2808,9 @@ static struct clk_branch gcc_pcie_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_pcie_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -2777,7 +2824,9 @@ static struct clk_branch gcc_rx1_usb2_clkref_clk = {
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_rx1_usb2_clkref_clk",
- .parent_names = (const char *[]){ "xo" },
+ .parent_data = (const struct clk_parent_data []) {
+ { .fw_name = "xo" },
+ },
.num_parents = 1,
.ops = &clk_branch2_ops,
},
@@ -3115,10 +3164,6 @@ static const struct regmap_config gcc_msm8998_regmap_config = {
.fast_io = true,
};

-static struct clk_hw *gcc_msm8998_hws[] = {
- &xo.hw,
-};
-
static const struct qcom_cc_desc gcc_msm8998_desc = {
.config = &gcc_msm8998_regmap_config,
.clks = gcc_msm8998_clocks,
@@ -3127,8 +3172,6 @@ static const struct qcom_cc_desc gcc_msm8998_desc = {
.num_resets = ARRAY_SIZE(gcc_msm8998_resets),
.gdscs = gcc_msm8998_gdscs,
.num_gdscs = ARRAY_SIZE(gcc_msm8998_gdscs),
- .clk_hws = gcc_msm8998_hws,
- .num_clk_hws = ARRAY_SIZE(gcc_msm8998_hws),
};

static int gcc_msm8998_probe(struct platform_device *pdev)
diff --git a/drivers/clk/qcom/gcc-qcm2290.c b/drivers/clk/qcom/gcc-qcm2290.c
new file mode 100644
index 000000000000..b6fa7b8e8006
--- /dev/null
+++ b/drivers/clk/qcom/gcc-qcm2290.c
@@ -0,0 +1,3044 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,gcc-qcm2290.h>
+
+#include "clk-alpha-pll.h"
+#include "clk-branch.h"
+#include "clk-rcg.h"
+#include "clk-regmap-divider.h"
+#include "common.h"
+#include "gdsc.h"
+#include "reset.h"
+
+enum {
+ P_BI_TCXO,
+ P_GPLL0_OUT_AUX2,
+ P_GPLL0_OUT_EARLY,
+ P_GPLL10_OUT_MAIN,
+ P_GPLL11_OUT_AUX,
+ P_GPLL11_OUT_AUX2,
+ P_GPLL11_OUT_MAIN,
+ P_GPLL3_OUT_EARLY,
+ P_GPLL3_OUT_MAIN,
+ P_GPLL4_OUT_MAIN,
+ P_GPLL5_OUT_MAIN,
+ P_GPLL6_OUT_EARLY,
+ P_GPLL6_OUT_MAIN,
+ P_GPLL7_OUT_MAIN,
+ P_GPLL8_OUT_EARLY,
+ P_GPLL8_OUT_MAIN,
+ P_GPLL9_OUT_EARLY,
+ P_GPLL9_OUT_MAIN,
+ P_SLEEP_CLK,
+};
+
+static const struct pll_vco brammo_vco[] = {
+ { 500000000, 1250000000, 0 },
+};
+
+static const struct pll_vco default_vco[] = {
+ { 500000000, 1000000000, 2 },
+};
+
+static const struct pll_vco spark_vco[] = {
+ { 750000000, 1500000000, 1 },
+};
+
+static const u8 clk_alpha_pll_regs_offset[][PLL_OFF_MAX_REGS] = {
+ [CLK_ALPHA_PLL_TYPE_DEFAULT] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_USER_CTL_U] = 0x1C,
+ [PLL_OFF_CONFIG_CTL] = 0x20,
+ [PLL_OFF_STATUS] = 0x24,
+ },
+ [CLK_ALPHA_PLL_TYPE_BRAMMO] = {
+ [PLL_OFF_L_VAL] = 0x04,
+ [PLL_OFF_ALPHA_VAL] = 0x08,
+ [PLL_OFF_ALPHA_VAL_U] = 0x0c,
+ [PLL_OFF_TEST_CTL] = 0x10,
+ [PLL_OFF_TEST_CTL_U] = 0x14,
+ [PLL_OFF_USER_CTL] = 0x18,
+ [PLL_OFF_CONFIG_CTL] = 0x1C,
+ [PLL_OFF_STATUS] = 0x20,
+ },
+};
+
+static struct clk_alpha_pll gpll0 = {
+ .offset = 0x0,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll0",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll0_out_aux2[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll0_out_aux2 = {
+ .offset = 0x0,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll0_out_aux2,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll0_out_aux2),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll0_out_aux2",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll1 = {
+ .offset = 0x1000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll1",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll10_config = {
+ .l = 0x3c,
+ .alpha = 0x0,
+ .vco_val = 0x1 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll10 = {
+ .offset = 0xa000,
+ .vco_table = spark_vco,
+ .num_vco = ARRAY_SIZE(spark_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll10",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 532MHz configuration */
+static const struct alpha_pll_config gpll11_config = {
+ .l = 0x1B,
+ .alpha = 0x55555555,
+ .alpha_hi = 0xB5,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll11 = {
+ .offset = 0xb000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll11",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll3 = {
+ .offset = 0x3000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(3),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll3",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll3_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll3_out_main = {
+ .offset = 0x3000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll3_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll3_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll3_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll3.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll4 = {
+ .offset = 0x4000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll4",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll5 = {
+ .offset = 0x5000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll5",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static struct clk_alpha_pll gpll6 = {
+ .offset = 0x6000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll6",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll6_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll6_out_main = {
+ .offset = 0x6000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll6_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll6_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll6_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll6.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static struct clk_alpha_pll gpll7 = {
+ .offset = 0x7000,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll7",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+/* 533.2MHz configuration */
+static const struct alpha_pll_config gpll8_config = {
+ .l = 0x1B,
+ .alpha = 0x55555555,
+ .alpha_hi = 0xC5,
+ .alpha_en_mask = BIT(24),
+ .vco_val = 0x2 << 20,
+ .vco_mask = GENMASK(21, 20),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(11, 8),
+ .config_ctl_val = 0x4001055B,
+ .test_ctl_hi1_val = 0x1,
+};
+
+static struct clk_alpha_pll gpll8 = {
+ .offset = 0x8000,
+ .vco_table = default_vco,
+ .num_vco = ARRAY_SIZE(default_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .flags = SUPPORTS_DYNAMIC_UPDATE,
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll8",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll8_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll8_out_main = {
+ .offset = 0x8000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll8_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll8_out_main),
+ .width = 4,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_DEFAULT],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll8_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll8.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+/* 1152MHz configuration */
+static const struct alpha_pll_config gpll9_config = {
+ .l = 0x3C,
+ .alpha = 0x0,
+ .post_div_val = 0x1 << 8,
+ .post_div_mask = GENMASK(9, 8),
+ .main_output_mask = BIT(0),
+ .early_output_mask = BIT(3),
+ .config_ctl_val = 0x00004289,
+ .test_ctl_val = 0x08000000,
+};
+
+static struct clk_alpha_pll gpll9 = {
+ .offset = 0x9000,
+ .vco_table = brammo_vco,
+ .num_vco = ARRAY_SIZE(brammo_vco),
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr = {
+ .enable_reg = 0x79000,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gpll9",
+ .parent_data = &(const struct clk_parent_data){
+ .fw_name = "bi_tcxo",
+ },
+ .num_parents = 1,
+ .ops = &clk_alpha_pll_ops,
+ },
+ },
+};
+
+static const struct clk_div_table post_div_table_gpll9_out_main[] = {
+ { 0x1, 2 },
+ { }
+};
+
+static struct clk_alpha_pll_postdiv gpll9_out_main = {
+ .offset = 0x9000,
+ .post_div_shift = 8,
+ .post_div_table = post_div_table_gpll9_out_main,
+ .num_post_div = ARRAY_SIZE(post_div_table_gpll9_out_main),
+ .width = 2,
+ .regs = clk_alpha_pll_regs_offset[CLK_ALPHA_PLL_TYPE_BRAMMO],
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gpll9_out_main",
+ .parent_hws = (const struct clk_hw *[]){ &gpll9.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_alpha_pll_postdiv_ro_ops,
+ },
+};
+
+static const struct parent_map gcc_parent_map_0[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+};
+
+static const struct clk_parent_data gcc_parents_0[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_1[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL6_OUT_MAIN, 4 },
+};
+
+static const struct clk_parent_data gcc_parents_1[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_2[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_2[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_3[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL9_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_3[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll9.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_4[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_4[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_5[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL4_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_5[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_6[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_6[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_7[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_7[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_8[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_8[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_9[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL8_OUT_MAIN, 4 },
+ { P_GPLL9_OUT_MAIN, 5 },
+ { P_GPLL3_OUT_EARLY, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_9[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll8_out_main.clkr.hw },
+ { .hw = &gpll9_out_main.clkr.hw },
+ { .hw = &gpll3.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_10[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL8_OUT_EARLY, 2 },
+ { P_GPLL10_OUT_MAIN, 3 },
+ { P_GPLL6_OUT_EARLY, 5 },
+ { P_GPLL3_OUT_MAIN, 6 },
+};
+
+static const struct clk_parent_data gcc_parents_10[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll8.clkr.hw },
+ { .hw = &gpll10.clkr.hw },
+ { .hw = &gpll6.clkr.hw },
+ { .hw = &gpll3_out_main.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_12[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL0_OUT_EARLY, 1 },
+ { P_GPLL0_OUT_AUX2, 2 },
+ { P_GPLL7_OUT_MAIN, 3 },
+ { P_GPLL4_OUT_MAIN, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_12[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll0.clkr.hw },
+ { .hw = &gpll0_out_aux2.clkr.hw },
+ { .hw = &gpll7.clkr.hw },
+ { .hw = &gpll4.clkr.hw },
+};
+
+static const struct parent_map gcc_parent_map_13[] = {
+ { P_BI_TCXO, 0 },
+ { P_SLEEP_CLK, 5 },
+};
+
+static const struct clk_parent_data gcc_parents_13[] = {
+ { .fw_name = "bi_tcxo" },
+ { .fw_name = "sleep_clk" },
+};
+
+static const struct parent_map gcc_parent_map_14[] = {
+ { P_BI_TCXO, 0 },
+ { P_GPLL11_OUT_MAIN, 1 },
+ { P_GPLL11_OUT_AUX, 2 },
+ { P_GPLL11_OUT_AUX2, 3 },
+};
+
+static const struct clk_parent_data gcc_parents_14[] = {
+ { .fw_name = "bi_tcxo" },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+ { .hw = &gpll11.clkr.hw },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_mock_utmi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_mock_utmi_clk_src = {
+ .cmd_rcgr = 0x1a034,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv = {
+ .reg = 0x1a04c,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_usb30_prim_mock_utmi_postdiv",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_mock_utmi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_regmap_div_ro_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_axi_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_axi_clk_src = {
+ .cmd_rcgr = 0x5802c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_axi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_cci_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(37500000, P_GPLL0_OUT_AUX2, 8, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_cci_clk_src = {
+ .cmd_rcgr = 0x56000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_9,
+ .freq_tbl = ftbl_gcc_camss_cci_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_clk_src",
+ .parent_data = gcc_parents_9,
+ .num_parents = ARRAY_SIZE(gcc_parents_9),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_csi0phytimer_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ F(268800000, P_GPLL4_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_csi0phytimer_clk_src = {
+ .cmd_rcgr = 0x45000,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_csi1phytimer_clk_src = {
+ .cmd_rcgr = 0x4501c,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_5,
+ .freq_tbl = ftbl_gcc_camss_csi0phytimer_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk_src",
+ .parent_data = gcc_parents_5,
+ .num_parents = ARRAY_SIZE(gcc_parents_5),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_mclk0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(24000000, P_GPLL9_OUT_MAIN, 1, 1, 24),
+ F(64000000, P_GPLL9_OUT_EARLY, 9, 1, 2),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_mclk0_clk_src = {
+ .cmd_rcgr = 0x51000,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk1_clk_src = {
+ .cmd_rcgr = 0x5101c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk2_clk_src = {
+ .cmd_rcgr = 0x51038,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_mclk3_clk_src = {
+ .cmd_rcgr = 0x51054,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_3,
+ .freq_tbl = ftbl_gcc_camss_mclk0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk_src",
+ .parent_data = gcc_parents_3,
+ .num_parents = ARRAY_SIZE(gcc_parents_3),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(171428571, P_GPLL0_OUT_EARLY, 3.5, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_ahb_clk_src = {
+ .cmd_rcgr = 0x55024,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_ope_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_ope_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(200000000, P_GPLL8_OUT_MAIN, 2, 0, 0),
+ F(266600000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(465000000, P_GPLL8_OUT_MAIN, 1, 0, 0),
+ F(580000000, P_GPLL8_OUT_EARLY, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_ope_clk_src = {
+ .cmd_rcgr = 0x55004,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_6,
+ .freq_tbl = ftbl_gcc_camss_ope_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk_src",
+ .parent_data = gcc_parents_6,
+ .num_parents = ARRAY_SIZE(gcc_parents_6),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(128000000, P_GPLL10_OUT_MAIN, 9, 0, 0),
+ F(135529412, P_GPLL10_OUT_MAIN, 8.5, 0, 0),
+ F(144000000, P_GPLL10_OUT_MAIN, 8, 0, 0),
+ F(153600000, P_GPLL10_OUT_MAIN, 7.5, 0, 0),
+ F(164571429, P_GPLL10_OUT_MAIN, 7, 0, 0),
+ F(177230769, P_GPLL10_OUT_MAIN, 6.5, 0, 0),
+ F(192000000, P_GPLL10_OUT_MAIN, 6, 0, 0),
+ F(209454545, P_GPLL10_OUT_MAIN, 5.5, 0, 0),
+ F(230400000, P_GPLL10_OUT_MAIN, 5, 0, 0),
+ F(256000000, P_GPLL10_OUT_MAIN, 4.5, 0, 0),
+ F(288000000, P_GPLL10_OUT_MAIN, 4, 0, 0),
+ F(329142857, P_GPLL10_OUT_MAIN, 3.5, 0, 0),
+ F(384000000, P_GPLL10_OUT_MAIN, 3, 0, 0),
+ F(460800000, P_GPLL10_OUT_MAIN, 2.5, 0, 0),
+ F(576000000, P_GPLL10_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_clk_src = {
+ .cmd_rcgr = 0x52004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_0_csid_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(120000000, P_GPLL0_OUT_EARLY, 5, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ F(426400000, P_GPLL3_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_0_csid_clk_src = {
+ .cmd_rcgr = 0x52094,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_clk_src = {
+ .cmd_rcgr = 0x52024,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_7,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk_src",
+ .parent_data = gcc_parents_7,
+ .num_parents = ARRAY_SIZE(gcc_parents_7),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_camss_tfe_1_csid_clk_src = {
+ .cmd_rcgr = 0x520b4,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_8,
+ .freq_tbl = ftbl_gcc_camss_tfe_0_csid_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk_src",
+ .parent_data = gcc_parents_8,
+ .num_parents = ARRAY_SIZE(gcc_parents_8),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_tfe_cphy_rx_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ F(341333333, P_GPLL6_OUT_EARLY, 1, 4, 9),
+ F(384000000, P_GPLL6_OUT_EARLY, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_tfe_cphy_rx_clk_src = {
+ .cmd_rcgr = 0x52064,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_10,
+ .freq_tbl = ftbl_gcc_camss_tfe_cphy_rx_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_cphy_rx_clk_src",
+ .parent_data = gcc_parents_10,
+ .num_parents = ARRAY_SIZE(gcc_parents_10),
+ .flags = CLK_OPS_PARENT_ENABLE,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_camss_top_ahb_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(40000000, P_GPLL0_OUT_AUX2, 7.5, 0, 0),
+ F(80000000, P_GPLL0_OUT_EARLY, 7.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_camss_top_ahb_clk_src = {
+ .cmd_rcgr = 0x58010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_4,
+ .freq_tbl = ftbl_gcc_camss_top_ahb_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk_src",
+ .parent_data = gcc_parents_4,
+ .num_parents = ARRAY_SIZE(gcc_parents_4),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(200000000, P_GPLL0_OUT_AUX2, 1.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_gp1_clk_src = {
+ .cmd_rcgr = 0x4d004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp2_clk_src = {
+ .cmd_rcgr = 0x4e004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_gp3_clk_src = {
+ .cmd_rcgr = 0x4f004,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_2,
+ .freq_tbl = ftbl_gcc_gp1_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk_src",
+ .parent_data = gcc_parents_2,
+ .num_parents = ARRAY_SIZE(gcc_parents_2),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_pdm2_clk_src[] = {
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(60000000, P_GPLL0_OUT_AUX2, 5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_pdm2_clk_src = {
+ .cmd_rcgr = 0x20010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_pdm2_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_qupv3_wrap0_s0_clk_src[] = {
+ F(7372800, P_GPLL0_OUT_AUX2, 1, 384, 15625),
+ F(14745600, P_GPLL0_OUT_AUX2, 1, 768, 15625),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(29491200, P_GPLL0_OUT_AUX2, 1, 1536, 15625),
+ F(32000000, P_GPLL0_OUT_AUX2, 1, 8, 75),
+ F(48000000, P_GPLL0_OUT_AUX2, 1, 4, 25),
+ F(64000000, P_GPLL0_OUT_AUX2, 1, 16, 75),
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(80000000, P_GPLL0_OUT_AUX2, 1, 4, 15),
+ F(96000000, P_GPLL0_OUT_AUX2, 1, 8, 25),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(102400000, P_GPLL0_OUT_AUX2, 1, 128, 375),
+ F(112000000, P_GPLL0_OUT_AUX2, 1, 28, 75),
+ F(117964800, P_GPLL0_OUT_AUX2, 1, 6144, 15625),
+ F(120000000, P_GPLL0_OUT_AUX2, 2.5, 0, 0),
+ F(128000000, P_GPLL6_OUT_MAIN, 3, 0, 0),
+ { }
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s0_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s0_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s0_clk_src = {
+ .cmd_rcgr = 0x1f148,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s0_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s1_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s1_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s1_clk_src = {
+ .cmd_rcgr = 0x1f278,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s1_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s2_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s2_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s2_clk_src = {
+ .cmd_rcgr = 0x1f3a8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s2_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s3_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s3_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s3_clk_src = {
+ .cmd_rcgr = 0x1f4d8,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s3_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s4_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s4_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s4_clk_src = {
+ .cmd_rcgr = 0x1f608,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s4_clk_src_init,
+};
+
+static struct clk_init_data gcc_qupv3_wrap0_s5_clk_src_init = {
+ .name = "gcc_qupv3_wrap0_s5_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_ops,
+};
+
+static struct clk_rcg2 gcc_qupv3_wrap0_s5_clk_src = {
+ .cmd_rcgr = 0x1f738,
+ .mnd_width = 16,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_qupv3_wrap0_s0_clk_src,
+ .clkr.hw.init = &gcc_qupv3_wrap0_s5_clk_src_init,
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_apps_clk_src[] = {
+ F(144000, P_BI_TCXO, 16, 3, 25),
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(20000000, P_GPLL0_OUT_AUX2, 5, 1, 3),
+ F(25000000, P_GPLL0_OUT_AUX2, 6, 1, 2),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(192000000, P_GPLL6_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL6_OUT_MAIN, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_apps_clk_src = {
+ .cmd_rcgr = 0x38028,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_1,
+ .freq_tbl = ftbl_gcc_sdcc1_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk_src",
+ .parent_data = gcc_parents_1,
+ .num_parents = ARRAY_SIZE(gcc_parents_1),
+ .ops = &clk_rcg2_floor_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc1_ice_core_clk_src[] = {
+ F(75000000, P_GPLL0_OUT_AUX2, 4, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(150000000, P_GPLL0_OUT_AUX2, 2, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(300000000, P_GPLL0_OUT_AUX2, 1, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc1_ice_core_clk_src = {
+ .cmd_rcgr = 0x38010,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_sdcc1_ice_core_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_sdcc2_apps_clk_src[] = {
+ F(400000, P_BI_TCXO, 12, 1, 4),
+ F(19200000, P_BI_TCXO, 1, 0, 0),
+ F(25000000, P_GPLL0_OUT_AUX2, 12, 0, 0),
+ F(50000000, P_GPLL0_OUT_AUX2, 6, 0, 0),
+ F(100000000, P_GPLL0_OUT_AUX2, 3, 0, 0),
+ F(202000000, P_GPLL7_OUT_MAIN, 4, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_sdcc2_apps_clk_src = {
+ .cmd_rcgr = 0x1e00c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_12,
+ .freq_tbl = ftbl_gcc_sdcc2_apps_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk_src",
+ .parent_data = gcc_parents_12,
+ .num_parents = ARRAY_SIZE(gcc_parents_12),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_usb30_prim_master_clk_src[] = {
+ F(66666667, P_GPLL0_OUT_AUX2, 4.5, 0, 0),
+ F(133333333, P_GPLL0_OUT_EARLY, 4.5, 0, 0),
+ F(200000000, P_GPLL0_OUT_EARLY, 3, 0, 0),
+ F(240000000, P_GPLL0_OUT_EARLY, 2.5, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_usb30_prim_master_clk_src = {
+ .cmd_rcgr = 0x1a01c,
+ .mnd_width = 8,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_0,
+ .freq_tbl = ftbl_gcc_usb30_prim_master_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk_src",
+ .parent_data = gcc_parents_0,
+ .num_parents = ARRAY_SIZE(gcc_parents_0),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_rcg2 gcc_usb3_prim_phy_aux_clk_src = {
+ .cmd_rcgr = 0x1a060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_13,
+ .freq_tbl = ftbl_gcc_usb30_prim_mock_utmi_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_aux_clk_src",
+ .parent_data = gcc_parents_13,
+ .num_parents = ARRAY_SIZE(gcc_parents_13),
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static const struct freq_tbl ftbl_gcc_video_venus_clk_src[] = {
+ F(133333333, P_GPLL11_OUT_MAIN, 4.5, 0, 0),
+ F(240000000, P_GPLL11_OUT_MAIN, 2.5, 0, 0),
+ F(300000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ F(384000000, P_GPLL11_OUT_MAIN, 2, 0, 0),
+ { }
+};
+
+static struct clk_rcg2 gcc_video_venus_clk_src = {
+ .cmd_rcgr = 0x58060,
+ .mnd_width = 0,
+ .hid_width = 5,
+ .parent_map = gcc_parent_map_14,
+ .freq_tbl = ftbl_gcc_video_venus_clk_src,
+ .clkr.hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_clk_src",
+ .parent_data = gcc_parents_14,
+ .num_parents = ARRAY_SIZE(gcc_parents_14),
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_rcg2_ops,
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_csi_clk = {
+ .halt_reg = 0x1d004,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x1d004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_csi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_ahb2phy_usb_clk = {
+ .halt_reg = 0x1d008,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1d008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1d008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_ahb2phy_usb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_bimc_gpu_axi_clk = {
+ .halt_reg = 0x71154,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x71154,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x71154,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_bimc_gpu_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_boot_rom_ahb_clk = {
+ .halt_reg = 0x23004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x23004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_boot_rom_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_nrt_clk = {
+ .halt_reg = 0x17070,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17070,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(27),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_nrt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cam_throttle_rt_clk = {
+ .halt_reg = 0x1706c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1706c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(26),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cam_throttle_rt_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_ahb_clk = {
+ .halt_reg = 0x17008,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x17008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camera_xo_clk = {
+ .halt_reg = 0x17028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camera_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_axi_clk = {
+ .halt_reg = 0x58044,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58044,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_axi_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_atb_clk = {
+ .halt_reg = 0x5804c,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x5804c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x5804c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_atb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_camnoc_nts_xo_clk = {
+ .halt_reg = 0x58050,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x58050,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x58050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_camnoc_nts_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cci_0_clk = {
+ .halt_reg = 0x56018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x56018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cci_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_cci_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_0_clk = {
+ .halt_reg = 0x52088,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52088,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_cphy_1_clk = {
+ .halt_reg = 0x5208c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5208c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_cphy_1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi0phytimer_clk = {
+ .halt_reg = 0x45018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi0phytimer_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_csi0phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_csi1phytimer_clk = {
+ .halt_reg = 0x45034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x45034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_csi1phytimer_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_csi1phytimer_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk0_clk = {
+ .halt_reg = 0x51018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk1_clk = {
+ .halt_reg = 0x51034,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51034,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk2_clk = {
+ .halt_reg = 0x51050,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x51050,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_mclk3_clk = {
+ .halt_reg = 0x5106c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5106c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_mclk3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_mclk3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_nrt_axi_clk = {
+ .halt_reg = 0x58054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_nrt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_ahb_clk = {
+ .halt_reg = 0x5503c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5503c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_ahb_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_ope_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_ope_clk = {
+ .halt_reg = 0x5501c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5501c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_ope_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_ope_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_rt_axi_clk = {
+ .halt_reg = 0x5805c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5805c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_rt_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_clk = {
+ .halt_reg = 0x5201c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5201c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_cphy_rx_clk = {
+ .halt_reg = 0x5207c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5207c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_0_csid_clk = {
+ .halt_reg = 0x520ac,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520ac,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_0_csid_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_0_csid_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_clk = {
+ .halt_reg = 0x5203c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5203c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_cphy_rx_clk = {
+ .halt_reg = 0x52080,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x52080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_cphy_rx_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_cphy_rx_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_tfe_1_csid_clk = {
+ .halt_reg = 0x520cc,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x520cc,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_tfe_1_csid_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_tfe_1_csid_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_camss_top_ahb_clk = {
+ .halt_reg = 0x58028,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x58028,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_camss_top_ahb_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_camss_top_ahb_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_cfg_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a084,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a084,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a084,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_cfg_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_ahb_clk = {
+ .halt_reg = 0x1700c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1700c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1700c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_regmap_div gcc_disp_gpll0_clk_src = {
+ .reg = 0x17058,
+ .shift = 0,
+ .width = 2,
+ .clkr.hw.init = &(struct clk_init_data) {
+ .name = "gcc_disp_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[]){ &gpll0.clkr.hw },
+ .num_parents = 1,
+ .ops = &clk_regmap_div_ops,
+ },
+};
+
+static struct clk_branch gcc_disp_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(20),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_disp_gpll0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_hf_axi_clk = {
+ .halt_reg = 0x17020,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17020,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17020,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_hf_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_throttle_core_clk = {
+ .halt_reg = 0x17064,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17064,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(5),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_disp_xo_clk = {
+ .halt_reg = 0x1702c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1702c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_disp_xo_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp1_clk = {
+ .halt_reg = 0x4d000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4d000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp2_clk = {
+ .halt_reg = 0x4e000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4e000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gp3_clk = {
+ .halt_reg = 0x4f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x4f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gp3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_gp3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x36004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x36004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_cfg_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gpll0.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_gpll0_div_clk_src = {
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(16),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_gpll0_div_clk_src",
+ .parent_hws = (const struct clk_hw *[])
+ { &gpll0_out_aux2.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_iref_clk = {
+ .halt_reg = 0x36100,
+ .halt_check = BRANCH_HALT_DELAY,
+ .clkr = {
+ .enable_reg = 0x36100,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_iref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_memnoc_gfx_clk = {
+ .halt_reg = 0x3600c,
+ .halt_check = BRANCH_VOTED,
+ .hwcg_reg = 0x3600c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3600c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_memnoc_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_snoc_dvm_gfx_clk = {
+ .halt_reg = 0x36018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x36018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_snoc_dvm_gfx_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_gpu_throttle_core_clk = {
+ .halt_reg = 0x36048,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36048,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(31),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_gpu_throttle_core_clk",
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm2_clk = {
+ .halt_reg = 0x2000c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2000c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_pdm2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_ahb_clk = {
+ .halt_reg = 0x20004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x20004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x20004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pdm_xo4_clk = {
+ .halt_reg = 0x20008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x20008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pdm_xo4_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_pwm0_xo512_clk = {
+ .halt_reg = 0x2002c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x2002c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_pwm0_xo512_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_nrt_ahb_clk = {
+ .halt_reg = 0x17014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17014,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_nrt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_camera_rt_ahb_clk = {
+ .halt_reg = 0x17060,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17060,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(2),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_camera_rt_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_disp_ahb_clk = {
+ .halt_reg = 0x17018,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17018,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(1),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_disp_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_gpu_cfg_ahb_clk = {
+ .halt_reg = 0x36040,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x36040,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(4),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_gpu_cfg_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qmip_video_vcodec_ahb_clk = {
+ .halt_reg = 0x17010,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17010,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(25),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qmip_video_vcodec_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
+ .halt_reg = 0x1f014,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(9),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_2x_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_core_clk = {
+ .halt_reg = 0x1f00c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(8),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
+ .halt_reg = 0x1f144,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(10),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s0_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s0_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s1_clk = {
+ .halt_reg = 0x1f274,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(11),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s1_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s1_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s2_clk = {
+ .halt_reg = 0x1f3a4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(12),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s2_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s2_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s3_clk = {
+ .halt_reg = 0x1f4d4,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(13),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s3_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s3_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s4_clk = {
+ .halt_reg = 0x1f604,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(14),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s4_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s4_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap0_s5_clk = {
+ .halt_reg = 0x1f734,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(15),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap0_s5_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_qupv3_wrap0_s5_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_m_ahb_clk = {
+ .halt_reg = 0x1f004,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(6),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_m_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_qupv3_wrap_0_s_ahb_clk = {
+ .halt_reg = 0x1f008,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x1f008,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x7900c,
+ .enable_mask = BIT(7),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_qupv3_wrap_0_s_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ahb_clk = {
+ .halt_reg = 0x38008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_apps_clk = {
+ .halt_reg = 0x38004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x38004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_apps_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc1_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc1_ice_core_clk = {
+ .halt_reg = 0x3800c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x3800c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x3800c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc1_ice_core_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc1_ice_core_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_ahb_clk = {
+ .halt_reg = 0x1e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sdcc2_apps_clk = {
+ .halt_reg = 0x1e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sdcc2_apps_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_sdcc2_apps_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
+ .halt_reg = 0x2b06c,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x2b06c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_cpuss_ahb_clk",
+ .flags = CLK_IS_CRITICAL,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_sys_noc_usb3_prim_axi_clk = {
+ .halt_reg = 0x1a080,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1a080,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a080,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_sys_noc_usb3_prim_axi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_master_clk = {
+ .halt_reg = 0x1a010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_master_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_master_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_mock_utmi_clk = {
+ .halt_reg = 0x1a018,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a018,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_mock_utmi_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb30_prim_mock_utmi_postdiv.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb30_prim_sleep_clk = {
+ .halt_reg = 0x1a014,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a014,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb30_prim_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_clkref_clk = {
+ .halt_reg = 0x9f000,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x9f000,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_clkref_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_com_aux_clk = {
+ .halt_reg = 0x1a054,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1a054,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_com_aux_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_usb3_prim_phy_aux_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_usb3_prim_phy_pipe_clk = {
+ .halt_reg = 0x1a058,
+ .halt_check = BRANCH_HALT_SKIP,
+ .hwcg_reg = 0x1a058,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1a058,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_usb3_prim_phy_pipe_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_vcodec0_axi_clk = {
+ .halt_reg = 0x6e008,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e008,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_vcodec0_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ahb_clk = {
+ .halt_reg = 0x6e010,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e010,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_venus_ctl_axi_clk = {
+ .halt_reg = 0x6e004,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x6e004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_venus_ctl_axi_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_ahb_clk = {
+ .halt_reg = 0x17004,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x17004,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x17004,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_ahb_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_axi0_clk = {
+ .halt_reg = 0x1701c,
+ .halt_check = BRANCH_HALT,
+ .hwcg_reg = 0x1701c,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x1701c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_axi0_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_throttle_core_clk = {
+ .halt_reg = 0x17068,
+ .halt_check = BRANCH_HALT_VOTED,
+ .hwcg_reg = 0x17068,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x79004,
+ .enable_mask = BIT(28),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_throttle_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_vcodec0_sys_clk = {
+ .halt_reg = 0x580a4,
+ .halt_check = BRANCH_HALT_DELAY,
+ .hwcg_reg = 0x580a4,
+ .hwcg_bit = 1,
+ .clkr = {
+ .enable_reg = 0x580a4,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_vcodec0_sys_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_video_venus_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_venus_ctl_clk = {
+ .halt_reg = 0x5808c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x5808c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_venus_ctl_clk",
+ .parent_hws = (const struct clk_hw *[])
+ { &gcc_video_venus_clk_src.clkr.hw },
+ .num_parents = 1,
+ .flags = CLK_SET_RATE_PARENT,
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch gcc_video_xo_clk = {
+ .halt_reg = 0x17024,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x17024,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "gcc_video_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct gdsc gcc_camss_top_gdsc = {
+ .gdscr = 0x58004,
+ .pd = {
+ .name = "gcc_camss_top",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_usb30_prim_gdsc = {
+ .gdscr = 0x1a004,
+ .pd = {
+ .name = "gcc_usb30_prim",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_vcodec0_gdsc = {
+ .gdscr = 0x58098,
+ .pd = {
+ .name = "gcc_vcodec0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc gcc_venus_gdsc = {
+ .gdscr = 0x5807c,
+ .pd = {
+ .name = "gcc_venus",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu1_gdsc = {
+ .gdscr = 0x7d060,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu1",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_turing_mmu_tbu0_gdsc = {
+ .gdscr = 0x7d07c,
+ .pd = {
+ .name = "hlos1_vote_turing_mmu_tbu0",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc = {
+ .gdscr = 0x7d074,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_rt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct gdsc hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc = {
+ .gdscr = 0x7d078,
+ .pd = {
+ .name = "hlos1_vote_mm_snoc_mmu_tbu_nrt",
+ },
+ .pwrsts = PWRSTS_OFF_ON,
+ .flags = VOTABLE,
+};
+
+static struct clk_regmap *gcc_qcm2290_clocks[] = {
+ [GCC_AHB2PHY_CSI_CLK] = &gcc_ahb2phy_csi_clk.clkr,
+ [GCC_AHB2PHY_USB_CLK] = &gcc_ahb2phy_usb_clk.clkr,
+ [GCC_BIMC_GPU_AXI_CLK] = &gcc_bimc_gpu_axi_clk.clkr,
+ [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr,
+ [GCC_CAM_THROTTLE_NRT_CLK] = &gcc_cam_throttle_nrt_clk.clkr,
+ [GCC_CAM_THROTTLE_RT_CLK] = &gcc_cam_throttle_rt_clk.clkr,
+ [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr,
+ [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr,
+ [GCC_CAMSS_AXI_CLK] = &gcc_camss_axi_clk.clkr,
+ [GCC_CAMSS_AXI_CLK_SRC] = &gcc_camss_axi_clk_src.clkr,
+ [GCC_CAMSS_CAMNOC_ATB_CLK] = &gcc_camss_camnoc_atb_clk.clkr,
+ [GCC_CAMSS_CAMNOC_NTS_XO_CLK] = &gcc_camss_camnoc_nts_xo_clk.clkr,
+ [GCC_CAMSS_CCI_0_CLK] = &gcc_camss_cci_0_clk.clkr,
+ [GCC_CAMSS_CCI_CLK_SRC] = &gcc_camss_cci_clk_src.clkr,
+ [GCC_CAMSS_CPHY_0_CLK] = &gcc_camss_cphy_0_clk.clkr,
+ [GCC_CAMSS_CPHY_1_CLK] = &gcc_camss_cphy_1_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK] = &gcc_camss_csi0phytimer_clk.clkr,
+ [GCC_CAMSS_CSI0PHYTIMER_CLK_SRC] = &gcc_camss_csi0phytimer_clk_src.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK] = &gcc_camss_csi1phytimer_clk.clkr,
+ [GCC_CAMSS_CSI1PHYTIMER_CLK_SRC] = &gcc_camss_csi1phytimer_clk_src.clkr,
+ [GCC_CAMSS_MCLK0_CLK] = &gcc_camss_mclk0_clk.clkr,
+ [GCC_CAMSS_MCLK0_CLK_SRC] = &gcc_camss_mclk0_clk_src.clkr,
+ [GCC_CAMSS_MCLK1_CLK] = &gcc_camss_mclk1_clk.clkr,
+ [GCC_CAMSS_MCLK1_CLK_SRC] = &gcc_camss_mclk1_clk_src.clkr,
+ [GCC_CAMSS_MCLK2_CLK] = &gcc_camss_mclk2_clk.clkr,
+ [GCC_CAMSS_MCLK2_CLK_SRC] = &gcc_camss_mclk2_clk_src.clkr,
+ [GCC_CAMSS_MCLK3_CLK] = &gcc_camss_mclk3_clk.clkr,
+ [GCC_CAMSS_MCLK3_CLK_SRC] = &gcc_camss_mclk3_clk_src.clkr,
+ [GCC_CAMSS_NRT_AXI_CLK] = &gcc_camss_nrt_axi_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK] = &gcc_camss_ope_ahb_clk.clkr,
+ [GCC_CAMSS_OPE_AHB_CLK_SRC] = &gcc_camss_ope_ahb_clk_src.clkr,
+ [GCC_CAMSS_OPE_CLK] = &gcc_camss_ope_clk.clkr,
+ [GCC_CAMSS_OPE_CLK_SRC] = &gcc_camss_ope_clk_src.clkr,
+ [GCC_CAMSS_RT_AXI_CLK] = &gcc_camss_rt_axi_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK] = &gcc_camss_tfe_0_clk.clkr,
+ [GCC_CAMSS_TFE_0_CLK_SRC] = &gcc_camss_tfe_0_clk_src.clkr,
+ [GCC_CAMSS_TFE_0_CPHY_RX_CLK] = &gcc_camss_tfe_0_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK] = &gcc_camss_tfe_0_csid_clk.clkr,
+ [GCC_CAMSS_TFE_0_CSID_CLK_SRC] = &gcc_camss_tfe_0_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CLK] = &gcc_camss_tfe_1_clk.clkr,
+ [GCC_CAMSS_TFE_1_CLK_SRC] = &gcc_camss_tfe_1_clk_src.clkr,
+ [GCC_CAMSS_TFE_1_CPHY_RX_CLK] = &gcc_camss_tfe_1_cphy_rx_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK] = &gcc_camss_tfe_1_csid_clk.clkr,
+ [GCC_CAMSS_TFE_1_CSID_CLK_SRC] = &gcc_camss_tfe_1_csid_clk_src.clkr,
+ [GCC_CAMSS_TFE_CPHY_RX_CLK_SRC] = &gcc_camss_tfe_cphy_rx_clk_src.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK] = &gcc_camss_top_ahb_clk.clkr,
+ [GCC_CAMSS_TOP_AHB_CLK_SRC] = &gcc_camss_top_ahb_clk_src.clkr,
+ [GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
+ [GCC_DISP_AHB_CLK] = &gcc_disp_ahb_clk.clkr,
+ [GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
+ [GCC_DISP_GPLL0_DIV_CLK_SRC] = &gcc_disp_gpll0_div_clk_src.clkr,
+ [GCC_DISP_HF_AXI_CLK] = &gcc_disp_hf_axi_clk.clkr,
+ [GCC_DISP_THROTTLE_CORE_CLK] = &gcc_disp_throttle_core_clk.clkr,
+ [GCC_DISP_XO_CLK] = &gcc_disp_xo_clk.clkr,
+ [GCC_GP1_CLK] = &gcc_gp1_clk.clkr,
+ [GCC_GP1_CLK_SRC] = &gcc_gp1_clk_src.clkr,
+ [GCC_GP2_CLK] = &gcc_gp2_clk.clkr,
+ [GCC_GP2_CLK_SRC] = &gcc_gp2_clk_src.clkr,
+ [GCC_GP3_CLK] = &gcc_gp3_clk.clkr,
+ [GCC_GP3_CLK_SRC] = &gcc_gp3_clk_src.clkr,
+ [GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
+ [GCC_GPU_GPLL0_CLK_SRC] = &gcc_gpu_gpll0_clk_src.clkr,
+ [GCC_GPU_GPLL0_DIV_CLK_SRC] = &gcc_gpu_gpll0_div_clk_src.clkr,
+ [GCC_GPU_IREF_CLK] = &gcc_gpu_iref_clk.clkr,
+ [GCC_GPU_MEMNOC_GFX_CLK] = &gcc_gpu_memnoc_gfx_clk.clkr,
+ [GCC_GPU_SNOC_DVM_GFX_CLK] = &gcc_gpu_snoc_dvm_gfx_clk.clkr,
+ [GCC_GPU_THROTTLE_CORE_CLK] = &gcc_gpu_throttle_core_clk.clkr,
+ [GCC_PDM2_CLK] = &gcc_pdm2_clk.clkr,
+ [GCC_PDM2_CLK_SRC] = &gcc_pdm2_clk_src.clkr,
+ [GCC_PDM_AHB_CLK] = &gcc_pdm_ahb_clk.clkr,
+ [GCC_PDM_XO4_CLK] = &gcc_pdm_xo4_clk.clkr,
+ [GCC_PWM0_XO512_CLK] = &gcc_pwm0_xo512_clk.clkr,
+ [GCC_QMIP_CAMERA_NRT_AHB_CLK] = &gcc_qmip_camera_nrt_ahb_clk.clkr,
+ [GCC_QMIP_CAMERA_RT_AHB_CLK] = &gcc_qmip_camera_rt_ahb_clk.clkr,
+ [GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
+ [GCC_QMIP_GPU_CFG_AHB_CLK] = &gcc_qmip_gpu_cfg_ahb_clk.clkr,
+ [GCC_QMIP_VIDEO_VCODEC_AHB_CLK] = &gcc_qmip_video_vcodec_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
+ [GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
+ [GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
+ [GCC_QUPV3_WRAP0_S1_CLK_SRC] = &gcc_qupv3_wrap0_s1_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK] = &gcc_qupv3_wrap0_s2_clk.clkr,
+ [GCC_QUPV3_WRAP0_S2_CLK_SRC] = &gcc_qupv3_wrap0_s2_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK] = &gcc_qupv3_wrap0_s3_clk.clkr,
+ [GCC_QUPV3_WRAP0_S3_CLK_SRC] = &gcc_qupv3_wrap0_s3_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK] = &gcc_qupv3_wrap0_s4_clk.clkr,
+ [GCC_QUPV3_WRAP0_S4_CLK_SRC] = &gcc_qupv3_wrap0_s4_clk_src.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK] = &gcc_qupv3_wrap0_s5_clk.clkr,
+ [GCC_QUPV3_WRAP0_S5_CLK_SRC] = &gcc_qupv3_wrap0_s5_clk_src.clkr,
+ [GCC_QUPV3_WRAP_0_M_AHB_CLK] = &gcc_qupv3_wrap_0_m_ahb_clk.clkr,
+ [GCC_QUPV3_WRAP_0_S_AHB_CLK] = &gcc_qupv3_wrap_0_s_ahb_clk.clkr,
+ [GCC_SDCC1_AHB_CLK] = &gcc_sdcc1_ahb_clk.clkr,
+ [GCC_SDCC1_APPS_CLK] = &gcc_sdcc1_apps_clk.clkr,
+ [GCC_SDCC1_APPS_CLK_SRC] = &gcc_sdcc1_apps_clk_src.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK] = &gcc_sdcc1_ice_core_clk.clkr,
+ [GCC_SDCC1_ICE_CORE_CLK_SRC] = &gcc_sdcc1_ice_core_clk_src.clkr,
+ [GCC_SDCC2_AHB_CLK] = &gcc_sdcc2_ahb_clk.clkr,
+ [GCC_SDCC2_APPS_CLK] = &gcc_sdcc2_apps_clk.clkr,
+ [GCC_SDCC2_APPS_CLK_SRC] = &gcc_sdcc2_apps_clk_src.clkr,
+ [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
+ [GCC_SYS_NOC_USB3_PRIM_AXI_CLK] = &gcc_sys_noc_usb3_prim_axi_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK] = &gcc_usb30_prim_master_clk.clkr,
+ [GCC_USB30_PRIM_MASTER_CLK_SRC] = &gcc_usb30_prim_master_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK] = &gcc_usb30_prim_mock_utmi_clk.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC] =
+ &gcc_usb30_prim_mock_utmi_clk_src.clkr,
+ [GCC_USB30_PRIM_MOCK_UTMI_POSTDIV] =
+ &gcc_usb30_prim_mock_utmi_postdiv.clkr,
+ [GCC_USB30_PRIM_SLEEP_CLK] = &gcc_usb30_prim_sleep_clk.clkr,
+ [GCC_USB3_PRIM_CLKREF_CLK] = &gcc_usb3_prim_clkref_clk.clkr,
+ [GCC_USB3_PRIM_PHY_AUX_CLK_SRC] = &gcc_usb3_prim_phy_aux_clk_src.clkr,
+ [GCC_USB3_PRIM_PHY_COM_AUX_CLK] = &gcc_usb3_prim_phy_com_aux_clk.clkr,
+ [GCC_USB3_PRIM_PHY_PIPE_CLK] = &gcc_usb3_prim_phy_pipe_clk.clkr,
+ [GCC_VCODEC0_AXI_CLK] = &gcc_vcodec0_axi_clk.clkr,
+ [GCC_VENUS_AHB_CLK] = &gcc_venus_ahb_clk.clkr,
+ [GCC_VENUS_CTL_AXI_CLK] = &gcc_venus_ctl_axi_clk.clkr,
+ [GCC_VIDEO_AHB_CLK] = &gcc_video_ahb_clk.clkr,
+ [GCC_VIDEO_AXI0_CLK] = &gcc_video_axi0_clk.clkr,
+ [GCC_VIDEO_THROTTLE_CORE_CLK] = &gcc_video_throttle_core_clk.clkr,
+ [GCC_VIDEO_VCODEC0_SYS_CLK] = &gcc_video_vcodec0_sys_clk.clkr,
+ [GCC_VIDEO_VENUS_CLK_SRC] = &gcc_video_venus_clk_src.clkr,
+ [GCC_VIDEO_VENUS_CTL_CLK] = &gcc_video_venus_ctl_clk.clkr,
+ [GCC_VIDEO_XO_CLK] = &gcc_video_xo_clk.clkr,
+ [GPLL0] = &gpll0.clkr,
+ [GPLL0_OUT_AUX2] = &gpll0_out_aux2.clkr,
+ [GPLL1] = &gpll1.clkr,
+ [GPLL10] = &gpll10.clkr,
+ [GPLL11] = &gpll11.clkr,
+ [GPLL3] = &gpll3.clkr,
+ [GPLL3_OUT_MAIN] = &gpll3_out_main.clkr,
+ [GPLL4] = &gpll4.clkr,
+ [GPLL5] = &gpll5.clkr,
+ [GPLL6] = &gpll6.clkr,
+ [GPLL6_OUT_MAIN] = &gpll6_out_main.clkr,
+ [GPLL7] = &gpll7.clkr,
+ [GPLL8] = &gpll8.clkr,
+ [GPLL8_OUT_MAIN] = &gpll8_out_main.clkr,
+ [GPLL9] = &gpll9.clkr,
+ [GPLL9_OUT_MAIN] = &gpll9_out_main.clkr,
+};
+
+static const struct qcom_reset_map gcc_qcm2290_resets[] = {
+ [GCC_CAMSS_OPE_BCR] = { 0x55000 },
+ [GCC_CAMSS_TFE_BCR] = { 0x52000 },
+ [GCC_CAMSS_TOP_BCR] = { 0x58000 },
+ [GCC_GPU_BCR] = { 0x36000 },
+ [GCC_MMSS_BCR] = { 0x17000 },
+ [GCC_PDM_BCR] = { 0x20000 },
+ [GCC_QUPV3_WRAPPER_0_BCR] = { 0x1f000 },
+ [GCC_QUSB2PHY_PRIM_BCR] = { 0x1c000 },
+ [GCC_SDCC1_BCR] = { 0x38000 },
+ [GCC_SDCC2_BCR] = { 0x1e000 },
+ [GCC_USB30_PRIM_BCR] = { 0x1a000 },
+ [GCC_USB3_PHY_PRIM_SP0_BCR] = { 0x1b000 },
+ [GCC_USB3PHY_PHY_PRIM_SP0_BCR] = { 0x1b008 },
+ [GCC_USB_PHY_CFG_AHB2PHY_BCR] = { 0x1d000 },
+ [GCC_VCODEC0_BCR] = { 0x58094 },
+ [GCC_VENUS_BCR] = { 0x58078 },
+ [GCC_VIDEO_INTERFACE_BCR] = { 0x6e000 },
+};
+
+static struct gdsc *gcc_qcm2290_gdscs[] = {
+ [GCC_CAMSS_TOP_GDSC] = &gcc_camss_top_gdsc,
+ [GCC_USB30_PRIM_GDSC] = &gcc_usb30_prim_gdsc,
+ [GCC_VCODEC0_GDSC] = &gcc_vcodec0_gdsc,
+ [GCC_VENUS_GDSC] = &gcc_venus_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU1_GDSC] = &hlos1_vote_turing_mmu_tbu1_gdsc,
+ [HLOS1_VOTE_TURING_MMU_TBU0_GDSC] = &hlos1_vote_turing_mmu_tbu0_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_RT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_rt_gdsc,
+ [HLOS1_VOTE_MM_SNOC_MMU_TBU_NRT_GDSC] = &hlos1_vote_mm_snoc_mmu_tbu_nrt_gdsc,
+};
+
+static const struct clk_rcg_dfs_data gcc_dfs_clocks[] = {
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s0_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s1_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s2_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s3_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s4_clk_src),
+ DEFINE_RCG_DFS(gcc_qupv3_wrap0_s5_clk_src),
+};
+
+static const struct regmap_config gcc_qcm2290_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xc7000,
+ .fast_io = true,
+};
+
+static const struct qcom_cc_desc gcc_qcm2290_desc = {
+ .config = &gcc_qcm2290_regmap_config,
+ .clks = gcc_qcm2290_clocks,
+ .num_clks = ARRAY_SIZE(gcc_qcm2290_clocks),
+ .resets = gcc_qcm2290_resets,
+ .num_resets = ARRAY_SIZE(gcc_qcm2290_resets),
+ .gdscs = gcc_qcm2290_gdscs,
+ .num_gdscs = ARRAY_SIZE(gcc_qcm2290_gdscs),
+};
+
+static const struct of_device_id gcc_qcm2290_match_table[] = {
+ { .compatible = "qcom,gcc-qcm2290" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, gcc_qcm2290_match_table);
+
+static int gcc_qcm2290_probe(struct platform_device *pdev)
+{
+ struct regmap *regmap;
+ int ret;
+
+ regmap = qcom_cc_map(pdev, &gcc_qcm2290_desc);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
+ ARRAY_SIZE(gcc_dfs_clocks));
+ if (ret)
+ return ret;
+
+ clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);
+ clk_alpha_pll_configure(&gpll11, regmap, &gpll11_config);
+ clk_alpha_pll_configure(&gpll8, regmap, &gpll8_config);
+ clk_alpha_pll_configure(&gpll9, regmap, &gpll9_config);
+
+ return qcom_cc_really_probe(pdev, &gcc_qcm2290_desc, regmap);
+}
+
+static struct platform_driver gcc_qcm2290_driver = {
+ .probe = gcc_qcm2290_probe,
+ .driver = {
+ .name = "gcc-qcm2290",
+ .of_match_table = gcc_qcm2290_match_table,
+ },
+};
+
+static int __init gcc_qcm2290_init(void)
+{
+ return platform_driver_register(&gcc_qcm2290_driver);
+}
+subsys_initcall(gcc_qcm2290_init);
+
+static void __exit gcc_qcm2290_exit(void)
+{
+ platform_driver_unregister(&gcc_qcm2290_driver);
+}
+module_exit(gcc_qcm2290_exit);
+
+MODULE_DESCRIPTION("QTI GCC QCM2290 Driver");
+MODULE_LICENSE("GPL v2");
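
The probe order in gcc_qcm2290_probe() above carries the logic of the new driver: map the regmap first, register the QUP RCGs listed in gcc_dfs_clocks for hardware DFS handling, program the Alpha PLLs that come with explicit *_config tables (GPLL8 through GPLL11), and only then let qcom_cc_really_probe() register the clocks, resets, and GDSCs. A reduced sketch of that sequence, with error handling trimmed and nothing beyond what the diff shows:

/* Sketch of the probe sequence above; not a drop-in replacement. */
static int example_gcc_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	/* ioremap the register block and wrap it in a regmap. */
	regmap = qcom_cc_map(pdev, &gcc_qcm2290_desc);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	/* Hand the QUP RCGs over to hardware dynamic frequency switching. */
	ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks,
				       ARRAY_SIZE(gcc_dfs_clocks));
	if (ret)
		return ret;

	/* Program the PLLs that need an explicit configuration table. */
	clk_alpha_pll_configure(&gpll10, regmap, &gpll10_config);

	/* Register clocks, resets and GDSCs with the framework. */
	return qcom_cc_really_probe(pdev, &gcc_qcm2290_desc, regmap);
}
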
diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c
index 6cefcdc86990..8fb6bd69f240 100644
--- a/drivers/clk/qcom/gcc-sc7280.c
+++ b/drivers/clk/qcom/gcc-sc7280.c
@@ -197,12 +197,6 @@ static const struct clk_parent_data gcc_parent_data_0[] = {
{ .hw = &gcc_gpll0_out_even.clkr.hw },
};
-static const struct clk_parent_data gcc_parent_data_0_ao[] = {
- { .fw_name = "bi_tcxo_ao" },
- { .hw = &gcc_gpll0.clkr.hw },
- { .hw = &gcc_gpll0_out_even.clkr.hw },
-};
-
static const struct parent_map gcc_parent_map_1[] = {
{ P_BI_TCXO, 0 },
{ P_GCC_GPLL0_OUT_MAIN, 1 },
@@ -479,24 +473,6 @@ static struct clk_regmap_mux gcc_usb3_sec_phy_pipe_clk_src = {
},
},
};
-static const struct freq_tbl ftbl_gcc_cpuss_ahb_clk_src[] = {
- F(19200000, P_BI_TCXO, 1, 0, 0),
- { }
-};
-
-static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
- .cmd_rcgr = 0x4800c,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_0,
- .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk_src",
- .parent_data = gcc_parent_data_0_ao,
- .num_parents = ARRAY_SIZE(gcc_parent_data_0_ao),
- .ops = &clk_rcg2_ops,
- },
-};
static const struct freq_tbl ftbl_gcc_gp1_clk_src[] = {
F(50000000, P_GCC_GPLL0_OUT_EVEN, 6, 0, 0),
@@ -1239,21 +1215,6 @@ static struct clk_rcg2 gcc_sec_ctrl_clk_src = {
},
};
-static struct clk_regmap_div gcc_cpuss_ahb_postdiv_clk_src = {
- .reg = 0x48024,
- .shift = 0,
- .width = 4,
- .clkr.hw.init = &(struct clk_init_data) {
- .name = "gcc_cpuss_ahb_postdiv_clk_src",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT,
- .ops = &clk_regmap_div_ro_ops,
- },
-};
-
static struct clk_regmap_div gcc_usb30_prim_mock_utmi_postdiv_clk_src = {
.reg = 0xf050,
.shift = 0,
@@ -1500,27 +1461,6 @@ static struct clk_branch gcc_cfg_noc_usb3_sec_axi_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_cpuss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x48000,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x52000,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_ddrss_gpu_axi_clk = {
.halt_reg = 0x71154,
.halt_check = BRANCH_HALT_SKIP,
@@ -2608,27 +2548,6 @@ static struct clk_branch gcc_sdcc4_apps_clk = {
},
};
-/* For CPUSS functionality the AHB clock needs to be left enabled */
-static struct clk_branch gcc_sys_noc_cpuss_ahb_clk = {
- .halt_reg = 0x48178,
- .halt_check = BRANCH_HALT_VOTED,
- .hwcg_reg = 0x48178,
- .hwcg_bit = 1,
- .clkr = {
- .enable_reg = 0x52000,
- .enable_mask = BIT(0),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_sys_noc_cpuss_ahb_clk",
- .parent_hws = (const struct clk_hw*[]){
- &gcc_cpuss_ahb_postdiv_clk_src.clkr.hw,
- },
- .num_parents = 1,
- .flags = CLK_IS_CRITICAL | CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_throttle_pcie_ahb_clk = {
.halt_reg = 0x9001c,
.halt_check = BRANCH_HALT,
@@ -3294,9 +3213,6 @@ static struct clk_regmap *gcc_sc7280_clocks[] = {
[GCC_CAMERA_SF_AXI_CLK] = &gcc_camera_sf_axi_clk.clkr,
[GCC_CFG_NOC_USB3_PRIM_AXI_CLK] = &gcc_cfg_noc_usb3_prim_axi_clk.clkr,
[GCC_CFG_NOC_USB3_SEC_AXI_CLK] = &gcc_cfg_noc_usb3_sec_axi_clk.clkr,
- [GCC_CPUSS_AHB_CLK] = &gcc_cpuss_ahb_clk.clkr,
- [GCC_CPUSS_AHB_CLK_SRC] = &gcc_cpuss_ahb_clk_src.clkr,
- [GCC_CPUSS_AHB_POSTDIV_CLK_SRC] = &gcc_cpuss_ahb_postdiv_clk_src.clkr,
[GCC_DDRSS_GPU_AXI_CLK] = &gcc_ddrss_gpu_axi_clk.clkr,
[GCC_DDRSS_PCIE_SF_CLK] = &gcc_ddrss_pcie_sf_clk.clkr,
[GCC_DISP_GPLL0_CLK_SRC] = &gcc_disp_gpll0_clk_src.clkr,
@@ -3403,7 +3319,6 @@ static struct clk_regmap *gcc_sc7280_clocks[] = {
[GCC_SDCC4_AHB_CLK] = &gcc_sdcc4_ahb_clk.clkr,
[GCC_SDCC4_APPS_CLK] = &gcc_sdcc4_apps_clk.clkr,
[GCC_SDCC4_APPS_CLK_SRC] = &gcc_sdcc4_apps_clk_src.clkr,
- [GCC_SYS_NOC_CPUSS_AHB_CLK] = &gcc_sys_noc_cpuss_ahb_clk.clkr,
[GCC_THROTTLE_PCIE_AHB_CLK] = &gcc_throttle_pcie_ahb_clk.clkr,
[GCC_TITAN_NRT_THROTTLE_CORE_CLK] =
&gcc_titan_nrt_throttle_core_clk.clkr,
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 4d36f96e9ae2..9b97425008ce 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -284,7 +284,7 @@ static struct clk_rcg2 blsp1_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -309,7 +309,7 @@ static struct clk_rcg2 blsp1_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup1_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -323,7 +323,7 @@ static struct clk_rcg2 blsp1_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -337,7 +337,7 @@ static struct clk_rcg2 blsp1_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup2_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -351,7 +351,7 @@ static struct clk_rcg2 blsp1_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -365,7 +365,7 @@ static struct clk_rcg2 blsp1_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup3_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -379,7 +379,7 @@ static struct clk_rcg2 blsp1_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -393,7 +393,7 @@ static struct clk_rcg2 blsp1_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_qup4_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -426,7 +426,7 @@ static struct clk_rcg2 blsp1_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -440,7 +440,7 @@ static struct clk_rcg2 blsp1_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp1_uart2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -454,7 +454,7 @@ static struct clk_rcg2 blsp2_qup1_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -468,7 +468,7 @@ static struct clk_rcg2 blsp2_qup1_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup1_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -482,7 +482,7 @@ static struct clk_rcg2 blsp2_qup2_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -496,7 +496,7 @@ static struct clk_rcg2 blsp2_qup2_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup2_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -510,7 +510,7 @@ static struct clk_rcg2 blsp2_qup3_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -524,7 +524,7 @@ static struct clk_rcg2 blsp2_qup3_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup3_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -538,7 +538,7 @@ static struct clk_rcg2 blsp2_qup4_i2c_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_i2c_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -552,7 +552,7 @@ static struct clk_rcg2 blsp2_qup4_spi_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_qup4_spi_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -566,7 +566,7 @@ static struct clk_rcg2 blsp2_uart1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -580,7 +580,7 @@ static struct clk_rcg2 blsp2_uart2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "blsp2_uart2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -601,7 +601,7 @@ static struct clk_rcg2 gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp1_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -615,7 +615,7 @@ static struct clk_rcg2 gp2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp2_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -629,7 +629,7 @@ static struct clk_rcg2 gp3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gp3_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_sleep_clk_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -649,7 +649,7 @@ static struct clk_rcg2 hmss_gpll0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll0_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -670,7 +670,7 @@ static struct clk_rcg2 hmss_gpll4_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_gpll4_clk_src",
.parent_data = gcc_parent_data_xo_gpll4,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll4),
.ops = &clk_rcg2_ops,
},
};
@@ -689,7 +689,7 @@ static struct clk_rcg2 hmss_rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hmss_rbcpr_clk_src",
.parent_data = gcc_parent_data_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -708,7 +708,7 @@ static struct clk_rcg2 pdm2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pdm2_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -730,7 +730,7 @@ static struct clk_rcg2 qspi_ser_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "qspi_ser_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll1_gpll4_gpll1_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -756,7 +756,7 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll4_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -778,7 +778,7 @@ static struct clk_rcg2 sdcc1_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc1_ice_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -804,7 +804,7 @@ static struct clk_rcg2 sdcc2_apps_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "sdcc2_apps_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div_gpll4),
.ops = &clk_rcg2_floor_ops,
},
};
@@ -827,7 +827,7 @@ static struct clk_rcg2 ufs_axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_axi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -848,7 +848,7 @@ static struct clk_rcg2 ufs_ice_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_ice_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -862,7 +862,7 @@ static struct clk_rcg2 ufs_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_phy_aux_clk_src",
.parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
.ops = &clk_rcg2_ops,
},
};
@@ -883,7 +883,7 @@ static struct clk_rcg2 ufs_unipro_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ufs_unipro_core_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -904,7 +904,7 @@ static struct clk_rcg2 usb20_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_master_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -924,7 +924,7 @@ static struct clk_rcg2 usb20_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb20_mock_utmi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -949,7 +949,7 @@ static struct clk_rcg2 usb30_master_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_master_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -970,7 +970,7 @@ static struct clk_rcg2 usb30_mock_utmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb30_mock_utmi_clk_src",
.parent_data = gcc_parent_data_xo_gpll0_gpll0_early_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_gpll0_gpll0_early_div),
.ops = &clk_rcg2_ops,
},
};
@@ -990,7 +990,7 @@ static struct clk_rcg2 usb3_phy_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "usb3_phy_aux_clk_src",
.parent_data = gcc_parent_data_xo_sleep_clk,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gcc_parent_data_xo_sleep_clk),
.ops = &clk_rcg2_ops,
},
};
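
The gcc-sdm660 hunks above all make the same change: hand-counted .num_parents literals are replaced with ARRAY_SIZE() over the matching parent_data table, so the count cannot drift if a parent entry is later added or removed. A minimal sketch of the pattern, with a hypothetical parent table rather than a real sdm660 one:

/* Illustrative parent table; the names and count here are hypothetical. */
static const struct clk_parent_data example_parents[] = {
	{ .fw_name = "xo" },
	{ .fw_name = "gpll0" },
	{ .fw_name = "gpll0_early_div" },
};

static const struct clk_init_data example_init = {
	.name = "example_clk_src",
	.parent_data = example_parents,
	/* Derived from the table itself, so it always matches the entries above. */
	.num_parents = ARRAY_SIZE(example_parents),
	.ops = &clk_rcg2_ops,
};
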
diff --git a/drivers/clk/qcom/gdsc.c b/drivers/clk/qcom/gdsc.c
index 4ece326ea233..7e1dd8ccfa38 100644
--- a/drivers/clk/qcom/gdsc.c
+++ b/drivers/clk/qcom/gdsc.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset-controller.h>
@@ -50,6 +51,22 @@ enum gdsc_status {
GDSC_ON
};
+static int gdsc_pm_runtime_get(struct gdsc *sc)
+{
+ if (!sc->dev)
+ return 0;
+
+ return pm_runtime_resume_and_get(sc->dev);
+}
+
+static int gdsc_pm_runtime_put(struct gdsc *sc)
+{
+ if (!sc->dev)
+ return 0;
+
+ return pm_runtime_put_sync(sc->dev);
+}
+
/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
{
@@ -232,9 +249,8 @@ static void gdsc_retain_ff_on(struct gdsc *sc)
regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
}
-static int gdsc_enable(struct generic_pm_domain *domain)
+static int _gdsc_enable(struct gdsc *sc)
{
- struct gdsc *sc = domain_to_gdsc(domain);
int ret;
if (sc->pwrsts == PWRSTS_ON)
@@ -290,11 +306,22 @@ static int gdsc_enable(struct generic_pm_domain *domain)
return 0;
}
-static int gdsc_disable(struct generic_pm_domain *domain)
+static int gdsc_enable(struct generic_pm_domain *domain)
{
struct gdsc *sc = domain_to_gdsc(domain);
int ret;
+ ret = gdsc_pm_runtime_get(sc);
+ if (ret)
+ return ret;
+
+ return _gdsc_enable(sc);
+}
+
+static int _gdsc_disable(struct gdsc *sc)
+{
+ int ret;
+
if (sc->pwrsts == PWRSTS_ON)
return gdsc_assert_reset(sc);
@@ -329,6 +356,18 @@ static int gdsc_disable(struct generic_pm_domain *domain)
return 0;
}
+static int gdsc_disable(struct generic_pm_domain *domain)
+{
+ struct gdsc *sc = domain_to_gdsc(domain);
+ int ret;
+
+ ret = _gdsc_disable(sc);
+
+ gdsc_pm_runtime_put(sc);
+
+ return ret;
+}
+
static int gdsc_init(struct gdsc *sc)
{
u32 mask, val;
@@ -443,6 +482,8 @@ int gdsc_register(struct gdsc_desc *desc,
for (i = 0; i < num; i++) {
if (!scs[i])
continue;
+ if (pm_runtime_enabled(dev))
+ scs[i]->dev = dev;
scs[i]->regmap = regmap;
scs[i]->rcdev = rcdev;
ret = gdsc_init(scs[i]);
@@ -457,6 +498,8 @@ int gdsc_register(struct gdsc_desc *desc,
continue;
if (scs[i]->parent)
pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
return of_genpd_add_provider_onecell(dev->of_node, data);
@@ -475,6 +518,8 @@ void gdsc_unregister(struct gdsc_desc *desc)
continue;
if (scs[i]->parent)
pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
+ else if (!IS_ERR_OR_NULL(dev->pm_domain))
+ pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
}
of_genpd_del_provider(dev->of_node);
}
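
The gdsc.c change above splits enable/disable into thin wrappers so that, when the providing device has runtime PM enabled, the GDSC registers are only touched while that device is resumed: gdsc_enable() takes a runtime-PM reference before running the original sequence in _gdsc_enable(), and gdsc_disable() drops it after _gdsc_disable(). A reduced sketch of that wrapper shape, following the diff rather than extending it:

/* Sketch of the wrapper pattern added above; not a drop-in replacement. */
static int example_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = gdsc_pm_runtime_get(sc);	/* no-op when sc->dev is NULL */
	if (ret)
		return ret;

	return _gdsc_enable(sc);	/* the original register sequence */
}

static int example_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	ret = _gdsc_disable(sc);
	gdsc_pm_runtime_put(sc);	/* drop the reference even if disable failed */

	return ret;
}
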
diff --git a/drivers/clk/qcom/gdsc.h b/drivers/clk/qcom/gdsc.h
index 5bb396b344d1..d7cc4c21a9d4 100644
--- a/drivers/clk/qcom/gdsc.h
+++ b/drivers/clk/qcom/gdsc.h
@@ -25,6 +25,7 @@ struct reset_controller_dev;
* @resets: ids of resets associated with this gdsc
* @reset_count: number of @resets
* @rcdev: reset controller
+ * @dev: the device holding the GDSC, used for pm_runtime calls
*/
struct gdsc {
struct generic_pm_domain pd;
@@ -58,6 +59,7 @@ struct gdsc {
const char *supply;
struct regulator *rsupply;
+ struct device *dev;
};
struct gdsc_desc {
diff --git a/drivers/clk/qcom/gpucc-msm8998.c b/drivers/clk/qcom/gpucc-msm8998.c
index fedfffaf0a8d..a925ac90018d 100644
--- a/drivers/clk/qcom/gpucc-msm8998.c
+++ b/drivers/clk/qcom/gpucc-msm8998.c
@@ -40,8 +40,7 @@ static struct clk_branch gpucc_cxo_clk = {
.hw.init = &(struct clk_init_data){
.name = "gpucc_cxo_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -99,7 +98,7 @@ static const struct parent_map gpu_xo_gpll0_map[] = {
static const struct clk_parent_data gpu_xo_gpll0[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
};
static const struct parent_map gpu_xo_gpupll0_map[] = {
@@ -126,7 +125,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -144,7 +143,7 @@ static struct clk_rcg2 gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gpu_xo_gpupll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpupll0),
.ops = &clk_rcg2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
},
@@ -163,7 +162,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -184,7 +183,7 @@ static struct clk_rcg2 gfx3d_isense_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_isense_clk_src",
.parent_data = gpu_xo_gpll0,
- .num_parents = 2,
+ .num_parents = ARRAY_SIZE(gpu_xo_gpll0),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/gpucc-sdm660.c b/drivers/clk/qcom/gpucc-sdm660.c
index 1ebcceb3a50d..41bba96a08b3 100644
--- a/drivers/clk/qcom/gpucc-sdm660.c
+++ b/drivers/clk/qcom/gpucc-sdm660.c
@@ -44,8 +44,7 @@ static struct clk_branch gpucc_cxo_clk = {
.hw.init = &(struct clk_init_data){
.name = "gpucc_cxo_clk",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_branch2_ops,
@@ -101,7 +100,7 @@ static const struct clk_parent_data gpucc_parent_data_1[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
{ .hw = &gpu_pll0_pll_out_main.clkr.hw },
{ .hw = &gpu_pll1_pll_out_main.clkr.hw },
- { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_clk" },
};
static struct clk_rcg2_gfx3d gfx3d_clk_src = {
@@ -114,7 +113,7 @@ static struct clk_rcg2_gfx3d gfx3d_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "gfx3d_clk_src",
.parent_data = gpucc_parent_data_1,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_1),
.ops = &clk_gfx3d_ops,
.flags = CLK_SET_RATE_PARENT | CLK_OPS_PARENT_ENABLE,
},
@@ -154,8 +153,8 @@ static const struct parent_map gpucc_parent_map_0[] = {
static const struct clk_parent_data gpucc_parent_data_0[] = {
{ .hw = &gpucc_cxo_clk.clkr.hw },
- { .fw_name = "gcc_gpu_gpll0_clk", .name = "gcc_gpu_gpll0_clk" },
- { .fw_name = "gcc_gpu_gpll0_div_clk", .name = "gcc_gpu_gpll0_div_clk" },
+ { .fw_name = "gcc_gpu_gpll0_clk" },
+ { .fw_name = "gcc_gpu_gpll0_div_clk" },
};
static const struct freq_tbl ftbl_rbbmtimer_clk_src[] = {
@@ -172,7 +171,7 @@ static struct clk_rcg2 rbbmtimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbbmtimer_clk_src",
.parent_data = gpucc_parent_data_0,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
@@ -192,7 +191,7 @@ static struct clk_rcg2 rbcpr_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rbcpr_clk_src",
.parent_data = gpucc_parent_data_0,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(gpucc_parent_data_0),
.ops = &clk_rcg2_ops,
},
};
diff --git a/drivers/clk/qcom/kpss-xcc.c b/drivers/clk/qcom/kpss-xcc.c
index 8590b5edd19d..4fec1f9142b8 100644
--- a/drivers/clk/qcom/kpss-xcc.c
+++ b/drivers/clk/qcom/kpss-xcc.c
@@ -33,7 +33,6 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
{
const struct of_device_id *id;
struct clk *clk;
- struct resource *res;
void __iomem *base;
const char *name;
@@ -41,8 +40,7 @@ static int kpss_xcc_driver_probe(struct platform_device *pdev)
if (!id)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
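
The kpss-xcc hunk is a straight conversion from the two-step platform_get_resource() plus devm_ioremap_resource() pair to the devm_platform_ioremap_resource() helper, which performs the same lookup and mapping in one call and returns the same ERR_PTR on failure. For comparison, a minimal before/after sketch using only the calls visible in the diff:

/* Before: fetch the memory resource, then map it. */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);

/* After: one helper call with the same error semantics. */
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
	return PTR_ERR(base);
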
diff --git a/drivers/clk/qcom/lpasscc-sc7280.c b/drivers/clk/qcom/lpasscc-sc7280.c
new file mode 100644
index 000000000000..89f1ad6631da
--- /dev/null
+++ b/drivers/clk/qcom/lpasscc-sc7280.c
@@ -0,0 +1,216 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/pm_clock.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lpass-sc7280.h>
+
+#include "clk-regmap.h"
+#include "clk-branch.h"
+#include "common.h"
+
+static struct clk_branch lpass_q6ss_ahbm_clk = {
+ .halt_reg = 0x1c,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x1c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbm_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_q6ss_ahbs_clk = {
+ .halt_reg = 0x20,
+ .halt_check = BRANCH_HALT_VOTED,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_q6ss_ahbs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_top_cc_lpi_q6_axim_hs_clk = {
+ .halt_reg = 0x0,
+ .halt_check = BRANCH_HALT,
+ .clkr = {
+ .enable_reg = 0x0,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_top_cc_lpi_q6_axim_hs_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_core_clk = {
+ .halt_reg = 0x20,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x20,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_core_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_xo_clk = {
+ .halt_reg = 0x38,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x38,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_xo_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch lpass_qdsp6ss_sleep_clk = {
+ .halt_reg = 0x3c,
+ /* CLK_OFF would not toggle until LPASS is out of reset */
+ .halt_check = BRANCH_HALT_SKIP,
+ .clkr = {
+ .enable_reg = 0x3c,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "lpass_qdsp6ss_sleep_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct regmap_config lpass_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .fast_io = true,
+};
+
+static struct clk_regmap *lpass_cc_sc7280_clocks[] = {
+ [LPASS_Q6SS_AHBM_CLK] = &lpass_q6ss_ahbm_clk.clkr,
+ [LPASS_Q6SS_AHBS_CLK] = &lpass_q6ss_ahbs_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_cc_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_cc_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_sc7280_clocks),
+};
+
+static struct clk_regmap *lpass_cc_top_sc7280_clocks[] = {
+ [LPASS_TOP_CC_LPI_Q6_AXIM_HS_CLK] =
+ &lpass_top_cc_lpi_q6_axim_hs_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_cc_top_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_cc_top_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_cc_top_sc7280_clocks),
+};
+
+static struct clk_regmap *lpass_qdsp6ss_sc7280_clocks[] = {
+ [LPASS_QDSP6SS_XO_CLK] = &lpass_qdsp6ss_xo_clk.clkr,
+ [LPASS_QDSP6SS_SLEEP_CLK] = &lpass_qdsp6ss_sleep_clk.clkr,
+ [LPASS_QDSP6SS_CORE_CLK] = &lpass_qdsp6ss_core_clk.clkr,
+};
+
+static const struct qcom_cc_desc lpass_qdsp6ss_sc7280_desc = {
+ .config = &lpass_regmap_config,
+ .clks = lpass_qdsp6ss_sc7280_clocks,
+ .num_clks = ARRAY_SIZE(lpass_qdsp6ss_sc7280_clocks),
+};
+
+static int lpass_cc_sc7280_probe(struct platform_device *pdev)
+{
+ const struct qcom_cc_desc *desc;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_clk_create(&pdev->dev);
+ if (ret)
+ goto disable_pm_runtime;
+
+ ret = pm_clk_add(&pdev->dev, "iface");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to acquire iface clock\n");
+ goto destroy_pm_clk;
+ }
+
+ lpass_regmap_config.name = "qdsp6ss";
+ desc = &lpass_qdsp6ss_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 0, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ lpass_regmap_config.name = "top_cc";
+ desc = &lpass_cc_top_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 1, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ lpass_regmap_config.name = "cc";
+ desc = &lpass_cc_sc7280_desc;
+
+ ret = qcom_cc_probe_by_index(pdev, 2, desc);
+ if (ret)
+ goto destroy_pm_clk;
+
+ return 0;
+
+destroy_pm_clk:
+ pm_clk_destroy(&pdev->dev);
+
+disable_pm_runtime:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
+}
+
+static const struct of_device_id lpass_cc_sc7280_match_table[] = {
+ { .compatible = "qcom,sc7280-lpasscc" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpass_cc_sc7280_match_table);
+
+static struct platform_driver lpass_cc_sc7280_driver = {
+ .probe = lpass_cc_sc7280_probe,
+ .driver = {
+ .name = "sc7280-lpasscc",
+ .of_match_table = lpass_cc_sc7280_match_table,
+ },
+};
+
+static int __init lpass_cc_sc7280_init(void)
+{
+ return platform_driver_register(&lpass_cc_sc7280_driver);
+}
+subsys_initcall(lpass_cc_sc7280_init);
+
+static void __exit lpass_cc_sc7280_exit(void)
+{
+ platform_driver_unregister(&lpass_cc_sc7280_driver);
+}
+module_exit(lpass_cc_sc7280_exit);
+
+MODULE_DESCRIPTION("QTI LPASS_CC SC7280 Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/clk/qcom/mmcc-msm8998.c b/drivers/clk/qcom/mmcc-msm8998.c
index 467dadccde02..c421b1291651 100644
--- a/drivers/clk/qcom/mmcc-msm8998.c
+++ b/drivers/clk/qcom/mmcc-msm8998.c
@@ -53,8 +53,7 @@ static struct clk_fixed_factor gpll0_div = {
.hw.init = &(struct clk_init_data){
.name = "mmss_gpll0_div",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "gpll0",
- .name = "gpll0"
+ .fw_name = "gpll0"
},
.num_parents = 1,
.ops = &clk_fixed_factor_ops,
@@ -78,8 +77,7 @@ static struct clk_alpha_pll mmpll0 = {
.hw.init = &(struct clk_init_data){
.name = "mmpll0",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -111,8 +109,7 @@ static struct clk_alpha_pll mmpll1 = {
.hw.init = &(struct clk_init_data){
.name = "mmpll1",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -141,8 +138,7 @@ static struct clk_alpha_pll mmpll3 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll3",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -170,8 +166,7 @@ static struct clk_alpha_pll mmpll4 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll4",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -199,8 +194,7 @@ static struct clk_alpha_pll mmpll5 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll5",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -228,8 +222,7 @@ static struct clk_alpha_pll mmpll6 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll6",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -257,8 +250,7 @@ static struct clk_alpha_pll mmpll7 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll7",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -286,8 +278,7 @@ static struct clk_alpha_pll mmpll10 = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mmpll10",
.parent_data = &(const struct clk_parent_data){
- .fw_name = "xo",
- .name = "xo"
+ .fw_name = "xo"
},
.num_parents = 1,
.ops = &clk_alpha_pll_fixed_fabia_ops,
@@ -316,9 +307,9 @@ static const struct parent_map mmss_xo_hdmi_map[] = {
};
static const struct clk_parent_data mmss_xo_hdmi[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "hdmipll", .name = "hdmipll" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "hdmipll" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
@@ -329,10 +320,10 @@ static const struct parent_map mmss_xo_dsi0pll_dsi1pll_map[] = {
};
static const struct clk_parent_data mmss_xo_dsi0pll_dsi1pll[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dsi0dsi", .name = "dsi0dsi" },
- { .fw_name = "dsi1dsi", .name = "dsi1dsi" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0dsi" },
+ { .fw_name = "dsi1dsi" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dsibyte_map[] = {
@@ -343,10 +334,10 @@ static const struct parent_map mmss_xo_dsibyte_map[] = {
};
static const struct clk_parent_data mmss_xo_dsibyte[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dsi0byte", .name = "dsi0byte" },
- { .fw_name = "dsi1byte", .name = "dsi1byte" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dsi0byte" },
+ { .fw_name = "dsi1byte" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_dp_map[] = {
@@ -357,10 +348,10 @@ static const struct parent_map mmss_xo_dp_map[] = {
};
static const struct clk_parent_data mmss_xo_dp[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "dplink", .name = "dplink" },
- { .fw_name = "dpvco", .name = "dpvco" },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "xo" },
+ { .fw_name = "dplink" },
+ { .fw_name = "dpvco" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
@@ -371,10 +362,10 @@ static const struct parent_map mmss_xo_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "xo" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
@@ -386,11 +377,11 @@ static const struct parent_map mmss_xo_mmpll0_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
@@ -403,12 +394,12 @@ static const struct parent_map mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll1_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
@@ -421,12 +412,12 @@ static const struct parent_map mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div_map[] = {
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll5_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[] = {
@@ -440,13 +431,13 @@ static const struct parent_map mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div_map[
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll3_out_even.clkr.hw },
{ .hw = &mmpll6_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -460,13 +451,13 @@ static const struct parent_map mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map
};
static const struct clk_parent_data mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll4_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -480,13 +471,13 @@ static const struct parent_map mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div_map
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div_map[] = {
@@ -501,14 +492,14 @@ static const struct parent_map mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_
};
static const struct clk_parent_data mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div[] = {
- { .fw_name = "xo", .name = "xo" },
+ { .fw_name = "xo" },
{ .hw = &mmpll0_out_even.clkr.hw },
{ .hw = &mmpll4_out_even.clkr.hw },
{ .hw = &mmpll7_out_even.clkr.hw },
{ .hw = &mmpll10_out_even.clkr.hw },
- { .fw_name = "gpll0", .name = "gpll0" },
+ { .fw_name = "gpll0" },
{ .hw = &gpll0_div.hw },
- { .fw_name = "core_bi_pll_test_se", .name = "core_bi_pll_test_se" },
+ { .fw_name = "core_bi_pll_test_se" },
};
static struct clk_rcg2 byte0_clk_src = {
@@ -518,7 +509,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -531,7 +522,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -552,7 +543,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -576,7 +567,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -599,7 +590,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -612,7 +603,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -625,7 +616,7 @@ static struct clk_rcg2 csi2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -638,7 +629,7 @@ static struct clk_rcg2 csi3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -660,7 +651,7 @@ static struct clk_rcg2 csiphy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -679,7 +670,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -692,7 +683,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -705,7 +696,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -723,7 +714,7 @@ static struct clk_rcg2 dp_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_aux_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -743,7 +734,7 @@ static struct clk_rcg2 dp_crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_crypto_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -763,7 +754,7 @@ static struct clk_rcg2 dp_link_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_link_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -783,7 +774,7 @@ static struct clk_rcg2 dp_pixel_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_pixel_clk_src",
.parent_data = mmss_xo_dp,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dp),
.ops = &clk_rcg2_ops,
},
};
@@ -801,7 +792,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -814,7 +805,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = mmss_xo_dsibyte,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -832,7 +823,7 @@ static struct clk_rcg2 extpclk_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "extpclk_clk_src",
.parent_data = mmss_xo_hdmi,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmss_xo_hdmi),
.ops = &clk_byte_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -855,7 +846,7 @@ static struct clk_rcg2 fd_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "fd_core_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -873,7 +864,7 @@ static struct clk_rcg2 hdmi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "hdmi_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -894,7 +885,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -916,7 +907,7 @@ static struct clk_rcg2 maxi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "maxi_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -943,7 +934,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -956,7 +947,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -969,7 +960,7 @@ static struct clk_rcg2 mclk2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -982,7 +973,7 @@ static struct clk_rcg2 mclk3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
.parent_data = mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1008,7 +999,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1026,7 +1017,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = mmss_xo_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1046,7 +1037,7 @@ static struct clk_rcg2 ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_data = mmss_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 5,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1069,7 +1060,7 @@ static struct clk_rcg2 axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll1_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1082,7 +1073,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1096,7 +1087,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = mmss_xo_dsi0pll_dsi1pll,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmss_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT,
},
@@ -1118,7 +1109,7 @@ static struct clk_rcg2 rot_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rot_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll5_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1140,7 +1131,7 @@ static struct clk_rcg2 video_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1153,7 +1144,7 @@ static struct clk_rcg2 video_subcore0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1166,7 +1157,7 @@ static struct clk_rcg2 video_subcore1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_subcore1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll3_mmpll6_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1191,7 +1182,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1204,7 +1195,7 @@ static struct clk_rcg2 vfe1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
.parent_data = mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 8,
+ .num_parents = ARRAY_SIZE(mmss_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
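
The whole of the mmcc-msm8998.c change above, and the mmcc-sdm660.c change that follows, is one mechanical conversion: every hard-coded .num_parents is replaced with ARRAY_SIZE() of the matching parent_data table, so the count can no longer drift out of sync when a parent table gains or loses entries. A minimal illustration of the pattern (names here are invented, not taken from the patch):

	#include <linux/clk-provider.h>
	#include <linux/kernel.h>

	/* Invented parent table, in the same style as the tables in this driver. */
	static const struct clk_parent_data example_parents[] = {
		{ .fw_name = "xo" },
		{ .fw_name = "gpll0" },
		{ .fw_name = "gpll0_div" },
	};

	static const struct clk_init_data example_init = {
		.name = "example_clk_src",
		.parent_data = example_parents,
		/* Derived from the table itself, so it cannot go stale. */
		.num_parents = ARRAY_SIZE(example_parents),
		/* .ops would point at the driver's RCG2 ops, as in the hunks above. */
	};
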
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index 941993bc610d..bc19a23e13f8 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -483,7 +483,7 @@ static struct clk_rcg2 ahb_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_data = mmcc_xo_mmpll0_gpll0_gpll0_div,
- .num_parents = 4,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -496,7 +496,7 @@ static struct clk_rcg2 byte0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte0_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -510,7 +510,7 @@ static struct clk_rcg2 byte1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "byte1_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_byte2_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -538,7 +538,7 @@ static struct clk_rcg2 camss_gp0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -552,7 +552,7 @@ static struct clk_rcg2 camss_gp1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "camss_gp1_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -573,7 +573,7 @@ static struct clk_rcg2 cci_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cci_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -597,7 +597,7 @@ static struct clk_rcg2 cpp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "cpp_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_mmpll6),
.ops = &clk_rcg2_ops,
},
};
@@ -620,7 +620,7 @@ static struct clk_rcg2 csi0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -641,7 +641,7 @@ static struct clk_rcg2 csi0phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi0phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -655,7 +655,7 @@ static struct clk_rcg2 csi1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -669,7 +669,7 @@ static struct clk_rcg2 csi1phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi1phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -683,7 +683,7 @@ static struct clk_rcg2 csi2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -697,7 +697,7 @@ static struct clk_rcg2 csi2phytimer_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi2phytimer_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -711,7 +711,7 @@ static struct clk_rcg2 csi3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csi3_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -733,7 +733,7 @@ static struct clk_rcg2 csiphy_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "csiphy_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll8_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -752,7 +752,7 @@ static struct clk_rcg2 dp_aux_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_aux_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -773,7 +773,7 @@ static struct clk_rcg2 dp_crypto_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_crypto_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_rcg2_ops,
},
};
@@ -793,7 +793,7 @@ static struct clk_rcg2 dp_gtc_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_gtc_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -814,7 +814,7 @@ static struct clk_rcg2 dp_link_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_link_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_rcg2_ops,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -828,7 +828,7 @@ static struct clk_rcg2 dp_pixel_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "dp_pixel_clk_src",
.parent_data = mmcc_xo_dplink_dpvco,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dplink_dpvco),
.ops = &clk_dp_ops,
.flags = CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT,
},
@@ -842,7 +842,7 @@ static struct clk_rcg2 esc0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc0_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -855,7 +855,7 @@ static struct clk_rcg2 esc1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "esc1_clk_src",
.parent_data = mmcc_xo_dsibyte,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsibyte),
.ops = &clk_rcg2_ops,
},
};
@@ -878,7 +878,7 @@ static struct clk_rcg2 jpeg0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "jpeg0_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -906,7 +906,7 @@ static struct clk_rcg2 mclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk0_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -920,7 +920,7 @@ static struct clk_rcg2 mclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk1_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -934,7 +934,7 @@ static struct clk_rcg2 mclk2_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk2_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -948,7 +948,7 @@ static struct clk_rcg2 mclk3_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mclk3_clk_src",
.parent_data = mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll4_mmpll7_mmpll10_sleep_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -974,7 +974,7 @@ static struct clk_rcg2 mdp_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "mdp_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -987,7 +987,7 @@ static struct clk_rcg2 pclk0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk0_clk_src",
.parent_data = mmcc_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -1001,7 +1001,7 @@ static struct clk_rcg2 pclk1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "pclk1_clk_src",
.parent_data = mmcc_xo_dsi0pll_dsi1pll,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_dsi0pll_dsi1pll),
.ops = &clk_pixel_ops,
.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE,
},
@@ -1025,7 +1025,7 @@ static struct clk_rcg2 rot_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "rot_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div,
- .num_parents = 6,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll5_mmpll7_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -1051,7 +1051,7 @@ static struct clk_rcg2 vfe0_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe0_clk_src",
.parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1065,7 +1065,7 @@ static struct clk_rcg2 vfe1_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vfe1_clk_src",
.parent_data = mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_mmpll0_mmpll4_mmpll7_mmpll10_mmpll6_gpll0),
.ops = &clk_rcg2_ops,
},
};
@@ -1089,7 +1089,7 @@ static struct clk_rcg2 video_core_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "video_core_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll8_mmpll3_mmpll6_gpll0_mmpll7),
.ops = &clk_rcg2_ops,
.flags = CLK_IS_CRITICAL,
},
@@ -1104,7 +1104,7 @@ static struct clk_rcg2 vsync_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "vsync_clk_src",
.parent_data = mmcc_xo_gpll0_gpll0_div,
- .num_parents = 3,
+ .num_parents = ARRAY_SIZE(mmcc_xo_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -2055,7 +2055,7 @@ static struct clk_rcg2 axi_clk_src = {
.clkr.hw.init = &(struct clk_init_data){
.name = "axi_clk_src",
.parent_data = mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div,
- .num_parents = 7,
+ .num_parents = ARRAY_SIZE(mmcc_xo_mmpll0_mmpll4_mmpll7_mmpll10_gpll0_gpll0_div),
.ops = &clk_rcg2_ops,
},
};
@@ -2560,6 +2560,8 @@ static struct clk_branch video_subcore0_clk = {
static struct gdsc venus_gdsc = {
.gdscr = 0x1024,
+ .cxcs = (unsigned int[]){ 0x1028, 0x1034, 0x1048 },
+ .cxc_count = 3,
.pd = {
.name = "venus",
},
@@ -2573,6 +2575,7 @@ static struct gdsc venus_core0_gdsc = {
},
.parent = &venus_gdsc.pd,
.pwrsts = PWRSTS_OFF_ON,
+ .flags = HW_CTRL,
};
static struct gdsc mdss_gdsc = {
diff --git a/drivers/clk/qcom/videocc-sm8250.c b/drivers/clk/qcom/videocc-sm8250.c
index 7b435a1c2c4b..8617454e4a77 100644
--- a/drivers/clk/qcom/videocc-sm8250.c
+++ b/drivers/clk/qcom/videocc-sm8250.c
@@ -6,6 +6,7 @@
#include <linux/clk-provider.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <dt-bindings/clock/qcom,videocc-sm8250.h>
@@ -364,13 +365,31 @@ static const struct of_device_id video_cc_sm8250_match_table[] = {
};
MODULE_DEVICE_TABLE(of, video_cc_sm8250_match_table);
+static void video_cc_sm8250_pm_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
static int video_cc_sm8250_probe(struct platform_device *pdev)
{
struct regmap *regmap;
+ int ret;
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_add_action_or_reset(&pdev->dev, video_cc_sm8250_pm_runtime_disable, &pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret)
+ return ret;
regmap = qcom_cc_map(pdev, &video_cc_sm8250_desc);
- if (IS_ERR(regmap))
+ if (IS_ERR(regmap)) {
+ pm_runtime_put(&pdev->dev);
return PTR_ERR(regmap);
+ }
clk_lucid_pll_configure(&video_pll0, regmap, &video_pll0_config);
clk_lucid_pll_configure(&video_pll1, regmap, &video_pll1_config);
@@ -379,7 +398,11 @@ static int video_cc_sm8250_probe(struct platform_device *pdev)
regmap_update_bits(regmap, 0xe58, BIT(0), BIT(0));
regmap_update_bits(regmap, 0xeec, BIT(0), BIT(0));
- return qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
+ ret = qcom_cc_really_probe(pdev, &video_cc_sm8250_desc, regmap);
+
+ pm_runtime_put(&pdev->dev);
+
+ return ret;
}
static struct platform_driver video_cc_sm8250_driver = {
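
The probe() rework above brackets the clock-controller registration with a runtime-PM get/put pair, and uses devm_add_action_or_reset() so that pm_runtime_disable() runs automatically on any later probe failure and on driver unbind. A stripped-down sketch of that general pattern (driver and function names here are placeholders, not from the patch):

	#include <linux/platform_device.h>
	#include <linux/pm_runtime.h>

	static void example_rpm_disable(void *data)
	{
		pm_runtime_disable(data);	/* undo pm_runtime_enable() */
	}

	static int example_probe(struct platform_device *pdev)
	{
		int ret;

		pm_runtime_enable(&pdev->dev);

		/* Runs example_rpm_disable() on any later error and on unbind. */
		ret = devm_add_action_or_reset(&pdev->dev, example_rpm_disable,
					       &pdev->dev);
		if (ret)
			return ret;

		ret = pm_runtime_resume_and_get(&pdev->dev);
		if (ret)
			return ret;

		/* ... register clocks while the device is powered ... */

		pm_runtime_put(&pdev->dev);
		return 0;
	}
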
diff --git a/drivers/clk/renesas/r8a7795-cpg-mssr.c b/drivers/clk/renesas/r8a7795-cpg-mssr.c
index c32d2c678046..d6b1d0148bfd 100644
--- a/drivers/clk/renesas/r8a7795-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7795-cpg-mssr.c
@@ -229,6 +229,7 @@ static struct mssr_mod_clk r8a7795_mod_clks[] __initdata = {
DEF_MOD("lvds", 727, R8A7795_CLK_S0D4),
DEF_MOD("hdmi1", 728, R8A7795_CLK_HDMI),
DEF_MOD("hdmi0", 729, R8A7795_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A7795_CLK_S2D1),
DEF_MOD("vin7", 804, R8A7795_CLK_S0D2),
DEF_MOD("vin6", 805, R8A7795_CLK_S0D2),
DEF_MOD("vin5", 806, R8A7795_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a7796-cpg-mssr.c b/drivers/clk/renesas/r8a7796-cpg-mssr.c
index 41593c126faf..9c22977e42c2 100644
--- a/drivers/clk/renesas/r8a7796-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a7796-cpg-mssr.c
@@ -207,6 +207,7 @@ static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
DEF_MOD("du0", 724, R8A7796_CLK_S2D1),
DEF_MOD("lvds", 727, R8A7796_CLK_S2D1),
DEF_MOD("hdmi0", 729, R8A7796_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A7796_CLK_S2D1),
DEF_MOD("vin7", 804, R8A7796_CLK_S0D2),
DEF_MOD("vin6", 805, R8A7796_CLK_S0D2),
DEF_MOD("vin5", 806, R8A7796_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a77965-cpg-mssr.c b/drivers/clk/renesas/r8a77965-cpg-mssr.c
index bc1be8bcbbe4..7eee45a31b2a 100644
--- a/drivers/clk/renesas/r8a77965-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a77965-cpg-mssr.c
@@ -205,6 +205,7 @@ static const struct mssr_mod_clk r8a77965_mod_clks[] __initconst = {
DEF_MOD("lvds", 727, R8A77965_CLK_S2D1),
DEF_MOD("hdmi0", 729, R8A77965_CLK_HDMI),
+ DEF_MOD("mlp", 802, R8A77965_CLK_S2D1),
DEF_MOD("vin7", 804, R8A77965_CLK_S0D2),
DEF_MOD("vin6", 805, R8A77965_CLK_S0D2),
DEF_MOD("vin5", 806, R8A77965_CLK_S0D2),
diff --git a/drivers/clk/renesas/r8a779a0-cpg-mssr.c b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
index f16d125ca009..fbd7454f2beb 100644
--- a/drivers/clk/renesas/r8a779a0-cpg-mssr.c
+++ b/drivers/clk/renesas/r8a779a0-cpg-mssr.c
@@ -33,9 +33,13 @@ enum rcar_r8a779a0_clk_types {
CLK_TYPE_R8A779A0_PLL1,
CLK_TYPE_R8A779A0_PLL2X_3X, /* PLL[23][01] */
CLK_TYPE_R8A779A0_PLL5,
+ CLK_TYPE_R8A779A0_Z,
CLK_TYPE_R8A779A0_SD,
CLK_TYPE_R8A779A0_MDSEL, /* Select parent/divider using mode pin */
CLK_TYPE_R8A779A0_OSC, /* OSC EXTAL predivider and fixed divider */
+ CLK_TYPE_R8A779A0_RPCSRC,
+ CLK_TYPE_R8A779A0_RPC,
+ CLK_TYPE_R8A779A0_RPCD2,
};
struct rcar_r8a779a0_cpg_pll_config {
@@ -84,6 +88,10 @@ enum clk_ids {
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_PLL2X_3X, CLK_MAIN, \
.offset = _offset)
+#define DEF_Z(_name, _id, _parent, _div, _offset) \
+ DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_Z, _parent, .div = _div, \
+ .offset = _offset)
+
#define DEF_SD(_name, _id, _parent, _offset) \
DEF_BASE(_name, _id, CLK_TYPE_R8A779A0_SD, _parent, .offset = _offset)
@@ -120,8 +128,14 @@ static const struct cpg_core_clk r8a779a0_core_clks[] __initconst = {
DEF_FIXED(".s3", CLK_S3, CLK_PLL1_DIV2, 4, 1),
DEF_FIXED(".sdsrc", CLK_SDSRC, CLK_PLL5_DIV4, 1, 1),
DEF_RATE(".oco", CLK_OCO, 32768),
+ DEF_BASE(".rpcsrc", CLK_RPCSRC, CLK_TYPE_R8A779A0_RPCSRC, CLK_PLL5),
+ DEF_BASE("rpc", R8A779A0_CLK_RPC, CLK_TYPE_R8A779A0_RPC, CLK_RPCSRC),
+ DEF_BASE("rpcd2", R8A779A0_CLK_RPCD2, CLK_TYPE_R8A779A0_RPCD2,
+ R8A779A0_CLK_RPC),
/* Core Clock Outputs */
+ DEF_Z("z0", R8A779A0_CLK_Z0, CLK_PLL20, 2, 0),
+ DEF_Z("z1", R8A779A0_CLK_Z1, CLK_PLL21, 2, 8),
DEF_FIXED("zx", R8A779A0_CLK_ZX, CLK_PLL20_DIV2, 2, 1),
DEF_FIXED("s1d1", R8A779A0_CLK_S1D1, CLK_S1, 1, 1),
DEF_FIXED("s1d2", R8A779A0_CLK_S1D2, CLK_S1, 2, 1),
@@ -193,6 +207,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("msi3", 621, R8A779A0_CLK_MSO),
DEF_MOD("msi4", 622, R8A779A0_CLK_MSO),
DEF_MOD("msi5", 623, R8A779A0_CLK_MSO),
+ DEF_MOD("rpc-if", 629, R8A779A0_CLK_RPCD2),
DEF_MOD("scif0", 702, R8A779A0_CLK_S1D8),
DEF_MOD("scif1", 703, R8A779A0_CLK_S1D8),
DEF_MOD("scif3", 704, R8A779A0_CLK_S1D8),
@@ -205,6 +220,7 @@ static const struct mssr_mod_clk r8a779a0_mod_clks[] __initconst = {
DEF_MOD("tmu2", 715, R8A779A0_CLK_S1D4),
DEF_MOD("tmu3", 716, R8A779A0_CLK_S1D4),
DEF_MOD("tmu4", 717, R8A779A0_CLK_S1D4),
+ DEF_MOD("tpu0", 718, R8A779A0_CLK_S1D8),
DEF_MOD("vin00", 730, R8A779A0_CLK_S1D1),
DEF_MOD("vin01", 731, R8A779A0_CLK_S1D1),
DEF_MOD("vin02", 800, R8A779A0_CLK_S1D1),
@@ -259,6 +275,162 @@ static const struct rcar_r8a779a0_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
+/*
+ * Z0 Clock & Z1 Clock
+ */
+#define CPG_FRQCRB 0x00000804
+#define CPG_FRQCRB_KICK BIT(31)
+#define CPG_FRQCRC 0x00000808
+
+struct cpg_z_clk {
+ struct clk_hw hw;
+ void __iomem *reg;
+ void __iomem *kick_reg;
+ unsigned long max_rate; /* Maximum rate for normal mode */
+ unsigned int fixed_div;
+ u32 mask;
+};
+
+#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
+
+static unsigned long cpg_z_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ u32 val;
+
+ val = readl(zclk->reg) & zclk->mask;
+ mult = 32 - (val >> __ffs(zclk->mask));
+
+ return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
+ 32 * zclk->fixed_div);
+}
+
+static int cpg_z_clk_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int min_mult, max_mult, mult;
+ unsigned long rate, prate;
+
+ rate = min(req->rate, req->max_rate);
+ if (rate <= zclk->max_rate) {
+ /* Set parent rate to initial value for normal modes */
+ prate = zclk->max_rate;
+ } else {
+ /* Set increased parent rate for boost modes */
+ prate = rate;
+ }
+ req->best_parent_rate = clk_hw_round_rate(clk_hw_get_parent(hw),
+ prate * zclk->fixed_div);
+
+ prate = req->best_parent_rate / zclk->fixed_div;
+ min_mult = max(div64_ul(req->min_rate * 32ULL, prate), 1ULL);
+ max_mult = min(div64_ul(req->max_rate * 32ULL, prate), 32ULL);
+ if (max_mult < min_mult)
+ return -EINVAL;
+
+ mult = DIV_ROUND_CLOSEST_ULL(rate * 32ULL, prate);
+ mult = clamp(mult, min_mult, max_mult);
+
+ req->rate = DIV_ROUND_CLOSEST_ULL((u64)prate * mult, 32);
+ return 0;
+}
+
+static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct cpg_z_clk *zclk = to_z_clk(hw);
+ unsigned int mult;
+ unsigned int i;
+
+ mult = DIV64_U64_ROUND_CLOSEST(rate * 32ULL * zclk->fixed_div,
+ parent_rate);
+ mult = clamp(mult, 1U, 32U);
+
+ if (readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
+ cpg_reg_modify(zclk->reg, zclk->mask, (32 - mult) << __ffs(zclk->mask));
+
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ cpg_reg_modify(zclk->kick_reg, 0, CPG_FRQCRB_KICK);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might be dependent on external xtal rate, pll1
+ * rate or even the other emulation clocks rate, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+static const struct clk_ops cpg_z_clk_ops = {
+ .recalc_rate = cpg_z_clk_recalc_rate,
+ .determine_rate = cpg_z_clk_determine_rate,
+ .set_rate = cpg_z_clk_set_rate,
+};
+
+static struct clk * __init cpg_z_clk_register(const char *name,
+ const char *parent_name,
+ void __iomem *reg,
+ unsigned int div,
+ unsigned int offset)
+{
+ struct clk_init_data init = {};
+ struct cpg_z_clk *zclk;
+ struct clk *clk;
+
+ zclk = kzalloc(sizeof(*zclk), GFP_KERNEL);
+ if (!zclk)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &cpg_z_clk_ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = &parent_name;
+ init.num_parents = 1;
+
+ zclk->reg = reg + CPG_FRQCRC;
+ zclk->kick_reg = reg + CPG_FRQCRB;
+ zclk->hw.init = &init;
+ zclk->mask = GENMASK(offset + 4, offset);
+ zclk->fixed_div = div; /* PLLVCO x 1/div x SYS-CPU divider */
+
+ clk = clk_register(NULL, &zclk->hw);
+ if (IS_ERR(clk)) {
+ kfree(zclk);
+ return clk;
+ }
+
+ zclk->max_rate = clk_hw_get_rate(clk_hw_get_parent(&zclk->hw)) /
+ zclk->fixed_div;
+ return clk;
+}
+
+/*
+ * RPC Clocks
+ */
+#define CPG_RPCCKCR 0x874
+
+static const struct clk_div_table cpg_rpcsrc_div_table[] = {
+ { 0, 4 }, { 1, 6 }, { 2, 5 }, { 3, 6 }, { 0, 0 },
+};
+
static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
const struct cpg_core_clk *core, const struct cpg_mssr_info *info,
struct clk **clks, void __iomem *base,
@@ -293,6 +465,10 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->pll5_div;
break;
+ case CLK_TYPE_R8A779A0_Z:
+ return cpg_z_clk_register(core->name, __clk_get_name(parent),
+ base, core->div, core->offset);
+
case CLK_TYPE_R8A779A0_SD:
return cpg_sd_clk_register(core->name, base, core->offset,
__clk_get_name(parent), notifiers,
@@ -322,6 +498,21 @@ static struct clk * __init rcar_r8a779a0_cpg_clk_register(struct device *dev,
div = cpg_pll_config->osc_prediv * core->div;
break;
+ case CLK_TYPE_R8A779A0_RPCSRC:
+ return clk_register_divider_table(NULL, core->name,
+ __clk_get_name(parent), 0,
+ base + CPG_RPCCKCR, 3, 2, 0,
+ cpg_rpcsrc_div_table,
+ &cpg_lock);
+
+ case CLK_TYPE_R8A779A0_RPC:
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent), notifiers);
+
+ case CLK_TYPE_R8A779A0_RPCD2:
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
+ __clk_get_name(parent));
+
default:
return ERR_PTR(-EINVAL);
}
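
For the new Z0/Z1 clocks above, the divider is expressed as a multiplier out of 32: the 5-bit FRQCRC field stores (32 - mult), and the output rate is parent_rate * mult / (32 * fixed_div). A small illustration of that arithmetic (the 2 GHz figure below is purely for illustration):

	#include <linux/kernel.h>
	#include <linux/types.h>

	/*
	 * Example numbers only: with a 2 GHz parent, fixed_div = 2 and a FRQCRC
	 * field value of 8, mult = 32 - 8 = 24, so the Z clock runs at
	 * 2,000,000,000 * 24 / (32 * 2) = 750 MHz.
	 */
	static unsigned long example_z_rate(unsigned long parent_rate, u32 field,
					    unsigned int fixed_div)
	{
		unsigned int mult = 32 - field;	/* the field stores 32 - mult */

		return DIV_ROUND_CLOSEST_ULL((u64)parent_rate * mult,
					     32 * fixed_div);
	}
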
diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c
index 1490446985e2..47c16265fca9 100644
--- a/drivers/clk/renesas/r9a07g044-cpg.c
+++ b/drivers/clk/renesas/r9a07g044-cpg.c
@@ -29,15 +29,27 @@ enum clk_ids {
CLK_PLL2_DIV16,
CLK_PLL2_DIV20,
CLK_PLL3,
+ CLK_PLL3_400,
+ CLK_PLL3_533,
CLK_PLL3_DIV2,
CLK_PLL3_DIV2_4,
CLK_PLL3_DIV2_4_2,
CLK_PLL3_DIV4,
+ CLK_SEL_PLL3_3,
+ CLK_DIV_PLL3_C,
CLK_PLL4,
CLK_PLL5,
- CLK_PLL5_DIV2,
+ CLK_PLL5_FOUT3,
+ CLK_PLL5_250,
CLK_PLL6,
+ CLK_PLL6_250,
CLK_P1_DIV2,
+ CLK_PLL2_800,
+ CLK_PLL2_SDHI_533,
+ CLK_PLL2_SDHI_400,
+ CLK_PLL2_SDHI_266,
+ CLK_SD0_DIV4,
+ CLK_SD1_DIV4,
/* Module Clocks */
MOD_CLK_BASE,
@@ -53,6 +65,11 @@ static const struct clk_div_table dtable_1_32[] = {
{0, 0},
};
+/* Mux clock tables */
+static const char * const sel_pll3_3[] = { ".pll3_533", ".pll3_400" };
+static const char * const sel_pll6_2[] = { ".pll6_250", ".pll5_250" };
+static const char * const sel_shdi[] = { ".clk_533", ".clk_400", ".clk_266" };
+
static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
/* External Clock Inputs */
DEF_INPUT("extal", CLK_EXTAL),
@@ -63,8 +80,20 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_SAMPLL(".pll1", CLK_PLL1, CLK_EXTAL, PLL146_CONF(0)),
DEF_FIXED(".pll2", CLK_PLL2, CLK_EXTAL, 133, 2),
DEF_FIXED(".pll3", CLK_PLL3, CLK_EXTAL, 133, 2),
+ DEF_FIXED(".pll3_400", CLK_PLL3_400, CLK_PLL3, 1, 4),
+ DEF_FIXED(".pll3_533", CLK_PLL3_533, CLK_PLL3, 1, 3),
+
+ DEF_FIXED(".pll5", CLK_PLL5, CLK_EXTAL, 125, 1),
+ DEF_FIXED(".pll5_fout3", CLK_PLL5_FOUT3, CLK_PLL5, 1, 6),
+
+ DEF_FIXED(".pll6", CLK_PLL6, CLK_EXTAL, 125, 6),
DEF_FIXED(".pll2_div2", CLK_PLL2_DIV2, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_800", CLK_PLL2_800, CLK_PLL2, 1, 2),
+ DEF_FIXED(".clk_533", CLK_PLL2_SDHI_533, CLK_PLL2, 1, 3),
+ DEF_FIXED(".clk_400", CLK_PLL2_SDHI_400, CLK_PLL2_800, 1, 2),
+ DEF_FIXED(".clk_266", CLK_PLL2_SDHI_266, CLK_PLL2_SDHI_533, 1, 2),
+
DEF_FIXED(".pll2_div16", CLK_PLL2_DIV16, CLK_PLL2, 1, 16),
DEF_FIXED(".pll2_div20", CLK_PLL2_DIV20, CLK_PLL2, 1, 20),
@@ -72,6 +101,13 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4),
DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2),
DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4),
+ DEF_MUX(".sel_pll3_3", CLK_SEL_PLL3_3, SEL_PLL3_3,
+ sel_pll3_3, ARRAY_SIZE(sel_pll3_3), 0, CLK_MUX_READ_ONLY),
+ DEF_DIV("divpl3c", CLK_DIV_PLL3_C, CLK_SEL_PLL3_3,
+ DIVPL3C, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+
+ DEF_FIXED(".pll5_250", CLK_PLL5_250, CLK_PLL5_FOUT3, 1, 2),
+ DEF_FIXED(".pll6_250", CLK_PLL6_250, CLK_PLL6, 1, 2),
/* Core output clk */
DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1),
@@ -84,6 +120,18 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = {
DEF_FIXED("P1_DIV2", CLK_P1_DIV2, R9A07G044_CLK_P1, 1, 2),
DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2,
DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK),
+ DEF_FIXED("M0", R9A07G044_CLK_M0, CLK_PLL3_DIV2_4, 1, 1),
+ DEF_FIXED("ZT", R9A07G044_CLK_ZT, CLK_PLL3_DIV2_4_2, 1, 1),
+ DEF_MUX("HP", R9A07G044_CLK_HP, SEL_PLL6_2,
+ sel_pll6_2, ARRAY_SIZE(sel_pll6_2), 0, CLK_MUX_HIWORD_MASK),
+ DEF_FIXED("SPI0", R9A07G044_CLK_SPI0, CLK_DIV_PLL3_C, 1, 2),
+ DEF_FIXED("SPI1", R9A07G044_CLK_SPI1, CLK_DIV_PLL3_C, 1, 4),
+ DEF_SD_MUX("SD0", R9A07G044_CLK_SD0, SEL_SDHI0,
+ sel_shdi, ARRAY_SIZE(sel_shdi)),
+ DEF_SD_MUX("SD1", R9A07G044_CLK_SD1, SEL_SDHI1,
+ sel_shdi, ARRAY_SIZE(sel_shdi)),
+ DEF_FIXED("SD0_DIV4", CLK_SD0_DIV4, R9A07G044_CLK_SD0, 1, 4),
+ DEF_FIXED("SD1_DIV4", CLK_SD1_DIV4, R9A07G044_CLK_SD1, 1, 4),
};
static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
@@ -97,6 +145,26 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x52c, 0),
DEF_MOD("dmac_pclk", R9A07G044_DMAC_PCLK, CLK_P1_DIV2,
0x52c, 1),
+ DEF_MOD("spi_clk2", R9A07G044_SPI_CLK2, R9A07G044_CLK_SPI1,
+ 0x550, 0),
+ DEF_MOD("spi_clk", R9A07G044_SPI_CLK, R9A07G044_CLK_SPI0,
+ 0x550, 1),
+ DEF_MOD("sdhi0_imclk", R9A07G044_SDHI0_IMCLK, CLK_SD0_DIV4,
+ 0x554, 0),
+ DEF_MOD("sdhi0_imclk2", R9A07G044_SDHI0_IMCLK2, CLK_SD0_DIV4,
+ 0x554, 1),
+ DEF_MOD("sdhi0_clk_hs", R9A07G044_SDHI0_CLK_HS, R9A07G044_CLK_SD0,
+ 0x554, 2),
+ DEF_MOD("sdhi0_aclk", R9A07G044_SDHI0_ACLK, R9A07G044_CLK_P1,
+ 0x554, 3),
+ DEF_MOD("sdhi1_imclk", R9A07G044_SDHI1_IMCLK, CLK_SD1_DIV4,
+ 0x554, 4),
+ DEF_MOD("sdhi1_imclk2", R9A07G044_SDHI1_IMCLK2, CLK_SD1_DIV4,
+ 0x554, 5),
+ DEF_MOD("sdhi1_clk_hs", R9A07G044_SDHI1_CLK_HS, R9A07G044_CLK_SD1,
+ 0x554, 6),
+ DEF_MOD("sdhi1_aclk", R9A07G044_SDHI1_ACLK, R9A07G044_CLK_P1,
+ 0x554, 7),
DEF_MOD("ssi0_pclk", R9A07G044_SSI0_PCLK2, R9A07G044_CLK_P0,
0x570, 0),
DEF_MOD("ssi0_sfr", R9A07G044_SSI0_PCLK_SFR, R9A07G044_CLK_P0,
@@ -121,6 +189,14 @@ static struct rzg2l_mod_clk r9a07g044_mod_clks[] = {
0x578, 2),
DEF_MOD("usb_pclk", R9A07G044_USB_PCLK, R9A07G044_CLK_P1,
0x578, 3),
+ DEF_COUPLED("eth0_axi", R9A07G044_ETH0_CLK_AXI, R9A07G044_CLK_M0,
+ 0x57c, 0),
+ DEF_COUPLED("eth0_chi", R9A07G044_ETH0_CLK_CHI, R9A07G044_CLK_ZT,
+ 0x57c, 0),
+ DEF_COUPLED("eth1_axi", R9A07G044_ETH1_CLK_AXI, R9A07G044_CLK_M0,
+ 0x57c, 1),
+ DEF_COUPLED("eth1_chi", R9A07G044_ETH1_CLK_CHI, R9A07G044_CLK_ZT,
+ 0x57c, 1),
DEF_MOD("i2c0", R9A07G044_I2C0_PCLK, R9A07G044_CLK_P0,
0x580, 0),
DEF_MOD("i2c1", R9A07G044_I2C1_PCLK, R9A07G044_CLK_P0,
@@ -157,6 +233,9 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0),
DEF_RST(R9A07G044_DMAC_ARESETN, 0x82c, 0),
DEF_RST(R9A07G044_DMAC_RST_ASYNC, 0x82c, 1),
+ DEF_RST(R9A07G044_SPI_RST, 0x850, 0),
+ DEF_RST(R9A07G044_SDHI0_IXRST, 0x854, 0),
+ DEF_RST(R9A07G044_SDHI1_IXRST, 0x854, 1),
DEF_RST(R9A07G044_SSI0_RST_M2_REG, 0x870, 0),
DEF_RST(R9A07G044_SSI1_RST_M2_REG, 0x870, 1),
DEF_RST(R9A07G044_SSI2_RST_M2_REG, 0x870, 2),
@@ -165,6 +244,8 @@ static struct rzg2l_reset r9a07g044_resets[] = {
DEF_RST(R9A07G044_USB_U2H1_HRESETN, 0x878, 1),
DEF_RST(R9A07G044_USB_U2P_EXL_SYSRST, 0x878, 2),
DEF_RST(R9A07G044_USB_PRESETN, 0x878, 3),
+ DEF_RST(R9A07G044_ETH0_RST_HW_N, 0x87c, 0),
+ DEF_RST(R9A07G044_ETH1_RST_HW_N, 0x87c, 1),
DEF_RST(R9A07G044_I2C0_MRST, 0x880, 0),
DEF_RST(R9A07G044_I2C1_MRST, 0x880, 1),
DEF_RST(R9A07G044_I2C2_MRST, 0x880, 2),
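
The SDHI-related core clocks added above form a small fixed-divider tree under PLL2. Assuming the usual 24 MHz EXTAL (an assumption; the input rate is not part of this patch), the nominal rates work out roughly as sketched below, which is why the mux parents are labelled 533/400/266:

	/*
	 * Rough arithmetic, assuming EXTAL = 24 MHz (not stated in this patch):
	 *
	 *   .pll2    = 24 MHz * 133 / 2 = 1596 MHz
	 *   .clk_800 = .pll2 / 2        =  798 MHz
	 *   .clk_533 = .pll2 / 3        =  532 MHz
	 *   .clk_400 = .clk_800 / 2     =  399 MHz
	 *   .clk_266 = .clk_533 / 2     =  266 MHz
	 *
	 * SD0/SD1 select one of clk_533/clk_400/clk_266, and SD0_DIV4/SD1_DIV4
	 * divide the selected rate by four for the SDHI interface clocks.
	 */
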
diff --git a/drivers/clk/renesas/rcar-cpg-lib.c b/drivers/clk/renesas/rcar-cpg-lib.c
index 5678768ee1f2..e93f0011eb07 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.c
+++ b/drivers/clk/renesas/rcar-cpg-lib.c
@@ -267,4 +267,87 @@ free_clock:
return clk;
}
+struct rpc_clock {
+ struct clk_divider div;
+ struct clk_gate gate;
+ /*
+ * One notifier covers both RPC and RPCD2 clocks as they are both
+ * controlled by the same RPCCKCR register...
+ */
+ struct cpg_simple_notifier csn;
+};
+
+static const struct clk_div_table cpg_rpc_div_table[] = {
+ { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
+};
+
+struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *rpcckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers)
+{
+ struct rpc_clock *rpc;
+ struct clk *clk;
+
+ rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
+ if (!rpc)
+ return ERR_PTR(-ENOMEM);
+
+ rpc->div.reg = rpcckcr;
+ rpc->div.width = 3;
+ rpc->div.table = cpg_rpc_div_table;
+ rpc->div.lock = &cpg_lock;
+
+ rpc->gate.reg = rpcckcr;
+ rpc->gate.bit_idx = 8;
+ rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpc->gate.lock = &cpg_lock;
+
+ rpc->csn.reg = rpcckcr;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpc->div.hw, &clk_divider_ops,
+ &rpc->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ if (IS_ERR(clk)) {
+ kfree(rpc);
+ return clk;
+ }
+
+ cpg_simple_notifier_register(notifiers, &rpc->csn);
+ return clk;
+}
+
+struct rpcd2_clock {
+ struct clk_fixed_factor fixed;
+ struct clk_gate gate;
+};
+
+struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *rpcckcr,
+ const char *parent_name)
+{
+ struct rpcd2_clock *rpcd2;
+ struct clk *clk;
+
+ rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
+ if (!rpcd2)
+ return ERR_PTR(-ENOMEM);
+
+ rpcd2->fixed.mult = 1;
+ rpcd2->fixed.div = 2;
+
+ rpcd2->gate.reg = rpcckcr;
+ rpcd2->gate.bit_idx = 9;
+ rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
+ rpcd2->gate.lock = &cpg_lock;
+
+ clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
+ &rpcd2->fixed.hw, &clk_fixed_factor_ops,
+ &rpcd2->gate.hw, &clk_gate_ops,
+ CLK_SET_RATE_PARENT);
+ if (IS_ERR(clk))
+ kfree(rpcd2);
+
+ return clk;
+}
diff --git a/drivers/clk/renesas/rcar-cpg-lib.h b/drivers/clk/renesas/rcar-cpg-lib.h
index d00c91b116ca..35c0217c2f8b 100644
--- a/drivers/clk/renesas/rcar-cpg-lib.h
+++ b/drivers/clk/renesas/rcar-cpg-lib.h
@@ -30,4 +30,11 @@ struct clk * __init cpg_sd_clk_register(const char *name,
void __iomem *base, unsigned int offset, const char *parent_name,
struct raw_notifier_head *notifiers, bool skip_first);
+struct clk * __init cpg_rpc_clk_register(const char *name,
+ void __iomem *rpcckcr, const char *parent_name,
+ struct raw_notifier_head *notifiers);
+
+struct clk * __init cpg_rpcd2_clk_register(const char *name,
+ void __iomem *rpcckcr,
+ const char *parent_name);
#endif
diff --git a/drivers/clk/renesas/rcar-gen3-cpg.c b/drivers/clk/renesas/rcar-gen3-cpg.c
index 558191c99b48..741f6e74bbcf 100644
--- a/drivers/clk/renesas/rcar-gen3-cpg.c
+++ b/drivers/clk/renesas/rcar-gen3-cpg.c
@@ -301,95 +301,10 @@ static struct clk * __init cpg_z_clk_register(const char *name,
return clk;
}
-struct rpc_clock {
- struct clk_divider div;
- struct clk_gate gate;
- /*
- * One notifier covers both RPC and RPCD2 clocks as they are both
- * controlled by the same RPCCKCR register...
- */
- struct cpg_simple_notifier csn;
-};
-
static const struct clk_div_table cpg_rpcsrc_div_table[] = {
{ 2, 5 }, { 3, 6 }, { 0, 0 },
};
-static const struct clk_div_table cpg_rpc_div_table[] = {
- { 1, 2 }, { 3, 4 }, { 5, 6 }, { 7, 8 }, { 0, 0 },
-};
-
-static struct clk * __init cpg_rpc_clk_register(const char *name,
- void __iomem *base, const char *parent_name,
- struct raw_notifier_head *notifiers)
-{
- struct rpc_clock *rpc;
- struct clk *clk;
-
- rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
- if (!rpc)
- return ERR_PTR(-ENOMEM);
-
- rpc->div.reg = base + CPG_RPCCKCR;
- rpc->div.width = 3;
- rpc->div.table = cpg_rpc_div_table;
- rpc->div.lock = &cpg_lock;
-
- rpc->gate.reg = base + CPG_RPCCKCR;
- rpc->gate.bit_idx = 8;
- rpc->gate.flags = CLK_GATE_SET_TO_DISABLE;
- rpc->gate.lock = &cpg_lock;
-
- rpc->csn.reg = base + CPG_RPCCKCR;
-
- clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
- &rpc->div.hw, &clk_divider_ops,
- &rpc->gate.hw, &clk_gate_ops,
- CLK_SET_RATE_PARENT);
- if (IS_ERR(clk)) {
- kfree(rpc);
- return clk;
- }
-
- cpg_simple_notifier_register(notifiers, &rpc->csn);
- return clk;
-}
-
-struct rpcd2_clock {
- struct clk_fixed_factor fixed;
- struct clk_gate gate;
-};
-
-static struct clk * __init cpg_rpcd2_clk_register(const char *name,
- void __iomem *base,
- const char *parent_name)
-{
- struct rpcd2_clock *rpcd2;
- struct clk *clk;
-
- rpcd2 = kzalloc(sizeof(*rpcd2), GFP_KERNEL);
- if (!rpcd2)
- return ERR_PTR(-ENOMEM);
-
- rpcd2->fixed.mult = 1;
- rpcd2->fixed.div = 2;
-
- rpcd2->gate.reg = base + CPG_RPCCKCR;
- rpcd2->gate.bit_idx = 9;
- rpcd2->gate.flags = CLK_GATE_SET_TO_DISABLE;
- rpcd2->gate.lock = &cpg_lock;
-
- clk = clk_register_composite(NULL, name, &parent_name, 1, NULL, NULL,
- &rpcd2->fixed.hw, &clk_fixed_factor_ops,
- &rpcd2->gate.hw, &clk_gate_ops,
- CLK_SET_RATE_PARENT);
- if (IS_ERR(clk))
- kfree(rpcd2);
-
- return clk;
-}
-
-
static const struct rcar_gen3_cpg_pll_config *cpg_pll_config __initdata;
static unsigned int cpg_clk_extalr __initdata;
static u32 cpg_mode __initdata;
@@ -600,11 +515,11 @@ struct clk * __init rcar_gen3_cpg_clk_register(struct device *dev,
break;
case CLK_TYPE_GEN3_RPC:
- return cpg_rpc_clk_register(core->name, base,
+ return cpg_rpc_clk_register(core->name, base + CPG_RPCCKCR,
__clk_get_name(parent), notifiers);
case CLK_TYPE_GEN3_RPCD2:
- return cpg_rpcd2_clk_register(core->name, base,
+ return cpg_rpcd2_clk_register(core->name, base + CPG_RPCCKCR,
__clk_get_name(parent));
default:
diff --git a/drivers/clk/renesas/rzg2l-cpg.c b/drivers/clk/renesas/rzg2l-cpg.c
index 761922ea5db7..4021f6cabda4 100644
--- a/drivers/clk/renesas/rzg2l-cpg.c
+++ b/drivers/clk/renesas/rzg2l-cpg.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of_address.h>
@@ -55,6 +56,14 @@
#define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val) ((val >> 12) & 0xfff)
+struct sd_hw_data {
+ struct clk_hw hw;
+ u32 conf;
+ struct rzg2l_cpg_priv *priv;
+};
+
+#define to_sd_hw_data(_hw) container_of(_hw, struct sd_hw_data, hw)
+
/**
* struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
*
@@ -130,6 +139,132 @@ rzg2l_cpg_div_clk_register(const struct cpg_core_clk *core,
return clk_hw->clk;
}
+static struct clk * __init
+rzg2l_cpg_mux_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ struct rzg2l_cpg_priv *priv)
+{
+ const struct clk_hw *clk_hw;
+
+ clk_hw = devm_clk_hw_register_mux(priv->dev, core->name,
+ core->parent_names, core->num_parents,
+ core->flag,
+ base + GET_REG_OFFSET(core->conf),
+ GET_SHIFT(core->conf),
+ GET_WIDTH(core->conf),
+ core->mux_flags, &priv->rmw_lock);
+ if (IS_ERR(clk_hw))
+ return ERR_CAST(clk_hw);
+
+ return clk_hw->clk;
+}
+
+static int rzg2l_cpg_sd_clk_mux_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
+{
+ return clk_mux_determine_rate_flags(hw, req, 0);
+}
+
+static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw *hw, u8 index)
+{
+ struct sd_hw_data *hwdata = to_sd_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+ u32 off = GET_REG_OFFSET(hwdata->conf);
+ u32 shift = GET_SHIFT(hwdata->conf);
+ const u32 clk_src_266 = 2;
+ u32 bitmask;
+
+ /*
+ * As per the HW manual, we should not directly switch from 533 MHz to
+ * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
+ * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
+ * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
+ * (400 MHz)).
+ * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
+ * switching register is prohibited.
+ * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
+ * the index to value mapping is done by adding 1 to the index.
+ */
+ bitmask = (GENMASK(GET_WIDTH(hwdata->conf) - 1, 0) << shift) << 16;
+ if (index != clk_src_266) {
+ u32 msk, val;
+ int ret;
+
+ writel(bitmask | ((clk_src_266 + 1) << shift), priv->base + off);
+
+ msk = off ? CPG_CLKSTATUS_SELSDHI1_STS : CPG_CLKSTATUS_SELSDHI0_STS;
+
+ ret = readl_poll_timeout(priv->base + CPG_CLKSTATUS, val,
+ !(val & msk), 100,
+ CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US);
+ if (ret) {
+ dev_err(priv->dev, "failed to switch clk source\n");
+ return ret;
+ }
+ }
+
+ writel(bitmask | ((index + 1) << shift), priv->base + off);
+
+ return 0;
+}
+
+static u8 rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw *hw)
+{
+ struct sd_hw_data *hwdata = to_sd_hw_data(hw);
+ struct rzg2l_cpg_priv *priv = hwdata->priv;
+ u32 val = readl(priv->base + GET_REG_OFFSET(hwdata->conf));
+
+ val >>= GET_SHIFT(hwdata->conf);
+ val &= GENMASK(GET_WIDTH(hwdata->conf) - 1, 0);
+ if (val) {
+ val--;
+ } else {
+ /* Prohibited clk source, change it to 533 MHz(reset value) */
+ rzg2l_cpg_sd_clk_mux_set_parent(hw, 0);
+ }
+
+ return val;
+}
+
+static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops = {
+ .determine_rate = rzg2l_cpg_sd_clk_mux_determine_rate,
+ .set_parent = rzg2l_cpg_sd_clk_mux_set_parent,
+ .get_parent = rzg2l_cpg_sd_clk_mux_get_parent,
+};
+
+static struct clk * __init
+rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk *core,
+ void __iomem *base,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct sd_hw_data *clk_hw_data;
+ struct clk_init_data init;
+ struct clk_hw *clk_hw;
+ int ret;
+
+ clk_hw_data = devm_kzalloc(priv->dev, sizeof(*clk_hw_data), GFP_KERNEL);
+ if (!clk_hw_data)
+ return ERR_PTR(-ENOMEM);
+
+ clk_hw_data->priv = priv;
+ clk_hw_data->conf = core->conf;
+
+ init.name = GET_SHIFT(core->conf) ? "sd1" : "sd0";
+ init.ops = &rzg2l_cpg_sd_clk_mux_ops;
+ init.flags = 0;
+ init.num_parents = core->num_parents;
+ init.parent_names = core->parent_names;
+
+ clk_hw = &clk_hw_data->hw;
+ clk_hw->init = &init;
+
+ ret = devm_clk_hw_register(priv->dev, clk_hw);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return clk_hw->clk;
+}
+
struct pll_clk {
struct clk_hw hw;
unsigned int conf;
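
The "bitmask ... << 16" write in rzg2l_cpg_sd_clk_mux_set_parent() above follows the same write-enable convention as the CLK_DIVIDER_HIWORD_MASK / CLK_MUX_HIWORD_MASK flags used elsewhere in this patch: the upper 16 bits of the register act as a per-bit write enable for the lower 16, so a single write updates one field without a read-modify-write. A hedged sketch of that convention in isolation (helper name invented):

	#include <linux/bits.h>
	#include <linux/io.h>

	/*
	 * Write @val into a @width-bit field at @shift, assuming the register's
	 * upper half is a write-enable mask for its lower half. Only the enabled
	 * field changes; other fields keep their current value.
	 */
	static void example_hiword_field_write(void __iomem *reg, unsigned int shift,
					       unsigned int width, u32 val)
	{
		u32 mask = GENMASK(width - 1, 0) << shift;

		writel((mask << 16) | (val << shift), reg);
	}
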
@@ -288,6 +423,12 @@ rzg2l_cpg_register_core_clk(const struct cpg_core_clk *core,
clk = rzg2l_cpg_div_clk_register(core, priv->clks,
priv->base, priv);
break;
+ case CLK_TYPE_MUX:
+ clk = rzg2l_cpg_mux_clk_register(core, priv->base, priv);
+ break;
+ case CLK_TYPE_SD_MUX:
+ clk = rzg2l_cpg_sd_mux_clk_register(core, priv->base, priv);
+ break;
default:
goto fail;
}
@@ -310,13 +451,17 @@ fail:
* @hw: handle between common and hardware-specific interfaces
* @off: register offset
* @bit: ON/MON bit
+ * @enabled: soft state of the clock, if it is coupled with another clock
* @priv: CPG/MSTP private data
+ * @sibling: pointer to the other coupled clock
*/
struct mstp_clock {
struct clk_hw hw;
u16 off;
u8 bit;
+ bool enabled;
struct rzg2l_cpg_priv *priv;
+ struct mstp_clock *sibling;
};
#define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
@@ -369,11 +514,41 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable)
static int rzg2l_mod_clock_enable(struct clk_hw *hw)
{
+ struct mstp_clock *clock = to_mod_clock(hw);
+
+ if (clock->sibling) {
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ unsigned long flags;
+ bool enabled;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ enabled = clock->sibling->enabled;
+ clock->enabled = true;
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ if (enabled)
+ return 0;
+ }
+
return rzg2l_mod_clock_endisable(hw, true);
}
static void rzg2l_mod_clock_disable(struct clk_hw *hw)
{
+ struct mstp_clock *clock = to_mod_clock(hw);
+
+ if (clock->sibling) {
+ struct rzg2l_cpg_priv *priv = clock->priv;
+ unsigned long flags;
+ bool enabled;
+
+ spin_lock_irqsave(&priv->rmw_lock, flags);
+ enabled = clock->sibling->enabled;
+ clock->enabled = false;
+ spin_unlock_irqrestore(&priv->rmw_lock, flags);
+ if (enabled)
+ return;
+ }
+
rzg2l_mod_clock_endisable(hw, false);
}
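
The enable/disable paths above implement the coupled-clock rule for the DEF_COUPLED entries added to r9a07g044-cpg.c earlier in this patch: two logical clocks share one ON/OFF bit, so the hardware is only touched when the OR of the two soft states changes. A minimal sketch of that decision logic (names invented; the real code does this under priv->rmw_lock, locking is omitted here):

	#include <linux/types.h>

	/* Mirrors the soft state kept per coupled module clock. */
	struct example_coupled {
		bool enabled;
		struct example_coupled *sibling;
	};

	/* Returns true when the caller should actually set the shared HW bit. */
	static bool example_coupled_enable(struct example_coupled *c)
	{
		bool sibling_on = c->sibling && c->sibling->enabled;

		c->enabled = true;
		return !sibling_on;	/* sibling already holds the bit on */
	}

	/* Returns true when the caller should actually clear the shared HW bit. */
	static bool example_coupled_disable(struct example_coupled *c)
	{
		bool sibling_on = c->sibling && c->sibling->enabled;

		c->enabled = false;
		return !sibling_on;	/* sibling still needs the bit on */
	}
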
@@ -389,6 +564,9 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
return 1;
}
+ if (clock->sibling)
+ return clock->enabled;
+
value = readl(priv->base + CLK_MON_R(clock->off));
return value & bitmask;
@@ -400,6 +578,28 @@ static const struct clk_ops rzg2l_mod_clock_ops = {
.is_enabled = rzg2l_mod_clock_is_enabled,
};
+static struct mstp_clock
+*rzg2l_mod_clock__get_sibling(struct mstp_clock *clock,
+ struct rzg2l_cpg_priv *priv)
+{
+ struct clk_hw *hw;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_mod_clks; i++) {
+ struct mstp_clock *clk;
+
+ if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
+ continue;
+
+ hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
+ clk = to_mod_clock(hw);
+ if (clock->off == clk->off && clock->bit == clk->bit)
+ return clk;
+ }
+
+ return NULL;
+}
+
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
const struct rzg2l_cpg_info *info,
@@ -461,6 +661,18 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
priv->clks[id] = clk;
+
+ if (mod->is_coupled) {
+ struct mstp_clock *sibling;
+
+ clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
+ sibling = rzg2l_mod_clock__get_sibling(clock, priv);
+ if (sibling) {
+ clock->sibling = sibling;
+ sibling->sibling = clock;
+ }
+ }
+
return;
fail:
diff --git a/drivers/clk/renesas/rzg2l-cpg.h b/drivers/clk/renesas/rzg2l-cpg.h
index 63695280ce8b..7fb6b4030f72 100644
--- a/drivers/clk/renesas/rzg2l-cpg.h
+++ b/drivers/clk/renesas/rzg2l-cpg.h
@@ -11,6 +11,15 @@
#define CPG_PL2_DDIV (0x204)
#define CPG_PL3A_DDIV (0x208)
+#define CPG_PL2SDHI_DSEL (0x218)
+#define CPG_CLKSTATUS (0x280)
+#define CPG_PL3_SSEL (0x408)
+#define CPG_PL6_ETH_SSEL (0x418)
+
+#define CPG_CLKSTATUS_SELSDHI0_STS BIT(28)
+#define CPG_CLKSTATUS_SELSDHI1_STS BIT(29)
+
+#define CPG_SDHI_CLK_SWITCH_STATUS_TIMEOUT_US 20000
/* n = 0/1/2 for PLL1/4/6 */
#define CPG_SAMPLL_CLK1(n) (0x04 + (16 * n))
@@ -23,6 +32,16 @@
#define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3)
#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3)
#define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3)
+#define DIVPL3C DDIV_PACK(CPG_PL3A_DDIV, 8, 3)
+
+#define SEL_PLL_PACK(offset, bitpos, size) \
+ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8))
+
+#define SEL_PLL3_3 SEL_PLL_PACK(CPG_PL3_SSEL, 8, 1)
+#define SEL_PLL6_2 SEL_PLL_PACK(CPG_PL6_ETH_SSEL, 0, 1)
+
+#define SEL_SDHI0 DDIV_PACK(CPG_PL2SDHI_DSEL, 0, 2)
+#define SEL_SDHI1 DDIV_PACK(CPG_PL2SDHI_DSEL, 4, 2)
/**
* Definitions of CPG Core Clocks
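
SEL_PLL_PACK() above (like the DDIV_PACK() used for the dividers) packs a register offset, a bit position and a field width into a single conf word: the offset at bit 20, the bit position at bit 12 and the width at bit 8. The GET_REG_OFFSET()/GET_SHIFT()/GET_WIDTH() helpers used from rzg2l-cpg.c presumably unpack the same layout; a sketch of such a decode follows, with the exact field masks being an assumption rather than something shown in this patch:

	/* Inferred layout; the real helpers live elsewhere in rzg2l-cpg.h. */
	#define EXAMPLE_GET_REG_OFFSET(conf)	(((conf) >> 20) & 0xfff)
	#define EXAMPLE_GET_SHIFT(conf)		(((conf) >> 12) & 0xff)
	#define EXAMPLE_GET_WIDTH(conf)		(((conf) >> 8) & 0xf)
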
@@ -43,6 +62,7 @@ struct cpg_core_clk {
const struct clk_div_table *dtable;
const char * const *parent_names;
int flag;
+ int mux_flags;
int num_parents;
};
@@ -54,6 +74,12 @@ enum clk_types {
/* Clock with divider */
CLK_TYPE_DIV,
+
+ /* Clock with clock source selector */
+ CLK_TYPE_MUX,
+
+ /* Clock with SD clock source selector */
+ CLK_TYPE_SD_MUX,
};
#define DEF_TYPE(_name, _id, _type...) \
@@ -69,6 +95,14 @@ enum clk_types {
#define DEF_DIV(_name, _id, _parent, _conf, _dtable, _flag) \
DEF_TYPE(_name, _id, CLK_TYPE_DIV, .conf = _conf, \
.parent = _parent, .dtable = _dtable, .flag = _flag)
+#define DEF_MUX(_name, _id, _conf, _parent_names, _num_parents, _flag, \
+ _mux_flags) \
+ DEF_TYPE(_name, _id, CLK_TYPE_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents, \
+ .flag = _flag, .mux_flags = _mux_flags)
+#define DEF_SD_MUX(_name, _id, _conf, _parent_names, _num_parents) \
+ DEF_TYPE(_name, _id, CLK_TYPE_SD_MUX, .conf = _conf, \
+ .parent_names = _parent_names, .num_parents = _num_parents)
/**
* struct rzg2l_mod_clk - Module Clocks definitions
@@ -78,6 +112,7 @@ enum clk_types {
* @parent: id of parent clock
* @off: register offset
* @bit: ON/MON bit
+ * @is_coupled: flag to indicate coupled clock
*/
struct rzg2l_mod_clk {
const char *name;
@@ -85,17 +120,25 @@ struct rzg2l_mod_clk {
unsigned int parent;
u16 off;
u8 bit;
+ bool is_coupled;
};
-#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+#define DEF_MOD_BASE(_name, _id, _parent, _off, _bit, _is_coupled) \
{ \
.name = _name, \
.id = MOD_CLK_BASE + (_id), \
.parent = (_parent), \
.off = (_off), \
.bit = (_bit), \
+ .is_coupled = (_is_coupled), \
}
+#define DEF_MOD(_name, _id, _parent, _off, _bit) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, false)
+
+#define DEF_COUPLED(_name, _id, _parent, _off, _bit) \
+ DEF_MOD_BASE(_name, _id, _parent, _off, _bit, true)
+
/**
* struct rzg2l_reset - Reset definitions
*
diff --git a/drivers/clk/rockchip/clk-rk3399.c b/drivers/clk/rockchip/clk-rk3399.c
index 62a4f2543960..7924598747b6 100644
--- a/drivers/clk/rockchip/clk-rk3399.c
+++ b/drivers/clk/rockchip/clk-rk3399.c
@@ -481,7 +481,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
COMPOSITE_NOMUX(0, "atclk_core_l", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(1), 0, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3399_CLKGATE_CON(0), 5, GFLAGS),
- COMPOSITE_NOMUX(0, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED,
+ COMPOSITE_NOMUX(PCLK_COREDBG_L, "pclk_dbg_core_l", "armclkl", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(1), 8, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
RK3399_CLKGATE_CON(0), 6, GFLAGS),
@@ -531,7 +531,7 @@ static struct rockchip_clk_branch rk3399_clk_branches[] __initdata = {
GATE(ACLK_GIC_ADB400_CORE_B_2_GIC, "aclk_core_adb400_core_b_2_gic", "armclkb", CLK_IGNORE_UNUSED,
RK3399_CLKGATE_CON(14), 4, GFLAGS),
- DIV(0, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
+ DIV(PCLK_COREDBG_B, "pclken_dbg_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
RK3399_CLKSEL_CON(3), 13, 2, DFLAGS | CLK_DIVIDER_READ_ONLY),
GATE(0, "pclk_dbg_cxcs_pd_core_b", "pclk_dbg_core_b", CLK_IGNORE_UNUSED,
@@ -1514,7 +1514,10 @@ static const char *const rk3399_cru_critical_clocks[] __initconst = {
"aclk_vio_noc",
/* ddrc */
- "sclk_ddrc"
+ "sclk_ddrc",
+
+ "armclkl",
+ "armclkb",
};
static const char *const rk3399_pmucru_critical_clocks[] __initconst = {
@@ -1549,9 +1552,6 @@ static void __init rk3399_clk_init(struct device_node *np)
rockchip_clk_register_branches(ctx, rk3399_clk_branches,
ARRAY_SIZE(rk3399_clk_branches));
- rockchip_clk_protect_critical(rk3399_cru_critical_clocks,
- ARRAY_SIZE(rk3399_cru_critical_clocks));
-
rockchip_clk_register_armclk(ctx, ARMCLKL, "armclkl",
mux_armclkl_p, ARRAY_SIZE(mux_armclkl_p),
&rk3399_cpuclkl_data, rk3399_cpuclkl_rates,
@@ -1562,6 +1562,9 @@ static void __init rk3399_clk_init(struct device_node *np)
&rk3399_cpuclkb_data, rk3399_cpuclkb_rates,
ARRAY_SIZE(rk3399_cpuclkb_rates));
+ rockchip_clk_protect_critical(rk3399_cru_critical_clocks,
+ ARRAY_SIZE(rk3399_cru_critical_clocks));
+
rockchip_register_softrst(np, 21, reg_base + RK3399_SOFTRST_CON(0),
ROCKCHIP_SOFTRST_HIWORD_MASK);
@@ -1653,7 +1656,7 @@ static struct platform_driver clk_rk3399_driver = {
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
+module_platform_driver_probe(clk_rk3399_driver, clk_rk3399_probe);
MODULE_DESCRIPTION("Rockchip RK3399 Clock Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/rockchip/clk-rk3568.c b/drivers/clk/rockchip/clk-rk3568.c
index 75ca855e720d..939e7079c334 100644
--- a/drivers/clk/rockchip/clk-rk3568.c
+++ b/drivers/clk/rockchip/clk-rk3568.c
@@ -1719,7 +1719,7 @@ static struct platform_driver clk_rk3568_driver = {
.suppress_bind_attrs = true,
},
};
-builtin_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
+module_platform_driver_probe(clk_rk3568_driver, clk_rk3568_probe);
MODULE_DESCRIPTION("Rockchip RK3568 Clock Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/clk/samsung/Kconfig b/drivers/clk/samsung/Kconfig
index 0441c4f73ac9..0e18d6ff2916 100644
--- a/drivers/clk/samsung/Kconfig
+++ b/drivers/clk/samsung/Kconfig
@@ -67,7 +67,8 @@ config EXYNOS_5420_COMMON_CLK
depends on COMMON_CLK_SAMSUNG
help
Support for the clock controller present on the Samsung
- Exynos5420 SoCs. Choose Y here only if you build for this SoC.
+ Exynos5420/Exynos5422/Exynos5800 SoCs. Choose Y here only if you
+ build for this SoC.
config EXYNOS_ARM64_COMMON_CLK
bool "Samsung Exynos ARMv8-family clock controller support" if COMPILE_TEST
@@ -79,38 +80,47 @@ config EXYNOS_AUDSS_CLK_CON
default y if ARCH_EXYNOS
help
Support for the Audio Subsystem CLKCON clock controller present
- on some Exynos SoC variants. Choose M or Y here if you want to
- use audio devices such as I2S, PCM, etc.
+ on some Samsung Exynos SoC variants. Choose M or Y here if you want
+ to use audio devices such as I2S, PCM, etc.
config EXYNOS_CLKOUT
tristate "Samsung Exynos clock output driver"
depends on COMMON_CLK_SAMSUNG
default y if ARCH_EXYNOS
help
- Support for the clock output (XCLKOUT) present on some of Exynos SoC
- variants. Usually the XCLKOUT is used to monitor the status of the
- certains clocks from SoC, but it could also be tied to other devices
- as an input clock.
+ Support for the clock output (XCLKOUT) present on some of Samsung
+ Exynos SoC variants. Usually the XCLKOUT is used to monitor the
+ status of the certains clocks from SoC, but it could also be tied to
+ other devices as an input clock.
# For S3C24XX platforms, select following symbols:
config S3C2410_COMMON_CLK
bool "Samsung S3C2410 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
help
- Build the s3c2410 clock driver based on the common clock framework.
+ Support for the clock controller present on the Samsung
+ S3C2410/S3C2440/S3C2442 SoCs. Choose Y here only if you build for
+ this SoC.
config S3C2410_COMMON_DCLK
bool
select COMMON_CLK_SAMSUNG
select REGMAP_MMIO
help
- Temporary symbol to build the dclk driver based on the common clock
- framework.
+ Support for the dclk clock controller present on the Samsung
+ S3C2410/S3C2412/S3C2440/S3C2443 SoCs. Choose Y here only if you build
+ for this SoC.
config S3C2412_COMMON_CLK
bool "Samsung S3C2412 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung S3C2412 SoCs.
+ Choose Y here only if you build for this SoC.
config S3C2443_COMMON_CLK
bool "Samsung S3C2443 clock controller support" if COMPILE_TEST
select COMMON_CLK_SAMSUNG
+ help
+ Support for the clock controller present on the Samsung
+ S3C2416/S3C2443 SoCs. Choose Y here only if you build for this SoC.
diff --git a/drivers/clk/samsung/Makefile b/drivers/clk/samsung/Makefile
index 028b2e27a37e..c46cf11e4d0b 100644
--- a/drivers/clk/samsung/Makefile
+++ b/drivers/clk/samsung/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos5433.o
obj-$(CONFIG_EXYNOS_AUDSS_CLK_CON) += clk-exynos-audss.o
obj-$(CONFIG_EXYNOS_CLKOUT) += clk-exynos-clkout.o
obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos7.o
+obj-$(CONFIG_EXYNOS_ARM64_COMMON_CLK) += clk-exynos850.o
obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
diff --git a/drivers/clk/samsung/clk-cpu.c b/drivers/clk/samsung/clk-cpu.c
index 00ef4d1b0888..7f20d9aedaa9 100644
--- a/drivers/clk/samsung/clk-cpu.c
+++ b/drivers/clk/samsung/clk-cpu.c
@@ -469,3 +469,21 @@ free_cpuclk:
kfree(cpuclk);
return ret;
}
+
+void __init samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+ const struct samsung_cpu_clock *list, unsigned int nr_clk)
+{
+ unsigned int idx;
+ unsigned int num_cfgs;
+ struct clk_hw **hws = ctx->clk_data.hws;
+
+ for (idx = 0; idx < nr_clk; idx++, list++) {
+ /* find count of configuration rates in cfg */
+ for (num_cfgs = 0; list->cfg[num_cfgs].prate != 0; )
+ num_cfgs++;
+
+ exynos_register_cpu_clock(ctx, list->id, list->name, hws[list->parent_id],
+ hws[list->alt_parent_id], list->offset, list->cfg, num_cfgs,
+ list->flags);
+ }
+}
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 42b5d32c6cc7..9cc127a162ad 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -129,7 +129,6 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
struct clk *pll_ref, *pll_in, *cdclk, *sclk_audio, *sclk_pcm_in;
const struct exynos_audss_clk_drvdata *variant;
struct clk_hw **clk_table;
- struct resource *res;
struct device *dev = &pdev->dev;
int i, ret = 0;
@@ -137,8 +136,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk-exynos4412-isp.c b/drivers/clk/samsung/clk-exynos4412-isp.c
index b69e381b8c0c..471a6fb82670 100644
--- a/drivers/clk/samsung/clk-exynos4412-isp.c
+++ b/drivers/clk/samsung/clk-exynos4412-isp.c
@@ -110,11 +110,9 @@ static int __init exynos4x12_isp_clk_probe(struct platform_device *pdev)
struct samsung_clk_provider *ctx;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
void __iomem *reg_base;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk-exynos5433.c b/drivers/clk/samsung/clk-exynos5433.c
index f203074d858b..f9daae20f393 100644
--- a/drivers/clk/samsung/clk-exynos5433.c
+++ b/drivers/clk/samsung/clk-exynos5433.c
@@ -3675,44 +3675,32 @@ static const struct exynos_cpuclk_cfg_data exynos5433_apolloclk_d[] __initconst
{ 0 },
};
+static const struct samsung_cpu_clock apollo_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_SCLK_APOLLO, "apolloclk", CLK_MOUT_APOLLO_PLL,
+ CLK_MOUT_BUS_PLL_APOLLO_USER,
+ CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
+ exynos5433_apolloclk_d),
+};
+
+static const struct samsung_cmu_info apollo_cmu_info __initconst = {
+ .pll_clks = apollo_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(apollo_pll_clks),
+ .mux_clks = apollo_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(apollo_mux_clks),
+ .div_clks = apollo_div_clks,
+ .nr_div_clks = ARRAY_SIZE(apollo_div_clks),
+ .gate_clks = apollo_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(apollo_gate_clks),
+ .cpu_clks = apollo_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(apollo_cpu_clks),
+ .nr_clk_ids = APOLLO_NR_CLK,
+ .clk_regs = apollo_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(apollo_clk_regs),
+};
+
static void __init exynos5433_cmu_apollo_init(struct device_node *np)
{
- void __iomem *reg_base;
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- panic("%s: failed to map registers\n", __func__);
- return;
- }
-
- ctx = samsung_clk_init(np, reg_base, APOLLO_NR_CLK);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return;
- }
-
- samsung_clk_register_pll(ctx, apollo_pll_clks,
- ARRAY_SIZE(apollo_pll_clks), reg_base);
- samsung_clk_register_mux(ctx, apollo_mux_clks,
- ARRAY_SIZE(apollo_mux_clks));
- samsung_clk_register_div(ctx, apollo_div_clks,
- ARRAY_SIZE(apollo_div_clks));
- samsung_clk_register_gate(ctx, apollo_gate_clks,
- ARRAY_SIZE(apollo_gate_clks));
-
- hws = ctx->clk_data.hws;
-
- exynos_register_cpu_clock(ctx, CLK_SCLK_APOLLO, "apolloclk",
- hws[CLK_MOUT_APOLLO_PLL], hws[CLK_MOUT_BUS_PLL_APOLLO_USER], 0x200,
- exynos5433_apolloclk_d, ARRAY_SIZE(exynos5433_apolloclk_d),
- CLK_CPU_HAS_E5433_REGS_LAYOUT);
-
- samsung_clk_sleep_init(reg_base, apollo_clk_regs,
- ARRAY_SIZE(apollo_clk_regs));
-
- samsung_clk_of_add_provider(np, ctx);
+ samsung_cmu_register_one(np, &apollo_cmu_info);
}
CLK_OF_DECLARE(exynos5433_cmu_apollo, "samsung,exynos5433-cmu-apollo",
exynos5433_cmu_apollo_init);
@@ -3932,44 +3920,32 @@ static const struct exynos_cpuclk_cfg_data exynos5433_atlasclk_d[] __initconst =
{ 0 },
};
-static void __init exynos5433_cmu_atlas_init(struct device_node *np)
-{
- void __iomem *reg_base;
- struct samsung_clk_provider *ctx;
- struct clk_hw **hws;
-
- reg_base = of_iomap(np, 0);
- if (!reg_base) {
- panic("%s: failed to map registers\n", __func__);
- return;
- }
-
- ctx = samsung_clk_init(np, reg_base, ATLAS_NR_CLK);
- if (!ctx) {
- panic("%s: unable to allocate ctx\n", __func__);
- return;
- }
-
- samsung_clk_register_pll(ctx, atlas_pll_clks,
- ARRAY_SIZE(atlas_pll_clks), reg_base);
- samsung_clk_register_mux(ctx, atlas_mux_clks,
- ARRAY_SIZE(atlas_mux_clks));
- samsung_clk_register_div(ctx, atlas_div_clks,
- ARRAY_SIZE(atlas_div_clks));
- samsung_clk_register_gate(ctx, atlas_gate_clks,
- ARRAY_SIZE(atlas_gate_clks));
-
- hws = ctx->clk_data.hws;
-
- exynos_register_cpu_clock(ctx, CLK_SCLK_ATLAS, "atlasclk",
- hws[CLK_MOUT_ATLAS_PLL], hws[CLK_MOUT_BUS_PLL_ATLAS_USER], 0x200,
- exynos5433_atlasclk_d, ARRAY_SIZE(exynos5433_atlasclk_d),
- CLK_CPU_HAS_E5433_REGS_LAYOUT);
+static const struct samsung_cpu_clock atlas_cpu_clks[] __initconst = {
+ CPU_CLK(CLK_SCLK_ATLAS, "atlasclk", CLK_MOUT_ATLAS_PLL,
+ CLK_MOUT_BUS_PLL_ATLAS_USER,
+ CLK_CPU_HAS_E5433_REGS_LAYOUT, 0x200,
+ exynos5433_atlasclk_d),
+};
- samsung_clk_sleep_init(reg_base, atlas_clk_regs,
- ARRAY_SIZE(atlas_clk_regs));
+static const struct samsung_cmu_info atlas_cmu_info __initconst = {
+ .pll_clks = atlas_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(atlas_pll_clks),
+ .mux_clks = atlas_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(atlas_mux_clks),
+ .div_clks = atlas_div_clks,
+ .nr_div_clks = ARRAY_SIZE(atlas_div_clks),
+ .gate_clks = atlas_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(atlas_gate_clks),
+ .cpu_clks = atlas_cpu_clks,
+ .nr_cpu_clks = ARRAY_SIZE(atlas_cpu_clks),
+ .nr_clk_ids = ATLAS_NR_CLK,
+ .clk_regs = atlas_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(atlas_clk_regs),
+};
- samsung_clk_of_add_provider(np, ctx);
+static void __init exynos5433_cmu_atlas_init(struct device_node *np)
+{
+ samsung_cmu_register_one(np, &atlas_cmu_info);
}
CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
exynos5433_cmu_atlas_init);
@@ -5564,7 +5540,6 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
struct exynos5433_cmu_data *data;
struct samsung_clk_provider *ctx;
struct device *dev = &pdev->dev;
- struct resource *res;
void __iomem *reg_base;
int i;
@@ -5577,8 +5552,7 @@ static int __init exynos5433_cmu_probe(struct platform_device *pdev)
return -ENOMEM;
ctx = &data->ctx;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk-exynos850.c b/drivers/clk/samsung/clk-exynos850.c
new file mode 100644
index 000000000000..2294989e244c
--- /dev/null
+++ b/drivers/clk/samsung/clk-exynos850.c
@@ -0,0 +1,835 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2021 Linaro Ltd.
+ * Author: Sam Protsenko <semen.protsenko@linaro.org>
+ *
+ * Common Clock Framework support for Exynos850 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <dt-bindings/clock/exynos850.h>
+
+#include "clk.h"
+
+/* Gate register bits */
+#define GATE_MANUAL BIT(20)
+#define GATE_ENABLE_HWACG BIT(28)
+
+/* Gate register offsets range */
+#define GATE_OFF_START 0x2000
+#define GATE_OFF_END 0x2fff
+
+/**
+ * exynos850_init_clocks - Set clocks initial configuration
+ * @np: CMU device tree node with "reg" property (CMU addr)
+ * @reg_offs: Register offsets array for clocks to init
+ * @reg_offs_len: Number of register offsets in reg_offs array
+ *
+ * Set manual control mode for all gate clocks.
+ */
+static void __init exynos850_init_clocks(struct device_node *np,
+ const unsigned long *reg_offs, size_t reg_offs_len)
+{
+ void __iomem *reg_base;
+ size_t i;
+
+ reg_base = of_iomap(np, 0);
+ if (!reg_base)
+ panic("%s: failed to map registers\n", __func__);
+
+ for (i = 0; i < reg_offs_len; ++i) {
+ void __iomem *reg = reg_base + reg_offs[i];
+ u32 val;
+
+ /* Modify only gate clock registers */
+ if (reg_offs[i] < GATE_OFF_START || reg_offs[i] > GATE_OFF_END)
+ continue;
+
+ val = readl(reg);
+ val |= GATE_MANUAL;
+ val &= ~GATE_ENABLE_HWACG;
+ writel(val, reg);
+ }
+
+ iounmap(reg_base);
+}
+
+/* ---- CMU_TOP ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_TOP (0x120e0000) */
+#define PLL_LOCKTIME_PLL_MMC 0x0000
+#define PLL_LOCKTIME_PLL_SHARED0 0x0004
+#define PLL_LOCKTIME_PLL_SHARED1 0x0008
+#define PLL_CON0_PLL_MMC 0x0100
+#define PLL_CON3_PLL_MMC 0x010c
+#define PLL_CON0_PLL_SHARED0 0x0140
+#define PLL_CON3_PLL_SHARED0 0x014c
+#define PLL_CON0_PLL_SHARED1 0x0180
+#define PLL_CON3_PLL_SHARED1 0x018c
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_BUS 0x1014
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_CCI 0x1018
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD 0x101c
+#define CLK_CON_MUX_MUX_CLKCMU_CORE_SSS 0x1020
+#define CLK_CON_MUX_MUX_CLKCMU_DPU 0x1034
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_BUS 0x103c
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD 0x1040
+#define CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD 0x1044
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_BUS 0x1070
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_IP 0x1074
+#define CLK_CON_MUX_MUX_CLKCMU_PERI_UART 0x1078
+#define CLK_CON_DIV_CLKCMU_CORE_BUS 0x1820
+#define CLK_CON_DIV_CLKCMU_CORE_CCI 0x1824
+#define CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD 0x1828
+#define CLK_CON_DIV_CLKCMU_CORE_SSS 0x182c
+#define CLK_CON_DIV_CLKCMU_DPU 0x1840
+#define CLK_CON_DIV_CLKCMU_HSI_BUS 0x1848
+#define CLK_CON_DIV_CLKCMU_HSI_MMC_CARD 0x184c
+#define CLK_CON_DIV_CLKCMU_HSI_USB20DRD 0x1850
+#define CLK_CON_DIV_CLKCMU_PERI_BUS 0x187c
+#define CLK_CON_DIV_CLKCMU_PERI_IP 0x1880
+#define CLK_CON_DIV_CLKCMU_PERI_UART 0x1884
+#define CLK_CON_DIV_PLL_SHARED0_DIV2 0x188c
+#define CLK_CON_DIV_PLL_SHARED0_DIV3 0x1890
+#define CLK_CON_DIV_PLL_SHARED0_DIV4 0x1894
+#define CLK_CON_DIV_PLL_SHARED1_DIV2 0x1898
+#define CLK_CON_DIV_PLL_SHARED1_DIV3 0x189c
+#define CLK_CON_DIV_PLL_SHARED1_DIV4 0x18a0
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_BUS 0x201c
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_CCI 0x2020
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD 0x2024
+#define CLK_CON_GAT_GATE_CLKCMU_CORE_SSS 0x2028
+#define CLK_CON_GAT_GATE_CLKCMU_DPU 0x203c
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_BUS 0x2044
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD 0x2048
+#define CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD 0x204c
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_BUS 0x2080
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_IP 0x2084
+#define CLK_CON_GAT_GATE_CLKCMU_PERI_UART 0x2088
+
+static const unsigned long top_clk_regs[] __initconst = {
+ PLL_LOCKTIME_PLL_MMC,
+ PLL_LOCKTIME_PLL_SHARED0,
+ PLL_LOCKTIME_PLL_SHARED1,
+ PLL_CON0_PLL_MMC,
+ PLL_CON3_PLL_MMC,
+ PLL_CON0_PLL_SHARED0,
+ PLL_CON3_PLL_SHARED0,
+ PLL_CON0_PLL_SHARED1,
+ PLL_CON3_PLL_SHARED1,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_SSS,
+ CLK_CON_MUX_MUX_CLKCMU_DPU,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_IP,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART,
+ CLK_CON_DIV_CLKCMU_CORE_BUS,
+ CLK_CON_DIV_CLKCMU_CORE_CCI,
+ CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_DIV_CLKCMU_CORE_SSS,
+ CLK_CON_DIV_CLKCMU_DPU,
+ CLK_CON_DIV_CLKCMU_HSI_BUS,
+ CLK_CON_DIV_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_DIV_CLKCMU_HSI_USB20DRD,
+ CLK_CON_DIV_CLKCMU_PERI_BUS,
+ CLK_CON_DIV_CLKCMU_PERI_IP,
+ CLK_CON_DIV_CLKCMU_PERI_UART,
+ CLK_CON_DIV_PLL_SHARED0_DIV2,
+ CLK_CON_DIV_PLL_SHARED0_DIV3,
+ CLK_CON_DIV_PLL_SHARED0_DIV4,
+ CLK_CON_DIV_PLL_SHARED1_DIV2,
+ CLK_CON_DIV_PLL_SHARED1_DIV3,
+ CLK_CON_DIV_PLL_SHARED1_DIV4,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD,
+ CLK_CON_GAT_GATE_CLKCMU_CORE_SSS,
+ CLK_CON_GAT_GATE_CLKCMU_DPU,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD,
+ CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_IP,
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART,
+};
+
+/*
+ * Do not provide PLL tables to core PLLs, as MANUAL_PLL_CTRL bit is not set
+ * for those PLLs by default, so set_rate operation would fail.
+ */
+static const struct samsung_pll_clock top_pll_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ PLL(pll_0822x, CLK_FOUT_SHARED0_PLL, "fout_shared0_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED0, PLL_CON3_PLL_SHARED0,
+ NULL),
+ PLL(pll_0822x, CLK_FOUT_SHARED1_PLL, "fout_shared1_pll", "oscclk",
+ PLL_LOCKTIME_PLL_SHARED1, PLL_CON3_PLL_SHARED1,
+ NULL),
+ PLL(pll_0831x, CLK_FOUT_MMC_PLL, "fout_mmc_pll", "oscclk",
+ PLL_LOCKTIME_PLL_MMC, PLL_CON3_PLL_MMC, NULL),
+};
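/*
 * Illustration only, with hypothetical divider values: if MANUAL_PLL_CTRL
 * were set for fout_shared0_pll, a rate table could be passed in place of
 * NULL. Only fields referenced by the pll_0822x set_rate code later in
 * this change are assumed here.
 */
static const struct samsung_pll_rate_table shared0_pll_rates_example[] = {
	/* rate = fin * mdiv / (pdiv << sdiv) = 26 MHz * 400 / (13 << 1) */
	{ .rate = 400000000U, .mdiv = 400, .pdiv = 13, .sdiv = 1 },
	{ /* sentinel (rate == 0 terminates the table) */ },
};
/* The PLL() entry would then take shared0_pll_rates_example as its last argument. */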
+
+/* List of parent clocks for Muxes in CMU_TOP */
+PNAME(mout_shared0_pll_p) = { "oscclk", "fout_shared0_pll" };
+PNAME(mout_shared1_pll_p) = { "oscclk", "fout_shared1_pll" };
+PNAME(mout_mmc_pll_p) = { "oscclk", "fout_mmc_pll" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_CORE */
+PNAME(mout_core_bus_p) = { "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "dout_shared0_div4" };
+PNAME(mout_core_cci_p) = { "dout_shared0_div2", "dout_shared1_div2",
+ "dout_shared0_div3", "dout_shared1_div3" };
+PNAME(mout_core_mmc_embd_p) = { "oscclk", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "mout_mmc_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_core_sss_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_HSI */
+PNAME(mout_hsi_bus_p) = { "dout_shared0_div2", "dout_shared1_div2" };
+PNAME(mout_hsi_mmc_card_p) = { "oscclk", "dout_shared0_div2",
+ "dout_shared1_div2", "dout_shared0_div3",
+ "dout_shared1_div3", "mout_mmc_pll",
+ "oscclk", "oscclk" };
+PNAME(mout_hsi_usb20drd_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_PERI */
+PNAME(mout_peri_bus_p) = { "dout_shared0_div4", "dout_shared1_div4" };
+PNAME(mout_peri_uart_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+PNAME(mout_peri_ip_p) = { "oscclk", "dout_shared0_div4",
+ "dout_shared1_div4", "oscclk" };
+
+/* List of parent clocks for Muxes in CMU_TOP: for CMU_DPU */
+PNAME(mout_dpu_p) = { "dout_shared0_div3", "dout_shared1_div3",
+ "dout_shared0_div4", "dout_shared1_div4" };
+
+static const struct samsung_mux_clock top_mux_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ MUX(CLK_MOUT_SHARED0_PLL, "mout_shared0_pll", mout_shared0_pll_p,
+ PLL_CON0_PLL_SHARED0, 4, 1),
+ MUX(CLK_MOUT_SHARED1_PLL, "mout_shared1_pll", mout_shared1_pll_p,
+ PLL_CON0_PLL_SHARED1, 4, 1),
+ MUX(CLK_MOUT_MMC_PLL, "mout_mmc_pll", mout_mmc_pll_p,
+ PLL_CON0_PLL_MMC, 4, 1),
+
+ /* CORE */
+ MUX(CLK_MOUT_CORE_BUS, "mout_core_bus", mout_core_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_BUS, 0, 2),
+ MUX(CLK_MOUT_CORE_CCI, "mout_core_cci", mout_core_cci_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_CCI, 0, 2),
+ MUX(CLK_MOUT_CORE_MMC_EMBD, "mout_core_mmc_embd", mout_core_mmc_embd_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_MMC_EMBD, 0, 3),
+ MUX(CLK_MOUT_CORE_SSS, "mout_core_sss", mout_core_sss_p,
+ CLK_CON_MUX_MUX_CLKCMU_CORE_SSS, 0, 2),
+
+ /* DPU */
+ MUX(CLK_MOUT_DPU, "mout_dpu", mout_dpu_p,
+ CLK_CON_MUX_MUX_CLKCMU_DPU, 0, 2),
+
+ /* HSI */
+ MUX(CLK_MOUT_HSI_BUS, "mout_hsi_bus", mout_hsi_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_BUS, 0, 1),
+ MUX(CLK_MOUT_HSI_MMC_CARD, "mout_hsi_mmc_card", mout_hsi_mmc_card_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_MMC_CARD, 0, 3),
+ MUX(CLK_MOUT_HSI_USB20DRD, "mout_hsi_usb20drd", mout_hsi_usb20drd_p,
+ CLK_CON_MUX_MUX_CLKCMU_HSI_USB20DRD, 0, 2),
+
+ /* PERI */
+ MUX(CLK_MOUT_PERI_BUS, "mout_peri_bus", mout_peri_bus_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_BUS, 0, 1),
+ MUX(CLK_MOUT_PERI_UART, "mout_peri_uart", mout_peri_uart_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_UART, 0, 2),
+ MUX(CLK_MOUT_PERI_IP, "mout_peri_ip", mout_peri_ip_p,
+ CLK_CON_MUX_MUX_CLKCMU_PERI_IP, 0, 2),
+};
+
+static const struct samsung_div_clock top_div_clks[] __initconst = {
+ /* CMU_TOP_PURECLKCOMP */
+ DIV(CLK_DOUT_SHARED0_DIV3, "dout_shared0_div3", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED0_DIV2, "dout_shared0_div2", "mout_shared0_pll",
+ CLK_CON_DIV_PLL_SHARED0_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV3, "dout_shared1_div3", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV3, 0, 2),
+ DIV(CLK_DOUT_SHARED1_DIV2, "dout_shared1_div2", "mout_shared1_pll",
+ CLK_CON_DIV_PLL_SHARED1_DIV2, 0, 1),
+ DIV(CLK_DOUT_SHARED0_DIV4, "dout_shared0_div4", "dout_shared0_div2",
+ CLK_CON_DIV_PLL_SHARED0_DIV4, 0, 1),
+ DIV(CLK_DOUT_SHARED1_DIV4, "dout_shared1_div4", "dout_shared1_div2",
+ CLK_CON_DIV_PLL_SHARED1_DIV4, 0, 1),
+
+ /* CORE */
+ DIV(CLK_DOUT_CORE_BUS, "dout_core_bus", "gout_core_bus",
+ CLK_CON_DIV_CLKCMU_CORE_BUS, 0, 4),
+ DIV(CLK_DOUT_CORE_CCI, "dout_core_cci", "gout_core_cci",
+ CLK_CON_DIV_CLKCMU_CORE_CCI, 0, 4),
+ DIV(CLK_DOUT_CORE_MMC_EMBD, "dout_core_mmc_embd", "gout_core_mmc_embd",
+ CLK_CON_DIV_CLKCMU_CORE_MMC_EMBD, 0, 9),
+ DIV(CLK_DOUT_CORE_SSS, "dout_core_sss", "gout_core_sss",
+ CLK_CON_DIV_CLKCMU_CORE_SSS, 0, 4),
+
+ /* DPU */
+ DIV(CLK_DOUT_DPU, "dout_dpu", "gout_dpu",
+ CLK_CON_DIV_CLKCMU_DPU, 0, 4),
+
+ /* HSI */
+ DIV(CLK_DOUT_HSI_BUS, "dout_hsi_bus", "gout_hsi_bus",
+ CLK_CON_DIV_CLKCMU_HSI_BUS, 0, 4),
+ DIV(CLK_DOUT_HSI_MMC_CARD, "dout_hsi_mmc_card", "gout_hsi_mmc_card",
+ CLK_CON_DIV_CLKCMU_HSI_MMC_CARD, 0, 9),
+ DIV(CLK_DOUT_HSI_USB20DRD, "dout_hsi_usb20drd", "gout_hsi_usb20drd",
+ CLK_CON_DIV_CLKCMU_HSI_USB20DRD, 0, 4),
+
+ /* PERI */
+ DIV(CLK_DOUT_PERI_BUS, "dout_peri_bus", "gout_peri_bus",
+ CLK_CON_DIV_CLKCMU_PERI_BUS, 0, 4),
+ DIV(CLK_DOUT_PERI_UART, "dout_peri_uart", "gout_peri_uart",
+ CLK_CON_DIV_CLKCMU_PERI_UART, 0, 4),
+ DIV(CLK_DOUT_PERI_IP, "dout_peri_ip", "gout_peri_ip",
+ CLK_CON_DIV_CLKCMU_PERI_IP, 0, 4),
+};
+
+static const struct samsung_gate_clock top_gate_clks[] __initconst = {
+ /* CORE */
+ GATE(CLK_GOUT_CORE_BUS, "gout_core_bus", "mout_core_bus",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_CCI, "gout_core_cci", "mout_core_cci",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_CCI, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_MMC_EMBD, "gout_core_mmc_embd", "mout_core_mmc_embd",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_MMC_EMBD, 21, 0, 0),
+ GATE(CLK_GOUT_CORE_SSS, "gout_core_sss", "mout_core_sss",
+ CLK_CON_GAT_GATE_CLKCMU_CORE_SSS, 21, 0, 0),
+
+ /* DPU */
+ GATE(CLK_GOUT_DPU, "gout_dpu", "mout_dpu",
+ CLK_CON_GAT_GATE_CLKCMU_DPU, 21, 0, 0),
+
+ /* HSI */
+ GATE(CLK_GOUT_HSI_BUS, "gout_hsi_bus", "mout_hsi_bus",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_HSI_MMC_CARD, "gout_hsi_mmc_card", "mout_hsi_mmc_card",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_MMC_CARD, 21, 0, 0),
+ GATE(CLK_GOUT_HSI_USB20DRD, "gout_hsi_usb20drd", "mout_hsi_usb20drd",
+ CLK_CON_GAT_GATE_CLKCMU_HSI_USB20DRD, 21, 0, 0),
+
+ /* PERI */
+ GATE(CLK_GOUT_PERI_BUS, "gout_peri_bus", "mout_peri_bus",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_BUS, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_UART, "gout_peri_uart", "mout_peri_uart",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_UART, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_IP, "gout_peri_ip", "mout_peri_ip",
+ CLK_CON_GAT_GATE_CLKCMU_PERI_IP, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info top_cmu_info __initconst = {
+ .pll_clks = top_pll_clks,
+ .nr_pll_clks = ARRAY_SIZE(top_pll_clks),
+ .mux_clks = top_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(top_mux_clks),
+ .div_clks = top_div_clks,
+ .nr_div_clks = ARRAY_SIZE(top_div_clks),
+ .gate_clks = top_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(top_gate_clks),
+ .nr_clk_ids = TOP_NR_CLK,
+ .clk_regs = top_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(top_clk_regs),
+};
+
+static void __init exynos850_cmu_top_init(struct device_node *np)
+{
+ exynos850_init_clocks(np, top_clk_regs, ARRAY_SIZE(top_clk_regs));
+ samsung_cmu_register_one(np, &top_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos850_cmu_top, "samsung,exynos850-cmu-top",
+ exynos850_cmu_top_init);
+
+/* ---- CMU_HSI ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_HSI (0x13400000) */
+#define PLL_CON0_MUX_CLKCMU_HSI_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER 0x0620
+#define CLK_CON_MUX_MUX_CLK_HSI_RTC 0x1000
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV 0x2008
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50 0x200c
+#define CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26 0x2010
+#define CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK 0x2018
+#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK 0x2024
+#define CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN 0x2028
+#define CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20 0x203c
+#define CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY 0x2040
+
+static const unsigned long hsi_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_HSI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
+ PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
+ CLK_CON_MUX_MUX_CLK_HSI_RTC,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50,
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26,
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK,
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK,
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN,
+ CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK,
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20,
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY,
+};
+
+/* List of parent clocks for Muxes in CMU_HSI */
+PNAME(mout_hsi_bus_user_p) = { "oscclk", "dout_hsi_bus" };
+PNAME(mout_hsi_mmc_card_user_p) = { "oscclk", "dout_hsi_mmc_card" };
+PNAME(mout_hsi_usb20drd_user_p) = { "oscclk", "dout_hsi_usb20drd" };
+PNAME(mout_hsi_rtc_p) = { "rtcclk", "oscclk" };
+
+static const struct samsung_mux_clock hsi_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_HSI_BUS_USER, "mout_hsi_bus_user", mout_hsi_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_HSI_BUS_USER, 4, 1),
+ MUX_F(CLK_MOUT_HSI_MMC_CARD_USER, "mout_hsi_mmc_card_user",
+ mout_hsi_mmc_card_user_p, PLL_CON0_MUX_CLKCMU_HSI_MMC_CARD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_HSI_USB20DRD_USER, "mout_hsi_usb20drd_user",
+ mout_hsi_usb20drd_user_p, PLL_CON0_MUX_CLKCMU_HSI_USB20DRD_USER,
+ 4, 1),
+ MUX(CLK_MOUT_HSI_RTC, "mout_hsi_rtc", mout_hsi_rtc_p,
+ CLK_CON_MUX_MUX_CLK_HSI_RTC, 0, 1),
+};
+
+static const struct samsung_gate_clock hsi_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_USB_RTC_CLK, "gout_usb_rtc", "mout_hsi_rtc",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_RTC_CLK__ALV, 21, 0, 0),
+ GATE(CLK_GOUT_USB_REF_CLK, "gout_usb_ref", "mout_hsi_usb20drd_user",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_REF_CLK_50, 21, 0, 0),
+ GATE(CLK_GOUT_USB_PHY_REF_CLK, "gout_usb_phy_ref", "oscclk",
+ CLK_CON_GAT_HSI_USB20DRD_TOP_I_PHY_REFCLK_26, 21, 0, 0),
+ GATE(CLK_GOUT_GPIO_HSI_PCLK, "gout_gpio_hsi_pclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_GPIO_HSI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_ACLK, "gout_mmc_card_aclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_CARD_SDCLKIN, "gout_mmc_card_sdclkin",
+ "mout_hsi_mmc_card_user",
+ CLK_CON_GAT_GOUT_HSI_MMC_CARD_SDCLKIN, 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_SYSREG_HSI_PCLK, "gout_sysreg_hsi_pclk",
+ "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_SYSREG_HSI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_USB_PHY_ACLK, "gout_usb_phy_aclk", "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_ACLK_PHYCTRL_20, 21, 0, 0),
+ GATE(CLK_GOUT_USB_BUS_EARLY_CLK, "gout_usb_bus_early",
+ "mout_hsi_bus_user",
+ CLK_CON_GAT_GOUT_HSI_USB20DRD_TOP_BUS_CLK_EARLY, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info hsi_cmu_info __initconst = {
+ .mux_clks = hsi_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(hsi_mux_clks),
+ .gate_clks = hsi_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(hsi_gate_clks),
+ .nr_clk_ids = HSI_NR_CLK,
+ .clk_regs = hsi_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(hsi_clk_regs),
+ .clk_name = "dout_hsi_bus",
+};
+
+/* ---- CMU_PERI ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_PERI (0x10030000) */
+#define PLL_CON0_MUX_CLKCMU_PERI_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_PERI_SPI_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_PERI_UART_USER 0x0630
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0 0x1800
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1 0x1804
+#define CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2 0x1808
+#define CLK_CON_DIV_DIV_CLK_PERI_SPI_0 0x180c
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0 0x200c
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1 0x2010
+#define CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2 0x2014
+#define CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK 0x2020
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK 0x2024
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK 0x2028
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK 0x202c
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK 0x2030
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK 0x2034
+#define CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK 0x2038
+#define CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK 0x203c
+#define CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK 0x2040
+#define CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK 0x2044
+#define CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK 0x2048
+#define CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK 0x204c
+#define CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK 0x2050
+#define CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK 0x2054
+#define CLK_CON_GAT_GOUT_PERI_MCT_PCLK 0x205c
+#define CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK 0x2064
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK 0x209c
+#define CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK 0x20a0
+#define CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK 0x20a4
+#define CLK_CON_GAT_GOUT_PERI_UART_IPCLK 0x20a8
+#define CLK_CON_GAT_GOUT_PERI_UART_PCLK 0x20ac
+#define CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK 0x20b0
+#define CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK 0x20b4
+
+static const unsigned long peri_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI_USER,
+ PLL_CON0_MUX_CLKCMU_PERI_UART_USER,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1,
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2,
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1,
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2,
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK,
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK,
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK,
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_IPCLK,
+ CLK_CON_GAT_GOUT_PERI_UART_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK,
+ CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_PERI */
+PNAME(mout_peri_bus_user_p) = { "oscclk", "dout_peri_bus" };
+PNAME(mout_peri_uart_user_p) = { "oscclk", "dout_peri_uart" };
+PNAME(mout_peri_hsi2c_user_p) = { "oscclk", "dout_peri_ip" };
+PNAME(mout_peri_spi_user_p) = { "oscclk", "dout_peri_ip" };
+
+static const struct samsung_mux_clock peri_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_PERI_BUS_USER, "mout_peri_bus_user", mout_peri_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_UART_USER, "mout_peri_uart_user",
+ mout_peri_uart_user_p, PLL_CON0_MUX_CLKCMU_PERI_UART_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_HSI2C_USER, "mout_peri_hsi2c_user",
+ mout_peri_hsi2c_user_p, PLL_CON0_MUX_CLKCMU_PERI_HSI2C_USER, 4, 1),
+ MUX(CLK_MOUT_PERI_SPI_USER, "mout_peri_spi_user", mout_peri_spi_user_p,
+ PLL_CON0_MUX_CLKCMU_PERI_SPI_USER, 4, 1),
+};
+
+static const struct samsung_div_clock peri_div_clks[] __initconst = {
+ DIV(CLK_DOUT_PERI_HSI2C0, "dout_peri_hsi2c0", "gout_peri_hsi2c0",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_0, 0, 5),
+ DIV(CLK_DOUT_PERI_HSI2C1, "dout_peri_hsi2c1", "gout_peri_hsi2c1",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_1, 0, 5),
+ DIV(CLK_DOUT_PERI_HSI2C2, "dout_peri_hsi2c2", "gout_peri_hsi2c2",
+ CLK_CON_DIV_DIV_CLK_PERI_HSI2C_2, 0, 5),
+ DIV(CLK_DOUT_PERI_SPI0, "dout_peri_spi0", "mout_peri_spi_user",
+ CLK_CON_DIV_DIV_CLK_PERI_SPI_0, 0, 5),
+};
+
+static const struct samsung_gate_clock peri_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_PERI_HSI2C0, "gout_peri_hsi2c0", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_0, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_HSI2C1, "gout_peri_hsi2c1", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_1, 21, 0, 0),
+ GATE(CLK_GOUT_PERI_HSI2C2, "gout_peri_hsi2c2", "mout_peri_hsi2c_user",
+ CLK_CON_GAT_GATE_CLK_PERI_HSI2C_2, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C0_IPCLK, "gout_hsi2c0_ipclk", "dout_peri_hsi2c0",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C0_PCLK, "gout_hsi2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_IPCLK, "gout_hsi2c1_ipclk", "dout_peri_hsi2c1",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C1_PCLK, "gout_hsi2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_IPCLK, "gout_hsi2c2_ipclk", "dout_peri_hsi2c2",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_HSI2C2_PCLK, "gout_hsi2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_HSI2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C0_PCLK, "gout_i2c0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C1_PCLK, "gout_i2c1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C2_PCLK, "gout_i2c2_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_2_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C3_PCLK, "gout_i2c3_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_3_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C4_PCLK, "gout_i2c4_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_4_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C5_PCLK, "gout_i2c5_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_5_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_I2C6_PCLK, "gout_i2c6_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_I2C_6_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_MCT_PCLK, "gout_mct_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_MCT_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_PWM_MOTOR_PCLK, "gout_pwm_motor_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_PWM_MOTOR_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_IPCLK, "gout_spi0_ipclk", "dout_peri_spi0",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SPI0_PCLK, "gout_spi0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SPI_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_SYSREG_PERI_PCLK, "gout_sysreg_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_SYSREG_PERI_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART_IPCLK, "gout_uart_ipclk", "mout_peri_uart_user",
+ CLK_CON_GAT_GOUT_PERI_UART_IPCLK, 21, 0, 0),
+ GATE(CLK_GOUT_UART_PCLK, "gout_uart_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_UART_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT0_PCLK, "gout_wdt0_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_0_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_WDT1_PCLK, "gout_wdt1_pclk", "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_WDT_1_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_GPIO_PERI_PCLK, "gout_gpio_peri_pclk",
+ "mout_peri_bus_user",
+ CLK_CON_GAT_GOUT_PERI_GPIO_PERI_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info peri_cmu_info __initconst = {
+ .mux_clks = peri_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(peri_mux_clks),
+ .div_clks = peri_div_clks,
+ .nr_div_clks = ARRAY_SIZE(peri_div_clks),
+ .gate_clks = peri_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(peri_gate_clks),
+ .nr_clk_ids = PERI_NR_CLK,
+ .clk_regs = peri_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(peri_clk_regs),
+ .clk_name = "dout_peri_bus",
+};
+
+/* ---- CMU_CORE ------------------------------------------------------------ */
+
+/* Register Offset definitions for CMU_CORE (0x12000000) */
+#define PLL_CON0_MUX_CLKCMU_CORE_BUS_USER 0x0600
+#define PLL_CON0_MUX_CLKCMU_CORE_CCI_USER 0x0610
+#define PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER 0x0620
+#define PLL_CON0_MUX_CLKCMU_CORE_SSS_USER 0x0630
+#define CLK_CON_MUX_MUX_CLK_CORE_GIC 0x1000
+#define CLK_CON_DIV_DIV_CLK_CORE_BUSP 0x1800
+#define CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK 0x2038
+#define CLK_CON_GAT_GOUT_CORE_GIC_CLK 0x2040
+#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK 0x20e8
+#define CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN 0x20ec
+#define CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK 0x2128
+#define CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK 0x212c
+
+static const unsigned long core_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
+ PLL_CON0_MUX_CLKCMU_CORE_SSS_USER,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC,
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP,
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK,
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK,
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK,
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
+ CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK,
+ CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_CORE */
+PNAME(mout_core_bus_user_p) = { "oscclk", "dout_core_bus" };
+PNAME(mout_core_cci_user_p) = { "oscclk", "dout_core_cci" };
+PNAME(mout_core_mmc_embd_user_p) = { "oscclk", "dout_core_mmc_embd" };
+PNAME(mout_core_sss_user_p) = { "oscclk", "dout_core_sss" };
+PNAME(mout_core_gic_p) = { "dout_core_busp", "oscclk" };
+
+static const struct samsung_mux_clock core_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_CORE_BUS_USER, "mout_core_bus_user", mout_core_bus_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_BUS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_CCI_USER, "mout_core_cci_user", mout_core_cci_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_CCI_USER, 4, 1),
+ MUX_F(CLK_MOUT_CORE_MMC_EMBD_USER, "mout_core_mmc_embd_user",
+ mout_core_mmc_embd_user_p, PLL_CON0_MUX_CLKCMU_CORE_MMC_EMBD_USER,
+ 4, 1, CLK_SET_RATE_PARENT, 0),
+ MUX(CLK_MOUT_CORE_SSS_USER, "mout_core_sss_user", mout_core_sss_user_p,
+ PLL_CON0_MUX_CLKCMU_CORE_SSS_USER, 4, 1),
+ MUX(CLK_MOUT_CORE_GIC, "mout_core_gic", mout_core_gic_p,
+ CLK_CON_MUX_MUX_CLK_CORE_GIC, 0, 1),
+};
+
+static const struct samsung_div_clock core_div_clks[] __initconst = {
+ DIV(CLK_DOUT_CORE_BUSP, "dout_core_busp", "mout_core_bus_user",
+ CLK_CON_DIV_DIV_CLK_CORE_BUSP, 0, 2),
+};
+
+static const struct samsung_gate_clock core_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_CCI_ACLK, "gout_cci_aclk", "mout_core_cci_user",
+ CLK_CON_GAT_GOUT_CORE_CCI_550_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_GIC_CLK, "gout_gic_clk", "mout_core_gic",
+ CLK_CON_GAT_GOUT_CORE_GIC_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_ACLK, "gout_mmc_embd_aclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_MMC_EMBD_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_MMC_EMBD_SDCLKIN, "gout_mmc_embd_sdclkin",
+ "mout_core_mmc_embd_user", CLK_CON_GAT_GOUT_CORE_MMC_EMBD_SDCLKIN,
+ 21, CLK_SET_RATE_PARENT, 0),
+ GATE(CLK_GOUT_SSS_ACLK, "gout_sss_aclk", "mout_core_sss_user",
+ CLK_CON_GAT_GOUT_CORE_SSS_I_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_SSS_PCLK, "gout_sss_pclk", "dout_core_busp",
+ CLK_CON_GAT_GOUT_CORE_SSS_I_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info core_cmu_info __initconst = {
+ .mux_clks = core_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(core_mux_clks),
+ .div_clks = core_div_clks,
+ .nr_div_clks = ARRAY_SIZE(core_div_clks),
+ .gate_clks = core_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(core_gate_clks),
+ .nr_clk_ids = CORE_NR_CLK,
+ .clk_regs = core_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(core_clk_regs),
+ .clk_name = "dout_core_bus",
+};
+
+/* ---- CMU_DPU ------------------------------------------------------------- */
+
+/* Register Offset definitions for CMU_DPU (0x13000000) */
+#define PLL_CON0_MUX_CLKCMU_DPU_USER 0x0600
+#define CLK_CON_DIV_DIV_CLK_DPU_BUSP 0x1800
+#define CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK 0x2004
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DECON0 0x2010
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DMA 0x2014
+#define CLK_CON_GAT_GOUT_DPU_ACLK_DPP 0x2018
+#define CLK_CON_GAT_GOUT_DPU_PPMU_ACLK 0x2028
+#define CLK_CON_GAT_GOUT_DPU_PPMU_PCLK 0x202c
+#define CLK_CON_GAT_GOUT_DPU_SMMU_CLK 0x2038
+#define CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK 0x203c
+
+static const unsigned long dpu_clk_regs[] __initconst = {
+ PLL_CON0_MUX_CLKCMU_DPU_USER,
+ CLK_CON_DIV_DIV_CLK_DPU_BUSP,
+ CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DECON0,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DMA,
+ CLK_CON_GAT_GOUT_DPU_ACLK_DPP,
+ CLK_CON_GAT_GOUT_DPU_PPMU_ACLK,
+ CLK_CON_GAT_GOUT_DPU_PPMU_PCLK,
+ CLK_CON_GAT_GOUT_DPU_SMMU_CLK,
+ CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK,
+};
+
+/* List of parent clocks for Muxes in CMU_DPU */
+PNAME(mout_dpu_user_p) = { "oscclk", "dout_dpu" };
+
+static const struct samsung_mux_clock dpu_mux_clks[] __initconst = {
+ MUX(CLK_MOUT_DPU_USER, "mout_dpu_user", mout_dpu_user_p,
+ PLL_CON0_MUX_CLKCMU_DPU_USER, 4, 1),
+};
+
+static const struct samsung_div_clock dpu_div_clks[] __initconst = {
+ DIV(CLK_DOUT_DPU_BUSP, "dout_dpu_busp", "mout_dpu_user",
+ CLK_CON_DIV_DIV_CLK_DPU_BUSP, 0, 3),
+};
+
+static const struct samsung_gate_clock dpu_gate_clks[] __initconst = {
+ GATE(CLK_GOUT_DPU_CMU_DPU_PCLK, "gout_dpu_cmu_dpu_pclk",
+ "dout_dpu_busp", CLK_CON_GAT_CLK_DPU_CMU_DPU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DECON0_ACLK, "gout_dpu_decon0_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DECON0, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DMA_ACLK, "gout_dpu_dma_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DMA, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_DPP_ACLK, "gout_dpu_dpp_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_ACLK_DPP, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_PPMU_ACLK, "gout_dpu_ppmu_aclk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_PPMU_ACLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_PPMU_PCLK, "gout_dpu_ppmu_pclk", "dout_dpu_busp",
+ CLK_CON_GAT_GOUT_DPU_PPMU_PCLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_SMMU_CLK, "gout_dpu_smmu_clk", "mout_dpu_user",
+ CLK_CON_GAT_GOUT_DPU_SMMU_CLK, 21, 0, 0),
+ GATE(CLK_GOUT_DPU_SYSREG_PCLK, "gout_dpu_sysreg_pclk", "dout_dpu_busp",
+ CLK_CON_GAT_GOUT_DPU_SYSREG_PCLK, 21, 0, 0),
+};
+
+static const struct samsung_cmu_info dpu_cmu_info __initconst = {
+ .mux_clks = dpu_mux_clks,
+ .nr_mux_clks = ARRAY_SIZE(dpu_mux_clks),
+ .div_clks = dpu_div_clks,
+ .nr_div_clks = ARRAY_SIZE(dpu_div_clks),
+ .gate_clks = dpu_gate_clks,
+ .nr_gate_clks = ARRAY_SIZE(dpu_gate_clks),
+ .nr_clk_ids = DPU_NR_CLK,
+ .clk_regs = dpu_clk_regs,
+ .nr_clk_regs = ARRAY_SIZE(dpu_clk_regs),
+ .clk_name = "dout_dpu",
+};
+
+/* ---- platform_driver ----------------------------------------------------- */
+
+static int __init exynos850_cmu_probe(struct platform_device *pdev)
+{
+ const struct samsung_cmu_info *info;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+
+ info = of_device_get_match_data(dev);
+ exynos850_init_clocks(np, info->clk_regs, info->nr_clk_regs);
+ samsung_cmu_register_one(np, info);
+
+ /* Keep bus clock running, so it's possible to access CMU registers */
+ if (info->clk_name) {
+ struct clk *bus_clk;
+
+ bus_clk = clk_get(dev, info->clk_name);
+ if (IS_ERR(bus_clk)) {
+ pr_err("%s: could not find bus clock %s; err = %ld\n",
+ __func__, info->clk_name, PTR_ERR(bus_clk));
+ } else {
+ clk_prepare_enable(bus_clk);
+ }
+ }
+
+ return 0;
+}
+
+/* CMUs which belong to Power Domains and need runtime PM to be implemented */
+static const struct of_device_id exynos850_cmu_of_match[] = {
+ {
+ .compatible = "samsung,exynos850-cmu-hsi",
+ .data = &hsi_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-peri",
+ .data = &peri_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-core",
+ .data = &core_cmu_info,
+ }, {
+ .compatible = "samsung,exynos850-cmu-dpu",
+ .data = &dpu_cmu_info,
+ }, {
+ },
+};
+
+static struct platform_driver exynos850_cmu_driver __refdata = {
+ .driver = {
+ .name = "exynos850-cmu",
+ .of_match_table = exynos850_cmu_of_match,
+ .suppress_bind_attrs = true,
+ },
+ .probe = exynos850_cmu_probe,
+};
+
+static int __init exynos850_cmu_init(void)
+{
+ return platform_driver_register(&exynos850_cmu_driver);
+}
+core_initcall(exynos850_cmu_init);
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 5873a9354b50..83d1b03647db 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -416,6 +416,186 @@ static const struct clk_ops samsung_pll36xx_clk_min_ops = {
};
/*
+ * PLL0822x Clock Type
+ */
+/* Maximum lock time can be 150 * PDIV cycles */
+#define PLL0822X_LOCK_FACTOR (150)
+
+#define PLL0822X_MDIV_MASK (0x3FF)
+#define PLL0822X_PDIV_MASK (0x3F)
+#define PLL0822X_SDIV_MASK (0x7)
+#define PLL0822X_MDIV_SHIFT (16)
+#define PLL0822X_PDIV_SHIFT (8)
+#define PLL0822X_SDIV_SHIFT (0)
+#define PLL0822X_LOCK_STAT_SHIFT (29)
+#define PLL0822X_ENABLE_SHIFT (31)
+
+static unsigned long samsung_pll0822x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con3;
+ u64 fvco = parent_rate;
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ mdiv = (pll_con3 >> PLL0822X_MDIV_SHIFT) & PLL0822X_MDIV_MASK;
+ pdiv = (pll_con3 >> PLL0822X_PDIV_SHIFT) & PLL0822X_PDIV_MASK;
+ sdiv = (pll_con3 >> PLL0822X_SDIV_SHIFT) & PLL0822X_SDIV_MASK;
+
+ fvco *= mdiv;
+ do_div(fvco, (pdiv << sdiv));
+
+ return (unsigned long)fvco;
+}
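/*
 * Worked example of the formula above (hypothetical divider values):
 * parent = 26 MHz (oscclk), mdiv = 400, pdiv = 13, sdiv = 1:
 *   fvco = 26000000 * 400 / (13 << 1) = 10400000000 / 26 = 400000000 Hz
 */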
+
+static int samsung_pll0822x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long prate)
+{
+ const struct samsung_pll_rate_table *rate;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con3;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ /* Change PLL PMS values */
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con3 &= ~((PLL0822X_MDIV_MASK << PLL0822X_MDIV_SHIFT) |
+ (PLL0822X_PDIV_MASK << PLL0822X_PDIV_SHIFT) |
+ (PLL0822X_SDIV_MASK << PLL0822X_SDIV_SHIFT));
+ pll_con3 |= (rate->mdiv << PLL0822X_MDIV_SHIFT) |
+ (rate->pdiv << PLL0822X_PDIV_SHIFT) |
+ (rate->sdiv << PLL0822X_SDIV_SHIFT);
+
+ /* Set PLL lock time */
+ writel_relaxed(rate->pdiv * PLL0822X_LOCK_FACTOR,
+ pll->lock_reg);
+
+ /* Write PMS values */
+ writel_relaxed(pll_con3, pll->con_reg);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ if (pll_con3 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
+ return 0;
+}
+
+static const struct clk_ops samsung_pll0822x_clk_ops = {
+ .recalc_rate = samsung_pll0822x_recalc_rate,
+ .round_rate = samsung_pll_round_rate,
+ .set_rate = samsung_pll0822x_set_rate,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
+};
+
+static const struct clk_ops samsung_pll0822x_clk_min_ops = {
+ .recalc_rate = samsung_pll0822x_recalc_rate,
+};
+
+/*
+ * PLL0831x Clock Type
+ */
+/* Maximum lock time can be 500 * PDIV cycles */
+#define PLL0831X_LOCK_FACTOR (500)
+
+#define PLL0831X_KDIV_MASK (0xFFFF)
+#define PLL0831X_MDIV_MASK (0x1FF)
+#define PLL0831X_PDIV_MASK (0x3F)
+#define PLL0831X_SDIV_MASK (0x7)
+#define PLL0831X_MDIV_SHIFT (16)
+#define PLL0831X_PDIV_SHIFT (8)
+#define PLL0831X_SDIV_SHIFT (0)
+#define PLL0831X_KDIV_SHIFT (0)
+#define PLL0831X_LOCK_STAT_SHIFT (29)
+#define PLL0831X_ENABLE_SHIFT (31)
+
+static unsigned long samsung_pll0831x_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 mdiv, pdiv, sdiv, pll_con3, pll_con5;
+ s16 kdiv;
+ u64 fvco = parent_rate;
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con5 = readl_relaxed(pll->con_reg + 8);
+ mdiv = (pll_con3 >> PLL0831X_MDIV_SHIFT) & PLL0831X_MDIV_MASK;
+ pdiv = (pll_con3 >> PLL0831X_PDIV_SHIFT) & PLL0831X_PDIV_MASK;
+ sdiv = (pll_con3 >> PLL0831X_SDIV_SHIFT) & PLL0831X_SDIV_MASK;
+ kdiv = (s16)((pll_con5 >> PLL0831X_KDIV_SHIFT) & PLL0831X_KDIV_MASK);
+
+ fvco *= (mdiv << 16) + kdiv;
+ do_div(fvco, (pdiv << sdiv));
+ fvco >>= 16;
+
+ return (unsigned long)fvco;
+}
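/*
 * Worked example of the fractional formula above (hypothetical values):
 * parent = 26 MHz, mdiv = 100, kdiv = 32768 (i.e. +0.5), pdiv = 2, sdiv = 1:
 *   fvco = 26000000 * ((100 << 16) + 32768) / (2 << 1), then >> 16
 *        = 26000000 * 100.5 / 4 = 653250000 Hz
 */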
+
+static int samsung_pll0831x_set_rate(struct clk_hw *hw, unsigned long drate,
+ unsigned long parent_rate)
+{
+ const struct samsung_pll_rate_table *rate;
+ struct samsung_clk_pll *pll = to_clk_pll(hw);
+ u32 pll_con3, pll_con5;
+
+ /* Get required rate settings from table */
+ rate = samsung_get_pll_settings(pll, drate);
+ if (!rate) {
+ pr_err("%s: Invalid rate : %lu for pll clk %s\n", __func__,
+ drate, clk_hw_get_name(hw));
+ return -EINVAL;
+ }
+
+ pll_con3 = readl_relaxed(pll->con_reg);
+ pll_con5 = readl_relaxed(pll->con_reg + 8);
+
+ /* Change PLL PMSK values */
+ pll_con3 &= ~((PLL0831X_MDIV_MASK << PLL0831X_MDIV_SHIFT) |
+ (PLL0831X_PDIV_MASK << PLL0831X_PDIV_SHIFT) |
+ (PLL0831X_SDIV_MASK << PLL0831X_SDIV_SHIFT));
+ pll_con3 |= (rate->mdiv << PLL0831X_MDIV_SHIFT) |
+ (rate->pdiv << PLL0831X_PDIV_SHIFT) |
+ (rate->sdiv << PLL0831X_SDIV_SHIFT);
+ pll_con5 &= ~(PLL0831X_KDIV_MASK << PLL0831X_KDIV_SHIFT);
+ /*
+ * kdiv is 16-bit 2's complement (s16), but stored as unsigned int.
+ * Cast it to u16 to avoid leading 0xffff's in case of negative value.
+ */
+ pll_con5 |= ((u16)rate->kdiv << PLL0831X_KDIV_SHIFT);
+
+ /* Set PLL lock time */
+ writel_relaxed(rate->pdiv * PLL0831X_LOCK_FACTOR, pll->lock_reg);
+
+ /* Write PMSK values */
+ writel_relaxed(pll_con3, pll->con_reg);
+ writel_relaxed(pll_con5, pll->con_reg + 8);
+
+ /* Wait for PLL lock if the PLL is enabled */
+ if (pll_con3 & BIT(pll->enable_offs))
+ return samsung_pll_lock_wait(pll, BIT(pll->lock_offs));
+
+ return 0;
+}
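/*
 * Illustration of the cast above, with a hypothetical kdiv of -100: the
 * 32-bit pattern is 0xffffff9c, and (u16) keeps only 0xff9c, so the OR
 * touches just KDIV[15:0] and leaves the upper bits of PLL_CON5 intact.
 */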
+
+static const struct clk_ops samsung_pll0831x_clk_ops = {
+ .recalc_rate = samsung_pll0831x_recalc_rate,
+ .set_rate = samsung_pll0831x_set_rate,
+ .round_rate = samsung_pll_round_rate,
+ .enable = samsung_pll3xxx_enable,
+ .disable = samsung_pll3xxx_disable,
+};
+
+static const struct clk_ops samsung_pll0831x_clk_min_ops = {
+ .recalc_rate = samsung_pll0831x_recalc_rate,
+};
+
+/*
* PLL45xx Clock Type
*/
#define PLL4502_LOCK_FACTOR 400
@@ -1296,6 +1476,14 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll35xx_clk_ops;
break;
+ case pll_0822x:
+ pll->enable_offs = PLL0822X_ENABLE_SHIFT;
+ pll->lock_offs = PLL0822X_LOCK_STAT_SHIFT;
+ if (!pll->rate_table)
+ init.ops = &samsung_pll0822x_clk_min_ops;
+ else
+ init.ops = &samsung_pll0822x_clk_ops;
+ break;
case pll_4500:
init.ops = &samsung_pll45xx_clk_min_ops;
break;
@@ -1316,6 +1504,14 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
else
init.ops = &samsung_pll36xx_clk_ops;
break;
+ case pll_0831x:
+ pll->enable_offs = PLL0831X_ENABLE_SHIFT;
+ pll->lock_offs = PLL0831X_LOCK_STAT_SHIFT;
+ if (!pll->rate_table)
+ init.ops = &samsung_pll0831x_clk_min_ops;
+ else
+ init.ops = &samsung_pll0831x_clk_ops;
+ break;
case pll_6552:
case pll_6552_s3c2416:
init.ops = &samsung_pll6552_clk_ops;
diff --git a/drivers/clk/samsung/clk-pll.h b/drivers/clk/samsung/clk-pll.h
index 79e41c226b90..a739f2b7ae80 100644
--- a/drivers/clk/samsung/clk-pll.h
+++ b/drivers/clk/samsung/clk-pll.h
@@ -36,6 +36,8 @@ enum samsung_pll_type {
pll_1451x,
pll_1452x,
pll_1460x,
+ pll_0822x,
+ pll_0831x,
};
#define PLL_RATE(_fin, _m, _p, _s, _k, _ks) \
diff --git a/drivers/clk/samsung/clk-s5pv210-audss.c b/drivers/clk/samsung/clk-s5pv210-audss.c
index a7827a120695..b31c00ea331f 100644
--- a/drivers/clk/samsung/clk-s5pv210-audss.c
+++ b/drivers/clk/samsung/clk-s5pv210-audss.c
@@ -63,15 +63,13 @@ static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
static int s5pv210_audss_clk_probe(struct platform_device *pdev)
{
int i, ret = 0;
- struct resource *res;
const char *mout_audss_p[2];
const char *mout_i2s_p[3];
const char *hclk_p;
struct clk_hw **clk_table;
struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg_base = devm_ioremap_resource(&pdev->dev, res);
+ reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg_base))
return PTR_ERR(reg_base);
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 1949ae7851b2..336243c6f120 100644
--- a/drivers/clk/samsung/clk.c
+++ b/drivers/clk/samsung/clk.c
@@ -378,6 +378,8 @@ struct samsung_clk_provider * __init samsung_cmu_register_one(
samsung_clk_extended_sleep_init(reg_base,
cmu->clk_regs, cmu->nr_clk_regs,
cmu->suspend_regs, cmu->nr_suspend_regs);
+ if (cmu->cpu_clks)
+ samsung_clk_register_cpu(ctx, cmu->cpu_clks, cmu->nr_cpu_clks);
samsung_clk_of_add_provider(np, ctx);
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index c1e1a6b2f499..26499e97275b 100644
--- a/drivers/clk/samsung/clk.h
+++ b/drivers/clk/samsung/clk.h
@@ -271,6 +271,27 @@ struct samsung_pll_clock {
__PLL(_typ, _id, _name, _pname, CLK_GET_RATE_NOCACHE, _lock, \
_con, _rtable)
+struct samsung_cpu_clock {
+ unsigned int id;
+ const char *name;
+ unsigned int parent_id;
+ unsigned int alt_parent_id;
+ unsigned long flags;
+ int offset;
+ const struct exynos_cpuclk_cfg_data *cfg;
+};
+
+#define CPU_CLK(_id, _name, _pid, _apid, _flags, _offset, _cfg) \
+ { \
+ .id = _id, \
+ .name = _name, \
+ .parent_id = _pid, \
+ .alt_parent_id = _apid, \
+ .flags = _flags, \
+ .offset = _offset, \
+ .cfg = _cfg, \
+ }
+
struct samsung_clock_reg_cache {
struct list_head node;
void __iomem *reg_base;
@@ -301,6 +322,9 @@ struct samsung_cmu_info {
unsigned int nr_fixed_factor_clks;
/* total number of clocks with IDs assigned*/
unsigned int nr_clk_ids;
+ /* list of cpu clocks and respective count */
+ const struct samsung_cpu_clock *cpu_clks;
+ unsigned int nr_cpu_clks;
/* list and number of clocks registers */
const unsigned long *clk_regs;
@@ -350,6 +374,8 @@ extern void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
const struct samsung_pll_clock *pll_list,
unsigned int nr_clk, void __iomem *base);
+extern void samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
+ const struct samsung_cpu_clock *list, unsigned int nr_clk);
extern struct samsung_clk_provider __init *samsung_cmu_register_one(
struct device_node *,
diff --git a/drivers/clk/sunxi-ng/Kconfig b/drivers/clk/sunxi-ng/Kconfig
index cd46d8853876..e76e1676f0f0 100644
--- a/drivers/clk/sunxi-ng/Kconfig
+++ b/drivers/clk/sunxi-ng/Kconfig
@@ -71,6 +71,7 @@ config SUN8I_A33_CCU
config SUN8I_A83T_CCU
bool "Support for the Allwinner A83T CCU"
default MACH_SUN8I
+ depends on MACH_SUN8I || COMPILE_TEST
config SUN8I_H3_CCU
bool "Support for the Allwinner H3 CCU"
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
index f32366d9336e..bd9a8782fec3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
+++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c
@@ -1464,7 +1464,7 @@ static void __init sun4i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN4I_AHB_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun4i_a10_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
index a56142b90993..804729e0a208 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100-r.c
@@ -196,7 +196,7 @@ static int sun50i_a100_r_ccu_probe(struct platform_device *pdev)
if (IS_ERR(reg))
return PTR_ERR(reg);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_r_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_r_ccu_desc);
}
static const struct of_device_id sun50i_a100_r_ccu_ids[] = {
@@ -208,6 +208,7 @@ static struct platform_driver sun50i_a100_r_ccu_driver = {
.probe = sun50i_a100_r_ccu_probe,
.driver = {
.name = "sun50i-a100-r-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a100_r_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
index 81b48c73d389..1d475d5a3d91 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a100.c
@@ -1247,7 +1247,7 @@ static int sun50i_a100_ccu_probe(struct platform_device *pdev)
writel(val, reg + sun50i_a100_usb2_clk_regs[i]);
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a100_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a100_ccu_desc);
if (ret)
return ret;
@@ -1270,6 +1270,7 @@ static struct platform_driver sun50i_a100_ccu_driver = {
.probe = sun50i_a100_ccu_probe,
.driver = {
.name = "sun50i-a100-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a100_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
index 149cfde817cb..a8c5a92b7d0c 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-a64.c
@@ -938,13 +938,11 @@ static struct ccu_mux_nb sun50i_a64_cpu_nb = {
static int sun50i_a64_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -955,7 +953,7 @@ static int sun50i_a64_ccu_probe(struct platform_device *pdev)
writel(0x515, reg + SUN50I_A64_PLL_MIPI_REG);
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_a64_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_a64_ccu_desc);
if (ret)
return ret;
@@ -978,6 +976,7 @@ static struct platform_driver sun50i_a64_ccu_driver = {
.probe = sun50i_a64_ccu_probe,
.driver = {
.name = "sun50i-a64-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_a64_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
index f8909a7ed553..f30d7eb5424d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6-r.c
@@ -232,7 +232,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
return;
}
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun50i_h6_r_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
index bff446b78290..e5672c10d065 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h6.c
@@ -1183,13 +1183,11 @@ static const u32 usb2_clk_regs[] = {
static int sun50i_h6_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
int i;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1240,7 +1238,7 @@ static int sun50i_h6_ccu_probe(struct platform_device *pdev)
val |= BIT(24);
writel(val, reg + SUN50I_H6_HDMI_CEC_CLK_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun50i_h6_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun50i_h6_ccu_desc);
}
static const struct of_device_id sun50i_h6_ccu_ids[] = {
@@ -1252,6 +1250,7 @@ static struct platform_driver sun50i_h6_ccu_driver = {
.probe = sun50i_h6_ccu_probe,
.driver = {
.name = "sun50i-h6-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun50i_h6_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
index 225307305880..22eb18079a15 100644
--- a/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
+++ b/drivers/clk/sunxi-ng/ccu-sun50i-h616.c
@@ -1141,9 +1141,7 @@ static void __init sun50i_h616_ccu_setup(struct device_node *node)
val |= BIT(24);
writel(val, reg + SUN50I_H616_HDMI_CEC_CLK_REG);
- i = sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
- if (i)
- pr_err("%pOF: probing clocks fails: %d\n", node, i);
+ of_sunxi_ccu_probe(node, reg, &sun50i_h616_ccu_desc);
}
CLK_OF_DECLARE(sun50i_h616_ccu, "allwinner,sun50i-h616-ccu",
diff --git a/drivers/clk/sunxi-ng/ccu-sun5i.c b/drivers/clk/sunxi-ng/ccu-sun5i.c
index b78e9b507c1c..1f4bc0e773a7 100644
--- a/drivers/clk/sunxi-ng/ccu-sun5i.c
+++ b/drivers/clk/sunxi-ng/ccu-sun5i.c
@@ -1012,7 +1012,7 @@ static void __init sun5i_ccu_init(struct device_node *node,
val &= ~GENMASK(7, 6);
writel(val | (2 << 6), reg + SUN5I_AHB_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun5i_a10s_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
index 9b40d53266a3..3df5c0b41580 100644
--- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
+++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
@@ -1257,7 +1257,7 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node)
val |= 0x3 << 12;
writel(val, reg + SUN6I_A31_AHB1_REG);
- sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
&sun6i_a31_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
index 103aa504f6c8..577bb235d658 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a23.c
@@ -745,7 +745,7 @@ static void __init sun8i_a23_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A23_PLL_MIPI_REG);
- sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun8i_a23_ccu_desc);
}
CLK_OF_DECLARE(sun8i_a23_ccu, "allwinner,sun8i-a23-ccu",
sun8i_a23_ccu_setup);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
index 91838cd11037..8f65cd03f5ac 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
@@ -805,7 +805,7 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
val &= ~BIT(16);
writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
- sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_a33_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
index 2b434521c5cc..3c310aea8cfa 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-a83t.c
@@ -887,12 +887,10 @@ static void sun8i_a83t_cpu_pll_fixup(void __iomem *reg)
static int sun8i_a83t_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -906,7 +904,7 @@ static int sun8i_a83t_ccu_probe(struct platform_device *pdev)
sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C0CPUX_REG);
sun8i_a83t_cpu_pll_fixup(reg + SUN8I_A83T_PLL_C1CPUX_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_a83t_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_a83t_ccu_desc);
}
static const struct of_device_id sun8i_a83t_ccu_ids[] = {
@@ -918,6 +916,7 @@ static struct platform_driver sun8i_a83t_ccu_driver = {
.probe = sun8i_a83t_ccu_probe,
.driver = {
.name = "sun8i-a83t-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun8i_a83t_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
index 524f33275bc7..573b5051d305 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-de2.c
@@ -280,7 +280,6 @@ static const struct sunxi_ccu_desc sun50i_h5_de2_clk_desc = {
static int sunxi_de2_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk, *mod_clk;
struct reset_control *rstc;
void __iomem *reg;
@@ -291,8 +290,7 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
if (!ccu_desc)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -342,7 +340,7 @@ static int sunxi_de2_clk_probe(struct platform_device *pdev)
goto err_disable_mod_clk;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, ccu_desc);
if (ret)
goto err_assert_reset;
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
index 7e629a4493af..d2fc2903787d 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-h3.c
@@ -1154,7 +1154,7 @@ static void __init sunxi_h3_h5_ccu_init(struct device_node *node,
val &= ~GENMASK(19, 16);
writel(val | (0 << 16), reg + SUN8I_H3_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&sun8i_h3_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r.c b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
index 4c8c491b87c2..9e754d1f754a 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r.c
@@ -265,7 +265,7 @@ static void __init sunxi_r_ccu_init(struct device_node *node,
return;
}
- sunxi_ccu_probe(node, reg, desc);
+ of_sunxi_ccu_probe(node, reg, desc);
}
static void __init sun8i_a83t_r_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
index 84153418453f..8bb18d9add05 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-r40.c
@@ -1307,14 +1307,12 @@ static struct regmap_config sun8i_r40_ccu_regmap_config = {
static int sun8i_r40_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
struct regmap *regmap;
void __iomem *reg;
u32 val;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1346,7 +1344,7 @@ static int sun8i_r40_ccu_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg, &sun8i_r40_ccu_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun8i_r40_ccu_desc);
if (ret)
return ret;
@@ -1369,6 +1367,7 @@ static struct platform_driver sun8i_r40_ccu_driver = {
.probe = sun8i_r40_ccu_probe,
.driver = {
.name = "sun8i-r40-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun8i_r40_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
index f49724a22540..ce150f83ab54 100644
--- a/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
+++ b/drivers/clk/sunxi-ng/ccu-sun8i-v3s.c
@@ -822,7 +822,7 @@ static void __init sun8i_v3_v3s_ccu_init(struct device_node *node,
val &= ~GENMASK(19, 16);
writel(val, reg + SUN8I_V3S_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, ccu_desc);
+ of_sunxi_ccu_probe(node, reg, ccu_desc);
}
static void __init sun8i_v3s_ccu_setup(struct device_node *node)
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
index 6616e8114f62..3cde2610f467 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-de.c
@@ -203,14 +203,12 @@ static const struct sunxi_ccu_desc sun9i_a80_de_clk_desc = {
static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk;
struct reset_control *rstc;
void __iomem *reg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -246,8 +244,7 @@ static int sun9i_a80_de_clk_probe(struct platform_device *pdev)
goto err_disable_clk;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg,
- &sun9i_a80_de_clk_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_de_clk_desc);
if (ret)
goto err_assert_reset;
@@ -269,6 +266,7 @@ static struct platform_driver sun9i_a80_de_clk_driver = {
.probe = sun9i_a80_de_clk_probe,
.driver = {
.name = "sun9i-a80-de-clks",
+ .suppress_bind_attrs = true,
.of_match_table = sun9i_a80_de_clk_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
index 4b4a507d04ed..0740e8978ae8 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80-usb.c
@@ -92,13 +92,11 @@ static const struct sunxi_ccu_desc sun9i_a80_usb_clk_desc = {
static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
{
- struct resource *res;
struct clk *bus_clk;
void __iomem *reg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -117,8 +115,7 @@ static int sun9i_a80_usb_clk_probe(struct platform_device *pdev)
return ret;
}
- ret = sunxi_ccu_probe(pdev->dev.of_node, reg,
- &sun9i_a80_usb_clk_desc);
+ ret = devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_usb_clk_desc);
if (ret)
goto err_disable_clk;
diff --git a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
index ef29582676f6..d416af29e0d3 100644
--- a/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
+++ b/drivers/clk/sunxi-ng/ccu-sun9i-a80.c
@@ -1213,12 +1213,10 @@ static void sun9i_a80_cpu_pll_fixup(void __iomem *reg)
static int sun9i_a80_ccu_probe(struct platform_device *pdev)
{
- struct resource *res;
void __iomem *reg;
u32 val;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, res);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
@@ -1231,7 +1229,7 @@ static int sun9i_a80_ccu_probe(struct platform_device *pdev)
sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C0CPUX_REG);
sun9i_a80_cpu_pll_fixup(reg + SUN9I_A80_PLL_C1CPUX_REG);
- return sunxi_ccu_probe(pdev->dev.of_node, reg, &sun9i_a80_ccu_desc);
+ return devm_sunxi_ccu_probe(&pdev->dev, reg, &sun9i_a80_ccu_desc);
}
static const struct of_device_id sun9i_a80_ccu_ids[] = {
@@ -1243,6 +1241,7 @@ static struct platform_driver sun9i_a80_ccu_driver = {
.probe = sun9i_a80_ccu_probe,
.driver = {
.name = "sun9i-a80-ccu",
+ .suppress_bind_attrs = true,
.of_match_table = sun9i_a80_ccu_ids,
},
};
diff --git a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
index 7ecc3a5a5b5e..61ad7ee91c11 100644
--- a/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
+++ b/drivers/clk/sunxi-ng/ccu-suniv-f1c100s.c
@@ -538,7 +538,7 @@ static void __init suniv_f1c100s_ccu_setup(struct device_node *node)
val &= ~GENMASK(19, 16);
writel(val | (3 << 16), reg + SUNIV_PLL_AUDIO_REG);
- sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
+ of_sunxi_ccu_probe(node, reg, &suniv_ccu_desc);
/* Gate then ungate PLL CPU after any rate changes */
ccu_pll_notifier_register(&suniv_pll_cpu_nb);
diff --git a/drivers/clk/sunxi-ng/ccu_common.c b/drivers/clk/sunxi-ng/ccu_common.c
index 2e20e650b6c0..31af8b6b5286 100644
--- a/drivers/clk/sunxi-ng/ccu_common.c
+++ b/drivers/clk/sunxi-ng/ccu_common.c
@@ -7,6 +7,7 @@
#include <linux/clk.h>
#include <linux/clk-provider.h>
+#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/slab.h>
@@ -14,7 +15,11 @@
#include "ccu_gate.h"
#include "ccu_reset.h"
-static DEFINE_SPINLOCK(ccu_lock);
+struct sunxi_ccu {
+ const struct sunxi_ccu_desc *desc;
+ spinlock_t lock;
+ struct ccu_reset reset;
+};
void ccu_helper_wait_for_lock(struct ccu_common *common, u32 lock)
{
@@ -79,12 +84,17 @@ int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb)
&pll_nb->clk_nb);
}
-int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
- const struct sunxi_ccu_desc *desc)
+static int sunxi_ccu_probe(struct sunxi_ccu *ccu, struct device *dev,
+ struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
{
struct ccu_reset *reset;
int i, ret;
+ ccu->desc = desc;
+
+ spin_lock_init(&ccu->lock);
+
for (i = 0; i < desc->num_ccu_clks; i++) {
struct ccu_common *cclk = desc->ccu_clks[i];
@@ -92,7 +102,7 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
continue;
cclk->base = reg;
- cclk->lock = &ccu_lock;
+ cclk->lock = &ccu->lock;
}
for (i = 0; i < desc->hw_clks->num ; i++) {
@@ -103,7 +113,10 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
continue;
name = hw->init->name;
- ret = of_clk_hw_register(node, hw);
+ if (dev)
+ ret = clk_hw_register(dev, hw);
+ else
+ ret = of_clk_hw_register(node, hw);
if (ret) {
pr_err("Couldn't register clock %d - %s\n", i, name);
goto err_clk_unreg;
@@ -115,29 +128,22 @@ int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
if (ret)
goto err_clk_unreg;
- reset = kzalloc(sizeof(*reset), GFP_KERNEL);
- if (!reset) {
- ret = -ENOMEM;
- goto err_alloc_reset;
- }
-
+ reset = &ccu->reset;
reset->rcdev.of_node = node;
reset->rcdev.ops = &ccu_reset_ops;
- reset->rcdev.owner = THIS_MODULE;
+ reset->rcdev.owner = dev ? dev->driver->owner : THIS_MODULE;
reset->rcdev.nr_resets = desc->num_resets;
reset->base = reg;
- reset->lock = &ccu_lock;
+ reset->lock = &ccu->lock;
reset->reset_map = desc->resets;
ret = reset_controller_register(&reset->rcdev);
if (ret)
- goto err_of_clk_unreg;
+ goto err_del_provider;
return 0;
-err_of_clk_unreg:
- kfree(reset);
-err_alloc_reset:
+err_del_provider:
of_clk_del_provider(node);
err_clk_unreg:
while (--i >= 0) {
@@ -149,3 +155,59 @@ err_clk_unreg:
}
return ret;
}
+
+static void devm_sunxi_ccu_release(struct device *dev, void *res)
+{
+ struct sunxi_ccu *ccu = res;
+ const struct sunxi_ccu_desc *desc = ccu->desc;
+ int i;
+
+ reset_controller_unregister(&ccu->reset.rcdev);
+ of_clk_del_provider(dev->of_node);
+
+ for (i = 0; i < desc->hw_clks->num; i++) {
+ struct clk_hw *hw = desc->hw_clks->hws[i];
+
+ if (!hw)
+ continue;
+ clk_hw_unregister(hw);
+ }
+}
+
+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
+{
+ struct sunxi_ccu *ccu;
+ int ret;
+
+ ccu = devres_alloc(devm_sunxi_ccu_release, sizeof(*ccu), GFP_KERNEL);
+ if (!ccu)
+ return -ENOMEM;
+
+ ret = sunxi_ccu_probe(ccu, dev, dev->of_node, reg, desc);
+ if (ret) {
+ devres_free(ccu);
+ return ret;
+ }
+
+ devres_add(dev, ccu);
+
+ return 0;
+}
+
+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc)
+{
+ struct sunxi_ccu *ccu;
+ int ret;
+
+ ccu = kzalloc(sizeof(*ccu), GFP_KERNEL);
+ if (!ccu)
+ return;
+
+ ret = sunxi_ccu_probe(ccu, NULL, node, reg, desc);
+ if (ret) {
+ pr_err("%pOF: probing clocks failed: %d\n", node, ret);
+ kfree(ccu);
+ }
+}
diff --git a/drivers/clk/sunxi-ng/ccu_common.h b/drivers/clk/sunxi-ng/ccu_common.h
index 04e7a12200a2..98a1834b58bb 100644
--- a/drivers/clk/sunxi-ng/ccu_common.h
+++ b/drivers/clk/sunxi-ng/ccu_common.h
@@ -63,7 +63,9 @@ struct ccu_pll_nb {
int ccu_pll_notifier_register(struct ccu_pll_nb *pll_nb);
-int sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
- const struct sunxi_ccu_desc *desc);
+int devm_sunxi_ccu_probe(struct device *dev, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc);
+void of_sunxi_ccu_probe(struct device_node *node, void __iomem *reg,
+ const struct sunxi_ccu_desc *desc);
#endif /* _COMMON_H_ */
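The header change above replaces the single sunxi_ccu_probe() entry point with a device-managed variant for platform drivers and an OF variant for the early CLK_OF_DECLARE users. A condensed sketch of the platform-driver pattern that the conversions earlier in this series follow; the driver name and descriptor below are placeholders, not part of the patch:

#include <linux/platform_device.h>
#include "ccu_common.h"

/* "example_ccu_desc" stands in for the SoC's real sunxi_ccu_desc */
static int example_ccu_probe(struct platform_device *pdev)
{
        void __iomem *reg;

        /* replaces the platform_get_resource() + devm_ioremap_resource() pair */
        reg = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(reg))
                return PTR_ERR(reg);

        /* any SoC-specific register fixups go here, before registration */

        /* clocks, resets and the per-CCU spinlock are released automatically */
        return devm_sunxi_ccu_probe(&pdev->dev, reg, &example_ccu_desc);
}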
diff --git a/drivers/clk/sunxi-ng/ccu_mux.h b/drivers/clk/sunxi-ng/ccu_mux.h
index f165395effb5..e31efc509b3d 100644
--- a/drivers/clk/sunxi-ng/ccu_mux.h
+++ b/drivers/clk/sunxi-ng/ccu_mux.h
@@ -40,7 +40,6 @@ struct ccu_mux_internal {
_SUNXI_CCU_MUX_TABLE(_shift, _width, NULL)
struct ccu_mux {
- u16 reg;
u32 enable;
struct ccu_mux_internal mux;
diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c
index f9d715ec9908..51800289ada9 100644
--- a/drivers/clk/sunxi/clk-mod0.c
+++ b/drivers/clk/sunxi/clk-mod0.c
@@ -88,14 +88,12 @@ CLK_OF_DECLARE_DRIVER(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk",
static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
if (!np)
return -ENODEV;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 4c75b0770c74..e4cf1180b088 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -40,7 +40,6 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
const struct gates_data *data;
const char *clk_parent;
const char *clk_name;
- struct resource *r;
void __iomem *reg;
int ngates;
int i;
@@ -53,8 +52,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
if (!data)
return -ENODEV;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0.c b/drivers/clk/sunxi/clk-sun6i-apb0.c
index 10f70c35c265..f80c67bafe38 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0.c
@@ -32,12 +32,10 @@ static int sun6i_a31_apb0_clk_probe(struct platform_device *pdev)
struct device_node *np = pdev->dev.of_node;
const char *clk_name = np->name;
const char *clk_parent;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c
index 54babc2b4b9e..9f9a2cf54f41 100644
--- a/drivers/clk/sunxi/clk-sun6i-ar100.c
+++ b/drivers/clk/sunxi/clk-sun6i-ar100.c
@@ -71,12 +71,10 @@ static DEFINE_SPINLOCK(sun6i_ar100_lock);
static int sun6i_a31_ar100_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/sunxi/clk-sun8i-apb0.c b/drivers/clk/sunxi/clk-sun8i-apb0.c
index fc5d6e3b77d1..f605ecca879f 100644
--- a/drivers/clk/sunxi/clk-sun8i-apb0.c
+++ b/drivers/clk/sunxi/clk-sun8i-apb0.c
@@ -87,12 +87,10 @@ CLK_OF_DECLARE_DRIVER(sun8i_a23_apb0, "allwinner,sun8i-a23-apb0-clk",
static int sun8i_a23_apb0_clk_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
- struct resource *r;
void __iomem *reg;
struct clk *clk;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- reg = devm_ioremap_resource(&pdev->dev, r);
+ reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(reg))
return PTR_ERR(reg);
diff --git a/drivers/clk/ux500/Makefile b/drivers/clk/ux500/Makefile
index 53fd29002401..c29b83df403e 100644
--- a/drivers/clk/ux500/Makefile
+++ b/drivers/clk/ux500/Makefile
@@ -8,6 +8,9 @@ obj-y += clk-prcc.o
obj-y += clk-prcmu.o
obj-y += clk-sysctrl.o
+# Reset control
+obj-y += reset-prcc.o
+
# Clock definitions
obj-y += u8500_of_clk.o
diff --git a/drivers/clk/ux500/prcc.h b/drivers/clk/ux500/prcc.h
new file mode 100644
index 000000000000..5b6774d79506
--- /dev/null
+++ b/drivers/clk/ux500/prcc.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __PRCC_H
+#define __PRCC_H
+
+#define PRCC_NUM_PERIPH_CLUSTERS 6
+#define PRCC_PERIPHS_PER_CLUSTER 32
+
+/* CLKRST4 is missing, making it hard to index things */
+enum clkrst_index {
+ CLKRST1_INDEX = 0,
+ CLKRST2_INDEX,
+ CLKRST3_INDEX,
+ CLKRST5_INDEX,
+ CLKRST6_INDEX,
+ CLKRST_MAX,
+};
+
+#endif
diff --git a/drivers/clk/ux500/reset-prcc.c b/drivers/clk/ux500/reset-prcc.c
new file mode 100644
index 000000000000..fcd5d042806a
--- /dev/null
+++ b/drivers/clk/ux500/reset-prcc.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Reset controller portions for the U8500 PRCC
+ * Copyright (C) 2021 Linus Walleij <linus.walleij@linaro.org>
+ */
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/types.h>
+#include <linux/reset-controller.h>
+#include <linux/bits.h>
+#include <linux/delay.h>
+
+#include "prcc.h"
+#include "reset-prcc.h"
+
+#define to_u8500_prcc_reset(p) container_of((p), struct u8500_prcc_reset, rcdev)
+
+/* This macro flattens the 2-dimensional PRCC numberspace */
+#define PRCC_RESET_LINE(prcc_num, bit) \
+ (((prcc_num) * PRCC_PERIPHS_PER_CLUSTER) + (bit))
+
+/*
+ * Reset registers in each PRCC - the reset lines are active low,
+ * so to put a peripheral into reset you write its bit into the
+ * CLEAR register, which asserts the reset by pulling the line low.
+ * Writing the same bit into the SET register takes the device out of
+ * reset. The status register reflects the actual state of the line.
+ */
+#define PRCC_K_SOFTRST_SET 0x018
+#define PRCC_K_SOFTRST_CLEAR 0x01c
+#define PRCC_K_RST_STATUS 0x020
+
+static int prcc_num_to_index(unsigned int num)
+{
+ switch (num) {
+ case 1:
+ return CLKRST1_INDEX;
+ case 2:
+ return CLKRST2_INDEX;
+ case 3:
+ return CLKRST3_INDEX;
+ case 5:
+ return CLKRST5_INDEX;
+ case 6:
+ return CLKRST6_INDEX;
+ }
+ return -EINVAL;
+}
+
+static void __iomem *u8500_prcc_reset_base(struct u8500_prcc_reset *ur,
+ unsigned long id)
+{
+ unsigned int prcc_num, index;
+
+ prcc_num = id / PRCC_PERIPHS_PER_CLUSTER;
+ index = prcc_num_to_index(prcc_num);
+
+ if (index > ARRAY_SIZE(ur->base))
+ return NULL;
+
+ return ur->base[index];
+}
+
+static int u8500_prcc_reset(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC cycle reset id %lu, bit %u\n", id, bit);
+
+ /*
+ * Assert reset and then release it. The one microsecond
+ * delay is found in the vendor reference code.
+ */
+ writel(BIT(bit), base + PRCC_K_SOFTRST_CLEAR);
+ udelay(1);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_SET);
+ udelay(1);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_assert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC assert reset id %lu, bit %u\n", id, bit);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_CLEAR);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_deassert(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+
+ pr_debug("PRCC deassert reset id %lu, bit %u\n", id, bit);
+ writel(BIT(bit), base + PRCC_K_SOFTRST_SET);
+
+ return 0;
+}
+
+static int u8500_prcc_reset_status(struct reset_controller_dev *rcdev,
+ unsigned long id)
+{
+ struct u8500_prcc_reset *ur = to_u8500_prcc_reset(rcdev);
+ void __iomem *base = u8500_prcc_reset_base(ur, id);
+ unsigned int bit = id % PRCC_PERIPHS_PER_CLUSTER;
+ u32 val;
+
+ pr_debug("PRCC check status on reset line id %lu, bit %u\n", id, bit);
+ val = readl(base + PRCC_K_RST_STATUS);
+
+ /* Active low so return the inverse value of the bit */
+ return !(val & BIT(bit));
+}
+
+static const struct reset_control_ops u8500_prcc_reset_ops = {
+ .reset = u8500_prcc_reset,
+ .assert = u8500_prcc_reset_assert,
+ .deassert = u8500_prcc_reset_deassert,
+ .status = u8500_prcc_reset_status,
+};
+
+static int u8500_prcc_reset_xlate(struct reset_controller_dev *rcdev,
+ const struct of_phandle_args *reset_spec)
+{
+ unsigned int prcc_num, bit;
+
+ if (reset_spec->args_count != 2)
+ return -EINVAL;
+
+ prcc_num = reset_spec->args[0];
+ bit = reset_spec->args[1];
+
+ if (prcc_num != 1 && prcc_num != 2 && prcc_num != 3 &&
+ prcc_num != 5 && prcc_num != 6) {
+ pr_err("%s: invalid PRCC %d\n", __func__, prcc_num);
+ return -EINVAL;
+ }
+
+ pr_debug("located reset line %d at PRCC %d bit %d\n",
+ PRCC_RESET_LINE(prcc_num, bit), prcc_num, bit);
+
+ return PRCC_RESET_LINE(prcc_num, bit);
+}
+
+void u8500_prcc_reset_init(struct device_node *np, struct u8500_prcc_reset *ur)
+{
+ struct reset_controller_dev *rcdev = &ur->rcdev;
+ int ret;
+ int i;
+
+ for (i = 0; i < CLKRST_MAX; i++) {
+ ur->base[i] = ioremap(ur->phy_base[i], SZ_4K);
+ if (!ur->base[i])
+ pr_err("PRCC failed to remap for reset base %d (%08x)\n",
+ i, ur->phy_base[i]);
+ }
+
+ rcdev->owner = THIS_MODULE;
+ rcdev->ops = &u8500_prcc_reset_ops;
+ rcdev->of_node = np;
+ rcdev->of_reset_n_cells = 2;
+ rcdev->of_xlate = u8500_prcc_reset_xlate;
+
+ ret = reset_controller_register(rcdev);
+ if (ret)
+ pr_err("PRCC failed to register reset controller\n");
+}
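The xlate and base-lookup helpers above flatten the two-cell (PRCC number, bit) reset specifier into one linear id and recover both halves on each request. A worked example with illustrative numbers:

/*
 * Worked example of the PRCC_RESET_LINE() flattening, using PRCC 3, bit 12:
 *
 *   id       = PRCC_RESET_LINE(3, 12) = 3 * 32 + 12 = 108
 *   prcc_num = id / PRCC_PERIPHS_PER_CLUSTER = 108 / 32 = 3
 *   bit      = id % PRCC_PERIPHS_PER_CLUSTER = 108 % 32 = 12
 *   base     = ur->base[prcc_num_to_index(3)] = ur->base[CLKRST3_INDEX]
 *
 * Asserting that line writes BIT(12) to PRCC_K_SOFTRST_CLEAR at that base
 * (active low), deasserting writes BIT(12) to PRCC_K_SOFTRST_SET, and the
 * status callback returns the inverse of bit 12 in PRCC_K_RST_STATUS.
 */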
diff --git a/drivers/clk/ux500/reset-prcc.h b/drivers/clk/ux500/reset-prcc.h
new file mode 100644
index 000000000000..353c9719f2e6
--- /dev/null
+++ b/drivers/clk/ux500/reset-prcc.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __RESET_PRCC_H
+#define __RESET_PRCC_H
+
+#include <linux/reset-controller.h>
+#include <linux/io.h>
+
+/**
+ * struct u8500_prcc_reset - U8500 PRCC reset controller state
+ * @rcdev: reset controller device
+ * @phy_base: the physical base address for each PRCC block
+ * @base: the remapped PRCC bases
+ */
+struct u8500_prcc_reset {
+ struct reset_controller_dev rcdev;
+ u32 phy_base[CLKRST_MAX];
+ void __iomem *base[CLKRST_MAX];
+};
+
+void u8500_prcc_reset_init(struct device_node *np, struct u8500_prcc_reset *ur);
+
+#endif
diff --git a/drivers/clk/ux500/u8500_of_clk.c b/drivers/clk/ux500/u8500_of_clk.c
index 528c5bb397cc..e86ed2eec3fd 100644
--- a/drivers/clk/ux500/u8500_of_clk.c
+++ b/drivers/clk/ux500/u8500_of_clk.c
@@ -10,10 +10,10 @@
#include <linux/of_address.h>
#include <linux/clk-provider.h>
#include <linux/mfd/dbx500-prcmu.h>
-#include "clk.h"
-#define PRCC_NUM_PERIPH_CLUSTERS 6
-#define PRCC_PERIPHS_PER_CLUSTER 32
+#include "clk.h"
+#include "prcc.h"
+#include "reset-prcc.h"
static struct clk *prcmu_clk[PRCMU_NUM_CLKS];
static struct clk *prcc_pclk[(PRCC_NUM_PERIPH_CLUSTERS + 1) * PRCC_PERIPHS_PER_CLUSTER];
@@ -46,16 +46,6 @@ static struct clk *ux500_twocell_get(struct of_phandle_args *clkspec,
return PRCC_SHOW(clk_data, base, bit);
}
-/* CLKRST4 is missing making it hard to index things */
-enum clkrst_index {
- CLKRST1_INDEX = 0,
- CLKRST2_INDEX,
- CLKRST3_INDEX,
- CLKRST5_INDEX,
- CLKRST6_INDEX,
- CLKRST_MAX,
-};
-
static void u8500_clk_init(struct device_node *np)
{
struct prcmu_fw_version *fw_version;
@@ -63,8 +53,18 @@ static void u8500_clk_init(struct device_node *np)
const char *sgaclk_parent = NULL;
struct clk *clk, *rtc_clk, *twd_clk;
u32 bases[CLKRST_MAX];
+ struct u8500_prcc_reset *rstc;
int i;
+ /*
+ * Allocate the reset controller state here so that the physical base
+ * addresses can be filled in below and passed to the reset controller
+ * init function later on.
+ */
+ rstc = kzalloc(sizeof(*rstc), GFP_KERNEL);
+ if (!rstc)
+ return;
+
for (i = 0; i < ARRAY_SIZE(bases); i++) {
struct resource r;
@@ -73,6 +73,7 @@ static void u8500_clk_init(struct device_node *np)
pr_err("failed to get CLKRST %d base address\n",
i + 1);
bases[i] = r.start;
+ rstc->phy_base[i] = r.start;
}
/* Clock sources */
@@ -563,6 +564,9 @@ static void u8500_clk_init(struct device_node *np)
if (of_node_name_eq(child, "smp-twd-clock"))
of_clk_add_provider(child, of_clk_src_simple_get, twd_clk);
+
+ if (of_node_name_eq(child, "prcc-reset-controller"))
+ u8500_prcc_reset_init(child, rstc);
}
}
CLK_OF_DECLARE(u8500_clks, "stericsson,u8500-clks", u8500_clk_init);
diff --git a/drivers/clk/versatile/Kconfig b/drivers/clk/versatile/Kconfig
index 481de5657d85..403f164954c2 100644
--- a/drivers/clk/versatile/Kconfig
+++ b/drivers/clk/versatile/Kconfig
@@ -2,8 +2,9 @@
menu "Clock driver for ARM Reference designs"
depends on HAS_IOMEM
+ depends on ARM || ARM64 || COMPILE_TEST
-config ICST
+config CLK_ICST
bool "Clock driver for ARM Reference designs ICST"
select REGMAP_MMIO
help
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index 4ff563e6e3a0..e7d05308e4f0 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
# Makefile for Versatile-specific clocks
-obj-$(CONFIG_ICST) += icst.o clk-icst.o clk-versatile.o
+obj-$(CONFIG_CLK_ICST) += icst.o clk-icst.o clk-versatile.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
obj-$(CONFIG_CLK_SP810) += clk-sp810.o
obj-$(CONFIG_CLK_VEXPRESS_OSC) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index fdd6aa3cb1fc..77fd0ecaf155 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -501,7 +501,8 @@ static void __init of_syscon_icst_setup(struct device_node *np)
return;
}
- if (of_property_read_u32(np, "vco-offset", &icst_desc.vco_offset)) {
+ if (of_property_read_u32(np, "reg", &icst_desc.vco_offset) &&
+ of_property_read_u32(np, "vco-offset", &icst_desc.vco_offset)) {
pr_err("no VCO register offset for ICST clock\n");
return;
}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 0f5e3983951a..f65e31bab9ae 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -24,6 +24,7 @@ config I8253_LOCK
config OMAP_DM_TIMER
bool
+ select TIMER_OF
config CLKBLD_I8253
def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@@ -418,12 +419,14 @@ config ATMEL_TCB_CLKSRC
config CLKSRC_EXYNOS_MCT
bool "Exynos multi core timer driver" if COMPILE_TEST
depends on ARM || ARM64
+ depends on ARCH_EXYNOS || COMPILE_TEST
help
Support for Multi Core Timer controller on Exynos SoCs.
config CLKSRC_SAMSUNG_PWM
bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST
depends on HAS_IOMEM
+ depends on ARCH_EXYNOS || ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5PV210 || COMPILE_TEST
help
This is a new clocksource driver for the PWM timer found in
Samsung S3C, S5P and Exynos SoCs, replacing an earlier driver
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
index de93dd1a8c7b..cb18524cc13d 100644
--- a/drivers/clocksource/arc_timer.c
+++ b/drivers/clocksource/arc_timer.c
@@ -225,7 +225,7 @@ static int __init arc_cs_setup_timer1(struct device_node *node)
write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
write_aux_reg(ARC_REG_TIMER1_CNT, 0);
- write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER1_CTRL, ARC_TIMER_CTRL_NH);
sched_clock_register(arc_timer1_clock_read, 32, arc_timer_freq);
@@ -245,7 +245,7 @@ static void arc_timer_event_setup(unsigned int cycles)
write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */
- write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, ARC_TIMER_CTRL_IE | ARC_TIMER_CTRL_NH);
}
@@ -294,7 +294,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
* explicitly clears IP bit
* 2. Re-arm interrupt if periodic by writing to IE bit [0]
*/
- write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+ write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | ARC_TIMER_CTRL_NH);
evt->event_handler(evt);
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index be6d741d404c..9a04eacc4412 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -44,23 +44,29 @@
#define CNTACR_RWVT BIT(4)
#define CNTACR_RWPT BIT(5)
-#define CNTVCT_LO 0x08
-#define CNTVCT_HI 0x0c
+#define CNTVCT_LO 0x00
+#define CNTPCT_LO 0x08
#define CNTFRQ 0x10
-#define CNTP_TVAL 0x28
+#define CNTP_CVAL_LO 0x20
#define CNTP_CTL 0x2c
-#define CNTV_TVAL 0x38
+#define CNTV_CVAL_LO 0x30
#define CNTV_CTL 0x3c
-static unsigned arch_timers_present __initdata;
+/*
+ * The minimum amount of time a generic counter is guaranteed to not roll over
+ * (40 years)
+ */
+#define MIN_ROLLOVER_SECS (40ULL * 365 * 24 * 3600)
-static void __iomem *arch_counter_base __ro_after_init;
+static unsigned arch_timers_present __initdata;
struct arch_timer {
void __iomem *base;
struct clock_event_device evt;
};
+static struct arch_timer *arch_timer_mem __ro_after_init;
+
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
@@ -96,32 +102,57 @@ static int __init early_evtstrm_cfg(char *buf)
early_param("clocksource.arm_arch_timer.evtstrm", early_evtstrm_cfg);
/*
+ * Makes an educated guess at a valid counter width based on the Generic Timer
+ * specification. Of note:
+ * 1) the system counter is at least 56 bits wide
+ * 2) the roll-over time is not less than 40 years
+ *
+ * See 'ARM DDI 0487G.a D11.1.2 ("The system counter")' for more details.
+ */
+static int arch_counter_get_width(void)
+{
+ u64 min_cycles = MIN_ROLLOVER_SECS * arch_timer_rate;
+
+ /* guarantee the returned width is within the valid range */
+ return clamp_val(ilog2(min_cycles - 1) + 1, 56, 64);
+}
+
+/*
* Architected system timer support.
*/
static __always_inline
-void arch_timer_reg_write(int access, enum arch_timer_reg reg, u32 val,
+void arch_timer_reg_write(int access, enum arch_timer_reg reg, u64 val,
struct clock_event_device *clk)
{
if (access == ARCH_TIMER_MEM_PHYS_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTP_CTL);
+ writel_relaxed((u32)val, timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTP_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /*
+ * Not guaranteed to be atomic, so the timer
+ * must be disabled at this point.
+ */
+ writeq_relaxed(val, timer->base + CNTP_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
switch (reg) {
case ARCH_TIMER_REG_CTRL:
- writel_relaxed(val, timer->base + CNTV_CTL);
+ writel_relaxed((u32)val, timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- writel_relaxed(val, timer->base + CNTV_TVAL);
+ case ARCH_TIMER_REG_CVAL:
+ /* Same restriction as above */
+ writeq_relaxed(val, timer->base + CNTV_CVAL_LO);
break;
+ default:
+ BUILD_BUG();
}
} else {
arch_timer_reg_write_cp15(access, reg, val);
@@ -140,9 +171,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTP_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTP_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else if (access == ARCH_TIMER_MEM_VIRT_ACCESS) {
struct arch_timer *timer = to_arch_timer(clk);
@@ -150,9 +180,8 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
case ARCH_TIMER_REG_CTRL:
val = readl_relaxed(timer->base + CNTV_CTL);
break;
- case ARCH_TIMER_REG_TVAL:
- val = readl_relaxed(timer->base + CNTV_TVAL);
- break;
+ default:
+ BUILD_BUG();
}
} else {
val = arch_timer_reg_read_cp15(access, reg);
@@ -205,13 +234,11 @@ static struct clocksource clocksource_counter = {
.id = CSID_ARM_ARCH_COUNTER,
.rating = 400,
.read = arch_counter_read,
- .mask = CLOCKSOURCE_MASK(56),
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static struct cyclecounter cyclecounter __ro_after_init = {
.read = arch_counter_read_cc,
- .mask = CLOCKSOURCE_MASK(56),
};
struct ate_acpi_oem_info {
@@ -239,16 +266,6 @@ struct ate_acpi_oem_info {
_new; \
})
-static u32 notrace fsl_a008585_read_cntp_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace fsl_a008585_read_cntv_tval_el0(void)
-{
- return __fsl_a008585_read_reg(cntv_tval_el0);
-}
-
static u64 notrace fsl_a008585_read_cntpct_el0(void)
{
return __fsl_a008585_read_reg(cntpct_el0);
@@ -285,16 +302,6 @@ static u64 notrace fsl_a008585_read_cntvct_el0(void)
_new; \
})
-static u32 notrace hisi_161010101_read_cntp_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntp_tval_el0);
-}
-
-static u32 notrace hisi_161010101_read_cntv_tval_el0(void)
-{
- return __hisi_161010101_read_reg(cntv_tval_el0);
-}
-
static u64 notrace hisi_161010101_read_cntpct_el0(void)
{
return __hisi_161010101_read_reg(cntpct_el0);
@@ -379,16 +386,6 @@ static u64 notrace sun50i_a64_read_cntvct_el0(void)
{
return __sun50i_a64_read_reg(cntvct_el0);
}
-
-static u32 notrace sun50i_a64_read_cntp_tval_el0(void)
-{
- return read_sysreg(cntp_cval_el0) - sun50i_a64_read_cntpct_el0();
-}
-
-static u32 notrace sun50i_a64_read_cntv_tval_el0(void)
-{
- return read_sysreg(cntv_cval_el0) - sun50i_a64_read_cntvct_el0();
-}
#endif
#ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND
@@ -397,7 +394,7 @@ EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround);
static atomic_t timer_unstable_counter_workaround_in_use = ATOMIC_INIT(0);
-static void erratum_set_next_event_tval_generic(const int access, unsigned long evt,
+static void erratum_set_next_event_generic(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
@@ -418,17 +415,17 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
-static __maybe_unused int erratum_set_next_event_tval_virt(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_virt(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_VIRT_ACCESS, evt, clk);
return 0;
}
-static __maybe_unused int erratum_set_next_event_tval_phys(unsigned long evt,
+static __maybe_unused int erratum_set_next_event_phys(unsigned long evt,
struct clock_event_device *clk)
{
- erratum_set_next_event_tval_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
+ erratum_set_next_event_generic(ARCH_TIMER_PHYS_ACCESS, evt, clk);
return 0;
}
@@ -438,12 +435,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "fsl,erratum-a008585",
.desc = "Freescale erratum a005858",
- .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0,
- .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0,
.read_cntpct_el0 = fsl_a008585_read_cntpct_el0,
.read_cntvct_el0 = fsl_a008585_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_HISILICON_ERRATUM_161010101
@@ -451,23 +446,19 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "hisilicon,erratum-161010101",
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
{
.match_type = ate_match_acpi_oem_info,
.id = hisi_161010101_oem_info,
.desc = "HiSilicon erratum 161010101",
- .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0,
- .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0,
.read_cntpct_el0 = hisi_161010101_read_cntpct_el0,
.read_cntvct_el0 = hisi_161010101_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
@@ -484,12 +475,10 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = {
.match_type = ate_match_dt,
.id = "allwinner,erratum-unknown1",
.desc = "Allwinner erratum UNKNOWN1",
- .read_cntp_tval_el0 = sun50i_a64_read_cntp_tval_el0,
- .read_cntv_tval_el0 = sun50i_a64_read_cntv_tval_el0,
.read_cntpct_el0 = sun50i_a64_read_cntpct_el0,
.read_cntvct_el0 = sun50i_a64_read_cntvct_el0,
- .set_next_event_phys = erratum_set_next_event_tval_phys,
- .set_next_event_virt = erratum_set_next_event_tval_virt,
+ .set_next_event_phys = erratum_set_next_event_phys,
+ .set_next_event_virt = erratum_set_next_event_virt,
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1418040
@@ -727,10 +716,18 @@ static __always_inline void set_next_event(const int access, unsigned long evt,
struct clock_event_device *clk)
{
unsigned long ctrl;
+ u64 cnt;
+
ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
ctrl |= ARCH_TIMER_CTRL_ENABLE;
ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
- arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt, clk);
+
+ if (access == ARCH_TIMER_PHYS_ACCESS)
+ cnt = __arch_counter_get_cntpct();
+ else
+ cnt = __arch_counter_get_cntvct();
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
@@ -748,23 +745,79 @@ static int arch_timer_set_next_event_phys(unsigned long evt,
return 0;
}
+static u64 arch_counter_get_cnt_mem(struct arch_timer *t, int offset_lo)
+{
+ u32 cnt_lo, cnt_hi, tmp_hi;
+
+ do {
+ cnt_hi = readl_relaxed(t->base + offset_lo + 4);
+ cnt_lo = readl_relaxed(t->base + offset_lo);
+ tmp_hi = readl_relaxed(t->base + offset_lo + 4);
+ } while (cnt_hi != tmp_hi);
+
+ return ((u64) cnt_hi << 32) | cnt_lo;
+}
+
+static __always_inline void set_next_event_mem(const int access, unsigned long evt,
+ struct clock_event_device *clk)
+{
+ struct arch_timer *timer = to_arch_timer(clk);
+ unsigned long ctrl;
+ u64 cnt;
+
+ ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
+ ctrl |= ARCH_TIMER_CTRL_ENABLE;
+ ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
+
+ if (access == ARCH_TIMER_MEM_VIRT_ACCESS)
+ cnt = arch_counter_get_cnt_mem(timer, CNTVCT_LO);
+ else
+ cnt = arch_counter_get_cnt_mem(timer, CNTPCT_LO);
+
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CVAL, evt + cnt, clk);
+ arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
+}
+
static int arch_timer_set_next_event_virt_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_VIRT_ACCESS, evt, clk);
return 0;
}
static int arch_timer_set_next_event_phys_mem(unsigned long evt,
struct clock_event_device *clk)
{
- set_next_event(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
+ set_next_event_mem(ARCH_TIMER_MEM_PHYS_ACCESS, evt, clk);
return 0;
}
+static u64 __arch_timer_check_delta(void)
+{
+#ifdef CONFIG_ARM64
+ const struct midr_range broken_cval_midrs[] = {
+ /*
+ * XGene-1 implements CVAL in terms of TVAL, meaning
+ * that the maximum timer range is 32bit. Shame on them.
+ */
+ MIDR_ALL_VERSIONS(MIDR_CPU_MODEL(ARM_CPU_IMP_APM,
+ APM_CPU_PART_POTENZA)),
+ {},
+ };
+
+ if (is_midr_in_range_list(read_cpuid_id(), broken_cval_midrs)) {
+ pr_warn_once("Broken CNTx_CVAL_EL1, limiting width to 32bits");
+ return CLOCKSOURCE_MASK(32);
+ }
+#endif
+ return CLOCKSOURCE_MASK(arch_counter_get_width());
+}
+
static void __arch_timer_setup(unsigned type,
struct clock_event_device *clk)
{
+ u64 max_delta;
+
clk->features = CLOCK_EVT_FEAT_ONESHOT;
if (type == ARCH_TIMER_TYPE_CP15) {
@@ -796,6 +849,7 @@ static void __arch_timer_setup(unsigned type,
}
clk->set_next_event = sne;
+ max_delta = __arch_timer_check_delta();
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
@@ -812,11 +866,13 @@ static void __arch_timer_setup(unsigned type,
clk->set_next_event =
arch_timer_set_next_event_phys_mem;
}
+
+ max_delta = CLOCKSOURCE_MASK(56);
}
clk->set_state_shutdown(clk);
- clockevents_config_and_register(clk, arch_timer_rate, 0xf, 0x7fffffff);
+ clockevents_config_and_register(clk, arch_timer_rate, 0xf, max_delta);
}
static void arch_timer_evtstrm_enable(int divider)
@@ -986,15 +1042,7 @@ bool arch_timer_evtstrm_available(void)
static u64 arch_counter_get_cntvct_mem(void)
{
- u32 vct_lo, vct_hi, tmp_hi;
-
- do {
- vct_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- vct_lo = readl_relaxed(arch_counter_base + CNTVCT_LO);
- tmp_hi = readl_relaxed(arch_counter_base + CNTVCT_HI);
- } while (vct_hi != tmp_hi);
-
- return ((u64) vct_hi << 32) | vct_lo;
+ return arch_counter_get_cnt_mem(arch_timer_mem, CNTVCT_LO);
}
static struct arch_timer_kvm_info arch_timer_kvm_info;
@@ -1007,6 +1055,7 @@ struct arch_timer_kvm_info *arch_timer_get_kvm_info(void)
static void __init arch_counter_register(unsigned type)
{
u64 start_count;
+ int width;
/* Register the CP15 based counter if we have one */
if (type & ARCH_TIMER_TYPE_CP15) {
@@ -1031,6 +1080,10 @@ static void __init arch_counter_register(unsigned type)
arch_timer_read_counter = arch_counter_get_cntvct_mem;
}
+ width = arch_counter_get_width();
+ clocksource_counter.mask = CLOCKSOURCE_MASK(width);
+ cyclecounter.mask = CLOCKSOURCE_MASK(width);
+
if (!arch_counter_suspend_stop)
clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
start_count = arch_timer_read_counter();
@@ -1040,8 +1093,7 @@ static void __init arch_counter_register(unsigned type)
timecounter_init(&arch_timer_kvm_info.timecounter,
&cyclecounter, start_count);
- /* 56 bits minimum, so we assume worst case rollover */
- sched_clock_register(arch_timer_read_counter, 56, arch_timer_rate);
+ sched_clock_register(arch_timer_read_counter, width, arch_timer_rate);
}
static void arch_timer_stop(struct clock_event_device *clk)
@@ -1182,25 +1234,25 @@ static int __init arch_timer_mem_register(void __iomem *base, unsigned int irq)
{
int ret;
irq_handler_t func;
- struct arch_timer *t;
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (!t)
+ arch_timer_mem = kzalloc(sizeof(*arch_timer_mem), GFP_KERNEL);
+ if (!arch_timer_mem)
return -ENOMEM;
- t->base = base;
- t->evt.irq = irq;
- __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &t->evt);
+ arch_timer_mem->base = base;
+ arch_timer_mem->evt.irq = irq;
+ __arch_timer_setup(ARCH_TIMER_TYPE_MEM, &arch_timer_mem->evt);
if (arch_timer_mem_use_virtual)
func = arch_timer_handler_virt_mem;
else
func = arch_timer_handler_phys_mem;
- ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &t->evt);
+ ret = request_irq(irq, func, IRQF_TIMER, "arch_mem_timer", &arch_timer_mem->evt);
if (ret) {
pr_err("Failed to request mem timer irq\n");
- kfree(t);
+ kfree(arch_timer_mem);
+ arch_timer_mem = NULL;
}
return ret;
@@ -1458,7 +1510,6 @@ arch_timer_mem_frame_register(struct arch_timer_mem_frame *frame)
return ret;
}
- arch_counter_base = base;
arch_timers_present |= ARCH_TIMER_TYPE_MEM;
return 0;
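The conversion above programs the 64-bit CVAL comparator instead of the 32-bit TVAL downcounter (the removed Allwinner workaround shows the architectural relationship: TVAL is simply CVAL minus the current count), so the maximum event delta and the clocksource mask are now derived from the counter width. A worked example of arch_counter_get_width() at two illustrative counter frequencies:

/*
 * width = clamp(ilog2(MIN_ROLLOVER_SECS * rate - 1) + 1, 56, 64)
 *
 *   rate = 50 MHz -> min_cycles ~= 6.3e16  -> ilog2() + 1 = 56 -> width = 56
 *   rate = 1 GHz  -> min_cycles ~= 1.26e18 -> ilog2() + 1 = 61 -> width = 61
 *
 * __arch_timer_check_delta() then caps the clockevents delta at 32 bits on
 * XGene-1, whose CVAL is implemented on top of TVAL, while the clocksource
 * mask and sched_clock width keep using the value computed above.
 */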
diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c
index c51c5ed15aa7..1767f8bf2013 100644
--- a/drivers/clocksource/timer-riscv.c
+++ b/drivers/clocksource/timer-riscv.c
@@ -13,10 +13,12 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
+#include <linux/module.h>
#include <linux/sched_clock.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
+#include <clocksource/timer-riscv.h>
#include <asm/smp.h>
#include <asm/sbi.h>
#include <asm/timex.h>
@@ -79,6 +81,13 @@ static int riscv_timer_dying_cpu(unsigned int cpu)
return 0;
}
+void riscv_cs_get_mult_shift(u32 *mult, u32 *shift)
+{
+ *mult = riscv_clocksource.mult;
+ *shift = riscv_clocksource.shift;
+}
+EXPORT_SYMBOL_GPL(riscv_cs_get_mult_shift);
+
/* called directly from the low-level interrupt handler */
static irqreturn_t riscv_timer_interrupt(int irq, void *dev_id)
{
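riscv_cs_get_mult_shift() exposes the registered clocksource's mult/shift pair, which converts cycles to nanoseconds in the usual clocksource convention. A minimal sketch of a hypothetical consumer; the helper name is illustrative:

static inline u64 riscv_timer_cycles_to_ns(u64 cycles)
{
        u32 mult, shift;

        riscv_cs_get_mult_shift(&mult, &shift);
        /* same arithmetic as clocksource_cyc2ns(): ns = (cycles * mult) >> shift */
        return (cycles * mult) >> shift;
}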
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 28467d83c745..3d514b82d055 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -470,7 +470,8 @@ static unsigned int acpi_cpufreq_fast_switch(struct cpufreq_policy *policy,
if (policy->cached_target_freq == target_freq)
index = policy->cached_resolved_idx;
else
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq,
+ false);
entry = &policy->freq_table[index];
next_freq = entry->frequency;
diff --git a/drivers/cpufreq/amd_freq_sensitivity.c b/drivers/cpufreq/amd_freq_sensitivity.c
index d0b10baf039a..6448e03bcf48 100644
--- a/drivers/cpufreq/amd_freq_sensitivity.c
+++ b/drivers/cpufreq/amd_freq_sensitivity.c
@@ -91,7 +91,8 @@ static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
unsigned int index;
index = cpufreq_table_find_index_h(policy,
- policy->cur - 1);
+ policy->cur - 1,
+ relation & CPUFREQ_RELATION_E);
freq_next = policy->freq_table[index].frequency;
}
diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
index d4c27022b9c9..db17196266e4 100644
--- a/drivers/cpufreq/cppc_cpufreq.c
+++ b/drivers/cpufreq/cppc_cpufreq.c
@@ -741,8 +741,6 @@ static int __init cppc_cpufreq_init(void)
if ((acpi_disabled) || !acpi_cpc_valid())
return -ENODEV;
- INIT_LIST_HEAD(&cpu_data_list);
-
cppc_check_hisi_workaround();
cppc_freq_invariance_init();
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5782b15a8caa..e338d2f010fe 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -554,7 +554,7 @@ static unsigned int __resolve_freq(struct cpufreq_policy *policy,
unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
unsigned int target_freq)
{
- return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_L);
+ return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
}
EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
@@ -2260,8 +2260,16 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
!(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
return 0;
- if (cpufreq_driver->target)
+ if (cpufreq_driver->target) {
+ /*
+ * If the driver hasn't set up a single inefficient frequency,
+ * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
+ */
+ if (!policy->efficiencies_available)
+ relation &= ~CPUFREQ_RELATION_E;
+
return cpufreq_driver->target(policy, target_freq, relation);
+ }
if (!cpufreq_driver->target_index)
return -EINVAL;
@@ -2523,8 +2531,15 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
if (ret)
return ret;
+ /*
+ * Resolve policy min/max to available frequencies. This ensures that
+ * frequency resolution will neither overshoot the requested maximum
+ * nor undershoot the requested minimum.
+ */
policy->min = new_data.min;
policy->max = new_data.max;
+ policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
+ policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
trace_cpu_frequency_limits(policy);
policy->cached_target_freq = UINT_MAX;
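The governor changes below switch to the efficiency-aware CPUFREQ_RELATION_*E relations, and the hunk above strips the efficiency bit for ->target() drivers that never registered an inefficient frequency. A sketch of how the flags are assumed to compose; the values reflect include/linux/cpufreq.h as this series defines it and are not part of this diff:

/* BIT() comes from <linux/bits.h> */
#define CPUFREQ_RELATION_L      0               /* lowest frequency at or above target */
#define CPUFREQ_RELATION_H      1               /* highest frequency at or below target */
#define CPUFREQ_RELATION_C      2               /* closest frequency to target */
#define CPUFREQ_RELATION_E      BIT(2)          /* prefer efficient frequencies */

#define CPUFREQ_RELATION_LE     (CPUFREQ_RELATION_L | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_HE     (CPUFREQ_RELATION_H | CPUFREQ_RELATION_E)
#define CPUFREQ_RELATION_CE     (CPUFREQ_RELATION_C | CPUFREQ_RELATION_E)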
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index aa39ff31ec9f..0879ec3c170c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -111,7 +111,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
if (requested_freq > policy->max)
requested_freq = policy->max;
- __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_HE);
dbs_info->requested_freq = requested_freq;
goto out;
}
@@ -134,7 +135,8 @@ static unsigned int cs_dbs_update(struct cpufreq_policy *policy)
else
requested_freq = policy->min;
- __cpufreq_driver_target(policy, requested_freq, CPUFREQ_RELATION_L);
+ __cpufreq_driver_target(policy, requested_freq,
+ CPUFREQ_RELATION_LE);
dbs_info->requested_freq = requested_freq;
}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index eb4320b619c9..3b8f924771b4 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -83,9 +83,11 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
freq_avg = freq_req - freq_reduc;
/* Find freq bounds for freq_avg in freq_table */
- index = cpufreq_table_find_index_h(policy, freq_avg);
+ index = cpufreq_table_find_index_h(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
freq_lo = freq_table[index].frequency;
- index = cpufreq_table_find_index_l(policy, freq_avg);
+ index = cpufreq_table_find_index_l(policy, freq_avg,
+ relation & CPUFREQ_RELATION_E);
freq_hi = freq_table[index].frequency;
/* Find out how long we have to be in hi and lo freqs */
@@ -118,12 +120,12 @@ static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq)
if (od_tuners->powersave_bias)
freq = od_ops.powersave_bias_target(policy, freq,
- CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_HE);
else if (policy->cur == policy->max)
return;
__cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ?
- CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_LE : CPUFREQ_RELATION_HE);
}
/*
@@ -161,9 +163,9 @@ static void od_update(struct cpufreq_policy *policy)
if (od_tuners->powersave_bias)
freq_next = od_ops.powersave_bias_target(policy,
freq_next,
- CPUFREQ_RELATION_L);
+ CPUFREQ_RELATION_LE);
- __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
+ __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_CE);
}
}
@@ -182,7 +184,7 @@ static unsigned int od_dbs_update(struct cpufreq_policy *policy)
*/
if (sample_type == OD_SUB_SAMPLE && policy_dbs->sample_delay_ns > 0) {
__cpufreq_driver_target(policy, dbs_info->freq_lo,
- CPUFREQ_RELATION_H);
+ CPUFREQ_RELATION_HE);
return dbs_info->freq_lo_delay_us;
}
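generic_powersave_bias_target() above brackets the reduced request between the two neighbouring table entries and splits the sampling period between them so the time-weighted average approximates freq_avg. A worked example with an illustrative three-entry table:

/*
 * Frequency table {800000, 1600000, 2400000} kHz, freq_avg = 1800000 kHz:
 *
 *   cpufreq_table_find_index_h() -> highest entry <= freq_avg -> freq_lo = 1600000
 *   cpufreq_table_find_index_l() -> lowest entry  >= freq_avg -> freq_hi = 2400000
 *
 * The governor then spends part of each period at freq_lo and the rest at
 * freq_hi; passing relation & CPUFREQ_RELATION_E restricts both lookups to
 * efficient frequencies when the caller asked for that.
 */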
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 8c176b7dae41..349ddbaef796 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -32,6 +32,7 @@
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#include <asm/intel-family.h>
+#include "../drivers/thermal/intel/thermal_interrupt.h"
#define INTEL_PSTATE_SAMPLING_INTERVAL (10 * NSEC_PER_MSEC)
@@ -219,6 +220,7 @@ struct global_params {
* @sched_flags: Store scheduler flags for possible cross CPU update
* @hwp_boost_min: Last HWP boosted min performance
* @suspended: Whether or not the driver has been suspended.
+ * @hwp_notify_work: work item for HWP notifications.
*
* This structure stores per CPU instance data for all CPUs.
*/
@@ -257,6 +259,7 @@ struct cpudata {
unsigned int sched_flags;
u32 hwp_boost_min;
bool suspended;
+ struct delayed_work hwp_notify_work;
};
static struct cpudata **all_cpu_data;
@@ -537,7 +540,8 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
* scaling factor is too high, so recompute it to make the HWP_CAP
* highest performance correspond to the maximum turbo frequency.
*/
- if (turbo_freq < cpu->pstate.turbo_pstate * scaling) {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling;
+ if (turbo_freq < cpu->pstate.turbo_freq) {
cpu->pstate.turbo_freq = turbo_freq;
scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate);
cpu->pstate.scaling = scaling;
@@ -985,11 +989,15 @@ skip_epp:
wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
}
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata);
+
static void intel_pstate_hwp_offline(struct cpudata *cpu)
{
u64 value = READ_ONCE(cpu->hwp_req_cached);
int min_perf;
+ intel_pstate_disable_hwp_interrupt(cpu);
+
if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
/*
* In case the EPP has been set to "performance" by the
@@ -1053,6 +1061,9 @@ static int intel_pstate_suspend(struct cpufreq_policy *policy)
cpu->suspended = true;
+ /* disable HWP interrupt and cancel any pending work */
+ intel_pstate_disable_hwp_interrupt(cpu);
+
return 0;
}
@@ -1546,15 +1557,105 @@ static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
/************************** sysfs end ************************/
+static void intel_pstate_notify_work(struct work_struct *work)
+{
+ struct cpudata *cpudata =
+ container_of(to_delayed_work(work), struct cpudata, hwp_notify_work);
+
+ cpufreq_update_policy(cpudata->cpu);
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_STATUS, 0);
+}
+
+static DEFINE_SPINLOCK(hwp_notify_lock);
+static cpumask_t hwp_intr_enable_mask;
+
+void notify_hwp_interrupt(void)
+{
+ unsigned int this_cpu = smp_processor_id();
+ struct cpudata *cpudata;
+ unsigned long flags;
+ u64 value;
+
+ if (!READ_ONCE(hwp_active) || !boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
+ return;
+
+ rdmsrl_safe(MSR_HWP_STATUS, &value);
+ if (!(value & 0x01))
+ return;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+
+ if (!cpumask_test_cpu(this_cpu, &hwp_intr_enable_mask))
+ goto ack_intr;
+
+ /*
+ * all_cpu_data is never freed and we cannot get here without it being
+ * allocated, but keep the check for safety against future changes.
+ */
+ if (unlikely(!READ_ONCE(all_cpu_data)))
+ goto ack_intr;
+
+ /*
+ * The per-CPU entry is only freed during cleanup when cpufreq
+ * registration fails, and we cannot get here in that case; still, keep
+ * the check to guard against future changes.
+ */
+ cpudata = READ_ONCE(all_cpu_data[this_cpu]);
+ if (unlikely(!cpudata))
+ goto ack_intr;
+
+ schedule_delayed_work(&cpudata->hwp_notify_work, msecs_to_jiffies(10));
+
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ return;
+
+ack_intr:
+ wrmsrl_safe(MSR_HWP_STATUS, 0);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_disable_hwp_interrupt(struct cpudata *cpudata)
+{
+ unsigned long flags;
+
+ /* wrmsrl_on_cpu has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ if (cpumask_test_and_clear_cpu(cpudata->cpu, &hwp_intr_enable_mask))
+ cancel_delayed_work(&cpudata->hwp_notify_work);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+}
+
+static void intel_pstate_enable_hwp_interrupt(struct cpudata *cpudata)
+{
+ /* Enable HWP notification interrupt for guaranteed performance change */
+ if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&hwp_notify_lock, flags);
+ INIT_DELAYED_WORK(&cpudata->hwp_notify_work, intel_pstate_notify_work);
+ cpumask_set_cpu(cpudata->cpu, &hwp_intr_enable_mask);
+ spin_unlock_irqrestore(&hwp_notify_lock, flags);
+
+ /* wrmsrl_on_cpu has to be outside the spinlock as it can result in an IPI */
+ wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x01);
+ }
+}
+
static void intel_pstate_hwp_enable(struct cpudata *cpudata)
{
- /* First disable HWP notification interrupt as we don't process them */
+ /* First disable the HWP notification interrupt until it is re-enabled below */
if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
if (cpudata->epp_default == -EINVAL)
cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
+
+ intel_pstate_enable_hwp_interrupt(cpudata);
}
static int atom_get_min_pstate(void)
@@ -2266,7 +2367,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
if (!cpu)
return -ENOMEM;
- all_cpu_data[cpunum] = cpu;
+ WRITE_ONCE(all_cpu_data[cpunum], cpu);
cpu->cpu = cpunum;
@@ -2929,8 +3030,10 @@ static void intel_pstate_driver_cleanup(void)
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);
+ spin_lock(&hwp_notify_lock);
kfree(all_cpu_data[cpu]);
- all_cpu_data[cpu] = NULL;
+ WRITE_ONCE(all_cpu_data[cpu], NULL);
+ spin_unlock(&hwp_notify_lock);
}
}
cpus_read_unlock();
@@ -3199,6 +3302,7 @@ static bool intel_pstate_hwp_is_enabled(void)
static int __init intel_pstate_init(void)
{
+ static struct cpudata **_all_cpu_data;
const struct x86_cpu_id *id;
int rc;
@@ -3224,7 +3328,7 @@ static int __init intel_pstate_init(void)
* deal with it.
*/
if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) || hwp_forced) {
- hwp_active++;
+ WRITE_ONCE(hwp_active, 1);
hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
intel_cpufreq.attr = hwp_cpufreq_attrs;
@@ -3275,10 +3379,12 @@ hwp_cpu_matched:
pr_info("Intel P-state driver initializing\n");
- all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
- if (!all_cpu_data)
+ _all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
+ if (!_all_cpu_data)
return -ENOMEM;
+ WRITE_ONCE(all_cpu_data, _all_cpu_data);
+
intel_pstate_request_control_from_smm();
intel_pstate_sysfs_expose_params();
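
The notification plumbing added to intel_pstate above follows an "ack or defer" shape: the interrupt path acknowledges the status MSR unless the CPU has been opted in through a spinlock-protected cpumask, in which case the heavy lifting is deferred to a delayed work item, and the disable path touches the MSR outside the lock before clearing the mask and cancelling the work. A stripped-down sketch of that shape, with hypothetical names (my_lock, my_mask, my_work) and the MSR accesses replaced by placeholder comments:

```c
/* Minimal sketch of the notify/disable pattern used above. */
static DEFINE_SPINLOCK(my_lock);
static cpumask_t my_mask;
/* Per-CPU work; INIT_DELAYED_WORK() initialisation elided for brevity. */
static struct delayed_work my_work[NR_CPUS];

static void my_notify_irq(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;

	spin_lock_irqsave(&my_lock, flags);
	if (cpumask_test_cpu(cpu, &my_mask)) {
		/* Real handling happens later, in process context. */
		schedule_delayed_work(&my_work[cpu], msecs_to_jiffies(10));
	} else {
		/* Not opted in: just acknowledge the hardware event here. */
		/* ack_hw_status(cpu); -- placeholder */
	}
	spin_unlock_irqrestore(&my_lock, flags);
}

static void my_disable(unsigned int cpu)
{
	unsigned long flags;

	/* Cross-CPU MSR work is done outside the lock, as in the patch above. */
	/* disable_hw_notification(cpu); -- placeholder */

	spin_lock_irqsave(&my_lock, flags);
	if (cpumask_test_and_clear_cpu(cpu, &my_mask))
		cancel_delayed_work(&my_work[cpu]);
	spin_unlock_irqrestore(&my_lock, flags);
}
```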
diff --git a/drivers/cpufreq/mediatek-cpufreq-hw.c b/drivers/cpufreq/mediatek-cpufreq-hw.c
index 0cf18dd46b92..8ddbd0c5ce37 100644
--- a/drivers/cpufreq/mediatek-cpufreq-hw.c
+++ b/drivers/cpufreq/mediatek-cpufreq-hw.c
@@ -109,7 +109,7 @@ static unsigned int mtk_cpufreq_hw_fast_switch(struct cpufreq_policy *policy,
struct mtk_cpufreq_data *data = policy->driver_data;
unsigned int index;
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
writel_relaxed(index, data->reg_bases[REG_FREQ_PERF_STATE]);
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index 5a2cf5f91ccb..fddbd1ea1635 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -934,7 +934,7 @@ static void powernv_cpufreq_work_fn(struct work_struct *work)
policy = cpufreq_cpu_get(cpu);
if (!policy)
continue;
- index = cpufreq_table_find_index_c(policy, policy->cur);
+ index = cpufreq_table_find_index_c(policy, policy->cur, false);
powernv_cpufreq_target_index(policy, index);
cpumask_andnot(&mask, &mask, policy->cpus);
cpufreq_cpu_put(policy);
@@ -1022,7 +1022,7 @@ static unsigned int powernv_fast_switch(struct cpufreq_policy *policy,
int index;
struct powernv_smp_call_data freq_data;
- index = cpufreq_table_find_index_dl(policy, target_freq);
+ index = cpufreq_table_find_index_dl(policy, target_freq, false);
freq_data.pstate_id = powernv_freqs[index].driver_data;
freq_data.gpstate_id = powernv_freqs[index].driver_data;
set_pstate(&freq_data);
diff --git a/drivers/cpufreq/s3c2440-cpufreq.c b/drivers/cpufreq/s3c2440-cpufreq.c
index 148e8aedefa9..2011fb9c03a4 100644
--- a/drivers/cpufreq/s3c2440-cpufreq.c
+++ b/drivers/cpufreq/s3c2440-cpufreq.c
@@ -173,12 +173,14 @@ static void s3c2440_cpufreq_setdivs(struct s3c_cpufreq_config *cfg)
case 6:
camdiv |= S3C2440_CAMDIVN_HCLK3_HALF;
+ fallthrough;
case 3:
clkdiv |= S3C2440_CLKDIVN_HDIVN_3_6;
break;
case 8:
camdiv |= S3C2440_CAMDIVN_HCLK4_HALF;
+ fallthrough;
case 4:
clkdiv |= S3C2440_CLKDIVN_HDIVN_4_8;
break;
diff --git a/drivers/cpufreq/s5pv210-cpufreq.c b/drivers/cpufreq/s5pv210-cpufreq.c
index ad7d4f272ddc..76c888ed8d16 100644
--- a/drivers/cpufreq/s5pv210-cpufreq.c
+++ b/drivers/cpufreq/s5pv210-cpufreq.c
@@ -243,7 +243,7 @@ static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index)
new_freq = s5pv210_freq_table[index].frequency;
/* Finding current running level index */
- priv_index = cpufreq_table_find_index_h(policy, old_freq);
+ priv_index = cpufreq_table_find_index_h(policy, old_freq, false);
arm_volt = dvs_conf[index].arm_volt;
int_volt = dvs_conf[index].int_volt;
diff --git a/drivers/cpufreq/tegra186-cpufreq.c b/drivers/cpufreq/tegra186-cpufreq.c
index 5d1943e787b0..6c88827f4e62 100644
--- a/drivers/cpufreq/tegra186-cpufreq.c
+++ b/drivers/cpufreq/tegra186-cpufreq.c
@@ -159,6 +159,10 @@ static struct cpufreq_frequency_table *init_vhint_table(
table = ERR_PTR(err);
goto free;
}
+ if (msg.rx.ret) {
+ table = ERR_PTR(-EINVAL);
+ goto free;
+ }
for (i = data->vfloor; i <= data->vceil; i++) {
u16 ndiv = data->ndiv[i];
diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c
index a9620e4489ae..ac381db25dbe 100644
--- a/drivers/cpufreq/tegra194-cpufreq.c
+++ b/drivers/cpufreq/tegra194-cpufreq.c
@@ -242,7 +242,7 @@ static int tegra194_cpufreq_init(struct cpufreq_policy *policy)
smp_call_function_single(policy->cpu, get_cpu_cluster, &cl, true);
- if (cl >= data->num_clusters)
+ if (cl >= data->num_clusters || !data->tables[cl])
return -EINVAL;
/* set same policy for all cpus in a cluster */
@@ -310,6 +310,12 @@ init_freq_table(struct platform_device *pdev, struct tegra_bpmp *bpmp,
err = tegra_bpmp_transfer(bpmp, &msg);
if (err)
return ERR_PTR(err);
+ if (msg.rx.ret == -BPMP_EINVAL) {
+ /* Cluster not available */
+ return NULL;
+ }
+ if (msg.rx.ret)
+ return ERR_PTR(-EINVAL);
/*
* Make sure frequency table step is a multiple of mdiv to match
diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm
index 334f83e56120..15d6c46c0a47 100644
--- a/drivers/cpuidle/Kconfig.arm
+++ b/drivers/cpuidle/Kconfig.arm
@@ -99,7 +99,7 @@ config ARM_MVEBU_V7_CPUIDLE
config ARM_TEGRA_CPUIDLE
bool "CPU Idle Driver for NVIDIA Tegra SoCs"
- depends on ARCH_TEGRA && !ARM64
+ depends on (ARCH_TEGRA || COMPILE_TEST) && !ARM64 && MMU
select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
select ARM_CPU_SUSPEND
help
@@ -112,6 +112,7 @@ config ARM_QCOM_SPM_CPUIDLE
select CPU_IDLE_MULTIPLE_DRIVERS
select DT_IDLE_STATES
select QCOM_SCM
+ select QCOM_SPM
help
Select this to enable cpuidle for Qualcomm processors.
The Subsystem Power Manager (SPM) controls low power modes for the
diff --git a/drivers/cpuidle/cpuidle-qcom-spm.c b/drivers/cpuidle/cpuidle-qcom-spm.c
index c0e7971da2da..01e77913a414 100644
--- a/drivers/cpuidle/cpuidle-qcom-spm.c
+++ b/drivers/cpuidle/cpuidle-qcom-spm.c
@@ -18,158 +18,18 @@
#include <linux/cpuidle.h>
#include <linux/cpu_pm.h>
#include <linux/qcom_scm.h>
+#include <soc/qcom/spm.h>
#include <asm/proc-fns.h>
#include <asm/suspend.h>
#include "dt_idle_states.h"
-#define MAX_PMIC_DATA 2
-#define MAX_SEQ_DATA 64
-#define SPM_CTL_INDEX 0x7f
-#define SPM_CTL_INDEX_SHIFT 4
-#define SPM_CTL_EN BIT(0)
-
-enum pm_sleep_mode {
- PM_SLEEP_MODE_STBY,
- PM_SLEEP_MODE_RET,
- PM_SLEEP_MODE_SPC,
- PM_SLEEP_MODE_PC,
- PM_SLEEP_MODE_NR,
-};
-
-enum spm_reg {
- SPM_REG_CFG,
- SPM_REG_SPM_CTL,
- SPM_REG_DLY,
- SPM_REG_PMIC_DLY,
- SPM_REG_PMIC_DATA_0,
- SPM_REG_PMIC_DATA_1,
- SPM_REG_VCTL,
- SPM_REG_SEQ_ENTRY,
- SPM_REG_SPM_STS,
- SPM_REG_PMIC_STS,
- SPM_REG_NR,
-};
-
-struct spm_reg_data {
- const u8 *reg_offset;
- u32 spm_cfg;
- u32 spm_dly;
- u32 pmic_dly;
- u32 pmic_data[MAX_PMIC_DATA];
- u8 seq[MAX_SEQ_DATA];
- u8 start_index[PM_SLEEP_MODE_NR];
-};
-
-struct spm_driver_data {
+struct cpuidle_qcom_spm_data {
struct cpuidle_driver cpuidle_driver;
- void __iomem *reg_base;
- const struct spm_reg_data *reg_data;
-};
-
-static const u8 spm_reg_offset_v2_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x30,
- [SPM_REG_DLY] = 0x34,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8974, 8084 */
-static const struct spm_reg_data spm_reg_8974_8084_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x1,
- .spm_dly = 0x3C102800,
- .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
- 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
- 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 3,
+ struct spm_driver_data *spm;
};
-/* SPM register data for 8226 */
-static const struct spm_reg_data spm_reg_8226_cpu = {
- .reg_offset = spm_reg_offset_v2_1,
- .spm_cfg = 0x0,
- .spm_dly = 0x3C102800,
- .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
- 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
- 0x80, 0x10, 0x26, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 5,
-};
-
-static const u8 spm_reg_offset_v1_1[SPM_REG_NR] = {
- [SPM_REG_CFG] = 0x08,
- [SPM_REG_SPM_CTL] = 0x20,
- [SPM_REG_PMIC_DLY] = 0x24,
- [SPM_REG_PMIC_DATA_0] = 0x28,
- [SPM_REG_PMIC_DATA_1] = 0x2C,
- [SPM_REG_SEQ_ENTRY] = 0x80,
-};
-
-/* SPM register data for 8064 */
-static const struct spm_reg_data spm_reg_8064_cpu = {
- .reg_offset = spm_reg_offset_v1_1,
- .spm_cfg = 0x1F,
- .pmic_dly = 0x02020004,
- .pmic_data[0] = 0x0084009C,
- .pmic_data[1] = 0x00A4001C,
- .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
- 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
- .start_index[PM_SLEEP_MODE_STBY] = 0,
- .start_index[PM_SLEEP_MODE_SPC] = 2,
-};
-
-static inline void spm_register_write(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- if (drv->reg_data->reg_offset[reg])
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
-}
-
-/* Ensure a guaranteed write, before return */
-static inline void spm_register_write_sync(struct spm_driver_data *drv,
- enum spm_reg reg, u32 val)
-{
- u32 ret;
-
- if (!drv->reg_data->reg_offset[reg])
- return;
-
- do {
- writel_relaxed(val, drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- ret = readl_relaxed(drv->reg_base +
- drv->reg_data->reg_offset[reg]);
- if (ret == val)
- break;
- cpu_relax();
- } while (1);
-}
-
-static inline u32 spm_register_read(struct spm_driver_data *drv,
- enum spm_reg reg)
-{
- return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
-}
-
-static void spm_set_low_power_mode(struct spm_driver_data *drv,
- enum pm_sleep_mode mode)
-{
- u32 start_index;
- u32 ctl_val;
-
- start_index = drv->reg_data->start_index[mode];
-
- ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
- ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
- ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
- ctl_val |= SPM_CTL_EN;
- spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
-}
-
static int qcom_pm_collapse(unsigned long int unused)
{
qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
@@ -201,10 +61,10 @@ static int qcom_cpu_spc(struct spm_driver_data *drv)
static int spm_enter_idle_state(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int idx)
{
- struct spm_driver_data *data = container_of(drv, struct spm_driver_data,
- cpuidle_driver);
+ struct cpuidle_qcom_spm_data *data = container_of(drv, struct cpuidle_qcom_spm_data,
+ cpuidle_driver);
- return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data);
+ return CPU_PM_CPU_IDLE_ENTER_PARAM(qcom_cpu_spc, idx, data->spm);
}
static struct cpuidle_driver qcom_spm_idle_driver = {
@@ -225,134 +85,92 @@ static const struct of_device_id qcom_idle_state_match[] = {
{ },
};
-static int spm_cpuidle_init(struct cpuidle_driver *drv, int cpu)
+static int spm_cpuidle_register(struct device *cpuidle_dev, int cpu)
{
+ struct platform_device *pdev = NULL;
+ struct device_node *cpu_node, *saw_node;
+ struct cpuidle_qcom_spm_data *data = NULL;
int ret;
- memcpy(drv, &qcom_spm_idle_driver, sizeof(*drv));
- drv->cpumask = (struct cpumask *)cpumask_of(cpu);
+ cpu_node = of_cpu_device_node_get(cpu);
+ if (!cpu_node)
+ return -ENODEV;
- /* Parse idle states from device tree */
- ret = dt_init_idle_driver(drv, qcom_idle_state_match, 1);
- if (ret <= 0)
- return ret ? : -ENODEV;
+ saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
+ if (!saw_node)
+ return -ENODEV;
- /* We have atleast one power down mode */
- return qcom_scm_set_warm_boot_addr(cpu_resume_arm, drv->cpumask);
-}
+ pdev = of_find_device_by_node(saw_node);
+ of_node_put(saw_node);
+ of_node_put(cpu_node);
+ if (!pdev)
+ return -ENODEV;
-static struct spm_driver_data *spm_get_drv(struct platform_device *pdev,
- int *spm_cpu)
-{
- struct spm_driver_data *drv = NULL;
- struct device_node *cpu_node, *saw_node;
- int cpu;
- bool found = 0;
+ data = devm_kzalloc(cpuidle_dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_node = of_cpu_device_node_get(cpu);
- if (!cpu_node)
- continue;
- saw_node = of_parse_phandle(cpu_node, "qcom,saw", 0);
- found = (saw_node == pdev->dev.of_node);
- of_node_put(saw_node);
- of_node_put(cpu_node);
- if (found)
- break;
- }
+ data->spm = dev_get_drvdata(&pdev->dev);
+ if (!data->spm)
+ return -EINVAL;
- if (found) {
- drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
- if (drv)
- *spm_cpu = cpu;
- }
+ data->cpuidle_driver = qcom_spm_idle_driver;
+ data->cpuidle_driver.cpumask = (struct cpumask *)cpumask_of(cpu);
- return drv;
-}
+ ret = dt_init_idle_driver(&data->cpuidle_driver,
+ qcom_idle_state_match, 1);
+ if (ret <= 0)
+ return ret ? : -ENODEV;
-static const struct of_device_id spm_match_table[] = {
- { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
- .data = &spm_reg_8226_cpu },
- { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
- .data = &spm_reg_8974_8084_cpu },
- { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
- .data = &spm_reg_8064_cpu },
- { },
-};
+ ret = qcom_scm_set_warm_boot_addr(cpu_resume_arm, cpumask_of(cpu));
+ if (ret)
+ return ret;
+
+ return cpuidle_register(&data->cpuidle_driver, NULL);
+}
-static int spm_dev_probe(struct platform_device *pdev)
+static int spm_cpuidle_drv_probe(struct platform_device *pdev)
{
- struct spm_driver_data *drv;
- struct resource *res;
- const struct of_device_id *match_id;
- void __iomem *addr;
int cpu, ret;
if (!qcom_scm_is_available())
return -EPROBE_DEFER;
- drv = spm_get_drv(pdev, &cpu);
- if (!drv)
- return -EINVAL;
- platform_set_drvdata(pdev, drv);
+ for_each_possible_cpu(cpu) {
+ ret = spm_cpuidle_register(&pdev->dev, cpu);
+ if (ret && ret != -ENODEV) {
+ dev_err(&pdev->dev,
+ "Cannot register for CPU%d: %d\n", cpu, ret);
+ }
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(drv->reg_base))
- return PTR_ERR(drv->reg_base);
+ return 0;
+}
- match_id = of_match_node(spm_match_table, pdev->dev.of_node);
- if (!match_id)
- return -ENODEV;
+static struct platform_driver spm_cpuidle_driver = {
+ .probe = spm_cpuidle_drv_probe,
+ .driver = {
+ .name = "qcom-spm-cpuidle",
+ .suppress_bind_attrs = true,
+ },
+};
- drv->reg_data = match_id->data;
+static int __init qcom_spm_cpuidle_init(void)
+{
+ struct platform_device *pdev;
+ int ret;
- ret = spm_cpuidle_init(&drv->cpuidle_driver, cpu);
+ ret = platform_driver_register(&spm_cpuidle_driver);
if (ret)
return ret;
- /* Write the SPM sequences first.. */
- addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
- __iowrite32_copy(addr, drv->reg_data->seq,
- ARRAY_SIZE(drv->reg_data->seq) / 4);
-
- /*
- * ..and then the control registers.
- * On some SoC if the control registers are written first and if the
- * CPU was held in reset, the reset signal could trigger the SPM state
- * machine, before the sequences are completely written.
- */
- spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
- spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
- spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
- spm_register_write(drv, SPM_REG_PMIC_DATA_0,
- drv->reg_data->pmic_data[0]);
- spm_register_write(drv, SPM_REG_PMIC_DATA_1,
- drv->reg_data->pmic_data[1]);
-
- /* Set up Standby as the default low power mode */
- spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
-
- return cpuidle_register(&drv->cpuidle_driver, NULL);
-}
-
-static int spm_dev_remove(struct platform_device *pdev)
-{
- struct spm_driver_data *drv = platform_get_drvdata(pdev);
+ pdev = platform_device_register_simple("qcom-spm-cpuidle",
+ -1, NULL, 0);
+ if (IS_ERR(pdev)) {
+ platform_driver_unregister(&spm_cpuidle_driver);
+ return PTR_ERR(pdev);
+ }
- cpuidle_unregister(&drv->cpuidle_driver);
return 0;
}
-
-static struct platform_driver spm_driver = {
- .probe = spm_dev_probe,
- .remove = spm_dev_remove,
- .driver = {
- .name = "saw",
- .of_match_table = spm_match_table,
- },
-};
-
-builtin_platform_driver(spm_driver);
+device_initcall(qcom_spm_cpuidle_init);
diff --git a/drivers/cpuidle/cpuidle-tegra.c b/drivers/cpuidle/cpuidle-tegra.c
index 508bd9f23792..9845629aeb6d 100644
--- a/drivers/cpuidle/cpuidle-tegra.c
+++ b/drivers/cpuidle/cpuidle-tegra.c
@@ -337,6 +337,9 @@ static void tegra_cpuidle_setup_tegra114_c7_state(void)
static int tegra_cpuidle_probe(struct platform_device *pdev)
{
+ if (tegra_pmc_get_suspend_mode() == TEGRA_SUSPEND_NOT_READY)
+ return -EPROBE_DEFER;
+
/* LP2 could be disabled in device-tree */
if (tegra_pmc_get_suspend_mode() < TEGRA_SUSPEND_LP2)
tegra_cpuidle_disable_state(TEGRA_CC6);
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 53ec9585ccd4..469e18547d06 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -488,6 +488,7 @@ static int cpuidle_add_state_sysfs(struct cpuidle_device *device)
&kdev->kobj, "state%d", i);
if (ret) {
kobject_put(&kobj->kobj);
+ kfree(kobj);
goto error_state;
}
cpuidle_add_s2idle_attr_group(kobj);
@@ -619,6 +620,7 @@ static int cpuidle_add_driver_sysfs(struct cpuidle_device *dev)
&kdev->kobj, "driver");
if (ret) {
kobject_put(&kdrv->kobj);
+ kfree(kdrv);
return ret;
}
@@ -705,7 +707,6 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
if (!kdev)
return -ENOMEM;
kdev->dev = dev;
- dev->kobj_dev = kdev;
init_completion(&kdev->kobj_unregister);
@@ -713,9 +714,11 @@ int cpuidle_add_sysfs(struct cpuidle_device *dev)
"cpuidle");
if (error) {
kobject_put(&kdev->kobj);
+ kfree(kdev);
return error;
}
+ dev->kobj_dev = kdev;
kobject_uevent(&kdev->kobj, KOBJ_ADD);
return 0;
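
Both cpuidle sysfs hunks above follow the same rule: if kobject_init_and_add() fails, the caller must drop the reference with kobject_put(), and, because these cpuidle ktypes' release callbacks only signal a completion rather than freeing the object, the containing structure must also be freed explicitly. A hedged sketch of that error path (my_obj and my_ktype are placeholders):

```c
#include <linux/kobject.h>
#include <linux/slab.h>

struct my_obj {
	struct kobject kobj;
	/* ... */
};

/* Assumes a struct kobj_type my_ktype whose ->release() does not kfree(). */
static int my_add(struct my_obj *obj, struct kobject *parent)
{
	int ret;

	ret = kobject_init_and_add(&obj->kobj, &my_ktype, parent, "example");
	if (ret) {
		/*
		 * Drop the reference taken by kobject_init_and_add(); since
		 * the release callback does not free the object, free it
		 * here as well, mirroring the fix above.
		 */
		kobject_put(&obj->kobj);
		kfree(obj);
		return ret;
	}
	kobject_uevent(&obj->kobj, KOBJ_ADD);
	return 0;
}
```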
diff --git a/drivers/crypto/caam/caampkc.c b/drivers/crypto/caam/caampkc.c
index e313233ec6de..bf6275ffc4aa 100644
--- a/drivers/crypto/caam/caampkc.c
+++ b/drivers/crypto/caam/caampkc.c
@@ -1153,16 +1153,27 @@ static struct caam_akcipher_alg caam_rsa = {
int caam_pkc_init(struct device *ctrldev)
{
struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
- u32 pk_inst;
+ u32 pk_inst, pkha;
int err;
init_done = false;
/* Determine public key hardware accelerator presence. */
- if (priv->era < 10)
+ if (priv->era < 10) {
pk_inst = (rd_reg32(&priv->ctrl->perfmon.cha_num_ls) &
CHA_ID_LS_PK_MASK) >> CHA_ID_LS_PK_SHIFT;
- else
- pk_inst = rd_reg32(&priv->ctrl->vreg.pkha) & CHA_VER_NUM_MASK;
+ } else {
+ pkha = rd_reg32(&priv->ctrl->vreg.pkha);
+ pk_inst = pkha & CHA_VER_NUM_MASK;
+
+ /*
+ * Newer CAAMs support partially disabled functionality. If this is the
+ * case, the number is non-zero, but this bit is set to indicate that
+ * no encryption or decryption is supported; only signing and
+ * verification are supported.
+ */
+ if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
+ pk_inst = 0;
+ }
/* Do not register algorithms if PKHA is not present. */
if (!pk_inst)
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index af61f3a2c0d4..3738625c0250 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -322,6 +322,9 @@ struct version_regs {
/* CHA Miscellaneous Information - AESA_MISC specific */
#define CHA_VER_MISC_AES_GCM BIT(1 + CHA_VER_MISC_SHIFT)
+/* CHA Miscellaneous Information - PKHA_MISC specific */
+#define CHA_VER_MISC_PKHA_NO_CRYPT BIT(7 + CHA_VER_MISC_SHIFT)
+
/*
* caam_perfmon - Performance Monitor/Secure Memory Status/
* CAAM Global Status/Component Version IDs
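
Taken together, the two hunks above make PKHA detection on Era 10+ parts honor a "present but crypto-disabled" state: the instance count in the PKHA version register can be non-zero while the MISC bit says only sign/verify is available. A compressed sketch of the decision, with read access to the register abstracted away (pkha is assumed to hold the raw register value):

```c
/* Sketch: decide whether RSA algorithms may be registered. */
static bool pkha_usable(u32 pkha /* raw PKHA version register value */)
{
	u32 pk_inst = pkha & CHA_VER_NUM_MASK;

	/* Non-zero instance count, but encryption/decryption disabled. */
	if (pkha & CHA_VER_MISC_PKHA_NO_CRYPT)
		pk_inst = 0;

	return pk_inst != 0;
}
```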
diff --git a/drivers/crypto/ccp/ccp-dev-v3.c b/drivers/crypto/ccp/ccp-dev-v3.c
index 0d5576f6ad21..fe69053b2394 100644
--- a/drivers/crypto/ccp/ccp-dev-v3.c
+++ b/drivers/crypto/ccp/ccp-dev-v3.c
@@ -467,8 +467,8 @@ static int ccp_init(struct ccp_device *ccp)
cmd_q = &ccp->cmd_q[i];
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "%s-q%u", ccp->name, cmd_q->id);
+ kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@@ -477,7 +477,6 @@ static int ccp_init(struct ccp_device *ccp)
}
cmd_q->kthread = kthread;
- wake_up_process(kthread);
}
dev_dbg(dev, "Enabling interrupts...\n");
diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c
index 7838f63bab32..7b73332d6aa1 100644
--- a/drivers/crypto/ccp/ccp-dev-v5.c
+++ b/drivers/crypto/ccp/ccp-dev-v5.c
@@ -950,8 +950,8 @@ static int ccp5_init(struct ccp_device *ccp)
cmd_q = &ccp->cmd_q[i];
- kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
- "%s-q%u", ccp->name, cmd_q->id);
+ kthread = kthread_run(ccp_cmd_queue_thread, cmd_q,
+ "%s-q%u", ccp->name, cmd_q->id);
if (IS_ERR(kthread)) {
dev_err(dev, "error creating queue thread (%ld)\n",
PTR_ERR(kthread));
@@ -960,7 +960,6 @@ static int ccp5_init(struct ccp_device *ccp)
}
cmd_q->kthread = kthread;
- wake_up_process(kthread);
}
dev_dbg(dev, "Enabling interrupts...\n");
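
For reference, kthread_run() is a create-and-wake helper, which is why the explicit wake_up_process() call can be dropped in both CCP hunks above. Roughly, it behaves like the following sketch (named *_sketch here to make clear it is not the exact upstream definition):

```c
/* Roughly what kthread_run() does: create the thread, then wake it. */
#define kthread_run_sketch(threadfn, data, namefmt, ...)		   \
({									   \
	struct task_struct *__k						   \
		= kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
	if (!IS_ERR(__k))						   \
		wake_up_process(__k);					   \
	__k;								   \
})
```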
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 2ecb0e1f65d8..e09925d86bf3 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -134,7 +134,7 @@ static int sev_cmd_buffer_len(int cmd)
case SEV_CMD_DOWNLOAD_FIRMWARE: return sizeof(struct sev_data_download_firmware);
case SEV_CMD_GET_ID: return sizeof(struct sev_data_get_id);
case SEV_CMD_ATTESTATION_REPORT: return sizeof(struct sev_data_attestation_report);
- case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
+ case SEV_CMD_SEND_CANCEL: return sizeof(struct sev_data_send_cancel);
default: return 0;
}
diff --git a/drivers/crypto/ccree/cc_driver.c b/drivers/crypto/ccree/cc_driver.c
index e599ac6dc162..790fa9058a36 100644
--- a/drivers/crypto/ccree/cc_driver.c
+++ b/drivers/crypto/ccree/cc_driver.c
@@ -103,7 +103,8 @@ MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
static void init_cc_cache_params(struct cc_drvdata *drvdata)
{
struct device *dev = drvdata_to_dev(drvdata);
- u32 cache_params, ace_const, val, mask;
+ u32 cache_params, ace_const, val;
+ u64 mask;
/* compute CC_AXIM_CACHE_PARAMS */
cache_params = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));
diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h
index e89f9e0094b4..c7816c83e324 100644
--- a/drivers/crypto/chelsio/chcr_crypto.h
+++ b/drivers/crypto/chelsio/chcr_crypto.h
@@ -222,8 +222,10 @@ struct chcr_authenc_ctx {
};
struct __aead_ctx {
- struct chcr_gcm_ctx gcm[0];
- struct chcr_authenc_ctx authenc[];
+ union {
+ DECLARE_FLEX_ARRAY(struct chcr_gcm_ctx, gcm);
+ DECLARE_FLEX_ARRAY(struct chcr_authenc_ctx, authenc);
+ };
};
struct chcr_aead_ctx {
@@ -245,9 +247,11 @@ struct hmac_ctx {
};
struct __crypto_ctx {
- struct hmac_ctx hmacctx[0];
- struct ablk_ctx ablkctx[0];
- struct chcr_aead_ctx aeadctx[];
+ union {
+ DECLARE_FLEX_ARRAY(struct hmac_ctx, hmacctx);
+ DECLARE_FLEX_ARRAY(struct ablk_ctx, ablkctx);
+ DECLARE_FLEX_ARRAY(struct chcr_aead_ctx, aeadctx);
+ };
};
struct chcr_context {
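
DECLARE_FLEX_ARRAY() exists precisely so that a flexible-array member can legally sit inside a union, replacing the zero-length-array idiom removed above. A self-contained sketch of the same pattern, with made-up struct names, where one trailing variably-sized region is interpreted in one of two ways:

```c
#include <linux/slab.h>
#include <linux/stddef.h>	/* DECLARE_FLEX_ARRAY() */

struct gcm_state { u32 tag_len; };
struct auth_state { u32 mac_len; };

struct example_ctx {
	u32 mode;
	union {
		DECLARE_FLEX_ARRAY(struct gcm_state, gcm);
		DECLARE_FLEX_ARRAY(struct auth_state, authenc);
	};
};

static struct example_ctx *example_alloc(u32 mode, gfp_t gfp)
{
	/* Allocate room for exactly one of the union's views. */
	size_t extra = (mode == 0) ? sizeof(struct gcm_state)
				   : sizeof(struct auth_state);
	struct example_ctx *ctx = kzalloc(sizeof(*ctx) + extra, gfp);

	if (ctx)
		ctx->mode = mode;
	return ctx;
}
```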
diff --git a/drivers/crypto/hisilicon/qm.c b/drivers/crypto/hisilicon/qm.c
index 369562d34d66..fed52ae516ba 100644
--- a/drivers/crypto/hisilicon/qm.c
+++ b/drivers/crypto/hisilicon/qm.c
@@ -233,6 +233,8 @@
#define QM_DBG_WRITE_LEN 1024
#define QM_DBG_TMP_BUF_LEN 22
#define QM_PCI_COMMAND_INVALID ~0
+#define QM_RESET_STOP_TX_OFFSET 1
+#define QM_RESET_STOP_RX_OFFSET 2
#define WAIT_PERIOD 20
#define REMOVE_WAIT_DELAY 10
@@ -883,6 +885,20 @@ static irqreturn_t qm_mb_cmd_irq(int irq, void *data)
return IRQ_HANDLED;
}
+static void qm_set_qp_disable(struct hisi_qp *qp, int offset)
+{
+ u32 *addr;
+
+ if (qp->is_in_kernel)
+ return;
+
+ addr = (u32 *)(qp->qdma.va + qp->qdma.size) - offset;
+ *addr = 1;
+
+ /* make sure setup is completed */
+ mb();
+}
+
static irqreturn_t qm_aeq_irq(int irq, void *data)
{
struct hisi_qm *qm = data;
@@ -2467,6 +2483,15 @@ static void *qm_get_avail_sqe(struct hisi_qp *qp)
return qp->sqe + sq_tail * qp->qm->sqe_size;
}
+static void hisi_qm_unset_hw_reset(struct hisi_qp *qp)
+{
+ u64 *addr;
+
+ /* Use last 64 bits of DUS to reset status. */
+ addr = (u64 *)(qp->qdma.va + qp->qdma.size) - QM_RESET_STOP_TX_OFFSET;
+ *addr = 0;
+}
+
static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
{
struct device *dev = &qm->pdev->dev;
@@ -2492,7 +2517,7 @@ static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
}
qp = &qm->qp_array[qp_id];
-
+ hisi_qm_unset_hw_reset(qp);
memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
qp->event_cb = NULL;
@@ -2912,6 +2937,14 @@ static int hisi_qm_get_available_instances(struct uacce_device *uacce)
return hisi_qm_get_free_qp_num(uacce->priv);
}
+static void hisi_qm_set_hw_reset(struct hisi_qm *qm, int offset)
+{
+ int i;
+
+ for (i = 0; i < qm->qp_num; i++)
+ qm_set_qp_disable(&qm->qp_array[i], offset);
+}
+
static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
unsigned long arg,
struct uacce_queue *q)
@@ -3094,7 +3127,7 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
if (IS_ERR(uacce))
return PTR_ERR(uacce);
- if (uacce->flags & UACCE_DEV_SVA && qm->mode == UACCE_MODE_SVA) {
+ if (uacce->flags & UACCE_DEV_SVA) {
qm->use_sva = true;
} else {
/* only consider sva case */
@@ -3122,8 +3155,10 @@ static int qm_alloc_uacce(struct hisi_qm *qm)
else
mmio_page_nr = qm->db_interval / PAGE_SIZE;
+ /* Add one more page for device or qp status */
dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
- sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
+ sizeof(struct qm_cqe) * QM_Q_DEPTH + PAGE_SIZE) >>
+ PAGE_SHIFT;
uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
uacce->qf_pg_num[UACCE_QFRT_DUS] = dus_page_nr;
@@ -3367,8 +3402,10 @@ void hisi_qm_uninit(struct hisi_qm *qm)
qm_irq_unregister(qm);
hisi_qm_pci_uninit(qm);
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->use_sva) {
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ }
up_write(&qm->qps_lock);
}
@@ -3682,11 +3719,13 @@ int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
if (qm->status.stop_reason == QM_SOFT_RESET ||
qm->status.stop_reason == QM_FLR) {
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
ret = qm_stop_started_qp(qm);
if (ret < 0) {
dev_err(dev, "Failed to stop started qp!\n");
goto err_unlock;
}
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
}
/* Mask eq and aeq irq */
@@ -4185,7 +4224,7 @@ static ssize_t qm_qos_value_init(const char *buf, unsigned long *val)
return -EINVAL;
}
- ret = sscanf(buf, "%ld", val);
+ ret = sscanf(buf, "%lu", val);
if (ret != QM_QOS_VAL_NUM)
return -EINVAL;
@@ -5045,6 +5084,8 @@ static int qm_controller_reset(struct hisi_qm *qm)
ret = qm_controller_reset_prepare(qm);
if (ret) {
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
clear_bit(QM_RST_SCHED, &qm->misc_ctl);
return ret;
}
@@ -5131,6 +5172,8 @@ void hisi_qm_reset_prepare(struct pci_dev *pdev)
ret = hisi_qm_stop(qm, QM_FLR);
if (ret) {
pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
return;
}
@@ -5314,9 +5357,14 @@ static void qm_pf_reset_vf_prepare(struct hisi_qm *qm,
atomic_set(&qm->status.flags, QM_STOP);
cmd = QM_VF_PREPARE_FAIL;
goto err_prepare;
+ } else {
+ goto out;
}
err_prepare:
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_TX_OFFSET);
+ hisi_qm_set_hw_reset(qm, QM_RESET_STOP_RX_OFFSET);
+out:
pci_save_state(pdev);
ret = qm->ops->ping_pf(qm, cmd);
if (ret)
@@ -5777,9 +5825,11 @@ int hisi_qm_init(struct hisi_qm *qm)
goto err_irq_register;
}
- ret = qm_alloc_uacce(qm);
- if (ret < 0)
- dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+ if (qm->mode == UACCE_MODE_SVA) {
+ ret = qm_alloc_uacce(qm);
+ if (ret < 0)
+ dev_warn(dev, "fail to alloc uacce (%d)\n", ret);
+ }
ret = hisi_qm_memory_init(qm);
if (ret)
@@ -5792,8 +5842,10 @@ int hisi_qm_init(struct hisi_qm *qm)
return 0;
err_alloc_uacce:
- uacce_remove(qm->uacce);
- qm->uacce = NULL;
+ if (qm->use_sva) {
+ uacce_remove(qm->uacce);
+ qm->uacce = NULL;
+ }
err_irq_register:
qm_irq_unregister(qm);
err_pci_init:
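
The qm.c changes above reserve one extra page at the end of the DUS region and use its last two 32-bit words as "stop TX" / "stop RX" flags that the driver raises around reset. The layout below is inferred from qm_set_qp_disable() in the hunk (offset 1 for TX, offset 2 for RX), not from a documented ABI, so treat this as a hedged sketch of how a consumer of the mapped region could poll those flags:

```c
/*
 * Sketch: the driver writes 1 into the last two u32 slots of the DUS
 * region. A queue user could check them before issuing more work.
 */
static bool queue_stopped(const void *dus_va, size_t dus_size)
{
	const u32 *end = (const u32 *)((const char *)dus_va + dus_size);
	u32 stop_tx = READ_ONCE(end[-1]);	/* QM_RESET_STOP_TX_OFFSET */
	u32 stop_rx = READ_ONCE(end[-2]);	/* QM_RESET_STOP_RX_OFFSET */

	return stop_tx || stop_rx;
}
```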
diff --git a/drivers/crypto/hisilicon/zip/zip_main.c b/drivers/crypto/hisilicon/zip/zip_main.c
index 7148201ce76e..873971ef9aee 100644
--- a/drivers/crypto/hisilicon/zip/zip_main.c
+++ b/drivers/crypto/hisilicon/zip/zip_main.c
@@ -218,7 +218,7 @@ static const struct debugfs_reg32 hzip_dfx_regs[] = {
{"HZIP_AVG_DELAY ", 0x28ull},
{"HZIP_MEM_VISIBLE_DATA ", 0x30ull},
{"HZIP_MEM_VISIBLE_ADDR ", 0x34ull},
- {"HZIP_COMSUMED_BYTE ", 0x38ull},
+ {"HZIP_CONSUMED_BYTE ", 0x38ull},
{"HZIP_PRODUCED_BYTE ", 0x40ull},
{"HZIP_COMP_INF ", 0x70ull},
{"HZIP_PRE_OUT ", 0x78ull},
diff --git a/drivers/crypto/img-hash.c b/drivers/crypto/img-hash.c
index aa4c7b2af3e2..d8e82d69745d 100644
--- a/drivers/crypto/img-hash.c
+++ b/drivers/crypto/img-hash.c
@@ -674,14 +674,12 @@ static int img_hash_digest(struct ahash_request *req)
static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
{
struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- int err = -ENOMEM;
ctx->fallback = crypto_alloc_ahash(alg_name, 0,
CRYPTO_ALG_NEED_FALLBACK);
if (IS_ERR(ctx->fallback)) {
pr_err("img_hash: Could not load fallback driver.\n");
- err = PTR_ERR(ctx->fallback);
- goto err;
+ return PTR_ERR(ctx->fallback);
}
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct img_hash_request_ctx) +
@@ -689,9 +687,6 @@ static int img_hash_cra_init(struct crypto_tfm *tfm, const char *alg_name)
IMG_HASH_DMA_THRESHOLD);
return 0;
-
-err:
- return err;
}
static int img_hash_cra_md5_init(struct crypto_tfm *tfm)
diff --git a/drivers/crypto/keembay/Kconfig b/drivers/crypto/keembay/Kconfig
index 00cf8f028cb9..7942b48dd55a 100644
--- a/drivers/crypto/keembay/Kconfig
+++ b/drivers/crypto/keembay/Kconfig
@@ -39,6 +39,25 @@ config CRYPTO_DEV_KEEMBAY_OCS_AES_SM4_CTS
Intel does not recommend use of CTS mode with AES/SM4.
+config CRYPTO_DEV_KEEMBAY_OCS_ECC
+ tristate "Support for Intel Keem Bay OCS ECC HW acceleration"
+ depends on ARCH_KEEMBAY || COMPILE_TEST
+ depends on OF || COMPILE_TEST
+ depends on HAS_IOMEM
+ select CRYPTO_ECDH
+ select CRYPTO_ENGINE
+ help
+ Support for Intel Keem Bay Offload and Crypto Subsystem (OCS)
+ Elliptic Curve Cryptography (ECC) hardware acceleration for use with
+ Crypto API.
+
+ Provides OCS acceleration for ECDH-256 and ECDH-384.
+
+ Say Y or M if you are compiling for the Intel Keem Bay SoC. The
+ module will be called keembay-ocs-ecc.
+
+ If unsure, say N.
+
config CRYPTO_DEV_KEEMBAY_OCS_HCU
tristate "Support for Intel Keem Bay OCS HCU HW acceleration"
select CRYPTO_HASH
diff --git a/drivers/crypto/keembay/Makefile b/drivers/crypto/keembay/Makefile
index aea03d4432c4..7c12c3c138bd 100644
--- a/drivers/crypto/keembay/Makefile
+++ b/drivers/crypto/keembay/Makefile
@@ -4,5 +4,7 @@
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_AES_SM4) += keembay-ocs-aes.o
keembay-ocs-aes-objs := keembay-ocs-aes-core.o ocs-aes.o
+obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_ECC) += keembay-ocs-ecc.o
+
obj-$(CONFIG_CRYPTO_DEV_KEEMBAY_OCS_HCU) += keembay-ocs-hcu.o
keembay-ocs-hcu-objs := keembay-ocs-hcu-core.o ocs-hcu.o
diff --git a/drivers/crypto/keembay/keembay-ocs-ecc.c b/drivers/crypto/keembay/keembay-ocs-ecc.c
new file mode 100644
index 000000000000..679e6ae295e0
--- /dev/null
+++ b/drivers/crypto/keembay/keembay-ocs-ecc.c
@@ -0,0 +1,1017 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel Keem Bay OCS ECC Crypto Driver.
+ *
+ * Copyright (C) 2019-2021 Intel Corporation
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/crypto.h>
+#include <linux/delay.h>
+#include <linux/fips.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <crypto/ecc_curve.h>
+#include <crypto/ecdh.h>
+#include <crypto/engine.h>
+#include <crypto/kpp.h>
+#include <crypto/rng.h>
+
+#include <crypto/internal/ecc.h>
+#include <crypto/internal/kpp.h>
+
+#define DRV_NAME "keembay-ocs-ecc"
+
+#define KMB_OCS_ECC_PRIORITY 350
+
+#define HW_OFFS_OCS_ECC_COMMAND 0x00000000
+#define HW_OFFS_OCS_ECC_STATUS 0x00000004
+#define HW_OFFS_OCS_ECC_DATA_IN 0x00000080
+#define HW_OFFS_OCS_ECC_CX_DATA_OUT 0x00000100
+#define HW_OFFS_OCS_ECC_CY_DATA_OUT 0x00000180
+#define HW_OFFS_OCS_ECC_ISR 0x00000400
+#define HW_OFFS_OCS_ECC_IER 0x00000404
+
+#define HW_OCS_ECC_ISR_INT_STATUS_DONE BIT(0)
+#define HW_OCS_ECC_COMMAND_INS_BP BIT(0)
+
+#define HW_OCS_ECC_COMMAND_START_VAL BIT(0)
+
+#define OCS_ECC_OP_SIZE_384 BIT(8)
+#define OCS_ECC_OP_SIZE_256 0
+
+/* ECC Instruction : for ECC_COMMAND */
+#define OCS_ECC_INST_WRITE_AX (0x1 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_AY (0x2 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BX_D (0x3 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_BY_L (0x4 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_P (0x5 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_WRITE_A (0x6 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_D_IDX_A (0x8 << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_POW_B_MODP (0xB << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_MUL_B_MODP (0xC << HW_OCS_ECC_COMMAND_INS_BP)
+#define OCS_ECC_INST_CALC_A_ADD_B_MODP (0xD << HW_OCS_ECC_COMMAND_INS_BP)
+
+#define ECC_ENABLE_INTR 1
+
+#define POLL_USEC 100
+#define TIMEOUT_USEC 10000
+
+#define KMB_ECC_VLI_MAX_DIGITS ECC_CURVE_NIST_P384_DIGITS
+#define KMB_ECC_VLI_MAX_BYTES (KMB_ECC_VLI_MAX_DIGITS \
+ << ECC_DIGITS_TO_BYTES_SHIFT)
+
+#define POW_CUBE 3
+
+/**
+ * struct ocs_ecc_dev - ECC device context
+ * @list: List of device contexts
+ * @dev: OCS ECC device
+ * @base_reg: IO base address of OCS ECC
+ * @engine: Crypto engine for the device
+ * @irq_done: IRQ done completion.
+ * @irq: IRQ number
+ */
+struct ocs_ecc_dev {
+ struct list_head list;
+ struct device *dev;
+ void __iomem *base_reg;
+ struct crypto_engine *engine;
+ struct completion irq_done;
+ int irq;
+};
+
+/**
+ * struct ocs_ecc_ctx - Transformation context.
+ * @engine_ctx: Crypto engine ctx.
+ * @ecc_dev: The ECC driver associated with this context.
+ * @curve: The elliptic curve used by this transformation.
+ * @private_key: The private key.
+ */
+struct ocs_ecc_ctx {
+ struct crypto_engine_ctx engine_ctx;
+ struct ocs_ecc_dev *ecc_dev;
+ const struct ecc_curve *curve;
+ u64 private_key[KMB_ECC_VLI_MAX_DIGITS];
+};
+
+/* Driver data. */
+struct ocs_ecc_drv {
+ struct list_head dev_list;
+ spinlock_t lock; /* Protects dev_list. */
+};
+
+/* Global variable holding the list of OCS ECC devices (only one expected). */
+static struct ocs_ecc_drv ocs_ecc = {
+ .dev_list = LIST_HEAD_INIT(ocs_ecc.dev_list),
+ .lock = __SPIN_LOCK_UNLOCKED(ocs_ecc.lock),
+};
+
+/* Get OCS ECC tfm context from kpp_request. */
+static inline struct ocs_ecc_ctx *kmb_ocs_ecc_tctx(struct kpp_request *req)
+{
+ return kpp_tfm_ctx(crypto_kpp_reqtfm(req));
+}
+
+/* Converts number of digits to number of bytes. */
+static inline unsigned int digits_to_bytes(unsigned int n)
+{
+ return n << ECC_DIGITS_TO_BYTES_SHIFT;
+}
+
+/*
+ * Wait for the ECC engine to become idle, i.e. until any operation
+ * (other than a write operation) has completed.
+ */
+static inline int ocs_ecc_wait_idle(struct ocs_ecc_dev *dev)
+{
+ u32 value;
+
+ return readl_poll_timeout((dev->base_reg + HW_OFFS_OCS_ECC_STATUS),
+ value,
+ !(value & HW_OCS_ECC_ISR_INT_STATUS_DONE),
+ POLL_USEC, TIMEOUT_USEC);
+}
+
+static void ocs_ecc_cmd_start(struct ocs_ecc_dev *ecc_dev, u32 op_size)
+{
+ iowrite32(op_size | HW_OCS_ECC_COMMAND_START_VAL,
+ ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+}
+
+/* Direct write of u32 buffer to ECC engine with associated instruction. */
+static void ocs_ecc_write_cmd_and_data(struct ocs_ecc_dev *dev,
+ u32 op_size,
+ u32 inst,
+ const void *data_in,
+ size_t data_size)
+{
+ iowrite32(op_size | inst, dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ /* MMIO Write src uint32 to dst. */
+ memcpy_toio(dev->base_reg + HW_OFFS_OCS_ECC_DATA_IN, data_in,
+ data_size);
+}
+
+/* Start OCS ECC operation and wait for its completion. */
+static int ocs_ecc_trigger_op(struct ocs_ecc_dev *ecc_dev, u32 op_size,
+ u32 inst)
+{
+ reinit_completion(&ecc_dev->irq_done);
+
+ iowrite32(ECC_ENABLE_INTR, ecc_dev->base_reg + HW_OFFS_OCS_ECC_IER);
+ iowrite32(op_size | inst, ecc_dev->base_reg + HW_OFFS_OCS_ECC_COMMAND);
+
+ return wait_for_completion_interruptible(&ecc_dev->irq_done);
+}
+
+/**
+ * ocs_ecc_read_cx_out() - Read the CX data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cx_out: The buffer where to store the CX value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cx_out(struct ocs_ecc_dev *dev, void *cx_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cx_out, dev->base_reg + HW_OFFS_OCS_ECC_CX_DATA_OUT,
+ byte_count);
+}
+
+/**
+ * ocs_ecc_read_cy_out() - Read the CY data output buffer.
+ * @dev: The OCS ECC device to read from.
+ * @cy_out: The buffer where to store the CY value. Must be at least
+ * @byte_count byte long.
+ * @byte_count: The amount of data to read.
+ */
+static inline void ocs_ecc_read_cy_out(struct ocs_ecc_dev *dev, void *cy_out,
+ size_t byte_count)
+{
+ memcpy_fromio(cy_out, dev->base_reg + HW_OFFS_OCS_ECC_CY_DATA_OUT,
+ byte_count);
+}
+
+static struct ocs_ecc_dev *kmb_ocs_ecc_find_dev(struct ocs_ecc_ctx *tctx)
+{
+ if (tctx->ecc_dev)
+ return tctx->ecc_dev;
+
+ spin_lock(&ocs_ecc.lock);
+
+ /* Only a single OCS device available. */
+ tctx->ecc_dev = list_first_entry(&ocs_ecc.dev_list, struct ocs_ecc_dev,
+ list);
+
+ spin_unlock(&ocs_ecc.lock);
+
+ return tctx->ecc_dev;
+}
+
+/* Do point multiplication using OCS ECC HW. */
+static int kmb_ecc_point_mult(struct ocs_ecc_dev *ecc_dev,
+ struct ecc_point *result,
+ const struct ecc_point *point,
+ u64 *scalar,
+ const struct ecc_curve *curve)
+{
+ u8 sca[KMB_ECC_VLI_MAX_BYTES]; /* Use the maximum data size. */
+ u32 op_size = (curve->g.ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ int rc = 0;
+
+ /* Generate nbytes of random data for simple and differential SCA protection. */
+ rc = crypto_get_default_rng();
+ if (rc)
+ return rc;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, sca, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ return rc;
+
+ /* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send ecc_start pulse as well as indicating operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+ /* Write ax param; Base point (Gx). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ point->x, nbytes);
+
+ /* Write ay param; Base point (Gy). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ point->y, nbytes);
+
+ /*
+ * Write the private key into DATA_IN reg.
+ *
+ * Since the DATA_IN register is used to write different values during
+ * the computation, the private key value is overwritten with the
+ * side-channel-resistance value.
+ */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BX_D,
+ scalar, nbytes);
+
+ /* Write operand by/l. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_BY_L,
+ sca, nbytes);
+ memzero_explicit(sca, sizeof(sca));
+
+ /* Write p = curve prime(GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+ /* Write a = curve coefficient. */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_A,
+ curve->a, nbytes);
+
+ /* Make hardware perform the multiplication. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, OCS_ECC_INST_CALC_D_IDX_A);
+ if (rc)
+ return rc;
+
+ /* Read result. */
+ ocs_ecc_read_cx_out(ecc_dev, result->x, nbytes);
+ ocs_ecc_read_cy_out(ecc_dev, result->y, nbytes);
+
+ return 0;
+}
+
+/**
+ * kmb_ecc_do_scalar_op() - Perform Scalar operation using OCS ECC HW.
+ * @ecc_dev: The OCS ECC device to use.
+ * @scalar_out: Where to store the output scalar.
+ * @scalar_a: Input scalar operand 'a'.
+ * @scalar_b: Input scalar operand 'b'
+ * @curve: The curve on which the operation is performed.
+ * @ndigits: The size of the operands (in digits).
+ * @inst: The operation to perform (as an OCS ECC instruction).
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int kmb_ecc_do_scalar_op(struct ocs_ecc_dev *ecc_dev, u64 *scalar_out,
+ const u64 *scalar_a, const u64 *scalar_b,
+ const struct ecc_curve *curve,
+ unsigned int ndigits, const u32 inst)
+{
+ u32 op_size = (ndigits > ECC_CURVE_NIST_P256_DIGITS) ?
+ OCS_ECC_OP_SIZE_384 : OCS_ECC_OP_SIZE_256;
+ size_t nbytes = digits_to_bytes(ndigits);
+ int rc;
+
+ /* Wait for the engine to be idle before starting a new operation. */
+ rc = ocs_ecc_wait_idle(ecc_dev);
+ if (rc)
+ return rc;
+
+ /* Send ecc_start pulse as well as indicating operation size. */
+ ocs_ecc_cmd_start(ecc_dev, op_size);
+
+ /* Write ax param; Base point (Gx). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AX,
+ scalar_a, nbytes);
+
+ /* Write ay param; Base point (Gy). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_AY,
+ scalar_b, nbytes);
+
+ /* Write p = curve prime (GF modulus). */
+ ocs_ecc_write_cmd_and_data(ecc_dev, op_size, OCS_ECC_INST_WRITE_P,
+ curve->p, nbytes);
+
+ /* Give instruction A.B or A+B to ECC engine. */
+ rc = ocs_ecc_trigger_op(ecc_dev, op_size, inst);
+ if (rc)
+ return rc;
+
+ ocs_ecc_read_cx_out(ecc_dev, scalar_out, nbytes);
+
+ if (vli_is_zero(scalar_out, ndigits))
+ return -EINVAL;
+
+ return 0;
+}
+
+/* SP800-56A section 5.6.2.3.4 partial verification: ephemeral keys only */
+static int kmb_ocs_ecc_is_pubkey_valid_partial(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ u64 xxx[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 yy[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ u64 w[KMB_ECC_VLI_MAX_DIGITS] = { 0 };
+ int rc;
+
+ if (WARN_ON(pk->ndigits != curve->g.ndigits))
+ return -EINVAL;
+
+ /* Check 1: Verify key is not the zero point. */
+ if (ecc_point_is_zero(pk))
+ return -EINVAL;
+
+ /* Check 2: Verify key is in the range [0, p-1]. */
+ if (vli_cmp(curve->p, pk->x, pk->ndigits) != 1)
+ return -EINVAL;
+
+ if (vli_cmp(curve->p, pk->y, pk->ndigits) != 1)
+ return -EINVAL;
+
+ /* Check 3: Verify that y^2 == (x^3 + a·x + b) mod p */
+
+ /* y^2 */
+ /* Compute y^2 -> store in yy */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, yy, pk->y, pk->y, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* x^3 */
+ /* Assigning w = 3, used for calculating x^3. */
+ w[0] = POW_CUBE;
+ /* Compute x^3 = x^w mod p -> store in xxx. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, xxx, pk->x, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_POW_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do a*x -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, curve->a, pk->x, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_MUL_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Do ax + b == w + b; store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, w, curve->b, curve,
+ pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* x^3 + ax + b == x^3 + w -> store in w. */
+ rc = kmb_ecc_do_scalar_op(ecc_dev, w, xxx, w, curve, pk->ndigits,
+ OCS_ECC_INST_CALC_A_ADD_B_MODP);
+ if (rc)
+ goto exit;
+
+ /* Compare y^2 == x^3 + a·x + b. */
+ rc = vli_cmp(yy, w, pk->ndigits);
+ if (rc)
+ rc = -EINVAL;
+
+exit:
+ memzero_explicit(xxx, sizeof(xxx));
+ memzero_explicit(yy, sizeof(yy));
+ memzero_explicit(w, sizeof(w));
+
+ return rc;
+}
+
+/* SP800-56A section 5.6.2.3.3 full verification */
+static int kmb_ocs_ecc_is_pubkey_valid_full(struct ocs_ecc_dev *ecc_dev,
+ const struct ecc_curve *curve,
+ struct ecc_point *pk)
+{
+ struct ecc_point *nQ;
+ int rc;
+
+ /* Checks 1 through 3 */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ return rc;
+
+ /* Check 4: Verify that nQ is the zero point. */
+ nQ = ecc_alloc_point(pk->ndigits);
+ if (!nQ)
+ return -ENOMEM;
+
+ rc = kmb_ecc_point_mult(ecc_dev, nQ, pk, curve->n, curve);
+ if (rc)
+ goto exit;
+
+ if (!ecc_point_is_zero(nQ))
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(nQ);
+
+ return rc;
+}
+
+static int kmb_ecc_is_key_valid(const struct ecc_curve *curve,
+ const u64 *private_key, size_t private_key_len)
+{
+ size_t ndigits = curve->g.ndigits;
+ u64 one[KMB_ECC_VLI_MAX_DIGITS] = {1};
+ u64 res[KMB_ECC_VLI_MAX_DIGITS];
+
+ if (private_key_len != digits_to_bytes(ndigits))
+ return -EINVAL;
+
+ if (!private_key)
+ return -EINVAL;
+
+ /* Make sure the private key is in the range [2, n-3]. */
+ if (vli_cmp(one, private_key, ndigits) != -1)
+ return -EINVAL;
+
+ vli_sub(res, curve->n, one, ndigits);
+ vli_sub(res, res, one, ndigits);
+ if (vli_cmp(res, private_key, ndigits) != 1)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * ECC private keys are generated using the method of extra random bits,
+ * equivalent to that described in FIPS 186-4, Appendix B.4.1.
+ *
+ * d = (c mod (n-1)) + 1 where c is a string of random bits, 64 bits longer
+ * than requested
+ * 0 <= c mod(n-1) <= n-2 and implies that
+ * 1 <= d <= n-1
+ *
+ * This method generates a private key uniformly distributed in the range
+ * [1, n-1].
+ */
+static int kmb_ecc_gen_privkey(const struct ecc_curve *curve, u64 *privkey)
+{
+ size_t nbytes = digits_to_bytes(curve->g.ndigits);
+ u64 priv[KMB_ECC_VLI_MAX_DIGITS];
+ size_t nbits;
+ int rc;
+
+ nbits = vli_num_bits(curve->n, curve->g.ndigits);
+
+ /* Check that N is included in Table 1 of FIPS 186-4, section 6.1.1 */
+ if (nbits < 160 || curve->g.ndigits > ARRAY_SIZE(priv))
+ return -EINVAL;
+
+ /*
+ * FIPS 186-4 recommends that the private key should be obtained from a
+ * RBG with a security strength equal to or greater than the security
+ * strength associated with N.
+ *
+ * The maximum security strength identified by NIST SP800-57pt1r4 for
+ * ECC is 256 (N >= 512).
+ *
+ * This condition is met by the default RNG because it selects a favored
+ * DRBG with a security strength of 256.
+ */
+ if (crypto_get_default_rng())
+ return -EFAULT;
+
+ rc = crypto_rng_get_bytes(crypto_default_rng, (u8 *)priv, nbytes);
+ crypto_put_default_rng();
+ if (rc)
+ goto cleanup;
+
+ rc = kmb_ecc_is_key_valid(curve, priv, nbytes);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits(priv, privkey, curve->g.ndigits);
+
+cleanup:
+ memzero_explicit(&priv, sizeof(priv));
+
+ return rc;
+}
+
+static int kmb_ocs_ecdh_set_secret(struct crypto_kpp *tfm, const void *buf,
+ unsigned int len)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+ struct ecdh params;
+ int rc = 0;
+
+ rc = crypto_ecdh_decode_key(buf, len, &params);
+ if (rc)
+ goto cleanup;
+
+ /* Ensure key size is not bigger than expected. */
+ if (params.key_size > digits_to_bytes(tctx->curve->g.ndigits)) {
+ rc = -EINVAL;
+ goto cleanup;
+ }
+
+ /* Auto-generate the private key if one was not provided. */
+ if (!params.key || !params.key_size) {
+ rc = kmb_ecc_gen_privkey(tctx->curve, tctx->private_key);
+ goto cleanup;
+ }
+
+ rc = kmb_ecc_is_key_valid(tctx->curve, (const u64 *)params.key,
+ params.key_size);
+ if (rc)
+ goto cleanup;
+
+ ecc_swap_digits((const u64 *)params.key, tctx->private_key,
+ tctx->curve->g.ndigits);
+cleanup:
+ memzero_explicit(&params, sizeof(params));
+
+ if (rc)
+ tctx->curve = NULL;
+
+ return rc;
+}
+
+/* Compute shared secret. */
+static int kmb_ecc_do_shared_secret(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ const struct ecc_curve *curve = tctx->curve;
+ u64 shared_secret[KMB_ECC_VLI_MAX_DIGITS];
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ size_t copied, nbytes, pubk_len;
+ struct ecc_point *pk, *result;
+ int rc;
+
+ nbytes = digits_to_bytes(curve->g.ndigits);
+
+ /* Public key is a point, thus it has two coordinates */
+ pubk_len = 2 * nbytes;
+
+ /* Copy public key from SG list to pubk_buf. */
+ copied = sg_copy_to_buffer(req->src,
+ sg_nents_for_len(req->src, pubk_len),
+ pubk_buf, pubk_len);
+ if (copied != pubk_len)
+ return -EINVAL;
+
+ /* Allocate and initialize public key point. */
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ ecc_swap_digits(pubk_buf, pk->x, curve->g.ndigits);
+ ecc_swap_digits(&pubk_buf[curve->g.ndigits], pk->y, curve->g.ndigits);
+
+ /*
+ * Check the public key for the following:
+ * Check 1: Verify key is not the zero point.
+ * Check 2: Verify key is in the range [1, p-1].
+ * Check 3: Verify that y^2 == (x^3 + a·x + b) mod p
+ */
+ rc = kmb_ocs_ecc_is_pubkey_valid_partial(ecc_dev, curve, pk);
+ if (rc)
+ goto exit_free_pk;
+
+ /* Allocate point for storing computed shared secret. */
+ result = ecc_alloc_point(pk->ndigits);
+ if (!result) {
+ rc = -ENOMEM;
+ goto exit_free_pk;
+ }
+
+ /* Calculate the shared secret.*/
+ rc = kmb_ecc_point_mult(ecc_dev, result, pk, tctx->private_key, curve);
+ if (rc)
+ goto exit_free_result;
+
+ if (ecc_point_is_zero(result)) {
+ rc = -EFAULT;
+ goto exit_free_result;
+ }
+
+ /* Copy shared secret from point to buffer. */
+ ecc_swap_digits(result->x, shared_secret, result->ndigits);
+
+ /* The request might ask for fewer bytes than we have. */
+ nbytes = min_t(size_t, nbytes, req->dst_len);
+
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, nbytes),
+ shared_secret, nbytes);
+
+ if (copied != nbytes)
+ rc = -EINVAL;
+
+ memzero_explicit(shared_secret, sizeof(shared_secret));
+
+exit_free_result:
+ ecc_free_point(result);
+
+exit_free_pk:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+/* Compute public key. */
+static int kmb_ecc_do_public_key(struct ocs_ecc_ctx *tctx,
+ struct kpp_request *req)
+{
+ const struct ecc_curve *curve = tctx->curve;
+ u64 pubk_buf[KMB_ECC_VLI_MAX_DIGITS * 2];
+ struct ecc_point *pk;
+ size_t pubk_len;
+ size_t copied;
+ int rc;
+
+ /* Public key is a point, so it has double the digits. */
+ pubk_len = 2 * digits_to_bytes(curve->g.ndigits);
+
+ pk = ecc_alloc_point(curve->g.ndigits);
+ if (!pk)
+ return -ENOMEM;
+
+ /* Public Key(pk) = priv * G. */
+ rc = kmb_ecc_point_mult(tctx->ecc_dev, pk, &curve->g, tctx->private_key,
+ curve);
+ if (rc)
+ goto exit;
+
+ /* SP800-56A rev 3 5.6.2.1.3 key check */
+ if (kmb_ocs_ecc_is_pubkey_valid_full(tctx->ecc_dev, curve, pk)) {
+ rc = -EAGAIN;
+ goto exit;
+ }
+
+ /* Copy public key from point to buffer. */
+ ecc_swap_digits(pk->x, pubk_buf, pk->ndigits);
+ ecc_swap_digits(pk->y, &pubk_buf[pk->ndigits], pk->ndigits);
+
+ /* Copy public key to req->dst. */
+ copied = sg_copy_from_buffer(req->dst,
+ sg_nents_for_len(req->dst, pubk_len),
+ pubk_buf, pubk_len);
+
+ if (copied != pubk_len)
+ rc = -EINVAL;
+
+exit:
+ ecc_free_point(pk);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_do_one_request(struct crypto_engine *engine,
+ void *areq)
+{
+ struct kpp_request *req = container_of(areq, struct kpp_request, base);
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ struct ocs_ecc_dev *ecc_dev = tctx->ecc_dev;
+ int rc;
+
+ if (req->src)
+ rc = kmb_ecc_do_shared_secret(tctx, req);
+ else
+ rc = kmb_ecc_do_public_key(tctx, req);
+
+ crypto_finalize_kpp_request(ecc_dev->engine, req, rc);
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_generate_public_key(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Check the request dst is big enough to hold the public key. */
+ if (req->dst_len < (2 * digits_to_bytes(curve->g.ndigits)))
+ return -EINVAL;
+
+ /* 'src' must not be present when generating the public key. */
+ if (req->src)
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ocs_ecdh_compute_shared_secret(struct kpp_request *req)
+{
+ struct ocs_ecc_ctx *tctx = kmb_ocs_ecc_tctx(req);
+ const struct ecc_curve *curve = tctx->curve;
+
+ /* Ensure kmb_ocs_ecdh_set_secret() has been successfully called. */
+ if (!tctx->curve)
+ return -EINVAL;
+
+ /* Ensure dst is present. */
+ if (!req->dst)
+ return -EINVAL;
+
+ /* Ensure src is present. */
+ if (!req->src)
+ return -EINVAL;
+
+ /*
+ * req->src is expected to be the (other-side) public key, so its length
+ * must be 2 * coordinate size (in bytes).
+ */
+ if (req->src_len != 2 * digits_to_bytes(curve->g.ndigits))
+ return -EINVAL;
+
+ return crypto_transfer_kpp_request_to_engine(tctx->ecc_dev->engine,
+ req);
+}
+
+static int kmb_ecc_tctx_init(struct ocs_ecc_ctx *tctx, unsigned int curve_id)
+{
+ memset(tctx, 0, sizeof(*tctx));
+
+ tctx->ecc_dev = kmb_ocs_ecc_find_dev(tctx);
+
+ if (IS_ERR(tctx->ecc_dev)) {
+ pr_err("Failed to find the device : %ld\n",
+ PTR_ERR(tctx->ecc_dev));
+ return PTR_ERR(tctx->ecc_dev);
+ }
+
+ tctx->curve = ecc_get_curve(curve_id);
+ if (!tctx->curve)
+ return -EOPNOTSUPP;
+
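+ /*
+ * Only a do_one_request handler is needed; no separate prepare or
+ * unprepare step is used for ECC requests.
+ */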
+ tctx->engine_ctx.op.prepare_request = NULL;
+ tctx->engine_ctx.op.do_one_request = kmb_ocs_ecc_do_one_request;
+ tctx->engine_ctx.op.unprepare_request = NULL;
+
+ return 0;
+}
+
+static int kmb_ocs_ecdh_nist_p256_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P256);
+}
+
+static int kmb_ocs_ecdh_nist_p384_init_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ return kmb_ecc_tctx_init(tctx, ECC_CURVE_NIST_P384);
+}
+
+static void kmb_ocs_ecdh_exit_tfm(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ memzero_explicit(tctx->private_key, sizeof(*tctx->private_key));
+}
+
+static unsigned int kmb_ocs_ecdh_max_size(struct crypto_kpp *tfm)
+{
+ struct ocs_ecc_ctx *tctx = kpp_tfm_ctx(tfm);
+
+ /* Public key is made of two coordinates, so double the digits. */
+ return digits_to_bytes(tctx->curve->g.ndigits) * 2;
+}
+
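+/*
+ * Two KPP algorithm instances are registered, one per supported NIST
+ * curve; they differ only in the per-curve init callback and the
+ * algorithm names.
+ */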
+static struct kpp_alg ocs_ecdh_p256 = {
+ .set_secret = kmb_ocs_ecdh_set_secret,
+ .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .init = kmb_ocs_ecdh_nist_p256_init_tfm,
+ .exit = kmb_ocs_ecdh_exit_tfm,
+ .max_size = kmb_ocs_ecdh_max_size,
+ .base = {
+ .cra_name = "ecdh-nist-p256",
+ .cra_driver_name = "ecdh-nist-p256-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+};
+
+static struct kpp_alg ocs_ecdh_p384 = {
+ .set_secret = kmb_ocs_ecdh_set_secret,
+ .generate_public_key = kmb_ocs_ecdh_generate_public_key,
+ .compute_shared_secret = kmb_ocs_ecdh_compute_shared_secret,
+ .init = kmb_ocs_ecdh_nist_p384_init_tfm,
+ .exit = kmb_ocs_ecdh_exit_tfm,
+ .max_size = kmb_ocs_ecdh_max_size,
+ .base = {
+ .cra_name = "ecdh-nist-p384",
+ .cra_driver_name = "ecdh-nist-p384-keembay-ocs",
+ .cra_priority = KMB_OCS_ECC_PRIORITY,
+ .cra_module = THIS_MODULE,
+ .cra_ctxsize = sizeof(struct ocs_ecc_ctx),
+ },
+};
+
+static irqreturn_t ocs_ecc_irq_handler(int irq, void *dev_id)
+{
+ struct ocs_ecc_dev *ecc_dev = dev_id;
+ u32 status;
+
+ /*
+ * Read the status register and write it back to clear the
+ * DONE_INT_STATUS bit.
+ */
+ status = ioread32(ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+ iowrite32(status, ecc_dev->base_reg + HW_OFFS_OCS_ECC_ISR);
+
+ if (!(status & HW_OCS_ECC_ISR_INT_STATUS_DONE))
+ return IRQ_NONE;
+
+ complete(&ecc_dev->irq_done);
+
+ return IRQ_HANDLED;
+}
+
+static int kmb_ocs_ecc_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ocs_ecc_dev *ecc_dev;
+ int rc;
+
+ ecc_dev = devm_kzalloc(dev, sizeof(*ecc_dev), GFP_KERNEL);
+ if (!ecc_dev)
+ return -ENOMEM;
+
+ ecc_dev->dev = dev;
+
+ platform_set_drvdata(pdev, ecc_dev);
+
+ INIT_LIST_HEAD(&ecc_dev->list);
+ init_completion(&ecc_dev->irq_done);
+
+ /* Get base register address. */
+ ecc_dev->base_reg = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(ecc_dev->base_reg)) {
+ dev_err(dev, "Failed to get base address\n");
+ rc = PTR_ERR(ecc_dev->base_reg);
+ goto list_del;
+ }
+
+ /* Get and request IRQ */
+ ecc_dev->irq = platform_get_irq(pdev, 0);
+ if (ecc_dev->irq < 0) {
+ rc = ecc_dev->irq;
+ goto list_del;
+ }
+
+ rc = devm_request_threaded_irq(dev, ecc_dev->irq, ocs_ecc_irq_handler,
+ NULL, 0, "keembay-ocs-ecc", ecc_dev);
+ if (rc < 0) {
+ dev_err(dev, "Could not request IRQ\n");
+ goto list_del;
+ }
+
+ /* Add device to the list of OCS ECC devices. */
+ spin_lock(&ocs_ecc.lock);
+ list_add_tail(&ecc_dev->list, &ocs_ecc.dev_list);
+ spin_unlock(&ocs_ecc.lock);
+
+ /* Initialize crypto engine. */
+ ecc_dev->engine = crypto_engine_alloc_init(dev, 1);
+ if (!ecc_dev->engine) {
+ dev_err(dev, "Could not allocate crypto engine\n");
+ goto list_del;
+ }
+
+ rc = crypto_engine_start(ecc_dev->engine);
+ if (rc) {
+ dev_err(dev, "Could not start crypto engine\n");
+ goto cleanup;
+ }
+
+ /* Register the KPP algo. */
+ rc = crypto_register_kpp(&ocs_ecdh_p256);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto cleanup;
+ }
+
+ rc = crypto_register_kpp(&ocs_ecdh_p384);
+ if (rc) {
+ dev_err(dev,
+ "Could not register OCS algorithms with Crypto API\n");
+ goto ocs_ecdh_p384_error;
+ }
+
+ return 0;
+
+ocs_ecdh_p384_error:
+ crypto_unregister_kpp(&ocs_ecdh_p256);
+
+cleanup:
+ crypto_engine_exit(ecc_dev->engine);
+
+list_del:
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ return rc;
+}
+
+static int kmb_ocs_ecc_remove(struct platform_device *pdev)
+{
+ struct ocs_ecc_dev *ecc_dev;
+
+ ecc_dev = platform_get_drvdata(pdev);
+ if (!ecc_dev)
+ return -ENODEV;
+
+ crypto_unregister_kpp(&ocs_ecdh_p384);
+ crypto_unregister_kpp(&ocs_ecdh_p256);
+
+ spin_lock(&ocs_ecc.lock);
+ list_del(&ecc_dev->list);
+ spin_unlock(&ocs_ecc.lock);
+
+ crypto_engine_exit(ecc_dev->engine);
+
+ return 0;
+}
+
+/* Device tree driver match. */
+static const struct of_device_id kmb_ocs_ecc_of_match[] = {
+ {
+ .compatible = "intel,keembay-ocs-ecc",
+ },
+ {}
+};
+
+/* The OCS driver is a platform device. */
+static struct platform_driver kmb_ocs_ecc_driver = {
+ .probe = kmb_ocs_ecc_probe,
+ .remove = kmb_ocs_ecc_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = kmb_ocs_ecc_of_match,
+ },
+};
+module_platform_driver(kmb_ocs_ecc_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel Keem Bay OCS ECC Driver");
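+/* Allow the module to be auto-loaded when these algorithms are requested. */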
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p256-keembay-ocs");
+MODULE_ALIAS_CRYPTO("ecdh-nist-p384-keembay-ocs");
diff --git a/drivers/crypto/marvell/cesa/cesa.c b/drivers/crypto/marvell/cesa/cesa.c
index f14aac532f53..5cd332880653 100644
--- a/drivers/crypto/marvell/cesa/cesa.c
+++ b/drivers/crypto/marvell/cesa/cesa.c
@@ -615,7 +615,6 @@ static struct platform_driver marvell_cesa = {
};
module_platform_driver(marvell_cesa);
-MODULE_ALIAS("platform:mv_crypto");
MODULE_AUTHOR("Boris Brezillon <boris.brezillon@free-electrons.com>");
MODULE_AUTHOR("Arnaud Ebalard <arno@natisbad.org>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
index a72723455df7..877a948469bd 100644
--- a/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
+++ b/drivers/crypto/marvell/octeontx2/otx2_cptvf_algs.c
@@ -1274,6 +1274,7 @@ static int aead_do_fallback(struct aead_request *req, bool is_enc)
req->base.complete, req->base.data);
aead_request_set_crypt(&rctx->fbk_req, req->src,
req->dst, req->cryptlen, req->iv);
+ aead_request_set_ad(&rctx->fbk_req, req->assoclen);
ret = is_enc ? crypto_aead_encrypt(&rctx->fbk_req) :
crypto_aead_decrypt(&rctx->fbk_req);
} else {
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
index 33d8e50dcbda..fa768f10635f 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
+#include <linux/iopoll.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_pf2vf_msg.h>
@@ -161,7 +162,36 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+static int adf_init_device(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+ u32 status;
+ u32 csr;
+ int ret;
+
+ addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr;
+
+ /* Temporarily mask PM interrupt */
+ csr = ADF_CSR_RD(addr, ADF_4XXX_ERRMSK2);
+ csr |= ADF_4XXX_PM_SOU;
+ ADF_CSR_WR(addr, ADF_4XXX_ERRMSK2, csr);
+
+ /* Set DRV_ACTIVE bit to power up the device */
+ ADF_CSR_WR(addr, ADF_4XXX_PM_INTERRUPT, ADF_4XXX_PM_DRV_ACTIVE);
+
+ /* Poll status register to make sure the device is powered up */
+ ret = read_poll_timeout(ADF_CSR_RD, status,
+ status & ADF_4XXX_PM_INIT_STATE,
+ ADF_4XXX_PM_POLL_DELAY_US,
+ ADF_4XXX_PM_POLL_TIMEOUT_US, true, addr,
+ ADF_4XXX_PM_STATUS);
+ if (ret)
+ dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n");
+
+ return ret;
+}
+
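+/*
+ * PFVF communication is disabled for this device: the enable_pfvf_comms
+ * hook is a stub that simply reports success.
+ */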
+static int pfvf_comms_disabled(struct adf_accel_dev *accel_dev)
{
return 0;
}
@@ -215,6 +245,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->exit_arb = adf_exit_arb;
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
+ hw_data->init_device = adf_init_device;
hw_data->reset_device = adf_reset_flr;
hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK;
hw_data->uof_get_num_objs = uof_get_num_objs;
@@ -222,7 +253,7 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data)
hw_data->uof_get_ae_mask = uof_get_ae_mask;
hw_data->set_msix_rttable = set_msix_default_rttable;
hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer;
- hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
+ hw_data->enable_pfvf_comms = pfvf_comms_disabled;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
index 4fe2a776293c..924bac6feb37 100644
--- a/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_4xxx/adf_4xxx_hw_data.h
@@ -62,6 +62,16 @@
#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578)
#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970)
+/* Power management */
+#define ADF_4XXX_PM_POLL_DELAY_US 20
+#define ADF_4XXX_PM_POLL_TIMEOUT_US USEC_PER_SEC
+#define ADF_4XXX_PM_STATUS (0x50A00C)
+#define ADF_4XXX_PM_INTERRUPT (0x50A028)
+#define ADF_4XXX_PM_DRV_ACTIVE BIT(20)
+#define ADF_4XXX_PM_INIT_STATE BIT(21)
+/* Power management source in ERRSOU2 and ERRMSK2 */
+#define ADF_4XXX_PM_SOU BIT(18)
+
/* Firmware Binaries */
#define ADF_4XXX_FW "qat_4xxx.bin"
#define ADF_4XXX_MMP "qat_4xxx_mmp.bin"
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
index 3027c01bc89e..1fa690219d92 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.c
@@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C3XXX_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_C3XXX_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_C3XXX_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C3XXX_PMISC_BAR;
@@ -88,12 +60,12 @@ static u32 get_etr_bar_id(struct adf_hw_device_data *self)
static u32 get_sram_bar_id(struct adf_hw_device_data *self)
{
- return 0;
+ return ADF_C3XXX_SRAM_BAR;
}
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int aes = get_num_aes(self);
+ int aes = self->get_num_aes(self);
if (aes == 6)
return DEV_SKU_4;
@@ -106,41 +78,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C3XXX_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C3XXX_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_C3XXX_AE_CTX_ENABLES(i));
- val |= ADF_C3XXX_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_C3XXX_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_C3XXX_AE_MISC_CONTROL(i));
- val |= ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_C3XXX_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_C3XXX_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_C3XXX_UERRSSMSH(i));
- val |= ADF_C3XXX_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C3XXX_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_C3XXX_CERRSSMSH(i));
- val |= ADF_C3XXX_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C3XXX_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -154,13 +91,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C3XXX_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -177,16 +107,16 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C3XXX_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C3XXX_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_C3XXX_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_C3XXX_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@@ -205,7 +135,10 @@ void adf_init_hw_data_c3xxx(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
index 86ee02a86789..1b86f828725c 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
+++ b/drivers/crypto/qat/qat_c3xxx/adf_c3xxx_hw_data.h
@@ -6,8 +6,7 @@
/* PCIe configuration space */
#define ADF_C3XXX_PMISC_BAR 0
#define ADF_C3XXX_ETR_BAR 1
-#define ADF_C3XXX_RX_RINGS_OFFSET 8
-#define ADF_C3XXX_TX_RINGS_MASK 0xFF
+#define ADF_C3XXX_SRAM_BAR 0
#define ADF_C3XXX_MAX_ACCELERATORS 3
#define ADF_C3XXX_MAX_ACCELENGINES 6
#define ADF_C3XXX_ACCELERATORS_REG_OFFSET 16
@@ -19,16 +18,6 @@
#define ADF_C3XXX_SMIA0_MASK 0xFFFF
#define ADF_C3XXX_SMIA1_MASK 0x1
#define ADF_C3XXX_SOFTSTRAP_CSR_OFFSET 0x2EC
-/* Error detection and correction */
-#define ADF_C3XXX_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_C3XXX_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_C3XXX_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_C3XXX_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_C3XXX_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_C3XXX_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_C3XXX_ERRSSMSH_EN BIT(3)
-
-#define ADF_C3XXX_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C3XXX_AE2FUNC_MAP_GRP_A_NUM_REGS 48
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
index b023c80873bb..0613db077689 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.c
@@ -48,34 +48,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~(fuses | straps) & ADF_C62X_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_C62X_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_C62X_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_C62X_PMISC_BAR;
@@ -93,7 +65,7 @@ static u32 get_sram_bar_id(struct adf_hw_device_data *self)
static enum dev_sku_info get_sku(struct adf_hw_device_data *self)
{
- int aes = get_num_aes(self);
+ int aes = self->get_num_aes(self);
if (aes == 8)
return DEV_SKU_2;
@@ -108,41 +80,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_C62X_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_C62X_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_C62X_AE_CTX_ENABLES(i));
- val |= ADF_C62X_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_C62X_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_C62X_AE_MISC_CONTROL(i));
- val |= ADF_C62X_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_C62X_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_C62X_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_C62X_UERRSSMSH(i));
- val |= ADF_C62X_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C62X_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_C62X_CERRSSMSH(i));
- val |= ADF_C62X_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_C62X_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -156,13 +93,6 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_C62X_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
-{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
-
- return 0;
-}
-
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
{
adf_gen2_cfg_iov_thds(accel_dev, enable,
@@ -179,16 +109,16 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_C62X_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_C62X_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_C62X_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_C62X_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = adf_gen2_get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_sram_bar_id = get_sram_bar_id;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
@@ -207,7 +137,10 @@ void adf_init_hw_data_c62x(struct adf_hw_device_data *hw_data)
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_flr;
hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = adf_gen2_get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = adf_gen2_enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = adf_gen2_disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
index e6664bd20c91..68c3436bd3aa 100644
--- a/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
+++ b/drivers/crypto/qat/qat_c62x/adf_c62x_hw_data.h
@@ -7,8 +7,6 @@
#define ADF_C62X_SRAM_BAR 0
#define ADF_C62X_PMISC_BAR 1
#define ADF_C62X_ETR_BAR 2
-#define ADF_C62X_RX_RINGS_OFFSET 8
-#define ADF_C62X_TX_RINGS_MASK 0xFF
#define ADF_C62X_MAX_ACCELERATORS 5
#define ADF_C62X_MAX_ACCELENGINES 10
#define ADF_C62X_ACCELERATORS_REG_OFFSET 16
@@ -20,16 +18,6 @@
#define ADF_C62X_SMIA0_MASK 0xFFFF
#define ADF_C62X_SMIA1_MASK 0x1
#define ADF_C62X_SOFTSTRAP_CSR_OFFSET 0x2EC
-/* Error detection and correction */
-#define ADF_C62X_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_C62X_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_C62X_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_C62X_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_C62X_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_C62X_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_C62X_ERRSSMSH_EN BIT(3)
-
-#define ADF_C62X_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
/* AE to function mapping */
#define ADF_C62X_AE2FUNC_MAP_GRP_A_NUM_REGS 80
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 38c0af6d4e43..57d9ca08e611 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -42,13 +42,17 @@ struct adf_bar {
resource_size_t base_addr;
void __iomem *virt_addr;
resource_size_t size;
-} __packed;
+};
+
+struct adf_irq {
+ bool enabled;
+ char name[ADF_MAX_MSIX_VECTOR_NAME];
+};
struct adf_accel_msix {
- struct msix_entry *entries;
- char **names;
+ struct adf_irq *irqs;
u32 num_entries;
-} __packed;
+};
struct adf_accel_pci {
struct pci_dev *pci_dev;
@@ -56,7 +60,7 @@ struct adf_accel_pci {
struct adf_bar pci_bars[ADF_PCI_MAX_BARS];
u8 revid;
u8 sku;
-} __packed;
+};
enum dev_state {
DEV_DOWN = 0,
@@ -96,7 +100,7 @@ struct adf_hw_device_class {
const char *name;
const enum adf_device_type type;
u32 instances;
-} __packed;
+};
struct arb_info {
u32 arb_cfg;
@@ -166,12 +170,18 @@ struct adf_hw_device_data {
int (*init_arb)(struct adf_accel_dev *accel_dev);
void (*exit_arb)(struct adf_accel_dev *accel_dev);
const u32 *(*get_arb_mapping)(void);
+ int (*init_device)(struct adf_accel_dev *accel_dev);
void (*disable_iov)(struct adf_accel_dev *accel_dev);
void (*configure_iov_threads)(struct adf_accel_dev *accel_dev,
bool enable);
void (*enable_ints)(struct adf_accel_dev *accel_dev);
void (*set_ssm_wdtimer)(struct adf_accel_dev *accel_dev);
int (*enable_pfvf_comms)(struct adf_accel_dev *accel_dev);
+ u32 (*get_vf2pf_sources)(void __iomem *pmisc_addr);
+ void (*enable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+ u32 vf_mask);
+ void (*disable_vf2pf_interrupts)(void __iomem *pmisc_bar_addr,
+ u32 vf_mask);
void (*reset_device)(struct adf_accel_dev *accel_dev);
void (*set_msix_rttable)(struct adf_accel_dev *accel_dev);
char *(*uof_get_name)(u32 obj_num);
@@ -195,7 +205,7 @@ struct adf_hw_device_data {
u8 num_logical_accel;
u8 num_engines;
u8 min_iov_compat_ver;
-} __packed;
+};
/* CSR write macro */
#define ADF_CSR_WR(csr_base, csr_offset, val) \
@@ -251,7 +261,8 @@ struct adf_accel_dev {
struct adf_accel_vf_info *vf_info;
} pf;
struct {
- char *irq_name;
+ bool irq_enabled;
+ char irq_name[ADF_MAX_MSIX_VECTOR_NAME];
struct tasklet_struct pf2vf_bh_tasklet;
struct mutex vf2pf_lock; /* protect CSR access */
struct completion iov_msg_completion;
@@ -261,5 +272,5 @@ struct adf_accel_dev {
};
bool is_vf;
u32 accel_id;
-} __packed;
+};
#endif
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 4261749fae8d..2cc6622833c4 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -62,7 +62,6 @@ int adf_dev_start(struct adf_accel_dev *accel_dev);
void adf_dev_stop(struct adf_accel_dev *accel_dev);
void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr);
void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev);
int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev);
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info);
@@ -197,10 +196,11 @@ void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
u32 vf_mask);
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
u32 vf_mask);
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev);
void adf_enable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_disable_pf2vf_interrupts(struct adf_accel_dev *accel_dev);
void adf_schedule_vf2pf_handler(struct adf_accel_vf_info *vf_info);
-
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg);
int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev);
void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev);
int adf_init_pf_wq(void);
@@ -211,6 +211,11 @@ void adf_flush_vf_wq(struct adf_accel_dev *accel_dev);
#else
#define adf_sriov_configure NULL
+static inline int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ return 0;
+}
+
static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev)
{
}
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
index 9e560c7d4163..262bdc05dab4 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.c
@@ -4,6 +4,104 @@
#include "icp_qat_hw.h"
#include <linux/pci.h>
+#define ADF_GEN2_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+
+u32 adf_gen2_get_pf2vf_offset(u32 i)
+{
+ return ADF_GEN2_PF2VF_OFFSET(i);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_pf2vf_offset);
+
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_addr)
+{
+ u32 errsou3, errmsk3, vf_int_mask;
+
+ /* Get the interrupt sources triggered by VFs */
+ errsou3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRSOU3);
+ vf_int_mask = ADF_GEN2_ERR_REG_VF2PF(errsou3);
+
+ /* To avoid adding duplicate entries to the work queue, clear
+ * vf_int_mask bits that are already masked in the ERRMSK register.
+ */
+ errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3);
+ vf_int_mask &= ~ADF_GEN2_ERR_REG_VF2PF(errmsk3);
+
+ return vf_int_mask;
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_vf2pf_sources);
+
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ & ~ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_vf2pf_interrupts);
+
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ if (vf_mask & 0xFFFF) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK3)
+ | ADF_GEN2_ERR_MSK_VF2PF(vf_mask);
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK3, val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_disable_vf2pf_interrupts);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
+{
+ if (!self || !self->accel_mask)
+ return 0;
+
+ return hweight16(self->accel_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);
+
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
+{
+ if (!self || !self->ae_mask)
+ return 0;
+
+ return hweight32(self->ae_mask);
+}
+EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);
+
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_bar *misc_bar = &GET_BARS(accel_dev)
+ [hw_data->get_misc_bar_id(hw_data)];
+ unsigned long accel_mask = hw_data->accel_mask;
+ unsigned long ae_mask = hw_data->ae_mask;
+ void __iomem *csr = misc_bar->virt_addr;
+ unsigned int val, i;
+
+ /* Enable Accel Engine error detection & correction */
+ for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
+ val = ADF_CSR_RD(csr, ADF_GEN2_AE_CTX_ENABLES(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
+ ADF_CSR_WR(csr, ADF_GEN2_AE_CTX_ENABLES(i), val);
+ val = ADF_CSR_RD(csr, ADF_GEN2_AE_MISC_CONTROL(i));
+ val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
+ ADF_CSR_WR(csr, ADF_GEN2_AE_MISC_CONTROL(i), val);
+ }
+
+ /* Enable shared memory error detection & correction */
+ for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
+ val = ADF_CSR_RD(csr, ADF_GEN2_UERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_GEN2_UERRSSMSH(i), val);
+ val = ADF_CSR_RD(csr, ADF_GEN2_CERRSSMSH(i));
+ val |= ADF_GEN2_ERRSSMSH_EN;
+ ADF_CSR_WR(csr, ADF_GEN2_CERRSSMSH(i), val);
+ }
+}
+EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);
+
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs)
{
diff --git a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
index 756b0ddfac5e..c169d704097d 100644
--- a/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
+++ b/drivers/crypto/qat/qat_common/adf_gen2_hw_data.h
@@ -22,6 +22,8 @@
#define ADF_RING_CSR_INT_FLAG_AND_COL 0x184
#define ADF_RING_CSR_INT_COL_CTL_ENABLE 0x80000000
#define ADF_RING_BUNDLE_SIZE 0x1000
+#define ADF_GEN2_RX_RINGS_OFFSET 8
+#define ADF_GEN2_TX_RINGS_MASK 0xFF
#define BUILD_RING_BASE_ADDR(addr, size) \
(((addr) >> 6) & (GENMASK_ULL(63, 0) << (size)))
@@ -125,6 +127,31 @@ do { \
#define ADF_SSMWDT(i) (ADF_SSMWDT_OFFSET + ((i) * 0x4000))
#define ADF_SSMWDTPKE(i) (ADF_SSMWDTPKE_OFFSET + ((i) * 0x4000))
+/* Error detection and correction */
+#define ADF_GEN2_AE_CTX_ENABLES(i) ((i) * 0x1000 + 0x20818)
+#define ADF_GEN2_AE_MISC_CONTROL(i) ((i) * 0x1000 + 0x20960)
+#define ADF_GEN2_ENABLE_AE_ECC_ERR BIT(28)
+#define ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
+#define ADF_GEN2_UERRSSMSH(i) ((i) * 0x4000 + 0x18)
+#define ADF_GEN2_CERRSSMSH(i) ((i) * 0x4000 + 0x10)
+#define ADF_GEN2_ERRSSMSH_EN BIT(3)
+
+ /* VF2PF interrupts */
+#define ADF_GEN2_ERRSOU3 (0x3A000 + 0x0C)
+#define ADF_GEN2_ERRSOU5 (0x3A000 + 0xD8)
+#define ADF_GEN2_ERRMSK3 (0x3A000 + 0x1C)
+#define ADF_GEN2_ERRMSK5 (0x3A000 + 0xDC)
+#define ADF_GEN2_ERR_REG_VF2PF(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
+#define ADF_GEN2_ERR_MSK_VF2PF(vf_mask) (((vf_mask) & 0xFFFF) << 9)
+
+u32 adf_gen2_get_pf2vf_offset(u32 i);
+u32 adf_gen2_get_vf2pf_sources(void __iomem *pmisc_bar);
+void adf_gen2_enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+void adf_gen2_disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask);
+
+u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self);
+u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self);
+void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev);
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
int num_a_regs, int num_b_regs);
void adf_gen2_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops);
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 60bc7b991d35..e3749e5817d9 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -79,6 +79,11 @@ int adf_dev_init(struct adf_accel_dev *accel_dev)
return -EFAULT;
}
+ if (hw_data->init_device && hw_data->init_device(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed to initialize device\n");
+ return -EFAULT;
+ }
+
if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
return -EFAULT;
diff --git a/drivers/crypto/qat/qat_common/adf_isr.c b/drivers/crypto/qat/qat_common/adf_isr.c
index c678d5c531aa..40593c9449a2 100644
--- a/drivers/crypto/qat/qat_common/adf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_isr.c
@@ -16,46 +16,31 @@
#include "adf_transport_internal.h"
#define ADF_MAX_NUM_VFS 32
-#define ADF_ERRSOU3 (0x3A000 + 0x0C)
-#define ADF_ERRSOU5 (0x3A000 + 0xD8)
-#define ADF_ERRMSK3 (0x3A000 + 0x1C)
-#define ADF_ERRMSK5 (0x3A000 + 0xDC)
-#define ADF_ERR_REG_VF2PF_L(vf_src) (((vf_src) & 0x01FFFE00) >> 9)
-#define ADF_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- u32 msix_num_entries = 1;
+ u32 msix_num_entries = hw_data->num_banks + 1;
+ int ret;
if (hw_data->set_msix_rttable)
hw_data->set_msix_rttable(accel_dev);
- /* If SR-IOV is disabled, add entries for each bank */
- if (!accel_dev->pf.vf_info) {
- int i;
-
- msix_num_entries += hw_data->num_banks;
- for (i = 0; i < msix_num_entries; i++)
- pci_dev_info->msix_entries.entries[i].entry = i;
- } else {
- pci_dev_info->msix_entries.entries[0].entry =
- hw_data->num_banks;
- }
-
- if (pci_enable_msix_exact(pci_dev_info->pci_dev,
- pci_dev_info->msix_entries.entries,
- msix_num_entries)) {
- dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
- return -EFAULT;
+ ret = pci_alloc_irq_vectors(pci_dev_info->pci_dev, msix_num_entries,
+ msix_num_entries, PCI_IRQ_MSIX);
+ if (unlikely(ret < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to allocate %d MSI-X vectors\n",
+ msix_num_entries);
+ return ret;
}
return 0;
}
static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
- pci_disable_msix(pci_dev_info->pci_dev);
+ pci_free_irq_vectors(pci_dev_info->pci_dev);
}
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
@@ -80,22 +65,10 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
struct adf_bar *pmisc =
&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 errsou3, errsou5, errmsk3, errmsk5;
unsigned long vf_mask;
/* Get the interrupt sources triggered by VFs */
- errsou3 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU3);
- errsou5 = ADF_CSR_RD(pmisc_addr, ADF_ERRSOU5);
- vf_mask = ADF_ERR_REG_VF2PF_L(errsou3);
- vf_mask |= ADF_ERR_REG_VF2PF_U(errsou5);
-
- /* To avoid adding duplicate entries to work queue, clear
- * vf_int_mask_sets bits that are already masked in ERRMSK register.
- */
- errmsk3 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK3);
- errmsk5 = ADF_CSR_RD(pmisc_addr, ADF_ERRMSK5);
- vf_mask &= ~ADF_ERR_REG_VF2PF_L(errmsk3);
- vf_mask &= ~ADF_ERR_REG_VF2PF_U(errmsk5);
+ vf_mask = hw_data->get_vf2pf_sources(pmisc_addr);
if (vf_mask) {
struct adf_accel_vf_info *vf_info;
@@ -135,13 +108,39 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
return IRQ_NONE;
}
+static void adf_free_irqs(struct adf_accel_dev *accel_dev)
+{
+ struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
+ struct adf_etr_data *etr_data = accel_dev->transport;
+ int clust_irq = hw_data->num_banks;
+ int irq, i = 0;
+
+ if (pci_dev_info->msix_entries.num_entries > 1) {
+ for (i = 0; i < hw_data->num_banks; i++) {
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ irq_set_affinity_hint(irq, NULL);
+ free_irq(irq, &etr_data->banks[i]);
+ }
+ }
+ }
+
+ if (irqs[i].enabled) {
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ free_irq(irq, accel_dev);
+ }
+}
+
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
+ struct adf_irq *irqs = pci_dev_info->msix_entries.irqs;
struct adf_etr_data *etr_data = accel_dev->transport;
- int ret, i = 0;
+ int clust_irq = hw_data->num_banks;
+ int ret, irq, i = 0;
char *name;
/* Request msix irq for all banks unless SR-IOV enabled */
@@ -150,105 +149,82 @@ static int adf_request_irqs(struct adf_accel_dev *accel_dev)
struct adf_etr_bank_data *bank = &etr_data->banks[i];
unsigned int cpu, cpus = num_online_cpus();
- name = *(pci_dev_info->msix_entries.names + i);
+ name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-bundle%d", accel_dev->accel_id, i);
- ret = request_irq(msixe[i].vector,
- adf_msix_isr_bundle, 0, name, bank);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, i);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_bundle, 0,
+ &name[0], bank);
if (ret) {
dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d for %s\n",
- msixe[i].vector, name);
- return ret;
+ "Failed to allocate IRQ %d for %s\n",
+ irq, name);
+ goto err;
}
cpu = ((accel_dev->accel_id * hw_data->num_banks) +
i) % cpus;
- irq_set_affinity_hint(msixe[i].vector,
- get_cpu_mask(cpu));
+ irq_set_affinity_hint(irq, get_cpu_mask(cpu));
+ irqs[i].enabled = true;
}
}
/* Request msix irq for AE */
- name = *(pci_dev_info->msix_entries.names + i);
+ name = irqs[i].name;
snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
"qat%d-ae-cluster", accel_dev->accel_id);
- ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
+ irq = pci_irq_vector(pci_dev_info->pci_dev, clust_irq);
+ if (unlikely(irq < 0)) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to get IRQ number of device vector %d - %s\n",
+ i, name);
+ ret = irq;
+ goto err;
+ }
+ ret = request_irq(irq, adf_msix_isr_ae, 0, &name[0], accel_dev);
if (ret) {
dev_err(&GET_DEV(accel_dev),
- "failed to enable irq %d, for %s\n",
- msixe[i].vector, name);
- return ret;
+ "Failed to allocate IRQ %d for %s\n", irq, name);
+ goto err;
}
+ irqs[i].enabled = true;
+ return ret;
+err:
+ adf_free_irqs(accel_dev);
return ret;
}
-static void adf_free_irqs(struct adf_accel_dev *accel_dev)
-{
- struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
- struct adf_etr_data *etr_data = accel_dev->transport;
- int i = 0;
-
- if (pci_dev_info->msix_entries.num_entries > 1) {
- for (i = 0; i < hw_data->num_banks; i++) {
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, &etr_data->banks[i]);
- }
- }
- irq_set_affinity_hint(msixe[i].vector, NULL);
- free_irq(msixe[i].vector, accel_dev);
-}
-
-static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
+static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
- int i;
- char **names;
- struct msix_entry *entries;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msix_num_entries = 1;
+ struct adf_irq *irqs;
/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
if (!accel_dev->pf.vf_info)
msix_num_entries += hw_data->num_banks;
- entries = kcalloc_node(msix_num_entries, sizeof(*entries),
- GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
- if (!entries)
+ irqs = kzalloc_node(msix_num_entries * sizeof(*irqs),
+ GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
+ if (!irqs)
return -ENOMEM;
- names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
- if (!names) {
- kfree(entries);
- return -ENOMEM;
- }
- for (i = 0; i < msix_num_entries; i++) {
- *(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!(*(names + i)))
- goto err;
- }
accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
- accel_dev->accel_pci_dev.msix_entries.entries = entries;
- accel_dev->accel_pci_dev.msix_entries.names = names;
+ accel_dev->accel_pci_dev.msix_entries.irqs = irqs;
return 0;
-err:
- for (i = 0; i < msix_num_entries; i++)
- kfree(*(names + i));
- kfree(entries);
- kfree(names);
- return -ENOMEM;
}
-static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
+static void adf_isr_free_msix_vectors_data(struct adf_accel_dev *accel_dev)
{
- char **names = accel_dev->accel_pci_dev.msix_entries.names;
- int i;
-
- kfree(accel_dev->accel_pci_dev.msix_entries.entries);
- for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
- kfree(*(names + i));
- kfree(names);
+ kfree(accel_dev->accel_pci_dev.msix_entries.irqs);
+ accel_dev->accel_pci_dev.msix_entries.irqs = NULL;
}
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
@@ -287,7 +263,7 @@ void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
adf_free_irqs(accel_dev);
adf_cleanup_bh(accel_dev);
adf_disable_msix(&accel_dev->accel_pci_dev);
- adf_isr_free_msix_entry_table(accel_dev);
+ adf_isr_free_msix_vectors_data(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);
@@ -303,7 +279,7 @@ int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
int ret;
- ret = adf_isr_alloc_msix_entry_table(accel_dev);
+ ret = adf_isr_alloc_msix_vectors_data(accel_dev);
if (ret)
goto err_out;
@@ -328,7 +304,7 @@ err_disable_msix:
adf_disable_msix(&accel_dev->accel_pci_dev);
err_free_msix_table:
- adf_isr_free_msix_entry_table(accel_dev);
+ adf_isr_free_msix_vectors_data(accel_dev);
err_out:
return ret;
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
index 976b9ab7617c..59860bdaedb6 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.c
@@ -5,82 +5,51 @@
#include "adf_common_drv.h"
#include "adf_pf2vf_msg.h"
-#define ADF_DH895XCC_EP_OFFSET 0x3A000
-#define ADF_DH895XCC_ERRMSK3 (ADF_DH895XCC_EP_OFFSET + 0x1C)
-#define ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask) ((vf_mask & 0xFFFF) << 9)
-#define ADF_DH895XCC_ERRMSK5 (ADF_DH895XCC_EP_OFFSET + 0xDC)
-#define ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask) (vf_mask >> 16)
-
-static void __adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
-{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
- void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 reg;
-
- /* Enable VF2PF Messaging Ints - VFs 1 through 16 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3);
- reg &= ~ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
- }
-
- /* Enable VF2PF Messaging Ints - VFs 17 through 32 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5);
- reg &= ~ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
- }
-}
+#define ADF_PFVF_MSG_COLLISION_DETECT_DELAY 10
+#define ADF_PFVF_MSG_ACK_DELAY 2
+#define ADF_PFVF_MSG_ACK_MAX_RETRY 100
+#define ADF_PFVF_MSG_RETRY_DELAY 5
+#define ADF_PFVF_MSG_MAX_RETRIES 3
+#define ADF_PFVF_MSG_RESP_TIMEOUT (ADF_PFVF_MSG_ACK_DELAY * \
+ ADF_PFVF_MSG_ACK_MAX_RETRY + \
+ ADF_PFVF_MSG_COLLISION_DETECT_DELAY)
void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- __adf_enable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->enable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-static void __adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev,
- u32 vf_mask)
+void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
- struct adf_bar *pmisc =
- &GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
void __iomem *pmisc_addr = pmisc->virt_addr;
- u32 reg;
-
- /* Disable VF2PF interrupts for VFs 1 through 16 per vf_mask[15:0] */
- if (vf_mask & 0xFFFF) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK3) |
- ADF_DH895XCC_ERRMSK3_VF2PF_L_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK3, reg);
- }
-
- /* Disable VF2PF interrupts for VFs 17 through 32 per vf_mask[31:16] */
- if (vf_mask >> 16) {
- reg = ADF_CSR_RD(pmisc_addr, ADF_DH895XCC_ERRMSK5) |
- ADF_DH895XCC_ERRMSK5_VF2PF_U_MASK(vf_mask);
- ADF_CSR_WR(pmisc_addr, ADF_DH895XCC_ERRMSK5, reg);
- }
-}
-
-void adf_disable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask)
-{
unsigned long flags;
spin_lock_irqsave(&accel_dev->pf.vf2pf_ints_lock, flags);
- __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock_irqrestore(&accel_dev->pf.vf2pf_ints_lock, flags);
}
-void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev, u32 vf_mask)
+void adf_disable_vf2pf_interrupts_irq(struct adf_accel_dev *accel_dev,
+ u32 vf_mask)
{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u32 misc_bar_id = hw_data->get_misc_bar_id(hw_data);
+ struct adf_bar *pmisc = &GET_BARS(accel_dev)[misc_bar_id];
+ void __iomem *pmisc_addr = pmisc->virt_addr;
+
spin_lock(&accel_dev->pf.vf2pf_ints_lock);
- __adf_disable_vf2pf_interrupts(accel_dev, vf_mask);
+ hw_data->disable_vf2pf_interrupts(pmisc_addr, vf_mask);
spin_unlock(&accel_dev->pf.vf2pf_ints_lock);
}
@@ -117,44 +86,33 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
mutex_lock(lock);
- /* Check if PF2VF CSR is in use by remote function */
+ /* Check if the PFVF CSR is in use by remote function */
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
if ((val & remote_in_use_mask) == remote_in_use_pattern) {
dev_dbg(&GET_DEV(accel_dev),
- "PF2VF CSR in use by remote function\n");
+ "PFVF CSR in use by remote function\n");
ret = -EBUSY;
goto out;
}
- /* Attempt to get ownership of PF2VF CSR */
msg &= ~local_in_use_mask;
msg |= local_in_use_pattern;
- ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg);
-
- /* Wait in case remote func also attempting to get ownership */
- msleep(ADF_IOV_MSG_COLLISION_DETECT_DELAY);
-
- val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- if ((val & local_in_use_mask) != local_in_use_pattern) {
- dev_dbg(&GET_DEV(accel_dev),
- "PF2VF CSR in use by remote - collision detected\n");
- ret = -EBUSY;
- goto out;
- }
- /*
- * This function now owns the PV2VF CSR. The IN_USE_BY pattern must
- * remain in the PF2VF CSR for all writes including ACK from remote
- * until this local function relinquishes the CSR. Send the message
- * by interrupting the remote.
- */
+ /* Attempt to get ownership of the PFVF CSR */
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, msg | int_bit);
/* Wait for confirmation from remote func it received the message */
do {
- msleep(ADF_IOV_MSG_ACK_DELAY);
+ msleep(ADF_PFVF_MSG_ACK_DELAY);
val = ADF_CSR_RD(pmisc_bar_addr, pf2vf_offset);
- } while ((val & int_bit) && (count++ < ADF_IOV_MSG_ACK_MAX_RETRY));
+ } while ((val & int_bit) && (count++ < ADF_PFVF_MSG_ACK_MAX_RETRY));
+
+ if (val != msg) {
+ dev_dbg(&GET_DEV(accel_dev),
+ "Collision - PFVF CSR overwritten by remote function\n");
+ ret = -EIO;
+ goto out;
+ }
if (val & int_bit) {
dev_dbg(&GET_DEV(accel_dev), "ACK not received from remote\n");
@@ -162,7 +120,7 @@ static int __adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
ret = -EIO;
}
- /* Finished with PF2VF CSR; relinquish it and leave msg in CSR */
+ /* Finished with the PFVF CSR; relinquish it and leave msg in CSR */
ADF_CSR_WR(pmisc_bar_addr, pf2vf_offset, val & ~local_in_use_mask);
out:
mutex_unlock(lock);
@@ -170,16 +128,17 @@ out:
}
/**
- * adf_iov_putmsg() - send PF2VF message
+ * adf_iov_putmsg() - send PFVF message
* @accel_dev: Pointer to acceleration device.
* @msg: Message to send
- * @vf_nr: VF number to which the message will be sent
+ * @vf_nr: VF number to which the message will be sent if on PF, ignored
+ * otherwise
*
- * Function sends a message from the PF to a VF
+ * Function sends a message through the PFVF channel
*
* Return: 0 on success, error code otherwise.
*/
-int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
+static int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
{
u32 count = 0;
int ret;
@@ -187,12 +146,77 @@ int adf_iov_putmsg(struct adf_accel_dev *accel_dev, u32 msg, u8 vf_nr)
do {
ret = __adf_iov_putmsg(accel_dev, msg, vf_nr);
if (ret)
- msleep(ADF_IOV_MSG_RETRY_DELAY);
- } while (ret && (count++ < ADF_IOV_MSG_MAX_RETRIES));
+ msleep(ADF_PFVF_MSG_RETRY_DELAY);
+ } while (ret && (count++ < ADF_PFVF_MSG_MAX_RETRIES));
return ret;
}
+/**
+ * adf_send_pf2vf_msg() - send PF to VF message
+ * @accel_dev: Pointer to acceleration device
+ * @vf_nr: VF number to which the message will be sent
+ * @msg: Message to send
+ *
+ * This function allows the PF to send a message to a specific VF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_pf2vf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, u32 msg)
+{
+ return adf_iov_putmsg(accel_dev, msg, vf_nr);
+}
+
+/**
+ * adf_send_vf2pf_msg() - send VF to PF message
+ * @accel_dev: Pointer to acceleration device
+ * @msg: Message to send
+ *
+ * This function allows the VF to send a message to the PF.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_send_vf2pf_msg(struct adf_accel_dev *accel_dev, u32 msg)
+{
+ return adf_iov_putmsg(accel_dev, msg, 0);
+}
+
+/**
+ * adf_send_vf2pf_req() - send VF2PF request message
+ * @accel_dev: Pointer to acceleration device.
+ * @msg: Request message to send
+ *
+ * This function sends a message that requires a response from the VF to the PF
+ * and waits for a reply.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+static int adf_send_vf2pf_req(struct adf_accel_dev *accel_dev, u32 msg)
+{
+ unsigned long timeout = msecs_to_jiffies(ADF_PFVF_MSG_RESP_TIMEOUT);
+ int ret;
+
+ reinit_completion(&accel_dev->vf.iov_msg_completion);
+
+ /* Send request from VF to PF */
+ ret = adf_send_vf2pf_msg(accel_dev, msg);
+ if (ret) {
+ dev_err(&GET_DEV(accel_dev),
+ "Failed to send request msg to PF\n");
+ return ret;
+ }
+
+ /* Wait for response */
+ if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
+ timeout)) {
+ dev_err(&GET_DEV(accel_dev),
+ "PFVF request/response message timeout expired\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
{
struct adf_accel_dev *accel_dev = vf_info->accel_dev;
@@ -204,6 +228,11 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
/* Read message from the VF */
msg = ADF_CSR_RD(pmisc_addr, hw_data->get_pf2vf_offset(vf_nr));
+ if (!(msg & ADF_VF2PF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious VF2PF interrupt, msg %X. Ignored\n", msg);
+ goto out;
+ }
/* To ACK, clear the VF2PFINT bit */
msg &= ~ADF_VF2PF_INT;
@@ -284,9 +313,10 @@ void adf_vf2pf_req_hndl(struct adf_accel_vf_info *vf_info)
goto err;
}
- if (resp && adf_iov_putmsg(accel_dev, resp, vf_nr))
+ if (resp && adf_send_pf2vf_msg(accel_dev, vf_nr, resp))
dev_err(&GET_DEV(accel_dev), "Failed to send response to VF\n");
+out:
/* re-enable interrupt on PF from this VF */
adf_enable_vf2pf_interrupts(accel_dev, (1 << vf_nr));
@@ -304,7 +334,7 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev));
for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) {
- if (vf->init && adf_iov_putmsg(accel_dev, msg, i))
+ if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send restarting msg to VF%d\n", i);
}
@@ -312,7 +342,6 @@ void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev)
static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
{
- unsigned long timeout = msecs_to_jiffies(ADF_IOV_MSG_RESP_TIMEOUT);
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
u32 msg = 0;
int ret;
@@ -322,24 +351,13 @@ static int adf_vf2pf_request_version(struct adf_accel_dev *accel_dev)
msg |= ADF_PFVF_COMPAT_THIS_VERSION << ADF_VF2PF_COMPAT_VER_REQ_SHIFT;
BUILD_BUG_ON(ADF_PFVF_COMPAT_THIS_VERSION > 255);
- reinit_completion(&accel_dev->vf.iov_msg_completion);
-
- /* Send request from VF to PF */
- ret = adf_iov_putmsg(accel_dev, msg, 0);
+ ret = adf_send_vf2pf_req(accel_dev, msg);
if (ret) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Compatibility Version Request.\n");
return ret;
}
- /* Wait for response */
- if (!wait_for_completion_timeout(&accel_dev->vf.iov_msg_completion,
- timeout)) {
- dev_err(&GET_DEV(accel_dev),
- "IOV request/response message timeout expired\n");
- return -EIO;
- }
-
/* Response from PF received, check compatibility */
switch (accel_dev->vf.compatible) {
case ADF_PF2VF_VF_COMPATIBLE:
@@ -378,3 +396,21 @@ int adf_enable_vf2pf_comms(struct adf_accel_dev *accel_dev)
return adf_vf2pf_request_version(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_enable_vf2pf_comms);
+
+/**
+ * adf_enable_pf2vf_comms() - Function enables communication from PF to VF
+ *
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * This function carries out the necessary steps to setup and start the PFVF
+ * communication channel, if any.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+{
+ spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_enable_pf2vf_comms);
diff --git a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
index ffd43aa50b57..a7d8f8367345 100644
--- a/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
+++ b/drivers/crypto/qat/qat_common/adf_pf2vf_msg.h
@@ -90,13 +90,4 @@
/* VF->PF Compatible Version Request */
#define ADF_VF2PF_COMPAT_VER_REQ_SHIFT 22
-/* Collision detection */
-#define ADF_IOV_MSG_COLLISION_DETECT_DELAY 10
-#define ADF_IOV_MSG_ACK_DELAY 2
-#define ADF_IOV_MSG_ACK_MAX_RETRY 100
-#define ADF_IOV_MSG_RETRY_DELAY 5
-#define ADF_IOV_MSG_MAX_RETRIES 3
-#define ADF_IOV_MSG_RESP_TIMEOUT (ADF_IOV_MSG_ACK_DELAY * \
- ADF_IOV_MSG_ACK_MAX_RETRY + \
- ADF_IOV_MSG_COLLISION_DETECT_DELAY)
#endif /* ADF_IOV_MSG_H */
diff --git a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
index 3e25fac051b2..8d11bb24cea0 100644
--- a/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
+++ b/drivers/crypto/qat/qat_common/adf_vf2pf_msg.c
@@ -17,7 +17,7 @@ int adf_vf2pf_notify_init(struct adf_accel_dev *accel_dev)
u32 msg = (ADF_VF2PF_MSGORIGIN_SYSTEM |
(ADF_VF2PF_MSGTYPE_INIT << ADF_VF2PF_MSGTYPE_SHIFT));
- if (adf_iov_putmsg(accel_dev, msg, 0)) {
+ if (adf_send_vf2pf_msg(accel_dev, msg)) {
dev_err(&GET_DEV(accel_dev),
"Failed to send Init event to PF\n");
return -EFAULT;
@@ -41,7 +41,7 @@ void adf_vf2pf_notify_shutdown(struct adf_accel_dev *accel_dev)
(ADF_VF2PF_MSGTYPE_SHUTDOWN << ADF_VF2PF_MSGTYPE_SHIFT));
if (test_bit(ADF_STATUS_PF_RUNNING, &accel_dev->status))
- if (adf_iov_putmsg(accel_dev, msg, 0))
+ if (adf_send_vf2pf_msg(accel_dev, msg))
dev_err(&GET_DEV(accel_dev),
"Failed to send Shutdown event to PF\n");
}
diff --git a/drivers/crypto/qat/qat_common/adf_vf_isr.c b/drivers/crypto/qat/qat_common/adf_vf_isr.c
index 7828a6573f3e..db5e7abbe5f3 100644
--- a/drivers/crypto/qat/qat_common/adf_vf_isr.c
+++ b/drivers/crypto/qat/qat_common/adf_vf_isr.c
@@ -53,27 +53,22 @@ EXPORT_SYMBOL_GPL(adf_disable_pf2vf_interrupts);
static int adf_enable_msi(struct adf_accel_dev *accel_dev)
{
struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
- int stat = pci_enable_msi(pci_dev_info->pci_dev);
-
- if (stat) {
+ int stat = pci_alloc_irq_vectors(pci_dev_info->pci_dev, 1, 1,
+ PCI_IRQ_MSI);
+ if (unlikely(stat < 0)) {
dev_err(&GET_DEV(accel_dev),
- "Failed to enable MSI interrupts\n");
+ "Failed to enable MSI interrupt: %d\n", stat);
return stat;
}
- accel_dev->vf.irq_name = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
- if (!accel_dev->vf.irq_name)
- return -ENOMEM;
-
- return stat;
+ return 0;
}
static void adf_disable_msi(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
- kfree(accel_dev->vf.irq_name);
- pci_disable_msi(pdev);
+ pci_free_irq_vectors(pdev);
}
static void adf_dev_stop_async(struct work_struct *work)
@@ -101,6 +96,11 @@ static void adf_pf2vf_bh_handler(void *data)
/* Read the message from PF */
msg = ADF_CSR_RD(pmisc_bar_addr, hw_data->get_pf2vf_offset(0));
+ if (!(msg & ADF_PF2VF_INT)) {
+ dev_info(&GET_DEV(accel_dev),
+ "Spurious PF2VF interrupt, msg %X. Ignored\n", msg);
+ goto out;
+ }
if (!(msg & ADF_PF2VF_MSGORIGIN_SYSTEM))
/* Ignore legacy non-system (non-kernel) PF2VF messages */
@@ -149,6 +149,7 @@ static void adf_pf2vf_bh_handler(void *data)
msg &= ~ADF_PF2VF_INT;
ADF_CSR_WR(pmisc_bar_addr, hw_data->get_pf2vf_offset(0), msg);
+out:
/* Re-enable PF2VF interrupts */
adf_enable_pf2vf_interrupts(accel_dev);
return;
@@ -240,6 +241,7 @@ static int adf_request_msi_irq(struct adf_accel_dev *accel_dev)
}
cpu = accel_dev->accel_id % num_online_cpus();
irq_set_affinity_hint(pdev->irq, get_cpu_mask(cpu));
+ accel_dev->vf.irq_enabled = true;
return ret;
}
@@ -271,8 +273,10 @@ void adf_vf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
- irq_set_affinity_hint(pdev->irq, NULL);
- free_irq(pdev->irq, (void *)accel_dev);
+ if (accel_dev->vf.irq_enabled) {
+ irq_set_affinity_hint(pdev->irq, NULL);
+ free_irq(pdev->irq, accel_dev);
+ }
adf_cleanup_bh(accel_dev);
adf_cleanup_pf2vf_bh(accel_dev);
adf_disable_msi(accel_dev);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index 0a9ce365a544..8e2e1554dcf6 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -35,34 +35,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self)
return ~fuses & ADF_DH895XCC_ACCELENGINES_MASK;
}
-static u32 get_num_accels(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->accel_mask)
- return 0;
-
- for (i = 0; i < ADF_DH895XCC_MAX_ACCELERATORS; i++) {
- if (self->accel_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
-static u32 get_num_aes(struct adf_hw_device_data *self)
-{
- u32 i, ctr = 0;
-
- if (!self || !self->ae_mask)
- return 0;
-
- for (i = 0; i < ADF_DH895XCC_MAX_ACCELENGINES; i++) {
- if (self->ae_mask & (1 << i))
- ctr++;
- }
- return ctr;
-}
-
static u32 get_misc_bar_id(struct adf_hw_device_data *self)
{
return ADF_DH895XCC_PMISC_BAR;
@@ -126,41 +98,6 @@ static const u32 *adf_get_arbiter_mapping(void)
return thrd_to_arb_map;
}
-static u32 get_pf2vf_offset(u32 i)
-{
- return ADF_DH895XCC_PF2VF_OFFSET(i);
-}
-
-static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
-{
- struct adf_hw_device_data *hw_device = accel_dev->hw_device;
- struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR];
- unsigned long accel_mask = hw_device->accel_mask;
- unsigned long ae_mask = hw_device->ae_mask;
- void __iomem *csr = misc_bar->virt_addr;
- unsigned int val, i;
-
- /* Enable Accel Engine error detection & correction */
- for_each_set_bit(i, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) {
- val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_CTX_ENABLES(i));
- val |= ADF_DH895XCC_ENABLE_AE_ECC_ERR;
- ADF_CSR_WR(csr, ADF_DH895XCC_AE_CTX_ENABLES(i), val);
- val = ADF_CSR_RD(csr, ADF_DH895XCC_AE_MISC_CONTROL(i));
- val |= ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR;
- ADF_CSR_WR(csr, ADF_DH895XCC_AE_MISC_CONTROL(i), val);
- }
-
- /* Enable shared memory error detection & correction */
- for_each_set_bit(i, &accel_mask, ADF_DH895XCC_MAX_ACCELERATORS) {
- val = ADF_CSR_RD(csr, ADF_DH895XCC_UERRSSMSH(i));
- val |= ADF_DH895XCC_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_DH895XCC_UERRSSMSH(i), val);
- val = ADF_CSR_RD(csr, ADF_DH895XCC_CERRSSMSH(i));
- val |= ADF_DH895XCC_ERRSSMSH_EN;
- ADF_CSR_WR(csr, ADF_DH895XCC_CERRSSMSH(i), val);
- }
-}
-
static void adf_enable_ints(struct adf_accel_dev *accel_dev)
{
void __iomem *addr;
@@ -175,11 +112,50 @@ static void adf_enable_ints(struct adf_accel_dev *accel_dev)
ADF_DH895XCC_SMIA1_MASK);
}
-static int adf_enable_pf2vf_comms(struct adf_accel_dev *accel_dev)
+static u32 get_vf2pf_sources(void __iomem *pmisc_bar)
+{
+ u32 errsou5, errmsk5, vf_int_mask;
+
+ vf_int_mask = adf_gen2_get_vf2pf_sources(pmisc_bar);
+
+ /* Get the interrupt sources triggered by VFs, but, to avoid duplicate
+ * entries in the work queue, clear the vf_int_mask bits that are already
+ * masked in the ERRMSK register.
+ */
+ errsou5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRSOU5);
+ errmsk5 = ADF_CSR_RD(pmisc_bar, ADF_GEN2_ERRMSK5);
+ vf_int_mask |= ADF_DH895XCC_ERR_REG_VF2PF_U(errsou5);
+ vf_int_mask &= ~ADF_DH895XCC_ERR_REG_VF2PF_U(errmsk5);
+
+ return vf_int_mask;
+}
+
+static void enable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
{
- spin_lock_init(&accel_dev->pf.vf2pf_ints_lock);
+ /* Enable VF2PF Messaging Ints - VFs 0 through 15 per vf_mask[15:0] */
+ adf_gen2_enable_vf2pf_interrupts(pmisc_addr, vf_mask);
- return 0;
+ /* Enable VF2PF Messaging Ints - VFs 16 through 31 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ & ~ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
+}
+
+static void disable_vf2pf_interrupts(void __iomem *pmisc_addr, u32 vf_mask)
+{
+ /* Disable VF2PF interrupts for VFs 0 through 15 per vf_mask[15:0] */
+ adf_gen2_disable_vf2pf_interrupts(pmisc_addr, vf_mask);
+
+ /* Disable VF2PF interrupts for VFs 16 through 31 per vf_mask[31:16] */
+ if (vf_mask >> 16) {
+ u32 val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_ERRMSK5)
+ | ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
+
+ ADF_CSR_WR(pmisc_addr, ADF_GEN2_ERRMSK5, val);
+ }
}
static void configure_iov_threads(struct adf_accel_dev *accel_dev, bool enable)
@@ -198,16 +174,16 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->num_accel = ADF_DH895XCC_MAX_ACCELERATORS;
hw_data->num_logical_accel = 1;
hw_data->num_engines = ADF_DH895XCC_MAX_ACCELENGINES;
- hw_data->tx_rx_gap = ADF_DH895XCC_RX_RINGS_OFFSET;
- hw_data->tx_rings_mask = ADF_DH895XCC_TX_RINGS_MASK;
+ hw_data->tx_rx_gap = ADF_GEN2_RX_RINGS_OFFSET;
+ hw_data->tx_rings_mask = ADF_GEN2_TX_RINGS_MASK;
hw_data->alloc_irq = adf_isr_resource_alloc;
hw_data->free_irq = adf_isr_resource_free;
- hw_data->enable_error_correction = adf_enable_error_correction;
+ hw_data->enable_error_correction = adf_gen2_enable_error_correction;
hw_data->get_accel_mask = get_accel_mask;
hw_data->get_ae_mask = get_ae_mask;
hw_data->get_accel_cap = get_accel_cap;
- hw_data->get_num_accels = get_num_accels;
- hw_data->get_num_aes = get_num_aes;
+ hw_data->get_num_accels = adf_gen2_get_num_accels;
+ hw_data->get_num_aes = adf_gen2_get_num_aes;
hw_data->get_etr_bar_id = get_etr_bar_id;
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_admin_info = adf_gen2_get_admin_info;
@@ -225,7 +201,10 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_arb_mapping = adf_get_arbiter_mapping;
hw_data->enable_ints = adf_enable_ints;
hw_data->reset_device = adf_reset_sbr;
- hw_data->get_pf2vf_offset = get_pf2vf_offset;
+ hw_data->get_pf2vf_offset = adf_gen2_get_pf2vf_offset;
+ hw_data->get_vf2pf_sources = get_vf2pf_sources;
+ hw_data->enable_vf2pf_interrupts = enable_vf2pf_interrupts;
+ hw_data->disable_vf2pf_interrupts = disable_vf2pf_interrupts;
hw_data->enable_pfvf_comms = adf_enable_pf2vf_comms;
hw_data->disable_iov = adf_disable_sriov;
hw_data->min_iov_compat_ver = ADF_PFVF_COMPAT_THIS_VERSION;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
index f99319cd4543..0af34dd8708a 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.h
@@ -7,8 +7,6 @@
#define ADF_DH895XCC_SRAM_BAR 0
#define ADF_DH895XCC_PMISC_BAR 1
#define ADF_DH895XCC_ETR_BAR 2
-#define ADF_DH895XCC_RX_RINGS_OFFSET 8
-#define ADF_DH895XCC_TX_RINGS_MASK 0xFF
#define ADF_DH895XCC_FUSECTL_SKU_MASK 0x300000
#define ADF_DH895XCC_FUSECTL_SKU_SHIFT 20
#define ADF_DH895XCC_FUSECTL_SKU_1 0x0
@@ -25,16 +23,10 @@
#define ADF_DH895XCC_SMIAPF1_MASK_OFFSET (0x3A000 + 0x30)
#define ADF_DH895XCC_SMIA0_MASK 0xFFFFFFFF
#define ADF_DH895XCC_SMIA1_MASK 0x1
-/* Error detection and correction */
-#define ADF_DH895XCC_AE_CTX_ENABLES(i) (i * 0x1000 + 0x20818)
-#define ADF_DH895XCC_AE_MISC_CONTROL(i) (i * 0x1000 + 0x20960)
-#define ADF_DH895XCC_ENABLE_AE_ECC_ERR BIT(28)
-#define ADF_DH895XCC_ENABLE_AE_ECC_PARITY_CORR (BIT(24) | BIT(12))
-#define ADF_DH895XCC_UERRSSMSH(i) (i * 0x4000 + 0x18)
-#define ADF_DH895XCC_CERRSSMSH(i) (i * 0x4000 + 0x10)
-#define ADF_DH895XCC_ERRSSMSH_EN BIT(3)
-#define ADF_DH895XCC_PF2VF_OFFSET(i) (0x3A000 + 0x280 + ((i) * 0x04))
+/* Masks for VF2PF interrupts */
+#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src) (((vf_src) & 0x0000FFFF) << 16)
+#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)
/* AE to function mapping */
#define ADF_DH895XCC_AE2FUNC_MAP_GRP_A_NUM_REGS 96
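
Aside (not part of the patch): the two ERR_*_VF2PF_U macros just added exist because dh895xcc spreads the 32 possible VF interrupt bits across two registers. A minimal userspace sketch of the arithmetic, with a made-up vf_mask value; the register names in the comments are only for orientation:

#include <stdint.h>
#include <stdio.h>

/* Macros copied from the header hunk above. */
#define ADF_DH895XCC_ERR_REG_VF2PF_U(vf_src)  (((vf_src) & 0x0000FFFF) << 16)
#define ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask) ((vf_mask) >> 16)

int main(void)
{
	uint32_t vf_mask = 0x00030001;	/* hypothetical: VFs 0, 16 and 17 */

	/* VFs 0..15 stay in the low half and are handled by the generic
	 * adf_gen2_* helpers. */
	uint32_t low = vf_mask & 0xFFFF;
	/* VFs 16..31 are folded down into bits [15:0] of ERRMSK5. */
	uint32_t high = ADF_DH895XCC_ERR_MSK_VF2PF_U(vf_mask);
	/* Reverse direction: ERRSOU5 bits mapped back into vf_int_mask. */
	uint32_t back = ADF_DH895XCC_ERR_REG_VF2PF_U(high);

	printf("low=0x%04x high=0x%04x back=0x%08x\n",
	       (unsigned)low, (unsigned)high, (unsigned)back);
	/* prints: low=0x0001 high=0x0003 back=0x00030000 */
	return 0;
}
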
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 55aa3a71169b..7717e9e5977b 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -2171,6 +2171,8 @@ static int s5p_aes_probe(struct platform_device *pdev)
variant = find_s5p_sss_version(pdev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
/*
* Note: HASH and PRNG uses the same registers in secss, avoid
diff --git a/drivers/crypto/sa2ul.c b/drivers/crypto/sa2ul.c
index 544d7040cfc5..bcbc38dc6ae8 100644
--- a/drivers/crypto/sa2ul.c
+++ b/drivers/crypto/sa2ul.c
@@ -2412,8 +2412,7 @@ static int sa_ul_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret < 0) {
- dev_err(&pdev->dev, "%s: failed to get sync: %d\n", __func__,
- ret);
+ dev_err(dev, "%s: failed to get sync: %d\n", __func__, ret);
pm_runtime_disable(dev);
return ret;
}
@@ -2435,16 +2434,16 @@ static int sa_ul_probe(struct platform_device *pdev)
sa_register_algos(dev_data);
- ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+ ret = of_platform_populate(node, NULL, NULL, dev);
if (ret)
goto release_dma;
- device_for_each_child(&pdev->dev, &pdev->dev, sa_link_child);
+ device_for_each_child(dev, dev, sa_link_child);
return 0;
release_dma:
- sa_unregister_algos(&pdev->dev);
+ sa_unregister_algos(dev);
dma_release_channel(dev_data->dma_rx2);
dma_release_channel(dev_data->dma_rx1);
@@ -2453,8 +2452,8 @@ release_dma:
destroy_dma_pool:
dma_pool_destroy(dev_data->sc_pool);
- pm_runtime_put_sync(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
return ret;
}
diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h
index 53927f9fa77e..9db0c402c9ce 100644
--- a/drivers/cxl/cxl.h
+++ b/drivers/cxl/cxl.h
@@ -75,52 +75,27 @@ static inline int cxl_hdm_decoder_count(u32 cap_hdr)
#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
-#define CXL_COMPONENT_REGS() \
- void __iomem *hdm_decoder
-
-#define CXL_DEVICE_REGS() \
- void __iomem *status; \
- void __iomem *mbox; \
- void __iomem *memdev
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_COMPONENT_REGS - Common set of CXL Component register block base pointers
- * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
- */
-struct cxl_component_regs {
- CXL_COMPONENT_REGS();
-};
-
-/* See note for 'struct cxl_regs' for the rationale of this organization */
-/*
- * CXL_DEVICE_REGS - Common set of CXL Device register block base pointers
- * @status: CXL 2.0 8.2.8.3 Device Status Registers
- * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
- * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
- */
-struct cxl_device_regs {
- CXL_DEVICE_REGS();
-};
-
/*
- * Note, the anonymous union organization allows for per
- * register-block-type helper routines, without requiring block-type
- * agnostic code to include the prefix.
+ * Using struct_group() allows for per register-block-type helper routines,
+ * without requiring block-type agnostic code to include the prefix.
*/
struct cxl_regs {
- union {
- struct {
- CXL_COMPONENT_REGS();
- };
- struct cxl_component_regs component;
- };
- union {
- struct {
- CXL_DEVICE_REGS();
- };
- struct cxl_device_regs device_regs;
- };
+ /*
+ * Common set of CXL Component register block base pointers
+ * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
+ */
+ struct_group_tagged(cxl_component_regs, component,
+ void __iomem *hdm_decoder;
+ );
+ /*
+ * Common set of CXL Device register block base pointers
+ * @status: CXL 2.0 8.2.8.3 Device Status Registers
+ * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
+ * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
+ */
+ struct_group_tagged(cxl_device_regs, device_regs,
+ void __iomem *status, *mbox, *memdev;
+ );
};
struct cxl_reg_map {
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 85faa7a5c7d1..06333d430382 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -827,7 +827,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
goto err_dev;
}
- if (!devfreq->profile->max_state && !devfreq->profile->freq_table) {
+ if (!devfreq->profile->max_state || !devfreq->profile->freq_table) {
mutex_unlock(&devfreq->lock);
err = set_freq_table(devfreq);
if (err < 0)
diff --git a/drivers/devfreq/event/exynos-ppmu.c b/drivers/devfreq/event/exynos-ppmu.c
index 17ed980d9099..9b849d781116 100644
--- a/drivers/devfreq/event/exynos-ppmu.c
+++ b/drivers/devfreq/event/exynos-ppmu.c
@@ -94,11 +94,16 @@ static struct __exynos_ppmu_events {
PPMU_EVENT(d1-general),
PPMU_EVENT(d1-rt),
- /* For Exynos5422 SoC */
+ /* For Exynos5422 SoC, deprecated (kept for backwards compatibility) */
PPMU_EVENT(dmc0_0),
PPMU_EVENT(dmc0_1),
PPMU_EVENT(dmc1_0),
PPMU_EVENT(dmc1_1),
+ /* For Exynos5422 SoC */
+ PPMU_EVENT(dmc0-0),
+ PPMU_EVENT(dmc0-1),
+ PPMU_EVENT(dmc1-0),
+ PPMU_EVENT(dmc1-1),
};
static int __exynos_ppmu_find_ppmu_id(const char *edev_name)
@@ -561,13 +566,10 @@ static int of_get_devfreq_events(struct device_node *np,
* use default if not.
*/
if (info->ppmu_type == EXYNOS_TYPE_PPMU_V2) {
- int id;
/* Not all registers take the same value for
* read+write data count.
*/
- id = __exynos_ppmu_find_ppmu_id(desc[j].name);
-
- switch (id) {
+ switch (ppmu_events[i].id) {
case PPMU_PMNCNT0:
case PPMU_PMNCNT1:
case PPMU_PMNCNT2:
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 40d81f23cacf..1ef021273a06 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
- dma-resv.o seqno-fence.o
+ dma-resv.o
obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
obj-$(CONFIG_DMABUF_HEAPS) += heaps/
obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 6c2b5ea828a6..2ea59cb8edcd 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -74,7 +74,7 @@ static void dma_buf_release(struct dentry *dentry)
* If you hit this BUG() it means someone dropped their ref to the
* dma-buf while still having pending operation to the buffer.
*/
- BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
dma_buf_stats_teardown(dmabuf);
dmabuf->ops->release(dmabuf);
@@ -82,6 +82,7 @@ static void dma_buf_release(struct dentry *dentry)
if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
dma_resv_fini(dmabuf->resv);
+ WARN_ON(!list_empty(&dmabuf->attachments));
module_put(dmabuf->owner);
kfree(dmabuf->name);
kfree(dmabuf);
@@ -205,16 +206,55 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
wake_up_locked_poll(dcb->poll, dcb->active);
dcb->active = 0;
spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+}
+
+static bool dma_buf_poll_shared(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_list *fobj = dma_resv_shared_list(resv);
+ struct dma_fence *fence;
+ int i, r;
+
+ if (!fobj)
+ return false;
+
+ for (i = 0; i < fobj->shared_count; ++i) {
+ fence = rcu_dereference_protected(fobj->shared[i],
+ dma_resv_held(resv));
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
+}
+
+static bool dma_buf_poll_excl(struct dma_resv *resv,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_fence *fence = dma_resv_excl_fence(resv);
+ int r;
+
+ if (!fence)
+ return false;
+
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+
+ return false;
}
static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
struct dma_buf *dmabuf;
struct dma_resv *resv;
- struct dma_resv_list *fobj;
- struct dma_fence *fence_excl;
__poll_t events;
- unsigned shared_count, seq;
dmabuf = file->private_data;
if (!dmabuf || !dmabuf->resv)
@@ -228,101 +268,50 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
if (!events)
return 0;
-retry:
- seq = read_seqcount_begin(&resv->seq);
- rcu_read_lock();
-
- fobj = rcu_dereference(resv->fence);
- if (fobj)
- shared_count = fobj->shared_count;
- else
- shared_count = 0;
- fence_excl = dma_resv_excl_fence(resv);
- if (read_seqcount_retry(&resv->seq, seq)) {
- rcu_read_unlock();
- goto retry;
- }
-
- if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
- __poll_t pevents = EPOLLIN;
+ dma_resv_lock(resv, NULL);
- if (shared_count == 0)
- pevents |= EPOLLOUT;
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
- if (dcb->active) {
- dcb->active |= pevents;
- events &= ~pevents;
- } else
- dcb->active = pevents;
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
spin_unlock_irq(&dmabuf->poll.lock);
- if (events & pevents) {
- if (!dma_fence_get_rcu(fence_excl)) {
- /* force a recheck */
- events &= ~pevents;
- dma_buf_poll_cb(NULL, &dcb->cb);
- } else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
- dma_buf_poll_cb)) {
- events &= ~pevents;
- dma_fence_put(fence_excl);
- } else {
- /*
- * No callback queued, wake up any additional
- * waiters.
- */
- dma_fence_put(fence_excl);
+ if (events & EPOLLOUT) {
+ if (!dma_buf_poll_shared(resv, dcb) &&
+ !dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- }
+ else
+ events &= ~EPOLLOUT;
}
}
- if ((events & EPOLLOUT) && shared_count > 0) {
- struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
- int i;
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
- /* Only queue a new callback if no event has fired yet */
+ /* Check that callback isn't busy */
spin_lock_irq(&dmabuf->poll.lock);
if (dcb->active)
- events &= ~EPOLLOUT;
+ events &= ~EPOLLIN;
else
- dcb->active = EPOLLOUT;
+ dcb->active = EPOLLIN;
spin_unlock_irq(&dmabuf->poll.lock);
- if (!(events & EPOLLOUT))
- goto out;
-
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
-
- if (!dma_fence_get_rcu(fence)) {
- /*
- * fence refcount dropped to zero, this means
- * that fobj has been freed
- *
- * call dma_buf_poll_cb and force a recheck!
- */
- events &= ~EPOLLOUT;
+ if (events & EPOLLIN) {
+ if (!dma_buf_poll_excl(resv, dcb))
+ /* No callback queued, wake up any other waiters */
dma_buf_poll_cb(NULL, &dcb->cb);
- break;
- }
- if (!dma_fence_add_callback(fence, &dcb->cb,
- dma_buf_poll_cb)) {
- dma_fence_put(fence);
- events &= ~EPOLLOUT;
- break;
- }
- dma_fence_put(fence);
+ else
+ events &= ~EPOLLIN;
}
-
- /* No callback queued, wake up any additional waiters. */
- if (i == shared_count)
- dma_buf_poll_cb(NULL, &dcb->cb);
}
-out:
- rcu_read_unlock();
+ dma_resv_unlock(resv);
return events;
}
@@ -565,8 +554,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
dmabuf->owner = exp_info->owner;
spin_lock_init(&dmabuf->name_lock);
init_waitqueue_head(&dmabuf->poll);
- dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
- dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
if (!resv) {
resv = (struct dma_resv *)&dmabuf[1];
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index ce0f5eff575d..1e82ecd443fa 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -616,20 +616,17 @@ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
* @cb: the callback to register
* @func: the function to call
*
+ * Add a software callback to the fence. The caller should keep a reference to
+ * the fence.
+ *
* @cb will be initialized by dma_fence_add_callback(), no initialization
* by the caller is required. Any number of callbacks can be registered
* to a fence, but a callback can only be registered to one fence at a time.
*
- * Note that the callback can be called from an atomic context. If
- * fence is already signaled, this function will return -ENOENT (and
+ * If fence is already signaled, this function will return -ENOENT (and
* *not* call the callback).
*
- * Add a software callback to the fence. Same restrictions apply to
- * refcount as it does to dma_fence_wait(), however the caller doesn't need to
- * keep a refcount to fence afterward dma_fence_add_callback() has returned:
- * when software access is enabled, the creator of the fence is required to keep
- * the fence alive until after it signals with dma_fence_signal(). The callback
- * itself can be called from irq context.
+ * Note that the callback can be called from an atomic context or irq context.
*
* Returns 0 in case of success, -ENOENT if the fence is already signaled
* and -EINVAL in case of error.
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index e744fd87c63c..a480af9581bd 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -48,6 +48,8 @@
* write operations) or N shared fences (read operations). The RCU
* mechanism is used to protect read access to fences from locked
* write-side updates.
+ *
+ * See struct dma_resv for more details.
*/
DEFINE_WD_CLASS(reservation_ww_class);
@@ -137,7 +139,11 @@ EXPORT_SYMBOL(dma_resv_fini);
* @num_fences: number of fences we want to add
*
* Should be called before dma_resv_add_shared_fence(). Must
- * be called with obj->lock held.
+ * be called with @obj locked through dma_resv_lock().
+ *
+ * Note that the preallocated slots need to be re-reserved if @obj is unlocked
+ * at any time before calling dma_resv_add_shared_fence(). This is validated
+ * when CONFIG_DEBUG_MUTEXES is enabled.
*
* RETURNS
* Zero for success, or -errno
@@ -234,8 +240,10 @@ EXPORT_SYMBOL(dma_resv_reset_shared_max);
* @obj: the reservation object
* @fence: the shared fence to add
*
- * Add a fence to a shared slot, obj->lock must be held, and
+ * Add a fence to a shared slot, @obj must be locked with dma_resv_lock(), and
* dma_resv_reserve_shared() has been called.
+ *
+ * See also &dma_resv.fence for a discussion of the semantics.
*/
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@@ -278,9 +286,11 @@ EXPORT_SYMBOL(dma_resv_add_shared_fence);
/**
* dma_resv_add_excl_fence - Add an exclusive fence.
* @obj: the reservation object
- * @fence: the shared fence to add
+ * @fence: the exclusive fence to add
*
- * Add a fence to the exclusive slot. The obj->lock must be held.
+ * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
+ * Note that this function replaces all fences attached to @obj, see also
+ * &dma_resv.fence_excl for a discussion of the semantics.
*/
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
@@ -314,6 +324,106 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
EXPORT_SYMBOL(dma_resv_add_excl_fence);
/**
+ * dma_resv_iter_restart_unlocked - restart the unlocked iterator
+ * @cursor: The dma_resv_iter object to restart
+ *
+ * Restart the unlocked iteration by initializing the cursor object.
+ */
+static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
+{
+ cursor->seq = read_seqcount_begin(&cursor->obj->seq);
+ cursor->index = -1;
+ if (cursor->all_fences)
+ cursor->fences = dma_resv_shared_list(cursor->obj);
+ else
+ cursor->fences = NULL;
+ cursor->is_restarted = true;
+}
+
+/**
+ * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
+ * @cursor: cursor to record the current position
+ *
+ * Return all the fences in the dma_resv object which are not yet signaled.
+ * The returned fence has an extra local reference so it will stay alive.
+ * If a concurrent modification is detected, the whole iteration is started over again.
+ */
+static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
+{
+ struct dma_resv *obj = cursor->obj;
+
+ do {
+ /* Drop the reference from the previous round */
+ dma_fence_put(cursor->fence);
+
+ if (cursor->index == -1) {
+ cursor->fence = dma_resv_excl_fence(obj);
+ cursor->index++;
+ if (!cursor->fence)
+ continue;
+
+ } else if (!cursor->fences ||
+ cursor->index >= cursor->fences->shared_count) {
+ cursor->fence = NULL;
+ break;
+
+ } else {
+ struct dma_resv_list *fences = cursor->fences;
+ unsigned int idx = cursor->index++;
+
+ cursor->fence = rcu_dereference(fences->shared[idx]);
+ }
+ cursor->fence = dma_fence_get_rcu(cursor->fence);
+ if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
+ break;
+ } while (true);
+}
+
+/**
+ * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the first fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
+{
+ rcu_read_lock();
+ do {
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
+
+/**
+ * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Returns the next fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
+{
+ bool restart;
+
+ rcu_read_lock();
+ cursor->is_restarted = false;
+ restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
+ do {
+ if (restart)
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ restart = true;
+ } while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
+
+/**
* dma_resv_copy_fences - Copy all fences from src to dst.
* @dst: the destination reservation object
* @src: the source reservation object
@@ -322,74 +432,54 @@ EXPORT_SYMBOL(dma_resv_add_excl_fence);
*/
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
- struct dma_resv_list *src_list, *dst_list;
- struct dma_fence *old, *new;
- unsigned int i;
+ struct dma_resv_iter cursor;
+ struct dma_resv_list *list;
+ struct dma_fence *f, *excl;
dma_resv_assert_held(dst);
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
-
-retry:
- if (src_list) {
- unsigned int shared_count = src_list->shared_count;
-
- rcu_read_unlock();
+ list = NULL;
+ excl = NULL;
- dst_list = dma_resv_list_alloc(shared_count);
- if (!dst_list)
- return -ENOMEM;
+ dma_resv_iter_begin(&cursor, src, true);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
- rcu_read_lock();
- src_list = dma_resv_shared_list(src);
- if (!src_list || src_list->shared_count > shared_count) {
- kfree(dst_list);
- goto retry;
- }
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
- dst_list->shared_count = 0;
- for (i = 0; i < src_list->shared_count; ++i) {
- struct dma_fence __rcu **dst;
- struct dma_fence *fence;
+ if (cursor.fences) {
+ unsigned int cnt = cursor.fences->shared_count;
- fence = rcu_dereference(src_list->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &fence->flags))
- continue;
+ list = dma_resv_list_alloc(cnt);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
+ }
- if (!dma_fence_get_rcu(fence)) {
- dma_resv_list_free(dst_list);
- src_list = dma_resv_shared_list(src);
- goto retry;
- }
+ list->shared_count = 0;
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- continue;
+ } else {
+ list = NULL;
}
-
- dst = &dst_list->shared[dst_list->shared_count++];
- rcu_assign_pointer(*dst, fence);
+ excl = NULL;
}
- } else {
- dst_list = NULL;
- }
- new = dma_fence_get_rcu_safe(&src->fence_excl);
- rcu_read_unlock();
-
- src_list = dma_resv_shared_list(dst);
- old = dma_resv_excl_fence(dst);
+ dma_fence_get(f);
+ if (dma_resv_iter_is_exclusive(&cursor))
+ excl = f;
+ else
+ RCU_INIT_POINTER(list->shared[list->shared_count++], f);
+ }
+ dma_resv_iter_end(&cursor);
write_seqcount_begin(&dst->seq);
- /* write_seqcount_begin provides the necessary memory barrier */
- RCU_INIT_POINTER(dst->fence_excl, new);
- RCU_INIT_POINTER(dst->fence, dst_list);
+ excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
+ list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
write_seqcount_end(&dst->seq);
- dma_resv_list_free(src_list);
- dma_fence_put(old);
+ dma_resv_list_free(list);
+ dma_fence_put(excl);
return 0;
}
@@ -399,99 +489,61 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
* dma_resv_get_fences - Get an object's shared and exclusive
* fences without update side lock held
* @obj: the reservation object
- * @pfence_excl: the returned exclusive fence (or NULL)
- * @pshared_count: the number of shared fences returned
- * @pshared: the array of shared fence ptrs returned (array is krealloc'd to
+ * @fence_excl: the returned exclusive fence (or NULL)
+ * @shared_count: the number of shared fences returned
+ * @shared: the array of shared fence ptrs returned (array is krealloc'd to
* the required size, and must be freed by caller)
*
* Retrieve all fences from the reservation object. If the pointer for the
* exclusive fence is not specified the fence is put into the array of the
* shared fences as well. Returns either zero or -ENOMEM.
*/
-int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **pfence_excl,
- unsigned int *pshared_count,
- struct dma_fence ***pshared)
+int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
+ unsigned int *shared_count, struct dma_fence ***shared)
{
- struct dma_fence **shared = NULL;
- struct dma_fence *fence_excl;
- unsigned int shared_count;
- int ret = 1;
-
- do {
- struct dma_resv_list *fobj;
- unsigned int i, seq;
- size_t sz = 0;
-
- shared_count = i = 0;
-
- rcu_read_lock();
- seq = read_seqcount_begin(&obj->seq);
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
- fence_excl = dma_resv_excl_fence(obj);
- if (fence_excl && !dma_fence_get_rcu(fence_excl))
- goto unlock;
+ *shared_count = 0;
+ *shared = NULL;
- fobj = dma_resv_shared_list(obj);
- if (fobj)
- sz += sizeof(*shared) * fobj->shared_max;
+ if (fence_excl)
+ *fence_excl = NULL;
- if (!pfence_excl && fence_excl)
- sz += sizeof(*shared);
+ dma_resv_iter_begin(&cursor, obj, true);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- if (sz) {
- struct dma_fence **nshared;
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ unsigned int count;
- nshared = krealloc(shared, sz,
- GFP_NOWAIT | __GFP_NOWARN);
- if (!nshared) {
- rcu_read_unlock();
+ while (*shared_count)
+ dma_fence_put((*shared)[--(*shared_count)]);
- dma_fence_put(fence_excl);
- fence_excl = NULL;
+ if (fence_excl)
+ dma_fence_put(*fence_excl);
- nshared = krealloc(shared, sz, GFP_KERNEL);
- if (nshared) {
- shared = nshared;
- continue;
- }
+ count = cursor.fences ? cursor.fences->shared_count : 0;
+ count += fence_excl ? 0 : 1;
- ret = -ENOMEM;
- break;
+ /* Re-allocate the array if needed */
+ *shared = krealloc_array(*shared, count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (count && !*shared) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
}
- shared = nshared;
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- shared[i] = rcu_dereference(fobj->shared[i]);
- if (!dma_fence_get_rcu(shared[i]))
- break;
- }
- }
-
- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
- while (i--)
- dma_fence_put(shared[i]);
- dma_fence_put(fence_excl);
- goto unlock;
}
- ret = 0;
-unlock:
- rcu_read_unlock();
- } while (ret);
-
- if (pfence_excl)
- *pfence_excl = fence_excl;
- else if (fence_excl)
- shared[shared_count++] = fence_excl;
-
- if (!shared_count) {
- kfree(shared);
- shared = NULL;
+ dma_fence_get(fence);
+ if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
+ *fence_excl = fence;
+ else
+ (*shared)[(*shared_count)++] = fence;
}
+ dma_resv_iter_end(&cursor);
- *pshared_count = shared_count;
- *pshared = shared;
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
@@ -513,94 +565,25 @@ long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
unsigned long timeout)
{
long ret = timeout ? timeout : 1;
- unsigned int seq, shared_count;
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- int i;
-
-retry:
- shared_count = 0;
- seq = read_seqcount_begin(&obj->seq);
- rcu_read_lock();
- i = -1;
-
- fence = dma_resv_excl_fence(obj);
- if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
- if (!dma_fence_get_rcu(fence))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(fence)) {
- dma_fence_put(fence);
- fence = NULL;
- }
-
- } else {
- fence = NULL;
- }
-
- if (wait_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
-
- if (fobj)
- shared_count = fobj->shared_count;
-
- for (i = 0; !fence && i < shared_count; ++i) {
- struct dma_fence *lfence;
- lfence = rcu_dereference(fobj->shared[i]);
- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
- &lfence->flags))
- continue;
-
- if (!dma_fence_get_rcu(lfence))
- goto unlock_retry;
-
- if (dma_fence_is_signaled(lfence)) {
- dma_fence_put(lfence);
- continue;
- }
+ dma_resv_iter_begin(&cursor, obj, wait_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
- fence = lfence;
- break;
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ if (ret <= 0) {
+ dma_resv_iter_end(&cursor);
+ return ret;
}
}
+ dma_resv_iter_end(&cursor);
- rcu_read_unlock();
- if (fence) {
- if (read_seqcount_retry(&obj->seq, seq)) {
- dma_fence_put(fence);
- goto retry;
- }
-
- ret = dma_fence_wait_timeout(fence, intr, ret);
- dma_fence_put(fence);
- if (ret > 0 && wait_all && (i + 1 < shared_count))
- goto retry;
- }
return ret;
-
-unlock_retry:
- rcu_read_unlock();
- goto retry;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
-static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
-{
- struct dma_fence *fence, *lfence = passed_fence;
- int ret = 1;
-
- if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &lfence->flags)) {
- fence = dma_fence_get_rcu(lfence);
- if (!fence)
- return -1;
-
- ret = !!dma_fence_is_signaled(fence);
- dma_fence_put(fence);
- }
- return ret;
-}
-
/**
* dma_resv_test_signaled - Test if a reservation object's fences have been
* signaled.
@@ -609,49 +592,24 @@ static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
* fence
*
* Callers are not required to hold specific locks, but maybe hold
- * dma_resv_lock() already
+ * dma_resv_lock() already.
+ *
* RETURNS
- * true if all fences signaled, else false
+ *
+ * True if all fences signaled, else false.
*/
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
+ struct dma_resv_iter cursor;
struct dma_fence *fence;
- unsigned int seq;
- int ret;
-
- rcu_read_lock();
-retry:
- ret = true;
- seq = read_seqcount_begin(&obj->seq);
-
- if (test_all) {
- struct dma_resv_list *fobj = dma_resv_shared_list(obj);
- unsigned int i, shared_count;
-
- shared_count = fobj ? fobj->shared_count : 0;
- for (i = 0; i < shared_count; ++i) {
- fence = rcu_dereference(fobj->shared[i]);
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
- else if (!ret)
- break;
- }
- }
-
- fence = dma_resv_excl_fence(obj);
- if (ret && fence) {
- ret = dma_resv_test_signaled_single(fence);
- if (ret < 0)
- goto retry;
+ dma_resv_iter_begin(&cursor, obj, test_all);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_resv_iter_end(&cursor);
+ return false;
}
-
- if (read_seqcount_retry(&obj->seq, seq))
- goto retry;
-
- rcu_read_unlock();
- return ret;
+ dma_resv_iter_end(&cursor);
+ return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
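
Aside (not part of the patch): the conversions above all follow the same pattern for the new unlocked iterator. A minimal sketch of a caller, assuming only the API introduced in this hunk; the function name and the counting are illustrative:

#include <linux/dma-resv.h>
#include <linux/dma-fence.h>

/* Count the not-yet-signaled fences on a dma_resv object without taking the
 * reservation lock. State built up before a restart must be thrown away,
 * because the iterator re-reads the object from scratch. */
static unsigned int count_unsignaled(struct dma_resv *resv, bool all_fences)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, resv, all_fences);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;	/* concurrent update, start over */
		count++;		/* the cursor holds a reference to fence */
	}
	dma_resv_iter_end(&cursor);

	return count;
}
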
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index 23a7e74ef966..f57a39ddd063 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -40,11 +40,12 @@ struct dma_heap_attachment {
bool mapped;
};
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
| __GFP_NORETRY) & ~__GFP_RECLAIM) \
| __GFP_COMP)
-#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
-static gfp_t order_flags[] = {HIGH_ORDER_GFP, LOW_ORDER_GFP, LOW_ORDER_GFP};
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
/*
* The selection of the orders used for allocation (1MB, 64K, 4K) is designed
* to match with the sizes often found in IOMMUs. Using order 4 pages instead
diff --git a/drivers/dma-buf/seqno-fence.c b/drivers/dma-buf/seqno-fence.c
deleted file mode 100644
index bfe14e94c488..000000000000
--- a/drivers/dma-buf/seqno-fence.c
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * seqno-fence, using a dma-buf to synchronize fencing
- *
- * Copyright (C) 2012 Texas Instruments
- * Copyright (C) 2012-2014 Canonical Ltd
- * Authors:
- * Rob Clark <robdclark@gmail.com>
- * Maarten Lankhorst <maarten.lankhorst@canonical.com>
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <linux/seqno-fence.h>
-
-static const char *seqno_fence_get_driver_name(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->get_driver_name(fence);
-}
-
-static const char *seqno_fence_get_timeline_name(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->get_timeline_name(fence);
-}
-
-static bool seqno_enable_signaling(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->enable_signaling(fence);
-}
-
-static bool seqno_signaled(struct dma_fence *fence)
-{
- struct seqno_fence *seqno_fence = to_seqno_fence(fence);
-
- return seqno_fence->ops->signaled && seqno_fence->ops->signaled(fence);
-}
-
-static void seqno_release(struct dma_fence *fence)
-{
- struct seqno_fence *f = to_seqno_fence(fence);
-
- dma_buf_put(f->sync_buf);
- if (f->ops->release)
- f->ops->release(fence);
- else
- dma_fence_free(&f->base);
-}
-
-static signed long seqno_wait(struct dma_fence *fence, bool intr,
- signed long timeout)
-{
- struct seqno_fence *f = to_seqno_fence(fence);
-
- return f->ops->wait(fence, intr, timeout);
-}
-
-const struct dma_fence_ops seqno_fence_ops = {
- .get_driver_name = seqno_fence_get_driver_name,
- .get_timeline_name = seqno_fence_get_timeline_name,
- .enable_signaling = seqno_enable_signaling,
- .signaled = seqno_signaled,
- .wait = seqno_wait,
- .release = seqno_release,
-};
-EXPORT_SYMBOL(seqno_fence_ops);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index 4a2a796e348c..52d04641e361 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -742,8 +742,7 @@ pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
dma_addr_t dma;
int i;
- sw_desc = kzalloc(sizeof(*sw_desc) +
- nb_hw_desc * sizeof(struct pxad_desc_hw *),
+ sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
GFP_NOWAIT);
if (!sw_desc)
return NULL;
diff --git a/drivers/edac/al_mc_edac.c b/drivers/edac/al_mc_edac.c
index 7d4f396c27b5..178b9e581a72 100644
--- a/drivers/edac/al_mc_edac.c
+++ b/drivers/edac/al_mc_edac.c
@@ -238,11 +238,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
if (!mci)
return -ENOMEM;
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_free, mci);
- if (ret) {
- edac_mc_free(mci);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_free, mci);
+ if (ret)
return ret;
- }
platform_set_drvdata(pdev, mci);
al_mc = mci->pvt_info;
@@ -293,11 +291,9 @@ static int al_mc_edac_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_add_action(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
- if (ret) {
- edac_mc_del_mc(&pdev->dev);
+ ret = devm_add_action_or_reset(&pdev->dev, devm_al_mc_edac_del, &pdev->dev);
+ if (ret)
return ret;
- }
if (al_mc->irq_ue > 0) {
ret = devm_request_irq(&pdev->dev,
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 99b06a3e8fb1..4fce75013674 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -1065,12 +1065,14 @@ static void debug_dump_dramcfg_low(struct amd64_pvt *pvt, u32 dclr, int chan)
#define CS_ODD_PRIMARY BIT(1)
#define CS_EVEN_SECONDARY BIT(2)
#define CS_ODD_SECONDARY BIT(3)
+#define CS_3R_INTERLEAVE BIT(4)
#define CS_EVEN (CS_EVEN_PRIMARY | CS_EVEN_SECONDARY)
#define CS_ODD (CS_ODD_PRIMARY | CS_ODD_SECONDARY)
static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
{
+ u8 base, count = 0;
int cs_mode = 0;
if (csrow_enabled(2 * dimm, ctrl, pvt))
@@ -1083,6 +1085,20 @@ static int f17_get_cs_mode(int dimm, u8 ctrl, struct amd64_pvt *pvt)
if (csrow_sec_enabled(2 * dimm + 1, ctrl, pvt))
cs_mode |= CS_ODD_SECONDARY;
+ /*
+ * 3 Rank interleaving support.
+ * There should be only three bases enabled and their two masks should
+ * be equal.
+ */
+ for_each_chip_select(base, ctrl, pvt)
+ count += csrow_enabled(base, ctrl, pvt);
+
+ if (count == 3 &&
+ pvt->csels[ctrl].csmasks[0] == pvt->csels[ctrl].csmasks[1]) {
+ edac_dbg(1, "3R interleaving in use.\n");
+ cs_mode |= CS_3R_INTERLEAVE;
+ }
+
return cs_mode;
}
@@ -1891,10 +1907,14 @@ static int f17_addr_mask_to_cs_size(struct amd64_pvt *pvt, u8 umc,
*
* The MSB is the number of bits in the full mask because BIT[0] is
* always 0.
+ *
+ * In the special 3 Rank interleaving case, a single bit is flipped
+ * without swapping with the most significant bit. This can be handled
+ * by keeping the MSB where it is and ignoring the single zero bit.
*/
msb = fls(addr_mask_orig) - 1;
weight = hweight_long(addr_mask_orig);
- num_zero_bits = msb - weight;
+ num_zero_bits = msb - weight - !!(cs_mode & CS_3R_INTERLEAVE);
/* Take the number of zero bits off from the top of the mask. */
addr_mask_deinterleaved = GENMASK_ULL(msb - num_zero_bits, 1);
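
Aside (not part of the patch): a worked example of why the extra term is subtracted in the 3-rank case, with a made-up address mask and userspace stand-ins for fls(), hweight_long() and GENMASK_ULL():

#include <stdint.h>
#include <stdio.h>

#define GENMASK_ULL(h, l) (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))
static int fls32(uint32_t x) { return x ? 32 - __builtin_clz(x) : 0; }

int main(void)
{
	uint32_t mask = 0xFEFE;			/* hypothetical mask, BIT(8) cleared */
	int msb = fls32(mask) - 1;		/* 15 */
	int weight = __builtin_popcount(mask);	/* 14 */
	int zeros;

	/* Normal interleaving: the zero bit came from a swap with the MSB,
	 * so it is removed and the decoded size shrinks by one bit. */
	zeros = msb - weight;					/* 1 */
	printf("2R: 0x%llx\n", GENMASK_ULL(msb - zeros, 1));	/* bits 1..14 */

	/* 3R interleaving: the bit was flipped without an MSB swap, so it is
	 * ignored and the full mask width is kept. */
	zeros = msb - weight - 1;				/* 0 */
	printf("3R: 0x%llx\n", GENMASK_ULL(msb - zeros, 1));	/* bits 1..15 */
	return 0;
}
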
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 2c5975674723..9f82ca295353 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -66,14 +66,12 @@ unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
char *p = buf;
for (i = 0; i < mci->n_layers; i++) {
- n = snprintf(p, len, "%s %d ",
+ n = scnprintf(p, len, "%s %d ",
edac_layer_name[mci->layers[i].type],
dimm->location[i]);
p += n;
len -= n;
count += n;
- if (!len)
- break;
}
return count;
@@ -341,19 +339,16 @@ static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
*/
len = sizeof(dimm->label);
p = dimm->label;
- n = snprintf(p, len, "mc#%u", mci->mc_idx);
+ n = scnprintf(p, len, "mc#%u", mci->mc_idx);
p += n;
len -= n;
for (layer = 0; layer < mci->n_layers; layer++) {
- n = snprintf(p, len, "%s#%u",
- edac_layer_name[mci->layers[layer].type],
- pos[layer]);
+ n = scnprintf(p, len, "%s#%u",
+ edac_layer_name[mci->layers[layer].type],
+ pos[layer]);
p += n;
len -= n;
dimm->location[layer] = pos[layer];
-
- if (len <= 0)
- break;
}
/* Link it to the csrows old API data */
@@ -1027,12 +1022,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
const char *other_detail)
{
struct dimm_info *dimm;
- char *p;
+ char *p, *end;
int row = -1, chan = -1;
int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
int i, n_labels = 0;
struct edac_raw_error_desc *e = &mci->error_desc;
bool any_memory = true;
+ const char *prefix;
edac_dbg(3, "MC%d\n", mci->mc_idx);
@@ -1087,6 +1083,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
*/
p = e->label;
*p = '\0';
+ end = p + sizeof(e->label);
+ prefix = "";
mci_for_each_dimm(mci, dimm) {
if (top_layer >= 0 && top_layer != dimm->location[0])
@@ -1114,12 +1112,8 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
p = e->label;
*p = '\0';
} else {
- if (p != e->label) {
- strcpy(p, OTHER_LABEL);
- p += strlen(OTHER_LABEL);
- }
- strcpy(p, dimm->label);
- p += strlen(p);
+ p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
+ prefix = OTHER_LABEL;
}
/*
@@ -1141,25 +1135,25 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
}
if (any_memory)
- strcpy(e->label, "any memory");
+ strscpy(e->label, "any memory", sizeof(e->label));
else if (!*e->label)
- strcpy(e->label, "unknown memory");
+ strscpy(e->label, "unknown memory", sizeof(e->label));
edac_inc_csrow(e, row, chan);
/* Fill the RAM location data */
p = e->location;
+ end = p + sizeof(e->location);
+ prefix = "";
for (i = 0; i < mci->n_layers; i++) {
if (pos[i] < 0)
continue;
- p += sprintf(p, "%s:%d ",
- edac_layer_name[mci->layers[i].type],
- pos[i]);
+ p += scnprintf(p, end - p, "%s%s:%d", prefix,
+ edac_layer_name[mci->layers[i].type], pos[i]);
+ prefix = " ";
}
- if (p > e->location)
- *(p - 1) = '\0';
edac_raw_mc_handle_error(e);
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 2f9f1e74bb35..0a638c97702a 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -744,7 +744,7 @@ static ssize_t mci_ue_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_mc);
+ return sprintf(data, "%u\n", mci->ue_mc);
}
static ssize_t mci_ce_count_show(struct device *dev,
@@ -753,7 +753,7 @@ static ssize_t mci_ce_count_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_mc);
+ return sprintf(data, "%u\n", mci->ce_mc);
}
static ssize_t mci_ce_noinfo_show(struct device *dev,
@@ -762,7 +762,7 @@ static ssize_t mci_ce_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ce_noinfo_count);
+ return sprintf(data, "%u\n", mci->ce_noinfo_count);
}
static ssize_t mci_ue_noinfo_show(struct device *dev,
@@ -771,7 +771,7 @@ static ssize_t mci_ue_noinfo_show(struct device *dev,
{
struct mem_ctl_info *mci = to_mci(dev);
- return sprintf(data, "%d\n", mci->ue_noinfo_count);
+ return sprintf(data, "%u\n", mci->ue_noinfo_count);
}
static ssize_t mci_seconds_show(struct device *dev,
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4c626fcd4dcb..1522d4aa2ca6 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1052,7 +1052,7 @@ static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
rc = ((reg << 6) | rc) << 26;
- return rc | 0x1ffffff;
+ return rc | 0x3ffffff;
}
static u64 knl_get_tolm(struct sbridge_pvt *pvt)
diff --git a/drivers/edac/ti_edac.c b/drivers/edac/ti_edac.c
index 169f96e51c29..6971ded598de 100644
--- a/drivers/edac/ti_edac.c
+++ b/drivers/edac/ti_edac.c
@@ -245,11 +245,8 @@ static int ti_edac_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
reg = devm_ioremap_resource(dev, res);
- if (IS_ERR(reg)) {
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF controller regs not defined\n");
+ if (IS_ERR(reg))
return PTR_ERR(reg);
- }
layers[0].type = EDAC_MC_LAYER_ALL_MEM;
layers[0].size = 1;
@@ -281,8 +278,6 @@ static int ti_edac_probe(struct platform_device *pdev)
error_irq = platform_get_irq(pdev, 0);
if (error_irq < 0) {
ret = error_irq;
- edac_printk(KERN_ERR, EDAC_MOD_NAME,
- "EMIF irq number not defined.\n");
goto err;
}
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index fb6c651214f3..9f89c17730b1 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -10,6 +10,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
@@ -953,11 +954,25 @@ static enum dma_data_direction iso_dma_direction(struct fw_iso_context *context)
return DMA_FROM_DEVICE;
}
+static struct fw_iso_context *fw_iso_mc_context_create(struct fw_card *card,
+ fw_iso_mc_callback_t callback,
+ void *callback_data)
+{
+ struct fw_iso_context *ctx;
+
+ ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL,
+ 0, 0, 0, NULL, callback_data);
+ if (!IS_ERR(ctx))
+ ctx->callback.mc = callback;
+
+ return ctx;
+}
+
static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
- fw_iso_callback_t cb;
+ union fw_iso_callback cb;
int ret;
BUILD_BUG_ON(FW_CDEV_ISO_CONTEXT_TRANSMIT != FW_ISO_CONTEXT_TRANSMIT ||
@@ -970,7 +985,7 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
if (a->speed > SCODE_3200 || a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE:
@@ -978,19 +993,24 @@ static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
a->channel > 63)
return -EINVAL;
- cb = iso_callback;
+ cb.sc = iso_callback;
break;
case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
- cb = (fw_iso_callback_t)iso_mc_callback;
+ cb.mc = iso_mc_callback;
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card, a->type,
- a->channel, a->speed, a->header_size, cb, client);
+ if (a->type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
+ context = fw_iso_mc_context_create(client->device->card, cb.mc,
+ client);
+ else
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed,
+ a->header_size, cb.sc, client);
if (IS_ERR(context))
return PTR_ERR(context);
if (client->version < FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW)
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 4c3fd2eed1da..dcc141068128 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -1443,8 +1443,8 @@ static int fwnet_probe(struct fw_unit *unit,
struct net_device *net;
bool allocated_netdev = false;
struct fwnet_device *dev;
+ union fwnet_hwaddr ha;
int ret;
- union fwnet_hwaddr *ha;
mutex_lock(&fwnet_device_mutex);
@@ -1491,12 +1491,12 @@ static int fwnet_probe(struct fw_unit *unit,
net->max_mtu = 4096U;
/* Set our hardware address while we're at it */
- ha = (union fwnet_hwaddr *)net->dev_addr;
- put_unaligned_be64(card->guid, &ha->uc.uniq_id);
- ha->uc.max_rec = dev->card->max_receive;
- ha->uc.sspd = dev->card->link_speed;
- put_unaligned_be16(dev->local_fifo >> 32, &ha->uc.fifo_hi);
- put_unaligned_be32(dev->local_fifo & 0xffffffff, &ha->uc.fifo_lo);
+ ha.uc.uniq_id = cpu_to_be64(card->guid);
+ ha.uc.max_rec = dev->card->max_receive;
+ ha.uc.sspd = dev->card->link_speed;
+ ha.uc.fifo_hi = cpu_to_be16(dev->local_fifo >> 32);
+ ha.uc.fifo_lo = cpu_to_be32(dev->local_fifo & 0xffffffff);
+ dev_addr_set(net, ha.u);
memset(net->broadcast, -1, net->addr_len);
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index cda7d7162cbb..75cb91055c17 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -295,6 +295,7 @@ config TURRIS_MOX_RWTM
source "drivers/firmware/arm_ffa/Kconfig"
source "drivers/firmware/broadcom/Kconfig"
+source "drivers/firmware/cirrus/Kconfig"
source "drivers/firmware/google/Kconfig"
source "drivers/firmware/efi/Kconfig"
source "drivers/firmware/imx/Kconfig"
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 5ced0673d94b..4e58cb474a68 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_TURRIS_MOX_RWTM) += turris-mox-rwtm.o
obj-y += arm_ffa/
obj-y += arm_scmi/
obj-y += broadcom/
+obj-y += cirrus/
obj-y += meson/
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index c9fb56afbcb4..14f900047ac0 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -167,6 +167,27 @@ struct ffa_drv_info {
static struct ffa_drv_info *drv_info;
+/*
+ * The driver must be able to support all the versions from the earliest
+ * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
+ * The specification states that if firmware supports a FFA implementation
+ * that is incompatible with and at a greater version number than specified
+ * by the caller(FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
+ * it must return the NOT_SUPPORTED error code.
+ */
+static u32 ffa_compatible_version_find(u32 version)
+{
+ u16 major = MAJOR_VERSION(version), minor = MINOR_VERSION(version);
+ u16 drv_major = MAJOR_VERSION(FFA_DRIVER_VERSION);
+ u16 drv_minor = MINOR_VERSION(FFA_DRIVER_VERSION);
+
+ if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
+ return version;
+
+ pr_info("Firmware version higher than driver version, downgrading\n");
+ return FFA_DRIVER_VERSION;
+}
+
static int ffa_version_check(u32 *version)
{
ffa_value_t ver;
@@ -180,15 +201,20 @@ static int ffa_version_check(u32 *version)
return -EOPNOTSUPP;
}
- if (ver.a0 < FFA_MIN_VERSION || ver.a0 > FFA_DRIVER_VERSION) {
- pr_err("Incompatible version %d.%d found\n",
- MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0));
+ if (ver.a0 < FFA_MIN_VERSION) {
+ pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
+ MAJOR_VERSION(ver.a0), MINOR_VERSION(ver.a0),
+ MAJOR_VERSION(FFA_MIN_VERSION),
+ MINOR_VERSION(FFA_MIN_VERSION));
return -EINVAL;
}
- *version = ver.a0;
- pr_info("Version %d.%d found\n", MAJOR_VERSION(ver.a0),
+ pr_info("Driver version %d.%d\n", MAJOR_VERSION(FFA_DRIVER_VERSION),
+ MINOR_VERSION(FFA_DRIVER_VERSION));
+ pr_info("Firmware version %d.%d found\n", MAJOR_VERSION(ver.a0),
MINOR_VERSION(ver.a0));
+ *version = ffa_compatible_version_find(ver.a0);
+
return 0;
}
@@ -586,6 +612,22 @@ ffa_memory_share(struct ffa_device *dev, struct ffa_mem_ops_args *args)
return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);
}
+static int
+ffa_memory_lend(struct ffa_device *dev, struct ffa_mem_ops_args *args)
+{
+ /* Note that upon a successful MEM_LEND request the caller
+ * must ensure that the memory region specified is not accessed
+ * until a successful MEM_RECLAIM call has been made.
+ * On systems with a hypervisor present this will be enforced;
+ * however, on systems without a hypervisor the responsibility
+ * falls to the calling kernel driver to prevent access.
+ */
+ if (dev->mode_32bit)
+ return ffa_memory_ops(FFA_MEM_LEND, args);
+
+ return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);
+}
+
static const struct ffa_dev_ops ffa_ops = {
.api_version_get = ffa_api_version_get,
.partition_info_get = ffa_partition_info_get,
@@ -593,6 +635,7 @@ static const struct ffa_dev_ops ffa_ops = {
.sync_send_receive = ffa_sync_send_receive,
.memory_reclaim = ffa_memory_reclaim,
.memory_share = ffa_memory_share,
+ .memory_lend = ffa_memory_lend,
};
const struct ffa_dev_ops *ffa_dev_ops_get(struct ffa_device *dev)
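
Aside (not part of the patch): the negotiation rule in ffa_compatible_version_find() made concrete. The packing of major/minor into one 32-bit word below is an assumption of this sketch; the driver uses its own MAJOR_VERSION()/MINOR_VERSION() macros:

#include <stdint.h>
#include <stdio.h>

#define MAKE_VER(maj, min)	(((uint32_t)(maj) << 16) | (min))	/* assumed layout */
#define MAJOR(v)		((unsigned)((v) >> 16))
#define MINOR(v)		((unsigned)((v) & 0xffff))

/* Mirror of the rule above: keep the firmware version unless it is strictly
 * newer than what the driver was built for, in which case negotiate down. */
static uint32_t compatible_version(uint32_t fw, uint32_t drv)
{
	if (MAJOR(fw) < MAJOR(drv) ||
	    (MAJOR(fw) == MAJOR(drv) && MINOR(fw) <= MINOR(drv)))
		return fw;
	return drv;
}

int main(void)
{
	uint32_t drv = MAKE_VER(1, 0);
	uint32_t same = compatible_version(MAKE_VER(1, 0), drv);
	uint32_t newer = compatible_version(MAKE_VER(1, 1), drv);

	printf("fw 1.0 -> %u.%u\n", MAJOR(same), MINOR(same));		/* 1.0 */
	printf("fw 1.1 -> %u.%u\n", MAJOR(newer), MINOR(newer));	/* 1.0, downgraded */
	return 0;
}
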
diff --git a/drivers/firmware/cirrus/Kconfig b/drivers/firmware/cirrus/Kconfig
new file mode 100644
index 000000000000..f9503cb481d2
--- /dev/null
+++ b/drivers/firmware/cirrus/Kconfig
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+config CS_DSP
+ tristate
+ default n
diff --git a/drivers/firmware/cirrus/Makefile b/drivers/firmware/cirrus/Makefile
new file mode 100644
index 000000000000..f074e2638c9c
--- /dev/null
+++ b/drivers/firmware/cirrus/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+obj-$(CONFIG_CS_DSP) += cs_dsp.o
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
new file mode 100644
index 000000000000..948dd8382686
--- /dev/null
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -0,0 +1,3109 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * cs_dsp.c -- Cirrus Logic DSP firmware support
+ *
+ * Based on sound/soc/codecs/wm_adsp.c
+ *
+ * Copyright 2012 Wolfson Microelectronics plc
+ * Copyright (C) 2015-2021 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+
+#include <linux/firmware/cirrus/cs_dsp.h>
+#include <linux/firmware/cirrus/wmfw.h>
+
+#define cs_dsp_err(_dsp, fmt, ...) \
+ dev_err(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_warn(_dsp, fmt, ...) \
+ dev_warn(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_info(_dsp, fmt, ...) \
+ dev_info(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+#define cs_dsp_dbg(_dsp, fmt, ...) \
+ dev_dbg(_dsp->dev, "%s: " fmt, _dsp->name, ##__VA_ARGS__)
+
+#define ADSP1_CONTROL_1 0x00
+#define ADSP1_CONTROL_2 0x02
+#define ADSP1_CONTROL_3 0x03
+#define ADSP1_CONTROL_4 0x04
+#define ADSP1_CONTROL_5 0x06
+#define ADSP1_CONTROL_6 0x07
+#define ADSP1_CONTROL_7 0x08
+#define ADSP1_CONTROL_8 0x09
+#define ADSP1_CONTROL_9 0x0A
+#define ADSP1_CONTROL_10 0x0B
+#define ADSP1_CONTROL_11 0x0C
+#define ADSP1_CONTROL_12 0x0D
+#define ADSP1_CONTROL_13 0x0F
+#define ADSP1_CONTROL_14 0x10
+#define ADSP1_CONTROL_15 0x11
+#define ADSP1_CONTROL_16 0x12
+#define ADSP1_CONTROL_17 0x13
+#define ADSP1_CONTROL_18 0x14
+#define ADSP1_CONTROL_19 0x16
+#define ADSP1_CONTROL_20 0x17
+#define ADSP1_CONTROL_21 0x18
+#define ADSP1_CONTROL_22 0x1A
+#define ADSP1_CONTROL_23 0x1B
+#define ADSP1_CONTROL_24 0x1C
+#define ADSP1_CONTROL_25 0x1E
+#define ADSP1_CONTROL_26 0x20
+#define ADSP1_CONTROL_27 0x21
+#define ADSP1_CONTROL_28 0x22
+#define ADSP1_CONTROL_29 0x23
+#define ADSP1_CONTROL_30 0x24
+#define ADSP1_CONTROL_31 0x26
+
+/*
+ * ADSP1 Control 19
+ */
+#define ADSP1_WDMA_BUFFER_LENGTH_MASK 0x00FF /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_SHIFT 0 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+#define ADSP1_WDMA_BUFFER_LENGTH_WIDTH 8 /* DSP1_WDMA_BUFFER_LENGTH - [7:0] */
+
+/*
+ * ADSP1 Control 30
+ */
+#define ADSP1_DBG_CLK_ENA 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_MASK 0x0008 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_SHIFT 3 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_DBG_CLK_ENA_WIDTH 1 /* DSP1_DBG_CLK_ENA */
+#define ADSP1_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP1_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP1_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP1_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP1_START 0x0001 /* DSP1_START */
+#define ADSP1_START_MASK 0x0001 /* DSP1_START */
+#define ADSP1_START_SHIFT 0 /* DSP1_START */
+#define ADSP1_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP1 Control 31
+ */
+#define ADSP1_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP1_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2_CONTROL 0x0
+#define ADSP2_CLOCKING 0x1
+#define ADSP2V2_CLOCKING 0x2
+#define ADSP2_STATUS1 0x4
+#define ADSP2_WDMA_CONFIG_1 0x30
+#define ADSP2_WDMA_CONFIG_2 0x31
+#define ADSP2V2_WDMA_CONFIG_2 0x32
+#define ADSP2_RDMA_CONFIG_1 0x34
+
+#define ADSP2_SCRATCH0 0x40
+#define ADSP2_SCRATCH1 0x41
+#define ADSP2_SCRATCH2 0x42
+#define ADSP2_SCRATCH3 0x43
+
+#define ADSP2V2_SCRATCH0_1 0x40
+#define ADSP2V2_SCRATCH2_3 0x42
+
+/*
+ * ADSP2 Control
+ */
+#define ADSP2_MEM_ENA 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_MASK 0x0010 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_SHIFT 4 /* DSP1_MEM_ENA */
+#define ADSP2_MEM_ENA_WIDTH 1 /* DSP1_MEM_ENA */
+#define ADSP2_SYS_ENA 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_MASK 0x0004 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_SHIFT 2 /* DSP1_SYS_ENA */
+#define ADSP2_SYS_ENA_WIDTH 1 /* DSP1_SYS_ENA */
+#define ADSP2_CORE_ENA 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_MASK 0x0002 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_SHIFT 1 /* DSP1_CORE_ENA */
+#define ADSP2_CORE_ENA_WIDTH 1 /* DSP1_CORE_ENA */
+#define ADSP2_START 0x0001 /* DSP1_START */
+#define ADSP2_START_MASK 0x0001 /* DSP1_START */
+#define ADSP2_START_SHIFT 0 /* DSP1_START */
+#define ADSP2_START_WIDTH 1 /* DSP1_START */
+
+/*
+ * ADSP2 clocking
+ */
+#define ADSP2_CLK_SEL_MASK 0x0007 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_SHIFT 0 /* CLK_SEL_ENA */
+#define ADSP2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+/*
+ * ADSP2V2 clocking
+ */
+#define ADSP2V2_CLK_SEL_MASK 0x70000 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_SHIFT 16 /* CLK_SEL_ENA */
+#define ADSP2V2_CLK_SEL_WIDTH 3 /* CLK_SEL_ENA */
+
+#define ADSP2V2_RATE_MASK 0x7800 /* DSP_RATE */
+#define ADSP2V2_RATE_SHIFT 11 /* DSP_RATE */
+#define ADSP2V2_RATE_WIDTH 4 /* DSP_RATE */
+
+/*
+ * ADSP2 Status 1
+ */
+#define ADSP2_RAM_RDY 0x0001
+#define ADSP2_RAM_RDY_MASK 0x0001
+#define ADSP2_RAM_RDY_SHIFT 0
+#define ADSP2_RAM_RDY_WIDTH 1
+
+/*
+ * ADSP2 Lock support
+ */
+#define ADSP2_LOCK_CODE_0 0x5555
+#define ADSP2_LOCK_CODE_1 0xAAAA
+
+#define ADSP2_WATCHDOG 0x0A
+#define ADSP2_BUS_ERR_ADDR 0x52
+#define ADSP2_REGION_LOCK_STATUS 0x64
+#define ADSP2_LOCK_REGION_1_LOCK_REGION_0 0x66
+#define ADSP2_LOCK_REGION_3_LOCK_REGION_2 0x68
+#define ADSP2_LOCK_REGION_5_LOCK_REGION_4 0x6A
+#define ADSP2_LOCK_REGION_7_LOCK_REGION_6 0x6C
+#define ADSP2_LOCK_REGION_9_LOCK_REGION_8 0x6E
+#define ADSP2_LOCK_REGION_CTRL 0x7A
+#define ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR 0x7C
+
+#define ADSP2_REGION_LOCK_ERR_MASK 0x8000
+#define ADSP2_ADDR_ERR_MASK 0x4000
+#define ADSP2_WDT_TIMEOUT_STS_MASK 0x2000
+#define ADSP2_CTRL_ERR_PAUSE_ENA 0x0002
+#define ADSP2_CTRL_ERR_EINT 0x0001
+
+#define ADSP2_BUS_ERR_ADDR_MASK 0x00FFFFFF
+#define ADSP2_XMEM_ERR_ADDR_MASK 0x0000FFFF
+#define ADSP2_PMEM_ERR_ADDR_MASK 0x7FFF0000
+#define ADSP2_PMEM_ERR_ADDR_SHIFT 16
+#define ADSP2_WDT_ENA_MASK 0xFFFFFFFD
+
+#define ADSP2_LOCK_REGION_SHIFT 16
+
+/*
+ * Event control messages
+ */
+#define CS_DSP_FW_EVENT_SHUTDOWN 0x000001
+
+/*
+ * HALO system info
+ */
+#define HALO_AHBM_WINDOW_DEBUG_0 0x02040
+#define HALO_AHBM_WINDOW_DEBUG_1 0x02044
+
+/*
+ * HALO core
+ */
+#define HALO_SCRATCH1 0x005c0
+#define HALO_SCRATCH2 0x005c8
+#define HALO_SCRATCH3 0x005d0
+#define HALO_SCRATCH4 0x005d8
+#define HALO_CCM_CORE_CONTROL 0x41000
+#define HALO_CORE_SOFT_RESET 0x00010
+#define HALO_WDT_CONTROL 0x47000
+
+/*
+ * HALO MPU banks
+ */
+#define HALO_MPU_XMEM_ACCESS_0 0x43000
+#define HALO_MPU_YMEM_ACCESS_0 0x43004
+#define HALO_MPU_WINDOW_ACCESS_0 0x43008
+#define HALO_MPU_XREG_ACCESS_0 0x4300C
+#define HALO_MPU_YREG_ACCESS_0 0x43014
+#define HALO_MPU_XMEM_ACCESS_1 0x43018
+#define HALO_MPU_YMEM_ACCESS_1 0x4301C
+#define HALO_MPU_WINDOW_ACCESS_1 0x43020
+#define HALO_MPU_XREG_ACCESS_1 0x43024
+#define HALO_MPU_YREG_ACCESS_1 0x4302C
+#define HALO_MPU_XMEM_ACCESS_2 0x43030
+#define HALO_MPU_YMEM_ACCESS_2 0x43034
+#define HALO_MPU_WINDOW_ACCESS_2 0x43038
+#define HALO_MPU_XREG_ACCESS_2 0x4303C
+#define HALO_MPU_YREG_ACCESS_2 0x43044
+#define HALO_MPU_XMEM_ACCESS_3 0x43048
+#define HALO_MPU_YMEM_ACCESS_3 0x4304C
+#define HALO_MPU_WINDOW_ACCESS_3 0x43050
+#define HALO_MPU_XREG_ACCESS_3 0x43054
+#define HALO_MPU_YREG_ACCESS_3 0x4305C
+#define HALO_MPU_XM_VIO_ADDR 0x43100
+#define HALO_MPU_XM_VIO_STATUS 0x43104
+#define HALO_MPU_YM_VIO_ADDR 0x43108
+#define HALO_MPU_YM_VIO_STATUS 0x4310C
+#define HALO_MPU_PM_VIO_ADDR 0x43110
+#define HALO_MPU_PM_VIO_STATUS 0x43114
+#define HALO_MPU_LOCK_CONFIG 0x43140
+
+/*
+ * HALO_AHBM_WINDOW_DEBUG_1
+ */
+#define HALO_AHBM_CORE_ERR_ADDR_MASK 0x0fffff00
+#define HALO_AHBM_CORE_ERR_ADDR_SHIFT 8
+#define HALO_AHBM_FLAGS_ERR_MASK 0x000000ff
+
+/*
+ * HALO_CCM_CORE_CONTROL
+ */
+#define HALO_CORE_RESET 0x00000200
+#define HALO_CORE_EN 0x00000001
+
+/*
+ * HALO_CORE_SOFT_RESET
+ */
+#define HALO_CORE_SOFT_RESET_MASK 0x00000001
+
+/*
+ * HALO_WDT_CONTROL
+ */
+#define HALO_WDT_EN_MASK 0x00000001
+
+/*
+ * HALO_MPU_?M_VIO_STATUS
+ */
+#define HALO_MPU_VIO_STS_MASK 0x007e0000
+#define HALO_MPU_VIO_STS_SHIFT 17
+#define HALO_MPU_VIO_ERR_WR_MASK 0x00008000
+#define HALO_MPU_VIO_ERR_SRC_MASK 0x00007fff
+#define HALO_MPU_VIO_ERR_SRC_SHIFT 0
+
+struct cs_dsp_ops {
+ bool (*validate_version)(struct cs_dsp *dsp, unsigned int version);
+ unsigned int (*parse_sizes)(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware);
+ int (*setup_algs)(struct cs_dsp *dsp);
+ unsigned int (*region_to_reg)(struct cs_dsp_region const *mem,
+ unsigned int offset);
+
+ void (*show_fw_status)(struct cs_dsp *dsp);
+ void (*stop_watchdog)(struct cs_dsp *dsp);
+
+ int (*enable_memory)(struct cs_dsp *dsp);
+ void (*disable_memory)(struct cs_dsp *dsp);
+ int (*lock_memory)(struct cs_dsp *dsp, unsigned int lock_regions);
+
+ int (*enable_core)(struct cs_dsp *dsp);
+ void (*disable_core)(struct cs_dsp *dsp);
+
+ int (*start_core)(struct cs_dsp *dsp);
+ void (*stop_core)(struct cs_dsp *dsp);
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops;
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[];
+static const struct cs_dsp_ops cs_dsp_halo_ops;
+
+struct cs_dsp_buf {
+ struct list_head list;
+ void *buf;
+};
+
+static struct cs_dsp_buf *cs_dsp_buf_alloc(const void *src, size_t len,
+ struct list_head *list)
+{
+ struct cs_dsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+
+ if (buf == NULL)
+ return NULL;
+
+ buf->buf = vmalloc(len);
+ if (!buf->buf) {
+ kfree(buf);
+ return NULL;
+ }
+ memcpy(buf->buf, src, len);
+
+ if (list)
+ list_add_tail(&buf->list, list);
+
+ return buf;
+}
+
+static void cs_dsp_buf_free(struct list_head *list)
+{
+ while (!list_empty(list)) {
+ struct cs_dsp_buf *buf = list_first_entry(list,
+ struct cs_dsp_buf,
+ list);
+ list_del(&buf->list);
+ vfree(buf->buf);
+ kfree(buf);
+ }
+}
+
+/**
+ * cs_dsp_mem_region_name() - Return a name string for a memory type
+ * @type: the memory type to match
+ *
+ * Return: A const string identifying the memory region, or NULL if the type is not recognized.
+ */
+const char *cs_dsp_mem_region_name(unsigned int type)
+{
+ switch (type) {
+ case WMFW_ADSP1_PM:
+ return "PM";
+ case WMFW_HALO_PM_PACKED:
+ return "PM_PACKED";
+ case WMFW_ADSP1_DM:
+ return "DM";
+ case WMFW_ADSP2_XM:
+ return "XM";
+ case WMFW_HALO_XM_PACKED:
+ return "XM_PACKED";
+ case WMFW_ADSP2_YM:
+ return "YM";
+ case WMFW_HALO_YM_PACKED:
+ return "YM_PACKED";
+ case WMFW_ADSP1_ZM:
+ return "ZM";
+ default:
+ return NULL;
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_mem_region_name);
+
+#ifdef CONFIG_DEBUG_FS
+static void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->wmfw_file_name);
+ dsp->wmfw_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp, const char *s)
+{
+ char *tmp = kasprintf(GFP_KERNEL, "%s\n", s);
+
+ kfree(dsp->bin_file_name);
+ dsp->bin_file_name = tmp;
+}
+
+static void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+ kfree(dsp->wmfw_file_name);
+ kfree(dsp->bin_file_name);
+ dsp->wmfw_file_name = NULL;
+ dsp->bin_file_name = NULL;
+}
+
+static ssize_t cs_dsp_debugfs_wmfw_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->wmfw_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->wmfw_file_name,
+ strlen(dsp->wmfw_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static ssize_t cs_dsp_debugfs_bin_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct cs_dsp *dsp = file->private_data;
+ ssize_t ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->bin_file_name || !dsp->booted)
+ ret = 0;
+ else
+ ret = simple_read_from_buffer(user_buf, count, ppos,
+ dsp->bin_file_name,
+ strlen(dsp->bin_file_name));
+
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+
+static const struct {
+ const char *name;
+ const struct file_operations fops;
+} cs_dsp_debugfs_fops[] = {
+ {
+ .name = "wmfw_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_wmfw_read,
+ },
+ },
+ {
+ .name = "bin_file_name",
+ .fops = {
+ .open = simple_open,
+ .read = cs_dsp_debugfs_bin_read,
+ },
+ },
+};
+
+/**
+ * cs_dsp_init_debugfs() - Create and populate DSP representation in debugfs
+ * @dsp: pointer to DSP structure
+ * @debugfs_root: pointer to debugfs directory in which to create this DSP
+ * representation
+ */
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+ struct dentry *root = NULL;
+ int i;
+
+ root = debugfs_create_dir(dsp->name, debugfs_root);
+
+ debugfs_create_bool("booted", 0444, root, &dsp->booted);
+ debugfs_create_bool("running", 0444, root, &dsp->running);
+ debugfs_create_x32("fw_id", 0444, root, &dsp->fw_id);
+ debugfs_create_x32("fw_version", 0444, root, &dsp->fw_id_version);
+
+ for (i = 0; i < ARRAY_SIZE(cs_dsp_debugfs_fops); ++i)
+ debugfs_create_file(cs_dsp_debugfs_fops[i].name, 0444, root,
+ dsp, &cs_dsp_debugfs_fops[i].fops);
+
+ dsp->debugfs_root = root;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_init_debugfs);
+
+/**
+ * cs_dsp_cleanup_debugfs() - Removes DSP representation from debugfs
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+ cs_dsp_debugfs_clear(dsp);
+ debugfs_remove_recursive(dsp->debugfs_root);
+ dsp->debugfs_root = NULL;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_cleanup_debugfs);
+#else
+void cs_dsp_init_debugfs(struct cs_dsp *dsp, struct dentry *debugfs_root)
+{
+}
+EXPORT_SYMBOL_GPL(cs_dsp_init_debugfs);
+
+void cs_dsp_cleanup_debugfs(struct cs_dsp *dsp)
+{
+}
+EXPORT_SYMBOL_GPL(cs_dsp_cleanup_debugfs);
+
+static inline void cs_dsp_debugfs_save_wmfwname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_save_binname(struct cs_dsp *dsp,
+ const char *s)
+{
+}
+
+static inline void cs_dsp_debugfs_clear(struct cs_dsp *dsp)
+{
+}
+#endif
+
+static const struct cs_dsp_region *cs_dsp_find_region(struct cs_dsp *dsp,
+ int type)
+{
+ int i;
+
+ for (i = 0; i < dsp->num_mems; i++)
+ if (dsp->mem[i].type == type)
+ return &dsp->mem[i];
+
+ return NULL;
+}
+
+static unsigned int cs_dsp_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP1_PM:
+ return mem->base + (offset * 3);
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ return mem->base + (offset * 2);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
+
+static unsigned int cs_dsp_halo_region_to_reg(struct cs_dsp_region const *mem,
+ unsigned int offset)
+{
+ switch (mem->type) {
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ return mem->base + (offset * 4);
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ return (mem->base + (offset * 3)) & ~0x3;
+ case WMFW_HALO_PM_PACKED:
+ return mem->base + (offset * 5);
+ default:
+ WARN(1, "Unknown memory region type");
+ return offset;
+ }
+}
+
+static void cs_dsp_read_fw_status(struct cs_dsp *dsp,
+ int noffs, unsigned int *offs)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < noffs; ++i) {
+ ret = regmap_read(dsp->regmap, dsp->base + offs[i], &offs[i]);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read SCRATCH%u: %d\n", i, ret);
+ return;
+ }
+ }
+}
+
+static void cs_dsp_adsp2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ ADSP2_SCRATCH0, ADSP2_SCRATCH1, ADSP2_SCRATCH2, ADSP2_SCRATCH3,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static void cs_dsp_adsp2v2_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = { ADSP2V2_SCRATCH0_1, ADSP2V2_SCRATCH2_3 };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0] & 0xFFFF, offs[0] >> 16,
+ offs[1] & 0xFFFF, offs[1] >> 16);
+}
+
+static void cs_dsp_halo_show_fw_status(struct cs_dsp *dsp)
+{
+ unsigned int offs[] = {
+ HALO_SCRATCH1, HALO_SCRATCH2, HALO_SCRATCH3, HALO_SCRATCH4,
+ };
+
+ cs_dsp_read_fw_status(dsp, ARRAY_SIZE(offs), offs);
+
+ cs_dsp_dbg(dsp, "FW SCRATCH 0:0x%x 1:0x%x 2:0x%x 3:0x%x\n",
+ offs[0], offs[1], offs[2], offs[3]);
+}
+
+static int cs_dsp_coeff_base_reg(struct cs_dsp_coeff_ctl *ctl, unsigned int *reg)
+{
+ const struct cs_dsp_alg_region *alg_region = &ctl->alg_region;
+ struct cs_dsp *dsp = ctl->dsp;
+ const struct cs_dsp_region *mem;
+
+ mem = cs_dsp_find_region(dsp, alg_region->type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n",
+ alg_region->type);
+ return -EINVAL;
+ }
+
+ *reg = dsp->ops->region_to_reg(mem, ctl->alg_region.base + ctl->offset);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_acked_control() - Sends event_id to the acked control
+ * @ctl: pointer to acked coefficient control
+ * @event_id: the value to write to the given acked control
+ *
+ * Once the value has been written to the control, the function blocks
+ * until the running firmware acknowledges the write or the timeout is exceeded.
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_write_acked_control(struct cs_dsp_coeff_ctl *ctl, unsigned int event_id)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ __be32 val = cpu_to_be32(event_id);
+ unsigned int reg;
+ int i, ret;
+
+ if (!dsp->running)
+ return -EPERM;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ cs_dsp_dbg(dsp, "Sending 0x%x to acked control alg 0x%x %s:0x%x\n",
+ event_id, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type), ctl->offset);
+
+ ret = regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ /*
+ * Poll for ack: we initially poll at ~1ms intervals for firmwares
+ * that respond quickly, then drop to ~10ms polls. A firmware is unlikely
+ * to ack instantly, so we do the first 1ms delay before reading the
+ * control to avoid a pointless bus transaction.
+ */
+ for (i = 0; i < CS_DSP_ACKED_CTL_TIMEOUT_MS;) {
+ switch (i) {
+ case 0 ... CS_DSP_ACKED_CTL_N_QUICKPOLLS - 1:
+ usleep_range(1000, 2000);
+ i++;
+ break;
+ default:
+ usleep_range(10000, 20000);
+ i += 10;
+ break;
+ }
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %x: %d\n", reg, ret);
+ return ret;
+ }
+
+ if (val == 0) {
+ cs_dsp_dbg(dsp, "Acked control ACKED at poll %u\n", i);
+ return 0;
+ }
+ }
+
+ cs_dsp_warn(dsp, "Acked control @0x%x alg:0x%x %s:0x%x timed out\n",
+ reg, ctl->alg_region.alg,
+ cs_dsp_mem_region_name(ctl->alg_region.type),
+ ctl->offset);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_acked_control);
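
For example, a client driver could signal the shutdown event to the firmware
through an acked control roughly as follows; the helper name, the control
subname "FIRMWARE_EVENT" and the algorithm id 0x10001 are placeholders, not
values defined by this file:

static int example_signal_shutdown(struct cs_dsp *dsp)
{
	struct cs_dsp_coeff_ctl *ctl;
	int ret;

	mutex_lock(&dsp->pwr_lock);

	ctl = cs_dsp_get_ctl(dsp, "FIRMWARE_EVENT", WMFW_ADSP2_XM, 0x10001);
	if (!ctl) {
		ret = -ENOENT;
		goto out;
	}

	/* Blocks until the firmware clears the value or the poll times out */
	ret = cs_dsp_coeff_write_acked_control(ctl, CS_DSP_FW_EVENT_SHUTDOWN);
out:
	mutex_unlock(&dsp->pwr_lock);
	return ret;
}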
+
+static int cs_dsp_coeff_write_ctrl_raw(struct cs_dsp_coeff_ctl *ctl,
+ const void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ scratch = kmemdup(buf, len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_write(dsp->regmap, reg, scratch,
+ len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to write %zu bytes to %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Wrote %zu bytes to %x\n", len, reg);
+
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_write_ctrl() - Writes the given buffer to the given coefficient control
+ * @ctl: pointer to coefficient control
+ * @buf: the buffer to write to the given control
+ * @len: the length of the buffer
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_write_ctrl(struct cs_dsp_coeff_ctl *ctl, const void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ ret = -EPERM;
+ else if (buf != ctl->cache)
+ memcpy(ctl->cache, buf, len);
+
+ ctl->set = 1;
+ if (ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, buf, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_write_ctrl);
+
+static int cs_dsp_coeff_read_ctrl_raw(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+{
+ struct cs_dsp *dsp = ctl->dsp;
+ void *scratch;
+ int ret;
+ unsigned int reg;
+
+ ret = cs_dsp_coeff_base_reg(ctl, &reg);
+ if (ret)
+ return ret;
+
+ scratch = kmalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!scratch)
+ return -ENOMEM;
+
+ ret = regmap_raw_read(dsp->regmap, reg, scratch, len);
+ if (ret) {
+ cs_dsp_err(dsp, "Failed to read %zu bytes from %x: %d\n",
+ len, reg, ret);
+ kfree(scratch);
+ return ret;
+ }
+ cs_dsp_dbg(dsp, "Read %zu bytes from %x\n", len, reg);
+
+ memcpy(buf, scratch, len);
+ kfree(scratch);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_coeff_read_ctrl() - Reads the given coefficient control into the given buffer
+ * @ctl: pointer to coefficient control
+ * @buf: the buffer to store to the given control
+ * @len: the length of the buffer
+ *
+ * Must be called with pwr_lock held.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_coeff_read_ctrl(struct cs_dsp_coeff_ctl *ctl, void *buf, size_t len)
+{
+ int ret = 0;
+
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE) {
+ if (ctl->enabled && ctl->dsp->running)
+ return cs_dsp_coeff_read_ctrl_raw(ctl, buf, len);
+ else
+ return -EPERM;
+ } else {
+ if (!ctl->flags && ctl->enabled && ctl->dsp->running)
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+
+ if (buf != ctl->cache)
+ memcpy(buf, ctl->cache, len);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_coeff_read_ctrl);
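
As a usage sketch for these two helpers, a caller already holding pwr_lock
might round-trip a coefficient buffer as below; @ctl and @buf are assumed to
have been obtained elsewhere (for instance via cs_dsp_get_ctl()) and @buf is
assumed to be at least ctl->len bytes:

static int example_update_coeff(struct cs_dsp_coeff_ctl *ctl, void *buf)
{
	int ret;

	/* Caches the value and writes it through if the DSP is running */
	ret = cs_dsp_coeff_write_ctrl(ctl, buf, ctl->len);
	if (ret)
		return ret;

	/* Read back: from the DSP for volatile controls, else from the cache */
	return cs_dsp_coeff_read_ctrl(ctl, buf, ctl->len);
}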
+
+static int cs_dsp_coeff_init_control_caches(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled || ctl->set)
+ continue;
+ if (ctl->flags & WMFW_CTL_FLAG_VOLATILE)
+ continue;
+
+ /*
+ * For readable controls populate the cache from the DSP memory.
+ * For non-readable controls the cache was zero-filled when
+ * created so we don't need to do anything.
+ */
+ if (!ctl->flags || (ctl->flags & WMFW_CTL_FLAG_READABLE)) {
+ ret = cs_dsp_coeff_read_ctrl_raw(ctl, ctl->cache, ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cs_dsp_coeff_sync_controls(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (!ctl->enabled)
+ continue;
+ if (ctl->set && !(ctl->flags & WMFW_CTL_FLAG_VOLATILE)) {
+ ret = cs_dsp_coeff_write_ctrl_raw(ctl, ctl->cache,
+ ctl->len);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void cs_dsp_signal_event_controls(struct cs_dsp *dsp,
+ unsigned int event)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->type != WMFW_CTL_TYPE_HOSTEVENT)
+ continue;
+
+ if (!ctl->enabled)
+ continue;
+
+ ret = cs_dsp_coeff_write_acked_control(ctl, event);
+ if (ret)
+ cs_dsp_warn(dsp,
+ "Failed to send 0x%x event to alg 0x%x (%d)\n",
+ event, ctl->alg_region.alg, ret);
+ }
+}
+
+static void cs_dsp_free_ctl_blk(struct cs_dsp_coeff_ctl *ctl)
+{
+ kfree(ctl->cache);
+ kfree(ctl->subname);
+ kfree(ctl);
+}
+
+static int cs_dsp_create_control(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region,
+ unsigned int offset, unsigned int len,
+ const char *subname, unsigned int subname_len,
+ unsigned int flags, unsigned int type)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+ int ret;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ ctl->alg_region.alg == alg_region->alg &&
+ ctl->alg_region.type == alg_region->type) {
+ if ((!subname && !ctl->subname) ||
+ (subname && !strncmp(ctl->subname, subname, ctl->subname_len))) {
+ if (!ctl->enabled)
+ ctl->enabled = 1;
+ return 0;
+ }
+ }
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ ctl->fw_name = dsp->fw_name;
+ ctl->alg_region = *alg_region;
+ if (subname && dsp->fw_ver >= 2) {
+ ctl->subname_len = subname_len;
+ ctl->subname = kmemdup(subname,
+ strlen(subname) + 1, GFP_KERNEL);
+ if (!ctl->subname) {
+ ret = -ENOMEM;
+ goto err_ctl;
+ }
+ }
+ ctl->enabled = 1;
+ ctl->set = 0;
+ ctl->dsp = dsp;
+
+ ctl->flags = flags;
+ ctl->type = type;
+ ctl->offset = offset;
+ ctl->len = len;
+ ctl->cache = kzalloc(ctl->len, GFP_KERNEL);
+ if (!ctl->cache) {
+ ret = -ENOMEM;
+ goto err_ctl_subname;
+ }
+
+ list_add(&ctl->list, &dsp->ctl_list);
+
+ if (dsp->client_ops->control_add) {
+ ret = dsp->client_ops->control_add(ctl);
+ if (ret)
+ goto err_list_del;
+ }
+
+ return 0;
+
+err_list_del:
+ list_del(&ctl->list);
+ kfree(ctl->cache);
+err_ctl_subname:
+ kfree(ctl->subname);
+err_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+struct cs_dsp_coeff_parsed_alg {
+ int id;
+ const u8 *name;
+ int name_len;
+ int ncoeff;
+};
+
+struct cs_dsp_coeff_parsed_coeff {
+ int offset;
+ int mem_type;
+ const u8 *name;
+ int name_len;
+ unsigned int ctl_type;
+ int flags;
+ int len;
+};
+
+static int cs_dsp_coeff_parse_string(int bytes, const u8 **pos, const u8 **str)
+{
+ int length;
+
+ switch (bytes) {
+ case 1:
+ length = **pos;
+ break;
+ case 2:
+ length = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ default:
+ return 0;
+ }
+
+ if (str)
+ *str = *pos + bytes;
+
+ *pos += ((length + bytes) + 3) & ~0x03;
+
+ return length;
+}
+
+static int cs_dsp_coeff_parse_int(int bytes, const u8 **pos)
+{
+ int val = 0;
+
+ switch (bytes) {
+ case 2:
+ val = le16_to_cpu(*((__le16 *)*pos));
+ break;
+ case 4:
+ val = le32_to_cpu(*((__le32 *)*pos));
+ break;
+ default:
+ break;
+ }
+
+ *pos += bytes;
+
+ return val;
+}
+
+static inline void cs_dsp_coeff_parse_alg(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_alg *blk)
+{
+ const struct wmfw_adsp_alg_data *raw;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_alg_data *)*data;
+ *data = raw->data;
+
+ blk->id = le32_to_cpu(raw->id);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ncoeff = le32_to_cpu(raw->ncoeff);
+ break;
+ default:
+ blk->id = cs_dsp_coeff_parse_int(sizeof(raw->id), data);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), data,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u16), data, NULL);
+ blk->ncoeff = cs_dsp_coeff_parse_int(sizeof(raw->ncoeff), data);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "Algorithm ID: %#x\n", blk->id);
+ cs_dsp_dbg(dsp, "Algorithm name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "# of coefficient descriptors: %#x\n", blk->ncoeff);
+}
+
+static inline void cs_dsp_coeff_parse_coeff(struct cs_dsp *dsp, const u8 **data,
+ struct cs_dsp_coeff_parsed_coeff *blk)
+{
+ const struct wmfw_adsp_coeff_data *raw;
+ const u8 *tmp;
+ int length;
+
+ switch (dsp->fw_ver) {
+ case 0:
+ case 1:
+ raw = (const struct wmfw_adsp_coeff_data *)*data;
+ *data = *data + sizeof(raw->hdr) + le32_to_cpu(raw->hdr.size);
+
+ blk->offset = le16_to_cpu(raw->hdr.offset);
+ blk->mem_type = le16_to_cpu(raw->hdr.type);
+ blk->name = raw->name;
+ blk->name_len = strlen(raw->name);
+ blk->ctl_type = le16_to_cpu(raw->ctl_type);
+ blk->flags = le16_to_cpu(raw->flags);
+ blk->len = le32_to_cpu(raw->len);
+ break;
+ default:
+ tmp = *data;
+ blk->offset = cs_dsp_coeff_parse_int(sizeof(raw->hdr.offset), &tmp);
+ blk->mem_type = cs_dsp_coeff_parse_int(sizeof(raw->hdr.type), &tmp);
+ length = cs_dsp_coeff_parse_int(sizeof(raw->hdr.size), &tmp);
+ blk->name_len = cs_dsp_coeff_parse_string(sizeof(u8), &tmp,
+ &blk->name);
+ cs_dsp_coeff_parse_string(sizeof(u8), &tmp, NULL);
+ cs_dsp_coeff_parse_string(sizeof(u16), &tmp, NULL);
+ blk->ctl_type = cs_dsp_coeff_parse_int(sizeof(raw->ctl_type), &tmp);
+ blk->flags = cs_dsp_coeff_parse_int(sizeof(raw->flags), &tmp);
+ blk->len = cs_dsp_coeff_parse_int(sizeof(raw->len), &tmp);
+
+ *data = *data + sizeof(raw->hdr) + length;
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "\tCoefficient type: %#x\n", blk->mem_type);
+ cs_dsp_dbg(dsp, "\tCoefficient offset: %#x\n", blk->offset);
+ cs_dsp_dbg(dsp, "\tCoefficient name: %.*s\n", blk->name_len, blk->name);
+ cs_dsp_dbg(dsp, "\tCoefficient flags: %#x\n", blk->flags);
+ cs_dsp_dbg(dsp, "\tALSA control type: %#x\n", blk->ctl_type);
+ cs_dsp_dbg(dsp, "\tALSA control len: %#x\n", blk->len);
+}
+
+static int cs_dsp_check_coeff_flags(struct cs_dsp *dsp,
+ const struct cs_dsp_coeff_parsed_coeff *coeff_blk,
+ unsigned int f_required,
+ unsigned int f_illegal)
+{
+ if ((coeff_blk->flags & f_illegal) ||
+ ((coeff_blk->flags & f_required) != f_required)) {
+ cs_dsp_err(dsp, "Illegal flags 0x%x for control type 0x%x\n",
+ coeff_blk->flags, coeff_blk->ctl_type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_parse_coeff(struct cs_dsp *dsp,
+ const struct wmfw_region *region)
+{
+ struct cs_dsp_alg_region alg_region = {};
+ struct cs_dsp_coeff_parsed_alg alg_blk;
+ struct cs_dsp_coeff_parsed_coeff coeff_blk;
+ const u8 *data = region->data;
+ int i, ret;
+
+ cs_dsp_coeff_parse_alg(dsp, &data, &alg_blk);
+ for (i = 0; i < alg_blk.ncoeff; i++) {
+ cs_dsp_coeff_parse_coeff(dsp, &data, &coeff_blk);
+
+ switch (coeff_blk.ctl_type) {
+ case WMFW_CTL_TYPE_BYTES:
+ break;
+ case WMFW_CTL_TYPE_ACKED:
+ if (coeff_blk.flags & WMFW_CTL_FLAG_SYS)
+ continue; /* ignore */
+
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOSTEVENT:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_WRITEABLE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ case WMFW_CTL_TYPE_HOST_BUFFER:
+ ret = cs_dsp_check_coeff_flags(dsp, &coeff_blk,
+ WMFW_CTL_FLAG_SYS |
+ WMFW_CTL_FLAG_VOLATILE |
+ WMFW_CTL_FLAG_READABLE,
+ 0);
+ if (ret)
+ return -EINVAL;
+ break;
+ default:
+ cs_dsp_err(dsp, "Unknown control type: %d\n",
+ coeff_blk.ctl_type);
+ return -EINVAL;
+ }
+
+ alg_region.type = coeff_blk.mem_type;
+ alg_region.alg = alg_blk.id;
+
+ ret = cs_dsp_create_control(dsp, &alg_region,
+ coeff_blk.offset,
+ coeff_blk.len,
+ coeff_blk.name,
+ coeff_blk.name_len,
+ coeff_blk.flags,
+ coeff_blk.ctl_type);
+ if (ret < 0)
+ cs_dsp_err(dsp, "Failed to create control: %.*s, %d\n",
+ coeff_blk.name_len, coeff_blk.name, ret);
+ }
+
+ return 0;
+}
+
+static unsigned int cs_dsp_adsp1_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+
+ adsp1_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d DM, %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp1_sizes->dm), le32_to_cpu(adsp1_sizes->pm),
+ le32_to_cpu(adsp1_sizes->zm));
+
+ return pos + sizeof(*adsp1_sizes);
+}
+
+static unsigned int cs_dsp_adsp2_parse_sizes(struct cs_dsp *dsp,
+ const char * const file,
+ unsigned int pos,
+ const struct firmware *firmware)
+{
+ const struct wmfw_adsp2_sizes *adsp2_sizes;
+
+ adsp2_sizes = (void *)&firmware->data[pos];
+
+ cs_dsp_dbg(dsp, "%s: %d XM, %d YM %d PM, %d ZM\n", file,
+ le32_to_cpu(adsp2_sizes->xm), le32_to_cpu(adsp2_sizes->ym),
+ le32_to_cpu(adsp2_sizes->pm), le32_to_cpu(adsp2_sizes->zm));
+
+ return pos + sizeof(*adsp2_sizes);
+}
+
+static bool cs_dsp_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 0:
+ cs_dsp_warn(dsp, "Deprecated file format %d\n", version);
+ return true;
+ case 1:
+ case 2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool cs_dsp_halo_validate_version(struct cs_dsp *dsp, unsigned int version)
+{
+ switch (version) {
+ case 3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ unsigned int pos = 0;
+ const struct wmfw_header *header;
+ const struct wmfw_adsp1_sizes *adsp1_sizes;
+ const struct wmfw_footer *footer;
+ const struct wmfw_region *region;
+ const struct cs_dsp_region *mem;
+ const char *region_name;
+ char *text = NULL;
+ struct cs_dsp_buf *buf;
+ unsigned int reg;
+ int regions = 0;
+ int ret, offset, type;
+
+ ret = -EINVAL;
+
+ pos = sizeof(*header) + sizeof(*adsp1_sizes) + sizeof(*footer);
+ if (pos >= firmware->size) {
+ cs_dsp_err(dsp, "%s: file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ header = (void *)&firmware->data[0];
+
+ if (memcmp(&header->magic[0], "WMFW", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid magic\n", file);
+ goto out_fw;
+ }
+
+ if (!dsp->ops->validate_version(dsp, header->ver)) {
+ cs_dsp_err(dsp, "%s: unknown file format %d\n",
+ file, header->ver);
+ goto out_fw;
+ }
+
+ cs_dsp_info(dsp, "Firmware version: %d\n", header->ver);
+ dsp->fw_ver = header->ver;
+
+ if (header->core != dsp->type) {
+ cs_dsp_err(dsp, "%s: invalid core %d != %d\n",
+ file, header->core, dsp->type);
+ goto out_fw;
+ }
+
+ pos = sizeof(*header);
+ pos = dsp->ops->parse_sizes(dsp, file, pos, firmware);
+
+ footer = (void *)&firmware->data[pos];
+ pos += sizeof(*footer);
+
+ if (le32_to_cpu(header->len) != pos) {
+ cs_dsp_err(dsp, "%s: unexpected header length %d\n",
+ file, le32_to_cpu(header->len));
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: timestamp %llu\n", file,
+ le64_to_cpu(footer->timestamp));
+
+ while (pos < firmware->size &&
+ sizeof(*region) < firmware->size - pos) {
+ region = (void *)&(firmware->data[pos]);
+ region_name = "Unknown";
+ reg = 0;
+ text = NULL;
+ offset = le32_to_cpu(region->offset) & 0xffffff;
+ type = be32_to_cpu(region->type) & 0xff;
+
+ switch (type) {
+ case WMFW_NAME_TEXT:
+ region_name = "Firmware name";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ALGORITHM_DATA:
+ region_name = "Algorithm";
+ ret = cs_dsp_parse_coeff(dsp, region);
+ if (ret != 0)
+ goto out_fw;
+ break;
+ case WMFW_INFO_TEXT:
+ region_name = "Information";
+ text = kzalloc(le32_to_cpu(region->len) + 1,
+ GFP_KERNEL);
+ break;
+ case WMFW_ABSOLUTE:
+ region_name = "Absolute";
+ reg = offset;
+ break;
+ case WMFW_ADSP1_PM:
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_HALO_PM_PACKED:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No region of type: %x\n", type);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ region_name = cs_dsp_mem_region_name(type);
+ reg = dsp->ops->region_to_reg(mem, offset);
+ break;
+ default:
+ cs_dsp_warn(dsp,
+ "%s.%d: Unknown region type %x at %d(%x)\n",
+ file, regions, type, pos, pos);
+ break;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at %d in %s\n", file,
+ regions, le32_to_cpu(region->len), offset,
+ region_name);
+
+ if (le32_to_cpu(region->len) >
+ firmware->size - pos - sizeof(*region)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ if (text) {
+ memcpy(text, region->data, le32_to_cpu(region->len));
+ cs_dsp_info(dsp, "%s: %s\n", file, text);
+ kfree(text);
+ text = NULL;
+ }
+
+ if (reg) {
+ buf = cs_dsp_buf_alloc(region->data,
+ le32_to_cpu(region->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(region->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
+ file, regions,
+ le32_to_cpu(region->len), offset,
+ region_name, ret);
+ goto out_fw;
+ }
+ }
+
+ pos += le32_to_cpu(region->len) + sizeof(*region);
+ regions++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+ goto out_fw;
+ }
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, regions, pos - firmware->size);
+
+ cs_dsp_debugfs_save_wmfwname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ kfree(text);
+
+ return ret;
+}
+
+/**
+ * cs_dsp_get_ctl() - Finds a matching coefficient control
+ * @dsp: pointer to DSP structure
+ * @name: pointer to string to match with a control's subname
+ * @type: the algorithm type to match
+ * @alg: the algorithm id to match
+ *
+ * Find the cs_dsp_coeff_ctl whose subname matches the given name.
+ *
+ * Return: pointer to the control on success, NULL if not found
+ */
+struct cs_dsp_coeff_ctl *cs_dsp_get_ctl(struct cs_dsp *dsp, const char *name, int type,
+ unsigned int alg)
+{
+ struct cs_dsp_coeff_ctl *pos, *rslt = NULL;
+
+ list_for_each_entry(pos, &dsp->ctl_list, list) {
+ if (!pos->subname)
+ continue;
+ if (strncmp(pos->subname, name, pos->subname_len) == 0 &&
+ pos->fw_name == dsp->fw_name &&
+ pos->alg_region.alg == alg &&
+ pos->alg_region.type == type) {
+ rslt = pos;
+ break;
+ }
+ }
+
+ return rslt;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_get_ctl);
+
+static void cs_dsp_ctl_fixup_base(struct cs_dsp *dsp,
+ const struct cs_dsp_alg_region *alg_region)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list) {
+ if (ctl->fw_name == dsp->fw_name &&
+ alg_region->alg == ctl->alg_region.alg &&
+ alg_region->type == ctl->alg_region.type) {
+ ctl->alg_region.base = alg_region->base;
+ }
+ }
+}
+
+static void *cs_dsp_read_algs(struct cs_dsp *dsp, size_t n_algs,
+ const struct cs_dsp_region *mem,
+ unsigned int pos, unsigned int len)
+{
+ void *alg;
+ unsigned int reg;
+ int ret;
+ __be32 val;
+
+ if (n_algs == 0) {
+ cs_dsp_err(dsp, "No algorithms\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (n_algs > 1024) {
+ cs_dsp_err(dsp, "Algorithm count %zx excessive\n", n_algs);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Read the terminator first to validate the length */
+ reg = dsp->ops->region_to_reg(mem, pos + len);
+
+ ret = regmap_raw_read(dsp->regmap, reg, &val, sizeof(val));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list end: %d\n",
+ ret);
+ return ERR_PTR(ret);
+ }
+
+ if (be32_to_cpu(val) != 0xbedead)
+ cs_dsp_warn(dsp, "Algorithm list end %x 0x%x != 0xbedead\n",
+ reg, be32_to_cpu(val));
+
+ /* Convert length from DSP words to bytes */
+ len *= sizeof(u32);
+
+ alg = kzalloc(len, GFP_KERNEL | GFP_DMA);
+ if (!alg)
+ return ERR_PTR(-ENOMEM);
+
+ reg = dsp->ops->region_to_reg(mem, pos);
+
+ ret = regmap_raw_read(dsp->regmap, reg, alg, len);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm list: %d\n", ret);
+ kfree(alg);
+ return ERR_PTR(ret);
+ }
+
+ return alg;
+}
+
+/**
+ * cs_dsp_find_alg_region() - Finds a matching algorithm region
+ * @dsp: pointer to DSP structure
+ * @type: the algorithm type to match
+ * @id: the algorithm id to match
+ *
+ * Return: Pointer to matching algorithm region, or NULL if not found.
+ */
+struct cs_dsp_alg_region *cs_dsp_find_alg_region(struct cs_dsp *dsp,
+ int type, unsigned int id)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ list_for_each_entry(alg_region, &dsp->alg_regions, list) {
+ if (id == alg_region->alg && type == alg_region->type)
+ return alg_region;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_find_alg_region);
+
+static struct cs_dsp_alg_region *cs_dsp_create_region(struct cs_dsp *dsp,
+ int type, __be32 id,
+ __be32 base)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ alg_region = kzalloc(sizeof(*alg_region), GFP_KERNEL);
+ if (!alg_region)
+ return ERR_PTR(-ENOMEM);
+
+ alg_region->type = type;
+ alg_region->alg = be32_to_cpu(id);
+ alg_region->base = be32_to_cpu(base);
+
+ list_add_tail(&alg_region->list, &dsp->alg_regions);
+
+ if (dsp->fw_ver > 0)
+ cs_dsp_ctl_fixup_base(dsp, alg_region);
+
+ return alg_region;
+}
+
+static void cs_dsp_free_alg_regions(struct cs_dsp *dsp)
+{
+ struct cs_dsp_alg_region *alg_region;
+
+ while (!list_empty(&dsp->alg_regions)) {
+ alg_region = list_first_entry(&dsp->alg_regions,
+ struct cs_dsp_alg_region,
+ list);
+ list_del(&alg_region->list);
+ kfree(alg_region);
+ }
+}
+
+static void cs_dsp_parse_wmfw_id_header(struct cs_dsp *dsp,
+ struct wmfw_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+
+ cs_dsp_info(dsp, "Firmware: %x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static void cs_dsp_parse_wmfw_v3_id_header(struct cs_dsp *dsp,
+ struct wmfw_v3_id_hdr *fw, int nalgs)
+{
+ dsp->fw_id = be32_to_cpu(fw->id);
+ dsp->fw_id_version = be32_to_cpu(fw->ver);
+ dsp->fw_vendor_id = be32_to_cpu(fw->vendor_id);
+
+ cs_dsp_info(dsp, "Firmware: %x vendor: 0x%x v%d.%d.%d, %d algorithms\n",
+ dsp->fw_id, dsp->fw_vendor_id,
+ (dsp->fw_id_version & 0xff0000) >> 16,
+ (dsp->fw_id_version & 0xff00) >> 8, dsp->fw_id_version & 0xff,
+ nalgs);
+}
+
+static int cs_dsp_create_regions(struct cs_dsp *dsp, __be32 id, int nregions,
+ const int *type, __be32 *base)
+{
+ struct cs_dsp_alg_region *alg_region;
+ int i;
+
+ for (i = 0; i < nregions; i++) {
+ alg_region = cs_dsp_create_region(dsp, type[i], id, base[i]);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp1_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp1_id_hdr adsp1_id;
+ struct wmfw_adsp1_alg_hdr *adsp1_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP1_DM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp1_id,
+ sizeof(adsp1_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp1_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp1_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_id.fw.id, adsp1_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_id.fw.id, adsp1_id.dm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp1_id) / sizeof(u32);
+ len = (sizeof(*adsp1_alg) * n_algs) / sizeof(u32);
+
+ adsp1_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp1_alg))
+ return PTR_ERR(adsp1_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp, "%d: ID %x v%d.%d.%d DM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp1_alg[i].alg.id),
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp1_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp1_alg[i].dm),
+ be32_to_cpu(adsp1_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_DM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].dm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].dm);
+ len -= be32_to_cpu(adsp1_alg[i].dm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region DM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP1_ZM,
+ adsp1_alg[i].alg.id,
+ adsp1_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp1_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp1_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp1_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp1_alg);
+ return ret;
+}
+
+static int cs_dsp_adsp2_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_adsp2_id_hdr adsp2_id;
+ struct wmfw_adsp2_alg_hdr *adsp2_alg;
+ struct cs_dsp_alg_region *alg_region;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &adsp2_id,
+ sizeof(adsp2_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(adsp2_id.n_algs);
+
+ cs_dsp_parse_wmfw_id_header(dsp, &adsp2_id.fw, n_algs);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_id.fw.id, adsp2_id.xm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_id.fw.id, adsp2_id.ym);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_id.fw.id, adsp2_id.zm);
+ if (IS_ERR(alg_region))
+ return PTR_ERR(alg_region);
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(adsp2_id) / sizeof(u32);
+ len = (sizeof(*adsp2_alg) * n_algs) / sizeof(u32);
+
+ adsp2_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(adsp2_alg))
+ return PTR_ERR(adsp2_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x ZM@%x\n",
+ i, be32_to_cpu(adsp2_alg[i].alg.id),
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(adsp2_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(adsp2_alg[i].xm),
+ be32_to_cpu(adsp2_alg[i].ym),
+ be32_to_cpu(adsp2_alg[i].zm));
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_XM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].xm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].xm);
+ len -= be32_to_cpu(adsp2_alg[i].xm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region XM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_YM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].ym);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].ym);
+ len -= be32_to_cpu(adsp2_alg[i].ym);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region YM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+
+ alg_region = cs_dsp_create_region(dsp, WMFW_ADSP2_ZM,
+ adsp2_alg[i].alg.id,
+ adsp2_alg[i].zm);
+ if (IS_ERR(alg_region)) {
+ ret = PTR_ERR(alg_region);
+ goto out;
+ }
+ if (dsp->fw_ver == 0) {
+ if (i + 1 < n_algs) {
+ len = be32_to_cpu(adsp2_alg[i + 1].zm);
+ len -= be32_to_cpu(adsp2_alg[i].zm);
+ len *= 4;
+ cs_dsp_create_control(dsp, alg_region, 0,
+ len, NULL, 0, 0,
+ WMFW_CTL_TYPE_BYTES);
+ } else {
+ cs_dsp_warn(dsp, "Missing length info for region ZM with ID %x\n",
+ be32_to_cpu(adsp2_alg[i].alg.id));
+ }
+ }
+ }
+
+out:
+ kfree(adsp2_alg);
+ return ret;
+}
+
+static int cs_dsp_halo_create_regions(struct cs_dsp *dsp, __be32 id,
+ __be32 xm_base, __be32 ym_base)
+{
+ static const int types[] = {
+ WMFW_ADSP2_XM, WMFW_HALO_XM_PACKED,
+ WMFW_ADSP2_YM, WMFW_HALO_YM_PACKED
+ };
+ __be32 bases[] = { xm_base, xm_base, ym_base, ym_base };
+
+ return cs_dsp_create_regions(dsp, id, ARRAY_SIZE(types), types, bases);
+}
+
+static int cs_dsp_halo_setup_algs(struct cs_dsp *dsp)
+{
+ struct wmfw_halo_id_hdr halo_id;
+ struct wmfw_halo_alg_hdr *halo_alg;
+ const struct cs_dsp_region *mem;
+ unsigned int pos, len;
+ size_t n_algs;
+ int i, ret;
+
+ mem = cs_dsp_find_region(dsp, WMFW_ADSP2_XM);
+ if (WARN_ON(!mem))
+ return -EINVAL;
+
+ ret = regmap_raw_read(dsp->regmap, mem->base, &halo_id,
+ sizeof(halo_id));
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read algorithm info: %d\n",
+ ret);
+ return ret;
+ }
+
+ n_algs = be32_to_cpu(halo_id.n_algs);
+
+ cs_dsp_parse_wmfw_v3_id_header(dsp, &halo_id.fw, n_algs);
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_id.fw.id,
+ halo_id.xm_base, halo_id.ym_base);
+ if (ret)
+ return ret;
+
+ /* Calculate offset and length in DSP words */
+ pos = sizeof(halo_id) / sizeof(u32);
+ len = (sizeof(*halo_alg) * n_algs) / sizeof(u32);
+
+ halo_alg = cs_dsp_read_algs(dsp, n_algs, mem, pos, len);
+ if (IS_ERR(halo_alg))
+ return PTR_ERR(halo_alg);
+
+ for (i = 0; i < n_algs; i++) {
+ cs_dsp_info(dsp,
+ "%d: ID %x v%d.%d.%d XM@%x YM@%x\n",
+ i, be32_to_cpu(halo_alg[i].alg.id),
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff0000) >> 16,
+ (be32_to_cpu(halo_alg[i].alg.ver) & 0xff00) >> 8,
+ be32_to_cpu(halo_alg[i].alg.ver) & 0xff,
+ be32_to_cpu(halo_alg[i].xm_base),
+ be32_to_cpu(halo_alg[i].ym_base));
+
+ ret = cs_dsp_halo_create_regions(dsp, halo_alg[i].alg.id,
+ halo_alg[i].xm_base,
+ halo_alg[i].ym_base);
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(halo_alg);
+ return ret;
+}
+
+static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware,
+ const char *file)
+{
+ LIST_HEAD(buf_list);
+ struct regmap *regmap = dsp->regmap;
+ struct wmfw_coeff_hdr *hdr;
+ struct wmfw_coeff_item *blk;
+ const struct cs_dsp_region *mem;
+ struct cs_dsp_alg_region *alg_region;
+ const char *region_name;
+ int ret, pos, blocks, type, offset, reg;
+ struct cs_dsp_buf *buf;
+
+ if (!firmware)
+ return 0;
+
+ ret = -EINVAL;
+
+ if (sizeof(*hdr) >= firmware->size) {
+ cs_dsp_err(dsp, "%s: coefficient file too short, %zu bytes\n",
+ file, firmware->size);
+ goto out_fw;
+ }
+
+ hdr = (void *)&firmware->data[0];
+ if (memcmp(hdr->magic, "WMDR", 4) != 0) {
+ cs_dsp_err(dsp, "%s: invalid coefficient magic\n", file);
+ goto out_fw;
+ }
+
+ switch (be32_to_cpu(hdr->rev) & 0xff) {
+ case 1:
+ break;
+ default:
+ cs_dsp_err(dsp, "%s: Unsupported coefficient file format %d\n",
+ file, be32_to_cpu(hdr->rev) & 0xff);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s: v%d.%d.%d\n", file,
+ (le32_to_cpu(hdr->ver) >> 16) & 0xff,
+ (le32_to_cpu(hdr->ver) >> 8) & 0xff,
+ le32_to_cpu(hdr->ver) & 0xff);
+
+ pos = le32_to_cpu(hdr->len);
+
+ blocks = 0;
+ while (pos < firmware->size &&
+ sizeof(*blk) < firmware->size - pos) {
+ blk = (void *)(&firmware->data[pos]);
+
+ type = le16_to_cpu(blk->type);
+ offset = le16_to_cpu(blk->offset);
+
+ cs_dsp_dbg(dsp, "%s.%d: %x v%d.%d.%d\n",
+ file, blocks, le32_to_cpu(blk->id),
+ (le32_to_cpu(blk->ver) >> 16) & 0xff,
+ (le32_to_cpu(blk->ver) >> 8) & 0xff,
+ le32_to_cpu(blk->ver) & 0xff);
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes at 0x%x in %x\n",
+ file, blocks, le32_to_cpu(blk->len), offset, type);
+
+ reg = 0;
+ region_name = "Unknown";
+ switch (type) {
+ case (WMFW_NAME_TEXT << 8):
+ case (WMFW_INFO_TEXT << 8):
+ case (WMFW_METADATA << 8):
+ break;
+ case (WMFW_ABSOLUTE << 8):
+ /*
+ * Old files may use this for global
+ * coefficients.
+ */
+ if (le32_to_cpu(blk->id) == dsp->fw_id &&
+ offset == 0) {
+ region_name = "global coefficients";
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No ZM\n");
+ break;
+ }
+ reg = dsp->ops->region_to_reg(mem, 0);
+
+ } else {
+ region_name = "register";
+ reg = offset;
+ }
+ break;
+
+ case WMFW_ADSP1_DM:
+ case WMFW_ADSP1_ZM:
+ case WMFW_ADSP2_XM:
+ case WMFW_ADSP2_YM:
+ case WMFW_HALO_XM_PACKED:
+ case WMFW_HALO_YM_PACKED:
+ case WMFW_HALO_PM_PACKED:
+ cs_dsp_dbg(dsp, "%s.%d: %d bytes in %x for %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ type, le32_to_cpu(blk->id));
+
+ mem = cs_dsp_find_region(dsp, type);
+ if (!mem) {
+ cs_dsp_err(dsp, "No base for region %x\n", type);
+ break;
+ }
+
+ alg_region = cs_dsp_find_alg_region(dsp, type,
+ le32_to_cpu(blk->id));
+ if (alg_region) {
+ reg = alg_region->base;
+ reg = dsp->ops->region_to_reg(mem, reg);
+ reg += offset;
+ } else {
+ cs_dsp_err(dsp, "No %x for algorithm %x\n",
+ type, le32_to_cpu(blk->id));
+ }
+ break;
+
+ default:
+ cs_dsp_err(dsp, "%s.%d: Unknown region type %x at %d\n",
+ file, blocks, type, pos);
+ break;
+ }
+
+ if (reg) {
+ if (le32_to_cpu(blk->len) >
+ firmware->size - pos - sizeof(*blk)) {
+ cs_dsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
+ buf = cs_dsp_buf_alloc(blk->data,
+ le32_to_cpu(blk->len),
+ &buf_list);
+ if (!buf) {
+ cs_dsp_err(dsp, "Out of memory\n");
+ ret = -ENOMEM;
+ goto out_fw;
+ }
+
+ cs_dsp_dbg(dsp, "%s.%d: Writing %d bytes at %x\n",
+ file, blocks, le32_to_cpu(blk->len),
+ reg);
+ ret = regmap_raw_write_async(regmap, reg, buf->buf,
+ le32_to_cpu(blk->len));
+ if (ret != 0) {
+ cs_dsp_err(dsp,
+ "%s.%d: Failed to write to %x in %s: %d\n",
+ file, blocks, reg, region_name, ret);
+ }
+ }
+
+ pos += (le32_to_cpu(blk->len) + sizeof(*blk) + 3) & ~0x03;
+ blocks++;
+ }
+
+ ret = regmap_async_complete(regmap);
+ if (ret != 0)
+ cs_dsp_err(dsp, "Failed to complete async write: %d\n", ret);
+
+ if (pos > firmware->size)
+ cs_dsp_warn(dsp, "%s.%d: %zu bytes at end of file\n",
+ file, blocks, pos - firmware->size);
+
+ cs_dsp_debugfs_save_binname(dsp, file);
+
+out_fw:
+ regmap_async_complete(regmap);
+ cs_dsp_buf_free(&buf_list);
+ return ret;
+}
+
+static int cs_dsp_create_name(struct cs_dsp *dsp)
+{
+ if (!dsp->name) {
+ dsp->name = devm_kasprintf(dsp->dev, GFP_KERNEL, "DSP%d",
+ dsp->num);
+ if (!dsp->name)
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_common_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = cs_dsp_create_name(dsp);
+ if (ret)
+ return ret;
+
+ INIT_LIST_HEAD(&dsp->alg_regions);
+ INIT_LIST_HEAD(&dsp->ctl_list);
+
+ mutex_init(&dsp->pwr_lock);
+
+ return 0;
+}
+
+/**
+ * cs_dsp_adsp1_init() - Initialise a cs_dsp structure representing an ADSP1 device
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_init(struct cs_dsp *dsp)
+{
+ dsp->ops = &cs_dsp_adsp1_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_init);
+
+/**
+ * cs_dsp_adsp1_power_up() - Load and start the named firmware
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp1_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ unsigned int val;
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, ADSP1_SYS_ENA);
+
+ /*
+ * For simplicity set the DSP clock rate to be the
+ * SYSCLK rate rather than making it configurable.
+ */
+ if (dsp->sysclk_reg) {
+ ret = regmap_read(dsp->regmap, dsp->sysclk_reg, &val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to read SYSCLK state: %d\n", ret);
+ goto err_mutex;
+ }
+
+ val = (val & dsp->sysclk_mask) >> dsp->sysclk_shift;
+
+ ret = regmap_update_bits(dsp->regmap,
+ dsp->base + ADSP1_CONTROL_31,
+ ADSP1_CLK_SEL_MASK, val);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+ goto err_mutex;
+ }
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_adsp1_setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ dsp->booted = true;
+
+ /* Start the core running */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START,
+ ADSP1_CORE_ENA | ADSP1_START);
+
+ dsp->running = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err_ena:
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_power_up);
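
As a usage sketch of the ADSP1 API above: a client driver fills in the cs_dsp struct (dev, regmap, base, num and a client_ops table), calls cs_dsp_adsp1_init(), then hands the requested firmware to cs_dsp_adsp1_power_up(). The firmware file name, the empty client_ops table and the header path are illustrative assumptions rather than part of this patch; coefficient data is optional and may be passed as NULL.

#include <linux/firmware.h>
#include <linux/firmware/cirrus/cs_dsp.h>	/* assumed header location for this series */

static const struct cs_dsp_client_ops my_client_ops;	/* no optional callbacks used in this sketch */

static int my_adsp1_boot(struct cs_dsp *dsp, struct device *dev)
{
	static char wmfw_name[] = "my-adsp1.wmfw";	/* hypothetical firmware file */
	const struct firmware *wmfw;
	int ret;

	/* dev, regmap, base, num and sysclk_* are assumed to be filled in already */
	dsp->client_ops = &my_client_ops;

	ret = cs_dsp_adsp1_init(dsp);
	if (ret)
		return ret;

	ret = request_firmware(&wmfw, wmfw_name, dev);
	if (ret)
		return ret;

	/* No coefficient file in this sketch, so pass NULL for both coeff arguments */
	ret = cs_dsp_adsp1_power_up(dsp, wmfw, wmfw_name, NULL, NULL, "my-fw");
	release_firmware(wmfw);

	return ret;
}
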
+
+/**
+ * cs_dsp_adsp1_power_down() - Halts the DSP
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_adsp1_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->running = false;
+ dsp->booted = false;
+
+ /* Halt the core */
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_CORE_ENA | ADSP1_START, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_19,
+ ADSP1_WDMA_BUFFER_LENGTH_MASK, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP1_CONTROL_30,
+ ADSP1_SYS_ENA, 0);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp1_power_down);
+
+static int cs_dsp_adsp2v2_enable_core(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ int ret, count;
+
+ /* Wait for the RAM to start, should be near instantaneous */
+ for (count = 0; count < 10; ++count) {
+ ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1, &val);
+ if (ret != 0)
+ return ret;
+
+ if (val & ADSP2_RAM_RDY)
+ break;
+
+ usleep_range(250, 500);
+ }
+
+ if (!(val & ADSP2_RAM_RDY)) {
+ cs_dsp_err(dsp, "Failed to start DSP RAM\n");
+ return -EBUSY;
+ }
+
+ cs_dsp_dbg(dsp, "RAM ready after %d polls\n", count);
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_core(struct cs_dsp *dsp)
+{
+ int ret;
+
+ ret = regmap_update_bits_async(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, ADSP2_SYS_ENA);
+ if (ret != 0)
+ return ret;
+
+ return cs_dsp_adsp2v2_enable_core(dsp);
+}
+
+static int cs_dsp_adsp2_lock(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int code0, code1, lock_reg;
+
+ if (!(lock_regions & CS_ADSP2_REGION_ALL))
+ return 0;
+
+ lock_regions &= CS_ADSP2_REGION_ALL;
+ lock_reg = dsp->base + ADSP2_LOCK_REGION_1_LOCK_REGION_0;
+
+ while (lock_regions) {
+ code0 = code1 = 0;
+ if (lock_regions & BIT(0)) {
+ code0 = ADSP2_LOCK_CODE_0;
+ code1 = ADSP2_LOCK_CODE_1;
+ }
+ if (lock_regions & BIT(1)) {
+ code0 |= ADSP2_LOCK_CODE_0 << ADSP2_LOCK_REGION_SHIFT;
+ code1 |= ADSP2_LOCK_CODE_1 << ADSP2_LOCK_REGION_SHIFT;
+ }
+ regmap_write(regmap, lock_reg, code0);
+ regmap_write(regmap, lock_reg, code1);
+ lock_regions >>= 2;
+ lock_reg += 2;
+ }
+
+ return 0;
+}
+
+static int cs_dsp_adsp2_enable_memory(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, ADSP2_MEM_ENA);
+}
+
+static void cs_dsp_adsp2_disable_memory(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+}
+
+static void cs_dsp_adsp2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_2, 0);
+
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_SYS_ENA, 0);
+}
+
+static void cs_dsp_adsp2v2_disable_core(struct cs_dsp *dsp)
+{
+ regmap_write(dsp->regmap, dsp->base + ADSP2_RDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2_WDMA_CONFIG_1, 0);
+ regmap_write(dsp->regmap, dsp->base + ADSP2V2_WDMA_CONFIG_2, 0);
+}
+
+static int cs_dsp_halo_configure_mpu(struct cs_dsp *dsp, unsigned int lock_regions)
+{
+ struct reg_sequence config[] = {
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0x5555 },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0xAAAA },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_0, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_0, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_1, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_1, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_2, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_2, lock_regions },
+ { dsp->base + HALO_MPU_XMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_YMEM_ACCESS_3, 0xFFFFFFFF },
+ { dsp->base + HALO_MPU_WINDOW_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_XREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_YREG_ACCESS_3, lock_regions },
+ { dsp->base + HALO_MPU_LOCK_CONFIG, 0 },
+ };
+
+ return regmap_multi_reg_write(dsp->regmap, config, ARRAY_SIZE(config));
+}
+
+/**
+ * cs_dsp_set_dspclk() - Applies the given frequency to the given cs_dsp
+ * @dsp: pointer to DSP structure
+ * @freq: clock rate to set
+ *
+ * This is only for use on ADSP2 cores.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_set_dspclk(struct cs_dsp *dsp, unsigned int freq)
+{
+ int ret;
+
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CLOCKING,
+ ADSP2_CLK_SEL_MASK,
+ freq << ADSP2_CLK_SEL_SHIFT);
+ if (ret)
+ cs_dsp_err(dsp, "Failed to set clock rate: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_set_dspclk);
+
+static void cs_dsp_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_WATCHDOG,
+ ADSP2_WDT_ENA_MASK, 0);
+}
+
+static void cs_dsp_halo_stop_watchdog(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_WDT_CONTROL,
+ HALO_WDT_EN_MASK, 0);
+}
+
+/**
+ * cs_dsp_power_up() - Downloads firmware to the DSP
+ * @dsp: pointer to DSP structure
+ * @wmfw_firmware: the firmware to be sent
+ * @wmfw_filename: file name of firmware to be sent
+ * @coeff_firmware: the coefficient data to be sent
+ * @coeff_filename: file name of coefficient data to be sent
+ * @fw_name: the user-friendly firmware name
+ *
+ * This function is used on ADSP2 and Halo DSP cores; it powers up the DSP core
+ * and downloads the firmware but does not start the firmware running. The
+ * cs_dsp booted flag will be set once complete, and if the core has a low-power
+ * memory retention mode it will be put into that state after the firmware is
+ * downloaded.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_power_up(struct cs_dsp *dsp,
+ const struct firmware *wmfw_firmware, char *wmfw_filename,
+ const struct firmware *coeff_firmware, char *coeff_filename,
+ const char *fw_name)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->fw_name = fw_name;
+
+ if (dsp->ops->enable_memory) {
+ ret = dsp->ops->enable_memory(dsp);
+ if (ret != 0)
+ goto err_mutex;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err_mem;
+ }
+
+ ret = cs_dsp_load(dsp, wmfw_firmware, wmfw_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = dsp->ops->setup_algs(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ ret = cs_dsp_load_coeff(dsp, coeff_firmware, coeff_filename);
+ if (ret != 0)
+ goto err_ena;
+
+ /* Initialize caches for enabled and unset controls */
+ ret = cs_dsp_coeff_init_control_caches(dsp);
+ if (ret != 0)
+ goto err_ena;
+
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ dsp->booted = true;
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+err_ena:
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+err_mem:
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+err_mutex:
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_power_up);
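
To make the two-phase model described in the kernel-doc concrete, a minimal client-side sketch is shown below: cs_dsp_power_up() downloads the firmware and leaves the core booted but halted, cs_dsp_run() starts execution later, and shutdown reverses the order with cs_dsp_stop() before cs_dsp_power_down(). The file names and the error-handling granularity are assumptions, not something this patch mandates.

static int my_dsp_start(struct cs_dsp *dsp, const struct firmware *wmfw,
			const struct firmware *coeff)
{
	static char wmfw_name[] = "my.wmfw", coeff_name[] = "my.bin";	/* hypothetical */
	int ret;

	/* Phase 1: power up and download; the core is booted but not running */
	ret = cs_dsp_power_up(dsp, wmfw, wmfw_name, coeff, coeff_name, "my-fw");
	if (ret)
		return ret;

	/* Phase 2: start execution once the rest of the system is ready */
	ret = cs_dsp_run(dsp);
	if (ret) {
		cs_dsp_stop(dsp);
		cs_dsp_power_down(dsp);
	}

	return ret;
}

static void my_dsp_shutdown(struct cs_dsp *dsp)
{
	cs_dsp_stop(dsp);		/* must precede cs_dsp_power_down() */
	cs_dsp_power_down(dsp);
}
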
+
+/**
+ * cs_dsp_power_down() - Powers-down the DSP
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_stop() must have been called before this function. The core will be
+ * fully powered down and so the memory will not be retained.
+ */
+void cs_dsp_power_down(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_debugfs_clear(dsp);
+
+ dsp->fw_id = 0;
+ dsp->fw_id_version = 0;
+
+ dsp->booted = false;
+
+ if (dsp->ops->disable_memory)
+ dsp->ops->disable_memory(dsp);
+
+ list_for_each_entry(ctl, &dsp->ctl_list, list)
+ ctl->enabled = 0;
+
+ cs_dsp_free_alg_regions(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Shutdown complete\n");
+}
+EXPORT_SYMBOL_GPL(cs_dsp_power_down);
+
+static int cs_dsp_adsp2_start_core(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START,
+ ADSP2_CORE_ENA | ADSP2_START);
+}
+
+static void cs_dsp_adsp2_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_CORE_ENA | ADSP2_START, 0);
+}
+
+/**
+ * cs_dsp_run() - Starts the firmware running
+ * @dsp: pointer to DSP structure
+ *
+ * cs_dsp_power_up() must have previously been called successfully.
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_run(struct cs_dsp *dsp)
+{
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ if (!dsp->booted) {
+ ret = -EIO;
+ goto err;
+ }
+
+ if (dsp->ops->enable_core) {
+ ret = dsp->ops->enable_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ /* Sync set controls */
+ ret = cs_dsp_coeff_sync_controls(dsp);
+ if (ret != 0)
+ goto err;
+
+ if (dsp->ops->lock_memory) {
+ ret = dsp->ops->lock_memory(dsp, dsp->lock_regions);
+ if (ret != 0) {
+ cs_dsp_err(dsp, "Error configuring MPU: %d\n", ret);
+ goto err;
+ }
+ }
+
+ if (dsp->ops->start_core) {
+ ret = dsp->ops->start_core(dsp);
+ if (ret != 0)
+ goto err;
+ }
+
+ dsp->running = true;
+
+ if (dsp->client_ops->post_run) {
+ ret = dsp->client_ops->post_run(dsp);
+ if (ret)
+ goto err;
+ }
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ return 0;
+
+err:
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+ mutex_unlock(&dsp->pwr_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_run);
+
+/**
+ * cs_dsp_stop() - Stops the firmware
+ * @dsp: pointer to DSP structure
+ *
+ * Memory will not be disabled so firmware will remain loaded.
+ */
+void cs_dsp_stop(struct cs_dsp *dsp)
+{
+ /* Tell the firmware to clean up */
+ cs_dsp_signal_event_controls(dsp, CS_DSP_FW_EVENT_SHUTDOWN);
+
+ if (dsp->ops->stop_watchdog)
+ dsp->ops->stop_watchdog(dsp);
+
+ /* Log firmware state, it can be useful for analysis */
+ if (dsp->ops->show_fw_status)
+ dsp->ops->show_fw_status(dsp);
+
+ mutex_lock(&dsp->pwr_lock);
+
+ dsp->running = false;
+
+ if (dsp->ops->stop_core)
+ dsp->ops->stop_core(dsp);
+ if (dsp->ops->disable_core)
+ dsp->ops->disable_core(dsp);
+
+ if (dsp->client_ops->post_stop)
+ dsp->client_ops->post_stop(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+
+ cs_dsp_dbg(dsp, "Execution stopped\n");
+}
+EXPORT_SYMBOL_GPL(cs_dsp_stop);
+
+static int cs_dsp_halo_start_core(struct cs_dsp *dsp)
+{
+ return regmap_update_bits(dsp->regmap,
+ dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_RESET | HALO_CORE_EN,
+ HALO_CORE_RESET | HALO_CORE_EN);
+}
+
+static void cs_dsp_halo_stop_core(struct cs_dsp *dsp)
+{
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CCM_CORE_CONTROL,
+ HALO_CORE_EN, 0);
+
+ /* reset halo core with CORE_SOFT_RESET */
+ regmap_update_bits(dsp->regmap, dsp->base + HALO_CORE_SOFT_RESET,
+ HALO_CORE_SOFT_RESET_MASK, 1);
+}
+
+/**
+ * cs_dsp_adsp2_init() - Initialise a cs_dsp structure representing an ADSP2 core
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_adsp2_init(struct cs_dsp *dsp)
+{
+ int ret;
+
+ switch (dsp->rev) {
+ case 0:
+ /*
+ * Disable the DSP memory by default when in reset for a small
+ * power saving.
+ */
+ ret = regmap_update_bits(dsp->regmap, dsp->base + ADSP2_CONTROL,
+ ADSP2_MEM_ENA, 0);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to clear memory retention: %d\n", ret);
+ return ret;
+ }
+
+ dsp->ops = &cs_dsp_adsp2_ops[0];
+ break;
+ case 1:
+ dsp->ops = &cs_dsp_adsp2_ops[1];
+ break;
+ default:
+ dsp->ops = &cs_dsp_adsp2_ops[2];
+ break;
+ }
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp2_init);
+
+/**
+ * cs_dsp_halo_init() - Initialise a cs_dsp structure representing a HALO Core DSP
+ * @dsp: pointer to DSP structure
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_halo_init(struct cs_dsp *dsp)
+{
+ dsp->ops = &cs_dsp_halo_ops;
+
+ return cs_dsp_common_init(dsp);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_init);
+
+/**
+ * cs_dsp_remove() - Clean a cs_dsp before deletion
+ * @dsp: pointer to DSP structure
+ */
+void cs_dsp_remove(struct cs_dsp *dsp)
+{
+ struct cs_dsp_coeff_ctl *ctl;
+
+ while (!list_empty(&dsp->ctl_list)) {
+ ctl = list_first_entry(&dsp->ctl_list, struct cs_dsp_coeff_ctl, list);
+
+ if (dsp->client_ops->control_remove)
+ dsp->client_ops->control_remove(ctl);
+
+ list_del(&ctl->list);
+ cs_dsp_free_ctl_blk(ctl);
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_remove);
+
+/**
+ * cs_dsp_read_raw_data_block() - Reads a block of data from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @num_words: the number of DSP words to read
+ * @data: a buffer to store the fetched data
+ *
+ * If this is used to read unpacked 24-bit memory, each 24-bit DSP word will
+ * occupy 32 bits in data (the most significant byte will be 0). This padding
+ * can be removed using cs_dsp_remove_padding().
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_raw_data_block(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr,
+ unsigned int num_words, __be32 *data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ unsigned int reg;
+ int ret;
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ ret = regmap_raw_read(dsp->regmap, reg, data,
+ sizeof(*data) * num_words);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_read_raw_data_block);
+
+/**
+ * cs_dsp_read_data_word() - Reads a word from DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be read
+ * @mem_addr: the address of the data within the memory region
+ * @data: a buffer to store the fetched data
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_read_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 *data)
+{
+ __be32 raw;
+ int ret;
+
+ ret = cs_dsp_read_raw_data_block(dsp, mem_type, mem_addr, 1, &raw);
+ if (ret < 0)
+ return ret;
+
+ *data = be32_to_cpu(raw) & 0x00ffffffu;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cs_dsp_read_data_word);
+
+/**
+ * cs_dsp_write_data_word() - Writes a word to DSP memory
+ * @dsp: pointer to DSP structure
+ * @mem_type: the type of DSP memory containing the data to be written
+ * @mem_addr: the address of the data within the memory region
+ * @data: the data to be written
+ *
+ * Return: Zero for success, a negative number on error.
+ */
+int cs_dsp_write_data_word(struct cs_dsp *dsp, int mem_type, unsigned int mem_addr, u32 data)
+{
+ struct cs_dsp_region const *mem = cs_dsp_find_region(dsp, mem_type);
+ __be32 val = cpu_to_be32(data & 0x00ffffffu);
+ unsigned int reg;
+
+ if (!mem)
+ return -EINVAL;
+
+ reg = dsp->ops->region_to_reg(mem, mem_addr);
+
+ return regmap_raw_write(dsp->regmap, reg, &val, sizeof(val));
+}
+EXPORT_SYMBOL_GPL(cs_dsp_write_data_word);
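
A brief sketch of the two single-word accessors above; the memory-type constant (assumed to be WMFW_ADSP2_XM from the accompanying wmfw header) and the address are illustrative, since real offsets come from the algorithm information parsed out of the firmware.

static int my_set_flag(struct cs_dsp *dsp)
{
	u32 val;
	int ret;

	ret = cs_dsp_read_data_word(dsp, WMFW_ADSP2_XM, 0x80, &val);	/* hypothetical address */
	if (ret)
		return ret;

	/* Only the low 24 bits are significant; the helpers mask/pad accordingly */
	return cs_dsp_write_data_word(dsp, WMFW_ADSP2_XM, 0x80, val | 0x1);
}
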
+
+/**
+ * cs_dsp_remove_padding() - Convert unpacked words to packed bytes
+ * @buf: buffer containing DSP words read from DSP memory
+ * @nwords: number of words to convert
+ *
+ * DSP words from the register map have pad bytes and the data bytes
+ * are in swapped order. This swaps to the native endian order and
+ * strips the pad bytes.
+ */
+void cs_dsp_remove_padding(u32 *buf, int nwords)
+{
+ const __be32 *pack_in = (__be32 *)buf;
+ u8 *pack_out = (u8 *)buf;
+ int i;
+
+ for (i = 0; i < nwords; i++) {
+ u32 word = be32_to_cpu(*pack_in++);
+ *pack_out++ = (u8)word;
+ *pack_out++ = (u8)(word >> 8);
+ *pack_out++ = (u8)(word >> 16);
+ }
+}
+EXPORT_SYMBOL_GPL(cs_dsp_remove_padding);
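
The layout handled by cs_dsp_remove_padding() is easiest to see end-to-end: each unpacked 24-bit DSP word is read back as a big-endian 32-bit value with a zero most-significant byte, and the helper compacts the buffer in place to three native-order bytes per word. A sketch under the same assumptions as above (memory type taken from the wmfw header; needs <linux/slab.h> for kcalloc):

static int my_read_packed_bytes(struct cs_dsp *dsp, unsigned int addr,
				unsigned int nwords, u8 *out)
{
	__be32 *raw;
	int ret;

	raw = kcalloc(nwords, sizeof(*raw), GFP_KERNEL);
	if (!raw)
		return -ENOMEM;

	ret = cs_dsp_read_raw_data_block(dsp, WMFW_ADSP2_XM, addr, nwords, raw);
	if (!ret) {
		/* In place: raw now holds 3 * nwords packed bytes, LSbyte first */
		cs_dsp_remove_padding((u32 *)raw, nwords);
		memcpy(out, raw, nwords * 3);
	}

	kfree(raw);
	return ret;
}
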
+
+/**
+ * cs_dsp_adsp2_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_adsp2_bus_error(struct cs_dsp *dsp)
+{
+ unsigned int val;
+ struct regmap *regmap = dsp->regmap;
+ int ret = 0;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Region Lock Ctrl register: %d\n", ret);
+ goto error;
+ }
+
+ if (val & ADSP2_WDT_TIMEOUT_STS_MASK) {
+ cs_dsp_err(dsp, "watchdog timeout error\n");
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+ }
+
+ if (val & (ADSP2_ADDR_ERR_MASK | ADSP2_REGION_LOCK_ERR_MASK)) {
+ if (val & ADSP2_ADDR_ERR_MASK)
+ cs_dsp_err(dsp, "bus error: address error\n");
+ else
+ cs_dsp_err(dsp, "bus error: region lock error\n");
+
+ ret = regmap_read(regmap, dsp->base + ADSP2_BUS_ERR_ADDR, &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Bus Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "bus error address = 0x%x\n",
+ val & ADSP2_BUS_ERR_ADDR_MASK);
+
+ ret = regmap_read(regmap,
+ dsp->base + ADSP2_PMEM_ERR_ADDR_XMEM_ERR_ADDR,
+ &val);
+ if (ret) {
+ cs_dsp_err(dsp,
+ "Failed to read Pmem Xmem Err Addr register: %d\n",
+ ret);
+ goto error;
+ }
+
+ cs_dsp_err(dsp, "xmem error address = 0x%x\n",
+ val & ADSP2_XMEM_ERR_ADDR_MASK);
+ cs_dsp_err(dsp, "pmem error address = 0x%x\n",
+ (val & ADSP2_PMEM_ERR_ADDR_MASK) >>
+ ADSP2_PMEM_ERR_ADDR_SHIFT);
+ }
+
+ regmap_update_bits(regmap, dsp->base + ADSP2_LOCK_REGION_CTRL,
+ ADSP2_CTRL_ERR_EINT, ADSP2_CTRL_ERR_EINT);
+
+error:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_adsp2_bus_error);
+
+/**
+ * cs_dsp_halo_bus_error() - Handle a DSP bus error interrupt
+ * @dsp: pointer to DSP structure
+ *
+ * The firmware and DSP state will be logged for future analysis.
+ */
+void cs_dsp_halo_bus_error(struct cs_dsp *dsp)
+{
+ struct regmap *regmap = dsp->regmap;
+ unsigned int fault[6];
+ struct reg_sequence clear[] = {
+ { dsp->base + HALO_MPU_XM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_YM_VIO_STATUS, 0x0 },
+ { dsp->base + HALO_MPU_PM_VIO_STATUS, 0x0 },
+ };
+ int ret;
+
+ mutex_lock(&dsp->pwr_lock);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_1,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_1: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: STATUS: 0x%x ADDR: 0x%x\n",
+ *fault & HALO_AHBM_FLAGS_ERR_MASK,
+ (*fault & HALO_AHBM_CORE_ERR_ADDR_MASK) >>
+ HALO_AHBM_CORE_ERR_ADDR_SHIFT);
+
+ ret = regmap_read(regmap, dsp->base_sysinfo + HALO_AHBM_WINDOW_DEBUG_0,
+ fault);
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read AHB DEBUG_0: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "AHB: SYS_ADDR: 0x%x\n", *fault);
+
+ ret = regmap_bulk_read(regmap, dsp->base + HALO_MPU_XM_VIO_ADDR,
+ fault, ARRAY_SIZE(fault));
+ if (ret) {
+ cs_dsp_warn(dsp, "Failed to read MPU fault info: %d\n", ret);
+ goto exit_unlock;
+ }
+
+ cs_dsp_warn(dsp, "XM: STATUS:0x%x ADDR:0x%x\n", fault[1], fault[0]);
+ cs_dsp_warn(dsp, "YM: STATUS:0x%x ADDR:0x%x\n", fault[3], fault[2]);
+ cs_dsp_warn(dsp, "PM: STATUS:0x%x ADDR:0x%x\n", fault[5], fault[4]);
+
+ ret = regmap_multi_reg_write(dsp->regmap, clear, ARRAY_SIZE(clear));
+ if (ret)
+ cs_dsp_warn(dsp, "Failed to clear MPU status: %d\n", ret);
+
+exit_unlock:
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_bus_error);
+
+/**
+ * cs_dsp_halo_wdt_expire() - Handle DSP watchdog expiry
+ * @dsp: pointer to DSP structure
+ *
+ * This is logged for future analysis.
+ */
+void cs_dsp_halo_wdt_expire(struct cs_dsp *dsp)
+{
+ mutex_lock(&dsp->pwr_lock);
+
+ cs_dsp_warn(dsp, "WDT Expiry Fault\n");
+
+ dsp->ops->stop_watchdog(dsp);
+ if (dsp->client_ops->watchdog_expired)
+ dsp->client_ops->watchdog_expired(dsp);
+
+ mutex_unlock(&dsp->pwr_lock);
+}
+EXPORT_SYMBOL_GPL(cs_dsp_halo_wdt_expire);
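
Both of these handlers take the pwr_lock mutex, so a client is expected to call them from thread context, typically a threaded interrupt handler registered against the device's error interrupts. How the interrupt lines are named and demultiplexed is device-specific; the sketch below is an assumption about a typical client, not something defined by this patch.

#include <linux/interrupt.h>

static irqreturn_t my_halo_mpu_irq(int irq, void *data)
{
	struct cs_dsp *dsp = data;

	cs_dsp_halo_bus_error(dsp);	/* logs AHB/MPU fault state */
	return IRQ_HANDLED;
}

static irqreturn_t my_halo_wdt_irq(int irq, void *data)
{
	struct cs_dsp *dsp = data;

	cs_dsp_halo_wdt_expire(dsp);	/* logs the expiry and notifies the client_ops callback */
	return IRQ_HANDLED;
}

/* Registered from probe, e.g.:
 *   request_threaded_irq(mpu_irq, NULL, my_halo_mpu_irq, IRQF_ONESHOT, "dsp-mpu", dsp);
 *   request_threaded_irq(wdt_irq, NULL, my_halo_wdt_irq, IRQF_ONESHOT, "dsp-wdt", dsp);
 */
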
+
+static const struct cs_dsp_ops cs_dsp_adsp1_ops = {
+ .validate_version = cs_dsp_validate_version,
+ .parse_sizes = cs_dsp_adsp1_parse_sizes,
+ .region_to_reg = cs_dsp_region_to_reg,
+};
+
+static const struct cs_dsp_ops cs_dsp_adsp2_ops[] = {
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+
+ .enable_core = cs_dsp_adsp2_enable_core,
+ .disable_core = cs_dsp_adsp2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+ {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_validate_version,
+ .setup_algs = cs_dsp_adsp2_setup_algs,
+ .region_to_reg = cs_dsp_region_to_reg,
+
+ .show_fw_status = cs_dsp_adsp2v2_show_fw_status,
+ .stop_watchdog = cs_dsp_stop_watchdog,
+
+ .enable_memory = cs_dsp_adsp2_enable_memory,
+ .disable_memory = cs_dsp_adsp2_disable_memory,
+ .lock_memory = cs_dsp_adsp2_lock,
+
+ .enable_core = cs_dsp_adsp2v2_enable_core,
+ .disable_core = cs_dsp_adsp2v2_disable_core,
+
+ .start_core = cs_dsp_adsp2_start_core,
+ .stop_core = cs_dsp_adsp2_stop_core,
+ },
+};
+
+static const struct cs_dsp_ops cs_dsp_halo_ops = {
+ .parse_sizes = cs_dsp_adsp2_parse_sizes,
+ .validate_version = cs_dsp_halo_validate_version,
+ .setup_algs = cs_dsp_halo_setup_algs,
+ .region_to_reg = cs_dsp_halo_region_to_reg,
+
+ .show_fw_status = cs_dsp_halo_show_fw_status,
+ .stop_watchdog = cs_dsp_halo_stop_watchdog,
+
+ .lock_memory = cs_dsp_halo_configure_mpu,
+
+ .start_core = cs_dsp_halo_start_core,
+ .stop_core = cs_dsp_halo_stop_core,
+};
+
+MODULE_DESCRIPTION("Cirrus Logic DSP Support");
+MODULE_AUTHOR("Simon Trimmer <simont@opensource.cirrus.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 847f33ffc4ae..ae79c3300129 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
-static bool disable_runtime;
+static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT);
static int __init setup_noefi(char *arg)
{
disable_runtime = true;
@@ -97,6 +97,9 @@ static int __init parse_efi_cmdline(char *str)
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
+ if (parse_option_str(str, "runtime"))
+ disable_runtime = false;
+
if (parse_option_str(str, "nosoftreserve"))
set_bit(EFI_MEM_NO_SOFT_RESERVE, &efi.flags);
diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c
index 9a369a2eda71..116eb465cdb4 100644
--- a/drivers/firmware/psci/psci_checker.c
+++ b/drivers/firmware/psci/psci_checker.c
@@ -155,7 +155,7 @@ static int alloc_init_cpu_groups(cpumask_var_t **pcpu_groups)
if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
return -ENOMEM;
- cpu_groups = kcalloc(nb_available_cpus, sizeof(cpu_groups),
+ cpu_groups = kcalloc(nb_available_cpus, sizeof(*cpu_groups),
GFP_KERNEL);
if (!cpu_groups) {
free_cpumask_var(tmp);
diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
index 2ee97bab7440..7db8066b19fd 100644
--- a/drivers/firmware/qcom_scm.c
+++ b/drivers/firmware/qcom_scm.c
@@ -252,7 +252,7 @@ static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
break;
default:
pr_err("Unknown SMC convention being used\n");
- return -EINVAL;
+ return false;
}
ret = qcom_scm_call(dev, &desc, &res);
@@ -1348,6 +1348,10 @@ static const struct of_device_id qcom_scm_dt_match[] = {
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
},
+ { .compatible = "qcom,scm-msm8953", .data = (void *)(SCM_HAS_CORE_CLK |
+ SCM_HAS_IFACE_CLK |
+ SCM_HAS_BUS_CLK)
+ },
{ .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
SCM_HAS_IFACE_CLK |
SCM_HAS_BUS_CLK)
diff --git a/drivers/firmware/tegra/bpmp-debugfs.c b/drivers/firmware/tegra/bpmp-debugfs.c
index 3e9fa4b54358..6d66fe03fb6a 100644
--- a/drivers/firmware/tegra/bpmp-debugfs.c
+++ b/drivers/firmware/tegra/bpmp-debugfs.c
@@ -74,28 +74,36 @@ static void seqbuf_seek(struct seqbuf *seqbuf, ssize_t offset)
static const char *get_filename(struct tegra_bpmp *bpmp,
const struct file *file, char *buf, int size)
{
- char root_path_buf[512];
- const char *root_path;
- const char *filename;
+ const char *root_path, *filename = NULL;
+ char *root_path_buf;
size_t root_len;
+ root_path_buf = kzalloc(512, GFP_KERNEL);
+ if (!root_path_buf)
+ goto out;
+
 root_path = dentry_path(bpmp->debugfs_mirror, root_path_buf, 512);
if (IS_ERR(root_path))
- return NULL;
+ goto out;
root_len = strlen(root_path);
filename = dentry_path(file->f_path.dentry, buf, size);
- if (IS_ERR(filename))
- return NULL;
+ if (IS_ERR(filename)) {
+ filename = NULL;
+ goto out;
+ }
- if (strlen(filename) < root_len ||
- strncmp(filename, root_path, root_len))
- return NULL;
+ if (strlen(filename) < root_len || strncmp(filename, root_path, root_len)) {
+ filename = NULL;
+ goto out;
+ }
filename += root_len;
+out:
+ kfree(root_path_buf);
return filename;
}
diff --git a/drivers/firmware/tegra/bpmp-tegra210.c b/drivers/firmware/tegra/bpmp-tegra210.c
index c32754055c60..c9c830f658c3 100644
--- a/drivers/firmware/tegra/bpmp-tegra210.c
+++ b/drivers/firmware/tegra/bpmp-tegra210.c
@@ -162,7 +162,6 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
{
struct platform_device *pdev = to_platform_device(bpmp->dev);
struct tegra210_bpmp *priv;
- struct resource *res;
unsigned int i;
int err;
@@ -172,13 +171,11 @@ static int tegra210_bpmp_init(struct tegra_bpmp *bpmp)
bpmp->priv = priv;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->atomics = devm_ioremap_resource(&pdev->dev, res);
+ priv->atomics = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->atomics))
return PTR_ERR(priv->atomics);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- priv->arb_sema = devm_ioremap_resource(&pdev->dev, res);
+ priv->arb_sema = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(priv->arb_sema))
return PTR_ERR(priv->arb_sema);
diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c
index a3cadbaf3cba..1436e03ff4f7 100644
--- a/drivers/firmware/xilinx/zynqmp.c
+++ b/drivers/firmware/xilinx/zynqmp.c
@@ -648,6 +648,23 @@ int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type)
EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset);
/**
+ * zynqmp_pm_ospi_mux_select() - OSPI Mux selection
+ *
+ * @dev_id: Device Id of the OSPI device.
+ * @select: OSPI Mux select value.
+ *
+ * This function selects the OSPI Mux.
+ *
+ * Return: Returns status, either success or error+reason
+ */
+int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select)
+{
+ return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT,
+ select, 0, NULL);
+}
+EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select);
+
+/**
* zynqmp_pm_write_ggs() - PM API for writing global general storage (ggs)
* @index: GGS register index
* @value: Register value to be written
diff --git a/drivers/gpio/gpio-amdpt.c b/drivers/gpio/gpio-amdpt.c
index 44398992ae15..bbf53e289141 100644
--- a/drivers/gpio/gpio-amdpt.c
+++ b/drivers/gpio/gpio-amdpt.c
@@ -72,12 +72,10 @@ static void pt_gpio_free(struct gpio_chip *gc, unsigned offset)
static int pt_gpio_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct acpi_device *acpi_dev;
- acpi_handle handle = ACPI_HANDLE(dev);
struct pt_gpio_chip *pt_gpio;
int ret = 0;
- if (acpi_bus_get_device(handle, &acpi_dev)) {
+ if (!ACPI_COMPANION(dev)) {
dev_err(dev, "PT GPIO device node not found\n");
return -ENODEV;
}
diff --git a/drivers/gpio/gpio-mlxbf2.c b/drivers/gpio/gpio-mlxbf2.c
index 177d03ef4529..40a052bc6784 100644
--- a/drivers/gpio/gpio-mlxbf2.c
+++ b/drivers/gpio/gpio-mlxbf2.c
@@ -256,6 +256,11 @@ mlxbf2_gpio_probe(struct platform_device *pdev)
NULL,
0);
+ if (ret) {
+ dev_err(dev, "bgpio_init failed\n");
+ return ret;
+ }
+
gc->direction_input = mlxbf2_gpio_direction_input;
gc->direction_output = mlxbf2_gpio_direction_output;
gc->ngpio = npins;
diff --git a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c
index fa9b4d8c3ff5..43ca52fa6f9a 100644
--- a/drivers/gpio/gpio-xgs-iproc.c
+++ b/drivers/gpio/gpio-xgs-iproc.c
@@ -224,7 +224,7 @@ static int iproc_gpio_probe(struct platform_device *pdev)
}
chip->gc.label = dev_name(dev);
- if (of_property_read_u32(dn, "ngpios", &num_gpios))
+ if (!of_property_read_u32(dn, "ngpios", &num_gpios))
chip->gc.ngpio = num_gpios;
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index cea777ae7fb9..2a926d0de423 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -103,7 +103,7 @@ config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
config DRM_FBDEV_EMULATION
bool "Enable legacy fbdev support for your modesetting driver"
depends on DRM
- depends on FB
+ depends on FB=y || FB=DRM
select DRM_KMS_HELPER
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
@@ -211,7 +211,7 @@ config DRM_KMS_CMA_HELPER
config DRM_GEM_SHMEM_HELPER
bool
- depends on DRM
+ depends on DRM && MMU
help
Choose this if you need the GEM shmem helper functions
@@ -271,7 +271,8 @@ source "drivers/gpu/drm/kmb/Kconfig"
config DRM_VGEM
tristate "Virtual GEM provider"
- depends on DRM
+ depends on DRM && MMU
+ select DRM_GEM_SHMEM_HELPER
help
Choose this option to get a virtual graphics memory manager,
as used by Mesa's software renderer for enhanced performance.
@@ -279,7 +280,7 @@ config DRM_VGEM
config DRM_VKMS
tristate "Virtual KMS (EXPERIMENTAL)"
- depends on DRM
+ depends on DRM && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
select CRC32
@@ -351,8 +352,6 @@ source "drivers/gpu/drm/hisilicon/Kconfig"
source "drivers/gpu/drm/mediatek/Kconfig"
-source "drivers/gpu/drm/zte/Kconfig"
-
source "drivers/gpu/drm/mxsfb/Kconfig"
source "drivers/gpu/drm/meson/Kconfig"
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index ad1112154898..0dff40bb863c 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -113,7 +113,6 @@ obj-y += bridge/
obj-$(CONFIG_DRM_FSL_DCU) += fsl-dcu/
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv/
obj-y += hisilicon/
-obj-$(CONFIG_DRM_ZTE) += zte/
obj-$(CONFIG_DRM_MXSFB) += mxsfb/
obj-y += tiny/
obj-$(CONFIG_DRM_PL111) += pl111/
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 8d0748184a14..653726588956 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -73,10 +73,8 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
amdgpu-y += \
vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
- vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
- arct_reg_init.o navi12_reg_init.o mxgpu_nv.o sienna_cichlid_reg_init.o vangogh_reg_init.o \
- nbio_v7_2.o dimgrey_cavefish_reg_init.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o \
- beige_goby_reg_init.o yellow_carp_reg_init.o cyan_skillfish_reg_init.o
+ vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o arct_reg_init.o mxgpu_nv.o \
+ nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o
# add DF block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 148f6c3343ab..bcfdb63b1d42 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -307,6 +307,8 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev)
adev->ip_blocks[i].status.late_initialized = true;
}
+ amdgpu_ras_set_error_query_ready(adev, true);
+
amdgpu_device_set_cg_state(adev, AMD_CG_STATE_GATE);
amdgpu_device_set_pg_state(adev, AMD_PG_STATE_GATE);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 269437b01328..b85b67a88a3d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -205,6 +205,7 @@ extern struct amdgpu_mgpu_info mgpu_info;
extern int amdgpu_ras_enable;
extern uint amdgpu_ras_mask;
extern int amdgpu_bad_page_threshold;
+extern bool amdgpu_ignore_bad_page_threshold;
extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer;
extern int amdgpu_async_gfx_ring;
extern int amdgpu_mcbp;
@@ -744,6 +745,7 @@ enum amd_hw_ip_block_type {
UVD_HWIP,
VCN_HWIP = UVD_HWIP,
JPEG_HWIP = VCN_HWIP,
+ VCN1_HWIP,
VCE_HWIP,
DF_HWIP,
DCE_HWIP,
@@ -755,11 +757,16 @@ enum amd_hw_ip_block_type {
CLK_HWIP,
UMC_HWIP,
RSMU_HWIP,
+ XGMI_HWIP,
+ DCI_HWIP,
MAX_HWIP
};
#define HWIP_MAX_INSTANCE 10
+#define HW_ID_MAX 300
+#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))
+
struct amd_powerplay {
void *pp_handle;
const struct amd_pm_funcs *pp_funcs;
@@ -830,6 +837,7 @@ struct amdgpu_device {
struct notifier_block acpi_nb;
struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS];
struct debugfs_blob_wrapper debugfs_vbios_blob;
+ struct debugfs_blob_wrapper debugfs_discovery_blob;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
struct mutex grbm_idx_mutex;
@@ -1078,8 +1086,6 @@ struct amdgpu_device {
char product_name[32];
char serial[20];
- struct amdgpu_autodump autodump;
-
atomic_t throttling_logging_enabled;
struct ratelimit_state throttling_logging_rs;
uint32_t ras_hw_enabled;
@@ -1090,6 +1096,7 @@ struct amdgpu_device {
pci_channel_state_t pci_channel_state;
struct amdgpu_reset_control *reset_cntl;
+ uint32_t ip_versions[HW_ID_MAX][HWIP_MAX_INSTANCE];
};
static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 1d41c2c00623..7077f21f0021 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -31,6 +31,8 @@
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
#include <uapi/linux/kfd_ioctl.h>
+#include "amdgpu_ras.h"
+#include "amdgpu_umc.h"
/* Total memory size in system memory and all GPU VRAM. Used to
* estimate worst case amount of memory to reserve for page tables
@@ -70,8 +72,7 @@ void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev)
if (!kfd_initialized)
return;
- adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev,
- adev->pdev, adev->asic_type, vf);
+ adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, vf);
if (adev->kfd.dev)
amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size;
@@ -780,3 +781,15 @@ bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd)
return adev->have_atomics_support;
}
+
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+
+ /* CPU MCA will handle page retirement if connected_to_cpu is 1 */
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ amdgpu_umc_process_ras_data_cb(adev, &err_data, NULL);
+ else
+ amdgpu_amdkfd_gpu_reset(kgd);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 3bc52b2c604f..751557af09bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -279,6 +279,8 @@ int amdgpu_amdkfd_gpuvm_sync_memory(
struct kgd_dev *kgd, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
struct kgd_mem *mem, void **kptr, uint64_t *size);
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem);
+
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
@@ -290,6 +292,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
uint64_t *mmap_offset);
int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
struct tile_config *config);
+void amdgpu_amdkfd_ras_poison_consumption_handler(struct kgd_dev *kgd);
#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
@@ -321,8 +324,7 @@ int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf);
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
@@ -346,8 +348,7 @@ static inline void kgd2kfd_exit(void)
}
static inline
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev,
- unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
{
return NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 054c1a224def..0e9cfe99ae9e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -1503,7 +1503,7 @@ allocate_init_user_pages_failed:
remove_kgd_mem_from_kfd_bo_list(*mem, avm->process_info);
drm_vma_node_revoke(&gobj->vma_node, drm_priv);
err_node_allow:
- amdgpu_bo_unref(&bo);
+ drm_gem_object_put(gobj);
/* Don't unreserve system mem limit twice */
goto err_reserve_limit;
err_bo_create:
@@ -1871,6 +1871,16 @@ bo_reserve_failed:
return ret;
}
+void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_dev *kgd, struct kgd_mem *mem)
+{
+ struct amdgpu_bo *bo = mem->bo;
+
+ amdgpu_bo_reserve(bo, true);
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_unreserve(bo);
+}
+
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
struct kfd_vm_fault_info *mem)
{
@@ -2041,19 +2051,26 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
/* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) {
- pr_debug("%s: Failed to get user pages: %d\n",
- __func__, ret);
+ pr_debug("Failed %d to get user pages\n", ret);
+
+ /* Return -EFAULT bad address error as success. It will
+ * fail later with a VM fault if the GPU tries to access
+ * it. Better than hanging indefinitely with stalled
+ * user mode queues.
+ *
+ * Return any other error (-EBUSY or -ENOMEM) so the restore is retried.
+ */
+ if (ret != -EFAULT)
+ return ret;
+ } else {
- /* Return error -EBUSY or -ENOMEM, retry restore */
- return ret;
+ /*
+ * FIXME: Cannot ignore the return code, must hold
+ * notifier_lock
+ */
+ amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
}
- /*
- * FIXME: Cannot ignore the return code, must hold
- * notifier_lock
- */
- amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
-
/* Mark the BO as valid unless it was invalidated
* again concurrently.
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 15c45b2a3983..714178f1b6c6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -61,7 +61,7 @@ static void amdgpu_bo_list_free(struct kref *ref)
int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries, struct amdgpu_bo_list **result)
+ size_t num_entries, struct amdgpu_bo_list **result)
{
unsigned last_entry = 0, first_userptr = num_entries;
struct amdgpu_bo_list_entry *array;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
index c905a4cfc173..044b41f0bfd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -61,7 +61,7 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
int amdgpu_bo_list_create(struct amdgpu_device *adev,
struct drm_file *filp,
struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
+ size_t num_entries,
struct amdgpu_bo_list **list);
static inline struct amdgpu_bo_list_entry *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 913f9eaa9cd6..0311d799a010 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -1222,6 +1222,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
if (r)
goto error_unlock;
+ drm_sched_job_arm(&job->base);
+
/* No memory allocation is allowed while holding the notifier lock.
* The lock is held until amdgpu_cs_submit is finished and fence is
* added to BOs.
@@ -1259,7 +1261,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
trace_amdgpu_cs_ioctl(job);
amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index e7a010b7ca1f..468003583b2a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
[AMDGPU_HW_IP_VCN_JPEG] = 1,
};
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ case AMDGPU_CTX_PRIORITY_LOW:
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+ switch (ctx_prio) {
+ case AMDGPU_CTX_PRIORITY_UNSET:
+ return DRM_SCHED_PRIORITY_UNSET;
+
+ case AMDGPU_CTX_PRIORITY_VERY_LOW:
+ return DRM_SCHED_PRIORITY_MIN;
+
+ case AMDGPU_CTX_PRIORITY_LOW:
+ return DRM_SCHED_PRIORITY_MIN;
+
+ case AMDGPU_CTX_PRIORITY_NORMAL:
+ return DRM_SCHED_PRIORITY_NORMAL;
+
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return DRM_SCHED_PRIORITY_HIGH;
+
+ /* This should not happen as we sanitized the userspace-provided priority
+ * already; WARN if it does.
+ */
+ default:
+ WARN(1, "Invalid context priority %d\n", ctx_prio);
+ return DRM_SCHED_PRIORITY_NORMAL;
+ }
+
+}
+
static int amdgpu_ctx_priority_permit(struct drm_file *filp,
- enum drm_sched_priority priority)
+ int32_t priority)
{
- if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+ if (!amdgpu_ctx_priority_is_valid(priority))
return -EINVAL;
/* NORMAL and below are accessible by everyone */
- if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+ if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
return 0;
if (capable(CAP_SYS_NICE))
@@ -62,26 +109,51 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
{
switch (prio) {
- case DRM_SCHED_PRIORITY_HIGH:
- case DRM_SCHED_PRIORITY_KERNEL:
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
return AMDGPU_GFX_PIPE_PRIO_HIGH;
default:
return AMDGPU_GFX_PIPE_PRIO_NORMAL;
}
}
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
- enum drm_sched_priority prio,
- u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
{
+ switch (prio) {
+ case AMDGPU_CTX_PRIORITY_HIGH:
+ return AMDGPU_RING_PRIO_1;
+ case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ int32_t ctx_prio;
unsigned int hw_prio;
- hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
- amdgpu_ctx_sched_prio_to_compute_prio(prio) :
- AMDGPU_RING_PRIO_DEFAULT;
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
+
+ switch (hw_ip) {
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+ break;
+ case AMDGPU_HW_IP_VCE:
+ case AMDGPU_HW_IP_VCN_ENC:
+ hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+ break;
+ default:
+ hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+ break;
+ }
+
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
hw_prio = AMDGPU_RING_PRIO_DEFAULT;
@@ -89,15 +161,17 @@ static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
return hw_prio;
}
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
- const u32 ring)
+ const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ int32_t ctx_prio;
unsigned int hw_prio;
- enum drm_sched_priority priority;
+ enum drm_sched_priority drm_prio;
int r;
entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
@@ -105,10 +179,11 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
if (!entity)
return -ENOMEM;
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+ ctx->init_priority : ctx->override_priority;
entity->sequence = 1;
- priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
- ctx->init_priority : ctx->override_priority;
- hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+ drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
@@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
num_scheds = 1;
}
- r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+ r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
&ctx->guilty);
if (r)
goto error_free_entity;
@@ -139,7 +214,7 @@ error_free_entity:
}
static int amdgpu_ctx_init(struct amdgpu_device *adev,
- enum drm_sched_priority priority,
+ int32_t priority,
struct drm_file *filp,
struct amdgpu_ctx *ctx)
{
@@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
ctx->reset_counter_query = ctx->reset_counter;
ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
ctx->init_priority = priority;
- ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+ ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
return 0;
}
@@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
struct amdgpu_fpriv *fpriv,
struct drm_file *filp,
- enum drm_sched_priority priority,
+ int32_t priority,
uint32_t *id)
{
struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
{
int r;
uint32_t id;
- enum drm_sched_priority priority;
+ int32_t priority;
union drm_amdgpu_ctx *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
struct amdgpu_fpriv *fpriv = filp->driver_priv;
id = args->in.ctx_id;
- r = amdgpu_to_sched_priority(args->in.priority, &priority);
+ priority = args->in.priority;
/* For backwards compatibility reasons, we need to accept
* ioctls with garbage in the priority field */
- if (r == -EINVAL)
- priority = DRM_SCHED_PRIORITY_NORMAL;
+ if (!amdgpu_ctx_priority_is_valid(priority))
+ priority = AMDGPU_CTX_PRIORITY_NORMAL;
switch (args->in.op) {
case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -515,9 +590,9 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
}
static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
- struct amdgpu_ctx_entity *aentity,
- int hw_ip,
- enum drm_sched_priority priority)
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ int32_t priority)
{
struct amdgpu_device *adev = ctx->adev;
unsigned int hw_prio;
@@ -525,12 +600,12 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
unsigned num_scheds;
/* set sw priority */
- drm_sched_entity_set_priority(&aentity->entity, priority);
+ drm_sched_entity_set_priority(&aentity->entity,
+ amdgpu_ctx_to_drm_sched_prio(priority));
/* set hw priority */
if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
- hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
- AMDGPU_HW_IP_COMPUTE);
+ hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
}
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum drm_sched_priority priority)
+ int32_t priority)
{
- enum drm_sched_priority ctx_prio;
+ int32_t ctx_prio;
unsigned i, j;
ctx->override_priority = priority;
- ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+ ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index 14db16bc3322..a44b8b8ed39c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,8 +47,8 @@ struct amdgpu_ctx {
spinlock_t ring_lock;
struct amdgpu_ctx_entity *entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
bool preamble_presented;
- enum drm_sched_priority init_priority;
- enum drm_sched_priority override_priority;
+ int32_t init_priority;
+ int32_t override_priority;
struct mutex lock;
atomic_t guilty;
unsigned long ras_counter_ce;
@@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
struct drm_sched_entity *entity,
uint64_t seq);
-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
- enum drm_sched_priority priority);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);
int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index 463b9c0283f7..164d6a9e9fbb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -27,7 +27,6 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
-#include <linux/poll.h>
#include "amdgpu.h"
#include "amdgpu_pm.h"
@@ -36,87 +35,10 @@
#include "amdgpu_rap.h"
#include "amdgpu_securedisplay.h"
#include "amdgpu_fw_attestation.h"
-
-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned long timeout = 600 * HZ;
- int ret;
-
- wake_up_interruptible(&adev->autodump.gpu_hang);
-
- ret = wait_for_completion_interruptible_timeout(&adev->autodump.dumping, timeout);
- if (ret == 0) {
- pr_err("autodump: timeout, move on to gpu recovery\n");
- return -ETIMEDOUT;
- }
-#endif
- return 0;
-}
+#include "amdgpu_umr.h"
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_debugfs_autodump_open(struct inode *inode, struct file *file)
-{
- struct amdgpu_device *adev = inode->i_private;
- int ret;
-
- file->private_data = adev;
-
- ret = down_read_killable(&adev->reset_sem);
- if (ret)
- return ret;
-
- if (adev->autodump.dumping.done) {
- reinit_completion(&adev->autodump.dumping);
- ret = 0;
- } else {
- ret = -EBUSY;
- }
-
- up_read(&adev->reset_sem);
-
- return ret;
-}
-
-static int amdgpu_debugfs_autodump_release(struct inode *inode, struct file *file)
-{
- struct amdgpu_device *adev = file->private_data;
-
- complete_all(&adev->autodump.dumping);
- return 0;
-}
-
-static unsigned int amdgpu_debugfs_autodump_poll(struct file *file, struct poll_table_struct *poll_table)
-{
- struct amdgpu_device *adev = file->private_data;
-
- poll_wait(file, &adev->autodump.gpu_hang, poll_table);
-
- if (amdgpu_in_reset(adev))
- return POLLIN | POLLRDNORM | POLLWRNORM;
-
- return 0;
-}
-
-static const struct file_operations autodump_debug_fops = {
- .owner = THIS_MODULE,
- .open = amdgpu_debugfs_autodump_open,
- .poll = amdgpu_debugfs_autodump_poll,
- .release = amdgpu_debugfs_autodump_release,
-};
-
-static void amdgpu_debugfs_autodump_init(struct amdgpu_device *adev)
-{
- init_completion(&adev->autodump.dumping);
- complete_all(&adev->autodump.dumping);
- init_waitqueue_head(&adev->autodump.gpu_hang);
-
- debugfs_create_file("amdgpu_autodump", 0600,
- adev_to_drm(adev)->primary->debugfs_root,
- adev, &autodump_debug_fops);
-}
-
/**
* amdgpu_debugfs_process_reg_op - Handle MMIO register reads/writes
*
@@ -279,6 +201,145 @@ static ssize_t amdgpu_debugfs_regs_write(struct file *f, const char __user *buf,
return amdgpu_debugfs_process_reg_op(false, f, (char __user *)buf, size, pos);
}
+static int amdgpu_debugfs_regs2_open(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd;
+
+ rd = kzalloc(sizeof *rd, GFP_KERNEL);
+ if (!rd)
+ return -ENOMEM;
+ rd->adev = file_inode(file)->i_private;
+ file->private_data = rd;
+ mutex_init(&rd->lock);
+
+ return 0;
+}
+
+static int amdgpu_debugfs_regs2_release(struct inode *inode, struct file *file)
+{
+ struct amdgpu_debugfs_regs2_data *rd = file->private_data;
+ mutex_destroy(&rd->lock);
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_op(struct file *f, char __user *buf, u32 offset, size_t size, int write_en)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ struct amdgpu_device *adev = rd->adev;
+ ssize_t result = 0;
+ int r;
+ uint32_t value;
+
+ if (size & 0x3 || offset & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ r = amdgpu_virt_enable_access_debugfs(adev);
+ if (r < 0) {
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ return r;
+ }
+
+ mutex_lock(&rd->lock);
+
+ if (rd->id.use_grbm) {
+ if ((rd->id.grbm.sh != 0xFFFFFFFF && rd->id.grbm.sh >= adev->gfx.config.max_sh_per_se) ||
+ (rd->id.grbm.se != 0xFFFFFFFF && rd->id.grbm.se >= adev->gfx.config.max_shader_engines)) {
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ amdgpu_virt_disable_access_debugfs(adev);
+ mutex_unlock(&rd->lock);
+ return -EINVAL;
+ }
+ mutex_lock(&adev->grbm_idx_mutex);
+ amdgpu_gfx_select_se_sh(adev, rd->id.grbm.se,
+ rd->id.grbm.sh,
+ rd->id.grbm.instance);
+ }
+
+ if (rd->id.use_srbm) {
+ mutex_lock(&adev->srbm_mutex);
+ amdgpu_gfx_select_me_pipe_q(adev, rd->id.srbm.me, rd->id.srbm.pipe,
+ rd->id.srbm.queue, rd->id.srbm.vmid);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_lock(&adev->pm.mutex);
+
+ while (size) {
+ if (!write_en) {
+ value = RREG32(offset >> 2);
+ r = put_user(value, (uint32_t *)buf);
+ } else {
+ r = get_user(value, (uint32_t *)buf);
+ if (!r)
+ amdgpu_mm_wreg_mmio_rlc(adev, offset >> 2, value);
+ }
+ if (r) {
+ result = r;
+ goto end;
+ }
+ offset += 4;
+ size -= 4;
+ result += 4;
+ buf += 4;
+ }
+end:
+ if (rd->id.use_grbm) {
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ }
+
+ if (rd->id.use_srbm) {
+ amdgpu_gfx_select_me_pipe_q(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ if (rd->id.pg_lock)
+ mutex_unlock(&adev->pm.mutex);
+
+ mutex_unlock(&rd->lock);
+
+ pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+
+ amdgpu_virt_disable_access_debugfs(adev);
+ return result;
+}
+
+static long amdgpu_debugfs_regs2_ioctl(struct file *f, unsigned int cmd, unsigned long data)
+{
+ struct amdgpu_debugfs_regs2_data *rd = f->private_data;
+ int r;
+
+ switch (cmd) {
+ case AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE:
+ mutex_lock(&rd->lock);
+ r = copy_from_user(&rd->id, (struct amdgpu_debugfs_regs2_iocdata *)data, sizeof rd->id);
+ mutex_unlock(&rd->lock);
+ return r ? -EINVAL : 0;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static ssize_t amdgpu_debugfs_regs2_read(struct file *f, char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, buf, *pos, size, 0);
+}
+
+static ssize_t amdgpu_debugfs_regs2_write(struct file *f, const char __user *buf, size_t size, loff_t *pos)
+{
+ return amdgpu_debugfs_regs2_op(f, (char __user *)buf, *pos, size, 1);
+}
+
/**
* amdgpu_debugfs_regs_pcie_read - Read from a PCIE register
@@ -1091,6 +1152,16 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf,
return result;
}
+static const struct file_operations amdgpu_debugfs_regs2_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = amdgpu_debugfs_regs2_ioctl,
+ .read = amdgpu_debugfs_regs2_read,
+ .write = amdgpu_debugfs_regs2_write,
+ .open = amdgpu_debugfs_regs2_open,
+ .release = amdgpu_debugfs_regs2_release,
+ .llseek = default_llseek
+};
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -1148,6 +1219,7 @@ static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
+ &amdgpu_debugfs_regs2_fops,
&amdgpu_debugfs_regs_didt_fops,
&amdgpu_debugfs_regs_pcie_fops,
&amdgpu_debugfs_regs_smc_fops,
@@ -1160,6 +1232,7 @@ static const struct file_operations *debugfs_regs[] = {
static const char *debugfs_regs_names[] = {
"amdgpu_regs",
+ "amdgpu_regs2",
"amdgpu_regs_didt",
"amdgpu_regs_pcie",
"amdgpu_regs_smc",
@@ -1206,7 +1279,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
}
/* Avoid accidentally unparking the sched thread during GPU reset */
- r = down_read_killable(&adev->reset_sem);
+ r = down_write_killable(&adev->reset_sem);
if (r)
return r;
@@ -1235,7 +1308,7 @@ static int amdgpu_debugfs_test_ib_show(struct seq_file *m, void *unused)
kthread_unpark(ring->sched.thread);
}
- up_read(&adev->reset_sem);
+ up_write(&adev->reset_sem);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1255,7 +1328,7 @@ static int amdgpu_debugfs_evict_vram(void *data, u64 *val)
return r;
}
- *val = amdgpu_bo_evict_vram(adev);
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1268,17 +1341,15 @@ static int amdgpu_debugfs_evict_gtt(void *data, u64 *val)
{
struct amdgpu_device *adev = (struct amdgpu_device *)data;
struct drm_device *dev = adev_to_drm(adev);
- struct ttm_resource_manager *man;
int r;
r = pm_runtime_get_sync(dev->dev);
if (r < 0) {
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return r;
}
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- *val = ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+ *val = amdgpu_ttm_evict_resources(adev, TTM_PL_TT);
pm_runtime_mark_last_busy(dev->dev);
pm_runtime_put_autosuspend(dev->dev);
@@ -1544,6 +1615,9 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
struct dentry *ent;
int r, i;
+ if (!debugfs_initialized())
+ return 0;
+
ent = debugfs_create_file("amdgpu_preempt_ib", 0600, root, adev,
&fops_ib_preempt);
if (IS_ERR(ent)) {
@@ -1582,13 +1656,10 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
if (!ring)
continue;
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
+ amdgpu_debugfs_ring_init(adev, ring);
}
amdgpu_ras_debugfs_create_all(adev);
- amdgpu_debugfs_autodump_init(adev);
amdgpu_rap_debugfs_init(adev);
amdgpu_securedisplay_debugfs_init(adev);
amdgpu_fw_attestation_debugfs_init(adev);
@@ -1607,6 +1678,11 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
debugfs_create_blob("amdgpu_vbios", 0444, root,
&adev->debugfs_vbios_blob);
+ adev->debugfs_discovery_blob.data = adev->mman.discovery_bin;
+ adev->debugfs_discovery_blob.size = adev->mman.discovery_tmr_size;
+ debugfs_create_blob("amdgpu_discovery", 0444, root,
+ &adev->debugfs_discovery_blob);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index 141a8474e24f..371a6f0deb29 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -22,14 +22,9 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
*/
-
/*
* Debugfs
*/
-struct amdgpu_autodump {
- struct completion dumping;
- struct wait_queue_head gpu_hang;
-};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
@@ -37,4 +32,3 @@ void amdgpu_debugfs_fini(struct amdgpu_device *adev);
void amdgpu_debugfs_fence_init(struct amdgpu_device *adev);
void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
-int amdgpu_debugfs_wait_dump(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index af9bdf16eefd..6e40cc1bc6dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -125,6 +125,7 @@ const char *amdgpu_asic_name[] = {
"DIMGREY_CAVEFISH",
"BEIGE_GOBY",
"YELLOW_CARP",
+ "IP DISCOVERY",
"LAST",
};
@@ -305,7 +306,7 @@ void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos,
uint64_t last;
int idx;
- if (!drm_dev_enter(&adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
return;
BUG_ON(!IS_ALIGNED(pos, 4) || !IS_ALIGNED(size, 4));
@@ -2126,46 +2127,11 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
if (r)
return r;
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
- if (adev->flags & AMD_IS_APU)
- adev->family = AMDGPU_FAMILY_RV;
- else
- adev->family = AMDGPU_FAMILY_AI;
-
- r = soc15_set_ip_blocks(adev);
- if (r)
- return r;
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- if (adev->asic_type == CHIP_VANGOGH)
- adev->family = AMDGPU_FAMILY_VGH;
- else if (adev->asic_type == CHIP_YELLOW_CARP)
- adev->family = AMDGPU_FAMILY_YC;
- else
- adev->family = AMDGPU_FAMILY_NV;
-
- r = nv_set_ip_blocks(adev);
+ default:
+ r = amdgpu_discovery_set_ip_blocks(adev);
if (r)
return r;
break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
}
amdgpu_amdkfd_device_probe(adev);
@@ -2745,6 +2711,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
adev->ip_blocks[i].status.hw = false;
}
+ if (amdgpu_sriov_vf(adev)) {
+ if (amdgpu_virt_release_full_gpu(adev, false))
+ DRM_ERROR("failed to release exclusive mode on fini\n");
+ }
+
return 0;
}
@@ -2805,10 +2776,6 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
amdgpu_ras_fini(adev);
- if (amdgpu_sriov_vf(adev))
- if (amdgpu_virt_release_full_gpu(adev, false))
- DRM_ERROR("failed to release exclusive mode on fini\n");
-
return 0;
}
@@ -3240,6 +3207,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_NAVI14:
case CHIP_NAVI12:
case CHIP_RENOIR:
+ case CHIP_CYAN_SKILLFISH:
case CHIP_SIENNA_CICHLID:
case CHIP_NAVY_FLOUNDER:
case CHIP_DIMGREY_CAVEFISH:
@@ -3247,13 +3215,15 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VANGOGH:
case CHIP_YELLOW_CARP:
#endif
+ default:
return amdgpu_dc != 0;
-#endif
+#else
default:
if (amdgpu_dc > 0)
DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
"but isn't supported by ASIC, ignoring\n");
return false;
+#endif
}
}
@@ -3354,6 +3324,8 @@ static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev)
continue;
} else if (timeout < 0) {
timeout = MAX_SCHEDULE_TIMEOUT;
+ dev_warn(adev->dev, "lockup timeout disabled");
+ add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK);
} else {
timeout = msecs_to_jiffies(timeout);
}
@@ -3538,17 +3510,6 @@ int amdgpu_device_init(struct amdgpu_device *adev,
DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)adev->rmmio_base);
DRM_INFO("register mmio size: %u\n", (unsigned)adev->rmmio_size);
- /* enable PCIE atomic ops */
- r = pci_enable_atomic_ops_to_root(adev->pdev,
- PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
- PCI_EXP_DEVCAP2_ATOMIC_COMP64);
- if (r) {
- adev->have_atomics_support = false;
- DRM_INFO("PCIE atomic ops is not supported\n");
- } else {
- adev->have_atomics_support = true;
- }
-
amdgpu_device_get_pcie_info(adev);
if (amdgpu_mcbp)
@@ -3571,6 +3532,19 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (r)
return r;
+ /* enable PCIE atomic ops */
+ if (amdgpu_sriov_vf(adev))
+ adev->have_atomics_support = ((struct amd_sriov_msg_pf2vf_info *)
+ adev->virt.fw_reserve.p_pf2vf)->pcie_atomic_ops_enabled_flags ==
+ (PCI_EXP_DEVCAP2_ATOMIC_COMP32 | PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ else
+ adev->have_atomics_support =
+ !pci_enable_atomic_ops_to_root(adev->pdev,
+ PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
+ PCI_EXP_DEVCAP2_ATOMIC_COMP64);
+ if (!adev->have_atomics_support)
+ dev_info(adev->dev, "PCIE atomic ops are not supported\n");
+
/* doorbell bar mapping and doorbell index init*/
amdgpu_device_doorbell_init(adev);
@@ -3865,9 +3839,11 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
amdgpu_fbdev_fini(adev);
+ amdgpu_device_ip_fini_early(adev);
+
amdgpu_irq_fini_hw(adev);
- amdgpu_device_ip_fini_early(adev);
+ ttm_device_clear_dma_mappings(&adev->mman.bdev);
amdgpu_gart_dummy_page_fini(adev);
@@ -3876,8 +3852,8 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
void amdgpu_device_fini_sw(struct amdgpu_device *adev)
{
- amdgpu_device_ip_fini(adev);
amdgpu_fence_driver_sw_fini(adev);
+ amdgpu_device_ip_fini(adev);
release_firmware(adev->firmware.gpu_info_fw);
adev->firmware.gpu_info_fw = NULL;
adev->accel_working = false;
@@ -3909,6 +3885,25 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
}
+/**
+ * amdgpu_device_evict_resources - evict device resources
+ * @adev: amdgpu device object
+ *
+ * Evicts all ttm device resources (vram BOs, gart table) from the lru list
+ * of the vram memory type. Mainly used for evicting device resources
+ * at suspend time.
+ *
+ */
+static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
+{
+ /* No need to evict vram on APUs for suspend to ram */
+ if (adev->in_s3 && (adev->flags & AMD_IS_APU))
+ return;
+
+ if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
+ DRM_WARN("evicting device resources failed\n");
+}
/*
* Suspend & resume.
@@ -3949,17 +3944,16 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
if (!adev->in_s0ix)
amdgpu_amdkfd_suspend(adev, adev->in_runpm);
- /* evict vram memory */
- amdgpu_bo_evict_vram(adev);
+ /* First evict vram memory */
+ amdgpu_device_evict_resources(adev);
amdgpu_fence_driver_hw_fini(adev);
amdgpu_device_ip_suspend_phase2(adev);
- /* evict remaining vram memory
- * This second call to evict vram is to evict the gart page table
- * using the CPU.
+ /* This second call to evict device resources is to evict
+ * the gart page table using the CPU.
*/
- amdgpu_bo_evict_vram(adev);
+ amdgpu_device_evict_resources(adev);
return 0;
}
@@ -4466,10 +4460,6 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
if (reset_context->reset_req_dev == adev)
job = reset_context->job;
- /* no need to dump if device is not in good state during probe period */
- if (!adev->gmc.xgmi.pending_reset)
- amdgpu_debugfs_wait_dump(adev);
-
if (amdgpu_sriov_vf(adev)) {
/* stop the data exchange thread */
amdgpu_virt_fini_data_exchange(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
index 52488bb45112..6b25837955c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_df.h
@@ -52,6 +52,7 @@ struct amdgpu_df_funcs {
uint64_t (*get_fica)(struct amdgpu_device *adev, uint32_t ficaa_val);
void (*set_fica)(struct amdgpu_device *adev, uint32_t ficaa_val,
uint32_t ficadl_val, uint32_t ficadh_val);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
};
struct amdgpu_df {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index ada7bc19118a..d7c8d9e3c203 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -21,16 +21,58 @@
*
*/
+#include <linux/firmware.h>
+
#include "amdgpu.h"
#include "amdgpu_discovery.h"
#include "soc15_hw_ip.h"
#include "discovery.h"
+#include "soc15.h"
+#include "gfx_v9_0.h"
+#include "gmc_v9_0.h"
+#include "df_v1_7.h"
+#include "df_v3_6.h"
+#include "nbio_v6_1.h"
+#include "nbio_v7_0.h"
+#include "nbio_v7_4.h"
+#include "hdp_v4_0.h"
+#include "vega10_ih.h"
+#include "vega20_ih.h"
+#include "sdma_v4_0.h"
+#include "uvd_v7_0.h"
+#include "vce_v4_0.h"
+#include "vcn_v1_0.h"
+#include "vcn_v2_5.h"
+#include "jpeg_v2_5.h"
+#include "smuio_v9_0.h"
+#include "gmc_v10_0.h"
+#include "gfxhub_v2_0.h"
+#include "mmhub_v2_0.h"
+#include "nbio_v2_3.h"
+#include "nbio_v7_2.h"
+#include "hdp_v5_0.h"
+#include "nv.h"
+#include "navi10_ih.h"
+#include "gfx_v10_0.h"
+#include "sdma_v5_0.h"
+#include "sdma_v5_2.h"
+#include "vcn_v2_0.h"
+#include "jpeg_v2_0.h"
+#include "vcn_v3_0.h"
+#include "jpeg_v3_0.h"
+#include "amdgpu_vkms.h"
+#include "mes_v10_1.h"
+#include "smuio_v11_0.h"
+#include "smuio_v11_0_6.h"
+#include "smuio_v13_0.h"
+
+MODULE_FIRMWARE("amdgpu/ip_discovery.bin");
+
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMM_INDEX 0x0
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
-#define HW_ID_MAX 300
static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1",
@@ -66,6 +108,8 @@ static const char *hw_id_names[HW_ID_MAX] = {
[HDP_HWID] = "HDP",
[SDMA0_HWID] = "SDMA0",
[SDMA1_HWID] = "SDMA1",
+ [SDMA2_HWID] = "SDMA2",
+ [SDMA3_HWID] = "SDMA3",
[ISP_HWID] = "ISP",
[DBGU_IO_HWID] = "DBGU_IO",
[DF_HWID] = "DF",
@@ -129,6 +173,8 @@ static int hw_id_map[MAX_HWIP] = {
[THM_HWIP] = THM_HWID,
[CLK_HWIP] = CLKA_HWID,
[UMC_HWIP] = UMC_HWID,
+ [XGMI_HWIP] = XGMI_HWID,
+ [DCI_HWIP] = DCI_HWID,
};
static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary)
@@ -164,6 +210,7 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
struct binary_header *bhdr;
struct ip_discovery_header *ihdr;
struct gpu_info_header *ghdr;
+ const struct firmware *fw;
uint16_t offset;
uint16_t size;
uint16_t checksum;
@@ -174,10 +221,21 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
if (!adev->mman.discovery_bin)
return -ENOMEM;
- r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
- if (r) {
- DRM_ERROR("failed to read ip discovery binary\n");
- goto out;
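+ /* amdgpu_discovery=2: prefer an ip_discovery table supplied as the
+ * "amdgpu/ip_discovery.bin" firmware file and fall back to the copy
+ * in VRAM if the request fails.
+ */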
+ if (amdgpu_discovery == 2) {
+ r = request_firmware(&fw, "amdgpu/ip_discovery.bin", adev->dev);
+ if (r)
+ goto get_from_vram;
+ dev_info(adev->dev, "Using IP discovery from file\n");
+ memcpy((u8 *)adev->mman.discovery_bin, (u8 *)fw->data,
+ adev->mman.discovery_tmr_size);
+ release_firmware(fw);
+ } else {
+get_from_vram:
+ r = amdgpu_discovery_read_binary(adev, adev->mman.discovery_bin);
+ if (r) {
+ DRM_ERROR("failed to read ip discovery binary\n");
+ goto out;
+ }
}
bhdr = (struct binary_header *)adev->mman.discovery_bin;
@@ -245,6 +303,22 @@ void amdgpu_discovery_fini(struct amdgpu_device *adev)
adev->mman.discovery_bin = NULL;
}
+static int amdgpu_discovery_validate_ip(const struct ip *ip)
+{
+ if (ip->number_instance >= HWIP_MAX_INSTANCE) {
+ DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
+ ip->number_instance);
+ return -EINVAL;
+ }
+ if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
+ DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
+ le16_to_cpu(ip->hw_id));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
struct binary_header *bhdr;
@@ -290,6 +364,10 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (j = 0; j < num_ips; j++) {
ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
+
+ if (amdgpu_discovery_validate_ip(ip))
+ goto next_ip;
+
num_base_address = ip->num_base_address;
DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
@@ -301,6 +379,11 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
if (le16_to_cpu(ip->hw_id) == VCN_HWID)
adev->vcn.num_vcn_inst++;
+ if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
+ le16_to_cpu(ip->hw_id) == SDMA3_HWID)
+ adev->sdma.num_instances++;
for (k = 0; k < num_base_address; k++) {
/*
@@ -317,10 +400,21 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
+ /* Instance support is somewhat inconsistent.
+ * SDMA is a good example. Sienna cichlid has 4 total
+ * SDMA instances, each enumerated separately (HWIDs
+ * 42, 43, 68, 69). Arcturus has 8 total SDMA instances,
+ * but they are enumerated as multiple instances of the
+ * same HWIDs (4x HWID 42, 4x HWID 43). UMC is another
+ * example. On most chips there are multiple instances
+ * with the same HWID.
+ */
+ adev->ip_versions[hw_ip][ip->number_instance] =
+ IP_VERSION(ip->major, ip->minor, ip->revision);
}
-
}
+next_ip:
ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1);
}
}
@@ -401,6 +495,10 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
switch (le32_to_cpu(harvest_info->list[i].hw_id)) {
case VCN_HWID:
vcn_harvest_count++;
+ if (harvest_info->list[i].number_instance == 0)
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
+ else
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
break;
case DMU_HWID:
adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
@@ -409,10 +507,21 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
break;
}
}
+ /* some IP discovery tables on Navy Flounder don't have this set correctly */
+ if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)))
+ adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
}
+ if ((adev->pdev->device == 0x731E &&
+ (adev->pdev->revision == 0xC6 || adev->pdev->revision == 0xC7)) ||
+ (adev->pdev->device == 0x7340 && adev->pdev->revision == 0xC9) ||
+ (adev->pdev->device == 0x7360 && adev->pdev->revision == 0xC7)) {
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
+ adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
+ }
}
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
@@ -450,3 +559,753 @@ int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
return 0;
}
+
+static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
+{
+ /* key off the GC IP version to pick the common (SOC) IP block */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
+{
+ /* use GC or MMHUB IP version */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[OSSSYS_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 3, 0):
+ amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
+ break;
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
+ break;
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
+ break;
+ case IP_VERSION(11, 0, 8):
+ amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
+ break;
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_ARCTURUS)
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ else
+ amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
+ amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
+ break;
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
+ break;
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(13, 0, 3):
+ amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
+ amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
+#if defined(CONFIG_DRM_AMD_DC)
+ } else if (adev->ip_versions[DCE_HWIP][0]) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (adev->ip_versions[DCI_HWIP][0]) {
+ switch (adev->ip_versions[DCI_HWIP][0]) {
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
+ case IP_VERSION(12, 1, 0):
+ amdgpu_device_ip_block_add(adev, &dm_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
+ amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
+ case IP_VERSION(4, 4, 0):
+ amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
+ break;
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 3):
+ case IP_VERSION(5, 2, 1):
+ amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
+{
+ if (adev->ip_versions[VCE_HWIP][0]) {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 2, 0):
+ /* UVD is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (adev->ip_versions[VCE_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ /* VCE is not supported on vega20 SR-IOV */
+ if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
+ amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 2, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ break;
+ case IP_VERSION(2, 0, 3):
+ break;
+ case IP_VERSION(2, 5, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
+ break;
+ case IP_VERSION(2, 6, 0):
+ amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
+ break;
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 1, 1):
+ case IP_VERSION(3, 0, 2):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
+ break;
+ case IP_VERSION(3, 0, 33):
+ amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
+{
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 3):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
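+/*
+ * Build the IP block list for this device. ASICs that predate IP discovery
+ * have their IP versions filled in by hand below; everything else gets them
+ * from the discovery table. The per-IP helpers above then add the matching
+ * IP blocks based on those versions.
+ */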
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+{
+ int r;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA10:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
+ break;
+ case CHIP_VEGA12:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
+ break;
+ case CHIP_RAVEN:
+ vega10_reg_base_init(adev);
+ adev->sdma.num_instances = 1;
+ adev->vcn.num_vcn_inst = 1;
+ if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
+ } else {
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
+ adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
+ }
+ break;
+ case CHIP_VEGA20:
+ vega20_reg_base_init(adev);
+ adev->sdma.num_instances = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
+ adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
+ adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
+ break;
+ case CHIP_ARCTURUS:
+ arct_reg_base_init(adev);
+ adev->sdma.num_instances = 8;
+ adev->vcn.num_vcn_inst = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
+ break;
+ case CHIP_ALDEBARAN:
+ aldebaran_reg_base_init(adev);
+ adev->sdma.num_instances = 5;
+ adev->vcn.num_vcn_inst = 2;
+ adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
+ adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
+ adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
+ adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
+ adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
+ adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
+ adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
+ adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+ break;
+ default:
+ r = amdgpu_discovery_reg_base_init(adev);
+ if (r)
+ return -EINVAL;
+
+ amdgpu_discovery_harvest_ip(adev);
+
+ if (!adev->mman.discovery_bin) {
+ DRM_ERROR("ip discovery uninitialized\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
+ adev->family = AMDGPU_FAMILY_AI;
+ break;
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 3, 0):
+ adev->family = AMDGPU_FAMILY_RV;
+ break;
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ adev->family = AMDGPU_FAMILY_NV;
+ break;
+ case IP_VERSION(10, 3, 1):
+ adev->family = AMDGPU_FAMILY_VGH;
+ break;
+ case IP_VERSION(10, 3, 3):
+ adev->family = AMDGPU_FAMILY_YC;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
+ adev->gmc.xgmi.supported = true;
+
+ /* set NBIO version */
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ adev->nbio.funcs = &nbio_v6_1_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
+ adev->nbio.funcs = &nbio_v7_0_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 0):
+ case IP_VERSION(7, 4, 1):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
+ break;
+ case IP_VERSION(7, 4, 4):
+ adev->nbio.funcs = &nbio_v7_4_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
+ break;
+ case IP_VERSION(7, 2, 0):
+ case IP_VERSION(7, 2, 1):
+ case IP_VERSION(7, 5, 0):
+ adev->nbio.funcs = &nbio_v7_2_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
+ break;
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
+ break;
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
+ adev->nbio.funcs = &nbio_v2_3_funcs;
+ adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[HDP_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 0, 1):
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 1):
+ case IP_VERSION(4, 4, 0):
+ adev->hdp.funcs = &hdp_v4_0_funcs;
+ break;
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 1):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 0, 4):
+ case IP_VERSION(5, 2, 0):
+ adev->hdp.funcs = &hdp_v5_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[DF_HWIP][0]) {
+ case IP_VERSION(3, 6, 0):
+ case IP_VERSION(3, 6, 1):
+ case IP_VERSION(3, 6, 2):
+ adev->df.funcs = &df_v3_6_funcs;
+ break;
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(3, 5, 1):
+ case IP_VERSION(3, 5, 2):
+ adev->df.funcs = &df_v1_7_funcs;
+ break;
+ default:
+ break;
+ }
+
+ switch (adev->ip_versions[SMUIO_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(10, 0, 2):
+ adev->smuio.funcs = &smuio_v9_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 8):
+ adev->smuio.funcs = &smuio_v11_0_funcs;
+ break;
+ case IP_VERSION(11, 0, 6):
+ case IP_VERSION(11, 0, 10):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ adev->smuio.funcs = &smuio_v11_0_6_funcs;
+ break;
+ case IP_VERSION(13, 0, 2):
+ adev->smuio.funcs = &smuio_v13_0_funcs;
+ break;
+ default:
+ break;
+ }
+
+ r = amdgpu_discovery_set_common_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gmc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ /* For SR-IOV, PSP needs to be initialized before IH */
+ if (amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+ } else {
+ r = amdgpu_discovery_set_ih_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_psp_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+ }
+
+ if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_display_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_gc_ip_blocks(adev);
+ if (r)
+ return r;
+
+ r = amdgpu_discovery_set_sdma_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
+ !amdgpu_sriov_vf(adev)) {
+ r = amdgpu_discovery_set_smu_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ r = amdgpu_discovery_set_mm_ip_blocks(adev);
+ if (r)
+ return r;
+
+ if (adev->enable_mes) {
+ r = amdgpu_discovery_set_mes_ip_blocks(adev);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 48e6b88cfdfe..0ea029e3b850 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -36,5 +36,6 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int n
int amdgpu_discovery_get_vcn_version(struct amdgpu_device *adev, int vcn_instance,
int *major, int *minor, int *revision);
int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev);
+int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev);
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f18240f87387..ad95de6399af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_probe_helper.h>
#include <linux/mmu_notifier.h>
#include <linux/suspend.h>
+#include <linux/cc_platform.h>
#include "amdgpu.h"
#include "amdgpu_irq.h"
@@ -96,9 +97,11 @@
* - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
* - 3.41.0 - Add video codec query
* - 3.42.0 - Add 16bpc fixed point display support
+ * - 3.43.0 - Add device hot plug/unplug support
+ * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
*/
#define KMS_DRIVER_MAJOR 3
-#define KMS_DRIVER_MINOR 42
+#define KMS_DRIVER_MINOR 44
#define KMS_DRIVER_PATCHLEVEL 0
int amdgpu_vram_limit;
@@ -627,7 +630,7 @@ module_param_named(mcbp, amdgpu_mcbp, int, 0444);
/**
* DOC: discovery (int)
* Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
- * (-1 = auto (default), 0 = disabled, 1 = enabled)
+ * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
*/
MODULE_PARM_DESC(discovery,
"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
@@ -875,7 +878,7 @@ module_param_named(reset_method, amdgpu_reset_method, int, 0444);
* result in the GPU entering bad status when the number of total
* faulty pages by ECC exceeds the threshold value.
*/
-MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = auto(default value), 0 = disable bad page retirement)");
+MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold (-1 = auto (default value), 0 = disable bad page retirement, -2 = ignore bad page threshold)");
module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
@@ -890,6 +893,636 @@ MODULE_PARM_DESC(smu_pptable_id,
"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
+/* These devices are not supported by amdgpu.
+ * They are supported by the mach64, r128 and radeon drivers.
+ */
+static const u16 amdgpu_unsupported_pciidlist[] = {
+ /* mach64 */
+ 0x4354,
+ 0x4358,
+ 0x4554,
+ 0x4742,
+ 0x4744,
+ 0x4749,
+ 0x474C,
+ 0x474D,
+ 0x474E,
+ 0x474F,
+ 0x4750,
+ 0x4751,
+ 0x4752,
+ 0x4753,
+ 0x4754,
+ 0x4755,
+ 0x4756,
+ 0x4757,
+ 0x4758,
+ 0x4759,
+ 0x475A,
+ 0x4C42,
+ 0x4C44,
+ 0x4C47,
+ 0x4C49,
+ 0x4C4D,
+ 0x4C4E,
+ 0x4C50,
+ 0x4C51,
+ 0x4C52,
+ 0x4C53,
+ 0x5654,
+ 0x5655,
+ 0x5656,
+ /* r128 */
+ 0x4c45,
+ 0x4c46,
+ 0x4d46,
+ 0x4d4c,
+ 0x5041,
+ 0x5042,
+ 0x5043,
+ 0x5044,
+ 0x5045,
+ 0x5046,
+ 0x5047,
+ 0x5048,
+ 0x5049,
+ 0x504A,
+ 0x504B,
+ 0x504C,
+ 0x504D,
+ 0x504E,
+ 0x504F,
+ 0x5050,
+ 0x5051,
+ 0x5052,
+ 0x5053,
+ 0x5054,
+ 0x5055,
+ 0x5056,
+ 0x5057,
+ 0x5058,
+ 0x5245,
+ 0x5246,
+ 0x5247,
+ 0x524b,
+ 0x524c,
+ 0x534d,
+ 0x5446,
+ 0x544C,
+ 0x5452,
+ /* radeon */
+ 0x3150,
+ 0x3151,
+ 0x3152,
+ 0x3154,
+ 0x3155,
+ 0x3E50,
+ 0x3E54,
+ 0x4136,
+ 0x4137,
+ 0x4144,
+ 0x4145,
+ 0x4146,
+ 0x4147,
+ 0x4148,
+ 0x4149,
+ 0x414A,
+ 0x414B,
+ 0x4150,
+ 0x4151,
+ 0x4152,
+ 0x4153,
+ 0x4154,
+ 0x4155,
+ 0x4156,
+ 0x4237,
+ 0x4242,
+ 0x4336,
+ 0x4337,
+ 0x4437,
+ 0x4966,
+ 0x4967,
+ 0x4A48,
+ 0x4A49,
+ 0x4A4A,
+ 0x4A4B,
+ 0x4A4C,
+ 0x4A4D,
+ 0x4A4E,
+ 0x4A4F,
+ 0x4A50,
+ 0x4A54,
+ 0x4B48,
+ 0x4B49,
+ 0x4B4A,
+ 0x4B4B,
+ 0x4B4C,
+ 0x4C57,
+ 0x4C58,
+ 0x4C59,
+ 0x4C5A,
+ 0x4C64,
+ 0x4C66,
+ 0x4C67,
+ 0x4E44,
+ 0x4E45,
+ 0x4E46,
+ 0x4E47,
+ 0x4E48,
+ 0x4E49,
+ 0x4E4A,
+ 0x4E4B,
+ 0x4E50,
+ 0x4E51,
+ 0x4E52,
+ 0x4E53,
+ 0x4E54,
+ 0x4E56,
+ 0x5144,
+ 0x5145,
+ 0x5146,
+ 0x5147,
+ 0x5148,
+ 0x514C,
+ 0x514D,
+ 0x5157,
+ 0x5158,
+ 0x5159,
+ 0x515A,
+ 0x515E,
+ 0x5460,
+ 0x5462,
+ 0x5464,
+ 0x5548,
+ 0x5549,
+ 0x554A,
+ 0x554B,
+ 0x554C,
+ 0x554D,
+ 0x554E,
+ 0x554F,
+ 0x5550,
+ 0x5551,
+ 0x5552,
+ 0x5554,
+ 0x564A,
+ 0x564B,
+ 0x564F,
+ 0x5652,
+ 0x5653,
+ 0x5657,
+ 0x5834,
+ 0x5835,
+ 0x5954,
+ 0x5955,
+ 0x5974,
+ 0x5975,
+ 0x5960,
+ 0x5961,
+ 0x5962,
+ 0x5964,
+ 0x5965,
+ 0x5969,
+ 0x5a41,
+ 0x5a42,
+ 0x5a61,
+ 0x5a62,
+ 0x5b60,
+ 0x5b62,
+ 0x5b63,
+ 0x5b64,
+ 0x5b65,
+ 0x5c61,
+ 0x5c63,
+ 0x5d48,
+ 0x5d49,
+ 0x5d4a,
+ 0x5d4c,
+ 0x5d4d,
+ 0x5d4e,
+ 0x5d4f,
+ 0x5d50,
+ 0x5d52,
+ 0x5d57,
+ 0x5e48,
+ 0x5e4a,
+ 0x5e4b,
+ 0x5e4c,
+ 0x5e4d,
+ 0x5e4f,
+ 0x6700,
+ 0x6701,
+ 0x6702,
+ 0x6703,
+ 0x6704,
+ 0x6705,
+ 0x6706,
+ 0x6707,
+ 0x6708,
+ 0x6709,
+ 0x6718,
+ 0x6719,
+ 0x671c,
+ 0x671d,
+ 0x671f,
+ 0x6720,
+ 0x6721,
+ 0x6722,
+ 0x6723,
+ 0x6724,
+ 0x6725,
+ 0x6726,
+ 0x6727,
+ 0x6728,
+ 0x6729,
+ 0x6738,
+ 0x6739,
+ 0x673e,
+ 0x6740,
+ 0x6741,
+ 0x6742,
+ 0x6743,
+ 0x6744,
+ 0x6745,
+ 0x6746,
+ 0x6747,
+ 0x6748,
+ 0x6749,
+ 0x674A,
+ 0x6750,
+ 0x6751,
+ 0x6758,
+ 0x6759,
+ 0x675B,
+ 0x675D,
+ 0x675F,
+ 0x6760,
+ 0x6761,
+ 0x6762,
+ 0x6763,
+ 0x6764,
+ 0x6765,
+ 0x6766,
+ 0x6767,
+ 0x6768,
+ 0x6770,
+ 0x6771,
+ 0x6772,
+ 0x6778,
+ 0x6779,
+ 0x677B,
+ 0x6840,
+ 0x6841,
+ 0x6842,
+ 0x6843,
+ 0x6849,
+ 0x684C,
+ 0x6850,
+ 0x6858,
+ 0x6859,
+ 0x6880,
+ 0x6888,
+ 0x6889,
+ 0x688A,
+ 0x688C,
+ 0x688D,
+ 0x6898,
+ 0x6899,
+ 0x689b,
+ 0x689c,
+ 0x689d,
+ 0x689e,
+ 0x68a0,
+ 0x68a1,
+ 0x68a8,
+ 0x68a9,
+ 0x68b0,
+ 0x68b8,
+ 0x68b9,
+ 0x68ba,
+ 0x68be,
+ 0x68bf,
+ 0x68c0,
+ 0x68c1,
+ 0x68c7,
+ 0x68c8,
+ 0x68c9,
+ 0x68d8,
+ 0x68d9,
+ 0x68da,
+ 0x68de,
+ 0x68e0,
+ 0x68e1,
+ 0x68e4,
+ 0x68e5,
+ 0x68e8,
+ 0x68e9,
+ 0x68f1,
+ 0x68f2,
+ 0x68f8,
+ 0x68f9,
+ 0x68fa,
+ 0x68fe,
+ 0x7100,
+ 0x7101,
+ 0x7102,
+ 0x7103,
+ 0x7104,
+ 0x7105,
+ 0x7106,
+ 0x7108,
+ 0x7109,
+ 0x710A,
+ 0x710B,
+ 0x710C,
+ 0x710E,
+ 0x710F,
+ 0x7140,
+ 0x7141,
+ 0x7142,
+ 0x7143,
+ 0x7144,
+ 0x7145,
+ 0x7146,
+ 0x7147,
+ 0x7149,
+ 0x714A,
+ 0x714B,
+ 0x714C,
+ 0x714D,
+ 0x714E,
+ 0x714F,
+ 0x7151,
+ 0x7152,
+ 0x7153,
+ 0x715E,
+ 0x715F,
+ 0x7180,
+ 0x7181,
+ 0x7183,
+ 0x7186,
+ 0x7187,
+ 0x7188,
+ 0x718A,
+ 0x718B,
+ 0x718C,
+ 0x718D,
+ 0x718F,
+ 0x7193,
+ 0x7196,
+ 0x719B,
+ 0x719F,
+ 0x71C0,
+ 0x71C1,
+ 0x71C2,
+ 0x71C3,
+ 0x71C4,
+ 0x71C5,
+ 0x71C6,
+ 0x71C7,
+ 0x71CD,
+ 0x71CE,
+ 0x71D2,
+ 0x71D4,
+ 0x71D5,
+ 0x71D6,
+ 0x71DA,
+ 0x71DE,
+ 0x7200,
+ 0x7210,
+ 0x7211,
+ 0x7240,
+ 0x7243,
+ 0x7244,
+ 0x7245,
+ 0x7246,
+ 0x7247,
+ 0x7248,
+ 0x7249,
+ 0x724A,
+ 0x724B,
+ 0x724C,
+ 0x724D,
+ 0x724E,
+ 0x724F,
+ 0x7280,
+ 0x7281,
+ 0x7283,
+ 0x7284,
+ 0x7287,
+ 0x7288,
+ 0x7289,
+ 0x728B,
+ 0x728C,
+ 0x7290,
+ 0x7291,
+ 0x7293,
+ 0x7297,
+ 0x7834,
+ 0x7835,
+ 0x791e,
+ 0x791f,
+ 0x793f,
+ 0x7941,
+ 0x7942,
+ 0x796c,
+ 0x796d,
+ 0x796e,
+ 0x796f,
+ 0x9400,
+ 0x9401,
+ 0x9402,
+ 0x9403,
+ 0x9405,
+ 0x940A,
+ 0x940B,
+ 0x940F,
+ 0x94A0,
+ 0x94A1,
+ 0x94A3,
+ 0x94B1,
+ 0x94B3,
+ 0x94B4,
+ 0x94B5,
+ 0x94B9,
+ 0x9440,
+ 0x9441,
+ 0x9442,
+ 0x9443,
+ 0x9444,
+ 0x9446,
+ 0x944A,
+ 0x944B,
+ 0x944C,
+ 0x944E,
+ 0x9450,
+ 0x9452,
+ 0x9456,
+ 0x945A,
+ 0x945B,
+ 0x945E,
+ 0x9460,
+ 0x9462,
+ 0x946A,
+ 0x946B,
+ 0x947A,
+ 0x947B,
+ 0x9480,
+ 0x9487,
+ 0x9488,
+ 0x9489,
+ 0x948A,
+ 0x948F,
+ 0x9490,
+ 0x9491,
+ 0x9495,
+ 0x9498,
+ 0x949C,
+ 0x949E,
+ 0x949F,
+ 0x94C0,
+ 0x94C1,
+ 0x94C3,
+ 0x94C4,
+ 0x94C5,
+ 0x94C6,
+ 0x94C7,
+ 0x94C8,
+ 0x94C9,
+ 0x94CB,
+ 0x94CC,
+ 0x94CD,
+ 0x9500,
+ 0x9501,
+ 0x9504,
+ 0x9505,
+ 0x9506,
+ 0x9507,
+ 0x9508,
+ 0x9509,
+ 0x950F,
+ 0x9511,
+ 0x9515,
+ 0x9517,
+ 0x9519,
+ 0x9540,
+ 0x9541,
+ 0x9542,
+ 0x954E,
+ 0x954F,
+ 0x9552,
+ 0x9553,
+ 0x9555,
+ 0x9557,
+ 0x955f,
+ 0x9580,
+ 0x9581,
+ 0x9583,
+ 0x9586,
+ 0x9587,
+ 0x9588,
+ 0x9589,
+ 0x958A,
+ 0x958B,
+ 0x958C,
+ 0x958D,
+ 0x958E,
+ 0x958F,
+ 0x9590,
+ 0x9591,
+ 0x9593,
+ 0x9595,
+ 0x9596,
+ 0x9597,
+ 0x9598,
+ 0x9599,
+ 0x959B,
+ 0x95C0,
+ 0x95C2,
+ 0x95C4,
+ 0x95C5,
+ 0x95C6,
+ 0x95C7,
+ 0x95C9,
+ 0x95CC,
+ 0x95CD,
+ 0x95CE,
+ 0x95CF,
+ 0x9610,
+ 0x9611,
+ 0x9612,
+ 0x9613,
+ 0x9614,
+ 0x9615,
+ 0x9616,
+ 0x9640,
+ 0x9641,
+ 0x9642,
+ 0x9643,
+ 0x9644,
+ 0x9645,
+ 0x9647,
+ 0x9648,
+ 0x9649,
+ 0x964a,
+ 0x964b,
+ 0x964c,
+ 0x964e,
+ 0x964f,
+ 0x9710,
+ 0x9711,
+ 0x9712,
+ 0x9713,
+ 0x9714,
+ 0x9715,
+ 0x9802,
+ 0x9803,
+ 0x9804,
+ 0x9805,
+ 0x9806,
+ 0x9807,
+ 0x9808,
+ 0x9809,
+ 0x980A,
+ 0x9900,
+ 0x9901,
+ 0x9903,
+ 0x9904,
+ 0x9905,
+ 0x9906,
+ 0x9907,
+ 0x9908,
+ 0x9909,
+ 0x990A,
+ 0x990B,
+ 0x990C,
+ 0x990D,
+ 0x990E,
+ 0x990F,
+ 0x9910,
+ 0x9913,
+ 0x9917,
+ 0x9918,
+ 0x9919,
+ 0x9990,
+ 0x9991,
+ 0x9992,
+ 0x9993,
+ 0x9994,
+ 0x9995,
+ 0x9996,
+ 0x9997,
+ 0x9998,
+ 0x9999,
+ 0x999A,
+ 0x999B,
+ 0x999C,
+ 0x999D,
+ 0x99A0,
+ 0x99A2,
+ 0x99A4,
+};
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
@@ -1239,6 +1872,16 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
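+ /* Catch-all entries: any other AMD display-class device is handled via IP
+ * discovery (CHIP_IP_DISCOVERY). Devices owned by the legacy drivers are
+ * rejected in amdgpu_pci_probe() using amdgpu_unsupported_pciidlist.
+ */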
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_VGA << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
+ { PCI_DEVICE(0x1002, PCI_ANY_ID),
+ .class = PCI_CLASS_DISPLAY_OTHER << 8,
+ .class_mask = 0xffffff,
+ .driver_data = CHIP_IP_DISCOVERY },
+
{0, 0, 0}
};
@@ -1252,9 +1895,20 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
struct drm_device *ddev;
struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
- int ret, retry = 0;
+ int ret, retry = 0, i;
bool supports_atomic = false;
+ /* skip devices which are owned by the legacy mach64, r128 or radeon drivers */
+ for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
+ if (amdgpu_unsupported_pciidlist[i] == pdev->device)
+ return -ENODEV;
+ }
+
+ if (flags == 0) {
+ DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n");
+ return -ENODEV;
+ }
+
if (amdgpu_virtual_display ||
amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
supports_atomic = true;
@@ -1269,7 +1923,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
* however, SME requires an indirect IOMMU mapping because the encryption
* bit is beyond the DMA mask of the chip.
*/
- if (mem_encrypt_active() && ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
+ ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
dev_info(&pdev->dev,
"SME is not compatible with RAVEN\n");
return -ENOTSUPP;
@@ -1508,6 +2163,10 @@ static int amdgpu_pmops_resume(struct device *dev)
struct amdgpu_device *adev = drm_to_adev(drm_dev);
int r;
+ /* Avoids registers access if device is physically gone */
+ if (!pci_device_is_present(adev->pdev))
+ adev->no_hw_access = true;
+
r = amdgpu_device_resume(drm_dev, true);
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 8d682befe0d6..3b7e86ea7167 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -266,7 +266,6 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
struct amdgpu_fence_driver *drv = &ring->fence_drv;
struct amdgpu_device *adev = ring->adev;
uint32_t seq, last_seq;
- int r;
do {
last_seq = atomic_read(&ring->fence_drv.last_seq);
@@ -298,12 +297,7 @@ bool amdgpu_fence_process(struct amdgpu_ring *ring)
if (!fence)
continue;
- r = dma_fence_signal(fence);
- if (!r)
- DMA_FENCE_TRACE(fence, "signaled from irq context\n");
- else
- BUG();
-
+ dma_fence_signal(fence);
dma_fence_put(fence);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -556,7 +550,7 @@ void amdgpu_fence_driver_hw_fini(struct amdgpu_device *adev)
drm_sched_stop(&ring->sched, NULL);
/* You can't wait for HW to signal if it's gone */
- if (!drm_dev_is_unplugged(&adev->ddev))
+ if (!drm_dev_is_unplugged(adev_to_drm(adev)))
r = amdgpu_fence_wait_empty(ring);
else
r = -ENODEV;
@@ -684,8 +678,6 @@ static bool amdgpu_fence_enable_signaling(struct dma_fence *f)
if (!timer_pending(&ring->fence_drv.fallback_timer))
amdgpu_fence_schedule_fallback(ring);
- DMA_FENCE_TRACE(f, "armed on ring %i!\n", ring->idx);
-
return true;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
index 76efd5f8950f..d3e4203f6217 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
@@ -34,6 +34,7 @@
#include <asm/set_memory.h>
#endif
#include "amdgpu.h"
+#include <drm/drm_drv.h>
/*
* GART
@@ -230,12 +231,16 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
u64 page_base;
/* Starting from VEGA10, system bit must be 0 to mean invalid. */
uint64_t flags = 0;
+ int idx;
if (!adev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
return -EINVAL;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return 0;
+
t = offset / AMDGPU_GPU_PAGE_SIZE;
p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
for (i = 0; i < pages; i++, p++) {
@@ -254,6 +259,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
for (i = 0; i < adev->num_vmhubs; i++)
amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0);
+ drm_dev_exit(idx);
return 0;
}
@@ -276,12 +282,16 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
{
uint64_t page_base;
unsigned i, j, t;
+ int idx;
if (!adev->gart.ready) {
WARN(1, "trying to bind memory to uninitialized GART !\n");
return -EINVAL;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return 0;
+
t = offset / AMDGPU_GPU_PAGE_SIZE;
for (i = 0; i < pages; i++) {
@@ -291,6 +301,7 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset,
page_base += AMDGPU_GPU_PAGE_SIZE;
}
}
+ drm_dev_exit(idx);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index d6aa032890ee..a573424a6e0b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -60,10 +60,9 @@ static vm_fault_t amdgpu_gem_fault(struct vm_fault *vmf)
goto unlock;
}
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
-
- drm_dev_exit(idx);
+ ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
+ TTM_BO_VM_NUM_PREFAULT, 1);
+ drm_dev_exit(idx);
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index d43fe2ed8116..f851196c83a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -42,10 +42,9 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
-enum gfx_pipe_priority {
- AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
- AMDGPU_GFX_PIPE_PRIO_HIGH,
- AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2
};
/* Argument for PPSMC_MSG_GpuChangeState */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 9ff600a38559..08478fce00f2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -153,10 +153,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
{
void __iomem *ptr = (void *)cpu_pt_addr;
uint64_t value;
- int idx;
-
- if (!drm_dev_enter(&adev->ddev, &idx))
- return 0;
/*
* The following is for PTE only. GART does not have PDEs.
@@ -165,8 +161,6 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr,
value |= flags;
writeq(value, ptr + (gpu_page_idx * 8));
- drm_dev_exit(idx);
-
return 0;
}
@@ -749,6 +743,10 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
adev->gmc.xgmi.physical_node_id * adev->gmc.xgmi.node_segment_size;
u64 vram_end = vram_addr + vram_size;
u64 gart_ptb_gpu_pa = amdgpu_gmc_vram_pa(adev, adev->gart.bo);
+ int idx;
+
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return;
flags |= AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE;
flags |= AMDGPU_PTE_WRITEABLE;
@@ -770,6 +768,7 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev)
flags |= AMDGPU_PDE_BFS(0) | AMDGPU_PTE_SNOOPED;
/* Requires gart_ptb_gpu_pa to be 4K aligned */
amdgpu_gmc_set_pte_pde(adev, adev->gmc.ptr_pdb0, i, gart_ptb_gpu_pa, flags);
+ drm_dev_exit(idx);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index c076a6b9a5a2..bc1297dcdf97 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -300,20 +300,15 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
*/
int amdgpu_ib_pool_init(struct amdgpu_device *adev)
{
- unsigned size;
int r, i;
if (adev->ib_pool_ready)
return 0;
for (i = 0; i < AMDGPU_IB_POOL_MAX; i++) {
- if (i == AMDGPU_IB_POOL_DIRECT)
- size = PAGE_SIZE * 6;
- else
- size = AMDGPU_IB_POOL_SIZE;
-
r = amdgpu_sa_bo_manager_init(adev, &adev->ib_pools[i],
- size, AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_SIZE,
+ AMDGPU_GPU_PAGE_SIZE,
AMDGPU_GEM_DOMAIN_GTT);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index de29518673dd..bfc47bea23db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -38,7 +38,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
struct amdgpu_device *adev = ring->adev;
int idx;
- if (!drm_dev_enter(&adev->ddev, &idx)) {
+ if (!drm_dev_enter(adev_to_drm(adev), &idx)) {
DRM_INFO("%s - device unplugged skipping recovery on scheduler:%s",
__func__, s_job->sched->name);
@@ -182,9 +182,11 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (r)
return r;
+ drm_sched_job_arm(&job->base);
+
*f = dma_fence_get(&job->base.s_fence->finished);
amdgpu_job_free_resources(job);
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
return 0;
}
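A minimal sketch of the submission ordering introduced above, reusing the names from the hunk: the job is armed before its finished fence is referenced and before it is pushed to the entity, as the updated drm_sched interface expects.

	/* Arm the job first, then take the finished fence, then push. */
	drm_sched_job_arm(&job->base);
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	drm_sched_entity_push_job(&job->base);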
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 7e45640fbee0..dfe667ea8b05 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -340,28 +340,35 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
case AMDGPU_INFO_FW_TA:
switch (query_fw->index) {
case TA_FW_TYPE_PSP_XGMI:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.xgmi.feature_version;
+ fw_info->ver = adev->psp.xgmi_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.xgmi_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAS:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.ras.feature_version;
+ fw_info->ver = adev->psp.ras_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.ras_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_HDCP:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.hdcp.feature_version;
+ fw_info->ver = adev->psp.hdcp_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.hdcp_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_DTM:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.dtm.feature_version;
+ fw_info->ver = adev->psp.dtm_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.dtm_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_RAP:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.rap.feature_version;
+ fw_info->ver = adev->psp.rap_context.context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.rap_context.context
+ .bin_desc.feature_version;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
- fw_info->ver = adev->psp.ta_fw_version;
- fw_info->feature = adev->psp.securedisplay.feature_version;
+ fw_info->ver = adev->psp.securedisplay_context.context.bin_desc.fw_version;
+ fw_info->feature =
+ adev->psp.securedisplay_context.context.bin_desc
+ .feature_version;
break;
default:
return -EINVAL;
@@ -378,8 +385,8 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
fw_info->feature = adev->psp.sos.feature_version;
break;
case AMDGPU_INFO_FW_ASD:
- fw_info->ver = adev->psp.asd.fw_version;
- fw_info->feature = adev->psp.asd.feature_version;
+ fw_info->ver = adev->psp.asd_context.bin_desc.fw_version;
+ fw_info->feature = adev->psp.asd_context.bin_desc.feature_version;
break;
case AMDGPU_INFO_FW_DMCU:
fw_info->ver = adev->dm.dmcu_fw_version;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
index a2d3dbbf7d25..ce538f4819f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c
@@ -31,7 +31,7 @@ void amdgpu_mca_query_correctable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
- uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if (REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)
@@ -42,7 +42,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr,
unsigned long *error_count)
{
- uint64_t mc_status = RREG64_PCIE(mc_status_addr * 4);
+ uint64_t mc_status = RREG64_PCIE(mc_status_addr);
if ((REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) &&
(REG_GET_FIELD(mc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 ||
@@ -56,7 +56,7 @@ void amdgpu_mca_query_uncorrectable_error_count(struct amdgpu_device *adev,
void amdgpu_mca_reset_error_count(struct amdgpu_device *adev,
uint64_t mc_status_addr)
{
- WREG64_PCIE(mc_status_addr * 4, 0x0ULL);
+ WREG64_PCIE(mc_status_addr, 0x0ULL);
}
void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev,
@@ -87,8 +87,8 @@ int amdgpu_mca_ras_late_init(struct amdgpu_device *adev,
if (!mca_dev->ras_if)
return -ENOMEM;
mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block;
+ mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block;
mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
- mca_dev->ras_if->sub_block_index = 0;
}
ih_info.head = fs_info.head = *mca_dev->ras_if;
r = amdgpu_ras_late_init(adev, mca_dev->ras_if,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
index f860f2f0e296..c74bc7177066 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h
@@ -29,6 +29,7 @@ struct amdgpu_mca_ras_funcs {
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
uint32_t ras_block;
+ uint32_t ras_sub_block;
const char* sysfs_name;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 01a78c786536..aeb92e5677ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -695,40 +695,6 @@ int amdgpu_bo_create_vm(struct amdgpu_device *adev,
}
/**
- * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
- * @bo: pointer to the buffer object
- *
- * Sets placement according to domain; and changes placement and caching
- * policy of the buffer object according to the placement.
- * This is used for validating shadow bos. It calls ttm_bo_validate() to
- * make sure the buffer is resident where it needs to be.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_validate(struct amdgpu_bo *bo)
-{
- struct ttm_operation_ctx ctx = { false, false };
- uint32_t domain;
- int r;
-
- if (bo->tbo.pin_count)
- return 0;
-
- domain = bo->preferred_domains;
-
-retry:
- amdgpu_bo_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
-
- return r;
-}
-
-/**
* amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
* @vmbo: BO that will be inserted into the shadow list
@@ -1038,29 +1004,6 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
}
}
-/**
- * amdgpu_bo_evict_vram - evict VRAM buffers
- * @adev: amdgpu device object
- *
- * Evicts all VRAM buffers on the lru list of the memory type.
- * Mainly used for evicting vram at suspend time.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
-{
- struct ttm_resource_manager *man;
-
- if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
- /* No need to evict vram on APUs for suspend to ram */
- return 0;
- }
-
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
-}
-
static const char *amdgpu_vram_names[] = {
"UNKNOWN",
"GDDR1",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9d6c001c15f8..4c9cbdc66995 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -304,7 +304,6 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain);
int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
u64 min_offset, u64 max_offset);
void amdgpu_bo_unpin(struct amdgpu_bo *bo);
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev);
int amdgpu_bo_init(struct amdgpu_device *adev);
void amdgpu_bo_fini(struct amdgpu_device *adev);
int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags);
@@ -327,7 +326,6 @@ int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
-int amdgpu_bo_validate(struct amdgpu_bo *bo);
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
uint64_t *gtt_mem, uint64_t *cpu_mem);
void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9b41cb8c3de5..c641f84649d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -46,6 +46,10 @@ static int psp_sysfs_init(struct amdgpu_device *adev);
static void psp_sysfs_fini(struct amdgpu_device *adev);
static int psp_load_smu_fw(struct psp_context *psp);
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context);
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context);
+static int psp_rap_terminate(struct psp_context *psp);
+static int psp_securedisplay_terminate(struct psp_context *psp);
/*
* Due to DF Cstate management centralized to PMFW, the firmware
@@ -61,23 +65,32 @@ static int psp_load_smu_fw(struct psp_context *psp);
*
* This new sequence is required for
* - Arcturus and onwards
- * - Navi12 and onwards
*/
static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
- psp->pmfw_centralized_cstate_management = false;
-
- if (amdgpu_sriov_vf(adev))
- return;
-
- if (adev->flags & AMD_IS_APU)
+ if (amdgpu_sriov_vf(adev)) {
+ psp->pmfw_centralized_cstate_management = false;
return;
+ }
- if ((adev->asic_type >= CHIP_ARCTURUS) ||
- (adev->asic_type >= CHIP_NAVI12))
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 4):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(13, 0, 2):
psp->pmfw_centralized_cstate_management = true;
+ break;
+ default:
+ psp->pmfw_centralized_cstate_management = false;
+ break;
+ }
}
static int psp_early_init(void *handle)
@@ -85,43 +98,45 @@ static int psp_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct psp_context *psp = &adev->psp;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
psp_v3_1_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
psp_v10_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = false;
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
psp_v11_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(11, 0, 3):
+ case IP_VERSION(12, 0, 1):
psp_v12_0_set_psp_funcs(psp);
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(13, 0, 2):
psp_v13_0_set_psp_funcs(psp);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
psp_v13_0_set_psp_funcs(psp);
psp->autoload_supported = true;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
psp_v11_0_8_set_psp_funcs(psp);
psp->autoload_supported = false;
@@ -264,7 +279,8 @@ static int psp_sw_init(void *handle)
DRM_ERROR("Failed to load psp firmware!\n");
return ret;
}
- } else if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_ALDEBARAN) {
+ } else if (amdgpu_sriov_vf(adev) &&
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) {
ret = psp_init_ta_microcode(psp, "aldebaran");
if (ret) {
DRM_ERROR("Failed to initialize ta microcode!\n");
@@ -307,7 +323,8 @@ static int psp_sw_init(void *handle)
}
}
- if (adev->asic_type == CHIP_NAVI10 || adev->asic_type == CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) {
ret= psp_sysfs_init(adev);
if (ret) {
return ret;
@@ -337,8 +354,8 @@ static int psp_sw_fini(void *handle)
psp->ta_fw = NULL;
}
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7))
psp_sysfs_fini(adev);
kfree(cmd);
@@ -424,7 +441,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
if (psp->adev->no_hw_access)
return 0;
- if (!drm_dev_enter(&psp->adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return 0;
memset(psp->cmd_buf_mem, 0, PSP_CMD_BUFFER_SIZE);
@@ -597,10 +614,10 @@ static int psp_tmr_init(struct psp_context *psp)
static bool psp_skip_tmr(struct psp_context *psp)
{
- switch (psp->adev->asic_type) {
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_ALDEBARAN:
+ switch (psp->adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(13, 0, 2):
return true;
default:
return false;
@@ -778,46 +795,29 @@ static int psp_rl_load(struct amdgpu_device *adev)
return ret;
}
-static void psp_prep_asd_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
- uint64_t asd_mc, uint32_t size)
+static int psp_asd_load(struct psp_context *psp)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_ASD;
- cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(asd_mc);
- cmd->cmd.cmd_load_ta.app_len = size;
-
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = 0;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = 0;
- cmd->cmd.cmd_load_ta.cmd_buf_len = 0;
+ return psp_ta_load(psp, &psp->asd_context);
}
-static int psp_asd_load(struct psp_context *psp)
+static int psp_asd_initialize(struct psp_context *psp)
{
int ret;
- struct psp_gfx_cmd_resp *cmd;
/* If PSP version doesn't match ASD version, ASD loading will fail.
* add workaround to bypass it for sriov now.
* TODO: add version check to make it common
*/
- if (amdgpu_sriov_vf(psp->adev) || !psp->asd.size_bytes)
+ if (amdgpu_sriov_vf(psp->adev) || !psp->asd_context.bin_desc.size_bytes)
return 0;
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_copy_fw(psp, psp->asd.start_addr, psp->asd.size_bytes);
-
- psp_prep_asd_load_cmd_buf(cmd, psp->fw_pri_mc_addr,
- psp->asd.size_bytes);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
- if (!ret) {
- psp->asd_context.asd_initialized = true;
- psp->asd_context.session_id = cmd->resp.session_id;
- }
+ psp->asd_context.mem_context.shared_mc_addr = 0;
+ psp->asd_context.mem_context.shared_mem_size = PSP_ASD_SHARED_MEM_SIZE;
+ psp->asd_context.ta_load_type = GFX_CMD_ID_LOAD_ASD;
- release_psp_cmd_buf(psp);
+ ret = psp_asd_load(psp);
+ if (!ret)
+ psp->asd_context.initialized = true;
return ret;
}
@@ -829,27 +829,39 @@ static void psp_prep_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd,
cmd->cmd.cmd_unload_ta.session_id = session_id;
}
+static int psp_ta_unload(struct psp_context *psp, struct ta_context *context)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
+
+ psp_prep_ta_unload_cmd_buf(cmd, context->session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ release_psp_cmd_buf(psp);
+
+ return ret;
+}
+
static int psp_asd_unload(struct psp_context *psp)
{
+ return psp_ta_unload(psp, &psp->asd_context);
+}
+
+static int psp_asd_terminate(struct psp_context *psp)
+{
int ret;
- struct psp_gfx_cmd_resp *cmd;
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->asd_context.asd_initialized)
+ if (!psp->asd_context.initialized)
return 0;
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->asd_context.session_id);
+ ret = psp_asd_unload(psp);
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
if (!ret)
- psp->asd_context.asd_initialized = false;
-
- release_psp_cmd_buf(psp);
+ psp->asd_context.initialized = false;
return ret;
}
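A minimal sketch of the shape the per-TA wrappers converge on with the shared psp_ta_load()/psp_ta_unload()/psp_ta_invoke() helpers; "foo" and psp->foo_context are hypothetical placeholders for any TA that carries a struct ta_context.

static int psp_foo_load(struct psp_context *psp)
{
	return psp_ta_load(psp, &psp->foo_context.context);
}

static int psp_foo_unload(struct psp_context *psp)
{
	return psp_ta_unload(psp, &psp->foo_context.context);
}

static int psp_foo_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
	return psp_ta_invoke(psp, ta_cmd_id, &psp->foo_context.context);
}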
@@ -885,23 +897,22 @@ int psp_reg_program(struct psp_context *psp, enum psp_reg_prog_id reg,
static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd,
uint64_t ta_bin_mc,
- uint32_t ta_bin_size,
- uint64_t ta_shared_mc,
- uint32_t ta_shared_size)
+ struct ta_context *context)
{
- cmd->cmd_id = GFX_CMD_ID_LOAD_TA;
+ cmd->cmd_id = context->ta_load_type;
cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(ta_bin_mc);
cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(ta_bin_mc);
- cmd->cmd.cmd_load_ta.app_len = ta_bin_size;
+ cmd->cmd.cmd_load_ta.app_len = context->bin_desc.size_bytes;
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(ta_shared_mc);
- cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(ta_shared_mc);
- cmd->cmd.cmd_load_ta.cmd_buf_len = ta_shared_size;
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo =
+ lower_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi =
+ upper_32_bits(context->mem_context.shared_mc_addr);
+ cmd->cmd.cmd_load_ta.cmd_buf_len = context->mem_context.shared_mem_size;
}
static int psp_ta_init_shared_buf(struct psp_context *psp,
- struct ta_mem_context *mem_ctx,
- uint32_t shared_mem_size)
+ struct ta_mem_context *mem_ctx)
{
int ret;
@@ -909,8 +920,8 @@ static int psp_ta_init_shared_buf(struct psp_context *psp,
* Allocate 16k memory aligned to 4k from Frame Buffer (local
* physical) for ta to host memory
*/
- ret = amdgpu_bo_create_kernel(psp->adev, shared_mem_size, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
+ ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
&mem_ctx->shared_bo,
&mem_ctx->shared_mc_addr,
&mem_ctx->shared_buf);
@@ -926,8 +937,7 @@ static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx)
static int psp_xgmi_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context,
- PSP_XGMI_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->xgmi_context.context.mem_context);
}
static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
@@ -941,12 +951,12 @@ static void psp_prep_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd,
static int psp_ta_invoke(struct psp_context *psp,
uint32_t ta_cmd_id,
- uint32_t session_id)
+ struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
- psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, session_id);
+ psp_prep_ta_invoke_cmd_buf(cmd, ta_cmd_id, context->session_id);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
@@ -956,31 +966,23 @@ static int psp_ta_invoke(struct psp_context *psp,
return ret;
}
-static int psp_xgmi_load(struct psp_context *psp)
+static int psp_ta_load(struct psp_context *psp, struct ta_context *context)
{
int ret;
struct psp_gfx_cmd_resp *cmd;
- /*
- * TODO: bypass the loading in sriov for now
- */
-
cmd = acquire_psp_cmd_buf(psp);
- psp_copy_fw(psp, psp->xgmi.start_addr, psp->xgmi.size_bytes);
+ psp_copy_fw(psp, context->bin_desc.start_addr,
+ context->bin_desc.size_bytes);
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->xgmi.size_bytes,
- psp->xgmi_context.context.mem_context.shared_mc_addr,
- PSP_XGMI_SHARED_MEM_SIZE);
+ psp_prep_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, context);
ret = psp_cmd_submit_buf(psp, NULL, cmd,
psp->fence_buf_mc_addr);
if (!ret) {
- psp->xgmi_context.context.initialized = true;
- psp->xgmi_context.context.session_id = cmd->resp.session_id;
+ context->session_id = cmd->resp.session_id;
}
release_psp_cmd_buf(psp);
@@ -988,41 +990,31 @@ static int psp_xgmi_load(struct psp_context *psp)
return ret;
}
-static int psp_xgmi_unload(struct psp_context *psp)
+static int psp_xgmi_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
- struct amdgpu_device *adev = psp->adev;
-
- /* XGMI TA unload currently is not supported on Arcturus/Aldebaran A+A */
- if (adev->asic_type == CHIP_ARCTURUS ||
- (adev->asic_type == CHIP_ALDEBARAN && adev->gmc.xgmi.connected_to_cpu))
- return 0;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->xgmi_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
+ return psp_ta_load(psp, &psp->xgmi_context.context);
+}
- return ret;
+static int psp_xgmi_unload(struct psp_context *psp)
+{
+ return psp_ta_unload(psp, &psp->xgmi_context.context);
}
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
- return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->xgmi_context.context);
}
int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
+ struct amdgpu_device *adev = psp->adev;
+
+ /* XGMI TA unload is currently not supported on Arcturus/Aldebaran A+A */
+ if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ adev->gmc.xgmi.connected_to_cpu))
+ return 0;
if (!psp->xgmi_context.context.initialized)
return 0;
@@ -1045,13 +1037,16 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
int ret;
if (!psp->ta_fw ||
- !psp->xgmi.size_bytes ||
- !psp->xgmi.start_addr)
+ !psp->xgmi_context.context.bin_desc.size_bytes ||
+ !psp->xgmi_context.context.bin_desc.start_addr)
return -ENOENT;
if (!load_ta)
goto invoke;
+ psp->xgmi_context.context.mem_context.shared_mem_size = PSP_XGMI_SHARED_MEM_SIZE;
+ psp->xgmi_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->xgmi_context.context.initialized) {
ret = psp_xgmi_init_shared_buf(psp);
if (ret)
@@ -1060,7 +1055,9 @@ int psp_xgmi_initialize(struct psp_context *psp, bool set_extended_data, bool lo
/* Load XGMI TA */
ret = psp_xgmi_load(psp);
- if (ret)
+ if (!ret)
+ psp->xgmi_context.context.initialized = true;
+ else
return ret;
invoke:
@@ -1117,8 +1114,8 @@ int psp_xgmi_get_node_id(struct psp_context *psp, uint64_t *node_id)
static bool psp_xgmi_peer_link_info_supported(struct psp_context *psp)
{
- return psp->adev->asic_type == CHIP_ALDEBARAN &&
- psp->xgmi.feature_version >= 0x2000000b;
+ return psp->adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2) &&
+ psp->xgmi_context.context.bin_desc.fw_version >= 0x2000000b;
}
/*
@@ -1282,80 +1279,40 @@ int psp_xgmi_set_topology_info(struct psp_context *psp,
// ras begin
static int psp_ras_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context,
- PSP_RAS_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->ras_context.context.mem_context);
}
static int psp_ras_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
- struct ta_ras_shared_memory *ras_cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->ras.start_addr, psp->ras.size_bytes);
-
- ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
-
- if (psp->adev->gmc.xgmi.connected_to_cpu)
- ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
- else
- ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->ras.size_bytes,
- psp->ras_context.context.mem_context.shared_mc_addr,
- PSP_RAS_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->ras_context.context.session_id = cmd->resp.session_id;
-
- if (!ras_cmd->ras_status)
- psp->ras_context.context.initialized = true;
- else
- dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
- }
-
- release_psp_cmd_buf(psp);
-
- if (ret || ras_cmd->ras_status)
- amdgpu_ras_fini(psp->adev);
-
- return ret;
+ return psp_ta_load(psp, &psp->ras_context.context);
}
static int psp_ras_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->ras_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd,
- psp->fence_buf_mc_addr);
+ return psp_ta_unload(psp, &psp->ras_context.context);
+}
- release_psp_cmd_buf(psp);
+static void psp_ras_ta_check_status(struct psp_context *psp)
+{
+ struct ta_ras_shared_memory *ras_cmd =
+ (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
- return ret;
+ switch (ras_cmd->ras_status) {
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_IP:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported ip\n");
+ break;
+ case TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: cmd failed due to unsupported error injection\n");
+ break;
+ case TA_RAS_STATUS__SUCCESS:
+ break;
+ default:
+ dev_warn(psp->adev->dev,
+ "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status);
+ break;
+ }
}
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1371,7 +1328,7 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->ras_context.context.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->ras_context.context);
if (amdgpu_ras_intr_triggered())
return ret;
@@ -1391,31 +1348,8 @@ int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
else if (ras_cmd->ras_out_message.flags.reg_access_failure_flag)
dev_warn(psp->adev->dev,
"RAS internal register access blocked\n");
- }
-
- return ret;
-}
-static int psp_ras_status_to_errno(struct amdgpu_device *adev,
- enum ta_ras_status ras_status)
-{
- int ret = -EINVAL;
-
- switch (ras_status) {
- case TA_RAS_STATUS__SUCCESS:
- ret = 0;
- break;
- case TA_RAS_STATUS__RESET_NEEDED:
- ret = -EAGAIN;
- break;
- case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE:
- dev_warn(adev->dev, "RAS WARN: ras function unavailable\n");
- break;
- case TA_RAS_STATUS__ERROR_ASD_READ_WRITE:
- dev_warn(adev->dev, "RAS WARN: asd read or write failed\n");
- break;
- default:
- dev_err(adev->dev, "RAS ERROR: ras function failed ret 0x%X\n", ret);
+ psp_ras_ta_check_status(psp);
}
return ret;
@@ -1444,7 +1378,7 @@ int psp_ras_enable_features(struct psp_context *psp,
if (ret)
return -EINVAL;
- return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+ return 0;
}
static int psp_ras_terminate(struct psp_context *psp)
@@ -1477,6 +1411,7 @@ static int psp_ras_initialize(struct psp_context *psp)
int ret;
uint32_t boot_cfg = 0xFF;
struct amdgpu_device *adev = psp->adev;
+ struct ta_ras_shared_memory *ras_cmd;
/*
* TODO: bypass the initialize in sriov for now
@@ -1484,8 +1419,8 @@ static int psp_ras_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(adev))
return 0;
- if (!adev->psp.ras.size_bytes ||
- !adev->psp.ras.start_addr) {
+ if (!adev->psp.ras_context.context.bin_desc.size_bytes ||
+ !adev->psp.ras_context.context.bin_desc.start_addr) {
dev_info(adev->dev, "RAS: optional ras ta ucode is not available\n");
return 0;
}
@@ -1531,17 +1466,34 @@ static int psp_ras_initialize(struct psp_context *psp)
}
}
+ psp->ras_context.context.mem_context.shared_mem_size = PSP_RAS_SHARED_MEM_SIZE;
+ psp->ras_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->ras_context.context.initialized) {
ret = psp_ras_init_shared_buf(psp);
if (ret)
return ret;
}
+ ras_cmd = (struct ta_ras_shared_memory *)psp->ras_context.context.mem_context.shared_buf;
+ memset(ras_cmd, 0, sizeof(struct ta_ras_shared_memory));
+
+ if (amdgpu_ras_is_poison_mode_supported(adev))
+ ras_cmd->ras_in_message.init_flags.poison_mode_en = 1;
+ if (!adev->gmc.xgmi.connected_to_cpu)
+ ras_cmd->ras_in_message.init_flags.dgpu_mode = 1;
+
ret = psp_ras_load(psp);
- if (ret)
- return ret;
- return 0;
+ if (!ret && !ras_cmd->ras_status)
+ psp->ras_context.context.initialized = true;
+ else {
+ if (ras_cmd->ras_status)
+ dev_warn(psp->adev->dev, "RAS Init Status: 0x%X\n", ras_cmd->ras_status);
+ amdgpu_ras_fini(psp->adev);
+ }
+
+ return ret;
}
int psp_ras_trigger_error(struct psp_context *psp,
@@ -1568,51 +1520,24 @@ int psp_ras_trigger_error(struct psp_context *psp,
if (amdgpu_ras_intr_triggered())
return 0;
- return psp_ras_status_to_errno(psp->adev, ras_cmd->ras_status);
+ if (ras_cmd->ras_status)
+ return -EINVAL;
+
+ return 0;
}
// ras end
// HDCP start
static int psp_hdcp_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context,
- PSP_HDCP_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->hdcp_context.context.mem_context);
}
static int psp_hdcp_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->hdcp.start_addr,
- psp->hdcp.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->hdcp.size_bytes,
- psp->hdcp_context.context.mem_context.shared_mc_addr,
- PSP_HDCP_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->hdcp_context.context.initialized = true;
- psp->hdcp_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->hdcp_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->hdcp_context.context);
}
+
static int psp_hdcp_initialize(struct psp_context *psp)
{
int ret;
@@ -1623,12 +1548,15 @@ static int psp_hdcp_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->hdcp.size_bytes ||
- !psp->hdcp.start_addr) {
+ if (!psp->hdcp_context.context.bin_desc.size_bytes ||
+ !psp->hdcp_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "HDCP: optional hdcp ta ucode is not available\n");
return 0;
}
+ psp->hdcp_context.context.mem_context.shared_mem_size = PSP_HDCP_SHARED_MEM_SIZE;
+ psp->hdcp_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->hdcp_context.context.initialized) {
ret = psp_hdcp_init_shared_buf(psp);
if (ret)
@@ -1636,32 +1564,17 @@ static int psp_hdcp_initialize(struct psp_context *psp)
}
ret = psp_hdcp_load(psp);
- if (ret)
- return ret;
+ if (!ret) {
+ psp->hdcp_context.context.initialized = true;
+ mutex_init(&psp->hdcp_context.mutex);
+ }
- return 0;
+ return ret;
}
static int psp_hdcp_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->hdcp_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->hdcp_context.context);
}
int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1672,7 +1585,7 @@ int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->hdcp_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->hdcp_context.context);
}
static int psp_hdcp_terminate(struct psp_context *psp)
@@ -1709,42 +1622,12 @@ out:
// DTM start
static int psp_dtm_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context,
- PSP_DTM_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->dtm_context.context.mem_context);
}
static int psp_dtm_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the loading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- psp_copy_fw(psp, psp->dtm.start_addr, psp->dtm.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->dtm.size_bytes,
- psp->dtm_context.context.mem_context.shared_mc_addr,
- PSP_DTM_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->dtm_context.context.initialized = true;
- psp->dtm_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->dtm_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->dtm_context.context);
}
static int psp_dtm_initialize(struct psp_context *psp)
@@ -1757,12 +1640,15 @@ static int psp_dtm_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->dtm.size_bytes ||
- !psp->dtm.start_addr) {
+ if (!psp->dtm_context.context.bin_desc.size_bytes ||
+ !psp->dtm_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "DTM: optional dtm ta ucode is not available\n");
return 0;
}
+ psp->dtm_context.context.mem_context.shared_mem_size = PSP_DTM_SHARED_MEM_SIZE;
+ psp->dtm_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->dtm_context.context.initialized) {
ret = psp_dtm_init_shared_buf(psp);
if (ret)
@@ -1770,32 +1656,17 @@ static int psp_dtm_initialize(struct psp_context *psp)
}
ret = psp_dtm_load(psp);
- if (ret)
- return ret;
+ if (!ret) {
+ psp->dtm_context.context.initialized = true;
+ mutex_init(&psp->dtm_context.mutex);
+ }
- return 0;
+ return ret;
}
static int psp_dtm_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- /*
- * TODO: bypass the unloading in sriov for now
- */
- if (amdgpu_sriov_vf(psp->adev))
- return 0;
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->dtm_context.context);
}
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
@@ -1806,7 +1677,7 @@ int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- return psp_ta_invoke(psp, ta_cmd_id, psp->dtm_context.context.session_id);
+ return psp_ta_invoke(psp, ta_cmd_id, &psp->dtm_context.context);
}
static int psp_dtm_terminate(struct psp_context *psp)
@@ -1843,50 +1714,17 @@ out:
// RAP start
static int psp_rap_init_shared_buf(struct psp_context *psp)
{
- return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context,
- PSP_RAP_SHARED_MEM_SIZE);
+ return psp_ta_init_shared_buf(psp, &psp->rap_context.context.mem_context);
}
static int psp_rap_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd;
-
- psp_copy_fw(psp, psp->rap.start_addr, psp->rap.size_bytes);
-
- cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->rap.size_bytes,
- psp->rap_context.context.mem_context.shared_mc_addr,
- PSP_RAP_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->rap_context.context.initialized = true;
- psp->rap_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->rap_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->rap_context.context);
}
static int psp_rap_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->rap_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->rap_context.context);
}
static int psp_rap_initialize(struct psp_context *psp)
@@ -1900,12 +1738,15 @@ static int psp_rap_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->rap.size_bytes ||
- !psp->rap.start_addr) {
+ if (!psp->rap_context.context.bin_desc.size_bytes ||
+ !psp->rap_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "RAP: optional rap ta ucode is not available\n");
return 0;
}
+ psp->rap_context.context.mem_context.shared_mem_size = PSP_RAP_SHARED_MEM_SIZE;
+ psp->rap_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->rap_context.context.initialized) {
ret = psp_rap_init_shared_buf(psp);
if (ret)
@@ -1913,16 +1754,15 @@ static int psp_rap_initialize(struct psp_context *psp)
}
ret = psp_rap_load(psp);
- if (ret)
+ if (!ret) {
+ psp->rap_context.context.initialized = true;
+ mutex_init(&psp->rap_context.mutex);
+ } else
return ret;
ret = psp_rap_invoke(psp, TA_CMD_RAP__INITIALIZE, &status);
if (ret || status != TA_RAP_STATUS__SUCCESS) {
- psp_rap_unload(psp);
-
- psp_ta_free_shared_buf(&psp->rap_context.context.mem_context);
-
- psp->rap_context.context.initialized = false;
+ psp_rap_terminate(psp);
dev_warn(psp->adev->dev, "RAP TA initialize fail (%d) status %d.\n",
ret, status);
@@ -1971,7 +1811,7 @@ int psp_rap_invoke(struct psp_context *psp, uint32_t ta_cmd_id, enum ta_rap_stat
rap_cmd->cmd_id = ta_cmd_id;
rap_cmd->validation_method_id = METHOD_A;
- ret = psp_ta_invoke(psp, rap_cmd->cmd_id, psp->rap_context.context.session_id);
+ ret = psp_ta_invoke(psp, rap_cmd->cmd_id, &psp->rap_context.context);
if (ret)
goto out_unlock;
@@ -1989,49 +1829,17 @@ out_unlock:
static int psp_securedisplay_init_shared_buf(struct psp_context *psp)
{
return psp_ta_init_shared_buf(
- psp, &psp->securedisplay_context.context.mem_context,
- PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
+ psp, &psp->securedisplay_context.context.mem_context);
}
static int psp_securedisplay_load(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- memset(psp->fw_pri_buf, 0, PSP_1_MEG);
- memcpy(psp->fw_pri_buf, psp->securedisplay.start_addr, psp->securedisplay.size_bytes);
-
- psp_prep_ta_load_cmd_buf(cmd,
- psp->fw_pri_mc_addr,
- psp->securedisplay.size_bytes,
- psp->securedisplay_context.context.mem_context.shared_mc_addr,
- PSP_SECUREDISPLAY_SHARED_MEM_SIZE);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- if (!ret) {
- psp->securedisplay_context.context.initialized = true;
- psp->securedisplay_context.context.session_id = cmd->resp.session_id;
- mutex_init(&psp->securedisplay_context.mutex);
- }
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_load(psp, &psp->securedisplay_context.context);
}
static int psp_securedisplay_unload(struct psp_context *psp)
{
- int ret;
- struct psp_gfx_cmd_resp *cmd = acquire_psp_cmd_buf(psp);
-
- psp_prep_ta_unload_cmd_buf(cmd, psp->securedisplay_context.context.session_id);
-
- ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
-
- release_psp_cmd_buf(psp);
-
- return ret;
+ return psp_ta_unload(psp, &psp->securedisplay_context.context);
}
static int psp_securedisplay_initialize(struct psp_context *psp)
@@ -2045,12 +1853,16 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
if (amdgpu_sriov_vf(psp->adev))
return 0;
- if (!psp->securedisplay.size_bytes ||
- !psp->securedisplay.start_addr) {
+ if (!psp->securedisplay_context.context.bin_desc.size_bytes ||
+ !psp->securedisplay_context.context.bin_desc.start_addr) {
dev_info(psp->adev->dev, "SECUREDISPLAY: securedisplay ta ucode is not available\n");
return 0;
}
+ psp->securedisplay_context.context.mem_context.shared_mem_size =
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE;
+ psp->securedisplay_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;
+
if (!psp->securedisplay_context.context.initialized) {
ret = psp_securedisplay_init_shared_buf(psp);
if (ret)
@@ -2058,7 +1870,10 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
}
ret = psp_securedisplay_load(psp);
- if (ret)
+ if (!ret) {
+ psp->securedisplay_context.context.initialized = true;
+ mutex_init(&psp->securedisplay_context.mutex);
+ } else
return ret;
psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
@@ -2066,12 +1881,7 @@ static int psp_securedisplay_initialize(struct psp_context *psp)
ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__QUERY_TA);
if (ret) {
- psp_securedisplay_unload(psp);
-
- psp_ta_free_shared_buf(&psp->securedisplay_context.context.mem_context);
-
- psp->securedisplay_context.context.initialized = false;
-
+ psp_securedisplay_terminate(psp);
dev_err(psp->adev->dev, "SECUREDISPLAY TA initialize fail.\n");
return -EINVAL;
}
@@ -2123,7 +1933,7 @@ int psp_securedisplay_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
mutex_lock(&psp->securedisplay_context.mutex);
- ret = psp_ta_invoke(psp, ta_cmd_id, psp->securedisplay_context.context.session_id);
+ ret = psp_ta_invoke(psp, ta_cmd_id, &psp->securedisplay_context.context);
mutex_unlock(&psp->securedisplay_context.mutex);
@@ -2443,8 +2253,8 @@ static int psp_load_smu_fw(struct psp_context *psp)
if ((amdgpu_in_reset(adev) &&
ras && adev->ras_enabled &&
- (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_VEGA20))) {
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2)))) {
ret = amdgpu_dpm_set_mp1_state(adev, PP_MP1_STATE_UNLOAD);
if (ret) {
DRM_WARN("Failed to set MP1 state prepare for reload\n");
@@ -2541,8 +2351,9 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
continue;
if (psp->autoload_supported &&
- (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_DIMGREY_CAVEFISH) &&
+ (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 11) ||
+ adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 12)) &&
(ucode->ucode_id == AMDGPU_UCODE_ID_SDMA1 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA2 ||
ucode->ucode_id == AMDGPU_UCODE_ID_SDMA3))
@@ -2629,7 +2440,7 @@ skip_memalloc:
if (ret)
goto failed;
- ret = psp_asd_load(psp);
+ ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
return ret;
@@ -2721,7 +2532,7 @@ static int psp_hw_fini(void *handle)
psp_hdcp_terminate(psp);
}
- psp_asd_unload(psp);
+ psp_asd_terminate(psp);
psp_tmr_terminate(psp);
psp_ring_destroy(psp, PSP_RING_TYPE__KM);
@@ -2779,9 +2590,9 @@ static int psp_suspend(void *handle)
}
}
- ret = psp_asd_unload(psp);
+ ret = psp_asd_terminate(psp);
if (ret) {
- DRM_ERROR("Failed to unload asd\n");
+ DRM_ERROR("Failed to terminate asd\n");
return ret;
}
@@ -2826,12 +2637,18 @@ static int psp_resume(void *handle)
if (ret)
goto failed;
- ret = psp_asd_load(psp);
+ ret = psp_asd_initialize(psp);
if (ret) {
DRM_ERROR("PSP load asd failed!\n");
goto failed;
}
+ ret = psp_rl_load(adev);
+ if (ret) {
+ dev_err(adev->dev, "PSP load RL failed!\n");
+ goto failed;
+ }
+
if (adev->gmc.xgmi.num_physical_nodes > 1) {
ret = psp_xgmi_initialize(psp, false, true);
/* Warning the XGMI seesion initialize failure
@@ -2994,10 +2811,10 @@ int psp_init_asd_microcode(struct psp_context *psp,
goto out;
asd_hdr = (const struct psp_firmware_header_v1_0 *)adev->psp.asd_fw->data;
- adev->psp.asd.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
- adev->psp.asd.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
- adev->psp.asd.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
- adev->psp.asd.start_addr = (uint8_t *)asd_hdr +
+ adev->psp.asd_context.bin_desc.fw_version = le32_to_cpu(asd_hdr->header.ucode_version);
+ adev->psp.asd_context.bin_desc.feature_version = le32_to_cpu(asd_hdr->sos.fw_version);
+ adev->psp.asd_context.bin_desc.size_bytes = le32_to_cpu(asd_hdr->header.ucode_size_bytes);
+ adev->psp.asd_context.bin_desc.start_addr = (uint8_t *)asd_hdr +
le32_to_cpu(asd_hdr->header.ucode_array_offset_bytes);
return 0;
out:
@@ -3129,7 +2946,8 @@ static int psp_init_sos_base_fw(struct amdgpu_device *adev)
ucode_array_start_addr = (uint8_t *)sos_hdr +
le32_to_cpu(sos_hdr->header.ucode_array_offset_bytes);
- if (adev->gmc.xgmi.connected_to_cpu || (adev->asic_type != CHIP_ALDEBARAN)) {
+ if (adev->gmc.xgmi.connected_to_cpu ||
+ (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2))) {
adev->psp.sos.fw_version = le32_to_cpu(sos_hdr->header.ucode_version);
adev->psp.sos.feature_version = le32_to_cpu(sos_hdr->sos.fw_version);
@@ -3284,40 +3102,43 @@ static int parse_ta_bin_descriptor(struct psp_context *psp,
switch (desc->fw_type) {
case TA_FW_TYPE_PSP_ASD:
- psp->asd.fw_version = le32_to_cpu(desc->fw_version);
- psp->asd.feature_version = le32_to_cpu(desc->fw_version);
- psp->asd.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->asd.start_addr = ucode_start_addr;
+ psp->asd_context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.feature_version = le32_to_cpu(desc->fw_version);
+ psp->asd_context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->asd_context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_XGMI:
- psp->xgmi.feature_version = le32_to_cpu(desc->fw_version);
- psp->xgmi.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->xgmi.start_addr = ucode_start_addr;
+ psp->xgmi_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->xgmi_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->xgmi_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAS:
- psp->ras.feature_version = le32_to_cpu(desc->fw_version);
- psp->ras.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->ras.start_addr = ucode_start_addr;
+ psp->ras_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->ras_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->ras_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_HDCP:
- psp->hdcp.feature_version = le32_to_cpu(desc->fw_version);
- psp->hdcp.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->hdcp.start_addr = ucode_start_addr;
+ psp->hdcp_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->hdcp_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->hdcp_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_DTM:
- psp->dtm.feature_version = le32_to_cpu(desc->fw_version);
- psp->dtm.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->dtm.start_addr = ucode_start_addr;
+ psp->dtm_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->dtm_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->dtm_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_RAP:
- psp->rap.feature_version = le32_to_cpu(desc->fw_version);
- psp->rap.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->rap.start_addr = ucode_start_addr;
+ psp->rap_context.context.bin_desc.fw_version = le32_to_cpu(desc->fw_version);
+ psp->rap_context.context.bin_desc.size_bytes = le32_to_cpu(desc->size_bytes);
+ psp->rap_context.context.bin_desc.start_addr = ucode_start_addr;
break;
case TA_FW_TYPE_PSP_SECUREDISPLAY:
- psp->securedisplay.feature_version = le32_to_cpu(desc->fw_version);
- psp->securedisplay.size_bytes = le32_to_cpu(desc->size_bytes);
- psp->securedisplay.start_addr = ucode_start_addr;
+ psp->securedisplay_context.context.bin_desc.fw_version =
+ le32_to_cpu(desc->fw_version);
+ psp->securedisplay_context.context.bin_desc.size_bytes =
+ le32_to_cpu(desc->size_bytes);
+ psp->securedisplay_context.context.bin_desc.start_addr =
+ ucode_start_addr;
break;
default:
dev_warn(psp->adev->dev, "Unsupported TA type: %d\n", desc->fw_type);
@@ -3478,7 +3299,7 @@ void psp_copy_fw(struct psp_context *psp, uint8_t *start_addr, uint32_t bin_size
{
int idx;
- if (!drm_dev_enter(&psp->adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(psp->adev), &idx))
return;
memset(psp->fw_pri_buf, 0, PSP_1_MEG);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 8ef2d28af92a..f29afabbff1f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -34,17 +34,20 @@
#define PSP_FENCE_BUFFER_SIZE 0x1000
#define PSP_CMD_BUFFER_SIZE 0x1000
-#define PSP_XGMI_SHARED_MEM_SIZE 0x4000
-#define PSP_RAS_SHARED_MEM_SIZE 0x4000
#define PSP_1_MEG 0x100000
#define PSP_TMR_SIZE(adev) ((adev)->asic_type == CHIP_ALDEBARAN ? 0x800000 : 0x400000)
-#define PSP_HDCP_SHARED_MEM_SIZE 0x4000
-#define PSP_DTM_SHARED_MEM_SIZE 0x4000
-#define PSP_RAP_SHARED_MEM_SIZE 0x4000
-#define PSP_SECUREDISPLAY_SHARED_MEM_SIZE 0x4000
-#define PSP_SHARED_MEM_SIZE 0x4000
#define PSP_FW_NAME_LEN 0x24
+enum psp_shared_mem_size {
+ PSP_ASD_SHARED_MEM_SIZE = 0x0,
+ PSP_XGMI_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAS_SHARED_MEM_SIZE = 0x4000,
+ PSP_HDCP_SHARED_MEM_SIZE = 0x4000,
+ PSP_DTM_SHARED_MEM_SIZE = 0x4000,
+ PSP_RAP_SHARED_MEM_SIZE = 0x4000,
+ PSP_SECUREDISPLAY_SHARED_MEM_SIZE = 0x4000,
+};
+
struct psp_context;
struct psp_xgmi_node_info;
struct psp_xgmi_topology_info;
@@ -131,21 +134,26 @@ struct psp_xgmi_topology_info {
struct psp_xgmi_node_info nodes[AMDGPU_XGMI_MAX_CONNECTED_NODES];
};
-struct psp_asd_context {
- bool asd_initialized;
- uint32_t session_id;
+struct psp_bin_desc {
+ uint32_t fw_version;
+ uint32_t feature_version;
+ uint32_t size_bytes;
+ uint8_t *start_addr;
};
struct ta_mem_context {
struct amdgpu_bo *shared_bo;
uint64_t shared_mc_addr;
void *shared_buf;
+ enum psp_shared_mem_size shared_mem_size;
};
struct ta_context {
bool initialized;
uint32_t session_id;
struct ta_mem_context mem_context;
+ struct psp_bin_desc bin_desc;
+ enum psp_gfx_cmd_id ta_load_type;
};
struct ta_cp_context {
@@ -263,13 +271,6 @@ struct psp_runtime_boot_cfg_entry {
uint32_t reserved;
};
-struct psp_bin_desc {
- uint32_t fw_version;
- uint32_t feature_version;
- uint32_t size_bytes;
- uint8_t *start_addr;
-};
-
struct psp_context
{
struct amdgpu_device *adev;
@@ -301,7 +302,6 @@ struct psp_context
/* asd firmware */
const struct firmware *asd_fw;
- struct psp_bin_desc asd;
/* toc firmware */
const struct firmware *toc_fw;
@@ -326,14 +326,8 @@ struct psp_context
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
uint32_t ta_fw_version;
- struct psp_bin_desc xgmi;
- struct psp_bin_desc ras;
- struct psp_bin_desc hdcp;
- struct psp_bin_desc dtm;
- struct psp_bin_desc rap;
- struct psp_bin_desc securedisplay;
-
- struct psp_asd_context asd_context;
+
+ struct ta_context asd_context;
struct psp_xgmi_context xgmi_context;
struct psp_ras_context ras_context;
struct ta_cp_context hdcp_context;
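A minimal sketch of how an initialize path is expected to populate the new ta_context fields before using the shared helpers, mirroring the HDCP/DTM/RAP hunks above; "foo" and PSP_FOO_SHARED_MEM_SIZE are hypothetical placeholders.

static int psp_foo_initialize(struct psp_context *psp)
{
	int ret;

	/* The shared buffer size and load type now live in the context itself. */
	psp->foo_context.context.mem_context.shared_mem_size = PSP_FOO_SHARED_MEM_SIZE;
	psp->foo_context.context.ta_load_type = GFX_CMD_ID_LOAD_TA;

	if (!psp->foo_context.context.initialized) {
		ret = psp_ta_init_shared_buf(psp, &psp->foo_context.context.mem_context);
		if (ret)
			return ret;
	}

	ret = psp_ta_load(psp, &psp->foo_context.context);
	if (!ret)
		psp->foo_context.context.initialized = true;

	return ret;
}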
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 96a8fd0ca1df..08133de21fdd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -35,7 +35,11 @@
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include "atom.h"
+#ifdef CONFIG_X86_MCE_AMD
+#include <asm/mce.h>
+static bool notifier_registered;
+#endif
static const char *RAS_FS_NAME = "ras";
const char *ras_error_string[] = {
@@ -61,8 +65,30 @@ const char *ras_block_string[] = {
"mp0",
"mp1",
"fuse",
+ "mca",
};
+const char *ras_mca_block_string[] = {
+ "mca_mp0",
+ "mca_mp1",
+ "mca_mpio",
+ "mca_iohc",
+};
+
+const char *get_ras_block_str(struct ras_common_if *ras_block)
+{
+ if (!ras_block)
+ return "NULL";
+
+ if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT)
+ return "OUT OF RANGE";
+
+ if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
+ return ras_mca_block_string[ras_block->sub_block_index];
+
+ return ras_block_string[ras_block->block];
+}
+
#define ras_err_str(i) (ras_error_string[ffs(i)])
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)
@@ -85,6 +111,14 @@ static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con,
uint64_t addr);
static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev,
uint64_t addr);
+#ifdef CONFIG_X86_MCE_AMD
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev);
+struct mce_notifier_adev_list {
+ struct amdgpu_device *devs[MAX_GPU_INSTANCE];
+ int num_gpu;
+};
+static struct mce_notifier_adev_list mce_adev_list;
+#endif
void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready)
{
@@ -187,7 +221,7 @@ static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id)
for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) {
*block_id = i;
- if (strcmp(name, ras_block_str(i)) == 0)
+ if (strcmp(name, ras_block_string[i]) == 0)
return 0;
}
return -EINVAL;
@@ -509,7 +543,6 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev,
if (amdgpu_ras_query_error_status(obj->adev, &info))
return -EINVAL;
-
if (obj->adev->asic_type == CHIP_ALDEBARAN) {
if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
DRM_WARN("Failed to reset error counter and error status");
@@ -529,7 +562,7 @@ static inline void put_obj(struct ras_manager *obj)
if (obj && (--obj->use == 0))
list_del(&obj->node);
if (obj && (obj->use < 0))
- DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", ras_block_str(obj->head.block));
+ DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", get_ras_block_str(&obj->head));
}
/* make one obj and return it. */
@@ -545,7 +578,14 @@ static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev,
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
return NULL;
- obj = &con->objs[head->block];
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
+
/* already exist. return obj? */
if (alive_obj(obj))
return NULL;
@@ -573,19 +613,21 @@ struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev,
if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
return NULL;
- obj = &con->objs[head->block];
+ if (head->block == AMDGPU_RAS_BLOCK__MCA) {
+ if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
+ return NULL;
+
+ obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index];
+ } else
+ obj = &con->objs[head->block];
- if (alive_obj(obj)) {
- WARN_ON(head->block != obj->head.block);
+ if (alive_obj(obj))
return obj;
- }
} else {
- for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
obj = &con->objs[i];
- if (alive_obj(obj)) {
- WARN_ON(i != obj->head.block);
+ if (alive_obj(obj))
return obj;
- }
}
}
@@ -626,8 +668,6 @@ static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev,
*/
if (!amdgpu_ras_is_feature_allowed(adev, head))
return 0;
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head)))
- return 0;
if (enable) {
if (!obj) {
@@ -678,19 +718,14 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev,
/* Do not enable if it is not allowed. */
WARN_ON(enable && !amdgpu_ras_is_feature_allowed(adev, head));
- /* Are we alerady in that state we are going to set? */
- if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) {
- ret = 0;
- goto out;
- }
if (!amdgpu_ras_intr_triggered()) {
ret = psp_ras_enable_features(&adev->psp, info, enable);
if (ret) {
- dev_err(adev->dev, "ras %s %s failed %d\n",
+ dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
enable ? "enable":"disable",
- ras_block_str(head->block),
- ret);
+ get_ras_block_str(head),
+ amdgpu_ras_is_poison_mode_supported(adev), ret);
goto out;
}
}
@@ -731,7 +766,7 @@ int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev,
if (!ret)
dev_info(adev->dev,
"RAS INFO: %s setup object\n",
- ras_block_str(head->block));
+ get_ras_block_str(head));
}
} else {
/* setup the object then issue a ras TA disable cmd.*/
@@ -781,17 +816,39 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
bool bypass)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
- int ras_block_count = AMDGPU_RAS_BLOCK_COUNT;
int i;
- const enum amdgpu_ras_error_type default_ras_type =
- AMDGPU_RAS_ERROR__NONE;
+ const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE;
- for (i = 0; i < ras_block_count; i++) {
+ for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) {
struct ras_common_if head = {
.block = i,
.type = default_ras_type,
.sub_block_index = 0,
};
+
+ if (i == AMDGPU_RAS_BLOCK__MCA)
+ continue;
+
+ if (bypass) {
+ /*
+ * bypass psp. vbios enable ras for us.
+ * so just create the obj
+ */
+ if (__amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ } else {
+ if (amdgpu_ras_feature_enable(adev, &head, 1))
+ break;
+ }
+ }
+
+ for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) {
+ struct ras_common_if head = {
+ .block = AMDGPU_RAS_BLOCK__MCA,
+ .type = default_ras_type,
+ .sub_block_index = i,
+ };
+
if (bypass) {
/*
* bypass psp. vbios enable ras for us.
@@ -809,6 +866,32 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev,
}
/* feature ctl end */
+
+void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev,
+ struct ras_common_if *ras_block,
+ struct ras_err_data *err_data)
+{
+ switch (ras_block->sub_block_index) {
+ case AMDGPU_RAS_MCA_BLOCK__MP0:
+ if (adev->mca.mp0.ras_funcs &&
+ adev->mca.mp0.ras_funcs->query_ras_error_count)
+ adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_MCA_BLOCK__MP1:
+ if (adev->mca.mp1.ras_funcs &&
+ adev->mca.mp1.ras_funcs->query_ras_error_count)
+ adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ case AMDGPU_RAS_MCA_BLOCK__MPIO:
+ if (adev->mca.mpio.ras_funcs &&
+ adev->mca.mpio.ras_funcs->query_ras_error_count)
+ adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data);
+ break;
+ default:
+ break;
+ }
+}
+
/* query/inject/cure begin */
int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
struct ras_query_if *info)
@@ -872,6 +955,9 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->hdp.ras_funcs->query_ras_error_count)
adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__MCA:
+ amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data);
+ break;
default:
break;
}
@@ -893,13 +979,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
obj->err_data.ce_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
} else {
dev_info(adev->dev, "%ld correctable hardware errors "
"detected in %s block, no user "
"action is needed.\n",
obj->err_data.ce_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
}
}
if (err_data.ue_count) {
@@ -912,15 +998,18 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev,
adev->smuio.funcs->get_socket_id(adev),
adev->smuio.funcs->get_die_id(adev),
obj->err_data.ue_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
} else {
dev_info(adev->dev, "%ld uncorrectable hardware errors "
"detected in %s block\n",
obj->err_data.ue_count,
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
}
}
+ if (!amdgpu_persistent_edc_harvesting_supported(adev))
+ amdgpu_ras_reset_error_status(adev, info->head.block);
+
return 0;
}
@@ -1027,6 +1116,7 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
case AMDGPU_RAS_BLOCK__SDMA:
case AMDGPU_RAS_BLOCK__MMHUB:
case AMDGPU_RAS_BLOCK__PCIE_BIF:
+ case AMDGPU_RAS_BLOCK__MCA:
ret = psp_ras_trigger_error(&adev->psp, &block_info);
break;
case AMDGPU_RAS_BLOCK__XGMI_WAFL:
@@ -1034,13 +1124,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
break;
default:
dev_info(adev->dev, "%s error injection is not supported yet\n",
- ras_block_str(info->head.block));
+ get_ras_block_str(&info->head));
ret = -EINVAL;
}
if (ret)
dev_err(adev->dev, "ras inject %s failed %d\n",
- ras_block_str(info->head.block), ret);
+ get_ras_block_str(&info->head), ret);
return ret;
}
@@ -1383,7 +1473,7 @@ void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
if (amdgpu_ras_is_supported(adev, obj->head.block) &&
(obj->attr_inuse == 1)) {
sprintf(fs_info.debugfs_name, "%s_err_inject",
- ras_block_str(obj->head.block));
+ get_ras_block_str(&obj->head));
fs_info.head = obj->head;
amdgpu_ras_debugfs_create(adev, &fs_info, dir);
}
@@ -1469,22 +1559,28 @@ static void amdgpu_ras_interrupt_handler(struct ras_manager *obj)
data->rptr = (data->aligned_element_size +
data->rptr) % data->ring_size;
- /* Let IP handle its data, maybe we need get the output
- * from the callback to udpate the error type/count, etc
- */
if (data->cb) {
- ret = data->cb(obj->adev, &err_data, &entry);
- /* ue will trigger an interrupt, and in that case
- * we need do a reset to recovery the whole system.
- * But leave IP do that recovery, here we just dispatch
- * the error.
- */
- if (ret == AMDGPU_RAS_SUCCESS) {
- /* these counts could be left as 0 if
- * some blocks do not count error number
+ if (amdgpu_ras_is_poison_mode_supported(obj->adev) &&
+ obj->head.block == AMDGPU_RAS_BLOCK__UMC)
+ dev_info(obj->adev->dev,
+ "Poison is created, no user action is needed.\n");
+ else {
+ /* Let the IP handle its data; maybe we need to get the output
+ * from the callback to update the error type/count, etc.
+ */
+ ret = data->cb(obj->adev, &err_data, &entry);
+ /* A ue will trigger an interrupt, and in that case
+ * we need to do a reset to recover the whole system.
+ * But leave the IP to do that recovery; here we just dispatch
+ * the error.
*/
- obj->err_data.ue_count += err_data.ue_count;
- obj->err_data.ce_count += err_data.ce_count;
+ if (ret == AMDGPU_RAS_SUCCESS) {
+ /* these counts could be left as 0 if
+ * some blocks do not count the number of errors
+ */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+ }
}
}
}
@@ -2014,6 +2110,11 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev)
adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs);
}
+#ifdef CONFIG_X86_MCE_AMD
+ if ((adev->asic_type == CHIP_ALDEBARAN) &&
+ (adev->gmc.xgmi.connected_to_cpu))
+ amdgpu_register_bad_pages_mca_notifier(adev);
+#endif
return 0;
free:
@@ -2056,19 +2157,6 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev)
}
/* recovery end */
-/* return 0 if ras will reset gpu and repost.*/
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
- unsigned int block)
-{
- struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
-
- if (!ras)
- return -EINVAL;
-
- ras->flags |= AMDGPU_RAS_FLAG_INIT_NEED_RESET;
- return 0;
-}
-
static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev)
{
return adev->asic_type == CHIP_VEGA10 ||
@@ -2176,12 +2264,14 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
{
struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
int r;
+ bool df_poison, umc_poison;
if (con)
return 0;
con = kmalloc(sizeof(struct amdgpu_ras) +
- sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT,
+ sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT +
+ sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT,
GFP_KERNEL|__GFP_ZERO);
if (!con)
return -ENOMEM;
@@ -2245,6 +2335,23 @@ int amdgpu_ras_init(struct amdgpu_device *adev)
goto release_con;
}
+ /* Init poison supported flag, the default value is false */
+ if (adev->df.funcs &&
+ adev->df.funcs->query_ras_poison_mode &&
+ adev->umc.ras_funcs &&
+ adev->umc.ras_funcs->query_ras_poison_mode) {
+ df_poison =
+ adev->df.funcs->query_ras_poison_mode(adev);
+ umc_poison =
+ adev->umc.ras_funcs->query_ras_poison_mode(adev);
+ /* Poison is supported only if it is enabled in both DF and UMC */
+ if (df_poison && umc_poison)
+ con->poison_supported = true;
+ else if (df_poison != umc_poison)
+ dev_warn(adev->dev, "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
+ df_poison, umc_poison);
+ }
+
if (amdgpu_ras_fs_init(adev)) {
r = -EINVAL;
goto release_con;
@@ -2288,6 +2395,16 @@ static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev,
return 0;
}
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+
+ if (!con)
+ return false;
+
+ return con->poison_supported;
+}
+
/* helper function to handle common stuff in ip late init phase */
int amdgpu_ras_late_init(struct amdgpu_device *adev,
struct ras_common_if *ras_block,
@@ -2306,12 +2423,7 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
if (r) {
- if (r == -EAGAIN) {
- /* request gpu reset. will run again */
- amdgpu_ras_request_reset_on_boot(adev,
- ras_block->block);
- return 0;
- } else if (adev->in_suspend || amdgpu_in_reset(adev)) {
+ if (adev->in_suspend || amdgpu_in_reset(adev)) {
/* in resume phase, if fail to enable ras,
* clean up all ras fs nodes, and disable ras */
goto cleanup;
@@ -2403,19 +2515,6 @@ void amdgpu_ras_resume(struct amdgpu_device *adev)
}
}
}
-
- if (con->flags & AMDGPU_RAS_FLAG_INIT_NEED_RESET) {
- con->flags &= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET;
- /* setup ras obj state as disabled.
- * for init_by_vbios case.
- * if we want to enable ras, just enable it in a normal way.
- * If we want do disable it, need setup ras obj as enabled,
- * then issue another TA disable cmd.
- * See feature_enable_on_boot
- */
- amdgpu_ras_disable_all_features(adev, 1);
- amdgpu_ras_reset_gpu(adev);
- }
}
void amdgpu_ras_suspend(struct amdgpu_device *adev)
@@ -2507,3 +2606,136 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev)
kfree(con);
}
}
+
+#ifdef CONFIG_X86_MCE_AMD
+static struct amdgpu_device *find_adev(uint32_t node_id)
+{
+ int i;
+ struct amdgpu_device *adev = NULL;
+
+ for (i = 0; i < mce_adev_list.num_gpu; i++) {
+ adev = mce_adev_list.devs[i];
+
+ if (adev && adev->gmc.xgmi.connected_to_cpu &&
+ adev->gmc.xgmi.physical_node_id == node_id)
+ break;
+ adev = NULL;
+ }
+
+ return adev;
+}
+
+#define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF)
+#define GET_UMC_INST(m) (((m) >> 21) & 0x7)
+#define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
+#define GPU_ID_OFFSET 8
+
+static int amdgpu_bad_page_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct mce *m = (struct mce *)data;
+ struct amdgpu_device *adev = NULL;
+ uint32_t gpu_id = 0;
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst, channel_index = 0;
+ struct ras_err_data err_data = {0, 0, 0, NULL};
+ struct eeprom_table_record err_rec;
+ uint64_t retired_page;
+
+ /*
+ * Only process the error if it was generated in UMC_V2, which
+ * belongs to GPU UMCs, and it occurred in DramECC (Extended error
+ * code = 0); otherwise bail out.
+ */
+ if (!m || !((smca_get_bank_type(m->bank) == SMCA_UMC_V2) &&
+ (XEC(m->status, 0x3f) == 0x0)))
+ return NOTIFY_DONE;
+
+ /*
+ * If it is correctable error, return.
+ */
+ if (mce_is_correctable(m))
+ return NOTIFY_OK;
+
+ /*
+ * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register.
+ */
+ gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET;
+
+ adev = find_adev(gpu_id);
+ if (!adev) {
+ DRM_WARN("%s: Unable to find adev for gpu_id: %d\n", __func__,
+ gpu_id);
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * If it is uncorrectable error, then find out UMC instance and
+ * channel index.
+ */
+ umc_inst = GET_UMC_INST(m->ipid);
+ ch_inst = GET_CHAN_INDEX(m->ipid);
+
+ dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d",
+ umc_inst, ch_inst);
+
+ memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
+
+ /*
+ * Translate UMC channel address to Physical address
+ */
+ channel_index =
+ adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num
+ + ch_inst];
+
+ retired_page = ADDR_OF_8KB_BLOCK(m->addr) |
+ ADDR_OF_256B_BLOCK(channel_index) |
+ OFFSET_IN_256B_BLOCK(m->addr);
+
+ err_rec.address = m->addr;
+ err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT;
+ err_rec.ts = (uint64_t)ktime_get_real_seconds();
+ err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE;
+ err_rec.cu = 0;
+ err_rec.mem_channel = channel_index;
+ err_rec.mcumc_id = umc_inst;
+
+ err_data.err_addr = &err_rec;
+ err_data.err_addr_cnt = 1;
+
+ if (amdgpu_bad_page_threshold != 0) {
+ amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
+ err_data.err_addr_cnt);
+ amdgpu_ras_save_bad_pages(adev);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block amdgpu_bad_page_nb = {
+ .notifier_call = amdgpu_bad_page_notifier,
+ .priority = MCE_PRIO_UC,
+};
+
+static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev)
+{
+ /*
+ * Add the adev to the mce_adev_list.
+ * During mode2 reset, the amdgpu device is temporarily
+ * removed from the mgpu_info list, which can cause
+ * page retirement to fail.
+ * Use this list instead of mgpu_info to find the amdgpu
+ * device on which the UMC error was reported.
+ */
+ mce_adev_list.devs[mce_adev_list.num_gpu++] = adev;
+
+ /*
+ * Register the x86 notifier only once
+ * with MCE subsystem.
+ */
+ if (notifier_registered == false) {
+ mce_register_decode_chain(&amdgpu_bad_page_nb);
+ notifier_registered = true;
+ }
+}
+#endif
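
A minimal, self-contained sketch of the MCA_IPID decoding the bad-page notifier above relies on. The bit fields and GPU_ID_OFFSET match the macros introduced in this patch; the sample ipid value and the helper itself are illustrative only, not part of the change.

#include <stdint.h>
#include <stdio.h>

/* Same bit fields as the patch: the GPU id, UMC instance and channel index
 * are packed into MCA_IPID_UMC, and the GPU id is biased by GPU_ID_OFFSET.
 */
#define GET_MCA_IPID_GPUID(m)	(((m) >> 44) & 0xF)
#define GET_UMC_INST(m)		(((m) >> 21) & 0x7)
#define GET_CHAN_INDEX(m)	((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4))
#define GPU_ID_OFFSET		8

int main(void)
{
	uint64_t ipid = 0x90000701000ULL;	/* made-up sample value */
	unsigned int gpu_id = GET_MCA_IPID_GPUID(ipid) - GPU_ID_OFFSET;
	unsigned int umc_inst = GET_UMC_INST(ipid);
	unsigned int ch_inst = GET_CHAN_INDEX(ipid);

	/* gpu_id selects the adev via find_adev(); umc_inst and ch_inst are
	 * then used to look up adev->umc.channel_idx_tbl[] for the physical
	 * channel behind the retired page.
	 * Prints: gpu_id=1 umc_inst=3 ch_inst=5
	 */
	printf("gpu_id=%u umc_inst=%u ch_inst=%u\n", gpu_id, umc_inst, ch_inst);
	return 0;
}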
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index eae604fd90b8..e36f4de9fa55 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -32,7 +32,6 @@
#include "amdgpu_ras_eeprom.h"
#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0)
-#define AMDGPU_RAS_FLAG_INIT_NEED_RESET (0x1 << 1)
enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__UMC = 0,
@@ -49,15 +48,22 @@ enum amdgpu_ras_block {
AMDGPU_RAS_BLOCK__MP0,
AMDGPU_RAS_BLOCK__MP1,
AMDGPU_RAS_BLOCK__FUSE,
- AMDGPU_RAS_BLOCK__MPIO,
+ AMDGPU_RAS_BLOCK__MCA,
AMDGPU_RAS_BLOCK__LAST
};
-extern const char *ras_block_string[];
+enum amdgpu_ras_mca_block {
+ AMDGPU_RAS_MCA_BLOCK__MP0 = 0,
+ AMDGPU_RAS_MCA_BLOCK__MP1,
+ AMDGPU_RAS_MCA_BLOCK__MPIO,
+ AMDGPU_RAS_MCA_BLOCK__IOHC,
+
+ AMDGPU_RAS_MCA_BLOCK__LAST
+};
-#define ras_block_str(i) (ras_block_string[i])
#define AMDGPU_RAS_BLOCK_COUNT AMDGPU_RAS_BLOCK__LAST
+#define AMDGPU_RAS_MCA_BLOCK_COUNT AMDGPU_RAS_MCA_BLOCK__LAST
#define AMDGPU_RAS_BLOCK_MASK ((1ULL << AMDGPU_RAS_BLOCK_COUNT) - 1)
enum amdgpu_ras_gfx_subblock {
@@ -345,6 +351,9 @@ struct amdgpu_ras {
/* disable ras error count harvest in recovery */
bool disable_ras_err_cnt_harvest;
+ /* is poison mode supported */
+ bool poison_supported;
+
/* RAS count errors delayed work */
struct delayed_work ras_counte_delay_work;
atomic_t ras_ue_count;
@@ -488,8 +497,6 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
}
int amdgpu_ras_recovery_init(struct amdgpu_device *adev);
-int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev,
- unsigned int block);
void amdgpu_ras_resume(struct amdgpu_device *adev);
void amdgpu_ras_suspend(struct amdgpu_device *adev);
@@ -544,6 +551,8 @@ amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) {
return TA_RAS_BLOCK__MP1;
case AMDGPU_RAS_BLOCK__FUSE:
return TA_RAS_BLOCK__FUSE;
+ case AMDGPU_RAS_BLOCK__MCA:
+ return TA_RAS_BLOCK__MCA;
default:
WARN_ONCE(1, "RAS ERROR: unexpected block id %d\n", block);
return TA_RAS_BLOCK__UMC;
@@ -638,4 +647,8 @@ void amdgpu_release_ras_context(struct amdgpu_device *adev);
int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev);
+const char *get_ras_block_str(struct ras_common_if *ras_block);
+
+bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev);
+
#endif
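
For reference, the objs[] indexing used after this change can be restated as a small helper. This is an illustrative sketch, not code from the patch: it mirrors amdgpu_ras_create_obj()/amdgpu_ras_find_obj() and the enlarged kmalloc in amdgpu_ras_init().

/* con->objs[] now holds AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT
 * entries: regular blocks are indexed by their block id, while MCA
 * sub-blocks are appended after AMDGPU_RAS_BLOCK__LAST.
 * Returns -1 for an invalid head.
 */
static int ras_obj_index(const struct ras_common_if *head)
{
	if (head->block >= AMDGPU_RAS_BLOCK_COUNT)
		return -1;

	if (head->block == AMDGPU_RAS_BLOCK__MCA) {
		if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST)
			return -1;
		return AMDGPU_RAS_BLOCK__LAST + head->sub_block_index;
	}

	return head->block;
}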
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 98732518543e..05117eda105b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -1077,6 +1077,13 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
if (res)
DRM_ERROR("RAS table incorrect checksum or error:%d\n",
res);
+
+ /* Warn if we are at 90% of the threshold or above
+ */
+ if (10 * control->ras_num_recs >= 9 * ras->bad_page_cnt_threshold)
+ dev_warn(adev->dev, "RAS records:%u exceeds 90%% of threshold:%d",
+ control->ras_num_recs,
+ ras->bad_page_cnt_threshold);
} else if (hdr->header == RAS_TABLE_HDR_BAD &&
amdgpu_bad_page_threshold != 0) {
res = __verify_ras_table_checksum(control);
@@ -1098,11 +1105,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control,
res = amdgpu_ras_eeprom_correct_header_tag(control,
RAS_TABLE_HDR_VAL);
} else {
- *exceed_err_limit = true;
- dev_err(adev->dev,
- "RAS records:%d exceed threshold:%d, "
- "maybe retire this GPU?",
+ dev_err(adev->dev, "RAS records:%d exceed threshold:%d",
control->ras_num_recs, ras->bad_page_cnt_threshold);
+ if (amdgpu_bad_page_threshold == -2) {
+ dev_warn(adev->dev, "GPU will be initialized due to bad_page_threshold = -2.");
+ res = 0;
+ } else {
+ *exceed_err_limit = true;
+ dev_err(adev->dev,
+ "RAS records:%d exceed threshold:%d, "
+ "GPU will not be initialized. Replace this GPU or increase the threshold",
+ control->ras_num_recs, ras->bad_page_cnt_threshold);
+ }
}
} else {
DRM_INFO("Creating a new EEPROM table");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 0554576d3695..ab2351ba9574 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -415,26 +415,20 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev_to_drm(adev)->primary;
- struct dentry *ent, *root = minor->debugfs_root;
+ struct dentry *root = minor->debugfs_root;
char name[32];
sprintf(name, "amdgpu_ring_%s", ring->name);
+ debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring,
+ &amdgpu_debugfs_ring_fops,
+ ring->ring_size + 12);
- ent = debugfs_create_file(name,
- S_IFREG | S_IRUGO, root,
- ring, &amdgpu_debugfs_ring_fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
-
- i_size_write(ent->d_inode, ring->ring_size + 12);
- ring->ent = ent;
#endif
- return 0;
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index e713d31619fe..4d380e79752c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -36,8 +36,13 @@
#define AMDGPU_MAX_VCE_RINGS 3
#define AMDGPU_MAX_UVD_ENC_RINGS 2
-#define AMDGPU_RING_PRIO_DEFAULT 1
-#define AMDGPU_RING_PRIO_MAX AMDGPU_GFX_PIPE_PRIO_MAX
+enum amdgpu_ring_priority_level {
+ AMDGPU_RING_PRIO_0,
+ AMDGPU_RING_PRIO_1,
+ AMDGPU_RING_PRIO_DEFAULT = 1,
+ AMDGPU_RING_PRIO_2,
+ AMDGPU_RING_PRIO_MAX
+};
/* some special values for the owner field */
#define AMDGPU_FENCE_OWNER_UNDEFINED ((void *)0ul)
@@ -248,10 +253,6 @@ struct amdgpu_ring {
bool has_compute_vm_bug;
bool no_scheduler;
int hw_prio;
-
-#if defined(CONFIG_DEBUG_FS)
- struct dentry *ent;
-#endif
};
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
@@ -351,8 +352,6 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
-int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
-
+void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index b7d861ed5284..e9b45089a28a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -32,37 +32,9 @@
#include "amdgpu_sched.h"
#include "amdgpu_vm.h"
-int amdgpu_to_sched_priority(int amdgpu_priority,
- enum drm_sched_priority *prio)
-{
- switch (amdgpu_priority) {
- case AMDGPU_CTX_PRIORITY_VERY_HIGH:
- *prio = DRM_SCHED_PRIORITY_HIGH;
- break;
- case AMDGPU_CTX_PRIORITY_HIGH:
- *prio = DRM_SCHED_PRIORITY_HIGH;
- break;
- case AMDGPU_CTX_PRIORITY_NORMAL:
- *prio = DRM_SCHED_PRIORITY_NORMAL;
- break;
- case AMDGPU_CTX_PRIORITY_LOW:
- case AMDGPU_CTX_PRIORITY_VERY_LOW:
- *prio = DRM_SCHED_PRIORITY_MIN;
- break;
- case AMDGPU_CTX_PRIORITY_UNSET:
- *prio = DRM_SCHED_PRIORITY_UNSET;
- break;
- default:
- WARN(1, "Invalid context priority %d\n", amdgpu_priority);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
- enum drm_sched_priority priority)
+ int32_t priority)
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
@@ -89,7 +61,7 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
static int amdgpu_sched_context_priority_override(struct amdgpu_device *adev,
int fd,
unsigned ctx_id,
- enum drm_sched_priority priority)
+ int32_t priority)
{
struct fd f = fdget(fd);
struct amdgpu_fpriv *fpriv;
@@ -124,7 +96,6 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
{
union drm_amdgpu_sched *args = data;
struct amdgpu_device *adev = drm_to_adev(dev);
- enum drm_sched_priority priority;
int r;
/* First check the op, then the op's argument.
@@ -138,21 +109,22 @@ int amdgpu_sched_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- r = amdgpu_to_sched_priority(args->in.priority, &priority);
- if (r)
- return r;
+ if (!amdgpu_ctx_priority_is_valid(args->in.priority)) {
+ WARN(1, "Invalid context priority %d\n", args->in.priority);
+ return -EINVAL;
+ }
switch (args->in.op) {
case AMDGPU_SCHED_OP_PROCESS_PRIORITY_OVERRIDE:
r = amdgpu_sched_process_priority_override(adev,
args->in.fd,
- priority);
+ args->in.priority);
break;
case AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE:
r = amdgpu_sched_context_priority_override(adev,
args->in.fd,
args->in.ctx_id,
- priority);
+ args->in.priority);
break;
default:
/* Impossible.
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 4f03e0a2953e..c875f1cdd2af 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -699,6 +699,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
true, NULL);
out_unlock:
mmap_read_unlock(mm);
+ if (r)
+ pr_debug("failed %d to get user pages 0x%lx\n", r, start);
+
mmput(mm);
return r;
@@ -897,7 +900,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
DRM_ERROR("failed to pin userptr\n");
return r;
}
- } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) {
if (!ttm->sg) {
struct dma_buf_attachment *attach;
struct sg_table *sgt;
@@ -1069,8 +1072,6 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- amdgpu_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
if (gtt->usertask)
put_task_struct(gtt->usertask);
@@ -1124,6 +1125,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ pgoff_t i;
+ int ret;
/* user pages are bound by amdgpu_ttm_tt_pin_userptr() */
if (gtt->userptr) {
@@ -1133,10 +1136,17 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return 0;
- return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = bdev->dev_mapping;
+
+ return 0;
}
/*
@@ -1150,6 +1160,9 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct amdgpu_device *adev;
+ pgoff_t i;
+
+ amdgpu_ttm_backend_unbind(bdev, ttm);
if (gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
@@ -1158,9 +1171,12 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
return;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL)
return;
+ for (i = 0; i < ttm->num_pages; ++i)
+ ttm->pages[i]->mapping = NULL;
+
adev = amdgpu_ttm_adev(bdev);
return ttm_pool_free(&adev->mman.bdev.pool, ttm);
}
@@ -1188,8 +1204,8 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
- /* Set TTM_PAGE_FLAG_SG before populate but after create. */
- bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. */
+ bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
gtt = (void *)bo->ttm;
gtt->userptr = addr;
@@ -1225,7 +1241,7 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end)
+ unsigned long end, unsigned long *userptr)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long size;
@@ -1240,6 +1256,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
+ if (userptr)
+ *userptr = gtt->userptr;
return true;
}
@@ -2039,6 +2057,36 @@ error_free:
return r;
}
+/**
+ * amdgpu_ttm_evict_resources - evict memory buffers
+ * @adev: amdgpu device object
+ * @mem_type: evicted BO's memory type
+ *
+ * Evicts all @mem_type buffers on the lru list of the memory type.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type)
+{
+ struct ttm_resource_manager *man;
+
+ switch (mem_type) {
+ case TTM_PL_VRAM:
+ case TTM_PL_TT:
+ case AMDGPU_PL_GWS:
+ case AMDGPU_PL_GDS:
+ case AMDGPU_PL_OA:
+ man = ttm_manager_type(&adev->mman.bdev, mem_type);
+ break;
+ default:
+ DRM_ERROR("Trying to evict invalid memory type\n");
+ return -EINVAL;
+ }
+
+ return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
+}
+
#if defined(CONFIG_DEBUG_FS)
static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 3205fd520060..7346ecff4438 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -182,7 +182,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
- unsigned long end);
+ unsigned long end, unsigned long *userptr);
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated);
bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm);
@@ -190,6 +190,7 @@ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_resource *mem);
+int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type);
void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index abd8469380e5..ca3350502618 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -416,10 +416,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
else
return AMDGPU_FW_LOAD_PSP;
default:
- DRM_ERROR("Unknown firmware load type\n");
+ if (!load_type)
+ return AMDGPU_FW_LOAD_DIRECT;
+ else
+ return AMDGPU_FW_LOAD_PSP;
}
-
- return AMDGPU_FW_LOAD_DIRECT;
}
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
@@ -508,7 +509,7 @@ static ssize_t show_##name(struct device *dev, \
struct drm_device *ddev = dev_get_drvdata(dev); \
struct amdgpu_device *adev = drm_to_adev(ddev); \
\
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field); \
+ return sysfs_emit(buf, "0x%08x\n", adev->field); \
} \
static DEVICE_ATTR(name, mode, show_##name, NULL)
@@ -525,9 +526,9 @@ FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
-FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd.fw_version);
-FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras.feature_version);
-FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi.feature_version);
+FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
+FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
@@ -572,6 +573,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
+ u8 *ucode_addr;
if (NULL == ucode->fw)
return 0;
@@ -588,94 +590,83 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
- (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES &&
- ucode->ucode_id != AMDGPU_UCODE_ID_CP_MES_DATA &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_IRAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_RLC_DRAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
- ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
- le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
- ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
- ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes) +
- le32_to_cpu(cp_hdr->jt_offset) * 4),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
- ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CP_MEC1:
+ case AMDGPU_UCODE_ID_CP_MEC2:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+ le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MEC1_JT:
+ case AMDGPU_UCODE_ID_CP_MEC2_JT:
+ ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(cp_hdr->jt_offset) * 4;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
+ ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
+ ucode_addr = adev->gfx.rlc.save_restore_list_srm;
+ break;
+ case AMDGPU_UCODE_ID_RLC_IRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_RLC_DRAM:
+ ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
+ ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
+ break;
+ case AMDGPU_UCODE_ID_CP_MES:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_CP_MES_DATA:
+ ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_ERAM:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
le32_to_cpu(dmcu_hdr->intv_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
- ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
-
- memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes) +
- le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
- ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
- memcpy(ucode->kaddr,
- (void *)((uint8_t *)ucode->fw->data +
- le32_to_cpu(header->ucode_array_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
- ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_IRAM) {
- ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.rlc_iram_ucode,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_DRAM) {
- ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
- memcpy(ucode->kaddr, adev->gfx.rlc.rlc_dram_ucode,
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES) {
- ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
- memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
- le32_to_cpu(mes_hdr->mes_ucode_offset_bytes)),
- ucode->ucode_size);
- } else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MES_DATA) {
- ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
- memcpy(ucode->kaddr, (void *)((uint8_t *)adev->mes.fw->data +
- le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes)),
- ucode->ucode_size);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCU_INTV:
+ ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes) +
+ le32_to_cpu(dmcu_hdr->intv_offset_bytes);
+ break;
+ case AMDGPU_UCODE_ID_DMCUB:
+ ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ default:
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
+ break;
+ }
+ } else {
+ ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
+ ucode_addr = (u8 *)ucode->fw->data +
+ le32_to_cpu(header->ucode_array_offset_bytes);
}
+ memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
index e5a75fb788dd..1f5fe2315236 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h
@@ -48,6 +48,7 @@ struct amdgpu_umc_ras_funcs {
void *ras_error_status);
void (*query_ras_error_address)(struct amdgpu_device *adev,
void *ras_error_status);
+ bool (*query_ras_poison_mode)(struct amdgpu_device *adev);
};
struct amdgpu_umc_funcs {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
new file mode 100644
index 000000000000..919d9d401750
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umr.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/ioctl.h>
+
+/*
+ * MMIO debugfs IOCTL structure
+ */
+struct amdgpu_debugfs_regs2_iocdata {
+ __u32 use_srbm, use_grbm, pg_lock;
+ struct {
+ __u32 se, sh, instance;
+ } grbm;
+ struct {
+ __u32 me, pipe, queue, vmid;
+ } srbm;
+};
+
+/*
+ * MMIO debugfs state data (per file* handle)
+ */
+struct amdgpu_debugfs_regs2_data {
+ struct amdgpu_device *adev;
+ struct mutex lock;
+ struct amdgpu_debugfs_regs2_iocdata id;
+};
+
+enum AMDGPU_DEBUGFS_REGS2_CMDS {
+ AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE=0,
+};
+
+#define AMDGPU_DEBUGFS_REGS2_IOC_SET_STATE _IOWR(0x20, AMDGPU_DEBUGFS_REGS2_CMD_SET_STATE, struct amdgpu_debugfs_regs2_iocdata)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index d451c359606a..6f8de11a17f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -134,6 +134,51 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12);
MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
+static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *abo);
+
+static int amdgpu_uvd_create_msg_bo_helper(struct amdgpu_device *adev,
+ uint32_t size,
+ struct amdgpu_bo **bo_ptr)
+{
+ struct ttm_operation_ctx ctx = { true, false };
+ struct amdgpu_bo *bo = NULL;
+ void *addr;
+ int r;
+
+ r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE,
+ AMDGPU_GEM_DOMAIN_GTT,
+ &bo, NULL, &addr);
+ if (r)
+ return r;
+
+ if (adev->uvd.address_64_bit)
+ goto succ;
+
+ amdgpu_bo_kunmap(bo);
+ amdgpu_bo_unpin(bo);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_uvd_force_into_uvd_segment(bo);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto err;
+ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r)
+ goto err_pin;
+ r = amdgpu_bo_kmap(bo, &addr);
+ if (r)
+ goto err_kmap;
+succ:
+ amdgpu_bo_unreserve(bo);
+ *bo_ptr = bo;
+ return 0;
+err_kmap:
+ amdgpu_bo_unpin(bo);
+err_pin:
+err:
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+ return r;
+}
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
@@ -302,6 +347,10 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
if (!amdgpu_device_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
adev->uvd.address_64_bit = true;
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 128 << 10, &adev->uvd.ib_bo);
+ if (r)
+ return r;
+
switch (adev->asic_type) {
case CHIP_TONGA:
adev->uvd.use_ctx_buf = adev->uvd.fw_version >= FW_1_65_10;
@@ -324,6 +373,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
+ void *addr = amdgpu_bo_kptr(adev->uvd.ib_bo);
int i, j;
drm_sched_entity_destroy(&adev->uvd.entity);
@@ -342,6 +392,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_UVD_ENC_RINGS; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
+ amdgpu_bo_free_kernel(&adev->uvd.ib_bo, NULL, &addr);
release_firmware(adev->uvd.fw);
return 0;
@@ -403,7 +454,7 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
/* re-write 0 since err_event_athub will corrupt VCPU buffer */
if (in_ras_intr)
memset(adev->uvd.inst[j].saved_bo, 0, size);
@@ -436,7 +487,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
ptr = adev->uvd.inst[i].cpu_addr;
if (adev->uvd.inst[i].saved_bo != NULL) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
drm_dev_exit(idx);
}
@@ -449,7 +500,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->uvd.inst[i].cpu_addr, adev->uvd.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
@@ -1080,23 +1131,10 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
unsigned offset_idx = 0;
unsigned offset[3] = { UVD_BASE_SI, 0, 0 };
- amdgpu_bo_kunmap(bo);
- amdgpu_bo_unpin(bo);
-
- if (!ring->adev->uvd.address_64_bit) {
- struct ttm_operation_ctx ctx = { true, false };
-
- amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
- amdgpu_uvd_force_into_uvd_segment(bo);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (r)
- goto err;
- }
-
r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT :
AMDGPU_IB_POOL_DELAYED, &job);
if (r)
- goto err;
+ return r;
if (adev->asic_type >= CHIP_VEGA10) {
offset_idx = 1 + ring->me;
@@ -1147,9 +1185,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
}
+ amdgpu_bo_reserve(bo, true);
amdgpu_bo_fence(bo, f, false);
amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
if (fence)
*fence = dma_fence_get(f);
@@ -1159,10 +1197,6 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
err_free:
amdgpu_job_free(job);
-
-err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
@@ -1173,16 +1207,11 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = adev->uvd.ib_bo;
uint32_t *msg;
- int r, i;
-
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &bo, NULL, (void **)&msg);
- if (r)
- return r;
+ int i;
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD create msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000000);
@@ -1199,6 +1228,7 @@ int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
msg[i] = cpu_to_le32(0x0);
return amdgpu_uvd_send_msg(ring, bo, true, fence);
+
}
int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
@@ -1209,12 +1239,15 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
uint32_t *msg;
int r, i;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_GTT,
- &bo, NULL, (void **)&msg);
- if (r)
- return r;
+ if (direct) {
+ bo = adev->uvd.ib_bo;
+ } else {
+ r = amdgpu_uvd_create_msg_bo_helper(adev, 4096, &bo);
+ if (r)
+ return r;
+ }
+ msg = amdgpu_bo_kptr(bo);
/* stitch together an UVD destroy msg */
msg[0] = cpu_to_le32(0x00000de4);
msg[1] = cpu_to_le32(0x00000002);
@@ -1223,7 +1256,12 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
for (i = 4; i < 1024; ++i)
msg[i] = cpu_to_le32(0x0);
- return amdgpu_uvd_send_msg(ring, bo, direct, fence);
+ r = amdgpu_uvd_send_msg(ring, bo, direct, fence);
+
+ if (!direct)
+ amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+
+ return r;
}
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
@@ -1298,10 +1336,17 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct dma_fence *fence;
long r;
- r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
+ r = amdgpu_uvd_get_create_msg(ring, 1, &fence);
if (r)
goto error;
+ r = dma_fence_wait_timeout(fence, false, timeout);
+ dma_fence_put(fence);
+ if (r == 0)
+ r = -ETIMEDOUT;
+ if (r < 0)
+ goto error;
+
r = amdgpu_uvd_get_destroy_msg(ring, 1, true, &fence);
if (r)
goto error;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index edbb8194ee81..76ac9699885d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -68,6 +68,7 @@ struct amdgpu_uvd {
/* store image width to adjust nb memory state */
unsigned decode_image_width;
uint32_t keyselect;
+ struct amdgpu_bo *ib_bo;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 8e8dee9fac9f..688bef1649b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -82,7 +82,6 @@ MODULE_FIRMWARE(FIRMWARE_VEGA20);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
struct dma_fence **fence);
static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
bool direct, struct dma_fence **fence);
@@ -314,7 +313,7 @@ int amdgpu_vce_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vce.fw->data;
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(cpu_addr, adev->vce.fw->data + offset,
adev->vce.fw->size - offset);
drm_dev_exit(idx);
@@ -441,12 +440,12 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
* Open up a stream for HW test
*/
static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 1024;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
+ struct amdgpu_ib ib_msg;
struct dma_fence *f = NULL;
uint64_t addr;
int i, r;
@@ -456,9 +455,17 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
if (r)
return r;
- ib = &job->ibs[0];
+ memset(&ib_msg, 0, sizeof(ib_msg));
+ /* only one gpu page is needed, alloc +1 page to make addr aligned. */
+ r = amdgpu_ib_get(ring->adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib_msg);
+ if (r)
+ goto err;
- addr = amdgpu_bo_gpu_offset(bo);
+ ib = &job->ibs[0];
+ /* let addr point to page boundary */
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg.gpu_addr);
/* stitch together an VCE create msg */
ib->length_dw = 0;
@@ -498,6 +505,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
ib->ptr[i] = 0x0;
r = amdgpu_job_submit_direct(job, ring, &f);
+ amdgpu_ib_free(ring->adev, &ib_msg, f);
if (r)
goto err;
@@ -1134,20 +1142,13 @@ int amdgpu_vce_ring_test_ring(struct amdgpu_ring *ring)
int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
long r;
/* skip vce ring1/2 ib test for now, since it's not reliable */
if (ring != &ring->adev->vce.ring[0])
return 0;
- r = amdgpu_bo_create_reserved(ring->adev, 512, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
- r = amdgpu_vce_get_create_msg(ring, 1, bo, NULL);
+ r = amdgpu_vce_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -1163,7 +1164,19 @@ int amdgpu_vce_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
+
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring)
+{
+ switch (ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index d6d83a3ec803..be4a6e773c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -71,5 +71,6 @@ void amdgpu_vce_ring_begin_use(struct amdgpu_ring *ring);
void amdgpu_vce_ring_end_use(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_emit_ib_size(struct amdgpu_ring *ring);
unsigned amdgpu_vce_ring_get_dma_frame_size(struct amdgpu_ring *ring);
+enum amdgpu_ring_priority_level amdgpu_vce_get_ring_prio(int ring);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 008a308a4eca..2658414c503d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -86,8 +86,9 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; i++)
atomic_set(&adev->vcn.inst[i].dpg_enc_submission_cnt, 0);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
fw_name = FIRMWARE_RAVEN2;
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -95,13 +96,13 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
else
fw_name = FIRMWARE_RAVEN;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(2, 5, 0):
fw_name = FIRMWARE_ARCTURUS;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(2, 2, 0):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
fw_name = FIRMWARE_RENOIR;
else
@@ -111,58 +112,52 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(2, 6, 0):
fw_name = FIRMWARE_ALDEBARAN;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVI10:
+ case IP_VERSION(2, 0, 0):
fw_name = FIRMWARE_NAVI10;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVI14:
- fw_name = FIRMWARE_NAVI14;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case CHIP_NAVI12:
- fw_name = FIRMWARE_NAVI12;
- if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
- (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
- adev->vcn.indirect_sram = true;
- break;
- case CHIP_SIENNA_CICHLID:
- fw_name = FIRMWARE_SIENNA_CICHLID;
+ case IP_VERSION(2, 0, 2):
+ if (adev->asic_type == CHIP_NAVI12)
+ fw_name = FIRMWARE_NAVI12;
+ else
+ fw_name = FIRMWARE_NAVI14;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_NAVY_FLOUNDER:
- fw_name = FIRMWARE_NAVY_FLOUNDER;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+ fw_name = FIRMWARE_SIENNA_CICHLID;
+ else
+ fw_name = FIRMWARE_NAVY_FLOUNDER;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 2):
fw_name = FIRMWARE_VANGOGH;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(3, 0, 16):
fw_name = FIRMWARE_DIMGREY_CAVEFISH;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 33):
fw_name = FIRMWARE_BEIGE_GOBY;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
adev->vcn.indirect_sram = true;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(3, 1, 1):
fw_name = FIRMWARE_YELLOW_CARP;
if ((adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) &&
(adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG))
@@ -330,7 +325,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
if (!adev->vcn.inst[i].saved_bo)
return -ENOMEM;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(adev->vcn.inst[i].saved_bo, ptr, size);
drm_dev_exit(idx);
}
@@ -354,7 +349,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
ptr = adev->vcn.inst[i].cpu_addr;
if (adev->vcn.inst[i].saved_bo != NULL) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(ptr, adev->vcn.inst[i].saved_bo, size);
drm_dev_exit(idx);
}
@@ -367,7 +362,7 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_toio(adev->vcn.inst[i].cpu_addr, adev->vcn.fw->data + offset,
le32_to_cpu(hdr->ucode_size_bytes));
drm_dev_exit(idx);
@@ -541,15 +536,14 @@ int amdgpu_vcn_dec_sw_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
struct amdgpu_device *adev = ring->adev;
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr;
- void *msg = NULL;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
r = amdgpu_job_alloc_with_ib(adev, 64,
@@ -558,8 +552,6 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
- msg = amdgpu_bo_kptr(bo);
ib->ptr[0] = PACKET0(adev->vcn.internal.data0, 0);
ib->ptr[1] = addr;
ib->ptr[2] = PACKET0(adev->vcn.internal.data1, 0);
@@ -576,9 +568,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+ amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -588,27 +578,26 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
-
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, (void **)&msg);
+ amdgpu_ib_free(adev, ib_msg, f);
return r;
}
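
The decode test-message helpers above now carry their payload in an IB taken from the direct pool instead of a reserved VRAM BO, so tear-down collapses to a single amdgpu_ib_free() against the fence. A condensed sketch of that life cycle, using only calls that appear in this hunk (the function name is invented for illustration):

/* Sketch of the message-buffer life cycle used above; not a drop-in helper. */
static int vcn_msg_ib_example(struct amdgpu_device *adev, struct dma_fence *f)
{
        struct amdgpu_ib ib;
        uint32_t *msg;
        int r;

        memset(&ib, 0, sizeof(ib));
        /* Two GPU pages, so the payload can be aligned up to a page boundary. */
        r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
                          AMDGPU_IB_POOL_DIRECT, &ib);
        if (r)
                return r;

        /* CPU writes go through the page-aligned view of the CPU mapping ... */
        msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib.ptr);
        msg[0] = cpu_to_le32(0x00000028);

        /* ... while the ring is handed the matching page-aligned GPU address,
         * AMDGPU_GPU_PAGE_ALIGN(ib.gpu_addr), by the send helpers above. */

        /* Success and error paths both release the buffer against the fence. */
        amdgpu_ib_free(adev, &ib, f);
        return 0;
}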
static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
- *bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
if (r)
return r;
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000038);
msg[2] = cpu_to_le32(0x00000001);
@@ -630,19 +619,20 @@ static int amdgpu_vcn_dec_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
}
static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo **bo)
+ struct amdgpu_ib *ib)
{
struct amdgpu_device *adev = ring->adev;
uint32_t *msg;
int r, i;
- *bo = NULL;
- r = amdgpu_bo_create_reserved(adev, 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- bo, NULL, (void **)&msg);
+ memset(ib, 0, sizeof(*ib));
+ r = amdgpu_ib_get(adev, NULL, AMDGPU_GPU_PAGE_SIZE * 2,
+ AMDGPU_IB_POOL_DIRECT,
+ ib);
if (r)
return r;
+ msg = (uint32_t *)AMDGPU_GPU_PAGE_ALIGN((unsigned long)ib->ptr);
msg[0] = cpu_to_le32(0x00000028);
msg[1] = cpu_to_le32(0x00000018);
msg[2] = cpu_to_le32(0x00000000);
@@ -658,21 +648,21 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_send_msg(ring, bo, NULL);
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_send_msg(ring, bo, &fence);
+ r = amdgpu_vcn_dec_send_msg(ring, &ib, &fence);
if (r)
goto error;
@@ -688,8 +678,8 @@ error:
}
static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
- struct amdgpu_bo *bo,
- struct dma_fence **fence)
+ struct amdgpu_ib *ib_msg,
+ struct dma_fence **fence)
{
struct amdgpu_vcn_decode_buffer *decode_buffer = NULL;
const unsigned int ib_size_dw = 64;
@@ -697,7 +687,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
struct dma_fence *f = NULL;
struct amdgpu_job *job;
struct amdgpu_ib *ib;
- uint64_t addr;
+ uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
int i, r;
r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4,
@@ -706,7 +696,6 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
goto err;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = sizeof(struct amdgpu_vcn_decode_buffer) + 8;
@@ -726,9 +715,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
if (r)
goto err_free;
- amdgpu_bo_fence(bo, f, false);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_ib_free(adev, ib_msg, f);
if (fence)
*fence = dma_fence_get(f);
@@ -738,31 +725,29 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring,
err_free:
amdgpu_job_free(job);
-
err:
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
+ amdgpu_ib_free(adev, ib_msg, f);
return r;
}
int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_vcn_dec_get_create_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_create_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_sw_send_msg(ring, bo, NULL);
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &bo);
+ r = amdgpu_vcn_dec_get_destroy_msg(ring, 1, &ib);
if (r)
goto error;
- r = amdgpu_vcn_dec_sw_send_msg(ring, bo, &fence);
+ r = amdgpu_vcn_dec_sw_send_msg(ring, &ib, &fence);
if (r)
goto error;
@@ -809,7 +794,7 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
}
static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
@@ -825,7 +810,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand
return r;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -863,7 +848,7 @@ err:
}
static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
- struct amdgpu_bo *bo,
+ struct amdgpu_ib *ib_msg,
struct dma_fence **fence)
{
const unsigned ib_size_dw = 16;
@@ -879,7 +864,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
return r;
ib = &job->ibs[0];
- addr = amdgpu_bo_gpu_offset(bo);
+ addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr);
ib->length_dw = 0;
ib->ptr[ib->length_dw++] = 0x00000018;
@@ -918,21 +903,23 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
+ struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_ib ib;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
+ memset(&ib, 0, sizeof(ib));
+ r = amdgpu_ib_get(adev, NULL, (128 << 10) + AMDGPU_GPU_PAGE_SIZE,
+ AMDGPU_IB_POOL_DIRECT,
+ &ib);
if (r)
return r;
- r = amdgpu_vcn_enc_get_create_msg(ring, 1, bo, NULL);
+ r = amdgpu_vcn_enc_get_create_msg(ring, 1, &ib, NULL);
if (r)
goto error;
- r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, bo, &fence);
+ r = amdgpu_vcn_enc_get_destroy_msg(ring, 1, &ib, &fence);
if (r)
goto error;
@@ -943,9 +930,49 @@ int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = 0;
error:
+ amdgpu_ib_free(adev, &ib, fence);
dma_fence_put(fence);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_free_kernel(&bo, NULL, NULL);
return r;
}
+
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring)
+{
+ switch(ring) {
+ case 0:
+ return AMDGPU_RING_PRIO_0;
+ case 1:
+ return AMDGPU_RING_PRIO_1;
+ case 2:
+ return AMDGPU_RING_PRIO_2;
+ default:
+ return AMDGPU_RING_PRIO_0;
+ }
+}
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev)
+{
+ int i;
+ unsigned int idx;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ /* currently only support 2 FW instances */
+ if (i >= 2) {
+ dev_info(adev->dev, "More than 2 VCN FW instances!\n");
+ break;
+ }
+ idx = AMDGPU_UCODE_ID_VCN + i;
+ adev->firmware.ucode[idx].ucode_id = idx;
+ adev->firmware.ucode[idx].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ }
+ dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
+ }
+}
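
The firmware switch at the top of this file is representative of the whole series: per-ASIC CHIP_* enums give way to the per-block versions read from the IP discovery table. A short sketch of the version encoding this relies on; the exact shift layout is an assumption from context, but it is what makes both the equality cases above and ordered checks such as adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0) in the gfx hunks below well defined:

/* Assumed encoding: major in bits 31:16, minor in 15:8, revision in 7:0. */
#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

/*
 * adev->ip_versions[HWIP][instance] is filled from the discovery table, so one
 * block version can span several marketing names: IP_VERSION(3, 0, 0) above
 * covers both Sienna Cichlid and Navy Flounder, which is why the GC block
 * version is consulted there to pick the right firmware file.
 */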
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d74c62b49795..bfa27ea94804 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -308,4 +308,8 @@ int amdgpu_vcn_dec_sw_ring_test_ib(struct amdgpu_ring *ring, long timeout);
int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring);
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout);
+enum amdgpu_ring_priority_level amdgpu_vcn_get_enc_ring_prio(int ring);
+
+void amdgpu_vcn_setup_ucode(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ca058fbcccd4..04cf9b207e62 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -532,9 +532,12 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd.fw_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ras.feature_version);
- POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.xgmi.feature_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD,
+ adev->psp.asd_context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS,
+ adev->psp.ras_context.context.bin_desc.fw_version);
+ POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI,
+ adev->psp.xgmi_context.context.bin_desc.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
@@ -581,6 +584,7 @@ static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
vf2pf_info->encode_usage = 0;
vf2pf_info->decode_usage = 0;
+ vf2pf_info->dummy_page_addr = (uint64_t)adev->dummy_page_addr;
vf2pf_info->checksum =
amd_sriov_msg_checksum(
vf2pf_info, vf2pf_info->header.size, 0, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6b15cad78de9..0e7dc23f78e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -800,7 +800,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
struct amdgpu_bo *bo = &vmbo->bo;
unsigned entries, ats_entries;
uint64_t addr;
- int r;
+ int r, idx;
/* Figure out our place in the hierarchy */
if (ancestor->parent) {
@@ -845,9 +845,12 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
return r;
}
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
r = vm->update_funcs->map_table(vmbo);
if (r)
- return r;
+ goto exit;
memset(&params, 0, sizeof(params));
params.adev = adev;
@@ -856,7 +859,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
- return r;
+ goto exit;
addr = 0;
if (ats_entries) {
@@ -872,7 +875,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->update(&params, vmbo, addr, 0, ats_entries,
value, flags);
if (r)
- return r;
+ goto exit;
addr += ats_entries * 8;
}
@@ -895,10 +898,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
r = vm->update_funcs->update(&params, vmbo, addr, 0, entries,
value, flags);
if (r)
- return r;
+ goto exit;
}
- return vm->update_funcs->commit(&params, NULL);
+ r = vm->update_funcs->commit(&params, NULL);
+exit:
+ drm_dev_exit(idx);
+ return r;
}
/**
@@ -1384,11 +1390,14 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- int r;
+ int r, idx;
if (list_empty(&vm->relocated))
return 0;
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
+ return -ENODEV;
+
memset(&params, 0, sizeof(params));
params.adev = adev;
params.vm = vm;
@@ -1396,7 +1405,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
- return r;
+ goto exit;
while (!list_empty(&vm->relocated)) {
struct amdgpu_vm_bo_base *entry;
@@ -1414,10 +1423,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
r = vm->update_funcs->commit(&params, &vm->last_update);
if (r)
goto error;
+ drm_dev_exit(idx);
return 0;
error:
amdgpu_vm_invalidate_pds(adev, vm);
+exit:
+ drm_dev_exit(idx);
return r;
}
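
The drm_dev_enter()/drm_dev_exit() pairs added here (and in the VCN suspend/resume hunks earlier) make the page-table updates safe against device unplug: hardware access is simply skipped once the DRM device is gone. The general shape, as a hedged sketch (the function name and the placeholder body are illustrative only):

/* Hotplug guard pattern used above; name and body placeholder are illustrative. */
static int example_guarded_update(struct amdgpu_device *adev)
{
        int idx, r;

        /* Returns false once the device has been unplugged: skip all HW access. */
        if (!drm_dev_enter(adev_to_drm(adev), &idx))
                return -ENODEV;

        r = 0;          /* ... register / page-table work goes here ... */

        /* Every successful drm_dev_enter() must be paired with drm_dev_exit(). */
        drm_dev_exit(idx);
        return r;
}

In amdgpu_vm_update_pdes() above, the error path funnels through the same exit label, so the reference taken by drm_dev_enter() is always dropped.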
@@ -1706,7 +1718,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
enum amdgpu_sync_mode sync_mode;
int r, idx;
- if (!drm_dev_enter(&adev->ddev, &idx))
+ if (!drm_dev_enter(adev_to_drm(adev), &idx))
return -ENODEV;
memset(&params, 0, sizeof(params));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index a434c71fde8e..7326b6c1b71c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -204,8 +204,10 @@ struct amd_sriov_msg_pf2vf_info {
} mm_bw_management[AMD_SRIOV_MSG_RESERVE_VCN_INST];
/* UUID info */
struct amd_sriov_msg_uuid_info uuid_info;
+ /* pcie atomic Ops info */
+ uint32_t pcie_atomic_ops_enabled_flags;
/* reserved */
- uint32_t reserved[256 - 47];
+ uint32_t reserved[256 - 48];
};
struct amd_sriov_msg_vf2pf_info_header {
@@ -259,9 +261,10 @@ struct amd_sriov_msg_vf2pf_info {
uint8_t id;
uint32_t version;
} ucode_info[AMD_SRIOV_MSG_RESERVE_UCODE];
+ uint64_t dummy_page_addr;
/* reserved */
- uint32_t reserved[256-68];
+ uint32_t reserved[256-70];
};
/* mailbox message send from guest to host */
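
The reserved[] adjustments above are bookkeeping: new fields are paid for out of the trailing reserved array so each message keeps its overall dword budget (presumably a fixed 256-dword layout shared with the host). Spelled out, with the counts taken straight from the hunks:

/*
 * pf2vf: +1 uint32_t (pcie_atomic_ops_enabled_flags) = 1 dword,
 *        reserved[256 - 47]  ->  reserved[256 - 48]
 * vf2pf: +1 uint64_t (dummy_page_addr)               = 2 dwords,
 *        reserved[256 - 68]  ->  reserved[256 - 70]
 */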
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
index 3ac505d954c4..ab6a07e5e8c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c
@@ -77,10 +77,9 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
athub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
athub_v2_0_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
index c12c2900732b..2edefd10e56c 100644
--- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c
@@ -70,11 +70,10 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[ATHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE);
athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c b/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
deleted file mode 100644
index 608a113ce354..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/beige_goby_reg_init.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2020 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "beige_goby_ip_offset.h"
-
-int beige_goby_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialize the block needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c b/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
deleted file mode 100644
index 58808814d8fb..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/cyan_skillfish_reg_init.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "cyan_skillfish_ip_offset.h"
-
-int cyan_skillfish_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocke needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
index 14514a145c17..43c5e3ec9a39 100644
--- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
+++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c
@@ -637,6 +637,36 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev,
}
}
+static bool df_v3_6_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t hw_assert_msklo, hw_assert_mskhi;
+ uint32_t v0, v1, v28, v31;
+
+ hw_assert_msklo = RREG32_SOC15(DF, 0,
+ mmDF_CS_UMC_AON0_HardwareAssertMaskLow);
+ hw_assert_mskhi = RREG32_SOC15(DF, 0,
+ mmDF_NCS_PG0_HardwareAssertMaskHigh);
+
+ v0 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk0);
+ v1 = REG_GET_FIELD(hw_assert_msklo,
+ DF_CS_UMC_AON0_HardwareAssertMaskLow, HWAssertMsk1);
+ v28 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk28);
+ v31 = REG_GET_FIELD(hw_assert_mskhi,
+ DF_NCS_PG0_HardwareAssertMaskHigh, HWAssertMsk31);
+
+ if (v0 && v1 && v28 && v31)
+ return true;
+ else if (!v0 && !v1 && !v28 && !v31)
+ return false;
+ else {
+ dev_warn(adev->dev, "DF poison setting is inconsistent(%d:%d:%d:%d)!\n",
+ v0, v1, v28, v31);
+ return false;
+ }
+}
+
const struct amdgpu_df_funcs df_v3_6_funcs = {
.sw_init = df_v3_6_sw_init,
.sw_fini = df_v3_6_sw_fini,
@@ -651,4 +681,5 @@ const struct amdgpu_df_funcs df_v3_6_funcs = {
.pmc_get_count = df_v3_6_pmc_get_count,
.get_fica = df_v3_6_get_fica,
.set_fica = df_v3_6_set_fica,
+ .query_ras_poison_mode = df_v3_6_query_ras_poison_mode,
};
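
The new df_v3_6_query_ras_poison_mode() reports poison mode as enabled only when all four hardware-assert mask bits agree, and warns on a mixed setting. A hedged sketch of a caller; reaching the hook through adev->df.funcs is an assumption here, not something this diff shows:

/* Hypothetical caller; the adev->df.funcs path is assumed, not taken from this hunk. */
static bool example_df_poison_mode(struct amdgpu_device *adev)
{
        if (adev->df.funcs && adev->df.funcs->query_ras_poison_mode)
                return adev->df.funcs->query_ras_poison_mode(adev);
        return false;   /* no DF hook: behave as if poison mode is off */
}

/* true  -> all of HWAssertMsk{0,1,28,31} are set (poison consumption enabled)
 * false -> all clear, or an inconsistent mix (dev_warn() is printed for the latter) */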
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 16dbe593cba2..90a834dc4008 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -270,25 +270,6 @@ MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_mec2.bin");
MODULE_FIRMWARE("amdgpu/cyan_skillfish2_rlc.bin");
-static const struct soc15_reg_golden golden_settings_gc_10_0[] =
-{
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_INDEX, 0xffffffff, 0x00000000),
- /* TA_GRAD_ADJ_UCONFIG -> TA_GRAD_ADJ */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2544c382),
- /* VGT_TF_RING_SIZE_UMD -> VGT_TF_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2262c24e),
- /* VGT_HS_OFFCHIP_PARAM_UMD -> VGT_HS_OFFCHIP_PARAM */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226cc24f),
- /* VGT_TF_MEMORY_BASE_UMD -> VGT_TF_MEMORY_BASE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x226ec250),
- /* VGT_TF_MEMORY_BASE_HI_UMD -> VGT_TF_MEMORY_BASE_HI */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2278c261),
- /* VGT_ESGS_RING_SIZE_UMD -> VGT_ESGS_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2232c240),
- /* VGT_GSVS_RING_SIZE_UMD -> VGT_GSVS_RING_SIZE */
- SOC15_REG_GOLDEN_VALUE(GC, 0, mmGRBM_CAM_DATA, 0xffffffff, 0x2233c241),
-};
-
static const struct soc15_reg_golden golden_settings_gc_10_1[] =
{
SOC15_REG_GOLDEN_VALUE(GC, 0, mmCB_HW_CONTROL_4, 0xffffffff, 0x00400014),
@@ -1537,7 +1518,7 @@ static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32
scratch_reg3 = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
spare_int = adev->rmmio +
(adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX]
+ mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4;
@@ -3727,18 +3708,18 @@ static void gfx_v10_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_0_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_rlc_spm_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_rlc_spm_10_1_2_nv12));
@@ -3750,8 +3731,8 @@ static void gfx_v10_0_init_spm_golden_registers(struct amdgpu_device *adev)
static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1));
@@ -3759,7 +3740,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_0_nv10,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_1,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_1));
@@ -3767,7 +3748,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_1_nv14,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_10_1_2,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2));
@@ -3775,7 +3756,7 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_1_2_nv12,
(const u32)ARRAY_SIZE(golden_settings_gc_10_1_2_nv12));
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3));
@@ -3783,35 +3764,32 @@ static void gfx_v10_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_10_3_sienna_cichlid,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_sienna_cichlid));
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_2,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_2));
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_vangogh,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_vangogh));
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_3,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_3));
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_4,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_4));
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
soc15_program_register_sequence(adev,
golden_settings_gc_10_3_5,
(const u32)ARRAY_SIZE(golden_settings_gc_10_3_5));
break;
- case CHIP_CYAN_SKILLFISH:
- soc15_program_register_sequence(adev,
- golden_settings_gc_10_0,
- (const u32)ARRAY_SIZE(golden_settings_gc_10_0));
+ case IP_VERSION(10, 1, 3):
soc15_program_register_sequence(adev,
golden_settings_gc_10_0_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_gc_10_0_cyan_skillfish));
@@ -3985,11 +3963,11 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
{
adev->gfx.cp_fw_write_wait = false;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
if ((adev->gfx.me_fw_version >= 0x00000046) &&
(adev->gfx.me_feature_version >= 27) &&
(adev->gfx.pfp_fw_version >= 0x00000068) &&
@@ -3998,12 +3976,12 @@ static void gfx_v10_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 27))
adev->gfx.cp_fw_write_wait = true;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.cp_fw_write_wait = true;
break;
default:
@@ -4066,8 +4044,8 @@ static bool gfx_v10_0_navi10_gfxoff_should_enable(struct amdgpu_device *adev)
static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
if (!gfx_v10_0_navi10_gfxoff_should_enable(adev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
break;
@@ -4093,38 +4071,38 @@ static int gfx_v10_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
chip_name = "navi14";
if (!(adev->pdev->device == 0x7340 &&
adev->pdev->revision != 0x00))
wks = "_wks";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
chip_name = "navi12";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
chip_name = "beige_goby";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
chip_name = "yellow_carp";
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
@@ -4684,10 +4662,10 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.funcs = &gfx_v10_0_gfx_funcs;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4695,12 +4673,12 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = RREG32_SOC15(GC, 0, mmGB_ADDR_CONFIG);
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4710,7 +4688,7 @@ static void gfx_v10_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.gb_addr_config_fields.num_pkrs =
1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS);
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -4818,11 +4796,11 @@ static int gfx_v10_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -4830,12 +4808,12 @@ static int gfx_v10_0_sw_init(void *handle)
adev->gfx.mec.num_pipe_per_mec = 4;
adev->gfx.mec.num_queue_per_pipe = 8;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.me.num_me = 1;
adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
@@ -5068,8 +5046,8 @@ static void gfx_v10_0_setup_rb(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (adev->asic_type == CHIP_YELLOW_CARP)) &&
+ if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
gfx_v10_0_select_se_sh(adev, i, j, 0xffffffff);
@@ -5096,7 +5074,7 @@ static u32 gfx_v10_0_init_pa_sc_tile_steering_override(struct amdgpu_device *ade
/* for ASICs that integrates GFX v10.3
* pa_sc_tile_steering_override should be set to 0 */
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
return 0;
/* init num_sc */
@@ -5249,7 +5227,7 @@ static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev)
/* TCCs are global (not instanced). */
uint32_t tcc_disable;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_gc_10_3) |
RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_gc_10_3);
} else {
@@ -5326,7 +5304,7 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
@@ -5948,7 +5926,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, PFP_HALT, enable ? 0 : 1);
tmp = REG_SET_FIELD(tmp, CP_ME_CNTL, CE_HALT, enable ? 0 : 1);
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
} else {
WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
@@ -6337,13 +6315,13 @@ static void gfx_v10_0_cp_gfx_set_doorbell(struct amdgpu_device *adev,
}
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_CONTROL, tmp);
}
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
tmp = REG_SET_FIELD(0, CP_RB_DOORBELL_RANGE_LOWER,
DOORBELL_RANGE_LOWER_Sienna_Cichlid, ring->doorbell_index);
WREG32_SOC15(GC, 0, mmCP_RB_DOORBELL_RANGE_LOWER, tmp);
@@ -6474,13 +6452,13 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
if (enable) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid, 0);
break;
default:
@@ -6488,13 +6466,13 @@ static void gfx_v10_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
break;
}
} else {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmCP_MEC_CNTL_Sienna_Cichlid,
(CP_MEC_CNTL__MEC_ME1_HALT_MASK |
CP_MEC_CNTL__MEC_ME2_HALT_MASK));
@@ -6586,13 +6564,13 @@ static void gfx_v10_0_kiq_setting(struct amdgpu_ring *ring)
struct amdgpu_device *adev = ring->adev;
/* tell RLC which is KIQ queue */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue);
@@ -7303,11 +7281,11 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
/* check if mmVGT_ESGS_RING_SIZE_UMD
* has been remapped to mmVGT_ESGS_RING_SIZE */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_Sienna_Cichlid, 0);
WREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE_UMD, pattern);
@@ -7320,8 +7298,8 @@ static bool gfx_v10_0_check_grbm_cam_remapping(struct amdgpu_device *adev)
return false;
}
break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
return true;
default:
data = RREG32_SOC15(GC, 0, mmVGT_ESGS_RING_SIZE);
@@ -7350,13 +7328,13 @@ static void gfx_v10_0_setup_grbm_cam_remapping(struct amdgpu_device *adev)
* index will auto-inc after each data writting */
WREG32_SOC15(GC, 0, mmGRBM_CAM_INDEX, 0);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
/* mmVGT_TF_RING_SIZE_UMD -> mmVGT_TF_RING_SIZE */
data = (SOC15_REG_OFFSET(GC, 0, mmVGT_TF_RING_SIZE_UMD) <<
GRBM_CAM_DATA__CAM_ADDR__SHIFT) |
@@ -7520,19 +7498,19 @@ static int gfx_v10_0_hw_init(void *handle)
* init golden registers and rlc resume may override some registers,
* reconfig them here
*/
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14 ||
- adev->asic_type == CHIP_NAVI12)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2))
gfx_v10_0_tcp_harvest(adev);
r = gfx_v10_0_cp_resume(adev);
if (r)
return r;
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
gfx_v10_3_program_pbb_mode(adev);
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
gfx_v10_3_set_power_brake_sequence(adev);
return r;
@@ -7584,7 +7562,7 @@ static int gfx_v10_0_hw_fini(void *handle)
if (amdgpu_sriov_vf(adev)) {
gfx_v10_0_cp_gfx_enable(adev, false);
/* Program KIQ position of RLC_CP_SCHEDULERS during destroy */
- if (adev->asic_type >= CHIP_SIENNA_CICHLID) {
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) {
tmp = RREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid);
tmp &= 0xffffff00;
WREG32_SOC15(GC, 0, mmRLC_CP_SCHEDULERS_Sienna_Cichlid, tmp);
@@ -7670,13 +7648,13 @@ static int gfx_v10_0_soft_reset(void *handle)
/* GRBM_STATUS2 */
tmp = RREG32_SOC15(GC, 0, mmGRBM_STATUS2);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
if (REG_GET_FIELD(tmp, GRBM_STATUS2, RLC_BUSY_Sienna_Cichlid))
grbm_soft_reset = REG_SET_FIELD(grbm_soft_reset,
GRBM_SOFT_RESET,
@@ -7726,9 +7704,9 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock, clock_lo, clock_hi, hi_check;
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
break;
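
The hi_check local declared at the top of this function is the tell-tale of the classic re-read guard for a 64-bit counter split across two 32-bit registers (the retry itself sits in a branch outside this hunk). A generic sketch of that pattern, not code from this driver:

/* Generic torn-read guard for a 64-bit counter exposed as two 32-bit halves. */
static uint64_t example_read_counter64(uint32_t (*rd_lo)(void), uint32_t (*rd_hi)(void))
{
        uint32_t hi, lo, hi_check;

        do {
                hi = rd_hi();
                lo = rd_lo();
                hi_check = rd_hi();     /* if hi changed, lo may belong to the old epoch */
        } while (hi != hi_check);

        return ((uint64_t)hi << 32) | lo;
}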
@@ -7784,19 +7762,19 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_Sienna_Cichlid;
break;
default:
@@ -7848,13 +7826,13 @@ static void gfx_v10_0_set_safe_mode(struct amdgpu_device *adev)
data = RLC_SAFE_MODE__CMD_MASK;
data |= (1 << RLC_SAFE_MODE__MESSAGE__SHIFT);
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
/* wait for RLC_SAFE_MODE */
@@ -7884,13 +7862,13 @@ static void gfx_v10_0_unset_safe_mode(struct amdgpu_device *adev)
uint32_t data;
data = RLC_SAFE_MODE__CMD_MASK;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
WREG32_SOC15(GC, 0, mmRLC_SAFE_MODE_Sienna_Cichlid, data);
break;
default:
@@ -8193,7 +8171,7 @@ static void gfx_v10_0_apply_medium_grain_clock_gating_workaround(struct amdgpu_d
mmCGTS_SA1_QUAD1_SM_CTRL_REG
};
- if (adev->asic_type == CHIP_NAVI12) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)) {
for (i = 0; i < ARRAY_SIZE(tcp_ctrl_regs_nv12); i++) {
reg_idx = adev->reg_offset[GC_HWIP][0][mmCGTS_SA0_WGP00_CU0_TCP_CTRL_REG_BASE_IDX] +
tcp_ctrl_regs_nv12[i];
@@ -8238,8 +8216,9 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
/* === CGCG + CGLS === */
gfx_v10_0_update_coarse_grain_clock_gating(adev, enable);
- if ((adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_NAVI12))
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 10)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 1)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 1, 2)))
gfx_v10_0_apply_medium_grain_clock_gating_workaround(adev);
} else {
/* CGCG/CGLS should be disabled before MGCG/MGLS
@@ -8335,12 +8314,12 @@ static void gfx_v10_cntl_power_gating(struct amdgpu_device *adev, bool enable)
* Power/performance team will optimize it and might give a new value later.
*/
if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 1):
data = 0x4E20 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
data = 0x1388 & RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK_Vangogh;
WREG32_SOC15(GC, 0, mmRLC_PG_DELAY_3, data);
break;
@@ -8399,18 +8378,18 @@ static int gfx_v10_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
amdgpu_gfx_off_ctrl(adev, enable);
break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 3):
gfx_v10_cntl_pg(adev, enable);
amdgpu_gfx_off_ctrl(adev, enable);
break;
@@ -8428,16 +8407,16 @@ static int gfx_v10_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
gfx_v10_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -9541,19 +9520,19 @@ static void gfx_v10_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs;
break;
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 3, 0):
adev->gfx.rlc.funcs = &gfx_v10_0_rlc_funcs_sriov;
break;
default:
@@ -9641,8 +9620,8 @@ static int gfx_v10_0_get_cu_info(struct amdgpu_device *adev,
for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
bitmap = i * adev->gfx.config.max_sh_per_se + j;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (adev->asic_type == CHIP_YELLOW_CARP)) &&
+ if (((adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) ||
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3))) &&
((gfx_v10_3_get_disabled_sa(adev) >> bitmap) & 1))
continue;
mask = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 025184a556ee..7f944bb11298 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -953,8 +953,8 @@ static void gfx_v9_0_set_kiq_pm4_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -962,7 +962,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg10,
ARRAY_SIZE(golden_settings_gc_9_0_vg10));
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_2_1,
ARRAY_SIZE(golden_settings_gc_9_2_1));
@@ -970,7 +970,7 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_2_1_vg12,
ARRAY_SIZE(golden_settings_gc_9_2_1_vg12));
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_0,
ARRAY_SIZE(golden_settings_gc_9_0));
@@ -978,12 +978,13 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_0_vg20,
ARRAY_SIZE(golden_settings_gc_9_0_vg20));
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
soc15_program_register_sequence(adev,
golden_settings_gc_9_4_1_arct,
ARRAY_SIZE(golden_settings_gc_9_4_1_arct));
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
soc15_program_register_sequence(adev, golden_settings_gc_9_1,
ARRAY_SIZE(golden_settings_gc_9_1));
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
@@ -995,12 +996,12 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_gc_9_1_rv1,
ARRAY_SIZE(golden_settings_gc_9_1_rv1));
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
soc15_program_register_sequence(adev,
golden_settings_gc_9_1_rn,
ARRAY_SIZE(golden_settings_gc_9_1_rn));
return; /* for renoir, don't need common goldensetting */
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
gfx_v9_4_2_init_golden_registers(adev,
adev->smuio.funcs->get_die_id(adev));
break;
@@ -1008,8 +1009,8 @@ static void gfx_v9_0_init_golden_registers(struct amdgpu_device *adev)
break;
}
- if ((adev->asic_type != CHIP_ARCTURUS) &&
- (adev->asic_type != CHIP_ALDEBARAN))
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
+ (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2)))
soc15_program_register_sequence(adev, golden_settings_gc_9_x_common,
(const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
@@ -1193,15 +1194,15 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->asic_type != CHIP_ARCTURUS) &&
+ if ((adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) &&
((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
(adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1212,7 +1213,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 42))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1223,7 +1224,7 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 44) &&
(adev->gfx.pfp_fw_version >= 0x000000b2) &&
@@ -1234,7 +1235,8 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
(adev->gfx.mec_feature_version >= 44))
adev->gfx.mec_fw_write_wait = true;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
if ((adev->gfx.me_fw_version >= 0x0000009c) &&
(adev->gfx.me_feature_version >= 42) &&
(adev->gfx.pfp_fw_version >= 0x000000b1) &&
@@ -1297,7 +1299,7 @@ static bool is_raven_kicker(struct amdgpu_device *adev)
static bool check_if_enlarge_doorbell_range(struct amdgpu_device *adev)
{
- if ((adev->asic_type == CHIP_RENOIR) &&
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0)) &&
(adev->gfx.me_fw_version >= 0x000000a5) &&
(adev->gfx.me_feature_version >= 52))
return true;
@@ -1310,12 +1312,13 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (!((adev->apu_flags & AMD_APU_IS_RAVEN2) ||
(adev->apu_flags & AMD_APU_IS_PICASSO)) &&
((!is_raven_kicker(adev) &&
@@ -1329,7 +1332,7 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
AMD_PG_SUPPORT_CP |
AMD_PG_SUPPORT_RLC_SMU_HS;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
if (adev->pm.pp_feature & PP_GFXOFF_MASK)
adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG |
AMD_PG_SUPPORT_CP |
@@ -1553,9 +1556,9 @@ out:
static bool gfx_v9_0_load_mec2_fw_bin_support(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_ALDEBARAN ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_RENOIR)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 3, 0))
return false;
return true;
@@ -1663,17 +1666,18 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
chip_name = "vega10";
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
chip_name = "vega12";
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
chip_name = "vega20";
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -1681,16 +1685,16 @@ static int gfx_v9_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
chip_name = "aldebaran";
break;
default:
@@ -1794,7 +1798,7 @@ static void gfx_v9_0_init_always_on_cu_mask(struct amdgpu_device *adev)
if (adev->flags & AMD_IS_APU)
always_on_cu_num = 4;
- else if (adev->asic_type == CHIP_VEGA12)
+ else if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1))
always_on_cu_num = 8;
else
always_on_cu_num = 12;
@@ -1963,11 +1967,12 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
gfx_v9_0_init_lbpw(adev);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
gfx_v9_4_init_lbpw(adev);
break;
default:
@@ -2142,8 +2147,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.funcs = &gfx_v9_0_gfx_funcs;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2151,7 +2156,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.sc_earlyz_tile_fifo_size = 0x4C0;
gb_addr_config = VEGA10_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2160,7 +2165,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = VEGA12_GB_ADDR_CONFIG_GOLDEN;
DRM_INFO("fix gfx.config for vega12\n");
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2175,7 +2180,8 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
if (err)
return err;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2186,7 +2192,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
else
gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2197,7 +2203,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22014042;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
@@ -2207,7 +2213,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config &= ~0xf3e777ff;
gb_addr_config |= 0x22010042;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs;
adev->gfx.config.max_hw_contexts = 8;
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
@@ -2305,14 +2311,15 @@ static int gfx_v9_0_sw_init(void *handle)
struct amdgpu_kiq *kiq;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.mec.num_mec = 2;
break;
default:
@@ -2596,8 +2603,8 @@ static void gfx_v9_0_init_sq_config(struct amdgpu_device *adev)
{
uint32_t tmp;
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
tmp = RREG32_SOC15(GC, 0, mmSQ_CONFIG);
tmp = REG_SET_FIELD(tmp, SQ_CONFIG,
DISABLE_BARRIER_WAITCNT, 1);
@@ -2932,7 +2939,7 @@ static void gfx_v9_0_init_gfx_power_gating(struct amdgpu_device *adev)
/* program GRBM_REG_SAVE_GFX_IDLE_THRESHOLD to 0x55f0 */
data |= (0x55f0 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
WREG32(SOC15_REG_OFFSET(GC, 0, mmRLC_AUTO_PG_CTRL), data);
- if (adev->asic_type != CHIP_RENOIR)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 3, 0))
pwr_10_0_gfxip_control_over_cgpg(adev, true);
}
}
@@ -3044,7 +3051,7 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
* And it's needed by the gfxoff feature.
*/
if (adev->gfx.rlc.is_rlc_v2_1) {
- if (adev->asic_type == CHIP_VEGA12 ||
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 2, 1) ||
(adev->apu_flags & AMD_APU_IS_RAVEN2))
gfx_v9_1_init_rlc_save_restore_list(adev);
gfx_v9_0_enable_save_restore_machine(adev);
@@ -3157,14 +3164,15 @@ static int gfx_v9_0_rlc_resume(struct amdgpu_device *adev)
return r;
}
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (amdgpu_lbpw == 0)
gfx_v9_0_enable_lbpw(adev, false);
else
gfx_v9_0_enable_lbpw(adev, true);
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
if (amdgpu_lbpw > 0)
gfx_v9_0_enable_lbpw(adev, true);
else
@@ -3959,8 +3967,8 @@ static void gfx_v9_0_init_tcp_config(struct amdgpu_device *adev)
{
u32 tmp;
- if (adev->asic_type != CHIP_ARCTURUS &&
- adev->asic_type != CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1) &&
+ adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 2))
return;
tmp = RREG32_SOC15(GC, 0, mmTCP_ADDR_CONFIG);
@@ -4000,7 +4008,7 @@ static int gfx_v9_0_hw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
gfx_v9_4_2_set_power_brake_sequence(adev);
return r;
@@ -4232,7 +4240,7 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 0, 1) && amdgpu_sriov_runtime(adev)) {
clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
@@ -4582,7 +4590,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
@@ -4732,8 +4740,8 @@ static int gfx_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
adev->gfx.num_gfx_rings = 0;
else
adev->gfx.num_gfx_rings = GFX9_NUM_GFX_RINGS;
@@ -4767,7 +4775,7 @@ static int gfx_v9_0_ecc_late_init(void *handle)
}
/* requires IBs so do in late init after IB pool is initialized */
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
r = gfx_v9_4_2_do_edc_gpr_workarounds(adev);
else
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
@@ -4895,7 +4903,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - RLC_CGTT_MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data &= ~RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
@@ -4929,7 +4937,7 @@ static void gfx_v9_0_update_medium_grain_clock_gating(struct amdgpu_device *adev
/* 1 - MGCG_OVERRIDE */
def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
- if (adev->asic_type != CHIP_VEGA12)
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 2, 1))
data |= RLC_CGTT_MGCG_OVERRIDE__CPF_CGTT_SCLK_OVERRIDE_MASK;
data |= (RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK |
@@ -5035,7 +5043,7 @@ static void gfx_v9_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev
/* enable cgcg FSM(0x0000363F) */
def = RREG32_SOC15(GC, 0, mmRLC_CGCG_CGLS_CTRL);
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1))
data = (0x2000 << RLC_CGCG_CGLS_CTRL__CGCG_GFX_IDLE_THRESHOLD__SHIFT) |
RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK;
else
@@ -5161,9 +5169,10 @@ static int gfx_v9_0_set_powergating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_PG_STATE_GATE);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 3, 0):
if (!enable)
amdgpu_gfx_off_ctrl(adev, false);
@@ -5189,7 +5198,7 @@ static int gfx_v9_0_set_powergating_state(void *handle,
if (enable)
amdgpu_gfx_off_ctrl(adev, true);
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
amdgpu_gfx_off_ctrl(adev, enable);
break;
default:
@@ -5207,14 +5216,15 @@ static int gfx_v9_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
gfx_v9_0_update_gfx_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -5256,7 +5266,7 @@ static void gfx_v9_0_get_clockgating_state(void *handle, u32 *flags)
if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK)
*flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS;
- if (adev->asic_type != CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] != IP_VERSION(9, 4, 1)) {
/* AMD_CG_SUPPORT_GFX_3D_CGCG */
data = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_CGCG_CGLS_CTRL_3D));
if (data & RLC_CGCG_CGLS_CTRL_3D__CGCG_EN_MASK)
@@ -7027,14 +7037,15 @@ static void gfx_v9_0_set_irq_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->gfx.rlc.funcs = &gfx_v9_0_rlc_funcs;
break;
default:
@@ -7045,17 +7056,18 @@ static void gfx_v9_0_set_rlc_funcs(struct amdgpu_device *adev)
static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
{
/* init asic gds info */
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_size = 0x10000;
break;
- case CHIP_RAVEN:
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_size = 0x1000;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
/* aldebaran removed all the GDS internal memory,
* only support GWS opcode in kernel, like barrier
* semaphore, etc. */
@@ -7066,24 +7078,25 @@ static void gfx_v9_0_set_gds_init(struct amdgpu_device *adev)
break;
}
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 4, 0):
adev->gds.gds_compute_max_wave_id = 0x7ff;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->gds.gds_compute_max_wave_id = 0x27f;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 2, 2):
+ case IP_VERSION(9, 1, 0):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
adev->gds.gds_compute_max_wave_id = 0x77; /* raven2 */
else
adev->gds.gds_compute_max_wave_id = 0x15f; /* raven1 */
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->gds.gds_compute_max_wave_id = 0xfff;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
/* deprecated for Aldebaran, no usage at all */
adev->gds.gds_compute_max_wave_id = 0;
break;
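
The gfx_v9_0 hunks above all follow one pattern: a check on the asic_type enum is replaced by a check on the GC hardware-IP version reported by IP discovery. Below is a minimal user-space sketch of that pattern; the SKETCH_IP_VERSION packing (major << 16 | minor << 8 | rev) and the sample values are assumptions for illustration and may not match the exact macro amdgpu defines.

/*
 * Minimal sketch of the asic_type -> IP-version conversion pattern used
 * throughout the hunks above. SKETCH_IP_VERSION and the sample values are
 * assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

static const char *gc_chip_name(uint32_t gc_ver)
{
	/* Equality cases replace the old CHIP_* switch arms. */
	switch (gc_ver) {
	case SKETCH_IP_VERSION(9, 0, 1):
		return "vega10";
	case SKETCH_IP_VERSION(9, 4, 1):
		return "arcturus";
	case SKETCH_IP_VERSION(9, 4, 2):
		return "aldebaran";
	default:
		return "unknown";
	}
}

int main(void)
{
	uint32_t gc_ver = SKETCH_IP_VERSION(9, 4, 1);

	printf("GC %u.%u.%u -> %s\n",
	       (unsigned int)(gc_ver >> 16),
	       (unsigned int)((gc_ver >> 8) & 0xff),
	       (unsigned int)(gc_ver & 0xff),
	       gc_chip_name(gc_ver));

	/* The packed layout keeps ordered checks ("this IP and newer")
	 * meaningful, which is what the >= / < comparisons above rely on. */
	printf("at least GC 9.4.0: %s\n",
	       gc_ver >= SKETCH_IP_VERSION(9, 4, 0) ? "yes" : "no");
	return 0;
}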
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
index 00a2b36a24b3..c4f37a161875 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c
@@ -706,6 +706,11 @@ int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
return 0;
+ /* Workaround for ALDEBARAN: skip GPRs init during GPU reset.
+ Will remove it once the GPRs init algorithm works for all CU settings. */
+ if (amdgpu_in_reset(adev))
+ return 0;
+
gfx_v9_4_2_do_sgprs_init(adev);
gfx_v9_4_2_do_vgprs_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
index 1a374ec0514a..e80d1dc43079 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_1.c
@@ -506,8 +506,8 @@ static int gfxhub_v2_1_get_xgmi_info(struct amdgpu_device *adev)
u32 max_num_physical_nodes = 0;
u32 max_physical_node_id = 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[XGMI_HWIP][0]) {
+ case IP_VERSION(4, 8, 0):
max_num_physical_nodes = 4;
max_physical_node_id = 3;
break;
@@ -544,7 +544,7 @@ static void gfxhub_v2_1_utcl2_harvest(struct amdgpu_device *adev)
adev->gfx.config.max_sh_per_se *
adev->gfx.config.max_shader_engines);
- if (adev->asic_type == CHIP_YELLOW_CARP) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 3)) {
/* Get SA disabled bitmap from eFuse setting */
efuse_setting = RREG32_SOC15(GC, 0, mmCC_GC_SA_UNIT_DISABLE);
efuse_setting &= CC_GC_SA_UNIT_DISABLE__SA_DISABLE_MASK;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index e47104a1f559..3ec5ff5a6dbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -133,7 +133,7 @@ static int gmc_v10_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface.
*/
if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_SIENNA_CICHLID))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -268,7 +268,7 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid,
* to avoid a false ACK due to the new fast GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_SIENNA_CICHLID))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 3, 0)))
RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng, hub_ip);
@@ -657,8 +657,8 @@ static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(8, 7, 0):
adev->umc.max_ras_err_cnt_per_query = UMC_V8_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V8_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM;
@@ -674,9 +674,9 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 4, 0):
adev->mmhub.funcs = &mmhub_v2_3_funcs;
break;
default:
@@ -687,13 +687,13 @@ static void gmc_v10_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->gfxhub.funcs = &gfxhub_v2_1_funcs;
break;
default:
@@ -800,23 +800,9 @@ static int gmc_v10_0_mc_init(struct amdgpu_device *adev)
adev->gmc.visible_vram_size = adev->gmc.real_vram_size;
/* set the gart size */
- if (amdgpu_gart_size == -1) {
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- default:
- adev->gmc.gart_size = 512ULL << 20;
- break;
- }
- } else
+ if (amdgpu_gart_size == -1)
+ adev->gmc.gart_size = 512ULL << 20;
+ else
adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
gmc_v10_0_vram_gtt_location(adev, &adev->gmc);
@@ -871,17 +857,17 @@ static int gmc_v10_0_sw_init(void *handle)
adev->gmc.vram_vendor = vram_vendor;
}
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
+ case IP_VERSION(10, 1, 1):
+ case IP_VERSION(10, 1, 2):
+ case IP_VERSION(10, 1, 3):
+ case IP_VERSION(10, 3, 0):
+ case IP_VERSION(10, 3, 2):
+ case IP_VERSION(10, 3, 1):
+ case IP_VERSION(10, 3, 4):
+ case IP_VERSION(10, 3, 5):
+ case IP_VERSION(10, 3, 3):
adev->num_vmhubs = 2;
/*
* To fulfill 4-level page support,
@@ -989,21 +975,6 @@ static int gmc_v10_0_sw_fini(void *handle)
static void gmc_v10_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- case CHIP_CYAN_SKILLFISH:
- break;
- default:
- break;
- }
}
/**
@@ -1162,8 +1133,7 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
if (r)
return r;
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_YELLOW_CARP)
+ if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
return athub_v2_1_set_clockgating(adev, state);
else
return athub_v2_0_set_clockgating(adev, state);
@@ -1175,8 +1145,7 @@ static void gmc_v10_0_get_clockgating_state(void *handle, u32 *flags)
adev->mmhub.funcs->get_clockgating(adev, flags);
- if (adev->asic_type >= CHIP_SIENNA_CICHLID &&
- adev->asic_type <= CHIP_YELLOW_CARP)
+ if (adev->ip_versions[ATHUB_HWIP][0] >= IP_VERSION(2, 1, 0))
athub_v2_1_get_clockgating(adev, flags);
else
athub_v2_0_get_clockgating(adev, flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 0e81e03e9b49..0fe714f54cca 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -841,12 +841,12 @@ static int gmc_v6_0_sw_init(void *handle)
adev->gmc.mc_mask = 0xffffffffffULL;
- r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(44));
+ r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
if (r) {
dev_warn(adev->dev, "No suitable DMA available.\n");
return r;
}
- adev->need_swiotlb = drm_need_swiotlb(44);
+ adev->need_swiotlb = drm_need_swiotlb(40);
r = gmc_v6_0_init_microcode(adev);
if (r) {
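
The gmc_v6_0 change above narrows the device's DMA mask from 44 to 40 bits, in line with the 40-bit mc_mask set a few lines earlier. Here is a user-space sketch of what those widths mean, assuming the usual ((1 << n) - 1) construction (the kernel's own DMA_BIT_MASK may be defined differently); for illustration only.

/*
 * Sketch of the 44-bit vs 40-bit DMA masks, assuming the usual
 * ((1 << n) - 1) construction; for illustration only.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t old_mask = SKETCH_DMA_BIT_MASK(44);
	uint64_t new_mask = SKETCH_DMA_BIT_MASK(40);

	/* 44 bits cover 16 TiB of address space, 40 bits cover 1 TiB,
	 * which matches the 0xffffffffffULL mc_mask set above. */
	printf("44-bit mask: 0x%012" PRIx64 " (%" PRIu64 " TiB)\n",
	       old_mask, (old_mask + 1) >> 40);
	printf("40-bit mask: 0x%012" PRIx64 " (%" PRIu64 " TiB)\n",
	       new_mask, (new_mask + 1) >> 40);
	return 0;
}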
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 5551359d5dfd..cb82404df534 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -579,7 +579,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
* the new fast GRBM interface.
*/
if ((entry->vmid_src == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_ALDEBARAN))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32(hub->vm_l2_pro_fault_status);
status = RREG32(hub->vm_l2_pro_fault_status);
@@ -597,26 +597,28 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
gfxhub_client_ids[cid],
cid);
} else {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
mmhub_cid = mmhub_client_ids_vega10[cid][rw];
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 3, 0):
mmhub_cid = mmhub_client_ids_vega12[cid][rw];
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
mmhub_cid = mmhub_client_ids_vega20[cid][rw];
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
mmhub_cid = mmhub_client_ids_arcturus[cid][rw];
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
mmhub_cid = mmhub_client_ids_raven[cid][rw];
break;
- case CHIP_RENOIR:
+ case IP_VERSION(1, 5, 0):
+ case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_renoir[cid][rw];
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
mmhub_cid = mmhub_client_ids_aldebaran[cid][rw];
break;
default:
@@ -694,7 +696,7 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
static bool gmc_v9_0_use_invalidate_semaphore(struct amdgpu_device *adev,
uint32_t vmhub)
{
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
return false;
return ((vmhub == AMDGPU_MMHUB_0 ||
@@ -745,7 +747,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
hub = &adev->vmhub[vmhub];
if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20) {
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0)) {
/* Vega20+XGMI caches PTEs in TC and TLB. Add a
* heavy-weight TLB flush (type 2), which flushes
* both. Due to a race condition with concurrent
@@ -808,7 +810,7 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
* GRBM interface.
*/
if ((vmhub == AMDGPU_GFXHUB_0) &&
- (adev->asic_type < CHIP_ALDEBARAN))
+ (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 4, 2)))
RREG32_NO_KIQ(hub->vm_inv_eng0_req +
hub->eng_distance * eng);
@@ -874,7 +876,7 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
* still need a second TLB flush after this.
*/
bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20);
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0));
/* 2 dwords flush + 8 dwords fence */
unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
@@ -1088,13 +1090,13 @@ static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev,
*flags &= ~AMDGPU_PTE_VALID;
}
- if ((adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) &&
+ if ((adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) &&
!(*flags & AMDGPU_PTE_SYSTEM) &&
mapping->bo_va->is_xgmi)
*flags |= AMDGPU_PTE_SNOOPED;
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2))
*flags |= mapping->flags & AMDGPU_PTE_SNOOPED;
}
@@ -1108,9 +1110,10 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
} else {
u32 viewport;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 1, 0):
viewport = RREG32_SOC15(DCE, 0, mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION);
size = (REG_GET_FIELD(viewport,
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_HEIGHT) *
@@ -1118,9 +1121,6 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev)
HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION, PRI_VIEWPORT_WIDTH) *
4);
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
default:
viewport = RREG32_SOC15(DCE, 0, mmSCL0_VIEWPORT_SIZE);
size = (REG_GET_FIELD(viewport, SCL0_VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
@@ -1151,11 +1151,11 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(6, 0, 0):
adev->umc.funcs = &umc_v6_0_funcs;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(6, 1, 1):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
@@ -1163,7 +1163,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(6, 1, 2):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM;
@@ -1171,7 +1171,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0];
adev->umc.ras_funcs = &umc_v6_1_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(6, 7, 0):
adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM;
adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM;
adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM;
@@ -1190,11 +1190,11 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 4, 1):
adev->mmhub.funcs = &mmhub_v9_4_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->mmhub.funcs = &mmhub_v1_7_funcs;
break;
default:
@@ -1205,14 +1205,14 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 4, 0):
adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs;
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs;
break;
default:
@@ -1233,8 +1233,9 @@ static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev)
static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ /* is UMC the right IP to check for MCA? Maybe DF? */
+ switch (adev->ip_versions[UMC_HWIP][0]) {
+ case IP_VERSION(6, 7, 0):
if (!adev->gmc.xgmi.connected_to_cpu)
adev->mca.funcs = &mca_v3_0_funcs;
break;
@@ -1247,11 +1248,12 @@ static int gmc_v9_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */
if (adev->asic_type == CHIP_VEGA20 ||
adev->asic_type == CHIP_ARCTURUS)
adev->gmc.xgmi.supported = true;
- if (adev->asic_type == CHIP_ALDEBARAN) {
+ if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(6, 1, 0)) {
adev->gmc.xgmi.supported = true;
adev->gmc.xgmi.connected_to_cpu =
adev->smuio.funcs->is_host_gpu_xgmi_supported(adev);
@@ -1289,7 +1291,8 @@ static int gmc_v9_0_late_init(void *handle)
* Workaround for a performance drop issue where the VBIOS enables partial
* writes while disabling HBM ECC for vega10.
*/
- if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ if (!amdgpu_sriov_vf(adev) &&
+ (adev->ip_versions[UMC_HWIP][0] == IP_VERSION(6, 0, 0))) {
if (!(adev->ras_enabled & (1 << AMDGPU_RAS_BLOCK__UMC))) {
if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
@@ -1393,17 +1396,18 @@ static int gmc_v9_0_mc_init(struct amdgpu_device *adev)
/* set the gart size */
if (amdgpu_gart_size == -1) {
- switch (adev->asic_type) {
- case CHIP_VEGA10: /* all engines support GPUVM */
- case CHIP_VEGA12: /* all engines support GPUVM */
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1): /* all engines support GPUVM */
+ case IP_VERSION(9, 2, 1): /* all engines support GPUVM */
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 4, 1):
+ case IP_VERSION(9, 4, 2):
default:
adev->gmc.gart_size = 512ULL << 20;
break;
- case CHIP_RAVEN: /* DCE SG support */
- case CHIP_RENOIR:
+ case IP_VERSION(9, 1, 0): /* DCE SG support */
+ case IP_VERSION(9, 2, 2): /* DCE SG support */
+ case IP_VERSION(9, 3, 0):
adev->gmc.gart_size = 1024ULL << 20;
break;
}
@@ -1464,7 +1468,8 @@ static int gmc_v9_0_gart_init(struct amdgpu_device *adev)
*/
static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN)
+ if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+ (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1)))
adev->gmc.sdpif_register = RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
}
@@ -1507,8 +1512,9 @@ static int gmc_v9_0_sw_init(void *handle)
adev->gmc.vram_type = vram_type;
adev->gmc.vram_vendor = vram_vendor;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
adev->num_vmhubs = 2;
if (adev->rev_id == 0x0 || adev->rev_id == 0x1) {
@@ -1520,11 +1526,11 @@ static int gmc_v9_0_sw_init(void *handle)
adev->vm_manager.num_level > 1;
}
break;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- case CHIP_RENOIR:
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 0, 1):
+ case IP_VERSION(9, 2, 1):
+ case IP_VERSION(9, 4, 0):
+ case IP_VERSION(9, 3, 0):
+ case IP_VERSION(9, 4, 2):
adev->num_vmhubs = 2;
@@ -1539,7 +1545,7 @@ static int gmc_v9_0_sw_init(void *handle)
else
amdgpu_vm_adjust_size(adev, 256 * 1024, 9, 3, 48);
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->num_vmhubs = 3;
/* Keep the vm size the same as Vega20 */
@@ -1555,7 +1561,7 @@ static int gmc_v9_0_sw_init(void *handle)
if (r)
return r;
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1)) {
r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VMC1, VMC_1_0__SRCID__VM_FAULT,
&adev->gmc.vm_fault);
if (r)
@@ -1622,8 +1628,8 @@ static int gmc_v9_0_sw_init(void *handle)
* for video processing.
*/
adev->vm_manager.first_kfd_vmid =
- (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) ? 3 : 8;
+ (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 1) ||
+ adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 2)) ? 3 : 8;
amdgpu_vm_manager_init(adev);
@@ -1649,12 +1655,12 @@ static int gmc_v9_0_sw_fini(void *handle)
static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
if (amdgpu_sriov_vf(adev))
break;
fallthrough;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_mmhub_1_0_0,
ARRAY_SIZE(golden_settings_mmhub_1_0_0));
@@ -1662,9 +1668,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_athub_1_0_0,
ARRAY_SIZE(golden_settings_athub_1_0_0));
break;
- case CHIP_VEGA12:
- break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 0):
/* TODO for renoir */
soc15_program_register_sequence(adev,
golden_settings_athub_1_0_0,
@@ -1684,7 +1689,8 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
*/
void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
{
- if (adev->asic_type == CHIP_RAVEN) {
+ if ((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) ||
+ (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) {
WREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
WARN_ON(adev->gmc.sdpif_register !=
RREG32_SOC15(DCE, 0, mmDCHUBBUB_SDPIF_MMIO_CNTRL_0));
diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
index 74b90cc2bf48..eecfb1545c1e 100644
--- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c
@@ -49,7 +49,7 @@ static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
- if (adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 4, 0))
return;
if (!ring || !ring->funcs->emit_wreg)
@@ -79,7 +79,7 @@ static void hdp_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
return;
- if (adev->asic_type >= CHIP_ALDEBARAN)
+ if (adev->ip_versions[HDP_HWIP][0] >= IP_VERSION(4, 4, 0))
WREG32_SOC15(HDP, 0, mmHDP_EDC_CNT, 0);
else
/*read back hdp ras counter to reset it to 0 */
@@ -91,9 +91,10 @@ static void hdp_v4_0_update_clock_gating(struct amdgpu_device *adev,
{
uint32_t def, data;
- if (adev->asic_type == CHIP_VEGA10 ||
- adev->asic_type == CHIP_VEGA12 ||
- adev->asic_type == CHIP_RAVEN) {
+ if (adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 0) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 0, 1) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 1) ||
+ adev->ip_versions[HDP_HWIP][0] == IP_VERSION(4, 1, 0)) {
def = data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS));
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
@@ -135,8 +136,8 @@ static void hdp_v4_0_get_clockgating_state(struct amdgpu_device *adev,
static void hdp_v4_0_init_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[HDP_HWIP][0]) {
+ case IP_VERSION(4, 2, 1):
WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index 85967a5570cb..299de1d131d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -32,26 +32,6 @@
#include "vcn/vcn_2_0_0_sh_mask.h"
#include "ivsrcid/vcn/irqsrcs_vcn_2_0.h"
-#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
-#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
-#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
-#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
-#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
-#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
-#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
-#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
-#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
-#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
-#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
-#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
-#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
-#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
-
-#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
-
static void jpeg_v2_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void jpeg_v2_0_set_irq_funcs(struct amdgpu_device *adev);
static int jpeg_v2_0_set_powergating_state(void *handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
index 15a344ed340f..1a03baa59755 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h
@@ -24,6 +24,26 @@
#ifndef __JPEG_V2_0_H__
#define __JPEG_V2_0_H__
+#define mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET 0x1bfff
+#define mmUVD_JPEG_GPCOM_CMD_INTERNAL_OFFSET 0x4029
+#define mmUVD_JPEG_GPCOM_DATA0_INTERNAL_OFFSET 0x402a
+#define mmUVD_JPEG_GPCOM_DATA1_INTERNAL_OFFSET 0x402b
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ea
+#define mmUVD_LMI_JRBC_RB_MEM_WR_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40eb
+#define mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET 0x40cf
+#define mmUVD_LMI_JPEG_VMID_INTERNAL_OFFSET 0x40d1
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40e8
+#define mmUVD_LMI_JRBC_IB_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40e9
+#define mmUVD_JRBC_IB_SIZE_INTERNAL_OFFSET 0x4082
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_LOW_INTERNAL_OFFSET 0x40ec
+#define mmUVD_LMI_JRBC_RB_MEM_RD_64BIT_BAR_HIGH_INTERNAL_OFFSET 0x40ed
+#define mmUVD_JRBC_RB_COND_RD_TIMER_INTERNAL_OFFSET 0x4085
+#define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084
+#define mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089
+#define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f
+
+#define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000
+
void jpeg_v2_0_dec_ring_insert_start(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_insert_end(struct amdgpu_ring *ring);
void jpeg_v2_0_dec_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index 46096ad7f0d9..a29c86617fb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -423,6 +423,42 @@ static void jpeg_v2_5_dec_ring_set_wptr(struct amdgpu_ring *ring)
}
}
+/**
+ * jpeg_v2_6_dec_ring_insert_start - insert a start command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write a start command to the ring.
+ */
+static void jpeg_v2_6_dec_ring_insert_start(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x80000000 | (1 << (ring->me * 2 + 14)));
+}
+
+/**
+ * jpeg_v2_6_dec_ring_insert_end - insert an end command
+ *
+ * @ring: amdgpu_ring pointer
+ *
+ * Write an end command to the ring.
+ */
+static void jpeg_v2_6_dec_ring_insert_end(struct amdgpu_ring *ring)
+{
+ amdgpu_ring_write(ring, PACKETJ(mmUVD_JRBC_EXTERNAL_REG_INTERNAL_OFFSET,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, 0x6aa04); /* PCTL0_MMHUB_DEEPSLEEP_IB */
+
+ amdgpu_ring_write(ring, PACKETJ(JRBC_DEC_EXTERNAL_REG_WRITE_ADDR,
+ 0, 0, PACKETJ_TYPE0));
+ amdgpu_ring_write(ring, (1 << (ring->me * 2 + 14)));
+}
+
static bool jpeg_v2_5_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -633,8 +669,8 @@ static const struct amdgpu_ring_funcs jpeg_v2_6_dec_ring_vm_funcs = {
.test_ring = amdgpu_jpeg_dec_ring_test_ring,
.test_ib = amdgpu_jpeg_dec_ring_test_ib,
.insert_nop = jpeg_v2_0_dec_ring_nop,
- .insert_start = jpeg_v2_0_dec_ring_insert_start,
- .insert_end = jpeg_v2_0_dec_ring_insert_end,
+ .insert_start = jpeg_v2_6_dec_ring_insert_start,
+ .insert_end = jpeg_v2_6_dec_ring_insert_end,
.pad_ib = amdgpu_ring_generic_pad_ib,
.begin_use = amdgpu_jpeg_ring_begin_use,
.end_use = amdgpu_jpeg_ring_end_use,
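
The jpeg_v2_6 insert_start/insert_end helpers added above write an instance-specific bit through JRBC_DEC_EXTERNAL_REG_WRITE_ADDR, derived from ring->me. A small sketch of the words each instance ends up writing, assuming ring->me is the JPEG instance index (0 and 1 shown); for illustration only.

/*
 * Sketch of the per-instance control words written by the new jpeg_v2_6
 * insert_start/insert_end helpers, assuming ring->me is the JPEG instance
 * index; for illustration only.
 */
#include <stdio.h>

int main(void)
{
	for (unsigned int me = 0; me < 2; me++) {
		unsigned int start = 0x80000000u | (1u << (me * 2 + 14));
		unsigned int end = 1u << (me * 2 + 14);

		printf("instance %u: start 0x%08x, end 0x%08x\n",
		       me, start, end);
	}
	return 0;
}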
diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
index 058b65730a84..8f7107d392af 100644
--- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c
@@ -52,7 +52,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = {
.ras_fini = mca_v3_0_mp0_ras_fini,
.query_ras_error_count = mca_v3_0_mp0_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MP0,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0,
.sysfs_name = "mp0_err_count",
};
@@ -79,7 +80,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = {
.ras_fini = mca_v3_0_mp1_ras_fini,
.query_ras_error_count = mca_v3_0_mp1_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MP1,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1,
.sysfs_name = "mp1_err_count",
};
@@ -106,7 +108,8 @@ const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = {
.ras_fini = mca_v3_0_mpio_ras_fini,
.query_ras_error_count = mca_v3_0_mpio_query_ras_error_count,
.query_ras_error_address = NULL,
- .ras_block = AMDGPU_RAS_BLOCK__MPIO,
+ .ras_block = AMDGPU_RAS_BLOCK__MCA,
+ .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO,
.sysfs_name = "mpio_err_count",
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index 7ded6b2f058e..25f8e93e5ec3 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -153,18 +153,16 @@ mmhub_v2_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_NAVI14:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
mmhub_cid = mmhub_client_ids_navi1x[cid][rw];
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
mmhub_cid = mmhub_client_ids_sienna_cichlid[cid][rw];
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(2, 1, 2):
mmhub_cid = mmhub_client_ids_beige_goby[cid][rw];
break;
default:
@@ -571,11 +569,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
return;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
@@ -606,11 +603,10 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad
DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK);
}
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
if (def != data)
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
if (def1 != data1)
@@ -633,11 +629,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
if (!(adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
return;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
break;
default:
@@ -651,11 +646,10 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade
data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
if (def != data) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data);
break;
default:
@@ -671,14 +665,12 @@ static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
mmhub_v2_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
mmhub_v2_0_update_medium_grain_light_sleep(adev,
@@ -698,11 +690,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags)
if (amdgpu_sriov_vf(adev))
*flags = 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(2, 1, 1):
+ case IP_VERSION(2, 1, 2):
data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid);
data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid);
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
index 88e457a150e0..a11d60ec6321 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c
@@ -90,9 +90,9 @@ mmhub_v2_3_print_l2_protection_fault_status(struct amdgpu_device *adev,
dev_err(adev->dev,
"MMVM_L2_PROTECTION_FAULT_STATUS:0x%08X\n",
status);
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MMHUB_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 4, 0):
mmhub_cid = mmhub_client_ids_vangogh[cid][rw];
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index 530011622801..1d8414c3fadb 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -107,7 +107,7 @@ force_update_wptr_for_self_int(struct amdgpu_device *adev,
{
u32 ih_cntl, ih_rb_cntl;
- if (adev->asic_type < CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[OSSSYS_HWIP][0] < IP_VERSION(5, 0, 3))
return;
ih_cntl = RREG32_SOC15(OSSSYS, 0, mmIH_CNTL2);
@@ -332,13 +332,10 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev)
if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) {
if (ih[0]->use_bus_addr) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[OSSSYS_HWIP][0]) {
+ case IP_VERSION(5, 0, 3):
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 1):
ih_chicken = RREG32_SOC15(OSSSYS, 0, mmIH_CHICKEN_Sienna_Cichlid);
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
deleted file mode 100644
index 88efaecf9f70..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi10_ip_offset.h"
-
-int navi10_reg_base_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
-
- return 0;
-}
-
-
diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
deleted file mode 100644
index a786d159e5e9..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi12_ip_offset.h"
-
-int navi12_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocks needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
deleted file mode 100644
index 4ea1e8fbb601..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright 2018 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "navi14_ip_offset.h"
-
-int navi14_reg_base_init(struct amdgpu_device *adev)
-{
- int i;
-
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIF0_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(UVD0_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DMU_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- adev->reg_offset[CLK_HWIP][i] = (uint32_t *)(&(CLK_BASE.instance[i]));
- }
-
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
index b184b656b9b6..4ecd2b5808ce 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c
@@ -53,6 +53,16 @@
#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
+
static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev)
{
WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
@@ -318,6 +328,27 @@ const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg = {
.ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK,
};
+const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc = {
+ .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+ .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
+ .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
+ .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
+};
+
static void nbio_v2_3_init_registers(struct amdgpu_device *adev)
{
uint32_t def, data;
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
index a43b60acf7f6..6074dd3a1ed8 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h
@@ -27,6 +27,7 @@
#include "soc15_common.h"
extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg_sc;
extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs;
#endif
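
[Editor's note, not part of the patch] The new nbio_v2_3_hdp_flush_reg_sc table above maps SDMA engines onto the reserved HDP-flush-done bits, skipping RSVD_ENG0 because firmware owns it. A minimal, self-contained C sketch of that mapping (the helper name is invented for illustration only):

static inline unsigned int sdma_rsvd_eng_mask(unsigned int sdma_instance)
{
	/* RSVD_ENG0 is bit 12 and reserved for firmware, so SDMA instance n
	 * waits on RSVD_ENG(n + 1), i.e. bit (13 + n): 0x2000 for SDMA0 up
	 * to 0x100000 for SDMA7, matching the table above.
	 */
	return 1u << (13 + sdma_instance);
}
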
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index f50045cebd44..b8bd03d16dba 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -56,12 +56,15 @@
* These are nbio v7_4_1 register masks. Temporarily define these here since
* the nbio v7_4_1 header is incomplete.
*/
-#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK 0x00001000L /* Don't use. Firmware uses this bit internally */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK 0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK 0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK 0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK 0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK 0x00020000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK 0x00040000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK 0x00080000L
+#define GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK 0x00100000L
#define mmBIF_MMSCH1_DOORBELL_RANGE 0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX 2
@@ -334,12 +337,27 @@ const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
- .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
- .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
- .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
- .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
- .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
- .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+};
+
+const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald = {
+ .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
+ .ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
+ .ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
+ .ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
+ .ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
+ .ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG6_MASK,
+ .ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG7_MASK,
+ .ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG8_MASK,
};
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
@@ -387,13 +405,13 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
"errors detected in %s block, "
"no user action is needed.\n",
obj->err_data.ce_count,
- ras_block_str(adev->nbio.ras_if->block));
+ get_ras_block_str(adev->nbio.ras_if));
if (err_data.ue_count)
dev_info(adev->dev, "%ld uncorrectable hardware "
"errors detected in %s block\n",
obj->err_data.ue_count,
- ras_block_str(adev->nbio.ras_if->block));
+ get_ras_block_str(adev->nbio.ras_if));
}
dev_info(adev->dev, "RAS controller interrupt triggered "
@@ -566,7 +584,9 @@ static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *a
return r;
}
-#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2 0x13a20030
+#define smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE 0x13b20030
+#define smnRAS_GLOBAL_STATUS_LO_ALDE 0x13b20020
static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status)
@@ -575,12 +595,20 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
uint32_t corr, fatal, non_fatal;
struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
- global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE);
+ else
+ global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
+
corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
ParityErrNonFatal);
- parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
+
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE);
+ else
+ parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);
if (corr)
err_data->ce_count++;
@@ -589,13 +617,21 @@ static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
if (corr || fatal || non_fatal) {
central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
+
/* clear error status register */
- WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO_ALDE, global_sts);
+ else
+ WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);
if (fatal)
+ {
/* clear parity fatal error indication field */
- WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
- parity_sts);
+ if (adev->asic_type == CHIP_ALDEBARAN)
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE, parity_sts);
+ else
+ WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2, parity_sts);
+ }
if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
BIFL_RasContller_Intr_Recv)) {
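
[Editor's note, not part of the patch] The Aldebaran paths above repeat the same "pick the _ALDE offset" decision at every register access. A hypothetical refactor sketch (helper names are invented; the register defines and CHIP_ALDEBARAN come from the hunks above):

static u32 nbio_v7_4_ras_global_status_reg(struct amdgpu_device *adev)
{
	/* Aldebaran exposes the RAS status block at a different SMN offset */
	return adev->asic_type == CHIP_ALDEBARAN ?
		smnRAS_GLOBAL_STATUS_LO_ALDE : smnRAS_GLOBAL_STATUS_LO;
}

static u32 nbio_v7_4_parity_status_reg(struct amdgpu_device *adev)
{
	return adev->asic_type == CHIP_ALDEBARAN ?
		smnPARITY_ERROR_STATUS_UNCORR_GRP2_ALDE :
		smnPARITY_ERROR_STATUS_UNCORR_GRP2;
}
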
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
index b8216581ec8d..cc5692db6f98 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h
@@ -27,6 +27,7 @@
#include "soc15_common.h"
extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg;
+extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald;
extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs;
extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index ff80786e3918..febc903adf58 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -180,8 +180,8 @@ static const struct amdgpu_video_codecs yc_video_codecs_decode = {
static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(3, 0, 0):
if (amdgpu_sriov_vf(adev)) {
if (encode)
*codecs = &sriov_sc_video_codecs_encode;
@@ -194,29 +194,27 @@ static int nv_query_video_codecs(struct amdgpu_device *adev, bool encode,
*codecs = &sc_video_codecs_decode;
}
return 0;
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 16):
+ case IP_VERSION(3, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &sc_video_codecs_decode;
return 0;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(3, 1, 1):
if (encode)
*codecs = &nv_video_codecs_encode;
else
*codecs = &yc_video_codecs_decode;
return 0;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 33):
if (encode)
*codecs = &bg_video_codecs_encode;
else
*codecs = &bg_video_codecs_decode;
return 0;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 0, 2):
if (encode)
*codecs = &nv_video_codecs_encode;
else
@@ -511,14 +509,15 @@ nv_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
- switch (adev->asic_type) {
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
return AMD_RESET_METHOD_MODE2;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
return AMD_RESET_METHOD_MODE1;
default:
if (amdgpu_dpm_is_baco_supported(adev))
@@ -599,7 +598,7 @@ static void nv_enable_doorbell_aperture(struct amdgpu_device *adev,
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
-static const struct amdgpu_ip_block_version nv_common_ip_block =
+const struct amdgpu_ip_block_version nv_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 1,
@@ -608,314 +607,11 @@ static const struct amdgpu_ip_block_version nv_common_ip_block =
.funcs = &nv_common_ip_funcs,
};
-static bool nv_is_headless_sku(struct pci_dev *pdev)
-{
- if ((pdev->device == 0x731E &&
- (pdev->revision == 0xC6 || pdev->revision == 0xC7)) ||
- (pdev->device == 0x7340 && pdev->revision == 0xC9) ||
- (pdev->device == 0x7360 && pdev->revision == 0xC7))
- return true;
- return false;
-}
-
-static int nv_reg_base_init(struct amdgpu_device *adev)
-{
- int r;
-
- if (amdgpu_discovery) {
- r = amdgpu_discovery_reg_base_init(adev);
- if (r) {
- DRM_WARN("failed to init reg base from ip discovery table, "
- "fallback to legacy init method\n");
- goto legacy_init;
- }
-
- amdgpu_discovery_harvest_ip(adev);
- if (nv_is_headless_sku(adev->pdev)) {
- adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
- adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
- }
-
- return 0;
- }
-
-legacy_init:
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- navi10_reg_base_init(adev);
- break;
- case CHIP_NAVI14:
- navi14_reg_base_init(adev);
- break;
- case CHIP_NAVI12:
- navi12_reg_base_init(adev);
- break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- sienna_cichlid_reg_base_init(adev);
- break;
- case CHIP_VANGOGH:
- vangogh_reg_base_init(adev);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- dimgrey_cavefish_reg_base_init(adev);
- break;
- case CHIP_BEIGE_GOBY:
- beige_goby_reg_base_init(adev);
- break;
- case CHIP_YELLOW_CARP:
- yellow_carp_reg_base_init(adev);
- break;
- case CHIP_CYAN_SKILLFISH:
- cyan_skillfish_reg_base_init(adev);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
void nv_set_virt_ops(struct amdgpu_device *adev)
{
adev->virt.ops = &xgpu_nv_virt_ops;
}
-int nv_set_ip_blocks(struct amdgpu_device *adev)
-{
- int r;
-
- if (adev->asic_type == CHIP_CYAN_SKILLFISH) {
- adev->nbio.funcs = &nbio_v2_3_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- } else if (adev->flags & AMD_IS_APU) {
- adev->nbio.funcs = &nbio_v7_2_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
- } else {
- adev->nbio.funcs = &nbio_v2_3_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
- }
- adev->hdp.funcs = &hdp_v5_0_funcs;
-
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
- adev->smuio.funcs = &smuio_v11_0_6_funcs;
- else
- adev->smuio.funcs = &smuio_v11_0_funcs;
-
- if (adev->asic_type == CHIP_SIENNA_CICHLID)
- adev->gmc.xgmi.supported = true;
-
- /* Set IP register base before any HW register access */
- r = nv_reg_base_init(adev);
- if (r)
- return r;
-
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- if (adev->enable_mes)
- amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
- break;
- case CHIP_NAVI12:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- }
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- !amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- break;
- case CHIP_SIENNA_CICHLID:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- if (!amdgpu_sriov_vf(adev)) {
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- } else {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- }
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- if (adev->enable_mes)
- amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
- break;
- case CHIP_NAVY_FLOUNDER:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- break;
- case CHIP_VANGOGH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_DIMGREY_CAVEFISH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_BEIGE_GOBY:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
- is_support_sw_smu(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- break;
- case CHIP_YELLOW_CARP:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
- break;
- case CHIP_CYAN_SKILLFISH:
- amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
- if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- }
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static uint32_t nv_get_rev_id(struct amdgpu_device *adev)
{
return adev->nbio.funcs->get_rev_id(adev);
@@ -1056,8 +752,11 @@ static int nv_common_early_init(void *handle)
adev->rev_id = nv_get_rev_id(adev);
adev->external_rev_id = 0xff;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ /* TODO: split the CG and PG flags based on the IP version for which
+ * they are relevant.
+ */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(10, 1, 10):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_IH_CG |
@@ -1079,7 +778,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_ATHUB;
adev->external_rev_id = adev->rev_id + 0x1;
break;
- case CHIP_NAVI14:
+ case IP_VERSION(10, 1, 1):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_IH_CG |
@@ -1100,7 +799,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 20;
break;
- case CHIP_NAVI12:
+ case IP_VERSION(10, 1, 2):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -1129,7 +828,7 @@ static int nv_common_early_init(void *handle)
adev->rev_id = 0;
adev->external_rev_id = adev->rev_id + 0xa;
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(10, 3, 0):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1153,7 +852,7 @@ static int nv_common_early_init(void *handle)
}
adev->external_rev_id = adev->rev_id + 0x28;
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(10, 3, 2):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1172,8 +871,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x32;
break;
-
- case CHIP_VANGOGH:
+ case IP_VERSION(10, 3, 1):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CP_LS |
@@ -1196,7 +894,7 @@ static int nv_common_early_init(void *handle)
if (adev->apu_flags & AMD_APU_IS_VANGOGH)
adev->external_rev_id = adev->rev_id + 0x01;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(10, 3, 4):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1215,7 +913,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x3c;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(10, 3, 5):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_CGCG |
AMD_CG_SUPPORT_GFX_CGLS |
@@ -1232,7 +930,7 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_MMHUB;
adev->external_rev_id = adev->rev_id + 0x46;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(10, 3, 3):
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
AMD_CG_SUPPORT_GFX_CGCG |
@@ -1257,11 +955,11 @@ static int nv_common_early_init(void *handle)
AMD_PG_SUPPORT_VCN_DPG |
AMD_PG_SUPPORT_JPEG;
if (adev->pdev->device == 0x1681)
- adev->external_rev_id = adev->rev_id + 0x19;
+ adev->external_rev_id = 0x20;
else
adev->external_rev_id = adev->rev_id + 0x01;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(10, 1, 3):
adev->cg_flags = 0;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x82;
@@ -1388,14 +1086,14 @@ static int nv_common_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(2, 3, 0):
+ case IP_VERSION(2, 3, 1):
+ case IP_VERSION(2, 3, 2):
+ case IP_VERSION(3, 3, 0):
+ case IP_VERSION(3, 3, 1):
+ case IP_VERSION(3, 3, 2):
+ case IP_VERSION(3, 3, 3):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.h b/drivers/gpu/drm/amd/amdgpu/nv.h
index 1f40ba3b0460..83e9782aef39 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.h
+++ b/drivers/gpu/drm/amd/amdgpu/nv.h
@@ -26,18 +26,10 @@
#include "nbio_v2_3.h"
+extern const struct amdgpu_ip_block_version nv_common_ip_block;
+
void nv_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void nv_set_virt_ops(struct amdgpu_device *adev);
-int nv_set_ip_blocks(struct amdgpu_device *adev);
-int navi10_reg_base_init(struct amdgpu_device *adev);
-int navi14_reg_base_init(struct amdgpu_device *adev);
-int navi12_reg_base_init(struct amdgpu_device *adev);
-int sienna_cichlid_reg_base_init(struct amdgpu_device *adev);
-void vangogh_reg_base_init(struct amdgpu_device *adev);
-int dimgrey_cavefish_reg_base_init(struct amdgpu_device *adev);
-int beige_goby_reg_base_init(struct amdgpu_device *adev);
-int yellow_carp_reg_base_init(struct amdgpu_device *adev);
-int cyan_skillfish_reg_base_init(struct amdgpu_device *adev);
#endif
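
[Editor's note, not part of the patch] The nv.c changes above replace asic_type switches with checks on the discovered IP versions (adev->ip_versions[...][0]). As a standalone illustration of the comparison, assuming the usual packing of major/minor/revision into a single integer:

#include <stdio.h>

#define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

int main(void)
{
	unsigned int vcn = IP_VERSION(3, 0, 0);	/* value used in the VCN 3.0.0 case above */

	printf("VCN %u.%u.%u packs to 0x%06x\n",
	       vcn >> 16, (vcn >> 8) & 0xff, vcn & 0xff, vcn);
	return 0;
}
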
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 5872d68ed13d..ed2293686f0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -84,28 +84,28 @@ static int psp_v10_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version =
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes =
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr =
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
- adev->psp.dtm.feature_version =
+ adev->psp.dtm_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes =
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
- adev->psp.securedisplay.feature_version =
+ adev->psp.securedisplay_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->securedisplay.fw_version);
- adev->psp.securedisplay.size_bytes =
+ adev->psp.securedisplay_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->securedisplay.size_bytes);
- adev->psp.securedisplay.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.securedisplay_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->securedisplay.offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 29bf9f09944b..2176ef85f137 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -93,35 +93,35 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA20:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 2):
chip_name = "vega20";
break;
- case CHIP_NAVI10:
+ case IP_VERSION(11, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
chip_name = "navi12";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(11, 0, 4):
chip_name = "arcturus";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
chip_name = "beige_goby";
break;
default:
@@ -129,9 +129,9 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
}
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(11, 0, 2):
+ case IP_VERSION(11, 0, 4):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -151,20 +151,26 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.xgmi.feature_version = le32_to_cpu(ta_hdr->xgmi.fw_version);
- adev->psp.xgmi.size_bytes = le32_to_cpu(ta_hdr->xgmi.size_bytes);
- adev->psp.xgmi.start_addr = (uint8_t *)ta_hdr +
+ adev->psp.xgmi_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->xgmi.fw_version);
+ adev->psp.xgmi_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->xgmi.size_bytes);
+ adev->psp.xgmi_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.ras.feature_version = le32_to_cpu(ta_hdr->ras.fw_version);
- adev->psp.ras.size_bytes = le32_to_cpu(ta_hdr->ras.size_bytes);
- adev->psp.ras.start_addr = (uint8_t *)adev->psp.xgmi.start_addr +
+ adev->psp.ras_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->ras.fw_version);
+ adev->psp.ras_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->ras.size_bytes);
+ adev->psp.ras_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.xgmi_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->ras.offset_bytes);
}
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -186,30 +192,31 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
goto out2;
ta_hdr = (const struct ta_firmware_header_v1_0 *)adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version = le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes = le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr = (uint8_t *)ta_hdr +
- le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->hdcp.fw_version);
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->hdcp.size_bytes);
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
+ (uint8_t *)ta_hdr +
+ le32_to_cpu(
+ ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.dtm.feature_version = le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes = le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr = (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.fw_version =
+ le32_to_cpu(ta_hdr->dtm.fw_version);
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
+ le32_to_cpu(ta_hdr->dtm.size_bytes);
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context
+ .bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- err = psp_init_sos_microcode(psp, chip_name);
- if (err)
- return err;
- err = psp_init_ta_microcode(psp, chip_name);
- if (err)
- return err;
- break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -217,7 +224,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
@@ -691,7 +698,7 @@ static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops)
return -ENOMEM;
}
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
memcpy_fromio(buf, adev->mman.aper_base_kaddr, sz);
ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN);
if (ret) {
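
[Editor's note, not part of the patch] The psp_v10_0 and psp_v11_0 hunks above (and the psp_v12_0 hunk that follows) all repeat the same three assignments — fw_version, size_bytes, start_addr — when moving TA descriptors into *_context.context.bin_desc. A self-contained sketch of that shape, with the struct types reduced to just the fields used in the hunks (the real driver structs carry more members):

struct ta_bin_src  { unsigned int fw_version, offset_bytes, size_bytes; };
struct ta_bin_desc { unsigned int fw_version, size_bytes; const unsigned char *start_addr; };

static void fill_ta_bin_desc(struct ta_bin_desc *dst,
			     const struct ta_bin_src *src,
			     const unsigned char *fw_base)
{
	/* mirror the pattern in the hunks: version and size are copied,
	 * start_addr is the firmware image base plus the per-TA offset */
	dst->fw_version = src->fw_version;
	dst->size_bytes = src->size_bytes;
	dst->start_addr = fw_base + src->offset_bytes;
}
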
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
index cc649406234b..a2588200ea58 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c
@@ -84,22 +84,22 @@ static int psp_v12_0_init_microcode(struct psp_context *psp)
ta_hdr = (const struct ta_firmware_header_v1_0 *)
adev->psp.ta_fw->data;
- adev->psp.hdcp.feature_version =
+ adev->psp.hdcp_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->hdcp.fw_version);
- adev->psp.hdcp.size_bytes =
+ adev->psp.hdcp_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->hdcp.size_bytes);
- adev->psp.hdcp.start_addr =
+ adev->psp.hdcp_context.context.bin_desc.start_addr =
(uint8_t *)ta_hdr +
le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes);
adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version);
- adev->psp.dtm.feature_version =
+ adev->psp.dtm_context.context.bin_desc.fw_version =
le32_to_cpu(ta_hdr->dtm.fw_version);
- adev->psp.dtm.size_bytes =
+ adev->psp.dtm_context.context.bin_desc.size_bytes =
le32_to_cpu(ta_hdr->dtm.size_bytes);
- adev->psp.dtm.start_addr =
- (uint8_t *)adev->psp.hdcp.start_addr +
+ adev->psp.dtm_context.context.bin_desc.start_addr =
+ (uint8_t *)adev->psp.hdcp_context.context.bin_desc.start_addr +
le32_to_cpu(ta_hdr->dtm.offset_bytes);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
index 47a500f64db2..17655bc6d2f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c
@@ -47,18 +47,19 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
const char *chip_name;
int err = 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
chip_name = "aldebaran";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
chip_name = "yellow_carp";
break;
default:
BUG();
}
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP0_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
err = psp_init_sos_microcode(psp, chip_name);
if (err)
return err;
@@ -66,7 +67,8 @@ static int psp_v13_0_init_microcode(struct psp_context *psp)
if (err)
return err;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
err = psp_init_asd_microcode(psp, chip_name);
if (err)
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index 8931000dcd41..e8e4749e9c79 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -469,8 +469,8 @@ static int sdma_v4_0_irq_id_to_seq(unsigned client_id)
static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
@@ -478,7 +478,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg10,
ARRAY_SIZE(golden_settings_sdma_vg10));
break;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4,
ARRAY_SIZE(golden_settings_sdma_4));
@@ -486,7 +486,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_vg12,
ARRAY_SIZE(golden_settings_sdma_vg12));
break;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma0_4_2_init,
ARRAY_SIZE(golden_settings_sdma0_4_2_init));
@@ -497,17 +497,18 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma1_4_2,
ARRAY_SIZE(golden_settings_sdma1_4_2));
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(4, 2, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_arct,
ARRAY_SIZE(golden_settings_sdma_arct));
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_aldebaran,
ARRAY_SIZE(golden_settings_sdma_aldebaran));
break;
- case CHIP_RAVEN:
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_1,
ARRAY_SIZE(golden_settings_sdma_4_1));
@@ -520,7 +521,7 @@ static void sdma_v4_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_rv1,
ARRAY_SIZE(golden_settings_sdma_rv1));
break;
- case CHIP_RENOIR:
+ case IP_VERSION(4, 1, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_4_3,
ARRAY_SIZE(golden_settings_sdma_4_3));
@@ -538,12 +539,12 @@ static void sdma_v4_0_setup_ulv(struct amdgpu_device *adev)
* The only chips with SDMAv4 and ULV are VG10 and VG20.
* Server SKUs take a different hysteresis setting from other SKUs.
*/
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
if (adev->pdev->device == 0x6860)
break;
return;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
if (adev->pdev->device == 0x66a1)
break;
return;
@@ -589,8 +590,8 @@ static void sdma_v4_0_destroy_inst_ctx(struct amdgpu_device *adev)
/* Arcturus shares the same FW memory across
all SDMA instances */
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0))
break;
}
@@ -620,17 +621,18 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
chip_name = "vega10";
break;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
chip_name = "vega12";
break;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
chip_name = "vega20";
break;
- case CHIP_RAVEN:
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
if (adev->apu_flags & AMD_APU_IS_RAVEN2)
chip_name = "raven2";
else if (adev->apu_flags & AMD_APU_IS_PICASSO)
@@ -638,16 +640,16 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
else
chip_name = "raven";
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(4, 2, 2):
chip_name = "arcturus";
break;
- case CHIP_RENOIR:
+ case IP_VERSION(4, 1, 2):
if (adev->apu_flags & AMD_APU_IS_RENOIR)
chip_name = "renoir";
else
chip_name = "green_sardine";
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
chip_name = "aldebaran";
break;
default:
@@ -665,8 +667,8 @@ static int sdma_v4_0_init_microcode(struct amdgpu_device *adev)
goto out;
for (i = 1; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) {
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) ||
+ adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 4, 0)) {
/* Arcturus & Aldebaran will leverage the same FW memory
for every SDMA instance */
memcpy((void *)&adev->sdma.instance[i],
@@ -1106,7 +1108,7 @@ static void sdma_v4_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
* Arcturus for the moment and firmware version 14
* and above.
*/
- if (adev->asic_type == CHIP_ARCTURUS &&
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) &&
adev->sdma.instance[i].fw_version >= 14)
WREG32_SDMA(i, mmSDMA0_PUB_DUMMY_REG2, enable);
/* Extend page fault timeout to avoid interrupt storm */
@@ -1393,9 +1395,10 @@ static void sdma_v4_0_init_pg(struct amdgpu_device *adev)
if (!(adev->pg_flags & AMD_PG_SUPPORT_SDMA))
return;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
sdma_v4_1_init_power_gating(adev);
sdma_v4_1_update_power_gating(adev, true);
break;
@@ -1835,13 +1838,13 @@ static bool sdma_v4_0_fw_support_paging_queue(struct amdgpu_device *adev)
{
uint fw_version = adev->sdma.instance[0].fw_version;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
return fw_version >= 430;
- case CHIP_VEGA12:
+ case IP_VERSION(4, 0, 1):
/*return fw_version >= 31;*/
return false;
- case CHIP_VEGA20:
+ case IP_VERSION(4, 2, 0):
return fw_version >= 123;
default:
return false;
@@ -1853,15 +1856,6 @@ static int sdma_v4_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- if (adev->flags & AMD_IS_APU)
- adev->sdma.num_instances = 1;
- else if (adev->asic_type == CHIP_ARCTURUS)
- adev->sdma.num_instances = 8;
- else if (adev->asic_type == CHIP_ALDEBARAN)
- adev->sdma.num_instances = 5;
- else
- adev->sdma.num_instances = 2;
-
r = sdma_v4_0_init_microcode(adev);
if (r) {
DRM_ERROR("Failed to load sdma firmware!\n");
@@ -1869,7 +1863,8 @@ static int sdma_v4_0_early_init(void *handle)
}
/* TODO: Page queue breaks driver reload under SRIOV */
- if ((adev->asic_type == CHIP_VEGA10) && amdgpu_sriov_vf((adev)))
+ if ((adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 0, 0)) &&
+ amdgpu_sriov_vf((adev)))
adev->sdma.has_page_queue = false;
else if (sdma_v4_0_fw_support_paging_queue(adev))
adev->sdma.has_page_queue = true;
@@ -2141,14 +2136,14 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev,
amdgpu_fence_process(&adev->sdma.instance[instance].ring);
break;
case 1:
- if (adev->asic_type == CHIP_VEGA20)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
case 2:
/* XXX compute */
break;
case 3:
- if (adev->asic_type != CHIP_VEGA20)
+ if (adev->ip_versions[SDMA0_HWIP][0] != IP_VERSION(4, 2, 0))
amdgpu_fence_process(&adev->sdma.instance[instance].page);
break;
}
@@ -2364,9 +2359,10 @@ static int sdma_v4_0_set_powergating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 1, 0):
+ case IP_VERSION(4, 1, 1):
+ case IP_VERSION(4, 1, 2):
sdma_v4_1_update_power_gating(adev,
state == AMD_PG_STATE_GATE);
break;
@@ -2551,7 +2547,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
adev->sdma.instance[i].ring.funcs =
&sdma_v4_0_ring_funcs_2nd_mmhub;
else
@@ -2559,7 +2555,7 @@ static void sdma_v4_0_set_ring_funcs(struct amdgpu_device *adev)
&sdma_v4_0_ring_funcs;
adev->sdma.instance[i].ring.me = i;
if (adev->sdma.has_page_queue) {
- if (adev->asic_type == CHIP_ARCTURUS && i >= 5)
+ if (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(4, 2, 2) && i >= 5)
adev->sdma.instance[i].page.funcs =
&sdma_v4_0_page_ring_funcs_2nd_mmhub;
else
@@ -2786,12 +2782,12 @@ static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(4, 2, 0):
+ case IP_VERSION(4, 2, 2):
adev->sdma.funcs = &sdma_v4_0_ras_funcs;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(4, 4, 0):
adev->sdma.funcs = &sdma_v4_4_ras_funcs;
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 50bf3b71bc93..853d1511b889 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -187,8 +187,8 @@ static u32 sdma_v5_0_get_reg_offset(struct amdgpu_device *adev, u32 instance, u3
static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
@@ -196,7 +196,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv10,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv10));
break;
- case CHIP_NAVI14:
+ case IP_VERSION(5, 0, 2):
soc15_program_register_sequence(adev,
golden_settings_sdma_5,
(const u32)ARRAY_SIZE(golden_settings_sdma_5));
@@ -204,7 +204,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv14,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv14));
break;
- case CHIP_NAVI12:
+ case IP_VERSION(5, 0, 5):
if (amdgpu_sriov_vf(adev))
soc15_program_register_sequence(adev,
golden_settings_sdma_5_sriov,
@@ -217,7 +217,7 @@ static void sdma_v5_0_init_golden_registers(struct amdgpu_device *adev)
golden_settings_sdma_nv12,
(const u32)ARRAY_SIZE(golden_settings_sdma_nv12));
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(5, 0, 1):
soc15_program_register_sequence(adev,
golden_settings_sdma_cyan_skillfish,
(const u32)ARRAY_SIZE(golden_settings_sdma_cyan_skillfish));
@@ -248,22 +248,22 @@ static int sdma_v5_0_init_microcode(struct amdgpu_device *adev)
const struct common_firmware_header *header = NULL;
const struct sdma_firmware_header_v1_0 *hdr;
- if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_NAVI12))
+ if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 0, 5)))
return 0;
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(5, 0, 2):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(5, 0, 5):
chip_name = "navi12";
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(5, 0, 1):
if (adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2)
chip_name = "cyan_skillfish2";
else
@@ -1295,8 +1295,6 @@ static int sdma_v5_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->sdma.num_instances = 2;
-
sdma_v5_0_set_ring_funcs(adev);
sdma_v5_0_set_buffer_funcs(adev);
sdma_v5_0_set_vm_pte_funcs(adev);
@@ -1636,10 +1634,10 @@ static int sdma_v5_0_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 0, 0):
+ case IP_VERSION(5, 0, 2):
+ case IP_VERSION(5, 0, 5):
sdma_v5_0_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_0_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
index e32efcfb0c8b..4d4d1aa51b8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c
@@ -136,23 +136,23 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
DRM_DEBUG("\n");
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 2, 0):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(5, 2, 2):
chip_name = "navy_flounder";
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(5, 2, 1):
chip_name = "vangogh";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(5, 2, 4):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(5, 2, 5):
chip_name = "beige_goby";
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(5, 2, 3):
chip_name = "yellow_carp";
break;
default:
@@ -174,7 +174,7 @@ static int sdma_v5_2_init_microcode(struct amdgpu_device *adev)
(void *)&adev->sdma.instance[0],
sizeof(struct amdgpu_sdma_instance));
- if (amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_SIENNA_CICHLID))
+ if (amdgpu_sriov_vf(adev) && (adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 0)))
return 0;
DRM_DEBUG("psp_load == '%s'\n",
@@ -375,10 +375,10 @@ static void sdma_v5_2_ring_emit_ib(struct amdgpu_ring *ring,
*/
static void sdma_v5_2_ring_emit_mem_sync(struct amdgpu_ring *ring)
{
- uint32_t gcr_cntl =
- SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB | SDMA_GCR_GLM_INV |
- SDMA_GCR_GL1_INV | SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
- SDMA_GCR_GLI_INV(1);
+ uint32_t gcr_cntl = SDMA_GCR_GL2_INV | SDMA_GCR_GL2_WB |
+ SDMA_GCR_GLM_INV | SDMA_GCR_GL1_INV |
+ SDMA_GCR_GLV_INV | SDMA_GCR_GLK_INV |
+ SDMA_GCR_GLI_INV(1);
/* flush entire cache L0/L1/L2, this can be optimized by performance requirement */
amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_GCR_REQ));
@@ -1217,23 +1217,6 @@ static int sdma_v5_2_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- adev->sdma.num_instances = 4;
- break;
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- adev->sdma.num_instances = 2;
- break;
- case CHIP_VANGOGH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
- adev->sdma.num_instances = 1;
- break;
- default:
- break;
- }
-
sdma_v5_2_set_ring_funcs(adev);
sdma_v5_2_set_buffer_funcs(adev);
sdma_v5_2_set_vm_pte_funcs(adev);
@@ -1555,7 +1538,7 @@ static void sdma_v5_2_update_medium_grain_clock_gating(struct amdgpu_device *ade
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH)
+ if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_MGCG;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
@@ -1592,7 +1575,7 @@ static void sdma_v5_2_update_medium_grain_light_sleep(struct amdgpu_device *adev
for (i = 0; i < adev->sdma.num_instances; i++) {
- if (adev->sdma.instance[i].fw_version < 70 && adev->asic_type == CHIP_VANGOGH)
+ if (adev->sdma.instance[i].fw_version < 70 && adev->ip_versions[SDMA0_HWIP][0] == IP_VERSION(5, 2, 1))
adev->cg_flags &= ~AMD_CG_SUPPORT_SDMA_LS;
if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
@@ -1621,13 +1604,13 @@ static int sdma_v5_2_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_VANGOGH:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[SDMA0_HWIP][0]) {
+ case IP_VERSION(5, 2, 0):
+ case IP_VERSION(5, 2, 2):
+ case IP_VERSION(5, 2, 1):
+ case IP_VERSION(5, 2, 4):
+ case IP_VERSION(5, 2, 5):
+ case IP_VERSION(5, 2, 3):
sdma_v5_2_update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
sdma_v5_2_update_medium_grain_light_sleep(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c
deleted file mode 100644
index 5ee69f70c49b..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid_reg_init.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "sienna_cichlid_ip_offset.h"
-
-int sienna_cichlid_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocke needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA2_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SDMA3_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 0fc97c364fd7..0c316a2d42ed 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -85,6 +85,8 @@
#define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba
#define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0
+static const struct amd_ip_funcs soc15_common_ip_funcs;
+
/* Vega, Raven, Arcturus */
static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] =
{
@@ -154,31 +156,38 @@ static const struct amdgpu_video_codecs rn_video_codecs_decode =
static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode,
const struct amdgpu_video_codecs **codecs)
{
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &vega_video_codecs_decode;
- return 0;
- case CHIP_RAVEN:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &rv_video_codecs_decode;
- return 0;
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
- case CHIP_RENOIR:
- if (encode)
- *codecs = &vega_video_codecs_encode;
- else
- *codecs = &rn_video_codecs_decode;
- return 0;
- default:
- return -EINVAL;
+ if (adev->ip_versions[VCE_HWIP][0]) {
+ switch (adev->ip_versions[VCE_HWIP][0]) {
+ case IP_VERSION(4, 0, 0):
+ case IP_VERSION(4, 1, 0):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &vega_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ switch (adev->ip_versions[UVD_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rv_video_codecs_decode;
+ return 0;
+ case IP_VERSION(2, 5, 0):
+ case IP_VERSION(2, 6, 0):
+ case IP_VERSION(2, 2, 0):
+ if (encode)
+ *codecs = &vega_video_codecs_encode;
+ else
+ *codecs = &rn_video_codecs_decode;
+ return 0;
+ default:
+ return -EINVAL;
+ }
}
}
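
soc15_query_video_codecs() now keys the codec tables off the discovered VCE/UVD IP versions instead of adev->asic_type, which is the recurring theme of this series. The IP_VERSION() triplet is a packed integer, so each case compares a single value; the sketch below only illustrates that shape, and the bit layout shown is an assumption (the real macro lives in the amdgpu headers, not in this hunk):

#include <stdint.h>
#include <stdio.h>

/* Assumed packing: major in bits 23:16, minor in 15:8, revision in 7:0. */
#define IP_VERSION(maj, min, rev) (((maj) << 16) | ((min) << 8) | (rev))

struct codecs { const char *name; };

static const struct codecs rv_decode = { "rv_video_codecs_decode" };
static const struct codecs rn_decode = { "rn_video_codecs_decode" };

/* Mirrors the decode half of the new switch: pick a table by UVD/VCN IP version. */
static int query_decode_codecs(uint32_t uvd_ip_version, const struct codecs **out)
{
	switch (uvd_ip_version) {
	case IP_VERSION(1, 0, 0):
	case IP_VERSION(1, 0, 1):
		*out = &rv_decode;
		return 0;
	case IP_VERSION(2, 5, 0):
	case IP_VERSION(2, 6, 0):
	case IP_VERSION(2, 2, 0):
		*out = &rn_decode;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	const struct codecs *c;

	if (!query_decode_codecs(IP_VERSION(2, 5, 0), &c))
		printf("selected %s\n", c->name);
	return 0;
}
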
@@ -332,9 +341,11 @@ static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
u32 reference_clock = adev->clock.spll.reference_freq;
- if (adev->asic_type == CHIP_RENOIR)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(12, 0, 1))
return 10000;
- if (adev->asic_type == CHIP_RAVEN)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(10, 0, 1))
return reference_clock / 4;
return reference_clock;
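
The xclk selection follows the same conversion: MP1 (SMU) 12.0.0/12.0.1, the old CHIP_RENOIR path, returns a fixed 10000 (presumably 100 MHz in the driver's 10 kHz convention), MP1 10.0.0/10.0.1, the old CHIP_RAVEN path, returns a quarter of the SPLL reference clock, and everything else the raw reference clock. A standalone sketch with illustrative clock values:

#include <stdint.h>
#include <stdio.h>

/* xclk keyed on the MP1 (SMU) IP version rather than the chip name. */
static uint32_t soc15_xclk(uint32_t mp1_major, uint32_t mp1_minor, uint32_t ref_clk)
{
	if (mp1_major == 12 && mp1_minor == 0)	/* MP1 12.0.x APUs */
		return 10000;
	if (mp1_major == 10 && mp1_minor == 0)	/* MP1 10.0.x APUs */
		return ref_clk / 4;
	return ref_clk;				/* dGPUs */
}

int main(void)
{
	printf("APU  xclk: %u\n", (unsigned int)soc15_xclk(10, 0, 40000));
	printf("dGPU xclk: %u\n", (unsigned int)soc15_xclk(11, 0, 10000));
	return 0;
}
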
@@ -565,28 +576,29 @@ soc15_asic_reset_method(struct amdgpu_device *adev)
dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n",
amdgpu_reset_method);
- switch (adev->asic_type) {
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(10, 0, 0):
+ case IP_VERSION(10, 0, 1):
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
return AMD_RESET_METHOD_MODE2;
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_ARCTURUS:
- baco_reset = amdgpu_dpm_is_baco_supported(adev);
- break;
- case CHIP_VEGA20:
- if (adev->psp.sos.fw_version >= 0x80067)
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->psp.sos.fw_version >= 0x80067)
+ baco_reset = amdgpu_dpm_is_baco_supported(adev);
+ /*
+ * 1. PMFW version > 0x284300: all cases use baco
+ * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
+ */
+ if (ras && adev->ras_enabled &&
+ adev->pm.fw_version <= 0x283400)
+ baco_reset = false;
+ } else {
baco_reset = amdgpu_dpm_is_baco_supported(adev);
-
- /*
- * 1. PMFW version > 0x284300: all cases use baco
- * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco
- */
- if (ras && adev->ras_enabled &&
- adev->pm.fw_version <= 0x283400)
- baco_reset = false;
+ }
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(13, 0, 2):
/*
* 1.connected to cpu: driver issue mode2 reset
* 2.discret gpu: driver issue mode1 reset
@@ -629,15 +641,17 @@ static int soc15_asic_reset(struct amdgpu_device *adev)
static bool soc15_supports_baco(struct amdgpu_device *adev)
{
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_ARCTURUS:
- return amdgpu_dpm_is_baco_supported(adev);
- case CHIP_VEGA20:
- if (adev->psp.sos.fw_version >= 0x80067)
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(9, 0, 0):
+ case IP_VERSION(11, 0, 2):
+ if (adev->asic_type == CHIP_VEGA20) {
+ if (adev->psp.sos.fw_version >= 0x80067)
+ return amdgpu_dpm_is_baco_supported(adev);
+ return false;
+ } else {
return amdgpu_dpm_is_baco_supported(adev);
- return false;
+ }
+ break;
default:
return false;
}
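
Both the reset-method selection and soc15_supports_baco() fold the old Vega10/Vega12 and Vega20/Arcturus cases into shared MP1 IP-version cases (9.0.0 and 11.0.2), with Vega20 keeping its extra gate on the PSP SOS firmware level. A standalone sketch of the resulting predicate (the DPM query is stubbed out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for amdgpu_dpm_is_baco_supported(). */
static bool dpm_baco_supported(void)
{
	return true;
}

/* BACO is only advertised when the power code supports it, and on Vega20
 * additionally only with a new enough PSP SOS firmware (>= 0x80067). */
static bool supports_baco(bool is_vega20, uint32_t sos_fw_version)
{
	if (is_vega20 && sos_fw_version < 0x80067)
		return false;
	return dpm_baco_supported();
}

int main(void)
{
	printf("vega20, old SOS fw: %d\n", supports_baco(true, 0x80000));
	printf("vega20, new SOS fw: %d\n", supports_baco(true, 0x80070));
	return 0;
}
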
@@ -704,7 +718,7 @@ static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev,
adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable);
}
-static const struct amdgpu_ip_block_version vega10_common_ip_block =
+const struct amdgpu_ip_block_version vega10_common_ip_block =
{
.type = AMD_IP_BLOCK_TYPE_COMMON,
.major = 2,
@@ -766,185 +780,6 @@ void soc15_set_virt_ops(struct amdgpu_device *adev)
soc15_reg_base_init(adev);
}
-int soc15_set_ip_blocks(struct amdgpu_device *adev)
-{
- /* for bare metal case */
- if (!amdgpu_sriov_vf(adev))
- soc15_reg_base_init(adev);
-
- if (adev->flags & AMD_IS_APU) {
- adev->nbio.funcs = &nbio_v7_0_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
- } else if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN) {
- adev->nbio.funcs = &nbio_v7_4_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
- } else {
- adev->nbio.funcs = &nbio_v6_1_funcs;
- adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
- }
- adev->hdp.funcs = &hdp_v4_0_funcs;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS ||
- adev->asic_type == CHIP_ALDEBARAN)
- adev->df.funcs = &df_v3_6_funcs;
- else
- adev->df.funcs = &df_v1_7_funcs;
-
- if (adev->asic_type == CHIP_VEGA20 ||
- adev->asic_type == CHIP_ARCTURUS)
- adev->smuio.funcs = &smuio_v11_0_funcs;
- else if (adev->asic_type == CHIP_ALDEBARAN)
- adev->smuio.funcs = &smuio_v13_0_funcs;
- else
- adev->smuio.funcs = &smuio_v9_0_funcs;
-
- adev->rev_id = soc15_get_rev_id(adev);
-
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
- /* For Vega10 SR-IOV, PSP need to be initialized before IH */
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
- }
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- } else {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
- if (adev->asic_type == CHIP_VEGA20)
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- else
- amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
- }
- }
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (is_support_sw_smu(adev)) {
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- }
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev))) {
- amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
- }
- break;
- case CHIP_RAVEN:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
- break;
- case CHIP_ARCTURUS:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
- }
-
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
- }
- if (!amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
- break;
- case CHIP_RENOIR:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
- amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
- if (adev->enable_virtual_display || amdgpu_sriov_vf(adev))
- amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
-#if defined(CONFIG_DRM_AMD_DC)
- else if (amdgpu_device_has_dc_support(adev))
- amdgpu_device_ip_block_add(adev, &dm_ip_block);
-#endif
- amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
- break;
- case CHIP_ALDEBARAN:
- amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
- amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
-
- if (amdgpu_sriov_vf(adev)) {
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- } else {
- amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
- if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP))
- amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
- }
-
- amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
- amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
-
- amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
- amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static bool soc15_need_full_reset(struct amdgpu_device *adev)
{
/* change this when we implement soft reset */
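
The big per-ASIC soc15_set_ip_blocks() switch is removed along with its prototype in soc15.h, and vega10_common_ip_block is exported instead. Presumably the IP-block registration is now driven centrally from the IP-discovery code rather than from one hand-written cascade of amdgpu_device_ip_block_add() calls per SoC file; that replacement is not part of this hunk. A standalone sketch of the table-driven shape such a central path takes (names and layout are illustrative only):

#include <stdio.h>

struct ip_block { const char *name; };

static const struct ip_block common_block = { "common" };
static const struct ip_block gmc_block    = { "gmc"    };
static const struct ip_block ih_block     = { "ih"     };
static const struct ip_block psp_block    = { "psp"    };

struct device_ctx {
	const struct ip_block *blocks[16];
	int num_blocks;
};

static void ip_block_add(struct device_ctx *dev, const struct ip_block *b)
{
	dev->blocks[dev->num_blocks++] = b;
	printf("registered %s\n", b->name);
}

/* Table-driven registration: the order is fixed data, and the per-device choice
 * of which IP versions to plug in comes from the discovery table, not a switch
 * over chip names. */
static int set_ip_blocks(struct device_ctx *dev)
{
	static const struct ip_block *const order[] = {
		&common_block, &gmc_block, &ih_block, &psp_block,
	};

	for (unsigned int i = 0; i < sizeof(order) / sizeof(order[0]); ++i)
		ip_block_add(dev, order[i]);
	return 0;
}

int main(void)
{
	struct device_ctx dev = { .num_blocks = 0 };

	return set_ip_blocks(&dev);
}
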
@@ -1153,10 +988,13 @@ static int soc15_common_early_init(void *handle)
adev->se_cac_rreg = &soc15_se_cac_rreg;
adev->se_cac_wreg = &soc15_se_cac_wreg;
-
+ adev->rev_id = soc15_get_rev_id(adev);
adev->external_rev_id = 0xFF;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
+ /* TODO: split the GC and PG flags based on the relevant IP version for which
+ * they are relevant.
+ */
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1180,7 +1018,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = 0x1;
break;
- case CHIP_VEGA12:
+ case IP_VERSION(9, 2, 1):
adev->asic_funcs = &soc15_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1203,7 +1041,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
- case CHIP_VEGA20:
+ case IP_VERSION(9, 4, 0):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1226,7 +1064,8 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x28;
break;
- case CHIP_RAVEN:
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->rev_id >= 0x8)
@@ -1299,7 +1138,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN;
}
break;
- case CHIP_ARCTURUS:
+ case IP_VERSION(9, 4, 1):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1318,7 +1157,7 @@ static int soc15_common_early_init(void *handle)
adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG;
adev->external_rev_id = adev->rev_id + 0x32;
break;
- case CHIP_RENOIR:
+ case IP_VERSION(9, 3, 0):
adev->asic_funcs = &soc15_asic_funcs;
if (adev->apu_flags & AMD_APU_IS_RENOIR)
@@ -1349,7 +1188,7 @@ static int soc15_common_early_init(void *handle)
AMD_PG_SUPPORT_JPEG |
AMD_PG_SUPPORT_VCN_DPG;
break;
- case CHIP_ALDEBARAN:
+ case IP_VERSION(9, 4, 2):
adev->asic_funcs = &vega20_asic_funcs;
adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG |
AMD_CG_SUPPORT_GFX_MGLS |
@@ -1564,10 +1403,10 @@ static int soc15_common_set_clockgating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA12:
- case CHIP_VEGA20:
+ switch (adev->ip_versions[NBIO_HWIP][0]) {
+ case IP_VERSION(6, 1, 0):
+ case IP_VERSION(6, 2, 0):
+ case IP_VERSION(7, 4, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@@ -1583,8 +1422,9 @@ static int soc15_common_set_clockgating_state(void *handle,
adev->df.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
- case CHIP_RAVEN:
- case CHIP_RENOIR:
+ case IP_VERSION(7, 0, 0):
+ case IP_VERSION(7, 0, 1):
+ case IP_VERSION(2, 5, 0):
adev->nbio.funcs->update_medium_grain_clock_gating(adev,
state == AMD_CG_STATE_GATE);
adev->nbio.funcs->update_medium_grain_light_sleep(adev,
@@ -1596,8 +1436,8 @@ static int soc15_common_set_clockgating_state(void *handle,
soc15_update_drm_light_sleep(adev,
state == AMD_CG_STATE_GATE);
break;
- case CHIP_ARCTURUS:
- case CHIP_ALDEBARAN:
+ case IP_VERSION(7, 4, 1):
+ case IP_VERSION(7, 4, 4):
adev->hdp.funcs->update_clock_gating(adev,
state == AMD_CG_STATE_GATE);
break;
@@ -1619,7 +1459,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags)
adev->hdp.funcs->get_clock_gating_state(adev, flags);
- if (adev->asic_type != CHIP_ALDEBARAN) {
+ if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(13, 0, 2)) {
/* AMD_CG_SUPPORT_DRM_MGCG */
data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0));
@@ -1645,7 +1485,7 @@ static int soc15_common_set_powergating_state(void *handle,
return 0;
}
-const struct amd_ip_funcs soc15_common_ip_funcs = {
+static const struct amd_ip_funcs soc15_common_ip_funcs = {
.name = "soc15_common",
.early_init = soc15_common_early_init,
.late_init = soc15_common_late_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index 034cfdfc4dbe..efc2a253e8db 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -28,11 +28,11 @@
#include "nbio_v7_0.h"
#include "nbio_v7_4.h"
+extern const struct amdgpu_ip_block_version vega10_common_ip_block;
+
#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6
#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3
-extern const struct amd_ip_funcs soc15_common_ip_funcs;
-
struct soc15_reg_golden {
u32 hwip;
u32 instance;
@@ -102,7 +102,6 @@ struct soc15_ras_field_entry {
void soc15_grbm_select(struct amdgpu_device *adev,
u32 me, u32 pipe, u32 queue, u32 vmid);
void soc15_set_virt_ops(struct amdgpu_device *adev);
-int soc15_set_ip_blocks(struct amdgpu_device *adev);
void soc15_program_register_sequence(struct amdgpu_device *adev,
const struct soc15_reg_golden *registers,
diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
index 0f214a398dd8..5093826a43d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h
@@ -38,9 +38,8 @@ enum ras_command {
TA_RAS_COMMAND__TRIGGER_ERROR,
};
-enum ta_ras_status
-{
- TA_RAS_STATUS__SUCCESS = 0x00,
+enum ta_ras_status {
+ TA_RAS_STATUS__SUCCESS = 0x0000,
TA_RAS_STATUS__RESET_NEEDED = 0xA001,
TA_RAS_STATUS__ERROR_INVALID_PARAMETER = 0xA002,
TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE = 0xA003,
@@ -55,7 +54,17 @@ enum ta_ras_status
TA_RAS_STATUS__ERROR_GET_DEV_INFO = 0xA00C,
TA_RAS_STATUS__ERROR_UNSUPPORTED_DEV = 0xA00D,
TA_RAS_STATUS__ERROR_NOT_INITIALIZED = 0xA00E,
- TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F
+ TA_RAS_STATUS__ERROR_TEE_INTERNAL = 0xA00F,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_FUNCTION = 0xA010,
+ TA_RAS_STATUS__ERROR_SYS_DRV_REG_ACCESS = 0xA011,
+ TA_RAS_STATUS__ERROR_RAS_READ_WRITE = 0xA012,
+ TA_RAS_STATUS__ERROR_NULL_PTR = 0xA013,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_IP = 0xA014,
+ TA_RAS_STATUS__ERROR_PCS_STATE_QUIET = 0xA015,
+ TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016,
+ TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017,
+ TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018,
+ TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019
};
enum ta_ras_block {
@@ -73,9 +82,18 @@ enum ta_ras_block {
TA_RAS_BLOCK__MP0,
TA_RAS_BLOCK__MP1,
TA_RAS_BLOCK__FUSE,
+ TA_RAS_BLOCK__MCA,
TA_NUM_BLOCK_MAX
};
+enum ta_ras_mca_block {
+ TA_RAS_MCA_BLOCK__MP0 = 0,
+ TA_RAS_MCA_BLOCK__MP1 = 1,
+ TA_RAS_MCA_BLOCK__MPIO = 2,
+ TA_RAS_MCA_BLOCK__IOHC = 3,
+ TA_MCA_NUM_BLOCK_MAX
+};
+
enum ta_ras_error_type {
TA_RAS_ERROR__NONE = 0,
TA_RAS_ERROR__PARITY = 1,
@@ -105,17 +123,15 @@ struct ta_ras_trigger_error_input {
uint64_t value; // method if error injection. i.e persistent, coherent etc.
};
-struct ta_ras_init_flags
-{
- uint8_t poison_mode_en;
- uint8_t dgpu_mode;
+struct ta_ras_init_flags {
+ uint8_t poison_mode_en;
+ uint8_t dgpu_mode;
};
-struct ta_ras_output_flags
-{
- uint8_t ras_init_success_flag;
- uint8_t err_inject_switch_disable_flag;
- uint8_t reg_access_failure_flag;
+struct ta_ras_output_flags {
+ uint8_t ras_init_success_flag;
+ uint8_t err_inject_switch_disable_flag;
+ uint8_t reg_access_failure_flag;
};
/* Common input structure for RAS callbacks */
@@ -126,14 +142,13 @@ union ta_ras_cmd_input {
struct ta_ras_disable_features_input disable_features;
struct ta_ras_trigger_error_input trigger_error;
- uint32_t reserve_pad[256];
+ uint32_t reserve_pad[256];
};
-union ta_ras_cmd_output
-{
- struct ta_ras_output_flags flags;
+union ta_ras_cmd_output {
+ struct ta_ras_output_flags flags;
- uint32_t reserve_pad[256];
+ uint32_t reserve_pad[256];
};
/* Shared Memory structures */
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
index bb30336b1e8d..f7ec3fe134e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c
@@ -288,9 +288,43 @@ static void umc_v6_7_query_ras_error_address(struct amdgpu_device *adev,
}
}
+static uint32_t umc_v6_7_query_ras_poison_mode_per_channel(
+ struct amdgpu_device *adev,
+ uint32_t umc_reg_offset)
+{
+ uint32_t ecc_ctrl_addr, ecc_ctrl;
+
+ ecc_ctrl_addr =
+ SOC15_REG_OFFSET(UMC, 0, regUMCCH0_0_EccCtrl);
+ ecc_ctrl = RREG32_PCIE((ecc_ctrl_addr +
+ umc_reg_offset) * 4);
+
+ return REG_GET_FIELD(ecc_ctrl, UMCCH0_0_EccCtrl, UCFatalEn);
+}
+
+static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev)
+{
+ uint32_t umc_inst = 0;
+ uint32_t ch_inst = 0;
+ uint32_t umc_reg_offset = 0;
+
+ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
+ umc_reg_offset = get_umc_v6_7_reg_offset(adev,
+ umc_inst,
+ ch_inst);
+ /* Enabling fatal error in one channel will be considered
+ as fatal error mode */
+ if (umc_v6_7_query_ras_poison_mode_per_channel(adev, umc_reg_offset))
+ return false;
+ }
+
+ return true;
+}
+
const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = {
.ras_late_init = amdgpu_umc_ras_late_init,
.ras_fini = amdgpu_umc_ras_fini,
.query_ras_error_count = umc_v6_7_query_ras_error_count,
.query_ras_error_address = umc_v6_7_query_ras_error_address,
+ .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode,
};
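
The new query_ras_poison_mode callback walks every UMC channel and reads the EccCtrl UCFatalEn field; if any channel escalates uncorrectable errors to fatal, the whole device is treated as not being in poison mode. A standalone sketch of that aggregation (register access stubbed out):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_UMC_CHANNELS 8

/* Stand-in for reading UMCCH*_EccCtrl.UCFatalEn for one channel. */
static uint32_t read_uc_fatal_en(const uint32_t *fatal_en, uint32_t ch)
{
	return fatal_en[ch];
}

/* Poison mode only if *no* channel is configured to raise fatal errors:
 * a single UCFatalEn bit set anywhere flips the device to fatal-error mode. */
static bool query_ras_poison_mode(const uint32_t *fatal_en)
{
	for (uint32_t ch = 0; ch < NUM_UMC_CHANNELS; ch++) {
		if (read_uc_fatal_en(fatal_en, ch))
			return false;
	}
	return true;
}

int main(void)
{
	uint32_t fatal_en[NUM_UMC_CHANNELS] = { 0 };

	printf("poison mode: %d\n", query_ras_poison_mode(fatal_en));
	fatal_en[3] = 1;
	printf("poison mode: %d\n", query_ras_poison_mode(fatal_en));
	return 0;
}
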
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
index 7232241e3bfb..0fef925b6602 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v3_1.c
@@ -698,6 +698,19 @@ static int uvd_v3_1_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v3_1_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v3_1_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -722,17 +735,6 @@ static int uvd_v3_1_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v3_1_stop(adev);
-
- return 0;
-}
-
-static int uvd_v3_1_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v3_1_hw_fini(adev);
if (r)
return r;
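
This and the following UVD/VCE patches all split the teardown the same way: hw_fini() is reduced to cancelling the idle worker and stopping the engine, while the clock-gating/power-gating/DPM bookkeeping moves into suspend(), which then calls hw_fini(). A standalone sketch of the resulting ordering:

#include <stdbool.h>
#include <stdio.h>

struct uvd_ctx {
	bool idle_work_pending;
	bool running;
	bool dpm_enabled;
};

static void cancel_idle_work(struct uvd_ctx *c) { c->idle_work_pending = false; }
static void uvd_stop(struct uvd_ctx *c)         { c->running = false; }

/* hw_fini(): only halt the hardware, nothing power-management related. */
static int uvd_hw_fini(struct uvd_ctx *c)
{
	cancel_idle_work(c);
	if (c->running)
		uvd_stop(c);
	return 0;
}

/* suspend(): do the gating/DPM cleanup first, then fall through to hw_fini().
 * (The TODO kept in the patch is to move this into ->set_powergating_state().) */
static int uvd_suspend(struct uvd_ctx *c)
{
	cancel_idle_work(c);
	if (c->dpm_enabled)
		puts("disable UVD DPM");
	else
		puts("gate UVD clocks and power");
	return uvd_hw_fini(c);
}

int main(void)
{
	struct uvd_ctx c = { .idle_work_pending = true, .running = true, .dpm_enabled = false };

	return uvd_suspend(&c);
}
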
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 52d6de969f46..c108b8381795 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -212,6 +212,19 @@ static int uvd_v4_2_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v4_2_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v4_2_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -236,17 +249,6 @@ static int uvd_v4_2_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v4_2_stop(adev);
-
- return 0;
-}
-
-static int uvd_v4_2_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v4_2_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index db6d06758e4d..563493d1f830 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -210,6 +210,19 @@ static int uvd_v5_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (RREG32(mmUVD_STATUS) != 0)
+ uvd_v5_0_stop(adev);
+
+ return 0;
+}
+
+static int uvd_v5_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -234,17 +247,6 @@ static int uvd_v5_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (RREG32(mmUVD_STATUS) != 0)
- uvd_v5_0_stop(adev);
-
- return 0;
-}
-
-static int uvd_v5_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v5_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index bc571833632e..d5d023a24269 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -332,15 +332,9 @@ err:
static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
r = uvd_v6_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -357,9 +351,6 @@ static int uvd_v6_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
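
The encoder ring IB test no longer allocates and frees a 128 KiB VRAM BO on every run; it borrows the preallocated adev->uvd.ib_bo instead, so the error path only has to drop the fence. A standalone sketch of the allocate-per-call versus reuse-preallocated trade-off:

#include <stdio.h>
#include <stdlib.h>

struct bo { size_t size; };

/* Old shape: allocate, test, free. An allocation failure can fail the test
 * for reasons unrelated to the engine under test, and the cleanup tail
 * (unpin/unreserve/unref in the driver) is easy to get wrong. */
static int ring_test_alloc_each_time(void)
{
	struct bo *bo = malloc(sizeof(*bo));

	if (!bo)
		return -1;
	bo->size = 128 * 1024;
	puts("run encode create/destroy test");
	free(bo);
	return 0;
}

/* New shape: reuse one IB buffer set up at init time; the test itself has no
 * allocation failure path left. */
static int ring_test_reuse(struct bo *ib_bo)
{
	printf("run encode test against %zu-byte IB buffer\n", ib_bo->size);
	return 0;
}

int main(void)
{
	struct bo ib_bo = { .size = 128 * 1024 };

	ring_test_alloc_each_time();
	return ring_test_reuse(&ib_bo);
}
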
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index b6e82d75561f..b483f03b4591 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -338,15 +338,9 @@ err:
static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
struct dma_fence *fence = NULL;
- struct amdgpu_bo *bo = NULL;
+ struct amdgpu_bo *bo = ring->adev->uvd.ib_bo;
long r;
- r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
- AMDGPU_GEM_DOMAIN_VRAM,
- &bo, NULL, NULL);
- if (r)
- return r;
-
r = uvd_v7_0_enc_get_create_msg(ring, 1, bo, NULL);
if (r)
goto error;
@@ -363,9 +357,6 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
- amdgpu_bo_unpin(bo);
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
return r;
}
@@ -606,6 +597,23 @@ static int uvd_v7_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
+ if (!amdgpu_sriov_vf(adev))
+ uvd_v7_0_stop(adev);
+ else {
+ /* full access mode, so don't touch any UVD register */
+ DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
+ }
+
+ return 0;
+}
+
+static int uvd_v7_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -630,21 +638,6 @@ static int uvd_v7_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- if (!amdgpu_sriov_vf(adev))
- uvd_v7_0_stop(adev);
- else {
- /* full access mode, so don't touch any UVD register */
- DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
- }
-
- return 0;
-}
-
-static int uvd_v7_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = uvd_v7_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c
deleted file mode 100644
index d64d681a05dc..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/vangogh_reg_init.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "vangogh_ip_offset.h"
-
-void vangogh_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the blocke needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index b70c17f0c52e..67eb01fef789 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -431,10 +431,12 @@ static int vce_v2_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
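
All of the VCE/VCN sw_init loops in the rest of this series switch from AMDGPU_RING_PRIO_DEFAULT to a per-ring priority looked up via amdgpu_vce_get_ring_prio() or amdgpu_vcn_get_enc_ring_prio(). The helpers themselves are not part of this diff; the sketch below only illustrates the shape of an index-to-priority mapping and is an assumption, not the driver's actual table:

#include <stdio.h>

enum ring_priority_level {
	RING_PRIO_DEFAULT = 0,
	RING_PRIO_NORMAL,
	RING_PRIO_HIGH,
};

/* Hypothetical mapping from encode-ring index to a hardware priority level;
 * the real lookup lives in amdgpu_vce.c / amdgpu_vcn.c. */
static enum ring_priority_level get_enc_ring_prio(int ring)
{
	switch (ring) {
	case 0:
		return RING_PRIO_DEFAULT;
	case 1:
		return RING_PRIO_NORMAL;
	default:
		return RING_PRIO_HIGH;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("ring %d -> priority %d\n", i, get_enc_ring_prio(i));
	return 0;
}
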
@@ -479,6 +481,17 @@ static int vce_v2_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ return 0;
+}
+
+static int vce_v2_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -502,14 +515,6 @@ static int vce_v2_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- return 0;
-}
-
-static int vce_v2_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = vce_v2_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 9de66893ccd6..142e291983b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -440,10 +440,12 @@ static int vce_v3_0_sw_init(void *handle)
return r;
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -490,6 +492,21 @@ static int vce_v3_0_hw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ r = vce_v3_0_wait_for_idle(handle);
+ if (r)
+ return r;
+
+ vce_v3_0_stop(adev);
+ return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+}
+
+static int vce_v3_0_suspend(void *handle)
+{
+ int r;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
/*
* Proper cleanups before halting the HW engine:
* - cancel the delayed idle work
@@ -513,19 +530,6 @@ static int vce_v3_0_hw_fini(void *handle)
AMD_CG_STATE_GATE);
}
- r = vce_v3_0_wait_for_idle(handle);
- if (r)
- return r;
-
- vce_v3_0_stop(adev);
- return vce_v3_0_set_clockgating_state(adev, AMD_CG_STATE_GATE);
-}
-
-static int vce_v3_0_suspend(void *handle)
-{
- int r;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
r = vce_v3_0_hw_fini(adev);
if (r)
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index fec902b800c2..d1fc4e0b8265 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -463,6 +463,8 @@ static int vce_v4_0_sw_init(void *handle)
}
for (i = 0; i < adev->vce.num_rings; i++) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);
+
ring = &adev->vce.ring[i];
sprintf(ring->name, "vce%d", i);
if (amdgpu_sriov_vf(adev)) {
@@ -478,7 +480,7 @@ static int vce_v4_0_sw_init(void *handle)
ring->doorbell_index = adev->doorbell_index.uvd_vce.vce_ring2_3 * 2 + 1;
}
r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -542,29 +544,8 @@ static int vce_v4_0_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- /*
- * Proper cleanups before halting the HW engine:
- * - cancel the delayed idle work
- * - enable powergating
- * - enable clockgating
- * - disable dpm
- *
- * TODO: to align with the VCN implementation, move the
- * jobs for clockgating/powergating/dpm setting to
- * ->set_powergating_state().
- */
cancel_delayed_work_sync(&adev->vce.idle_work);
- if (adev->pm.dpm_enabled) {
- amdgpu_dpm_enable_vce(adev, false);
- } else {
- amdgpu_asic_set_vce_clocks(adev, 0, 0);
- amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
- amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_GATE);
- }
-
if (!amdgpu_sriov_vf(adev)) {
/* vce_v4_0_wait_for_idle(handle); */
vce_v4_0_stop(adev);
@@ -584,7 +565,7 @@ static int vce_v4_0_suspend(void *handle)
if (adev->vce.vcpu_bo == NULL)
return 0;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
@@ -594,6 +575,29 @@ static int vce_v4_0_suspend(void *handle)
drm_dev_exit(idx);
}
+ /*
+ * Proper cleanups before halting the HW engine:
+ * - cancel the delayed idle work
+ * - enable powergating
+ * - enable clockgating
+ * - disable dpm
+ *
+ * TODO: to align with the VCN implementation, move the
+ * jobs for clockgating/powergating/dpm setting to
+ * ->set_powergating_state().
+ */
+ cancel_delayed_work_sync(&adev->vce.idle_work);
+
+ if (adev->pm.dpm_enabled) {
+ amdgpu_dpm_enable_vce(adev, false);
+ } else {
+ amdgpu_asic_set_vce_clocks(adev, 0, 0);
+ amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_CG_STATE_GATE);
+ }
+
r = vce_v4_0_hw_fini(adev);
if (r)
return r;
@@ -611,7 +615,7 @@ static int vce_v4_0_resume(void *handle)
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
void *ptr = adev->vce.cpu_addr;
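
Two independent cleanups meet in vce_v4_0.c: the suspend path regains the gating/DPM teardown (mirroring the hw_fini/suspend split above), and the VRAM save/restore is wrapped in drm_dev_enter()/drm_dev_exit() via adev_to_drm(), so a hot-unplugged device is skipped instead of dereferenced. A standalone sketch of the enter/exit guard (the stubs stand in for the real DRM helpers, which also hold an SRCU read section so unplug can wait for critical sections to drain):

#include <stdbool.h>
#include <stdio.h>

struct drm_device { bool unplugged; };

/* Stand-ins for drm_dev_enter()/drm_dev_exit(). */
static bool dev_enter(struct drm_device *dev, int *idx)
{
	*idx = 0;
	return !dev->unplugged;
}

static void dev_exit(int idx)
{
	(void)idx;
}

static void save_firmware_image(struct drm_device *dev)
{
	int idx;

	if (dev_enter(dev, &idx)) {
		/* Safe to touch VRAM / MMIO only inside the critical section. */
		puts("copying VCPU image to system memory");
		dev_exit(idx);
	}
	/* Device already gone: skip the copy instead of faulting. */
}

int main(void)
{
	struct drm_device dev = { .unplugged = false };

	save_firmware_image(&dev);
	dev.unplugged = true;
	save_firmware_image(&dev);
	return 0;
}
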
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 121ee9f2b8d1..d54d720b3cf6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -66,7 +66,6 @@ static int vcn_v1_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->vcn.num_vcn_inst = 1;
adev->vcn.num_enc_rings = 2;
vcn_v1_0_set_dec_ring_funcs(adev);
@@ -112,15 +111,7 @@ static int vcn_v1_0_sw_init(void *handle)
/* Override the work func */
adev->vcn.idle_work.work.func = vcn_v1_0_idle_work_handler;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -145,10 +136,12 @@ static int vcn_v1_0_sw_init(void *handle)
SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst->ring_enc[i];
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
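
Each VCN variant used to open-code the PSP front-door firmware bookkeeping: fill in the AMDGPU_UCODE_ID_VCN slot, add the page-aligned ucode size, and duplicate the entry for a second instance where one exists. That now lives in a single amdgpu_vcn_setup_ucode() call. The helper itself is not shown in this diff; the sketch below reconstructs it from the removed blocks, with simplified types and shortened field names, so treat it as an approximation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct ucode_slot { int id; const void *fw; };

struct device_ctx {
	int load_type_psp;		/* AMDGPU_FW_LOAD_PSP */
	int num_vcn_inst;
	uint32_t vcn_ucode_size_bytes;	/* from the common firmware header */
	const void *vcn_fw;
	struct ucode_slot ucode[2];
	uint32_t fw_size;
};

/* Shared helper: register one ucode slot per VCN instance and grow the total
 * PSP firmware footprint by the aligned ucode size for each of them. */
static void vcn_setup_ucode(struct device_ctx *adev)
{
	if (!adev->load_type_psp)
		return;

	for (int i = 0; i < adev->num_vcn_inst; i++) {
		adev->ucode[i].id = i;		/* AMDGPU_UCODE_ID_VCN + i */
		adev->ucode[i].fw = adev->vcn_fw;
		adev->fw_size += ALIGN_UP(adev->vcn_ucode_size_bytes, PAGE_SIZE);
	}
	puts("Will use PSP to load VCN firmware");
}

int main(void)
{
	struct device_ctx adev = {
		.load_type_psp = 1, .num_vcn_inst = 2,
		.vcn_ucode_size_bytes = 300 * 1024, .vcn_fw = "fw",
	};

	vcn_setup_ucode(&adev);
	printf("total fw_size: %u\n", (unsigned int)adev.fw_size);
	return 0;
}
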
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index f4686e918e0d..313fc1b53999 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
@@ -68,7 +69,6 @@ static int vcn_v2_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev))
adev->vcn.num_enc_rings = 1;
else
@@ -115,15 +115,7 @@ static int vcn_v2_0_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -159,6 +151,8 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.inst->external.nop = SOC15_REG_OFFSET(UVD, 0, mmUVD_NO_OP);
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
if (!amdgpu_sriov_vf(adev))
@@ -167,7 +161,7 @@ static int vcn_v2_0_sw_init(void *handle)
ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -192,11 +186,14 @@ static int vcn_v2_0_sw_init(void *handle)
*/
static int vcn_v2_0_sw_fini(void *handle)
{
- int r;
+ int r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared = adev->vcn.inst->fw_shared_cpu_addr;
- fw_shared->present_flag_0 = 0;
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ fw_shared->present_flag_0 = 0;
+ drm_dev_exit(idx);
+ }
amdgpu_virt_free_mm_table(adev);
@@ -1879,15 +1876,14 @@ static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
/* mc resume*/
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- tmp = AMDGPU_UCODE_ID_VCN;
MMSCH_V2_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo);
MMSCH_V2_0_INSERT_DIRECT_WT(
SOC15_REG_OFFSET(UVD, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi);
offset = 0;
} else {
MMSCH_V2_0_INSERT_DIRECT_WT(
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index e0c0c3734432..44fc4c218433 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -22,6 +22,7 @@
*/
#include <linux/firmware.h>
+#include <drm/drm_drv.h>
#include "amdgpu.h"
#include "amdgpu_vcn.h"
@@ -82,7 +83,7 @@ static int vcn_v2_5_early_init(void *handle)
} else {
u32 harvest;
int i;
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
@@ -138,22 +139,7 @@ static int vcn_v2_5_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->vcn.num_vcn_inst == VCN25_MAX_HW_INSTANCES_ARCTURUS) {
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- }
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -194,6 +180,8 @@ static int vcn_v2_5_sw_init(void *handle)
return r;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(i);
+
ring = &adev->vcn.inst[j].ring_enc[i];
ring->use_doorbell = true;
@@ -203,7 +191,7 @@ static int vcn_v2_5_sw_init(void *handle)
sprintf(ring->name, "vcn_enc_%d.%d", j, i);
r = amdgpu_ring_init(adev, ring, 512,
&adev->vcn.inst[j].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT, NULL);
+ hw_prio, NULL);
if (r)
return r;
}
@@ -233,17 +221,21 @@ static int vcn_v2_5_sw_init(void *handle)
*/
static int vcn_v2_5_sw_fini(void *handle)
{
- int i, r;
+ int i, r, idx;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
volatile struct amdgpu_fw_shared *fw_shared;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- if (adev->vcn.harvest_config & (1 << i))
- continue;
- fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
- fw_shared->present_flag_0 = 0;
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ fw_shared = adev->vcn.inst[i].fw_shared_cpu_addr;
+ fw_shared->present_flag_0 = 0;
+ }
+ drm_dev_exit(idx);
}
+
if (amdgpu_sriov_vf(adev))
amdgpu_virt_free_mm_table(adev);
@@ -1713,7 +1705,7 @@ static void vcn_v2_5_set_dec_ring_funcs(struct amdgpu_device *adev)
for (i = 0; i < adev->vcn.num_vcn_inst; ++i) {
if (adev->vcn.harvest_config & (1 << i))
continue;
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_5_dec_ring_vm_funcs;
else /* CHIP_ALDEBARAN */
adev->vcn.inst[i].ring_dec.funcs = &vcn_v2_6_dec_ring_vm_funcs;
@@ -1730,7 +1722,7 @@ static void vcn_v2_5_set_enc_ring_funcs(struct amdgpu_device *adev)
if (adev->vcn.harvest_config & (1 << j))
continue;
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
- if (adev->asic_type == CHIP_ARCTURUS)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(2, 5, 0))
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_5_enc_ring_vm_funcs;
else /* CHIP_ALDEBARAN */
adev->vcn.inst[j].ring_enc[i].funcs = &vcn_v2_6_enc_ring_vm_funcs;
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
index 3d18aab88b4e..da11ceba0698 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c
@@ -60,11 +60,6 @@ static int amdgpu_ih_clientid_vcns[] = {
SOC15_IH_CLIENTID_VCN1
};
-static int amdgpu_ucode_id_vcns[] = {
- AMDGPU_UCODE_ID_VCN,
- AMDGPU_UCODE_ID_VCN1
-};
-
static int vcn_v3_0_start_sriov(struct amdgpu_device *adev);
static void vcn_v3_0_set_dec_ring_funcs(struct amdgpu_device *adev);
static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev);
@@ -87,7 +82,6 @@ static void vcn_v3_0_enc_ring_set_wptr(struct amdgpu_ring *ring);
static int vcn_v3_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- int i;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
@@ -95,24 +89,12 @@ static int vcn_v3_0_early_init(void *handle)
adev->vcn.num_enc_rings = 1;
} else {
- if (adev->asic_type == CHIP_SIENNA_CICHLID) {
- u32 harvest;
-
- adev->vcn.num_vcn_inst = VCN_INSTANCES_SIENNA_CICHLID;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(VCN, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
- if (adev->asic_type == CHIP_BEIGE_GOBY)
+ if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 33))
adev->vcn.num_enc_rings = 0;
else
adev->vcn.num_enc_rings = 2;
@@ -143,22 +125,7 @@ static int vcn_v3_0_sw_init(void *handle)
if (r)
return r;
- if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- const struct common_firmware_header *hdr;
- hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
-
- if (adev->vcn.num_vcn_inst == VCN_INSTANCES_SIENNA_CICHLID) {
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].ucode_id = AMDGPU_UCODE_ID_VCN1;
- adev->firmware.ucode[AMDGPU_UCODE_ID_VCN1].fw = adev->vcn.fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
- }
- dev_info(adev->dev, "Will use PSP to load VCN firmware\n");
- }
+ amdgpu_vcn_setup_ucode(adev);
r = amdgpu_vcn_resume(adev);
if (r)
@@ -224,6 +191,8 @@ static int vcn_v3_0_sw_init(void *handle)
return r;
for (j = 0; j < adev->vcn.num_enc_rings; ++j) {
+ enum amdgpu_ring_priority_level hw_prio = amdgpu_vcn_get_enc_ring_prio(j);
+
/* VCN ENC TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
j + VCN_2_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst[i].irq);
@@ -239,8 +208,7 @@ static int vcn_v3_0_sw_init(void *handle)
}
sprintf(ring->name, "vcn_enc_%d.%d", i, j);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0,
- AMDGPU_RING_PRIO_DEFAULT,
- &adev->vcn.inst[i].sched_score);
+ hw_prio, &adev->vcn.inst[i].sched_score);
if (r)
return r;
}
@@ -275,7 +243,7 @@ static int vcn_v3_0_sw_fini(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int i, r, idx;
- if (drm_dev_enter(&adev->ddev, &idx)) {
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
volatile struct amdgpu_fw_shared *fw_shared;
@@ -1271,7 +1239,7 @@ static int vcn_v3_0_start(struct amdgpu_device *adev)
fw_shared->rb.wptr = lower_32_bits(ring->wptr);
fw_shared->multi_queue.decode_queue_mode &= cpu_to_le32(~FW_QUEUE_RING_RESET);
- if (adev->asic_type != CHIP_BEIGE_GOBY) {
+ if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
ring = &adev->vcn.inst[i].ring_enc[0];
WREG32_SOC15(VCN, i, mmUVD_RB_RPTR, lower_32_bits(ring->wptr));
@@ -1305,7 +1273,6 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
uint32_t param, resp, expected;
uint32_t offset, cache_size;
uint32_t tmp, timeout;
- uint32_t id;
struct amdgpu_mm_table *table = &adev->virt.mm_table;
uint32_t *table_loc;
@@ -1349,13 +1316,12 @@ static int vcn_v3_0_start_sriov(struct amdgpu_device *adev)
cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- id = amdgpu_ucode_id_vcns[i];
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
- adev->firmware.ucode[id].tmr_mc_addr_lo);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo);
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
- adev->firmware.ucode[id].tmr_mc_addr_hi);
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi);
offset = 0;
MMSCH_V3_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, i,
mmUVD_VCPU_CACHE_OFFSET0),
@@ -1643,7 +1609,7 @@ static int vcn_v3_0_pause_dpg_mode(struct amdgpu_device *adev,
UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK,
~UVD_POWER_STATUS__STALL_DPG_POWER_UP_MASK);
- if (adev->asic_type != CHIP_BEIGE_GOBY) {
+ if (adev->ip_versions[UVD_HWIP][0] != IP_VERSION(3, 0, 33)) {
/* Restore */
fw_shared = adev->vcn.inst[inst_idx].fw_shared_cpu_addr;
fw_shared->multi_queue.encode_generalpurpose_queue_mode |= cpu_to_le32(FW_QUEUE_RING_RESET);
diff --git a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c b/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
deleted file mode 100644
index 3d89421275ed..000000000000
--- a/drivers/gpu/drm/amd/amdgpu/yellow_carp_reg_init.c
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2019 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-#include "amdgpu.h"
-#include "nv.h"
-
-#include "soc15_common.h"
-#include "soc15_hw_ip.h"
-#include "yellow_carp_offset.h"
-
-int yellow_carp_reg_base_init(struct amdgpu_device *adev)
-{
- /* HW has more IP blocks, only initialized the block needed by driver */
- uint32_t i;
- for (i = 0 ; i < MAX_INSTANCE ; ++i) {
- adev->reg_offset[GC_HWIP][i] = (uint32_t *)(&(GC_BASE.instance[i]));
- adev->reg_offset[HDP_HWIP][i] = (uint32_t *)(&(HDP_BASE.instance[i]));
- adev->reg_offset[MMHUB_HWIP][i] = (uint32_t *)(&(MMHUB_BASE.instance[i]));
- adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
- adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
- adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
- adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
- adev->reg_offset[VCN_HWIP][i] = (uint32_t *)(&(VCN_BASE.instance[i]));
- adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
- adev->reg_offset[DCE_HWIP][i] = (uint32_t *)(&(DCN_BASE.instance[i]));
- adev->reg_offset[OSSSYS_HWIP][i] = (uint32_t *)(&(OSSSYS_BASE.instance[i]));
- adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
- adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
- adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
- }
- return 0;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 86afd37b098d..24ebd61395d8 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -405,7 +405,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
+ retval = pqm_update_queue_properties(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
@@ -418,7 +418,7 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
int retval;
const int max_num_cus = 1024;
struct kfd_ioctl_set_cu_mask_args *args = data;
- struct queue_properties properties;
+ struct mqd_update_info minfo = {0};
uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
@@ -428,8 +428,8 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
return -EINVAL;
}
- properties.cu_mask_count = args->num_cu_mask;
- if (properties.cu_mask_count == 0) {
+ minfo.cu_mask.count = args->num_cu_mask;
+ if (minfo.cu_mask.count == 0) {
pr_debug("CU mask cannot be 0");
return -EINVAL;
}
@@ -438,32 +438,33 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
* limit of max_num_cus bits. We can then just drop any CU mask bits
* past max_num_cus bits and just use the first max_num_cus bits.
*/
- if (properties.cu_mask_count > max_num_cus) {
+ if (minfo.cu_mask.count > max_num_cus) {
pr_debug("CU mask cannot be greater than 1024 bits");
- properties.cu_mask_count = max_num_cus;
+ minfo.cu_mask.count = max_num_cus;
cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
}
- properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
- if (!properties.cu_mask)
+ minfo.cu_mask.ptr = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!minfo.cu_mask.ptr)
return -ENOMEM;
- retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ retval = copy_from_user(minfo.cu_mask.ptr, cu_mask_ptr, cu_mask_size);
if (retval) {
pr_debug("Could not copy CU mask from userspace");
- kfree(properties.cu_mask);
- return -EFAULT;
+ retval = -EFAULT;
+ goto out;
}
+ minfo.update_flag = UPDATE_FLAG_CU_MASK;
+
mutex_lock(&p->mutex);
- retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+ retval = pqm_update_mqd(&p->pqm, args->queue_id, &minfo);
mutex_unlock(&p->mutex);
- if (retval)
- kfree(properties.cu_mask);
-
+out:
+ kfree(minfo.cu_mask.ptr);
return retval;
}
@@ -1011,11 +1012,6 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
void *mem, *kern_addr;
uint64_t size;
- if (p->signal_page) {
- pr_err("Event page is already set\n");
- return -EINVAL;
- }
-
kfd = kfd_device_by_id(GET_GPU_ID(args->event_page_offset));
if (!kfd) {
pr_err("Getting device by id failed in %s\n", __func__);
@@ -1023,6 +1019,13 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
}
mutex_lock(&p->mutex);
+
+ if (p->signal_page) {
+ pr_err("Event page is already set\n");
+ err = -EINVAL;
+ goto out_unlock;
+ }
+
pdd = kfd_bind_process_to_device(kfd, p);
if (IS_ERR(pdd)) {
err = PTR_ERR(pdd);
@@ -1037,20 +1040,24 @@ static int kfd_ioctl_create_event(struct file *filp, struct kfd_process *p,
err = -EINVAL;
goto out_unlock;
}
- mutex_unlock(&p->mutex);
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kfd->kgd,
mem, &kern_addr, &size);
if (err) {
pr_err("Failed to map event page to kernel\n");
- return err;
+ goto out_unlock;
}
err = kfd_event_page_set(p, kern_addr, size);
if (err) {
pr_err("Failed to set event page\n");
- return err;
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kfd->kgd, mem);
+ goto out_unlock;
}
+
+ p->signal_handle = args->event_page_offset;
+
+ mutex_unlock(&p->mutex);
}
err = kfd_event_create(filp, p, args->event_type,
@@ -1259,6 +1266,23 @@ static int kfd_ioctl_alloc_memory_of_gpu(struct file *filep,
if (args->size == 0)
return -EINVAL;
+#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
+ /* Flush pending deferred work to avoid racing with deferred actions
+ * from previous memory map changes (e.g. munmap).
+ */
+ svm_range_list_lock_and_flush_work(&p->svms, current->mm);
+ mutex_lock(&p->svms.lock);
+ mmap_write_unlock(current->mm);
+ if (interval_tree_iter_first(&p->svms.objects,
+ args->va_addr >> PAGE_SHIFT,
+ (args->va_addr + args->size - 1) >> PAGE_SHIFT)) {
+ pr_err("Address: 0x%llx already allocated by SVM\n",
+ args->va_addr);
+ mutex_unlock(&p->svms.lock);
+ return -EADDRINUSE;
+ }
+ mutex_unlock(&p->svms.lock);
+#endif
dev = kfd_device_by_id(args->gpu_id);
if (!dev)
return -EINVAL;
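
kfd_ioctl_alloc_memory_of_gpu() now refuses a VRAM allocation whose GPU VA range already belongs to an SVM range: it flushes the deferred SVM work, drops the mmap write lock taken along the way, and probes the SVM interval tree for any overlap, returning -EADDRINUSE on a hit. The overlap test itself is just an interval query over page-frame bounds; a standalone sketch of the check, with a naive list walk standing in for the kernel's interval tree:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

struct range { uint64_t start_pfn, last_pfn; };

/* Naive stand-in for interval_tree_iter_first(): does any registered SVM
 * range intersect [start_pfn, last_pfn]? */
static const struct range *find_overlap(const struct range *ranges, int n,
					uint64_t start_pfn, uint64_t last_pfn)
{
	for (int i = 0; i < n; i++) {
		if (ranges[i].start_pfn <= last_pfn && ranges[i].last_pfn >= start_pfn)
			return &ranges[i];
	}
	return NULL;
}

static int check_alloc_allowed(const struct range *svm_ranges, int n,
			       uint64_t va_addr, uint64_t size)
{
	uint64_t start_pfn = va_addr >> PAGE_SHIFT;
	uint64_t last_pfn  = (va_addr + size - 1) >> PAGE_SHIFT;

	if (find_overlap(svm_ranges, n, start_pfn, last_pfn)) {
		fprintf(stderr, "Address 0x%llx already allocated by SVM\n",
			(unsigned long long)va_addr);
		return -1;	/* -EADDRINUSE in the driver */
	}
	return 0;
}

int main(void)
{
	struct range svm[] = { { 0x1000, 0x1fff } };

	printf("%d\n", check_alloc_allowed(svm, 1, 0x2000000ULL, 0x1000));
	printf("%d\n", check_alloc_allowed(svm, 1, 0x1234000ULL, 0x1000));
	return 0;
}
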
@@ -1351,6 +1375,15 @@ static int kfd_ioctl_free_memory_of_gpu(struct file *filep,
return -EINVAL;
mutex_lock(&p->mutex);
+ /*
+ * Safeguard to prevent user space from freeing signal BO.
+ * It will be freed at process termination.
+ */
+ if (p->signal_handle && (p->signal_handle == args->handle)) {
+ pr_err("Free signal BO is not allowed\n");
+ ret = -EPERM;
+ goto err_unlock;
+ }
pdd = kfd_get_process_device_data(dev, p);
if (!pdd) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 4a416231b24c..0fffaf859c59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -32,6 +32,7 @@
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
+#include "amdgpu.h"
#define MQD_SIZE_ALIGNED 768
@@ -52,41 +53,6 @@ extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
-static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
-#ifdef KFD_SUPPORT_IOMMU_V2
-#ifdef CONFIG_DRM_AMDGPU_CIK
- [CHIP_KAVERI] = &gfx_v7_kfd2kgd,
-#endif
- [CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
- [CHIP_RAVEN] = &gfx_v9_kfd2kgd,
-#endif
-#ifdef CONFIG_DRM_AMDGPU_CIK
- [CHIP_HAWAII] = &gfx_v7_kfd2kgd,
-#endif
- [CHIP_TONGA] = &gfx_v8_kfd2kgd,
- [CHIP_FIJI] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
- [CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
- [CHIP_VEGAM] = &gfx_v8_kfd2kgd,
- [CHIP_VEGA10] = &gfx_v9_kfd2kgd,
- [CHIP_VEGA12] = &gfx_v9_kfd2kgd,
- [CHIP_VEGA20] = &gfx_v9_kfd2kgd,
- [CHIP_RENOIR] = &gfx_v9_kfd2kgd,
- [CHIP_ARCTURUS] = &arcturus_kfd2kgd,
- [CHIP_ALDEBARAN] = &aldebaran_kfd2kgd,
- [CHIP_NAVI10] = &gfx_v10_kfd2kgd,
- [CHIP_NAVI12] = &gfx_v10_kfd2kgd,
- [CHIP_NAVI14] = &gfx_v10_kfd2kgd,
- [CHIP_SIENNA_CICHLID] = &gfx_v10_3_kfd2kgd,
- [CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd,
- [CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd,
- [CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
- [CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
- [CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd,
- [CHIP_CYAN_SKILLFISH] = &gfx_v10_kfd2kgd,
-};
-
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
@@ -127,7 +93,6 @@ static const struct kfd_device_info carrizo_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
-#endif
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
@@ -147,7 +112,9 @@ static const struct kfd_device_info raven_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
.asic_name = "hawaii",
@@ -167,6 +134,7 @@ static const struct kfd_device_info hawaii_device_info = {
.num_xgmi_sdma_engines = 0,
.num_sdma_queues_per_engine = 2,
};
+#endif
static const struct kfd_device_info tonga_device_info = {
.asic_family = CHIP_TONGA,
@@ -653,63 +621,202 @@ static const struct kfd_device_info cyan_skillfish_device_info = {
.num_sdma_queues_per_engine = 8,
};
-/* For each entry, [0] is regular and [1] is virtualisation device. */
-static const struct kfd_device_info *kfd_supported_devices[][2] = {
-#ifdef KFD_SUPPORT_IOMMU_V2
- [CHIP_KAVERI] = {&kaveri_device_info, NULL},
- [CHIP_CARRIZO] = {&carrizo_device_info, NULL},
-#endif
- [CHIP_RAVEN] = {&raven_device_info, NULL},
- [CHIP_HAWAII] = {&hawaii_device_info, NULL},
- [CHIP_TONGA] = {&tonga_device_info, NULL},
- [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
- [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
- [CHIP_POLARIS11] = {&polaris11_device_info, NULL},
- [CHIP_POLARIS12] = {&polaris12_device_info, NULL},
- [CHIP_VEGAM] = {&vegam_device_info, NULL},
- [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
- [CHIP_VEGA12] = {&vega12_device_info, NULL},
- [CHIP_VEGA20] = {&vega20_device_info, NULL},
- [CHIP_RENOIR] = {&renoir_device_info, NULL},
- [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
- [CHIP_ALDEBARAN] = {&aldebaran_device_info, &aldebaran_device_info},
- [CHIP_NAVI10] = {&navi10_device_info, NULL},
- [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
- [CHIP_NAVI14] = {&navi14_device_info, NULL},
- [CHIP_SIENNA_CICHLID] = {&sienna_cichlid_device_info, &sienna_cichlid_device_info},
- [CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info},
- [CHIP_VANGOGH] = {&vangogh_device_info, NULL},
- [CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
- [CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
- [CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL},
- [CHIP_CYAN_SKILLFISH] = {&cyan_skillfish_device_info, NULL},
-};
-
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
-struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
- struct pci_dev *pdev, unsigned int asic_type, bool vf)
+struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, bool vf)
{
struct kfd_dev *kfd;
const struct kfd_device_info *device_info;
const struct kfd2kgd_calls *f2g;
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+ struct pci_dev *pdev = adev->pdev;
- if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
- || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
- dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
- return NULL; /* asic_type out of range */
+ switch (adev->asic_type) {
+#ifdef KFD_SUPPORT_IOMMU_V2
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_KAVERI:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &kaveri_device_info;
+ f2g = &gfx_v7_kfd2kgd;
+ break;
+#endif
+ case CHIP_CARRIZO:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &carrizo_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+ case CHIP_HAWAII:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &hawaii_device_info;
+ f2g = &gfx_v7_kfd2kgd;
+ break;
+#endif
+ case CHIP_TONGA:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &tonga_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_FIJI:
+ if (vf)
+ device_info = &fiji_vf_device_info;
+ else
+ device_info = &fiji_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS10:
+ if (vf)
+ device_info = &polaris10_vf_device_info;
+ else
+ device_info = &polaris10_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS11:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &polaris11_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_POLARIS12:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &polaris12_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ case CHIP_VEGAM:
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vegam_device_info;
+ f2g = &gfx_v8_kfd2kgd;
+ break;
+ default:
+ switch (adev->ip_versions[GC_HWIP][0]) {
+ case IP_VERSION(9, 0, 1):
+ if (vf)
+ device_info = &vega10_vf_device_info;
+ else
+ device_info = &vega10_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+#ifdef KFD_SUPPORT_IOMMU_V2
+ case IP_VERSION(9, 1, 0):
+ case IP_VERSION(9, 2, 2):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &raven_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+#endif
+ case IP_VERSION(9, 2, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vega12_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 3, 0):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &renoir_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 0):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vega20_device_info;
+ f2g = &gfx_v9_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 1):
+ device_info = &arcturus_device_info;
+ f2g = &arcturus_kfd2kgd;
+ break;
+ case IP_VERSION(9, 4, 2):
+ device_info = &aldebaran_device_info;
+ f2g = &aldebaran_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 10):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &navi10_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 2):
+ device_info = &navi12_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &navi14_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 1, 3):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &cyan_skillfish_device_info;
+ f2g = &gfx_v10_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 0):
+ device_info = &sienna_cichlid_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 2):
+ device_info = &navy_flounder_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 1):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &vangogh_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 4):
+ device_info = &dimgrey_cavefish_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 5):
+ device_info = &beige_goby_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ case IP_VERSION(10, 3, 3):
+ if (vf)
+ device_info = NULL;
+ else
+ device_info = &yellow_carp_device_info;
+ f2g = &gfx_v10_3_kfd2kgd;
+ break;
+ default:
+ return NULL;
+ }
+ break;
}
- device_info = kfd_supported_devices[asic_type][vf];
- f2g = kfd2kgd_funcs[asic_type];
-
if (!device_info || !f2g) {
dev_err(kfd_device, "%s %s not supported in kfd\n",
- amdgpu_asic_name[asic_type], vf ? "VF" : "");
+ amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
return NULL;
}
@@ -916,6 +1023,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd_double_confirm_iommu_support(kfd);
if (kfd_iommu_device_init(kfd)) {
+ kfd->use_iommu_v2 = false;
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
}
@@ -924,6 +1032,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
svm_migrate_init((struct amdgpu_device *)kfd->kgd);
+ if (kgd2kfd_resume_iommu(kfd))
+ goto device_iommu_error;
+
if (kfd_resume(kfd))
goto kfd_resume_error;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index f8fce9d05f50..533b27b35fc9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -557,7 +557,8 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
-static int update_queue(struct device_queue_manager *dqm, struct queue *q)
+static int update_queue(struct device_queue_manager *dqm, struct queue *q,
+ struct mqd_update_info *minfo)
{
int retval = 0;
struct mqd_manager *mqd_mgr;
@@ -605,7 +606,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
+ mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties, minfo);
/*
* check active state vs. the previous state and modify
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c8719682c4da..499fc0ea387f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -93,7 +93,7 @@ struct device_queue_manager_ops {
struct queue *q);
int (*update_queue)(struct device_queue_manager *dqm,
- struct queue *q);
+ struct queue *q, struct mqd_update_info *minfo);
int (*register_process)(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 12d91e53556c..543e7ea75593 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -231,7 +231,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_gpu_reset(dev->kgd);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
return;
}
break;
@@ -253,7 +253,7 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
kfd_signal_poison_consumed_event(dev, pasid);
- amdgpu_amdkfd_gpu_reset(dev->kgd);
+ amdgpu_amdkfd_ras_poison_consumption_handler(dev->kgd);
return;
}
} else if (client_id == SOC15_IH_CLIENTID_VMC ||
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index a2b77d1df854..64b4ac339904 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -136,7 +136,6 @@ static bool kq_initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
- prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 4a16e3c257b9..6d8634e40b3b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -20,7 +20,6 @@
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
-
#include <linux/types.h>
#include <linux/hmm.h>
#include <linux/dma-direction.h>
@@ -34,6 +33,11 @@
#include "kfd_svm.h"
#include "kfd_migrate.h"
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_migrate: %s: " fmt, __func__
+
static uint64_t
svm_migrate_direct_mapping_addr(struct amdgpu_device *adev, uint64_t addr)
{
@@ -151,14 +155,14 @@ svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys,
gart_d = svm_migrate_direct_mapping_addr(adev, *vram);
}
if (r) {
- pr_debug("failed %d to create gart mapping\n", r);
+ dev_err(adev->dev, "fail %d create gart mapping\n", r);
goto out_unlock;
}
r = amdgpu_copy_buffer(ring, gart_s, gart_d, size * PAGE_SIZE,
NULL, &next, false, true, false);
if (r) {
- pr_debug("failed %d to copy memory\n", r);
+ dev_err(adev->dev, "fail %d to copy memory\n", r);
goto out_unlock;
}
@@ -264,6 +268,19 @@ static void svm_migrate_put_sys_page(unsigned long addr)
put_page(page);
}
+static unsigned long svm_migrate_successful_pages(struct migrate_vma *migrate)
+{
+ unsigned long cpages = 0;
+ unsigned long i;
+
+ for (i = 0; i < migrate->npages; i++) {
+ if (migrate->src[i] & MIGRATE_PFN_VALID &&
+ migrate->src[i] & MIGRATE_PFN_MIGRATE)
+ cpages++;
+ }
+ return cpages;
+}
+
static int
svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct migrate_vma *migrate, struct dma_fence **mfence,
@@ -285,7 +302,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
r = svm_range_vram_node_new(adev, prange, true);
if (r) {
- pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
+ dev_err(adev->dev, "fail %d to alloc vram\n", r);
goto out;
}
@@ -305,7 +322,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
DMA_TO_DEVICE);
r = dma_mapping_error(dev, src[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(adev->dev, "fail %d dma_map_page\n", r);
goto out_free_vram_pages;
}
} else {
@@ -325,8 +342,8 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
continue;
}
- pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
- src[i] >> PAGE_SHIFT, page_to_pfn(spage));
+ pr_debug_ratelimited("dma mapping src to 0x%llx, pfn 0x%lx\n",
+ src[i] >> PAGE_SHIFT, page_to_pfn(spage));
if (j >= (cursor.size >> PAGE_SHIFT) - 1 && i < npages - 1) {
r = svm_migrate_copy_memory_gart(adev, src + i - j,
@@ -372,7 +389,7 @@ out:
return r;
}
-static int
+static long
svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start,
uint64_t end)
@@ -381,6 +398,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
+ unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
@@ -405,23 +423,31 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
r = migrate_vma_setup(&migrate);
if (r) {
- pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
+ dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+ prange->start, prange->last);
goto out_free;
}
- if (migrate.cpages != npages) {
- pr_debug("Partial migration. 0x%lx/0x%llx pages can be migrated\n",
- migrate.cpages,
- npages);
- }
- if (migrate.cpages) {
- r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence,
- scratch);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
+ cpages = migrate.cpages;
+ if (!cpages) {
+ pr_debug("failed collect migrate sys pages [0x%lx 0x%lx]\n",
+ prange->start, prange->last);
+ goto out_free;
}
+ if (cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", cpages);
+
+ r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch);
+ migrate_vma_pages(&migrate);
+
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
svm_range_free_dma_mappings(prange);
@@ -429,12 +455,13 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
- WRITE_ONCE(pdd->page_in, pdd->page_in + migrate.cpages);
- }
+ WRITE_ONCE(pdd->page_in, pdd->page_in + cpages);
+ return cpages;
+ }
return r;
}
@@ -456,7 +483,8 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
unsigned long addr, start, end;
struct vm_area_struct *vma;
struct amdgpu_device *adev;
- int r = 0;
+ unsigned long cpages = 0;
+ long r = 0;
if (prange->actual_loc == best_loc) {
pr_debug("svms 0x%p [0x%lx 0x%lx] already on best_loc 0x%x\n",
@@ -488,17 +516,19 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc,
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed to migrate\n");
+ if (r < 0) {
+ pr_debug("failed %ld to migrate\n", r);
break;
+ } else {
+ cpages += r;
}
addr = next;
}
- if (!r)
+ if (cpages)
prange->actual_loc = best_loc;
- return r;
+ return r < 0 ? r : 0;
}
static void svm_migrate_page_free(struct page *page)
@@ -506,7 +536,7 @@ static void svm_migrate_page_free(struct page *page)
struct svm_range_bo *svm_bo = page->zone_device_data;
if (svm_bo) {
- pr_debug("svm_bo ref left: %d\n", kref_read(&svm_bo->kref));
+ pr_debug_ratelimited("ref: %d\n", kref_read(&svm_bo->kref));
svm_range_bo_unref(svm_bo);
}
}
@@ -572,12 +602,12 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
dst[i] = dma_map_page(dev, dpage, 0, PAGE_SIZE, DMA_FROM_DEVICE);
r = dma_mapping_error(dev, dst[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(adev->dev, "fail %d dma_map_page\n", r);
goto out_oom;
}
- pr_debug("dma mapping dst to 0x%llx, page_to_pfn 0x%lx\n",
- dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
+ pr_debug_ratelimited("dma mapping dst to 0x%llx, pfn 0x%lx\n",
+ dst[i] >> PAGE_SHIFT, page_to_pfn(dpage));
migrate->dst[i] = migrate_pfn(page_to_pfn(dpage));
migrate->dst[i] |= MIGRATE_PFN_LOCKED;
@@ -599,7 +629,7 @@ out_oom:
return r;
}
-static int
+static long
svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct vm_area_struct *vma, uint64_t start, uint64_t end)
{
@@ -607,6 +637,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
struct kfd_process_device *pdd;
struct dma_fence *mfence = NULL;
struct migrate_vma migrate;
+ unsigned long cpages = 0;
dma_addr_t *scratch;
size_t size;
void *buf;
@@ -631,34 +662,43 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange,
r = migrate_vma_setup(&migrate);
if (r) {
- pr_debug("failed %d prepare migrate svms 0x%p [0x%lx 0x%lx]\n",
- r, prange->svms, prange->start, prange->last);
+ dev_err(adev->dev, "vma setup fail %d range [0x%lx 0x%lx]\n", r,
+ prange->start, prange->last);
goto out_free;
}
- pr_debug("cpages %ld\n", migrate.cpages);
-
- if (migrate.cpages) {
- r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
- scratch, npages);
- migrate_vma_pages(&migrate);
- svm_migrate_copy_done(adev, mfence);
- migrate_vma_finalize(&migrate);
- } else {
+ cpages = migrate.cpages;
+ if (!cpages) {
pr_debug("failed collect migrate device pages [0x%lx 0x%lx]\n",
prange->start, prange->last);
+ goto out_free;
}
+ if (cpages != npages)
+ pr_debug("partial migration, 0x%lx/0x%llx pages migrated\n",
+ cpages, npages);
+ else
+ pr_debug("0x%lx pages migrated\n", cpages);
+ r = svm_migrate_copy_to_ram(adev, prange, &migrate, &mfence,
+ scratch, npages);
+ migrate_vma_pages(&migrate);
+
+ pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n",
+ svm_migrate_successful_pages(&migrate), cpages, migrate.npages);
+
+ svm_migrate_copy_done(adev, mfence);
+ migrate_vma_finalize(&migrate);
svm_range_dma_unmap(adev->dev, scratch, 0, npages);
out_free:
kvfree(buf);
out:
- if (!r) {
+ if (!r && cpages) {
pdd = svm_range_get_pdd_by_adev(prange, adev);
if (pdd)
- WRITE_ONCE(pdd->page_out,
- pdd->page_out + migrate.cpages);
+ WRITE_ONCE(pdd->page_out, pdd->page_out + cpages);
+
+ return cpages;
}
return r;
}
@@ -680,7 +720,8 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
unsigned long addr;
unsigned long start;
unsigned long end;
- int r = 0;
+ unsigned long cpages = 0;
+ long r = 0;
if (!prange->actual_loc) {
pr_debug("[0x%lx 0x%lx] already migrated to ram\n",
@@ -711,18 +752,21 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm)
next = min(vma->vm_end, end);
r = svm_migrate_vma_to_ram(adev, prange, vma, addr, next);
- if (r) {
- pr_debug("failed %d to migrate\n", r);
+ if (r < 0) {
+ pr_debug("failed %ld to migrate\n", r);
break;
+ } else {
+ cpages += r;
}
addr = next;
}
- if (!r) {
+ if (cpages) {
svm_range_vram_node_free(prange);
prange->actual_loc = 0;
}
- return r;
+
+ return r < 0 ? r : 0;
}
/**
@@ -901,8 +945,7 @@ int svm_migrate_init(struct amdgpu_device *adev)
/* Disable SVM support capability */
pgmap->type = 0;
- devm_release_mem_region(adev->dev, res->start,
- res->end - res->start + 1);
+ devm_release_mem_region(adev->dev, res->start, resource_size(res));
return PTR_ERR(r);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 6e6918ccedfd..965e17c5dbb4 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -80,7 +80,8 @@ struct mqd_manager {
struct mm_struct *mms);
void (*update_mqd)(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q);
+ struct queue_properties *q,
+ struct mqd_update_info *minfo);
int (*destroy_mqd)(struct mqd_manager *mm, void *mqd,
enum kfd_preempt_type type,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 064914e1e8d6..8128f4d312f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -42,16 +42,17 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct cik_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -135,7 +136,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -152,7 +153,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static void free_mqd(struct mqd_manager *mm, void *mqd,
@@ -185,7 +186,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q, unsigned int atc_bit)
+ struct queue_properties *q, struct mqd_update_info *minfo,
+ unsigned int atc_bit)
{
struct cik_mqd *m;
@@ -214,16 +216,17 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, 1);
+ __update_mqd(mm, mqd, q, minfo, 1);
}
static uint32_t read_doorbell_id(void *mqd)
@@ -234,13 +237,15 @@ static uint32_t read_doorbell_id(void *mqd)
}
static void update_mqd_hawaii(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, 0);
+ __update_mqd(mm, mqd, q, minfo, 0);
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct cik_sdma_rlc_registers *m;
@@ -318,7 +323,8 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct cik_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index c7fb59ca597f..270160fc401b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -42,16 +42,17 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -136,7 +137,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -162,7 +163,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v10_compute_mqd *m;
@@ -218,7 +220,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -311,7 +313,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -326,7 +328,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v10_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 7f4e102ff4bd..4e5932f54b5a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -43,16 +43,17 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct v9_mqd *m;
uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -188,7 +189,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -212,7 +213,8 @@ static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
}
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v9_mqd *m;
@@ -269,7 +271,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -366,7 +368,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -381,7 +383,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
#define SDMA_RLC_DUMMY_DEFAULT 0xf
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct v9_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 33dbd22d290f..cd9220eb8a7a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -45,16 +45,17 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
}
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct mqd_update_info *minfo)
{
struct vi_mqd *m;
uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
- if (q->cu_mask_count == 0)
+ if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
+ !minfo->cu_mask.ptr)
return;
mqd_symmetrically_map_cu_mask(mm,
- q->cu_mask, q->cu_mask_count, se_mask);
+ minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
m = get_mqd(mqd);
m->compute_static_thread_mgmt_se0 = se_mask[0];
@@ -150,7 +151,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
*mqd = m;
if (gart_addr)
*gart_addr = addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd(struct mqd_manager *mm, void *mqd,
@@ -167,8 +168,8 @@ static int load_mqd(struct mqd_manager *mm, void *mqd,
}
static void __update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q, unsigned int mtype,
- unsigned int atc_bit)
+ struct queue_properties *q, struct mqd_update_info *minfo,
+ unsigned int mtype, unsigned int atc_bit)
{
struct vi_mqd *m;
@@ -230,7 +231,7 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
- update_cu_mask(mm, mqd, q);
+ update_cu_mask(mm, mqd, minfo);
set_priority(m, q);
q->is_active = QUEUE_IS_ACTIVE(*q);
@@ -238,9 +239,10 @@ static void __update_mqd(struct mqd_manager *mm, void *mqd,
static void update_mqd(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_CC, 1);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1);
}
static uint32_t read_doorbell_id(void *mqd)
@@ -251,9 +253,10 @@ static uint32_t read_doorbell_id(void *mqd)
}
static void update_mqd_tonga(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static int destroy_mqd(struct mqd_manager *mm, void *mqd,
@@ -317,9 +320,10 @@ static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
}
static void update_mqd_hiq(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
- __update_mqd(mm, mqd, q, MTYPE_UC, 0);
+ __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0);
}
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
@@ -336,7 +340,7 @@ static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
if (gart_addr)
*gart_addr = mqd_mem_obj->gpu_addr;
- mm->update_mqd(mm, m, q);
+ mm->update_mqd(mm, m, q, NULL);
}
static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
@@ -349,7 +353,8 @@ static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
}
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
- struct queue_properties *q)
+ struct queue_properties *q,
+ struct mqd_update_info *minfo)
{
struct vi_sdma_mqd *m;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6d8f9bb2d905..4104b167e721 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -472,9 +472,6 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
- /* Relevant for CU */
- uint32_t cu_mask_count; /* Must be a multiple of 32 */
- uint32_t *cu_mask;
};
#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \
@@ -482,6 +479,20 @@ struct queue_properties {
(q).queue_percent > 0 && \
!(q).is_evicted)
+enum mqd_update_flag {
+ UPDATE_FLAG_CU_MASK = 0,
+};
+
+struct mqd_update_info {
+ union {
+ struct {
+ uint32_t count; /* Must be a multiple of 32 */
+ uint32_t *ptr;
+ } cu_mask;
+ };
+ enum mqd_update_flag update_flag;
+};
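For reference, a minimal caller-side sketch (not part of this patch; queue_id, cu_mask and cu_mask_count are placeholder names) of how the new mqd_update_info is populated for a CU-mask update, mirroring the kfd_ioctl_set_cu_mask() change earlier in this series:

	struct mqd_update_info minfo = {0};
	int retval;

	minfo.cu_mask.count = cu_mask_count;	/* multiple of 32, capped at 1024 bits */
	minfo.cu_mask.ptr = cu_mask;		/* kernel copy of the user-provided mask */
	minfo.update_flag = UPDATE_FLAG_CU_MASK;

	mutex_lock(&p->mutex);
	retval = pqm_update_mqd(&p->pqm, queue_id, &minfo);
	mutex_unlock(&p->mutex);
	kfree(minfo.cu_mask.ptr);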
+
/**
* struct queue
*
@@ -608,12 +619,14 @@ struct qcm_process_device {
uint32_t sh_hidden_private_base;
/* CWSR memory */
+ struct kgd_mem *cwsr_mem;
void *cwsr_kaddr;
uint64_t cwsr_base;
uint64_t tba_addr;
uint64_t tma_addr;
/* IB memory */
+ struct kgd_mem *ib_mem;
uint64_t ib_base;
void *ib_kaddr;
@@ -808,6 +821,7 @@ struct kfd_process {
/* Event ID allocator and lookup */
struct idr event_idr;
/* Event page */
+ u64 signal_handle;
struct kfd_signal_page *signal_page;
size_t signal_mapped_size;
size_t signal_event_count;
@@ -1031,10 +1045,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
unsigned int *qid,
uint32_t *p_doorbell_offset_in_process);
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
-int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p);
-int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+int pqm_update_queue_properties(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_update_mqd(struct process_queue_manager *pqm, unsigned int qid,
+ struct mqd_update_info *minfo);
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
void *gws);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 21ec8a18cad2..457863861d6f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -72,6 +72,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep);
static void evict_process_worker(struct work_struct *work);
static void restore_process_worker(struct work_struct *work);
+static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
+
struct kfd_procfs_tree {
struct kobject *kobj;
};
@@ -685,10 +687,15 @@ void kfd_process_destroy_wq(void)
}
static void kfd_process_free_gpuvm(struct kgd_mem *mem,
- struct kfd_process_device *pdd)
+ struct kfd_process_device *pdd, void *kptr)
{
struct kfd_dev *dev = pdd->dev;
+ if (kptr) {
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(dev->kgd, mem);
+ kptr = NULL;
+ }
+
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
NULL);
@@ -702,63 +709,46 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
*/
static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
uint64_t gpu_va, uint32_t size,
- uint32_t flags, void **kptr)
+ uint32_t flags, struct kgd_mem **mem, void **kptr)
{
struct kfd_dev *kdev = pdd->dev;
- struct kgd_mem *mem = NULL;
- int handle;
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
- pdd->drm_priv, &mem, NULL, flags);
+ pdd->drm_priv, mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, *mem,
pdd->drm_priv, NULL);
if (err)
goto err_map_mem;
- err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, mem, true);
+ err = amdgpu_amdkfd_gpuvm_sync_memory(kdev->kgd, *mem, true);
if (err) {
pr_debug("Sync memory failed, wait interrupted by user signal\n");
goto sync_memory_failed;
}
- /* Create an obj handle so kfd_process_device_remove_obj_handle
- * will take care of the bo removal when the process finishes.
- * We do not need to take p->mutex, because the process is just
- * created and the ioctls have not had the chance to run.
- */
- handle = kfd_process_device_create_obj_handle(pdd, mem);
-
- if (handle < 0) {
- err = handle;
- goto free_gpuvm;
- }
-
if (kptr) {
err = amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(kdev->kgd,
- (struct kgd_mem *)mem, kptr, NULL);
+ (struct kgd_mem *)*mem, kptr, NULL);
if (err) {
pr_debug("Map GTT BO to kernel failed\n");
- goto free_obj_handle;
+ goto sync_memory_failed;
}
}
return err;
-free_obj_handle:
- kfd_process_device_remove_obj_handle(pdd, handle);
-free_gpuvm:
sync_memory_failed:
- kfd_process_free_gpuvm(mem, pdd);
- return err;
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(kdev->kgd, *mem, pdd->drm_priv);
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, *mem, pdd->drm_priv,
NULL);
err_alloc_mem:
+ *mem = NULL;
*kptr = NULL;
return err;
}
@@ -776,6 +766,7 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
+ struct kgd_mem *mem;
void *kaddr;
int ret;
@@ -784,15 +775,26 @@ static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
/* ib_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->ib_base, PAGE_SIZE, flags,
- &kaddr);
+ &mem, &kaddr);
if (ret)
return ret;
+ qpd->ib_mem = mem;
qpd->ib_kaddr = kaddr;
return 0;
}
+static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd)
+{
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (!qpd->ib_kaddr || !qpd->ib_base)
+ return;
+
+ kfd_process_free_gpuvm(qpd->ib_mem, pdd, qpd->ib_kaddr);
+}
+
struct kfd_process *kfd_create_process(struct file *filep)
{
struct kfd_process *process;
@@ -947,6 +949,37 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
}
}
+/*
+ * Just kunmap and unpin signal BO here. It will be freed in
+ * kfd_process_free_outstanding_kfd_bos()
+ */
+static void kfd_process_kunmap_signal_bo(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_dev *kdev;
+ void *mem;
+
+ kdev = kfd_device_by_id(GET_GPU_ID(p->signal_handle));
+ if (!kdev)
+ return;
+
+ mutex_lock(&p->mutex);
+
+ pdd = kfd_get_process_device_data(kdev, p);
+ if (!pdd)
+ goto out;
+
+ mem = kfd_process_device_translate_handle(
+ pdd, GET_IDR_HANDLE(p->signal_handle));
+ if (!mem)
+ goto out;
+
+ amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(kdev->kgd, mem);
+
+out:
+ mutex_unlock(&p->mutex);
+}
+
static void kfd_process_free_outstanding_kfd_bos(struct kfd_process *p)
{
int i;
@@ -965,6 +998,9 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n",
pdd->dev->id, p->pasid);
+ kfd_process_device_destroy_cwsr_dgpu(pdd);
+ kfd_process_device_destroy_ib_mem(pdd);
+
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
pdd->dev->kgd, pdd->drm_priv);
@@ -1049,9 +1085,11 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
+
kfd_process_remove_sysfs(p);
kfd_iommu_unbind_process(p);
+ kfd_process_kunmap_signal_bo(p);
kfd_process_free_outstanding_kfd_bos(p);
svm_range_list_fini(p);
@@ -1198,6 +1236,7 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
| KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
| KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
+ struct kgd_mem *mem;
void *kaddr;
int ret;
@@ -1206,10 +1245,11 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
/* cwsr_base is only set for dGPU */
ret = kfd_process_alloc_gpuvm(pdd, qpd->cwsr_base,
- KFD_CWSR_TBA_TMA_SIZE, flags, &kaddr);
+ KFD_CWSR_TBA_TMA_SIZE, flags, &mem, &kaddr);
if (ret)
return ret;
+ qpd->cwsr_mem = mem;
qpd->cwsr_kaddr = kaddr;
qpd->tba_addr = qpd->cwsr_base;
@@ -1222,6 +1262,17 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
return 0;
}
+static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd)
+{
+ struct kfd_dev *dev = pdd->dev;
+ struct qcm_process_device *qpd = &pdd->qpd;
+
+ if (!dev->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)
+ return;
+
+ kfd_process_free_gpuvm(qpd->cwsr_mem, pdd, qpd->cwsr_kaddr);
+}
+
void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
uint64_t tba_addr,
uint64_t tma_addr)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 243dd1efcdbf..3627e7ac161b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -121,7 +121,7 @@ int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
pdd->qpd.num_gws = gws ? amdgpu_amdkfd_get_num_gws(dev->kgd) : 0;
return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, NULL);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
@@ -394,8 +394,6 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
pdd->qpd.num_gws = 0;
}
- kfree(pqn->q->properties.cu_mask);
- pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -411,8 +409,8 @@ err_destroy_queue:
return retval;
}
-int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p)
+int pqm_update_queue_properties(struct process_queue_manager *pqm,
+ unsigned int qid, struct queue_properties *p)
{
int retval;
struct process_queue_node *pqn;
@@ -429,15 +427,15 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
pqn->q->properties.priority = p->priority;
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, NULL);
if (retval != 0)
return retval;
return 0;
}
-int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
- struct queue_properties *p)
+int pqm_update_mqd(struct process_queue_manager *pqm,
+ unsigned int qid, struct mqd_update_info *minfo)
{
int retval;
struct process_queue_node *pqn;
@@ -448,16 +446,8 @@ int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
return -EFAULT;
}
- /* Free the old CU mask memory if it is already allocated, then
- * allocate memory for the new CU mask.
- */
- kfree(pqn->q->properties.cu_mask);
-
- pqn->q->properties.cu_mask_count = p->cu_mask_count;
- pqn->q->properties.cu_mask = p->cu_mask;
-
retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
- pqn->q);
+ pqn->q, minfo);
if (retval != 0)
return retval;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9d0f65a90002..b691c8495d66 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -33,6 +33,11 @@
#include "kfd_svm.h"
#include "kfd_migrate.h"
+#ifdef dev_fmt
+#undef dev_fmt
+#endif
+#define dev_fmt(fmt) "kfd_svm: %s: " fmt, __func__
+
#define AMDGPU_SVM_RANGE_RESTORE_DELAY_MS 1
/* Long enough to ensure no retry fault comes after svm range is restored and
@@ -45,7 +50,9 @@ static bool
svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni,
const struct mmu_notifier_range *range,
unsigned long cur_seq);
-
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l);
static const struct mmu_interval_notifier_ops svm_range_mn_ops = {
.invalidate = svm_range_cpu_invalidate_pagetables,
};
@@ -158,17 +165,17 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange,
bo_adev->vm_manager.vram_base_offset -
bo_adev->kfd.dev->pgmap.range.start;
addr[i] |= SVM_RANGE_VRAM_DOMAIN;
- pr_debug("vram address detected: 0x%llx\n", addr[i]);
+ pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]);
continue;
}
addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
r = dma_mapping_error(dev, addr[i]);
if (r) {
- pr_debug("failed %d dma_map_page\n", r);
+ dev_err(dev, "failed %d dma_map_page\n", r);
return r;
}
- pr_debug("dma mapping 0x%llx for page addr 0x%lx\n",
- addr[i] >> PAGE_SHIFT, page_to_pfn(page));
+ pr_debug_ratelimited("dma mapping 0x%llx for page addr 0x%lx\n",
+ addr[i] >> PAGE_SHIFT, page_to_pfn(page));
}
return 0;
}
@@ -217,7 +224,7 @@ void svm_range_dma_unmap(struct device *dev, dma_addr_t *dma_addr,
for (i = offset; i < offset + npages; i++) {
if (!svm_is_valid_dma_mapping_addr(dev, dma_addr[i]))
continue;
- pr_debug("dma unmapping 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
+ pr_debug_ratelimited("unmap 0x%llx\n", dma_addr[i] >> PAGE_SHIFT);
dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
dma_addr[i] = 0;
}
@@ -1307,7 +1314,7 @@ struct svm_validate_context {
struct svm_range *prange;
bool intr;
unsigned long bitmap[MAX_GPU_INSTANCE];
- struct ttm_validate_buffer tv[MAX_GPU_INSTANCE+1];
+ struct ttm_validate_buffer tv[MAX_GPU_INSTANCE];
struct list_head validate_list;
struct ww_acquire_ctx ticket;
};
@@ -1334,11 +1341,6 @@ static int svm_range_reserve_bos(struct svm_validate_context *ctx)
ctx->tv[gpuidx].num_shared = 4;
list_add(&ctx->tv[gpuidx].head, &ctx->validate_list);
}
- if (ctx->prange->svm_bo && ctx->prange->ttm_res) {
- ctx->tv[MAX_GPU_INSTANCE].bo = &ctx->prange->svm_bo->bo->tbo;
- ctx->tv[MAX_GPU_INSTANCE].num_shared = 1;
- list_add(&ctx->tv[MAX_GPU_INSTANCE].head, &ctx->validate_list);
- }
r = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->validate_list,
ctx->intr, NULL);
@@ -1459,7 +1461,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
/* This should never happen. actual_loc gets set by
* svm_migrate_ram_to_vram after allocating a BO.
*/
- WARN(1, "VRAM BO missing during validation\n");
+ WARN_ONCE(1, "VRAM BO missing during validation\n");
return -EINVAL;
}
@@ -1552,7 +1554,7 @@ unreserve_out:
* Context: Returns with mmap write lock held, pending deferred work flushed
*
*/
-static void
+void
svm_range_list_lock_and_flush_work(struct svm_range_list *svms,
struct mm_struct *mm)
{
@@ -2308,6 +2310,7 @@ svm_range_best_restore_location(struct svm_range *prange,
return -1;
}
+
static int
svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
unsigned long *start, unsigned long *last)
@@ -2355,8 +2358,59 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
vma->vm_end >> PAGE_SHIFT, *last);
return 0;
+}
+static int
+svm_range_check_vm_userptr(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct interval_tree_node *node;
+ struct amdgpu_bo *bo = NULL;
+ unsigned long userptr;
+ uint32_t i;
+ int r;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct amdgpu_vm *vm;
+
+ if (!p->pdds[i]->drm_priv)
+ continue;
+
+ vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ /* Check userptr by searching entire vm->va interval tree */
+ node = interval_tree_iter_first(&vm->va, 0, ~0ULL);
+ while (node) {
+ mapping = container_of((struct rb_node *)node,
+ struct amdgpu_bo_va_mapping, rb);
+ bo = mapping->bo_va->base.bo;
+
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
+ start << PAGE_SHIFT,
+ last << PAGE_SHIFT,
+ &userptr)) {
+ node = interval_tree_iter_next(node, 0, ~0ULL);
+ continue;
+ }
+
+ pr_debug("[0x%llx 0x%llx] already userptr mapped\n",
+ start, last);
+ if (bo_s && bo_l) {
+ *bo_s = userptr >> PAGE_SHIFT;
+ *bo_l = *bo_s + bo->tbo.ttm->num_pages - 1;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ return -EADDRINUSE;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
+ return 0;
}
+
static struct
svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
struct kfd_process *p,
@@ -2366,10 +2420,26 @@ svm_range *svm_range_create_unregistered_range(struct amdgpu_device *adev,
struct svm_range *prange = NULL;
unsigned long start, last;
uint32_t gpuid, gpuidx;
+ uint64_t bo_s = 0;
+ uint64_t bo_l = 0;
+ int r;
if (svm_range_get_range_boundaries(p, addr, &start, &last))
return NULL;
+ r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
+ if (r != -EADDRINUSE)
+ r = svm_range_check_vm_userptr(p, start, last, &bo_s, &bo_l);
+
+ if (r == -EADDRINUSE) {
+ if (addr >= bo_s && addr <= bo_l)
+ return NULL;
+
+ /* Create one page svm range if 2MB range overlapping */
+ start = addr;
+ last = addr;
+ }
+
prange = svm_range_new(&p->svms, start, last);
if (!prange) {
pr_debug("Failed to create prange in address [0x%llx]\n", addr);
@@ -2668,8 +2738,67 @@ int svm_range_list_init(struct kfd_process *p)
}
/**
+ * svm_range_check_vm - check if a virtual address range is already mapped
+ * @p: current kfd_process
+ * @start: range start address, in pages
+ * @last: range last address, in pages
+ * @bo_s: mapping start address in pages if address range already mapped
+ * @bo_l: mapping last address in pages if address range already mapped
+ *
+ * The purpose is to avoid colliding with virtual address ranges already
+ * allocated by the kfd_ioctl_alloc_memory_of_gpu ioctl.
+ * It checks each pdd in the kfd_process.
+ *
+ * Context: Process context
+ *
+ * Return 0 - OK, if the range is not mapped.
+ * Otherwise error code:
+ * -EADDRINUSE - if address is mapped already by kfd_ioctl_alloc_memory_of_gpu
+ * -ERESTARTSYS - A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
+ */
+static int
+svm_range_check_vm(struct kfd_process *p, uint64_t start, uint64_t last,
+ uint64_t *bo_s, uint64_t *bo_l)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+ struct interval_tree_node *node;
+ uint32_t i;
+ int r;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ struct amdgpu_vm *vm;
+
+ if (!p->pdds[i]->drm_priv)
+ continue;
+
+ vm = drm_priv_to_vm(p->pdds[i]->drm_priv);
+ r = amdgpu_bo_reserve(vm->root.bo, false);
+ if (r)
+ return r;
+
+ node = interval_tree_iter_first(&vm->va, start, last);
+ if (node) {
+ pr_debug("range [0x%llx 0x%llx] already TTM mapped\n",
+ start, last);
+ mapping = container_of((struct rb_node *)node,
+ struct amdgpu_bo_va_mapping, rb);
+ if (bo_s && bo_l) {
+ *bo_s = mapping->start;
+ *bo_l = mapping->last;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ return -EADDRINUSE;
+ }
+ amdgpu_bo_unreserve(vm->root.bo);
+ }
+
+ return 0;
+}
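A brief caller-side sketch (illustrative only, not part of this patch; start and last are assumed to be page numbers of the candidate range) of how the return values are distinguished, matching what svm_range_create_unregistered_range() does above:

	uint64_t bo_s = 0, bo_l = 0;
	int r;

	r = svm_range_check_vm(p, start, last, &bo_s, &bo_l);
	if (r == -EADDRINUSE)
		pr_debug("overlaps existing mapping [0x%llx 0x%llx]\n", bo_s, bo_l);
	else if (r)
		return r;	/* e.g. -ERESTARTSYS from amdgpu_bo_reserve() */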
+
+/**
* svm_range_is_valid - check if virtual address range is valid
- * @mm: current process mm_struct
+ * @p: current kfd_process
* @start: range start address, in pages
* @size: range size, in pages
*
@@ -2678,28 +2807,28 @@ int svm_range_list_init(struct kfd_process *p)
* Context: Process context
*
* Return:
- * true - valid svm range
- * false - invalid svm range
+ * 0 - OK, otherwise error code
*/
-static bool
-svm_range_is_valid(struct mm_struct *mm, uint64_t start, uint64_t size)
+static int
+svm_range_is_valid(struct kfd_process *p, uint64_t start, uint64_t size)
{
const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
struct vm_area_struct *vma;
unsigned long end;
+ unsigned long start_unchg = start;
start <<= PAGE_SHIFT;
end = start + (size << PAGE_SHIFT);
-
do {
- vma = find_vma(mm, start);
+ vma = find_vma(p->mm, start);
if (!vma || start < vma->vm_start ||
(vma->vm_flags & device_vma))
- return false;
+ return -EFAULT;
start = min(end, vma->vm_end);
} while (start < end);
- return true;
+ return svm_range_check_vm(p, start_unchg, (end - 1) >> PAGE_SHIFT, NULL,
+ NULL);
}
/**
@@ -3002,9 +3131,9 @@ svm_range_set_attr(struct kfd_process *p, uint64_t start, uint64_t size,
svm_range_list_lock_and_flush_work(svms, mm);
- if (!svm_range_is_valid(mm, start, size)) {
- pr_debug("invalid range\n");
- r = -EFAULT;
+ r = svm_range_is_valid(p, start, size);
+ if (r) {
+ pr_debug("invalid range r=%d\n", r);
mmap_write_unlock(mm);
goto out;
}
@@ -3106,6 +3235,7 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
uint32_t flags_or = 0;
int gpuidx;
uint32_t i;
+ int r = 0;
pr_debug("svms 0x%p [0x%llx 0x%llx] nattr 0x%x\n", &p->svms, start,
start + size - 1, nattr);
@@ -3119,12 +3249,12 @@ svm_range_get_attr(struct kfd_process *p, uint64_t start, uint64_t size,
flush_work(&p->svms.deferred_list_work);
mmap_read_lock(mm);
- if (!svm_range_is_valid(mm, start, size)) {
- pr_debug("invalid range\n");
- mmap_read_unlock(mm);
- return -EINVAL;
- }
+ r = svm_range_is_valid(p, start, size);
mmap_read_unlock(mm);
+ if (r) {
+ pr_debug("invalid range r=%d\n", r);
+ return r;
+ }
for (i = 0; i < nattr; i++) {
switch (attrs[i].type) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
index c6ec55354c7b..6dc91c33e80f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h
@@ -188,6 +188,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm,
void *owner);
struct kfd_process_device *
svm_range_get_pdd_by_adev(struct svm_range *prange, struct amdgpu_device *adev);
+void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm);
/* SVM API and HMM page migration work together, device memory type
* is initialized to not 0 when page migration register device memory.
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 98cca5f2b27f..dd593ad0614a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1296,6 +1296,24 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
proximity_domain = atomic_inc_return(&topology_crat_proximity_domain);
+ adev = (struct amdgpu_device *)(gpu->kgd);
+
+ /* Include the CPU in the xGMI hive if it is xGMI-connected, by assigning it the hive ID. */
+ if (gpu->hive_id && adev->gmc.xgmi.connected_to_cpu) {
+ struct kfd_topology_device *top_dev;
+
+ down_read(&topology_lock);
+
+ list_for_each_entry(top_dev, &topology_device_list, list) {
+ if (top_dev->gpu)
+ break;
+
+ top_dev->node_props.hive_id = gpu->hive_id;
+ }
+
+ up_read(&topology_lock);
+ }
+
/* Check to see if this gpu device exists in the topology_device_list.
* If so, assign the gpu to that device,
* else create a Virtual CRAT for this gpu device and then parse that
@@ -1457,7 +1475,6 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.max_waves_per_simd = 10;
}
- adev = (struct amdgpu_device *)(dev->gpu->kgd);
/* kfd only concerns sram ecc on GFX and HBM ecc on UMC */
dev->node_props.capability |=
((adev->ras_enabled & BIT(AMDGPU_RAS_BLOCK__GFX)) != 0) ?
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1ea31dcc7a8b..43e983e42c0f 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -29,6 +29,7 @@
#include "dm_services_types.h"
#include "dc.h"
#include "dc_link_dp.h"
+#include "link_enc_cfg.h"
#include "dc/inc/core_types.h"
#include "dal_asic_id.h"
#include "dmub/dmub_srv.h"
@@ -215,6 +216,8 @@ static void handle_cursor_update(struct drm_plane *plane,
static const struct drm_format_info *
amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
+
static bool
is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
struct drm_crtc_state *new_crtc_state);
@@ -618,6 +621,121 @@ static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
}
#endif
+/**
+ * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB AUX or SET_CONFIG command completion processing callback.
+ * Copies the DMUB notification to DM, to be read by the thread that issued
+ * the AUX command, and signals the event to wake up that thread.
+ */
+void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ if (adev->dm.dmub_notify)
+ memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
+ if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
+ complete(&adev->dm.dmub_aux_transfer_done);
+}
+
+/**
+ * dmub_hpd_callback - DMUB HPD interrupt processing callback.
+ * @adev: amdgpu_device pointer
+ * @notify: dmub notification structure
+ *
+ * DMUB HPD interrupt processing callback. Gets the display index from the
+ * link index and calls the helper to do the processing.
+ */
+void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
+{
+ struct amdgpu_dm_connector *aconnector;
+ struct amdgpu_dm_connector *hpd_aconnector = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ struct dc_link *link;
+ uint8_t link_index = 0;
+ struct drm_device *dev = adev->dm.ddev;
+
+ if (adev == NULL)
+ return;
+
+ if (notify == NULL) {
+ DRM_ERROR("DMUB HPD callback notification was NULL");
+ return;
+ }
+
+ if (notify->link_index >= adev->dm.dc->link_count) {
+ DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index);
+ return;
+ }
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ link_index = notify->link_index;
+
+ link = adev->dm.dc->links[link_index];
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ if (link && aconnector->dc_link == link) {
+ DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
+ hpd_aconnector = aconnector;
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+
+ if (hpd_aconnector)
+ handle_hpd_irq_helper(hpd_aconnector);
+}
+
+/**
+ * register_dmub_notify_callback - Sets callback for DMUB notify
+ * @adev: amdgpu_device pointer
+ * @type: Type of dmub notification
+ * @callback: Dmub interrupt callback function
+ * @dmub_int_thread_offload: offload indicator
+ *
+ * API to register a dmub callback handler for a dmub notification
+ * Also sets an indicator for whether callback processing is to be offloaded
+ * to the DMUB interrupt handling thread.
+ * Return: true if successfully registered, false if the callback is NULL or the type is invalid
+ */
+bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
+dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
+{
+ if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
+ adev->dm.dmub_callback[type] = callback;
+ adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
+ } else
+ return false;
+
+ return true;
+}
+
+static void dm_handle_hpd_work(struct work_struct *work)
+{
+ struct dmub_hpd_work *dmub_hpd_wrk;
+
+ dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
+
+ if (!dmub_hpd_wrk->dmub_notify) {
+ DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
+ return;
+ }
+
+ if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
+ dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
+ dmub_hpd_wrk->dmub_notify);
+ }
+
+ kfree(dmub_hpd_wrk->dmub_notify);
+ kfree(dmub_hpd_wrk);
+}
+
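For illustration, a minimal sketch (not part of this patch) of how a driver-internal caller could hook into the DMUB notification plumbing added above: define a callback matching dmub_notify_interrupt_callback_t and register it with register_dmub_notify_callback(). The callback and wrapper names below are hypothetical placeholders; the real registrations made by this change are dmub_aux_setconfig_callback (handled synchronously) and dmub_hpd_callback (offloaded to delayed_hpd_wq) in amdgpu_dm_init().

/* Hypothetical example only -- not added by this patch. */
static void example_dmub_notify_cb(struct amdgpu_device *adev,
				   struct dmub_notification *notify)
{
	/* Runs either directly from dm_dmub_outbox1_low_irq() or, when
	 * registered with offload == true, from dm_handle_hpd_work(). */
	DRM_DEBUG_DRIVER("DMUB notification type %d\n", notify->type);
}

static int example_register_dmub_cb(struct amdgpu_device *adev)
{
	/* false: run in the outbox IRQ path; true: queue to delayed_hpd_wq. */
	if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
					   example_dmub_notify_cb, false))
		return -EINVAL;

	return 0;
}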
#define DMUB_TRACE_MAX_READ 64
/**
* dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
@@ -634,22 +752,47 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
struct amdgpu_display_manager *dm = &adev->dm;
struct dmcub_trace_buf_entry entry = { 0 };
uint32_t count = 0;
+ struct dmub_hpd_work *dmub_hpd_wrk;
+ struct dc_link *plink = NULL;
- if (dc_enable_dmub_notifications(adev->dm.dc)) {
- if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
- do {
- dc_stat_get_dmub_notification(adev->dm.dc, &notify);
- } while (notify.pending_notification);
-
- if (adev->dm.dmub_notify)
- memcpy(adev->dm.dmub_notify, &notify, sizeof(struct dmub_notification));
- if (notify.type == DMUB_NOTIFICATION_AUX_REPLY)
- complete(&adev->dm.dmub_aux_transfer_done);
- // TODO : HPD Implementation
+ if (dc_enable_dmub_notifications(adev->dm.dc) &&
+ irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
- } else {
- DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
- }
+ do {
+ dc_stat_get_dmub_notification(adev->dm.dc, &notify);
+ if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
+ DRM_ERROR("DM: notify type %d invalid!", notify.type);
+ continue;
+ }
+ if (dm->dmub_thread_offload[notify.type] == true) {
+ dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
+ if (!dmub_hpd_wrk) {
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk");
+ return;
+ }
+ dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC);
+ if (!dmub_hpd_wrk->dmub_notify) {
+ kfree(dmub_hpd_wrk);
+ DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify");
+ return;
+ }
+ INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
+ memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification));
+ dmub_hpd_wrk->adev = adev;
+ if (notify.type == DMUB_NOTIFICATION_HPD) {
+ plink = adev->dm.dc->links[notify.link_index];
+ if (plink) {
+ plink->hpd_status = notify.hpd_status == DP_HPD_PLUG;
+ }
+ }
+ queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
+ } else {
+ dm->dmub_callback[notify.type](adev, &notify);
+ }
+ } while (notify.pending_notification);
}
@@ -667,7 +810,8 @@ static void dm_dmub_outbox1_low_irq(void *interrupt_params)
} while (count <= DMUB_TRACE_MAX_READ);
- ASSERT(count <= DMUB_TRACE_MAX_READ);
+ if (count > DMUB_TRACE_MAX_READ)
+ DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ");
}
#endif
@@ -873,6 +1017,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
const unsigned char *fw_inst_const, *fw_bss_data;
uint32_t i, fw_inst_const_size, fw_bss_data_size;
bool has_hw_support;
+ struct dc *dc = adev->dm.dc;
if (!dmub_srv)
/* DMUB isn't supported on the ASIC. */
@@ -959,6 +1104,19 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
for (i = 0; i < fb_info->num_fb; ++i)
hw_params.fb[i] = &fb_info->fb[i];
+ switch (adev->asic_type) {
+ case CHIP_YELLOW_CARP:
+ if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) {
+ hw_params.dpia_supported = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia;
+#endif
+ }
+ break;
+ default:
+ break;
+ }
+
status = dmub_srv_hw_init(dmub_srv, &hw_params);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error initializing DMUB HW: %d\n", status);
@@ -1083,6 +1241,114 @@ static void vblank_control_worker(struct work_struct *work)
}
#endif
+
+static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
+{
+ struct hpd_rx_irq_offload_work *offload_work;
+ struct amdgpu_dm_connector *aconnector;
+ struct dc_link *dc_link;
+ struct amdgpu_device *adev;
+ enum dc_connection_type new_connection_type = dc_connection_none;
+ unsigned long flags;
+
+ offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
+ aconnector = offload_work->offload_wq->aconnector;
+
+ if (!aconnector) {
+ DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
+ goto skip;
+ }
+
+ adev = drm_to_adev(aconnector->base.dev);
+ dc_link = aconnector->dc_link;
+
+ mutex_lock(&aconnector->hpd_lock);
+ if (!dc_link_detect_sink(dc_link, &new_connection_type))
+ DRM_ERROR("KMS: Failed to detect connector\n");
+ mutex_unlock(&aconnector->hpd_lock);
+
+ if (new_connection_type == dc_connection_none)
+ goto skip;
+
+ if (amdgpu_in_reset(adev))
+ goto skip;
+
+ mutex_lock(&adev->dm.dc_lock);
+ if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
+ dc_link_dp_handle_automated_test(dc_link);
+ else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
+ hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
+ dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ dc_link_dp_handle_link_loss(dc_link);
+ spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
+ offload_work->offload_wq->is_handling_link_loss = false;
+ spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
+ }
+ mutex_unlock(&adev->dm.dc_lock);
+
+skip:
+ kfree(offload_work);
+}
+
+static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
+{
+ int max_caps = dc->caps.max_links;
+ int i = 0;
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
+
+ hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
+
+ if (!hpd_rx_offload_wq)
+ return NULL;
+
+ for (i = 0; i < max_caps; i++) {
+ hpd_rx_offload_wq[i].wq =
+ create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
+
+ if (hpd_rx_offload_wq[i].wq == NULL) {
+ DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
+ return NULL;
+ }
+
+ spin_lock_init(&hpd_rx_offload_wq[i].offload_lock);
+ }
+
+ return hpd_rx_offload_wq;
+}
+
+struct amdgpu_stutter_quirk {
+ u16 chip_vendor;
+ u16 chip_device;
+ u16 subsys_vendor;
+ u16 subsys_device;
+ u8 revision;
+};
+
+static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
+ { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
+ { 0, 0, 0, 0, 0 },
+};
+
+static bool dm_should_disable_stutter(struct pci_dev *pdev)
+{
+ const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
+
+ while (p && p->chip_device != 0) {
+ if (pdev->vendor == p->chip_vendor &&
+ pdev->device == p->chip_device &&
+ pdev->subsystem_vendor == p->subsys_vendor &&
+ pdev->subsystem_device == p->subsys_device &&
+ pdev->revision == p->revision) {
+ return true;
+ }
+ ++p;
+ }
+ return false;
+}
+
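The quirk table above is a sentinel-terminated PCI ID list: dm_should_disable_stutter() walks entries until it hits chip_device == 0, so additional boards are handled by inserting a row before the terminator. A hedged sketch of what such an addition would look like; the second entry's IDs are made-up placeholders, not a real quirk:

static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
	/* hypothetical second entry: chip vendor/device, subsystem vendor/device, revision */
	{ 0x1002, 0x15d8, 0x1043, 0x8809, 0xc4 },
	{ 0, 0, 0, 0, 0 },	/* terminator -- must stay last */
};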
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1138,17 +1404,27 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- init_data.flags.gpu_vm_support = true;
- if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
- init_data.flags.disable_dmcu = true;
- break;
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
init_data.flags.gpu_vm_support = true;
break;
default:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
+ init_data.flags.gpu_vm_support = true;
+ init_data.flags.disable_dmcu = true;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ init_data.flags.gpu_vm_support = true;
+ break;
+ case IP_VERSION(2, 0, 3):
+ init_data.flags.disable_dmcu = true;
+ break;
+ default:
+ break;
+ }
break;
}
@@ -1184,6 +1460,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ if (dm_should_disable_stutter(adev->pdev))
+ adev->dm.dc->debug.disable_stutter = true;
if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
adev->dm.dc->debug.disable_stutter = true;
@@ -1202,6 +1480,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
dc_hardware_init(adev->dm.dc);
+ adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
+ if (!adev->dm.hpd_rx_offload_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
+ goto error;
+ }
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
struct dc_phy_addr_space_config pa_config;
@@ -1233,7 +1517,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#endif
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
+ if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) {
adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
@@ -1254,7 +1538,25 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
goto error;
}
+
+ adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
+ if (!adev->dm.delayed_hpd_wq) {
+ DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
+ goto error;
+ }
+
amdgpu_dm_outbox_init(adev);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
+ dmub_aux_setconfig_callback, false)) {
+ DRM_ERROR("amdgpu: fail to register dmub aux callback");
+ goto error;
+ }
+ if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
+ DRM_ERROR("amdgpu: fail to register dmub hpd callback");
+ goto error;
+ }
+#endif
}
if (amdgpu_dm_initialize_drm_device(adev)) {
@@ -1336,6 +1638,8 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
if (dc_enable_dmub_notifications(adev->dm.dc)) {
kfree(adev->dm.dmub_notify);
adev->dm.dmub_notify = NULL;
+ destroy_workqueue(adev->dm.delayed_hpd_wq);
+ adev->dm.delayed_hpd_wq = NULL;
}
if (adev->dm.dmub_bo)
@@ -1343,6 +1647,18 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)
&adev->dm.dmub_bo_gpu_addr,
&adev->dm.dmub_bo_cpu_addr);
+ if (adev->dm.hpd_rx_offload_wq) {
+ for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+ if (adev->dm.hpd_rx_offload_wq[i].wq) {
+ destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+ adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+ }
+ }
+
+ kfree(adev->dm.hpd_rx_offload_wq);
+ adev->dm.hpd_rx_offload_wq = NULL;
+ }
+
/* DC Destroy TODO: Replace destroy DAL */
if (adev->dm.dc)
dc_destroy(&adev->dm.dc);
@@ -1396,15 +1712,6 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_RENOIR:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
return 0;
case CHIP_NAVI12:
fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
@@ -1418,6 +1725,21 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
return 0;
break;
default:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ return 0;
+ default:
+ break;
+ }
DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
return -EINVAL;
}
@@ -1496,35 +1818,37 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
enum dmub_status status;
int r;
- switch (adev->asic_type) {
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 1, 0):
dmub_asic = DMUB_ASIC_DCN21;
fw_name_dmub = FIRMWARE_RENOIR_DMUB;
if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
break;
- case CHIP_SIENNA_CICHLID:
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
- break;
- case CHIP_NAVY_FLOUNDER:
- dmub_asic = DMUB_ASIC_DCN30;
- fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ case IP_VERSION(3, 0, 0):
+ if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) {
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
+ } else {
+ dmub_asic = DMUB_ASIC_DCN30;
+ fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
+ }
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(3, 0, 1):
dmub_asic = DMUB_ASIC_DCN301;
fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(3, 0, 2):
dmub_asic = DMUB_ASIC_DCN302;
fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(3, 0, 3):
dmub_asic = DMUB_ASIC_DCN303;
fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
break;
- case CHIP_YELLOW_CARP:
- dmub_asic = DMUB_ASIC_DCN31;
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31;
fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
break;
@@ -1823,10 +2147,9 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
* therefore, this function apply to navi10/12/14 but not Renoir
* *
*/
- switch(adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 0):
break;
default:
return 0;
@@ -1980,6 +2303,16 @@ context_alloc_fail:
return res;
}
+static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
+{
+ int i;
+
+ if (dm->hpd_rx_offload_wq) {
+ for (i = 0; i < dm->dc->caps.max_links; i++)
+ flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
+ }
+}
+
static int dm_suspend(void *handle)
{
struct amdgpu_device *adev = handle;
@@ -2001,6 +2334,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
return ret;
}
@@ -2011,6 +2346,8 @@ static int dm_suspend(void *handle)
amdgpu_dm_irq_suspend(adev);
+ hpd_rx_irq_work_suspend(dm);
+
dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
return 0;
@@ -2157,7 +2494,7 @@ cleanup:
return;
}
-static void dm_set_dpms_off(struct dc_link *link)
+static void dm_set_dpms_off(struct dc_link *link, struct dm_crtc_state *acrtc_state)
{
struct dc_stream_state *stream_state;
struct amdgpu_dm_connector *aconnector = link->priv;
@@ -2178,6 +2515,7 @@ static void dm_set_dpms_off(struct dc_link *link)
}
stream_update.stream = stream_state;
+ acrtc_state->force_dpms_off = true;
dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
stream_state, &stream_update,
stream_state->ctx->dc->current_state);
@@ -2615,20 +2953,22 @@ void amdgpu_dm_update_connector_after_detect(
dc_sink_release(sink);
}
-static void handle_hpd_irq(void *param)
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
{
- struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
-#ifdef CONFIG_DRM_AMD_DC_HDCP
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
-#endif
+ struct dm_crtc_state *dm_crtc_state = NULL;
if (adev->dm.disable_hpd_irq)
return;
+ if (dm_con_state->base.state && dm_con_state->base.crtc)
+ dm_crtc_state = to_dm_crtc_state(drm_atomic_get_crtc_state(
+ dm_con_state->base.state,
+ dm_con_state->base.crtc));
/*
* In case of failure or MST no need to update connector status or notify the OS
* since (for MST case) MST does this in its own context.
@@ -2650,7 +2990,6 @@ static void handle_hpd_irq(void *param)
if (aconnector->base.force && new_connection_type == dc_connection_none) {
emulated_link_detect(aconnector->dc_link);
-
drm_modeset_lock_all(dev);
dm_restore_drm_connector_state(dev, connector);
drm_modeset_unlock_all(dev);
@@ -2660,8 +2999,9 @@ static void handle_hpd_irq(void *param)
} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
if (new_connection_type == dc_connection_none &&
- aconnector->dc_link->type == dc_connection_none)
- dm_set_dpms_off(aconnector->dc_link);
+ aconnector->dc_link->type == dc_connection_none &&
+ dm_crtc_state)
+ dm_set_dpms_off(aconnector->dc_link, dm_crtc_state);
amdgpu_dm_update_connector_after_detect(aconnector);
@@ -2676,7 +3016,15 @@ static void handle_hpd_irq(void *param)
}
-static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
+static void handle_hpd_irq(void *param)
+{
+ struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+ handle_hpd_irq_helper(aconnector);
+
+}
+
+static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
{
uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
uint8_t dret;
@@ -2754,6 +3102,25 @@ static void dm_handle_hpd_rx_irq(struct amdgpu_dm_connector *aconnector)
DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
}
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+ union hpd_irq_data hpd_irq_data)
+{
+ struct hpd_rx_irq_offload_work *offload_work =
+ kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+ if (!offload_work) {
+ DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+ return;
+ }
+
+ INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+ offload_work->data = hpd_irq_data;
+ offload_work->offload_wq = offload_wq;
+
+ queue_work(offload_wq->wq, &offload_work->work);
+ DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
static void handle_hpd_rx_irq(void *param)
{
struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
@@ -2765,14 +3132,16 @@ static void handle_hpd_rx_irq(void *param)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
union hpd_irq_data hpd_irq_data;
- bool lock_flag = 0;
+ bool link_loss = false;
+ bool has_left_work = false;
+ int idx = aconnector->base.index;
+ struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
if (adev->dm.disable_hpd_irq)
return;
-
/*
* TODO:Temporary add mutex to protect hpd interrupt not have a gpio
* conflict, after implement i2c helper, this mutex should be
@@ -2780,43 +3149,41 @@ static void handle_hpd_rx_irq(void *param)
*/
mutex_lock(&aconnector->hpd_lock);
- read_hpd_rx_irq_data(dc_link, &hpd_irq_data);
+ result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
+ &link_loss, true, &has_left_work);
- if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
- (dc_link->type == dc_connection_mst_branch)) {
- if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
- result = true;
- dm_handle_hpd_rx_irq(aconnector);
- goto out;
- } else if (hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
- result = false;
- dm_handle_hpd_rx_irq(aconnector);
+ if (!has_left_work)
+ goto out;
+
+ if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+ goto out;
+ }
+
+ if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
+ if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
+ hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ dm_handle_mst_sideband_msg(aconnector);
goto out;
}
- }
- /*
- * TODO: We need the lock to avoid touching DC state while it's being
- * modified during automated compliance testing, or when link loss
- * happens. While this should be split into subhandlers and proper
- * interfaces to avoid having to conditionally lock like this in the
- * outer layer, we need this workaround temporarily to allow MST
- * lightup in some scenarios to avoid timeout.
- */
- if (!amdgpu_in_reset(adev) &&
- (hpd_rx_irq_check_link_loss_status(dc_link, &hpd_irq_data) ||
- hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST)) {
- mutex_lock(&adev->dm.dc_lock);
- lock_flag = 1;
- }
+ if (link_loss) {
+ bool skip = false;
-#ifdef CONFIG_DRM_AMD_DC_HDCP
- result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL);
-#else
- result = dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL);
-#endif
- if (!amdgpu_in_reset(adev) && lock_flag)
- mutex_unlock(&adev->dm.dc_lock);
+ spin_lock(&offload_wq->offload_lock);
+ skip = offload_wq->is_handling_link_loss;
+
+ if (!skip)
+ offload_wq->is_handling_link_loss = true;
+
+ spin_unlock(&offload_wq->offload_lock);
+
+ if (!skip)
+ schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
+
+ goto out;
+ }
+ }
out:
if (result && !is_mst_root_connector) {
@@ -2901,6 +3268,10 @@ static void register_hpd_handlers(struct amdgpu_device *adev)
amdgpu_dm_irq_register_interrupt(adev, &int_params,
handle_hpd_rx_irq,
(void *) aconnector);
+
+ if (adev->dm.hpd_rx_offload_wq)
+ adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
+ aconnector;
}
}
}
@@ -2998,7 +3369,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
int i;
unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
- if (adev->asic_type >= CHIP_VEGA10)
+ if (adev->family >= AMDGPU_FAMILY_AI)
client_id = SOC15_IH_CLIENTID_DCE;
int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
@@ -3715,6 +4086,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
int32_t primary_planes;
enum dc_connection_type new_connection_type = dc_connection_none;
const struct dc_plane_cap *plane;
+ bool psr_feature_enabled = false;
dm->display_indexes_num = dm->dc->caps.max_streams;
/* Update the actual used number of crtc */
@@ -3783,18 +4155,32 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
#if defined(CONFIG_DRM_AMD_DC_DCN)
/* Use Outbox interrupt */
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_YELLOW_CARP:
- case CHIP_RENOIR:
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ case IP_VERSION(2, 1, 0):
if (register_outbox_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
break;
default:
- DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
+ DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n",
+ adev->ip_versions[DCE_HWIP][0]);
+ }
+
+ /* Determine whether to enable PSR support by default. */
+ if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) {
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ psr_feature_enabled = true;
+ break;
+ default:
+ psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK;
+ break;
+ }
}
#endif
@@ -3839,7 +4225,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
amdgpu_dm_update_connector_after_detect(aconnector);
register_backlight_device(dm, link);
- if (amdgpu_dc_feature_mask & DC_PSR_MASK)
+
+ if (psr_feature_enabled)
amdgpu_dm_set_psr_caps(link);
}
@@ -3880,27 +4267,33 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
+ default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
- case CHIP_RAVEN:
- case CHIP_NAVI12:
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_RENOIR:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
- case CHIP_YELLOW_CARP:
- if (dcn10_register_irq_handlers(dm->adev)) {
- DRM_ERROR("DM: Failed to initialize IRQ\n");
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ case IP_VERSION(3, 0, 3):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ if (dcn10_register_irq_handlers(dm->adev)) {
+ DRM_ERROR("DM: Failed to initialize IRQ\n");
+ goto fail;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%X\n",
+ adev->ip_versions[DCE_HWIP][0]);
goto fail;
}
- break;
#endif
- default:
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
- goto fail;
+ break;
}
return 0;
@@ -4047,42 +4440,44 @@ static int dm_early_init(void *handle)
adev->mode_info.num_hpd = 6;
adev->mode_info.num_dig = 6;
break;
+ default:
#if defined(CONFIG_DRM_AMD_DC_DCN)
- case CHIP_RAVEN:
- case CHIP_RENOIR:
- case CHIP_VANGOGH:
- adev->mode_info.num_crtc = 4;
- adev->mode_info.num_hpd = 4;
- adev->mode_info.num_dig = 4;
- break;
- case CHIP_NAVI10:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- adev->mode_info.num_crtc = 6;
- adev->mode_info.num_hpd = 6;
- adev->mode_info.num_dig = 6;
- break;
- case CHIP_YELLOW_CARP:
- adev->mode_info.num_crtc = 4;
- adev->mode_info.num_hpd = 4;
- adev->mode_info.num_dig = 4;
- break;
- case CHIP_NAVI14:
- case CHIP_DIMGREY_CAVEFISH:
- adev->mode_info.num_crtc = 5;
- adev->mode_info.num_hpd = 5;
- adev->mode_info.num_dig = 5;
- break;
- case CHIP_BEIGE_GOBY:
- adev->mode_info.num_crtc = 2;
- adev->mode_info.num_hpd = 2;
- adev->mode_info.num_dig = 2;
- break;
+ switch (adev->ip_versions[DCE_HWIP][0]) {
+ case IP_VERSION(2, 0, 2):
+ case IP_VERSION(3, 0, 0):
+ adev->mode_info.num_crtc = 6;
+ adev->mode_info.num_hpd = 6;
+ adev->mode_info.num_dig = 6;
+ break;
+ case IP_VERSION(2, 0, 0):
+ case IP_VERSION(3, 0, 2):
+ adev->mode_info.num_crtc = 5;
+ adev->mode_info.num_hpd = 5;
+ adev->mode_info.num_dig = 5;
+ break;
+ case IP_VERSION(2, 0, 3):
+ case IP_VERSION(3, 0, 3):
+ adev->mode_info.num_crtc = 2;
+ adev->mode_info.num_hpd = 2;
+ adev->mode_info.num_dig = 2;
+ break;
+ case IP_VERSION(1, 0, 0):
+ case IP_VERSION(1, 0, 1):
+ case IP_VERSION(3, 0, 1):
+ case IP_VERSION(2, 1, 0):
+ case IP_VERSION(3, 1, 2):
+ case IP_VERSION(3, 1, 3):
+ adev->mode_info.num_crtc = 4;
+ adev->mode_info.num_hpd = 4;
+ adev->mode_info.num_dig = 4;
+ break;
+ default:
+ DRM_ERROR("Unsupported DCE IP versions: 0x%x\n",
+ adev->ip_versions[DCE_HWIP][0]);
+ return -EINVAL;
+ }
#endif
- default:
- DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
- return -EINVAL;
+ break;
}
amdgpu_dm_set_irq_funcs(adev);
@@ -4301,12 +4696,7 @@ fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
tiling_info->gfx9.num_rb_per_se =
adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
tiling_info->gfx9.shaderEnable = 1;
- if (adev->asic_type == CHIP_SIENNA_CICHLID ||
- adev->asic_type == CHIP_NAVY_FLOUNDER ||
- adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
- adev->asic_type == CHIP_BEIGE_GOBY ||
- adev->asic_type == CHIP_YELLOW_CARP ||
- adev->asic_type == CHIP_VANGOGH)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
}
@@ -4672,6 +5062,16 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs) |
AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
AMD_FMT_MOD_SET(DCC_RETILE, 1) |
AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
@@ -4682,6 +5082,17 @@ add_gfx10_3_modifiers(const struct amdgpu_device *adev,
AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
+ AMD_FMT_MOD_SET(PACKERS, pkrs) |
+ AMD_FMT_MOD_SET(DCC, 1) |
+ AMD_FMT_MOD_SET(DCC_RETILE, 1) |
+ AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
+ AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
+ AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B));
+
+ add_modifier(mods, size, capacity, AMD_FMT_MOD |
+ AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
+ AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
+ AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
AMD_FMT_MOD_SET(PACKERS, pkrs));
add_modifier(mods, size, capacity, AMD_FMT_MOD |
@@ -4726,7 +5137,7 @@ get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, u
case AMDGPU_FAMILY_NV:
case AMDGPU_FAMILY_VGH:
case AMDGPU_FAMILY_YC:
- if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0))
add_gfx10_3_modifiers(adev, mods, &size, &capacity);
else
add_gfx10_1_modifiers(adev, mods, &size, &capacity);
@@ -4763,10 +5174,27 @@ fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
if (modifier_has_dcc(modifier) && !force_disable_dcc) {
uint64_t dcc_address = afb->address + afb->base.offsets[1];
+ bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier);
dcc->enable = 1;
dcc->meta_pitch = afb->base.pitches[1];
- dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
+ dcc->independent_64b_blks = independent_64b_blks;
+ if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) {
+ if (independent_64b_blks && independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl;
+ else if (independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_128b;
+ else if (independent_64b_blks && !independent_128b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ } else {
+ if (independent_64b_blks)
+ dcc->dcc_ind_blk = hubp_ind_block_64b;
+ else
+ dcc->dcc_ind_blk = hubp_ind_block_unconstrained;
+ }
address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
@@ -5602,9 +6030,15 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
{
struct drm_connector *drm_connector = &aconnector->base;
uint32_t link_bandwidth_kbps;
+ uint32_t max_dsc_target_bpp_limit_override = 0;
link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
dc_link_get_link_cap(aconnector->dc_link));
+
+ if (stream->link && stream->link->local_sink)
+ max_dsc_target_bpp_limit_override =
+ stream->link->local_sink->edid_caps.panel_patch.max_dsc_target_bpp_limit;
+
/* Set DSC policy according to dsc_clock_en */
dc_dsc_policy_set_enable_dsc_when_not_needed(
aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
@@ -5614,7 +6048,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
dsc_caps,
aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ max_dsc_target_bpp_limit_override,
link_bandwidth_kbps,
&stream->timing,
&stream->timing.dsc_cfg)) {
@@ -5654,7 +6088,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
* - Cinema HFR (48 FPS)
* - TV/PAL (50 FPS)
* - Commonly used (60 FPS)
- * - Multiples of 24 (48,72,96 FPS)
+ * - Multiples of 24 (48,72,96,120 FPS)
*
* The list of standards video format is not huge and can be added to the
* connector modeset list beforehand. With that, userspace can leverage
@@ -5965,6 +6399,7 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
state->freesync_config = cur->freesync_config;
state->cm_has_degamma = cur->cm_has_degamma;
state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->force_dpms_off = cur->force_dpms_off;
/* TODO Duplicate dc_stream after objects are stream object is flattened */
return &state->base;
@@ -7615,19 +8050,19 @@ static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
/* Standard FPS values
*
- * 23.976 - TV/NTSC
- * 24 - Cinema
- * 25 - TV/PAL
- * 29.97 - TV/NTSC
- * 30 - TV/NTSC
- * 48 - Cinema HFR
- * 50 - TV/PAL
- * 60 - Commonly used
- * 48,72,96 - Multiples of 24
+ * 23.976 - TV/NTSC
+ * 24 - Cinema
+ * 25 - TV/PAL
+ * 29.97 - TV/NTSC
+ * 30 - TV/NTSC
+ * 48 - Cinema HFR
+ * 50 - TV/PAL
+ * 60 - Commonly used
+ * 48,72,96,120 - Multiples of 24
*/
static const uint32_t common_rates[] = {
23976, 24000, 25000, 29970, 30000,
- 48000, 50000, 60000, 72000, 96000
+ 48000, 50000, 60000, 72000, 96000, 120000
};
/*
@@ -7755,7 +8190,17 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
break;
case DRM_MODE_CONNECTOR_DisplayPort:
aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
- aconnector->base.ycbcr_420_allowed =
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link->link_enc =
+ link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (!link->link_enc)
+ link->link_enc =
+ link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ }
+
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
link->link_enc->features.dp_ycbcr420_supported ? true : false;
break;
case DRM_MODE_CONNECTOR_DVID:
@@ -7870,7 +8315,8 @@ create_i2c(struct ddc_service *ddc_service,
snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
i2c_set_adapdata(&i2c->base, i2c);
i2c->ddc_service = ddc_service;
- i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
+ if (i2c->ddc_service->ddc_pin)
+ i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
return i2c;
}
@@ -8681,7 +9127,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* and rely on sending it from software.
*/
if (acrtc_attach->base.state->event &&
- acrtc_state->active_planes > 0) {
+ acrtc_state->active_planes > 0 &&
+ !acrtc_state->force_dpms_off) {
drm_crtc_vblank_get(pcrtc);
spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
@@ -10158,18 +10605,18 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
struct drm_crtc *crtc,
struct drm_crtc_state *new_crtc_state)
{
- struct drm_plane_state *new_cursor_state, *new_primary_state;
- int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
+ struct drm_plane *cursor = crtc->cursor, *underlying;
+ struct drm_plane_state *new_cursor_state, *new_underlying_state;
+ int i;
+ int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h;
/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
* cursor per pipe but it's going to inherit the scaling and
* positioning from the underlying pipe. Check the cursor plane's
- * blending properties match the primary plane's. */
+ * blending properties match the underlying planes'. */
- new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
- new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
- if (!new_cursor_state || !new_primary_state ||
- !new_cursor_state->fb || !new_primary_state->fb) {
+ new_cursor_state = drm_atomic_get_new_plane_state(state, cursor);
+ if (!new_cursor_state || !new_cursor_state->fb) {
return 0;
}
@@ -10178,15 +10625,34 @@ static int dm_check_crtc_cursor(struct drm_atomic_state *state,
cursor_scale_h = new_cursor_state->crtc_h * 1000 /
(new_cursor_state->src_h >> 16);
- primary_scale_w = new_primary_state->crtc_w * 1000 /
- (new_primary_state->src_w >> 16);
- primary_scale_h = new_primary_state->crtc_h * 1000 /
- (new_primary_state->src_h >> 16);
+ for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) {
+ /* Narrow down to non-cursor planes on the same CRTC as the cursor */
+ if (new_underlying_state->crtc != crtc || underlying == crtc->cursor)
+ continue;
+
+ /* Ignore disabled planes */
+ if (!new_underlying_state->fb)
+ continue;
- if (cursor_scale_w != primary_scale_w ||
- cursor_scale_h != primary_scale_h) {
- drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
- return -EINVAL;
+ underlying_scale_w = new_underlying_state->crtc_w * 1000 /
+ (new_underlying_state->src_w >> 16);
+ underlying_scale_h = new_underlying_state->crtc_h * 1000 /
+ (new_underlying_state->src_h >> 16);
+
+ if (cursor_scale_w != underlying_scale_w ||
+ cursor_scale_h != underlying_scale_h) {
+ drm_dbg_atomic(crtc->dev,
+ "Cursor [PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n",
+ cursor->base.id, cursor->name, underlying->base.id, underlying->name);
+ return -EINVAL;
+ }
+
+ /* If this plane covers the whole CRTC, no need to check planes underneath */
+ if (new_underlying_state->crtc_x <= 0 &&
+ new_underlying_state->crtc_y <= 0 &&
+ new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay &&
+ new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay)
+ break;
}
return 0;
@@ -10217,53 +10683,6 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm
}
#endif
-static int validate_overlay(struct drm_atomic_state *state)
-{
- int i;
- struct drm_plane *plane;
- struct drm_plane_state *new_plane_state;
- struct drm_plane_state *primary_state, *overlay_state = NULL;
-
- /* Check if primary plane is contained inside overlay */
- for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
- if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
- if (drm_atomic_plane_disabling(plane->state, new_plane_state))
- return 0;
-
- overlay_state = new_plane_state;
- continue;
- }
- }
-
- /* check if we're making changes to the overlay plane */
- if (!overlay_state)
- return 0;
-
- /* check if overlay plane is enabled */
- if (!overlay_state->crtc)
- return 0;
-
- /* find the primary plane for the CRTC that the overlay is enabled on */
- primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
- if (IS_ERR(primary_state))
- return PTR_ERR(primary_state);
-
- /* check if primary plane is enabled */
- if (!primary_state->crtc)
- return 0;
-
- /* Perform the bounds check to ensure the overlay plane covers the primary */
- if (primary_state->crtc_x < overlay_state->crtc_x ||
- primary_state->crtc_y < overlay_state->crtc_y ||
- primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
- primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
- DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
- return -EINVAL;
- }
-
- return 0;
-}
-
/**
* amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
* @dev: The DRM device
@@ -10306,6 +10725,8 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
struct dm_crtc_state *dm_old_crtc_state;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_mst_fairness_vars vars[MAX_PIPES];
+ struct drm_dp_mst_topology_state *mst_state;
+ struct drm_dp_mst_topology_mgr *mgr;
#endif
trace_amdgpu_dm_atomic_check_begin(state);
@@ -10445,10 +10866,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
goto fail;
}
- ret = validate_overlay(state);
- if (ret)
- goto fail;
-
/* Add new/modified planes */
for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
ret = dm_update_plane_state(dc, state, plane,
@@ -10514,6 +10931,33 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
lock_and_validation_needed = true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* set the slot info for each mst_state based on the link encoding format */
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ struct amdgpu_dm_connector *aconnector;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter iter;
+ u8 link_coding_cap;
+
+ if (!mgr->mst_state)
+ continue;
+
+ drm_connector_list_iter_begin(dev, &iter);
+ drm_for_each_connector_iter(connector, &iter) {
+ int id = connector->index;
+
+ if (id == mst_state->mgr->conn_base_id) {
+ aconnector = to_amdgpu_dm_connector(connector);
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+ drm_dp_mst_update_slots(mst_state, link_coding_cap);
+
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&iter);
+ }
+#endif
/**
* Streams and planes are reset when there are changes that affect
* bandwidth. Anything that affects bandwidth needs to go through
@@ -10821,6 +11265,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
struct dm_connector_state *dm_con_state = NULL;
+ struct dc_sink *sink;
struct drm_device *dev = connector->dev;
struct amdgpu_device *adev = drm_to_adev(dev);
@@ -10832,28 +11277,31 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
goto update;
}
- if (!edid) {
+ sink = amdgpu_dm_connector->dc_sink ?
+ amdgpu_dm_connector->dc_sink :
+ amdgpu_dm_connector->dc_em_sink;
+
+ if (!edid || !sink) {
dm_con_state = to_dm_connector_state(connector->state);
amdgpu_dm_connector->min_vfreq = 0;
amdgpu_dm_connector->max_vfreq = 0;
amdgpu_dm_connector->pixel_clock_mhz = 0;
+ connector->display_info.monitor_range.min_vfreq = 0;
+ connector->display_info.monitor_range.max_vfreq = 0;
+ freesync_capable = false;
goto update;
}
dm_con_state = to_dm_connector_state(connector->state);
- if (!amdgpu_dm_connector->dc_sink) {
- DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
- goto update;
- }
if (!adev->dm.freesync_module)
goto update;
- if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
- || amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
+ || sink->sink_signal == SIGNAL_TYPE_EDP) {
bool edid_check_required = false;
if (edid) {
@@ -10900,7 +11348,7 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
freesync_capable = true;
}
}
- } else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
if (i >= 0 && vsdb_info.freesync_supported) {
timing = &edid->detailed_timings[i];
@@ -10982,29 +11430,75 @@ uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
return value;
}
-int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
- struct aux_payload *payload, enum aux_return_code_type *operation_result)
+int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, struct dc_context *ctx,
+ uint8_t status_type, uint32_t *operation_result)
+{
+ struct amdgpu_device *adev = ctx->driver_context;
+ int return_status = -1;
+ struct dmub_notification *p_notify = adev->dm.dmub_notify;
+
+ if (is_cmd_aux) {
+ if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+ return_status = p_notify->aux_reply.length;
+ *operation_result = p_notify->result;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT) {
+ *operation_result = AUX_RET_ERROR_TIMEOUT;
+ } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) {
+ *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE;
+ } else {
+ *operation_result = AUX_RET_ERROR_UNKNOWN;
+ }
+ } else {
+ if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS) {
+ return_status = 0;
+ *operation_result = p_notify->sc_status;
+ } else {
+ *operation_result = SET_CONFIG_UNKNOWN_ERROR;
+ }
+ }
+
+ return return_status;
+}
+
+int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context *ctx,
+ unsigned int link_index, void *cmd_payload, void *operation_result)
{
struct amdgpu_device *adev = ctx->driver_context;
int ret = 0;
- dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
- ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
+ if (is_cmd_aux) {
+ dc_process_dmub_aux_transfer_async(ctx->dc,
+ link_index, (struct aux_payload *)cmd_payload);
+ } else if (dc_process_dmub_set_config_async(ctx->dc, link_index,
+ (struct set_config_cmd_payload *)cmd_payload,
+ adev->dm.dmub_notify)) {
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+ (uint32_t *)operation_result);
+ }
+
+ ret = wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ);
if (ret == 0) {
- *operation_result = AUX_RET_ERROR_TIMEOUT;
- return -1;
+ DRM_ERROR("wait_for_completion_timeout timeout!");
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT,
+ (uint32_t *)operation_result);
}
- *operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
- if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
- (*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
+ if (is_cmd_aux) {
+ if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
+ struct aux_payload *payload = (struct aux_payload *)cmd_payload;
- // For read case, Copy data to payload
- if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
- (*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
- memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
- adev->dm.dmub_notify->aux_reply.length);
+ payload->reply[0] = adev->dm.dmub_notify->aux_reply.command;
+ if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
+ payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) {
+ memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
+ adev->dm.dmub_notify->aux_reply.length);
+ }
+ }
}
- return adev->dm.dmub_notify->aux_reply.length;
+ return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux,
+ ctx, DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS,
+ (uint32_t *)operation_result);
}
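To summarize the async-to-sync flow that amdgpu_dm_process_dmub_aux_transfer_sync() now implements: fire the DMUB request, block on dmub_aux_transfer_done (completed by dmub_aux_setconfig_callback when the outbox notification arrives), then map the outcome onto the DMUB_ASYNC_TO_SYNC_ACCESS_* codes added to amdgpu_dm.h below. A condensed sketch of that wait step, simplified from the driver code above rather than copied from it; the helper name is hypothetical:

/* Simplified sketch of the wait/translate step (assumes the caller already
 * issued the async DMUB request for this link). */
static int example_dmub_wait_for_reply(struct amdgpu_device *adev)
{
	/* dmub_aux_transfer_done is completed from the outbox notification callback. */
	if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done,
					 10 * HZ))
		return DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT;

	return DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS;
}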
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index d1d353a7c77d..37e61a88d49e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -47,6 +47,15 @@
#define AMDGPU_DM_MAX_CRTC 6
#define AMDGPU_DM_MAX_NUM_EDP 2
+
+#define AMDGPU_DMUB_NOTIFICATION_MAX 5
+
+/*
+ * DMUB Async to Sync Mechanism Status
+ */
+#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1
+#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2
+#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3
/*
#include "include/amdgpu_dal_power_if.h"
#include "amdgpu_dm_irq.h"
@@ -86,6 +95,21 @@ struct dm_compressor_info {
uint64_t gpu_addr;
};
+typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify);
+
+/**
+ * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ
+ *
+ * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq
+ * @dmub_notify: notification for callback function
+ * @adev: amdgpu_device pointer
+ */
+struct dmub_hpd_work {
+ struct work_struct handle_hpd_work;
+ struct dmub_notification *dmub_notify;
+ struct amdgpu_device *adev;
+};
+
/**
* struct vblank_control_work - Work data for vblank control
* @work: Kernel work data for the work event
@@ -155,6 +179,48 @@ struct dal_allocation {
};
/**
+ * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq
+ * offload work
+ */
+struct hpd_rx_irq_offload_work_queue {
+ /**
+ * @wq: workqueue structure to queue offload work.
+ */
+ struct workqueue_struct *wq;
+ /**
+ * @offload_lock: To protect fields of offload work queue.
+ */
+ spinlock_t offload_lock;
+ /**
+ * @is_handling_link_loss: Used to prevent inserting link loss event when
+ * we're handling link loss
+ */
+ bool is_handling_link_loss;
+ /**
+ * @aconnector: The aconnector that this work queue is attached to
+ */
+ struct amdgpu_dm_connector *aconnector;
+};
+
+/**
+ * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure
+ */
+struct hpd_rx_irq_offload_work {
+ /**
+ * @work: offload work
+ */
+ struct work_struct work;
+ /**
+ * @data: reference irq data which is used while handling offload work
+ */
+ union hpd_irq_data data;
+ /**
+ * @offload_wq: offload work queue that this work is queued to
+ */
+ struct hpd_rx_irq_offload_work_queue *offload_wq;
+};
+
+/**
* struct amdgpu_display_manager - Central amdgpu display manager device
*
* @dc: Display Core control structure
@@ -190,9 +256,31 @@ struct amdgpu_display_manager {
*/
struct dmub_srv *dmub_srv;
+ /**
+ * @dmub_notify:
+ *
+ * Notification from DMUB.
+ */
+
struct dmub_notification *dmub_notify;
/**
+ * @dmub_callback:
+ *
+ * Callback functions to handle notification from DMUB.
+ */
+
+ dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
+ * @dmub_thread_offload:
+ *
+ * Flag to indicate if callback is offload.
+ */
+
+ bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX];
+
+ /**
* @dmub_fb_info:
*
* Framebuffer regions for the DMUB.
@@ -422,7 +510,12 @@ struct amdgpu_display_manager {
*/
struct crc_rd_work *crc_rd_wrk;
#endif
-
+ /**
+ * @hpd_rx_offload_wq:
+ *
+ * Work queue to offload works of hpd_rx_irq
+ */
+ struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq;
/**
* @mst_encoders:
*
@@ -439,6 +532,7 @@ struct amdgpu_display_manager {
*/
struct list_head da_list;
struct completion dmub_aux_transfer_done;
+ struct workqueue_struct *delayed_hpd_wq;
/**
* @brightness:
@@ -542,6 +636,8 @@ struct dm_crtc_state {
bool dsc_force_changed;
bool vrr_supported;
+
+ bool force_dpms_off;
struct mod_freesync_config freesync_config;
struct dc_info_packet vrr_infopacket;
@@ -632,6 +728,7 @@ void amdgpu_dm_update_connector_after_detect(
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
-int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
- struct aux_payload *payload, enum aux_return_code_type *operation_result);
+int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux,
+ struct dc_context *ctx, unsigned int link_index,
+ void *payload, void *operation_result);
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index 87daa78a32b8..3655663e079b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -247,6 +247,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
{
struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
struct dc_link *link = connector->dc_link;
+ struct dc *dc = (struct dc *)link->dc;
struct dc_link_settings prefer_link_settings;
char *wr_buf = NULL;
const uint32_t wr_buf_size = 40;
@@ -263,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -293,6 +294,9 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
case LINK_RATE_RBR2:
case LINK_RATE_HIGH2:
case LINK_RATE_HIGH3:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR10:
+#endif
break;
default:
valid_input = false;
@@ -313,7 +317,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf,
prefer_link_settings.lane_count = param[0];
prefer_link_settings.link_rate = param[1];
- dp_retrain_link_dp_test(link, &prefer_link_settings, false);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
kfree(wr_buf);
return size;
@@ -378,9 +382,9 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf,
return -EINVAL;
snprintf(rd_buf, rd_buf_size, " %d %d %d\n",
- link->cur_lane_setting.VOLTAGE_SWING,
- link->cur_lane_setting.PRE_EMPHASIS,
- link->cur_lane_setting.POST_CURSOR2);
+ link->cur_lane_setting[0].VOLTAGE_SWING,
+ link->cur_lane_setting[0].PRE_EMPHASIS,
+ link->cur_lane_setting[0].POST_CURSOR2);
while (size) {
if (*pos >= rd_buf_size)
@@ -487,7 +491,7 @@ static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf,
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -639,7 +643,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
if (!wr_buf)
return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -732,7 +736,7 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
}
for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++)
- link_training_settings.lane_settings[i] = link->cur_lane_setting;
+ link_training_settings.lane_settings[i] = link->cur_lane_setting[i];
dc_link_set_test_pattern(
link,
@@ -914,7 +918,7 @@ static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
&param, buf,
max_param_num,
&param_nums)) {
@@ -1211,7 +1215,7 @@ static ssize_t trigger_hotplug(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1396,7 +1400,7 @@ static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1581,7 +1585,7 @@ static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1766,7 +1770,7 @@ static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -1944,7 +1948,7 @@ static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *bu
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
@@ -2382,7 +2386,7 @@ static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
return -ENOSPC;
}
- if (parse_write_buffer_into_params(wr_buf, size,
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
(long *)param, buf,
max_param_num,
&param_nums)) {
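The repeated fix in these debugfs write handlers is to bound parse_write_buffer_into_params() by the fixed allocation size (wr_buf_size) rather than by the user-controlled write length, so parsing can never run past the buffer that was actually allocated. Below is a minimal standalone sketch of that bounded-copy idea; the demo_* names and the 40-byte size are illustrative only, not the driver's parser.

#include <stdio.h>
#include <string.h>

#define DEMO_WR_BUF_SIZE 40

/* Copy at most buf_size - 1 bytes of user input into a fixed buffer and
 * NUL-terminate it, no matter how large the incoming write was. */
static void demo_bounded_copy(char *wr_buf, size_t buf_size,
			      const char *user_buf, size_t user_len)
{
	size_t n = user_len < buf_size - 1 ? user_len : buf_size - 1;

	memcpy(wr_buf, user_buf, n);
	wr_buf[n] = '\0';
}

int main(void)
{
	char wr_buf[DEMO_WR_BUF_SIZE];
	const char oversized[] =
		"4 0x14 plus a long tail that must never spill past the 40-byte buffer";

	demo_bounded_copy(wr_buf, sizeof(wr_buf), oversized, strlen(oversized));
	printf("parser sees only: \"%s\"\n", wr_buf);
	return 0;
}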
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index c5f1dc3b5961..5bfdc66b5867 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -448,6 +448,8 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
struct mod_hdcp_display *display = &hdcp_work[link_index].display;
struct mod_hdcp_link *link = &hdcp_work[link_index].link;
struct drm_connector_state *conn_state;
+ struct dc_sink *sink = NULL;
+ bool link_is_hdcp14 = false;
if (config->dpms_off) {
hdcp_remove_display(hdcp_work, link_index, aconnector);
@@ -460,8 +462,13 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->index = aconnector->base.index;
display->state = MOD_HDCP_DISPLAY_ACTIVE;
- if (aconnector->dc_sink != NULL)
- link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal);
+ if (aconnector->dc_sink)
+ sink = aconnector->dc_sink;
+ else if (aconnector->dc_em_sink)
+ sink = aconnector->dc_em_sink;
+
+ if (sink != NULL)
+ link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal);
display->controller = CONTROLLER_ID_D0 + config->otg_inst;
display->dig_fe = config->dig_fe;
@@ -470,8 +477,9 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
display->stream_enc_idx = config->stream_enc_idx;
link->link_enc_idx = config->link_enc_idx;
link->phy_idx = config->phy_idx;
- link->hdcp_supported_informational = dc_link_is_hdcp14(aconnector->dc_link,
- aconnector->dc_sink->sink_signal) ? 1 : 0;
+ if (sink)
+ link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal);
+ link->hdcp_supported_informational = link_is_hdcp14;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
link->dp.assr_enabled = config->assr_enabled;
link->dp.mst_enabled = config->mst_enabled;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 6fee12c91ef5..8cbeeb7c986d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -40,6 +40,39 @@
#include "dm_helpers.h"
+struct monitor_patch_info {
+ unsigned int manufacturer_id;
+ unsigned int product_id;
+ void (*patch_func)(struct dc_edid_caps *edid_caps, unsigned int param);
+ unsigned int patch_param;
+};
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param);
+
+static const struct monitor_patch_info monitor_patch_table[] = {
+{0x6D1E, 0x5BBF, set_max_dsc_bpp_limit, 15},
+{0x6D1E, 0x5B9A, set_max_dsc_bpp_limit, 15},
+};
+
+static void set_max_dsc_bpp_limit(struct dc_edid_caps *edid_caps, unsigned int param)
+{
+ if (edid_caps)
+ edid_caps->panel_patch.max_dsc_target_bpp_limit = param;
+}
+
+static int amdgpu_dm_patch_edid_caps(struct dc_edid_caps *edid_caps)
+{
+ int i, ret = 0;
+
+ for (i = 0; i < ARRAY_SIZE(monitor_patch_table); i++)
+ if ((edid_caps->manufacturer_id == monitor_patch_table[i].manufacturer_id)
+ && (edid_caps->product_id == monitor_patch_table[i].product_id)) {
+ monitor_patch_table[i].patch_func(edid_caps, monitor_patch_table[i].patch_param);
+ ret++;
+ }
+
+ return ret;
+}
+
/* dm_helpers_parse_edid_caps
*
* Parse edid caps
@@ -125,6 +158,8 @@ enum dc_edid_status dm_helpers_parse_edid_caps(
kfree(sads);
kfree(sadb);
+ amdgpu_dm_patch_edid_caps(edid_caps);
+
return result;
}
@@ -184,6 +219,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
struct drm_dp_mst_topology_mgr *mst_mgr;
struct drm_dp_mst_port *mst_port;
bool ret;
+ u8 link_coding_cap = DP_8b_10b_ENCODING;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
/* Accessing the connector state is required for vcpi_slots allocation
@@ -203,6 +239,10 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
mst_port = aconnector->port;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link);
+#endif
+
if (enable) {
ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
@@ -216,7 +256,7 @@ bool dm_helpers_dp_mst_write_payload_allocation_table(
}
/* It's OK for this to fail */
- drm_dp_update_payload_part1(mst_mgr);
+ drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == DP_CAP_ANSI_128B132B) ? 0:1);
/* mst_mgr->payloads are VC payload notify MST branch using DPCD or
* AUX message. The sequence is slot 1-63 allocated sequence for each
@@ -648,8 +688,21 @@ int dm_helper_dmub_aux_transfer_sync(
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
- return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, operation_result);
+ return amdgpu_dm_process_dmub_aux_transfer_sync(true, ctx,
+ link->link_index, (void *)payload,
+ (void *)operation_result);
}
+
+int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+ const struct dc_link *link,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result)
+{
+ return amdgpu_dm_process_dmub_aux_transfer_sync(false, ctx,
+ link->link_index, (void *)payload,
+ (void *)operation_result);
+}
+
void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
{
/* TODO: something */
@@ -751,3 +804,17 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream)
&new_downspread.raw,
sizeof(new_downspread));
}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz)
+{
+	// FPGA programming for this clock in the diags framework needs to
+	// go through the dm layer, therefore leave a dummy interface here
+}
+
+
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable)
+{
+	/* TODO: add periodic detection implementation */
+}
+#endif
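dm_helper_dmub_aux_transfer_sync() and the new dm_helpers_dmub_set_config_sync() above both funnel into a single amdgpu_dm entry point, with a boolean selecting how the void pointers are interpreted. The following standalone sketch shows that dispatch-on-flag pattern; the demo_* payload types are hypothetical stand-ins, not the driver's aux_payload and set_config_cmd_payload structures.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the two payload types. */
struct demo_aux_payload { unsigned int address; };
struct demo_config_payload { unsigned int msg_type; };

static int demo_process_aux(const struct demo_aux_payload *p)
{
	printf("aux transfer to address 0x%x\n", p->address);
	return 0;
}

static int demo_process_set_config(const struct demo_config_payload *p)
{
	printf("set_config msg_type %u\n", p->msg_type);
	return 0;
}

/* One entry point; the flag decides how the opaque payload is interpreted,
 * mirroring the is_cmd_aux split in the hunks above. */
static int demo_process_dmub_sync(bool is_cmd_aux, void *payload)
{
	if (is_cmd_aux)
		return demo_process_aux(payload);
	return demo_process_set_config(payload);
}

int main(void)
{
	struct demo_aux_payload aux = { .address = 0x1234 };
	struct demo_config_payload cfg = { .msg_type = 2 };

	demo_process_dmub_sync(true, &aux);
	demo_process_dmub_sync(false, &cfg);
	return 0;
}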
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 7af0d58c231b..874a49b605c7 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -64,6 +64,8 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
+ payload.write_status_update =
+ (msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
payload.defer_delay = 0;
result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
@@ -542,7 +544,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
params[i].sink->ctx->dc->res_pool->dscs[0],
&params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ params[i].sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
0,
params[i].timing,
&params[i].timing->dsc_cfg)) {
@@ -574,7 +576,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
param.sink->ctx->dc->res_pool->dscs[0],
&param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
- 0,
+ param.sink->edid_caps.panel_patch.max_dsc_target_bpp_limit,
(int) kbps, param.timing, &dsc_config);
return dsc_config.bits_per_pixel;
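These two hunks stop passing a hard-coded 0 and instead forward the per-panel max_dsc_target_bpp_limit that dm_helpers_parse_edid_caps() now fills in from its quirk table. The standalone sketch below shows the general shape of that flow, looking up a cap by (manufacturer, product) and clamping the requested value; the demo_* names and units are hypothetical, and the real limit is applied inside dc_dsc_compute_config().

#include <stdio.h>

/* Hypothetical per-panel DSC cap, keyed the same way as the monitor patch
 * table added in amdgpu_dm_helpers.c above. */
struct demo_dsc_quirk {
	unsigned int manufacturer_id;
	unsigned int product_id;
	unsigned int max_target_bpp; /* 0 means no cap */
};

static const struct demo_dsc_quirk demo_quirks[] = {
	{ 0x6D1E, 0x5BBF, 15 },
	{ 0x6D1E, 0x5B9A, 15 },
};

/* Return the requested bpp, clamped to the quirk-table cap if one exists. */
static unsigned int demo_clamp_dsc_bpp(unsigned int mfr, unsigned int prod,
				       unsigned int requested_bpp)
{
	unsigned int i;

	for (i = 0; i < sizeof(demo_quirks) / sizeof(demo_quirks[0]); i++) {
		const struct demo_dsc_quirk *q = &demo_quirks[i];

		if (q->manufacturer_id == mfr && q->product_id == prod &&
		    q->max_target_bpp && requested_bpp > q->max_target_bpp)
			return q->max_target_bpp;
	}
	return requested_bpp;
}

int main(void)
{
	/* A sink on the quirk list is clamped from 20 bpp down to 15 bpp. */
	printf("%u bpp\n", demo_clamp_dsc_bpp(0x6D1E, 0x5BBF, 20));
	return 0;
}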
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 70a554f1e725..c022e56f9459 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -107,6 +107,8 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
+ unsigned int power_opt = 0;
+ bool psr_enable = true;
DRM_DEBUG_DRIVER("Enabling psr...\n");
@@ -133,7 +135,9 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
&stream, 1,
&params);
- return dc_link_set_psr_allow_active(link, true, false, false);
+ power_opt |= psr_power_opt_z10_static_screen;
+
+ return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
}
/*
@@ -144,10 +148,12 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
*/
bool amdgpu_dm_psr_disable(struct dc_stream_state *stream)
{
+ unsigned int power_opt = 0;
+ bool psr_enable = false;
DRM_DEBUG_DRIVER("Disabling psr...\n");
- return dc_link_set_psr_allow_active(stream->link, false, true, false);
+ return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt);
}
/*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
index b1bf80da3a55..ab0c6d191038 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -52,7 +52,7 @@ static DEFINE_PER_CPU(int, fpu_recursion_depth);
* This function tells if the code is already under FPU protection or not. A
* function that works as an API for a set of FPU operations can use this
* function for checking if the caller invoked it after DC_FP_START(). For
- * example, take a look at dcn2x.c file.
+ * example, take a look at dcn20_fpu.c file.
*/
inline void dc_assert_fp_enabled(void)
{
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 943fcb164876..b1f0d6260226 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -30,6 +30,7 @@ DC_LIBS += dcn20
DC_LIBS += dsc
DC_LIBS += dcn10 dml
DC_LIBS += dcn21
+DC_LIBS += dcn201
DC_LIBS += dcn30
DC_LIBS += dcn301
DC_LIBS += dcn302
@@ -58,7 +59,7 @@ include $(AMD_DC)
DISPLAY_CORE = dc.o dc_stat.o dc_link.o dc_resource.o dc_hw_sequencer.o dc_sink.o \
dc_surface.o dc_link_hwss.o dc_link_dp.o dc_link_ddc.o dc_debug.o dc_stream.o \
-dc_link_enc_cfg.o dc_link_dpcd.o
+dc_link_enc_cfg.o dc_link_dpia.o dc_link_dpcd.o
ifdef CONFIG_DRM_AMD_DC_DCN
DISPLAY_CORE += dc_vm_helper.o
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 6dbde74c1e06..a4bef4364afd 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -99,6 +99,10 @@ static enum bp_result get_firmware_info_v3_2(
struct bios_parser *bp,
struct dc_firmware_info *info);
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info);
+
static struct atom_hpd_int_record *get_hpd_record(struct bios_parser *bp,
struct atom_display_object_path_v2 *object);
@@ -1426,8 +1430,10 @@ static enum bp_result bios_parser_get_firmware_info(
break;
case 2:
case 3:
- case 4:
result = get_firmware_info_v3_2(bp, info);
+ break;
+ case 4:
+ result = get_firmware_info_v3_4(bp, info);
break;
default:
break;
@@ -1575,6 +1581,88 @@ static enum bp_result get_firmware_info_v3_2(
return BP_RESULT_OK;
}
+static enum bp_result get_firmware_info_v3_4(
+ struct bios_parser *bp,
+ struct dc_firmware_info *info)
+{
+ struct atom_firmware_info_v3_4 *firmware_info;
+ struct atom_common_table_header *header;
+ struct atom_data_revision revision;
+ struct atom_display_controller_info_v4_1 *dce_info_v4_1 = NULL;
+ struct atom_display_controller_info_v4_4 *dce_info_v4_4 = NULL;
+ if (!info)
+ return BP_RESULT_BADINPUT;
+
+ firmware_info = GET_IMAGE(struct atom_firmware_info_v3_4,
+ DATA_TABLES(firmwareinfo));
+
+ if (!firmware_info)
+ return BP_RESULT_BADBIOSTABLE;
+
+ memset(info, 0, sizeof(*info));
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(dce_info));
+
+ get_atom_data_table_revision(header, &revision);
+
+ switch (revision.major) {
+ case 4:
+ switch (revision.minor) {
+ case 4:
+ dce_info_v4_4 = GET_IMAGE(struct atom_display_controller_info_v4_4,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_4)
+ return BP_RESULT_BADBIOSTABLE;
+
+ /* 100MHz expected */
+ info->pll_info.crystal_frequency = dce_info_v4_4->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_4->dpphy_refclk_10khz * 10;
+ /* 50MHz expected */
+ info->i2c_engine_ref_clk = dce_info_v4_4->i2c_engine_refclk_10khz * 10;
+
+ /* Get SMU Display PLL VCO Frequency in KHz*/
+ info->smu_gpu_pll_output_freq = dce_info_v4_4->dispclk_pll_vco_freq * 10;
+ break;
+
+ default:
+		/* should not get here; fall back to v4_1 parsing as before */
+ dce_info_v4_1 = GET_IMAGE(struct atom_display_controller_info_v4_1,
+ DATA_TABLES(dce_info));
+
+ if (!dce_info_v4_1)
+ return BP_RESULT_BADBIOSTABLE;
+
+ info->pll_info.crystal_frequency = dce_info_v4_1->dce_refclk_10khz * 10;
+ info->dp_phy_ref_clk = dce_info_v4_1->dpphy_refclk_10khz * 10;
+ info->i2c_engine_ref_clk = dce_info_v4_1->i2c_engine_refclk_10khz * 10;
+ break;
+ }
+ break;
+
+ default:
+ ASSERT(0);
+ break;
+ }
+
+ header = GET_IMAGE(struct atom_common_table_header,
+ DATA_TABLES(smu_info));
+ get_atom_data_table_revision(header, &revision);
+
+	// We need to convert from 10 kHz units into kHz units.
+ info->default_memory_clk = firmware_info->bootup_mclk_in10khz * 10;
+
+ if (firmware_info->board_i2c_feature_id == 0x2) {
+ info->oem_i2c_present = true;
+ info->oem_i2c_obj_id = firmware_info->board_i2c_feature_gpio_id;
+ } else {
+ info->oem_i2c_present = false;
+ }
+
+ return BP_RESULT_OK;
+}
+
static enum bp_result bios_parser_get_encoder_cap_info(
struct dc_bios *dcb,
struct graphics_object_id object_id,
@@ -1604,6 +1692,16 @@ static enum bp_result bios_parser_get_encoder_cap_info(
ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0;
info->HDMI_6GB_EN = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ info->IS_DP2_CAPABLE = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0;
+ info->DP_UHBR10_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR10_EN) ? 1 : 0;
+ info->DP_UHBR13_5_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0;
+ info->DP_UHBR20_EN = (record->encodercaps &
+ ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0;
+#endif
info->DP_IS_USB_C = (record->encodercaps &
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 1 : 0;
@@ -2223,6 +2321,8 @@ static enum bp_result get_integrated_info_v2_2(
info->ext_disp_conn_info.checksum =
info_v2_2->extdispconninfo.checksum;
+ info->ext_disp_conn_info.fixdpvoltageswing =
+ info_v2_2->extdispconninfo.fixdpvoltageswing;
info->edp1_info.edp_backlight_pwm_hz =
le16_to_cpu(info_v2_2->edp1_info.edp_backlight_pwm_hz);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index f1f672a997d7..9afa5eb2e6d3 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -44,9 +44,7 @@
bp->base.ctx->logger
#define GET_INDEX_INTO_MASTER_TABLE(MasterOrData, FieldName)\
- (((char *)(&((\
- struct atom_master_list_of_##MasterOrData##_functions_v2_1 *)0)\
- ->FieldName)-(char *)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_##MasterOrData##_functions_v2_1, FieldName) / sizeof(uint16_t))
#define EXEC_BIOS_CMD_TABLE(fname, params)\
(amdgpu_atom_execute_table(((struct amdgpu_device *)bp->base.ctx->driver_context)->mode_info.atom_context, \
@@ -340,6 +338,13 @@ static enum bp_result transmitter_control_v1_7(
const struct command_table_helper *cmd = bp->cmd_helper;
struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0;
+
+ if (dc_is_dp_signal(cntl->signal))
+ hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0;
+#endif
+
dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter);
dig_v1_7.action = (uint8_t)cntl->action;
@@ -353,6 +358,9 @@ static enum bp_result transmitter_control_v1_7(
dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel);
dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id);
dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dig_v1_7.HPO_instance = hpo_instance;
+#endif
dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10;
if (cntl->action == TRANSMITTER_CONTROL_ENABLE ||
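The GET_INDEX_INTO_MASTER_TABLE rewrite above swaps the old null-pointer-cast offset trick for the standard offsetof() macro, which yields the same uint16_t index without relying on undefined behaviour. A standalone sketch with a hypothetical demo_master_list layout:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Hypothetical table of 16-bit command offsets, mirroring the shape of the
 * atom_master_list_of_*_functions_v2_1 structures used above. */
struct demo_master_list {
	uint16_t asic_init;
	uint16_t set_crtc_timing;
	uint16_t transmitter_control;
};

/* Index of a field when the table is viewed as an array of uint16_t. */
#define DEMO_INDEX_INTO_MASTER_TABLE(FieldName) \
	(offsetof(struct demo_master_list, FieldName) / sizeof(uint16_t))

int main(void)
{
	/* transmitter_control is the third uint16_t entry, so index 2. */
	printf("%zu\n", DEMO_INDEX_INTO_MASTER_TABLE(transmitter_control));
	return 0;
}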
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index cb3fd44cb1ed..eedc553f340e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -70,6 +70,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCN_VERSION_1_01:
case DCN_VERSION_2_0:
case DCN_VERSION_2_1:
+ case DCN_VERSION_2_01:
case DCN_VERSION_3_0:
case DCN_VERSION_3_01:
case DCN_VERSION_3_02:
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 0e18df1283b6..6b248cd2a461 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -459,9 +459,9 @@ static void dcn_bw_calc_rq_dlg_ttu(
struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs;
struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs;
struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs;
- struct _vcs_dpi_display_rq_params_st rq_param = {0};
- struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param = {0};
- struct _vcs_dpi_display_e2e_pipe_params_st input = { { { 0 } } };
+ struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param;
+ struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param;
+ struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input;
float total_active_bw = 0;
float total_prefetch_bw = 0;
int total_flip_bytes = 0;
@@ -470,45 +470,48 @@ static void dcn_bw_calc_rq_dlg_ttu(
memset(dlg_regs, 0, sizeof(*dlg_regs));
memset(ttu_regs, 0, sizeof(*ttu_regs));
memset(rq_regs, 0, sizeof(*rq_regs));
+ memset(rq_param, 0, sizeof(*rq_param));
+ memset(dlg_sys_param, 0, sizeof(*dlg_sys_param));
+ memset(input, 0, sizeof(*input));
for (i = 0; i < number_of_planes; i++) {
total_active_bw += v->read_bandwidth[i];
total_prefetch_bw += v->prefetch_bandwidth[i];
total_flip_bytes += v->total_immediate_flip_bytes[i];
}
- dlg_sys_param.total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
- if (dlg_sys_param.total_flip_bw < 0.0)
- dlg_sys_param.total_flip_bw = 0;
-
- dlg_sys_param.t_mclk_wm_us = v->dram_clock_change_watermark;
- dlg_sys_param.t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
- dlg_sys_param.t_urg_wm_us = v->urgent_watermark;
- dlg_sys_param.t_extra_us = v->urgent_extra_latency;
- dlg_sys_param.deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
- dlg_sys_param.total_flip_bytes = total_flip_bytes;
-
- pipe_ctx_to_e2e_pipe_params(pipe, &input.pipe);
- input.clks_cfg.dcfclk_mhz = v->dcfclk;
- input.clks_cfg.dispclk_mhz = v->dispclk;
- input.clks_cfg.dppclk_mhz = v->dppclk;
- input.clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
- input.clks_cfg.socclk_mhz = v->socclk;
- input.clks_cfg.voltage = v->voltage_level;
+ dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw);
+ if (dlg_sys_param->total_flip_bw < 0.0)
+ dlg_sys_param->total_flip_bw = 0;
+
+ dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark;
+ dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark;
+ dlg_sys_param->t_urg_wm_us = v->urgent_watermark;
+ dlg_sys_param->t_extra_us = v->urgent_extra_latency;
+ dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep;
+ dlg_sys_param->total_flip_bytes = total_flip_bytes;
+
+ pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe);
+ input->clks_cfg.dcfclk_mhz = v->dcfclk;
+ input->clks_cfg.dispclk_mhz = v->dispclk;
+ input->clks_cfg.dppclk_mhz = v->dppclk;
+ input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0;
+ input->clks_cfg.socclk_mhz = v->socclk;
+ input->clks_cfg.voltage = v->voltage_level;
// dc->dml.logger = pool->base.logger;
- input.dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
- input.dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
+ input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444;
+ input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? dm_hdmi : dm_dp;
//input[in_idx].dout.output_standard;
/*todo: soc->sr_enter_plus_exit_time??*/
- dlg_sys_param.t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
+ dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep;
- dml1_rq_dlg_get_rq_params(dml, &rq_param, input.pipe.src);
+ dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src);
dml1_extract_rq_regs(dml, rq_regs, rq_param);
dml1_rq_dlg_get_dlg_params(
dml,
dlg_regs,
ttu_regs,
- rq_param.dlg,
+ &rq_param->dlg,
dlg_sys_param,
input,
true,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 7fa0b007a7ea..6bd73e49a6d2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -94,6 +94,15 @@ AMD_DAL_CLK_MGR_DCN20 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn20/,$(CLK_MGR_DC
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
+# DCN201
+###############################################################################
+CLK_MGR_DCN201 = dcn201_clk_mgr.o
+
+AMD_DAL_CLK_MGR_DCN201 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn201/,$(CLK_MGR_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN201)
+
+###############################################################################
# DCN21
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index bb31541f8072..26f96ee32472 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -39,6 +39,7 @@
#include "dcn10/rv2_clk_mgr.h"
#include "dcn20/dcn20_clk_mgr.h"
#include "dcn21/rn_clk_mgr.h"
+#include "dcn201/dcn201_clk_mgr.h"
#include "dcn30/dcn30_clk_mgr.h"
#include "dcn301/vg_clk_mgr.h"
#include "dcn31/dcn31_clk_mgr.h"
@@ -99,11 +100,13 @@ void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_m
if (edp_num) {
for (panel_inst = 0; panel_inst < edp_num; panel_inst++) {
+ bool allow_active = false;
+
edp_link = edp_links[panel_inst];
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
clk_mgr->psr_allow_active_cache = edp_link->psr_settings.psr_allow_active;
- dc_link_set_psr_allow_active(edp_link, false, false, false);
+ dc_link_set_psr_allow_active(edp_link, &allow_active, false, false, NULL);
}
}
@@ -123,7 +126,7 @@ void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
if (!edp_link->psr_settings.psr_feature_enabled)
continue;
dc_link_set_psr_allow_active(edp_link,
- clk_mgr->psr_allow_active_cache, false, false);
+ &clk_mgr->psr_allow_active_cache, false, false, NULL);
}
}
@@ -256,6 +259,10 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
dcn3_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ dcn201_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base;
+ }
dcn20_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base;
}
@@ -278,13 +285,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
BREAK_TO_DEBUGGER();
return NULL;
}
- if (ASICREV_IS_YELLOW_CARP(asic_id.hw_internal_rev)) {
- /* TODO: to add DCN31 clk_mgr support, once CLK IP header files are available,
- * for now use DCN3.0 clk mgr.
- */
- dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
- return &clk_mgr->base.base;
- }
+
+ dcn31_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
return &clk_mgr->base.base;
}
#endif
@@ -321,7 +323,6 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
break;
case FAMILY_YELLOW_CARP:
- if (ASICREV_IS_YELLOW_CARP(clk_mgr_base->ctx->asic_id.hw_internal_rev))
dcn31_clk_mgr_destroy(clk_mgr);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 0d01aa9f15a6..2108bff49d4e 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -38,6 +38,8 @@
#include "clk/clk_11_0_0_offset.h"
#include "clk/clk_11_0_0_sh_mask.h"
+#include "irq/dcn20/irq_service_dcn20.h"
+
#undef FN
#define FN(reg_name, field_name) \
clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
@@ -221,6 +223,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool force_reset = false;
bool p_state_change_support;
int total_plane_count;
+ int irq_src;
+ uint32_t hpd_state;
if (dc->work_arounds.skip_clock_update)
return;
@@ -238,7 +242,13 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
if (dc->res_pool->pp_smu)
pp_smu = &dc->res_pool->pp_smu->nv_funcs;
- if (display_count == 0)
+ for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) {
+ hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src);
+ if (hpd_state)
+ break;
+ }
+
+ if (display_count == 0 && !hpd_state)
enter_display_off = true;
if (enter_display_off == safe_to_lower) {
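The new loop in dcn2_update_clocks() checks every HPD interrupt source and keeps the display domain powered while any hot-plug is still pending, even if the active display count has dropped to zero. A standalone sketch of that any-pending scan, with a hypothetical demo_get_hpd_state() standing in for dc_get_hpd_state_dcn20():

#include <stdio.h>

/* Hypothetical HPD sources, mirroring DC_IRQ_SOURCE_HPD1..HPD6 above. */
enum demo_hpd_src { DEMO_HPD1, DEMO_HPD2, DEMO_HPD3,
		    DEMO_HPD4, DEMO_HPD5, DEMO_HPD6 };

/* Stand-in for dc_get_hpd_state_dcn20(): nonzero when a hot-plug is
 * pending on the given source. */
static unsigned int demo_get_hpd_state(int src)
{
	return src == DEMO_HPD3; /* pretend HPD3 is asserted */
}

int main(void)
{
	unsigned int display_count = 0;
	unsigned int hpd_state = 0;
	int src;

	/* Scan all HPD sources; a pending hot-plug blocks display-off. */
	for (src = DEMO_HPD1; src <= DEMO_HPD6; src++) {
		hpd_state = demo_get_hpd_state(src);
		if (hpd_state)
			break;
	}

	if (display_count == 0 && !hpd_state)
		printf("safe to enter display-off\n");
	else
		printf("stay powered: HPD pending or displays active\n");
	return 0;
}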
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
new file mode 100644
index 000000000000..db9950244c7b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "core_types.h"
+#include "dccg.h"
+#include "clk_mgr_internal.h"
+#include "dcn201_clk_mgr.h"
+#include "dcn20/dcn20_clk_mgr.h"
+#include "dce100/dce_clk_mgr.h"
+#include "dm_helpers.h"
+#include "dm_services.h"
+
+#include "cyan_skillfish_ip_offset.h"
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "clk/clk_11_0_1_offset.h"
+#include "clk/clk_11_0_1_sh_mask.h"
+
+#define REG(reg) \
+ (clk_mgr->regs->reg)
+
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#undef FN
+#define FN(reg_name, field_name) \
+ clk_mgr->clk_mgr_shift->field_name, clk_mgr->clk_mgr_mask->field_name
+
+#define CTX \
+ clk_mgr->base.ctx
+#define DC_LOGGER \
+ clk_mgr->base.ctx->logger
+
+static const struct clk_mgr_registers clk_mgr_regs = {
+ CLK_COMMON_REG_LIST_DCN_201()
+};
+
+static const struct clk_mgr_shift clk_mgr_shift = {
+ CLK_COMMON_MASK_SH_LIST_DCN201_BASE(__SHIFT)
+};
+
+static const struct clk_mgr_mask clk_mgr_mask = {
+ CLK_COMMON_MASK_SH_LIST_DCN201_BASE(_MASK)
+};
+
+void dcn201_update_clocks_vbios(struct clk_mgr *clk_mgr,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->clks.dppclk_khz)) {
+ clk_mgr->clks.dppclk_khz = new_clocks->dppclk_khz;
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr->clks.dispclk_khz)) {
+ clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz;
+ update_dispclk = true;
+ }
+
+ if (update_dppclk || update_dispclk) {
+ struct bp_set_dce_clock_parameters dce_clk_params;
+ struct dc_bios *bp = clk_mgr->ctx->dc_bios;
+
+ if (update_dispclk) {
+ memset(&dce_clk_params, 0, sizeof(dce_clk_params));
+ dce_clk_params.target_clock_frequency = new_clocks->dispclk_khz;
+ dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS;
+ dce_clk_params.clock_type = DCECLOCK_TYPE_DISPLAY_CLOCK;
+ bp->funcs->set_dce_clock(bp, &dce_clk_params);
+ }
+		/* Currently there is no DCECLOCK_TYPE_DPPCLK type defined in the VBIOS interface.
+		 * The VBIOS programs DPPCLK to the same limitation as DispCLK.
+ */
+ }
+}
+
+static void dcn201_init_clocks(struct clk_mgr *clk_mgr)
+{
+ memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
+ clk_mgr->clks.p_state_change_support = true;
+ clk_mgr->clks.prev_p_state_change_support = true;
+ clk_mgr->clks.max_supported_dppclk_khz = 1200000;
+ clk_mgr->clks.max_supported_dispclk_khz = 1200000;
+}
+
+static void dcn201_update_clocks(struct clk_mgr *clk_mgr_base,
+ struct dc_state *context,
+ bool safe_to_lower)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
+ struct dc *dc = clk_mgr_base->ctx->dc;
+ int display_count;
+ bool update_dppclk = false;
+ bool update_dispclk = false;
+ bool enter_display_off = false;
+ bool dpp_clock_lowered = false;
+ bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
+
+ if (dc->work_arounds.skip_clock_update)
+ return;
+
+ if (clk_mgr_base->clks.dispclk_khz == 0 ||
+ dc->debug.force_clock_mode & 0x1) {
+ force_reset = true;
+
+ dcn2_read_clocks_from_hw_dentist(clk_mgr_base);
+ }
+
+ display_count = clk_mgr_helper_get_active_display_cnt(dc, context);
+
+ if (display_count == 0)
+ enter_display_off = true;
+
+ if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz))
+ clk_mgr_base->clks.phyclk_khz = new_clocks->phyclk_khz;
+
+ if (dc->debug.force_min_dcfclk_mhz > 0)
+ new_clocks->dcfclk_khz = (new_clocks->dcfclk_khz > (dc->debug.force_min_dcfclk_mhz * 1000)) ?
+ new_clocks->dcfclk_khz : (dc->debug.force_min_dcfclk_mhz * 1000);
+
+ if (should_set_clock(safe_to_lower, new_clocks->dcfclk_khz, clk_mgr_base->clks.dcfclk_khz))
+ clk_mgr_base->clks.dcfclk_khz = new_clocks->dcfclk_khz;
+
+ if (should_set_clock(safe_to_lower,
+ new_clocks->dcfclk_deep_sleep_khz, clk_mgr_base->clks.dcfclk_deep_sleep_khz))
+ clk_mgr_base->clks.dcfclk_deep_sleep_khz = new_clocks->dcfclk_deep_sleep_khz;
+
+ if (should_set_clock(safe_to_lower, new_clocks->socclk_khz, clk_mgr_base->clks.socclk_khz))
+ clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz;
+
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz))
+ clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz;
+
+ if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
+ if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
+ dpp_clock_lowered = true;
+ clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz;
+
+ update_dppclk = true;
+ }
+
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
+ clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
+
+ update_dispclk = true;
+ }
+
+ if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) {
+ if (dpp_clock_lowered) {
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
+ } else {
+ if (update_dppclk || update_dispclk)
+ dcn20_update_clocks_update_dentist(clk_mgr, context);
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
+ }
+ }
+}
+
+struct clk_mgr_funcs dcn201_funcs = {
+ .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
+ .update_clocks = dcn201_update_clocks,
+ .init_clocks = dcn201_init_clocks,
+ .get_clock = dcn2_get_clock,
+};
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg)
+{
+ struct dc_debug_options *debug = &ctx->dc->debug;
+ struct dc_bios *bp = ctx->dc_bios;
+ clk_mgr->base.ctx = ctx;
+ clk_mgr->base.funcs = &dcn201_funcs;
+ clk_mgr->regs = &clk_mgr_regs;
+ clk_mgr->clk_mgr_shift = &clk_mgr_shift;
+ clk_mgr->clk_mgr_mask = &clk_mgr_mask;
+
+ clk_mgr->dccg = dccg;
+
+ clk_mgr->dfs_bypass_disp_clk = 0;
+
+ clk_mgr->dprefclk_ss_percentage = 0;
+ clk_mgr->dprefclk_ss_divider = 1000;
+ clk_mgr->ss_on_dprefclk = false;
+
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) {
+ dcn201_funcs.update_clocks = dcn2_update_clocks_fpga;
+ clk_mgr->base.dprefclk_khz = 600000;
+ clk_mgr->base.dentist_vco_freq_khz = 3000000;
+ } else {
+ clk_mgr->base.dprefclk_khz = REG_READ(CLK4_CLK2_CURRENT_CNT);
+ clk_mgr->base.dprefclk_khz *= 100;
+
+ if (clk_mgr->base.dprefclk_khz == 0)
+ clk_mgr->base.dprefclk_khz = 600000;
+
+ REG_GET(CLK4_CLK_PLL_REQ, FbMult_int, &clk_mgr->base.dentist_vco_freq_khz);
+ clk_mgr->base.dentist_vco_freq_khz *= 100000;
+
+ if (clk_mgr->base.dentist_vco_freq_khz == 0)
+ clk_mgr->base.dentist_vco_freq_khz = 3000000;
+ }
+
+ if (!debug->disable_dfs_bypass && bp->integrated_info)
+ if (bp->integrated_info->gpu_cap_info & DFS_BYPASS_ENABLE)
+ clk_mgr->dfs_bypass_enabled = true;
+
+ dce_clock_read_ss_info(clk_mgr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
new file mode 100644
index 000000000000..ae463baaff47
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn201/dcn201_clk_mgr.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_CLK_MGR_H__
+#define __DCN201_CLK_MGR_H__
+
+void dcn201_clk_mgr_construct(struct dc_context *ctx,
+ struct clk_mgr_internal *clk_mgr,
+ struct pp_smu_funcs *pp_smu,
+ struct dccg *dccg);
+
+#endif //__DCN201_CLK_MGR_H__
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 6185f9475fa2..ac2d4c4f04e4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -42,6 +42,7 @@
#include "clk/clk_10_0_2_sh_mask.h"
#include "renoir_ip_offset.h"
+#include "irq/dcn21/irq_service_dcn21.h"
/* Constants */
@@ -66,11 +67,9 @@ int rn_get_active_display_cnt_wa(
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- /* Extend the WA to DP for Linux*/
if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
- stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK ||
- stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
+ stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
tmds_present = true;
}
@@ -131,9 +130,11 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
int display_count;
+ int irq_src;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
+ uint32_t hpd_state;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
@@ -149,8 +150,15 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
display_count = rn_get_active_display_cnt_wa(dc, context);
+
+ for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) {
+ hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src);
+ if (hpd_state)
+ break;
+ }
+
/* if we can go lower, go lower */
- if (display_count == 0) {
+ if (display_count == 0 && !hpd_state) {
rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER);
/* update power state */
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
index 7046da14bb2a..3eee32faa208 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn301/vg_clk_mgr.c
@@ -582,8 +582,8 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 7.95,
+ .sr_enter_plus_exit_time_us = 9,
.valid = true,
},
{
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
index 4a4894e9d9c9..f4c9a458ace8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c
@@ -87,7 +87,7 @@ int dcn31_get_active_display_cnt_wa(
const struct dc_link *link = dc->links[i];
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
- if (link->link_enc->funcs->is_dig_enabled &&
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
display_count++;
}
@@ -142,6 +142,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, true);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
@@ -166,6 +167,7 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW &&
new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) {
dcn31_smu_set_Z9_support(clk_mgr, false);
+ dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false);
clk_mgr_base->clks.zstate_support = new_clocks->zstate_support;
}
@@ -217,14 +219,17 @@ static void dcn31_update_clocks(struct clk_mgr *clk_mgr_base,
update_dispclk = true;
}
- /* TODO: add back DTO programming when DPPCLK restore is fixed in FSDL*/
if (dpp_clock_lowered) {
// increase per DPP DTO before lowering global dppclk
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
} else {
// increase global DPPCLK before lowering per DPP DTO
if (update_dppclk || update_dispclk)
dcn31_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz);
+ // always update dtos unless clock is lowered and not safe to lower
+ if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz)
+ dcn20_update_clocks_update_dpp_dto(clk_mgr, context, safe_to_lower);
}
// notify DMCUB of latest clocks
@@ -366,32 +371,32 @@ static struct wm_table lpddr5_wm_table = {
.wm_inst = WM_A,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 5.32,
- .sr_enter_plus_exit_time_us = 6.38,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_B,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.82,
- .sr_enter_plus_exit_time_us = 11.196,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_C,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.89,
- .sr_enter_plus_exit_time_us = 11.24,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
{
.wm_inst = WM_D,
.wm_type = WM_TYPE_PSTATE_CHG,
.pstate_latency_us = 11.65333,
- .sr_exit_time_us = 9.748,
- .sr_enter_plus_exit_time_us = 11.102,
+ .sr_exit_time_us = 11.5,
+ .sr_enter_plus_exit_time_us = 14.5,
.valid = true,
},
}
@@ -518,14 +523,21 @@ static unsigned int find_clk_for_voltage(
unsigned int voltage)
{
int i;
+ int max_voltage = 0;
+ int clock = 0;
for (i = 0; i < NUM_SOC_VOLTAGE_LEVELS; i++) {
- if (clock_table->SocVoltage[i] == voltage)
+ if (clock_table->SocVoltage[i] == voltage) {
return clocks[i];
+ } else if (clock_table->SocVoltage[i] >= max_voltage &&
+ clock_table->SocVoltage[i] < voltage) {
+ max_voltage = clock_table->SocVoltage[i];
+ clock = clocks[i];
+ }
}
- ASSERT(0);
- return 0;
+ ASSERT(clock);
+ return clock;
}
void dcn31_clk_mgr_helper_populate_bw_params(
@@ -640,7 +652,7 @@ void dcn31_clk_mgr_construct(
sizeof(struct dcn31_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
- if (clk_mgr->smu_wm_set.wm_set == 0) {
+ if (!clk_mgr->smu_wm_set.wm_set) {
clk_mgr->smu_wm_set.wm_set = &dummy_wms;
clk_mgr->smu_wm_set.mc_address.quad_part = 0;
}
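find_clk_for_voltage() above no longer asserts when no table entry matches exactly; it falls back to the clock of the highest SocVoltage entry that does not exceed the requested voltage. A standalone sketch of that relaxed lookup, using a hypothetical four-level demo table:

#include <stdio.h>

#define DEMO_NUM_LEVELS 4

/* Hypothetical voltage/clock table, mirroring the SocVoltage lookup above. */
static const unsigned int demo_voltage[DEMO_NUM_LEVELS] = { 700, 800, 900, 1000 };
static const unsigned int demo_clock_khz[DEMO_NUM_LEVELS] = {
	400000, 600000, 800000, 1000000
};

/* Exact match wins; otherwise return the clock of the highest voltage that
 * is still below the request, matching the change above. */
static unsigned int demo_find_clk_for_voltage(unsigned int voltage)
{
	unsigned int max_voltage = 0;
	unsigned int clock = 0;
	int i;

	for (i = 0; i < DEMO_NUM_LEVELS; i++) {
		if (demo_voltage[i] == voltage)
			return demo_clock_khz[i];
		if (demo_voltage[i] >= max_voltage && demo_voltage[i] < voltage) {
			max_voltage = demo_voltage[i];
			clock = demo_clock_khz[i];
		}
	}
	return clock;
}

int main(void)
{
	/* 850 has no exact entry, so the 800 level's clock is returned. */
	printf("%u kHz\n", demo_find_clk_for_voltage(850));
	return 0;
}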
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c798c65d4276..12e5470fa567 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -229,6 +229,25 @@ static bool create_links(
DC_LOG_DC("BIOS object table - end");
+ /* Create a link for each usb4 dpia port */
+ for (i = 0; i < dc->res_pool->usb4_dpia_count; i++) {
+ struct link_init_data link_init_params = {0};
+ struct dc_link *link;
+
+ link_init_params.ctx = dc->ctx;
+ link_init_params.connector_index = i;
+ link_init_params.link_index = dc->link_count;
+ link_init_params.dc = dc;
+ link_init_params.is_dpia_link = true;
+
+ link = link_create(&link_init_params);
+ if (link) {
+ dc->links[dc->link_count] = link;
+ link->dc = dc;
+ ++dc->link_count;
+ }
+ }
+
for (i = 0; i < num_virtual_links; i++) {
struct dc_link *link = kzalloc(sizeof(*link), GFP_KERNEL);
struct encoder_init_data enc_init = {0};
@@ -255,6 +274,24 @@ static bool create_links(
goto failed_alloc;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) &&
+ dc->caps.dp_hpo &&
+ link->dc->res_pool->res_cap->num_hpo_dp_link_encoder > 0) {
+ /* FPGA case - Allocate HPO DP link encoder */
+ if (i < link->dc->res_pool->res_cap->num_hpo_dp_link_encoder) {
+ link->hpo_dp_link_enc = link->dc->res_pool->hpo_dp_link_enc[i];
+
+ if (link->hpo_dp_link_enc == NULL) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+ link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+ link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+ }
+ }
+#endif
+
link->link_status.dpcd_caps = &link->dpcd_caps;
enc_init.ctx = dc->ctx;
@@ -276,6 +313,75 @@ failed_alloc:
return false;
}
+/* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction. This can happen if the
+ * number of physical connectors is less than the number of DIGs.
+ */
+static bool create_link_encoders(struct dc *dc)
+{
+ bool res = true;
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return res;
+
+ /* Create as many link encoder objects as the platform supports. DPIA
+ * endpoints can be programmably mapped to any DIG.
+ */
+ if (num_dig_link_enc > dc->res_pool->dig_link_enc_count) {
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (!link_enc && dc->res_pool->funcs->link_enc_create_minimal) {
+ link_enc = dc->res_pool->funcs->link_enc_create_minimal(dc->ctx,
+ (enum engine_id)(ENGINE_ID_DIGA + i));
+ if (link_enc) {
+ dc->res_pool->link_encoders[i] = link_enc;
+ dc->res_pool->dig_link_enc_count++;
+ } else {
+ res = false;
+ }
+ }
+ }
+ }
+
+ return res;
+}
+
+/* Destroy any additional DIG link encoder objects created by
+ * create_link_encoders().
+ * NB: Must only be called after destroy_links().
+ */
+static void destroy_link_encoders(struct dc *dc)
+{
+ unsigned int num_usb4_dpia = dc->res_pool->res_cap->num_usb4_dpia;
+ unsigned int num_dig_link_enc = dc->res_pool->res_cap->num_dig_link_enc;
+ int i;
+
+ /* A platform without USB4 DPIA endpoints has a fixed mapping between DIG
+ * link encoders and physical display endpoints and does not require
+ * additional link encoder objects.
+ */
+ if (num_usb4_dpia == 0)
+ return;
+
+ for (i = 0; i < num_dig_link_enc; i++) {
+ struct link_encoder *link_enc = dc->res_pool->link_encoders[i];
+
+ if (link_enc) {
+ link_enc->funcs->destroy(&link_enc);
+ dc->res_pool->link_encoders[i] = NULL;
+ dc->res_pool->dig_link_enc_count--;
+ }
+ }
+}
+
static struct dc_perf_trace *dc_perf_trace_create(void)
{
return kzalloc(sizeof(struct dc_perf_trace), GFP_KERNEL);
@@ -709,6 +815,8 @@ static void dc_destruct(struct dc *dc)
destroy_links(dc);
+ destroy_link_encoders(dc);
+
if (dc->clk_mgr) {
dc_destroy_clk_mgr(dc->clk_mgr);
dc->clk_mgr = NULL;
@@ -913,6 +1021,12 @@ static bool dc_construct(struct dc *dc,
if (!create_links(dc, init_params->num_virtual_links))
goto fail;
+ /* Create additional DIG link encoder objects if fewer than the platform
+ * supports were created during link construction.
+ */
+ if (!create_link_encoders(dc))
+ goto fail;
+
/* Initialise DIG link encoder resource tracking variables. */
link_enc_cfg_init(dc, dc->current_state);
@@ -1544,7 +1658,7 @@ static uint8_t get_stream_mask(struct dc *dc, struct dc_state *context)
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc)
+void dc_z10_restore(const struct dc *dc)
{
if (dc->hwss.z10_restore)
dc->hwss.z10_restore(dc);
@@ -1773,6 +1887,27 @@ static bool is_flip_pending_in_pipes(struct dc *dc, struct dc_state *context)
return false;
}
+/* Perform updates here which need to be deferred until next vupdate
+ *
+ * i.e. blnd lut, 3dlut, and shaper lut bypass regs are double buffered
+ * but forcing lut memory to shutdown state is immediate. This causes
+ * single frame corruption as lut gets disabled mid-frame unless shutdown
+ * is deferred until after entering bypass.
+ */
+static void process_deferred_updates(struct dc *dc)
+{
+#ifdef CONFIG_DRM_AMD_DC_DCN
+ int i = 0;
+
+ if (dc->debug.enable_mem_low_power.bits.cm) {
+ ASSERT(dc->dcn_ip->max_num_dpp);
+ for (i = 0; i < dc->dcn_ip->max_num_dpp; i++)
+ if (dc->res_pool->dpps[i]->funcs->dpp_deferred_update)
+ dc->res_pool->dpps[i]->funcs->dpp_deferred_update(dc->res_pool->dpps[i]);
+ }
+#endif
+}
+
void dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
@@ -1783,6 +1918,11 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
post_surface_trace(dc);
+ if (dc->ctx->dce_version >= DCE_VERSION_MAX)
+ TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
+ else
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+
if (is_flip_pending_in_pipes(dc, context))
return;
@@ -1793,6 +1933,8 @@ void dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ process_deferred_updates(dc);
+
dc->hwss.optimize_bandwidth(dc, context);
dc->optimized_required = false;
@@ -1990,7 +2132,7 @@ static enum surface_update_type get_plane_info_update_type(const struct dc_surfa
}
if (u->plane_info->dcc.enable != u->surface->dcc.enable
- || u->plane_info->dcc.independent_64b_blks != u->surface->dcc.independent_64b_blks
+ || u->plane_info->dcc.dcc_ind_blk != u->surface->dcc.dcc_ind_blk
|| u->plane_info->dcc.meta_pitch != u->surface->dcc.meta_pitch) {
/* During DCC on/off, stutter period is calculated before
* DCC has fully transitioned. This results in incorrect
@@ -2143,6 +2285,9 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
update_flags->bits.gamma_change = 1;
}
+ if (u->lut3d_func || u->func_shaper)
+ update_flags->bits.lut_3d = 1;
+
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
update_flags->bits.hdr_mult = 1;
@@ -2156,6 +2301,7 @@ static enum surface_update_type det_surface_update(const struct dc *dc,
if (update_flags->bits.input_csc_change
|| update_flags->bits.coeff_reduction_change
+ || update_flags->bits.lut_3d
|| update_flags->bits.gamma_change
|| update_flags->bits.gamut_remap_change) {
type = UPDATE_TYPE_FULL;
@@ -2214,6 +2360,11 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->dsc_config)
su_flags->bits.dsc_changed = 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update)
+ su_flags->bits.mst_bw = 1;
+#endif
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -2591,6 +2742,15 @@ static void commit_planes_do_stream_update(struct dc *dc,
if (stream_update->dsc_config)
dp_update_dsc_config(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (stream_update->mst_bw_update) {
+ if (stream_update->mst_bw_update->is_increase)
+ dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ else
+ dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw);
+ }
+#endif
+
if (stream_update->pending_test_pattern) {
dc_link_dp_set_test_pattern(stream->link,
stream->test_pattern.type,
@@ -2968,6 +3128,14 @@ void dc_commit_updates_for_stream(struct dc *dc,
if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
new_pipe->plane_state->force_full_update = true;
}
+ } else if (update_type == UPDATE_TYPE_FAST && dc_ctx->dce_version >= DCE_VERSION_MAX) {
+ /*
+ * Previous frame finished and HW is ready for optimization.
+ *
+ * Only relevant for DCN behavior where we can guarantee the optimization
+ * is safe to apply - retain the legacy behavior for DCE.
+ */
+ dc_post_update_surfaces_to_stream(dc);
}
@@ -3024,14 +3192,11 @@ void dc_commit_updates_for_stream(struct dc *dc,
pipe_ctx->plane_state->force_full_update = false;
}
}
- /*let's use current_state to update watermark etc*/
- if (update_type >= UPDATE_TYPE_FULL) {
- dc_post_update_surfaces_to_stream(dc);
- if (dc_ctx->dce_version >= DCE_VERSION_MAX)
- TRACE_DCN_CLOCK_STATE(&context->bw_ctx.bw.dcn.clk);
- else
- TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
+ /* Legacy optimization path for DCE. */
+ if (update_type >= UPDATE_TYPE_FULL && dc_ctx->dce_version < DCE_VERSION_MAX) {
+ dc_post_update_surfaces_to_stream(dc);
+ TRACE_DCE_CLOCK_STATE(&context->bw_ctx.bw.dce);
}
return;
@@ -3334,6 +3499,7 @@ void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_
bool dc_set_psr_allow_active(struct dc *dc, bool enable)
{
int i;
+ bool allow_active;
for (i = 0; i < dc->current_state->stream_count ; i++) {
struct dc_link *link;
@@ -3345,10 +3511,12 @@ bool dc_set_psr_allow_active(struct dc *dc, bool enable)
if (link->psr_settings.psr_feature_enabled) {
if (enable && !link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, true, false, false))
+ allow_active = true;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, false, false, NULL))
return false;
} else if (!enable && link->psr_settings.psr_allow_active) {
- if (!dc_link_set_psr_allow_active(link, false, true, false))
+ allow_active = false;
+ if (!dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL))
return false;
}
}
@@ -3432,6 +3600,12 @@ void dc_hardware_release(struct dc *dc)
*/
bool dc_enable_dmub_notifications(struct dc *dc)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* YELLOW_CARP B0 USB4 DPIA needs dmub notifications for interrupts */
+ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0)
+ return true;
+#endif
/* dmub aux needs dmub notifications to be enabled */
return dc->debug.enable_dmub_aux_for_legacy_ddc;
}
@@ -3457,7 +3631,12 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
cmd.dp_aux_access.header.type = DMUB_CMD__DP_AUX_ACCESS;
cmd.dp_aux_access.header.payload_bytes = 0;
- cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+ /* For dpia, ddc_pin is set to NULL */
+ if (!dc->links[link_index]->ddc->ddc_pin)
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_DPIA;
+ else
+ cmd.dp_aux_access.aux_control.type = AUX_CHANNEL_LEGACY_DDC;
+
cmd.dp_aux_access.aux_control.instance = dc->links[link_index]->ddc_hw_inst;
cmd.dp_aux_access.aux_control.sw_crc_enabled = 0;
cmd.dp_aux_access.aux_control.timeout = 0;
@@ -3501,6 +3680,130 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
return true;
}
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index)
+{
+ uint8_t index, link_index = 0xFF;
+
+ for (index = 0; index < dc->link_count; index++) {
+ /* ddc_hw_inst has dpia port index for dpia links
+ * and ddc instance for legacy links
+ */
+ if (!dc->links[index]->ddc->ddc_pin) {
+ if (dc->links[index]->ddc_hw_inst == dpia_port_index) {
+ link_index = index;
+ break;
+ }
+ }
+ }
+ ASSERT(link_index != 0xFF);
+ return link_index;
+}
+
+/**
+ * dc_process_dmub_set_config_async - submit SET_CONFIG command to DMUB
+ * @dc: dc structure
+ * @link_index: link index
+ * @payload: set_config command payload
+ * @notify: set_config immediate reply
+ *
+ * Submits the SET_CONFIG command to DMUB via an inbox message.
+ *
+ * Return: true if the command completed (the immediate reply is available in
+ * @notify), false if the command is pending and the reply will be delivered
+ * via an outbox notification.
+ */
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+ bool is_cmd_complete = true;
+
+ /* prepare SET_CONFIG command */
+ cmd.set_config_access.header.type = DMUB_CMD__DPIA;
+ cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
+
+ cmd.set_config_access.set_config_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_type = payload->msg_type;
+ cmd.set_config_access.set_config_control.cmd_pkt.msg_data = payload->msg_data;
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd)) {
+ /* command is not processed by dmub */
+ notify->sc_status = SET_CONFIG_UNKNOWN_ERROR;
+ return is_cmd_complete;
+ }
+
+	/* command processed by dmub; a ret_status of 1 means it completed immediately */
+ if (cmd.set_config_access.header.ret_status == 1)
+ notify->sc_status = cmd.set_config_access.set_config_control.immed_status;
+ else
+ /* cmd pending, will receive notification via outbox */
+ is_cmd_complete = false;
+
+ return is_cmd_complete;
+}
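+
+/*
+ * Example usage (sketch only; handle_immediate_reply() and
+ * wait_for_outbox_notification() stand in for caller-specific handling):
+ *
+ *	struct dmub_notification notify = {0};
+ *
+ *	if (dc_process_dmub_set_config_async(dc, link_index, &payload, &notify))
+ *		handle_immediate_reply(notify.sc_status);
+ *	else
+ *		wait_for_outbox_notification();
+ */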
+
+/**
+ * dc_process_dmub_set_mst_slots - submit MST slot allocation command to DMUB
+ * @dc: dc structure
+ * @link_index: link index
+ * @mst_alloc_slots: mst slots to be allotted
+ * @mst_slots_in_use: mst slots currently in use, returned on a slot mismatch
+ *
+ * Submits the MST slot allocation command to DMUB via an inbox message.
+ *
+ * Return: DC_OK on success, DC_FAIL_UNSUPPORTED_1 if MST is not enabled on
+ * the dpia, DC_NOT_SUPPORTED if the previously allocated and used slot counts
+ * do not match, or DC_ERROR_UNEXPECTED on a DMUB processing error.
+ */
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use)
+{
+ union dmub_rb_cmd cmd = {0};
+ struct dc_dmub_srv *dmub_srv = dc->ctx->dmub_srv;
+
+ /* prepare MST_ALLOC_SLOTS command */
+ cmd.set_mst_alloc_slots.header.type = DMUB_CMD__DPIA;
+ cmd.set_mst_alloc_slots.header.sub_type = DMUB_CMD__DPIA_MST_ALLOC_SLOTS;
+
+ cmd.set_mst_alloc_slots.mst_slots_control.instance = dc->links[link_index]->ddc_hw_inst;
+ cmd.set_mst_alloc_slots.mst_slots_control.mst_alloc_slots = mst_alloc_slots;
+
+ if (!dc_dmub_srv_cmd_with_reply_data(dmub_srv, &cmd))
+ /* command is not processed by dmub */
+ return DC_ERROR_UNEXPECTED;
+
+	/* command reached dmub; any ret_status other than 1 is a processing error */
+	if (cmd.set_mst_alloc_slots.header.ret_status != 1)
+		return DC_ERROR_UNEXPECTED;
+
+ /* command processed and we have a status of 2, mst not enabled in dpia */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 2)
+ return DC_FAIL_UNSUPPORTED_1;
+
+ /* previously configured mst alloc and used slots did not match */
+ if (cmd.set_mst_alloc_slots.mst_slots_control.immed_status == 3) {
+ *mst_slots_in_use = cmd.set_mst_alloc_slots.mst_slots_control.mst_slots_in_use;
+ return DC_NOT_SUPPORTED;
+ }
+
+ return DC_OK;
+}
+
/**
* dc_disable_accelerated_mode - disable accelerated mode
* @dc: dc structure
@@ -3509,3 +3812,57 @@ void dc_disable_accelerated_mode(struct dc *dc)
{
bios_set_scratch_acc_mode_change(dc->ctx->dc_bios, 0);
}
+
+/**
+ * dc_notify_vsync_int_state - notify vsync enable/disable state
+ * @dc: dc structure
+ * @stream: stream where the vsync interrupt state changed
+ * @enable: whether vsync is enabled or disabled
+ *
+ * Called when vsync is enabled or disabled. Notifies DMUB to start or stop
+ * ABM interrupts once the steady state is reached.
+ */
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable)
+{
+ int i;
+ int edp_num;
+ struct pipe_ctx *pipe = NULL;
+ struct dc_link *link = stream->sink->link;
+ struct dc_link *edp_links[MAX_NUM_EDP];
+
+ if (link->psr_settings.psr_feature_enabled)
+ return;
+
+	/* find primary pipe associated with stream */
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ if (pipe->stream == stream && pipe->stream_res.tg)
+ break;
+ }
+
+ if (i == MAX_PIPES) {
+ ASSERT(0);
+ return;
+ }
+
+ get_edp_links(dc, edp_links, &edp_num);
+
+ /* Determine panel inst */
+ for (i = 0; i < edp_num; i++) {
+ if (edp_links[i] == link)
+ break;
+ }
+
+	if (i == edp_num)
+		return;
+
+ if (pipe->stream_res.abm && pipe->stream_res.abm->funcs->set_abm_pause)
+ pipe->stream_res.abm->funcs->set_abm_pause(pipe->stream_res.abm, !enable, i, pipe->stream_res.tg->inst);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index 1e44b13c1c7d..2796bdd17de1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -51,6 +51,8 @@
#include "inc/link_enc_cfg.h"
#include "inc/link_dpcd.h"
+#include "dc/dcn30/dcn30_vpg.h"
+
#define DC_LOGGER_INIT(logger)
#define LINK_INFO(...) \
@@ -64,6 +66,31 @@
/*******************************************************************************
* Private functions
******************************************************************************/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool add_dp_hpo_link_encoder_to_link(struct dc_link *link)
+{
+ struct hpo_dp_link_encoder *enc = resource_get_unused_hpo_dp_link_encoder(
+ link->dc->res_pool);
+
+ if (!link->hpo_dp_link_enc && enc) {
+ link->hpo_dp_link_enc = enc;
+ link->hpo_dp_link_enc->transmitter = link->link_enc->transmitter;
+ link->hpo_dp_link_enc->hpd_source = link->link_enc->hpd_source;
+ }
+
+ return (link->hpo_dp_link_enc != NULL);
+}
+
+static void remove_dp_hpo_link_encoder_from_link(struct dc_link *link)
+{
+ if (link->hpo_dp_link_enc) {
+ link->hpo_dp_link_enc->hpd_source = HPD_SOURCEID_UNKNOWN;
+ link->hpo_dp_link_enc->transmitter = TRANSMITTER_UNKNOWN;
+ link->hpo_dp_link_enc = NULL;
+ }
+}
+#endif
+
static void dc_link_destruct(struct dc_link *link)
{
int i;
@@ -91,6 +118,12 @@ static void dc_link_destruct(struct dc_link *link)
link->link_enc->funcs->destroy(&link->link_enc);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->hpo_dp_link_enc) {
+ remove_dp_hpo_link_encoder_from_link(link);
+ }
+#endif
+
if (link->local_sink)
dc_sink_release(link->local_sink);
@@ -641,13 +674,13 @@ static void query_hdcp_capability(enum signal_type signal, struct dc_link *link)
static void read_current_link_settings_on_detect(struct dc_link *link)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t read_dpcd_retry_cnt = 10;
enum dc_status status = DC_ERROR_UNEXPECTED;
int i;
- union max_down_spread max_down_spread = { {0} };
+ union max_down_spread max_down_spread = {0};
// Read DPCD 00101h to find out the number of lanes currently set
for (i = 0; i < read_dpcd_retry_cnt; i++) {
@@ -928,6 +961,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
return false;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING)
+ add_dp_hpo_link_encoder_to_link(link);
+#endif
+
if (link->type == dc_connection_mst_branch) {
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
@@ -1173,6 +1211,11 @@ static bool dc_link_detect_helper(struct dc_link *link,
sizeof(link->mst_stream_alloc_table.stream_allocations));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ reset_dp_hpo_stream_encoders_for_link(link);
+#endif
+
link->type = dc_connection_none;
sink_caps.signal = SIGNAL_TYPE_NONE;
/* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk
@@ -1209,6 +1252,10 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ dc_z10_restore(dc);
+#endif
+
/* get out of low power state */
if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT)
clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr);
@@ -1378,8 +1425,8 @@ static enum transmitter translate_encoder_to_transmitter(struct graphics_object_
}
}
-static bool dc_link_construct(struct dc_link *link,
- const struct link_init_data *init_params)
+static bool dc_link_construct_legacy(struct dc_link *link,
+ const struct link_init_data *init_params)
{
uint8_t i;
struct ddc_service_init_data ddc_service_init_data = { { 0 } };
@@ -1549,6 +1596,9 @@ static bool dc_link_construct(struct dc_link *link,
}
DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE);
+#endif
/* Update link encoder tracking variables. These are used for the dynamic
* assignment of link encoders to streams.
@@ -1610,6 +1660,14 @@ static bool dc_link_construct(struct dc_link *link,
DC_LOG_DC("BIOS object table - ddi_channel_mapping: 0x%04X", link->ddi_channel_mapping.raw);
DC_LOG_DC("BIOS object table - chip_caps: %d", link->chip_caps);
}
+
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) {
+ link->bios_forced_drive_settings.VOLTAGE_SWING =
+ (info->ext_disp_conn_info.fixdpvoltageswing & 0x3);
+ link->bios_forced_drive_settings.PRE_EMPHASIS =
+ ((info->ext_disp_conn_info.fixdpvoltageswing >> 2) & 0x3);
+ }
+
break;
}
}
@@ -1651,6 +1709,80 @@ create_fail:
return false;
}
+static bool dc_link_construct_dpia(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ struct ddc_service_init_data ddc_service_init_data = { { 0 } };
+ struct dc_context *dc_ctx = init_params->ctx;
+
+ DC_LOGGER_INIT(dc_ctx->logger);
+
+	/* Initialize irq sources for hpd and hpd rx */
+ link->irq_source_hpd = DC_IRQ_SOURCE_INVALID;
+ link->irq_source_hpd_rx = DC_IRQ_SOURCE_INVALID;
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
+ link->dc = init_params->dc;
+ link->ctx = dc_ctx;
+ link->link_index = init_params->link_index;
+
+ memset(&link->preferred_training_settings, 0,
+ sizeof(struct dc_link_training_overrides));
+ memset(&link->preferred_link_setting, 0,
+ sizeof(struct dc_link_settings));
+
+	/* Dummy init for link id */
+ link->link_id.type = OBJECT_TYPE_CONNECTOR;
+ link->link_id.id = CONNECTOR_ID_DISPLAY_PORT;
+ link->link_id.enum_id = ENUM_ID_1 + init_params->connector_index;
+ link->is_internal_display = false;
+ link->connector_signal = SIGNAL_TYPE_DISPLAY_PORT;
+ LINK_INFO("Connector[%d] description:signal %d\n",
+ init_params->connector_index,
+ link->connector_signal);
+
+ link->ep_type = DISPLAY_ENDPOINT_USB4_DPIA;
+ link->is_dig_mapping_flexible = true;
+
+ /* TODO: Initialize link : funcs->link_init */
+
+ ddc_service_init_data.ctx = link->ctx;
+ ddc_service_init_data.id = link->link_id;
+ ddc_service_init_data.link = link;
+	/* Set indicator for dpia link so that the ddc pin won't be created */
+ ddc_service_init_data.is_dpia_link = true;
+
+ link->ddc = dal_ddc_service_create(&ddc_service_init_data);
+ if (!link->ddc) {
+ DC_ERROR("Failed to create ddc_service!\n");
+ goto ddc_create_fail;
+ }
+
+	/* Set dpia port index: 0 to number of dpia ports - 1 */
+ link->ddc_hw_inst = init_params->connector_index;
+
+ /* TODO: Create link encoder */
+
+ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+
+ /* Some docks seem to NAK I2C writes to segment pointer with mot=0. */
+ link->wa_flags.dp_mot_reset_segment = true;
+
+ return true;
+
+ddc_create_fail:
+ return false;
+}
+
+static bool dc_link_construct(struct dc_link *link,
+ const struct link_init_data *init_params)
+{
+ /* Handle dpia case */
+ if (init_params->is_dpia_link)
+ return dc_link_construct_dpia(link, init_params);
+ else
+ return dc_link_construct_legacy(link, init_params);
+}
/*******************************************************************************
* Public functions
******************************************************************************/
@@ -1741,17 +1873,47 @@ static enum dc_status enable_link_dp(struct dc_state *state,
/* get link settings for video mode timing */
decide_link_settings(stream, &link_settings);
+ /* Train with fallback when enabling DPIA link. Conventional links are
+ * trained with fallback during sink detection.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ do_fallback = true;
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /*
+	 * Temporary workaround to get DP2.0 link rates to work with SST.
+	 * TODO (DP2.0): remove this workaround once the issue is resolved.
+ */
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING &&
+ pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ link->dc->debug.set_mst_en_for_sst) {
+ dp_enable_mst_on_sink(link, true);
+ }
+#endif
+
if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) {
/*in case it is not on*/
link->dc->hwss.edp_power_control(link, true);
link->dc->hwss.edp_wait_for_hpd_ready(link, true);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */
+ } else {
+ pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ if (state->clk_mgr && !apply_seamless_boot_optimization)
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+ }
+#else
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
- link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
+ link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
state->clk_mgr->funcs->update_clocks(state->clk_mgr,
- state, false);
+ state, false);
+#endif
// during mode switch we do DP_SET_POWER off then on, and OUI is lost
dpcd_set_source_specific_data(link);
@@ -1780,7 +1942,12 @@ static enum dc_status enable_link_dp(struct dc_state *state,
else
fec_enable = true;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+ dp_set_fec_enable(link, fec_enable);
+#else
dp_set_fec_enable(link, fec_enable);
+#endif
// during mode set we do DP_SET_POWER off then on, aux writes are lost
if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
@@ -2284,6 +2451,9 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
if (dc_is_dp_signal(signal)) {
/* SST DP, eDP */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
if (dc_is_dp_sst_signal(signal))
dp_disable_link_phy(link, signal);
else
@@ -2291,8 +2461,15 @@ static void disable_link(struct dc_link *link, enum signal_type signal)
if (dc_is_dp_sst_signal(signal) ||
link->mst_stream_alloc_table.stream_count == 0) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) {
+ dp_set_fec_enable(link, false);
+ dp_set_fec_ready(link, false);
+ }
+#else
dp_set_fec_enable(link, false);
dp_set_fec_ready(link, false);
+#endif
}
} else {
if (signal != SIGNAL_TYPE_VIRTUAL)
@@ -2475,9 +2652,14 @@ static bool dp_active_dongle_validate_timing(
break;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER &&
+ dongle_caps->extendedCapValid == true) {
+#else
if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
dongle_caps->extendedCapValid == false)
return true;
+#endif
/* Check Pixel Encoding */
switch (timing->pixel_encoding) {
@@ -2520,6 +2702,89 @@ static bool dp_active_dongle_validate_timing(
if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10))
return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+
+ if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 &&
+ dpcd_caps->dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT == 0 &&
+ dongle_caps->dfp_cap_ext.supported) {
+
+ if (dongle_caps->dfp_cap_ext.max_pixel_rate_in_mps < (timing->pix_clk_100hz / 10000))
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_h_active_width < timing->h_addressable)
+ return false;
+
+ if (dongle_caps->dfp_cap_ext.max_video_v_active_height < timing->v_addressable)
+ return false;
+
+ if (timing->pixel_encoding == PIXEL_ENCODING_RGB) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_666 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_6bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.rgb_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR444) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr444_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr422_color_depth_caps.support_16bpc)
+ return false;
+ } else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) {
+ if (!dongle_caps->dfp_cap_ext.encoding_format_caps.support_rgb)
+ return false;
+ if (timing->display_color_depth == COLOR_DEPTH_888 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_8bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_101010 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_10bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_121212 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_12bpc)
+ return false;
+ else if (timing->display_color_depth == COLOR_DEPTH_161616 &&
+ !dongle_caps->dfp_cap_ext.ycbcr420_color_depth_caps.support_16bpc)
+ return false;
+ }
+ }
+#endif
+
return true;
}
@@ -2662,8 +2927,8 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
return true;
}
-bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
- bool wait, bool force_static)
+bool dc_link_set_psr_allow_active(struct dc_link *link, const bool *allow_active,
+ bool wait, bool force_static, const unsigned int *power_opts)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
@@ -2676,20 +2941,33 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active,
if (!dc_get_edp_link_panel_inst(dc, link, &panel_inst))
return false;
- link->psr_settings.psr_allow_active = allow_active;
+ /* Set power optimization flag */
+ if (power_opts && link->psr_settings.psr_power_opt != *power_opts) {
+ link->psr_settings.psr_power_opt = *power_opts;
+
+ if (psr != NULL && link->psr_settings.psr_feature_enabled && psr->funcs->psr_set_power_opt)
+ psr->funcs->psr_set_power_opt(psr, link->psr_settings.psr_power_opt);
+ }
+
+ /* Enable or Disable PSR */
+ if (allow_active && link->psr_settings.psr_allow_active != *allow_active) {
+ link->psr_settings.psr_allow_active = *allow_active;
+
#if defined(CONFIG_DRM_AMD_DC_DCN)
- if (!allow_active)
- dc_z10_restore(dc);
+ if (!link->psr_settings.psr_allow_active)
+ dc_z10_restore(dc);
#endif
- if (psr != NULL && link->psr_settings.psr_feature_enabled) {
- if (force_static && psr->funcs->psr_force_static)
- psr->funcs->psr_force_static(psr, panel_inst);
- psr->funcs->psr_enable(psr, allow_active, wait, panel_inst);
- } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_settings.psr_feature_enabled)
- dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
- else
- return false;
+ if (psr != NULL && link->psr_settings.psr_feature_enabled) {
+ if (force_static && psr->funcs->psr_force_static)
+ psr->funcs->psr_force_static(psr, panel_inst);
+ psr->funcs->psr_enable(psr, link->psr_settings.psr_allow_active, wait, panel_inst);
+ } else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) &&
+ link->psr_settings.psr_feature_enabled)
+ dmcu->funcs->set_psr_enable(dmcu, link->psr_settings.psr_allow_active, wait);
+ else
+ return false;
+ }
return true;
}
@@ -2978,10 +3256,12 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx)
static void update_mst_stream_alloc_table(
struct dc_link *link,
struct stream_encoder *stream_enc,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc?
+#endif
const struct dp_mst_stream_allocation_table *proposed_table)
{
- struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = {
- { 0 } };
+ struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 };
struct link_mst_stream_allocation *dc_alloc;
int i;
@@ -3014,6 +3294,9 @@ static void update_mst_stream_alloc_table(
work_table[i].slot_count =
proposed_table->stream_allocations[i].slot_count;
work_table[i].stream_enc = stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc;
+#endif
}
}
@@ -3024,6 +3307,108 @@ static void update_mst_stream_alloc_table(
link->mst_stream_alloc_table.stream_allocations[i] =
work_table[i];
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp)
+{
+ const uint32_t VCP_Y_PRECISION = 1000;
+ uint64_t vcp_x, vcp_y;
+
+	// Add 0.5*(1/VCP_Y_PRECISION) to round to the nearest Y step before truncation
+ avg_time_slots_per_mtp = dc_fixpt_add(
+ avg_time_slots_per_mtp, dc_fixpt_from_fraction(1, 2 * VCP_Y_PRECISION));
+
+ vcp_x = dc_fixpt_floor(avg_time_slots_per_mtp);
+ vcp_y = dc_fixpt_floor(
+ dc_fixpt_mul_int(
+ dc_fixpt_sub_int(avg_time_slots_per_mtp, dc_fixpt_floor(avg_time_slots_per_mtp)),
+ VCP_Y_PRECISION));
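+	/*
+	 * Worked example: with VCP_Y_PRECISION of 1000, an avg_time_slots_per_mtp
+	 * of 3.217 is logged as X: 3 Y: 217/1000.
+	 */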
+
+ if (link->type == dc_connection_mst_branch)
+ DC_LOG_DP2("MST Update Payload: set_throttled_vcp_size slot X.Y for MST stream "
+ "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
+ else
+ DC_LOG_DP2("SST Update Payload: set_throttled_vcp_size slot X.Y for SST stream "
+ "X: %lld Y: %lld/%d", vcp_x, vcp_y, VCP_Y_PRECISION);
+}
+
+/*
+ * Payload allocation/deallocation for SST introduced in DP2.0
+ */
+enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, bool allocate)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* slot X.Y for SST payload deallocate */
+ if (!allocate) {
+ avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ }
+
+ /* calculate VC payload and update branch with new payload allocation table*/
+ if (!dpcd_write_128b_132b_sst_payload_allocation_table(
+ stream,
+ link,
+ &proposed_table,
+ allocate)) {
+ DC_LOG_ERROR("SST Update Payload: Failed to update "
+ "allocation table for "
+ "pipe idx: %d\n",
+ pipe_ctx->pipe_idx);
+ }
+
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = hpo_dp_stream_encoder;
+
+ ASSERT(proposed_table.stream_count == 1);
+
+ //TODO - DP2.0 Logging: Instead of hpo_dp_stream_enc pointer, log instance id
+ DC_LOG_DP2("SST Update Payload: hpo_dp_stream_enc: %p "
+ "vcp_id: %d "
+ "slot_count: %d\n",
+ (void *) proposed_table.stream_allocations[0].hpo_dp_stream_enc,
+ proposed_table.stream_allocations[0].vcp_id,
+ proposed_table.stream_allocations[0].slot_count);
+
+ /* program DP source TX for payload */
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &proposed_table);
+
+ /* poll for ACT handled */
+ if (!dpcd_poll_for_allocation_change_trigger(link)) {
+ // Failures will result in blackscreen and errors logged
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* slot X.Y for SST payload allocate */
+ if (allocate) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+
+ dc_log_vcp_x_y(link, avg_time_slots_per_mtp);
+
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ }
+
+ /* Always return DC_OK.
+ * If part of sequence fails, log failure(s) and show blackscreen
+ */
+ return DC_OK;
+}
+#endif
/* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table
* because stream_encoder is not exposed to dm
@@ -3032,16 +3417,27 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
- struct link_encoder *link_encoder = link->link_enc;
+ struct link_encoder *link_encoder = NULL;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
struct dp_mst_stream_allocation_table proposed_table = {0};
struct fixed31_32 avg_time_slots_per_mtp;
struct fixed31_32 pbn;
struct fixed31_32 pbn_per_slot;
- uint8_t i;
+ int i;
enum act_return_status ret;
DC_LOGGER_INIT(link->ctx->logger);
+ /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_encoder = link->link_enc;
+ else if (link->dc->res_pool->funcs->link_encs_assign)
+ link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+ ASSERT(link_encoder);
+
/* enable_link_dp_mst already check link->enabled_stream_count
* and stream is in link->stream[]. This is called during set mode,
* stream_enc is available.
@@ -3054,7 +3450,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
true)) {
update_mst_stream_alloc_table(
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else
DC_LOG_WARNING("Failed to update"
@@ -3068,23 +3471,70 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
i,
(void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
}
ASSERT(proposed_table.stream_count > 0);
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+		enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status == DC_OK);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
+ }
+
/* program DP source TX for payload */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
/* send down message */
ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3107,26 +3557,215 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx)
pbn = get_pbn_from_timing(pipe_ctx);
avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
return DC_OK;
}
-static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
{
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
struct link_encoder *link_encoder = link->link_enc;
struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
struct dp_mst_stream_allocation_table proposed_table = {0};
- struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* decrease throttled vcp size */
+ pbn_per_slot = get_pbn_per_slot(stream);
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ } else {
+		DC_LOG_WARNING("Failed to update "
+				"MST allocation table for "
+				"pipe idx:%d\n",
+ pipe_ctx->pipe_idx);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ return DC_OK;
+}
+
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct fixed31_32 avg_time_slots_per_mtp;
+ struct fixed31_32 pbn;
+ struct fixed31_32 pbn_per_slot;
+ struct link_encoder *link_encoder = link->link_enc;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ uint8_t i;
+ enum act_return_status ret;
+ DC_LOGGER_INIT(link->ctx->logger);
+
+ /* notify immediate branch device table update */
+ if (dm_helpers_dp_mst_write_payload_allocation_table(
+ stream->ctx,
+ stream,
+ &proposed_table,
+ true)) {
+ /* update mst stream allocation table software state */
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+ }
+
+ DC_LOG_MST("%s "
+ "stream_count: %d: \n ",
+ __func__,
+ link->mst_stream_alloc_table.stream_count);
+
+ for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+ }
+
+ ASSERT(proposed_table.stream_count > 0);
+
+ /* update mst stream allocation table hardware state */
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+
+ /* poll for immediate branch device ACT handled */
+ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger(
+ stream->ctx,
+ stream);
+
+ if (ret != ACT_LINK_LOST) {
+ /* send ALLOCATE_PAYLOAD sideband message with updated pbn */
+ dm_helpers_dp_mst_send_payload_allocation(
+ stream->ctx,
+ stream,
+ true);
+ }
+
+ /* increase throttled vcp size */
+ pbn = get_pbn_from_bw_in_kbps(bw_in_kbps);
+ pbn_per_slot = get_pbn_per_slot(stream);
+ avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot);
+
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+
+ return DC_OK;
+}
+#endif
+
+static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_encoder = NULL;
+ struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_encoder = link->hpo_dp_link_enc;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc;
+#endif
+ struct dp_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0);
+ int i;
bool mst_mode = (link->type == dc_connection_mst_branch);
DC_LOGGER_INIT(link->ctx->logger);
+ /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_encoder = link->link_enc;
+ else if (link->dc->res_pool->funcs->link_encs_assign)
+ link_encoder = link_enc_cfg_get_link_enc_used_by_stream(pipe_ctx->stream->ctx->dc, stream);
+ ASSERT(link_encoder);
+
/* deallocate_mst_payload is called before disable link. When mode or
* disable/enable monitor, new stream is created which is not in link
* stream[] yet. For this, payload is not allocated yet, so de-alloc
@@ -3135,9 +3774,28 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
*/
/* slot X.Y */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ stream_encoder->funcs->set_throttled_vcp_size(
+ stream_encoder,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->set_throttled_vcp_size(
+ hpo_dp_link_encoder,
+ hpo_dp_stream_encoder->inst,
+ avg_time_slots_per_mtp);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
stream_encoder->funcs->set_throttled_vcp_size(
stream_encoder,
avg_time_slots_per_mtp);
+#endif
/* TODO: which component is responsible for remove payload table? */
if (mst_mode) {
@@ -3147,8 +3805,16 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
&proposed_table,
false)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ update_mst_stream_alloc_table(
+ link,
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &proposed_table);
+#else
update_mst_stream_alloc_table(
link, pipe_ctx->stream_res.stream_enc, &proposed_table);
+#endif
}
else {
DC_LOG_WARNING("Failed to update"
@@ -3164,6 +3830,20 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_count);
for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DC_LOG_MST("stream_enc[%d]: %p "
+ "stream[%d].hpo_dp_stream_enc: %p "
+ "stream[%d].vcp_id: %d "
+ "stream[%d].slot_count: %d\n",
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc,
+ i,
+ (void *) link->mst_stream_alloc_table.stream_allocations[i].hpo_dp_stream_enc,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
+ i,
+ link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#else
DC_LOG_MST("stream_enc[%d]: %p "
"stream[%d].vcp_id: %d "
"stream[%d].slot_count: %d\n",
@@ -3173,11 +3853,44 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
link->mst_stream_alloc_table.stream_allocations[i].vcp_id,
i,
link->mst_stream_alloc_table.stream_allocations[i].slot_count);
+#endif
+ }
+
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ enum dc_status status;
+ uint8_t mst_alloc_slots = 0, prev_mst_slots_in_use = 0xFF;
+
+ for (i = 0; i < link->mst_stream_alloc_table.stream_count; i++)
+ mst_alloc_slots += link->mst_stream_alloc_table.stream_allocations[i].slot_count;
+
+ status = dc_process_dmub_set_mst_slots(link->dc, link->link_index,
+ mst_alloc_slots, &prev_mst_slots_in_use);
+ ASSERT(status != DC_NOT_SUPPORTED);
+ DC_LOG_MST("dpia : status[%d]: alloc_slots[%d]: used_slots[%d]\n",
+ status, mst_alloc_slots, prev_mst_slots_in_use);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
+ case DP_8b_10b_ENCODING:
+ link_encoder->funcs->update_mst_stream_allocation_table(
+ link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_128b_132b_ENCODING:
+ hpo_dp_link_encoder->funcs->update_stream_allocation_table(
+ hpo_dp_link_encoder,
+ &link->mst_stream_alloc_table);
+ break;
+ case DP_UNKNOWN_ENCODING:
+ DC_LOG_ERROR("Failure: unknown encoding format\n");
+ return DC_ERROR_UNEXPECTED;
+ }
+#else
link_encoder->funcs->update_mst_stream_allocation_table(
link_encoder,
&link->mst_stream_alloc_table);
+#endif
if (mst_mode) {
dm_helpers_dp_mst_poll_for_allocation_change_trigger(
@@ -3198,6 +3911,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx)
static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
{
struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct link_encoder *link_enc = NULL;
+ struct dc_state *state = pipe_ctx->stream->ctx->dc->current_state;
+ struct link_enc_assignment link_enc_assign;
+ int i;
+#endif
+
if (cp_psp && cp_psp->funcs.update_stream_config) {
struct cp_psp_stream_config config = {0};
enum dp_panel_mode panel_mode =
@@ -3209,8 +3929,86 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst;
#if defined(CONFIG_DRM_AMD_DC_DCN)
config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA;
- config.link_enc_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
- config.phy_idx = pipe_ctx->stream->link->link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY ||
+ pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ link_enc = pipe_ctx->stream->link->link_enc;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = pipe_ctx->stream->link->link_enc;
+ else if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ }
+ // Initialize PHY ID with ABCDE - 01234 mapping except when it is B0
+ config.phy_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+
+ //look up the link_enc_assignment for the current pipe_ctx
+ for (i = 0; i < state->stream_count; i++) {
+ if (pipe_ctx->stream == state->streams[i]) {
+ link_enc_assign = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+ }
+ }
+ // Add flag to guard new A0 DIG mapping
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true) {
+ config.dig_be = link_enc_assign.eng_id;
+ config.dio_output_type = pipe_ctx->stream->link->ep_type;
+ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ } else {
+ config.dio_output_type = 0;
+ config.dio_output_idx = 0;
+ }
+
+ // Add flag to guard B0 implementation
+ if (pipe_ctx->stream->ctx->dc->enable_c20_dtm_b0 == true &&
+ link_enc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ link_enc = link_enc_assign.stream->link_enc;
+
+ // enum ID 1-4 maps to DPIA PHY ID 0-3
+ config.phy_idx = link_enc_assign.ep_id.link_id.enum_id - ENUM_ID_1;
+ } else { // for non DPIA mode over B0, ABCDE maps to 01564
+
+ switch (link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ config.phy_idx = 0;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ config.phy_idx = 1;
+ break;
+ case TRANSMITTER_UNIPHY_C:
+ config.phy_idx = 5;
+ break;
+ case TRANSMITTER_UNIPHY_D:
+ config.phy_idx = 6;
+ break;
+ case TRANSMITTER_UNIPHY_E:
+ config.phy_idx = 4;
+ break;
+ default:
+ config.phy_idx = 0;
+ break;
+ }
+
+ }
+ }
+ } else if (pipe_ctx->stream->link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(
+ pipe_ctx->stream->ctx->dc,
+ pipe_ctx->stream);
+ config.phy_idx = 0; /* Clear phy_idx for non-physical display endpoints. */
+ }
+ ASSERT(link_enc);
+ if (link_enc)
+ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A;
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0;
+ config.link_enc_idx = pipe_ctx->stream->link->hpo_dp_link_enc->inst;
+ config.dp2_enabled = 1;
+ }
#endif
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
@@ -3222,15 +4020,103 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
}
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx)
+{
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct link_mst_stream_allocation_table proposed_table = {0};
+ struct fixed31_32 avg_time_slots_per_mtp;
+ uint8_t req_slot_count = 0;
+ uint8_t vc_id = 1; /// VC ID always 1 for SST
+
+ struct dc_link_settings link_settings = {0};
+ DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
+
+ decide_link_settings(stream, &link_settings);
+ stream->link->cur_link_settings = link_settings;
+
+ /* Enable clock, Configure lane count, and Enable Link Encoder*/
+ enable_dp_hpo_output(stream->link, &stream->link->cur_link_settings);
+
+#ifdef DIAGS_BUILD
+ /* Workaround for FPGA HPO capture DP link data:
+ * HPO capture will set link to active mode
+ * This workaround is required to get a capture from start of frame
+ */
+ if (!dc->debug.fpga_hpo_capture_en) {
+ struct encoder_set_dp_phy_pattern_param params = {0};
+ params.dp_phy_pattern = DP_TEST_PATTERN_VIDEO_MODE;
+
+ /* Set link active */
+ stream->link->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ stream->link->hpo_dp_link_enc,
+ &params);
+ }
+#endif
+
+ /* Enable DP_STREAM_ENC */
+ dc->hwss.enable_stream(pipe_ctx);
+
+	/* Set DSC PPS SDP (AKA "info frames") */
+ if (pipe_ctx->stream->timing.flags.DSC) {
+ dp_set_dsc_pps_sdp(pipe_ctx, true, true);
+ }
+
+ /* Allocate Payload */
+ if ((stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) && (state->stream_count > 1)) {
+ // MST case
+ uint8_t i;
+
+ proposed_table.stream_count = state->stream_count;
+ for (i = 0; i < state->stream_count; i++) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(state->streams[i], state->streams[i]->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_allocations[i].slot_count = req_slot_count;
+ proposed_table.stream_allocations[i].vcp_id = i+1;
+ /* NOTE: This makes assumption that pipe_ctx index is same as stream index */
+ proposed_table.stream_allocations[i].hpo_dp_stream_enc = state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc;
+ }
+ } else {
+ // SST case
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, stream->link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ proposed_table.stream_count = 1; /// Always 1 stream for SST
+ proposed_table.stream_allocations[0].slot_count = req_slot_count;
+ proposed_table.stream_allocations[0].vcp_id = vc_id;
+ proposed_table.stream_allocations[0].hpo_dp_stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc;
+ }
+
+ stream->link->hpo_dp_link_enc->funcs->update_stream_allocation_table(
+ stream->link->hpo_dp_link_enc,
+ &proposed_table);
+
+ stream->link->hpo_dp_link_enc->funcs->set_throttled_vcp_size(
+ stream->link->hpo_dp_link_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ avg_time_slots_per_mtp);
+
+ dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings);
+}
+#endif
+
void core_link_enable_stream(
struct dc_state *state,
struct pipe_ctx *pipe_ctx)
{
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->sink->link;
enum dc_status status;
+ struct link_encoder *link_enc;
#if defined(CONFIG_DRM_AMD_DC_DCN)
enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO;
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
#endif
DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger);
@@ -3238,23 +4124,57 @@ void core_link_enable_stream(
dc_is_virtual_signal(pipe_ctx->stream->signal))
return;
+ if (dc->res_pool->funcs->link_encs_assign && stream->link->ep_type != DISPLAY_ENDPOINT_PHY)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(dc, stream);
+ else
+ link_enc = stream->link->link_enc;
+ ASSERT(link_enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal)
+ && !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) {
- stream->link->link_enc->funcs->setup(
- stream->link->link_enc,
- pipe_ctx->stream->signal);
+#endif
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
pipe_ctx->stream_res.stream_enc->funcs->setup_stereo_sync(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst,
stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE);
}
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->timing.flags.DSC,
+ false);
+ otg_out_dest = OUT_MUX_HPO_DP;
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
+ pipe_ctx->stream_res.stream_enc,
+ &stream->timing,
+ stream->output_color_space,
+ stream->use_vsc_sdp_for_colorimetry,
+ stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ }
+#else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(
pipe_ctx->stream_res.stream_enc,
&stream->timing,
stream->output_color_space,
stream->use_vsc_sdp_for_colorimetry,
stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+#endif
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR);
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->hdmi_set_stream_attribute(
@@ -3288,9 +4208,18 @@ void core_link_enable_stream(
pipe_ctx->stream->apply_edp_fast_boot_optimization = false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ // Enable VPG before building infoframe
+ if (vpg && vpg->funcs->vpg_poweron)
+ vpg->funcs->vpg_poweron(vpg);
+#endif
+
resource_build_info_frame(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* Do not touch link on seamless boot optimization. */
if (pipe_ctx->stream->apply_seamless_boot_optimization) {
pipe_ctx->stream->dpms_off = false;
@@ -3365,10 +4294,16 @@ void core_link_enable_stream(
* as a workaround for the incorrect value being applied
* from transmitter control.
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) ||
+ is_dp_128b_132b_signal(pipe_ctx)))
+#else
if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
- stream->link->link_enc->funcs->setup(
- stream->link->link_enc,
- pipe_ctx->stream->signal);
+#endif
+ if (link_enc)
+ link_enc->funcs->setup(
+ link_enc,
+ pipe_ctx->stream->signal);
dc->hwss.enable_stream(pipe_ctx);
@@ -3377,12 +4312,17 @@ void core_link_enable_stream(
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal)) {
dp_set_dsc_on_rx(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true, true);
}
}
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
dc_link_allocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ is_dp_128b_132b_signal(pipe_ctx))
+ dc_link_update_sst_payload(pipe_ctx, true);
+#endif
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
@@ -3399,6 +4339,11 @@ void core_link_enable_stream(
dc->hwss.enable_audio_stream(pipe_ctx);
} else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx);
+ }
+#endif
if (dc_is_dp_signal(pipe_ctx->stream->signal) ||
dc_is_virtual_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, true);
@@ -3415,6 +4360,12 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc *dc = pipe_ctx->stream->ctx->dc;
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->sink->link;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg;
+
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg;
+#endif
if (!IS_DIAG_DC(dc->ctx->dce_environment) &&
dc_is_virtual_signal(pipe_ctx->stream->signal))
@@ -3434,6 +4385,11 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
deallocate_mst_payload(pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ is_dp_128b_132b_signal(pipe_ctx))
+ dc_link_update_sst_payload(pipe_ctx, false);
+#endif
if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) {
struct ext_hdmi_settings settings = {0};
@@ -3460,14 +4416,44 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx)
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ !is_dp_128b_132b_signal(pipe_ctx)) {
+
+ /* In DP1.x SST mode, our encoder will go to TPS1
+		 * when the link is on but the stream is off.
+		 * Disabling the link before the stream avoids exposing the TPS1
+		 * pattern during the disable sequence, which would confuse some
+		 * receivers' state machines.
+		 * In DP2.0 or MST mode, our encoder stays video active.
+ */
+ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ dc->hwss.disable_stream(pipe_ctx);
+ } else {
+ dc->hwss.disable_stream(pipe_ctx);
+ disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
+ }
+#else
disable_link(pipe_ctx->stream->link, pipe_ctx->stream->signal);
dc->hwss.disable_stream(pipe_ctx);
+#endif
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
dp_set_dsc_enable(pipe_ctx, false);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ if (pipe_ctx->stream_res.tg->funcs->set_out_mux)
+ pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO);
+ }
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (vpg && vpg->funcs->vpg_powerdown)
+ vpg->funcs->vpg_powerdown(vpg);
+#endif
}
void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
@@ -3600,6 +4586,13 @@ void dc_link_set_preferred_training_settings(struct dc *dc,
if (link_setting != NULL) {
link->preferred_link_setting = *link_setting;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_setting) ==
+ DP_128b_132b_ENCODING && !link->hpo_dp_link_enc) {
+ if (!add_dp_hpo_link_encoder_to_link(link))
+ memset(&link->preferred_link_setting, 0, sizeof(link->preferred_link_setting));
+ }
+#endif
} else {
link->preferred_link_setting.lane_count = LANE_COUNT_UNKNOWN;
link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN;
@@ -3641,6 +4634,38 @@ uint32_t dc_link_bandwidth_kbps(
const struct dc_link *link,
const struct dc_link_settings *link_setting)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t total_data_bw_efficiency_x10000 = 0;
+ uint32_t link_rate_per_lane_kbps = 0;
+
+ switch (dp_get_link_encoding_format(link_setting)) {
+ case DP_8b_10b_ENCODING:
+ /* For 8b/10b encoding:
+ * link rate is defined in the unit of LINK_RATE_REF_FREQ_IN_KHZ per DP byte per lane.
+		 * link rate is defined in units of LINK_RATE_REF_FREQ_IN_KHZ DP bytes per second per lane.
+		 * Data bandwidth efficiency is 80%, with an additional 3% overhead when FEC is enabled.
+ link_rate_per_lane_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_8b_10b_x10000;
+ if (dc_link_should_enable_fec(link)) {
+ total_data_bw_efficiency_x10000 /= 100;
+ total_data_bw_efficiency_x10000 *= DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100;
+ }
+ break;
+ case DP_128b_132b_ENCODING:
+ /* For 128b/132b encoding:
+ * link rate is defined in units of 10 Mbps per lane.
+ * Total data bandwidth efficiency is always 96.71%.
+ */
+ link_rate_per_lane_kbps = link_setting->link_rate * 10000;
+ total_data_bw_efficiency_x10000 = DATA_EFFICIENCY_128b_132b_x10000;
+ break;
+ default:
+ break;
+ }
+
+ /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */
+ return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000;
+#else
uint32_t link_bw_kbps =
link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
@@ -3671,9 +4696,9 @@ uint32_t dc_link_bandwidth_kbps(
long long fec_link_bw_kbps = link_bw_kbps * 970LL;
link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL));
}
-
return link_bw_kbps;
+#endif
}
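To make the new bandwidth arithmetic concrete, here is a worked, self-contained sketch of the formula in the hunk above. The constants (27 MHz reference clock, 10 bits per DP byte, 80% and 96.71% efficiency, ~3% FEC overhead) and the example rate codes (HBR3 = 0x1E, UHBR10 = 1000 in 10 Mbps units) are restated locally as assumptions so the example compiles outside the DC headers.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REF_FREQ_IN_KHZ          27000 /* assumed LINK_RATE_REF_FREQ_IN_KHZ */
#define BITS_PER_DP_BYTE_ASSUMED 10
#define EFF_8b_10b_x10000        8000  /* 80% */
#define EFF_8b_10b_FEC_x100      97    /* extra ~3% FEC overhead */
#define EFF_128b_132b_x10000     9671  /* 96.71% */

static uint32_t bw_kbps_8b_10b(uint32_t link_rate, uint32_t lanes, bool fec)
{
        uint32_t per_lane = link_rate * REF_FREQ_IN_KHZ * BITS_PER_DP_BYTE_ASSUMED;
        uint32_t eff = EFF_8b_10b_x10000;

        if (fec) {
                eff /= 100;
                eff *= EFF_8b_10b_FEC_x100;
        }
        return per_lane * lanes / 10000 * eff;
}

static uint32_t bw_kbps_128b_132b(uint32_t link_rate_10mbps, uint32_t lanes)
{
        uint32_t per_lane = link_rate_10mbps * 10000; /* 10 Mbps units -> kbps */

        return per_lane * lanes / 10000 * EFF_128b_132b_x10000;
}

int main(void)
{
        /* HBR3 x4 without FEC: 30 * 27000 * 10 * 0.8 * 4 = 25,920,000 kbps */
        printf("HBR3 x4, FEC off: %u kbps\n", bw_kbps_8b_10b(0x1E, 4, false));
        /* UHBR10 x4: 10,000,000 * 4 * 0.9671 = 38,684,000 kbps */
        printf("UHBR10 x4:        %u kbps\n", bw_kbps_128b_132b(1000, 4));
        return 0;
}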
const struct dc_link_settings *dc_link_get_link_cap(
@@ -3700,14 +4725,14 @@ bool dc_link_is_fec_supported(const struct dc_link *link)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign) {
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
if (link_enc == NULL)
- link_enc = link_enc_cfg_get_next_avail_link_enc(link->dc, link->dc->current_state);
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
} else
link_enc = link->link_enc;
ASSERT(link_enc);
- return (dc_is_dp_signal(link->connector_signal) &&
+ return (dc_is_dp_signal(link->connector_signal) && link_enc &&
link_enc->features.fec_supported &&
link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
!IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
@@ -3721,8 +4746,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link)
if ((link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT_MST &&
link->local_sink &&
link->local_sink->edid_caps.panel_patch.disable_fec) ||
- (link->connector_signal == SIGNAL_TYPE_EDP &&
- link->dc->debug.force_enable_edp_fec == false)) // Disable FEC for eDP
+ (link->connector_signal == SIGNAL_TYPE_EDP
+ ))
is_fec_disable = true;
if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec && !is_fec_disable)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ba6b56f20269..60539b1f2a80 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -196,7 +196,8 @@ static void ddc_service_construct(
ddc_service->link = init_data->link;
ddc_service->ctx = init_data->ctx;
- if (BP_RESULT_OK != dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info)) {
+ if (init_data->is_dpia_link ||
+ dcb->funcs->get_i2c_info(dcb, init_data->id, &i2c_info) != BP_RESULT_OK) {
ddc_service->ddc_pin = NULL;
} else {
DC_LOGGER_INIT(ddc_service->ctx->logger);
@@ -553,6 +554,7 @@ bool dal_ddc_service_query_ddc_data(
payload.address = address;
payload.reply = NULL;
payload.defer_delay = get_defer_delay(ddc);
+ payload.write_status_update = false;
if (write_size != 0) {
payload.write = true;
@@ -624,24 +626,24 @@ bool dal_ddc_submit_aux_command(struct ddc_service *ddc,
do {
struct aux_payload current_payload;
bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) >=
- payload->length;
+ payload->length ? true : false;
+ uint32_t payload_length = is_end_of_payload ?
+ payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
current_payload.address = payload->address;
current_payload.data = &payload->data[retrieved];
current_payload.defer_delay = payload->defer_delay;
current_payload.i2c_over_aux = payload->i2c_over_aux;
- current_payload.length = is_end_of_payload ?
- payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE;
- /* set mot (middle of transaction) to false
- * if it is the last payload
- */
+ current_payload.length = payload_length;
+ /* set mot (middle of transaction) to false if it is the last payload */
current_payload.mot = is_end_of_payload ? payload->mot:true;
+ current_payload.write_status_update = false;
current_payload.reply = payload->reply;
current_payload.write = payload->write;
ret = dc_link_aux_transfer_with_retries(ddc, &current_payload);
- retrieved += current_payload.length;
+ retrieved += payload_length;
} while (retrieved < payload->length && ret == true);
return ret;
@@ -658,10 +660,12 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc,
struct aux_payload *payload,
enum aux_return_code_type *operation_result)
{
- if (dc_enable_dmub_notifications(ddc->ctx->dc))
+ if (ddc->ctx->dc->debug.enable_dmub_aux_for_legacy_ddc ||
+ !ddc->ddc_pin) {
return dce_aux_transfer_dmub_raw(ddc, payload, operation_result);
- else
+ } else {
return dce_aux_transfer_raw(ddc, payload, operation_result);
+ }
}
/* dc_link_aux_transfer_with_retries() - Attempt to submit an
@@ -760,7 +764,7 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
- union hdmi_scdc_status_flags_data status_data = { {0} };
+ union hdmi_scdc_status_flags_data status_data = {0};
uint8_t scramble_status = 0;
offset = HDMI_SCDC_SCRAMBLER_STATUS;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 6d655e158267..cc25ba0ec7db 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -36,6 +36,7 @@
#include "dpcd_defs.h"
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
+#include "inc/dc_link_dpia.h"
#include "inc/link_enc_cfg.h"
/*Travis*/
@@ -61,6 +62,43 @@ enum {
POST_LT_ADJ_REQ_TIMEOUT = 200
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dp_lt_fallback_entry {
+ enum dc_lane_count lane_count;
+ enum dc_link_rate link_rate;
+};
+
+static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = {
+ /* This link training fallback array is ordered by
+ * link bandwidth from highest to lowest.
+ * The DP spec makes it a normative policy to always
+ * choose the next-highest link bandwidth during
+ * link training fallback.
+ */
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR20},
+ {LANE_COUNT_FOUR, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR20},
+ {LANE_COUNT_TWO, LINK_RATE_UHBR10},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH2},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR13_5},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH3},
+ {LANE_COUNT_ONE, LINK_RATE_UHBR10},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH2},
+ {LANE_COUNT_FOUR, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH3},
+ {LANE_COUNT_FOUR, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH2},
+ {LANE_COUNT_TWO, LINK_RATE_HIGH},
+ {LANE_COUNT_TWO, LINK_RATE_LOW},
+ {LANE_COUNT_ONE, LINK_RATE_HIGH},
+ {LANE_COUNT_ONE, LINK_RATE_LOW},
+};
+#endif
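To illustrate how a bandwidth-ordered fallback table like the one above can be consumed, here is a minimal stand-alone sketch: locate the current (lane count, rate) pair and step to the next, lower-bandwidth entry. The entry type, the numeric lane/rate values, and the shortened table are illustrative stand-ins, not the DC definitions or the actual fallback routine.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fallback_entry {
        int lane_count;
        int link_rate; /* illustrative units */
};

/* Ordered highest bandwidth first, mirroring the policy described above. */
static const struct fallback_entry table[] = {
        {4, 20}, {4, 13}, {2, 20}, {4, 10}, {2, 13}, {2, 10}, {1, 10},
};

static bool next_fallback(int *lane_count, int *link_rate)
{
        size_t i;

        for (i = 0; i + 1 < sizeof(table) / sizeof(table[0]); i++) {
                if (table[i].lane_count == *lane_count &&
                    table[i].link_rate == *link_rate) {
                        *lane_count = table[i + 1].lane_count;
                        *link_rate = table[i + 1].link_rate;
                        return true;
                }
        }
        return false; /* already at the lowest entry or not in the table */
}

int main(void)
{
        int lanes = 4, rate = 20;

        while (next_fallback(&lanes, &rate))
                printf("fall back to %d lane(s) at rate %d\n", lanes, rate);
        return 0;
}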
+
static bool decide_fallback_link_setting(
struct dc_link_settings initial_link_settings,
struct dc_link_settings *current_link_setting,
@@ -68,21 +106,37 @@ static bool decide_fallback_link_setting(
static struct dc_link_settings get_common_supported_link_settings(
struct dc_link_settings link_setting_a,
struct dc_link_settings link_setting_b);
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]);
static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link,
const struct dc_link_settings *link_settings)
{
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 100;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ }
+#else
core_link_read_dpcd(
link,
DP_TRAINING_AUX_RD_INTERVAL,
(uint8_t *)&training_rd_interval,
sizeof(training_rd_interval));
if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL)
- wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+ wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000;
+#endif
return wait_in_micro_secs;
}
@@ -90,6 +144,36 @@ static uint32_t get_eq_training_aux_rd_interval(
struct dc_link *link,
const struct dc_link_settings *link_settings)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union training_aux_rd_interval training_rd_interval;
+
+ memset(&training_rd_interval, 0, sizeof(training_rd_interval));
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ core_link_read_dpcd(
+ link,
+ DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING &&
+ link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) {
+ core_link_read_dpcd(
+ link,
+ DP_TRAINING_AUX_RD_INTERVAL,
+ (uint8_t *)&training_rd_interval,
+ sizeof(training_rd_interval));
+ }
+
+ switch (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) {
+ case 0: return 400;
+ case 1: return 4000;
+ case 2: return 8000;
+ case 3: return 12000;
+ case 4: return 16000;
+ case 5: return 32000;
+ case 6: return 64000;
+ default: return 400;
+ }
+#else
union training_aux_rd_interval training_rd_interval;
uint32_t wait_in_micro_secs = 400;
@@ -109,13 +193,21 @@ static uint32_t get_eq_training_aux_rd_interval(
}
return wait_in_micro_secs;
+#endif
}
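A quick stand-alone sketch of the EQ AUX-read-interval decode introduced above: DPCD code 0 maps to 400 us, codes 1 through 6 map to the fixed values in the switch (4, 8, 12, 16, 32, and 64 ms), and anything else falls back to 400 us. The wait helper mirrors the "udelay below 16 ms, msleep above" split with usleep() standing in for both in userspace.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t eq_aux_rd_interval_us(uint8_t dpcd_code)
{
        static const uint32_t us[] = {
                400, 4000, 8000, 12000, 16000, 32000, 64000
        };

        return dpcd_code < sizeof(us) / sizeof(us[0]) ? us[dpcd_code] : 400;
}

static void wait_for_aux_rd_interval(uint32_t wait_us)
{
        /* In the kernel, long waits use msleep() and short ones udelay();
         * usleep() stands in for both in this sketch. */
        usleep(wait_us);
}

int main(void)
{
        uint8_t code;

        for (code = 0; code <= 7; code++)
                printf("code %u -> %u us\n", code, eq_aux_rd_interval_us(code));
        wait_for_aux_rd_interval(eq_aux_rd_interval_us(2));
        return 0;
}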
void dp_wait_for_training_aux_rd_interval(
struct dc_link *link,
uint32_t wait_in_micro_secs)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (wait_in_micro_secs > 16000)
+ msleep(wait_in_micro_secs/1000);
+ else
+ udelay(wait_in_micro_secs);
+#else
udelay(wait_in_micro_secs);
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n",
__func__,
@@ -143,6 +235,17 @@ enum dpcd_training_patterns
case DP_TRAINING_PATTERN_SEQUENCE_4:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS1;
+ break;
+ case DP_128b_132b_TPS2:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2;
+ break;
+ case DP_128b_132b_TPS2_CDS:
+ dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS;
+ break;
+#endif
case DP_TRAINING_PATTERN_VIDEOIDLE:
dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE;
break;
@@ -160,7 +263,7 @@ static void dpcd_set_training_pattern(
struct dc_link *link,
enum dc_dp_training_pattern training_pattern)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
dc_dp_training_pattern_to_dpcd_training_pattern(
@@ -181,13 +284,57 @@ static void dpcd_set_training_pattern(
static enum dc_dp_training_pattern decide_cr_training_pattern(
const struct dc_link_settings *link_settings)
{
- return DP_TRAINING_PATTERN_SEQUENCE_1;
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ default:
+ return DP_TRAINING_PATTERN_SEQUENCE_1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_ENCODING:
+ return DP_128b_132b_TPS1;
+#endif
+ }
}
static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *link,
const struct dc_link_settings *link_settings)
{
struct link_encoder *link_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct encoder_feature_support *enc_caps;
+ struct dpcd_caps *rx_caps = &link->dpcd_caps;
+ enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+
+ /* Access link encoder capability based on whether it is statically
+ * or dynamically assigned to a link.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+ enc_caps = &link_enc->features;
+
+ switch (dp_get_link_encoding_format(link_settings)) {
+ case DP_8b_10b_ENCODING:
+ if (enc_caps->flags.bits.IS_TPS4_CAPABLE &&
+ rx_caps->max_down_spread.bits.TPS4_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+ else if (enc_caps->flags.bits.IS_TPS3_CAPABLE &&
+ rx_caps->max_ln_count.bits.TPS3_SUPPORTED)
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_3;
+ else
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ case DP_128b_132b_ENCODING:
+ pattern = DP_128b_132b_TPS2;
+ break;
+ default:
+ pattern = DP_TRAINING_PATTERN_SEQUENCE_2;
+ break;
+ }
+ return pattern;
+#else
enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2;
struct encoder_feature_support *features;
struct dpcd_caps *dpcd_caps = &link->dpcd_caps;
@@ -197,7 +344,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -218,7 +365,38 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li
return DP_TRAINING_PATTERN_SEQUENCE_3;
return DP_TRAINING_PATTERN_SEQUENCE_2;
+#endif
+}
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings)
+{
+ uint8_t link_rate = 0;
+ enum dp_link_encoding encoding = dp_get_link_encoding_format(link_settings);
+
+ if (encoding == DP_128b_132b_ENCODING)
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ link_rate = 0x1;
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = 0x2;
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = 0x4;
+ break;
+ default:
+ link_rate = 0;
+ break;
+ }
+ else if (encoding == DP_8b_10b_ENCODING)
+ link_rate = (uint8_t) link_settings->link_rate;
+ else
+ link_rate = 0;
+
+ return link_rate;
}
+#endif
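The following is a small sketch of the DPCD link-rate encoding split that get_dpcd_link_rate() above implements: 8b/10b rates are written verbatim to DP_LINK_BW_SET, while the UHBR rates use discrete 128b/132b codes (0x1 = UHBR10, 0x2 = UHBR20, 0x4 = UHBR13.5). The numeric rate values passed in below (0x1E, 1000, 1350, 2000) are assumptions used for illustration rather than the DC enum constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint8_t dpcd_link_rate(uint32_t link_rate, bool is_128b_132b)
{
        if (!is_128b_132b)
                return (uint8_t)link_rate;      /* e.g. HBR3 = 0x1E */

        switch (link_rate) {
        case 1000: return 0x1;                  /* UHBR10   */
        case 2000: return 0x2;                  /* UHBR20   */
        case 1350: return 0x4;                  /* UHBR13.5 */
        default:   return 0;
        }
}

int main(void)
{
        printf("HBR3     -> 0x%02x\n", dpcd_link_rate(0x1E, false));
        printf("UHBR10   -> 0x%02x\n", dpcd_link_rate(1000, true));
        printf("UHBR13.5 -> 0x%02x\n", dpcd_link_rate(1350, true));
        return 0;
}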
enum dc_status dpcd_set_link_settings(
struct dc_link *link,
@@ -227,8 +405,8 @@ enum dc_status dpcd_set_link_settings(
uint8_t rate;
enum dc_status status;
- union down_spread_ctrl downspread = { {0} };
- union lane_count_set lane_count_set = { {0} };
+ union down_spread_ctrl downspread = {0};
+ union lane_count_set lane_count_set = {0};
downspread.raw = (uint8_t)
(lt_settings->link_settings.link_spread);
@@ -269,7 +447,11 @@ enum dc_status dpcd_set_link_settings(
status = core_link_write_dpcd(link, DP_LINK_RATE_SET,
&lt_settings->link_settings.link_rate_set, 1);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ rate = get_dpcd_link_rate(&lt_settings->link_settings);
+#else
rate = (uint8_t) (lt_settings->link_settings.link_rate);
+#endif
status = core_link_write_dpcd(link, DP_LINK_BW_SET, &rate, 1);
}
@@ -311,6 +493,10 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
disable_scrabled_data_symbols = 1;
break;
case DP_TRAINING_PATTERN_SEQUENCE_4:
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ case DP_128b_132b_TPS2:
+#endif
disable_scrabled_data_symbols = 0;
break;
default:
@@ -333,13 +519,10 @@ static void dpcd_set_lt_pattern_and_lane_settings(
enum dc_dp_training_pattern pattern,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = { { {0} } };
-
uint32_t dpcd_base_lt_offset;
uint8_t dpcd_lt_buffer[5] = {0};
- union dpcd_training_pattern dpcd_pattern = { {0} };
- uint32_t lane;
+ union dpcd_training_pattern dpcd_pattern = { 0 };
uint32_t size_in_bytes;
bool edp_workaround = false; /* TODO link_prop.INTERNAL */
dpcd_base_lt_offset = DP_TRAINING_PATTERN_SET;
@@ -372,53 +555,57 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
}
- /*****************************************************************
- * DpcdAddress_Lane0Set -> DpcdAddress_Lane3Set
- *****************************************************************/
- for (lane = 0; lane <
- (uint32_t)(lt_settings->link_settings.lane_count); lane++) {
-
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(lt_settings->lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(lt_settings->lane_settings[lane].PRE_EMPHASIS);
-
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (lt_settings->lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (lt_settings->lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
/* concatenate everything into one buffer*/
-
- size_in_bytes = lt_settings->link_settings.lane_count * sizeof(dpcd_lane[0]);
+ size_in_bytes = lt_settings->link_settings.lane_count *
+ sizeof(lt_settings->dpcd_lane_settings[0]);
// 0x00103 - 0x00102
memmove(
&dpcd_lt_buffer[DP_TRAINING_LANE0_SET - DP_TRAINING_PATTERN_SET],
- dpcd_lane,
+ lt_settings->dpcd_lane_settings,
size_in_bytes);
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
offset,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ dpcd_base_lt_offset,
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
dpcd_base_lt_offset,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
if (edp_workaround) {
/* for eDP write in 2 parts because the 5-byte burst is
@@ -433,9 +620,18 @@ static void dpcd_set_lt_pattern_and_lane_settings(
core_link_write_dpcd(
link,
DP_TRAINING_LANE0_SET,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(lt_settings->dpcd_lane_settings),
size_in_bytes);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ } else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ core_link_write_dpcd(
+ link,
+ dpcd_base_lt_offset,
+ dpcd_lt_buffer,
+ sizeof(dpcd_lt_buffer));
+#endif
} else
/* write it all in (1 + number-of-lanes)-byte burst*/
core_link_write_dpcd(
@@ -443,8 +639,6 @@ static void dpcd_set_lt_pattern_and_lane_settings(
dpcd_base_lt_offset,
dpcd_lt_buffer,
size_in_bytes + sizeof(dpcd_pattern.raw));
-
- link->cur_lane_setting = lt_settings->lane_settings[0];
}
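For context on the burst write above, here is a minimal sketch of the buffer layout it relies on: one byte for TRAINING_PATTERN_SET (DPCD 0x102) followed by one lane-settings byte per active lane (DPCD 0x103 through 0x106), so a four-lane write is a five-byte burst starting at 0x102. The macro names and the byte values used in main() are placeholders, not the DC definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DPCD_TRAINING_PATTERN_SET 0x102
#define DPCD_TRAINING_LANE0_SET   0x103

static size_t pack_lt_burst(uint8_t *buf, uint8_t pattern,
                            const uint8_t *lane_bytes, unsigned int lane_count)
{
        buf[0] = pattern;
        memcpy(&buf[DPCD_TRAINING_LANE0_SET - DPCD_TRAINING_PATTERN_SET],
               lane_bytes, lane_count);
        return 1 + lane_count; /* bytes to write starting at 0x102 */
}

int main(void)
{
        uint8_t lanes[4] = { 0x00, 0x00, 0x00, 0x00 }; /* placeholder lane bytes */
        uint8_t burst[5] = { 0 };
        size_t len = pack_lt_burst(burst, 0x01 /* TPS1 (placeholder) */, lanes, 4);
        size_t i;

        for (i = 0; i < len; i++)
                printf("0x%03x = 0x%02x\n",
                       DPCD_TRAINING_PATTERN_SET + (unsigned)i, burst[i]);
        return 0;
}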
bool dp_is_cr_done(enum dc_lane_count ln_count,
@@ -486,27 +680,75 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status)
return align_status.bits.INTERLANE_ALIGN_DONE == 1;
}
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src)
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint8_t lane = 0;
+
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET =
+ (uint8_t)(hw_lane_settings[lane].VOLTAGE_SWING);
+ dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET =
+ (uint8_t)(hw_lane_settings[lane].PRE_EMPHASIS);
+ dpcd_lane_settings[lane].bits.MAX_SWING_REACHED =
+ (hw_lane_settings[lane].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
+ dpcd_lane_settings[lane].bits.MAX_PRE_EMPHASIS_REACHED =
+ (hw_lane_settings[lane].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE =
+ hw_lane_settings[lane].FFE_PRESET.settings.level;
+ }
+#endif
+ }
+}
+
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
- for (lane = 0; lane < src.link_settings.lane_count; lane++) {
- if (dest->voltage_swing == NULL)
- dest->lane_settings[lane].VOLTAGE_SWING = src.lane_settings[lane].VOLTAGE_SWING;
- else
- dest->lane_settings[lane].VOLTAGE_SWING = *dest->voltage_swing;
- if (dest->pre_emphasis == NULL)
- dest->lane_settings[lane].PRE_EMPHASIS = src.lane_settings[lane].PRE_EMPHASIS;
- else
- dest->lane_settings[lane].PRE_EMPHASIS = *dest->pre_emphasis;
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) {
+ hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)(ln_adjust[lane].bits.
+ VOLTAGE_SWING_LANE);
+ hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)(ln_adjust[lane].bits.
+ PRE_EMPHASIS_LANE);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ hw_lane_settings[lane].FFE_PRESET.raw =
+ ln_adjust[lane].tx_ffe.PRESET_VALUE;
+ }
+#endif
+ }
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
- if (dest->post_cursor2 == NULL)
- dest->lane_settings[lane].POST_CURSOR2 = src.lane_settings[lane].POST_CURSOR2;
- else
- dest->lane_settings[lane].POST_CURSOR2 = *dest->post_cursor2;
+ if (lt_settings->disallow_per_lane_settings) {
+ /* we find the maximum of the requested settings across all lanes*/
+ /* and set this maximum for all lanes*/
+ maximize_lane_settings(lt_settings, hw_lane_settings);
+ override_lane_settings(lt_settings, hw_lane_settings);
+
+ if (lt_settings->always_match_dpcd_with_hw_lane_settings)
+ dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings);
}
+
}
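To clarify the new "decide, then mirror to DPCD" flow above, here is a minimal 8b/10b-only sketch: take the VS/PE levels a receiver requested, store them as the new hardware settings, then pack the matching TRAINING_LANEx_SET byte (VS in bits 1:0, MAX_SWING in bit 2, PE in bits 4:3, MAX_PE in bit 5). The field layout is restated here as an assumption; the DC unions and the 128b/132b FFE path are left out.

#include <stdint.h>
#include <stdio.h>

#define VS_MAX 3
#define PE_MAX 3

struct hw_lane { uint8_t vs, pe; };

static uint8_t hw_to_dpcd_lane(struct hw_lane hw)
{
        uint8_t b = 0;

        b |= hw.vs & 0x3;
        b |= (hw.vs == VS_MAX) << 2;
        b |= (hw.pe & 0x3) << 3;
        b |= (hw.pe == PE_MAX) << 5;
        return b;
}

static void decide_lane(uint8_t adjust_nibble, struct hw_lane *hw, uint8_t *dpcd)
{
        hw->vs = adjust_nibble & 0x3;        /* VOLTAGE_SWING_LANE */
        hw->pe = (adjust_nibble >> 2) & 0x3; /* PRE_EMPHASIS_LANE  */
        *dpcd = hw_to_dpcd_lane(*hw);
}

int main(void)
{
        struct hw_lane hw;
        uint8_t dpcd;

        decide_lane(0x7, &hw, &dpcd); /* receiver asks for VS3 / PE1 */
        printf("hw: VS%u PE%u, dpcd byte: 0x%02x\n", hw.vs, hw.pe, dpcd);
        return 0;
}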
static uint8_t get_nibble_at_index(const uint8_t *buf,
@@ -536,46 +778,31 @@ static enum dc_pre_emphasis get_max_pre_emphasis_for_voltage_swing(
}
-static void find_max_drive_settings(
- const struct link_training_settings *link_training_setting,
- struct link_training_settings *max_lt_setting)
+static void maximize_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
{
uint32_t lane;
struct dc_lane_settings max_requested;
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[0].VOLTAGE_SWING;
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[0].PRE_EMPHASIS;
- /*max_requested.postCursor2 =
- * link_training_setting->laneSettings[0].postCursor2;*/
+ max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING;
+ max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET;
+#endif
/* Determine what the maximum of the requested settings are*/
- for (lane = 1; lane < link_training_setting->link_settings.lane_count;
- lane++) {
- if (link_training_setting->lane_settings[lane].VOLTAGE_SWING >
- max_requested.VOLTAGE_SWING)
-
- max_requested.VOLTAGE_SWING =
- link_training_setting->
- lane_settings[lane].VOLTAGE_SWING;
-
- if (link_training_setting->lane_settings[lane].PRE_EMPHASIS >
- max_requested.PRE_EMPHASIS)
- max_requested.PRE_EMPHASIS =
- link_training_setting->
- lane_settings[lane].PRE_EMPHASIS;
-
- /*
- if (link_training_setting->laneSettings[lane].postCursor2 >
- max_requested.postCursor2)
- {
- max_requested.postCursor2 =
- link_training_setting->laneSettings[lane].postCursor2;
- }
- */
+ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) {
+ if (lane_settings[lane].VOLTAGE_SWING > max_requested.VOLTAGE_SWING)
+ max_requested.VOLTAGE_SWING = lane_settings[lane].VOLTAGE_SWING;
+
+ if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS)
+ max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (lane_settings[lane].FFE_PRESET.settings.level >
+ max_requested.FFE_PRESET.settings.level)
+ max_requested.FFE_PRESET.settings.level =
+ lane_settings[lane].FFE_PRESET.settings.level;
+#endif
}
/* make sure the requested settings are
@@ -585,10 +812,10 @@ static void find_max_drive_settings(
if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL)
max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL;
- /*
- if (max_requested.postCursor2 > PostCursor2_MaxLevel)
- max_requested.postCursor2 = PostCursor2_MaxLevel;
- */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL)
+ max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL;
+#endif
/* make sure the pre-emphasis matches the voltage swing*/
if (max_requested.PRE_EMPHASIS >
@@ -598,57 +825,58 @@ static void find_max_drive_settings(
get_max_pre_emphasis_for_voltage_swing(
max_requested.VOLTAGE_SWING);
- /*
- * Post Cursor2 levels are completely independent from
- * pre-emphasis (Post Cursor1) levels. But Post Cursor2 levels
- * can only be applied to each allowable combination of voltage
- * swing and pre-emphasis levels */
- /* if ( max_requested.postCursor2 >
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing))
- * max_requested.postCursor2 =
- * getMaxPostCursor2ForVoltageSwing(max_requested.voltageSwing);
- */
-
- max_lt_setting->link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- max_lt_setting->link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- max_lt_setting->link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- link_training_setting->link_settings.lane_count;
- lane++) {
- max_lt_setting->lane_settings[lane].VOLTAGE_SWING =
- max_requested.VOLTAGE_SWING;
- max_lt_setting->lane_settings[lane].PRE_EMPHASIS =
- max_requested.PRE_EMPHASIS;
- /*max_lt_setting->laneSettings[lane].postCursor2 =
- * max_requested.postCursor2;
- */
+ for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
+ lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING;
+ lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET;
+#endif
}
+}
+
+static void override_lane_settings(const struct link_training_settings *lt_settings,
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX])
+{
+ uint32_t lane;
+
+ if (lt_settings->voltage_swing == NULL &&
+ lt_settings->pre_emphasis == NULL &&
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->ffe_preset == NULL &&
+#endif
+ lt_settings->post_cursor2 == NULL)
+ return;
+
+ for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) {
+ if (lt_settings->voltage_swing)
+ lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing;
+ if (lt_settings->pre_emphasis)
+ lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis;
+ if (lt_settings->post_cursor2)
+ lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (lt_settings->ffe_preset)
+ lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset;
+#endif
+ }
}
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset)
{
unsigned int lane01_status_address = DP_LANE0_1_STATUS;
uint8_t lane_adjust_offset = 4;
unsigned int lane01_adjust_address;
uint8_t dpcd_buf[6] = {0};
- union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- struct link_training_settings request_settings = { {0} };
uint32_t lane;
enum dc_status status;
- memset(req_settings, '\0', sizeof(struct link_training_settings));
-
if (is_repeater(link, offset)) {
lane01_status_address =
DP_LANE0_1_STATUS_PHY_REPEATER1 +
@@ -668,11 +896,11 @@ enum dc_status dp_get_lane_status_and_drive_settings(
ln_status[lane].raw =
get_nibble_at_index(&dpcd_buf[0], lane);
- dpcd_lane_adjust[lane].raw =
+ ln_adjust[lane].raw =
get_nibble_at_index(&dpcd_buf[lane_adjust_offset], lane);
}
- ln_status_updated->raw = dpcd_buf[2];
+ ln_align->raw = dpcd_buf[2];
if (is_repeater(link, offset)) {
DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
@@ -711,39 +939,6 @@ enum dc_status dp_get_lane_status_and_drive_settings(
dpcd_buf[lane_adjust_offset + 1]);
}
- /*copy to req_settings*/
- request_settings.link_settings.lane_count =
- link_training_setting->link_settings.lane_count;
- request_settings.link_settings.link_rate =
- link_training_setting->link_settings.link_rate;
- request_settings.link_settings.link_spread =
- link_training_setting->link_settings.link_spread;
-
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->link_settings.lane_count);
- lane++) {
-
- request_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)(dpcd_lane_adjust[lane].bits.
- VOLTAGE_SWING_LANE);
- request_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)(dpcd_lane_adjust[lane].bits.
- PRE_EMPHASIS_LANE);
- }
-
- /*Note: for postcursor2, read adjusted
- * postcursor2 settings from*/
- /*DpcdAddress_AdjustRequestPostCursor2 =
- *0x020C (not implemented yet)*/
-
- /* we find the maximum of the requested settings across all lanes*/
- /* and set this maximum for all lanes*/
- find_max_drive_settings(&request_settings, req_settings);
-
- /* if post cursor 2 is needed in the future,
- * read DpcdAddress_AdjustRequestPostCursor2 = 0x020C
- */
-
return status;
}
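For readers unfamiliar with the DPCD status layout the function above parses, here is a stand-alone sketch of the nibble unpacking: two lanes share each byte, with lane 0 in the low nibble and lane 1 in the high nibble of LANE0_1_STATUS (and likewise for lanes 2/3 and for the ADJUST_REQUEST bytes). The buffer contents below are made-up sample values.

#include <stdint.h>
#include <stdio.h>

static uint8_t nibble_at(const uint8_t *buf, unsigned int index)
{
        uint8_t byte = buf[index / 2];

        return (index % 2) ? (byte >> 4) & 0xF : byte & 0xF;
}

int main(void)
{
        /* LANE0_1_STATUS, LANE2_3_STATUS: 0x7 = CR done, EQ done, symbol lock */
        uint8_t status[2] = { 0x77, 0x77 };
        unsigned int lane;

        for (lane = 0; lane < 4; lane++)
                printf("lane %u status nibble: 0x%x\n", lane, nibble_at(status, lane));
        return 0;
}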
@@ -752,8 +947,6 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset)
{
- union dpcd_training_lane dpcd_lane[LANE_COUNT_DP_MAX] = {{{0}}};
- uint32_t lane;
unsigned int lane0_set_address;
enum dc_status status;
@@ -763,71 +956,53 @@ enum dc_status dpcd_set_lane_settings(
lane0_set_address = DP_TRAINING_LANE0_SET_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (offset - 1));
- for (lane = 0; lane <
- (uint32_t)(link_training_setting->
- link_settings.lane_count);
- lane++) {
- dpcd_lane[lane].bits.VOLTAGE_SWING_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].VOLTAGE_SWING);
- dpcd_lane[lane].bits.PRE_EMPHASIS_SET =
- (uint8_t)(link_training_setting->
- lane_settings[lane].PRE_EMPHASIS);
- dpcd_lane[lane].bits.MAX_SWING_REACHED =
- (link_training_setting->
- lane_settings[lane].VOLTAGE_SWING ==
- VOLTAGE_SWING_MAX_LEVEL ? 1 : 0);
- dpcd_lane[lane].bits.MAX_PRE_EMPHASIS_REACHED =
- (link_training_setting->
- lane_settings[lane].PRE_EMPHASIS ==
- PRE_EMPHASIS_MAX_LEVEL ? 1 : 0);
- }
-
status = core_link_write_dpcd(link,
lane0_set_address,
- (uint8_t *)(dpcd_lane),
+ (uint8_t *)(link_training_setting->dpcd_lane_settings),
link_training_setting->link_settings.lane_count);
- /*
- if (LTSettings.link.rate == LinkRate_High2)
- {
- DpcdTrainingLaneSet2 dpcd_lane2[lane_count_DPMax] = {0};
- for ( uint32_t lane = 0;
- lane < lane_count_DPMax; lane++)
- {
- dpcd_lane2[lane].bits.post_cursor2_set =
- static_cast<unsigned char>(
- LTSettings.laneSettings[lane].postCursor2);
- dpcd_lane2[lane].bits.max_post_cursor2_reached = 0;
- }
- m_pDpcdAccessSrv->WriteDpcdData(
- DpcdAddress_Lane0Set2,
- reinterpret_cast<unsigned char*>(dpcd_lane2),
- LTSettings.link.lanes);
- }
- */
-
if (is_repeater(link, offset)) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n"
+ " 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ offset,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n"
" 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
offset,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
} else {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_128b_132b_ENCODING)
+ DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n",
+ __func__,
+ lane0_set_address,
+ link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE);
+ else if (dp_get_link_encoding_format(&link_training_setting->link_settings) ==
+ DP_8b_10b_ENCODING)
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n",
__func__,
lane0_set_address,
- dpcd_lane[0].bits.VOLTAGE_SWING_SET,
- dpcd_lane[0].bits.PRE_EMPHASIS_SET,
- dpcd_lane[0].bits.MAX_SWING_REACHED,
- dpcd_lane[0].bits.MAX_PRE_EMPHASIS_REACHED);
+ link_training_setting->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_SWING_REACHED,
+ link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED);
}
- link->cur_lane_setting = link_training_setting->lane_settings[0];
return status;
}
@@ -839,7 +1014,7 @@ bool dp_is_max_vs_reached(
for (lane = 0; lane <
(uint32_t)(lt_settings->link_settings.lane_count);
lane++) {
- if (lt_settings->lane_settings[lane].VOLTAGE_SWING
+ if (lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET
== VOLTAGE_SWING_MAX_LEVEL)
return true;
}
@@ -869,17 +1044,17 @@ static bool perform_post_lt_adj_req_sequence(
adj_req_timer < POST_LT_ADJ_REQ_TIMEOUT;
adj_req_timer++) {
- struct link_training_settings req_settings;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated
dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
DPRX);
if (dpcd_lane_status_updated.bits.
@@ -897,11 +1072,10 @@ static bool perform_post_lt_adj_req_sequence(
for (lane = 0; lane < (uint32_t)(lane_count); lane++) {
if (lt_settings->
- lane_settings[lane].VOLTAGE_SWING !=
- req_settings.lane_settings[lane].
- VOLTAGE_SWING ||
- lt_settings->lane_settings[lane].PRE_EMPHASIS !=
- req_settings.lane_settings[lane].PRE_EMPHASIS) {
+ dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET !=
+ dpcd_lane_adjust[lane].bits.VOLTAGE_SWING_LANE ||
+ lt_settings->dpcd_lane_settings[lane].bits.PRE_EMPHASIS_SET !=
+ dpcd_lane_adjust[lane].bits.PRE_EMPHASIS_LANE) {
req_drv_setting_changed = true;
break;
@@ -909,8 +1083,8 @@ static bool perform_post_lt_adj_req_sequence(
}
if (req_drv_setting_changed) {
- dp_update_drive_settings(
- lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
dc_link_dp_set_drive_settings(link,
lt_settings);
@@ -954,6 +1128,14 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval
case 0x04:
aux_rd_interval_us = 16000;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case 0x05:
+ aux_rd_interval_us = 32000;
+ break;
+ case 0x06:
+ aux_rd_interval_us = 64000;
+ break;
+#endif
default:
break;
}
@@ -982,20 +1164,24 @@ static enum link_training_result perform_channel_equalization_sequence(
struct link_training_settings *lt_settings,
uint32_t offset)
{
- struct link_training_settings req_settings;
enum dc_dp_training_pattern tr_pattern;
uint32_t retries_ch_eq;
uint32_t wait_time_microsec;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
- union lane_align_status_updated dpcd_lane_status_updated = { {0} };
- union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
/* Note: also check that TPS4 is a supported feature*/
-
tr_pattern = lt_settings->pattern_for_eq;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_repeater(link, offset) && dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING)
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#else
if (is_repeater(link, offset))
tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+#endif
dp_set_hw_training_pattern(link, tr_pattern, offset);
@@ -1032,12 +1218,12 @@ static enum link_training_result perform_channel_equalization_sequence(
/* 4. Read lane status and requested
* drive settings as set by the sink*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1051,7 +1237,8 @@ static enum link_training_result perform_channel_equalization_sequence(
return LINK_TRAINING_SUCCESS;
/* 7. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
return LINK_TRAINING_EQ_FAIL_EQ;
@@ -1077,10 +1264,10 @@ static enum link_training_result perform_clock_recovery_sequence(
uint32_t retries_cr;
uint32_t retry_count;
uint32_t wait_time_microsec;
- struct link_training_settings req_settings;
enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
union lane_align_status_updated dpcd_lane_status_updated;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
retries_cr = 0;
retry_count = 0;
@@ -1134,12 +1321,12 @@ static enum link_training_result perform_clock_recovery_sequence(
/* 4. Read lane status and requested drive
* settings as set by the sink
*/
- dp_get_lane_status_and_drive_settings(
+ dp_get_lane_status_and_lane_adjust(
link,
lt_settings,
dpcd_lane_status,
&dpcd_lane_status_updated,
- &req_settings,
+ dpcd_lane_adjust,
offset);
/* 5. check CR done*/
@@ -1147,23 +1334,35 @@ static enum link_training_result perform_clock_recovery_sequence(
return LINK_TRAINING_SUCCESS;
/* 6. max VS reached*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) ==
+ DP_8b_10b_ENCODING) &&
+ dp_is_max_vs_reached(lt_settings))
+ break;
+#else
if (dp_is_max_vs_reached(lt_settings))
break;
+#endif
/* 7. same lane settings*/
/* Note: settings are the same for all lanes,
* so comparing first lane is sufficient*/
- if ((lt_settings->lane_settings[0].VOLTAGE_SWING ==
- req_settings.lane_settings[0].VOLTAGE_SWING)
- && (lt_settings->lane_settings[0].PRE_EMPHASIS ==
- req_settings.lane_settings[0].PRE_EMPHASIS))
+ if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_8b_10b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ retries_cr++;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((dp_get_link_encoding_format(&lt_settings->link_settings) == DP_128b_132b_ENCODING) &&
+ lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE ==
+ dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE)
retries_cr++;
+#endif
else
retries_cr = 0;
/* 8. update VS/PE/PC2 in lt_settings*/
- dp_update_drive_settings(lt_settings, req_settings);
-
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
retry_count++;
}
@@ -1183,7 +1382,7 @@ static inline enum link_training_result dp_transition_to_video_idle(
struct link_training_settings *lt_settings,
enum link_training_result status)
{
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
/* 4. mainlink output idle pattern*/
dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE, NULL, 0);
@@ -1194,7 +1393,11 @@ static inline enum link_training_result dp_transition_to_video_idle(
* TPS4 must be used instead of POST_LT_ADJ_REQ.
*/
if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 ||
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) {
+#else
lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) {
+#endif
/* delay 5ms after Main Link output idle pattern and then check
* DPCD 0202h.
*/
@@ -1288,8 +1491,40 @@ static inline void decide_8b_10b_training_settings(
lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_setting);
lt_settings->enhanced_framing = 1;
lt_settings->should_set_fec_ready = true;
+ lt_settings->disallow_per_lane_settings = true;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static inline void decide_128b_132b_training_settings(struct dc_link *link,
+ const struct dc_link_settings *link_settings,
+ struct link_training_settings *lt_settings)
+{
+ memset(lt_settings, 0, sizeof(*lt_settings));
+
+ lt_settings->link_settings = *link_settings;
+ /* TODO: should decide link spread when populating link_settings */
+ lt_settings->link_settings.link_spread = link->dp_ss_off ? LINK_SPREAD_DISABLED :
+ LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+
+ lt_settings->pattern_for_cr = decide_cr_training_pattern(link_settings);
+ lt_settings->pattern_for_eq = decide_eq_training_pattern(link, link_settings);
+ lt_settings->eq_pattern_time = 2500;
+ lt_settings->eq_wait_time_limit = 400000;
+ lt_settings->eq_loop_count_limit = 20;
+ lt_settings->pattern_for_cds = DP_128b_132b_TPS2_CDS;
+ lt_settings->cds_pattern_time = 2500;
+ lt_settings->cds_wait_time_limit = (dp_convert_to_count(
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt) + 1) * 20000;
+ lt_settings->lttpr_mode = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) ?
+ LTTPR_MODE_NON_TRANSPARENT : LTTPR_MODE_TRANSPARENT;
+ lt_settings->disallow_per_lane_settings = true;
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+}
+#endif
+
void dp_decide_training_settings(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1297,6 +1532,10 @@ void dp_decide_training_settings(
{
if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING)
decide_8b_10b_training_settings(link, link_settings, lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING)
+ decide_128b_132b_training_settings(link, link_settings, lt_settings);
+#endif
}
static void override_training_settings(
@@ -1319,6 +1558,17 @@ static void override_training_settings(
lt_settings->pre_emphasis = overrides->pre_emphasis;
if (overrides->post_cursor2 != NULL)
lt_settings->post_cursor2 = overrides->post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (overrides->ffe_preset != NULL)
+ lt_settings->ffe_preset = overrides->ffe_preset;
+#endif
+ /* Override HW lane settings with BIOS forced values if present */
+ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN &&
+ link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
+ lt_settings->voltage_swing = &link->bios_forced_drive_settings.VOLTAGE_SWING;
+ lt_settings->pre_emphasis = &link->bios_forced_drive_settings.PRE_EMPHASIS;
+ lt_settings->always_match_dpcd_with_hw_lane_settings = false;
+ }
for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) {
lt_settings->lane_settings[lane].VOLTAGE_SWING =
lt_settings->voltage_swing != NULL ?
@@ -1334,6 +1584,9 @@ static void override_training_settings(
: POST_CURSOR2_DISABLED;
}
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Initialize training timings */
if (overrides->cr_pattern_time != NULL)
lt_settings->cr_pattern_time = *overrides->cr_pattern_time;
@@ -1378,7 +1631,7 @@ uint8_t dp_convert_to_count(uint8_t lttpr_repeater_count)
return 0; // invalid value
}
-enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
+static enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
{
uint8_t repeater_mode = DP_PHY_REPEATER_MODE_TRANSPARENT;
@@ -1389,7 +1642,7 @@ enum dc_status configure_lttpr_mode_transparent(struct dc_link *link)
sizeof(repeater_mode));
}
-enum dc_status configure_lttpr_mode_non_transparent(
+static enum dc_status configure_lttpr_mode_non_transparent(
struct dc_link *link,
const struct link_training_settings *lt_settings)
{
@@ -1432,6 +1685,13 @@ enum dc_status configure_lttpr_mode_non_transparent(
if (encoding == DP_8b_10b_ENCODING) {
repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Driver does not need to train the first hop. Skip DPCD read and clear
+ * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
+ */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
+
for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
@@ -1450,7 +1710,7 @@ enum dc_status configure_lttpr_mode_non_transparent(
static void repeater_training_done(struct dc_link *link, uint32_t offset)
{
- union dpcd_training_pattern dpcd_pattern = { {0} };
+ union dpcd_training_pattern dpcd_pattern = {0};
const uint32_t dpcd_base_lt_offset =
DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
@@ -1505,6 +1765,17 @@ static void print_status_message(
case LINK_RATE_HIGH3:
link_rate = "HBR3";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR10:
+ link_rate = "UHBR10";
+ break;
+ case LINK_RATE_UHBR13_5:
+ link_rate = "UHBR13.5";
+ break;
+ case LINK_RATE_UHBR20:
+ link_rate = "UHBR20";
+ break;
+#endif
default:
break;
}
@@ -1534,6 +1805,20 @@ static void print_status_message(
case LINK_TRAINING_LINK_LOSS:
lt_result = "Link loss";
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_LT_FAILED:
+ lt_result = "LT_FAILED received";
+ break;
+ case DP_128b_132b_MAX_LOOP_COUNT_REACHED:
+ lt_result = "max loop count reached";
+ break;
+ case DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT:
+ lt_result = "channel EQ timeout";
+ break;
+ case DP_128b_132b_CDS_DONE_TIMEOUT:
+ lt_result = "CDS timeout";
+ break;
+#endif
default:
break;
}
@@ -1553,6 +1838,9 @@ static void print_status_message(
}
/* Connectivity log: link training */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */
+#endif
CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s",
link_rate,
lt_settings->link_settings.lane_count,
@@ -1569,6 +1857,9 @@ void dc_link_dp_set_drive_settings(
/* program ASIC PHY settings*/
dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_hw_to_dpcd_lane_settings(lt_settings,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+
/* Notify DP sink the PHY settings from source */
dpcd_set_lane_settings(link, lt_settings, DPRX);
}
@@ -1635,9 +1926,23 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train
static void dpcd_exit_training_mode(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t sink_status = 0;
+ uint8_t i;
+#endif
/* clear training pattern set */
dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* poll for intra-hop disable */
+ for (i = 0; i < 10; i++) {
+ if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) &&
+ (sink_status & DP_INTRA_HOP_AUX_REPLY_INDICATION) == 0)
+ break;
+ udelay(1000);
+ }
+#endif
}
enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
@@ -1661,6 +1966,137 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link,
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link,
+ uint32_t *interval_in_us)
+{
+ union dp_128b_132b_training_aux_rd_interval dpcd_interval;
+ uint32_t interval_unit = 0;
+
+ dpcd_interval.raw = 0;
+ core_link_read_dpcd(link, DP_128b_132b_TRAINING_AUX_RD_INTERVAL,
+ &dpcd_interval.raw, sizeof(dpcd_interval.raw));
+ interval_unit = dpcd_interval.bits.UNIT ? 1 : 2; /* 0b = 2 ms, 1b = 1 ms */
+ /* (128b/132b_TRAINING_AUX_RD_INTERVAL value + 1) *
+ * INTERVAL_UNIT. The maximum is 256 ms
+ */
+ *interval_in_us = (dpcd_interval.bits.VALUE + 1) * interval_unit * 1000;
+}
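A stand-alone version of the 128b/132b AUX-read-interval decode above, with the register layout restated as an assumption instead of using the DC union: bits 6:0 hold (interval - 1) and bit 7 selects the unit (1 = 1 ms, 0 = 2 ms), so 0x00 decodes to 2 ms, 0x85 to 6 ms, and 0x7F to the 256 ms maximum.

#include <stdint.h>
#include <stdio.h>

static uint32_t aux_rd_interval_128b_132b_us(uint8_t dpcd_value)
{
        uint32_t unit_ms = (dpcd_value & 0x80) ? 1 : 2;
        uint32_t value = dpcd_value & 0x7F;

        return (value + 1) * unit_ms * 1000;
}

int main(void)
{
        printf("0x00 -> %u us\n", aux_rd_interval_128b_132b_us(0x00)); /* 2 ms   */
        printf("0x85 -> %u us\n", aux_rd_interval_128b_132b_us(0x85)); /* 6 ms   */
        printf("0x7f -> %u us\n", aux_rd_interval_128b_132b_us(0x7F)); /* 256 ms */
        return 0;
}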
+
+static enum link_training_result dp_perform_128b_132b_channel_eq_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ uint8_t loop_count;
+ uint32_t aux_rd_interval = 0;
+ uint32_t wait_time = 0;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = {0};
+
+ /* Transmit 128b/132b_TPS1 over Main-Link */
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ /* Set TRAINING_PATTERN_SET to 01h */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cr);
+
+ /* Adjust TX_FFE_PRESET_VALUE and Transmit 128b/132b_TPS2 over Main-Link */
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dp_set_hw_training_pattern(link, lt_settings->pattern_for_eq, DPRX);
+
+ /* Set loop counter to start from 1 */
+ loop_count = 1;
+
+ /* Set TRAINING_PATTERN_SET to 02h and TX_FFE_PRESET_VALUE in one AUX transaction */
+ dpcd_set_lt_pattern_and_lane_settings(link, lt_settings,
+ lt_settings->pattern_for_eq, DPRX);
+
+ /* poll for channel EQ done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link, aux_rd_interval);
+ wait_time += aux_rd_interval;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ dpcd_128b_132b_get_aux_rd_interval(link, &aux_rd_interval);
+ if (dp_is_ch_eq_done(lt_settings->link_settings.lane_count,
+ dpcd_lane_status)) {
+ /* pass */
+ break;
+ } else if (loop_count >= lt_settings->eq_loop_count_limit) {
+ status = DP_128b_132b_MAX_LOOP_COUNT_REACHED;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_set_hw_lane_settings(link, lt_settings, DPRX);
+ dpcd_set_lane_settings(link, lt_settings, DPRX);
+ }
+ loop_count++;
+ }
+
+ /* poll for EQ interlane align done */
+ while (status == LINK_TRAINING_SUCCESS) {
+ if (dpcd_lane_status_updated.bits.EQ_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (wait_time >= lt_settings->eq_wait_time_limit) {
+ status = DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->eq_pattern_time);
+ wait_time += lt_settings->eq_pattern_time;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ }
+ }
+
+ return status;
+}
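To make the polling structure of the channel-EQ sequence above easier to follow, here is a toy model of just the control flow: poll a status source until the pass condition is met, a failure flag is raised, or a loop-count budget runs out, accumulating wait time as the real sequence does. The status stub, the limits, and the result names are illustrative; this is not the DC sequence itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum lt_result { LT_SUCCESS, LT_MAX_LOOP_COUNT, LT_FAILED };

struct status { bool done; bool failed; };

static struct status poll_stub(unsigned int poll_no)
{
        struct status s = { .done = poll_no >= 3, .failed = false };

        return s; /* "channel EQ done" on the third poll */
}

static enum lt_result poll_for_eq_done(unsigned int loop_limit,
                                       uint32_t interval_us, uint32_t *waited_us)
{
        unsigned int loop_count = 1;

        *waited_us = 0;
        for (;;) {
                struct status s = poll_stub(loop_count);

                *waited_us += interval_us;
                if (s.done)
                        return LT_SUCCESS;
                if (s.failed)
                        return LT_FAILED;
                if (loop_count >= loop_limit)
                        return LT_MAX_LOOP_COUNT;
                loop_count++;
        }
}

int main(void)
{
        uint32_t waited;
        enum lt_result res = poll_for_eq_done(20, 2500, &waited);

        printf("result %d after %u us of polling\n", res, waited);
        return 0;
}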
+
+static enum link_training_result dp_perform_128b_132b_cds_done_sequence(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ /* Assumption: the hardware has already transmitted the EQ pattern */
+ enum link_training_result status = LINK_TRAINING_SUCCESS;
+ union lane_align_status_updated dpcd_lane_status_updated = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = {0};
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint32_t wait_time = 0;
+
+ /* initiate CDS done sequence */
+ dpcd_set_training_pattern(link, lt_settings->pattern_for_cds);
+
+ /* poll for CDS interlane align done and symbol lock */
+ while (status == LINK_TRAINING_SUCCESS) {
+ dp_wait_for_training_aux_rd_interval(link,
+ lt_settings->cds_pattern_time);
+ wait_time += lt_settings->cds_pattern_time;
+ dp_get_lane_status_and_lane_adjust(link, lt_settings, dpcd_lane_status,
+ &dpcd_lane_status_updated, dpcd_lane_adjust, DPRX);
+ if (dp_is_symbol_locked(lt_settings->link_settings.lane_count, dpcd_lane_status) &&
+ dpcd_lane_status_updated.bits.CDS_INTERLANE_ALIGN_DONE_128b_132b) {
+ /* pass */
+ break;
+ } else if (dpcd_lane_status_updated.bits.LT_FAILED_128b_132b) {
+ status = DP_128b_132b_LT_FAILED;
+ } else if (wait_time >= lt_settings->cds_wait_time_limit) {
+ status = DP_128b_132b_CDS_DONE_TIMEOUT;
+ }
+ }
+
+ return status;
+}
+#endif
+
static enum link_training_result dp_perform_8b_10b_link_training(
struct dc_link *link,
struct link_training_settings *lt_settings)
@@ -1702,7 +2138,7 @@ static enum link_training_result dp_perform_8b_10b_link_training(
}
for (lane = 0; lane < (uint8_t)lt_settings->link_settings.lane_count; lane++)
- lt_settings->lane_settings[lane].VOLTAGE_SWING = VOLTAGE_SWING_LEVEL0;
+ lt_settings->dpcd_lane_settings[lane].bits.VOLTAGE_SWING_SET = VOLTAGE_SWING_LEVEL0;
}
if (status == LINK_TRAINING_SUCCESS) {
@@ -1717,6 +2153,35 @@ static enum link_training_result dp_perform_8b_10b_link_training(
return status;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum link_training_result dp_perform_128b_132b_link_training(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+
+ /* TODO - DP2.0 Link: remove legacy_dp2_lt logic */
+ if (link->dc->debug.legacy_dp2_lt) {
+ struct link_training_settings legacy_settings;
+
+ decide_8b_10b_training_settings(link,
+ &lt_settings->link_settings,
+ &legacy_settings);
+ return dp_perform_8b_10b_link_training(link, &legacy_settings);
+ }
+
+ dpcd_set_link_settings(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_channel_eq_done_sequence(link, lt_settings);
+
+ if (result == LINK_TRAINING_SUCCESS)
+ result = dp_perform_128b_132b_cds_done_sequence(link, lt_settings);
+
+ return result;
+}
+#endif
+
enum link_training_result dc_link_dp_perform_link_training(
struct dc_link *link,
const struct dc_link_settings *link_settings,
@@ -1751,6 +2216,10 @@ enum link_training_result dc_link_dp_perform_link_training(
*/
if (encoding == DP_8b_10b_ENCODING)
status = dp_perform_8b_10b_link_training(link, &lt_settings);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (encoding == DP_128b_132b_ENCODING)
+ status = dp_perform_128b_132b_link_training(link, &lt_settings);
+#endif
else
ASSERT(0);
@@ -1788,16 +2257,19 @@ bool perform_link_training_with_retries(
/* Dynamically assigned link encoders associated with stream rather than
* link.
*/
- if (link->dc->res_pool->funcs->link_encs_assign)
- link_enc = stream->link_enc;
+ if (link->is_dig_mapping_flexible && link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
else
link_enc = link->link_enc;
/* We need to do this before the link training to ensure the idle pattern in SST
* mode will be sent right after the link training
*/
- link_enc->funcs->connect_dig_be_to_fe(link_enc,
+ if (dp_get_link_encoding_format(&current_setting) == DP_8b_10b_ENCODING) {
+ link_enc->funcs->connect_dig_be_to_fe(link_enc,
pipe_ctx->stream_res.stream_enc->id, true);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
+ }
for (j = 0; j < attempts; ++j) {
@@ -1836,10 +2308,22 @@ bool perform_link_training_with_retries(
dc_link_dp_perform_link_training_skip_aux(link, &current_setting);
return true;
} else {
- status = dc_link_dp_perform_link_training(
- link,
- &current_setting,
- skip_video_pattern);
+ /** @todo Consolidate USB4 DP and DPx.x training. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) {
+ status = dc_link_dpia_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+
+ /* Transmit idle pattern once training successful. */
+ if (status == LINK_TRAINING_SUCCESS)
+ dp_set_hw_test_pattern(link, DP_TEST_PATTERN_VIDEO_MODE,
+ NULL, 0);
+ } else {
+ status = dc_link_dp_perform_link_training(link,
+ &current_setting,
+ skip_video_pattern);
+ }
+
if (status == LINK_TRAINING_SUCCESS)
return true;
}
@@ -1862,12 +2346,16 @@ bool perform_link_training_with_retries(
if (type == dc_connection_none)
break;
} else if (do_fallback) {
+ uint32_t req_bw;
+ uint32_t link_bw;
+
decide_fallback_link_setting(*link_setting, &current_setting, status);
/* Fail link training if reduced link bandwidth no longer meets
* stream requirements.
*/
- if (dc_bandwidth_in_kbps_from_timing(&stream->timing) <
- dc_link_bandwidth_kbps(link, &current_setting))
+ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing);
+ link_bw = dc_link_bandwidth_kbps(link, &current_setting);
+ if (req_bw > link_bw)
break;
}
@@ -1969,8 +2457,14 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_cs_id, link_settings);
/* Set FEC enable */
- fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
- dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+#endif
+ fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable;
+ dp_set_fec_ready(link, fec_enable);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ }
+#endif
if (lt_overrides->alternate_scrambler_reset) {
if (*lt_overrides->alternate_scrambler_reset)
@@ -2012,23 +2506,59 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
* Still shouldn't turn off dp_receiver (DPCD:600h)
*/
if (link_down == true) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_link_settings link_settings = link->cur_link_settings;
+#endif
dp_disable_link_phy(link, link->connector_signal);
- dp_set_fec_ready(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING)
+#endif
+ dp_set_fec_ready(link, false);
}
link->sync_lt_in_progress = false;
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link)
+{
+ enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+
+ if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR20)
+ lttpr_max_link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+ lttpr_max_link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR10)
+ lttpr_max_link_rate = LINK_RATE_UHBR10;
+
+ return lttpr_max_link_rate;
+}
+#endif
+
bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
{
+ struct link_encoder *link_enc = NULL;
+
if (!max_link_enc_cap) {
DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
return false;
}
- if (link->link_enc->funcs->get_max_link_cap) {
- link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+ /* Links supporting dynamically assigned link encoders will be assigned the
+ * next available encoder if one is not already assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
+
+ if (link_enc && link_enc->funcs->get_max_link_cap) {
+ link_enc->funcs->get_max_link_cap(link_enc, max_link_enc_cap);
return true;
}
@@ -2041,9 +2571,31 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dc_link_rate lttpr_max_link_rate;
+#endif
+ struct link_encoder *link_enc = NULL;
+
+ /* Links supporting dynamically assigned link encoders will be assigned the
+ * next available encoder if one is not already assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc);
+ } else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
/* get max link encoder capability */
- link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap);
+ if (link_enc)
+ link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (max_link_cap.link_rate >= LINK_RATE_UHBR10 &&
+ !link->hpo_dp_link_enc)
+ max_link_cap.link_rate = LINK_RATE_HIGH3;
+#endif
/* Lower link settings based on sink's link cap */
if (link->reported_link_cap.lane_count < max_link_cap.lane_count)
@@ -2064,8 +2616,15 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count)
max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ lttpr_max_link_rate = get_lttpr_max_link_rate(link);
+
+ if (lttpr_max_link_rate < max_link_cap.link_rate)
+ max_link_cap.link_rate = lttpr_max_link_rate;
+#else
if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate)
max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate;
+#endif
DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
__func__,
@@ -2075,7 +2634,7 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-enum dc_status read_hpd_rx_irq_data(
+static enum dc_status read_hpd_rx_irq_data(
struct dc_link *link,
union hpd_irq_data *irq_data)
{
@@ -2206,17 +2765,32 @@ bool dp_verify_link_cap(
enum link_training_result status;
union hpd_irq_data irq_data;
- if (link->dc->debug.skip_detection_link_training) {
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ max_link_cap = get_max_link_cap(link);
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+
+ /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */
+ if (link->dc->debug.skip_detection_link_training ||
+ link->is_dig_mapping_flexible) {
+ /* TODO - should we check link encoder's max link caps here?
+ * How do we know which link encoder to check from?
+ */
link->verified_link_cap = *known_limit_link_setting;
return true;
+ } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign &&
+ !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) {
+ link->verified_link_cap = initial_link_settings;
+ return true;
}
memset(&irq_data, 0, sizeof(irq_data));
success = false;
skip_link_training = false;
- max_link_cap = get_max_link_cap(link);
-
/* Grant extended timeout request */
if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) {
uint8_t grant = link->dpcd_caps.lttpr_caps.max_ext_timeout & 0x80;
@@ -2224,6 +2798,10 @@ bool dp_verify_link_cap(
core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ reset_dp_hpo_stream_encoders_for_link(link);
+#endif
/* TODO implement override and monitor patch later */
/* try to train the link from high to low to
@@ -2234,19 +2812,13 @@ bool dp_verify_link_cap(
dp_cs_id = get_clock_source_id(link);
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
cur_link_setting = initial_link_settings;
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
*/
- if (link->link_enc->features.flags.bits.DP_IS_USB_C &&
+ if (link->link_enc && link->link_enc->features.flags.bits.DP_IS_USB_C &&
link->dc->debug.usbc_combo_phy_reset_wa) {
dp_enable_link_phy(link, link->connector_signal, dp_cs_id, cur);
dp_disable_link_phy(link, link->connector_signal);
@@ -2333,7 +2905,7 @@ bool dp_verify_link_cap_with_retries(
link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED;
break;
} else if (dp_verify_link_cap(link,
- &link->reported_link_cap,
+ known_limit_link_setting,
&fail_count) && fail_count == 0) {
success = true;
break;
@@ -2348,11 +2920,21 @@ bool dp_verify_mst_link_cap(
{
struct dc_link_settings max_link_cap = {0};
- max_link_cap = get_max_link_cap(link);
- link->verified_link_cap = get_common_supported_link_settings(
- link->reported_link_cap,
- max_link_cap);
-
+ if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_8b_10b_ENCODING) {
+ max_link_cap = get_max_link_cap(link);
+ link->verified_link_cap = get_common_supported_link_settings(
+ link->reported_link_cap,
+ max_link_cap);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->reported_link_cap) ==
+ DP_128b_132b_ENCODING) {
+ dp_verify_link_cap_with_retries(link,
+ &link->reported_link_cap,
+ LINK_TRAINING_MAX_VERIFY_RETRY);
+ }
+#endif
return true;
}
@@ -2379,7 +2961,17 @@ static struct dc_link_settings get_common_supported_link_settings(
* We map it to the maximum supported link rate that
* is smaller than MAX_LINK_BW in this case.
*/
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link_settings.link_rate > LINK_RATE_UHBR20) {
+ link_settings.link_rate = LINK_RATE_UHBR20;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR20 &&
+ link_settings.link_rate > LINK_RATE_UHBR13_5) {
+ link_settings.link_rate = LINK_RATE_UHBR13_5;
+ } else if (link_settings.link_rate < LINK_RATE_UHBR10 &&
+ link_settings.link_rate > LINK_RATE_HIGH3) {
+#else
if (link_settings.link_rate > LINK_RATE_HIGH3) {
+#endif
link_settings.link_rate = LINK_RATE_HIGH3;
} else if (link_settings.link_rate < LINK_RATE_HIGH3
&& link_settings.link_rate > LINK_RATE_HIGH2) {
@@ -2424,6 +3016,14 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count)
static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate)
{
switch (link_rate) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_UHBR20:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_HIGH3;
+#endif
case LINK_RATE_HIGH3:
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
@@ -2458,11 +3058,55 @@ static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate)
return LINK_RATE_HIGH2;
case LINK_RATE_HIGH2:
return LINK_RATE_HIGH3;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case LINK_RATE_HIGH3:
+ return LINK_RATE_UHBR10;
+ case LINK_RATE_UHBR10:
+ return LINK_RATE_UHBR13_5;
+ case LINK_RATE_UHBR13_5:
+ return LINK_RATE_UHBR20;
+#endif
default:
return LINK_RATE_UNKNOWN;
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static bool decide_fallback_link_setting_max_bw_policy(
+ const struct dc_link_settings *max,
+ struct dc_link_settings *cur)
+{
+ uint8_t cur_idx = 0, next_idx;
+ bool found = false;
+
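+ /* dp_lt_fallbacks[] is expected to be ordered from highest to lowest
+ * bandwidth, so falling back amounts to advancing to the next entry that
+ * still fits within the max link settings.
+ */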
+ while (cur_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find current index */
+ if (dp_lt_fallbacks[cur_idx].lane_count == cur->lane_count &&
+ dp_lt_fallbacks[cur_idx].link_rate == cur->link_rate)
+ break;
+ else
+ cur_idx++;
+
+ next_idx = cur_idx + 1;
+
+ while (next_idx < ARRAY_SIZE(dp_lt_fallbacks))
+ /* find next index */
+ if (dp_lt_fallbacks[next_idx].lane_count <= max->lane_count &&
+ dp_lt_fallbacks[next_idx].link_rate <= max->link_rate)
+ break;
+ else
+ next_idx++;
+
+ if (next_idx < ARRAY_SIZE(dp_lt_fallbacks)) {
+ cur->lane_count = dp_lt_fallbacks[next_idx].lane_count;
+ cur->link_rate = dp_lt_fallbacks[next_idx].link_rate;
+ found = true;
+ }
+
+ return found;
+}
+#endif
+
/*
* function: set link rate and lane count fallback based
* on current link setting and last link training result
@@ -2478,6 +3122,11 @@ static bool decide_fallback_link_setting(
{
if (!current_link_setting)
return false;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING)
+ return decide_fallback_link_setting_max_bw_policy(&initial_link_settings,
+ current_link_setting);
+#endif
switch (training_result) {
case LINK_TRAINING_CR_FAIL_LANE0:
@@ -2743,7 +3392,7 @@ void decide_link_settings(struct dc_stream_state *stream,
}
/*************************Short Pulse IRQ***************************/
-static bool allow_hpd_rx_irq(const struct dc_link *link)
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link)
{
/*
* Don't handle RX IRQ unless one of following is met:
@@ -2793,6 +3442,8 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
if (psr_error_status.bits.LINK_CRC_ERROR ||
psr_error_status.bits.RFB_STORAGE_ERROR ||
psr_error_status.bits.VSC_SDP_ERROR) {
+ bool allow_active;
+
/* Acknowledge and clear error bits */
dm_helpers_dp_write_dpcd(
link->ctx,
@@ -2802,8 +3453,10 @@ static bool handle_hpd_irq_psr_sink(struct dc_link *link)
sizeof(psr_error_status.raw));
/* PSR error, disable and re-enable PSR */
- dc_link_set_psr_allow_active(link, false, true, false);
- dc_link_set_psr_allow_active(link, true, true, false);
+ allow_active = false;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
+ allow_active = true;
+ dc_link_set_psr_allow_active(link, &allow_active, true, false, NULL);
return true;
} else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS ==
@@ -2850,20 +3503,24 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
union phy_test_pattern dpcd_test_pattern;
union lane_adjust dpcd_lane_adjustment[2];
unsigned char dpcd_post_cursor_2_adjustment = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned char test_pattern_buffer[
+ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 -
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#else
unsigned char test_pattern_buffer[
(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+#endif
unsigned int test_pattern_size = 0;
enum dp_test_pattern test_pattern;
- struct dc_link_training_settings link_settings;
union lane_adjust dpcd_lane_adjust;
unsigned int lane;
struct link_training_settings link_training_settings;
- int i = 0;
dpcd_test_pattern.raw = 0;
memset(dpcd_lane_adjustment, 0, sizeof(dpcd_lane_adjustment));
- memset(&link_settings, 0, sizeof(link_settings));
+ memset(&link_training_settings, 0, sizeof(link_training_settings));
/* get phy test pattern and pattern parameters from DP receiver */
core_link_read_dpcd(
@@ -2918,6 +3575,35 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
case PHY_TEST_PATTERN_CP2520_3:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case PHY_TEST_PATTERN_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case PHY_TEST_PATTERN_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case PHY_TEST_PATTERN_PRBS9:
+ test_pattern = DP_TEST_PATTERN_PRBS9;
+ break;
+ case PHY_TEST_PATTERN_PRBS11:
+ test_pattern = DP_TEST_PATTERN_PRBS11;
+ break;
+ case PHY_TEST_PATTERN_PRBS15:
+ test_pattern = DP_TEST_PATTERN_PRBS15;
+ break;
+ case PHY_TEST_PATTERN_PRBS23:
+ test_pattern = DP_TEST_PATTERN_PRBS23;
+ break;
+ case PHY_TEST_PATTERN_PRBS31:
+ test_pattern = DP_TEST_PATTERN_PRBS31;
+ break;
+ case PHY_TEST_PATTERN_264BIT_CUSTOM:
+ test_pattern = DP_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case PHY_TEST_PATTERN_SQUARE_PULSE:
+ test_pattern = DP_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
test_pattern = DP_TEST_PATTERN_VIDEO_MODE;
break;
@@ -2933,30 +3619,59 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
test_pattern_size);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) {
+ test_pattern_size = 1; // Square pattern data is 1 byte (DP spec)
+ core_link_read_dpcd(
+ link,
+ DP_PHY_SQUARE_PATTERN,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_264BIT_CUSTOM) {
+ test_pattern_size = (DP_TEST_264BIT_CUSTOM_PATTERN_263_256-
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0) + 1;
+ core_link_read_dpcd(
+ link,
+ DP_TEST_264BIT_CUSTOM_PATTERN_7_0,
+ test_pattern_buffer,
+ test_pattern_size);
+ }
+#endif
+
/* prepare link training settings */
- link_settings.link = link->cur_link_settings;
+ link_training_settings.link_settings = link->cur_link_settings;
for (lane = 0; lane <
(unsigned int)(link->cur_link_settings.lane_count);
lane++) {
dpcd_lane_adjust.raw =
get_nibble_at_index(&dpcd_lane_adjustment[0].raw, lane);
- link_settings.lane_settings[lane].VOLTAGE_SWING =
- (enum dc_voltage_swing)
- (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
- link_settings.lane_settings[lane].PRE_EMPHASIS =
- (enum dc_pre_emphasis)
- (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
- link_settings.lane_settings[lane].POST_CURSOR2 =
- (enum dc_post_cursor2)
- ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
- }
-
- for (i = 0; i < 4; i++)
- link_training_settings.lane_settings[i] =
- link_settings.lane_settings[i];
- link_training_settings.link_settings = link_settings.link;
- link_training_settings.allow_invalid_msa_timing_param = false;
+ if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_8b_10b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].VOLTAGE_SWING =
+ (enum dc_voltage_swing)
+ (dpcd_lane_adjust.bits.VOLTAGE_SWING_LANE);
+ link_training_settings.hw_lane_settings[lane].PRE_EMPHASIS =
+ (enum dc_pre_emphasis)
+ (dpcd_lane_adjust.bits.PRE_EMPHASIS_LANE);
+ link_training_settings.hw_lane_settings[lane].POST_CURSOR2 =
+ (enum dc_post_cursor2)
+ ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03);
+ }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if (dp_get_link_encoding_format(&link->cur_link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw =
+ dpcd_lane_adjust.tx_ffe.PRESET_VALUE;
+ }
+#endif
+ }
+
+ dp_hw_to_dpcd_lane_settings(&link_training_settings,
+ link_training_settings.hw_lane_settings,
+ link_training_settings.dpcd_lane_settings);
/*Usage: Measure DP physical lane signal
* by DP SI test equipment automatically.
* PHY test pattern request is generated by equipment via HPD interrupt.
@@ -3177,7 +3892,7 @@ static void dp_test_get_audio_test_data(struct dc_link *link, bool disable_video
}
}
-static void handle_automated_test(struct dc_link *link)
+void dc_link_dp_handle_automated_test(struct dc_link *link)
{
union test_request test_request;
union test_response test_response;
@@ -3226,17 +3941,50 @@ static void handle_automated_test(struct dc_link *link)
sizeof(test_response));
}
-bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss)
+void dc_link_dp_handle_link_loss(struct dc_link *link)
{
- union hpd_irq_data hpd_irq_dpcd_data = { { { {0} } } };
- union device_service_irq device_service_clear = { { 0 } };
+ int i;
+ struct pipe_ctx *pipe_ctx;
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+ break;
+ }
+
+ if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
+ return;
+
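+ /* Toggle every enabled stream on this link (skipping ODM secondary pipes):
+ * disable then re-enable so the link is retrained with streams restored.
+ */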
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_disable_stream(pipe_ctx);
+ }
+ }
+
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
+ pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) {
+ core_link_enable_stream(link->dc->current_state, pipe_ctx);
+ }
+ }
+}
+
+bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work)
+{
+ union hpd_irq_data hpd_irq_dpcd_data = {0};
+ union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
- struct pipe_ctx *pipe_ctx;
- int i;
if (out_link_loss)
*out_link_loss = false;
+
+ if (has_left_work)
+ *has_left_work = false;
/* For use cases related to down stream connection status change,
* PSR and device auto test, refer to function handle_sst_hpd_irq
* in DAL2.1*/
@@ -3268,11 +4016,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
&device_service_clear.raw,
sizeof(device_service_clear.raw));
device_service_clear.raw = 0;
- handle_automated_test(link);
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_automated_test(link);
return false;
}
- if (!allow_hpd_rx_irq(link)) {
+ if (!dc_link_dp_allow_hpd_rx_irq(link)) {
DC_LOG_HW_HPD_IRQ("%s: skipping HPD handling on %d\n",
__func__, link->link_index);
return false;
@@ -3286,12 +4037,18 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
* so do not handle as a normal sink status change interrupt.
*/
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return true;
+ }
/* check if we have MST msg and return since we poll for it */
- if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY)
+ if (hpd_irq_dpcd_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
return false;
+ }
/* For now we only handle 'Downstream port status' case.
* If we got sink count changed it means
@@ -3308,29 +4065,10 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
sizeof(hpd_irq_dpcd_data),
"Status: ");
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
- break;
- }
-
- if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
- return false;
-
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_disable_stream(pipe_ctx);
- }
-
- for (i = 0; i < MAX_PIPES; i++) {
- pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
- if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off &&
- pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe)
- core_link_enable_stream(link->dc->current_state, pipe_ctx);
- }
+ if (defer_handling && has_left_work)
+ *has_left_work = true;
+ else
+ dc_link_dp_handle_link_loss(link);
status = false;
if (out_link_loss)
@@ -3554,6 +4292,43 @@ static void get_active_converter_info(
dp_hw_fw_revision.ieee_fw_rev,
sizeof(dp_hw_fw_revision.ieee_fw_rev));
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 &&
+ link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ union dp_dfp_cap_ext dfp_cap_ext;
+ memset(&dfp_cap_ext, '\0', sizeof (dfp_cap_ext));
+ core_link_read_dpcd(
+ link,
+ DP_DFP_CAPABILITY_EXTENSION_SUPPORT,
+ dfp_cap_ext.raw,
+ sizeof(dfp_cap_ext.raw));
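+ /* Multi-byte fields (max pixel rate, max H/V active) are little-endian;
+ * combine the low and high bytes below.
+ */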
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.supported = dfp_cap_ext.fields.supported;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps =
+ dfp_cap_ext.fields.max_pixel_rate_in_mps[0] +
+ (dfp_cap_ext.fields.max_pixel_rate_in_mps[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width =
+ dfp_cap_ext.fields.max_video_h_active_width[0] +
+ (dfp_cap_ext.fields.max_video_h_active_width[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height =
+ dfp_cap_ext.fields.max_video_v_active_height[0] +
+ (dfp_cap_ext.fields.max_video_v_active_height[1] << 8);
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.encoding_format_caps =
+ dfp_cap_ext.fields.encoding_format_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.rgb_color_depth_caps =
+ dfp_cap_ext.fields.rgb_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr444_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr444_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr422_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr422_color_depth_caps;
+ link->dpcd_caps.dongle_caps.dfp_cap_ext.ycbcr420_color_depth_caps =
+ dfp_cap_ext.fields.ycbcr420_color_depth_caps;
+ DC_LOG_DP2("DFP capability extension is read at link %d", link->link_index);
+ DC_LOG_DP2("\tdfp_cap_ext.supported = %s", link->dpcd_caps.dongle_caps.dfp_cap_ext.supported ? "true" : "false");
+ DC_LOG_DP2("\tdfp_cap_ext.max_pixel_rate_in_mps = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_pixel_rate_in_mps);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width);
+ DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height);
+ }
+#endif
}
static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
@@ -3613,7 +4388,12 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link)
bool dp_retrieve_lttpr_cap(struct dc_link *link)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t lttpr_dpcd_data[8];
+ bool allow_lttpr_non_transparent_mode = 0;
+#else
uint8_t lttpr_dpcd_data[6];
+#endif
bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable;
bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware;
enum dc_status status = DC_ERROR_UNEXPECTED;
@@ -3621,6 +4401,16 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 &&
+ link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) {
+ allow_lttpr_non_transparent_mode = 1;
+ } else if (link->dc->config.allow_lttpr_non_transparent_mode.bits.DP1_4A &&
+ !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ allow_lttpr_non_transparent_mode = 1;
+ }
+#endif
+
/*
* Logic to determine LTTPR mode
*/
@@ -3628,17 +4418,31 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
if (vbios_lttpr_enable && vbios_lttpr_interop)
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else if (!vbios_lttpr_enable && vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (allow_lttpr_non_transparent_mode)
+#else
if (link->dc->config.allow_lttpr_non_transparent_mode)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
else
link->lttpr_mode = LTTPR_MODE_TRANSPARENT;
} else if (!vbios_lttpr_enable && !vbios_lttpr_interop) {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support)
+#else
if (!link->dc->config.allow_lttpr_non_transparent_mode
|| !link->dc->caps.extended_aux_timeout_support)
+#endif
link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
else
link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check DP tunnel LTTPR mode debug option. */
+ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
+ link->dc->debug.dpia_debug.bits.force_non_lttpr)
+ link->lttpr_mode = LTTPR_MODE_NON_LTTPR;
+#endif
if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT || link->lttpr_mode == LTTPR_MODE_TRANSPARENT) {
/* By reading LTTPR capability, RX assumes that we will enable
@@ -3678,8 +4482,19 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link)
lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT -
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw =
+ lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+
+ link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw =
+ lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+#endif
+
/* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */
is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 &&
+ link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff &&
link->dpcd_caps.lttpr_caps.max_lane_count > 0 &&
link->dpcd_caps.lttpr_caps.max_lane_count <= 4 &&
link->dpcd_caps.lttpr_caps.revision.raw >= 0x14);
@@ -3728,6 +4543,8 @@ static bool retrieve_link_cap(struct dc_link *link)
LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
is_lttpr_present = dp_retrieve_lttpr_cap(link);
+ /* Read DP tunneling information. */
+ status = dpcd_get_tunneling_device_data(link);
status = core_link_read_dpcd(link, DP_SET_POWER,
&dpcd_power_state, sizeof(dpcd_power_state));
@@ -3928,16 +4745,82 @@ static bool retrieve_link_cap(struct dc_link *link)
DP_DSC_SUPPORT,
link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw));
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) {
+ status = core_link_read_dpcd(
+ link,
+ DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
+ sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+ DC_LOG_DSC("DSC branch decoder capability is read at link %d", link->link_index);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_0 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_0);
+ DC_LOG_DSC("\tBRANCH_OVERALL_THROUGHPUT_1 = 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_OVERALL_THROUGHPUT_1);
+ DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x",
+ link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH);
+ }
+#else
status = core_link_read_dpcd(
link,
DP_DSC_BRANCH_OVERALL_THROUGHPUT_0,
link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw));
+#endif
}
if (!dpcd_read_sink_ext_caps(link))
link->dpcd_sink_ext_caps.raw = 0;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV];
+
+ if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) {
+ DC_LOG_DP2("128b/132b encoding is supported at link %d", link->link_index);
+
+ core_link_read_dpcd(link,
+ DP_128b_132b_SUPPORTED_LINK_RATES,
+ &link->dpcd_caps.dp_128b_132b_supported_link_rates.raw,
+ sizeof(link->dpcd_caps.dp_128b_132b_supported_link_rates.raw));
+ if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR20)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR20;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR13_5;
+ else if (link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR10)
+ link->reported_link_cap.link_rate = LINK_RATE_UHBR10;
+ else
+ dm_error("%s: Invalid RX 128b_132b_supported_link_rates\n", __func__);
+ DC_LOG_DP2("128b/132b supported link rates is read at link %d", link->link_index);
+ DC_LOG_DP2("\tmax 128b/132b link rate support is %d.%d GHz",
+ link->reported_link_cap.link_rate / 100,
+ link->reported_link_cap.link_rate % 100);
+
+ core_link_read_dpcd(link,
+ DP_SINK_VIDEO_FALLBACK_FORMATS,
+ &link->dpcd_caps.fallback_formats.raw,
+ sizeof(link->dpcd_caps.fallback_formats.raw));
+ DC_LOG_DP2("sink video fallback format is read at link %d", link->link_index);
+ if (link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1920x1080@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1280x720_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1280x720@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.bits.dp_1024x768_60Hz_24bpp_support)
+ DC_LOG_DP2("\t1024x768@60Hz 24bpp fallback format supported");
+ if (link->dpcd_caps.fallback_formats.raw == 0) {
+ DC_LOG_DP2("\tno supported fallback formats, assume 1920x1080@60Hz 24bpp is supported");
+ link->dpcd_caps.fallback_formats.bits.dp_1920x1080_60Hz_24bpp_support = 1;
+ }
+
+ core_link_read_dpcd(link,
+ DP_FEC_CAPABILITY_1,
+ &link->dpcd_caps.fec_cap1.raw,
+ sizeof(link->dpcd_caps.fec_cap1.raw));
+ DC_LOG_DP2("FEC CAPABILITY 1 is read at link %d", link->link_index);
+ if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE)
+ DC_LOG_DP2("\tFEC aggregated error counters are supported");
+ }
+#endif
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -4368,7 +5251,7 @@ bool dc_link_dp_set_test_pattern(
* MuteAudioEndpoint(pPathMode->pDisplayPath, true);
*/
/* Blank stream */
- pipes->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+ pipes->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
}
dp_set_hw_test_pattern(link, test_pattern,
@@ -4408,6 +5291,35 @@ bool dc_link_dp_set_test_pattern(
case DP_TEST_PATTERN_CP2520_3:
pattern = PHY_TEST_PATTERN_CP2520_3;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS1;
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ pattern = PHY_TEST_PATTERN_128b_132b_TPS2;
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ pattern = PHY_TEST_PATTERN_PRBS9;
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ pattern = PHY_TEST_PATTERN_PRBS11;
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ pattern = PHY_TEST_PATTERN_PRBS15;
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ pattern = PHY_TEST_PATTERN_PRBS23;
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ pattern = PHY_TEST_PATTERN_PRBS31;
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
+ pattern = PHY_TEST_PATTERN_264BIT_CUSTOM;
+ break;
+ case DP_TEST_PATTERN_SQUARE_PULSE:
+ pattern = PHY_TEST_PATTERN_SQUARE_PULSE;
+ break;
+#endif
default:
return false;
}
@@ -4670,7 +5582,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4690,7 +5602,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready)
link_enc->funcs->fec_set_ready(link_enc, true);
link->fec_state = dc_link_fec_ready;
} else {
- link_enc->funcs->fec_set_ready(link->link_enc, false);
+ link_enc->funcs->fec_set_ready(link_enc, false);
link->fec_state = dc_link_fec_not_ready;
dm_error("dpcd write failed to set fec_ready");
}
@@ -4717,8 +5629,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(
- link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -4938,7 +5849,7 @@ bool is_edp_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timin
uint8_t link_bw_set;
uint8_t link_rate_set;
uint32_t req_bw;
- union lane_count_set lane_count_set = { {0} };
+ union lane_count_set lane_count_set = {0};
ASSERT(link || crtc_timing); // invalid input
@@ -4983,6 +5894,227 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings
if ((link_settings->link_rate >= LINK_RATE_LOW) &&
(link_settings->link_rate <= LINK_RATE_HIGH3))
return DP_8b_10b_ENCODING;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ else if ((link_settings->link_rate >= LINK_RATE_UHBR10) &&
+ (link_settings->link_rate <= LINK_RATE_UHBR20))
+ return DP_128b_132b_ENCODING;
+#endif
return DP_UNKNOWN_ENCODING;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link)
+{
+ struct dc_link_settings link_settings = {0};
+
+ if (!dc_is_dp_signal(link->connector_signal))
+ return DP_UNKNOWN_ENCODING;
+
+ if (link->preferred_link_setting.lane_count !=
+ LANE_COUNT_UNKNOWN &&
+ link->preferred_link_setting.link_rate !=
+ LINK_RATE_UNKNOWN) {
+ link_settings = link->preferred_link_setting;
+ } else {
+ decide_mst_link_settings(link, &link_settings);
+ }
+
+ return dp_get_link_encoding_format(&link_settings);
+}
+
+// TODO - DP2.0 Link: Fix get_lane_status to handle LTTPR offset (SST and MST)
+static void get_lane_status(
+ struct dc_link *link,
+ uint32_t lane_count,
+ union lane_status *status,
+ union lane_align_status_updated *status_updated)
+{
+ unsigned int lane;
+ uint8_t dpcd_buf[3] = {0};
+
+ if (status == NULL || status_updated == NULL) {
+ return;
+ }
+
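+ /* DP_LANE0_1_STATUS, DP_LANE2_3_STATUS and DP_LANE_ALIGN_STATUS_UPDATED
+ * are contiguous, so all three bytes are read in a single transaction.
+ */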
+ core_link_read_dpcd(
+ link,
+ DP_LANE0_1_STATUS,
+ dpcd_buf,
+ sizeof(dpcd_buf));
+
+ for (lane = 0; lane < lane_count; lane++) {
+ status[lane].raw = get_nibble_at_index(&dpcd_buf[0], lane);
+ }
+
+ status_updated->raw = dpcd_buf[2];
+}
+
+bool dpcd_write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate)
+{
+ const uint8_t vc_id = 1; /// VC ID always 1 for SST
+ const uint8_t start_time_slot = 0; /// Always start at time slot 0 for SST
+ bool result = false;
+ uint8_t req_slot_count = 0;
+ struct fixed31_32 avg_time_slots_per_mtp = { 0 };
+ union payload_table_update_status update_status = { 0 };
+ const uint32_t max_retries = 30;
+ uint32_t retries = 0;
+
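+ /* For SST the payload is described by a single VC (vc_id 1) starting at
+ * time slot 0; only the requested slot count varies with the stream.
+ */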
+ if (allocate) {
+ avg_time_slots_per_mtp = calculate_sst_avg_time_slots_per_mtp(stream, link);
+ req_slot_count = dc_fixpt_ceil(avg_time_slots_per_mtp);
+ } else {
+ /// Leave req_slot_count = 0 if allocate is false.
+ }
+
+ /// Write DPCD 2C0 = 1 to start updating
+ update_status.bits.VC_PAYLOAD_TABLE_UPDATED = 1;
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ /// Program the changes in DPCD 1C0 - 1C2
+ ASSERT(vc_id == 1);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_SET,
+ &vc_id,
+ 1);
+
+ ASSERT(start_time_slot == 0);
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_START_TIME_SLOT,
+ &start_time_slot,
+ 1);
+
+ ASSERT(req_slot_count <= MAX_MTP_SLOT_COUNT); /// Validation should filter out modes that exceed link BW
+ core_link_write_dpcd(
+ link,
+ DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT,
+ &req_slot_count,
+ 1);
+
+ /// Poll until DPCD 2C0 reads 1
+ /// Try for up to 150ms (30 retries, with a 5ms delay after each attempt)
+
+ while (retries < max_retries) {
+ if (core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1) == DC_OK) {
+ if (update_status.bits.VC_PAYLOAD_TABLE_UPDATED == 1) {
+ DC_LOG_DP2("SST Update Payload: downstream payload table updated.");
+ result = true;
+ break;
+ }
+ } else {
+ union dpcd_rev dpcdRev;
+
+ if (core_link_read_dpcd(
+ link,
+ DP_DPCD_REV,
+ &dpcdRev.raw,
+ 1) != DC_OK) {
+ DC_LOG_ERROR("SST Update Payload: Unable to read DPCD revision "
+ "of sink while polling payload table "
+ "updated status bit.");
+ break;
+ }
+ }
+ retries++;
+ udelay(5000);
+ }
+
+ if (!result && retries == max_retries) {
+ DC_LOG_ERROR("SST Update Payload: Payload table not updated after retries, "
+ "continue on. Something is wrong with the branch.");
+ // TODO - DP2.0 Payload: Read and log the payload table from downstream branch
+ }
+
+ proposed_table->stream_count = 1; /// Always 1 stream for SST
+ proposed_table->stream_allocations[0].slot_count = req_slot_count;
+ proposed_table->stream_allocations[0].vcp_id = vc_id;
+
+ return result;
+}
+
+bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link)
+{
+ /*
+ * wait for ACT handled
+ */
+ int i;
+ const int act_retries = 30;
+ enum act_return_status result = ACT_FAILED;
+ union payload_table_update_status update_status = {0};
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX];
+ union lane_align_status_updated lane_status_updated;
+
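+ /* Poll for up to act_retries * 5ms (~150ms) for the downstream branch to
+ * report ACT_HANDLED, bailing out early if the link drops.
+ */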
+ for (i = 0; i < act_retries; i++) {
+ get_lane_status(link, link->cur_link_settings.lane_count, dpcd_lane_status, &lane_status_updated);
+
+ if (!dp_is_cr_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_ch_eq_done(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) ||
+ !dp_is_interlane_aligned(lane_status_updated)) {
+ DC_LOG_ERROR("SST Update Payload: Link loss occurred while "
+ "polling for ACT handled.");
+ result = ACT_LINK_LOST;
+ break;
+ }
+ core_link_read_dpcd(
+ link,
+ DP_PAYLOAD_TABLE_UPDATE_STATUS,
+ &update_status.raw,
+ 1);
+
+ if (update_status.bits.ACT_HANDLED == 1) {
+ DC_LOG_DP2("SST Update Payload: ACT handled by downstream.");
+ result = ACT_SUCCESS;
+ break;
+ }
+
+ udelay(5000);
+ }
+
+ if (result == ACT_FAILED) {
+ DC_LOG_ERROR("SST Update Payload: ACT still not handled after retries, "
+ "continue on. Something is wrong with the branch.");
+ }
+
+ return (result == ACT_SUCCESS);
+}
+
+struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link)
+{
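+ /* avg_time_slots_per_mtp = stream bandwidth /
+ * (link bandwidth / MAX_MTP_SLOT_COUNT), i.e. the fraction of each MTP's
+ * time slots required by this stream.
+ */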
+ struct fixed31_32 link_bw_effective =
+ dc_fixpt_from_int(
+ dc_link_bandwidth_kbps(link, &link->cur_link_settings));
+ struct fixed31_32 timeslot_bw_effective =
+ dc_fixpt_div_int(link_bw_effective, MAX_MTP_SLOT_COUNT);
+ struct fixed31_32 timing_bw =
+ dc_fixpt_from_int(
+ dc_bandwidth_in_kbps_from_timing(&stream->timing));
+ struct fixed31_32 avg_time_slots_per_mtp =
+ dc_fixpt_div(timing_bw, timeslot_bw_effective);
+
+ return avg_time_slots_per_mtp;
+}
+
+bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx)
+{
+ return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
+ pipe_ctx->stream->link->hpo_dp_link_enc &&
+ dc_is_dp_signal(pipe_ctx->stream->signal));
+}
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index 72970e49800a..7f25c11f4248 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -176,12 +176,15 @@ static void dpcd_reduce_address_range(
uint8_t * const reduced_data,
const uint32_t reduced_size)
{
- const uint32_t reduced_end_address = END_ADDRESS(reduced_address, reduced_size);
- const uint32_t extended_end_address = END_ADDRESS(extended_address, extended_size);
const uint32_t offset = reduced_address - extended_address;
- if (extended_end_address == reduced_end_address && extended_address == reduced_address)
- return; /* extended and reduced address ranges point to the same data */
+ /*
+ * If the extended and reduced buffers are the same, the address range was
+ * never extended, so there is no separate allocation to free; the data is
+ * already in the original buffer (reduced_data).
+ */
+ if (extended_data == reduced_data)
+ return;
memcpy(&extended_data[offset], reduced_data, reduced_size);
kfree(extended_data);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
new file mode 100644
index 000000000000..b1c9f77d6bf4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpia.c
@@ -0,0 +1,962 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc.h"
+#include "dc_link_dpia.h"
+#include "inc/core_status.h"
+#include "dc_link.h"
+#include "dc_link_dp.h"
+#include "dpcd_defs.h"
+#include "link_hwss.h"
+#include "dm_helpers.h"
+#include "dmub/inc/dmub_cmd.h"
+#include "inc/link_dpcd.h"
+
+#define DC_LOGGER \
+ link->ctx->logger
+
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link)
+{
+ enum dc_status status = DC_OK;
+ uint8_t dpcd_dp_tun_data[3] = {0};
+ uint8_t dpcd_topology_data[DPCD_USB4_TOPOLOGY_ID_LEN] = {0};
+ uint8_t i = 0;
+
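+ /* Read tunneling capability, DP-in adapter info and USB4 driver ID, plus
+ * the USB4 router topology ID; buffer offsets below are relative to the
+ * read base address DP_TUNNELING_CAPABILITIES_SUPPORT.
+ */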
+ status = core_link_read_dpcd(link,
+ DP_TUNNELING_CAPABILITIES_SUPPORT,
+ dpcd_dp_tun_data,
+ sizeof(dpcd_dp_tun_data));
+
+ status = core_link_read_dpcd(link,
+ DP_USB4_ROUTER_TOPOLOGY_ID,
+ dpcd_topology_data,
+ sizeof(dpcd_topology_data));
+
+ link->dpcd_caps.usb4_dp_tun_info.dp_tun_cap.raw =
+ dpcd_dp_tun_data[DP_TUNNELING_CAPABILITIES_SUPPORT -
+ DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.dpia_info.raw =
+ dpcd_dp_tun_data[DP_IN_ADAPTER_INFO - DP_TUNNELING_CAPABILITIES_SUPPORT];
+ link->dpcd_caps.usb4_dp_tun_info.usb4_driver_id =
+ dpcd_dp_tun_data[DP_USB4_DRIVER_ID - DP_TUNNELING_CAPABILITIES_SUPPORT];
+
+ for (i = 0; i < DPCD_USB4_TOPOLOGY_ID_LEN; i++)
+ link->dpcd_caps.usb4_dp_tun_info.usb4_topology_id[i] = dpcd_topology_data[i];
+
+ return status;
+}
+
+/* Configure link as prescribed in link_setting; set LTTPR mode; and
+ * Initialize link training settings.
+ * Abort link training if sink unplug detected.
+ *
+ * @param link DPIA link being trained.
+ * @param[in] link_setting Lane count, link rate and downspread control.
+ * @param[out] lt_settings Link settings and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_configure_link(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ struct link_training_settings *lt_settings)
+{
+ enum dc_status status;
+ bool fec_enable;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) configuring\n - LTTPR mode(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->lttpr_mode);
+
+ dp_decide_training_settings(link,
+ link_setting,
+ lt_settings);
+
+ status = dpcd_configure_channel_coding(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ /* Configure lttpr mode */
+ status = dpcd_configure_lttpr_mode(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ /* Set link rate, lane count and spread. */
+ status = dpcd_set_link_settings(link, lt_settings);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ if (link->preferred_training_settings.fec_enable)
+ fec_enable = *link->preferred_training_settings.fec_enable;
+ else
+ fec_enable = true;
+ status = dp_set_fec_ready(link, fec_enable);
+ if (status != DC_OK && !link->hpd_status)
+ return LINK_TRAINING_ABORT;
+
+ return LINK_TRAINING_SUCCESS;
+}
+
+static enum dc_status core_link_send_set_config(struct dc_link *link,
+ uint8_t msg_type,
+ uint8_t msg_data)
+{
+ struct set_config_cmd_payload payload;
+ enum set_config_status set_config_result = SET_CONFIG_PENDING;
+
+ /* prepare set_config payload */
+ payload.msg_type = msg_type;
+ payload.msg_data = msg_data;
+
+ if (!link->ddc->ddc_pin && !link->aux_access_disabled &&
+ (dm_helpers_dmub_set_config_sync(link->ctx, link,
+ &payload, &set_config_result) == -1)) {
+ return DC_ERROR_UNEXPECTED;
+ }
+
+ /* set_config should return ACK if successful */
+ return (set_config_result == SET_CONFIG_ACK_RECEIVED) ? DC_OK : DC_ERROR_UNEXPECTED;
+}
+
+/* Build SET_CONFIG message data payload for specified message type. */
+static uint8_t dpia_build_set_config_data(enum dpia_set_config_type type,
+ struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ union dpia_set_config_data data;
+
+ data.raw = 0;
+
+ switch (type) {
+ case DPIA_SET_CFG_SET_LINK:
+ data.set_link.mode = link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT ? 1 : 0;
+ break;
+ case DPIA_SET_CFG_SET_PHY_TEST_MODE:
+ break;
+ case DPIA_SET_CFG_SET_VSPE:
+ /* Assume all lanes have same drive settings. */
+ data.set_vspe.swing = lt_settings->lane_settings[0].VOLTAGE_SWING;
+ data.set_vspe.pre_emph = lt_settings->lane_settings[0].PRE_EMPHASIS;
+ data.set_vspe.max_swing_reached =
+ lt_settings->lane_settings[0].VOLTAGE_SWING ==
+ VOLTAGE_SWING_MAX_LEVEL ? 1 : 0;
+ data.set_vspe.max_pre_emph_reached =
+ lt_settings->lane_settings[0].PRE_EMPHASIS ==
+ PRE_EMPHASIS_MAX_LEVEL ? 1 : 0;
+ break;
+ default:
+ ASSERT(false); /* Message type not supported by helper function. */
+ break;
+ }
+
+ return data.raw;
+}
+
+/* Convert DC training pattern to DPIA training stage. */
+static enum dpia_set_config_ts convert_trng_ptn_to_trng_stg(enum dc_dp_training_pattern tps)
+{
+ enum dpia_set_config_ts ts;
+
+ switch (tps) {
+ case DP_TRAINING_PATTERN_SEQUENCE_1:
+ ts = DPIA_TS_TPS1;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_2:
+ ts = DPIA_TS_TPS2;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_3:
+ ts = DPIA_TS_TPS3;
+ break;
+ case DP_TRAINING_PATTERN_SEQUENCE_4:
+ ts = DPIA_TS_TPS4;
+ break;
+ default:
+ ts = DPIA_TS_DPRX_DONE;
+ ASSERT(false); /* TPS not supported by helper function. */
+ break;
+ }
+
+ return ts;
+}
+
+/* Write training pattern to DPCD. */
+static enum dc_status dpcd_set_lt_pattern(struct dc_link *link,
+ enum dc_dp_training_pattern pattern,
+ uint32_t hop)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+ enum dc_status status;
+
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ /* DpcdAddress_TrainingPatternSet */
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET =
+ dc_dp_training_pattern_to_dpcd_training_pattern(link, pattern);
+
+ dpcd_pattern.v1_4.SCRAMBLING_DISABLE =
+ dc_dp_initialize_scrambling_data_symbols(link, pattern);
+
+ if (hop != DPRX) {
+ DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n 0x%X pattern = %x\n",
+ __func__,
+ hop,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ } else {
+ DC_LOG_HW_LINK_TRAINING("%s\n 0x%X pattern = %x\n",
+ __func__,
+ dpcd_tps_offset,
+ dpcd_pattern.v1_4.TRAINING_PATTERN_SET);
+ }
+
+ status = core_link_write_dpcd(link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ return status;
+}
+
+/* Execute clock recovery phase of link training for the specified hop in the
+ * display path, in non-transparent mode:
+ * - Driver issues both DPCD and SET_CONFIG transactions.
+ * - TPS1 is transmitted for any hops downstream of DPOA.
+ * - Drive (VS/PE) only transmitted for the hop immediately downstream of DPOA.
+ * - CR for the first hop (DPTX-to-DPIA) is assumed to be successful.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_cr_non_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ enum dc_status status;
+ uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
+ uint32_t retry_count = 0;
+ /* From DP spec, CR read interval is always 100us. */
+ uint32_t wait_time_microsec = TRAINING_AUX_RD_INTERVAL;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint8_t set_cfg_data;
+ enum dpia_set_config_ts ts;
+
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
+ * Fix inherited from perform_clock_recovery_sequence() -
+ * the DP equivalent of this function:
+ * Required for Synaptics MST hub which can put the LT in
+ * infinite loop by switching the VS between level 0 and level 1
+ * continuously.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ /* DPTX-to-DPIA */
+ if (hop == repeater_cnt) {
+ /* Send SET_CONFIG(SET_LINK:LC,LR,LTTPR) to notify DPOA that
+ * non-transparent link training has started.
+ * This also enables the transmission of clk_sync packets.
+ */
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_LINK,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_LINK,
+ set_cfg_data);
+ /* CR for this hop is considered successful as long as
+ * SET_CONFIG message is acknowledged by DPOA.
+ */
+ if (status == DC_OK)
+ result = LINK_TRAINING_SUCCESS;
+ else
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* DPOA-to-x */
+ /* Instruct DPOA to transmit TPS1 then update DPCD. */
+ if (retry_count == 0) {
+ ts = convert_trng_ptn_to_trng_stg(lt_settings->pattern_for_cr);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ /* Update DPOA drive settings then DPCD. DPOA does only adjusts
+ * drive settings for hops immediately downstream.
+ */
+ if (hop == repeater_cnt - 1) {
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+ status = dpcd_set_lane_settings(link, lt_settings, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Check if clock recovery successful. */
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ result = dp_get_cr_failure(lane_count, dpcd_lane_status);
+
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* Count number of attempts with same drive settings.
+ * Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient.
+ */
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->lane_settings,
+ lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ /* Abort link training if clock recovery failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
+ " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ retry_count);
+
+ return result;
+}
+
+/* Execute clock recovery phase of link training in transparent LTTPR mode:
+ * - Driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
+ * - Driver writes TPS1 to DPCD to kick off training.
+ * - Clock recovery (CR) for link is handled by DPOA, which reports result to DPIA on completion.
+ * - DPIA communicates result to driver by updating CR status when driver reads DPCD.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_training_cr_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+ enum dc_status status;
+ uint32_t retries_cr = 0; /* Number of consecutive attempts with same VS or PE. */
+ uint32_t retry_count = 0;
+ uint32_t wait_time_microsec = lt_settings->cr_pattern_time;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+
+ /* Cap of LINK_TRAINING_MAX_CR_RETRY attempts at clock recovery.
+ * Fix inherited from perform_clock_recovery_sequence() -
+ * the DP equivalent of this function:
+ * Required for Synaptics MST hub which can put the LT in an
+ * infinite loop by switching the VS between level 0 and level 1
+ * continuously.
+ */
+ while ((retries_cr < LINK_TRAINING_MAX_RETRY_COUNT) &&
+ (retry_count < LINK_TRAINING_MAX_CR_RETRY)) {
+ /* Write TPS1 (not VS or PE) to DPCD to start CR phase.
+ * DPIA sends SET_CONFIG(SET_LINK) to notify DPOA to
+ * start link training.
+ */
+ if (retry_count == 0) {
+ status = dpcd_set_lt_pattern(link, lt_settings->pattern_for_cr, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Check if clock recovery successful. */
+ if (dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ result = dp_get_cr_failure(lane_count, dpcd_lane_status);
+
+ if (dp_is_max_vs_reached(lt_settings))
+ break;
+
+ /* Count number of attempts with same drive settings.
+ * Note: settings are the same for all lanes,
+ * so comparing first lane is sufficient.
+ */
+ if ((lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET ==
+ dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE)
+ && (lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET ==
+ dpcd_lane_adjust[0].bits.PRE_EMPHASIS_LANE))
+ retries_cr++;
+ else
+ retries_cr = 0;
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ retry_count++;
+ }
+
+ /* Abort link training if clock recovery failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n"
+ " -hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ DPRX,
+ result,
+ retry_count);
+
+ return result;
+}
+
+/* Execute clock recovery phase of link training for specified hop in display
+ * path.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_cr_phase(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_CR_FAIL_LANE0;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ result = dpia_training_cr_non_transparent(link, lt_settings, hop);
+ else
+ result = dpia_training_cr_transparent(link, lt_settings);
+
+ return result;
+}
+
+/* Return status read interval during equalization phase. */
+static uint32_t dpia_get_eq_aux_rd_interval(const struct dc_link *link,
+ const struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ uint32_t wait_time_microsec;
+
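+ /* DPRX uses the EQ pattern time from lt_settings; LTTPR hops use the AUX
+ * read interval reported in their capability field (per the DP spec,
+ * 0 maps to 400 us and 1-4 to 4/8/12/16 ms).
+ */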
+ if (hop == DPRX)
+ wait_time_microsec = lt_settings->eq_pattern_time;
+ else
+ wait_time_microsec =
+ dp_translate_training_aux_read_interval(
+ link->dpcd_caps.lttpr_caps.aux_rd_interval[hop - 1]);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Check debug option for extending aux read interval. */
+ if (link->dc->debug.dpia_debug.bits.extend_aux_rd_interval)
+ wait_time_microsec = DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US;
+#endif
+
+ return wait_time_microsec;
+}
+
+/* Execute equalization phase of link training for specified hop in display
+ * path in non-transparent mode:
+ * - driver issues both DPCD and SET_CONFIG transactions.
+ * - TPSx is transmitted for any hop downstream of DPOA.
+ * - Drive settings (VS/PE) are only transmitted for the hop immediately downstream of DPOA.
+ * - EQ for the first hop (DPTX-to-DPIA) is assumed to be successful.
+ * - DPRX EQ is only reported successful when both the DPRX and DPIA requirements
+ * (clock sync packets sent) are fulfilled.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_eq_non_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ uint32_t retries_eq = 0;
+ enum dc_status status;
+ enum dc_dp_training_pattern tr_pattern;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+ uint8_t set_cfg_data;
+ enum dpia_set_config_ts ts;
+
+ /* Training pattern is TPS4 for repeater;
+ * TPS2/3/4 for DPRX depending on what it supports.
+ */
+ if (hop == DPRX)
+ tr_pattern = lt_settings->pattern_for_eq;
+ else
+ tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4;
+
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+ /* DPTX-to-DPIA equalization always successful. */
+ if (hop == repeater_cnt) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Instruct DPOA to transmit TPSn then update DPCD. */
+ if (retries_eq == 0) {
+ ts = convert_trng_ptn_to_trng_stg(tr_pattern);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ ts);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ status = dpcd_set_lt_pattern(link, tr_pattern, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ /* Update DPOA drive settings then DPCD. DPOA only adjusts
+ * drive settings for hop immediately downstream.
+ */
+ if (hop == repeater_cnt - 1) {
+ set_cfg_data = dpia_build_set_config_data(DPIA_SET_CFG_SET_VSPE,
+ link,
+ lt_settings);
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_VSPE,
+ set_cfg_data);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+ status = dpcd_set_lane_settings(link, lt_settings, hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* Extend wait time on the second equalization attempt on the final hop to
+ * ensure clock sync packets have been sent.
+ */
+ if (hop == DPRX && retries_eq == 1)
+ wait_time_microsec = max(wait_time_microsec, (uint32_t)DPIA_CLK_SYNC_DELAY);
+ else
+ wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, hop);
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ hop);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* CR can still fail during EQ phase. Fail training if CR fails. */
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ /* Abort link training if equalization failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
+ " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ retries_eq);
+
+ return result;
+}
+
+/* Execute equalization phase of link training in transparent LTTPR mode:
+ * - driver only issues DPCD transactions and leaves USB4 tunneling (SET_CONFIG) messages to DPIA.
+ * - driver writes TPSx to DPCD to notify DPIA that it is in the equalization phase.
+ * - equalization (EQ) for link is handled by DPOA, which reports result to DPIA on completion.
+ * - DPIA communicates result to driver by updating EQ status when driver reads DPCD.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ */
+static enum link_training_result dpia_training_eq_transparent(struct dc_link *link,
+ struct link_training_settings *lt_settings)
+{
+ enum link_training_result result = LINK_TRAINING_EQ_FAIL_EQ;
+ uint32_t retries_eq = 0;
+ enum dc_status status;
+ enum dc_dp_training_pattern tr_pattern = lt_settings->pattern_for_eq;
+ uint32_t wait_time_microsec;
+ enum dc_lane_count lane_count = lt_settings->link_settings.lane_count;
+ union lane_align_status_updated dpcd_lane_status_updated = { {0} };
+ union lane_status dpcd_lane_status[LANE_COUNT_DP_MAX] = { { {0} } };
+ union lane_adjust dpcd_lane_adjust[LANE_COUNT_DP_MAX] = { { {0} } };
+
+ wait_time_microsec = dpia_get_eq_aux_rd_interval(link, lt_settings, DPRX);
+
+ for (retries_eq = 0; retries_eq < LINK_TRAINING_MAX_RETRY_COUNT; retries_eq++) {
+ if (retries_eq == 0) {
+ status = dpcd_set_lt_pattern(link, tr_pattern, DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+ }
+
+ dp_wait_for_training_aux_rd_interval(link, wait_time_microsec);
+
+ /* Read status and adjustment requests from DPCD. */
+ status = dp_get_lane_status_and_lane_adjust(
+ link,
+ lt_settings,
+ dpcd_lane_status,
+ &dpcd_lane_status_updated,
+ dpcd_lane_adjust,
+ DPRX);
+ if (status != DC_OK) {
+ result = LINK_TRAINING_ABORT;
+ break;
+ }
+
+ /* CR can still fail during EQ phase. Fail training if CR fails. */
+ if (!dp_is_cr_done(lane_count, dpcd_lane_status)) {
+ result = LINK_TRAINING_EQ_FAIL_CR;
+ break;
+ }
+
+ if (dp_is_ch_eq_done(lane_count, dpcd_lane_status) &&
+ dp_is_symbol_locked(link->cur_link_settings.lane_count, dpcd_lane_status) &&
+ dp_is_interlane_aligned(dpcd_lane_status_updated)) {
+ result = LINK_TRAINING_SUCCESS;
+ break;
+ }
+
+ /* Update VS/PE. */
+ dp_decide_lane_settings(lt_settings, dpcd_lane_adjust,
+ lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings);
+ }
+
+ /* Abort link training if equalization failed due to HPD unplug. */
+ if (!link->hpd_status)
+ result = LINK_TRAINING_ABORT;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n"
+ " - hop(%d)\n - result(%d)\n - retries(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ DPRX,
+ result,
+ retries_eq);
+
+ return result;
+}
+
+/* Execute equalization phase of link training for specified hop in display
+ * path.
+ *
+ * @param link DPIA link being trained.
+ * @param lt_settings link_setting and drive settings (voltage swing and pre-emphasis).
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_eq_phase(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t hop)
+{
+ enum link_training_result result;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ result = dpia_training_eq_non_transparent(link, lt_settings, hop);
+ else
+ result = dpia_training_eq_transparent(link, lt_settings);
+
+ return result;
+}
+
+/* End training of specified hop in display path. */
+static enum dc_status dpcd_clear_lt_pattern(struct dc_link *link, uint32_t hop)
+{
+ union dpcd_training_pattern dpcd_pattern = { {0} };
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+ enum dc_status status;
+
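+ /* Repeater hops clear the TRAINING_PATTERN_SET field in their own
+ * PHY_REPEATER register block rather than the DPRX one.
+ */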
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ status = core_link_write_dpcd(link,
+ dpcd_tps_offset,
+ &dpcd_pattern.raw,
+ sizeof(dpcd_pattern.raw));
+
+ return status;
+}
+
+/* End training of specified hop in display path.
+ *
+ * In transparent LTTPR mode:
+ * - driver clears training pattern for the specified hop in DPCD.
+ * In non-transparent LTTPR mode:
+ * - in addition to clearing training pattern, driver issues USB4 tunneling
+ * (SET_CONFIG) messages to notify DPOA when training is done for first hop
+ * (DPTX-to-DPIA) and last hop (DPRX).
+ *
+ * @param link DPIA link being trained.
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static enum link_training_result dpia_training_end(struct dc_link *link,
+ uint32_t hop)
+{
+ enum link_training_result result = LINK_TRAINING_SUCCESS;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ enum dc_status status;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) {
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ if (hop == repeater_cnt) { /* DPTX-to-DPIA */
+ /* Send SET_CONFIG(SET_TRAINING:0xff) to notify DPOA that
+ * DPTX-to-DPIA hop trained. No DPCD write needed for first hop.
+ */
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ DPIA_TS_UFP_DONE);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ } else { /* DPOA-to-x */
+ /* Write 0x0 to TRAINING_PATTERN_SET */
+ status = dpcd_clear_lt_pattern(link, hop);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ /* Notify DPOA that non-transparent link training of DPRX done. */
+ if (hop == DPRX && result != LINK_TRAINING_ABORT) {
+ status = core_link_send_set_config(link,
+ DPIA_SET_CFG_SET_TRAINING,
+ DPIA_TS_DPRX_DONE);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ } else { /* non-LTTPR or transparent LTTPR. */
+ /* Write 0x0 to TRAINING_PATTERN_SET */
+ status = dpcd_clear_lt_pattern(link, hop);
+ if (status != DC_OK)
+ result = LINK_TRAINING_ABORT;
+ }
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) end\n - hop(%d)\n - result(%d)\n - LTTPR mode(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ hop,
+ result,
+ link->lttpr_mode);
+
+ return result;
+}
+
+/* When aborting training of specified hop in display path, clean up by:
+ * - Attempting to clear DPCD TRAINING_PATTERN_SET, LINK_BW_SET and LANE_COUNT_SET.
+ * - Sending SET_CONFIG(SET_LINK) with lane count and link rate set to 0.
+ *
+ * @param link DPIA link being trained.
+ * @param hop The Hop in display path. DPRX = 0.
+ */
+static void dpia_training_abort(struct dc_link *link, uint32_t hop)
+{
+ uint8_t data = 0;
+ uint32_t dpcd_tps_offset = DP_TRAINING_PATTERN_SET;
+
+ DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) aborting\n - LTTPR mode(%d)\n - HPD(%d)\n",
+ __func__,
+ link->link_id.enum_id - ENUM_ID_1,
+ link->lttpr_mode,
+ link->hpd_status);
+
+ /* Abandon clean-up if sink unplugged. */
+ if (!link->hpd_status)
+ return;
+
+ if (hop != DPRX)
+ dpcd_tps_offset = DP_TRAINING_PATTERN_SET_PHY_REPEATER1 +
+ ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (hop - 1));
+
+ core_link_write_dpcd(link, dpcd_tps_offset, &data, 1);
+ core_link_write_dpcd(link, DP_LINK_BW_SET, &data, 1);
+ core_link_write_dpcd(link, DP_LANE_COUNT_SET, &data, 1);
+ core_link_send_set_config(link, DPIA_SET_CFG_SET_LINK, data);
+}
+
+enum link_training_result dc_link_dpia_perform_link_training(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern)
+{
+ enum link_training_result result;
+ struct link_training_settings lt_settings;
+ uint8_t repeater_cnt = 0; /* Number of hops/repeaters in display path. */
+ int8_t repeater_id; /* Current hop. */
+
+ /* Configure link as prescribed in link_setting and set LTTPR mode. */
+ result = dpia_configure_link(link, link_setting, &lt_settings);
+ if (result != LINK_TRAINING_SUCCESS)
+ return result;
+
+ if (link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT)
+ repeater_cnt = dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt);
+
+ /* Train each hop in turn starting with the one closest to DPTX.
+ * In transparent or non-LTTPR mode, train only the final hop (DPRX).
+ */
+ for (repeater_id = repeater_cnt; repeater_id >= 0; repeater_id--) {
+ /* Clock recovery. */
+ result = dpia_training_cr_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Equalization. */
+ result = dpia_training_eq_phase(link, &lt_settings, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+
+ /* Stop training hop. */
+ result = dpia_training_end(link, repeater_id);
+ if (result != LINK_TRAINING_SUCCESS)
+ break;
+ }
+
+ /* Double-check link status if training successful; gracefully abort
+ * training of current hop if training failed due to message tunneling
+ * failure; end training of hop if training ended conventionally and
+ * falling back to lower bandwidth settings is possible.
+ */
+ if (result == LINK_TRAINING_SUCCESS) {
+ msleep(5);
+ result = dp_check_link_loss_status(link, &lt_settings);
+ } else if (result == LINK_TRAINING_ABORT) {
+ dpia_training_abort(link, repeater_id);
+ } else {
+ dpia_training_end(link, repeater_id);
+ }
+ return result;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
index de80a9ea4cfa..72b0f8594b4a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c
@@ -1,5 +1,4 @@
-/*
- * Copyright 2021 Advanced Micro Devices, Inc.
+/* Copyright 2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -35,78 +34,128 @@ static bool is_dig_link_enc_stream(struct dc_stream_state *stream)
int i;
/* Loop over created link encoder objects. */
- for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- link_enc = stream->ctx->dc->res_pool->link_encoders[i];
-
- if (link_enc &&
- ((uint32_t)stream->signal & link_enc->output_signals)) {
- if (dc_is_dp_signal(stream->signal)) {
- /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
- struct dc_link_settings link_settings = {0};
-
- decide_link_settings(stream, &link_settings);
- if ((link_settings.link_rate >= LINK_RATE_LOW) &&
- link_settings.link_rate <= LINK_RATE_HIGH3) {
+ if (stream) {
+ for (i = 0; i < stream->ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ link_enc = stream->ctx->dc->res_pool->link_encoders[i];
+
+ /* Need to check the link signal type rather than the stream signal type, which may not
+ * yet match.
+ */
+ if (link_enc && ((uint32_t)stream->link->connector_signal & link_enc->output_signals)) {
+ if (dc_is_dp_signal(stream->signal)) {
+ /* DIGs do not support DP2.0 streams with 128b/132b encoding. */
+ struct dc_link_settings link_settings = {0};
+
+ decide_link_settings(stream, &link_settings);
+ if ((link_settings.link_rate >= LINK_RATE_LOW) &&
+ link_settings.link_rate <= LINK_RATE_HIGH3) {
+ is_dig_stream = true;
+ break;
+ }
+ } else {
is_dig_stream = true;
break;
}
- } else {
- is_dig_stream = true;
- break;
}
}
}
-
return is_dig_stream;
}
-/* Update DIG link encoder resource tracking variables in dc_state. */
-static void update_link_enc_assignment(
+static struct link_enc_assignment get_assignment(struct dc *dc, int i)
+{
+ struct link_enc_assignment assignment;
+
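+ /* While a newly built state is being committed (LINK_ENC_CFG_TRANSIENT),
+ * the staged transient_assignments apply; otherwise read the committed
+ * link_enc_assignments of the current state.
+ */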
+ if (dc->current_state->res_ctx.link_enc_cfg_ctx.mode == LINK_ENC_CFG_TRANSIENT)
+ assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i];
+ else /* LINK_ENC_CFG_STEADY */
+ assignment = dc->current_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ return assignment;
+}
+
+/* Return stream using DIG link encoder resource. NULL if unused. */
+static struct dc_stream_state *get_stream_using_link_enc(
+ struct dc_state *state,
+ enum engine_id eng_id)
+{
+ struct dc_stream_state *stream = NULL;
+ int i;
+
+ for (i = 0; i < state->stream_count; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
+ stream = state->streams[i];
+ break;
+ }
+ }
+
+ return stream;
+}
+
+static void remove_link_enc_assignment(
struct dc_state *state,
struct dc_stream_state *stream,
- enum engine_id eng_id,
- bool add_enc)
+ enum engine_id eng_id)
{
int eng_idx;
- int stream_idx;
int i;
if (eng_id != ENGINE_ID_UNKNOWN) {
eng_idx = eng_id - ENGINE_ID_DIGA;
- stream_idx = -1;
- /* Index of stream in dc_state used to update correct entry in
+ /* Use the stream pointer to locate the correct entry in the
* link_enc_assignments table.
*/
- for (i = 0; i < state->stream_count; i++) {
- if (stream == state->streams[i]) {
- stream_idx = i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid && assignment.stream == stream) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid = false;
+ /* Only add link encoder back to availability pool if not being
+ * used by any other stream (i.e. removing SST stream or last MST stream).
+ */
+ if (get_stream_using_link_enc(state, eng_id) == NULL)
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = eng_id;
+ stream->link_enc = NULL;
break;
}
}
+ }
+}
+
+static void add_link_enc_assignment(
+ struct dc_state *state,
+ struct dc_stream_state *stream,
+ enum engine_id eng_id)
+{
+ int eng_idx;
+ int i;
- /* Update link encoder assignments table, link encoder availability
- * pool and link encoder assigned to stream in state.
- * Add/remove encoder resource to/from stream.
+ if (eng_id != ENGINE_ID_UNKNOWN) {
+ eng_idx = eng_id - ENGINE_ID_DIGA;
+
+ /* Use the stream pointer to locate the correct entry in the
+ * link_enc_assignments table.
*/
- if (stream_idx != -1) {
- if (add_enc) {
- state->res_ctx.link_enc_assignments[stream_idx] = (struct link_enc_assignment){
+ for (i = 0; i < state->stream_count; i++) {
+ if (stream == state->streams[i]) {
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i] = (struct link_enc_assignment){
.valid = true,
.ep_id = (struct display_endpoint_id) {
.link_id = stream->link->link_id,
.ep_type = stream->link->ep_type},
- .eng_id = eng_id};
- state->res_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
+ .eng_id = eng_id,
+ .stream = stream};
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] = ENGINE_ID_UNKNOWN;
stream->link_enc = stream->ctx->dc->res_pool->link_encoders[eng_idx];
- } else {
- state->res_ctx.link_enc_assignments[stream_idx].valid = false;
- state->res_ctx.link_enc_avail[eng_idx] = eng_id;
- stream->link_enc = NULL;
+ break;
}
- } else {
- dm_output_to_console("%s: Stream not found in dc_state.\n", __func__);
}
+
+ /* Attempted to add an encoder assignment for a stream not in dc_state. */
+ ASSERT(i != state->stream_count);
}
}
@@ -119,7 +168,7 @@ static enum engine_id find_first_avail_link_enc(
int i;
for (i = 0; i < ctx->dc->res_pool->res_cap->num_dig_link_enc; i++) {
- eng_id = state->res_ctx.link_enc_avail[i];
+ eng_id = state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i];
if (eng_id != ENGINE_ID_UNKNOWN)
break;
}
@@ -127,30 +176,65 @@ static enum engine_id find_first_avail_link_enc(
return eng_id;
}
-/* Return stream using DIG link encoder resource. NULL if unused. */
-static struct dc_stream_state *get_stream_using_link_enc(
+/* Check for availability of link encoder eng_id. */
+static bool is_avail_link_enc(struct dc_state *state, enum engine_id eng_id, struct dc_stream_state *stream)
+{
+ bool is_avail = false;
+ int eng_idx = eng_id - ENGINE_ID_DIGA;
+
+ /* An encoder is available if it is still in the availability pool. */
+ if (eng_id != ENGINE_ID_UNKNOWN && state->res_ctx.link_enc_cfg_ctx.link_enc_avail[eng_idx] != ENGINE_ID_UNKNOWN) {
+ is_avail = true;
+ } else {
+ struct dc_stream_state *stream_assigned = NULL;
+
+ /* MST streams share the same link and should share the same encoder.
+ * If a stream that has already been assigned a link encoder uses the
+ * same link as the stream checking for availability, it is an MST stream
+ * and should use the same link encoder.
+ */
+ stream_assigned = get_stream_using_link_enc(state, eng_id);
+ if (stream_assigned && stream != stream_assigned && stream->link == stream_assigned->link)
+ is_avail = true;
+ }
+
+ return is_avail;
+}
+
+/* Test for display_endpoint_id equality. */
+static bool are_ep_ids_equal(struct display_endpoint_id *lhs, struct display_endpoint_id *rhs)
+{
+ bool are_equal = false;
+
+ if (lhs->link_id.id == rhs->link_id.id &&
+ lhs->link_id.enum_id == rhs->link_id.enum_id &&
+ lhs->link_id.type == rhs->link_id.type &&
+ lhs->ep_type == rhs->ep_type)
+ are_equal = true;
+
+ return are_equal;
+}
+
+static struct link_encoder *get_link_enc_used_by_link(
struct dc_state *state,
- enum engine_id eng_id)
+ const struct dc_link *link)
{
- struct dc_stream_state *stream = NULL;
- int stream_idx = -1;
+ struct link_encoder *link_enc = NULL;
+ struct display_endpoint_id ep_id;
int i;
+ ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
+
for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
- if (assignment.valid && (assignment.eng_id == eng_id)) {
- stream_idx = i;
- break;
- }
+ if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id))
+ link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
}
- if (stream_idx != -1)
- stream = state->streams[stream_idx];
- else
- dm_output_to_console("%s: No stream using DIG(%d).\n", __func__, eng_id);
-
- return stream;
+ return link_enc;
}
void link_enc_cfg_init(
@@ -161,10 +245,12 @@ void link_enc_cfg_init(
for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
if (dc->res_pool->link_encoders[i])
- state->res_ctx.link_enc_avail[i] = (enum engine_id) i;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = (enum engine_id) i;
else
- state->res_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
+ state->res_ctx.link_enc_cfg_ctx.link_enc_avail[i] = ENGINE_ID_UNKNOWN;
}
+
+ state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
void link_enc_cfg_link_encs_assign(
@@ -175,11 +261,17 @@ void link_enc_cfg_link_encs_assign(
{
enum engine_id eng_id = ENGINE_ID_UNKNOWN;
int i;
+ int j;
+
+ ASSERT(state->stream_count == stream_count);
/* Release DIG link encoder resources before running assignment algorithm. */
for (i = 0; i < stream_count; i++)
dc->res_pool->funcs->link_enc_unassign(state, streams[i]);
+ for (i = 0; i < MAX_PIPES; i++)
+ ASSERT(state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].valid == false);
+
/* (a) Assign DIG link encoders to physical (unmappable) endpoints first. */
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
@@ -191,26 +283,82 @@ void link_enc_cfg_link_encs_assign(
/* Physical endpoints have a fixed mapping to DIG link encoders. */
if (!stream->link->is_dig_mapping_flexible) {
eng_id = stream->link->eng_id;
- update_link_enc_assignment(state, stream, eng_id, true);
+ add_link_enc_assignment(state, stream, eng_id);
+ }
+ }
+
+ /* (b) Retain previous assignments for mappable endpoints if encoders still available. */
+ eng_id = ENGINE_ID_UNKNOWN;
+
+ if (state != dc->current_state) {
+ struct dc_state *prev_state = dc->current_state;
+
+ for (i = 0; i < stream_count; i++) {
+ struct dc_stream_state *stream = state->streams[i];
+
+ /* Skip stream if not supported by DIG link encoder. */
+ if (!is_dig_link_enc_stream(stream))
+ continue;
+
+ if (!stream->link->is_dig_mapping_flexible)
+ continue;
+
+ for (j = 0; j < prev_state->stream_count; j++) {
+ struct dc_stream_state *prev_stream = prev_state->streams[j];
+
+ if (stream == prev_stream && stream->link == prev_stream->link &&
+ prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].valid) {
+ eng_id = prev_state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j].eng_id;
+ if (is_avail_link_enc(state, eng_id, stream))
+ add_link_enc_assignment(state, stream, eng_id);
+ }
+ }
}
}
- /* (b) Then assign encoders to mappable endpoints. */
+ /* (c) Then assign encoders to remaining mappable endpoints. */
eng_id = ENGINE_ID_UNKNOWN;
for (i = 0; i < stream_count; i++) {
struct dc_stream_state *stream = streams[i];
/* Skip stream if not supported by DIG link encoder. */
- if (!is_dig_link_enc_stream(stream))
+ if (!is_dig_link_enc_stream(stream)) {
+ ASSERT(stream->link->is_dig_mapping_flexible != true);
continue;
+ }
/* Mappable endpoints have a flexible mapping to DIG link encoders. */
if (stream->link->is_dig_mapping_flexible) {
- eng_id = find_first_avail_link_enc(stream->ctx, state);
- update_link_enc_assignment(state, stream, eng_id, true);
+ struct link_encoder *link_enc = NULL;
+
+ /* Skip if encoder assignment retained in step (b) above. */
+ if (stream->link_enc)
+ continue;
+
+ /* For MST, multiple streams will share the same link / display
+ * endpoint. These streams should use the same link encoder
+ * assigned to that endpoint.
+ */
+ link_enc = get_link_enc_used_by_link(state, stream->link);
+ if (link_enc == NULL)
+ eng_id = find_first_avail_link_enc(stream->ctx, state);
+ else
+ eng_id = link_enc->preferred_engine;
+ add_link_enc_assignment(state, stream, eng_id);
}
}
+
+ link_enc_cfg_validate(dc, state);
+
+ /* Update transient assignments. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ dc->current_state->res_ctx.link_enc_cfg_ctx.transient_assignments[i] =
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+ }
+
+ /* Current state mode will be set to steady once this state is committed. */
+ state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
}
void link_enc_cfg_link_enc_unassign(
@@ -226,16 +374,16 @@ void link_enc_cfg_link_enc_unassign(
if (stream->link_enc)
eng_id = stream->link_enc->preferred_engine;
- update_link_enc_assignment(state, stream, eng_id, false);
+ remove_link_enc_assignment(state, stream, eng_id);
}
bool link_enc_cfg_is_transmitter_mappable(
- struct dc_state *state,
+ struct dc *dc,
struct link_encoder *link_enc)
{
bool is_mappable = false;
enum engine_id eng_id = link_enc->preferred_engine;
- struct dc_stream_state *stream = get_stream_using_link_enc(state, eng_id);
+ struct dc_stream_state *stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
if (stream)
is_mappable = stream->link->is_dig_mapping_flexible;
@@ -243,73 +391,217 @@ bool link_enc_cfg_is_transmitter_mappable(
return is_mappable;
}
-struct dc_link *link_enc_cfg_get_link_using_link_enc(
- struct dc_state *state,
+struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc(
+ struct dc *dc,
enum engine_id eng_id)
{
- struct dc_link *link = NULL;
- int stream_idx = -1;
+ struct dc_stream_state *stream = NULL;
int i;
- for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
- if (assignment.valid && (assignment.eng_id == eng_id)) {
- stream_idx = i;
+ if ((assignment.valid == true) && (assignment.eng_id == eng_id)) {
+ stream = assignment.stream;
break;
}
}
- if (stream_idx != -1)
- link = state->streams[stream_idx]->link;
- else
- dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
+ return stream;
+}
+
+struct dc_link *link_enc_cfg_get_link_using_link_enc(
+ struct dc *dc,
+ enum engine_id eng_id)
+{
+ struct dc_link *link = NULL;
+ struct dc_stream_state *stream = NULL;
+
+ stream = link_enc_cfg_get_stream_using_link_enc(dc, eng_id);
+
+ if (stream)
+ link = stream->link;
+ // dm_output_to_console("%s: No link using DIG(%d).\n", __func__, eng_id);
return link;
}
struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
- struct dc_state *state,
+ struct dc *dc,
const struct dc_link *link)
{
struct link_encoder *link_enc = NULL;
struct display_endpoint_id ep_id;
- int stream_idx = -1;
int i;
ep_id = (struct display_endpoint_id) {
.link_id = link->link_id,
.ep_type = link->ep_type};
- for (i = 0; i < state->stream_count; i++) {
- struct link_enc_assignment assignment = state->res_ctx.link_enc_assignments[i];
-
- if (assignment.valid &&
- assignment.ep_id.link_id.id == ep_id.link_id.id &&
- assignment.ep_id.link_id.enum_id == ep_id.link_id.enum_id &&
- assignment.ep_id.link_id.type == ep_id.link_id.type &&
- assignment.ep_id.ep_type == ep_id.ep_type) {
- stream_idx = i;
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+
+ if (assignment.valid == true && are_ep_ids_equal(&assignment.ep_id, &ep_id)) {
+ link_enc = link->dc->res_pool->link_encoders[assignment.eng_id - ENGINE_ID_DIGA];
break;
}
}
- if (stream_idx != -1)
- link_enc = state->streams[stream_idx]->link_enc;
-
return link_enc;
}
-struct link_encoder *link_enc_cfg_get_next_avail_link_enc(
- const struct dc *dc,
- const struct dc_state *state)
+struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc)
{
struct link_encoder *link_enc = NULL;
- enum engine_id eng_id;
+ enum engine_id encs_assigned[MAX_DIG_LINK_ENCODERS];
+ int i;
+
+ for (i = 0; i < MAX_DIG_LINK_ENCODERS; i++)
+ encs_assigned[i] = ENGINE_ID_UNKNOWN;
+
+ /* Add assigned encoders to list. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+
+ if (assignment.valid)
+ encs_assigned[assignment.eng_id - ENGINE_ID_DIGA] = assignment.eng_id;
+ }
+
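+ /* The first encoder that no assignment references is the next available one. */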
+ for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) {
+ if (encs_assigned[i] == ENGINE_ID_UNKNOWN) {
+ link_enc = dc->res_pool->link_encoders[i];
+ break;
+ }
+ }
+
+ return link_enc;
+}
- eng_id = find_first_avail_link_enc(dc->ctx, state);
- if (eng_id != ENGINE_ID_UNKNOWN)
- link_enc = dc->res_pool->link_encoders[eng_id - ENGINE_ID_DIGA];
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
+ struct dc *dc,
+ const struct dc_stream_state *stream)
+{
+ struct link_encoder *link_enc;
+
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, stream->link);
return link_enc;
}
+
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link)
+{
+ bool is_avail = true;
+ int i;
+
+ /* An encoder is not available if it has already been assigned to a different endpoint. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = get_assignment(dc, i);
+ struct display_endpoint_id ep_id = (struct display_endpoint_id) {
+ .link_id = link->link_id,
+ .ep_type = link->ep_type};
+
+ if (assignment.valid && assignment.eng_id == eng_id && !are_ep_ids_equal(&ep_id, &assignment.ep_id)) {
+ is_avail = false;
+ break;
+ }
+ }
+
+ return is_avail;
+}
+
+bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state)
+{
+ bool is_valid = false;
+ bool valid_entries = true;
+ bool valid_stream_ptrs = true;
+ bool valid_uniqueness = true;
+ bool valid_avail = true;
+ bool valid_streams = true;
+ int i, j;
+ uint8_t valid_count = 0;
+ uint8_t dig_stream_count = 0;
+ int matching_stream_ptrs = 0;
+ int eng_ids_per_ep_id[MAX_PIPES] = {0};
+
+ /* (1) No. of valid entries must match the no. of streams driven by DIG link encoders. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid)
+ valid_count++;
+
+ if (is_dig_link_enc_stream(state->streams[i]))
+ dig_stream_count++;
+ }
+ if (valid_count != dig_stream_count)
+ valid_entries = false;
+
+ /* (2) Matching stream ptrs. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid) {
+ if (assignment.stream == state->streams[i])
+ matching_stream_ptrs++;
+ else
+ valid_stream_ptrs = false;
+ }
+ }
+
+ /* (3) Each endpoint assigned unique encoder. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment_i = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment_i.valid) {
+ struct display_endpoint_id ep_id_i = assignment_i.ep_id;
+
+ eng_ids_per_ep_id[i]++;
+ for (j = 0; j < MAX_PIPES; j++) {
+ struct link_enc_assignment assignment_j =
+ state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[j];
+
+ if (j == i)
+ continue;
+
+ if (assignment_j.valid) {
+ struct display_endpoint_id ep_id_j = assignment_j.ep_id;
+
+ if (are_ep_ids_equal(&ep_id_i, &ep_id_j) &&
+ assignment_i.eng_id != assignment_j.eng_id) {
+ valid_uniqueness = false;
+ eng_ids_per_ep_id[i]++;
+ }
+ }
+ }
+ }
+ }
+
+ /* (4) Assigned encoders not in available pool. */
+ for (i = 0; i < MAX_PIPES; i++) {
+ struct link_enc_assignment assignment = state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i];
+
+ if (assignment.valid) {
+ for (j = 0; j < dc->res_pool->res_cap->num_dig_link_enc; j++) {
+ if (state->res_ctx.link_enc_cfg_ctx.link_enc_avail[j] == assignment.eng_id) {
+ valid_avail = false;
+ break;
+ }
+ }
+ }
+ }
+
+ /* (5) All streams have valid link encoders. */
+ for (i = 0; i < state->stream_count; i++) {
+ struct dc_stream_state *stream = state->streams[i];
+
+ if (is_dig_link_enc_stream(stream) && stream->link_enc == NULL) {
+ valid_streams = false;
+ break;
+ }
+ }
+
+ is_valid = valid_entries && valid_stream_ptrs && valid_uniqueness && valid_avail && valid_streams;
+ ASSERT(is_valid);
+
+ return is_valid;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index 9c51cd09dcf1..368e834c6809 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -17,6 +17,7 @@
#include "link_enc_cfg.h"
#include "clk_mgr.h"
#include "inc/link_dpcd.h"
+#include "dccg.h"
static uint8_t convert_to_count(uint8_t lttpr_repeater_count)
{
@@ -61,6 +62,13 @@ void dp_receiver_power_ctrl(struct dc_link *link, bool on)
sizeof(state));
}
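+/* Write a source sequence marker to the DP_SOURCE_SEQUENCE DPCD field so the
+ * driver's programming order can be traced when sequence debugging is enabled.
+ */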
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode)
+{
+ if (link != NULL && link->dc->debug.enable_driver_sequence_debug)
+ core_link_write_dpcd(link, DP_SOURCE_SEQUENCE,
+ &dp_test_mode, sizeof(dp_test_mode));
+}
+
void dp_enable_link_phy(
struct dc_link *link,
enum signal_type signal,
@@ -79,7 +87,7 @@ void dp_enable_link_phy(
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -111,12 +119,37 @@ void dp_enable_link_phy(
link->cur_link_settings = *link_settings;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ /* TODO - DP2.0 HW: notify link rate change here */
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc->clk_mgr->funcs->notify_link_rate_change)
+ dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
+ }
+#else
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
-
+#endif
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
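+ /* 128b/132b (DP 2.0) links are driven by the HPO link encoder; 8b/10b
+ * links keep using the DIG link encoder output path.
+ */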
+ if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) {
+ enable_dp_hpo_output(link, link_settings);
+ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
+ if (dc_is_dp_sst_signal(signal)) {
+ link_enc->funcs->enable_dp_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ } else {
+ link_enc->funcs->enable_dp_mst_output(
+ link_enc,
+ link_settings,
+ clock_source);
+ }
+ }
+#else
if (dc_is_dp_sst_signal(signal)) {
link_enc->funcs->enable_dp_output(
link_enc,
@@ -128,10 +161,11 @@ void dp_enable_link_phy(
link_settings,
clock_source);
}
-
+#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY);
dp_receiver_power_ctrl(link, true);
}
@@ -206,11 +240,14 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
{
struct dc *dc = link->ctx->dc;
struct dmcu *dmcu = dc->res_pool->dmcu;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_link_enc = link->hpo_dp_link_enc;
+#endif
struct link_encoder *link_enc;
/* Link should always be assigned encoder when en-/disabling. */
if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign)
- link_enc = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link);
else
link_enc = link->link_enc;
ASSERT(link_enc);
@@ -221,18 +258,34 @@ void dp_disable_link_phy(struct dc_link *link, enum signal_type signal)
if (signal == SIGNAL_TYPE_EDP) {
if (link->dc->hwss.edp_backlight_control)
link->dc->hwss.edp_backlight_control(link, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING)
+ disable_dp_hpo_output(link, signal);
+ else
+ link_enc->funcs->disable_output(link_enc, signal);
+#else
link_enc->funcs->disable_output(link_enc, signal);
+#endif
link->dc->hwss.edp_power_control(link, false);
} else {
if (dmcu != NULL && dmcu->funcs->lock_phy)
dmcu->funcs->lock_phy(dmcu);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING &&
+ hpo_link_enc)
+ disable_dp_hpo_output(link, signal);
+ else
+ link_enc->funcs->disable_output(link_enc, signal);
+#else
link_enc->funcs->disable_output(link_enc, signal);
-
+#endif
if (dmcu != NULL && dmcu->funcs->unlock_phy)
dmcu->funcs->unlock_phy(dmcu);
}
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY);
+
/* Clear current link setting.*/
memset(&link->cur_link_settings, 0,
sizeof(link->cur_link_settings));
@@ -273,6 +326,14 @@ bool dp_set_hw_training_pattern(
case DP_TRAINING_PATTERN_SEQUENCE_4:
test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4;
break;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ case DP_128b_132b_TPS1:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE;
+ break;
+ case DP_128b_132b_TPS2:
+ test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE;
+ break;
+#endif
default:
break;
}
@@ -282,6 +343,10 @@ bool dp_set_hw_training_pattern(
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DC_LOGGER \
+ link->ctx->logger
+#endif
void dp_set_hw_lane_settings(
struct dc_link *link,
const struct link_training_settings *link_settings,
@@ -293,7 +358,23 @@ void dp_set_hw_lane_settings(
return;
/* call Encoder to set lane settings */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dp_get_link_encoding_format(&link_settings->link_settings) ==
+ DP_128b_132b_ENCODING) {
+ link->hpo_dp_link_enc->funcs->set_ffe(
+ link->hpo_dp_link_enc,
+ &link_settings->link_settings,
+ link_settings->lane_settings[0].FFE_PRESET.raw);
+ } else if (dp_get_link_encoding_format(&link_settings->link_settings)
+ == DP_8b_10b_ENCODING) {
+ encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+ }
+#else
encoder->funcs->dp_set_lane_settings(encoder, link_settings);
+#endif
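+ /* Cache the lane settings that were just programmed so they can be referenced later. */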
+ memmove(link->cur_lane_setting,
+ link_settings->lane_settings,
+ sizeof(link->cur_lane_setting));
}
void dp_set_hw_test_pattern(
@@ -304,13 +385,16 @@ void dp_set_hw_test_pattern(
{
struct encoder_set_dp_phy_pattern_param pattern_param = {0};
struct link_encoder *encoder;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings);
+#endif
/* Access link encoder based on whether it is statically
* or dynamically assigned to a link.
*/
if (link->is_dig_mapping_flexible &&
link->dc->res_pool->funcs->link_encs_assign)
- encoder = link_enc_cfg_get_link_enc_used_by_link(link->dc->current_state, link);
+ encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
else
encoder = link->link_enc;
@@ -319,8 +403,28 @@ void dp_set_hw_test_pattern(
pattern_param.custom_pattern_size = custom_pattern_size;
pattern_param.dp_panel_mode = dp_get_panel_mode(link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ switch (link_encoding_format) {
+ case DP_128b_132b_ENCODING:
+ link->hpo_dp_link_enc->funcs->set_link_test_pattern(
+ link->hpo_dp_link_enc, &pattern_param);
+ break;
+ case DP_8b_10b_ENCODING:
+ ASSERT(encoder);
+ encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+ break;
+ default:
+ DC_LOG_ERROR("%s: Unknown link encoding format.", __func__);
+ break;
+ }
+#else
encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param);
+#endif
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#undef DC_LOGGER
+#endif
void dp_retrain_link_dp_test(struct dc_link *link,
struct dc_link_settings *link_setting,
@@ -338,7 +442,7 @@ void dp_retrain_link_dp_test(struct dc_link *link,
pipes[i].stream->link == link) {
udelay(100);
- pipes[i].stream_res.stream_enc->funcs->dp_blank(
+ pipes[i].stream_res.stream_enc->funcs->dp_blank(link,
pipes[i].stream_res.stream_enc);
/* disable any test pattern that might be active */
@@ -351,9 +455,10 @@ void dp_retrain_link_dp_test(struct dc_link *link,
if ((&pipes[i])->stream_res.audio && !link->dc->debug.az_endpoint_mute_only)
(&pipes[i])->stream_res.audio->funcs->az_disable((&pipes[i])->stream_res.audio);
- link->link_enc->funcs->disable_output(
- link->link_enc,
- SIGNAL_TYPE_DISPLAY_PORT);
+ if (link->link_enc)
+ link->link_enc->funcs->disable_output(
+ link->link_enc,
+ SIGNAL_TYPE_DISPLAY_PORT);
/* Clear current link setting. */
memset(&link->cur_link_settings, 0,
@@ -468,7 +573,12 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED;
/* Enable DSC in encoder */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)
+ && !is_dp_128b_132b_signal(pipe_ctx)) {
+#else
if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+#endif
DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id);
dsc_optc_config_log(dsc, &dsc_optc_cfg);
pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc,
@@ -495,13 +605,22 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
/* disable DSC in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
- pipe_ctx->stream_res.stream_enc,
- OPTC_DSC_DISABLED, 0, 0);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
- }
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(
+ pipe_ctx->stream_res.stream_enc,
+ OPTC_DSC_DISABLED, 0, 0);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
+ }
}
/* disable DSC block */
@@ -535,7 +654,16 @@ out:
return result;
}
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
+/*
+ * For the dynamic bpp change case, DSC is programmed with MASTER_UPDATE_LOCK enabled;
+ * hence the PPS info packet update needs to use a frame update instead of an immediate update.
+ * Added parameter immediate_update for this purpose.
+ * The decision to use frame update is hard-coded in function dp_update_dsc_config(),
+ * which is the only place where a "false" would be passed in for param immediate_update.
+ *
+ * immediate_update is only applicable when DSC is enabled.
+ */
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update)
{
struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -562,16 +690,35 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]);
if (dc_is_dp_signal(stream->signal)) {
DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id);
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc,
- true,
- &dsc_packed_pps[0]);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc,
+ true,
+ &dsc_packed_pps[0],
+ immediate_update);
}
} else {
/* disable DSC PPS in stream encoder */
if (dc_is_dp_signal(stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
- pipe_ctx->stream_res.stream_enc, false, NULL);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ false,
+ NULL,
+ true);
+ else
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet(
+ pipe_ctx->stream_res.stream_enc, false, NULL, true);
}
}
@@ -589,7 +736,171 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx)
return false;
dp_set_dsc_on_stream(pipe_ctx, true);
- dp_set_dsc_pps_sdp(pipe_ctx, true);
+ dp_set_dsc_pps_sdp(pipe_ctx, true, false);
return true;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#undef DC_LOGGER
+#define DC_LOGGER \
+ link->ctx->logger
+
+static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link)
+{
+ switch (link->link_enc->transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ return PHYD32CLKA;
+ case TRANSMITTER_UNIPHY_B:
+ return PHYD32CLKB;
+ case TRANSMITTER_UNIPHY_C:
+ return PHYD32CLKC;
+ case TRANSMITTER_UNIPHY_D:
+ return PHYD32CLKD;
+ case TRANSMITTER_UNIPHY_E:
+ return PHYD32CLKE;
+ default:
+ return PHYD32CLKA;
+ }
+}
+
+void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings)
+{
+ const struct dc *dc = link->dc;
+ enum phyd32clk_clock_source phyd32clk;
+
+ /* Enable PHY PLL at target bit rate
+ * UHBR10 = 10Gbps (SYMCLK32 = 312.5MHz)
+ * UHBR13.5 = 13.5Gbps (SYMCLK32 = 421.875MHz)
+ * UHBR20 = 20Gbps (SYMCLK32 = 625MHz)
+ */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ switch (link_settings->link_rate) {
+ case LINK_RATE_UHBR10:
+ dm_set_phyd32clk(dc->ctx, 312500);
+ break;
+ case LINK_RATE_UHBR13_5:
+ dm_set_phyd32clk(dc->ctx, 421875);
+ break;
+ case LINK_RATE_UHBR20:
+ dm_set_phyd32clk(dc->ctx, 625000);
+ break;
+ default:
+ return;
+ }
+ } else {
+ /* DP2.0 HW: call transmitter control to enable PHY */
+ link->hpo_dp_link_enc->funcs->enable_link_phy(
+ link->hpo_dp_link_enc,
+ link_settings,
+ link->link_enc->transmitter);
+ }
+
+ /* DCCG muxing and DTBCLK DTO */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->res_pool->dccg->funcs->set_physymclk(
+ dc->res_pool->dccg,
+ link->link_enc_hw_inst,
+ PHYSYMCLK_FORCE_SRC_PHYD32CLK,
+ true);
+
+ phyd32clk = get_phyd32clk_src(link);
+ dc->res_pool->dccg->funcs->enable_symclk32_le(
+ dc->res_pool->dccg,
+ link->hpo_dp_link_enc->inst,
+ phyd32clk);
+ link->hpo_dp_link_enc->funcs->link_enable(
+ link->hpo_dp_link_enc,
+ link_settings->lane_count);
+ }
+}
+
+void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal)
+{
+ const struct dc *dc = link->dc;
+
+ link->hpo_dp_link_enc->funcs->link_disable(link->hpo_dp_link_enc);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ dc->res_pool->dccg->funcs->disable_symclk32_le(
+ dc->res_pool->dccg,
+ link->hpo_dp_link_enc->inst);
+
+ dc->res_pool->dccg->funcs->set_physymclk(
+ dc->res_pool->dccg,
+ link->link_enc_hw_inst,
+ PHYSYMCLK_FORCE_SRC_SYMCLK,
+ false);
+
+ dm_set_phyd32clk(dc->ctx, 0);
+ } else {
+ /* DP2.0 HW: call transmitter control to disable PHY */
+ link->hpo_dp_link_enc->funcs->disable_link_phy(
+ link->hpo_dp_link_enc,
+ signal);
+ }
+}
+
+void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable)
+{
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct pipe_ctx *odm_pipe;
+ int odm_combine_num_segments = 1;
+ enum phyd32clk_clock_source phyd32clk;
+
+ if (enable) {
+ for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
+ odm_combine_num_segments++;
+
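+ /* Route the DP stream clock from DTBCLK0, enable the SYMCLK32 SE clock for
+ * this HPO stream encoder and program the DTBCLK DTO for the stream's pixel
+ * clock, accounting for ODM combine.
+ */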
+ dc->res_pool->dccg->funcs->set_dpstreamclk(
+ dc->res_pool->dccg,
+ DTBCLK0,
+ pipe_ctx->stream_res.tg->inst);
+
+ phyd32clk = get_phyd32clk_src(stream->link);
+ dc->res_pool->dccg->funcs->enable_symclk32_se(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ phyd32clk);
+
+ dc->res_pool->dccg->funcs->set_dtbclk_dto(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ stream->phy_pix_clk,
+ odm_combine_num_segments,
+ &stream->timing);
+ } else {
+ dc->res_pool->dccg->funcs->set_dtbclk_dto(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.tg->inst,
+ 0,
+ 0,
+ &stream->timing);
+ dc->res_pool->dccg->funcs->disable_symclk32_se(
+ dc->res_pool->dccg,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst);
+ dc->res_pool->dccg->funcs->set_dpstreamclk(
+ dc->res_pool->dccg,
+ REFCLK,
+ pipe_ctx->stream_res.tg->inst);
+ }
+}
+
+void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link)
+{
+ const struct dc *dc = link->dc;
+ struct dc_state *state = dc->current_state;
+ uint8_t i;
+
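+ /* Tear down HPO stream clocking for every active (non-dpms_off) pipe driving this link. */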
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc &&
+ state->res_ctx.pipe_ctx[i].stream &&
+ state->res_ctx.pipe_ctx[i].stream->link == link &&
+ !state->res_ctx.pipe_ctx[i].stream->dpms_off) {
+ setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false);
+ }
+ }
+}
+
+#undef DC_LOGGER
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a60396d5be44..c32fdccd4d92 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,6 +41,8 @@
#include "set_mode_types.h"
#include "virtual/virtual_stream_encoder.h"
#include "dpcd_defs.h"
+#include "link_enc_cfg.h"
+#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_SI)
#include "dce60/dce60_resource.h"
@@ -54,6 +56,7 @@
#include "dcn10/dcn10_resource.h"
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
+#include "dcn201/dcn201_resource.h"
#include "dcn30/dcn30_resource.h"
#include "dcn301/dcn301_resource.h"
#include "dcn302/dcn302_resource.h"
@@ -128,6 +131,10 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_NV:
dc_version = DCN_VERSION_2_0;
+ if (asic_id.chip_id == DEVICE_ID_NV_13FE) {
+ dc_version = DCN_VERSION_2_01;
+ break;
+ }
if (ASICREV_IS_SIENNA_CICHLID_P(asic_id.hw_internal_rev))
dc_version = DCN_VERSION_3_0;
if (ASICREV_IS_DIMGREY_CAVEFISH_P(asic_id.hw_internal_rev))
@@ -217,6 +224,9 @@ struct resource_pool *dc_create_resource_pool(struct dc *dc,
case DCN_VERSION_2_1:
res_pool = dcn21_create_resource_pool(init_data, dc);
break;
+ case DCN_VERSION_2_01:
+ res_pool = dcn201_create_resource_pool(init_data, dc);
+ break;
case DCN_VERSION_3_0:
res_pool = dcn30_create_resource_pool(init_data, dc);
break;
@@ -347,6 +357,29 @@ bool resource_construct(
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ pool->hpo_dp_stream_enc_count = 0;
+ if (create_funcs->create_hpo_dp_stream_encoder) {
+ for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) {
+ pool->hpo_dp_stream_enc[i] = create_funcs->create_hpo_dp_stream_encoder(i+ENGINE_ID_HPO_DP_0, ctx);
+ if (pool->hpo_dp_stream_enc[i] == NULL)
+ DC_ERR("DC: failed to create HPO DP stream encoder!\n");
+ pool->hpo_dp_stream_enc_count++;
+
+ }
+ }
+
+ pool->hpo_dp_link_enc_count = 0;
+ if (create_funcs->create_hpo_dp_link_encoder) {
+ for (i = 0; i < caps->num_hpo_dp_link_encoder; i++) {
+ pool->hpo_dp_link_enc[i] = create_funcs->create_hpo_dp_link_encoder(i, ctx);
+ if (pool->hpo_dp_link_enc[i] == NULL)
+ DC_ERR("DC: failed to create HPO DP link encoder!\n");
+ pool->hpo_dp_link_enc_count++;
+ }
+ }
+#endif
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
for (i = 0; i < caps->num_mpc_3dlut; i++) {
pool->mpc_lut[i] = dc_create_3dlut_func();
if (pool->mpc_lut[i] == NULL)
@@ -1122,9 +1155,17 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
}
- if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
- pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
- res = false;
+ if (!pipe_ctx->stream->ctx->dc->config.enable_windowed_mpo_odm) {
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE ||
+ pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ res = false;
+ } else {
+ /* Clamp minimum viewport size */
+ if (pipe_ctx->plane_res.scl_data.viewport.height < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.height = MIN_VIEWPORT_SIZE;
+ if (pipe_ctx->plane_res.scl_data.viewport.width < MIN_VIEWPORT_SIZE)
+ pipe_ctx->plane_res.scl_data.viewport.width = MIN_VIEWPORT_SIZE;
+ }
DC_LOG_SCALER("%s pipe %d:\nViewport: height:%d width:%d x:%d y:%d Recout: height:%d width:%d x:%d y:%d HACTIVE:%d VACTIVE:%d\n"
"src_rect: height:%d width:%d x:%d y:%d dst_rect: height:%d width:%d x:%d y:%d clip_rect: height:%d width:%d x:%d y:%d\n",
@@ -1665,6 +1706,22 @@ static void update_stream_engine_usage(
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static void update_hpo_dp_stream_engine_usage(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc,
+ bool acquired)
+{
+ int i;
+
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ if (pool->hpo_dp_stream_enc[i] == hpo_dp_stream_enc)
+ res_ctx->is_hpo_dp_stream_enc_acquired[i] = acquired;
+ }
+}
+#endif
+
/* TODO: release audio object */
void update_audio_usage(
struct resource_context *res_ctx,
@@ -1709,6 +1766,26 @@ static int acquire_first_free_pipe(
return -1;
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ int i;
+
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ if (!res_ctx->is_hpo_dp_stream_enc_acquired[i] &&
+ pool->hpo_dp_stream_enc[i]) {
+
+ return pool->hpo_dp_stream_enc[i];
+ }
+ }
+
+ return NULL;
+}
+#endif
+
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1799,6 +1876,15 @@ enum dc_status dc_remove_stream_from_ctx(
if (dc->res_pool->funcs->link_enc_unassign)
dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(del_pipe)) {
+ update_hpo_dp_stream_engine_usage(
+ &new_ctx->res_ctx, dc->res_pool,
+ del_pipe->stream_res.hpo_dp_stream_enc,
+ false);
+ }
+#endif
+
if (del_pipe->stream_res.audio)
update_audio_usage(
&new_ctx->res_ctx,
@@ -2051,6 +2137,31 @@ enum dc_status resource_map_pool_resources(
pipe_ctx->stream_res.stream_enc,
true);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* Allocate DP HPO Stream Encoder based on signal, hw capabilities
+ * and link settings
+ */
+ if (dc_is_dp_signal(stream->signal) &&
+ dc->caps.dp_hpo) {
+ struct dc_link_settings link_settings = {0};
+
+ decide_link_settings(stream, &link_settings);
+ if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc =
+ find_first_free_match_hpo_dp_stream_enc_for_link(
+ &context->res_ctx, pool, stream);
+
+ if (!pipe_ctx->stream_res.hpo_dp_stream_enc)
+ return DC_NO_STREAM_ENC_RESOURCE;
+
+ update_hpo_dp_stream_engine_usage(
+ &context->res_ctx, pool,
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ true);
+ }
+ }
+#endif
+
/* TODO: Add check if ASIC support and EDID audio */
if (!stream->converter_disable_audio &&
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
@@ -2147,7 +2258,7 @@ enum dc_status dc_validate_global_state(
* Update link encoder to stream assignment.
* TODO: Split out reason allocation from validation.
*/
- if (dc->res_pool->funcs->link_encs_assign)
+ if (dc->res_pool->funcs->link_encs_assign && fast_validate == false)
dc->res_pool->funcs->link_encs_assign(
dc, new_ctx, new_ctx->streams, new_ctx->stream_count);
#endif
@@ -2726,9 +2837,24 @@ bool pipe_need_reprogram(
if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc)
return true;
- /* DIG link encoder resource assignment for stream changed. */
- if (pipe_ctx_old->stream->link_enc != pipe_ctx->stream->link_enc)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc)
return true;
+#endif
+
+ /* DIG link encoder resource assignment for stream changed. */
+ if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) {
+ bool need_reprogram = false;
+ struct dc *dc = pipe_ctx_old->stream->ctx->dc;
+ enum link_enc_cfg_mode mode = dc->current_state->res_ctx.link_enc_cfg_ctx.mode;
+
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_STEADY;
+ if (link_enc_cfg_get_link_enc_used_by_stream(dc, pipe_ctx_old->stream) != pipe_ctx->stream->link_enc)
+ need_reprogram = true;
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = mode;
+
+ return need_reprogram;
+ }
return false;
}
@@ -2871,7 +2997,8 @@ enum dc_status dc_validate_stream(struct dc *dc, struct dc_stream_state *stream)
res = DC_FAIL_CONTROLLER_VALIDATE;
if (res == DC_OK) {
- if (!link->link_enc->funcs->validate_output_with_stream(
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY &&
+ !link->link_enc->funcs->validate_output_with_stream(
link->link_enc, stream))
res = DC_FAIL_ENC_VALIDATE;
}
@@ -2890,6 +3017,11 @@ enum dc_status dc_validate_plane(struct dc *dc, const struct dc_plane_state *pla
{
enum dc_status res = DC_OK;
+ /* check if surface has invalid dimensions */
+ if (plane_state->src_rect.width == 0 || plane_state->src_rect.height == 0 ||
+ plane_state->dst_rect.width == 0 || plane_state->dst_rect.height == 0)
+ return DC_FAIL_SURFACE_VALIDATE;
+
/* TODO For now validates pixel format only */
if (dc->res_pool->funcs->validate_plane)
return dc->res_pool->funcs->validate_plane(plane_state, &dc->caps);
@@ -2975,3 +3107,22 @@ void get_audio_check(struct audio_info *aud_modes,
}
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
+ const struct resource_pool *pool)
+{
+ uint8_t i;
+ struct hpo_dp_link_encoder *enc = NULL;
+
+ ASSERT(pool->hpo_dp_link_enc_count <= MAX_HPO_DP2_LINK_ENCODERS);
+
+ for (i = 0; i < pool->hpo_dp_link_enc_count; i++) {
+ if (pool->hpo_dp_link_enc[i]->transmitter == TRANSMITTER_UNKNOWN) {
+ enc = pool->hpo_dp_link_enc[i];
+ break;
+ }
+ }
+
+ return enc;
+}
+#endif
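
The HPO DP stream encoders added to the pool above are tracked with the same acquire/release pattern as the other pool resources: a per-index acquired flag in the resource context, a first-free search when mapping a stream, and a matching release when the stream is removed. The sketch below reproduces that pattern with simplified stand-in types; none of the names are the real DC structures.

#include <stdbool.h>
#include <stdio.h>

#define POOL_SIZE 4

/* Simplified stand-in for the pool bookkeeping above: the pool owns the
 * encoder instances, the resource context tracks which ones a state uses
 * via per-index acquired flags (is_hpo_dp_stream_enc_acquired[]).
 */
struct pool {
        int item[POOL_SIZE];        /* stand-ins for encoder instances */
        bool acquired[POOL_SIZE];   /* usage flags, one per instance */
};

/* First-free search, as in find_first_free_match_hpo_dp_stream_enc_for_link(). */
static int pool_acquire_first_free(struct pool *p)
{
        int i;

        for (i = 0; i < POOL_SIZE; i++) {
                if (!p->acquired[i]) {
                        p->acquired[i] = true;
                        return i;
                }
        }
        return -1; /* corresponds to DC_NO_STREAM_ENC_RESOURCE above */
}

/* Release path, as in update_hpo_dp_stream_engine_usage(..., false). */
static void pool_release(struct pool *p, int i)
{
        if (i >= 0 && i < POOL_SIZE)
                p->acquired[i] = false;
}

int main(void)
{
        struct pool p = {0};
        int idx = pool_acquire_first_free(&p);

        printf("acquired index %d\n", idx);
        pool_release(&p, idx);
        return 0;
}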
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
index 28ef9760fa34..4b372aa52801 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stat.c
@@ -61,6 +61,14 @@ void dc_stat_get_dmub_notification(const struct dc *dc, struct dmub_notification
status = dmub_srv_stat_get_notification(dmub, notify);
ASSERT(status == DMUB_STATUS_OK);
+
+ /* For HPD/HPD RX, convert dpia port index into link index */
+ if (notify->type == DMUB_NOTIFICATION_HPD ||
+ notify->type == DMUB_NOTIFICATION_HPD_IRQ ||
+ notify->type == DMUB_NOTIFICATION_SET_CONFIG_REPLY) {
+ notify->link_index =
+ get_link_index_from_dpia_port_index(dc, notify->link_index);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index f0f54f4d3d9b..57cf4cb82370 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -202,6 +202,10 @@ struct dc_stream_state *dc_copy_stream(const struct dc_stream_state *stream)
new_stream->stream_id = new_stream->ctx->dc_stream_id_count;
new_stream->ctx->dc_stream_id_count++;
+ /* If using dynamic encoder assignment, wait till stream committed to assign encoder. */
+ if (new_stream->ctx->dc->res_pool->funcs->link_encs_assign)
+ new_stream->link_enc = NULL;
+
kref_init(&new_stream->refcount);
return new_stream;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 3ab52d9a82cf..a5339796902a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -44,8 +44,10 @@
/* forward declaration */
struct aux_payload;
+struct set_config_cmd_payload;
+struct dmub_notification;
-#define DC_VER "3.2.149"
+#define DC_VER "3.2.159"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -183,6 +185,9 @@ struct dc_caps {
unsigned int cursor_cache_size;
struct dc_plane_cap planes[MAX_PLANES];
struct dc_color_caps color;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool dp_hpo;
+#endif
bool vbios_lttpr_aware;
bool vbios_lttpr_enable;
};
@@ -206,12 +211,12 @@ struct dc_dcc_setting {
unsigned int max_uncompressed_blk_size;
bool independent_64b_blks;
#if defined(CONFIG_DRM_AMD_DC_DCN)
- //These bitfields to be used starting with DCN 3.0
+ //These bitfields to be used starting with DCN
struct {
- uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN 3.0 (the worst compression case)
- uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN 3.0
- uint32_t dcc_256_128_128 : 1; //available starting with DCN 3.0
- uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN 3.0 (the best compression case)
+ uint32_t dcc_256_64_64 : 1;//available in ASICs before DCN (the worst compression case)
+ uint32_t dcc_128_128_uncontrained : 1; //available in ASICs before DCN
+ uint32_t dcc_256_128_128 : 1; //available starting with DCN
+ uint32_t dcc_256_256_unconstrained : 1; //available in ASICs before DCN (the best compression case)
} dcc_controls;
#endif
};
@@ -289,7 +294,15 @@ struct dc_cap_funcs {
struct link_training_settings;
-
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union allow_lttpr_non_transparent_mode {
+ struct {
+ bool DP1_4A : 1;
+ bool DP2_0 : 1;
+ } bits;
+ unsigned char raw;
+};
+#endif
/* Structure to hold configuration flags set by dm at dc creation. */
struct dc_config {
bool gpu_vm_support;
@@ -302,10 +315,15 @@ struct dc_config {
bool edp_no_power_sequencing;
bool force_enum_edp;
bool forced_clocks;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode;
+#else
bool allow_lttpr_non_transparent_mode;
+#endif
bool multi_mon_pp_mclk_switch;
bool disable_dmcu;
bool enable_4to1MPC;
+ bool enable_windowed_mpo_odm;
bool allow_edp_hotplug_detection;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool clamp_min_dcfclk;
@@ -325,6 +343,12 @@ enum visual_confirm {
VISUAL_CONFIRM_SWIZZLE = 9,
};
+enum dc_psr_power_opts {
+ psr_power_opt_invalid = 0x0,
+ psr_power_opt_smu_opt_static_screen = 0x1,
+ psr_power_opt_z10_static_screen = 0x10,
+};
+
enum dcc_option {
DCC_ENABLE = 0,
DCC_DISABLE = 1,
@@ -456,10 +480,39 @@ union mem_low_power_enable_options {
bool cm: 1;
bool mpc: 1;
bool optc: 1;
+ bool vpg: 1;
+ bool afmt: 1;
+ } bits;
+ uint32_t u32All;
+};
+
+union root_clock_optimization_options {
+ struct {
+ bool dpp: 1;
+ bool dsc: 1;
+ bool hdmistream: 1;
+ bool hdmichar: 1;
+ bool dpstream: 1;
+ bool symclk32_se: 1;
+ bool symclk32_le: 1;
+ bool symclk_fe: 1;
+ bool physymclk: 1;
+ bool dpiasymclk: 1;
+ uint32_t reserved: 22;
} bits;
uint32_t u32All;
};
+union dpia_debug_options {
+ struct {
+ uint32_t disable_dpia:1;
+ uint32_t force_non_lttpr:1;
+ uint32_t extend_aux_rd_interval:1;
+ uint32_t reserved:29;
+ } bits;
+ uint32_t raw;
+};
+
struct dc_debug_data {
uint32_t ltFailCount;
uint32_t i2cErrorCount;
@@ -548,6 +601,7 @@ struct dc_debug_options {
enum wm_report_mode pplib_wm_report_mode;
unsigned int min_disp_clk_khz;
unsigned int min_dpp_clk_khz;
+ unsigned int min_dram_clk_khz;
int sr_exit_time_dpm0_ns;
int sr_enter_plus_exit_time_dpm0_ns;
int sr_exit_time_ns;
@@ -614,19 +668,25 @@ struct dc_debug_options {
bool enable_dmcub_surface_flip;
bool usbc_combo_phy_reset_wa;
bool enable_dram_clock_change_one_display_vactive;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - remove once tested */
+ bool legacy_dp2_lt;
+ bool set_mst_en_for_sst;
+#endif
union mem_low_power_enable_options enable_mem_low_power;
+ union root_clock_optimization_options root_clock_optimization;
bool force_vblank_alignment;
/* Enable dmub aux for legacy ddc */
bool enable_dmub_aux_for_legacy_ddc;
bool optimize_edp_link_rate; /* eDP ILR */
- /* force enable edp FEC */
- bool force_enable_edp_fec;
/* FEC/PSR1 sequence enable delay in 100us */
uint8_t fec_enable_delay_in100us;
+ bool enable_driver_sequence_debug;
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool disable_z10;
bool enable_sw_cntl_psr;
+ union dpia_debug_options dpia_debug;
#endif
};
@@ -672,6 +732,9 @@ struct dc {
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool idle_optimizations_allowed;
#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool enable_c20_dtm_b0;
+#endif
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
@@ -878,6 +941,7 @@ union surface_update_flags {
uint32_t bandwidth_change:1;
uint32_t clock_change:1;
uint32_t stereo_format_change:1;
+ uint32_t lut_3d:1;
uint32_t full_update:1;
} bits;
@@ -1145,7 +1209,14 @@ struct dpcd_caps {
struct dpcd_dsc_capabilities dsc_caps;
struct dc_lttpr_caps lttpr_caps;
struct psr_caps psr_caps;
+ struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates;
+ union dp_main_line_channel_coding_cap channel_coding_cap;
+ union dp_sink_video_fallback_formats fallback_formats;
+ union dp_fec_capability1 fec_cap1;
+#endif
};
union dpcd_sink_ext_caps {
@@ -1287,6 +1358,8 @@ void dc_interrupt_ack(struct dc *dc, enum dc_irq_source src);
enum dc_irq_source dc_get_hpd_irq_source_at_index(
struct dc *dc, uint32_t link_index);
+void dc_notify_vsync_int_state(struct dc *dc, struct dc_stream_state *stream, bool enable);
+
/*******************************************************************************
* Power Interfaces
******************************************************************************/
@@ -1337,7 +1410,7 @@ void dc_hardware_release(struct dc *dc);
bool dc_set_psr_allow_active(struct dc *dc, bool enable);
#if defined(CONFIG_DRM_AMD_DC_DCN)
-void dc_z10_restore(struct dc *dc);
+void dc_z10_restore(const struct dc *dc);
void dc_z10_save_init(struct dc *dc);
#endif
@@ -1347,6 +1420,20 @@ bool dc_process_dmub_aux_transfer_async(struct dc *dc,
uint32_t link_index,
struct aux_payload *payload);
+/* Get dc link index from dpia port index */
+uint8_t get_link_index_from_dpia_port_index(const struct dc *dc,
+ uint8_t dpia_port_index);
+
+bool dc_process_dmub_set_config_async(struct dc *dc,
+ uint32_t link_index,
+ struct set_config_cmd_payload *payload,
+ struct dmub_notification *notify);
+
+enum dc_status dc_process_dmub_set_mst_slots(const struct dc *dc,
+ uint32_t link_index,
+ uint8_t mst_alloc_slots,
+ uint8_t *mst_slots_in_use);
+
/*******************************************************************************
* DSC Interfaces
******************************************************************************/
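
Under CONFIG_DRM_AMD_DC_DCN, dc_config's allow_lttpr_non_transparent_mode becomes a bitfield union instead of a plain bool, letting DM opt into non-transparent LTTPR handling separately for DP 1.4a and DP 2.0 links. A minimal sketch of setting those bits follows; the union is mirrored from the definition above so the example compiles on its own, and the chosen policy is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the union added to struct dc_config above (DCN builds only),
 * reproduced here so the sketch is self-contained.
 */
union allow_lttpr_non_transparent_mode {
        struct {
                bool DP1_4A : 1;
                bool DP2_0 : 1;
        } bits;
        unsigned char raw;
};

int main(void)
{
        union allow_lttpr_non_transparent_mode mode = { .raw = 0 };

        /* Illustrative DM policy: allow non-transparent LTTPR mode on
         * DP 1.4a links, keep DP 2.0 links transparent.
         */
        mode.bits.DP1_4A = true;
        mode.bits.DP2_0 = false;

        printf("raw = 0x%02x\n", mode.raw);
        return 0;
}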
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 4f54bde1bb1c..bc87ea0adf94 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -53,7 +53,17 @@ enum dc_link_rate {
LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane
LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane
LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane
+ /* Starting from DP2.0 link rate enum directly represents actual
+ * link rate value in unit of 10 mbps
+ */
+ LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane
+ LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane
+ LINK_RATE_UHBR20 = 2000, // UHBR20 - 20.0 Gbps/Lane
+#else
LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane
+#endif
};
enum dc_link_spread {
@@ -90,17 +100,47 @@ enum dc_post_cursor2 {
POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3,
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_dp_ffe_preset_level {
+ DP_FFE_PRESET_LEVEL0 = 0,
+ DP_FFE_PRESET_LEVEL1,
+ DP_FFE_PRESET_LEVEL2,
+ DP_FFE_PRESET_LEVEL3,
+ DP_FFE_PRESET_LEVEL4,
+ DP_FFE_PRESET_LEVEL5,
+ DP_FFE_PRESET_LEVEL6,
+ DP_FFE_PRESET_LEVEL7,
+ DP_FFE_PRESET_LEVEL8,
+ DP_FFE_PRESET_LEVEL9,
+ DP_FFE_PRESET_LEVEL10,
+ DP_FFE_PRESET_LEVEL11,
+ DP_FFE_PRESET_LEVEL12,
+ DP_FFE_PRESET_LEVEL13,
+ DP_FFE_PRESET_LEVEL14,
+ DP_FFE_PRESET_LEVEL15,
+ DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15,
+};
+#endif
+
enum dc_dp_training_pattern {
DP_TRAINING_PATTERN_SEQUENCE_1 = 0,
DP_TRAINING_PATTERN_SEQUENCE_2,
DP_TRAINING_PATTERN_SEQUENCE_3,
DP_TRAINING_PATTERN_SEQUENCE_4,
DP_TRAINING_PATTERN_VIDEOIDLE,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_TPS1,
+ DP_128b_132b_TPS2,
+ DP_128b_132b_TPS2_CDS,
+#endif
};
enum dp_link_encoding {
DP_UNKNOWN_ENCODING = 0,
DP_8b_10b_ENCODING = 1,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_ENCODING = 2,
+#endif
};
struct dc_link_settings {
@@ -112,21 +152,35 @@ struct dc_link_settings {
bool dpcd_source_device_specific_field_support;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union dc_dp_ffe_preset {
+ struct {
+ uint8_t level : 4;
+ uint8_t reserved : 1;
+ uint8_t no_preshoot : 1;
+ uint8_t no_deemphasis : 1;
+ uint8_t method2 : 1;
+ } settings;
+ uint8_t raw;
+};
+#endif
+
struct dc_lane_settings {
enum dc_voltage_swing VOLTAGE_SWING;
enum dc_pre_emphasis PRE_EMPHASIS;
enum dc_post_cursor2 POST_CURSOR2;
-};
-
-struct dc_link_training_settings {
- struct dc_link_settings link;
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dc_dp_ffe_preset FFE_PRESET;
+#endif
};
struct dc_link_training_overrides {
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dc_dp_ffe_preset *ffe_preset;
+#endif
uint16_t *cr_pattern_time;
uint16_t *eq_pattern_time;
@@ -140,6 +194,16 @@ struct dc_link_training_overrides {
bool *fec_enable;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+union payload_table_update_status {
+ struct {
+ uint8_t VC_PAYLOAD_TABLE_UPDATED:1;
+ uint8_t ACT_HANDLED:1;
+ } bits;
+ uint8_t raw;
+};
+#endif
+
union dpcd_rev {
struct {
uint8_t MINOR:4;
@@ -227,7 +291,14 @@ union lane_align_status_updated {
struct {
uint8_t INTERLANE_ALIGN_DONE:1;
uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1;
+ uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1;
+ uint8_t LT_FAILED_128b_132b:1;
+ uint8_t RESERVED:1;
+#else
uint8_t RESERVED:4;
+#endif
uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1;
uint8_t LINK_STATUS_UPDATED:1;
} bits;
@@ -240,6 +311,12 @@ union lane_adjust {
uint8_t PRE_EMPHASIS_LANE:2;
uint8_t RESERVED:4;
} bits;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct {
+ uint8_t PRESET_VALUE :4;
+ uint8_t RESERVED :4;
+ } tx_ffe;
+#endif
uint8_t raw;
};
@@ -269,6 +346,12 @@ union dpcd_training_lane {
uint8_t MAX_PRE_EMPHASIS_REACHED:1;
uint8_t RESERVED:2;
} bits;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct {
+ uint8_t PRESET_VALUE :4;
+ uint8_t RESERVED :4;
+ } tx_ffe;
+#endif
uint8_t raw;
};
@@ -551,12 +634,18 @@ union test_response {
union phy_test_pattern {
struct {
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* This field is 7 bits for DP2.0 */
+ uint8_t PATTERN :7;
+ uint8_t RESERVED :1;
+#else
/* DpcdPhyTestPatterns. This field is 2 bits for DP1.1
* and 3 bits for DP1.2.
*/
uint8_t PATTERN :3;
/* By spec, bit7:2 is 0 for DP1.1. */
uint8_t RESERVED :5;
+#endif
} bits;
uint8_t raw;
};
@@ -634,7 +723,14 @@ union dpcd_fec_capability {
uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1;
uint8_t BIT_ERROR_COUNT_CAPABLE:1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1;
+ uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1;
+ uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1;
+ uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1;
+#else
uint8_t RESERVED:4;
+#endif
} bits;
uint8_t raw;
};
@@ -758,4 +854,200 @@ struct psr_caps {
bool psr_exit_link_training_required;
};
+/* Length of router topology ID read from DPCD in bytes. */
+#define DPCD_USB4_TOPOLOGY_ID_LEN 5
+
+/* DPCD[0xE000D] DP_TUNNELING_CAPABILITIES SUPPORT register. */
+union dp_tun_cap_support {
+ struct {
+ uint8_t dp_tunneling :1;
+ uint8_t rsvd :5;
+ uint8_t panel_replay_tun_opt :1;
+ uint8_t dpia_bw_alloc :1;
+ } bits;
+ uint8_t raw;
+};
+
+/* DPCD[0xE000E] DP_IN_ADAPTER_INFO register. */
+union dpia_info {
+ struct {
+ uint8_t dpia_num :5;
+ uint8_t rsvd :3;
+ } bits;
+ uint8_t raw;
+};
+
+/* DP Tunneling over USB4 */
+struct dpcd_usb4_dp_tunneling_info {
+ union dp_tun_cap_support dp_tun_cap;
+ union dpia_info dpia_info;
+ uint8_t usb4_driver_id;
+ uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN];
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP
+#define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006
+#endif
+#ifndef DP_SINK_VIDEO_FALLBACK_FORMATS
+#define DP_SINK_VIDEO_FALLBACK_FORMATS 0x020
+#endif
+#ifndef DP_FEC_CAPABILITY_1
+#define DP_FEC_CAPABILITY_1 0x091
+#endif
+#ifndef DP_DFP_CAPABILITY_EXTENSION_SUPPORT
+#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0A3
+#endif
+#ifndef DP_DSC_CONFIGURATION
+#define DP_DSC_CONFIGURATION 0x161
+#endif
+#ifndef DP_PHY_SQUARE_PATTERN
+#define DP_PHY_SQUARE_PATTERN 0x249
+#endif
+#ifndef DP_128b_132b_SUPPORTED_LINK_RATES
+#define DP_128b_132b_SUPPORTED_LINK_RATES 0x2215
+#endif
+#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL
+#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216
+#endif
+#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0
+#define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230
+#endif
+#ifndef DP_TEST_264BIT_CUSTOM_PATTERN_263_256
+#define DP_TEST_264BIT_CUSTOM_PATTERN_263_256 0X2250
+#endif
+#ifndef DP_DSC_SUPPORT_AND_DECODER_COUNT
+#define DP_DSC_SUPPORT_AND_DECODER_COUNT 0x2260
+#endif
+#ifndef DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0
+#define DP_DSC_MAX_SLICE_COUNT_AND_AGGREGATION_0 0x2270
+#endif
+#ifndef DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK
+#define DP_DSC_DECODER_0_MAXIMUM_SLICE_COUNT_MASK (1 << 0)
+#endif
+#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK
+#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_MASK (0b111 << 1)
+#endif
+#ifndef DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT
+#define DP_DSC_DECODER_0_AGGREGATION_SUPPORT_SHIFT 1
+#endif
+#ifndef DP_DSC_DECODER_COUNT_MASK
+#define DP_DSC_DECODER_COUNT_MASK (0b111 << 5)
+#endif
+#ifndef DP_DSC_DECODER_COUNT_SHIFT
+#define DP_DSC_DECODER_COUNT_SHIFT 5
+#endif
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_SET
+#define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108
+#endif
+#ifndef DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER
+#define DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER 0xF0006
+#endif
+#ifndef DP_PHY_REPEATER_128b_132b_RATES
+#define DP_PHY_REPEATER_128b_132b_RATES 0xF0007
+#endif
+#ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1
+#define DP_128b_132b_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xF0022
+#endif
+#ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION
+#define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3)
+#endif
+/* TODO - Use DRM header to replace above once available */
+
+union dp_main_line_channel_coding_cap {
+ struct {
+ uint8_t DP_8b_10b_SUPPORTED :1;
+ uint8_t DP_128b_132b_SUPPORTED :1;
+ uint8_t RESERVED :6;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_main_link_channel_coding_lttpr_cap {
+ struct {
+ uint8_t DP_128b_132b_SUPPORTED :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_128b_132b_supported_link_rates {
+ struct {
+ uint8_t UHBR10 :1;
+ uint8_t UHBR20 :1;
+ uint8_t UHBR13_5:1;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_128b_132b_supported_lttpr_link_rates {
+ struct {
+ uint8_t UHBR10 :1;
+ uint8_t UHBR13_5:1;
+ uint8_t UHBR20 :1;
+ uint8_t RESERVED:5;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_sink_video_fallback_formats {
+ struct {
+ uint8_t dp_1024x768_60Hz_24bpp_support :1;
+ uint8_t dp_1280x720_60Hz_24bpp_support :1;
+ uint8_t dp_1920x1080_60Hz_24bpp_support :1;
+ uint8_t RESERVED :5;
+ } bits;
+ uint8_t raw;
+};
+
+union dp_fec_capability1 {
+ struct {
+ uint8_t AGGREGATED_ERROR_COUNTERS_CAPABLE :1;
+ uint8_t RESERVED :7;
+ } bits;
+ uint8_t raw;
+};
+
+struct dp_color_depth_caps {
+ uint8_t support_6bpc :1;
+ uint8_t support_8bpc :1;
+ uint8_t support_10bpc :1;
+ uint8_t support_12bpc :1;
+ uint8_t support_16bpc :1;
+ uint8_t RESERVED :3;
+};
+
+struct dp_encoding_format_caps {
+ uint8_t support_rgb :1;
+ uint8_t support_ycbcr444:1;
+ uint8_t support_ycbcr422:1;
+ uint8_t support_ycbcr420:1;
+ uint8_t RESERVED :4;
+};
+
+union dp_dfp_cap_ext {
+ struct {
+ uint8_t supported;
+ uint8_t max_pixel_rate_in_mps[2];
+ uint8_t max_video_h_active_width[2];
+ uint8_t max_video_v_active_height[2];
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+ } fields;
+ uint8_t raw[12];
+};
+
+union dp_128b_132b_training_aux_rd_interval {
+ struct {
+ uint8_t VALUE :7;
+ uint8_t UNIT :1;
+ } bits;
+ uint8_t raw;
+};
+#endif
+
#endif /* DC_DP_TYPES_H */
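
The new DPCD definitions above include DP_128b_132b_SUPPORTED_LINK_RATES (0x2215) and a matching bitfield union. Below is a standalone sketch of decoding that byte and picking the highest advertised UHBR rate; the union and the rate values (in 10 Mbps units) are mirrored from the definitions above, while the helper itself is hypothetical and not the driver's capability-parsing code.

#include <stdint.h>
#include <stdio.h>

/* Mirror of the DP 2.0 sink capability union added above, reproduced so
 * this sketch compiles standalone. Note the bit order: UHBR20 sits below
 * UHBR13.5 in the DPCD byte.
 */
union dp_128b_132b_supported_link_rates {
        struct {
                uint8_t UHBR10 : 1;
                uint8_t UHBR20 : 1;
                uint8_t UHBR13_5 : 1;
                uint8_t RESERVED : 5;
        } bits;
        uint8_t raw;
};

/* Return the highest UHBR rate reported in DPCD 0x2215, expressed in
 * 10 Mbps units to match LINK_RATE_UHBR* above, or 0 if none is set.
 */
static unsigned int highest_uhbr_rate(uint8_t dpcd_2215)
{
        union dp_128b_132b_supported_link_rates caps = { .raw = dpcd_2215 };

        if (caps.bits.UHBR20)
                return 2000;    /* LINK_RATE_UHBR20 */
        if (caps.bits.UHBR13_5)
                return 1350;    /* LINK_RATE_UHBR13_5 */
        if (caps.bits.UHBR10)
                return 1000;    /* LINK_RATE_UHBR10 */
        return 0;
}

int main(void)
{
        printf("%u\n", highest_uhbr_rate(0x03)); /* UHBR10 | UHBR20 -> 2000 */
        return 0;
}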
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
index 16cc76ce3739..684713b2cff7 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h
@@ -51,7 +51,6 @@ struct dc_dsc_policy {
int min_slice_height; // Must not be less than 8
uint32_t max_target_bpp;
uint32_t min_target_bpp;
- uint32_t preferred_bpp_x16;
bool enable_dsc_when_not_needed;
};
@@ -81,6 +80,16 @@ bool dc_dsc_compute_config(
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp);
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ const struct dc_crtc_timing *timing,
+ const int num_slices_h,
+ const bool is_dp);
+
+/* TODO - Hardware/specs limitation should be owned by dc dsc and returned to DM,
+ * and DM can choose to OVERRIDE the limitation on CASE BY CASE basis.
+ * Hardware/specs limitation should not be writable by DM.
+ * It should be decoupled from DM specific policy and named differently.
+ */
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 83845d006c54..180ecd860296 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -45,6 +45,10 @@ struct dc_link_status {
struct link_mst_stream_allocation {
/* DIG front */
const struct stream_encoder *stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* HPO DP Stream Encoder */
+ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+#endif
/* associate DRM payload table with DC stream encoder */
uint8_t vcp_id;
/* number of slots required for the DP stream in transport packet */
@@ -81,6 +85,7 @@ struct psr_settings {
*/
bool psr_frame_capture_indication_req;
unsigned int psr_sdp_transmit_line_num_deadline;
+ unsigned int psr_power_opt;
};
/*
@@ -117,8 +122,12 @@ struct dc_link {
struct dc_link_settings reported_link_cap;
struct dc_link_settings verified_link_cap;
struct dc_link_settings cur_link_settings;
- struct dc_lane_settings cur_lane_setting;
+ struct dc_lane_settings cur_lane_setting[LANE_COUNT_DP_MAX];
struct dc_link_settings preferred_link_setting;
+ /* preferred_training_settings are override values that
+ * come from DM. DM is responsible for the memory
+ * management of the override pointers.
+ */
struct dc_link_training_overrides preferred_training_settings;
struct dp_audio_test_data audio_test_data;
@@ -150,6 +159,9 @@ struct dc_link {
struct panel_cntl *panel_cntl;
struct link_encoder *link_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_link_encoder *hpo_dp_link_enc;
+#endif
struct graphics_object_id link_id;
/* Endpoint type distinguishes display endpoints which do not have entries
* in the BIOS connector table from those that do. Helps when tracking link
@@ -170,11 +182,15 @@ struct dc_link {
struct psr_settings psr_settings;
+ /* Drive settings read from integrated info table */
+ struct dc_lane_settings bios_forced_drive_settings;
+
/* MST record stream using this link */
struct link_flags {
bool dp_keep_receiver_powered;
bool dp_skip_DID2;
bool dp_skip_reset_segment;
+ bool dp_mot_reset_segment;
} wa_flags;
struct link_mst_stream_allocation_table mst_stream_alloc_table;
@@ -260,8 +276,8 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link);
int dc_link_get_target_backlight_pwm(const struct dc_link *link);
-bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable,
- bool wait, bool force_static);
+bool dc_link_set_psr_allow_active(struct dc_link *dc_link, const bool *enable,
+ bool wait, bool force_static, const unsigned int *power_opts);
bool dc_link_get_psr_state(const struct dc_link *dc_link, enum dc_psr_state *state);
@@ -288,6 +304,10 @@ enum dc_detect_reason {
bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason);
bool dc_link_get_hpd_state(struct dc_link *dc_link);
enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn);
+#endif
/* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt).
* Return:
@@ -296,7 +316,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx);
* false - no change in Downstream port status. No further action required
* from DM. */
bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
- union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss);
+ union hpd_irq_data *hpd_irq_dpcd_data, bool *out_link_loss,
+ bool defer_handling, bool *has_left_work);
/*
* On eDP links this function call will stall until T12 has elapsed.
@@ -305,9 +326,9 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *dc_link,
*/
bool dc_link_wait_for_t12(struct dc_link *link);
-enum dc_status read_hpd_rx_irq_data(
- struct dc_link *link,
- union hpd_irq_data *irq_data);
+void dc_link_dp_handle_automated_test(struct dc_link *link);
+void dc_link_dp_handle_link_loss(struct dc_link *link);
+bool dc_link_dp_allow_hpd_rx_irq(const struct dc_link *link);
struct dc_sink_init_data;
@@ -416,4 +437,7 @@ uint32_t dc_bandwidth_in_kbps_from_timing(
bool dc_link_is_fec_supported(const struct dc_link *link);
bool dc_link_should_enable_fec(const struct dc_link *link);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link);
+#endif
#endif /* DC_LINK_H_ */
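
dc_link now stores cur_lane_setting per lane (LANE_COUNT_DP_MAX entries) rather than a single shared value. A small sketch of the resulting per-lane iteration follows; the struct here is a simplified stand-in for dc_lane_settings, whose real fields are enum-typed (plus an FFE preset on DCN builds).

#include <stdio.h>

#define LANE_COUNT_DP_MAX 4

/* Simplified stand-in for struct dc_lane_settings. */
struct lane_settings {
        int voltage_swing;
        int pre_emphasis;
};

int main(void)
{
        /* With the array form, drive levels are tracked (and can be
         * reported) for each lane individually instead of assuming all
         * lanes share one setting.
         */
        struct lane_settings cur_lane_setting[LANE_COUNT_DP_MAX] = {0};
        unsigned int lane, lane_count = 4;

        for (lane = 0; lane < lane_count; lane++)
                printf("lane %u: swing %d, pre-emphasis %d\n",
                       lane,
                       cur_lane_setting[lane].voltage_swing,
                       cur_lane_setting[lane].pre_emphasis);
        return 0;
}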
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index b8ebc1f09538..e37c4a10bfd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -115,6 +115,13 @@ struct periodic_interrupt_config {
int lines_offset;
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dc_mst_stream_bw_update {
+ bool is_increase; // is bandwidth reduced or increased
+ uint32_t mst_stream_bw; // new mst bandwidth in kbps
+};
+#endif
+
union stream_update_flags {
struct {
uint32_t scaling:1;
@@ -125,6 +132,9 @@ union stream_update_flags {
uint32_t gamut_remap:1;
uint32_t wb_update:1;
uint32_t dsc_changed : 1;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ uint32_t mst_bw : 1;
+#endif
} bits;
uint32_t raw;
@@ -278,6 +288,9 @@ struct dc_stream_update {
struct dc_writeback_update *wb_update;
struct dc_dsc_config *dsc_config;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_mst_stream_bw_update *mst_bw_update;
+#endif
struct dc_transfer_func *func_shaper;
struct dc_3dlut *lut3d_func;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index c1532930169b..388457ffc0a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -395,9 +395,27 @@ struct dc_lttpr_caps {
uint8_t max_link_rate;
uint8_t phy_repeater_cnt;
uint8_t max_ext_timeout;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding;
+ union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates;
+#endif
uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1];
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct dc_dongle_dfp_cap_ext {
+ bool supported;
+ uint16_t max_pixel_rate_in_mps;
+ uint16_t max_video_h_active_width;
+ uint16_t max_video_v_active_height;
+ struct dp_encoding_format_caps encoding_format_caps;
+ struct dp_color_depth_caps rgb_color_depth_caps;
+ struct dp_color_depth_caps ycbcr444_color_depth_caps;
+ struct dp_color_depth_caps ycbcr422_color_depth_caps;
+ struct dp_color_depth_caps ycbcr420_color_depth_caps;
+};
+#endif
+
struct dc_dongle_caps {
/* dongle type (DP converter, CV smart dongle) */
enum display_dongle_type dongle_type;
@@ -411,6 +429,9 @@ struct dc_dongle_caps {
bool is_dp_hdmi_ycbcr420_converter;
uint32_t dp_hdmi_max_bpc;
uint32_t dp_hdmi_max_pixel_clk_in_khz;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct dc_dongle_dfp_cap_ext dfp_cap_ext;
+#endif
};
/* Scaling format */
enum scaling_transformation {
@@ -632,6 +653,7 @@ enum dc_psr_state {
PSR_STATE1a,
PSR_STATE2,
PSR_STATE2a,
+ PSR_STATE2b,
PSR_STATE3,
PSR_STATE3Init,
PSR_STATE4,
@@ -934,6 +956,7 @@ enum dc_psr_version {
/* Possible values of display_endpoint_id.endpoint */
enum display_endpoint_type {
DISPLAY_ENDPOINT_PHY = 0, /* Physical connector. */
+ DISPLAY_ENDPOINT_USB4_DPIA, /* USB4 DisplayPort tunnel. */
DISPLAY_ENDPOINT_UNKNOWN = -1
};
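
DISPLAY_ENDPOINT_USB4_DPIA distinguishes tunneled DP endpoints, which have no BIOS connector entry and therefore no fixed link encoder, from physical connectors. A minimal sketch of that distinction follows; the enum is mirrored from above and the helper is hypothetical, standing in for the ep_type checks made elsewhere in this series (e.g. in dce110_disable_stream() and dc_validate_stream()).

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the endpoint-type enum above, including the new USB4 DPIA
 * entry, reproduced so the sketch compiles standalone.
 */
enum display_endpoint_type {
        DISPLAY_ENDPOINT_PHY = 0,       /* Physical connector. */
        DISPLAY_ENDPOINT_USB4_DPIA,     /* USB4 DisplayPort tunnel. */
        DISPLAY_ENDPOINT_UNKNOWN = -1
};

/* Only physical endpoints are guaranteed a fixed link encoder; tunneled
 * links rely on dynamic assignment via link_enc_cfg in this series.
 */
static bool has_fixed_link_encoder(enum display_endpoint_type ep_type)
{
        return ep_type == DISPLAY_ENDPOINT_PHY;
}

int main(void)
{
        printf("%d %d\n",
               has_fixed_link_encoder(DISPLAY_ENDPOINT_PHY),
               has_fixed_link_encoder(DISPLAY_ENDPOINT_USB4_DPIA));
        return 0;
}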
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
index 456fadbbfac7..b699d1b2ba83 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.h
@@ -96,6 +96,22 @@
SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
NBIO_SR(BIOS_SCRATCH_2)
+#define ABM_DCN302_REG_LIST(id)\
+ ABM_COMMON_REG_LIST_DCE_BASE(), \
+ SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_LS_SAMPLE_RATE, ABM, id), \
+ SRI(BL1_PWM_BL_UPDATE_SAMPLE_RATE, ABM, id), \
+ SRI(DC_ABM1_HG_MISC_CTRL, ABM, id), \
+ SRI(DC_ABM1_IPCSC_COEFF_SEL, ABM, id), \
+ SRI(BL1_PWM_CURRENT_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_TARGET_ABM_LEVEL, ABM, id), \
+ SRI(BL1_PWM_USER_LEVEL, ABM, id), \
+ SRI(DC_ABM1_LS_MIN_MAX_PIXEL_VALUE_THRES, ABM, id), \
+ SRI(DC_ABM1_HGLS_REG_READ_PROGRESS, ABM, id), \
+ SRI(DC_ABM1_ACE_OFFSET_SLOPE_0, ABM, id), \
+ SRI(DC_ABM1_ACE_THRES_12, ABM, id), \
+ NBIO_SR(BIOS_SCRATCH_2)
+
#define ABM_DCN30_REG_LIST(id)\
ABM_COMMON_REG_LIST_DCE_BASE(), \
SRI(DC_ABM1_HG_SAMPLE_RATE, ABM, id), \
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
index 7866cf2a668f..27218ede150a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_audio.c
@@ -514,13 +514,15 @@ void dce_aud_az_configure(
union audio_sample_rates sample_rates =
audio_mode->sample_rates;
uint8_t byte2 = audio_mode->max_bit_rate;
+ uint8_t channel_count = audio_mode->channel_count;
/* adjust specific properties */
switch (audio_format_code) {
case AUDIO_FORMAT_CODE_LINEARPCM: {
+
check_audio_bandwidth(
crtc_info,
- audio_mode->channel_count,
+ channel_count,
signal,
&sample_rates);
@@ -548,7 +550,7 @@ void dce_aud_az_configure(
/* fill audio format data */
set_reg_field_value(value,
- audio_mode->channel_count - 1,
+ channel_count - 1,
AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0,
MAX_CHANNELS);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index 3c3347341103..6d42a9cc9916 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -534,17 +534,26 @@ struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine
static enum i2caux_transaction_action i2caux_action_from_payload(struct aux_payload *payload)
{
if (payload->i2c_over_aux) {
+ if (payload->write_status_update) {
+ if (payload->mot)
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+ }
if (payload->write) {
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT;
- return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ else
+ return I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
}
if (payload->mot)
return I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT;
+
return I2CAUX_TRANSACTION_ACTION_I2C_READ;
}
if (payload->write)
return I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+
return I2CAUX_TRANSACTION_ACTION_DP_READ;
}
@@ -627,6 +636,7 @@ int dce_aux_transfer_dmub_raw(struct ddc_service *ddc,
#define AUX_MAX_I2C_DEFER_RETRIES 7
#define AUX_MAX_INVALID_REPLY_RETRIES 2
#define AUX_MAX_TIMEOUT_RETRIES 3
+#define AUX_DEFER_DELAY_FOR_DPIA 4 /*ms*/
static void dce_aux_log_payload(const char *payload_name,
unsigned char *payload, uint32_t length, uint32_t max_length_to_log)
@@ -689,15 +699,21 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
enum aux_return_code_type operation_result;
bool retry_on_defer = false;
struct ddc *ddc_pin = ddc->ddc_pin;
- struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
- struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine);
+ struct dce_aux *aux_engine = NULL;
+ struct aux_engine_dce110 *aux110 = NULL;
uint32_t defer_time_in_ms = 0;
int aux_ack_retries = 0,
aux_defer_retries = 0,
aux_i2c_defer_retries = 0,
aux_timeout_retries = 0,
- aux_invalid_reply_retries = 0;
+ aux_invalid_reply_retries = 0,
+ aux_ack_m_retries = 0;
+
+ if (ddc_pin) {
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux110 = FROM_AUX_ENGINE(aux_engine);
+ }
if (!payload->reply) {
payload_reply = false;
@@ -752,9 +768,27 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
aux_defer_retries,
AUX_MAX_RETRIES);
goto fail;
- } else {
+ } else
udelay(300);
+ } else if (payload->write && ret > 0) {
+ /* sink requested more time to complete the write via AUX_ACKM */
+ if (++aux_ack_m_retries >= AUX_MAX_RETRIES) {
+ DC_TRACE_LEVEL_MESSAGE(DAL_TRACE_LEVEL_ERROR,
+ LOG_FLAG_Error_I2cAux,
+ "dce_aux_transfer_with_retries: FAILURE: aux_ack_m_retries=%d >= AUX_MAX_RETRIES=%d",
+ aux_ack_m_retries,
+ AUX_MAX_RETRIES);
+ goto fail;
}
+
+ /* retry reading the write status until complete
+ * NOTE: payload is modified here
+ */
+ payload->write = false;
+ payload->write_status_update = true;
+ payload->length = 0;
+ udelay(300);
+
} else
return true;
break;
@@ -765,7 +799,10 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
"dce_aux_transfer_with_retries: AUX_RET_SUCCESS: AUX_TRANSACTION_REPLY_AUX_DEFER");
/* polling_timeout_period is in us */
- defer_time_in_ms += aux110->polling_timeout_period / 1000;
+ if (aux110)
+ defer_time_in_ms += aux110->polling_timeout_period / 1000;
+ else
+ defer_time_in_ms += AUX_DEFER_DELAY_FOR_DPIA;
++aux_defer_retries;
fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
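
For tunneled (DPIA) links there is no DDC pin and thus no AUX engine instance, so the retry loop above falls back to the fixed AUX_DEFER_DELAY_FOR_DPIA instead of the engine's polling timeout when accumulating defer time. Below is a standalone sketch of just that selection; the parameter stands in for aux110->polling_timeout_period and the helper is hypothetical.

#include <stdbool.h>
#include <stdio.h>

#define AUX_DEFER_DELAY_FOR_DPIA 4 /* ms, as defined above */

/* Per-retry increment added to defer_time_in_ms: the engine's polling
 * timeout (converted from us to ms) when an AUX engine exists, otherwise
 * the fixed DPIA delay for tunneled links.
 */
static unsigned int defer_delay_ms(bool has_aux_engine,
                                   unsigned int polling_timeout_period_us)
{
        if (has_aux_engine)
                return polling_timeout_period_us / 1000;
        return AUX_DEFER_DELAY_FOR_DPIA;
}

int main(void)
{
        printf("%u %u\n",
               defer_delay_ms(true, 3200),  /* 3 ms from the engine timeout */
               defer_delay_ms(false, 0));   /* 4 ms DPIA fallback */
        return 0;
}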
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 3139285bd403..692fa23ca02b 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -76,6 +76,15 @@
SRII(PIXEL_RATE_CNTL, OTG, 4),\
SRII(PIXEL_RATE_CNTL, OTG, 5)
+#define CS_COMMON_REG_LIST_DCN201(index, pllid) \
+ SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
+ SRII(PHASE, DP_DTO, 0),\
+ SRII(PHASE, DP_DTO, 1),\
+ SRII(MODULO, DP_DTO, 0),\
+ SRII(MODULO, DP_DTO, 1),\
+ SRII(PIXEL_RATE_CNTL, OTG, 0),\
+ SRII(PIXEL_RATE_CNTL, OTG, 1)
+
#define CS_COMMON_REG_LIST_DCN2_1(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
SRII(PHASE, DP_DTO, 0),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
index 0464a8f3db3c..989f5b6907e2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h
@@ -70,6 +70,10 @@
SRII(PIXEL_RATE_CNTL, blk, 4), \
SRII(PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PIXEL_RATE_REG_LIST_201(blk) \
+ SRII(PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PIXEL_RATE_CNTL, blk, 1)
+
#define HWSEQ_PHYPLL_REG_LIST(blk) \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1), \
@@ -94,6 +98,10 @@
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 4), \
SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 5)
+#define HWSEQ_PHYPLL_REG_LIST_201(blk) \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 0), \
+ SRII(PHYPLL_PIXEL_RATE_CNTL, blk, 1)
+
#define HWSEQ_DCE11_REG_LIST_BASE() \
SR(DC_MEM_GLOBAL_PWR_REQ_CNTL), \
SR(DCFEV_CLOCK_CONTROL), \
@@ -337,6 +345,29 @@
SR(D6VGA_CONTROL), \
SR(DC_IP_REQUEST_CNTL)
+#define HWSEQ_DCN201_REG_LIST()\
+ HWSEQ_DCN_REG_LIST(), \
+ HWSEQ_PIXEL_RATE_REG_LIST_201(OTG), \
+ HWSEQ_PHYPLL_REG_LIST_201(OTG), \
+ SR(MICROSECOND_TIME_BASE_DIV), \
+ SR(MILLISECOND_TIME_BASE_DIV), \
+ SR(DISPCLK_FREQ_CHANGE_CNTL), \
+ SR(RBBMIF_TIMEOUT_DIS), \
+ SR(RBBMIF_TIMEOUT_DIS_2), \
+ SR(DCHUBBUB_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_CTRL), \
+ SR(DPP_TOP0_DPP_CRC_VAL_B_A), \
+ SR(DPP_TOP0_DPP_CRC_VAL_R_G), \
+ SR(MPC_CRC_CTRL), \
+ SR(MPC_CRC_RESULT_GB), \
+ SR(MPC_CRC_RESULT_C), \
+ SR(MPC_CRC_RESULT_AR), \
+ SR(AZALIA_AUDIO_DTO), \
+ SR(AZALIA_CONTROLLER_CLOCK_GATING), \
+ MMHUB_SR(MC_VM_FB_LOCATION_BASE), \
+ MMHUB_SR(MC_VM_FB_LOCATION_TOP), \
+ MMHUB_SR(MC_VM_FB_OFFSET)
+
#define HWSEQ_DCN30_REG_LIST()\
HWSEQ_DCN2_REG_LIST(),\
HWSEQ_DCN_REG_LIST(), \
@@ -637,6 +668,9 @@ struct dce_hwseq_registers {
uint32_t DMU_MEM_PWR_CNTL;
uint32_t MMHUBBUB_MEM_PWR_CNTL;
uint32_t DCHUBBUB_ARB_HOSTVM_CNTL;
+ uint32_t MC_VM_FB_LOCATION_BASE;
+ uint32_t MC_VM_FB_LOCATION_TOP;
+ uint32_t MC_VM_FB_OFFSET;
};
/* set field name */
#define HWS_SF(blk_name, reg_name, field_name, post_fix)\
@@ -872,6 +906,11 @@ struct dce_hwseq_registers {
HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \
HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh)
+#define HWSEQ_DCN201_MASK_SH_LIST(mask_sh)\
+ HWSEQ_DCN_MASK_SH_LIST(mask_sh), \
+ HWS_SF(, DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \
+ HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh)
+
#define HWSEQ_DCN30_MASK_SH_LIST(mask_sh)\
HWSEQ_DCN2_MASK_SH_LIST(mask_sh), \
HWS_SF(, AZALIA_AUDIO_DTO, AZALIA_AUDIO_DTO_MODULE, mask_sh), \
@@ -1112,7 +1151,8 @@ struct dce_hwseq_registers {
type DOMAIN_POWER_GATE;\
type DOMAIN_PGFSM_PWR_STATUS;\
type HPO_HDMISTREAMCLK_G_GATE_DIS;\
- type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;
+ type DISABLE_HOSTVM_FORCE_ALLOW_PSTATE;\
+ type I2C_LIGHT_SLEEP_FORCE;
struct dce_hwseq_shift {
HWSEQ_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index 8d4263da59f2..779bc92a2968 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -919,6 +919,7 @@ static void dce110_stream_encoder_stop_dp_info_packets(
}
static void dce110_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc)
{
struct dce110_stream_encoder *enc110 = DCE110STRENC_FROM_STRENC(enc);
@@ -967,6 +968,7 @@ static void dce110_stream_encoder_dp_blank(
/* output video stream to link encoder */
static void dce110_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
index 54a1408c8015..fb0dec4ed3a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_abm.c
@@ -203,12 +203,33 @@ static bool dmub_abm_init_config(struct abm *abm,
return true;
}
+static bool dmub_abm_set_pause(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int stream_inst)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = abm->ctx;
+ uint8_t panel_mask = 0x01 << panel_inst;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abm_pause.header.type = DMUB_CMD__ABM;
+ cmd.abm_pause.header.sub_type = DMUB_CMD__ABM_PAUSE;
+ cmd.abm_pause.abm_pause_data.enable = pause;
+ cmd.abm_pause.abm_pause_data.panel_mask = panel_mask;
+ cmd.abm_set_level.header.payload_bytes = sizeof(struct dmub_cmd_abm_pause_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
+}
+
static const struct abm_funcs abm_funcs = {
.abm_init = dmub_abm_init,
.set_abm_level = dmub_abm_set_level,
.get_current_backlight = dmub_abm_get_current_backlight,
.get_target_backlight = dmub_abm_get_target_backlight,
.init_abm_config = dmub_abm_init_config,
+ .set_abm_pause = dmub_abm_set_pause,
};
static void dmub_abm_construct(
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index aa8403bc4c83..90eb8eedacf2 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -50,6 +50,8 @@ static enum dc_psr_state convert_psr_state(uint32_t raw_state)
state = PSR_STATE2;
else if (raw_state == 0x21)
state = PSR_STATE2a;
+ else if (raw_state == 0x22)
+ state = PSR_STATE2b;
else if (raw_state == 0x30)
state = PSR_STATE3;
else if (raw_state == 0x31)
@@ -225,6 +227,25 @@ static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level, uint8_
dc_dmub_srv_wait_idle(dc->dmub_srv);
}
+/**
+ * Set PSR power optimization flags.
+ */
+static void dmub_psr_set_power_opt(struct dmub_psr *dmub, unsigned int power_opt)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.psr_set_power_opt.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_power_opt.header.sub_type = DMUB_CMD__SET_PSR_POWER_OPT;
+ cmd.psr_set_power_opt.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_power_opt_data);
+ cmd.psr_set_power_opt.psr_set_power_opt_data.power_opt = power_opt;
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+}
+
/*
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
@@ -356,6 +377,7 @@ static const struct dmub_psr_funcs psr_funcs = {
.psr_set_level = dmub_psr_set_level,
.psr_force_static = dmub_psr_force_static,
.psr_get_residency = dmub_psr_get_residency,
+ .psr_set_power_opt = dmub_psr_set_power_opt,
};
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 9675c269e649..5dbd479660f1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -46,6 +46,7 @@ struct dmub_psr_funcs {
void (*psr_force_static)(struct dmub_psr *dmub, uint8_t panel_inst);
void (*psr_get_residency)(struct dmub_psr *dmub, uint32_t *residency,
uint8_t panel_inst);
+ void (*psr_set_power_opt)(struct dmub_psr *dmub, unsigned int power_opt);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
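
psr_set_power_opt() forwards a plain flags word to DMUB via DMUB_CMD__SET_PSR_POWER_OPT. The sketch below composes that word from the dc_psr_power_opts values introduced in dc.h earlier in this series; whether both options are ever enabled together is a policy choice, and the combination is shown here only to illustrate that the field is treated as bitwise-OR-able flags.

#include <stdio.h>

/* Mirror of enum dc_psr_power_opts from dc.h in this series, reproduced
 * so the sketch compiles standalone.
 */
enum dc_psr_power_opts {
        psr_power_opt_invalid = 0x0,
        psr_power_opt_smu_opt_static_screen = 0x1,
        psr_power_opt_z10_static_screen = 0x10,
};

int main(void)
{
        /* Callers OR the options they want before handing the word to
         * psr_set_power_opt(); here both static-screen options are set
         * purely for illustration.
         */
        unsigned int power_opt = psr_power_opt_smu_opt_static_screen |
                                 psr_power_opt_z10_static_screen;

        printf("power_opt = 0x%x\n", power_opt);
        return 0;
}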
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 62d595ded866..af3e68d3e747 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -46,6 +46,7 @@
#include "transform.h"
#include "stream_encoder.h"
#include "link_encoder.h"
+#include "link_enc_cfg.h"
#include "link_hwss.h"
#include "dc_link_dp.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
@@ -57,7 +58,8 @@
#include "audio.h"
#include "reg_helper.h"
#include "panel_cntl.h"
-
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
/* include DCE11 register header files */
#include "dce/dce_11_0_d.h"
#include "dce/dce_11_0_sh_mask.h"
@@ -1108,11 +1110,23 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
clk_mgr->funcs->enable_pme_wa(clk_mgr);
/* un-mute audio */
/* TODO: audio should be per stream rather than per link */
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.hpo_dp_stream_enc, false);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, false);
+#else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, false);
+#endif
if (pipe_ctx->stream_res.audio)
pipe_ctx->stream_res.audio->enabled = true;
}
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM);
}
void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -1129,14 +1143,32 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false)
return;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.hpo_dp_stream_enc, true);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
+#else
pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
pipe_ctx->stream_res.stream_enc, true);
+#endif
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->enabled = false;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
+ pipe_ctx->stream_res.stream_enc);
+#else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable(
pipe_ctx->stream_res.stream_enc);
+#endif
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable(
pipe_ctx->stream_res.stream_enc);
@@ -1151,6 +1183,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx)
* stream->stream_engine_id);
*/
}
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(pipe_ctx->stream->link, DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM);
}
void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
@@ -1158,6 +1193,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ struct link_encoder *link_enc = NULL;
if (dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal)) {
pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
@@ -1166,17 +1202,48 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
pipe_ctx->stream_res.stream_enc);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#else
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#endif
pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
pipe_ctx->stream_res.stream_enc);
dc->hwss.disable_audio_stream(pipe_ctx);
- link->link_enc->funcs->connect_dig_be_to_fe(
+ /* Link encoder may have been dynamically assigned to non-physical display endpoint. */
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY)
+ link_enc = link->link_enc;
+ else if (dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link);
+ ASSERT(link_enc);
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ setup_dp_hpo_stream(pipe_ctx, false);
+ /* TODO - DP2.0 HW: unmap stream from link encoder here */
+ } else {
+ if (link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc,
+ pipe_ctx->stream_res.stream_enc->id,
+ false);
+ }
+#else
+ if (link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
link->link_enc,
pipe_ctx->stream_res.stream_enc->id,
false);
-
+#endif
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1192,7 +1259,7 @@ void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
hws->funcs.edp_backlight_control(link, true);
@@ -1210,8 +1277,16 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.set_abm_immediate_disable(pipe_ctx);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+#else
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
- pipe_ctx->stream_res.stream_enc->funcs->dp_blank(pipe_ctx->stream_res.stream_enc);
+#endif
+ pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc);
if (!dc_is_embedded_signal(pipe_ctx->stream->signal)) {
/*
@@ -1436,6 +1511,7 @@ static enum dc_status apply_single_controller_ctx_to_hw(
struct dc *dc)
{
struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
struct drr_params params = {0};
unsigned int event_triggers = 0;
struct pipe_ctx *odm_pipe = pipe_ctx->next_odm_pipe;
@@ -1451,10 +1527,23 @@ static enum dc_status apply_single_controller_ctx_to_hw(
build_audio_output(context, pipe_ctx, &audio_output);
if (dc_is_dp_signal(pipe_ctx->stream->signal))
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+ else
+ pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
+ pipe_ctx->stream_res.stream_enc,
+ pipe_ctx->stream_res.audio->inst,
+ &pipe_ctx->stream->audio_info);
+#else
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.audio->inst,
&pipe_ctx->stream->audio_info);
+#endif
else
pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup(
pipe_ctx->stream_res.stream_enc,
@@ -1469,10 +1558,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
&pipe_ctx->stream->audio_info);
}
- /* */
- /* Do not touch stream timing on seamless boot optimization. */
- if (!pipe_ctx->stream->apply_seamless_boot_optimization)
- hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* DCN3.1 FPGA Workaround
+ * The HPO DP stream encoder must be enabled before the OTG master enable is set.
+ * To guarantee this ordering, enable_stream_timing is called only AFTER
+ * core_link_enable_stream.
+ */
+ if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)))
+#endif
+ /* */
+ /* Do not touch stream timing on seamless boot optimization. */
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization)
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
if (hws->funcs.setup_vupdate_interrupt)
hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
@@ -1499,6 +1596,9 @@ static enum dc_status apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG);
+
pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion(
pipe_ctx->stream_res.opp,
COLOR_SPACE_YCBCR601,
@@ -1526,6 +1626,18 @@ static enum dc_status apply_single_controller_ctx_to_hw(
if (!stream->dpms_off)
core_link_enable_stream(context, pipe_ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* DCN3.1 FPGA Workaround
+ * The HPO DP stream encoder must be enabled before the OTG master enable is set.
+ * To guarantee this ordering, enable_stream_timing is called only AFTER
+ * core_link_enable_stream.
+ */
+ if (hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx)) {
+ if (!pipe_ctx->stream->apply_seamless_boot_optimization)
+ hws->funcs.enable_stream_timing(pipe_ctx, context, dc);
+ }
+#endif
+
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0;
pipe_ctx->stream->link->psr_settings.psr_feature_enabled = false;
@@ -1537,29 +1649,37 @@ static enum dc_status apply_single_controller_ctx_to_hw(
static void power_down_encoders(struct dc *dc)
{
- int i;
-
- /* do not know BIOS back-front mapping, simply blank all. It will not
- * hurt for non-DP
- */
- for (i = 0; i < dc->res_pool->stream_enc_count; i++) {
- dc->res_pool->stream_enc[i]->funcs->dp_blank(
- dc->res_pool->stream_enc[i]);
- }
+ int i, j;
for (i = 0; i < dc->link_count; i++) {
enum signal_type signal = dc->links[i]->connector_signal;
if ((signal == SIGNAL_TYPE_EDP) ||
- (signal == SIGNAL_TYPE_DISPLAY_PORT))
+ (signal == SIGNAL_TYPE_DISPLAY_PORT)) {
+ if (dc->links[i]->link_enc->funcs->get_dig_frontend &&
+ dc->links[i]->link_enc->funcs->is_dig_enabled(dc->links[i]->link_enc)) {
+ unsigned int fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
+ dc->links[i]->link_enc);
+
+ for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
+ if (fe == dc->res_pool->stream_enc[j]->id) {
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
+ dc->res_pool->stream_enc[j]);
+ break;
+ }
+ }
+ }
+
if (!dc->links[i]->wa_flags.dp_keep_receiver_powered)
dp_receiver_power_ctrl(dc->links[i], false);
+ }
if (signal != SIGNAL_TYPE_EDP)
signal = SIGNAL_TYPE_NONE;
- dc->links[i]->link_enc->funcs->disable_output(
- dc->links[i]->link_enc, signal);
+ if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY)
+ dc->links[i]->link_enc->funcs->disable_output(
+ dc->links[i]->link_enc, signal);
dc->links[i]->link_status.link_active = false;
memset(&dc->links[i]->cur_link_settings, 0,
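
The reworked power_down_encoders() loop above can be read as the following sketch (the helper is hypothetical): instead of blindly blanking every stream encoder, it asks the link's DIG back end which front end it is driving and blanks only that one, passing the owning link so the new dp_blank(link, enc) signature can record the source sequence.

static void blank_active_stream_enc(struct dc *dc, struct dc_link *link)
{
	struct link_encoder *enc = link->link_enc;
	unsigned int fe;
	int j;

	if (!enc->funcs->get_dig_frontend || !enc->funcs->is_dig_enabled(enc))
		return;

	/* Which stream encoder (DIG front end) is this back end driving? */
	fe = enc->funcs->get_dig_frontend(enc);

	for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
		if (fe == dc->res_pool->stream_enc[j]->id) {
			dc->res_pool->stream_enc[j]->funcs->dp_blank(link,
					dc->res_pool->stream_enc[j]);
			break;
		}
	}
}
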
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
index cb9767ddf93d..44293d66b46b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
@@ -205,9 +205,17 @@ static void dpp1_power_on_dscl(
struct dcn10_dpp *dpp = TO_DCN10_DPP(dpp_base);
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
- REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
+ dpp->base.ctx->dc->optimized_required = true;
+ dpp->base.deferred_reg_writes.bits.disable_dscl = true;
+ } else {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index df8a7718a85f..a25732d07222 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -466,6 +466,71 @@ void dcn10_log_hw_state(struct dc *dc,
log_mpc_crc(dc, log_ctx);
+ {
+ int hpo_dp_link_enc_count = 0;
+
+ if (pool->hpo_dp_stream_enc_count > 0) {
+ DTN_INFO("DP HPO S_ENC: Enabled OTG Format Depth Vid SDP Compressed Link\n");
+ for (i = 0; i < pool->hpo_dp_stream_enc_count; i++) {
+ struct hpo_dp_stream_encoder_state hpo_dp_se_state = {0};
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc = pool->hpo_dp_stream_enc[i];
+
+ if (hpo_dp_stream_enc && hpo_dp_stream_enc->funcs->read_state) {
+ hpo_dp_stream_enc->funcs->read_state(hpo_dp_stream_enc, &hpo_dp_se_state);
+
+ DTN_INFO("[%d]: %d %d %6s %d %d %d %d %d\n",
+ hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0,
+ hpo_dp_se_state.stream_enc_enabled,
+ hpo_dp_se_state.otg_inst,
+ (hpo_dp_se_state.pixel_encoding == 0) ? "4:4:4" :
+ ((hpo_dp_se_state.pixel_encoding == 1) ? "4:2:2" :
+ (hpo_dp_se_state.pixel_encoding == 2) ? "4:2:0" : "Y-Only"),
+ (hpo_dp_se_state.component_depth == 0) ? 6 :
+ ((hpo_dp_se_state.component_depth == 1) ? 8 :
+ (hpo_dp_se_state.component_depth == 2) ? 10 : 12),
+ hpo_dp_se_state.vid_stream_enabled,
+ hpo_dp_se_state.sdp_enabled,
+ hpo_dp_se_state.compressed_format,
+ hpo_dp_se_state.mapped_to_link_enc);
+ }
+ }
+
+ DTN_INFO("\n");
+ }
+
+ /* log DP HPO L_ENC section if any hpo_dp_link_enc exists */
+ for (i = 0; i < dc->link_count; i++)
+ if (dc->links[i]->hpo_dp_link_enc)
+ hpo_dp_link_enc_count++;
+
+ if (hpo_dp_link_enc_count) {
+ DTN_INFO("DP HPO L_ENC: Enabled Mode Lanes Stream Slots VC Rate X VC Rate Y\n");
+
+ for (i = 0; i < dc->link_count; i++) {
+ struct hpo_dp_link_encoder *hpo_dp_link_enc = dc->links[i]->hpo_dp_link_enc;
+ struct hpo_dp_link_enc_state hpo_dp_le_state = {0};
+
+ if (hpo_dp_link_enc && hpo_dp_link_enc->funcs->read_state) {
+ hpo_dp_link_enc->funcs->read_state(hpo_dp_link_enc, &hpo_dp_le_state);
+ DTN_INFO("[%d]: %d %6s %d %d %d %d %d\n",
+ hpo_dp_link_enc->inst,
+ hpo_dp_le_state.link_enc_enabled,
+ (hpo_dp_le_state.link_mode == 0) ? "TPS1" :
+ (hpo_dp_le_state.link_mode == 1) ? "TPS2" :
+ (hpo_dp_le_state.link_mode == 2) ? "ACTIVE" : "TEST",
+ hpo_dp_le_state.lane_count,
+ hpo_dp_le_state.stream_src[0],
+ hpo_dp_le_state.slot_count[0],
+ hpo_dp_le_state.vc_rate_x[0],
+ hpo_dp_le_state.vc_rate_y[0]);
+ DTN_INFO("\n");
+ }
+ }
+
+ DTN_INFO("\n");
+ }
+ }
+
DTN_INFO_END();
}
@@ -1313,6 +1378,12 @@ void dcn10_init_hw(struct dc *dc)
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+ /* Align the bw context with the hw config when the system resumes. */
+ if (dc->clk_mgr->clks.dispclk_khz != 0 && dc->clk_mgr->clks.dppclk_khz != 0) {
+ dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz = dc->clk_mgr->clks.dispclk_khz;
+ dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz = dc->clk_mgr->clks.dppclk_khz;
+ }
+
// Initialize the dccg
if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -1424,7 +1495,7 @@ void dcn10_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -1522,7 +1593,7 @@ void dcn10_power_down_on_boot(struct dc *dc)
for (i = 0; i < dc->link_count; i++) {
struct dc_link *link = dc->links[i];
- if (link->link_enc->funcs->is_dig_enabled &&
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
dc->hwss.power_down) {
dc->hwss.power_down(dc);
@@ -2274,8 +2345,8 @@ static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
{
struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
- struct vm_system_aperture_param apt = { {{ 0 } } };
- struct vm_context0_param vm0 = { { { 0 } } };
+ struct vm_system_aperture_param apt = {0};
+ struct vm_context0_param vm0 = {0};
mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
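
The `= {0}` conversions in this and the following hunks rely on standard C aggregate initialization: `{0}` zero-initializes every member, however deeply nested, so the extra brace levels added nothing. A standalone illustration with invented types:

struct inner { int a, b; };
struct outer { struct inner in; int c; };

struct outer x = {0};              /* x.in.a == x.in.b == x.c == 0 */
struct outer y = { { 0, 0 }, 0 };  /* equivalent, fully spelled out */
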
@@ -2448,7 +2519,7 @@ void dcn10_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = {{0}};
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -3176,13 +3247,11 @@ void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
{
- struct pipe_ctx *test_pipe;
+ struct pipe_ctx *test_pipe, *split_pipe;
const struct scaler_data *scl_data = &pipe_ctx->plane_res.scl_data;
- const struct rect *r1 = &scl_data->recout, *r2;
- int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
+ struct rect r1 = scl_data->recout, r2, r2_half;
+ int r1_r = r1.x + r1.width, r1_b = r1.y + r1.height, r2_r, r2_b;
int cur_layer = pipe_ctx->plane_state->layer_index;
- bool upper_pipe_exists = false;
- struct fixed31_32 one = dc_fixpt_from_int(1);
/**
* Disable the cursor if there's another pipe above this with a
@@ -3191,26 +3260,33 @@ static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
*/
for (test_pipe = pipe_ctx->top_pipe; test_pipe;
test_pipe = test_pipe->top_pipe) {
- if (!test_pipe->plane_state->visible)
+ // Skip invisible layers and pipe-split planes on the same layer
+ if (!test_pipe->plane_state->visible || test_pipe->plane_state->layer_index == cur_layer)
continue;
- r2 = &test_pipe->plane_res.scl_data.recout;
- r2_r = r2->x + r2->width;
- r2_b = r2->y + r2->height;
+ r2 = test_pipe->plane_res.scl_data.recout;
+ r2_r = r2.x + r2.width;
+ r2_b = r2.y + r2.height;
+ split_pipe = test_pipe;
- if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
- return true;
+ /**
+ * Pipe splitting may have left the other half of this plane on the same
+ * layer; merge the two halves (they share the same height) before checking.
+ */
+ for (split_pipe = pipe_ctx->top_pipe; split_pipe;
+ split_pipe = split_pipe->top_pipe)
+ if (split_pipe->plane_state->layer_index == test_pipe->plane_state->layer_index) {
+ r2_half = split_pipe->plane_res.scl_data.recout;
+ r2.x = (r2_half.x < r2.x) ? r2_half.x : r2.x;
+ r2.width = r2.width + r2_half.width;
+ r2_r = r2.x + r2.width;
+ break;
+ }
- if (test_pipe->plane_state->layer_index < cur_layer)
- upper_pipe_exists = true;
+ if (r1.x >= r2.x && r1.y >= r2.y && r1_r <= r2_r && r1_b <= r2_b)
+ return true;
}
- // if plane scaled, assume an upper plane can handle cursor if it exists.
- if (upper_pipe_exists &&
- (scl_data->ratios.horz.value != one.value ||
- scl_data->ratios.vert.value != one.value))
- return true;
-
return false;
}
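
A standalone sketch of the containment test performed by the loop above, using a local rect definition and invented coordinates: the two split halves of the upper plane (same layer_index) are merged horizontally, and the cursor on the lower pipe may only be disabled if the merged rectangle fully covers the lower recout.

#include <stdbool.h>

struct rect { int x, y, width, height; };

static bool upper_covers_lower(void)
{
	struct rect lower  = { .x = 100, .y = 0, .width = 800, .height = 600 };
	struct rect half_l = { .x = 0,   .y = 0, .width = 512, .height = 600 };
	struct rect half_r = { .x = 512, .y = 0, .width = 512, .height = 600 };
	struct rect upper  = half_l;

	/* Merge the two pipe-split halves of the same plane horizontally. */
	if (half_r.x < upper.x)
		upper.x = half_r.x;
	upper.width += half_r.width;           /* x = 0, width = 1024 */

	/* Full containment => the lower pipe's cursor can be disabled. */
	return lower.x >= upper.x && lower.y >= upper.y &&
	       lower.x + lower.width  <= upper.x + upper.width &&
	       lower.y + lower.height <= upper.y + upper.height; /* true here */
}
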
@@ -3600,7 +3676,7 @@ void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
@@ -3613,7 +3689,7 @@ void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
params.timing.pix_clk_100hz /= 2;
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
index f0e0d07b0311..e2508d637e0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_ipp.h
@@ -60,14 +60,18 @@
SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
+#define IPP_REG_LIST_DCN201(id) \
+ IPP_REG_LIST_DCN(id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI(CURSOR_SIZE, CURSOR0_, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(CURSOR_POSITION, CURSOR0_, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR0_, id)
+
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
#define CURSOR0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
-#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
-#define CURSOR3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
#define IPP_SF(reg_name, field_name, post_fix)\
.field_name = reg_name ## __ ## field_name ## post_fix
@@ -122,6 +126,23 @@
IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+#define IPP_MASK_SH_LIST_DCN201(mask_sh) \
+ IPP_MASK_SH_LIST_DCN(mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ IPP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh)
+
#define IPP_DCN10_REG_FIELD_LIST(type) \
type CNVC_SURFACE_PIXEL_FORMAT; \
type CNVC_BYPASS; \
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index e4701825b5a0..2dc4b4e4ba02 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -1460,5 +1460,14 @@ void dcn10_link_encoder_get_max_link_cap(struct link_encoder *enc,
if (enc->features.flags.bits.IS_HBR3_CAPABLE)
max_link_cap.link_rate = LINK_RATE_HIGH3;
+ if (enc->features.flags.bits.IS_UHBR10_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR10;
+
+ if (enc->features.flags.bits.IS_UHBR13_5_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR13_5;
+
+ if (enc->features.flags.bits.IS_UHBR20_CAPABLE)
+ max_link_cap.link_rate = LINK_RATE_UHBR20;
+
*link_settings = max_link_cap;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index 37848f4577b1..3d2a2848857a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -304,7 +304,7 @@ void optc1_program_timing(
if (optc1_is_two_pixels_per_containter(&patched_crtc_timing) || optc1->opp_count == 2)
h_div = H_TIMING_DIV_BY2;
- if (REG(OPTC_DATA_FORMAT_CONTROL)) {
+ if (REG(OPTC_DATA_FORMAT_CONTROL) && optc1->tg_mask->OPTC_DATA_FORMAT != 0) {
uint32_t data_fmt = 0;
if (patched_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 7daadb6a5233..f37551e00023 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1296,7 +1296,7 @@ struct stream_encoder *dcn10_find_first_free_match_stream_enc_for_link(
* in daisy chain use case
*/
j = i;
- if (pool->stream_enc[i]->id ==
+ if (link->ep_type == DISPLAY_ENDPOINT_PHY && pool->stream_enc[i]->id ==
link->link_enc->preferred_engine)
return pool->stream_enc[i];
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index cf364ae93138..b0c08ee6bc2c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -29,6 +29,9 @@
#include "dcn10_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "dcn30/dcn30_afmt.h"
#define DC_LOGGER \
enc1->base.ctx->logger
@@ -644,6 +647,12 @@ void enc1_stream_encoder_set_throttled_vcp_size(
x),
26));
+ // If y rounds up to an integer, carry it over to x.
+ if (y >> 26) {
+ x += 1;
+ y = 0;
+ }
+
REG_SET_2(DP_MSE_RATE_CNTL, 0,
DP_MSE_RATE_X, x,
DP_MSE_RATE_Y, y);
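
A standalone illustration (not DC code; the driver uses the dc_fixpt helpers) of why the carry above is needed: X holds the integer slots-per-MTP and Y a 26-bit fraction, and rounding the fraction can yield exactly 1.0, which must be folded back into X because Y only has 26 fractional bits.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double avg_slots = 2.9999999999;  /* example value just below an integer */
	uint32_t x = (uint32_t)avg_slots;
	uint32_t y = (uint32_t)((avg_slots - x) * (1 << 26) + 0.5);

	if (y >> 26) {   /* fraction rounded up to 1.0: carry into x */
		x += 1;
		y = 0;
	}
	printf("DP_MSE_RATE_X=%u DP_MSE_RATE_Y=%u\n", x, y); /* prints 3 and 0 */
	return 0;
}
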
@@ -726,6 +735,16 @@ void enc1_stream_encoder_update_dp_info_packets(
0, /* packetIndex */
&info_frame->vsc);
+ /* VSC SDP at packetIndex 1 is used by PSR in DMCUB FW.
+ * Note that GSP1 is not enabled here; the FW enables it.
+ */
+ if (info_frame->vsc.valid)
+ enc1_update_generic_info_packet(
+ enc1,
+ 1, /* packetIndex */
+ &info_frame->vsc);
+
if (info_frame->spd.valid)
enc1_update_generic_info_packet(
enc1,
@@ -884,6 +903,7 @@ void enc1_stream_encoder_stop_dp_info_packets(
}
void enc1_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -914,6 +934,8 @@ void enc1_stream_encoder_dp_blank(
/* disable DP stream */
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, 0);
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM);
+
/* the encoder stops sending the video stream
* at the start of the vertical blanking.
* Poll for DP_VID_STREAM_STATUS == 0
@@ -930,10 +952,13 @@ void enc1_stream_encoder_dp_blank(
*/
REG_UPDATE(DP_STEER_FIFO, DP_STEER_FIFO_RESET, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET);
}
/* output video stream to link encoder */
void enc1_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
@@ -1000,6 +1025,8 @@ void enc1_stream_encoder_dp_unblank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
void enc1_stream_encoder_set_avmute(
@@ -1444,6 +1471,10 @@ void enc1_se_hdmi_audio_setup(
void enc1_se_hdmi_audio_disable(
struct stream_encoder *enc)
{
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (enc->afmt && enc->afmt->funcs->afmt_powerdown)
+ enc->afmt->funcs->afmt_powerdown(enc->afmt);
+#endif
enc1_se_enable_audio_clock(enc, false);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
index 0d86df97878c..687d7e4bf7ca 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
@@ -627,9 +627,11 @@ void enc1_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc);
void enc1_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc);
void enc1_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
index ede65100a050..f98aba308028 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h
@@ -169,7 +169,29 @@
type DTBCLK_DTO_DIV[MAX_PIPES];\
type DCCG_AUDIO_DTO_SEL;\
type DCCG_AUDIO_DTO0_SOURCE_SEL;\
- type DENTIST_DISPCLK_CHG_MODE;
+ type DENTIST_DISPCLK_CHG_MODE;\
+ type DSCCLK0_DTO_PHASE;\
+ type DSCCLK0_DTO_MODULO;\
+ type DSCCLK1_DTO_PHASE;\
+ type DSCCLK1_DTO_MODULO;\
+ type DSCCLK2_DTO_PHASE;\
+ type DSCCLK2_DTO_MODULO;\
+ type DSCCLK0_DTO_ENABLE;\
+ type DSCCLK1_DTO_ENABLE;\
+ type DSCCLK2_DTO_ENABLE;\
+ type SYMCLK32_ROOT_SE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\
+ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\
+ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\
+ type DPSTREAMCLK_ROOT_GATE_DISABLE;\
+ type DPSTREAMCLK_GATE_DISABLE;\
+ type HDMISTREAMCLK0_DTO_PHASE;\
+ type HDMISTREAMCLK0_DTO_MODULO;\
+ type HDMICHARCLK0_GATE_DISABLE;\
+ type HDMICHARCLK0_ROOT_GATE_DISABLE;
+
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
@@ -205,6 +227,16 @@ struct dccg_registers {
uint32_t SYMCLK32_SE_CNTL;
uint32_t SYMCLK32_LE_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t DSCCLK_DTO_CTRL;
+ uint32_t DSCCLK0_DTO_PARAM;
+ uint32_t DSCCLK1_DTO_PARAM;
+ uint32_t DSCCLK2_DTO_PARAM;
+ uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE;
+ uint32_t DPSTREAMCLK_GATE_DISABLE;
+ uint32_t DCCG_GATE_DISABLE_CNTL3;
+ uint32_t HDMISTREAMCLK0_DTO_PARAM;
+ uint32_t DCCG_GATE_DISABLE_CNTL4;
+
};
struct dcn_dccg {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index a47ba1d45be9..cfee456c6c9a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -52,6 +52,9 @@
#include "dc_dmub_srv.h"
#include "dce/dmub_hw_lock_mgr.h"
#include "hw_sequencer.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
+#include "inc/link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
@@ -2120,7 +2123,7 @@ void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
struct dc_link_settings *link_settings)
{
- struct encoder_unblank_param params = { { 0 } };
+ struct encoder_unblank_param params = {0};
struct dc_stream_state *stream = pipe_ctx->stream;
struct dc_link *link = stream->link;
struct dce_hwseq *hws = link->dc->hwseq;
@@ -2135,12 +2138,17 @@ void dcn20_unblank_stream(struct pipe_ctx *pipe_ctx,
params.link_settings.link_rate = link_settings->link_rate;
- if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_unblank(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.tg->inst);
+ } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (optc2_is_two_pixels_per_containter(&stream->timing) || params.opp_cnt > 1)
params.timing.pix_clk_100hz /= 2;
pipe_ctx->stream_res.stream_enc->funcs->dp_set_odm_combine(
pipe_ctx->stream_res.stream_enc, params.opp_cnt > 1);
- pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
}
if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
@@ -2290,7 +2298,7 @@ void dcn20_update_visual_confirm_color(struct dc *dc, struct pipe_ctx *pipe_ctx,
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct mpcc_blnd_cfg blnd_cfg = { {0} };
+ struct mpcc_blnd_cfg blnd_cfg = {0};
bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha;
int mpcc_id;
struct mpcc *new_mpcc;
@@ -2374,14 +2382,36 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t active_total_with_borders;
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ struct link_encoder *link_enc;
+
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign)
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(link->ctx->dc, pipe_ctx->stream);
+ else
+ link_enc = link->link_enc;
+ ASSERT(link_enc);
/* For MST, multiple streams go to a single link.
* Connect the DIG back end to the front end in enable_stream and
* disconnect them in disable_stream.
* This keeps the stream and link logic cleanly separated.
*/
- link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc,
- pipe_ctx->stream_res.stream_enc->id, true);
+ if (is_dp_128b_132b_signal(pipe_ctx)) {
+ setup_dp_hpo_stream(pipe_ctx, true);
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->enable_stream(
+ pipe_ctx->stream_res.hpo_dp_stream_enc);
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->map_stream_to_link(
+ pipe_ctx->stream_res.hpo_dp_stream_enc,
+ pipe_ctx->stream_res.hpo_dp_stream_enc->inst,
+ link->hpo_dp_link_enc->inst);
+ }
+
+ if (!is_dp_128b_132b_signal(pipe_ctx) && link_enc)
+ link_enc->funcs->connect_dig_be_to_fe(
+ link_enc, pipe_ctx->stream_res.stream_enc->id, true);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE);
if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) {
if (link->dc->hwss.program_dmdata_engine)
@@ -2390,6 +2420,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
link->dc->hwss.update_info_frame(pipe_ctx);
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME);
+
/* enable early control to avoid corruption on DP monitor*/
active_total_with_borders =
timing->h_addressable
@@ -2406,7 +2439,9 @@ void dcn20_enable_stream(struct pipe_ctx *pipe_ctx)
/* enable audio only within mode set */
if (pipe_ctx->stream_res.audio != NULL) {
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ if (is_dp_128b_132b_signal(pipe_ctx))
+ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.hpo_dp_stream_enc);
+ else if (dc_is_dp_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dp_audio_enable(pipe_ctx->stream_res.stream_enc);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
index f6e747f25ebe..c90b8516dcc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
@@ -467,6 +467,11 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
(h_blank_start - 200 - 1) / optc1->opp_count,
MASTER_UPDATE_LOCK_DB_Y,
v_blank_start - 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc2_lock_doublebuffer_disable(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index e3e01b17c164..3883f918b3bb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -35,7 +35,7 @@
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
-#include "dml/dcn2x/dcn2x.h"
+#include "dml/dcn20/dcn20_fpu.h"
#include "dcn10/dcn10_hubp.h"
#include "dcn10/dcn10_ipp.h"
@@ -63,6 +63,7 @@
#include "dcn20_dccg.h"
#include "dcn20_vmid.h"
#include "dc_link_ddc.h"
+#include "dc_link_dp.h"
#include "dce/dce_panel_cntl.h"
#include "navi10_ip_offset.h"
@@ -86,6 +87,7 @@
#include "dce/dce_aux.h"
#include "dce/dce_i2c.h"
#include "vm_helper.h"
+#include "link_enc_cfg.h"
#include "amdgpu_socbb.h"
@@ -1595,15 +1597,32 @@ static void get_pixel_clock_parameters(
const struct dc_stream_state *stream = pipe_ctx->stream;
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ struct dc_link *link = stream->link;
+ struct link_encoder *link_enc = NULL;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
pixel_clk_params->requested_pix_clk_100hz = stream->timing.pix_clk_100hz;
- pixel_clk_params->encoder_object_id = stream->link->link_enc->id;
+
+ /* Links that support dynamically assigned link encoders are given the next
+ * available encoder if one has not already been assigned.
+ */
+ if (link->is_dig_mapping_flexible &&
+ link->dc->res_pool->funcs->link_encs_assign) {
+ link_enc = link_enc_cfg_get_link_enc_used_by_stream(stream->ctx->dc, stream);
+ if (link_enc == NULL)
+ link_enc = link_enc_cfg_get_next_avail_link_enc(stream->ctx->dc);
+ } else
+ link_enc = stream->link->link_enc;
+ ASSERT(link_enc);
+
+ if (link_enc)
+ pixel_clk_params->encoder_object_id = link_enc->id;
pixel_clk_params->signal_type = pipe_ctx->stream->signal;
pixel_clk_params->controller_id = pipe_ctx->stream_res.tg->inst + 1;
/* TODO: un-hardcode*/
+ /* TODO - DP2.0 HW: calculate requested_sym_clk for UHBR rates */
pixel_clk_params->requested_sym_clk = LINK_RATE_LOW *
LINK_RATE_REF_FREQ_IN_KHZ;
pixel_clk_params->flags.ENABLE_SS = 0;
@@ -1854,7 +1873,9 @@ static void swizzle_to_dml_params(
case DC_SW_VAR_D_X:
*sw_mode = dm_sw_var_d_x;
break;
-
+ case DC_SW_VAR_R_X:
+ *sw_mode = dm_sw_var_r_x;
+ break;
default:
ASSERT(0); /* Not supported */
break;
@@ -3044,6 +3065,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i]))
+ return true;
}
return false;
}
@@ -3094,6 +3117,10 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.bw.dcn.clk.dcfclk_khz = context->bw_ctx.dml.vba.DCFCLK * 1000;
context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000;
context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16;
+
+ if (dc->debug.min_dram_clk_khz > context->bw_ctx.bw.dcn.clk.dramclk_khz)
+ context->bw_ctx.bw.dcn.clk.dramclk_khz = dc->debug.min_dram_clk_khz;
+
context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000;
context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000;
context->bw_ctx.bw.dcn.clk.p_state_change_support =
@@ -3140,6 +3167,9 @@ void dcn20_calculate_dlg_params(
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
+ if (dc->ctx->dce_version == DCN_VERSION_2_01)
+ cstate_en = false;
+
context->bw_ctx.dml.funcs.rq_dlg_get_dlg_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].dlg_regs,
&context->res_ctx.pipe_ctx[i].ttu_regs,
@@ -3152,7 +3182,7 @@ void dcn20_calculate_dlg_params(
context->bw_ctx.dml.funcs.rq_dlg_get_rq_reg(&context->bw_ctx.dml,
&context->res_ctx.pipe_ctx[i].rq_regs,
- pipes[pipe_idx].pipe);
+ &pipes[pipe_idx].pipe);
pipe_idx++;
}
}
@@ -3630,9 +3660,6 @@ static enum dml_project get_dml_project_version(uint32_t hw_internal_rev)
return DML_PROJECT_NAVI10v2;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool init_soc_bounding_box(struct dc *dc,
struct dcn20_resource_pool *pool)
{
@@ -3668,16 +3695,22 @@ static bool init_soc_bounding_box(struct dc *dc,
clock_limits_available = (status == PP_SMU_RESULT_OK);
}
- if (clock_limits_available && uclk_states_available && num_states)
+ if (clock_limits_available && uclk_states_available && num_states) {
+ DC_FP_START();
dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states);
- else if (clock_limits_available)
+ DC_FP_END();
+ } else if (clock_limits_available) {
+ DC_FP_START();
dcn20_cap_soc_clocks(loaded_bb, max_clocks);
+ DC_FP_END();
+ }
}
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
-
+ DC_FP_END();
return true;
}
@@ -3697,8 +3730,6 @@ static bool dcn20_resource_construct(
enum dml_project dml_project_version =
get_dml_project_version(ctx->asic_id.hw_internal_rev);
- DC_FP_START();
-
ctx->dc_bios->regs = &bios_regs;
pool->base.funcs = &dcn20_res_pool_funcs;
@@ -4047,12 +4078,10 @@ static bool dcn20_resource_construct(
pool->base.oem_device = NULL;
}
- DC_FP_END();
return true;
create_fail:
- DC_FP_END();
dcn20_resource_destruct(pool);
return false;
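
The DC_FP_START()/DC_FP_END() moves in the two hunks above narrow the FPU-protected region from the whole resource constructor to just the calls that do floating-point math. A minimal sketch of the pattern (the helper name is invented; DC_FP_START/DC_FP_END typically wrap kernel_fpu_begin()/kernel_fpu_end(), which disable preemption for the duration, so the guarded region should stay as short as possible):

static void patch_bb_under_fpu_guard(struct dc *dc,
				     struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
{
	DC_FP_START();
	dcn20_patch_bounding_box(dc, loaded_bb); /* floating-point math only */
	DC_FP_END();
}
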
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index e6307397e0d2..aab25ca8343a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -29,6 +29,8 @@
#include "dcn20_stream_encoder.h"
#include "reg_helper.h"
#include "hw_shared.h"
+#include "inc/link_dpcd.h"
+#include "dpcd_defs.h"
#define DC_LOGGER \
enc1->base.ctx->logger
@@ -209,7 +211,8 @@ static void enc2_stream_encoder_stop_hdmi_info_packets(
/* Update GSP7 SDP 128 byte long */
static void enc2_update_gsp7_128_info_packet(
struct dcn10_stream_encoder *enc1,
- const struct dc_info_packet_128 *info_packet)
+ const struct dc_info_packet_128 *info_packet,
+ bool immediate_update)
{
uint32_t i;
@@ -264,7 +267,9 @@ static void enc2_update_gsp7_128_info_packet(
REG_WRITE(AFMT_GENERIC_7, *content++);
}
- REG_UPDATE(AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, 1);
+ REG_UPDATE_2(AFMT_VBI_PACKET_CONTROL1,
+ AFMT_GENERIC7_FRAME_UPDATE, !immediate_update,
+ AFMT_GENERIC7_IMMEDIATE_UPDATE, immediate_update);
}
/* Set DSC-related configuration.
@@ -290,7 +295,8 @@ static void enc2_dp_set_dsc_config(struct stream_encoder *enc,
static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -306,7 +312,7 @@ static void enc2_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
pps_sdp.hb2 = 127;
pps_sdp.hb3 = 0;
memcpy(&pps_sdp.sb[0], dsc_packed_pps, sizeof(pps_sdp.sb));
- enc2_update_gsp7_128_info_packet(enc1, &pps_sdp);
+ enc2_update_gsp7_128_info_packet(enc1, &pps_sdp, immediate_update);
/* Enable Generic Stream Packet 7 (GSP) transmission */
//REG_UPDATE(DP_SEC_CNTL,
@@ -444,6 +450,7 @@ static bool is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
}
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param)
{
@@ -522,6 +529,8 @@ void enc2_stream_encoder_dp_unblank(
*/
REG_UPDATE(DP_VID_STREAM_CNTL, DP_VID_STREAM_ENABLE, true);
+
+ dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM);
}
static void enc2_dp_set_odm_combine(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
index f3d1a0237bda..baa1e539f341 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.h
@@ -104,6 +104,7 @@ void enc2_stream_encoder_dp_set_stream_attribute(
uint32_t enable_sdp_splitting);
void enc2_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/Makefile b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
new file mode 100644
index 000000000000..f68038ceb1b1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: MIT
+#
+# Makefile for DCN201.
+DCN201 = dcn201_init.o dcn201_resource.o dcn201_hwseq.o \
+ dcn201_hubbub.o\
+ dcn201_mpc.o dcn201_hubp.o dcn201_opp.o dcn201_optc.o dcn201_dpp.o \
+ dcn201_dccg.o dcn201_link_encoder.o
+
+ifdef CONFIG_X86
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -msse
+endif
+
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o := -mhard-float -maltivec
+endif
+
+ifdef CONFIG_CC_IS_GCC
+ifeq ($(call cc-ifversion, -lt, 0701, y), y)
+IS_OLD_GCC = 1
+endif
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mhard-float
+endif
+
+ifdef CONFIG_X86
+ifdef IS_OLD_GCC
+# Stack alignment mismatch, proceed with caution.
+# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
+# (8B stack alignment).
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -mpreferred-stack-boundary=4
+else
+CFLAGS_$(AMDDALPATH)/dc/dcn201/dcn201_resource.o += -msse2
+endif
+endif
+AMD_DAL_DCN201 = $(addprefix $(AMDDALPATH)/dc/dcn201/,$(DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_DCN201)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
new file mode 100644
index 000000000000..f5bf04f7da25
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dcn201_dccg.h"
+
+#include "reg_helper.h"
+#include "core_types.h"
+
+#define TO_DCN_DCCG(dccg)\
+ container_of(dccg, struct dcn_dccg, base)
+
+#define REG(reg) \
+ (dccg_dcn->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dccg_dcn->dccg_shift->field_name, dccg_dcn->dccg_mask->field_name
+
+#define CTX \
+ dccg_dcn->base.ctx
+
+#define DC_LOGGER \
+ dccg->ctx->logger
+
+void dccg201_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ /* vbios handles it */
+}
+
+static const struct dccg_funcs dccg201_funcs = {
+ .update_dpp_dto = dccg201_update_dpp_dto,
+ .get_dccg_ref_freq = dccg2_get_dccg_ref_freq,
+ .set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
+ .otg_add_pixel = dccg2_otg_add_pixel,
+ .otg_drop_pixel = dccg2_otg_drop_pixel,
+ .dccg_init = dccg2_init
+};
+
+struct dccg *dccg201_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask)
+{
+ struct dcn_dccg *dccg_dcn = kzalloc(sizeof(*dccg_dcn), GFP_KERNEL);
+ struct dccg *base;
+
+ if (dccg_dcn == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ base = &dccg_dcn->base;
+ base->ctx = ctx;
+ base->funcs = &dccg201_funcs;
+
+ dccg_dcn->regs = regs;
+ dccg_dcn->dccg_shift = dccg_shift;
+ dccg_dcn->dccg_mask = dccg_mask;
+
+ return &dccg_dcn->base;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h
new file mode 100644
index 000000000000..80888b0484fb
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dccg.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_DCCG_H__
+#define __DCN201_DCCG_H__
+
+#include "dcn20/dcn20_dccg.h"
+
+struct dccg *dccg201_create(
+ struct dc_context *ctx,
+ const struct dccg_registers *regs,
+ const struct dccg_shift *dccg_shift,
+ const struct dccg_mask *dccg_mask);
+
+#endif //__DCN201_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
new file mode 100644
index 000000000000..8b6505b7dca8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "core_types.h"
+
+#include "reg_helper.h"
+#include "dcn201_dpp.h"
+#include "basics/conversion.h"
+
+#define REG(reg)\
+ dpp->tf_regs->reg
+
+#define CTX \
+ dpp->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ dpp->tf_shift->field_name, dpp->tf_mask->field_name
+
+static void dpp201_cnv_setup(
+ struct dpp *dpp_base,
+ enum surface_pixel_format format,
+ enum expansion_mode mode,
+ struct dc_csc_transform input_csc_color_matrix,
+ enum dc_color_space input_color_space,
+ struct cnv_alpha_2bit_lut *alpha_2bit_lut)
+{
+ struct dcn201_dpp *dpp = TO_DCN201_DPP(dpp_base);
+ uint32_t pixel_format = 0;
+ uint32_t alpha_en = 1;
+ enum dc_color_space color_space = COLOR_SPACE_SRGB;
+ enum dcn10_input_csc_select select = INPUT_CSC_SELECT_BYPASS;
+ bool force_disable_cursor = false;
+ uint32_t is_2bit = 0;
+
+ REG_SET_2(FORMAT_CONTROL, 0,
+ CNVC_BYPASS, 0,
+ FORMAT_EXPANSION_MODE, mode);
+
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CNV16, 0);
+ REG_UPDATE(FORMAT_CONTROL, CNVC_BYPASS_MSB_ALIGN, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE, 0);
+ REG_UPDATE(FORMAT_CONTROL, CLAMP_POSITIVE_C, 0);
+
+ switch (format) {
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555:
+ pixel_format = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB565:
+ pixel_format = 3;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888:
+ pixel_format = 8;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010:
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010:
+ pixel_format = 10;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ force_disable_cursor = false;
+ pixel_format = 65;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 64;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr:
+ force_disable_cursor = true;
+ pixel_format = 67;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb:
+ force_disable_cursor = true;
+ pixel_format = 66;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
+ pixel_format = 22;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F:
+ pixel_format = 24;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
+ pixel_format = 25;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ pixel_format = 12;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FIX:
+ pixel_format = 112;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FIX:
+ pixel_format = 113;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_ACrYCb2101010:
+ pixel_format = 114;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_CrYCbA1010102:
+ pixel_format = 115;
+ color_space = COLOR_SPACE_YCBCR709;
+ select = INPUT_CSC_SELECT_ICSC;
+ is_2bit = 1;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_RGB111110_FLOAT:
+ pixel_format = 118;
+ alpha_en = 0;
+ break;
+ case SURFACE_PIXEL_FORMAT_GRPH_BGR101111_FLOAT:
+ pixel_format = 119;
+ alpha_en = 0;
+ break;
+ default:
+ break;
+ }
+
+ if (is_2bit == 1 && alpha_2bit_lut != NULL) {
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT2, alpha_2bit_lut->lut2);
+ REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT3, alpha_2bit_lut->lut3);
+ }
+
+ REG_SET(CNVC_SURFACE_PIXEL_FORMAT, 0,
+ CNVC_SURFACE_PIXEL_FORMAT, pixel_format);
+ REG_UPDATE(FORMAT_CONTROL, FORMAT_CONTROL__ALPHA_EN, alpha_en);
+
+ dpp1_program_input_csc(dpp_base, color_space, select, NULL);
+
+ if (force_disable_cursor) {
+ REG_UPDATE(CURSOR_CONTROL,
+ CURSOR_ENABLE, 0);
+ REG_UPDATE(CURSOR0_CONTROL,
+ CUR0_ENABLE, 0);
+ }
+ dpp2_power_on_obuf(dpp_base, true);
+}
+
+#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
+
+static bool dpp201_get_optimal_number_of_taps(
+ struct dpp *dpp,
+ struct scaler_data *scl_data,
+ const struct scaling_taps *in_taps)
+{
+ uint32_t pixel_width;
+
+ if (scl_data->viewport.width > scl_data->recout.width)
+ pixel_width = scl_data->recout.width;
+ else
+ pixel_width = scl_data->viewport.width;
+
+ if (scl_data->viewport.width != scl_data->h_active &&
+ scl_data->viewport.height != scl_data->v_active &&
+ dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
+ scl_data->format == PIXEL_FORMAT_FP16)
+ return false;
+
+ if (scl_data->viewport.width > scl_data->h_active &&
+ dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
+ scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
+ return false;
+
+ if (scl_data->ratios.horz.value == (8ll << 32))
+ scl_data->ratios.horz.value--;
+ if (scl_data->ratios.vert.value == (8ll << 32))
+ scl_data->ratios.vert.value--;
+ if (scl_data->ratios.horz_c.value == (8ll << 32))
+ scl_data->ratios.horz_c.value--;
+ if (scl_data->ratios.vert_c.value == (8ll << 32))
+ scl_data->ratios.vert_c.value--;
+
+ if (in_taps->h_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
+ scl_data->taps.h_taps = 8;
+ else
+ scl_data->taps.h_taps = 4;
+ } else
+ scl_data->taps.h_taps = in_taps->h_taps;
+
+ if (in_taps->v_taps == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
+ scl_data->taps.v_taps = 8;
+ else
+ scl_data->taps.v_taps = 4;
+ } else
+ scl_data->taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
+ scl_data->taps.v_taps_c = 4;
+ else
+ scl_data->taps.v_taps_c = 2;
+ } else
+ scl_data->taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
+ scl_data->taps.h_taps_c = 4;
+ else
+ scl_data->taps.h_taps_c = 2;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
+ else
+ scl_data->taps.h_taps_c = in_taps->h_taps_c;
+
+ if (!dpp->ctx->dc->debug.always_scale) {
+ if (IDENTITY_RATIO(scl_data->ratios.horz))
+ scl_data->taps.h_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.v_taps = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.horz_c))
+ scl_data->taps.h_taps_c = 1;
+ if (IDENTITY_RATIO(scl_data->ratios.vert_c))
+ scl_data->taps.v_taps_c = 1;
+ }
+
+ return true;
+}
+
+static struct dpp_funcs dcn201_dpp_funcs = {
+ .dpp_read_state = dpp20_read_state,
+ .dpp_reset = dpp_reset,
+ .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale,
+ .dpp_get_optimal_number_of_taps = dpp201_get_optimal_number_of_taps,
+ .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap,
+ .dpp_set_csc_adjustment = NULL,
+ .dpp_set_csc_default = NULL,
+ .dpp_program_regamma_pwl = oppn20_dummy_program_regamma_pwl,
+ .dpp_set_degamma = dpp2_set_degamma,
+ .dpp_program_input_lut = dpp2_dummy_program_input_lut,
+ .dpp_full_bypass = dpp1_full_bypass,
+ .dpp_setup = dpp201_cnv_setup,
+ .dpp_program_degamma_pwl = dpp2_set_degamma_pwl,
+ .dpp_program_blnd_lut = dpp20_program_blnd_lut,
+ .dpp_program_shaper_lut = dpp20_program_shaper,
+ .dpp_program_3dlut = dpp20_program_3dlut,
+ .dpp_program_bias_and_scale = NULL,
+ .dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
+ .set_cursor_attributes = dpp2_set_cursor_attributes,
+ .set_cursor_position = dpp1_set_cursor_position,
+ .set_optional_cursor_attributes = dpp1_cnv_set_optional_cursor_attributes,
+ .dpp_dppclk_control = dpp1_dppclk_control,
+ .dpp_set_hdr_multiplier = dpp2_set_hdr_multiplier,
+};
+
+static struct dpp_caps dcn201_dpp_cap = {
+ .dscl_data_proc_format = DSCL_DATA_PRCESSING_FLOAT_FORMAT,
+ .dscl_calc_lb_num_partitions = dscl2_calc_lb_num_partitions,
+};
+
+bool dpp201_construct(
+ struct dcn201_dpp *dpp,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_dpp_registers *tf_regs,
+ const struct dcn201_dpp_shift *tf_shift,
+ const struct dcn201_dpp_mask *tf_mask)
+{
+ dpp->base.ctx = ctx;
+
+ dpp->base.inst = inst;
+ dpp->base.funcs = &dcn201_dpp_funcs;
+ dpp->base.caps = &dcn201_dpp_cap;
+
+ dpp->tf_regs = tf_regs;
+ dpp->tf_shift = tf_shift;
+ dpp->tf_mask = tf_mask;
+
+ dpp->lb_pixel_depth_supported =
+ LB_PIXEL_DEPTH_18BPP |
+ LB_PIXEL_DEPTH_24BPP |
+ LB_PIXEL_DEPTH_30BPP;
+
+ dpp->lb_bits_per_entry = LB_BITS_PER_ENTRY;
+ dpp->lb_memory_size = LB_TOTAL_NUMBER_OF_ENTRIES;
+
+ return true;
+}
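
A standalone demonstration (not driver code) of the IDENTITY_RATIO test used in dpp201_get_optimal_number_of_taps() above: scaling ratios are held in fixed point, and a ratio is exactly 1.0 when its unsigned 3.19 representation equals 1 << 19, which lets the tap counts collapse to 1. The conversion from double below is open-coded for the example only; the driver uses dc_fixpt_u3d19().

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double ratios[] = { 1.0, 0.5, 2.0 };

	for (int i = 0; i < 3; i++) {
		uint32_t u3d19 = (uint32_t)(ratios[i] * (1 << 19));
		printf("ratio %.1f -> 0x%06x identity=%d\n",
		       ratios[i], u3d19, u3d19 == (1 << 19));
	}
	return 0;
}
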
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
new file mode 100644
index 000000000000..cbd5b47b4acf
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.h
@@ -0,0 +1,83 @@
+/* Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN201_DPP_H__
+#define __DCN201_DPP_H__
+
+#include "dcn20/dcn20_dpp.h"
+
+#define TO_DCN201_DPP(dpp)\
+ container_of(dpp, struct dcn201_dpp, base)
+
+#define TF_REG_LIST_DCN201(id) \
+ TF_REG_LIST_DCN20(id)
+
+#define TF_REG_LIST_SH_MASK_DCN201(mask_sh)\
+ TF_REG_LIST_SH_MASK_DCN20(mask_sh)
+
+#define TF_REG_FIELD_LIST_DCN201(type) \
+ TF_REG_FIELD_LIST_DCN2_0(type)
+
+struct dcn201_dpp_shift {
+ TF_REG_FIELD_LIST_DCN201(uint8_t);
+};
+
+struct dcn201_dpp_mask {
+ TF_REG_FIELD_LIST_DCN201(uint32_t);
+};
+
+#define DPP_DCN201_REG_VARIABLE_LIST \
+ DPP_DCN2_REG_VARIABLE_LIST
+
+struct dcn201_dpp_registers {
+ DPP_DCN201_REG_VARIABLE_LIST;
+};
+
+struct dcn201_dpp {
+ struct dpp base;
+
+ const struct dcn201_dpp_registers *tf_regs;
+ const struct dcn201_dpp_shift *tf_shift;
+ const struct dcn201_dpp_mask *tf_mask;
+
+ const uint16_t *filter_v;
+ const uint16_t *filter_h;
+ const uint16_t *filter_v_c;
+ const uint16_t *filter_h_c;
+ int lb_pixel_depth_supported;
+ int lb_memory_size;
+ int lb_bits_per_entry;
+ bool is_write_to_ram_a_safe;
+ struct scaler_data scl_data;
+ struct pwl_params pwl_data;
+};
+
+bool dpp201_construct(struct dcn201_dpp *dpp2,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_dpp_registers *tf_regs,
+ const struct dcn201_dpp_shift *tf_shift,
+ const struct dcn201_dpp_mask *tf_mask);
+
+#endif /* __DC_HWSS_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
new file mode 100644
index 000000000000..037d265431c6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dm_services.h"
+#include "dcn20/dcn20_hubbub.h"
+#include "dcn201_hubbub.h"
+#include "reg_helper.h"
+
+#define REG(reg)\
+ hubbub1->regs->reg
+
+#define DC_LOGGER \
+ hubbub1->base.ctx->logger
+
+#define CTX \
+ hubbub1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubbub1->shifts->field_name, hubbub1->masks->field_name
+
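+/*
+ * DCN201 reuses the DCN1.0 urgent/p-state watermark programming and then
+ * applies its own arbiter settings: DCHUBBUB_ARB_SAT_LEVEL is programmed in
+ * refclk cycles (60 * refclk_mhz is roughly a 60 us window) and the minimum
+ * number of outstanding requests is pinned at 68.
+ */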
+static bool hubbub201_program_watermarks(
+ struct hubbub *hubbub,
+ struct dcn_watermark_set *watermarks,
+ unsigned int refclk_mhz,
+ bool safe_to_lower)
+{
+ struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ REG_SET(DCHUBBUB_ARB_SAT_LEVEL, 0,
+ DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
+ REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND,
+ DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 68);
+
+ hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
+}
+
+static const struct hubbub_funcs hubbub201_funcs = {
+ .update_dchub = hubbub2_update_dchub,
+ .init_dchub_sys_ctx = NULL,
+ .init_vm_ctx = NULL,
+ .dcc_support_swizzle = hubbub2_dcc_support_swizzle,
+ .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format,
+ .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap,
+ .wm_read_state = hubbub2_wm_read_state,
+ .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
+ .program_watermarks = hubbub201_program_watermarks,
+ .hubbub_read_state = hubbub2_read_state,
+};
+
+void hubbub201_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask)
+{
+ hubbub->base.ctx = ctx;
+
+ hubbub->base.funcs = &hubbub201_funcs;
+
+ hubbub->regs = hubbub_regs;
+ hubbub->shifts = hubbub_shift;
+ hubbub->masks = hubbub_mask;
+
+ hubbub->debug_test_index_pstate = 0xB;
+ hubbub->detile_buf_size = 164 * 1024;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h
new file mode 100644
index 000000000000..5aeca0be3e15
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubbub.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#ifndef DAL_DC_DCN201_DCN201_HUBBUB_H_
+#define DAL_DC_DCN201_DCN201_HUBBUB_H_
+
+#include "dcn20/dcn20_hubbub.h"
+
+#define HUBBUB_REG_LIST_DCN201(id)\
+ HUBBUB_REG_LIST_DCN_COMMON(), \
+ HUBBUB_VM_REG_LIST(), \
+ SR(DCHUBBUB_CRC_CTRL)
+
+#define HUBBUB_MASK_SH_LIST_DCN201(mask_sh)\
+ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \
+ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh)
+
+void hubbub201_construct(struct dcn20_hubbub *hubbub,
+ struct dc_context *ctx,
+ const struct dcn_hubbub_registers *hubbub_regs,
+ const struct dcn_hubbub_shift *hubbub_shift,
+ const struct dcn_hubbub_mask *hubbub_mask);
+
+#endif /* DAL_DC_DCN201_DCN201_HUBBUB_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
new file mode 100644
index 000000000000..6b6f74d4afd1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "dcn201_hubp.h"
+
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "reg_helper.h"
+#include "basics/conversion.h"
+
+#define REG(reg)\
+ hubp201->hubp_regs->reg
+
+#define CTX \
+ hubp201->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hubp201->hubp_shift->field_name, hubp201->hubp_mask->field_name
+
+static void hubp201_program_surface_config(
+ struct hubp *hubp,
+ enum surface_pixel_format format,
+ union dc_tiling_info *tiling_info,
+ struct plane_size *plane_size,
+ enum dc_rotation_angle rotation,
+ struct dc_plane_dcc_param *dcc,
+ bool horizontal_mirror,
+ unsigned int compat_level)
+{
+ hubp1_dcc_control(hubp, dcc->enable, dcc->independent_64b_blks);
+ hubp1_program_tiling(hubp, tiling_info, format);
+ hubp1_program_size(hubp, format, plane_size, dcc);
+ hubp1_program_pixel_format(hubp, format);
+}
+
+void hubp201_program_deadline(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr)
+{
+ hubp1_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
+void hubp201_program_requestor(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs)
+{
+ struct dcn201_hubp *hubp201 = TO_DCN201_HUBP(hubp);
+
+ REG_UPDATE(HUBPRET_CONTROL,
+ DET_BUF_PLANE1_BASE_ADDRESS, rq_regs->plane1_base_address);
+
+ REG_SET_4(DCN_EXPANSION_MODE, 0,
+ DRQ_EXPANSION_MODE, rq_regs->drq_expansion_mode,
+ PRQ_EXPANSION_MODE, rq_regs->prq_expansion_mode,
+ MRQ_EXPANSION_MODE, rq_regs->mrq_expansion_mode,
+ CRQ_EXPANSION_MODE, rq_regs->crq_expansion_mode);
+
+ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG, 0,
+ CHUNK_SIZE, rq_regs->rq_regs_l.chunk_size,
+ MIN_CHUNK_SIZE, rq_regs->rq_regs_l.min_chunk_size,
+ META_CHUNK_SIZE, rq_regs->rq_regs_l.meta_chunk_size,
+ MIN_META_CHUNK_SIZE, rq_regs->rq_regs_l.min_meta_chunk_size,
+ SWATH_HEIGHT, rq_regs->rq_regs_l.swath_height);
+
+ REG_SET_5(DCHUBP_REQ_SIZE_CONFIG_C, 0,
+ CHUNK_SIZE_C, rq_regs->rq_regs_c.chunk_size,
+ MIN_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_chunk_size,
+ META_CHUNK_SIZE_C, rq_regs->rq_regs_c.meta_chunk_size,
+ MIN_META_CHUNK_SIZE_C, rq_regs->rq_regs_c.min_meta_chunk_size,
+ SWATH_HEIGHT_C, rq_regs->rq_regs_c.swath_height);
+}
+
+static void hubp201_setup(
+ struct hubp *hubp,
+ struct _vcs_dpi_display_dlg_regs_st *dlg_attr,
+ struct _vcs_dpi_display_ttu_regs_st *ttu_attr,
+ struct _vcs_dpi_display_rq_regs_st *rq_regs,
+ struct _vcs_dpi_display_pipe_dest_params_st *pipe_dest)
+{
+ hubp2_vready_at_or_After_vsync(hubp, pipe_dest);
+ hubp201_program_requestor(hubp, rq_regs);
+ hubp201_program_deadline(hubp, dlg_attr, ttu_attr);
+}
+
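+/*
+ * Most HUBP hooks are inherited from the DCN1.0/DCN2.0 implementations; only
+ * the surface config, requestor and deadline programming paths above are
+ * specialized for DCN201.
+ */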
+static struct hubp_funcs dcn201_hubp_funcs = {
+ .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer,
+ .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled,
+ .hubp_program_surface_flip_and_addr = hubp1_program_surface_flip_and_addr,
+ .hubp_program_surface_config = hubp201_program_surface_config,
+ .hubp_is_flip_pending = hubp1_is_flip_pending,
+ .hubp_setup = hubp201_setup,
+ .hubp_setup_interdependent = hubp2_setup_interdependent,
+ .set_cursor_attributes = hubp2_cursor_set_attributes,
+ .set_cursor_position = hubp1_cursor_set_position,
+ .set_blank = hubp1_set_blank,
+ .dcc_control = hubp1_dcc_control,
+ .mem_program_viewport = min_set_viewport,
+ .hubp_clk_cntl = hubp1_clk_cntl,
+ .hubp_vtg_sel = hubp1_vtg_sel,
+ .dmdata_set_attributes = hubp2_dmdata_set_attributes,
+ .dmdata_load = hubp2_dmdata_load,
+ .dmdata_status_done = hubp2_dmdata_status_done,
+ .hubp_read_state = hubp2_read_state,
+ .hubp_clear_underflow = hubp1_clear_underflow,
+ .hubp_set_flip_control_surface_gsl = hubp2_set_flip_control_surface_gsl,
+ .hubp_init = hubp1_init,
+};
+
+bool dcn201_hubp_construct(
+ struct dcn201_hubp *hubp201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_hubp_registers *hubp_regs,
+ const struct dcn201_hubp_shift *hubp_shift,
+ const struct dcn201_hubp_mask *hubp_mask)
+{
+ hubp201->base.funcs = &dcn201_hubp_funcs;
+ hubp201->base.ctx = ctx;
+ hubp201->hubp_regs = hubp_regs;
+ hubp201->hubp_shift = hubp_shift;
+ hubp201->hubp_mask = hubp_mask;
+ hubp201->base.inst = inst;
+ hubp201->base.opp_id = OPP_ID_INVALID;
+ hubp201->base.mpcc_id = 0xf;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h
new file mode 100644
index 000000000000..a1e3384eed63
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hubp.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright 2012-17 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MEM_INPUT_DCN201_H__
+#define __DC_MEM_INPUT_DCN201_H__
+
+#include "../dcn10/dcn10_hubp.h"
+#include "../dcn20/dcn20_hubp.h"
+
+#define TO_DCN201_HUBP(hubp)\
+ container_of(hubp, struct dcn201_hubp, base)
+
+#define HUBP_REG_LIST_DCN201(id)\
+ HUBP_REG_LIST_DCN(id),\
+ SRI(PREFETCH_SETTINGS, HUBPREQ, id),\
+ SRI(PREFETCH_SETTINGS_C, HUBPREQ, id),\
+ SRI(DCSURF_FLIP_CONTROL2, HUBPREQ, id), \
+ SRI(CURSOR_SETTINGS, HUBPREQ, id), \
+ SRI(CURSOR_SURFACE_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(CURSOR_SURFACE_ADDRESS, CURSOR0_, id), \
+ SRI(CURSOR_SIZE, CURSOR0_, id), \
+ SRI(CURSOR_CONTROL, CURSOR0_, id), \
+ SRI(CURSOR_POSITION, CURSOR0_, id), \
+ SRI(CURSOR_HOT_SPOT, CURSOR0_, id), \
+ SRI(CURSOR_DST_OFFSET, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_HIGH, CURSOR0_, id), \
+ SRI(DMDATA_ADDRESS_LOW, CURSOR0_, id), \
+ SRI(DMDATA_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_QOS_CNTL, CURSOR0_, id), \
+ SRI(DMDATA_SW_DATA, CURSOR0_, id), \
+ SRI(DMDATA_STATUS, CURSOR0_, id),\
+ SRI(FLIP_PARAMETERS_0, HUBPREQ, id),\
+ SRI(FLIP_PARAMETERS_2, HUBPREQ, id)
+
+#define HUBP_MASK_SH_LIST_DCN201(mask_sh)\
+ HUBP_MASK_SH_LIST_DCN(mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, DST_Y_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS, VRATIO_PREFETCH, mask_sh),\
+ HUBP_SF(HUBPREQ0_PREFETCH_SETTINGS_C, VRATIO_PREFETCH_C, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL2, SURFACE_TRIPLE_BUFFER_ENABLE, mask_sh),\
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_DST_Y_OFFSET, mask_sh), \
+ HUBP_SF(HUBPREQ0_CURSOR_SETTINGS, CURSOR0_CHUNK_HDL_ADJUST, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH, CURSOR_SURFACE_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SURFACE_ADDRESS, CURSOR_SURFACE_ADDRESS, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_WIDTH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_SIZE, CURSOR_HEIGHT, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_2X_MAGNIFY, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_PITCH, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_LINES_PER_CHUNK, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_ENABLE, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_X_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_POSITION, CURSOR_Y_POSITION, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_X, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_HOT_SPOT, CURSOR_HOT_SPOT_Y, mask_sh), \
+ HUBP_SF(CURSOR0_0_CURSOR_DST_OFFSET, CURSOR_DST_X_OFFSET, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_ADDRESS_HIGH, DMDATA_ADDRESS_HIGH, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_CNTL, DMDATA_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_UPDATED, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_REPEAT, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_SW_CNTL, DMDATA_SW_SIZE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_MODE, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_QOS_LEVEL, mask_sh), \
+ HUBP_SF(CURSOR0_0_DMDATA_QOS_CNTL, DMDATA_DL_DELTA, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_VM_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_0, DST_Y_PER_ROW_FLIP, mask_sh),\
+ HUBP_SF(HUBPREQ0_FLIP_PARAMETERS_2, REFCYC_PER_META_CHUNK_FLIP_L, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_VREADY_AT_OR_AFTER_VSYNC, mask_sh),\
+ HUBP_SF(HUBP0_DCHUBP_CNTL, HUBP_DISABLE_STOP_DATA_DURING_VM, mask_sh),\
+ HUBP_SF(HUBPREQ0_DCSURF_FLIP_CONTROL, HUBPREQ_MASTER_UPDATE_LOCK_STATUS, mask_sh)
+
+#define DCN201_HUBP_REG_VARIABLE_LIST \
+ DCN2_HUBP_REG_COMMON_VARIABLE_LIST
+
+#define DCN201_HUBP_REG_FIELD_VARIABLE_LIST(type) \
+ DCN2_HUBP_REG_FIELD_VARIABLE_LIST(type)
+
+struct dcn201_hubp_registers {
+ DCN201_HUBP_REG_VARIABLE_LIST;
+};
+
+struct dcn201_hubp_shift {
+ DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint8_t);
+};
+
+struct dcn201_hubp_mask {
+ DCN201_HUBP_REG_FIELD_VARIABLE_LIST(uint32_t);
+};
+
+struct dcn201_hubp {
+ struct hubp base;
+ struct dcn_hubp_state state;
+ const struct dcn201_hubp_registers *hubp_regs;
+ const struct dcn201_hubp_shift *hubp_shift;
+ const struct dcn201_hubp_mask *hubp_mask;
+};
+
+bool dcn201_hubp_construct(
+ struct dcn201_hubp *hubp201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_hubp_registers *hubp_regs,
+ const struct dcn201_hubp_shift *hubp_shift,
+ const struct dcn201_hubp_mask *hubp_mask);
+
+#endif /* __DC_MEM_INPUT_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
new file mode 100644
index 000000000000..cfd09b3f705e
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "basics/dc_common.h"
+#include "core_types.h"
+#include "resource.h"
+#include "dcn201_hwseq.h"
+#include "dcn201_optc.h"
+#include "dce/dce_hwseq.h"
+#include "hubp.h"
+#include "dchubbub.h"
+#include "timing_generator.h"
+#include "opp.h"
+#include "ipp.h"
+#include "mpc.h"
+#include "dccg.h"
+#include "clk_mgr.h"
+#include "reg_helper.h"
+
+#define CTX \
+ hws->ctx
+
+#define REG(reg)\
+ hws->regs->reg
+
+#define DC_LOGGER \
+ dc->ctx->logger
+
+#undef FN
+#define FN(reg_name, field_name) \
+ hws->shifts->field_name, hws->masks->field_name
+
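+/*
+ * For side-by-side / top-and-bottom stereo the secondary (split) pipe must
+ * scan out the right eye: the left address is temporarily swapped for the
+ * right one, and the original is returned so the caller can restore it once
+ * the flip has been programmed.
+ */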
+static bool patch_address_for_sbs_tb_stereo(
+ struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
+{
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ bool sec_split = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+
+ if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
+ (pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_SIDE_BY_SIDE ||
+ pipe_ctx->stream->timing.timing_3d_format ==
+ TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
+ *addr = plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.left_addr =
+ plane_state->address.grph_stereo.right_addr;
+ return true;
+ } else {
+ if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
+ plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
+ plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
+ plane_state->address.grph_stereo.right_addr =
+ plane_state->address.grph_stereo.left_addr;
+ plane_state->address.grph_stereo.right_meta_addr =
+ plane_state->address.grph_stereo.left_meta_addr;
+ }
+ }
+ return false;
+}
+
+/*
+ * Rebase an address that falls inside the GPU frame buffer window onto the
+ * UMA (system memory) aperture. Addresses already in the UMA range, and the
+ * zero (unprogrammed) address, are left untouched.
+ */
+static void gpu_addr_to_uma(struct dce_hwseq *hwseq,
+ PHYSICAL_ADDRESS_LOC *addr)
+{
+ if (hwseq->fb_base.quad_part <= addr->quad_part &&
+ addr->quad_part < hwseq->fb_top.quad_part) {
+ /* inside the FB window: rebase onto the UMA aperture */
+ addr->quad_part -= hwseq->fb_base.quad_part;
+ addr->quad_part += hwseq->fb_offset.quad_part;
+ }
+}
+
+static void plane_address_in_gpu_space_to_uma(struct dce_hwseq *hwseq,
+ struct dc_plane_address *addr)
+{
+ switch (addr->type) {
+ case PLN_ADDR_TYPE_GRAPHICS:
+ gpu_addr_to_uma(hwseq, &addr->grph.addr);
+ gpu_addr_to_uma(hwseq, &addr->grph.meta_addr);
+ break;
+ case PLN_ADDR_TYPE_GRPH_STEREO:
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.left_meta_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_addr);
+ gpu_addr_to_uma(hwseq, &addr->grph_stereo.right_meta_addr);
+ break;
+ case PLN_ADDR_TYPE_VIDEO_PROGRESSIVE:
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.luma_meta_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_addr);
+ gpu_addr_to_uma(hwseq, &addr->video_progressive.chroma_meta_addr);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+}
+
+void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ bool addr_patched = false;
+ PHYSICAL_ADDRESS_LOC addr;
+ struct dc_plane_state *plane_state = pipe_ctx->plane_state;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct dc_plane_address uma;
+
+ if (plane_state == NULL)
+ return;
+
+ /* copy the address only after the plane state has been checked */
+ uma = plane_state->address;
+ addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
+
+ plane_address_in_gpu_space_to_uma(hws, &uma);
+
+ pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
+ pipe_ctx->plane_res.hubp,
+ &uma,
+ plane_state->flip_immediate);
+
+ plane_state->status.requested_address = plane_state->address;
+
+ if (plane_state->flip_immediate)
+ plane_state->status.current_address = plane_state->address;
+
+ if (addr_patched)
+ pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
+}
+
+/* Blank pixel data during initialization */
+void dcn201_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ enum dc_color_space color_space;
+ struct tg_color black_color = {0};
+ struct output_pixel_processor *opp = NULL;
+ uint32_t num_opps, opp_id_src0, opp_id_src1;
+ uint32_t otg_active_width, otg_active_height;
+
+ /* program opp dpg blank color */
+ color_space = COLOR_SPACE_SRGB;
+ color_space_to_black_color(dc, color_space, &black_color);
+
+ /* get the OTG active size */
+ tg->funcs->get_otg_active_size(tg,
+ &otg_active_width,
+ &otg_active_height);
+
+ /* get the OPTC source */
+ tg->funcs->get_optc_source(tg, &num_opps, &opp_id_src0, &opp_id_src1);
+ ASSERT(opp_id_src0 < dc->res_pool->res_cap->num_opp);
+ opp = dc->res_pool->opps[opp_id_src0];
+
+ opp->funcs->opp_set_disp_pattern_generator(
+ opp,
+ CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR,
+ CONTROLLER_DP_COLOR_SPACE_UDEFINED,
+ COLOR_DEPTH_UNDEFINED,
+ &black_color,
+ otg_active_width,
+ otg_active_height,
+ 0);
+
+ hws->funcs.wait_for_blank_complete(opp);
+}
+
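+/*
+ * Cache the MMHUB frame buffer window (base/top/offset) so that
+ * gpu_addr_to_uma() can rebase GPU addresses onto the UMA aperture;
+ * uma_top marks the end of that rebased range.
+ */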
+static void read_mmhub_vm_setup(struct dce_hwseq *hws)
+{
+ uint32_t fb_base = REG_READ(MC_VM_FB_LOCATION_BASE);
+ uint32_t fb_top = REG_READ(MC_VM_FB_LOCATION_TOP);
+ uint32_t fb_offset = REG_READ(MC_VM_FB_OFFSET);
+
+ /* MC_VM_FB_LOCATION_TOP is in pages, so the actual top is one page higher */
+ fb_top++;
+
+ /* bits 23:0 of the register map to bits 47:24 of the address */
+ hws->fb_base.low_part = fb_base;
+ hws->fb_base.quad_part <<= 24;
+
+ hws->fb_top.low_part = fb_top;
+ hws->fb_top.quad_part <<= 24;
+ hws->fb_offset.low_part = fb_offset;
+ hws->fb_offset.quad_part <<= 24;
+
+ hws->uma_top.quad_part = hws->fb_top.quad_part
+ - hws->fb_base.quad_part + hws->fb_offset.quad_part;
+}
+
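+/*
+ * Bring-up order: DCCG and clocks first, then (on real ASIC) BIOS golden
+ * init, reference clock discovery and link encoder init, followed by
+ * blanking any enabled OTGs, resetting DPP/MPC state and disconnecting
+ * every HUBP so the pipes are handed back in a known-disabled state.
+ */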
+void dcn201_init_hw(struct dc *dc)
+{
+ int i, j;
+ struct dce_hwseq *hws = dc->hwseq;
+ struct resource_pool *res_pool = dc->res_pool;
+ struct dc_state *context = dc->current_state;
+
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
+
+ if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
+ dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
+
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ REG_WRITE(RBBMIF_TIMEOUT_DIS, 0xFFFFFFFF);
+ REG_WRITE(RBBMIF_TIMEOUT_DIS_2, 0xFFFFFFFF);
+
+ hws->funcs.dccg_init(hws);
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+ REG_WRITE(REFCLK_CNTL, 0);
+ } else {
+ hws->funcs.bios_golden_init(dc);
+
+ if (dc->ctx->dc_bios->fw_info_valid) {
+ res_pool->ref_clocks.xtalin_clock_inKhz =
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
+
+ if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
+ if (res_pool->dccg && res_pool->hubbub) {
+ (res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
+ dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
+ &res_pool->ref_clocks.dccg_ref_clock_inKhz);
+
+ (res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
+ res_pool->ref_clocks.dccg_ref_clock_inKhz,
+ &res_pool->ref_clocks.dchub_ref_clock_inKhz);
+ } else {
+ res_pool->ref_clocks.dccg_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ res_pool->ref_clocks.dchub_ref_clock_inKhz =
+ res_pool->ref_clocks.xtalin_clock_inKhz;
+ }
+ }
+ } else {
+ ASSERT_CRITICAL(false);
+ }
+
+ for (i = 0; i < dc->link_count; i++) {
+ /* Power up AND update implementation according to the
+ * required signal (which may be different from the
+ * default signal on connector).
+ */
+ struct dc_link *link = dc->links[i];
+
+ link->link_enc->funcs->hw_init(link->link_enc);
+ }
+ if (hws->fb_offset.quad_part == 0)
+ read_mmhub_vm_setup(hws);
+ }
+
+ /* Blank pixel data with OPP DPG */
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg)) {
+ dcn201_init_blank(dc, tg);
+ }
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->lock(tg);
+ }
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ struct dpp *dpp = res_pool->dpps[i];
+
+ dpp->funcs->dpp_reset(dpp);
+ }
+
+ /* Reset all MPCC muxes */
+ res_pool->mpc->funcs->mpc_init(res_pool->mpc);
+
+ /* initialize OPP mpc_tree parameter */
+ for (i = 0; i < res_pool->res_cap->num_opp; i++) {
+ res_pool->opps[i]->mpc_tree_params.opp_id = res_pool->opps[i]->inst;
+ res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
+ for (j = 0; j < MAX_PIPES; j++)
+ res_pool->opps[i]->mpcc_disconnect_pending[j] = false;
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct hubp *hubp = res_pool->hubps[i];
+ struct dpp *dpp = res_pool->dpps[i];
+
+ pipe_ctx->stream_res.tg = tg;
+ pipe_ctx->pipe_idx = i;
+
+ pipe_ctx->plane_res.hubp = hubp;
+ pipe_ctx->plane_res.dpp = dpp;
+ pipe_ctx->plane_res.mpcc_inst = dpp->inst;
+ hubp->mpcc_id = dpp->inst;
+ hubp->opp_id = OPP_ID_INVALID;
+ hubp->power_gated = false;
+ pipe_ctx->stream_res.opp = NULL;
+
+ hubp->funcs->hubp_init(hubp);
+
+ res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+ pipe_ctx->stream_res.opp = res_pool->opps[i];
+ /*To do: number of MPCC != number of opp*/
+ hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
+ }
+
+ /* initialize DWB pointer to MCIF_WB */
+ for (i = 0; i < res_pool->res_cap->num_dwb; i++)
+ res_pool->dwbc[i]->mcif = res_pool->mcif_wb[i];
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ if (tg->funcs->is_tg_enabled(tg))
+ tg->funcs->unlock(tg);
+ }
+
+ for (i = 0; i < res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ dc->hwss.disable_plane(dc, pipe_ctx);
+
+ pipe_ctx->stream_res.tg = NULL;
+ pipe_ctx->plane_res.hubp = NULL;
+ }
+
+ for (i = 0; i < res_pool->timing_generator_count; i++) {
+ struct timing_generator *tg = res_pool->timing_generators[i];
+
+ tg->funcs->tg_init(tg);
+ }
+
+ /* end of FPGA. Below if real ASIC */
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment))
+ return;
+
+ for (i = 0; i < res_pool->audio_count; i++) {
+ struct audio *audio = res_pool->audios[i];
+
+ audio->funcs->hw_init(audio);
+ }
+
+ /* Power AFMT HDMI memory. TODO: may move to output enable/disable to save power. */
+ REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+
+ if (!dc->debug.disable_clock_gate) {
+ /* enable all DCN clock gating */
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
+
+ REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
+
+ REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
+ }
+}
+
+/* trigger HW to start disconnect plane from stream on the next vsync */
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ int dpp_id = pipe_ctx->plane_res.dpp->inst;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params;
+ struct mpcc *mpcc_to_remove = NULL;
+ struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
+ bool mpcc_removed = false;
+
+ mpc_tree_params = &(opp->mpc_tree_params);
+
+ /* check if this plane is being used by an MPCC in the secondary blending chain */
+ if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
+
+ /* remove MPCC from secondary if being used */
+ if (mpcc_to_remove != NULL && mpc->funcs->remove_mpcc_from_secondary) {
+ mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, mpcc_to_remove);
+ mpcc_removed = true;
+ }
+
+ /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
+ mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+ if (mpcc_to_remove != NULL) {
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
+ mpcc_removed = true;
+ }
+
+ /* already reset */
+ if (!mpcc_removed)
+ return;
+
+ if (opp != NULL)
+ opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
+
+ dc->optimized_required = true;
+
+ if (hubp->funcs->hubp_disconnect)
+ hubp->funcs->hubp_disconnect(hubp);
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+}
+
+void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
+{
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ struct mpcc_blnd_cfg blnd_cfg;
+ bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
+ int mpcc_id, dpp_id;
+ struct mpcc *new_mpcc;
+ struct mpcc *remove_mpcc = NULL;
+ struct mpc *mpc = dc->res_pool->mpc;
+ struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
+
+ if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
+ get_hdr_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
+ get_surface_visual_confirm_color(
+ pipe_ctx, &blnd_cfg.black_color);
+ } else {
+ color_space_to_black_color(
+ dc, pipe_ctx->stream->output_color_space,
+ &blnd_cfg.black_color);
+ }
+
+ if (per_pixel_alpha)
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
+ else
+ blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
+
+ blnd_cfg.overlap_only = false;
+
+ if (pipe_ctx->plane_state->global_alpha_value)
+ blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
+ else
+ blnd_cfg.global_alpha = 0xff;
+
+ blnd_cfg.global_gain = 0xff;
+ blnd_cfg.background_color_bpc = 4;
+ blnd_cfg.bottom_gain_mode = 0;
+ blnd_cfg.top_gain = 0x1f000;
+ blnd_cfg.bottom_inside_gain = 0x1f000;
+ blnd_cfg.bottom_outside_gain = 0x1f000;
+ /*the input to MPCC is RGB*/
+ blnd_cfg.black_color.color_b_cb = 0;
+ blnd_cfg.black_color.color_g_y = 0;
+ blnd_cfg.black_color.color_r_cr = 0;
+
+ /* DCN1.0 has output CM before MPC which seems to interfere with
+ * pre-multiplied alpha. This is a workaround that is hopefully
+ * unnecessary for DCN2.
+ */
+ blnd_cfg.pre_multiplied_alpha = per_pixel_alpha;
+
+ /*
+ * TODO: remove hack
+ * Note: currently there is a bug in init_hw such that
+ * on resume from hibernate, BIOS sets up MPCC0, and
+ * we do mpcc_remove but the mpcc cannot go to idle
+ * after remove. This causes us to pick mpcc1 here,
+ * which causes a pstate hang for an as yet unknown reason.
+ */
+ dpp_id = hubp->inst;
+ mpcc_id = dpp_id;
+
+ /* If there is no full update, don't need to touch MPC tree*/
+ if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
+ return;
+ }
+
+ /* check if this plane is being used by an MPCC in the secondary blending chain */
+ if (mpc->funcs->get_mpcc_for_dpp_from_secondary)
+ remove_mpcc = mpc->funcs->get_mpcc_for_dpp_from_secondary(mpc_tree_params, dpp_id);
+
+ /* remove MPCC from secondary if being used */
+ if (remove_mpcc != NULL && mpc->funcs->remove_mpcc_from_secondary)
+ mpc->funcs->remove_mpcc_from_secondary(mpc, mpc_tree_params, remove_mpcc);
+
+ /* check if this MPCC is already being used for this plane (dpp) in the primary blending chain */
+ remove_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
+ /* remove MPCC if being used */
+ if (remove_mpcc != NULL)
+ mpc->funcs->remove_mpcc(mpc, mpc_tree_params, remove_mpcc);
+ else if (dc->debug.sanity_checks)
+ mpc->funcs->assert_mpcc_idle_before_connect(
+ dc->res_pool->mpc, mpcc_id);
+
+ /* Call MPC to insert new plane */
+ dc->hwss.update_visual_confirm_color(dc, pipe_ctx, &blnd_cfg.black_color, mpcc_id);
+ new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
+ mpc_tree_params,
+ &blnd_cfg,
+ NULL,
+ NULL,
+ dpp_id,
+ mpcc_id);
+
+ ASSERT(new_mpcc != NULL);
+ hubp->opp_id = pipe_ctx->stream_res.opp->inst;
+ hubp->mpcc_id = mpcc_id;
+}
+
+void dcn201_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock)
+{
+ struct dce_hwseq *hws = dc->hwseq;
+
+ /* Use the TG master update lock to lock everything on the TG,
+ * therefore only the top pipe needs to take the lock.
+ */
+ if (pipe->top_pipe)
+ return;
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+
+ if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) {
+ if (lock)
+ pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->triplebuffer_unlock(pipe->stream_res.tg);
+ } else {
+ if (lock)
+ pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
+ else
+ pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
+ }
+
+ if (dc->debug.sanity_checks)
+ hws->funcs.verify_allow_pstate_change_high(dc);
+}
+
+void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
+
+ gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq, &attributes->address);
+
+ pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.hubp, attributes);
+ pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
+ pipe_ctx->plane_res.dpp, attributes);
+}
+
+void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_dmdata_attributes attr = { 0 };
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
+
+ gpu_addr_to_uma(pipe_ctx->stream->ctx->dc->hwseq,
+ &pipe_ctx->stream->dmdata_address);
+
+ attr.dmdata_mode = DMDATA_HW_MODE;
+ attr.dmdata_size =
+ dc_is_hdmi_signal(pipe_ctx->stream->signal) ? 32 : 36;
+ attr.address.quad_part =
+ pipe_ctx->stream->dmdata_address.quad_part;
+ attr.dmdata_dl_delta = 0;
+ attr.dmdata_qos_mode = 0;
+ attr.dmdata_qos_level = 0;
+ attr.dmdata_repeat = 1; /* always repeat */
+ attr.dmdata_updated = 1;
+ attr.dmdata_sw_data = NULL;
+
+ hubp->funcs->dmdata_set_attributes(hubp, &attr);
+}
+
+void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings)
+{
+ struct encoder_unblank_param params = { { 0 } };
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ struct dc_link *link = stream->link;
+ struct dce_hwseq *hws = link->dc->hwseq;
+
+ /* only the timing and link rate below are used by unblank */
+ params.timing = pipe_ctx->stream->timing;
+
+ params.link_settings.link_rate = link_settings->link_rate;
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
+ /*check whether it is half the rate*/
+ if (optc201_is_two_pixels_per_containter(&stream->timing))
+ params.timing.pix_clk_100hz /= 2;
+
+ pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(link, pipe_ctx->stream_res.stream_enc, &params);
+ }
+
+ if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
+ hws->funcs.edp_backlight_control(link, true);
+ }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h
new file mode 100644
index 000000000000..26cd62be6418
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_hwseq.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_HWSS_DCN201_H__
+#define __DC_HWSS_DCN201_H__
+
+#include "hw_sequencer_private.h"
+
+void dcn201_set_dmdata_attributes(struct pipe_ctx *pipe_ctx);
+void dcn201_init_hw(struct dc *dc);
+void dcn201_unblank_stream(struct pipe_ctx *pipe_ctx,
+ struct dc_link_settings *link_settings);
+void dcn201_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn201_set_cursor_attribute(struct pipe_ctx *pipe_ctx);
+void dcn201_pipe_control_lock(
+ struct dc *dc,
+ struct pipe_ctx *pipe,
+ bool lock);
+void dcn201_init_blank(
+ struct dc *dc,
+ struct timing_generator *tg);
+#endif /* __DC_HWSS_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
new file mode 100644
index 000000000000..f1f89f93603f
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn10/dcn10_hw_sequencer.h"
+#include "dcn20/dcn20_hwseq.h"
+#include "dcn201_hwseq.h"
+
+static const struct hw_sequencer_funcs dcn201_funcs = {
+ .program_gamut_remap = dcn10_program_gamut_remap,
+ .init_hw = dcn201_init_hw,
+ .power_down_on_boot = NULL,
+ .apply_ctx_to_hw = dce110_apply_ctx_to_hw,
+ .apply_ctx_for_surface = NULL,
+ .program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .wait_for_pending_cleared = dcn10_wait_for_pending_cleared,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
+ .update_plane_addr = dcn201_update_plane_addr,
+ .update_dchub = dcn10_update_dchub,
+ .update_pending_status = dcn10_update_pending_status,
+ .program_output_csc = dcn20_program_output_csc,
+ .enable_accelerated_mode = dce110_enable_accelerated_mode,
+ .enable_timing_synchronization = dcn10_enable_timing_synchronization,
+ .enable_per_frame_crtc_position_reset = dcn10_enable_per_frame_crtc_position_reset,
+ .update_info_frame = dce110_update_info_frame,
+ .send_immediate_sdp_message = dcn10_send_immediate_sdp_message,
+ .enable_stream = dce110_enable_stream,
+ .disable_stream = dce110_disable_stream,
+ .unblank_stream = dcn201_unblank_stream,
+ .blank_stream = dce110_blank_stream,
+ .enable_audio_stream = dce110_enable_audio_stream,
+ .disable_audio_stream = dce110_disable_audio_stream,
+ .disable_plane = dcn10_disable_plane,
+ .pipe_control_lock = dcn201_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
+ .cursor_lock = dcn10_cursor_lock,
+ .prepare_bandwidth = dcn20_prepare_bandwidth,
+ .optimize_bandwidth = dcn20_optimize_bandwidth,
+ .update_bandwidth = dcn20_update_bandwidth,
+ .set_drr = dcn10_set_drr,
+ .get_position = dcn10_get_position,
+ .set_static_screen_control = dcn10_set_static_screen_control,
+ .setup_stereo = dcn10_setup_stereo,
+ .set_avmute = dce110_set_avmute,
+ .log_hw_state = dcn10_log_hw_state,
+ .get_hw_state = dcn10_get_hw_state,
+ .clear_status_bits = dcn10_clear_status_bits,
+ .wait_for_mpcc_disconnect = dcn10_wait_for_mpcc_disconnect,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .edp_power_control = dce110_edp_power_control,
+ .edp_wait_for_hpd_ready = dce110_edp_wait_for_hpd_ready,
+ .setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
+ .set_clock = dcn10_set_clock,
+ .get_clock = dcn10_get_clock,
+ .program_triplebuffer = dcn20_program_triple_buffer,
+ .dmdata_status_done = dcn20_dmdata_status_done,
+ .set_dmdata_attributes = dcn201_set_dmdata_attributes,
+ .get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
+ .calc_vupdate_position = dcn10_calc_vupdate_position,
+ .set_cursor_position = dcn10_set_cursor_position,
+ .set_cursor_attribute = dcn201_set_cursor_attribute,
+ .set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .set_backlight_level = dce110_set_backlight_level,
+ .set_abm_immediate_disable = dce110_set_abm_immediate_disable,
+ .set_pipe = dce110_set_pipe,
+ .set_disp_pattern_generator = dcn20_set_disp_pattern_generator,
+ .update_visual_confirm_color = dcn20_update_visual_confirm_color,
+};
+
+static const struct hwseq_private_funcs dcn201_private_funcs = {
+ .init_pipes = NULL,
+ .update_plane_addr = dcn201_update_plane_addr,
+ .plane_atomic_disconnect = dcn201_plane_atomic_disconnect,
+ .program_pipe = dcn10_program_pipe,
+ .update_mpcc = dcn201_update_mpcc,
+ .set_input_transfer_func = dcn20_set_input_transfer_func,
+ .set_output_transfer_func = dcn20_set_output_transfer_func,
+ .power_down = dce110_power_down,
+ .enable_display_power_gating = dcn10_dummy_display_power_gating,
+ .blank_pixel_data = dcn20_blank_pixel_data,
+ .reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
+ .enable_stream_timing = dcn20_enable_stream_timing,
+ .edp_backlight_control = dce110_edp_backlight_control,
+ .disable_stream_gating = NULL,
+ .enable_stream_gating = NULL,
+ .setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
+ .did_underflow_occur = dcn10_did_underflow_occur,
+ .init_blank = dcn201_init_blank,
+ .disable_vga = dcn10_disable_vga,
+ .bios_golden_init = dcn10_bios_golden_init,
+ .plane_atomic_disable = dcn10_plane_atomic_disable,
+ .plane_atomic_power_down = dcn10_plane_atomic_power_down,
+ .enable_power_gating_plane = dcn10_enable_power_gating_plane,
+ .dpp_pg_control = dcn10_dpp_pg_control,
+ .hubp_pg_control = dcn10_hubp_pg_control,
+ .dsc_pg_control = NULL,
+ .set_hdr_multiplier = dcn10_set_hdr_multiplier,
+ .verify_allow_pstate_change_high = dcn10_verify_allow_pstate_change_high,
+ .wait_for_blank_complete = dcn20_wait_for_blank_complete,
+ .dccg_init = dcn20_dccg_init,
+ .set_blend_lut = dcn20_set_blend_lut,
+ .set_shaper_3dlut = dcn20_set_shaper_3dlut,
+};
+
+void dcn201_hw_sequencer_construct(struct dc *dc)
+{
+ dc->hwss = dcn201_funcs;
+ dc->hwseq->funcs = dcn201_private_funcs;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
new file mode 100644
index 000000000000..1168887b033d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_init.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_DCN201_INIT_H__
+#define __DC_DCN201_INIT_H__
+
+struct dc;
+
+void dcn201_hw_sequencer_construct(struct dc *dc);
+
+#endif /* __DC_DCN201_INIT_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
new file mode 100644
index 000000000000..a65e8f7801db
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+
+#include "core_types.h"
+#include "link_encoder.h"
+#include "dcn201_link_encoder.h"
+#include "stream_encoder.h"
+#include "i2caux_interface.h"
+#include "dc_bios_types.h"
+
+#include "gpio_service_interface.h"
+
+#define CTX \
+ enc10->base.ctx
+
+#define DC_LOGGER \
+ enc10->base.ctx->logger
+
+#define REG(reg)\
+ (enc10->link_regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc10->link_shift->field_name, enc10->link_mask->field_name
+
+#define IND_REG(index) \
+ (enc10->link_regs->index)
+
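+/*
+ * On a USB-C combo PHY in DP alternate mode without the 4-lane pin
+ * assignment (RDPCS_PHY_DPALT_DP4 clear), only two lanes reach the
+ * connector, so the reported capability is clamped to two lanes here.
+ */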
+void dcn201_link_encoder_get_max_link_cap(struct link_encoder *enc,
+ struct dc_link_settings *link_settings)
+{
+ uint32_t value1, value2;
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ dcn10_link_encoder_get_max_link_cap(enc, link_settings);
+ REG_GET_2(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value1,
+ RDPCS_PHY_DPALT_DP4, &value2);
+ /*limit to combo_phy*/
+ if (enc->usbc_combo_phy) {
+ if (!value1 && !value2 && link_settings->lane_count > LANE_COUNT_TWO)
+ link_settings->lane_count = LANE_COUNT_TWO;
+ }
+}
+
+bool dcn201_link_encoder_is_in_alt_mode(struct link_encoder *enc)
+{
+ uint32_t value;
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
+ REG_GET(RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, &value);
+
+ /* if value == 1, alt mode is disabled; otherwise it is enabled */
+ return !value;
+}
+
+static const struct link_encoder_funcs dcn201_link_enc_funcs = {
+ .read_state = link_enc2_read_state,
+ .validate_output_with_stream =
+ dcn10_link_encoder_validate_output_with_stream,
+ .hw_init = enc2_hw_init,
+ .setup = dcn10_link_encoder_setup,
+ .enable_tmds_output = dcn10_link_encoder_enable_tmds_output,
+ .enable_dp_output = dcn10_link_encoder_enable_dp_output,
+ .enable_dp_mst_output = dcn10_link_encoder_enable_dp_mst_output,
+ .disable_output = dcn10_link_encoder_disable_output,
+ .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings,
+ .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern,
+ .update_mst_stream_allocation_table =
+ dcn10_link_encoder_update_mst_stream_allocation_table,
+ .psr_program_dp_dphy_fast_training =
+ dcn10_psr_program_dp_dphy_fast_training,
+ .psr_program_secondary_packet = dcn10_psr_program_secondary_packet,
+ .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe,
+ .enable_hpd = dcn10_link_encoder_enable_hpd,
+ .disable_hpd = dcn10_link_encoder_disable_hpd,
+ .is_dig_enabled = dcn10_is_dig_enabled,
+ .destroy = dcn10_link_encoder_destroy,
+ .fec_set_enable = enc2_fec_set_enable,
+ .fec_set_ready = enc2_fec_set_ready,
+ .get_dig_frontend = dcn10_get_dig_frontend,
+ .fec_is_active = enc2_fec_is_active,
+ .is_in_alt_mode = dcn201_link_encoder_is_in_alt_mode,
+ .get_max_link_cap = dcn201_link_encoder_get_max_link_cap,
+};
+
+void dcn201_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask)
+{
+ struct bp_encoder_cap_info bp_cap_info = {0};
+ const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs;
+ enum bp_result result = BP_RESULT_OK;
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ enc10->base.funcs = &dcn201_link_enc_funcs;
+ enc10->base.ctx = init_data->ctx;
+ enc10->base.id = init_data->encoder;
+
+ enc10->base.hpd_source = init_data->hpd_source;
+ enc10->base.connector = init_data->connector;
+
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+ enc10->base.features = *enc_features;
+
+ enc10->base.transmitter = init_data->transmitter;
+
+ /* set the flag to indicate whether the driver polls the I2C data pin
+ * while doing the DP sink detect
+ */
+
+ /* if (dal_adapter_service_is_feature_supported(as,
+ * FEATURE_DP_SINK_DETECT_POLL_DATA_PIN))
+ * enc10->base.features.flags.bits.
+ * DP_SINK_DETECT_POLL_DATA_PIN = true;
+ */
+
+ enc10->base.output_signals =
+ SIGNAL_TYPE_DVI_SINGLE_LINK |
+ SIGNAL_TYPE_DVI_DUAL_LINK |
+ SIGNAL_TYPE_LVDS |
+ SIGNAL_TYPE_DISPLAY_PORT |
+ SIGNAL_TYPE_DISPLAY_PORT_MST |
+ SIGNAL_TYPE_EDP |
+ SIGNAL_TYPE_HDMI_TYPE_A;
+
+ /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE.
+ * SW always assigns DIG_FE 1:1 mapped to DIG_BE for non-MST UNIPHY.
+ * SW assign DIG_FE to non-MST UNIPHY first and MST last. So prefer
+ * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS.
+ * Prefer DIG assignment is decided by board design.
+ * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design
+ * and VBIOS will filter out 7 UNIPHY for DCE 8.0.
+ * By this, adding DIGG should not hurt DCE 8.0.
+ * This will let DCE 8.1 share DCE 8.0 as much as possible
+ */
+
+ enc10->link_regs = link_regs;
+ enc10->aux_regs = aux_regs;
+ enc10->hpd_regs = hpd_regs;
+ enc10->link_shift = link_shift;
+ enc10->link_mask = link_mask;
+
+ switch (enc10->base.transmitter) {
+ case TRANSMITTER_UNIPHY_A:
+ enc10->base.preferred_engine = ENGINE_ID_DIGA;
+ break;
+ case TRANSMITTER_UNIPHY_B:
+ enc10->base.preferred_engine = ENGINE_ID_DIGB;
+ break;
+ default:
+ ASSERT_CRITICAL(false);
+ enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+ }
+
+ /* default to one to mirror Windows behavior */
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
+
+ result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios,
+ enc10->base.id, &bp_cap_info);
+
+ /* Override features with DCE-specific values */
+ if (result == BP_RESULT_OK) {
+ enc10->base.features.flags.bits.IS_HBR2_CAPABLE =
+ bp_cap_info.DP_HBR2_EN;
+ enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
+ bp_cap_info.DP_HBR3_EN;
+ enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.DP_IS_USB_C =
+ bp_cap_info.DP_IS_USB_C;
+ } else {
+ DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n",
+ __func__,
+ result);
+ }
+
+ if (enc10->base.ctx->dc->debug.hdmi20_disable)
+ enc10->base.features.flags.bits.HDMI_6GB_EN = 0;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
new file mode 100644
index 000000000000..8b95ef251332
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_link_encoder.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_ENCODER__DCN201_H__
+#define __DC_LINK_ENCODER__DCN201_H__
+
+#include "dcn20/dcn20_link_encoder.h"
+
+#define DPCS_DCN201_MASK_SH_LIST(mask_sh)\
+ DPCS_MASK_SH_LIST(mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE_ACK, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL2, RDPCS_PHY_DPALT_DP4, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_PSTATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX0_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL5, RDPCS_PHY_DP_TX1_MPLL_EN, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX2_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_WIDTH, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_TX3_RATE, mask_sh),\
+ LE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL11, RDPCS_PHY_DP_REF_CLK_EN, mask_sh)
+
+#define DPCS_DCN201_REG_LIST(id) \
+ DPCS_DCN2_CMN_REG_LIST(id)
+
+void dcn201_link_encoder_construct(
+ struct dcn20_link_encoder *enc20,
+ const struct encoder_init_data *init_data,
+ const struct encoder_feature_support *enc_features,
+ const struct dcn10_link_enc_registers *link_regs,
+ const struct dcn10_link_enc_aux_registers *aux_regs,
+ const struct dcn10_link_enc_hpd_registers *hpd_regs,
+ const struct dcn10_link_enc_shift *link_shift,
+ const struct dcn10_link_enc_mask *link_mask);
+
+#endif /* __DC_LINK_ENCODER__DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
new file mode 100644
index 000000000000..95c4c55f067c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn201_mpc.h"
+
+#define REG(reg)\
+ mpc201->mpc_regs->reg
+
+#define CTX \
+ mpc201->base.ctx
+
+#define DC_LOGGER \
+ mpc201->base.ctx->logger
+
+#undef FN
+#define FN(reg_name, field_name) \
+ mpc201->mpc_shift->field_name, mpc201->mpc_mask->field_name
+
+static void mpc201_set_out_rate_control(
+ struct mpc *mpc,
+ int opp_id,
+ bool enable,
+ bool rate_2x_mode,
+ struct mpc_dwb_flow_control *flow_control)
+{
+ struct dcn201_mpc *mpc201 = TO_DCN201_MPC(mpc);
+
+ REG_UPDATE_2(MUX[opp_id],
+ MPC_OUT_RATE_CONTROL_DISABLE, !enable,
+ MPC_OUT_RATE_CONTROL, rate_2x_mode);
+
+ if (flow_control)
+ REG_UPDATE_3(MUX[opp_id],
+ MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
+ MPC_OUT_FLOW_CONTROL_COUNT0, flow_control->flow_ctrl_cnt0,
+ MPC_OUT_FLOW_CONTROL_COUNT1, flow_control->flow_ctrl_cnt1);
+}
+
+static void mpc201_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+{
+ mpcc->mpcc_id = mpcc_inst;
+ mpcc->dpp_id = 0xf;
+ mpcc->mpcc_bot = NULL;
+ mpcc->blnd_cfg.overlap_only = false;
+ mpcc->blnd_cfg.global_alpha = 0xff;
+ mpcc->blnd_cfg.global_gain = 0xff;
+ mpcc->blnd_cfg.background_color_bpc = 4;
+ mpcc->blnd_cfg.bottom_gain_mode = 0;
+ mpcc->blnd_cfg.top_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_inside_gain = 0x1f000;
+ mpcc->blnd_cfg.bottom_outside_gain = 0x1f000;
+ mpcc->sm_cfg.enable = false;
+ mpcc->shared_bottom = false;
+}
+
+const struct mpc_funcs dcn201_mpc_funcs = {
+ .read_mpcc_state = mpc1_read_mpcc_state,
+ .insert_plane = mpc1_insert_plane,
+ .remove_mpcc = mpc1_remove_mpcc,
+ .mpc_init = mpc1_mpc_init,
+ .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+ .update_blending = mpc2_update_blending,
+ .cursor_lock = mpc1_cursor_lock,
+ .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
+ .get_mpcc_for_dpp_from_secondary = NULL,
+ .wait_for_idle = mpc2_assert_idle_mpcc,
+ .assert_mpcc_idle_before_connect = mpc2_assert_mpcc_idle_before_connect,
+ .init_mpcc_list_from_hw = mpc1_init_mpcc_list_from_hw,
+ .set_denorm = mpc2_set_denorm,
+ .set_denorm_clamp = mpc2_set_denorm_clamp,
+ .set_output_csc = mpc2_set_output_csc,
+ .set_ocsc_default = mpc2_set_ocsc_default,
+ .set_output_gamma = mpc2_set_output_gamma,
+ .set_out_rate_control = mpc201_set_out_rate_control,
+ .power_on_mpc_mem_pwr = mpc20_power_on_ogam_lut,
+ .get_mpc_out_mux = mpc1_get_mpc_out_mux,
+ .set_bg_color = mpc1_set_bg_color,
+};
+
+void dcn201_mpc_construct(struct dcn201_mpc *mpc201,
+ struct dc_context *ctx,
+ const struct dcn201_mpc_registers *mpc_regs,
+ const struct dcn201_mpc_shift *mpc_shift,
+ const struct dcn201_mpc_mask *mpc_mask,
+ int num_mpcc)
+{
+ int i;
+
+ mpc201->base.ctx = ctx;
+
+ mpc201->base.funcs = &dcn201_mpc_funcs;
+
+ mpc201->mpc_regs = mpc_regs;
+ mpc201->mpc_shift = mpc_shift;
+ mpc201->mpc_mask = mpc_mask;
+
+ mpc201->mpcc_in_use_mask = 0;
+ mpc201->num_mpcc = num_mpcc;
+
+ for (i = 0; i < MAX_MPCC; i++)
+ mpc201_init_mpcc(&mpc201->base.mpcc_array[i], i);
+}
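
dcn201_mpc_construct follows the usual DC object pattern: the derived struct embeds the base, the constructor points the base at a static function-pointer table, and every MPCC slot is put into a known reset state. A self-contained sketch of that shape, with illustrative names only:

	#include <stdio.h>

	#define FAKE_MAX_MPCC 6

	struct fake_mpcc { int id; int dpp_id; };

	struct fake_mpc_funcs { void (*init_mpcc)(struct fake_mpcc *m, int inst); };

	struct fake_mpc_base {
		const struct fake_mpc_funcs *funcs;
		struct fake_mpcc mpcc[FAKE_MAX_MPCC];
	};

	struct fake_mpc201 { struct fake_mpc_base base; int num_mpcc; };

	static void fake_init_mpcc(struct fake_mpcc *m, int inst)
	{
		m->id = inst;
		m->dpp_id = 0xf;   /* "unassigned" marker, as in the driver */
	}

	static const struct fake_mpc_funcs fake_funcs = { .init_mpcc = fake_init_mpcc };

	static void fake_mpc_construct(struct fake_mpc201 *mpc, int num_mpcc)
	{
		int i;

		mpc->base.funcs = &fake_funcs;
		mpc->num_mpcc = num_mpcc;
		for (i = 0; i < FAKE_MAX_MPCC; i++)
			mpc->base.funcs->init_mpcc(&mpc->base.mpcc[i], i);
	}

	int main(void)
	{
		struct fake_mpc201 mpc = {0};

		fake_mpc_construct(&mpc, 5);
		printf("mpcc[2].dpp_id=%d\n", mpc.base.mpcc[2].dpp_id);
		return 0;
	}

Keeping the ops table static and const means every instance shares a single read-only vtable.
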
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h
new file mode 100644
index 000000000000..b9ce0c1ba5c7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_mpc.h
@@ -0,0 +1,86 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_MPCC_DCN201_H__
+#define __DC_MPCC_DCN201_H__
+
+#include "dcn20/dcn20_mpc.h"
+
+#define TO_DCN201_MPC(mpc_base) \
+ container_of(mpc_base, struct dcn201_mpc, base)
+
+#define MPC_REG_LIST_DCN201(inst) \
+ MPC_REG_LIST_DCN2_0(inst)
+
+#define MPC_OUT_MUX_REG_LIST_DCN201(inst) \
+ MPC_OUT_MUX_REG_LIST_DCN2_0(inst)
+
+#define MPC_REG_VARIABLE_LIST_DCN201 \
+ MPC_REG_VARIABLE_LIST_DCN2_0
+
+#define MPC_COMMON_MASK_SH_LIST_DCN201(mask_sh) \
+ MPC_COMMON_MASK_SH_LIST_DCN2_0(mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_RATE_CONTROL_DISABLE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_MODE, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT0, mask_sh),\
+ SF(MPC_OUT0_MUX, MPC_OUT_FLOW_CONTROL_COUNT1, mask_sh)
+
+#define MPC_REG_FIELD_LIST_DCN201(type) \
+ MPC_REG_FIELD_LIST_DCN2_0(type) \
+ type MPC_OUT_RATE_CONTROL;\
+ type MPC_OUT_RATE_CONTROL_DISABLE;\
+ type MPC_OUT_FLOW_CONTROL_MODE;\
+ type MPC_OUT_FLOW_CONTROL_COUNT0;\
+ type MPC_OUT_FLOW_CONTROL_COUNT1;
+
+struct dcn201_mpc_registers {
+ MPC_REG_VARIABLE_LIST_DCN201
+};
+
+struct dcn201_mpc_shift {
+ MPC_REG_FIELD_LIST_DCN201(uint8_t)
+};
+
+struct dcn201_mpc_mask {
+ MPC_REG_FIELD_LIST_DCN201(uint32_t)
+};
+
+struct dcn201_mpc {
+ struct mpc base;
+ int mpcc_in_use_mask;
+ int num_mpcc;
+ const struct dcn201_mpc_registers *mpc_regs;
+ const struct dcn201_mpc_shift *mpc_shift;
+ const struct dcn201_mpc_mask *mpc_mask;
+};
+
+void dcn201_mpc_construct(struct dcn201_mpc *mpc201,
+ struct dc_context *ctx,
+ const struct dcn201_mpc_registers *mpc_regs,
+ const struct dcn201_mpc_shift *mpc_shift,
+ const struct dcn201_mpc_mask *mpc_mask,
+ int num_mpcc);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
new file mode 100644
index 000000000000..8e77db46a409
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dcn201_opp.h"
+#include "reg_helper.h"
+
+#define REG(reg) \
+ (oppn201->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ oppn201->opp_shift->field_name, oppn201->opp_mask->field_name
+
+#define CTX \
+ oppn201->base.ctx
+
+/*****************************************/
+/* Constructor, Destructor */
+/*****************************************/
+
+static struct opp_funcs dcn201_opp_funcs = {
+ .opp_set_dyn_expansion = opp1_set_dyn_expansion,
+ .opp_program_fmt = opp1_program_fmt,
+ .opp_program_bit_depth_reduction = opp1_program_bit_depth_reduction,
+ .opp_program_stereo = opp1_program_stereo,
+ .opp_pipe_clock_control = opp1_pipe_clock_control,
+ .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
+ .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
+ .dpg_is_blanked = opp2_dpg_is_blanked,
+ .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
+ .opp_destroy = opp1_destroy,
+ .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
+};
+
+void dcn201_opp_construct(struct dcn201_opp *oppn201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_opp_registers *regs,
+ const struct dcn201_opp_shift *opp_shift,
+ const struct dcn201_opp_mask *opp_mask)
+{
+ oppn201->base.ctx = ctx;
+ oppn201->base.inst = inst;
+ oppn201->base.funcs = &dcn201_opp_funcs;
+
+ oppn201->regs = regs;
+ oppn201->opp_shift = opp_shift;
+ oppn201->opp_mask = opp_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h
new file mode 100644
index 000000000000..aca389ec1779
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.h
@@ -0,0 +1,74 @@
+/* Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPP_DCN201_H__
+#define __DC_OPP_DCN201_H__
+
+#include "dcn20/dcn20_opp.h"
+
+#define TO_DCN201_OPP(opp)\
+ container_of(opp, struct dcn201_opp, base)
+
+#define OPP_SF(reg_name, field_name, post_fix)\
+ .field_name = reg_name ## __ ## field_name ## post_fix
+
+#define OPP_REG_LIST_DCN201(id) \
+ OPP_REG_LIST_DCN10(id), \
+ OPP_DPG_REG_LIST(id), \
+ SRI(FMT_422_CONTROL, FMT, id)
+
+#define OPP_MASK_SH_LIST_DCN201(mask_sh) \
+ OPP_MASK_SH_LIST_DCN20(mask_sh)
+
+#define OPP_DCN201_REG_FIELD_LIST(type) \
+ OPP_DCN20_REG_FIELD_LIST(type);
+
+struct dcn201_opp_shift {
+ OPP_DCN201_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn201_opp_mask {
+ OPP_DCN201_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn201_opp_registers {
+ OPP_REG_VARIABLE_LIST_DCN2_0;
+};
+
+struct dcn201_opp {
+ struct output_pixel_processor base;
+ const struct dcn201_opp_registers *regs;
+ const struct dcn201_opp_shift *opp_shift;
+ const struct dcn201_opp_mask *opp_mask;
+ bool is_write_to_ram_a_safe;
+};
+
+void dcn201_opp_construct(struct dcn201_opp *oppn201,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn201_opp_registers *regs,
+ const struct dcn201_opp_shift *opp_shift,
+ const struct dcn201_opp_mask *opp_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
new file mode 100644
index 000000000000..730875dfd8b4
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "reg_helper.h"
+#include "dcn201_optc.h"
+#include "dcn10/dcn10_optc.h"
+#include "dc.h"
+
+#define REG(reg)\
+ optc1->tg_regs->reg
+
+#define CTX \
+ optc1->base.ctx
+
+#undef FN
+#define FN(reg_name, field_name) \
+ optc1->tg_shift->field_name, optc1->tg_mask->field_name
+
+/*TEMP: Need to figure out inheritance model here.*/
+bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing)
+{
+ return optc1_is_two_pixels_per_containter(timing);
+}
+
+static void optc201_triplebuffer_lock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_GLOBAL_CONTROL0, 0,
+ OTG_MASTER_UPDATE_LOCK_SEL, optc->inst);
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 1);
+
+ if (optc->ctx->dce_environment != DCE_ENV_FPGA_MAXIMUS)
+ REG_WAIT(OTG_MASTER_UPDATE_LOCK,
+ UPDATE_LOCK_STATUS, 1,
+ 1, 10);
+}
+
+static void optc201_triplebuffer_unlock(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_SET(OTG_MASTER_UPDATE_LOCK, 0,
+ OTG_MASTER_UPDATE_LOCK, 0);
+ REG_SET(OTG_VUPDATE_KEEPOUT, 0,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 0);
+
+}
+
+static bool optc201_validate_timing(
+ struct timing_generator *optc,
+ const struct dc_crtc_timing *timing)
+{
+ uint32_t v_blank;
+ uint32_t h_blank;
+ uint32_t min_v_blank;
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ ASSERT(timing != NULL);
+
+ v_blank = (timing->v_total - timing->v_addressable -
+ timing->v_border_top - timing->v_border_bottom);
+
+ h_blank = (timing->h_total - timing->h_addressable -
+ timing->h_border_right -
+ timing->h_border_left);
+
+ if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_HW_FRAME_PACKING &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_TOP_AND_BOTTOM &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_SIDE_BY_SIDE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_FRAME_ALTERNATE &&
+ timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
+ return false;
+
+ /* Check the maximum number of pixels supported by the timing generator
+ * (currently this never fails; failing would require a display with
+ * more than 8192 total pixels horizontally and more than 8192 total
+ * pixels vertically).
+ */
+ if (timing->h_total > optc1->max_h_total ||
+ timing->v_total > optc1->max_v_total)
+ return false;
+
+ if (h_blank < optc1->min_h_blank)
+ return false;
+
+ if (timing->h_sync_width < optc1->min_h_sync_width ||
+ timing->v_sync_width < optc1->min_v_sync_width)
+ return false;
+
+ min_v_blank = timing->flags.INTERLACE ? optc1->min_v_blank_interlace : optc1->min_v_blank;
+
+ if (v_blank < min_v_blank)
+ return false;
+
+ return true;
+
+}
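
For a concrete feel of the blanking checks above, here is a standalone sketch run against a standard 1920x1080@60 CEA timing (h_total 2200, v_total 1125); the limits used are illustrative sample values, not the DCN201 register minimums:

	#include <stdbool.h>
	#include <stdio.h>

	struct fake_timing { int h_total, h_addressable, v_total, v_addressable; };

	static bool fake_validate(const struct fake_timing *t)
	{
		int h_blank = t->h_total - t->h_addressable;   /* borders assumed 0 */
		int v_blank = t->v_total - t->v_addressable;

		if (t->h_total > 8192 || t->v_total > 8192)
			return false;                          /* TG pixel-count limit */
		if (h_blank < 32 || v_blank < 3)               /* sample minimums */
			return false;
		return true;
	}

	int main(void)
	{
		struct fake_timing cea_1080p60 = { 2200, 1920, 1125, 1080 };

		/* h_blank = 2200 - 1920 = 280, v_blank = 1125 - 1080 = 45 -> valid */
		printf("valid=%d\n", fake_validate(&cea_1080p60));
		return 0;
	}
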
+
+static void optc201_get_optc_source(struct timing_generator *optc,
+ uint32_t *num_of_src_opp,
+ uint32_t *src_opp_id_0,
+ uint32_t *src_opp_id_1)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_GET(OPTC_DATA_SOURCE_SELECT,
+ OPTC_SEG0_SRC_SEL, src_opp_id_0);
+
+ *num_of_src_opp = 1;
+}
+
+static struct timing_generator_funcs dcn201_tg_funcs = {
+ .validate_timing = optc201_validate_timing,
+ .program_timing = optc1_program_timing,
+ .setup_vertical_interrupt0 = optc1_setup_vertical_interrupt0,
+ .setup_vertical_interrupt1 = optc1_setup_vertical_interrupt1,
+ .setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
+ .program_global_sync = optc1_program_global_sync,
+ .enable_crtc = optc2_enable_crtc,
+ .disable_crtc = optc1_disable_crtc,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .is_counter_moving = optc1_is_counter_moving,
+ .get_position = optc1_get_position,
+ .get_frame_count = optc1_get_vblank_counter,
+ .get_scanoutpos = optc1_get_crtc_scanoutpos,
+ .get_otg_active_size = optc1_get_otg_active_size,
+ .set_early_control = optc1_set_early_control,
+ /* used by enable_timing_synchronization. Not needed for FPGA */
+ .wait_for_state = optc1_wait_for_state,
+ .set_blank = optc1_set_blank,
+ .is_blanked = optc1_is_blanked,
+ .set_blank_color = optc1_program_blank_color,
+ .did_triggered_reset_occur = optc1_did_triggered_reset_occur,
+ .enable_reset_trigger = optc1_enable_reset_trigger,
+ .enable_crtc_reset = optc1_enable_crtc_reset,
+ .disable_reset_trigger = optc1_disable_reset_trigger,
+ .triplebuffer_lock = optc201_triplebuffer_lock,
+ .triplebuffer_unlock = optc201_triplebuffer_unlock,
+ .lock = optc1_lock,
+ .unlock = optc1_unlock,
+ .enable_optc_clock = optc1_enable_optc_clock,
+ .set_drr = optc1_set_drr,
+ .get_last_used_drr_vtotal = NULL,
+ .set_vtotal_min_max = optc1_set_vtotal_min_max,
+ .set_static_screen_control = optc1_set_static_screen_control,
+ .program_stereo = optc1_program_stereo,
+ .is_stereo_left_eye = optc1_is_stereo_left_eye,
+ .set_blank_data_double_buffer = optc1_set_blank_data_double_buffer,
+ .tg_init = optc1_tg_init,
+ .is_tg_enabled = optc1_is_tg_enabled,
+ .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc1_clear_optc_underflow,
+ .get_crc = optc1_get_crc,
+ .configure_crc = optc2_configure_crc,
+ .set_dsc_config = optc2_set_dsc_config,
+ .set_dwb_source = NULL,
+ .get_optc_source = optc201_get_optc_source,
+ .set_vtg_params = optc1_set_vtg_params,
+ .program_manual_trigger = optc2_program_manual_trigger,
+ .setup_manual_trigger = optc2_setup_manual_trigger,
+ .get_hw_timing = optc1_get_hw_timing,
+};
+
+void dcn201_timing_generator_init(struct optc *optc1)
+{
+ optc1->base.funcs = &dcn201_tg_funcs;
+
+ optc1->max_h_total = optc1->tg_mask->OTG_H_TOTAL + 1;
+ optc1->max_v_total = optc1->tg_mask->OTG_V_TOTAL + 1;
+
+ optc1->min_h_blank = 32;
+ optc1->min_v_blank = 3;
+ optc1->min_v_blank_interlace = 5;
+ optc1->min_h_sync_width = 8;
+ optc1->min_v_sync_width = 1;
+}
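
dcn201_timing_generator_init derives the maximum programmable totals from the register field masks (mask + 1 when the field starts at bit 0). A small sketch of that derivation using an example 15-bit mask, which is an assumption here rather than the actual OTG field width:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t max_total_from_mask(uint32_t field_mask)
	{
		/* shift the mask down so it starts at bit 0, then +1 gives the count */
		while (field_mask && !(field_mask & 1))
			field_mask >>= 1;
		return field_mask + 1;
	}

	int main(void)
	{
		printf("max_h_total=%u\n", max_total_from_mask(0x7fff)); /* -> 32768 */
		return 0;
	}
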
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
new file mode 100644
index 000000000000..e9545b73513a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_optc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_OPTC_DCN201_H__
+#define __DC_OPTC_DCN201_H__
+
+#include "dcn20/dcn20_optc.h"
+
+#define TG_COMMON_REG_LIST_DCN201(inst) \
+ TG_COMMON_REG_LIST_DCN(inst),\
+ SRI(OTG_GLOBAL_CONTROL1, OTG, inst),\
+ SRI(OTG_GLOBAL_CONTROL2, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_X, OTG, inst),\
+ SRI(OTG_GSL_WINDOW_Y, OTG, inst),\
+ SRI(OTG_VUPDATE_KEEPOUT, OTG, inst),\
+ SRI(OTG_DSC_START_POSITION, OTG, inst),\
+ SRI(OPTC_DATA_FORMAT_CONTROL, ODM, inst),\
+ SRI(OPTC_BYTES_PER_PIXEL, ODM, inst),\
+ SRI(OPTC_WIDTH_CONTROL, ODM, inst),\
+ SR(DWB_SOURCE_SELECT)
+
+#define TG_COMMON_MASK_SH_LIST_DCN201(mask_sh)\
+ TG_COMMON_MASK_SH_LIST_DCN(mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_X, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_Y, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL1, MASTER_UPDATE_LOCK_DB_EN, mask_sh),\
+ SF(OTG0_OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, mask_sh),\
+ SF(OTG0_OTG_DOUBLE_BUFFER_CONTROL, OTG_RANGE_TIMING_DBUF_UPDATE_MODE, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_START_X, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_X, OTG_GSL_WINDOW_END_X, mask_sh), \
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_START_Y, mask_sh),\
+ SF(OTG0_OTG_GSL_WINDOW_Y, OTG_GSL_WINDOW_END_Y, mask_sh),\
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, mask_sh), \
+ SF(OTG0_OTG_VUPDATE_KEEPOUT, MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_GSL_MASTER_MODE, mask_sh), \
+ SF(OTG0_OTG_GSL_CONTROL, OTG_MASTER_UPDATE_LOCK_GSL_EN, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_X, mask_sh), \
+ SF(OTG0_OTG_DSC_START_POSITION, OTG_DSC_START_POSITION_LINE_NUM, mask_sh),\
+ SF(ODM0_OPTC_DATA_SOURCE_SELECT, OPTC_SEG0_SRC_SEL, mask_sh),\
+ SF(ODM0_OPTC_DATA_FORMAT_CONTROL, OPTC_DSC_MODE, mask_sh),\
+ SF(ODM0_OPTC_BYTES_PER_PIXEL, OPTC_DSC_BYTES_PER_PIXEL, mask_sh),\
+ SF(ODM0_OPTC_WIDTH_CONTROL, OPTC_DSC_SLICE_WIDTH, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB0_SOURCE_SELECT, mask_sh),\
+ SF(DWB_SOURCE_SELECT, OPTC_DWB1_SOURCE_SELECT, mask_sh)
+
+void dcn201_timing_generator_init(struct optc *optc);
+
+bool optc201_is_two_pixels_per_containter(const struct dc_crtc_timing *timing);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
new file mode 100644
index 000000000000..0fa381088d1d
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.c
@@ -0,0 +1,1307 @@
+/*
+ * Copyright 2016 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dc.h"
+
+#include "dcn201_init.h"
+#include "dml/dcn20/dcn20_fpu.h"
+#include "resource.h"
+#include "include/irq_service_interface.h"
+#include "dcn201_resource.h"
+
+#include "dcn20/dcn20_resource.h"
+
+#include "dcn10/dcn10_hubp.h"
+#include "dcn10/dcn10_ipp.h"
+#include "dcn201_mpc.h"
+#include "dcn201_hubp.h"
+#include "irq/dcn201/irq_service_dcn201.h"
+#include "dcn201/dcn201_dpp.h"
+#include "dcn201/dcn201_hubbub.h"
+#include "dcn201_dccg.h"
+#include "dcn201_optc.h"
+#include "dcn201_hwseq.h"
+#include "dce110/dce110_hw_sequencer.h"
+#include "dcn201_opp.h"
+#include "dcn201/dcn201_link_encoder.h"
+#include "dcn20/dcn20_stream_encoder.h"
+#include "dce/dce_clock_source.h"
+#include "dce/dce_audio.h"
+#include "dce/dce_hwseq.h"
+#include "virtual/virtual_stream_encoder.h"
+#include "dce110/dce110_resource.h"
+#include "dce/dce_aux.h"
+#include "dce/dce_i2c.h"
+#include "dcn201_hubbub.h"
+#include "dcn10/dcn10_resource.h"
+
+#include "cyan_skillfish_ip_offset.h"
+
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+#include "dpcs/dpcs_2_0_3_offset.h"
+#include "dpcs/dpcs_2_0_3_sh_mask.h"
+
+#include "mmhub/mmhub_2_0_0_offset.h"
+#include "mmhub/mmhub_2_0_0_sh_mask.h"
+#include "nbio/nbio_7_4_offset.h"
+
+#include "reg_helper.h"
+
+#define MIN_DISP_CLK_KHZ 100000
+#define MIN_DPP_CLK_KHZ 100000
+
+struct _vcs_dpi_ip_params_st dcn201_ip = {
+ .gpuvm_enable = 0,
+ .hostvm_enable = 0,
+ .gpuvm_max_page_table_levels = 4,
+ .hostvm_max_page_table_levels = 4,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .rob_buffer_size_kbytes = 168,
+ .det_buffer_size_kbytes = 164,
+ .dpte_buffer_size_in_pte_reqs_luma = 84,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .pte_chunk_size_kbytes = 2,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 2,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0,
+ .line_buffer_fixed_bpp = 0,
+ .dcc_supported = true,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12,
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 12,
+ .writeback_max_vscl_taps = 12,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 9600,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 2,
+ .max_num_dpp = 4,
+ .max_num_wb = 0,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 8,
+ .max_vscl_ratio = 8,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.10,
+ .min_vblank_lines = 30,
+ .dppclk_delay_subtotal = 77,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 8,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 87,
+ .dcfclk_cstate_latency = 10,
+ .max_inter_dcn_tile_repeaters = 8,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn201_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 200.0,
+ .dispclk_mhz = 300.0,
+ .dppclk_mhz = 300.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 2000.0,
+ },
+ {
+ .state = 1,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 250.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 3600.0,
+ },
+ {
+ .state = 2,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 750.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 6800.0,
+ },
+ {
+ .state = 3,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 250.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 4,
+ .dscclk_mhz = 400.0,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 750.0,
+ .dispclk_mhz = 1200.0,
+ .dppclk_mhz = 1200.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1254.0,
+ .dram_speed_mts = 14000.0,
+ }
+ },
+ .num_states = 4,
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 256,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 256,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 256,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 80.0,
+ .max_avg_sdp_bw_use_normal_percent = 80.0,
+ .max_avg_dram_bw_use_normal_percent = 69.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 80.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.3,
+ .downspread_percent = 0.3,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 128,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 16,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 250.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3000,
+ .use_urgent_burst_bw = 0,
+};
+
+enum dcn20_clk_src_array_id {
+ DCN20_CLK_SRC_PLL0,
+ DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_TOTAL_DCN201
+};
+
+/* begin *********************
+ * macros to expand the register list macros defined in the HW object header files */
+
+/* DCN */
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+#define BASE(seg) BASE_INNER(seg)
+
+#define SR(reg_name)\
+ .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+#define SRI(reg_name, block, id)\
+ .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRIR(var_name, reg_name, block, id)\
+ .var_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define SRI_IX(reg_name, block, id)\
+ .reg_name = ix ## block ## id ## _ ## reg_name
+
+#define DCCG_SRII(reg_name, block, id)\
+ .block ## _ ## reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define VUPDATE_SRII(reg_name, block, id)\
+ .reg_name[id] = BASE(mm ## reg_name ## _ ## block ## id ## _BASE_IDX) + \
+ mm ## reg_name ## _ ## block ## id
+
+/* NBIO */
+#define NBIO_BASE_INNER(seg) \
+ NBIO_BASE__INST0_SEG ## seg
+
+#define NBIO_BASE(seg) \
+ NBIO_BASE_INNER(seg)
+
+#define NBIO_SR(reg_name)\
+ .reg_name = NBIO_BASE(mm ## reg_name ## _BASE_IDX) + \
+ mm ## reg_name
+
+/* MMHUB */
+#define MMHUB_BASE_INNER(seg) \
+ MMHUB_BASE__INST0_SEG ## seg
+
+#define MMHUB_BASE(seg) \
+ MMHUB_BASE_INNER(seg)
+
+#define MMHUB_SR(reg_name)\
+ .reg_name = MMHUB_BASE(mmMM ## reg_name ## _BASE_IDX) + \
+ mmMM ## reg_name
+
+static const struct bios_registers bios_regs = {
+ NBIO_SR(BIOS_SCRATCH_3),
+ NBIO_SR(BIOS_SCRATCH_6)
+};
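
The SR()/SRI() helpers above rely on two-stage token pasting: the inner BASE_INNER macro glues the segment index onto the per-segment base symbol, and the outer BASE wrapper exists so the index argument is macro-expanded first. A compilable toy expansion using made-up register names and addresses:

	#include <stdio.h>

	#define FAKE_DMU_BASE__INST0_SEG2   0x000034C0   /* pretend segment base */
	#define mmFAKE_REG                  0x0123       /* pretend register offset */
	#define mmFAKE_REG_BASE_IDX         2

	#define FAKE_BASE_INNER(seg)  FAKE_DMU_BASE__INST0_SEG ## seg
	#define FAKE_BASE(seg)        FAKE_BASE_INNER(seg)   /* lets seg expand to 2 first */

	#define FAKE_SR(reg_name) \
		.reg_name = FAKE_BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name

	struct fake_regs { unsigned int FAKE_REG; };

	/* expands to: .FAKE_REG = 0x000034C0 + 0x0123 */
	static const struct fake_regs regs = { FAKE_SR(FAKE_REG) };

	int main(void)
	{
		printf("FAKE_REG address = %#x\n", regs.FAKE_REG);
		return 0;
	}
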
+
+#define clk_src_regs(index, pllid)\
+[index] = {\
+ CS_COMMON_REG_LIST_DCN201(index, pllid),\
+}
+
+static const struct dce110_clk_src_regs clk_src_regs[] = {
+ clk_src_regs(0, A),
+ clk_src_regs(1, B)
+};
+
+static const struct dce110_clk_src_shift cs_shift = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(__SHIFT)
+};
+
+static const struct dce110_clk_src_mask cs_mask = {
+ CS_COMMON_MASK_SH_LIST_DCN2_0(_MASK)
+};
+
+#define audio_regs(id)\
+[id] = {\
+ AUD_COMMON_REG_LIST(id)\
+}
+
+static const struct dce_audio_registers audio_regs[] = {
+ audio_regs(0),
+ audio_regs(1),
+};
+
+#define DCE120_AUD_COMMON_MASK_SH_LIST(mask_sh)\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX, AZALIA_ENDPOINT_REG_INDEX, mask_sh),\
+ SF(AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA, AZALIA_ENDPOINT_REG_DATA, mask_sh),\
+ AUD_COMMON_MASK_SH_LIST_BASE(mask_sh)
+
+static const struct dce_audio_shift audio_shift = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_audio_mask audio_mask = {
+ DCE120_AUD_COMMON_MASK_SH_LIST(_MASK)
+};
+
+#define stream_enc_regs(id)\
+[id] = {\
+ SE_DCN2_REG_LIST(id)\
+}
+
+static const struct dcn10_stream_enc_registers stream_enc_regs[] = {
+ stream_enc_regs(0),
+ stream_enc_regs(1)
+};
+
+static const struct dcn10_stream_encoder_shift se_shift = {
+ SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT)
+};
+
+static const struct dcn10_stream_encoder_mask se_mask = {
+ SE_COMMON_MASK_SH_LIST_DCN20(_MASK)
+};
+
+static const struct dce110_aux_registers_shift aux_shift = {
+ DCN_AUX_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce110_aux_registers_mask aux_mask = {
+ DCN_AUX_MASK_SH_LIST(_MASK)
+};
+
+#define aux_regs(id)\
+[id] = {\
+ DCN2_AUX_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = {
+ aux_regs(0),
+ aux_regs(1),
+};
+
+#define hpd_regs(id)\
+[id] = {\
+ HPD_REG_LIST(id)\
+}
+
+static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = {
+ hpd_regs(0),
+ hpd_regs(1),
+};
+
+#define link_regs(id, phyid)\
+[id] = {\
+ LE_DCN_COMMON_REG_LIST(id), \
+ UNIPHY_DCN2_REG_LIST(phyid) \
+}
+
+static const struct dcn10_link_enc_registers link_enc_regs[] = {
+ link_regs(0, A),
+ link_regs(1, B),
+};
+
+#define LINK_ENCODER_MASK_SH_LIST_DCN201(mask_sh)\
+ LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh)
+
+static const struct dcn10_link_enc_shift le_shift = {
+ LINK_ENCODER_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn10_link_enc_mask le_mask = {
+ LINK_ENCODER_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define ipp_regs(id)\
+[id] = {\
+ IPP_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn10_ipp_registers ipp_regs[] = {
+ ipp_regs(0),
+ ipp_regs(1),
+ ipp_regs(2),
+ ipp_regs(3),
+};
+
+static const struct dcn10_ipp_shift ipp_shift = {
+ IPP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn10_ipp_mask ipp_mask = {
+ IPP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define opp_regs(id)\
+[id] = {\
+ OPP_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn201_opp_registers opp_regs[] = {
+ opp_regs(0),
+ opp_regs(1),
+};
+
+static const struct dcn201_opp_shift opp_shift = {
+ OPP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_opp_mask opp_mask = {
+ OPP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST0(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1)
+};
+
+#define tf_regs(id)\
+[id] = {\
+ TF_REG_LIST_DCN201(id),\
+}
+
+static const struct dcn201_dpp_registers tf_regs[] = {
+ tf_regs(0),
+ tf_regs(1),
+ tf_regs(2),
+ tf_regs(3),
+};
+
+static const struct dcn201_dpp_shift tf_shift = {
+ TF_REG_LIST_SH_MASK_DCN201(__SHIFT)
+};
+
+static const struct dcn201_dpp_mask tf_mask = {
+ TF_REG_LIST_SH_MASK_DCN201(_MASK)
+};
+
+static const struct dcn201_mpc_registers mpc_regs = {
+ MPC_REG_LIST_DCN201(0),
+ MPC_REG_LIST_DCN201(1),
+ MPC_REG_LIST_DCN201(2),
+ MPC_REG_LIST_DCN201(3),
+ MPC_REG_LIST_DCN201(4),
+ MPC_OUT_MUX_REG_LIST_DCN201(0),
+ MPC_OUT_MUX_REG_LIST_DCN201(1),
+};
+
+static const struct dcn201_mpc_shift mpc_shift = {
+ MPC_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_mpc_mask mpc_mask = {
+ MPC_COMMON_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define tg_regs_dcn201(id)\
+[id] = {TG_COMMON_REG_LIST_DCN201(id)}
+
+static const struct dcn_optc_registers tg_regs[] = {
+ tg_regs_dcn201(0),
+ tg_regs_dcn201(1)
+};
+
+static const struct dcn_optc_shift tg_shift = {
+ TG_COMMON_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn_optc_mask tg_mask = {
+ TG_COMMON_MASK_SH_LIST_DCN201(_MASK)
+};
+
+#define hubp_regsDCN201(id)\
+[id] = {\
+ HUBP_REG_LIST_DCN201(id)\
+}
+
+static const struct dcn201_hubp_registers hubp_regs[] = {
+ hubp_regsDCN201(0),
+ hubp_regsDCN201(1),
+ hubp_regsDCN201(2),
+ hubp_regsDCN201(3)
+};
+
+static const struct dcn201_hubp_shift hubp_shift = {
+ HUBP_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn201_hubp_mask hubp_mask = {
+ HUBP_MASK_SH_LIST_DCN201(_MASK)
+};
+
+static const struct dcn_hubbub_registers hubbub_reg = {
+ HUBBUB_REG_LIST_DCN201(0)
+};
+
+static const struct dcn_hubbub_shift hubbub_shift = {
+ HUBBUB_MASK_SH_LIST_DCN201(__SHIFT)
+};
+
+static const struct dcn_hubbub_mask hubbub_mask = {
+ HUBBUB_MASK_SH_LIST_DCN201(_MASK)
+};
+
+static const struct dccg_registers dccg_regs = {
+ DCCG_COMMON_REG_LIST_DCN_BASE()
+};
+
+static const struct dccg_shift dccg_shift = {
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(__SHIFT)
+};
+
+static const struct dccg_mask dccg_mask = {
+ DCCG_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(_MASK)
+};
+
+static const struct resource_caps res_cap_dnc201 = {
+ .num_timing_generator = 2,
+ .num_opp = 2,
+ .num_video_plane = 4,
+ .num_audio = 2,
+ .num_stream_encoder = 2,
+ .num_pll = 2,
+ .num_ddc = 2,
+};
+
+static const struct dc_plane_cap plane_cap = {
+ .type = DC_PLANE_TYPE_DCN_UNIVERSAL,
+ .blends_with_above = true,
+ .blends_with_below = true,
+ .per_pixel_alpha = true,
+
+ .pixel_format_support = {
+ .argb8888 = true,
+ .nv12 = false,
+ .fp16 = true,
+ .p010 = false,
+ },
+
+ .max_upscale_factor = {
+ .argb8888 = 16000,
+ .nv12 = 16000,
+ .fp16 = 1
+ },
+
+ .max_downscale_factor = {
+ .argb8888 = 250,
+ .nv12 = 250,
+ .fp16 = 250
+ },
+ 64,
+ 64
+};
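
The scale-factor caps in plane_cap are commonly read as fixed-point thousandths (16000 = up to 16x upscale, 250 = down to 0.25x); that interpretation is an assumption here, not something stated in this patch. A quick numeric sketch under that assumption:

	#include <stdbool.h>
	#include <stdio.h>

	static bool scale_ok(int src, int dst, int max_upscale_x1000, int max_downscale_x1000)
	{
		int ratio_x1000 = dst * 1000 / src;   /* >1000 = upscale, <1000 = downscale */

		return ratio_x1000 <= max_upscale_x1000 && ratio_x1000 >= max_downscale_x1000;
	}

	int main(void)
	{
		/* 1920 source -> 3840 destination is a 2000 (2.0x) ratio: allowed */
		printf("%d\n", scale_ok(1920, 3840, 16000, 250));
		/* 3840 source -> 480 destination is a 125 (0.125x) ratio: rejected */
		printf("%d\n", scale_ok(3840, 480, 16000, 250));
		return 0;
	}
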
+
+static const struct dc_debug_options debug_defaults_drv = {
+ .disable_dmcu = true,
+ .force_abm_enable = false,
+ .timing_trace = false,
+ .clock_trace = true,
+ .disable_pplib_clock_request = true,
+ .pipe_split_policy = MPC_SPLIT_AVOID,
+ .force_single_disp_pipe_split = false,
+ .disable_dcc = DCC_ENABLE,
+ .vsr_support = true,
+ .performance_trace = false,
+ .az_endpoint_mute_only = true,
+ .max_downscale_src_width = 3840,
+ .disable_pplib_wm_range = true,
+ .scl_reset_length10 = true,
+ .sanity_checks = false,
+ .underflow_assert_delay_us = 0xFFFFFFFF,
+ .enable_tri_buf = false,
+};
+
+static void dcn201_dpp_destroy(struct dpp **dpp)
+{
+ kfree(TO_DCN201_DPP(*dpp));
+ *dpp = NULL;
+}
+
+static struct dpp *dcn201_dpp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn201_dpp *dpp =
+ kzalloc(sizeof(struct dcn201_dpp), GFP_ATOMIC);
+
+ if (!dpp)
+ return NULL;
+
+ if (dpp201_construct(dpp, ctx, inst,
+ &tf_regs[inst], &tf_shift, &tf_mask))
+ return &dpp->base;
+
+ kfree(dpp);
+ return NULL;
+}
+
+static struct input_pixel_processor *dcn201_ipp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn10_ipp *ipp =
+ kzalloc(sizeof(struct dcn10_ipp), GFP_ATOMIC);
+
+ if (!ipp) {
+ return NULL;
+ }
+
+ dcn20_ipp_construct(ipp, ctx, inst,
+ &ipp_regs[inst], &ipp_shift, &ipp_mask);
+ return &ipp->base;
+}
+
+
+static struct output_pixel_processor *dcn201_opp_create(
+ struct dc_context *ctx, uint32_t inst)
+{
+ struct dcn201_opp *opp =
+ kzalloc(sizeof(struct dcn201_opp), GFP_ATOMIC);
+
+ if (!opp) {
+ return NULL;
+ }
+
+ dcn201_opp_construct(opp, ctx, inst,
+ &opp_regs[inst], &opp_shift, &opp_mask);
+ return &opp->base;
+}
+
+struct dce_aux *dcn201_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_ATOMIC);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst],
+ &aux_mask,
+ &aux_shift,
+ ctx->dc->caps.extended_aux_timeout_support);
+
+ return &aux_engine->base;
+}
+#define i2c_inst_regs(id) { I2C_HW_ENGINE_COMMON_REG_LIST(id) }
+
+static const struct dce_i2c_registers i2c_hw_regs[] = {
+ i2c_inst_regs(1),
+ i2c_inst_regs(2),
+};
+
+static const struct dce_i2c_shift i2c_shifts = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(__SHIFT)
+};
+
+static const struct dce_i2c_mask i2c_masks = {
+ I2C_COMMON_MASK_SH_LIST_DCN2(_MASK)
+};
+
+struct dce_i2c_hw *dcn201_i2c_hw_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dce_i2c_hw *dce_i2c_hw =
+ kzalloc(sizeof(struct dce_i2c_hw), GFP_ATOMIC);
+
+ if (!dce_i2c_hw)
+ return NULL;
+
+ dcn2_i2c_hw_construct(dce_i2c_hw, ctx, inst,
+ &i2c_hw_regs[inst], &i2c_shifts, &i2c_masks);
+
+ return dce_i2c_hw;
+}
+
+static struct mpc *dcn201_mpc_create(struct dc_context *ctx, uint32_t num_mpcc)
+{
+ struct dcn201_mpc *mpc201 = kzalloc(sizeof(struct dcn201_mpc),
+ GFP_ATOMIC);
+
+ if (!mpc201)
+ return NULL;
+
+ dcn201_mpc_construct(mpc201, ctx,
+ &mpc_regs,
+ &mpc_shift,
+ &mpc_mask,
+ num_mpcc);
+
+ return &mpc201->base;
+}
+
+static struct hubbub *dcn201_hubbub_create(struct dc_context *ctx)
+{
+ struct dcn20_hubbub *hubbub = kzalloc(sizeof(struct dcn20_hubbub),
+ GFP_ATOMIC);
+
+ if (!hubbub)
+ return NULL;
+
+ hubbub201_construct(hubbub, ctx,
+ &hubbub_reg,
+ &hubbub_shift,
+ &hubbub_mask);
+
+ return &hubbub->base;
+}
+
+static struct timing_generator *dcn201_timing_generator_create(
+ struct dc_context *ctx,
+ uint32_t instance)
+{
+ struct optc *tgn10 =
+ kzalloc(sizeof(struct optc), GFP_ATOMIC);
+
+ if (!tgn10)
+ return NULL;
+
+ tgn10->base.inst = instance;
+ tgn10->base.ctx = ctx;
+
+ tgn10->tg_regs = &tg_regs[instance];
+ tgn10->tg_shift = &tg_shift;
+ tgn10->tg_mask = &tg_mask;
+
+ dcn201_timing_generator_init(tgn10);
+
+ return &tgn10->base;
+}
+
+static const struct encoder_feature_support link_enc_feature = {
+ .max_hdmi_deep_color = COLOR_DEPTH_121212,
+ .max_hdmi_pixel_clock = 600000,
+ .hdmi_ycbcr420_supported = true,
+ .dp_ycbcr420_supported = true,
+ .fec_supported = true,
+ .flags.bits.IS_HBR2_CAPABLE = true,
+ .flags.bits.IS_HBR3_CAPABLE = true,
+ .flags.bits.IS_TPS3_CAPABLE = true,
+ .flags.bits.IS_TPS4_CAPABLE = true
+};
+
+struct link_encoder *dcn201_link_encoder_create(
+ const struct encoder_init_data *enc_init_data)
+{
+ struct dcn20_link_encoder *enc20 =
+ kzalloc(sizeof(struct dcn20_link_encoder), GFP_ATOMIC);
+ struct dcn10_link_encoder *enc10 = &enc20->enc10;
+
+ if (!enc20)
+ return NULL;
+
+ dcn201_link_encoder_construct(enc20,
+ enc_init_data,
+ &link_enc_feature,
+ &link_enc_regs[enc_init_data->transmitter],
+ &link_enc_aux_regs[enc_init_data->channel - 1],
+ &link_enc_hpd_regs[enc_init_data->hpd_source],
+ &le_shift,
+ &le_mask);
+
+ return &enc10->base;
+}
+
+struct clock_source *dcn201_clock_source_create(
+ struct dc_context *ctx,
+ struct dc_bios *bios,
+ enum clock_source_id id,
+ const struct dce110_clk_src_regs *regs,
+ bool dp_clk_src)
+{
+ struct dce110_clk_src *clk_src =
+ kzalloc(sizeof(struct dce110_clk_src), GFP_ATOMIC);
+
+ if (!clk_src)
+ return NULL;
+
+ if (dce112_clk_src_construct(clk_src, ctx, bios, id,
+ regs, &cs_shift, &cs_mask)) {
+ clk_src->base.dp_clk_src = dp_clk_src;
+ return &clk_src->base;
+ }
+ kfree(clk_src);
+ return NULL;
+}
+
+static void read_dce_straps(
+ struct dc_context *ctx,
+ struct resource_straps *straps)
+{
+ generic_reg_get(ctx, mmDC_PINSTRAPS + BASE(mmDC_PINSTRAPS_BASE_IDX),
+ FN(DC_PINSTRAPS, DC_PINSTRAPS_AUDIO), &straps->dc_pinstraps_audio);
+}
+
+static struct audio *dcn201_create_audio(
+ struct dc_context *ctx, unsigned int inst)
+{
+ return dce_audio_create(ctx, inst,
+ &audio_regs[inst], &audio_shift, &audio_mask);
+}
+
+static struct stream_encoder *dcn201_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn10_stream_encoder *enc1 =
+ kzalloc(sizeof(struct dcn10_stream_encoder), GFP_ATOMIC);
+
+ if (!enc1)
+ return NULL;
+
+ dcn20_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id,
+ &stream_enc_regs[eng_id],
+ &se_shift, &se_mask);
+
+ return &enc1->base;
+}
+
+static const struct dce_hwseq_registers hwseq_reg = {
+ HWSEQ_DCN201_REG_LIST()
+};
+
+static const struct dce_hwseq_shift hwseq_shift = {
+ HWSEQ_DCN201_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dce_hwseq_mask hwseq_mask = {
+ HWSEQ_DCN201_MASK_SH_LIST(_MASK)
+};
+
+static struct dce_hwseq *dcn201_hwseq_create(
+ struct dc_context *ctx)
+{
+ struct dce_hwseq *hws = kzalloc(sizeof(struct dce_hwseq), GFP_ATOMIC);
+
+ if (hws) {
+ hws->ctx = ctx;
+ hws->regs = &hwseq_reg;
+ hws->shifts = &hwseq_shift;
+ hws->masks = &hwseq_mask;
+ }
+ return hws;
+}
+
+static const struct resource_create_funcs res_create_funcs = {
+ .read_dce_straps = read_dce_straps,
+ .create_audio = dcn201_create_audio,
+ .create_stream_encoder = dcn201_stream_encoder_create,
+ .create_hwseq = dcn201_hwseq_create,
+};
+
+static const struct resource_create_funcs res_create_maximus_funcs = {
+ .read_dce_straps = NULL,
+ .create_audio = NULL,
+ .create_stream_encoder = NULL,
+ .create_hwseq = dcn201_hwseq_create,
+};
+
+void dcn201_clock_source_destroy(struct clock_source **clk_src)
+{
+ kfree(TO_DCE110_CLK_SRC(*clk_src));
+ *clk_src = NULL;
+}
+
+static void dcn201_resource_destruct(struct dcn201_resource_pool *pool)
+{
+ unsigned int i;
+
+ for (i = 0; i < pool->base.stream_enc_count; i++) {
+ if (pool->base.stream_enc[i] != NULL) {
+ kfree(DCN10STRENC_FROM_STRENC(pool->base.stream_enc[i]));
+ pool->base.stream_enc[i] = NULL;
+ }
+ }
+
+ if (pool->base.mpc != NULL) {
+ kfree(TO_DCN201_MPC(pool->base.mpc));
+ pool->base.mpc = NULL;
+ }
+
+ if (pool->base.hubbub != NULL) {
+ kfree(pool->base.hubbub);
+ pool->base.hubbub = NULL;
+ }
+
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ if (pool->base.dpps[i] != NULL)
+ dcn201_dpp_destroy(&pool->base.dpps[i]);
+
+ if (pool->base.ipps[i] != NULL)
+ pool->base.ipps[i]->funcs->ipp_destroy(&pool->base.ipps[i]);
+
+ if (pool->base.hubps[i] != NULL) {
+ kfree(TO_DCN10_HUBP(pool->base.hubps[i]));
+ pool->base.hubps[i] = NULL;
+ }
+
+ if (pool->base.irqs != NULL) {
+ dal_irq_service_destroy(&pool->base.irqs);
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ if (pool->base.opps[i] != NULL)
+ pool->base.opps[i]->funcs->opp_destroy(&pool->base.opps[i]);
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ if (pool->base.timing_generators[i] != NULL) {
+ kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
+ pool->base.timing_generators[i] = NULL;
+ }
+ }
+ for (i = 0; i < pool->base.audio_count; i++) {
+ if (pool->base.audios[i])
+ dce_aud_destroy(&pool->base.audios[i]);
+ }
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] != NULL) {
+ dcn201_clock_source_destroy(&pool->base.clock_sources[i]);
+ pool->base.clock_sources[i] = NULL;
+ }
+ }
+
+ if (pool->base.dp_clock_source != NULL) {
+ dcn201_clock_source_destroy(&pool->base.dp_clock_source);
+ pool->base.dp_clock_source = NULL;
+ }
+
+ if (pool->base.dccg != NULL)
+ dcn_dccg_destroy(&pool->base.dccg);
+}
+
+static struct hubp *dcn201_hubp_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn201_hubp *hubp201 =
+ kzalloc(sizeof(struct dcn201_hubp), GFP_ATOMIC);
+
+ if (!hubp201)
+ return NULL;
+
+ if (dcn201_hubp_construct(hubp201, ctx, inst,
+ &hubp_regs[inst], &hubp_shift, &hubp_mask))
+ return &hubp201->base;
+
+ kfree(hubp201);
+ return NULL;
+}
+
+static struct pipe_ctx *dcn201_acquire_idle_pipe_for_layer(
+ struct dc_state *context,
+ const struct resource_pool *pool,
+ struct dc_stream_state *stream)
+{
+ struct resource_context *res_ctx = &context->res_ctx;
+ struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
+ struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool, head_pipe);
+
+ if (!head_pipe)
+ ASSERT(0);
+
+ if (!idle_pipe)
+ return NULL;
+
+ idle_pipe->stream = head_pipe->stream;
+ idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
+ idle_pipe->stream_res.opp = head_pipe->stream_res.opp;
+
+ idle_pipe->plane_res.hubp = pool->hubps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.ipp = pool->ipps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.dpp = pool->dpps[idle_pipe->pipe_idx];
+ idle_pipe->plane_res.mpcc_inst = pool->dpps[idle_pipe->pipe_idx]->inst;
+
+ return idle_pipe;
+}
+
+static bool dcn201_get_dcc_compression_cap(const struct dc *dc,
+ const struct dc_dcc_surface_param *input,
+ struct dc_surface_dcc_cap *output)
+{
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub,
+ input,
+ output);
+}
+
+
+static void dcn201_destroy_resource_pool(struct resource_pool **pool)
+{
+ struct dcn201_resource_pool *dcn201_pool = TO_DCN201_RES_POOL(*pool);
+
+ dcn201_resource_destruct(dcn201_pool);
+ kfree(dcn201_pool);
+ *pool = NULL;
+}
+
+static void dcn201_link_init(struct dc_link *link)
+{
+ if (link->ctx->dc_bios->integrated_info)
+ link->dp_ss_off = !link->ctx->dc_bios->integrated_info->dp_ss_control;
+}
+
+static struct dc_cap_funcs cap_funcs = {
+ .get_dcc_compression_cap = dcn201_get_dcc_compression_cap,
+};
+
+static struct resource_funcs dcn201_res_pool_funcs = {
+ .link_init = dcn201_link_init,
+ .destroy = dcn201_destroy_resource_pool,
+ .link_enc_create = dcn201_link_encoder_create,
+ .panel_cntl_create = NULL,
+ .validate_bandwidth = dcn20_validate_bandwidth,
+ .populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
+ .add_stream_to_ctx = dcn20_add_stream_to_ctx,
+ .add_dsc_to_stream_resource = NULL,
+ .remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
+ .acquire_idle_pipe_for_layer = dcn201_acquire_idle_pipe_for_layer,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
+ .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
+ .set_mcif_arb_params = dcn20_set_mcif_arb_params,
+ .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
+};
+
+static bool dcn201_resource_construct(
+ uint8_t num_virtual_links,
+ struct dc *dc,
+ struct dcn201_resource_pool *pool)
+{
+ int i;
+ struct dc_context *ctx = dc->ctx;
+
+ ctx->dc_bios->regs = &bios_regs;
+
+ pool->base.res_cap = &res_cap_dnc201;
+ pool->base.funcs = &dcn201_res_pool_funcs;
+
+ /*************************************************
+ * Resource + asic cap hardcoding *
+ *************************************************/
+ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE;
+
+ pool->base.pipe_count = 4;
+ pool->base.mpcc_count = 5;
+ dc->caps.max_downscale_ratio = 200;
+ dc->caps.i2c_speed_in_khz = 100;
+ dc->caps.i2c_speed_in_khz_hdcp = 5; /*1.5 w/a applied by default*/
+ dc->caps.max_cursor_size = 256;
+ dc->caps.min_horizontal_blanking_period = 80;
+ dc->caps.dmdata_alloc_size = 2048;
+
+ dc->caps.max_slave_planes = 1;
+ dc->caps.max_slave_yuv_planes = 1;
+ dc->caps.max_slave_rgb_planes = 1;
+ dc->caps.post_blend_color_processing = true;
+ dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.extended_aux_timeout_support = true;
+
+ /* Color pipeline capabilities */
+ dc->caps.color.dpp.dcn_arch = 1;
+ dc->caps.color.dpp.input_lut_shared = 0;
+ dc->caps.color.dpp.icsc = 1;
+ dc->caps.color.dpp.dgam_ram = 1;
+ dc->caps.color.dpp.dgam_rom_caps.srgb = 1;
+ dc->caps.color.dpp.dgam_rom_caps.bt2020 = 1;
+ dc->caps.color.dpp.dgam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.dgam_rom_caps.pq = 0;
+ dc->caps.color.dpp.dgam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.post_csc = 0;
+ dc->caps.color.dpp.gamma_corr = 0;
+ dc->caps.color.dpp.dgam_rom_for_yuv = 1;
+
+ dc->caps.color.dpp.hw_3d_lut = 1;
+ dc->caps.color.dpp.ogam_ram = 1;
+ // no OGAM ROM on DCN2
+ dc->caps.color.dpp.ogam_rom_caps.srgb = 0;
+ dc->caps.color.dpp.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.dpp.ogam_rom_caps.pq = 0;
+ dc->caps.color.dpp.ogam_rom_caps.hlg = 0;
+ dc->caps.color.dpp.ocsc = 0;
+
+ dc->caps.color.mpc.gamut_remap = 0;
+ dc->caps.color.mpc.num_3dluts = 0;
+ dc->caps.color.mpc.shared_3d_lut = 0;
+ dc->caps.color.mpc.ogam_ram = 1;
+ dc->caps.color.mpc.ogam_rom_caps.srgb = 0;
+ dc->caps.color.mpc.ogam_rom_caps.bt2020 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.gamma2_2 = 0;
+ dc->caps.color.mpc.ogam_rom_caps.pq = 0;
+ dc->caps.color.mpc.ogam_rom_caps.hlg = 0;
+ dc->caps.color.mpc.ocsc = 1;
+
+ dc->debug = debug_defaults_drv;
+
+ /*a0 only, remove later*/
+ dc->work_arounds.no_connect_phy_config = true;
+ dc->work_arounds.dedcn20_305_wa = true;
+ /*************************************************
+ * Create resources *
+ *************************************************/
+
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL0] =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL0,
+ &clk_src_regs[0], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL1] =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL1,
+ &clk_src_regs[1], false);
+
+ pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN201;
+
+ /* TODO: do not reuse phy_pll registers */
+ pool->base.dp_clock_source =
+ dcn201_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_ID_DP_DTO,
+ &clk_src_regs[0], true);
+
+ for (i = 0; i < pool->base.clk_src_count; i++) {
+ if (pool->base.clock_sources[i] == NULL) {
+ dm_error("DC: failed to create clock sources!\n");
+ goto create_fail;
+ }
+ }
+
+ pool->base.dccg = dccg201_create(ctx, &dccg_regs, &dccg_shift, &dccg_mask);
+ if (pool->base.dccg == NULL) {
+ dm_error("DC: failed to create dccg!\n");
+ goto create_fail;
+ }
+
+ dcn201_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn201_ip.max_num_dpp = pool->base.pipe_count;
+ dml_init_instance(&dc->dml, &dcn201_soc, &dcn201_ip, DML_PROJECT_DCN201);
+ {
+ struct irq_service_init_data init_data;
+ init_data.ctx = dc->ctx;
+ pool->base.irqs = dal_irq_service_dcn201_create(&init_data);
+ if (!pool->base.irqs)
+ goto create_fail;
+ }
+
+ /* mem input -> ipp -> dpp -> opp -> TG */
+ for (i = 0; i < pool->base.pipe_count; i++) {
+ pool->base.hubps[i] = dcn201_hubp_create(ctx, i);
+ if (pool->base.hubps[i] == NULL) {
+ dm_error(
+ "DC: failed to create memory input!\n");
+ goto create_fail;
+ }
+
+ pool->base.ipps[i] = dcn201_ipp_create(ctx, i);
+ if (pool->base.ipps[i] == NULL) {
+ dm_error(
+ "DC: failed to create input pixel processor!\n");
+ goto create_fail;
+ }
+
+ pool->base.dpps[i] = dcn201_dpp_create(ctx, i);
+ if (pool->base.dpps[i] == NULL) {
+ dm_error(
+ "DC: failed to create dpps!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_opp; i++) {
+ pool->base.opps[i] = dcn201_opp_create(ctx, i);
+ if (pool->base.opps[i] == NULL) {
+ dm_error(
+ "DC: failed to create output pixel processor!\n");
+ goto create_fail;
+ }
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_ddc; i++) {
+ pool->base.engines[i] = dcn201_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ dm_error(
+ "DC: failed to create aux engine!\n");
+ goto create_fail;
+ }
+ pool->base.hw_i2cs[i] = dcn201_i2c_hw_create(ctx, i);
+ if (pool->base.hw_i2cs[i] == NULL) {
+ dm_error(
+ "DC: failed to create hw i2c!\n");
+ goto create_fail;
+ }
+ pool->base.sw_i2cs[i] = NULL;
+ }
+
+ for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) {
+ pool->base.timing_generators[i] = dcn201_timing_generator_create(
+ ctx, i);
+ if (pool->base.timing_generators[i] == NULL) {
+ dm_error("DC: failed to create tg!\n");
+ goto create_fail;
+ }
+ }
+
+ pool->base.timing_generator_count = i;
+
+ pool->base.mpc = dcn201_mpc_create(ctx, pool->base.mpcc_count);
+ if (pool->base.mpc == NULL) {
+ dm_error("DC: failed to create mpc!\n");
+ goto create_fail;
+ }
+
+ pool->base.hubbub = dcn201_hubbub_create(ctx);
+ if (pool->base.hubbub == NULL) {
+ dm_error("DC: failed to create hubbub!\n");
+ goto create_fail;
+ }
+
+ if (!resource_construct(num_virtual_links, dc, &pool->base,
+ (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
+ &res_create_funcs : &res_create_maximus_funcs)))
+ goto create_fail;
+
+ dcn201_hw_sequencer_construct(dc);
+
+ dc->caps.max_planes = pool->base.pipe_count;
+
+ for (i = 0; i < dc->caps.max_planes; ++i)
+ dc->caps.planes[i] = plane_cap;
+
+ dc->cap_funcs = cap_funcs;
+
+ return true;
+
+create_fail:
+
+ dcn201_resource_destruct(pool);
+
+ return false;
+}
+
+struct resource_pool *dcn201_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc)
+{
+ struct dcn201_resource_pool *pool =
+ kzalloc(sizeof(struct dcn201_resource_pool), GFP_ATOMIC);
+
+ if (!pool)
+ return NULL;
+
+ if (dcn201_resource_construct(init_data->num_virtual_links, dc, pool))
+ return &pool->base;
+
+ kfree(pool);
+ return NULL;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
new file mode 100644
index 000000000000..e0467d17d4ae
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_resource.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2017 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_RESOURCE_DCN201_H__
+#define __DC_RESOURCE_DCN201_H__
+
+#include "core_types.h"
+
+#define RRDPCS_PHY_DP_TX_PSTATE_POWER_UP 0x00000000
+#define RRDPCS_PHY_DP_TX_PSTATE_HOLD 0x00000001
+#define RRDPCS_PHY_DP_TX_PSTATE_HOLD_OFF 0x00000002
+#define RRDPCS_PHY_DP_TX_PSTATE_POWER_DOWN 0x00000003
+
+#define TO_DCN201_RES_POOL(pool)\
+ container_of(pool, struct dcn201_resource_pool, base)
+
+struct dc;
+struct resource_pool;
+struct _vcs_dpi_display_pipe_params_st;
+
+struct dcn201_resource_pool {
+ struct resource_pool base;
+};
+struct resource_pool *dcn201_create_resource_pool(
+ const struct dc_init_data *init_data,
+ struct dc *dc);
+
+#endif /* __DC_RESOURCE_DCN201_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index fbbdf9976183..d452a0d1777e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -35,7 +35,7 @@
#include "include/irq_service_interface.h"
#include "dcn20/dcn20_resource.h"
-#include "dml/dcn2x/dcn2x.h"
+#include "dml/dcn20/dcn20_fpu.h"
#include "clk_mgr.h"
#include "dcn10/dcn10_hubp.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
index fa981cd04dd0..95528e5ef89e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.c
@@ -44,11 +44,14 @@
afmt3->base.ctx
-static void afmt3_setup_hdmi_audio(
+void afmt3_setup_hdmi_audio(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+ if (afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
+
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
@@ -113,7 +116,7 @@ static union audio_cea_channels speakers_to_channels(
return cea_channels;
}
-static void afmt3_se_audio_setup(
+void afmt3_se_audio_setup(
struct afmt *afmt,
unsigned int az_inst,
struct audio_info *audio_info)
@@ -138,20 +141,24 @@ static void afmt3_se_audio_setup(
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, channels);
/* Disable forced mem power off */
- REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
+ if (afmt->funcs->afmt_poweron == NULL)
+ REG_UPDATE(AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, 0);
}
-static void afmt3_audio_mute_control(
+void afmt3_audio_mute_control(
struct afmt *afmt,
bool mute)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
-
+ if (mute && afmt->funcs->afmt_powerdown)
+ afmt->funcs->afmt_powerdown(afmt);
+ if (!mute && afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
/* enable/disable transmission of audio packets */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, !mute);
}
-static void afmt3_audio_info_immediate_update(
+void afmt3_audio_info_immediate_update(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
@@ -160,11 +167,14 @@ static void afmt3_audio_info_immediate_update(
REG_UPDATE(AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, 1);
}
-static void afmt3_setup_dp_audio(
+void afmt3_setup_dp_audio(
struct afmt *afmt)
{
struct dcn30_afmt *afmt3 = DCN30_AFMT_FROM_AFMT(afmt);
+ if (afmt->funcs->afmt_poweron)
+ afmt->funcs->afmt_poweron(afmt);
+
/* AFMT_AUDIO_PACKET_CONTROL */
REG_UPDATE(AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
index 85d4619207e2..97e0cf62f98e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_afmt.h
@@ -121,6 +121,12 @@ struct afmt_funcs {
void (*setup_dp_audio)(
struct afmt *afmt);
+
+ void (*afmt_poweron)(
+ struct afmt *afmt);
+
+ void (*afmt_powerdown)(
+ struct afmt *afmt);
};
struct afmt {
@@ -136,6 +142,24 @@ struct dcn30_afmt {
const struct dcn30_afmt_mask *afmt_mask;
};
+void afmt3_setup_hdmi_audio(
+ struct afmt *afmt);
+
+void afmt3_se_audio_setup(
+ struct afmt *afmt,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+void afmt3_audio_mute_control(
+ struct afmt *afmt,
+ bool mute);
+
+void afmt3_audio_info_immediate_update(
+ struct afmt *afmt);
+
+void afmt3_setup_dp_audio(
+ struct afmt *afmt);
+
void afmt3_construct(struct dcn30_afmt *afmt3,
struct dc_context *ctx,
uint32_t inst,
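
The afmt_poweron/afmt_powerdown hooks added to struct afmt_funcs above are optional: the DCN3.0 helpers only call them through a NULL check, so ASICs without AFMT memory power gating are unaffected, while DCN3.1 fills them in later in this patch. A minimal sketch of that guard pattern, using a hypothetical caller that is not part of the patch:

	/* Illustrative only: power the AFMT block up before touching its
	 * registers when the ASIC provides a hook, otherwise fall through.
	 */
	static void example_afmt_enable_audio(struct afmt *afmt)
	{
		if (afmt->funcs->afmt_poweron)
			afmt->funcs->afmt_poweron(afmt);

		/* ... program the audio packet registers here ... */
	}
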
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
index 46ea39f5ef8d..6f3c2fb60790 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_link_encoder.c
@@ -192,6 +192,10 @@ void dcn30_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
+ enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
+ enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
+ enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
index 8487516819ef..ebd9c35c914f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dio_stream_encoder.c
@@ -77,7 +77,8 @@ static void enc3_update_hdmi_info_packet(
enc1->base.vpg->funcs->update_generic_info_packet(
enc1->base.vpg,
packet_index,
- info_packet);
+ info_packet,
+ true);
/* enable transmission of packet(s) -
* packet transmission begins on the next frame */
@@ -335,7 +336,8 @@ static void enc3_dp_set_dsc_config(struct stream_encoder *enc,
static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
@@ -365,7 +367,8 @@ static void enc3_dp_set_dsc_pps_info_packet(struct stream_encoder *enc,
enc1->base.vpg->funcs->update_generic_info_packet(
enc1->base.vpg,
11 + i,
- &pps_sdp);
+ &pps_sdp,
+ immediate_update);
}
/* SW should make sure VBID[6] update line number is bigger
@@ -429,19 +432,22 @@ static void enc3_stream_encoder_update_dp_info_packets(
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
0, /* packetIndex */
- &info_frame->vsc);
+ &info_frame->vsc,
+ true);
}
if (info_frame->spd.valid) {
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
2, /* packetIndex */
- &info_frame->spd);
+ &info_frame->spd,
+ true);
}
if (info_frame->hdrsmd.valid) {
enc->vpg->funcs->update_generic_info_packet(
enc->vpg,
3, /* packetIndex */
- &info_frame->hdrsmd);
+ &info_frame->hdrsmd,
+ true);
}
/* packetIndex 4 is used for send immediate sdp message, and please
* use other packetIndex (such as 5,6) for other info packet
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
index 23a52d47e61c..c1d967ed6551 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c
@@ -488,6 +488,54 @@ void dpp3_cnv_set_bias_scale(
REG_UPDATE(FCNV_FP_SCALE_B, FCNV_FP_SCALE_B, bias_and_scale->scale_blue);
}
+void dpp3_deferred_update(
+ struct dpp *dpp_base)
+{
+ int bypass_state;
+ struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
+
+ if (dpp_base->deferred_reg_writes.bits.disable_dscl) {
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 3);
+ dpp_base->deferred_reg_writes.bits.disable_dscl = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_gamcor) {
+ REG_GET(CM_GAMCOR_CONTROL, CM_GAMCOR_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 3);
+ } else
+ ASSERT(0); // LUT select was updated again before vupdate
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_blnd_lut) {
+ REG_GET(CM_BLNDGAM_CONTROL, CM_BLNDGAM_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 3);
+ } else
+ ASSERT(0); // LUT select was updated again before vupdate
+ dpp_base->deferred_reg_writes.bits.disable_blnd_lut = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_3dlut) {
+ REG_GET(CM_3DLUT_MODE, CM_3DLUT_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 3);
+ } else
+ ASSERT(0); // LUT select was updated again before vupdate
+ dpp_base->deferred_reg_writes.bits.disable_3dlut = false;
+ }
+
+ if (dpp_base->deferred_reg_writes.bits.disable_shaper) {
+ REG_GET(CM_SHAPER_CONTROL, CM_SHAPER_MODE_CURRENT, &bypass_state);
+ if (bypass_state == 0) { // only program if bypass was latched
+ REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 3);
+ } else
+ ASSERT(0); // LUT select was updated again before vupdate
+ dpp_base->deferred_reg_writes.bits.disable_shaper = false;
+ }
+}
+
static void dpp3_power_on_blnd_lut(
struct dpp *dpp_base,
bool power_on)
@@ -495,9 +543,13 @@ static void dpp3_power_on_blnd_lut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, BLNDGAM_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_blnd_lut = true;
+ }
} else {
REG_SET(CM_MEM_PWR_CTRL, 0,
BLNDGAM_MEM_PWR_FORCE, power_on == true ? 0 : 1);
@@ -511,9 +563,13 @@ static void dpp3_power_on_hdr3dlut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL2, HDR3DLUT_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS2, HDR3DLUT_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_3dlut = true;
+ }
}
}
@@ -524,9 +580,13 @@ static void dpp3_power_on_shaper(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL2, SHAPER_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS2, SHAPER_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_shaper = true;
+ }
}
}
@@ -1400,6 +1460,7 @@ static struct dpp_funcs dcn30_dpp_funcs = {
.dpp_program_blnd_lut = dpp3_program_blnd_lut,
.dpp_program_shaper_lut = dpp3_program_shaper,
.dpp_program_3dlut = dpp3_program_3dlut,
+ .dpp_deferred_update = dpp3_deferred_update,
.dpp_program_bias_and_scale = NULL,
.dpp_cnv_set_alpha_keyer = dpp2_cnv_set_alpha_keyer,
.set_cursor_attributes = dpp3_set_cursor_attributes,
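
The hunk above changes CM LUT power gating from an immediate MEM_PWR write into a deferred one: dpp3_power_on_* now only records a disable request (and sets dc->optimized_required), and dpp3_deferred_update applies the force-off later, once the *_MODE_CURRENT field shows the LUT has actually latched into bypass. A rough sketch of this request/apply split, with assumed names that are not the driver's real structures:

	/* Request now, apply later: power is only cut once the hardware
	 * reports the LUT is in bypass, so an in-flight LUT is never gated.
	 */
	struct lut_power_state {
		bool disable_pending;  /* set when the LUT is turned off    */
		bool lut_in_bypass;    /* latched HW state, read at vupdate */
	};

	static void lut_power_off_request(struct lut_power_state *s)
	{
		s->disable_pending = true;   /* no MEM_PWR write yet */
	}

	static void lut_deferred_update(struct lut_power_state *s)
	{
		if (s->disable_pending && s->lut_in_bypass) {
			/* safe point: force the LUT memory off here */
			s->disable_pending = false;
		}
	}
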
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
index 72c5687adc68..387eec616162 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp_cm.c
@@ -136,9 +136,13 @@ static void dpp3_power_on_gamcor_lut(
struct dcn3_dpp *dpp = TO_DCN30_DPP(dpp_base);
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
- REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, power_on ? 0 : 3);
- if (power_on)
+ if (power_on) {
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
+ } else {
+ dpp_base->ctx->dc->optimized_required = true;
+ dpp_base->deferred_reg_writes.bits.disable_gamcor = true;
+ }
} else
REG_SET(CM_MEM_PWR_CTRL, 0,
GAMCOR_MEM_PWR_DIS, power_on == true ? 0:1);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
index f24612523248..eac08926b574 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hubp.c
@@ -356,12 +356,6 @@ void hubp3_dcc_control_sienna_cichlid(struct hubp *hubp,
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- /*Workaround until UMD fix the new dcc_ind_blk interface */
- if (dcc->independent_64b_blks && dcc->dcc_ind_blk == 0)
- dcc->dcc_ind_blk = 1;
- if (dcc->independent_64b_blks_c && dcc->dcc_ind_blk_c == 0)
- dcc->dcc_ind_blk_c = 1;
-
REG_UPDATE_6(DCSURF_SURFACE_CONTROL,
PRIMARY_SURFACE_DCC_EN, dcc->enable,
PRIMARY_SURFACE_DCC_IND_BLK, dcc->dcc_ind_blk,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
index fafed1e4a998..df2717116604 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
@@ -559,7 +559,7 @@ void dcn30_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -1002,7 +1002,8 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
/* turning off DPG */
pipe_ctx->plane_res.hubp->funcs->set_blank(pipe_ctx->plane_res.hubp, false);
for (mpcc_pipe = pipe_ctx->bottom_pipe; mpcc_pipe; mpcc_pipe = mpcc_pipe->bottom_pipe)
- mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
+ if (mpcc_pipe->plane_res.hubp)
+ mpcc_pipe->plane_res.hubp->funcs->set_blank(mpcc_pipe->plane_res.hubp, false);
stream_res->opp->funcs->opp_set_disp_pattern_generator(stream_res->opp, test_pattern, color_space,
color_depth, solid_color, width, height, offset);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
index 3a5b53dd2f6d..93f32a312fee 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c
@@ -100,6 +100,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = {
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.get_dcc_en_bits = dcn10_get_dcc_en_bits,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
+ .is_abm_supported = dcn21_is_abm_supported
};
static const struct hwseq_private_funcs dcn30_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
index 089be7347591..5d9e6413d67a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_optc.c
@@ -73,16 +73,23 @@ void optc3_lock_doublebuffer_enable(struct timing_generator *optc)
OTG_H_BLANK_END, &h_blank_end);
REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
- MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start,
- MASTER_UPDATE_LOCK_DB_END_Y, v_blank_end);
+ MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start - 1,
+ MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start);
REG_UPDATE_2(OTG_GLOBAL_CONTROL4,
- DIG_UPDATE_POSITION_X, 20,
- DIG_UPDATE_POSITION_Y, v_blank_start);
+ DIG_UPDATE_POSITION_X, h_blank_start - 180 - 1,
+ DIG_UPDATE_POSITION_Y, v_blank_start - 1);
+ // Note: a DIG_UPDATE_VCOUNT_MODE field also exists; it is 0 here.
+
REG_UPDATE_3(OTG_GLOBAL_CONTROL0,
MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1,
- MASTER_UPDATE_LOCK_DB_END_X, h_blank_end,
+ MASTER_UPDATE_LOCK_DB_END_X, h_blank_start - 180,
MASTER_UPDATE_LOCK_DB_EN, 1);
REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
}
void optc3_lock_doublebuffer_disable(struct timing_generator *optc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
index a0de309475a9..e50c695e3c96 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c
@@ -1164,8 +1164,12 @@ struct stream_encoder *dcn30_stream_encoder_create(
vpg = dcn30_vpg_create(ctx, vpg_inst);
afmt = dcn30_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
@@ -1703,9 +1707,6 @@ bool dcn30_release_post_bldn_3dlut(
return ret;
}
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
-
static bool is_soc_bounding_box_valid(struct dc *dc)
{
uint32_t hw_internal_rev = dc->ctx->asic_id.hw_internal_rev;
@@ -1856,7 +1857,7 @@ static struct pipe_ctx *dcn30_find_split_pipe(
return pipe;
}
-static noinline bool dcn30_internal_validate_bw(
+noinline bool dcn30_internal_validate_bw(
struct dc *dc,
struct dc_state *context,
display_e2e_pipe_params_st *pipes,
@@ -1925,23 +1926,25 @@ static noinline bool dcn30_internal_validate_bw(
if (vlevel == context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
+ if (!dc->config.enable_windowed_mpo_odm) {
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *mpo_pipe = pipe->bottom_pipe;
- if (!pipe->stream)
- continue;
+ if (!pipe->stream)
+ continue;
- /* We only support full screen mpo with ODM */
- if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
- && pipe->plane_state && mpo_pipe
- && memcmp(&mpo_pipe->plane_res.scl_data.recout,
- &pipe->plane_res.scl_data.recout,
- sizeof(struct rect)) != 0) {
- ASSERT(mpo_pipe->plane_state != pipe->plane_state);
- goto validate_fail;
+ /* We only support full screen mpo with ODM */
+ if (vba->ODMCombineEnabled[vba->pipe_plane[pipe_idx]] != dm_odm_combine_mode_disabled
+ && pipe->plane_state && mpo_pipe
+ && memcmp(&mpo_pipe->plane_res.scl_data.recout,
+ &pipe->plane_res.scl_data.recout,
+ sizeof(struct rect)) != 0) {
+ ASSERT(mpo_pipe->plane_state != pipe->plane_state);
+ goto validate_fail;
+ }
+ pipe_idx++;
}
- pipe_idx++;
}
/* merge pipes if necessary */
@@ -2319,7 +2322,9 @@ bool dcn30_validate_bandwidth(struct dc *dc,
goto validate_out;
}
+ DC_FP_START();
dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+ DC_FP_END();
BW_VAL_TRACE_END_WATERMARKS();
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
index b754b89beadf..b92e4cc0232f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.h
@@ -55,6 +55,13 @@ unsigned int dcn30_calc_max_scaled_time(
bool dcn30_validate_bandwidth(struct dc *dc, struct dc_state *context,
bool fast_validate);
+bool dcn30_internal_validate_bw(
+ struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int *pipe_cnt_out,
+ int *vlevel_out,
+ bool fast_validate);
void dcn30_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
index 8cfd181b4d5f..14bc44b1f886 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.c
@@ -43,10 +43,11 @@
vpg3->base.ctx
-static void vpg3_update_generic_info_packet(
+void vpg3_update_generic_info_packet(
struct vpg *vpg,
uint32_t packet_index,
- const struct dc_info_packet *info_packet)
+ const struct dc_info_packet *info_packet,
+ bool immediate_update)
{
struct dcn30_vpg *vpg3 = DCN30_VPG_FROM_VPG(vpg);
uint32_t i;
@@ -106,69 +107,138 @@ static void vpg3_update_generic_info_packet(
/* atomically update double-buffered GENERIC0 registers in immediate mode
* (update at next block_update when block_update_lock == 0).
*/
- switch (packet_index) {
- case 0:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC0_IMMEDIATE_UPDATE, 1);
- break;
- case 1:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC1_IMMEDIATE_UPDATE, 1);
- break;
- case 2:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC2_IMMEDIATE_UPDATE, 1);
- break;
- case 3:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC3_IMMEDIATE_UPDATE, 1);
- break;
- case 4:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC4_IMMEDIATE_UPDATE, 1);
- break;
- case 5:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC5_IMMEDIATE_UPDATE, 1);
- break;
- case 6:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC6_IMMEDIATE_UPDATE, 1);
- break;
- case 7:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC7_IMMEDIATE_UPDATE, 1);
- break;
- case 8:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC8_IMMEDIATE_UPDATE, 1);
- break;
- case 9:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC9_IMMEDIATE_UPDATE, 1);
- break;
- case 10:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC10_IMMEDIATE_UPDATE, 1);
- break;
- case 11:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC11_IMMEDIATE_UPDATE, 1);
- break;
- case 12:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC12_IMMEDIATE_UPDATE, 1);
- break;
- case 13:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC13_IMMEDIATE_UPDATE, 1);
- break;
- case 14:
- REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
- VPG_GENERIC14_IMMEDIATE_UPDATE, 1);
- break;
- default:
- break;
+ if (immediate_update) {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC0_IMMEDIATE_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC1_IMMEDIATE_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC2_IMMEDIATE_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC3_IMMEDIATE_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC4_IMMEDIATE_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC5_IMMEDIATE_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC6_IMMEDIATE_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC7_IMMEDIATE_UPDATE, 1);
+ break;
+ case 8:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC8_IMMEDIATE_UPDATE, 1);
+ break;
+ case 9:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC9_IMMEDIATE_UPDATE, 1);
+ break;
+ case 10:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC10_IMMEDIATE_UPDATE, 1);
+ break;
+ case 11:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC11_IMMEDIATE_UPDATE, 1);
+ break;
+ case 12:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC12_IMMEDIATE_UPDATE, 1);
+ break;
+ case 13:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC13_IMMEDIATE_UPDATE, 1);
+ break;
+ case 14:
+ REG_UPDATE(VPG_GSP_IMMEDIATE_UPDATE_CTRL,
+ VPG_GENERIC14_IMMEDIATE_UPDATE, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (packet_index) {
+ case 0:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC0_FRAME_UPDATE, 1);
+ break;
+ case 1:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC1_FRAME_UPDATE, 1);
+ break;
+ case 2:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC2_FRAME_UPDATE, 1);
+ break;
+ case 3:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC3_FRAME_UPDATE, 1);
+ break;
+ case 4:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC4_FRAME_UPDATE, 1);
+ break;
+ case 5:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC5_FRAME_UPDATE, 1);
+ break;
+ case 6:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC6_FRAME_UPDATE, 1);
+ break;
+ case 7:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC7_FRAME_UPDATE, 1);
+ break;
+ case 8:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC8_FRAME_UPDATE, 1);
+ break;
+ case 9:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC9_FRAME_UPDATE, 1);
+ break;
+ case 10:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC10_FRAME_UPDATE, 1);
+ break;
+ case 11:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC11_FRAME_UPDATE, 1);
+ break;
+ case 12:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC12_FRAME_UPDATE, 1);
+ break;
+ case 13:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC13_FRAME_UPDATE, 1);
+ break;
+ case 14:
+ REG_UPDATE(VPG_GSP_FRAME_UPDATE_CTRL,
+ VPG_GENERIC14_FRAME_UPDATE, 1);
+ break;
+
+ default:
+ break;
+ }
+
}
}
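
With the new immediate_update parameter, vpg3_update_generic_info_packet latches a GENERIC packet either through VPG_GSP_IMMEDIATE_UPDATE_CTRL (takes effect right away) or through VPG_GSP_FRAME_UPDATE_CTRL (takes effect at the next frame). A small sketch of how a caller might pick the flag; the wrapper name below is illustrative, only the vpg funcs call itself comes from the patch:

	/* true  -> immediate update (VPG_GSP_IMMEDIATE_UPDATE_CTRL)
	 * false -> update at the next frame (VPG_GSP_FRAME_UPDATE_CTRL)
	 */
	static void example_send_info_packet(struct vpg *vpg, uint32_t index,
			const struct dc_info_packet *pkt, bool needs_immediate)
	{
		vpg->funcs->update_generic_info_packet(vpg, index, pkt,
				needs_immediate);
	}
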
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
index 6161e9e66355..ed9a5549c389 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_vpg.h
@@ -138,7 +138,14 @@ struct vpg_funcs {
void (*update_generic_info_packet)(
struct vpg *vpg,
uint32_t packet_index,
- const struct dc_info_packet *info_packet);
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
+ void (*vpg_poweron)(
+ struct vpg *vpg);
+
+ void (*vpg_powerdown)(
+ struct vpg *vpg);
};
struct vpg {
@@ -154,6 +161,12 @@ struct dcn30_vpg {
const struct dcn30_vpg_mask *vpg_mask;
};
+void vpg3_update_generic_info_packet(
+ struct vpg *vpg,
+ uint32_t packet_index,
+ const struct dc_info_packet *info_packet,
+ bool immediate_update);
+
void vpg3_construct(struct dcn30_vpg *vpg3,
struct dc_context *ctx,
uint32_t inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
index 09264716d1dc..7aa628c21973 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile
@@ -13,32 +13,6 @@
DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \
dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o
-ifdef CONFIG_X86
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse
-endif
-
-ifdef CONFIG_PPC64
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mpreferred-stack-boundary=4
-else
-CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -msse2
-endif
-endif
-
AMD_DAL_DCN301 = $(addprefix $(AMDDALPATH)/dc/dcn301/,$(DCN301))
AMD_DISPLAY_FILES += $(AMD_DAL_DCN301)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index 912285fdce18..fbaa03f26d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -82,6 +82,7 @@
#include "dce/dce_i2c.h"
#include "dml/dcn30/display_mode_vba_30.h"
+#include "dml/dcn301/dcn301_fpu.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
#include "amdgpu_socbb.h"
@@ -91,184 +92,6 @@
#define DC_LOGGER_INIT(logger)
-struct _vcs_dpi_ip_params_st dcn3_01_ip = {
- .odm_capable = 1,
- .gpuvm_enable = 1,
- .hostvm_enable = 1,
- .gpuvm_max_page_table_levels = 1,
- .hostvm_max_page_table_levels = 2,
- .hostvm_cached_page_table_levels = 0,
- .pte_group_size_bytes = 2048,
- .num_dsc = 3,
- .rob_buffer_size_kbytes = 184,
- .det_buffer_size_kbytes = 184,
- .dpte_buffer_size_in_pte_reqs_luma = 64,
- .dpte_buffer_size_in_pte_reqs_chroma = 32,
- .pde_proc_buffer_size_64k_reqs = 48,
- .dpp_output_buffer_pixels = 2560,
- .opp_output_buffer_lines = 1,
- .pixel_chunk_size_kbytes = 8,
- .meta_chunk_size_kbytes = 2,
- .writeback_chunk_size_kbytes = 8,
- .line_buffer_size_bits = 789504,
- .is_line_buffer_bpp_fixed = 0, // ?
- .line_buffer_fixed_bpp = 48, // ?
- .dcc_supported = true,
- .writeback_interface_buffer_size_kbytes = 90,
- .writeback_line_buffer_buffer_size = 656640,
- .max_line_buffer_lines = 12,
- .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
- .writeback_chroma_buffer_size_kbytes = 8,
- .writeback_chroma_line_buffer_width_pixels = 4,
- .writeback_max_hscl_ratio = 1,
- .writeback_max_vscl_ratio = 1,
- .writeback_min_hscl_ratio = 1,
- .writeback_min_vscl_ratio = 1,
- .writeback_max_hscl_taps = 1,
- .writeback_max_vscl_taps = 1,
- .writeback_line_buffer_luma_buffer_size = 0,
- .writeback_line_buffer_chroma_buffer_size = 14643,
- .cursor_buffer_size = 8,
- .cursor_chunk_size = 2,
- .max_num_otg = 4,
- .max_num_dpp = 4,
- .max_num_wb = 1,
- .max_dchub_pscl_bw_pix_per_clk = 4,
- .max_pscl_lb_bw_pix_per_clk = 2,
- .max_lb_vscl_bw_pix_per_clk = 4,
- .max_vscl_hscl_bw_pix_per_clk = 4,
- .max_hscl_ratio = 6,
- .max_vscl_ratio = 6,
- .hscl_mults = 4,
- .vscl_mults = 4,
- .max_hscl_taps = 8,
- .max_vscl_taps = 8,
- .dispclk_ramp_margin_percent = 1,
- .underscan_factor = 1.11,
- .min_vblank_lines = 32,
- .dppclk_delay_subtotal = 46,
- .dynamic_metadata_vm_enabled = true,
- .dppclk_delay_scl_lb_only = 16,
- .dppclk_delay_scl = 50,
- .dppclk_delay_cnvc_formatter = 27,
- .dppclk_delay_cnvc_cursor = 6,
- .dispclk_delay_subtotal = 119,
- .dcfclk_cstate_latency = 5.2, // SRExitTime
- .max_inter_dcn_tile_repeaters = 8,
- .max_num_hdmi_frl_outputs = 0,
- .odm_combine_4to1_supported = true,
-
- .xfc_supported = false,
- .xfc_fill_bw_overhead_percent = 10.0,
- .xfc_fill_constant_bytes = 0,
- .gfx7_compat_tiling_supported = 0,
- .number_of_cursors = 1,
-};
-
-struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
- .clock_limits = {
- {
- .state = 0,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 600,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 1,
- .dram_speed_mts = 2400.0,
- .fabricclk_mhz = 688,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 400.0,
- .dscclk_mhz = 206.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 600.0,
- },
- {
- .state = 2,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 278.0,
- .dcfclk_mhz = 608.0,
- .dscclk_mhz = 296.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 3,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 715.0,
- .dcfclk_mhz = 676.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
-
- {
- .state = 4,
- .dram_speed_mts = 4267.0,
- .fabricclk_mhz = 1067,
- .socclk_mhz = 953.0,
- .dcfclk_mhz = 810.0,
- .dscclk_mhz = 338.0,
- .dppclk_mhz = 1015.0,
- .dispclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
- },
- },
-
- .sr_exit_time_us = 9.0,
- .sr_enter_plus_exit_time_us = 11.0,
- .urgent_latency_us = 4.0,
- .urgent_latency_pixel_data_only_us = 4.0,
- .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
- .urgent_latency_vm_data_only_us = 4.0,
- .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
- .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
- .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
- .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
- .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
- .max_avg_sdp_bw_use_normal_percent = 60.0,
- .max_avg_dram_bw_use_normal_percent = 60.0,
- .writeback_latency_us = 12.0,
- .max_request_size_bytes = 256,
- .dram_channel_width_bytes = 4,
- .fabric_datapath_to_dcn_data_return_bytes = 32,
- .dcn_downspread_percent = 0.5,
- .downspread_percent = 0.38,
- .dram_page_open_time_ns = 50.0,
- .dram_rw_turnaround_time_ns = 17.5,
- .dram_return_buffer_per_channel_bytes = 8192,
- .round_trip_ping_latency_dcfclk_cycles = 191,
- .urgent_out_of_order_return_per_channel_bytes = 4096,
- .channel_interleave_bytes = 256,
- .num_banks = 8,
- .num_chans = 4,
- .gpuvm_min_page_size_bytes = 4096,
- .hostvm_min_page_size_bytes = 4096,
- .dram_clock_change_latency_us = 23.84,
- .writeback_dram_clock_change_latency_us = 23.0,
- .return_bus_width_bytes = 64,
- .dispclk_dppclk_vco_speed_mhz = 3550,
- .xfc_bus_transport_time_us = 20, // ?
- .xfc_xbuf_latency_tolerance_us = 4, // ?
- .use_urgent_burst_bw = 1, // ?
- .num_states = 5,
- .do_urgent_latency_adjustment = false,
- .urgent_latency_adjustment_fabric_clock_component_us = 0,
- .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
-};
-
enum dcn301_clk_src_array_id {
DCN301_CLK_SRC_PLL0,
DCN301_CLK_SRC_PLL1,
@@ -1195,8 +1018,12 @@ struct stream_encoder *dcn301_stream_encoder_create(
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios,
eng_id, vpg, afmt,
@@ -1476,8 +1303,6 @@ static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
static bool is_soc_bounding_box_valid(struct dc *dc)
{
@@ -1504,26 +1329,24 @@ static bool init_soc_bounding_box(struct dc *dc,
loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator;
loaded_ip->max_num_dpp = pool->base.pipe_count;
+ DC_FP_START();
dcn20_patch_bounding_box(dc, loaded_bb);
+ DC_FP_END();
if (dc->ctx->dc_bios->funcs->get_soc_bb_info) {
struct bp_soc_bb_info bb_info = {0};
if (dc->ctx->dc_bios->funcs->get_soc_bb_info(dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) {
- if (bb_info.dram_clock_change_latency_100ns > 0)
- dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
-
- if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
- dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
-
- if (bb_info.dram_sr_exit_latency_100ns > 0)
- dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+ DC_FP_START();
+ dcn301_fpu_init_soc_bounding_box(bb_info);
+ DC_FP_END();
}
}
return true;
}
+
static void set_wm_ranges(
struct pp_smu_funcs *pp_smu,
struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
@@ -1546,9 +1369,9 @@ static void set_wm_ranges(
ranges.reader_wm_sets[i].wm_inst = i;
ranges.reader_wm_sets[i].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
ranges.reader_wm_sets[i].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- ranges.reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
- ranges.reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
-
+ DC_FP_START();
+ dcn301_fpu_set_wm_ranges(i, &ranges, loaded_bb);
+ DC_FP_END();
ranges.num_reader_wm_sets = i + 1;
}
@@ -1568,66 +1391,12 @@ static void set_wm_ranges(
pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges);
}
-static void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
-{
- struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
- struct clk_limit_table *clk_table = &bw_params->clk_table;
- struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
- unsigned int i, closest_clk_lvl;
- int j;
-
- // Default clock levels are used for diags, which may lead to overclocking.
- if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
- dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
- dcn3_01_soc.num_chans = bw_params->num_channels;
-
- ASSERT(clk_table->num_entries);
- for (i = 0; i < clk_table->num_entries; i++) {
- /* loop backwards*/
- for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
- if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
- closest_clk_lvl = j;
- break;
- }
- }
-
- clock_limits[i].state = i;
- clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
-
- clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
- clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
- clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
- clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
- clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
- clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
- clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
- }
- for (i = 0; i < clk_table->num_entries; i++)
- dcn3_01_soc.clock_limits[i] = clock_limits[i];
- if (clk_table->num_entries) {
- dcn3_01_soc.num_states = clk_table->num_entries;
- /* duplicate last level */
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
- dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
- }
- }
-
- dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
- dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
-
- dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
-}
-
static struct resource_funcs dcn301_res_pool_funcs = {
.destroy = dcn301_destroy_resource_pool,
.link_enc_create = dcn301_link_encoder_create,
.panel_cntl_create = dcn301_panel_cntl_create,
.validate_bandwidth = dcn30_validate_bandwidth,
- .calculate_wm_and_dlg = dcn30_calculate_wm_and_dlg,
+ .calculate_wm_and_dlg = dcn301_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn30_update_soc_for_wm_a,
.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
index 17e4e91ff4b8..ae8672680cdd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.h
@@ -32,6 +32,9 @@ struct dc;
struct resource_pool;
struct _vcs_dpi_display_pipe_params_st;
+extern struct _vcs_dpi_ip_params_st dcn3_01_ip;
+extern struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc;
+
struct dcn301_resource_pool {
struct resource_pool base;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
index 7d3ff5d44402..fcf96cf08c76 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c
@@ -542,8 +542,12 @@ static struct stream_encoder *dcn302_stream_encoder_create(enum engine_id eng_id
vpg = dcn302_vpg_create(ctx, vpg_inst);
afmt = dcn302_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
dcn30_dio_stream_encoder_construct(enc1, ctx, ctx->dc_bios, eng_id, vpg, afmt, &stream_enc_regs[eng_id],
&se_shift, &se_mask);
@@ -1462,7 +1466,7 @@ static const struct dccg_mask dccg_mask = {
};
#define abm_regs(id)\
- [id] = { ABM_DCN301_REG_LIST(id) }
+ [id] = { ABM_DCN302_REG_LIST(id) }
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
diff --git a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
index dd38796ba30a..4a9b64023675 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn303/dcn303_resource.c
@@ -1344,6 +1344,20 @@ void dcn303_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
dcn3_03_soc.clock_limits[i].phyclk_d18_mhz = dcn3_03_soc.clock_limits[0].phyclk_d18_mhz;
dcn3_03_soc.clock_limits[i].dscclk_mhz = dcn3_03_soc.clock_limits[0].dscclk_mhz;
}
+
+ // WA: patch strobe modes to compensate for DCN303 BW issue
+ if (dcn3_03_soc.num_chans <= 4) {
+ for (i = 0; i < dcn3_03_soc.num_states; i++) {
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts > 1700)
+ break;
+
+ if (dcn3_03_soc.clock_limits[i].dram_speed_mts >= 1500) {
+ dcn3_03_soc.clock_limits[i].dcfclk_mhz = 100;
+ dcn3_03_soc.clock_limits[i].fabricclk_mhz = 100;
+ }
+ }
+ }
+
/* re-init DML with updated bb */
dml_init_instance(&dc->dml, &dcn3_03_soc, &dcn3_03_ip, DML_PROJECT_DCN30);
if (dc->current_state)
@@ -1394,7 +1408,7 @@ static const struct dccg_mask dccg_mask = {
};
#define abm_regs(id)\
- [id] = { ABM_DCN301_REG_LIST(id) }
+ [id] = { ABM_DCN302_REG_LIST(id) }
static const struct dce_abm_registers abm_regs[] = {
abm_regs(0),
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
index 4bab97acb155..d20e3b8ccc30 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/Makefile
@@ -11,7 +11,9 @@
# Makefile for dcn31.
DCN31 = dcn31_resource.o dcn31_hubbub.o dcn31_hwseq.o dcn31_init.o dcn31_hubp.o \
- dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o
+ dcn31_dccg.o dcn31_optc.o dcn31_dio_link_encoder.o dcn31_panel_cntl.o \
+ dcn31_apg.o dcn31_hpo_dp_stream_encoder.o dcn31_hpo_dp_link_encoder.o \
+ dcn31_afmt.o dcn31_vpg.o
ifdef CONFIG_X86
CFLAGS_$(AMDDALPATH)/dc/dcn31/dcn31_resource.o := -msse
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
new file mode 100644
index 000000000000..d380a8ec2184
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn30/dcn30_afmt.h"
+#include "dcn31_afmt.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ afmt31->base.ctx->logger
+
+#define REG(reg)\
+ (afmt31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ afmt31->afmt_shift->field_name, afmt31->afmt_mask->field_name
+
+
+#define CTX \
+ afmt31->base.ctx
+
+static struct afmt_funcs dcn31_afmt_funcs = {
+ .setup_hdmi_audio = afmt3_setup_hdmi_audio,
+ .se_audio_setup = afmt3_se_audio_setup,
+ .audio_mute_control = afmt3_audio_mute_control,
+ .audio_info_immediate_update = afmt3_audio_info_immediate_update,
+ .setup_dp_audio = afmt3_setup_dp_audio,
+ .afmt_powerdown = afmt31_powerdown,
+ .afmt_poweron = afmt31_poweron
+};
+
+void afmt31_powerdown(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 0, AFMT_MEM_PWR_FORCE, 1);
+}
+
+void afmt31_poweron(struct afmt *afmt)
+{
+ struct dcn31_afmt *afmt31 = DCN31_AFMT_FROM_AFMT(afmt);
+
+ if (afmt->ctx->dc->debug.enable_mem_low_power.bits.afmt == false)
+ return;
+
+ REG_UPDATE_2(AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, 1, AFMT_MEM_PWR_FORCE, 0);
+}
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask)
+{
+ afmt31->base.ctx = ctx;
+
+ afmt31->base.inst = inst;
+ afmt31->base.funcs = &dcn31_afmt_funcs;
+
+ afmt31->regs = afmt_regs;
+ afmt31->afmt_shift = afmt_shift;
+ afmt31->afmt_mask = afmt_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
new file mode 100644
index 000000000000..802cb05b6ab9
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_afmt.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_AFMT_H__
+#define __DAL_DCN31_AFMT_H__
+
+
+#define DCN31_AFMT_FROM_AFMT(afmt)\
+ container_of(afmt, struct dcn31_afmt, base)
+
+#define AFMT_DCN31_REG_LIST(id) \
+ SRI(AFMT_INFOFRAME_CONTROL0, AFMT, id), \
+ SRI(AFMT_VBI_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL, AFMT, id), \
+ SRI(AFMT_AUDIO_PACKET_CONTROL2, AFMT, id), \
+ SRI(AFMT_AUDIO_SRC_CONTROL, AFMT, id), \
+ SRI(AFMT_60958_0, AFMT, id), \
+ SRI(AFMT_60958_1, AFMT, id), \
+ SRI(AFMT_60958_2, AFMT, id), \
+ SRI(AFMT_MEM_PWR, AFMT, id)
+
+struct dcn31_afmt_registers {
+ uint32_t AFMT_INFOFRAME_CONTROL0;
+ uint32_t AFMT_VBI_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL;
+ uint32_t AFMT_AUDIO_PACKET_CONTROL2;
+ uint32_t AFMT_AUDIO_SRC_CONTROL;
+ uint32_t AFMT_60958_0;
+ uint32_t AFMT_60958_1;
+ uint32_t AFMT_60958_2;
+ uint32_t AFMT_MEM_PWR;
+};
+
+#define DCN31_AFMT_MASK_SH_LIST(mask_sh)\
+ SE_SF(AFMT0_AFMT_INFOFRAME_CONTROL0, AFMT_AUDIO_INFO_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_SRC_CONTROL, AFMT_AUDIO_SRC_SELECT, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_60958_CS_UPDATE, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_AUDIO_LAYOUT_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL2, AFMT_60958_OSF_OVRD, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CHANNEL_NUMBER_L, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_0, AFMT_60958_CS_CLOCK_ACCURACY, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_1, AFMT_60958_CS_CHANNEL_NUMBER_R, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_2, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_3, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_4, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_5, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_6, mask_sh),\
+ SE_SF(AFMT0_AFMT_60958_2, AFMT_60958_CS_CHANNEL_NUMBER_7, mask_sh),\
+ SE_SF(AFMT0_AFMT_AUDIO_PACKET_CONTROL, AFMT_AUDIO_SAMPLE_SEND, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_FORCE, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_DIS, mask_sh),\
+ SE_SF(AFMT0_AFMT_MEM_PWR, AFMT_MEM_PWR_STATE, mask_sh)
+
+#define AFMT_DCN31_REG_FIELD_LIST(type) \
+ type AFMT_AUDIO_INFO_UPDATE;\
+ type AFMT_AUDIO_SRC_SELECT;\
+ type AFMT_AUDIO_CHANNEL_ENABLE;\
+ type AFMT_60958_CS_UPDATE;\
+ type AFMT_AUDIO_LAYOUT_OVRD;\
+ type AFMT_60958_OSF_OVRD;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_L;\
+ type AFMT_60958_CS_CLOCK_ACCURACY;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_R;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_2;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_3;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_4;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_5;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_6;\
+ type AFMT_60958_CS_CHANNEL_NUMBER_7;\
+ type AFMT_AUDIO_SAMPLE_SEND;\
+ type AFMT_MEM_PWR_FORCE;\
+ type AFMT_MEM_PWR_DIS;\
+ type AFMT_MEM_PWR_STATE
+
+struct dcn31_afmt_shift {
+ AFMT_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_afmt_mask {
+ AFMT_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_afmt {
+ struct afmt base;
+ const struct dcn31_afmt_registers *regs;
+ const struct dcn31_afmt_shift *afmt_shift;
+ const struct dcn31_afmt_mask *afmt_mask;
+};
+
+void afmt31_poweron(
+ struct afmt *afmt);
+
+void afmt31_powerdown(
+ struct afmt *afmt);
+
+void afmt31_construct(struct dcn31_afmt *afmt31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_afmt_registers *afmt_regs,
+ const struct dcn31_afmt_shift *afmt_shift,
+ const struct dcn31_afmt_mask *afmt_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
new file mode 100644
index 000000000000..de5e18c2a3ac
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.c
@@ -0,0 +1,173 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+
+#include "dc_bios_types.h"
+#include "hw_shared.h"
+#include "dcn31_apg.h"
+#include "reg_helper.h"
+
+#define DC_LOGGER \
+ apg31->base.ctx->logger
+
+#define REG(reg)\
+ (apg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ apg31->apg_shift->field_name, apg31->apg_mask->field_name
+
+
+#define CTX \
+ apg31->base.ctx
+
+
+static void apg31_enable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Reset APG */
+ REG_UPDATE(APG_CONTROL, APG_RESET, 1);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(APG_CONTROL, APG_RESET, 0);
+ REG_WAIT(APG_CONTROL,
+ APG_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 1);
+}
+
+static void apg31_disable(
+ struct apg *apg)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ /* Disable APG */
+ REG_UPDATE(APG_CONTROL2, APG_ENABLE, 0);
+}
+
+static union audio_cea_channels speakers_to_channels(
+ struct audio_speaker_flags speaker_flags)
+{
+ union audio_cea_channels cea_channels = {0};
+
+ /* these are one to one */
+ cea_channels.channels.FL = speaker_flags.FL_FR;
+ cea_channels.channels.FR = speaker_flags.FL_FR;
+ cea_channels.channels.LFE = speaker_flags.LFE;
+ cea_channels.channels.FC = speaker_flags.FC;
+
+ /* if Rear Left and Right exist move RC speaker to channel 7
+ * otherwise to channel 5
+ */
+ if (speaker_flags.RL_RR) {
+ cea_channels.channels.RL_RC = speaker_flags.RL_RR;
+ cea_channels.channels.RR = speaker_flags.RL_RR;
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RC;
+ } else {
+ cea_channels.channels.RL_RC = speaker_flags.RC;
+ }
+
+ /* FRONT Left Right Center and REAR Left Right Center are exclusive */
+ if (speaker_flags.FLC_FRC) {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.FLC_FRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.FLC_FRC;
+ } else {
+ cea_channels.channels.RC_RLC_FLC = speaker_flags.RLC_RRC;
+ cea_channels.channels.RRC_FRC = speaker_flags.RLC_RRC;
+ }
+
+ return cea_channels;
+}
+
+static void apg31_se_audio_setup(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info)
+{
+ struct dcn31_apg *apg31 = DCN31_APG_FROM_APG(apg);
+
+ uint32_t speakers = 0;
+ uint32_t channels = 0;
+
+ ASSERT(audio_info);
+ /* This should not happen, but guard against it so we don't get a BSOD */
+ if (audio_info == NULL)
+ return;
+
+ speakers = audio_info->flags.info.ALLSPEAKERS;
+ channels = speakers_to_channels(audio_info->flags.speaker_flags).all;
+
+ /* DisplayPort only allows for one audio stream with stream ID 0 */
+ REG_UPDATE(APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, 0);
+
+ /* When running in "pair mode", pairs of audio channels have their own enable
+ * this is for really old audio drivers */
+ REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, 0xFF);
+ // REG_UPDATE(APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, channels);
+
+ /* Disable forced mem power off */
+ REG_UPDATE(APG_MEM_PWR, APG_MEM_PWR_FORCE, 0);
+
+ apg31_enable(apg);
+}
+
+static void apg31_audio_mute_control(
+ struct apg *apg,
+ bool mute)
+{
+ if (mute)
+ apg31_disable(apg);
+ else
+ apg31_enable(apg);
+}
+
+static struct apg_funcs dcn31_apg_funcs = {
+ .se_audio_setup = apg31_se_audio_setup,
+ .audio_mute_control = apg31_audio_mute_control,
+ .enable_apg = apg31_enable,
+ .disable_apg = apg31_disable,
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask)
+{
+ apg31->base.ctx = ctx;
+
+ apg31->base.inst = inst;
+ apg31->base.funcs = &dcn31_apg_funcs;
+
+ apg31->regs = apg_regs;
+ apg31->apg_shift = apg_shift;
+ apg31->apg_mask = apg_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
new file mode 100644
index 000000000000..24f568e120d8
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_apg.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_APG_H__
+#define __DAL_DCN31_APG_H__
+
+
+#define DCN31_APG_FROM_APG(apg)\
+ container_of(apg, struct dcn31_apg, base)
+
+#define APG_DCN31_REG_LIST(id) \
+ SRI(APG_CONTROL, APG, id), \
+ SRI(APG_CONTROL2, APG, id),\
+ SRI(APG_MEM_PWR, APG, id),\
+ SRI(APG_DBG_GEN_CONTROL, APG, id)
+
+struct dcn31_apg_registers {
+ uint32_t APG_CONTROL;
+ uint32_t APG_CONTROL2;
+ uint32_t APG_MEM_PWR;
+ uint32_t APG_DBG_GEN_CONTROL;
+};
+
+
+#define DCN31_APG_MASK_SH_LIST(mask_sh)\
+ SE_SF(APG0_APG_CONTROL, APG_RESET, mask_sh),\
+ SE_SF(APG0_APG_CONTROL, APG_RESET_DONE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_CONTROL2, APG_DP_AUDIO_STREAM_ID, mask_sh),\
+ SE_SF(APG0_APG_DBG_GEN_CONTROL, APG_DBG_AUDIO_CHANNEL_ENABLE, mask_sh),\
+ SE_SF(APG0_APG_MEM_PWR, APG_MEM_PWR_FORCE, mask_sh)
+
+#define APG_DCN31_REG_FIELD_LIST(type) \
+ type APG_RESET;\
+ type APG_RESET_DONE;\
+ type APG_ENABLE;\
+ type APG_DP_AUDIO_STREAM_ID;\
+ type APG_DBG_AUDIO_CHANNEL_ENABLE;\
+ type APG_MEM_PWR_FORCE
+
+struct dcn31_apg_shift {
+ APG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_apg_mask {
+ APG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct apg {
+ const struct apg_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+};
+
+struct apg_funcs {
+
+ void (*setup_hdmi_audio)(
+ struct apg *apg);
+
+ void (*se_audio_setup)(
+ struct apg *apg,
+ unsigned int az_inst,
+ struct audio_info *audio_info);
+
+ void (*audio_mute_control)(
+ struct apg *apg,
+ bool mute);
+
+ void (*enable_apg)(
+ struct apg *apg);
+
+ void (*disable_apg)(
+ struct apg *apg);
+};
+
+
+
+struct dcn31_apg {
+ struct apg base;
+ const struct dcn31_apg_registers *regs;
+ const struct dcn31_apg_shift *apg_shift;
+ const struct dcn31_apg_mask *apg_mask;
+};
+
+void apg31_construct(struct dcn31_apg *apg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_apg_registers *apg_regs,
+ const struct dcn31_apg_shift *apg_shift,
+ const struct dcn31_apg_mask *apg_mask);
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
index 696c9307715d..815481a3ef54 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c
@@ -26,6 +26,7 @@
#include "reg_helper.h"
#include "core_types.h"
#include "dcn31_dccg.h"
+#include "dal_asic_id.h"
#define TO_DCN_DCCG(dccg)\
container_of(dccg, struct dcn_dccg, base)
@@ -42,6 +43,358 @@
#define DC_LOGGER \
dccg->ctx->logger
+static void dccg31_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ref_dppclk && req_dppclk) {
+ int ref_dppclk = dccg->ref_dppclk;
+ int modulo, phase;
+
+ /* phase / modulo = dpp pipe clk / dpp global clk */
+ modulo = 0xff; /* use the maximum 8-bit modulo for best resolution */
+ phase = ((modulo * req_dppclk) + ref_dppclk - 1) / ref_dppclk; /* round up */
+
+ if (phase > 0xff) {
+ ASSERT(false);
+ phase = 0xff;
+ }
+
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, phase,
+ DPPCLK0_DTO_MODULO, modulo);
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ } else {
+ //DTO must be enabled to generate a 0Hz clock output
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpp) {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 1);
+ REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0,
+ DPPCLK0_DTO_PHASE, 0,
+ DPPCLK0_DTO_MODULO, 1);
+ } else {
+ REG_UPDATE(DPPCLK_DTO_CTRL,
+ DPPCLK_DTO_ENABLE[dpp_inst], 0);
+ }
+ }
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
+}
+
+static enum phyd32clk_clock_source get_phy_mux_symclk(
+ struct dcn_dccg *dccg_dcn,
+ enum phyd32clk_clock_source src)
+{
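+ /* Yellow Carp B0 uses the PHYD32CLKF/G mux inputs in place of
+ * PHYD32CLKC/D, so remap the requested source accordingly.
+ */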
+ if (dccg_dcn->base.ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
+ if (src == PHYD32CLKC)
+ src = PHYD32CLKF;
+ if (src == PHYD32CLKD)
+ src = PHYD32CLKG;
+ }
+ return src;
+}
+
+static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* enable the DP stream clock for this pipe (sourced from one of the DTBCLKs) */
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 1);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 1);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 1);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
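+ /* If root clock gating optimization is in use, keep the DPSTREAMCLK
+ * root clock ungated (GATE_DISABLE = 1).
+ */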
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 1);
+}
+
+static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ DPSTREAMCLK_ROOT_GATE_DISABLE, 0);
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE0_EN, 0);
+ break;
+ case 1:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE1_EN, 0);
+ break;
+ case 2:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE2_EN, 0);
+ break;
+ case 3:
+ REG_UPDATE(DPSTREAMCLK_CNTL,
+ DPSTREAMCLK_PIPE3_EN, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst)
+{
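+ /* Selecting REFCLK turns the per-pipe DP stream clock off; any other
+ * source enables it.
+ */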
+ if (src == REFCLK)
+ dccg31_disable_dpstreamclk(dccg, otg_inst);
+ else
+ dccg31_enable_dpstreamclk(dccg, otg_inst);
+}
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, phyd32clk,
+ SYMCLK32_SE0_EN, 1);
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, phyd32clk,
+ SYMCLK32_SE1_EN, 1);
+ break;
+ case 2:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, phyd32clk,
+ SYMCLK32_SE2_EN, 1);
+ break;
+ case 3:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, phyd32clk,
+ SYMCLK32_SE3_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_se */
+ switch (hpo_se_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE0_SRC_SEL, 0,
+ SYMCLK32_SE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE1_SRC_SEL, 0,
+ SYMCLK32_SE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE1_GATE_DISABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE2_SRC_SEL, 0,
+ SYMCLK32_SE2_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE2_GATE_DISABLE, 0);
+ break;
+ case 3:
+ REG_UPDATE_2(SYMCLK32_SE_CNTL,
+ SYMCLK32_SE3_SRC_SEL, 0,
+ SYMCLK32_SE3_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_SE3_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ phyd32clk = get_phy_mux_symclk(dccg_dcn, phyd32clk);
+
+ /* select one of the PHYD32CLKs as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, phyd32clk,
+ SYMCLK32_LE0_EN, 1);
+ break;
+ case 1:
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 1);
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, phyd32clk,
+ SYMCLK32_LE1_EN, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ /* set refclk as the source for symclk32_le */
+ switch (hpo_le_inst) {
+ case 0:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE0_SRC_SEL, 0,
+ SYMCLK32_LE0_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE0_GATE_DISABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(SYMCLK32_LE_CNTL,
+ SYMCLK32_LE1_SRC_SEL, 0,
+ SYMCLK32_LE1_EN, 0);
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL3,
+ SYMCLK32_ROOT_LE1_GATE_DISABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_disable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //DTO must be enabled to generate a 0 Hz clock output
+ switch (inst) {
+ case 0:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 1);
+ break;
+ case 1:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 1);
+ break;
+ case 2:
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 1);
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 1);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static void dccg31_enable_dscclk(struct dccg *dccg, int inst)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ return;
+ //Disable DTO
+ switch (inst) {
+ case 0:
+ REG_UPDATE_2(DSCCLK0_DTO_PARAM,
+ DSCCLK0_DTO_PHASE, 0,
+ DSCCLK0_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK0_DTO_ENABLE, 0);
+ break;
+ case 1:
+ REG_UPDATE_2(DSCCLK1_DTO_PARAM,
+ DSCCLK1_DTO_PHASE, 0,
+ DSCCLK1_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK1_DTO_ENABLE, 0);
+ break;
+ case 2:
+ REG_UPDATE_2(DSCCLK2_DTO_PARAM,
+ DSCCLK2_DTO_PHASE, 0,
+ DSCCLK2_DTO_MODULO, 0);
+ REG_UPDATE(DSCCLK_DTO_CTRL,
+ DSCCLK2_DTO_ENABLE, 0);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
@@ -241,16 +594,44 @@ static void dccg31_set_dispclk_change_mode(
void dccg31_init(struct dccg *dccg)
{
+ /* Set HPO stream encoder to use refclk to avoid case where PHY is
+ * disabled and SYMCLK32 for HPO SE is sourced from PHYD32CLK which
+ * will cause DCN to hang.
+ */
+ dccg31_disable_symclk32_se(dccg, 0);
+ dccg31_disable_symclk32_se(dccg, 1);
+ dccg31_disable_symclk32_se(dccg, 2);
+ dccg31_disable_symclk32_se(dccg, 3);
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) {
+ dccg31_disable_symclk32_le(dccg, 0);
+ dccg31_disable_symclk32_le(dccg, 1);
+ }
+
+ if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) {
+ dccg31_disable_dpstreamclk(dccg, 0);
+ dccg31_disable_dpstreamclk(dccg, 1);
+ dccg31_disable_dpstreamclk(dccg, 2);
+ dccg31_disable_dpstreamclk(dccg, 3);
+ }
+
}
static const struct dccg_funcs dccg31_funcs = {
- .update_dpp_dto = dccg2_update_dpp_dto,
+ .update_dpp_dto = dccg31_update_dpp_dto,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
.dccg_init = dccg31_init,
+ .set_dpstreamclk = dccg31_set_dpstreamclk,
+ .enable_symclk32_se = dccg31_enable_symclk32_se,
+ .disable_symclk32_se = dccg31_disable_symclk32_se,
+ .enable_symclk32_le = dccg31_enable_symclk32_le,
+ .disable_symclk32_le = dccg31_disable_symclk32_le,
.set_physymclk = dccg31_set_physymclk,
.set_dtbclk_dto = dccg31_set_dtbclk_dto,
.set_audio_dtbclk_dto = dccg31_set_audio_dtbclk_dto,
.set_dispclk_change_mode = dccg31_set_dispclk_change_mode,
+ .disable_dsc = dccg31_disable_dscclk,
+ .enable_dsc = dccg31_enable_dscclk,
};
struct dccg *dccg31_create(
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
index 706ad80ba873..a013a32bbaf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h
@@ -61,7 +61,13 @@
SR(DCCG_AUDIO_DTBCLK_DTO_MODULO),\
SR(DCCG_AUDIO_DTBCLK_DTO_PHASE),\
SR(DCCG_AUDIO_DTO_SOURCE),\
- SR(DENTIST_DISPCLK_CNTL)
+ SR(DENTIST_DISPCLK_CNTL),\
+ SR(DSCCLK0_DTO_PARAM),\
+ SR(DSCCLK1_DTO_PARAM),\
+ SR(DSCCLK2_DTO_PARAM),\
+ SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL3),\
+ SR(HDMISTREAMCLK0_DTO_PARAM)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
@@ -119,7 +125,26 @@
DCCG_SFII(OTG, PIXEL_RATE_CNTL, DTBCLK_DTO, DIV, 3, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, mask_sh),\
DCCG_SF(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL, mask_sh),\
- DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh)
+ DCCG_SF(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_MODE, mask_sh), \
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK0_DTO_PARAM, DSCCLK0_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK1_DTO_PARAM, DSCCLK1_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_PHASE, mask_sh),\
+ DCCG_SF(DSCCLK2_DTO_PARAM, DSCCLK2_DTO_MODULO, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE2_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE3_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE0_GATE_DISABLE, mask_sh),\
+ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_LE1_GATE_DISABLE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_PHASE, mask_sh),\
+ DCCG_SF(HDMISTREAMCLK0_DTO_PARAM, HDMISTREAMCLK0_DTO_MODULO, mask_sh)
struct dccg *dccg31_create(
@@ -130,6 +155,29 @@ struct dccg *dccg31_create(
void dccg31_init(struct dccg *dccg);
+void dccg31_set_dpstreamclk(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst);
+
+void dccg31_enable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_se(
+ struct dccg *dccg,
+ int hpo_se_inst);
+
+void dccg31_enable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+void dccg31_disable_symclk32_le(
+ struct dccg *dccg,
+ int hpo_le_inst);
+
void dccg31_set_physymclk(
struct dccg *dccg,
int phy_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
index b0892443fbd5..ee6f13bef377 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c
@@ -325,6 +325,10 @@ void dcn31_link_encoder_construct(
enc10->base.features.flags.bits.IS_HBR3_CAPABLE =
bp_cap_info.DP_HBR3_EN;
enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN;
+ enc10->base.features.flags.bits.IS_DP2_CAPABLE = bp_cap_info.IS_DP2_CAPABLE;
+ enc10->base.features.flags.bits.IS_UHBR10_CAPABLE = bp_cap_info.DP_UHBR10_EN;
+ enc10->base.features.flags.bits.IS_UHBR13_5_CAPABLE = bp_cap_info.DP_UHBR13_5_EN;
+ enc10->base.features.flags.bits.IS_UHBR20_CAPABLE = bp_cap_info.DP_UHBR20_EN;
enc10->base.features.flags.bits.DP_IS_USB_C =
bp_cap_info.DP_IS_USB_C;
} else {
@@ -362,19 +366,79 @@ void dcn31_link_encoder_construct_minimal(
SIGNAL_TYPE_EDP;
}
+/* DPIA equivalent of link_transmitter_control. */
+static bool link_dpia_control(struct dc_context *dc_ctx,
+ struct dmub_cmd_dig_dpia_control_data *dpia_control)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_dmub_srv *dmub = dc_ctx->dmub_srv;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.dig1_dpia_control.header.type = DMUB_CMD__DPIA;
+ cmd.dig1_dpia_control.header.sub_type =
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL;
+ cmd.dig1_dpia_control.header.payload_bytes =
+ sizeof(cmd.dig1_dpia_control) -
+ sizeof(cmd.dig1_dpia_control.header);
+
+ cmd.dig1_dpia_control.dpia_control = *dpia_control;
+
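+ /* Queue the command to DMUB and wait for the firmware to finish
+ * processing it before returning.
+ */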
+ dc_dmub_srv_cmd_queue(dmub, &cmd);
+ dc_dmub_srv_cmd_execute(dmub);
+ dc_dmub_srv_wait_idle(dmub);
+
+ return true;
+}
+
+static void link_encoder_disable(struct dcn10_link_encoder *enc10)
+{
+ /* reset training complete */
+ REG_UPDATE(DP_LINK_CNTL, DP_LINK_TRAINING_COMPLETE, 0);
+}
+
void dcn31_link_encoder_enable_dp_output(
struct link_encoder *enc,
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Enable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn20_link_encoder_enable_dp_output(enc, link_settings, clock_source);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
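+ /* Mappable transmitters are driven through a DMUB DPIA control
+ * command rather than the usual transmitter control path.
+ */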
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin
+ * unused by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
}
}
@@ -383,14 +447,43 @@ void dcn31_link_encoder_enable_dp_mst_output(
const struct dc_link_settings *link_settings,
enum clock_source_id clock_source)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Enable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ enc1_configure_encoder(enc10, link_settings);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_ENABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ dpia_control.lanenum = (uint8_t)link_settings->lane_count;
+ dpia_control.symclk_10khz = link_settings->link_rate *
+ LINK_RATE_REF_FREQ_IN_KHZ / 10;
+ /* DIG_BE_CNTL.DIG_HPD_SELECT set to 5 (hpdsel - 1) to indicate HPD pin
+ * unused by DPIA.
+ */
+ dpia_control.hpdsel = 6;
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ dpia_control.fec_rdy = dc_link_should_enable_fec(link);
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
}
}
@@ -398,14 +491,45 @@ void dcn31_link_encoder_disable_output(
struct link_encoder *enc,
enum signal_type signal)
{
+ struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
+
/* Disable transmitter and encoder. */
- if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc->current_state, enc)) {
+ if (!link_enc_cfg_is_transmitter_mappable(enc->ctx->dc, enc)) {
dcn10_link_encoder_disable_output(enc, signal);
} else {
- /** @todo Handle transmitter with programmable mapping to link encoder. */
+ struct dmub_cmd_dig_dpia_control_data dpia_control = { 0 };
+ struct dc_link *link;
+
+ if (!dcn10_is_dig_enabled(enc))
+ return;
+
+ link = link_enc_cfg_get_link_using_link_enc(enc->ctx->dc, enc->preferred_engine);
+
+ dpia_control.action = (uint8_t)TRANSMITTER_CONTROL_DISABLE;
+ dpia_control.enc_id = enc->preferred_engine;
+ if (signal == SIGNAL_TYPE_DISPLAY_PORT) {
+ dpia_control.mode_laneset.digmode = 0; /* 0 for SST; 5 for MST */
+ } else if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ dpia_control.mode_laneset.digmode = 5; /* 0 for SST; 5 for MST */
+ } else {
+ DC_LOG_ERROR("%s: USB4 DPIA only supports DisplayPort.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ }
+
+ if (link) {
+ dpia_control.dpia_id = link->ddc_hw_inst;
+ } else {
+ DC_LOG_ERROR("%s: Failed to execute DPIA enable DMUB command.\n", __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ link_dpia_control(enc->ctx, &dpia_control);
+
+ link_encoder_disable(enc10);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
new file mode 100644
index 000000000000..6c08e21bb708
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.c
@@ -0,0 +1,616 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_link_encoder.h"
+#include "reg_helper.h"
+#include "dc_link.h"
+#include "stream_encoder.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_le_shift->field_name, enc3->hpo_le_mask->field_name
+
+
+#define CTX \
+ enc3->base.ctx
+
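+/* Maximum number of 10 us polls of SAT_UPDATE_PENDING (worst case update latency is ~868 us) */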
+enum {
+ DP_SAT_UPDATE_MAX_RETRY = 200
+};
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_link_enabled;
+
+ /* get current status of link enabled */
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &dp_link_enabled);
+
+ /* Enable clocks first */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 1);
+
+ /* Reset DPHY. Only reset if going from disable to enable */
+ if (!dp_link_enabled) {
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL, DPHY_RESET, 0);
+ }
+
+ /* Configure DPHY settings */
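+ /* NUM_LANES encoding: 0 = 1 lane, 1 = 2 lanes, 3 = 4 lanes */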
+ REG_UPDATE_3(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 1,
+ PRECODER_ENABLE, 1,
+ NUM_LANES, num_lanes == LANE_COUNT_ONE ? 0 : num_lanes == LANE_COUNT_TWO ? 1 : 3);
+}
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ /* Configure DPHY settings */
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ DPHY_ENABLE, 0);
+
+ /* Shut down clock last */
+ REG_UPDATE(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, 0);
+}
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t tp_custom;
+
+ switch (tp_params->dp_phy_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_ACTIVE);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS1);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE:
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_LINK_TRAINING_TPS2);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS1:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS1,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS1);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_128b_132b_TPS2:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT1, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT2, DP_DPHY_TP_SELECT_TPS2,
+ TP_SELECT3, DP_DPHY_TP_SELECT_TPS2);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS7:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS7,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS7);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS9:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS9,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS9);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS11:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS11,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS11);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS15:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS15,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS15);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS23:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS23,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS23);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_PRBS31:
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_PRBS_SEL0, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL1, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL2, DP_DPHY_TP_PRBS31,
+ TP_PRBS_SEL3, DP_DPHY_TP_PRBS31);
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT1, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT2, DP_DPHY_TP_SELECT_PRBS,
+ TP_SELECT3, DP_DPHY_TP_SELECT_PRBS);
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_264BIT_CUSTOM:
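+ /* Pack the 33-byte (264-bit) custom pattern into eleven 24-bit
+ * TP_CUSTOM registers, three bytes per register.
+ */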
+ tp_custom = (tp_params->custom_pattern[2] << 16) | (tp_params->custom_pattern[1] << 8) | tp_params->custom_pattern[0];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM0, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[5] << 16) | (tp_params->custom_pattern[4] << 8) | tp_params->custom_pattern[3];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM1, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[8] << 16) | (tp_params->custom_pattern[7] << 8) | tp_params->custom_pattern[6];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM2, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[11] << 16) | (tp_params->custom_pattern[10] << 8) | tp_params->custom_pattern[9];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM3, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[14] << 16) | (tp_params->custom_pattern[13] << 8) | tp_params->custom_pattern[12];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM4, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[17] << 16) | (tp_params->custom_pattern[16] << 8) | tp_params->custom_pattern[15];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM5, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[20] << 16) | (tp_params->custom_pattern[19] << 8) | tp_params->custom_pattern[18];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM6, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[23] << 16) | (tp_params->custom_pattern[22] << 8) | tp_params->custom_pattern[21];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM7, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[26] << 16) | (tp_params->custom_pattern[25] << 8) | tp_params->custom_pattern[24];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM8, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[29] << 16) | (tp_params->custom_pattern[28] << 8) | tp_params->custom_pattern[27];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM9, 0, TP_CUSTOM, tp_custom);
+ tp_custom = (tp_params->custom_pattern[32] << 16) | (tp_params->custom_pattern[31] << 8) | tp_params->custom_pattern[30];
+ REG_SET(DP_DPHY_SYM32_TP_CUSTOM10, 0, TP_CUSTOM, tp_custom);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT1, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT2, DP_DPHY_TP_SELECT_CUSTOM,
+ TP_SELECT3, DP_DPHY_TP_SELECT_CUSTOM);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ case DP_TEST_PATTERN_SQUARE_PULSE:
+ REG_SET(DP_DPHY_SYM32_TP_SQ_PULSE, 0,
+ TP_SQ_PULSE_WIDTH, tp_params->custom_pattern[0]);
+
+ REG_UPDATE_4(DP_DPHY_SYM32_TP_CONFIG,
+ TP_SELECT0, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT1, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT2, DP_DPHY_TP_SELECT_SQUARE,
+ TP_SELECT3, DP_DPHY_TP_SELECT_SQUARE);
+
+ REG_UPDATE(DP_DPHY_SYM32_CONTROL,
+ MODE, DP2_TEST_PATTERN);
+ break;
+ default:
+ break;
+ }
+}
+
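+/* Extract the stream source (HPO stream encoder index) and slot count for one
+ * SAT row; both are zeroed when the row is unused.
+ */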
+static void fill_stream_allocation_row_info(
+ const struct link_mst_stream_allocation *stream_allocation,
+ uint32_t *src,
+ uint32_t *slots)
+{
+ const struct hpo_dp_stream_encoder *stream_enc = stream_allocation->hpo_dp_stream_enc;
+
+ if (stream_enc && (stream_enc->id >= ENGINE_ID_HPO_DP_0)) {
+ *src = stream_enc->id - ENGINE_ID_HPO_DP_0;
+ *slots = stream_allocation->slot_count;
+ } else {
+ *src = 0;
+ *slots = 0;
+ }
+}
+
+/* programs DP VC payload allocation */
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t slots = 0;
+ uint32_t src = 0;
+
+ /* --- Set MSE Stream Attribute -
+ * Setup VC Payload Table on Tx Side,
+ * Issue allocation change trigger
+ * to commit payload on both tx and rx side
+ */
+
+ /* we should clean-up table each time */
+
+ if (table->stream_count >= 1) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[0],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 2) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[1],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 3) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[2],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ if (table->stream_count >= 4) {
+ fill_stream_allocation_row_info(
+ &table->stream_allocations[3],
+ &src,
+ &slots);
+ } else {
+ src = 0;
+ slots = 0;
+ }
+
+ REG_UPDATE_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, src,
+ SAT_SLOT_COUNT, slots);
+
+ /* --- wait for transaction finish */
+
+ /* Send the allocation change trigger (ACT): this first sends the ACT,
+ * then double-buffers the SAT into the hardware, making the new
+ * allocation active on the DP MST link.
+ */
+
+ /* SAT_UPDATE:
+ * 0 - No Action
+ * 1 - Update SAT with trigger
+ * 2 - Update SAT without trigger
+ */
+ REG_UPDATE(DP_DPHY_SYM32_SAT_UPDATE,
+ SAT_UPDATE, 1);
+
+ /* wait for update to complete
+ * (i.e. SAT_UPDATE_PENDING field is set to 0)
+ * No need for HW to enforce keepout.
+ */
+ /* Best case and worst case wait time for SAT_UPDATE_PENDING
+ * best: 109 us
+ * worst: 868 us
+ */
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ SAT_UPDATE_PENDING, 0,
+ 10, DP_SAT_UPDATE_MAX_RETRY);
+}
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
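+ /* Split avg_time_slots_per_mtp into an integer part (X) and a fractional
+ * part (Y, expressed in units of 1/2^25) for the STREAM_VC_RATE registers.
+ */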
+ uint32_t x = dc_fixpt_floor(
+ avg_time_slots_per_mtp);
+ uint32_t y = dc_fixpt_ceil(
+ dc_fixpt_shl(
+ dc_fixpt_sub_int(
+ avg_time_slots_per_mtp,
+ x),
+ 25));
+
+ switch (stream_encoder_inst) {
+ case 0:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL0, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 1:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL1, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 2:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL2, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ case 3:
+ REG_SET_2(DP_DPHY_SYM32_VC_RATE_CNTL3, 0,
+ STREAM_VC_RATE_X, x,
+ STREAM_VC_RATE_Y, y);
+ break;
+ default:
+ ASSERT(0);
+ }
+
+ /* Best case and worst case wait time for RATE_UPDATE_PENDING
+ * best: 116 ns
+ * worst: 903 ns
+ */
+ /* wait for update to be completed on the link */
+ REG_WAIT(DP_DPHY_SYM32_STATUS,
+ RATE_UPDATE_PENDING, 0,
+ 1, 10);
+}
+
+static bool dcn31_hpo_dp_link_enc_is_in_alt_mode(
+ struct hpo_dp_link_encoder *enc)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ uint32_t dp_alt_mode_disable = 0;
+
+ ASSERT((enc->transmitter >= TRANSMITTER_UNIPHY_A) && (enc->transmitter <= TRANSMITTER_UNIPHY_E));
+
+ /* if value == 1 alt mode is disabled, otherwise it is enabled */
+ REG_GET(RDPCSTX_PHY_CNTL6[enc->transmitter], RDPCS_PHY_DPALT_DISABLE, &dp_alt_mode_disable);
+ return (dp_alt_mode_disable == 0);
+}
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+
+ ASSERT(state);
+
+ REG_GET(DP_DPHY_SYM32_STATUS,
+ STATUS, &state->link_enc_enabled);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ NUM_LANES, &state->lane_count);
+ REG_GET(DP_DPHY_SYM32_CONTROL,
+ MODE, (uint32_t *)&state->link_mode);
+
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC0,
+ SAT_STREAM_SOURCE, &state->stream_src[0],
+ SAT_SLOT_COUNT, &state->slot_count[0]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC1,
+ SAT_STREAM_SOURCE, &state->stream_src[1],
+ SAT_SLOT_COUNT, &state->slot_count[1]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC2,
+ SAT_STREAM_SOURCE, &state->stream_src[2],
+ SAT_SLOT_COUNT, &state->slot_count[2]);
+ REG_GET_2(DP_DPHY_SYM32_SAT_VC3,
+ SAT_STREAM_SOURCE, &state->stream_src[3],
+ SAT_SLOT_COUNT, &state->slot_count[3]);
+
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL0,
+ STREAM_VC_RATE_X, &state->vc_rate_x[0],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[0]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL1,
+ STREAM_VC_RATE_X, &state->vc_rate_x[1],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[1]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL2,
+ STREAM_VC_RATE_X, &state->vc_rate_x[2],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[2]);
+ REG_GET_2(DP_DPHY_SYM32_VC_RATE_CNTL3,
+ STREAM_VC_RATE_X, &state->vc_rate_x[3],
+ STREAM_VC_RATE_Y, &state->vc_rate_y[3]);
+}
+
+static enum bp_result link_transmitter_control(
+ struct dcn31_hpo_dp_link_encoder *enc3,
+ struct bp_transmitter_control *cntl)
+{
+ enum bp_result result;
+ struct dc_bios *bp = enc3->base.ctx->dc_bios;
+
+ result = bp->funcs->transmitter_control(bp, cntl);
+
+ return result;
+}
+
+/* enables DP PHY output for 128b132b encoding */
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* Set the transmitter */
+ enc3->base.transmitter = transmitter;
+
+ /* Enable the PHY */
+ cntl.action = TRANSMITTER_CONTROL_ENABLE;
+ cntl.engine_id = ENGINE_ID_UNKNOWN;
+ cntl.transmitter = enc3->base.transmitter;
+ //cntl.pll_id = clock_source;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.color_depth = COLOR_DEPTH_UNDEFINED;
+ cntl.hpo_engine_id = enc->inst + ENGINE_ID_HPO_DP_0;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ }
+}
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* disable transmitter */
+ cntl.action = TRANSMITTER_CONTROL_DISABLE;
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.hpd_sel = enc3->base.hpd_source;
+ cntl.signal = signal;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+
+ /* disable encoder */
+ dcn31_hpo_dp_link_enc_disable(enc);
+}
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset)
+{
+ struct dcn31_hpo_dp_link_encoder *enc3 = DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(enc);
+ struct bp_transmitter_control cntl = { 0 };
+ enum bp_result result;
+
+ /* set transmitter drive settings (FFE preset) */
+ cntl.transmitter = enc3->base.transmitter;
+ cntl.action = TRANSMITTER_CONTROL_SET_VOLTAGE_AND_PREEMPASIS;
+ cntl.signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
+ cntl.lanes_number = link_settings->lane_count;
+ cntl.pixel_clock = link_settings->link_rate * 1000;
+ cntl.lane_settings = ffe_preset;
+
+ result = link_transmitter_control(enc3, &cntl);
+
+ if (result != BP_RESULT_OK) {
+ DC_LOG_ERROR("%s: Failed to execute VBIOS command table!\n",
+ __func__);
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+static struct hpo_dp_link_encoder_funcs dcn31_hpo_dp_link_encoder_funcs = {
+ .enable_link_phy = dcn31_hpo_dp_link_enc_enable_dp_output,
+ .disable_link_phy = dcn31_hpo_dp_link_enc_disable_output,
+ .link_enable = dcn31_hpo_dp_link_enc_enable,
+ .link_disable = dcn31_hpo_dp_link_enc_disable,
+ .set_link_test_pattern = dcn31_hpo_dp_link_enc_set_link_test_pattern,
+ .update_stream_allocation_table = dcn31_hpo_dp_link_enc_update_stream_allocation_table,
+ .set_throttled_vcp_size = dcn31_hpo_dp_link_enc_set_throttled_vcp_size,
+ .is_in_alt_mode = dcn31_hpo_dp_link_enc_is_in_alt_mode,
+ .read_state = dcn31_hpo_dp_link_enc_read_state,
+ .set_ffe = dcn31_hpo_dp_link_enc_set_ffe,
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask)
+{
+ enc31->base.ctx = ctx;
+
+ enc31->base.inst = inst;
+ enc31->base.funcs = &dcn31_hpo_dp_link_encoder_funcs;
+ enc31->base.hpd_source = HPD_SOURCEID_UNKNOWN;
+ enc31->base.transmitter = TRANSMITTER_UNKNOWN;
+
+ enc31->regs = hpo_le_regs;
+ enc31->hpo_le_shift = hpo_le_shift;
+ enc31->hpo_le_mask = hpo_le_mask;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
new file mode 100644
index 000000000000..0706ccaf6fec
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_link_encoder.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_LINK_ENCODER_H__
+
+#include "link_encoder.h"
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(hpo_dp_link_encoder)\
+ container_of(hpo_dp_link_encoder, struct dcn31_hpo_dp_link_encoder, base)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id) \
+ SRI(DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC, id), \
+ SRI(DP_DPHY_SYM32_CONTROL, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_STATUS, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CONFIG, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_PRBS_SEED3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_SQ_PULSE, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM4, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM5, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM6, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM7, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM8, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM9, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_TP_CUSTOM10, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_VC3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL0, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL1, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL2, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_VC_RATE_CNTL3, DP_DPHY_SYM32, id), \
+ SRI(DP_DPHY_SYM32_SAT_UPDATE, DP_DPHY_SYM32, id)
+
+#define DCN3_1_RDPCSTX_REG_LIST(id) \
+ SRII(RDPCSTX_PHY_CNTL6, RDPCSTX, id)
+
+
+#define DCN3_1_HPO_DP_LINK_ENC_REGS \
+ uint32_t DP_LINK_ENC_CLOCK_CONTROL;\
+ uint32_t DP_DPHY_SYM32_CONTROL;\
+ uint32_t DP_DPHY_SYM32_STATUS;\
+ uint32_t DP_DPHY_SYM32_TP_CONFIG;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED0;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED1;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED2;\
+ uint32_t DP_DPHY_SYM32_TP_PRBS_SEED3;\
+ uint32_t DP_DPHY_SYM32_TP_SQ_PULSE;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM0;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM1;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM2;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM3;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM4;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM5;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM6;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM7;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM8;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM9;\
+ uint32_t DP_DPHY_SYM32_TP_CUSTOM10;\
+ uint32_t DP_DPHY_SYM32_SAT_VC0;\
+ uint32_t DP_DPHY_SYM32_SAT_VC1;\
+ uint32_t DP_DPHY_SYM32_SAT_VC2;\
+ uint32_t DP_DPHY_SYM32_SAT_VC3;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL0;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL1;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL2;\
+ uint32_t DP_DPHY_SYM32_VC_RATE_CNTL3;\
+ uint32_t DP_DPHY_SYM32_SAT_UPDATE
+
+struct dcn31_hpo_dp_link_encoder_registers {
+ DCN3_1_HPO_DP_LINK_ENC_REGS;
+ uint32_t RDPCSTX_PHY_CNTL6[5];
+};
+
+#define DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_LINK_ENC0_DP_LINK_ENC_CLOCK_CONTROL, DP_LINK_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_RESET, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, DPHY_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, PRECODER_ENABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, MODE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_CONTROL, NUM_LANES, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, STATUS, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, SAT_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_STATUS, RATE_UPDATE_PENDING, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CUSTOM0, TP_CUSTOM, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_SELECT3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL0, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL1, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL2, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_CONFIG, TP_PRBS_SEL3, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_TP_SQ_PULSE, TP_SQ_PULSE_WIDTH, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_STREAM_SOURCE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_VC0, SAT_SLOT_COUNT, mask_sh),\
+ SE_SF(RDPCSTX0_RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_X, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_VC_RATE_CNTL0, STREAM_VC_RATE_Y, mask_sh),\
+ SE_SF(DP_DPHY_SYM320_DP_DPHY_SYM32_SAT_UPDATE, SAT_UPDATE, mask_sh)
+
+#define DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(type) \
+ type DP_LINK_ENC_CLOCK_EN;\
+ type DPHY_RESET;\
+ type DPHY_ENABLE;\
+ type PRECODER_ENABLE;\
+ type NUM_LANES;\
+ type MODE;\
+ type STATUS;\
+ type SAT_UPDATE_PENDING;\
+ type RATE_UPDATE_PENDING;\
+ type TP_CUSTOM;\
+ type TP_SELECT0;\
+ type TP_SELECT1;\
+ type TP_SELECT2;\
+ type TP_SELECT3;\
+ type TP_PRBS_SEL0;\
+ type TP_PRBS_SEL1;\
+ type TP_PRBS_SEL2;\
+ type TP_PRBS_SEL3;\
+ type TP_SQ_PULSE_WIDTH;\
+ type SAT_STREAM_SOURCE;\
+ type SAT_SLOT_COUNT;\
+ type STREAM_VC_RATE_X;\
+ type STREAM_VC_RATE_Y;\
+ type SAT_UPDATE;\
+ type RDPCS_PHY_DPALT_DISABLE
+
+
+struct dcn31_hpo_dp_link_encoder_shift {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_link_encoder_mask {
+ DCN3_1_HPO_DP_LINK_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_link_encoder {
+ struct hpo_dp_link_encoder base;
+ const struct dcn31_hpo_dp_link_encoder_registers *regs;
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift;
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask;
+};
+
+void hpo_dp_link_encoder31_construct(struct dcn31_hpo_dp_link_encoder *enc31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_hpo_dp_link_encoder_registers *hpo_le_regs,
+ const struct dcn31_hpo_dp_link_encoder_shift *hpo_le_shift,
+ const struct dcn31_hpo_dp_link_encoder_mask *hpo_le_mask);
+
+void dcn31_hpo_dp_link_enc_enable_dp_output(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter);
+
+void dcn31_hpo_dp_link_enc_disable_output(
+ struct hpo_dp_link_encoder *enc,
+ enum signal_type signal);
+
+void dcn31_hpo_dp_link_enc_enable(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes);
+
+void dcn31_hpo_dp_link_enc_disable(
+ struct hpo_dp_link_encoder *enc);
+
+void dcn31_hpo_dp_link_enc_set_link_test_pattern(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params);
+
+void dcn31_hpo_dp_link_enc_update_stream_allocation_table(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+void dcn31_hpo_dp_link_enc_set_throttled_vcp_size(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+void dcn31_hpo_dp_link_enc_read_state(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state);
+
+void dcn31_hpo_dp_link_enc_set_ffe(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset);
+
+#endif // __DAL_DCN31_HPO_LINK_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
new file mode 100644
index 000000000000..565f12dd179a
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -0,0 +1,752 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn31_hpo_dp_stream_encoder.h"
+#include "reg_helper.h"
+#include "dc_link.h"
+
+#define DC_LOGGER \
+ enc3->base.ctx->logger
+
+#define REG(reg)\
+ (enc3->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ enc3->hpo_se_shift->field_name, enc3->hpo_se_mask->field_name
+
+#define CTX \
+ enc3->base.ctx
+
+
+enum dp2_pixel_encoding {
+ DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422,
+ DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420,
+ DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY
+};
+
+enum dp2_uncompressed_component_depth {
+ DP_SYM32_ENC_COMPONENT_DEPTH_6BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_8BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_10BPC,
+ DP_SYM32_ENC_COMPONENT_DEPTH_12BPC
+};
+
+
+static void dcn31_hpo_dp_stream_enc_enable_stream(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable all clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 1);
+
+ /* Assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 1);
+ /* Wait for reset to complete (to assert) */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 1,
+ 1, 10);
+
+ /* De-assert reset to the DP_SYM32_ENC logic */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET, 0);
+ /* Wait for reset to de-assert */
+ REG_WAIT(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_RESET_DONE, 0,
+ 1, 10);
+
+ /* Enable idle pattern generation */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 1);
+}
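
The enable sequence above follows a recurring assert/poll/de-assert handshake: set DP_SYM32_ENC_RESET, wait for DP_SYM32_ENC_RESET_DONE to go high, clear the reset, wait for the done bit to drop, then enable the block. A standalone sketch of that handshake, with a hypothetical bit layout and a trivially modelled done bit standing in for real hardware and the driver's REG_UPDATE/REG_WAIT macros:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENC_RESET       (1u << 0)   /* hypothetical bit positions */
#define ENC_RESET_DONE  (1u << 1)
#define ENC_ENABLE      (1u << 2)

static uint32_t fake_ctrl;   /* stands in for DP_SYM32_ENC_CONTROL */

static uint32_t reg_read(void)
{
	/* Model the hardware: RESET_DONE simply follows RESET. */
	if (fake_ctrl & ENC_RESET)
		fake_ctrl |= ENC_RESET_DONE;
	else
		fake_ctrl &= ~ENC_RESET_DONE;
	return fake_ctrl;
}

static void reg_write(uint32_t val) { fake_ctrl = val; }

static bool poll_done(bool want_set, int tries)
{
	while (tries--) {
		bool cur = (reg_read() & ENC_RESET_DONE) != 0;
		if (cur == want_set)
			return true;       /* REG_WAIT(..., 1, 10) analogue */
	}
	return false;
}

int main(void)
{
	reg_write(reg_read() | ENC_RESET);          /* assert reset */
	if (!poll_done(true, 10)) return 1;
	reg_write(reg_read() & ~ENC_RESET);         /* de-assert reset */
	if (!poll_done(false, 10)) return 1;
	reg_write(reg_read() | ENC_ENABLE);         /* enable idle pattern gen */
	printf("ctrl = 0x%x\n", fake_ctrl);
	return 0;
}
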
+
+static void dcn31_hpo_dp_stream_enc_dp_unblank(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_source)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Set the input mux for video stream source */
+ REG_UPDATE(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, stream_source);
+
+ /* Enable video transmission in main framer */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 1);
+
+ /* Reset and Enable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 1);
+ REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET, 0);
+ REG_WAIT(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_RESET_DONE, 0,
+ 1, 10);
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 1);
+
+ /* Reset and Enable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 1);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 1,
+ 1, 10);
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET, 0);
+ REG_WAIT(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_RESET_DONE, 0,
+ 1, 10);
+
+ /* For Debug -- Enable CRC */
+ REG_UPDATE_2(DP_SYM32_ENC_VID_CRC_CONTROL,
+ CRC_ENABLE, 1,
+ CRC_CONT_MODE_ENABLE, 1);
+
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_dp_blank(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable video transmission */
+ REG_UPDATE(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, 0);
+
+ /* Wait for video stream transmission disabled
+ * Larger delay to wait until VBLANK - use max retry of
+ * 10us*5000=50ms. This covers 41.7ms of minimum 24 Hz mode +
+ * a little more because we may not trust delay accuracy.
+ */
+ //REG_WAIT(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ // VID_STREAM_STATUS, 0,
+ // 10, 5000);
+
+ /* Disable SDP transmission */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable Pixel to Symbol FIFO */
+ REG_UPDATE(DP_SYM32_ENC_VID_FIFO_CONTROL,
+ PIXEL_TO_SYMBOL_FIFO_ENABLE, 0);
+
+ /* Disable Clock Ramp Adjuster FIFO */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0,
+ FIFO_ENABLE, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable DP_SYM32_ENC */
+ REG_UPDATE(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, 0);
+
+ /* Disable clocks in the DP_STREAM_ENC */
+ REG_UPDATE(DP_STREAM_ENC_CLOCK_CONTROL,
+ DP_STREAM_ENC_CLOCK_EN, 0);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_stream_attribute(
+ struct hpo_dp_stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ bool compressed_format,
+ bool double_buffer_en)
+{
+ enum dp2_pixel_encoding pixel_encoding;
+ enum dp2_uncompressed_component_depth component_depth;
+ uint32_t h_active_start;
+ uint32_t v_active_start;
+ uint32_t h_blank;
+ uint32_t h_back_porch;
+ uint32_t h_width;
+ uint32_t v_height;
+ unsigned long long v_freq;
+ uint8_t misc0 = 0;
+ uint8_t misc1 = 0;
+ uint8_t hsp;
+ uint8_t vsp;
+
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ struct dc_crtc_timing hw_crtc_timing = *crtc_timing;
+
+ /* MISC0[0] = 0 video and link clocks are asynchronous
+ * MISC1[0] = 0 interlace not supported
+ * MISC1[2:1] = 0 stereo field is handled by hardware
+ * MISC1[5:3] = 0 Reserved
+ */
+
+ /* Interlaced not supported */
+ if (hw_crtc_timing.flags.INTERLACE) {
+ BREAK_TO_DEBUGGER();
+ }
+
+ /* Double buffer enable for MSA and pixel format registers
+ * Only double buffer for changing stream attributes for active streams
+ * Do not double buffer when initially enabling a stream
+ */
+ REG_UPDATE(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL,
+ MSA_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+ REG_UPDATE(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL,
+ PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, double_buffer_en);
+
+ /* Pixel Encoding */
+ switch (hw_crtc_timing.pixel_encoding) {
+ case PIXEL_ENCODING_YCBCR422:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR422;
+ misc0 = misc0 | 0x2; // MISC0[2:1] = 01
+ break;
+ case PIXEL_ENCODING_YCBCR444:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ misc0 = misc0 | 0x4; // MISC0[2:1] = 10
+
+ if (hw_crtc_timing.flags.Y_ONLY) {
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_Y_ONLY;
+ if (hw_crtc_timing.display_color_depth != COLOR_DEPTH_666) {
+ /* HW testing only, no use case yet.
+ * Color depth of Y-only could be
+ * 8, 10, 12, 16 bits
+ */
+ misc1 = misc1 | 0x80; // MISC1[7] = 1
+ }
+ }
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_YCBCR420;
+ misc1 = misc1 | 0x40; // MISC1[6] = 1
+ break;
+ case PIXEL_ENCODING_RGB:
+ default:
+ pixel_encoding = DP_SYM32_ENC_PIXEL_ENCODING_RGB_YCBCR444;
+ break;
+ }
+
+ /* For YCbCr420 and BT2020 Colorimetry Formats, VSC SDP shall be used.
+ * When MISC1, bit 6, is Set to 1, a Source device uses a VSC SDP to indicate the
+ * Pixel Encoding/Colorimetry Format and that a Sink device shall ignore MISC1, bit 7,
+ * and MISC0, bits 7:1 (MISC1, bit 7, and MISC0, bits 7:1, become "don't care").
+ */
+ if (use_vsc_sdp_for_colorimetry)
+ misc1 = misc1 | 0x40;
+ else
+ misc1 = misc1 & ~0x40;
+
+ /* Color depth */
+ switch (hw_crtc_timing.display_color_depth) {
+ case COLOR_DEPTH_666:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ // MISC0[7:5] = 000
+ break;
+ case COLOR_DEPTH_888:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_8BPC;
+ misc0 = misc0 | 0x20; // MISC0[7:5] = 001
+ break;
+ case COLOR_DEPTH_101010:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_10BPC;
+ misc0 = misc0 | 0x40; // MISC0[7:5] = 010
+ break;
+ case COLOR_DEPTH_121212:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_12BPC;
+ misc0 = misc0 | 0x60; // MISC0[7:5] = 011
+ break;
+ default:
+ component_depth = DP_SYM32_ENC_COMPONENT_DEPTH_6BPC;
+ break;
+ }
+
+ REG_UPDATE_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, component_depth);
+
+ switch (output_color_space) {
+ case COLOR_SPACE_SRGB:
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_SRGB_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ misc0 = misc0 | 0x8; /* bit3=1, bit4=0 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ misc0 = misc0 | 0x18; /* bit3=1, bit4=1 */
+ misc1 = misc1 & ~0x80; /* bit7 = 0*/
+ if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR422)
+ misc0 = misc0 | 0x2; /* bit2=0, bit1=1 */
+ else if (hw_crtc_timing.pixel_encoding == PIXEL_ENCODING_YCBCR444)
+ misc0 = misc0 | 0x4; /* bit2=1, bit1=0 */
+ break;
+ case COLOR_SPACE_2020_RGB_LIMITEDRANGE:
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ case COLOR_SPACE_2020_YCBCR:
+ case COLOR_SPACE_XR_RGB:
+ case COLOR_SPACE_MSREF_SCRGB:
+ case COLOR_SPACE_ADOBERGB:
+ case COLOR_SPACE_DCIP3:
+ case COLOR_SPACE_XV_YCC_709:
+ case COLOR_SPACE_XV_YCC_601:
+ case COLOR_SPACE_DISPLAYNATIVE:
+ case COLOR_SPACE_DOLBYVISION:
+ case COLOR_SPACE_APPCTRL:
+ case COLOR_SPACE_CUSTOMPOINTS:
+ case COLOR_SPACE_UNKNOWN:
+ case COLOR_SPACE_YCBCR709_BLACK:
+ /* do nothing */
+ break;
+ }
+
+ /* calculate from vesa timing parameters
+ * h_active_start related to leading edge of sync
+ */
+ h_blank = hw_crtc_timing.h_total - hw_crtc_timing.h_border_left -
+ hw_crtc_timing.h_addressable - hw_crtc_timing.h_border_right;
+
+ h_back_porch = h_blank - hw_crtc_timing.h_front_porch -
+ hw_crtc_timing.h_sync_width;
+
+ /* start at beginning of left border */
+ h_active_start = hw_crtc_timing.h_sync_width + h_back_porch;
+
+ v_active_start = hw_crtc_timing.v_total - hw_crtc_timing.v_border_top -
+ hw_crtc_timing.v_addressable - hw_crtc_timing.v_border_bottom -
+ hw_crtc_timing.v_front_porch;
+
+ h_width = hw_crtc_timing.h_border_left + hw_crtc_timing.h_addressable + hw_crtc_timing.h_border_right;
+ v_height = hw_crtc_timing.v_border_top + hw_crtc_timing.v_addressable + hw_crtc_timing.v_border_bottom;
+ hsp = hw_crtc_timing.flags.HSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ vsp = hw_crtc_timing.flags.VSYNC_POSITIVE_POLARITY ? 0x80 : 0;
+ v_freq = hw_crtc_timing.pix_clk_100hz * 100;
+
+ /* MSA Packet Mapping to 32-bit Link Symbols - DP2 spec, section 2.7.4.1
+ *
+ * Lane 0 Lane 1 Lane 2 Lane 3
+ * MSA[0] = { 0, 0, 0, VFREQ[47:40]}
+ * MSA[1] = { 0, 0, 0, VFREQ[39:32]}
+ * MSA[2] = { 0, 0, 0, VFREQ[31:24]}
+ * MSA[3] = { HTotal[15:8], HStart[15:8], HWidth[15:8], VFREQ[23:16]}
+ * MSA[4] = { HTotal[ 7:0], HStart[ 7:0], HWidth[ 7:0], VFREQ[15: 8]}
+ * MSA[5] = { VTotal[15:8], VStart[15:8], VHeight[15:8], VFREQ[ 7: 0]}
+ * MSA[6] = { VTotal[ 7:0], VStart[ 7:0], VHeight[ 7:0], MISC0[ 7: 0]}
+ * MSA[7] = { HSP|HSW[14:8], VSP|VSW[14:8], 0, MISC1[ 7: 0]}
+ * MSA[8] = { HSW[ 7:0], VSW[ 7:0], 0, 0}
+ */
+ REG_SET_4(DP_SYM32_ENC_VID_MSA0, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, v_freq >> 40);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA1, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 32) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA2, 0,
+ MSA_DATA_LANE_0, 0,
+ MSA_DATA_LANE_1, 0,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, (v_freq >> 24) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA3, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total >> 8,
+ MSA_DATA_LANE_1, h_active_start >> 8,
+ MSA_DATA_LANE_2, h_width >> 8,
+ MSA_DATA_LANE_3, (v_freq >> 16) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA4, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_total & 0xff,
+ MSA_DATA_LANE_1, h_active_start & 0xff,
+ MSA_DATA_LANE_2, h_width & 0xff,
+ MSA_DATA_LANE_3, (v_freq >> 8) & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA5, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total >> 8,
+ MSA_DATA_LANE_1, v_active_start >> 8,
+ MSA_DATA_LANE_2, v_height >> 8,
+ MSA_DATA_LANE_3, v_freq & 0xff);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA6, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.v_total & 0xff,
+ MSA_DATA_LANE_1, v_active_start & 0xff,
+ MSA_DATA_LANE_2, v_height & 0xff,
+ MSA_DATA_LANE_3, misc0);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA7, 0,
+ MSA_DATA_LANE_0, hsp | (hw_crtc_timing.h_sync_width >> 8),
+ MSA_DATA_LANE_1, vsp | (hw_crtc_timing.v_sync_width >> 8),
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, misc1);
+
+ REG_SET_4(DP_SYM32_ENC_VID_MSA8, 0,
+ MSA_DATA_LANE_0, hw_crtc_timing.h_sync_width & 0xff,
+ MSA_DATA_LANE_1, hw_crtc_timing.v_sync_width & 0xff,
+ MSA_DATA_LANE_2, 0,
+ MSA_DATA_LANE_3, 0);
+}
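
The nine REG_SET_4 writes above follow the MSA-to-lane table in the comment, with lane 0 in bits 7:0 of each dword and lane 3 in bits 31:24 (per the MSA_DATA_LANE shift definitions in the companion header). A standalone sketch that packs a hypothetical 1080p-style timing the same way, useful for checking the byte placement in isolation:

#include <stdint.h>
#include <stdio.h>

/* Pack one MSA dword: lane 0 in bits 7:0 ... lane 3 in bits 31:24. */
static uint32_t msa_dword(uint8_t l0, uint8_t l1, uint8_t l2, uint8_t l3)
{
	return (uint32_t)l0 | ((uint32_t)l1 << 8) |
	       ((uint32_t)l2 << 16) | ((uint32_t)l3 << 24);
}

int main(void)
{
	/* Hypothetical 1080p-style timing, only to exercise the packing. */
	uint16_t h_total = 2200, h_start = 192, h_width = 1920, hsw = 44;
	uint16_t v_total = 1125, v_start = 41,  v_height = 1080, vsw = 5;
	uint8_t  hsp = 0x80, vsp = 0x80;        /* positive sync polarity  */
	uint8_t  misc0 = 0x20, misc1 = 0x00;    /* 8 bpc RGB, async clocks */
	uint64_t v_freq = 148500000ULL;         /* pixel clock in Hz       */

	uint32_t msa[9] = {
		msa_dword(0, 0, 0, (v_freq >> 40) & 0xff),
		msa_dword(0, 0, 0, (v_freq >> 32) & 0xff),
		msa_dword(0, 0, 0, (v_freq >> 24) & 0xff),
		msa_dword(h_total >> 8,   h_start >> 8,   h_width >> 8,   (v_freq >> 16) & 0xff),
		msa_dword(h_total & 0xff, h_start & 0xff, h_width & 0xff, (v_freq >> 8) & 0xff),
		msa_dword(v_total >> 8,   v_start >> 8,   v_height >> 8,  v_freq & 0xff),
		msa_dword(v_total & 0xff, v_start & 0xff, v_height & 0xff, misc0),
		msa_dword(hsp | (hsw >> 8), vsp | (vsw >> 8), 0, misc1),
		msa_dword(hsw & 0xff, vsw & 0xff, 0, 0),
	};

	for (int i = 0; i < 9; i++)
		printf("MSA[%d] = 0x%08x\n", i, msa[i]);
	return 0;
}
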
+
+static void dcn31_hpo_dp_stream_enc_update_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc,
+ const struct encoder_info_frame *info_frame)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t dmdata_packet_enabled = 0;
+ bool sdp_stream_enable = false;
+
+ if (info_frame->vsc.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 0, /* packetIndex */
+ &info_frame->vsc,
+ true);
+ sdp_stream_enable = true;
+ }
+ if (info_frame->spd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 2, /* packetIndex */
+ &info_frame->spd,
+ true);
+ sdp_stream_enable = true;
+ }
+ if (info_frame->hdrsmd.valid) {
+ enc->vpg->funcs->update_generic_info_packet(
+ enc->vpg,
+ 3, /* packetIndex */
+ &info_frame->hdrsmd,
+ true);
+ sdp_stream_enable = true;
+ }
+ /* enable/disable transmission of packet(s).
+ * If enabled, packet transmission begins on the next frame
+ */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->vsc.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->spd.valid);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, info_frame->hdrsmd.valid);
+
+ /* check if dynamic metadata packet transmission is enabled */
+ REG_GET(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL,
+ METADATA_PACKET_ENABLE, &dmdata_packet_enabled);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+}
+
+static void dcn31_hpo_dp_stream_enc_stop_dp_info_packets(
+ struct hpo_dp_stream_encoder *enc)
+{
+ /* stop generic packets on DP */
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t asp_enable = 0;
+ uint32_t atp_enable = 0;
+ uint32_t aip_enable = 0;
+ uint32_t acm_enable = 0;
+
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0);
+
+ /* Disable secondary data path if audio is also disabled */
+ REG_GET_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, &asp_enable,
+ ATP_ENABLE, &atp_enable,
+ AIP_ENABLE, &aip_enable,
+ ACM_ENABLE, &acm_enable);
+ if (!(asp_enable || atp_enable || aip_enable || acm_enable))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+}
+
+static uint32_t hpo_dp_is_gsp_enabled(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+ uint32_t gsp0_enabled = 0;
+ uint32_t gsp2_enabled = 0;
+ uint32_t gsp3_enabled = 0;
+ uint32_t gsp11_enabled = 0;
+
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp0_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL2, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp2_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL3, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp3_enabled);
+ REG_GET(DP_SYM32_ENC_SDP_GSP_CONTROL11, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, &gsp11_enabled);
+
+ return (gsp0_enabled || gsp2_enabled || gsp3_enabled || gsp11_enabled);
+}
+
+static void dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet(
+ struct hpo_dp_stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ if (enable) {
+ struct dc_info_packet pps_sdp;
+ int i;
+
+ /* Configure for PPS packet size (128 bytes) */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_PAYLOAD_SIZE, 3);
+
+ /* Load PPS into infoframe (SDP) registers */
+ pps_sdp.valid = true;
+ pps_sdp.hb0 = 0;
+ pps_sdp.hb1 = DC_DP_INFOFRAME_TYPE_PPS;
+ pps_sdp.hb2 = 127;
+ pps_sdp.hb3 = 0;
+
+ for (i = 0; i < 4; i++) {
+ memcpy(pps_sdp.sb, &dsc_packed_pps[i * 32], 32);
+ enc3->base.vpg->funcs->update_generic_info_packet(
+ enc3->base.vpg,
+ 11 + i,
+ &pps_sdp,
+ immediate_update);
+ }
+
+ /* SW should make sure VBID[6] update line number is bigger
+ * than PPS transmit line number
+ */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_TRANSMISSION_LINE_NUMBER, 2);
+
+ REG_UPDATE_2(DP_SYM32_ENC_VID_VBID_CONTROL,
+ VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, 0,
+ VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, 3);
+
+ /* Send PPS data at the line number specified above. */
+ REG_UPDATE(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 1);
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+ } else {
+ /* Disable Generic Stream Packet 11 (GSP) transmission */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_GSP_CONTROL11,
+ GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, 0,
+ GSP_PAYLOAD_SIZE, 0);
+ }
+}
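
The PPS path above splits the 128-byte packed PPS into four SDPs of 32 payload bytes each and loads them into generic packet slots 11 through 14. A standalone sketch of that chunking, with a hypothetical header-byte value standing in for DC_DP_INFOFRAME_TYPE_PPS:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PPS_BYTES   128
#define SDP_PAYLOAD 32
#define FIRST_SLOT  11   /* generic packet slots 11..14, as in the code above */

struct info_packet {
	uint8_t hb0, hb1, hb2, hb3;
	uint8_t sb[SDP_PAYLOAD];
};

/* Split a packed PPS into four SDPs the way the function above does. */
static void build_pps_sdps(const uint8_t pps[PPS_BYTES], struct info_packet out[4])
{
	for (int i = 0; i < PPS_BYTES / SDP_PAYLOAD; i++) {
		out[i].hb0 = 0;
		out[i].hb1 = 0x10;   /* hypothetical stand-in for DC_DP_INFOFRAME_TYPE_PPS */
		out[i].hb2 = 127;    /* PPS size in bytes minus one, matching the code above */
		out[i].hb3 = 0;
		memcpy(out[i].sb, &pps[i * SDP_PAYLOAD], SDP_PAYLOAD);
	}
}

int main(void)
{
	uint8_t pps[PPS_BYTES];
	struct info_packet sdps[4];

	for (int i = 0; i < PPS_BYTES; i++)
		pps[i] = (uint8_t)i;
	build_pps_sdps(pps, sdps);

	for (int i = 0; i < 4; i++)
		printf("slot %d: first payload byte 0x%02x\n",
		       FIRST_SLOT + i, sdps[i].sb[0]);
	return 0;
}
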
+
+static void dcn31_hpo_dp_stream_enc_map_stream_to_link(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ ASSERT(stream_enc_inst < 4 && link_enc_inst < 2);
+
+ switch (stream_enc_inst) {
+ case 0:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 1:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 2:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ case 3:
+ REG_UPDATE(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, link_enc_inst);
+ break;
+ }
+}
+
+static void dcn31_hpo_dp_stream_enc_mute_control(
+ struct hpo_dp_stream_encoder *enc,
+ bool mute)
+{
+ ASSERT(enc->apg);
+ enc->apg->funcs->audio_mute_control(enc->apg, mute);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_setup(
+ struct hpo_dp_stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Set the input mux for audio stream source */
+ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst);
+
+ ASSERT(enc->apg);
+ enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_enable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Enable Audio packets */
+ REG_UPDATE(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, 1);
+
+ /* Program the ATP and AIP next */
+ REG_UPDATE_2(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ATP_ENABLE, 1,
+ AIP_ENABLE, 1);
+
+ /* Enable secondary data path */
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 1);
+
+ /* Enable APG block */
+ enc->apg->funcs->enable_apg(enc->apg);
+}
+
+static void dcn31_hpo_dp_stream_enc_audio_disable(
+ struct hpo_dp_stream_encoder *enc)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ /* Disable Audio packets */
+ REG_UPDATE_4(DP_SYM32_ENC_SDP_AUDIO_CONTROL0,
+ ASP_ENABLE, 0,
+ ATP_ENABLE, 0,
+ AIP_ENABLE, 0,
+ ACM_ENABLE, 0);
+
+ /* Disable SDP Stream Enable if other SDP GSPs are also disabled */
+ if (!(hpo_dp_is_gsp_enabled(enc)))
+ REG_UPDATE(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, 0);
+
+ /* Disable APG block */
+ enc->apg->funcs->disable_apg(enc->apg);
+}
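
Both the audio-disable path above and stop_dp_info_packets() apply the same rule: SDP_STREAM_ENABLE is only cleared once no SDP consumer remains, i.e. all audio packet types and all tracked GSPs are off. A small standalone sketch of that rule, with a plain struct standing in for the register state:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the SDP-related register bits. */
struct sdp_state {
	bool asp, atp, aip, acm;        /* audio packet types */
	bool gsp0, gsp2, gsp3, gsp11;   /* generic packets    */
	bool sdp_stream_enable;
};

static void sdp_update(struct sdp_state *s)
{
	bool audio = s->asp || s->atp || s->aip || s->acm;
	bool gsp   = s->gsp0 || s->gsp2 || s->gsp3 || s->gsp11;

	if (!audio && !gsp)
		s->sdp_stream_enable = false;
}

int main(void)
{
	struct sdp_state s = { .asp = true, .gsp11 = true, .sdp_stream_enable = true };

	s.asp = false;            /* audio disabled, but PPS GSP11 still active */
	sdp_update(&s);
	printf("after audio off: sdp=%d\n", s.sdp_stream_enable);   /* still 1 */

	s.gsp11 = false;          /* DSC PPS transmission stopped as well */
	sdp_update(&s);
	printf("after gsp11 off: sdp=%d\n", s.sdp_stream_enable);   /* now 0 */
	return 0;
}
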
+
+static void dcn31_hpo_dp_stream_enc_read_state(
+ struct hpo_dp_stream_encoder *enc,
+ struct hpo_dp_stream_encoder_state *s)
+{
+ struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc);
+
+ REG_GET(DP_SYM32_ENC_CONTROL,
+ DP_SYM32_ENC_ENABLE, &s->stream_enc_enabled);
+ REG_GET(DP_SYM32_ENC_VID_STREAM_CONTROL,
+ VID_STREAM_ENABLE, &s->vid_stream_enabled);
+ REG_GET(DP_STREAM_ENC_INPUT_MUX_CONTROL,
+ DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, &s->otg_inst);
+
+ REG_GET_3(DP_SYM32_ENC_VID_PIXEL_FORMAT,
+ PIXEL_ENCODING_TYPE, &s->compressed_format,
+ UNCOMPRESSED_PIXEL_ENCODING, &s->pixel_encoding,
+ UNCOMPRESSED_COMPONENT_DEPTH, &s->component_depth);
+
+ REG_GET(DP_SYM32_ENC_SDP_CONTROL,
+ SDP_STREAM_ENABLE, &s->sdp_enabled);
+
+ switch (enc->inst) {
+ case 0:
+ REG_GET(DP_STREAM_MAPPER_CONTROL0,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 1:
+ REG_GET(DP_STREAM_MAPPER_CONTROL1,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 2:
+ REG_GET(DP_STREAM_MAPPER_CONTROL2,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ case 3:
+ REG_GET(DP_STREAM_MAPPER_CONTROL3,
+ DP_STREAM_LINK_TARGET, &s->mapped_to_link_enc);
+ break;
+ }
+}
+
+static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = {
+ .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream,
+ .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank,
+ .dp_blank = dcn31_hpo_dp_stream_enc_dp_blank,
+ .disable = dcn31_hpo_dp_stream_enc_disable,
+ .set_stream_attribute = dcn31_hpo_dp_stream_enc_set_stream_attribute,
+ .update_dp_info_packets = dcn31_hpo_dp_stream_enc_update_dp_info_packets,
+ .stop_dp_info_packets = dcn31_hpo_dp_stream_enc_stop_dp_info_packets,
+ .dp_set_dsc_pps_info_packet = dcn31_hpo_dp_stream_enc_set_dsc_pps_info_packet,
+ .map_stream_to_link = dcn31_hpo_dp_stream_enc_map_stream_to_link,
+ .audio_mute_control = dcn31_hpo_dp_stream_enc_mute_control,
+ .dp_audio_setup = dcn31_hpo_dp_stream_enc_audio_setup,
+ .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable,
+ .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable,
+ .read_state = dcn31_hpo_dp_stream_enc_read_state,
+};
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask)
+{
+ enc3->base.funcs = &dcn30_str_enc_funcs;
+ enc3->base.ctx = ctx;
+ enc3->base.inst = inst;
+ enc3->base.id = eng_id;
+ enc3->base.bp = bp;
+ enc3->base.vpg = vpg;
+ enc3->base.apg = apg;
+ enc3->regs = regs;
+ enc3->hpo_se_shift = hpo_se_shift;
+ enc3->hpo_se_mask = hpo_se_mask;
+}
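
The construct call wires the per-instance register addresses and the family-wide shift/mask tables into the encoder, and the FN() macro near the top of the file resolves every field name into that shift/mask pair for REG_UPDATE and friends. A generic standalone sketch of the resulting read-modify-write, with a hypothetical one-bit field (not the real reg_helper macros):

#include <stdint.h>
#include <stdio.h>

/* One field description, analogous to an entry in the hpo_se_shift /
 * hpo_se_mask tables wired up by the construct function above.
 */
struct field {
	uint8_t  shift;
	uint32_t mask;
};

static uint32_t set_field(uint32_t reg_val, struct field f, uint32_t field_val)
{
	return (reg_val & ~f.mask) | ((field_val << f.shift) & f.mask);
}

int main(void)
{
	/* Hypothetical layout: a 1-bit enable at bit 4. */
	struct field enc_enable = { .shift = 4, .mask = 0x00000010 };
	uint32_t ctrl = 0;

	ctrl = set_field(ctrl, enc_enable, 1);   /* REG_UPDATE(..., ENABLE, 1) analogue */
	printf("ctrl = 0x%08x\n", ctrl);
	ctrl = set_field(ctrl, enc_enable, 0);
	printf("ctrl = 0x%08x\n", ctrl);
	return 0;
}
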
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
new file mode 100644
index 000000000000..70b94fc25304
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+#define __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
+
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31/dcn31_apg.h"
+#include "stream_encoder.h"
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(hpo_dp_stream_encoder)\
+ container_of(hpo_dp_stream_encoder, struct dcn31_hpo_dp_stream_encoder, base)
+
+
+/* Define MSA_DATA_LANE_[0-3] fields to make programming easier */
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0__SHIFT 0x0
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1__SHIFT 0x8
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2__SHIFT 0x10
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3__SHIFT 0x18
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_0_MASK 0x000000FFL
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_1_MASK 0x0000FF00L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_2_MASK 0x00FF0000L
+#define DP_SYM32_ENC_VID_MSA__MSA_DATA_LANE_3_MASK 0xFF000000L
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id) \
+ SR(DP_STREAM_MAPPER_CONTROL0),\
+ SR(DP_STREAM_MAPPER_CONTROL1),\
+ SR(DP_STREAM_MAPPER_CONTROL2),\
+ SR(DP_STREAM_MAPPER_CONTROL3),\
+ SRI(DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC, id),\
+ SRI(DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, DP_STREAM_ENC, id),\
+ SRI(DP_SYM32_ENC_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA1, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA4, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA6, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA7, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA8, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_FIFO_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_STREAM_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_VBID_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL2, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL3, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL5, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\
+ SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id)
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REGS \
+ uint32_t DP_STREAM_MAPPER_CONTROL0;\
+ uint32_t DP_STREAM_MAPPER_CONTROL1;\
+ uint32_t DP_STREAM_MAPPER_CONTROL2;\
+ uint32_t DP_STREAM_MAPPER_CONTROL3;\
+ uint32_t DP_STREAM_ENC_CLOCK_CONTROL;\
+ uint32_t DP_STREAM_ENC_INPUT_MUX_CONTROL;\
+ uint32_t DP_STREAM_ENC_AUDIO_CONTROL;\
+ uint32_t DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0;\
+ uint32_t DP_SYM32_ENC_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT;\
+ uint32_t DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA0;\
+ uint32_t DP_SYM32_ENC_VID_MSA1;\
+ uint32_t DP_SYM32_ENC_VID_MSA2;\
+ uint32_t DP_SYM32_ENC_VID_MSA3;\
+ uint32_t DP_SYM32_ENC_VID_MSA4;\
+ uint32_t DP_SYM32_ENC_VID_MSA5;\
+ uint32_t DP_SYM32_ENC_VID_MSA6;\
+ uint32_t DP_SYM32_ENC_VID_MSA7;\
+ uint32_t DP_SYM32_ENC_VID_MSA8;\
+ uint32_t DP_SYM32_ENC_VID_MSA_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_FIFO_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_STREAM_CONTROL;\
+ uint32_t DP_SYM32_ENC_VID_VBID_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL0;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL2;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL3;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL5;\
+ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\
+ uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\
+ uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\
+ uint32_t DP_SYM32_ENC_VID_CRC_CONTROL
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\
+ SE_SF(DP_STREAM_MAPPER_CONTROL0, DP_STREAM_LINK_TARGET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_CONTROL, DP_STREAM_ENC_CLOCK_EN, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_INPUT_MUX_CONTROL, DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_AUDIO_CONTROL, DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_STREAM_ENC0_DP_STREAM_ENC_CLOCK_RAMP_ADJUSTER_FIFO_STATUS_CONTROL0, FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_CONTROL, DP_SYM32_ENC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, PIXEL_ENCODING_TYPE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_PIXEL_ENCODING, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT, UNCOMPRESSED_COMPONENT_DEPTH, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_PIXEL_FORMAT_DOUBLE_BUFFER_CONTROL, PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_MSA_DOUBLE_BUFFER_CONTROL, MSA_DOUBLE_BUFFER_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_0, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_1, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_2, mask_sh),\
+ SE_SF(DP_SYM32_ENC_VID_MSA, MSA_DATA_LANE_3, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_RESET_DONE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_FIFO_CONTROL, PIXEL_TO_SYMBOL_FIFO_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_STREAM_CONTROL, VID_STREAM_STATUS, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_VBID_CONTROL, VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_CONTROL, SDP_STREAM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_PAYLOAD_SIZE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL0, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_TRANSMISSION_LINE_NUMBER, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_GSP_CONTROL5, GSP_SOF_REFERENCE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, METADATA_PACKET_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AUDIO_MUTE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ASP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ATP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, AIP_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_SDP_AUDIO_CONTROL0, ACM_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_ENABLE, mask_sh),\
+ SE_SF(DP_SYM32_ENC0_DP_SYM32_ENC_VID_CRC_CONTROL, CRC_CONT_MODE_ENABLE, mask_sh)
+
+
+#define DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(type) \
+ type DP_STREAM_LINK_TARGET;\
+ type DP_STREAM_ENC_CLOCK_EN;\
+ type DP_STREAM_ENC_INPUT_MUX_PIXEL_STREAM_SOURCE_SEL;\
+ type DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL;\
+ type FIFO_RESET;\
+ type FIFO_RESET_DONE;\
+ type FIFO_ENABLE;\
+ type DP_SYM32_ENC_RESET;\
+ type DP_SYM32_ENC_RESET_DONE;\
+ type DP_SYM32_ENC_ENABLE;\
+ type PIXEL_ENCODING_TYPE;\
+ type UNCOMPRESSED_PIXEL_ENCODING;\
+ type UNCOMPRESSED_COMPONENT_DEPTH;\
+ type PIXEL_FORMAT_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DOUBLE_BUFFER_ENABLE;\
+ type MSA_DATA_LANE_0;\
+ type MSA_DATA_LANE_1;\
+ type MSA_DATA_LANE_2;\
+ type MSA_DATA_LANE_3;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET;\
+ type PIXEL_TO_SYMBOL_FIFO_RESET_DONE;\
+ type PIXEL_TO_SYMBOL_FIFO_ENABLE;\
+ type VID_STREAM_ENABLE;\
+ type VID_STREAM_STATUS;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_SOF_REFERENCE;\
+ type VBID_6_COMPRESSEDSTREAM_FLAG_LINE_NUMBER;\
+ type SDP_STREAM_ENABLE;\
+ type AUDIO_MUTE;\
+ type ASP_ENABLE;\
+ type ATP_ENABLE;\
+ type AIP_ENABLE;\
+ type ACM_ENABLE;\
+ type GSP_VIDEO_CONTINUOUS_TRANSMISSION_ENABLE;\
+ type GSP_PAYLOAD_SIZE;\
+ type GSP_TRANSMISSION_LINE_NUMBER;\
+ type GSP_SOF_REFERENCE;\
+ type METADATA_PACKET_ENABLE;\
+ type CRC_ENABLE;\
+ type CRC_CONT_MODE_ENABLE
+
+
+struct dcn31_hpo_dp_stream_encoder_registers {
+ DCN3_1_HPO_DP_STREAM_ENC_REGS;
+};
+
+struct dcn31_hpo_dp_stream_encoder_shift {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder_mask {
+ DCN3_1_HPO_DP_STREAM_ENC_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_hpo_dp_stream_encoder {
+ struct hpo_dp_stream_encoder base;
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs;
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift;
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask;
+};
+
+
+void dcn31_hpo_dp_stream_encoder_construct(
+ struct dcn31_hpo_dp_stream_encoder *enc3,
+ struct dc_context *ctx,
+ struct dc_bios *bp,
+ uint32_t inst,
+ enum engine_id eng_id,
+ struct vpg *vpg,
+ struct apg *apg,
+ const struct dcn31_hpo_dp_stream_encoder_registers *regs,
+ const struct dcn31_hpo_dp_stream_encoder_shift *hpo_se_shift,
+ const struct dcn31_hpo_dp_stream_encoder_mask *hpo_se_mask);
+
+
+#endif // __DAL_DCN31_HPO_DP_STREAM_ENCODER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
index 3f2333ec67e2..d24ad7754d71 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c
@@ -48,6 +48,9 @@
#include "dc_link_dp.h"
#include "inc/link_dpcd.h"
#include "dcn10/dcn10_hw_sequencer.h"
+#include "inc/link_enc_cfg.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dce/dce_i2c_hw.h"
#define DC_LOGGER_INIT(logger)
@@ -71,15 +74,10 @@ void dcn31_init_hw(struct dc *dc)
struct resource_pool *res_pool = dc->res_pool;
uint32_t backlight = MAX_BACKLIGHT_LEVEL;
int i, j;
- int edp_num;
if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
- // Initialize the dccg
- if (res_pool->dccg->funcs->dccg_init)
- res_pool->dccg->funcs->dccg_init(res_pool->dccg);
-
if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
REG_WRITE(REFCLK_CNTL, 0);
@@ -106,6 +104,9 @@ void dcn31_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
hws->funcs.disable_vga(dc->hwseq);
}
+ // Initialize the dccg
+ if (res_pool->dccg->funcs->dccg_init)
+ res_pool->dccg->funcs->dccg_init(res_pool->dccg);
if (dc->debug.enable_mem_low_power.bits.dmcu) {
// Force ERAM to shutdown if DMCU is not enabled
@@ -125,6 +126,18 @@ void dcn31_init_hw(struct dc *dc)
REG_UPDATE(MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, 1);
}
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ if (dc->debug.enable_mem_low_power.bits.vpg && dc->res_pool->stream_enc[0]->vpg->funcs->vpg_powerdown) {
+ // Power down VPGs
+ for (i = 0; i < dc->res_pool->stream_enc_count; i++)
+ dc->res_pool->stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->stream_enc[i]->vpg);
+#if defined(CONFIG_DRM_AMD_DC_DP2_0)
+ for (i = 0; i < dc->res_pool->hpo_dp_stream_enc_count; i++)
+ dc->res_pool->hpo_dp_stream_enc[i]->vpg->funcs->vpg_powerdown(dc->res_pool->hpo_dp_stream_enc[i]->vpg);
+#endif
+ }
+#endif
+
if (dc->ctx->dc_bios->fw_info_valid) {
res_pool->ref_clocks.xtalin_clock_inKhz =
dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
@@ -157,6 +170,9 @@ void dcn31_init_hw(struct dc *dc)
*/
struct dc_link *link = dc->links[i];
+ if (link->ep_type != DISPLAY_ENDPOINT_PHY)
+ continue;
+
link->link_enc->funcs->hw_init(link->link_enc);
/* Check for enabled DIG to identify enabled display */
@@ -170,6 +186,10 @@ void dcn31_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* Enables outbox notifications for usb4 dpia */
+ if (dc->res_pool->usb4_dpia_count)
+ dmub_enable_outbox_notification(dc);
+
/* we want to turn off all dp displays before doing detection */
if (dc->config.power_down_display_on_boot) {
uint8_t dpcd_power_state = '\0';
@@ -184,7 +204,8 @@ void dcn31_init_hw(struct dc *dc)
&dpcd_power_state, sizeof(dpcd_power_state));
if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0) {
/* blank dp stream before power off receiver*/
- if (dc->links[i]->link_enc->funcs->get_dig_frontend) {
+ if (dc->links[i]->ep_type == DISPLAY_ENDPOINT_PHY &&
+ dc->links[i]->link_enc->funcs->get_dig_frontend) {
unsigned int fe;
fe = dc->links[i]->link_enc->funcs->get_dig_frontend(
@@ -194,7 +215,7 @@ void dcn31_init_hw(struct dc *dc)
for (j = 0; j < dc->res_pool->stream_enc_count; j++) {
if (fe == dc->res_pool->stream_enc[j]->id) {
- dc->res_pool->stream_enc[j]->funcs->dp_blank(
+ dc->res_pool->stream_enc[j]->funcs->dp_blank(dc->links[i],
dc->res_pool->stream_enc[j]);
break;
}
@@ -218,47 +239,6 @@ void dcn31_init_hw(struct dc *dc)
!dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
- /* In headless boot cases, DIG may be turned
- * on which causes HW/SW discrepancies.
- * To avoid this, power down hardware on boot
- * if DIG is turned on and seamless boot not enabled
- */
- if (dc->config.power_down_display_on_boot) {
- struct dc_link *edp_links[MAX_NUM_EDP];
- struct dc_link *edp_link;
- bool power_down = false;
-
- get_edp_links(dc, edp_links, &edp_num);
- if (edp_num) {
- for (i = 0; i < edp_num; i++) {
- edp_link = edp_links[i];
- if (edp_link->link_enc->funcs->is_dig_enabled &&
- edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
- dc->hwss.edp_backlight_control &&
- dc->hwss.power_down &&
- dc->hwss.edp_power_control) {
- dc->hwss.edp_backlight_control(edp_link, false);
- dc->hwss.power_down(dc);
- dc->hwss.edp_power_control(edp_link, false);
- power_down = true;
- }
- }
- }
- if (!power_down) {
- for (i = 0; i < dc->link_count; i++) {
- struct dc_link *link = dc->links[i];
-
- if (link->link_enc->funcs->is_dig_enabled &&
- link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
- dc->hwss.power_down) {
- dc->hwss.power_down(dc);
- break;
- }
-
- }
- }
- }
-
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -280,6 +260,10 @@ void dcn31_init_hw(struct dc *dc)
/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
REG_WRITE(DIO_MEM_PWR_CTRL, 0);
+ // Set i2c to light sleep until engine is setup
+ if (dc->debug.enable_mem_low_power.bits.i2c)
+ REG_UPDATE(DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, 1);
+
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
@@ -303,8 +287,10 @@ void dcn31_init_hw(struct dc *dc)
if (dc->res_pool->hubbub->funcs->force_pstate_change_control)
dc->res_pool->hubbub->funcs->force_pstate_change_control(
dc->res_pool->hubbub, false, false);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
if (dc->res_pool->hubbub->funcs->init_crb)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
+#endif
}
void dcn31_dsc_pg_control(
@@ -319,6 +305,12 @@ void dcn31_dsc_pg_control(
if (hws->ctx->dc->debug.disable_dsc_power_gate)
return;
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc &&
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc &&
+ power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->enable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1);
@@ -355,6 +347,13 @@ void dcn31_dsc_pg_control(
if (org_ip_request_cntl == 0)
REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0);
+
+ if (hws->ctx->dc->debug.root_clock_optimization.bits.dsc) {
+ if (hws->ctx->dc->res_pool->dccg->funcs->disable_dsc && !power_on)
+ hws->ctx->dc->res_pool->dccg->funcs->disable_dsc(
+ hws->ctx->dc->res_pool->dccg, dsc_inst);
+ }
+
}
@@ -420,7 +419,7 @@ void dcn31_z10_save_init(struct dc *dc)
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
}
-void dcn31_z10_restore(struct dc *dc)
+void dcn31_z10_restore(const struct dc *dc)
{
union dmub_rb_cmd cmd;
@@ -594,19 +593,7 @@ void dcn31_reset_hw_ctx_wrap(
old_clk->funcs->cs_power_down(old_clk);
}
}
-}
-
-bool dcn31_is_abm_supported(struct dc *dc,
- struct dc_state *context, struct dc_stream_state *stream)
-{
- int i;
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->stream == stream &&
- (pipe_ctx->prev_odm_pipe == NULL && pipe_ctx->next_odm_pipe == NULL))
- return true;
- }
- return false;
+ /* New dc_state in the process of being applied to hardware. */
+ dc->current_state->res_ctx.link_enc_cfg_ctx.mode = LINK_ENC_CFG_TRANSIENT;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
index 140435e4f7ff..7ae45dd202d9 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.h
@@ -43,7 +43,7 @@ void dcn31_enable_power_gating_plane(
void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx);
-void dcn31_z10_restore(struct dc *dc);
+void dcn31_z10_restore(const struct dc *dc);
void dcn31_z10_save_init(struct dc *dc);
void dcn31_hubp_pg_control(struct dce_hwseq *hws, unsigned int hubp_inst, bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
index 40011cd3c8ef..c6a737781ad1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_init.c
@@ -34,6 +34,7 @@
static const struct hw_sequencer_funcs dcn31_funcs = {
.program_gamut_remap = dcn10_program_gamut_remap,
.init_hw = dcn31_init_hw,
+ .power_down_on_boot = dcn10_power_down_on_boot,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
@@ -93,12 +94,12 @@ static const struct hw_sequencer_funcs dcn31_funcs = {
.set_flip_control_gsl = dcn20_set_flip_control_gsl,
.get_vupdate_offset_from_vsync = dcn10_get_vupdate_offset_from_vsync,
.calc_vupdate_position = dcn10_calc_vupdate_position,
+ .power_down = dce110_power_down,
.set_backlight_level = dcn21_set_backlight_level,
.set_abm_immediate_disable = dcn21_set_abm_immediate_disable,
.set_pipe = dcn21_set_pipe,
.z10_restore = dcn31_z10_restore,
.z10_save_init = dcn31_z10_save_init,
- .is_abm_supported = dcn31_is_abm_supported,
.set_disp_pattern_generator = dcn30_set_disp_pattern_generator,
.update_visual_confirm_color = dcn20_update_visual_confirm_color,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
index 0006bbac466c..87b2c2428842 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c
@@ -52,7 +52,12 @@
#include "dcn30/dcn30_vpg.h"
#include "dcn30/dcn30_afmt.h"
#include "dcn30/dcn30_dio_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_stream_encoder.h"
+#include "dcn31/dcn31_hpo_dp_link_encoder.h"
+#include "dcn31/dcn31_apg.h"
#include "dcn31/dcn31_dio_link_encoder.h"
+#include "dcn31/dcn31_vpg.h"
+#include "dcn31/dcn31_afmt.h"
#include "dce/dce_clock_source.h"
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
@@ -96,8 +101,6 @@
#include "link_enc_cfg.h"
#define DC_LOGGER_INIT(logger)
-#define fixed16_to_double(x) (((double) x) / ((double) (1 << 16)))
-#define fixed16_to_double_to_cpu(x) fixed16_to_double(le32_to_cpu(x))
#define DCN3_1_DEFAULT_DET_SIZE 384
@@ -217,8 +220,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_1_soc = {
.num_states = 5,
.sr_exit_time_us = 9.0,
.sr_enter_plus_exit_time_us = 11.0,
- .sr_exit_z8_time_us = 402.0,
- .sr_enter_plus_exit_z8_time_us = 520.0,
+ .sr_exit_z8_time_us = 442.0,
+ .sr_enter_plus_exit_z8_time_us = 560.0,
.writeback_latency_us = 12.0,
.dram_channel_width_bytes = 4,
.round_trip_ping_latency_dcfclk_cycles = 106,
@@ -363,7 +366,7 @@ static const struct dce110_clk_src_mask cs_mask = {
#define abm_regs(id)\
[id] = {\
- ABM_DCN301_REG_LIST(id)\
+ ABM_DCN302_REG_LIST(id)\
}
static const struct dce_abm_registers abm_regs[] = {
@@ -411,10 +414,10 @@ static const struct dce_audio_mask audio_mask = {
#define vpg_regs(id)\
[id] = {\
- VPG_DCN3_REG_LIST(id)\
+ VPG_DCN31_REG_LIST(id)\
}
-static const struct dcn30_vpg_registers vpg_regs[] = {
+static const struct dcn31_vpg_registers vpg_regs[] = {
vpg_regs(0),
vpg_regs(1),
vpg_regs(2),
@@ -427,20 +430,20 @@ static const struct dcn30_vpg_registers vpg_regs[] = {
vpg_regs(9),
};
-static const struct dcn30_vpg_shift vpg_shift = {
- DCN3_VPG_MASK_SH_LIST(__SHIFT)
+static const struct dcn31_vpg_shift vpg_shift = {
+ DCN31_VPG_MASK_SH_LIST(__SHIFT)
};
-static const struct dcn30_vpg_mask vpg_mask = {
- DCN3_VPG_MASK_SH_LIST(_MASK)
+static const struct dcn31_vpg_mask vpg_mask = {
+ DCN31_VPG_MASK_SH_LIST(_MASK)
};
#define afmt_regs(id)\
[id] = {\
- AFMT_DCN3_REG_LIST(id)\
+ AFMT_DCN31_REG_LIST(id)\
}
-static const struct dcn30_afmt_registers afmt_regs[] = {
+static const struct dcn31_afmt_registers afmt_regs[] = {
afmt_regs(0),
afmt_regs(1),
afmt_regs(2),
@@ -449,12 +452,32 @@ static const struct dcn30_afmt_registers afmt_regs[] = {
afmt_regs(5)
};
-static const struct dcn30_afmt_shift afmt_shift = {
- DCN3_AFMT_MASK_SH_LIST(__SHIFT)
+static const struct dcn31_afmt_shift afmt_shift = {
+ DCN31_AFMT_MASK_SH_LIST(__SHIFT)
};
-static const struct dcn30_afmt_mask afmt_mask = {
- DCN3_AFMT_MASK_SH_LIST(_MASK)
+static const struct dcn31_afmt_mask afmt_mask = {
+ DCN31_AFMT_MASK_SH_LIST(_MASK)
+};
+
+#define apg_regs(id)\
+[id] = {\
+ APG_DCN31_REG_LIST(id)\
+}
+
+static const struct dcn31_apg_registers apg_regs[] = {
+ apg_regs(0),
+ apg_regs(1),
+ apg_regs(2),
+ apg_regs(3)
+};
+
+static const struct dcn31_apg_shift apg_shift = {
+ DCN31_APG_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_apg_mask apg_mask = {
+ DCN31_APG_MASK_SH_LIST(_MASK)
};
#define stream_enc_regs(id)\
@@ -538,6 +561,49 @@ static const struct dcn10_link_enc_mask le_mask = {
DPCS_DCN31_MASK_SH_LIST(_MASK)
};
+#define hpo_dp_stream_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_STREAM_ENC_REG_LIST(id)\
+}
+
+static const struct dcn31_hpo_dp_stream_encoder_registers hpo_dp_stream_enc_regs[] = {
+ hpo_dp_stream_encoder_reg_list(0),
+ hpo_dp_stream_encoder_reg_list(1),
+ hpo_dp_stream_encoder_reg_list(2),
+ hpo_dp_stream_encoder_reg_list(3),
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_shift hpo_dp_se_shift = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_stream_encoder_mask hpo_dp_se_mask = {
+ DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(_MASK)
+};
+
+#define hpo_dp_link_encoder_reg_list(id)\
+[id] = {\
+ DCN3_1_HPO_DP_LINK_ENC_REG_LIST(id),\
+ DCN3_1_RDPCSTX_REG_LIST(0),\
+ DCN3_1_RDPCSTX_REG_LIST(1),\
+ DCN3_1_RDPCSTX_REG_LIST(2),\
+ DCN3_1_RDPCSTX_REG_LIST(3),\
+ DCN3_1_RDPCSTX_REG_LIST(4)\
+}
+
+static const struct dcn31_hpo_dp_link_encoder_registers hpo_dp_link_enc_regs[] = {
+ hpo_dp_link_encoder_reg_list(0),
+ hpo_dp_link_encoder_reg_list(1),
+};
+
+static const struct dcn31_hpo_dp_link_encoder_shift hpo_dp_le_shift = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(__SHIFT)
+};
+
+static const struct dcn31_hpo_dp_link_encoder_mask hpo_dp_le_mask = {
+ DCN3_1_HPO_DP_LINK_ENC_MASK_SH_LIST(_MASK)
+};
+
#define dpp_regs(id)\
[id] = {\
DPP_REG_LIST_DCN30(id),\
@@ -831,7 +897,8 @@ static const struct dce_hwseq_registers hwseq_reg = {
HWS_SF(, DMU_MEM_PWR_CNTL, DMCU_ERAM_MEM_PWR_FORCE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_UNASSIGNED_PWR_MODE, mask_sh), \
HWS_SF(, ODM_MEM_PWR_CTRL3, ODM_MEM_VBLANK_PWR_MODE, mask_sh), \
- HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh)
+ HWS_SF(, MMHUBBUB_MEM_PWR_CNTL, VGA_MEM_PWR_FORCE, mask_sh), \
+ HWS_SF(, DIO_MEM_PWR_CTRL, I2C_LIGHT_SLEEP_FORCE, mask_sh)
static const struct dce_hwseq_shift hwseq_shift = {
HWSEQ_DCN31_MASK_SH_LIST(__SHIFT)
@@ -879,6 +946,8 @@ static const struct resource_caps res_cap_dcn31 = {
.num_audio = 5,
.num_stream_encoder = 5,
.num_dig_link_enc = 5,
+ .num_hpo_dp_stream_encoder = 4,
+ .num_hpo_dp_link_encoder = 2,
.num_pll = 5,
.num_dwb = 1,
.num_ddc = 5,
@@ -928,7 +997,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.disable_dcc = DCC_ENABLE,
.vsr_support = true,
.performance_trace = false,
- .max_downscale_src_width = 3840,/*upto 4K*/
+ .max_downscale_src_width = 4096,/*up to true 4K*/
.disable_pplib_wm_range = false,
.scl_reset_length10 = true,
.sanity_checks = false,
@@ -939,13 +1008,15 @@ static const struct dc_debug_options debug_defaults_drv = {
.use_max_lb = true,
.enable_mem_low_power = {
.bits = {
- .vga = false,
- .i2c = false,
+ .vga = true,
+ .i2c = true,
.dmcu = false, // This is previously known to cause hang on S3 cycles if enabled
- .dscl = false,
- .cm = false,
- .mpc = false,
- .optc = false,
+ .dscl = true,
+ .cm = true,
+ .mpc = true,
+ .optc = true,
+ .vpg = true,
+ .afmt = true,
}
},
.optimize_edp_link_rate = true,
@@ -1230,34 +1301,53 @@ static struct vpg *dcn31_vpg_create(
struct dc_context *ctx,
uint32_t inst)
{
- struct dcn30_vpg *vpg3 = kzalloc(sizeof(struct dcn30_vpg), GFP_KERNEL);
+ struct dcn31_vpg *vpg31 = kzalloc(sizeof(struct dcn31_vpg), GFP_KERNEL);
- if (!vpg3)
+ if (!vpg31)
return NULL;
- vpg3_construct(vpg3, ctx, inst,
+ vpg31_construct(vpg31, ctx, inst,
&vpg_regs[inst],
&vpg_shift,
&vpg_mask);
- return &vpg3->base;
+ return &vpg31->base;
}
static struct afmt *dcn31_afmt_create(
struct dc_context *ctx,
uint32_t inst)
{
- struct dcn30_afmt *afmt3 = kzalloc(sizeof(struct dcn30_afmt), GFP_KERNEL);
+ struct dcn31_afmt *afmt31 = kzalloc(sizeof(struct dcn31_afmt), GFP_KERNEL);
- if (!afmt3)
+ if (!afmt31)
return NULL;
- afmt3_construct(afmt3, ctx, inst,
+ afmt31_construct(afmt31, ctx, inst,
&afmt_regs[inst],
&afmt_shift,
&afmt_mask);
- return &afmt3->base;
+ // Light sleep by default, no need to power down here
+
+ return &afmt31->base;
+}
+
+static struct apg *dcn31_apg_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct dcn31_apg *apg31 = kzalloc(sizeof(struct dcn31_apg), GFP_KERNEL);
+
+ if (!apg31)
+ return NULL;
+
+ apg31_construct(apg31, ctx, inst,
+ &apg_regs[inst],
+ &apg_shift,
+ &apg_mask);
+
+ return &apg31->base;
}
static struct stream_encoder *dcn31_stream_encoder_create(
@@ -1281,8 +1371,12 @@ static struct stream_encoder *dcn31_stream_encoder_create(
vpg = dcn31_vpg_create(ctx, vpg_inst);
afmt = dcn31_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt)
+ if (!enc1 || !vpg || !afmt) {
+ kfree(enc1);
+ kfree(vpg);
+ kfree(afmt);
return NULL;
+ }
if (ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0) {
@@ -1298,6 +1392,72 @@ static struct stream_encoder *dcn31_stream_encoder_create(
return &enc1->base;
}
+static struct hpo_dp_stream_encoder *dcn31_hpo_dp_stream_encoder_create(
+ enum engine_id eng_id,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_stream_encoder *hpo_dp_enc31;
+ struct vpg *vpg;
+ struct apg *apg;
+ uint32_t hpo_dp_inst;
+ uint32_t vpg_inst;
+ uint32_t apg_inst;
+
+ ASSERT((eng_id >= ENGINE_ID_HPO_DP_0) && (eng_id <= ENGINE_ID_HPO_DP_3));
+ hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
+
+ /* Mapping of VPG register blocks to HPO DP block instance:
+ * VPG[6] -> HPO_DP[0]
+ * VPG[7] -> HPO_DP[1]
+ * VPG[8] -> HPO_DP[2]
+ * VPG[9] -> HPO_DP[3]
+ */
+ vpg_inst = hpo_dp_inst + 6;
+
+ /* Mapping of APG register blocks to HPO DP block instance:
+ * APG[0] -> HPO_DP[0]
+ * APG[1] -> HPO_DP[1]
+ * APG[2] -> HPO_DP[2]
+ * APG[3] -> HPO_DP[3]
+ */
+ apg_inst = hpo_dp_inst;
+
+ /* allocate HPO stream encoder and create VPG sub-block */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_stream_encoder), GFP_KERNEL);
+ vpg = dcn31_vpg_create(ctx, vpg_inst);
+ apg = dcn31_apg_create(ctx, apg_inst);
+
+ if (!hpo_dp_enc31 || !vpg || !apg) {
+ kfree(hpo_dp_enc31);
+ kfree(vpg);
+ kfree(apg);
+ return NULL;
+ }
+
+ dcn31_hpo_dp_stream_encoder_construct(hpo_dp_enc31, ctx, ctx->dc_bios,
+ hpo_dp_inst, eng_id, vpg, apg,
+ &hpo_dp_stream_enc_regs[hpo_dp_inst],
+ &hpo_dp_se_shift, &hpo_dp_se_mask);
+
+ return &hpo_dp_enc31->base;
+}
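
The create function above derives all sub-block instances from the engine ID: HPO DP stream encoder N uses APG N and VPG N + 6, matching the two mapping comments. A trivial standalone sketch of that mapping, with an arbitrary numeric value standing in for ENGINE_ID_HPO_DP_0:

#include <stdio.h>

enum { ENGINE_ID_HPO_DP_0 = 100 };   /* hypothetical numeric value */

int main(void)
{
	for (int eng_id = ENGINE_ID_HPO_DP_0; eng_id < ENGINE_ID_HPO_DP_0 + 4; eng_id++) {
		int hpo_dp_inst = eng_id - ENGINE_ID_HPO_DP_0;
		int vpg_inst = hpo_dp_inst + 6;   /* VPG[6..9] feed HPO_DP[0..3] */
		int apg_inst = hpo_dp_inst;       /* APG[0..3] map one-to-one   */

		printf("HPO_DP[%d] -> VPG[%d], APG[%d]\n",
		       hpo_dp_inst, vpg_inst, apg_inst);
	}
	return 0;
}
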
+
+static struct hpo_dp_link_encoder *dcn31_hpo_dp_link_encoder_create(
+ uint8_t inst,
+ struct dc_context *ctx)
+{
+ struct dcn31_hpo_dp_link_encoder *hpo_dp_enc31;
+
+ /* allocate HPO link encoder */
+ hpo_dp_enc31 = kzalloc(sizeof(struct dcn31_hpo_dp_link_encoder), GFP_KERNEL);
+ if (!hpo_dp_enc31)
+ return NULL;
+
+ hpo_dp_link_encoder31_construct(hpo_dp_enc31, ctx, inst,
+ &hpo_dp_link_enc_regs[inst],
+ &hpo_dp_le_shift, &hpo_dp_le_mask);
+
+ return &hpo_dp_enc31->base;
+}
+
static struct dce_hwseq *dcn31_hwseq_create(
struct dc_context *ctx)
{
@@ -1308,6 +1468,13 @@ static struct dce_hwseq *dcn31_hwseq_create(
hws->regs = &hwseq_reg;
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
+ /* DCN3.1 FPGA Workaround
+ * Need to enable HPO DP Stream Encoder before setting OTG master enable.
+ * To achieve this, the call to enable_stream_timing is made only AFTER
+ * the call to core_link_enable_stream.
+ */
+ if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment))
+ hws->wa.dp_hpo_and_otg_sequence = true;
}
return hws;
}
@@ -1315,6 +1482,8 @@ static const struct resource_create_funcs res_create_funcs = {
.read_dce_straps = read_dce_straps,
.create_audio = dcn31_create_audio,
.create_stream_encoder = dcn31_stream_encoder_create,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
@@ -1322,6 +1491,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = {
.read_dce_straps = NULL,
.create_audio = NULL,
.create_stream_encoder = NULL,
+ .create_hpo_dp_stream_encoder = dcn31_hpo_dp_stream_encoder_create,
+ .create_hpo_dp_link_encoder = dcn31_hpo_dp_link_encoder_create,
.create_hwseq = dcn31_hwseq_create,
};
@@ -1344,6 +1515,28 @@ static void dcn31_resource_destruct(struct dcn31_resource_pool *pool)
}
}
+ for (i = 0; i < pool->base.hpo_dp_stream_enc_count; i++) {
+ if (pool->base.hpo_dp_stream_enc[i] != NULL) {
+ if (pool->base.hpo_dp_stream_enc[i]->vpg != NULL) {
+ kfree(DCN30_VPG_FROM_VPG(pool->base.hpo_dp_stream_enc[i]->vpg));
+ pool->base.hpo_dp_stream_enc[i]->vpg = NULL;
+ }
+ if (pool->base.hpo_dp_stream_enc[i]->apg != NULL) {
+ kfree(DCN31_APG_FROM_APG(pool->base.hpo_dp_stream_enc[i]->apg));
+ pool->base.hpo_dp_stream_enc[i]->apg = NULL;
+ }
+ kfree(DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(pool->base.hpo_dp_stream_enc[i]));
+ pool->base.hpo_dp_stream_enc[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < pool->base.hpo_dp_link_enc_count; i++) {
+ if (pool->base.hpo_dp_link_enc[i] != NULL) {
+ kfree(DCN3_1_HPO_DP_LINK_ENC_FROM_HPO_LINK_ENC(pool->base.hpo_dp_link_enc[i]));
+ pool->base.hpo_dp_link_enc[i] = NULL;
+ }
+ }
+
for (i = 0; i < pool->base.res_cap->num_dsc; i++) {
if (pool->base.dscs[i] != NULL)
dcn20_dsc_destroy(&pool->base.dscs[i]);
@@ -1590,6 +1783,13 @@ static int dcn31_populate_dml_pipes_from_context(
pipe = &res_ctx->pipe_ctx[i];
timing = &pipe->stream->timing;
+ /*
+ * Immediate flip can be set dynamically after enabling the plane.
+ * We need to require support for immediate flip or underflow can be
+ * intermittently experienced depending on peak b/w requirements.
+ */
+ pipes[pipe_cnt].pipe.src.immediate_flip = true;
+
pipes[pipe_cnt].pipe.src.unbounded_req_mode = false;
pipes[pipe_cnt].pipe.src.gpuvm = true;
pipes[pipe_cnt].pipe.src.dcc_fraction_of_zs_req_luma = 0;
@@ -1632,7 +1832,7 @@ static int dcn31_populate_dml_pipes_from_context(
return pipe_cnt;
}
-static void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context)
{
if (dc->clk_mgr->bw_params->wm_table.entries[WM_A].valid) {
context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.entries[WM_A].pstate_latency_us;
@@ -1653,6 +1853,15 @@ static void dcn31_calculate_wm_and_dlg_fp(
if (context->bw_ctx.dml.soc.min_dcfclk > dcfclk)
dcfclk = context->bw_ctx.dml.soc.min_dcfclk;
+ /* Clocks are not recalculated for 0-pipe configs, and leftover high
+ * clocks can block S0i3 since they prevent low power states.
+ * Override any clocks that could block S0i3 to their minimum here.
+ */
+ if (pipe_cnt == 0) {
+ context->bw_ctx.bw.dcn.clk.dcfclk_khz = dcfclk; // always should be vlevel 0
+ return;
+ }
+
pipes[0].clks_cfg.voltage = vlevel;
pipes[0].clks_cfg.dcfclk_mhz = dcfclk;
pipes[0].clks_cfg.socclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].socclk_mhz;
@@ -1767,7 +1976,7 @@ static void dcn31_calculate_wm_and_dlg_fp(
dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
}
-static void dcn31_calculate_wm_and_dlg(
+void dcn31_calculate_wm_and_dlg(
struct dc *dc, struct dc_state *context,
display_e2e_pipe_params_st *pipes,
int pipe_cnt,
@@ -1778,6 +1987,58 @@ static void dcn31_calculate_wm_and_dlg(
DC_FP_END();
}
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate)
+{
+ bool out = false;
+
+ BW_VAL_TRACE_SETUP();
+
+ int vlevel = 0;
+ int pipe_cnt = 0;
+ display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ BW_VAL_TRACE_COUNT();
+
+ out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate);
+
+ // Disable fast_validate to set min dcfclk in calculate_wm_and_dlg
+ if (pipe_cnt == 0)
+ fast_validate = false;
+
+ if (!out)
+ goto validate_fail;
+
+ BW_VAL_TRACE_END_VOLTAGE_LEVEL();
+
+ if (fast_validate) {
+ BW_VAL_TRACE_SKIP(fast);
+ goto validate_out;
+ }
+
+ dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
+
+ BW_VAL_TRACE_END_WATERMARKS();
+
+ goto validate_out;
+
+validate_fail:
+ DC_LOG_WARNING("Mode Validation Warning: %s failed validation.\n",
+ dml_get_status_message(context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states]));
+
+ BW_VAL_TRACE_SKIP(fail);
+ out = false;
+
+validate_out:
+ kfree(pipes);
+
+ BW_VAL_TRACE_FINISH();
+
+ return out;
+}
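For reference, a minimal standalone sketch of the goto-based cleanup pattern used by dcn31_validate_bandwidth above: one exit label frees the pipes allocation on every path, while a separate failure label handles the warning before falling through to cleanup. Illustrative only; plain C library calls stand in for kzalloc/kfree and the trace macros.

#include <stdbool.h>
#include <stdlib.h>

bool validate_example(size_t pipe_count, bool fast_validate)
{
	bool out = false;
	int *pipes = calloc(pipe_count, sizeof(*pipes));

	if (!pipes)
		return false;

	out = pipe_count > 0;            /* stand-in for the internal validation pass */
	if (!out)
		goto validate_fail;

	if (fast_validate)
		goto validate_out;       /* skip the expensive second pass */

	/* full watermark/DLG calculation would run here */
	goto validate_out;

validate_fail:
	out = false;                     /* the real code logs a warning here */

validate_out:
	free(pipes);                     /* single cleanup point for all paths */
	return out;
}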
+
static struct dc_cap_funcs cap_funcs = {
.get_dcc_compression_cap = dcn20_get_dcc_compression_cap
};
@@ -1860,7 +2121,7 @@ static struct resource_funcs dcn31_res_pool_funcs = {
.link_encs_assign = link_enc_cfg_link_encs_assign,
.link_enc_unassign = link_enc_cfg_link_enc_unassign,
.panel_cntl_create = dcn31_panel_cntl_create,
- .validate_bandwidth = dcn30_validate_bandwidth,
+ .validate_bandwidth = dcn31_validate_bandwidth,
.calculate_wm_and_dlg = dcn31_calculate_wm_and_dlg,
.update_soc_for_wm_a = dcn31_update_soc_for_wm_a,
.populate_dml_pipes = dcn31_populate_dml_pipes_from_context,
@@ -1935,6 +2196,7 @@ static bool dcn31_resource_construct(
dc->caps.max_slave_rgb_planes = 1;
dc->caps.post_blend_color_processing = true;
dc->caps.force_dp_tps4_for_cp2520 = true;
+ dc->caps.dp_hpo = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
dc->caps.is_apu = true;
@@ -2173,6 +2435,13 @@ static bool dcn31_resource_construct(
pool->base.sw_i2cs[i] = NULL;
}
+ if (dc->ctx->asic_id.chip_family == FAMILY_YELLOW_CARP &&
+ dc->ctx->asic_id.hw_internal_rev == YELLOW_CARP_B0 &&
+ !dc->debug.dpia_debug.bits.disable_dpia) {
+ /* YELLOW CARP B0 has 4 DPIAs */
+ pool->base.usb4_dpia_count = 4;
+ }
+
/* Audio, Stream Encoders including HPO and virtual, MPC 3D LUTs */
if (!resource_construct(num_virtual_links, dc, &pool->base,
(!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) ?
@@ -2189,6 +2458,8 @@ static bool dcn31_resource_construct(
dc->cap_funcs = cap_funcs;
+ dc->dcn_ip->max_num_dpp = dcn3_1_ip.max_num_dpp;
+
DC_FP_END();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
index 93571c976996..416fe7a721d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.h
@@ -35,6 +35,16 @@ struct dcn31_resource_pool {
struct resource_pool base;
};
+bool dcn31_validate_bandwidth(struct dc *dc,
+ struct dc_state *context,
+ bool fast_validate);
+void dcn31_calculate_wm_and_dlg(
+ struct dc *dc, struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel);
+void dcn31_update_soc_for_wm_a(struct dc *dc, struct dc_state *context);
+
struct resource_pool *dcn31_create_resource_pool(
const struct dc_init_data *init_data,
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
new file mode 100644
index 000000000000..f1deb1c3c363
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_bios_types.h"
+#include "dcn30/dcn30_vpg.h"
+#include "dcn31_vpg.h"
+#include "reg_helper.h"
+#include "dc/dc.h"
+
+#define DC_LOGGER \
+ vpg31->base.ctx->logger
+
+#define REG(reg)\
+ (vpg31->regs->reg)
+
+#undef FN
+#define FN(reg_name, field_name) \
+ vpg31->vpg_shift->field_name, vpg31->vpg_mask->field_name
+
+
+#define CTX \
+ vpg31->base.ctx
+
+static struct vpg_funcs dcn31_vpg_funcs = {
+ .update_generic_info_packet = vpg3_update_generic_info_packet,
+ .vpg_poweron = vpg31_poweron,
+ .vpg_powerdown = vpg31_powerdown,
+};
+
+void vpg31_powerdown(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 0, VPG_GSP_LIGHT_SLEEP_FORCE, 1);
+}
+
+void vpg31_poweron(struct vpg *vpg)
+{
+ struct dcn31_vpg *vpg31 = DCN31_VPG_FROM_VPG(vpg);
+
+ if (vpg->ctx->dc->debug.enable_mem_low_power.bits.vpg == false)
+ return;
+
+ REG_UPDATE_2(VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, 1, VPG_GSP_LIGHT_SLEEP_FORCE, 0);
+}
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask)
+{
+ vpg31->base.ctx = ctx;
+
+ vpg31->base.inst = inst;
+ vpg31->base.funcs = &dcn31_vpg_funcs;
+
+ vpg31->regs = vpg_regs;
+ vpg31->vpg_shift = vpg_shift;
+ vpg31->vpg_mask = vpg_mask;
+}
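The new VPG file follows the construct/create split used by the other DCN3.1 blocks: vpg31_construct only wires a preallocated object, while a separate create helper in the resource file owns allocation and NULL handling. A plain C sketch of that split, with stand-in types and names (the real code uses kzalloc/kfree and the register/shift/mask tables from dcn31_resource.c):

#include <stdint.h>
#include <stdlib.h>

struct vpg_example {
	uint32_t inst;
	const uint32_t *regs;
};

void vpg_example_construct(struct vpg_example *vpg, uint32_t inst,
			   const uint32_t *regs)
{
	vpg->inst = inst;    /* no allocation, no failure path */
	vpg->regs = regs;
}

struct vpg_example *vpg_example_create(uint32_t inst, const uint32_t *regs)
{
	struct vpg_example *vpg = calloc(1, sizeof(*vpg));

	if (!vpg)
		return NULL; /* create owns allocation-failure handling */

	vpg_example_construct(vpg, inst, regs);
	return vpg;
}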
diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
new file mode 100644
index 000000000000..0e76eabce441
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_vpg.h
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_DCN31_VPG_H__
+#define __DAL_DCN31_VPG_H__
+
+
+#define DCN31_VPG_FROM_VPG(vpg)\
+ container_of(vpg, struct dcn31_vpg, base)
+
+#define VPG_DCN31_REG_LIST(id) \
+ SRI(VPG_GENERIC_STATUS, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_ACCESS_CTRL, VPG, id), \
+ SRI(VPG_GENERIC_PACKET_DATA, VPG, id), \
+ SRI(VPG_GSP_FRAME_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG, id), \
+ SRI(VPG_MEM_PWR, VPG, id)
+
+struct dcn31_vpg_registers {
+ uint32_t VPG_GENERIC_STATUS;
+ uint32_t VPG_GENERIC_PACKET_ACCESS_CTRL;
+ uint32_t VPG_GENERIC_PACKET_DATA;
+ uint32_t VPG_GSP_FRAME_UPDATE_CTRL;
+ uint32_t VPG_GSP_IMMEDIATE_UPDATE_CTRL;
+ uint32_t VPG_MEM_PWR;
+};
+
+#define DCN31_VPG_MASK_SH_LIST(mask_sh)\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_OCCURED, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_STATUS, VPG_GENERIC_CONFLICT_CLR, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_ACCESS_CTRL, VPG_GENERIC_DATA_INDEX, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE0, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE1, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE2, mask_sh),\
+ SE_SF(VPG0_VPG_GENERIC_PACKET_DATA, VPG_GENERIC_DATA_BYTE3, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC0_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC1_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC2_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC3_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC4_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC5_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC6_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC7_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC8_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC9_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC10_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC11_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC12_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC13_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_FRAME_UPDATE_CTRL, VPG_GENERIC14_FRAME_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC8_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC9_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC10_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC11_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC12_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC13_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_GSP_IMMEDIATE_UPDATE_CTRL, VPG_GENERIC14_IMMEDIATE_UPDATE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_LIGHT_SLEEP_DIS, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_LIGHT_SLEEP_FORCE, mask_sh),\
+ SE_SF(VPG0_VPG_MEM_PWR, VPG_GSP_MEM_PWR_STATE, mask_sh)
+
+#define VPG_DCN31_REG_FIELD_LIST(type) \
+ type VPG_GENERIC_CONFLICT_OCCURED;\
+ type VPG_GENERIC_CONFLICT_CLR;\
+ type VPG_GENERIC_DATA_INDEX;\
+ type VPG_GENERIC_DATA_BYTE0;\
+ type VPG_GENERIC_DATA_BYTE1;\
+ type VPG_GENERIC_DATA_BYTE2;\
+ type VPG_GENERIC_DATA_BYTE3;\
+ type VPG_GENERIC0_FRAME_UPDATE;\
+ type VPG_GENERIC1_FRAME_UPDATE;\
+ type VPG_GENERIC2_FRAME_UPDATE;\
+ type VPG_GENERIC3_FRAME_UPDATE;\
+ type VPG_GENERIC4_FRAME_UPDATE;\
+ type VPG_GENERIC5_FRAME_UPDATE;\
+ type VPG_GENERIC6_FRAME_UPDATE;\
+ type VPG_GENERIC7_FRAME_UPDATE;\
+ type VPG_GENERIC8_FRAME_UPDATE;\
+ type VPG_GENERIC9_FRAME_UPDATE;\
+ type VPG_GENERIC10_FRAME_UPDATE;\
+ type VPG_GENERIC11_FRAME_UPDATE;\
+ type VPG_GENERIC12_FRAME_UPDATE;\
+ type VPG_GENERIC13_FRAME_UPDATE;\
+ type VPG_GENERIC14_FRAME_UPDATE;\
+ type VPG_GENERIC0_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC1_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC2_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC3_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC4_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC5_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC6_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC7_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC8_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC9_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC10_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC11_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC12_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC13_IMMEDIATE_UPDATE;\
+ type VPG_GENERIC14_IMMEDIATE_UPDATE;\
+ type VPG_GSP_MEM_LIGHT_SLEEP_DIS;\
+ type VPG_GSP_LIGHT_SLEEP_FORCE;\
+ type VPG_GSP_MEM_PWR_STATE
+
+struct dcn31_vpg_shift {
+ VPG_DCN31_REG_FIELD_LIST(uint8_t);
+};
+
+struct dcn31_vpg_mask {
+ VPG_DCN31_REG_FIELD_LIST(uint32_t);
+};
+
+struct dcn31_vpg {
+ struct vpg base;
+ const struct dcn31_vpg_registers *regs;
+ const struct dcn31_vpg_shift *vpg_shift;
+ const struct dcn31_vpg_mask *vpg_mask;
+};
+
+void vpg31_poweron(
+ struct vpg *vpg);
+
+void vpg31_powerdown(
+ struct vpg *vpg);
+
+void vpg31_construct(struct dcn31_vpg *vpg31,
+ struct dc_context *ctx,
+ uint32_t inst,
+ const struct dcn31_vpg_registers *vpg_regs,
+ const struct dcn31_vpg_shift *vpg_shift,
+ const struct dcn31_vpg_mask *vpg_mask);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index a9170b9f84d3..511f9e1159c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -35,8 +35,11 @@ struct cp_psp_stream_config {
uint8_t link_enc_idx;
uint8_t stream_enc_idx;
uint8_t phy_idx;
+ uint8_t dio_output_idx;
+ uint8_t dio_output_type;
uint8_t assr_enabled;
uint8_t mst_enabled;
+ uint8_t dp2_enabled;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
index 9ab854293ace..0fe66b080a03 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h
@@ -160,6 +160,12 @@ void dm_set_dcn_clocks(
struct dc_context *ctx,
struct dc_clocks *clks);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable);
+#endif
+
+void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz);
+
bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable);
void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us);
@@ -173,4 +179,9 @@ int dm_helper_dmub_aux_transfer_sync(
const struct dc_link *link,
struct aux_payload *payload,
enum aux_return_code_type *operation_result);
+enum set_config_status;
+int dm_helpers_dmub_set_config_sync(struct dc_context *ctx,
+ const struct dc_link *link,
+ struct set_config_cmd_payload *payload,
+ enum set_config_status *operation_result);
#endif /* __DM_HELPERS__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index 56055df2e8d2..eee6672bd32d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -58,7 +58,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
ifdef CONFIG_DRM_AMD_DC_DCN
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
@@ -70,6 +70,8 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(fram
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags)
@@ -83,7 +85,9 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
endif
CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags)
@@ -93,12 +97,14 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags)
DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \
ifdef CONFIG_DRM_AMD_DC_DCN
+DML += dcn20/dcn20_fpu.o
DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o
-DML += dcn2x/dcn2x.o
DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o
DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o
DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o
DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o
+DML += dcn301/dcn301_fpu.o
+DML += dsc/rc_calc_fpu.o
endif
AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML))
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
index c58522436291..d590dc917363 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c
@@ -26,7 +26,7 @@
#include "resource.h"
-#include "dcn2x.h"
+#include "dcn20_fpu.h"
/**
* DOC: DCN2x FPU manipulation Overview
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
index 331547ba0713..36f26126d574 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn2x/dcn2x.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.h
@@ -24,11 +24,11 @@
*
*/
-#ifndef __DCN2X_H__
-#define __DCN2X_H__
+#ifndef __DCN20_FPU_H__
+#define __DCN20_FPU_H__
void dcn20_populate_dml_writeback_from_context(struct dc *dc,
struct resource_context *res_ctx,
display_e2e_pipe_params_st *pipes);
-#endif /* __DCN2X_H__ */
+#endif /* __DCN20_FPU_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
index 2091dd8c252d..246071c72f6b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
@@ -37,8 +37,8 @@
//
static void dml20_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
- display_rq_params_st * rq_param,
- const display_pipe_source_params_st pipe_src_param);
+ display_rq_params_st *rq_param,
+ const display_pipe_source_params_st *pipe_src_param);
// Function: dml20_rq_dlg_get_dlg_params
// Calculate deadline related parameters
@@ -49,8 +49,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
@@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
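The recurring change in these DML hunks is converting large display_* parameter structs from pass-by-value to pass-by-const-pointer, so the structs are no longer copied onto the stack of each deeply nested FPU helper. A minimal illustration of the pattern, with types and names that are not from the patch:

#include <stdint.h>

struct sizing_example {
	uint32_t chunk_bytes;
	uint32_t min_chunk_bytes;
};

/* before: the whole struct is copied for every call */
uint32_t chunk_kb_by_value(const struct sizing_example s)
{
	return s.chunk_bytes / 1024;
}

/* after: only a pointer is passed; the caller's struct is read in place */
uint32_t chunk_kb_by_pointer(const struct sizing_example *s)
{
	return s->chunk_bytes / 1024;
}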
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_source_params_st pipe_src_param,
+ const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
}
static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -751,7 +751,7 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -763,20 +763,20 @@ static void dml20_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml20_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
- dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -787,8 +787,8 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -935,7 +935,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -995,20 +995,20 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
-// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
-// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
-// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
-// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
-// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1137,16 +1137,16 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1542,14 +1542,14 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1579,20 +1579,20 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml20_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
index d0b90947f540..8b23867e97c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.h
@@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@@ -61,7 +61,7 @@ void dml20_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
index 1a0c14e465fa..015e7f2c0b16 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c
@@ -38,7 +38,7 @@
static void dml20v2_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st * rq_param,
- const display_pipe_source_params_st pipe_src_param);
+ const display_pipe_source_params_st *pipe_src_param);
// Function: dml20v2_rq_dlg_get_dlg_params
// Calculate deadline related parameters
@@ -49,8 +49,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en);
/*
@@ -164,52 +164,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// TODO: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -218,9 +218,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int) ((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -233,7 +233,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -242,8 +242,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -685,7 +685,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_source_params_st pipe_src_param,
+ const display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -697,15 +697,15 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// TODO check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -728,21 +728,21 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
}
static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -751,7 +751,7 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class)(pipe_src_param.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_src_param->source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -763,20 +763,20 @@ static void dml20v2_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
// calculate how to split the det buffer space between luma and chroma
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml20v2_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
- dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param.src);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &pipe_param->src);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -787,8 +787,8 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -935,7 +935,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -996,20 +996,20 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
// vinit_bot_l = scl.vinit_bot;
// vinit_bot_c = scl.vinit_bot_c;
-// unsigned int swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
-// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
-// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
-// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
+// unsigned int swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+// unsigned int dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+// unsigned int meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+// unsigned int meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
-// unsigned int swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- // dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+// unsigned int swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ // dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1138,16 +1138,16 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1543,14 +1543,14 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1580,20 +1580,20 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe.src);
+ dml20v2_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe.src);
dml20v2_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
index 27cf8bed9376..2b4e46ea1c3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.h
@@ -43,7 +43,7 @@ struct display_mode_lib;
void dml20v2_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
@@ -61,7 +61,7 @@ void dml20v2_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
index 4136eb8256cb..8a7485e21d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c
@@ -3394,6 +3394,127 @@ static unsigned int TruncToValidBPP(
}
}
+
+static noinline void CalculatePrefetchSchedulePerPlane(
+ struct display_mode_lib *mode_lib,
+ int i,
+ unsigned j,
+ unsigned k)
+{
+ struct vba_vars_st *locals = &mode_lib->vba;
+ Pipe myPipe;
+ HostVM myHostVM;
+
+ if (mode_lib->vba.XFCEnabled[k] == true) {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay =
+ CalculateRemoteSurfaceFlipDelay(
+ mode_lib,
+ mode_lib->vba.VRatio[k],
+ locals->SwathWidthYThisState[k],
+ dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
+ mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
+ mode_lib->vba.XFCTSlvVupdateOffset,
+ mode_lib->vba.XFCTSlvVupdateWidth,
+ mode_lib->vba.XFCTSlvVreadyOffset,
+ mode_lib->vba.XFCXBUFLatencyTolerance,
+ mode_lib->vba.XFCFillBWOverhead,
+ mode_lib->vba.XFCSlvChunkSize,
+ mode_lib->vba.XFCBusTransportTime,
+ mode_lib->vba.TimeCalc,
+ mode_lib->vba.TWait,
+ &mode_lib->vba.SrcActiveDrainRate,
+ &mode_lib->vba.TInitXFill,
+ &mode_lib->vba.TslvChk);
+ } else {
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
+ }
+
+ myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
+ myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
+ myPipe.PixelClock = mode_lib->vba.PixelClock[k];
+ myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
+ myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
+ myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
+ myPipe.SourceScan = mode_lib->vba.SourceScan[k];
+ myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
+ myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
+ myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
+ myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
+ myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
+ myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
+ myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
+ myPipe.HTotal = mode_lib->vba.HTotal[k];
+
+
+ myHostVM.Enable = mode_lib->vba.HostVMEnable;
+ myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
+ myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
+
+
+ mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
+ mode_lib,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
+ mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
+ &myPipe,
+ locals->DSCDelayPerState[i][k],
+ mode_lib->vba.DPPCLKDelaySubtotal,
+ mode_lib->vba.DPPCLKDelaySCL,
+ mode_lib->vba.DPPCLKDelaySCLLBOnly,
+ mode_lib->vba.DPPCLKDelayCNVCFormater,
+ mode_lib->vba.DPPCLKDelayCNVCCursor,
+ mode_lib->vba.DISPCLKDelaySubtotal,
+ locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
+ mode_lib->vba.OutputFormat[k],
+ mode_lib->vba.MaxInterDCNTileRepeaters,
+ dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
+ locals->MaximumVStartup[0][0][k],
+ mode_lib->vba.GPUVMMaxPageTableLevels,
+ mode_lib->vba.GPUVMEnable,
+ &myHostVM,
+ mode_lib->vba.DynamicMetadataEnable[k],
+ mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
+ mode_lib->vba.DynamicMetadataTransmittedBytes[k],
+ mode_lib->vba.DCCEnable[k],
+ mode_lib->vba.UrgentLatency,
+ mode_lib->vba.ExtraLatency,
+ mode_lib->vba.TimeCalc,
+ locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
+ locals->MetaRowBytes[0][0][k],
+ locals->DPTEBytesPerRow[0][0][k],
+ locals->PrefetchLinesY[0][0][k],
+ locals->SwathWidthYThisState[k],
+ locals->BytePerPixelInDETY[k],
+ locals->PrefillY[k],
+ locals->MaxNumSwY[k],
+ locals->PrefetchLinesC[0][0][k],
+ locals->BytePerPixelInDETC[k],
+ locals->PrefillC[k],
+ locals->MaxNumSwC[k],
+ locals->SwathHeightYThisState[k],
+ locals->SwathHeightCThisState[k],
+ mode_lib->vba.TWait,
+ mode_lib->vba.XFCEnabled[k],
+ mode_lib->vba.XFCRemoteSurfaceFlipDelay,
+ mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
+ &locals->dst_x_after_scaler,
+ &locals->dst_y_after_scaler,
+ &locals->LineTimesForPrefetch[k],
+ &locals->PrefetchBW[k],
+ &locals->LinesForMetaPTE[k],
+ &locals->LinesForMetaAndDPTERow[k],
+ &locals->VRatioPreY[i][j][k],
+ &locals->VRatioPreC[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
+ &locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
+ &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
+ &locals->Tno_bw[k],
+ &locals->prefetch_vmrow_bw[k],
+ locals->swath_width_luma_ub,
+ locals->swath_width_chroma_ub,
+ &mode_lib->vba.VUpdateOffsetPix[k],
+ &mode_lib->vba.VUpdateWidthPix[k],
+ &mode_lib->vba.VReadyOffsetPix[k]);
+}
void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib)
{
struct vba_vars_st *locals = &mode_lib->vba;
@@ -4676,120 +4797,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
mode_lib->vba.DRAMClockChangeLatency,
mode_lib->vba.UrgentLatency,
mode_lib->vba.SREnterPlusExitTime);
- for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
- Pipe myPipe;
- HostVM myHostVM;
-
- if (mode_lib->vba.XFCEnabled[k] == true) {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay =
- CalculateRemoteSurfaceFlipDelay(
- mode_lib,
- mode_lib->vba.VRatio[k],
- locals->SwathWidthYThisState[k],
- dml_ceil(locals->BytePerPixelInDETY[k], 1.0),
- mode_lib->vba.HTotal[k] / mode_lib->vba.PixelClock[k],
- mode_lib->vba.XFCTSlvVupdateOffset,
- mode_lib->vba.XFCTSlvVupdateWidth,
- mode_lib->vba.XFCTSlvVreadyOffset,
- mode_lib->vba.XFCXBUFLatencyTolerance,
- mode_lib->vba.XFCFillBWOverhead,
- mode_lib->vba.XFCSlvChunkSize,
- mode_lib->vba.XFCBusTransportTime,
- mode_lib->vba.TimeCalc,
- mode_lib->vba.TWait,
- &mode_lib->vba.SrcActiveDrainRate,
- &mode_lib->vba.TInitXFill,
- &mode_lib->vba.TslvChk);
- } else {
- mode_lib->vba.XFCRemoteSurfaceFlipDelay = 0.0;
- }
+ for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++)
+ CalculatePrefetchSchedulePerPlane(mode_lib, i, j, k);
- myPipe.DPPCLK = locals->RequiredDPPCLK[i][j][k];
- myPipe.DISPCLK = locals->RequiredDISPCLK[i][j];
- myPipe.PixelClock = mode_lib->vba.PixelClock[k];
- myPipe.DCFCLKDeepSleep = mode_lib->vba.ProjectedDCFCLKDeepSleep[0][0];
- myPipe.DPPPerPlane = locals->NoOfDPP[i][j][k];
- myPipe.ScalerEnabled = mode_lib->vba.ScalerEnabled[k];
- myPipe.SourceScan = mode_lib->vba.SourceScan[k];
- myPipe.BlockWidth256BytesY = locals->Read256BlockWidthY[k];
- myPipe.BlockHeight256BytesY = locals->Read256BlockHeightY[k];
- myPipe.BlockWidth256BytesC = locals->Read256BlockWidthC[k];
- myPipe.BlockHeight256BytesC = locals->Read256BlockHeightC[k];
- myPipe.InterlaceEnable = mode_lib->vba.Interlace[k];
- myPipe.NumberOfCursors = mode_lib->vba.NumberOfCursors[k];
- myPipe.VBlank = mode_lib->vba.VTotal[k] - mode_lib->vba.VActive[k];
- myPipe.HTotal = mode_lib->vba.HTotal[k];
-
-
- myHostVM.Enable = mode_lib->vba.HostVMEnable;
- myHostVM.MaxPageTableLevels = mode_lib->vba.HostVMMaxPageTableLevels;
- myHostVM.CachedPageTableLevels = mode_lib->vba.HostVMCachedPageTableLevels;
-
-
- mode_lib->vba.IsErrorResult[i][j][k] = CalculatePrefetchSchedule(
- mode_lib,
- mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyPixelMixedWithVMData,
- mode_lib->vba.PercentOfIdealDRAMFabricAndSDPPortBWReceivedAfterUrgLatencyVMDataOnly,
- &myPipe,
- locals->DSCDelayPerState[i][k],
- mode_lib->vba.DPPCLKDelaySubtotal,
- mode_lib->vba.DPPCLKDelaySCL,
- mode_lib->vba.DPPCLKDelaySCLLBOnly,
- mode_lib->vba.DPPCLKDelayCNVCFormater,
- mode_lib->vba.DPPCLKDelayCNVCCursor,
- mode_lib->vba.DISPCLKDelaySubtotal,
- locals->SwathWidthYThisState[k] / mode_lib->vba.HRatio[k],
- mode_lib->vba.OutputFormat[k],
- mode_lib->vba.MaxInterDCNTileRepeaters,
- dml_min(mode_lib->vba.MaxVStartup, locals->MaximumVStartup[0][0][k]),
- locals->MaximumVStartup[0][0][k],
- mode_lib->vba.GPUVMMaxPageTableLevels,
- mode_lib->vba.GPUVMEnable,
- &myHostVM,
- mode_lib->vba.DynamicMetadataEnable[k],
- mode_lib->vba.DynamicMetadataLinesBeforeActiveRequired[k],
- mode_lib->vba.DynamicMetadataTransmittedBytes[k],
- mode_lib->vba.DCCEnable[k],
- mode_lib->vba.UrgentLatency,
- mode_lib->vba.ExtraLatency,
- mode_lib->vba.TimeCalc,
- locals->PDEAndMetaPTEBytesPerFrame[0][0][k],
- locals->MetaRowBytes[0][0][k],
- locals->DPTEBytesPerRow[0][0][k],
- locals->PrefetchLinesY[0][0][k],
- locals->SwathWidthYThisState[k],
- locals->BytePerPixelInDETY[k],
- locals->PrefillY[k],
- locals->MaxNumSwY[k],
- locals->PrefetchLinesC[0][0][k],
- locals->BytePerPixelInDETC[k],
- locals->PrefillC[k],
- locals->MaxNumSwC[k],
- locals->SwathHeightYThisState[k],
- locals->SwathHeightCThisState[k],
- mode_lib->vba.TWait,
- mode_lib->vba.XFCEnabled[k],
- mode_lib->vba.XFCRemoteSurfaceFlipDelay,
- mode_lib->vba.ProgressiveToInterlaceUnitInOPP,
- &locals->dst_x_after_scaler,
- &locals->dst_y_after_scaler,
- &locals->LineTimesForPrefetch[k],
- &locals->PrefetchBW[k],
- &locals->LinesForMetaPTE[k],
- &locals->LinesForMetaAndDPTERow[k],
- &locals->VRatioPreY[i][j][k],
- &locals->VRatioPreC[i][j][k],
- &locals->RequiredPrefetchPixelDataBWLuma[i][j][k],
- &locals->RequiredPrefetchPixelDataBWChroma[i][j][k],
- &locals->VStartupRequiredWhenNotEnoughTimeForDynamicMetadata,
- &locals->Tno_bw[k],
- &locals->prefetch_vmrow_bw[k],
- locals->swath_width_luma_ub,
- locals->swath_width_chroma_ub,
- &mode_lib->vba.VUpdateOffsetPix[k],
- &mode_lib->vba.VUpdateWidthPix[k],
- &mode_lib->vba.VReadyOffsetPix[k]);
- }
mode_lib->vba.MaximumReadBandwidthWithoutPrefetch = 0.0;
mode_lib->vba.MaximumReadBandwidthWithPrefetch = 0.0;
for (k = 0; k <= mode_lib->vba.NumberOfActivePlanes - 1; k++) {
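The display_mode_vba_21.c change above hoists the per-plane body of the mode-support loop into a separate noinline helper, CalculatePrefetchSchedulePerPlane(), so its large locals (the Pipe and HostVM structs plus the long argument spill for CalculatePrefetchSchedule()) live in a short-lived frame of their own instead of being inlined into dml21_ModeSupportAndSystemConfigurationFull()'s already large stack frame. A minimal, self-contained sketch of that pattern, using hypothetical names and types rather than the DML ones:

	/* The kernel provides noinline; outside it, spell the attribute out. */
	#ifndef noinline
	#define noinline __attribute__((noinline))
	#endif

	struct scratch {
		double tmp[128];	/* large per-iteration temporaries */
	};

	/*
	 * noinline: the scratch struct stays in this helper's frame and is
	 * released on return, instead of being merged into the caller.
	 */
	static noinline double process_one_plane(const double *in, int k)
	{
		struct scratch s;
		int n;

		for (n = 0; n < 128; n++)
			s.tmp[n] = in[k] * (double)n;
		return s.tmp[127];
	}

	double process_all_planes(const double *in, int count)
	{
		double sum = 0.0;
		int k;

		for (k = 0; k < count; k++)
			sum += process_one_plane(in, k);	/* was: inlined loop body */
		return sum;
	}

The observable behaviour is unchanged; only where the temporaries live, and therefore the caller's worst-case stack depth, differs.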
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
index 287e31052b30..46c433c0bcb0 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c
@@ -141,55 +141,55 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(
- dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(
- dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -198,9 +198,9 @@ static void extract_rq_regs(
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
@@ -215,7 +215,7 @@ static void extract_rq_regs(
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -224,8 +224,8 @@ static void handle_det_buf_split(
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -694,7 +694,7 @@ static void get_surf_rq_param(
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma)
{
bool mode_422 = false;
@@ -706,30 +706,30 @@ static void get_surf_rq_param(
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_half;
unsigned int src_hactive_half;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_half = pipe_param.dest.hactive / 2;
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_half = pipe_param->dest.hactive / 2;
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_half;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_half;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_half = pipe_param.scale_ratio_depth.hscl_ratio * hactive_half;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_half = pipe_param->scale_ratio_depth.hscl_ratio * hactive_half;
}
if (access_dir == 0) {
@@ -754,7 +754,7 @@ static void get_surf_rq_param(
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -768,23 +768,23 @@ static void get_surf_rq_param(
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma);
}
static void dml_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
- || pipe_param.src.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
+ || pipe_param->src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
@@ -794,7 +794,7 @@ static void dml_rq_dlg_get_rq_params(
pipe_param,
0);
- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(
mode_lib,
@@ -806,22 +806,22 @@ static void dml_rq_dlg_get_rq_params(
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
// Note: currently taken in as is.
@@ -833,8 +833,8 @@ static void dml_rq_dlg_get_dlg_params(
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en)
{
@@ -981,7 +981,7 @@ static void dml_rq_dlg_get_dlg_params(
* (double) ref_freq_to_pix_freq);
ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int)dml_pow(2, 13));
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = get_tcalc(mode_lib, e2e_pipe_param, num_pipes);
min_ttu_vblank = get_min_ttu_vblank(mode_lib, e2e_pipe_param, num_pipes, pipe_idx);
@@ -1042,13 +1042,13 @@ static void dml_rq_dlg_get_dlg_params(
scl_enable = scl->scl_enable;
line_time_in_us = (htotal / pclk_freq_in_mhz);
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1189,16 +1189,16 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: vratio_pre_c=%3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1650,15 +1650,15 @@ static void dml_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1691,12 +1691,12 @@ void dml21_rq_dlg_get_dlg_reg(
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
@@ -1704,8 +1704,8 @@ void dml21_rq_dlg_get_dlg_reg(
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en);
dml_print("DML_DLG: Calculation for pipe[%d] end\n", pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
index e8f7785e3fc6..af6ad0ca9cf8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.h
@@ -44,7 +44,7 @@ struct display_mode_lib;
void dml21_rq_dlg_get_rq_reg(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -61,7 +61,7 @@ void dml21_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
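The bulk of the dcn21 rq/dlg changes above (and the matching dcn30/dcn31 hunks below) are a mechanical conversion of large DML parameter structs from pass-by-value to pass-by-const-pointer: call sites switch from f(param) to f(&param), member access from '.' to '->', and the print__*_st debug helpers take the same pointers. The payoff is that each call no longer copies a multi-hundred-byte struct onto the callee's stack. A small illustrative sketch with hypothetical names, not the DML types:

	struct big_params {
		unsigned int fields[256];	/* stands in for display_pipe_params_st etc. */
	};

	/* before: the whole struct is copied onto the stack at every call */
	unsigned int sum_by_value(struct big_params p)
	{
		unsigned int i, s = 0;

		for (i = 0; i < 256; i++)
			s += p.fields[i];
		return s;
	}

	/* after: only a pointer is passed; const documents read-only use */
	unsigned int sum_by_ref(const struct big_params *p)
	{
		unsigned int i, s = 0;

		for (i = 0; i < 256; i++)
			s += p->fields[i];
		return s;
	}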
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
index 0d934fae1c3a..aef854270054 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c
@@ -89,52 +89,52 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib,
display_data_rq_regs_st *rq_regs,
- const display_data_rq_sizing_params_st rq_sizing)
+ const display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_DLG: %s: rq_sizing param\n", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
static void extract_rq_regs(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_rq_params_st rq_param)
+ const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height),
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height),
1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height),
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height),
1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -143,9 +143,9 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double)rq_param.misc.rq_l.stored_swath_bytes
- / (double)rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double)rq_param->misc.rq_l.stored_swath_bytes
+ / (double)rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); // half to chroma
} else {
detile_buf_plane1_addr = dml_round_to_multiple((unsigned int)((2.0 * detile_buf_size_in_bytes) / 3.0),
@@ -158,7 +158,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib,
static void handle_det_buf_split(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_source_params_st pipe_src_param)
+ const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -167,8 +167,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib,
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = false;
bool req128_c = false;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -747,7 +747,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
@@ -761,32 +761,32 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma | is_alpha) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
- surface_height = pipe_param.src.surface_height_y / 2.0;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
+ surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
- surface_height = pipe_param.src.surface_height_y;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
+ surface_height = pipe_param->src.surface_height_y;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir = 0;
unsigned int full_src_vp_width = 0;
unsigned int hactive_odm = 0;
unsigned int src_hactive_odm = 0;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_odm = pipe_param.dest.hactive / ((unsigned int)pipe_param.dest.odm_combine*2);
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine*2);
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
@@ -815,7 +815,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -828,28 +828,28 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib,
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma,
surface_height);
}
static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
display_rq_params_st *rq_param,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8
- || pipe_param.src.source_format == dm_420_10
- || pipe_param.src.source_format == dm_rgbe_alpha
- || pipe_param.src.source_format == dm_420_12;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8
+ || pipe_param->src.source_format == dm_420_10
+ || pipe_param->src.source_format == dm_rgbe_alpha
+ || pipe_param->src.source_format == dm_420_12;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha)?1:0;
+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha)?1:0;
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_l),
@@ -859,7 +859,7 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
0,
0);
- if (is_dual_plane((enum source_format_class)(pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class)(pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib,
&(rq_param->sizing.rq_c),
@@ -871,21 +871,21 @@ static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib,
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param)
+ const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = { 0 };
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
@@ -1824,14 +1824,14 @@ static void dml_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib,
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1861,12 +1861,12 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency
/ dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(mode_lib,
e2e_pipe_param,
num_pipes,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
index c04965cceff3..625e41f8d575 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.h
@@ -41,7 +41,7 @@ struct display_mode_lib;
// See also: <display_rq_regs_st>
void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -57,7 +57,7 @@ void dml30_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
new file mode 100644
index 000000000000..94c32832a0e7
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+#include "resource.h"
+#include "clk_mgr.h"
+#include "dcn20/dcn20_resource.h"
+#include "dcn301/dcn301_resource.h"
+
+#include "dml/dcn20/dcn20_fpu.h"
+#include "dcn301_fpu.h"
+
+#define TO_DCN301_RES_POOL(pool)\
+ container_of(pool, struct dcn301_resource_pool, base)
+
+/* Based on: //vidip/dc/dcn3/doc/architecture/DCN3x_Display_Mode.xlsm#83 */
+struct _vcs_dpi_ip_params_st dcn3_01_ip = {
+ .odm_capable = 1,
+ .gpuvm_enable = 1,
+ .hostvm_enable = 1,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_page_table_levels = 2,
+ .hostvm_cached_page_table_levels = 0,
+ .pte_group_size_bytes = 2048,
+ .num_dsc = 3,
+ .rob_buffer_size_kbytes = 184,
+ .det_buffer_size_kbytes = 184,
+ .dpte_buffer_size_in_pte_reqs_luma = 64,
+ .dpte_buffer_size_in_pte_reqs_chroma = 32,
+ .pde_proc_buffer_size_64k_reqs = 48,
+ .dpp_output_buffer_pixels = 2560,
+ .opp_output_buffer_lines = 1,
+ .pixel_chunk_size_kbytes = 8,
+ .meta_chunk_size_kbytes = 2,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 789504,
+ .is_line_buffer_bpp_fixed = 0, // ?
+ .line_buffer_fixed_bpp = 48, // ?
+ .dcc_supported = true,
+ .writeback_interface_buffer_size_kbytes = 90,
+ .writeback_line_buffer_buffer_size = 656640,
+ .max_line_buffer_lines = 12,
+ .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640
+ .writeback_chroma_buffer_size_kbytes = 8,
+ .writeback_chroma_line_buffer_width_pixels = 4,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_luma_buffer_size = 0,
+ .writeback_line_buffer_chroma_buffer_size = 14643,
+ .cursor_buffer_size = 8,
+ .cursor_chunk_size = 2,
+ .max_num_otg = 4,
+ .max_num_dpp = 4,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .hscl_mults = 4,
+ .vscl_mults = 4,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .underscan_factor = 1.11,
+ .min_vblank_lines = 32,
+ .dppclk_delay_subtotal = 46,
+ .dynamic_metadata_vm_enabled = true,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_cnvc_formatter = 27,
+ .dppclk_delay_cnvc_cursor = 6,
+ .dispclk_delay_subtotal = 119,
+ .dcfclk_cstate_latency = 5.2, // SRExitTime
+ .max_inter_dcn_tile_repeaters = 8,
+ .max_num_hdmi_frl_outputs = 0,
+ .odm_combine_4to1_supported = true,
+
+ .xfc_supported = false,
+ .xfc_fill_bw_overhead_percent = 10.0,
+ .xfc_fill_constant_bytes = 0,
+ .gfx7_compat_tiling_supported = 0,
+ .number_of_cursors = 1,
+};
+
+struct _vcs_dpi_soc_bounding_box_st dcn3_01_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 600,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 1,
+ .dram_speed_mts = 2400.0,
+ .fabricclk_mhz = 688,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 400.0,
+ .dscclk_mhz = 206.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 600.0,
+ },
+
+ {
+ .state = 2,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 278.0,
+ .dcfclk_mhz = 608.0,
+ .dscclk_mhz = 296.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 3,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 715.0,
+ .dcfclk_mhz = 676.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+
+ {
+ .state = 4,
+ .dram_speed_mts = 4267.0,
+ .fabricclk_mhz = 1067,
+ .socclk_mhz = 953.0,
+ .dcfclk_mhz = 810.0,
+ .dscclk_mhz = 338.0,
+ .dppclk_mhz = 1015.0,
+ .dispclk_mhz = 1015.0,
+ .phyclk_mhz = 810.0,
+ },
+ },
+
+ .sr_exit_time_us = 9.0,
+ .sr_enter_plus_exit_time_us = 11.0,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 75.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 60.0,
+ .max_avg_dram_bw_use_normal_percent = 60.0,
+ .writeback_latency_us = 12.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 4,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 191,
+ .urgent_out_of_order_return_per_channel_bytes = 4096,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 4,
+ .gpuvm_min_page_size_bytes = 4096,
+ .hostvm_min_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 23.84,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3550,
+ .xfc_bus_transport_time_us = 20, // ?
+ .xfc_xbuf_latency_tolerance_us = 4, // ?
+ .use_urgent_burst_bw = 1, // ?
+ .num_states = 5,
+ .do_urgent_latency_adjustment = false,
+ .urgent_latency_adjustment_fabric_clock_component_us = 0,
+ .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+};
+
+static void calculate_wm_set_for_vlevel(int vlevel,
+ struct wm_range_table_entry *table_entry,
+ struct dcn_watermarks *wm_set,
+ struct display_mode_lib *dml,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt)
+{
+ double dram_clock_change_latency_cached = dml->soc.dram_clock_change_latency_us;
+
+ ASSERT(vlevel < dml->soc.num_states);
+ /* only pipe 0 is read for voltage and dcf/soc clocks */
+ pipes[0].clks_cfg.voltage = vlevel;
+ pipes[0].clks_cfg.dcfclk_mhz = dml->soc.clock_limits[vlevel].dcfclk_mhz;
+ pipes[0].clks_cfg.socclk_mhz = dml->soc.clock_limits[vlevel].socclk_mhz;
+
+ dml->soc.dram_clock_change_latency_us = table_entry->pstate_latency_us;
+ dml->soc.sr_exit_time_us = table_entry->sr_exit_time_us;
+ dml->soc.sr_enter_plus_exit_time_us = table_entry->sr_enter_plus_exit_time_us;
+
+ wm_set->urgent_ns = get_wm_urgent(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_enter_plus_exit_ns = get_wm_stutter_enter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(dml, pipes, pipe_cnt) * 1000;
+ wm_set->cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(dml, pipes, pipe_cnt) * 1000;
+ wm_set->pte_meta_urgent_ns = get_wm_memory_trip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000;
+ wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000;
+ wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000;
+ dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached;
+
+}
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
+{
+ struct dcn301_resource_pool *pool = TO_DCN301_RES_POOL(dc->res_pool);
+ struct clk_limit_table *clk_table = &bw_params->clk_table;
+ struct _vcs_dpi_voltage_scaling_st clock_limits[DC__VOLTAGE_STATES];
+ unsigned int i, closest_clk_lvl;
+ int j;
+
+ dc_assert_fp_enabled();
+
+ /* Default clock levels are used for diags, which may lead to overclocking. */
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn3_01_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn3_01_ip.max_num_dpp = pool->base.pipe_count;
+ dcn3_01_soc.num_chans = bw_params->num_channels;
+
+ ASSERT(clk_table->num_entries);
+ for (i = 0; i < clk_table->num_entries; i++) {
+ /* loop backwards*/
+ for (closest_clk_lvl = 0, j = dcn3_01_soc.num_states - 1; j >= 0; j--) {
+ if ((unsigned int) dcn3_01_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
+
+ clock_limits[i].state = i;
+ clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ clock_limits[i].dispclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ clock_limits[i].dppclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ clock_limits[i].dram_bw_per_chan_gbps = dcn3_01_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ clock_limits[i].dscclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ clock_limits[i].dtbclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ clock_limits[i].phyclk_d18_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ clock_limits[i].phyclk_mhz = dcn3_01_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ }
+
+ for (i = 0; i < clk_table->num_entries; i++)
+ dcn3_01_soc.clock_limits[i] = clock_limits[i];
+
+ if (clk_table->num_entries) {
+ dcn3_01_soc.num_states = clk_table->num_entries;
+ /* duplicate last level */
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states] = dcn3_01_soc.clock_limits[dcn3_01_soc.num_states - 1];
+ dcn3_01_soc.clock_limits[dcn3_01_soc.num_states].state = dcn3_01_soc.num_states;
+ }
+ }
+
+ dcn3_01_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+
+ dml_init_instance(&dc->dml, &dcn3_01_soc, &dcn3_01_ip, DML_PROJECT_DCN30);
+}
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb)
+{
+ dc_assert_fp_enabled();
+
+ ranges->reader_wm_sets[i].min_fill_clk_mhz = (i > 0) ? (loaded_bb->clock_limits[i - 1].dram_speed_mts / 16) + 1 : 0;
+ ranges->reader_wm_sets[i].max_fill_clk_mhz = loaded_bb->clock_limits[i].dram_speed_mts / 16;
+}
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info)
+{
+ dc_assert_fp_enabled();
+
+ if (bb_info.dram_clock_change_latency_100ns > 0)
+ dcn3_01_soc.dram_clock_change_latency_us = bb_info.dram_clock_change_latency_100ns * 10;
+
+ if (bb_info.dram_sr_enter_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_enter_plus_exit_time_us = bb_info.dram_sr_enter_exit_latency_100ns * 10;
+
+ if (bb_info.dram_sr_exit_latency_100ns > 0)
+ dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10;
+}
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req)
+{
+ int i, pipe_idx;
+ int vlevel, vlevel_max;
+ struct wm_range_table_entry *table_entry;
+ struct clk_bw_params *bw_params = dc->clk_mgr->bw_params;
+
+ ASSERT(bw_params);
+ dc_assert_fp_enabled();
+
+ vlevel_max = bw_params->clk_table.num_entries - 1;
+
+ /* WM Set D */
+ table_entry = &bw_params->wm_table.entries[WM_D];
+ if (table_entry->wm_type == WM_TYPE_RETRAINING)
+ vlevel = 0;
+ else
+ vlevel = vlevel_max;
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.d,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set C */
+ table_entry = &bw_params->wm_table.entries[WM_C];
+ vlevel = min(max(vlevel_req, 2), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.c,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+ /* WM Set B */
+ table_entry = &bw_params->wm_table.entries[WM_B];
+ vlevel = min(max(vlevel_req, 1), vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.b,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ /* WM Set A */
+ table_entry = &bw_params->wm_table.entries[WM_A];
+ vlevel = min(vlevel_req, vlevel_max);
+ calculate_wm_set_for_vlevel(vlevel, table_entry, &context->bw_ctx.bw.dcn.watermarks.a,
+ &context->bw_ctx.dml, pipes, pipe_cnt);
+
+ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
+ if (!context->res_ctx.pipe_ctx[i].stream)
+ continue;
+
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = get_dispclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt);
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = get_dppclk_calculated(&context->bw_ctx.dml, pipes, pipe_cnt, pipe_idx);
+
+ if (dc->config.forced_clocks) {
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dispclk_mhz;
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = context->bw_ctx.dml.soc.clock_limits[0].dppclk_mhz;
+ }
+ if (dc->debug.min_disp_clk_khz > pipes[pipe_idx].clks_cfg.dispclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dispclk_mhz = dc->debug.min_disp_clk_khz / 1000.0;
+ if (dc->debug.min_dpp_clk_khz > pipes[pipe_idx].clks_cfg.dppclk_mhz * 1000)
+ pipes[pipe_idx].clks_cfg.dppclk_mhz = dc->debug.min_dpp_clk_khz / 1000.0;
+ pipe_idx++;
+ }
+
+ dcn20_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel);
+}
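In dcn301_update_bw_bounding_box() above, each entry of the firmware clock table is mapped onto the hard-coded SoC bounding box by scanning the SoC states from highest to lowest and taking the first one whose DCFCLK does not exceed the requested value. A standalone sketch of that search, with plain arrays in place of the SoC and clock-table structs (names here are illustrative only):

	#include <stdio.h>

	/*
	 * Return the index of the highest state whose dcfclk does not exceed
	 * the requested value; fall back to state 0 if none qualifies.
	 */
	static unsigned int closest_level(const unsigned int *state_dcfclk_mhz,
					  int num_states,
					  unsigned int req_dcfclk_mhz)
	{
		unsigned int closest = 0;
		int j;

		for (j = num_states - 1; j >= 0; j--) {	/* loop backwards */
			if (state_dcfclk_mhz[j] <= req_dcfclk_mhz) {
				closest = j;
				break;
			}
		}
		return closest;
	}

	int main(void)
	{
		/* DCFCLKs of the five dcn3_01_soc states above, in MHz */
		const unsigned int dcfclk[] = { 400, 400, 608, 676, 810 };

		/* A 650 MHz request maps to state 2 (608 MHz). */
		printf("%u\n", closest_level(dcfclk, 5, 650));
		return 0;
	}

The dispclk/dppclk/phyclk values of the selected level are then copied alongside the firmware-provided dcfclk, fclk, socclk, and memclk, as the loop body above shows.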
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
new file mode 100644
index 000000000000..fc7065d17842
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2019-2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DCN301_FPU_H__
+#define __DCN301_FPU_H__
+
+void dcn301_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params);
+
+void dcn301_fpu_set_wm_ranges(int i,
+ struct pp_smu_wm_range_sets *ranges,
+ struct _vcs_dpi_soc_bounding_box_st *loaded_bb);
+
+void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info);
+
+void dcn301_calculate_wm_and_dlg(struct dc *dc,
+ struct dc_state *context,
+ display_e2e_pipe_params_st *pipes,
+ int pipe_cnt,
+ int vlevel_req);
+#endif /* __DCN301_FPU_H__*/
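The helpers declared here only assert that the FPU is already usable (dc_assert_fp_enabled() in dcn301_fpu.c above); taking and releasing the kernel FPU context is left to the caller. A hedged sketch of the expected call pattern, using the DC_FP_START()/DC_FP_END() wrappers DC uses elsewhere around kernel_fpu_begin()/kernel_fpu_end(); the wrapper function name is illustrative and not part of this patch:

	static void dcn301_calculate_wm_and_dlg_wrapped(struct dc *dc,
							struct dc_state *context,
							display_e2e_pipe_params_st *pipes,
							int pipe_cnt,
							int vlevel)
	{
		DC_FP_START();	/* enter kernel FPU context */
		dcn301_calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel);
		DC_FP_END();	/* leave kernel FPU context */
	}

Keeping the FP-touching code under dml/dcn301/ and the bracketing in the resource code matches the split already visible above, where dcn301_fpu.c pulls in dml/dcn20/dcn20_fpu.h.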
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
index ce55c9caf9a2..d58925cff420 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c
@@ -5398,9 +5398,9 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->MaximumReadBandwidthWithPrefetch =
v->MaximumReadBandwidthWithPrefetch
- + dml_max4(
- v->VActivePixelBandwidth[i][j][k],
- v->VActiveCursorBandwidth[i][j][k]
+ + dml_max3(
+ v->VActivePixelBandwidth[i][j][k]
+ + v->VActiveCursorBandwidth[i][j][k]
+ v->NoOfDPP[i][j][k]
* (v->meta_row_bandwidth[i][j][k]
+ v->dpte_row_bandwidth[i][j][k]),
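The display_mode_vba_31.c hunk above is an arithmetic fix rather than an interface change: the largest-of-four reduction becomes a largest-of-three, with the active pixel bandwidth folded into the same candidate as the cursor and row-fetch bandwidths instead of competing against them as a separate term, so the prefetch bandwidth bound now covers pixel, cursor, and row traffic together. Schematically (candidate names abbreviated; row_bw stands for the NoOfDPP * (meta_row + dpte_row) term, and the remaining candidates continue beyond the lines shown in the hunk):

	/* before */ bw += dml_max4(pixel_bw, cursor_bw + row_bw, other_a, other_b);
	/* after  */ bw += dml_max3(pixel_bw + cursor_bw + row_bw, other_a, other_b);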
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
index c23905bc733a..e0fecf127bd5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.c
@@ -175,47 +175,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
return (4 * 1024);
}
-static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st rq_sizing)
+static void extract_rq_sizing_regs(struct display_mode_lib *mode_lib, display_data_rq_regs_st *rq_regs, const display_data_rq_sizing_params_st *rq_sizing)
{
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
-static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st rq_param)
+static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
- rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_l.dpte_row_height), 1) - 3;
+ rq_regs->rq_regs_l.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_l.dpte_row_height), 1) - 3;
- if (rq_param.yuv420) {
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
- rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param.dlg.rq_c.dpte_row_height), 1) - 3;
+ if (rq_param->yuv420) {
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
+ rq_regs->rq_regs_c.pte_row_height_linear = dml_floor(dml_log2(rq_param->dlg.rq_c.dpte_row_height), 1) - 3;
}
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
// FIXME: take the max between luma, chroma chunk size?
// okay for now, as we are setting chunk_bytes to 8kb anyways
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param.yuv420 && rq_param.sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024 || (rq_param->yuv420 && rq_param->sizing.rq_c.chunk_bytes >= 32 * 1024)) { //32kb
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -225,8 +225,8 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s
rq_regs->crq_expansion_mode = 1;
// Note: detile_buf_plane1_addr is in unit of 1KB
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 1024.0); // half to chroma
#ifdef __DML_RQ_DLG_CALC_DEBUG__
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d (1/2 to chroma)\n", __func__, detile_buf_plane1_addr);
@@ -244,14 +244,14 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, display_rq_regs_s
dml_print("DML_DLG: %s: detile_buf_size_in_bytes = %0d\n", __func__, detile_buf_size_in_bytes);
dml_print("DML_DLG: %s: detile_buf_plane1_addr = %0d\n", __func__, detile_buf_plane1_addr);
dml_print("DML_DLG: %s: plane1_base_address = %0d\n", __func__, rq_regs->plane1_base_address);
- dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_l.stored_swath_bytes);
- dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param.misc.rq_c.stored_swath_bytes);
- dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param.dlg.rq_l.swath_height);
- dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param.dlg.rq_c.swath_height);
+ dml_print("DML_DLG: %s: rq_l.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_l.stored_swath_bytes);
+ dml_print("DML_DLG: %s: rq_c.stored_swath_bytes = %0d\n", __func__, rq_param->misc.rq_c.stored_swath_bytes);
+ dml_print("DML_DLG: %s: rq_l.swath_height = %0d\n", __func__, rq_param->dlg.rq_l.swath_height);
+ dml_print("DML_DLG: %s: rq_c.swath_height = %0d\n", __func__, rq_param->dlg.rq_c.swath_height);
#endif
}
-static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st pipe_src_param)
+static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -260,8 +260,8 @@ static void handle_det_buf_split(struct display_mode_lib *mode_lib, display_rq_p
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -738,7 +738,7 @@ static void get_surf_rq_param(
display_data_rq_sizing_params_st *rq_sizing_param,
display_data_rq_dlg_params_st *rq_dlg_param,
display_data_rq_misc_params_st *rq_misc_param,
- const display_pipe_params_st pipe_param,
+ const display_pipe_params_st *pipe_param,
bool is_chroma,
bool is_alpha)
{
@@ -752,33 +752,33 @@ static void get_surf_rq_param(
// FIXME check if ppe apply for both luma and chroma in 422 case
if (is_chroma | is_alpha) {
- vp_width = pipe_param.src.viewport_width_c / ppe;
- vp_height = pipe_param.src.viewport_height_c;
- data_pitch = pipe_param.src.data_pitch_c;
- meta_pitch = pipe_param.src.meta_pitch_c;
- surface_height = pipe_param.src.surface_height_y / 2.0;
+ vp_width = pipe_param->src.viewport_width_c / ppe;
+ vp_height = pipe_param->src.viewport_height_c;
+ data_pitch = pipe_param->src.data_pitch_c;
+ meta_pitch = pipe_param->src.meta_pitch_c;
+ surface_height = pipe_param->src.surface_height_y / 2.0;
} else {
- vp_width = pipe_param.src.viewport_width / ppe;
- vp_height = pipe_param.src.viewport_height;
- data_pitch = pipe_param.src.data_pitch;
- meta_pitch = pipe_param.src.meta_pitch;
- surface_height = pipe_param.src.surface_height_y;
+ vp_width = pipe_param->src.viewport_width / ppe;
+ vp_height = pipe_param->src.viewport_height;
+ data_pitch = pipe_param->src.data_pitch;
+ meta_pitch = pipe_param->src.meta_pitch;
+ surface_height = pipe_param->src.surface_height_y;
}
- if (pipe_param.dest.odm_combine) {
+ if (pipe_param->dest.odm_combine) {
unsigned int access_dir;
unsigned int full_src_vp_width;
unsigned int hactive_odm;
unsigned int src_hactive_odm;
- access_dir = (pipe_param.src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
- hactive_odm = pipe_param.dest.hactive / ((unsigned int) pipe_param.dest.odm_combine * 2);
+ access_dir = (pipe_param->src.source_scan == dm_vert); // vp access direction: horizontal or vertical accessed
+ hactive_odm = pipe_param->dest.hactive / ((unsigned int) pipe_param->dest.odm_combine * 2);
if (is_chroma) {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio_c * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio_c * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio_c * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio_c * hactive_odm;
} else {
- full_src_vp_width = pipe_param.scale_ratio_depth.hscl_ratio * pipe_param.dest.full_recout_width;
- src_hactive_odm = pipe_param.scale_ratio_depth.hscl_ratio * hactive_odm;
+ full_src_vp_width = pipe_param->scale_ratio_depth.hscl_ratio * pipe_param->dest.full_recout_width;
+ src_hactive_odm = pipe_param->scale_ratio_depth.hscl_ratio * hactive_odm;
}
if (access_dir == 0) {
@@ -808,7 +808,7 @@ static void get_surf_rq_param(
rq_sizing_param->meta_chunk_bytes = 2048;
rq_sizing_param->min_meta_chunk_bytes = 256;
- if (pipe_param.src.hostvm)
+ if (pipe_param->src.hostvm)
rq_sizing_param->mpte_group_bytes = 512;
else
rq_sizing_param->mpte_group_bytes = 2048;
@@ -822,46 +822,46 @@ static void get_surf_rq_param(
vp_height,
data_pitch,
meta_pitch,
- pipe_param.src.source_format,
- pipe_param.src.sw_mode,
- pipe_param.src.macro_tile_size,
- pipe_param.src.source_scan,
- pipe_param.src.hostvm,
+ pipe_param->src.source_format,
+ pipe_param->src.sw_mode,
+ pipe_param->src.macro_tile_size,
+ pipe_param->src.source_scan,
+ pipe_param->src.hostvm,
is_chroma,
surface_height);
}
-static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st pipe_param)
+static void dml_rq_dlg_get_rq_params(struct display_mode_lib *mode_lib, display_rq_params_st *rq_param, const display_pipe_params_st *pipe_param)
{
// get param for luma surface
- rq_param->yuv420 = pipe_param.src.source_format == dm_420_8 || pipe_param.src.source_format == dm_420_10 || pipe_param.src.source_format == dm_rgbe_alpha
- || pipe_param.src.source_format == dm_420_12;
+ rq_param->yuv420 = pipe_param->src.source_format == dm_420_8 || pipe_param->src.source_format == dm_420_10 || pipe_param->src.source_format == dm_rgbe_alpha
+ || pipe_param->src.source_format == dm_420_12;
- rq_param->yuv420_10bpc = pipe_param.src.source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_param->src.source_format == dm_420_10;
- rq_param->rgbe_alpha = (pipe_param.src.source_format == dm_rgbe_alpha) ? 1 : 0;
+ rq_param->rgbe_alpha = (pipe_param->src.source_format == dm_rgbe_alpha) ? 1 : 0;
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_l), &(rq_param->dlg.rq_l), &(rq_param->misc.rq_l), pipe_param, 0, 0);
- if (is_dual_plane((enum source_format_class) (pipe_param.src.source_format))) {
+ if (is_dual_plane((enum source_format_class) (pipe_param->src.source_format))) {
// get param for chroma surface
get_surf_rq_param(mode_lib, &(rq_param->sizing.rq_c), &(rq_param->dlg.rq_c), &(rq_param->misc.rq_c), pipe_param, 1, rq_param->rgbe_alpha);
}
// calculate how to split the det buffer space between luma and chroma
- handle_det_buf_split(mode_lib, rq_param, pipe_param.src);
- print__rq_params_st(mode_lib, *rq_param);
+ handle_det_buf_split(mode_lib, rq_param, &pipe_param->src);
+ print__rq_params_st(mode_lib, rq_param);
}
-void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st pipe_param)
+void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib, display_rq_regs_st *rq_regs, const display_pipe_params_st *pipe_param)
{
display_rq_params_st rq_param = {0};
memset(rq_regs, 0, sizeof(*rq_regs));
dml_rq_dlg_get_rq_params(mode_lib, &rq_param, pipe_param);
- extract_rq_regs(mode_lib, rq_regs, rq_param);
+ extract_rq_regs(mode_lib, rq_regs, &rq_param);
- print__rq_regs_st(mode_lib, *rq_regs);
+ print__rq_regs_st(mode_lib, rq_regs);
}
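
Taken together, these hunks follow one pattern: the large display_*_st parameter structs are now passed as const pointers instead of by value, so each call hands over an address rather than copying the whole struct onto the stack, and member access switches from '.' to '->'. A minimal before/after sketch (hypothetical function names; display_pipe_params_st is the type used throughout this file):

/* Before: the callee receives a stack copy of the whole struct. */
void get_reg_byval(struct display_mode_lib *mode_lib,
		   const display_pipe_params_st pipe_param);

/* After: the callee receives a const pointer; call sites pass the
 * address instead, as in the extract_rq_regs(mode_lib, rq_regs,
 * &rq_param) call just above. */
void get_reg_byptr(struct display_mode_lib *mode_lib,
		   const display_pipe_params_st *pipe_param);
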
static void calculate_ttu_cursor(
@@ -943,8 +943,8 @@ static void dml_rq_dlg_get_dlg_params(
const unsigned int pipe_idx,
display_dlg_regs_st *disp_dlg_regs,
display_ttu_regs_st *disp_ttu_regs,
- const display_rq_dlg_params_st rq_dlg_param,
- const display_dlg_sys_params_st dlg_sys_param,
+ const display_rq_dlg_params_st *rq_dlg_param,
+ const display_dlg_sys_params_st *dlg_sys_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
@@ -1112,13 +1112,13 @@ static void dml_rq_dlg_get_dlg_params(
vratio_c = scl->vscl_ratio_c;
scl_enable = scl->scl_enable;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- meta_chunks_per_row_ub_c = rq_dlg_param.rq_c.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ meta_chunks_per_row_ub_c = rq_dlg_param->rq_c.meta_chunks_per_row_ub;
vupdate_offset = dst->vupdate_offset;
vupdate_width = dst->vupdate_width;
vready_offset = dst->vready_offset;
@@ -1239,16 +1239,16 @@ static void dml_rq_dlg_get_dlg_params(
dml_print("DML_DLG: %s: vratio_pre_c = %3.2f\n", __func__, vratio_pre_c);
// Active
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
- meta_row_height_c = rq_dlg_param.rq_c.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
+ meta_row_height_c = rq_dlg_param->rq_c.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
if (mode_422) {
swath_width_pixels_ub_l = swath_width_ub_l * 2; // *2 for 2 pixel per element
@@ -1669,15 +1669,15 @@ static void dml_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
void dml31_rq_dlg_get_dlg_reg(
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -1699,12 +1699,12 @@ void dml31_rq_dlg_get_dlg_reg(
dlg_sys_param.total_flip_bw = get_total_immediate_flip_bw(mode_lib, e2e_pipe_param, num_pipes);
dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes);
- print__dlg_sys_params_st(mode_lib, dlg_sys_param);
+ print__dlg_sys_params_st(mode_lib, &dlg_sys_param);
// system parameter calculation done
dml_print("DML_DLG: Calculation for pipe[%d] start\n\n", pipe_idx);
- dml_rq_dlg_get_rq_params(mode_lib, &rq_param, e2e_pipe_param[pipe_idx].pipe);
+ dml_rq_dlg_get_rq_params(mode_lib, &rq_param, &e2e_pipe_param[pipe_idx].pipe);
dml_rq_dlg_get_dlg_params(
mode_lib,
e2e_pipe_param,
@@ -1712,8 +1712,8 @@ void dml31_rq_dlg_get_dlg_reg(
pipe_idx,
dlg_regs,
ttu_regs,
- rq_param.dlg,
- dlg_sys_param,
+ &rq_param.dlg,
+ &dlg_sys_param,
cstate_en,
pstate_en,
vm_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
index adf8518f761f..8ee991351699 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_rq_dlg_calc_31.h
@@ -41,7 +41,7 @@ struct display_mode_lib;
// See also: <display_rq_regs_st>
void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
// Function: dml_rq_dlg_get_dlg_reg
// Calculate and return DLG and TTU register struct given the system setting
@@ -57,7 +57,7 @@ void dml31_rq_dlg_get_rq_reg(struct display_mode_lib *mode_lib,
void dml31_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
index 1051ca1a23b8..edb9f7567d6d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_enums.h
@@ -80,11 +80,11 @@ enum dm_swizzle_mode {
dm_sw_SPARE_13 = 24,
dm_sw_64kb_s_x = 25,
dm_sw_64kb_d_x = 26,
- dm_sw_SPARE_14 = 27,
+ dm_sw_64kb_r_x = 27,
dm_sw_SPARE_15 = 28,
dm_sw_var_s_x = 29,
dm_sw_var_d_x = 30,
- dm_sw_64kb_r_x,
+ dm_sw_var_r_x = 31,
dm_sw_gfx7_2d_thin_l_vp,
dm_sw_gfx7_2d_thin_gl,
};
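
The enum hunk above pins dm_sw_64kb_r_x to an explicit value: previously it had no initializer and sat after dm_sw_var_d_x = 27 + 3 = 30, so it silently took the value 31; it now occupies the former dm_sw_SPARE_14 slot (27), and dm_sw_var_r_x is added explicitly as 31. A small illustration of how an uninitialized enumerator shifts (hypothetical enum, not from the driver):

enum example_mode {
	MODE_A = 30,
	MODE_B,		/* implicitly 31: it follows whatever precedes it */
};

/* Giving every hardware-visible mode an explicit value, as the hunk
 * above now does, keeps the encoding stable when spare entries are
 * reclaimed or the list is reordered. */
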
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
index 8a5bd919aec8..30db51fbd8cd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.c
@@ -82,6 +82,7 @@ void dml_init_instance(struct display_mode_lib *lib,
lib->project = project;
switch (project) {
case DML_PROJECT_NAVI10:
+ case DML_PROJECT_DCN201:
lib->funcs = dml20_funcs;
break;
case DML_PROJECT_NAVI10v2:
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
index d42a0aeca6be..6905ef1e75a6 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_lib.h
@@ -36,6 +36,7 @@ enum dml_project {
DML_PROJECT_RAVEN1,
DML_PROJECT_NAVI10,
DML_PROJECT_NAVI10v2,
+ DML_PROJECT_DCN201,
DML_PROJECT_DCN21,
DML_PROJECT_DCN30,
DML_PROJECT_DCN31,
@@ -49,7 +50,7 @@ struct dml_funcs {
struct display_mode_lib *mode_lib,
display_dlg_regs_st *dlg_regs,
display_ttu_regs_st *ttu_regs,
- display_e2e_pipe_params_st *e2e_pipe_param,
+ const display_e2e_pipe_params_st *e2e_pipe_param,
const unsigned int num_pipes,
const unsigned int pipe_idx,
const bool cstate_en,
@@ -60,7 +61,7 @@ struct dml_funcs {
void (*rq_dlg_get_rq_reg)(
struct display_mode_lib *mode_lib,
display_rq_regs_st *rq_regs,
- const display_pipe_params_st pipe_param);
+ const display_pipe_params_st *pipe_param);
void (*recalculate)(struct display_mode_lib *mode_lib);
void (*validate)(struct display_mode_lib *mode_lib);
};
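
dml_funcs is the per-project dispatch table selected in dml_init_instance(); the prototype updates here have to match the per-generation implementations so struct assignments like the lib->funcs = dml20_funcs shown above keep type-checking against the new const-pointer signatures. A minimal caller-side sketch, assuming an already-initialized display_mode_lib and caller-owned rq_regs/pipe_param objects (illustrative helper, not driver code):

static void program_rq_regs(struct display_mode_lib *lib,
			    display_rq_regs_st *rq_regs,
			    const display_pipe_params_st *pipe_param)
{
	/* Dispatch through the project-specific table; for a DCN 3.1
	 * table this would presumably land in dml31_rq_dlg_get_rq_reg(). */
	lib->funcs.rq_dlg_get_rq_reg(lib, rq_regs, pipe_param);
}
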
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
index e2d82aacd3bc..71ea503cb32f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c
@@ -26,371 +26,371 @@
#include "display_rq_dlg_helpers.h"
#include "dml_logger.h"
-void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param)
+void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param)
{
dml_print("DML_RQ_DLG_CALC: ***************************\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_l);
+ print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA> ===\n");
- print__data_rq_sizing_params_st(mode_lib, rq_param.sizing.rq_c);
+ print__data_rq_sizing_params_st(mode_lib, &rq_param->sizing.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_l);
+ print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_param.dlg.rq_c);
+ print__data_rq_dlg_params_st(mode_lib, &rq_param->dlg.rq_c);
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_l);
+ print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_misc_params_st(mode_lib, rq_param.misc.rq_c);
+ print__data_rq_misc_params_st(mode_lib, &rq_param->misc.rq_c);
dml_print("DML_RQ_DLG_CALC: ***************************\n");
}
-void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing)
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_SIZING_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing.chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing.min_chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing.meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: chunk_bytes = %0d\n", rq_sizing->chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_bytes = %0d\n", rq_sizing->min_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_bytes = %0d\n", rq_sizing->meta_chunk_bytes);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_bytes = %0d\n",
- rq_sizing.min_meta_chunk_bytes);
- dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing.mpte_group_bytes);
- dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing.dpte_group_bytes);
+ rq_sizing->min_meta_chunk_bytes);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_bytes = %0d\n", rq_sizing->mpte_group_bytes);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_bytes = %0d\n", rq_sizing->dpte_group_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param)
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_DLG_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: swath_width_ub = %0d\n",
- rq_dlg_param.swath_width_ub);
+ rq_dlg_param->swath_width_ub);
dml_print(
"DML_RQ_DLG_CALC: swath_height = %0d\n",
- rq_dlg_param.swath_height);
+ rq_dlg_param->swath_height);
dml_print(
"DML_RQ_DLG_CALC: req_per_swath_ub = %0d\n",
- rq_dlg_param.req_per_swath_ub);
+ rq_dlg_param->req_per_swath_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_pte_bytes_per_frame_ub = %0d\n",
- rq_dlg_param.meta_pte_bytes_per_frame_ub);
+ rq_dlg_param->meta_pte_bytes_per_frame_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_req_per_row_ub = %0d\n",
- rq_dlg_param.dpte_req_per_row_ub);
+ rq_dlg_param->dpte_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_groups_per_row_ub = %0d\n",
- rq_dlg_param.dpte_groups_per_row_ub);
+ rq_dlg_param->dpte_groups_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: dpte_row_height = %0d\n",
- rq_dlg_param.dpte_row_height);
+ rq_dlg_param->dpte_row_height);
dml_print(
"DML_RQ_DLG_CALC: dpte_bytes_per_row_ub = %0d\n",
- rq_dlg_param.dpte_bytes_per_row_ub);
+ rq_dlg_param->dpte_bytes_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_chunks_per_row_ub = %0d\n",
- rq_dlg_param.meta_chunks_per_row_ub);
+ rq_dlg_param->meta_chunks_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_req_per_row_ub = %0d\n",
- rq_dlg_param.meta_req_per_row_ub);
+ rq_dlg_param->meta_req_per_row_ub);
dml_print(
"DML_RQ_DLG_CALC: meta_row_height = %0d\n",
- rq_dlg_param.meta_row_height);
+ rq_dlg_param->meta_row_height);
dml_print(
"DML_RQ_DLG_CALC: meta_bytes_per_row_ub = %0d\n",
- rq_dlg_param.meta_bytes_per_row_ub);
+ rq_dlg_param->meta_bytes_per_row_ub);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param)
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_MISC_PARAM_ST\n");
dml_print(
"DML_RQ_DLG_CALC: full_swath_bytes = %0d\n",
- rq_misc_param.full_swath_bytes);
+ rq_misc_param->full_swath_bytes);
dml_print(
"DML_RQ_DLG_CALC: stored_swath_bytes = %0d\n",
- rq_misc_param.stored_swath_bytes);
- dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param.blk256_width);
- dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param.blk256_height);
- dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param.req_width);
- dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param.req_height);
+ rq_misc_param->stored_swath_bytes);
+ dml_print("DML_RQ_DLG_CALC: blk256_width = %0d\n", rq_misc_param->blk256_width);
+ dml_print("DML_RQ_DLG_CALC: blk256_height = %0d\n", rq_misc_param->blk256_height);
+ dml_print("DML_RQ_DLG_CALC: req_width = %0d\n", rq_misc_param->req_width);
+ dml_print("DML_RQ_DLG_CALC: req_height = %0d\n", rq_misc_param->req_height);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param)
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_l);
+ print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_dlg_params_st(mode_lib, rq_dlg_param.rq_c);
+ print__data_rq_dlg_params_st(mode_lib, &rq_dlg_param->rq_c);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param)
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_DLG_PARAM_ST\n");
- dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param.t_mclk_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param.t_urg_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param.t_sr_wm_us);
- dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param.t_extra_us);
+ dml_print("DML_RQ_DLG_CALC: t_mclk_wm_us = %3.2f\n", dlg_sys_param->t_mclk_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us);
+ dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us);
dml_print(
"DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n",
- dlg_sys_param.t_srx_delay_us);
+ dlg_sys_param->t_srx_delay_us);
dml_print(
"DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n",
- dlg_sys_param.deepsleep_dcfclk_mhz);
+ dlg_sys_param->deepsleep_dcfclk_mhz);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bw = %3.2f\n",
- dlg_sys_param.total_flip_bw);
+ dlg_sys_param->total_flip_bw);
dml_print(
"DML_RQ_DLG_CALC: total_flip_bytes = %i\n",
- dlg_sys_param.total_flip_bytes);
+ dlg_sys_param->total_flip_bytes);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st rq_regs)
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DATA_RQ_REGS_ST\n");
- dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs.chunk_size);
- dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs.min_chunk_size);
- dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs.meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: chunk_size = 0x%0x\n", rq_regs->chunk_size);
+ dml_print("DML_RQ_DLG_CALC: min_chunk_size = 0x%0x\n", rq_regs->min_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: meta_chunk_size = 0x%0x\n", rq_regs->meta_chunk_size);
dml_print(
"DML_RQ_DLG_CALC: min_meta_chunk_size = 0x%0x\n",
- rq_regs.min_meta_chunk_size);
- dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs.dpte_group_size);
- dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs.mpte_group_size);
- dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs.swath_height);
+ rq_regs->min_meta_chunk_size);
+ dml_print("DML_RQ_DLG_CALC: dpte_group_size = 0x%0x\n", rq_regs->dpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: mpte_group_size = 0x%0x\n", rq_regs->mpte_group_size);
+ dml_print("DML_RQ_DLG_CALC: swath_height = 0x%0x\n", rq_regs->swath_height);
dml_print(
"DML_RQ_DLG_CALC: pte_row_height_linear = 0x%0x\n",
- rq_regs.pte_row_height_linear);
+ rq_regs->pte_row_height_linear);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs)
+void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_RQ_REGS_ST\n");
dml_print("DML_RQ_DLG_CALC: <LUMA>\n");
- print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_l);
+ print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_l);
dml_print("DML_RQ_DLG_CALC: <CHROMA>\n");
- print__data_rq_regs_st(mode_lib, rq_regs.rq_regs_c);
- dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs.drq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs.prq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs.mrq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs.crq_expansion_mode);
- dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs.plane1_base_address);
+ print__data_rq_regs_st(mode_lib, &rq_regs->rq_regs_c);
+ dml_print("DML_RQ_DLG_CALC: drq_expansion_mode = 0x%0x\n", rq_regs->drq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: prq_expansion_mode = 0x%0x\n", rq_regs->prq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: mrq_expansion_mode = 0x%0x\n", rq_regs->mrq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: crq_expansion_mode = 0x%0x\n", rq_regs->crq_expansion_mode);
+ dml_print("DML_RQ_DLG_CALC: plane1_base_address = 0x%0x\n", rq_regs->plane1_base_address);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs)
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_DLG_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: refcyc_h_blank_end = 0x%0x\n",
- dlg_regs.refcyc_h_blank_end);
+ dlg_regs->refcyc_h_blank_end);
dml_print(
"DML_RQ_DLG_CALC: dlg_vblank_end = 0x%0x\n",
- dlg_regs.dlg_vblank_end);
+ dlg_regs->dlg_vblank_end);
dml_print(
"DML_RQ_DLG_CALC: min_dst_y_next_start = 0x%0x\n",
- dlg_regs.min_dst_y_next_start);
+ dlg_regs->min_dst_y_next_start);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_htotal = 0x%0x\n",
- dlg_regs.refcyc_per_htotal);
+ dlg_regs->refcyc_per_htotal);
dml_print(
"DML_RQ_DLG_CALC: refcyc_x_after_scaler = 0x%0x\n",
- dlg_regs.refcyc_x_after_scaler);
+ dlg_regs->refcyc_x_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_after_scaler = 0x%0x\n",
- dlg_regs.dst_y_after_scaler);
+ dlg_regs->dst_y_after_scaler);
dml_print(
"DML_RQ_DLG_CALC: dst_y_prefetch = 0x%0x\n",
- dlg_regs.dst_y_prefetch);
+ dlg_regs->dst_y_prefetch);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_vblank = 0x%0x\n",
- dlg_regs.dst_y_per_vm_vblank);
+ dlg_regs->dst_y_per_vm_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_vblank = 0x%0x\n",
- dlg_regs.dst_y_per_row_vblank);
+ dlg_regs->dst_y_per_row_vblank);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_vm_flip = 0x%0x\n",
- dlg_regs.dst_y_per_vm_flip);
+ dlg_regs->dst_y_per_vm_flip);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_row_flip = 0x%0x\n",
- dlg_regs.dst_y_per_row_flip);
+ dlg_regs->dst_y_per_row_flip);
dml_print(
"DML_RQ_DLG_CALC: ref_freq_to_pix_freq = 0x%0x\n",
- dlg_regs.ref_freq_to_pix_freq);
+ dlg_regs->ref_freq_to_pix_freq);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch = 0x%0x\n",
- dlg_regs.vratio_prefetch);
+ dlg_regs->vratio_prefetch);
dml_print(
"DML_RQ_DLG_CALC: vratio_prefetch_c = 0x%0x\n",
- dlg_regs.vratio_prefetch_c);
+ dlg_regs->vratio_prefetch_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_vblank_l);
+ dlg_regs->refcyc_per_pte_group_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_vblank_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_vblank_c);
+ dlg_regs->refcyc_per_pte_group_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_vblank_l);
+ dlg_regs->refcyc_per_meta_chunk_vblank_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_vblank_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_vblank_c);
+ dlg_regs->refcyc_per_meta_chunk_vblank_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_flip_l);
+ dlg_regs->refcyc_per_pte_group_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_flip_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_flip_c);
+ dlg_regs->refcyc_per_pte_group_flip_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_flip_l);
+ dlg_regs->refcyc_per_meta_chunk_flip_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_flip_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_flip_c);
+ dlg_regs->refcyc_per_meta_chunk_flip_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_l = 0x%0x\n",
- dlg_regs.dst_y_per_pte_row_nom_l);
+ dlg_regs->dst_y_per_pte_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_pte_row_nom_c = 0x%0x\n",
- dlg_regs.dst_y_per_pte_row_nom_c);
+ dlg_regs->dst_y_per_pte_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_l = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_nom_l);
+ dlg_regs->refcyc_per_pte_group_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_pte_group_nom_c = 0x%0x\n",
- dlg_regs.refcyc_per_pte_group_nom_c);
+ dlg_regs->refcyc_per_pte_group_nom_c);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_l = 0x%0x\n",
- dlg_regs.dst_y_per_meta_row_nom_l);
+ dlg_regs->dst_y_per_meta_row_nom_l);
dml_print(
"DML_RQ_DLG_CALC: dst_y_per_meta_row_nom_c = 0x%0x\n",
- dlg_regs.dst_y_per_meta_row_nom_c);
+ dlg_regs->dst_y_per_meta_row_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_l = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_nom_l);
+ dlg_regs->refcyc_per_meta_chunk_nom_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_meta_chunk_nom_c = 0x%0x\n",
- dlg_regs.refcyc_per_meta_chunk_nom_c);
+ dlg_regs->refcyc_per_meta_chunk_nom_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_l = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_pre_l);
+ dlg_regs->refcyc_per_line_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_pre_c = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_pre_c);
+ dlg_regs->refcyc_per_line_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_l = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_l);
+ dlg_regs->refcyc_per_line_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_line_delivery_c = 0x%0x\n",
- dlg_regs.refcyc_per_line_delivery_c);
+ dlg_regs->refcyc_per_line_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur0 = 0x%0x\n",
- dlg_regs.chunk_hdl_adjust_cur0);
+ dlg_regs->chunk_hdl_adjust_cur0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_offset_cur1 = 0x%0x\n",
- dlg_regs.dst_y_offset_cur1);
+ dlg_regs->dst_y_offset_cur1);
dml_print(
"DML_RQ_DLG_CALC: chunk_hdl_adjust_cur1 = 0x%0x\n",
- dlg_regs.chunk_hdl_adjust_cur1);
+ dlg_regs->chunk_hdl_adjust_cur1);
dml_print(
"DML_RQ_DLG_CALC: vready_after_vcount0 = 0x%0x\n",
- dlg_regs.vready_after_vcount0);
+ dlg_regs->vready_after_vcount0);
dml_print(
"DML_RQ_DLG_CALC: dst_y_delta_drq_limit = 0x%0x\n",
- dlg_regs.dst_y_delta_drq_limit);
+ dlg_regs->dst_y_delta_drq_limit);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_transfer_delay = 0x%0x\n",
- dlg_regs.xfc_reg_transfer_delay);
+ dlg_regs->xfc_reg_transfer_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_precharge_delay = 0x%0x\n",
- dlg_regs.xfc_reg_precharge_delay);
+ dlg_regs->xfc_reg_precharge_delay);
dml_print(
"DML_RQ_DLG_CALC: xfc_reg_remote_surface_flip_latency = 0x%0x\n",
- dlg_regs.xfc_reg_remote_surface_flip_latency);
+ dlg_regs->xfc_reg_remote_surface_flip_latency);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_vm_dmdata = 0x%0x\n",
- dlg_regs.refcyc_per_vm_dmdata);
+ dlg_regs->refcyc_per_vm_dmdata);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
-void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs)
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs)
{
dml_print("DML_RQ_DLG_CALC: =====================================\n");
dml_print("DML_RQ_DLG_CALC: DISPLAY_TTU_REGS_ST\n");
dml_print(
"DML_RQ_DLG_CALC: qos_level_low_wm = 0x%0x\n",
- ttu_regs.qos_level_low_wm);
+ ttu_regs->qos_level_low_wm);
dml_print(
"DML_RQ_DLG_CALC: qos_level_high_wm = 0x%0x\n",
- ttu_regs.qos_level_high_wm);
+ ttu_regs->qos_level_high_wm);
dml_print(
"DML_RQ_DLG_CALC: min_ttu_vblank = 0x%0x\n",
- ttu_regs.min_ttu_vblank);
+ ttu_regs->min_ttu_vblank);
dml_print(
"DML_RQ_DLG_CALC: qos_level_flip = 0x%0x\n",
- ttu_regs.qos_level_flip);
+ ttu_regs->qos_level_flip);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_l = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_l);
+ ttu_regs->refcyc_per_req_delivery_pre_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_l = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_l);
+ ttu_regs->refcyc_per_req_delivery_l);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_c = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_c);
+ ttu_regs->refcyc_per_req_delivery_pre_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_c = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_c);
+ ttu_regs->refcyc_per_req_delivery_c);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur0 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_cur0);
+ ttu_regs->refcyc_per_req_delivery_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur0 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_cur0);
+ ttu_regs->refcyc_per_req_delivery_pre_cur0);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_cur1 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_cur1);
+ ttu_regs->refcyc_per_req_delivery_cur1);
dml_print(
"DML_RQ_DLG_CALC: refcyc_per_req_delivery_pre_cur1 = 0x%0x\n",
- ttu_regs.refcyc_per_req_delivery_pre_cur1);
+ ttu_regs->refcyc_per_req_delivery_pre_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_l = 0x%0x\n",
- ttu_regs.qos_level_fixed_l);
+ ttu_regs->qos_level_fixed_l);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_l = 0x%0x\n",
- ttu_regs.qos_ramp_disable_l);
+ ttu_regs->qos_ramp_disable_l);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_c = 0x%0x\n",
- ttu_regs.qos_level_fixed_c);
+ ttu_regs->qos_level_fixed_c);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_c = 0x%0x\n",
- ttu_regs.qos_ramp_disable_c);
+ ttu_regs->qos_ramp_disable_c);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur0 = 0x%0x\n",
- ttu_regs.qos_level_fixed_cur0);
+ ttu_regs->qos_level_fixed_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur0 = 0x%0x\n",
- ttu_regs.qos_ramp_disable_cur0);
+ ttu_regs->qos_ramp_disable_cur0);
dml_print(
"DML_RQ_DLG_CALC: qos_level_fixed_cur1 = 0x%0x\n",
- ttu_regs.qos_level_fixed_cur1);
+ ttu_regs->qos_level_fixed_cur1);
dml_print(
"DML_RQ_DLG_CALC: qos_ramp_disable_cur1 = 0x%0x\n",
- ttu_regs.qos_ramp_disable_cur1);
+ ttu_regs->qos_ramp_disable_cur1);
dml_print("DML_RQ_DLG_CALC: =====================================\n");
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
index 2555ef0358c2..ebcd717744e5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.h
@@ -31,16 +31,16 @@
/* Function: Printer functions
* Print various struct
*/
-void print__rq_params_st(struct display_mode_lib *mode_lib, display_rq_params_st rq_param);
-void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, display_data_rq_sizing_params_st rq_sizing);
-void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, display_data_rq_dlg_params_st rq_dlg_param);
-void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, display_data_rq_misc_params_st rq_misc_param);
-void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, display_rq_dlg_params_st rq_dlg_param);
-void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, display_dlg_sys_params_st dlg_sys_param);
+void print__rq_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_params_st *rq_param);
+void print__data_rq_sizing_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing);
+void print__data_rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param);
+void print__data_rq_misc_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param);
+void print__rq_dlg_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param);
+void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param);
-void print__data_rq_regs_st(struct display_mode_lib *mode_lib, display_data_rq_regs_st data_rq_regs);
-void print__rq_regs_st(struct display_mode_lib *mode_lib, display_rq_regs_st rq_regs);
-void print__dlg_regs_st(struct display_mode_lib *mode_lib, display_dlg_regs_st dlg_regs);
-void print__ttu_regs_st(struct display_mode_lib *mode_lib, display_ttu_regs_st ttu_regs);
+void print__data_rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_data_rq_regs_st *rq_regs);
+void print__rq_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_rq_regs_st *rq_regs);
+void print__dlg_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_dlg_regs_st *dlg_regs);
+void print__ttu_regs_st(struct display_mode_lib *mode_lib, const struct _vcs_dpi_display_ttu_regs_st *ttu_regs);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
index 8f2b1684c231..59dc2c5b58dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
@@ -206,47 +206,47 @@ static unsigned int get_blk_size_bytes(const enum source_macro_tile_size tile_si
static void extract_rq_sizing_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_data_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_data_rq_sizing_params_st rq_sizing)
+ const struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing)
{
DTRACE("DLG: %s: rq_sizing param", __func__);
print__data_rq_sizing_params_st(mode_lib, rq_sizing);
- rq_regs->chunk_size = dml_log2(rq_sizing.chunk_bytes) - 10;
+ rq_regs->chunk_size = dml_log2(rq_sizing->chunk_bytes) - 10;
- if (rq_sizing.min_chunk_bytes == 0)
+ if (rq_sizing->min_chunk_bytes == 0)
rq_regs->min_chunk_size = 0;
else
- rq_regs->min_chunk_size = dml_log2(rq_sizing.min_chunk_bytes) - 8 + 1;
+ rq_regs->min_chunk_size = dml_log2(rq_sizing->min_chunk_bytes) - 8 + 1;
- rq_regs->meta_chunk_size = dml_log2(rq_sizing.meta_chunk_bytes) - 10;
- if (rq_sizing.min_meta_chunk_bytes == 0)
+ rq_regs->meta_chunk_size = dml_log2(rq_sizing->meta_chunk_bytes) - 10;
+ if (rq_sizing->min_meta_chunk_bytes == 0)
rq_regs->min_meta_chunk_size = 0;
else
- rq_regs->min_meta_chunk_size = dml_log2(rq_sizing.min_meta_chunk_bytes) - 6 + 1;
+ rq_regs->min_meta_chunk_size = dml_log2(rq_sizing->min_meta_chunk_bytes) - 6 + 1;
- rq_regs->dpte_group_size = dml_log2(rq_sizing.dpte_group_bytes) - 6;
- rq_regs->mpte_group_size = dml_log2(rq_sizing.mpte_group_bytes) - 6;
+ rq_regs->dpte_group_size = dml_log2(rq_sizing->dpte_group_bytes) - 6;
+ rq_regs->mpte_group_size = dml_log2(rq_sizing->mpte_group_bytes) - 6;
}
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_rq_params_st rq_param)
+ const struct _vcs_dpi_display_rq_params_st *rq_param)
{
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
unsigned int detile_buf_plane1_addr = 0;
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), rq_param.sizing.rq_l);
- if (rq_param.yuv420)
- extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), rq_param.sizing.rq_c);
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_l), &rq_param->sizing.rq_l);
+ if (rq_param->yuv420)
+ extract_rq_sizing_regs(mode_lib, &(rq_regs->rq_regs_c), &rq_param->sizing.rq_c);
- rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height);
- rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height);
+ rq_regs->rq_regs_l.swath_height = dml_log2(rq_param->dlg.rq_l.swath_height);
+ rq_regs->rq_regs_c.swath_height = dml_log2(rq_param->dlg.rq_c.swath_height);
/* TODO: take the max between luma, chroma chunk size?
* okay for now, as we are setting chunk_bytes to 8kb anyways
*/
- if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
+ if (rq_param->sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */
rq_regs->drq_expansion_mode = 0;
} else {
rq_regs->drq_expansion_mode = 2;
@@ -255,9 +255,9 @@ void dml1_extract_rq_regs(
rq_regs->mrq_expansion_mode = 1;
rq_regs->crq_expansion_mode = 1;
- if (rq_param.yuv420) {
- if ((double) rq_param.misc.rq_l.stored_swath_bytes
- / (double) rq_param.misc.rq_c.stored_swath_bytes <= 1.5) {
+ if (rq_param->yuv420) {
+ if ((double) rq_param->misc.rq_l.stored_swath_bytes
+ / (double) rq_param->misc.rq_c.stored_swath_bytes <= 1.5) {
detile_buf_plane1_addr = (detile_buf_size_in_bytes / 2.0 / 64.0); /* half to chroma */
} else {
detile_buf_plane1_addr = dml_round_to_multiple(
@@ -272,7 +272,7 @@ void dml1_extract_rq_regs(
static void handle_det_buf_split(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
unsigned int total_swath_bytes = 0;
unsigned int swath_bytes_l = 0;
@@ -281,8 +281,8 @@ static void handle_det_buf_split(
unsigned int full_swath_bytes_packed_c = 0;
bool req128_l = 0;
bool req128_c = 0;
- bool surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- bool surf_vert = (pipe_src_param.source_scan == dm_vert);
+ bool surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ bool surf_vert = (pipe_src_param->source_scan == dm_vert);
unsigned int log2_swath_height_l = 0;
unsigned int log2_swath_height_c = 0;
unsigned int detile_buf_size_in_bytes = mode_lib->ip.det_buffer_size_kbytes * 1024;
@@ -556,7 +556,7 @@ static void get_surf_rq_param(
struct _vcs_dpi_display_data_rq_sizing_params_st *rq_sizing_param,
struct _vcs_dpi_display_data_rq_dlg_params_st *rq_dlg_param,
struct _vcs_dpi_display_data_rq_misc_params_st *rq_misc_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param,
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param,
bool is_chroma)
{
bool mode_422 = 0;
@@ -622,15 +622,15 @@ static void get_surf_rq_param(
/* TODO check if ppe apply for both luma and chroma in 422 case */
if (is_chroma) {
- vp_width = pipe_src_param.viewport_width_c / ppe;
- vp_height = pipe_src_param.viewport_height_c;
- data_pitch = pipe_src_param.data_pitch_c;
- meta_pitch = pipe_src_param.meta_pitch_c;
+ vp_width = pipe_src_param->viewport_width_c / ppe;
+ vp_height = pipe_src_param->viewport_height_c;
+ data_pitch = pipe_src_param->data_pitch_c;
+ meta_pitch = pipe_src_param->meta_pitch_c;
} else {
- vp_width = pipe_src_param.viewport_width / ppe;
- vp_height = pipe_src_param.viewport_height;
- data_pitch = pipe_src_param.data_pitch;
- meta_pitch = pipe_src_param.meta_pitch;
+ vp_width = pipe_src_param->viewport_width / ppe;
+ vp_height = pipe_src_param->viewport_height;
+ data_pitch = pipe_src_param->data_pitch;
+ meta_pitch = pipe_src_param->meta_pitch;
}
rq_sizing_param->chunk_bytes = 8192;
@@ -645,11 +645,11 @@ static void get_surf_rq_param(
rq_sizing_param->mpte_group_bytes = 2048;
- surf_linear = (pipe_src_param.sw_mode == dm_sw_linear);
- surf_vert = (pipe_src_param.source_scan == dm_vert);
+ surf_linear = (pipe_src_param->sw_mode == dm_sw_linear);
+ surf_vert = (pipe_src_param->source_scan == dm_vert);
bytes_per_element = get_bytes_per_element(
- (enum source_format_class) pipe_src_param.source_format,
+ (enum source_format_class) pipe_src_param->source_format,
is_chroma);
log2_bytes_per_element = dml_log2(bytes_per_element);
blk256_width = 0;
@@ -671,7 +671,7 @@ static void get_surf_rq_param(
log2_blk256_height = dml_log2((double) blk256_height);
blk_bytes =
surf_linear ? 256 : get_blk_size_bytes(
- (enum source_macro_tile_size) pipe_src_param.macro_tile_size);
+ (enum source_macro_tile_size) pipe_src_param->macro_tile_size);
log2_blk_bytes = dml_log2((double) blk_bytes);
log2_blk_height = 0;
log2_blk_width = 0;
@@ -682,7 +682,7 @@ static void get_surf_rq_param(
* "/2" is like square root
* blk is vertical biased
*/
- if (pipe_src_param.sw_mode != dm_sw_linear)
+ if (pipe_src_param->sw_mode != dm_sw_linear)
log2_blk_height = log2_blk256_height
+ dml_ceil((double) (log2_blk_bytes - 8) / 2.0, 1);
else
@@ -930,10 +930,10 @@ static void get_surf_rq_param(
&func_meta_row_height,
vp_width,
data_pitch,
- pipe_src_param.source_format,
- pipe_src_param.sw_mode,
- pipe_src_param.macro_tile_size,
- pipe_src_param.source_scan,
+ pipe_src_param->source_format,
+ pipe_src_param->sw_mode,
+ pipe_src_param->macro_tile_size,
+ pipe_src_param->source_scan,
is_chroma);
/* Just a check to make sure this function and the new one give the same
@@ -960,12 +960,12 @@ static void get_surf_rq_param(
void dml1_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param)
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param)
{
/* get param for luma surface */
- rq_param->yuv420 = pipe_src_param.source_format == dm_420_8
- || pipe_src_param.source_format == dm_420_10;
- rq_param->yuv420_10bpc = pipe_src_param.source_format == dm_420_10;
+ rq_param->yuv420 = pipe_src_param->source_format == dm_420_8
+ || pipe_src_param->source_format == dm_420_10;
+ rq_param->yuv420_10bpc = pipe_src_param->source_format == dm_420_10;
get_surf_rq_param(
mode_lib,
@@ -975,7 +975,7 @@ void dml1_rq_dlg_get_rq_params(
pipe_src_param,
0);
- if (is_dual_plane((enum source_format_class) pipe_src_param.source_format)) {
+ if (is_dual_plane((enum source_format_class) pipe_src_param->source_format)) {
/* get param for chroma surface */
get_surf_rq_param(
mode_lib,
@@ -988,7 +988,7 @@ void dml1_rq_dlg_get_rq_params(
/* calculate how to split the det buffer space between luma and chroma */
handle_det_buf_split(mode_lib, rq_param, pipe_src_param);
- print__rq_params_st(mode_lib, *rq_param);
+ print__rq_params_st(mode_lib, rq_param);
}
/* Note: currently taken in as is.
@@ -998,26 +998,26 @@ void dml1_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_dlg_regs_st *disp_dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *disp_ttu_regs,
- const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
- const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
- const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
const bool iflip_en)
{
/* Timing */
- unsigned int htotal = e2e_pipe_param.pipe.dest.htotal;
- unsigned int hblank_end = e2e_pipe_param.pipe.dest.hblank_end;
- unsigned int vblank_start = e2e_pipe_param.pipe.dest.vblank_start;
- unsigned int vblank_end = e2e_pipe_param.pipe.dest.vblank_end;
- bool interlaced = e2e_pipe_param.pipe.dest.interlaced;
+ unsigned int htotal = e2e_pipe_param->pipe.dest.htotal;
+ unsigned int hblank_end = e2e_pipe_param->pipe.dest.hblank_end;
+ unsigned int vblank_start = e2e_pipe_param->pipe.dest.vblank_start;
+ unsigned int vblank_end = e2e_pipe_param->pipe.dest.vblank_end;
+ bool interlaced = e2e_pipe_param->pipe.dest.interlaced;
unsigned int min_vblank = mode_lib->ip.min_vblank_lines;
- double pclk_freq_in_mhz = e2e_pipe_param.pipe.dest.pixel_rate_mhz;
- double refclk_freq_in_mhz = e2e_pipe_param.clks_cfg.refclk_mhz;
- double dppclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dppclk_mhz;
- double dispclk_freq_in_mhz = e2e_pipe_param.clks_cfg.dispclk_mhz;
+ double pclk_freq_in_mhz = e2e_pipe_param->pipe.dest.pixel_rate_mhz;
+ double refclk_freq_in_mhz = e2e_pipe_param->clks_cfg.refclk_mhz;
+ double dppclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dppclk_mhz;
+ double dispclk_freq_in_mhz = e2e_pipe_param->clks_cfg.dispclk_mhz;
double ref_freq_to_pix_freq;
double prefetch_xy_calc_in_dcfclk;
@@ -1160,13 +1160,13 @@ void dml1_rq_dlg_get_dlg_params(
disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */
prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */
- min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz;
+ min_dcfclk_mhz = dlg_sys_param->deepsleep_dcfclk_mhz;
t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz;
- min_ttu_vblank = dlg_sys_param.t_urg_wm_us;
+ min_ttu_vblank = dlg_sys_param->t_urg_wm_us;
if (cstate_en)
- min_ttu_vblank = dml_max(dlg_sys_param.t_sr_wm_us, min_ttu_vblank);
+ min_ttu_vblank = dml_max(dlg_sys_param->t_sr_wm_us, min_ttu_vblank);
if (pstate_en)
- min_ttu_vblank = dml_max(dlg_sys_param.t_mclk_wm_us, min_ttu_vblank);
+ min_ttu_vblank = dml_max(dlg_sys_param->t_mclk_wm_us, min_ttu_vblank);
min_ttu_vblank = min_ttu_vblank + t_calc_us;
min_dst_y_ttu_vblank = min_ttu_vblank * pclk_freq_in_mhz / (double) htotal;
@@ -1197,59 +1197,59 @@ void dml1_rq_dlg_get_dlg_params(
/* ------------------------- */
/* Prefetch Calc */
/* Source */
- dcc_en = e2e_pipe_param.pipe.src.dcc;
+ dcc_en = e2e_pipe_param->pipe.src.dcc;
dual_plane = is_dual_plane(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format);
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format);
mode_422 = 0; /* TODO */
- access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
+ access_dir = (e2e_pipe_param->pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */
bytes_per_element_l = get_bytes_per_element(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format,
0);
bytes_per_element_c = get_bytes_per_element(
- (enum source_format_class) e2e_pipe_param.pipe.src.source_format,
+ (enum source_format_class) e2e_pipe_param->pipe.src.source_format,
1);
- vp_height_l = e2e_pipe_param.pipe.src.viewport_height;
- vp_width_l = e2e_pipe_param.pipe.src.viewport_width;
- vp_height_c = e2e_pipe_param.pipe.src.viewport_height_c;
- vp_width_c = e2e_pipe_param.pipe.src.viewport_width_c;
+ vp_height_l = e2e_pipe_param->pipe.src.viewport_height;
+ vp_width_l = e2e_pipe_param->pipe.src.viewport_width;
+ vp_height_c = e2e_pipe_param->pipe.src.viewport_height_c;
+ vp_width_c = e2e_pipe_param->pipe.src.viewport_width_c;
/* Scaling */
- htaps_l = e2e_pipe_param.pipe.scale_taps.htaps;
- htaps_c = e2e_pipe_param.pipe.scale_taps.htaps_c;
- hratios_l = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
- hratios_c = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio_c;
- vratio_l = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio;
- vratio_c = e2e_pipe_param.pipe.scale_ratio_depth.vscl_ratio_c;
+ htaps_l = e2e_pipe_param->pipe.scale_taps.htaps;
+ htaps_c = e2e_pipe_param->pipe.scale_taps.htaps_c;
+ hratios_l = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
+ hratios_c = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio_c;
+ vratio_l = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio;
+ vratio_c = e2e_pipe_param->pipe.scale_ratio_depth.vscl_ratio_c;
line_time_in_us = (htotal / pclk_freq_in_mhz);
- vinit_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit;
- vinit_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_c;
- vinit_bot_l = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot;
- vinit_bot_c = e2e_pipe_param.pipe.scale_ratio_depth.vinit_bot_c;
-
- swath_height_l = rq_dlg_param.rq_l.swath_height;
- swath_width_ub_l = rq_dlg_param.rq_l.swath_width_ub;
- dpte_bytes_per_row_ub_l = rq_dlg_param.rq_l.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_l = rq_dlg_param.rq_l.dpte_groups_per_row_ub;
- meta_pte_bytes_per_frame_ub_l = rq_dlg_param.rq_l.meta_pte_bytes_per_frame_ub;
- meta_bytes_per_row_ub_l = rq_dlg_param.rq_l.meta_bytes_per_row_ub;
-
- swath_height_c = rq_dlg_param.rq_c.swath_height;
- swath_width_ub_c = rq_dlg_param.rq_c.swath_width_ub;
- dpte_bytes_per_row_ub_c = rq_dlg_param.rq_c.dpte_bytes_per_row_ub;
- dpte_groups_per_row_ub_c = rq_dlg_param.rq_c.dpte_groups_per_row_ub;
-
- meta_chunks_per_row_ub_l = rq_dlg_param.rq_l.meta_chunks_per_row_ub;
- vupdate_offset = e2e_pipe_param.pipe.dest.vupdate_offset;
- vupdate_width = e2e_pipe_param.pipe.dest.vupdate_width;
- vready_offset = e2e_pipe_param.pipe.dest.vready_offset;
+ vinit_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit;
+ vinit_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_c;
+ vinit_bot_l = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot;
+ vinit_bot_c = e2e_pipe_param->pipe.scale_ratio_depth.vinit_bot_c;
+
+ swath_height_l = rq_dlg_param->rq_l.swath_height;
+ swath_width_ub_l = rq_dlg_param->rq_l.swath_width_ub;
+ dpte_bytes_per_row_ub_l = rq_dlg_param->rq_l.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_l = rq_dlg_param->rq_l.dpte_groups_per_row_ub;
+ meta_pte_bytes_per_frame_ub_l = rq_dlg_param->rq_l.meta_pte_bytes_per_frame_ub;
+ meta_bytes_per_row_ub_l = rq_dlg_param->rq_l.meta_bytes_per_row_ub;
+
+ swath_height_c = rq_dlg_param->rq_c.swath_height;
+ swath_width_ub_c = rq_dlg_param->rq_c.swath_width_ub;
+ dpte_bytes_per_row_ub_c = rq_dlg_param->rq_c.dpte_bytes_per_row_ub;
+ dpte_groups_per_row_ub_c = rq_dlg_param->rq_c.dpte_groups_per_row_ub;
+
+ meta_chunks_per_row_ub_l = rq_dlg_param->rq_l.meta_chunks_per_row_ub;
+ vupdate_offset = e2e_pipe_param->pipe.dest.vupdate_offset;
+ vupdate_width = e2e_pipe_param->pipe.dest.vupdate_width;
+ vready_offset = e2e_pipe_param->pipe.dest.vready_offset;
dppclk_delay_subtotal = mode_lib->ip.dppclk_delay_subtotal;
dispclk_delay_subtotal = mode_lib->ip.dispclk_delay_subtotal;
pixel_rate_delay_subtotal = dppclk_delay_subtotal * pclk_freq_in_mhz / dppclk_freq_in_mhz
+ dispclk_delay_subtotal * pclk_freq_in_mhz / dispclk_freq_in_mhz;
- vstartup_start = e2e_pipe_param.pipe.dest.vstartup_start;
+ vstartup_start = e2e_pipe_param->pipe.dest.vstartup_start;
if (interlaced)
vstartup_start = vstartup_start / 2;
@@ -1276,13 +1276,13 @@ void dml1_rq_dlg_get_dlg_params(
dst_x_after_scaler = 0;
dst_y_after_scaler = 0;
- if (e2e_pipe_param.pipe.src.is_hsplit)
+ if (e2e_pipe_param->pipe.src.is_hsplit)
dst_x_after_scaler = pixel_rate_delay_subtotal
- + e2e_pipe_param.pipe.dest.recout_width;
+ + e2e_pipe_param->pipe.dest.recout_width;
else
dst_x_after_scaler = pixel_rate_delay_subtotal;
- if (e2e_pipe_param.dout.output_format == dm_420)
+ if (e2e_pipe_param->dout.output_format == dm_420)
dst_y_after_scaler = 1;
else
dst_y_after_scaler = 0;
@@ -1334,7 +1334,7 @@ void dml1_rq_dlg_get_dlg_params(
DTRACE(
"DLG: %s: t_srx_delay_us = %3.2f",
__func__,
- (double) dlg_sys_param.t_srx_delay_us);
+ (double) dlg_sys_param->t_srx_delay_us);
DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us);
DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset);
DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width);
@@ -1408,12 +1408,12 @@ void dml1_rq_dlg_get_dlg_params(
DTRACE("DLG: %s: dpte_row_bytes = %d", __func__, dpte_row_bytes);
prefetch_bw = (vm_bytes + 2 * dpte_row_bytes + 2 * meta_row_bytes + sw_bytes) / t_pre_us;
- flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param.total_flip_bw)
- / (double) dlg_sys_param.total_flip_bytes;
+ flip_bw = ((vm_bytes + dpte_row_bytes + meta_row_bytes) * dlg_sys_param->total_flip_bw)
+ / (double) dlg_sys_param->total_flip_bytes;
t_vm_us = line_time_in_us / 4.0;
if (vm_en && dcc_en) {
t_vm_us = dml_max(
- dlg_sys_param.t_extra_us,
+ dlg_sys_param->t_extra_us,
dml_max((double) vm_bytes / prefetch_bw, t_vm_us));
if (iflip_en && !dual_plane) {
@@ -1423,12 +1423,12 @@ void dml1_rq_dlg_get_dlg_params(
}
}
- t_r0_us = dml_max(dlg_sys_param.t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
+ t_r0_us = dml_max(dlg_sys_param->t_extra_us - t_vm_us, line_time_in_us - t_vm_us);
if (vm_en || dcc_en) {
t_r0_us = dml_max(
(double) (dpte_row_bytes + meta_row_bytes) / prefetch_bw,
- dlg_sys_param.t_extra_us);
+ dlg_sys_param->t_extra_us);
t_r0_us = dml_max((double) (line_time_in_us - t_vm_us), t_r0_us);
if (iflip_en && !dual_plane) {
@@ -1550,15 +1550,15 @@ void dml1_rq_dlg_get_dlg_params(
disp_dlg_regs->refcyc_per_meta_chunk_vblank_l;/* dcc for 4:2:0 is not supported in dcn1.0. assigned to be the same as _l for now */
/* Active */
- req_per_swath_ub_l = rq_dlg_param.rq_l.req_per_swath_ub;
- req_per_swath_ub_c = rq_dlg_param.rq_c.req_per_swath_ub;
- meta_row_height_l = rq_dlg_param.rq_l.meta_row_height;
+ req_per_swath_ub_l = rq_dlg_param->rq_l.req_per_swath_ub;
+ req_per_swath_ub_c = rq_dlg_param->rq_c.req_per_swath_ub;
+ meta_row_height_l = rq_dlg_param->rq_l.meta_row_height;
swath_width_pixels_ub_l = 0;
swath_width_pixels_ub_c = 0;
scaler_rec_in_width_l = 0;
scaler_rec_in_width_c = 0;
- dpte_row_height_l = rq_dlg_param.rq_l.dpte_row_height;
- dpte_row_height_c = rq_dlg_param.rq_c.dpte_row_height;
+ dpte_row_height_l = rq_dlg_param->rq_l.dpte_row_height;
+ dpte_row_height_c = rq_dlg_param->rq_c.dpte_row_height;
disp_dlg_regs->dst_y_per_pte_row_nom_l = (unsigned int) ((double) dpte_row_height_l
/ (double) vratio_l * dml_pow(2, 2));
@@ -1650,14 +1650,14 @@ void dml1_rq_dlg_get_dlg_params(
refcyc_per_req_delivery_cur0 = 0.;
full_recout_width = 0;
- if (e2e_pipe_param.pipe.src.is_hsplit) {
- if (e2e_pipe_param.pipe.dest.full_recout_width == 0) {
+ if (e2e_pipe_param->pipe.src.is_hsplit) {
+ if (e2e_pipe_param->pipe.dest.full_recout_width == 0) {
DTRACE("DLG: %s: Warning: full_recout_width not set in hsplit mode", __func__);
- full_recout_width = e2e_pipe_param.pipe.dest.recout_width * 2; /* assume half split for dcn1 */
+ full_recout_width = e2e_pipe_param->pipe.dest.recout_width * 2; /* assume half split for dcn1 */
} else
- full_recout_width = e2e_pipe_param.pipe.dest.full_recout_width;
+ full_recout_width = e2e_pipe_param->pipe.dest.full_recout_width;
} else
- full_recout_width = e2e_pipe_param.pipe.dest.recout_width;
+ full_recout_width = e2e_pipe_param->pipe.dest.recout_width;
refcyc_per_line_delivery_pre_l = get_refcyc_per_delivery(
mode_lib,
@@ -1824,9 +1824,9 @@ void dml1_rq_dlg_get_dlg_params(
}
/* TTU - Cursor */
- hratios_cur0 = e2e_pipe_param.pipe.scale_ratio_depth.hscl_ratio;
- cur0_src_width = e2e_pipe_param.pipe.src.cur0_src_width; /* cursor source width */
- cur0_bpp = (enum cursor_bpp) e2e_pipe_param.pipe.src.cur0_bpp;
+ hratios_cur0 = e2e_pipe_param->pipe.scale_ratio_depth.hscl_ratio;
+ cur0_src_width = e2e_pipe_param->pipe.src.cur0_src_width; /* cursor source width */
+ cur0_bpp = (enum cursor_bpp) e2e_pipe_param->pipe.src.cur0_bpp;
cur0_req_size = 0;
cur0_req_width = 0;
cur0_width_ub = 0.0;
@@ -1927,6 +1927,6 @@ void dml1_rq_dlg_get_dlg_params(
disp_ttu_regs->min_ttu_vblank = min_ttu_vblank * refclk_freq_in_mhz;
ASSERT(disp_ttu_regs->min_ttu_vblank < dml_pow(2, 24));
- print__ttu_regs_st(mode_lib, *disp_ttu_regs);
- print__dlg_regs_st(mode_lib, *disp_dlg_regs);
+ print__ttu_regs_st(mode_lib, disp_ttu_regs);
+ print__dlg_regs_st(mode_lib, disp_dlg_regs);
}
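
The hunks above apply one mechanical change: the large DML parameter structs are now handed to dml1_rq_dlg_get_dlg_params() by const pointer instead of by value, so every '.' access becomes '->' and no multi-kilobyte struct copy lands on the kernel stack. A minimal sketch of the same conversion, using a hypothetical big_params type rather than the real _vcs_dpi_* structs:

/* Before: the callee receives a full copy of the struct on its stack. */
struct big_params { int fields[1024]; };

static int sum_by_value(const struct big_params p)      /* ~4 KiB copied per call */
{
	return p.fields[0] + p.fields[1023];
}

/* After: only a pointer is passed; member reads go through '->'. */
static int sum_by_pointer(const struct big_params *p)   /* one pointer per call */
{
	return p->fields[0] + p->fields[1023];
}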
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
index 9c06913ad767..e19ee3bde45f 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.h
@@ -33,7 +33,7 @@ struct display_mode_lib;
void dml1_extract_rq_regs(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_regs_st *rq_regs,
- const struct _vcs_dpi_display_rq_params_st rq_param);
+ const struct _vcs_dpi_display_rq_params_st *rq_param);
/* Function: dml_rq_dlg_get_rq_params
* Calculate requestor-related parameters that are register-definition agnostic
* (i.e. this layer tries to separate real values from the register definition)
@@ -45,7 +45,7 @@ void dml1_extract_rq_regs(
void dml1_rq_dlg_get_rq_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_rq_params_st *rq_param,
- const struct _vcs_dpi_display_pipe_source_params_st pipe_src_param);
+ const struct _vcs_dpi_display_pipe_source_params_st *pipe_src_param);
/* Function: dml_rq_dlg_get_dlg_params
@@ -55,9 +55,9 @@ void dml1_rq_dlg_get_dlg_params(
struct display_mode_lib *mode_lib,
struct _vcs_dpi_display_dlg_regs_st *dlg_regs,
struct _vcs_dpi_display_ttu_regs_st *ttu_regs,
- const struct _vcs_dpi_display_rq_dlg_params_st rq_dlg_param,
- const struct _vcs_dpi_display_dlg_sys_params_st dlg_sys_param,
- const struct _vcs_dpi_display_e2e_pipe_params_st e2e_pipe_param,
+ const struct _vcs_dpi_display_rq_dlg_params_st *rq_dlg_param,
+ const struct _vcs_dpi_display_dlg_sys_params_st *dlg_sys_param,
+ const struct _vcs_dpi_display_e2e_pipe_params_st *e2e_pipe_param,
const bool cstate_en,
const bool pstate_en,
const bool vm_en,
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
index e5fac9f4181d..e5fac9f4181d 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/qp_tables.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/qp_tables.h
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
new file mode 100644
index 000000000000..3ee858f311d1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "rc_calc_fpu.h"
+
+#include "qp_tables.h"
+#include "amdgpu_dm/dc_fpu.h"
+
+#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
+
+#define MODE_SELECT(val444, val422, val420) \
+ (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
+
+
+#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
+ table = qp_table_##mode##_##bpc##bpc_##max; \
+ table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
+ break
+
+static int median3(int a, int b, int c)
+{
+ if (a > b)
+ swap(a, b);
+ if (b > c)
+ swap(b, c);
+ if (a > b)
+ swap(b, c);
+
+ return b;
+}
+
+static double dsc_roundf(double num)
+{
+ if (num < 0.0)
+ num = num - 0.5;
+ else
+ num = num + 0.5;
+
+ return (int)(num);
+}
+
+static double dsc_ceil(double num)
+{
+ double retval = (int)num;
+
+ if (retval != num && num > 0)
+ retval = num + 1;
+
+ return (int)retval;
+}
+
+static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
+ enum max_min max_min, float bpp)
+{
+ int mode = MODE_SELECT(444, 422, 420);
+ int sel = table_hash(mode, bpc, max_min);
+ int table_size = 0;
+ int index;
+ const struct qp_entry *table = 0L;
+
+ // alias enum
+ enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
+ switch (sel) {
+ TABLE_CASE(444, 8, max);
+ TABLE_CASE(444, 8, min);
+ TABLE_CASE(444, 10, max);
+ TABLE_CASE(444, 10, min);
+ TABLE_CASE(444, 12, max);
+ TABLE_CASE(444, 12, min);
+ TABLE_CASE(422, 8, max);
+ TABLE_CASE(422, 8, min);
+ TABLE_CASE(422, 10, max);
+ TABLE_CASE(422, 10, min);
+ TABLE_CASE(422, 12, max);
+ TABLE_CASE(422, 12, min);
+ TABLE_CASE(420, 8, max);
+ TABLE_CASE(420, 8, min);
+ TABLE_CASE(420, 10, max);
+ TABLE_CASE(420, 10, min);
+ TABLE_CASE(420, 12, max);
+ TABLE_CASE(420, 12, min);
+ }
+
+ if (table == 0)
+ return;
+
+ index = (bpp - table[0].bpp) * 2;
+
+ /* requested size is bigger than the table */
+ if (index >= table_size) {
+ dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
+ return;
+ }
+
+ memcpy(qps, table[index].qps, sizeof(qp_set));
+}
+
+static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
+{
+ int *p = ofs;
+
+ if (mode == CM_444 || mode == CM_RGB) {
+ *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
+ *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
+ *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else if (mode == CM_422) {
+ *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
+ *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
+ *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ } else {
+ *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
+ *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
+ *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
+ *p++ = -10;
+ *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
+ *p++ = -12;
+ *p++ = -12;
+ *p++ = -12;
+ }
+}
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version)
+{
+ float bpp;
+ float bpp_group;
+ float initial_xmit_delay_factor;
+ int padding_pixels;
+ int i;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ /* in native_422 or native_420 modes, the bits_per_pixel is double the
+ * target bpp (the latter is what calc_rc_params expects)
+ */
+ if (is_navite_422_or_420)
+ bpp /= 2.0;
+
+ rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+
+ bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
+
+ switch (cm) {
+ case CM_420:
+ rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
+ break;
+ case CM_422:
+ rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ case CM_444:
+ case CM_RGB:
+ rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
+ rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
+ rc->second_line_bpg_offset = 0;
+ break;
+ }
+
+ initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
+ rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
+
+ if (cm == CM_422 || cm == CM_420)
+ slice_width /= 2;
+
+ padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
+ if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
+ if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
+ rc->initial_xmit_delay++;
+ }
+
+ rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
+ rc->flatness_det_thresh = 2 << (bpc - 8);
+
+ get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
+ get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
+ if (cm == CM_444 && minor_version == 1) {
+ for (i = 0; i < QP_SET_SIZE; ++i) {
+ rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
+ rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
+ }
+ }
+ get_ofs_set(rc->ofs, cm, bpp);
+
+ /* fixed parameters */
+ rc->rc_model_size = 8192;
+ rc->rc_edge_factor = 6;
+ rc->rc_tgt_offset_hi = 3;
+ rc->rc_tgt_offset_lo = 3;
+
+ rc->rc_buf_thresh[0] = 896;
+ rc->rc_buf_thresh[1] = 1792;
+ rc->rc_buf_thresh[2] = 2688;
+ rc->rc_buf_thresh[3] = 3584;
+ rc->rc_buf_thresh[4] = 4480;
+ rc->rc_buf_thresh[5] = 5376;
+ rc->rc_buf_thresh[6] = 6272;
+ rc->rc_buf_thresh[7] = 6720;
+ rc->rc_buf_thresh[8] = 7168;
+ rc->rc_buf_thresh[9] = 7616;
+ rc->rc_buf_thresh[10] = 7744;
+ rc->rc_buf_thresh[11] = 7872;
+ rc->rc_buf_thresh[12] = 8000;
+ rc->rc_buf_thresh[13] = 8064;
+}
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420)
+{
+ float bpp;
+ u32 bytes_per_pixel;
+ double d_bytes_per_pixel;
+
+ dc_assert_fp_enabled();
+
+ bpp = ((float)drm_bpp / 16.0);
+ d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
+ // TODO: Make sure the formula for calculating this is precise (ceiling
+ // vs. floor, and at what point they should be applied)
+ if (is_navite_422_or_420)
+ d_bytes_per_pixel /= 2;
+
+ bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
+
+ return bytes_per_pixel;
+}
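
get_qp_set() above selects one of the qp_table_* arrays by hashing (mode, bpc, max/min) into a single switch key, then indexes the chosen table in 0.5-bpp steps via index = (bpp - table[0].bpp) * 2. A standalone sketch of that indexing with a made-up three-entry table (the real tables live in qp_tables.h; demo_entry and its values are placeholders):

#include <stdio.h>

struct demo_entry { float bpp; int qp; };

/* hypothetical stand-in for one qp_table_* array: one entry every 0.5 bpp */
static const struct demo_entry demo_table[] = {
	{ 6.0f, 12 }, { 6.5f, 11 }, { 7.0f, 10 },
};

int main(void)
{
	float bpp = 6.5f;
	int table_size = sizeof(demo_table) / sizeof(*demo_table);
	int index = (int)((bpp - demo_table[0].bpp) * 2);   /* 0.5-bpp steps -> 1 */

	if (index < 0 || index >= table_size)
		return 1;                                   /* bpp outside the table */

	printf("qp for %.1f bpp = %d\n", bpp, demo_table[index].qp);
	return 0;
}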
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
new file mode 100644
index 000000000000..b93b95409fbe
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml/dsc/rc_calc_fpu.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __RC_CALC_FPU_H__
+#define __RC_CALC_FPU_H__
+
+#include "os_types.h"
+#include <drm/drm_dsc.h>
+
+#define QP_SET_SIZE 15
+
+typedef int qp_set[QP_SET_SIZE];
+
+struct rc_params {
+ int rc_quant_incr_limit0;
+ int rc_quant_incr_limit1;
+ int initial_fullness_offset;
+ int initial_xmit_delay;
+ int first_line_bpg_offset;
+ int second_line_bpg_offset;
+ int flatness_min_qp;
+ int flatness_max_qp;
+ int flatness_det_thresh;
+ qp_set qp_min;
+ qp_set qp_max;
+ qp_set ofs;
+ int rc_model_size;
+ int rc_edge_factor;
+ int rc_tgt_offset_hi;
+ int rc_tgt_offset_lo;
+ int rc_buf_thresh[QP_SET_SIZE - 1];
+};
+
+enum colour_mode {
+ CM_RGB, /* 444 RGB */
+ CM_444, /* 444 YUV or simple 422 */
+ CM_422, /* native 422 */
+ CM_420 /* native 420 */
+};
+
+enum bits_per_comp {
+ BPC_8 = 8,
+ BPC_10 = 10,
+ BPC_12 = 12
+};
+
+enum max_min {
+ DAL_MM_MIN = 0,
+ DAL_MM_MAX = 1
+};
+
+struct qp_entry {
+ float bpp;
+ const qp_set qps;
+};
+
+typedef struct qp_entry qp_table[];
+
+u32 _do_bytes_per_pixel_calc(int slice_width,
+ u16 drm_bpp,
+ bool is_navite_422_or_420);
+
+void _do_calc_rc_params(struct rc_params *rc,
+ enum colour_mode cm,
+ enum bits_per_comp bpc,
+ u16 drm_bpp,
+ bool is_navite_422_or_420,
+ int slice_width,
+ int slice_height,
+ int minor_version);
+
+#endif
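
Both _do_calc_rc_params() and _do_bytes_per_pixel_calc() assert dc_assert_fp_enabled(), so callers are expected to bracket them with the DC FPU helpers before doing any float math. A hedged sketch of the expected calling pattern, assuming the DC_FP_START()/DC_FP_END() macros from amdgpu_dm/dc_fpu.h and placeholder argument values:

#include "dml/dsc/rc_calc_fpu.h"
#include "amdgpu_dm/dc_fpu.h"

static void demo_build_rc_params(struct rc_params *rc)
{
	DC_FP_START();                    /* enable kernel FPU use on this CPU */
	_do_calc_rc_params(rc, CM_444, BPC_8,
			   8 * 16,        /* drm_bpp: bpp in 1/16th-bit units */
			   false,         /* not native 4:2:2 / 4:2:0 */
			   1920, 108,     /* slice width/height (placeholders) */
			   2);            /* DSC minor version */
	DC_FP_END();                      /* restore the saved FPU state */
}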
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/Makefile b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
index 8d31eb75c6a6..a2537229ee88 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dsc/Makefile
@@ -1,35 +1,6 @@
# SPDX-License-Identifier: MIT
#
# Makefile for the 'dsc' sub-component of DAL.
-
-ifdef CONFIG_X86
-dsc_ccflags := -mhard-float -msse
-endif
-
-ifdef CONFIG_PPC64
-dsc_ccflags := -mhard-float -maltivec
-endif
-
-ifdef CONFIG_CC_IS_GCC
-ifeq ($(call cc-ifversion, -lt, 0701, y), y)
-IS_OLD_GCC = 1
-endif
-endif
-
-ifdef CONFIG_X86
-ifdef IS_OLD_GCC
-# Stack alignment mismatch, proceed with caution.
-# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3
-# (8B stack alignment).
-dsc_ccflags += -mpreferred-stack-boundary=4
-else
-dsc_ccflags += -msse2
-endif
-endif
-
-CFLAGS_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_ccflags)
-CFLAGS_REMOVE_$(AMDDALPATH)/dc/dsc/rc_calc.o := $(dsc_rcflags)
-
DSC = dc_dsc.o rc_calc.o rc_calc_dpi.o
AMD_DAL_DSC = $(addprefix $(AMDDALPATH)/dc/dsc/,$(DSC))
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
index f5b7da0e64c0..0321b4446e05 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c
@@ -40,8 +40,15 @@ static bool dsc_policy_enable_dsc_when_not_needed;
static bool dsc_policy_disable_dsc_stream_overhead;
+#ifndef MAX
+#define MAX(X, Y) ((X) > (Y) ? (X) : (Y))
+#endif
+#ifndef MIN
+#define MIN(X, Y) ((X) < (Y) ? (X) : (Y))
+#endif
+
/* Forward Declarations */
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -76,11 +83,6 @@ static bool setup_dsc_config(
int max_dsc_target_bpp_limit_override_x16,
struct dc_dsc_config *dsc_cfg);
-static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
- const struct dc_crtc_timing *timing,
- const int num_slices_h,
- const bool is_dp);
-
static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size)
{
@@ -361,7 +363,7 @@ bool dc_dsc_compute_bandwidth_range(
dsc_min_slice_height_override, max_bpp_x16, &config);
if (is_dsc_possible)
- get_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
+ is_dsc_possible = decide_dsc_bandwidth_range(min_bpp_x16, max_bpp_x16,
config.num_slices_h, &dsc_common_caps, timing, range);
return is_dsc_possible;
@@ -462,32 +464,6 @@ static inline uint32_t dsc_div_by_10_round_up(uint32_t value)
return (value + 9) / 10;
}
-static struct fixed31_32 compute_dsc_max_bandwidth_overhead(
- const struct dc_crtc_timing *timing,
- const int num_slices_h,
- const bool is_dp)
-{
- struct fixed31_32 max_dsc_overhead;
- struct fixed31_32 refresh_rate;
-
- if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
- return dc_fixpt_from_int(0);
-
- /* use target bpp that can take entire target bandwidth */
- refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
- refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
- refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
- refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
-
- max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
- max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
- max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
- max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
- max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
-
- return max_dsc_overhead;
-}
-
static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bandwidth_in_kbps,
const struct dc_crtc_timing *timing,
@@ -495,14 +471,14 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
const uint32_t bpp_increment_div,
const bool is_dp)
{
- struct fixed31_32 overhead_in_kbps;
+ uint32_t overhead_in_kbps;
struct fixed31_32 effective_bandwidth_in_kbps;
struct fixed31_32 bpp_x16;
- overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+ overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
effective_bandwidth_in_kbps = dc_fixpt_from_int(bandwidth_in_kbps);
- effective_bandwidth_in_kbps = dc_fixpt_sub(effective_bandwidth_in_kbps,
+ effective_bandwidth_in_kbps = dc_fixpt_sub_int(effective_bandwidth_in_kbps,
overhead_in_kbps);
bpp_x16 = dc_fixpt_mul_int(effective_bandwidth_in_kbps, 10);
bpp_x16 = dc_fixpt_div_int(bpp_x16, timing->pix_clk_100hz);
@@ -512,10 +488,12 @@ static uint32_t compute_bpp_x16_from_target_bandwidth(
return dc_fixpt_floor(bpp_x16);
}
-/* Get DSC bandwidth range based on [min_bpp, max_bpp] target bitrate range, and timing's pixel clock
- * and uncompressed bandwidth.
+/* Decide the DSC bandwidth range based on the signal, the timing, spec-specific limits and the
+ * requested min and max bpp.
+ * The output range includes the decided min/max target bpp, their respective bandwidth
+ * requirements and the native timing bandwidth requirement when DSC is not used.
*/
-static void get_dsc_bandwidth_range(
+static bool decide_dsc_bandwidth_range(
const uint32_t min_bpp_x16,
const uint32_t max_bpp_x16,
const uint32_t num_slices_h,
@@ -523,39 +501,45 @@ static void get_dsc_bandwidth_range(
const struct dc_crtc_timing *timing,
struct dc_dsc_bw_range *range)
{
- /* native stream bandwidth */
- range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
-
- /* max dsc target bpp */
- range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
- max_bpp_x16, num_slices_h, dsc_caps->is_dp);
- range->max_target_bpp_x16 = max_bpp_x16;
- if (range->max_kbps > range->stream_kbps) {
- /* max dsc target bpp is capped to native bandwidth */
- range->max_kbps = range->stream_kbps;
- range->max_target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
- range->max_kbps, timing, num_slices_h,
- dsc_caps->bpp_increment_div,
- dsc_caps->is_dp);
+ uint32_t preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
+
+ memset(range, 0, sizeof(*range));
+
+ /* apply signal, timing, specs and explicitly specified DSC range requirements */
+ if (preferred_bpp_x16) {
+ if (preferred_bpp_x16 <= max_bpp_x16 &&
+ preferred_bpp_x16 >= min_bpp_x16) {
+ range->max_target_bpp_x16 = preferred_bpp_x16;
+ range->min_target_bpp_x16 = preferred_bpp_x16;
+ }
}
+ else {
+ range->max_target_bpp_x16 = max_bpp_x16;
+ range->min_target_bpp_x16 = min_bpp_x16;
+ }
+
+ /* populate output structure */
+ if (range->max_target_bpp_x16 >= range->min_target_bpp_x16 && range->min_target_bpp_x16 > 0) {
+ /* native stream bandwidth */
+ range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing);
+
+ /* max dsc target bpp */
+ range->max_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
+ range->max_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
- /* min dsc target bpp */
- range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
- min_bpp_x16, num_slices_h, dsc_caps->is_dp);
- range->min_target_bpp_x16 = min_bpp_x16;
- if (range->min_kbps > range->max_kbps) {
- /* min dsc target bpp is capped to max dsc bandwidth*/
- range->min_kbps = range->max_kbps;
- range->min_target_bpp_x16 = range->max_target_bpp_x16;
+ /* min dsc target bpp */
+ range->min_kbps = dc_dsc_stream_bandwidth_in_kbps(timing,
+ range->min_target_bpp_x16, num_slices_h, dsc_caps->is_dp);
}
+
+ return range->max_kbps >= range->min_kbps && range->min_kbps > 0;
}
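
decide_dsc_bandwidth_range() above collapses the range to the stream's fixed bpp when dsc_fixed_bits_per_pixel_x16 is set and falls inside [min, max], and reports failure when no usable range remains. A standalone sketch of just that range rule, with plain integers standing in for the bpp_x16 values (demo_decide_range is a made-up name):

#include <stdbool.h>
#include <stdio.h>

/* returns true and fills [*out_min, *out_max] when a usable bpp range exists */
static bool demo_decide_range(int min_bpp_x16, int max_bpp_x16,
			      int preferred_bpp_x16, int *out_min, int *out_max)
{
	*out_min = 0;
	*out_max = 0;

	if (preferred_bpp_x16) {
		/* a fixed bpp is honoured only if it lies inside the requested range */
		if (preferred_bpp_x16 >= min_bpp_x16 &&
		    preferred_bpp_x16 <= max_bpp_x16) {
			*out_min = preferred_bpp_x16;
			*out_max = preferred_bpp_x16;
		}
	} else {
		*out_min = min_bpp_x16;
		*out_max = max_bpp_x16;
	}

	return *out_max >= *out_min && *out_min > 0;
}

int main(void)
{
	int lo, hi;

	/* fixed 10 bpp (x16 = 160) inside [8, 16] bpp collapses the range to [160, 160] */
	if (demo_decide_range(8 * 16, 16 * 16, 10 * 16, &lo, &hi))
		printf("usable range: [%d, %d] (bpp x16)\n", lo, hi);
	return 0;
}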
/* Decides if DSC should be used and calculates target bpp if it should, applying DSC policy.
*
* Returns:
- * - 'true' if DSC was required by policy and was successfully applied
- * - 'false' if DSC was not necessary (e.g. if uncompressed stream fits 'target_bandwidth_kbps'),
- * or if it couldn't be applied based on DSC policy.
+ * - 'true' if a target bpp was decided
+ * - 'false' if a target bpp cannot be decided (e.g. the stream cannot fit even with the minimum DSC bpp)
*/
static bool decide_dsc_target_bpp_x16(
const struct dc_dsc_policy *policy,
@@ -565,40 +549,29 @@ static bool decide_dsc_target_bpp_x16(
const int num_slices_h,
int *target_bpp_x16)
{
- bool should_use_dsc = false;
struct dc_dsc_bw_range range;
- memset(&range, 0, sizeof(range));
-
- get_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
- num_slices_h, dsc_common_caps, timing, &range);
- if (!policy->enable_dsc_when_not_needed && target_bandwidth_kbps >= range.stream_kbps) {
- /* enough bandwidth without dsc */
- *target_bpp_x16 = 0;
- should_use_dsc = false;
- } else if (policy->preferred_bpp_x16 > 0 &&
- policy->preferred_bpp_x16 <= range.max_target_bpp_x16 &&
- policy->preferred_bpp_x16 >= range.min_target_bpp_x16) {
- *target_bpp_x16 = policy->preferred_bpp_x16;
- should_use_dsc = true;
- } else if (target_bandwidth_kbps >= range.max_kbps) {
- /* use max target bpp allowed */
- *target_bpp_x16 = range.max_target_bpp_x16;
- should_use_dsc = true;
- } else if (target_bandwidth_kbps >= range.min_kbps) {
- /* use target bpp that can take entire target bandwidth */
- *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
- target_bandwidth_kbps, timing, num_slices_h,
- dsc_common_caps->bpp_increment_div,
- dsc_common_caps->is_dp);
- should_use_dsc = true;
- } else {
- /* not enough bandwidth to fulfill minimum requirement */
- *target_bpp_x16 = 0;
- should_use_dsc = false;
+ *target_bpp_x16 = 0;
+
+ if (decide_dsc_bandwidth_range(policy->min_target_bpp * 16, policy->max_target_bpp * 16,
+ num_slices_h, dsc_common_caps, timing, &range)) {
+ if (target_bandwidth_kbps >= range.stream_kbps) {
+ if (policy->enable_dsc_when_not_needed)
+ /* enable max bpp even if DSC is not needed */
+ *target_bpp_x16 = range.max_target_bpp_x16;
+ } else if (target_bandwidth_kbps >= range.max_kbps) {
+ /* use max target bpp allowed */
+ *target_bpp_x16 = range.max_target_bpp_x16;
+ } else if (target_bandwidth_kbps >= range.min_kbps) {
+ /* use target bpp that can take entire target bandwidth */
+ *target_bpp_x16 = compute_bpp_x16_from_target_bandwidth(
+ target_bandwidth_kbps, timing, num_slices_h,
+ dsc_common_caps->bpp_increment_div,
+ dsc_common_caps->is_dp);
+ }
}
- return should_use_dsc;
+ return *target_bpp_x16 != 0;
}
#define MIN_AVAILABLE_SLICES_SIZE 4
@@ -994,19 +967,45 @@ bool dc_dsc_compute_config(
uint32_t dc_dsc_stream_bandwidth_in_kbps(const struct dc_crtc_timing *timing,
uint32_t bpp_x16, uint32_t num_slices_h, bool is_dp)
{
- struct fixed31_32 overhead_in_kbps;
+ uint32_t overhead_in_kbps;
struct fixed31_32 bpp;
struct fixed31_32 actual_bandwidth_in_kbps;
- overhead_in_kbps = compute_dsc_max_bandwidth_overhead(
+ overhead_in_kbps = dc_dsc_stream_bandwidth_overhead_in_kbps(
timing, num_slices_h, is_dp);
bpp = dc_fixpt_from_fraction(bpp_x16, 16);
actual_bandwidth_in_kbps = dc_fixpt_from_fraction(timing->pix_clk_100hz, 10);
actual_bandwidth_in_kbps = dc_fixpt_mul(actual_bandwidth_in_kbps, bpp);
- actual_bandwidth_in_kbps = dc_fixpt_add(actual_bandwidth_in_kbps, overhead_in_kbps);
+ actual_bandwidth_in_kbps = dc_fixpt_add_int(actual_bandwidth_in_kbps, overhead_in_kbps);
return dc_fixpt_ceil(actual_bandwidth_in_kbps);
}
+uint32_t dc_dsc_stream_bandwidth_overhead_in_kbps(
+ const struct dc_crtc_timing *timing,
+ const int num_slices_h,
+ const bool is_dp)
+{
+ struct fixed31_32 max_dsc_overhead;
+ struct fixed31_32 refresh_rate;
+
+ if (dsc_policy_disable_dsc_stream_overhead || !is_dp)
+ return 0;
+
+ /* use target bpp that can take entire target bandwidth */
+ refresh_rate = dc_fixpt_from_int(timing->pix_clk_100hz);
+ refresh_rate = dc_fixpt_div_int(refresh_rate, timing->h_total);
+ refresh_rate = dc_fixpt_div_int(refresh_rate, timing->v_total);
+ refresh_rate = dc_fixpt_mul_int(refresh_rate, 100);
+
+ max_dsc_overhead = dc_fixpt_from_int(num_slices_h);
+ max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, timing->v_total);
+ max_dsc_overhead = dc_fixpt_mul_int(max_dsc_overhead, 256);
+ max_dsc_overhead = dc_fixpt_div_int(max_dsc_overhead, 1000);
+ max_dsc_overhead = dc_fixpt_mul(max_dsc_overhead, refresh_rate);
+
+ return dc_fixpt_ceil(max_dsc_overhead);
+}
+
void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
uint32_t max_target_bpp_limit_override_x16,
struct dc_dsc_policy *policy)
@@ -1064,8 +1063,6 @@ void dc_dsc_get_policy_for_timing(const struct dc_crtc_timing *timing,
return;
}
- policy->preferred_bpp_x16 = timing->dsc_fixed_bits_per_pixel_x16;
-
/* internal upper limit, default 16 bpp */
if (policy->max_target_bpp > dsc_policy_max_target_bpp_limit)
policy->max_target_bpp = dsc_policy_max_target_bpp_limit;
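
dc_dsc_stream_bandwidth_overhead_in_kbps() above evaluates, in 31.32 fixed point, num_slices_h * v_total * 256 / 1000 multiplied by the refresh rate, where the refresh rate is pix_clk_100hz * 100 / (h_total * v_total). A plain-integer sketch of the same arithmetic with made-up 4k60 timing values (the real code keeps every intermediate in struct fixed31_32 and rounds up at the end):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical 4k60 timing, 8 horizontal slices */
	uint64_t pix_clk_100hz = 5940000;   /* 594 MHz expressed in 100 Hz units */
	uint64_t h_total = 4400, v_total = 2250, num_slices_h = 8;

	/* refresh rate in Hz: pix_clk * 100 / (h_total * v_total) = 60 */
	uint64_t refresh_hz = pix_clk_100hz * 100 / (h_total * v_total);

	/* overhead per the formula above: slices * v_total * 256, times refresh, in kbps */
	uint64_t overhead_kbps = num_slices_h * v_total * 256 * refresh_hz / 1000;

	printf("DSC stream overhead ~= %llu kbps\n",
	       (unsigned long long)overhead_kbps);   /* ~276480 kbps */
	return 0;
}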
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
index 7b294f637881..b19d3aeb5962 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c
@@ -23,266 +23,7 @@
* Authors: AMD
*
*/
-#include <drm/drm_dsc.h>
-
-#include "os_types.h"
#include "rc_calc.h"
-#include "qp_tables.h"
-
-#define table_hash(mode, bpc, max_min) ((mode << 16) | (bpc << 8) | max_min)
-
-#define MODE_SELECT(val444, val422, val420) \
- (cm == CM_444 || cm == CM_RGB) ? (val444) : (cm == CM_422 ? (val422) : (val420))
-
-
-#define TABLE_CASE(mode, bpc, max) case (table_hash(mode, BPC_##bpc, max)): \
- table = qp_table_##mode##_##bpc##bpc_##max; \
- table_size = sizeof(qp_table_##mode##_##bpc##bpc_##max)/sizeof(*qp_table_##mode##_##bpc##bpc_##max); \
- break
-
-
-static void get_qp_set(qp_set qps, enum colour_mode cm, enum bits_per_comp bpc,
- enum max_min max_min, float bpp)
-{
- int mode = MODE_SELECT(444, 422, 420);
- int sel = table_hash(mode, bpc, max_min);
- int table_size = 0;
- int index;
- const struct qp_entry *table = 0L;
-
- // alias enum
- enum { min = DAL_MM_MIN, max = DAL_MM_MAX };
- switch (sel) {
- TABLE_CASE(444, 8, max);
- TABLE_CASE(444, 8, min);
- TABLE_CASE(444, 10, max);
- TABLE_CASE(444, 10, min);
- TABLE_CASE(444, 12, max);
- TABLE_CASE(444, 12, min);
- TABLE_CASE(422, 8, max);
- TABLE_CASE(422, 8, min);
- TABLE_CASE(422, 10, max);
- TABLE_CASE(422, 10, min);
- TABLE_CASE(422, 12, max);
- TABLE_CASE(422, 12, min);
- TABLE_CASE(420, 8, max);
- TABLE_CASE(420, 8, min);
- TABLE_CASE(420, 10, max);
- TABLE_CASE(420, 10, min);
- TABLE_CASE(420, 12, max);
- TABLE_CASE(420, 12, min);
- }
-
- if (table == 0)
- return;
-
- index = (bpp - table[0].bpp) * 2;
-
- /* requested size is bigger than the table */
- if (index >= table_size) {
- dm_error("ERROR: Requested rc_calc to find a bpp entry that exceeds the table size\n");
- return;
- }
-
- memcpy(qps, table[index].qps, sizeof(qp_set));
-}
-
-static double dsc_roundf(double num)
-{
- if (num < 0.0)
- num = num - 0.5;
- else
- num = num + 0.5;
-
- return (int)(num);
-}
-
-static double dsc_ceil(double num)
-{
- double retval = (int)num;
-
- if (retval != num && num > 0)
- retval = num + 1;
-
- return (int)retval;
-}
-
-static void get_ofs_set(qp_set ofs, enum colour_mode mode, float bpp)
-{
- int *p = ofs;
-
- if (mode == CM_444 || mode == CM_RGB) {
- *p++ = (bpp <= 6) ? (0) : ((((bpp >= 8) && (bpp <= 12))) ? (2) : ((bpp >= 15) ? (10) : ((((bpp > 6) && (bpp < 8))) ? (0 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (2 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (8) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (8 / 3.0))))));
- *p++ = (bpp <= 6) ? (-2) : ((((bpp >= 8) && (bpp <= 12))) ? (0) : ((bpp >= 15) ? (6) : ((((bpp > 6) && (bpp < 8))) ? (-2 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (0 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-4) : ((((bpp >= 8) && (bpp <= 12))) ? (-2) : ((bpp >= 15) ? (4) : ((((bpp > 6) && (bpp < 8))) ? (-4 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-2 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 6) ? (-6) : ((((bpp >= 8) && (bpp <= 12))) ? (-4) : ((bpp >= 15) ? (2) : ((((bpp > 6) && (bpp < 8))) ? (-6 + dsc_roundf((bpp - 6) * (2 / 2.0))) : (-4 + dsc_roundf((bpp - 12) * (6 / 3.0))))));
- *p++ = (bpp <= 12) ? (-6) : ((bpp >= 15) ? (0) : (-6 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-2) : (-8 + dsc_roundf((bpp - 12) * (6 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-4) : (-8 + dsc_roundf((bpp - 12) * (4 / 3.0))));
- *p++ = (bpp <= 12) ? (-8) : ((bpp >= 15) ? (-6) : (-8 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = (bpp <= 12) ? (-10) : ((bpp >= 15) ? (-8) : (-10 + dsc_roundf((bpp - 12) * (2 / 3.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 8) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else if (mode == CM_422) {
- *p++ = (bpp <= 8) ? (2) : ((bpp >= 10) ? (10) : (2 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (8) : (0 + dsc_roundf((bpp - 8) * (8 / 2.0))));
- *p++ = (bpp <= 8) ? (0) : ((bpp >= 10) ? (6) : (0 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-2) : ((bpp >= 10) ? (4) : (-2 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-4) : ((bpp >= 10) ? (2) : (-4 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-6) : ((bpp >= 10) ? (0) : (-6 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-2) : (-8 + dsc_roundf((bpp - 8) * (6 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-4) : (-8 + dsc_roundf((bpp - 8) * (4 / 2.0))));
- *p++ = (bpp <= 8) ? (-8) : ((bpp >= 10) ? (-6) : (-8 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = (bpp <= 8) ? (-10) : ((bpp >= 10) ? (-8) : (-10 + dsc_roundf((bpp - 8) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 6) ? (-12) : ((bpp >= 7) ? (-10) : (-12 + dsc_roundf((bpp - 6) * (2.0 / 1))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- } else {
- *p++ = (bpp <= 6) ? (2) : ((bpp >= 8) ? (10) : (2 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (8) : (0 + dsc_roundf((bpp - 6) * (8 / 2.0))));
- *p++ = (bpp <= 6) ? (0) : ((bpp >= 8) ? (6) : (0 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-2) : ((bpp >= 8) ? (4) : (-2 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-4) : ((bpp >= 8) ? (2) : (-4 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-6) : ((bpp >= 8) ? (0) : (-6 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-2) : (-8 + dsc_roundf((bpp - 6) * (6 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-4) : (-8 + dsc_roundf((bpp - 6) * (4 / 2.0))));
- *p++ = (bpp <= 6) ? (-8) : ((bpp >= 8) ? (-6) : (-8 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = (bpp <= 6) ? (-10) : ((bpp >= 8) ? (-8) : (-10 + dsc_roundf((bpp - 6) * (2 / 2.0))));
- *p++ = -10;
- *p++ = (bpp <= 4) ? (-12) : ((bpp >= 5) ? (-10) : (-12 + dsc_roundf((bpp - 4) * (2 / 1.0))));
- *p++ = -12;
- *p++ = -12;
- *p++ = -12;
- }
-}
-
-static int median3(int a, int b, int c)
-{
- if (a > b)
- swap(a, b);
- if (b > c)
- swap(b, c);
- if (a > b)
- swap(b, c);
-
- return b;
-}
-
-static void _do_calc_rc_params(struct rc_params *rc, enum colour_mode cm,
- enum bits_per_comp bpc, u16 drm_bpp,
- bool is_navite_422_or_420,
- int slice_width, int slice_height,
- int minor_version)
-{
- float bpp;
- float bpp_group;
- float initial_xmit_delay_factor;
- int padding_pixels;
- int i;
-
- bpp = ((float)drm_bpp / 16.0);
- /* in native_422 or native_420 modes, the bits_per_pixel is double the
- * target bpp (the latter is what calc_rc_params expects)
- */
- if (is_navite_422_or_420)
- bpp /= 2.0;
-
- rc->rc_quant_incr_limit0 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->rc_quant_incr_limit1 = ((bpc == BPC_8) ? 11 : (bpc == BPC_10 ? 15 : 19)) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
-
- bpp_group = MODE_SELECT(bpp, bpp * 2.0, bpp * 2.0);
-
- switch (cm) {
- case CM_420:
- rc->initial_fullness_offset = (bpp >= 6) ? (2048) : ((bpp <= 4) ? (6144) : ((((bpp > 4) && (bpp <= 5))) ? (6144 - dsc_roundf((bpp - 4) * (512))) : (5632 - dsc_roundf((bpp - 5) * (3584)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = median3(0, 12, (int)((3 * bpc * 3) - (3 * bpp_group)));
- break;
- case CM_422:
- rc->initial_fullness_offset = (bpp >= 8) ? (2048) : ((bpp <= 7) ? (5632) : (5632 - dsc_roundf((bpp - 7) * (3584))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)((3 * bpc * 4) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- case CM_444:
- case CM_RGB:
- rc->initial_fullness_offset = (bpp >= 12) ? (2048) : ((bpp <= 8) ? (6144) : ((((bpp > 8) && (bpp <= 10))) ? (6144 - dsc_roundf((bpp - 8) * (512 / 2))) : (5632 - dsc_roundf((bpp - 10) * (3584 / 2)))));
- rc->first_line_bpg_offset = median3(0, (12 + (int) (0.09 * min(34, slice_height - 8))), (int)(((3 * bpc + (cm == CM_444 ? 0 : 2)) * 3) - (3 * bpp_group)));
- rc->second_line_bpg_offset = 0;
- break;
- }
-
- initial_xmit_delay_factor = (cm == CM_444 || cm == CM_RGB) ? 1.0 : 2.0;
- rc->initial_xmit_delay = dsc_roundf(8192.0/2.0/bpp/initial_xmit_delay_factor);
-
- if (cm == CM_422 || cm == CM_420)
- slice_width /= 2;
-
- padding_pixels = ((slice_width % 3) != 0) ? (3 - (slice_width % 3)) * (rc->initial_xmit_delay / slice_width) : 0;
- if (3 * bpp_group >= (((rc->initial_xmit_delay + 2) / 3) * (3 + (cm == CM_422)))) {
- if ((rc->initial_xmit_delay + padding_pixels) % 3 == 1)
- rc->initial_xmit_delay++;
- }
-
- rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0);
- rc->flatness_det_thresh = 2 << (bpc - 8);
-
- get_qp_set(rc->qp_min, cm, bpc, DAL_MM_MIN, bpp);
- get_qp_set(rc->qp_max, cm, bpc, DAL_MM_MAX, bpp);
- if (cm == CM_444 && minor_version == 1) {
- for (i = 0; i < QP_SET_SIZE; ++i) {
- rc->qp_min[i] = rc->qp_min[i] > 0 ? rc->qp_min[i] - 1 : 0;
- rc->qp_max[i] = rc->qp_max[i] > 0 ? rc->qp_max[i] - 1 : 0;
- }
- }
- get_ofs_set(rc->ofs, cm, bpp);
-
- /* fixed parameters */
- rc->rc_model_size = 8192;
- rc->rc_edge_factor = 6;
- rc->rc_tgt_offset_hi = 3;
- rc->rc_tgt_offset_lo = 3;
-
- rc->rc_buf_thresh[0] = 896;
- rc->rc_buf_thresh[1] = 1792;
- rc->rc_buf_thresh[2] = 2688;
- rc->rc_buf_thresh[3] = 3584;
- rc->rc_buf_thresh[4] = 4480;
- rc->rc_buf_thresh[5] = 5376;
- rc->rc_buf_thresh[6] = 6272;
- rc->rc_buf_thresh[7] = 6720;
- rc->rc_buf_thresh[8] = 7168;
- rc->rc_buf_thresh[9] = 7616;
- rc->rc_buf_thresh[10] = 7744;
- rc->rc_buf_thresh[11] = 7872;
- rc->rc_buf_thresh[12] = 8000;
- rc->rc_buf_thresh[13] = 8064;
-}
-
-static u32 _do_bytes_per_pixel_calc(int slice_width, u16 drm_bpp,
- bool is_navite_422_or_420)
-{
- float bpp;
- u32 bytes_per_pixel;
- double d_bytes_per_pixel;
-
- bpp = ((float)drm_bpp / 16.0);
- d_bytes_per_pixel = dsc_ceil(bpp * slice_width / 8.0) / slice_width;
- // TODO: Make sure the formula for calculating this is precise (ceiling
- // vs. floor, and at what point they should be applied)
- if (is_navite_422_or_420)
- d_bytes_per_pixel /= 2;
-
- bytes_per_pixel = (u32)dsc_ceil(d_bytes_per_pixel * 0x10000000);
-
- return bytes_per_pixel;
-}
/**
* calc_rc_params - reads the user's cmdline mode
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
index 262f06afcbf9..c2340e001b57 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.h
@@ -27,55 +27,7 @@
#ifndef __RC_CALC_H__
#define __RC_CALC_H__
-
-#define QP_SET_SIZE 15
-
-typedef int qp_set[QP_SET_SIZE];
-
-struct rc_params {
- int rc_quant_incr_limit0;
- int rc_quant_incr_limit1;
- int initial_fullness_offset;
- int initial_xmit_delay;
- int first_line_bpg_offset;
- int second_line_bpg_offset;
- int flatness_min_qp;
- int flatness_max_qp;
- int flatness_det_thresh;
- qp_set qp_min;
- qp_set qp_max;
- qp_set ofs;
- int rc_model_size;
- int rc_edge_factor;
- int rc_tgt_offset_hi;
- int rc_tgt_offset_lo;
- int rc_buf_thresh[QP_SET_SIZE - 1];
-};
-
-enum colour_mode {
- CM_RGB, /* 444 RGB */
- CM_444, /* 444 YUV or simple 422 */
- CM_422, /* native 422 */
- CM_420 /* native 420 */
-};
-
-enum bits_per_comp {
- BPC_8 = 8,
- BPC_10 = 10,
- BPC_12 = 12
-};
-
-enum max_min {
- DAL_MM_MIN = 0,
- DAL_MM_MAX = 1
-};
-
-struct qp_entry {
- float bpp;
- const qp_set qps;
-};
-
-typedef struct qp_entry qp_table[];
+#include "dml/dsc/rc_calc_fpu.h"
void calc_rc_params(struct rc_params *rc, const struct drm_dsc_config *pps);
u32 calc_dsc_bytes_per_pixel(const struct drm_dsc_config *pps);
diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
index ef830aded5b1..1e19dd674e5a 100644
--- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
+++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc_dpi.c
@@ -22,7 +22,6 @@
* Authors: AMD
*
*/
-#include "os_types.h"
#include <drm/drm_dsc.h>
#include "dscc_types.h"
#include "rc_calc.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index c5c840a06050..5029d4e42dbf 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -105,6 +105,7 @@ bool dal_hw_factory_init(
case DCN_VERSION_2_0:
dal_hw_factory_dcn20_init(factory);
return true;
+ case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_factory_dcn21_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 4a9848308766..904bd30bed68 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -100,6 +100,7 @@ bool dal_hw_translate_init(
case DCN_VERSION_2_0:
dal_hw_translate_dcn20_init(translate);
return true;
+ case DCN_VERSION_2_01:
case DCN_VERSION_2_1:
dal_hw_translate_dcn21_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 45a6216dfa2a..6fc6488c54c0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -62,6 +62,7 @@ struct link_init_data {
uint32_t connector_index; /* this will be mapped to the HPD pins */
uint32_t link_index; /* this is mapped to DAL display_index
TODO: remove it when DC is complete. */
+ bool is_dpia_link;
};
struct dc_link *link_create(const struct link_init_data *init_params);
@@ -245,8 +246,16 @@ struct resource_pool {
* entries in link_encoders array.
*/
unsigned int dig_link_enc_count;
+ /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created.*/
+ unsigned int usb4_dpia_count;
#if defined(CONFIG_DRM_AMD_DC_DCN)
+ unsigned int hpo_dp_stream_enc_count;
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS];
+ unsigned int hpo_dp_link_enc_count;
+ struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS];
+#endif
+#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dc_3dlut *mpc_lut[MAX_PIPES];
struct dc_transfer_func *mpc_shaper[MAX_PIPES];
#endif
@@ -298,6 +307,9 @@ struct stream_resource {
struct display_stream_compressor *dsc;
struct timing_generator *tg;
struct stream_encoder *stream_enc;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *hpo_dp_stream_enc;
+#endif
struct audio *audio;
struct pixel_clk_params pix_clk_params;
@@ -366,6 +378,9 @@ struct pipe_ctx {
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
struct _vcs_dpi_display_pipe_dest_params_st pipe_dlg_param;
+ struct _vcs_dpi_display_rq_params_st dml_rq_param;
+ struct _vcs_dpi_display_dlg_sys_params_st dml_dlg_sys_param;
+ struct _vcs_dpi_display_e2e_pipe_params_st dml_input;
int det_buffer_size_kb;
bool unbounded_req;
#endif
@@ -375,6 +390,17 @@ struct pipe_ctx {
bool vtp_locked;
};
+/* Data used for dynamic link encoder assignment.
+ * Tracks current and future assignments; available link encoders;
+ * and mode of operation (whether to use current or future assignments).
+ */
+struct link_enc_cfg_context {
+ enum link_enc_cfg_mode mode;
+ struct link_enc_assignment link_enc_assignments[MAX_PIPES];
+ enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ struct link_enc_assignment transient_assignments[MAX_PIPES];
+};
+
struct resource_context {
struct pipe_ctx pipe_ctx[MAX_PIPES];
bool is_stream_enc_acquired[MAX_PIPES * 2];
@@ -382,12 +408,10 @@ struct resource_context {
uint8_t clock_source_ref_count[MAX_CLOCK_SOURCES];
uint8_t dp_clock_source_ref_count;
bool is_dsc_acquired[MAX_PIPES];
- /* A table/array of encoder-to-link assignments. One entry per stream.
- * Indexed by stream index in dc_state.
- */
- struct link_enc_assignment link_enc_assignments[MAX_PIPES];
- /* List of available link encoders. Uses engine ID as encoder identifier. */
- enum engine_id link_enc_avail[MAX_DIG_LINK_ENCODERS];
+ struct link_enc_cfg_context link_enc_cfg_ctx;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS];
+#endif
#if defined(CONFIG_DRM_AMD_DC_DCN)
bool is_mpc_3dlut_acquired[MAX_PIPES];
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 4d7b271b6409..95fb61d62778 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -69,6 +69,7 @@ struct ddc_service_init_data {
struct graphics_object_id id;
struct dc_context *ctx;
struct dc_link *link;
+ bool is_dpia_link;
};
struct ddc_service *dal_ddc_service_create(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 01c3a31be191..a6d3d859754a 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -30,6 +30,7 @@
#define LINK_TRAINING_RETRY_DELAY 50 /* ms */
#define LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD 3200 /*us*/
#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 552 /*us*/
+#define MAX_MTP_SLOT_COUNT 64
#define DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE 0x50
#define TRAINING_AUX_RD_INTERVAL 100 //us
@@ -120,12 +121,12 @@ enum dc_status dpcd_set_lane_settings(
const struct link_training_settings *link_training_setting,
uint32_t offset);
/* Read training status and adjustment requests from DPCD. */
-enum dc_status dp_get_lane_status_and_drive_settings(
+enum dc_status dp_get_lane_status_and_lane_adjust(
struct dc_link *link,
const struct link_training_settings *link_training_setting,
- union lane_status *ln_status,
- union lane_align_status_updated *ln_status_updated,
- struct link_training_settings *req_settings,
+ union lane_status ln_status[LANE_COUNT_DP_MAX],
+ union lane_align_status_updated *ln_align,
+ union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
uint32_t offset);
void dp_wait_for_training_aux_rd_interval(
@@ -146,10 +147,15 @@ bool dp_is_interlane_aligned(union lane_align_status_updated align_status);
bool dp_is_max_vs_reached(
const struct link_training_settings *lt_settings);
-
-void dp_update_drive_settings(
- struct link_training_settings *dest,
- struct link_training_settings src);
+void dp_hw_to_dpcd_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
+void dp_decide_lane_settings(
+ const struct link_training_settings *lt_settings,
+ const union lane_adjust ln_adjust[LANE_COUNT_DP_MAX],
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX],
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX]);
uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval);
@@ -165,7 +171,7 @@ uint8_t dc_dp_initialize_scrambling_data_symbols(
enum dc_status dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
-bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable);
+bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update);
void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable);
bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx);
bool dp_set_dsc_on_rx(struct pipe_ctx *pipe_ctx, bool enable);
@@ -189,5 +195,26 @@ enum dc_status dpcd_configure_lttpr_mode(
struct link_training_settings *lt_settings);
enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings *link_settings);
+bool dpcd_write_128b_132b_sst_payload_allocation_table(
+ const struct dc_stream_state *stream,
+ struct dc_link *link,
+ struct link_mst_stream_allocation_table *proposed_table,
+ bool allocate);
+
+enum dc_status dpcd_configure_channel_coding(
+ struct dc_link *link,
+ struct link_training_settings *lt_settings);
+
+bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link);
+
+struct fixed31_32 calculate_sst_avg_time_slots_per_mtp(
+ const struct dc_stream_state *stream,
+ const struct dc_link *link);
+void enable_dp_hpo_output(struct dc_link *link, const struct dc_link_settings *link_settings);
+void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal);
+void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable);
+bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx);
+void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link);
+
bool dp_retrieve_lttpr_cap(struct dc_link *link);
#endif /* __DC_LINK_DP_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
new file mode 100644
index 000000000000..974d703e3771
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dpia.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_LINK_DPIA_H__
+#define __DC_LINK_DPIA_H__
+
+/* This module implements functionality for training DPIA links. */
+
+struct dc_link;
+struct dc_link_settings;
+
+/* The approximate time (us) it takes to transmit 9 USB4 DP clock sync packets. */
+#define DPIA_CLK_SYNC_DELAY 16000
+
+/* Extend interval between training status checks for manual testing. */
+#define DPIA_DEBUG_EXTENDED_AUX_RD_INTERVAL_US 60000000
+
+/** @note Can remove once the DP tunneling registers are added to upstream include/drm/drm_dp_helper.h */
+/* DPCD DP Tunneling over USB4 */
+#define DP_TUNNELING_CAPABILITIES_SUPPORT 0xe000d
+#define DP_IN_ADAPTER_INFO 0xe000e
+#define DP_USB4_DRIVER_ID 0xe000f
+#define DP_USB4_ROUTER_TOPOLOGY_ID 0xe001b
+
+/* SET_CONFIG message types sent by driver. */
+enum dpia_set_config_type {
+ DPIA_SET_CFG_SET_LINK = 0x01,
+ DPIA_SET_CFG_SET_PHY_TEST_MODE = 0x05,
+ DPIA_SET_CFG_SET_TRAINING = 0x18,
+ DPIA_SET_CFG_SET_VSPE = 0x19
+};
+
+/* Training stages (TS) in SET_CONFIG(SET_TRAINING) message. */
+enum dpia_set_config_ts {
+ DPIA_TS_DPRX_DONE = 0x00, /* Done training DPRX. */
+ DPIA_TS_TPS1 = 0x01,
+ DPIA_TS_TPS2 = 0x02,
+ DPIA_TS_TPS3 = 0x03,
+ DPIA_TS_TPS4 = 0x07,
+ DPIA_TS_UFP_DONE = 0xff /* Done training DPTX-to-DPIA hop. */
+};
+
+/* SET_CONFIG message data associated with messages sent by driver. */
+union dpia_set_config_data {
+ struct {
+ uint8_t mode : 1;
+ uint8_t reserved : 7;
+ } set_link;
+ struct {
+ uint8_t stage;
+ } set_training;
+ struct {
+ uint8_t swing : 2;
+ uint8_t max_swing_reached : 1;
+ uint8_t pre_emph : 2;
+ uint8_t max_pre_emph_reached : 1;
+ uint8_t reserved : 2;
+ } set_vspe;
+ uint8_t raw;
+};
+
+/* Read tunneling device capability from DPCD and update link capability
+ * accordingly.
+ */
+enum dc_status dpcd_get_tunneling_device_data(struct dc_link *link);
+
+/* Train DP tunneling link for USB4 DPIA display endpoint.
+ * DPIA equivalent of dc_link_dp_perform_link_training.
+ * Aborts link training upon detection of sink unplug.
+ */
+enum link_training_result
+dc_link_dpia_perform_link_training(struct dc_link *link,
+ const struct dc_link_settings *link_setting,
+ bool skip_video_pattern);
+
+#endif /* __DC_LINK_DPIA_H__ */
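A minimal call-order sketch (not part of the diff) for the two DPIA entry points declared above; DC_OK and LINK_TRAINING_SUCCESS are the usual dc_status/link_training_result values, and the wrapper name is assumed:

#include "dc_link_dpia.h"

/* Hypothetical wrapper: refresh USB4 tunneling caps from DPCD, then train
 * the DPIA link instead of taking the regular DP training path. */
static bool example_train_dpia_link(struct dc_link *link,
				    const struct dc_link_settings *link_setting)
{
	if (dpcd_get_tunneling_device_data(link) != DC_OK)
		return false;

	/* Training aborts internally if the sink is unplugged mid-sequence. */
	return dc_link_dpia_perform_link_training(link, link_setting,
						  false /* skip_video_pattern */)
			== LINK_TRAINING_SUCCESS;
}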
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
index 142753644377..ecb4191b6e64 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/abm.h
@@ -54,6 +54,7 @@ struct abm_funcs {
const char *src,
unsigned int bytes,
unsigned int inst);
+ bool (*set_abm_pause)(struct abm *abm, bool pause, unsigned int panel_inst, unsigned int otg_inst);
};
#endif
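The new set_abm_pause hook is optional, so a caller would normally guard it; a minimal sketch (not part of the diff), with the wrapper name and instance arguments assumed:

/* Pause/unpause ABM around work that must not race its steady-state logic. */
static void example_set_abm_pause(struct abm *abm, bool pause,
				  unsigned int panel_inst, unsigned int otg_inst)
{
	if (abm && abm->funcs && abm->funcs->set_abm_pause)
		abm->funcs->set_abm_pause(abm, pause, panel_inst, otg_inst);
}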
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index a262f3278c21..1391c20f1852 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -102,6 +102,11 @@ enum dentist_divider_range {
.MP1_SMN_C2PMSG_83 = mmMP1_SMN_C2PMSG_83, \
.MP1_SMN_C2PMSG_67 = mmMP1_SMN_C2PMSG_67
+#define CLK_COMMON_REG_LIST_DCN_201() \
+ SR(DENTIST_DISPCLK_CNTL), \
+ CLK_SRI(CLK4_CLK_PLL_REQ, CLK4, 0), \
+ CLK_SRI(CLK4_CLK2_CURRENT_CNT, CLK4, 0)
+
#define CLK_REG_LIST_NV10() \
SR(DENTIST_DISPCLK_CNTL), \
CLK_SRI(CLK3_CLK_PLL_REQ, CLK3, 0), \
@@ -144,6 +149,12 @@ enum dentist_divider_range {
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_int, mask_sh),\
CLK_SF(CLK3_0_CLK3_CLK_PLL_REQ, FbMult_frac, mask_sh)
+#define CLK_COMMON_MASK_SH_LIST_DCN201_BASE(mask_sh) \
+ CLK_COMMON_MASK_SH_LIST_DCN_COMMON_BASE(mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_WDIVIDER, mask_sh),\
+ CLK_SF(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, mask_sh),\
+ CLK_SF(CLK4_0_CLK4_CLK_PLL_REQ, FbMult_int, mask_sh)
+
#define CLK_REG_FIELD_LIST(type) \
type DPREFCLK_SRC_SEL; \
type DENTIST_DPREFCLK_WDIVIDER; \
@@ -179,6 +190,8 @@ struct clk_mgr_mask {
struct clk_mgr_registers {
uint32_t DPREFCLK_CNTL;
uint32_t DENTIST_DISPCLK_CNTL;
+ uint32_t CLK4_CLK2_CURRENT_CNT;
+ uint32_t CLK4_CLK_PLL_REQ;
uint32_t CLK3_CLK2_DFS_CNTL;
uint32_t CLK3_CLK_PLL_REQ;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 0afa2364a986..c940fdfda144 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -79,7 +79,30 @@ struct dccg_funcs {
void (*otg_drop_pixel)(struct dccg *dccg,
uint32_t otg_inst);
void (*dccg_init)(struct dccg *dccg);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ void (*set_dpstreamclk)(
+ struct dccg *dccg,
+ enum hdmistreamclk_source src,
+ int otg_inst);
+
+ void (*enable_symclk32_se)(
+ struct dccg *dccg,
+ int hpo_se_inst,
+ enum phyd32clk_clock_source phyd32clk);
+ void (*disable_symclk32_se)(
+ struct dccg *dccg,
+ int hpo_se_inst);
+
+ void (*enable_symclk32_le)(
+ struct dccg *dccg,
+ int hpo_le_inst,
+ enum phyd32clk_clock_source phyd32clk);
+
+ void (*disable_symclk32_le)(
+ struct dccg *dccg,
+ int hpo_le_inst);
+#endif
void (*set_physymclk)(
struct dccg *dccg,
int phy_inst,
@@ -100,6 +123,15 @@ struct dccg_funcs {
void (*set_dispclk_change_mode)(
struct dccg *dccg,
enum dentist_dispclk_change_mode change_mode);
+
+ void (*disable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
+ void (*enable_dsc)(
+ struct dccg *dccg,
+ int inst);
+
};
#endif //__DAL_DCCG_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
index 00fc81431b43..3ef7faa92052 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dpp.h
@@ -29,6 +29,17 @@
#include "transform.h"
+union defer_reg_writes {
+ struct {
+ bool disable_blnd_lut:1;
+ bool disable_3dlut:1;
+ bool disable_shaper:1;
+ bool disable_gamcor:1;
+ bool disable_dscl:1;
+ } bits;
+ uint32_t raw;
+};
+
struct dpp {
const struct dpp_funcs *funcs;
struct dc_context *ctx;
@@ -43,6 +54,7 @@ struct dpp {
struct pwl_params regamma_params;
struct pwl_params degamma_params;
struct dpp_cursor_attributes cur_attr;
+ union defer_reg_writes deferred_reg_writes;
struct pwl_params shaper_params;
bool cm_bypass_mode;
@@ -245,6 +257,8 @@ struct dpp_funcs {
bool dppclk_div,
bool enable);
+ void (*dpp_deferred_update)(
+ struct dpp *dpp);
bool (*dpp_program_blnd_lut)(
struct dpp *dpp,
const struct pwl_params *params);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index ec28cb9c3a8e..fd6572ba3fb2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -171,10 +171,9 @@ struct dwbc {
bool dwb_is_efc_transition;
bool dwb_is_drc;
int wb_src_plane_inst;/*hubp, mpcc, inst*/
- bool update_privacymask;
uint32_t mask_id;
- int otg_inst;
- bool mvc_cfg;
+ int otg_inst;
+ bool mvc_cfg;
};
struct dwbc_funcs {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index 31a1713bb49f..10ecbc667ffa 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -38,6 +38,10 @@
#define MAX_PIPES 6
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES 1
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define MAX_HPO_DP2_ENCODERS 4
+#define MAX_HPO_DP2_LINK_ENCODERS 2
+#endif
struct gamma_curve {
uint32_t offset;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index 9eaf345aa2a1..bb0e91756ddd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -59,6 +59,10 @@ struct encoder_feature_support {
uint32_t IS_TPS3_CAPABLE:1;
uint32_t IS_TPS4_CAPABLE:1;
uint32_t HDMI_6GB_EN:1;
+ uint32_t IS_DP2_CAPABLE:1;
+ uint32_t IS_UHBR10_CAPABLE:1;
+ uint32_t IS_UHBR13_5_CAPABLE:1;
+ uint32_t IS_UHBR20_CAPABLE:1;
uint32_t DP_IS_USB_C:1;
} bits;
uint32_t raw;
@@ -208,6 +212,99 @@ struct link_enc_assignment {
bool valid;
struct display_endpoint_id ep_id;
enum engine_id eng_id;
+ struct dc_stream_state *stream;
};
+enum link_enc_cfg_mode {
+ LINK_ENC_CFG_STEADY, /* Normal operation - use current_state. */
+ LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. */
+};
+
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum dp2_link_mode {
+ DP2_LINK_TRAINING_TPS1,
+ DP2_LINK_TRAINING_TPS2,
+ DP2_LINK_ACTIVE,
+ DP2_TEST_PATTERN
+};
+
+enum dp2_phy_tp_select {
+ DP_DPHY_TP_SELECT_TPS1,
+ DP_DPHY_TP_SELECT_TPS2,
+ DP_DPHY_TP_SELECT_PRBS,
+ DP_DPHY_TP_SELECT_CUSTOM,
+ DP_DPHY_TP_SELECT_SQUARE
+};
+
+enum dp2_phy_tp_prbs {
+ DP_DPHY_TP_PRBS7,
+ DP_DPHY_TP_PRBS9,
+ DP_DPHY_TP_PRBS11,
+ DP_DPHY_TP_PRBS15,
+ DP_DPHY_TP_PRBS23,
+ DP_DPHY_TP_PRBS31
+};
+
+struct hpo_dp_link_enc_state {
+ uint32_t link_enc_enabled;
+ uint32_t link_mode;
+ uint32_t lane_count;
+ uint32_t slot_count[4];
+ uint32_t stream_src[4];
+ uint32_t vc_rate_x[4];
+ uint32_t vc_rate_y[4];
+};
+
+struct hpo_dp_link_encoder {
+ const struct hpo_dp_link_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ int inst;
+ enum engine_id preferred_engine;
+ enum transmitter transmitter;
+ enum hpd_source_id hpd_source;
+};
+
+struct hpo_dp_link_encoder_funcs {
+
+ void (*enable_link_phy)(struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ enum transmitter transmitter);
+
+ void (*disable_link_phy)(struct hpo_dp_link_encoder *link_enc,
+ enum signal_type signal);
+
+ void (*link_enable)(
+ struct hpo_dp_link_encoder *enc,
+ enum dc_lane_count num_lanes);
+
+ void (*link_disable)(
+ struct hpo_dp_link_encoder *enc);
+
+ void (*set_link_test_pattern)(
+ struct hpo_dp_link_encoder *enc,
+ struct encoder_set_dp_phy_pattern_param *tp_params);
+
+ void (*update_stream_allocation_table)(
+ struct hpo_dp_link_encoder *enc,
+ const struct link_mst_stream_allocation_table *table);
+
+ void (*set_throttled_vcp_size)(
+ struct hpo_dp_link_encoder *enc,
+ uint32_t stream_encoder_inst,
+ struct fixed31_32 avg_time_slots_per_mtp);
+
+ bool (*is_in_alt_mode) (
+ struct hpo_dp_link_encoder *enc);
+
+ void (*read_state)(
+ struct hpo_dp_link_encoder *enc,
+ struct hpo_dp_link_enc_state *state);
+
+ void (*set_ffe)(
+ struct hpo_dp_link_encoder *enc,
+ const struct dc_link_settings *link_settings,
+ uint8_t ffe_preset);
+};
+#endif
+
#endif /* LINK_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 640bb432bd6a..04d6ec3f021f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -281,6 +281,7 @@ struct mpc_funcs {
struct mpcc* (*get_mpcc_for_dpp_from_secondary)(
struct mpc_tree *tree,
int dpp_id);
+
struct mpcc* (*get_mpcc_for_dpp)(
struct mpc_tree *tree,
int dpp_id);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 564ea6a727b0..c88e113b94d1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -165,9 +165,11 @@ struct stream_encoder_funcs {
struct stream_encoder *enc);
void (*dp_blank)(
+ struct dc_link *link,
struct stream_encoder *enc);
void (*dp_unblank)(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param);
@@ -227,7 +229,8 @@ struct stream_encoder_funcs {
void (*dp_set_dsc_pps_info_packet)(struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps);
+ uint8_t *dsc_packed_pps,
+ bool immediate_update);
void (*set_dynamic_metadata)(struct stream_encoder *enc,
bool enable,
@@ -242,4 +245,86 @@ struct stream_encoder_funcs {
struct stream_encoder *enc);
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_stream_encoder_state {
+ uint32_t stream_enc_enabled;
+ uint32_t vid_stream_enabled;
+ uint32_t otg_inst;
+ uint32_t pixel_encoding;
+ uint32_t component_depth;
+ uint32_t compressed_format;
+ uint32_t sdp_enabled;
+ uint32_t mapped_to_link_enc;
+};
+
+struct hpo_dp_stream_encoder {
+ const struct hpo_dp_stream_encoder_funcs *funcs;
+ struct dc_context *ctx;
+ struct dc_bios *bp;
+ uint32_t inst;
+ enum engine_id id;
+ struct vpg *vpg;
+ struct apg *apg;
+};
+
+struct hpo_dp_stream_encoder_funcs {
+ void (*enable_stream)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_unblank)(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_source);
+
+ void (*dp_blank)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*disable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*set_stream_attribute)(
+ struct hpo_dp_stream_encoder *enc,
+ struct dc_crtc_timing *crtc_timing,
+ enum dc_color_space output_color_space,
+ bool use_vsc_sdp_for_colorimetry,
+ bool compressed_format,
+ bool double_buffer_en);
+
+ void (*update_dp_info_packets)(
+ struct hpo_dp_stream_encoder *enc,
+ const struct encoder_info_frame *info_frame);
+
+ void (*stop_dp_info_packets)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_set_dsc_pps_info_packet)(
+ struct hpo_dp_stream_encoder *enc,
+ bool enable,
+ uint8_t *dsc_packed_pps,
+ bool immediate_update);
+
+ void (*map_stream_to_link)(
+ struct hpo_dp_stream_encoder *enc,
+ uint32_t stream_enc_inst,
+ uint32_t link_enc_inst);
+
+ void (*audio_mute_control)(
+ struct hpo_dp_stream_encoder *enc, bool mute);
+
+ void (*dp_audio_setup)(
+ struct hpo_dp_stream_encoder *enc,
+ unsigned int az_inst,
+ struct audio_info *info);
+
+ void (*dp_audio_enable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*dp_audio_disable)(
+ struct hpo_dp_stream_encoder *enc);
+
+ void (*read_state)(
+ struct hpo_dp_stream_encoder *enc,
+ struct hpo_dp_stream_encoder_state *state);
+};
+#endif
+
#endif /* STREAM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
index 03f47f23fb65..7390baf916b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h
@@ -100,6 +100,9 @@ enum crc_selection {
enum otg_out_mux_dest {
OUT_MUX_DIO = 0,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ OUT_MUX_HPO_DP = 2,
+#endif
};
enum h_timing_div_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index ad5f2adcc40d..d50f4bd06b5d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -236,7 +236,7 @@ struct hw_sequencer_funcs {
const struct tg_color *solid_color,
int width, int height, int offset);
- void (*z10_restore)(struct dc *dc);
+ void (*z10_restore)(const struct dc *dc);
void (*z10_save_init)(struct dc *dc);
void (*update_visual_confirm_color)(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index f7f7e4fff0c2..f324285394be 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -41,6 +41,9 @@ struct dce_hwseq_wa {
bool DEGVIDCN10_254;
bool DEGVIDCN21;
bool disallow_self_refresh_during_multi_plane_transition;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ bool dp_hpo_and_otg_sequence;
+#endif
};
struct hwseq_wa_state {
@@ -151,6 +154,10 @@ struct dce_hwseq {
struct hwseq_wa_state wa_state;
struct hwseq_private_funcs funcs;
+ PHYSICAL_ADDRESS_LOC fb_base;
+ PHYSICAL_ADDRESS_LOC fb_top;
+ PHYSICAL_ADDRESS_LOC fb_offset;
+ PHYSICAL_ADDRESS_LOC uma_top;
};
#endif /* __DC_HW_SEQUENCER_PRIVATE_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
index 883dd8733ea4..10dcf6a5e9b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h
@@ -70,22 +70,36 @@ void link_enc_cfg_link_enc_unassign(
* endpoint.
*/
bool link_enc_cfg_is_transmitter_mappable(
- struct dc_state *state,
+ struct dc *dc,
struct link_encoder *link_enc);
+/* Return stream using DIG link encoder resource. NULL if unused. */
+struct dc_stream_state *link_enc_cfg_get_stream_using_link_enc(
+ struct dc *dc,
+ enum engine_id eng_id);
+
/* Return link using DIG link encoder resource. NULL if unused. */
struct dc_link *link_enc_cfg_get_link_using_link_enc(
- struct dc_state *state,
+ struct dc *dc,
enum engine_id eng_id);
/* Return DIG link encoder used by link. NULL if unused. */
struct link_encoder *link_enc_cfg_get_link_enc_used_by_link(
- struct dc_state *state,
+ struct dc *dc,
const struct dc_link *link);
/* Return next available DIG link encoder. NULL if none available. */
-struct link_encoder *link_enc_cfg_get_next_avail_link_enc(
- const struct dc *dc,
- const struct dc_state *state);
+struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc);
+
+/* Return DIG link encoder used by stream. NULL if unused. */
+struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream(
+ struct dc *dc,
+ const struct dc_stream_state *stream);
+
+/* Return true if encoder available to use. */
+bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link);
+
+/* Returns true if encoder assignments in supplied state pass validity checks. */
+bool link_enc_cfg_validate(struct dc *dc, struct dc_state *state);
#endif /* DC_INC_LINK_ENC_CFG_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
index fc1d289bb9fe..ba664bc49595 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h
@@ -37,6 +37,7 @@ void dp_enable_link_phy(
const struct dc_link_settings *link_settings);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
+void dp_source_sequence_trace(struct dc_link *link, uint8_t dp_test_mode);
void edp_add_delay_for_T9(struct dc_link *link);
bool edp_receiver_ready_T9(struct dc_link *link);
bool edp_receiver_ready_T7(struct dc_link *link);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index fe1e5833c96a..372c0898facd 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -49,6 +49,11 @@ struct resource_caps {
int num_vmid;
int num_dsc;
unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output).
+ unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters).
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ int num_hpo_dp_stream_encoder;
+ int num_hpo_dp_link_encoder;
+#endif
int num_mpc_3dlut;
};
@@ -68,6 +73,15 @@ struct resource_create_funcs {
struct stream_encoder *(*create_stream_encoder)(
enum engine_id eng_id, struct dc_context *ctx);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)(
+ enum engine_id eng_id, struct dc_context *ctx);
+
+ struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)(
+ uint8_t inst,
+ struct dc_context *ctx);
+#endif
+
struct dce_hwseq *(*create_hwseq)(
struct dc_context *ctx);
};
@@ -187,4 +201,9 @@ int get_num_mpc_splits(struct pipe_ctx *pipe);
int get_num_odm_splits(struct pipe_ctx *pipe);
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+struct hpo_dp_link_encoder *resource_get_unused_hpo_dp_link_encoder(
+ const struct resource_pool *pool);
+#endif
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index 0d09181227c5..fd739aecf104 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -93,6 +93,16 @@ IRQ_DCN21 = irq_service_dcn21.o
AMD_DAL_IRQ_DCN21= $(addprefix $(AMDDALPATH)/dc/irq/dcn21/,$(IRQ_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN21)
+
+###############################################################################
+# DCN 201
+###############################################################################
+IRQ_DCN201 = irq_service_dcn201.o
+
+AMD_DAL_IRQ_DCN201 = $(addprefix $(AMDDALPATH)/dc/irq/dcn201/,$(IRQ_DCN201))
+
+AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCN201)
+
###############################################################################
# DCN 30
###############################################################################
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
index c4b067d01895..9ccafe007b23 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c
@@ -132,6 +132,31 @@ enum dc_irq_source to_dal_irq_source_dcn20(
}
}
+uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source)
+{
+ const struct irq_source_info *info;
+ uint32_t addr;
+ uint32_t value;
+ uint32_t current_status;
+
+ info = find_irq_source_info(irq_service, source);
+ if (!info)
+ return 0;
+
+ addr = info->status_reg;
+ if (!addr)
+ return 0;
+
+ value = dm_read_reg(irq_service->ctx, addr);
+ current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE);
+
+ return current_status;
+}
+
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
index aee4b37999f1..4d69ab24ca25 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h
@@ -31,4 +31,6 @@
struct irq_service *dal_irq_service_dcn20_create(
struct irq_service_init_data *init_data);
+uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source);
+
#endif
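A minimal sketch (not part of the diff) of the newly exported HPD-state helper: it returns the raw DC_HPD_SENSE bit for the given source, or 0 when the source has no status register. The wrapper name is assumed:

#include "dcn20/irq_service_dcn20.h"

/* Hypothetical helper: true when the HPD line for HPD1 is currently asserted. */
static bool example_hpd1_asserted(struct irq_service *irq_service)
{
	return dc_get_hpd_state_dcn20(irq_service, DC_IRQ_SOURCE_HPD1) != 0;
}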
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
new file mode 100644
index 000000000000..a47f68634fc3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.c
@@ -0,0 +1,374 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+
+#include "include/logger_interface.h"
+
+#include "../dce110/irq_service_dce110.h"
+
+#include "dcn/dcn_2_0_3_offset.h"
+#include "dcn/dcn_2_0_3_sh_mask.h"
+
+#include "cyan_skillfish_ip_offset.h"
+#include "soc15_hw_ip.h"
+
+#include "irq_service_dcn201.h"
+
+#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
+
+enum dc_irq_source to_dal_irq_source_dcn201(
+ struct irq_service *irq_service,
+ uint32_t src_id,
+ uint32_t ext_id)
+{
+ switch (src_id) {
+ case DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK1;
+ case DCN_1_0__SRCID__DC_D2_OTG_VSTARTUP:
+ return DC_IRQ_SOURCE_VBLANK2;
+ case DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC1_VLINE0;
+ case DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL:
+ return DC_IRQ_SOURCE_DC2_VLINE0;
+ case DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP1;
+ case DCN_1_0__SRCID__HUBP1_FLIP_INTERRUPT:
+ return DC_IRQ_SOURCE_PFLIP2;
+ case DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE1;
+ case DCN_1_0__SRCID__OTG1_IHC_V_UPDATE_NO_LOCK_INTERRUPT:
+ return DC_IRQ_SOURCE_VUPDATE2;
+ case DCN_1_0__SRCID__DC_HPD1_INT:
+ /* generic src_id for all HPD and HPDRX interrupts */
+ switch (ext_id) {
+ case DCN_1_0__CTXID__DC_HPD1_INT:
+ return DC_IRQ_SOURCE_HPD1;
+ case DCN_1_0__CTXID__DC_HPD2_INT:
+ return DC_IRQ_SOURCE_HPD2;
+ case DCN_1_0__CTXID__DC_HPD1_RX_INT:
+ return DC_IRQ_SOURCE_HPD1RX;
+ case DCN_1_0__CTXID__DC_HPD2_RX_INT:
+ return DC_IRQ_SOURCE_HPD2RX;
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ break;
+
+ default:
+ return DC_IRQ_SOURCE_INVALID;
+ }
+ return DC_IRQ_SOURCE_INVALID;
+}
+
+static bool hpd_ack(
+ struct irq_service *irq_service,
+ const struct irq_source_info *info)
+{
+ uint32_t addr = info->status_reg;
+ uint32_t value = dm_read_reg(irq_service->ctx, addr);
+ uint32_t current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE_DELAYED);
+
+ dal_irq_service_ack_generic(irq_service, info);
+
+ value = dm_read_reg(irq_service->ctx, info->enable_reg);
+
+ set_reg_field_value(
+ value,
+ current_status ? 0 : 1,
+ HPD0_DC_HPD_INT_CONTROL,
+ DC_HPD_INT_POLARITY);
+
+ dm_write_reg(irq_service->ctx, info->enable_reg, value);
+
+ return true;
+}
+
+static const struct irq_source_info_funcs hpd_irq_info_funcs = {
+ .set = NULL,
+ .ack = hpd_ack
+};
+
+static const struct irq_source_info_funcs hpd_rx_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs pflip_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vblank_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs vline0_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+static const struct irq_source_info_funcs vupdate_no_lock_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+static const struct irq_source_info_funcs dmub_outbox_irq_info_funcs = {
+ .set = NULL,
+ .ack = NULL
+};
+
+#undef BASE_INNER
+#define BASE_INNER(seg) DMU_BASE__INST0_SEG ## seg
+
+/* Compile-time expansion of the register base address. */
+#define BASE(seg) \
+	BASE_INNER(seg)
+
+#define SRI(reg_name, block, id)\
+ BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
+ mm ## block ## id ## _ ## reg_name
+
+#define IRQ_REG_ENTRY(block, reg_num, reg1, mask1, reg2, mask2)\
+ .enable_reg = SRI(reg1, block, reg_num),\
+ .enable_mask = \
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ .enable_value = {\
+ block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK,\
+ ~block ## reg_num ## _ ## reg1 ## __ ## mask1 ## _MASK \
+ },\
+ .ack_reg = SRI(reg2, block, reg_num),\
+ .ack_mask = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK,\
+ .ack_value = \
+ block ## reg_num ## _ ## reg2 ## __ ## mask2 ## _MASK \
+
+#define hpd_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1 + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_irq_info_funcs\
+ }
+
+#define hpd_rx_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_HPD1RX + reg_num] = {\
+ IRQ_REG_ENTRY(HPD, reg_num,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_EN,\
+ DC_HPD_INT_CONTROL, DC_HPD_RX_INT_ACK),\
+ .status_reg = SRI(DC_HPD_INT_STATUS, HPD, reg_num),\
+ .funcs = &hpd_rx_irq_info_funcs\
+ }
+#define pflip_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_PFLIP1 + reg_num] = {\
+ IRQ_REG_ENTRY(HUBPREQ, reg_num,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_INT_MASK,\
+ DCSURF_SURFACE_FLIP_INTERRUPT, SURFACE_FLIP_CLEAR),\
+ .funcs = &pflip_irq_info_funcs\
+ }
+
+#define vupdate_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+/* vupdate_no_lock_int_entry maps to DC_IRQ_SOURCE_VUPDATEx, to match the
+ * semantics of DCE's DC_IRQ_SOURCE_VUPDATEx.
+ */
+#define vupdate_no_lock_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VUPDATE1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VUPDATE_NO_LOCK_EVENT_CLEAR),\
+ .funcs = &vupdate_no_lock_irq_info_funcs\
+ }
+#define vblank_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_VBLANK1 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_INT_EN,\
+ OTG_GLOBAL_SYNC_STATUS, VSTARTUP_EVENT_CLEAR),\
+ .funcs = &vblank_irq_info_funcs\
+ }
+
+#define vline0_int_entry(reg_num)\
+ [DC_IRQ_SOURCE_DC1_VLINE0 + reg_num] = {\
+ IRQ_REG_ENTRY(OTG, reg_num,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_INT_ENABLE,\
+ OTG_VERTICAL_INTERRUPT0_CONTROL, OTG_VERTICAL_INTERRUPT0_CLEAR),\
+ .funcs = &vline0_irq_info_funcs\
+ }
+
+#define dummy_irq_entry() \
+ {\
+ .funcs = &dummy_irq_info_funcs\
+ }
+
+#define i2c_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_I2C_DDC ## reg_num] = dummy_irq_entry()
+
+#define dp_sink_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DPSINK ## reg_num] = dummy_irq_entry()
+
+#define gpio_pad_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_GPIOPAD ## reg_num] = dummy_irq_entry()
+
+#define dc_underflow_int_entry(reg_num) \
+ [DC_IRQ_SOURCE_DC ## reg_num ## UNDERFLOW] = dummy_irq_entry()
+
+static const struct irq_source_info_funcs dummy_irq_info_funcs = {
+ .set = dal_irq_service_dummy_set,
+ .ack = dal_irq_service_dummy_ack
+};
+
+static const struct irq_source_info
+irq_source_info_dcn201[DAL_IRQ_SOURCES_NUMBER] = {
+ [DC_IRQ_SOURCE_INVALID] = dummy_irq_entry(),
+ hpd_int_entry(0),
+ hpd_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ hpd_rx_int_entry(0),
+ hpd_rx_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ i2c_int_entry(1),
+ i2c_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dp_sink_int_entry(1),
+ dp_sink_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ [DC_IRQ_SOURCE_TIMER] = dummy_irq_entry(),
+ pflip_int_entry(0),
+ pflip_int_entry(1),
+ pflip_int_entry(2),
+ pflip_int_entry(3),
+ [DC_IRQ_SOURCE_PFLIP5] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP6] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_PFLIP_UNDERLAY0] = dummy_irq_entry(),
+ gpio_pad_int_entry(0),
+ gpio_pad_int_entry(1),
+ gpio_pad_int_entry(2),
+ gpio_pad_int_entry(3),
+ gpio_pad_int_entry(4),
+ gpio_pad_int_entry(5),
+ gpio_pad_int_entry(6),
+ gpio_pad_int_entry(7),
+ gpio_pad_int_entry(8),
+ gpio_pad_int_entry(9),
+ gpio_pad_int_entry(10),
+ gpio_pad_int_entry(11),
+ gpio_pad_int_entry(12),
+ gpio_pad_int_entry(13),
+ gpio_pad_int_entry(14),
+ gpio_pad_int_entry(15),
+ gpio_pad_int_entry(16),
+ gpio_pad_int_entry(17),
+ gpio_pad_int_entry(18),
+ gpio_pad_int_entry(19),
+ gpio_pad_int_entry(20),
+ gpio_pad_int_entry(21),
+ gpio_pad_int_entry(22),
+ gpio_pad_int_entry(23),
+ gpio_pad_int_entry(24),
+ gpio_pad_int_entry(25),
+ gpio_pad_int_entry(26),
+ gpio_pad_int_entry(27),
+ gpio_pad_int_entry(28),
+ gpio_pad_int_entry(29),
+ gpio_pad_int_entry(30),
+ dc_underflow_int_entry(1),
+ dc_underflow_int_entry(2),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ [DC_IRQ_SOURCE_DMCU_SCP] = dummy_irq_entry(),
+ [DC_IRQ_SOURCE_VBIOS_SW] = dummy_irq_entry(),
+ vupdate_no_lock_int_entry(0),
+ vupdate_no_lock_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ vblank_int_entry(0),
+ vblank_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ vline0_int_entry(0),
+ vline0_int_entry(1),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+ dummy_irq_entry(),
+};
+
+static const struct irq_service_funcs irq_service_funcs_dcn201 = {
+ .to_dal_irq_source = to_dal_irq_source_dcn201
+};
+
+static void dcn201_irq_construct(
+ struct irq_service *irq_service,
+ struct irq_service_init_data *init_data)
+{
+ dal_irq_service_construct(irq_service, init_data);
+
+ irq_service->info = irq_source_info_dcn201;
+ irq_service->funcs = &irq_service_funcs_dcn201;
+}
+
+struct irq_service *dal_irq_service_dcn201_create(
+ struct irq_service_init_data *init_data)
+{
+ struct irq_service *irq_service = kzalloc(sizeof(*irq_service),
+ GFP_KERNEL);
+
+ if (!irq_service)
+ return NULL;
+
+ dcn201_irq_construct(irq_service, init_data);
+ return irq_service;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
new file mode 100644
index 000000000000..8e27c5e219a3
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn201/irq_service_dcn201.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_IRQ_SERVICE_DCN201_H__
+#define __DAL_IRQ_SERVICE_DCN201_H__
+
+#include "../irq_service.h"
+
+struct irq_service *dal_irq_service_dcn201_create(
+ struct irq_service_init_data *init_data);
+
+#endif
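Creating the DCN 2.01 IRQ service follows the same pattern as the other DCN variants; a minimal sketch (not part of the diff), assuming irq_service_init_data carries the dc_context pointer as it does for the other DCN parts:

#include "dcn201/irq_service_dcn201.h"

static struct irq_service *example_create_dcn201_irqs(struct dc_context *ctx)
{
	struct irq_service_init_data init_data = { .ctx = ctx };

	/* kzalloc-backed; returns NULL on allocation failure and is released
	 * with dal_irq_service_destroy(). */
	return dal_irq_service_dcn201_create(&init_data);
}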
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
index ed54e1c819be..78940cb20e10 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c
@@ -135,6 +135,31 @@ enum dc_irq_source to_dal_irq_source_dcn21(
return DC_IRQ_SOURCE_INVALID;
}
+uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source)
+{
+ const struct irq_source_info *info;
+ uint32_t addr;
+ uint32_t value;
+ uint32_t current_status;
+
+ info = find_irq_source_info(irq_service, source);
+ if (!info)
+ return 0;
+
+ addr = info->status_reg;
+ if (!addr)
+ return 0;
+
+ value = dm_read_reg(irq_service->ctx, addr);
+ current_status =
+ get_reg_field_value(
+ value,
+ HPD0_DC_HPD_INT_STATUS,
+ DC_HPD_SENSE);
+
+ return current_status;
+}
+
static bool hpd_ack(
struct irq_service *irq_service,
const struct irq_source_info *info)
diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
index da2bd0e93d7a..616470e32380 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h
@@ -31,4 +31,6 @@
struct irq_service *dal_irq_service_dcn21_create(
struct irq_service_init_data *init_data);
+uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source);
+
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index a2a4fbeb83f8..4db1133e4466 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service)
*irq_service = NULL;
}
-static const struct irq_source_info *find_irq_source_info(
+const struct irq_source_info *find_irq_source_info(
struct irq_service *irq_service,
enum dc_irq_source source)
{
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
index dbfcb096eedd..e60b82480093 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h
@@ -69,6 +69,10 @@ struct irq_service {
const struct irq_service_funcs *funcs;
};
+const struct irq_source_info *find_irq_source_info(
+ struct irq_service *irq_service,
+ enum dc_irq_source source);
+
void dal_irq_service_construct(
struct irq_service *irq_service,
struct irq_service_init_data *init_data);
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index f50cae252de4..5df1d80c8341 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -31,10 +31,12 @@
#include <linux/kref.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <asm/byteorder.h>
#include <drm/drm_print.h>
+#include <drm/drm_dp_helper.h>
#include "cgs_common.h"
diff --git a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
index 1053b165c139..1e39aae6b1cf 100644
--- a/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/virtual/virtual_stream_encoder.c
@@ -69,9 +69,11 @@ static void virtual_stream_encoder_stop_dp_info_packets(
struct stream_encoder *enc) {}
static void virtual_stream_encoder_dp_blank(
+ struct dc_link *link,
struct stream_encoder *enc) {}
static void virtual_stream_encoder_dp_unblank(
+ struct dc_link *link,
struct stream_encoder *enc,
const struct encoder_unblank_param *param) {}
@@ -102,7 +104,8 @@ static void virtual_setup_stereo_sync(
static void virtual_stream_encoder_set_dsc_pps_info_packet(
struct stream_encoder *enc,
bool enable,
- uint8_t *dsc_packed_pps)
+ uint8_t *dsc_packed_pps,
+ bool immediate_update)
{}
static const struct stream_encoder_funcs virtual_str_enc_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
index caf961bb633f..717c0e572d2f 100644
--- a/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/dmub_srv.h
@@ -84,6 +84,7 @@ enum dmub_status {
DMUB_STATUS_QUEUE_FULL,
DMUB_STATUS_TIMEOUT,
DMUB_STATUS_INVALID,
+ DMUB_STATUS_HW_FAILURE,
};
/* enum dmub_asic - dmub asic identifier */
@@ -96,6 +97,7 @@ enum dmub_asic {
DMUB_ASIC_DCN302,
DMUB_ASIC_DCN303,
DMUB_ASIC_DCN31,
+ DMUB_ASIC_DCN31B,
DMUB_ASIC_MAX,
};
@@ -118,6 +120,7 @@ enum dmub_notification_type {
DMUB_NOTIFICATION_AUX_REPLY,
DMUB_NOTIFICATION_HPD,
DMUB_NOTIFICATION_HPD_IRQ,
+ DMUB_NOTIFICATION_SET_CONFIG_REPLY,
DMUB_NOTIFICATION_MAX
};
@@ -235,6 +238,8 @@ struct dmub_srv_hw_params {
bool load_inst_const;
bool skip_panel_power_sequence;
bool disable_z10;
+ bool dpia_supported;
+ bool disable_dpia;
};
/**
@@ -358,6 +363,8 @@ struct dmub_srv_hw_funcs {
uint32_t (*get_current_time)(struct dmub_srv *dmub);
void (*get_diagnostic_data)(struct dmub_srv *dmub, struct dmub_diagnostic_data *dmub_oca);
+
+ bool (*should_detect)(struct dmub_srv *dmub);
};
/**
@@ -437,6 +444,7 @@ struct dmub_notification {
union {
struct aux_reply_data aux_reply;
enum dp_hpd_status hpd_status;
+ enum set_config_status sc_status;
};
};
@@ -724,6 +732,8 @@ bool dmub_srv_get_outbox0_msg(struct dmub_srv *dmub, struct dmcub_trace_buf_entr
bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_srv_should_detect(struct dmub_srv *dmub);
+
#if defined(__cplusplus)
}
#endif
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 7efe9ba8706e..0293c58f0701 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -46,10 +46,10 @@
/* Firmware versioning. */
#ifdef DMUB_EXPOSE_VERSION
-#define DMUB_FW_VERSION_GIT_HASH 0x7383caadc
+#define DMUB_FW_VERSION_GIT_HASH 0x9525efb5
#define DMUB_FW_VERSION_MAJOR 0
#define DMUB_FW_VERSION_MINOR 0
-#define DMUB_FW_VERSION_REVISION 79
+#define DMUB_FW_VERSION_REVISION 90
#define DMUB_FW_VERSION_TEST 0
#define DMUB_FW_VERSION_VBIOS 0
#define DMUB_FW_VERSION_HOTFIX 0
@@ -368,10 +368,15 @@ union dmub_fw_boot_options {
uint32_t disable_clk_gate: 1; /**< 1 if clock gating should be disabled */
uint32_t skip_phy_init_panel_sequence: 1; /**< 1 to skip panel init seq */
uint32_t z10_disable: 1; /**< 1 to disable z10 */
- uint32_t reserved2: 1; /**< reserved for an unreleased feature */
- uint32_t reserved_unreleased1: 1; /**< reserved for an unreleased feature */
+ uint32_t enable_dpia: 1; /**< 1 if DPIA should be enabled */
uint32_t invalid_vbios_data: 1; /**< 1 if VBIOS data table is invalid */
- uint32_t reserved : 23; /**< reserved */
+ uint32_t dpia_supported: 1; /**< 1 if DPIA is supported on this platform */
+ uint32_t sel_mux_phy_c_d_phy_f_g: 1; /**< 1 if PHYF/PHYG should be enabled */
+ uint32_t power_optimization: 1; /**< 1 if all root clock gating is enabled and low power memory is enabled */
+ uint32_t diag_env: 1; /**< 1 if running in a diagnostic environment */
+
+ uint32_t reserved : 19; /**< reserved */
} bits; /**< boot bits */
uint32_t all; /**< 32-bit access to bits */
};
@@ -653,6 +658,10 @@ enum dmub_cmd_type {
*/
DMUB_CMD__PANEL_CNTL = 74,
/**
+ * Command type used for interfacing with DPIA.
+ */
+ DMUB_CMD__DPIA = 77,
+ /**
* Command type used for EDID CEA parsing
*/
DMUB_CMD__EDID_CEA = 79,
@@ -674,6 +683,21 @@ enum dmub_out_cmd_type {
* Command type used for DP AUX Reply data notification
*/
DMUB_OUT_CMD__DP_AUX_REPLY = 1,
+ /**
+ * Command type used for DP HPD event notification
+ */
+ DMUB_OUT_CMD__DP_HPD_NOTIFY = 2,
+ /**
+ * Command type used for SET_CONFIG Reply notification
+ */
+ DMUB_OUT_CMD__SET_CONFIG_REPLY = 3,
+};
+
+/* DMUB_CMD__DPIA command sub-types. */
+enum dmub_cmd_dpia_type {
+ DMUB_CMD__DPIA_DIG1_DPIA_CONTROL = 0,
+ DMUB_CMD__DPIA_SET_CONFIG_ACCESS = 1,
+ DMUB_CMD__DPIA_MST_ALLOC_SLOTS = 2,
};
#pragma pack(push, 1)
@@ -973,7 +997,7 @@ struct dmub_dig_transmitter_control_data_v1_7 {
uint8_t hpdsel; /**< =1: HPD1, =2: HPD2, ..., =6: HPD6, =0: HPD is not assigned */
uint8_t digfe_sel; /**< DIG front-end selection, bit0 means DIG0 FE is enabled */
uint8_t connobj_id; /**< Connector Object Id defined in ObjectId.h */
- uint8_t reserved0; /**< For future use */
+ uint8_t HPO_instance; /**< HPO instance (0: inst0, 1: inst1) */
uint8_t reserved1; /**< For future use */
uint8_t reserved2[3]; /**< For future use */
uint32_t reserved3[11]; /**< For future use */
@@ -996,6 +1020,77 @@ struct dmub_rb_cmd_dig1_transmitter_control {
};
/**
+ * DPIA tunnel command parameters.
+ */
+struct dmub_cmd_dig_dpia_control_data {
+ uint8_t enc_id; /** 0 = ENGINE_ID_DIGA, ... */
+ uint8_t action; /** ATOM_TRANSMITER_ACTION_DISABLE/ENABLE/SETUP_VSEMPH */
+ union {
+ uint8_t digmode; /** enum atom_encode_mode_def */
+ uint8_t dplaneset; /** DP voltage swing and pre-emphasis value */
+ } mode_laneset;
+ uint8_t lanenum; /** Lane number 1, 2, 4, 8 */
+ uint32_t symclk_10khz; /** Symbol clock, in units of 10 kHz */
+ uint8_t hpdsel; /** =0: HPD is not assigned */
+ uint8_t digfe_sel; /** DIG stream( front-end ) selection, bit0 - DIG0 FE */
+ uint8_t dpia_id; /** Index of DPIA */
+ uint8_t fec_rdy : 1;
+ uint8_t reserved : 7;
+ uint32_t reserved1;
+};
+
+/**
+ * DMUB command for DPIA tunnel control.
+ */
+struct dmub_rb_cmd_dig1_dpia_control {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_dig_dpia_control_data dpia_control;
+};
+
+/**
+ * SET_CONFIG Command Payload
+ */
+struct set_config_cmd_payload {
+ uint8_t msg_type; /* set config message type */
+ uint8_t msg_data; /* set config message data */
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ */
+struct dmub_cmd_set_config_control_data {
+ struct set_config_cmd_payload cmd_pkt;
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned in case of error */
+};
+
+/**
+ * DMUB command structure for SET_CONFIG command.
+ */
+struct dmub_rb_cmd_set_config_access {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_set_config_control_data set_config_control; /* set config data */
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
+ */
+struct dmub_cmd_mst_alloc_slots_control_data {
+ uint8_t mst_alloc_slots; /* mst slots to be allotted */
+ uint8_t instance; /* DPIA instance */
+ uint8_t immed_status; /* Immediate status returned as there is no outbox msg posted */
+ uint8_t mst_slots_in_use; /* returns slots in use for error cases */
+};
+
+/**
+ * DMUB command structure for the DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
+ */
+struct dmub_rb_cmd_set_mst_alloc_slots {
+ struct dmub_cmd_header header; /* header */
+ struct dmub_cmd_mst_alloc_slots_control_data mst_slots_control; /* mst slots control */
+};
+
+/**
* struct dmub_rb_cmd_dpphy_init - DPPHY init.
*/
struct dmub_rb_cmd_dpphy_init {
@@ -1242,6 +1337,33 @@ struct dmub_rb_cmd_dp_hpd_notify {
struct dp_hpd_data hpd_data;
};
+/**
+ * Definition of a SET_CONFIG reply from DPOA.
+ */
+enum set_config_status {
+ SET_CONFIG_PENDING = 0,
+ SET_CONFIG_ACK_RECEIVED,
+ SET_CONFIG_RX_TIMEOUT,
+ SET_CONFIG_UNKNOWN_ERROR,
+};
+
+/**
+ * Definition of a set_config reply
+ */
+struct set_config_reply_control_data {
+ uint8_t instance; /* DPIA Instance */
+ uint8_t status; /* Set Config reply */
+ uint16_t pad; /* Alignment */
+};
+
+/**
+ * Definition of a DMUB_OUT_CMD__SET_CONFIG_REPLY command.
+ */
+struct dmub_rb_cmd_dp_set_config_reply {
+ struct dmub_cmd_header header;
+ struct set_config_reply_control_data set_config_reply_control;
+};
+
/*
* Command IDs should be treated as stable ABI.
* Do not reuse or modify IDs.
@@ -1280,6 +1402,10 @@ enum dmub_cmd_psr_type {
* Forces PSR enabled until an explicit PSR disable call.
*/
DMUB_CMD__PSR_FORCE_STATIC = 5,
+ /**
+ * Set PSR power option
+ */
+ DMUB_CMD__SET_PSR_POWER_OPT = 7,
};
/**
@@ -1578,6 +1704,44 @@ struct dmub_rb_cmd_psr_force_static {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+struct dmub_cmd_psr_set_power_opt_data {
+ /**
+ * PSR control version.
+ */
+ uint8_t cmd_version;
+ /**
+ * Panel Instance.
+ * Panel instance to identify which psr_state to use.
+ * Currently only instances 0 and 1 are supported.
+ */
+ uint8_t panel_inst;
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[2];
+ /**
+ * PSR power option
+ */
+ uint32_t power_opt;
+};
+
+/**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+struct dmub_rb_cmd_psr_set_power_opt {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+ /**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+ struct dmub_cmd_psr_set_power_opt_data psr_set_power_opt_data;
+};
+
+/**
* Set of HW components that can be locked.
*
* Note: If updating with more HW components, fields
@@ -1730,6 +1894,11 @@ enum dmub_cmd_abm_type {
* Enable/disable fractional duty cycle for backlight PWM.
*/
DMUB_CMD__ABM_SET_PWM_FRAC = 5,
+
+ /**
+ * unregister vertical interrupt after steady state is reached
+ */
+ DMUB_CMD__ABM_PAUSE = 6,
};
/**
@@ -2086,6 +2255,50 @@ struct dmub_rb_cmd_abm_init_config {
};
/**
+ * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command.
+ */
+
+struct dmub_cmd_abm_pause_data {
+
+ /**
+ * Panel Control HW instance mask.
+ * Bit 0 is Panel Control HW instance 0.
+ * Bit 1 is Panel Control HW instance 1.
+ */
+ uint8_t panel_mask;
+
+ /**
+ * OTG hw instance
+ */
+ uint8_t otg_inst;
+
+ /**
+ * Enable or disable ABM pause
+ */
+ uint8_t enable;
+
+ /**
+ * Explicit padding to 4 byte boundary.
+ */
+ uint8_t pad[1];
+};
+
+/**
+ * Definition of a DMUB_CMD__ABM_PAUSE command.
+ */
+struct dmub_rb_cmd_abm_pause {
+ /**
+ * Command header.
+ */
+ struct dmub_cmd_header header;
+
+ /**
+ * Data passed from driver to FW in a DMUB_CMD__ABM_PAUSE command.
+ */
+ struct dmub_cmd_abm_pause_data abm_pause_data;
+};
+
+/**
* Data passed from driver to FW in a DMUB_CMD__QUERY_FEATURE_CAPS command.
*/
struct dmub_cmd_query_feature_caps_data {
@@ -2312,6 +2525,10 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_psr_force_static psr_force_static;
/**
+ * Definition of a DMUB_CMD__SET_PSR_POWER_OPT command.
+ */
+ struct dmub_rb_cmd_psr_set_power_opt psr_set_power_opt;
+ /**
* Definition of a DMUB_CMD__PLAT_54186_WA command.
*/
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
@@ -2364,6 +2581,11 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_abm_init_config abm_init_config;
/**
+ * Definition of a DMUB_CMD__ABM_PAUSE command.
+ */
+ struct dmub_rb_cmd_abm_pause abm_pause;
+
+ /**
* Definition of a DMUB_CMD__DP_AUX_ACCESS command.
*/
struct dmub_rb_cmd_dp_aux_access dp_aux_access;
@@ -2383,6 +2605,18 @@ union dmub_rb_cmd {
*/
struct dmub_rb_cmd_lvtma_control lvtma_control;
/**
+ * Definition of a DMUB_CMD__DPIA_DIG1_DPIA_CONTROL command.
+ */
+ struct dmub_rb_cmd_dig1_dpia_control dig1_dpia_control;
+ /**
+ * Definition of a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command.
+ */
+ struct dmub_rb_cmd_set_config_access set_config_access;
+ /**
+ * Definition of a DMUB_CMD__DPIA_MST_ALLOC_SLOTS command.
+ */
+ struct dmub_rb_cmd_set_mst_alloc_slots set_mst_alloc_slots;
+ /**
* Definition of a DMUB_CMD__EDID_CEA command.
*/
struct dmub_rb_cmd_edid_cea edid_cea;
@@ -2404,6 +2638,10 @@ union dmub_rb_out_cmd {
* HPD notify command.
*/
struct dmub_rb_cmd_dp_hpd_notify dp_hpd_notify;
+ /**
+ * SET_CONFIG reply command.
+ */
+ struct dmub_rb_cmd_dp_set_config_reply set_config_reply;
};
#pragma pack(pop)
@@ -2484,14 +2722,16 @@ static inline bool dmub_rb_full(struct dmub_rb *rb)
static inline bool dmub_rb_push_front(struct dmub_rb *rb,
const union dmub_rb_cmd *cmd)
{
- uint8_t *dst = (uint8_t *)(rb->base_address) + rb->wrpt;
- const uint8_t *src = (const uint8_t *)cmd;
+ uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);
+ const uint64_t *src = (const uint64_t *)cmd;
+ uint8_t i;
if (dmub_rb_full(rb))
return false;
// copying data
- dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
rb->wrpt += DMUB_RB_CMD_SIZE;
@@ -2600,14 +2840,16 @@ static inline bool dmub_rb_peek_offset(struct dmub_rb *rb,
static inline bool dmub_rb_out_front(struct dmub_rb *rb,
union dmub_rb_out_cmd *cmd)
{
- const uint8_t *src = (const uint8_t *)(rb->base_address) + rb->rptr;
- uint8_t *dst = (uint8_t *)cmd;
+ const uint64_t volatile *src = (const uint64_t volatile *)(rb->base_address) + rb->rptr / sizeof(uint64_t);
+ uint64_t *dst = (uint64_t *)cmd;
+ uint8_t i;
if (dmub_rb_empty(rb))
return false;
// copying data
- dmub_memcpy(dst, src, DMUB_RB_CMD_SIZE);
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ *dst++ = *src++;
return true;
}
@@ -2642,14 +2884,17 @@ static inline bool dmub_rb_pop_front(struct dmub_rb *rb)
*/
static inline void dmub_rb_flush_pending(const struct dmub_rb *rb)
{
- uint8_t buf[DMUB_RB_CMD_SIZE];
uint32_t rptr = rb->rptr;
uint32_t wptr = rb->wrpt;
while (rptr != wptr) {
- const uint8_t *data = (const uint8_t *)rb->base_address + rptr;
+ uint64_t volatile *data = (uint64_t volatile *)rb->base_address + rptr / sizeof(uint64_t);
+ uint64_t temp;
+ uint8_t i;
- dmub_memcpy(buf, data, DMUB_RB_CMD_SIZE);
+ for (i = 0; i < DMUB_RB_CMD_SIZE / sizeof(uint64_t); i++)
+ temp = *data++;
rptr += DMUB_RB_CMD_SIZE;
if (rptr >= rb->capacity)
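A minimal sketch (not part of the diff) of queueing one of the new DPIA commands through the ring buffer; the dmub_cmd_header field names (type, sub_type, payload_bytes) are assumed from the existing header layout, and production code would normally go through the dc_dmub_srv wrapper rather than pushing directly:

/* Build and queue a DMUB_CMD__DPIA_SET_CONFIG_ACCESS command. */
static bool example_queue_set_config(struct dmub_rb *rb, uint8_t dpia_inst,
				     uint8_t msg_type, uint8_t msg_data)
{
	union dmub_rb_cmd cmd = { 0 };

	cmd.set_config_access.header.type = DMUB_CMD__DPIA;
	cmd.set_config_access.header.sub_type = DMUB_CMD__DPIA_SET_CONFIG_ACCESS;
	cmd.set_config_access.header.payload_bytes =
		sizeof(cmd.set_config_access) - sizeof(cmd.set_config_access.header);

	cmd.set_config_access.set_config_control.instance = dpia_inst;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_type = msg_type;
	cmd.set_config_access.set_config_control.cmd_pkt.msg_data = msg_data;

	/* dmub_rb_push_front() now copies the command in 64-bit chunks. */
	return dmub_rb_push_front(rb, &cmd);
}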
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
index fc667cb17eb0..10ebf20eaa41 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c
@@ -338,6 +338,10 @@ void dmub_dcn31_enable_dmub_boot_options(struct dmub_srv *dmub, const struct dmu
union dmub_fw_boot_options boot_options = {0};
boot_options.bits.z10_disable = params->disable_z10;
+ boot_options.bits.dpia_supported = params->dpia_supported;
+ boot_options.bits.enable_dpia = params->disable_dpia ? 0 : 1;
+
+ boot_options.bits.sel_mux_phy_c_d_phy_f_g = (dmub->asic == DMUB_ASIC_DCN31B) ? 1 : 0;
REG_WRITE(DMCUB_SCRATCH14, boot_options.all);
}
@@ -432,3 +436,11 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti
REG_GET(DMCUB_REGION3_CW6_TOP_ADDRESS, DMCUB_REGION3_CW6_ENABLE, &is_cw6_enabled);
diag_data->is_cw6_enabled = is_cw6_enabled;
}
+
+bool dmub_dcn31_should_detect(struct dmub_srv *dmub)
+{
+ uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0);
+ bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED;
+ return should_detect;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
index bb62605d2ac8..59ddc81b5a0e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h
@@ -245,4 +245,6 @@ uint32_t dmub_dcn31_get_current_time(struct dmub_srv *dmub);
void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_data *diag_data);
+bool dmub_dcn31_should_detect(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN31_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 75a91cfaf036..56d400ffa7ac 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -208,6 +208,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
break;
case DMUB_ASIC_DCN31:
+ case DMUB_ASIC_DCN31B:
dmub->regs_dcn31 = &dmub_srv_dcn31_regs;
funcs->reset = dmub_dcn31_reset;
funcs->reset_release = dmub_dcn31_reset_release;
@@ -234,7 +235,7 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_outbox0_rptr = dmub_dcn31_set_outbox0_rptr;
funcs->get_diagnostic_data = dmub_dcn31_get_diagnostic_data;
-
+ funcs->should_detect = dmub_dcn31_should_detect;
funcs->get_current_time = dmub_dcn31_get_current_time;
break;
@@ -655,13 +656,19 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us)
{
- uint32_t i;
+ uint32_t i, rptr;
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
for (i = 0; i <= timeout_us; ++i) {
- dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+ rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
+
+ if (rptr > dmub->inbox1_rb.capacity)
+ return DMUB_STATUS_HW_FAILURE;
+
+ dmub->inbox1_rb.rptr = rptr;
+
if (dmub_rb_empty(&dmub->inbox1_rb))
return DMUB_STATUS_OK;
@@ -816,3 +823,11 @@ bool dmub_srv_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnostic_
dmub->hw_funcs.get_diagnostic_data(dmub, diag_data);
return true;
}
+
+bool dmub_srv_should_detect(struct dmub_srv *dmub)
+{
+ if (!dmub->hw_init || !dmub->hw_funcs.should_detect)
+ return false;
+
+ return dmub->hw_funcs.should_detect(dmub);
+}
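A minimal sketch (not part of the diff) of the new detection hint: dmub_srv_should_detect() reports the firmware's "detection required" boot-status bit, so a resume path might poll it and redo link detection when it is set. The detection helper named below is hypothetical:

/* Hypothetical DM-side detection routine, declared only for this sketch. */
void example_redo_link_detection(void);

static void example_detect_if_needed(struct dmub_srv *dmub)
{
	/* dmub_srv_should_detect() returns false when HW is not initialized
	 * or the ASIC provides no should_detect hook. */
	if (dmub_srv_should_detect(dmub))
		example_redo_link_detection();
}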
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
index 70766d534c9c..44502ec919a2 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv_stat.c
@@ -76,6 +76,22 @@ enum dmub_status dmub_srv_stat_get_notification(struct dmub_srv *dmub,
dmub_memcpy((void *)&notify->aux_reply,
(void *)&cmd.dp_aux_reply.reply_data, sizeof(struct aux_reply_data));
break;
+ case DMUB_OUT_CMD__DP_HPD_NOTIFY:
+ if (cmd.dp_hpd_notify.hpd_data.hpd_type == DP_HPD) {
+ notify->type = DMUB_NOTIFICATION_HPD;
+ notify->hpd_status = cmd.dp_hpd_notify.hpd_data.hpd_status;
+ } else {
+ notify->type = DMUB_NOTIFICATION_HPD_IRQ;
+ }
+
+ notify->link_index = cmd.dp_hpd_notify.hpd_data.instance;
+ notify->result = AUX_RET_SUCCESS;
+ break;
+ case DMUB_OUT_CMD__SET_CONFIG_REPLY:
+ notify->type = DMUB_NOTIFICATION_SET_CONFIG_REPLY;
+ notify->link_index = cmd.set_config_reply.set_config_reply_control.instance;
+ notify->sc_status = cmd.set_config_reply.set_config_reply_control.status;
+ break;
default:
notify->type = DMUB_NOTIFICATION_NO_DATA;
break;
diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
index 76a87b682883..b8ffb216ebc4 100644
--- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h
+++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h
@@ -152,6 +152,10 @@ struct bp_transmitter_control {
enum signal_type signal;
enum dc_color_depth color_depth; /* not used for DCE6.0 */
enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum tx_ffe_id txffe_sel; /* used for DCN3 */
+ enum engine_id hpo_engine_id; /* used for DCN3 */
+#endif
struct graphics_object_id connector_obj_id;
/* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should
* be pixel clock * deep_color_ratio (in KHz)
@@ -319,6 +323,10 @@ struct bp_encoder_cap_info {
uint32_t DP_HBR2_EN:1;
uint32_t DP_HBR3_EN:1;
uint32_t HDMI_6GB_EN:1;
+ uint32_t IS_DP2_CAPABLE:1;
+ uint32_t DP_UHBR10_EN:1;
+ uint32_t DP_UHBR13_5_EN:1;
+ uint32_t DP_UHBR20_EN:1;
uint32_t DP_IS_USB_C:1;
uint32_t RESERVED:27;
};
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index 5adc471bef57..e4a2dfacab4c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -211,6 +211,7 @@ enum {
#ifndef ASICREV_IS_GREEN_SARDINE
#define ASICREV_IS_GREEN_SARDINE(eChipRev) ((eChipRev >= GREEN_SARDINE_A0) && (eChipRev < 0xFF))
#endif
+#define DEVICE_ID_NV_13FE 0x13FE // CYAN_SKILLFISH
#define FAMILY_VGH 144
#define DEVICE_ID_VGH_163F 0x163F
#define VANGOGH_A0 0x01
@@ -227,7 +228,7 @@ enum {
#define FAMILY_YELLOW_CARP 146
#define YELLOW_CARP_A0 0x01
-#define YELLOW_CARP_B0 0x1A
+#define YELLOW_CARP_B0 0x20
#define YELLOW_CARP_UNKNOWN 0xFF
#ifndef ASICREV_IS_YELLOW_CARP
diff --git a/drivers/gpu/drm/amd/display/include/dal_types.h b/drivers/gpu/drm/amd/display/include/dal_types.h
index fe75ec834892..012b7c61798c 100644
--- a/drivers/gpu/drm/amd/display/include/dal_types.h
+++ b/drivers/gpu/drm/amd/display/include/dal_types.h
@@ -50,6 +50,7 @@ enum dce_version {
DCN_VERSION_1_0,
DCN_VERSION_1_01,
DCN_VERSION_2_0,
+ DCN_VERSION_2_01,
DCN_VERSION_2_1,
DCN_VERSION_3_0,
DCN_VERSION_3_01,
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index aec7389aff37..ffd0df1701e6 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -80,6 +80,15 @@ enum dpcd_phy_test_patterns {
PHY_TEST_PATTERN_CP2520_1,
PHY_TEST_PATTERN_CP2520_2,
PHY_TEST_PATTERN_CP2520_3, /* same as TPS4 */
+ PHY_TEST_PATTERN_128b_132b_TPS1 = 0x8,
+ PHY_TEST_PATTERN_128b_132b_TPS2 = 0x10,
+ PHY_TEST_PATTERN_PRBS9 = 0x18,
+ PHY_TEST_PATTERN_PRBS11 = 0x20,
+ PHY_TEST_PATTERN_PRBS15 = 0x28,
+ PHY_TEST_PATTERN_PRBS23 = 0x30,
+ PHY_TEST_PATTERN_PRBS31 = 0x38,
+ PHY_TEST_PATTERN_264BIT_CUSTOM = 0x40,
+ PHY_TEST_PATTERN_SQUARE_PULSE = 0x48,
};
enum dpcd_test_dyn_range {
@@ -135,7 +144,14 @@ enum dpcd_training_patterns {
DPCD_TRAINING_PATTERN_1,
DPCD_TRAINING_PATTERN_2,
DPCD_TRAINING_PATTERN_3,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DPCD_TRAINING_PATTERN_4 = 7,
+ DPCD_128b_132b_TPS1 = 1,
+ DPCD_128b_132b_TPS2 = 2,
+ DPCD_128b_132b_TPS2_CDS = 3,
+#else
DPCD_TRAINING_PATTERN_4 = 7
+#endif
};
/* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus
@@ -149,6 +165,7 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_SEQUENCE 0x30c
#define DP_SOURCE_TABLE_REVISION 0x310
#define DP_SOURCE_PAYLOAD_SIZE 0x311
#define DP_SOURCE_SINK_CAP 0x317
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 792652236c61..dd974c428d23 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -328,6 +328,7 @@ struct integrated_info {
uint8_t gu_id[NUMBER_OF_UCHAR_FOR_GUID];
uint8_t checksum;
+ uint8_t fixdpvoltageswing;
} ext_disp_conn_info; /* exiting long long time */
struct available_s_clk_list {
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
index 58bb42ed85ca..84b299ff500a 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h
@@ -140,6 +140,18 @@ enum sync_source {
SYNC_SOURCE_DUAL_GPU_PIN
};
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+enum tx_ffe_id {
+ TX_FFE0 = 0,
+ TX_FFE1,
+ TX_FFE2,
+ TX_FFE3,
+ TX_FFE_DeEmphasis_Only,
+ TX_FFE_PreShoot_Only,
+ TX_FFE_No_FFE,
+};
+#endif
+
/* connector sizes in millimeters - from BiosParserTypes.hpp */
#define CONNECTOR_SIZE_DVI 40
#define CONNECTOR_SIZE_VGA 32
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h
index 33b3d755fe65..01775417cf4b 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_id.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h
@@ -184,6 +184,14 @@ enum engine_id {
ENGINE_ID_DACA,
ENGINE_ID_DACB,
ENGINE_ID_VCE, /* wireless display pseudo-encoder */
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ ENGINE_ID_HPO_0,
+ ENGINE_ID_HPO_1,
+ ENGINE_ID_HPO_DP_0,
+ ENGINE_ID_HPO_DP_1,
+ ENGINE_ID_HPO_DP_2,
+ ENGINE_ID_HPO_DP_3,
+#endif
ENGINE_ID_VIRTUAL,
ENGINE_ID_COUNT,
diff --git a/drivers/gpu/drm/amd/display/include/i2caux_interface.h b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
index c7fbb9c3ad6b..418fbf8c5c3a 100644
--- a/drivers/gpu/drm/amd/display/include/i2caux_interface.h
+++ b/drivers/gpu/drm/amd/display/include/i2caux_interface.h
@@ -41,6 +41,8 @@ struct aux_payload {
* reset it to read data */
bool write;
bool mot;
+ bool write_status_update;
+
uint32_t address;
uint32_t length;
uint8_t *data;
@@ -53,6 +55,7 @@ struct aux_payload {
* zero means "use default value"
*/
uint32_t defer_delay;
+
};
struct aux_command {
diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h
index 32f5274ed34e..424bccd36434 100644
--- a/drivers/gpu/drm/amd/display/include/link_service_types.h
+++ b/drivers/gpu/drm/amd/display/include/link_service_types.h
@@ -53,7 +53,11 @@ enum edp_revision {
};
enum {
- LINK_RATE_REF_FREQ_IN_KHZ = 27000 /*27MHz*/
+ LINK_RATE_REF_FREQ_IN_KHZ = 27000, /*27MHz*/
+ BITS_PER_DP_BYTE = 10,
+ DATA_EFFICIENCY_8b_10b_x10000 = 8000, /* 80% data efficiency */
+ DATA_EFFICIENCY_8b_10b_FEC_EFFICIENCY_x100 = 97, /* 97% data efficiency when FEC is enabled */
+ DATA_EFFICIENCY_128b_132b_x10000 = 9646, /* 96.71% data efficiency x 99.75% downspread factor */
};
enum link_training_result {
@@ -70,6 +74,12 @@ enum link_training_result {
LINK_TRAINING_LINK_LOSS,
/* Abort link training (because sink unplugged) */
LINK_TRAINING_ABORT,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_128b_132b_LT_FAILED,
+ DP_128b_132b_MAX_LOOP_COUNT_REACHED,
+ DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT,
+ DP_128b_132b_CDS_DONE_TIMEOUT,
+#endif
};
enum lttpr_mode {
@@ -80,21 +90,58 @@ enum lttpr_mode {
struct link_training_settings {
struct dc_link_settings link_settings;
- struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
+ /* TODO: turn lane settings below into mandatory fields
+ * as initial lane configuration
+ */
+ struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX];
enum dc_voltage_swing *voltage_swing;
enum dc_pre_emphasis *pre_emphasis;
enum dc_post_cursor2 *post_cursor2;
bool should_set_fec_ready;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ /* TODO - factor lane_settings out because it changes during LT */
+ union dc_dp_ffe_preset *ffe_preset;
+#endif
uint16_t cr_pattern_time;
uint16_t eq_pattern_time;
+ uint16_t cds_pattern_time;
enum dc_dp_training_pattern pattern_for_cr;
enum dc_dp_training_pattern pattern_for_eq;
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ enum dc_dp_training_pattern pattern_for_cds;
+
+ uint32_t eq_wait_time_limit;
+ uint8_t eq_loop_count_limit;
+ uint32_t cds_wait_time_limit;
+#endif
bool enhanced_framing;
- bool allow_invalid_msa_timing_param;
enum lttpr_mode lttpr_mode;
+
+ /* disallow different lanes to have different lane settings */
+ bool disallow_per_lane_settings;
+ /* dpcd lane settings will always use the same hw lane settings
+ * even if it doesn't match requested lane adjust */
+ bool always_match_dpcd_with_hw_lane_settings;
+
+ /*****************************************************************
+ * training states - parameters that can change in link training
+ *****************************************************************/
+ /* TODO: Move hw_lane_settings and dpcd_lane_settings
+ * along with lane adjust, lane align, offset and all
+ * other training states into a new structure called
+ * training states, so link_training_settings becomes
+ * a constant input pre-decided prior to link training.
+ *
+ * The goal is to strictly decouple link training settings
+ * decision making process from link training states to
+ * prevent it from messy code practice of changing training
+ * decision on the fly.
+ */
+ struct dc_lane_settings hw_lane_settings[LANE_COUNT_DP_MAX];
+ union dpcd_training_lane dpcd_lane_settings[LANE_COUNT_DP_MAX];
};
/*TODO: Move this enum test harness*/
@@ -114,13 +161,30 @@ enum dp_test_pattern {
DP_TEST_PATTERN_CP2520_2,
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2,
DP_TEST_PATTERN_CP2520_3,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_TEST_PATTERN_128b_132b_TPS1,
+ DP_TEST_PATTERN_128b_132b_TPS2,
+ DP_TEST_PATTERN_PRBS9,
+ DP_TEST_PATTERN_PRBS11,
+ DP_TEST_PATTERN_PRBS15,
+ DP_TEST_PATTERN_PRBS23,
+ DP_TEST_PATTERN_PRBS31,
+ DP_TEST_PATTERN_264BIT_CUSTOM,
+ DP_TEST_PATTERN_SQUARE_PULSE,
+#endif
/* Link Training Patterns */
DP_TEST_PATTERN_TRAINING_PATTERN1,
DP_TEST_PATTERN_TRAINING_PATTERN2,
DP_TEST_PATTERN_TRAINING_PATTERN3,
DP_TEST_PATTERN_TRAINING_PATTERN4,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE,
+ DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
+ DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE,
+#else
DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4,
+#endif
/* link test patterns*/
DP_TEST_PATTERN_COLOR_SQUARES,
@@ -152,6 +216,22 @@ enum dp_panel_mode {
DP_PANEL_MODE_SPECIAL
};
+enum dpcd_source_sequence {
+ DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_OTG = 1, /*done in apply_single_controller_ctx_to_hw */
+ DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR, /*done in core_link_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_UPDATE_INFO_FRAME, /*done in core_link_enable_stream/dcn20_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_CONNECT_DIG_FE_BE, /*done in perform_link_training_with_retries/dcn20_enable_stream */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_LINK_PHY, /*done in dp_enable_link_phy */
+ DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN, /*done in dp_set_hw_test_pattern */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_AUDIO_STREAM, /*done in dce110_enable_audio_stream */
+ DPCD_SOURCE_SEQ_AFTER_ENABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_unblank */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_DP_VID_STREAM, /*done in enc1_stream_encoder_dp_blank */
+ DPCD_SOURCE_SEQ_AFTER_FIFO_STEER_RESET, /*done in enc1_stream_encoder_dp_blank */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_AUDIO_STREAM, /*done in dce110_disable_audio_stream */
+ DPCD_SOURCE_SEQ_AFTER_DISABLE_LINK_PHY, /*done in dp_disable_link_phy */
+ DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE, /*done in dce110_disable_stream */
+};
+
/* DPCD_ADDR_TRAINING_LANEx_SET registers value */
union dpcd_training_lane_set {
struct {
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 571fcf23cea9..370fad883e33 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -72,6 +72,9 @@
#define DC_LOG_DSC(...) DRM_DEBUG_KMS(__VA_ARGS__)
#define DC_LOG_SMU(...) pr_debug("[SMU_MSG]:"__VA_ARGS__)
#define DC_LOG_DWB(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+#define DC_LOG_DP2(...) DRM_DEBUG_KMS(__VA_ARGS__)
+#endif
struct dal_logger;
@@ -123,6 +126,9 @@ enum dc_log_type {
LOG_MAX_HW_POINTS,
LOG_ALL_TF_CHANNELS,
LOG_SAMPLE_1DLUT,
+#if defined(CONFIG_DRM_AMD_DC_DCN)
+ LOG_DP2,
+#endif
LOG_SECTION_TOTAL_COUNT
};
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ef742d95ef05..64a38f08f497 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -54,12 +54,17 @@ static struct hw_x_point coordinates_x[MAX_HW_POINTS + 2];
* just multiply with 2^gamma which can be computed once, and save the result so we
* recursively compute all the values.
*/
- /*sRGB 709 2.2 2.4 P3*/
-static const int32_t gamma_numerator01[] = { 31308, 180000, 0, 0, 0};
-static const int32_t gamma_numerator02[] = { 12920, 4500, 0, 0, 0};
-static const int32_t gamma_numerator03[] = { 55, 99, 0, 0, 0};
-static const int32_t gamma_numerator04[] = { 55, 99, 0, 0, 0};
-static const int32_t gamma_numerator05[] = { 2400, 2200, 2200, 2400, 2600};
+
+/*
+ * Regamma coefficients are used for both regamma and degamma. Degamma
+ * coefficients are calculated in our formula using the regamma coefficients.
+ */
+ /*sRGB 709 2.2 2.4 P3*/
+static const int32_t numerator01[] = { 31308, 180000, 0, 0, 0};
+static const int32_t numerator02[] = { 12920, 4500, 0, 0, 0};
+static const int32_t numerator03[] = { 55, 99, 0, 0, 0};
+static const int32_t numerator04[] = { 55, 99, 0, 0, 0};
+static const int32_t numerator05[] = { 2400, 2200, 2200, 2400, 2600};
/* one-time setup of X points */
void setup_x_points_distribution(void)
@@ -288,7 +293,8 @@ struct dividers {
};
-static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_transfer_func_predefined type)
+static bool build_coefficients(struct gamma_coefficients *coefficients,
+ enum dc_transfer_func_predefined type)
{
uint32_t i = 0;
@@ -312,15 +318,15 @@ static bool build_coefficients(struct gamma_coefficients *coefficients, enum dc_
do {
coefficients->a0[i] = dc_fixpt_from_fraction(
- gamma_numerator01[index], 10000000);
+ numerator01[index], 10000000);
coefficients->a1[i] = dc_fixpt_from_fraction(
- gamma_numerator02[index], 1000);
+ numerator02[index], 1000);
coefficients->a2[i] = dc_fixpt_from_fraction(
- gamma_numerator03[index], 1000);
+ numerator03[index], 1000);
coefficients->a3[i] = dc_fixpt_from_fraction(
- gamma_numerator04[index], 1000);
+ numerator04[index], 1000);
coefficients->user_gamma[i] = dc_fixpt_from_fraction(
- gamma_numerator05[index], 1000);
+ numerator05[index], 1000);
++i;
} while (i != ARRAY_SIZE(coefficients->a0));
@@ -1685,7 +1691,7 @@ static void apply_degamma_for_user_regamma(struct pwl_float_data_ex *rgb_regamma
struct pwl_float_data_ex *rgb = rgb_regamma;
const struct hw_x_point *coord_x = coordinates_x;
- build_coefficients(&coeff, TRANSFER_FUNCTION_SRGB);
+ build_coefficients(&coeff, true);
i = 0;
while (i != hw_points_num + 1) {
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b99aa232bd8b..bd1d1dc93629 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -155,9 +155,18 @@ static unsigned int calc_v_total_from_duration(
if (duration_in_us > vrr->max_duration_in_us)
duration_in_us = vrr->max_duration_in_us;
- v_total = div64_u64(div64_u64(((unsigned long long)(
- duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
- stream->timing.h_total), 1000);
+ if (dc_is_hdmi_signal(stream->signal)) {
+ uint32_t h_total_up_scaled;
+
+ h_total_up_scaled = stream->timing.h_total * 10000;
+ v_total = div_u64((unsigned long long)duration_in_us
+ * stream->timing.pix_clk_100hz + (h_total_up_scaled - 1),
+ h_total_up_scaled);
+ } else {
+ v_total = div64_u64(div64_u64(((unsigned long long)(
+ duration_in_us) * (stream->timing.pix_clk_100hz / 10)),
+ stream->timing.h_total), 1000);
+ }
/* v_total cannot be less than nominal */
if (v_total < stream->timing.v_total) {
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index e9bd84ec027d..be61975f1470 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -105,6 +105,7 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = remove_display_from_topology_v2(hdcp, index);
@@ -115,8 +116,6 @@ static enum mod_hdcp_status remove_display_from_topology_v3(
HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
@@ -205,6 +204,7 @@ static enum mod_hdcp_status add_display_to_topology_v3(
dtm_cmd->dtm_in_message.topology_update_v3.link_hdcp_cap = link->hdcp_supported_informational;
psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ mutex_unlock(&psp->dtm_context.mutex);
if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
status = add_display_to_topology_v2(hdcp, display);
@@ -214,8 +214,6 @@ static enum mod_hdcp_status add_display_to_topology_v3(
HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
}
- mutex_unlock(&psp->dtm_context.mutex);
-
return status;
}
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f37101f5a777..6d648c889866 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -249,6 +249,8 @@ struct mod_hdcp_link {
uint8_t ddc_line;
uint8_t link_enc_idx;
uint8_t phy_idx;
+ uint8_t dio_output_type;
+ uint8_t dio_output_id;
uint8_t hdcp_supported_informational;
union {
struct mod_hdcp_displayport dp;
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 257f280d3d53..f1a46d16f7ea 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -228,7 +228,7 @@ enum DC_FEATURE_MASK {
DC_FBC_MASK = (1 << 0), //0x1, disabled by default
DC_MULTI_MON_PP_MCLK_SWITCH_MASK = (1 << 1), //0x2, enabled by default
DC_DISABLE_FRACTIONAL_PWM_MASK = (1 << 2), //0x4, disabled by default
- DC_PSR_MASK = (1 << 3), //0x8, disabled by default
+ DC_PSR_MASK = (1 << 3), //0x8, disabled by default for dcn < 3.1
DC_EDP_NO_POWER_SEQUENCING = (1 << 4), //0x10, disabled by default
};
@@ -236,7 +236,8 @@ enum DC_DEBUG_MASK {
DC_DISABLE_PIPE_SPLIT = 0x1,
DC_DISABLE_STUTTER = 0x2,
DC_DISABLE_DSC = 0x4,
- DC_DISABLE_CLOCK_GATING = 0x8
+ DC_DISABLE_CLOCK_GATING = 0x8,
+ DC_DISABLE_PSR = 0x10,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h
new file mode 100755
index 000000000000..c56ca9740933
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_offset.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef _clk_11_0_1_OFFSET_HEADER
+#define _clk_11_0_1_OFFSET_HEADER
+
+#define mmCLK4_0_CLK4_CLK_PLL_REQ 0x460e
+#define mmCLK4_0_CLK4_CLK_PLL_REQ_BASE_IDX 0
+
+#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT 0x467f
+#define mmCLK4_0_CLK4_CLK2_CURRENT_CNT_BASE_IDX 0
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h
new file mode 100755
index 000000000000..168fbf9fcd48
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_11_0_1_sh_mask.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _clk_11_0_1_SH_MASK_HEADER
+#define _clk_11_0_1_SH_MASK_HEADER
+
+//CLK4_0_CLK4_CLK_PLL_REQ
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int__SHIFT 0x0
+#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv__SHIFT 0xc
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac__SHIFT 0x10
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_int_MASK 0x000001FFL
+#define CLK4_0_CLK4_CLK_PLL_REQ__PllSpineDiv_MASK 0x0000F000L
+#define CLK4_0_CLK4_CLK_PLL_REQ__FbMult_frac_MASK 0xFFFF0000L
+
+//CLK4_0_CLK4_CLK2_CURRENT_CNT
+#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT__SHIFT 0x0
+#define CLK4_0_CLK4_CLK2_CURRENT_CNT__CURRENT_COUNT_MASK 0xFFFFFFFFL
+
+#endif
\ No newline at end of file
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
new file mode 100755
index 000000000000..cae1a7e74323
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_offset.h
@@ -0,0 +1,6193 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dcn_2_0_3_OFFSET_HEADER
+#define _dcn_2_0_3_OFFSET_HEADER
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+// base address: 0x0
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL 0x0040
+#define mmPHYPLLA_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL 0x0041
+#define mmPHYPLLB_PIXCLK_RESYNC_CNTL_BASE_IDX 1
+#define mmDP_DTO_DBUF_EN 0x0044
+#define mmDP_DTO_DBUF_EN_BASE_IDX 1
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG 0x0048
+#define mmDPREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmREFCLK_CNTL 0x0049
+#define mmREFCLK_CNTL_BASE_IDX 1
+#define mmREFCLK_CGTT_BLK_CTRL_REG 0x004b
+#define mmREFCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL2 0x004e
+#define mmDCCG_PERFMON_CNTL2_BASE_IDX 1
+#define mmDCCG_DS_DTO_INCR 0x0053
+#define mmDCCG_DS_DTO_INCR_BASE_IDX 1
+#define mmDCCG_DS_DTO_MODULO 0x0054
+#define mmDCCG_DS_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_DS_CNTL 0x0055
+#define mmDCCG_DS_CNTL_BASE_IDX 1
+#define mmDCCG_DS_HW_CAL_INTERVAL 0x0056
+#define mmDCCG_DS_HW_CAL_INTERVAL_BASE_IDX 1
+#define mmDPREFCLK_CNTL 0x0058
+#define mmDPREFCLK_CNTL_BASE_IDX 1
+#define mmDCE_VERSION 0x005e
+#define mmDCE_VERSION_BASE_IDX 1
+#define mmDCCG_GTC_CNTL 0x0060
+#define mmDCCG_GTC_CNTL_BASE_IDX 1
+#define mmDCCG_GTC_DTO_INCR 0x0061
+#define mmDCCG_GTC_DTO_INCR_BASE_IDX 1
+#define mmDCCG_GTC_DTO_MODULO 0x0062
+#define mmDCCG_GTC_DTO_MODULO_BASE_IDX 1
+#define mmDCCG_GTC_CURRENT 0x0063
+#define mmDCCG_GTC_CURRENT_BASE_IDX 1
+#define mmDSCCLK0_DTO_PARAM 0x006c
+#define mmDSCCLK0_DTO_PARAM_BASE_IDX 1
+#define mmMILLISECOND_TIME_BASE_DIV 0x0070
+#define mmMILLISECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDISPCLK_FREQ_CHANGE_CNTL 0x0071
+#define mmDISPCLK_FREQ_CHANGE_CNTL_BASE_IDX 1
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL 0x0072
+#define mmDC_MEM_GLOBAL_PWR_REQ_CNTL_BASE_IDX 1
+#define mmDCCG_PERFMON_CNTL 0x0073
+#define mmDCCG_PERFMON_CNTL_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL 0x0074
+#define mmDCCG_GATE_DISABLE_CNTL_BASE_IDX 1
+#define mmDISPCLK_CGTT_BLK_CTRL_REG 0x0075
+#define mmDISPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmSOCCLK_CGTT_BLK_CTRL_REG 0x0076
+#define mmSOCCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_CAC_STATUS 0x0077
+#define mmDCCG_CAC_STATUS_BASE_IDX 1
+#define mmMICROSECOND_TIME_BASE_DIV 0x007b
+#define mmMICROSECOND_TIME_BASE_DIV_BASE_IDX 1
+#define mmDCCG_GATE_DISABLE_CNTL2 0x007c
+#define mmDCCG_GATE_DISABLE_CNTL2_BASE_IDX 1
+#define mmSYMCLK_CGTT_BLK_CTRL_REG 0x007d
+#define mmSYMCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDCCG_DISP_CNTL_REG 0x007f
+#define mmDCCG_DISP_CNTL_REG_BASE_IDX 1
+#define mmOTG0_PIXEL_RATE_CNTL 0x0080
+#define mmOTG0_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO0_PHASE 0x0081
+#define mmDP_DTO0_PHASE_BASE_IDX 1
+#define mmDP_DTO0_MODULO 0x0082
+#define mmDP_DTO0_MODULO_BASE_IDX 1
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL 0x0083
+#define mmOTG0_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmOTG1_PIXEL_RATE_CNTL 0x0084
+#define mmOTG1_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDP_DTO1_PHASE 0x0085
+#define mmDP_DTO1_PHASE_BASE_IDX 1
+#define mmDP_DTO1_MODULO 0x0086
+#define mmDP_DTO1_MODULO_BASE_IDX 1
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL 0x0087
+#define mmOTG1_PHYPLL_PIXEL_RATE_CNTL_BASE_IDX 1
+#define mmDPPCLK_CGTT_BLK_CTRL_REG 0x0098
+#define mmDPPCLK_CGTT_BLK_CTRL_REG_BASE_IDX 1
+#define mmDPPCLK0_DTO_PARAM 0x0099
+#define mmDPPCLK0_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK1_DTO_PARAM 0x009a
+#define mmDPPCLK1_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK2_DTO_PARAM 0x009b
+#define mmDPPCLK2_DTO_PARAM_BASE_IDX 1
+#define mmDPPCLK3_DTO_PARAM 0x009c
+#define mmDPPCLK3_DTO_PARAM_BASE_IDX 1
+#define mmDCCG_CAC_STATUS2 0x009f
+#define mmDCCG_CAC_STATUS2_BASE_IDX 1
+#define mmSYMCLKA_CLOCK_ENABLE 0x00a0
+#define mmSYMCLKA_CLOCK_ENABLE_BASE_IDX 1
+#define mmSYMCLKB_CLOCK_ENABLE 0x00a1
+#define mmSYMCLKB_CLOCK_ENABLE_BASE_IDX 1
+#define mmDCCG_SOFT_RESET 0x00a6
+#define mmDCCG_SOFT_RESET_BASE_IDX 1
+#define mmDSCCLK_DTO_CTRL 0x00a7
+#define mmDSCCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO_SOURCE 0x00ab
+#define mmDCCG_AUDIO_DTO_SOURCE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_PHASE 0x00ac
+#define mmDCCG_AUDIO_DTO0_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO0_MODULE 0x00ad
+#define mmDCCG_AUDIO_DTO0_MODULE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_PHASE 0x00ae
+#define mmDCCG_AUDIO_DTO1_PHASE_BASE_IDX 1
+#define mmDCCG_AUDIO_DTO1_MODULE 0x00af
+#define mmDCCG_AUDIO_DTO1_MODULE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE 0x00b0
+#define mmDCCG_VSYNC_OTG0_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE 0x00b1
+#define mmDCCG_VSYNC_OTG1_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE 0x00b2
+#define mmDCCG_VSYNC_OTG2_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE 0x00b3
+#define mmDCCG_VSYNC_OTG3_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE 0x00b4
+#define mmDCCG_VSYNC_OTG4_LATCH_VALUE_BASE_IDX 1
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE 0x00b5
+#define mmDCCG_VSYNC_OTG5_LATCH_VALUE_BASE_IDX 1
+#define mmDPPCLK_DTO_CTRL 0x00b6
+#define mmDPPCLK_DTO_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_CTRL 0x00b8
+#define mmDCCG_VSYNC_CNT_CTRL_BASE_IDX 1
+#define mmDCCG_VSYNC_CNT_INT_CTRL 0x00b9
+#define mmDCCG_VSYNC_CNT_INT_CTRL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+// base address: 0x0
+#define mmDENTIST_DISPCLK_CNTL 0x0064
+#define mmDENTIST_DISPCLK_CNTL_BASE_IDX 1
+
+
+// addressBlock: dce_dc_dmu_rbbmif_dispdec
+// base address: 0x0
+#define mmRBBMIF_TIMEOUT 0x005b
+#define mmRBBMIF_TIMEOUT_BASE_IDX 2
+#define mmRBBMIF_STATUS 0x005c
+#define mmRBBMIF_STATUS_BASE_IDX 2
+#define mmRBBMIF_STATUS_2 0x005d
+#define mmRBBMIF_STATUS_2_BASE_IDX 2
+#define mmRBBMIF_INT_STATUS 0x005e
+#define mmRBBMIF_INT_STATUS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS 0x005f
+#define mmRBBMIF_TIMEOUT_DIS_BASE_IDX 2
+#define mmRBBMIF_TIMEOUT_DIS_2 0x0060
+#define mmRBBMIF_TIMEOUT_DIS_2_BASE_IDX 2
+#define mmRBBMIF_STATUS_FLAG 0x0061
+#define mmRBBMIF_STATUS_FLAG_BASE_IDX 2
+
+// addressBlock: dce_dc_hda_az_misc_dispdec
+// base address: 0x0
+#define mmAZ_CLOCK_CNTL 0x0372
+#define mmAZ_CLOCK_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+// base address: 0x0
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x0386
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA 0x0387
+#define mmAZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+// base address: 0x18
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX 0x038c
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA 0x038d
+#define mmAZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+// base address: 0x0
+#define mmAZALIA_CONTROLLER_CLOCK_GATING 0x03c2
+#define mmAZALIA_CONTROLLER_CLOCK_GATING_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO 0x03c3
+#define mmAZALIA_AUDIO_DTO_BASE_IDX 2
+#define mmAZALIA_AUDIO_DTO_CONTROL 0x03c4
+#define mmAZALIA_AUDIO_DTO_CONTROL_BASE_IDX 2
+#define mmAZALIA_SOCCLK_CONTROL 0x03c5
+#define mmAZALIA_SOCCLK_CONTROL_BASE_IDX 2
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE 0x03c6
+#define mmAZALIA_UNDERFLOW_FILLER_SAMPLE_BASE_IDX 2
+#define mmAZALIA_DATA_DMA_CONTROL 0x03c7
+#define mmAZALIA_DATA_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_BDL_DMA_CONTROL 0x03c8
+#define mmAZALIA_BDL_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_RIRB_AND_DP_CONTROL 0x03c9
+#define mmAZALIA_RIRB_AND_DP_CONTROL_BASE_IDX 2
+#define mmAZALIA_CORB_DMA_CONTROL 0x03ca
+#define mmAZALIA_CORB_DMA_CONTROL_BASE_IDX 2
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER 0x03d1
+#define mmAZALIA_APPLICATION_POSITION_IN_CYCLIC_BUFFER_BASE_IDX 2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC 0x03d2
+#define mmAZALIA_CYCLIC_BUFFER_SYNC_BASE_IDX 2
+#define mmAZALIA_GLOBAL_CAPABILITIES 0x03d3
+#define mmAZALIA_GLOBAL_CAPABILITIES_BASE_IDX 2
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY 0x03d4
+#define mmAZALIA_OUTPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL 0x03d5
+#define mmAZALIA_OUTPUT_STREAM_ARBITER_CONTROL_BASE_IDX 2
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY 0x03d6
+#define mmAZALIA_INPUT_PAYLOAD_CAPABILITY_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL0 0x03d9
+#define mmAZALIA_INPUT_CRC0_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL1 0x03da
+#define mmAZALIA_INPUT_CRC0_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL2 0x03db
+#define mmAZALIA_INPUT_CRC0_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_CONTROL3 0x03dc
+#define mmAZALIA_INPUT_CRC0_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC0_RESULT 0x03dd
+#define mmAZALIA_INPUT_CRC0_RESULT_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL0 0x03de
+#define mmAZALIA_INPUT_CRC1_CONTROL0_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL1 0x03df
+#define mmAZALIA_INPUT_CRC1_CONTROL1_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL2 0x03e0
+#define mmAZALIA_INPUT_CRC1_CONTROL2_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_CONTROL3 0x03e1
+#define mmAZALIA_INPUT_CRC1_CONTROL3_BASE_IDX 2
+#define mmAZALIA_INPUT_CRC1_RESULT 0x03e2
+#define mmAZALIA_INPUT_CRC1_RESULT_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_CTRL 0x03ee
+#define mmAZALIA_MEM_PWR_CTRL_BASE_IDX 2
+#define mmAZALIA_MEM_PWR_STATUS 0x03ef
+#define mmAZALIA_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+// base address: 0x0
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID 0x0406
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID 0x0407
+#define mmAZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL 0x0408
+#define mmAZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL 0x0409
+#define mmAZALIA_F0_CODEC_RESYNC_FIFO_CONTROL_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE 0x040a
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES 0x040b
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS 0x040c
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES 0x040d
+#define mmAZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE 0x040e
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET 0x040f
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESET_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID 0x0410
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID_BASE_IDX 2
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION 0x0411
+#define mmAZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION_BASE_IDX 2
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY 0x041c
+#define mmREG_DC_AUDIO_PORT_CONNECTIVITY_BASE_IDX 2
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY 0x041d
+#define mmREG_DC_AUDIO_INPUT_PORT_CONNECTIVITY_BASE_IDX 2
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+// base address: 0x0
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043a
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043b
+#define mmAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+// base address: 0x10
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX 0x043e
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX_BASE_IDX 2
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA 0x043f
+#define mmAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG 0x04cf
+#define mmDCHUBBUB_RET_PATH_DCC_CFG_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0 0x04d0
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1 0x04d1
+#define mmDCHUBBUB_RET_PATH_DCC_CFG0_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0 0x04d2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1 0x04d3
+#define mmDCHUBBUB_RET_PATH_DCC_CFG1_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0 0x04d4
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1 0x04d5
+#define mmDCHUBBUB_RET_PATH_DCC_CFG2_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0 0x04d6
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1 0x04d7
+#define mmDCHUBBUB_RET_PATH_DCC_CFG3_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0 0x04d8
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1 0x04d9
+#define mmDCHUBBUB_RET_PATH_DCC_CFG4_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0 0x04da
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1 0x04db
+#define mmDCHUBBUB_RET_PATH_DCC_CFG5_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0 0x04dc
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1 0x04dd
+#define mmDCHUBBUB_RET_PATH_DCC_CFG6_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0 0x04de
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_0_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1 0x04df
+#define mmDCHUBBUB_RET_PATH_DCC_CFG7_1_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL 0x04ef
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS 0x04f0
+#define mmDCHUBBUB_RET_PATH_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_CRC_CTRL 0x04f1
+#define mmDCHUBBUB_CRC_CTRL_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_R_G 0x04f2
+#define mmDCHUBBUB_CRC0_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC0_VAL_B_A 0x04f3
+#define mmDCHUBBUB_CRC0_VAL_B_A_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_R_G 0x04f4
+#define mmDCHUBBUB_CRC1_VAL_R_G_BASE_IDX 2
+#define mmDCHUBBUB_CRC1_VAL_B_A 0x04f5
+#define mmDCHUBBUB_CRC1_VAL_B_A_BASE_IDX 2
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+// base address: 0x0
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND 0x0505
+#define mmDCHUBBUB_ARB_DF_REQ_OUTSTAND_BASE_IDX 2
+#define mmDCHUBBUB_ARB_SAT_LEVEL 0x0506
+#define mmDCHUBBUB_ARB_SAT_LEVEL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_QOS_FORCE 0x0507
+#define mmDCHUBBUB_ARB_QOS_FORCE_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL 0x0508
+#define mmDCHUBBUB_ARB_DRAM_STATE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A 0x0509
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A 0x050a
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A 0x050d
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B 0x050e
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B 0x050f
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B 0x0512
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C 0x0513
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C 0x0514
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C 0x0517
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_BASE_IDX 2
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D 0x0518
+#define mmDCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D 0x0519
+#define mmDCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D 0x051c
+#define mmDCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_BASE_IDX 2
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL 0x051d
+#define mmDCHUBBUB_ARB_WATERMARK_CHANGE_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE 0x051e
+#define mmDCHUBBUB_ARB_TIMEOUT_ENABLE_BASE_IDX 2
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL 0x051f
+#define mmDCHUBBUB_GLOBAL_TIMER_CNTL_BASE_IDX 2
+#define mmVTG0_CONTROL 0x0528
+#define mmVTG0_CONTROL_BASE_IDX 2
+#define mmVTG1_CONTROL 0x0529
+#define mmVTG1_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SOFT_RESET 0x052e
+#define mmDCHUBBUB_SOFT_RESET_BASE_IDX 2
+#define mmDCHUBBUB_CLOCK_CNTL 0x052f
+#define mmDCHUBBUB_CLOCK_CNTL_BASE_IDX 2
+#define mmDCFCLK_CNTL 0x0530
+#define mmDCFCLK_CNTL_BASE_IDX 2
+#define mmDCHUBBUB_VLINE_SNAPSHOT 0x0533
+#define mmDCHUBBUB_VLINE_SNAPSHOT_BASE_IDX 2
+#define mmDCHUBBUB_CTRL_STATUS 0x0534
+#define mmDCHUBBUB_CTRL_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1 0x053a
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL1_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2 0x053b
+#define mmDCHUBBUB_TIMEOUT_DETECTION_CTRL2_BASE_IDX 2
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS 0x053c
+#define mmDCHUBBUB_TIMEOUT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_INDEX 0x053d
+#define mmDCHUBBUB_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmDCHUBBUB_TEST_DEBUG_DATA 0x053e
+#define mmDCHUBBUB_TEST_DEBUG_DATA_BASE_IDX 2
+#define mmFMON_CTRL 0x0548
+#define mmFMON_CTRL_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+// base address: 0x0
+#define mmHUBP0_DCSURF_SURFACE_CONFIG 0x05e5
+#define mmHUBP0_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_ADDR_CONFIG 0x05e6
+#define mmHUBP0_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_TILING_CONFIG 0x05e7
+#define mmHUBP0_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START 0x05e9
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x05ea
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C 0x05eb
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x05ec
+#define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START 0x05ed
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION 0x05ee
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C 0x05ef
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x05f0
+#define mmHUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG 0x05f1
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C 0x05f2
+#define mmHUBP0_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP0_DCHUBP_CNTL 0x05f3
+#define mmHUBP0_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP0_HUBP_CLK_CNTL 0x05f4
+#define mmHUBP0_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG_DB 0x05f6
+#define mmHUBP0_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP0_HUBPREQ_DEBUG 0x05f7
+#define mmHUBP0_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x05fb
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x05fc
+#define mmHUBP0_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+// base address: 0x0
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH 0x0607
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C 0x0608
+#define mmHUBPREQ0_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS 0x060a
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x060b
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x060c
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x060d
+#define mmHUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS 0x060e
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x060f
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x0610
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x0611
+#define mmHUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x0612
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x0613
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x0614
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x0615
+#define mmHUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x0616
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x0617
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x0618
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x0619
+#define mmHUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL 0x061a
+#define mmHUBPREQ0_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL 0x061b
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2 0x061c
+#define mmHUBPREQ0_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT 0x0620
+#define mmHUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE 0x0621
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH 0x0622
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C 0x0623
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C 0x0624
+#define mmHUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE 0x0625
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0626
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0627
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0628
+#define mmHUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ0_DCN_EXPANSION_MODE 0x062c
+#define mmHUBPREQ0_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ0_DCN_TTU_QOS_WM 0x062d
+#define mmHUBPREQ0_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL 0x062e
+#define mmHUBPREQ0_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0 0x062f
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1 0x0630
+#define mmHUBPREQ0_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0 0x0631
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1 0x0632
+#define mmHUBPREQ0_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0 0x0633
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1 0x0634
+#define mmHUBPREQ0_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_0 0x0646
+#define mmHUBPREQ0_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ0_BLANK_OFFSET_1 0x0647
+#define mmHUBPREQ0_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ0_DST_DIMENSIONS 0x0648
+#define mmHUBPREQ0_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ0_DST_AFTER_SCALER 0x0649
+#define mmHUBPREQ0_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS 0x064a
+#define mmHUBPREQ0_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C 0x064b
+#define mmHUBPREQ0_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0 0x064c
+#define mmHUBPREQ0_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1 0x064d
+#define mmHUBPREQ0_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2 0x064e
+#define mmHUBPREQ0_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3 0x064f
+#define mmHUBPREQ0_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4 0x0650
+#define mmHUBPREQ0_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_0 0x0651
+#define mmHUBPREQ0_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ0_FLIP_PARAMETERS_2 0x0653
+#define mmHUBPREQ0_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_4 0x0658
+#define mmHUBPREQ0_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_5 0x0659
+#define mmHUBPREQ0_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_6 0x065a
+#define mmHUBPREQ0_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ0_NOM_PARAMETERS_7 0x065b
+#define mmHUBPREQ0_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE 0x065c
+#define mmHUBPREQ0_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ0_PER_LINE_DELIVERY 0x065d
+#define mmHUBPREQ0_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ0_CURSOR_SETTINGS 0x065e
+#define mmHUBPREQ0_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ 0x065f
+#define mmHUBPREQ0_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT 0x0660
+#define mmHUBPREQ0_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL 0x0661
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS 0x0662
+#define mmHUBPREQ0_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+// base address: 0x0
+#define mmHUBPRET0_HUBPRET_CONTROL 0x066a
+#define mmHUBPRET0_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL 0x066b
+#define mmHUBPRET0_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS 0x066c
+#define mmHUBPRET0_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0 0x066d
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1 0x066e
+#define mmHUBPRET0_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE0 0x066f
+#define mmHUBPRET0_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE1 0x0670
+#define mmHUBPRET0_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_INTERRUPT 0x0671
+#define mmHUBPRET0_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE 0x0672
+#define mmHUBPRET0_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS 0x0673
+#define mmHUBPRET0_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+// base address: 0x0
+#define mmCURSOR0_0_CURSOR_CONTROL 0x0678
+#define mmCURSOR0_0_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS 0x0679
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH 0x067a
+#define mmCURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_SIZE 0x067b
+#define mmCURSOR0_0_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_POSITION 0x067c
+#define mmCURSOR0_0_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_HOT_SPOT 0x067d
+#define mmCURSOR0_0_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL 0x067e
+#define mmCURSOR0_0_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_DST_OFFSET 0x067f
+#define mmCURSOR0_0_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL 0x0680
+#define mmCURSOR0_0_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS 0x0681
+#define mmCURSOR0_0_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH 0x0682
+#define mmCURSOR0_0_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW 0x0683
+#define mmCURSOR0_0_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_CNTL 0x0684
+#define mmCURSOR0_0_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_QOS_CNTL 0x0685
+#define mmCURSOR0_0_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_STATUS 0x0686
+#define mmCURSOR0_0_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_CNTL 0x0687
+#define mmCURSOR0_0_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_0_DMDATA_SW_DATA 0x0688
+#define mmCURSOR0_0_DMDATA_SW_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+// base address: 0x370
+#define mmHUBP1_DCSURF_SURFACE_CONFIG 0x06c1
+#define mmHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_ADDR_CONFIG 0x06c2
+#define mmHUBP1_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_TILING_CONFIG 0x06c3
+#define mmHUBP1_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START 0x06c5
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION 0x06c6
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C 0x06c7
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x06c8
+#define mmHUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START 0x06c9
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION 0x06ca
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C 0x06cb
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x06cc
+#define mmHUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG 0x06cd
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C 0x06ce
+#define mmHUBP1_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP1_DCHUBP_CNTL 0x06cf
+#define mmHUBP1_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP1_HUBP_CLK_CNTL 0x06d0
+#define mmHUBP1_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG_DB 0x06d2
+#define mmHUBP1_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP1_HUBPREQ_DEBUG 0x06d3
+#define mmHUBP1_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x06d7
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x06d8
+#define mmHUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+// base address: 0x370
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH 0x06e3
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C 0x06e4
+#define mmHUBPREQ1_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS 0x06e6
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x06e7
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x06e8
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x06e9
+#define mmHUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS 0x06ea
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x06eb
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x06ec
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x06ed
+#define mmHUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x06ee
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x06ef
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x06f0
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x06f1
+#define mmHUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x06f2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x06f3
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x06f4
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x06f5
+#define mmHUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL 0x06f6
+#define mmHUBPREQ1_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL 0x06f7
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2 0x06f8
+#define mmHUBPREQ1_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT 0x06fc
+#define mmHUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE 0x06fd
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH 0x06fe
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C 0x06ff
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C 0x0700
+#define mmHUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE 0x0701
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x0702
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C 0x0703
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x0704
+#define mmHUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ1_DCN_EXPANSION_MODE 0x0708
+#define mmHUBPREQ1_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ1_DCN_TTU_QOS_WM 0x0709
+#define mmHUBPREQ1_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL 0x070a
+#define mmHUBPREQ1_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0 0x070b
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1 0x070c
+#define mmHUBPREQ1_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0 0x070d
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1 0x070e
+#define mmHUBPREQ1_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0 0x070f
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1 0x0710
+#define mmHUBPREQ1_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0 0x0711
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1 0x0712
+#define mmHUBPREQ1_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_0 0x0722
+#define mmHUBPREQ1_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ1_BLANK_OFFSET_1 0x0723
+#define mmHUBPREQ1_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ1_DST_DIMENSIONS 0x0724
+#define mmHUBPREQ1_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ1_DST_AFTER_SCALER 0x0725
+#define mmHUBPREQ1_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS 0x0726
+#define mmHUBPREQ1_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C 0x0727
+#define mmHUBPREQ1_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0 0x0728
+#define mmHUBPREQ1_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1 0x0729
+#define mmHUBPREQ1_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2 0x072a
+#define mmHUBPREQ1_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3 0x072b
+#define mmHUBPREQ1_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4 0x072c
+#define mmHUBPREQ1_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_0 0x072d
+#define mmHUBPREQ1_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ1_FLIP_PARAMETERS_2 0x072f
+#define mmHUBPREQ1_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_4 0x0734
+#define mmHUBPREQ1_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_5 0x0735
+#define mmHUBPREQ1_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_6 0x0736
+#define mmHUBPREQ1_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ1_NOM_PARAMETERS_7 0x0737
+#define mmHUBPREQ1_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE 0x0738
+#define mmHUBPREQ1_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ1_PER_LINE_DELIVERY 0x0739
+#define mmHUBPREQ1_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ1_CURSOR_SETTINGS 0x073a
+#define mmHUBPREQ1_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ 0x073b
+#define mmHUBPREQ1_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT 0x073c
+#define mmHUBPREQ1_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL 0x073d
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS 0x073e
+#define mmHUBPREQ1_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+// base address: 0x370
+#define mmHUBPRET1_HUBPRET_CONTROL 0x0746
+#define mmHUBPRET1_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL 0x0747
+#define mmHUBPRET1_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS 0x0748
+#define mmHUBPRET1_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0 0x0749
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1 0x074a
+#define mmHUBPRET1_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE0 0x074b
+#define mmHUBPRET1_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE1 0x074c
+#define mmHUBPRET1_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_INTERRUPT 0x074d
+#define mmHUBPRET1_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE 0x074e
+#define mmHUBPRET1_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS 0x074f
+#define mmHUBPRET1_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+// base address: 0x370
+#define mmCURSOR0_1_CURSOR_CONTROL 0x0754
+#define mmCURSOR0_1_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS 0x0755
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH 0x0756
+#define mmCURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_SIZE 0x0757
+#define mmCURSOR0_1_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_POSITION 0x0758
+#define mmCURSOR0_1_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_HOT_SPOT 0x0759
+#define mmCURSOR0_1_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL 0x075a
+#define mmCURSOR0_1_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_DST_OFFSET 0x075b
+#define mmCURSOR0_1_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL 0x075c
+#define mmCURSOR0_1_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS 0x075d
+#define mmCURSOR0_1_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH 0x075e
+#define mmCURSOR0_1_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW 0x075f
+#define mmCURSOR0_1_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_CNTL 0x0760
+#define mmCURSOR0_1_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_QOS_CNTL 0x0761
+#define mmCURSOR0_1_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_STATUS 0x0762
+#define mmCURSOR0_1_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_CNTL 0x0763
+#define mmCURSOR0_1_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_1_DMDATA_SW_DATA 0x0764
+#define mmCURSOR0_1_DMDATA_SW_DATA_BASE_IDX 2
+
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+// base address: 0x6e0
+#define mmHUBP2_DCSURF_SURFACE_CONFIG 0x079d
+#define mmHUBP2_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_ADDR_CONFIG 0x079e
+#define mmHUBP2_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_TILING_CONFIG 0x079f
+#define mmHUBP2_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START 0x07a1
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION 0x07a2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C 0x07a3
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x07a4
+#define mmHUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START 0x07a5
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION 0x07a6
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C 0x07a7
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x07a8
+#define mmHUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG 0x07a9
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C 0x07aa
+#define mmHUBP2_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP2_DCHUBP_CNTL 0x07ab
+#define mmHUBP2_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP2_HUBP_CLK_CNTL 0x07ac
+#define mmHUBP2_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG_DB 0x07ae
+#define mmHUBP2_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP2_HUBPREQ_DEBUG 0x07af
+#define mmHUBP2_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x07b3
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x07b4
+#define mmHUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+// base address: 0x6e0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH 0x07bf
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C 0x07c0
+#define mmHUBPREQ2_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS 0x07c2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x07c3
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x07c4
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x07c5
+#define mmHUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS 0x07c6
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x07c7
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x07c8
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x07c9
+#define mmHUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x07ca
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x07cb
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x07cc
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x07cd
+#define mmHUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x07ce
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x07cf
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x07d0
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x07d1
+#define mmHUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL 0x07d2
+#define mmHUBPREQ2_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL 0x07d3
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2 0x07d4
+#define mmHUBPREQ2_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT 0x07d8
+#define mmHUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE 0x07d9
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH 0x07da
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C 0x07db
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C 0x07dc
+#define mmHUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE 0x07dd
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x07de
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C 0x07df
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x07e0
+#define mmHUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ2_DCN_EXPANSION_MODE 0x07e4
+#define mmHUBPREQ2_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ2_DCN_TTU_QOS_WM 0x07e5
+#define mmHUBPREQ2_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL 0x07e6
+#define mmHUBPREQ2_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0 0x07e7
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1 0x07e8
+#define mmHUBPREQ2_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0 0x07e9
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1 0x07ea
+#define mmHUBPREQ2_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0 0x07eb
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1 0x07ec
+#define mmHUBPREQ2_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0 0x07ed
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1 0x07ee
+#define mmHUBPREQ2_DCN_CUR1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_0 0x07fe
+#define mmHUBPREQ2_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ2_BLANK_OFFSET_1 0x07ff
+#define mmHUBPREQ2_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ2_DST_DIMENSIONS 0x0800
+#define mmHUBPREQ2_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ2_DST_AFTER_SCALER 0x0801
+#define mmHUBPREQ2_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS 0x0802
+#define mmHUBPREQ2_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C 0x0803
+#define mmHUBPREQ2_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0 0x0804
+#define mmHUBPREQ2_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1 0x0805
+#define mmHUBPREQ2_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2 0x0806
+#define mmHUBPREQ2_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3 0x0807
+#define mmHUBPREQ2_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4 0x0808
+#define mmHUBPREQ2_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_0 0x0809
+#define mmHUBPREQ2_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ2_FLIP_PARAMETERS_2 0x080b
+#define mmHUBPREQ2_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_4 0x0810
+#define mmHUBPREQ2_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_5 0x0811
+#define mmHUBPREQ2_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_6 0x0812
+#define mmHUBPREQ2_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ2_NOM_PARAMETERS_7 0x0813
+#define mmHUBPREQ2_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE 0x0814
+#define mmHUBPREQ2_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ2_PER_LINE_DELIVERY 0x0815
+#define mmHUBPREQ2_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ2_CURSOR_SETTINGS 0x0816
+#define mmHUBPREQ2_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ 0x0817
+#define mmHUBPREQ2_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT 0x0818
+#define mmHUBPREQ2_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL 0x0819
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS 0x081a
+#define mmHUBPREQ2_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+// base address: 0x6e0
+#define mmHUBPRET2_HUBPRET_CONTROL 0x0822
+#define mmHUBPRET2_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL 0x0823
+#define mmHUBPRET2_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS 0x0824
+#define mmHUBPRET2_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0 0x0825
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1 0x0826
+#define mmHUBPRET2_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE0 0x0827
+#define mmHUBPRET2_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE1 0x0828
+#define mmHUBPRET2_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_INTERRUPT 0x0829
+#define mmHUBPRET2_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE 0x082a
+#define mmHUBPRET2_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS 0x082b
+#define mmHUBPRET2_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+// base address: 0x6e0
+#define mmCURSOR0_2_CURSOR_CONTROL 0x0830
+#define mmCURSOR0_2_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS 0x0831
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH 0x0832
+#define mmCURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_SIZE 0x0833
+#define mmCURSOR0_2_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_POSITION 0x0834
+#define mmCURSOR0_2_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_HOT_SPOT 0x0835
+#define mmCURSOR0_2_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL 0x0836
+#define mmCURSOR0_2_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_DST_OFFSET 0x0837
+#define mmCURSOR0_2_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL 0x0838
+#define mmCURSOR0_2_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS 0x0839
+#define mmCURSOR0_2_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH 0x083a
+#define mmCURSOR0_2_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW 0x083b
+#define mmCURSOR0_2_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_CNTL 0x083c
+#define mmCURSOR0_2_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_QOS_CNTL 0x083d
+#define mmCURSOR0_2_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_STATUS 0x083e
+#define mmCURSOR0_2_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_CNTL 0x083f
+#define mmCURSOR0_2_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_2_DMDATA_SW_DATA 0x0840
+#define mmCURSOR0_2_DMDATA_SW_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+// base address: 0xa50
+#define mmHUBP3_DCSURF_SURFACE_CONFIG 0x0879
+#define mmHUBP3_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_ADDR_CONFIG 0x087a
+#define mmHUBP3_DCSURF_ADDR_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_TILING_CONFIG 0x087b
+#define mmHUBP3_DCSURF_TILING_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START 0x087d
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION 0x087e
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C 0x087f
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C 0x0880
+#define mmHUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START 0x0881
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION 0x0882
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C 0x0883
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_START_C_BASE_IDX 2
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C 0x0884
+#define mmHUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG 0x0885
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_BASE_IDX 2
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C 0x0886
+#define mmHUBP3_DCHUBP_REQ_SIZE_CONFIG_C_BASE_IDX 2
+#define mmHUBP3_DCHUBP_CNTL 0x0887
+#define mmHUBP3_DCHUBP_CNTL_BASE_IDX 2
+#define mmHUBP3_HUBP_CLK_CNTL 0x0888
+#define mmHUBP3_HUBP_CLK_CNTL_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG_DB 0x088a
+#define mmHUBP3_HUBPREQ_DEBUG_DB_BASE_IDX 2
+#define mmHUBP3_HUBPREQ_DEBUG 0x088b
+#define mmHUBP3_HUBPREQ_DEBUG_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK 0x088f
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK_BASE_IDX 2
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK 0x0890
+#define mmHUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+// base address: 0xa50
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH 0x089b
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C 0x089c
+#define mmHUBPREQ3_DCSURF_SURFACE_PITCH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS 0x089e
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH 0x089f
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C 0x08a0
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C 0x08a1
+#define mmHUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS 0x08a2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH 0x08a3
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C 0x08a4
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C 0x08a5
+#define mmHUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS 0x08a6
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH 0x08a7
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C 0x08a8
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C 0x08a9
+#define mmHUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS 0x08aa
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH 0x08ab
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C 0x08ac
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C 0x08ad
+#define mmHUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL 0x08ae
+#define mmHUBPREQ3_DCSURF_SURFACE_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL 0x08af
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2 0x08b0
+#define mmHUBPREQ3_DCSURF_FLIP_CONTROL2_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT 0x08b4
+#define mmHUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE 0x08b5
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH 0x08b6
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C 0x08b7
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C 0x08b8
+#define mmHUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE 0x08b9
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH 0x08ba
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C 0x08bb
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C_BASE_IDX 2
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C 0x08bc
+#define mmHUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C_BASE_IDX 2
+#define mmHUBPREQ3_DCN_EXPANSION_MODE 0x08c0
+#define mmHUBPREQ3_DCN_EXPANSION_MODE_BASE_IDX 2
+#define mmHUBPREQ3_DCN_TTU_QOS_WM 0x08c1
+#define mmHUBPREQ3_DCN_TTU_QOS_WM_BASE_IDX 2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL 0x08c2
+#define mmHUBPREQ3_DCN_GLOBAL_TTU_CNTL_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0 0x08c3
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1 0x08c4
+#define mmHUBPREQ3_DCN_SURF0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0 0x08c5
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1 0x08c6
+#define mmHUBPREQ3_DCN_SURF1_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0 0x08c7
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL0_BASE_IDX 2
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1 0x08c8
+#define mmHUBPREQ3_DCN_CUR0_TTU_CNTL1_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_0 0x08da
+#define mmHUBPREQ3_BLANK_OFFSET_0_BASE_IDX 2
+#define mmHUBPREQ3_BLANK_OFFSET_1 0x08db
+#define mmHUBPREQ3_BLANK_OFFSET_1_BASE_IDX 2
+#define mmHUBPREQ3_DST_DIMENSIONS 0x08dc
+#define mmHUBPREQ3_DST_DIMENSIONS_BASE_IDX 2
+#define mmHUBPREQ3_DST_AFTER_SCALER 0x08dd
+#define mmHUBPREQ3_DST_AFTER_SCALER_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS 0x08de
+#define mmHUBPREQ3_PREFETCH_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C 0x08df
+#define mmHUBPREQ3_PREFETCH_SETTINGS_C_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0 0x08e0
+#define mmHUBPREQ3_VBLANK_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1 0x08e1
+#define mmHUBPREQ3_VBLANK_PARAMETERS_1_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2 0x08e2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3 0x08e3
+#define mmHUBPREQ3_VBLANK_PARAMETERS_3_BASE_IDX 2
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4 0x08e4
+#define mmHUBPREQ3_VBLANK_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_0 0x08e5
+#define mmHUBPREQ3_FLIP_PARAMETERS_0_BASE_IDX 2
+#define mmHUBPREQ3_FLIP_PARAMETERS_2 0x08e7
+#define mmHUBPREQ3_FLIP_PARAMETERS_2_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_4 0x08ec
+#define mmHUBPREQ3_NOM_PARAMETERS_4_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_5 0x08ed
+#define mmHUBPREQ3_NOM_PARAMETERS_5_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_6 0x08ee
+#define mmHUBPREQ3_NOM_PARAMETERS_6_BASE_IDX 2
+#define mmHUBPREQ3_NOM_PARAMETERS_7 0x08ef
+#define mmHUBPREQ3_NOM_PARAMETERS_7_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE 0x08f0
+#define mmHUBPREQ3_PER_LINE_DELIVERY_PRE_BASE_IDX 2
+#define mmHUBPREQ3_PER_LINE_DELIVERY 0x08f1
+#define mmHUBPREQ3_PER_LINE_DELIVERY_BASE_IDX 2
+#define mmHUBPREQ3_CURSOR_SETTINGS 0x08f2
+#define mmHUBPREQ3_CURSOR_SETTINGS_BASE_IDX 2
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ 0x08f3
+#define mmHUBPREQ3_REF_FREQ_TO_PIX_FREQ_BASE_IDX 2
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT 0x08f4
+#define mmHUBPREQ3_DST_Y_DELTA_DRQ_LIMIT_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL 0x08f5
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS 0x08f6
+#define mmHUBPREQ3_HUBPREQ_MEM_PWR_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+// base address: 0xa50
+#define mmHUBPRET3_HUBPRET_CONTROL 0x08fe
+#define mmHUBPRET3_HUBPRET_CONTROL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL 0x08ff
+#define mmHUBPRET3_HUBPRET_MEM_PWR_CTRL_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS 0x0900
+#define mmHUBPRET3_HUBPRET_MEM_PWR_STATUS_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0 0x0901
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1 0x0902
+#define mmHUBPRET3_HUBPRET_READ_LINE_CTRL1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE0 0x0903
+#define mmHUBPRET3_HUBPRET_READ_LINE0_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE1 0x0904
+#define mmHUBPRET3_HUBPRET_READ_LINE1_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_INTERRUPT 0x0905
+#define mmHUBPRET3_HUBPRET_INTERRUPT_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE 0x0906
+#define mmHUBPRET3_HUBPRET_READ_LINE_VALUE_BASE_IDX 2
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS 0x0907
+#define mmHUBPRET3_HUBPRET_READ_LINE_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+// base address: 0xa50
+#define mmCURSOR0_3_CURSOR_CONTROL 0x090c
+#define mmCURSOR0_3_CURSOR_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS 0x090d
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH 0x090e
+#define mmCURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_SIZE 0x090f
+#define mmCURSOR0_3_CURSOR_SIZE_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_POSITION 0x0910
+#define mmCURSOR0_3_CURSOR_POSITION_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_HOT_SPOT 0x0911
+#define mmCURSOR0_3_CURSOR_HOT_SPOT_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL 0x0912
+#define mmCURSOR0_3_CURSOR_STEREO_CONTROL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_DST_OFFSET 0x0913
+#define mmCURSOR0_3_CURSOR_DST_OFFSET_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL 0x0914
+#define mmCURSOR0_3_CURSOR_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS 0x0915
+#define mmCURSOR0_3_CURSOR_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH 0x0916
+#define mmCURSOR0_3_DMDATA_ADDRESS_HIGH_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW 0x0917
+#define mmCURSOR0_3_DMDATA_ADDRESS_LOW_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_CNTL 0x0918
+#define mmCURSOR0_3_DMDATA_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_QOS_CNTL 0x0919
+#define mmCURSOR0_3_DMDATA_QOS_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_STATUS 0x091a
+#define mmCURSOR0_3_DMDATA_STATUS_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_CNTL 0x091b
+#define mmCURSOR0_3_DMDATA_SW_CNTL_BASE_IDX 2
+#define mmCURSOR0_3_DMDATA_SW_DATA 0x091c
+#define mmCURSOR0_3_DMDATA_SW_DATA_BASE_IDX 2
+
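+// Note: every register in the blocks above is emitted as an offset/_BASE_IDX pair.
+// The short, self-contained sketch below is illustrative only (the segment base
+// values are placeholders, not real hardware bases): it shows how such a pair is
+// conventionally resolved, i.e. the _BASE_IDX selects a per-IP segment base and
+// the define supplies the dword offset within it, mirroring the
+// reg_offset[...][...][reg##_BASE_IDX] + reg pattern used by SOC15_REG_OFFSET()
+// elsewhere in amdgpu.
+//
+//   #include <stdio.h>
+//   #include <stdint.h>
+//
+//   #define mmHUBP1_DCSURF_SURFACE_CONFIG          0x06c1
+//   #define mmHUBP1_DCSURF_SURFACE_CONFIG_BASE_IDX 2
+//
+//   /* hypothetical per-segment bases indexed by _BASE_IDX (placeholders) */
+//   static const uint32_t dcn_segment_base[] = { 0x00000000, 0x000000c0, 0x000034c0 };
+//
+//   /* combine segment base and register offset, as the driver's lookup does */
+//   #define DCN_REG_OFFSET(reg) (dcn_segment_base[reg##_BASE_IDX] + reg)
+//
+//   int main(void)
+//   {
+//           printf("HUBP1_DCSURF_SURFACE_CONFIG dword offset: 0x%08x\n",
+//                  DCN_REG_OFFSET(mmHUBP1_DCSURF_SURFACE_CONFIG));
+//           return 0;
+//   }
+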
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+// base address: 0x0
+#define mmDPP_TOP0_DPP_CONTROL 0x0cc5
+#define mmDPP_TOP0_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP0_DPP_SOFT_RESET 0x0cc6
+#define mmDPP_TOP0_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G 0x0cc7
+#define mmDPP_TOP0_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A 0x0cc8
+#define mmDPP_TOP0_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP0_DPP_CRC_CTRL 0x0cc9
+#define mmDPP_TOP0_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP0_HOST_READ_CONTROL 0x0cca
+#define mmDPP_TOP0_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+// base address: 0x0
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT 0x0ccf
+#define mmCNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG0_FORMAT_CONTROL 0x0cd0
+#define mmCNVC_CFG0_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R 0x0cd1
+#define mmCNVC_CFG0_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G 0x0cd2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B 0x0cd3
+#define mmCNVC_CFG0_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R 0x0cd4
+#define mmCNVC_CFG0_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G 0x0cd5
+#define mmCNVC_CFG0_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B 0x0cd6
+#define mmCNVC_CFG0_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL 0x0cd7
+#define mmCNVC_CFG0_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA 0x0cd8
+#define mmCNVC_CFG0_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_RED 0x0cd9
+#define mmCNVC_CFG0_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN 0x0cda
+#define mmCNVC_CFG0_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE 0x0cdb
+#define mmCNVC_CFG0_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT 0x0cdd
+#define mmCNVC_CFG0_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+// base address: 0x0
+#define mmCNVC_CUR0_CURSOR0_CONTROL 0x0ce0
+#define mmCNVC_CUR0_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR0 0x0ce1
+#define mmCNVC_CUR0_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_COLOR1 0x0ce2
+#define mmCNVC_CUR0_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS 0x0ce3
+#define mmCNVC_CUR0_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+// base address: 0x0
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT 0x0cea
+#define mmDSCL0_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA 0x0ceb
+#define mmDSCL0_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL0_SCL_MODE 0x0cec
+#define mmDSCL0_SCL_MODE_BASE_IDX 2
+#define mmDSCL0_SCL_TAP_CONTROL 0x0ced
+#define mmDSCL0_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_CONTROL 0x0cee
+#define mmDSCL0_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL0_DSCL_2TAP_CONTROL 0x0cef
+#define mmDSCL0_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL 0x0cf0
+#define mmDSCL0_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO 0x0cf1
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT 0x0cf2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0cf3
+#define mmDSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C 0x0cf4
+#define mmDSCL0_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO 0x0cf5
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT 0x0cf6
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT 0x0cf7
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C 0x0cf8
+#define mmDSCL0_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C 0x0cf9
+#define mmDSCL0_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C 0x0cfa
+#define mmDSCL0_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL0_SCL_BLACK_OFFSET 0x0cfb
+#define mmDSCL0_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL0_DSCL_UPDATE 0x0cfc
+#define mmDSCL0_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL0_DSCL_AUTOCAL 0x0cfd
+#define mmDSCL0_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0cfe
+#define mmDSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0cff
+#define mmDSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL0_OTG_H_BLANK 0x0d00
+#define mmDSCL0_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL0_OTG_V_BLANK 0x0d01
+#define mmDSCL0_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL0_RECOUT_START 0x0d02
+#define mmDSCL0_RECOUT_START_BASE_IDX 2
+#define mmDSCL0_RECOUT_SIZE 0x0d03
+#define mmDSCL0_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL0_MPC_SIZE 0x0d04
+#define mmDSCL0_MPC_SIZE_BASE_IDX 2
+#define mmDSCL0_LB_DATA_FORMAT 0x0d05
+#define mmDSCL0_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL0_LB_MEMORY_CTRL 0x0d06
+#define mmDSCL0_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL0_LB_V_COUNTER 0x0d07
+#define mmDSCL0_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_CTRL 0x0d08
+#define mmDSCL0_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL0_DSCL_MEM_PWR_STATUS 0x0d09
+#define mmDSCL0_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL0_OBUF_CONTROL 0x0d0a
+#define mmDSCL0_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL0_OBUF_MEM_PWR_CTRL 0x0d0b
+#define mmDSCL0_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+// base address: 0x0
+#define mmCM0_CM_CONTROL 0x0d1a
+#define mmCM0_CM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_CONTROL 0x0d1b
+#define mmCM0_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM0_CM_ICSC_C11_C12 0x0d1c
+#define mmCM0_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_C13_C14 0x0d1d
+#define mmCM0_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_C21_C22 0x0d1e
+#define mmCM0_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_C23_C24 0x0d1f
+#define mmCM0_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_C31_C32 0x0d20
+#define mmCM0_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_C33_C34 0x0d21
+#define mmCM0_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C11_C12 0x0d22
+#define mmCM0_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C13_C14 0x0d23
+#define mmCM0_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C21_C22 0x0d24
+#define mmCM0_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C23_C24 0x0d25
+#define mmCM0_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C31_C32 0x0d26
+#define mmCM0_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_ICSC_B_C33_C34 0x0d27
+#define mmCM0_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_CONTROL 0x0d28
+#define mmCM0_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C11_C12 0x0d29
+#define mmCM0_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C13_C14 0x0d2a
+#define mmCM0_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C21_C22 0x0d2b
+#define mmCM0_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C23_C24 0x0d2c
+#define mmCM0_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C31_C32 0x0d2d
+#define mmCM0_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_C33_C34 0x0d2e
+#define mmCM0_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12 0x0d2f
+#define mmCM0_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14 0x0d30
+#define mmCM0_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22 0x0d31
+#define mmCM0_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24 0x0d32
+#define mmCM0_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32 0x0d33
+#define mmCM0_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34 0x0d34
+#define mmCM0_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM0_CM_BIAS_CR_R 0x0d35
+#define mmCM0_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM0_CM_BIAS_Y_G_CB_B 0x0d36
+#define mmCM0_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_CONTROL 0x0d37
+#define mmCM0_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_INDEX 0x0d38
+#define mmCM0_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_DATA 0x0d39
+#define mmCM0_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK 0x0d3a
+#define mmCM0_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B 0x0d3b
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G 0x0d3c
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R 0x0d3d
+#define mmCM0_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0d3e
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0d3f
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0d40
+#define mmCM0_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B 0x0d41
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B 0x0d42
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G 0x0d43
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G 0x0d44
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R 0x0d45
+#define mmCM0_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R 0x0d46
+#define mmCM0_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1 0x0d47
+#define mmCM0_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3 0x0d48
+#define mmCM0_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5 0x0d49
+#define mmCM0_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7 0x0d4a
+#define mmCM0_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9 0x0d4b
+#define mmCM0_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11 0x0d4c
+#define mmCM0_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13 0x0d4d
+#define mmCM0_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15 0x0d4e
+#define mmCM0_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B 0x0d4f
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G 0x0d50
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R 0x0d51
+#define mmCM0_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0d52
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0d53
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0d54
+#define mmCM0_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B 0x0d55
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B 0x0d56
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G 0x0d57
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G 0x0d58
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R 0x0d59
+#define mmCM0_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R 0x0d5a
+#define mmCM0_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1 0x0d5b
+#define mmCM0_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3 0x0d5c
+#define mmCM0_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5 0x0d5d
+#define mmCM0_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7 0x0d5e
+#define mmCM0_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9 0x0d5f
+#define mmCM0_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11 0x0d60
+#define mmCM0_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13 0x0d61
+#define mmCM0_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15 0x0d62
+#define mmCM0_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_CONTROL 0x0d63
+#define mmCM0_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_INDEX 0x0d64
+#define mmCM0_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_DATA 0x0d65
+#define mmCM0_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0d66
+#define mmCM0_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B 0x0d67
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G 0x0d68
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R 0x0d69
+#define mmCM0_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0d6a
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0d6b
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0d6c
+#define mmCM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0d6d
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0d6e
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0d6f
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0d70
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0d71
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0d72
+#define mmCM0_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1 0x0d73
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3 0x0d74
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5 0x0d75
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7 0x0d76
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9 0x0d77
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11 0x0d78
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13 0x0d79
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15 0x0d7a
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17 0x0d7b
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19 0x0d7c
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21 0x0d7d
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23 0x0d7e
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25 0x0d7f
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27 0x0d80
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29 0x0d81
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31 0x0d82
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33 0x0d83
+#define mmCM0_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B 0x0d84
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G 0x0d85
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R 0x0d86
+#define mmCM0_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0d87
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0d88
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0d89
+#define mmCM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0d8a
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0d8b
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0d8c
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0d8d
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0d8e
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0d8f
+#define mmCM0_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1 0x0d90
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3 0x0d91
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5 0x0d92
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7 0x0d93
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9 0x0d94
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11 0x0d95
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13 0x0d96
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15 0x0d97
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17 0x0d98
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19 0x0d99
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21 0x0d9a
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23 0x0d9b
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25 0x0d9c
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27 0x0d9d
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29 0x0d9e
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31 0x0d9f
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33 0x0da0
+#define mmCM0_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_HDR_MULT_COEF 0x0da1
+#define mmCM0_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL 0x0da2
+#define mmCM0_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS 0x0da3
+#define mmCM0_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM0_CM_DEALPHA 0x0da5
+#define mmCM0_CM_DEALPHA_BASE_IDX 2
+#define mmCM0_CM_COEF_FORMAT 0x0da6
+#define mmCM0_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM0_CM_SHAPER_CONTROL 0x0da7
+#define mmCM0_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_R 0x0da8
+#define mmCM0_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_G 0x0da9
+#define mmCM0_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_OFFSET_B 0x0daa
+#define mmCM0_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_R 0x0dab
+#define mmCM0_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_SCALE_G_B 0x0dac
+#define mmCM0_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_INDEX 0x0dad
+#define mmCM0_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_DATA 0x0dae
+#define mmCM0_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK 0x0daf
+#define mmCM0_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B 0x0db0
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G 0x0db1
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R 0x0db2
+#define mmCM0_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B 0x0db3
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G 0x0db4
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R 0x0db5
+#define mmCM0_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1 0x0db6
+#define mmCM0_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3 0x0db7
+#define mmCM0_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5 0x0db8
+#define mmCM0_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7 0x0db9
+#define mmCM0_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9 0x0dba
+#define mmCM0_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11 0x0dbb
+#define mmCM0_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13 0x0dbc
+#define mmCM0_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15 0x0dbd
+#define mmCM0_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17 0x0dbe
+#define mmCM0_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19 0x0dbf
+#define mmCM0_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21 0x0dc0
+#define mmCM0_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23 0x0dc1
+#define mmCM0_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25 0x0dc2
+#define mmCM0_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27 0x0dc3
+#define mmCM0_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29 0x0dc4
+#define mmCM0_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31 0x0dc5
+#define mmCM0_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33 0x0dc6
+#define mmCM0_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B 0x0dc7
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G 0x0dc8
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R 0x0dc9
+#define mmCM0_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B 0x0dca
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G 0x0dcb
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R 0x0dcc
+#define mmCM0_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1 0x0dcd
+#define mmCM0_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3 0x0dce
+#define mmCM0_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5 0x0dcf
+#define mmCM0_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7 0x0dd0
+#define mmCM0_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9 0x0dd1
+#define mmCM0_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11 0x0dd2
+#define mmCM0_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13 0x0dd3
+#define mmCM0_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15 0x0dd4
+#define mmCM0_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17 0x0dd5
+#define mmCM0_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19 0x0dd6
+#define mmCM0_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21 0x0dd7
+#define mmCM0_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23 0x0dd8
+#define mmCM0_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25 0x0dd9
+#define mmCM0_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27 0x0dda
+#define mmCM0_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29 0x0ddb
+#define mmCM0_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31 0x0ddc
+#define mmCM0_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33 0x0ddd
+#define mmCM0_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_CTRL2 0x0dde
+#define mmCM0_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM0_CM_MEM_PWR_STATUS2 0x0ddf
+#define mmCM0_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM0_CM_3DLUT_MODE 0x0de0
+#define mmCM0_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM0_CM_3DLUT_INDEX 0x0de1
+#define mmCM0_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA 0x0de2
+#define mmCM0_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM0_CM_3DLUT_DATA_30BIT 0x0de3
+#define mmCM0_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL 0x0de4
+#define mmCM0_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR 0x0de5
+#define mmCM0_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R 0x0de6
+#define mmCM0_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G 0x0de7
+#define mmCM0_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B 0x0de8
+#define mmCM0_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_INDEX 0x0de9
+#define mmCM0_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM0_CM_TEST_DEBUG_DATA 0x0dea
+#define mmCM0_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+// base address: 0x5ac
+#define mmDPP_TOP1_DPP_CONTROL 0x0e30
+#define mmDPP_TOP1_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP1_DPP_SOFT_RESET 0x0e31
+#define mmDPP_TOP1_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G 0x0e32
+#define mmDPP_TOP1_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A 0x0e33
+#define mmDPP_TOP1_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP1_DPP_CRC_CTRL 0x0e34
+#define mmDPP_TOP1_DPP_CRC_CTRL_BASE_IDX 2
+#define mmDPP_TOP1_HOST_READ_CONTROL 0x0e35
+#define mmDPP_TOP1_HOST_READ_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+// base address: 0x5ac
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT 0x0e3a
+#define mmCNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG1_FORMAT_CONTROL 0x0e3b
+#define mmCNVC_CFG1_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R 0x0e3c
+#define mmCNVC_CFG1_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G 0x0e3d
+#define mmCNVC_CFG1_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B 0x0e3e
+#define mmCNVC_CFG1_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R 0x0e3f
+#define mmCNVC_CFG1_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G 0x0e40
+#define mmCNVC_CFG1_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B 0x0e41
+#define mmCNVC_CFG1_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL 0x0e42
+#define mmCNVC_CFG1_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA 0x0e43
+#define mmCNVC_CFG1_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_RED 0x0e44
+#define mmCNVC_CFG1_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN 0x0e45
+#define mmCNVC_CFG1_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE 0x0e46
+#define mmCNVC_CFG1_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT 0x0e48
+#define mmCNVC_CFG1_ALPHA_2BIT_LUT_BASE_IDX 2
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+// base address: 0x5ac
+#define mmCNVC_CUR1_CURSOR0_CONTROL 0x0e4b
+#define mmCNVC_CUR1_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR0 0x0e4c
+#define mmCNVC_CUR1_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_COLOR1 0x0e4d
+#define mmCNVC_CUR1_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS 0x0e4e
+#define mmCNVC_CUR1_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+// base address: 0x5ac
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT 0x0e55
+#define mmDSCL1_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA 0x0e56
+#define mmDSCL1_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL1_SCL_MODE 0x0e57
+#define mmDSCL1_SCL_MODE_BASE_IDX 2
+#define mmDSCL1_SCL_TAP_CONTROL 0x0e58
+#define mmDSCL1_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_CONTROL 0x0e59
+#define mmDSCL1_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL1_DSCL_2TAP_CONTROL 0x0e5a
+#define mmDSCL1_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL 0x0e5b
+#define mmDSCL1_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO 0x0e5c
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT 0x0e5d
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0e5e
+#define mmDSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C 0x0e5f
+#define mmDSCL1_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO 0x0e60
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT 0x0e61
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT 0x0e62
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C 0x0e63
+#define mmDSCL1_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C 0x0e64
+#define mmDSCL1_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C 0x0e65
+#define mmDSCL1_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL1_SCL_BLACK_OFFSET 0x0e66
+#define mmDSCL1_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL1_DSCL_UPDATE 0x0e67
+#define mmDSCL1_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL1_DSCL_AUTOCAL 0x0e68
+#define mmDSCL1_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0e69
+#define mmDSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0e6a
+#define mmDSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL1_OTG_H_BLANK 0x0e6b
+#define mmDSCL1_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL1_OTG_V_BLANK 0x0e6c
+#define mmDSCL1_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL1_RECOUT_START 0x0e6d
+#define mmDSCL1_RECOUT_START_BASE_IDX 2
+#define mmDSCL1_RECOUT_SIZE 0x0e6e
+#define mmDSCL1_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL1_MPC_SIZE 0x0e6f
+#define mmDSCL1_MPC_SIZE_BASE_IDX 2
+#define mmDSCL1_LB_DATA_FORMAT 0x0e70
+#define mmDSCL1_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL1_LB_MEMORY_CTRL 0x0e71
+#define mmDSCL1_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL1_LB_V_COUNTER 0x0e72
+#define mmDSCL1_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_CTRL 0x0e73
+#define mmDSCL1_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL1_DSCL_MEM_PWR_STATUS 0x0e74
+#define mmDSCL1_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL1_OBUF_CONTROL 0x0e75
+#define mmDSCL1_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL1_OBUF_MEM_PWR_CTRL 0x0e76
+#define mmDSCL1_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+// base address: 0x5ac
+#define mmCM1_CM_CONTROL 0x0e85
+#define mmCM1_CM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_CONTROL 0x0e86
+#define mmCM1_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM1_CM_ICSC_C11_C12 0x0e87
+#define mmCM1_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_C13_C14 0x0e88
+#define mmCM1_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_C21_C22 0x0e89
+#define mmCM1_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_C23_C24 0x0e8a
+#define mmCM1_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_C31_C32 0x0e8b
+#define mmCM1_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_C33_C34 0x0e8c
+#define mmCM1_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C11_C12 0x0e8d
+#define mmCM1_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C13_C14 0x0e8e
+#define mmCM1_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C21_C22 0x0e8f
+#define mmCM1_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C23_C24 0x0e90
+#define mmCM1_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C31_C32 0x0e91
+#define mmCM1_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_ICSC_B_C33_C34 0x0e92
+#define mmCM1_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_CONTROL 0x0e93
+#define mmCM1_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C11_C12 0x0e94
+#define mmCM1_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C13_C14 0x0e95
+#define mmCM1_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C21_C22 0x0e96
+#define mmCM1_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C23_C24 0x0e97
+#define mmCM1_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C31_C32 0x0e98
+#define mmCM1_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_C33_C34 0x0e99
+#define mmCM1_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12 0x0e9a
+#define mmCM1_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14 0x0e9b
+#define mmCM1_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22 0x0e9c
+#define mmCM1_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24 0x0e9d
+#define mmCM1_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32 0x0e9e
+#define mmCM1_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34 0x0e9f
+#define mmCM1_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM1_CM_BIAS_CR_R 0x0ea0
+#define mmCM1_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM1_CM_BIAS_Y_G_CB_B 0x0ea1
+#define mmCM1_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_CONTROL 0x0ea2
+#define mmCM1_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_INDEX 0x0ea3
+#define mmCM1_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_DATA 0x0ea4
+#define mmCM1_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK 0x0ea5
+#define mmCM1_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B 0x0ea6
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G 0x0ea7
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R 0x0ea8
+#define mmCM1_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B 0x0ea9
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G 0x0eaa
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R 0x0eab
+#define mmCM1_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B 0x0eac
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B 0x0ead
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G 0x0eae
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G 0x0eaf
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R 0x0eb0
+#define mmCM1_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R 0x0eb1
+#define mmCM1_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1 0x0eb2
+#define mmCM1_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3 0x0eb3
+#define mmCM1_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5 0x0eb4
+#define mmCM1_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7 0x0eb5
+#define mmCM1_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9 0x0eb6
+#define mmCM1_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11 0x0eb7
+#define mmCM1_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13 0x0eb8
+#define mmCM1_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15 0x0eb9
+#define mmCM1_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B 0x0eba
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G 0x0ebb
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R 0x0ebc
+#define mmCM1_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B 0x0ebd
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G 0x0ebe
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R 0x0ebf
+#define mmCM1_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B 0x0ec0
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B 0x0ec1
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G 0x0ec2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G 0x0ec3
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R 0x0ec4
+#define mmCM1_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R 0x0ec5
+#define mmCM1_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1 0x0ec6
+#define mmCM1_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3 0x0ec7
+#define mmCM1_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5 0x0ec8
+#define mmCM1_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7 0x0ec9
+#define mmCM1_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9 0x0eca
+#define mmCM1_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11 0x0ecb
+#define mmCM1_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13 0x0ecc
+#define mmCM1_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15 0x0ecd
+#define mmCM1_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_CONTROL 0x0ece
+#define mmCM1_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_INDEX 0x0ecf
+#define mmCM1_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_DATA 0x0ed0
+#define mmCM1_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x0ed1
+#define mmCM1_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B 0x0ed2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G 0x0ed3
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R 0x0ed4
+#define mmCM1_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x0ed5
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x0ed6
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x0ed7
+#define mmCM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B 0x0ed8
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B 0x0ed9
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G 0x0eda
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G 0x0edb
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R 0x0edc
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R 0x0edd
+#define mmCM1_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1 0x0ede
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3 0x0edf
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5 0x0ee0
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7 0x0ee1
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9 0x0ee2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11 0x0ee3
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13 0x0ee4
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15 0x0ee5
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17 0x0ee6
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19 0x0ee7
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21 0x0ee8
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23 0x0ee9
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25 0x0eea
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27 0x0eeb
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29 0x0eec
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31 0x0eed
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33 0x0eee
+#define mmCM1_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B 0x0eef
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G 0x0ef0
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R 0x0ef1
+#define mmCM1_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x0ef2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x0ef3
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x0ef4
+#define mmCM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B 0x0ef5
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B 0x0ef6
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G 0x0ef7
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G 0x0ef8
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R 0x0ef9
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R 0x0efa
+#define mmCM1_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1 0x0efb
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3 0x0efc
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5 0x0efd
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7 0x0efe
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9 0x0eff
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11 0x0f00
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13 0x0f01
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15 0x0f02
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17 0x0f03
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19 0x0f04
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21 0x0f05
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23 0x0f06
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25 0x0f07
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27 0x0f08
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29 0x0f09
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31 0x0f0a
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33 0x0f0b
+#define mmCM1_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_HDR_MULT_COEF 0x0f0c
+#define mmCM1_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL 0x0f0d
+#define mmCM1_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS 0x0f0e
+#define mmCM1_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM1_CM_DEALPHA 0x0f10
+#define mmCM1_CM_DEALPHA_BASE_IDX 2
+#define mmCM1_CM_COEF_FORMAT 0x0f11
+#define mmCM1_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM1_CM_SHAPER_CONTROL 0x0f12
+#define mmCM1_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_R 0x0f13
+#define mmCM1_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_G 0x0f14
+#define mmCM1_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_OFFSET_B 0x0f15
+#define mmCM1_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_R 0x0f16
+#define mmCM1_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_SCALE_G_B 0x0f17
+#define mmCM1_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_INDEX 0x0f18
+#define mmCM1_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_DATA 0x0f19
+#define mmCM1_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK 0x0f1a
+#define mmCM1_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B 0x0f1b
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G 0x0f1c
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R 0x0f1d
+#define mmCM1_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B 0x0f1e
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G 0x0f1f
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R 0x0f20
+#define mmCM1_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1 0x0f21
+#define mmCM1_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3 0x0f22
+#define mmCM1_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5 0x0f23
+#define mmCM1_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7 0x0f24
+#define mmCM1_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9 0x0f25
+#define mmCM1_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11 0x0f26
+#define mmCM1_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13 0x0f27
+#define mmCM1_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15 0x0f28
+#define mmCM1_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17 0x0f29
+#define mmCM1_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19 0x0f2a
+#define mmCM1_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21 0x0f2b
+#define mmCM1_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23 0x0f2c
+#define mmCM1_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25 0x0f2d
+#define mmCM1_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27 0x0f2e
+#define mmCM1_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29 0x0f2f
+#define mmCM1_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31 0x0f30
+#define mmCM1_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33 0x0f31
+#define mmCM1_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B 0x0f32
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G 0x0f33
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R 0x0f34
+#define mmCM1_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B 0x0f35
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G 0x0f36
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R 0x0f37
+#define mmCM1_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1 0x0f38
+#define mmCM1_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3 0x0f39
+#define mmCM1_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5 0x0f3a
+#define mmCM1_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7 0x0f3b
+#define mmCM1_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9 0x0f3c
+#define mmCM1_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11 0x0f3d
+#define mmCM1_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13 0x0f3e
+#define mmCM1_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15 0x0f3f
+#define mmCM1_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17 0x0f40
+#define mmCM1_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19 0x0f41
+#define mmCM1_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21 0x0f42
+#define mmCM1_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23 0x0f43
+#define mmCM1_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25 0x0f44
+#define mmCM1_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27 0x0f45
+#define mmCM1_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29 0x0f46
+#define mmCM1_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31 0x0f47
+#define mmCM1_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33 0x0f48
+#define mmCM1_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_CTRL2 0x0f49
+#define mmCM1_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM1_CM_MEM_PWR_STATUS2 0x0f4a
+#define mmCM1_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM1_CM_3DLUT_MODE 0x0f4b
+#define mmCM1_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM1_CM_3DLUT_INDEX 0x0f4c
+#define mmCM1_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA 0x0f4d
+#define mmCM1_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM1_CM_3DLUT_DATA_30BIT 0x0f4e
+#define mmCM1_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL 0x0f4f
+#define mmCM1_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR 0x0f50
+#define mmCM1_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R 0x0f51
+#define mmCM1_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G 0x0f52
+#define mmCM1_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B 0x0f53
+#define mmCM1_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_INDEX 0x0f54
+#define mmCM1_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM1_CM_TEST_DEBUG_DATA 0x0f55
+#define mmCM1_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+// base address: 0xb58
+#define mmDPP_TOP2_DPP_CONTROL 0x0f9b
+#define mmDPP_TOP2_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP2_DPP_SOFT_RESET 0x0f9c
+#define mmDPP_TOP2_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G 0x0f9d
+#define mmDPP_TOP2_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A 0x0f9e
+#define mmDPP_TOP2_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP2_DPP_CRC_CTRL 0x0f9f
+#define mmDPP_TOP2_DPP_CRC_CTRL_BASE_IDX 2
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+// base address: 0xb58
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT 0x0fa5
+#define mmCNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG2_FORMAT_CONTROL 0x0fa6
+#define mmCNVC_CFG2_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R 0x0fa7
+#define mmCNVC_CFG2_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G 0x0fa8
+#define mmCNVC_CFG2_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B 0x0fa9
+#define mmCNVC_CFG2_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R 0x0faa
+#define mmCNVC_CFG2_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G 0x0fab
+#define mmCNVC_CFG2_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B 0x0fac
+#define mmCNVC_CFG2_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL 0x0fad
+#define mmCNVC_CFG2_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA 0x0fae
+#define mmCNVC_CFG2_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_RED 0x0faf
+#define mmCNVC_CFG2_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN 0x0fb0
+#define mmCNVC_CFG2_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE 0x0fb1
+#define mmCNVC_CFG2_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT 0x0fb3
+#define mmCNVC_CFG2_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+// base address: 0xb58
+#define mmCNVC_CUR2_CURSOR0_CONTROL 0x0fb6
+#define mmCNVC_CUR2_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR0 0x0fb7
+#define mmCNVC_CUR2_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_COLOR1 0x0fb8
+#define mmCNVC_CUR2_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS 0x0fb9
+#define mmCNVC_CUR2_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+// base address: 0xb58
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT 0x0fc0
+#define mmDSCL2_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA 0x0fc1
+#define mmDSCL2_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL2_SCL_MODE 0x0fc2
+#define mmDSCL2_SCL_MODE_BASE_IDX 2
+#define mmDSCL2_SCL_TAP_CONTROL 0x0fc3
+#define mmDSCL2_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_CONTROL 0x0fc4
+#define mmDSCL2_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL2_DSCL_2TAP_CONTROL 0x0fc5
+#define mmDSCL2_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL 0x0fc6
+#define mmDSCL2_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO 0x0fc7
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT 0x0fc8
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C 0x0fc9
+#define mmDSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C 0x0fca
+#define mmDSCL2_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO 0x0fcb
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT 0x0fcc
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT 0x0fcd
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C 0x0fce
+#define mmDSCL2_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C 0x0fcf
+#define mmDSCL2_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C 0x0fd0
+#define mmDSCL2_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL2_SCL_BLACK_OFFSET 0x0fd1
+#define mmDSCL2_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL2_DSCL_UPDATE 0x0fd2
+#define mmDSCL2_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL2_DSCL_AUTOCAL 0x0fd3
+#define mmDSCL2_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x0fd4
+#define mmDSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x0fd5
+#define mmDSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL2_OTG_H_BLANK 0x0fd6
+#define mmDSCL2_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL2_OTG_V_BLANK 0x0fd7
+#define mmDSCL2_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL2_RECOUT_START 0x0fd8
+#define mmDSCL2_RECOUT_START_BASE_IDX 2
+#define mmDSCL2_RECOUT_SIZE 0x0fd9
+#define mmDSCL2_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL2_MPC_SIZE 0x0fda
+#define mmDSCL2_MPC_SIZE_BASE_IDX 2
+#define mmDSCL2_LB_DATA_FORMAT 0x0fdb
+#define mmDSCL2_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL2_LB_MEMORY_CTRL 0x0fdc
+#define mmDSCL2_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL2_LB_V_COUNTER 0x0fdd
+#define mmDSCL2_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_CTRL 0x0fde
+#define mmDSCL2_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL2_DSCL_MEM_PWR_STATUS 0x0fdf
+#define mmDSCL2_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL2_OBUF_CONTROL 0x0fe0
+#define mmDSCL2_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL2_OBUF_MEM_PWR_CTRL 0x0fe1
+#define mmDSCL2_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+// base address: 0xb58
+#define mmCM2_CM_CONTROL 0x0ff0
+#define mmCM2_CM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_CONTROL 0x0ff1
+#define mmCM2_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM2_CM_ICSC_C11_C12 0x0ff2
+#define mmCM2_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_C13_C14 0x0ff3
+#define mmCM2_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_C21_C22 0x0ff4
+#define mmCM2_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_C23_C24 0x0ff5
+#define mmCM2_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_C31_C32 0x0ff6
+#define mmCM2_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_C33_C34 0x0ff7
+#define mmCM2_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C11_C12 0x0ff8
+#define mmCM2_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C13_C14 0x0ff9
+#define mmCM2_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C21_C22 0x0ffa
+#define mmCM2_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C23_C24 0x0ffb
+#define mmCM2_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C31_C32 0x0ffc
+#define mmCM2_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_ICSC_B_C33_C34 0x0ffd
+#define mmCM2_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_CONTROL 0x0ffe
+#define mmCM2_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C11_C12 0x0fff
+#define mmCM2_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C13_C14 0x1000
+#define mmCM2_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C21_C22 0x1001
+#define mmCM2_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C23_C24 0x1002
+#define mmCM2_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C31_C32 0x1003
+#define mmCM2_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_C33_C34 0x1004
+#define mmCM2_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12 0x1005
+#define mmCM2_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14 0x1006
+#define mmCM2_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22 0x1007
+#define mmCM2_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24 0x1008
+#define mmCM2_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32 0x1009
+#define mmCM2_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34 0x100a
+#define mmCM2_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM2_CM_BIAS_CR_R 0x100b
+#define mmCM2_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM2_CM_BIAS_Y_G_CB_B 0x100c
+#define mmCM2_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_CONTROL 0x100d
+#define mmCM2_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_INDEX 0x100e
+#define mmCM2_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_DATA 0x100f
+#define mmCM2_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK 0x1010
+#define mmCM2_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B 0x1011
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G 0x1012
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R 0x1013
+#define mmCM2_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B 0x1014
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1015
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1016
+#define mmCM2_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B 0x1017
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B 0x1018
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G 0x1019
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G 0x101a
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R 0x101b
+#define mmCM2_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R 0x101c
+#define mmCM2_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1 0x101d
+#define mmCM2_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3 0x101e
+#define mmCM2_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5 0x101f
+#define mmCM2_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7 0x1020
+#define mmCM2_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9 0x1021
+#define mmCM2_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11 0x1022
+#define mmCM2_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13 0x1023
+#define mmCM2_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15 0x1024
+#define mmCM2_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B 0x1025
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G 0x1026
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R 0x1027
+#define mmCM2_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1028
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1029
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R 0x102a
+#define mmCM2_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B 0x102b
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B 0x102c
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G 0x102d
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G 0x102e
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R 0x102f
+#define mmCM2_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R 0x1030
+#define mmCM2_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1 0x1031
+#define mmCM2_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3 0x1032
+#define mmCM2_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5 0x1033
+#define mmCM2_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7 0x1034
+#define mmCM2_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9 0x1035
+#define mmCM2_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11 0x1036
+#define mmCM2_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13 0x1037
+#define mmCM2_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15 0x1038
+#define mmCM2_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_CONTROL 0x1039
+#define mmCM2_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_INDEX 0x103a
+#define mmCM2_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_DATA 0x103b
+#define mmCM2_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x103c
+#define mmCM2_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B 0x103d
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G 0x103e
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R 0x103f
+#define mmCM2_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x1040
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x1041
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x1042
+#define mmCM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B 0x1043
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B 0x1044
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G 0x1045
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G 0x1046
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R 0x1047
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R 0x1048
+#define mmCM2_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1 0x1049
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3 0x104a
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5 0x104b
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7 0x104c
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9 0x104d
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11 0x104e
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13 0x104f
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15 0x1050
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17 0x1051
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19 0x1052
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21 0x1053
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23 0x1054
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25 0x1055
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27 0x1056
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29 0x1057
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31 0x1058
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33 0x1059
+#define mmCM2_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B 0x105a
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G 0x105b
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R 0x105c
+#define mmCM2_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x105d
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x105e
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x105f
+#define mmCM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B 0x1060
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B 0x1061
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G 0x1062
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G 0x1063
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R 0x1064
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R 0x1065
+#define mmCM2_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1 0x1066
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3 0x1067
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5 0x1068
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7 0x1069
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9 0x106a
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11 0x106b
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13 0x106c
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15 0x106d
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17 0x106e
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19 0x106f
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21 0x1070
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23 0x1071
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25 0x1072
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27 0x1073
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29 0x1074
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31 0x1075
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33 0x1076
+#define mmCM2_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_HDR_MULT_COEF 0x1077
+#define mmCM2_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL 0x1078
+#define mmCM2_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS 0x1079
+#define mmCM2_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM2_CM_DEALPHA 0x107b
+#define mmCM2_CM_DEALPHA_BASE_IDX 2
+#define mmCM2_CM_COEF_FORMAT 0x107c
+#define mmCM2_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM2_CM_SHAPER_CONTROL 0x107d
+#define mmCM2_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_R 0x107e
+#define mmCM2_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_G 0x107f
+#define mmCM2_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_OFFSET_B 0x1080
+#define mmCM2_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_R 0x1081
+#define mmCM2_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_SCALE_G_B 0x1082
+#define mmCM2_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_INDEX 0x1083
+#define mmCM2_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_DATA 0x1084
+#define mmCM2_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK 0x1085
+#define mmCM2_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B 0x1086
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G 0x1087
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R 0x1088
+#define mmCM2_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B 0x1089
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G 0x108a
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R 0x108b
+#define mmCM2_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1 0x108c
+#define mmCM2_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3 0x108d
+#define mmCM2_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5 0x108e
+#define mmCM2_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7 0x108f
+#define mmCM2_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9 0x1090
+#define mmCM2_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11 0x1091
+#define mmCM2_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13 0x1092
+#define mmCM2_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15 0x1093
+#define mmCM2_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17 0x1094
+#define mmCM2_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19 0x1095
+#define mmCM2_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21 0x1096
+#define mmCM2_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23 0x1097
+#define mmCM2_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25 0x1098
+#define mmCM2_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27 0x1099
+#define mmCM2_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29 0x109a
+#define mmCM2_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31 0x109b
+#define mmCM2_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33 0x109c
+#define mmCM2_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B 0x109d
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G 0x109e
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R 0x109f
+#define mmCM2_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B 0x10a0
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G 0x10a1
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R 0x10a2
+#define mmCM2_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1 0x10a3
+#define mmCM2_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3 0x10a4
+#define mmCM2_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5 0x10a5
+#define mmCM2_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7 0x10a6
+#define mmCM2_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9 0x10a7
+#define mmCM2_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11 0x10a8
+#define mmCM2_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13 0x10a9
+#define mmCM2_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15 0x10aa
+#define mmCM2_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17 0x10ab
+#define mmCM2_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19 0x10ac
+#define mmCM2_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21 0x10ad
+#define mmCM2_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23 0x10ae
+#define mmCM2_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25 0x10af
+#define mmCM2_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27 0x10b0
+#define mmCM2_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29 0x10b1
+#define mmCM2_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31 0x10b2
+#define mmCM2_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33 0x10b3
+#define mmCM2_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_CTRL2 0x10b4
+#define mmCM2_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM2_CM_MEM_PWR_STATUS2 0x10b5
+#define mmCM2_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM2_CM_3DLUT_MODE 0x10b6
+#define mmCM2_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM2_CM_3DLUT_INDEX 0x10b7
+#define mmCM2_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA 0x10b8
+#define mmCM2_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM2_CM_3DLUT_DATA_30BIT 0x10b9
+#define mmCM2_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL 0x10ba
+#define mmCM2_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR 0x10bb
+#define mmCM2_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R 0x10bc
+#define mmCM2_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G 0x10bd
+#define mmCM2_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B 0x10be
+#define mmCM2_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_INDEX 0x10bf
+#define mmCM2_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM2_CM_TEST_DEBUG_DATA 0x10c0
+#define mmCM2_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+// base address: 0x1104
+#define mmDPP_TOP3_DPP_CONTROL 0x1106
+#define mmDPP_TOP3_DPP_CONTROL_BASE_IDX 2
+#define mmDPP_TOP3_DPP_SOFT_RESET 0x1107
+#define mmDPP_TOP3_DPP_SOFT_RESET_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G 0x1108
+#define mmDPP_TOP3_DPP_CRC_VAL_R_G_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A 0x1109
+#define mmDPP_TOP3_DPP_CRC_VAL_B_A_BASE_IDX 2
+#define mmDPP_TOP3_DPP_CRC_CTRL 0x110a
+#define mmDPP_TOP3_DPP_CRC_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+// base address: 0x1104
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT 0x1110
+#define mmCNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT_BASE_IDX 2
+#define mmCNVC_CFG3_FORMAT_CONTROL 0x1111
+#define mmCNVC_CFG3_FORMAT_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R 0x1112
+#define mmCNVC_CFG3_FCNV_FP_BIAS_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G 0x1113
+#define mmCNVC_CFG3_FCNV_FP_BIAS_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B 0x1114
+#define mmCNVC_CFG3_FCNV_FP_BIAS_B_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R 0x1115
+#define mmCNVC_CFG3_FCNV_FP_SCALE_R_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G 0x1116
+#define mmCNVC_CFG3_FCNV_FP_SCALE_G_BASE_IDX 2
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B 0x1117
+#define mmCNVC_CFG3_FCNV_FP_SCALE_B_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL 0x1118
+#define mmCNVC_CFG3_COLOR_KEYER_CONTROL_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA 0x1119
+#define mmCNVC_CFG3_COLOR_KEYER_ALPHA_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_RED 0x111a
+#define mmCNVC_CFG3_COLOR_KEYER_RED_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN 0x111b
+#define mmCNVC_CFG3_COLOR_KEYER_GREEN_BASE_IDX 2
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE 0x111c
+#define mmCNVC_CFG3_COLOR_KEYER_BLUE_BASE_IDX 2
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT 0x111e
+#define mmCNVC_CFG3_ALPHA_2BIT_LUT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+// base address: 0x1104
+#define mmCNVC_CUR3_CURSOR0_CONTROL 0x1121
+#define mmCNVC_CUR3_CURSOR0_CONTROL_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR0 0x1122
+#define mmCNVC_CUR3_CURSOR0_COLOR0_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_COLOR1 0x1123
+#define mmCNVC_CUR3_CURSOR0_COLOR1_BASE_IDX 2
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS 0x1124
+#define mmCNVC_CUR3_CURSOR0_FP_SCALE_BIAS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+// base address: 0x1104
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT 0x112b
+#define mmDSCL3_SCL_COEF_RAM_TAP_SELECT_BASE_IDX 2
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA 0x112c
+#define mmDSCL3_SCL_COEF_RAM_TAP_DATA_BASE_IDX 2
+#define mmDSCL3_SCL_MODE 0x112d
+#define mmDSCL3_SCL_MODE_BASE_IDX 2
+#define mmDSCL3_SCL_TAP_CONTROL 0x112e
+#define mmDSCL3_SCL_TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_CONTROL 0x112f
+#define mmDSCL3_DSCL_CONTROL_BASE_IDX 2
+#define mmDSCL3_DSCL_2TAP_CONTROL 0x1130
+#define mmDSCL3_DSCL_2TAP_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL 0x1131
+#define mmDSCL3_SCL_MANUAL_REPLICATE_CONTROL_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO 0x1132
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT 0x1133
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C 0x1134
+#define mmDSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C 0x1135
+#define mmDSCL3_SCL_HORZ_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO 0x1136
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT 0x1137
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT 0x1138
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C 0x1139
+#define mmDSCL3_SCL_VERT_FILTER_SCALE_RATIO_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C 0x113a
+#define mmDSCL3_SCL_VERT_FILTER_INIT_C_BASE_IDX 2
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C 0x113b
+#define mmDSCL3_SCL_VERT_FILTER_INIT_BOT_C_BASE_IDX 2
+#define mmDSCL3_SCL_BLACK_OFFSET 0x113c
+#define mmDSCL3_SCL_BLACK_OFFSET_BASE_IDX 2
+#define mmDSCL3_DSCL_UPDATE 0x113d
+#define mmDSCL3_DSCL_UPDATE_BASE_IDX 2
+#define mmDSCL3_DSCL_AUTOCAL 0x113e
+#define mmDSCL3_DSCL_AUTOCAL_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT 0x113f
+#define mmDSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT_BASE_IDX 2
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM 0x1140
+#define mmDSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM_BASE_IDX 2
+#define mmDSCL3_OTG_H_BLANK 0x1141
+#define mmDSCL3_OTG_H_BLANK_BASE_IDX 2
+#define mmDSCL3_OTG_V_BLANK 0x1142
+#define mmDSCL3_OTG_V_BLANK_BASE_IDX 2
+#define mmDSCL3_RECOUT_START 0x1143
+#define mmDSCL3_RECOUT_START_BASE_IDX 2
+#define mmDSCL3_RECOUT_SIZE 0x1144
+#define mmDSCL3_RECOUT_SIZE_BASE_IDX 2
+#define mmDSCL3_MPC_SIZE 0x1145
+#define mmDSCL3_MPC_SIZE_BASE_IDX 2
+#define mmDSCL3_LB_DATA_FORMAT 0x1146
+#define mmDSCL3_LB_DATA_FORMAT_BASE_IDX 2
+#define mmDSCL3_LB_MEMORY_CTRL 0x1147
+#define mmDSCL3_LB_MEMORY_CTRL_BASE_IDX 2
+#define mmDSCL3_LB_V_COUNTER 0x1148
+#define mmDSCL3_LB_V_COUNTER_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_CTRL 0x1149
+#define mmDSCL3_DSCL_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDSCL3_DSCL_MEM_PWR_STATUS 0x114a
+#define mmDSCL3_DSCL_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDSCL3_OBUF_CONTROL 0x114b
+#define mmDSCL3_OBUF_CONTROL_BASE_IDX 2
+#define mmDSCL3_OBUF_MEM_PWR_CTRL 0x114c
+#define mmDSCL3_OBUF_MEM_PWR_CTRL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+// base address: 0x1104
+#define mmCM3_CM_CONTROL 0x115b
+#define mmCM3_CM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_CONTROL 0x115c
+#define mmCM3_CM_ICSC_CONTROL_BASE_IDX 2
+#define mmCM3_CM_ICSC_C11_C12 0x115d
+#define mmCM3_CM_ICSC_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_C13_C14 0x115e
+#define mmCM3_CM_ICSC_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_C21_C22 0x115f
+#define mmCM3_CM_ICSC_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_C23_C24 0x1160
+#define mmCM3_CM_ICSC_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_C31_C32 0x1161
+#define mmCM3_CM_ICSC_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_C33_C34 0x1162
+#define mmCM3_CM_ICSC_C33_C34_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C11_C12 0x1163
+#define mmCM3_CM_ICSC_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C13_C14 0x1164
+#define mmCM3_CM_ICSC_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C21_C22 0x1165
+#define mmCM3_CM_ICSC_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C23_C24 0x1166
+#define mmCM3_CM_ICSC_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C31_C32 0x1167
+#define mmCM3_CM_ICSC_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_ICSC_B_C33_C34 0x1168
+#define mmCM3_CM_ICSC_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_CONTROL 0x1169
+#define mmCM3_CM_GAMUT_REMAP_CONTROL_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C11_C12 0x116a
+#define mmCM3_CM_GAMUT_REMAP_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C13_C14 0x116b
+#define mmCM3_CM_GAMUT_REMAP_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C21_C22 0x116c
+#define mmCM3_CM_GAMUT_REMAP_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C23_C24 0x116d
+#define mmCM3_CM_GAMUT_REMAP_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C31_C32 0x116e
+#define mmCM3_CM_GAMUT_REMAP_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_C33_C34 0x116f
+#define mmCM3_CM_GAMUT_REMAP_C33_C34_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12 0x1170
+#define mmCM3_CM_GAMUT_REMAP_B_C11_C12_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14 0x1171
+#define mmCM3_CM_GAMUT_REMAP_B_C13_C14_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22 0x1172
+#define mmCM3_CM_GAMUT_REMAP_B_C21_C22_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24 0x1173
+#define mmCM3_CM_GAMUT_REMAP_B_C23_C24_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32 0x1174
+#define mmCM3_CM_GAMUT_REMAP_B_C31_C32_BASE_IDX 2
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34 0x1175
+#define mmCM3_CM_GAMUT_REMAP_B_C33_C34_BASE_IDX 2
+#define mmCM3_CM_BIAS_CR_R 0x1176
+#define mmCM3_CM_BIAS_CR_R_BASE_IDX 2
+#define mmCM3_CM_BIAS_Y_G_CB_B 0x1177
+#define mmCM3_CM_BIAS_Y_G_CB_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_CONTROL 0x1178
+#define mmCM3_CM_DGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_INDEX 0x1179
+#define mmCM3_CM_DGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_DATA 0x117a
+#define mmCM3_CM_DGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK 0x117b
+#define mmCM3_CM_DGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B 0x117c
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G 0x117d
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R 0x117e
+#define mmCM3_CM_DGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B 0x117f
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G 0x1180
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R 0x1181
+#define mmCM3_CM_DGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B 0x1182
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B 0x1183
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G 0x1184
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G 0x1185
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R 0x1186
+#define mmCM3_CM_DGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R 0x1187
+#define mmCM3_CM_DGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1 0x1188
+#define mmCM3_CM_DGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3 0x1189
+#define mmCM3_CM_DGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5 0x118a
+#define mmCM3_CM_DGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7 0x118b
+#define mmCM3_CM_DGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9 0x118c
+#define mmCM3_CM_DGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11 0x118d
+#define mmCM3_CM_DGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13 0x118e
+#define mmCM3_CM_DGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15 0x118f
+#define mmCM3_CM_DGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B 0x1190
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G 0x1191
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R 0x1192
+#define mmCM3_CM_DGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B 0x1193
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G 0x1194
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R 0x1195
+#define mmCM3_CM_DGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B 0x1196
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B 0x1197
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G 0x1198
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G 0x1199
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R 0x119a
+#define mmCM3_CM_DGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R 0x119b
+#define mmCM3_CM_DGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1 0x119c
+#define mmCM3_CM_DGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3 0x119d
+#define mmCM3_CM_DGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5 0x119e
+#define mmCM3_CM_DGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7 0x119f
+#define mmCM3_CM_DGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9 0x11a0
+#define mmCM3_CM_DGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11 0x11a1
+#define mmCM3_CM_DGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13 0x11a2
+#define mmCM3_CM_DGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15 0x11a3
+#define mmCM3_CM_DGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_CONTROL 0x11a4
+#define mmCM3_CM_BLNDGAM_CONTROL_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_INDEX 0x11a5
+#define mmCM3_CM_BLNDGAM_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_DATA 0x11a6
+#define mmCM3_CM_BLNDGAM_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK 0x11a7
+#define mmCM3_CM_BLNDGAM_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B 0x11a8
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G 0x11a9
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R 0x11aa
+#define mmCM3_CM_BLNDGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B 0x11ab
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G 0x11ac
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R 0x11ad
+#define mmCM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B 0x11ae
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B 0x11af
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G 0x11b0
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G 0x11b1
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R 0x11b2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R 0x11b3
+#define mmCM3_CM_BLNDGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1 0x11b4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3 0x11b5
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5 0x11b6
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7 0x11b7
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9 0x11b8
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11 0x11b9
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13 0x11ba
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15 0x11bb
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17 0x11bc
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19 0x11bd
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21 0x11be
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23 0x11bf
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25 0x11c0
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27 0x11c1
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29 0x11c2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31 0x11c3
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33 0x11c4
+#define mmCM3_CM_BLNDGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B 0x11c5
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G 0x11c6
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R 0x11c7
+#define mmCM3_CM_BLNDGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B 0x11c8
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G 0x11c9
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R 0x11ca
+#define mmCM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B 0x11cb
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B 0x11cc
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G 0x11cd
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G 0x11ce
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R 0x11cf
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R 0x11d0
+#define mmCM3_CM_BLNDGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1 0x11d1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3 0x11d2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5 0x11d3
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7 0x11d4
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9 0x11d5
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11 0x11d6
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13 0x11d7
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15 0x11d8
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17 0x11d9
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19 0x11da
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21 0x11db
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23 0x11dc
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25 0x11dd
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27 0x11de
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29 0x11df
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31 0x11e0
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33 0x11e1
+#define mmCM3_CM_BLNDGAM_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_HDR_MULT_COEF 0x11e2
+#define mmCM3_CM_HDR_MULT_COEF_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL 0x11e3
+#define mmCM3_CM_MEM_PWR_CTRL_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS 0x11e4
+#define mmCM3_CM_MEM_PWR_STATUS_BASE_IDX 2
+#define mmCM3_CM_DEALPHA 0x11e6
+#define mmCM3_CM_DEALPHA_BASE_IDX 2
+#define mmCM3_CM_COEF_FORMAT 0x11e7
+#define mmCM3_CM_COEF_FORMAT_BASE_IDX 2
+#define mmCM3_CM_SHAPER_CONTROL 0x11e8
+#define mmCM3_CM_SHAPER_CONTROL_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_R 0x11e9
+#define mmCM3_CM_SHAPER_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_G 0x11ea
+#define mmCM3_CM_SHAPER_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_OFFSET_B 0x11eb
+#define mmCM3_CM_SHAPER_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_R 0x11ec
+#define mmCM3_CM_SHAPER_SCALE_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_SCALE_G_B 0x11ed
+#define mmCM3_CM_SHAPER_SCALE_G_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_INDEX 0x11ee
+#define mmCM3_CM_SHAPER_LUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_DATA 0x11ef
+#define mmCM3_CM_SHAPER_LUT_DATA_BASE_IDX 2
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK 0x11f0
+#define mmCM3_CM_SHAPER_LUT_WRITE_EN_MASK_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B 0x11f1
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G 0x11f2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R 0x11f3
+#define mmCM3_CM_SHAPER_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B 0x11f4
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G 0x11f5
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R 0x11f6
+#define mmCM3_CM_SHAPER_RAMA_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1 0x11f7
+#define mmCM3_CM_SHAPER_RAMA_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3 0x11f8
+#define mmCM3_CM_SHAPER_RAMA_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5 0x11f9
+#define mmCM3_CM_SHAPER_RAMA_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7 0x11fa
+#define mmCM3_CM_SHAPER_RAMA_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9 0x11fb
+#define mmCM3_CM_SHAPER_RAMA_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11 0x11fc
+#define mmCM3_CM_SHAPER_RAMA_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13 0x11fd
+#define mmCM3_CM_SHAPER_RAMA_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15 0x11fe
+#define mmCM3_CM_SHAPER_RAMA_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17 0x11ff
+#define mmCM3_CM_SHAPER_RAMA_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19 0x1200
+#define mmCM3_CM_SHAPER_RAMA_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21 0x1201
+#define mmCM3_CM_SHAPER_RAMA_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23 0x1202
+#define mmCM3_CM_SHAPER_RAMA_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25 0x1203
+#define mmCM3_CM_SHAPER_RAMA_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27 0x1204
+#define mmCM3_CM_SHAPER_RAMA_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29 0x1205
+#define mmCM3_CM_SHAPER_RAMA_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31 0x1206
+#define mmCM3_CM_SHAPER_RAMA_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33 0x1207
+#define mmCM3_CM_SHAPER_RAMA_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B 0x1208
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G 0x1209
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R 0x120a
+#define mmCM3_CM_SHAPER_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B 0x120b
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_B_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G 0x120c
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_G_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R 0x120d
+#define mmCM3_CM_SHAPER_RAMB_END_CNTL_R_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1 0x120e
+#define mmCM3_CM_SHAPER_RAMB_REGION_0_1_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3 0x120f
+#define mmCM3_CM_SHAPER_RAMB_REGION_2_3_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5 0x1210
+#define mmCM3_CM_SHAPER_RAMB_REGION_4_5_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7 0x1211
+#define mmCM3_CM_SHAPER_RAMB_REGION_6_7_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9 0x1212
+#define mmCM3_CM_SHAPER_RAMB_REGION_8_9_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11 0x1213
+#define mmCM3_CM_SHAPER_RAMB_REGION_10_11_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13 0x1214
+#define mmCM3_CM_SHAPER_RAMB_REGION_12_13_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15 0x1215
+#define mmCM3_CM_SHAPER_RAMB_REGION_14_15_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17 0x1216
+#define mmCM3_CM_SHAPER_RAMB_REGION_16_17_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19 0x1217
+#define mmCM3_CM_SHAPER_RAMB_REGION_18_19_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21 0x1218
+#define mmCM3_CM_SHAPER_RAMB_REGION_20_21_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23 0x1219
+#define mmCM3_CM_SHAPER_RAMB_REGION_22_23_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25 0x121a
+#define mmCM3_CM_SHAPER_RAMB_REGION_24_25_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27 0x121b
+#define mmCM3_CM_SHAPER_RAMB_REGION_26_27_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29 0x121c
+#define mmCM3_CM_SHAPER_RAMB_REGION_28_29_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31 0x121d
+#define mmCM3_CM_SHAPER_RAMB_REGION_30_31_BASE_IDX 2
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33 0x121e
+#define mmCM3_CM_SHAPER_RAMB_REGION_32_33_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_CTRL2 0x121f
+#define mmCM3_CM_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmCM3_CM_MEM_PWR_STATUS2 0x1220
+#define mmCM3_CM_MEM_PWR_STATUS2_BASE_IDX 2
+#define mmCM3_CM_3DLUT_MODE 0x1221
+#define mmCM3_CM_3DLUT_MODE_BASE_IDX 2
+#define mmCM3_CM_3DLUT_INDEX 0x1222
+#define mmCM3_CM_3DLUT_INDEX_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA 0x1223
+#define mmCM3_CM_3DLUT_DATA_BASE_IDX 2
+#define mmCM3_CM_3DLUT_DATA_30BIT 0x1224
+#define mmCM3_CM_3DLUT_DATA_30BIT_BASE_IDX 2
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL 0x1225
+#define mmCM3_CM_3DLUT_READ_WRITE_CONTROL_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR 0x1226
+#define mmCM3_CM_3DLUT_OUT_NORM_FACTOR_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R 0x1227
+#define mmCM3_CM_3DLUT_OUT_OFFSET_R_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G 0x1228
+#define mmCM3_CM_3DLUT_OUT_OFFSET_G_BASE_IDX 2
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B 0x1229
+#define mmCM3_CM_3DLUT_OUT_OFFSET_B_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_INDEX 0x122a
+#define mmCM3_CM_TEST_DEBUG_INDEX_BASE_IDX 2
+#define mmCM3_CM_TEST_DEBUG_DATA 0x122b
+#define mmCM3_CM_TEST_DEBUG_DATA_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+// base address: 0x0
+#define mmMPCC0_MPCC_TOP_SEL 0x1271
+#define mmMPCC0_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_SEL 0x1272
+#define mmMPCC0_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_OPP_ID 0x1273
+#define mmMPCC0_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC0_MPCC_CONTROL 0x1274
+#define mmMPCC0_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_SM_CONTROL 0x1275
+#define mmMPCC0_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL 0x1276
+#define mmMPCC0_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC0_MPCC_TOP_GAIN 0x1277
+#define mmMPCC0_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE 0x1278
+#define mmMPCC0_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE 0x1279
+#define mmMPCC0_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_R_CR 0x127a
+#define mmMPCC0_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_G_Y 0x127b
+#define mmMPCC0_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC0_MPCC_BG_B_CB 0x127c
+#define mmMPCC0_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC0_MPCC_MEM_PWR_CTRL 0x127d
+#define mmMPCC0_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC0_MPCC_STALL_STATUS 0x127e
+#define mmMPCC0_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC0_MPCC_STATUS 0x127f
+#define mmMPCC0_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+// base address: 0x6c
+#define mmMPCC1_MPCC_TOP_SEL 0x128c
+#define mmMPCC1_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_SEL 0x128d
+#define mmMPCC1_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_OPP_ID 0x128e
+#define mmMPCC1_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC1_MPCC_CONTROL 0x128f
+#define mmMPCC1_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_SM_CONTROL 0x1290
+#define mmMPCC1_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL 0x1291
+#define mmMPCC1_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC1_MPCC_TOP_GAIN 0x1292
+#define mmMPCC1_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE 0x1293
+#define mmMPCC1_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE 0x1294
+#define mmMPCC1_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_R_CR 0x1295
+#define mmMPCC1_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_G_Y 0x1296
+#define mmMPCC1_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC1_MPCC_BG_B_CB 0x1297
+#define mmMPCC1_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC1_MPCC_MEM_PWR_CTRL 0x1298
+#define mmMPCC1_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC1_MPCC_STALL_STATUS 0x1299
+#define mmMPCC1_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC1_MPCC_STATUS 0x129a
+#define mmMPCC1_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+// base address: 0xd8
+#define mmMPCC2_MPCC_TOP_SEL 0x12a7
+#define mmMPCC2_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_SEL 0x12a8
+#define mmMPCC2_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_OPP_ID 0x12a9
+#define mmMPCC2_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC2_MPCC_CONTROL 0x12aa
+#define mmMPCC2_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_SM_CONTROL 0x12ab
+#define mmMPCC2_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL 0x12ac
+#define mmMPCC2_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC2_MPCC_TOP_GAIN 0x12ad
+#define mmMPCC2_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE 0x12ae
+#define mmMPCC2_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE 0x12af
+#define mmMPCC2_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_R_CR 0x12b0
+#define mmMPCC2_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_G_Y 0x12b1
+#define mmMPCC2_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC2_MPCC_BG_B_CB 0x12b2
+#define mmMPCC2_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC2_MPCC_MEM_PWR_CTRL 0x12b3
+#define mmMPCC2_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC2_MPCC_STALL_STATUS 0x12b4
+#define mmMPCC2_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC2_MPCC_STATUS 0x12b5
+#define mmMPCC2_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+// base address: 0x144
+#define mmMPCC3_MPCC_TOP_SEL 0x12c2
+#define mmMPCC3_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_SEL 0x12c3
+#define mmMPCC3_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_OPP_ID 0x12c4
+#define mmMPCC3_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC3_MPCC_CONTROL 0x12c5
+#define mmMPCC3_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_SM_CONTROL 0x12c6
+#define mmMPCC3_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL 0x12c7
+#define mmMPCC3_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC3_MPCC_TOP_GAIN 0x12c8
+#define mmMPCC3_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE 0x12c9
+#define mmMPCC3_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE 0x12ca
+#define mmMPCC3_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_R_CR 0x12cb
+#define mmMPCC3_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_G_Y 0x12cc
+#define mmMPCC3_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC3_MPCC_BG_B_CB 0x12cd
+#define mmMPCC3_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC3_MPCC_MEM_PWR_CTRL 0x12ce
+#define mmMPCC3_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC3_MPCC_STALL_STATUS 0x12cf
+#define mmMPCC3_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC3_MPCC_STATUS 0x12d0
+#define mmMPCC3_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+// base address: 0x1b0
+#define mmMPCC4_MPCC_TOP_SEL 0x12dd
+#define mmMPCC4_MPCC_TOP_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_SEL 0x12de
+#define mmMPCC4_MPCC_BOT_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_OPP_ID 0x12df
+#define mmMPCC4_MPCC_OPP_ID_BASE_IDX 2
+#define mmMPCC4_MPCC_CONTROL 0x12e0
+#define mmMPCC4_MPCC_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_SM_CONTROL 0x12e1
+#define mmMPCC4_MPCC_SM_CONTROL_BASE_IDX 2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL 0x12e2
+#define mmMPCC4_MPCC_UPDATE_LOCK_SEL_BASE_IDX 2
+#define mmMPCC4_MPCC_TOP_GAIN 0x12e3
+#define mmMPCC4_MPCC_TOP_GAIN_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE 0x12e4
+#define mmMPCC4_MPCC_BOT_GAIN_INSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE 0x12e5
+#define mmMPCC4_MPCC_BOT_GAIN_OUTSIDE_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_R_CR 0x12e6
+#define mmMPCC4_MPCC_BG_R_CR_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_G_Y 0x12e7
+#define mmMPCC4_MPCC_BG_G_Y_BASE_IDX 2
+#define mmMPCC4_MPCC_BG_B_CB 0x12e8
+#define mmMPCC4_MPCC_BG_B_CB_BASE_IDX 2
+#define mmMPCC4_MPCC_MEM_PWR_CTRL 0x12e9
+#define mmMPCC4_MPCC_MEM_PWR_CTRL_BASE_IDX 2
+#define mmMPCC4_MPCC_STALL_STATUS 0x12ea
+#define mmMPCC4_MPCC_STALL_STATUS_BASE_IDX 2
+#define mmMPCC4_MPCC_STATUS 0x12eb
+#define mmMPCC4_MPCC_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+// base address: 0x0
+#define mmMPC_CLOCK_CONTROL 0x1349
+#define mmMPC_CLOCK_CONTROL_BASE_IDX 2
+#define mmMPC_SOFT_RESET 0x134a
+#define mmMPC_SOFT_RESET_BASE_IDX 2
+#define mmMPC_CRC_CTRL 0x134b
+#define mmMPC_CRC_CTRL_BASE_IDX 2
+#define mmMPC_CRC_SEL_CONTROL 0x134c
+#define mmMPC_CRC_SEL_CONTROL_BASE_IDX 2
+#define mmMPC_CRC_RESULT_AR 0x134d
+#define mmMPC_CRC_RESULT_AR_BASE_IDX 2
+#define mmMPC_CRC_RESULT_GB 0x134e
+#define mmMPC_CRC_RESULT_GB_BASE_IDX 2
+#define mmMPC_CRC_RESULT_C 0x134f
+#define mmMPC_CRC_RESULT_C_BASE_IDX 2
+#define mmMPC_PERFMON_EVENT_CTRL 0x1352
+#define mmMPC_PERFMON_EVENT_CTRL_BASE_IDX 2
+#define mmMPC_BYPASS_BG_AR 0x1353
+#define mmMPC_BYPASS_BG_AR_BASE_IDX 2
+#define mmMPC_BYPASS_BG_GB 0x1354
+#define mmMPC_BYPASS_BG_GB_BASE_IDX 2
+#define mmMPC_STALL_GRACE_WINDOW 0x1355
+#define mmMPC_STALL_GRACE_WINDOW_BASE_IDX 2
+#define mmMPC_HOST_READ_CONTROL 0x1356
+#define mmMPC_HOST_READ_CONTROL_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0 0x135d
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET0 0x135e
+#define mmADR_CFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET0 0x135f
+#define mmADR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET0 0x1360
+#define mmCFG_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET0 0x1361
+#define mmCUR_VUPDATE_LOCK_SET0_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1 0x1362
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET1 0x1363
+#define mmADR_CFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET1 0x1364
+#define mmADR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET1 0x1365
+#define mmCFG_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET1 0x1366
+#define mmCUR_VUPDATE_LOCK_SET1_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2 0x1367
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET2 0x1368
+#define mmADR_CFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET2 0x1369
+#define mmADR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET2 0x136a
+#define mmCFG_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET2 0x136b
+#define mmCUR_VUPDATE_LOCK_SET2_BASE_IDX 2
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3 0x136c
+#define mmADR_CFG_CUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_CFG_VUPDATE_LOCK_SET3 0x136d
+#define mmADR_CFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmADR_VUPDATE_LOCK_SET3 0x136e
+#define mmADR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCFG_VUPDATE_LOCK_SET3 0x136f
+#define mmCFG_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmCUR_VUPDATE_LOCK_SET3 0x1370
+#define mmCUR_VUPDATE_LOCK_SET3_BASE_IDX 2
+#define mmMPC_OUT0_MUX 0x1385
+#define mmMPC_OUT0_MUX_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CONTROL 0x1386
+#define mmMPC_OUT0_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y 0x1387
+#define mmMPC_OUT0_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB 0x1388
+#define mmMPC_OUT0_DENORM_CLAMP_B_CB_BASE_IDX 2
+#define mmMPC_OUT1_MUX 0x1389
+#define mmMPC_OUT1_MUX_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CONTROL 0x138a
+#define mmMPC_OUT1_DENORM_CONTROL_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y 0x138b
+#define mmMPC_OUT1_DENORM_CLAMP_G_Y_BASE_IDX 2
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB 0x138c
+#define mmMPC_OUT1_DENORM_CLAMP_B_CB_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+// base address: 0x0
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE 0x13ae
+#define mmMPCC_OGAM0_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX 0x13af
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA 0x13b0
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL 0x13b1
+#define mmMPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B 0x13b2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G 0x13b3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R 0x13b4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13b5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13b6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13b7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B 0x13b8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B 0x13b9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G 0x13ba
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G 0x13bb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R 0x13bc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R 0x13bd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1 0x13be
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3 0x13bf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5 0x13c0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7 0x13c1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9 0x13c2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11 0x13c3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13 0x13c4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15 0x13c5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17 0x13c6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19 0x13c7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21 0x13c8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23 0x13c9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25 0x13ca
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27 0x13cb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29 0x13cc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31 0x13cd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33 0x13ce
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B 0x13cf
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G 0x13d0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R 0x13d1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x13d2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x13d3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x13d4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B 0x13d5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B 0x13d6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G 0x13d7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G 0x13d8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R 0x13d9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R 0x13da
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1 0x13db
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3 0x13dc
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5 0x13dd
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7 0x13de
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9 0x13df
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11 0x13e0
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13 0x13e1
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15 0x13e2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17 0x13e3
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19 0x13e4
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21 0x13e5
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23 0x13e6
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25 0x13e7
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27 0x13e8
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29 0x13e9
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31 0x13ea
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33 0x13eb
+#define mmMPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+// base address: 0x104
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE 0x13ef
+#define mmMPCC_OGAM1_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX 0x13f0
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA 0x13f1
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL 0x13f2
+#define mmMPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B 0x13f3
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G 0x13f4
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R 0x13f5
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x13f6
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x13f7
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x13f8
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B 0x13f9
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B 0x13fa
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G 0x13fb
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G 0x13fc
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R 0x13fd
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R 0x13fe
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1 0x13ff
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3 0x1400
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5 0x1401
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7 0x1402
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9 0x1403
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11 0x1404
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13 0x1405
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15 0x1406
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17 0x1407
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19 0x1408
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21 0x1409
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23 0x140a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25 0x140b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27 0x140c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29 0x140d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31 0x140e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33 0x140f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B 0x1410
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G 0x1411
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R 0x1412
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1413
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1414
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1415
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B 0x1416
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B 0x1417
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G 0x1418
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G 0x1419
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R 0x141a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R 0x141b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1 0x141c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3 0x141d
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5 0x141e
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7 0x141f
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9 0x1420
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11 0x1421
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13 0x1422
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15 0x1423
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17 0x1424
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19 0x1425
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21 0x1426
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23 0x1427
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25 0x1428
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27 0x1429
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29 0x142a
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31 0x142b
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33 0x142c
+#define mmMPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+// base address: 0x208
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE 0x1430
+#define mmMPCC_OGAM2_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX 0x1431
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA 0x1432
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL 0x1433
+#define mmMPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B 0x1434
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G 0x1435
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R 0x1436
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1437
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1438
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x1439
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B 0x143a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B 0x143b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G 0x143c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G 0x143d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R 0x143e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R 0x143f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1 0x1440
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3 0x1441
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5 0x1442
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7 0x1443
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9 0x1444
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11 0x1445
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13 0x1446
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15 0x1447
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17 0x1448
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19 0x1449
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21 0x144a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23 0x144b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25 0x144c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27 0x144d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29 0x144e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31 0x144f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33 0x1450
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B 0x1451
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G 0x1452
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R 0x1453
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1454
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1455
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1456
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B 0x1457
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B 0x1458
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G 0x1459
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G 0x145a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R 0x145b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R 0x145c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1 0x145d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3 0x145e
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5 0x145f
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7 0x1460
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9 0x1461
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11 0x1462
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13 0x1463
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15 0x1464
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17 0x1465
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19 0x1466
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21 0x1467
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23 0x1468
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25 0x1469
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27 0x146a
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29 0x146b
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31 0x146c
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33 0x146d
+#define mmMPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+// base address: 0x30c
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE 0x1471
+#define mmMPCC_OGAM3_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX 0x1472
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA 0x1473
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL 0x1474
+#define mmMPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B 0x1475
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G 0x1476
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R 0x1477
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x1478
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x1479
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x147a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B 0x147b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B 0x147c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G 0x147d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G 0x147e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R 0x147f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R 0x1480
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1 0x1481
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3 0x1482
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5 0x1483
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7 0x1484
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9 0x1485
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11 0x1486
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13 0x1487
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15 0x1488
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17 0x1489
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19 0x148a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21 0x148b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23 0x148c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25 0x148d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27 0x148e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29 0x148f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31 0x1490
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33 0x1491
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B 0x1492
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G 0x1493
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R 0x1494
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x1495
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x1496
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x1497
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B 0x1498
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B 0x1499
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G 0x149a
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G 0x149b
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R 0x149c
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R 0x149d
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1 0x149e
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3 0x149f
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5 0x14a0
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7 0x14a1
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9 0x14a2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11 0x14a3
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13 0x14a4
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15 0x14a5
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17 0x14a6
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19 0x14a7
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21 0x14a8
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23 0x14a9
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25 0x14aa
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27 0x14ab
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29 0x14ac
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31 0x14ad
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33 0x14ae
+#define mmMPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+// base address: 0x410
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE 0x14b2
+#define mmMPCC_OGAM4_MPCC_OGAM_MODE_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX 0x14b3
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_INDEX_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA 0x14b4
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_DATA_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL 0x14b5
+#define mmMPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B 0x14b6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G 0x14b7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R 0x14b8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B 0x14b9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G 0x14ba
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R 0x14bb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B 0x14bc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B 0x14bd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G 0x14be
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G 0x14bf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R 0x14c0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R 0x14c1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1 0x14c2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3 0x14c3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5 0x14c4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7 0x14c5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9 0x14c6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11 0x14c7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13 0x14c8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15 0x14c9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17 0x14ca
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19 0x14cb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21 0x14cc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23 0x14cd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25 0x14ce
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27 0x14cf
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29 0x14d0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31 0x14d1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33 0x14d2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B 0x14d3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G 0x14d4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R 0x14d5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B 0x14d6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G 0x14d7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R 0x14d8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B 0x14d9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B 0x14da
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G 0x14db
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G 0x14dc
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R 0x14dd
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R 0x14de
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1 0x14df
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3 0x14e0
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5 0x14e1
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7 0x14e2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9 0x14e3
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11 0x14e4
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13 0x14e5
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15 0x14e6
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17 0x14e7
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19 0x14e8
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21 0x14e9
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23 0x14ea
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25 0x14eb
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27 0x14ec
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29 0x14ed
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31 0x14ee
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31_BASE_IDX 2
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33 0x14ef
+#define mmMPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33_BASE_IDX 2
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+// base address: 0x0
+#define mmMPC_OUT_CSC_COEF_FORMAT 0x15b6
+#define mmMPC_OUT_CSC_COEF_FORMAT_BASE_IDX 2
+#define mmMPC_OUT0_CSC_MODE 0x15b7
+#define mmMPC_OUT0_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_A 0x15b8
+#define mmMPC_OUT0_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_A 0x15b9
+#define mmMPC_OUT0_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_A 0x15ba
+#define mmMPC_OUT0_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_A 0x15bb
+#define mmMPC_OUT0_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_A 0x15bc
+#define mmMPC_OUT0_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_A 0x15bd
+#define mmMPC_OUT0_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C11_C12_B 0x15be
+#define mmMPC_OUT0_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C13_C14_B 0x15bf
+#define mmMPC_OUT0_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C21_C22_B 0x15c0
+#define mmMPC_OUT0_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C23_C24_B 0x15c1
+#define mmMPC_OUT0_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C31_C32_B 0x15c2
+#define mmMPC_OUT0_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT0_CSC_C33_C34_B 0x15c3
+#define mmMPC_OUT0_CSC_C33_C34_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_MODE 0x15c4
+#define mmMPC_OUT1_CSC_MODE_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_A 0x15c5
+#define mmMPC_OUT1_CSC_C11_C12_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_A 0x15c6
+#define mmMPC_OUT1_CSC_C13_C14_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_A 0x15c7
+#define mmMPC_OUT1_CSC_C21_C22_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_A 0x15c8
+#define mmMPC_OUT1_CSC_C23_C24_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_A 0x15c9
+#define mmMPC_OUT1_CSC_C31_C32_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_A 0x15ca
+#define mmMPC_OUT1_CSC_C33_C34_A_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C11_C12_B 0x15cb
+#define mmMPC_OUT1_CSC_C11_C12_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C13_C14_B 0x15cc
+#define mmMPC_OUT1_CSC_C13_C14_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C21_C22_B 0x15cd
+#define mmMPC_OUT1_CSC_C21_C22_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C23_C24_B 0x15ce
+#define mmMPC_OUT1_CSC_C23_C24_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C31_C32_B 0x15cf
+#define mmMPC_OUT1_CSC_C31_C32_B_BASE_IDX 2
+#define mmMPC_OUT1_CSC_C33_C34_B 0x15d0
+#define mmMPC_OUT1_CSC_C33_C34_B_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt0_dispdec
+// base address: 0x0
+#define mmFMT0_FMT_CLAMP_COMPONENT_R 0x183c
+#define mmFMT0_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_G 0x183d
+#define mmFMT0_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_COMPONENT_B 0x183e
+#define mmFMT0_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL 0x183f
+#define mmFMT0_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_CONTROL 0x1840
+#define mmFMT0_FMT_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL 0x1841
+#define mmFMT0_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_R_SEED 0x1842
+#define mmFMT0_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_G_SEED 0x1843
+#define mmFMT0_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT0_FMT_DITHER_RAND_B_SEED 0x1844
+#define mmFMT0_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT0_FMT_CLAMP_CNTL 0x1845
+#define mmFMT0_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x1846
+#define mmFMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL 0x1847
+#define mmFMT0_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT0_FMT_422_CONTROL 0x1849
+#define mmFMT0_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+// base address: 0x0
+#define mmDPG0_DPG_CONTROL 0x1854
+#define mmDPG0_DPG_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_RAMP_CONTROL 0x1855
+#define mmDPG0_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG0_DPG_DIMENSIONS 0x1856
+#define mmDPG0_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_R_CR 0x1857
+#define mmDPG0_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_G_Y 0x1858
+#define mmDPG0_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG0_DPG_COLOUR_B_CB 0x1859
+#define mmDPG0_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG0_DPG_OFFSET_SEGMENT 0x185a
+#define mmDPG0_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG0_DPG_STATUS 0x185b
+#define mmDPG0_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+// base address: 0x0
+#define mmOPPBUF0_OPPBUF_CONTROL 0x1884
+#define mmOPPBUF0_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0 0x1885
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1 0x1886
+#define mmOPPBUF0_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL 0x188c
+#define mmOPP_PIPE0_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc0_dispdec
+// base address: 0x0
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL 0x1891
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK 0x1892
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0 0x1893
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1 0x1894
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2 0x1895
+#define mmOPP_PIPE_CRC0_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+// base address: 0x168
+#define mmFMT1_FMT_CLAMP_COMPONENT_R 0x1896
+#define mmFMT1_FMT_CLAMP_COMPONENT_R_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_G 0x1897
+#define mmFMT1_FMT_CLAMP_COMPONENT_G_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_COMPONENT_B 0x1898
+#define mmFMT1_FMT_CLAMP_COMPONENT_B_BASE_IDX 2
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL 0x1899
+#define mmFMT1_FMT_DYNAMIC_EXP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_CONTROL 0x189a
+#define mmFMT1_FMT_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL 0x189b
+#define mmFMT1_FMT_BIT_DEPTH_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_R_SEED 0x189c
+#define mmFMT1_FMT_DITHER_RAND_R_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_G_SEED 0x189d
+#define mmFMT1_FMT_DITHER_RAND_G_SEED_BASE_IDX 2
+#define mmFMT1_FMT_DITHER_RAND_B_SEED 0x189e
+#define mmFMT1_FMT_DITHER_RAND_B_SEED_BASE_IDX 2
+#define mmFMT1_FMT_CLAMP_CNTL 0x189f
+#define mmFMT1_FMT_CLAMP_CNTL_BASE_IDX 2
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL 0x18a0
+#define mmFMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL 0x18a1
+#define mmFMT1_FMT_MAP420_MEMORY_CONTROL_BASE_IDX 2
+#define mmFMT1_FMT_422_CONTROL 0x18a3
+#define mmFMT1_FMT_422_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+// base address: 0x168
+#define mmDPG1_DPG_CONTROL 0x18ae
+#define mmDPG1_DPG_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_RAMP_CONTROL 0x18af
+#define mmDPG1_DPG_RAMP_CONTROL_BASE_IDX 2
+#define mmDPG1_DPG_DIMENSIONS 0x18b0
+#define mmDPG1_DPG_DIMENSIONS_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_R_CR 0x18b1
+#define mmDPG1_DPG_COLOUR_R_CR_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_G_Y 0x18b2
+#define mmDPG1_DPG_COLOUR_G_Y_BASE_IDX 2
+#define mmDPG1_DPG_COLOUR_B_CB 0x18b3
+#define mmDPG1_DPG_COLOUR_B_CB_BASE_IDX 2
+#define mmDPG1_DPG_OFFSET_SEGMENT 0x18b4
+#define mmDPG1_DPG_OFFSET_SEGMENT_BASE_IDX 2
+#define mmDPG1_DPG_STATUS 0x18b5
+#define mmDPG1_DPG_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+// base address: 0x168
+#define mmOPPBUF1_OPPBUF_CONTROL 0x18de
+#define mmOPPBUF1_OPPBUF_CONTROL_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0 0x18df
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_0_BASE_IDX 2
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1 0x18e0
+#define mmOPPBUF1_OPPBUF_3D_PARAMETERS_1_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL 0x18e6
+#define mmOPP_PIPE1_OPP_PIPE_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_pipe_crc1_dispdec
+// base address: 0x168
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL 0x18eb
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_CONTROL_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK 0x18ec
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_MASK_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0 0x18ed
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT0_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1 0x18ee
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT1_BASE_IDX 2
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2 0x18ef
+#define mmOPP_PIPE_CRC1_OPP_PIPE_CRC_RESULT2_BASE_IDX 2
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+// base address: 0x0
+#define mmOPP_TOP_CLK_CONTROL 0x1a5e
+#define mmOPP_TOP_CLK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+// base address: 0x0
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
+#define mmODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_DATA_SOURCE_SELECT 0x1acb
+#define mmODM0_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL 0x1acc
+#define mmODM0_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_BYTES_PER_PIXEL 0x1acd
+#define mmODM0_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM0_OPTC_WIDTH_CONTROL 0x1ace
+#define mmODM0_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL 0x1acf
+#define mmODM0_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+// base address: 0x40
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL 0x1ada
+#define mmODM1_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_DATA_SOURCE_SELECT 0x1adb
+#define mmODM1_OPTC_DATA_SOURCE_SELECT_BASE_IDX 2
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL 0x1adc
+#define mmODM1_OPTC_DATA_FORMAT_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_BYTES_PER_PIXEL 0x1add
+#define mmODM1_OPTC_BYTES_PER_PIXEL_BASE_IDX 2
+#define mmODM1_OPTC_WIDTH_CONTROL 0x1ade
+#define mmODM1_OPTC_WIDTH_CONTROL_BASE_IDX 2
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL 0x1adf
+#define mmODM1_OPTC_INPUT_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+// base address: 0x0
+#define mmOTG0_OTG_H_TOTAL 0x1b2a
+#define mmOTG0_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_H_BLANK_START_END 0x1b2b
+#define mmOTG0_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A 0x1b2c
+#define mmOTG0_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_H_SYNC_A_CNTL 0x1b2d
+#define mmOTG0_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_H_TIMING_CNTL 0x1b2e
+#define mmOTG0_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL 0x1b2f
+#define mmOTG0_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MIN 0x1b30
+#define mmOTG0_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MAX 0x1b31
+#define mmOTG0_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_MID 0x1b32
+#define mmOTG0_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_CONTROL 0x1b33
+#define mmOTG0_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS 0x1b34
+#define mmOTG0_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS 0x1b35
+#define mmOTG0_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_V_BLANK_START_END 0x1b36
+#define mmOTG0_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A 0x1b37
+#define mmOTG0_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG0_OTG_V_SYNC_A_CNTL 0x1b38
+#define mmOTG0_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_CNTL 0x1b39
+#define mmOTG0_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG 0x1b3a
+#define mmOTG0_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_CNTL 0x1b3b
+#define mmOTG0_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG 0x1b3c
+#define mmOTG0_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL 0x1b3d
+#define mmOTG0_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE 0x1b3f
+#define mmOTG0_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG0_OTG_CONTROL 0x1b41
+#define mmOTG0_OTG_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_CONTROL 0x1b42
+#define mmOTG0_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_CONTROL 0x1b44
+#define mmOTG0_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_INTERLACE_STATUS 0x1b45
+#define mmOTG0_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0 0x1b47
+#define mmOTG0_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1 0x1b48
+#define mmOTG0_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG0_OTG_STATUS 0x1b49
+#define mmOTG0_OTG_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_POSITION 0x1b4a
+#define mmOTG0_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_NOM_VERT_POSITION 0x1b4b
+#define mmOTG0_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_FRAME_COUNT 0x1b4c
+#define mmOTG0_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_VF_COUNT 0x1b4d
+#define mmOTG0_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_STATUS_HV_COUNT 0x1b4e
+#define mmOTG0_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_CONTROL 0x1b4f
+#define mmOTG0_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_COUNT_RESET 0x1b50
+#define mmOTG0_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1b51
+#define mmOTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG0_OTG_VERT_SYNC_CONTROL 0x1b52
+#define mmOTG0_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_STATUS 0x1b53
+#define mmOTG0_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_STEREO_CONTROL 0x1b54
+#define mmOTG0_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_STATUS 0x1b55
+#define mmOTG0_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_CONTROL 0x1b56
+#define mmOTG0_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_POSITION 0x1b57
+#define mmOTG0_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_SNAPSHOT_FRAME 0x1b58
+#define mmOTG0_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG0_OTG_INTERRUPT_CONTROL 0x1b59
+#define mmOTG0_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_UPDATE_LOCK 0x1b5a
+#define mmOTG0_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL 0x1b5b
+#define mmOTG0_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_EN 0x1b5c
+#define mmOTG0_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR 0x1b5e
+#define mmOTG0_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT 0x1b5f
+#define mmOTG0_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR 0x1b60
+#define mmOTG0_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG0_OTG_BLACK_COLOR_EXT 0x1b61
+#define mmOTG0_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION 0x1b62
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1b63
+#define mmOTG0_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION 0x1b64
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1b65
+#define mmOTG0_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION 0x1b66
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1b67
+#define mmOTG0_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL 0x1b68
+#define mmOTG0_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG0_OTG_CRC_CNTL2 0x1b69
+#define mmOTG0_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL 0x1b6a
+#define mmOTG0_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL 0x1b6b
+#define mmOTG0_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL 0x1b6c
+#define mmOTG0_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL 0x1b6d
+#define mmOTG0_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_RG 0x1b6e
+#define mmOTG0_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC0_DATA_B 0x1b6f
+#define mmOTG0_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL 0x1b70
+#define mmOTG0_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL 0x1b71
+#define mmOTG0_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL 0x1b72
+#define mmOTG0_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL 0x1b73
+#define mmOTG0_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_RG 0x1b74
+#define mmOTG0_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG0_OTG_CRC1_DATA_B 0x1b75
+#define mmOTG0_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK 0x1b7a
+#define mmOTG0_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1b7b
+#define mmOTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL 0x1b82
+#define mmOTG0_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL 0x1b83
+#define mmOTG0_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_VSYNC_GAP 0x1b84
+#define mmOTG0_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_MODE 0x1b85
+#define mmOTG0_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG0_OTG_CLOCK_CONTROL 0x1b86
+#define mmOTG0_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_VSTARTUP_PARAM 0x1b87
+#define mmOTG0_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_PARAM 0x1b88
+#define mmOTG0_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_VREADY_PARAM 0x1b89
+#define mmOTG0_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS 0x1b8a
+#define mmOTG0_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK 0x1b8b
+#define mmOTG0_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG0_OTG_GSL_CONTROL 0x1b8c
+#define mmOTG0_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_X 0x1b8d
+#define mmOTG0_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG0_OTG_GSL_WINDOW_Y 0x1b8e
+#define mmOTG0_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG0_OTG_VUPDATE_KEEPOUT 0x1b8f
+#define mmOTG0_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL0 0x1b90
+#define mmOTG0_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL1 0x1b91
+#define mmOTG0_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL2 0x1b92
+#define mmOTG0_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG0_OTG_GLOBAL_CONTROL3 0x1b93
+#define mmOTG0_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL 0x1b94
+#define mmOTG0_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_DRR_CONTROL 0x1b97
+#define mmOTG0_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG0_OTG_DSC_START_POSITION 0x1b99
+#define mmOTG0_OTG_DSC_START_POSITION_BASE_IDX 2
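
/*
 * Context (illustrative sketch, not part of the generated header): each
 * mmFOO offset above is paired with a FOO_BASE_IDX segment index, and the
 * DC resource code typically combines the two into a dword register
 * address along the lines sketched here.  The BASE()/BASE_INNER()/SR()
 * helpers and the DCN_BASE__INST0_SEGn symbols are assumptions modeled on
 * the existing dcn*_resource.c files and the SoC ip-offset headers, not
 * definitions made by this patch.
 *
 *   #define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
 *   #define BASE(seg)       BASE_INNER(seg)     // expands seg before pasting
 *   #define SR(reg_name) \
 *           .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + mm ## reg_name
 *
 * With the definitions above, SR(OTG0_OTG_CONTROL) would resolve to
 * DCN_BASE__INST0_SEG2 + 0x1b41, i.e. the segment selected by the
 * BASE_IDX value of 2 plus the mmOTG0_OTG_CONTROL offset.
 */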
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+// base address: 0x200
+#define mmOTG1_OTG_H_TOTAL 0x1baa
+#define mmOTG1_OTG_H_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_H_BLANK_START_END 0x1bab
+#define mmOTG1_OTG_H_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A 0x1bac
+#define mmOTG1_OTG_H_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_H_SYNC_A_CNTL 0x1bad
+#define mmOTG1_OTG_H_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_H_TIMING_CNTL 0x1bae
+#define mmOTG1_OTG_H_TIMING_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL 0x1baf
+#define mmOTG1_OTG_V_TOTAL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MIN 0x1bb0
+#define mmOTG1_OTG_V_TOTAL_MIN_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MAX 0x1bb1
+#define mmOTG1_OTG_V_TOTAL_MAX_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_MID 0x1bb2
+#define mmOTG1_OTG_V_TOTAL_MID_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_CONTROL 0x1bb3
+#define mmOTG1_OTG_V_TOTAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS 0x1bb4
+#define mmOTG1_OTG_V_TOTAL_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS 0x1bb5
+#define mmOTG1_OTG_VSYNC_NOM_INT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_V_BLANK_START_END 0x1bb6
+#define mmOTG1_OTG_V_BLANK_START_END_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A 0x1bb7
+#define mmOTG1_OTG_V_SYNC_A_BASE_IDX 2
+#define mmOTG1_OTG_V_SYNC_A_CNTL 0x1bb8
+#define mmOTG1_OTG_V_SYNC_A_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_CNTL 0x1bb9
+#define mmOTG1_OTG_TRIGA_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG 0x1bba
+#define mmOTG1_OTG_TRIGA_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_CNTL 0x1bbb
+#define mmOTG1_OTG_TRIGB_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG 0x1bbc
+#define mmOTG1_OTG_TRIGB_MANUAL_TRIG_BASE_IDX 2
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL 0x1bbd
+#define mmOTG1_OTG_FORCE_COUNT_NOW_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE 0x1bbf
+#define mmOTG1_OTG_STEREO_FORCE_NEXT_EYE_BASE_IDX 2
+#define mmOTG1_OTG_CONTROL 0x1bc1
+#define mmOTG1_OTG_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_CONTROL 0x1bc2
+#define mmOTG1_OTG_BLANK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_CONTROL 0x1bc4
+#define mmOTG1_OTG_INTERLACE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_INTERLACE_STATUS 0x1bc5
+#define mmOTG1_OTG_INTERLACE_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0 0x1bc7
+#define mmOTG1_OTG_PIXEL_DATA_READBACK0_BASE_IDX 2
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1 0x1bc8
+#define mmOTG1_OTG_PIXEL_DATA_READBACK1_BASE_IDX 2
+#define mmOTG1_OTG_STATUS 0x1bc9
+#define mmOTG1_OTG_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_POSITION 0x1bca
+#define mmOTG1_OTG_STATUS_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_NOM_VERT_POSITION 0x1bcb
+#define mmOTG1_OTG_NOM_VERT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_FRAME_COUNT 0x1bcc
+#define mmOTG1_OTG_STATUS_FRAME_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_VF_COUNT 0x1bcd
+#define mmOTG1_OTG_STATUS_VF_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_STATUS_HV_COUNT 0x1bce
+#define mmOTG1_OTG_STATUS_HV_COUNT_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_CONTROL 0x1bcf
+#define mmOTG1_OTG_COUNT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_COUNT_RESET 0x1bd0
+#define mmOTG1_OTG_COUNT_RESET_BASE_IDX 2
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE 0x1bd1
+#define mmOTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_BASE_IDX 2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL 0x1bd2
+#define mmOTG1_OTG_VERT_SYNC_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_STATUS 0x1bd3
+#define mmOTG1_OTG_STEREO_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_STEREO_CONTROL 0x1bd4
+#define mmOTG1_OTG_STEREO_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_STATUS 0x1bd5
+#define mmOTG1_OTG_SNAPSHOT_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_CONTROL 0x1bd6
+#define mmOTG1_OTG_SNAPSHOT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_POSITION 0x1bd7
+#define mmOTG1_OTG_SNAPSHOT_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_SNAPSHOT_FRAME 0x1bd8
+#define mmOTG1_OTG_SNAPSHOT_FRAME_BASE_IDX 2
+#define mmOTG1_OTG_INTERRUPT_CONTROL 0x1bd9
+#define mmOTG1_OTG_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_UPDATE_LOCK 0x1bda
+#define mmOTG1_OTG_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL 0x1bdb
+#define mmOTG1_OTG_DOUBLE_BUFFER_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_EN 0x1bdc
+#define mmOTG1_OTG_MASTER_EN_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR 0x1bde
+#define mmOTG1_OTG_BLANK_DATA_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT 0x1bdf
+#define mmOTG1_OTG_BLANK_DATA_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR 0x1be0
+#define mmOTG1_OTG_BLACK_COLOR_BASE_IDX 2
+#define mmOTG1_OTG_BLACK_COLOR_EXT 0x1be1
+#define mmOTG1_OTG_BLACK_COLOR_EXT_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION 0x1be2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL 0x1be3
+#define mmOTG1_OTG_VERTICAL_INTERRUPT0_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION 0x1be4
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL 0x1be5
+#define mmOTG1_OTG_VERTICAL_INTERRUPT1_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION 0x1be6
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_POSITION_BASE_IDX 2
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL 0x1be7
+#define mmOTG1_OTG_VERTICAL_INTERRUPT2_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL 0x1be8
+#define mmOTG1_OTG_CRC_CNTL_BASE_IDX 2
+#define mmOTG1_OTG_CRC_CNTL2 0x1be9
+#define mmOTG1_OTG_CRC_CNTL2_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL 0x1bea
+#define mmOTG1_OTG_CRC0_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL 0x1beb
+#define mmOTG1_OTG_CRC0_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL 0x1bec
+#define mmOTG1_OTG_CRC0_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL 0x1bed
+#define mmOTG1_OTG_CRC0_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_RG 0x1bee
+#define mmOTG1_OTG_CRC0_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC0_DATA_B 0x1bef
+#define mmOTG1_OTG_CRC0_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL 0x1bf0
+#define mmOTG1_OTG_CRC1_WINDOWA_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL 0x1bf1
+#define mmOTG1_OTG_CRC1_WINDOWA_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL 0x1bf2
+#define mmOTG1_OTG_CRC1_WINDOWB_X_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL 0x1bf3
+#define mmOTG1_OTG_CRC1_WINDOWB_Y_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_RG 0x1bf4
+#define mmOTG1_OTG_CRC1_DATA_RG_BASE_IDX 2
+#define mmOTG1_OTG_CRC1_DATA_B 0x1bf5
+#define mmOTG1_OTG_CRC1_DATA_B_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK 0x1bfa
+#define mmOTG1_OTG_CRC_SIG_RED_GREEN_MASK_BASE_IDX 2
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK 0x1bfb
+#define mmOTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK_BASE_IDX 2
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL 0x1c02
+#define mmOTG1_OTG_STATIC_SCREEN_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL 0x1c03
+#define mmOTG1_OTG_3D_STRUCTURE_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_VSYNC_GAP 0x1c04
+#define mmOTG1_OTG_GSL_VSYNC_GAP_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_MODE 0x1c05
+#define mmOTG1_OTG_MASTER_UPDATE_MODE_BASE_IDX 2
+#define mmOTG1_OTG_CLOCK_CONTROL 0x1c06
+#define mmOTG1_OTG_CLOCK_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_VSTARTUP_PARAM 0x1c07
+#define mmOTG1_OTG_VSTARTUP_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_PARAM 0x1c08
+#define mmOTG1_OTG_VUPDATE_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_VREADY_PARAM 0x1c09
+#define mmOTG1_OTG_VREADY_PARAM_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS 0x1c0a
+#define mmOTG1_OTG_GLOBAL_SYNC_STATUS_BASE_IDX 2
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK 0x1c0b
+#define mmOTG1_OTG_MASTER_UPDATE_LOCK_BASE_IDX 2
+#define mmOTG1_OTG_GSL_CONTROL 0x1c0c
+#define mmOTG1_OTG_GSL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_X 0x1c0d
+#define mmOTG1_OTG_GSL_WINDOW_X_BASE_IDX 2
+#define mmOTG1_OTG_GSL_WINDOW_Y 0x1c0e
+#define mmOTG1_OTG_GSL_WINDOW_Y_BASE_IDX 2
+#define mmOTG1_OTG_VUPDATE_KEEPOUT 0x1c0f
+#define mmOTG1_OTG_VUPDATE_KEEPOUT_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL0 0x1c10
+#define mmOTG1_OTG_GLOBAL_CONTROL0_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL1 0x1c11
+#define mmOTG1_OTG_GLOBAL_CONTROL1_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL2 0x1c12
+#define mmOTG1_OTG_GLOBAL_CONTROL2_BASE_IDX 2
+#define mmOTG1_OTG_GLOBAL_CONTROL3 0x1c13
+#define mmOTG1_OTG_GLOBAL_CONTROL3_BASE_IDX 2
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL 0x1c14
+#define mmOTG1_OTG_TRIG_MANUAL_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_DRR_CONTROL 0x1c17
+#define mmOTG1_OTG_DRR_CONTROL_BASE_IDX 2
+#define mmOTG1_OTG_DSC_START_POSITION 0x1c19
+#define mmOTG1_OTG_DSC_START_POSITION_BASE_IDX 2
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+// base address: 0x0
+#define mmDWB_SOURCE_SELECT 0x1e2a
+#define mmDWB_SOURCE_SELECT_BASE_IDX 2
+#define mmGSL_SOURCE_SELECT 0x1e2b
+#define mmGSL_SOURCE_SELECT_BASE_IDX 2
+#define mmOPTC_CLOCK_CONTROL 0x1e2c
+#define mmOPTC_CLOCK_CONTROL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+// base address: 0x0
+#define mmDC_I2C_CONTROL 0x1e98
+#define mmDC_I2C_CONTROL_BASE_IDX 2
+#define mmDC_I2C_ARBITRATION 0x1e99
+#define mmDC_I2C_ARBITRATION_BASE_IDX 2
+#define mmDC_I2C_INTERRUPT_CONTROL 0x1e9a
+#define mmDC_I2C_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDC_I2C_SW_STATUS 0x1e9b
+#define mmDC_I2C_SW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_HW_STATUS 0x1e9c
+#define mmDC_I2C_DDC1_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC2_HW_STATUS 0x1e9d
+#define mmDC_I2C_DDC2_HW_STATUS_BASE_IDX 2
+#define mmDC_I2C_DDC1_SPEED 0x1ea2
+#define mmDC_I2C_DDC1_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC1_SETUP 0x1ea3
+#define mmDC_I2C_DDC1_SETUP_BASE_IDX 2
+#define mmDC_I2C_DDC2_SPEED 0x1ea4
+#define mmDC_I2C_DDC2_SPEED_BASE_IDX 2
+#define mmDC_I2C_DDC2_SETUP 0x1ea5
+#define mmDC_I2C_DDC2_SETUP_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION0 0x1eae
+#define mmDC_I2C_TRANSACTION0_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION1 0x1eaf
+#define mmDC_I2C_TRANSACTION1_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION2 0x1eb0
+#define mmDC_I2C_TRANSACTION2_BASE_IDX 2
+#define mmDC_I2C_TRANSACTION3 0x1eb1
+#define mmDC_I2C_TRANSACTION3_BASE_IDX 2
+#define mmDC_I2C_DATA 0x1eb2
+#define mmDC_I2C_DATA_BASE_IDX 2
+#define mmDC_I2C_EDID_DETECT_CTRL 0x1eb6
+#define mmDC_I2C_EDID_DETECT_CTRL_BASE_IDX 2
+#define mmDC_I2C_READ_REQUEST_INTERRUPT 0x1eb7
+#define mmDC_I2C_READ_REQUEST_INTERRUPT_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dio_misc_dispdec
+// base address: 0x0
+#define mmDIO_SCRATCH0 0x1eca
+#define mmDIO_SCRATCH0_BASE_IDX 2
+#define mmDIO_SCRATCH1 0x1ecb
+#define mmDIO_SCRATCH1_BASE_IDX 2
+#define mmDIO_SCRATCH2 0x1ecc
+#define mmDIO_SCRATCH2_BASE_IDX 2
+#define mmDIO_SCRATCH3 0x1ecd
+#define mmDIO_SCRATCH3_BASE_IDX 2
+#define mmDIO_SCRATCH4 0x1ece
+#define mmDIO_SCRATCH4_BASE_IDX 2
+#define mmDIO_SCRATCH5 0x1ecf
+#define mmDIO_SCRATCH5_BASE_IDX 2
+#define mmDIO_SCRATCH6 0x1ed0
+#define mmDIO_SCRATCH6_BASE_IDX 2
+#define mmDIO_SCRATCH7 0x1ed1
+#define mmDIO_SCRATCH7_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS 0x1edd
+#define mmDIO_MEM_PWR_STATUS_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL 0x1ede
+#define mmDIO_MEM_PWR_CTRL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL2 0x1edf
+#define mmDIO_MEM_PWR_CTRL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL 0x1ee0
+#define mmDIO_CLK_CNTL_BASE_IDX 2
+#define mmDIO_MEM_PWR_CTRL3 0x1ee1
+#define mmDIO_MEM_PWR_CTRL3_BASE_IDX 2
+#define mmDIO_POWER_MANAGEMENT_CNTL 0x1ee4
+#define mmDIO_POWER_MANAGEMENT_CNTL_BASE_IDX 2
+#define mmDIG_SOFT_RESET 0x1eee
+#define mmDIG_SOFT_RESET_BASE_IDX 2
+#define mmDIO_MEM_PWR_STATUS1 0x1ef0
+#define mmDIO_MEM_PWR_STATUS1_BASE_IDX 2
+#define mmDIO_CLK_CNTL2 0x1ef2
+#define mmDIO_CLK_CNTL2_BASE_IDX 2
+#define mmDIO_CLK_CNTL3 0x1ef3
+#define mmDIO_CLK_CNTL3_BASE_IDX 2
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL 0x1eff
+#define mmDIO_HDMI_RXSTATUS_TIMER_CONTROL_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE 0x1f02
+#define mmDIO_GENERIC_INTERRUPT_MESSAGE_BASE_IDX 2
+#define mmDIO_GENERIC_INTERRUPT_CLEAR 0x1f03
+#define mmDIO_GENERIC_INTERRUPT_CLEAR_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+// base address: 0x0
+#define mmHPD0_DC_HPD_INT_STATUS 0x1f14
+#define mmHPD0_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD0_DC_HPD_INT_CONTROL 0x1f15
+#define mmHPD0_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_CONTROL 0x1f16
+#define mmHPD0_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL 0x1f17
+#define mmHPD0_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL 0x1f18
+#define mmHPD0_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+// base address: 0x20
+#define mmHPD1_DC_HPD_INT_STATUS 0x1f1c
+#define mmHPD1_DC_HPD_INT_STATUS_BASE_IDX 2
+#define mmHPD1_DC_HPD_INT_CONTROL 0x1f1d
+#define mmHPD1_DC_HPD_INT_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_CONTROL 0x1f1e
+#define mmHPD1_DC_HPD_CONTROL_BASE_IDX 2
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL 0x1f1f
+#define mmHPD1_DC_HPD_FAST_TRAIN_CNTL_BASE_IDX 2
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL 0x1f20
+#define mmHPD1_DC_HPD_TOGGLE_FILT_CNTL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+// base address: 0x0
+#define mmDP_AUX0_AUX_CONTROL 0x1f50
+#define mmDP_AUX0_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_CONTROL 0x1f51
+#define mmDP_AUX0_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_ARB_CONTROL 0x1f52
+#define mmDP_AUX0_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL 0x1f53
+#define mmDP_AUX0_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_STATUS 0x1f54
+#define mmDP_AUX0_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_STATUS 0x1f55
+#define mmDP_AUX0_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_SW_DATA 0x1f56
+#define mmDP_AUX0_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_LS_DATA 0x1f57
+#define mmDP_AUX0_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL 0x1f58
+#define mmDP_AUX0_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL 0x1f59
+#define mmDP_AUX0_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0 0x1f5a
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1 0x1f5b
+#define mmDP_AUX0_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS 0x1f5c
+#define mmDP_AUX0_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS 0x1f5d
+#define mmDP_AUX0_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+// base address: 0x70
+#define mmDP_AUX1_AUX_CONTROL 0x1f6c
+#define mmDP_AUX1_AUX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_CONTROL 0x1f6d
+#define mmDP_AUX1_AUX_SW_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_ARB_CONTROL 0x1f6e
+#define mmDP_AUX1_AUX_ARB_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL 0x1f6f
+#define mmDP_AUX1_AUX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_STATUS 0x1f70
+#define mmDP_AUX1_AUX_SW_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_STATUS 0x1f71
+#define mmDP_AUX1_AUX_LS_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_SW_DATA 0x1f72
+#define mmDP_AUX1_AUX_SW_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_LS_DATA 0x1f73
+#define mmDP_AUX1_AUX_LS_DATA_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL 0x1f74
+#define mmDP_AUX1_AUX_DPHY_TX_REF_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL 0x1f75
+#define mmDP_AUX1_AUX_DPHY_TX_CONTROL_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0 0x1f76
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL0_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1 0x1f77
+#define mmDP_AUX1_AUX_DPHY_RX_CONTROL1_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS 0x1f78
+#define mmDP_AUX1_AUX_DPHY_TX_STATUS_BASE_IDX 2
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS 0x1f79
+#define mmDP_AUX1_AUX_DPHY_RX_STATUS_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+// base address: 0x0
+#define mmDIG0_DIG_FE_CNTL 0x2068
+#define mmDIG0_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL 0x2069
+#define mmDIG0_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT 0x206a
+#define mmDIG0_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_DIG_CLOCK_PATTERN 0x206b
+#define mmDIG0_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_TEST_PATTERN 0x206c
+#define mmDIG0_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED 0x206d
+#define mmDIG0_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG0_DIG_FIFO_STATUS 0x206e
+#define mmDIG0_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL 0x206f
+#define mmDIG0_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4 0x2070
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG0_HDMI_CONTROL 0x2071
+#define mmDIG0_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_STATUS 0x2072
+#define mmDIG0_HDMI_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL 0x2073
+#define mmDIG0_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL 0x2074
+#define mmDIG0_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL 0x2075
+#define mmDIG0_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0 0x2076
+#define mmDIG0_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1 0x2077
+#define mmDIG0_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0 0x2078
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_INTERRUPT_STATUS 0x2079
+#define mmDIG0_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG0_HDMI_GC 0x207b
+#define mmDIG0_HDMI_GC_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2 0x207c
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_0 0x207d
+#define mmDIG0_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_1 0x207e
+#define mmDIG0_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_2 0x207f
+#define mmDIG0_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_3 0x2080
+#define mmDIG0_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC1_4 0x2081
+#define mmDIG0_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_0 0x2082
+#define mmDIG0_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_1 0x2083
+#define mmDIG0_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_2 0x2084
+#define mmDIG0_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG0_AFMT_ISRC2_3 0x2085
+#define mmDIG0_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2 0x2086
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3 0x2087
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG0_HDMI_DB_CONTROL 0x2088
+#define mmDIG0_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG0_DME_CONTROL 0x2089
+#define mmDIG0_DME_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO0 0x208a
+#define mmDIG0_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_MPEG_INFO1 0x208b
+#define mmDIG0_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_HDR 0x208c
+#define mmDIG0_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_0 0x208d
+#define mmDIG0_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_1 0x208e
+#define mmDIG0_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_2 0x208f
+#define mmDIG0_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_3 0x2090
+#define mmDIG0_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_4 0x2091
+#define mmDIG0_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_5 0x2092
+#define mmDIG0_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_6 0x2093
+#define mmDIG0_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG0_AFMT_GENERIC_7 0x2094
+#define mmDIG0_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1 0x2095
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_0 0x2096
+#define mmDIG0_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_32_1 0x2097
+#define mmDIG0_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_0 0x2098
+#define mmDIG0_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_44_1 0x2099
+#define mmDIG0_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_0 0x209a
+#define mmDIG0_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_48_1 0x209b
+#define mmDIG0_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_0 0x209c
+#define mmDIG0_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG0_HDMI_ACR_STATUS_1 0x209d
+#define mmDIG0_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO0 0x209e
+#define mmDIG0_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_INFO1 0x209f
+#define mmDIG0_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG0_AFMT_60958_0 0x20a0
+#define mmDIG0_AFMT_60958_0_BASE_IDX 2
+#define mmDIG0_AFMT_60958_1 0x20a1
+#define mmDIG0_AFMT_60958_1_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL 0x20a2
+#define mmDIG0_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL0 0x20a3
+#define mmDIG0_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL1 0x20a4
+#define mmDIG0_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL2 0x20a5
+#define mmDIG0_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG0_AFMT_RAMP_CONTROL3 0x20a6
+#define mmDIG0_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG0_AFMT_60958_2 0x20a7
+#define mmDIG0_AFMT_60958_2_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT 0x20a8
+#define mmDIG0_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG0_AFMT_STATUS 0x20a9
+#define mmDIG0_AFMT_STATUS_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL 0x20aa
+#define mmDIG0_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL 0x20ab
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0 0x20ac
+#define mmDIG0_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL 0x20ad
+#define mmDIG0_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG0_DIG_BE_CNTL 0x20af
+#define mmDIG0_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_BE_EN_CNTL 0x20b0
+#define mmDIG0_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CNTL 0x20d3
+#define mmDIG0_TMDS_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL_CHAR 0x20d4
+#define mmDIG0_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK 0x20d5
+#define mmDIG0_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL 0x20d6
+#define mmDIG0_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1 0x20d7
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3 0x20d8
+#define mmDIG0_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG0_TMDS_CTL_BITS 0x20da
+#define mmDIG0_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG0_TMDS_DCBALANCER_CONTROL 0x20db
+#define mmDIG0_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR 0x20dc
+#define mmDIG0_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL 0x20dd
+#define mmDIG0_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL 0x20de
+#define mmDIG0_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG0_DIG_VERSION 0x20e0
+#define mmDIG0_DIG_VERSION_BASE_IDX 2
+#define mmDIG0_DIG_LANE_ENABLE 0x20e1
+#define mmDIG0_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG0_AFMT_CNTL 0x20e6
+#define mmDIG0_AFMT_CNTL_BASE_IDX 2
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1 0x20e7
+#define mmDIG0_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5 0x20f6
+#define mmDIG0_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+// base address: 0x0
+#define mmDP0_DP_LINK_CNTL 0x2108
+#define mmDP0_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP0_DP_PIXEL_FORMAT 0x2109
+#define mmDP0_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP0_DP_MSA_COLORIMETRY 0x210a
+#define mmDP0_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP0_DP_CONFIG 0x210b
+#define mmDP0_DP_CONFIG_BASE_IDX 2
+#define mmDP0_DP_VID_STREAM_CNTL 0x210c
+#define mmDP0_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_STEER_FIFO 0x210d
+#define mmDP0_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP0_DP_MSA_MISC 0x210e
+#define mmDP0_DP_MSA_MISC_BASE_IDX 2
+#define mmDP0_DP_VID_TIMING 0x2110
+#define mmDP0_DP_VID_TIMING_BASE_IDX 2
+#define mmDP0_DP_VID_N 0x2111
+#define mmDP0_DP_VID_N_BASE_IDX 2
+#define mmDP0_DP_VID_M 0x2112
+#define mmDP0_DP_VID_M_BASE_IDX 2
+#define mmDP0_DP_LINK_FRAMING_CNTL 0x2113
+#define mmDP0_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP0_DP_HBR2_EYE_PATTERN 0x2114
+#define mmDP0_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP0_DP_VID_MSA_VBID 0x2115
+#define mmDP0_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP0_DP_VID_INTERRUPT_CNTL 0x2116
+#define mmDP0_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CNTL 0x2117
+#define mmDP0_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL 0x2118
+#define mmDP0_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM0 0x2119
+#define mmDP0_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM1 0x211a
+#define mmDP0_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP0_DP_DPHY_SYM2 0x211b
+#define mmDP0_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP0_DP_DPHY_8B10B_CNTL 0x211c
+#define mmDP0_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_PRBS_CNTL 0x211d
+#define mmDP0_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_SCRAM_CNTL 0x211e
+#define mmDP0_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_EN 0x211f
+#define mmDP0_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_CNTL 0x2120
+#define mmDP0_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_RESULT 0x2121
+#define mmDP0_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_CNTL 0x2122
+#define mmDP0_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_CRC_MST_STATUS 0x2123
+#define mmDP0_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING 0x2124
+#define mmDP0_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS 0x2125
+#define mmDP0_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL 0x212b
+#define mmDP0_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL1 0x212c
+#define mmDP0_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING1 0x212d
+#define mmDP0_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING2 0x212e
+#define mmDP0_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING3 0x212f
+#define mmDP0_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP0_DP_SEC_FRAMING4 0x2130
+#define mmDP0_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N 0x2131
+#define mmDP0_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_N_READBACK 0x2132
+#define mmDP0_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M 0x2133
+#define mmDP0_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP0_DP_SEC_AUD_M_READBACK 0x2134
+#define mmDP0_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP0_DP_SEC_TIMESTAMP 0x2135
+#define mmDP0_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP0_DP_SEC_PACKET_CNTL 0x2136
+#define mmDP0_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_CNTL 0x2137
+#define mmDP0_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSE_RATE_UPDATE 0x2139
+#define mmDP0_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0 0x213a
+#define mmDP0_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1 0x213b
+#define mmDP0_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2 0x213c
+#define mmDP0_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT_UPDATE 0x213d
+#define mmDP0_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP0_DP_MSE_LINK_TIMING 0x213e
+#define mmDP0_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP0_DP_MSE_MISC_CNTL 0x213f
+#define mmDP0_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL 0x2144
+#define mmDP0_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL 0x2145
+#define mmDP0_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT0_STATUS 0x2147
+#define mmDP0_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT1_STATUS 0x2148
+#define mmDP0_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSE_SAT2_STATUS 0x2149
+#define mmDP0_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM1 0x214c
+#define mmDP0_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM2 0x214d
+#define mmDP0_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM3 0x214e
+#define mmDP0_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP0_DP_MSA_TIMING_PARAM4 0x214f
+#define mmDP0_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP0_DP_DSC_CNTL 0x2152
+#define mmDP0_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL2 0x2153
+#define mmDP0_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL3 0x2154
+#define mmDP0_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL4 0x2155
+#define mmDP0_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL5 0x2156
+#define mmDP0_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL6 0x2157
+#define mmDP0_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP0_DP_SEC_CNTL7 0x2158
+#define mmDP0_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP0_DP_DB_CNTL 0x2159
+#define mmDP0_DP_DB_CNTL_BASE_IDX 2
+#define mmDP0_DP_MSA_VBID_MISC 0x215a
+#define mmDP0_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION 0x215b
+#define mmDP0_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL 0x215c
+#define mmDP0_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+// base address: 0x400
+#define mmDIG1_DIG_FE_CNTL 0x2168
+#define mmDIG1_DIG_FE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL 0x2169
+#define mmDIG1_DIG_OUTPUT_CRC_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT 0x216a
+#define mmDIG1_DIG_OUTPUT_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_DIG_CLOCK_PATTERN 0x216b
+#define mmDIG1_DIG_CLOCK_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_TEST_PATTERN 0x216c
+#define mmDIG1_DIG_TEST_PATTERN_BASE_IDX 2
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED 0x216d
+#define mmDIG1_DIG_RANDOM_PATTERN_SEED_BASE_IDX 2
+#define mmDIG1_DIG_FIFO_STATUS 0x216e
+#define mmDIG1_DIG_FIFO_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL 0x216f
+#define mmDIG1_HDMI_METADATA_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4 0x2170
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL4_BASE_IDX 2
+#define mmDIG1_HDMI_CONTROL 0x2171
+#define mmDIG1_HDMI_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_STATUS 0x2172
+#define mmDIG1_HDMI_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL 0x2173
+#define mmDIG1_HDMI_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL 0x2174
+#define mmDIG1_HDMI_ACR_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL 0x2175
+#define mmDIG1_HDMI_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0 0x2176
+#define mmDIG1_HDMI_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1 0x2177
+#define mmDIG1_HDMI_INFOFRAME_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0 0x2178
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_INTERRUPT_STATUS 0x2179
+#define mmDIG1_AFMT_INTERRUPT_STATUS_BASE_IDX 2
+#define mmDIG1_HDMI_GC 0x217b
+#define mmDIG1_HDMI_GC_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2 0x217c
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_0 0x217d
+#define mmDIG1_AFMT_ISRC1_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_1 0x217e
+#define mmDIG1_AFMT_ISRC1_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_2 0x217f
+#define mmDIG1_AFMT_ISRC1_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_3 0x2180
+#define mmDIG1_AFMT_ISRC1_3_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC1_4 0x2181
+#define mmDIG1_AFMT_ISRC1_4_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_0 0x2182
+#define mmDIG1_AFMT_ISRC2_0_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_1 0x2183
+#define mmDIG1_AFMT_ISRC2_1_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_2 0x2184
+#define mmDIG1_AFMT_ISRC2_2_BASE_IDX 2
+#define mmDIG1_AFMT_ISRC2_3 0x2185
+#define mmDIG1_AFMT_ISRC2_3_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2 0x2186
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL2_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3 0x2187
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL3_BASE_IDX 2
+#define mmDIG1_HDMI_DB_CONTROL 0x2188
+#define mmDIG1_HDMI_DB_CONTROL_BASE_IDX 2
+#define mmDIG1_DME_CONTROL 0x2189
+#define mmDIG1_DME_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO0 0x218a
+#define mmDIG1_AFMT_MPEG_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_MPEG_INFO1 0x218b
+#define mmDIG1_AFMT_MPEG_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_HDR 0x218c
+#define mmDIG1_AFMT_GENERIC_HDR_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_0 0x218d
+#define mmDIG1_AFMT_GENERIC_0_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_1 0x218e
+#define mmDIG1_AFMT_GENERIC_1_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_2 0x218f
+#define mmDIG1_AFMT_GENERIC_2_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_3 0x2190
+#define mmDIG1_AFMT_GENERIC_3_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_4 0x2191
+#define mmDIG1_AFMT_GENERIC_4_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_5 0x2192
+#define mmDIG1_AFMT_GENERIC_5_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_6 0x2193
+#define mmDIG1_AFMT_GENERIC_6_BASE_IDX 2
+#define mmDIG1_AFMT_GENERIC_7 0x2194
+#define mmDIG1_AFMT_GENERIC_7_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1 0x2195
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_0 0x2196
+#define mmDIG1_HDMI_ACR_32_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_32_1 0x2197
+#define mmDIG1_HDMI_ACR_32_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_0 0x2198
+#define mmDIG1_HDMI_ACR_44_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_44_1 0x2199
+#define mmDIG1_HDMI_ACR_44_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_0 0x219a
+#define mmDIG1_HDMI_ACR_48_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_48_1 0x219b
+#define mmDIG1_HDMI_ACR_48_1_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_0 0x219c
+#define mmDIG1_HDMI_ACR_STATUS_0_BASE_IDX 2
+#define mmDIG1_HDMI_ACR_STATUS_1 0x219d
+#define mmDIG1_HDMI_ACR_STATUS_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO0 0x219e
+#define mmDIG1_AFMT_AUDIO_INFO0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_INFO1 0x219f
+#define mmDIG1_AFMT_AUDIO_INFO1_BASE_IDX 2
+#define mmDIG1_AFMT_60958_0 0x21a0
+#define mmDIG1_AFMT_60958_0_BASE_IDX 2
+#define mmDIG1_AFMT_60958_1 0x21a1
+#define mmDIG1_AFMT_60958_1_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL 0x21a2
+#define mmDIG1_AFMT_AUDIO_CRC_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL0 0x21a3
+#define mmDIG1_AFMT_RAMP_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL1 0x21a4
+#define mmDIG1_AFMT_RAMP_CONTROL1_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL2 0x21a5
+#define mmDIG1_AFMT_RAMP_CONTROL2_BASE_IDX 2
+#define mmDIG1_AFMT_RAMP_CONTROL3 0x21a6
+#define mmDIG1_AFMT_RAMP_CONTROL3_BASE_IDX 2
+#define mmDIG1_AFMT_60958_2 0x21a7
+#define mmDIG1_AFMT_60958_2_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT 0x21a8
+#define mmDIG1_AFMT_AUDIO_CRC_RESULT_BASE_IDX 2
+#define mmDIG1_AFMT_STATUS 0x21a9
+#define mmDIG1_AFMT_STATUS_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL 0x21aa
+#define mmDIG1_AFMT_AUDIO_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL 0x21ab
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL_BASE_IDX 2
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0 0x21ac
+#define mmDIG1_AFMT_INFOFRAME_CONTROL0_BASE_IDX 2
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL 0x21ad
+#define mmDIG1_AFMT_AUDIO_SRC_CONTROL_BASE_IDX 2
+#define mmDIG1_DIG_BE_CNTL 0x21af
+#define mmDIG1_DIG_BE_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_BE_EN_CNTL 0x21b0
+#define mmDIG1_DIG_BE_EN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CNTL 0x21d3
+#define mmDIG1_TMDS_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL_CHAR 0x21d4
+#define mmDIG1_TMDS_CONTROL_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK 0x21d5
+#define mmDIG1_TMDS_CONTROL0_FEEDBACK_BASE_IDX 2
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL 0x21d6
+#define mmDIG1_TMDS_STEREOSYNC_CTL_SEL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1 0x21d7
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_0_1_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3 0x21d8
+#define mmDIG1_TMDS_SYNC_CHAR_PATTERN_2_3_BASE_IDX 2
+#define mmDIG1_TMDS_CTL_BITS 0x21da
+#define mmDIG1_TMDS_CTL_BITS_BASE_IDX 2
+#define mmDIG1_TMDS_DCBALANCER_CONTROL 0x21db
+#define mmDIG1_TMDS_DCBALANCER_CONTROL_BASE_IDX 2
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR 0x21dc
+#define mmDIG1_TMDS_SYNC_DCBALANCE_CHAR_BASE_IDX 2
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL 0x21dd
+#define mmDIG1_TMDS_CTL0_1_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL 0x21de
+#define mmDIG1_TMDS_CTL2_3_GEN_CNTL_BASE_IDX 2
+#define mmDIG1_DIG_VERSION 0x21e0
+#define mmDIG1_DIG_VERSION_BASE_IDX 2
+#define mmDIG1_DIG_LANE_ENABLE 0x21e1
+#define mmDIG1_DIG_LANE_ENABLE_BASE_IDX 2
+#define mmDIG1_AFMT_CNTL 0x21e6
+#define mmDIG1_AFMT_CNTL_BASE_IDX 2
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1 0x21e7
+#define mmDIG1_AFMT_VBI_PACKET_CONTROL1_BASE_IDX 2
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5 0x21f6
+#define mmDIG1_HDMI_GENERIC_PACKET_CONTROL5_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+// base address: 0x400
+#define mmDP1_DP_LINK_CNTL 0x2208
+#define mmDP1_DP_LINK_CNTL_BASE_IDX 2
+#define mmDP1_DP_PIXEL_FORMAT 0x2209
+#define mmDP1_DP_PIXEL_FORMAT_BASE_IDX 2
+#define mmDP1_DP_MSA_COLORIMETRY 0x220a
+#define mmDP1_DP_MSA_COLORIMETRY_BASE_IDX 2
+#define mmDP1_DP_CONFIG 0x220b
+#define mmDP1_DP_CONFIG_BASE_IDX 2
+#define mmDP1_DP_VID_STREAM_CNTL 0x220c
+#define mmDP1_DP_VID_STREAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_STEER_FIFO 0x220d
+#define mmDP1_DP_STEER_FIFO_BASE_IDX 2
+#define mmDP1_DP_MSA_MISC 0x220e
+#define mmDP1_DP_MSA_MISC_BASE_IDX 2
+#define mmDP1_DP_VID_TIMING 0x2210
+#define mmDP1_DP_VID_TIMING_BASE_IDX 2
+#define mmDP1_DP_VID_N 0x2211
+#define mmDP1_DP_VID_N_BASE_IDX 2
+#define mmDP1_DP_VID_M 0x2212
+#define mmDP1_DP_VID_M_BASE_IDX 2
+#define mmDP1_DP_LINK_FRAMING_CNTL 0x2213
+#define mmDP1_DP_LINK_FRAMING_CNTL_BASE_IDX 2
+#define mmDP1_DP_HBR2_EYE_PATTERN 0x2214
+#define mmDP1_DP_HBR2_EYE_PATTERN_BASE_IDX 2
+#define mmDP1_DP_VID_MSA_VBID 0x2215
+#define mmDP1_DP_VID_MSA_VBID_BASE_IDX 2
+#define mmDP1_DP_VID_INTERRUPT_CNTL 0x2216
+#define mmDP1_DP_VID_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CNTL 0x2217
+#define mmDP1_DP_DPHY_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL 0x2218
+#define mmDP1_DP_DPHY_TRAINING_PATTERN_SEL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM0 0x2219
+#define mmDP1_DP_DPHY_SYM0_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM1 0x221a
+#define mmDP1_DP_DPHY_SYM1_BASE_IDX 2
+#define mmDP1_DP_DPHY_SYM2 0x221b
+#define mmDP1_DP_DPHY_SYM2_BASE_IDX 2
+#define mmDP1_DP_DPHY_8B10B_CNTL 0x221c
+#define mmDP1_DP_DPHY_8B10B_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_PRBS_CNTL 0x221d
+#define mmDP1_DP_DPHY_PRBS_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_SCRAM_CNTL 0x221e
+#define mmDP1_DP_DPHY_SCRAM_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_EN 0x221f
+#define mmDP1_DP_DPHY_CRC_EN_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_CNTL 0x2220
+#define mmDP1_DP_DPHY_CRC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_RESULT 0x2221
+#define mmDP1_DP_DPHY_CRC_RESULT_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_CNTL 0x2222
+#define mmDP1_DP_DPHY_CRC_MST_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_CRC_MST_STATUS 0x2223
+#define mmDP1_DP_DPHY_CRC_MST_STATUS_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING 0x2224
+#define mmDP1_DP_DPHY_FAST_TRAINING_BASE_IDX 2
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS 0x2225
+#define mmDP1_DP_DPHY_FAST_TRAINING_STATUS_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL 0x222b
+#define mmDP1_DP_SEC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL1 0x222c
+#define mmDP1_DP_SEC_CNTL1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING1 0x222d
+#define mmDP1_DP_SEC_FRAMING1_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING2 0x222e
+#define mmDP1_DP_SEC_FRAMING2_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING3 0x222f
+#define mmDP1_DP_SEC_FRAMING3_BASE_IDX 2
+#define mmDP1_DP_SEC_FRAMING4 0x2230
+#define mmDP1_DP_SEC_FRAMING4_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N 0x2231
+#define mmDP1_DP_SEC_AUD_N_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_N_READBACK 0x2232
+#define mmDP1_DP_SEC_AUD_N_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M 0x2233
+#define mmDP1_DP_SEC_AUD_M_BASE_IDX 2
+#define mmDP1_DP_SEC_AUD_M_READBACK 0x2234
+#define mmDP1_DP_SEC_AUD_M_READBACK_BASE_IDX 2
+#define mmDP1_DP_SEC_TIMESTAMP 0x2235
+#define mmDP1_DP_SEC_TIMESTAMP_BASE_IDX 2
+#define mmDP1_DP_SEC_PACKET_CNTL 0x2236
+#define mmDP1_DP_SEC_PACKET_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_CNTL 0x2237
+#define mmDP1_DP_MSE_RATE_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSE_RATE_UPDATE 0x2239
+#define mmDP1_DP_MSE_RATE_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0 0x223a
+#define mmDP1_DP_MSE_SAT0_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1 0x223b
+#define mmDP1_DP_MSE_SAT1_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2 0x223c
+#define mmDP1_DP_MSE_SAT2_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT_UPDATE 0x223d
+#define mmDP1_DP_MSE_SAT_UPDATE_BASE_IDX 2
+#define mmDP1_DP_MSE_LINK_TIMING 0x223e
+#define mmDP1_DP_MSE_LINK_TIMING_BASE_IDX 2
+#define mmDP1_DP_MSE_MISC_CNTL 0x223f
+#define mmDP1_DP_MSE_MISC_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL 0x2244
+#define mmDP1_DP_DPHY_BS_SR_SWAP_CNTL_BASE_IDX 2
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL 0x2245
+#define mmDP1_DP_DPHY_HBR2_PATTERN_CONTROL_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT0_STATUS 0x2247
+#define mmDP1_DP_MSE_SAT0_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT1_STATUS 0x2248
+#define mmDP1_DP_MSE_SAT1_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSE_SAT2_STATUS 0x2249
+#define mmDP1_DP_MSE_SAT2_STATUS_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM1 0x224c
+#define mmDP1_DP_MSA_TIMING_PARAM1_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM2 0x224d
+#define mmDP1_DP_MSA_TIMING_PARAM2_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM3 0x224e
+#define mmDP1_DP_MSA_TIMING_PARAM3_BASE_IDX 2
+#define mmDP1_DP_MSA_TIMING_PARAM4 0x224f
+#define mmDP1_DP_MSA_TIMING_PARAM4_BASE_IDX 2
+#define mmDP1_DP_DSC_CNTL 0x2252
+#define mmDP1_DP_DSC_CNTL_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL2 0x2253
+#define mmDP1_DP_SEC_CNTL2_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL3 0x2254
+#define mmDP1_DP_SEC_CNTL3_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL4 0x2255
+#define mmDP1_DP_SEC_CNTL4_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL5 0x2256
+#define mmDP1_DP_SEC_CNTL5_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL6 0x2257
+#define mmDP1_DP_SEC_CNTL6_BASE_IDX 2
+#define mmDP1_DP_SEC_CNTL7 0x2258
+#define mmDP1_DP_SEC_CNTL7_BASE_IDX 2
+#define mmDP1_DP_DB_CNTL 0x2259
+#define mmDP1_DP_DB_CNTL_BASE_IDX 2
+#define mmDP1_DP_MSA_VBID_MISC 0x225a
+#define mmDP1_DP_MSA_VBID_MISC_BASE_IDX 2
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION 0x225b
+#define mmDP1_DP_SEC_METADATA_TRANSMISSION_BASE_IDX 2
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL 0x225c
+#define mmDP1_DP_DSC_BYTES_PER_PIXEL_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+// base address: 0x0
+#define mmDC_GENERICA 0x2868
+#define mmDC_GENERICA_BASE_IDX 2
+#define mmUNIPHYA_LINK_CNTL 0x286d
+#define mmUNIPHYA_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL 0x286e
+#define mmUNIPHYA_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmUNIPHYB_LINK_CNTL 0x286f
+#define mmUNIPHYB_LINK_CNTL_BASE_IDX 2
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL 0x2870
+#define mmUNIPHYB_CHANNEL_XBAR_CNTL_BASE_IDX 2
+#define mmDCIO_WRCMD_DELAY 0x287e
+#define mmDCIO_WRCMD_DELAY_BASE_IDX 2
+#define mmDC_PINSTRAPS 0x2880
+#define mmDC_PINSTRAPS_BASE_IDX 2
+#define mmDCIO_CLOCK_CNTL 0x2895
+#define mmDCIO_CLOCK_CNTL_BASE_IDX 2
+#define mmDCIO_SOFT_RESET 0x289e
+#define mmDCIO_SOFT_RESET_BASE_IDX 2
+
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+// base address: 0x0
+#define mmDC_GPIO_DDC1_MASK 0x28d0
+#define mmDC_GPIO_DDC1_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC1_A 0x28d1
+#define mmDC_GPIO_DDC1_A_BASE_IDX 2
+#define mmDC_GPIO_DDC1_EN 0x28d2
+#define mmDC_GPIO_DDC1_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC1_Y 0x28d3
+#define mmDC_GPIO_DDC1_Y_BASE_IDX 2
+#define mmDC_GPIO_DDC2_MASK 0x28d4
+#define mmDC_GPIO_DDC2_MASK_BASE_IDX 2
+#define mmDC_GPIO_DDC2_A 0x28d5
+#define mmDC_GPIO_DDC2_A_BASE_IDX 2
+#define mmDC_GPIO_DDC2_EN 0x28d6
+#define mmDC_GPIO_DDC2_EN_BASE_IDX 2
+#define mmDC_GPIO_DDC2_Y 0x28d7
+#define mmDC_GPIO_DDC2_Y_BASE_IDX 2
+#define mmDC_GPIO_HPD_MASK 0x28f4
+#define mmDC_GPIO_HPD_MASK_BASE_IDX 2
+#define mmDC_GPIO_HPD_A 0x28f5
+#define mmDC_GPIO_HPD_A_BASE_IDX 2
+#define mmDC_GPIO_HPD_EN 0x28f6
+#define mmDC_GPIO_HPD_EN_BASE_IDX 2
+#define mmDC_GPIO_HPD_Y 0x28f7
+#define mmDC_GPIO_HPD_Y_BASE_IDX 2
+#define mmDC_GPIO_PAD_STRENGTH_1 0x28fc
+#define mmDC_GPIO_PAD_STRENGTH_1_BASE_IDX 2
+#define mmPHY_AUX_CNTL 0x28ff
+#define mmPHY_AUX_CNTL_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_1 0x2917
+#define mmDC_GPIO_AUX_CTRL_1_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_2 0x2918
+#define mmDC_GPIO_AUX_CTRL_2_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_3 0x291b
+#define mmDC_GPIO_AUX_CTRL_3_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_4 0x291c
+#define mmDC_GPIO_AUX_CTRL_4_BASE_IDX 2
+#define mmDC_GPIO_AUX_CTRL_5 0x291d
+#define mmDC_GPIO_AUX_CTRL_5_BASE_IDX 2
+#define mmAUXI2C_PAD_ALL_PWR_OK 0x291e
+#define mmAUXI2C_PAD_ALL_PWR_OK_BASE_IDX 2
+
+// addressBlock: azf0endpoint0_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0endpoint1_endpointind
+// base address: 0x0
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL 0x0007
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE 0x0008
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING 0x0009
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA 0x000c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN 0x000d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX 0x000e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE 0x0023
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER 0x0025
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0 0x0028
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1 0x0029
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2 0x002a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3 0x002b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4 0x002c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5 0x002d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6 0x002e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7 0x002f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8 0x0030
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9 0x0031
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10 0x0032
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11 0x0033
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12 0x0034
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13 0x0035
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC 0x0037
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0 0x003a
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1 0x003b
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2 0x003c
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3 0x003d
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4 0x003e
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5 0x003f
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6 0x0040
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7 0x0041
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8 0x0042
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0057
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE 0x0058
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0 0x0059
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1 0x005a
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2 0x005b
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3 0x005c
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4 0x005d
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5 0x005e
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6 0x005f
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7 0x0060
+#define ixAZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8 0x0061
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO 0x0062
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS 0x0063
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE 0x0067
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED 0x0068
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION 0x0069
+#define ixAZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE 0x006a
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS 0x006b
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS 0x006c
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS 0x006d
+#define ixAZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS 0x006e
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+// base address: 0x0
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0001
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT 0x0002
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID 0x0003
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER 0x0004
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS 0x0005
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES 0x0006
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES 0x0020
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES 0x0021
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE 0x0022
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE 0x0023
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL 0x0024
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE 0x0036
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2 0x0037
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR 0x0038
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION 0x0053
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL 0x0054
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE 0x0055
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT 0x0056
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL 0x0064
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB 0x0065
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT 0x0066
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL 0x0067
+#define ixAZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME 0x0068
+
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
new file mode 100755
index 000000000000..91969554e36a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_0_3_sh_mask.h
@@ -0,0 +1,22091 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _dcn_2_0_3_SH_MASK_HEADER
+#define _dcn_2_0_3_SH_MASK_HEADER
+
+
+// addressBlock: dce_dc_dccg_dccg_dispdec
+//PHYPLLA_PIXCLK_RESYNC_CNTL
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLA_PIXCLK_RESYNC_CNTL__PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//PHYPLLB_PIXCLK_RESYNC_CNTL
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE__SHIFT 0x0
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL__SHIFT 0x4
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE__SHIFT 0x8
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE__SHIFT 0x9
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_RESYNC_ENABLE_MASK 0x00000001L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_DCCG_DEEP_COLOR_CNTL_MASK 0x00000030L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_ENABLE_MASK 0x00000100L
+#define PHYPLLB_PIXCLK_RESYNC_CNTL__PHYPLLB_PIXCLK_DOUBLE_RATE_ENABLE_MASK 0x00000200L
+//DP_DTO_DBUF_EN
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN__SHIFT 0x0
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN__SHIFT 0x1
+#define DP_DTO_DBUF_EN__DP_DTO0_DBUF_EN_MASK 0x00000001L
+#define DP_DTO_DBUF_EN__DP_DTO1_DBUF_EN_MASK 0x00000002L
+//DPREFCLK_CGTT_BLK_CTRL_REG
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPREFCLK_CGTT_BLK_CTRL_REG__DPREFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//REFCLK_CNTL
+#define REFCLK_CNTL__REFCLK_CLOCK_EN__SHIFT 0x0
+#define REFCLK_CNTL__REFCLK_SRC_SEL__SHIFT 0x1
+#define REFCLK_CNTL__REFCLK_CLOCK_EN_MASK 0x00000001L
+#define REFCLK_CNTL__REFCLK_SRC_SEL_MASK 0x00000002L
+//DCCG_DS_DTO_INCR
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR__SHIFT 0x0
+#define DCCG_DS_DTO_INCR__DCCG_DS_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_DS_DTO_MODULO
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO__SHIFT 0x0
+#define DCCG_DS_DTO_MODULO__DCCG_DS_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_DS_CNTL
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE__SHIFT 0x0
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC__SHIFT 0x4
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE__SHIFT 0x8
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS__SHIFT 0x9
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV__SHIFT 0x10
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS__SHIFT 0x18
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL__SHIFT 0x19
+#define DCCG_DS_CNTL__DCCG_DS_ENABLE_MASK 0x00000001L
+#define DCCG_DS_CNTL__DCCG_DS_REF_SRC_MASK 0x00000030L
+#define DCCG_DS_CNTL__DCCG_DS_HW_CAL_ENABLE_MASK 0x00000100L
+#define DCCG_DS_CNTL__DCCG_DS_ENABLED_STATUS_MASK 0x00000200L
+#define DCCG_DS_CNTL__DCCG_DS_XTALIN_RATE_DIV_MASK 0x00030000L
+#define DCCG_DS_CNTL__DCCG_DS_JITTER_REMOVE_DIS_MASK 0x01000000L
+#define DCCG_DS_CNTL__DCCG_DS_DELAY_XTAL_SEL_MASK 0x02000000L
+//DCCG_DS_HW_CAL_INTERVAL
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL__SHIFT 0x0
+#define DCCG_DS_HW_CAL_INTERVAL__DCCG_DS_HW_CAL_INTERVAL_MASK 0xFFFFFFFFL
+//DPREFCLK_CNTL
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL__SHIFT 0x0
+#define DPREFCLK_CNTL__DPREFCLK_SRC_SEL_MASK 0x00000007L
+//DCE_VERSION
+#define DCE_VERSION__MAJOR_VERSION__SHIFT 0x0
+#define DCE_VERSION__MINOR_VERSION__SHIFT 0x8
+#define DCE_VERSION__MAJOR_VERSION_MASK 0x000000FFL
+#define DCE_VERSION__MINOR_VERSION_MASK 0x0000FF00L
+//DCCG_GTC_CNTL
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE__SHIFT 0x0
+#define DCCG_GTC_CNTL__DCCG_GTC_ENABLE_MASK 0x00000001L
+//DCCG_GTC_DTO_INCR
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR__SHIFT 0x0
+#define DCCG_GTC_DTO_INCR__DCCG_GTC_DTO_INCR_MASK 0xFFFFFFFFL
+//DCCG_GTC_DTO_MODULO
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO__SHIFT 0x0
+#define DCCG_GTC_DTO_MODULO__DCCG_GTC_DTO_MODULO_MASK 0xFFFFFFFFL
+//DCCG_GTC_CURRENT
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT__SHIFT 0x0
+#define DCCG_GTC_CURRENT__DCCG_GTC_CURRENT_MASK 0xFFFFFFFFL
+//MILLISECOND_TIME_BASE_DIV
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_DIV_MASK 0x0001FFFFL
+#define MILLISECOND_TIME_BASE_DIV__MILLISECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DISPCLK_FREQ_CHANGE_CNTL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY__SHIFT 0x0
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE__SHIFT 0x10
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE__SHIFT 0x14
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES__SHIFT 0x19
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET__SHIFT 0x1c
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE__SHIFT 0x1d
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN__SHIFT 0x1e
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE__SHIFT 0x1f
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_DELAY_MASK 0x00003FFFL
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_STEP_SIZE_MASK 0x000F0000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_FREQ_RAMP_DONE_MASK 0x00100000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_MAX_ERRDET_CYCLES_MASK 0x0E000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_RESET_MASK 0x10000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_STATE_MASK 0x20000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DCCG_FIFO_ERRDET_OVR_EN_MASK 0x40000000L
+#define DISPCLK_FREQ_CHANGE_CNTL__DISPCLK_CHG_FWD_CORR_DISABLE_MASK 0x80000000L
+//DC_MEM_GLOBAL_PWR_REQ_CNTL
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS__SHIFT 0x0
+#define DC_MEM_GLOBAL_PWR_REQ_CNTL__DC_MEM_GLOBAL_PWR_REQ_DIS_MASK 0x00000001L
+//DCCG_PERFMON_CNTL
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE__SHIFT 0x0
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE__SHIFT 0x1
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE__SHIFT 0x2
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE__SHIFT 0x3
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE__SHIFT 0x4
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN__SHIFT 0x5
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC__SHIFT 0x6
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC__SHIFT 0x7
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL__SHIFT 0x8
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV__SHIFT 0xb
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DISPCLK_ENABLE_MASK 0x00000001L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_DPREFCLK_ENABLE_MASK 0x00000002L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYA_PIXCLK_ENABLE_MASK 0x00000004L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_UNIPHYB_PIXCLK_ENABLE_MASK 0x00000008L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_PIXCLK0_ENABLE_MASK 0x00000010L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_RUN_MASK 0x00000020L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_VSYNC_MASK 0x00000040L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_MODE_HSYNC_MASK 0x00000080L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_OTG_SEL_MASK 0x00000700L
+#define DCCG_PERFMON_CNTL__DCCG_PERF_XTALIN_PULSE_DIV_MASK 0xFFFFF800L
+//DCCG_GATE_DISABLE_CNTL
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE__SHIFT 0x8
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE__SHIFT 0x9
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE__SHIFT 0xa
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE__SHIFT 0xb
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE__SHIFT 0xc
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE__SHIFT 0x1a
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE__SHIFT 0x1b
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE__SHIFT 0x1c
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE__SHIFT 0x1d
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE__SHIFT 0x1e
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_DCCG_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL__DISPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL__SOCCLK_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL__DACACLK_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL__DVOACLK_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_R_DCCG_GATE_DISABLE_MASK 0x00000100L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_GATE_DISABLE_MASK 0x00000200L
+#define DCCG_GATE_DISABLE_CNTL__DPPCLK_R_DCCG_GATE_DISABLE_MASK 0x00000400L
+#define DCCG_GATE_DISABLE_CNTL__DSCCLK_GATE_DISABLE_MASK 0x00000800L
+#define DCCG_GATE_DISABLE_CNTL__DMCUBCLK_GATE_DISABLE_MASK 0x00001000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK0_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK1_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL__AOMCLK2_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL__AUDIO_DTO2_CLK_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL__DPREFCLK_GTC_GATE_DISABLE_MASK 0x00400000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_GATE_DISABLE_MASK 0x04000000L
+#define DCCG_GATE_DISABLE_CNTL__REFCLK_R_DIG_GATE_DISABLE_MASK 0x08000000L
+#define DCCG_GATE_DISABLE_CNTL__DSICLK_GATE_DISABLE_MASK 0x10000000L
+#define DCCG_GATE_DISABLE_CNTL__BYTECLK_GATE_DISABLE_MASK 0x20000000L
+#define DCCG_GATE_DISABLE_CNTL__ESCCLK_GATE_DISABLE_MASK 0x40000000L
+//DISPCLK_CGTT_BLK_CTRL_REG
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DISPCLK_CGTT_BLK_CTRL_REG__DISPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//SOCCLK_CGTT_BLK_CTRL_REG
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SOCCLK_CGTT_BLK_CTRL_REG__SOCCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_CAC_STATUS
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA__SHIFT 0x0
+#define DCCG_CAC_STATUS__CAC_STATUS_RDDATA_MASK 0xFFFFFFFFL
+//MICROSECOND_TIME_BASE_DIV
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV__SHIFT 0x0
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV__SHIFT 0x8
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL__SHIFT 0x10
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL__SHIFT 0x11
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL__SHIFT 0x14
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_DIV_MASK 0x0000007FL
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_DIV_MASK 0x00007F00L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_SEL_MASK 0x00010000L
+#define MICROSECOND_TIME_BASE_DIV__XTAL_REF_CLOCK_SOURCE_SEL_MASK 0x00020000L
+#define MICROSECOND_TIME_BASE_DIV__MICROSECOND_TIME_BASE_CLOCK_SOURCE_SEL_MASK 0x00100000L
+//DCCG_GATE_DISABLE_CNTL2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE__SHIFT 0x0
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE__SHIFT 0x1
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE__SHIFT 0x2
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE__SHIFT 0x3
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE__SHIFT 0x4
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE__SHIFT 0x5
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE__SHIFT 0x6
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE__SHIFT 0x10
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE__SHIFT 0x11
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE__SHIFT 0x12
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE__SHIFT 0x13
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE__SHIFT 0x14
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE__SHIFT 0x15
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE__SHIFT 0x16
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_FE_GATE_DISABLE_MASK 0x00000001L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_FE_GATE_DISABLE_MASK 0x00000002L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_FE_GATE_DISABLE_MASK 0x00000004L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_FE_GATE_DISABLE_MASK 0x00000008L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_FE_GATE_DISABLE_MASK 0x00000010L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_FE_GATE_DISABLE_MASK 0x00000020L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_FE_GATE_DISABLE_MASK 0x00000040L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKA_GATE_DISABLE_MASK 0x00010000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKB_GATE_DISABLE_MASK 0x00020000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKC_GATE_DISABLE_MASK 0x00040000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKD_GATE_DISABLE_MASK 0x00080000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKE_GATE_DISABLE_MASK 0x00100000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKF_GATE_DISABLE_MASK 0x00200000L
+#define DCCG_GATE_DISABLE_CNTL2__SYMCLKG_GATE_DISABLE_MASK 0x00400000L
+//SYMCLK_CGTT_BLK_CTRL_REG
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY__SHIFT 0x0
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define SYMCLK_CGTT_BLK_CTRL_REG__SYMCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DCCG_DISP_CNTL_REG
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ__SHIFT 0x8
+#define DCCG_DISP_CNTL_REG__ALLOW_SR_ON_TRANS_REQ_MASK 0x00000100L
+//OTG0_PIXEL_RATE_CNTL
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE__SHIFT 0x4
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE__SHIFT 0x5
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL__SHIFT 0x8
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL__SHIFT 0x9
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG0_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_ENABLE_MASK 0x00000010L
+#define OTG0_PIXEL_RATE_CNTL__DP_DTO0_DS_DISABLE_MASK 0x00000020L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_ADD_PIXEL_MASK 0x00000100L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DROP_PIXEL_MASK 0x00000200L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG0_PIXEL_RATE_CNTL__OTG0_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO0_PHASE
+#define DP_DTO0_PHASE__DP_DTO0_PHASE__SHIFT 0x0
+#define DP_DTO0_PHASE__DP_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO0_MODULO
+#define DP_DTO0_MODULO__DP_DTO0_MODULO__SHIFT 0x0
+#define DP_DTO0_MODULO__DP_DTO0_MODULO_MASK 0xFFFFFFFFL
+//OTG0_PHYPLL_PIXEL_RATE_CNTL
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG0_PHYPLL_PIXEL_RATE_CNTL__OTG0_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//OTG1_PIXEL_RATE_CNTL
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE__SHIFT 0x4
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE__SHIFT 0x5
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL__SHIFT 0x8
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL__SHIFT 0x9
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN__SHIFT 0xb
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR__SHIFT 0xe
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT__SHIFT 0x10
+#define OTG1_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_SOURCE_MASK 0x00000003L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_ENABLE_MASK 0x00000010L
+#define OTG1_PIXEL_RATE_CNTL__DP_DTO1_DS_DISABLE_MASK 0x00000020L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_ADD_PIXEL_MASK 0x00000100L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DROP_PIXEL_MASK 0x00000200L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DISPOUT_HALF_RATE_EN_MASK 0x00000800L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_FIFO_ERROR_MASK 0x0000C000L
+#define OTG1_PIXEL_RATE_CNTL__OTG1_DIO_ERROR_COUNT_MASK 0x0FFF0000L
+//DP_DTO1_PHASE
+#define DP_DTO1_PHASE__DP_DTO1_PHASE__SHIFT 0x0
+#define DP_DTO1_PHASE__DP_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DP_DTO1_MODULO
+#define DP_DTO1_MODULO__DP_DTO1_MODULO__SHIFT 0x0
+#define DP_DTO1_MODULO__DP_DTO1_MODULO_MASK 0xFFFFFFFFL
+//OTG1_PHYPLL_PIXEL_RATE_CNTL
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE__SHIFT 0x0
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE__SHIFT 0x4
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PHYPLL_PIXEL_RATE_SOURCE_MASK 0x00000007L
+#define OTG1_PHYPLL_PIXEL_RATE_CNTL__OTG1_PIXEL_RATE_PLL_SOURCE_MASK 0x00000010L
+//DPPCLK_CGTT_BLK_CTRL_REG
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DPPCLK_CGTT_BLK_CTRL_REG__DPPCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+//DPPCLK0_DTO_PARAM
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE__SHIFT 0x0
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO__SHIFT 0x10
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK0_DTO_PARAM__DPPCLK0_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK1_DTO_PARAM
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE__SHIFT 0x0
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO__SHIFT 0x10
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK1_DTO_PARAM__DPPCLK1_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK2_DTO_PARAM
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE__SHIFT 0x0
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO__SHIFT 0x10
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK2_DTO_PARAM__DPPCLK2_DTO_MODULO_MASK 0x00FF0000L
+//DPPCLK3_DTO_PARAM
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE__SHIFT 0x0
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO__SHIFT 0x10
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_PHASE_MASK 0x000000FFL
+#define DPPCLK3_DTO_PARAM__DPPCLK3_DTO_MODULO_MASK 0x00FF0000L
+//DCCG_CAC_STATUS2
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2__SHIFT 0x0
+#define DCCG_CAC_STATUS2__CAC_STATUS_RDDATA2_MASK 0x0000000FL
+//SYMCLKA_CLOCK_ENABLE
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKA_CLOCK_ENABLE__SYMCLKA_FE_FORCE_SRC_MASK 0x00000700L
+//SYMCLKB_CLOCK_ENABLE
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE__SHIFT 0x0
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN__SHIFT 0x4
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC__SHIFT 0x8
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_CLOCK_ENABLE_MASK 0x00000001L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_EN_MASK 0x00000010L
+#define SYMCLKB_CLOCK_ENABLE__SYMCLKB_FE_FORCE_SRC_MASK 0x00000700L
+//DCCG_AUDIO_DTO_SOURCE
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL__SHIFT 0x0
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL__SHIFT 0x4
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL__SHIFT 0xc
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN__SHIFT 0x10
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO__SHIFT 0x14
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO__SHIFT 0x18
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO__SHIFT 0x1c
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_SOURCE_SEL_MASK 0x00000007L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO_SEL_MASK 0x00000030L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_SOURCE_SEL_MASK 0x00003000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_CLOCK_EN_MASK 0x00010000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO2_USE_512FBR_DTO_MASK 0x00100000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO0_USE_512FBR_DTO_MASK 0x01000000L
+#define DCCG_AUDIO_DTO_SOURCE__DCCG_AUDIO_DTO1_USE_512FBR_DTO_MASK 0x10000000L
+//DCCG_AUDIO_DTO0_PHASE
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_PHASE__DCCG_AUDIO_DTO0_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO0_MODULE
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO0_MODULE__DCCG_AUDIO_DTO0_MODULE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_PHASE
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_PHASE__DCCG_AUDIO_DTO1_PHASE_MASK 0xFFFFFFFFL
+//DCCG_AUDIO_DTO1_MODULE
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE__SHIFT 0x0
+#define DCCG_AUDIO_DTO1_MODULE__DCCG_AUDIO_DTO1_MODULE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG0_LATCH_VALUE
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG0_LATCH_VALUE__DCCG_VSYNC_CNT_OTG0_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DCCG_VSYNC_OTG1_LATCH_VALUE
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE__SHIFT 0x0
+#define DCCG_VSYNC_OTG1_LATCH_VALUE__DCCG_VSYNC_CNT_OTG1_LATCH_VALUE_MASK 0xFFFFFFFFL
+//DPPCLK_DTO_CTRL
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE__SHIFT 0x0
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN__SHIFT 0x1
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE__SHIFT 0x4
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN__SHIFT 0x5
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE__SHIFT 0x8
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN__SHIFT 0x9
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE__SHIFT 0xc
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN__SHIFT 0xd
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_ENABLE_MASK 0x00000001L
+#define DPPCLK_DTO_CTRL__DPPCLK0_DTO_DB_EN_MASK 0x00000002L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_ENABLE_MASK 0x00000010L
+#define DPPCLK_DTO_CTRL__DPPCLK1_DTO_DB_EN_MASK 0x00000020L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_ENABLE_MASK 0x00000100L
+#define DPPCLK_DTO_CTRL__DPPCLK2_DTO_DB_EN_MASK 0x00000200L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_ENABLE_MASK 0x00001000L
+#define DPPCLK_DTO_CTRL__DPPCLK3_DTO_DB_EN_MASK 0x00002000L
+//DCCG_VSYNC_CNT_CTRL
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE__SHIFT 0x0
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL__SHIFT 0x1
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET__SHIFT 0x2
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL__SHIFT 0x3
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL__SHIFT 0x4
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN__SHIFT 0x10
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN__SHIFT 0x11
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL__SHIFT 0x18
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL__SHIFT 0x19
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_ENABLE_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_REFCLK_SEL_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_SW_RESET_MASK 0x00000004L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_RESET_SEL_MASK 0x00000008L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_EXT_TRIG_SEL_MASK 0x000000F0L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK 0x00000F00L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_LATCH_EN_MASK 0x00010000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_LATCH_EN_MASK 0x00020000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG0_VSYNC_TRIG_SEL_MASK 0x01000000L
+#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_OTG1_VSYNC_TRIG_SEL_MASK 0x02000000L
+//DCCG_VSYNC_CNT_INT_CTRL
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR__SHIFT 0x0
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR__SHIFT 0x1
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK__SHIFT 0x8
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK__SHIFT 0x9
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_INTERRUPT_CLEAR_MASK 0x00000001L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_INTERRUPT_CLEAR_MASK 0x00000002L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG0_LATCH_MASK_MASK 0x00000100L
+#define DCCG_VSYNC_CNT_INT_CTRL__DCCG_VSYNC_CNT_OTG1_LATCH_MASK_MASK 0x00000200L
+
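Editor's note, not part of this patch: the pairs above (and throughout this header) are consumed by combining each field's `_MASK` and `__SHIFT` macro to isolate or update the field inside a 32-bit register value. A minimal self-contained C sketch of that pattern follows, using the DCCG_VSYNC_CNT_CTRL frame-count field defined just above; the helpers `get_field()`/`set_field()` are hypothetical names for this illustration, not kernel APIs (drivers wrap the same arithmetic in their own REG_GET/REG_SET-style macros).

/* Sketch: generic field read/update using the generated __SHIFT/_MASK macros. */
#include <stdint.h>
#include <stdio.h>

/* Copied from the definitions above so the example stands alone. */
#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT 0x8
#define DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK   0x00000F00L

static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	/* Isolate the field bits, then right-align them. */
	return (reg & mask) >> shift;
}

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	/* Clear the field, then write the new value into its bit position. */
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg,
			DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK,
			DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT, 0x5);
	printf("FRAME_CNT = 0x%x\n",
	       get_field(reg,
			 DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT_MASK,
			 DCCG_VSYNC_CNT_CTRL__DCCG_VSYNC_CNT_FRAME_CNT__SHIFT));
	return 0;
}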
+
+// addressBlock: dce_dc_dccg_dccg_dfs_dispdec
+//DENTIST_DISPCLK_CNTL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE__SHIFT 0xf
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG__SHIFT 0x11
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG__SHIFT 0x12
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG__SHIFT 0x15
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG__SHIFT 0x16
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_MODE_MASK 0x00018000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHGTOG_MASK 0x00020000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_DONETOG_MASK 0x00040000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHGTOG_MASK 0x00200000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_DONETOG_MASK 0x00400000L
+#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
+//DISP_INTERRUPT_STATUS
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT__SHIFT 0x1
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT__SHIFT 0x4
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT__SHIFT 0x5
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT__SHIFT 0x6
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT__SHIFT 0x7
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT__SHIFT 0x8
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT__SHIFT 0x9
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0xa
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT__SHIFT 0xf
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT__SHIFT 0x10
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT__SHIFT 0x11
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT__SHIFT 0x12
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT__SHIFT 0x13
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT__SHIFT 0x14
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT__SHIFT 0x16
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT__SHIFT 0x17
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT__SHIFT 0x18
+#define DISP_INTERRUPT_STATUS__DMCU_UC_INTERNAL_INT__SHIFT 0x1a
+#define DISP_INTERRUPT_STATUS__DMCU_SCP_INT__SHIFT 0x1b
+#define DISP_INTERRUPT_STATUS__ABM1_HG_READY_INT__SHIFT 0x1c
+#define DISP_INTERRUPT_STATUS__ABM1_LS_READY_INT__SHIFT 0x1d
+#define DISP_INTERRUPT_STATUS__ABM1_BL_UPDATE_INT__SHIFT 0x1e
+#define DISP_INTERRUPT_STATUS__DISP_INTERRUPT_STATUS_CONTINUE__SHIFT 0x1f
+#define DISP_INTERRUPT_STATUS__OPTC1_DATA_UNDERFLOW_INTERRUPT_MASK 0x00000002L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SNAPSHOT_INTERRUPT_MASK 0x00000010L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_VSYNC_NEXT_LINE_INTERRUPT_MASK 0x00000020L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_FORCE_COUNT_NOW_INTERRUPT_MASK 0x00000040L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGA_INTERRUPT_MASK 0x00000080L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_TRIGB_INTERRUPT_MASK 0x00000100L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_VSYNC_NOM_INTERRUPT_MASK 0x00000200L
+#define DISP_INTERRUPT_STATUS__OTG1_IHC_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000400L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT_MASK 0x00008000L
+#define DISP_INTERRUPT_STATUS__DIGA_DP_VID_STREAM_DISABLE_INTERRUPT_MASK 0x00010000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_INTERRUPT_MASK 0x00020000L
+#define DISP_INTERRUPT_STATUS__DC_HPD1_RX_INTERRUPT_MASK 0x00040000L
+#define DISP_INTERRUPT_STATUS__AUX1_SW_DONE_INTERRUPT_MASK 0x00080000L
+#define DISP_INTERRUPT_STATUS__AUX1_LS_DONE_INTERRUPT_MASK 0x00100000L
+#define DISP_INTERRUPT_STATUS__DACA_AUTODETECT_GENERITE_INTERRUPT_MASK 0x00400000L
+#define DISP_INTERRUPT_STATUS__RBBMIF_IHC_TIMEOUT_INTERRUPT_MASK 0x00800000L
+#define DISP_INTERRUPT_STATUS__DC_I2C_SW_DONE_INTERRUPT_MASK 0x01000000L
+
+//AZ_CLOCK_CNTL
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS__SHIFT 0x0
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS__SHIFT 0x8
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS__SHIFT 0x10
+#define AZ_CLOCK_CNTL__SCLK_G_STREAM_AZ_GATE_DIS_MASK 0x00000001L
+#define AZ_CLOCK_CNTL__SCLK_R_AZ_GATE_DIS_MASK 0x00000100L
+#define AZ_CLOCK_CNTL__SCLK_G_CNTL_AZ_GATE_DIS_MASK 0x00010000L
+
+// addressBlock: dce_dc_hda_azf0endpoint0_dispdec
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0endpoint1_dispdec
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_INDEX__AZALIA_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_ENDPOINT_DATA__AZALIA_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_hda_azf0controller_dispdec
+//AZALIA_CONTROLLER_CLOCK_GATING
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING__SHIFT 0x0
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE__SHIFT 0x4
+#define AZALIA_CONTROLLER_CLOCK_GATING__ENABLE_CLOCK_GATING_MASK 0x00000001L
+#define AZALIA_CONTROLLER_CLOCK_GATING__CLOCK_ON_STATE_MASK 0x00000010L
+//AZALIA_AUDIO_DTO
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE__SHIFT 0x0
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE__SHIFT 0x10
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_PHASE_MASK 0x0000FFFFL
+#define AZALIA_AUDIO_DTO__AZALIA_AUDIO_DTO_MODULE_MASK 0xFFFF0000L
+//AZALIA_AUDIO_DTO_CONTROL
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO__SHIFT 0x8
+#define AZALIA_AUDIO_DTO_CONTROL__AZALIA_AUDIO_FORCE_DTO_MASK 0x00000300L
+//AZALIA_SOCCLK_CONTROL
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN__SHIFT 0x1
+#define AZALIA_SOCCLK_CONTROL__AUDIO_STREAM_SOCCLK_DEEP_SLEEP_EXIT_EN_MASK 0x00000002L
+//AZALIA_UNDERFLOW_FILLER_SAMPLE
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE__SHIFT 0x0
+#define AZALIA_UNDERFLOW_FILLER_SAMPLE__AZALIA_UNDERFLOW_FILLER_SAMPLE_MASK 0xFFFFFFFFL
+//AZALIA_DATA_DMA_CONTROL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD__SHIFT 0x10
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL__SHIFT 0x11
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_DATA_DMA_CONTROL__DATA_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_DATA_DMA_CONTROL__INPUT_DATA_DMA_ISOCHRONOUS_MASK 0x000000C0L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_IOC_GENERATION_METHOD_MASK 0x00010000L
+#define AZALIA_DATA_DMA_CONTROL__AZALIA_UNDERFLOW_CONTROL_MASK 0x00020000L
+//AZALIA_BDL_DMA_CONTROL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP__SHIFT 0x2
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS__SHIFT 0x4
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS__SHIFT 0x6
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_NON_SNOOP_MASK 0x00000003L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_NON_SNOOP_MASK 0x0000000CL
+#define AZALIA_BDL_DMA_CONTROL__BDL_DMA_ISOCHRONOUS_MASK 0x00000030L
+#define AZALIA_BDL_DMA_CONTROL__INPUT_BDL_DMA_ISOCHRONOUS_MASK 0x000000C0L
+//AZALIA_RIRB_AND_DP_CONTROL
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP__SHIFT 0x0
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP__SHIFT 0x4
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER__SHIFT 0x5
+#define AZALIA_RIRB_AND_DP_CONTROL__RIRB_NON_SNOOP_MASK 0x00000001L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_DMA_NON_SNOOP_MASK 0x00000010L
+#define AZALIA_RIRB_AND_DP_CONTROL__DP_UPDATE_FREQ_DIVIDER_MASK 0x000001E0L
+//AZALIA_CORB_DMA_CONTROL
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_NON_SNOOP__SHIFT 0x0
+#define AZALIA_CORB_DMA_CONTROL__CORB_DMA_ISOCHRONOUS__SHIFT 0x4
+
+
+// addressBlock: dce_dc_hda_azf0root_dispdec
+//AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID__AZALIA_CODEC_ROOT_PARAMETER_VENDOR_AND_DEVICE_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID__SHIFT 0x0
+#define AZALIA_F0_CODEC_ROOT_PARAMETER_REVISION_ID__AZALIA_CODEC_ROOT_PARAMETER_REVISION_ID_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT__SHIFT 0x0
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT__SHIFT 0x4
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__HBR_CHANNEL_COUNT_MASK 0x00000007L
+#define AZALIA_F0_CODEC_CHANNEL_COUNT_CONTROL__COMPRESSED_CHANNEL_COUNT_MASK 0x00000070L
+//AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW__SHIFT 0x0
+#define AZALIA_F0_CODEC_RESYNC_FIFO_CONTROL__RESYNC_FIFO_STARTUP_KEEPOUT_WINDOW_MASK 0x0000003FL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_GROUP_TYPE__AZALIA_CODEC_FUNCTION_PARAMETER_GROUP_TYPE_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS__AZALIA_CODEC_FUNCTION_PARAMETER_STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP__SHIFT 0x1e
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS__SHIFT 0x1f
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__AZALIA_CODEC_FUNCTION_PARAMETER_POWER_STATES_MASK 0x3FFFFFFFL
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__CLKSTOP_MASK 0x40000000L
+#define AZALIA_F0_CODEC_FUNCTION_PARAMETER_POWER_STATES__EPSS_MASK 0x80000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT__SHIFT 0x4
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK__SHIFT 0x9
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET__SHIFT 0xa
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SET_MASK 0x0000000FL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_ACT_MASK 0x000000F0L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__CLKSTOPOK_MASK 0x00000200L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_POWER_STATE__POWER_STATE_SETTINGS_RESET_MASK 0x00000400L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESET__CODEC_RESET_MASK 0x00000001L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1__SHIFT 0x8
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2__SHIFT 0x10
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3__SHIFT 0x18
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE0_MASK 0x000000FFL
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE1_MASK 0x0000FF00L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE2_MASK 0x00FF0000L
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_RESPONSE_SUBSYSTEM_ID__SUBSYSTEM_ID_BYTE3_MASK 0xFF000000L
+//AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION__SHIFT 0x0
+#define AZALIA_F0_CODEC_FUNCTION_CONTROL_CONVERTER_SYNCHRONIZATION__CONVERTER_SYNCHRONIZATION_MASK 0x000000FFL
+//CC_RCU_DC_AUDIO_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_PORT_CONNECTIVITY__PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define CC_RCU_DC_AUDIO_INPUT_PORT_CONNECTIVITY__INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_PORT_CONNECTIVITY__REG_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+//REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY__SHIFT 0x0
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE__SHIFT 0x4
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_MASK 0x00000007L
+#define REG_DC_AUDIO_INPUT_PORT_CONNECTIVITY__REG_INPUT_PORT_CONNECTIVITY_OVERRIDE_ENABLE_MASK 0x00000010L
+
+// addressBlock: dce_dc_hda_azf0inputendpoint0_dispdec
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint1_dispdec
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_hda_azf0inputendpoint2_dispdec
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_INDEX__AZALIA_INPUT_ENDPOINT_REG_INDEX_MASK 0x00003FFFL
+//AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA__SHIFT 0x0
+#define AZF0INPUTENDPOINT2_AZALIA_F0_CODEC_INPUT_ENDPOINT_DATA__AZALIA_INPUT_ENDPOINT_REG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_sdpif_dispdec
+//DCHUBBUB_SDPIF_CFG0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS__SHIFT 0x3
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS__SHIFT 0x6
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR__SHIFT 0xa
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR__SHIFT 0xb
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR__SHIFT 0xc
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN__SHIFT 0xd
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN__SHIFT 0xe
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL__SHIFT 0xf
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY__SHIFT 0x19
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_NO_OUTSTANDING_REQ_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_STATUS_MASK 0x00000006L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_DATA_RESPONSE_STATUS_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_MASK 0x000003C0L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_MASK 0x00000400L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_RESPONSE_STATUS_CLEAR_MASK 0x00000800L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_ERROR_CLEAR_MASK 0x00001000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_FLUSH_REQ_CREDIT_EN_MASK 0x00002000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_REQ_CREDIT_EN_MASK 0x00004000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_PORT_CONTROL_MASK 0x00008000L
+#define DCHUBBUB_SDPIF_CFG0__SDPIF_CREDIT_DISCONNECT_DELAY_MASK 0x7E000000L
+//DCHUBBUB_SDPIF_PIPE_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE0_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE1_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE2_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_SEC_LVL__SDPIF_PIPE3_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL__SHIFT 0x0
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL__SHIFT 0x3
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL__SHIFT 0x6
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL__SHIFT 0x9
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE0_DMDATA_SEC_LVL_MASK 0x00000007L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE1_DMDATA_SEC_LVL_MASK 0x00000038L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE2_DMDATA_SEC_LVL_MASK 0x000001C0L
+#define DCHUBBUB_SDPIF_PIPE_DMDATA_SEC_LVL__SDPIF_PIPE3_DMDATA_SEC_LVL_MASK 0x00000E00L
+//DCHUBBUB_SDPIF_MEM_PWR_CTRL
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_SDPIF_MEM_PWR_CTRL__DCHUBBUB_SDPIF_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_SDPIF_MEM_PWR_STATUS
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_SDPIF_MEM_PWR_STATUS__DCHUBBUB_SDPIF_MEM_PWR_STATE_MASK 0x00000003L
+//DCHUBBUB_SDPIF_CFG1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN__SHIFT 0x0
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS__SHIFT 0x1
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR__SHIFT 0x2
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP__SHIFT 0x8
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_DETECT_EN_MASK 0x00000001L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_MASK 0x00000002L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_PRQ_ERROR_STATUS_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_SDPIF_CFG1__SDPIF_FORCE_SNOOP_MASK 0x00000100L
+
+
+// addressBlock: dce_dc_dchubbub_hubbub_ret_path_dispdec
+//DCHUBBUB_RET_PATH_DCC_CFG
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG__DCC_VIDEO_FORMAT_EN_MASK 0x00000001L
+//DCHUBBUB_RET_PATH_DCC_CFG0_0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_0__DCC_CFG0_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG0_1
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG0_1__DCC_CFG0_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_0__DCC_CFG1_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG1_1
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG1_1__DCC_CFG1_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_0__DCC_CFG2_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG2_1
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG2_1__DCC_CFG2_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_0__DCC_CFG3_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG3_1
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG3_1__DCC_CFG3_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_0__DCC_CFG4_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG4_1
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG4_1__DCC_CFG4_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_0__DCC_CFG5_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG5_1
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG5_1__DCC_CFG5_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_0__DCC_CFG6_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG6_1
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG6_1__DCC_CFG6_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_0__DCC_CFG7_CONSTANT_0_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_DCC_CFG7_1
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_DCC_CFG7_1__DCC_CFG7_CONSTANT_1_MASK 0xFFFFFFFFL
+//DCHUBBUB_RET_PATH_MEM_PWR_CTRL
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS__SHIFT 0x2
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_FORCE_MASK 0x00000003L
+#define DCHUBBUB_RET_PATH_MEM_PWR_CTRL__DCHUBBUB_RET_PATH_MEM_PWR_DIS_MASK 0x00000004L
+//DCHUBBUB_RET_PATH_MEM_PWR_STATUS
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE__SHIFT 0x0
+#define DCHUBBUB_RET_PATH_MEM_PWR_STATUS__DCHUBBUB_RET_PATH_MEM_PWR_STATE_MASK 0x00000003L
+
+// addressBlock: dce_dc_dchubbub_hubbub_dispdec
+//DCHUBBUB_ARB_DF_REQ_OUTSTAND
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND__SHIFT 0x0
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND__SHIFT 0x10
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MAX_REQ_OUTSTAND_MASK 0x000001FFL
+#define DCHUBBUB_ARB_DF_REQ_OUTSTAND__DCHUBBUB_ARB_MIN_REQ_OUTSTAND_MASK 0x01FF0000L
+//DCHUBBUB_ARB_SAT_LEVEL
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL__SHIFT 0x0
+#define DCHUBBUB_ARB_SAT_LEVEL__DCHUBBUB_ARB_SAT_LEVEL_MASK 0xFFFFFFFFL
+//DCHUBBUB_ARB_QOS_FORCE
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE__SHIFT 0x8
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_VALUE_MASK 0x0000000FL
+#define DCHUBBUB_ARB_QOS_FORCE__DCHUBBUB_ARB_QOS_FORCE_ENABLE_MASK 0x00000100L
+//DCHUBBUB_ARB_DRAM_STATE_CNTL
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE__SHIFT 0x0
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE__SHIFT 0x1
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE__SHIFT 0x4
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE__SHIFT 0x5
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL__SHIFT 0x9
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_VALUE_MASK 0x00000001L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE_MASK 0x00000002L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_VALUE_MASK 0x00000010L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_ALLOW_PSTATE_CHANGE_FORCE_ENABLE_MASK 0x00000020L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_DO_NOT_FORCE_URGENCY_AND_SELF_REFRESH_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_MASK 0x00000100L
+#define DCHUBBUB_ARB_DRAM_STATE_CNTL__DCHUBBUB_ARB_FORCE_URGENCY_DURING_DRAM_CLOCK_NBPSTATE_CHANGE_REQUEST_REGARDLESS_OF_ALLOW_SIGNAL_MASK 0x00000200L
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_A_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_B_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_C_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D__DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D__DCHUBBUB_ARB_PTE_META_URGENCY_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__SHIFT 0x0
+#define DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D__DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D_MASK 0x001FFFFFL
+//DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT__SHIFT 0x0
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE__SHIFT 0x4
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS__SHIFT 0x5
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK__SHIFT 0x6
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST__SHIFT 0x8
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_SELECT_MASK 0x00000003L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE_MASK 0x00000010L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_STATUS_MASK 0x00000020L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_ACK_MASK 0x00000040L
+#define DCHUBBUB_ARB_WATERMARK_CHANGE_CNTL__DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST_MASK 0x00000100L
+//DCHUBBUB_ARB_TIMEOUT_ENABLE
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_ARB_TIMEOUT_ENABLE__DCHUBBUB_ARB_TIMEOUT_ENABLE_MASK 0x00000001L
+//DCHUBBUB_GLOBAL_TIMER_CNTL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV__SHIFT 0x0
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE__SHIFT 0xc
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT__SHIFT 0x10
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_REFDIV_MASK 0x0000000FL
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_ENABLE_MASK 0x00001000L
+#define DCHUBBUB_GLOBAL_TIMER_CNTL__DCHUBBUB_GLOBAL_TIMER_INIT_MASK 0xFFFF0000L
+//SURFACE_CHECK0_ADDRESS_LSB
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_LSB__SURFACE_CHECK0_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK0_ADDRESS_MSB
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK0_ADDRESS_MSB__SURFACE_CHECK0_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK0_ADDRESS_MSB__CHECKER0_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK1_ADDRESS_LSB
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_LSB__SURFACE_CHECK1_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK1_ADDRESS_MSB
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK1_ADDRESS_MSB__SURFACE_CHECK1_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK1_ADDRESS_MSB__CHECKER1_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK2_ADDRESS_LSB
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_LSB__SURFACE_CHECK2_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK2_ADDRESS_MSB
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK2_ADDRESS_MSB__SURFACE_CHECK2_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK2_ADDRESS_MSB__CHECKER2_SURFACE_INUSE_MASK 0x80000000L
+//SURFACE_CHECK3_ADDRESS_LSB
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_LSB__SURFACE_CHECK3_ADDRESS_LSB_MASK 0xFFFFFFFFL
+//SURFACE_CHECK3_ADDRESS_MSB
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB__SHIFT 0x0
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE__SHIFT 0x1f
+#define SURFACE_CHECK3_ADDRESS_MSB__SURFACE_CHECK3_ADDRESS_MSB_MASK 0x0000FFFFL
+#define SURFACE_CHECK3_ADDRESS_MSB__CHECKER3_SURFACE_INUSE_MASK 0x80000000L
+//VTG0_CONTROL
+#define VTG0_CONTROL__VTG0_FP2__SHIFT 0x0
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT__SHIFT 0x10
+#define VTG0_CONTROL__VTG0_ENABLE__SHIFT 0x1f
+#define VTG0_CONTROL__VTG0_FP2_MASK 0x00007FFFL
+#define VTG0_CONTROL__VTG0_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG0_CONTROL__VTG0_ENABLE_MASK 0x80000000L
+//VTG1_CONTROL
+#define VTG1_CONTROL__VTG1_FP2__SHIFT 0x0
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT__SHIFT 0x10
+#define VTG1_CONTROL__VTG1_ENABLE__SHIFT 0x1f
+#define VTG1_CONTROL__VTG1_FP2_MASK 0x00007FFFL
+#define VTG1_CONTROL__VTG1_VCOUNT_INIT_MASK 0x7FFF0000L
+#define VTG1_CONTROL__VTG1_ENABLE_MASK 0x80000000L
+//DCHUBBUB_SOFT_RESET
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET__SHIFT 0x0
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET__SHIFT 0x1
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET__SHIFT 0x4
+#define DCHUBBUB_SOFT_RESET__DCHUBBUB_GLOBAL_SOFT_RESET_MASK 0x00000001L
+#define DCHUBBUB_SOFT_RESET__ALLOW_CSTATE_SOFT_RESET_MASK 0x00000002L
+#define DCHUBBUB_SOFT_RESET__GLBFLIP_SOFT_RESET_MASK 0x00000010L
+//DCHUBBUB_CLOCK_CNTL
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x5
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS__SHIFT 0x6
+#define DCHUBBUB_CLOCK_CNTL__DISPCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000020L
+#define DCHUBBUB_CLOCK_CNTL__DCFCLK_R_DCHUBBUB_GATE_DIS_MASK 0x00000040L
+//DCFCLK_CNTL
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY__SHIFT 0x0
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY__SHIFT 0x4
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS__SHIFT 0x1f
+#define DCFCLK_CNTL__DCFCLK_TURN_ON_DELAY_MASK 0x0000000FL
+#define DCFCLK_CNTL__DCFCLK_TURN_OFF_DELAY_MASK 0x00000FF0L
+#define DCFCLK_CNTL__DCFCLK_GATE_DIS_MASK 0x80000000L
+//DCHUBBUB_VLINE_SNAPSHOT
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT__SHIFT 0x0
+#define DCHUBBUB_VLINE_SNAPSHOT__DCHUBBUB_VLINE_SNAPSHOT_MASK 0x00000001L
+//DCHUBBUB_CTRL_STATUS
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN__SHIFT 0x0
+#define DCHUBBUB_CTRL_STATUS__URGENT_ZERO_SIZE_REQ_EN_MASK 0x00000001L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL1
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD__SHIFT 0x6
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_ERROR_STATUS_MASK 0x0000003FL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL1__DCHUBBUB_TIMEOUT_REQ_STALL_THRESHOLD_MASK 0xFFFFFFC0L
+//DCHUBBUB_TIMEOUT_DETECTION_CTRL2
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN__SHIFT 0x1b
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET__SHIFT 0x1c
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_PSTATE_STALL_THRESHOLD_MASK 0x07FFFFFFL
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_DETECTION_EN_MASK 0x08000000L
+#define DCHUBBUB_TIMEOUT_DETECTION_CTRL2__DCHUBBUB_TIMEOUT_TIMER_RESET_MASK 0x10000000L
+//DCHUBBUB_TIMEOUT_INTERRUPT_STATUS
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE__SHIFT 0x0
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS__SHIFT 0x1
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR__SHIFT 0x2
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK__SHIFT 0x3
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_ENABLE_MASK 0x00000001L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_STATUS_MASK 0x00000002L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_CLEAR_MASK 0x00000004L
+#define DCHUBBUB_TIMEOUT_INTERRUPT_STATUS__DCHUBBUB_TIMEOUT_INT_MASK_MASK 0x000000F8L
+//DCHUBBUB_TEST_DEBUG_INDEX
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_INDEX__DCHUBBUB_TEST_DEBUG_INDEX_MASK 0x000000FFL
+//DCHUBBUB_TEST_DEBUG_DATA
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA__SHIFT 0x0
+#define DCHUBBUB_TEST_DEBUG_DATA__DCHUBBUB_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubp_dispdec
+//HUBP0_DCSURF_SURFACE_CONFIG
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP0_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP0_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP0_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP0_DCSURF_ADDR_CONFIG
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP0_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP0_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP0_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP0_DCSURF_TILING_CONFIG
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP0_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP0_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP0_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP0_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP0_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP0_DCSURF_PRI_VIEWPORT_START
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP0_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP0_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP0_DCHUBP_CNTL
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP0_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP0_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP0_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP0_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP0_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP0_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP0_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP0_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP0_HUBP_CLK_CNTL
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP0_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP0_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP0_HUBPREQ_DEBUG_DB
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP0_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpreq_dispdec
+//HUBPREQ0_DCSURF_SURFACE_PITCH
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ0_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_CONTROL
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ0_DCSURF_FLIP_CONTROL2
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ0_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ0_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ0_DCSURF_SURFACE_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ0_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ0_DCN_EXPANSION_MODE
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ0_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ0_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ0_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ0_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ0_DCN_TTU_QOS_WM
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ0_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ0_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ0_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ0_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ0_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ0_BLANK_OFFSET_0
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ0_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ0_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ0_BLANK_OFFSET_1
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ0_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ0_DST_DIMENSIONS
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ0_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ0_DST_AFTER_SCALER
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ0_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ0_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ0_PREFETCH_SETTINGS
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ0_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ0_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ0_PREFETCH_SETTINGS_C
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ0_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ0_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ0_VBLANK_PARAMETERS_1
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_2
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_3
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ0_VBLANK_PARAMETERS_4
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ0_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ0_FLIP_PARAMETERS_0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ0_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ0_FLIP_PARAMETERS_2
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ0_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_4
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_5
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ0_NOM_PARAMETERS_6
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ0_NOM_PARAMETERS_7
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ0_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ0_PER_LINE_DELIVERY_PRE
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ0_PER_LINE_DELIVERY
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ0_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ0_CURSOR_SETTINGS
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ0_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ0_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ0_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ0_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ0_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ0_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ0_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_hubpret_dispdec
+//HUBPRET0_HUBPRET_CONTROL
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET0_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET0_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET0_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET0_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET0_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET0_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET0_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET0_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET0_HUBPRET_READ_LINE0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE1
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_INTERRUPT
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET0_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET0_HUBPRET_READ_LINE_VALUE
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET0_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET0_HUBPRET_READ_LINE_STATUS
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET0_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp0_dispdec_cursor0_dispdec
+//CURSOR0_0_CURSOR_CONTROL
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_0_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_0_CURSOR_SIZE
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_0_CURSOR_POSITION
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_0_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_0_CURSOR_HOT_SPOT
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_0_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_0_CURSOR_STEREO_CONTROL
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_0_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_0_CURSOR_DST_OFFSET
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_0_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_0_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_0_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_0_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_0_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_0_DMDATA_ADDRESS_HIGH
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_0_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_0_DMDATA_ADDRESS_LOW
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_0_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_0_DMDATA_CNTL
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_QOS_CNTL
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_0_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_0_DMDATA_STATUS
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_0_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_0_DMDATA_SW_CNTL
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_0_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_0_DMDATA_SW_DATA
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_0_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
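+/*
+ * Illustrative sketch only (not part of the generated header): every
+ * register field above is described by a *__SHIFT / *_MASK pair, which
+ * a driver normally feeds into a read-modify-write helper.  A minimal
+ * example, assuming a hypothetical set_field() helper rather than any
+ * specific amdgpu macro:
+ *
+ *   static inline uint32_t set_field(uint32_t reg, uint32_t mask,
+ *                                    uint32_t shift, uint32_t val)
+ *   {
+ *           return (reg & ~mask) | ((val << shift) & mask);
+ *   }
+ *
+ *   // e.g. describe a 64x64 cursor in CURSOR0_0_CURSOR_SIZE:
+ *   size = set_field(size, CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH_MASK,
+ *                    CURSOR0_0_CURSOR_SIZE__CURSOR_WIDTH__SHIFT, 64);
+ *   size = set_field(size, CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT_MASK,
+ *                    CURSOR0_0_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT, 64);
+ */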
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubp_dispdec
+//HUBP1_DCSURF_SURFACE_CONFIG
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP1_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP1_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP1_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP1_DCSURF_ADDR_CONFIG
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP1_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP1_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP1_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP1_DCSURF_TILING_CONFIG
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP1_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP1_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP1_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP1_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP1_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP1_DCSURF_PRI_VIEWPORT_START
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP1_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP1_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP1_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP1_DCHUBP_CNTL
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP1_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP1_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP1_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP1_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP1_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP1_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP1_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP1_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP1_HUBP_CLK_CNTL
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP1_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP1_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP1_HUBPREQ_DEBUG_DB
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP1_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP1_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpreq_dispdec
+//HUBPREQ1_DCSURF_SURFACE_PITCH
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ1_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_CONTROL
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ1_DCSURF_FLIP_CONTROL2
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ1_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ1_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ1_DCSURF_SURFACE_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ1_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ1_DCN_EXPANSION_MODE
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ1_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ1_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ1_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ1_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ1_DCN_TTU_QOS_WM
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ1_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ1_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ1_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ1_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ1_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ1_BLANK_OFFSET_0
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ1_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ1_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ1_BLANK_OFFSET_1
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ1_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ1_DST_DIMENSIONS
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ1_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ1_DST_AFTER_SCALER
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ1_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ1_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ1_PREFETCH_SETTINGS
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ1_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ1_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ1_PREFETCH_SETTINGS_C
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ1_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ1_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ1_VBLANK_PARAMETERS_1
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_2
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_3
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ1_VBLANK_PARAMETERS_4
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ1_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ1_FLIP_PARAMETERS_0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ1_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ1_FLIP_PARAMETERS_2
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ1_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_4
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_5
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ1_NOM_PARAMETERS_6
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ1_NOM_PARAMETERS_7
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ1_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ1_PER_LINE_DELIVERY_PRE
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ1_PER_LINE_DELIVERY
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ1_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ1_CURSOR_SETTINGS
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ1_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ1_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ1_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ1_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ1_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ1_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ1_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+// addressBlock: dce_dc_dcbubp1_dispdec_hubpret_dispdec
+//HUBPRET1_HUBPRET_CONTROL
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET1_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET1_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET1_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET1_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET1_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET1_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET1_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET1_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET1_HUBPRET_READ_LINE0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE1
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_INTERRUPT
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET1_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET1_HUBPRET_READ_LINE_VALUE
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET1_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET1_HUBPRET_READ_LINE_STATUS
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET1_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp1_dispdec_cursor0_dispdec
+//CURSOR0_1_CURSOR_CONTROL
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_1_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_1_CURSOR_SIZE
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_1_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_1_CURSOR_POSITION
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_1_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_1_CURSOR_HOT_SPOT
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_1_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_1_CURSOR_STEREO_CONTROL
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_1_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_1_CURSOR_DST_OFFSET
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_1_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_1_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_1_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_1_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_1_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_1_DMDATA_ADDRESS_HIGH
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_1_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_1_DMDATA_ADDRESS_LOW
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_1_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_1_DMDATA_CNTL
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_QOS_CNTL
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_1_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_1_DMDATA_STATUS
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_1_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_1_DMDATA_SW_CNTL
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_1_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_1_DMDATA_SW_DATA
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_1_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+// addressBlock: dce_dc_dcbubp2_dispdec_hubp_dispdec
+//HUBP2_DCSURF_SURFACE_CONFIG
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP2_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP2_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP2_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP2_DCSURF_ADDR_CONFIG
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP2_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP2_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP2_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP2_DCSURF_TILING_CONFIG
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP2_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP2_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP2_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP2_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP2_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP2_DCSURF_PRI_VIEWPORT_START
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP2_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP2_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP2_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP2_DCHUBP_CNTL
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP2_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP2_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP2_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP2_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP2_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP2_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP2_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP2_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP2_HUBP_CLK_CNTL
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP2_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP2_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP2_HUBPREQ_DEBUG_DB
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP2_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP2_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpreq_dispdec
+//HUBPREQ2_DCSURF_SURFACE_PITCH
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ2_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_CONTROL
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ2_DCSURF_FLIP_CONTROL2
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ2_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ2_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ2_DCSURF_SURFACE_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ2_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ2_DCN_EXPANSION_MODE
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ2_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ2_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ2_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ2_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ2_DCN_TTU_QOS_WM
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ2_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ2_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ2_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ2_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ2_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ2_BLANK_OFFSET_0
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ2_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ2_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ2_BLANK_OFFSET_1
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ2_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ2_DST_DIMENSIONS
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ2_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ2_DST_AFTER_SCALER
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ2_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ2_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ2_PREFETCH_SETTINGS
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ2_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ2_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ2_PREFETCH_SETTINGS_C
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ2_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ2_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ2_VBLANK_PARAMETERS_1
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_2
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_3
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ2_VBLANK_PARAMETERS_4
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ2_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ2_FLIP_PARAMETERS_0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ2_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ2_FLIP_PARAMETERS_2
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ2_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_4
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_5
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ2_NOM_PARAMETERS_6
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ2_NOM_PARAMETERS_7
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ2_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ2_PER_LINE_DELIVERY_PRE
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ2_PER_LINE_DELIVERY
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ2_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ2_CURSOR_SETTINGS
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ2_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ2_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ2_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ2_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ2_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ2_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ2_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+// addressBlock: dce_dc_dcbubp2_dispdec_hubpret_dispdec
+//HUBPRET2_HUBPRET_CONTROL
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET2_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET2_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET2_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET2_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET2_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET2_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET2_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET2_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET2_HUBPRET_READ_LINE0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE1
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_INTERRUPT
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET2_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET2_HUBPRET_READ_LINE_VALUE
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET2_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET2_HUBPRET_READ_LINE_STATUS
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET2_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+// addressBlock: dce_dc_dcbubp2_dispdec_cursor0_dispdec
+//CURSOR0_2_CURSOR_CONTROL
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_2_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_2_CURSOR_SIZE
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_2_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_2_CURSOR_POSITION
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_2_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_2_CURSOR_HOT_SPOT
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_2_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_2_CURSOR_STEREO_CONTROL
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_2_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_2_CURSOR_DST_OFFSET
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_2_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_2_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_2_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_2_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_2_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_2_DMDATA_ADDRESS_HIGH
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_2_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_2_DMDATA_ADDRESS_LOW
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_2_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_2_DMDATA_CNTL
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_QOS_CNTL
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_2_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_2_DMDATA_STATUS
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_2_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_2_DMDATA_SW_CNTL
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_2_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_2_DMDATA_SW_DATA
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_2_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubp_dispdec
+//HUBP3_DCSURF_SURFACE_CONFIG
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE__SHIFT 0x8
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN__SHIFT 0xa
+#define HUBP3_DCSURF_SURFACE_CONFIG__SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+#define HUBP3_DCSURF_SURFACE_CONFIG__ROTATION_ANGLE_MASK 0x00000300L
+#define HUBP3_DCSURF_SURFACE_CONFIG__H_MIRROR_EN_MASK 0x00000400L
+//HUBP3_DCSURF_ADDR_CONFIG
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES__SHIFT 0x0
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS__SHIFT 0x3
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE__SHIFT 0x6
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE__SHIFT 0x8
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE__SHIFT 0xa
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS__SHIFT 0xc
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_PIPES_MASK 0x00000007L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_BANKS_MASK 0x00000038L
+#define HUBP3_DCSURF_ADDR_CONFIG__PIPE_INTERLEAVE_MASK 0x000000C0L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_SE_MASK 0x00000300L
+#define HUBP3_DCSURF_ADDR_CONFIG__NUM_RB_PER_SE_MASK 0x00000C00L
+#define HUBP3_DCSURF_ADDR_CONFIG__MAX_COMPRESSED_FRAGS_MASK 0x00003000L
+//HUBP3_DCSURF_TILING_CONFIG
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE__SHIFT 0x0
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE__SHIFT 0x7
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR__SHIFT 0x9
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED__SHIFT 0xa
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED__SHIFT 0xb
+#define HUBP3_DCSURF_TILING_CONFIG__SW_MODE_MASK 0x0000001FL
+#define HUBP3_DCSURF_TILING_CONFIG__DIM_TYPE_MASK 0x00000180L
+#define HUBP3_DCSURF_TILING_CONFIG__META_LINEAR_MASK 0x00000200L
+#define HUBP3_DCSURF_TILING_CONFIG__RB_ALIGNED_MASK 0x00000400L
+#define HUBP3_DCSURF_TILING_CONFIG__PIPE_ALIGNED_MASK 0x00000800L
+//HUBP3_DCSURF_PRI_VIEWPORT_START
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START__PRI_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION__PRI_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_START_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_START_C__PRI_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_PRI_VIEWPORT_DIMENSION_C__PRI_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_X_START_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START__SEC_VIEWPORT_Y_START_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_WIDTH_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION__SEC_VIEWPORT_HEIGHT_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_START_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_X_START_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_START_C__SEC_VIEWPORT_Y_START_C_MASK 0x3FFF0000L
+//HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C__SHIFT 0x0
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C__SHIFT 0x10
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_WIDTH_C_MASK 0x00003FFFL
+#define HUBP3_DCSURF_SEC_VIEWPORT_DIMENSION_C__SEC_VIEWPORT_HEIGHT_C_MASK 0x3FFF0000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__SWATH_HEIGHT_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__PTE_ROW_HEIGHT_LINEAR_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__CHUNK_SIZE_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_CHUNK_SIZE_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__META_CHUNK_SIZE_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MIN_META_CHUNK_SIZE_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__DPTE_GROUP_SIZE_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG__MPTE_GROUP_SIZE_MASK 0x07000000L
+//HUBP3_DCHUBP_REQ_SIZE_CONFIG_C
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C__SHIFT 0x0
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C__SHIFT 0x4
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C__SHIFT 0x8
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C__SHIFT 0xb
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C__SHIFT 0x10
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C__SHIFT 0x12
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C__SHIFT 0x14
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C__SHIFT 0x18
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__SWATH_HEIGHT_C_MASK 0x00000007L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__PTE_ROW_HEIGHT_LINEAR_C_MASK 0x00000070L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__CHUNK_SIZE_C_MASK 0x00000700L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_CHUNK_SIZE_C_MASK 0x00001800L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__META_CHUNK_SIZE_C_MASK 0x00030000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MIN_META_CHUNK_SIZE_C_MASK 0x000C0000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__DPTE_GROUP_SIZE_C_MASK 0x00700000L
+#define HUBP3_DCHUBP_REQ_SIZE_CONFIG_C__MPTE_GROUP_SIZE_C_MASK 0x07000000L
+//HUBP3_DCHUBP_CNTL
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN__SHIFT 0x0
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ__SHIFT 0x1
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE__SHIFT 0x2
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK__SHIFT 0x3
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL__SHIFT 0x4
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC__SHIFT 0x8
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM__SHIFT 0x9
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE__SHIFT 0xc
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE__SHIFT 0xd
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ__SHIFT 0x10
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS__SHIFT 0x14
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD__SHIFT 0x18
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR__SHIFT 0x1a
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN__SHIFT 0x1b
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS__SHIFT 0x1c
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR__SHIFT 0x1f
+#define HUBP3_DCHUBP_CNTL__HUBP_BLANK_EN_MASK 0x00000001L
+#define HUBP3_DCHUBP_CNTL__HUBP_NO_OUTSTANDING_REQ_MASK 0x00000002L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_MASK 0x00000004L
+#define HUBP3_DCHUBP_CNTL__HUBP_IN_BLANK_MASK 0x00000008L
+#define HUBP3_DCHUBP_CNTL__HUBP_VTG_SEL_MASK 0x000000F0L
+#define HUBP3_DCHUBP_CNTL__HUBP_VREADY_AT_OR_AFTER_VSYNC_MASK 0x00000100L
+#define HUBP3_DCHUBP_CNTL__HUBP_DISABLE_STOP_DATA_DURING_VM_MASK 0x00000200L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_DISABLE_MASK 0x00001000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TTU_MODE_MASK 0x0000E000L
+#define HUBP3_DCHUBP_CNTL__HUBP_XRQ_NO_OUTSTANDING_REQ_MASK 0x000F0000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_MASK 0x00F00000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_THRESHOLD_MASK 0x03000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_STATUS_CLEAR_MASK 0x04000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_TIMEOUT_INTERRUPT_EN_MASK 0x08000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_STATUS_MASK 0x70000000L
+#define HUBP3_DCHUBP_CNTL__HUBP_UNDERFLOW_CLEAR_MASK 0x80000000L
+//HUBP3_HUBP_CLK_CNTL
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE__SHIFT 0x0
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS__SHIFT 0x4
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS__SHIFT 0x8
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS__SHIFT 0xc
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS__SHIFT 0x10
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON__SHIFT 0x14
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON__SHIFT 0x15
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON__SHIFT 0x16
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON__SHIFT 0x17
+#define HUBP3_HUBP_CLK_CNTL__HUBP_CLOCK_ENABLE_MASK 0x00000001L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_GATE_DIS_MASK 0x00000010L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_GATE_DIS_MASK 0x00000100L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_GATE_DIS_MASK 0x00001000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_GATE_DIS_MASK 0x00010000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DISPCLK_R_CLOCK_ON_MASK 0x00100000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DPPCLK_G_CLOCK_ON_MASK 0x00200000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_R_CLOCK_ON_MASK 0x00400000L
+#define HUBP3_HUBP_CLK_CNTL__HUBP_DCFCLK_G_CLOCK_ON_MASK 0x00800000L
+//HUBP3_HUBPREQ_DEBUG_DB
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG__SHIFT 0x0
+#define HUBP3_HUBPREQ_DEBUG_DB__HUBPREQ_DEBUG_MASK 0xFFFFFFFFL
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK__SHIFT 0x1c
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_EN_DCFCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_PERIOD_M1_DCFCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_START_SEL_DCFCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_PERFMON_STOP_SEL_DCFCLK_MASK 0x01F00000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DCFCLK__HUBP_MEASURE_WIN_MODE_DCFCLK_MASK 0x30000000L
+//HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK__SHIFT 0x0
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK__SHIFT 0x1
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK__SHIFT 0x4
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK__SHIFT 0xc
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK__SHIFT 0x14
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_EN_DPPCLK_MASK 0x00000001L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_SRC_SEL_DPPCLK_MASK 0x00000002L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_MEASURE_WIN_PERIOD_M1_DPPCLK_MASK 0x00000FF0L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_START_SEL_DPPCLK_MASK 0x0001F000L
+#define HUBP3_HUBP_MEASURE_WIN_CTRL_DPPCLK__HUBP_PERFMON_STOP_SEL_DPPCLK_MASK 0x01F00000L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpreq_dispdec
+//HUBPREQ3_DCSURF_SURFACE_PITCH
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__PITCH_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH__META_PITCH_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_SURFACE_PITCH_C
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__PITCH_C_MASK 0x00003FFFL
+#define HUBPREQ3_DCSURF_SURFACE_PITCH_C__META_PITCH_C_MASK 0x3FFF0000L
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS__PRIMARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH__PRIMARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_C__PRIMARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C__PRIMARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS__SECONDARY_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH__SECONDARY_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_C__SECONDARY_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C__SECONDARY_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS__PRIMARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH__PRIMARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C__PRIMARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C__PRIMARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS__SECONDARY_META_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH__SECONDARY_META_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C__SECONDARY_META_SURFACE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C__SECONDARY_META_SURFACE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_CONTROL
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C__SHIFT 0x4
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0x5
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK__SHIFT 0xa
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C__SHIFT 0xc
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C__SHIFT 0xd
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_EN_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_TMZ_C_MASK 0x00000010L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00000020L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_EN_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_TMZ_C_MASK 0x00001000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_SURFACE_DCC_IND_64B_BLK_C_MASK 0x00002000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__PRIMARY_META_SURFACE_TMZ_C_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_CONTROL__SECONDARY_META_SURFACE_TMZ_C_MASK 0x00080000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM__SHIFT 0x4
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC__SHIFT 0x10
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE__SHIFT 0x11
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY__SHIFT 0x12
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY__SHIFT 0x14
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_UPDATE_LOCK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_VUPDATE_SKIP_NUM_MASK 0x000000F0L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__HUBPREQ_MASTER_UPDATE_LOCK_STATUS_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_MODE_FOR_STEREOSYNC_MASK 0x00003000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_IN_STEREOSYNC_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_DISABLE_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_STEREO_SELECT_POLARITY_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL__SURFACE_FLIP_PENDING_DELAY_MASK 0x3FF00000L
+//HUBPREQ3_DCSURF_FLIP_CONTROL2
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME__SHIFT 0x0
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE__SHIFT 0x8
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK__SHIFT 0x9
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE__SHIFT 0xa
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH__SHIFT 0xc
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_FLIP_PENDING_MIN_TIME_MASK 0x000000FFL
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_ENABLE_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_GSL_MASK_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_TRIPLE_BUFFER_ENABLE_MASK 0x00000400L
+#define HUBPREQ3_DCSURF_FLIP_CONTROL2__SURFACE_INUSE_RAED_NO_LATCH_MASK 0x00001000L
+//HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE__SHIFT 0x1
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK__SHIFT 0x2
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE__SHIFT 0x3
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR__SHIFT 0x8
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR__SHIFT 0x9
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED__SHIFT 0x10
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS__SHIFT 0x11
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED__SHIFT 0x12
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS__SHIFT 0x13
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_MASK_MASK 0x00000001L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_TYPE_MASK 0x00000002L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_MASK_MASK 0x00000004L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_TYPE_MASK 0x00000008L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_CLEAR_MASK 0x00000100L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_CLEAR_MASK 0x00000200L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_OCCURRED_MASK 0x00010000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_INT_STATUS_MASK 0x00020000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_OCCURRED_MASK 0x00040000L
+#define HUBPREQ3_DCSURF_SURFACE_FLIP_INTERRUPT__SURFACE_FLIP_AWAY_INT_STATUS_MASK 0x00080000L
+//HUBPREQ3_DCSURF_SURFACE_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE__SURFACE_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH__SURFACE_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_C__SURFACE_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_INUSE_HIGH_C__SURFACE_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE__SURFACE_EARLIEST_INUSE_ADDRESS_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_C__SURFACE_EARLIEST_INUSE_ADDRESS_C_MASK 0xFFFFFFFFL
+//HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C__SHIFT 0x0
+#define HUBPREQ3_DCSURF_SURFACE_EARLIEST_INUSE_HIGH_C__SURFACE_EARLIEST_INUSE_ADDRESS_HIGH_C_MASK 0x0000FFFFL
+//HUBPREQ3_DCN_EXPANSION_MODE
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE__SHIFT 0x0
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE__SHIFT 0x2
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE__SHIFT 0x4
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE__SHIFT 0x6
+#define HUBPREQ3_DCN_EXPANSION_MODE__DRQ_EXPANSION_MODE_MASK 0x00000003L
+#define HUBPREQ3_DCN_EXPANSION_MODE__CRQ_EXPANSION_MODE_MASK 0x0000000CL
+#define HUBPREQ3_DCN_EXPANSION_MODE__MRQ_EXPANSION_MODE_MASK 0x00000030L
+#define HUBPREQ3_DCN_EXPANSION_MODE__PRQ_EXPANSION_MODE_MASK 0x000000C0L
+//HUBPREQ3_DCN_TTU_QOS_WM
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM__SHIFT 0x0
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM__SHIFT 0x10
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_LOW_WM_MASK 0x00003FFFL
+#define HUBPREQ3_DCN_TTU_QOS_WM__QoS_LEVEL_HIGH_WM_MASK 0x3FFF0000L
+//HUBPREQ3_DCN_GLOBAL_TTU_CNTL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK__SHIFT 0x0
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP__SHIFT 0x1c
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__MIN_TTU_VBLANK_MASK 0x00FFFFFFL
+#define HUBPREQ3_DCN_GLOBAL_TTU_CNTL__QoS_LEVEL_FLIP_MASK 0xF0000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF0_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_SURF1_TTU_CNTL0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_SURF1_TTU_CNTL1
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_SURF1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR0_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR0_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR0_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_DCN_CUR1_TTU_CNTL0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED__SHIFT 0x18
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE__SHIFT 0x1c
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__REFCYC_PER_REQ_DELIVERY_MASK 0x007FFFFFL
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_LEVEL_FIXED_MASK 0x0F000000L
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL0__QoS_RAMP_DISABLE_MASK 0x10000000L
+//HUBPREQ3_DCN_CUR1_TTU_CNTL1
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE__SHIFT 0x0
+#define HUBPREQ3_DCN_CUR1_TTU_CNTL1__REFCYC_PER_REQ_DELIVERY_PRE_MASK 0x007FFFFFL
+//HUBPREQ3_BLANK_OFFSET_0
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END__SHIFT 0x10
+#define HUBPREQ3_BLANK_OFFSET_0__REFCYC_H_BLANK_END_MASK 0x00001FFFL
+#define HUBPREQ3_BLANK_OFFSET_0__DLG_V_BLANK_END_MASK 0x7FFF0000L
+//HUBPREQ3_BLANK_OFFSET_1
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START__SHIFT 0x0
+#define HUBPREQ3_BLANK_OFFSET_1__MIN_DST_Y_NEXT_START_MASK 0x0003FFFFL
+//HUBPREQ3_DST_DIMENSIONS
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL__SHIFT 0x0
+#define HUBPREQ3_DST_DIMENSIONS__REFCYC_PER_HTOTAL_MASK 0x001FFFFFL
+//HUBPREQ3_DST_AFTER_SCALER
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER__SHIFT 0x0
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER__SHIFT 0x10
+#define HUBPREQ3_DST_AFTER_SCALER__REFCYC_X_AFTER_SCALER_MASK 0x00001FFFL
+#define HUBPREQ3_DST_AFTER_SCALER__DST_Y_AFTER_SCALER_MASK 0x00070000L
+//HUBPREQ3_PREFETCH_SETTINGS
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH__SHIFT 0x18
+#define HUBPREQ3_PREFETCH_SETTINGS__VRATIO_PREFETCH_MASK 0x003FFFFFL
+#define HUBPREQ3_PREFETCH_SETTINGS__DST_Y_PREFETCH_MASK 0xFF000000L
+//HUBPREQ3_PREFETCH_SETTINGS_C
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C__SHIFT 0x0
+#define HUBPREQ3_PREFETCH_SETTINGS_C__VRATIO_PREFETCH_C_MASK 0x003FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK__SHIFT 0x8
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_VM_VBLANK_MASK 0x0000001FL
+#define HUBPREQ3_VBLANK_PARAMETERS_0__DST_Y_PER_ROW_VBLANK_MASK 0x00003F00L
+//HUBPREQ3_VBLANK_PARAMETERS_1
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_1__REFCYC_PER_PTE_GROUP_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_2
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_2__REFCYC_PER_PTE_GROUP_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_3
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_3__REFCYC_PER_META_CHUNK_VBLANK_L_MASK 0x007FFFFFL
+//HUBPREQ3_VBLANK_PARAMETERS_4
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C__SHIFT 0x0
+#define HUBPREQ3_VBLANK_PARAMETERS_4__REFCYC_PER_META_CHUNK_VBLANK_C_MASK 0x007FFFFFL
+//HUBPREQ3_FLIP_PARAMETERS_0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP__SHIFT 0x8
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_VM_FLIP_MASK 0x0000001FL
+#define HUBPREQ3_FLIP_PARAMETERS_0__DST_Y_PER_ROW_FLIP_MASK 0x00003F00L
+//HUBPREQ3_FLIP_PARAMETERS_2
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L__SHIFT 0x0
+#define HUBPREQ3_FLIP_PARAMETERS_2__REFCYC_PER_META_CHUNK_FLIP_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_4
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_4__DST_Y_PER_META_ROW_NOM_L_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_5
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_5__REFCYC_PER_META_CHUNK_NOM_L_MASK 0x007FFFFFL
+//HUBPREQ3_NOM_PARAMETERS_6
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_6__DST_Y_PER_META_ROW_NOM_C_MASK 0x0001FFFFL
+//HUBPREQ3_NOM_PARAMETERS_7
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C__SHIFT 0x0
+#define HUBPREQ3_NOM_PARAMETERS_7__REFCYC_PER_META_CHUNK_NOM_C_MASK 0x007FFFFFL
+//HUBPREQ3_PER_LINE_DELIVERY_PRE
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY_PRE__REFCYC_PER_LINE_DELIVERY_PRE_C_MASK 0x1FFF0000L
+//HUBPREQ3_PER_LINE_DELIVERY
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L__SHIFT 0x0
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C__SHIFT 0x10
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_L_MASK 0x00001FFFL
+#define HUBPREQ3_PER_LINE_DELIVERY__REFCYC_PER_LINE_DELIVERY_C_MASK 0x1FFF0000L
+//HUBPREQ3_CURSOR_SETTINGS
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET__SHIFT 0x0
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST__SHIFT 0x8
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET__SHIFT 0x10
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST__SHIFT 0x18
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_DST_Y_OFFSET_MASK 0x000000FFL
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR0_CHUNK_HDL_ADJUST_MASK 0x00000300L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_DST_Y_OFFSET_MASK 0x00FF0000L
+#define HUBPREQ3_CURSOR_SETTINGS__CURSOR1_CHUNK_HDL_ADJUST_MASK 0x03000000L
+//HUBPREQ3_REF_FREQ_TO_PIX_FREQ
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ__SHIFT 0x0
+#define HUBPREQ3_REF_FREQ_TO_PIX_FREQ__REF_FREQ_TO_PIX_FREQ_MASK 0x001FFFFFL
+//HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT__SHIFT 0x0
+#define HUBPREQ3_DST_Y_DELTA_DRQ_LIMIT__DST_Y_DELTA_DRQ_LIMIT_MASK 0x00007FFFL
+//HUBPREQ3_HUBPREQ_MEM_PWR_CTRL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE__SHIFT 0xc
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS__SHIFT 0xe
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_DPTE_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_FORCE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_MPTE_MEM_PWR_DIS_MASK 0x00000040L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_META_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_FORCE_MASK 0x00003000L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_CTRL__REQ_PDE_MEM_PWR_DIS_MASK 0x00004000L
+//HUBPREQ3_HUBPREQ_MEM_PWR_STATUS
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE__SHIFT 0x6
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_DPTE_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_MPTE_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_META_MEM_PWR_STATE_MASK 0x00000030L
+#define HUBPREQ3_HUBPREQ_MEM_PWR_STATUS__REQ_PDE_MEM_PWR_STATE_MASK 0x000000C0L
+// addressBlock: dce_dc_dcbubp3_dispdec_hubpret_dispdec
+//HUBPRET3_HUBPRET_CONTROL
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS__SHIFT 0x0
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE__SHIFT 0xc
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA__SHIFT 0x10
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G__SHIFT 0x12
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B__SHIFT 0x14
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R__SHIFT 0x16
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE__SHIFT 0x18
+#define HUBPRET3_HUBPRET_CONTROL__DET_BUF_PLANE1_BASE_ADDRESS_MASK 0x00000FFFL
+#define HUBPRET3_HUBPRET_CONTROL__PACK_3TO2_ELEMENT_DISABLE_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_ALPHA_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_Y_G_MASK 0x000C0000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CB_B_MASK 0x00300000L
+#define HUBPRET3_HUBPRET_CONTROL__CROSSBAR_SRC_CR_R_MASK 0x00C00000L
+#define HUBPRET3_HUBPRET_CONTROL__HUBPRET_CONTROL_SPARE_MASK 0xFF000000L
+//HUBPRET3_HUBPRET_MEM_PWR_CTRL
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS__SHIFT 0xa
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE__SHIFT 0x14
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_FORCE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_DIS_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DET_MEM_PWR_LS_MODE_MASK 0x00000030L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_FORCE_MASK 0x00000300L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__DMROB_MEM_PWR_DIS_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_FORCE_MASK 0x00030000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_DIS_MASK 0x00040000L
+#define HUBPRET3_HUBPRET_MEM_PWR_CTRL__PIXCDC_MEM_PWR_LS_MODE_MASK 0x00300000L
+//HUBPRET3_HUBPRET_MEM_PWR_STATUS
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE__SHIFT 0x2
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DET_MEM_PWR_STATE_MASK 0x00000003L
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__DMROB_MEM_PWR_STATE_MASK 0x0000000CL
+#define HUBPRET3_HUBPRET_MEM_PWR_STATUS__PIXCDC_MEM_PWR_STATE_MASK 0x00000030L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_INTERVAL_IN_NONACTIVE_MASK 0x0000FFFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL0__PIPE_READ_LINE_VBLANK_MAXIMUM_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_CTRL1
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__PIPE_READ_LINE_REPORTED_WHEN_REQ_DISABLED_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_CTRL1__HUBPRET_READ_LINE_CTRL1_SPARE_MASK 0xFFFF0000L
+//HUBPRET3_HUBPRET_READ_LINE0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE0__PIPE_READ_LINE0_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE1
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_START_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE1__PIPE_READ_LINE1_END_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_INTERRUPT
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK__SHIFT 0x1
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK__SHIFT 0x2
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE__SHIFT 0x6
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR__SHIFT 0x8
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR__SHIFT 0x9
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR__SHIFT 0xa
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS__SHIFT 0xc
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS__SHIFT 0xd
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS__SHIFT 0xe
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS__SHIFT 0x10
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS__SHIFT 0x11
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS__SHIFT 0x12
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_MASK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_MASK_MASK 0x00000002L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_MASK_MASK 0x00000004L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_TYPE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_TYPE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_TYPE_MASK 0x00000040L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_CLEAR_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_CLEAR_MASK 0x00000200L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_CLEAR_MASK 0x00000400L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_STATUS_MASK 0x00001000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_STATUS_MASK 0x00002000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_STATUS_MASK 0x00004000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_VBLANK_INT_STATUS_MASK 0x00010000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE0_INT_STATUS_MASK 0x00020000L
+#define HUBPRET3_HUBPRET_INTERRUPT__PIPE_READ_LINE1_INT_STATUS_MASK 0x00040000L
+//HUBPRET3_HUBPRET_READ_LINE_VALUE
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT__SHIFT 0x10
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_MASK 0x00003FFFL
+#define HUBPRET3_HUBPRET_READ_LINE_VALUE__PIPE_READ_LINE_SNAPSHOT_MASK 0x3FFF0000L
+//HUBPRET3_HUBPRET_READ_LINE_STATUS
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK__SHIFT 0x0
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE__SHIFT 0x4
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE__SHIFT 0x5
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE__SHIFT 0x8
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE__SHIFT 0xa
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_VBLANK_MASK 0x00000001L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_INSIDE_MASK 0x00000010L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE0_OUTSIDE_MASK 0x00000020L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_INSIDE_MASK 0x00000100L
+#define HUBPRET3_HUBPRET_READ_LINE_STATUS__PIPE_READ_LINE1_OUTSIDE_MASK 0x00000400L
+
+
+// addressBlock: dce_dc_dcbubp3_dispdec_cursor0_dispdec
+//CURSOR0_3_CURSOR_CONTROL
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY__SHIFT 0x4
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE__SHIFT 0x8
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ__SHIFT 0xc
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP__SHIFT 0xd
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM__SHIFT 0xe
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS__SHIFT 0x14
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK__SHIFT 0x18
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN__SHIFT 0x1e
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL__SHIFT 0x1f
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_ENABLE_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_2X_MAGNIFY_MASK 0x00000010L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_MODE_MASK 0x00000700L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_TMZ_MASK 0x00001000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SNOOP_MASK 0x00002000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_SYSTEM_MASK 0x00004000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PITCH_MASK 0x00030000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_XY_POSITION_ROTATION_AND_MIRRORING_BYPASS_MASK 0x00100000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_LINES_PER_CHUNK_MASK 0x1F000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_EN_MASK 0x40000000L
+#define CURSOR0_3_CURSOR_CONTROL__CURSOR_PERFMON_LATENCY_MEASURE_SEL_MASK 0x80000000L
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS__CURSOR_SURFACE_ADDRESS_MASK 0xFFFFFFFFL
+//CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SURFACE_ADDRESS_HIGH__CURSOR_SURFACE_ADDRESS_HIGH_MASK 0x0000FFFFL
+//CURSOR0_3_CURSOR_SIZE
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT__SHIFT 0x0
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH__SHIFT 0x10
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_HEIGHT_MASK 0x000001FFL
+#define CURSOR0_3_CURSOR_SIZE__CURSOR_WIDTH_MASK 0x01FF0000L
+//CURSOR0_3_CURSOR_POSITION
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION__SHIFT 0x0
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION__SHIFT 0x10
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_Y_POSITION_MASK 0x00003FFFL
+#define CURSOR0_3_CURSOR_POSITION__CURSOR_X_POSITION_MASK 0x3FFF0000L
+//CURSOR0_3_CURSOR_HOT_SPOT
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y__SHIFT 0x0
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X__SHIFT 0x10
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_Y_MASK 0x000000FFL
+#define CURSOR0_3_CURSOR_HOT_SPOT__CURSOR_HOT_SPOT_X_MASK 0x00FF0000L
+//CURSOR0_3_CURSOR_STEREO_CONTROL
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN__SHIFT 0x0
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET__SHIFT 0x4
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET__SHIFT 0x12
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_STEREO_EN_MASK 0x00000001L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_PRIMARY_OFFSET_MASK 0x0003FFF0L
+#define CURSOR0_3_CURSOR_STEREO_CONTROL__CURSOR_SECONDARY_OFFSET_MASK 0xFFFC0000L
+//CURSOR0_3_CURSOR_DST_OFFSET
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET__SHIFT 0x0
+#define CURSOR0_3_CURSOR_DST_OFFSET__CURSOR_DST_X_OFFSET_MASK 0x00001FFFL
+//CURSOR0_3_CURSOR_MEM_PWR_CTRL
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS__SHIFT 0x2
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE__SHIFT 0x4
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_FORCE_MASK 0x00000003L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_DIS_MASK 0x00000004L
+#define CURSOR0_3_CURSOR_MEM_PWR_CTRL__CROB_MEM_PWR_LS_MODE_MASK 0x00000030L
+//CURSOR0_3_CURSOR_MEM_PWR_STATUS
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE__SHIFT 0x0
+#define CURSOR0_3_CURSOR_MEM_PWR_STATUS__CROB_MEM_PWR_STATE_MASK 0x00000003L
+//CURSOR0_3_DMDATA_ADDRESS_HIGH
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM__SHIFT 0x1c
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP__SHIFT 0x1d
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ__SHIFT 0x1e
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_ADDRESS_HIGH_MASK 0x0000FFFFL
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SYSTEM_MASK 0x10000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_SNOOP_MASK 0x20000000L
+#define CURSOR0_3_DMDATA_ADDRESS_HIGH__DMDATA_TMZ_MASK 0x40000000L
+//CURSOR0_3_DMDATA_ADDRESS_LOW
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW__SHIFT 0x0
+#define CURSOR0_3_DMDATA_ADDRESS_LOW__DMDATA_ADDRESS_LOW_MASK 0xFFFFFFFFL
+//CURSOR0_3_DMDATA_CNTL
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE__SHIFT 0x2
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_MODE_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_CNTL__DMDATA_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_QOS_CNTL
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL__SHIFT 0x4
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA__SHIFT 0x10
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_MODE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_QOS_LEVEL_MASK 0x000000F0L
+#define CURSOR0_3_DMDATA_QOS_CNTL__DMDATA_DL_DELTA_MASK 0xFFFF0000L
+//CURSOR0_3_DMDATA_STATUS
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE__SHIFT 0x0
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW__SHIFT 0x2
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR__SHIFT 0x4
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_DONE_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_MASK 0x00000004L
+#define CURSOR0_3_DMDATA_STATUS__DMDATA_UNDERFLOW_CLEAR_MASK 0x00000010L
+//CURSOR0_3_DMDATA_SW_CNTL
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT__SHIFT 0x1
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE__SHIFT 0x10
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_UPDATED_MASK 0x00000001L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_REPEAT_MASK 0x00000002L
+#define CURSOR0_3_DMDATA_SW_CNTL__DMDATA_SW_SIZE_MASK 0x0FFF0000L
+//CURSOR0_3_DMDATA_SW_DATA
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA__SHIFT 0x0
+#define CURSOR0_3_DMDATA_SW_DATA__DMDATA_SW_DATA_MASK 0xFFFFFFFFL
+// addressBlock: dce_dc_dpp0_dispdec_dpp_top_dispdec
+//DPP_TOP0_DPP_CONTROL
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP0_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP0_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP0_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP0_DPP_SOFT_RESET
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP0_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP0_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP0_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP0_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP0_DPP_CRC_VAL_R_G
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_VAL_B_A
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP0_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP0_DPP_CRC_CTRL
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP0_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP0_HOST_READ_CONTROL
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP0_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG0_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG0_FORMAT_CONTROL
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG0_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG0_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG0_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG0_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG0_FCNV_FP_BIAS_R
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_G
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_BIAS_B
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_R
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_G
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG0_FCNV_FP_SCALE_B
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG0_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG0_COLOR_KEYER_CONTROL
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG0_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG0_COLOR_KEYER_ALPHA
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_RED
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_GREEN
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_COLOR_KEYER_BLUE
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG0_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG0_ALPHA_2BIT_LUT
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG0_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cnvc_cur_dispdec
+//CNVC_CUR0_CURSOR0_CONTROL
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR0_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR0_CURSOR0_COLOR0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_COLOR1
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR0_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR0_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_dscl_dispdec
+//DSCL0_SCL_COEF_RAM_TAP_SELECT
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL0_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL0_SCL_COEF_RAM_TAP_DATA
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL0_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL0_SCL_MODE
+#define DSCL0_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL0_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL0_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL0_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL0_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL0_SCL_TAP_CONTROL
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL0_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL0_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL0_DSCL_CONTROL
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL0_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL0_DSCL_2TAP_CONTROL
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL0_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL0_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL0_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_HORZ_FILTER_INIT_C
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL0_SCL_VERT_FILTER_INIT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL0_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL0_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL0_SCL_BLACK_OFFSET
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL0_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL0_DSCL_UPDATE
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL0_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL0_DSCL_AUTOCAL
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL0_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL0_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL0_OTG_H_BLANK
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_OTG_V_BLANK
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL0_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL0_RECOUT_START
+#define DSCL0_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL0_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL0_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL0_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL0_RECOUT_SIZE
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL0_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL0_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_MPC_SIZE
+#define DSCL0_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL0_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL0_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL0_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL0_LB_DATA_FORMAT
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL0_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL0_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL0_LB_MEMORY_CTRL
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL0_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL0_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL0_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL0_LB_V_COUNTER
+#define DSCL0_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL0_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL0_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL0_DSCL_MEM_PWR_CTRL
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL0_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL0_DSCL_MEM_PWR_STATUS
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL0_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL0_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL0_OBUF_CONTROL
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL0_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL0_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL0_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL0_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL0_OBUF_MEM_PWR_CTRL
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL0_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp0_dispdec_cm_dispdec
+//CM0_CM_CONTROL
+#define CM0_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM0_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM0_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM0_CM_ICSC_CONTROL
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM0_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM0_CM_ICSC_C11_C12
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C13_C14
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C21_C22
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C23_C24
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C31_C32
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_C33_C34
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C11_C12
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C13_C14
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C21_C22
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C23_C24
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C31_C32
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM0_CM_ICSC_B_C33_C34
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_CONTROL
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM0_CM_GAMUT_REMAP_C11_C12
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C13_C14
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C21_C22
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C23_C24
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C31_C32
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_C33_C34
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C11_C12
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C13_C14
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C21_C22
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C23_C24
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C31_C32
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM0_CM_GAMUT_REMAP_B_C33_C34
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM0_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM0_CM_BIAS_CR_R
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM0_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM0_CM_BIAS_Y_G_CB_B
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM0_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_CONTROL
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM0_CM_DGAM_LUT_INDEX
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_DGAM_LUT_DATA
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM0_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM0_CM_DGAM_RAMA_START_CNTL_B
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_G
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_START_CNTL_R
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL1_B
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_B
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_G
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_G
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_END_CNTL1_R
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMA_END_CNTL2_R
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMA_REGION_0_1
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_2_3
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_4_5
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_6_7
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_8_9
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_10_11
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_12_13
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMA_REGION_14_15
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_START_CNTL_B
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_G
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_START_CNTL_R
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL1_B
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_B
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_G
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_G
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_END_CNTL1_R
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_DGAM_RAMB_END_CNTL2_R
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_DGAM_RAMB_REGION_0_1
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_2_3
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_4_5
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_6_7
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_8_9
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_10_11
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_12_13
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_DGAM_RAMB_REGION_14_15
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_CONTROL
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM0_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM0_CM_BLNDGAM_LUT_INDEX
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM0_CM_BLNDGAM_LUT_DATA
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM0_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM0_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM0_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_HDR_MULT_COEF
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM0_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM0_CM_MEM_PWR_CTRL
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM0_CM_MEM_PWR_STATUS
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM0_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM0_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM0_CM_DEALPHA
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM0_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM0_CM_COEF_FORMAT
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM0_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM0_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM0_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM0_CM_SHAPER_CONTROL
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM0_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM0_CM_SHAPER_OFFSET_R
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_G
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_OFFSET_B
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM0_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM0_CM_SHAPER_SCALE_R
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM0_CM_SHAPER_SCALE_G_B
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_SHAPER_LUT_INDEX
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM0_CM_SHAPER_LUT_DATA
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM0_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM0_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM0_CM_SHAPER_RAMA_START_CNTL_B
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_G
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_START_CNTL_R
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_B
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_G
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_END_CNTL_R
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMA_REGION_0_1
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_2_3
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_4_5
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_6_7
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_8_9
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_10_11
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_12_13
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_14_15
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_16_17
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_18_19
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_20_21
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_22_23
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_24_25
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_26_27
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_28_29
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_30_31
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMA_REGION_32_33
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_B
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_G
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_START_CNTL_R
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM0_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_B
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_G
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_END_CNTL_R
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM0_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM0_CM_SHAPER_RAMB_REGION_0_1
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_2_3
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_4_5
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_6_7
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_8_9
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_10_11
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_12_13
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_14_15
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_16_17
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_18_19
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_20_21
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_22_23
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_24_25
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_26_27
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_28_29
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_30_31
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_SHAPER_RAMB_REGION_32_33
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM0_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM0_CM_MEM_PWR_CTRL2
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM0_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM0_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM0_CM_MEM_PWR_STATUS2
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM0_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM0_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM0_CM_3DLUT_MODE
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM0_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM0_CM_3DLUT_INDEX
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM0_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM0_CM_3DLUT_DATA
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_DATA_30BIT
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM0_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM0_CM_3DLUT_READ_WRITE_CONTROL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM0_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM0_CM_3DLUT_OUT_NORM_FACTOR
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM0_CM_3DLUT_OUT_OFFSET_R
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_G
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM0_CM_3DLUT_OUT_OFFSET_B
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM0_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM0_CM_TEST_DEBUG_INDEX
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM0_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM0_CM_TEST_DEBUG_DATA
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM0_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dpp_top_dispdec
+//DPP_TOP1_DPP_CONTROL
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP1_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP1_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP1_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP1_DPP_SOFT_RESET
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP1_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP1_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP1_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP1_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP1_DPP_CRC_VAL_R_G
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_VAL_B_A
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP1_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP1_DPP_CRC_CTRL
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP1_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP1_HOST_READ_CONTROL
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP1_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG1_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG1_FORMAT_CONTROL
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG1_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG1_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG1_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG1_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG1_FCNV_FP_BIAS_R
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_G
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_BIAS_B
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_R
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_G
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG1_FCNV_FP_SCALE_B
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG1_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG1_COLOR_KEYER_CONTROL
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG1_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG1_COLOR_KEYER_ALPHA
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_RED
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_GREEN
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_COLOR_KEYER_BLUE
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG1_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG1_ALPHA_2BIT_LUT
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG1_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cnvc_cur_dispdec
+//CNVC_CUR1_CURSOR0_CONTROL
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR1_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR1_CURSOR0_COLOR0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_COLOR1
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR1_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR1_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_dscl_dispdec
+//DSCL1_SCL_COEF_RAM_TAP_SELECT
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL1_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL1_SCL_COEF_RAM_TAP_DATA
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL1_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL1_SCL_MODE
+#define DSCL1_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL1_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL1_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL1_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL1_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL1_SCL_TAP_CONTROL
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL1_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL1_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL1_DSCL_CONTROL
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL1_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL1_DSCL_2TAP_CONTROL
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL1_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL1_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL1_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_HORZ_FILTER_INIT_C
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL1_SCL_VERT_FILTER_INIT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL1_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL1_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL1_SCL_BLACK_OFFSET
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL1_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL1_DSCL_UPDATE
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL1_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL1_DSCL_AUTOCAL
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL1_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL1_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL1_OTG_H_BLANK
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_OTG_V_BLANK
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL1_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL1_RECOUT_START
+#define DSCL1_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL1_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL1_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL1_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL1_RECOUT_SIZE
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL1_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL1_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_MPC_SIZE
+#define DSCL1_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL1_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL1_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL1_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL1_LB_DATA_FORMAT
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL1_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL1_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL1_LB_MEMORY_CTRL
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL1_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL1_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL1_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL1_LB_V_COUNTER
+#define DSCL1_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL1_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL1_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL1_DSCL_MEM_PWR_CTRL
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL1_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL1_DSCL_MEM_PWR_STATUS
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL1_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL1_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL1_OBUF_CONTROL
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL1_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL1_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL1_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL1_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL1_OBUF_MEM_PWR_CTRL
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL1_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp1_dispdec_cm_dispdec
+//CM1_CM_CONTROL
+#define CM1_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM1_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM1_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM1_CM_ICSC_CONTROL
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM1_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM1_CM_ICSC_C11_C12
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C13_C14
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C21_C22
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C23_C24
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C31_C32
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_C33_C34
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C11_C12
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C13_C14
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C21_C22
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C23_C24
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C31_C32
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM1_CM_ICSC_B_C33_C34
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_CONTROL
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM1_CM_GAMUT_REMAP_C11_C12
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C13_C14
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C21_C22
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C23_C24
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C31_C32
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_C33_C34
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C11_C12
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C13_C14
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C21_C22
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C23_C24
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C31_C32
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM1_CM_GAMUT_REMAP_B_C33_C34
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM1_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM1_CM_BIAS_CR_R
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM1_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM1_CM_BIAS_Y_G_CB_B
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM1_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_CONTROL
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM1_CM_DGAM_LUT_INDEX
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_DGAM_LUT_DATA
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM1_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM1_CM_DGAM_RAMA_START_CNTL_B
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_G
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_START_CNTL_R
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL1_B
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_B
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_G
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_G
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_END_CNTL1_R
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMA_END_CNTL2_R
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMA_REGION_0_1
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_2_3
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_4_5
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_6_7
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_8_9
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_10_11
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_12_13
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMA_REGION_14_15
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_START_CNTL_B
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_G
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_START_CNTL_R
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL1_B
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_B
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_G
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_G
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_END_CNTL1_R
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_DGAM_RAMB_END_CNTL2_R
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_DGAM_RAMB_REGION_0_1
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_2_3
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_4_5
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_6_7
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_8_9
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_10_11
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_12_13
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_DGAM_RAMB_REGION_14_15
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_CONTROL
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM1_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM1_CM_BLNDGAM_LUT_INDEX
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM1_CM_BLNDGAM_LUT_DATA
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM1_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM1_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM1_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_HDR_MULT_COEF
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM1_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM1_CM_MEM_PWR_CTRL
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM1_CM_MEM_PWR_STATUS
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM1_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM1_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM1_CM_DEALPHA
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM1_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM1_CM_COEF_FORMAT
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM1_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM1_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM1_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM1_CM_SHAPER_CONTROL
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM1_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM1_CM_SHAPER_OFFSET_R
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_G
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_OFFSET_B
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM1_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM1_CM_SHAPER_SCALE_R
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM1_CM_SHAPER_SCALE_G_B
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM1_CM_SHAPER_LUT_INDEX
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM1_CM_SHAPER_LUT_DATA
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM1_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM1_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM1_CM_SHAPER_RAMA_START_CNTL_B
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_G
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_START_CNTL_R
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_B
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_G
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_END_CNTL_R
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMA_REGION_0_1
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_2_3
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_4_5
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_6_7
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_8_9
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_10_11
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_12_13
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_14_15
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_16_17
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_18_19
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_20_21
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_22_23
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_24_25
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_26_27
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_28_29
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_30_31
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMA_REGION_32_33
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_B
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_G
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_START_CNTL_R
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM1_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_B
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_G
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_END_CNTL_R
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM1_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM1_CM_SHAPER_RAMB_REGION_0_1
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_2_3
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_4_5
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_6_7
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_8_9
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_10_11
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_12_13
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_14_15
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_16_17
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_18_19
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_20_21
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_22_23
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_24_25
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_26_27
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_28_29
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_30_31
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_SHAPER_RAMB_REGION_32_33
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM1_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM1_CM_MEM_PWR_CTRL2
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM1_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM1_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM1_CM_MEM_PWR_STATUS2
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM1_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM1_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM1_CM_3DLUT_MODE
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM1_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM1_CM_3DLUT_INDEX
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM1_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM1_CM_3DLUT_DATA
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_DATA_30BIT
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM1_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM1_CM_3DLUT_READ_WRITE_CONTROL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM1_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM1_CM_3DLUT_OUT_NORM_FACTOR
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM1_CM_3DLUT_OUT_OFFSET_R
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_G
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM1_CM_3DLUT_OUT_OFFSET_B
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM1_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
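The CM1_CM_3DLUT_OUT_OFFSET_* registers above each pack a 16-bit offset and a 16-bit scale into a single dword, which is the general pattern behind every __SHIFT/_MASK pair in this header. The following is a minimal standalone sketch of how such a pair is combined to encode and decode a field; the set_field()/get_field() helpers are illustrative only (not amdgpu/DC API), and the macro values are simply copied from the definitions above.

/* Standalone sketch: pack and unpack the two 16-bit fields of
 * CM1_CM_3DLUT_OUT_OFFSET_R using its __SHIFT/_MASK pairs.
 * set_field()/get_field() are illustrative helpers, not driver API.
 */
#include <stdint.h>
#include <stdio.h>

#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT  0x10
#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK   0x0000FFFFL
#define CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK    0xFFFF0000L

/* Place 'val' into the field described by (mask, shift). */
static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/* Extract the field described by (mask, shift). */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	uint32_t reg = 0;

	reg = set_field(reg, CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK,
			CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT, 0x0100);
	reg = set_field(reg, CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK,
			CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT, 0x2000);

	printf("reg = 0x%08X, offset = 0x%X, scale = 0x%X\n", reg,
	       get_field(reg, CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK,
			 CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT),
	       get_field(reg, CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK,
			 CM1_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT));
	return 0;
}

Running this prints reg = 0x20000100 with offset 0x0100 and scale 0x2000, confirming that the two fields occupy disjoint bit ranges within the same register.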
+// addressBlock: dce_dc_dpp2_dispdec_dpp_top_dispdec
+//DPP_TOP2_DPP_CONTROL
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP2_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP2_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP2_DPP_SOFT_RESET
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP2_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP2_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP2_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP2_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP2_DPP_CRC_VAL_R_G
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_VAL_B_A
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP2_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP2_DPP_CRC_CTRL
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP2_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP2_HOST_READ_CONTROL
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP2_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
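For live control registers such as DPP_TOP2_DPP_CONTROL, these masks are normally applied in a read-modify-write sequence so that neighbouring fields (the various *_GATE_DISABLE bits) are preserved. The sketch below uses a plain shadow variable in place of the real MMIO access path, so the flow and the dpp2_set_clock_enable() helper are illustrative assumptions rather than the driver's actual accessors.

/* Illustrative read-modify-write of DPP_TOP2_DPP_CONTROL that toggles
 * DPP_CLOCK_ENABLE while leaving other bits untouched. A shadow variable
 * stands in for the real MMIO register.
 */
#include <stdint.h>
#include <stdio.h>

#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT    0x4
#define DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK      0x00000010L
#define DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L

static uint32_t dpp_control_shadow; /* stand-in for the MMIO register */

static void dpp2_set_clock_enable(int enable)
{
	uint32_t v = dpp_control_shadow;                        /* read   */

	v &= ~(uint32_t)DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK; /* clear */
	if (enable)
		v |= 1u << DPP_TOP2_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT;

	dpp_control_shadow = v;                                 /* write  */
}

int main(void)
{
	/* Pretend an unrelated gate-disable bit is already set. */
	dpp_control_shadow = DPP_TOP2_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK;

	dpp2_set_clock_enable(1);
	printf("DPP_CONTROL = 0x%08X\n", dpp_control_shadow);   /* 0x00000110 */

	dpp2_set_clock_enable(0);
	printf("DPP_CONTROL = 0x%08X\n", dpp_control_shadow);   /* 0x00000100 */
	return 0;
}
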
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG2_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG2_FORMAT_CONTROL
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG2_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG2_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG2_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG2_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG2_FCNV_FP_BIAS_R
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_G
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_BIAS_B
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_R
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_G
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG2_FCNV_FP_SCALE_B
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG2_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG2_COLOR_KEYER_CONTROL
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG2_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG2_COLOR_KEYER_ALPHA
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_RED
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_GREEN
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_COLOR_KEYER_BLUE
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG2_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG2_ALPHA_2BIT_LUT
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG2_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cnvc_cur_dispdec
+//CNVC_CUR2_CURSOR0_CONTROL
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR2_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR2_CURSOR0_COLOR0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_COLOR1
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR2_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR2_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp2_dispdec_dscl_dispdec
+//DSCL2_SCL_COEF_RAM_TAP_SELECT
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL2_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL2_SCL_COEF_RAM_TAP_DATA
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL2_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL2_SCL_MODE
+#define DSCL2_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL2_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL2_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL2_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL2_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL2_SCL_TAP_CONTROL
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL2_DSCL_CONTROL
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL2_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL2_DSCL_2TAP_CONTROL
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL2_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL2_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL2_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_HORZ_FILTER_INIT_C
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL2_SCL_VERT_FILTER_INIT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL2_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL2_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL2_SCL_BLACK_OFFSET
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL2_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL2_DSCL_UPDATE
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL2_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL2_DSCL_AUTOCAL
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL2_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL2_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL2_OTG_H_BLANK
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_OTG_V_BLANK
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL2_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL2_RECOUT_START
+#define DSCL2_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL2_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL2_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL2_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL2_RECOUT_SIZE
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL2_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL2_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_MPC_SIZE
+#define DSCL2_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL2_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL2_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL2_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL2_LB_DATA_FORMAT
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL2_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL2_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL2_LB_MEMORY_CTRL
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL2_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL2_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL2_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL2_LB_V_COUNTER
+#define DSCL2_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL2_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL2_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL2_DSCL_MEM_PWR_CTRL
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL2_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL2_DSCL_MEM_PWR_STATUS
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL2_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL2_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL2_OBUF_CONTROL
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL2_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL2_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL2_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL2_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL2_OBUF_MEM_PWR_CTRL
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL2_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
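The *__SHIFT/*_MASK pairs in this generated header are consumed by ordinary
read-modify-write code. As an illustration only (not part of this patch), a
caller could pack the DSCL2_SCL_TAP_CONTROL fields defined above roughly as
follows; the helper name set_field() and the standalone use of uint32_t are
assumptions made for this sketch, not kernel APIs:

    #include <stdint.h>

    /* Insert a value into a register field described by a MASK/SHIFT pair. */
    static inline uint32_t set_field(uint32_t reg, uint32_t mask,
                                     uint32_t shift, uint32_t val)
    {
            return (reg & ~mask) | ((val << shift) & mask);
    }

    /* Hypothetical example: compose the tap-count register for DSCL2. */
    static inline uint32_t dscl2_tap_control(uint32_t v_taps, uint32_t h_taps)
    {
            uint32_t reg = 0;

            reg = set_field(reg, DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK,
                            DSCL2_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT, v_taps);
            reg = set_field(reg, DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK,
                            DSCL2_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT, h_taps);
            return reg;
    }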
+
+
+// addressBlock: dce_dc_dpp2_dispdec_cm_dispdec
+//CM2_CM_CONTROL
+#define CM2_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM2_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM2_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM2_CM_ICSC_CONTROL
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM2_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM2_CM_ICSC_C11_C12
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C13_C14
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C21_C22
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C23_C24
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C31_C32
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_C33_C34
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C11_C12
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C13_C14
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C21_C22
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C23_C24
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C31_C32
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM2_CM_ICSC_B_C33_C34
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_CONTROL
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM2_CM_GAMUT_REMAP_C11_C12
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C13_C14
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C21_C22
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C23_C24
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C31_C32
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_C33_C34
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C11_C12
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C13_C14
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C21_C22
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C23_C24
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C31_C32
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM2_CM_GAMUT_REMAP_B_C33_C34
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM2_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM2_CM_BIAS_CR_R
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM2_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM2_CM_BIAS_Y_G_CB_B
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM2_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_CONTROL
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM2_CM_DGAM_LUT_INDEX
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_DGAM_LUT_DATA
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM2_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM2_CM_DGAM_RAMA_START_CNTL_B
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_G
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_START_CNTL_R
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL1_B
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_B
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_G
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_G
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_END_CNTL1_R
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMA_END_CNTL2_R
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMA_REGION_0_1
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_2_3
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_4_5
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_6_7
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_8_9
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_10_11
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_12_13
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMA_REGION_14_15
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_START_CNTL_B
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_G
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_START_CNTL_R
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL1_B
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_B
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_G
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_G
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_END_CNTL1_R
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_DGAM_RAMB_END_CNTL2_R
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_DGAM_RAMB_REGION_0_1
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_2_3
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_4_5
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_6_7
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_8_9
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_10_11
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_12_13
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_DGAM_RAMB_REGION_14_15
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_CONTROL
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM2_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM2_CM_BLNDGAM_LUT_INDEX
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM2_CM_BLNDGAM_LUT_DATA
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM2_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM2_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM2_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_HDR_MULT_COEF
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM2_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM2_CM_MEM_PWR_CTRL
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM2_CM_MEM_PWR_STATUS
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM2_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM2_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM2_CM_DEALPHA
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM2_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM2_CM_COEF_FORMAT
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM2_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM2_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM2_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM2_CM_SHAPER_CONTROL
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM2_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM2_CM_SHAPER_OFFSET_R
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_G
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_OFFSET_B
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM2_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM2_CM_SHAPER_SCALE_R
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM2_CM_SHAPER_SCALE_G_B
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM2_CM_SHAPER_LUT_INDEX
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM2_CM_SHAPER_LUT_DATA
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM2_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM2_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM2_CM_SHAPER_RAMA_START_CNTL_B
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_G
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_START_CNTL_R
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_B
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_G
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_END_CNTL_R
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMA_REGION_0_1
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_2_3
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_4_5
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_6_7
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_8_9
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_10_11
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_12_13
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_14_15
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_16_17
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_18_19
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_20_21
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_22_23
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_24_25
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_26_27
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_28_29
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_30_31
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMA_REGION_32_33
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_B
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_G
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_START_CNTL_R
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM2_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_B
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_G
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_END_CNTL_R
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM2_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM2_CM_SHAPER_RAMB_REGION_0_1
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_2_3
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_4_5
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_6_7
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_8_9
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_10_11
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_12_13
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_14_15
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_16_17
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_18_19
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_20_21
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_22_23
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_24_25
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_26_27
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_28_29
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_30_31
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_SHAPER_RAMB_REGION_32_33
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM2_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM2_CM_MEM_PWR_CTRL2
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM2_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM2_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM2_CM_MEM_PWR_STATUS2
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM2_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM2_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM2_CM_3DLUT_MODE
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM2_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM2_CM_3DLUT_INDEX
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM2_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM2_CM_3DLUT_DATA
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_DATA_30BIT
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM2_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM2_CM_3DLUT_READ_WRITE_CONTROL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM2_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM2_CM_3DLUT_OUT_NORM_FACTOR
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM2_CM_3DLUT_OUT_OFFSET_R
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_G
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM2_CM_3DLUT_OUT_OFFSET_B
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM2_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dpp_top_dispdec
+//DPP_TOP3_DPP_CONTROL
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE__SHIFT 0x4
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE__SHIFT 0x8
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE__SHIFT 0xa
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE__SHIFT 0xc
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE__SHIFT 0xe
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE__SHIFT 0x10
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x12
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE__SHIFT 0x14
+#define DPP_TOP3_DPP_CONTROL__DPP_CLOCK_ENABLE_MASK 0x00000010L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_GATE_DISABLE_MASK 0x00000100L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DYN_GATE_DISABLE_MASK 0x00000400L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_GATE_DISABLE_MASK 0x00001000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_G_DSCL_ALPHA_GATE_DISABLE_MASK 0x00004000L
+#define DPP_TOP3_DPP_CONTROL__DPPCLK_R_GATE_DISABLE_MASK 0x00010000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00040000L
+#define DPP_TOP3_DPP_CONTROL__DISPCLK_G_GATE_DISABLE_MASK 0x00100000L
+//DPP_TOP3_DPP_SOFT_RESET
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET__SHIFT 0x0
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET__SHIFT 0x4
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET__SHIFT 0x8
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET__SHIFT 0xc
+#define DPP_TOP3_DPP_SOFT_RESET__CNVC_SOFT_RESET_MASK 0x00000001L
+#define DPP_TOP3_DPP_SOFT_RESET__DSCL_SOFT_RESET_MASK 0x00000010L
+#define DPP_TOP3_DPP_SOFT_RESET__CM_SOFT_RESET_MASK 0x00000100L
+#define DPP_TOP3_DPP_SOFT_RESET__OBUF_SOFT_RESET_MASK 0x00001000L
+//DPP_TOP3_DPP_CRC_VAL_R_G
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_R_CR_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_R_G__DPP_CRC_G_Y_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_VAL_B_A
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_B_CB_MASK 0x0000FFFFL
+#define DPP_TOP3_DPP_CRC_VAL_B_A__DPP_CRC_ALPHA_MASK 0xFFFF0000L
+//DPP_TOP3_DPP_CRC_CTRL
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN__SHIFT 0x0
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN__SHIFT 0x1
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING__SHIFT 0x2
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL__SHIFT 0x3
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL__SHIFT 0x4
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL__SHIFT 0x6
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN__SHIFT 0x7
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE__SHIFT 0x8
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE__SHIFT 0xa
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL__SHIFT 0xc
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL__SHIFT 0xf
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK__SHIFT 0x10
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_EN_MASK 0x00000001L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CONT_EN_MASK 0x00000002L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_ONE_SHOT_PENDING_MASK 0x00000004L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_420_COMP_SEL_MASK 0x00000008L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_SRC_SEL_MASK 0x00000030L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_BITS_SEL_MASK 0x00000040L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_EN_MASK 0x00000080L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_STEREO_MODE_MASK 0x00000300L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_INTERLACE_MODE_MASK 0x00000C00L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_PIX_FORMAT_SEL_MASK 0x00007000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_CURSOR_FORMAT_SEL_MASK 0x00008000L
+#define DPP_TOP3_DPP_CRC_CTRL__DPP_CRC_MASK_MASK 0xFFFF0000L
+//DPP_TOP3_HOST_READ_CONTROL
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define DPP_TOP3_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cfg_dispdec
+//CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT__SHIFT 0x0
+#define CNVC_CFG3_CNVC_SURFACE_PIXEL_FORMAT__CNVC_SURFACE_PIXEL_FORMAT_MASK 0x0000007FL
+//CNVC_CFG3_FORMAT_CONTROL
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE__SHIFT 0x0
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16__SHIFT 0x4
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN__SHIFT 0x8
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS__SHIFT 0xc
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN__SHIFT 0xd
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE__SHIFT 0x10
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C__SHIFT 0x11
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING__SHIFT 0x14
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_EXPANSION_MODE_MASK 0x00000001L
+#define CNVC_CFG3_FORMAT_CONTROL__FORMAT_CNV16_MASK 0x00000010L
+#define CNVC_CFG3_FORMAT_CONTROL__ALPHA_EN_MASK 0x00000100L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MASK 0x00001000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_BYPASS_MSB_ALIGN_MASK 0x00002000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_MASK 0x00010000L
+#define CNVC_CFG3_FORMAT_CONTROL__CLAMP_POSITIVE_C_MASK 0x00020000L
+#define CNVC_CFG3_FORMAT_CONTROL__CNVC_UPDATE_PENDING_MASK 0x00100000L
+//CNVC_CFG3_FCNV_FP_BIAS_R
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_R__FCNV_FP_BIAS_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_G
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_G__FCNV_FP_BIAS_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_BIAS_B
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_BIAS_B__FCNV_FP_BIAS_B_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_R
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_R__FCNV_FP_SCALE_R_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_G
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_G__FCNV_FP_SCALE_G_MASK 0x0007FFFFL
+//CNVC_CFG3_FCNV_FP_SCALE_B
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B__SHIFT 0x0
+#define CNVC_CFG3_FCNV_FP_SCALE_B__FCNV_FP_SCALE_B_MASK 0x0007FFFFL
+//CNVC_CFG3_COLOR_KEYER_CONTROL
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE__SHIFT 0x4
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_EN_MASK 0x00000001L
+#define CNVC_CFG3_COLOR_KEYER_CONTROL__COLOR_KEYER_MODE_MASK 0x00000030L
+//CNVC_CFG3_COLOR_KEYER_ALPHA
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_ALPHA__COLOR_KEYER_ALPHA_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_RED
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_RED__COLOR_KEYER_RED_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_GREEN
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_GREEN__COLOR_KEYER_GREEN_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_COLOR_KEYER_BLUE
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW__SHIFT 0x0
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH__SHIFT 0x10
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_LOW_MASK 0x0000FFFFL
+#define CNVC_CFG3_COLOR_KEYER_BLUE__COLOR_KEYER_BLUE_HIGH_MASK 0xFFFF0000L
+//CNVC_CFG3_ALPHA_2BIT_LUT
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0__SHIFT 0x0
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1__SHIFT 0x8
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2__SHIFT 0x10
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3__SHIFT 0x18
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT0_MASK 0x000000FFL
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT1_MASK 0x0000FF00L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT2_MASK 0x00FF0000L
+#define CNVC_CFG3_ALPHA_2BIT_LUT__ALPHA_2BIT_LUT3_MASK 0xFF000000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cnvc_cur_dispdec
+//CNVC_CUR3_CURSOR0_CONTROL
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE__SHIFT 0x1
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE__SHIFT 0x2
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN__SHIFT 0x3
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE__SHIFT 0x4
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN__SHIFT 0x7
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ENABLE_MASK 0x00000001L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_EXPANSION_MODE_MASK 0x00000002L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIX_INV_MODE_MASK 0x00000004L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_ROM_EN_MASK 0x00000008L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_MODE_MASK 0x00000070L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_PIXEL_ALPHA_MOD_EN_MASK 0x00000080L
+#define CNVC_CUR3_CURSOR0_CONTROL__CUR0_UPDATE_PENDING_MASK 0x00010000L
+//CNVC_CUR3_CURSOR0_COLOR0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR0__CUR0_COLOR0_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_COLOR1
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_COLOR1__CUR0_COLOR1_MASK 0x00FFFFFFL
+//CNVC_CUR3_CURSOR0_FP_SCALE_BIAS
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE__SHIFT 0x0
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS__SHIFT 0x10
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_SCALE_MASK 0x0000FFFFL
+#define CNVC_CUR3_CURSOR0_FP_SCALE_BIAS__CUR0_FP_BIAS_MASK 0xFFFF0000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_dscl_dispdec
+//DSCL3_SCL_COEF_RAM_TAP_SELECT
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE__SHIFT 0x8
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_TAP_PAIR_IDX_MASK 0x00000003L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_PHASE_MASK 0x00003F00L
+#define DSCL3_SCL_COEF_RAM_TAP_SELECT__SCL_COEF_RAM_FILTER_TYPE_MASK 0x00070000L
+//DSCL3_SCL_COEF_RAM_TAP_DATA
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF__SHIFT 0x0
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN__SHIFT 0xf
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF__SHIFT 0x10
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN__SHIFT 0x1f
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_MASK 0x00003FFFL
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_EVEN_TAP_COEF_EN_MASK 0x00008000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_MASK 0x3FFF0000L
+#define DSCL3_SCL_COEF_RAM_TAP_DATA__SCL_COEF_RAM_ODD_TAP_COEF_EN_MASK 0x80000000L
+//DSCL3_SCL_MODE
+#define DSCL3_SCL_MODE__DSCL_MODE__SHIFT 0x0
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT__SHIFT 0x8
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT__SHIFT 0xc
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE__SHIFT 0x10
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE__SHIFT 0x14
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD__SHIFT 0x18
+#define DSCL3_SCL_MODE__DSCL_MODE_MASK 0x00000007L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_MASK 0x00000100L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_CURRENT_MASK 0x00001000L
+#define DSCL3_SCL_MODE__SCL_CHROMA_COEF_MODE_MASK 0x00010000L
+#define DSCL3_SCL_MODE__SCL_ALPHA_COEF_MODE_MASK 0x00100000L
+#define DSCL3_SCL_MODE__SCL_COEF_RAM_SELECT_RD_MASK 0x01000000L
+//DSCL3_SCL_TAP_CONTROL
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS__SHIFT 0x0
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS__SHIFT 0x4
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C__SHIFT 0x8
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C__SHIFT 0xc
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_MASK 0x00000007L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_MASK 0x00000070L
+#define DSCL3_SCL_TAP_CONTROL__SCL_V_NUM_TAPS_C_MASK 0x00000700L
+#define DSCL3_SCL_TAP_CONTROL__SCL_H_NUM_TAPS_C_MASK 0x00007000L
+//DSCL3_DSCL_CONTROL
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE__SHIFT 0x0
+#define DSCL3_DSCL_CONTROL__SCL_BOUNDARY_MODE_MASK 0x00000001L
+//DSCL3_DSCL_2TAP_CONTROL
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN__SHIFT 0x0
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN__SHIFT 0x4
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR__SHIFT 0x8
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN__SHIFT 0x10
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN__SHIFT 0x14
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR__SHIFT 0x18
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_HARDCODE_COEF_EN_MASK 0x00000001L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_EN_MASK 0x00000010L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_H_2TAP_SHARP_FACTOR_MASK 0x00000700L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_HARDCODE_COEF_EN_MASK 0x00010000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_EN_MASK 0x00100000L
+#define DSCL3_DSCL_2TAP_CONTROL__SCL_V_2TAP_SHARP_FACTOR_MASK 0x07000000L
+//DSCL3_SCL_MANUAL_REPLICATE_CONTROL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR__SHIFT 0x0
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR__SHIFT 0x8
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_V_MANUAL_REPLICATE_FACTOR_MASK 0x0000000FL
+#define DSCL3_SCL_MANUAL_REPLICATE_CONTROL__SCL_H_MANUAL_REPLICATE_FACTOR_MASK 0x00000F00L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO__SCL_H_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT__SCL_H_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_SCALE_RATIO_C__SCL_H_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_HORZ_FILTER_INIT_C
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_HORZ_FILTER_INIT_C__SCL_H_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO__SCL_V_SCALE_RATIO_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_FRAC_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT__SCL_V_INIT_INT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_FRAC_BOT_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT__SCL_V_INIT_INT_BOT_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_SCALE_RATIO_C__SCL_V_SCALE_RATIO_C_MASK 0x07FFFFFFL
+//DSCL3_SCL_VERT_FILTER_INIT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_FRAC_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_C__SCL_V_INIT_INT_C_MASK 0x0F000000L
+//DSCL3_SCL_VERT_FILTER_INIT_BOT_C
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C__SHIFT 0x0
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C__SHIFT 0x18
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_FRAC_BOT_C_MASK 0x00FFFFFFL
+#define DSCL3_SCL_VERT_FILTER_INIT_BOT_C__SCL_V_INIT_INT_BOT_C_MASK 0x0F000000L
+//DSCL3_SCL_BLACK_OFFSET
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y__SHIFT 0x0
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR__SHIFT 0x10
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_RGB_Y_MASK 0x0000FFFFL
+#define DSCL3_SCL_BLACK_OFFSET__SCL_BLACK_OFFSET_CBCR_MASK 0xFFFF0000L
+//DSCL3_DSCL_UPDATE
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING__SHIFT 0x0
+#define DSCL3_DSCL_UPDATE__SCL_UPDATE_PENDING_MASK 0x00000001L
+//DSCL3_DSCL_AUTOCAL
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE__SHIFT 0x0
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE__SHIFT 0x8
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID__SHIFT 0xc
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_MODE_MASK 0x00000003L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_NUM_PIPE_MASK 0x00000300L
+#define DSCL3_DSCL_AUTOCAL__AUTOCAL_PIPE_ID_MASK 0x00003000L
+//DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_RIGHT_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_LEFT_RIGHT__EXT_OVERSCAN_LEFT_MASK 0x1FFF0000L
+//DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM__SHIFT 0x0
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP__SHIFT 0x10
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_BOTTOM_MASK 0x00001FFFL
+#define DSCL3_DSCL_EXT_OVERSCAN_TOP_BOTTOM__EXT_OVERSCAN_TOP_MASK 0x1FFF0000L
+//DSCL3_OTG_H_BLANK
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_H_BLANK__OTG_H_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_OTG_V_BLANK
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START__SHIFT 0x0
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END__SHIFT 0x10
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_START_MASK 0x00003FFFL
+#define DSCL3_OTG_V_BLANK__OTG_V_BLANK_END_MASK 0x3FFF0000L
+//DSCL3_RECOUT_START
+#define DSCL3_RECOUT_START__RECOUT_START_X__SHIFT 0x0
+#define DSCL3_RECOUT_START__RECOUT_START_Y__SHIFT 0x10
+#define DSCL3_RECOUT_START__RECOUT_START_X_MASK 0x00001FFFL
+#define DSCL3_RECOUT_START__RECOUT_START_Y_MASK 0x1FFF0000L
+//DSCL3_RECOUT_SIZE
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH__SHIFT 0x0
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT__SHIFT 0x10
+#define DSCL3_RECOUT_SIZE__RECOUT_WIDTH_MASK 0x00003FFFL
+#define DSCL3_RECOUT_SIZE__RECOUT_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_MPC_SIZE
+#define DSCL3_MPC_SIZE__MPC_WIDTH__SHIFT 0x0
+#define DSCL3_MPC_SIZE__MPC_HEIGHT__SHIFT 0x10
+#define DSCL3_MPC_SIZE__MPC_WIDTH_MASK 0x00003FFFL
+#define DSCL3_MPC_SIZE__MPC_HEIGHT_MASK 0x3FFF0000L
+//DSCL3_LB_DATA_FORMAT
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN__SHIFT 0x0
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN__SHIFT 0x4
+#define DSCL3_LB_DATA_FORMAT__INTERLEAVE_EN_MASK 0x00000001L
+#define DSCL3_LB_DATA_FORMAT__ALPHA_EN_MASK 0x00000010L
+//DSCL3_LB_MEMORY_CTRL
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG__SHIFT 0x0
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS__SHIFT 0x8
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS__SHIFT 0x10
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C__SHIFT 0x18
+#define DSCL3_LB_MEMORY_CTRL__MEMORY_CONFIG_MASK 0x00000003L
+#define DSCL3_LB_MEMORY_CTRL__LB_MAX_PARTITIONS_MASK 0x00003F00L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_MASK 0x007F0000L
+#define DSCL3_LB_MEMORY_CTRL__LB_NUM_PARTITIONS_C_MASK 0x7F000000L
+//DSCL3_LB_V_COUNTER
+#define DSCL3_LB_V_COUNTER__V_COUNTER__SHIFT 0x0
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C__SHIFT 0x10
+#define DSCL3_LB_V_COUNTER__V_COUNTER_MASK 0x00001FFFL
+#define DSCL3_LB_V_COUNTER__V_COUNTER_C_MASK 0x1FFF0000L
+//DSCL3_DSCL_MEM_PWR_CTRL
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS__SHIFT 0xe
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE__SHIFT 0x10
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS__SHIFT 0x12
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE__SHIFT 0x14
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS__SHIFT 0x16
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE__SHIFT 0x18
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS__SHIFT 0x1a
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE__SHIFT 0x1c
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LUT_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G1_MEM_PWR_DIS_MASK 0x00000040L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_FORCE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G2_MEM_PWR_DIS_MASK 0x00000400L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_FORCE_MASK 0x00003000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G3_MEM_PWR_DIS_MASK 0x00004000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_FORCE_MASK 0x00030000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G4_MEM_PWR_DIS_MASK 0x00040000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_FORCE_MASK 0x00300000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G5_MEM_PWR_DIS_MASK 0x00400000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_FORCE_MASK 0x03000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_G6_MEM_PWR_DIS_MASK 0x04000000L
+#define DSCL3_DSCL_MEM_PWR_CTRL__LB_MEM_PWR_MODE_MASK 0x10000000L
+//DSCL3_DSCL_MEM_PWR_STATUS
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE__SHIFT 0x0
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE__SHIFT 0x2
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE__SHIFT 0x4
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE__SHIFT 0x6
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE__SHIFT 0x8
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE__SHIFT 0xa
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE__SHIFT 0xc
+#define DSCL3_DSCL_MEM_PWR_STATUS__LUT_MEM_PWR_STATE_MASK 0x00000003L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G1_MEM_PWR_STATE_MASK 0x0000000CL
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G2_MEM_PWR_STATE_MASK 0x00000030L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G3_MEM_PWR_STATE_MASK 0x000000C0L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G4_MEM_PWR_STATE_MASK 0x00000300L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G5_MEM_PWR_STATE_MASK 0x00000C00L
+#define DSCL3_DSCL_MEM_PWR_STATUS__LB_G6_MEM_PWR_STATE_MASK 0x00003000L
+//DSCL3_OBUF_CONTROL
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS__SHIFT 0x0
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER__SHIFT 0x4
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH__SHIFT 0xc
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT__SHIFT 0x1c
+#define DSCL3_OBUF_CONTROL__OBUF_BYPASS_MASK 0x00000001L
+#define DSCL3_OBUF_CONTROL__OBUF_USE_FULL_BUFFER_MASK 0x00000010L
+#define DSCL3_OBUF_CONTROL__OBUF_IS_HALF_RECOUT_WIDTH_MASK 0x00001000L
+#define DSCL3_OBUF_CONTROL__OBUF_OUT_HOLD_CNT_MASK 0xF0000000L
+//DSCL3_OBUF_MEM_PWR_CTRL
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE__SHIFT 0x0
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS__SHIFT 0x2
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE__SHIFT 0x8
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE__SHIFT 0x10
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_FORCE_MASK 0x00000003L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_DIS_MASK 0x00000004L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_MODE_MASK 0x00000100L
+#define DSCL3_OBUF_MEM_PWR_CTRL__OBUF_MEM_PWR_STATE_MASK 0x00030000L
+
+
+// addressBlock: dce_dc_dpp3_dispdec_cm_dispdec
+//CM3_CM_CONTROL
+#define CM3_CM_CONTROL__CM_BYPASS__SHIFT 0x0
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING__SHIFT 0x8
+#define CM3_CM_CONTROL__CM_BYPASS_MASK 0x00000001L
+#define CM3_CM_CONTROL__CM_UPDATE_PENDING_MASK 0x00000100L
+//CM3_CM_ICSC_CONTROL
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE__SHIFT 0x0
+#define CM3_CM_ICSC_CONTROL__CM_ICSC_MODE_MASK 0x00000003L
+//CM3_CM_ICSC_C11_C12
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11__SHIFT 0x0
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12__SHIFT 0x10
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C11_C12__CM_ICSC_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C13_C14
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13__SHIFT 0x0
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14__SHIFT 0x10
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C13_C14__CM_ICSC_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C21_C22
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21__SHIFT 0x0
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22__SHIFT 0x10
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C21_C22__CM_ICSC_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C23_C24
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23__SHIFT 0x0
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24__SHIFT 0x10
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C23_C24__CM_ICSC_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C31_C32
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31__SHIFT 0x0
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32__SHIFT 0x10
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C31_C32__CM_ICSC_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_C33_C34
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33__SHIFT 0x0
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34__SHIFT 0x10
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_C33_C34__CM_ICSC_C34_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C11_C12
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11__SHIFT 0x0
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12__SHIFT 0x10
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C11_C12__CM_ICSC_B_C12_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C13_C14
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13__SHIFT 0x0
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14__SHIFT 0x10
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C13_C14__CM_ICSC_B_C14_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C21_C22
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21__SHIFT 0x0
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22__SHIFT 0x10
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C21_C22__CM_ICSC_B_C22_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C23_C24
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23__SHIFT 0x0
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24__SHIFT 0x10
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C23_C24__CM_ICSC_B_C24_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C31_C32
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31__SHIFT 0x0
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32__SHIFT 0x10
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C31_C32__CM_ICSC_B_C32_MASK 0xFFFF0000L
+//CM3_CM_ICSC_B_C33_C34
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33__SHIFT 0x0
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34__SHIFT 0x10
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_ICSC_B_C33_C34__CM_ICSC_B_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_CONTROL
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_CONTROL__CM_GAMUT_REMAP_MODE_MASK 0x00000003L
+//CM3_CM_GAMUT_REMAP_C11_C12
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C11_C12__CM_GAMUT_REMAP_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C13_C14
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C13_C14__CM_GAMUT_REMAP_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C21_C22
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C21_C22__CM_GAMUT_REMAP_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C23_C24
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C23_C24__CM_GAMUT_REMAP_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C31_C32
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C31_C32__CM_GAMUT_REMAP_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_C33_C34
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_C33_C34__CM_GAMUT_REMAP_C34_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C11_C12
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C11_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C11_C12__CM_GAMUT_REMAP_B_C12_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C13_C14
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C13_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C13_C14__CM_GAMUT_REMAP_B_C14_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C21_C22
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C21_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C21_C22__CM_GAMUT_REMAP_B_C22_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C23_C24
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C23_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C23_C24__CM_GAMUT_REMAP_B_C24_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C31_C32
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C31_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C31_C32__CM_GAMUT_REMAP_B_C32_MASK 0xFFFF0000L
+//CM3_CM_GAMUT_REMAP_B_C33_C34
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33__SHIFT 0x0
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34__SHIFT 0x10
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C33_MASK 0x0000FFFFL
+#define CM3_CM_GAMUT_REMAP_B_C33_C34__CM_GAMUT_REMAP_B_C34_MASK 0xFFFF0000L
+//CM3_CM_BIAS_CR_R
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R__SHIFT 0x0
+#define CM3_CM_BIAS_CR_R__CM_BIAS_CR_R_MASK 0x0000FFFFL
+//CM3_CM_BIAS_Y_G_CB_B
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G__SHIFT 0x0
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B__SHIFT 0x10
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_Y_G_MASK 0x0000FFFFL
+#define CM3_CM_BIAS_Y_G_CB_B__CM_BIAS_CB_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_CONTROL
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_DGAM_CONTROL__CM_DGAM_LUT_MODE_MASK 0x00000007L
+//CM3_CM_DGAM_LUT_INDEX
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_INDEX__CM_DGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_DGAM_LUT_DATA
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_DATA__CM_DGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_DGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY__SHIFT 0xc
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_CONFIG_STATUS_MASK 0x00000700L
+#define CM3_CM_DGAM_LUT_WRITE_EN_MASK__CM_DGAM_WRITE_LUT_BASE_ONLY_MASK 0x00001000L
+//CM3_CM_DGAM_RAMA_START_CNTL_B
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_B__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_G
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_G__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_START_CNTL_R
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMA_START_CNTL_R__CM_DGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_B__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_G__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_SLOPE_CNTL_R__CM_DGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL1_B
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_B__CM_DGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_B
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_B__CM_DGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_G
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_G__CM_DGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_G
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_G__CM_DGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_END_CNTL1_R
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL1_R__CM_DGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMA_END_CNTL2_R
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMA_END_CNTL2_R__CM_DGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMA_REGION_0_1
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_0_1__CM_DGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_2_3
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_2_3__CM_DGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_4_5
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_4_5__CM_DGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_6_7
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_6_7__CM_DGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_8_9
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_8_9__CM_DGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_10_11
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_10_11__CM_DGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_12_13
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_12_13__CM_DGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMA_REGION_14_15
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMA_REGION_14_15__CM_DGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_START_CNTL_B
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_B__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_G
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_G__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_START_CNTL_R
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_DGAM_RAMB_START_CNTL_R__CM_DGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_B__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_G__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_SLOPE_CNTL_R__CM_DGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL1_B
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_B__CM_DGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_B
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_B__CM_DGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_G
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_G__CM_DGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_G
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_G__CM_DGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_END_CNTL1_R
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL1_R__CM_DGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_DGAM_RAMB_END_CNTL2_R
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_DGAM_RAMB_END_CNTL2_R__CM_DGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_DGAM_RAMB_REGION_0_1
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_0_1__CM_DGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_2_3
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_2_3__CM_DGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_4_5
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_4_5__CM_DGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_6_7
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_6_7__CM_DGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_8_9
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_8_9__CM_DGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_10_11
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_10_11__CM_DGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_12_13
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_12_13__CM_DGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_DGAM_RAMB_REGION_14_15
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_DGAM_RAMB_REGION_14_15__CM_DGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_CONTROL
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE__SHIFT 0x0
+#define CM3_CM_BLNDGAM_CONTROL__CM_BLNDGAM_LUT_MODE_MASK 0x00000003L
+//CM3_CM_BLNDGAM_LUT_INDEX
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_INDEX__CM_BLNDGAM_LUT_INDEX_MASK 0x000001FFL
+//CM3_CM_BLNDGAM_LUT_DATA
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_DATA__CM_BLNDGAM_LUT_DATA_MASK 0x0007FFFFL
+//CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_BLNDGAM_LUT_WRITE_EN_MASK__CM_BLNDGAM_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMA_START_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_B__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_G__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_SLOPE_CNTL_R__CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_B__CM_BLNDGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_B__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_G__CM_BLNDGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_G__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL1_R__CM_BLNDGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMA_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMA_END_CNTL2_R__CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMA_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_0_1__CM_BLNDGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_2_3__CM_BLNDGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_4_5__CM_BLNDGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_6_7__CM_BLNDGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_8_9__CM_BLNDGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_10_11__CM_BLNDGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_12_13__CM_BLNDGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_14_15__CM_BLNDGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_16_17__CM_BLNDGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_18_19__CM_BLNDGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_20_21__CM_BLNDGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_22_23__CM_BLNDGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_24_25__CM_BLNDGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_26_27__CM_BLNDGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_28_29__CM_BLNDGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_30_31__CM_BLNDGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMA_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMA_REGION_32_33__CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_START_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_BLNDGAM_RAMB_START_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_B__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_G__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_SLOPE_CNTL_R__CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_B__CM_BLNDGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_B
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_B__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_G__CM_BLNDGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_G
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_G__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_END_CNTL1_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL1_R__CM_BLNDGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//CM3_CM_BLNDGAM_RAMB_END_CNTL2_R
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define CM3_CM_BLNDGAM_RAMB_END_CNTL2_R__CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//CM3_CM_BLNDGAM_RAMB_REGION_0_1
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_0_1__CM_BLNDGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_2_3
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_2_3__CM_BLNDGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_4_5
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_4_5__CM_BLNDGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_6_7
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_6_7__CM_BLNDGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_8_9
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_8_9__CM_BLNDGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_10_11
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_10_11__CM_BLNDGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_12_13
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_12_13__CM_BLNDGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_14_15
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_14_15__CM_BLNDGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_16_17
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_16_17__CM_BLNDGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_18_19
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_18_19__CM_BLNDGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_20_21
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_20_21__CM_BLNDGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_22_23
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_22_23__CM_BLNDGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_24_25
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_24_25__CM_BLNDGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_26_27
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_26_27__CM_BLNDGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_28_29
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_28_29__CM_BLNDGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_30_31
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_30_31__CM_BLNDGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_BLNDGAM_RAMB_REGION_32_33
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_BLNDGAM_RAMB_REGION_32_33__CM_BLNDGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_HDR_MULT_COEF
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF__SHIFT 0x0
+#define CM3_CM_HDR_MULT_COEF__CM_HDR_MULT_COEF_MASK 0x0007FFFFL
+//CM3_CM_MEM_PWR_CTRL
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS__SHIFT 0x2
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS__SHIFT 0x6
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_FORCE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_CTRL__SHARED_MEM_PWR_DIS_MASK 0x00000004L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_FORCE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_CTRL__BLNDGAM_MEM_PWR_DIS_MASK 0x00000040L
+//CM3_CM_MEM_PWR_STATUS
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE__SHIFT 0x0
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE__SHIFT 0x2
+#define CM3_CM_MEM_PWR_STATUS__SHARED_MEM_PWR_STATE_MASK 0x00000003L
+#define CM3_CM_MEM_PWR_STATUS__BLNDGAM_MEM_PWR_STATE_MASK 0x0000000CL
+//CM3_CM_DEALPHA
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN__SHIFT 0x0
+#define CM3_CM_DEALPHA__CM_DEALPHA_EN_MASK 0x00000001L
+//CM3_CM_COEF_FORMAT
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT__SHIFT 0x0
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT__SHIFT 0x4
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT__SHIFT 0x8
+#define CM3_CM_COEF_FORMAT__CM_BIAS_FORMAT_MASK 0x00000001L
+#define CM3_CM_COEF_FORMAT__CM_ICSC_COEF_FORMAT_MASK 0x00000010L
+#define CM3_CM_COEF_FORMAT__CM_GAMUT_REMAP_COEF_FORMAT_MASK 0x00000100L
+//CM3_CM_SHAPER_CONTROL
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE__SHIFT 0x0
+#define CM3_CM_SHAPER_CONTROL__CM_SHAPER_LUT_MODE_MASK 0x00000003L
+//CM3_CM_SHAPER_OFFSET_R
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_R__CM_SHAPER_OFFSET_R_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_G
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_G__CM_SHAPER_OFFSET_G_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_OFFSET_B
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B__SHIFT 0x0
+#define CM3_CM_SHAPER_OFFSET_B__CM_SHAPER_OFFSET_B_MASK 0x0007FFFFL
+//CM3_CM_SHAPER_SCALE_R
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_R__CM_SHAPER_SCALE_R_MASK 0x0000FFFFL
+//CM3_CM_SHAPER_SCALE_G_B
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G__SHIFT 0x0
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_SCALE_G_B__CM_SHAPER_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_SHAPER_LUT_INDEX
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_INDEX__CM_SHAPER_LUT_INDEX_MASK 0x000000FFL
+//CM3_CM_SHAPER_LUT_DATA
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_DATA__CM_SHAPER_LUT_DATA_MASK 0x00FFFFFFL
+//CM3_CM_SHAPER_LUT_WRITE_EN_MASK
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL__SHIFT 0x4
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS__SHIFT 0x8
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_LUT_WRITE_SEL_MASK 0x00000010L
+#define CM3_CM_SHAPER_LUT_WRITE_EN_MASK__CM_SHAPER_CONFIG_STATUS_MASK 0x00000300L
+//CM3_CM_SHAPER_RAMA_START_CNTL_B
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_G
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_START_CNTL_R
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMA_START_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_B
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_B__CM_SHAPER_RAMA_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_G
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_G__CM_SHAPER_RAMA_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_END_CNTL_R
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMA_END_CNTL_R__CM_SHAPER_RAMA_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMA_REGION_0_1
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_0_1__CM_SHAPER_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_2_3
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_2_3__CM_SHAPER_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_4_5
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_4_5__CM_SHAPER_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_6_7
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_6_7__CM_SHAPER_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_8_9
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_8_9__CM_SHAPER_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_10_11
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_10_11__CM_SHAPER_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_12_13
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_12_13__CM_SHAPER_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_14_15
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_14_15__CM_SHAPER_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_16_17
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_16_17__CM_SHAPER_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_18_19
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_18_19__CM_SHAPER_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_20_21
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_20_21__CM_SHAPER_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_22_23
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_22_23__CM_SHAPER_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_24_25
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_24_25__CM_SHAPER_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_26_27
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_26_27__CM_SHAPER_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_28_29
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_28_29__CM_SHAPER_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_30_31
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_30_31__CM_SHAPER_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMA_REGION_32_33
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMA_REGION_32_33__CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_B
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_G
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_START_CNTL_R
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define CM3_CM_SHAPER_RAMB_START_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_B
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_B__CM_SHAPER_RAMB_EXP_REGION_END_BASE_B_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_G
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_G__CM_SHAPER_RAMB_EXP_REGION_END_BASE_G_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_END_CNTL_R
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+#define CM3_CM_SHAPER_RAMB_END_CNTL_R__CM_SHAPER_RAMB_EXP_REGION_END_BASE_R_MASK 0x3FFF0000L
+//CM3_CM_SHAPER_RAMB_REGION_0_1
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_0_1__CM_SHAPER_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_2_3
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_2_3__CM_SHAPER_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_4_5
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_4_5__CM_SHAPER_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_6_7
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_6_7__CM_SHAPER_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_8_9
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_8_9__CM_SHAPER_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_10_11
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_10_11__CM_SHAPER_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_12_13
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_12_13__CM_SHAPER_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_14_15
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_14_15__CM_SHAPER_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_16_17
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_16_17__CM_SHAPER_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_18_19
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_18_19__CM_SHAPER_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_20_21
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_20_21__CM_SHAPER_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_22_23
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_22_23__CM_SHAPER_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_24_25
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_24_25__CM_SHAPER_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_26_27
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_26_27__CM_SHAPER_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_28_29
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_28_29__CM_SHAPER_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_30_31
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_30_31__CM_SHAPER_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_SHAPER_RAMB_REGION_32_33
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define CM3_CM_SHAPER_RAMB_REGION_32_33__CM_SHAPER_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//CM3_CM_MEM_PWR_CTRL2
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE__SHIFT 0x8
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS__SHIFT 0xa
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE__SHIFT 0xc
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS__SHIFT 0xe
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_FORCE_MASK 0x00000300L
+#define CM3_CM_MEM_PWR_CTRL2__SHAPER_MEM_PWR_DIS_MASK 0x00000400L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_FORCE_MASK 0x00003000L
+#define CM3_CM_MEM_PWR_CTRL2__HDR3DLUT_MEM_PWR_DIS_MASK 0x00004000L
+//CM3_CM_MEM_PWR_STATUS2
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE__SHIFT 0x4
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE__SHIFT 0x6
+#define CM3_CM_MEM_PWR_STATUS2__SHAPER_MEM_PWR_STATE_MASK 0x00000030L
+#define CM3_CM_MEM_PWR_STATUS2__HDR3DLUT_MEM_PWR_STATE_MASK 0x000000C0L
+//CM3_CM_3DLUT_MODE
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE__SHIFT 0x0
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE__SHIFT 0x4
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_MODE_MASK 0x00000003L
+#define CM3_CM_3DLUT_MODE__CM_3DLUT_SIZE_MASK 0x00000010L
+//CM3_CM_3DLUT_INDEX
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX__SHIFT 0x0
+#define CM3_CM_3DLUT_INDEX__CM_3DLUT_INDEX_MASK 0x000007FFL
+//CM3_CM_3DLUT_DATA
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0__SHIFT 0x0
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1__SHIFT 0x10
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA0_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_DATA__CM_3DLUT_DATA1_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_DATA_30BIT
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT__SHIFT 0x2
+#define CM3_CM_3DLUT_DATA_30BIT__CM_3DLUT_DATA_30BIT_MASK 0xFFFFFFFCL
+//CM3_CM_3DLUT_READ_WRITE_CONTROL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK__SHIFT 0x0
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL__SHIFT 0x4
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN__SHIFT 0x8
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS__SHIFT 0xc
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL__SHIFT 0x10
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_WRITE_EN_MASK_MASK 0x0000000FL
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_RAM_SEL_MASK 0x00000010L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_30BIT_EN_MASK 0x00000100L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_CONFIG_STATUS_MASK 0x00003000L
+#define CM3_CM_3DLUT_READ_WRITE_CONTROL__CM_3DLUT_READ_SEL_MASK 0x00030000L
+//CM3_CM_3DLUT_OUT_NORM_FACTOR
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_NORM_FACTOR__CM_3DLUT_OUT_NORM_FACTOR_MASK 0x0000FFFFL
+//CM3_CM_3DLUT_OUT_OFFSET_R
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_OFFSET_R_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_R__CM_3DLUT_OUT_SCALE_R_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_G
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_OFFSET_G_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_G__CM_3DLUT_OUT_SCALE_G_MASK 0xFFFF0000L
+//CM3_CM_3DLUT_OUT_OFFSET_B
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B__SHIFT 0x0
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B__SHIFT 0x10
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_OFFSET_B_MASK 0x0000FFFFL
+#define CM3_CM_3DLUT_OUT_OFFSET_B__CM_3DLUT_OUT_SCALE_B_MASK 0xFFFF0000L
+//CM3_CM_TEST_DEBUG_INDEX
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define CM3_CM_TEST_DEBUG_INDEX__CM_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+//CM3_CM_TEST_DEBUG_DATA
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA__SHIFT 0x0
+#define CM3_CM_TEST_DEBUG_DATA__CM_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+// addressBlock: dce_dc_mpc_mpcc0_dispdec
+//MPCC0_MPCC_TOP_SEL
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC0_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_BOT_SEL
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC0_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC0_MPCC_OPP_ID
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC0_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC0_MPCC_CONTROL
+#define MPCC0_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC0_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC0_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC0_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC0_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC0_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC0_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC0_MPCC_SM_CONTROL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC0_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC0_MPCC_UPDATE_LOCK_SEL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC0_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC0_MPCC_TOP_GAIN
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC0_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_INSIDE
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC0_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC0_MPCC_BG_R_CR
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC0_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_G_Y
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC0_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC0_MPCC_BG_B_CB
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC0_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC0_MPCC_MEM_PWR_CTRL
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC0_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC0_MPCC_STALL_STATUS
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC0_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC0_MPCC_STATUS
+#define MPCC0_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC0_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC0_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC0_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC0_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC0_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc1_dispdec
+//MPCC1_MPCC_TOP_SEL
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC1_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_BOT_SEL
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC1_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC1_MPCC_OPP_ID
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC1_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC1_MPCC_CONTROL
+#define MPCC1_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC1_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC1_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC1_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC1_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC1_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC1_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC1_MPCC_SM_CONTROL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC1_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC1_MPCC_UPDATE_LOCK_SEL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC1_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC1_MPCC_TOP_GAIN
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC1_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_INSIDE
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC1_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC1_MPCC_BG_R_CR
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC1_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_G_Y
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC1_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC1_MPCC_BG_B_CB
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC1_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC1_MPCC_MEM_PWR_CTRL
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC1_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC1_MPCC_STALL_STATUS
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC1_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC1_MPCC_STATUS
+#define MPCC1_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC1_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC1_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC1_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC1_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC1_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc2_dispdec
+//MPCC2_MPCC_TOP_SEL
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC2_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_BOT_SEL
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC2_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC2_MPCC_OPP_ID
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC2_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC2_MPCC_CONTROL
+#define MPCC2_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC2_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC2_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC2_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC2_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC2_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC2_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC2_MPCC_SM_CONTROL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC2_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC2_MPCC_UPDATE_LOCK_SEL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC2_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC2_MPCC_TOP_GAIN
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC2_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_INSIDE
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC2_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC2_MPCC_BG_R_CR
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC2_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_G_Y
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC2_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC2_MPCC_BG_B_CB
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC2_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC2_MPCC_MEM_PWR_CTRL
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC2_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC2_MPCC_STALL_STATUS
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC2_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC2_MPCC_STATUS
+#define MPCC2_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC2_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC2_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC2_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC2_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC2_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc3_dispdec
+//MPCC3_MPCC_TOP_SEL
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC3_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_BOT_SEL
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC3_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC3_MPCC_OPP_ID
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC3_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC3_MPCC_CONTROL
+#define MPCC3_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC3_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC3_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC3_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC3_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC3_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC3_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC3_MPCC_SM_CONTROL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC3_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC3_MPCC_UPDATE_LOCK_SEL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC3_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC3_MPCC_TOP_GAIN
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC3_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_INSIDE
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC3_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC3_MPCC_BG_R_CR
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC3_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_G_Y
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC3_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC3_MPCC_BG_B_CB
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC3_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC3_MPCC_MEM_PWR_CTRL
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC3_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC3_MPCC_STALL_STATUS
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC3_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC3_MPCC_STATUS
+#define MPCC3_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC3_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC3_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC3_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC3_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC3_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpcc4_dispdec
+//MPCC4_MPCC_TOP_SEL
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL__SHIFT 0x0
+#define MPCC4_MPCC_TOP_SEL__MPCC_TOP_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_BOT_SEL
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL__SHIFT 0x0
+#define MPCC4_MPCC_BOT_SEL__MPCC_BOT_SEL_MASK 0x0000000FL
+//MPCC4_MPCC_OPP_ID
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID__SHIFT 0x0
+#define MPCC4_MPCC_OPP_ID__MPCC_OPP_ID_MASK 0x0000000FL
+//MPCC4_MPCC_CONTROL
+#define MPCC4_MPCC_CONTROL__MPCC_MODE__SHIFT 0x0
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE__SHIFT 0x4
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE__SHIFT 0x6
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY__SHIFT 0x7
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC__SHIFT 0x8
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE__SHIFT 0xb
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA__SHIFT 0x10
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN__SHIFT 0x18
+#define MPCC4_MPCC_CONTROL__MPCC_MODE_MASK 0x00000003L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_BLND_MODE_MASK 0x00000030L
+#define MPCC4_MPCC_CONTROL__MPCC_ALPHA_MULTIPLIED_MODE_MASK 0x00000040L
+#define MPCC4_MPCC_CONTROL__MPCC_BLND_ACTIVE_OVERLAP_ONLY_MASK 0x00000080L
+#define MPCC4_MPCC_CONTROL__MPCC_BG_BPC_MASK 0x00000700L
+#define MPCC4_MPCC_CONTROL__MPCC_BOT_GAIN_MODE_MASK 0x00000800L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_ALPHA_MASK 0x00FF0000L
+#define MPCC4_MPCC_CONTROL__MPCC_GLOBAL_GAIN_MASK 0xFF000000L
+//MPCC4_MPCC_SM_CONTROL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN__SHIFT 0x0
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE__SHIFT 0x1
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT__SHIFT 0x4
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT__SHIFT 0x5
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL__SHIFT 0x8
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL__SHIFT 0x10
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL__SHIFT 0x18
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_EN_MASK 0x00000001L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_MODE_MASK 0x0000000EL
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FRAME_ALT_MASK 0x00000010L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FIELD_ALT_MASK 0x00000020L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_FRAME_POL_MASK 0x00000300L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_FORCE_NEXT_TOP_POL_MASK 0x00030000L
+#define MPCC4_MPCC_SM_CONTROL__MPCC_SM_CURRENT_FRAME_POL_MASK 0x01000000L
+//MPCC4_MPCC_UPDATE_LOCK_SEL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL__SHIFT 0x0
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS__SHIFT 0x4
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCK_SEL_MASK 0x0000000FL
+#define MPCC4_MPCC_UPDATE_LOCK_SEL__MPCC_UPDATE_LOCKED_STATUS_MASK 0x00000070L
+//MPCC4_MPCC_TOP_GAIN
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN__SHIFT 0x0
+#define MPCC4_MPCC_TOP_GAIN__MPCC_TOP_GAIN_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_INSIDE
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_INSIDE__MPCC_BOT_GAIN_INSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BOT_GAIN_OUTSIDE
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE__SHIFT 0x0
+#define MPCC4_MPCC_BOT_GAIN_OUTSIDE__MPCC_BOT_GAIN_OUTSIDE_MASK 0x0007FFFFL
+//MPCC4_MPCC_BG_R_CR
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR__SHIFT 0x0
+#define MPCC4_MPCC_BG_R_CR__MPCC_BG_R_CR_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_G_Y
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y__SHIFT 0x0
+#define MPCC4_MPCC_BG_G_Y__MPCC_BG_G_Y_MASK 0x00000FFFL
+//MPCC4_MPCC_BG_B_CB
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB__SHIFT 0x0
+#define MPCC4_MPCC_BG_B_CB__MPCC_BG_B_CB_MASK 0x00000FFFL
+//MPCC4_MPCC_MEM_PWR_CTRL
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE__SHIFT 0x0
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS__SHIFT 0x2
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE__SHIFT 0x4
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_FORCE_MASK 0x00000003L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_DIS_MASK 0x00000004L
+#define MPCC4_MPCC_MEM_PWR_CTRL__MPCC_OGAM_MEM_PWR_STATE_MASK 0x00000030L
+//MPCC4_MPCC_STALL_STATUS
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED__SHIFT 0x0
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE__SHIFT 0x4
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK__SHIFT 0x8
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK__SHIFT 0xc
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_OCCURED_MASK 0x00000001L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_TYPE_MASK 0x00000010L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_ACK_MASK 0x00000100L
+#define MPCC4_MPCC_STALL_STATUS__MPCC_STALL_INT_MASK_MASK 0x00001000L
+//MPCC4_MPCC_STATUS
+#define MPCC4_MPCC_STATUS__MPCC_IDLE__SHIFT 0x0
+#define MPCC4_MPCC_STATUS__MPCC_BUSY__SHIFT 0x1
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED__SHIFT 0x2
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR__SHIFT 0x1d
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE__SHIFT 0x1e
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK__SHIFT 0x1f
+#define MPCC4_MPCC_STATUS__MPCC_IDLE_MASK 0x00000001L
+#define MPCC4_MPCC_STATUS__MPCC_BUSY_MASK 0x00000002L
+#define MPCC4_MPCC_STATUS__MPCC_DISABLED_MASK 0x00000004L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_PIX_DATA_ERROR_MASK 0x20000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_INPUT_CHECK_ENABLE_MASK 0x40000000L
+#define MPCC4_MPCC_STATUS__DPP_MPCC_EXCEPTION_ACK_MASK 0x80000000L
+// addressBlock: dce_dc_mpc_mpc_cfg_dispdec
+//MPC_CLOCK_CONTROL
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE__SHIFT 0x1
+#define MPC_CLOCK_CONTROL__DISPCLK_R_GATE_DISABLE_MASK 0x00000002L
+//MPC_SOFT_RESET
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET__SHIFT 0x0
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET__SHIFT 0x1
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET__SHIFT 0x2
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET__SHIFT 0x3
+#define MPC_SOFT_RESET__MPCC4_SOFT_RESET__SHIFT 0x4
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET__SHIFT 0xa
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET__SHIFT 0xb
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET__SHIFT 0xc
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET__SHIFT 0xd
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET__SHIFT 0x14
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET__SHIFT 0x15
+#define MPC_SOFT_RESET__MPC_SOFT_RESET__SHIFT 0x1f
+#define MPC_SOFT_RESET__MPCC0_SOFT_RESET_MASK 0x00000001L
+#define MPC_SOFT_RESET__MPCC1_SOFT_RESET_MASK 0x00000002L
+#define MPC_SOFT_RESET__MPCC2_SOFT_RESET_MASK 0x00000004L
+#define MPC_SOFT_RESET__MPCC3_SOFT_RESET_MASK 0x00000008L
+#define MPC_SOFT_RESET__MPCC4_SOFT_RESET_MASK 0x00000010L
+#define MPC_SOFT_RESET__MPC_SFR0_SOFT_RESET_MASK 0x00000400L
+#define MPC_SOFT_RESET__MPC_SFR1_SOFT_RESET_MASK 0x00000800L
+#define MPC_SOFT_RESET__MPC_SFR2_SOFT_RESET_MASK 0x00001000L
+#define MPC_SOFT_RESET__MPC_SFR3_SOFT_RESET_MASK 0x00002000L
+#define MPC_SOFT_RESET__MPC_SFT0_SOFT_RESET_MASK 0x00100000L
+#define MPC_SOFT_RESET__MPC_SFT1_SOFT_RESET_MASK 0x00200000L
+#define MPC_SOFT_RESET__MPC_SOFT_RESET_MASK 0x80000000L
+//MPC_BYPASS_BG_AR
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA__SHIFT 0x0
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR__SHIFT 0x10
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_ALPHA_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_AR__MPC_BYPASS_BG_R_CR_MASK 0xFFFF0000L
+//MPC_BYPASS_BG_GB
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y__SHIFT 0x0
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB__SHIFT 0x10
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_G_Y_MASK 0x0000FFFFL
+#define MPC_BYPASS_BG_GB__MPC_BYPASS_BG_B_CB_MASK 0xFFFF0000L
+//MPC_STALL_GRACE_WINDOW
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD__SHIFT 0x0
+#define MPC_STALL_GRACE_WINDOW__MPC_STALL_GRACE_WINDOW_PERIOD_MASK 0x000000FFL
+//MPC_HOST_READ_CONTROL
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL__SHIFT 0x0
+#define MPC_HOST_READ_CONTROL__HOST_READ_RATE_CONTROL_MASK 0x000000FFL
+//ADR_CFG_CUR_VUPDATE_LOCK_SET0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET0__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET0__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET0__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET0__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET0__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET1
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET1__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET1
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET1__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET1
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET1__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET1
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET1__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET1
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET1__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET2
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET2__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET2
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET2__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET2
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET2__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET2
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET2__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET2
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET2__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_CUR_VUPDATE_LOCK_SET3
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_CUR_VUPDATE_LOCK_SET3__ADR_CFG_CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_CFG_VUPDATE_LOCK_SET3
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_CFG_VUPDATE_LOCK_SET3__ADR_CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//ADR_VUPDATE_LOCK_SET3
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define ADR_VUPDATE_LOCK_SET3__ADR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CFG_VUPDATE_LOCK_SET3
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CFG_VUPDATE_LOCK_SET3__CFG_VUPDATE_LOCK_SET_MASK 0x00000001L
+//CUR_VUPDATE_LOCK_SET3
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET__SHIFT 0x0
+#define CUR_VUPDATE_LOCK_SET3__CUR_VUPDATE_LOCK_SET_MASK 0x00000001L
+//MPC_OUT0_MUX
+#define MPC_OUT0_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT0_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT0_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT0_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT0_DENORM_CONTROL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT0_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT0_DENORM_CLAMP_G_Y
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT0_DENORM_CLAMP_B_CB
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT0_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
+//MPC_OUT1_MUX
+#define MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT 0x0
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR__SHIFT 0x5
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR__SHIFT 0x6
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK__SHIFT 0x7
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE__SHIFT 0x8
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL__SHIFT 0x9
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE__SHIFT 0xa
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0__SHIFT 0xb
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1__SHIFT 0x14
+#define MPC_OUT1_MUX__MPC_OUT_MUX_MASK 0x0000000FL
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_OVFL_ERROR_MASK 0x00000020L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_UDFL_ERROR_MASK 0x00000040L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_ERROR_ACK_MASK 0x00000080L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_DISABLE_MASK 0x00000100L
+#define MPC_OUT1_MUX__MPC_OUT_RATE_CONTROL_MASK 0x00000200L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_MODE_MASK 0x00000400L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT0_MASK 0x000FF800L
+#define MPC_OUT1_MUX__MPC_OUT_FLOW_CONTROL_COUNT1_MASK 0xFFF00000L
+//MPC_OUT1_DENORM_CONTROL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR__SHIFT 0x0
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR__SHIFT 0xc
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE__SHIFT 0x18
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MIN_R_CR_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_CLAMP_MAX_R_CR_MASK 0x00FFF000L
+#define MPC_OUT1_DENORM_CONTROL__MPC_OUT_DENORM_MODE_MASK 0x07000000L
+//MPC_OUT1_DENORM_CLAMP_G_Y
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MIN_G_Y_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_G_Y__MPC_OUT_DENORM_CLAMP_MAX_G_Y_MASK 0x00FFF000L
+//MPC_OUT1_DENORM_CLAMP_B_CB
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB__SHIFT 0x0
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB__SHIFT 0xc
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MIN_B_CB_MASK 0x00000FFFL
+#define MPC_OUT1_DENORM_CLAMP_B_CB__MPC_OUT_DENORM_CLAMP_MAX_B_CB_MASK 0x00FFF000L
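The __SHIFT/_MASK pairs above follow the usual pattern for packing and extracting bit fields in these 32-bit MPC registers. Below is a minimal sketch of that pattern using the MPC_OUT1_MUX source-select field; the helper functions are illustrative only (the amdgpu display code has its own REG_SET/REG_GET/REG_UPDATE machinery built on these same macros), and the parameter names are hypothetical.

#include <stdint.h>

/* Illustrative field helpers, not the driver's actual register macros. */
static inline uint32_t mpc_field_set(uint32_t reg, uint32_t mask,
				     uint32_t shift, uint32_t value)
{
	/* Clear the field, then insert the new value, masked to the field width. */
	return (reg & ~mask) | ((value << shift) & mask);
}

static inline uint32_t mpc_field_get(uint32_t reg, uint32_t mask,
				     uint32_t shift)
{
	return (reg & mask) >> shift;
}

/* Rewrite only the MPC_OUT_MUX source-select field of MPC_OUT1_MUX,
 * preserving the rate/flow-control fields that share the register. */
static inline uint32_t mpc_out1_mux_select(uint32_t out1_mux_reg, uint32_t src)
{
	return mpc_field_set(out1_mux_reg,
			     MPC_OUT1_MUX__MPC_OUT_MUX_MASK,
			     MPC_OUT1_MUX__MPC_OUT_MUX__SHIFT,
			     src);
}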
+// addressBlock: dce_dc_mpc_mpcc_ogam0_dispdec
+//MPCC_OGAM0_MPCC_OGAM_MODE
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM0_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM0_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
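For the MPCC output gamma block, the LUT_INDEX/LUT_DATA pair above suggests the usual indexed RAM access scheme: configure MPCC_OGAM_LUT_RAM_CONTROL (per-channel write enables and A/B RAM select), set a start index, then stream entries through the data port. The sketch below only composes register values from the macros defined here; the actual MMIO writes, the meaning of RAM_SEL polarity, and any index auto-increment behaviour are outside this header and are treated as assumptions.

#include <stdint.h>

/* Compose MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL: enable writes on all three
 * channels (3-bit write-enable mask) and pick a LUT RAM. RAM_SEL = 0 is
 * assumed to select RAM A and 1 RAM B; writing the result to the register
 * is left to the caller's MMIO layer. */
static inline uint32_t ogam0_lut_ram_control(uint32_t use_ram_b)
{
	uint32_t v = 0;

	v |= (0x7u << MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT) &
	     MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK;
	v |= ((use_ram_b ? 1u : 0u) << MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT) &
	     MPCC_OGAM0_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK;

	return v;
}

/* Compose an index-register value and a data-register value for one LUT
 * entry; the 19-bit data field width follows from MPCC_OGAM_LUT_DATA_MASK. */
static inline uint32_t ogam0_lut_index(uint32_t idx)
{
	return (idx << MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT) &
	       MPCC_OGAM0_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK;
}

static inline uint32_t ogam0_lut_data(uint32_t entry)
{
	return (entry << MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT) &
	       MPCC_OGAM0_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK;
}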
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam1_dispdec
+//MPCC_OGAM1_MPCC_OGAM_MODE
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM1_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM1_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM1_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam2_dispdec
+//MPCC_OGAM2_MPCC_OGAM_MODE
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM2_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM2_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM2_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam3_dispdec
+//MPCC_OGAM3_MPCC_OGAM_MODE
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM3_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM3_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM3_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpcc_ogam4_dispdec
+//MPCC_OGAM4_MPCC_OGAM_MODE
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_MODE__MPCC_OGAM_MODE_MASK 0x00000003L
+//MPCC_OGAM4_MPCC_OGAM_LUT_INDEX
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_INDEX__MPCC_OGAM_LUT_INDEX_MASK 0x000001FFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_DATA
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_DATA__MPCC_OGAM_LUT_DATA_MASK 0x0007FFFFL
+//MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL__SHIFT 0x3
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS__SHIFT 0x4
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_WRITE_EN_MASK_MASK 0x00000007L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_LUT_RAM_SEL_MASK 0x00000008L
+#define MPCC_OGAM4_MPCC_OGAM_LUT_RAM_CONTROL__MPCC_OGAM_CONFIG_STATUS_MASK 0x00000030L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_START_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_B__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_G__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_SLOPE_CNTL_R__MPCC_OGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_B__MPCC_OGAM_RAMA_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_B__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_G__MPCC_OGAM_RAMA_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_G__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL1_R__MPCC_OGAM_RAMA_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_END_CNTL2_R__MPCC_OGAM_RAMA_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_0_1__MPCC_OGAM_RAMA_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_2_3__MPCC_OGAM_RAMA_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_4_5__MPCC_OGAM_RAMA_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_6_7__MPCC_OGAM_RAMA_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_8_9__MPCC_OGAM_RAMA_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_10_11__MPCC_OGAM_RAMA_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_12_13__MPCC_OGAM_RAMA_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_14_15__MPCC_OGAM_RAMA_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_16_17__MPCC_OGAM_RAMA_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_18_19__MPCC_OGAM_RAMA_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_20_21__MPCC_OGAM_RAMA_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_22_23__MPCC_OGAM_RAMA_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_24_25__MPCC_OGAM_RAMA_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_26_27__MPCC_OGAM_RAMA_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_28_29__MPCC_OGAM_RAMA_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_30_31__MPCC_OGAM_RAMA_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMA_REGION_32_33__MPCC_OGAM_RAMA_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_B_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_B_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_G_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_G_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R__SHIFT 0x14
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_R_MASK 0x0003FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_START_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_START_SEGMENT_R_MASK 0x07F00000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_B__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_G__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_SLOPE_CNTL_R__MPCC_OGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R_MASK 0x0003FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_B__MPCC_OGAM_RAMB_EXP_REGION_END_B_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_B_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_B__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_B_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_G__MPCC_OGAM_RAMB_EXP_REGION_END_G_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_G_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_G__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_G_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL1_R__MPCC_OGAM_RAMB_EXP_REGION_END_R_MASK 0x0000FFFFL
+//MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_SLOPE_R_MASK 0x0000FFFFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_END_CNTL2_R__MPCC_OGAM_RAMB_EXP_REGION_END_BASE_R_MASK 0xFFFF0000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION0_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_0_1__MPCC_OGAM_RAMB_EXP_REGION1_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION2_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_2_3__MPCC_OGAM_RAMB_EXP_REGION3_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION4_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_4_5__MPCC_OGAM_RAMB_EXP_REGION5_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION6_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_6_7__MPCC_OGAM_RAMB_EXP_REGION7_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION8_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_8_9__MPCC_OGAM_RAMB_EXP_REGION9_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION10_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_10_11__MPCC_OGAM_RAMB_EXP_REGION11_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION12_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_12_13__MPCC_OGAM_RAMB_EXP_REGION13_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION14_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_14_15__MPCC_OGAM_RAMB_EXP_REGION15_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION16_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_16_17__MPCC_OGAM_RAMB_EXP_REGION17_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION18_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_18_19__MPCC_OGAM_RAMB_EXP_REGION19_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION20_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_20_21__MPCC_OGAM_RAMB_EXP_REGION21_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION22_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_22_23__MPCC_OGAM_RAMB_EXP_REGION23_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION24_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_24_25__MPCC_OGAM_RAMB_EXP_REGION25_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION26_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_26_27__MPCC_OGAM_RAMB_EXP_REGION27_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION28_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_28_29__MPCC_OGAM_RAMB_EXP_REGION29_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION30_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_30_31__MPCC_OGAM_RAMB_EXP_REGION31_NUM_SEGMENTS_MASK 0x70000000L
+//MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET__SHIFT 0x0
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS__SHIFT 0xc
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET__SHIFT 0x10
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS__SHIFT 0x1c
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_LUT_OFFSET_MASK 0x000001FFL
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION32_NUM_SEGMENTS_MASK 0x00007000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_LUT_OFFSET_MASK 0x01FF0000L
+#define MPCC_OGAM4_MPCC_OGAM_RAMB_REGION_32_33__MPCC_OGAM_RAMB_EXP_REGION33_NUM_SEGMENTS_MASK 0x70000000L
+
+
+// addressBlock: dce_dc_mpc_mpc_ocsc_dispdec
+//MPC_OUT_CSC_COEF_FORMAT
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT__SHIFT 0x0
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT__SHIFT 0x1
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC0_COEF_FORMAT_MASK 0x00000001L
+#define MPC_OUT_CSC_COEF_FORMAT__MPC_OCSC1_COEF_FORMAT_MASK 0x00000002L
+//MPC_OUT0_CSC_MODE
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT0_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT0_CSC_C11_C12_A
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_A
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_A
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_A
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_A
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_A
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C11_C12_B
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C13_C14_B
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C21_C22_B
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C23_C24_B
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C31_C32_B
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT0_CSC_C33_C34_B
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT0_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_MODE
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE__SHIFT 0x0
+#define MPC_OUT1_CSC_MODE__MPC_OCSC_MODE_MASK 0x00000003L
+//MPC_OUT1_CSC_C11_C12_A
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_A
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C13_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_A__MPC_OCSC_C14_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_A
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C21_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_A__MPC_OCSC_C22_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_A
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C23_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_A__MPC_OCSC_C24_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_A
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C31_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_A__MPC_OCSC_C32_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_A
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C33_A_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_A__MPC_OCSC_C34_A_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C11_C12_B
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C11_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C11_C12_B__MPC_OCSC_C12_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C13_C14_B
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C13_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C13_C14_B__MPC_OCSC_C14_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C21_C22_B
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C21_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C21_C22_B__MPC_OCSC_C22_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C23_C24_B
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C23_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C23_C24_B__MPC_OCSC_C24_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C31_C32_B
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C31_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C31_C32_B__MPC_OCSC_C32_B_MASK 0xFFFF0000L
+//MPC_OUT1_CSC_C33_C34_B
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B__SHIFT 0x0
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B__SHIFT 0x10
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C33_B_MASK 0x0000FFFFL
+#define MPC_OUT1_CSC_C33_C34_B__MPC_OCSC_C34_B_MASK 0xFFFF0000L
+//MPC_OCSC_TEST_DEBUG_INDEX
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX__SHIFT 0x0
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN__SHIFT 0x8
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_INDEX_MASK 0x000000FFL
+#define MPC_OCSC_TEST_DEBUG_INDEX__MPC_OCSC_TEST_DEBUG_WRITE_EN_MASK 0x00000100L
+
+
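The MPC output CSC registers above pack two signed 16-bit coefficients per 32-bit word (C11 in the low half-word, C12 in the high half-word, and so on for the remaining pairs). Purely as an illustration of how these _SHIFT/_MASK pairs are meant to be combined — editorial commentary, not part of the patch, with an invented helper name — a value for MPC_OUT0_CSC_C11_C12_A could be composed like this, assuming this header is included:

#include <stdint.h>

/* Illustrative only: pack the C11/C12 coefficient pair for MPC output
 * CSC 0, bank A. The coefficients are already in their raw 16-bit
 * register encoding; each one is shifted into place and trimmed by the
 * corresponding field mask defined in this header. */
static uint32_t mpc_out0_csc_c11_c12_a_value(uint16_t c11, uint16_t c12)
{
	uint32_t v = 0;

	v |= ((uint32_t)c11 << MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A__SHIFT) &
	     MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C11_A_MASK;
	v |= ((uint32_t)c12 << MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A__SHIFT) &
	     MPC_OUT0_CSC_C11_C12_A__MPC_OCSC_C12_A_MASK;

	return v;
}

The same shift-then-mask pattern applies to every field definition in this file; in practice the display code typically wraps these pairs in REG_SET/REG_GET-style macros rather than open-coding the arithmetic as above.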
+// addressBlock: dce_dc_opp_fmt0_dispdec
+//FMT0_FMT_CLAMP_COMPONENT_R
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_G
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_COMPONENT_B
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT0_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT0_FMT_DYNAMIC_EXP_CNTL
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT0_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT0_FMT_CONTROL
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT0_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT0_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT0_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT0_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT0_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT0_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT0_FMT_BIT_DEPTH_CONTROL
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT0_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT0_FMT_DITHER_RAND_R_SEED
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_G_SEED
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT0_FMT_DITHER_RAND_B_SEED
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT0_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT0_FMT_CLAMP_CNTL
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT0_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT0_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT0_FMT_MAP420_MEMORY_CONTROL
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT0_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT0_FMT_422_CONTROL
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT0_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg0_dispdec
+//DPG0_DPG_CONTROL
+#define DPG0_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG0_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG0_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG0_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG0_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG0_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG0_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG0_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG0_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG0_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG0_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG0_DPG_RAMP_CONTROL
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG0_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG0_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG0_DPG_DIMENSIONS
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG0_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_COLOUR_R_CR
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_G_Y
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG0_DPG_COLOUR_B_CB
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG0_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG0_DPG_OFFSET_SEGMENT
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG0_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG0_DPG_STATUS
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
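The read direction follows the same convention in reverse: mask first, then shift down. Again as editorial illustration only (not part of the patch, function name invented), the DPG0 double-buffer-pending status bit could be extracted from a raw register value like this:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: returns true while a double-buffered DPG0 update
 * is still pending, given the raw value read from DPG0_DPG_STATUS. */
static bool dpg0_double_buffer_pending(uint32_t dpg_status)
{
	return ((dpg_status & DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK) >>
		DPG0_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT) != 0;
}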
+// addressBlock: dce_dc_opp_oppbuf0_dispdec
+//OPPBUF0_OPPBUF_CONTROL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF0_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF0_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF0_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+
+
+// addressBlock: dce_dc_opp_opp_pipe0_dispdec
+//OPP_PIPE0_OPP_PIPE_CONTROL
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE0_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+// addressBlock: dce_dc_opp_fmt1_dispdec
+//FMT1_FMT_CLAMP_COMPONENT_R
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_LOWER_R_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_R__FMT_CLAMP_UPPER_R_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_G
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_LOWER_G_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_G__FMT_CLAMP_UPPER_G_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_COMPONENT_B
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B__SHIFT 0x0
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B__SHIFT 0x10
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_LOWER_B_MASK 0x0000FFFFL
+#define FMT1_FMT_CLAMP_COMPONENT_B__FMT_CLAMP_UPPER_B_MASK 0xFFFF0000L
+//FMT1_FMT_DYNAMIC_EXP_CNTL
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN__SHIFT 0x0
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE__SHIFT 0x4
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_EN_MASK 0x00000001L
+#define FMT1_FMT_DYNAMIC_EXP_CNTL__FMT_DYNAMIC_EXP_MODE_MASK 0x00000010L
+//FMT1_FMT_CONTROL
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX__SHIFT 0x8
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP__SHIFT 0xc
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING__SHIFT 0x10
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE__SHIFT 0x12
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER__SHIFT 0x14
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS__SHIFT 0x15
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING__SHIFT 0x18
+#define FMT1_FMT_CONTROL__FMT_STEREOSYNC_OVERRIDE_MASK 0x00000001L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_MAX_MASK 0x00000F00L
+#define FMT1_FMT_CONTROL__FMT_SPATIAL_DITHER_FRAME_COUNTER_BIT_SWAP_MASK 0x00003000L
+#define FMT1_FMT_CONTROL__FMT_PIXEL_ENCODING_MASK 0x00030000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_MODE_MASK 0x000C0000L
+#define FMT1_FMT_CONTROL__FMT_SUBSAMPLING_ORDER_MASK 0x00100000L
+#define FMT1_FMT_CONTROL__FMT_CBCR_BIT_REDUCTION_BYPASS_MASK 0x00200000L
+#define FMT1_FMT_CONTROL__FMT_DOUBLE_BUFFER_REG_UPDATE_PENDING_MASK 0x01000000L
+//FMT1_FMT_BIT_DEPTH_CONTROL
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN__SHIFT 0x0
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE__SHIFT 0x1
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH__SHIFT 0x4
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN__SHIFT 0x8
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE__SHIFT 0x9
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH__SHIFT 0xb
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE__SHIFT 0xd
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE__SHIFT 0xe
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE__SHIFT 0xf
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN__SHIFT 0x10
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH__SHIFT 0x11
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET__SHIFT 0x15
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL__SHIFT 0x18
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET__SHIFT 0x19
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL__SHIFT 0x1a
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL__SHIFT 0x1c
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL__SHIFT 0x1e
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_EN_MASK 0x00000001L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_MODE_MASK 0x00000002L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TRUNCATE_DEPTH_MASK 0x00000030L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_EN_MASK 0x00000100L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_MODE_MASK 0x00000600L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_SPATIAL_DITHER_DEPTH_MASK 0x00001800L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_FRAME_RANDOM_ENABLE_MASK 0x00002000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_RGB_RANDOM_ENABLE_MASK 0x00004000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_HIGHPASS_RANDOM_ENABLE_MASK 0x00008000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_EN_MASK 0x00010000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_DEPTH_MASK 0x00060000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_OFFSET_MASK 0x00600000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_LEVEL_MASK 0x01000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_TEMPORAL_DITHER_RESET_MASK 0x02000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_25FRC_SEL_MASK 0x0C000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_50FRC_SEL_MASK 0x30000000L
+#define FMT1_FMT_BIT_DEPTH_CONTROL__FMT_75FRC_SEL_MASK 0xC0000000L
+//FMT1_FMT_DITHER_RAND_R_SEED
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_RAND_R_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_R_SEED__FMT_OFFSET_R_CR_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_G_SEED
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_RAND_G_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_G_SEED__FMT_OFFSET_G_Y_MASK 0xFFFF0000L
+//FMT1_FMT_DITHER_RAND_B_SEED
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED__SHIFT 0x0
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB__SHIFT 0x10
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_RAND_B_SEED_MASK 0x000000FFL
+#define FMT1_FMT_DITHER_RAND_B_SEED__FMT_OFFSET_B_CB_MASK 0xFFFF0000L
+//FMT1_FMT_CLAMP_CNTL
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN__SHIFT 0x0
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT__SHIFT 0x10
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_DATA_EN_MASK 0x00000001L
+#define FMT1_FMT_CLAMP_CNTL__FMT_CLAMP_COLOR_FORMAT_MASK 0x00070000L
+//FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH__SHIFT 0x0
+#define FMT1_FMT_SIDE_BY_SIDE_STEREO_CONTROL__FMT_SIDE_BY_SIDE_STEREO_ACTIVE_WIDTH_MASK 0x00001FFFL
+//FMT1_FMT_MAP420_MEMORY_CONTROL
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE__SHIFT 0x0
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS__SHIFT 0x4
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE__SHIFT 0x8
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE__SHIFT 0xc
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_FORCE_MASK 0x00000003L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_DIS_MASK 0x00000010L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_MAP420MEM_PWR_STATE_MASK 0x00000300L
+#define FMT1_FMT_MAP420_MEMORY_CONTROL__FMT_DEFAULT_MEM_LOW_POWER_STATE_MASK 0x00003000L
+//FMT1_FMT_422_CONTROL
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT__SHIFT 0x0
+#define FMT1_FMT_422_CONTROL__FMT_LEFT_EDGE_EXTRA_PIXEL_COUNT_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_dpg1_dispdec
+//DPG1_DPG_CONTROL
+#define DPG1_DPG_CONTROL__DPG_EN__SHIFT 0x0
+#define DPG1_DPG_CONTROL__DPG_MODE__SHIFT 0x4
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE__SHIFT 0x8
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH__SHIFT 0xc
+#define DPG1_DPG_CONTROL__DPG_VRES__SHIFT 0x10
+#define DPG1_DPG_CONTROL__DPG_HRES__SHIFT 0x14
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY__SHIFT 0x18
+#define DPG1_DPG_CONTROL__DPG_EN_MASK 0x00000001L
+#define DPG1_DPG_CONTROL__DPG_MODE_MASK 0x00000070L
+#define DPG1_DPG_CONTROL__DPG_DYNAMIC_RANGE_MASK 0x00000100L
+#define DPG1_DPG_CONTROL__DPG_BIT_DEPTH_MASK 0x00003000L
+#define DPG1_DPG_CONTROL__DPG_VRES_MASK 0x000F0000L
+#define DPG1_DPG_CONTROL__DPG_HRES_MASK 0x00F00000L
+#define DPG1_DPG_CONTROL__DPG_FIELD_POLARITY_MASK 0x01000000L
+//DPG1_DPG_RAMP_CONTROL
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET__SHIFT 0x0
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0__SHIFT 0x18
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1__SHIFT 0x1c
+#define DPG1_DPG_RAMP_CONTROL__DPG_RAMP0_OFFSET_MASK 0x0000FFFFL
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC0_MASK 0x0F000000L
+#define DPG1_DPG_RAMP_CONTROL__DPG_INC1_MASK 0xF0000000L
+//DPG1_DPG_DIMENSIONS
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT__SHIFT 0x0
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH__SHIFT 0x10
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_HEIGHT_MASK 0x00003FFFL
+#define DPG1_DPG_DIMENSIONS__DPG_ACTIVE_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_COLOUR_R_CR
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR__SHIFT 0x0
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR__SHIFT 0x10
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR0_R_CR_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_R_CR__DPG_COLOUR1_R_CR_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_G_Y
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y__SHIFT 0x0
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y__SHIFT 0x10
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR0_G_Y_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_G_Y__DPG_COLOUR1_G_Y_MASK 0xFFFF0000L
+//DPG1_DPG_COLOUR_B_CB
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB__SHIFT 0x0
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB__SHIFT 0x10
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR0_B_CB_MASK 0x0000FFFFL
+#define DPG1_DPG_COLOUR_B_CB__DPG_COLOUR1_B_CB_MASK 0xFFFF0000L
+//DPG1_DPG_OFFSET_SEGMENT
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET__SHIFT 0x0
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH__SHIFT 0x10
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_X_OFFSET_MASK 0x00003FFFL
+#define DPG1_DPG_OFFSET_SEGMENT__DPG_SEGMENT_WIDTH_MASK 0x3FFF0000L
+//DPG1_DPG_STATUS
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING__SHIFT 0x0
+#define DPG1_DPG_STATUS__DPG_DOUBLE_BUFFER_PENDING_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_opp_oppbuf1_dispdec
+//OPPBUF1_OPPBUF_CONTROL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH__SHIFT 0x0
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION__SHIFT 0x10
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM__SHIFT 0x14
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION__SHIFT 0x18
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING__SHIFT 0x1c
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_ACTIVE_WIDTH_MASK 0x00003FFFL
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DISPLAY_SEGMENTATION_MASK 0x00070000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_OVERLAP_PIXEL_NUM_MASK 0x00F00000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_PIXEL_REPETITION_MASK 0x0F000000L
+#define OPPBUF1_OPPBUF_CONTROL__OPPBUF_DOUBLE_BUFFER_PENDING_MASK 0x10000000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE__SHIFT 0xa
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R__SHIFT 0x14
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE1_SIZE_MASK 0x000003FFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_3D_VACT_SPACE2_SIZE_MASK 0x000FFC00L
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_0__OPPBUF_DUMMY_DATA_R_MASK 0xFFF00000L
+//OPPBUF1_OPPBUF_3D_PARAMETERS_1
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G__SHIFT 0x0
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B__SHIFT 0x10
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_G_MASK 0x00000FFFL
+#define OPPBUF1_OPPBUF_3D_PARAMETERS_1__OPPBUF_DUMMY_DATA_B_MASK 0x0FFF0000L
+
+
+// addressBlock: dce_dc_opp_opp_pipe1_dispdec
+//OPP_PIPE1_OPP_PIPE_CONTROL
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN__SHIFT 0x0
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON__SHIFT 0x1
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN__SHIFT 0x4
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_EN_MASK 0x00000001L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_CLOCK_ON_MASK 0x00000002L
+#define OPP_PIPE1_OPP_PIPE_CONTROL__OPP_PIPE_DIGITAL_BYPASS_EN_MASK 0x00000010L
+
+
+
+
+// addressBlock: dce_dc_opp_opp_top_dispdec
+//OPP_TOP_CLK_CONTROL
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS__SHIFT 0x4
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON__SHIFT 0xc
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON__SHIFT 0xd
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPP_TOP_CLK_CONTROL__OPP_DISPCLK_G_ABM_GATE_DIS_MASK 0x00000010L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM0_CLOCK_ON_MASK 0x00001000L
+#define OPP_TOP_CLK_CONTROL__OPP_ABM1_CLOCK_ON_MASK 0x00002000L
+
+
+// addressBlock: dce_dc_optc_odm0_dispdec
+//ODM0_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_DATA_SOURCE_SELECT
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM0_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+//ODM0_OPTC_DATA_FORMAT_CONTROL
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM0_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM0_OPTC_BYTES_PER_PIXEL
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM0_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM0_OPTC_WIDTH_CONTROL
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM0_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM0_OPTC_INPUT_CLOCK_CONTROL
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM0_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM0_OPTC_INPUT_SPARE_REGISTER
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM0_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_odm1_dispdec
+//ODM1_OPTC_INPUT_GLOBAL_CONTROL
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET__SHIFT 0x0
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN__SHIFT 0x8
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE__SHIFT 0x9
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS__SHIFT 0xa
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS__SHIFT 0xb
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR__SHIFT 0xc
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT__SHIFT 0xd
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING__SHIFT 0x1f
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_INPUT_SOFT_RESET_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_EN_MASK 0x00000100L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_TYPE_MASK 0x00000200L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000400L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_INT_STATUS_MASK 0x00000800L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
+#define ODM1_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM1_OPTC_DATA_SOURCE_SELECT
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL__SHIFT 0x8
+#define ODM1_OPTC_DATA_SOURCE_SELECT__OPTC_SEG0_SRC_SEL_MASK 0x00000F00L
+//ODM1_OPTC_DATA_FORMAT_CONTROL
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT__SHIFT 0x0
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE__SHIFT 0x4
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DATA_FORMAT_MASK 0x00000003L
+#define ODM1_OPTC_DATA_FORMAT_CONTROL__OPTC_DSC_MODE_MASK 0x00000030L
+//ODM1_OPTC_BYTES_PER_PIXEL
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define ODM1_OPTC_BYTES_PER_PIXEL__OPTC_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
+//ODM1_OPTC_WIDTH_CONTROL
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH__SHIFT 0x10
+#define ODM1_OPTC_WIDTH_CONTROL__OPTC_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//ODM1_OPTC_INPUT_CLOCK_CONTROL
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS__SHIFT 0x0
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN__SHIFT 0x1
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON__SHIFT 0x2
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_GATE_DIS_MASK 0x00000001L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_EN_MASK 0x00000002L
+#define ODM1_OPTC_INPUT_CLOCK_CONTROL__OPTC_INPUT_CLK_ON_MASK 0x00000004L
+//ODM1_OPTC_INPUT_SPARE_REGISTER
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG__SHIFT 0x0
+#define ODM1_OPTC_INPUT_SPARE_REGISTER__OPTC_INPUT_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg0_dispdec
+//OTG0_OTG_H_TOTAL
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG0_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_H_BLANK_START_END
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_H_SYNC_A_CNTL
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG0_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG0_OTG_H_TIMING_CNTL
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG0_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG0_OTG_V_TOTAL
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MIN
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MAX
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_MID
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG0_OTG_V_TOTAL_CONTROL
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG0_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_V_TOTAL_INT_STATUS
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG0_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG0_OTG_VSYNC_NOM_INT_STATUS
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG0_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG0_OTG_V_BLANK_START_END
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG0_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG0_OTG_V_SYNC_A_CNTL
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG0_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG0_OTG_TRIGA_CNTL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGA_MANUAL_TRIG
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_TRIGB_CNTL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG0_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG0_OTG_TRIGB_MANUAL_TRIG
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG0_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG0_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG0_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG0_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG0_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG0_OTG_CONTROL
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG0_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG0_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG0_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG0_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG0_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG0_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG0_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG0_OTG_BLANK_CONTROL
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG0_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG0_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG0_OTG_INTERLACE_CONTROL
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG0_OTG_INTERLACE_STATUS
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG0_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG0_OTG_PIXEL_DATA_READBACK0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG0_OTG_PIXEL_DATA_READBACK1
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG0_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG0_OTG_STATUS
+#define OTG0_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG0_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG0_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG0_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG0_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG0_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG0_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG0_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG0_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG0_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG0_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG0_OTG_STATUS_POSITION
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_NOM_VERT_POSITION
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG0_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG0_OTG_STATUS_FRAME_COUNT
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_STATUS_VF_COUNT
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_STATUS_HV_COUNT
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG0_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG0_OTG_COUNT_CONTROL
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG0_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG0_OTG_COUNT_RESET
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG0_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG0_OTG_VERT_SYNC_CONTROL
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG0_OTG_STEREO_STATUS
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG0_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG0_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG0_OTG_STEREO_CONTROL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG0_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG0_OTG_SNAPSHOT_STATUS
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG0_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG0_OTG_SNAPSHOT_CONTROL
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG0_OTG_SNAPSHOT_POSITION
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG0_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG0_OTG_SNAPSHOT_FRAME
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG0_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG0_OTG_INTERRUPT_CONTROL
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG0_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG0_OTG_UPDATE_LOCK
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG0_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG0_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG0_OTG_MASTER_EN
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG0_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG0_OTG_BLANK_DATA_COLOR
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLANK_DATA_COLOR_EXT
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_BLACK_COLOR
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG0_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG0_OTG_BLACK_COLOR_EXT
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG0_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG0_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG0_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG0_OTG_CRC_CNTL
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG0_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG0_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG0_OTG_CRC_CNTL2
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG0_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG0_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC0_DATA_RG
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC0_DATA_B
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG0_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG0_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG0_OTG_CRC1_DATA_RG
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG0_OTG_CRC1_DATA_B
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG0_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG0_OTG_STATIC_SCREEN_CONTROL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG0_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG0_OTG_3D_STRUCTURE_CONTROL
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG0_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG0_OTG_GSL_VSYNC_GAP
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG0_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG0_OTG_MASTER_UPDATE_MODE
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG0_OTG_CLOCK_CONTROL
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG0_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG0_OTG_VSTARTUP_PARAM
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG0_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG0_OTG_VUPDATE_PARAM
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG0_OTG_VREADY_PARAM
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG0_OTG_GLOBAL_SYNC_STATUS
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG0_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG0_OTG_MASTER_UPDATE_LOCK
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG0_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG0_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG0_OTG_GSL_CONTROL
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG0_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG0_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG0_OTG_GSL_WINDOW_X
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG0_OTG_GSL_WINDOW_Y
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG0_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG0_OTG_VUPDATE_KEEPOUT
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG0_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG0_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG0_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG0_OTG_GLOBAL_CONTROL1
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG0_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL2
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG0_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG0_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG0_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG0_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG0_OTG_GLOBAL_CONTROL3
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG0_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG0_OTG_TRIG_MANUAL_CONTROL
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG0_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG0_OTG_RANGE_TIMING_INT_STATUS
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG0_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG0_OTG_DRR_CONTROL
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG0_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG0_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG0_OTG_REQUEST_CONTROL
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG0_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG0_OTG_DSC_START_POSITION
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG0_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+//OTG0_OTG_SPARE_REGISTER
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG__SHIFT 0x0
+#define OTG0_OTG_SPARE_REGISTER__OTG_SPARE_REG_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dce_dc_optc_otg1_dispdec
+//OTG1_OTG_H_TOTAL
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL__SHIFT 0x0
+#define OTG1_OTG_H_TOTAL__OTG_H_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_H_BLANK_START_END
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_BLANK_START_END__OTG_H_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_H_SYNC_A__OTG_H_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_H_SYNC_A_CNTL
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN__SHIFT 0x10
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF__SHIFT 0x11
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_POL_MASK 0x00000001L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_COMP_SYNC_A_EN_MASK 0x00010000L
+#define OTG1_OTG_H_SYNC_A_CNTL__OTG_H_SYNC_A_CUTOFF_MASK 0x00020000L
+//OTG1_OTG_H_TIMING_CNTL
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2__SHIFT 0x0
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_MASK 0x00000001L
+#define OTG1_OTG_H_TIMING_CNTL__OTG_H_TIMING_DIV_BY2_UPDATE_MODE_MASK 0x00000100L
+//OTG1_OTG_V_TOTAL
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL__OTG_V_TOTAL_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MIN
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MIN__OTG_V_TOTAL_MIN_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MAX
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MAX__OTG_V_TOTAL_MAX_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_MID
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_MID__OTG_V_TOTAL_MID_MASK 0x00007FFFL
+//OTG1_OTG_V_TOTAL_CONTROL
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL__SHIFT 0x1
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN__SHIFT 0x2
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN__SHIFT 0x3
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD__SHIFT 0x5
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN__SHIFT 0x7
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK__SHIFT 0x10
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MIN_SEL_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_V_TOTAL_MAX_SEL_MASK 0x00000002L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MAX_EN_MASK 0x00000004L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_REPLACING_MIN_EN_MASK 0x00000008L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_FORCE_LOCK_ON_EVENT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_DRR_EVENT_ACTIVE_PERIOD_MASK 0x00000020L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_EN_MASK 0x00000080L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_VTOTAL_MID_FRAME_NUM_MASK 0x0000FF00L
+#define OTG1_OTG_V_TOTAL_CONTROL__OTG_SET_V_TOTAL_MIN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_V_TOTAL_INT_STATUS
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED__SHIFT 0x0
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT__SHIFT 0x4
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK__SHIFT 0x8
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK__SHIFT 0xc
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MASK 0x00000001L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_INT_MASK 0x00000010L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_ACK_MASK 0x00000100L
+#define OTG1_OTG_V_TOTAL_INT_STATUS__OTG_SET_V_TOTAL_MIN_EVENT_OCCURED_MSK_MASK 0x00001000L
+//OTG1_OTG_VSYNC_NOM_INT_STATUS
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM__SHIFT 0x0
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_MASK 0x00000001L
+#define OTG1_OTG_VSYNC_NOM_INT_STATUS__OTG_VSYNC_NOM_INT_CLEAR_MASK 0x00000010L
+//OTG1_OTG_V_BLANK_START_END
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START__SHIFT 0x0
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END__SHIFT 0x10
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_BLANK_START_END__OTG_V_BLANK_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END__SHIFT 0x10
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_START_MASK 0x00007FFFL
+#define OTG1_OTG_V_SYNC_A__OTG_V_SYNC_A_END_MASK 0x7FFF0000L
+//OTG1_OTG_V_SYNC_A_CNTL
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL__SHIFT 0x0
+#define OTG1_OTG_V_SYNC_A_CNTL__OTG_V_SYNC_A_POL_MASK 0x00000001L
+//OTG1_OTG_TRIGA_CNTL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGA_CNTL__OTG_TRIGA_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGA_MANUAL_TRIG
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGA_MANUAL_TRIG__OTG_TRIGA_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_TRIGB_CNTL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT__SHIFT 0x0
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT__SHIFT 0x5
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT__SHIFT 0x8
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN__SHIFT 0xb
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS__SHIFT 0xc
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS__SHIFT 0xd
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL__SHIFT 0x10
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL__SHIFT 0x12
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT__SHIFT 0x14
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY__SHIFT 0x18
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR__SHIFT 0x1f
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_SELECT_MASK 0x0000001FL
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_SOURCE_PIPE_SELECT_MASK 0x000000E0L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_SELECT_MASK 0x00000700L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RESYNC_BYPASS_EN_MASK 0x00000800L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_INPUT_STATUS_MASK 0x00001000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_POLARITY_STATUS_MASK 0x00002000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_RISING_EDGE_DETECT_CNTL_MASK 0x00030000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FALLING_EDGE_DETECT_CNTL_MASK 0x000C0000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_FREQUENCY_SELECT_MASK 0x00300000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_DELAY_MASK 0x1F000000L
+#define OTG1_OTG_TRIGB_CNTL__OTG_TRIGB_CLEAR_MASK 0x80000000L
+//OTG1_OTG_TRIGB_MANUAL_TRIG
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG__SHIFT 0x0
+#define OTG1_OTG_TRIGB_MANUAL_TRIG__OTG_TRIGB_MANUAL_TRIG_MASK 0x00000001L
+//OTG1_OTG_FORCE_COUNT_NOW_CNTL
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE__SHIFT 0x0
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK__SHIFT 0x4
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL__SHIFT 0x8
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED__SHIFT 0x10
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR__SHIFT 0x18
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_MODE_MASK 0x00000003L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CHECK_MASK 0x00000010L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_TRIG_SEL_MASK 0x00000100L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_OCCURRED_MASK 0x00010000L
+#define OTG1_OTG_FORCE_COUNT_NOW_CNTL__OTG_FORCE_COUNT_NOW_CLEAR_MASK 0x01000000L
+//OTG1_OTG_STEREO_FORCE_NEXT_EYE
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER__SHIFT 0x8
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER__SHIFT 0x10
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_STEREO_FORCE_NEXT_EYE_MASK 0x00000003L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_FRAME_COUNTER_MASK 0x0000FF00L
+#define OTG1_OTG_STEREO_FORCE_NEXT_EYE__OTG_AVSYNC_LINE_COUNTER_MASK 0x1FFF0000L
+//OTG1_OTG_CONTROL
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL__SHIFT 0x4
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL__SHIFT 0x8
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL__SHIFT 0xc
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL__SHIFT 0xd
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY__SHIFT 0xe
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE__SHIFT 0x10
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE__SHIFT 0x18
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT__SHIFT 0x1e
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE__SHIFT 0x1f
+#define OTG1_OTG_CONTROL__OTG_MASTER_EN_MASK 0x00000001L
+#define OTG1_OTG_CONTROL__OTG_SYNC_RESET_SEL_MASK 0x00000010L
+#define OTG1_OTG_CONTROL__OTG_DISABLE_POINT_CNTL_MASK 0x00000300L
+#define OTG1_OTG_CONTROL__OTG_START_POINT_CNTL_MASK 0x00001000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_CNTL_MASK 0x00002000L
+#define OTG1_OTG_CONTROL__OTG_FIELD_NUMBER_POLARITY_MASK 0x00004000L
+#define OTG1_OTG_CONTROL__OTG_CURRENT_MASTER_EN_STATE_MASK 0x00010000L
+#define OTG1_OTG_CONTROL__OTG_DISP_READ_REQUEST_DISABLE_MASK 0x01000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_LOCK_SNAPSHOT_MASK 0x40000000L
+#define OTG1_OTG_CONTROL__OTG_AVSYNC_VSYNC_N_HSYNC_MODE_MASK 0x80000000L
+//OTG1_OTG_BLANK_CONTROL
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE__SHIFT 0x0
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN__SHIFT 0x8
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE__SHIFT 0x10
+#define OTG1_OTG_BLANK_CONTROL__OTG_CURRENT_BLANK_STATE_MASK 0x00000001L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DATA_EN_MASK 0x00000100L
+#define OTG1_OTG_BLANK_CONTROL__OTG_BLANK_DE_MODE_MASK 0x00010000L
+//OTG1_OTG_INTERLACE_CONTROL
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD__SHIFT 0x10
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_ENABLE_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_CONTROL__OTG_INTERLACE_FORCE_NEXT_FIELD_MASK 0x00030000L
+//OTG1_OTG_INTERLACE_STATUS
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD__SHIFT 0x0
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD__SHIFT 0x1
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_CURRENT_FIELD_MASK 0x00000001L
+#define OTG1_OTG_INTERLACE_STATUS__OTG_INTERLACE_NEXT_FIELD_MASK 0x00000002L
+//OTG1_OTG_PIXEL_DATA_READBACK0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y__SHIFT 0x10
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_BLUE_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_PIXEL_DATA_READBACK0__OTG_PIXEL_DATA_GREEN_Y_MASK 0xFFFF0000L
+//OTG1_OTG_PIXEL_DATA_READBACK1
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR__SHIFT 0x0
+#define OTG1_OTG_PIXEL_DATA_READBACK1__OTG_PIXEL_DATA_RED_CR_MASK 0x0000FFFFL
+//OTG1_OTG_STATUS
+#define OTG1_OTG_STATUS__OTG_V_BLANK__SHIFT 0x0
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP__SHIFT 0x1
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A__SHIFT 0x2
+#define OTG1_OTG_STATUS__OTG_V_UPDATE__SHIFT 0x3
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE__SHIFT 0x5
+#define OTG1_OTG_STATUS__OTG_H_BLANK__SHIFT 0x10
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP__SHIFT 0x11
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A__SHIFT 0x12
+#define OTG1_OTG_STATUS__OTG_V_BLANK_MASK 0x00000001L
+#define OTG1_OTG_STATUS__OTG_V_ACTIVE_DISP_MASK 0x00000002L
+#define OTG1_OTG_STATUS__OTG_V_SYNC_A_MASK 0x00000004L
+#define OTG1_OTG_STATUS__OTG_V_UPDATE_MASK 0x00000008L
+#define OTG1_OTG_STATUS__OTG_V_BLANK_3D_STRUCTURE_MASK 0x00000020L
+#define OTG1_OTG_STATUS__OTG_H_BLANK_MASK 0x00010000L
+#define OTG1_OTG_STATUS__OTG_H_ACTIVE_DISP_MASK 0x00020000L
+#define OTG1_OTG_STATUS__OTG_H_SYNC_A_MASK 0x00040000L
+//OTG1_OTG_STATUS_POSITION
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATUS_POSITION__OTG_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_STATUS_POSITION__OTG_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_NOM_VERT_POSITION
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM__SHIFT 0x0
+#define OTG1_OTG_NOM_VERT_POSITION__OTG_VERT_COUNT_NOM_MASK 0x00007FFFL
+//OTG1_OTG_STATUS_FRAME_COUNT
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_FRAME_COUNT__OTG_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_STATUS_VF_COUNT
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_VF_COUNT__OTG_VF_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_STATUS_HV_COUNT
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT__SHIFT 0x0
+#define OTG1_OTG_STATUS_HV_COUNT__OTG_HV_COUNT_MASK 0x7FFFFFFFL
+//OTG1_OTG_COUNT_CONTROL
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN__SHIFT 0x0
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT__SHIFT 0x1
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_COUNT_BY2_EN_MASK 0x00000001L
+#define OTG1_OTG_COUNT_CONTROL__OTG_HORZ_REPETITION_COUNT_MASK 0x0000001EL
+//OTG1_OTG_COUNT_RESET
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_COUNT_RESET__OTG_RESET_FRAME_COUNT_MASK 0x00000001L
+//OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__SHIFT 0x0
+#define OTG1_OTG_MANUAL_FORCE_VSYNC_NEXT_LINE__OTG_MANUAL_FORCE_VSYNC_NEXT_LINE_MASK 0x00000001L
+//OTG1_OTG_VERT_SYNC_CONTROL
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR__SHIFT 0x8
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE__SHIFT 0x10
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_VERT_SYNC_CONTROL__OTG_AUTO_FORCE_VSYNC_MODE_MASK 0x00030000L
+//OTG1_OTG_STEREO_STATUS
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE__SHIFT 0x0
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT__SHIFT 0x8
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT__SHIFT 0x10
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG__SHIFT 0x14
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING__SHIFT 0x18
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE__SHIFT 0x1e
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE__SHIFT 0x1f
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_CURRENT_EYE_MASK 0x00000001L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_OUTPUT_MASK 0x00000100L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_SYNC_SELECT_MASK 0x00010000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_EYE_FLAG_MASK 0x00100000L
+#define OTG1_OTG_STEREO_STATUS__OTG_STEREO_FORCE_NEXT_EYE_PENDING_MASK 0x03000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_3D_STRUCTURE_STATE_MASK 0x40000000L
+#define OTG1_OTG_STEREO_STATUS__OTG_CURRENT_STEREOSYNC_EN_STATE_MASK 0x80000000L
+//OTG1_OTG_STEREO_CONTROL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM__SHIFT 0x0
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY__SHIFT 0xf
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY__SHIFT 0x11
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP__SHIFT 0x12
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM__SHIFT 0x13
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX__SHIFT 0x14
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN__SHIFT 0x18
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_LINE_NUM_MASK 0x00007FFFL
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_SYNC_OUTPUT_POLARITY_MASK 0x00008000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EYE_FLAG_POLARITY_MASK 0x00020000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_STEREOSYNC_OUTPUT_FOR_DP_MASK 0x00040000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_FIELD_NUM_MASK 0x00080000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_DISABLE_V_BLANK_FOR_DP_FIX_MASK 0x00100000L
+#define OTG1_OTG_STEREO_CONTROL__OTG_STEREO_EN_MASK 0x01000000L
+//OTG1_OTG_SNAPSHOT_STATUS
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR__SHIFT 0x1
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER__SHIFT 0x2
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_CLEAR_MASK 0x00000002L
+#define OTG1_OTG_SNAPSHOT_STATUS__OTG_SNAPSHOT_MANUAL_TRIGGER_MASK 0x00000004L
+//OTG1_OTG_SNAPSHOT_CONTROL
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_CONTROL__OTG_AUTO_SNAPSHOT_TRIG_SEL_MASK 0x00000003L
+//OTG1_OTG_SNAPSHOT_POSITION
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT__SHIFT 0x10
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_VERT_COUNT_MASK 0x00007FFFL
+#define OTG1_OTG_SNAPSHOT_POSITION__OTG_SNAPSHOT_HORZ_COUNT_MASK 0x7FFF0000L
+//OTG1_OTG_SNAPSHOT_FRAME
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT__SHIFT 0x0
+#define OTG1_OTG_SNAPSHOT_FRAME__OTG_SNAPSHOT_FRAME_COUNT_MASK 0x00FFFFFFL
+//OTG1_OTG_INTERRUPT_CONTROL
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK__SHIFT 0x0
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK__SHIFT 0x8
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE__SHIFT 0x9
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK__SHIFT 0x10
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE__SHIFT 0x11
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK__SHIFT 0x18
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK__SHIFT 0x19
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE__SHIFT 0x1a
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE__SHIFT 0x1b
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK__SHIFT 0x1c
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE__SHIFT 0x1d
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK__SHIFT 0x1e
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE__SHIFT 0x1f
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_MSK_MASK 0x00000001L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_SNAPSHOT_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_MSK_MASK 0x00000100L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_COUNT_NOW_INT_TYPE_MASK 0x00000200L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_MSK_MASK 0x00010000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_FORCE_VSYNC_NEXT_LINE_INT_TYPE_MASK 0x00020000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_MSK_MASK 0x01000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_MSK_MASK 0x02000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGA_INT_TYPE_MASK 0x04000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_TRIGB_INT_TYPE_MASK 0x08000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_MSK_MASK 0x10000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_VSYNC_NOM_INT_TYPE_MASK 0x20000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_MSK_MASK 0x40000000L
+#define OTG1_OTG_INTERRUPT_CONTROL__OTG_GSL_VSYNC_GAP_INT_TYPE_MASK 0x80000000L
+//OTG1_OTG_UPDATE_LOCK
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_UPDATE_LOCK__OTG_UPDATE_LOCK_MASK 0x00000001L
+//OTG1_OTG_DOUBLE_BUFFER_CONTROL
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING__SHIFT 0x0
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING__SHIFT 0x2
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING__SHIFT 0x3
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING__SHIFT 0x4
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING__SHIFT 0x5
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING__SHIFT 0x6
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING__SHIFT 0x7
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY__SHIFT 0x8
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING__SHIFT 0x9
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING__SHIFT 0xa
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN__SHIFT 0x10
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE__SHIFT 0x18
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_PENDING_MASK 0x00000001L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_H_TIMING_DIV_BY2_DB_UPDATE_PENDING_MASK 0x00000004L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_EN_UPDATE_PENDING_MASK 0x00000008L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_PENDING_MASK 0x00000010L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_TIMING_DB_UPDATE_PENDING_MASK 0x00000020L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_CTRL_DB_UPDATE_PENDING_MASK 0x00000040L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_3D_STRUCTURE_EN_DB_UPDATE_PENDING_MASK 0x00000080L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_UPDATE_INSTANTLY_MASK 0x00000100L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_VSTARTUP_DB_UPDATE_PENDING_MASK 0x00000200L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_DSC_POSITION_DB_UPDATE_PENDING_MASK 0x00000400L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_BLANK_DATA_DOUBLE_BUFFER_EN_MASK 0x00010000L
+#define OTG1_OTG_DOUBLE_BUFFER_CONTROL__OTG_RANGE_TIMING_DBUF_UPDATE_MODE_MASK 0x03000000L
+//OTG1_OTG_MASTER_EN
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN__SHIFT 0x0
+#define OTG1_OTG_MASTER_EN__OTG_MASTER_EN_MASK 0x00000001L
+//OTG1_OTG_BLANK_DATA_COLOR
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y__SHIFT 0xa
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR__SHIFT 0x14
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_BLUE_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_GREEN_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLANK_DATA_COLOR__OTG_BLANK_DATA_COLOR_RED_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLANK_DATA_COLOR_EXT
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_BLUE_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_GREEN_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLANK_DATA_COLOR_EXT__OTG_BLANK_DATA_COLOR_RED_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_BLACK_COLOR
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y__SHIFT 0xa
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR__SHIFT 0x14
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_B_CB_MASK 0x000003FFL
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_G_Y_MASK 0x000FFC00L
+#define OTG1_OTG_BLACK_COLOR__OTG_BLACK_COLOR_R_CR_MASK 0x3FF00000L
+//OTG1_OTG_BLACK_COLOR_EXT
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT__SHIFT 0x0
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT__SHIFT 0x8
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT__SHIFT 0x10
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_B_CB_EXT_MASK 0x0000003FL
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_G_Y_EXT_MASK 0x00003F00L
+#define OTG1_OTG_BLACK_COLOR_EXT__OTG_BLACK_COLOR_R_CR_EXT_MASK 0x003F0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_START_MASK 0x00007FFFL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_POSITION__OTG_VERTICAL_INTERRUPT0_LINE_END_MASK 0x7FFF0000L
+//OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY__SHIFT 0x4
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_OUTPUT_POLARITY_MASK 0x00000010L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT0_CONTROL__OTG_VERTICAL_INTERRUPT0_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT1_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT1_POSITION__OTG_VERTICAL_INTERRUPT1_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT1_CONTROL__OTG_VERTICAL_INTERRUPT1_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_VERTICAL_INTERRUPT2_POSITION
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START__SHIFT 0x0
+#define OTG1_OTG_VERTICAL_INTERRUPT2_POSITION__OTG_VERTICAL_INTERRUPT2_LINE_START_MASK 0x00007FFFL
+//OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE__SHIFT 0x8
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS__SHIFT 0xc
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS__SHIFT 0x10
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR__SHIFT 0x14
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE__SHIFT 0x18
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_ENABLE_MASK 0x00000100L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_STATUS_MASK 0x00001000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_STATUS_MASK 0x00010000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_CLEAR_MASK 0x00100000L
+#define OTG1_OTG_VERTICAL_INTERRUPT2_CONTROL__OTG_VERTICAL_INTERRUPT2_INT_TYPE_MASK 0x01000000L
+//OTG1_OTG_CRC_CNTL
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE__SHIFT 0x2
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY__SHIFT 0x3
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL__SHIFT 0x5
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE__SHIFT 0xc
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS__SHIFT 0x13
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT__SHIFT 0x14
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT__SHIFT 0x18
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING__SHIFT 0x1c
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING__SHIFT 0x1d
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING__SHIFT 0x1e
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING__SHIFT 0x1f
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_EN_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_EN_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_DUAL_LINK_MODE_MASK 0x00000004L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_BLANK_ONLY_MASK 0x00000008L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CONT_EN_MASK 0x00000010L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_CAPTURE_START_SEL_MASK 0x00000060L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_STEREO_MODE_MASK 0x00000300L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_INTERLACE_MODE_MASK 0x00003000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC_USE_NEW_AND_REPEATED_PIXELS_MASK 0x00080000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC0_SELECT_MASK 0x00700000L
+#define OTG1_OTG_CRC_CNTL__OTG_CRC1_SELECT_MASK 0x07000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC0_PENDING_MASK 0x10000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC1_PENDING_MASK 0x20000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC2_PENDING_MASK 0x40000000L
+#define OTG1_OTG_CRC_CNTL__OTG_ONE_SHOT_CRC3_PENDING_MASK 0x80000000L
+//OTG1_OTG_CRC_CNTL2
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE__SHIFT 0x0
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE__SHIFT 0x1
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE__SHIFT 0x4
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT__SHIFT 0x8
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DSC_MODE_MASK 0x00000001L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_COMBINE_MODE_MASK 0x00000002L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_STREAM_SPLIT_MODE_MASK 0x00000030L
+#define OTG1_OTG_CRC_CNTL2__OTG_CRC_DATA_FORMAT_MASK 0x00000300L
+//OTG1_OTG_CRC0_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_X_CONTROL__OTG_CRC0_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWA_Y_CONTROL__OTG_CRC0_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_X_CONTROL__OTG_CRC0_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC0_WINDOWB_Y_CONTROL__OTG_CRC0_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC0_DATA_RG
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_RG__CRC0_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC0_DATA_B
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C__SHIFT 0x10
+#define OTG1_OTG_CRC0_DATA_B__CRC0_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC0_DATA_B__CRC0_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_WINDOWA_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_X_CONTROL__OTG_CRC1_WINDOWA_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWA_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWA_Y_CONTROL__OTG_CRC1_WINDOWA_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_X_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_X_CONTROL__OTG_CRC1_WINDOWB_X_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_WINDOWB_Y_CONTROL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START__SHIFT 0x0
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END__SHIFT 0x10
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_START_MASK 0x00007FFFL
+#define OTG1_OTG_CRC1_WINDOWB_Y_CONTROL__OTG_CRC1_WINDOWB_Y_END_MASK 0x7FFF0000L
+//OTG1_OTG_CRC1_DATA_RG
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_R_CR_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_RG__CRC1_G_Y_MASK 0xFFFF0000L
+//OTG1_OTG_CRC1_DATA_B
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB__SHIFT 0x0
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C__SHIFT 0x10
+#define OTG1_OTG_CRC1_DATA_B__CRC1_B_CB_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC1_DATA_B__CRC1_C_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_RED_GREEN_MASK
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_RED_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_RED_GREEN_MASK__OTG_CRC_SIG_GREEN_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK__SHIFT 0x0
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK__SHIFT 0x10
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_BLUE_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_CRC_SIG_BLUE_CONTROL_MASK__OTG_CRC_SIG_CONTROL_MASK_MASK 0xFFFF0000L
+//OTG1_OTG_STATIC_SCREEN_CONTROL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK__SHIFT 0x0
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT__SHIFT 0x10
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE__SHIFT 0x18
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS__SHIFT 0x19
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS__SHIFT 0x1a
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR__SHIFT 0x1b
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE__SHIFT 0x1c
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE__SHIFT 0x1e
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE__SHIFT 0x1f
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_EVENT_MASK_MASK 0x0000FFFFL
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_FRAME_COUNT_MASK 0x00FF0000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_ENABLE_MASK 0x01000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_SS_STATUS_MASK 0x02000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_STATUS_MASK 0x04000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_CLEAR_MASK 0x08000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_CPU_SS_INT_TYPE_MASK 0x10000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_MASK 0x40000000L
+#define OTG1_OTG_STATIC_SCREEN_CONTROL__OTG_STATIC_SCREEN_OVERRIDE_VALUE_MASK 0x80000000L
+//OTG1_OTG_3D_STRUCTURE_CONTROL
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN__SHIFT 0x0
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE__SHIFT 0x8
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR__SHIFT 0xc
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET__SHIFT 0x10
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING__SHIFT 0x11
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT__SHIFT 0x12
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_EN_MASK 0x00000001L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_V_UPDATE_MODE_MASK 0x00000300L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_STEREO_SEL_OVR_MASK 0x00001000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_MASK 0x00010000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_RESET_PENDING_MASK 0x00020000L
+#define OTG1_OTG_3D_STRUCTURE_CONTROL__OTG_3D_STRUCTURE_F_COUNT_MASK 0x000C0000L
+//OTG1_OTG_GSL_VSYNC_GAP
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT__SHIFT 0x0
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL__SHIFT 0x10
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE__SHIFT 0x11
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR__SHIFT 0x13
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER__SHIFT 0x17
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP__SHIFT 0x18
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_LIMIT_MASK 0x000000FFL
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_DELAY_MASK 0x0000FF00L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_SOURCE_SEL_MASK 0x00010000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MODE_MASK 0x00060000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_CLEAR_MASK 0x00080000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASTER_FASTER_MASK 0x00800000L
+#define OTG1_OTG_GSL_VSYNC_GAP__OTG_GSL_VSYNC_GAP_MASK 0xFF000000L
+//OTG1_OTG_MASTER_UPDATE_MODE
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_MODE__MASTER_UPDATE_INTERLACED_MODE_MASK 0x00000003L
+//OTG1_OTG_CLOCK_CONTROL
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN__SHIFT 0x0
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS__SHIFT 0x1
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET__SHIFT 0x4
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON__SHIFT 0x8
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY__SHIFT 0x10
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_EN_MASK 0x00000001L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_GATE_DIS_MASK 0x00000002L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_SOFT_RESET_MASK 0x00000010L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_CLOCK_ON_MASK 0x00000100L
+#define OTG1_OTG_CLOCK_CONTROL__OTG_BUSY_MASK 0x00010000L
+//OTG1_OTG_VSTARTUP_PARAM
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START__SHIFT 0x0
+#define OTG1_OTG_VSTARTUP_PARAM__VSTARTUP_START_MASK 0x000003FFL
+//OTG1_OTG_VUPDATE_PARAM
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_PARAM__VUPDATE_WIDTH_MASK 0x03FF0000L
+//OTG1_OTG_VREADY_PARAM
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VREADY_PARAM__VREADY_OFFSET_MASK 0x0000FFFFL
+//OTG1_OTG_GLOBAL_SYNC_STATUS
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE__SHIFT 0x1
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED__SHIFT 0x2
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS__SHIFT 0x3
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN__SHIFT 0x5
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE__SHIFT 0x6
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL__SHIFT 0x7
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS__SHIFT 0x9
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS__SHIFT 0xb
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN__SHIFT 0xc
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE__SHIFT 0xd
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED__SHIFT 0xe
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS__SHIFT 0xf
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS__SHIFT 0x11
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN__SHIFT 0x12
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE__SHIFT 0x13
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED__SHIFT 0x14
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS__SHIFT 0x15
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR__SHIFT 0x16
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS__SHIFT 0x18
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_EN_MASK 0x00000001L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_TYPE_MASK 0x00000002L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_OCCURRED_MASK 0x00000004L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_INT_STATUS_MASK 0x00000008L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VSTARTUP_EVENT_CLEAR_MASK 0x00000010L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_EN_MASK 0x00000020L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_TYPE_MASK 0x00000040L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_POSITION_SEL_MASK 0x00000080L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_OCCURRED_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_INT_STATUS_MASK 0x00000200L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_EVENT_CLEAR_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_STATUS_MASK 0x00000800L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_EN_MASK 0x00001000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_TYPE_MASK 0x00002000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_OCCURRED_MASK 0x00004000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_INT_STATUS_MASK 0x00008000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_EVENT_CLEAR_MASK 0x00010000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VUPDATE_NO_LOCK_STATUS_MASK 0x00020000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_EN_MASK 0x00040000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_TYPE_MASK 0x00080000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_OCCURRED_MASK 0x00100000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_INT_STATUS_MASK 0x00200000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__VREADY_EVENT_CLEAR_MASK 0x00400000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__STEREO_SELECT_STATUS_MASK 0x01000000L
+#define OTG1_OTG_GLOBAL_SYNC_STATUS__FIELD_NUMBER_STATUS_MASK 0x02000000L
+//OTG1_OTG_MASTER_UPDATE_LOCK
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK__SHIFT 0x0
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS__SHIFT 0x8
+#define OTG1_OTG_MASTER_UPDATE_LOCK__OTG_MASTER_UPDATE_LOCK_MASK 0x00000001L
+#define OTG1_OTG_MASTER_UPDATE_LOCK__UPDATE_LOCK_STATUS_MASK 0x00000100L
+//OTG1_OTG_GSL_CONTROL
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN__SHIFT 0x0
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN__SHIFT 0x1
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN__SHIFT 0x2
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN__SHIFT 0x3
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE__SHIFT 0x4
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY__SHIFT 0x8
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY__SHIFT 0x10
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS__SHIFT 0x1c
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN__SHIFT 0x1f
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL0_EN_MASK 0x00000001L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL1_EN_MASK 0x00000002L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL2_EN_MASK 0x00000004L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_EN_MASK 0x00000008L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_MASTER_MODE_MASK 0x00000030L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_DELAY_MASK 0x00000F00L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_FORCE_DELAY_MASK 0x001F0000L
+#define OTG1_OTG_GSL_CONTROL__OTG_GSL_CHECK_ALL_FIELDS_MASK 0x10000000L
+#define OTG1_OTG_GSL_CONTROL__OTG_MASTER_UPDATE_LOCK_GSL_EN_MASK 0x80000000L
+//OTG1_OTG_GSL_WINDOW_X
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_START_X_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_X__OTG_GSL_WINDOW_END_X_MASK 0x7FFF0000L
+//OTG1_OTG_GSL_WINDOW_Y
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y__SHIFT 0x0
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y__SHIFT 0x10
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_START_Y_MASK 0x00007FFFL
+#define OTG1_OTG_GSL_WINDOW_Y__OTG_GSL_WINDOW_END_Y_MASK 0x7FFF0000L
+//OTG1_OTG_VUPDATE_KEEPOUT
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET__SHIFT 0x0
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET__SHIFT 0x10
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN__SHIFT 0x1f
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET_MASK 0x0000FFFFL
+#define OTG1_OTG_VUPDATE_KEEPOUT__MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET_MASK 0x03FF0000L
+#define OTG1_OTG_VUPDATE_KEEPOUT__OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL__SHIFT 0x19
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_MASK 0x000000FFL
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_HTOTAL_KEEPOUT_EN_MASK 0x00000100L
+#define OTG1_OTG_GLOBAL_CONTROL0__OTG_MASTER_UPDATE_LOCK_SEL_MASK 0x0E000000L
+//OTG1_OTG_GLOBAL_CONTROL1
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_X_MASK 0x00007FFFL
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_Y_MASK 0x7FFF0000L
+#define OTG1_OTG_GLOBAL_CONTROL1__MASTER_UPDATE_LOCK_DB_EN_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL2
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN__SHIFT 0xa
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL__SHIFT 0x10
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE__SHIFT 0x1f
+#define OTG1_OTG_GLOBAL_CONTROL2__DIG_UPDATE_LOCATION_MASK 0x000003FFL
+#define OTG1_OTG_GLOBAL_CONTROL2__GLOBAL_UPDATE_LOCK_EN_MASK 0x00000400L
+#define OTG1_OTG_GLOBAL_CONTROL2__MANUAL_FLOW_CONTROL_SEL_MASK 0x00070000L
+#define OTG1_OTG_GLOBAL_CONTROL2__DCCG_VUPDATE_MODE_MASK 0x80000000L
+//OTG1_OTG_GLOBAL_CONTROL3
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD__SHIFT 0x0
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL__SHIFT 0x4
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL__SHIFT 0x8
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_MASK 0x00000003L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_STEREO_SEL_MASK 0x00000030L
+#define OTG1_OTG_GLOBAL_CONTROL3__MASTER_UPDATE_LOCK_DB_FIELD_STEREO_FLAG_SEL_MASK 0x00000100L
+//OTG1_OTG_TRIG_MANUAL_CONTROL
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL__SHIFT 0x0
+#define OTG1_OTG_TRIG_MANUAL_CONTROL__TRIG_MANUAL_CONTROL_MASK 0x00000001L
+//OTG1_OTG_RANGE_TIMING_INT_STATUS
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED__SHIFT 0x0
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT__SHIFT 0x4
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR__SHIFT 0x8
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK__SHIFT 0xc
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE__SHIFT 0x10
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_MASK 0x00000001L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MASK 0x00000010L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_CLEAR_MASK 0x00000100L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_MSK_MASK 0x00001000L
+#define OTG1_OTG_RANGE_TIMING_INT_STATUS__OTG_RANGE_TIMING_UPDATE_OCCURRED_INT_TYPE_MASK 0x00010000L
+//OTG1_OTG_DRR_CONTROL
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME__SHIFT 0x0
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR__SHIFT 0x10
+#define OTG1_OTG_DRR_CONTROL__OTG_DRR_AVERAGE_FRAME_MASK 0x00000007L
+#define OTG1_OTG_DRR_CONTROL__OTG_V_TOTAL_LAST_USED_BY_DRR_MASK 0x7FFF0000L
+//OTG1_OTG_REQUEST_CONTROL
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE__SHIFT 0x0
+#define OTG1_OTG_REQUEST_CONTROL__OTG_REQUEST_MODE_FOR_H_DUPLICATE_MASK 0x00000001L
+//OTG1_OTG_DSC_START_POSITION
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X__SHIFT 0x0
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM__SHIFT 0x10
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_X_MASK 0x00007FFFL
+#define OTG1_OTG_DSC_START_POSITION__OTG_DSC_START_POSITION_LINE_NUM_MASK 0x03FF0000L
+
+
+// addressBlock: dce_dc_optc_optc_misc_dispdec
+//DWB_SOURCE_SELECT
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT__SHIFT 0x0
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT__SHIFT 0x3
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT__SHIFT 0x6
+#define DWB_SOURCE_SELECT__OPTC_DWB0_SOURCE_SELECT_MASK 0x00000007L
+#define DWB_SOURCE_SELECT__OPTC_DWB1_SOURCE_SELECT_MASK 0x00000038L
+#define DWB_SOURCE_SELECT__OPTC_DWB2_SOURCE_SELECT_MASK 0x000001C0L
+//GSL_SOURCE_SELECT
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL__SHIFT 0x0
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL__SHIFT 0x4
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL__SHIFT 0x8
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL__SHIFT 0x10
+#define GSL_SOURCE_SELECT__GSL0_READY_SOURCE_SEL_MASK 0x00000007L
+#define GSL_SOURCE_SELECT__GSL1_READY_SOURCE_SEL_MASK 0x00000070L
+#define GSL_SOURCE_SELECT__GSL2_READY_SOURCE_SEL_MASK 0x00000700L
+#define GSL_SOURCE_SELECT__GSL_TIMING_SYNC_SEL_MASK 0x00070000L
+//OPTC_CLOCK_CONTROL
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS__SHIFT 0x0
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON__SHIFT 0x1
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_GATE_DIS_MASK 0x00000001L
+#define OPTC_CLOCK_CONTROL__OPTC_DISPCLK_R_CLOCK_ON_MASK 0x00000002L
+
+// addressBlock: dce_dc_dio_dout_i2c_dispdec
+//DC_I2C_CONTROL
+#define DC_I2C_CONTROL__DC_I2C_GO__SHIFT 0x0
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET__SHIFT 0x1
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET__SHIFT 0x2
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET__SHIFT 0x3
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT__SHIFT 0x8
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT__SHIFT 0x14
+#define DC_I2C_CONTROL__DC_I2C_GO_MASK 0x00000001L
+#define DC_I2C_CONTROL__DC_I2C_SOFT_RESET_MASK 0x00000002L
+#define DC_I2C_CONTROL__DC_I2C_SEND_RESET_MASK 0x00000004L
+#define DC_I2C_CONTROL__DC_I2C_SW_STATUS_RESET_MASK 0x00000008L
+#define DC_I2C_CONTROL__DC_I2C_DDC_SELECT_MASK 0x00000700L
+#define DC_I2C_CONTROL__DC_I2C_TRANSACTION_COUNT_MASK 0x00300000L
+//DC_I2C_ARBITRATION
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY__SHIFT 0x0
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO__SHIFT 0x4
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER__SHIFT 0x8
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER__SHIFT 0xc
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ__SHIFT 0x14
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG__SHIFT 0x15
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ__SHIFT 0x18
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG__SHIFT 0x19
+#define DC_I2C_ARBITRATION__DC_I2C_SW_PRIORITY_MASK 0x00000003L
+#define DC_I2C_ARBITRATION__DC_I2C_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DC_I2C_ARBITRATION__DC_I2C_NO_QUEUED_SW_GO_MASK 0x00000010L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_HW_XFER_MASK 0x00000100L
+#define DC_I2C_ARBITRATION__DC_I2C_ABORT_SW_XFER_MASK 0x00001000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_USE_I2C_REG_REQ_MASK 0x00100000L
+#define DC_I2C_ARBITRATION__DC_I2C_SW_DONE_USING_I2C_REG_MASK 0x00200000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_USE_I2C_REG_REQ_MASK 0x01000000L
+#define DC_I2C_ARBITRATION__DC_I2C_DMCU_DONE_USING_I2C_REG_MASK 0x02000000L
+//DC_I2C_INTERRUPT_CONTROL
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT__SHIFT 0x0
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK__SHIFT 0x1
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK__SHIFT 0x2
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT__SHIFT 0x4
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK__SHIFT 0x5
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK__SHIFT 0x6
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT__SHIFT 0x8
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK__SHIFT 0x9
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK__SHIFT 0xa
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_INT_MASK 0x00000001L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_ACK_MASK 0x00000002L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_SW_DONE_MASK_MASK 0x00000004L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_INT_MASK 0x00000010L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_ACK_MASK 0x00000020L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC1_HW_DONE_MASK_MASK 0x00000040L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_INT_MASK 0x00000100L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_ACK_MASK 0x00000200L
+#define DC_I2C_INTERRUPT_CONTROL__DC_I2C_DDC2_HW_DONE_MASK_MASK 0x00000400L
+//DC_I2C_SW_STATUS
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS__SHIFT 0x0
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE__SHIFT 0x2
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED__SHIFT 0x4
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT__SHIFT 0x5
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED__SHIFT 0x6
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW__SHIFT 0x7
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK__SHIFT 0x8
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0__SHIFT 0xc
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1__SHIFT 0xd
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2__SHIFT 0xe
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3__SHIFT 0xf
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ__SHIFT 0x12
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STATUS_MASK 0x00000003L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK 0x00000004L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_ABORTED_MASK 0x00000010L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_TIMEOUT_MASK 0x00000020L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_INTERRUPTED_MASK 0x00000040L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_BUFFER_OVERFLOW_MASK 0x00000080L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_STOPPED_ON_NACK_MASK 0x00000100L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK 0x00001000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK1_MASK 0x00002000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK2_MASK 0x00004000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_NACK3_MASK 0x00008000L
+#define DC_I2C_SW_STATUS__DC_I2C_SW_REQ_MASK 0x00040000L
+//DC_I2C_DDC1_HW_STATUS
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC1_HW_STATUS__DC_I2C_DDC1_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC2_HW_STATUS
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS__SHIFT 0x0
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE__SHIFT 0x3
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ__SHIFT 0x10
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG__SHIFT 0x11
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS__SHIFT 0x14
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES__SHIFT 0x18
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE__SHIFT 0x1c
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_STATUS_MASK 0x00000003L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_DONE_MASK 0x00000008L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_REQ_MASK 0x00010000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_HW_URG_MASK 0x00020000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATUS_MASK 0x00100000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_NUM_VALID_TRIES_MASK 0x0F000000L
+#define DC_I2C_DDC2_HW_STATUS__DC_I2C_DDC2_EDID_DETECT_STATE_MASK 0x70000000L
+//DC_I2C_DDC1_SPEED
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC1_SPEED__DC_I2C_DDC1_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC1_SETUP
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC1_SETUP__DC_I2C_DDC1_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_DDC2_SPEED
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD__SHIFT 0x0
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL__SHIFT 0x4
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL__SHIFT 0x8
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE__SHIFT 0x10
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_THRESHOLD_MASK 0x00000003L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_DISABLE_FILTER_DURING_STALL_MASK 0x00000010L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_START_STOP_TIMING_CNTL_MASK 0x00000300L
+#define DC_I2C_DDC2_SPEED__DC_I2C_DDC2_PRESCALE_MASK 0xFFFF0000L
+//DC_I2C_DDC2_SETUP
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN__SHIFT 0x0
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL__SHIFT 0x1
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH__SHIFT 0x2
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE__SHIFT 0x4
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE__SHIFT 0x5
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE__SHIFT 0x6
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN__SHIFT 0x7
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY__SHIFT 0x8
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY__SHIFT 0x10
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT__SHIFT 0x18
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_EN_MASK 0x00000001L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_DATA_DRIVE_SEL_MASK 0x00000002L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_SEND_RESET_LENGTH_MASK 0x00000004L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_ENABLE_MASK 0x00000010L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_EDID_DETECT_MODE_MASK 0x00000020L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_ENABLE_MASK 0x00000040L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_CLK_DRIVE_EN_MASK 0x00000080L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_BYTE_DELAY_MASK 0x0000FF00L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_INTRA_TRANSACTION_DELAY_MASK 0x00FF0000L
+#define DC_I2C_DDC2_SETUP__DC_I2C_DDC2_TIME_LIMIT_MASK 0xFF000000L
+//DC_I2C_TRANSACTION0
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT 0x0
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0__SHIFT 0x8
+#define DC_I2C_TRANSACTION0__DC_I2C_START0__SHIFT 0xc
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0__SHIFT 0xd
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT 0x10
+#define DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK 0x00000001L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP_ON_NACK0_MASK 0x00000100L
+#define DC_I2C_TRANSACTION0__DC_I2C_START0_MASK 0x00001000L
+#define DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK 0x00002000L
+#define DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION1
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1__SHIFT 0x0
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1__SHIFT 0x8
+#define DC_I2C_TRANSACTION1__DC_I2C_START1__SHIFT 0xc
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1__SHIFT 0xd
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1__SHIFT 0x10
+#define DC_I2C_TRANSACTION1__DC_I2C_RW1_MASK 0x00000001L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP_ON_NACK1_MASK 0x00000100L
+#define DC_I2C_TRANSACTION1__DC_I2C_START1_MASK 0x00001000L
+#define DC_I2C_TRANSACTION1__DC_I2C_STOP1_MASK 0x00002000L
+#define DC_I2C_TRANSACTION1__DC_I2C_COUNT1_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION2
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2__SHIFT 0x0
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2__SHIFT 0x8
+#define DC_I2C_TRANSACTION2__DC_I2C_START2__SHIFT 0xc
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2__SHIFT 0xd
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2__SHIFT 0x10
+#define DC_I2C_TRANSACTION2__DC_I2C_RW2_MASK 0x00000001L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP_ON_NACK2_MASK 0x00000100L
+#define DC_I2C_TRANSACTION2__DC_I2C_START2_MASK 0x00001000L
+#define DC_I2C_TRANSACTION2__DC_I2C_STOP2_MASK 0x00002000L
+#define DC_I2C_TRANSACTION2__DC_I2C_COUNT2_MASK 0x03FF0000L
+//DC_I2C_TRANSACTION3
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3__SHIFT 0x0
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3__SHIFT 0x8
+#define DC_I2C_TRANSACTION3__DC_I2C_START3__SHIFT 0xc
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3__SHIFT 0xd
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3__SHIFT 0x10
+#define DC_I2C_TRANSACTION3__DC_I2C_RW3_MASK 0x00000001L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP_ON_NACK3_MASK 0x00000100L
+#define DC_I2C_TRANSACTION3__DC_I2C_START3_MASK 0x00001000L
+#define DC_I2C_TRANSACTION3__DC_I2C_STOP3_MASK 0x00002000L
+#define DC_I2C_TRANSACTION3__DC_I2C_COUNT3_MASK 0x03FF0000L
+//DC_I2C_DATA
+#define DC_I2C_DATA__DC_I2C_DATA_RW__SHIFT 0x0
+#define DC_I2C_DATA__DC_I2C_DATA__SHIFT 0x8
+#define DC_I2C_DATA__DC_I2C_INDEX__SHIFT 0x10
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE__SHIFT 0x1f
+#define DC_I2C_DATA__DC_I2C_DATA_RW_MASK 0x00000001L
+#define DC_I2C_DATA__DC_I2C_DATA_MASK 0x0000FF00L
+#define DC_I2C_DATA__DC_I2C_INDEX_MASK 0x03FF0000L
+#define DC_I2C_DATA__DC_I2C_INDEX_WRITE_MASK 0x80000000L
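/*
 * Usage sketch (editorial illustration only, not part of the generated
 * header or of the patch): the *__SHIFT / *_MASK pairs above are intended
 * for packing and unpacking fields of the raw 32-bit register words. The
 * helper names below are invented for the example, and the values are
 * plain integers; how a value reaches the hardware is driver-specific and
 * not shown here.
 */
#include <stdint.h>
#include <stdbool.h>

/* Build a DC_I2C_TRANSACTION0 word: read/write flag, byte count, START/STOP. */
static inline uint32_t dc_i2c_transaction0_pack(uint32_t rw, uint32_t count)
{
	uint32_t val = 0;

	val |= (rw << DC_I2C_TRANSACTION0__DC_I2C_RW0__SHIFT) &
	       DC_I2C_TRANSACTION0__DC_I2C_RW0_MASK;
	val |= (count << DC_I2C_TRANSACTION0__DC_I2C_COUNT0__SHIFT) &
	       DC_I2C_TRANSACTION0__DC_I2C_COUNT0_MASK;
	val |= DC_I2C_TRANSACTION0__DC_I2C_START0_MASK;
	val |= DC_I2C_TRANSACTION0__DC_I2C_STOP0_MASK;
	return val;
}

/* Check a DC_I2C_SW_STATUS readback: done, and no NACK on transaction 0. */
static inline bool dc_i2c_sw_done_ok(uint32_t sw_status)
{
	return (sw_status & DC_I2C_SW_STATUS__DC_I2C_SW_DONE_MASK) &&
	       !(sw_status & DC_I2C_SW_STATUS__DC_I2C_SW_NACK0_MASK);
}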
+
+//DIO_SCRATCH0
+#define DIO_SCRATCH0__DIO_SCRATCH0__SHIFT 0x0
+#define DIO_SCRATCH0__DIO_SCRATCH0_MASK 0xFFFFFFFFL
+//DIO_SCRATCH1
+#define DIO_SCRATCH1__DIO_SCRATCH1__SHIFT 0x0
+#define DIO_SCRATCH1__DIO_SCRATCH1_MASK 0xFFFFFFFFL
+//DIO_SCRATCH2
+#define DIO_SCRATCH2__DIO_SCRATCH2__SHIFT 0x0
+#define DIO_SCRATCH2__DIO_SCRATCH2_MASK 0xFFFFFFFFL
+//DIO_SCRATCH3
+#define DIO_SCRATCH3__DIO_SCRATCH3__SHIFT 0x0
+#define DIO_SCRATCH3__DIO_SCRATCH3_MASK 0xFFFFFFFFL
+//DIO_SCRATCH4
+#define DIO_SCRATCH4__DIO_SCRATCH4__SHIFT 0x0
+#define DIO_SCRATCH4__DIO_SCRATCH4_MASK 0xFFFFFFFFL
+//DIO_SCRATCH5
+#define DIO_SCRATCH5__DIO_SCRATCH5__SHIFT 0x0
+#define DIO_SCRATCH5__DIO_SCRATCH5_MASK 0xFFFFFFFFL
+//DIO_SCRATCH6
+#define DIO_SCRATCH6__DIO_SCRATCH6__SHIFT 0x0
+#define DIO_SCRATCH6__DIO_SCRATCH6_MASK 0xFFFFFFFFL
+//DIO_SCRATCH7
+#define DIO_SCRATCH7__DIO_SCRATCH7__SHIFT 0x0
+#define DIO_SCRATCH7__DIO_SCRATCH7_MASK 0xFFFFFFFFL
+//DIO_MEM_PWR_STATUS
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE__SHIFT 0x3
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE__SHIFT 0x5
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE__SHIFT 0x7
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE__SHIFT 0x9
+#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE__SHIFT 0xa
+#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE__SHIFT 0xc
+#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE__SHIFT 0xe
+#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE__SHIFT 0x10
+#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE__SHIFT 0x12
+#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE__SHIFT 0x14
+#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE__SHIFT 0x16
+#define DIO_MEM_PWR_STATUS__I2C_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS__DPA_MEM_PWR_STATE_MASK 0x00000008L
+#define DIO_MEM_PWR_STATUS__DPB_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS__DPC_MEM_PWR_STATE_MASK 0x00000020L
+#define DIO_MEM_PWR_STATUS__DPD_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS__DPE_MEM_PWR_STATE_MASK 0x00000080L
+#define DIO_MEM_PWR_STATUS__DPF_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS__DPG_MEM_PWR_STATE_MASK 0x00000200L
+#define DIO_MEM_PWR_STATUS__HDMI0_MEM_PWR_STATE_MASK 0x00000C00L
+#define DIO_MEM_PWR_STATUS__HDMI1_MEM_PWR_STATE_MASK 0x00003000L
+#define DIO_MEM_PWR_STATUS__HDMI2_MEM_PWR_STATE_MASK 0x0000C000L
+#define DIO_MEM_PWR_STATUS__HDMI3_MEM_PWR_STATE_MASK 0x00030000L
+#define DIO_MEM_PWR_STATUS__HDMI4_MEM_PWR_STATE_MASK 0x000C0000L
+#define DIO_MEM_PWR_STATUS__HDMI5_MEM_PWR_STATE_MASK 0x00300000L
+#define DIO_MEM_PWR_STATUS__HDMI6_MEM_PWR_STATE_MASK 0x00C00000L
+//DIO_MEM_PWR_CTRL
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE__SHIFT 0xb
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE__SHIFT 0xe
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS__SHIFT 0x10
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE__SHIFT 0x11
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS__SHIFT 0x13
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE__SHIFT 0x14
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS__SHIFT 0x16
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE__SHIFT 0x17
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS__SHIFT 0x1f
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_FORCE_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL__I2C_LIGHT_SLEEP_DIS_MASK 0x00000002L
+#define DIO_MEM_PWR_CTRL__DPA_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL__DPB_LIGHT_SLEEP_DIS_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL__DPC_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL__DPD_LIGHT_SLEEP_DIS_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL__DPE_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL__DPF_LIGHT_SLEEP_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL__DPG_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_FORCE_MASK 0x00001800L
+#define DIO_MEM_PWR_CTRL__HDMI0_MEM_PWR_DIS_MASK 0x00002000L
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_FORCE_MASK 0x0000C000L
+#define DIO_MEM_PWR_CTRL__HDMI1_MEM_PWR_DIS_MASK 0x00010000L
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_FORCE_MASK 0x00060000L
+#define DIO_MEM_PWR_CTRL__HDMI2_MEM_PWR_DIS_MASK 0x00080000L
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_FORCE_MASK 0x00300000L
+#define DIO_MEM_PWR_CTRL__HDMI3_MEM_PWR_DIS_MASK 0x00400000L
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_FORCE_MASK 0x01800000L
+#define DIO_MEM_PWR_CTRL__HDMI4_MEM_PWR_DIS_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_FORCE_MASK 0x0C000000L
+#define DIO_MEM_PWR_CTRL__HDMI5_MEM_PWR_DIS_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_FORCE_MASK 0x60000000L
+#define DIO_MEM_PWR_CTRL__HDMI6_MEM_PWR_DIS_MASK 0x80000000L
+//DIO_MEM_PWR_CTRL2
+#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE__SHIFT 0x5
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS__SHIFT 0x8
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE__SHIFT 0xb
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS__SHIFT 0xc
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS__SHIFT 0xe
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE__SHIFT 0xf
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE__SHIFT 0x18
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE__SHIFT 0x19
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE__SHIFT 0x1a
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE__SHIFT 0x1b
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE__SHIFT 0x1c
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE__SHIFT 0x1d
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE__SHIFT 0x1e
+#define DIO_MEM_PWR_CTRL2__HDMI_MEM_PWR_MODE_SEL_MASK 0x00000003L
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_DIS_MASK 0x00000010L
+#define DIO_MEM_PWR_CTRL2__AFMT0_LIGHT_SLEEP_FORCE_MASK 0x00000020L
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL2__AFMT1_LIGHT_SLEEP_FORCE_MASK 0x00000080L
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_DIS_MASK 0x00000100L
+#define DIO_MEM_PWR_CTRL2__AFMT2_LIGHT_SLEEP_FORCE_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_DIS_MASK 0x00000400L
+#define DIO_MEM_PWR_CTRL2__AFMT3_LIGHT_SLEEP_FORCE_MASK 0x00000800L
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_DIS_MASK 0x00001000L
+#define DIO_MEM_PWR_CTRL2__AFMT4_LIGHT_SLEEP_FORCE_MASK 0x00002000L
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_DIS_MASK 0x00004000L
+#define DIO_MEM_PWR_CTRL2__AFMT5_LIGHT_SLEEP_FORCE_MASK 0x00008000L
+#define DIO_MEM_PWR_CTRL2__DPA_LIGHT_SLEEP_FORCE_MASK 0x01000000L
+#define DIO_MEM_PWR_CTRL2__DPB_LIGHT_SLEEP_FORCE_MASK 0x02000000L
+#define DIO_MEM_PWR_CTRL2__DPC_LIGHT_SLEEP_FORCE_MASK 0x04000000L
+#define DIO_MEM_PWR_CTRL2__DPD_LIGHT_SLEEP_FORCE_MASK 0x08000000L
+#define DIO_MEM_PWR_CTRL2__DPE_LIGHT_SLEEP_FORCE_MASK 0x10000000L
+#define DIO_MEM_PWR_CTRL2__DPF_LIGHT_SLEEP_FORCE_MASK 0x20000000L
+#define DIO_MEM_PWR_CTRL2__DPG_LIGHT_SLEEP_FORCE_MASK 0x40000000L
+//DIO_CLK_CNTL
+#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS__SHIFT 0x5
+#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS__SHIFT 0x8
+#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS__SHIFT 0x18
+#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS__SHIFT 0x19
+#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS__SHIFT 0x1a
+#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS__SHIFT 0x1b
+#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS__SHIFT 0x1c
+#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS__SHIFT 0x1d
+#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS__SHIFT 0x1e
+#define DIO_CLK_CNTL__DISPCLK_R_DIO_GATE_DIS_MASK 0x00000020L
+#define DIO_CLK_CNTL__DISPCLK_G_DACA_GATE_DIS_MASK 0x00000100L
+#define DIO_CLK_CNTL__REFCLK_R_DIO_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGA_GATE_DIS_MASK 0x01000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGB_GATE_DIS_MASK 0x02000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGC_GATE_DIS_MASK 0x04000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGD_GATE_DIS_MASK 0x08000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGE_GATE_DIS_MASK 0x10000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGF_GATE_DIS_MASK 0x20000000L
+#define DIO_CLK_CNTL__DISPCLK_G_DIGG_GATE_DIS_MASK 0x40000000L
+//DIO_MEM_PWR_CTRL3
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS__SHIFT 0x0
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE__SHIFT 0x1
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS__SHIFT 0x3
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE__SHIFT 0x4
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS__SHIFT 0x6
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE__SHIFT 0x7
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS__SHIFT 0x9
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE__SHIFT 0xa
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS__SHIFT 0xc
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE__SHIFT 0xd
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS__SHIFT 0xf
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE__SHIFT 0x10
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_DIS_MASK 0x00000001L
+#define DIO_MEM_PWR_CTRL3__DME0_MEM_PWR_FORCE_MASK 0x00000006L
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_DIS_MASK 0x00000008L
+#define DIO_MEM_PWR_CTRL3__DME1_MEM_PWR_FORCE_MASK 0x00000030L
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_DIS_MASK 0x00000040L
+#define DIO_MEM_PWR_CTRL3__DME2_MEM_PWR_FORCE_MASK 0x00000180L
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_DIS_MASK 0x00000200L
+#define DIO_MEM_PWR_CTRL3__DME3_MEM_PWR_FORCE_MASK 0x00000C00L
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_DIS_MASK 0x00001000L
+#define DIO_MEM_PWR_CTRL3__DME4_MEM_PWR_FORCE_MASK 0x00006000L
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_DIS_MASK 0x00008000L
+#define DIO_MEM_PWR_CTRL3__DME5_MEM_PWR_FORCE_MASK 0x00030000L
+//DIO_POWER_MANAGEMENT_CNTL
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET__SHIFT 0x0
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF__SHIFT 0x8
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ASSERT_RESET_MASK 0x00000001L
+#define DIO_POWER_MANAGEMENT_CNTL__PM_ALL_BUSY_OFF_MASK 0x00000100L
+
+//DIO_MEM_PWR_STATUS1
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE__SHIFT 0x0
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE__SHIFT 0x2
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE__SHIFT 0x4
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE__SHIFT 0x6
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE__SHIFT 0x8
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE__SHIFT 0xa
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE__SHIFT 0x10
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE__SHIFT 0x12
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE__SHIFT 0x14
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE__SHIFT 0x16
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE__SHIFT 0x18
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE__SHIFT 0x1a
+#define DIO_MEM_PWR_STATUS1__AFMT0_MEM_PWR_STATE_MASK 0x00000001L
+#define DIO_MEM_PWR_STATUS1__AFMT1_MEM_PWR_STATE_MASK 0x00000004L
+#define DIO_MEM_PWR_STATUS1__AFMT2_MEM_PWR_STATE_MASK 0x00000010L
+#define DIO_MEM_PWR_STATUS1__AFMT3_MEM_PWR_STATE_MASK 0x00000040L
+#define DIO_MEM_PWR_STATUS1__AFMT4_MEM_PWR_STATE_MASK 0x00000100L
+#define DIO_MEM_PWR_STATUS1__AFMT5_MEM_PWR_STATE_MASK 0x00000400L
+#define DIO_MEM_PWR_STATUS1__DME0_MEM_PWR_STATE_MASK 0x00030000L
+#define DIO_MEM_PWR_STATUS1__DME1_MEM_PWR_STATE_MASK 0x000C0000L
+#define DIO_MEM_PWR_STATUS1__DME2_MEM_PWR_STATE_MASK 0x00300000L
+#define DIO_MEM_PWR_STATUS1__DME3_MEM_PWR_STATE_MASK 0x00C00000L
+#define DIO_MEM_PWR_STATUS1__DME4_MEM_PWR_STATE_MASK 0x03000000L
+#define DIO_MEM_PWR_STATUS1__DME5_MEM_PWR_STATE_MASK 0x0C000000L
+//DIO_CLK_CNTL2
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS__SHIFT 0x7
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS__SHIFT 0x8
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS__SHIFT 0x9
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS__SHIFT 0x11
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS__SHIFT 0x12
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS__SHIFT 0x13
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS__SHIFT 0x14
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS__SHIFT 0x15
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS__SHIFT 0x16
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS__SHIFT 0x17
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTA_GATE_DIS_MASK 0x00000080L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTB_GATE_DIS_MASK 0x00000100L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTC_GATE_DIS_MASK 0x00000200L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTD_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTE_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTF_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL2__SOCCLK_G_AFMTG_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL2__SYMCLKA_FE_G_AFMT_GATE_DIS_MASK 0x00020000L
+#define DIO_CLK_CNTL2__SYMCLKB_FE_G_AFMT_GATE_DIS_MASK 0x00040000L
+#define DIO_CLK_CNTL2__SYMCLKC_FE_G_AFMT_GATE_DIS_MASK 0x00080000L
+#define DIO_CLK_CNTL2__SYMCLKD_FE_G_AFMT_GATE_DIS_MASK 0x00100000L
+#define DIO_CLK_CNTL2__SYMCLKE_FE_G_AFMT_GATE_DIS_MASK 0x00200000L
+#define DIO_CLK_CNTL2__SYMCLKF_FE_G_AFMT_GATE_DIS_MASK 0x00400000L
+#define DIO_CLK_CNTL2__SYMCLKG_FE_G_AFMT_GATE_DIS_MASK 0x00800000L
+//DIO_CLK_CNTL3
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS__SHIFT 0x0
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS__SHIFT 0x1
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS__SHIFT 0x2
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS__SHIFT 0x3
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS__SHIFT 0x4
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS__SHIFT 0x5
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS__SHIFT 0x6
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS__SHIFT 0xa
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS__SHIFT 0xb
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS__SHIFT 0xc
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS__SHIFT 0xd
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS__SHIFT 0xe
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS__SHIFT 0xf
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS__SHIFT 0x10
+#define DIO_CLK_CNTL3__SYMCLKA_FE_G_TMDS_GATE_DIS_MASK 0x00000001L
+#define DIO_CLK_CNTL3__SYMCLKB_FE_G_TMDS_GATE_DIS_MASK 0x00000002L
+#define DIO_CLK_CNTL3__SYMCLKC_FE_G_TMDS_GATE_DIS_MASK 0x00000004L
+#define DIO_CLK_CNTL3__SYMCLKD_FE_G_TMDS_GATE_DIS_MASK 0x00000008L
+#define DIO_CLK_CNTL3__SYMCLKE_FE_G_TMDS_GATE_DIS_MASK 0x00000010L
+#define DIO_CLK_CNTL3__SYMCLKF_FE_G_TMDS_GATE_DIS_MASK 0x00000020L
+#define DIO_CLK_CNTL3__SYMCLKG_FE_G_TMDS_GATE_DIS_MASK 0x00000040L
+#define DIO_CLK_CNTL3__SYMCLKA_G_TMDS_GATE_DIS_MASK 0x00000400L
+#define DIO_CLK_CNTL3__SYMCLKB_G_TMDS_GATE_DIS_MASK 0x00000800L
+#define DIO_CLK_CNTL3__SYMCLKC_G_TMDS_GATE_DIS_MASK 0x00001000L
+#define DIO_CLK_CNTL3__SYMCLKD_G_TMDS_GATE_DIS_MASK 0x00002000L
+#define DIO_CLK_CNTL3__SYMCLKE_G_TMDS_GATE_DIS_MASK 0x00004000L
+#define DIO_CLK_CNTL3__SYMCLKF_G_TMDS_GATE_DIS_MASK 0x00008000L
+#define DIO_CLK_CNTL3__SYMCLKG_G_TMDS_GATE_DIS_MASK 0x00010000L
+//DIO_HDMI_RXSTATUS_TIMER_CONTROL
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE__SHIFT 0x0
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE__SHIFT 0x4
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS__SHIFT 0x8
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK__SHIFT 0xc
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL__SHIFT 0x10
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_ENABLE_MASK 0x00000001L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_TYPE_MASK 0x00000010L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_STATUS_MASK 0x00000100L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_MASK_MASK 0x00001000L
+#define DIO_HDMI_RXSTATUS_TIMER_CONTROL__DIO_HDMI_RXSTATUS_TIMER_INTERVAL_MASK 0x0FFF0000L
+//DIO_GENERIC_INTERRUPT_MESSAGE
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE__SHIFT 0x1
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_STATUS_MASK 0x00000001L
+#define DIO_GENERIC_INTERRUPT_MESSAGE__DIO_GENERIC_INTERRUPT_MESSAGE_MASK 0xFFFFFFFEL
+//DIO_GENERIC_INTERRUPT_CLEAR
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR__SHIFT 0x0
+#define DIO_GENERIC_INTERRUPT_CLEAR__DIO_GENERIC_INTERRUPT_CLEAR_MASK 0x00000001L
+
+
+// addressBlock: dce_dc_dio_hpd0_dispdec
+//HPD0_DC_HPD_INT_STATUS
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD0_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD0_DC_HPD_INT_CONTROL
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD0_DC_HPD_CONTROL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD0_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD0_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD0_DC_HPD_FAST_TRAIN_CNTL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD0_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD0_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD0_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
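/*
 * Usage sketch (editorial illustration only, not part of the generated
 * header or of the patch): one plausible way to consume the HPD0 fields
 * above. Helper names are invented for the example, and the assumption
 * that setting the *_INT_ACK bits acknowledges a pending interrupt follows
 * only from the field naming and is not confirmed here.
 */
#include <stdint.h>
#include <stdbool.h>

/* True when HPD0 currently senses a connected sink. */
static inline bool hpd0_sense(uint32_t int_status)
{
	return (int_status & HPD0_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK) != 0;
}

/* Read-modify-write sketch: OR the ack bits into the current control word. */
static inline uint32_t hpd0_ack_ints(uint32_t int_control)
{
	return int_control |
	       HPD0_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK |
	       HPD0_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK;
}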
+
+
+// addressBlock: dce_dc_dio_hpd1_dispdec
+//HPD1_DC_HPD_INT_STATUS
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS__SHIFT 0x0
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE__SHIFT 0x1
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED__SHIFT 0x4
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS__SHIFT 0x8
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL__SHIFT 0xc
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL__SHIFT 0x18
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_INT_STATUS_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_MASK 0x00000002L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_SENSE_DELAYED_MASK 0x00000010L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_RX_INT_STATUS_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_CON_TIMER_VAL_MASK 0x000FF000L
+#define HPD1_DC_HPD_INT_STATUS__DC_HPD_TOGGLE_FILT_DISCON_TIMER_VAL_MASK 0xFF000000L
+//HPD1_DC_HPD_INT_CONTROL
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK__SHIFT 0x0
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY__SHIFT 0x8
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN__SHIFT 0x10
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK__SHIFT 0x14
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN__SHIFT 0x18
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_ACK_MASK 0x00000001L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_POLARITY_MASK 0x00000100L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_INT_EN_MASK 0x00010000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_ACK_MASK 0x00100000L
+#define HPD1_DC_HPD_INT_CONTROL__DC_HPD_RX_INT_EN_MASK 0x01000000L
+//HPD1_DC_HPD_CONTROL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER__SHIFT 0x0
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER__SHIFT 0x10
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_CONTROL__DC_HPD_CONNECTION_TIMER_MASK 0x00001FFFL
+#define HPD1_DC_HPD_CONTROL__DC_HPD_RX_INT_TIMER_MASK 0x03FF0000L
+#define HPD1_DC_HPD_CONTROL__DC_HPD_EN_MASK 0x10000000L
+//HPD1_DC_HPD_FAST_TRAIN_CNTL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY__SHIFT 0xc
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN__SHIFT 0x18
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN__SHIFT 0x1c
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_DELAY_MASK 0x000FF000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_AUX_TX_EN_MASK 0x01000000L
+#define HPD1_DC_HPD_FAST_TRAIN_CNTL__DC_HPD_CONNECT_FAST_TRAIN_EN_MASK 0x10000000L
+//HPD1_DC_HPD_TOGGLE_FILT_CNTL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY__SHIFT 0x0
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY__SHIFT 0x14
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_CONNECT_INT_DELAY_MASK 0x000000FFL
+#define HPD1_DC_HPD_TOGGLE_FILT_CNTL__DC_HPD_DISCONNECT_INT_DELAY_MASK 0x0FF00000L
+
+// addressBlock: dce_dc_dio_dp_aux0_dispdec
+//DP_AUX0_AUX_CONTROL
+#define DP_AUX0_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX0_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX0_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX0_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX0_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX0_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX0_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX0_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX0_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX0_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX0_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX0_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX0_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX0_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX0_AUX_SW_CONTROL
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX0_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX0_AUX_ARB_CONTROL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX0_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX0_AUX_INTERRUPT_CONTROL
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX0_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+//DP_AUX0_AUX_SW_STATUS
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX0_AUX_LS_STATUS
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX0_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX0_AUX_SW_DATA
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX0_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX0_AUX_LS_DATA
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX0_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX0_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_TX_CONTROL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX0_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX0_AUX_DPHY_RX_CONTROL1
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX0_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX0_AUX_DPHY_TX_STATUS
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX0_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX0_AUX_DPHY_RX_STATUS
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX0_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
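/*
 * Usage sketch (editorial illustration only, not part of the generated
 * header or of the patch): extracting a multi-bit field means masking
 * first, then shifting down. Helper names are invented for the example,
 * and the retry condition is only one plausible policy, not a documented
 * requirement.
 */
#include <stdint.h>
#include <stdbool.h>

/* Number of reply bytes reported in a DP_AUX0_AUX_SW_STATUS readback. */
static inline uint32_t dp_aux0_reply_bytes(uint32_t sw_status)
{
	return (sw_status & DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK) >>
	       DP_AUX0_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT;
}

/* Example retry condition: receiver timeout or receiver overflow was flagged. */
static inline bool dp_aux0_should_retry(uint32_t sw_status)
{
	return (sw_status & (DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK |
			     DP_AUX0_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK)) != 0;
}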
+
+// addressBlock: dce_dc_dio_dp_aux1_dispdec
+//DP_AUX1_AUX_CONTROL
+#define DP_AUX1_AUX_CONTROL__AUX_EN__SHIFT 0x0
+#define DP_AUX1_AUX_CONTROL__AUX_RESET__SHIFT 0x4
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE__SHIFT 0x5
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN__SHIFT 0x8
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE__SHIFT 0xc
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON__SHIFT 0x10
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN__SHIFT 0x12
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL__SHIFT 0x14
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN__SHIFT 0x18
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE__SHIFT 0x1c
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN__SHIFT 0x1d
+#define DP_AUX1_AUX_CONTROL__SPARE_0__SHIFT 0x1e
+#define DP_AUX1_AUX_CONTROL__SPARE_1__SHIFT 0x1f
+#define DP_AUX1_AUX_CONTROL__AUX_EN_MASK 0x00000001L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_MASK 0x00000010L
+#define DP_AUX1_AUX_CONTROL__AUX_RESET_DONE_MASK 0x00000020L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_READ_EN_MASK 0x00000100L
+#define DP_AUX1_AUX_CONTROL__AUX_LS_UPDATE_DISABLE_MASK 0x00001000L
+#define DP_AUX1_AUX_CONTROL__AUX_IGNORE_HPD_DISCON_MASK 0x00010000L
+#define DP_AUX1_AUX_CONTROL__AUX_MODE_DET_EN_MASK 0x00040000L
+#define DP_AUX1_AUX_CONTROL__AUX_HPD_SEL_MASK 0x00700000L
+#define DP_AUX1_AUX_CONTROL__AUX_IMPCAL_REQ_EN_MASK 0x01000000L
+#define DP_AUX1_AUX_CONTROL__AUX_TEST_MODE_MASK 0x10000000L
+#define DP_AUX1_AUX_CONTROL__AUX_DEGLITCH_EN_MASK 0x20000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_0_MASK 0x40000000L
+#define DP_AUX1_AUX_CONTROL__SPARE_1_MASK 0x80000000L
+//DP_AUX1_AUX_SW_CONTROL
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO__SHIFT 0x0
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG__SHIFT 0x2
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY__SHIFT 0x4
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES__SHIFT 0x10
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_GO_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_LS_READ_TRIG_MASK 0x00000004L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_START_DELAY_MASK 0x000000F0L
+#define DP_AUX1_AUX_SW_CONTROL__AUX_SW_WR_BYTES_MASK 0x001F0000L
+//DP_AUX1_AUX_ARB_CONTROL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY__SHIFT 0x0
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS__SHIFT 0x2
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO__SHIFT 0x8
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO__SHIFT 0xa
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ__SHIFT 0x10
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG__SHIFT 0x11
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ__SHIFT 0x18
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG__SHIFT 0x19
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_ARB_PRIORITY_MASK 0x00000003L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_REG_RW_CNTL_STATUS_MASK 0x0000000CL
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_SW_GO_MASK 0x00000100L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_NO_QUEUED_LS_GO_MASK 0x00000400L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_PENDING_USE_AUX_REG_REQ_MASK 0x00010000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_SW_DONE_USING_AUX_REG_MASK 0x00020000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_PENDING_USE_AUX_REG_REQ_MASK 0x01000000L
+#define DP_AUX1_AUX_ARB_CONTROL__AUX_DMCU_DONE_USING_AUX_REG_MASK 0x02000000L
+//DP_AUX1_AUX_INTERRUPT_CONTROL
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT__SHIFT 0x0
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK__SHIFT 0x1
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK__SHIFT 0x2
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT__SHIFT 0x4
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK__SHIFT 0x5
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK__SHIFT 0x6
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_INT_MASK 0x00000001L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_ACK_MASK 0x00000002L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_SW_DONE_MASK_MASK 0x00000004L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_INT_MASK 0x00000010L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_ACK_MASK 0x00000020L
+#define DP_AUX1_AUX_INTERRUPT_CONTROL__AUX_LS_DONE_MASK_MASK 0x00000040L
+//DP_AUX1_AUX_SW_STATUS
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS__SHIFT 0x1d
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_SW_STATUS__AUX_ARB_STATUS_MASK 0xE0000000L
+//DP_AUX1_AUX_LS_STATUS
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE__SHIFT 0x0
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ__SHIFT 0x1
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT__SHIFT 0x7
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW__SHIFT 0x8
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON__SHIFT 0x9
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE__SHIFT 0xa
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE__SHIFT 0xb
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL__SHIFT 0xc
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP__SHIFT 0xe
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L__SHIFT 0x11
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H__SHIFT 0x12
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START__SHIFT 0x13
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET__SHIFT 0x14
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H__SHIFT 0x16
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L__SHIFT 0x17
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT__SHIFT 0x18
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ__SHIFT 0x1d
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED__SHIFT 0x1e
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK__SHIFT 0x1f
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_DONE_MASK 0x00000001L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REQ_MASK 0x00000002L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_TIMEOUT_MASK 0x00000080L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_OVERFLOW_MASK 0x00000100L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_HPD_DISCON_MASK 0x00000200L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_PARTIAL_BYTE_MASK 0x00000400L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_NON_AUX_MODE_MASK 0x00000800L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_MIN_COUNT_VIOL_MASK 0x00001000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_STOP_MASK 0x00004000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_L_MASK 0x00020000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_SYNC_INVALID_H_MASK 0x00040000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_INVALID_START_MASK 0x00080000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_NO_DET_MASK 0x00100000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_H_MASK 0x00400000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_RX_RECV_INVALID_L_MASK 0x00800000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_REPLY_BYTE_COUNT_MASK 0x1F000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_CP_IRQ_MASK 0x20000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_MASK 0x40000000L
+#define DP_AUX1_AUX_LS_STATUS__AUX_LS_UPDATED_ACK_MASK 0x80000000L
+//DP_AUX1_AUX_SW_DATA
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW__SHIFT 0x0
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE__SHIFT 0x1f
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_RW_MASK 0x00000001L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK 0x001F0000L
+#define DP_AUX1_AUX_SW_DATA__AUX_SW_AUTOINCREMENT_DISABLE_MASK 0x80000000L
+//DP_AUX1_AUX_LS_DATA
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA__SHIFT 0x8
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX__SHIFT 0x10
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_DATA_MASK 0x0000FF00L
+#define DP_AUX1_AUX_LS_DATA__AUX_LS_INDEX_MASK 0x001F0000L
+//DP_AUX1_AUX_DPHY_TX_REF_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_SEL_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_RATE_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_REF_CONTROL__AUX_TX_REF_DIV_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_TX_CONTROL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME__SHIFT 0x6
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MASK 0x0000000FL
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_LEN_MUL_MASK 0x00000030L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_OE_ASSERT_TIME_MASK 0x00000040L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_TX_PRECHARGE_SYMBOLS_MASK 0x00003F00L
+#define DP_AUX1_AUX_DPHY_TX_CONTROL__AUX_MODE_DET_CHECK_DELAY_MASK 0x00070000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN__SHIFT 0xc
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT__SHIFT 0x11
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START__SHIFT 0x12
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP__SHIFT 0x13
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN__SHIFT 0x14
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD__SHIFT 0x1c
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_START_WINDOW_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_RECEIVE_WINDOW_MASK 0x00000700L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_HALF_SYM_DETECT_LEN_MASK 0x00003000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_TRANSITION_FILTER_EN_MASK 0x00010000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_PHASE_DETECT_MASK 0x00020000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_START_MASK 0x00040000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_ALLOW_BELOW_THRESHOLD_STOP_MASK 0x00080000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_PHASE_DETECT_LEN_MASK 0x00300000L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL0__AUX_RX_DETECTION_THRESHOLD_MASK 0x70000000L
+//DP_AUX1_AUX_DPHY_RX_CONTROL1
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL__SHIFT 0xf
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_PRECHARGE_SKIP_MASK 0x000000FFL
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MASK 0x00007F00L
+#define DP_AUX1_AUX_DPHY_RX_CONTROL1__AUX_RX_TIMEOUT_LEN_MUL_MASK 0x00018000L
+//DP_AUX1_AUX_DPHY_TX_STATUS
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE__SHIFT 0x4
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_ACTIVE_MASK 0x00000001L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_STATE_MASK 0x00000070L
+#define DP_AUX1_AUX_DPHY_TX_STATUS__AUX_TX_HALF_SYM_PERIOD_MASK 0x01FF0000L
+//DP_AUX1_AUX_DPHY_RX_STATUS
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE__SHIFT 0x0
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT__SHIFT 0x8
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT__SHIFT 0x10
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD__SHIFT 0x15
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_STATE_MASK 0x00000007L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_SYNC_VALID_COUNT_MASK 0x00001F00L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_FRACT_MASK 0x001F0000L
+#define DP_AUX1_AUX_DPHY_RX_STATUS__AUX_RX_HALF_SYM_PERIOD_MASK 0x3FE00000L
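/*
 * A minimal usage sketch: each register above is described by paired
 * __SHIFT/_MASK macros meant for plain mask-and-shift access to a raw
 * 32-bit register value. The helper names below are hypothetical and only
 * show how the DP_AUX1_AUX_SW_STATUS / AUX_SW_DATA encodings compose.
 */
#include <stdint.h>

/* Extract AUX_SW_REPLY_BYTE_COUNT (bits 28:24) from a raw AUX_SW_STATUS value. */
static inline uint32_t dp_aux1_reply_byte_count(uint32_t aux_sw_status)
{
	return (aux_sw_status & DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT_MASK) >>
	       DP_AUX1_AUX_SW_STATUS__AUX_SW_REPLY_BYTE_COUNT__SHIFT;
}

/* Program a field into its register position, e.g. AUX_SW_INDEX in AUX_SW_DATA. */
static inline uint32_t dp_aux1_set_sw_index(uint32_t aux_sw_data, uint32_t index)
{
	aux_sw_data &= ~DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK;
	aux_sw_data |= (index << DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX__SHIFT) &
		       DP_AUX1_AUX_SW_DATA__AUX_SW_INDEX_MASK;
	return aux_sw_data;
}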
+
+
+// addressBlock: dce_dc_dio_dig0_dispdec
+//DIG0_DIG_FE_CNTL
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG0_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG0_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG0_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG0_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG0_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG0_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG0_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG0_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG0_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG0_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG0_DIG_OUTPUT_CRC_CNTL
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG0_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG0_DIG_OUTPUT_CRC_RESULT
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG0_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG0_DIG_CLOCK_PATTERN
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG0_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG0_DIG_TEST_PATTERN
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG0_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG0_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG0_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG0_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG0_DIG_RANDOM_PATTERN_SEED
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG0_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG0_DIG_FIFO_STATUS
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG0_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG0_HDMI_METADATA_PACKET_CONTROL
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG0_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_CONTROL
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG0_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG0_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG0_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG0_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG0_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG0_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG0_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG0_HDMI_STATUS
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG0_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG0_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG0_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG0_HDMI_AUDIO_PACKET_CONTROL
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG0_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG0_HDMI_ACR_PACKET_CONTROL
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG0_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG0_HDMI_VBI_PACKET_CONTROL
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG0_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG0_HDMI_INFOFRAME_CONTROL0
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG0_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG0_HDMI_INFOFRAME_CONTROL1
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG0_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG0_HDMI_GC
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG0_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG0_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG0_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG0_AFMT_ISRC1_0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG0_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG0_AFMT_ISRC1_1
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_2
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_3
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG0_AFMT_ISRC1_4
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_1
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_2
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG0_AFMT_ISRC2_3
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG0_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_DB_CONTROL
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG0_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG0_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG0_DME_CONTROL
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG0_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG0_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG0_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG0_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG0_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG0_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG0_AFMT_MPEG_INFO0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG0_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG0_AFMT_MPEG_INFO1
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG0_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG0_AFMT_GENERIC_HDR
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_1
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_2
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_3
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_4
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_5
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_6
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG0_AFMT_GENERIC_7
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG0_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG0_HDMI_ACR_32_0
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG0_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_32_1
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG0_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_44_0
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG0_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_44_1
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG0_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_48_0
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG0_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_48_1
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG0_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG0_HDMI_ACR_STATUS_0
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG0_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG0_HDMI_ACR_STATUS_1
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG0_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
+//DIG0_AFMT_AUDIO_INFO0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG0_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG0_AFMT_AUDIO_INFO1
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG0_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG0_AFMT_60958_0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG0_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG0_AFMT_60958_1
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG0_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG0_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_CONTROL
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG0_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG0_AFMT_RAMP_CONTROL0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG0_AFMT_RAMP_CONTROL1
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG0_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG0_AFMT_RAMP_CONTROL2
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_RAMP_CONTROL3
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG0_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG0_AFMT_60958_2
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG0_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG0_AFMT_AUDIO_CRC_RESULT
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG0_AFMT_STATUS
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG0_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG0_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG0_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG0_AFMT_AUDIO_PACKET_CONTROL
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG0_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG0_AFMT_VBI_PACKET_CONTROL
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG0_AFMT_INFOFRAME_CONTROL0
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG0_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG0_AFMT_AUDIO_SRC_CONTROL
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG0_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG0_DIG_BE_CNTL
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG0_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG0_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG0_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG0_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG0_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG0_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG0_DIG_BE_EN_CNTL
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG0_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG0_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG0_TMDS_CNTL
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG0_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG0_TMDS_CONTROL_CHAR
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG0_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG0_TMDS_CONTROL0_FEEDBACK
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG0_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG0_TMDS_STEREOSYNC_CTL_SEL
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG0_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG0_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG0_TMDS_CTL_BITS
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG0_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG0_TMDS_DCBALANCER_CONTROL
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG0_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG0_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG0_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG0_TMDS_CTL0_1_GEN_CNTL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG0_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG0_TMDS_CTL2_3_GEN_CNTL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG0_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG0_DIG_VERSION
+#define DIG0_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG0_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG0_DIG_LANE_ENABLE
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG0_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG0_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG0_AFMT_CNTL
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG0_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG0_AFMT_VBI_PACKET_CONTROL1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG0_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG0_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG0_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
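These DIG0 field definitions are consumed in SHIFT/MASK pairs. As an illustration only (not part of this patch), the snippet below composes a DIG0_DIG_LANE_ENABLE value that enables the four data lanes and the clock lane, assuming this header is included; the register offset and the MMIO write itself live in the driver's companion offset header and its own register-access helpers, which are deliberately left out here.

#include <stdint.h>

/* Illustrative sketch: OR the lane-enable MASK macros into an existing
 * DIG0_DIG_LANE_ENABLE value. Reading the old value and writing the new
 * one back is left to the driver's MMIO layer. */
static inline uint32_t dig0_lane_enable_all(uint32_t old)
{
	return old |
	       DIG0_DIG_LANE_ENABLE__DIG_LANE0EN_MASK |
	       DIG0_DIG_LANE_ENABLE__DIG_LANE1EN_MASK |
	       DIG0_DIG_LANE_ENABLE__DIG_LANE2EN_MASK |
	       DIG0_DIG_LANE_ENABLE__DIG_LANE3EN_MASK |
	       DIG0_DIG_LANE_ENABLE__DIG_CLK_EN_MASK;
}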
+
+
+// addressBlock: dce_dc_dio_dp0_dispdec
+//DP0_DP_LINK_CNTL
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP0_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP0_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP0_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP0_DP_PIXEL_FORMAT
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP0_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP0_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP0_DP_MSA_COLORIMETRY
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP0_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP0_DP_CONFIG
+#define DP0_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP0_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP0_DP_VID_STREAM_CNTL
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP0_DP_STEER_FIFO
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP0_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP0_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP0_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP0_DP_MSA_MISC
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP0_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP0_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP0_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP0_DP_VID_TIMING
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP0_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP0_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP0_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP0_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP0_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP0_DP_VID_N
+#define DP0_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP0_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP0_DP_VID_M
+#define DP0_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP0_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
+//DP0_DP_LINK_FRAMING_CNTL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP0_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP0_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP0_DP_HBR2_EYE_PATTERN
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP0_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP0_DP_VID_MSA_VBID
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP0_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP0_DP_VID_INTERRUPT_CNTL
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP0_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP0_DP_DPHY_CNTL
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP0_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP0_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP0_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP0_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP0_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP0_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP0_DP_DPHY_SYM0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP0_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM1
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP0_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP0_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP0_DP_DPHY_SYM2
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP0_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP0_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP0_DP_DPHY_8B10B_CNTL
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP0_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP0_DP_DPHY_PRBS_CNTL
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP0_DP_DPHY_SCRAM_CNTL
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP0_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP0_DP_DPHY_CRC_EN
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP0_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP0_DP_DPHY_CRC_CNTL
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP0_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP0_DP_DPHY_CRC_RESULT
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP0_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP0_DP_DPHY_CRC_MST_CNTL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP0_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP0_DP_DPHY_CRC_MST_STATUS
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP0_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP0_DP_DPHY_FAST_TRAINING
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP0_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP0_DP_DPHY_FAST_TRAINING_STATUS
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP0_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP0_DP_SEC_CNTL
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP0_DP_SEC_CNTL1
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING1
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP0_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING2
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP0_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING3
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP0_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP0_DP_SEC_FRAMING4
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP0_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP0_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP0_DP_SEC_AUD_N
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_N_READBACK
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP0_DP_SEC_AUD_M_READBACK
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP0_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP0_DP_SEC_TIMESTAMP
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP0_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP0_DP_SEC_PACKET_CNTL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP0_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP0_DP_MSE_RATE_CNTL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP0_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP0_DP_CP_MSE_STATUS
+//DP0_DP_MSE_RATE_UPDATE
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP0_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP0_DP_MSE_SAT0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP0_DP_MSE_SAT_UPDATE
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP0_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP0_DP_MSE_LINK_TIMING
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP0_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP0_DP_MSE_MISC_CNTL
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP0_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
+//DP0_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP0_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP0_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP0_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP0_DP_MSE_SAT0_STATUS
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT1_STATUS
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP0_DP_MSE_SAT2_STATUS
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP0_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP0_DP_MSA_TIMING_PARAM1
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM2
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP0_DP_MSA_TIMING_PARAM3
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP0_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP0_DP_MSA_TIMING_PARAM4
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP0_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP0_DP_DSC_CNTL
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP0_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP0_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP0_DP_SEC_CNTL2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP0_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP0_DP_SEC_CNTL3
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL4
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL5
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP0_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_CNTL6
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP0_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP0_DP_SEC_CNTL7
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP0_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP0_DP_DB_CNTL
+#define DP0_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP0_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP0_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP0_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP0_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP0_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP0_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP0_DP_MSA_VBID_MISC
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP0_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP0_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP0_DP_SEC_METADATA_TRANSMISSION
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP0_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP0_DP_DSC_BYTES_PER_PIXEL
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP0_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
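The DP0 definitions are decoded the same way in reverse: mask first, then shift down. A minimal sketch, again illustrative rather than part of the patch, assuming this header is included and that the raw register value comes from the driver's usual MMIO read path:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch: extract two fields from a raw DP0_DP_VID_STREAM_CNTL
 * value using the SHIFT/MASK pairs defined above. */
static inline bool dp0_vid_stream_enabled(uint32_t vid_stream_cntl)
{
	return vid_stream_cntl & DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK;
}

static inline uint32_t dp0_vid_stream_status(uint32_t vid_stream_cntl)
{
	return (vid_stream_cntl & DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK) >>
	       DP0_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT;
}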
+
+
+// addressBlock: dce_dc_dio_dig1_dispdec
+//DIG1_DIG_FE_CNTL
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT__SHIFT 0x0
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT__SHIFT 0x4
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN__SHIFT 0x8
+#define DIG1_DIG_FE_CNTL__DIG_START__SHIFT 0xa
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT__SHIFT 0xc
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT__SHIFT 0x10
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN__SHIFT 0x12
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED__SHIFT 0x13
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON__SHIFT 0x18
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING__SHIFT 0x1c
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT__SHIFT 0x1e
+#define DIG1_DIG_FE_CNTL__DIG_SOURCE_SELECT_MASK 0x00000007L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_SELECT_MASK 0x00000070L
+#define DIG1_DIG_FE_CNTL__DIG_STEREOSYNC_GATE_EN_MASK 0x00000100L
+#define DIG1_DIG_FE_CNTL__DIG_START_MASK 0x00000400L
+#define DIG1_DIG_FE_CNTL__DIG_DIGITAL_BYPASS_SELECT_MASK 0x00007000L
+#define DIG1_DIG_FE_CNTL__DIG_INPUT_PIXEL_SELECT_MASK 0x00030000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_EN_MASK 0x00040000L
+#define DIG1_DIG_FE_CNTL__DOLBY_VISION_METADATA_PACKET_MISSED_MASK 0x00080000L
+#define DIG1_DIG_FE_CNTL__DIG_SYMCLK_FE_ON_MASK 0x01000000L
+#define DIG1_DIG_FE_CNTL__TMDS_PIXEL_ENCODING_MASK 0x10000000L
+#define DIG1_DIG_FE_CNTL__TMDS_COLOR_FORMAT_MASK 0xC0000000L
+//DIG1_DIG_OUTPUT_CRC_CNTL
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL__SHIFT 0x4
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL__SHIFT 0x8
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_EN_MASK 0x00000001L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_LINK_SEL_MASK 0x00000010L
+#define DIG1_DIG_OUTPUT_CRC_CNTL__DIG_OUTPUT_CRC_DATA_SEL_MASK 0x00000300L
+//DIG1_DIG_OUTPUT_CRC_RESULT
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT__SHIFT 0x0
+#define DIG1_DIG_OUTPUT_CRC_RESULT__DIG_OUTPUT_CRC_RESULT_MASK 0x3FFFFFFFL
+//DIG1_DIG_CLOCK_PATTERN
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN__SHIFT 0x0
+#define DIG1_DIG_CLOCK_PATTERN__DIG_CLOCK_PATTERN_MASK 0x000003FFL
+//DIG1_DIG_TEST_PATTERN
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN__SHIFT 0x0
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL__SHIFT 0x1
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN__SHIFT 0x4
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET__SHIFT 0x5
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN__SHIFT 0x6
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN__SHIFT 0x10
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_OUT_EN_MASK 0x00000001L
+#define DIG1_DIG_TEST_PATTERN__DIG_HALF_CLOCK_PATTERN_SEL_MASK 0x00000002L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_OUT_EN_MASK 0x00000010L
+#define DIG1_DIG_TEST_PATTERN__DIG_RANDOM_PATTERN_RESET_MASK 0x00000020L
+#define DIG1_DIG_TEST_PATTERN__DIG_TEST_PATTERN_EXTERNAL_RESET_EN_MASK 0x00000040L
+#define DIG1_DIG_TEST_PATTERN__DIG_STATIC_TEST_PATTERN_MASK 0x03FF0000L
+//DIG1_DIG_RANDOM_PATTERN_SEED
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED__SHIFT 0x0
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY__SHIFT 0x18
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RANDOM_PATTERN_SEED_MASK 0x00FFFFFFL
+#define DIG1_DIG_RANDOM_PATTERN_SEED__DIG_RAN_PAT_DURING_DE_ONLY_MASK 0x01000000L
+//DIG1_DIG_FIFO_STATUS
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR__SHIFT 0x0
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL__SHIFT 0x1
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL__SHIFT 0x2
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK__SHIFT 0x8
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL__SHIFT 0xa
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL__SHIFT 0x10
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL__SHIFT 0x16
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC__SHIFT 0x1a
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED__SHIFT 0x1d
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE__SHIFT 0x1e
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX__SHIFT 0x1f
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_LEVEL_ERROR_MASK 0x00000001L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_USE_OVERWRITE_LEVEL_MASK 0x00000002L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_OVERWRITE_LEVEL_MASK 0x000000FCL
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_ERROR_ACK_MASK 0x00000100L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CAL_AVERAGE_LEVEL_MASK 0x0000FC00L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MAXIMUM_LEVEL_MASK 0x001F0000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_MINIMUM_LEVEL_MASK 0x03C00000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_READ_CLOCK_SRC_MASK 0x04000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_CALIBRATED_MASK 0x20000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECAL_AVERAGE_MASK 0x40000000L
+#define DIG1_DIG_FIFO_STATUS__DIG_FIFO_FORCE_RECOMP_MINMAX_MASK 0x80000000L
+//DIG1_HDMI_METADATA_PACKET_CONTROL
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x4
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED__SHIFT 0x8
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE__SHIFT 0x10
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000010L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_MISSED_MASK 0x00000100L
+#define DIG1_HDMI_METADATA_PACKET_CONTROL__HDMI_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC6_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL4__HDMI_GENERIC7_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_CONTROL
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE__SHIFT 0x0
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN__SHIFT 0x1
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE__SHIFT 0x2
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED__SHIFT 0x3
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION__SHIFT 0x4
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK__SHIFT 0x8
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK__SHIFT 0x9
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE__SHIFT 0x18
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH__SHIFT 0x1c
+#define DIG1_HDMI_CONTROL__HDMI_KEEPOUT_MODE_MASK 0x00000001L
+#define DIG1_HDMI_CONTROL__HDMI_DATA_SCRAMBLE_EN_MASK 0x00000002L
+#define DIG1_HDMI_CONTROL__HDMI_CLOCK_CHANNEL_RATE_MASK 0x00000004L
+#define DIG1_HDMI_CONTROL__HDMI_NO_EXTRA_NULL_PACKET_FILLED_MASK 0x00000008L
+#define DIG1_HDMI_CONTROL__HDMI_PACKET_GEN_VERSION_MASK 0x00000010L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_ACK_MASK 0x00000100L
+#define DIG1_HDMI_CONTROL__HDMI_ERROR_MASK_MASK 0x00000200L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_ENABLE_MASK 0x01000000L
+#define DIG1_HDMI_CONTROL__HDMI_DEEP_COLOR_DEPTH_MASK 0x30000000L
+//DIG1_HDMI_STATUS
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR__SHIFT 0x10
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR__SHIFT 0x14
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT__SHIFT 0x1b
+#define DIG1_HDMI_STATUS__HDMI_ACTIVE_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_STATUS__HDMI_AUDIO_PACKET_ERROR_MASK 0x00010000L
+#define DIG1_HDMI_STATUS__HDMI_VBI_PACKET_ERROR_MASK 0x00100000L
+#define DIG1_HDMI_STATUS__HDMI_ERROR_INT_MASK 0x08000000L
+//DIG1_HDMI_AUDIO_PACKET_CONTROL
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN__SHIFT 0x4
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS__SHIFT 0x8
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE__SHIFT 0x10
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_DELAY_EN_MASK 0x00000030L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_SEND_MAX_PACKETS_MASK 0x00000100L
+#define DIG1_HDMI_AUDIO_PACKET_CONTROL__HDMI_AUDIO_PACKETS_PER_LINE_MASK 0x001F0000L
+//DIG1_HDMI_ACR_PACKET_CONTROL
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND__SHIFT 0x0
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT__SHIFT 0x1
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT__SHIFT 0x4
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE__SHIFT 0x8
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND__SHIFT 0xc
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE__SHIFT 0x10
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY__SHIFT 0x1f
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SEND_MASK 0x00000001L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_CONT_MASK 0x00000002L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SELECT_MASK 0x00000030L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_SOURCE_MASK 0x00000100L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUTO_SEND_MASK 0x00001000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_N_MULTIPLE_MASK 0x00070000L
+#define DIG1_HDMI_ACR_PACKET_CONTROL__HDMI_ACR_AUDIO_PRIORITY_MASK 0x80000000L
+//DIG1_HDMI_VBI_PACKET_CONTROL
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND__SHIFT 0x0
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND__SHIFT 0x4
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT__SHIFT 0x5
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND__SHIFT 0x8
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT__SHIFT 0x9
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE__SHIFT 0x10
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_NULL_SEND_MASK 0x00000001L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_SEND_MASK 0x00000010L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_GC_CONT_MASK 0x00000020L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_SEND_MASK 0x00000100L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_CONT_MASK 0x00000200L
+#define DIG1_HDMI_VBI_PACKET_CONTROL__HDMI_ISRC_LINE_MASK 0x003F0000L
+//DIG1_HDMI_INFOFRAME_CONTROL0
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND__SHIFT 0x4
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT__SHIFT 0x5
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT__SHIFT 0x9
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_SEND_MASK 0x00000010L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_AUDIO_INFO_CONT_MASK 0x00000020L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_SEND_MASK 0x00000100L
+#define DIG1_HDMI_INFOFRAME_CONTROL0__HDMI_MPEG_INFO_CONT_MASK 0x00000200L
+//DIG1_HDMI_INFOFRAME_CONTROL1
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE__SHIFT 0x8
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE__SHIFT 0x10
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_AUDIO_INFO_LINE_MASK 0x00003F00L
+#define DIG1_HDMI_INFOFRAME_CONTROL1__HDMI_MPEG_INFO_LINE_MASK 0x003F0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT__SHIFT 0x11
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE__SHIFT 0x12
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND__SHIFT 0x14
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT__SHIFT 0x15
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE__SHIFT 0x16
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND__SHIFT 0x18
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT__SHIFT 0x19
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE__SHIFT 0x1a
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND__SHIFT 0x1c
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT__SHIFT 0x1d
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE__SHIFT 0x1e
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_CONT_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC0_LINE_REFERENCE_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_CONT_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC1_LINE_REFERENCE_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_CONT_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC2_LINE_REFERENCE_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_CONT_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC3_LINE_REFERENCE_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_SEND_MASK 0x00010000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_CONT_MASK 0x00020000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC4_LINE_REFERENCE_MASK 0x00040000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_SEND_MASK 0x00100000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_CONT_MASK 0x00200000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC5_LINE_REFERENCE_MASK 0x00400000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_SEND_MASK 0x01000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_CONT_MASK 0x02000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC6_LINE_REFERENCE_MASK 0x04000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_SEND_MASK 0x10000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_CONT_MASK 0x20000000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL0__HDMI_GENERIC7_LINE_REFERENCE_MASK 0x40000000L
+//DIG1_HDMI_GC
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT 0x0
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT__SHIFT 0x2
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE__SHIFT 0x4
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE__SHIFT 0x8
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE__SHIFT 0xc
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK 0x00000001L
+#define DIG1_HDMI_GC__HDMI_GC_AVMUTE_CONT_MASK 0x00000004L
+#define DIG1_HDMI_GC__HDMI_DEFAULT_PHASE_MASK 0x00000010L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_MASK 0x00000F00L
+#define DIG1_HDMI_GC__HDMI_PACKING_PHASE_OVERRIDE_MASK 0x00001000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL2
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT__SHIFT 0x1
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD__SHIFT 0x1c
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_OVRD_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_LAYOUT_SELECT_MASK 0x00000002L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_AUDIO_CHANNEL_ENABLE_MASK 0x0000FF00L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_DP_AUDIO_STREAM_ID_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_HBR_ENABLE_OVRD_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL2__AFMT_60958_OSF_OVRD_MASK 0x10000000L
+//DIG1_AFMT_ISRC1_0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE__SHIFT 0x6
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID__SHIFT 0x7
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_STATUS_MASK 0x00000007L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_CONTINUE_MASK 0x00000040L
+#define DIG1_AFMT_ISRC1_0__AFMT_ISRC_VALID_MASK 0x00000080L
+//DIG1_AFMT_ISRC1_1
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC0_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC1_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC2_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_1__AFMT_UPC_EAN_ISRC3_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_2
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC4_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC5_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC6_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_2__AFMT_UPC_EAN_ISRC7_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_3
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC8_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC9_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC10_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_3__AFMT_UPC_EAN_ISRC11_MASK 0xFF000000L
+//DIG1_AFMT_ISRC1_4
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12__SHIFT 0x0
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13__SHIFT 0x8
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14__SHIFT 0x10
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15__SHIFT 0x18
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC12_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC13_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC14_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC1_4__AFMT_UPC_EAN_ISRC15_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC16_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC17_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC18_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_0__AFMT_UPC_EAN_ISRC19_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_1
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC20_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC21_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC22_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_1__AFMT_UPC_EAN_ISRC23_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_2
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC24_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC25_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC26_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_2__AFMT_UPC_EAN_ISRC27_MASK 0xFF000000L
+//DIG1_AFMT_ISRC2_3
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28__SHIFT 0x0
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29__SHIFT 0x8
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30__SHIFT 0x10
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31__SHIFT 0x18
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC28_MASK 0x000000FFL
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC29_MASK 0x0000FF00L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC30_MASK 0x00FF0000L
+#define DIG1_AFMT_ISRC2_3__AFMT_UPC_EAN_ISRC31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC2_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL2__HDMI_GENERIC3_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC4_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL3__HDMI_GENERIC5_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_DB_CONTROL
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING__SHIFT 0x0
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN__SHIFT 0x4
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR__SHIFT 0x5
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK__SHIFT 0x8
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE__SHIFT 0xc
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING__SHIFT 0xf
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_PENDING_MASK 0x00000001L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_MASK 0x00000010L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_TAKEN_CLR_MASK 0x00000020L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_LOCK_MASK 0x00000100L
+#define DIG1_HDMI_DB_CONTROL__HDMI_DB_DISABLE_MASK 0x00001000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DIG1_HDMI_DB_CONTROL__VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DIG1_DME_CONTROL
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID__SHIFT 0x0
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN__SHIFT 0x4
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE__SHIFT 0x8
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING__SHIFT 0xc
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN__SHIFT 0xd
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR__SHIFT 0x10
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE__SHIFT 0x14
+#define DIG1_DME_CONTROL__METADATA_HUBP_REQUESTOR_ID_MASK 0x00000007L
+#define DIG1_DME_CONTROL__METADATA_ENGINE_EN_MASK 0x00000010L
+#define DIG1_DME_CONTROL__METADATA_STREAM_TYPE_MASK 0x00000100L
+#define DIG1_DME_CONTROL__METADATA_DB_PENDING_MASK 0x00001000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_MASK 0x00002000L
+#define DIG1_DME_CONTROL__METADATA_DB_TAKEN_CLR_MASK 0x00010000L
+#define DIG1_DME_CONTROL__METADATA_DB_DISABLE_MASK 0x00100000L
+//DIG1_AFMT_MPEG_INFO0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1__SHIFT 0x10
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2__SHIFT 0x18
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB0_MASK 0x0000FF00L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB1_MASK 0x00FF0000L
+#define DIG1_AFMT_MPEG_INFO0__AFMT_MPEG_INFO_MB2_MASK 0xFF000000L
+//DIG1_AFMT_MPEG_INFO1
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3__SHIFT 0x0
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF__SHIFT 0x8
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR__SHIFT 0xc
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MB3_MASK 0x000000FFL
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_MF_MASK 0x00000300L
+#define DIG1_AFMT_MPEG_INFO1__AFMT_MPEG_INFO_FR_MASK 0x00001000L
+//DIG1_AFMT_GENERIC_HDR
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_HDR__AFMT_GENERIC_HB3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE0_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE1_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE2_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_0__AFMT_GENERIC_BYTE3_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_1
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE4_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE5_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE6_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_1__AFMT_GENERIC_BYTE7_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_2
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE8_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE9_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE10_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_2__AFMT_GENERIC_BYTE11_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_3
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE12_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE13_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE14_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_3__AFMT_GENERIC_BYTE15_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_4
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE16_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE17_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE18_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_4__AFMT_GENERIC_BYTE19_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_5
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE20_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE21_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE22_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_5__AFMT_GENERIC_BYTE23_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_6
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE24_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE25_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE26_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_6__AFMT_GENERIC_BYTE27_MASK 0xFF000000L
+//DIG1_AFMT_GENERIC_7
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28__SHIFT 0x0
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29__SHIFT 0x8
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30__SHIFT 0x10
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31__SHIFT 0x18
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE28_MASK 0x000000FFL
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE29_MASK 0x0000FF00L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE30_MASK 0x00FF0000L
+#define DIG1_AFMT_GENERIC_7__AFMT_GENERIC_BYTE31_MASK 0xFF000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE__SHIFT 0x10
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC0_LINE_MASK 0x0000FFFFL
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL1__HDMI_GENERIC1_LINE_MASK 0xFFFF0000L
+//DIG1_HDMI_ACR_32_0
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32__SHIFT 0xc
+#define DIG1_HDMI_ACR_32_0__HDMI_ACR_CTS_32_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_32_1
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32__SHIFT 0x0
+#define DIG1_HDMI_ACR_32_1__HDMI_ACR_N_32_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_44_0
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44__SHIFT 0xc
+#define DIG1_HDMI_ACR_44_0__HDMI_ACR_CTS_44_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_44_1
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44__SHIFT 0x0
+#define DIG1_HDMI_ACR_44_1__HDMI_ACR_N_44_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_48_0
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT 0xc
+#define DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_48_1
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48__SHIFT 0x0
+#define DIG1_HDMI_ACR_48_1__HDMI_ACR_N_48_MASK 0x000FFFFFL
+//DIG1_HDMI_ACR_STATUS_0
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS__SHIFT 0xc
+#define DIG1_HDMI_ACR_STATUS_0__HDMI_ACR_CTS_MASK 0xFFFFF000L
+//DIG1_HDMI_ACR_STATUS_1
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N__SHIFT 0x0
+#define DIG1_HDMI_ACR_STATUS_1__HDMI_ACR_N_MASK 0x000FFFFFL
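The DIG1_HDMI_ACR_* registers above carry the N/CTS pairs for HDMI Audio Clock Regeneration: a 20-bit N value (mask 0x000FFFFF at shift 0) and a 20-bit CTS value (mask 0xFFFFF000 at shift 12) per sample-rate family, plus the live values in the STATUS registers. The sink recovers the audio clock from the relation 128 * f_s = f_TMDS * N / CTS. The snippet below is only an illustrative sketch of that arithmetic, not code from this patch; the N table follows the values recommended by the HDMI specification and the helper names are invented.

#include <stdint.h>

/* Recommended N for the common base sample rates (HDMI spec). */
static uint32_t hdmi_acr_n(uint32_t sample_rate_hz)
{
	switch (sample_rate_hz) {
	case 32000: return 4096;
	case 44100: return 6272;
	case 48000: return 6144;
	default:    return 6144;   /* assumption: treat others like 48 kHz */
	}
}

/* CTS from 128 * f_s = f_TMDS * N / CTS  =>  CTS = f_TMDS * N / (128 * f_s). */
static uint32_t hdmi_acr_cts(uint64_t tmds_clock_hz, uint32_t n, uint32_t sample_rate_hz)
{
	return (uint32_t)((tmds_clock_hz * n) / (128ULL * sample_rate_hz));
}

/* Pack a computed CTS into the 48 kHz register using the fields above. */
static uint32_t pack_hdmi_acr_48_0(uint32_t cts)
{
	return (cts << DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48__SHIFT) &
	       DIG1_HDMI_ACR_48_0__HDMI_ACR_CTS_48_MASK;
}

For example, 48 kHz audio over a 148.5 MHz TMDS clock gives CTS = 148500000 * 6144 / (128 * 48000) = 148500.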
+//DIG1_AFMT_AUDIO_INFO0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CC_MASK 0x00000700L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CT_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CHECKSUM_OFFSET_MASK 0x00FF0000L
+#define DIG1_AFMT_AUDIO_INFO0__AFMT_AUDIO_INFO_CXT_MASK 0x1F000000L
+//DIG1_AFMT_AUDIO_INFO1
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH__SHIFT 0xf
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_CA_MASK 0x000000FFL
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LSV_MASK 0x00007800L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_DM_INH_MASK 0x00008000L
+#define DIG1_AFMT_AUDIO_INFO1__AFMT_AUDIO_INFO_LFEPBL_MASK 0x00030000L
+//DIG1_AFMT_60958_0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A__SHIFT 0x0
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B__SHIFT 0x1
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C__SHIFT 0x2
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D__SHIFT 0x3
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE__SHIFT 0x6
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE__SHIFT 0x8
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER__SHIFT 0x10
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x14
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x18
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY__SHIFT 0x1c
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_A_MASK 0x00000001L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_B_MASK 0x00000002L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_C_MASK 0x00000004L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_D_MASK 0x00000038L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_MODE_MASK 0x000000C0L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CATEGORY_CODE_MASK 0x0000FF00L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SOURCE_NUMBER_MASK 0x000F0000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CHANNEL_NUMBER_L_MASK 0x00F00000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_SAMPLING_FREQUENCY_MASK 0x0F000000L
+#define DIG1_AFMT_60958_0__AFMT_60958_CS_CLOCK_ACCURACY_MASK 0x30000000L
+//DIG1_AFMT_60958_1
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH__SHIFT 0x0
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x4
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L__SHIFT 0x10
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R__SHIFT 0x12
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x14
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_WORD_LENGTH_MASK 0x0000000FL
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x000000F0L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_L_MASK 0x00010000L
+#define DIG1_AFMT_60958_1__AFMT_60958_VALID_R_MASK 0x00040000L
+#define DIG1_AFMT_60958_1__AFMT_60958_CS_CHANNEL_NUMBER_R_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_CONTROL
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT__SHIFT 0x4
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT__SHIFT 0x10
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_EN_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CONT_MASK 0x00000010L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_SOURCE_MASK 0x00000100L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_CH_SEL_MASK 0x0000F000L
+#define DIG1_AFMT_AUDIO_CRC_CONTROL__AFMT_AUDIO_CRC_COUNT_MASK 0xFFFF0000L
+//DIG1_AFMT_RAMP_CONTROL0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN__SHIFT 0x1f
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_MAX_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL0__AFMT_RAMP_DATA_SIGN_MASK 0x80000000L
+//DIG1_AFMT_RAMP_CONTROL1
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE__SHIFT 0x18
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_RAMP_MIN_COUNT_MASK 0x00FFFFFFL
+#define DIG1_AFMT_RAMP_CONTROL1__AFMT_AUDIO_TEST_CH_DISABLE_MASK 0xFF000000L
+//DIG1_AFMT_RAMP_CONTROL2
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL2__AFMT_RAMP_INC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_RAMP_CONTROL3
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT__SHIFT 0x0
+#define DIG1_AFMT_RAMP_CONTROL3__AFMT_RAMP_DEC_COUNT_MASK 0x00FFFFFFL
+//DIG1_AFMT_60958_2
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x8
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5__SHIFT 0xc
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x10
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x14
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_4_MASK 0x00000F00L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_5_MASK 0x0000F000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_6_MASK 0x000F0000L
+#define DIG1_AFMT_60958_2__AFMT_60958_CS_CHANNEL_NUMBER_7_MASK 0x00F00000L
+//DIG1_AFMT_AUDIO_CRC_RESULT
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC__SHIFT 0x8
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_DONE_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_CRC_RESULT__AFMT_AUDIO_CRC_MASK 0xFFFFFF00L
+//DIG1_AFMT_STATUS
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE__SHIFT 0x4
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE__SHIFT 0x8
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW__SHIFT 0x18
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG__SHIFT 0x1e
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_ENABLE_MASK 0x00000010L
+#define DIG1_AFMT_STATUS__AFMT_AZ_HBR_ENABLE_MASK 0x00000100L
+#define DIG1_AFMT_STATUS__AFMT_AUDIO_FIFO_OVERFLOW_MASK 0x01000000L
+#define DIG1_AFMT_STATUS__AFMT_AZ_AUDIO_ENABLE_CHG_MASK 0x40000000L
+//DIG1_AFMT_AUDIO_PACKET_CONTROL
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS__SHIFT 0xb
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN__SHIFT 0xc
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE__SHIFT 0xe
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK__SHIFT 0x17
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP__SHIFT 0x18
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK__SHIFT 0x1e
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB__SHIFT 0x1f
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_SAMPLE_SEND_MASK 0x00000001L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_RESET_FIFO_WHEN_AUDIO_DIS_MASK 0x00000800L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_EN_MASK 0x00001000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_TEST_MODE_MASK 0x00004000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_FIFO_OVERFLOW_ACK_MASK 0x00800000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AUDIO_CHANNEL_SWAP_MASK 0x01000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_60958_CS_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_AZ_AUDIO_ENABLE_CHG_ACK_MASK 0x40000000L
+#define DIG1_AFMT_AUDIO_PACKET_CONTROL__AFMT_BLANK_TEST_DATA_ON_ENC_ENB_MASK 0x80000000L
+//DIG1_AFMT_VBI_PACKET_CONTROL
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_LOCK_STATUS_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_CONFLICT_CLR_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL__AFMT_GENERIC_INDEX_MASK 0xF0000000L
+//DIG1_AFMT_INFOFRAME_CONTROL0
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE__SHIFT 0x6
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE__SHIFT 0x7
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_SOURCE_MASK 0x00000040L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_AUDIO_INFO_UPDATE_MASK 0x00000080L
+#define DIG1_AFMT_INFOFRAME_CONTROL0__AFMT_MPEG_INFO_UPDATE_MASK 0x00000400L
+//DIG1_AFMT_AUDIO_SRC_CONTROL
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT__SHIFT 0x0
+#define DIG1_AFMT_AUDIO_SRC_CONTROL__AFMT_AUDIO_SRC_SELECT_MASK 0x00000007L
+//DIG1_DIG_BE_CNTL
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_CNTL__DIG_SWAP__SHIFT 0x1
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN__SHIFT 0x2
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT__SHIFT 0x8
+#define DIG1_DIG_BE_CNTL__DIG_MODE__SHIFT 0x10
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT 0x1c
+#define DIG1_DIG_BE_CNTL__DIG_DUAL_LINK_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_CNTL__DIG_SWAP_MASK 0x00000002L
+#define DIG1_DIG_BE_CNTL__DIG_RB_SWITCH_EN_MASK 0x00000004L
+#define DIG1_DIG_BE_CNTL__DIG_FE_SOURCE_SELECT_MASK 0x00007F00L
+#define DIG1_DIG_BE_CNTL__DIG_MODE_MASK 0x00070000L
+#define DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK 0x70000000L
+//DIG1_DIG_BE_EN_CNTL
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE__SHIFT 0x0
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON__SHIFT 0x8
+#define DIG1_DIG_BE_EN_CNTL__DIG_ENABLE_MASK 0x00000001L
+#define DIG1_DIG_BE_EN_CNTL__DIG_SYMCLK_BE_ON_MASK 0x00000100L
+//DIG1_TMDS_CNTL
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE__SHIFT 0x0
+#define DIG1_TMDS_CNTL__TMDS_SYNC_PHASE_MASK 0x00000001L
+//DIG1_TMDS_CONTROL_CHAR
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN__SHIFT 0x0
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN__SHIFT 0x1
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN__SHIFT 0x2
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN__SHIFT 0x3
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR0_OUT_EN_MASK 0x00000001L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR1_OUT_EN_MASK 0x00000002L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR2_OUT_EN_MASK 0x00000004L
+#define DIG1_TMDS_CONTROL_CHAR__TMDS_CONTROL_CHAR3_OUT_EN_MASK 0x00000008L
+//DIG1_TMDS_CONTROL0_FEEDBACK
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT__SHIFT 0x0
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY__SHIFT 0x8
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_SELECT_MASK 0x00000003L
+#define DIG1_TMDS_CONTROL0_FEEDBACK__TMDS_CONTROL0_FEEDBACK_DELAY_MASK 0x00000300L
+//DIG1_TMDS_STEREOSYNC_CTL_SEL
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL__SHIFT 0x0
+#define DIG1_TMDS_STEREOSYNC_CTL_SEL__TMDS_STEREOSYNC_CTL_SEL_MASK 0x00000003L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_0_1
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN0_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_0_1__TMDS_SYNC_CHAR_PATTERN1_MASK 0x03FF0000L
+//DIG1_TMDS_SYNC_CHAR_PATTERN_2_3
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2__SHIFT 0x0
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3__SHIFT 0x10
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN2_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_CHAR_PATTERN_2_3__TMDS_SYNC_CHAR_PATTERN3_MASK 0x03FF0000L
+//DIG1_TMDS_CTL_BITS
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0__SHIFT 0x0
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1__SHIFT 0x8
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2__SHIFT 0x10
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3__SHIFT 0x18
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL0_MASK 0x00000001L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL1_MASK 0x00000100L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL2_MASK 0x00010000L
+#define DIG1_TMDS_CTL_BITS__TMDS_CTL3_MASK 0x01000000L
+//DIG1_TMDS_DCBALANCER_CONTROL
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN__SHIFT 0x0
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN__SHIFT 0x4
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN__SHIFT 0x8
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN__SHIFT 0x10
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE__SHIFT 0x18
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_EN_MASK 0x00000001L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_SYNC_DCBAL_EN_MASK 0x00000070L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_EN_MASK 0x00000100L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_TEST_IN_MASK 0x000F0000L
+#define DIG1_TMDS_DCBALANCER_CONTROL__TMDS_DCBALANCER_FORCE_MASK 0x01000000L
+//DIG1_TMDS_SYNC_DCBALANCE_CHAR
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01__SHIFT 0x0
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11__SHIFT 0x10
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR01_MASK 0x000003FFL
+#define DIG1_TMDS_SYNC_DCBALANCE_CHAR__TMDS_SYNC_DCBAL_CHAR11_MASK 0x03FF0000L
+//DIG1_TMDS_CTL0_1_GEN_CNTL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN__SHIFT 0x1f
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL0_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_CTL1_PATTERN_OUT_EN_MASK 0x10000000L
+#define DIG1_TMDS_CTL0_1_GEN_CNTL__TMDS_2BIT_COUNTER_EN_MASK 0x80000000L
+//DIG1_TMDS_CTL2_3_GEN_CNTL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL__SHIFT 0x0
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY__SHIFT 0x4
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT__SHIFT 0x7
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION__SHIFT 0x8
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH__SHIFT 0xa
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT__SHIFT 0xb
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN__SHIFT 0xc
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL__SHIFT 0x10
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY__SHIFT 0x14
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT__SHIFT 0x17
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION__SHIFT 0x18
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH__SHIFT 0x1a
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT__SHIFT 0x1b
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN__SHIFT 0x1c
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_SEL_MASK 0x0000000FL
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_DELAY_MASK 0x00000070L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_INVERT_MASK 0x00000080L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_DATA_MODULATION_MASK 0x00000300L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_USE_FEEDBACK_PATH_MASK 0x00000400L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_FB_SYNC_CONT_MASK 0x00000800L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL2_PATTERN_OUT_EN_MASK 0x00001000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_SEL_MASK 0x000F0000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_DELAY_MASK 0x00700000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_INVERT_MASK 0x00800000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_DATA_MODULATION_MASK 0x03000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_USE_FEEDBACK_PATH_MASK 0x04000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_FB_SYNC_CONT_MASK 0x08000000L
+#define DIG1_TMDS_CTL2_3_GEN_CNTL__TMDS_CTL3_PATTERN_OUT_EN_MASK 0x10000000L
+//DIG1_DIG_VERSION
+#define DIG1_DIG_VERSION__DIG_TYPE__SHIFT 0x0
+#define DIG1_DIG_VERSION__DIG_TYPE_MASK 0x00000001L
+//DIG1_DIG_LANE_ENABLE
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN__SHIFT 0x0
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN__SHIFT 0x1
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN__SHIFT 0x2
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN__SHIFT 0x3
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN__SHIFT 0x8
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE0EN_MASK 0x00000001L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE1EN_MASK 0x00000002L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE2EN_MASK 0x00000004L
+#define DIG1_DIG_LANE_ENABLE__DIG_LANE3EN_MASK 0x00000008L
+#define DIG1_DIG_LANE_ENABLE__DIG_CLK_EN_MASK 0x00000100L
+//DIG1_AFMT_CNTL
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN__SHIFT 0x0
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON__SHIFT 0x8
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_EN_MASK 0x00000001L
+#define DIG1_AFMT_CNTL__AFMT_AUDIO_CLOCK_ON_MASK 0x00000100L
+//DIG1_AFMT_VBI_PACKET_CONTROL1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE__SHIFT 0x0
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING__SHIFT 0x1
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE__SHIFT 0x2
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING__SHIFT 0x3
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE__SHIFT 0x4
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING__SHIFT 0x5
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE__SHIFT 0x6
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING__SHIFT 0x7
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE__SHIFT 0x8
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING__SHIFT 0x9
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE__SHIFT 0xa
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING__SHIFT 0xb
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE__SHIFT 0xc
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING__SHIFT 0xd
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE__SHIFT 0xe
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING__SHIFT 0xf
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE__SHIFT 0x10
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING__SHIFT 0x11
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE__SHIFT 0x12
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING__SHIFT 0x13
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE__SHIFT 0x14
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING__SHIFT 0x15
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE__SHIFT 0x16
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING__SHIFT 0x17
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE__SHIFT 0x18
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING__SHIFT 0x19
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE__SHIFT 0x1a
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1b
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE__SHIFT 0x1c
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING__SHIFT 0x1d
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE__SHIFT 0x1e
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING__SHIFT 0x1f
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_MASK 0x00000001L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_FRAME_UPDATE_PENDING_MASK 0x00000002L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_MASK 0x00000004L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC0_IMMEDIATE_UPDATE_PENDING_MASK 0x00000008L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_MASK 0x00000010L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_FRAME_UPDATE_PENDING_MASK 0x00000020L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_MASK 0x00000040L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC1_IMMEDIATE_UPDATE_PENDING_MASK 0x00000080L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_MASK 0x00000100L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_FRAME_UPDATE_PENDING_MASK 0x00000200L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_MASK 0x00000400L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC2_IMMEDIATE_UPDATE_PENDING_MASK 0x00000800L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_MASK 0x00001000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_FRAME_UPDATE_PENDING_MASK 0x00002000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_MASK 0x00004000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC3_IMMEDIATE_UPDATE_PENDING_MASK 0x00008000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_MASK 0x00010000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_FRAME_UPDATE_PENDING_MASK 0x00020000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_MASK 0x00040000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC4_IMMEDIATE_UPDATE_PENDING_MASK 0x00080000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_MASK 0x00100000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_FRAME_UPDATE_PENDING_MASK 0x00200000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_MASK 0x00400000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC5_IMMEDIATE_UPDATE_PENDING_MASK 0x00800000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_MASK 0x01000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_FRAME_UPDATE_PENDING_MASK 0x02000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_MASK 0x04000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC6_IMMEDIATE_UPDATE_PENDING_MASK 0x08000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_MASK 0x10000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_FRAME_UPDATE_PENDING_MASK 0x20000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_MASK 0x40000000L
+#define DIG1_AFMT_VBI_PACKET_CONTROL1__AFMT_GENERIC7_IMMEDIATE_UPDATE_PENDING_MASK 0x80000000L
+//DIG1_HDMI_GENERIC_PACKET_CONTROL5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND__SHIFT 0x0
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING__SHIFT 0x1
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND__SHIFT 0x2
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING__SHIFT 0x3
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND__SHIFT 0x4
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING__SHIFT 0x5
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND__SHIFT 0x6
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING__SHIFT 0x7
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND__SHIFT 0x8
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING__SHIFT 0x9
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND__SHIFT 0xa
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING__SHIFT 0xb
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND__SHIFT 0xc
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING__SHIFT 0xd
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND__SHIFT 0xe
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING__SHIFT 0xf
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_MASK 0x00000001L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC0_IMMEDIATE_SEND_PENDING_MASK 0x00000002L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_MASK 0x00000004L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC1_IMMEDIATE_SEND_PENDING_MASK 0x00000008L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_MASK 0x00000010L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC2_IMMEDIATE_SEND_PENDING_MASK 0x00000020L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_MASK 0x00000040L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC3_IMMEDIATE_SEND_PENDING_MASK 0x00000080L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_MASK 0x00000100L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC4_IMMEDIATE_SEND_PENDING_MASK 0x00000200L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_MASK 0x00000400L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC5_IMMEDIATE_SEND_PENDING_MASK 0x00000800L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_MASK 0x00001000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC6_IMMEDIATE_SEND_PENDING_MASK 0x00002000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_MASK 0x00004000L
+#define DIG1_HDMI_GENERIC_PACKET_CONTROL5__HDMI_GENERIC7_IMMEDIATE_SEND_PENDING_MASK 0x00008000L
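Every definition in this header pairs a <REG>__<FIELD>__SHIFT with a <REG>__<FIELD>_MASK, so fields are read and written with plain mask-and-shift arithmetic on the 32-bit register value. A minimal sketch of that pattern, using two fields defined above purely as examples; the helper names are invented and the real driver goes through its own register accessors:

#include <stdint.h>

/* Read-modify-write of one field in a cached register value:
 * set or clear AVMUTE in DIG1_HDMI_GC. */
static inline uint32_t dig1_hdmi_gc_set_avmute(uint32_t hdmi_gc_val, int mute)
{
	hdmi_gc_val &= ~DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK;
	if (mute)
		hdmi_gc_val |= (1u << DIG1_HDMI_GC__HDMI_GC_AVMUTE__SHIFT) &
			       DIG1_HDMI_GC__HDMI_GC_AVMUTE_MASK;
	return hdmi_gc_val;
}

/* Extract a field: which HPD pin the DIG back end is routed to. */
static inline uint32_t dig1_hpd_select(uint32_t dig_be_cntl_val)
{
	return (dig_be_cntl_val & DIG1_DIG_BE_CNTL__DIG_HPD_SELECT_MASK) >>
	       DIG1_DIG_BE_CNTL__DIG_HPD_SELECT__SHIFT;
}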
+
+
+// addressBlock: dce_dc_dio_dp1_dispdec
+//DP1_DP_LINK_CNTL
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE__SHIFT 0x4
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS__SHIFT 0x8
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE__SHIFT 0x11
+#define DP1_DP_LINK_CNTL__DP_LINK_TRAINING_COMPLETE_MASK 0x00000010L
+#define DP1_DP_LINK_CNTL__DP_LINK_STATUS_MASK 0x00000100L
+#define DP1_DP_LINK_CNTL__DP_EMBEDDED_PANEL_MODE_MASK 0x00020000L
+//DP1_DP_PIXEL_FORMAT
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING__SHIFT 0x0
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH__SHIFT 0x18
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE__SHIFT 0x1c
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_ENCODING_MASK 0x00000007L
+#define DP1_DP_PIXEL_FORMAT__DP_COMPONENT_DEPTH_MASK 0x07000000L
+#define DP1_DP_PIXEL_FORMAT__DP_PIXEL_COMBINE_MASK 0x30000000L
+//DP1_DP_MSA_COLORIMETRY
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0__SHIFT 0x18
+#define DP1_DP_MSA_COLORIMETRY__DP_MSA_MISC0_MASK 0xFF000000L
+//DP1_DP_CONFIG
+#define DP1_DP_CONFIG__DP_UDI_LANES__SHIFT 0x0
+#define DP1_DP_CONFIG__DP_UDI_LANES_MASK 0x00000003L
+//DP1_DP_VID_STREAM_CNTL
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER__SHIFT 0x8
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS__SHIFT 0x10
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT__SHIFT 0x14
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_DIS_DEFER_MASK 0x00000300L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_STATUS_MASK 0x00010000L
+#define DP1_DP_VID_STREAM_CNTL__DP_VID_STREAM_CHANGE_KEEPOUT_MASK 0x00100000L
+//DP1_DP_STEER_FIFO
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET__SHIFT 0x0
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG__SHIFT 0x4
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT__SHIFT 0x5
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK__SHIFT 0x6
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK__SHIFT 0x7
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG__SHIFT 0x8
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK__SHIFT 0xc
+#define DP1_DP_STEER_FIFO__DP_STEER_FIFO_RESET_MASK 0x00000001L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_FLAG_MASK 0x00000010L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_INT_MASK 0x00000020L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_ACK_MASK 0x00000040L
+#define DP1_DP_STEER_FIFO__DP_STEER_OVERFLOW_MASK_MASK 0x00000080L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_FLAG_MASK 0x00000100L
+#define DP1_DP_STEER_FIFO__DP_TU_OVERFLOW_ACK_MASK 0x00001000L
+//DP1_DP_MSA_MISC
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1__SHIFT 0x0
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2__SHIFT 0x8
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3__SHIFT 0x10
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4__SHIFT 0x18
+#define DP1_DP_MSA_MISC__DP_MSA_MISC1_MASK 0x000000FFL
+#define DP1_DP_MSA_MISC__DP_MSA_MISC2_MASK 0x0000FF00L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC3_MASK 0x00FF0000L
+#define DP1_DP_MSA_MISC__DP_MSA_MISC4_MASK 0xFF000000L
+//DP1_DP_VID_TIMING
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE__SHIFT 0x4
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN__SHIFT 0x8
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL__SHIFT 0xa
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV__SHIFT 0xc
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV__SHIFT 0x18
+#define DP1_DP_VID_TIMING__DP_VID_M_N_DOUBLE_BUFFER_MODE_MASK 0x00000010L
+#define DP1_DP_VID_TIMING__DP_VID_M_N_GEN_EN_MASK 0x00000100L
+#define DP1_DP_VID_TIMING__DP_VID_N_MUL_MASK 0x00000C00L
+#define DP1_DP_VID_TIMING__DP_VID_M_DIV_MASK 0x00003000L
+#define DP1_DP_VID_TIMING__DP_VID_N_DIV_MASK 0xFF000000L
+//DP1_DP_VID_N
+#define DP1_DP_VID_N__DP_VID_N__SHIFT 0x0
+#define DP1_DP_VID_N__DP_VID_N_MASK 0x00FFFFFFL
+//DP1_DP_VID_M
+#define DP1_DP_VID_M__DP_VID_M__SHIFT 0x0
+#define DP1_DP_VID_M__DP_VID_M_MASK 0x00FFFFFFL
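DP1_DP_VID_N and DP1_DP_VID_M hold the 24-bit Nvid/Mvid pair from the DisplayPort main stream attributes; the sink reconstructs the stream (pixel) clock from the link symbol clock via f_stream / f_link_symbol = M / N. A hedged sketch of the usual synchronous-clock-mode calculation, not taken from this patch; the fixed Nvid of 0x8000 is the value commonly used for that mode and the helper name is invented:

#include <stdint.h>

#define DP_VID_N_SYNC	0x8000u	/* assumed fixed Nvid for synchronous clock mode */

/* Derive Mvid so that stream_khz / link_symbol_khz == M / N; both fields
 * are 24 bits wide, matching the 0x00FFFFFF masks above. */
static void dp_compute_vid_m_n(uint32_t stream_khz, uint32_t link_symbol_khz,
			       uint32_t *m, uint32_t *n)
{
	*n = DP_VID_N_SYNC;
	*m = (uint32_t)(((uint64_t)stream_khz * DP_VID_N_SYNC) / link_symbol_khz);

	*m &= DP1_DP_VID_M__DP_VID_M_MASK;
	*n &= DP1_DP_VID_N__DP_VID_N_MASK;
}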
+//DP1_DP_LINK_FRAMING_CNTL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL__SHIFT 0x0
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE__SHIFT 0x18
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE__SHIFT 0x1c
+#define DP1_DP_LINK_FRAMING_CNTL__DP_IDLE_BS_INTERVAL_MASK 0x0003FFFFL
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VBID_DISABLE_MASK 0x01000000L
+#define DP1_DP_LINK_FRAMING_CNTL__DP_VID_ENHANCED_FRAME_MODE_MASK 0x10000000L
+//DP1_DP_HBR2_EYE_PATTERN
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE__SHIFT 0x0
+#define DP1_DP_HBR2_EYE_PATTERN__DP_HBR2_EYE_PATTERN_ENABLE_MASK 0x00000001L
+//DP1_DP_VID_MSA_VBID
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION__SHIFT 0x0
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL__SHIFT 0x18
+#define DP1_DP_VID_MSA_VBID__DP_VID_MSA_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_VID_MSA_VBID__DP_VID_VBID_FIELD_POL_MASK 0x01000000L
+//DP1_DP_VID_INTERRUPT_CNTL
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT__SHIFT 0x0
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK__SHIFT 0x1
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK__SHIFT 0x2
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_INT_MASK 0x00000001L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_ACK_MASK 0x00000002L
+#define DP1_DP_VID_INTERRUPT_CNTL__DP_VID_STREAM_DISABLE_MASK_MASK 0x00000004L
+//DP1_DP_DPHY_CNTL
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0__SHIFT 0x0
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1__SHIFT 0x1
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2__SHIFT 0x2
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3__SHIFT 0x3
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW__SHIFT 0x5
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS__SHIFT 0x6
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS__SHIFT 0x10
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS__SHIFT 0x18
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE0_MASK 0x00000001L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE1_MASK 0x00000002L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE2_MASK 0x00000004L
+#define DP1_DP_DPHY_CNTL__DPHY_ATEST_SEL_LANE3_MASK 0x00000008L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_READY_SHADOW_MASK 0x00000020L
+#define DP1_DP_DPHY_CNTL__DPHY_FEC_ACTIVE_STATUS_MASK 0x00000040L
+#define DP1_DP_DPHY_CNTL__DPHY_BYPASS_MASK 0x00010000L
+#define DP1_DP_DPHY_CNTL__DPHY_SKEW_BYPASS_MASK 0x01000000L
+//DP1_DP_DPHY_TRAINING_PATTERN_SEL
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL__SHIFT 0x0
+#define DP1_DP_DPHY_TRAINING_PATTERN_SEL__DPHY_TRAINING_PATTERN_SEL_MASK 0x00000003L
+//DP1_DP_DPHY_SYM0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1__SHIFT 0x0
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2__SHIFT 0xa
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3__SHIFT 0x14
+#define DP1_DP_DPHY_SYM0__DPHY_SYM1_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM0__DPHY_SYM2_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM0__DPHY_SYM3_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM1
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4__SHIFT 0x0
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5__SHIFT 0xa
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6__SHIFT 0x14
+#define DP1_DP_DPHY_SYM1__DPHY_SYM4_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM1__DPHY_SYM5_MASK 0x000FFC00L
+#define DP1_DP_DPHY_SYM1__DPHY_SYM6_MASK 0x3FF00000L
+//DP1_DP_DPHY_SYM2
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7__SHIFT 0x0
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8__SHIFT 0xa
+#define DP1_DP_DPHY_SYM2__DPHY_SYM7_MASK 0x000003FFL
+#define DP1_DP_DPHY_SYM2__DPHY_SYM8_MASK 0x000FFC00L
+//DP1_DP_DPHY_8B10B_CNTL
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET__SHIFT 0x8
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP__SHIFT 0x10
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP__SHIFT 0x18
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_RESET_MASK 0x00000100L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_EXT_DISP_MASK 0x00010000L
+#define DP1_DP_DPHY_8B10B_CNTL__DPHY_8B10B_CUR_DISP_MASK 0x01000000L
+//DP1_DP_DPHY_PRBS_CNTL
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN__SHIFT 0x0
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED__SHIFT 0x8
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_PRBS_CNTL__DPHY_PRBS_SEED_MASK 0x7FFFFF00L
+//DP1_DP_DPHY_SCRAM_CNTL
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS__SHIFT 0x0
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE__SHIFT 0x4
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT__SHIFT 0x8
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE__SHIFT 0x18
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_DIS_MASK 0x00000001L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_ADVANCE_MASK 0x00000010L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_BS_COUNT_MASK 0x0003FF00L
+#define DP1_DP_DPHY_SCRAM_CNTL__DPHY_SCRAMBLER_KCODE_MASK 0x01000000L
+//DP1_DP_DPHY_CRC_EN
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_EN_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_CONT_EN_MASK 0x00000010L
+#define DP1_DP_DPHY_CRC_EN__DPHY_CRC_RESULT_VALID_MASK 0x00000100L
+//DP1_DP_DPHY_CRC_CNTL
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL__SHIFT 0x4
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_FIELD_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_SEL_MASK 0x00000030L
+#define DP1_DP_DPHY_CRC_CNTL__DPHY_CRC_MASK_MASK 0x00FF0000L
+//DP1_DP_DPHY_CRC_RESULT
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT 0x18
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK 0x000000FFL
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK 0x0000FF00L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK 0x00FF0000L
+#define DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK 0xFF000000L
+//DP1_DP_DPHY_CRC_MST_CNTL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_FIRST_SLOT_MASK 0x0000003FL
+#define DP1_DP_DPHY_CRC_MST_CNTL__DPHY_CRC_MST_LAST_SLOT_MASK 0x00003F00L
+//DP1_DP_DPHY_CRC_MST_STATUS
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK__SHIFT 0x0
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR__SHIFT 0x8
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK__SHIFT 0x10
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_LOCK_MASK 0x00000001L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_MASK 0x00000100L
+#define DP1_DP_DPHY_CRC_MST_STATUS__DPHY_CRC_MST_PHASE_ERROR_ACK_MASK 0x00010000L
+//DP1_DP_DPHY_FAST_TRAINING
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START__SHIFT 0x1
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN__SHIFT 0x2
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME__SHIFT 0x14
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_RX_FAST_TRAINING_CAPABLE_MASK 0x00000001L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_SW_FAST_TRAINING_START_MASK 0x00000002L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_VBLANK_EDGE_DETECT_EN_MASK 0x00000004L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP1_TIME_MASK 0x000FFF00L
+#define DP1_DP_DPHY_FAST_TRAINING__DPHY_FAST_TRAINING_TP2_TIME_MASK 0xFFF00000L
+//DP1_DP_DPHY_FAST_TRAINING_STATUS
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE__SHIFT 0x0
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED__SHIFT 0x4
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK__SHIFT 0x8
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK__SHIFT 0xc
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_STATE_MASK 0x00000007L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_OCCURRED_MASK 0x00000010L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_MASK_MASK 0x00000100L
+#define DP1_DP_DPHY_FAST_TRAINING_STATUS__DPHY_FAST_TRAINING_COMPLETE_ACK_MASK 0x00001000L
+//DP1_DP_SEC_CNTL
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE__SHIFT 0x16
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL__DP_SEC_STREAM_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL__DP_SEC_ASP_ENABLE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL__DP_SEC_ATP_ENABLE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL__DP_SEC_AIP_ENABLE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL__DP_SEC_ACM_ENABLE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP0_ENABLE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP1_ENABLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP2_ENABLE_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP3_ENABLE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP4_ENABLE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP5_ENABLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP6_ENABLE_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_GSP7_ENABLE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL__DP_SEC_MPG_ENABLE_MASK 0x10000000L
+//DP1_DP_SEC_CNTL1
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND__SHIFT 0x5
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING__SHIFT 0x6
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED__SHIFT 0x7
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE__SHIFT 0xa
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE__SHIFT 0xe
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL1__DP_SEC_ISRC_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_PENDING_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_DEADLINE_MISSED_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_SEND_ANY_LINE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP1_LINE_REFERENCE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP2_LINE_REFERENCE_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP3_LINE_REFERENCE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP4_LINE_REFERENCE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP5_LINE_REFERENCE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP6_LINE_REFERENCE_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP7_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL1__DP_SEC_GSP0_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING1
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING1__DP_SEC_FRAME_START_LOCATION_MASK 0x00000FFFL
+#define DP1_DP_SEC_FRAMING1__DP_SEC_VBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING2
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING2__DP_SEC_START_POSITION_MASK 0x0000FFFFL
+#define DP1_DP_SEC_FRAMING2__DP_SEC_HBLANK_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING3
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH__SHIFT 0x10
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_FRAME_SIZE_MASK 0x00003FFFL
+#define DP1_DP_SEC_FRAMING3__DP_SEC_IDLE_TRANSMIT_WIDTH_MASK 0xFFFF0000L
+//DP1_DP_SEC_FRAMING4
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING__SHIFT 0x0
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS__SHIFT 0x14
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK__SHIFT 0x18
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE__SHIFT 0x1c
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS__SHIFT 0x1d
+#define DP1_DP_SEC_FRAMING4__DP_SST_SDP_SPLITTING_MASK 0x00000001L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_STATUS_MASK 0x00100000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_COLLISION_ACK_MASK 0x01000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_MASK 0x10000000L
+#define DP1_DP_SEC_FRAMING4__DP_SEC_AUDIO_MUTE_STATUS_MASK 0x20000000L
+//DP1_DP_SEC_AUD_N
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N__DP_SEC_AUD_N_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_N_READBACK
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_N_READBACK__DP_SEC_AUD_N_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M__DP_SEC_AUD_M_MASK 0x00FFFFFFL
+//DP1_DP_SEC_AUD_M_READBACK
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK__SHIFT 0x0
+#define DP1_DP_SEC_AUD_M_READBACK__DP_SEC_AUD_M_READBACK_MASK 0x00FFFFFFL
+//DP1_DP_SEC_TIMESTAMP
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE__SHIFT 0x0
+#define DP1_DP_SEC_TIMESTAMP__DP_SEC_TIMESTAMP_MODE_MASK 0x00000001L
+//DP1_DP_SEC_PACKET_CNTL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE__SHIFT 0x1
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY__SHIFT 0x4
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION__SHIFT 0x8
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE__SHIFT 0x10
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CODING_TYPE_MASK 0x0000000EL
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_PRIORITY_MASK 0x00000010L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_VERSION_MASK 0x00003F00L
+#define DP1_DP_SEC_PACKET_CNTL__DP_SEC_ASP_CHANNEL_COUNT_OVERRIDE_MASK 0x00010000L
+//DP1_DP_MSE_RATE_CNTL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y__SHIFT 0x0
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X__SHIFT 0x1a
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_Y_MASK 0x03FFFFFFL
+#define DP1_DP_MSE_RATE_CNTL__DP_MSE_RATE_X_MASK 0xFC000000L
+//DP1_DP_CP_MSE_STATUS
+//DP1_DP_MSE_RATE_UPDATE
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING__SHIFT 0x0
+#define DP1_DP_MSE_RATE_UPDATE__DP_MSE_RATE_UPDATE_PENDING_MASK 0x00000001L
+//DP1_DP_MSE_SAT0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0__SHIFT 0x0
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0__SHIFT 0x8
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1__SHIFT 0x10
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1__SHIFT 0x18
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC0_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT0_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SRC1_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0__DP_MSE_SAT_SLOT_COUNT1_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2__SHIFT 0x0
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2__SHIFT 0x8
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3__SHIFT 0x10
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3__SHIFT 0x18
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC2_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT2_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SRC3_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1__DP_MSE_SAT_SLOT_COUNT3_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4__SHIFT 0x0
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4__SHIFT 0x8
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5__SHIFT 0x10
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5__SHIFT 0x18
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC4_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT4_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SRC5_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2__DP_MSE_SAT_SLOT_COUNT5_MASK 0x3F000000L
+//DP1_DP_MSE_SAT_UPDATE
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE__SHIFT 0x0
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT__SHIFT 0x8
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_SAT_UPDATE_MASK 0x00000003L
+#define DP1_DP_MSE_SAT_UPDATE__DP_MSE_16_MTP_KEEPOUT_MASK 0x00000100L
+//DP1_DP_MSE_LINK_TIMING
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME__SHIFT 0x0
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE__SHIFT 0x10
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_FRAME_MASK 0x000003FFL
+#define DP1_DP_MSE_LINK_TIMING__DP_MSE_LINK_LINE_MASK 0x00030000L
+//DP1_DP_MSE_MISC_CNTL
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE__SHIFT 0x0
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE__SHIFT 0x4
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER__SHIFT 0x8
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_BLANK_CODE_MASK 0x00000001L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_TIMESTAMP_MODE_MASK 0x00000010L
+#define DP1_DP_MSE_MISC_CNTL__DP_MSE_ZERO_ENCODER_MASK 0x00000100L
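The DP1_DP_DPHY_CRC_RESULT register above packs four byte-wide CRC result fields into one 32-bit value. As a minimal illustrative sketch only (the helper name dp1_dphy_crc_byte() is hypothetical and not part of this header), such a value could be unpacked with the generated mask/shift pairs like this:

#include <stdint.h>

/* Return byte-wide CRC result field idx (0..3) from a DP1_DP_DPHY_CRC_RESULT
 * register value, using the generated mask/shift pairs defined above. */
static inline uint8_t dp1_dphy_crc_byte(uint32_t crc_result_val, unsigned int idx)
{
	static const uint32_t mask[4] = {
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT_MASK,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1_MASK,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2_MASK,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3_MASK,
	};
	static const uint32_t shift[4] = {
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT__SHIFT,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT1__SHIFT,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT2__SHIFT,
		DP1_DP_DPHY_CRC_RESULT__DPHY_CRC_RESULT3__SHIFT,
	};

	return (uint8_t)((crc_result_val & mask[idx]) >> shift[idx]);
}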
+
+//DP1_DP_DPHY_BS_SR_SWAP_CNTL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT__SHIFT 0x0
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE__SHIFT 0xf
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START__SHIFT 0x10
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_MASK 0x000003FFL
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_BS_SR_SWAP_DONE_MASK 0x00008000L
+#define DP1_DP_DPHY_BS_SR_SWAP_CNTL__DPHY_LOAD_BS_COUNT_START_MASK 0x00010000L
+//DP1_DP_DPHY_HBR2_PATTERN_CONTROL
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL__SHIFT 0x0
+#define DP1_DP_DPHY_HBR2_PATTERN_CONTROL__DP_DPHY_HBR2_PATTERN_CONTROL_MASK 0x00000007L
+//DP1_DP_MSE_SAT0_STATUS
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC0_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT0_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SRC1_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT0_STATUS__DP_MSE_SAT_SLOT_COUNT1_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT1_STATUS
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC2_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT2_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SRC3_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT1_STATUS__DP_MSE_SAT_SLOT_COUNT3_STATUS_MASK 0x3F000000L
+//DP1_DP_MSE_SAT2_STATUS
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS__SHIFT 0x0
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS__SHIFT 0x8
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS__SHIFT 0x10
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS__SHIFT 0x18
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC4_STATUS_MASK 0x00000007L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT4_STATUS_MASK 0x00003F00L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SRC5_STATUS_MASK 0x00070000L
+#define DP1_DP_MSE_SAT2_STATUS__DP_MSE_SAT_SLOT_COUNT5_STATUS_MASK 0x3F000000L
+//DP1_DP_MSA_TIMING_PARAM1
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_VTOTAL_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM1__DP_MSA_HTOTAL_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM2
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_VSTART_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM2__DP_MSA_HSTART_MASK 0xFFFF0000L
+//DP1_DP_MSA_TIMING_PARAM3
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY__SHIFT 0xf
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY__SHIFT 0x1f
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCWIDTH_MASK 0x00007FFFL
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_VSYNCPOLARITY_MASK 0x00008000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCWIDTH_MASK 0x7FFF0000L
+#define DP1_DP_MSA_TIMING_PARAM3__DP_MSA_HSYNCPOLARITY_MASK 0x80000000L
+//DP1_DP_MSA_TIMING_PARAM4
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT__SHIFT 0x0
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH__SHIFT 0x10
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_VHEIGHT_MASK 0x0000FFFFL
+#define DP1_DP_MSA_TIMING_PARAM4__DP_MSA_HWIDTH_MASK 0xFFFF0000L
+//DP1_DP_DSC_CNTL
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT 0x0
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT 0x10
+#define DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK 0x00000003L
+#define DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK 0x1FFF0000L
+//DP1_DP_SEC_CNTL2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND__SHIFT 0x0
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING__SHIFT 0x1
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED__SHIFT 0x2
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE__SHIFT 0x3
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND__SHIFT 0x4
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING__SHIFT 0x5
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED__SHIFT 0x6
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE__SHIFT 0x7
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND__SHIFT 0x8
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING__SHIFT 0x9
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED__SHIFT 0xa
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE__SHIFT 0xb
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND__SHIFT 0xc
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING__SHIFT 0xd
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED__SHIFT 0xe
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE__SHIFT 0xf
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND__SHIFT 0x10
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING__SHIFT 0x11
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED__SHIFT 0x12
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE__SHIFT 0x13
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND__SHIFT 0x14
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING__SHIFT 0x15
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED__SHIFT 0x16
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE__SHIFT 0x17
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND__SHIFT 0x18
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING__SHIFT 0x19
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED__SHIFT 0x1a
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE__SHIFT 0x1b
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_PENDING_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_DEADLINE_MISSED_MASK 0x00000004L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP1_SEND_ANY_LINE_MASK 0x00000008L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_PENDING_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_DEADLINE_MISSED_MASK 0x00000040L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP2_SEND_ANY_LINE_MASK 0x00000080L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_PENDING_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_DEADLINE_MISSED_MASK 0x00000400L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP3_SEND_ANY_LINE_MASK 0x00000800L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_PENDING_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_DEADLINE_MISSED_MASK 0x00004000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP4_SEND_ANY_LINE_MASK 0x00008000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_PENDING_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_DEADLINE_MISSED_MASK 0x00040000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP5_SEND_ANY_LINE_MASK 0x00080000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_PENDING_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_DEADLINE_MISSED_MASK 0x00400000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP6_SEND_ANY_LINE_MASK 0x00800000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_PENDING_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_DEADLINE_MISSED_MASK 0x04000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_SEND_ANY_LINE_MASK 0x08000000L
+#define DP1_DP_SEC_CNTL2__DP_SEC_GSP7_PPS_MASK 0x10000000L
+//DP1_DP_SEC_CNTL3
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP1_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL3__DP_SEC_GSP2_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL4
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP3_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL4__DP_SEC_GSP4_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL5
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP5_LINE_NUM_MASK 0x0000FFFFL
+#define DP1_DP_SEC_CNTL5__DP_SEC_GSP6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_CNTL6
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM__SHIFT 0x0
+#define DP1_DP_SEC_CNTL6__DP_SEC_GSP7_LINE_NUM_MASK 0x0000FFFFL
+//DP1_DP_SEC_CNTL7
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE__SHIFT 0x0
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE__SHIFT 0x1
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE__SHIFT 0x4
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE__SHIFT 0x5
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE__SHIFT 0x8
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE__SHIFT 0x9
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE__SHIFT 0xc
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE__SHIFT 0xd
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE__SHIFT 0x10
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE__SHIFT 0x11
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE__SHIFT 0x14
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE__SHIFT 0x15
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE__SHIFT 0x18
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE__SHIFT 0x19
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE__SHIFT 0x1c
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE__SHIFT 0x1d
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_ACTIVE_MASK 0x00000001L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP0_SEND_IN_IDLE_MASK 0x00000002L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_ACTIVE_MASK 0x00000010L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP1_SEND_IN_IDLE_MASK 0x00000020L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_ACTIVE_MASK 0x00000100L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP2_SEND_IN_IDLE_MASK 0x00000200L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_ACTIVE_MASK 0x00001000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP3_SEND_IN_IDLE_MASK 0x00002000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_ACTIVE_MASK 0x00010000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP4_SEND_IN_IDLE_MASK 0x00020000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_ACTIVE_MASK 0x00100000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP5_SEND_IN_IDLE_MASK 0x00200000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_ACTIVE_MASK 0x01000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP6_SEND_IN_IDLE_MASK 0x02000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_ACTIVE_MASK 0x10000000L
+#define DP1_DP_SEC_CNTL7__DP_SEC_GSP7_SEND_IN_IDLE_MASK 0x20000000L
+//DP1_DP_DB_CNTL
+#define DP1_DP_DB_CNTL__DP_DB_PENDING__SHIFT 0x0
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN__SHIFT 0x4
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR__SHIFT 0x5
+#define DP1_DP_DB_CNTL__DP_DB_LOCK__SHIFT 0x8
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE__SHIFT 0xc
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING__SHIFT 0xf
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN__SHIFT 0x10
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR__SHIFT 0x11
+#define DP1_DP_DB_CNTL__DP_DB_PENDING_MASK 0x00000001L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_MASK 0x00000010L
+#define DP1_DP_DB_CNTL__DP_DB_TAKEN_CLR_MASK 0x00000020L
+#define DP1_DP_DB_CNTL__DP_DB_LOCK_MASK 0x00000100L
+#define DP1_DP_DB_CNTL__DP_DB_DISABLE_MASK 0x00001000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_PENDING_MASK 0x00008000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_MASK 0x00010000L
+#define DP1_DP_DB_CNTL__DP_VUPDATE_DB_TAKEN_CLR_MASK 0x00020000L
+//DP1_DP_MSA_VBID_MISC
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE__SHIFT 0x0
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN__SHIFT 0x4
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE__SHIFT 0x8
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE__SHIFT 0x9
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN__SHIFT 0xc
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN__SHIFT 0xd
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE__SHIFT 0xf
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM__SHIFT 0x10
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_MASK 0x00000003L
+#define DP1_DP_MSA_VBID_MISC__DP_MSA_MISC1_STEREOSYNC_OVERRIDE_EN_MASK 0x00000010L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_MASK 0x00000100L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_MASK 0x00000200L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID1_OVERRIDE_EN_MASK 0x00001000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID2_OVERRIDE_EN_MASK 0x00002000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_REFERENCE_MASK 0x00008000L
+#define DP1_DP_MSA_VBID_MISC__DP_VBID6_LINE_NUM_MASK 0xFFFF0000L
+//DP1_DP_SEC_METADATA_TRANSMISSION
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE__SHIFT 0x0
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE__SHIFT 0x1
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE__SHIFT 0x4
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE__SHIFT 0x10
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_ENABLE_MASK 0x00000001L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_REFERENCE_MASK 0x00000002L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_MSO_METADATA_PACKET_ENABLE_MASK 0x000000F0L
+#define DP1_DP_SEC_METADATA_TRANSMISSION__DP_SEC_METADATA_PACKET_LINE_MASK 0xFFFF0000L
+//DP1_DP_DSC_BYTES_PER_PIXEL
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL__SHIFT 0x0
+#define DP1_DP_DSC_BYTES_PER_PIXEL__DP_DSC_BYTES_PER_PIXEL_MASK 0x7FFFFFFFL
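Every field in this block follows the same <REG>__<FIELD>__SHIFT / <REG>__<FIELD>_MASK convention, so register values are read and written field-by-field with a shift-and-mask. A minimal sketch of that pattern is shown below; the FIELD_GET_EX/FIELD_SET_EX helpers and wrapper names are made up for illustration and are not defined by this header.

#include <stdint.h>

/* Hypothetical helpers: extract a field from, or replace a field within,
 * a 32-bit register value. */
#define FIELD_GET_EX(val, mask, shift)       (((val) & (mask)) >> (shift))
#define FIELD_SET_EX(val, mask, shift, fval) \
	(((val) & ~(mask)) | (((uint32_t)(fval) << (shift)) & (mask)))

/* Example usage with the DP1_DP_DSC_CNTL definitions above. */
static inline uint32_t dp1_dsc_cntl_set_mode(uint32_t reg_val, uint32_t mode)
{
	return FIELD_SET_EX(reg_val,
			    DP1_DP_DSC_CNTL__DP_DSC_MODE_MASK,
			    DP1_DP_DSC_CNTL__DP_DSC_MODE__SHIFT, mode);
}

static inline uint32_t dp1_dsc_cntl_get_slice_width(uint32_t reg_val)
{
	return FIELD_GET_EX(reg_val,
			    DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH_MASK,
			    DP1_DP_DSC_CNTL__DP_DSC_SLICE_WIDTH__SHIFT);
}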
+
+
+// addressBlock: dce_dc_dcio_dcio_dispdec
+//DC_GENERICA
+#define DC_GENERICA__GENERICA_EN__SHIFT 0x0
+#define DC_GENERICA__GENERICA_SEL__SHIFT 0x7
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL__SHIFT 0xc
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL__SHIFT 0x10
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL__SHIFT 0x14
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL__SHIFT 0x18
+#define DC_GENERICA__GENERICA_EN_MASK 0x00000001L
+#define DC_GENERICA__GENERICA_SEL_MASK 0x00000F80L
+#define DC_GENERICA__GENERICA_UNIPHY_REFDIV_CLK_SEL_MASK 0x0000F000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_SEL_MASK 0x000F0000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_SSC_CLK_SEL_MASK 0x00F00000L
+#define DC_GENERICA__GENERICA_UNIPHY_FBDIV_CLK_DIV2_SEL_MASK 0x0F000000L
+//UNIPHYA_LINK_CNTL
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYA_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYA_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYA_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYA_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYA_CHANNEL_XBAR_CNTL
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//UNIPHYB_LINK_CNTL
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG__SHIFT 0x0
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET__SHIFT 0x4
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION__SHIFT 0x8
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT__SHIFT 0xc
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT__SHIFT 0xd
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT__SHIFT 0xe
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT__SHIFT 0xf
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY__SHIFT 0x14
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK__SHIFT 0x18
+#define UNIPHYB_LINK_CNTL__UNIPHY_PFREQCHG_MASK 0x00000001L
+#define UNIPHYB_LINK_CNTL__UNIPHY_PIXVLD_RESET_MASK 0x00000010L
+#define UNIPHYB_LINK_CNTL__UNIPHY_MINIMUM_PIXVLD_LOW_DURATION_MASK 0x00000700L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL0_INVERT_MASK 0x00001000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL1_INVERT_MASK 0x00002000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL2_INVERT_MASK 0x00004000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_CHANNEL3_INVERT_MASK 0x00008000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LANE_STAGGER_DELAY_MASK 0x00700000L
+#define UNIPHYB_LINK_CNTL__UNIPHY_LINK_ENABLE_HPD_MASK_MASK 0x03000000L
+//UNIPHYB_CHANNEL_XBAR_CNTL
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT 0x0
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT 0x8
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT 0x10
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT 0x18
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE__SHIFT 0x1c
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK 0x00000003L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK 0x00000300L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK 0x00030000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK 0x03000000L
+#define UNIPHYB_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK 0x10000000L
+//DC_PINSTRAPS
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD__SHIFT 0xd
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO__SHIFT 0xe
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS__SHIFT 0x10
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY__SHIFT 0x11
+#define DC_PINSTRAPS__DC_PINSTRAPS_SMS_EN_HARD_MASK 0x00002000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_AUDIO_MASK 0x0000C000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CCBYPASS_MASK 0x00010000L
+#define DC_PINSTRAPS__DC_PINSTRAPS_CONNECTIVITY_MASK 0x000E0000L
+//DCIO_CLOCK_CNTL
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS__SHIFT 0x5
+#define DCIO_CLOCK_CNTL__DISPCLK_R_DCIO_GATE_DIS_MASK 0x00000020L
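Multi-field control registers in this block, such as UNIPHYA_CHANNEL_XBAR_CNTL above, are typically composed by shifting each field into place, masking it, and OR-ing the pieces together. A rough sketch under that assumption follows; the helper name is hypothetical, not something this header provides.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: build a UNIPHYA_CHANNEL_XBAR_CNTL value from the four
 * 2-bit channel source selects and the link-enable bit defined above. */
static inline uint32_t uniphya_xbar_cntl(uint32_t src0, uint32_t src1,
					 uint32_t src2, uint32_t src3,
					 bool link_en)
{
	uint32_t v = 0;

	v |= (src0 << UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE__SHIFT) &
	     UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL0_XBAR_SOURCE_MASK;
	v |= (src1 << UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE__SHIFT) &
	     UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL1_XBAR_SOURCE_MASK;
	v |= (src2 << UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE__SHIFT) &
	     UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL2_XBAR_SOURCE_MASK;
	v |= (src3 << UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE__SHIFT) &
	     UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_CHANNEL3_XBAR_SOURCE_MASK;
	if (link_en)
		v |= UNIPHYA_CHANNEL_XBAR_CNTL__UNIPHY_LINK_ENABLE_MASK;

	return v;
}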
+
+// addressBlock: dce_dc_dcio_dcio_chip_dispdec
+//DC_GPIO_DDC1_MASK
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE__SHIFT 0x10
+#define DC_GPIO_DDC1_MASK__AUX1_POL__SHIFT 0x14
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC1_MASK__AUX_PAD1_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC1_MASK__AUX1_POL_MASK 0x00100000L
+#define DC_GPIO_DDC1_MASK__ALLOW_HW_DDC1_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC1_MASK__DC_GPIO_DDC1DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC1_A
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC1_A__DC_GPIO_DDC1DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC1_EN
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC1_EN__DC_GPIO_DDC1DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC1_Y
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC1_Y__DC_GPIO_DDC1DATA_Y_MASK 0x00000100L
+//DC_GPIO_DDC2_MASK
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK__SHIFT 0x0
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN__SHIFT 0x4
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV__SHIFT 0x6
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK__SHIFT 0x8
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN__SHIFT 0xc
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV__SHIFT 0xe
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE__SHIFT 0x10
+#define DC_GPIO_DDC2_MASK__AUX2_POL__SHIFT 0x14
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN__SHIFT 0x16
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR__SHIFT 0x18
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR__SHIFT 0x1c
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_MASK_MASK 0x00000001L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_PD_EN_MASK 0x00000010L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_RECV_MASK 0x00000040L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_MASK_MASK 0x00000100L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_PD_EN_MASK 0x00001000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_RECV_MASK 0x00004000L
+#define DC_GPIO_DDC2_MASK__AUX_PAD2_MODE_MASK 0x00010000L
+#define DC_GPIO_DDC2_MASK__AUX2_POL_MASK 0x00100000L
+#define DC_GPIO_DDC2_MASK__ALLOW_HW_DDC2_PD_EN_MASK 0x00400000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2CLK_STR_MASK 0x0F000000L
+#define DC_GPIO_DDC2_MASK__DC_GPIO_DDC2DATA_STR_MASK 0xF0000000L
+//DC_GPIO_DDC2_A
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A__SHIFT 0x0
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A__SHIFT 0x8
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2CLK_A_MASK 0x00000001L
+#define DC_GPIO_DDC2_A__DC_GPIO_DDC2DATA_A_MASK 0x00000100L
+//DC_GPIO_DDC2_EN
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN__SHIFT 0x0
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN__SHIFT 0x8
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2CLK_EN_MASK 0x00000001L
+#define DC_GPIO_DDC2_EN__DC_GPIO_DDC2DATA_EN_MASK 0x00000100L
+//DC_GPIO_DDC2_Y
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y__SHIFT 0x0
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y__SHIFT 0x8
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2CLK_Y_MASK 0x00000001L
+#define DC_GPIO_DDC2_Y__DC_GPIO_DDC2DATA_Y_MASK 0x00000100L
+//DC_GPIO_HPD_MASK
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK__SHIFT 0x0
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK__SHIFT 0x1
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS__SHIFT 0x2
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL__SHIFT 0x3
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS__SHIFT 0x4
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV__SHIFT 0x6
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK__SHIFT 0x8
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS__SHIFT 0x9
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV__SHIFT 0xa
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK__SHIFT 0x10
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS__SHIFT 0x11
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV__SHIFT 0x12
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK__SHIFT 0x14
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS__SHIFT 0x15
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV__SHIFT 0x16
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK__SHIFT 0x18
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS__SHIFT 0x19
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV__SHIFT 0x1a
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK__SHIFT 0x1c
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS__SHIFT 0x1d
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV__SHIFT 0x1e
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_MASK_MASK 0x00000001L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_MASK_MASK 0x00000002L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_PD_DIS_MASK 0x00000004L
+#define DC_GPIO_HPD_MASK__DC_GPIO_RX_HPD_RX_SEL_MASK 0x00000008L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_PD_DIS_MASK 0x00000010L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD1_RECV_MASK 0x000000C0L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_MASK_MASK 0x00000100L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_PD_DIS_MASK 0x00000200L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD2_RECV_MASK 0x00000C00L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_MASK_MASK 0x00010000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_PD_DIS_MASK 0x00020000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD3_RECV_MASK 0x000C0000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_MASK_MASK 0x00100000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_PD_DIS_MASK 0x00200000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD4_RECV_MASK 0x00C00000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_MASK_MASK 0x01000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_PD_DIS_MASK 0x02000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD5_RECV_MASK 0x0C000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_MASK_MASK 0x10000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_PD_DIS_MASK 0x20000000L
+#define DC_GPIO_HPD_MASK__DC_GPIO_HPD6_RECV_MASK 0xC0000000L
+//DC_GPIO_HPD_A
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A__SHIFT 0x0
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A__SHIFT 0x8
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A__SHIFT 0x10
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A__SHIFT 0x18
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A__SHIFT 0x1a
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A__SHIFT 0x1c
+#define DC_GPIO_HPD_A__DC_GPIO_HPD1_A_MASK 0x00000001L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD2_A_MASK 0x00000100L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD3_A_MASK 0x00010000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD4_A_MASK 0x01000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD5_A_MASK 0x04000000L
+#define DC_GPIO_HPD_A__DC_GPIO_HPD6_A_MASK 0x10000000L
+
+
+//DC_GPIO_HPD_EN
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN__SHIFT 0x0
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI__SHIFT 0x1
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE__SHIFT 0x2
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI__SHIFT 0x3
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE__SHIFT 0x4
+#define DC_GPIO_HPD_EN__HPD12_SPARE0__SHIFT 0x5
+#define DC_GPIO_HPD_EN__HPD1_SEL0__SHIFT 0x6
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0__SHIFT 0x7
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN__SHIFT 0x8
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI__SHIFT 0x9
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD1_EN_MASK 0x00000001L
+#define DC_GPIO_HPD_EN__HPD1_SCHMEN_PI_MASK 0x00000002L
+#define DC_GPIO_HPD_EN__HPD1_SLEWNCORE_MASK 0x00000004L
+#define DC_GPIO_HPD_EN__RX_HPD_SCHMEN_PI_MASK 0x00000008L
+#define DC_GPIO_HPD_EN__RX_HPD_SLEWNCORE_MASK 0x00000010L
+#define DC_GPIO_HPD_EN__HPD1_SEL0_MASK 0x00000040L
+#define DC_GPIO_HPD_EN__RX_HPD_SEL0_MASK 0x00000080L
+#define DC_GPIO_HPD_EN__DC_GPIO_HPD2_EN_MASK 0x00000100L
+#define DC_GPIO_HPD_EN__HPD2_SCHMEN_PI_MASK 0x00000200L
+
+//DC_GPIO_HPD_Y
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y__SHIFT 0x0
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y__SHIFT 0x8
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK 0x00000001L
+#define DC_GPIO_HPD_Y__DC_GPIO_HPD2_Y_MASK 0x00000100L
+//DC_GPIO_PAD_STRENGTH_1
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN__SHIFT 0x0
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT 0x4
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN__SHIFT 0x8
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP__SHIFT 0xc
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN__SHIFT 0x10
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP__SHIFT 0x14
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN__SHIFT 0x18
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP__SHIFT 0x1c
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SN_MASK 0x0000000FL
+#define DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK 0x000000F0L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SN_MASK 0x00000F00L
+#define DC_GPIO_PAD_STRENGTH_1__RX_HPD_STRENGTH_SP_MASK 0x0000F000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SN_MASK 0x000F0000L
+#define DC_GPIO_PAD_STRENGTH_1__TX_HPD_STRENGTH_SP_MASK 0x00F00000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SN_MASK 0x0F000000L
+#define DC_GPIO_PAD_STRENGTH_1__SYNC_STRENGTH_SP_MASK 0xF0000000L
+//PHY_AUX_CNTL
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN__SHIFT 0x0
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE__SHIFT 0x1
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL__SHIFT 0x2
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE__SHIFT 0x3
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN__SHIFT 0x4
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN__SHIFT 0x5
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN__SHIFT 0x6
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN__SHIFT 0x7
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN__SHIFT 0x8
+#define PHY_AUX_CNTL__AUX_PAD_WAKE__SHIFT 0x9
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL__SHIFT 0xa
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL__SHIFT 0xc
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN__SHIFT 0x17
+#define PHY_AUX_CNTL__AUX_CAL_SPARE__SHIFT 0x18
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST__SHIFT 0x1c
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_SLEWN_MASK 0x00000001L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_WAKE_MASK 0x00000002L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_RXSEL_MASK 0x00000004L
+#define PHY_AUX_CNTL__AUXSLAVE_PAD_MODE_MASK 0x00000008L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_PD_EN_MASK 0x00000010L
+#define PHY_AUX_CNTL__DDCSLAVE_DATA_EN_MASK 0x00000020L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_PD_EN_MASK 0x00000040L
+#define PHY_AUX_CNTL__DDCSLAVE_CLK_EN_MASK 0x00000080L
+#define PHY_AUX_CNTL__AUXSLAVE_CLK_PD_EN_MASK 0x00000100L
+#define PHY_AUX_CNTL__AUX_PAD_WAKE_MASK 0x00000200L
+#define PHY_AUX_CNTL__AUX1_PAD_RXSEL_MASK 0x00000C00L
+#define PHY_AUX_CNTL__AUX2_PAD_RXSEL_MASK 0x00003000L
+#define PHY_AUX_CNTL__AUX_CAL_RESBIASEN_MASK 0x00800000L
+#define PHY_AUX_CNTL__AUX_CAL_SPARE_MASK 0x03000000L
+#define PHY_AUX_CNTL__AUX_CAL_BIASENTST_MASK 0x70000000L
+//DC_GPIO_AUX_CTRL_1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1__SHIFT 0x3
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1__SHIFT 0x5
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9__SHIFT 0x6
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1__SHIFT 0x7
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN__SHIFT 0xa
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN__SHIFT 0xb
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE__SHIFT 0xe
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL__SHIFT 0x1a
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL__SHIFT 0x1d
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL__SHIFT 0x1e
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_0P9_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_CSEL_1P1_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_0P9_MASK 0x00000004L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RSEL_1P1_MASK 0x00000008L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_0P9_MASK 0x00000010L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_CSEL_1P1_MASK 0x00000020L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_0P9_MASK 0x00000040L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RSEL_1P1_MASK 0x00000080L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_BIASCRTEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_BIASCRTEN_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX_RESBIASEN_MASK 0x00000400L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_I2C_RESBIASEN_MASK 0x00000800L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX1_COMPSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SPARE_MASK 0x0000C000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_SLEWN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_RXSEL_MASK 0x00300000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX2_COMPSEL_MASK 0x02000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX3_COMPSEL_MASK 0x04000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX4_COMPSEL_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX5_COMPSEL_MASK 0x10000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_AUX6_COMPSEL_MASK 0x20000000L
+#define DC_GPIO_AUX_CTRL_1__DC_GPIO_DDCVGA_COMPSEL_MASK 0x40000000L
+//DC_GPIO_AUX_CTRL_2
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1__SHIFT 0x11
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN__SHIFT 0x14
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN__SHIFT 0x1b
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL__SHIFT 0x1c
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_FALLSLEWSEL_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCEN_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SPIKERCSEL_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_0P9_MASK 0x00010000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_CSEL_1P1_MASK 0x00020000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_0P9_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RSEL_1P1_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_BIASCRTEN_MASK 0x00100000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_SLEWN_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD_RESBIASEN_MASK 0x08000000L
+#define DC_GPIO_AUX_CTRL_2__DC_GPIO_HPD12_COMPSEL_MASK 0x10000000L
+//DC_GPIO_AUX_CTRL_3
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM__SHIFT 0x1
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP__SHIFT 0x8
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP__SHIFT 0x9
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE__SHIFT 0x10
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_3__AUX1_NEN_RTERM_MASK 0x00000001L
+#define DC_GPIO_AUX_CTRL_3__AUX2_NEN_RTERM_MASK 0x00000002L
+#define DC_GPIO_AUX_CTRL_3__AUX1_DP_DN_SWAP_MASK 0x00000100L
+#define DC_GPIO_AUX_CTRL_3__AUX2_DP_DN_SWAP_MASK 0x00000200L
+#define DC_GPIO_AUX_CTRL_3__AUX1_HYS_TUNE_MASK 0x00030000L
+#define DC_GPIO_AUX_CTRL_3__AUX2_HYS_TUNE_MASK 0x000C0000L
+//DC_GPIO_AUX_CTRL_4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL__SHIFT 0x4
+#define DC_GPIO_AUX_CTRL_4__AUX1_AUX_CTRL_MASK 0x0000000FL
+#define DC_GPIO_AUX_CTRL_4__AUX2_AUX_CTRL_MASK 0x000000F0L
+//DC_GPIO_AUX_CTRL_5
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE__SHIFT 0x0
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE__SHIFT 0x2
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE__SHIFT 0xc
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE__SHIFT 0xd
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN__SHIFT 0x12
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN__SHIFT 0x13
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL__SHIFT 0x18
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL__SHIFT 0x19
+#define DC_GPIO_AUX_CTRL_5__AUX1_VOD_TUNE_MASK 0x00000003L
+#define DC_GPIO_AUX_CTRL_5__AUX2_VOD_TUNE_MASK 0x0000000CL
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD1_I2CMODE_MASK 0x00001000L
+#define DC_GPIO_AUX_CTRL_5__DDC_PAD2_I2CMODE_MASK 0x00002000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_I2C_VPH_1V2_EN_MASK 0x00040000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_I2C_VPH_1V2_EN_MASK 0x00080000L
+#define DC_GPIO_AUX_CTRL_5__DDC1_PAD_I2C_CTRL_MASK 0x01000000L
+#define DC_GPIO_AUX_CTRL_5__DDC2_PAD_I2C_CTRL_MASK 0x02000000L
+//AUXI2C_PAD_ALL_PWR_OK
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK__SHIFT 0x0
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK__SHIFT 0x1
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY1_ALL_PWR_OK_MASK 0x00000001L
+#define AUXI2C_PAD_ALL_PWR_OK__AUXI2C_PHY2_ALL_PWR_OK_MASK 0x00000002L
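Single-bit status fields in this GPIO block, such as the DC_GPIO_HPD_Y sense bits defined earlier, are normally just tested against their mask. A minimal sketch with a hypothetical helper name, not taken from this header:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: report whether the HPD1 pin reads high in a
 * DC_GPIO_HPD_Y register value. */
static inline bool dc_gpio_hpd1_high(uint32_t hpd_y_val)
{
	return (hpd_y_val & DC_GPIO_HPD_Y__DC_GPIO_HPD1_Y_MASK) != 0;
}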
+
+
+// addressBlock: azf0endpoint0_endpointind
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT0_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT0_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
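Each register in the block above is described by a __SHIFT/_MASK pair: the mask selects the field's bits within the 32-bit register and the shift right-justifies the field. A minimal sketch of how such pairs are typically consumed (read, modify one field, write back) is shown below; the rreg32()/wreg32() accessors and the register offset are hypothetical stand-ins for the driver's own MMIO helpers, not part of this header.

    #include <stdint.h>

    /* Hypothetical MMIO accessors; a real driver supplies its own. */
    extern uint32_t rreg32(uint32_t offset);
    extern void wreg32(uint32_t offset, uint32_t value);

    /* Extract a field from a raw register value via its _MASK/__SHIFT pair. */
    static inline uint32_t field_get(uint32_t val, uint32_t mask, uint32_t shift)
    {
            return (val & mask) >> shift;
    }

    /* Replace one field, leaving the register's other bits untouched. */
    static inline uint32_t field_set(uint32_t val, uint32_t mask, uint32_t shift,
                                     uint32_t field)
    {
            return (val & ~mask) | ((field << shift) & mask);
    }

    /*
     * Example: program the STREAM_ID field of endpoint 0's CHANNEL_STREAM_ID
     * register; reg_offset is a hypothetical, caller-provided MMIO offset.
     */
    static void azalia_set_stream_id(uint32_t reg_offset, uint32_t stream_id)
    {
            uint32_t val = rreg32(reg_offset);

            val = field_set(val,
                            AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK,
                            AZF0ENDPOINT0_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT,
                            stream_id);
            wreg32(reg_offset, val);
    }

The driver wraps this same read-modify-write pattern in its own generic helpers; the sketch only illustrates what the shift/mask naming convention encodes.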
+
+// addressBlock: azf0endpoint1_endpointind
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CONTROL_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_STRIPE_CONTROL__STRIPE_CAPABILITY_MASK 0x00700000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_RAMP_RATE__RAMP_RATE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_OFFSET_CHANGED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__CLEAR_GTC_COUNTER_MIN_MAX_DELTA_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_EMBEDDING__PRESENTATION_TIME_EMBEDDING_GROUP_MASK 0x00000070L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_CONTROL_GTC_OFFSET_DEBUG
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA__GTC_COUNTER_DELTA_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MIN__GTC_COUNTER_DELTA_MIN_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_CONVERTER_GTC_COUNTER_DELTA_MAX__GTC_COUNTER_DELTA_MAX_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIDGET_CONTROL__OUT_ENABLE_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL01_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL23_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL45_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL67_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__VIDEO_LIPSYNC_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC__AUDIO_LIPSYNC_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__MANUFACTURER_ID_MASK 0x0000FFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO0__PRODUCT_ID_MASK 0xFFFF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO1__SINK_DESCRIPTION_LEN_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO2__PORT_ID0_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO3__PORT_ID1_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION0_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION1_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION2_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO4__DESCRIPTION3_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION4_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION5_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION6_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO5__DESCRIPTION7_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION8_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION9_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION10_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO6__DESCRIPTION11_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION12_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION13_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION14_MASK 0x00FF0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO7__DESCRIPTION15_MASK 0xFF000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION16_MASK 0x000000FFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_SINK_INFO8__DESCRIPTION17_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE__SHIFT 0x9
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID__SHIFT 0xc
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x11
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0x14
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_MUTE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL1_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_ENABLE_MASK 0x00000100L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_MUTE_MASK 0x00000200L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL3_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00010000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00020000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_MULTICHANNEL_MODE__MULTICHANNEL_MODE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_MODE_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_0__IEC_60958_CS_SOURCE_NUMBER_MASK 0x0000003CL
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN__SHIFT 0x2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH__SHIFT 0x3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_MASK 0x00000003L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_CLOCK_ACCURACY_OVRRD_EN_MASK 0x00000004L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_MASK 0x00000078L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_1__IEC_60958_CS_WORD_LENGTH_OVRRD_EN_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_MASK 0x0000003FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_2__IEC_60958_CS_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000040L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_3__IEC_60958_CS_ORIGINAL_SAMPLING_FREQUENCY_OVRRD_EN_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A__SHIFT 0x5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID__SHIFT 0x7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_SAMPLING_FREQUENCY_COEFF_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_MPEG_SURROUND_INFO_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_MASK 0x00000060L
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_4__IEC_60958_CS_CGMS_A_VALID_MASK 0x00000080L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_L_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_5__IEC_60958_CS_CHANNEL_NUMBER_R_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_2_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_6__IEC_60958_CS_CHANNEL_NUMBER_3_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_4_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_7__IEC_60958_CS_CHANNEL_NUMBER_5_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_6_MASK 0x0000000FL
+#define AZF0ENDPOINT1_AZALIA_F0_PIN_CONTROL_CODEC_CS_OVERRIDE_8__IEC_60958_CS_CHANNEL_NUMBER_7_MASK 0x000000F0L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_ASSOCIATION_INFO__ASSOCIATION_INFO_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_DIGITAL_OUTPUT_STATUS__OUTPUT_ACTIVE_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_CODING_TYPE__CODING_TYPE_MASK 0x000000FFL
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE__SHIFT 0x1
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE__SHIFT 0x10
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGED_ACK_UR_ENABLE_MASK 0x00000002L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_REASON_MASK 0x0000FF00L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_FORMAT_CHANGED__FORMAT_CHANGE_RESPONSE_MASK 0x00FF0000L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_WIRELESS_DISPLAY_IDENTIFICATION__WIRELESS_DISPLAY_IDENTIFICATION_MASK 0x00000003L
+//AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_ENABLE_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_CODEC_PIN_CONTROL_REMOTE_KEEPALIVE__REMOTE_KEEP_ALIVE_CAPABILITY_MASK 0x00000010L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLE_STATUS__AUDIO_ENABLE_STATUS_MASK 0x00000001L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_ENABLED_INT_STATUS__AUDIO_ENABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_DISABLED_INT_STATUS__AUDIO_DISABLED_TYPE_MASK 0x00000100L
+//AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG__SHIFT 0x0
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK__SHIFT 0x4
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE__SHIFT 0x8
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_FLAG_MASK 0x00000001L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_MASK_MASK 0x00000010L
+#define AZF0ENDPOINT1_AZALIA_F0_AUDIO_FORMAT_CHANGED_INT_STATUS__AUDIO_FORMAT_CHANGED_TYPE_MASK 0x00000100L
+
+
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION__SHIFT 0x11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO__SHIFT 0x12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT__SHIFT 0x1b
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT__SHIFT 0x1f
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__SPEAKER_ALLOCATION_MASK 0x0000007FL
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__HDMI_CONNECTION_MASK 0x00010000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DP_CONNECTION_MASK 0x00020000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__EXTRA_CONNECTION_INFO_MASK 0x00FC0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LFE_PLAYBACK_LEVEL_MASK 0x03000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__LEVEL_SHIFT_MASK 0x78000000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER__DOWN_MIX_INHIBIT_MASK 0x80000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO__SHIFT 0x18
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR0__SUPPORTED_FREQUENCIES_STEREO_MASK 0xFF000000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR1__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR2__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR3__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR4__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR5__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR6__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR7__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR8__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR9__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR10__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR11__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR12__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS__SHIFT 0x0
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES__SHIFT 0x8
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2__SHIFT 0x10
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__MAX_CHANNELS_MASK 0x00000007L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__SUPPORTED_FREQUENCIES_MASK 0x0000FF00L
+#define AZF0ENDPOINT2_AZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13__DESCRIPTOR_BYTE_2_MASK 0x00FF0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+// addressBlock: azf0inputendpoint0_inputendpointind
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
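(Editorial aside, not part of the patch: each __SHIFT/_MASK macro pair above describes one bit field of the named register, and a field is conventionally read by AND-ing the raw register value with the mask and shifting right by the shift. A minimal C sketch under that assumption, using the PRESENCE_DETECT field defined above; "raw" stands for a hypothetical 32-bit register read and the helper name is illustrative only.)

/* Illustrative sketch only: extract the PRESENCE_DETECT bit from a raw
 * AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE value.
 * The same mask-then-shift pattern applies to every field in this header. */
static inline unsigned int az_input_pin_presence_detect(unsigned int raw)
{
	return (raw & AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK)
		>> AZF0INPUTENDPOINT0_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT;
}
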
+
+
+// addressBlock: azf0inputendpoint1_inputendpointind
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__FORMAT_OVERRIDE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE__SHIFT 0xe
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE__SHIFT 0xf
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__NUMBER_OF_CHANNELS_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__BITS_PER_SAMPLE_MASK 0x00000070L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_DIVISOR_MASK 0x00000700L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_MULTIPLE_MASK 0x00003800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__SAMPLE_BASE_RATE_MASK 0x00004000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CONVERTER_FORMAT__STREAM_TYPE_MASK 0x00008000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__CHANNEL_ID_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_CHANNEL_STREAM_ID__STREAM_ID_MASK 0x000000F0L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE__SHIFT 0x17
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__DIGEN_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__V_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__VCFG_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__COPY_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__NON_AUDIO_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__PRO_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__L_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__CC_MASK 0x00007F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_CONTROL_DIGITAL_CONVERTER__KEEPALIVE_MASK 0x00800000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_STREAM_FORMATS__STREAM_FORMATS_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_RATE_CAPABILITIES_MASK 0x00000FFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_CONVERTER_PARAMETER_SUPPORTED_SIZE_RATES__AUDIO_BIT_CAPABILITIES_MASK 0x001F0000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL__SHIFT 0xa
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP__SHIFT 0xb
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_CHANNEL_CAPABILITIES_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__INPUT_AMPLIFIER_PRESENT_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__OUTPUT_AMPLIFIER_PRESENT_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AMPLIFIER_PARAMETER_OVERRIDE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__STRIPE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__PROCESSING_WIDGET_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__UNSOLICITED_RESPONSE_CAPABILITY_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__CONNECTION_LIST_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__DIGITAL_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__POWER_CONTROL_MASK 0x00000400L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__LR_SWAP_MASK 0x00000800L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__AUDIO_WIDGET_CAPABILITIES_DELAY_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_AUDIO_WIDGET_CAPABILITIES__TYPE_MASK 0x00F00000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY__SHIFT 0x2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE__SHIFT 0x3
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS__SHIFT 0x6
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__IMPEDANCE_SENSE_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__TRIGGER_REQUIRED_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__JACK_DETECTION_CAPABILITY_MASK 0x00000004L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HEADPHONE_DRIVE_CAPABLE_MASK 0x00000008L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__OUTPUT_CAPABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__INPUT_CAPABLE_MASK 0x00000020L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__BALANCED_I_O_PINS_MASK 0x00000040L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__HDMI_MASK 0x00000080L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__VREF_CONTROL_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__EAPD_CAPABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_PARAMETER_CAPABILITIES__DP_MASK 0x01000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE__SHIFT 0x7
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__TAG_MASK 0x0000003FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE__ENABLE_MASK 0x00000080L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__IMPEDANCE_SENSE_MASK 0x7FFFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_INPUT_PIN_SENSE__PRESENCE_DETECT_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_WIDGET_CONTROL__IN_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL0_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL1_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL2_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE__MULTICHANNEL3_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE__SHIFT 0x9
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE__SHIFT 0x11
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE__SHIFT 0x19
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_ENABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_MUTE_MASK 0x00000002L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL4_CHANNEL_ID_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_ENABLE_MASK 0x00000100L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_MUTE_MASK 0x00000200L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL5_CHANNEL_ID_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_ENABLE_MASK 0x00010000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_MUTE_MASK 0x00020000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL6_CHANNEL_ID_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_ENABLE_MASK 0x01000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_MUTE_MASK 0x02000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_MULTICHANNEL_ENABLE2__MULTICHANNEL7_CHANNEL_ID_MASK 0xF0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_CAPABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_HBR__HBR_ENABLE_MASK 0x00000010L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_CHANNEL_ALLOCATION__CHANNEL_ALLOCATION_MASK 0x000000FFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_GATING_DISABLE_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__CLOCK_ON_STATE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_HOT_PLUG_CONTROL__AUDIO_ENABLED_MASK 0x80000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE__SHIFT 0x1c
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_PAYLOAD_MASK 0x03FFFFFFL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_UNSOLICITED_RESPONSE_FORCE__UNSOLICITED_RESPONSE_FORCE_MASK 0x10000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR__SHIFT 0xc
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE__SHIFT 0x14
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION__SHIFT 0x18
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY__SHIFT 0x1e
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__SEQUENCE_MASK 0x0000000FL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_ASSOCIATION_MASK 0x000000F0L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__MISC_MASK 0x00000F00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__COLOR_MASK 0x0000F000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__CONNECTION_TYPE_MASK 0x000F0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__DEFAULT_DEVICE_MASK 0x00F00000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__LOCATION_MASK 0x3F000000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_RESPONSE_CONFIGURATION_DEFAULT__PORT_CONNECTIVITY_MASK 0xC0000000L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__LPIB_SNAPSHOT_LOCK_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_SNAPSHOT_CONTROL__CYCLIC_BUFFER_WRAP_COUNT_MASK 0x0000FF00L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB__LPIB_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_LPIB_TIMER_SNAPSHOT__LPIB_TIMER_SNAPSHOT_MASK 0xFFFFFFFFL
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT__SHIFT 0x1
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE__SHIFT 0x4
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE__SHIFT 0x5
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_MASK 0x00000001L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__CHANNEL_LAYOUT_MASK 0x00000006L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_ACTIVITY_UR_ENABLE_MASK 0x00000010L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INPUT_STATUS_CONTROL__INPUT_CL_CS_INFOFRAME_CHANGE_UR_ENABLE_MASK 0x00000020L
+//AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT__SHIFT 0x0
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION__SHIFT 0x8
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5__SHIFT 0x10
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID__SHIFT 0x1f
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_COUNT_MASK 0x00000007L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__CHANNEL_ALLOCATION_MASK 0x0000FF00L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_BYTE_5_MASK 0x00FF0000L
+#define AZF0INPUTENDPOINT1_AZALIA_F0_CODEC_INPUT_PIN_CONTROL_INFOFRAME__INFOFRAME_VALID_MASK 0x80000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
index 312c50ea30f3..f268d33c4744 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_offset.h
@@ -436,6 +436,8 @@
#define regPHYESYMCLK_CLOCK_CNTL_BASE_IDX 2
#define regDCCG_GATE_DISABLE_CNTL3 0x005a
#define regDCCG_GATE_DISABLE_CNTL3_BASE_IDX 2
+#define regHDMISTREAMCLK0_DTO_PARAM 0x005b
+#define regHDMISTREAMCLK0_DTO_PARAM_BASE_IDX 2
#define regDCCG_AUDIO_DTBCLK_DTO_PHASE 0x0061
#define regDCCG_AUDIO_DTBCLK_DTO_PHASE_BASE_IDX 2
#define regDCCG_AUDIO_DTBCLK_DTO_MODULO 0x0062
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
index a9d553ef26c0..1f21f313bd1d 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_3_1_2_sh_mask.h
@@ -1438,6 +1438,14 @@
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE0_GATE_DISABLE_MASK 0x00200000L
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_ROOT_LE1_GATE_DISABLE_MASK 0x00400000L
#define DCCG_GATE_DISABLE_CNTL3__SYMCLK32_LE1_GATE_DISABLE_MASK 0x00800000L
+//HDMISTREAMCLK0_DTO_PARAM
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE__SHIFT 0x0
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO__SHIFT 0x8
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN__SHIFT 0x10
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_PHASE_MASK 0x000000FFL
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_MODULO_MASK 0x0000FF00L
+#define HDMISTREAMCLK0_DTO_PARAM__HDMISTREAMCLK0_DTO_EN_MASK 0x00010000L
+
//DCCG_AUDIO_DTBCLK_DTO_PHASE
#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE__SHIFT 0x0
#define DCCG_AUDIO_DTBCLK_DTO_PHASE__DCCG_AUDIO_DTBCLK_DTO_PHASE_MASK 0xFFFFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
index bd37aa6b6560..b4b2584bbd66 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_offset.h
@@ -77,4 +77,9 @@
#define smnDF_CS_UMC_AON0_DramBaseAddress0 0x1c110UL
#define smnDF_CS_UMC_AON0_DramLimitAddress0 0x1c114UL
+#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow 0x067e
+#define mmDF_CS_UMC_AON0_HardwareAssertMaskLow_BASE_IDX 0
+#define mmDF_NCS_PG0_HardwareAssertMaskHigh 0x067f
+#define mmDF_NCS_PG0_HardwareAssertMaskHigh_BASE_IDX 0
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
index f804e13b002e..f45ec6f97ff2 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/df/df_3_6_sh_mask.h
@@ -62,4 +62,136 @@
#define DF_CS_UMC_AON0_DramLimitAddress0__AllowReqIO_MASK 0x00000400L
#define DF_CS_UMC_AON0_DramLimitAddress0__DramLimitAddr_MASK 0xFFFFF000L
+//DF_CS_UMC_AON0_HardwareAssertMaskLow
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0__SHIFT 0x0
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1__SHIFT 0x1
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2__SHIFT 0x2
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3__SHIFT 0x3
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4__SHIFT 0x4
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5__SHIFT 0x5
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6__SHIFT 0x6
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7__SHIFT 0x7
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8__SHIFT 0x8
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9__SHIFT 0x9
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10__SHIFT 0xa
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11__SHIFT 0xb
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12__SHIFT 0xc
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13__SHIFT 0xd
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14__SHIFT 0xe
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15__SHIFT 0xf
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16__SHIFT 0x10
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17__SHIFT 0x11
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18__SHIFT 0x12
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19__SHIFT 0x13
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20__SHIFT 0x14
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21__SHIFT 0x15
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22__SHIFT 0x16
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23__SHIFT 0x17
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24__SHIFT 0x18
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25__SHIFT 0x19
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26__SHIFT 0x1a
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27__SHIFT 0x1b
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28__SHIFT 0x1c
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29__SHIFT 0x1d
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30__SHIFT 0x1e
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31__SHIFT 0x1f
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk0_MASK 0x00000001L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk1_MASK 0x00000002L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk2_MASK 0x00000004L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk3_MASK 0x00000008L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk4_MASK 0x00000010L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk5_MASK 0x00000020L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk6_MASK 0x00000040L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk7_MASK 0x00000080L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk8_MASK 0x00000100L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk9_MASK 0x00000200L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk10_MASK 0x00000400L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk11_MASK 0x00000800L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk12_MASK 0x00001000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk13_MASK 0x00002000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk14_MASK 0x00004000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk15_MASK 0x00008000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk16_MASK 0x00010000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk17_MASK 0x00020000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk18_MASK 0x00040000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk19_MASK 0x00080000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk20_MASK 0x00100000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk21_MASK 0x00200000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk22_MASK 0x00400000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk23_MASK 0x00800000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk24_MASK 0x01000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk25_MASK 0x02000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk26_MASK 0x04000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk27_MASK 0x08000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk28_MASK 0x10000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk29_MASK 0x20000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk30_MASK 0x40000000L
+#define DF_CS_UMC_AON0_HardwareAssertMaskLow__HWAssertMsk31_MASK 0x80000000L
+
+//DF_NCS_PG0_HardwareAssertMaskHigh
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0__SHIFT 0x0
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1__SHIFT 0x1
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2__SHIFT 0x2
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3__SHIFT 0x3
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4__SHIFT 0x4
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5__SHIFT 0x5
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6__SHIFT 0x6
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7__SHIFT 0x7
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8__SHIFT 0x8
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9__SHIFT 0x9
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10__SHIFT 0xa
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11__SHIFT 0xb
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12__SHIFT 0xc
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13__SHIFT 0xd
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14__SHIFT 0xe
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15__SHIFT 0xf
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16__SHIFT 0x10
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17__SHIFT 0x11
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18__SHIFT 0x12
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19__SHIFT 0x13
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20__SHIFT 0x14
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21__SHIFT 0x15
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22__SHIFT 0x16
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23__SHIFT 0x17
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24__SHIFT 0x18
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25__SHIFT 0x19
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26__SHIFT 0x1a
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27__SHIFT 0x1b
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28__SHIFT 0x1c
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29__SHIFT 0x1d
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30__SHIFT 0x1e
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31__SHIFT 0x1f
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk0_MASK 0x00000001L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk1_MASK 0x00000002L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk2_MASK 0x00000004L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk3_MASK 0x00000008L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk4_MASK 0x00000010L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk5_MASK 0x00000020L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk6_MASK 0x00000040L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk7_MASK 0x00000080L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk8_MASK 0x00000100L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk9_MASK 0x00000200L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk10_MASK 0x00000400L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk11_MASK 0x00000800L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk12_MASK 0x00001000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk13_MASK 0x00002000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk14_MASK 0x00004000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk15_MASK 0x00008000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk16_MASK 0x00010000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk17_MASK 0x00020000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk18_MASK 0x00040000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk19_MASK 0x00080000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk20_MASK 0x00100000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk21_MASK 0x00200000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk22_MASK 0x00400000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk23_MASK 0x00800000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk24_MASK 0x01000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk25_MASK 0x02000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk26_MASK 0x04000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk27_MASK 0x08000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk28_MASK 0x10000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk29_MASK 0x20000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk30_MASK 0x40000000L
+#define DF_NCS_PG0_HardwareAssertMaskHigh__HWAssertMsk31_MASK 0x80000000L
+
#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h
new file mode 100755
index 000000000000..3c2f270fb3bb
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_offset.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dpcs_2_0_3_OFFSET_HEADER
+#define _dpcs_2_0_3_OFFSET_HEADER
+// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec
+// base address: 0x0
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL 0x2928
+#define mmDPCSTX0_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_TX_CNTL 0x2929
+#define mmDPCSTX0_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL 0x292a
+#define mmDPCSTX0_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL 0x292b
+#define mmDPCSTX0_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR 0x292c
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA 0x292d
+#define mmDPCSTX0_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec
+// base address: 0x0
+#define mmRDPCSTX0_RDPCSTX_CNTL 0x2930
+#define mmRDPCSTX0_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL 0x2931
+#define mmRDPCSTX0_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL 0x2932
+#define mmRDPCSTX0_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA 0x2933
+#define mmRDPCSTX0_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR 0x2934
+#define mmRDPCSTX0_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA 0x2935
+#define mmRDPCSTX0_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_SCRATCH 0x2939
+#define mmRDPCSTX0_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0 0x2940
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1 0x2941
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2 0x2942
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3 0x2943
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4 0x2944
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5 0x2945
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6 0x2946
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7 0x2947
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8 0x2948
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9 0x2949
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10 0x294a
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11 0x294b
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12 0x294c
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13 0x294d
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14 0x294e
+#define mmRDPCSTX0_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec
+// base address: 0x360
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL 0x2a00
+#define mmDPCSTX1_DPCSTX_TX_CLOCK_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_TX_CNTL 0x2a01
+#define mmDPCSTX1_DPCSTX_TX_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL 0x2a02
+#define mmDPCSTX1_DPCSTX_CBUS_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL 0x2a03
+#define mmDPCSTX1_DPCSTX_INTERRUPT_CNTL_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR 0x2a04
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_ADDR_BASE_IDX 2
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA 0x2a05
+#define mmDPCSTX1_DPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec
+// base address: 0x360
+#define mmRDPCSTX1_RDPCSTX_CNTL 0x2a08
+#define mmRDPCSTX1_RDPCSTX_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL 0x2a09
+#define mmRDPCSTX1_RDPCSTX_CLOCK_CNTL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL 0x2a0a
+#define mmRDPCSTX1_RDPCSTX_INTERRUPT_CONTROL_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA 0x2a0b
+#define mmRDPCSTX1_RDPCSTX_PLL_UPDATE_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR 0x2a0c
+#define mmRDPCSTX1_RDPCS_TX_CR_ADDR_BASE_IDX 2
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA 0x2a0d
+#define mmRDPCSTX1_RDPCS_TX_CR_DATA_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_SCRATCH 0x2a11
+#define mmRDPCSTX1_RDPCSTX_SCRATCH_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0 0x2a18
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL0_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1 0x2a19
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL1_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2 0x2a1a
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL2_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3 0x2a1b
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL3_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4 0x2a1c
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL4_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5 0x2a1d
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL5_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6 0x2a1e
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL6_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7 0x2a1f
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL7_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8 0x2a20
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL8_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9 0x2a21
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL9_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10 0x2a22
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL10_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11 0x2a23
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL11_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12 0x2a24
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL12_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13 0x2a25
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL13_BASE_IDX 2
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14 0x2a26
+#define mmRDPCSTX1_RDPCSTX_PHY_CNTL14_BASE_IDX 2
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h
new file mode 100755
index 000000000000..a6d076530117
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/dpcs/dpcs_2_0_3_sh_mask.h
@@ -0,0 +1,952 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _dpcs_2_0_3_SH_MASK_HEADER
+#define _dpcs_2_0_3_SH_MASK_HEADER
+// addressBlock: dpcssysa_dpcs0_dpcstx0_dispdec
+//DPCSTX0_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX0_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX0_DPCSTX_TX_CNTL
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX0_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_CBUS_CNTL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX0_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX0_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX0_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX0_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+//DPCSTX0_DPCSTX_DEBUG_CONFIG
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN__SHIFT 0x0
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL__SHIFT 0x1
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL__SHIFT 0x4
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL__SHIFT 0x8
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS__SHIFT 0xe
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN__SHIFT 0x10
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX__SHIFT 0x18
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_EN_MASK 0x00000001L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CFGCLK_SEL_MASK 0x0000000EL
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_SEL_MASK 0x00000070L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_TX_SYMCLK_DIV2_SEL_MASK 0x00000700L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_DBG_CBUS_DIS_MASK 0x00004000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_WRITE_EN_MASK 0x00010000L
+#define DPCSTX0_DPCSTX_DEBUG_CONFIG__DPCS_TEST_DEBUG_INDEX_MASK 0xFF000000L
+//DPCSTX0_DPCSTX_TEST_DEBUG_DATA
+#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA__SHIFT 0x0
+#define DPCSTX0_DPCSTX_TEST_DEBUG_DATA__DPCS_TEST_DEBUG_DATA_MASK 0xFFFFFFFFL
+
+
+// addressBlock: dpcssysa_dpcs0_rdpcstx0_dispdec
+//RDPCSTX0_RDPCSTX_CNTL
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX0_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX0_RDPCS_TX_CR_ADDR
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCS_TX_CR_DATA
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX0_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_SCRATCH
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX0_RDPCSTX_PHY_CNTL2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX0_RDPCSTX_PHY_CNTL3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL6
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL9
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX0_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX0_RDPCSTX_PHY_CNTL11
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL12
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX0_RDPCSTX_PHY_CNTL13
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX0_RDPCSTX_PHY_CNTL14
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX0_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO__SHIFT 0x14
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_TX0_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_V2I_MASK 0x000C0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE0__RDPCS_PHY_DP_MPLLB_FREQ_VCO_MASK 0x00300000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE1
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP__SHIFT 0x19
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_TX1_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_INT_MASK 0x01FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE1__RDPCS_PHY_DP_MPLLB_CP_PROP_MASK 0xFE000000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE2
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE2__RDPCS_PHY_DP_TX2_EQ_POST_MASK 0x0003F000L
+//RDPCSTX0_RDPCSTX_PHY_FUSE3
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE__SHIFT 0x6
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST__SHIFT 0xc
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE__SHIFT 0x12
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE__SHIFT 0x18
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_MAIN_MASK 0x0000003FL
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_PRE_MASK 0x00000FC0L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DP_TX3_EQ_POST_MASK 0x0003F000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_FINETUNE_MASK 0x00FC0000L
+#define RDPCSTX0_RDPCSTX_PHY_FUSE3__RDPCS_PHY_DCO_RANGE_MASK 0x03000000L
+//RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL__SHIFT 0x0
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL__SHIFT 0x8
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_REF_LD_VAL_MASK 0x0000007FL
+#define RDPCSTX0_RDPCSTX_PHY_RX_LD_VAL__RDPCS_PHY_RX_VCO_LD_VAL_MASK 0x001FFF00L
+
+
+// addressBlock: dpcssysa_dpcssys_cr0_dispdec
+//DPCSSYS_CR0_DPCSSYS_CR_ADDR
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//DPCSSYS_CR0_DPCSSYS_CR_DATA
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define DPCSSYS_CR0_DPCSSYS_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+
+
+// addressBlock: dpcssysa_dpcs0_dpcstx1_dispdec
+//DPCSTX1_DPCSTX_TX_CLOCK_CNTL
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS__SHIFT 0x0
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN__SHIFT 0x1
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON__SHIFT 0x2
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0x3
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_GATE_DIS_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_EN_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_CLOCK_ON_MASK 0x00000004L
+#define DPCSTX1_DPCSTX_TX_CLOCK_CNTL__DPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000008L
+//DPCSTX1_DPCSTX_TX_CNTL
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ__SHIFT 0xc
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING__SHIFT 0xd
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP__SHIFT 0xe
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT__SHIFT 0xf
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN__SHIFT 0x10
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START__SHIFT 0x11
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_REQ_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_PLL_UPDATE_PENDING_MASK 0x00002000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_SWAP_MASK 0x00004000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_DATA_ORDER_INVERT_MASK 0x00008000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_EN_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_START_MASK 0x00020000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define DPCSTX1_DPCSTX_TX_CNTL__DPCS_TX_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_CBUS_CNTL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY__SHIFT 0x0
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET__SHIFT 0x1f
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_WR_CMD_DELAY_MASK 0x000000FFL
+#define DPCSTX1_DPCSTX_CBUS_CNTL__DPCS_CBUS_SOFT_RESET_MASK 0x80000000L
+//DPCSTX1_DPCSTX_INTERRUPT_CNTL
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR__SHIFT 0x1
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK__SHIFT 0x4
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR__SHIFT 0x8
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR__SHIFT 0x9
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR__SHIFT 0xa
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR__SHIFT 0xb
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR__SHIFT 0xc
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK__SHIFT 0x10
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK__SHIFT 0x14
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_ERROR_CLR_MASK 0x00000002L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_REG_FIFO_ERROR_MASK_MASK 0x00000010L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX0_FIFO_ERROR_MASK 0x00000100L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX1_FIFO_ERROR_MASK 0x00000200L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX2_FIFO_ERROR_MASK 0x00000400L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX3_FIFO_ERROR_MASK 0x00000800L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_TX_FIFO_ERROR_MASK_MASK 0x00010000L
+#define DPCSTX1_DPCSTX_INTERRUPT_CNTL__DPCS_INTERRUPT_MASK_MASK 0x00100000L
+//DPCSTX1_DPCSTX_PLL_UPDATE_ADDR
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_ADDR__DPCS_PLL_UPDATE_ADDR_MASK 0x0003FFFFL
+//DPCSTX1_DPCSTX_PLL_UPDATE_DATA
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define DPCSTX1_DPCSTX_PLL_UPDATE_DATA__DPCS_PLL_UPDATE_DATA_MASK 0xFFFFFFFFL
+// addressBlock: dpcssysa_dpcs0_rdpcstx1_dispdec
+//RDPCSTX1_RDPCSTX_CNTL
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_CBUS_SOFT_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_SRAM_SOFT_RESET_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE0_EN_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE1_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE2_EN_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_LANE3_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_EN_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_START_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_FIFO_RD_START_DELAY_MASK 0x00F00000L
+#define RDPCSTX1_RDPCSTX_CNTL__RDPCS_TX_SOFT_RESET_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_CLOCK_CNTL
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_EXT_REFCLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX0_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX1_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX2_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_TX3_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_GATE_DIS_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SYMCLK_DIV2_CLOCK_ON_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_GATE_DIS_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_EN_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_CLOCK_ON_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_CLOCK_CNTL__RDPCS_SRAMCLK_BYPASS_MASK 0x00010000L
+//RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_OVERFLOW_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX0_FIFO_ERROR_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX1_FIFO_ERROR_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX2_FIFO_ERROR_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX3_FIFO_ERROR_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_ERROR_CLR_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_CLR_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_CLR_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_ERROR_CLR_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_REG_FIFO_ERROR_MASK_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_DISABLE_TOGGLE_MASK_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_DPALT_4LANE_TOGGLE_MASK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_INTERRUPT_CONTROL__RDPCS_TX_FIFO_ERROR_MASK_MASK 0x00100000L
+//RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PLL_UPDATE_DATA__RDPCS_PLL_UPDATE_DATA_MASK 0x00000001L
+//RDPCSTX1_RDPCS_TX_CR_ADDR
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_ADDR__RDPCS_TX_CR_ADDR_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_CR_DATA
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA__SHIFT 0x0
+#define RDPCSTX1_RDPCS_TX_CR_DATA__RDPCS_TX_CR_DATA_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCS_TX_SRAM_CNTL
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS__SHIFT 0x14
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE__SHIFT 0x18
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE__SHIFT 0x1c
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_DIS_MASK 0x00100000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_FORCE_MASK 0x03000000L
+#define RDPCSTX1_RDPCS_TX_SRAM_CNTL__RDPCS_MEM_PWR_PWR_STATE_MASK 0x30000000L
+//RDPCSTX1_RDPCSTX_SCRATCH
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_SCRATCH__RDPCSTX_SCRATCH_MASK 0xFFFFFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_PHY_RESET_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TCA_APB_RESET_N_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TEST_POWERDOWN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_DTB_OUT_MASK 0x00000030L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_HDMIMODE_ENABLE_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_RANGE_MASK 0x00003E00L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_TX_VBOOST_LVL_MASK 0x0001C000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_REQ_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_RTUNE_ACK_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_PARA_SEL_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_CR_MUX_SEL_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_PHY_REF_CLKDET_RESULT_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_INIT_DONE_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_EXT_LD_DONE_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL0__RDPCS_SRAM_BYPASS_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PG_MODE_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_EN_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PCS_PWR_STABLE_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_PMA_PWR_STABLE_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_DP_PG_RESET_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL1__RDPCS_PHY_ANA_PWR_STABLE_MASK 0x00000080L
+//RDPCSTX1_RDPCSTX_PHY_CNTL2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DP4_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DPALT_DISABLE_ACK_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP4_POR_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_RX2TX_PAR_LB_EN_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_RX2TX_PAR_LB_EN_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_RX2TX_PAR_LB_EN_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_RX2TX_PAR_LB_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE0_TX2RX_SER_LB_EN_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE1_TX2RX_SER_LB_EN_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE2_TX2RX_SER_LB_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL2__RDPCS_PHY_DP_LANE3_TX2RX_SER_LB_EN_MASK 0x00000800L
+//RDPCSTX1_RDPCSTX_PHY_CNTL3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN__SHIFT 0x3
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE__SHIFT 0x9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK__SHIFT 0xd
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE__SHIFT 0x11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN__SHIFT 0x13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE__SHIFT 0x19
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_RESET_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DISABLE_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_CLK_RDY_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_DATA_EN_MASK 0x00000008L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_REQ_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX0_ACK_MASK 0x00000020L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_RESET_MASK 0x00000100L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DISABLE_MASK 0x00000200L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_CLK_RDY_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_DATA_EN_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_REQ_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX1_ACK_MASK 0x00002000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_RESET_MASK 0x00010000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DISABLE_MASK 0x00020000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_CLK_RDY_MASK 0x00040000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_DATA_EN_MASK 0x00080000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_REQ_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX2_ACK_MASK 0x00200000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_RESET_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DISABLE_MASK 0x02000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_CLK_RDY_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_DATA_EN_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_REQ_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL3__RDPCS_PHY_DP_TX3_ACK_MASK 0x20000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC__SHIFT 0x6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC__SHIFT 0xe
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN__SHIFT 0xf
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC__SHIFT 0x16
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN__SHIFT 0x17
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN__SHIFT 0x1f
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_TERM_CTRL_MASK 0x00000007L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_INVERT_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_BYPASS_EQ_CALC_MASK 0x00000040L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX0_HP_PROT_EN_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK 0x00000700L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_INVERT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_BYPASS_EQ_CALC_MASK 0x00004000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_HP_PROT_EN_MASK 0x00008000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_TERM_CTRL_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_INVERT_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_BYPASS_EQ_CALC_MASK 0x00400000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX2_HP_PROT_EN_MASK 0x00800000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_TERM_CTRL_MASK 0x07000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_INVERT_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_BYPASS_EQ_CALC_MASK 0x40000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX3_HP_PROT_EN_MASK 0x80000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_PSTATE_MASK 0x0000000CL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_LPD_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_RATE_MASK 0x000000E0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_WIDTH_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX0_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_PSTATE_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_LPD_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_RATE_MASK 0x00E00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_WIDTH_MASK 0x03000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL5__RDPCS_PHY_DP_TX1_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL6
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE__SHIFT 0x5
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN__SHIFT 0xa
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ__SHIFT 0xb
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT__SHIFT 0xc
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE__SHIFT 0x12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE__SHIFT 0x15
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN__SHIFT 0x1a
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ__SHIFT 0x1b
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_PSTATE_MASK 0x0000000CL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_LPD_MASK 0x00000010L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_RATE_MASK 0x000000E0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_WIDTH_MASK 0x00000300L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_MPLL_EN_MASK 0x00000400L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_REQ_MASK 0x00000800L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX2_DETRX_RESULT_MASK 0x00001000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_PSTATE_MASK 0x000C0000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_LPD_MASK 0x00100000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_RATE_MASK 0x00E00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_WIDTH_MASK 0x03000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_MPLL_EN_MASK 0x04000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_REQ_MASK 0x08000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL6__RDPCS_PHY_DP_TX3_DETRX_RESULT_MASK 0x10000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_DEN_MASK 0x0000FFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL7__RDPCS_PHY_DP_MPLLB_FRACN_QUOT_MASK 0xFFFF0000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL8__RDPCS_PHY_DP_MPLLB_SSC_PEAK_MASK 0x000FFFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL9
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_STEPSIZE_MASK 0x001FFFFFL
+#define RDPCSTX1_RDPCSTX_PHY_CNTL9__RDPCS_PHY_DP_MPLLB_SSC_UP_SPREAD_MASK 0x01000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL10__RDPCS_PHY_DP_MPLLB_FRACN_REM_MASK 0x0000FFFFL
+//RDPCSTX1_RDPCSTX_PHY_CNTL11
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ__SHIFT 0x1
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV__SHIFT 0x10
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_REQ_MASK 0x00000002L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_MPLLB_MULTIPLIER_MASK 0x0000FFF0L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_DIV_MASK 0x00070000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_DP_REF_CLK_MPLLB_DIV_MASK 0x00700000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL11__RDPCS_PHY_HDMI_MPLLB_HDMI_PIXEL_CLK_DIV_MASK 0x03000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL12
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN__SHIFT 0x2
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV__SHIFT 0x4
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE__SHIFT 0x7
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN__SHIFT 0x8
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_DIV5_CLK_EN_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_WORD_DIV2_EN_MASK 0x00000004L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_TX_CLK_DIV_MASK 0x00000070L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_STATE_MASK 0x00000080L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL12__RDPCS_PHY_DP_MPLLB_SSC_EN_MASK 0x00000100L
+//RDPCSTX1_RDPCSTX_PHY_CNTL13
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER__SHIFT 0x14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN__SHIFT 0x1d
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE__SHIFT 0x1e
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_MULTIPLIER_MASK 0x0FF00000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_DIV_CLK_EN_MASK 0x10000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_FORCE_EN_MASK 0x20000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL13__RDPCS_PHY_DP_MPLLB_INIT_CAL_DISABLE_MASK 0x40000000L
+//RDPCSTX1_RDPCSTX_PHY_CNTL14
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE__SHIFT 0x0
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN__SHIFT 0x18
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN__SHIFT 0x1c
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_CAL_FORCE_MASK 0x00000001L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_FRACN_EN_MASK 0x01000000L
+#define RDPCSTX1_RDPCSTX_PHY_CNTL14__RDPCS_PHY_DP_MPLLB_PMIX_EN_MASK 0x10000000L
+
+#endif
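
(Illustrative sketch, not part of the patch above: the generated __SHIFT/_MASK pairs in these headers are consumed by masking and shifting 32-bit register words. The REG_FIELD_GET/REG_FIELD_SET helpers below are local to this example, not amdgpu APIs; the two #defines are copied from the RDPCSTX0_RDPCSTX_PHY_CNTL4 definitions above.)

/*
 * Minimal standalone example of using a generated __SHIFT/_MASK pair.
 * Helpers and main() are for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL__SHIFT 0x8
#define RDPCSTX0_RDPCSTX_PHY_CNTL4__RDPCS_PHY_DP_TX1_TERM_CTRL_MASK   0x00000700L

/* Extract a field value from a 32-bit register word. */
#define REG_FIELD_GET(val, reg, field) \
	(((val) & reg##__##field##_MASK) >> reg##__##field##__SHIFT)

/* Replace a field inside a 32-bit register word. */
#define REG_FIELD_SET(val, reg, field, fv) \
	(((val) & ~(uint32_t)reg##__##field##_MASK) | \
	 (((uint32_t)(fv) << reg##__##field##__SHIFT) & reg##__##field##_MASK))

int main(void)
{
	uint32_t cntl4 = 0;

	/* Program TX1 termination control to 0x5, then read it back. */
	cntl4 = REG_FIELD_SET(cntl4, RDPCSTX0_RDPCSTX_PHY_CNTL4,
			      RDPCS_PHY_DP_TX1_TERM_CTRL, 0x5);
	printf("CNTL4 = 0x%08x, TX1_TERM_CTRL = %u\n", cntl4,
	       (unsigned)REG_FIELD_GET(cntl4, RDPCSTX0_RDPCSTX_PHY_CNTL4,
				       RDPCS_PHY_DP_TX1_TERM_CTRL));
	return 0;
}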
diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h
new file mode 100644
index 000000000000..b7d3d0df3260
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_11_0_8_sh_mask.h
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
+ * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _mp_11_0_8_SH_MASK_HEADER
+#define _mp_11_0_8_SH_MASK_HEADER
+
+#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8
+#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0
+#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL
+#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0
+#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL
+#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0
+#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL
+#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0
+#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L
+#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L
+#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L
+#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL
+#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0
+#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L
+#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8
+
+#endif
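
A minimal sketch of how MASK/__SHIFT pairs such as the ones added above are consumed; the helper name is illustrative and not part of this patch (amdgpu normally reads fields through its REG_GET_FIELD()-style macros, which expand to the same mask-and-shift):

#include <stdint.h>

/* Extract the CLIENT_ID field from an MP1_SMN_IH_CREDIT register value. */
static inline uint32_t mp1_ih_credit_client_id(uint32_t reg_val)
{
	return (reg_val & MP1_SMN_IH_CREDIT__CLIENT_ID_MASK) >>
	       MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT;
}
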
diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h
index 6a505d1b82a5..da895d1f3b4f 100644
--- a/drivers/gpu/drm/amd/include/atombios.h
+++ b/drivers/gpu/drm/amd/include/atombios.h
@@ -7148,7 +7148,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
#else // not __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName) / sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
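
For context, a small stand-alone sketch (using a hypothetical struct, not one from atombios.h) of why the offsetof() form above is the safer spelling of the same index computation: it yields the identical value without fabricating a pointer from address 0, which the old expression relied on.

#include <stddef.h>

struct sketch_master_list { unsigned short a, b, c; };	/* hypothetical layout */

#define SKETCH_OLD_INDEX(field) \
	(((char *)(&((struct sketch_master_list *)0)->field) - (char *)0) / sizeof(unsigned short))
#define SKETCH_NEW_INDEX(field) \
	(offsetof(struct sketch_master_list, field) / sizeof(unsigned short))

/* SKETCH_OLD_INDEX(b) and SKETCH_NEW_INDEX(b) both evaluate to 1 here;
 * only the offsetof() form is well-defined C. */
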
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 44955458fe38..7bd763361d6e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -768,6 +768,10 @@ enum atom_encoder_caps_def
ATOM_ENCODER_CAP_RECORD_HBR2_EN =0x02, // DP1.2 HBR2 setting is qualified and HBR2 can be enabled
ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN =0x04, // HDMI2.0 6Gbps enable or not.
ATOM_ENCODER_CAP_RECORD_HBR3_EN =0x08, // DP1.3 HBR3 is supported by board.
+ ATOM_ENCODER_CAP_RECORD_DP2 =0x10, // DP2 is supported by ASIC/board.
+ ATOM_ENCODER_CAP_RECORD_UHBR10_EN =0x20, // DP2.0 UHBR10 settings is supported by board
+ ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN =0x40, // DP2.0 UHBR13.5 settings is supported by board
+ ATOM_ENCODER_CAP_RECORD_UHBR20_EN =0x80, // DP2.0 UHBR20 settings is supported by board
ATOM_ENCODER_CAP_RECORD_USB_C_TYPE =0x100, // the DP connector is a USB-C type.
};
diff --git a/drivers/gpu/drm/amd/include/soc15_hw_ip.h b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
index 45ca4c921a66..c1519d20596a 100644
--- a/drivers/gpu/drm/amd/include/soc15_hw_ip.h
+++ b/drivers/gpu/drm/amd/include/soc15_hw_ip.h
@@ -80,6 +80,8 @@
#define L1IMU15_HWID 65
#define WAFLC_HWID 66
#define FCH_USB_PD_HWID 67
+#define SDMA2_HWID 68
+#define SDMA3_HWID 69
#define PCIE_HWID 70
#define PCS_HWID 80
#define DDCL_HWID 89
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 249cb0aeb5ae..49fe4155c374 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -310,7 +310,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
struct amdgpu_device *adev = drm_to_adev(ddev);
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level = 0xff;
+ enum amd_dpm_forced_level current_level;
int ret = 0;
if (amdgpu_in_reset(adev))
@@ -350,6 +350,8 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
if (pp_funcs->get_performance_level)
current_level = amdgpu_dpm_get_performance_level(adev);
+ else
+ current_level = adev->pm.dpm.forced_level;
if (current_level == level) {
pm_runtime_mark_last_busy(ddev->dev);
@@ -2019,15 +2021,15 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_od_clk_voltage, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(mem_busy_percent, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(pcie_bw, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_features, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(unique_id, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(thermal_throttling_logging, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RO(gpu_metrics, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RO(smartshift_apu_power, ATTR_FLAG_BASIC,
.attr_update = ss_power_attr_update),
AMDGPU_DEVICE_ATTR_RO(smartshift_dgpu_power, ATTR_FLAG_BASIC,
@@ -2087,10 +2089,10 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (asic_type < CHIP_VEGA12)
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
- if (!(asic_type == CHIP_VANGOGH))
+ if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
- if (!(asic_type == CHIP_VANGOGH))
+ if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
index 8156729c370b..3557f4e7fc30 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
@@ -1008,7 +1008,9 @@ struct pptable_funcs {
/**
* @set_power_limit: Set power limit in watts.
*/
- int (*set_power_limit)(struct smu_context *smu, uint32_t n);
+ int (*set_power_limit)(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
/**
* @init_max_sustainable_clocks: Populate max sustainable clock speed
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
index cbdae8a2c698..2d422e6a9feb 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
@@ -197,7 +197,9 @@ int smu_v11_0_notify_display_change(struct smu_context *smu);
int smu_v11_0_get_current_power_limit(struct smu_context *smu,
uint32_t *power_limit);
-int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n);
+int smu_v11_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
index dc91eb608791..e5d3b0d1a032 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
@@ -163,7 +163,9 @@ int smu_v13_0_notify_display_change(struct smu_context *smu);
int smu_v13_0_get_current_power_limit(struct smu_context *smu,
uint32_t *power_limit);
-int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n);
+int smu_v13_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit);
int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
index b7e2651b570b..2fc1733bcdcf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/ppatomfwctrl.h
@@ -29,9 +29,9 @@
typedef enum atom_smu9_syspll0_clock_id BIOS_CLKID;
#define GetIndexIntoMasterCmdTable(FieldName) \
- (((char*)(&((struct atom_master_list_of_command_functions_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_command_functions_v2_1, FieldName) / sizeof(uint16_t))
#define GetIndexIntoMasterDataTable(FieldName) \
- (((char*)(&((struct atom_master_list_of_data_tables_v2_1*)0)->FieldName)-(char*)0)/sizeof(uint16_t))
+ (offsetof(struct atom_master_list_of_data_tables_v2_1, FieldName) / sizeof(uint16_t))
#define PP_ATOMFWCTRL_MAX_VOLTAGE_ENTRIES 32
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 04863a797115..b06c59dcc1b4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -455,7 +455,11 @@ static int smu_get_power_num_states(void *handle,
bool is_support_sw_smu(struct amdgpu_device *adev)
{
- if (adev->asic_type >= CHIP_ARCTURUS)
+ /* vega20 is 11.0.2, but it's supported via the powerplay code */
+ if (adev->asic_type == CHIP_VEGA20)
+ return false;
+
+ if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0))
return true;
return false;
@@ -575,41 +579,43 @@ static int smu_set_funcs(struct amdgpu_device *adev)
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
navi10_set_ppt_funcs(smu);
break;
- case CHIP_ARCTURUS:
- adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
- arcturus_set_ppt_funcs(smu);
- /* OD is not supported on Arcturus */
- smu->od_enabled =false;
- break;
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
sienna_cichlid_set_ppt_funcs(smu);
break;
- case CHIP_ALDEBARAN:
- aldebaran_set_ppt_funcs(smu);
- /* Enable pp_od_clk_voltage node */
- smu->od_enabled = true;
- break;
- case CHIP_RENOIR:
+ case IP_VERSION(12, 0, 0):
+ case IP_VERSION(12, 0, 1):
renoir_set_ppt_funcs(smu);
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
vangogh_set_ppt_funcs(smu);
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
yellow_carp_set_ppt_funcs(smu);
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
cyan_skillfish_set_ppt_funcs(smu);
break;
+ case IP_VERSION(11, 0, 2):
+ adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
+ arcturus_set_ppt_funcs(smu);
+ /* OD is not supported on Arcturus */
+		smu->od_enabled = false;
+ break;
+ case IP_VERSION(13, 0, 2):
+ aldebaran_set_ppt_funcs(smu);
+ /* Enable pp_od_clk_voltage node */
+ smu->od_enabled = true;
+ break;
default:
return -EINVAL;
}
@@ -694,7 +700,8 @@ static int smu_late_init(void *handle)
return ret;
}
- if (adev->asic_type == CHIP_YELLOW_CARP)
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 1)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 3)))
return 0;
if (!amdgpu_sriov_vf(adev) || smu->od_enabled) {
@@ -1140,9 +1147,16 @@ static int smu_smc_hw_setup(struct smu_context *smu)
if (adev->in_suspend && smu_is_dpm_running(smu)) {
dev_info(adev->dev, "dpm has been enabled\n");
/* this is needed specifically */
- if ((adev->asic_type >= CHIP_SIENNA_CICHLID) &&
- (adev->asic_type <= CHIP_DIMGREY_CAVEFISH))
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
ret = smu_system_features_control(smu, true);
+ break;
+ default:
+ break;
+ }
return ret;
}
@@ -1284,7 +1298,7 @@ static int smu_start_smc_engine(struct smu_context *smu)
int ret = 0;
if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- if (adev->asic_type < CHIP_NAVI10) {
+ if (adev->ip_versions[MP1_HWIP][0] < IP_VERSION(11, 0, 0)) {
if (smu->ppt_funcs->load_microcode) {
ret = smu->ppt_funcs->load_microcode(smu);
if (ret)
@@ -1402,23 +1416,41 @@ static int smu_disable_dpms(struct smu_context *smu)
* - SMU firmware can handle the DPM reenablement
* properly.
*/
- if (smu->uploading_custom_pp_table &&
- (adev->asic_type >= CHIP_NAVI10) &&
- (adev->asic_type <= CHIP_BEIGE_GOBY))
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_COUNT);
+ if (smu->uploading_custom_pp_table) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 5, 0):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_COUNT);
+ default:
+ break;
+ }
+ }
/*
* For Sienna_Cichlid, PMFW will handle the features disablement properly
* on BACO in. Driver involvement is unnecessary.
*/
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) ||
- ((adev->asic_type >= CHIP_NAVI10) && (adev->asic_type <= CHIP_NAVI12))) &&
- use_baco)
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_BACO_BIT);
+ if (use_baco) {
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ return smu_disable_all_features_with_exception(smu,
+ true,
+ SMU_FEATURE_BACO_BIT);
+ default:
+ break;
+ }
+ }
/*
* For gpu reset, runpm and hibernation through BACO,
@@ -1436,7 +1468,7 @@ static int smu_disable_dpms(struct smu_context *smu)
dev_err(adev->dev, "Failed to disable smu features.\n");
}
- if (adev->asic_type >= CHIP_NAVI10 &&
+ if (adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) &&
adev->gfx.rlc.funcs->stop)
adev->gfx.rlc.funcs->stop(adev);
@@ -2229,6 +2261,7 @@ int smu_get_power_limit(void *handle,
enum pp_power_type pp_power_type)
{
struct smu_context *smu = handle;
+ struct amdgpu_device *adev = smu->adev;
enum smu_ppt_limit_level limit_level;
uint32_t limit_type;
int ret = 0;
@@ -2272,15 +2305,20 @@ int smu_get_power_limit(void *handle,
} else {
switch (limit_level) {
case SMU_PPT_LIMIT_CURRENT:
- if ((smu->adev->asic_type == CHIP_ALDEBARAN) ||
- (smu->adev->asic_type == CHIP_SIENNA_CICHLID) ||
- (smu->adev->asic_type == CHIP_NAVY_FLOUNDER) ||
- (smu->adev->asic_type == CHIP_DIMGREY_CAVEFISH) ||
- (smu->adev->asic_type == CHIP_BEIGE_GOBY))
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
ret = smu_get_asic_power_limits(smu,
&smu->current_power_limit,
NULL,
NULL);
+ break;
+ default:
+ break;
+ }
*limit = smu->current_power_limit;
break;
case SMU_PPT_LIMIT_DEFAULT:
@@ -2310,9 +2348,10 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
mutex_lock(&smu->mutex);
+ limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
goto out;
}
@@ -2328,7 +2367,7 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
limit = smu->current_power_limit;
if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit);
+ ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
smu->user_dpm_profile.power_limit = limit;
}
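
A rough sketch of the comparison semantics the hunks above depend on, assuming the usual amdgpu encoding of IP_VERSION() as (major << 16 | minor << 8 | rev); the macro and helper below are illustrative, not the driver's own definitions.

#include <stdint.h>

#define SKETCH_IP_VERSION(maj, min, rev)	(((maj) << 16) | ((min) << 8) | (rev))

/* With that packing, ordered comparisons such as
 * adev->ip_versions[MP1_HWIP][0] >= IP_VERSION(11, 0, 0) behave as expected:
 * 11.0.5 > 11.0.0, 11.5.0 > 11.0.13, 13.0.2 > 11.x.y, and so on. */
static int sketch_is_smu11_or_later(uint32_t mp1_ver)
{
	return mp1_ver >= SKETCH_IP_VERSION(11, 0, 0);
}
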
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 082f01893f3d..fd1d30a93db5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -436,6 +436,19 @@ static void arcturus_check_bxco_support(struct smu_context *smu)
}
}
+static void arcturus_check_fan_support(struct smu_context *smu)
+{
+ struct smu_table_context *table_context = &smu->smu_table;
+ PPTable_t *pptable = table_context->driver_pptable;
+
+ /* No sort of fan control possible if PPTable has it disabled */
+ smu->adev->pm.no_fan =
+ !(pptable->FeaturesToRun[0] & FEATURE_FAN_CONTROL_MASK);
+ if (smu->adev->pm.no_fan)
+ dev_info_once(smu->adev->dev,
+ "PMFW based fan control disabled");
+}
+
static int arcturus_check_powerplay_table(struct smu_context *smu)
{
struct smu_table_context *table_context = &smu->smu_table;
@@ -443,6 +456,7 @@ static int arcturus_check_powerplay_table(struct smu_context *smu)
table_context->power_play_table;
arcturus_check_bxco_support(smu);
+ arcturus_check_fan_support(smu);
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 3d4c65bc29dc..cbc3f99e8573 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -47,7 +47,6 @@
/* unit: MHz */
#define CYAN_SKILLFISH_SCLK_MIN 1000
#define CYAN_SKILLFISH_SCLK_MAX 2000
-#define CYAN_SKILLFISH_SCLK_DEFAULT 1800
/* unit: mV */
#define CYAN_SKILLFISH_VDDC_MIN 700
@@ -59,6 +58,8 @@ static struct gfx_user_settings {
uint32_t vddc;
} cyan_skillfish_user_settings;
+static uint32_t cyan_skillfish_sclk_default;
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
@@ -365,13 +366,19 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
return false;
ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
-
if (ret)
return false;
feature_enabled = (uint64_t)feature_mask[0] |
((uint64_t)feature_mask[1] << 32);
+ /*
+	 * cyan_skillfish specific, query the default sclk instead of hard-coding it.
+ */
+ if (!cyan_skillfish_sclk_default)
+ cyan_skillfish_get_smu_metrics_data(smu, METRICS_CURR_GFXCLK,
+ &cyan_skillfish_sclk_default);
+
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -444,14 +451,14 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- if (input[1] <= CYAN_SKILLFISH_SCLK_MIN ||
+ if (input[1] < CYAN_SKILLFISH_SCLK_MIN ||
input[1] > CYAN_SKILLFISH_SCLK_MAX) {
dev_err(smu->adev->dev, "Invalid sclk! Valid sclk range: %uMHz - %uMhz\n",
CYAN_SKILLFISH_SCLK_MIN, CYAN_SKILLFISH_SCLK_MAX);
return -EINVAL;
}
- if (input[2] <= CYAN_SKILLFISH_VDDC_MIN ||
+ if (input[2] < CYAN_SKILLFISH_VDDC_MIN ||
input[2] > CYAN_SKILLFISH_VDDC_MAX) {
dev_err(smu->adev->dev, "Invalid vddc! Valid vddc range: %umV - %umV\n",
CYAN_SKILLFISH_VDDC_MIN, CYAN_SKILLFISH_VDDC_MAX);
@@ -468,7 +475,7 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return -EINVAL;
}
- cyan_skillfish_user_settings.sclk = CYAN_SKILLFISH_SCLK_DEFAULT;
+ cyan_skillfish_user_settings.sclk = cyan_skillfish_sclk_default;
cyan_skillfish_user_settings.vddc = CYAN_SKILLFISH_VDDC_MAGIC;
break;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index b1ad451af06b..71161f6b78fe 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -86,21 +86,21 @@ static struct cmn2asic_msg_mapping navi10_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(DisableSmuFeaturesHigh, PPSMC_MSG_DisableSmuFeaturesHigh, 0),
MSG_MAP(GetEnabledSmuFeaturesLow, PPSMC_MSG_GetEnabledSmuFeaturesLow, 1),
MSG_MAP(GetEnabledSmuFeaturesHigh, PPSMC_MSG_GetEnabledSmuFeaturesHigh, 1),
- MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 1),
+ MSG_MAP(SetWorkloadMask, PPSMC_MSG_SetWorkloadMask, 0),
MSG_MAP(SetPptLimit, PPSMC_MSG_SetPptLimit, 0),
- MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
- MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
MSG_MAP(SetToolsDramAddrHigh, PPSMC_MSG_SetToolsDramAddrHigh, 0),
MSG_MAP(SetToolsDramAddrLow, PPSMC_MSG_SetToolsDramAddrLow, 0),
- MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
MSG_MAP(UseBackupPPTable, PPSMC_MSG_UseBackupPPTable, 0),
MSG_MAP(RunBtc, PPSMC_MSG_RunBtc, 0),
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
- MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 0),
- MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 0),
- MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
+ MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
+ MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
+ MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 0),
MSG_MAP(SetHardMaxByFreq, PPSMC_MSG_SetHardMaxByFreq, 0),
MSG_MAP(GetMinDpmFreq, PPSMC_MSG_GetMinDpmFreq, 1),
MSG_MAP(GetMaxDpmFreq, PPSMC_MSG_GetMaxDpmFreq, 1),
@@ -345,7 +345,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DPM UCLK enablement should be skipped for navi10 A0 secure board */
if (!(is_asic_secure(smu) &&
- (adev->asic_type == CHIP_NAVI10) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0)) &&
(adev->pm.pp_feature & PP_MCLK_DPM_MASK))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT)
@@ -354,7 +354,7 @@ navi10_get_allowed_feature_mask(struct smu_context *smu,
/* DS SOCCLK enablement should be skipped for navi10 A0 secure board */
if (is_asic_secure(smu) &&
- (adev->asic_type == CHIP_NAVI10) &&
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) &&
(adev->rev_id == 0))
*(uint64_t *)feature_mask &=
~FEATURE_MASK(FEATURE_DS_SOCCLK_BIT);
@@ -925,18 +925,18 @@ static int navi1x_get_smu_metrics_data(struct smu_context *smu,
return ret;
}
- switch (adev->asic_type) {
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
if (smu_version > 0x00341C00)
ret = navi12_get_smu_metrics_data(smu, member, value);
else
ret = navi12_get_legacy_smu_metrics_data(smu, member, value);
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
default:
- if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
- ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
ret = navi10_get_smu_metrics_data(smu, member, value);
else
ret = navi10_get_legacy_smu_metrics_data(smu, member, value);
@@ -1509,8 +1509,8 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
uint32_t sclk_freq;
pstate_table->gfxclk_pstate.min = gfx_table->min;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
switch (adev->pdev->revision) {
case 0xf0: /* XTX */
case 0xc0:
@@ -1525,7 +1525,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
break;
}
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
switch (adev->pdev->revision) {
case 0xc7: /* XT */
case 0xf4:
@@ -1548,7 +1548,7 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu)
break;
}
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
sclk_freq = NAVI12_UMD_PSTATE_PEAK_GFXCLK;
break;
default:
@@ -2562,8 +2562,8 @@ static bool navi10_need_umc_cdr_workaround(struct smu_context *smu)
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT))
return false;
- if (adev->asic_type == CHIP_NAVI10 ||
- adev->asic_type == CHIP_NAVI14)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5))
return true;
return false;
@@ -2671,8 +2671,8 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
* - PPSMC_MSG_SetDriverDummyTableDramAddrLow
* - PPSMC_MSG_GetUMCFWWA
*/
- if (((adev->asic_type == CHIP_NAVI10) && (pmfw_version >= 0x2a3500)) ||
- ((adev->asic_type == CHIP_NAVI14) && (pmfw_version >= 0x351D00))) {
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && (pmfw_version >= 0x2a3500)) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && (pmfw_version >= 0x351D00))) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_GET_UMC_FW_WA,
0,
@@ -2691,13 +2691,13 @@ static int navi10_run_umc_cdr_workaround(struct smu_context *smu)
return 0;
if (umc_fw_disable_cdr) {
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
} else {
return navi10_set_dummy_pstates_table_location(smu);
}
} else {
- if (adev->asic_type == CHIP_NAVI10)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0))
return navi10_umc_hybrid_cdr_workaround(smu);
}
@@ -3151,18 +3151,18 @@ static ssize_t navi1x_get_gpu_metrics(struct smu_context *smu,
return ret;
}
- switch (adev->asic_type) {
- case CHIP_NAVI12:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 9):
if (smu_version > 0x00341C00)
ret = navi12_get_gpu_metrics(smu, table);
else
ret = navi12_get_legacy_gpu_metrics(smu, table);
break;
- case CHIP_NAVI10:
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
default:
- if (((adev->asic_type == CHIP_NAVI14) && smu_version > 0x00351F00) ||
- ((adev->asic_type == CHIP_NAVI10) && smu_version > 0x002A3B00))
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 5)) && smu_version > 0x00351F00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 0)) && smu_version > 0x002A3B00))
ret = navi10_get_gpu_metrics(smu, table);
else
ret =navi10_get_legacy_gpu_metrics(smu, table);
@@ -3180,7 +3180,7 @@ static int navi10_enable_mgpu_fan_boost(struct smu_context *smu)
uint32_t param = 0;
/* Navi12 does not support this */
- if (adev->asic_type == CHIP_NAVI12)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9))
return 0;
/*
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index ca57221e3962..a4108025fe29 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -74,7 +74,7 @@
#define SMU_11_0_7_GFX_BUSY_THRESHOLD 15
#define GET_PPTABLE_MEMBER(field, member) do {\
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY)\
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))\
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_beige_goby_t, field));\
else\
(*member) = (smu->smu_table.driver_pptable + offsetof(PPTable_t, field));\
@@ -82,7 +82,7 @@
static int get_table_size(struct smu_context *smu)
{
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY)
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
return sizeof(PPTable_beige_goby_t);
else
return sizeof(PPTable_t);
@@ -298,7 +298,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
}
if ((adev->pm.pp_feature & PP_GFX_DCS_MASK) &&
- (adev->asic_type > CHIP_SIENNA_CICHLID) &&
+ (adev->ip_versions[MP1_HWIP][0] > IP_VERSION(11, 0, 7)) &&
!(adev->flags & AMD_IS_APU))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_DCS_BIT);
@@ -496,7 +496,7 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
uint32_t throttler_status = 0;
int i;
- if ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
@@ -517,7 +517,7 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
SmuMetrics_V2_t *metrics_v2 =
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V2);
- bool use_metrics_v2 = ((smu->adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ bool use_metrics_v2 = ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) ? true : false;
uint16_t average_gfx_activity;
int ret = 0;
@@ -670,7 +670,7 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
struct smu_11_0_dpm_context *dpm_context = smu->smu_dpm.dpm_context;
struct smu_11_0_dpm_table *dpm_table;
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int i, ret = 0;
DpmDescriptor_t *table_member;
/* socclk dpm table setup */
@@ -746,78 +746,45 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
dpm_table->max = dpm_table->dpm_levels[0].value;
}
- /* vclk0 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.vclk_table;
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_VCLK,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !table_member[PPCLK_VCLK_0].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
- }
+ /* vclk0/1 dpm table setup */
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
- /* vclk1 dpm table setup */
- if (adev->vcn.num_vcn_inst > 1) {
- dpm_table = &dpm_context->dpm_tables.vclk1_table;
+ dpm_table = &dpm_context->dpm_tables.vclk_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_VCLK1,
+ i ? SMU_VCLK1 : SMU_VCLK,
dpm_table);
if (ret)
return ret;
dpm_table->is_fine_grained =
- !table_member[PPCLK_VCLK_1].SnapToDiscrete;
+ !table_member[i ? PPCLK_VCLK_1 : PPCLK_VCLK_0].SnapToDiscrete;
} else {
dpm_table->count = 1;
- dpm_table->dpm_levels[0].value =
- smu->smu_table.boot_values.vclk / 100;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.vclk / 100;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[0].value;
}
}
- /* dclk0 dpm table setup */
- dpm_table = &dpm_context->dpm_tables.dclk_table;
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_DCLK,
- dpm_table);
- if (ret)
- return ret;
- dpm_table->is_fine_grained =
- !table_member[PPCLK_DCLK_0].SnapToDiscrete;
- } else {
- dpm_table->count = 1;
- dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
- dpm_table->dpm_levels[0].enabled = true;
- dpm_table->min = dpm_table->dpm_levels[0].value;
- dpm_table->max = dpm_table->dpm_levels[0].value;
- }
-
- /* dclk1 dpm table setup */
- if (adev->vcn.num_vcn_inst > 1) {
- dpm_table = &dpm_context->dpm_tables.dclk1_table;
+ /* dclk0/1 dpm table setup */
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
+ dpm_table = &dpm_context->dpm_tables.dclk_table;
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
ret = smu_v11_0_set_single_dpm_table(smu,
- SMU_DCLK1,
+ i ? SMU_DCLK1 : SMU_DCLK,
dpm_table);
if (ret)
return ret;
dpm_table->is_fine_grained =
- !table_member[PPCLK_DCLK_1].SnapToDiscrete;
+ !table_member[i ? PPCLK_DCLK_1 : PPCLK_DCLK_0].SnapToDiscrete;
} else {
dpm_table->count = 1;
- dpm_table->dpm_levels[0].value =
- smu->smu_table.boot_values.dclk / 100;
+ dpm_table->dpm_levels[0].value = smu->smu_table.boot_values.dclk / 100;
dpm_table->dpm_levels[0].enabled = true;
dpm_table->min = dpm_table->dpm_levels[0].value;
dpm_table->max = dpm_table->dpm_levels[0].value;
@@ -902,32 +869,18 @@ static int sienna_cichlid_set_default_dpm_table(struct smu_context *smu)
static int sienna_cichlid_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ int i, ret = 0;
- if (enable) {
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ if (adev->vcn.harvest_config & (1 << i))
+ continue;
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, enable ?
+ SMU_MSG_PowerUpVcn : SMU_MSG_PowerDownVcn,
+ 0x10000 * i, NULL);
if (ret)
return ret;
- if (adev->vcn.num_vcn_inst > 1) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
- 0x10000, NULL);
- if (ret)
- return ret;
- }
- }
- } else {
- if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_MM_DPM_PG_BIT)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
- if (ret)
- return ret;
- if (adev->vcn.num_vcn_inst > 1) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
- 0x10000, NULL);
- if (ret)
- return ret;
- }
}
}
@@ -1170,7 +1123,7 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
* and onwards SMU firmwares.
*/
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900))
break;
@@ -1937,7 +1890,7 @@ static void sienna_cichlid_dump_od_table(struct smu_context *smu,
od_table->UclkFmax);
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if (!((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if (!((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900)))
dev_dbg(smu->adev->dev, "OD: VddGfxOffset: %d\n", od_table->VddGfxOffset);
}
@@ -2161,7 +2114,7 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu,
* and onwards SMU firmwares.
*/
smu_cmn_get_smc_version(smu, NULL, &smu_version);
- if ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu_version < 0x003a2900)) {
dev_err(smu->adev->dev, "OD GFX Voltage offset functionality is supported "
"only by 58.41.0 and onwards SMU firmwares!\n");
@@ -2865,7 +2818,7 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
PPTable_t *pptable = table_context->driver_pptable;
int i;
- if (smu->adev->asic_type == CHIP_BEIGE_GOBY) {
+ if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) {
beige_goby_dump_pptable(smu);
return;
}
@@ -3625,7 +3578,7 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_V2_t *metrics_v2 =
&(metrics_external.SmuMetrics_V2);
struct amdgpu_device *adev = smu->adev;
- bool use_metrics_v2 = ((adev->asic_type == CHIP_SIENNA_CICHLID) &&
+ bool use_metrics_v2 = ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) ? true : false;
uint16_t average_gfx_activity;
int ret = 0;
@@ -3706,8 +3659,8 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->current_fan_speed = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
- if (((adev->asic_type == CHIP_SIENNA_CICHLID) && smu->smc_fw_version > 0x003A1E00) ||
- ((adev->asic_type == CHIP_NAVY_FLOUNDER) && smu->smc_fw_version > 0x00410400)) {
+ if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) ||
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) {
gpu_metrics->pcie_link_width = use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;
gpu_metrics->pcie_link_speed = link_speed[use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate];
} else {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 87b055466a33..28b7c0562b99 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -90,37 +90,38 @@ int smu_v11_0_init_microcode(struct smu_context *smu)
struct amdgpu_firmware_info *ucode = NULL;
if (amdgpu_sriov_vf(adev) &&
- ((adev->asic_type == CHIP_NAVI12) ||
- (adev->asic_type == CHIP_SIENNA_CICHLID)))
+ ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 9)) ||
+ (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7))))
return 0;
- switch (adev->asic_type) {
- case CHIP_ARCTURUS:
- chip_name = "arcturus";
- break;
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
chip_name = "navi10";
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
chip_name = "navi14";
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
chip_name = "navi12";
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
chip_name = "sienna_cichlid";
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
chip_name = "navy_flounder";
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
chip_name = "dimgrey_cavefish";
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
chip_name = "beige_goby";
break;
+ case IP_VERSION(11, 0, 2):
+ chip_name = "arcturus";
+ break;
default:
- dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ dev_err(adev->dev, "Unsupported IP version 0x%x\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
@@ -238,39 +239,40 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
if (smu->is_apu)
adev->pm.fw_version = smu_version;
- switch (smu->adev->asic_type) {
- case CHIP_ARCTURUS:
- smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
- break;
- case CHIP_NAVI10:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
- case CHIP_NAVI12:
+ case IP_VERSION(11, 0, 9):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV12;
break;
- case CHIP_NAVI14:
+ case IP_VERSION(11, 0, 5):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
- case CHIP_SIENNA_CICHLID:
+ case IP_VERSION(11, 0, 7):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Sienna_Cichlid;
break;
- case CHIP_NAVY_FLOUNDER:
+ case IP_VERSION(11, 0, 11):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Navy_Flounder;
break;
- case CHIP_VANGOGH:
+ case IP_VERSION(11, 5, 0):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_VANGOGH;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Beige_Goby;
break;
- case CHIP_CYAN_SKILLFISH:
+ case IP_VERSION(11, 0, 8):
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_Cyan_Skillfish;
break;
+ case IP_VERSION(11, 0, 2):
+ smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
+ break;
default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
+ adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU11_DRIVER_IF_VERSION_INV;
break;
}
@@ -492,8 +494,9 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
int smu_v11_0_init_power(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
struct smu_power_context *smu_power = &smu->smu_power;
- size_t size = smu->adev->asic_type == CHIP_VANGOGH ?
+ size_t size = adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ?
sizeof(struct smu_11_5_power_context) :
sizeof(struct smu_11_0_power_context);
@@ -750,8 +753,10 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
/* Navy_Flounder/Dimgrey_Cavefish do not support to change
* display num currently
*/
- if (adev->asic_type >= CHIP_NAVY_FLOUNDER &&
- adev->asic_type <= CHIP_BEIGE_GOBY)
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 5, 0) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 12) ||
+ adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
return 0;
return smu_cmn_send_smc_msg_with_param(smu,
@@ -974,10 +979,16 @@ int smu_v11_0_get_current_power_limit(struct smu_context *smu,
return ret;
}
-int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v11_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
int power_src;
int ret = 0;
+ uint32_t limit_param;
+
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
@@ -997,16 +1008,16 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
* BIT 16-23: PowerSource
* BIT 0-15: PowerLimit
*/
- n &= 0xFFFF;
- n |= 0 << 24;
- n |= (power_src) << 16;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ limit_param = (limit & 0xFFFF);
+ limit_param |= 0 << 24;
+ limit_param |= (power_src) << 16;
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit_param, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
- smu->current_power_limit = n;
+ smu->current_power_limit = limit;
return 0;
}
@@ -1136,15 +1147,15 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->asic_type) {
- case CHIP_NAVI10:
- case CHIP_NAVI14:
- case CHIP_NAVI12:
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
- case CHIP_VANGOGH:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 0):
+ case IP_VERSION(11, 0, 5):
+ case IP_VERSION(11, 0, 9):
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
+ case IP_VERSION(11, 5, 0):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -1630,11 +1641,11 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
mutex_lock(&smu_baco->mutex);
if (state == SMU_BACO_STATE_ENTER) {
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
- case CHIP_DIMGREY_CAVEFISH:
- case CHIP_BEIGE_GOBY:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
+ case IP_VERSION(11, 0, 12):
+ case IP_VERSION(11, 0, 13):
if (amdgpu_runtime_pm == 2)
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
@@ -1649,7 +1660,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
default:
if (!ras || !adev->ras_enabled ||
adev->gmc.xgmi.pending_reset) {
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT);
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL_ARCT, data);
@@ -1931,7 +1942,7 @@ int smu_v11_0_set_performance_level(struct smu_context *smu,
* Separate MCLK and SOCCLK soft min/max settings are not allowed
* on Arcturus.
*/
- if (adev->asic_type == CHIP_ARCTURUS) {
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 2)) {
mclk_min = mclk_max = 0;
socclk_min = socclk_max = 0;
}
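
A sketch of the SMU_MSG_SetPptLimit parameter packing described in the smu_v11_0_set_power_limit() hunk above (BIT 0-15: PowerLimit, BIT 16-23: PowerSource, upper byte left at zero, matching the 0 << 24 in the code); the helper is illustrative only, not driver code.

#include <stdint.h>

static uint32_t sketch_pack_ppt_limit(uint32_t limit_watts, uint32_t power_src)
{
	uint32_t param;

	param  = limit_watts & 0xFFFF;		/* BIT 0-15: PowerLimit */
	param |= (power_src & 0xFF) << 16;	/* BIT 16-23: PowerSource */
	return param;				/* BIT 24-31 stay zero */
}
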
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index f6ef0ce6e9e2..421f38e8dada 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -1386,52 +1386,38 @@ static int vangogh_set_performance_level(struct smu_context *smu,
uint32_t soc_mask, mclk_mask, fclk_mask;
uint32_t vclk_mask = 0, dclk_mask = 0;
+ smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
+ smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+
switch (level) {
case AMD_DPM_FORCED_LEVEL_HIGH:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
ret = vangogh_force_dpm_limit_value(smu, true);
+ if (ret)
+ return ret;
break;
case AMD_DPM_FORCED_LEVEL_LOW:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
ret = vangogh_force_dpm_limit_value(smu, false);
+ if (ret)
+ return ret;
break;
case AMD_DPM_FORCED_LEVEL_AUTO:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
ret = vangogh_unforce_dpm_levels(smu);
- break;
- case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetHardMinGfxClk,
- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu,
- SMU_MSG_SetSoftMaxGfxClk,
- VANGOGH_UMD_PSTATE_STANDARD_GFXCLK, NULL);
if (ret)
return ret;
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
ret = vangogh_get_profiling_clk_mask(smu, level,
&vclk_mask,
@@ -1446,32 +1432,15 @@ static int vangogh_set_performance_level(struct smu_context *smu,
vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
-
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn,
- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
- if (ret)
- return ret;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn,
- VANGOGH_UMD_PSTATE_PEAK_DCLK, NULL);
- if (ret)
- return ret;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
ret = vangogh_get_profiling_clk_mask(smu, level,
NULL,
NULL,
@@ -1484,29 +1453,29 @@ static int vangogh_set_performance_level(struct smu_context *smu,
vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
break;
case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
- smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
- smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
-
- smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
- smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
- if (ret)
- return ret;
+ smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
+ smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
- VANGOGH_UMD_PSTATE_PEAK_GFXCLK, NULL);
+ ret = vangogh_set_peak_clock_by_device(smu);
if (ret)
return ret;
-
- ret = vangogh_set_peak_clock_by_device(smu);
break;
case AMD_DPM_FORCED_LEVEL_MANUAL:
case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
default:
- break;
+ return 0;
}
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq, NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq, NULL);
+ if (ret)
+ return ret;
+
return ret;
}
@@ -2144,11 +2113,12 @@ static int vangogh_get_ppt_limit(struct smu_context *smu,
return 0;
}
-static int vangogh_set_power_limit(struct smu_context *smu, uint32_t ppt_limit)
+static int vangogh_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t ppt_limit)
{
struct smu_11_5_power_context *power_context =
- smu->smu_power.power_context;
- uint32_t limit_type = ppt_limit >> 24;
+ smu->smu_power.power_context;
int ret = 0;
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 5019903db492..59a7d276541d 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -1241,11 +1241,13 @@ static int aldebaran_get_power_limit(struct smu_context *smu,
return 0;
}
-static int aldebaran_set_power_limit(struct smu_context *smu, uint32_t n)
+static int aldebaran_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
/* Power limit can be set only through primary die */
if (aldebaran_is_primary(smu))
- return smu_v13_0_set_power_limit(smu, n);
+ return smu_v13_0_set_power_limit(smu, limit_type, limit);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index a0e50f23b1dd..35145db6eedf 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -89,12 +89,13 @@ int smu_v13_0_init_microcode(struct smu_context *smu)
if (amdgpu_sriov_vf(adev))
return 0;
- switch (adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
chip_name = "aldebaran";
break;
default:
- dev_err(adev->dev, "Unsupported ASIC type %d\n", adev->asic_type);
+ dev_err(adev->dev, "Unsupported IP version 0x%x\n",
+ adev->ip_versions[MP1_HWIP][0]);
return -EINVAL;
}
@@ -210,15 +211,17 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
- switch (smu->adev->asic_type) {
- case CHIP_ALDEBARAN:
+ switch (smu->adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 2):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE;
break;
- case CHIP_YELLOW_CARP:
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
default:
- dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type);
+ dev_err(smu->adev->dev, "smu unsupported IP version: 0x%x.\n",
+ smu->adev->ip_versions[MP1_HWIP][0]);
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV;
break;
}
@@ -740,8 +743,9 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0;
struct amdgpu_device *adev = smu->adev;
- switch (adev->asic_type) {
- case CHIP_YELLOW_CARP:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 1):
+ case IP_VERSION(13, 0, 3):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -941,22 +945,27 @@ int smu_v13_0_get_current_power_limit(struct smu_context *smu,
return ret;
}
-int smu_v13_0_set_power_limit(struct smu_context *smu, uint32_t n)
+int smu_v13_0_set_power_limit(struct smu_context *smu,
+ enum smu_ppt_limit_type limit_type,
+ uint32_t limit)
{
int ret = 0;
+ if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+ return -EINVAL;
+
if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
return -EOPNOTSUPP;
}
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, limit, NULL);
if (ret) {
dev_err(smu->adev->dev, "[%s] Set power limit Failed!\n", __func__);
return ret;
}
- smu->current_power_limit = n;
+ smu->current_power_limit = limit;
return 0;
}
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 8c2ab3d653b7..0562bdaac00c 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -165,7 +165,7 @@ bool malidp_format_mod_supported(struct drm_device *drm,
return !malidp_hw_format_is_afbc_only(format);
}
- if ((modifier >> 56) != DRM_FORMAT_MOD_VENDOR_ARM) {
+ if (!fourcc_mod_is_vendor(modifier, ARM)) {
DRM_ERROR("Unknown modifier (not Arm)\n");
return false;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 711f0cca6f9c..147abf1a3968 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -338,7 +338,7 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_armada_gem_pwrite *args = data;
struct armada_gem_object *dobj;
char __user *ptr;
- int ret;
+ int ret = 0;
DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
args->handle, args->offset, args->size, args->ptr);
@@ -351,9 +351,8 @@ int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (!access_ok(ptr, args->size))
return -EFAULT;
- ret = fault_in_pages_readable(ptr, args->size);
- if (ret)
- return ret;
+ if (fault_in_readable(ptr, args->size))
+ return -EFAULT;
dobj = armada_gem_object_lookup(file, args->handle);
if (dobj == NULL)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 39ca338eb80b..2cfce7dc95af 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -158,8 +158,6 @@ struct ast_private {
uint32_t dram_type;
uint32_t mclk;
- int fb_mtrr;
-
struct drm_plane primary_plane;
struct ast_cursor_plane cursor_plane;
struct drm_crtc crtc;
diff --git a/drivers/gpu/drm/ast/ast_mm.c b/drivers/gpu/drm/ast/ast_mm.c
index 7592f1b9e1f1..6e999408dda9 100644
--- a/drivers/gpu/drm/ast/ast_mm.c
+++ b/drivers/gpu/drm/ast/ast_mm.c
@@ -74,35 +74,28 @@ static u32 ast_get_vram_size(struct ast_private *ast)
return vram_size;
}
-static void ast_mm_release(struct drm_device *dev, void *ptr)
-{
- struct ast_private *ast = to_ast_private(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- arch_phys_wc_del(ast->fb_mtrr);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-}
-
int ast_mm_init(struct ast_private *ast)
{
struct drm_device *dev = &ast->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
+ resource_size_t base, size;
u32 vram_size;
int ret;
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, base, size);
+ devm_arch_phys_wc_add(dev->dev, base, size);
+
vram_size = ast_get_vram_size(ast);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0), vram_size);
+ ret = drmm_vram_helper_init(dev, base, vram_size);
if (ret) {
drm_err(dev, "Error initializing VRAM MM; %d\n", ret);
return ret;
}
- arch_io_reserve_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- ast->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-
- return drmm_add_action_or_reset(dev, ast_mm_release, NULL);
+ return 0;
}
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 6bfaefa01818..1e30eaeb0e1b 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -1300,18 +1300,6 @@ static enum drm_mode_status ast_mode_valid(struct drm_connector *connector,
return flags;
}
-static enum drm_connector_status ast_connector_detect(struct drm_connector
- *connector, bool force)
-{
- int r;
-
- r = ast_get_modes(connector);
- if (r <= 0)
- return connector_status_disconnected;
-
- return connector_status_connected;
-}
-
static void ast_connector_destroy(struct drm_connector *connector)
{
struct ast_connector *ast_connector = to_ast_connector(connector);
@@ -1327,7 +1315,6 @@ static const struct drm_connector_helper_funcs ast_connector_helper_funcs = {
static const struct drm_connector_funcs ast_connector_funcs = {
.reset = drm_atomic_helper_connector_reset,
- .detect = ast_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = ast_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1355,8 +1342,7 @@ static int ast_connector_init(struct drm_device *dev)
connector->interlace_allowed = 0;
connector->doublescan_allowed = 0;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
drm_connector_attach_encoder(connector, encoder);
@@ -1425,8 +1411,6 @@ int ast_mode_config_init(struct ast_private *ast)
drm_mode_config_reset(dev);
- drm_kms_helper_poll_init(dev);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
index a20a45c0b353..28d9becc939c 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_cec.c
@@ -1,21 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* adv7511_cec.c - Analog Devices ADV7511/33 cec driver
*
* Copyright 2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
*/
#include <linux/device.h>
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 14d73fb1dd15..1a871f6b6822 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -720,7 +720,7 @@ static int edid_read(struct anx7625_data *ctx,
ret = sp_tx_aux_rd(ctx, 0xf1);
if (ret) {
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
DRM_DEV_DEBUG_DRIVER(dev, "edid read fail, reset!\n");
} else {
ret = anx7625_reg_block_read(ctx, ctx->i2c.rx_p0_client,
@@ -735,7 +735,7 @@ static int edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int segments_edid_read(struct anx7625_data *ctx,
@@ -785,7 +785,7 @@ static int segments_edid_read(struct anx7625_data *ctx,
if (cnt > EDID_TRY_CNT)
return -EIO;
- return 0;
+ return ret;
}
static int sp_tx_edid_read(struct anx7625_data *ctx,
@@ -845,8 +845,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -863,8 +866,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
if (g_edid_break == 1)
break;
- segments_edid_read(ctx, count / 2,
- pblock_buf, offset);
+ ret = segments_edid_read(ctx, count / 2,
+ pblock_buf, offset);
+ if (ret < 0)
+ return ret;
+
memcpy(&pedid_blocks_buf[edid_pos],
pblock_buf,
MAX_DPCD_BUFFER_SIZE);
@@ -887,7 +893,11 @@ static int sp_tx_edid_read(struct anx7625_data *ctx,
}
/* Reset aux channel */
- sp_tx_rst_aux(ctx);
+ ret = sp_tx_rst_aux(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "Failed to reset aux channel!\n");
+ return ret;
+ }
return (blocks_num + 1);
}
@@ -1325,7 +1335,6 @@ static int anx7625_attach_dsi(struct anx7625_data *ctx)
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
- MIPI_DSI_MODE_NO_EOT_PACKET |
MIPI_DSI_MODE_VIDEO_HSE;
if (mipi_dsi_attach(dsi) < 0) {
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index e6e331071a00..d8a15c459b42 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -1171,7 +1171,6 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
{
struct cdns_dsi *dsi;
struct cdns_dsi_input *input;
- struct resource *res;
int ret, irq;
u32 val;
@@ -1183,8 +1182,7 @@ static int cdns_dsi_drm_probe(struct platform_device *pdev)
input = &dsi->input;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dsi->regs = devm_ioremap_resource(&pdev->dev, res);
+ dsi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dsi->regs))
return PTR_ERR(dsi->regs);
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
index 2f2a09adb4bc..06b59b422c69 100644
--- a/drivers/gpu/drm/bridge/ite-it66121.c
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -889,7 +889,7 @@ unlock:
static int it66121_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- u32 vendor_ids[2], device_ids[2], revision_id;
+ u32 revision_id, vendor_ids[2] = { 0 }, device_ids[2] = { 0 };
struct device_node *ep;
int ret;
struct it66121_ctx *ctx;
@@ -918,11 +918,26 @@ static int it66121_probe(struct i2c_client *client,
return -EINVAL;
ep = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!ep)
- return -EPROBE_DEFER;
+ if (!ep) {
+ dev_err(ctx->dev, "The endpoint is unconnected\n");
+ return -EINVAL;
+ }
+
+ if (!of_device_is_available(ep)) {
+ of_node_put(ep);
+ dev_err(ctx->dev, "The remote device is disabled\n");
+ return -ENODEV;
+ }
ctx->next_bridge = of_drm_find_bridge(ep);
of_node_put(ep);
+ if (!ctx->next_bridge) {
+ dev_dbg(ctx->dev, "Next bridge not found, deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
i2c_set_clientdata(client, ctx);
mutex_init(&ctx->lock);
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index c916f4b8907e..b32295abd9e7 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -9,6 +9,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>
#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -332,3 +333,39 @@ struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge)
return &panel_bridge->connector;
}
EXPORT_SYMBOL(drm_panel_bridge_connector);
+
+#ifdef CONFIG_OF
+/**
+ * devm_drm_of_get_bridge - Return next bridge in the chain
+ * @dev: device to tie the bridge lifetime to
+ * @np: device tree node containing encoder output ports
+ * @port: port in the device tree node
+ * @endpoint: endpoint in the device tree node
+ *
+ * Given a DT node's port and endpoint number, finds the connected node
+ * and returns the associated bridge if any, or creates and returns a
+ * drm panel bridge instance if a panel is connected.
+ *
+ * Returns a pointer to the bridge if successful, or an error pointer
+ * otherwise.
+ */
+struct drm_bridge *devm_drm_of_get_bridge(struct device *dev,
+ struct device_node *np,
+ u32 port, u32 endpoint)
+{
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(np, port, endpoint,
+ &panel, &bridge);
+ if (ret)
+ return ERR_PTR(ret);
+
+ if (panel)
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+
+ return bridge;
+}
+EXPORT_SYMBOL(devm_drm_of_get_bridge);
+#endif
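A minimal usage sketch for the helper added above, assuming a hypothetical encoder driver (all foo_* names, the port/endpoint numbers and the trimmed error handling are illustrative, not part of the patch):

static int foo_attach_output(struct device *dev, struct drm_encoder *encoder)
{
	struct drm_bridge *bridge;

	/* Resolve port 1 / endpoint 0 of the device's OF node to either a
	 * bridge or a panel wrapped in a devm-managed panel-bridge. */
	bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0);
	if (IS_ERR(bridge))
		return PTR_ERR(bridge);

	/* Link the encoder to the first bridge in the chain. */
	return drm_bridge_attach(encoder, bridge, NULL, 0);
}

Because any panel-bridge created here is devm-managed, the caller needs no explicit teardown for it.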
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
index 7bd0affa057a..3aaa90913bf8 100644
--- a/drivers/gpu/drm/bridge/parade-ps8640.c
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -9,25 +9,58 @@
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
#include <drm/drm_print.h>
+#define PAGE0_AUXCH_CFG3 0x76
+#define AUXCH_CFG3_RESET 0xff
+#define PAGE0_SWAUX_ADDR_7_0 0x7d
+#define PAGE0_SWAUX_ADDR_15_8 0x7e
+#define PAGE0_SWAUX_ADDR_23_16 0x7f
+#define SWAUX_ADDR_MASK GENMASK(19, 0)
+#define PAGE0_SWAUX_LENGTH 0x80
+#define SWAUX_LENGTH_MASK GENMASK(3, 0)
+#define SWAUX_NO_PAYLOAD BIT(7)
+#define PAGE0_SWAUX_WDATA 0x81
+#define PAGE0_SWAUX_RDATA 0x82
+#define PAGE0_SWAUX_CTRL 0x83
+#define SWAUX_SEND BIT(0)
+#define PAGE0_SWAUX_STATUS 0x84
+#define SWAUX_M_MASK GENMASK(4, 0)
+#define SWAUX_STATUS_MASK GENMASK(7, 5)
+#define SWAUX_STATUS_NACK (0x1 << 5)
+#define SWAUX_STATUS_DEFER (0x2 << 5)
+#define SWAUX_STATUS_ACKM (0x3 << 5)
+#define SWAUX_STATUS_INVALID (0x4 << 5)
+#define SWAUX_STATUS_I2C_NACK (0x5 << 5)
+#define SWAUX_STATUS_I2C_DEFER (0x6 << 5)
+#define SWAUX_STATUS_TIMEOUT (0x7 << 5)
+
#define PAGE2_GPIO_H 0xa7
-#define PS_GPIO9 BIT(1)
+#define PS_GPIO9 BIT(1)
#define PAGE2_I2C_BYPASS 0xea
-#define I2C_BYPASS_EN 0xd0
+#define I2C_BYPASS_EN 0xd0
#define PAGE2_MCS_EN 0xf3
-#define MCS_EN BIT(0)
+#define MCS_EN BIT(0)
+
#define PAGE3_SET_ADD 0xfe
-#define VDO_CTL_ADD 0x13
-#define VDO_DIS 0x18
-#define VDO_EN 0x1c
-#define DP_NUM_LANES 4
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+
+#define NUM_MIPI_LANES 4
+
+#define COMMON_PS8640_REGMAP_CONFIG \
+ .reg_bits = 8, \
+ .val_bits = 8, \
+ .cache_type = REGCACHE_NONE
/*
* PS8640 uses multiple addresses:
@@ -60,29 +93,197 @@ enum ps8640_vdo_control {
struct ps8640 {
struct drm_bridge bridge;
struct drm_bridge *panel_bridge;
+ struct drm_dp_aux aux;
struct mipi_dsi_device *dsi;
struct i2c_client *page[MAX_DEVS];
+ struct regmap *regmap[MAX_DEVS];
struct regulator_bulk_data supplies[2];
struct gpio_desc *gpio_reset;
struct gpio_desc *gpio_powerdown;
bool powered;
};
+static const struct regmap_config ps8640_regmap_config[] = {
+ [PAGE0_DP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xbf,
+ },
+ [PAGE1_VDO_BDG] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE2_TOP_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE3_DSI_CNTL1] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE4_MIPI_PHY] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE5_VPLL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0x7f,
+ },
+ [PAGE6_DSI_CNTL2] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+ [PAGE7_SPI_CNTL] = {
+ COMMON_PS8640_REGMAP_CONFIG,
+ .max_register = 0xff,
+ },
+};
+
static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
{
return container_of(e, struct ps8640, bridge);
}
+static inline struct ps8640 *aux_to_ps8640(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct ps8640, aux);
+}
+
+static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ps8640 *ps_bridge = aux_to_ps8640(aux);
+ struct regmap *map = ps_bridge->regmap[PAGE0_DP_CNTL];
+ struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
+ unsigned int len = msg->size;
+ unsigned int data;
+ unsigned int base;
+ int ret;
+ u8 request = msg->request &
+ ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
+ u8 *buf = msg->buffer;
+ u8 addr_len[PAGE0_SWAUX_LENGTH + 1 - PAGE0_SWAUX_ADDR_7_0];
+ u8 i;
+ bool is_native_aux = false;
+
+ if (len > DP_AUX_MAX_PAYLOAD_BYTES)
+ return -EINVAL;
+
+ if (msg->address & ~SWAUX_ADDR_MASK)
+ return -EINVAL;
+
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_NATIVE_READ:
+ is_native_aux = true;
+ fallthrough;
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_I2C_READ:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = regmap_write(map, PAGE0_AUXCH_CFG3, AUXCH_CFG3_RESET);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to write PAGE0_AUXCH_CFG3: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Assume it's good */
+ msg->reply = 0;
+
+ base = PAGE0_SWAUX_ADDR_7_0;
+ addr_len[PAGE0_SWAUX_ADDR_7_0 - base] = msg->address;
+ addr_len[PAGE0_SWAUX_ADDR_15_8 - base] = msg->address >> 8;
+ addr_len[PAGE0_SWAUX_ADDR_23_16 - base] = (msg->address >> 16) |
+ (msg->request << 4);
+ addr_len[PAGE0_SWAUX_LENGTH - base] = (len == 0) ? SWAUX_NO_PAYLOAD :
+ ((len - 1) & SWAUX_LENGTH_MASK);
+
+ regmap_bulk_write(map, PAGE0_SWAUX_ADDR_7_0, addr_len,
+ ARRAY_SIZE(addr_len));
+
+ if (len && (request == DP_AUX_NATIVE_WRITE ||
+ request == DP_AUX_I2C_WRITE)) {
+ /* Write to the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_write(map, PAGE0_SWAUX_WDATA, buf[i]);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to write WDATA: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ }
+
+ regmap_write(map, PAGE0_SWAUX_CTRL, SWAUX_SEND);
+
+ /* Zero delay loop because i2c transactions are slow already */
+ regmap_read_poll_timeout(map, PAGE0_SWAUX_CTRL, data,
+ !(data & SWAUX_SEND), 0, 50 * 1000);
+
+ regmap_read(map, PAGE0_SWAUX_STATUS, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to read PAGE0_SWAUX_STATUS: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (data & SWAUX_STATUS_MASK) {
+ /* Ignore the DEFER cases as they are already handled in hardware */
+ case SWAUX_STATUS_NACK:
+ case SWAUX_STATUS_I2C_NACK:
+ /*
+ * The programming guide is not clear about whether a I2C NACK
+ * would trigger SWAUX_STATUS_NACK or SWAUX_STATUS_I2C_NACK. So
+ * we handle both cases together.
+ */
+ if (is_native_aux)
+ msg->reply |= DP_AUX_NATIVE_REPLY_NACK;
+ else
+ msg->reply |= DP_AUX_I2C_REPLY_NACK;
+
+ fallthrough;
+ case SWAUX_STATUS_ACKM:
+ len = data & SWAUX_M_MASK;
+ break;
+ case SWAUX_STATUS_INVALID:
+ return -EOPNOTSUPP;
+ case SWAUX_STATUS_TIMEOUT:
+ return -ETIMEDOUT;
+ }
+
+ if (len && (request == DP_AUX_NATIVE_READ ||
+ request == DP_AUX_I2C_READ)) {
+ /* Read from the internal FIFO buffer */
+ for (i = 0; i < len; i++) {
+ ret = regmap_read(map, PAGE0_SWAUX_RDATA, &data);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to read RDATA: %d\n",
+ ret);
+ return ret;
+ }
+
+ buf[i] = data;
+ }
+ }
+
+ return len;
+}
+
static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
const enum ps8640_vdo_control ctrl)
{
- struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ struct regmap *map = ps_bridge->regmap[PAGE3_DSI_CNTL1];
u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
int ret;
- ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
- sizeof(vdo_ctrl_buf),
- vdo_ctrl_buf);
+ ret = regmap_bulk_write(map, PAGE3_SET_ADD,
+ vdo_ctrl_buf, sizeof(vdo_ctrl_buf));
+
if (ret < 0) {
DRM_ERROR("failed to %sable VDO: %d\n",
ctrl == ENABLE ? "en" : "dis", ret);
@@ -94,8 +295,7 @@ static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
{
- struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
- unsigned long timeout;
+ struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL];
int ret, status;
if (ps_bridge->powered)
@@ -119,18 +319,12 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
*/
msleep(200);
- timeout = jiffies + msecs_to_jiffies(200) + 1;
-
- while (time_is_after_jiffies(timeout)) {
- status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
- goto err_regulators_disable;
- }
- if ((status & PS_GPIO9) == PS_GPIO9)
- break;
+ ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status,
+ status & PS_GPIO9, 20 * 1000, 200 * 1000);
- msleep(20);
+ if (ret < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", ret);
+ goto err_regulators_disable;
}
msleep(50);
@@ -142,22 +336,15 @@ static void ps8640_bridge_poweron(struct ps8640 *ps_bridge)
* disabled by the manufacturer. Once disabled, all MCS commands are
* ignored by the display interface.
*/
- status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
- if (status < 0) {
- DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
- goto err_regulators_disable;
- }
- ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
- status & ~MCS_EN);
+ ret = regmap_update_bits(map, PAGE2_MCS_EN, MCS_EN, 0);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
goto err_regulators_disable;
}
/* Switch access edp panel's edid through i2c */
- ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
- I2C_BYPASS_EN);
+ ret = regmap_write(map, PAGE2_I2C_BYPASS, I2C_BYPASS_EN);
if (ret < 0) {
DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
goto err_regulators_disable;
@@ -254,20 +441,35 @@ static int ps8640_bridge_attach(struct drm_bridge *bridge,
dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
dsi->format = MIPI_DSI_FMT_RGB888;
- dsi->lanes = DP_NUM_LANES;
+ dsi->lanes = NUM_MIPI_LANES;
ret = mipi_dsi_attach(dsi);
- if (ret)
+ if (ret) {
+ dev_err(dev, "failed to attach dsi device: %d\n", ret);
goto err_dsi_attach;
+ }
+
+ ret = drm_dp_aux_register(&ps_bridge->aux);
+ if (ret) {
+ dev_err(dev, "failed to register DP AUX channel: %d\n", ret);
+ goto err_aux_register;
+ }
/* Attach the panel-bridge to the dsi bridge */
return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
&ps_bridge->bridge, flags);
+err_aux_register:
+ mipi_dsi_detach(dsi);
err_dsi_attach:
mipi_dsi_device_unregister(dsi);
return ret;
}
+static void ps8640_bridge_detach(struct drm_bridge *bridge)
+{
+ drm_dp_aux_unregister(&bridge_to_ps8640(bridge)->aux);
+}
+
static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
struct drm_connector *connector)
{
@@ -304,6 +506,7 @@ static struct edid *ps8640_bridge_get_edid(struct drm_bridge *bridge,
static const struct drm_bridge_funcs ps8640_bridge_funcs = {
.attach = ps8640_bridge_attach,
+ .detach = ps8640_bridge_detach,
.get_edid = ps8640_bridge_get_edid,
.post_disable = ps8640_post_disable,
.pre_enable = ps8640_pre_enable,
@@ -360,19 +563,30 @@ static int ps8640_probe(struct i2c_client *client)
ps_bridge->page[PAGE0_DP_CNTL] = client;
+ ps_bridge->regmap[PAGE0_DP_CNTL] = devm_regmap_init_i2c(client, ps8640_regmap_config);
+ if (IS_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]))
+ return PTR_ERR(ps_bridge->regmap[PAGE0_DP_CNTL]);
+
for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
client->adapter,
client->addr + i);
- if (IS_ERR(ps_bridge->page[i])) {
- dev_err(dev, "failed i2c dummy device, address %02x\n",
- client->addr + i);
+ if (IS_ERR(ps_bridge->page[i]))
return PTR_ERR(ps_bridge->page[i]);
- }
+
+ ps_bridge->regmap[i] = devm_regmap_init_i2c(ps_bridge->page[i],
+ ps8640_regmap_config + i);
+ if (IS_ERR(ps_bridge->regmap[i]))
+ return PTR_ERR(ps_bridge->regmap[i]);
}
i2c_set_clientdata(client, ps_bridge);
+ ps_bridge->aux.name = "parade-ps8640-aux";
+ ps_bridge->aux.dev = dev;
+ ps_bridge->aux.transfer = ps8640_aux_transfer;
+ drm_dp_aux_init(&ps_bridge->aux);
+
drm_bridge_add(&ps_bridge->bridge);
return 0;
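For context on why the bridge now exposes a drm_dp_aux channel: once registered in ps8640_bridge_attach(), generic DP helpers can issue DPCD transactions that end up in ps8640_aux_transfer(). A hedged sketch of such a consumer (foo_* is illustrative):

static int foo_read_dpcd_rev(struct drm_dp_aux *aux)
{
	u8 rev;
	ssize_t ret;

	/* Single-byte native AUX read of the DPCD revision register. */
	ret = drm_dp_dpcd_readb(aux, DP_DPCD_REV, &rev);
	if (ret < 0)
		return ret;

	return rev;
}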
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
index 70ab4fbdc23e..c8f44bcb298a 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c
@@ -265,11 +265,9 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev)
/* override the module pointer */
cec->adap->owner = THIS_MODULE;
- ret = devm_add_action(&pdev->dev, dw_hdmi_cec_del, cec);
- if (ret) {
- cec_delete_adapter(cec->adap);
+ ret = devm_add_action_or_reset(&pdev->dev, dw_hdmi_cec_del, cec);
+ if (ret)
return ret;
- }
ret = devm_request_threaded_irq(&pdev->dev, cec->irq,
dw_hdmi_cec_hardirq,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 41d48a393e7f..6154bed0af5b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -615,20 +615,8 @@ static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
return drm_bridge_get_modes(pdata->next_bridge, connector);
}
-static enum drm_mode_status
-ti_sn_bridge_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* maximum supported resolution is 4K at 60 fps */
- if (mode->clock > 594000)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
.get_modes = ti_sn_bridge_connector_get_modes,
- .mode_valid = ti_sn_bridge_connector_mode_valid,
};
static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
@@ -766,6 +754,18 @@ static void ti_sn_bridge_detach(struct drm_bridge *bridge)
drm_dp_aux_unregister(&bridge_to_ti_sn65dsi86(bridge)->aux);
}
+static enum drm_mode_status
+ti_sn_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ /* maximum supported resolution is 4K at 60 fps */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
{
struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
@@ -1127,6 +1127,7 @@ static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.attach = ti_sn_bridge_attach,
.detach = ti_sn_bridge_detach,
+ .mode_valid = ti_sn_bridge_mode_valid,
.pre_enable = ti_sn_bridge_pre_enable,
.enable = ti_sn_bridge_enable,
.disable = ti_sn_bridge_disable,
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index a8ed66751c2d..c96847fc0ebc 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -28,6 +28,7 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
#include "drm_crtc_internal.h"
@@ -49,12 +50,19 @@
* Chaining multiple bridges to the output of a bridge, or the same bridge to
* the output of different bridges, is not supported.
*
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
+ * CRTCs, encoders or connectors and hence are not visible to userspace. They
+ * just provide additional hooks to get the desired output at the end of the
+ * encoder chain.
+ */
+
+/**
+ * DOC: display driver integration
+ *
* Display drivers are responsible for linking encoders with the first bridge
* in the chains. This is done by acquiring the appropriate bridge with
- * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
- * panel with drm_panel_bridge_add_typed() (or the managed version
- * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
- * attached to the encoder with a call to drm_bridge_attach().
+ * devm_drm_of_get_bridge(). Once acquired, the bridge shall be attached to the
+ * encoder with a call to drm_bridge_attach().
*
* Bridges are responsible for linking themselves with the next bridge in the
* chain, if any. This is done the same way as for encoders, with the call to
@@ -85,11 +93,63 @@
* helper to create the &drm_connector, or implement it manually on top of the
* connector-related operations exposed by the bridge (see the overview
* documentation of bridge operations for more details).
- *
- * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
- * CRTCs, encoders or connectors and hence are not visible to userspace. They
- * just provide additional hooks to get the desired output at the end of the
- * encoder chain.
+ */
+
+/**
+ * DOC: special care dsi
+ *
+ * The interaction between the bridges and other frameworks involved in
+ * the probing of the upstream driver and the bridge driver can be
+ * challenging. Indeed, there's multiple cases that needs to be
+ * considered:
+ *
+ * - The upstream driver doesn't use the component framework and isn't a
+ * MIPI-DSI host. In this case, the bridge driver will probe at some
+ * point and the upstream driver should try to probe again by returning
+ * EPROBE_DEFER as long as the bridge driver hasn't probed.
+ *
+ * - The upstream driver doesn't use the component framework, but is a
+ * MIPI-DSI host. The bridge device is controlled through MIPI-DCS
+ * commands. In this case, the bridge device is a child of the
+ * display device and, when it probes, it is assured that the display
+ * device (and MIPI-DSI host) is present. The upstream driver will be
+ * assured that the bridge driver is connected between the
+ * &mipi_dsi_host_ops.attach and &mipi_dsi_host_ops.detach operations.
+ * Therefore, it must run mipi_dsi_host_register() in its probe
+ * function, and then run drm_bridge_attach() in its
+ * &mipi_dsi_host_ops.attach hook.
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ * host. The bridge device is controlled through MIPI-DCS commands.
+ * This is the same situation as above, and the upstream driver can
+ * run mipi_dsi_host_register() in either its probe or bind hooks.
+ *
+ * - The upstream driver uses the component framework and is a MIPI-DSI
+ * host. The bridge device uses a separate bus (such as I2C) to be
+ * controlled. In this case, there's no correlation between the probe
+ * of the bridge and upstream drivers, so care must be taken to avoid
+ * an endless EPROBE_DEFER loop, with each driver waiting for the
+ * other to probe.
+ *
+ * The ideal pattern to cover the last item (and all the others in the
+ * MIPI-DSI host driver case) is to split the operations like this:
+ *
+ * - The MIPI-DSI host driver must run mipi_dsi_host_register() in its
+ * probe hook. It will make sure that the MIPI-DSI host sticks around,
+ * and that the driver's bind can be called.
+ *
+ * - In its probe hook, the bridge driver must try to find its MIPI-DSI
+ * host, register as a MIPI-DSI device and attach the MIPI-DSI device
+ * to its host. The bridge driver is now functional.
+ *
+ * - In its &struct mipi_dsi_host_ops.attach hook, the MIPI-DSI host can
+ * now add its component. Its bind hook will now be called and since
+ * the bridge driver is attached and registered, we can now look for
+ * and attach it.
+ *
+ * At this point, we're now certain that both the upstream driver and
+ * the bridge driver are functional and we can't have a deadlock-like
+ * situation when probing.
*/
static DEFINE_MUTEX(bridge_lock);
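As a rough illustration of the probe ordering recommended in the new "special care dsi" section, a component-based MIPI-DSI host driver whose bridge is controlled over a separate bus could be split as sketched below. Every foo_* identifier (including foo_component_ops) is a placeholder assumption, not an existing driver:

struct foo_dsi {
	struct device *dev;
	struct mipi_dsi_host host;
};

static int foo_dsi_host_attach(struct mipi_dsi_host *host,
			       struct mipi_dsi_device *device)
{
	struct foo_dsi *ctx = container_of(host, struct foo_dsi, host);

	/* The bridge has found the host, registered its DSI device and
	 * attached: only now is it safe to add the component so that the
	 * bind() callback can look the bridge up and attach it. */
	return component_add(ctx->dev, &foo_component_ops);
}

static const struct mipi_dsi_host_ops foo_dsi_host_ops = {
	.attach = foo_dsi_host_attach,
};

static int foo_dsi_probe(struct platform_device *pdev)
{
	struct foo_dsi *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = &pdev->dev;
	ctx->host.dev = &pdev->dev;
	ctx->host.ops = &foo_dsi_host_ops;

	/* Register the host from probe so the bridge driver can find it
	 * and probe successfully before the component is added. */
	return mipi_dsi_host_register(&ctx->host);
}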
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 30cc59fe6ef7..f19d9acbe959 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -31,7 +31,7 @@
#include <linux/dma-buf-map.h>
#include <linux/export.h>
#include <linux/highmem.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <xen/xen.h>
#include <drm/drm_cache.h>
@@ -204,7 +204,7 @@ bool drm_need_swiotlb(int dma_bits)
* Enforce dma_alloc_coherent when memory encryption is active as well
* for the same reasons as for Xen paravirtual hosts.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return true;
for (tmp = iomem_resource.child; tmp; tmp = tmp->sibling)
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2ba257b1ae20..3bc782b630b9 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -65,6 +65,14 @@
* support can instead use e.g. drm_helper_hpd_irq_event().
*/
+/*
+ * Global connector list for drm_connector_find_by_fwnode().
+ * Note drm_connector_[un]register() first take connector->lock and then
+ * take the connector_list_lock.
+ */
+static DEFINE_MUTEX(connector_list_lock);
+static LIST_HEAD(connector_list);
+
struct drm_conn_prop_enum_list {
int type;
const char *name;
@@ -267,6 +275,7 @@ int drm_connector_init(struct drm_device *dev,
goto out_put_type_id;
}
+ INIT_LIST_HEAD(&connector->global_connector_list_entry);
INIT_LIST_HEAD(&connector->probed_modes);
INIT_LIST_HEAD(&connector->modes);
mutex_init(&connector->mutex);
@@ -474,6 +483,8 @@ void drm_connector_cleanup(struct drm_connector *connector)
drm_mode_object_unregister(dev, &connector->base);
kfree(connector->name);
connector->name = NULL;
+ fwnode_handle_put(connector->fwnode);
+ connector->fwnode = NULL;
spin_lock_irq(&dev->mode_config.connector_list_lock);
list_del(&connector->head);
dev->mode_config.num_connector--;
@@ -532,6 +543,9 @@ int drm_connector_register(struct drm_connector *connector)
/* Let userspace know we have a new connector */
drm_sysfs_hotplug_event(connector->dev);
+ mutex_lock(&connector_list_lock);
+ list_add_tail(&connector->global_connector_list_entry, &connector_list);
+ mutex_unlock(&connector_list_lock);
goto unlock;
err_debugfs:
@@ -560,6 +574,10 @@ void drm_connector_unregister(struct drm_connector *connector)
return;
}
+ mutex_lock(&connector_list_lock);
+ list_del_init(&connector->global_connector_list_entry);
+ mutex_unlock(&connector_list_lock);
+
if (connector->funcs->early_unregister)
connector->funcs->early_unregister(connector);
@@ -1602,7 +1620,7 @@ EXPORT_SYMBOL(drm_mode_create_tv_properties);
* connectors.
*
* Atomic drivers should use drm_connector_attach_scaling_mode_property()
- * instead to correctly assign &drm_connector_state.picture_aspect_ratio
+ * instead to correctly assign &drm_connector_state.scaling_mode
* in the atomic state.
*/
int drm_mode_create_scaling_mode_property(struct drm_device *dev)
@@ -1722,7 +1740,7 @@ EXPORT_SYMBOL(drm_connector_attach_vrr_capable_property);
* @scaling_mode_mask: or'ed mask of BIT(%DRM_MODE_SCALE_\*).
*
* This is used to add support for scaling mode to atomic drivers.
- * The scaling mode will be set to &drm_connector_state.picture_aspect_ratio
+ * The scaling mode will be set to &drm_connector_state.scaling_mode
* and can be used from &drm_connector_helper_funcs->atomic_check for validation.
*
* This is the atomic version of drm_mode_create_scaling_mode_property().
@@ -2543,6 +2561,67 @@ out:
return ret;
}
+/**
+ * drm_connector_find_by_fwnode - Find a connector based on the associated fwnode
+ * @fwnode: fwnode for which to find the matching drm_connector
+ *
+ * This function looks up a drm_connector based on its associated fwnode. When
+ * a connector is found, a reference to the connector is returned. The caller must
+ * call drm_connector_put() to release this reference when it is done with the
+ * connector.
+ *
+ * Returns: A reference to the found connector or an ERR_PTR().
+ */
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode)
+{
+ struct drm_connector *connector, *found = ERR_PTR(-ENODEV);
+
+ if (!fwnode)
+ return ERR_PTR(-ENODEV);
+
+ mutex_lock(&connector_list_lock);
+
+ list_for_each_entry(connector, &connector_list, global_connector_list_entry) {
+ if (connector->fwnode == fwnode ||
+ (connector->fwnode && connector->fwnode->secondary == fwnode)) {
+ drm_connector_get(connector);
+ found = connector;
+ break;
+ }
+ }
+
+ mutex_unlock(&connector_list_lock);
+
+ return found;
+}
+
+/**
+ * drm_connector_oob_hotplug_event - Report out-of-band hotplug event to connector
+ * @connector_fwnode: fwnode_handle to report the event on
+ *
+ * On some hardware a hotplug event notification may come from outside the display
+ * driver / device. Examples of this are some USB Type-C setups where the hardware
+ * muxes the DisplayPort data and aux-lines but does not pass the altmode HPD
+ * status bit to the GPU's DP HPD pin.
+ *
+ * This function can be used to report these out-of-band events after obtaining
+ * a drm_connector reference through calling drm_connector_find_by_fwnode().
+ */
+void drm_connector_oob_hotplug_event(struct fwnode_handle *connector_fwnode)
+{
+ struct drm_connector *connector;
+
+ connector = drm_connector_find_by_fwnode(connector_fwnode);
+ if (IS_ERR(connector))
+ return;
+
+ if (connector->funcs->oob_hotplug_event)
+ connector->funcs->oob_hotplug_event(connector);
+
+ drm_connector_put(connector);
+}
+EXPORT_SYMBOL(drm_connector_oob_hotplug_event);
+
/**
* DOC: Tile group
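A hedged sketch of how an out-of-band source, such as a USB Type-C DisplayPort altmode handler, is expected to use the new hook; the foo_typec_port structure and the way its connector fwnode was resolved are assumptions for illustration:

struct foo_typec_port {
	struct fwnode_handle *connector_fwnode;	/* resolved from DT/ACPI */
};

static void foo_typec_dp_hpd_changed(struct foo_typec_port *port)
{
	/* Looks up the drm_connector registered with a matching fwnode
	 * and invokes its ->oob_hotplug_event() hook, if implemented. */
	drm_connector_oob_hotplug_event(port->connector_fwnode);
}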
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index edb772947cb4..63279e984342 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -58,6 +58,7 @@ struct drm_property;
struct edid;
struct kref;
struct work_struct;
+struct fwnode_handle;
/* drm_crtc.c */
int drm_mode_crtc_set_obj_prop(struct drm_mode_object *obj,
@@ -186,6 +187,7 @@ int drm_connector_set_obj_prop(struct drm_mode_object *obj,
int drm_connector_create_standard_properties(struct drm_device *dev);
const char *drm_get_connector_force_name(enum drm_connector_force force);
void drm_connector_free_work_fn(struct work_struct *work);
+struct drm_connector *drm_connector_find_by_fwnode(struct fwnode_handle *fwnode);
/* IOCTL */
int drm_connector_property_set_ioctl(struct drm_device *dev,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 6d0f2c447f3b..4d0d1e8e51fa 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -130,6 +130,20 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis);
+/* DP 2.0 128b/132b */
+u8 drm_dp_get_adjust_tx_ffe_preset(const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+ int s = ((lane & 1) ?
+ DP_ADJUST_TX_FFE_PRESET_LANE1_SHIFT :
+ DP_ADJUST_TX_FFE_PRESET_LANE0_SHIFT);
+ u8 l = dp_link_status(link_status, i);
+
+ return (l >> s) & 0xf;
+}
+EXPORT_SYMBOL(drm_dp_get_adjust_tx_ffe_preset);
+
u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
unsigned int lane)
{
@@ -207,15 +221,33 @@ EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
u8 drm_dp_link_rate_to_bw_code(int link_rate)
{
- /* Spec says link_bw = link_rate / 0.27Gbps */
- return link_rate / 27000;
+ switch (link_rate) {
+ case 1000000:
+ return DP_LINK_BW_10;
+ case 1350000:
+ return DP_LINK_BW_13_5;
+ case 2000000:
+ return DP_LINK_BW_20;
+ default:
+ /* Spec says link_bw = link_rate / 0.27Gbps */
+ return link_rate / 27000;
+ }
}
EXPORT_SYMBOL(drm_dp_link_rate_to_bw_code);
int drm_dp_bw_code_to_link_rate(u8 link_bw)
{
- /* Spec says link_rate = link_bw * 0.27Gbps */
- return link_bw * 27000;
+ switch (link_bw) {
+ case DP_LINK_BW_10:
+ return 1000000;
+ case DP_LINK_BW_13_5:
+ return 1350000;
+ case DP_LINK_BW_20:
+ return 2000000;
+ default:
+ /* Spec says link_rate = link_bw * 0.27Gbps */
+ return link_bw * 27000;
+ }
}
EXPORT_SYMBOL(drm_dp_bw_code_to_link_rate);
@@ -590,7 +622,7 @@ static u8 drm_dp_downstream_port_count(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- u8 dpcd_ext[6];
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
int ret;
/*
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 86d13d6bc463..571da0c2f39f 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -3355,6 +3355,10 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
/**
* drm_dp_update_payload_part1() - Execute payload update part 1
* @mgr: manager to use.
+ * @start_slot: the first time slot to allocate payloads from
+ *
+ * NOTE: start_slot is a temporary workaround for non-atomic drivers;
+ * it will be removed when the non-atomic MST helpers are moved out of the helper
*
* This iterates over all proposed virtual channels, and tries to
* allocate space in the link for them. For 0->slots transitions,
@@ -3365,12 +3369,12 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
* after calling this the driver should generate ACT and payload
* packets.
*/
-int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
+int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot)
{
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
int i, j;
- int cur_slots = 1;
+ int cur_slots = start_slot;
bool skip;
mutex_lock(&mgr->payload_lock);
@@ -4334,10 +4338,6 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
int ret;
- /* max. time slots - one slot for MTP header */
- if (slots > 63)
- return -ENOSPC;
-
vcpi->pbn = pbn;
vcpi->aligned_pbn = slots * mgr->pbn_div;
vcpi->num_slots = slots;
@@ -4510,6 +4510,27 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots);
/**
+ * drm_dp_mst_update_slots() - updates the slot info depending on the DP encoding format
+ * @mst_state: mst_state to update
+ * @link_encoding_cap: the encoding format on the link
+ */
+void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap)
+{
+ if (link_encoding_cap == DP_CAP_ANSI_128B132B) {
+ mst_state->total_avail_slots = 64;
+ mst_state->start_slot = 0;
+ } else {
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+ }
+
+ DRM_DEBUG_KMS("%s encoding format on mst_state 0x%p\n",
+ (link_encoding_cap == DP_CAP_ANSI_128B132B) ? "128b/132b":"8b/10b",
+ mst_state);
+}
+EXPORT_SYMBOL(drm_dp_mst_update_slots);
+
+/**
* drm_dp_mst_allocate_vcpi() - Allocate a virtual channel
* @mgr: manager for this port
* @port: port to allocate a virtual channel for.
@@ -4540,7 +4561,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
- drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n",
+ drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n",
DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
drm_dp_mst_topology_put_port(port);
goto out;
@@ -5228,7 +5249,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_topology_state *mst_state)
{
struct drm_dp_vcpi_allocation *vcpi;
- int avail_slots = 63, payload_count = 0;
+ int avail_slots = mst_state->total_avail_slots, payload_count = 0;
list_for_each_entry(vcpi, &mst_state->vcpis, next) {
/* Releasing VCPI is always OK-even if the port is gone */
@@ -5257,7 +5278,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
}
}
drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
- mgr, mst_state, avail_slots, 63 - avail_slots);
+ mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots);
return 0;
}
@@ -5534,6 +5555,9 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
if (mst_state == NULL)
return -ENOMEM;
+ mst_state->total_avail_slots = 63;
+ mst_state->start_slot = 1;
+
mst_state->mgr = mgr;
INIT_LIST_HEAD(&mst_state->vcpis);
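A usage sketch for drm_dp_mst_update_slots(): an atomic driver would pick the slot layout before computing its MST state, roughly as below. How the driver knows that the link runs 128b/132b (here a plain bool) is an assumption outside this patch:

static int foo_mst_compute_config(struct drm_atomic_state *state,
				  struct drm_dp_mst_topology_mgr *mgr,
				  bool link_is_128b132b)
{
	struct drm_dp_mst_topology_state *mst_state;

	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
	if (IS_ERR(mst_state))
		return PTR_ERR(mst_state);

	/* 64 slots starting at 0 for 128b/132b, 63 starting at 1 for 8b/10b. */
	drm_dp_mst_update_slots(mst_state,
				link_is_128b132b ? DP_CAP_ANSI_128B132B :
						   DP_CAP_ANSI_8B10B);

	return 0;
}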
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ea9a79bc9583..12893e7be89b 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -28,6 +28,7 @@
* DEALINGS IN THE SOFTWARE.
*/
+#include <linux/bitfield.h>
#include <linux/hdmi.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
@@ -49,6 +50,11 @@
(((edid)->version > (maj)) || \
((edid)->version == (maj) && (edid)->revision > (min)))
+static int oui(u8 first, u8 second, u8 third)
+{
+ return (first << 16) | (second << 8) | third;
+}
+
#define EDID_EST_TIMINGS 16
#define EDID_STD_TIMINGS 8
#define EDID_DETAILED_TIMINGS 4
@@ -100,122 +106,128 @@ struct detailed_mode_closure {
#define LEVEL_GTF2 2
#define LEVEL_CVT 3
+#define EDID_QUIRK(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _quirks) \
+{ \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .quirks = _quirks \
+}
+
static const struct edid_quirk {
- char vendor[4];
- int product_id;
+ u32 panel_id;
u32 quirks;
} edid_quirk_list[] = {
/* Acer AL1706 */
- { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('A', 'C', 'R', 44358, EDID_QUIRK_PREFER_LARGE_60),
/* Acer F51 */
- { "API", 0x7602, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('A', 'P', 'I', 0x7602, EDID_QUIRK_PREFER_LARGE_60),
/* AEO model 0 reports 8 bpc, but is a 6 bpc panel */
- { "AEO", 0, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('A', 'E', 'O', 0, EDID_QUIRK_FORCE_6BPC),
/* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('B', 'O', 'E', 0x78b, EDID_QUIRK_FORCE_6BPC),
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
- { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('C', 'P', 'T', 0x17df, EDID_QUIRK_FORCE_6BPC),
/* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
- { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('S', 'D', 'C', 0x3652, EDID_QUIRK_FORCE_6BPC),
/* BOE model 0x0771 reports 8 bpc, but is a 6 bpc panel */
- { "BOE", 0x0771, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('B', 'O', 'E', 0x0771, EDID_QUIRK_FORCE_6BPC),
/* Belinea 10 15 55 */
- { "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
- { "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('M', 'A', 'X', 1516, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('M', 'A', 'X', 0x77e, EDID_QUIRK_PREFER_LARGE_60),
/* Envision Peripherals, Inc. EN-7100e */
- { "EPI", 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH },
+ EDID_QUIRK('E', 'P', 'I', 59264, EDID_QUIRK_135_CLOCK_TOO_HIGH),
/* Envision EN2028 */
- { "EPI", 8232, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('E', 'P', 'I', 8232, EDID_QUIRK_PREFER_LARGE_60),
/* Funai Electronics PM36B */
- { "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
- EDID_QUIRK_DETAILED_IN_CM },
+ EDID_QUIRK('F', 'C', 'M', 13600, EDID_QUIRK_PREFER_LARGE_75 |
+ EDID_QUIRK_DETAILED_IN_CM),
/* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
- { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+ EDID_QUIRK('L', 'G', 'D', 764, EDID_QUIRK_FORCE_10BPC),
/* LG Philips LCD LP154W01-A5 */
- { "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
- { "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
+ EDID_QUIRK('L', 'P', 'L', 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
+ EDID_QUIRK('L', 'P', 'L', 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE),
/* Samsung SyncMaster 205BW. Note: irony */
- { "SAM", 541, EDID_QUIRK_DETAILED_SYNC_PP },
+ EDID_QUIRK('S', 'A', 'M', 541, EDID_QUIRK_DETAILED_SYNC_PP),
/* Samsung SyncMaster 22[5-6]BW */
- { "SAM", 596, EDID_QUIRK_PREFER_LARGE_60 },
- { "SAM", 638, EDID_QUIRK_PREFER_LARGE_60 },
+ EDID_QUIRK('S', 'A', 'M', 596, EDID_QUIRK_PREFER_LARGE_60),
+ EDID_QUIRK('S', 'A', 'M', 638, EDID_QUIRK_PREFER_LARGE_60),
/* Sony PVM-2541A does up to 12 bpc, but only reports max 8 bpc */
- { "SNY", 0x2541, EDID_QUIRK_FORCE_12BPC },
+ EDID_QUIRK('S', 'N', 'Y', 0x2541, EDID_QUIRK_FORCE_12BPC),
/* ViewSonic VA2026w */
- { "VSC", 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING },
+ EDID_QUIRK('V', 'S', 'C', 5020, EDID_QUIRK_FORCE_REDUCED_BLANKING),
/* Medion MD 30217 PG */
- { "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+ EDID_QUIRK('M', 'E', 'D', 0x7b8, EDID_QUIRK_PREFER_LARGE_75),
/* Lenovo G50 */
- { "SDC", 18514, EDID_QUIRK_FORCE_6BPC },
+ EDID_QUIRK('S', 'D', 'C', 18514, EDID_QUIRK_FORCE_6BPC),
/* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
- { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK('S', 'E', 'C', 0xd033, EDID_QUIRK_FORCE_8BPC),
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
- { "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+ EDID_QUIRK('E', 'T', 'R', 13896, EDID_QUIRK_FORCE_8BPC),
/* Valve Index Headset */
- { "VLV", 0x91a8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b0, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b1, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b2, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b3, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b4, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b5, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b6, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b7, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b8, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91b9, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91ba, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bb, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bc, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bd, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91be, EDID_QUIRK_NON_DESKTOP },
- { "VLV", 0x91bf, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('V', 'L', 'V', 0x91a8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b0, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b1, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b2, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b3, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b4, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b5, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b6, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b7, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b8, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91b9, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91ba, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bb, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bc, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bd, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91be, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('V', 'L', 'V', 0x91bf, EDID_QUIRK_NON_DESKTOP),
/* HTC Vive and Vive Pro VR Headsets */
- { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
- { "HVR", 0xaa02, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('H', 'V', 'R', 0xaa01, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'V', 'R', 0xaa02, EDID_QUIRK_NON_DESKTOP),
/* Oculus Rift DK1, DK2, CV1 and Rift S VR Headsets */
- { "OVR", 0x0001, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0003, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0004, EDID_QUIRK_NON_DESKTOP },
- { "OVR", 0x0012, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('O', 'V', 'R', 0x0001, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0003, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0004, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('O', 'V', 'R', 0x0012, EDID_QUIRK_NON_DESKTOP),
/* Windows Mixed Reality Headsets */
- { "ACR", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "HPN", 0x3515, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0x0408, EDID_QUIRK_NON_DESKTOP },
- { "LEN", 0xb800, EDID_QUIRK_NON_DESKTOP },
- { "FUJ", 0x1970, EDID_QUIRK_NON_DESKTOP },
- { "DEL", 0x7fce, EDID_QUIRK_NON_DESKTOP },
- { "SEC", 0x144a, EDID_QUIRK_NON_DESKTOP },
- { "AUS", 0xc102, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('A', 'C', 'R', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('H', 'P', 'N', 0x3515, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0x0408, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('L', 'E', 'N', 0xb800, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('F', 'U', 'J', 0x1970, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('D', 'E', 'L', 0x7fce, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('S', 'E', 'C', 0x144a, EDID_QUIRK_NON_DESKTOP),
+ EDID_QUIRK('A', 'U', 'S', 0xc102, EDID_QUIRK_NON_DESKTOP),
/* Sony PlayStation VR Headset */
- { "SNY", 0x0704, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'N', 'Y', 0x0704, EDID_QUIRK_NON_DESKTOP),
/* Sensics VR Headsets */
- { "SEN", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'E', 'N', 0x1019, EDID_QUIRK_NON_DESKTOP),
/* OSVR HDK and HDK2 VR Headsets */
- { "SVR", 0x1019, EDID_QUIRK_NON_DESKTOP },
+ EDID_QUIRK('S', 'V', 'R', 0x1019, EDID_QUIRK_NON_DESKTOP),
};
/*
@@ -1914,6 +1926,45 @@ int drm_add_override_edid_modes(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_add_override_edid_modes);
+static struct edid *drm_do_get_edid_base_block(struct drm_connector *connector,
+ int (*get_edid_block)(void *data, u8 *buf, unsigned int block,
+ size_t len),
+ void *data)
+{
+ int *null_edid_counter = connector ? &connector->null_edid_counter : NULL;
+ bool *edid_corrupt = connector ? &connector->edid_corrupt : NULL;
+ void *edid;
+ int i;
+
+ edid = kmalloc(EDID_LENGTH, GFP_KERNEL);
+ if (edid == NULL)
+ return NULL;
+
+ /* base block fetch */
+ for (i = 0; i < 4; i++) {
+ if (get_edid_block(data, edid, 0, EDID_LENGTH))
+ goto out;
+ if (drm_edid_block_valid(edid, 0, false, edid_corrupt))
+ break;
+ if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
+ if (null_edid_counter)
+ (*null_edid_counter)++;
+ goto carp;
+ }
+ }
+ if (i == 4)
+ goto carp;
+
+ return edid;
+
+carp:
+ if (connector)
+ connector_bad_edid(connector, edid, 1);
+out:
+ kfree(edid);
+ return NULL;
+}
+
/**
* drm_do_get_edid - get EDID data using a custom EDID block read function
* @connector: connector we're probing
@@ -1947,25 +1998,11 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
if (override)
return override;
- if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
+ edid = (u8 *)drm_do_get_edid_base_block(connector, get_edid_block, data);
+ if (!edid)
return NULL;
- /* base block fetch */
- for (i = 0; i < 4; i++) {
- if (get_edid_block(data, edid, 0, EDID_LENGTH))
- goto out;
- if (drm_edid_block_valid(edid, 0, false,
- &connector->edid_corrupt))
- break;
- if (i == 0 && drm_edid_is_zero(edid, EDID_LENGTH)) {
- connector->null_edid_counter++;
- goto carp;
- }
- }
- if (i == 4)
- goto carp;
-
- /* if there's no extensions, we're done */
+ /* if there are no extensions or no connector, we're done */
valid_extensions = edid[0x7e];
if (valid_extensions == 0)
return (struct edid *)edid;
@@ -2019,8 +2056,6 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
return (struct edid *)edid;
-carp:
- connector_bad_edid(connector, edid, 1);
out:
kfree(edid);
return NULL;
@@ -2069,6 +2104,71 @@ struct edid *drm_get_edid(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_get_edid);
+static u32 edid_extract_panel_id(const struct edid *edid)
+{
+ /*
+ * We represent the ID as a 32-bit number so it can easily be compared
+ * with "==".
+ *
+ * NOTE that we deal with endianness differently for the top half
+ * of this ID than for the bottom half. The bottom half (the product
+ * id) gets decoded as little endian by the EDID_PRODUCT_ID because
+ * that's how everyone seems to interpret it. The top half (the mfg_id)
+ * gets stored as big endian because that makes
+ * drm_edid_encode_panel_id() and drm_edid_decode_panel_id() easier
+ * to write (it's easier to extract the ASCII). It doesn't really
+ * matter, though, as long as the number here is unique.
+ */
+ return (u32)edid->mfg_id[0] << 24 |
+ (u32)edid->mfg_id[1] << 16 |
+ (u32)EDID_PRODUCT_ID(edid);
+}
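/*
 * Illustrative sketch (not part of the patch): building the same 32-bit
 * panel ID by hand for vendor "HVR", product 0xaa01, mirroring the packing
 * used by edid_extract_panel_id() above. The EDID mfg_id stores three
 * 5-bit letters ('A' == 1) big endian across two bytes, so the top half of
 * the ID keeps that byte order while the bottom half is the little-endian
 * product code. The byte values below are worked out by hand and are only
 * meant to show the layout; the helper itself is hypothetical.
 */
static u32 example_panel_id_hvr_aa01(void)
{
        const u8 mfg_id[2] = { 0x22, 0xd2 };    /* "HVR" packed as in the EDID header */
        const u16 product_id = 0xaa01;          /* product code, host byte order here */

        /* Same shifts as edid_extract_panel_id(): yields 0x22d2aa01 */
        return (u32)mfg_id[0] << 24 | (u32)mfg_id[1] << 16 | product_id;
}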
+
+/**
+ * drm_edid_get_panel_id - Get a panel's ID through DDC
+ * @adapter: I2C adapter to use for DDC
+ *
+ * This function reads the first block of the EDID of a panel and (assuming
+ * that the EDID is valid) extracts the ID out of it. The ID is a 32-bit value
+ * (16 bits of manufacturer ID and 16 bits of per-manufacturer ID) that's
+ * supposed to be different for each different model of panel.
+ *
+ * This function is intended to be used during early probing on devices where
+ * more than one panel might be present. Because of its intended use it must
+ * assume that the EDID of the panel is correct, at least as far as the ID
+ * is concerned (in other words, we don't process any overrides here).
+ *
+ * NOTE: it's expected that this function and drm_do_get_edid() will both
+ * read the EDID, but there is no caching between them. Since we're only
+ * reading the first block, hopefully this extra overhead won't be too big.
+ *
+ * Return: A 32-bit ID that should be different for each make/model of panel.
+ * See the functions drm_edid_encode_panel_id() and
+ * drm_edid_decode_panel_id() for some details on the structure of this
+ * ID.
+ */
+u32 drm_edid_get_panel_id(struct i2c_adapter *adapter)
+{
+ struct edid *edid;
+ u32 panel_id;
+
+ edid = drm_do_get_edid_base_block(NULL, drm_do_probe_ddc_edid, adapter);
+
+ /*
+ * There are no manufacturer IDs of 0, so if there is a problem reading
+ * the EDID then we'll just return 0.
+ */
+ if (!edid)
+ return 0;
+
+ panel_id = edid_extract_panel_id(edid);
+ kfree(edid);
+
+ return panel_id;
+}
+EXPORT_SYMBOL(drm_edid_get_panel_id);
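/*
 * Illustrative sketch (not part of the patch): how a panel driver probing
 * over DDC might use drm_edid_get_panel_id() to pick per-panel data before
 * the full EDID is parsed. The my_panel_* names and the table entry are
 * assumptions for illustration only; drm_edid_encode_panel_id() is the
 * encoder referenced in the kerneldoc above.
 */
static const struct {
        u32 panel_id;
        const char *name;
} my_panel_table[] = {
        { drm_edid_encode_panel_id('B', 'O', 'E', 0x0731), "example-boe-panel" },
};

static const char *my_panel_lookup(struct i2c_adapter *ddc)
{
        u32 panel_id = drm_edid_get_panel_id(ddc);
        int i;

        if (!panel_id)  /* EDID base block could not be read */
                return NULL;

        for (i = 0; i < ARRAY_SIZE(my_panel_table); i++)
                if (my_panel_table[i].panel_id == panel_id)
                        return my_panel_table[i].name;

        return NULL;
}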
+
/**
* drm_get_edid_switcheroo - get EDID data for a vga_switcheroo output
* @connector: connector we're probing
@@ -2113,25 +2213,6 @@ EXPORT_SYMBOL(drm_edid_duplicate);
/*** EDID parsing ***/
/**
- * edid_vendor - match a string against EDID's obfuscated vendor field
- * @edid: EDID to match
- * @vendor: vendor string
- *
- * Returns true if @vendor is in @edid, false otherwise
- */
-static bool edid_vendor(const struct edid *edid, const char *vendor)
-{
- char edid_vendor[3];
-
- edid_vendor[0] = ((edid->mfg_id[0] & 0x7c) >> 2) + '@';
- edid_vendor[1] = (((edid->mfg_id[0] & 0x3) << 3) |
- ((edid->mfg_id[1] & 0xe0) >> 5)) + '@';
- edid_vendor[2] = (edid->mfg_id[1] & 0x1f) + '@';
-
- return !strncmp(edid_vendor, vendor, 3);
-}
-
-/**
* edid_get_quirks - return quirk flags for a given EDID
* @edid: EDID to process
*
@@ -2139,14 +2220,13 @@ static bool edid_vendor(const struct edid *edid, const char *vendor)
*/
static u32 edid_get_quirks(const struct edid *edid)
{
+ u32 panel_id = edid_extract_panel_id(edid);
const struct edid_quirk *quirk;
int i;
for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
quirk = &edid_quirk_list[i];
-
- if (edid_vendor(edid, quirk->vendor) &&
- (EDID_PRODUCT_ID(edid) == quirk->product_id))
+ if (quirk->panel_id == panel_id)
return quirk->quirks;
}
@@ -4122,32 +4202,24 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
static bool cea_db_is_hdmi_vsdb(const u8 *db)
{
- int hdmi_id;
-
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 5)
return false;
- hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
-
- return hdmi_id == HDMI_IEEE_OUI;
+ return oui(db[3], db[2], db[1]) == HDMI_IEEE_OUI;
}
static bool cea_db_is_hdmi_forum_vsdb(const u8 *db)
{
- unsigned int oui;
-
if (cea_db_tag(db) != VENDOR_BLOCK)
return false;
if (cea_db_payload_len(db) < 7)
return false;
- oui = db[3] << 16 | db[2] << 8 | db[1];
-
- return oui == HDMI_FORUM_IEEE_OUI;
+ return oui(db[3], db[2], db[1]) == HDMI_FORUM_IEEE_OUI;
}
static bool cea_db_is_vcdb(const u8 *db)
@@ -5157,6 +5229,71 @@ void drm_get_monitor_range(struct drm_connector *connector,
info->monitor_range.max_vfreq);
}
+static void drm_parse_vesa_mso_data(struct drm_connector *connector,
+ const struct displayid_block *block)
+{
+ struct displayid_vesa_vendor_specific_block *vesa =
+ (struct displayid_vesa_vendor_specific_block *)block;
+ struct drm_display_info *info = &connector->display_info;
+
+ if (block->num_bytes < 3) {
+ drm_dbg_kms(connector->dev, "Unexpected vendor block size %u\n",
+ block->num_bytes);
+ return;
+ }
+
+ if (oui(vesa->oui[0], vesa->oui[1], vesa->oui[2]) != VESA_IEEE_OUI)
+ return;
+
+ if (sizeof(*vesa) != sizeof(*block) + block->num_bytes) {
+ drm_dbg_kms(connector->dev, "Unexpected VESA vendor block size\n");
+ return;
+ }
+
+ switch (FIELD_GET(DISPLAYID_VESA_MSO_MODE, vesa->mso)) {
+ default:
+ drm_dbg_kms(connector->dev, "Reserved MSO mode value\n");
+ fallthrough;
+ case 0:
+ info->mso_stream_count = 0;
+ break;
+ case 1:
+ info->mso_stream_count = 2; /* 2 or 4 links */
+ break;
+ case 2:
+ info->mso_stream_count = 4; /* 4 links */
+ break;
+ }
+
+ if (!info->mso_stream_count) {
+ info->mso_pixel_overlap = 0;
+ return;
+ }
+
+ info->mso_pixel_overlap = FIELD_GET(DISPLAYID_VESA_MSO_OVERLAP, vesa->mso);
+ if (info->mso_pixel_overlap > 8) {
+ drm_dbg_kms(connector->dev, "Reserved MSO pixel overlap value %u\n",
+ info->mso_pixel_overlap);
+ info->mso_pixel_overlap = 8;
+ }
+
+ drm_dbg_kms(connector->dev, "MSO stream count %u, pixel overlap %u\n",
+ info->mso_stream_count, info->mso_pixel_overlap);
+}
+
+static void drm_update_mso(struct drm_connector *connector, const struct edid *edid)
+{
+ const struct displayid_block *block;
+ struct displayid_iter iter;
+
+ displayid_iter_edid_begin(edid, &iter);
+ displayid_iter_for_each(block, &iter) {
+ if (block->tag == DATA_BLOCK_2_VENDOR_SPECIFIC)
+ drm_parse_vesa_mso_data(connector, block);
+ }
+ displayid_iter_end(&iter);
+}
+
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
@@ -5180,6 +5317,9 @@ drm_reset_display_info(struct drm_connector *connector)
info->non_desktop = 0;
memset(&info->monitor_range, 0, sizeof(info->monitor_range));
+
+ info->mso_stream_count = 0;
+ info->mso_pixel_overlap = 0;
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -5258,6 +5398,9 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->color_formats |= DRM_COLOR_FORMAT_YCRCB444;
if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422)
info->color_formats |= DRM_COLOR_FORMAT_YCRCB422;
+
+ drm_update_mso(connector, edid);
+
return quirks;
}
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 5231104b1498..69fde60e36b3 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -135,6 +135,56 @@ void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_fb_swab);
+static void drm_fb_xrgb8888_to_rgb332_line(u8 *dbuf, __le32 *sbuf, unsigned int pixels)
+{
+ unsigned int x;
+ u32 pix;
+
+ for (x = 0; x < pixels; x++) {
+ pix = le32_to_cpu(sbuf[x]);
+ dbuf[x] = ((pix & 0x00e00000) >> 16) |
+ ((pix & 0x0000e000) >> 11) |
+ ((pix & 0x000000c0) >> 6);
+ }
+}
+
+/**
+ * drm_fb_xrgb8888_to_rgb332 - Convert XRGB8888 to RGB332 clip buffer
+ * @dst: RGB332 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB332 devices that don't natively support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination is a small buffer
+ * containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb332(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb332_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb332);
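/*
 * Illustrative sketch (not part of the patch): converting a damage
 * rectangle for an RGB332 device. The my_rgb332_update() name is an
 * assumption for illustration; dst is sized at one byte per pixel of the
 * clip rectangle, matching the helper's "clip rect only" destination.
 */
static int my_rgb332_update(void *shadow_xrgb8888, struct drm_framebuffer *fb,
                            struct drm_rect *clip)
{
        size_t len = drm_rect_width(clip) * drm_rect_height(clip);
        u8 *dst;

        dst = kmalloc(len, GFP_KERNEL);
        if (!dst)
                return -ENOMEM;

        /* dst now holds only the clip rect, one RGB332 byte per pixel */
        drm_fb_xrgb8888_to_rgb332(dst, shadow_xrgb8888, fb, clip);

        /* ... push dst to the hardware here ... */

        kfree(dst);
        return 0;
}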
+
static void drm_fb_xrgb8888_to_rgb565_line(u16 *dbuf, u32 *sbuf,
unsigned int pixels,
bool swab)
@@ -251,6 +301,44 @@ static void drm_fb_xrgb8888_to_rgb888_line(u8 *dbuf, u32 *sbuf,
}
/**
+ * drm_fb_xrgb8888_to_rgb888 - Convert XRGB8888 to RGB888 clip buffer
+ * @dst: RGB888 destination buffer
+ * @src: XRGB8888 source buffer
+ * @fb: DRM framebuffer
+ * @clip: Clip rectangle area to copy
+ *
+ * Drivers can use this function for RGB888 devices that don't natively
+ * support XRGB8888.
+ *
+ * This function does not apply clipping on dst, i.e. the destination
+ * is a small buffer containing the clip rect only.
+ */
+void drm_fb_xrgb8888_to_rgb888(void *dst, void *src, struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ size_t width = drm_rect_width(clip);
+ size_t src_len = width * sizeof(u32);
+ unsigned int y;
+ void *sbuf;
+
+ /* Use a buffer to speed up access on buffers with uncached read mapping (i.e. WC) */
+ sbuf = kmalloc(src_len, GFP_KERNEL);
+ if (!sbuf)
+ return;
+
+ src += clip_offset(clip, fb->pitches[0], sizeof(u32));
+ for (y = 0; y < drm_rect_height(clip); y++) {
+ memcpy(sbuf, src, src_len);
+ drm_fb_xrgb8888_to_rgb888_line(dst, sbuf, width);
+ src += fb->pitches[0];
+ dst += width * 3;
+ }
+
+ kfree(sbuf);
+}
+EXPORT_SYMBOL(drm_fb_xrgb8888_to_rgb888);
+
+/**
* drm_fb_xrgb8888_to_rgb888_dstclip - Convert XRGB8888 to RGB888 clip buffer
* @dst: RGB888 destination buffer (iomem)
* @dst_pitch: destination buffer pitch
diff --git a/drivers/gpu/drm/drm_fourcc.c b/drivers/gpu/drm/drm_fourcc.c
index eda832f9200d..25837b1d6639 100644
--- a/drivers/gpu/drm/drm_fourcc.c
+++ b/drivers/gpu/drm/drm_fourcc.c
@@ -133,6 +133,9 @@ const struct drm_format_info *__drm_format_info(u32 format)
{
static const struct drm_format_info formats[] = {
{ .format = DRM_FORMAT_C8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R8, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R10, .depth = 10, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
+ { .format = DRM_FORMAT_R12, .depth = 12, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_RGB332, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_BGR233, .depth = 8, .num_planes = 1, .cpp = { 1, 0, 0 }, .hsub = 1, .vsub = 1 },
{ .format = DRM_FORMAT_XRGB4444, .depth = 0, .num_planes = 1, .cpp = { 2, 0, 0 }, .hsub = 1, .vsub = 1 },
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index b25736baba49..7b9f69f21f1e 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -10,6 +10,10 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
+#ifdef CONFIG_X86
+#include <asm/set_memory.h>
+#endif
+
#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -164,6 +168,16 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem)
return PTR_ERR(pages);
}
+ /*
+ * TODO: Allocating WC pages which are correctly flushed is only
+ * supported on x86. Ideal solution would be a GFP_WC flag, which also
+ * ttm_pool.c could use.
+ */
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wc(pages, obj->size >> PAGE_SHIFT);
+#endif
+
shmem->pages = pages;
return 0;
@@ -205,6 +219,11 @@ static void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
if (--shmem->pages_use_count > 0)
return;
+#ifdef CONFIG_X86
+ if (shmem->map_wc)
+ set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT);
+#endif
+
drm_gem_put_pages(obj, shmem->pages,
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
@@ -544,7 +563,7 @@ static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
} else {
page = shmem->pages[page_offset];
- ret = vmf_insert_page(vma, vmf->address, page);
+ ret = vmf_insert_pfn(vma, vmf->address, page_to_pfn(page));
}
mutex_unlock(&shmem->pages_lock);
@@ -614,7 +633,7 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
return ret;
}
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
if (shmem->map_wc)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 43cf7e887d1a..bfa386b98134 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -846,7 +846,6 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
- ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
kfree(tt);
}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index be4a52dc4d6f..8b8744dcf691 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -522,19 +522,7 @@ int drm_version(struct drm_device *dev, void *data,
return err;
}
-/**
- * drm_ioctl_permit - Check ioctl permissions against caller
- *
- * @flags: ioctl permission flags.
- * @file_priv: Pointer to struct drm_file identifying the caller.
- *
- * Checks whether the caller is allowed to run an ioctl with the
- * indicated permissions.
- *
- * Returns:
- * Zero if allowed, -EACCES otherwise.
- */
-int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
+static int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
{
/* ROOT_ONLY is only for CAP_SYS_ADMIN */
if (unlikely((flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)))
@@ -557,7 +545,6 @@ int drm_ioctl_permit(u32 flags, struct drm_file *file_priv)
return 0;
}
-EXPORT_SYMBOL(drm_ioctl_permit);
#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
[DRM_IOCTL_NR(ioctl)] = { \
@@ -725,7 +712,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_REVOKE_LEASE, drm_mode_revoke_lease_ioctl, DRM_MASTER),
};
-#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE(drm_ioctls)
/**
* DOC: driver specific ioctls
@@ -834,8 +821,8 @@ long drm_ioctl(struct file *filp,
if (drm_dev_is_unplugged(dev))
return -ENODEV;
- if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
- return -ENOTTY;
+ if (DRM_IOCTL_TYPE(cmd) != DRM_IOCTL_BASE)
+ return -ENOTTY;
is_driver_ioctl = nr >= DRM_COMMAND_BASE && nr < DRM_COMMAND_END;
diff --git a/drivers/gpu/drm/drm_kms_helper_common.c b/drivers/gpu/drm/drm_kms_helper_common.c
index f933da1656eb..47e92400548d 100644
--- a/drivers/gpu/drm/drm_kms_helper_common.c
+++ b/drivers/gpu/drm/drm_kms_helper_common.c
@@ -64,17 +64,6 @@ MODULE_PARM_DESC(edid_firmware,
static int __init drm_kms_helper_init(void)
{
- /*
- * The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
- * but the module doesn't depend on any fb console symbols. At least
- * attempt to load fbcon to avoid leaving the system without a usable
- * console.
- */
- if (IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION) &&
- IS_MODULE(CONFIG_FRAMEBUFFER_CONSOLE) &&
- !IS_ENABLED(CONFIG_EXPERT))
- request_module_nowait("fbcon");
-
return drm_dp_aux_dev_init();
}
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index dee4f24a1808..d72c2fac0ff1 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -489,12 +489,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EOPNOTSUPP;
- /* need some objects */
- if (cl->object_count == 0) {
- DRM_DEBUG_LEASE("no objects in lease\n");
- return -EINVAL;
- }
-
if (cl->flags && (cl->flags & ~(O_CLOEXEC | O_NONBLOCK))) {
DRM_DEBUG_LEASE("invalid flags\n");
return -EINVAL;
@@ -510,23 +504,26 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
object_count = cl->object_count;
- object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
- array_size(object_count, sizeof(__u32)));
- if (IS_ERR(object_ids)) {
- ret = PTR_ERR(object_ids);
- goto out_lessor;
- }
-
+ /* Handle leased objects, if any */
idr_init(&leases);
+ if (object_count != 0) {
+ object_ids = memdup_user(u64_to_user_ptr(cl->object_ids),
+ array_size(object_count, sizeof(__u32)));
+ if (IS_ERR(object_ids)) {
+ ret = PTR_ERR(object_ids);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
- /* fill and validate the object idr */
- ret = fill_object_idr(dev, lessor_priv, &leases,
- object_count, object_ids);
- kfree(object_ids);
- if (ret) {
- DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
- idr_destroy(&leases);
- goto out_lessor;
+ /* fill and validate the object idr */
+ ret = fill_object_idr(dev, lessor_priv, &leases,
+ object_count, object_ids);
+ kfree(object_ids);
+ if (ret) {
+ DRM_DEBUG_LEASE("lease object lookup failed: %i\n", ret);
+ idr_destroy(&leases);
+ goto out_lessor;
+ }
}
/* Allocate a file descriptor for the lease */
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 5dd475e82995..18cef04df2f2 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -246,6 +246,52 @@ void mipi_dsi_device_unregister(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_device_unregister);
+static void devm_mipi_dsi_device_unregister(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_device_unregister(dsi);
+}
+
+/**
+ * devm_mipi_dsi_device_register_full - create a managed MIPI DSI device
+ * @dev: device to tie the MIPI-DSI device lifetime to
+ * @host: DSI host to which this device is connected
+ * @info: pointer to template containing DSI device information
+ *
+ * Create a MIPI DSI device using the device information provided by the
+ * @info template.
+ *
+ * This is the managed version of mipi_dsi_device_register_full() which
+ * automatically calls mipi_dsi_device_unregister() when @dev is
+ * unbound.
+ *
+ * Returns:
+ * A pointer to the newly created MIPI DSI device, or a pointer encoded
+ * with an error.
+ */
+struct mipi_dsi_device *
+devm_mipi_dsi_device_register_full(struct device *dev,
+ struct mipi_dsi_host *host,
+ const struct mipi_dsi_device_info *info)
+{
+ struct mipi_dsi_device *dsi;
+ int ret;
+
+ dsi = mipi_dsi_device_register_full(host, info);
+ if (IS_ERR(dsi))
+ return dsi;
+
+ ret = devm_add_action_or_reset(dev,
+ devm_mipi_dsi_device_unregister,
+ dsi);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return dsi;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_device_register_full);
+
static DEFINE_MUTEX(host_lock);
static LIST_HEAD(host_list);
@@ -345,6 +391,41 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
}
EXPORT_SYMBOL(mipi_dsi_detach);
+static void devm_mipi_dsi_detach(void *arg)
+{
+ struct mipi_dsi_device *dsi = arg;
+
+ mipi_dsi_detach(dsi);
+}
+
+/**
+ * devm_mipi_dsi_attach - Attach a MIPI-DSI device to its DSI Host
+ * @dev: device to tie the MIPI-DSI device attachment lifetime to
+ * @dsi: DSI peripheral
+ *
+ * This is the managed version of mipi_dsi_attach() which automatically
+ * calls mipi_dsi_detach() when @dev is unbound.
+ *
+ * Returns:
+ * 0 on success, a negative error code on failure.
+ */
+int devm_mipi_dsi_attach(struct device *dev,
+ struct mipi_dsi_device *dsi)
+{
+ int ret;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev, devm_mipi_dsi_detach, dsi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(devm_mipi_dsi_attach);
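/*
 * Illustrative sketch (not part of the patch): a DSI bridge or panel
 * driver probe path using both managed helpers, so unregister and detach
 * happen automatically when the device is unbound. The my_bridge_* names
 * and the "my-bridge-dsi" type string are assumptions for illustration;
 * of_find_mipi_dsi_host_by_node() is the existing host lookup.
 */
static int my_bridge_attach_dsi(struct device *dev, struct device_node *host_node)
{
        const struct mipi_dsi_device_info info = {
                .type = "my-bridge-dsi",
                .channel = 0,
                .node = NULL,
        };
        struct mipi_dsi_host *host;
        struct mipi_dsi_device *dsi;

        host = of_find_mipi_dsi_host_by_node(host_node);
        if (!host)
                return -EPROBE_DEFER;

        dsi = devm_mipi_dsi_device_register_full(dev, host, &info);
        if (IS_ERR(dsi))
                return PTR_ERR(dsi);

        dsi->lanes = 4;
        dsi->format = MIPI_DSI_FMT_RGB888;
        dsi->mode_flags = MIPI_DSI_MODE_VIDEO;

        /* Detach is undone automatically when @dev is unbound */
        return devm_mipi_dsi_attach(dev, dsi);
}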
+
static ssize_t mipi_dsi_device_transfer(struct mipi_dsi_device *dsi,
struct mipi_dsi_msg *msg)
{
diff --git a/drivers/gpu/drm/drm_modeset_lock.c b/drivers/gpu/drm/drm_modeset_lock.c
index fcfe1a03c4a1..bf8a6e823a15 100644
--- a/drivers/gpu/drm/drm_modeset_lock.c
+++ b/drivers/gpu/drm/drm_modeset_lock.c
@@ -248,7 +248,7 @@ static inline int modeset_lock(struct drm_modeset_lock *lock,
if (ctx->trylock_only) {
lockdep_assert_held(&ctx->ww_ctx);
- if (!ww_mutex_trylock(&lock->mutex))
+ if (!ww_mutex_trylock(&lock->mutex, NULL))
return -EBUSY;
else
return 0;
diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c
index 997b8827fed2..37c34146eea8 100644
--- a/drivers/gpu/drm/drm_of.c
+++ b/drivers/gpu/drm/drm_of.c
@@ -231,6 +231,9 @@ EXPORT_SYMBOL_GPL(drm_of_encoder_active_endpoint);
* return either the associated struct drm_panel or drm_bridge device. Either
* @panel or @bridge must not be NULL.
*
+ * This function is deprecated and should not be used in new drivers. Use
+ * devm_drm_of_get_bridge() instead.
+ *
* Returns zero if successful, or one of the standard error codes if it fails.
*/
int drm_of_find_panel_or_bridge(const struct device_node *np,
diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c
index f6bdec7fa925..a9359878f4ed 100644
--- a/drivers/gpu/drm/drm_panel_orientation_quirks.c
+++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c
@@ -109,6 +109,12 @@ static const struct drm_dmi_panel_orientation_data lcd1200x1920_rightside_up = {
.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+static const struct drm_dmi_panel_orientation_data lcd1280x1920_rightside_up = {
+ .width = 1280,
+ .height = 1920,
+ .orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
static const struct dmi_system_id orientation_data[] = {
{ /* Acer One 10 (S1003) */
.matches = {
@@ -134,6 +140,26 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* AYA NEO 2021 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Chuwi HiBook (CWI514) */
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ /* Above matches are too generic, add bios-date match */
+ DMI_MATCH(DMI_BIOS_DATE, "05/07/2016"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
+ }, { /* Chuwi Hi10 Pro (CWI529) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Hi10 pro tablet"),
+ },
+ .driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* GPD MicroPC (generic strings, also match on bios date) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -185,6 +211,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
},
.driver_data = (void *)&gpd_win2,
+ }, { /* GPD Win 3 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1618-03")
+ },
+ .driver_data = (void *)&lcd720x1280_rightside_up,
}, { /* I.T.Works TW891 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "To be filled by O.E.M."),
@@ -193,6 +225,13 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_BOARD_NAME, "TW891"),
},
.driver_data = (void *)&itworks_tw891,
+ }, { /* KD Kurio Smart C15200 2-in-1 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "KD Interactive"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Kurio Smart"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KDM960BCP"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /*
* Lenovo Ideapad Miix 310 laptop, only some production batches
* have a portrait screen, the resolution checks makes the quirk
@@ -211,10 +250,15 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"),
},
.driver_data = (void *)&lcd800x1280_rightside_up,
- }, { /* Lenovo Ideapad D330 */
+ }, { /* Lenovo Ideapad D330-10IGM (HD) */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
+ }, { /* Lenovo Ideapad D330-10IGM (FHD) */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "81H3"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad D330-10IGM"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
@@ -225,6 +269,19 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Default string"),
},
.driver_data = (void *)&onegx1_pro,
+ }, { /* Samsung GalaxyBook 10.6 */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Galaxy Book 10.6"),
+ },
+ .driver_data = (void *)&lcd1280x1920_rightside_up,
+ }, { /* Valve Steam Deck */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Valve"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Jupiter"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "1"),
+ },
+ .driver_data = (void *)&lcd800x1280_rightside_up,
}, { /* VIOS LTH17 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "VIOS"),
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index 5606bca3caa8..61d5c57f23e1 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -795,6 +795,86 @@ void drm_kms_helper_poll_fini(struct drm_device *dev)
}
EXPORT_SYMBOL(drm_kms_helper_poll_fini);
+static bool check_connector_changed(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+ u64 old_epoch_counter;
+
+ /* Only handle HPD capable connectors. */
+ drm_WARN_ON(dev, !(connector->polled & DRM_CONNECTOR_POLL_HPD));
+
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
+
+ old_status = connector->status;
+ old_epoch_counter = connector->epoch_counter;
+ connector->status = drm_helper_probe_detect(connector, NULL, false);
+
+ if (old_epoch_counter == connector->epoch_counter) {
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Same epoch counter %llu\n",
+ connector->base.id,
+ connector->name,
+ connector->epoch_counter);
+
+ return false;
+ }
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.id,
+ connector->name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->status));
+
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Changed epoch counter %llu => %llu\n",
+ connector->base.id,
+ connector->name,
+ old_epoch_counter,
+ connector->epoch_counter);
+
+ return true;
+}
+
+/**
+ * drm_connector_helper_hpd_irq_event - hotplug processing
+ * @connector: drm_connector
+ *
+ * Drivers can use this helper function to run a detect cycle on a connector
+ * which has the DRM_CONNECTOR_POLL_HPD flag set in its &polled member.
+ *
+ * This helper function is useful for drivers which can track hotplug
+ * interrupts for a single connector. Drivers that want to send a
+ * hotplug event for all connectors or can't track hotplug interrupts
+ * per connector need to use drm_helper_hpd_irq_event().
+ *
+ * This function must be called from process context with no mode
+ * setting locks held.
+ *
+ * Note that a connector can be both polled and probed from the hotplug
+ * handler, in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
+ */
+bool drm_connector_helper_hpd_irq_event(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ bool changed;
+
+ mutex_lock(&dev->mode_config.mutex);
+ changed = check_connector_changed(connector);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (changed) {
+ drm_kms_helper_hotplug_event(dev);
+ drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Sent hotplug event\n",
+ connector->base.id,
+ connector->name);
+ }
+
+ return changed;
+}
+EXPORT_SYMBOL(drm_connector_helper_hpd_irq_event);
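/*
 * Illustrative sketch (not part of the patch): a driver with a
 * per-connector HPD interrupt could call the new helper from its threaded
 * interrupt handler, instead of re-detecting every connector with
 * drm_helper_hpd_irq_event(). The my_hpd_isr_thread() name is an
 * assumption for illustration.
 */
static irqreturn_t my_hpd_isr_thread(int irq, void *arg)
{
        struct drm_connector *connector = arg;

        /* Re-detects this connector and sends a hotplug uevent on change */
        drm_connector_helper_hpd_irq_event(connector);

        return IRQ_HANDLED;
}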
+
/**
* drm_helper_hpd_irq_event - hotplug processing
* @dev: drm_device
@@ -808,23 +888,25 @@ EXPORT_SYMBOL(drm_kms_helper_poll_fini);
* interrupts for each connector.
*
* Drivers which support hotplug interrupts for each connector individually and
- * which have a more fine-grained detect logic should bypass this code and
- * directly call drm_kms_helper_hotplug_event() in case the connector state
- * changed.
+ * which have a more fine-grained detect logic can use
+ * drm_connector_helper_hpd_irq_event(). Alternatively, they should bypass this
+ * code and directly call drm_kms_helper_hotplug_event() in case the connector
+ * state changed.
*
* This function must be called from process context with no mode
* setting locks held.
*
* Note that a connector can be both polled and probed from the hotplug handler,
* in case the hotplug interrupt is known to be unreliable.
+ *
+ * Returns:
+ * A boolean indicating whether the connector status changed or not
*/
bool drm_helper_hpd_irq_event(struct drm_device *dev)
{
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum drm_connector_status old_status;
bool changed = false;
- u64 old_epoch_counter;
if (!dev->mode_config.poll_enabled)
return false;
@@ -836,33 +918,8 @@ bool drm_helper_hpd_irq_event(struct drm_device *dev)
if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- old_status = connector->status;
-
- old_epoch_counter = connector->epoch_counter;
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Old epoch counter %llu\n", connector->base.id,
- connector->name,
- old_epoch_counter);
-
- connector->status = drm_helper_probe_detect(connector, NULL, false);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.id,
- connector->name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->status));
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] New epoch counter %llu\n",
- connector->base.id,
- connector->name,
- connector->epoch_counter);
-
- /*
- * Check if epoch counter had changed, meaning that we need
- * to send a uevent.
- */
- if (old_epoch_counter != connector->epoch_counter)
+ if (check_connector_changed(connector))
changed = true;
-
}
drm_connector_list_iter_end(&conn_iter);
mutex_unlock(&dev->mode_config.mutex);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 6c353c9dc772..dfec479830e4 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -127,8 +127,7 @@ struct drm_property *drm_property_create(struct drm_device *dev,
property->num_values = num_values;
INIT_LIST_HEAD(&property->enum_list);
- strncpy(property->name, name, DRM_PROP_NAME_LEN);
- property->name[DRM_PROP_NAME_LEN-1] = '\0';
+ strscpy_pad(property->name, name, DRM_PROP_NAME_LEN);
list_add_tail(&property->head, &dev->mode_config.property_list);
@@ -421,8 +420,7 @@ int drm_property_add_enum(struct drm_property *property,
if (!prop_enum)
return -ENOMEM;
- strncpy(prop_enum->name, name, DRM_PROP_NAME_LEN);
- prop_enum->name[DRM_PROP_NAME_LEN-1] = '\0';
+ strscpy_pad(prop_enum->name, name, DRM_PROP_NAME_LEN);
prop_enum->value = value;
property->values[index] = value;
@@ -475,8 +473,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (!property)
return -ENOENT;
- strncpy(out_resp->name, property->name, DRM_PROP_NAME_LEN);
- out_resp->name[DRM_PROP_NAME_LEN-1] = 0;
+ strscpy_pad(out_resp->name, property->name, DRM_PROP_NAME_LEN);
out_resp->flags = property->flags;
value_count = property->num_values;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 968a9560b4aa..76ff6ec3421b 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -10,6 +10,7 @@
* Copyright (c) 2003-2004 IBM Corp.
*/
+#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -50,8 +51,45 @@ static struct device_type drm_sysfs_device_minor = {
.name = "drm_minor"
};
+static struct device_type drm_sysfs_device_connector = {
+ .name = "drm_connector",
+};
+
struct class *drm_class;
+#ifdef CONFIG_ACPI
+static bool drm_connector_acpi_bus_match(struct device *dev)
+{
+ return dev->type == &drm_sysfs_device_connector;
+}
+
+static struct acpi_device *drm_connector_acpi_find_companion(struct device *dev)
+{
+ struct drm_connector *connector = to_drm_connector(dev);
+
+ return to_acpi_device_node(connector->fwnode);
+}
+
+static struct acpi_bus_type drm_connector_acpi_bus = {
+ .name = "drm_connector",
+ .match = drm_connector_acpi_bus_match,
+ .find_companion = drm_connector_acpi_find_companion,
+};
+
+static void drm_sysfs_acpi_register(void)
+{
+ register_acpi_bus_type(&drm_connector_acpi_bus);
+}
+
+static void drm_sysfs_acpi_unregister(void)
+{
+ unregister_acpi_bus_type(&drm_connector_acpi_bus);
+}
+#else
+static void drm_sysfs_acpi_register(void) { }
+static void drm_sysfs_acpi_unregister(void) { }
+#endif
+
static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
@@ -85,6 +123,8 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
+
+ drm_sysfs_acpi_register();
return 0;
}
@@ -97,11 +137,17 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
+ drm_sysfs_acpi_unregister();
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
}
+static void drm_sysfs_release(struct device *dev)
+{
+ kfree(dev);
+}
+
/*
* Connector properties
*/
@@ -273,27 +319,47 @@ static const struct attribute_group *connector_dev_groups[] = {
int drm_sysfs_connector_add(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct device *kdev;
+ int r;
if (connector->kdev)
return 0;
- connector->kdev =
- device_create_with_groups(drm_class, dev->primary->kdev, 0,
- connector, connector_dev_groups,
- "card%d-%s", dev->primary->index,
- connector->name);
+ kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
+ if (!kdev)
+ return -ENOMEM;
+
+ device_initialize(kdev);
+ kdev->class = drm_class;
+ kdev->type = &drm_sysfs_device_connector;
+ kdev->parent = dev->primary->kdev;
+ kdev->groups = connector_dev_groups;
+ kdev->release = drm_sysfs_release;
+ dev_set_drvdata(kdev, connector);
+
+ r = dev_set_name(kdev, "card%d-%s", dev->primary->index, connector->name);
+ if (r)
+ goto err_free;
+
DRM_DEBUG("adding \"%s\" to sysfs\n",
connector->name);
- if (IS_ERR(connector->kdev)) {
- DRM_ERROR("failed to register connector device: %ld\n", PTR_ERR(connector->kdev));
- return PTR_ERR(connector->kdev);
+ r = device_add(kdev);
+ if (r) {
+ drm_err(dev, "failed to register connector device: %d\n", r);
+ goto err_free;
}
+ connector->kdev = kdev;
+
if (connector->ddc)
return sysfs_create_link(&connector->kdev->kobj,
&connector->ddc->dev.kobj, "ddc");
return 0;
+
+err_free:
+ put_device(kdev);
+ return r;
}
void drm_sysfs_connector_remove(struct drm_connector *connector)
@@ -374,11 +440,6 @@ void drm_sysfs_connector_status_event(struct drm_connector *connector,
}
EXPORT_SYMBOL(drm_sysfs_connector_status_event);
-static void drm_sysfs_release(struct device *dev)
-{
- kfree(dev);
-}
-
struct device *drm_sysfs_minor_alloc(struct drm_minor *minor)
{
const char *minor_str;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index cc5b07f86346..242a5fd8b932 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1733,7 +1733,6 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
DBG("%s", dev_name(gpu->dev));
- flush_workqueue(gpu->wq);
destroy_workqueue(gpu->wq);
etnaviv_sched_fini(gpu);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index feb6da1b6ceb..180bb633d5c5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -163,6 +163,8 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
if (ret)
goto out_unlock;
+ drm_sched_job_arm(&submit->sched_job);
+
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
@@ -176,7 +178,7 @@ int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
- drm_sched_entity_push_job(&submit->sched_job, sched_entity);
+ drm_sched_entity_push_job(&submit->sched_job);
out_unlock:
mutex_unlock(&submit->gpu->fence_lock);
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 9e90258541a4..46b9c0f13d6d 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -16,7 +16,7 @@
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
static void do_gma_backlight_set(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
backlight_update_status(dev_priv->backlight_device);
}
#endif
@@ -24,7 +24,7 @@ static void do_gma_backlight_set(struct drm_device *dev)
void gma_backlight_enable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = dev_priv->backlight_level;
@@ -36,7 +36,7 @@ void gma_backlight_enable(struct drm_device *dev)
void gma_backlight_disable(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = false;
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
@@ -48,7 +48,7 @@ void gma_backlight_disable(struct drm_device *dev)
void gma_backlight_set(struct drm_device *dev, int v)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_level = v;
if (dev_priv->backlight_device && dev_priv->backlight_enabled) {
dev_priv->backlight_device->props.brightness = v;
@@ -60,7 +60,7 @@ void gma_backlight_set(struct drm_device *dev, int v)
int gma_backlight_init(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->backlight_enabled = true;
return dev_priv->ops->backlight_init(dev);
#else
@@ -71,7 +71,7 @@ int gma_backlight_init(struct drm_device *dev)
void gma_backlight_exit(struct drm_device *dev)
{
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->backlight_device) {
dev_priv->backlight_device->props.brightness = 0;
backlight_update_status(dev_priv->backlight_device);
diff --git a/drivers/gpu/drm/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 1342e7fb382f..d7c6cca23e94 100644
--- a/drivers/gpu/drm/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -38,7 +38,7 @@ static void cdv_disable_vga(struct drm_device *dev)
static int cdv_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
drm_mode_create_scaling_mode_property(dev);
@@ -146,7 +146,7 @@ static const struct backlight_ops cdv_ops = {
static int cdv_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct backlight_properties props;
memset(&props, 0, sizeof(struct backlight_properties));
@@ -206,7 +206,7 @@ static inline void CDV_MSG_WRITE32(int domain, uint port, uint offset,
static void cdv_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 pwr_cnt;
int domain = pci_domain_nr(pdev->bus);
@@ -259,7 +259,7 @@ static void cdv_errata(struct drm_device *dev)
*/
static int cdv_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -314,7 +314,7 @@ static int cdv_save_display_registers(struct drm_device *dev)
*/
static int cdv_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_save_area *regs = &dev_priv->regs;
struct drm_connector *connector;
@@ -383,7 +383,7 @@ static int cdv_restore_display_registers(struct drm_device *dev)
static int cdv_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -405,7 +405,7 @@ static int cdv_power_down(struct drm_device *dev)
static int cdv_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_cnt, pwr_mask, pwr_sts;
int tries = 5;
@@ -429,7 +429,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
{
struct drm_psb_private *dev_priv = container_of(work, struct drm_psb_private,
hotplug_work);
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
/* Just fire off a uevent and let userspace tell us what to do */
drm_helper_hpd_irq_event(dev);
@@ -440,7 +440,7 @@ static void cdv_hotplug_work_func(struct work_struct *work)
static int cdv_hotplug_event(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
schedule_work(&dev_priv->hotplug_work);
REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
return 1;
@@ -468,7 +468,7 @@ static const char *force_audio_names[] = {
void cdv_intel_attach_force_audio_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -497,7 +497,7 @@ static const char *broadcast_rgb_names[] = {
void cdv_intel_attach_broadcast_rgb_property(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_property *prop;
int i;
@@ -574,7 +574,7 @@ static const struct psb_offset cdv_regmap[2] = {
static int cdv_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
INIT_WORK(&dev_priv->hotplug_work, cdv_hotplug_work_func);
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index c3a9f6b3c848..94ebc48a4349 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -455,7 +455,7 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
static bool cdv_intel_pipe_enabled(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = NULL;
crtc = dev_priv->pipe_to_crtc_mapping[pipe];
@@ -489,7 +489,7 @@ void cdv_disable_sr(struct drm_device *dev)
void cdv_update_wm(struct drm_device *dev, struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
/* Is only one pipe enabled? */
@@ -574,7 +574,7 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -829,7 +829,7 @@ static void i8xx_clock(int refclk, struct gma_clock_t *clock)
static int cdv_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -910,7 +910,7 @@ struct drm_display_mode *cdv_intel_crtc_mode_get(struct drm_device *dev,
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
struct drm_display_mode *mode;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 595b765ecc71..ba6ad1466374 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -515,7 +515,7 @@ cdv_intel_dp_mode_valid(struct drm_connector *connector,
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int max_link_clock = cdv_intel_dp_link_clock(cdv_intel_dp_max_link_bw(encoder));
int max_lanes = cdv_intel_dp_max_lane_count(encoder);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
if (is_edp(encoder) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -896,7 +896,7 @@ static bool
cdv_intel_dp_mode_fixup(struct drm_encoder *encoder, const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_psb_private *dev_priv = encoder->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(encoder->dev);
struct gma_encoder *intel_encoder = to_gma_encoder(encoder);
struct cdv_intel_dp *intel_dp = intel_encoder->dev_priv;
int lane_count, clock;
@@ -988,7 +988,7 @@ cdv_intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_encoder *encoder;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
@@ -1744,7 +1744,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
if (is_edp(intel_encoder)) {
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
cdv_intel_edp_panel_vdd_off(intel_encoder);
if (ret) {
@@ -1809,7 +1809,7 @@ cdv_intel_dp_set_property(struct drm_connector *connector,
struct drm_property *property,
uint64_t val)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *encoder = gma_attached_encoder(connector);
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
int ret;
@@ -1908,7 +1908,7 @@ static void cdv_intel_dp_add_properties(struct drm_connector *connector)
/* check the VBT to see whether the eDP is on DP-D port */
static bool cdv_intel_dpc_is_edp(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct child_device_config *p_child;
int i;
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 8a2219fcf9b4..9e1cdb11023c 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -57,7 +57,7 @@ struct cdv_intel_lvds_priv {
*/
static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 retval;
if (gma_power_begin(dev, false)) {
@@ -81,7 +81,7 @@ static u32 cdv_intel_lvds_get_max_backlight(struct drm_device *dev)
*/
static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -105,7 +105,7 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
static void cdv_intel_lvds_set_power(struct drm_device *dev,
struct drm_encoder *encoder, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pp_status;
if (!gma_power_begin(dev, true))
@@ -154,7 +154,7 @@ static enum drm_mode_status cdv_intel_lvds_mode_valid(struct drm_connector *conn
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -180,7 +180,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
@@ -227,7 +227,7 @@ static bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -245,7 +245,7 @@ static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -260,7 +260,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
u32 pfit_control;
@@ -297,7 +297,7 @@ static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
@@ -428,7 +428,7 @@ static const struct drm_connector_funcs cdv_intel_lvds_connector_funcs = {
static bool lvds_is_present_in_vbt(struct drm_device *dev,
u8 *i2c_pin)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (!dev_priv->child_dev_num)
@@ -486,7 +486,7 @@ void cdv_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan;
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
u8 pin;
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 0b8648396fb2..321e416489a9 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -81,7 +81,7 @@ static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct drm_framebuffer *fb = vma->vm_private_data;
struct drm_device *dev = fb->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gtt = to_gtt_range(fb->obj[0]);
int page_num;
int i;
@@ -261,7 +261,7 @@ static int psbfb_create(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct fb_info *info;
struct drm_framebuffer *fb;
@@ -374,7 +374,7 @@ static int psbfb_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fb_helper->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned int fb_size;
int bytespp;
@@ -422,7 +422,7 @@ static int psb_fbdev_destroy(struct drm_device *dev,
int psb_fbdev_init(struct drm_device *dev)
{
struct drm_fb_helper *fb_helper;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
fb_helper = kzalloc(sizeof(*fb_helper), GFP_KERNEL);
@@ -457,7 +457,7 @@ free:
static void psb_fbdev_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!dev_priv->fb_helper)
return;
@@ -474,7 +474,7 @@ static const struct drm_mode_config_funcs psb_mode_funcs = {
static void psb_setup_outputs(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_connector *connector;
drm_mode_create_scaling_mode_property(dev);
@@ -533,7 +533,7 @@ static void psb_setup_outputs(struct drm_device *dev)
void psb_modeset_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
int i;
@@ -566,7 +566,7 @@ void psb_modeset_init(struct drm_device *dev)
void psb_modeset_cleanup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->modeset) {
drm_kms_helper_poll_fini(dev);
psb_fbdev_fini(dev);
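The gma500 hunks above and below all follow the same conversion: instead of dereferencing the untyped dev->dev_private pointer, call sites recover the driver state with to_drm_psb_private(), a container_of() upcast over the struct drm_device that this patch embeds in struct drm_psb_private (see the psb_drv.h hunk further down). A minimal sketch of the pattern, with the private structure trimmed to the members needed for illustration and a hypothetical caller:

#include <linux/kernel.h>
#include <drm/drm_device.h>

/* Trimmed stand-in; the real structure is declared in psb_drv.h. */
struct drm_psb_private {
	struct drm_device dev;	/* embedded, replaces dev->dev_private */
	bool display_power;	/* illustrative driver state */
};

static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
{
	return container_of(dev, struct drm_psb_private, dev);
}

/* Hypothetical caller showing the call-site shape used throughout the patch. */
static bool example_power_is_on(struct drm_device *dev)
{
	/* was: struct drm_psb_private *dev_priv = dev->dev_private; */
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);

	return dev_priv->display_power;
}

Embedding the drm_device by value is also what lets the psb_drv.c hunk below hand the whole allocation over to devm_drm_dev_alloc().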
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index fbf420051ef5..5ae54c9d2819 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -147,7 +147,7 @@ static vm_fault_t psb_gem_fault(struct vm_fault *vmf)
obj = vma->vm_private_data; /* GEM object */
dev = obj->dev;
- dev_priv = dev->dev_private;
+ dev_priv = to_drm_psb_private(dev);
r = container_of(obj, struct gtt_range, gem); /* Get the gtt range */
diff --git a/drivers/gpu/drm/gma500/gma_device.c b/drivers/gpu/drm/gma500/gma_device.c
index 4c91e86f4b14..954f3a275d81 100644
--- a/drivers/gpu/drm/gma500/gma_device.c
+++ b/drivers/gpu/drm/gma500/gma_device.c
@@ -15,7 +15,7 @@ void gma_get_core_freq(struct drm_device *dev)
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
0, 0);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/*pci_write_config_dword(pci_root, 0xD4, 0x00C32004);*/
/*pci_write_config_dword(pci_root, 0xD0, 0xE0033000);*/
diff --git a/drivers/gpu/drm/gma500/gma_display.c b/drivers/gpu/drm/gma500/gma_display.c
index b03f7b8241f2..cbcecbaa041b 100644
--- a/drivers/gpu/drm/gma500/gma_display.c
+++ b/drivers/gpu/drm/gma500/gma_display.c
@@ -51,7 +51,7 @@ int gma_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
struct gtt_range *gtt;
@@ -136,7 +136,7 @@ gma_pipe_set_base_exit:
void gma_crtc_load_lut(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
int palreg = map->palette;
@@ -189,7 +189,7 @@ int gma_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, u16 *blue,
void gma_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -324,7 +324,7 @@ int gma_crtc_cursor_set(struct drm_crtc *crtc,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
@@ -553,7 +553,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
struct drm_modeset_acquire_ctx *ctx)
{
struct drm_device *dev = set->crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->rpm_enabled)
@@ -572,7 +572,7 @@ int gma_crtc_set_config(struct drm_mode_set *set,
void gma_crtc_save(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
@@ -615,7 +615,7 @@ void gma_crtc_save(struct drm_crtc *crtc)
void gma_crtc_restore(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct psb_intel_crtc_state *crtc_state = gma_crtc->crtc_state;
const struct psb_offset *map = &dev_priv->regmap[gma_crtc->pipe];
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index df9b611b856a..55a2a6919533 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -53,7 +53,7 @@ static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
*/
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long offset;
offset = r->resource.start - dev_priv->gtt_mem->start;
@@ -118,7 +118,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
*/
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 __iomem *gtt_slot;
u32 pte;
int i;
@@ -188,7 +188,7 @@ int psb_gtt_pin(struct gtt_range *gt)
{
int ret = 0;
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -226,7 +226,7 @@ out:
void psb_gtt_unpin(struct gtt_range *gt)
{
struct drm_device *dev = gt->gem.dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gpu_base = dev_priv->gtt.gatt_start;
mutex_lock(&dev_priv->gtt_mutex);
@@ -266,7 +266,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
const char *name, int backed, u32 align)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gtt_range *gt;
struct resource *r = dev_priv->gtt_mem;
int ret;
@@ -322,13 +322,13 @@ void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
static void psb_gtt_alloc(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
init_rwsem(&dev_priv->gtt.sem);
}
void psb_gtt_takedown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
if (dev_priv->gtt_map) {
@@ -347,7 +347,7 @@ void psb_gtt_takedown(struct drm_device *dev)
int psb_gtt_init(struct drm_device *dev, int resume)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned gtt_pages;
unsigned long stolen_size, vram_stolen_size;
@@ -496,7 +496,7 @@ out_err:
int psb_gtt_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct resource *r = dev_priv->gtt_mem->child;
struct gtt_range *range;
unsigned int restored = 0, total = 0, size = 0;
diff --git a/drivers/gpu/drm/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index d838369f0119..d5ca5f241974 100644
--- a/drivers/gpu/drm/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -207,7 +207,7 @@ static void parse_backlight_data(struct drm_psb_private *dev_priv,
lvds_bl = kmemdup(vbt_lvds_bl, sizeof(*vbt_lvds_bl), GFP_KERNEL);
if (!lvds_bl) {
- dev_err(dev_priv->dev->dev, "out of memory for backlight data\n");
+ dev_err(dev_priv->dev.dev, "out of memory for backlight data\n");
return;
}
dev_priv->lvds_bl = lvds_bl;
@@ -248,7 +248,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode),
GFP_KERNEL);
if (panel_fixed_mode == NULL) {
- dev_err(dev_priv->dev->dev, "out of memory for fixed panel mode\n");
+ dev_err(dev_priv->dev.dev, "out of memory for fixed panel mode\n");
return;
}
@@ -259,7 +259,7 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
drm_mode_debug_printmodeline(panel_fixed_mode);
} else {
- dev_dbg(dev_priv->dev->dev, "ignoring invalid LVDS VBT\n");
+ dev_dbg(dev_priv->dev.dev, "ignoring invalid LVDS VBT\n");
dev_priv->lvds_vbt = 0;
kfree(panel_fixed_mode);
}
@@ -515,7 +515,7 @@ parse_device_mapping(struct drm_psb_private *dev_priv,
*/
int psb_intel_init_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct vbt_header *vbt = NULL;
struct bdb_header *bdb = NULL;
@@ -579,7 +579,7 @@ int psb_intel_init_bios(struct drm_device *dev)
*/
void psb_intel_destroy_bios(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
kfree(dev_priv->sdvo_lvds_vbt_mode);
kfree(dev_priv->lfp_lvds_vbt_mode);
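Because the drm_device is now embedded by value, code that previously chased the dev_priv->dev pointer uses the member directly: dev_priv->dev->dev becomes dev_priv->dev.dev, and callers that need a struct drm_device * take its address, as the intel_gmbus.c and mid_bios.c hunks below do. A short sketch under the same trimmed layout as above; example_report() is hypothetical:

#include <linux/device.h>
#include <linux/kernel.h>
#include <drm/drm_device.h>

/* Trimmed stand-in for illustration only. */
struct drm_psb_private {
	struct drm_device dev;	/* embedded by value */
};

static void example_report(struct drm_psb_private *dev_priv)
{
	/* struct device * for dev_* logging: was dev_priv->dev->dev */
	dev_err(dev_priv->dev.dev, "out of memory for backlight data\n");

	/* functions taking a struct drm_device * now receive &dev_priv->dev */
}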
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index c17cbafa468a..09cedabf4776 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -75,7 +75,7 @@ struct intel_gpio {
void
gma_intel_i2c_reset(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
GMBUS_REG_WRITE(GMBUS0, 0);
}
@@ -196,7 +196,7 @@ intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
"gma500 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
- gpio->adapter.dev.parent = dev_priv->dev->dev;
+ gpio->adapter.dev.parent = dev_priv->dev.dev;
gpio->algo.setsda = set_data;
gpio->algo.setscl = set_clock;
gpio->algo.getsda = get_data;
@@ -226,7 +226,7 @@ intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
adapter);
int ret;
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
intel_i2c_quirk_set(dev_priv, true);
set_data(gpio, 1);
@@ -394,7 +394,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
"reserved",
"dpd",
};
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret, i;
dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus),
@@ -432,7 +432,7 @@ int gma_intel_setup_gmbus(struct drm_device *dev)
bus->force_bit = intel_gpio_create(dev_priv, i);
}
- gma_intel_i2c_reset(dev_priv->dev);
+ gma_intel_i2c_reset(&dev_priv->dev);
return 0;
@@ -480,7 +480,7 @@ void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
void gma_intel_teardown_gmbus(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int i;
if (dev_priv->gmbus == NULL)
diff --git a/drivers/gpu/drm/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index 68e787924ed0..7e76790c6a81 100644
--- a/drivers/gpu/drm/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -18,7 +18,7 @@
static void mid_get_fuse_settings(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct pci_dev *pci_root =
pci_get_domain_bus_and_slot(pci_domain_nr(pdev->bus),
@@ -94,7 +94,7 @@ static void mid_get_fuse_settings(struct drm_device *dev)
static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
{
uint32_t platform_rev_id = 0;
- struct pci_dev *pdev = to_pci_dev(dev_priv->dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev_priv->dev.dev);
int domain = pci_domain_nr(pdev->bus);
struct pci_dev *pci_gfx_root =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(2, 0));
@@ -106,8 +106,7 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
pci_dev_put(pci_gfx_root);
- dev_dbg(dev_priv->dev->dev, "platform_rev_id is %x\n",
- dev_priv->platform_rev_id);
+ dev_dbg(dev_priv->dev.dev, "platform_rev_id is %x\n", dev_priv->platform_rev_id);
}
struct mid_vbt_header {
@@ -270,7 +269,7 @@ out:
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
- struct drm_device *dev = dev_priv->dev;
+ struct drm_device *dev = &dev_priv->dev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u32 addr;
u8 __iomem *vbt_virtual;
@@ -325,7 +324,7 @@ out:
int mid_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mid_get_fuse_settings(dev);
mid_get_vbt_data(dev_priv);
mid_get_pci_revID(dev_priv);
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index d856580b8111..fe9ace2a7967 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -66,7 +66,7 @@ static inline void psb_mmu_clflush(struct psb_mmu_driver *driver, void *addr)
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver, int force)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (atomic_read(&driver->needs_tlbflush) || force) {
uint32_t val = PSB_RSGX32(PSB_CR_BIF_CTRL);
@@ -94,7 +94,7 @@ static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
void psb_mmu_flush(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t val;
down_write(&driver->sem);
@@ -120,7 +120,7 @@ void psb_mmu_flush(struct psb_mmu_driver *driver)
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
struct drm_device *dev = pd->driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t offset = (hw_context == 0) ? PSB_CR_BIF_DIR_LIST_BASE0 :
PSB_CR_BIF_DIR_LIST_BASE1 + hw_context * 4;
@@ -230,7 +230,7 @@ void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
struct psb_mmu_driver *driver = pd->driver;
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_mmu_pt *pt;
int i;
@@ -409,7 +409,7 @@ struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
struct drm_device *dev = driver->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
PSB_WSGX32(driver->bif_ctrl, PSB_CR_BIF_CTRL);
psb_mmu_free_pagedir(driver->default_pd);
@@ -422,7 +422,7 @@ struct psb_mmu_driver *psb_mmu_driver_init(struct drm_device *dev,
atomic_t *msvdx_mmu_invaldc)
{
struct psb_mmu_driver *driver;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
driver = kmalloc(sizeof(*driver), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index 129f87971002..c6b115954b7d 100644
--- a/drivers/gpu/drm/gma500/oaktrail_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -82,7 +82,7 @@ static const struct gma_limit_t *mrst_limit(struct drm_crtc *crtc,
{
const struct gma_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (gma_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)
|| gma_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
@@ -214,7 +214,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit,
static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -361,7 +361,7 @@ static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
int refclk = 0;
@@ -589,7 +589,7 @@ static int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
int pipe = gma_crtc->pipe;
diff --git a/drivers/gpu/drm/gma500/oaktrail_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 454156fcbec7..5c75eae630b5 100644
--- a/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -20,7 +20,7 @@
static int oaktrail_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->iLVDS_enable)
oaktrail_lvds_init(dev, &dev_priv->mode_dev);
else
@@ -51,7 +51,7 @@ static int oaktrail_brightness;
static int oaktrail_set_brightness(struct backlight_device *bd)
{
struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int level = bd->props.brightness;
u32 blc_pwm_ctl;
u32 max_pwm_blc;
@@ -96,7 +96,7 @@ static int oaktrail_get_brightness(struct backlight_device *bd)
static int device_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
u16 bl_max_freq;
uint32_t value;
@@ -133,7 +133,7 @@ static const struct backlight_ops oaktrail_ops = {
static int oaktrail_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -175,7 +175,7 @@ static int oaktrail_backlight_init(struct drm_device *dev)
*/
static int oaktrail_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
int i;
@@ -289,7 +289,7 @@ static int oaktrail_save_display_registers(struct drm_device *dev)
*/
static int oaktrail_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_save_area *regs = &dev_priv->regs;
struct psb_pipe *p = &regs->pipe[0];
u32 pp_stat;
@@ -404,7 +404,7 @@ static int oaktrail_restore_display_registers(struct drm_device *dev)
*/
static int oaktrail_power_down(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask ;
u32 pwr_sts;
@@ -428,7 +428,7 @@ static int oaktrail_power_down(struct drm_device *dev)
*/
static int oaktrail_power_up(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
u32 pwr_sts, pwr_cnt;
@@ -500,7 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
static int oaktrail_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
@@ -524,7 +524,7 @@ static int oaktrail_chip_setup(struct drm_device *dev)
static void oaktrail_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
gma_intel_teardown_gmbus(dev);
oaktrail_hdmi_teardown(dev);
diff --git a/drivers/gpu/drm/gma500/oaktrail_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index a097a59a9eae..6eef60a5ac27 100644
--- a/drivers/gpu/drm/gma500/oaktrail_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -130,7 +130,7 @@ static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(HDMI_HCR, 0x67);
@@ -145,7 +145,7 @@ static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(0x51a8, 0x0);
@@ -264,7 +264,7 @@ int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int pipe = 1;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
@@ -494,7 +494,7 @@ static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
static int dpms_mode = -1;
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -529,7 +529,7 @@ oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
@@ -665,7 +665,7 @@ failed_connector:
void oaktrail_hdmi_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev;
struct oaktrail_hdmi_dev *hdmi_dev;
int ret;
@@ -718,7 +718,7 @@ out:
void oaktrail_hdmi_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct pci_dev *pdev;
@@ -735,7 +735,7 @@ void oaktrail_hdmi_teardown(struct drm_device *dev)
/* save HDMI register state */
void oaktrail_hdmi_save(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
@@ -788,7 +788,7 @@ void oaktrail_hdmi_save(struct drm_device *dev)
/* restore HDMI register state */
void oaktrail_hdmi_restore(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct psb_state *regs = &dev_priv->regs.psb;
struct psb_pipe *pipeb = &dev_priv->regs.pipe[1];
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index f9b1f88c73bd..28b995ef2844 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -37,7 +37,7 @@ static void oaktrail_lvds_set_power(struct drm_device *dev,
bool on)
{
u32 pp_status;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (!gma_power_begin(dev, true))
return;
@@ -83,7 +83,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector = NULL;
@@ -155,7 +155,7 @@ static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -171,7 +171,7 @@ static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -191,7 +191,7 @@ static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
@@ -215,7 +215,7 @@ static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
struct drm_display_mode *mode = NULL;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
mode_dev->panel_fixed_mode = NULL;
@@ -294,7 +294,7 @@ void oaktrail_lvds_init(struct drm_device *dev,
struct gma_connector *gma_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct edid *edid;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
diff --git a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
index 1d2dd6ea1c71..d1ae91fcd224 100644
--- a/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds_i2c.c
@@ -133,7 +133,7 @@ void oaktrail_lvds_i2c_init(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct gma_encoder *gma_encoder = to_gma_encoder(encoder);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *chan;
chan = kzalloc(sizeof(struct psb_intel_i2c_chan), GFP_KERNEL);
diff --git a/drivers/gpu/drm/gma500/opregion.c b/drivers/gpu/drm/gma500/opregion.c
index a1ffc6a1c255..fef04ff8c3a9 100644
--- a/drivers/gpu/drm/gma500/opregion.c
+++ b/drivers/gpu/drm/gma500/opregion.c
@@ -147,7 +147,7 @@ static struct psb_intel_opregion *system_opregion;
static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
struct backlight_device *bd = dev_priv->backlight_device;
@@ -190,7 +190,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
}
if (asle_req & ASLE_SET_BACKLIGHT)
- asle_stat |= asle_set_backlight(dev_priv->dev, asle->bclp);
+ asle_stat |= asle_set_backlight(&dev_priv->dev, asle->bclp);
asle->aslc = asle_stat;
@@ -198,7 +198,7 @@ static void psb_intel_opregion_asle_work(struct work_struct *work)
void psb_intel_opregion_asle_intr(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->opregion.asle)
schedule_work(&dev_priv->opregion.asle_work);
@@ -211,7 +211,7 @@ void psb_intel_opregion_asle_intr(struct drm_device *dev)
void psb_intel_opregion_enable_asle(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle && system_opregion ) {
@@ -258,7 +258,7 @@ static struct notifier_block psb_intel_opregion_notifier = {
void psb_intel_opregion_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -278,7 +278,7 @@ void psb_intel_opregion_init(struct drm_device *dev)
void psb_intel_opregion_fini(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
if (!opregion->header)
@@ -304,7 +304,7 @@ void psb_intel_opregion_fini(struct drm_device *dev)
int psb_intel_opregion_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct psb_intel_opregion *opregion = &dev_priv->opregion;
u32 opregion_phy, mboxes;
diff --git a/drivers/gpu/drm/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 20ace6010f9f..d2a46d96e746 100644
--- a/drivers/gpu/drm/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -47,7 +47,7 @@ static DEFINE_SPINLOCK(power_ctrl_lock); /* Serialize power claim */
*/
void gma_power_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* FIXME: Move APM/OSPM base into relevant device code */
dev_priv->apm_base = dev_priv->apm_reg & 0xffff;
@@ -82,7 +82,7 @@ void gma_power_uninit(struct drm_device *dev)
*/
static void gma_suspend_display(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
if (dev_priv->suspended)
return;
@@ -101,7 +101,7 @@ static void gma_suspend_display(struct drm_device *dev)
static void gma_resume_display(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* turn on the display power island */
dev_priv->ops->power_up(dev);
@@ -125,7 +125,7 @@ static void gma_resume_display(struct pci_dev *pdev)
static void gma_suspend_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int bsm, vbt;
if (dev_priv->suspended)
@@ -155,7 +155,7 @@ static void gma_suspend_pci(struct pci_dev *pdev)
static bool gma_resume_pci(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
if (!dev_priv->suspended)
@@ -189,7 +189,7 @@ int gma_power_suspend(struct device *_dev)
{
struct pci_dev *pdev = to_pci_dev(_dev);
struct drm_device *dev = pci_get_drvdata(pdev);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
mutex_lock(&power_mutex);
if (!dev_priv->suspended) {
@@ -234,7 +234,7 @@ int gma_power_resume(struct device *_dev)
*/
bool gma_power_is_on(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return dev_priv->display_power;
}
@@ -248,7 +248,7 @@ bool gma_power_is_on(struct drm_device *dev)
*/
bool gma_power_begin(struct drm_device *dev, bool force_on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
int ret;
unsigned long flags;
@@ -288,7 +288,7 @@ out_false:
*/
void gma_power_end(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long flags;
spin_lock_irqsave(&power_ctrl_lock, flags);
dev_priv->display_count--;
@@ -310,7 +310,7 @@ int psb_runtime_resume(struct device *dev)
int psb_runtime_idle(struct device *dev)
{
struct drm_device *drmdev = pci_get_drvdata(to_pci_dev(dev));
- struct drm_psb_private *dev_priv = drmdev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(drmdev);
if (dev_priv->display_count)
return 0;
else
diff --git a/drivers/gpu/drm/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index 951725a0f7a3..3030f18ba022 100644
--- a/drivers/gpu/drm/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -18,7 +18,7 @@
static int psb_output_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_intel_lvds_init(dev, &dev_priv->mode_dev);
psb_intel_sdvo_init(dev, SDVOB);
return 0;
@@ -55,7 +55,7 @@ static int psb_get_brightness(struct backlight_device *bd)
static int psb_backlight_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long core_clock;
/* u32 bl_max_freq; */
/* unsigned long value; */
@@ -110,7 +110,7 @@ static const struct backlight_ops psb_ops = {
static int psb_backlight_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int ret;
struct backlight_properties props;
@@ -149,7 +149,7 @@ static int psb_backlight_init(struct drm_device *dev)
static void psb_init_pm(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 gating = PSB_RSGX32(PSB_CR_CLKGATECTL);
gating &= ~3; /* Disable 2D clock gating */
@@ -167,7 +167,7 @@ static void psb_init_pm(struct drm_device *dev)
*/
static int psb_save_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -205,7 +205,7 @@ static int psb_save_display_registers(struct drm_device *dev)
*/
static int psb_restore_display_registers(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct drm_crtc *crtc;
struct gma_connector *connector;
struct psb_state *regs = &dev_priv->regs.psb;
@@ -300,7 +300,7 @@ static const struct psb_offset psb_regmap[2] = {
static int psb_chip_setup(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_priv->regmap = psb_regmap;
gma_get_core_freq(dev);
gma_intel_setup_gmbus(dev);
@@ -311,7 +311,7 @@ static int psb_chip_setup(struct drm_device *dev)
static void psb_chip_teardown(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
psb_lid_timer_takedown(dev_priv);
gma_intel_teardown_gmbus(dev);
}
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 58bce1a60a4d..7a10bb39ef0b 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -124,7 +124,7 @@ void psb_spank(struct drm_psb_private *dev_priv)
static int psb_do_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_gtt *pg = &dev_priv->gtt;
uint32_t stolen_gtt;
@@ -163,71 +163,74 @@ static int psb_do_init(struct drm_device *dev)
static void psb_driver_unload(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
/* TODO: Kill vblank etc here */
- if (dev_priv) {
- if (dev_priv->backlight_device)
- gma_backlight_exit(dev);
- psb_modeset_cleanup(dev);
+ if (dev_priv->backlight_device)
+ gma_backlight_exit(dev);
+ psb_modeset_cleanup(dev);
- if (dev_priv->ops->chip_teardown)
- dev_priv->ops->chip_teardown(dev);
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
- psb_intel_opregion_fini(dev);
+ psb_intel_opregion_fini(dev);
- if (dev_priv->pf_pd) {
- psb_mmu_free_pagedir(dev_priv->pf_pd);
- dev_priv->pf_pd = NULL;
- }
- if (dev_priv->mmu) {
- struct psb_gtt *pg = &dev_priv->gtt;
-
- down_read(&pg->sem);
- psb_mmu_remove_pfn_sequence(
- psb_mmu_get_default_pd
- (dev_priv->mmu),
- pg->mmu_gatt_start,
- dev_priv->vram_stolen_size >> PAGE_SHIFT);
- up_read(&pg->sem);
- psb_mmu_driver_takedown(dev_priv->mmu);
- dev_priv->mmu = NULL;
- }
- psb_gtt_takedown(dev);
- if (dev_priv->scratch_page) {
- set_pages_wb(dev_priv->scratch_page, 1);
- __free_page(dev_priv->scratch_page);
- dev_priv->scratch_page = NULL;
- }
- if (dev_priv->vdc_reg) {
- iounmap(dev_priv->vdc_reg);
- dev_priv->vdc_reg = NULL;
- }
- if (dev_priv->sgx_reg) {
- iounmap(dev_priv->sgx_reg);
- dev_priv->sgx_reg = NULL;
- }
- if (dev_priv->aux_reg) {
- iounmap(dev_priv->aux_reg);
- dev_priv->aux_reg = NULL;
- }
- pci_dev_put(dev_priv->aux_pdev);
- pci_dev_put(dev_priv->lpc_pdev);
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ set_pages_wb(dev_priv->scratch_page, 1);
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+ if (dev_priv->aux_reg) {
+ iounmap(dev_priv->aux_reg);
+ dev_priv->aux_reg = NULL;
+ }
+ pci_dev_put(dev_priv->aux_pdev);
+ pci_dev_put(dev_priv->lpc_pdev);
- /* Destroy VBT data */
- psb_intel_destroy_bios(dev);
+ /* Destroy VBT data */
+ psb_intel_destroy_bios(dev);
- kfree(dev_priv);
- dev->dev_private = NULL;
- }
gma_power_uninit(dev);
}
+static void psb_device_release(void *data)
+{
+ struct drm_device *dev = data;
+
+ psb_driver_unload(dev);
+}
+
static int psb_driver_load(struct drm_device *dev, unsigned long flags)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
- struct drm_psb_private *dev_priv;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long resource_start, resource_len;
unsigned long irqflags;
int ret = -ENOMEM;
@@ -235,14 +238,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
struct gma_encoder *gma_encoder;
struct psb_gtt *pg;
- /* allocating and initializing driver private data */
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
+ /* initializing driver private data */
dev_priv->ops = (struct psb_ops *)flags;
- dev_priv->dev = dev;
- dev->dev_private = (void *) dev_priv;
pg = &dev_priv->gtt;
@@ -409,8 +407,9 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
pm_runtime_enable(dev->dev);
pm_runtime_set_active(dev->dev);
#endif
- /* Intel drm driver load is done, continue doing pvr load */
- return 0;
+
+ return devm_add_action_or_reset(dev->dev, psb_device_release, dev);
+
out_err:
psb_driver_unload(dev);
return ret;
@@ -431,7 +430,7 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
{
struct drm_file *file_priv = filp->private_data;
struct drm_device *dev = file_priv->minor->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
static unsigned int runtime_allowed;
if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
@@ -445,38 +444,30 @@ static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
static int psb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
+ struct drm_psb_private *dev_priv;
struct drm_device *dev;
int ret;
- ret = pci_enable_device(pdev);
+ ret = pcim_enable_device(pdev);
if (ret)
return ret;
- dev = drm_dev_alloc(&driver, &pdev->dev);
- if (IS_ERR(dev)) {
- ret = PTR_ERR(dev);
- goto err_pci_disable_device;
- }
+ dev_priv = devm_drm_dev_alloc(&pdev->dev, &driver, struct drm_psb_private, dev);
+ if (IS_ERR(dev_priv))
+ return PTR_ERR(dev_priv);
+ dev = &dev_priv->dev;
pci_set_drvdata(pdev, dev);
ret = psb_driver_load(dev, ent->driver_data);
if (ret)
- goto err_drm_dev_put;
+ return ret;
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
- goto err_psb_driver_unload;
+ return ret;
return 0;
-
-err_psb_driver_unload:
- psb_driver_unload(dev);
-err_drm_dev_put:
- drm_dev_put(dev);
-err_pci_disable_device:
- pci_disable_device(pdev);
- return ret;
}
static void psb_pci_remove(struct pci_dev *pdev)
@@ -484,8 +475,6 @@ static void psb_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
drm_dev_unregister(dev);
- psb_driver_unload(dev);
- drm_dev_put(dev);
}
static const struct dev_pm_ops psb_pm_ops = {
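The psb_drv.c hunk above reorganizes the probe and teardown paths around managed helpers: pcim_enable_device() ties the PCI enable to the device lifetime, devm_drm_dev_alloc() allocates struct drm_psb_private together with its embedded drm_device and frees it on the final drm_dev_put(), and devm_add_action_or_reset() queues psb_device_release() so the hand-rolled error and remove unwinding can be dropped. A condensed sketch of that probe flow; the declarations of driver, psb_driver_load() and the trimmed private structure are stand-ins for the definitions elsewhere in psb_drv.c and psb_drv.h:

#include <linux/err.h>
#include <linux/pci.h>
#include <drm/drm_drv.h>

/* Stand-ins so the sketch reads on its own. */
extern const struct drm_driver driver;
int psb_driver_load(struct drm_device *dev, unsigned long flags);
struct drm_psb_private { struct drm_device dev; /* ... */ };

static int example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct drm_psb_private *dev_priv;
	struct drm_device *dev;
	int ret;

	ret = pcim_enable_device(pdev);	/* managed: no pci_disable_device() needed */
	if (ret)
		return ret;

	/* Allocates dev_priv and initializes the embedded drm_device member. */
	dev_priv = devm_drm_dev_alloc(&pdev->dev, &driver,
				      struct drm_psb_private, dev);
	if (IS_ERR(dev_priv))
		return PTR_ERR(dev_priv);
	dev = &dev_priv->dev;

	pci_set_drvdata(pdev, dev);

	ret = psb_driver_load(dev, ent->driver_data);
	if (ret)
		return ret;

	/* psb_driver_load() registers psb_device_release() via
	 * devm_add_action_or_reset(), which calls psb_driver_unload() on
	 * teardown, so no explicit unwinding remains here. */
	return drm_dev_register(dev, ent->driver_data);
}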
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index f2bae270ca7b..0439b10d3db5 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -389,7 +389,8 @@ struct psb_ops;
struct intel_scu_ipc_dev;
struct drm_psb_private {
- struct drm_device *dev;
+ struct drm_device dev;
+
struct pci_dev *aux_pdev; /* Currently only used by mrst */
struct pci_dev *lpc_pdev; /* Currently only used by mrst */
const struct psb_ops *ops;
@@ -567,6 +568,10 @@ struct drm_psb_private {
uint8_t panel_type;
};
+static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
+{
+ return container_of(dev, struct drm_psb_private, dev);
+}
/* Operations for each board type */
struct psb_ops {
@@ -618,11 +623,6 @@ struct psb_ops {
extern int drm_crtc_probe_output_modes(struct drm_device *dev, int, int);
extern int drm_pick_crtcs(struct drm_device *dev);
-static inline struct drm_psb_private *psb_priv(struct drm_device *dev)
-{
- return (struct drm_psb_private *) dev->dev_private;
-}
-
/* psb_irq.c */
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
@@ -729,13 +729,13 @@ static inline void MRST_MSG_WRITE32(int domain, uint port, uint offset,
static inline uint32_t REGISTER_READ(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->vdc_reg + reg);
}
static inline uint32_t REGISTER_READ_AUX(struct drm_device *dev, uint32_t reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
return ioread32(dev_priv->aux_reg + reg);
}
@@ -761,14 +761,14 @@ static inline uint32_t REGISTER_READ_WITH_AUX(struct drm_device *dev,
static inline void REGISTER_WRITE(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->vdc_reg + (reg));
}
static inline void REGISTER_WRITE_AUX(struct drm_device *dev, uint32_t reg,
uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite32((val), dev_priv->aux_reg + (reg));
}
@@ -789,7 +789,7 @@ static inline void REGISTER_WRITE_WITH_AUX(struct drm_device *dev, uint32_t reg,
static inline void REGISTER_WRITE16(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite16((val), dev_priv->vdc_reg + (reg));
}
@@ -798,7 +798,7 @@ static inline void REGISTER_WRITE16(struct drm_device *dev,
static inline void REGISTER_WRITE8(struct drm_device *dev,
uint32_t reg, uint32_t val)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
iowrite8((val), dev_priv->vdc_reg + (reg));
}
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index 359606429316..f5f259fde88e 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -95,7 +95,7 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
int pipe = gma_crtc->pipe;
@@ -298,7 +298,7 @@ static int psb_intel_crtc_clock_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
int pipe = gma_crtc->pipe;
const struct psb_offset *map = &dev_priv->regmap[pipe];
u32 dpll;
@@ -380,7 +380,7 @@ struct drm_display_mode *psb_intel_crtc_mode_get(struct drm_device *dev,
int hsync;
int vtot;
int vsync;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_pipe *p = &dev_priv->regs.pipe[pipe];
const struct psb_offset *map = &dev_priv->regmap[pipe];
@@ -451,7 +451,7 @@ const struct gma_clock_funcs psb_clock_funcs = {
static void psb_intel_cursor_init(struct drm_device *dev,
struct gma_crtc *gma_crtc)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 control[3] = { CURACNTR, CURBCNTR, CURCCNTR };
u32 base[3] = { CURABASE, CURBBASE, CURCBASE };
struct gtt_range *cursor_gt;
@@ -481,7 +481,7 @@ out:
void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_crtc *gma_crtc;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ace95d4bdb6f..ac97e0d3c7dd 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -59,7 +59,7 @@ struct psb_intel_lvds_priv {
*/
static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 ret;
if (gma_power_begin(dev, false)) {
@@ -88,8 +88,7 @@ static u32 psb_intel_lvds_get_max_backlight(struct drm_device *dev)
static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
unsigned int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_i2c_chan *lvds_i2c_bus = dev_priv->lvds_i2c_bus;
u8 out_buf[2];
@@ -128,8 +127,7 @@ static int psb_lvds_i2c_set_brightness(struct drm_device *dev,
static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 max_pwm_blc;
u32 blc_pwm_duty_cycle;
@@ -161,7 +159,7 @@ static int psb_lvds_pwm_set_brightness(struct drm_device *dev, int level)
*/
void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
dev_dbg(dev->dev, "backlight level is %d\n", level);
@@ -183,7 +181,7 @@ void psb_intel_lvds_set_brightness(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 blc_pwm_ctl;
if (gma_power_begin(dev, false)) {
@@ -208,7 +206,7 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
*/
static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
u32 pp_status;
@@ -254,8 +252,7 @@ static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
static void psb_intel_lvds_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *)dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
(struct psb_intel_lvds_priv *)gma_encoder->dev_priv;
@@ -335,7 +332,7 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
enum drm_mode_status psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
dev_priv->mode_dev.panel_fixed_mode;
@@ -365,7 +362,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_crtc *gma_crtc = to_gma_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
@@ -426,7 +423,7 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
@@ -444,7 +441,7 @@ static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
@@ -459,7 +456,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 pfit_control;
/*
@@ -493,7 +490,7 @@ static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct gma_encoder *gma_encoder = gma_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv = gma_encoder->dev_priv;
@@ -641,7 +638,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
struct drm_encoder *encoder;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
struct drm_crtc *crtc;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 lvds;
int pipe;
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 355da2856389..042c4392e676 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -1217,7 +1217,7 @@ psb_intel_sdvo_get_edid(struct drm_connector *connector)
static struct edid *
psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
return drm_get_edid(connector,
&dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
@@ -1486,7 +1486,7 @@ static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
struct drm_display_mode *newmode;
/*
@@ -1570,7 +1570,7 @@ psb_intel_sdvo_set_property(struct drm_connector *connector,
{
struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
- struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(connector->dev);
uint16_t temp_value;
uint8_t cmd;
int ret;
@@ -1878,7 +1878,7 @@ psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int devi
static u8
psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct sdvo_device_mapping *my_mapping, *other_mapping;
if (IS_SDVOB(sdvo_reg)) {
@@ -2415,7 +2415,7 @@ psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct gma_encoder *gma_encoder;
struct psb_intel_sdvo *psb_intel_sdvo;
int i;
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index deb1fbc1f748..ccf402007beb 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -76,12 +76,12 @@ psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal |= (mask | (mask >> 16));
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -92,12 +92,12 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
if ((dev_priv->pipestat[pipe] & mask) != 0) {
u32 reg = psb_pipestat(pipe);
dev_priv->pipestat[pipe] &= ~mask;
- if (gma_power_begin(dev_priv->dev, false)) {
+ if (gma_power_begin(&dev_priv->dev, false)) {
u32 writeVal = PSB_RVDC32(reg);
writeVal &= ~mask;
PSB_WVDC32(writeVal, reg);
(void) PSB_RVDC32(reg);
- gma_power_end(dev_priv->dev);
+ gma_power_end(&dev_priv->dev);
}
}
}
@@ -107,8 +107,7 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask)
*/
static void mid_pipe_event_handler(struct drm_device *dev, int pipe)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t pipe_stat_val = 0;
uint32_t pipe_stat_reg = psb_pipestat(pipe);
@@ -178,7 +177,7 @@ static void psb_vdc_interrupt(struct drm_device *dev, uint32_t vdc_stat)
*/
static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
u32 val, addr;
if (stat_1 & _PSB_CE_TWOD_COMPLETE)
@@ -226,7 +225,7 @@ static void psb_sgx_interrupt(struct drm_device *dev, u32 stat_1, u32 stat_2)
static irqreturn_t psb_irq_handler(int irq, void *arg)
{
struct drm_device *dev = arg;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
uint32_t vdc_stat, dsp_int = 0, sgx_int = 0, hotplug_int = 0;
u32 sgx_stat_1, sgx_stat_2;
int handled = 0;
@@ -277,8 +276,7 @@ static irqreturn_t psb_irq_handler(int irq, void *arg)
void psb_irq_preinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
@@ -307,7 +305,7 @@ void psb_irq_preinstall(struct drm_device *dev)
void psb_irq_postinstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
unsigned int i;
@@ -356,7 +354,7 @@ int psb_irq_install(struct drm_device *dev, unsigned int irq)
void psb_irq_uninstall(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long irqflags;
unsigned int i;
@@ -397,7 +395,7 @@ int psb_enable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
@@ -433,7 +431,7 @@ void psb_disable_vblank(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
unsigned int pipe = crtc->index;
- struct drm_psb_private *dev_priv = dev->dev_private;
+ struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
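An aside on the gma500 conversion above (not part of the patch): to_drm_psb_private() is the usual container_of() upcast for drivers that embed struct drm_device in their private state, which is also why call sites now pass &dev_priv->dev instead of a stored pointer. A minimal sketch of that helper shape, assuming a layout like the one psb_drv.h declares:

#include <linux/kernel.h>
#include <drm/drm_device.h>

struct drm_psb_private {
	struct drm_device dev;	/* embedded, no longer allocated separately */
	/* ... driver-private state follows ... */
};

/* Upcast from the embedded drm_device back to the driver's private struct. */
static inline struct drm_psb_private *to_drm_psb_private(struct drm_device *dev)
{
	return container_of(dev, struct drm_psb_private, dev);
}

Going the other way is simply &dev_priv->dev, hence the gma_power_begin(&dev_priv->dev, ...) changes in psb_irq.c above.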
diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index 97b0c52bfd8a..58a7fe392636 100644
--- a/drivers/gpu/drm/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
@@ -14,7 +14,7 @@
static void psb_lid_timer_func(struct timer_list *t)
{
struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer);
- struct drm_device *dev = (struct drm_device *)dev_priv->dev;
+ struct drm_device *dev = (struct drm_device *)&dev_priv->dev;
struct timer_list *lid_timer = &dev_priv->lid_timer;
unsigned long irq_flags;
u32 __iomem *lid_state = dev_priv->opregion.lid_state;
diff --git a/drivers/gpu/drm/gud/Kconfig b/drivers/gpu/drm/gud/Kconfig
index 1c8601bf4d91..9c1e61f9eec3 100644
--- a/drivers/gpu/drm/gud/Kconfig
+++ b/drivers/gpu/drm/gud/Kconfig
@@ -2,7 +2,7 @@
config DRM_GUD
tristate "GUD USB Display"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select LZ4_COMPRESS
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index eb4e08846da4..3f9d4b9a1e3d 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -523,7 +523,13 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
switch (format) {
case GUD_DRM_FORMAT_R1:
fallthrough;
+ case DRM_FORMAT_R8:
+ fallthrough;
case GUD_DRM_FORMAT_XRGB1111:
+ fallthrough;
+ case DRM_FORMAT_RGB332:
+ fallthrough;
+ case DRM_FORMAT_RGB888:
if (!xrgb8888_emulation_format)
xrgb8888_emulation_format = info;
break;
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index 2a388e27d5d7..e351a1f1420d 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -80,10 +80,16 @@ static inline u8 gud_from_fourcc(u32 fourcc)
switch (fourcc) {
case GUD_DRM_FORMAT_R1:
return GUD_PIXEL_FORMAT_R1;
+ case DRM_FORMAT_R8:
+ return GUD_PIXEL_FORMAT_R8;
case GUD_DRM_FORMAT_XRGB1111:
return GUD_PIXEL_FORMAT_XRGB1111;
+ case DRM_FORMAT_RGB332:
+ return GUD_PIXEL_FORMAT_RGB332;
case DRM_FORMAT_RGB565:
return GUD_PIXEL_FORMAT_RGB565;
+ case DRM_FORMAT_RGB888:
+ return GUD_PIXEL_FORMAT_RGB888;
case DRM_FORMAT_XRGB8888:
return GUD_PIXEL_FORMAT_XRGB8888;
case DRM_FORMAT_ARGB8888:
@@ -98,10 +104,16 @@ static inline u32 gud_to_fourcc(u8 format)
switch (format) {
case GUD_PIXEL_FORMAT_R1:
return GUD_DRM_FORMAT_R1;
+ case GUD_PIXEL_FORMAT_R8:
+ return DRM_FORMAT_R8;
case GUD_PIXEL_FORMAT_XRGB1111:
return GUD_DRM_FORMAT_XRGB1111;
+ case GUD_PIXEL_FORMAT_RGB332:
+ return DRM_FORMAT_RGB332;
case GUD_PIXEL_FORMAT_RGB565:
return DRM_FORMAT_RGB565;
+ case GUD_PIXEL_FORMAT_RGB888:
+ return DRM_FORMAT_RGB888;
case GUD_PIXEL_FORMAT_XRGB8888:
return DRM_FORMAT_XRGB8888;
case GUD_PIXEL_FORMAT_ARGB8888:
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index b9b0e435ea0f..daf75c178c2b 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -189,8 +189,14 @@ retry:
ret = -ENOMEM;
goto end_cpu_access;
}
+ } else if (format->format == DRM_FORMAT_R8) {
+ drm_fb_xrgb8888_to_gray8(buf, vaddr, fb, rect);
+ } else if (format->format == DRM_FORMAT_RGB332) {
+ drm_fb_xrgb8888_to_rgb332(buf, vaddr, fb, rect);
} else if (format->format == DRM_FORMAT_RGB565) {
drm_fb_xrgb8888_to_rgb565(buf, vaddr, fb, rect, gud_is_big_endian());
+ } else if (format->format == DRM_FORMAT_RGB888) {
+ drm_fb_xrgb8888_to_rgb888(buf, vaddr, fb, rect);
} else {
len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
}
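An illustrative aside on the new GUD formats: DRM_FORMAT_RGB332 packs a pixel into one byte (bits [7:5] red, [4:2] green, [1:0] blue), which is what drm_fb_xrgb8888_to_rgb332() produces from the XRGB8888 shadow buffer. A standalone sketch of that per-pixel packing, assuming little-endian XRGB8888 input; this is not the kernel helper itself:

#include <stdint.h>
#include <stddef.h>

/* Pack XRGB8888 pixels down to RGB332: keep the top 3/3/2 bits per channel. */
static void xrgb8888_to_rgb332(uint8_t *dst, const uint32_t *src, size_t pixels)
{
	size_t i;

	for (i = 0; i < pixels; i++) {
		uint8_t r = (src[i] >> 16) & 0xff;
		uint8_t g = (src[i] >> 8) & 0xff;
		uint8_t b = src[i] & 0xff;

		dst[i] = (r & 0xe0) | ((g & 0xe0) >> 3) | (b >> 6);
	}
}

DRM_FORMAT_R8 and DRM_FORMAT_RGB888 follow the same idea, with grayscale and 3-byte-per-pixel output respectively.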
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index f960f5d7664e..84b6fc70cbf5 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -126,11 +126,23 @@ config DRM_I915_GVT_KVMGT
depends on DRM_I915_GVT
depends on KVM
depends on VFIO_MDEV
+ select KVM_EXTERNAL_WRITE_TRACKING
default n
help
Choose this option if you want to enable KVMGT support for
Intel GVT-g.
+config DRM_I915_PXP
+ bool "Enable Intel PXP support"
+ depends on DRM_I915
+ depends on INTEL_MEI && INTEL_MEI_PXP
+ default n
+ help
+ PXP (Protected Xe Path) is an i915 component, available on graphics
+ version 12 and newer GPUs, that helps to establish the hardware
+ protected session and manage the status of the alive software session,
+ as well as its life cycle.
+
menu "drm/i915 Debugging"
depends on DRM_I915
depends on EXPERT
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 335ba9f43d8f..660bb03de6fc 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -13,13 +13,11 @@
# will most likely get a sudden build breakage... Hopefully we will fix
# new warnings before CI updates!
subdir-ccflags-y := -Wall -Wextra
-subdir-ccflags-y += $(call cc-disable-warning, unused-parameter)
-subdir-ccflags-y += $(call cc-disable-warning, type-limits)
-subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers)
+subdir-ccflags-y += -Wno-unused-parameter
+subdir-ccflags-y += -Wno-type-limits
+subdir-ccflags-y += -Wno-missing-field-initializers
+subdir-ccflags-y += -Wno-sign-compare
subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable)
-# clang warnings
-subdir-ccflags-y += $(call cc-disable-warning, sign-compare)
-subdir-ccflags-y += $(call cc-disable-warning, initializer-overrides)
subdir-ccflags-y += $(call cc-disable-warning, frame-address)
subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror
@@ -49,13 +47,15 @@ i915-y += i915_drv.o \
intel_dram.o \
intel_memory_region.o \
intel_pch.o \
+ intel_pcode.o \
intel_pm.o \
intel_region_ttm.o \
intel_runtime_pm.o \
- intel_sideband.o \
+ intel_sbi.o \
intel_step.o \
intel_uncore.o \
intel_wakeref.o \
+ vlv_sideband.o \
vlv_suspend.o
# core library code
@@ -78,9 +78,6 @@ i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
gt-y += \
- gt/debugfs_engines.o \
- gt/debugfs_gt.o \
- gt/debugfs_gt_pm.o \
gt/gen2_engine_cs.o \
gt/gen6_engine_cs.o \
gt/gen6_ppgtt.o \
@@ -100,8 +97,11 @@ gt-y += \
gt/intel_gt.o \
gt/intel_gt_buffer_pool.o \
gt/intel_gt_clock_utils.o \
+ gt/intel_gt_debugfs.o \
+ gt/intel_gt_engines_debugfs.o \
gt/intel_gt_irq.o \
gt/intel_gt_pm.o \
+ gt/intel_gt_pm_debugfs.o \
gt/intel_gt_pm_irq.o \
gt/intel_gt_requests.o \
gt/intel_gtt.o \
@@ -154,6 +154,7 @@ gem-y += \
gem/i915_gem_throttle.o \
gem/i915_gem_tiling.o \
gem/i915_gem_ttm.o \
+ gem/i915_gem_ttm_pm.o \
gem/i915_gem_userptr.o \
gem/i915_gem_wait.o \
gem/i915_gemfs.o
@@ -211,8 +212,11 @@ i915-y += \
display/intel_dpio_phy.o \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
+ display/intel_dpt.o \
+ display/intel_drrs.o \
display/intel_dsb.o \
display/intel_fb.o \
+ display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
display/intel_fifo_underrun.o \
@@ -222,6 +226,7 @@ i915-y += \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
display/intel_overlay.o \
+ display/intel_plane_initial.o \
display/intel_psr.o \
display/intel_quirks.o \
display/intel_sprite.o \
@@ -247,6 +252,7 @@ i915-y += \
display/g4x_dp.o \
display/g4x_hdmi.o \
display/icl_dsi.o \
+ display/intel_backlight.o \
display/intel_crt.o \
display/intel_ddi.o \
display/intel_ddi_buf_trans.o \
@@ -277,6 +283,16 @@ i915-y += \
i915-y += i915_perf.o
+# Protected execution platform (PXP) support
+i915-$(CONFIG_DRM_I915_PXP) += \
+ pxp/intel_pxp.o \
+ pxp/intel_pxp_cmd.o \
+ pxp/intel_pxp_debugfs.o \
+ pxp/intel_pxp_irq.o \
+ pxp/intel_pxp_pm.o \
+ pxp/intel_pxp_session.o \
+ pxp/intel_pxp_tee.o
+
# Post-mortem debug and GPU hang state capture
i915-$(CONFIG_DRM_I915_CAPTURE_ERROR) += i915_gpu_error.o
i915-$(CONFIG_DRM_I915_SELFTEST) += \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index de0f358184aa..dc41868d01ef 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -7,6 +7,7 @@
#include "g4x_dp.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
@@ -16,9 +17,8 @@
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_panel.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
struct dp_link_dpll {
int clock;
@@ -211,7 +211,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
@@ -251,7 +251,7 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
@@ -426,7 +426,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
- u32 DP = intel_dp->DP;
if (drm_WARN_ON(&dev_priv->drm,
(intel_de_read(dev_priv, intel_dp->output_reg) &
@@ -437,17 +436,17 @@ intel_dp_link_down(struct intel_encoder *encoder,
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
- DP &= ~DP_LINK_TRAIN_MASK_CPT;
- DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
} else {
- DP &= ~DP_LINK_TRAIN_MASK;
- DP |= DP_LINK_TRAIN_PAT_IDLE;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
/*
@@ -464,14 +463,14 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
/* always enable with pattern 1 (as per spec) */
- DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
- DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
+ intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
+ intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
- DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_dp->DP &= ~DP_PORT_EN;
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
@@ -481,8 +480,6 @@ intel_dp_link_down(struct intel_encoder *encoder,
msleep(intel_dp->pps.panel_power_down_delay);
- intel_dp->DP = DP;
-
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
intel_wakeref_t wakeref;
@@ -582,19 +579,18 @@ cpt_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
- *DP &= ~DP_LINK_TRAIN_MASK_CPT;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
break;
case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1_CPT;
break;
case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2_CPT;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
default:
MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
@@ -611,19 +607,18 @@ g4x_set_link_train(struct intel_dp *intel_dp,
u8 dp_train_pat)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 *DP = &intel_dp->DP;
- *DP &= ~DP_LINK_TRAIN_MASK;
+ intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
switch (intel_dp_training_pattern_symbol(dp_train_pat)) {
case DP_TRAINING_PATTERN_DISABLE:
- *DP |= DP_LINK_TRAIN_OFF;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF;
break;
case DP_TRAINING_PATTERN_1:
- *DP |= DP_LINK_TRAIN_PAT_1;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_1;
break;
case DP_TRAINING_PATTERN_2:
- *DP |= DP_LINK_TRAIN_PAT_2;
+ intel_dp->DP |= DP_LINK_TRAIN_PAT_2;
break;
default:
MISSING_CASE(intel_dp_training_pattern_symbol(dp_train_pat));
@@ -642,7 +637,7 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
/* enable with pattern 1 (as per spec) */
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- DP_TRAINING_PATTERN_1);
+ DP_PHY_DPRX, DP_TRAINING_PATTERN_1);
/*
* Magic for VLV/CHV. We _must_ first set up the register
@@ -813,10 +808,10 @@ static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
-static void vlv_set_signal_levels(struct intel_dp *intel_dp,
+static void vlv_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
unsigned long demph_reg_value, preemph_reg_value,
uniqtranscale_reg_value;
u8 train_set = intel_dp->train_set[0];
@@ -899,10 +894,10 @@ static void vlv_set_signal_levels(struct intel_dp *intel_dp,
uniqtranscale_reg_value, 0);
}
-static void chv_set_signal_levels(struct intel_dp *intel_dp,
+static void chv_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 deemph_reg_value, margin_reg_value;
bool uniq_trans_scale = false;
u8 train_set = intel_dp->train_set[0];
@@ -1020,10 +1015,11 @@ static u32 g4x_signal_levels(u8 train_set)
}
static void
-g4x_set_signal_levels(struct intel_dp *intel_dp,
+g4x_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1067,10 +1063,11 @@ static u32 snb_cpu_edp_signal_levels(u8 train_set)
}
static void
-snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1118,10 +1115,11 @@ static u32 ivb_cpu_edp_signal_levels(u8 train_set)
}
static void
-ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
+ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
@@ -1334,7 +1332,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->get_config = intel_dp_get_config;
intel_encoder->sync_state = intel_dp_sync_state;
intel_encoder->initial_fastset_check = intel_dp_initial_fastset_check;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->suspend = intel_dp_encoder_suspend;
intel_encoder->shutdown = intel_dp_encoder_shutdown;
if (IS_CHERRYVIEW(dev_priv)) {
@@ -1364,15 +1362,15 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->dp.set_link_train = g4x_set_link_train;
if (IS_CHERRYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = chv_set_signal_levels;
+ intel_encoder->set_signal_levels = chv_set_signal_levels;
else if (IS_VALLEYVIEW(dev_priv))
- dig_port->dp.set_signal_levels = vlv_set_signal_levels;
+ intel_encoder->set_signal_levels = vlv_set_signal_levels;
else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
- dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
+ intel_encoder->set_signal_levels = ivb_cpu_edp_set_signal_levels;
else if (IS_SANDYBRIDGE(dev_priv) && port == PORT_A)
- dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
+ intel_encoder->set_signal_levels = snb_cpu_edp_set_signal_levels;
else
- dig_port->dp.set_signal_levels = g4x_set_signal_levels;
+ intel_encoder->set_signal_levels = g4x_set_signal_levels;
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
(HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index be352e9f0afc..88c427f3c346 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -14,8 +14,8 @@
#include "intel_fifo_underrun.h"
#include "intel_hdmi.h"
#include "intel_hotplug.h"
-#include "intel_sideband.h"
#include "intel_sdvo.h"
+#include "vlv_sideband.h"
static void intel_hdmi_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index a3eae3f3eadc..168c84a74d30 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -29,6 +29,7 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_crtc.h"
@@ -54,20 +55,28 @@ static int payload_credits_available(struct drm_i915_private *dev_priv,
>> FREE_PLOAD_CREDIT_SHIFT;
}
-static void wait_for_header_credits(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static bool wait_for_header_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int hdr_credit)
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
- MAX_HEADER_CREDIT, 100))
+ hdr_credit, 100)) {
drm_err(&dev_priv->drm, "DSI header credits not released\n");
+ return false;
+ }
+
+ return true;
}
-static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
- enum transcoder dsi_trans)
+static bool wait_for_payload_credits(struct drm_i915_private *dev_priv,
+ enum transcoder dsi_trans, int payld_credit)
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
- MAX_PLOAD_CREDIT, 100))
+ payld_credit, 100)) {
drm_err(&dev_priv->drm, "DSI payload credits not released\n");
+ return false;
+ }
+
+ return true;
}
static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -90,8 +99,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for header/payload credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans);
- wait_for_payload_credits(dev_priv, dsi_trans);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
+ wait_for_payload_credits(dev_priv, dsi_trans, MAX_PLOAD_CREDIT);
}
/* send nop DCS command */
@@ -108,7 +117,7 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for header credits to be released */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- wait_for_header_credits(dev_priv, dsi_trans);
+ wait_for_header_credits(dev_priv, dsi_trans, MAX_HEADER_CREDIT);
}
/* wait for LP TX in progress bit to be cleared */
@@ -120,54 +129,52 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
}
}
-static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
- u32 len)
+static int dsi_send_pkt_payld(struct intel_dsi_host *host,
+ const struct mipi_dsi_packet *packet)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
- int free_credits;
+ const u8 *data = packet->payload;
+ u32 len = packet->payload_length;
int i, j;
+ /* payload queue can accept *256 bytes*, check limit */
+ if (len > MAX_PLOAD_CREDIT * 4) {
+ drm_err(&i915->drm, "payload size exceeds max queue limit\n");
+ return -EINVAL;
+ }
+
for (i = 0; i < len; i += 4) {
u32 tmp = 0;
- free_credits = payload_credits_available(dev_priv, dsi_trans);
- if (free_credits < 1) {
- drm_err(&dev_priv->drm,
- "Payload credit not available\n");
- return false;
- }
+ if (!wait_for_payload_credits(i915, dsi_trans, 1))
+ return -EBUSY;
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(i915, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
- return true;
+ return 0;
}
static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
- struct mipi_dsi_packet pkt, bool enable_lpdt)
+ const struct mipi_dsi_packet *packet,
+ bool enable_lpdt)
{
struct intel_dsi *intel_dsi = host->intel_dsi;
struct drm_i915_private *dev_priv = to_i915(intel_dsi->base.base.dev);
enum transcoder dsi_trans = dsi_port_to_transcoder(host->port);
u32 tmp;
- int free_credits;
- /* check if header credit available */
- free_credits = header_credits_available(dev_priv, dsi_trans);
- if (free_credits < 1) {
- drm_err(&dev_priv->drm,
- "send pkt header failed, not enough hdr credits\n");
- return -1;
- }
+ if (!wait_for_header_credits(dev_priv, dsi_trans, 1))
+ return -EBUSY;
tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
- if (pkt.payload)
+ if (packet->payload)
tmp |= PAYLOAD_PRESENT;
else
tmp &= ~PAYLOAD_PRESENT;
@@ -178,37 +185,15 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= LP_DATA_TRANSFER;
tmp &= ~(PARAM_WC_MASK | VC_MASK | DT_MASK);
- tmp |= ((pkt.header[0] & VC_MASK) << VC_SHIFT);
- tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
- tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
- tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
+ tmp |= ((packet->header[0] & VC_MASK) << VC_SHIFT);
+ tmp |= ((packet->header[0] & DT_MASK) << DT_SHIFT);
+ tmp |= (packet->header[1] << PARAM_WC_LOWER_SHIFT);
+ tmp |= (packet->header[2] << PARAM_WC_UPPER_SHIFT);
intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
-static int dsi_send_pkt_payld(struct intel_dsi_host *host,
- struct mipi_dsi_packet pkt)
-{
- struct intel_dsi *intel_dsi = host->intel_dsi;
- struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
-
- /* payload queue can accept *256 bytes*, check limit */
- if (pkt.payload_length > MAX_PLOAD_CREDIT * 4) {
- drm_err(&i915->drm, "payload size exceeds max queue limit\n");
- return -1;
- }
-
- /* load data into command payload queue */
- if (!add_payld_to_queue(host, pkt.payload,
- pkt.payload_length)) {
- drm_err(&i915->drm, "adding payload to queue failed\n");
- return -1;
- }
-
- return 0;
-}
-
void icl_dsi_frame_update(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
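Aside on the reworked dsi_send_pkt_payld() above: the payload FIFO is fed one 32-bit word at a time, packing payload bytes least-significant-byte first and waiting for a free payload credit before each write. A standalone sketch of that packing (illustration only, not the driver function):

#include <stdint.h>
#include <stddef.h>

/*
 * Pack a DSI long-packet payload into dwords the way the driver feeds
 * DSI_CMD_TXPYLD: byte 0 in bits 7:0, byte 1 in bits 15:8, and so on.
 */
static size_t pack_payload_dwords(uint32_t *out, const uint8_t *data, size_t len)
{
	size_t i, j, n = 0;

	for (i = 0; i < len; i += 4) {
		uint32_t tmp = 0;

		for (j = 0; j < 4 && i + j < len; j++)
			tmp |= (uint32_t)data[i + j] << (8 * j);

		out[n++] = tmp;	/* driver: wait for a credit, then write this dword */
	}

	return n;	/* dwords produced */
}

The -EINVAL/-EBUSY returns in the new code replace the old bare -1, so gen11_dsi_host_transfer() propagates a meaningful error code.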
@@ -248,7 +233,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
@@ -262,7 +247,7 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
tmp |= RTERM_SELECT(0x6);
intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
@@ -470,7 +455,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
@@ -485,7 +470,7 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
tmp);
tmp = intel_de_read(dev_priv,
- ICL_PORT_PCS_DW1_LN0(phy));
+ ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
@@ -504,7 +489,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
tmp &= ~COMMON_KEEPER_EN;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
@@ -525,7 +510,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
@@ -538,7 +523,7 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
tmp |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
@@ -1270,6 +1255,26 @@ static void icl_apply_kvmr_pipe_a_wa(struct intel_encoder *encoder,
IGNORE_KVMR_PIPE_A,
enable ? IGNORE_KVMR_PIPE_A : 0);
}
+
+/*
+ * Wa_16012360555:adl-p
+ * SW will have to program the "LP to HS Wakeup Guardband"
+ * to account for the repeaters on the HS Request/Ready
+ * PPI signaling between the Display engine and the DPHY.
+ */
+static void adlp_set_lp_hs_wakeup_gb(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ enum port port;
+
+ if (DISPLAY_VER(i915) == 13) {
+ for_each_dsi_port(port, intel_dsi->ports)
+ intel_de_rmw(i915, TGL_DSI_CHKN_REG(port),
+ TGL_DSI_CHKN_LSHS_GB, 0x4);
+ }
+}
+
static void gen11_dsi_enable(struct intel_atomic_state *state,
struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -1283,11 +1288,14 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
/* Wa_1409054076:icl,jsl,ehl */
icl_apply_kvmr_pipe_a_wa(encoder, crtc->pipe, true);
+ /* Wa_16012360555:adl-p */
+ adlp_set_lp_hs_wakeup_gb(encoder);
+
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
- intel_panel_enable_backlight(crtc_state, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
intel_crtc_vblank_on(crtc_state);
@@ -1440,7 +1448,7 @@ static void gen11_dsi_disable(struct intel_atomic_state *state,
/* step1: turn off backlight */
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
/* step2d,e: disable transcoder and wait */
gen11_dsi_disable_transcoder(encoder);
@@ -1650,16 +1658,17 @@ static int gen11_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- const struct drm_display_mode *fixed_mode =
- intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
int ret;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
+
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
@@ -1815,18 +1824,18 @@ static ssize_t gen11_dsi_host_transfer(struct mipi_dsi_host *host,
if (msg->flags & MIPI_DSI_MSG_USE_LPM)
enable_lpdt = true;
- /* send packet header */
- ret = dsi_send_pkt_hdr(intel_dsi_host, dsi_pkt, enable_lpdt);
- if (ret < 0)
- return ret;
-
/* only long packet contains payload */
if (mipi_dsi_packet_format_is_long(msg->type)) {
- ret = dsi_send_pkt_payld(intel_dsi_host, dsi_pkt);
+ ret = dsi_send_pkt_payld(intel_dsi_host, &dsi_pkt);
if (ret < 0)
return ret;
}
+ /* send packet header */
+ ret = dsi_send_pkt_hdr(intel_dsi_host, &dsi_pkt, enable_lpdt);
+ if (ret < 0)
+ return ret;
+
//TODO: add payload receive code if needed
ret = sizeof(dsi_pkt.header) + dsi_pkt.payload_length;
@@ -2014,7 +2023,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->port = port;
encoder->get_config = gen11_dsi_get_config;
encoder->sync_state = gen11_dsi_sync_state;
- encoder->update_pipe = intel_panel_update_backlight;
+ encoder->update_pipe = intel_backlight_update;
encoder->compute_config = gen11_dsi_compute_config;
encoder->get_hw_state = gen11_dsi_get_hw_state;
encoder->initial_fastset_check = gen11_dsi_initial_fastset_check;
@@ -2048,7 +2057,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
if (dev_priv->vbt.dsi.config->dual_link)
intel_dsi->ports = BIT(PORT_A) | BIT(PORT_B);
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 68abeaf2d7d4..e78430001f07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -285,3 +285,49 @@ void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
}
drm_connector_list_iter_end(&conn_iter);
}
+
+/* NOTE: The connector order must be final before this is called. */
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915)
+{
+ struct drm_connector_list_iter conn_iter;
+ struct drm_device *drm_dev = &i915->drm;
+ struct fwnode_handle *fwnode = NULL;
+ struct drm_connector *connector;
+ struct acpi_device *adev;
+
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ /* Always getting the next, even when the last was not used. */
+ fwnode = device_get_next_child_node(drm_dev->dev, fwnode);
+ if (!fwnode)
+ break;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ /*
+ * Integrated displays have a specific address 0x1f on
+ * most Intel platforms, but not on all of them.
+ */
+ adev = acpi_find_child_device(ACPI_COMPANION(drm_dev->dev),
+ 0x1f, 0);
+ if (adev) {
+ connector->fwnode =
+ fwnode_handle_get(acpi_fwnode_handle(adev));
+ break;
+ }
+ fallthrough;
+ default:
+ connector->fwnode = fwnode_handle_get(fwnode);
+ break;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ /*
+ * device_get_next_child_node() takes a reference on the fwnode, if
+ * we stopped iterating because we are out of connectors we need to
+ * put this, otherwise fwnode is NULL and the put is a no-op.
+ */
+ fwnode_handle_put(fwnode);
+}
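Aside on intel_acpi_assign_connector_fwnodes() above: it walks the child fwnodes of the GPU's ACPI companion by hand with device_get_next_child_node(), and the closing fwnode_handle_put() balances the reference on whatever node it still holds when the walk stops. The same discipline applies when using the generic iterator macro; a minimal sketch with a hypothetical predicate ("wants_node" is not an i915 function):

#include <linux/device.h>
#include <linux/property.h>

/*
 * Walk a device's child fwnodes. Each child is handed to us with a
 * reference held, so a child we return early carries that reference to
 * the caller, while a loop that runs to completion leaves nothing to put.
 */
static struct fwnode_handle *find_child_fwnode(struct device *dev,
					       bool (*wants_node)(struct fwnode_handle *child))
{
	struct fwnode_handle *child;

	device_for_each_child_node(dev, child) {
		if (wants_node(child))
			return child;	/* caller must fwnode_handle_put() when done */
	}

	return NULL;
}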
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 9f197401c313..4a760a2baed9 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -13,6 +13,7 @@ void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915);
void intel_acpi_device_id_update(struct drm_i915_private *i915);
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
@@ -20,6 +21,8 @@ static inline
void intel_dsm_get_bios_data_funcs_supported(struct drm_i915_private *i915) { return; }
static inline
void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
+static inline
+void intel_acpi_assign_connector_fwnodes(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 47234d898549..0be8c00e3db9 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -39,8 +39,10 @@
#include "intel_atomic_plane.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_fb_pin.h"
#include "intel_pm.h"
#include "intel_sprite.h"
+#include "gt/intel_rps.h"
static void intel_plane_state_reset(struct intel_plane_state *plane_state,
struct intel_plane *plane)
@@ -601,6 +603,213 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state,
return 0;
}
+struct wait_rps_boost {
+ struct wait_queue_entry wait;
+
+ struct drm_crtc *crtc;
+ struct i915_request *request;
+};
+
+static int do_rps_boost(struct wait_queue_entry *_wait,
+ unsigned mode, int sync, void *key)
+{
+ struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
+ struct i915_request *rq = wait->request;
+
+ /*
+ * If we missed the vblank, but the request is already running it
+ * is reasonable to assume that it will complete before the next
+ * vblank without our intervention, so leave RPS alone.
+ */
+ if (!i915_request_started(rq))
+ intel_rps_boost(rq);
+ i915_request_put(rq);
+
+ drm_crtc_vblank_put(wait->crtc);
+
+ list_del(&wait->wait.entry);
+ kfree(wait);
+ return 1;
+}
+
+static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
+ struct dma_fence *fence)
+{
+ struct wait_rps_boost *wait;
+
+ if (!dma_fence_is_i915(fence))
+ return;
+
+ if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
+ return;
+
+ if (drm_crtc_vblank_get(crtc))
+ return;
+
+ wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+ if (!wait) {
+ drm_crtc_vblank_put(crtc);
+ return;
+ }
+
+ wait->request = to_request(dma_fence_get(fence));
+ wait->crtc = crtc;
+
+ wait->wait.func = do_rps_boost;
+ wait->wait.flags = 0;
+
+ add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
+}
+
+/**
+ * intel_prepare_plane_fb - Prepare fb for usage on plane
+ * @_plane: drm plane to prepare for
+ * @_new_plane_state: the plane state being prepared
+ *
+ * Prepares a framebuffer for usage on a display plane. Generally this
+ * involves pinning the underlying object and updating the frontbuffer tracking
+ * bits. Some older platforms need special physical address handling for
+ * cursor planes.
+ *
+ * Returns 0 on success, negative error code on failure.
+ */
+static int
+intel_prepare_plane_fb(struct drm_plane *_plane,
+ struct drm_plane_state *_new_plane_state)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
+ struct intel_plane *plane = to_intel_plane(_plane);
+ struct intel_plane_state *new_plane_state =
+ to_intel_plane_state(_new_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(new_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
+ int ret;
+
+ if (old_obj) {
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer. Note that we rely on userspace rendering
+ * into the buffer attached to the pipe they are waiting
+ * on. If not, userspace generates a GPU hang with IPEHR
+ * point to the MI_WAIT_FOR_EVENT.
+ *
+ * This should only fail upon a hung GPU, in which case we
+ * can safely continue.
+ */
+ if (intel_crtc_needs_modeset(crtc_state)) {
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ old_obj->base.resv, NULL,
+ false, 0,
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ if (new_plane_state->uapi.fence) { /* explicit fencing */
+ i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
+ &attr);
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
+ new_plane_state->uapi.fence,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (!obj)
+ return 0;
+
+
+ ret = intel_plane_pin_fb(new_plane_state);
+ if (ret)
+ return ret;
+
+ i915_gem_object_wait_priority(obj, 0, &attr);
+
+ if (!new_plane_state->uapi.fence) { /* implicit fencing */
+ struct dma_fence *fence;
+
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
+ obj->base.resv, NULL,
+ false,
+ i915_fence_timeout(dev_priv),
+ GFP_KERNEL);
+ if (ret < 0)
+ goto unpin_fb;
+
+ fence = dma_resv_get_excl_unlocked(obj->base.resv);
+ if (fence) {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ fence);
+ dma_fence_put(fence);
+ }
+ } else {
+ add_rps_boost_after_vblank(new_plane_state->hw.crtc,
+ new_plane_state->uapi.fence);
+ }
+
+ /*
+ * We declare pageflips to be interactive and so merit a small bias
+ * towards upclocking to deliver the frame on time. By only changing
+ * the RPS thresholds to sample more regularly and aim for higher
+ * clocks we can hopefully deliver low power workloads (like kodi)
+ * that are not quite steady state without resorting to forcing
+ * maximum clocks following a vblank miss (see do_rps_boost()).
+ */
+ if (!state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, true);
+ state->rps_interactive = true;
+ }
+
+ return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
+}
+
+/**
+ * intel_cleanup_plane_fb - Cleans up an fb after plane use
+ * @plane: drm plane to clean up for
+ * @_old_plane_state: the state from the previous modeset
+ *
+ * Cleans up a framebuffer that has just been removed from a plane.
+ */
+static void
+intel_cleanup_plane_fb(struct drm_plane *plane,
+ struct drm_plane_state *_old_plane_state)
+{
+ struct intel_plane_state *old_plane_state =
+ to_intel_plane_state(_old_plane_state);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(old_plane_state->uapi.state);
+ struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
+
+ if (state->rps_interactive) {
+ intel_rps_mark_interactive(&dev_priv->gt.rps, false);
+ state->rps_interactive = false;
+ }
+
+ /* Should only be called after a successful intel_prepare_plane_fb()! */
+ intel_plane_unpin_fb(old_plane_state);
+}
+
static const struct drm_plane_helper_funcs intel_plane_helper_funcs = {
.prepare_fb = intel_prepare_plane_fb,
.cleanup_fb = intel_cleanup_plane_fb,
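Aside on add_rps_boost_after_vblank() above: it uses the waitqueue custom-wake-function idiom, queuing an entry on the CRTC's vblank waitqueue whose ->func runs once when the queue is next woken, does its work, unlinks itself and frees itself. A standalone sketch of that one-shot pattern with hypothetical names (on_wake/queue_oneshot are not kernel or i915 APIs):

#include <linux/wait.h>
#include <linux/slab.h>

struct oneshot_wait {
	struct wait_queue_entry wait;
	void (*on_wake)(void *data);
	void *data;
};

/* Runs under the waitqueue lock when the queue is woken; must not sleep. */
static int oneshot_wake_fn(struct wait_queue_entry *entry, unsigned int mode,
			   int sync, void *key)
{
	struct oneshot_wait *w = container_of(entry, struct oneshot_wait, wait);

	w->on_wake(w->data);
	list_del(&entry->entry);	/* one-shot: remove ourselves from the queue */
	kfree(w);
	return 1;
}

static int queue_oneshot(struct wait_queue_head *wq,
			 void (*on_wake)(void *data), void *data)
{
	struct oneshot_wait *w = kmalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	w->on_wake = on_wake;
	w->data = data;
	init_waitqueue_func_entry(&w->wait, oneshot_wake_fn);
	add_wait_queue(wq, &w->wait);	/* fires on the next wake_up*() */
	return 0;
}

In the i915 version the payload is an i915_request reference plus a vblank reference, both dropped in the callback.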
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 4e0f96bf6158..03e8c05a74f6 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -848,10 +848,10 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
- if (dev_priv->display.audio_codec_enable)
- dev_priv->display.audio_codec_enable(encoder,
- crtc_state,
- conn_state);
+ if (dev_priv->audio_funcs)
+ dev_priv->audio_funcs->audio_codec_enable(encoder,
+ crtc_state,
+ conn_state);
mutex_lock(&dev_priv->av_mutex);
encoder->audio_connector = connector;
@@ -893,10 +893,10 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
enum port port = encoder->port;
enum pipe pipe = crtc->pipe;
- if (dev_priv->display.audio_codec_disable)
- dev_priv->display.audio_codec_disable(encoder,
- old_crtc_state,
- old_conn_state);
+ if (dev_priv->audio_funcs)
+ dev_priv->audio_funcs->audio_codec_disable(encoder,
+ old_crtc_state,
+ old_conn_state);
mutex_lock(&dev_priv->av_mutex);
encoder->audio_connector = NULL;
@@ -915,6 +915,21 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
intel_lpe_audio_notify(dev_priv, pipe, port, NULL, 0, false);
}
+static const struct intel_audio_funcs g4x_audio_funcs = {
+ .audio_codec_enable = g4x_audio_codec_enable,
+ .audio_codec_disable = g4x_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs ilk_audio_funcs = {
+ .audio_codec_enable = ilk_audio_codec_enable,
+ .audio_codec_disable = ilk_audio_codec_disable,
+};
+
+static const struct intel_audio_funcs hsw_audio_funcs = {
+ .audio_codec_enable = hsw_audio_codec_enable,
+ .audio_codec_disable = hsw_audio_codec_disable,
+};
+
/**
* intel_init_audio_hooks - Set up chip specific audio hooks
* @dev_priv: device private
@@ -922,17 +937,13 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
{
if (IS_G4X(dev_priv)) {
- dev_priv->display.audio_codec_enable = g4x_audio_codec_enable;
- dev_priv->display.audio_codec_disable = g4x_audio_codec_disable;
+ dev_priv->audio_funcs = &g4x_audio_funcs;
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
- dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ dev_priv->audio_funcs = &ilk_audio_funcs;
} else if (IS_HASWELL(dev_priv) || DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->display.audio_codec_enable = hsw_audio_codec_enable;
- dev_priv->display.audio_codec_disable = hsw_audio_codec_disable;
+ dev_priv->audio_funcs = &hsw_audio_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.audio_codec_enable = ilk_audio_codec_enable;
- dev_priv->display.audio_codec_disable = ilk_audio_codec_disable;
+ dev_priv->audio_funcs = &ilk_audio_funcs;
}
}
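Aside on the intel_audio.c change above: per-platform hooks move from individually copied function pointers to a single pointer to a const ops table, so the vtable lives in rodata and platform selection is one assignment. A generic sketch of that shape (not the i915 types):

#include <stddef.h>

struct audio_ops {
	void (*codec_enable)(void *encoder);
	void (*codec_disable)(void *encoder);
};

static void plat_a_enable(void *encoder) { /* program platform A registers */ }
static void plat_a_disable(void *encoder) { /* tear down platform A state */ }

static const struct audio_ops plat_a_ops = {
	.codec_enable = plat_a_enable,
	.codec_disable = plat_a_disable,
};

struct device_priv {
	const struct audio_ops *audio_ops;	/* NULL when audio is unsupported */
};

static void codec_enable(struct device_priv *priv, void *encoder)
{
	if (priv->audio_ops)	/* single NULL check replaces per-hook checks */
		priv->audio_ops->codec_enable(encoder);
}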
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
new file mode 100644
index 000000000000..9523411cddd8
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -0,0 +1,1776 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+#include <linux/pwm.h>
+
+#include "intel_backlight.h"
+#include "intel_connector.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_dp_aux_backlight.h"
+#include "intel_dsi_dcs_backlight.h"
+#include "intel_panel.h"
+
+/**
+ * scale - scale values from one range to another
+ * @source_val: value in range [@source_min..@source_max]
+ * @source_min: minimum legal value for @source_val
+ * @source_max: maximum legal value for @source_val
+ * @target_min: corresponding target value for @source_min
+ * @target_max: corresponding target value for @source_max
+ *
+ * Return @source_val in range [@source_min..@source_max] scaled to range
+ * [@target_min..@target_max].
+ */
+static u32 scale(u32 source_val,
+ u32 source_min, u32 source_max,
+ u32 target_min, u32 target_max)
+{
+ u64 target_val;
+
+ WARN_ON(source_min > source_max);
+ WARN_ON(target_min > target_max);
+
+ /* defensive */
+ source_val = clamp(source_val, source_min, source_max);
+
+ /* avoid overflows */
+ target_val = mul_u32_u32(source_val - source_min,
+ target_max - target_min);
+ target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
+ target_val += target_min;
+
+ return target_val;
+}
+
+/*
+ * Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
+ * to [hw_min..hw_max].
+ */
+static u32 clamp_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
+ hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
+
+ return hw_level;
+}
+
+/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
+static u32 scale_hw_to_user(struct intel_connector *connector,
+ u32 hw_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(hw_level, panel->backlight.min, panel->backlight.max,
+ 0, user_max);
+}
+
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness < 0)
+ return val;
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
+ return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
+ }
+
+ return val;
+}
+
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
+ panel->backlight.pwm_funcs->set(conn_state, val);
+}
+
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ val = scale(val, panel->backlight.min, panel->backlight.max,
+ panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ return intel_backlight_invert_pwm_level(connector, val);
+}
+
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
+
+ if (dev_priv->params.invert_brightness > 0 ||
+ (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
+ val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
+
+ return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
+ panel->backlight.min, panel->backlight.max);
+}
+
+static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val;
+
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ if (DISPLAY_VER(dev_priv) < 4)
+ val >>= 1;
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
+ val *= lbpc;
+ }
+
+ return val;
+}
+
+static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return 0;
+
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+}
+
+static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
+}
+
+static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct intel_panel *panel = &connector->panel;
+ struct pwm_state state;
+
+ pwm_get_state(panel->backlight.pwm, &state);
+ return pwm_get_relative_duty_cycle(&state, 100);
+}
+
+static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
+}
+
+static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
+}
+
+static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp, mask;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ if (panel->backlight.combination_mode) {
+ u8 lbpc;
+
+ lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
+ level /= lbpc;
+ pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
+ }
+
+ if (DISPLAY_VER(dev_priv) == 4) {
+ mask = BACKLIGHT_DUTY_CYCLE_MASK;
+ } else {
+ level <<= 1;
+ mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
+}
+
+static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
+}
+
+static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+}
+
+static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void
+intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
+
+ panel->backlight.funcs->set(conn_state, level);
+}
+
+/* set backlight brightness to level in range [0..max], assuming hw min is
+ * respected.
+ */
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ /*
+ * Lack of crtc may occur during driver init because
+ * connection_mutex isn't held across the entire backlight
+ * setup + modeset readout, and the BIOS can issue the
+ * requests at any time.
+ */
+ if (!panel->backlight.present || !conn_state->crtc)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = clamp_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, level);
+
+ /*
+ * Although we don't support or enable CPU PWM with LPT/SPT based
+ * systems, it may have been enabled prior to loading the
+ * driver. Disable to avoid warnings on LCPLL disable.
+ *
+ * This needs rework if we need to add support for CPU PWM on PCH split
+ * platforms.
+ */
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (tmp & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
+ }
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ intel_backlight_set_pwm_level(old_conn_state, val);
+}
+
+static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+}
+
+static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
+}
+
+static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+}
+
+static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 tmp;
+
+ intel_backlight_set_pwm_level(old_conn_state, val);
+
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_state.enabled = false;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ /*
+ * Do not disable backlight on the vga_switcheroo path. When switching
+ * away from i915, the other client may depend on i915 to handle the
+ * backlight. This will leave the backlight on unnecessarily when
+ * another client is not activated.
+ */
+ if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
+ return;
+ }
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
+ panel->backlight.enabled = false;
+ panel->backlight.funcs->disable(old_conn_state, 0);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pch_ctl1, pch_ctl2, schicken;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (HAS_PCH_LPT(dev_priv)) {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= LPT_PWM_GRANULARITY;
+ else
+ schicken &= ~LPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
+ } else {
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (panel->backlight.alternate_pwm_increment)
+ schicken |= SPT_PWM_GRANULARITY;
+ else
+ schicken &= ~SPT_PWM_GRANULARITY;
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
+ }
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ /* After LPT, override is the default. */
+ if (HAS_PCH_LPT(dev_priv))
+ pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ if (cpu_ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
+ cpu_ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ }
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
+ pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ }
+
+ if (cpu_transcoder == TRANSCODER_EDP)
+ cpu_ctl2 = BLM_TRANSCODER_EDP;
+ else
+ cpu_ctl2 = BLM_PIPE(cpu_transcoder);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+
+ /* This won't stick until the above enable. */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pch_ctl2 = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
+
+ pch_ctl1 = 0;
+ if (panel->backlight.active_low_pwm)
+ pch_ctl1 |= BLM_PCH_POLARITY;
+
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
+}
+
+static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, freq;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
+ }
+
+ freq = panel->backlight.pwm_level_max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 17;
+ if (panel->backlight.combination_mode)
+ ctl |= BLM_LEGACY_MODE;
+ if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
+ ctl |= BLM_POLARITY_PNV;
+
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ /*
+ * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
+ * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
+ * that has backlight.
+ */
+ if (DISPLAY_VER(dev_priv) == 2)
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+}
+
+static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
+ u32 ctl, ctl2, freq;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ }
+
+ freq = panel->backlight.pwm_level_max;
+ if (panel->backlight.combination_mode)
+ freq /= 0xff;
+
+ ctl = freq << 16;
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+
+ ctl2 = BLM_PIPE(pipe);
+ if (panel->backlight.combination_mode)
+ ctl2 |= BLM_COMBINATION_MODE;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+}
+
+static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ if (ctl2 & BLM_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ ctl2 &= ~BLM_PWM_ENABLE;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ }
+
+ ctl = panel->backlight.pwm_level_max << 16;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
+
+ /* XXX: combine this into above write? */
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ ctl2 = 0;
+ if (panel->backlight.active_low_pwm)
+ ctl2 |= BLM_POLARITY_I965;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
+}
+
+static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+ u32 pwm_ctl, val;
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ if (val & UTIL_PIN_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
+ val &= ~UTIL_PIN_ENABLE;
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
+ }
+
+ val = 0;
+ if (panel->backlight.util_pin_active_low)
+ val |= UTIL_PIN_POLARITY;
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ }
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ }
+
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.pwm_level_max);
+
+ intel_backlight_set_pwm_level(conn_state, level);
+
+ pwm_ctl = 0;
+ if (panel->backlight.active_low_pwm)
+ pwm_ctl |= BXT_BLC_PWM_POLARITY;
+
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
+}
+
+static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
+ panel->backlight.pwm_state.enabled = true;
+ pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+}
+
+static void __intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ if (panel->backlight.level <= panel->backlight.min) {
+ panel->backlight.level = panel->backlight.max;
+ if (panel->backlight.device)
+ panel->backlight.device->props.brightness =
+ scale_hw_to_user(connector,
+ panel->backlight.level,
+ panel->backlight.device->props.max_brightness);
+ }
+
+ panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
+ panel->backlight.enabled = true;
+ if (panel->backlight.device)
+ panel->backlight.device->props.power = FB_BLANK_UNBLANK;
+}
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
+
+ if (!panel->backlight.present)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+static u32 intel_panel_get_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 val = 0;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ if (panel->backlight.enabled)
+ val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
+
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
+ return val;
+}
+
+/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
+static u32 scale_user_to_hw(struct intel_connector *connector,
+ u32 user_level, u32 user_max)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return scale(user_level, 0, user_max,
+ panel->backlight.min, panel->backlight.max);
+}
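The scale() helper used here is defined earlier in intel_backlight.c and is not part of this hunk; as a rough standalone sketch of the same rounded linear rescale (local names and rounding, assuming smax > smin — not the driver's exact implementation):

#include <stdint.h>
#include <stdio.h>

/* Rescale v from [smin..smax] to [dmin..dmax], rounding to nearest. */
static uint32_t scale_sketch(uint32_t v, uint32_t smin, uint32_t smax,
                             uint32_t dmin, uint32_t dmax)
{
        uint64_t num;

        if (v < smin)
                v = smin;
        else if (v > smax)
                v = smax;

        num = (uint64_t)(v - smin) * (dmax - dmin);
        return dmin + (uint32_t)((num + (smax - smin) / 2) / (smax - smin));
}

int main(void)
{
        /* e.g. user brightness 128/255 mapped onto a hw range of 25..1000 */
        printf("%u\n", scale_sketch(128, 0, 255, 25, 1000)); /* prints 514 */
        return 0;
}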
+
+/* set backlight brightness to level in range [0..max], scaling wrt hw min */
+static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
+ u32 user_level, u32 user_max)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 hw_level;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
+
+ hw_level = scale_user_to_hw(connector, user_level, user_max);
+ panel->backlight.level = hw_level;
+
+ if (panel->backlight.enabled)
+ intel_panel_actually_set_backlight(conn_state, hw_level);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+static int intel_backlight_device_update_status(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct intel_panel *panel = &connector->panel;
+ struct drm_device *dev = connector->base.dev;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
+ bd->props.brightness, bd->props.max_brightness);
+ intel_panel_set_backlight(connector->base.state, bd->props.brightness,
+ bd->props.max_brightness);
+
+ /*
+ * Allow flipping bl_power as a sub-state of enabled. Sadly the
+ * backlight class device does not make it easy to differentiate
+ * between callbacks for brightness and bl_power, so our backlight_power
+ * callback needs to take this into account.
+ */
+ if (panel->backlight.enabled) {
+ if (panel->backlight.power) {
+ bool enable = bd->props.power == FB_BLANK_UNBLANK &&
+ bd->props.brightness != 0;
+ panel->backlight.power(connector, enable);
+ }
+ } else {
+ bd->props.power = FB_BLANK_POWERDOWN;
+ }
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ return 0;
+}
+
+static int intel_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ struct intel_connector *connector = bl_get_data(bd);
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ intel_wakeref_t wakeref;
+ int ret = 0;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ u32 hw_level;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+ hw_level = intel_panel_get_backlight(connector);
+ ret = scale_hw_to_user(connector,
+ hw_level, bd->props.max_brightness);
+
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ }
+
+ return ret;
+}
+
+static const struct backlight_ops intel_backlight_device_ops = {
+ .update_status = intel_backlight_device_update_status,
+ .get_brightness = intel_backlight_device_get_brightness,
+};
+
+int intel_backlight_device_register(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ struct backlight_properties props;
+ struct backlight_device *bd;
+ const char *name;
+ int ret = 0;
+
+ if (WARN_ON(panel->backlight.device))
+ return -ENODEV;
+
+ if (!panel->backlight.present)
+ return 0;
+
+ WARN_ON(panel->backlight.max == 0);
+
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+
+ /*
+ * Note: Everything should work even if the backlight device max
+ * presented to the userspace is arbitrarily chosen.
+ */
+ props.max_brightness = panel->backlight.max;
+ props.brightness = scale_hw_to_user(connector,
+ panel->backlight.level,
+ props.max_brightness);
+
+ if (panel->backlight.enabled)
+ props.power = FB_BLANK_UNBLANK;
+ else
+ props.power = FB_BLANK_POWERDOWN;
+
+ name = kstrdup("intel_backlight", GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+
+ /*
+ * Using the same name independent of the drm device or connector
+ * prevents registration of multiple backlight devices in the
+ * driver. However, we need to use the default name for backward
+ * compatibility. Use unique names for subsequent backlight devices as a
+ * fallback when the default name already exists.
+ */
+ if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
+ kfree(name);
+ name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
+ i915->drm.primary->index, connector->base.name);
+ if (!name)
+ return -ENOMEM;
+
+ bd = backlight_device_register(name, connector->base.kdev, connector,
+ &intel_backlight_device_ops, &props);
+ }
+
+ if (IS_ERR(bd)) {
+ drm_err(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
+ connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
+ ret = PTR_ERR(bd);
+ goto out;
+ }
+
+ panel->backlight.device = bd;
+
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] backlight device %s registered\n",
+ connector->base.base.id, connector->base.name, name);
+
+out:
+ kfree(name);
+
+ return ret;
+}
+
+void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ if (panel->backlight.device) {
+ backlight_device_unregister(panel->backlight.device);
+ panel->backlight.device = NULL;
+ }
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+/*
+ * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
+ * PWM increment = 1
+ */
+static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
+}
+
+/*
+ * BXT: PWM clock frequency = 19.2 MHz.
+ */
+static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
+}
+
+/*
+ * SPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 16 (default increment) or 128 (alternate increment selected in
+ * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
+ */
+static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct intel_panel *panel = &connector->panel;
+ u32 mul;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 128;
+ else
+ mul = 16;
+
+ return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
+}
+
+/*
+ * LPT: This value represents the period of the PWM stream in clock periods
+ * multiplied by 128 (default increment) or 16 (alternate increment, selected in
+ * LPT SOUTH_CHICKEN2 register bit 5).
+ */
+static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 mul, clock;
+
+ if (panel->backlight.alternate_pwm_increment)
+ mul = 16;
+ else
+ mul = 128;
+
+ if (HAS_PCH_LPT_H(dev_priv))
+ clock = MHz(135); /* LPT:H */
+ else
+ clock = MHz(24); /* LPT:LP */
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
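As a worked example of the conversion above (illustrative only; MHZ and DIV_ROUND_CLOSEST are re-derived locally rather than taken from kernel headers): with the 24 MHz LPT:LP clock and the default 128x increment, a 200 Hz backlight frequency maps to a period register value of 938.

#include <stdio.h>

#define MHZ(x)                  ((x) * 1000000u)
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
        unsigned int clock = MHZ(24);   /* LPT:LP PWM clock */
        unsigned int mul = 128;         /* default increment */
        unsigned int pwm_freq_hz = 200; /* common VBT default */

        /* 24000000 / (200 * 128) = 937.5, rounded to 938 */
        printf("%u\n", DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul));
        return 0;
}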
+
+/*
+ * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
+ * display raw clocks multiplied by 128.
+ */
+static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
+}
+
+/*
+ * Gen2: This field determines the number of time base events (display core
+ * clock frequency/32) in total for a complete cycle of modulated backlight
+ * control.
+ *
+ * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
+ * divided by 32.
+ */
+static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_PINEVIEW(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
+}
+
+/*
+ * Gen4: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] HRAW clocks) multiplied by 128.
+ */
+static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int clock;
+
+ if (IS_G4X(dev_priv))
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ else
+ clock = KHz(dev_priv->cdclk.hw.cdclk);
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
+}
+
+/*
+ * VLV: This value represents the period of the PWM stream in display core
+ * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
+ * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
+ */
+static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ int mul, clock;
+
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if (IS_CHERRYVIEW(dev_priv))
+ clock = KHz(19200);
+ else
+ clock = MHz(25);
+ mul = 16;
+ } else {
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
+ mul = 128;
+ }
+
+ return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
+}
+
+static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
+{
+ u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
+
+ if (pwm_freq_hz) {
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ } else {
+ pwm_freq_hz = 200;
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
+ }
+
+ return pwm_freq_hz;
+}
+
+static u32 get_backlight_max_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
+ u32 pwm;
+
+ if (!panel->backlight.pwm_funcs->hz_to_pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
+ return 0;
+ }
+
+ pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
+ if (!pwm) {
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
+ return 0;
+ }
+
+ return pwm;
+}
+
+/*
+ * Note: The setup hooks can't assume pipe is set!
+ */
+static u32 get_backlight_min_vbt(struct intel_connector *connector)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int min;
+
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
+
+ /*
+ * XXX: If the vbt value is 255, it makes min equal to max, which leads
+ * to problems. There are such machines out there. Either our
+ * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
+ * against this by letting the minimum be at most (arbitrarily chosen)
+ * 25% of the max.
+ */
+ min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
+ if (min != dev_priv->vbt.backlight.min_brightness) {
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
+ }
+
+ /* vbt value is a coefficient in range [0..255] */
+ return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
+}
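For a concrete feel for the numbers (illustrative values only): with pwm_level_max = 1000, a bogus VBT min_brightness of 255 is first clamped to 64 and then rescaled as scale(64, 0, 255, 0, 1000), roughly 251 — about 25% of the maximum, which is exactly the safeguard the comment above describes.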
+
+static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
+ bool alt, cpu_mode;
+
+ if (HAS_PCH_LPT(dev_priv))
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ else
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ panel->backlight.alternate_pwm_increment = alt;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
+
+ cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
+ !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
+ (cpu_ctl2 & BLM_PWM_ENABLE);
+
+ if (cpu_mode) {
+ val = pch_get_backlight(connector, unused);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
+
+ /* Write converted CPU PWM value to PCH override register */
+ lpt_set_backlight(connector->base.state, val);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
+ }
+
+ return 0;
+}
+
+static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 cpu_ctl2, pch_ctl1, pch_ctl2;
+
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
+
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
+ panel->backlight.pwm_level_max = pch_ctl2 >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
+ (pch_ctl1 & BLM_PCH_PWM_ENABLE);
+
+ return 0;
+}
+
+static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, val;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+
+ if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
+ panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
+
+ if (IS_PINEVIEW(dev_priv))
+ panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
+
+ panel->backlight.pwm_level_max = ctl >> 17;
+
+ if (!panel->backlight.pwm_level_max) {
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+ panel->backlight.pwm_level_max >>= 1;
+ }
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ val = i9xx_get_backlight(connector, unused);
+ val = intel_backlight_invert_pwm_level(connector, val);
+ val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
+
+ panel->backlight.pwm_enabled = val != 0;
+
+ return 0;
+}
+
+static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ if (panel->backlight.combination_mode)
+ panel->backlight.pwm_level_max *= 0xff;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 ctl, ctl2;
+
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ return -ENODEV;
+
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
+
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
+ panel->backlight.pwm_level_max = ctl >> 16;
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl, val;
+
+ panel->backlight.controller = dev_priv->vbt.backlight.controller;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ /* Controller 1 uses the utility pin. */
+ if (panel->backlight.controller == 1) {
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
+ panel->backlight.util_pin_active_low =
+ val & UTIL_PIN_POLARITY;
+ }
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int
+cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ u32 pwm_ctl;
+
+ /*
+ * CNP has the BXT implementation of backlight, but with only one
+ * controller. TODO: ICP has multiple controllers but we only use
+ * controller 0 for now.
+ */
+ panel->backlight.controller = 0;
+
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+
+ panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
+ panel->backlight.pwm_level_max =
+ intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
+
+ if (!panel->backlight.pwm_level_max)
+ panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
+
+ if (!panel->backlight.pwm_level_max)
+ return -ENODEV;
+
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
+
+ return 0;
+}
+
+static int ext_pwm_setup_backlight(struct intel_connector *connector,
+ enum pipe pipe)
+{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_panel *panel = &connector->panel;
+ const char *desc;
+ u32 level;
+
+ /* Get the right PWM chip for DSI backlight according to VBT */
+ if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
+ desc = "PMIC";
+ } else {
+ panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
+ desc = "SoC";
+ }
+
+ if (IS_ERR(panel->backlight.pwm)) {
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
+ panel->backlight.pwm = NULL;
+ return -ENODEV;
+ }
+
+ panel->backlight.pwm_level_max = 100; /* 100% */
+ panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
+
+ if (pwm_is_enabled(panel->backlight.pwm)) {
+ /* PWM is already enabled, use existing settings */
+ pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
+
+ level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
+ 100);
+ level = intel_backlight_invert_pwm_level(connector, level);
+ panel->backlight.pwm_enabled = true;
+
+ drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
+ NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
+ get_vbt_pwm_freq(dev_priv), level);
+ } else {
+ /* Set period from VBT frequency, leave other settings at 0. */
+ panel->backlight.pwm_state.period =
+ NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
+ }
+
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
+ return 0;
+}
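The period/duty arithmetic behind the external-PWM path can be sketched standalone; NSEC_PER_SEC is redefined locally and the math is simplified relative to the kernel's pwm_set_relative_duty_cycle(), so treat this as an illustration only:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ull

int main(void)
{
        unsigned int vbt_freq_hz = 200;                  /* default VBT frequency */
        uint64_t period_ns = NSEC_PER_SEC / vbt_freq_hz; /* 5,000,000 ns */
        unsigned int level = 75;                         /* 75% relative duty cycle */
        uint64_t duty_ns = period_ns * level / 100;      /* 3,750,000 ns */

        printf("period=%llu ns, duty=%llu ns\n",
               (unsigned long long)period_ns, (unsigned long long)duty_ns);
        return 0;
}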
+
+static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->set(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+
+ return intel_backlight_invert_pwm_level(connector,
+ panel->backlight.pwm_funcs->get(connector, pipe));
+}
+
+static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct intel_panel *panel = &connector->panel;
+
+ panel->backlight.pwm_funcs->disable(conn_state,
+ intel_backlight_invert_pwm_level(connector, level));
+}
+
+static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
+{
+ struct intel_panel *panel = &connector->panel;
+ int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
+
+ if (ret < 0)
+ return ret;
+
+ panel->backlight.min = panel->backlight.pwm_level_min;
+ panel->backlight.max = panel->backlight.pwm_level_max;
+ panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
+ panel->backlight.enabled = panel->backlight.pwm_enabled;
+
+ return 0;
+}
+
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+
+ if (!panel->backlight.present)
+ return;
+
+ mutex_lock(&dev_priv->backlight_lock);
+ if (!panel->backlight.enabled)
+ __intel_backlight_enable(crtc_state, conn_state);
+
+ mutex_unlock(&dev_priv->backlight_lock);
+}
+
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_panel *panel = &connector->panel;
+ int ret;
+
+ if (!dev_priv->vbt.backlight.present) {
+ if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
+ } else {
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
+ return 0;
+ }
+ }
+
+ /* ensure intel_panel has been initialized first */
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
+ return -ENODEV;
+
+ /* set level and max in panel struct */
+ mutex_lock(&dev_priv->backlight_lock);
+ ret = panel->backlight.funcs->setup(connector, pipe);
+ mutex_unlock(&dev_priv->backlight_lock);
+
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->base.name);
+ return ret;
+ }
+
+ panel->backlight.present = true;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->base.name,
+ enableddisabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
+
+ return 0;
+}
+
+void intel_backlight_destroy(struct intel_panel *panel)
+{
+ /* dispose of the pwm */
+ if (panel->backlight.pwm)
+ pwm_put(panel->backlight.pwm);
+
+ panel->backlight.present = false;
+}
+
+static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
+ .setup = bxt_setup_backlight,
+ .enable = bxt_enable_backlight,
+ .disable = bxt_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = bxt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
+ .setup = cnp_setup_backlight,
+ .enable = cnp_enable_backlight,
+ .disable = cnp_disable_backlight,
+ .set = bxt_set_backlight,
+ .get = bxt_get_backlight,
+ .hz_to_pwm = cnp_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = lpt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs spt_pwm_funcs = {
+ .setup = lpt_setup_backlight,
+ .enable = lpt_enable_backlight,
+ .disable = lpt_disable_backlight,
+ .set = lpt_set_backlight,
+ .get = lpt_get_backlight,
+ .hz_to_pwm = spt_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pch_pwm_funcs = {
+ .setup = pch_setup_backlight,
+ .enable = pch_enable_backlight,
+ .disable = pch_disable_backlight,
+ .set = pch_set_backlight,
+ .get = pch_get_backlight,
+ .hz_to_pwm = pch_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs ext_pwm_funcs = {
+ .setup = ext_pwm_setup_backlight,
+ .enable = ext_pwm_enable_backlight,
+ .disable = ext_pwm_disable_backlight,
+ .set = ext_pwm_set_backlight,
+ .get = ext_pwm_get_backlight,
+};
+
+static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
+ .setup = vlv_setup_backlight,
+ .enable = vlv_enable_backlight,
+ .disable = vlv_disable_backlight,
+ .set = vlv_set_backlight,
+ .get = vlv_get_backlight,
+ .hz_to_pwm = vlv_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i965_pwm_funcs = {
+ .setup = i965_setup_backlight,
+ .enable = i965_enable_backlight,
+ .disable = i965_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i965_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
+ .setup = i9xx_setup_backlight,
+ .enable = i9xx_enable_backlight,
+ .disable = i9xx_disable_backlight,
+ .set = i9xx_set_backlight,
+ .get = i9xx_get_backlight,
+ .hz_to_pwm = i9xx_hz_to_pwm,
+};
+
+static const struct intel_panel_bl_funcs pwm_bl_funcs = {
+ .setup = intel_pwm_setup_backlight,
+ .enable = intel_pwm_enable_backlight,
+ .disable = intel_pwm_disable_backlight,
+ .set = intel_pwm_set_backlight,
+ .get = intel_pwm_get_backlight,
+};
+
+/* Set up chip specific backlight functions */
+void intel_backlight_init_funcs(struct intel_panel *panel)
+{
+ struct intel_connector *connector =
+ container_of(panel, struct intel_connector, panel);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
+ intel_dsi_dcs_init_backlight_funcs(connector) == 0)
+ return;
+
+ if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ panel->backlight.pwm_funcs = &bxt_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
+ panel->backlight.pwm_funcs = &cnp_pwm_funcs;
+ } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
+ if (HAS_PCH_LPT(dev_priv))
+ panel->backlight.pwm_funcs = &lpt_pwm_funcs;
+ else
+ panel->backlight.pwm_funcs = &spt_pwm_funcs;
+ } else if (HAS_PCH_SPLIT(dev_priv)) {
+ panel->backlight.pwm_funcs = &pch_pwm_funcs;
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
+ panel->backlight.pwm_funcs = &ext_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &vlv_pwm_funcs;
+ }
+ } else if (DISPLAY_VER(dev_priv) == 4) {
+ panel->backlight.pwm_funcs = &i965_pwm_funcs;
+ } else {
+ panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
+ }
+
+ if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
+ intel_dp_aux_init_backlight_funcs(connector) == 0)
+ return;
+
+ /* We're using a standard PWM backlight interface */
+ panel->backlight.funcs = &pwm_bl_funcs;
+}
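The per-platform selection above is a plain function-pointer table (ops/vtable) pattern: the table is picked once at init and every later call dispatches through it. A minimal generic sketch of the same idea, with invented types and names for illustration only:

#include <stdio.h>

struct bl_funcs {
        void (*set)(unsigned int level);
};

static void platform_a_set(unsigned int level) { printf("A: %u\n", level); }
static void platform_b_set(unsigned int level) { printf("B: %u\n", level); }

static const struct bl_funcs a_funcs = { .set = platform_a_set };
static const struct bl_funcs b_funcs = { .set = platform_b_set };

int main(void)
{
        int is_platform_a = 1;  /* stand-in for the platform checks above */
        const struct bl_funcs *funcs = is_platform_a ? &a_funcs : &b_funcs;

        funcs->set(42);         /* later callers only ever go through the table */
        return 0;
}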
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.h b/drivers/gpu/drm/i915/display/intel_backlight.h
new file mode 100644
index 000000000000..339643f63897
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_backlight.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_BACKLIGHT_H__
+#define __INTEL_BACKLIGHT_H__
+
+#include <linux/types.h>
+
+struct drm_connector_state;
+struct intel_atomic_state;
+struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
+struct intel_panel;
+enum pipe;
+
+void intel_backlight_init_funcs(struct intel_panel *panel);
+int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe);
+void intel_backlight_destroy(struct intel_panel *panel);
+
+void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_update(struct intel_atomic_state *state,
+ struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+void intel_backlight_disable(const struct drm_connector_state *old_conn_state);
+
+void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
+ u32 level, u32 max);
+void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
+ u32 level);
+u32 intel_backlight_invert_pwm_level(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
+u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
+
+#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int intel_backlight_device_register(struct intel_connector *connector);
+void intel_backlight_device_unregister(struct intel_connector *connector);
+#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+static inline int intel_backlight_device_register(struct intel_connector *connector)
+{
+ return 0;
+}
+static inline void intel_backlight_device_unregister(struct intel_connector *connector)
+{
+}
+#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
+
+#endif /* __INTEL_BACKLIGHT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index fd71346aac7b..b99907c656bb 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -493,6 +493,9 @@ parse_lfp_backlight(struct drm_i915_private *i915,
level = 255;
}
i915->vbt.backlight.min_brightness = min_level;
+
+ i915->vbt.backlight.brightness_precision_bits =
+ backlight_data->brightness_precision_bits[panel_type];
} else {
level = backlight_data->level[panel_type];
i915->vbt.backlight.min_brightness = entry->min_brightness;
@@ -1511,39 +1514,130 @@ static u8 translate_iboost(u8 val)
return mapping[val];
}
+static const u8 cnp_ddc_pin_map[] = {
+ [0] = 0, /* N/A */
+ [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
+ [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
+};
+
+static const u8 icp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
+ [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
+ [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
+ [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
+ [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
+ [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
+ [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
+};
+
+static const u8 rkl_pch_tgp_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
+ [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static const u8 adls_ddc_pin_map[] = {
+ [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
+ [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
+ [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
+ [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
+ [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
+};
+
+static const u8 gen9bc_tgp_ddc_pin_map[] = {
+ [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
+ [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
+ [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
+};
+
+static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
+{
+ const u8 *ddc_pin_map;
+ int n_entries;
+
+ if (IS_ALDERLAKE_S(i915)) {
+ ddc_pin_map = adls_ddc_pin_map;
+ n_entries = ARRAY_SIZE(adls_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
+ return vbt_pin;
+ } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
+ ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
+ } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
+ ddc_pin_map = gen9bc_tgp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
+ } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
+ ddc_pin_map = icp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(icp_ddc_pin_map);
+ } else if (HAS_PCH_CNP(i915)) {
+ ddc_pin_map = cnp_ddc_pin_map;
+ n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
+ } else {
+ /* Assuming direct map */
+ return vbt_pin;
+ }
+
+ if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
+ return ddc_pin_map[vbt_pin];
+
+ drm_dbg_kms(&i915->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
+ return 0;
+}
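The lookup above is a bounded table indexed by the raw VBT value, with 0 treated as "unknown"; a minimal sketch of that pattern with a hypothetical table (not the real GMBUS pin numbers):

#include <stdio.h>
#include <stdint.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint8_t example_pin_map[] = {
        [0] = 0,        /* N/A */
        [1] = 0x10,
        [2] = 0x11,
};

static unsigned int map_pin(unsigned int vbt_pin)
{
        if (vbt_pin < ARRAY_SIZE(example_pin_map) && example_pin_map[vbt_pin] != 0)
                return example_pin_map[vbt_pin];
        return 0;       /* out of range or unmapped: ignore, as the driver does */
}

int main(void)
{
        printf("%u %u\n", map_pin(2), map_pin(7));      /* 17 0 */
        return 0;
}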
+
static enum port get_port_by_ddc_pin(struct drm_i915_private *i915, u8 ddc_pin)
{
- const struct ddi_vbt_port_info *info;
+ const struct intel_bios_encoder_data *devdata;
enum port port;
if (!ddc_pin)
return PORT_NONE;
for_each_port(port) {
- info = &i915->vbt.ddi_port_info[port];
+ devdata = i915->vbt.ports[port];
- if (info->devdata && ddc_pin == info->alternate_ddc_pin)
+ if (devdata && ddc_pin == devdata->child.ddc_pin)
return port;
}
return PORT_NONE;
}
-static void sanitize_ddc_pin(struct drm_i915_private *i915,
+static void sanitize_ddc_pin(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct drm_i915_private *i915 = devdata->i915;
struct child_device_config *child;
+ u8 mapped_ddc_pin;
enum port p;
- p = get_port_by_ddc_pin(i915, info->alternate_ddc_pin);
+ if (!devdata->child.ddc_pin)
+ return;
+
+ mapped_ddc_pin = map_ddc_pin(i915, devdata->child.ddc_pin);
+ if (!intel_gmbus_is_valid_pin(i915, mapped_ddc_pin)) {
+ drm_dbg_kms(&i915->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), mapped_ddc_pin);
+ devdata->child.ddc_pin = 0;
+ return;
+ }
+
+ p = get_port_by_ddc_pin(i915, devdata->child.ddc_pin);
if (p == PORT_NONE)
return;
drm_dbg_kms(&i915->drm,
"port %c trying to use the same DDC pin (0x%x) as port %c, "
"disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
+ port_name(port), mapped_ddc_pin,
port_name(p), port_name(p));
/*
@@ -1555,48 +1649,47 @@ static void sanitize_ddc_pin(struct drm_i915_private *i915,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- info = &i915->vbt.ddi_port_info[p];
- child = &info->devdata->child;
+ child = &i915->vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_TMDS_DVI_SIGNALING;
child->device_type |= DEVICE_TYPE_NOT_HDMI_OUTPUT;
- info->alternate_ddc_pin = 0;
+ child->ddc_pin = 0;
}
static enum port get_port_by_aux_ch(struct drm_i915_private *i915, u8 aux_ch)
{
- const struct ddi_vbt_port_info *info;
+ const struct intel_bios_encoder_data *devdata;
enum port port;
if (!aux_ch)
return PORT_NONE;
for_each_port(port) {
- info = &i915->vbt.ddi_port_info[port];
+ devdata = i915->vbt.ports[port];
- if (info->devdata && aux_ch == info->alternate_aux_channel)
+ if (devdata && aux_ch == devdata->child.aux_channel)
return port;
}
return PORT_NONE;
}
-static void sanitize_aux_ch(struct drm_i915_private *i915,
+static void sanitize_aux_ch(struct intel_bios_encoder_data *devdata,
enum port port)
{
- struct ddi_vbt_port_info *info = &i915->vbt.ddi_port_info[port];
+ struct drm_i915_private *i915 = devdata->i915;
struct child_device_config *child;
enum port p;
- p = get_port_by_aux_ch(i915, info->alternate_aux_channel);
+ p = get_port_by_aux_ch(i915, devdata->child.aux_channel);
if (p == PORT_NONE)
return;
drm_dbg_kms(&i915->drm,
"port %c trying to use the same AUX CH (0x%x) as port %c, "
"disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
+ port_name(port), devdata->child.aux_channel,
port_name(p), port_name(p));
/*
@@ -1608,88 +1701,10 @@ static void sanitize_aux_ch(struct drm_i915_private *i915,
* there are real machines (eg. Asrock B250M-HDV) where VBT has both
* port A and port E with the same AUX ch and we must pick port E :(
*/
- info = &i915->vbt.ddi_port_info[p];
- child = &info->devdata->child;
+ child = &i915->vbt.ports[p]->child;
child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
- info->alternate_aux_channel = 0;
-}
-
-static const u8 cnp_ddc_pin_map[] = {
- [0] = 0, /* N/A */
- [DDC_BUS_DDI_B] = GMBUS_PIN_1_BXT,
- [DDC_BUS_DDI_C] = GMBUS_PIN_2_BXT,
- [DDC_BUS_DDI_D] = GMBUS_PIN_4_CNP, /* sic */
- [DDC_BUS_DDI_F] = GMBUS_PIN_3_BXT, /* sic */
-};
-
-static const u8 icp_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [TGL_DDC_BUS_DDI_C] = GMBUS_PIN_3_BXT,
- [ICL_DDC_BUS_PORT_1] = GMBUS_PIN_9_TC1_ICP,
- [ICL_DDC_BUS_PORT_2] = GMBUS_PIN_10_TC2_ICP,
- [ICL_DDC_BUS_PORT_3] = GMBUS_PIN_11_TC3_ICP,
- [ICL_DDC_BUS_PORT_4] = GMBUS_PIN_12_TC4_ICP,
- [TGL_DDC_BUS_PORT_5] = GMBUS_PIN_13_TC5_TGP,
- [TGL_DDC_BUS_PORT_6] = GMBUS_PIN_14_TC6_TGP,
-};
-
-static const u8 rkl_pch_tgp_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ICL_DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [RKL_DDC_BUS_DDI_D] = GMBUS_PIN_9_TC1_ICP,
- [RKL_DDC_BUS_DDI_E] = GMBUS_PIN_10_TC2_ICP,
-};
-
-static const u8 adls_ddc_pin_map[] = {
- [ICL_DDC_BUS_DDI_A] = GMBUS_PIN_1_BXT,
- [ADLS_DDC_BUS_PORT_TC1] = GMBUS_PIN_9_TC1_ICP,
- [ADLS_DDC_BUS_PORT_TC2] = GMBUS_PIN_10_TC2_ICP,
- [ADLS_DDC_BUS_PORT_TC3] = GMBUS_PIN_11_TC3_ICP,
- [ADLS_DDC_BUS_PORT_TC4] = GMBUS_PIN_12_TC4_ICP,
-};
-
-static const u8 gen9bc_tgp_ddc_pin_map[] = {
- [DDC_BUS_DDI_B] = GMBUS_PIN_2_BXT,
- [DDC_BUS_DDI_C] = GMBUS_PIN_9_TC1_ICP,
- [DDC_BUS_DDI_D] = GMBUS_PIN_10_TC2_ICP,
-};
-
-static u8 map_ddc_pin(struct drm_i915_private *i915, u8 vbt_pin)
-{
- const u8 *ddc_pin_map;
- int n_entries;
-
- if (IS_ALDERLAKE_S(i915)) {
- ddc_pin_map = adls_ddc_pin_map;
- n_entries = ARRAY_SIZE(adls_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_DG1) {
- return vbt_pin;
- } else if (IS_ROCKETLAKE(i915) && INTEL_PCH_TYPE(i915) == PCH_TGP) {
- ddc_pin_map = rkl_pch_tgp_ddc_pin_map;
- n_entries = ARRAY_SIZE(rkl_pch_tgp_ddc_pin_map);
- } else if (HAS_PCH_TGP(i915) && DISPLAY_VER(i915) == 9) {
- ddc_pin_map = gen9bc_tgp_ddc_pin_map;
- n_entries = ARRAY_SIZE(gen9bc_tgp_ddc_pin_map);
- } else if (INTEL_PCH_TYPE(i915) >= PCH_ICP) {
- ddc_pin_map = icp_ddc_pin_map;
- n_entries = ARRAY_SIZE(icp_ddc_pin_map);
- } else if (HAS_PCH_CNP(i915)) {
- ddc_pin_map = cnp_ddc_pin_map;
- n_entries = ARRAY_SIZE(cnp_ddc_pin_map);
- } else {
- /* Assuming direct map */
- return vbt_pin;
- }
-
- if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
- return ddc_pin_map[vbt_pin];
-
- drm_dbg_kms(&i915->drm,
- "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
- vbt_pin);
- return 0;
+ child->aux_channel = 0;
}
static enum port __dvo_port_to_port(int n_ports, int n_dvo,
@@ -1825,6 +1840,17 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate)
}
}
+static int _intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 216)
+ return 0;
+
+ if (devdata->i915->vbt.version >= 230)
+ return parse_bdb_230_dp_max_link_rate(devdata->child.dp_max_link_rate);
+ else
+ return parse_bdb_216_dp_max_link_rate(devdata->child.dp_max_link_rate);
+}
+
static void sanitize_device_type(struct intel_bios_encoder_data *devdata,
enum port port)
{
@@ -1878,6 +1904,76 @@ intel_bios_encoder_supports_edp(const struct intel_bios_encoder_data *devdata)
devdata->child.device_type & DEVICE_TYPE_INTERNAL_CONNECTOR;
}
+static int _intel_bios_hdmi_level_shift(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 158)
+ return -1;
+
+ return devdata->child.hdmi_level_shifter_value;
+}
+
+static int _intel_bios_max_tmds_clock(const struct intel_bios_encoder_data *devdata)
+{
+ if (!devdata || devdata->i915->vbt.version < 204)
+ return 0;
+
+ switch (devdata->child.hdmi_max_data_rate) {
+ default:
+ MISSING_CASE(devdata->child.hdmi_max_data_rate);
+ fallthrough;
+ case HDMI_MAX_DATA_RATE_PLATFORM:
+ return 0;
+ case HDMI_MAX_DATA_RATE_297:
+ return 297000;
+ case HDMI_MAX_DATA_RATE_165:
+ return 165000;
+ }
+}
+
+static enum port get_edp_port(struct drm_i915_private *i915)
+{
+ const struct intel_bios_encoder_data *devdata;
+ enum port port;
+
+ for_each_port(port) {
+ devdata = i915->vbt.ports[port];
+
+ if (devdata && intel_bios_encoder_supports_edp(devdata))
+ return port;
+ }
+
+ return PORT_NONE;
+}
+
+/*
+ * FIXME: The power sequencer and backlight code currently do not support more
+ * than one set of registers, at least not on anything other than VLV/CHV. It will
+ * clobber the registers. As a temporary workaround, gracefully prevent more
+ * than one eDP from being registered.
+ */
+static void sanitize_dual_edp(struct intel_bios_encoder_data *devdata,
+ enum port port)
+{
+ struct drm_i915_private *i915 = devdata->i915;
+ struct child_device_config *child = &devdata->child;
+ enum port p;
+
+ /* CHV might not clobber PPS registers. */
+ if (IS_CHERRYVIEW(i915))
+ return;
+
+ p = get_edp_port(i915);
+ if (p == PORT_NONE)
+ return;
+
+ drm_dbg_kms(&i915->drm, "both ports %c and %c configured as eDP, "
+ "disabling port %c eDP\n", port_name(p), port_name(port),
+ port_name(port));
+
+ child->device_type &= ~DEVICE_TYPE_DISPLAYPORT_OUTPUT;
+ child->device_type &= ~DEVICE_TYPE_INTERNAL_CONNECTOR;
+}
+
static bool is_port_valid(struct drm_i915_private *i915, enum port port)
{
/*
@@ -1895,9 +1991,8 @@ static void parse_ddi_port(struct drm_i915_private *i915,
struct intel_bios_encoder_data *devdata)
{
const struct child_device_config *child = &devdata->child;
- struct ddi_vbt_port_info *info;
bool is_dvi, is_hdmi, is_dp, is_edp, is_crt, supports_typec_usb, supports_tbt;
- int dp_boost_level, hdmi_boost_level;
+ int dp_boost_level, dp_max_link_rate, hdmi_boost_level, hdmi_level_shift, max_tmds_clock;
enum port port;
port = dvo_port_to_port(i915, child->dvo_port);
@@ -1911,9 +2006,7 @@ static void parse_ddi_port(struct drm_i915_private *i915,
return;
}
- info = &i915->vbt.ddi_port_info[port];
-
- if (info->devdata) {
+ if (i915->vbt.ports[port]) {
drm_dbg_kms(&i915->drm,
"More than one child device for port %c in VBT, using the first.\n",
port_name(port));
@@ -1938,62 +2031,27 @@ static void parse_ddi_port(struct drm_i915_private *i915,
supports_typec_usb, supports_tbt,
devdata->dsc != NULL);
- if (is_dvi) {
- u8 ddc_pin;
+ if (is_edp)
+ sanitize_dual_edp(devdata, port);
- ddc_pin = map_ddc_pin(i915, child->ddc_pin);
- if (intel_gmbus_is_valid_pin(i915, ddc_pin)) {
- info->alternate_ddc_pin = ddc_pin;
- sanitize_ddc_pin(i915, port);
- } else {
- drm_dbg_kms(&i915->drm,
- "Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), ddc_pin);
- }
- }
+ if (is_dvi)
+ sanitize_ddc_pin(devdata, port);
- if (is_dp) {
- info->alternate_aux_channel = child->aux_channel;
+ if (is_dp)
+ sanitize_aux_ch(devdata, port);
- sanitize_aux_ch(i915, port);
- }
-
- if (i915->vbt.version >= 158) {
- /* The VBT HDMI level shift values match the table we have. */
- u8 hdmi_level_shift = child->hdmi_level_shifter_value;
+ hdmi_level_shift = _intel_bios_hdmi_level_shift(devdata);
+ if (hdmi_level_shift >= 0) {
drm_dbg_kms(&i915->drm,
"Port %c VBT HDMI level shift: %d\n",
- port_name(port),
- hdmi_level_shift);
- info->hdmi_level_shift = hdmi_level_shift;
- info->hdmi_level_shift_set = true;
+ port_name(port), hdmi_level_shift);
}
- if (i915->vbt.version >= 204) {
- int max_tmds_clock;
-
- switch (child->hdmi_max_data_rate) {
- default:
- MISSING_CASE(child->hdmi_max_data_rate);
- fallthrough;
- case HDMI_MAX_DATA_RATE_PLATFORM:
- max_tmds_clock = 0;
- break;
- case HDMI_MAX_DATA_RATE_297:
- max_tmds_clock = 297000;
- break;
- case HDMI_MAX_DATA_RATE_165:
- max_tmds_clock = 165000;
- break;
- }
-
- if (max_tmds_clock)
- drm_dbg_kms(&i915->drm,
- "Port %c VBT HDMI max TMDS clock: %d kHz\n",
- port_name(port), max_tmds_clock);
- info->max_tmds_clock = max_tmds_clock;
- }
+ max_tmds_clock = _intel_bios_max_tmds_clock(devdata);
+ if (max_tmds_clock)
+ drm_dbg_kms(&i915->drm,
+ "Port %c VBT HDMI max TMDS clock: %d kHz\n",
+ port_name(port), max_tmds_clock);
/* I_boost config for SKL and above */
dp_boost_level = intel_bios_encoder_dp_boost_level(devdata);
@@ -2008,19 +2066,13 @@ static void parse_ddi_port(struct drm_i915_private *i915,
"Port %c VBT HDMI boost level: %d\n",
port_name(port), hdmi_boost_level);
- /* DP max link rate for GLK+ */
- if (i915->vbt.version >= 216) {
- if (i915->vbt.version >= 230)
- info->dp_max_link_rate = parse_bdb_230_dp_max_link_rate(child->dp_max_link_rate);
- else
- info->dp_max_link_rate = parse_bdb_216_dp_max_link_rate(child->dp_max_link_rate);
-
+ dp_max_link_rate = _intel_bios_dp_max_link_rate(devdata);
+ if (dp_max_link_rate)
drm_dbg_kms(&i915->drm,
"Port %c VBT DP max link rate: %d\n",
- port_name(port), info->dp_max_link_rate);
- }
+ port_name(port), dp_max_link_rate);
- info->devdata = devdata;
+ i915->vbt.ports[port] = devdata;
}
static void parse_ddi_ports(struct drm_i915_private *i915)
@@ -2558,12 +2610,8 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port)
[PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, },
};
- if (HAS_DDI(i915)) {
- const struct ddi_vbt_port_info *port_info =
- &i915->vbt.ddi_port_info[port];
-
- return port_info->devdata;
- }
+ if (HAS_DDI(i915))
+ return i915->vbt.ports[port];
/* FIXME maybe deal with port A as well? */
if (drm_WARN_ON(&i915->drm,
@@ -2814,8 +2862,7 @@ bool
intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
if (drm_WARN_ON_ONCE(&i915->drm,
!IS_GEMINILAKE(i915) && !IS_BROXTON(i915)))
@@ -2835,8 +2882,7 @@ bool
intel_bios_is_lspcon_present(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
return HAS_LSPCON(i915) && devdata && devdata->child.lspcon;
}
@@ -2852,8 +2898,7 @@ bool
intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum port port)
{
- const struct intel_bios_encoder_data *devdata =
- i915->vbt.ddi_port_info[port].devdata;
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
return devdata && devdata->child.lane_reversal;
}
@@ -2861,11 +2906,10 @@ intel_bios_is_lane_reversal_needed(const struct drm_i915_private *i915,
enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
enum port port)
{
- const struct ddi_vbt_port_info *info =
- &i915->vbt.ddi_port_info[port];
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[port];
enum aux_ch aux_ch;
- if (!info->alternate_aux_channel) {
+ if (!devdata || !devdata->child.aux_channel) {
aux_ch = (enum aux_ch)port;
drm_dbg_kms(&i915->drm,
@@ -2881,7 +2925,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
* ADL-S VBT uses PHY based mapping. Combo PHYs A,B,C,D,E
* map to DDI A,TC1,TC2,TC3,TC4 respectively.
*/
- switch (info->alternate_aux_channel) {
+ switch (devdata->child.aux_channel) {
case DP_AUX_A:
aux_ch = AUX_CH_A;
break;
@@ -2942,7 +2986,7 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
aux_ch = AUX_CH_I;
break;
default:
- MISSING_CASE(info->alternate_aux_channel);
+ MISSING_CASE(devdata->child.aux_channel);
aux_ch = AUX_CH_A;
break;
}
@@ -2956,17 +3000,18 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *i915,
int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+ return _intel_bios_max_tmds_clock(devdata);
}
+/* This is an index in the HDMI/DVI DDI buffer translation table, or -1 */
int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- const struct ddi_vbt_port_info *info =
- &i915->vbt.ddi_port_info[encoder->port];
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+ return _intel_bios_hdmi_level_shift(devdata);
}
int intel_bios_encoder_dp_boost_level(const struct intel_bios_encoder_data *devdata)
@@ -2988,15 +3033,20 @@ int intel_bios_encoder_hdmi_boost_level(const struct intel_bios_encoder_data *de
int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
- return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+ return _intel_bios_dp_max_link_rate(devdata);
}
int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_bios_encoder_data *devdata = i915->vbt.ports[encoder->port];
+
+ if (!devdata || !devdata->child.ddc_pin)
+ return 0;
- return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+ return map_ddc_pin(i915, devdata->child.ddc_pin);
}
bool intel_bios_encoder_supports_typec_usb(const struct intel_bios_encoder_data *devdata)
@@ -3012,5 +3062,5 @@ bool intel_bios_encoder_supports_tbt(const struct intel_bios_encoder_data *devda
const struct intel_bios_encoder_data *
intel_bios_encoder_data_lookup(struct drm_i915_private *i915, enum port port)
{
- return i915->vbt.ddi_port_info[port].devdata;
+ return i915->vbt.ports[port];
}
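
The intel_bios.c hunks above converge on one pattern: i915->vbt.ports[port] is a per-port intel_bios_encoder_data pointer that stays NULL when the VBT carries no child device for that port, and consumers go through small accessors (_intel_bios_hdmi_level_shift(), _intel_bios_dp_max_link_rate(), ...) that return a default instead of reading pre-chewed fields out of ddi_port_info. The standalone C sketch below models that lookup-plus-fallback shape; apart from the identifiers quoted from the diff, every name and value in it is a simplified stand-in, not i915 code.

/*
 * Standalone sketch (not kernel code) of the per-port pointer pattern:
 * NULL means "no VBT child device for this port", and the accessor
 * returns a sentinel default instead of dereferencing NULL.
 */
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, NUM_PORTS, PORT_NONE = -1 };

struct encoder_data {            /* stands in for intel_bios_encoder_data */
	int aux_channel;         /* stands in for devdata->child.aux_channel */
	int hdmi_level_shift;    /* only meaningful for new-enough VBTs */
	int vbt_version;
};

static struct encoder_data *ports[NUM_PORTS]; /* stands in for i915->vbt.ports[] */

/* Mirrors _intel_bios_hdmi_level_shift(): -1 means "no VBT override". */
static int hdmi_level_shift(const struct encoder_data *devdata)
{
	if (!devdata || devdata->vbt_version < 158)
		return -1;
	return devdata->hdmi_level_shift;
}

int main(void)
{
	static struct encoder_data b = {
		.aux_channel = 0x40,
		.hdmi_level_shift = 3,
		.vbt_version = 228,
	};

	ports[PORT_B] = &b;	/* only port B has a child device */

	for (enum port p = PORT_A; p < NUM_PORTS; p++)
		printf("port %c: HDMI level shift %d\n", 'A' + p,
		       hdmi_level_shift(ports[p]));
	return 0;
}

Ports without a child device fall through to the -1 default, which is exactly how intel_bios_hdmi_level_shift() tells its caller to use the platform default entry.
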
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index 4b94256d7319..8d9d888e9316 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -9,8 +9,8 @@
#include "intel_bw.h"
#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
/* Parameters for Qclk Geyserville (QGV) */
struct intel_qgv_point {
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 34fa4130d5c4..9e466d829019 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -28,8 +28,9 @@
#include "intel_cdclk.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_pcode.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: CDCLK / RAWCLK
@@ -59,6 +60,37 @@
* dividers can be programmed correctly.
*/
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config)
+{
+ dev_priv->cdclk_funcs->get_cdclk(dev_priv, cdclk_config);
+}
+
+int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ return dev_priv->cdclk_funcs->bw_calc_min_cdclk(state);
+}
+
+static void intel_cdclk_set_cdclk(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe)
+{
+ dev_priv->cdclk_funcs->set_cdclk(dev_priv, cdclk_config, pipe);
+}
+
+static int intel_cdclk_modeset_calc_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_state *cdclk_config)
+{
+ return dev_priv->cdclk_funcs->modeset_calc_cdclk(cdclk_config);
+}
+
+static u8 intel_cdclk_calc_voltage_level(struct drm_i915_private *dev_priv,
+ int cdclk)
+{
+ return dev_priv->cdclk_funcs->calc_voltage_level(cdclk);
+}
+
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
struct intel_cdclk_config *cdclk_config)
{
@@ -1466,7 +1498,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
* at least what the CDCLK frequency requires.
*/
cdclk_config->voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
@@ -1777,7 +1809,7 @@ static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
cdclk_config.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
@@ -1789,7 +1821,7 @@ static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
cdclk_config.cdclk = cdclk_config.bypass;
cdclk_config.vco = 0;
cdclk_config.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk_config.cdclk);
bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
@@ -1932,7 +1964,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk))
return;
intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
@@ -1956,7 +1988,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv,
&dev_priv->gmbus_mutex);
}
- dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
+ intel_cdclk_set_cdclk(dev_priv, cdclk_config, pipe);
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2140,6 +2172,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk);
/*
+ * When only a single VDSC engine is used, the pixel clock cannot
+ * exceed the VDSC clock (cdclk), since each VDSC engine operates
+ * with 1 ppc (pixel per clock) throughput.
+ */
+ if (crtc_state->dsc.compression_enable && !crtc_state->dsc.dsc_split)
+ min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
+
+ /*
* HACK. Currently for TGL platforms we calculate
* min_cdclk initially based on pixel_rate divided
* by 2, accounting for also plane requirements,
@@ -2414,7 +2454,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
cdclk_state->logical.cdclk = cdclk;
cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
- dev_priv->display.calc_voltage_level(cdclk));
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk));
if (!cdclk_state->active_pipes) {
cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
@@ -2423,7 +2463,7 @@ static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
cdclk_state->actual.vco = vco;
cdclk_state->actual.cdclk = cdclk;
cdclk_state->actual.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk);
+ intel_cdclk_calc_voltage_level(dev_priv, cdclk);
} else {
cdclk_state->actual = cdclk_state->logical;
}
@@ -2515,7 +2555,7 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
new_cdclk_state->active_pipes =
intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
- ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
+ ret = intel_cdclk_modeset_calc_cdclk(dev_priv, new_cdclk_state);
if (ret)
return ret;
@@ -2695,7 +2735,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
*/
void intel_update_cdclk(struct drm_i915_private *dev_priv)
{
- dev_priv->display.get_cdclk(dev_priv, &dev_priv->cdclk.hw);
+ intel_cdclk_get_cdclk(dev_priv, &dev_priv->cdclk.hw);
/*
* 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
@@ -2845,6 +2885,157 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
return freq;
}
+static struct intel_cdclk_funcs tgl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = tgl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs ehl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = ehl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs icl_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = icl_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs bxt_cdclk_funcs = {
+ .get_cdclk = bxt_get_cdclk,
+ .set_cdclk = bxt_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bxt_modeset_calc_cdclk,
+ .calc_voltage_level = bxt_calc_voltage_level,
+};
+
+static struct intel_cdclk_funcs skl_cdclk_funcs = {
+ .get_cdclk = skl_get_cdclk,
+ .set_cdclk = skl_set_cdclk,
+ .bw_calc_min_cdclk = skl_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = skl_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs bdw_cdclk_funcs = {
+ .get_cdclk = bdw_get_cdclk,
+ .set_cdclk = bdw_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = bdw_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs chv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = chv_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs vlv_cdclk_funcs = {
+ .get_cdclk = vlv_get_cdclk,
+ .set_cdclk = vlv_set_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = vlv_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs hsw_cdclk_funcs = {
+ .get_cdclk = hsw_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* SNB, IVB, 965G, 945G */
+static struct intel_cdclk_funcs fixed_400mhz_cdclk_funcs = {
+ .get_cdclk = fixed_400mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs ilk_cdclk_funcs = {
+ .get_cdclk = fixed_450mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs gm45_cdclk_funcs = {
+ .get_cdclk = gm45_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* G45 uses G33 */
+
+static struct intel_cdclk_funcs i965gm_cdclk_funcs = {
+ .get_cdclk = i965gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i965G uses fixed 400 */
+
+static struct intel_cdclk_funcs pnv_cdclk_funcs = {
+ .get_cdclk = pnv_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs g33_cdclk_funcs = {
+ .get_cdclk = g33_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i945gm_cdclk_funcs = {
+ .get_cdclk = i945gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+/* i945G uses fixed 400 */
+
+static struct intel_cdclk_funcs i915gm_cdclk_funcs = {
+ .get_cdclk = i915gm_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i915g_cdclk_funcs = {
+ .get_cdclk = fixed_333mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i865g_cdclk_funcs = {
+ .get_cdclk = fixed_266mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i85x_cdclk_funcs = {
+ .get_cdclk = i85x_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i845g_cdclk_funcs = {
+ .get_cdclk = fixed_200mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
+static struct intel_cdclk_funcs i830_cdclk_funcs = {
+ .get_cdclk = fixed_133mhz_get_cdclk,
+ .bw_calc_min_cdclk = intel_bw_calc_min_cdclk,
+ .modeset_calc_cdclk = fixed_modeset_calc_cdclk,
+};
+
/**
* intel_init_cdclk_hooks - Initialize CDCLK related modesetting hooks
* @dev_priv: i915 device
@@ -2852,119 +3043,78 @@ u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
if (IS_DG2(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = dg2_cdclk_table;
} else if (IS_ALDERLAKE_P(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
/* Wa_22011320316:adl-p[a0] */
if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
dev_priv->cdclk.table = adlp_a_step_cdclk_table;
else
dev_priv->cdclk.table = adlp_cdclk_table;
} else if (IS_ROCKETLAKE(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = rkl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 12) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &tgl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_JSL_EHL(dev_priv)) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &ehl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = icl_calc_voltage_level;
+ dev_priv->cdclk_funcs = &icl_cdclk_funcs;
dev_priv->cdclk.table = icl_cdclk_table;
} else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = bxt_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
- dev_priv->display.calc_voltage_level = bxt_calc_voltage_level;
+ dev_priv->cdclk_funcs = &bxt_cdclk_funcs;
if (IS_GEMINILAKE(dev_priv))
dev_priv->cdclk.table = glk_cdclk_table;
else
dev_priv->cdclk.table = bxt_cdclk_table;
} else if (DISPLAY_VER(dev_priv) == 9) {
- dev_priv->display.bw_calc_min_cdclk = skl_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = skl_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &skl_cdclk_funcs;
} else if (IS_BROADWELL(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = bdw_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = bdw_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &bdw_cdclk_funcs;
+ } else if (IS_HASWELL(dev_priv)) {
+ dev_priv->cdclk_funcs = &hsw_cdclk_funcs;
} else if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = chv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &chv_cdclk_funcs;
} else if (IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.set_cdclk = vlv_set_cdclk;
- dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk;
- } else {
- dev_priv->display.bw_calc_min_cdclk = intel_bw_calc_min_cdclk;
- dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk;
+ dev_priv->cdclk_funcs = &vlv_cdclk_funcs;
+ } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_IRONLAKE(dev_priv)) {
+ dev_priv->cdclk_funcs = &ilk_cdclk_funcs;
+ } else if (IS_GM45(dev_priv)) {
+ dev_priv->cdclk_funcs = &gm45_cdclk_funcs;
+ } else if (IS_G45(dev_priv)) {
+ dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ } else if (IS_I965GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i965gm_cdclk_funcs;
+ } else if (IS_I965G(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_PINEVIEW(dev_priv)) {
+ dev_priv->cdclk_funcs = &pnv_cdclk_funcs;
+ } else if (IS_G33(dev_priv)) {
+ dev_priv->cdclk_funcs = &g33_cdclk_funcs;
+ } else if (IS_I945GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i945gm_cdclk_funcs;
+ } else if (IS_I945G(dev_priv)) {
+ dev_priv->cdclk_funcs = &fixed_400mhz_cdclk_funcs;
+ } else if (IS_I915GM(dev_priv)) {
+ dev_priv->cdclk_funcs = &i915gm_cdclk_funcs;
+ } else if (IS_I915G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i915g_cdclk_funcs;
+ } else if (IS_I865G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i865g_cdclk_funcs;
+ } else if (IS_I85X(dev_priv)) {
+ dev_priv->cdclk_funcs = &i85x_cdclk_funcs;
+ } else if (IS_I845G(dev_priv)) {
+ dev_priv->cdclk_funcs = &i845g_cdclk_funcs;
+ } else if (IS_I830(dev_priv)) {
+ dev_priv->cdclk_funcs = &i830_cdclk_funcs;
}
- if (DISPLAY_VER(dev_priv) >= 10 || IS_BROXTON(dev_priv))
- dev_priv->display.get_cdclk = bxt_get_cdclk;
- else if (DISPLAY_VER(dev_priv) == 9)
- dev_priv->display.get_cdclk = skl_get_cdclk;
- else if (IS_BROADWELL(dev_priv))
- dev_priv->display.get_cdclk = bdw_get_cdclk;
- else if (IS_HASWELL(dev_priv))
- dev_priv->display.get_cdclk = hsw_get_cdclk;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->display.get_cdclk = vlv_get_cdclk;
- else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_IRONLAKE(dev_priv))
- dev_priv->display.get_cdclk = fixed_450mhz_get_cdclk;
- else if (IS_GM45(dev_priv))
- dev_priv->display.get_cdclk = gm45_get_cdclk;
- else if (IS_G45(dev_priv))
- dev_priv->display.get_cdclk = g33_get_cdclk;
- else if (IS_I965GM(dev_priv))
- dev_priv->display.get_cdclk = i965gm_get_cdclk;
- else if (IS_I965G(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.get_cdclk = pnv_get_cdclk;
- else if (IS_G33(dev_priv))
- dev_priv->display.get_cdclk = g33_get_cdclk;
- else if (IS_I945GM(dev_priv))
- dev_priv->display.get_cdclk = i945gm_get_cdclk;
- else if (IS_I945G(dev_priv))
- dev_priv->display.get_cdclk = fixed_400mhz_get_cdclk;
- else if (IS_I915GM(dev_priv))
- dev_priv->display.get_cdclk = i915gm_get_cdclk;
- else if (IS_I915G(dev_priv))
- dev_priv->display.get_cdclk = fixed_333mhz_get_cdclk;
- else if (IS_I865G(dev_priv))
- dev_priv->display.get_cdclk = fixed_266mhz_get_cdclk;
- else if (IS_I85X(dev_priv))
- dev_priv->display.get_cdclk = i85x_get_cdclk;
- else if (IS_I845G(dev_priv))
- dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
- else if (IS_I830(dev_priv))
- dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
-
- if (drm_WARN(&dev_priv->drm, !dev_priv->display.get_cdclk,
- "Unknown platform. Assuming 133 MHz CDCLK\n"))
- dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
+ if (drm_WARN(&dev_priv->drm, !dev_priv->cdclk_funcs,
+ "Unknown platform. Assuming i830\n"))
+ dev_priv->cdclk_funcs = &i830_cdclk_funcs;
}
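
The intel_cdclk.c changes above move per-platform behaviour out of individually assigned dev_priv->display.* function pointers and into const per-platform ops tables (tgl_cdclk_funcs, i830_cdclk_funcs, ...) selected once in intel_init_cdclk_hooks() and reached through thin wrappers such as intel_cdclk_get_cdclk(). The standalone C sketch below shows that ops-table shape in miniature; its type and function names are invented stand-ins, not the driver's.

/*
 * Standalone sketch (not kernel code): one const ops table per platform,
 * picked once at init, called through a thin wrapper.
 */
#include <stdio.h>

struct device;                           /* opaque, like drm_i915_private */

struct cdclk_funcs {
	int  (*get_cdclk)(const struct device *dev);
	void (*set_cdclk)(const struct device *dev, int khz);
};

struct device {
	const char *name;
	const struct cdclk_funcs *cdclk_funcs;  /* like dev_priv->cdclk_funcs */
};

static int fixed_133mhz_get_cdclk(const struct device *dev)
{
	(void)dev;
	return 133333;	/* fixed-frequency platform */
}

static void log_set_cdclk(const struct device *dev, int khz)
{
	printf("%s: set cdclk to %d kHz\n", dev->name, khz);
}

static const struct cdclk_funcs i830_like_cdclk_funcs = {
	.get_cdclk = fixed_133mhz_get_cdclk,
	.set_cdclk = log_set_cdclk,
};

/* Thin wrapper, like intel_cdclk_get_cdclk() in the diff. */
static int device_get_cdclk(const struct device *dev)
{
	return dev->cdclk_funcs->get_cdclk(dev);
}

int main(void)
{
	struct device dev = {
		.name = "i830-like",
		.cdclk_funcs = &i830_like_cdclk_funcs,	/* chosen once at init */
	};

	printf("%s: cdclk is %d kHz\n", dev.name, device_get_cdclk(&dev));
	dev.cdclk_funcs->set_cdclk(&dev, 133333);
	return 0;
}

Keeping the tables const and selecting them in one if/else ladder replaces the old code's separate ladders per callback, so a platform's callbacks can no longer get out of sync with each other.
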
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index b34eb00fb327..309b3f394e24 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -68,7 +68,9 @@ void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
-
+void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config);
+int intel_cdclk_bw_calc_min_cdclk(struct intel_atomic_state *state);
struct intel_cdclk_state *
intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index afcb4bf3826c..5359b7305a78 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -25,6 +25,8 @@
#include "intel_color.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_dsi.h"
#define CTM_COEFF_SIGN (1ULL << 63)
@@ -1137,14 +1139,14 @@ void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->display.load_luts(crtc_state);
+ dev_priv->color_funcs->load_luts(crtc_state);
}
void intel_color_commit(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- dev_priv->display.color_commit(crtc_state);
+ dev_priv->color_funcs->color_commit(crtc_state);
}
static bool intel_can_preload_luts(const struct intel_crtc_state *new_crtc_state)
@@ -1200,15 +1202,15 @@ int intel_color_check(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- return dev_priv->display.color_check(crtc_state);
+ return dev_priv->color_funcs->color_check(crtc_state);
}
void intel_color_get_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (dev_priv->display.read_luts)
- dev_priv->display.read_luts(crtc_state);
+ if (dev_priv->color_funcs->read_luts)
+ dev_priv->color_funcs->read_luts(crtc_state);
}
static bool need_plane_update(struct intel_plane *plane,
@@ -2092,6 +2094,76 @@ static void icl_read_luts(struct intel_crtc_state *crtc_state)
}
}
+static const struct intel_color_funcs chv_color_funcs = {
+ .color_check = chv_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = chv_load_luts,
+ .read_luts = chv_read_luts,
+};
+
+static const struct intel_color_funcs i965_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = i965_load_luts,
+ .read_luts = i965_read_luts,
+};
+
+static const struct intel_color_funcs i9xx_color_funcs = {
+ .color_check = i9xx_color_check,
+ .color_commit = i9xx_color_commit,
+ .load_luts = i9xx_load_luts,
+ .read_luts = i9xx_read_luts,
+};
+
+static const struct intel_color_funcs icl_color_funcs = {
+ .color_check = icl_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = icl_load_luts,
+ .read_luts = icl_read_luts,
+};
+
+static const struct intel_color_funcs glk_color_funcs = {
+ .color_check = glk_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = glk_load_luts,
+ .read_luts = glk_read_luts,
+};
+
+static const struct intel_color_funcs skl_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = skl_color_commit,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs bdw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = hsw_color_commit,
+ .load_luts = bdw_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs hsw_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = hsw_color_commit,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ivb_color_funcs = {
+ .color_check = ivb_color_check,
+ .color_commit = ilk_color_commit,
+ .load_luts = ivb_load_luts,
+ .read_luts = NULL,
+};
+
+static const struct intel_color_funcs ilk_color_funcs = {
+ .color_check = ilk_color_check,
+ .color_commit = ilk_color_commit,
+ .load_luts = ilk_load_luts,
+ .read_luts = ilk_read_luts,
+};
+
void intel_color_init(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -2101,52 +2173,28 @@ void intel_color_init(struct intel_crtc *crtc)
if (HAS_GMCH(dev_priv)) {
if (IS_CHERRYVIEW(dev_priv)) {
- dev_priv->display.color_check = chv_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = chv_load_luts;
- dev_priv->display.read_luts = chv_read_luts;
+ dev_priv->color_funcs = &chv_color_funcs;
} else if (DISPLAY_VER(dev_priv) >= 4) {
- dev_priv->display.color_check = i9xx_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = i965_load_luts;
- dev_priv->display.read_luts = i965_read_luts;
+ dev_priv->color_funcs = &i965_color_funcs;
} else {
- dev_priv->display.color_check = i9xx_color_check;
- dev_priv->display.color_commit = i9xx_color_commit;
- dev_priv->display.load_luts = i9xx_load_luts;
- dev_priv->display.read_luts = i9xx_read_luts;
+ dev_priv->color_funcs = &i9xx_color_funcs;
}
} else {
if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->display.color_check = icl_color_check;
- else if (DISPLAY_VER(dev_priv) >= 10)
- dev_priv->display.color_check = glk_color_check;
- else if (DISPLAY_VER(dev_priv) >= 7)
- dev_priv->display.color_check = ivb_color_check;
- else
- dev_priv->display.color_check = ilk_color_check;
-
- if (DISPLAY_VER(dev_priv) >= 9)
- dev_priv->display.color_commit = skl_color_commit;
- else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- dev_priv->display.color_commit = hsw_color_commit;
- else
- dev_priv->display.color_commit = ilk_color_commit;
-
- if (DISPLAY_VER(dev_priv) >= 11) {
- dev_priv->display.load_luts = icl_load_luts;
- dev_priv->display.read_luts = icl_read_luts;
- } else if (DISPLAY_VER(dev_priv) == 10) {
- dev_priv->display.load_luts = glk_load_luts;
- dev_priv->display.read_luts = glk_read_luts;
- } else if (DISPLAY_VER(dev_priv) >= 8) {
- dev_priv->display.load_luts = bdw_load_luts;
- } else if (DISPLAY_VER(dev_priv) >= 7) {
- dev_priv->display.load_luts = ivb_load_luts;
- } else {
- dev_priv->display.load_luts = ilk_load_luts;
- dev_priv->display.read_luts = ilk_read_luts;
- }
+ dev_priv->color_funcs = &icl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 10)
+ dev_priv->color_funcs = &glk_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 9)
+ dev_priv->color_funcs = &skl_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 8)
+ dev_priv->color_funcs = &bdw_color_funcs;
+ else if (DISPLAY_VER(dev_priv) == 7) {
+ if (IS_HASWELL(dev_priv))
+ dev_priv->color_funcs = &hsw_color_funcs;
+ else
+ dev_priv->color_funcs = &ivb_color_funcs;
+ } else
+ dev_priv->color_funcs = &ilk_color_funcs;
}
drm_crtc_enable_color_mgmt(&crtc->base,
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index bacdf8a16bcb..634e8d449457 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -220,13 +220,13 @@ static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
return false;
if (DISPLAY_VER(dev_priv) >= 12) {
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_TX_DW8_LN(0, phy),
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK,
ICL_PORT_TX_DW8_ODCC_CLK_SEL |
ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2);
- ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN0(phy),
+ ret &= check_phy_reg(dev_priv, phy, ICL_PORT_PCS_DW1_LN(0, phy),
DCC_MODE_SELECT_MASK,
DCC_MODE_SELECT_CONTINUOSLY);
}
@@ -343,13 +343,13 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
skip_phy_misc:
if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW8_LN(0, phy));
val &= ~ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK;
val |= ICL_PORT_TX_DW8_ODCC_CLK_SEL;
val |= ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2;
intel_de_write(dev_priv, ICL_PORT_TX_DW8_GRP(phy), val);
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
val &= ~DCC_MODE_SELECT_MASK;
val |= DCC_MODE_SELECT_CONTINUOSLY;
intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
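
The intel_combo_phy.c hunk above only switches the _LN0 register macros to the parametrized _LN(0, phy) form; the surrounding code keeps the usual read/modify/write sequence for these PHY registers: read, clear the field masks being reprogrammed, OR in the new values, write back. The standalone sketch below shows that sequence with invented register and field names, purely for illustration.

/*
 * Standalone sketch (not kernel code) of the read-modify-write pattern:
 * clear only the fields you own, then OR in the new values.
 * Register and field names here are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define CLK_DIV_SEL_MASK   (0x3u << 4)   /* illustrative field, bits 5:4 */
#define CLK_DIV_SEL_DIV2   (0x1u << 4)
#define CLK_SEL_ENABLE     (0x1u << 0)

static uint32_t fake_reg = 0xdeadbe00;   /* stands in for an MMIO register */

static uint32_t reg_read(void)          { return fake_reg; }
static void     reg_write(uint32_t val) { fake_reg = val; }

int main(void)
{
	uint32_t val = reg_read();

	val &= ~CLK_DIV_SEL_MASK;            /* clear the field being reprogrammed */
	val |= CLK_SEL_ENABLE | CLK_DIV_SEL_DIV2;
	reg_write(val);

	printf("register now 0x%08x\n", (unsigned int)fake_reg);
	return 0;
}
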
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 9bed1ccecea0..c65f95a9a1ec 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -29,13 +29,13 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
-#include "display/intel_panel.h"
-
#include "i915_drv.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
+#include "intel_panel.h"
int intel_connector_init(struct intel_connector *connector)
{
@@ -124,7 +124,7 @@ int intel_connector_register(struct drm_connector *connector)
goto err_backlight;
}
- intel_connector_debugfs_add(connector);
+ intel_connector_debugfs_add(intel_connector);
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 408f82b0dc7d..1c161eeed82f 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -251,7 +251,7 @@ static void hsw_post_disable_crt(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
intel_ddi_disable_transcoder_func(old_crtc_state);
@@ -314,7 +314,7 @@ static void hsw_enable_crt(struct intel_atomic_state *state,
intel_ddi_enable_transcoder_func(encoder, crtc_state);
- intel_enable_pipe(crtc_state);
+ intel_enable_transcoder(crtc_state);
lpt_pch_enable(crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index c7618fef0143..11842f212613 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -17,7 +17,7 @@
#include "intel_display_types.h"
#include "intel_display.h"
#include "intel_fb.h"
-
+#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"
#include "intel_psr.h"
@@ -536,8 +536,10 @@ static void i9xx_update_cursor(struct intel_plane *plane,
if (DISPLAY_VER(dev_priv) >= 9)
skl_write_cursor_wm(plane, crtc_state);
- if (!intel_crtc_needs_modeset(crtc_state))
+ if (plane_state)
intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, 0);
+ else
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
if (plane->cursor.base != base ||
plane->cursor.size != fbc_ctl ||
@@ -637,8 +639,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
* FIXME bigjoiner fastpath would be good
*/
if (!crtc_state->hw.active || intel_crtc_needs_modeset(crtc_state) ||
- crtc_state->update_pipe || crtc_state->bigjoiner ||
- crtc_state->enable_psr2_sel_fetch)
+ crtc_state->update_pipe || crtc_state->bigjoiner)
goto slow;
/*
@@ -696,7 +697,7 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
goto out_free;
intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->hw.fb),
- ORIGIN_FLIP);
+ ORIGIN_CURSOR_UPDATE);
intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->hw.fb),
to_intel_frontbuffer(new_plane_state->hw.fb),
plane->frontbuffer_bit);
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index bd184325d0c7..1dcfe31e6c6f 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -29,6 +29,7 @@
#include "i915_drv.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_combo_phy.h"
#include "intel_connector.h"
#include "intel_crtc.h"
@@ -40,6 +41,7 @@
#include "intel_dp_link_training.h"
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
+#include "intel_drrs.h"
#include "intel_dsi.h"
#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
@@ -48,7 +50,6 @@
#include "intel_hdmi.h"
#include "intel_hotplug.h"
#include "intel_lspcon.h"
-#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_snps_phy.h"
@@ -73,24 +74,27 @@ static const u8 index_to_dp_signal_levels[] = {
};
static int intel_ddi_hdmi_level(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+ const struct intel_ddi_buf_trans *trans)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int n_entries, level, default_entry;
+ int level;
- n_entries = intel_ddi_hdmi_num_entries(encoder, crtc_state, &default_entry);
- if (n_entries == 0)
- return 0;
level = intel_bios_hdmi_level_shift(encoder);
if (level < 0)
- level = default_entry;
-
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
+ level = trans->hdmi_default_entry;
return level;
}
+static bool has_buf_trans_select(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) < 10 && !IS_BROXTON(i915);
+}
+
+static bool has_iboost(struct drm_i915_private *i915)
+{
+ return DISPLAY_VER(i915) == 9 && !IS_BROXTON(i915);
+}
+
/*
* Starting with Haswell, DDI port buffers must be programmed with correct
* values in advance. This function programs the correct values for
@@ -103,22 +107,22 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
u32 iboost_bit = 0;
int i, n_entries;
enum port port = encoder->port;
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
/* If we're boosting the current, set bit 31 of trans1 */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (has_iboost(dev_priv) &&
intel_bios_encoder_dp_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
- ddi_translations->entries[i].hsw.trans1 | iboost_bit);
+ trans->entries[i].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
- ddi_translations->entries[i].hsw.trans2);
+ trans->entries[i].hsw.trans2);
}
}
@@ -128,31 +132,29 @@ void hsw_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
* HDMI/DVI use cases.
*/
static void hsw_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
u32 iboost_bit = 0;
int n_entries;
enum port port = encoder->port;
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv) &&
+ if (has_iboost(dev_priv) &&
intel_bios_encoder_hdmi_boost_level(encoder->devdata))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
- ddi_translations->entries[level].hsw.trans1 | iboost_bit);
+ trans->entries[level].hsw.trans1 | iboost_bit);
intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
- ddi_translations->entries[level].hsw.trans2);
+ trans->entries[level].hsw.trans2);
}
void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -281,13 +283,14 @@ static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder,
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum phy phy = intel_port_to_phy(i915, encoder->port);
+ /* DDI_BUF_CTL_ENABLE will be set by intel_ddi_prepare_link_retrain() later */
intel_dp->DP = dig_port->saved_port_bits |
- DDI_BUF_CTL_ENABLE | DDI_BUF_TRANS_SELECT(0);
- intel_dp->DP |= DDI_PORT_WIDTH(crtc_state->lane_count);
+ DDI_PORT_WIDTH(crtc_state->lane_count) |
+ DDI_BUF_TRANS_SELECT(0);
if (IS_ALDERLAKE_P(i915) && intel_phy_is_tc(i915, phy)) {
intel_dp->DP |= ddi_buf_phy_link_rate(crtc_state->port_clock);
- if (dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_dp->DP |= DDI_BUF_CTL_TC_PHY_OWNERSHIP;
}
}
@@ -407,6 +410,20 @@ static u32 bdw_trans_port_sync_master_select(enum transcoder master_transcoder)
return master_transcoder + 1;
}
+static void
+intel_ddi_config_transcoder_dp2(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ u32 val = 0;
+
+ if (intel_dp_is_uhbr(crtc_state))
+ val = TRANS_DP2_128B132B_CHANNEL_CODING;
+
+ intel_de_write(i915, TRANS_DP2_CTL(cpu_transcoder), val);
+}
+
/*
* Returns the TRANS_DDI_FUNC_CTL value based on CRTC state.
*
@@ -488,10 +505,13 @@ intel_ddi_transcoder_func_reg_val_get(struct intel_encoder *encoder,
if (crtc_state->hdmi_high_tmds_clock_ratio)
temp |= TRANS_DDI_HIGH_TMDS_CHAR_RATE;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- temp |= TRANS_DDI_MODE_SELECT_FDI;
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
temp |= (crtc_state->fdi_lanes - 1) << 1;
} else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
- temp |= TRANS_DDI_MODE_SELECT_DP_MST;
+ if (intel_dp_is_uhbr(crtc_state))
+ temp |= TRANS_DDI_MODE_SELECT_FDI_OR_128B132B;
+ else
+ temp |= TRANS_DDI_MODE_SELECT_DP_MST;
temp |= DDI_PORT_WIDTH(crtc_state->lane_count);
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -678,8 +698,13 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
ret = false;
break;
- case TRANS_DDI_MODE_SELECT_FDI:
- ret = type == DRM_MODE_CONNECTOR_VGA;
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (HAS_DP20(dev_priv))
+ /* 128b/132b */
+ ret = false;
+ else
+ /* FDI */
+ ret = type == DRM_MODE_CONNECTOR_VGA;
break;
default:
@@ -766,8 +791,9 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if ((tmp & port_mask) != ddi_select)
continue;
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
- TRANS_DDI_MODE_SELECT_DP_MST)
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST ||
+ (HAS_DP20(dev_priv) &&
+ (tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_FDI_OR_128B132B))
mst_pipe_mask |= BIT(p);
*pipe_mask |= BIT(p);
@@ -861,8 +887,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
dig_port = enc_to_dig_port(encoder);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -947,16 +972,14 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
iboost = intel_bios_encoder_dp_boost_level(encoder->devdata);
if (iboost == 0) {
- const struct intel_ddi_buf_trans *ddi_translations;
+ const struct intel_ddi_buf_trans *trans;
int n_entries;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
- iboost = ddi_translations->entries[level].hsw.i_boost;
+ iboost = trans->entries[level].hsw.i_boost;
}
/* Make sure that the requested I_boost is valid */
@@ -971,28 +994,6 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
_skl_ddi_set_iboost(dev_priv, PORT_E, iboost);
}
-static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
- enum port port = encoder->port;
- int n_entries;
-
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
- return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
-
- bxt_ddi_phy_set_signal_level(dev_priv, port,
- ddi_translations->entries[level].bxt.margin,
- ddi_translations->entries[level].bxt.scale,
- ddi_translations->entries[level].bxt.enable,
- ddi_translations->entries[level].bxt.deemphasis);
-}
-
static u8 intel_ddi_dp_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
@@ -1022,33 +1023,43 @@ static u8 intel_ddi_dp_preemph_max(struct intel_dp *intel_dp)
return DP_TRAIN_PRE_EMPH_LEVEL_3;
}
+static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state,
+ int lane)
+{
+ if (crtc_state->port_clock > 600000)
+ return 0;
+
+ if (crtc_state->lane_count == 4)
+ return lane >= 1 ? LOADGEN_SELECT : 0;
+ else
+ return lane == 1 || lane == 2 ? LOADGEN_SELECT : 0;
+}
+
static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
int n_entries, ln;
u32 val;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
val = EDP4K2K_MODE_OVRD_EN | EDP4K2K_MODE_OVRD_OPTIMIZED;
- intel_dp->hobl_active = is_hobl_buf_trans(ddi_translations);
+ intel_dp->hobl_active = is_hobl_buf_trans(trans);
intel_de_rmw(dev_priv, ICL_PORT_CL_DW10(phy), val,
intel_dp->hobl_active ? val : 0);
}
/* Set PORT_TX_DW5 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
@@ -1057,52 +1068,48 @@ static void icl_ddi_combo_vswing_program(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN(0, phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
- val |= SWING_SEL_UPPER(ddi_translations->entries[level].icl.dw2_swing_sel);
- val |= SWING_SEL_LOWER(ddi_translations->entries[level].icl.dw2_swing_sel);
+ val |= SWING_SEL_UPPER(trans->entries[level].icl.dw2_swing_sel);
+ val |= SWING_SEL_LOWER(trans->entries[level].icl.dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
- val |= POST_CURSOR_1(ddi_translations->entries[level].icl.dw4_post_cursor_1);
- val |= POST_CURSOR_2(ddi_translations->entries[level].icl.dw4_post_cursor_2);
- val |= CURSOR_COEFF(ddi_translations->entries[level].icl.dw4_cursor_coeff);
+ val |= POST_CURSOR_1(trans->entries[level].icl.dw4_post_cursor_1);
+ val |= POST_CURSOR_2(trans->entries[level].icl.dw4_post_cursor_2);
+ val |= CURSOR_COEFF(trans->entries[level].icl.dw4_cursor_coeff);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN(0, phy));
val &= ~N_SCALAR_MASK;
- val |= N_SCALAR(ddi_translations->entries[level].icl.dw7_n_scalar);
+ val |= N_SCALAR(trans->entries[level].icl.dw7_n_scalar);
intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
-static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void icl_combo_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- int width, rate, ln;
u32 val;
-
- width = crtc_state->lane_count;
- rate = crtc_state->port_clock;
+ int ln;
/*
* 1. If port type is eDP or DP,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN(0, phy));
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
val &= ~COMMON_KEEPER_EN;
else
@@ -1111,19 +1118,15 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* 2. Program loadgen select */
/*
- * Program PORT_TX_DW4_LN depending on Bit rate and used lanes
+ * Program PORT_TX_DW4 depending on Bit rate and used lanes
* <= 6 GHz and 4 lanes (LN0=0, LN1=1, LN2=1, LN3=1)
* <= 6 GHz and 1,2 lanes (LN0=0, LN1=1, LN2=1, LN3=0)
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
- for (ln = 0; ln <= 3; ln++) {
+ for (ln = 0; ln < 4; ln++) {
val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
-
- if ((rate <= 600000 && width == 4 && ln >= 1) ||
- (rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
- val |= LOADGEN_SELECT;
- }
+ val |= icl_combo_phy_loadgen_select(crtc_state, ln);
intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
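/*
 * A hedged sketch of what the new icl_combo_phy_loadgen_select() helper
 * presumably returns per lane, reconstructed from the explicit per-lane
 * conditions removed above; the helper's real body lives outside this
 * hunk, so treat this as illustrative only.
 */
static u32 icl_combo_phy_loadgen_select(const struct intel_crtc_state *crtc_state,
					int lane)
{
	/* <= 6 GHz, 4 lanes: LN1-LN3; <= 6 GHz, 1-2 lanes: LN1-LN2 only */
	if ((crtc_state->port_clock <= 600000 && crtc_state->lane_count == 4 &&
	     lane >= 1) ||
	    (crtc_state->port_clock <= 600000 && crtc_state->lane_count < 4 &&
	     (lane == 1 || lane == 2)))
		return LOADGEN_SELECT;

	return 0;
}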
@@ -1133,37 +1136,35 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val &= ~TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
- icl_ddi_combo_vswing_program(encoder, crtc_state, level);
+ icl_ddi_combo_vswing_program(encoder, crtc_state);
/* 6. Set training enable to trigger update */
- val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN(0, phy));
val |= TX_TRAINING_EN;
intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
-static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void icl_mg_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
int n_entries, ln;
u32 val;
- if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
@@ -1181,13 +1182,13 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
+ trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
- ddi_translations->entries[level].mg.cri_txdeemph_override_17_12);
+ trans->entries[level].mg.cri_txdeemph_override_17_12);
intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
@@ -1197,9 +1198,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
+ trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
+ trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
@@ -1207,9 +1208,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
- ddi_translations->entries[level].mg.cri_txdeemph_override_5_0) |
+ trans->entries[level].mg.cri_txdeemph_override_5_0) |
CRI_TXDEEMPH_OVERRIDE_11_6(
- ddi_translations->entries[level].mg.cri_txdeemph_override_11_6) |
+ trans->entries[level].mg.cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
@@ -1269,45 +1270,29 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
}
}
-static void icl_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- icl_mg_phy_ddi_vswing_sequence(encoder, crtc_state, level);
-}
-
-static void
-tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
+static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port);
- const struct intel_ddi_buf_trans *ddi_translations;
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
u32 val, dpcnt_mask, dpcnt_val;
int n_entries, ln;
- if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(enc_to_dig_port(encoder)))
return;
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
- if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
return;
- if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
- level = n_entries - 1;
dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK |
DKL_TX_DE_EMPAHSIS_COEFF_MASK |
DKL_TX_VSWING_CONTROL_MASK);
- dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations->entries[level].dkl.dkl_vswing_control);
- dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations->entries[level].dkl.dkl_de_emphasis_control);
- dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations->entries[level].dkl.dkl_preshoot_control);
+ dpcnt_val = DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing);
+ dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis);
+ dpcnt_val |= DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot);
for (ln = 0; ln < 2; ln++) {
intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
@@ -1329,30 +1314,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
-
- if ((intel_crtc_has_dp_encoder(crtc_state) &&
- crtc_state->port_clock == 162000) ||
- (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) &&
- crtc_state->port_clock == 594000))
- val |= DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
- else
- val &= ~DKL_TX_LOADGEN_SHARING_PMD_DISABLE;
}
}
-static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int level)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
-
- if (intel_phy_is_combo(dev_priv, phy))
- icl_combo_phy_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- tgl_dkl_phy_ddi_vswing_sequence(encoder, crtc_state, level);
-}
-
static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
@@ -1371,65 +1335,63 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return 0;
}
-static int intel_ddi_dp_level(struct intel_dp *intel_dp)
+static int intel_ddi_dp_level(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
{
- u8 train_set = intel_dp->train_set[0];
- u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
- DP_TRAIN_PRE_EMPHASIS_MASK);
-
- return translate_signal_level(intel_dp, signal_levels);
-}
+ u8 train_set = intel_dp->train_set[lane];
-static void
-dg2_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ if (intel_dp_is_uhbr(crtc_state)) {
+ return train_set & DP_TX_FFE_PRESET_VALUE_MASK;
+ } else {
+ u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
- intel_snps_phy_ddi_vswing_sequence(encoder, level);
+ return translate_signal_level(intel_dp, signal_levels);
+ }
}
-static void
-tgl_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
-
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
-}
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
+ int level, n_entries;
-static void
-icl_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&i915->drm, !trans))
+ return 0;
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
-}
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ level = intel_ddi_hdmi_level(encoder, trans);
+ else
+ level = intel_ddi_dp_level(enc_to_intel_dp(encoder), crtc_state,
+ lane);
-static void
-bxt_set_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- int level = intel_ddi_dp_level(intel_dp);
+ if (drm_WARN_ON_ONCE(&i915->drm, level >= n_entries))
+ level = n_entries - 1;
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
+ return level;
}
static void
-hsw_set_signal_levels(struct intel_dp *intel_dp,
+hsw_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int level = intel_ddi_dp_level(intel_dp);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
enum port port = encoder->port;
u32 signal_levels;
+ if (has_iboost(dev_priv))
+ skl_ddi_set_iboost(encoder, crtc_state, level);
+
+ /* HDMI ignores the rest */
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ return;
+
signal_levels = DDI_BUF_TRANS_SELECT(level);
drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
@@ -1438,9 +1400,6 @@ hsw_set_signal_levels(struct intel_dp *intel_dp,
intel_dp->DP &= ~DDI_BUF_EMP_MASK;
intel_dp->DP |= signal_levels;
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
-
intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
}
@@ -2059,7 +2018,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
u8 width;
if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode == TC_PORT_TBT_ALT)
+ intel_tc_port_in_tbt_alt_mode(dig_port))
return;
if (DISPLAY_VER(dev_priv) >= 12) {
@@ -2084,7 +2043,7 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port,
switch (pin_assignment) {
case 0x0:
drm_WARN_ON(&dev_priv->drm,
- dig_port->tc_mode != TC_PORT_LEGACY);
+ !intel_tc_port_in_legacy_mode(dig_port));
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -2329,15 +2288,19 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count);
/*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
* 1. Enable Power Wells
*
* This was handled at the beginning of intel_atomic_commit_tail(),
@@ -2353,8 +2316,7 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
/* 4. Enable IO power */
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2374,7 +2336,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_enable_pipe_clock(encoder, crtc_state);
- /* 5.b Not relevant to i915 for now */
+ /* 5.b Configure transcoder for DP 2.0 128b/132b */
+ intel_ddi_config_transcoder_dp2(encoder, crtc_state);
/*
* 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST
@@ -2391,21 +2354,12 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 5.e Configure voltage swing and related IO settings */
- intel_snps_phy_ddi_vswing_sequence(encoder, level);
-
- /*
- * 5.f Configure and enable DDI_BUF_CTL
- * 5.g Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
- * after 1200 us.
- *
- * We only configure what the register value will be here. Actual
- * enabling happens during link training farther down.
- */
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
+ intel_dp_configure_protocol_converter(intel_dp, crtc_state);
intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true);
/*
* DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit
@@ -2413,6 +2367,8 @@ static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state,
* training
*/
intel_dp_sink_set_fec_ready(intel_dp, crtc_state);
+ intel_dp_check_frl_training(intel_dp);
+ intel_dp_pcon_dsc_configure(intel_dp, crtc_state);
/*
* 5.h Follow DisplayPort specification training sequence (see notes for
@@ -2439,16 +2395,20 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
intel_dp_set_link_params(intel_dp,
crtc_state->port_clock,
crtc_state->lane_count);
/*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
+ /*
* 1. Enable Power Wells
*
* This was handled at the beginning of intel_atomic_commit_tail(),
@@ -2476,8 +2436,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
intel_ddi_enable_clock(encoder, crtc_state);
/* 5. If IO power is controlled through PWR_WELL_CTL, Enable IO Power */
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2517,7 +2476,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
/* 7.e Configure voltage swing and related IO settings */
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
+ encoder->set_signal_levels(encoder, crtc_state);
/*
* 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up
@@ -2530,16 +2489,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state,
*/
intel_ddi_mso_configure(crtc_state);
- /*
- * 7.g Configure and enable DDI_BUF_CTL
- * 7.h Wait for DDI_BUF_CTL DDI Idle Status = 0b (Not Idle), timeout
- * after 500 us.
- *
- * We only configure what the register value will be here. Actual
- * enabling happens during link training farther down.
- */
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
-
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
@@ -2582,10 +2531,8 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST);
- int level = intel_ddi_dp_level(intel_dp);
if (DISPLAY_VER(dev_priv) < 11)
drm_WARN_ON(&dev_priv->drm,
@@ -2597,12 +2544,17 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
crtc_state->port_clock,
crtc_state->lane_count);
+ /*
+ * We only configure what the register value will be here. Actual
+ * enabling happens during link training farther down.
+ */
+ intel_ddi_init_dp_buf_reg(encoder, crtc_state);
+
intel_pps_on(intel_dp);
intel_ddi_enable_clock(encoder, crtc_state);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT) {
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port)) {
drm_WARN_ON(&dev_priv->drm, dig_port->ddi_io_wakeref);
dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv,
dig_port->ddi_io_power_domain);
@@ -2610,16 +2562,13 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state,
icl_program_mg_dp_mode(dig_port, crtc_state);
- if (DISPLAY_VER(dev_priv) >= 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
+ if (has_buf_trans_select(dev_priv))
hsw_prepare_dp_ddi_buffers(encoder, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
+
intel_ddi_power_up_lanes(encoder, crtc_state);
- intel_ddi_init_dp_buf_reg(encoder, crtc_state);
if (!is_mst)
intel_dp_set_power(intel_dp, DP_SET_POWER_D0);
intel_dp_configure_protocol_converter(intel_dp, crtc_state);
@@ -2772,7 +2721,6 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
struct intel_dp *intel_dp = &dig_port->dp;
bool is_mst = intel_crtc_has_type(old_crtc_state,
INTEL_OUTPUT_DP_MST);
- enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (!is_mst)
intel_dp_set_infoframes(encoder, false,
@@ -2815,8 +2763,7 @@ static void intel_ddi_post_disable_dp(struct intel_atomic_state *state,
intel_pps_vdd_on(intel_dp);
intel_pps_off(intel_dp);
- if (!intel_phy_is_tc(dev_priv, phy) ||
- dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (!intel_tc_port_in_tbt_alt_mode(dig_port))
intel_display_power_put(dev_priv,
dig_port->ddi_io_power_domain,
fetch_and_zero(&dig_port->ddi_io_wakeref));
@@ -2862,7 +2809,7 @@ static void intel_ddi_post_disable(struct intel_atomic_state *state,
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
intel_vrr_disable(old_crtc_state);
@@ -3005,12 +2952,11 @@ static void intel_enable_ddi_dp(struct intel_atomic_state *state,
intel_dp_stop_link_train(intel_dp, crtc_state);
intel_edp_backlight_on(crtc_state, conn_state);
- intel_psr_enable(intel_dp, crtc_state, conn_state);
if (!dig_port->lspcon.active || dig_port->dp.has_hdmi_sink)
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_edp_drrs_enable(intel_dp, crtc_state);
+ intel_drrs_enable(intel_dp, crtc_state);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -3046,7 +2992,6 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_connector *connector = conn_state->connector;
- int level = intel_ddi_hdmi_level(encoder, crtc_state);
enum port port = encoder->port;
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
@@ -3056,19 +3001,10 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state,
"[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
connector->base.id, connector->name);
- if (IS_DG2(dev_priv))
- intel_snps_phy_ddi_vswing_sequence(encoder, U32_MAX);
- else if (DISPLAY_VER(dev_priv) >= 12)
- tgl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (DISPLAY_VER(dev_priv) == 11)
- icl_ddi_vswing_sequence(encoder, crtc_state, level);
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- bxt_ddi_vswing_sequence(encoder, crtc_state, level);
- else
- hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state, level);
+ if (has_buf_trans_select(dev_priv))
+ hsw_prepare_hdmi_ddi_buffers(encoder, crtc_state);
- if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv))
- skl_ddi_set_iboost(encoder, crtc_state, level);
+ encoder->set_signal_levels(encoder, crtc_state);
/* Display WA #1143: skl,kbl,cfl */
if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) {
@@ -3133,7 +3069,7 @@ static void intel_enable_ddi(struct intel_atomic_state *state,
intel_vrr_enable(encoder, crtc_state);
- intel_enable_pipe(crtc_state);
+ intel_enable_transcoder(crtc_state);
intel_crtc_vblank_on(crtc_state);
@@ -3198,7 +3134,7 @@ static void intel_pre_disable_ddi(struct intel_atomic_state *state,
return;
intel_dp = enc_to_intel_dp(encoder);
- intel_edp_drrs_disable(intel_dp, old_crtc_state);
+ intel_drrs_disable(intel_dp, old_crtc_state);
intel_psr_disable(intel_dp, old_crtc_state);
}
@@ -3226,11 +3162,10 @@ static void intel_ddi_update_pipe_dp(struct intel_atomic_state *state,
intel_ddi_set_dp_msa(crtc_state, conn_state);
- intel_psr_update(intel_dp, crtc_state, conn_state);
intel_dp_set_infoframes(encoder, true, crtc_state, conn_state);
- intel_edp_drrs_update(intel_dp, crtc_state);
+ intel_drrs_update(intel_dp, crtc_state);
- intel_panel_update_backlight(state, encoder, crtc_state, conn_state);
+ intel_backlight_update(state, encoder, crtc_state, conn_state);
}
void intel_ddi_update_pipe(struct intel_atomic_state *state,
@@ -3293,7 +3228,7 @@ intel_ddi_pre_pll_enable(struct intel_atomic_state *state,
intel_ddi_main_link_aux_domain(dig_port));
}
- if (is_tc_port && dig_port->tc_mode != TC_PORT_TBT_ALT)
+ if (is_tc_port && !intel_tc_port_in_tbt_alt_mode(dig_port))
/*
* Program the lane count for static/dynamic connections on
* Type-C ports. Skip this step for TBT.
@@ -3553,9 +3488,6 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
pipe_config->lane_count = 4;
break;
- case TRANS_DDI_MODE_SELECT_FDI:
- pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
- break;
case TRANS_DDI_MODE_SELECT_DP_SST:
if (encoder->type == INTEL_OUTPUT_EDP)
pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
@@ -3584,6 +3516,13 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder,
pipe_config->infoframes.enable |=
intel_hdmi_infoframes_enabled(encoder, pipe_config);
break;
+ case TRANS_DDI_MODE_SELECT_FDI_OR_128B132B:
+ if (!HAS_DP20(dev_priv)) {
+ /* FDI */
+ pipe_config->output_types |= BIT(INTEL_OUTPUT_ANALOG);
+ break;
+ }
+ fallthrough; /* 128b/132b */
case TRANS_DDI_MODE_SELECT_DP_MST:
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST);
pipe_config->lane_count =
@@ -3995,13 +3934,15 @@ static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct drm_i915_private *i915 = to_i915(encoder->dev);
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
intel_dp_encoder_flush_work(encoder);
+ if (intel_phy_is_tc(i915, phy))
+ intel_tc_port_flush_work(dig_port);
intel_display_power_flush_work(i915);
drm_encoder_cleanup(encoder);
- if (dig_port)
- kfree(dig_port->hdcp_port_data.streams);
+ kfree(dig_port->hdcp_port_data.streams);
kfree(dig_port);
}
@@ -4022,7 +3963,6 @@ static const struct drm_encoder_funcs intel_ddi_funcs = {
static struct intel_connector *
intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
struct intel_connector *connector;
enum port port = dig_port->base.port;
@@ -4035,17 +3975,6 @@ intel_ddi_init_dp_connector(struct intel_digital_port *dig_port)
dig_port->dp.set_link_train = intel_ddi_set_link_train;
dig_port->dp.set_idle_link_train = intel_ddi_set_idle_link_train;
- if (IS_DG2(dev_priv))
- dig_port->dp.set_signal_levels = dg2_set_signal_levels;
- else if (DISPLAY_VER(dev_priv) >= 12)
- dig_port->dp.set_signal_levels = tgl_set_signal_levels;
- else if (DISPLAY_VER(dev_priv) >= 11)
- dig_port->dp.set_signal_levels = icl_set_signal_levels;
- else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dig_port->dp.set_signal_levels = bxt_set_signal_levels;
- else
- dig_port->dp.set_signal_levels = hsw_set_signal_levels;
-
dig_port->dp.voltage_max = intel_ddi_dp_voltage_max;
dig_port->dp.preemph_max = intel_ddi_dp_preemph_max;
@@ -4421,7 +4350,7 @@ static void intel_ddi_encoder_suspend(struct intel_encoder *encoder)
if (!intel_phy_is_tc(i915, phy))
return;
- intel_tc_port_disconnect_phy(dig_port);
+ intel_tc_port_flush_work(dig_port);
}
static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
@@ -4436,7 +4365,7 @@ static void intel_ddi_encoder_shutdown(struct intel_encoder *encoder)
if (!intel_phy_is_tc(i915, phy))
return;
- intel_tc_port_disconnect_phy(dig_port);
+ intel_tc_port_flush_work(dig_port);
}
#define port_tc_name(port) ((port) - PORT_TC1 + '1')
@@ -4617,6 +4546,24 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->get_config = hsw_ddi_get_config;
}
+ if (IS_DG2(dev_priv)) {
+ encoder->set_signal_levels = intel_snps_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 12) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = tgl_dkl_phy_set_signal_levels;
+ } else if (DISPLAY_VER(dev_priv) >= 11) {
+ if (intel_phy_is_combo(dev_priv, phy))
+ encoder->set_signal_levels = icl_combo_phy_set_signal_levels;
+ else
+ encoder->set_signal_levels = icl_mg_phy_set_signal_levels;
+ } else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
+ encoder->set_signal_levels = bxt_ddi_phy_set_signal_levels;
+ } else {
+ encoder->set_signal_levels = hsw_set_signal_levels;
+ }
+
intel_ddi_buf_trans_init(encoder);
if (DISPLAY_VER(dev_priv) >= 13)
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 7d448485d887..d6971717ef9c 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -59,13 +59,12 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state,
bool state);
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state);
-u32 bxt_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-u32 ddi_signal_levels(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
int intel_ddi_toggle_hdcp_bits(struct intel_encoder *intel_encoder,
enum transcoder cpu_transcoder,
bool enable, u32 hdcp_mask);
void intel_ddi_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
+int intel_ddi_level(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int lane);
#endif /* __INTEL_DDI_H__ */
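/*
 * A condensed, illustrative sketch of the pattern the per-PHY
 * ->set_signal_levels() hooks above now share: fetch the buf trans
 * table, derive and clamp the drive level via intel_ddi_level(), then
 * program the platform-specific swing/de-emphasis registers. The
 * function name is made up for illustration and is not one of the
 * kernel's hooks.
 */
static void example_phy_set_signal_levels(struct intel_encoder *encoder,
					  const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	int level = intel_ddi_level(encoder, crtc_state, 0);
	const struct intel_ddi_buf_trans *trans;
	int n_entries;

	trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
	if (drm_WARN_ON_ONCE(&i915->drm, !trans))
		return;

	drm_dbg_kms(&i915->drm, "using signal level %d of %d entries\n",
		    level, n_entries);

	/* platform-specific programming of trans->entries[level] goes here */
}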
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
index ba2c08f1a797..78cd8f77b49d 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c
@@ -8,12 +8,13 @@
#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
/* HDMI/DVI modes ignore everything but the last 2 items. So we share
* them for both DP and FDI transports, allowing those ports to
* automatically adapt to HDMI connections as well
*/
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_dp[] = {
{ .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x0005000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00040006, 0x0 } },
@@ -25,12 +26,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_dp[] = {
{ .hsw = { 0x80D75FFF, 0x000B0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_dp = {
- .entries = _hsw_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_dp),
+static const struct intel_ddi_buf_trans hsw_trans_dp = {
+ .entries = _hsw_trans_dp,
+ .num_entries = ARRAY_SIZE(_hsw_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_fdi[] = {
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x000F000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00060006, 0x0 } },
@@ -42,12 +43,12 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_fdi[] = {
{ .hsw = { 0x00D75FFF, 0x001E0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_fdi = {
- .entries = _hsw_ddi_translations_fdi,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_fdi),
+static const struct intel_ddi_buf_trans hsw_trans_fdi = {
+ .entries = _hsw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_fdi),
};
-static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _hsw_trans_hdmi[] = {
/* Idx NT mV d T mV d db */
{ .hsw = { 0x00FFFFFF, 0x0006000E, 0x0 } }, /* 0: 400 400 0 */
{ .hsw = { 0x00E79FFF, 0x000E000C, 0x0 } }, /* 1: 400 500 2 */
@@ -63,13 +64,13 @@ static const union intel_ddi_buf_trans_entry _hsw_ddi_translations_hdmi[] = {
{ .hsw = { 0x80FFFFFF, 0x00030002, 0x0 } }, /* 11: 1000 1000 0 */
};
-static const struct intel_ddi_buf_trans hsw_ddi_translations_hdmi = {
- .entries = _hsw_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_hsw_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans hsw_trans_hdmi = {
+ .entries = _hsw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_hsw_trans_hdmi),
.hdmi_default_entry = 6,
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_edp[] = {
{ .hsw = { 0x00FFFFFF, 0x00000012, 0x0 } },
{ .hsw = { 0x00EBAFFF, 0x00020011, 0x0 } },
{ .hsw = { 0x00C71FFF, 0x0006000F, 0x0 } },
@@ -81,12 +82,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_edp[] = {
{ .hsw = { 0x00DB6FFF, 0x000A000C, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_edp = {
- .entries = _bdw_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_edp),
+static const struct intel_ddi_buf_trans bdw_trans_edp = {
+ .entries = _bdw_trans_edp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_edp),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_dp[] = {
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } },
{ .hsw = { 0x00BEFFFF, 0x00140006, 0x0 } },
@@ -98,12 +99,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_dp[] = {
{ .hsw = { 0x80D75FFF, 0x001B0002, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_dp = {
- .entries = _bdw_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_dp),
+static const struct intel_ddi_buf_trans bdw_trans_dp = {
+ .entries = _bdw_trans_dp,
+ .num_entries = ARRAY_SIZE(_bdw_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_fdi[] = {
{ .hsw = { 0x00FFFFFF, 0x0001000E, 0x0 } },
{ .hsw = { 0x00D75FFF, 0x0004000A, 0x0 } },
{ .hsw = { 0x00C30FFF, 0x00070006, 0x0 } },
@@ -115,12 +116,12 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_fdi[] = {
{ .hsw = { 0x00D75FFF, 0x000C0000, 0x0 } },
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_fdi = {
- .entries = _bdw_ddi_translations_fdi,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_fdi),
+static const struct intel_ddi_buf_trans bdw_trans_fdi = {
+ .entries = _bdw_trans_fdi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_fdi),
};
-static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _bdw_trans_hdmi[] = {
/* Idx NT mV d T mV df db */
{ .hsw = { 0x00FFFFFF, 0x0007000E, 0x0 } }, /* 0: 400 400 0 */
{ .hsw = { 0x00D75FFF, 0x000E000A, 0x0 } }, /* 1: 400 600 3.5 */
@@ -134,14 +135,14 @@ static const union intel_ddi_buf_trans_entry _bdw_ddi_translations_hdmi[] = {
{ .hsw = { 0x80FFFFFF, 0x001B0002, 0x0 } }, /* 9: 1000 1000 0 */
};
-static const struct intel_ddi_buf_trans bdw_ddi_translations_hdmi = {
- .entries = _bdw_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_bdw_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans bdw_trans_hdmi = {
+ .entries = _bdw_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bdw_trans_hdmi),
.hdmi_default_entry = 7,
};
/* Skylake H and S */
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_dp[] = {
{ .hsw = { 0x00002016, 0x000000A0, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009B, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -153,13 +154,13 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_dp = {
- .entries = _skl_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_trans_dp = {
+ .entries = _skl_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_trans_dp),
};
/* Skylake U */
-static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_u_trans_dp[] = {
{ .hsw = { 0x0000201B, 0x000000A2, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x1 } },
@@ -171,13 +172,13 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_u_ddi_translations_dp = {
- .entries = _skl_u_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_u_trans_dp = {
+ .entries = _skl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_dp),
};
/* Skylake Y */
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_dp[] = {
{ .hsw = { 0x00000018, 0x000000A2, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -189,13 +190,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_dp = {
- .entries = _skl_y_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_dp),
+static const struct intel_ddi_buf_trans skl_y_trans_dp = {
+ .entries = _skl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_dp),
};
/* Kabylake H and S */
-static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_trans_dp[] = {
{ .hsw = { 0x00002016, 0x000000A0, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009B, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -207,13 +208,13 @@ static const union intel_ddi_buf_trans_entry _kbl_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans kbl_ddi_translations_dp = {
- .entries = _kbl_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_trans_dp = {
+ .entries = _kbl_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_trans_dp),
};
/* Kabylake U */
-static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_u_trans_dp[] = {
{ .hsw = { 0x0000201B, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -225,13 +226,13 @@ static const union intel_ddi_buf_trans_entry _kbl_u_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans kbl_u_ddi_translations_dp = {
- .entries = _kbl_u_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_u_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_u_trans_dp = {
+ .entries = _kbl_u_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_u_trans_dp),
};
/* Kabylake Y */
-static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _kbl_y_trans_dp[] = {
{ .hsw = { 0x00001017, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x00000088, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CD, 0x3 } },
@@ -243,16 +244,16 @@ static const union intel_ddi_buf_trans_entry _kbl_y_ddi_translations_dp[] = {
{ .hsw = { 0x80005012, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans kbl_y_ddi_translations_dp = {
- .entries = _kbl_y_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_kbl_y_ddi_translations_dp),
+static const struct intel_ddi_buf_trans kbl_y_trans_dp = {
+ .entries = _kbl_y_trans_dp,
+ .num_entries = ARRAY_SIZE(_kbl_y_trans_dp),
};
/*
* Skylake/Kabylake H and S
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000A9, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A2, 0x0 } },
@@ -265,16 +266,16 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x000000DF, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_edp = {
- .entries = _skl_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_trans_edp = {
+ .entries = _skl_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_trans_edp),
};
/*
* Skylake/Kabylake U
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_u_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000A9, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A2, 0x0 } },
@@ -287,16 +288,16 @@ static const union intel_ddi_buf_trans_entry _skl_u_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x000000DF, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_u_ddi_translations_edp = {
- .entries = _skl_u_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_u_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_u_trans_edp = {
+ .entries = _skl_u_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_u_trans_edp),
};
/*
* Skylake/Kabylake Y
* eDP 1.4 low vswing translation parameters
*/
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_edp[] = {
{ .hsw = { 0x00000018, 0x000000A8, 0x0 } },
{ .hsw = { 0x00004013, 0x000000AB, 0x0 } },
{ .hsw = { 0x00007011, 0x000000A4, 0x0 } },
@@ -309,13 +310,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_edp[] = {
{ .hsw = { 0x00000018, 0x0000008A, 0x0 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_edp = {
- .entries = _skl_y_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_edp),
+static const struct intel_ddi_buf_trans skl_y_trans_edp = {
+ .entries = _skl_y_trans_edp,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_edp),
};
/* Skylake/Kabylake U, H and S */
-static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _skl_trans_hdmi[] = {
{ .hsw = { 0x00000018, 0x000000AC, 0x0 } },
{ .hsw = { 0x00005012, 0x0000009D, 0x0 } },
{ .hsw = { 0x00007011, 0x00000088, 0x0 } },
@@ -329,14 +330,14 @@ static const union intel_ddi_buf_trans_entry _skl_ddi_translations_hdmi[] = {
{ .hsw = { 0x80000018, 0x000000C0, 0x1 } },
};
-static const struct intel_ddi_buf_trans skl_ddi_translations_hdmi = {
- .entries = _skl_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_skl_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans skl_trans_hdmi = {
+ .entries = _skl_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_trans_hdmi),
.hdmi_default_entry = 8,
};
/* Skylake/Kabylake Y */
-static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _skl_y_trans_hdmi[] = {
{ .hsw = { 0x00000018, 0x000000A1, 0x0 } },
{ .hsw = { 0x00005012, 0x000000DF, 0x0 } },
{ .hsw = { 0x80007011, 0x000000CB, 0x3 } },
@@ -350,13 +351,13 @@ static const union intel_ddi_buf_trans_entry _skl_y_ddi_translations_hdmi[] = {
{ .hsw = { 0x80000018, 0x000000C0, 0x3 } },
};
-static const struct intel_ddi_buf_trans skl_y_ddi_translations_hdmi = {
- .entries = _skl_y_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_skl_y_ddi_translations_hdmi),
+static const struct intel_ddi_buf_trans skl_y_trans_hdmi = {
+ .entries = _skl_y_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_skl_y_trans_hdmi),
.hdmi_default_entry = 8,
};
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_dp[] = {
/* Idx NT mV diff db */
{ .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
{ .bxt = { 78, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
@@ -370,12 +371,12 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_dp[] = {
{ .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_dp = {
- .entries = _bxt_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_dp),
+static const struct intel_ddi_buf_trans bxt_trans_dp = {
+ .entries = _bxt_trans_dp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_edp[] = {
/* Idx NT mV diff db */
{ .bxt = { 26, 0, 0, 128, } }, /* 0: 200 0 */
{ .bxt = { 38, 0, 0, 112, } }, /* 1: 200 1.5 */
@@ -389,15 +390,15 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_edp[] = {
{ .bxt = { 48, 0, 0, 128, } }, /* 9: 300 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_edp = {
- .entries = _bxt_ddi_translations_edp,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_edp),
+static const struct intel_ddi_buf_trans bxt_trans_edp = {
+ .entries = _bxt_trans_edp,
+ .num_entries = ARRAY_SIZE(_bxt_trans_edp),
};
/* BSpec has 2 recommended values - entries 0 and 8.
* Using the entry with higher vswing.
*/
-static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _bxt_trans_hdmi[] = {
/* Idx NT mV diff db */
{ .bxt = { 52, 0x9A, 0, 128, } }, /* 0: 400 0 */
{ .bxt = { 52, 0x9A, 0, 85, } }, /* 1: 400 3.5 */
@@ -411,14 +412,14 @@ static const union intel_ddi_buf_trans_entry _bxt_ddi_translations_hdmi[] = {
{ .bxt = { 154, 0x9A, 1, 128, } }, /* 9: 1200 0 */
};
-static const struct intel_ddi_buf_trans bxt_ddi_translations_hdmi = {
- .entries = _bxt_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_bxt_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_bxt_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans bxt_trans_hdmi = {
+ .entries = _bxt_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_bxt_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_bxt_trans_hdmi) - 1,
};
-/* icl_combo_phy_ddi_translations */
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3[] = {
+/* icl_combo_phy_trans */
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_dp_hbr2_edp_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -432,12 +433,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3 = {
- .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_dp_hbr2_edp_hbr3 = {
+ .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x0, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
@@ -451,12 +452,12 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_edp
{ .icl = { 0x9, 0x7F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_edp_hbr2 = {
+ .entries = _icl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _icl_combo_phy_trans_hdmi[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x60, 0x3F, 0x00, 0x00 } }, /* 450 450 0.0 */
{ .icl = { 0xB, 0x73, 0x36, 0x00, 0x09 } }, /* 450 650 3.2 */
@@ -467,13 +468,13 @@ static const union intel_ddi_buf_trans_entry _icl_combo_phy_ddi_translations_hdm
{ .icl = { 0x6, 0x7F, 0x35, 0x00, 0x0A } }, /* 600 850 3.0 */
};
-static const struct intel_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi = {
- .entries = _icl_combo_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = {
+ .entries = _icl_combo_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_combo_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[] = {
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
@@ -487,12 +488,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_dp[
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_dp = {
- .entries = _ehl_combo_phy_ddi_translations_dp,
- .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_dp),
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_dp = {
+ .entries = _ehl_combo_phy_trans_dp,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_dp),
};
-static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
@@ -506,12 +507,12 @@ static const union intel_ddi_buf_trans_entry _ehl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans ehl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _ehl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_ehl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans ehl_combo_phy_trans_edp_hbr2 = {
+ .entries = _ehl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_ehl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x38, 0x00, 0x07 } }, /* 200 250 1.9 */
@@ -525,12 +526,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr = {
- .entries = _jsl_combo_phy_ddi_translations_edp_hbr,
- .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr),
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr = {
+ .entries = _jsl_combo_phy_trans_edp_hbr,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr),
};
-static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _jsl_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 200 0.0 */
{ .icl = { 0x8, 0x7F, 0x3F, 0x00, 0x00 } }, /* 200 250 1.9 */
@@ -544,12 +545,12 @@ static const union intel_ddi_buf_trans_entry _jsl_combo_phy_ddi_translations_edp
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans jsl_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _jsl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_jsl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans jsl_combo_phy_trans_edp_hbr2 = {
+ .entries = _jsl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_jsl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_rbr_hbr[] = {
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_rbr_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
@@ -563,12 +564,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_rbr_hbr = {
- .entries = _dg1_combo_phy_ddi_translations_dp_rbr_hbr,
- .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_rbr_hbr),
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_rbr_hbr = {
+ .entries = _dg1_combo_phy_trans_dp_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_rbr_hbr),
};
-static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _dg1_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x48, 0x35, 0x00, 0x0A } }, /* 350 500 3.1 */
@@ -582,12 +583,12 @@ static const union intel_ddi_buf_trans_entry _dg1_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans dg1_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_dg1_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans dg1_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _dg1_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_dg1_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hbr[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_rbr_hbr[] = {
/* Voltage swing pre-emphasis */
{ .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
{ .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
@@ -601,12 +602,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_rbr_hb
{ .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_rbr_hbr = {
- .entries = _icl_mg_phy_ddi_translations_rbr_hbr,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_rbr_hbr),
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_rbr_hbr = {
+ .entries = _icl_mg_phy_trans_rbr_hbr,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_rbr_hbr),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hbr2_hbr3[] = {
/* Voltage swing pre-emphasis */
{ .mg = { 0x18, 0x00, 0x00 } }, /* 0 0 */
{ .mg = { 0x1D, 0x00, 0x05 } }, /* 0 1 */
@@ -620,12 +621,12 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hbr2_h
{ .mg = { 0x3F, 0x00, 0x00 } }, /* 3 0 */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hbr2_hbr3 = {
- .entries = _icl_mg_phy_ddi_translations_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hbr2_hbr3),
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hbr2_hbr3 = {
+ .entries = _icl_mg_phy_trans_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _icl_mg_phy_trans_hdmi[] = {
/* HDMI Preset VS Pre-emph */
{ .mg = { 0x1A, 0x0, 0x0 } }, /* 1 400mV 0dB */
{ .mg = { 0x20, 0x0, 0x0 } }, /* 2 500mV 0dB */
@@ -639,13 +640,13 @@ static const union intel_ddi_buf_trans_entry _icl_mg_phy_ddi_translations_hdmi[]
{ .mg = { 0x36, 0x0, 0x9 } }, /* 10 Full -3 dB */
};
-static const struct intel_ddi_buf_trans icl_mg_phy_ddi_translations_hdmi = {
- .entries = _icl_mg_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans icl_mg_phy_trans_hdmi = {
+ .entries = _icl_mg_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_icl_mg_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_icl_mg_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
@@ -659,12 +660,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr = {
- .entries = _tgl_dkl_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_dp_hbr2[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x05 } }, /* 0 1 400mV 3.5 dB */
@@ -678,12 +679,12 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_dp_hb
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB HDMI default */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_dkl_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_dp_hbr2 = {
+ .entries = _tgl_dkl_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_dp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_trans_hdmi[] = {
/* HDMI Preset VS Pre-emph */
{ .dkl = { 0x7, 0x0, 0x0 } }, /* 1 400mV 0dB */
{ .dkl = { 0x6, 0x0, 0x0 } }, /* 2 500mV 0dB */
@@ -697,13 +698,13 @@ static const union intel_ddi_buf_trans_entry _tgl_dkl_phy_ddi_translations_hdmi[
{ .dkl = { 0x0, 0x0, 0xA } }, /* 10 Full -3 dB */
};
-static const struct intel_ddi_buf_trans tgl_dkl_phy_ddi_translations_hdmi = {
- .entries = _tgl_dkl_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans tgl_dkl_phy_trans_hdmi = {
+ .entries = _tgl_dkl_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_tgl_dkl_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x32, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -717,12 +718,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr = {
- .entries = _tgl_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr = {
+ .entries = _tgl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_dp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -736,12 +737,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_combo_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_dp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_dp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_trans_dp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */
@@ -755,16 +756,16 @@ static const union intel_ddi_buf_trans_entry _tgl_uy_combo_phy_ddi_translations_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans tgl_uy_combo_phy_ddi_translations_dp_hbr2 = {
- .entries = _tgl_uy_combo_phy_ddi_translations_dp_hbr2,
- .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_ddi_translations_dp_hbr2),
+static const struct intel_ddi_buf_trans tgl_uy_combo_phy_trans_dp_hbr2 = {
+ .entries = _tgl_uy_combo_phy_trans_dp_hbr2,
+ .num_entries = ARRAY_SIZE(_tgl_uy_combo_phy_trans_dp_hbr2),
};
/*
* Cloned the HOBL entry to comply with the voltage and pre-emphasis entries
* that DisplayPort specification requires
*/
*/
-static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp_hbr2_hobl[] = {
+static const union intel_ddi_buf_trans_entry _tgl_combo_phy_trans_edp_hbr2_hobl[] = {
/* VS pre-emp */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 0 */
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 0 1 */
@@ -777,12 +778,12 @@ static const union intel_ddi_buf_trans_entry _tgl_combo_phy_ddi_translations_edp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 2 1 */
};
-static const struct intel_ddi_buf_trans tgl_combo_phy_ddi_translations_edp_hbr2_hobl = {
- .entries = _tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
- .num_entries = ARRAY_SIZE(_tgl_combo_phy_ddi_translations_edp_hbr2_hobl),
+static const struct intel_ddi_buf_trans tgl_combo_phy_trans_edp_hbr2_hobl = {
+ .entries = _tgl_combo_phy_trans_edp_hbr2_hobl,
+ .num_entries = ARRAY_SIZE(_tgl_combo_phy_trans_edp_hbr2_hobl),
};
-static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x2F, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -796,12 +797,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr = {
- .entries = _rkl_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr = {
+ .entries = _rkl_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _rkl_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x50, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */
@@ -815,12 +816,12 @@ static const union intel_ddi_buf_trans_entry _rkl_combo_phy_ddi_translations_dp_
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans rkl_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _rkl_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_rkl_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans rkl_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _rkl_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_rkl_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -834,12 +835,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adls_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adls_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_dp_hbr2_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr2[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr2[] = {
/* NT mV Trans mV db */
{ .icl = { 0x9, 0x73, 0x3D, 0x00, 0x02 } }, /* 200 200 0.0 */
{ .icl = { 0x9, 0x7A, 0x3C, 0x00, 0x03 } }, /* 200 250 1.9 */
@@ -853,12 +854,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed
{ .icl = { 0x4, 0x6C, 0x3A, 0x00, 0x05 } }, /* 350 350 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr2 = {
- .entries = _adls_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr2 = {
+ .entries = _adls_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_edp_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adls_combo_phy_trans_edp_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -872,12 +873,12 @@ static const union intel_ddi_buf_trans_entry _adls_combo_phy_ddi_translations_ed
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adls_combo_phy_ddi_translations_edp_hbr3 = {
- .entries = _adls_combo_phy_ddi_translations_edp_hbr3,
- .num_entries = ARRAY_SIZE(_adls_combo_phy_ddi_translations_edp_hbr3),
+static const struct intel_ddi_buf_trans adls_combo_phy_trans_edp_hbr3 = {
+ .entries = _adls_combo_phy_trans_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_adls_combo_phy_trans_edp_hbr3),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hdmi[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_hdmi[] = {
/* NT mV Trans mV db */
{ .icl = { 0x6, 0x60, 0x3F, 0x00, 0x00 } }, /* 400 400 0.0 */
{ .icl = { 0x6, 0x68, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */
@@ -891,13 +892,13 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_hd
{ .icl = { 0xB, 0x7F, 0x33, 0x00, 0x0C } }, /* Full Red -3.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_hdmi = {
- .entries = _adlp_combo_phy_ddi_translations_hdmi,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi),
- .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_hdmi) - 1,
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_hdmi = {
+ .entries = _adlp_combo_phy_trans_hdmi,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi),
+ .hdmi_default_entry = ARRAY_SIZE(_adlp_combo_phy_trans_hdmi) - 1,
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -911,12 +912,12 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr = {
- .entries = _adlp_combo_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr = {
+ .entries = _adlp_combo_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adlp_combo_phy_trans_dp_hbr2_hbr3[] = {
/* NT mV Trans mV db */
{ .icl = { 0xA, 0x35, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */
{ .icl = { 0xA, 0x4F, 0x37, 0x00, 0x08 } }, /* 350 500 3.1 */
@@ -930,22 +931,22 @@ static const union intel_ddi_buf_trans_entry _adlp_combo_phy_ddi_translations_dp
{ .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adlp_combo_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adlp_combo_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_combo_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_combo_phy_trans_dp_hbr2_hbr3),
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_hbr3 = {
- .entries = _icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_hbr3 = {
+ .entries = _icl_combo_phy_trans_dp_hbr2_edp_hbr3,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_dp_hbr2_edp_hbr3),
};
-static const struct intel_ddi_buf_trans adlp_combo_phy_ddi_translations_edp_up_to_hbr2 = {
- .entries = _icl_combo_phy_ddi_translations_edp_hbr2,
- .num_entries = ARRAY_SIZE(_icl_combo_phy_ddi_translations_edp_hbr2),
+static const struct intel_ddi_buf_trans adlp_combo_phy_trans_edp_up_to_hbr2 = {
+ .entries = _icl_combo_phy_trans_edp_hbr2,
+ .num_entries = ARRAY_SIZE(_icl_combo_phy_trans_edp_hbr2),
};
-static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr[] = {
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x01 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x06 } }, /* 0 1 400mV 3.5 dB */
@@ -959,12 +960,12 @@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
};
-static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr = {
- .entries = _adlp_dkl_phy_ddi_translations_dp_hbr,
- .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr),
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr),
};
-static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3[] = {
+static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_trans_dp_hbr2_hbr3[] = {
/* VS pre-emp Non-trans mV Pre-emph dB */
{ .dkl = { 0x7, 0x0, 0x00 } }, /* 0 0 400mV 0 dB */
{ .dkl = { 0x5, 0x0, 0x04 } }, /* 0 1 400mV 3.5 dB */
@@ -978,21 +979,64 @@ static const union intel_ddi_buf_trans_entry _adlp_dkl_phy_ddi_translations_dp_h
{ .dkl = { 0x0, 0x0, 0x00 } }, /* 3 0 1200mV 0 dB */
};
-static const struct intel_ddi_buf_trans adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3 = {
- .entries = _adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
- .num_entries = ARRAY_SIZE(_adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3),
+static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = {
+ .entries = _adlp_dkl_phy_trans_dp_hbr2_hbr3,
+ .num_entries = ARRAY_SIZE(_adlp_dkl_phy_trans_dp_hbr2_hbr3),
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = {
+ { .snps = { 26, 0, 0 } }, /* VS 0, pre-emph 0 */
+ { .snps = { 33, 0, 6 } }, /* VS 0, pre-emph 1 */
+ { .snps = { 38, 0, 12 } }, /* VS 0, pre-emph 2 */
+ { .snps = { 43, 0, 19 } }, /* VS 0, pre-emph 3 */
+ { .snps = { 39, 0, 0 } }, /* VS 1, pre-emph 0 */
+ { .snps = { 44, 0, 8 } }, /* VS 1, pre-emph 1 */
+ { .snps = { 47, 0, 15 } }, /* VS 1, pre-emph 2 */
+ { .snps = { 52, 0, 0 } }, /* VS 2, pre-emph 0 */
+ { .snps = { 51, 0, 10 } }, /* VS 2, pre-emph 1 */
+ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans = {
+ .entries = _dg2_snps_trans,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans),
+ .hdmi_default_entry = ARRAY_SIZE(_dg2_snps_trans) - 1,
+};
+
+static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = {
+ { .snps = { 62, 0, 0 } }, /* preset 0 */
+ { .snps = { 56, 0, 6 } }, /* preset 1 */
+ { .snps = { 51, 0, 11 } }, /* preset 2 */
+ { .snps = { 48, 0, 14 } }, /* preset 3 */
+ { .snps = { 43, 0, 19 } }, /* preset 4 */
+ { .snps = { 59, 3, 0 } }, /* preset 5 */
+ { .snps = { 53, 3, 6 } }, /* preset 6 */
+ { .snps = { 49, 3, 10 } }, /* preset 7 */
+ { .snps = { 45, 3, 14 } }, /* preset 8 */
+ { .snps = { 42, 3, 17 } }, /* preset 9 */
+ { .snps = { 56, 6, 0 } }, /* preset 10 */
+ { .snps = { 50, 6, 6 } }, /* preset 11 */
+ { .snps = { 47, 6, 9 } }, /* preset 12 */
+ { .snps = { 42, 6, 14 } }, /* preset 13 */
+ { .snps = { 46, 8, 8 } }, /* preset 14 */
+ { .snps = { 56, 3, 3 } }, /* preset 15 */
+};
+
+static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = {
+ .entries = _dg2_snps_trans_uhbr,
+ .num_entries = ARRAY_SIZE(_dg2_snps_trans_uhbr),
};
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table)
{
- return table == &tgl_combo_phy_ddi_translations_edp_hbr2_hobl;
+ return table == &tgl_combo_phy_trans_edp_hbr2_hobl;
}
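/*
 * Illustrative aside (not from this commit): the HOBL table tested here is
 * the eDP power-saving vswing mode. The *_edp helpers below select it when
 * the VBT sets dev_priv->vbt.edp.hobl and stop using it once
 * intel_dp->hobl_failed has been set after a failed attempt.
 */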
static const struct intel_ddi_buf_trans *
-intel_get_buf_trans(const struct intel_ddi_buf_trans *ddi_translations, int *num_entries)
+intel_get_buf_trans(const struct intel_ddi_buf_trans *trans, int *num_entries)
{
- *num_entries = ddi_translations->num_entries;
- return ddi_translations;
+ *num_entries = trans->num_entries;
+ return trans;
}
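/*
 * Illustrative aside (not from this commit): every per-platform hook below
 * funnels through this helper, which returns the table and reports its entry
 * count through the out parameter. Encoders then consume the tables via the
 * function pointer installed by intel_ddi_buf_trans_init(), roughly:
 *
 *	int n_entries;
 *	const struct intel_ddi_buf_trans *trans =
 *		encoder->get_buf_trans(encoder, crtc_state, &n_entries);
 */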
static const struct intel_ddi_buf_trans *
@@ -1001,11 +1045,11 @@ hsw_get_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- return intel_get_buf_trans(&hsw_ddi_translations_fdi, n_entries);
+ return intel_get_buf_trans(&hsw_trans_fdi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&hsw_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&hsw_trans_hdmi, n_entries);
else
- return intel_get_buf_trans(&hsw_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&hsw_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1016,14 +1060,14 @@ bdw_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
- return intel_get_buf_trans(&bdw_ddi_translations_fdi, n_entries);
+ return intel_get_buf_trans(&bdw_trans_fdi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&bdw_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&bdw_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&bdw_ddi_translations_edp, n_entries);
+ return intel_get_buf_trans(&bdw_trans_edp, n_entries);
else
- return intel_get_buf_trans(&bdw_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&bdw_trans_dp, n_entries);
}
static int skl_buf_trans_num_entries(enum port port, int n_entries)
@@ -1037,12 +1081,12 @@ static int skl_buf_trans_num_entries(enum port port, int n_entries)
static const struct intel_ddi_buf_trans *
_skl_get_buf_trans_dp(struct intel_encoder *encoder,
- const struct intel_ddi_buf_trans *ddi_translations,
+ const struct intel_ddi_buf_trans *trans,
int *n_entries)
{
- ddi_translations = intel_get_buf_trans(ddi_translations, n_entries);
+ trans = intel_get_buf_trans(trans, n_entries);
*n_entries = skl_buf_trans_num_entries(encoder->port, *n_entries);
- return ddi_translations;
+ return trans;
}
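/*
 * Illustrative aside (not from this commit): unlike the plain helper above,
 * the SKL/KBL DP path also clamps the reported count per port through
 * skl_buf_trans_num_entries() (body outside this hunk), so callers must use
 * the value written back through n_entries rather than the table's own
 * num_entries field.
 */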
static const struct intel_ddi_buf_trans *
@@ -1053,12 +1097,12 @@ skl_y_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1069,12 +1113,12 @@ skl_u_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1085,12 +1129,12 @@ skl_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1101,12 +1145,12 @@ kbl_y_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_y_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_y_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_y_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_y_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_y_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_y_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1117,12 +1161,12 @@ kbl_u_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_u_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_u_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_u_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_u_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1133,12 +1177,12 @@ kbl_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&skl_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&skl_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return _skl_get_buf_trans_dp(encoder, &skl_ddi_translations_edp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &skl_trans_edp, n_entries);
else
- return _skl_get_buf_trans_dp(encoder, &kbl_ddi_translations_dp, n_entries);
+ return _skl_get_buf_trans_dp(encoder, &kbl_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1149,12 +1193,12 @@ bxt_get_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&bxt_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&bxt_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&bxt_ddi_translations_edp, n_entries);
+ return intel_get_buf_trans(&bxt_trans_edp, n_entries);
else
- return intel_get_buf_trans(&bxt_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&bxt_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1162,7 +1206,7 @@ icl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries)
{
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
}
@@ -1174,10 +1218,10 @@ icl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
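/*
 * Illustrative aside (not from this commit): the port_clock comparisons in
 * these helpers are against the DP link rate in kHz (RBR 162000, HBR 270000,
 * HBR2 540000, HBR3 810000), so "> 270000" picks the HBR2/HBR3 tables and
 * "> 540000" picks the HBR3-capable ones.
 */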
@@ -1190,7 +1234,7 @@ icl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return icl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1203,10 +1247,10 @@ icl_get_mg_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hbr2_hbr3,
+ return intel_get_buf_trans(&icl_mg_phy_trans_hbr2_hbr3,
n_entries);
} else {
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_rbr_hbr,
+ return intel_get_buf_trans(&icl_mg_phy_trans_rbr_hbr,
n_entries);
}
}
@@ -1217,7 +1261,7 @@ icl_get_mg_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_mg_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_mg_phy_trans_hdmi, n_entries);
else
return icl_get_mg_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1228,9 +1272,9 @@ ehl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&ehl_combo_phy_trans_edp_hbr2, n_entries);
else
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1241,12 +1285,12 @@ ehl_get_combo_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
dev_priv->vbt.edp.low_vswing)
return ehl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return intel_get_buf_trans(&ehl_combo_phy_ddi_translations_dp, n_entries);
+ return intel_get_buf_trans(&ehl_combo_phy_trans_dp, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1255,9 +1299,9 @@ jsl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr2, n_entries);
else
- return intel_get_buf_trans(&jsl_combo_phy_ddi_translations_edp_hbr, n_entries);
+ return intel_get_buf_trans(&jsl_combo_phy_trans_edp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1268,12 +1312,12 @@ jsl_get_combo_buf_trans(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP) &&
dev_priv->vbt.edp.low_vswing)
return jsl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1285,14 +1329,14 @@ tgl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
if (crtc_state->port_clock > 270000) {
if (IS_TGL_U(dev_priv) || IS_TGL_Y(dev_priv)) {
- return intel_get_buf_trans(&tgl_uy_combo_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_uy_combo_phy_trans_dp_hbr2,
n_entries);
} else {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr2,
n_entries);
}
} else {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1306,13 +1350,13 @@ tgl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1325,7 +1369,7 @@ tgl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return tgl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1338,10 +1382,10 @@ dg1_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_hbr2_hbr3,
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_hbr2_hbr3,
n_entries);
else
- return intel_get_buf_trans(&dg1_combo_phy_ddi_translations_dp_rbr_hbr,
+ return intel_get_buf_trans(&dg1_combo_phy_trans_dp_rbr_hbr,
n_entries);
}
@@ -1354,13 +1398,13 @@ dg1_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000)
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed)
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
else if (dev_priv->vbt.edp.low_vswing)
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
else
return dg1_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
@@ -1372,7 +1416,7 @@ dg1_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return dg1_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1385,9 +1429,9 @@ rkl_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&rkl_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&rkl_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1399,13 +1443,13 @@ rkl_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_dp_hbr2_edp_hbr3,
+ return intel_get_buf_trans(&icl_combo_phy_trans_dp_hbr2_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_edp_hbr2,
+ return intel_get_buf_trans(&icl_combo_phy_trans_edp_hbr2,
n_entries);
}
@@ -1418,7 +1462,7 @@ rkl_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return rkl_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1431,9 +1475,9 @@ adls_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&tgl_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1445,11 +1489,11 @@ adls_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr3, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr3, n_entries);
else if (i915->vbt.edp.hobl && !intel_dp->hobl_failed)
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl, n_entries);
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl, n_entries);
else if (i915->vbt.edp.low_vswing)
- return intel_get_buf_trans(&adls_combo_phy_ddi_translations_edp_hbr2, n_entries);
+ return intel_get_buf_trans(&adls_combo_phy_trans_edp_hbr2, n_entries);
else
return adls_get_combo_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1460,7 +1504,7 @@ adls_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&icl_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&icl_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adls_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1473,9 +1517,9 @@ adlp_get_combo_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000)
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr2_hbr3, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr2_hbr3, n_entries);
else
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_dp_hbr, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_dp_hbr, n_entries);
}
static const struct intel_ddi_buf_trans *
@@ -1487,13 +1531,13 @@ adlp_get_combo_buf_trans_edp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (crtc_state->port_clock > 540000) {
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_hbr3,
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_hbr3,
n_entries);
} else if (dev_priv->vbt.edp.hobl && !intel_dp->hobl_failed) {
- return intel_get_buf_trans(&tgl_combo_phy_ddi_translations_edp_hbr2_hobl,
+ return intel_get_buf_trans(&tgl_combo_phy_trans_edp_hbr2_hobl,
n_entries);
} else if (dev_priv->vbt.edp.low_vswing) {
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_edp_up_to_hbr2,
+ return intel_get_buf_trans(&adlp_combo_phy_trans_edp_up_to_hbr2,
n_entries);
}
@@ -1506,7 +1550,7 @@ adlp_get_combo_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&adlp_combo_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&adlp_combo_phy_trans_hdmi, n_entries);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
return adlp_get_combo_buf_trans_edp(encoder, crtc_state, n_entries);
else
@@ -1519,10 +1563,10 @@ tgl_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr2,
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr2,
n_entries);
} else {
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1533,7 +1577,7 @@ tgl_get_dkl_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
else
return tgl_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
@@ -1544,10 +1588,10 @@ adlp_get_dkl_buf_trans_dp(struct intel_encoder *encoder,
int *n_entries)
{
if (crtc_state->port_clock > 270000) {
- return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr2_hbr3,
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr2_hbr3,
n_entries);
} else {
- return intel_get_buf_trans(&adlp_dkl_phy_ddi_translations_dp_hbr,
+ return intel_get_buf_trans(&adlp_dkl_phy_trans_dp_hbr,
n_entries);
}
}
@@ -1558,29 +1602,21 @@ adlp_get_dkl_buf_trans(struct intel_encoder *encoder,
int *n_entries)
{
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
- return intel_get_buf_trans(&tgl_dkl_phy_ddi_translations_hdmi, n_entries);
+ return intel_get_buf_trans(&tgl_dkl_phy_trans_hdmi, n_entries);
else
return adlp_get_dkl_buf_trans_dp(encoder, crtc_state, n_entries);
}
-int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *default_entry)
+static const struct intel_ddi_buf_trans *
+dg2_get_snps_buf_trans(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ int *n_entries)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_ddi_buf_trans *ddi_translations;
- int n_entries;
-
- ddi_translations = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
-
- if (drm_WARN_ON(&dev_priv->drm, !ddi_translations)) {
- *default_entry = 0;
- return 0;
- }
-
- *default_entry = ddi_translations->hdmi_default_entry;
-
- return n_entries;
+ if (intel_crtc_has_dp_encoder(crtc_state) &&
+ intel_dp_is_uhbr(crtc_state))
+ return intel_get_buf_trans(&dg2_snps_trans_uhbr, n_entries);
+ else
+ return intel_get_buf_trans(&dg2_snps_trans, n_entries);
}
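/*
 * Illustrative aside (not from this commit): unlike the older platforms
 * above, the DG2 SNPS PHY uses a single table for every output type; the
 * only split is the UHBR check here, and HDMI reuses dg2_snps_trans with
 * hdmi_default_entry pointing at its last (highest swing) entry.
 */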
void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
@@ -1588,7 +1624,9 @@ void intel_ddi_buf_trans_init(struct intel_encoder *encoder)
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_port_to_phy(i915, encoder->port);
- if (IS_ALDERLAKE_P(i915)) {
+ if (IS_DG2(i915)) {
+ encoder->get_buf_trans = dg2_get_snps_buf_trans;
+ } else if (IS_ALDERLAKE_P(i915)) {
if (intel_phy_is_combo(i915, phy))
encoder->get_buf_trans = adlp_get_combo_buf_trans;
else
diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
index 2acd720f9d4f..2133984a572b 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.h
@@ -34,15 +34,21 @@ struct icl_ddi_buf_trans {
};
struct icl_mg_phy_ddi_buf_trans {
- u32 cri_txdeemph_override_11_6;
- u32 cri_txdeemph_override_5_0;
- u32 cri_txdeemph_override_17_12;
+ u8 cri_txdeemph_override_11_6;
+ u8 cri_txdeemph_override_5_0;
+ u8 cri_txdeemph_override_17_12;
};
struct tgl_dkl_phy_ddi_buf_trans {
- u32 dkl_vswing_control;
- u32 dkl_preshoot_control;
- u32 dkl_de_emphasis_control;
+ u8 vswing;
+ u8 preshoot;
+ u8 de_emphasis;
+};
+
+struct dg2_snps_phy_buf_trans {
+ u8 vswing;
+ u8 pre_cursor;
+ u8 post_cursor;
};
union intel_ddi_buf_trans_entry {
@@ -51,6 +57,7 @@ union intel_ddi_buf_trans_entry {
struct icl_ddi_buf_trans icl;
struct icl_mg_phy_ddi_buf_trans mg;
struct tgl_dkl_phy_ddi_buf_trans dkl;
+ struct dg2_snps_phy_buf_trans snps;
};
struct intel_ddi_buf_trans {
@@ -61,10 +68,6 @@ struct intel_ddi_buf_trans {
bool is_hobl_buf_trans(const struct intel_ddi_buf_trans *table);
-int intel_ddi_hdmi_num_entries(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- int *default_entry);
-
void intel_ddi_buf_trans_init(struct intel_encoder *encoder);
#endif
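/*
 * Illustrative aside (not from this commit): besides adding the .snps member
 * to the entry union, this header change shrinks the MG and DKL per-entry
 * structs from three u32 fields (12 bytes) to three u8 fields (3 bytes,
 * assuming the usual no-padding layout for an all-u8 struct), keeping the
 * large translation tables above compact.
 */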
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 17f44ffea586..ff598b6cd953 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -52,6 +52,7 @@
#include "display/intel_dp_mst.h"
#include "display/intel_dpll.h"
#include "display/intel_dpll_mgr.h"
+#include "display/intel_drrs.h"
#include "display/intel_dsi.h"
#include "display/intel_dvo.h"
#include "display/intel_fb.h"
@@ -67,9 +68,10 @@
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_object.h"
-#include "gt/intel_rps.h"
#include "gt/gen8_ppgtt.h"
+#include "pxp/intel_pxp.h"
+
#include "g4x_dp.h"
#include "g4x_hdmi.h"
#include "i915_drv.h"
@@ -84,35 +86,37 @@
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp_link_training.h"
+#include "intel_dpt.h"
#include "intel_fbc.h"
-#include "intel_fdi.h"
#include "intel_fbdev.h"
+#include "intel_fdi.h"
#include "intel_fifo_underrun.h"
#include "intel_frontbuffer.h"
#include "intel_hdcp.h"
#include "intel_hotplug.h"
#include "intel_overlay.h"
+#include "intel_panel.h"
+#include "intel_pcode.h"
#include "intel_pipe_crc.h"
+#include "intel_plane_initial.h"
#include "intel_pm.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_quirks.h"
-#include "intel_sideband.h"
+#include "intel_sbi.h"
#include "intel_sprite.h"
#include "intel_tc.h"
#include "intel_vga.h"
#include "i9xx_plane.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "vlv_sideband.h"
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
static void ilk_pch_clock_get(struct intel_crtc *crtc,
struct intel_crtc_state *pipe_config);
-static int intel_framebuffer_init(struct intel_framebuffer *ifb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state);
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
@@ -120,186 +124,105 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
const struct intel_link_m_n *m2_n2);
static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state);
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state);
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state);
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state);
static void intel_modeset_setup_hw_state(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_dpt {
- struct i915_address_space vm;
-
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void __iomem *iomem;
-};
-
-#define i915_is_dpt(vm) ((vm)->is_dpt)
-
-static inline struct i915_dpt *
-i915_vm_to_dpt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
- GEM_BUG_ON(!i915_is_dpt(vm));
- return container_of(vm, struct i915_dpt, vm);
-}
-
-#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void dpt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- enum i915_cache_level level,
- u32 flags)
+/**
+ * intel_update_watermarks - update FIFO watermark values based on current modes
+ * @dev_priv: i915 device
+ *
+ * Calculate watermark values for the various WM regs based on current mode
+ * and plane configuration.
+ *
+ * There are several cases to deal with here:
+ * - normal (i.e. non-self-refresh)
+ * - self-refresh (SR) mode
+ * - lines are large relative to FIFO size (buffer can hold up to 2)
+ * - lines are small relative to FIFO size (buffer can hold more than 2
+ * lines), so need to account for TLB latency
+ *
+ * The normal calculation is:
+ * watermark = dotclock * bytes per pixel * latency
+ * where latency is platform & configuration dependent (we assume pessimal
+ * values here).
+ *
+ * The SR calculation is:
+ * watermark = (trunc(latency/line time)+1) * surface width *
+ * bytes per pixel
+ * where
+ * line time = htotal / dotclock
+ * surface width = hdisplay for normal plane and 64 for cursor
+ * and latency is assumed to be high, as above.
+ *
+ * The final value programmed to the register should always be rounded up,
+ * and include an extra 2 entries to account for clock crossings.
+ *
+ * We don't use the sprite, so we can ignore that. And on Crestline we have
+ * to set the non-SR watermarks to 8.
+ */
+static void intel_update_watermarks(struct drm_i915_private *dev_priv)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
-
- gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, level, flags));
+ if (dev_priv->wm_disp->update_wm)
+ dev_priv->wm_disp->update_wm(dev_priv);
}
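/*
 * Illustrative aside (not from this commit): a worked example of the
 * formulas in the comment above, using hypothetical numbers. With a
 * 148,500 kHz dotclock, 4 bytes per pixel and 20 us latency, the normal
 * watermark is 148,500,000 * 4 * 20e-6 = ~11,880 bytes of FIFO. For SR with
 * htotal = 2200 and hdisplay = 1920, line time = 2200 / 148,500,000 =
 * ~14.8 us, so (trunc(20 / 14.8) + 1) * 1920 * 4 = 2 * 1920 * 4 = 15,360
 * bytes, before the rounding up and the extra two entries the comment calls
 * for.
 */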
-static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma *vma,
- enum i915_cache_level level,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
- int i;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- i = vma->node.start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma->pages)
- gen8_set_pte(&base[i++], pte_encode | addr);
-}
-
-static void dpt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
+static int intel_compute_pipe_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_pipe_wm)
+ return dev_priv->wm_disp->compute_pipe_wm(state, crtc);
+ return 0;
}
-static void dpt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma *vma,
- enum i915_cache_level cache_level,
- u32 flags)
+static int intel_compute_intermediate_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_i915_gem_object *obj = vma->obj;
- u32 pte_flags;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
- pte_flags |= PTE_READ_ONLY;
- if (i915_gem_object_is_lmem(obj))
- pte_flags |= PTE_LM;
-
- vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
-
- vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (!dev_priv->wm_disp->compute_intermediate_wm)
+ return 0;
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->wm_disp->compute_pipe_wm))
+ return 0;
+ return dev_priv->wm_disp->compute_intermediate_wm(state, crtc);
}
-static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+static bool intel_initial_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- vm->clear_range(vm, vma->node.start, vma->size);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->initial_watermarks) {
+ dev_priv->wm_disp->initial_watermarks(state, crtc);
+ return true;
+ }
+ return false;
}
-static void dpt_cleanup(struct i915_address_space *vm)
+static void intel_atomic_update_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_gem_object_put(dpt->obj);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->atomic_update_watermarks)
+ dev_priv->wm_disp->atomic_update_watermarks(state, crtc);
}
-static struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb)
+static void intel_optimize_watermarks(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
- struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
- struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
- struct i915_address_space *vm;
- struct i915_dpt *dpt;
- size_t size;
- int ret;
-
- if (intel_fb_needs_pot_stride_remap(fb))
- size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
- else
- size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
-
- size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
-
- if (HAS_LMEM(i915))
- dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
- else
- dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
-
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
-
- dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
- if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- vm = &dpt->vm;
-
- vm->gt = &i915->gt;
- vm->i915 = i915;
- vm->dma = i915->drm.dev;
- vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- vm->is_dpt = true;
-
- i915_address_space_init(vm, VM_CLASS_DPT);
-
- vm->insert_page = dpt_insert_page;
- vm->clear_range = dpt_clear_range;
- vm->insert_entries = dpt_insert_entries;
- vm->cleanup = dpt_cleanup;
-
- vm->vma_ops.bind_vma = dpt_bind_vma;
- vm->vma_ops.unbind_vma = dpt_unbind_vma;
- vm->vma_ops.set_pages = ggtt_set_pages;
- vm->vma_ops.clear_pages = clear_pages;
-
- vm->pte_encode = gen8_ggtt_pte_encode;
-
- dpt->obj = dpt_obj;
-
- return &dpt->vm;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->optimize_watermarks)
+ dev_priv->wm_disp->optimize_watermarks(state, crtc);
}
-static void intel_dpt_destroy(struct i915_address_space *vm)
+static int intel_compute_global_watermarks(struct intel_atomic_state *state)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vm_close(&dpt->vm);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ if (dev_priv->wm_disp->compute_global_watermarks)
+ return dev_priv->wm_disp->compute_global_watermarks(state);
+ return 0;
}
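/*
 * Illustrative aside (not from this commit): the wrappers above exist so the
 * rest of the display code can call the optional dev_priv->wm_disp hooks
 * without open-coding NULL checks; intel_compute_intermediate_wm() also
 * warns if a platform provides compute_intermediate_wm without
 * compute_pipe_wm.
 */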
/* returns HPLL frequency in kHz */
@@ -359,6 +282,12 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq);
}
+static bool is_hdr_mode(const struct intel_crtc_state *crtc_state)
+{
+ return (crtc_state->active_planes &
+ ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0;
+}
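/*
 * Illustrative aside (not from this commit): is_hdr_mode() reports true when
 * every active plane on the CRTC is either HDR capable
 * (icl_hdr_plane_mask()) or the cursor, i.e. no SDR-only plane is enabled.
 */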
+
/* WA Display #0827: Gen9:all */
static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
@@ -384,6 +313,15 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
+/* Wa_1604331009:icl,jsl,ehl */
+static void
+icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool enable)
+{
+ intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), CURSOR_GATING_DIS,
+ enable ? CURSOR_GATING_DIS : 0);
+}
+
static bool
is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state)
{
@@ -464,168 +402,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
}
}
-/* Only for pre-ILK configs */
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, DPLL(pipe));
- cur_state = !!(val & DPLL_VCO_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-/* XXX: the dsi pll is shared between MIPI DSI ports */
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
-{
- u32 val;
- bool cur_state;
-
- vlv_cck_get(dev_priv);
- val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
- vlv_cck_put(dev_priv);
-
- cur_state = val & DSI_PLL_VCO_EN;
- I915_STATE_WARN(cur_state != state,
- "DSI PLL state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-static void assert_fdi_tx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- bool cur_state;
-
- if (HAS_DDI(dev_priv)) {
- /*
- * DDI does not have a specific FDI_TX register.
- *
- * FDI is never fed from EDP transcoder
- * so pipe->transcoder cast is fine here.
- */
- enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(cpu_transcoder));
- cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
- } else {
- u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- cur_state = !!(val & FDI_TX_ENABLE);
- }
- I915_STATE_WARN(cur_state != state,
- "FDI TX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
-#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
-
-static void assert_fdi_rx(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX state assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
-#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
-
-static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
- enum pipe pipe)
-{
- u32 val;
-
- /* ILK FDI PLL is always enabled */
- if (IS_IRONLAKE(dev_priv))
- return;
-
- /* On Haswell, DDI ports are responsible for the FDI PLL setup */
- if (HAS_DDI(dev_priv))
- return;
-
- val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
- I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
-}
-
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
-{
- u32 val;
- bool cur_state;
-
- val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
- cur_state = !!(val & FDI_RX_PLL_ENABLE);
- I915_STATE_WARN(cur_state != state,
- "FDI RX PLL assertion failure (expected %s, current %s)\n",
- onoff(state), onoff(cur_state));
-}
-
-void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
-{
- i915_reg_t pp_reg;
- u32 val;
- enum pipe panel_pipe = INVALID_PIPE;
- bool locked = true;
-
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
- return;
-
- if (HAS_PCH_SPLIT(dev_priv)) {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- switch (port_sel) {
- case PANEL_PORT_SELECT_LVDS:
- intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPA:
- g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPC:
- g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
- break;
- case PANEL_PORT_SELECT_DPD:
- g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
- break;
- default:
- MISSING_CASE(port_sel);
- break;
- }
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- /* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(pipe);
- panel_pipe = pipe;
- } else {
- u32 port_sel;
-
- pp_reg = PP_CONTROL(0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
-
- drm_WARN_ON(&dev_priv->drm,
- port_sel != PANEL_PORT_SELECT_LVDS);
- intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
- }
-
- val = intel_de_read(dev_priv, pp_reg);
- if (!(val & PANEL_POWER_ON) ||
- ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
- locked = false;
-
- I915_STATE_WARN(panel_pipe == pipe && locked,
- "panel assertion failure, pipe %c regs locked\n",
- pipe_name(pipe));
-}
-
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, bool state)
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state)
{
bool cur_state;
enum intel_display_power_domain power_domain;
@@ -942,7 +720,7 @@ enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
return crtc->pipe;
}
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1003,7 +781,7 @@ void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
intel_wait_for_pipe_scanline_moving(crtc);
}
-void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1053,69 +831,6 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
info->num_planes == (is_ccs_modifier(modifier) ? 4 : 2);
}
-unsigned int
-intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- unsigned int cpp = fb->format->cpp[color_plane];
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_tile_size(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (DISPLAY_VER(dev_priv) == 2)
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 64;
- fallthrough;
- case I915_FORMAT_MOD_Y_TILED:
- if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
- return 128;
- else
- return 512;
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- if (is_ccs_plane(fb, color_plane))
- return 128;
- fallthrough;
- case I915_FORMAT_MOD_Yf_TILED:
- switch (cpp) {
- case 1:
- return 64;
- case 2:
- case 4:
- return 128;
- case 8:
- case 16:
- return 256;
- default:
- MISSING_CASE(cpp);
- return cpp;
- }
- break;
- default:
- MISSING_CASE(fb->modifier);
- return cpp;
- }
-}
-
-unsigned int
-intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height)
-{
- unsigned int tile_height = intel_tile_height(fb, color_plane);
-
- return ALIGN(height, tile_height);
-}
-
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
{
unsigned int size = 0;
@@ -1132,82 +847,16 @@ unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info
unsigned int size = 0;
int i;
- for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
+ for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
+ if (rem_info->plane_alignment)
+ size = ALIGN(size, rem_info->plane_alignment);
size += rem_info->plane[i].dst_stride * rem_info->plane[i].height;
-
- return size;
-}
-
-static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
-{
- if (DISPLAY_VER(dev_priv) >= 9)
- return 256 * 1024;
- else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
- IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return 128 * 1024;
- else if (DISPLAY_VER(dev_priv) >= 4)
- return 4 * 1024;
- else
- return 0;
-}
-
-static bool has_async_flips(struct drm_i915_private *i915)
-{
- return DISPLAY_VER(i915) >= 5;
-}
-
-unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
-
- if (intel_fb_uses_dpt(fb))
- return 512 * 4096;
-
- /* AUX_DIST needs only 4K alignment */
- if (is_ccs_plane(fb, color_plane))
- return 4096;
-
- if (is_semiplanar_uv_plane(fb, color_plane)) {
- /*
- * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
- * alignment for linear UV planes on all platforms.
- */
- if (DISPLAY_VER(dev_priv) >= 12) {
- if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
- return intel_linear_alignment(dev_priv);
-
- return intel_tile_row_size(fb, color_plane);
- }
-
- return 4096;
}
- drm_WARN_ON(&dev_priv->drm, color_plane != 0);
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- return intel_linear_alignment(dev_priv);
- case I915_FORMAT_MOD_X_TILED:
- if (has_async_flips(dev_priv))
- return 256 * 1024;
- return 0;
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- return 16 * 1024;
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Yf_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Yf_TILED:
- return 1 * 1024 * 1024;
- default:
- MISSING_CASE(fb->modifier);
- return 0;
- }
+ return size;
}
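The new loop above pads the running offset to rem_info->plane_alignment before adding each colour plane. A minimal standalone sketch of that calculation, with an open-coded align_up() standing in for the kernel's ALIGN() macro (power-of-two alignments assumed), would be:

static unsigned int align_up(unsigned int v, unsigned int a)
{
	/* power-of-two alignment assumed, as with ALIGN(); a == 0 means "no padding" */
	return a ? (v + a - 1) & ~(a - 1) : v;
}

static unsigned int remapped_size(const unsigned int *dst_stride,
				  const unsigned int *height,
				  int num_planes, unsigned int plane_alignment)
{
	unsigned int size = 0;
	int i;

	for (i = 0; i < num_planes; i++) {
		/* start each plane at an aligned offset, as the hunk above does */
		size = align_up(size, plane_alignment);
		size += dst_stride[i] * height[i];
	}

	return size;
}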
-static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
@@ -1217,198 +866,6 @@ static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
plane_state->view.gtt.type == I915_GGTT_VIEW_NORMAL);
}
-static struct i915_vma *
-intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags,
- struct i915_address_space *vm)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct i915_vma *vma;
- u32 alignment;
- int ret;
-
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- alignment = 4096 * 512;
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma = i915_vma_instance(obj, vm, view);
- if (IS_ERR(vma))
- goto err;
-
- if (i915_vma_misplaced(vma, 0, alignment, 0)) {
- ret = i915_vma_unbind(vma);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
- }
-
- ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
- if (ret) {
- vma = ERR_PTR(ret);
- goto err;
- }
-
- vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
-
- i915_gem_object_flush_if_display(obj);
-
- i915_vma_get(vma);
-err:
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
-
- return vma;
-}
-
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
- bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags)
-{
- struct drm_device *dev = fb->dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- intel_wakeref_t wakeref;
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- unsigned int pinctl;
- u32 alignment;
- int ret;
-
- if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
- return ERR_PTR(-EINVAL);
-
- if (phys_cursor)
- alignment = intel_cursor_alignment(dev_priv);
- else
- alignment = intel_surf_alignment(fb, 0);
- if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
- return ERR_PTR(-EINVAL);
-
- /* Note that the w/a also requires 64 PTE of padding following the
- * bo. We currently fill all unused PTE with the shadow page and so
- * we should always have valid PTE following the scanout preventing
- * the VT-d warning.
- */
- if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
- alignment = 256 * 1024;
-
- /*
- * Global gtt pte registers are special registers which actually forward
- * writes to a chunk of system memory. Which means that there is no risk
- * that the register values disappear as soon as we call
- * intel_runtime_pm_put(), so it is correct to wrap only the
- * pin/unpin/fence and not more.
- */
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
-
- /*
- * Valleyview is definitely limited to scanning out the first
- * 512MiB. Let's presume this behaviour was inherited from the
- * g4x display engine and that all earlier gen are similarly
- * limited. Testing suggests that it is a little more
- * complicated than this. For example, Cherryview appears quite
- * happy to scanout from anywhere within its global aperture.
- */
- pinctl = 0;
- if (HAS_GMCH(dev_priv))
- pinctl |= PIN_MAPPABLE;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(obj, &ww);
- if (!ret && phys_cursor)
- ret = i915_gem_object_attach_phys(obj, alignment);
- else if (!ret && HAS_LMEM(dev_priv))
- ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
- /* TODO: Do we need to sync when migration becomes async? */
- if (!ret)
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
- view, pinctl);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_unpin;
- }
- }
-
- if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
- /*
- * Install a fence for tiled scan-out. Pre-i965 always needs a
- * fence, whereas 965+ only requires a fence if using
- * framebuffer compression. For simplicity, we always, when
- * possible, install a fence as the cost is not that onerous.
- *
- * If we fail to fence the tiled scanout, then either the
- * modeset will reject the change (which is highly unlikely as
- * the affected systems, all but one, do not have unmappable
- * space) or we will not be able to enable full powersaving
- * techniques (also likely not to apply due to various limits
- * FBC and the like impose on the size of the buffer, which
- * presumably we violated anyway with this unmappable buffer).
- * Anyway, it is presumably better to stumble onwards with
- * something and try to run the system in a "less than optimal"
- * mode that matches the user configuration.
- */
- ret = i915_vma_pin_fence(vma);
- if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
- i915_vma_unpin(vma);
- goto err_unpin;
- }
- ret = 0;
-
- if (vma->fence)
- *out_flags |= PLANE_HAS_FENCE;
- }
-
- i915_vma_get(vma);
-
-err_unpin:
- i915_gem_object_unpin_pages(obj);
-err:
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- vma = ERR_PTR(ret);
-
- atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
- return vma;
-}
-
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
-{
- if (flags & PLANE_HAS_FENCE)
- i915_vma_unpin_fence(vma);
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
/*
* Convert the x/y offsets into a linear offset.
* Only valid with 0/180 degree rotation, which is fine since linear
@@ -1440,22 +897,6 @@ void intel_add_fb_offsets(int *x, int *y,
*y += state->view.color_plane[color_plane].y;
}
-static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
-{
- switch (fb_modifier) {
- case I915_FORMAT_MOD_X_TILED:
- return I915_TILING_X;
- case I915_FORMAT_MOD_Y_TILED:
- case I915_FORMAT_MOD_Y_TILED_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
- case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
- case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
- return I915_TILING_Y;
- default:
- return I915_TILING_NONE;
- }
-}
-
/*
* From the Sky Lake PRM:
* "The Color Control Surface (CCS) contains the compression status of
@@ -1586,12 +1027,6 @@ intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
}
}
-static int gen12_ccs_aux_stride(struct drm_framebuffer *fb, int ccs_plane)
-{
- return DIV_ROUND_UP(fb->pitches[skl_ccs_to_main_plane(fb, ccs_plane)],
- 512) * 64;
-}
-
u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
u32 pixel_format, u64 modifier)
{
@@ -1616,188 +1051,6 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
DRM_MODE_ROTATE_0);
}
-static
-u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
- u32 pixel_format, u64 modifier)
-{
- /*
- * Arbitrary limit for gen4+ chosen to match the
- * render engine max stride.
- *
- * The new CCS hash mode makes remapping impossible
- */
- if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
- intel_modifier_uses_dpt(dev_priv, modifier))
- return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
- else if (DISPLAY_VER(dev_priv) >= 7)
- return 256 * 1024;
- else
- return 128 * 1024;
-}
-
-static u32
-intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
-{
- struct drm_i915_private *dev_priv = to_i915(fb->dev);
- u32 tile_width;
-
- if (is_surface_linear(fb, color_plane)) {
- u32 max_stride = intel_plane_fb_max_stride(dev_priv,
- fb->format->format,
- fb->modifier);
-
- /*
- * To make remapping with linear generally feasible
- * we need the stride to be page aligned.
- */
- if (fb->pitches[color_plane] > max_stride &&
- !is_ccs_modifier(fb->modifier))
- return intel_tile_size(dev_priv);
- else
- return 64;
- }
-
- tile_width = intel_tile_width_bytes(fb, color_plane);
- if (is_ccs_modifier(fb->modifier)) {
- /*
- * Display WA #0531: skl,bxt,kbl,glk
- *
- * Render decompression and plane width > 3840
- * combined with horizontal panning requires the
- * plane stride to be a multiple of 4. We'll just
- * require the entire fb to accommodate that to avoid
- * potential runtime errors at plane configuration time.
- */
- if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
- color_plane == 0 && fb->width > 3840)
- tile_width *= 4;
- /*
- * The main surface pitch must be padded to a multiple of four
- * tile widths.
- */
- else if (DISPLAY_VER(dev_priv) >= 12)
- tile_width *= 4;
- }
- return tile_width;
-}
-
-static struct i915_vma *
-initial_plane_vma(struct drm_i915_private *i915,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- u32 base, size;
-
- if (plane_config->size == 0)
- return NULL;
-
- base = round_down(plane_config->base,
- I915_GTT_MIN_ALIGNMENT);
- size = round_up(plane_config->base + plane_config->size,
- I915_GTT_MIN_ALIGNMENT);
- size -= base;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
- if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- size * 2 > i915->stolen_usable_size)
- return NULL;
-
- obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
- if (IS_ERR(obj))
- return NULL;
-
- /*
- * Mark it WT ahead of time to avoid changing the
- * cache_level during fbdev initialization. The
- * unbind there would get stuck waiting for rcu.
- */
- i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
- I915_CACHE_WT : I915_CACHE_NONE);
-
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride =
- plane_config->fb->base.pitches[0] |
- plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto err_obj;
- }
-
- vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
- if (IS_ERR(vma))
- goto err_obj;
-
- if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
- goto err_obj;
-
- if (i915_gem_object_is_tiled(obj) &&
- !i915_vma_is_map_and_fenceable(vma))
- goto err_obj;
-
- return vma;
-
-err_obj:
- i915_gem_object_put(obj);
- return NULL;
-}
-
-static bool
-intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct drm_mode_fb_cmd2 mode_cmd = { 0 };
- struct drm_framebuffer *fb = &plane_config->fb->base;
- struct i915_vma *vma;
-
- switch (fb->modifier) {
- case DRM_FORMAT_MOD_LINEAR:
- case I915_FORMAT_MOD_X_TILED:
- case I915_FORMAT_MOD_Y_TILED:
- break;
- default:
- drm_dbg(&dev_priv->drm,
- "Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
- return false;
- }
-
- vma = initial_plane_vma(dev_priv, plane_config);
- if (!vma)
- return false;
-
- mode_cmd.pixel_format = fb->format->format;
- mode_cmd.width = fb->width;
- mode_cmd.height = fb->height;
- mode_cmd.pitches[0] = fb->pitches[0];
- mode_cmd.modifier[0] = fb->modifier;
- mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
-
- if (intel_framebuffer_init(to_intel_framebuffer(fb),
- vma->obj, &mode_cmd)) {
- drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
- goto err_vma;
- }
-
- plane_config->vma = vma;
- return true;
-
-err_vma:
- i915_vma_put(vma);
- return false;
-}
-
static void
intel_set_plane_visible(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state,
@@ -1833,8 +1086,8 @@ static void fixup_plane_bitmasks(struct intel_crtc_state *crtc_state)
}
}
-static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
- struct intel_plane *plane)
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_crtc_state *crtc_state =
@@ -1879,168 +1132,6 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
intel_wait_for_vblank(dev_priv, crtc->pipe);
}
-static struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- intel_wakeref_t wakeref;
- struct i915_vma *vma;
- void __iomem *iomem;
-
- wakeref = intel_runtime_pm_get(&i915->runtime_pm);
- atomic_inc(&i915->gpu_error.pending_fb_pin);
-
- vma = i915_gem_object_ggtt_pin(dpt->obj, NULL, 0, 4096,
- HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
- if (IS_ERR(vma))
- goto err;
-
- iomem = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
- if (IS_ERR(iomem)) {
- vma = iomem;
- goto err;
- }
-
- dpt->vma = vma;
- dpt->iomem = iomem;
-
- i915_vma_get(vma);
-
-err:
- atomic_dec(&i915->gpu_error.pending_fb_pin);
- intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
- return vma;
-}
-
-static void intel_dpt_unpin(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vma_unpin_iomap(dpt->vma);
- i915_vma_put(dpt->vma);
-}
-
-static bool
-intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
- const struct intel_initial_plane_config *plane_config,
- struct drm_framebuffer **fb,
- struct i915_vma **vma)
-{
- struct intel_crtc *crtc;
-
- for_each_intel_crtc(&i915->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
-
- if (!crtc_state->uapi.active)
- continue;
-
- if (!plane_state->ggtt_vma)
- continue;
-
- if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
- *fb = plane_state->hw.fb;
- *vma = plane_state->ggtt_vma;
- return true;
- }
- }
-
- return false;
-}
-
-static void
-intel_find_initial_plane_obj(struct intel_crtc *crtc,
- struct intel_initial_plane_config *plane_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_plane *plane =
- to_intel_plane(crtc->base.primary);
- struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- struct drm_framebuffer *fb;
- struct i915_vma *vma;
-
- /*
- * TODO:
- * Disable planes if get_initial_plane_config() failed.
- * Make sure things work if the surface base is not page aligned.
- */
- if (!plane_config->fb)
- return;
-
- if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
- fb = &plane_config->fb->base;
- vma = plane_config->vma;
- goto valid_fb;
- }
-
- /*
- * Failed to alloc the obj, check to see if we should share
- * an fb with another CRTC instead
- */
- if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
- goto valid_fb;
-
- /*
- * We've failed to reconstruct the BIOS FB. Current display state
- * indicates that the primary plane is visible, but has a NULL FB,
- * which will lead to problems later if we don't fix it up. The
- * simplest solution is to just disable the primary plane now and
- * pretend the BIOS never had it enabled.
- */
- intel_plane_disable_noatomic(crtc, plane);
- if (crtc_state->bigjoiner) {
- struct intel_crtc *slave =
- crtc_state->bigjoiner_linked_crtc;
- intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
- }
-
- return;
-
-valid_fb:
- plane_state->uapi.rotation = plane_config->rotation;
- intel_fb_fill_view(to_intel_framebuffer(fb),
- plane_state->uapi.rotation, &plane_state->view);
-
- __i915_vma_pin(vma);
- plane_state->ggtt_vma = i915_vma_get(vma);
- if (intel_plane_uses_fence(plane_state) &&
- i915_vma_pin_fence(vma) == 0 && vma->fence)
- plane_state->flags |= PLANE_HAS_FENCE;
-
- plane_state->uapi.src_x = 0;
- plane_state->uapi.src_y = 0;
- plane_state->uapi.src_w = fb->width << 16;
- plane_state->uapi.src_h = fb->height << 16;
-
- plane_state->uapi.crtc_x = 0;
- plane_state->uapi.crtc_y = 0;
- plane_state->uapi.crtc_w = fb->width;
- plane_state->uapi.crtc_h = fb->height;
-
- if (plane_config->tiling)
- dev_priv->preserve_bios_swizzle = true;
-
- plane_state->uapi.fb = fb;
- drm_framebuffer_get(fb);
-
- plane_state->uapi.crtc = &crtc->base;
- intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
-}
-
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
@@ -2449,55 +1540,6 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
-static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
-{
- u32 temp;
-
- temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
- return;
-
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
- FDI_RX_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
- FDI_RX_ENABLE);
-
- temp &= ~FDI_BC_BIFURCATION_SELECT;
- if (enable)
- temp |= FDI_BC_BIFURCATION_SELECT;
-
- drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
- enable ? "en" : "dis");
- intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
- intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
-}
-
-static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- switch (crtc->pipe) {
- case PIPE_A:
- break;
- case PIPE_B:
- if (crtc_state->fdi_lanes > 2)
- cpt_set_fdi_bc_bifurcation(dev_priv, false);
- else
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- case PIPE_C:
- cpt_set_fdi_bc_bifurcation(dev_priv, true);
-
- break;
- default:
- BUG();
- }
-}
-
/*
* Finds the encoder associated with the given CRTC. This can only be
* used when we know that the CRTC isn't feeding multiple encoders!
@@ -2547,16 +1589,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
assert_pch_transcoder_disabled(dev_priv, pipe);
- if (IS_IVYBRIDGE(dev_priv))
- ivb_update_fdi_bc_bifurcation(crtc_state);
-
- /* Write the TU size bits before fdi link training, so that error
- * detection works. */
- intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
- intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
-
/* For PCH output, training FDI link */
- dev_priv->display.fdi_link_train(crtc, crtc_state);
+ intel_fdi_link_train(crtc, crtc_state);
/* We need to program the right clock selection before writing the pixel
 * multiplier into the DPLL. */
@@ -2584,7 +1618,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_panel_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(dev_priv, pipe);
ilk_pch_transcoder_set_timings(crtc_state, pipe);
intel_fdi_normal_train(crtc);
@@ -2842,6 +1876,46 @@ static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state)
return false;
}
+static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
+
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (is_hdr_mode(crtc_state) &&
+ crtc_state->active_planes & BIT(PLANE_CURSOR) &&
+ DISPLAY_VER(dev_priv) == 11)
+ return true;
+
+ return false;
+}
+
+static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
+ enum pipe pipe, bool enable)
+{
+ if (DISPLAY_VER(i915) == 9) {
+ /*
+ * "Plane N strech max must be programmed to 11b (x1)
+ * when Async flips are enabled on that plane."
+ */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ SKL_PLANE1_STRETCH_MAX_MASK,
+ enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
+ } else {
+ /* Also needed on HSW/BDW albeit undocumented */
+ intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
+ HSW_PRI_STRETCH_MAX_MASK,
+ enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
+ }
+}
+
+static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return crtc_state->uapi.async_flip && intel_vtd_active() &&
+ (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915));
+}
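intel_async_flip_vtd_wa() above relies on intel_de_rmw(); as a reading aid, the value computation it is assumed to perform is the usual read-modify-write, clear the masked bits and OR in the new field:

/* Sketch of the read-modify-write step assumed behind intel_de_rmw() */
static u32 rmw_value(u32 old, u32 clear, u32 set)
{
	return (old & ~clear) | set;
}

So enabling the workaround on a gen9 pipe clears SKL_PLANE1_STRETCH_MAX_MASK and programs SKL_PLANE1_STRETCH_MAX_X1, while disabling it writes SKL_PLANE1_STRETCH_MAX_X8 back.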
+
static bool planes_enabling(const struct intel_crtc_state *old_crtc_state,
const struct intel_crtc_state *new_crtc_state)
{
@@ -2869,12 +1943,17 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
intel_fbc_post_update(state, crtc);
+ intel_drrs_page_flip(state, crtc);
+
+ if (needs_async_flip_vtd_wa(old_crtc_state) &&
+ !needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, false);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -2883,6 +1962,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (needs_scalerclk_wa(old_crtc_state) &&
!needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, false);
+
+ if (needs_cursorclk_wa(old_crtc_state) &&
+ !needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, false);
+
}
static void intel_crtc_enable_flip_done(struct intel_atomic_state *state,
@@ -2969,6 +2053,10 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
+ if (!needs_async_flip_vtd_wa(old_crtc_state) &&
+ needs_async_flip_vtd_wa(new_crtc_state))
+ intel_async_flip_vtd_wa(dev_priv, pipe, true);
+
/* Display WA 827 */
if (!needs_nv12_wa(old_crtc_state) &&
needs_nv12_wa(new_crtc_state))
@@ -2979,6 +2067,11 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
needs_scalerclk_wa(new_crtc_state))
icl_wa_scalerclkgating(dev_priv, pipe, true);
+ /* Wa_1604331009:icl,jsl,ehl */
+ if (!needs_cursorclk_wa(old_crtc_state) &&
+ needs_cursorclk_wa(new_crtc_state))
+ icl_wa_cursorclkgating(dev_priv, pipe, true);
+
/*
* Vblank time updates from the shadow to live plane control register
* are blocked if the memory self-refresh mode is active at that
@@ -3022,10 +2115,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
* we'll continue to update watermarks the old way, if flags tell
* us to.
*/
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else if (new_crtc_state->update_wm_pre)
- intel_update_watermarks(crtc);
+ if (!intel_initial_watermarks(state, crtc))
+ if (new_crtc_state->update_wm_pre)
+ intel_update_watermarks(dev_priv);
}
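This hunk, like the similar ones further down, replaces the direct dev_priv->display.initial_watermarks() calls with an intel_initial_watermarks() helper whose return value indicates whether a platform hook ran. Inferred purely from the call sites in this patch, not taken from the driver, a plausible shape for such a helper is:

/* Sketch only: run the per-platform hook when one is registered and
 * report whether it ran, so callers can fall back to the legacy
 * intel_update_watermarks() path. */
static bool intel_initial_watermarks_sketch(struct intel_atomic_state *state,
					    struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(state->base.dev);

	if (!i915->wm_disp->initial_watermarks)
		return false;

	i915->wm_disp->initial_watermarks(state, crtc);
	return true;
}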
/*
@@ -3360,9 +2452,6 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
- if (new_crtc_state->has_pch_encoder)
- intel_prepare_shared_dpll(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -3400,9 +2489,8 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
if (new_crtc_state->has_pch_encoder)
ilk_pch_enable(state, new_crtc_state);
@@ -3578,10 +2666,9 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
&new_crtc_state->fdi_m_n, NULL);
hsw_set_frame_start_delay(new_crtc_state);
- }
- if (!transcoder_is_dsi(cpu_transcoder))
- hsw_set_pipeconf(new_crtc_state);
+ hsw_set_transconf(new_crtc_state);
+ }
crtc->active = true;
@@ -3611,8 +2698,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) >= 11)
icl_set_pipe_chicken(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ intel_initial_watermarks(state, crtc);
if (DISPLAY_VER(dev_priv) >= 11) {
const struct intel_dbuf_state *dbuf_state =
@@ -3676,7 +2762,7 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
ilk_pfit_disable(old_crtc_state);
@@ -3738,7 +2824,7 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
*/
drm_WARN_ON(&dev_priv->drm,
intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, PFIT_PGM_RATIOS,
crtc_state->gmch_pfit.pgm_ratios);
@@ -3858,11 +2944,7 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port)
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port)
{
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
-
- if (intel_phy_is_tc(dev_priv, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT) {
+ if (intel_tc_port_in_tbt_alt_mode(dig_port)) {
switch (dig_port->aux_ch) {
case AUX_CH_C:
return POWER_DOMAIN_AUX_C_TBT;
@@ -3923,16 +3005,16 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct drm_encoder *encoder;
enum pipe pipe = crtc->pipe;
u64 mask;
- enum transcoder transcoder = crtc_state->cpu_transcoder;
if (!crtc_state->hw.active)
return 0;
mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
- mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
+ mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(cpu_transcoder));
if (crtc_state->pch_pfit.enabled ||
crtc_state->pch_pfit.force_thru)
mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
@@ -3951,7 +3033,7 @@ static u64 get_crtc_power_domains(struct intel_crtc_state *crtc_state)
mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
if (crtc_state->dsc.compression_enable)
- mask |= BIT_ULL(intel_dsc_power_domain(crtc_state));
+ mask |= BIT_ULL(intel_dsc_power_domain(crtc, cpu_transcoder));
return mask;
}
@@ -4015,13 +3097,10 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_pll_enable(state, crtc);
- if (IS_CHERRYVIEW(dev_priv)) {
- chv_prepare_pll(crtc, new_crtc_state);
- chv_enable_pll(crtc, new_crtc_state);
- } else {
- vlv_prepare_pll(crtc, new_crtc_state);
- vlv_enable_pll(crtc, new_crtc_state);
- }
+ if (IS_CHERRYVIEW(dev_priv))
+ chv_enable_pll(new_crtc_state);
+ else
+ vlv_enable_pll(new_crtc_state);
intel_encoders_pre_enable(state, crtc);
@@ -4032,25 +3111,14 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- dev_priv->display.initial_watermarks(state, crtc);
- intel_enable_pipe(new_crtc_state);
+ intel_initial_watermarks(state, crtc);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
intel_encoders_enable(state, crtc);
}
-static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- intel_de_write(dev_priv, FP0(crtc->pipe),
- crtc_state->dpll_hw_state.fp0);
- intel_de_write(dev_priv, FP1(crtc->pipe),
- crtc_state->dpll_hw_state.fp1);
-}
-
static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -4062,8 +3130,6 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
- i9xx_set_pll_dividers(new_crtc_state);
-
if (intel_crtc_has_dp_encoder(new_crtc_state))
intel_dp_set_m_n(new_crtc_state, M1_N1);
@@ -4079,7 +3145,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- i9xx_enable_pll(crtc, new_crtc_state);
+ i9xx_enable_pll(new_crtc_state);
i9xx_pfit_enable(new_crtc_state);
@@ -4088,11 +3154,9 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
/* update DSPCNTR to configure gamma for pipe bottom color */
intel_disable_primary_plane(new_crtc_state);
- if (dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
- else
- intel_update_watermarks(crtc);
- intel_enable_pipe(new_crtc_state);
+ if (!intel_initial_watermarks(state, crtc))
+ intel_update_watermarks(dev_priv);
+ intel_enable_transcoder(new_crtc_state);
intel_crtc_vblank_on(new_crtc_state);
@@ -4111,7 +3175,7 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
if (!old_crtc_state->gmch_pfit.control)
return;
- assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);
drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
intel_de_read(dev_priv, PFIT_CONTROL));
@@ -4137,7 +3201,7 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
i9xx_pfit_disable(old_crtc_state);
@@ -4157,8 +3221,8 @@ static void i9xx_crtc_disable(struct intel_atomic_state *state,
if (DISPLAY_VER(dev_priv) != 2)
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
- if (!dev_priv->display.initial_watermarks)
- intel_update_watermarks(crtc);
+ if (!dev_priv->wm_disp->initial_watermarks)
+ intel_update_watermarks(dev_priv);
/* clock the pipe down to 640x480@60 to potentially save power */
if (IS_I830(dev_priv))
@@ -4211,7 +3275,7 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
- dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
+ dev_priv->display->crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
@@ -4234,12 +3298,11 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
encoder->base.crtc = NULL;
intel_fbc_disable(crtc);
- intel_update_watermarks(crtc);
+ intel_update_watermarks(dev_priv);
intel_disable_shared_dpll(crtc_state);
intel_display_power_put_all_in_set(dev_priv, &crtc->enabled_power_domains);
- dev_priv->active_pipes &= ~BIT(pipe);
cdclk_state->min_cdclk[pipe] = 0;
cdclk_state->min_voltage_level[pipe] = 0;
cdclk_state->active_pipes &= ~BIT(pipe);
@@ -4596,13 +3659,6 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
}
- /* Cantiga+ cannot handle modes with a hsync front porch of 0.
- * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
- */
- if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
- pipe_mode->crtc_hsync_start == pipe_mode->crtc_hdisplay)
- return -EINVAL;
-
intel_crtc_compute_pixel_rate(pipe_config);
if (pipe_config->has_pch_encoder)
@@ -5412,102 +4468,6 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
BUG_ON(val != final);
}
-static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
-
- tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
-
- if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
- FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
-}
-
-/* WaMPhyProgramming:hsw */
-static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
-{
- u32 tmp;
-
- tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
- tmp &= ~(0xFF << 24);
- tmp |= (0x12 << 24);
- intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
- tmp |= (1 << 11);
- intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
- tmp |= (1 << 24) | (1 << 21) | (1 << 18);
- intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
- tmp &= ~(7 << 13);
- tmp |= (5 << 13);
- intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
- tmp &= ~0xFF;
- tmp |= 0x1C;
- intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
- tmp &= ~(0xFF << 16);
- tmp |= (0x1C << 16);
- intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
- tmp |= (1 << 27);
- intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
-
- tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
- tmp &= ~(0xF << 28);
- tmp |= (4 << 28);
- intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
-}
-
/* Implements 3 different sequences from BSpec chapter "Display iCLK
* Programming" based on the parameters passed:
* - Sequence to enable CLKOUT_DP
@@ -5540,10 +4500,8 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
tmp &= ~SBI_SSCCTL_PATHALT;
intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
- if (with_fdi) {
- lpt_reset_fdi_mphy(dev_priv);
- lpt_program_fdi_mphy(dev_priv);
- }
+ if (with_fdi)
+ lpt_fdi_program_mphy(dev_priv);
}
reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
@@ -5806,7 +4764,7 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
-static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
+static void hsw_set_transconf(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5870,9 +4828,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
val |= PIPEMISC_YUV420_ENABLE |
PIPEMISC_YUV420_MODE_FULL_BLEND;
- if (DISPLAY_VER(dev_priv) >= 11 &&
- (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
- BIT(PLANE_CURSOR))) == 0)
+ if (DISPLAY_VER(dev_priv) >= 11 && is_hdr_mode(crtc_state))
val |= PIPEMISC_HDR_MODE_PRECISION;
if (DISPLAY_VER(dev_priv) >= 12)
@@ -6211,59 +5167,64 @@ out:
return ret;
}
-static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config,
- struct intel_display_power_domain_set *power_domain_set)
+static bool transcoder_ddi_func_is_enabled(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- unsigned long panel_transcoder_mask = BIT(TRANSCODER_EDP);
- unsigned long enabled_panel_transcoders = 0;
- enum transcoder panel_transcoder;
- u32 tmp;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
+ u32 tmp = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
- panel_transcoder_mask |=
- BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- /*
- * The pipe->transcoder mapping is fixed with the exception of the eDP
- * and DSI transcoders handled below.
- */
- pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
+
+ return tmp & TRANS_DDI_FUNC_ENABLE;
+}
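transcoder_ddi_func_is_enabled() above reads TRANS_DDI_FUNC_CTL only while the transcoder's power domain can be grabbed; with tmp initialised to 0, an unpowered transcoder simply reads back as disabled. Open-coded with explicit get/put calls instead of the scoped helper macro, the same pattern is roughly:

/* Sketch of the guarded readout, open-coded for clarity */
static bool ddi_func_enabled_sketch(struct drm_i915_private *i915,
				    enum transcoder cpu_transcoder)
{
	enum intel_display_power_domain domain =
		POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	intel_wakeref_t wakeref;
	u32 tmp = 0;

	wakeref = intel_display_power_get_if_enabled(i915, domain);
	if (wakeref) {
		tmp = intel_de_read(i915, TRANS_DDI_FUNC_CTL(cpu_transcoder));
		intel_display_power_put(i915, domain, wakeref);
	}

	return tmp & TRANS_DDI_FUNC_ENABLE;
}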
+
+static u8 hsw_panel_transcoders(struct drm_i915_private *i915)
+{
+ u8 panel_transcoder_mask = BIT(TRANSCODER_EDP);
+
+ if (DISPLAY_VER(i915) >= 11)
+ panel_transcoder_mask |= BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
+
+ return panel_transcoder_mask;
+}
+
+static u8 hsw_enabled_transcoders(struct intel_crtc *crtc)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ u8 panel_transcoder_mask = hsw_panel_transcoders(dev_priv);
+ enum transcoder cpu_transcoder;
+ u8 enabled_transcoders = 0;
/*
* XXX: Do intel_display_power_get_if_enabled before reading this (for
* consistency and less surprising code; it's in always on power).
*/
- for_each_cpu_transcoder_masked(dev_priv, panel_transcoder,
+ for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder,
panel_transcoder_mask) {
- bool force_thru = false;
+ enum intel_display_power_domain power_domain;
+ intel_wakeref_t wakeref;
enum pipe trans_pipe;
+ u32 tmp = 0;
- tmp = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(panel_transcoder));
- if (!(tmp & TRANS_DDI_FUNC_ENABLE))
- continue;
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ with_intel_display_power_if_enabled(dev_priv, power_domain, wakeref)
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
- /*
- * Log all enabled ones, only use the first one.
- *
- * FIXME: This won't work for two separate DSI displays.
- */
- enabled_panel_transcoders |= BIT(panel_transcoder);
- if (enabled_panel_transcoders != BIT(panel_transcoder))
+ if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
drm_WARN(dev, 1,
"unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ transcoder_name(cpu_transcoder));
fallthrough;
case TRANS_DDI_EDP_INPUT_A_ONOFF:
- force_thru = true;
- fallthrough;
case TRANS_DDI_EDP_INPUT_A_ON:
trans_pipe = PIPE_A;
break;
@@ -6278,22 +5239,83 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
break;
}
- if (trans_pipe == crtc->pipe) {
- pipe_config->cpu_transcoder = panel_transcoder;
- pipe_config->pch_pfit.force_thru = force_thru;
- }
+ if (trans_pipe == crtc->pipe)
+ enabled_transcoders |= BIT(cpu_transcoder);
}
+ cpu_transcoder = (enum transcoder) crtc->pipe;
+ if (transcoder_ddi_func_is_enabled(dev_priv, cpu_transcoder))
+ enabled_transcoders |= BIT(cpu_transcoder);
+
+ return enabled_transcoders;
+}
+
+static bool has_edp_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & BIT(TRANSCODER_EDP);
+}
+
+static bool has_dsi_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & (BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static bool has_pipe_transcoders(u8 enabled_transcoders)
+{
+ return enabled_transcoders & ~(BIT(TRANSCODER_EDP) |
+ BIT(TRANSCODER_DSI_0) |
+ BIT(TRANSCODER_DSI_1));
+}
+
+static void assert_enabled_transcoders(struct drm_i915_private *i915,
+ u8 enabled_transcoders)
+{
+ /* Only one type of transcoder please */
+ drm_WARN_ON(&i915->drm,
+ has_edp_transcoders(enabled_transcoders) +
+ has_dsi_transcoders(enabled_transcoders) +
+ has_pipe_transcoders(enabled_transcoders) > 1);
+
+ /* Only DSI transcoders can be ganged */
+ drm_WARN_ON(&i915->drm,
+ !has_dsi_transcoders(enabled_transcoders) &&
+ !is_power_of_2(enabled_transcoders));
+}
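Condensed into a single predicate, the two checks above say that a pipe may be fed by at most one kind of transcoder, and that only the DSI transcoders are allowed to be enabled as a pair:

/* Restatement (sketch) of assert_enabled_transcoders(), reusing the
 * has_*_transcoders() helpers added above. */
static bool enabled_transcoders_are_sane(u8 enabled)
{
	bool edp = has_edp_transcoders(enabled);
	bool dsi = has_dsi_transcoders(enabled);
	bool pipe = has_pipe_transcoders(enabled);

	return (edp + dsi + pipe) <= 1 &&
	       (dsi || is_power_of_2(enabled));
}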
+
+static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
+ struct intel_crtc_state *pipe_config,
+ struct intel_display_power_domain_set *power_domain_set)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long enabled_transcoders;
+ u32 tmp;
+
+ enabled_transcoders = hsw_enabled_transcoders(crtc);
+ if (!enabled_transcoders)
+ return false;
+
+ assert_enabled_transcoders(dev_priv, enabled_transcoders);
+
/*
- * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
+ * With the exception of DSI we should only ever have
+ * a single enabled transcoder. With DSI let's just
+ * pick the first one.
*/
- drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ pipe_config->cpu_transcoder = ffs(enabled_transcoders) - 1;
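/* e.g. with both DSI transcoders enabled, ffs() returns the lowest set
 * bit, so this picks TRANSCODER_DSI_0 (assuming DSI_0 has the smaller
 * enum value, as in the driver's transcoder enum). */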
if (!intel_display_power_get_in_set_if_enabled(dev_priv, power_domain_set,
POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
return false;
+ if (hsw_panel_transcoders(dev_priv) & BIT(pipe_config->cpu_transcoder)) {
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
+
+ if ((tmp & TRANS_DDI_EDP_INPUT_MASK) == TRANS_DDI_EDP_INPUT_A_ONOFF)
+ pipe_config->pch_pfit.force_thru = true;
+ }
+
tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
@@ -6515,7 +5537,7 @@ static bool intel_crtc_get_pipe_config(struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (!i915->display.get_pipe_config(crtc, crtc_state))
+ if (!i915->display->get_pipe_config(crtc, crtc_state))
return false;
crtc_state->hw.active = true;
@@ -6531,28 +5553,6 @@ static const struct drm_display_mode load_detect_mode = {
704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
-struct drm_framebuffer *
-intel_framebuffer_create(struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct intel_framebuffer *intel_fb;
- int ret;
-
- intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
- if (!intel_fb)
- return ERR_PTR(-ENOMEM);
-
- ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
- if (ret)
- goto err;
-
- return &intel_fb->base;
-
-err:
- kfree(intel_fb);
- return ERR_PTR(ret);
-}
-
static int intel_modeset_disable_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
@@ -6780,7 +5780,6 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
u32 dpll = pipe_config->dpll_hw_state.dpll;
u32 fp;
struct dpll clock;
@@ -6830,11 +5829,13 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
- LVDS);
- bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
+ enum pipe lvds_pipe;
+
+ if (IS_I85X(dev_priv) &&
+ intel_lvds_port_enabled(dev_priv, LVDS, &lvds_pipe) &&
+ lvds_pipe == crtc->pipe) {
+ u32 lvds = intel_de_read(dev_priv, LVDS);
- if (is_lvds) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
@@ -6985,27 +5986,27 @@ static bool needs_scaling(const struct intel_plane_state *state)
}
int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
- struct intel_crtc_state *crtc_state,
+ struct intel_crtc_state *new_crtc_state,
const struct intel_plane_state *old_plane_state,
- struct intel_plane_state *plane_state)
+ struct intel_plane_state *new_plane_state)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
+ struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- bool mode_changed = intel_crtc_needs_modeset(crtc_state);
+ bool mode_changed = intel_crtc_needs_modeset(new_crtc_state);
bool was_crtc_enabled = old_crtc_state->hw.active;
- bool is_crtc_enabled = crtc_state->hw.active;
+ bool is_crtc_enabled = new_crtc_state->hw.active;
bool turn_off, turn_on, visible, was_visible;
int ret;
if (DISPLAY_VER(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
- ret = skl_update_scaler_plane(crtc_state, plane_state);
+ ret = skl_update_scaler_plane(new_crtc_state, new_plane_state);
if (ret)
return ret;
}
was_visible = old_plane_state->uapi.visible;
- visible = plane_state->uapi.visible;
+ visible = new_plane_state->uapi.visible;
if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
@@ -7021,7 +6022,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
* only combine the results from all planes in the current place?
*/
if (!is_crtc_enabled) {
- intel_plane_set_invisible(crtc_state, plane_state);
+ intel_plane_set_invisible(new_crtc_state, new_plane_state);
visible = false;
}
@@ -7040,28 +6041,28 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
if (turn_on) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_pre = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
+ new_crtc_state->disable_cxsr = true;
} else if (turn_off) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_post = true;
/* must disable cxsr around plane enable/disable */
if (plane->id != PLANE_CURSOR)
- crtc_state->disable_cxsr = true;
- } else if (intel_wm_need_update(old_plane_state, plane_state)) {
+ new_crtc_state->disable_cxsr = true;
+ } else if (intel_wm_need_update(old_plane_state, new_plane_state)) {
if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) {
/* FIXME bollocks */
- crtc_state->update_wm_pre = true;
- crtc_state->update_wm_post = true;
+ new_crtc_state->update_wm_pre = true;
+ new_crtc_state->update_wm_post = true;
}
}
if (visible || was_visible)
- crtc_state->fb_bits |= plane->frontbuffer_bit;
+ new_crtc_state->fb_bits |= plane->frontbuffer_bit;
/*
* ILK/SNB DVSACNTR/Sprite Enable
@@ -7100,8 +6101,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
(IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) ||
IS_IVYBRIDGE(dev_priv)) &&
(turn_on || (!needs_scaling(old_plane_state) &&
- needs_scaling(plane_state))))
- crtc_state->disable_lp_wm = true;
+ needs_scaling(new_plane_state))))
+ new_crtc_state->disable_lp_wm = true;
return 0;
}
@@ -7363,10 +6364,10 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
crtc_state->update_wm_post = true;
if (mode_changed && crtc_state->hw.enable &&
- dev_priv->display.crtc_compute_clock &&
+ dev_priv->dpll_funcs &&
!crtc_state->bigjoiner_slave &&
!drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
- ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
+ ret = dev_priv->dpll_funcs->crtc_compute_clock(crtc_state);
if (ret)
return ret;
}
@@ -7385,32 +6386,23 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- if (dev_priv->display.compute_pipe_wm) {
- ret = dev_priv->display.compute_pipe_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "Target pipe watermarks are invalid\n");
- return ret;
- }
-
+ ret = intel_compute_pipe_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
+ return ret;
}
- if (dev_priv->display.compute_intermediate_wm) {
- if (drm_WARN_ON(&dev_priv->drm,
- !dev_priv->display.compute_pipe_wm))
- return 0;
-
- /*
- * Calculate 'intermediate' watermarks that satisfy both the
- * old state and the new state. We can program these
- * immediately.
- */
- ret = dev_priv->display.compute_intermediate_wm(state, crtc);
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "No valid intermediate pipe watermarks are possible\n");
- return ret;
- }
+ /*
+ * Calculate 'intermediate' watermarks that satisfy both the
+ * old state and the new state. We can program these
+ * immediately.
+ */
+ ret = intel_compute_intermediate_wm(state, crtc);
+ if (ret) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
+ return ret;
}
if (DISPLAY_VER(dev_priv) >= 9) {
@@ -7439,11 +6431,9 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
}
- if (!mode_changed) {
- ret = intel_psr2_sel_fetch_update(state, crtc);
- if (ret)
- return ret;
- }
+ ret = intel_psr2_sel_fetch_update(state, crtc);
+ if (ret)
+ return ret;
return 0;
}
@@ -8153,11 +7143,10 @@ encoder_retry:
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
+ if (ret == -EDEADLK)
+ return ret;
if (ret < 0) {
- if (ret != -EDEADLK)
- drm_dbg_kms(&i915->drm,
- "Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm, "Encoder config failure: %d\n", ret);
return ret;
}
}
@@ -8171,12 +7160,7 @@ encoder_retry:
ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
if (ret == -EDEADLK)
return ret;
- if (ret < 0) {
- drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
- return ret;
- }
-
- if (ret == I915_DISPLAY_CONFIG_RETRY) {
+ if (ret == -EAGAIN) {
if (drm_WARN(&i915->drm, !retry,
"loop in pipe configuration computation\n"))
return -EINVAL;
@@ -8185,6 +7169,10 @@ encoder_retry:
retry = false;
goto encoder_retry;
}
+ if (ret < 0) {
+ drm_dbg_kms(&i915->drm, "CRTC config failure: %d\n", ret);
+ return ret;
+ }
/* Dithering seems to not pass-through bits correctly when it should, so
* only enable it on 6bpc panels and when its not a compliance
@@ -8720,10 +7708,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
- PIPE_CONF_CHECK_BOOL(has_psr);
- PIPE_CONF_CHECK_BOOL(has_psr2);
- PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
- PIPE_CONF_CHECK_I(dc3co_exitline);
+ if (current_config->active_planes) {
+ PIPE_CONF_CHECK_BOOL(has_psr);
+ PIPE_CONF_CHECK_BOOL(has_psr2);
+ PIPE_CONF_CHECK_BOOL(enable_psr2_sel_fetch);
+ PIPE_CONF_CHECK_I(dc3co_exitline);
+ }
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -8780,7 +7770,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(min_voltage_level);
}
- if (fastset && (current_config->has_psr || pipe_config->has_psr))
+ if (current_config->has_psr || pipe_config->has_psr)
PIPE_CONF_CHECK_X_WITH_MASK(infoframes.enable,
~intel_hdmi_infoframe_enable(DP_SDP_VSC));
else
@@ -9402,7 +8392,7 @@ static void intel_modeset_clear_plls(struct intel_atomic_state *state)
struct intel_crtc *crtc;
int i;
- if (!dev_priv->display.crtc_compute_clock)
+ if (!dev_priv->dpll_funcs)
return;
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
@@ -9503,23 +8493,6 @@ static int intel_modeset_checks(struct intel_atomic_state *state)
return 0;
}
-/*
- * Handle calculation of various watermark data at the end of the atomic check
- * phase. The code here should be run after the per-crtc and per-plane 'check'
- * handlers to ensure that all derived state has been updated.
- */
-static int calc_watermark_data(struct intel_atomic_state *state)
-{
- struct drm_device *dev = state->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
-
- /* Is there platform-specific watermark information to calculate? */
- if (dev_priv->display.compute_global_watermarks)
- return dev_priv->display.compute_global_watermarks(state);
-
- return 0;
-}
-
static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
@@ -9627,13 +8600,28 @@ static int intel_bigjoiner_add_affected_planes(struct intel_atomic_state *state)
return 0;
}
+static bool bo_has_valid_encryption(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ return intel_pxp_key_check(&i915->gt.pxp, obj, false) == 0;
+}
+
+static bool pxp_is_borked(struct drm_i915_gem_object *obj)
+{
+ return i915_gem_object_is_protected(obj) && !bo_has_valid_encryption(obj);
+}
+
static int intel_atomic_check_planes(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
struct intel_plane_state *plane_state;
struct intel_plane *plane;
+ struct intel_plane_state *new_plane_state;
+ struct intel_plane_state *old_plane_state;
struct intel_crtc *crtc;
+ const struct drm_framebuffer *fb;
int i, ret;
ret = icl_add_linked_planes(state);
@@ -9681,6 +8669,19 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state)
return ret;
}
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ new_plane_state = intel_atomic_get_new_plane_state(state, plane);
+ old_plane_state = intel_atomic_get_old_plane_state(state, plane);
+ fb = new_plane_state->hw.fb;
+ if (fb) {
+ new_plane_state->decrypt = bo_has_valid_encryption(intel_fb_obj(fb));
+ new_plane_state->force_black = pxp_is_borked(intel_fb_obj(fb));
+ } else {
+ new_plane_state->decrypt = old_plane_state->decrypt;
+ new_plane_state->force_black = old_plane_state->force_black;
+ }
+ }
+
return 0;
}
@@ -9715,7 +8716,7 @@ static int intel_atomic_check_cdclk(struct intel_atomic_state *state,
old_cdclk_state->force_min_cdclk != new_cdclk_state->force_min_cdclk)
*need_cdclk_calc = true;
- ret = dev_priv->display.bw_calc_min_cdclk(state);
+ ret = intel_cdclk_bw_calc_min_cdclk(state);
if (ret)
return ret;
@@ -9967,6 +8968,10 @@ static int intel_atomic_check_async(struct intel_atomic_state *state)
drm_dbg_kms(&i915->drm, "Color range cannot be changed in async flip\n");
return -EINVAL;
}
+
+ /* plane decryption is allowed to change only in synchronous flips */
+ if (old_plane_state->decrypt != new_plane_state->decrypt)
+ return -EINVAL;
}
return 0;
@@ -10166,7 +9171,7 @@ static int intel_atomic_check(struct drm_device *dev,
goto fail;
intel_fbc_choose_crtc(dev_priv, state);
- ret = calc_watermark_data(state);
+ ret = intel_compute_global_watermarks(state);
if (ret)
goto fail;
@@ -10336,12 +9341,11 @@ static void commit_pipe_pre_planes(struct intel_atomic_state *state,
if (new_crtc_state->update_pipe)
intel_pipe_fastset(old_crtc_state, new_crtc_state);
-
- intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
}
- if (dev_priv->display.atomic_update_watermarks)
- dev_priv->display.atomic_update_watermarks(state, crtc);
+ intel_psr2_program_trans_man_trk_ctl(new_crtc_state);
+
+ intel_atomic_update_watermarks(state, crtc);
}
static void commit_pipe_post_planes(struct intel_atomic_state *state,
@@ -10373,7 +9377,7 @@ static void intel_enable_crtc(struct intel_atomic_state *state,
intel_crtc_update_active_timings(new_crtc_state);
- dev_priv->display.crtc_enable(state, crtc);
+ dev_priv->display->crtc_enable(state, crtc);
if (new_crtc_state->bigjoiner_slave)
return;
@@ -10404,10 +9408,7 @@ static void intel_update_crtc(struct intel_atomic_state *state,
intel_encoders_update_pipe(state, crtc);
}
- if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
- intel_fbc_disable(crtc);
- else
- intel_fbc_enable(state, crtc);
+ intel_fbc_update(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -10464,16 +9465,15 @@ static void intel_old_crtc_state_disables(struct intel_atomic_state *state,
*/
intel_crtc_disable_pipe_crc(crtc);
- dev_priv->display.crtc_disable(state, crtc);
+ dev_priv->display->crtc_disable(state, crtc);
crtc->active = false;
intel_fbc_disable(crtc);
intel_disable_shared_dpll(old_crtc_state);
/* FIXME unify this for all platforms */
if (!new_crtc_state->hw.active &&
- !HAS_GMCH(dev_priv) &&
- dev_priv->display.initial_watermarks)
- dev_priv->display.initial_watermarks(state, crtc);
+ !HAS_GMCH(dev_priv))
+ intel_initial_watermarks(state, crtc);
}
static void intel_commit_modeset_disables(struct intel_atomic_state *state)
@@ -10837,6 +9837,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
intel_encoders_update_prepare(state);
intel_dbuf_pre_plane_update(state);
+ intel_psr_pre_plane_update(state);
for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
if (new_crtc_state->uapi.async_flip)
@@ -10844,7 +9845,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
}
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
- dev_priv->display.commit_modeset_enables(state);
+ dev_priv->display->commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
@@ -10895,11 +9896,11 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (DISPLAY_VER(dev_priv) == 2 && planes_enabling(old_crtc_state, new_crtc_state))
intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
- if (dev_priv->display.optimize_watermarks)
- dev_priv->display.optimize_watermarks(state, crtc);
+ intel_optimize_watermarks(state, crtc);
}
intel_dbuf_post_plane_update(state);
+ intel_psr_post_plane_update(state);
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -11088,280 +10089,6 @@ static int intel_atomic_commit(struct drm_device *dev,
return 0;
}
-struct wait_rps_boost {
- struct wait_queue_entry wait;
-
- struct drm_crtc *crtc;
- struct i915_request *request;
-};
-
-static int do_rps_boost(struct wait_queue_entry *_wait,
- unsigned mode, int sync, void *key)
-{
- struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
- struct i915_request *rq = wait->request;
-
- /*
- * If we missed the vblank, but the request is already running it
- * is reasonable to assume that it will complete before the next
- * vblank without our intervention, so leave RPS alone.
- */
- if (!i915_request_started(rq))
- intel_rps_boost(rq);
- i915_request_put(rq);
-
- drm_crtc_vblank_put(wait->crtc);
-
- list_del(&wait->wait.entry);
- kfree(wait);
- return 1;
-}
-
-static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
- struct dma_fence *fence)
-{
- struct wait_rps_boost *wait;
-
- if (!dma_fence_is_i915(fence))
- return;
-
- if (DISPLAY_VER(to_i915(crtc->dev)) < 6)
- return;
-
- if (drm_crtc_vblank_get(crtc))
- return;
-
- wait = kmalloc(sizeof(*wait), GFP_KERNEL);
- if (!wait) {
- drm_crtc_vblank_put(crtc);
- return;
- }
-
- wait->request = to_request(dma_fence_get(fence));
- wait->crtc = crtc;
-
- wait->wait.func = do_rps_boost;
- wait->wait.flags = 0;
-
- add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
-}
-
-int intel_plane_pin_fb(struct intel_plane_state *plane_state)
-{
- struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- struct drm_framebuffer *fb = plane_state->hw.fb;
- struct i915_vma *vma;
- bool phys_cursor =
- plane->id == PLANE_CURSOR &&
- INTEL_INFO(dev_priv)->display.cursor_needs_physical;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
- &plane_state->view.gtt,
- intel_plane_uses_fence(plane_state),
- &plane_state->flags);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = intel_dpt_pin(intel_fb->dpt_vm);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- plane_state->ggtt_vma = vma;
-
- vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
- &plane_state->flags, intel_fb->dpt_vm);
- if (IS_ERR(vma)) {
- intel_dpt_unpin(intel_fb->dpt_vm);
- plane_state->ggtt_vma = NULL;
- return PTR_ERR(vma);
- }
-
- plane_state->dpt_vma = vma;
-
- WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
- }
-
- return 0;
-}
-
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
-{
- struct drm_framebuffer *fb = old_plane_state->hw.fb;
- struct i915_vma *vma;
-
- if (!intel_fb_uses_dpt(fb)) {
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
- } else {
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- vma = fetch_and_zero(&old_plane_state->dpt_vma);
- if (vma)
- intel_unpin_fb_vma(vma, old_plane_state->flags);
-
- vma = fetch_and_zero(&old_plane_state->ggtt_vma);
- if (vma)
- intel_dpt_unpin(intel_fb->dpt_vm);
- }
-}
-
-/**
- * intel_prepare_plane_fb - Prepare fb for usage on plane
- * @_plane: drm plane to prepare for
- * @_new_plane_state: the plane state being prepared
- *
- * Prepares a framebuffer for usage on a display plane. Generally this
- * involves pinning the underlying object and updating the frontbuffer tracking
- * bits. Some older platforms need special physical address handling for
- * cursor planes.
- *
- * Returns 0 on success, negative error code on failure.
- */
-int
-intel_prepare_plane_fb(struct drm_plane *_plane,
- struct drm_plane_state *_new_plane_state)
-{
- struct i915_sched_attr attr = { .priority = I915_PRIORITY_DISPLAY };
- struct intel_plane *plane = to_intel_plane(_plane);
- struct intel_plane_state *new_plane_state =
- to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
- const struct intel_plane_state *old_plane_state =
- intel_atomic_get_old_plane_state(state, plane);
- struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
- int ret;
-
- if (old_obj) {
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state,
- to_intel_crtc(old_plane_state->hw.crtc));
-
- /* Big Hammer, we also need to ensure that any pending
- * MI_WAIT_FOR_EVENT inside a user batch buffer on the
- * current scanout is retired before unpinning the old
- * framebuffer. Note that we rely on userspace rendering
- * into the buffer attached to the pipe they are waiting
- * on. If not, userspace generates a GPU hang with IPEHR
- * point to the MI_WAIT_FOR_EVENT.
- *
- * This should only fail upon a hung GPU, in which case we
- * can safely continue.
- */
- if (intel_crtc_needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- old_obj->base.resv, NULL,
- false, 0,
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
- }
-
- if (new_plane_state->uapi.fence) { /* explicit fencing */
- i915_gem_fence_wait_priority(new_plane_state->uapi.fence,
- &attr);
- ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
- new_plane_state->uapi.fence,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- return ret;
- }
-
- if (!obj)
- return 0;
-
-
- ret = intel_plane_pin_fb(new_plane_state);
- if (ret)
- return ret;
-
- i915_gem_object_wait_priority(obj, 0, &attr);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_DIRTYFB);
-
- if (!new_plane_state->uapi.fence) { /* implicit fencing */
- struct dma_fence *fence;
-
- ret = i915_sw_fence_await_reservation(&state->commit_ready,
- obj->base.resv, NULL,
- false,
- i915_fence_timeout(dev_priv),
- GFP_KERNEL);
- if (ret < 0)
- goto unpin_fb;
-
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- if (fence) {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- fence);
- dma_fence_put(fence);
- }
- } else {
- add_rps_boost_after_vblank(new_plane_state->hw.crtc,
- new_plane_state->uapi.fence);
- }
-
- /*
- * We declare pageflips to be interactive and so merit a small bias
- * towards upclocking to deliver the frame on time. By only changing
- * the RPS thresholds to sample more regularly and aim for higher
- * clocks we can hopefully deliver low power workloads (like kodi)
- * that are not quite steady state without resorting to forcing
- * maximum clocks following a vblank miss (see do_rps_boost()).
- */
- if (!state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- state->rps_interactive = true;
- }
-
- return 0;
-
-unpin_fb:
- intel_plane_unpin_fb(new_plane_state);
-
- return ret;
-}
-
-/**
- * intel_cleanup_plane_fb - Cleans up an fb after plane use
- * @plane: drm plane to clean up for
- * @_old_plane_state: the state from the previous modeset
- *
- * Cleans up a framebuffer that has just been removed from a plane.
- */
-void
-intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *_old_plane_state)
-{
- struct intel_plane_state *old_plane_state =
- to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *state =
- to_intel_atomic_state(old_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
-
- if (!obj)
- return;
-
- if (state->rps_interactive) {
- intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- state->rps_interactive = false;
- }
-
- /* Should only be called after a successful intel_prepare_plane_fb()! */
- intel_plane_unpin_fb(old_plane_state);
-}
-
/**
* intel_plane_destroy - destroy a plane
* @plane: plane to destroy
@@ -11491,6 +10218,7 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_ddi_init(dev_priv, PORT_TC2);
intel_ddi_init(dev_priv, PORT_TC3);
intel_ddi_init(dev_priv, PORT_TC4);
+ icl_dsi_init(dev_priv);
} else if (IS_ALDERLAKE_S(dev_priv)) {
intel_ddi_init(dev_priv, PORT_A);
intel_ddi_init(dev_priv, PORT_TC1);
@@ -11708,249 +10436,6 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
}
-static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
-{
- struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-
- drm_framebuffer_cleanup(fb);
-
- if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
-
- intel_frontbuffer_put(intel_fb->frontbuffer);
-
- kfree(intel_fb);
-}
-
-static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned int *handle)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_private *i915 = to_i915(obj->base.dev);
-
- if (i915_gem_object_is_userptr(obj)) {
- drm_dbg(&i915->drm,
- "attempting to use a userptr for a framebuffer, denied\n");
- return -EINVAL;
- }
-
- return drm_gem_handle_create(file, &obj->base, handle);
-}
-
-static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
- struct drm_file *file,
- unsigned flags, unsigned color,
- struct drm_clip_rect *clips,
- unsigned num_clips)
-{
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
-
- i915_gem_object_flush_if_display(obj);
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
-
- return 0;
-}
-
-static const struct drm_framebuffer_funcs intel_fb_funcs = {
- .destroy = intel_user_framebuffer_destroy,
- .create_handle = intel_user_framebuffer_create_handle,
- .dirty = intel_user_framebuffer_dirty,
-};
-
-static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
- struct drm_i915_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct drm_framebuffer *fb = &intel_fb->base;
- u32 max_stride;
- unsigned int tiling, stride;
- int ret = -EINVAL;
- int i;
-
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
- if (!intel_fb->frontbuffer)
- return -ENOMEM;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
-
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode doesn't match fb modifier\n");
- goto err;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(&dev_priv->drm,
- "No Y tiling for legacy addfb\n");
- goto err;
- }
- }
-
- if (!drm_any_plane_has_format(&dev_priv->drm,
- mode_cmd->pixel_format,
- mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "unsupported pixel format %p4cc / modifier 0x%llx\n",
- &mode_cmd->pixel_format, mode_cmd->modifier[0]);
- goto err;
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(dev_priv) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(&dev_priv->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- goto err;
- }
-
- max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
- mode_cmd->modifier[0]);
- if (mode_cmd->pitches[0] > max_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
- goto err;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(&dev_priv->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- goto err;
- }
-
- /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
- if (mode_cmd->offsets[0] != 0) {
- drm_dbg_kms(&dev_priv->drm,
- "plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
- goto err;
- }
-
- drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
-
- for (i = 0; i < fb->format->num_planes; i++) {
- u32 stride_alignment;
-
- if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
- i);
- goto err;
- }
-
- stride_alignment = intel_fb_stride_alignment(fb, i);
- if (fb->pitches[i] & (stride_alignment - 1)) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
- goto err;
- }
-
- if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
- int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
-
- if (fb->pitches[i] != ccs_aux_stride) {
- drm_dbg_kms(&dev_priv->drm,
- "ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
- goto err;
- }
- }
-
- /* TODO: Add POT stride remapping support for CCS formats as well. */
- if (IS_ALDERLAKE_P(dev_priv) &&
- mode_cmd->modifier[i] != DRM_FORMAT_MOD_LINEAR &&
- !intel_fb_needs_pot_stride_remap(intel_fb) &&
- !is_power_of_2(mode_cmd->pitches[i])) {
- drm_dbg_kms(&dev_priv->drm,
- "plane %d pitch (%d) must be power of two for tiled buffers\n",
- i, mode_cmd->pitches[i]);
- goto err;
- }
-
- fb->obj[i] = &obj->base;
- }
-
- ret = intel_fill_fb_info(dev_priv, intel_fb);
- if (ret)
- goto err;
-
- if (intel_fb_uses_dpt(fb)) {
- struct i915_address_space *vm;
-
- vm = intel_dpt_create(intel_fb);
- if (IS_ERR(vm)) {
- ret = PTR_ERR(vm);
- goto err;
- }
-
- intel_fb->dpt_vm = vm;
- }
-
- ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
- if (ret) {
- drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
- goto err;
- }
-
- return 0;
-
-err:
- intel_frontbuffer_put(intel_fb->frontbuffer);
- return ret;
-}
-
-static struct drm_framebuffer *
-intel_user_framebuffer_create(struct drm_device *dev,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *user_mode_cmd)
-{
- struct drm_framebuffer *fb;
- struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- struct drm_i915_private *i915;
-
- obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- i915 = to_i915(obj->base.dev);
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- return ERR_PTR(-EREMOTE);
- }
-
- fb = intel_framebuffer_create(obj, &mode_cmd);
- i915_gem_object_put(obj);
-
- return fb;
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -12039,6 +10524,14 @@ intel_mode_valid(struct drm_device *dev,
return MODE_V_ILLEGAL;
}
+ /*
+ * Cantiga+ cannot handle modes with a hsync front porch of 0.
+ * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
+ */
+ if ((DISPLAY_VER(dev_priv) > 4 || IS_G4X(dev_priv)) &&
+ mode->hsync_start == mode->hdisplay)
+ return MODE_H_ILLEGAL;
+
return MODE_OK;
}
@@ -12090,6 +10583,46 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.atomic_state_free = intel_atomic_state_free,
};
+static const struct drm_i915_display_funcs skl_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = skl_commit_modeset_enables,
+ .get_initial_plane_config = skl_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs ddi_display_funcs = {
+ .get_pipe_config = hsw_get_pipe_config,
+ .crtc_enable = hsw_crtc_enable,
+ .crtc_disable = hsw_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs pch_split_display_funcs = {
+ .get_pipe_config = ilk_get_pipe_config,
+ .crtc_enable = ilk_crtc_enable,
+ .crtc_disable = ilk_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs vlv_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = valleyview_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
+static const struct drm_i915_display_funcs i9xx_display_funcs = {
+ .get_pipe_config = i9xx_get_pipe_config,
+ .crtc_enable = i9xx_crtc_enable,
+ .crtc_disable = i9xx_crtc_disable,
+ .commit_modeset_enables = intel_commit_modeset_enables,
+ .get_initial_plane_config = i9xx_get_initial_plane_config,
+};
+
/**
* intel_init_display_hooks - initialize the display modesetting hooks
* @dev_priv: device private
@@ -12105,38 +10638,19 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
intel_dpll_init_clock_hook(dev_priv);
if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &skl_display_funcs;
} else if (HAS_DDI(dev_priv)) {
- dev_priv->display.get_pipe_config = hsw_get_pipe_config;
- dev_priv->display.crtc_enable = hsw_crtc_enable;
- dev_priv->display.crtc_disable = hsw_crtc_disable;
+ dev_priv->display = &ddi_display_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
- dev_priv->display.get_pipe_config = ilk_get_pipe_config;
- dev_priv->display.crtc_enable = ilk_crtc_enable;
- dev_priv->display.crtc_disable = ilk_crtc_disable;
+ dev_priv->display = &pch_split_display_funcs;
} else if (IS_CHERRYVIEW(dev_priv) ||
IS_VALLEYVIEW(dev_priv)) {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = valleyview_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &vlv_display_funcs;
} else {
- dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
- dev_priv->display.crtc_enable = i9xx_crtc_enable;
- dev_priv->display.crtc_disable = i9xx_crtc_disable;
+ dev_priv->display = &i9xx_display_funcs;
}
intel_fdi_init_hook(dev_priv);
-
- if (DISPLAY_VER(dev_priv) >= 9) {
- dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = skl_get_initial_plane_config;
- } else {
- dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables;
- dev_priv->display.get_initial_plane_config = i9xx_get_initial_plane_config;
- }
-
}
void intel_modeset_init_hw(struct drm_i915_private *i915)
@@ -12206,7 +10720,7 @@ static void sanitize_watermarks(struct drm_i915_private *dev_priv)
int i;
/* Only supported on platforms that use atomic watermark design */
- if (!dev_priv->display.optimize_watermarks)
+ if (!dev_priv->wm_disp->optimize_watermarks)
return;
state = drm_atomic_state_alloc(&dev_priv->drm);
@@ -12239,7 +10753,7 @@ retry:
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
crtc_state->wm.need_postvbl_update = true;
- dev_priv->display.optimize_watermarks(intel_state, crtc);
+ intel_optimize_watermarks(intel_state, crtc);
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
@@ -12271,22 +10785,6 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
-static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
-{
- if (IS_IRONLAKE(dev_priv)) {
- u32 fdi_pll_clk =
- intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
-
- dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
- } else if (IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) {
- dev_priv->fdi_pll_freq = 270000;
- } else {
- return;
- }
-
- drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
-}
-
static int intel_initial_commit(struct drm_device *dev)
{
struct drm_atomic_state *state = NULL;
@@ -12381,7 +10879,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->funcs = &intel_mode_funcs;
- mode_config->async_page_flip = has_async_flips(i915);
+ mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915);
/*
* Maximum framebuffer dimensions, chosen to match
@@ -12420,22 +10918,6 @@ static void intel_mode_config_cleanup(struct drm_i915_private *i915)
drm_mode_config_cleanup(&i915->drm);
}
-static void plane_config_fini(struct intel_initial_plane_config *plane_config)
-{
- if (plane_config->fb) {
- struct drm_framebuffer *fb = &plane_config->fb->base;
-
- /* We may only have the stub and not a full framebuffer */
- if (drm_framebuffer_read_refcount(fb))
- drm_framebuffer_put(fb);
- else
- kfree(fb);
- }
-
- if (plane_config->vma)
- i915_vma_put(plane_config->vma);
-}
-
/* part #1: call before irq install */
int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
@@ -12540,7 +11022,7 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
- intel_update_fdi_pll_freq(i915);
+ intel_fdi_pll_freq_update(i915);
intel_update_czclk(i915);
intel_modeset_init_hw(i915);
@@ -12564,30 +11046,13 @@ int intel_modeset_init_nogem(struct drm_i915_private *i915)
drm_modeset_lock_all(dev);
intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
+ intel_acpi_assign_connector_fwnodes(i915);
drm_modeset_unlock_all(dev);
for_each_intel_crtc(dev, crtc) {
- struct intel_initial_plane_config plane_config = {};
-
if (!to_intel_crtc_state(crtc->base.state)->uapi.active)
continue;
-
- /*
- * Note that reserving the BIOS fb up front prevents us
- * from stuffing other stolen allocations like the ring
- * on top. This prevents some ugliness at boot time, and
- * can even allow for smooth boot transitions if the BIOS
- * fb is large enough for the active pipe configuration.
- */
- i915->display.get_initial_plane_config(crtc, &plane_config);
-
- /*
- * If the fb is shared between multiple heads, we'll
- * just get the first one.
- */
- intel_find_initial_plane_obj(crtc, &plane_config);
-
- plane_config_fini(&plane_config);
+ intel_crtc_initial_plane_config(crtc);
}
/*
@@ -12869,13 +11334,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
intel_plane_disable_noatomic(crtc, plane);
}
- /*
- * Disable any background color set by the BIOS, but enable the
- * gamma and CSC to match how we program our planes.
- */
- if (DISPLAY_VER(dev_priv) >= 9)
- intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
+ /* Disable any background color/etc. set by the BIOS */
+ intel_color_commit(crtc_state);
}
/* Adjust the state of the output pipe according to whether we
@@ -13076,8 +11536,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
enableddisabled(crtc_state->hw.active));
}
- dev_priv->active_pipes = cdclk_state->active_pipes =
- dbuf_state->active_pipes = active_pipes;
+ cdclk_state->active_pipes = dbuf_state->active_pipes = active_pipes;
readout_plane_state(dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 284936f0ddab..0c76bf57f86b 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -270,6 +270,7 @@ enum tc_port {
};
enum tc_port_mode {
+ TC_PORT_DISCONNECTED,
TC_PORT_TBT_ALT,
TC_PORT_DP_ALT,
TC_PORT_LEGACY,
@@ -531,8 +532,8 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
-void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
-void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
+void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state);
+void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc);
@@ -548,8 +549,6 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv);
unsigned int intel_fb_xy_to_linear(int x, int y,
const struct intel_plane_state *state,
int plane);
-unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
- int color_plane, unsigned int height);
void intel_add_fb_offsets(int *x, int *y,
const struct intel_plane_state *state, int plane);
unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info);
@@ -577,19 +576,9 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
void intel_release_load_detect_pipe(struct drm_connector *connector,
struct intel_load_detect_pipe *old,
struct drm_modeset_acquire_ctx *ctx);
-struct i915_vma *
-intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, bool phys_cursor,
- const struct i915_ggtt_view *view,
- bool uses_fence,
- unsigned long *out_flags);
-void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
struct drm_framebuffer *
intel_framebuffer_create(struct drm_i915_gem_object *obj,
struct drm_mode_fb_cmd2 *mode_cmd);
-int intel_prepare_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *new_state);
-void intel_cleanup_plane_fb(struct drm_plane *plane,
- struct drm_plane_state *old_state);
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
enum pipe pipe);
@@ -620,19 +609,16 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state);
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc);
unsigned int intel_plane_fence_y_offset(const struct intel_plane_state *plane_state);
+bool intel_plane_uses_fence(const struct intel_plane_state *plane_state);
bool
intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
u64 modifier);
-int intel_plane_pin_fb(struct intel_plane_state *plane_state);
-void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
struct intel_encoder *
intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
const struct intel_crtc_state *crtc_state);
-
-unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
- int color_plane);
-unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
+void intel_plane_disable_noatomic(struct intel_crtc *crtc,
+ struct intel_plane *plane);
void intel_display_driver_register(struct drm_i915_private *i915);
void intel_display_driver_unregister(struct drm_i915_private *i915);
@@ -650,23 +636,10 @@ void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
int intel_modeset_all_pipes(struct intel_atomic_state *state);
/* modesetting asserts */
-void assert_panel_unlocked(struct drm_i915_private *dev_priv,
- enum pipe pipe);
-void assert_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_pll_enabled(d, p) assert_pll(d, p, true)
-#define assert_pll_disabled(d, p) assert_pll(d, p, false)
-void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state);
-#define assert_dsi_pll_enabled(d) assert_dsi_pll(d, true)
-#define assert_dsi_pll_disabled(d) assert_dsi_pll(d, false)
-void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state);
-#define assert_fdi_rx_pll_enabled(d, p) assert_fdi_rx_pll(d, p, true)
-#define assert_fdi_rx_pll_disabled(d, p) assert_fdi_rx_pll(d, p, false)
-void assert_pipe(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder, bool state);
-#define assert_pipe_enabled(d, t) assert_pipe(d, t, true)
-#define assert_pipe_disabled(d, t) assert_pipe(d, t, false)
+void assert_transcoder(struct drm_i915_private *dev_priv,
+ enum transcoder cpu_transcoder, bool state);
+#define assert_transcoder_enabled(d, t) assert_transcoder(d, t, true)
+#define assert_transcoder_disabled(d, t) assert_transcoder(d, t, false)
/* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
* WARN_ON()) for hw state sanity checks to check for unexpected conditions
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 8fdacb252bb1..e04767695530 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -7,18 +7,19 @@
#include <drm/drm_fourcc.h>
#include "i915_debugfs.h"
+#include "intel_de.h"
#include "intel_display_debugfs.h"
#include "intel_display_power.h"
-#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dp.h"
+#include "intel_dp_mst.h"
+#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
#include "intel_pm.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_sprite.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
@@ -1323,9 +1324,6 @@ static int i915_drrs_status(struct seq_file *m, void *unused)
return 0;
}
-#define LPSP_STATUS(COND) (COND ? seq_puts(m, "LPSP: enabled\n") : \
- seq_puts(m, "LPSP: disabled\n"))
-
static bool
intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
enum i915_power_well_id power_well_id)
@@ -1344,32 +1342,20 @@ intel_lpsp_power_well_enabled(struct drm_i915_private *i915,
static int i915_lpsp_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
-
- if (DISPLAY_VER(i915) >= 13) {
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915,
- SKL_DISP_PW_2));
+ bool lpsp_enabled = false;
+
+ if (DISPLAY_VER(i915) >= 13 || IS_DISPLAY_VER(i915, 9, 10)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2);
+ } else if (IS_DISPLAY_VER(i915, 11, 12)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3);
+ } else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
+ lpsp_enabled = !intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL);
+ } else {
+ seq_puts(m, "LPSP: not supported\n");
return 0;
}
- switch (DISPLAY_VER(i915)) {
- case 12:
- case 11:
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, ICL_DISP_PW_3));
- break;
- case 10:
- case 9:
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, SKL_DISP_PW_2));
- break;
- default:
- /*
- * Apart from HASWELL/BROADWELL other legacy platform doesn't
- * support lpsp.
- */
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- LPSP_STATUS(!intel_lpsp_power_well_enabled(i915, HSW_DISP_PW_GLOBAL));
- else
- seq_puts(m, "LPSP: not supported\n");
- }
+ seq_printf(m, "LPSP: %s\n", enableddisabled(lpsp_enabled));
return 0;
}
@@ -1393,7 +1379,7 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused)
continue;
dig_port = enc_to_dig_port(intel_encoder);
- if (!dig_port->dp.can_mst)
+ if (!intel_dp_mst_source_support(&dig_port->dp))
continue;
seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
@@ -2044,11 +2030,9 @@ static int i915_drrs_ctl_set(void *data, u64 val)
intel_dp = enc_to_intel_dp(encoder);
if (val)
- intel_edp_drrs_enable(intel_dp,
- crtc_state);
+ intel_drrs_enable(intel_dp, crtc_state);
else
- intel_edp_drrs_disable(intel_dp,
- crtc_state);
+ intel_drrs_disable(intel_dp, crtc_state);
}
drm_connector_list_iter_end(&conn_iter);
@@ -2240,14 +2224,12 @@ static int i915_psr_status_show(struct seq_file *m, void *data)
}
DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
-#define LPSP_CAPABLE(COND) (COND ? seq_puts(m, "LPSP: capable\n") : \
- seq_puts(m, "LPSP: incapable\n"))
-
static int i915_lpsp_capability_show(struct seq_file *m, void *data)
{
struct drm_connector *connector = m->private;
struct drm_i915_private *i915 = to_i915(connector->dev);
struct intel_encoder *encoder;
+ bool lpsp_capable = false;
encoder = intel_attached_encoder(to_intel_connector(connector));
if (!encoder)
@@ -2256,35 +2238,27 @@ static int i915_lpsp_capability_show(struct seq_file *m, void *data)
if (connector->status != connector_status_connected)
return -ENODEV;
- if (DISPLAY_VER(i915) >= 13) {
- LPSP_CAPABLE(encoder->port <= PORT_B);
- return 0;
- }
-
- switch (DISPLAY_VER(i915)) {
- case 12:
+ if (DISPLAY_VER(i915) >= 13)
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) >= 12)
/*
* Actually TGL can drive LPSP on ports up to DDI_C,
* but there is no physically connected DDI_C on TGL SKUs,
* and the driver does not even initialize the DDI_C port for gen12.
*/
- LPSP_CAPABLE(encoder->port <= PORT_B);
- break;
- case 11:
- LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP);
- break;
- case 10:
- case 9:
- LPSP_CAPABLE(encoder->port == PORT_A &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
- break;
- default:
- if (IS_HASWELL(i915) || IS_BROADWELL(i915))
- LPSP_CAPABLE(connector->connector_type == DRM_MODE_CONNECTOR_eDP);
- }
+ lpsp_capable = encoder->port <= PORT_B;
+ else if (DISPLAY_VER(i915) == 11)
+ lpsp_capable = (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP);
+ else if (IS_DISPLAY_VER(i915, 9, 10))
+ lpsp_capable = (encoder->port == PORT_A &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort));
+ else if (IS_HASWELL(i915) || IS_BROADWELL(i915))
+ lpsp_capable = connector->connector_type == DRM_MODE_CONNECTOR_eDP;
+
+ seq_printf(m, "LPSP: %s\n", lpsp_capable ? "capable" : "incapable");
return 0;
}
@@ -2468,17 +2442,16 @@ static const struct file_operations i915_dsc_bpp_fops = {
*
* Cleanup will be done by drm_connector_unregister() through a call to
* drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
*/
-int intel_connector_debugfs_add(struct drm_connector *connector)
+void intel_connector_debugfs_add(struct intel_connector *intel_connector)
{
+ struct drm_connector *connector = &intel_connector->base;
struct dentry *root = connector->debugfs_entry;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
/* The connector must have been registered beforehands. */
if (!root)
- return -ENODEV;
+ return;
if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
debugfs_create_file("i915_panel_timings", S_IRUGO, root,
@@ -2511,33 +2484,23 @@ int intel_connector_debugfs_add(struct drm_connector *connector)
connector, &i915_dsc_bpp_fops);
}
- /* Legacy panels doesn't lpsp on any platform */
- if ((DISPLAY_VER(dev_priv) >= 9 || IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv)) &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
- connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB))
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DSI ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)
debugfs_create_file("i915_lpsp_capability", 0444, root,
connector, &i915_lpsp_capability_fops);
-
- return 0;
}
/**
* intel_crtc_debugfs_add - add i915 specific crtc debugfs files
* @crtc: pointer to a drm_crtc
*
- * Returns 0 on success, negative error codes on error.
- *
* Failure to add debugfs entries should generally be ignored.
*/
-int intel_crtc_debugfs_add(struct drm_crtc *crtc)
+void intel_crtc_debugfs_add(struct drm_crtc *crtc)
{
- if (!crtc->debugfs_entry)
- return -ENODEV;
-
- crtc_updates_add(crtc);
- return 0;
+ if (crtc->debugfs_entry)
+ crtc_updates_add(crtc);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
index 557901f3eb90..d3a79c07c384 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -6,18 +6,18 @@
#ifndef __INTEL_DISPLAY_DEBUGFS_H__
#define __INTEL_DISPLAY_DEBUGFS_H__
-struct drm_connector;
struct drm_crtc;
struct drm_i915_private;
+struct intel_connector;
#ifdef CONFIG_DEBUG_FS
void intel_display_debugfs_register(struct drm_i915_private *i915);
-int intel_connector_debugfs_add(struct drm_connector *connector);
-int intel_crtc_debugfs_add(struct drm_crtc *crtc);
+void intel_connector_debugfs_add(struct intel_connector *connector);
+void intel_crtc_debugfs_add(struct drm_crtc *crtc);
#else
static inline void intel_display_debugfs_register(struct drm_i915_private *i915) {}
-static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
-static inline int intel_crtc_debugfs_add(struct drm_crtc *crtc) { return 0; }
+static inline void intel_connector_debugfs_add(struct intel_connector *connector) {}
+static inline void intel_crtc_debugfs_add(struct drm_crtc *crtc) {}
#endif
#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index cce1a926fcc1..1672604f9ef7 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -3,24 +3,25 @@
* Copyright © 2019 Intel Corporation
*/
-#include "display/intel_crt.h"
-
#include "i915_drv.h"
#include "i915_irq.h"
#include "intel_cdclk.h"
#include "intel_combo_phy.h"
-#include "intel_display_power.h"
+#include "intel_crt.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_dmc.h"
#include "intel_dpio_phy.h"
+#include "intel_dpll.h"
#include "intel_hotplug.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_pps.h"
-#include "intel_sideband.h"
#include "intel_snps_phy.h"
#include "intel_tc.h"
#include "intel_vga.h"
+#include "vlv_sideband.h"
bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv,
enum i915_power_well_id power_well_id);
@@ -560,7 +561,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
return;
drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
@@ -629,7 +630,7 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
* exit sequence.
*/
timeout_expected = is_tbt || intel_tc_cold_requires_aux_pw(dig_port);
- if (DISPLAY_VER(dev_priv) == 11 && dig_port->tc_legacy_port)
+ if (DISPLAY_VER(dev_priv) == 11 && intel_tc_cold_requires_aux_pw(dig_port))
icl_tc_cold_exit(dev_priv);
hsw_wait_for_power_well_enable(dev_priv, power_well, timeout_expected);
@@ -1195,7 +1196,7 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
if (!HAS_DISPLAY(dev_priv))
return;
- dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
+ intel_cdclk_get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
drm_WARN_ON(&dev_priv->drm,
intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 978531841fa3..0612e4b6e3c8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -410,6 +410,10 @@ void gen9_dbuf_slices_update(struct drm_i915_private *dev_priv,
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+#define with_intel_display_power_if_enabled(i915, domain, wf) \
+ for ((wf) = intel_display_power_get_if_enabled((i915), (domain)); (wf); \
+ intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
+
void chv_phy_powergate_lanes(struct intel_encoder *encoder,
bool override, unsigned int mask);
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
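For illustration, a hedged sketch of how the new with_intel_display_power_if_enabled() helper would typically be used; the power domain and the wrapper function here are assumptions, not taken from this patch. The body runs only when the domain is already enabled, and the reference is put asynchronously when the block exits.

/* Usage sketch (assumption, not part of the patch). */
static void example_read_if_powered(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	with_intel_display_power_if_enabled(i915, POWER_DOMAIN_PIPE_A, wakeref) {
		/*
		 * Hardware access that requires the power well goes here.
		 * If the domain is not already enabled, this block is
		 * skipped entirely; otherwise the wakeref is released
		 * asynchronously once the block finishes.
		 */
	}
}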
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 6beeeeba1bed..39e11eaec1a3 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -103,8 +103,6 @@ struct intel_fb_view {
* in the rotated and remapped GTT view all no-CCS formats (up to 2
* color planes) are supported.
*
- * TODO: add support for CCS formats in the remapped GTT view.
- *
* The view information shared by all FB color planes in the FB,
* like dst x/y and src/dst width, is stored separately in
* intel_plane_state.
@@ -271,6 +269,9 @@ struct intel_encoder {
const struct intel_ddi_buf_trans *(*get_buf_trans)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
int *n_entries);
+ void (*set_signal_levels)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
+
enum hpd_pin hpd_pin;
enum intel_display_power_domain power_domain;
/* for communication with audio component; protected by av_mutex */
@@ -428,10 +429,6 @@ struct intel_hdcp_shim {
int (*hdcp_2_2_capable)(struct intel_digital_port *dig_port,
bool *capable);
- /* Detects whether a HDCP 1.4 sink connected in MST topology */
- int (*streams_type1_capable)(struct intel_connector *connector,
- bool *capable);
-
/* Write HDCP2.2 messages */
int (*write_2_2_msg)(struct intel_digital_port *dig_port,
void *buf, size_t size);
@@ -629,6 +626,12 @@ struct intel_plane_state {
struct intel_fb_view view;
+ /* Plane pxp decryption state */
+ bool decrypt;
+
+ /* Plane state to display black pixels when pxp is borked */
+ bool force_black;
+
/* plane control register */
u32 ctl;
@@ -1060,12 +1063,14 @@ struct intel_crtc_state {
struct intel_link_m_n dp_m2_n2;
bool has_drrs;
+ /* PSR is supported but might not be enabled due to the lack of enabled planes */
bool has_psr;
bool has_psr2;
bool enable_psr2_sel_fetch;
bool req_psr2_sdp_prior_scanline;
u32 dc3co_exitline;
u16 su_y_granularity;
+ struct drm_dp_vsc_sdp psr_vsc;
/*
* Frequency the dpll for the port should run at. Differs from the
@@ -1529,7 +1534,6 @@ struct intel_psr {
u32 dc3co_exitline;
u32 dc3co_exit_delay;
struct delayed_work dc3co_work;
- struct drm_dp_vsc_sdp vsc;
};
struct intel_dp {
@@ -1576,7 +1580,6 @@ struct intel_dp {
struct intel_pps pps;
- bool can_mst; /* this port supports mst */
bool is_mst;
int active_mst_links;
@@ -1606,8 +1609,6 @@ struct intel_dp {
u8 dp_train_pat);
void (*set_idle_link_train)(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
- void (*set_signal_levels)(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
u8 (*preemph_max)(struct intel_dp *intel_dp);
u8 (*voltage_max)(struct intel_dp *intel_dp,
@@ -1667,8 +1668,11 @@ struct intel_digital_port {
enum intel_display_power_domain ddi_io_power_domain;
intel_wakeref_t ddi_io_wakeref;
intel_wakeref_t aux_wakeref;
+
struct mutex tc_lock; /* protects the TypeC port mode */
intel_wakeref_t tc_lock_wakeref;
+ enum intel_display_power_domain tc_lock_power_domain;
+ struct delayed_work tc_disconnect_phy_work;
int tc_link_refcount;
bool tc_legacy_port:1;
char tc_port_name[8];
@@ -1684,6 +1688,8 @@ struct intel_digital_port {
bool hdcp_auth_status;
/* HDCP port data need to pass to security f/w */
struct hdcp_port_data hdcp_port_data;
+ /* Whether the MST topology supports HDCP Type 1 Content */
+ bool hdcp_mst_type1_capable;
void (*write_infoframe)(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
@@ -2035,28 +2041,6 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
return fb ? to_intel_framebuffer(fb)->frontbuffer : NULL;
}
-static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
-{
- if (dev_priv->params.panel_use_ssc >= 0)
- return dev_priv->params.panel_use_ssc != 0;
- return dev_priv->vbt.lvds_use_ssc
- && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
-}
-
-static inline u32 i9xx_dpll_compute_fp(struct dpll *dpll)
-{
- return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
-}
-
-static inline u32 intel_fdi_link_freq(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *pipe_config)
-{
- if (HAS_DDI(dev_priv))
- return pipe_config->port_clock; /* SPLL */
- else
- return dev_priv->fdi_pll_freq;
-}
-
static inline bool is_ccs_modifier(u64 modifier)
{
return modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index b3c8e1c450ef..2dc9d632969d 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -45,8 +45,8 @@
#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE
-#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 10)
-#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 10)
+#define ADLP_DMC_PATH DMC_PATH(adlp, 2, 12)
+#define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 12)
MODULE_FIRMWARE(ADLP_DMC_PATH);
#define ADLS_DMC_PATH DMC_PATH(adls, 2, 01)
@@ -255,20 +255,10 @@ intel_get_stepping_info(struct drm_i915_private *i915,
static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
{
- u32 val, mask;
-
- mask = DC_STATE_DEBUG_MASK_MEMORY_UP;
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- mask |= DC_STATE_DEBUG_MASK_CORES;
-
/* The below bit doesn't need to be cleared ever afterwards */
- val = intel_de_read(dev_priv, DC_STATE_DEBUG);
- if ((val & mask) != mask) {
- val |= mask;
- intel_de_write(dev_priv, DC_STATE_DEBUG, val);
- intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
- }
+ intel_de_rmw(dev_priv, DC_STATE_DEBUG, 0,
+ DC_STATE_DEBUG_MASK_CORES | DC_STATE_DEBUG_MASK_MEMORY_UP);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index abe3d61b6243..23de500d56b5 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -44,6 +44,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_ddi.h"
#include "intel_de.h"
@@ -55,6 +56,7 @@
#include "intel_dp_mst.h"
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
+#include "intel_drrs.h"
#include "intel_fifo_underrun.h"
#include "intel_hdcp.h"
#include "intel_hdmi.h"
@@ -64,7 +66,6 @@
#include "intel_panel.h"
#include "intel_pps.h"
#include "intel_psr.h"
-#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
@@ -100,6 +101,8 @@ static const u8 valid_dsc_slicecount[] = {1, 2, 4};
*
* If a CPU or PCH DP output is attached to an eDP panel, this function
* will return true, and false otherwise.
+ *
+ * This function is not safe to use prior to the encoder type being set.
*/
bool intel_dp_is_edp(struct intel_dp *intel_dp)
{
@@ -111,6 +114,12 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc);
+/* Is link rate UHBR and thus 128b/132b? */
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state)
+{
+ return crtc_state->port_clock >= 1000000;
+}
+
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
@@ -130,6 +139,9 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
return;
}
+ /*
+ * Sink rates for 8b/10b.
+ */
max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
max_lttpr_rate = drm_dp_lttpr_max_link_rate(intel_dp->lttpr_common_caps);
if (max_lttpr_rate)
@@ -141,6 +153,41 @@ static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
intel_dp->sink_rates[i] = dp_rates[i];
}
+ /*
+ * Sink rates for 128b/132b. If set, sink should support all 8b/10b
+ * rates and 10 Gbps.
+ */
+ if (intel_dp->dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_128B132B) {
+ u8 uhbr_rates = 0;
+
+ BUILD_BUG_ON(ARRAY_SIZE(intel_dp->sink_rates) < ARRAY_SIZE(dp_rates) + 3);
+
+ drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_128B132B_SUPPORTED_LINK_RATES, &uhbr_rates);
+
+ if (drm_dp_lttpr_count(intel_dp->lttpr_common_caps)) {
+ /* We have a repeater */
+ if (intel_dp->lttpr_common_caps[0] >= 0x20 &&
+ intel_dp->lttpr_common_caps[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV] &
+ DP_PHY_REPEATER_128B132B_SUPPORTED) {
+ /* Repeater supports 128b/132b, valid UHBR rates */
+ uhbr_rates &= intel_dp->lttpr_common_caps[DP_PHY_REPEATER_128B132B_RATES -
+ DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV];
+ } else {
+ /* Does not support 128b/132b */
+ uhbr_rates = 0;
+ }
+ }
+
+ if (uhbr_rates & DP_UHBR10)
+ intel_dp->sink_rates[i++] = 1000000;
+ if (uhbr_rates & DP_UHBR13_5)
+ intel_dp->sink_rates[i++] = 1350000;
+ if (uhbr_rates & DP_UHBR20)
+ intel_dp->sink_rates[i++] = 2000000;
+ }
+
intel_dp->num_sink_rates = i;
}
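A small worked example of what the new 128b/132b handling above produces; the values are illustrative, not taken from the patch.

/*
 * Illustrative only: if the sink sets DP_UHBR10 | DP_UHBR20 in
 * DP_128B132B_SUPPORTED_LINK_RATES and no LTTPR restricts them, the code
 * above appends 1000000 (UHBR10) and 2000000 (UHBR20) after the 8b/10b
 * rates, e.g.:
 *
 *   sink_rates[] = { 162000, 270000, 540000, 810000, 1000000, 2000000 };
 *   num_sink_rates = 6;
 */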
@@ -192,6 +239,10 @@ int intel_dp_max_lane_count(struct intel_dp *intel_dp)
return intel_dp->max_link_lane_count;
}
+/*
+ * The required data bandwidth for a mode with given pixel clock and bpp. This
+ * is the required net bandwidth independent of the data bandwidth efficiency.
+ */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
@@ -199,16 +250,52 @@ intel_dp_link_required(int pixel_clock, int bpp)
return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
+/*
+ * Given a link rate and lanes, get the data bandwidth.
+ *
+ * Data bandwidth is the actual payload rate, which depends on the data
+ * bandwidth efficiency and the link rate.
+ *
+ * For 8b/10b channel encoding, SST and non-FEC, the data bandwidth efficiency
+ * is 80%. For example, for a 1.62 Gbps link, 1.62*10^9 bps * 0.80 * (1/8) =
+ * 162000 kBps. With 8-bit symbols, we have 162000 kHz symbol clock. Just by
+ * coincidence, the port clock in kHz matches the data bandwidth in kBps, and
+ * they equal the link bit rate in Gbps multiplied by 100000. (Note that this no
+ * longer holds for data bandwidth as soon as FEC or MST is taken into account!)
+ *
+ * For 128b/132b channel encoding, the data bandwidth efficiency is 96.71%. For
+ * example, for a 10 Gbps link, 10*10^9 bps * 0.9671 * (1/8) = 1208875
+ * kBps. With 32-bit symbols, we have 312500 kHz symbol clock. The value 1000000
+ * does not match the symbol clock, the port clock (not even if you think in
+ * terms of a byte clock), nor the data bandwidth. It only matches the link bit
+ * rate in units of 10000 bps.
+ */
int
-intel_dp_max_data_rate(int max_link_clock, int max_lanes)
+intel_dp_max_data_rate(int max_link_rate, int max_lanes)
{
- /* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
- * link rate that is generally expressed in Gbps. Since, 8 bits of data
- * is transmitted every LS_Clk per lane, there is no need to account for
- * the channel encoding that is done in the PHY layer here.
+ if (max_link_rate >= 1000000) {
+ /*
+ * UHBR rates always use 128b/132b channel encoding, and have
+ * 96.71% data bandwidth efficiency. Consider max_link_rate the
+ * link bit rate in units of 10000 bps.
+ */
+ int max_link_rate_kbps = max_link_rate * 10;
+
+ max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 9671, 10000);
+ max_link_rate = max_link_rate_kbps / 8;
+ }
+
+ /*
+ * Lower than UHBR rates always use 8b/10b channel encoding, and have
+ * 80% data bandwidth efficiency for SST non-FEC. However, this turns
+ * out to be a nop by coincidence, and can be skipped:
+ *
+ * int max_link_rate_kbps = max_link_rate * 10;
+ * max_link_rate_kbps = DIV_ROUND_CLOSEST_ULL(max_link_rate_kbps * 8, 10);
+ * max_link_rate = max_link_rate_kbps / 8;
*/
- return max_link_clock * max_lanes;
+ return max_link_rate * max_lanes;
}
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
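To make the efficiency arithmetic in the comment above concrete, here is a self-contained sketch of the two cases handled by intel_dp_max_data_rate(); the helper name is hypothetical, but the constants (80% for 8b/10b SST non-FEC, 96.71% for 128b/132b) and the 10 kbps rate units follow the comment.

/*
 * Sketch of the data-rate arithmetic described above. link_rate is in
 * the driver's usual 10 kbps units, the result is in kBps.
 */
#include <stdio.h>

static long long sketch_max_data_rate(long long link_rate, int lanes)
{
	if (link_rate >= 1000000) {
		/* 128b/132b: 96.71% data bandwidth efficiency */
		long long kbps = link_rate * 10;

		kbps = (kbps * 9671 + 5000) / 10000;	/* round to closest */
		link_rate = kbps / 8;
	}

	/* 8b/10b: the 80% efficiency cancels out, as noted above */
	return link_rate * lanes;
}

int main(void)
{
	/* 1.62 Gbps x4: 162000 * 4 = 648000 kBps; UHBR10 x4: 1208875 * 4 = 4835500 kBps */
	printf("%lld %lld\n",
	       sketch_max_data_rate(162000, 4),
	       sketch_max_data_rate(1000000, 4));

	return 0;
}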
@@ -222,6 +309,20 @@ bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp)
encoder->port != PORT_A);
}
+static int dg2_max_source_rate(struct intel_dp *intel_dp)
+{
+ return intel_dp_is_edp(intel_dp) ? 810000 : 1350000;
+}
+
+static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
+{
+ u32 voltage;
+
+ voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK;
+
+ return voltage == VOLTAGE_INFO_0_85V;
+}
+
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
@@ -229,7 +330,7 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
if (intel_phy_is_combo(dev_priv, phy) &&
- !intel_dp_is_edp(intel_dp))
+ (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp)))
return 540000;
return 810000;
@@ -237,7 +338,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp)
static int ehl_max_source_rate(struct intel_dp *intel_dp)
{
- if (intel_dp_is_edp(intel_dp))
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
+
+ if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy))
+ return 540000;
+
+ return 810000;
+}
+
+static int dg1_max_source_rate(struct intel_dp *intel_dp)
+{
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy))
return 540000;
return 810000;
@@ -248,7 +365,8 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
/* The values must be in increasing order */
static const int icl_rates[] = {
- 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
+ 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
+ 1000000, 1350000,
};
static const int bxt_rates[] = {
162000, 216000, 243000, 270000, 324000, 432000, 540000
@@ -275,7 +393,12 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 11) {
source_rates = icl_rates;
size = ARRAY_SIZE(icl_rates);
- if (IS_JSL_EHL(dev_priv))
+ if (IS_DG2(dev_priv))
+ max_rate = dg2_max_source_rate(intel_dp);
+ else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
+ IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv))
+ max_rate = dg1_max_source_rate(intel_dp);
+ else if (IS_JSL_EHL(dev_priv))
max_rate = ehl_max_source_rate(intel_dp);
else
max_rate = icl_max_source_rate(intel_dp);
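The per-platform *_max_source_rate() hooks above only pick a cap; the caller then truncates the sorted source rate table at that cap. A small illustrative sketch of that truncation, with made-up names and an example table:

/* Sketch: keep only the entries of an increasing rate table that do not
 * exceed a platform-dependent maximum. Names and values are illustrative. */
#include <stddef.h>
#include <stdio.h>

static size_t clamp_rate_table(const int *rates, size_t n, int max_rate)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (rates[i] > max_rate)
			break;

	return i;
}

int main(void)
{
	static const int icl_like_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000,
		1000000, 1350000,
	};

	/* an 810000 cap (e.g. a non-UHBR part) keeps the first 8 entries */
	printf("%zu\n", clamp_rate_table(icl_like_rates, 10, 810000));

	return 0;
}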
@@ -461,7 +584,9 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
- if (DISPLAY_VER(i915) >= 11)
+ if (DISPLAY_VER(i915) >= 13)
+ return 17280 * 8;
+ else if (DISPLAY_VER(i915) >= 11)
return 7680 * 8;
else
return 6144 * 8;
@@ -703,6 +828,17 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
+static bool intel_dp_need_bigjoiner(struct intel_dp *intel_dp,
+ int hdisplay, int clock)
+{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+
+ if (!intel_dp_can_bigjoiner(intel_dp))
+ return false;
+
+ return clock > i915->max_dotclk_freq || hdisplay > 5120;
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
@@ -726,11 +862,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_H_ILLEGAL;
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
- if (mode->hdisplay != fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if (mode->vdisplay != fixed_mode->vdisplay)
- return MODE_PANEL;
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
target_clock = fixed_mode->clock;
}
@@ -738,8 +872,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
- if ((target_clock > max_dotclk || mode->hdisplay > 5120) &&
- intel_dp_can_bigjoiner(intel_dp)) {
+ if (intel_dp_need_bigjoiner(intel_dp, mode->hdisplay, target_clock)) {
bigjoiner = true;
max_dotclk *= 2;
}
@@ -811,18 +944,14 @@ intel_dp_mode_valid(struct drm_connector *connector,
return intel_mode_valid_max_plane_size(dev_priv, mode, bigjoiner);
}
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915)
{
- int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
- return max_rate >= 540000;
+ return DISPLAY_VER(i915) >= 9 || IS_BROADWELL(i915) || IS_HASWELL(i915);
}
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915)
{
- int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
-
- return max_rate >= 810000;
+ return DISPLAY_VER(i915) >= 10;
}
static void snprintf_int_array(char *str, size_t len,
@@ -1044,7 +1173,8 @@ intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
intel_dp->num_common_rates,
intel_dp->compliance.test_link_rate);
if (index >= 0)
- limits->min_clock = limits->max_clock = index;
+ limits->min_rate = limits->max_rate =
+ intel_dp->compliance.test_link_rate;
limits->min_lane_count = limits->max_lane_count =
intel_dp->compliance.test_lane_count;
}
@@ -1058,8 +1188,8 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
const struct link_config_limits *limits)
{
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
- int bpp, clock, lane_count;
- int mode_rate, link_clock, link_avail;
+ int bpp, i, lane_count;
+ int mode_rate, link_rate, link_avail;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);
@@ -1067,18 +1197,22 @@ intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
output_bpp);
- for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
+ for (i = 0; i < intel_dp->num_common_rates; i++) {
+ link_rate = intel_dp->common_rates[i];
+ if (link_rate < limits->min_rate ||
+ link_rate > limits->max_rate)
+ continue;
+
for (lane_count = limits->min_lane_count;
lane_count <= limits->max_lane_count;
lane_count <<= 1) {
- link_clock = intel_dp->common_rates[clock];
- link_avail = intel_dp_max_data_rate(link_clock,
+ link_avail = intel_dp_max_data_rate(link_rate,
lane_count);
if (mode_rate <= link_avail) {
pipe_config->lane_count = lane_count;
pipe_config->pipe_bpp = bpp;
- pipe_config->port_clock = link_clock;
+ pipe_config->port_clock = link_rate;
return 0;
}
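The hunk above switches the "wide" link computation from clock indices to explicit rates, but the search strategy is unchanged: for each bpp (highest first) try every common rate within the limits and every lane count, and take the first combination whose data rate covers the mode. A compact standalone sketch of that ordering, with hypothetical names (for 8b/10b the per-lane port clock already equals the data rate in kBps, per the earlier comment, so efficiency is ignored here):

/* Sketch: greedy (bpp, rate, lane count) search, highest bpp first,
 * smallest sufficient rate/lane count. Names and limits are illustrative. */
#include <stdio.h>

struct sketch_limits {
	int min_rate, max_rate;
	int min_lanes, max_lanes;
	int min_bpp, max_bpp;
};

static int sketch_pick_config(const int *rates, int n_rates, int pixel_clock,
			      const struct sketch_limits *lim,
			      int *out_bpp, int *out_rate, int *out_lanes)
{
	int bpp, i, lanes;

	/* step by 2 bits per colour component, like the driver's loop */
	for (bpp = lim->max_bpp; bpp >= lim->min_bpp; bpp -= 2 * 3) {
		int required = pixel_clock * bpp / 8;	/* kBps */

		for (i = 0; i < n_rates; i++) {
			if (rates[i] < lim->min_rate || rates[i] > lim->max_rate)
				continue;

			for (lanes = lim->min_lanes; lanes <= lim->max_lanes; lanes <<= 1) {
				if (required <= rates[i] * lanes) {
					*out_bpp = bpp;
					*out_rate = rates[i];
					*out_lanes = lanes;
					return 0;
				}
			}
		}
	}

	return -1;
}

int main(void)
{
	static const int rates[] = { 162000, 270000, 540000, 810000 };
	struct sketch_limits lim = { 162000, 810000, 1, 4, 18, 30 };
	int bpp, rate, lanes;

	if (!sketch_pick_config(rates, 4, 148500, &lim, &bpp, &rate, &lanes))
		printf("bpp=%d rate=%d lanes=%d\n", bpp, rate, lanes);

	return 0;
}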
@@ -1212,7 +1346,7 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
* with DSC enabled for the requested mode.
*/
pipe_config->pipe_bpp = pipe_bpp;
- pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
+ pipe_config->port_clock = limits->max_rate;
pipe_config->lane_count = limits->max_lane_count;
if (intel_dp_is_edp(intel_dp)) {
@@ -1321,8 +1455,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
/* No common link rates between source and sink */
drm_WARN_ON(encoder->base.dev, common_len <= 0);
- limits.min_clock = 0;
- limits.max_clock = common_len - 1;
+ limits.min_rate = intel_dp->common_rates[0];
+ limits.max_rate = intel_dp->common_rates[common_len - 1];
limits.min_lane_count = 1;
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -1340,20 +1474,18 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
* values correspond to the native resolution of the panel.
*/
limits.min_lane_count = limits.max_lane_count;
- limits.min_clock = limits.max_clock;
+ limits.min_rate = limits.max_rate;
}
intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
"max rate %d max bpp %d pixel clock %iKHz\n",
- limits.max_lane_count,
- intel_dp->common_rates[limits.max_clock],
+ limits.max_lane_count, limits.max_rate,
limits.max_bpp, adjusted_mode->crtc_clock);
- if ((adjusted_mode->crtc_clock > i915->max_dotclk_freq ||
- adjusted_mode->crtc_hdisplay > 5120) &&
- intel_dp_can_bigjoiner(intel_dp))
+ if (intel_dp_need_bigjoiner(intel_dp, adjusted_mode->crtc_hdisplay,
+ adjusted_mode->crtc_clock))
pipe_config->bigjoiner = true;
/*
@@ -1553,7 +1685,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
{
vsc->sdp_type = DP_SDP_VSC;
- if (intel_dp->psr.psr2_enabled) {
+ if (crtc_state->has_psr2) {
if (intel_dp->psr.colorimetry_support &&
intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
/* [PSR2, +Colorimetry] */
@@ -1603,46 +1735,6 @@ intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
}
-static void
-intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *pipe_config,
- int output_bpp, bool constant_n)
-{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- int pixel_clock;
-
- if (pipe_config->vrr.enable)
- return;
-
- /*
- * DRRS and PSR can't be enable together, so giving preference to PSR
- * as it allows more power-savings by complete shutting down display,
- * so to guarantee this, intel_dp_drrs_compute_config() must be called
- * after intel_psr_compute_config().
- */
- if (pipe_config->has_psr)
- return;
-
- if (!intel_connector->panel.downclock_mode ||
- dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
- return;
-
- pipe_config->has_drrs = true;
-
- pixel_clock = intel_connector->panel.downclock_mode->clock;
- if (pipe_config->splitter.enable)
- pixel_clock /= pipe_config->splitter.link_count;
-
- intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
- pipe_config->port_clock, &pipe_config->dp_m2_n2,
- constant_n, pipe_config->fec_enable);
-
- /* FIXME: abstract this better */
- if (pipe_config->splitter.enable)
- pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
-}
-
int
intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
@@ -1665,7 +1757,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
adjusted_mode);
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -1678,13 +1770,11 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;
if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
@@ -1750,9 +1840,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
g4x_dp_set_clock(encoder, pipe_config);
intel_vrr_compute_config(pipe_config, conn_state);
- intel_psr_compute_config(intel_dp, pipe_config);
- intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
- constant_n);
+ intel_psr_compute_config(intel_dp, pipe_config, conn_state);
+ intel_drrs_compute_config(intel_dp, pipe_config, output_bpp,
+ constant_n);
intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);
@@ -1762,6 +1852,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
void intel_dp_set_link_params(struct intel_dp *intel_dp,
int link_rate, int lane_count)
{
+ memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
intel_dp->link_trained = false;
intel_dp->link_rate = link_rate;
intel_dp->lane_count = lane_count;
@@ -1779,7 +1870,7 @@ void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
drm_dbg_kms(&i915->drm, "\n");
- intel_panel_enable_backlight(crtc_state, conn_state);
+ intel_backlight_enable(crtc_state, conn_state);
intel_pps_backlight_on(intel_dp);
}
@@ -1795,7 +1886,7 @@ void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
drm_dbg_kms(&i915->drm, "\n");
intel_pps_backlight_off(intel_dp);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
}
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
@@ -1916,6 +2007,9 @@ void intel_dp_sync_state(struct intel_encoder *encoder,
{
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ if (!crtc_state)
+ return;
+
/*
* Don't clobber DPCD if it's been already read out during output
* setup (eDP) or detect.
@@ -2389,6 +2483,8 @@ static void intel_edp_mso_mode_fixup(struct intel_connector *connector,
static void intel_edp_mso_init(struct intel_dp *intel_dp)
{
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct drm_display_info *info = &connector->base.display_info;
u8 mso;
if (intel_dp->edp_dpcd[0] < DP_EDP_14)
@@ -2407,8 +2503,9 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
if (mso) {
- drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration\n",
- mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso);
+ drm_dbg_kms(&i915->drm, "Sink MSO %ux%u configuration, pixel overlap %u\n",
+ mso, drm_dp_max_lane_count(intel_dp->dpcd) / mso,
+ info->mso_pixel_overlap);
if (!HAS_MSO(i915)) {
drm_err(&i915->drm, "No source MSO support, disabling\n");
mso = 0;
@@ -2416,7 +2513,7 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp)
}
intel_dp->mso_link_count = mso;
- intel_dp->mso_pixel_overlap = 0; /* FIXME: read from DisplayID v2.0 */
+ intel_dp->mso_pixel_overlap = mso ? info->mso_pixel_overlap : 0;
}
static bool
@@ -2505,8 +2602,6 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
*/
intel_edp_init_source_oui(intel_dp, true);
- intel_edp_mso_init(intel_dp);
-
return true;
}
@@ -2574,7 +2669,7 @@ intel_dp_can_mst(struct intel_dp *intel_dp)
struct drm_i915_private *i915 = dp_to_i915(intel_dp);
return i915->params.enable_dp_mst &&
- intel_dp->can_mst &&
+ intel_dp_mst_source_support(intel_dp) &&
drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
}
@@ -2589,10 +2684,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp)
drm_dbg_kms(&i915->drm,
"[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
encoder->base.base.id, encoder->base.name,
- yesno(intel_dp->can_mst), yesno(sink_can_mst),
+ yesno(intel_dp_mst_source_support(intel_dp)), yesno(sink_can_mst),
yesno(i915->params.enable_dp_mst));
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
intel_dp->is_mst = sink_can_mst &&
@@ -2809,7 +2904,7 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder,
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- struct drm_dp_vsc_sdp *vsc)
+ const struct drm_dp_vsc_sdp *vsc)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
@@ -4588,6 +4683,17 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn,
return intel_modeset_synced_crtcs(state, conn);
}
+static void intel_dp_oob_hotplug_event(struct drm_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(connector->dev);
+
+ spin_lock_irq(&i915->irq_lock);
+ i915->hotplug.event_bits |= BIT(encoder->hpd_pin);
+ spin_unlock_irq(&i915->irq_lock);
+ queue_delayed_work(system_wq, &i915->hotplug.hotplug_work, 0);
+}
+
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -4598,6 +4704,7 @@ static const struct drm_connector_funcs intel_dp_connector_funcs = {
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
+ .oob_hotplug_event = intel_dp_oob_hotplug_event,
};
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
@@ -4713,432 +4820,6 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
drm_connector_attach_vrr_capable_property(connector);
}
-/**
- * intel_dp_set_drrs_state - program registers for RR switch to take effect
- * @dev_priv: i915 device
- * @crtc_state: a pointer to the active intel_crtc_state
- * @refresh_rate: RR to be programmed
- *
- * This function gets called when refresh rate (RR) has to be changed from
- * one frequency to another. Switches can be between high and low RR
- * supported by the panel or to any other RR based on media playback (in
- * this case, RR value needs to be passed from user space).
- *
- * The caller of this function needs to take a lock on dev_priv->drrs.
- */
-static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- int refresh_rate)
-{
- struct intel_dp *intel_dp = dev_priv->drrs.dp;
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
-
- if (refresh_rate <= 0) {
- drm_dbg_kms(&dev_priv->drm,
- "Refresh rate should be positive non-zero.\n");
- return;
- }
-
- if (intel_dp == NULL) {
- drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
- return;
- }
-
- if (!crtc) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS: intel_crtc not initialized\n");
- return;
- }
-
- if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
- return;
- }
-
- if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
- refresh_rate)
- index = DRRS_LOW_RR;
-
- if (index == dev_priv->drrs.refresh_rate_type) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS requested for previously set RR...ignoring\n");
- return;
- }
-
- if (!crtc_state->hw.active) {
- drm_dbg_kms(&dev_priv->drm,
- "eDP encoder disabled. CRTC not Active\n");
- return;
- }
-
- if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
- switch (index) {
- case DRRS_HIGH_RR:
- intel_dp_set_m_n(crtc_state, M1_N1);
- break;
- case DRRS_LOW_RR:
- intel_dp_set_m_n(crtc_state, M2_N2);
- break;
- case DRRS_MAX_RR:
- default:
- drm_err(&dev_priv->drm,
- "Unsupported refreshrate type\n");
- }
- } else if (DISPLAY_VER(dev_priv) > 6) {
- i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
- u32 val;
-
- val = intel_de_read(dev_priv, reg);
- if (index > DRRS_HIGH_RR) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
- else
- val |= PIPECONF_EDP_RR_MODE_SWITCH;
- } else {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
- else
- val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
- }
- intel_de_write(dev_priv, reg, val);
- }
-
- dev_priv->drrs.refresh_rate_type = index;
-
- drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
- refresh_rate);
-}
-
-static void
-intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- dev_priv->drrs.busy_frontbuffer_bits = 0;
- dev_priv->drrs.dp = intel_dp;
-}
-
-/**
- * intel_edp_drrs_enable - init drrs struct if supported
- * @intel_dp: DP struct
- * @crtc_state: A pointer to the active crtc state.
- *
- * Initializes frontbuffer_bits and drrs.dp
- */
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!crtc_state->has_drrs)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- if (dev_priv->drrs.dp) {
- drm_warn(&dev_priv->drm, "DRRS already enabled\n");
- goto unlock;
- }
-
- intel_edp_drrs_enable_locked(intel_dp);
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-static void
-intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
- int refresh;
-
- refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
- intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
- }
-
- dev_priv->drrs.dp = NULL;
-}
-
-/**
- * intel_edp_drrs_disable - Disable DRRS
- * @intel_dp: DP struct
- * @old_crtc_state: Pointer to old crtc_state.
- *
- */
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *old_crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!old_crtc_state->has_drrs)
- return;
-
- mutex_lock(&dev_priv->drrs.mutex);
- if (!dev_priv->drrs.dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
- mutex_unlock(&dev_priv->drrs.mutex);
-
- cancel_delayed_work_sync(&dev_priv->drrs.work);
-}
-
-/**
- * intel_edp_drrs_update - Update DRRS state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- *
- * This function will update DRRS states, disabling or enabling DRRS when
- * executing fastsets. For full modeset, intel_edp_drrs_disable() and
- * intel_edp_drrs_enable() should be called instead.
- */
-void
-intel_edp_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
- return;
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- /* New state matches current one? */
- if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
- goto unlock;
-
- if (crtc_state->has_drrs)
- intel_edp_drrs_enable_locked(intel_dp);
- else
- intel_edp_drrs_disable_locked(intel_dp, crtc_state);
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-static void intel_edp_drrs_downclock_work(struct work_struct *work)
-{
- struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), drrs.work.work);
- struct intel_dp *intel_dp;
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
-
- if (!intel_dp)
- goto unlock;
-
- /*
- * The delayed work can race with an invalidate hence we need to
- * recheck.
- */
-
- if (dev_priv->drrs.busy_frontbuffer_bits)
- goto unlock;
-
- if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
- struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
-
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
- }
-
-unlock:
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called everytime rendering on the given planes start.
- * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
- *
- * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
- */
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
-{
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
- return;
-
- cancel_delayed_work(&dev_priv->drrs.work);
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
- if (!intel_dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
-
- /* invalidate means busy screen hence upclock */
- if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev_priv: i915 device
- * @frontbuffer_bits: frontbuffer plane tracking bits
- *
- * This function gets called every time rendering on the given planes has
- * completed or flip on a crtc is completed. So DRRS should be upclocked
- * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
- * if no other planes are dirty.
- *
- * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
- */
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits)
-{
- struct intel_dp *intel_dp;
- struct drm_crtc *crtc;
- enum pipe pipe;
-
- if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
- return;
-
- cancel_delayed_work(&dev_priv->drrs.work);
-
- mutex_lock(&dev_priv->drrs.mutex);
-
- intel_dp = dev_priv->drrs.dp;
- if (!intel_dp) {
- mutex_unlock(&dev_priv->drrs.mutex);
- return;
- }
-
- crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
- pipe = to_intel_crtc(crtc)->pipe;
-
- frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
- dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
-
- /* flush means busy screen hence upclock */
- if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
- intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
- drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
-
- /*
- * flush also means no more activity hence schedule downclock, if all
- * other fbs are quiescent too
- */
- if (!dev_priv->drrs.busy_frontbuffer_bits)
- schedule_delayed_work(&dev_priv->drrs.work,
- msecs_to_jiffies(1000));
- mutex_unlock(&dev_priv->drrs.mutex);
-}
-
-/**
- * DOC: Display Refresh Rate Switching (DRRS)
- *
- * Display Refresh Rate Switching (DRRS) is a power conservation feature
- * which enables swtching between low and high refresh rates,
- * dynamically, based on the usage scenario. This feature is applicable
- * for internal panels.
- *
- * Indication that the panel supports DRRS is given by the panel EDID, which
- * would list multiple refresh rates for one resolution.
- *
- * DRRS is of 2 types - static and seamless.
- * Static DRRS involves changing refresh rate (RR) by doing a full modeset
- * (may appear as a blink on screen) and is used in dock-undock scenario.
- * Seamless DRRS involves changing RR without any visual effect to the user
- * and can be used during normal system usage. This is done by programming
- * certain registers.
- *
- * Support for static/seamless DRRS may be indicated in the VBT based on
- * inputs from the panel spec.
- *
- * DRRS saves power by switching to low RR based on usage scenarios.
- *
- * The implementation is based on frontbuffer tracking implementation. When
- * there is a disturbance on the screen triggered by user activity or a periodic
- * system activity, DRRS is disabled (RR is changed to high RR). When there is
- * no movement on screen, after a timeout of 1 second, a switch to low RR is
- * made.
- *
- * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
- * and intel_edp_drrs_flush() are called.
- *
- * DRRS can be further extended to support other internal panels and also
- * the scenario of video playback wherein RR is set based on the rate
- * requested by userspace.
- */
-
-/**
- * intel_dp_drrs_init - Init basic DRRS work and mutex.
- * @connector: eDP connector
- * @fixed_mode: preferred mode of panel
- *
- * This function is called only once at driver load to initialize basic
- * DRRS stuff.
- *
- * Returns:
- * Downclock mode if panel supports it, else return NULL.
- * DRRS support is determined by the presence of downclock mode (apart
- * from VBT setting).
- */
-static struct drm_display_mode *
-intel_dp_drrs_init(struct intel_connector *connector,
- struct drm_display_mode *fixed_mode)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct drm_display_mode *downclock_mode = NULL;
-
- INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
- mutex_init(&dev_priv->drrs.mutex);
-
- if (DISPLAY_VER(dev_priv) <= 6) {
- drm_dbg_kms(&dev_priv->drm,
- "DRRS supported for Gen7 and above\n");
- return NULL;
- }
-
- if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
- return NULL;
- }
-
- downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
- if (!downclock_mode) {
- drm_dbg_kms(&dev_priv->drm,
- "Downclock mode is not found. DRRS not supported\n");
- return NULL;
- }
-
- dev_priv->drrs.type = dev_priv->vbt.drrs_type;
-
- dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- drm_dbg_kms(&dev_priv->drm,
- "seamless DRRS supported for eDP panel.\n");
- return downclock_mode;
-}
-
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
struct intel_connector *intel_connector)
{
@@ -5197,7 +4878,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
if (fixed_mode)
- downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
+ downclock_mode = intel_drrs_init(intel_connector, fixed_mode);
+
+ /* MSO requires information from the EDID */
+ intel_edp_mso_init(intel_dp);
/* multiply the mode clock and horizontal timings for MSO */
intel_edp_mso_mode_fixup(intel_connector, fixed_mode);
@@ -5230,7 +4914,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
if (!(dev_priv->quirks & QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK))
intel_connector->panel.backlight.power = intel_pps_backlight_power;
- intel_panel_setup_backlight(connector, pipe);
+ intel_backlight_setup(intel_connector, pipe);
if (fixed_mode) {
drm_connector_set_panel_orientation_with_quirk(connector,
@@ -5292,8 +4976,6 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
intel_encoder->base.name))
return false;
- intel_dp_set_source_rates(intel_dp);
-
intel_dp->reset_link_params = true;
intel_dp->pps.pps_pipe = INVALID_PIPE;
intel_dp->pps.active_pipe = INVALID_PIPE;
@@ -5309,28 +4991,22 @@ intel_dp_init_connector(struct intel_digital_port *dig_port,
*/
drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
+ intel_encoder->type = INTEL_OUTPUT_EDP;
+
+ /* eDP only on port B and/or C on vlv/chv */
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ port != PORT_B && port != PORT_C))
+ return false;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
}
+ intel_dp_set_source_rates(intel_dp);
+
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
intel_dp->pps.active_pipe = vlv_active_pipe(intel_dp);
- /*
- * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
- * for DP the encoder type can be set by the caller to
- * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
- */
- if (type == DRM_MODE_CONNECTOR_eDP)
- intel_encoder->type = INTEL_OUTPUT_EDP;
-
- /* eDP only on port B and/or C on vlv/chv */
- if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
- return false;
-
drm_dbg_kms(&dev_priv->drm,
"Adding %s connector on [ENCODER:%d:%s]\n",
type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
@@ -5411,7 +5087,7 @@ void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
if (intel_dp->is_mst)
@@ -5435,7 +5111,7 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
intel_dp = enc_to_intel_dp(encoder);
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
continue;
ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 680631b5b437..ce229026dc91 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -26,7 +26,7 @@ struct intel_dp;
struct intel_encoder;
struct link_config_limits {
- int min_clock, max_clock;
+ int min_rate, max_rate;
int min_lane_count, max_lane_count;
int min_bpp, max_bpp;
};
@@ -58,6 +58,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state);
bool intel_dp_is_edp(struct intel_dp *intel_dp);
+bool intel_dp_is_uhbr(const struct intel_crtc_state *crtc_state);
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
enum irqreturn intel_dp_hpd_pulse(struct intel_digital_port *dig_port,
bool long_hpd);
@@ -70,25 +71,14 @@ int intel_dp_max_link_rate(struct intel_dp *intel_dp);
int intel_dp_max_lane_count(struct intel_dp *intel_dp);
int intel_dp_rate_select(struct intel_dp *intel_dp, int rate);
-void intel_edp_drrs_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_disable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state);
-void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
- unsigned int frontbuffer_bits);
-
void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
u8 *link_bw, u8 *rate_select);
-bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp);
-bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp);
+bool intel_dp_source_supports_tps3(struct drm_i915_private *i915);
+bool intel_dp_source_supports_tps4(struct drm_i915_private *i915);
bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp);
int intel_dp_link_required(int pixel_clock, int bpp);
-int intel_dp_max_data_rate(int max_link_clock, int max_lanes);
+int intel_dp_max_data_rate(int max_link_rate, int max_lanes);
bool intel_dp_can_bigjoiner(struct intel_dp *intel_dp);
bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
@@ -98,7 +88,7 @@ void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
struct drm_dp_vsc_sdp *vsc);
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
- struct drm_dp_vsc_sdp *vsc);
+ const struct drm_dp_vsc_sdp *vsc);
void intel_dp_set_infoframes(struct intel_encoder *encoder, bool enable,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index f483f479dd0b..5fbb767fcd63 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -150,9 +150,6 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
u32 unused)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 =
- to_i915(dig_port->base.base.dev);
- enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
u32 ret;
/*
@@ -170,8 +167,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
- if (intel_phy_is_tc(i915, phy) &&
- dig_port->tc_mode == TC_PORT_TBT_ALT)
+ if (intel_tc_port_in_tbt_alt_mode(dig_port))
ret |= DP_AUX_CH_CTL_TBT_IO;
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 6ac568617ef3..569d17b4d00f 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -34,9 +34,9 @@
* for some reason.
*/
+#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_dp_aux_backlight.h"
-#include "intel_panel.h"
/* TODO:
* Implement HDR, right now we just implement the bare minimum to bring us back into SDR mode so we
@@ -146,7 +146,7 @@ intel_dp_aux_hdr_get_backlight(struct intel_connector *connector, enum pipe pipe
if (!panel->backlight.edp.intel.sdr_uses_aux) {
u32 pwm_level = panel->backlight.pwm_funcs->get(connector, pipe);
- return intel_panel_backlight_level_from_pwm(connector, pwm_level);
+ return intel_backlight_level_from_pwm(connector, pwm_level);
}
/* Assume 100% brightness if backlight controls aren't enabled yet */
@@ -187,9 +187,9 @@ intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32
if (panel->backlight.edp.intel.sdr_uses_aux) {
intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
} else {
- const u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+ const u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
- intel_panel_set_pwm_level(conn_state, pwm_level);
+ intel_backlight_set_pwm_level(conn_state, pwm_level);
}
}
@@ -215,7 +215,7 @@ intel_dp_aux_hdr_enable_backlight(const struct intel_crtc_state *crtc_state,
ctrl |= INTEL_EDP_HDR_TCON_BRIGHTNESS_AUX_ENABLE;
intel_dp_aux_hdr_set_aux_backlight(conn_state, level);
} else {
- u32 pwm_level = intel_panel_backlight_level_to_pwm(connector, level);
+ u32 pwm_level = intel_backlight_level_to_pwm(connector, level);
panel->backlight.pwm_funcs->enable(crtc_state, conn_state, pwm_level);
@@ -238,7 +238,7 @@ intel_dp_aux_hdr_disable_backlight(const struct drm_connector_state *conn_state,
return;
/* Note we want the actual pwm_level to be 0, regardless of pwm_min */
- panel->backlight.pwm_funcs->disable(conn_state, intel_panel_invert_pwm_level(connector, 0));
+ panel->backlight.pwm_funcs->disable(conn_state, intel_backlight_invert_pwm_level(connector, 0));
}
static int
diff --git a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
index d697d169e8c1..540a669e01dd 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_hdcp.c
@@ -446,8 +446,6 @@ static
int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
void *buf, size_t size)
{
- struct intel_dp *dp = &dig_port->dp;
- struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_write, len;
@@ -463,8 +461,6 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
bytes_to_write = size - 1;
byte++;
- hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
-
while (bytes_to_write) {
len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
@@ -482,29 +478,11 @@ int intel_dp_hdcp2_write_msg(struct intel_digital_port *dig_port,
return size;
}
-static int
-get_rxinfo_hdcp_1_dev_downstream(struct intel_digital_port *dig_port, bool *hdcp_1_x)
-{
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- int ret;
-
- ret = drm_dp_dpcd_read(&dig_port->dp.aux,
- DP_HDCP_2_2_REG_RXINFO_OFFSET,
- (void *)rx_info, HDCP_2_2_RXINFO_LEN);
-
- if (ret != HDCP_2_2_RXINFO_LEN)
- return ret >= 0 ? -EIO : ret;
-
- *hdcp_1_x = HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) ? true : false;
- return 0;
-}
-
static
-ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
+ssize_t get_receiver_id_list_rx_info(struct intel_digital_port *dig_port, u32 *dev_cnt, u8 *byte)
{
- u8 rx_info[HDCP_2_2_RXINFO_LEN];
- u32 dev_cnt;
ssize_t ret;
+ u8 *rx_info = byte;
ret = drm_dp_dpcd_read(&dig_port->dp.aux,
DP_HDCP_2_2_REG_RXINFO_OFFSET,
@@ -512,15 +490,11 @@ ssize_t get_receiver_id_list_size(struct intel_digital_port *dig_port)
if (ret != HDCP_2_2_RXINFO_LEN)
return ret >= 0 ? -EIO : ret;
- dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
+ *dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
- dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
-
- ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
- HDCP_2_2_RECEIVER_IDS_MAX_LEN +
- (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+ if (*dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
+ *dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
return ret;
}
@@ -530,12 +504,15 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
u8 msg_id, void *buf, size_t size)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_dp *dp = &dig_port->dp;
+ struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
unsigned int offset;
u8 *byte = buf;
ssize_t ret, bytes_to_recv, len;
const struct hdcp2_dp_msg_data *hdcp2_msg_data;
ktime_t msg_end = ktime_set(0, 0);
bool msg_expired;
+ u32 dev_cnt;
hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
if (!hdcp2_msg_data)
@@ -546,17 +523,24 @@ int intel_dp_hdcp2_read_msg(struct intel_digital_port *dig_port,
if (ret < 0)
return ret;
+ hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
+
+ /* DP adaptation msgs have no msg_id */
+ byte++;
+
if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
- ret = get_receiver_id_list_size(dig_port);
+ ret = get_receiver_id_list_rx_info(dig_port, &dev_cnt, byte);
if (ret < 0)
return ret;
- size = ret;
+ byte += ret;
+ size = sizeof(struct hdcp2_rep_send_receiverid_list) -
+ HDCP_2_2_RXINFO_LEN - HDCP_2_2_RECEIVER_IDS_MAX_LEN +
+ (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
+ offset += HDCP_2_2_RXINFO_LEN;
}
- bytes_to_recv = size - 1;
- /* DP adaptation msgs has no msg_id */
- byte++;
+ bytes_to_recv = size - 1;
while (bytes_to_recv) {
len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
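For reference, the variable-length RepeaterAuth_Send_ReceiverID_List read above sizes the rest of the message from the device count carried in RxInfo. A hedged sketch of that sizing; the field widths (RxInfo 2 bytes, seq_num_V 3, V' 16, 5 bytes per Receiver ID, at most 31 devices) are taken from the HDCP 2.2 spec as assumed here, and the names are not the driver's.

/* Sketch: compute the remaining bytes of a ReceiverID_List message once
 * RxInfo has been read and the device count extracted from it. The byte
 * counts below are assumed from the HDCP 2.2 spec, not from the driver. */
#include <stdio.h>

#define SK_RXINFO_LEN		2
#define SK_SEQ_NUM_V_LEN	3
#define SK_V_PRIME_LEN		16
#define SK_RECEIVER_ID_LEN	5
#define SK_MAX_DEVICE_COUNT	31

static int sketch_recvid_list_remaining(unsigned int dev_cnt)
{
	if (dev_cnt > SK_MAX_DEVICE_COUNT)
		dev_cnt = SK_MAX_DEVICE_COUNT;	/* clamp, as the driver does */

	/* RxInfo was already consumed; seq_num_V, V' and the IDs remain */
	return SK_SEQ_NUM_V_LEN + SK_V_PRIME_LEN + dev_cnt * SK_RECEIVER_ID_LEN;
}

int main(void)
{
	/* e.g. 3 downstream devices -> 3 + 16 + 15 = 34 bytes still to read */
	printf("%d\n", sketch_recvid_list_remaining(3));
	return 0;
}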
@@ -664,27 +648,6 @@ int intel_dp_hdcp2_capable(struct intel_digital_port *dig_port,
return 0;
}
-static
-int intel_dp_mst_streams_type1_capable(struct intel_connector *connector,
- bool *capable)
-{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- int ret;
- bool hdcp_1_x;
-
- ret = get_rxinfo_hdcp_1_dev_downstream(dig_port, &hdcp_1_x);
- if (ret) {
- drm_dbg_kms(&i915->drm,
- "[%s:%d] failed to read RxInfo ret=%d\n",
- connector->base.name, connector->base.base.id, ret);
- return ret;
- }
-
- *capable = !hdcp_1_x;
- return 0;
-}
-
static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
.write_an_aksv = intel_dp_hdcp_write_an_aksv,
.read_bksv = intel_dp_hdcp_read_bksv,
@@ -833,7 +796,6 @@ static const struct intel_hdcp_shim intel_dp_mst_hdcp_shim = {
.stream_2_2_encryption = intel_dp_mst_hdcp2_stream_encryption,
.check_2_2_link = intel_dp_mst_hdcp2_check_link,
.hdcp_2_2_capable = intel_dp_hdcp2_capable,
- .streams_type1_capable = intel_dp_mst_streams_type1_capable,
.protocol = HDCP_PROTOCOL_DP,
};
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 508a514c5e37..85676c953e0a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -25,15 +25,6 @@
#include "intel_dp.h"
#include "intel_dp_link_training.h"
-static void
-intel_dp_dump_link_status(struct drm_device *drm,
- const u8 link_status[DP_LINK_STATUS_SIZE])
-{
- drm_dbg_kms(drm,
- "ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
- link_status[0], link_status[1], link_status[2],
- link_status[3], link_status[4], link_status[5]);
-}
static void intel_dp_reset_lttpr_common_caps(struct intel_dp *intel_dp)
{
@@ -66,6 +57,7 @@ static u8 *intel_dp_lttpr_phy_caps(struct intel_dp *intel_dp,
static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
char phy_name[10];
@@ -73,21 +65,22 @@ static void intel_dp_read_lttpr_phy_caps(struct intel_dp *intel_dp,
if (drm_dp_read_lttpr_phy_caps(&intel_dp->aux, dp_phy, phy_caps) < 0) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "failed to read the PHY caps for %s\n",
- phy_name);
+ "[ENCODER:%d:%s][%s] failed to read the PHY caps\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return;
}
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "%s PHY capabilities: %*ph\n",
- phy_name,
+ "[ENCODER:%d:%s][%s] PHY capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name, phy_name,
(int)sizeof(intel_dp->lttpr_phy_caps[0]),
phy_caps);
}
static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
if (intel_dp_is_edp(intel_dp))
return false;
@@ -104,7 +97,8 @@ static bool intel_dp_read_lttpr_common_caps(struct intel_dp *intel_dp)
goto reset_caps;
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "LTTPR common capabilities: %*ph\n",
+ "[ENCODER:%d:%s] LTTPR common capabilities: %*ph\n",
+ encoder->base.base.id, encoder->base.name,
(int)sizeof(intel_dp->lttpr_common_caps),
intel_dp->lttpr_common_caps);
@@ -130,6 +124,8 @@ intel_dp_set_lttpr_transparent_mode(struct intel_dp *intel_dp, bool enable)
static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int lttpr_count;
int i;
@@ -161,8 +157,9 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp)
return 0;
if (!intel_dp_set_lttpr_transparent_mode(intel_dp, false)) {
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s] Switching to LTTPR non-transparent LT mode failed, fall-back to transparent mode\n",
+ encoder->base.base.id, encoder->base.name);
intel_dp_set_lttpr_transparent_mode(intel_dp, true);
intel_dp_reset_lttpr_count(intel_dp);
@@ -301,21 +298,54 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
return preemph_max;
}
-void
-intel_dp_get_adjust_train(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- enum drm_dp_phy dp_phy,
- const u8 link_status[DP_LINK_STATUS_SIZE])
+static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
+ enum drm_dp_phy dp_phy)
+{
+ return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy);
+}
+
+/* 128b/132b */
+static u8 intel_dp_get_lane_adjust_tx_ffe_preset(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ u8 tx_ffe = 0;
+
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+ tx_ffe = drm_dp_get_adjust_tx_ffe_preset(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++)
+ tx_ffe = max(tx_ffe, drm_dp_get_adjust_tx_ffe_preset(link_status, lane));
+ }
+
+ return tx_ffe;
+}
+
+/* 8b/10b */
+static u8 intel_dp_get_lane_adjust_vswing_preemph(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
{
u8 v = 0;
u8 p = 0;
- int lane;
u8 voltage_max;
u8 preemph_max;
- for (lane = 0; lane < crtc_state->lane_count; lane++) {
- v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
- p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
+ if (has_per_lane_signal_levels(intel_dp, dp_phy)) {
+ lane = min(lane, crtc_state->lane_count - 1);
+
+ v = drm_dp_get_adjust_request_voltage(link_status, lane);
+ p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
+ } else {
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ v = max(v, drm_dp_get_adjust_request_voltage(link_status, lane));
+ p = max(p, drm_dp_get_adjust_request_pre_emphasis(link_status, lane));
+ }
}
preemph_max = intel_dp_phy_preemph_max(intel_dp, dp_phy);
@@ -328,8 +358,79 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp,
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
+ return v | p;
+}
+
+static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE],
+ int lane)
+{
+ if (intel_dp_is_uhbr(crtc_state))
+ return intel_dp_get_lane_adjust_tx_ffe_preset(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+ else
+ return intel_dp_get_lane_adjust_vswing_preemph(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
+}
+
+#define TRAIN_REQ_FMT "%d/%d/%d/%d"
+#define _TRAIN_REQ_VSWING_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_voltage((link_status), (lane)) >> DP_TRAIN_VOLTAGE_SWING_SHIFT)
+#define TRAIN_REQ_VSWING_ARGS(link_status) \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 0), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 1), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 2), \
+ _TRAIN_REQ_VSWING_ARGS(link_status, 3)
+#define _TRAIN_REQ_PREEMPH_ARGS(link_status, lane) \
+ (drm_dp_get_adjust_request_pre_emphasis((link_status), (lane)) >> DP_TRAIN_PRE_EMPHASIS_SHIFT)
+#define TRAIN_REQ_PREEMPH_ARGS(link_status) \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 0), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 1), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 2), \
+ _TRAIN_REQ_PREEMPH_ARGS(link_status, 3)
+#define _TRAIN_REQ_TX_FFE_ARGS(link_status, lane) \
+ drm_dp_get_adjust_tx_ffe_preset((link_status), (lane))
+#define TRAIN_REQ_TX_FFE_ARGS(link_status) \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 0), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 1), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \
+ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3)
+
+void
+intel_dp_get_adjust_train(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
+ int lane;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_TX_FFE_ARGS(link_status));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing request: " TRAIN_REQ_FMT ", "
+ "pre-emphasis request: " TRAIN_REQ_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_REQ_VSWING_ARGS(link_status),
+ TRAIN_REQ_PREEMPH_ARGS(link_status));
+ }
+
for (lane = 0; lane < 4; lane++)
- intel_dp->train_set[lane] = v | p;
+ intel_dp->train_set[lane] =
+ intel_dp_get_lane_adjust_train(intel_dp, crtc_state,
+ dp_phy, link_status, lane);
}
static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp,
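The reworked intel_dp_get_adjust_train() above picks, per lane, either a TX FFE preset (128b/132b) or a vswing/pre-emphasis pair (8b/10b), and only honours per-lane requests when the PHY being driven supports per-lane levels; otherwise it takes the maximum request across lanes. A standalone sketch of that selection rule, with made-up names and plain integers standing in for the DPCD fields:

/* Sketch: per-lane vs. max-across-lanes drive level selection, mirroring
 * the structure above. Requests are plain integers here, not DPCD fields. */
#include <stdio.h>

static int pick_lane_level(const int *requests, int lane_count,
			   int lane, int per_lane_capable)
{
	int level = 0, i;

	if (per_lane_capable) {
		/* honour each lane's own request */
		if (lane > lane_count - 1)
			lane = lane_count - 1;
		level = requests[lane];
	} else {
		/* common level: the worst-case (largest) request wins */
		for (i = 0; i < lane_count; i++)
			if (requests[i] > level)
				level = requests[i];
	}

	return level;
}

int main(void)
{
	static const int requests[4] = { 1, 3, 0, 2 };
	int lane;

	for (lane = 0; lane < 4; lane++)
		printf("lane %d: per-lane %d, common %d\n", lane,
		       pick_lane_level(requests, 4, lane, 1),
		       pick_lane_level(requests, 4, lane, 0));

	return 0;
}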
@@ -351,7 +452,7 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
int len;
intel_dp_program_link_training_pattern(intel_dp, crtc_state,
- dp_train_pat);
+ dp_phy, dp_train_pat);
buf[0] = dp_train_pat;
/* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
@@ -379,40 +480,77 @@ static char dp_training_pattern_name(u8 train_pat)
void
intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat)
{
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 train_pat = intel_dp_training_pattern_symbol(dp_train_pat);
+ char phy_name[10];
if (train_pat != DP_TRAINING_PATTERN_DISABLE)
- drm_dbg_kms(&dev_priv->drm,
- "[ENCODER:%d:%s] Using DP training pattern TPS%c\n",
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Using DP training pattern TPS%c\n",
encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
dp_training_pattern_name(train_pat));
intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
}
+#define TRAIN_SET_FMT "%d%s/%d%s/%d%s/%d%s"
+#define _TRAIN_SET_VSWING_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_SWING_REACHED ? "(max)" : ""
+#define TRAIN_SET_VSWING_ARGS(train_set) \
+ _TRAIN_SET_VSWING_ARGS((train_set)[0]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[1]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[2]), \
+ _TRAIN_SET_VSWING_ARGS((train_set)[3])
+#define _TRAIN_SET_PREEMPH_ARGS(train_set) \
+ ((train_set) & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT, \
+ (train_set) & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? "(max)" : ""
+#define TRAIN_SET_PREEMPH_ARGS(train_set) \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[0]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[1]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[2]), \
+ _TRAIN_SET_PREEMPH_ARGS((train_set)[3])
+#define _TRAIN_SET_TX_FFE_ARGS(train_set) \
+ ((train_set) & DP_TX_FFE_PRESET_VALUE_MASK), ""
+#define TRAIN_SET_TX_FFE_ARGS(train_set) \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[0]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[1]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[2]), \
+ _TRAIN_SET_TX_FFE_ARGS((train_set)[3])
+
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u8 train_set = intel_dp->train_set[0];
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
char phy_name[10];
- drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
- train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT,
- train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
- " (max)" : "",
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ if (intel_dp_is_uhbr(crtc_state)) {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 128b/132b, lanes: %d, "
+ "TX FFE presets: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_TX_FFE_ARGS(intel_dp->train_set));
+ } else {
+ drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s][%s] 8b/10b, lanes: %d, "
+ "vswing levels: " TRAIN_SET_FMT ", "
+ "pre-emphasis levels: " TRAIN_SET_FMT "\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ crtc_state->lane_count,
+ TRAIN_SET_VSWING_ARGS(intel_dp->train_set),
+ TRAIN_SET_PREEMPH_ARGS(intel_dp->train_set));
+ }
if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy))
- intel_dp->set_signal_levels(intel_dp, crtc_state);
+ encoder->set_signal_levels(encoder, crtc_state);
}
static bool
@@ -444,15 +582,55 @@ intel_dp_update_link_train(struct intel_dp *intel_dp,
return ret == crtc_state->lane_count;
}
+/* 128b/132b */
+static bool intel_dp_lane_max_tx_ffe_reached(u8 train_set_lane)
+{
+ return (train_set_lane & DP_TX_FFE_PRESET_VALUE_MASK) ==
+ DP_TX_FFE_PRESET_VALUE_MASK;
+}
+
+/*
+ * 8b/10b
+ *
+ * FIXME: The DP spec is very confusing here; the Link CTS spec also seems to
+ * have self-contradicting tests around this area.
+ *
+ * In lieu of better ideas, let's just stop when we've reached the max supported
+ * vswing with its max pre-emphasis, which is either 2+1 or 3+0 depending on
+ * whether vswing level 3 is supported or not.
+ */
+static bool intel_dp_lane_max_vswing_reached(u8 train_set_lane)
+{
+ u8 v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
+ DP_TRAIN_VOLTAGE_SWING_SHIFT;
+ u8 p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT;
+
+ if ((train_set_lane & DP_TRAIN_MAX_SWING_REACHED) == 0)
+ return false;
+
+ if (v + p != 3)
+ return false;
+
+ return true;
+}
+
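The comment above describes the 8b/10b stopping rule as max vswing combined with its max pre-emphasis, i.e. 2+1 or 3+0. A tiny standalone check with worked values (again not part of the patch; constants mirror drm_dp_helper.h) makes the rule concrete:

#include <assert.h>
#include <stdbool.h>

#define DP_TRAIN_VOLTAGE_SWING_MASK	0x3
#define DP_TRAIN_VOLTAGE_SWING_SHIFT	0
#define DP_TRAIN_MAX_SWING_REACHED	(1 << 2)
#define DP_TRAIN_PRE_EMPHASIS_MASK	0x18
#define DP_TRAIN_PRE_EMPHASIS_SHIFT	3

/* same rule as intel_dp_lane_max_vswing_reached() above */
static bool lane_max_vswing_reached(unsigned char train_set_lane)
{
	unsigned char v = (train_set_lane & DP_TRAIN_VOLTAGE_SWING_MASK) >>
		DP_TRAIN_VOLTAGE_SWING_SHIFT;
	unsigned char p = (train_set_lane & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		DP_TRAIN_PRE_EMPHASIS_SHIFT;

	return (train_set_lane & DP_TRAIN_MAX_SWING_REACHED) && v + p == 3;
}

int main(void)
{
	/* vswing 2 (max) + pre-emphasis 1: 2 + 1 == 3, so stop */
	assert(lane_max_vswing_reached(2 | DP_TRAIN_MAX_SWING_REACHED |
				       (1 << DP_TRAIN_PRE_EMPHASIS_SHIFT)));
	/* vswing 2 (max) + pre-emphasis 0: 2 + 0 != 3, keep adjusting */
	assert(!lane_max_vswing_reached(2 | DP_TRAIN_MAX_SWING_REACHED));
	return 0;
}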
static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
int lane;
- for (lane = 0; lane < crtc_state->lane_count; lane++)
- if ((intel_dp->train_set[lane] &
- DP_TRAIN_MAX_SWING_REACHED) == 0)
- return false;
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 train_set_lane = intel_dp->train_set[lane];
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ if (!intel_dp_lane_max_tx_ffe_reached(train_set_lane))
+ return false;
+ } else {
+ if (!intel_dp_lane_max_vswing_reached(train_set_lane))
+ return false;
+ }
+ }
return true;
}
@@ -465,7 +643,8 @@ static bool
intel_dp_prepare_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
u8 link_config[2];
u8 link_bw, rate_select;
@@ -477,10 +656,12 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
if (link_bw)
drm_dbg_kms(&i915->drm,
- "Using LINK_BW_SET value %02x\n", link_bw);
+ "[ENCODER:%d:%s] Using LINK_BW_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, link_bw);
else
drm_dbg_kms(&i915->drm,
- "Using LINK_RATE_SET value %02x\n", rate_select);
+ "[ENCODER:%d:%s] Using LINK_RATE_SET value %02x\n",
+ encoder->base.base.id, encoder->base.name, rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -495,11 +676,10 @@ intel_dp_prepare_link_train(struct intel_dp *intel_dp,
&rate_select, 1);
link_config[0] = crtc_state->vrr.enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0;
- link_config[1] = DP_SET_ANSI_8B10B;
+ link_config[1] = intel_dp_is_uhbr(crtc_state) ?
+ DP_SET_ANSI_128B132B : DP_SET_ANSI_8B10B;
drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
- intel_dp->DP |= DP_PORT_EN;
-
return true;
}
@@ -512,6 +692,48 @@ static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_d
drm_dp_lttpr_link_train_clock_recovery_delay();
}
+static bool intel_dp_adjust_request_changed(const struct intel_crtc_state *crtc_state,
+ const u8 old_link_status[DP_LINK_STATUS_SIZE],
+ const u8 new_link_status[DP_LINK_STATUS_SIZE])
+{
+ int lane;
+
+ for (lane = 0; lane < crtc_state->lane_count; lane++) {
+ u8 old, new;
+
+ if (intel_dp_is_uhbr(crtc_state)) {
+ old = drm_dp_get_adjust_tx_ffe_preset(old_link_status, lane);
+ new = drm_dp_get_adjust_tx_ffe_preset(new_link_status, lane);
+ } else {
+ old = drm_dp_get_adjust_request_voltage(old_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(old_link_status, lane);
+ new = drm_dp_get_adjust_request_voltage(new_link_status, lane) |
+ drm_dp_get_adjust_request_pre_emphasis(new_link_status, lane);
+ }
+
+ if (old != new)
+ return true;
+ }
+
+ return false;
+}
+
+static void
+intel_dp_dump_link_status(struct intel_dp *intel_dp, enum drm_dp_phy dp_phy,
+ const u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ char phy_name[10];
+
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] ln0_1:0x%x ln2_3:0x%x align:0x%x sink:0x%x adj_req0_1:0x%x adj_req2_3:0x%x\n",
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
+ link_status[0], link_status[1], link_status[2],
+ link_status[3], link_status[4], link_status[5]);
+}
+
/*
* Perform the link training clock recovery phase on the given DP PHY using
* training pattern 1.
@@ -521,16 +743,22 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
- u8 voltage;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ u8 old_link_status[DP_LINK_STATUS_SIZE] = {};
int voltage_tries, cr_tries, max_cr_tries;
+ u8 link_status[DP_LINK_STATUS_SIZE];
bool max_vswing_reached = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
/* clock recovery */
if (!intel_dp_reset_link_train(intel_dp, crtc_state, dp_phy,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- drm_err(&i915->drm, "failed to enable link training\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to enable link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
@@ -549,105 +777,118 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp,
voltage_tries = 1;
for (cr_tries = 0; cr_tries < max_cr_tries; ++cr_tries) {
- u8 link_status[DP_LINK_STATUS_SIZE];
-
intel_dp_link_training_clock_recovery_delay(intel_dp, dp_phy);
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
- drm_err(&i915->drm, "failed to get link status\n");
+ drm_err(&i915->drm, "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (drm_dp_clock_recovery_ok(link_status, crtc_state->lane_count)) {
- drm_dbg_kms(&i915->drm, "clock recovery OK\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Clock recovery OK\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return true;
}
if (voltage_tries == 5) {
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Same voltage tried 5 times\n");
+ "[ENCODER:%d:%s][%s] Same voltage tried 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
if (max_vswing_reached) {
- drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Max Voltage Swing reached\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
-
/* Update training set as requested by target */
intel_dp_get_adjust_train(intel_dp, crtc_state, dp_phy,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
return false;
}
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) ==
- voltage)
+ if (!intel_dp_adjust_request_changed(crtc_state, old_link_status, link_status))
++voltage_tries;
else
voltage_tries = 1;
+ memcpy(old_link_status, link_status, sizeof(link_status));
+
if (intel_dp_link_max_vswing_reached(intel_dp, crtc_state))
max_vswing_reached = true;
-
}
+
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_err(&i915->drm,
- "Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ "[ENCODER:%d:%s][%s] Failed clock recovery %d times, giving up!\n",
+ encoder->base.base.id, encoder->base.name, phy_name, max_cr_tries);
+
return false;
}
/*
- * Pick training pattern for channel equalization. Training pattern 4 for HBR3
- * or for 1.4 devices that support it, training Pattern 3 for HBR2
- * or 1.2 devices that support it, Training Pattern 2 otherwise.
+ * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2
+ * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or
+ * 1.2 devices that support it, TPS2 otherwise.
*/
static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
+ /* UHBR+ use separate 128b/132b TPS2 */
+ if (intel_dp_is_uhbr(crtc_state))
+ return DP_TRAINING_PATTERN_2;
+
/*
- * Intel platforms that support HBR3 also support TPS4. It is mandatory
- * for all downstream devices that support HBR3. There are no known eDP
- * panels that support TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1
- * specification.
+ * TPS4 support is mandatory for all downstream devices that
+ * support HBR3. There are no known eDP panels that support
+ * TPS4 as of Feb 2018, per the VESA eDP_v1.4b_E1 specification.
* LTTPRs must support TPS4.
*/
- source_tps4 = intel_dp_source_supports_hbr3(intel_dp);
+ source_tps4 = intel_dp_source_supports_tps4(i915);
sink_tps4 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps4_supported(intel_dp->dpcd);
if (source_tps4 && sink_tps4) {
return DP_TRAINING_PATTERN_4;
} else if (crtc_state->port_clock == 810000) {
if (!source_tps4)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ drm_dbg_kms(&i915->drm,
+ "8.1 Gbps link rate without source TPS4 support\n");
if (!sink_tps4)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ drm_dbg_kms(&i915->drm,
"8.1 Gbps link rate without sink TPS4 support\n");
}
+
/*
- * Intel platforms that support HBR2 also support TPS3. TPS3 support is
- * also mandatory for downstream devices that support HBR2. However, not
- * all sinks follow the spec.
+ * TPS3 support is mandatory for downstream devices that
+ * support HBR2. However, not all sinks follow the spec.
*/
- source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+ source_tps3 = intel_dp_source_supports_tps3(i915);
sink_tps3 = dp_phy != DP_PHY_DPRX ||
drm_dp_tps3_supported(intel_dp->dpcd);
if (source_tps3 && sink_tps3) {
return DP_TRAINING_PATTERN_3;
} else if (crtc_state->port_clock >= 540000) {
if (!source_tps3)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+ drm_dbg_kms(&i915->drm,
+ ">=5.4/6.48 Gbps link rate without source TPS3 support\n");
if (!sink_tps3)
- drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ drm_dbg_kms(&i915->drm,
">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
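For readers skimming the hunk above, the selection order that the comment describes condenses to the sketch below. The helper name and boolean parameters are invented for the illustration; the real intel_dp_training_pattern() additionally emits the debug warnings shown in the diff.

#include <stdbool.h>

enum tps { TPS2 = 2, TPS3 = 3, TPS4 = 4 };

/* condensed restatement of the channel-EQ pattern selection */
static enum tps pick_eq_pattern(bool is_128b132b,
				bool source_tps4, bool sink_tps4,
				bool source_tps3, bool sink_tps3)
{
	if (is_128b132b)
		return TPS2;	/* 128b/132b defines its own TPS2 */
	if (source_tps4 && sink_tps4)
		return TPS4;	/* HBR3 / DP 1.4 capable path */
	if (source_tps3 && sink_tps3)
		return TPS3;	/* HBR2 / DP 1.2 capable path */
	return TPS2;
}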
@@ -677,11 +918,15 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
bool channel_eq = false;
+ char phy_name[10];
+
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name));
training_pattern = intel_dp_training_pattern(intel_dp, crtc_state, dp_phy);
/* Scrambling is disabled for TPS2/3 and enabled for TPS4 */
@@ -691,7 +936,10 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp, crtc_state, dp_phy,
training_pattern)) {
- drm_err(&i915->drm, "failed to start channel equalization\n");
+ drm_err(&i915->drm,
+ "[ENCODER:%d:%s][%s] Failed to start channel equalization\n",
+ encoder->base.base.id, encoder->base.name,
+ phy_name);
return false;
}
@@ -701,25 +949,28 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, dp_phy,
link_status) < 0) {
drm_err(&i915->drm,
- "failed to get link status\n");
+ "[ENCODER:%d:%s][%s] Failed to get link status\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
/* Make sure clock is still ok */
if (!drm_dp_clock_recovery_ok(link_status,
crtc_state->lane_count)) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ "[ENCODER:%d:%s][%s] Clock recovery check failed, cannot "
+ "continue channel equalization\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
if (drm_dp_channel_eq_ok(link_status,
crtc_state->lane_count)) {
channel_eq = true;
- drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm,
+ "[ENCODER:%d:%s][%s] Channel EQ done. DP Training successful\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
@@ -728,16 +979,18 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp,
link_status);
if (!intel_dp_update_link_train(intel_dp, crtc_state, dp_phy)) {
drm_err(&i915->drm,
- "failed to update link training\n");
+ "[ENCODER:%d:%s][%s] Failed to update link training\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
break;
}
}
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
- intel_dp_dump_link_status(&i915->drm, link_status);
+ intel_dp_dump_link_status(intel_dp, dp_phy, link_status);
drm_dbg_kms(&i915->drm,
- "Channel equalization failed 5 times\n");
+ "[ENCODER:%d:%s][%s] Channel equalization failed 5 times\n",
+ encoder->base.base.id, encoder->base.name, phy_name);
}
return channel_eq;
@@ -774,7 +1027,7 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp,
intel_dp->link_trained = true;
intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX);
- intel_dp_program_link_training_pattern(intel_dp, crtc_state,
+ intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX,
DP_TRAINING_PATTERN_DISABLE);
}
@@ -783,7 +1036,8 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
char phy_name[10];
bool ret = false;
@@ -797,12 +1051,12 @@ intel_dp_link_train_phy(struct intel_dp *intel_dp,
out:
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "[CONNECTOR:%d:%s] Link Training %s at link rate = %d, lane count = %d, at %s\n",
- intel_connector->base.base.id,
- intel_connector->base.name,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s][%s] Link Training %s at link rate = %d, lane count = %d\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)),
ret ? "passed" : "failed",
- crtc_state->port_clock, crtc_state->lane_count,
- intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name)));
+ crtc_state->port_clock, crtc_state->lane_count);
return ret;
}
@@ -811,10 +1065,13 @@ static void intel_dp_schedule_fallback_link_training(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (intel_dp->hobl_active) {
drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
- "Link Training failed with HOBL active, not enabling it from now on");
+ "[ENCODER:%d:%s] Link Training failed with HOBL active, "
+ "not enabling it from now on",
+ encoder->base.base.id, encoder->base.name);
intel_dp->hobl_failed = true;
} else if (intel_dp_get_link_train_fallback_values(intel_dp,
crtc_state->port_clock,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
index 9d24d594368c..6a3a7b37349a 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h
@@ -19,6 +19,7 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp,
const u8 link_status[DP_LINK_STATUS_SIZE]);
void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
+ enum drm_dp_phy dp_phy,
u8 dp_train_pat);
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 8d13d7b26a25..89d701e8ae9d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -61,7 +61,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
int bpp, slots = -EINVAL;
crtc_state->lane_count = limits->max_lane_count;
- crtc_state->port_clock = limits->max_clock;
+ crtc_state->port_clock = limits->max_rate;
for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
crtc_state->pipe_bpp = bpp;
@@ -131,8 +131,8 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder,
* for MST we always configure max link bw - the spec doesn't
* seem to suggest we should do otherwise.
*/
- limits.min_clock =
- limits.max_clock = intel_dp_max_link_rate(intel_dp);
+ limits.min_rate =
+ limits.max_rate = intel_dp_max_link_rate(intel_dp);
limits.min_lane_count =
limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
@@ -378,7 +378,7 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state,
drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port);
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
if (ret) {
drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret);
}
@@ -396,7 +396,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
to_intel_connector(old_conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
bool last_mst_stream;
- u32 val;
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
@@ -406,18 +405,14 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state,
intel_crtc_vblank_off(old_crtc_state);
- intel_disable_pipe(old_crtc_state);
+ intel_disable_transcoder(old_crtc_state);
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
clear_act_sent(encoder, old_crtc_state);
- val = intel_de_read(dev_priv,
- TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
- val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- intel_de_write(dev_priv,
- TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
- val);
+ intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ TRANS_DDI_DP_VC_PAYLOAD_ALLOC, 0);
wait_for_act_sent(encoder, old_crtc_state);
@@ -523,7 +518,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state,
intel_dp->active_mst_links++;
- ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
+ ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, 1);
/*
* Before Gen 12 this is not done as part of
@@ -555,6 +550,17 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
clear_act_sent(encoder, pipe_config);
+ if (intel_dp_is_uhbr(pipe_config)) {
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ u64 crtc_clock_hz = KHz(adjusted_mode->crtc_clock);
+
+ intel_de_write(dev_priv, TRANS_DP2_VFREQHIGH(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz >> 24));
+ intel_de_write(dev_priv, TRANS_DP2_VFREQLOW(pipe_config->cpu_transcoder),
+ TRANS_DP2_VFREQ_PIXEL_CLOCK(crtc_clock_hz & 0xffffff));
+ }
+
intel_ddi_enable_transcoder_func(encoder, pipe_config);
intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(trans), 0,
@@ -571,7 +577,7 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state,
intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0,
FECSTALL_DIS_DPTSTREAM_DPTTG);
- intel_enable_pipe(pipe_config);
+ intel_enable_transcoder(pipe_config);
intel_crtc_vblank_on(pipe_config);
@@ -971,24 +977,31 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
dig_port->max_lanes,
max_source_rate,
conn_base_id);
- if (ret)
+ if (ret) {
+ intel_dp->mst_mgr.cbs = NULL;
return ret;
-
- intel_dp->can_mst = true;
+ }
return 0;
}
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp)
+{
+ return intel_dp->mst_mgr.cbs;
+}
+
void
intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port)
{
struct intel_dp *intel_dp = &dig_port->dp;
- if (!intel_dp->can_mst)
+ if (!intel_dp_mst_source_support(intel_dp))
return;
drm_dp_mst_topology_mgr_destroy(&intel_dp->mst_mgr);
/* encoders will get killed by normal cleanup */
+
+ intel_dp->mst_mgr.cbs = NULL;
}
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state)
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.h b/drivers/gpu/drm/i915/display/intel_dp_mst.h
index 6afda4e86b3c..f7301de6cdfb 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.h
@@ -8,13 +8,15 @@
#include <linux/types.h>
-struct intel_digital_port;
struct intel_crtc_state;
+struct intel_digital_port;
+struct intel_dp;
int intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_id);
void intel_dp_mst_encoder_cleanup(struct intel_digital_port *dig_port);
int intel_dp_mst_encoder_active_links(struct intel_digital_port *dig_port);
bool intel_dp_mst_is_master_trans(const struct intel_crtc_state *crtc_state);
bool intel_dp_mst_is_slave_trans(const struct intel_crtc_state *crtc_state);
+bool intel_dp_mst_source_support(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_MST_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 48507ed79950..44edeb2e55c0 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -21,12 +21,13 @@
* DEALINGS IN THE SOFTWARE.
*/
-#include "display/intel_dp.h"
-
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dp.h"
#include "intel_dpio_phy.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
/**
* DOC: DPIO
@@ -266,15 +267,22 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
*ch = DPIO_CH0;
}
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis)
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
- u32 val;
- enum dpio_phy phy;
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
+ const struct intel_ddi_buf_trans *trans;
enum dpio_channel ch;
+ enum dpio_phy phy;
+ int n_entries;
+ u32 val;
- bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ bxt_port_to_phy_channel(dev_priv, encoder->port, &phy, &ch);
/*
* While we write to the group register to program all lanes at once we
@@ -286,12 +294,13 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
- val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
+ val |= trans->entries[level].bxt.margin << MARGIN_000_SHIFT |
+ trans->entries[level].bxt.scale << UNIQ_TRANS_SCALE_SHIFT;
intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
- if (enable)
+ if (trans->entries[level].bxt.enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
@@ -302,7 +311,7 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
- val |= deemphasis << DEEMPH_SHIFT;
+ val |= trans->entries[level].bxt.deemphasis << DEEMPH_SHIFT;
intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.h b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
index 6473440e7457..9c3d008e8e1a 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.h
@@ -17,9 +17,8 @@ struct intel_encoder;
void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
enum dpio_phy *phy, enum dpio_channel *ch);
-void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
- enum port port, u32 margin, u32 scale,
- u32 enable, u32 deemphasis);
+void bxt_ddi_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy);
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy);
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index 14515e62c05e..04a7af8340ca 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -2,16 +2,19 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include <linux/kernel.h>
+
#include "intel_crtc.h"
#include "intel_de.h"
-#include "intel_display_types.h"
#include "intel_display.h"
+#include "intel_display_types.h"
#include "intel_dpll.h"
#include "intel_lvds.h"
#include "intel_panel.h"
-#include "intel_sideband.h"
-#include "display/intel_snps_phy.h"
+#include "intel_pps.h"
+#include "intel_snps_phy.h"
+#include "vlv_sideband.h"
struct intel_limit {
struct {
@@ -309,7 +312,7 @@ int pnv_calc_dpll_params(int refclk, struct dpll *clock)
return clock->dot;
}
-static u32 i9xx_dpll_compute_m(struct dpll *dpll)
+static u32 i9xx_dpll_compute_m(const struct dpll *dpll)
{
return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}
@@ -428,7 +431,8 @@ i9xx_select_p2_div(const struct intel_limit *limit,
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -486,7 +490,8 @@ i9xx_find_best_dpll(const struct intel_limit *limit,
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -542,7 +547,8 @@ pnv_find_best_dpll(const struct intel_limit *limit,
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
@@ -636,7 +642,8 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -696,7 +703,8 @@ vlv_find_best_dpll(const struct intel_limit *limit,
static bool
chv_find_best_dpll(const struct intel_limit *limit,
struct intel_crtc_state *crtc_state,
- int target, int refclk, struct dpll *match_clock,
+ int target, int refclk,
+ const struct dpll *match_clock,
struct dpll *best_clock)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -763,47 +771,45 @@ bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
NULL, best_clock);
}
-static u32 pnv_dpll_compute_fp(struct dpll *dpll)
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll)
+{
+ return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
+}
+
+static u32 pnv_dpll_compute_fp(const struct dpll *dpll)
{
return (1 << dpll->n) << 16 | dpll->m2;
}
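A quick worked example of the two FP encodings above, with divider values picked arbitrarily; the helpers below are standalone copies of the formulas for illustration, not the driver functions themselves.

#include <assert.h>
#include <stdint.h>

struct dividers { int n, m1, m2; };

static uint32_t i9xx_fp(const struct dividers *d)
{
	return d->n << 16 | d->m1 << 8 | d->m2;	/* n in the upper byte, then m1, then m2 */
}

static uint32_t pnv_fp(const struct dividers *d)
{
	return (1 << d->n) << 16 | d->m2;	/* Pineview stores 2^n and has no m1 field */
}

int main(void)
{
	struct dividers d = { .n = 2, .m1 = 12, .m2 = 9 };

	assert(i9xx_fp(&d) == 0x00020c09);
	assert(pnv_fp(&d) == 0x00040009);
	return 0;
}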
-static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i9xx_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 fp, fp2 = 0;
+ u32 fp, fp2;
if (IS_PINEVIEW(dev_priv)) {
- fp = pnv_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = pnv_dpll_compute_fp(reduced_clock);
+ fp = pnv_dpll_compute_fp(clock);
+ fp2 = pnv_dpll_compute_fp(reduced_clock);
} else {
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
- if (reduced_clock)
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ fp = i9xx_dpll_compute_fp(clock);
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
}
crtc_state->dpll_hw_state.fp0 = fp;
-
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
- reduced_clock) {
- crtc_state->dpll_hw_state.fp1 = fp2;
- } else {
- crtc_state->dpll_hw_state.fp1 = fp;
- }
+ crtc_state->dpll_hw_state.fp1 = fp2;
}
-static void i9xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i9xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -826,13 +832,17 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- if (IS_PINEVIEW(dev_priv))
+ if (IS_G4X(dev_priv)) {
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ } else if (IS_PINEVIEW(dev_priv)) {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
- else {
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ } else {
dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
- if (IS_G4X(dev_priv) && reduced_clock)
- dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ WARN_ON(reduced_clock->p1 != clock->p1);
}
+
switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -847,6 +857,8 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
+ WARN_ON(reduced_clock->p2 != clock->p2);
+
if (DISPLAY_VER(dev_priv) >= 4)
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
@@ -868,16 +880,15 @@ static void i9xx_compute_dpll(struct intel_crtc *crtc,
}
}
-static void i8xx_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void i8xx_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dpll;
- struct dpll *clock = &crtc_state->dpll;
- i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
+ i9xx_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = DPLL_VGA_MODE_DIS;
@@ -891,6 +902,8 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
if (clock->p2 == 4)
dpll |= PLL_P2_DIVIDE_BY_4;
}
+ WARN_ON(reduced_clock->p1 != clock->p1);
+ WARN_ON(reduced_clock->p2 != clock->p2);
/*
* Bspec:
@@ -918,42 +931,44 @@ static void i8xx_compute_dpll(struct intel_crtc *crtc,
crtc_state->dpll_hw_state.dpll = dpll;
}
-static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int hsw_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
- if (IS_DG2(dev_priv)) {
+ if (IS_DG2(dev_priv))
return intel_mpllb_calc_state(crtc_state, encoder);
- } else if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
- DISPLAY_VER(dev_priv) >= 11) {
- if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
- return -EINVAL;
- }
+
+ if (DISPLAY_VER(dev_priv) < 11 &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ return 0;
+
+ if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
+ return -EINVAL;
}
return 0;
}
-static bool ilk_needs_fb_cb_tune(struct dpll *dpll, int factor)
+static bool ilk_needs_fb_cb_tune(const struct dpll *dpll, int factor)
{
- return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
+ return dpll->m < factor * dpll->n;
}
-
-static void ilk_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state,
- struct dpll *reduced_clock)
+static void ilk_update_pll_dividers(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 dpll, fp, fp2;
+ u32 fp, fp2;
int factor;
/* Enable autotuning of the PLL clock (if permissible) */
@@ -968,19 +983,27 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
factor = 20;
}
- fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
-
- if (ilk_needs_fb_cb_tune(&crtc_state->dpll, factor))
+ fp = i9xx_dpll_compute_fp(clock);
+ if (ilk_needs_fb_cb_tune(clock, factor))
fp |= FP_CB_TUNE;
- if (reduced_clock) {
- fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ fp2 = i9xx_dpll_compute_fp(reduced_clock);
+ if (ilk_needs_fb_cb_tune(reduced_clock, factor))
+ fp2 |= FP_CB_TUNE;
- if (reduced_clock->m < factor * reduced_clock->n)
- fp2 |= FP_CB_TUNE;
- } else {
- fp2 = fp;
- }
+ crtc_state->dpll_hw_state.fp0 = fp;
+ crtc_state->dpll_hw_state.fp1 = fp2;
+}
+
+static void ilk_compute_dpll(struct intel_crtc_state *crtc_state,
+ const struct dpll *clock,
+ const struct dpll *reduced_clock)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ u32 dpll;
+
+ ilk_update_pll_dividers(crtc_state, clock, reduced_clock);
dpll = 0;
@@ -1018,11 +1041,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_SDVO_HIGH_SPEED;
/* compute bitmask from p1 value */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
+ dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
- switch (crtc_state->dpll.p2) {
+ switch (clock->p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
break;
@@ -1036,6 +1059,7 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
+ WARN_ON(reduced_clock->p2 != clock->p2);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
intel_panel_use_ssc(dev_priv))
@@ -1046,13 +1070,11 @@ static void ilk_compute_dpll(struct intel_crtc *crtc,
dpll |= DPLL_VCO_ENABLE;
crtc_state->dpll_hw_state.dpll = dpll;
- crtc_state->dpll_hw_state.fp0 = fp;
- crtc_state->dpll_hw_state.fp1 = fp2;
}
-static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int ilk_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
@@ -1097,7 +1119,8 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- ilk_compute_dpll(crtc, crtc_state, NULL);
+ ilk_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
drm_dbg_kms(&dev_priv->drm,
@@ -1109,41 +1132,42 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
return 0;
}
-void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state)
{
- pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
DPLL_EXT_BUFFER_ENABLE_VLV;
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config)
+void chv_compute_dpll(struct intel_crtc_state *crtc_state)
{
- pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
+ crtc_state->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (crtc->pipe != PIPE_A)
- pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
+ crtc_state->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
/* DPLL not used with DSI, but still need the rest set up */
- if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
- pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ crtc_state->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
- pipe_config->dpll_hw_state.dpll_md =
- (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
+ crtc_state->dpll_hw_state.dpll_md =
+ (crtc_state->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
}
-static int chv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int chv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_chv;
@@ -1159,13 +1183,12 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- chv_compute_dpll(crtc, crtc_state);
+ chv_compute_dpll(crtc_state);
return 0;
}
-static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int vlv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_vlv;
@@ -1181,14 +1204,14 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- vlv_compute_dpll(crtc, crtc_state);
+ vlv_compute_dpll(crtc_state);
return 0;
}
-static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int g4x_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1226,16 +1249,16 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int pnv_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1263,16 +1286,16 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int i9xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 96000;
@@ -1300,16 +1323,16 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i9xx_compute_dpll(crtc, crtc_state, NULL);
+ i9xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
-static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state)
+static int i8xx_crtc_compute_clock(struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_limit *limit;
int refclk = 48000;
@@ -1339,30 +1362,63 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
return -EINVAL;
}
- i8xx_compute_dpll(crtc, crtc_state, NULL);
+ i8xx_compute_dpll(crtc_state, &crtc_state->dpll,
+ &crtc_state->dpll);
return 0;
}
+static const struct intel_dpll_funcs hsw_dpll_funcs = {
+ .crtc_compute_clock = hsw_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs ilk_dpll_funcs = {
+ .crtc_compute_clock = ilk_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs chv_dpll_funcs = {
+ .crtc_compute_clock = chv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs vlv_dpll_funcs = {
+ .crtc_compute_clock = vlv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs g4x_dpll_funcs = {
+ .crtc_compute_clock = g4x_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs pnv_dpll_funcs = {
+ .crtc_compute_clock = pnv_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i9xx_dpll_funcs = {
+ .crtc_compute_clock = i9xx_crtc_compute_clock,
+};
+
+static const struct intel_dpll_funcs i8xx_dpll_funcs = {
+ .crtc_compute_clock = i8xx_crtc_compute_clock,
+};
+
void
intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv)
{
if (DISPLAY_VER(dev_priv) >= 9 || HAS_DDI(dev_priv))
- dev_priv->display.crtc_compute_clock = hsw_crtc_compute_clock;
+ dev_priv->dpll_funcs = &hsw_dpll_funcs;
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->display.crtc_compute_clock = ilk_crtc_compute_clock;
+ dev_priv->dpll_funcs = &ilk_dpll_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &chv_dpll_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &vlv_dpll_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
+ dev_priv->dpll_funcs = &g4x_dpll_funcs;
else if (IS_PINEVIEW(dev_priv))
- dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
+ dev_priv->dpll_funcs = &pnv_dpll_funcs;
else if (DISPLAY_VER(dev_priv) != 2)
- dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
+ dev_priv->dpll_funcs = &i9xx_dpll_funcs;
else
- dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
+ dev_priv->dpll_funcs = &i8xx_dpll_funcs;
}
static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
@@ -1373,34 +1429,37 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
}
-void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state)
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- i915_reg_t reg = DPLL(crtc->pipe);
u32 dpll = crtc_state->dpll_hw_state.dpll;
+ enum pipe pipe = crtc->pipe;
int i;
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
- assert_panel_unlocked(dev_priv, crtc->pipe);
+ assert_pps_unlocked(dev_priv, pipe);
+
+ intel_de_write(dev_priv, FP0(pipe), crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(pipe), crtc_state->dpll_hw_state.fp1);
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
- intel_de_write(dev_priv, reg, dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
- intel_de_posting_read(dev_priv, reg);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (DISPLAY_VER(dev_priv) >= 4) {
- intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ intel_de_write(dev_priv, DPLL_MD(pipe),
crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
@@ -1408,13 +1467,13 @@ void i9xx_enable_pll(struct intel_crtc *crtc,
*
* So write it again.
*/
- intel_de_write(dev_priv, reg, dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- intel_de_write(dev_priv, reg, dpll);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
}
@@ -1448,136 +1507,22 @@ static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv,
vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
}
-static void _vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- intel_de_posting_read(dev_priv, DPLL(pipe));
- udelay(150);
-
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
-}
-
-void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _vlv_enable_pll(crtc, pipe_config);
-
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
-}
-
-
-static void _chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- enum dpio_channel port = vlv_pipe_to_channel(pipe);
- u32 tmp;
-
- vlv_dpio_get(dev_priv);
-
- /* Enable back the 10bit clock to display controller */
- tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
- tmp |= DPIO_DCLKP_EN;
- vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
-
- vlv_dpio_put(dev_priv);
-
- /*
- * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
- */
- udelay(1);
-
- /* Enable PLL */
- intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
-
- /* Check PLL is locked */
- if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
-}
-
-void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+static void vlv_prepare_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
-
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
-
- /* PLL is protected by panel, make sure we can write it */
- assert_panel_unlocked(dev_priv, pipe);
-
- if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
- _chv_enable_pll(crtc, pipe_config);
-
- if (pipe != PIPE_A) {
- /*
- * WaPixelRepeatModeFixForC0:chv
- *
- * DPLLCMD is AWOL. Use chicken bits to propagate
- * the value from DPLLBMD to either pipe B or C.
- */
- intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- intel_de_write(dev_priv, DPLL_MD(PIPE_B),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_write(dev_priv, CBR4_VLV, 0);
- dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
-
- /*
- * DPLLB VGA mode also seems to cause problems.
- * We should always have it disabled.
- */
- drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, DPLL(PIPE_B)) &
- DPLL_VGA_MODE_DIS) == 0);
- } else {
- intel_de_write(dev_priv, DPLL_MD(pipe),
- pipe_config->dpll_hw_state.dpll_md);
- intel_de_posting_read(dev_priv, DPLL_MD(pipe));
- }
-}
-
-void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
-{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- enum pipe pipe = crtc->pipe;
u32 mdiv;
u32 bestn, bestm1, bestm2, bestp1, bestp2;
u32 coreclk, reg_val;
- /* Enable Refclk */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
vlv_dpio_get(dev_priv);
- bestn = pipe_config->dpll.n;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
+ bestn = crtc_state->dpll.n;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
/* See eDP HDMI DPIO driver vbios notes doc */
@@ -1614,16 +1559,16 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
/* Set HBR and RBR LPF coefficients */
- if (pipe_config->port_clock == 162000 ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
- intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
+ if (crtc_state->port_clock == 162000 ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG) ||
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x009f0003);
else
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
0x00d0000f);
- if (intel_crtc_has_dp_encoder(pipe_config)) {
+ if (intel_crtc_has_dp_encoder(crtc_state)) {
/* Use SSC source */
if (pipe == PIPE_A)
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
@@ -1643,7 +1588,7 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
- if (intel_crtc_has_dp_encoder(pipe_config))
+ if (intel_crtc_has_dp_encoder(crtc_state))
coreclk |= 0x01000000;
vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
@@ -1652,11 +1597,50 @@ void vlv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_put(dev_priv);
}
-void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config)
+static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
- struct drm_device *dev = crtc->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
+ udelay(150);
+
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
+}
+
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll &
+ ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ vlv_prepare_pll(crtc_state);
+ _vlv_enable_pll(crtc_state);
+ }
+
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+}
+
+static void chv_prepare_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
enum dpio_channel port = vlv_pipe_to_channel(pipe);
u32 loopfilter, tribuf_calcntr;
@@ -1664,21 +1648,13 @@ void chv_prepare_pll(struct intel_crtc *crtc,
u32 dpio_val;
int vco;
- /* Enable Refclk and SSC */
- intel_de_write(dev_priv, DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
-
- /* No need to actually set up the DPLL with DSI */
- if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
- return;
-
- bestn = pipe_config->dpll.n;
- bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
- bestm1 = pipe_config->dpll.m1;
- bestm2 = pipe_config->dpll.m2 >> 22;
- bestp1 = pipe_config->dpll.p1;
- bestp2 = pipe_config->dpll.p2;
- vco = pipe_config->dpll.vco;
+ bestn = crtc_state->dpll.n;
+ bestm2_frac = crtc_state->dpll.m2 & 0x3fffff;
+ bestm1 = crtc_state->dpll.m1;
+ bestm2 = crtc_state->dpll.m2 >> 22;
+ bestp1 = crtc_state->dpll.p1;
+ bestp2 = crtc_state->dpll.p2;
+ vco = crtc_state->dpll.vco;
dpio_val = 0;
loopfilter = 0;
@@ -1757,6 +1733,83 @@ void chv_prepare_pll(struct intel_crtc *crtc,
vlv_dpio_put(dev_priv);
}
+static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ enum dpio_channel port = vlv_pipe_to_channel(pipe);
+ u32 tmp;
+
+ vlv_dpio_get(dev_priv);
+
+ /* Enable back the 10bit clock to display controller */
+ tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
+ tmp |= DPIO_DCLKP_EN;
+ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
+
+ vlv_dpio_put(dev_priv);
+
+ /*
+ * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
+ */
+ udelay(1);
+
+ /* Enable PLL */
+ intel_de_write(dev_priv, DPLL(pipe), crtc_state->dpll_hw_state.dpll);
+
+ /* Check PLL is locked */
+ if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
+}
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
+
+ /* PLL is protected by panel, make sure we can write it */
+ assert_pps_unlocked(dev_priv, pipe);
+
+ /* Enable Refclk and SSC */
+ intel_de_write(dev_priv, DPLL(pipe),
+ crtc_state->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+
+ if (crtc_state->dpll_hw_state.dpll & DPLL_VCO_ENABLE) {
+ chv_prepare_pll(crtc_state);
+ _chv_enable_pll(crtc_state);
+ }
+
+ if (pipe != PIPE_A) {
+ /*
+ * WaPixelRepeatModeFixForC0:chv
+ *
+ * DPLLCMD is AWOL. Use chicken bits to propagate
+ * the value from DPLLBMD to either pipe B or C.
+ */
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
+ dev_priv->chv_dpll_md[pipe] = crtc_state->dpll_hw_state.dpll_md;
+
+ /*
+ * DPLLB VGA mode also seems to cause problems.
+ * We should always have it disabled.
+ */
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
+ } else {
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ crtc_state->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
+ }
+}
+
/**
* vlv_force_pll_on - forcibly enable just the PLL
* @dev_priv: i915 private structure
@@ -1771,27 +1824,26 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_crtc_state *pipe_config;
+ struct intel_crtc_state *crtc_state;
- pipe_config = intel_crtc_state_alloc(crtc);
- if (!pipe_config)
+ crtc_state = intel_crtc_state_alloc(crtc);
+ if (!crtc_state)
return -ENOMEM;
- pipe_config->cpu_transcoder = (enum transcoder)pipe;
- pipe_config->pixel_multiplier = 1;
- pipe_config->dpll = *dpll;
+ crtc_state->cpu_transcoder = (enum transcoder)pipe;
+ crtc_state->pixel_multiplier = 1;
+ crtc_state->dpll = *dpll;
+ crtc_state->output_types = BIT(INTEL_OUTPUT_EDP);
if (IS_CHERRYVIEW(dev_priv)) {
- chv_compute_dpll(crtc, pipe_config);
- chv_prepare_pll(crtc, pipe_config);
- chv_enable_pll(crtc, pipe_config);
+ chv_compute_dpll(crtc_state);
+ chv_enable_pll(crtc_state);
} else {
- vlv_compute_dpll(crtc, pipe_config);
- vlv_prepare_pll(crtc, pipe_config);
- vlv_enable_pll(crtc, pipe_config);
+ vlv_compute_dpll(crtc_state);
+ vlv_enable_pll(crtc_state);
}
- kfree(pipe_config);
+ kfree(crtc_state);
return 0;
}
@@ -1801,7 +1853,7 @@ void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_INTEGRATED_REF_CLK_VLV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1818,7 +1870,7 @@ void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 val;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, (enum transcoder)pipe);
+ assert_transcoder_disabled(dev_priv, (enum transcoder)pipe);
val = DPLL_SSC_REF_CLK_CHV |
DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
@@ -1849,7 +1901,7 @@ void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
return;
/* Make sure the pipe isn't still relying on us */
- assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
intel_de_posting_read(dev_priv, DPLL(pipe));
@@ -1871,3 +1923,25 @@ void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
else
vlv_disable_pll(dev_priv, pipe);
}
+
+/* Only for pre-ILK configs */
+static void assert_pll(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "PLL state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, true);
+}
+
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_pll(i915, pipe, false);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 88247027fd5a..1af0ac43cca4 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -18,29 +18,25 @@ void intel_dpll_init_clock_hook(struct drm_i915_private *dev_priv);
int vlv_calc_dpll_params(int refclk, struct dpll *clock);
int pnv_calc_dpll_params(int refclk, struct dpll *clock);
int i9xx_calc_dpll_params(int refclk, struct dpll *clock);
-void vlv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
-void chv_compute_dpll(struct intel_crtc *crtc,
- struct intel_crtc_state *pipe_config);
+u32 i9xx_dpll_compute_fp(const struct dpll *dpll);
+void vlv_compute_dpll(struct intel_crtc_state *crtc_state);
+void chv_compute_dpll(struct intel_crtc_state *crtc_state);
int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
const struct dpll *dpll);
void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
-void i9xx_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
-void vlv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void chv_enable_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+
+void chv_enable_pll(const struct intel_crtc_state *crtc_state);
void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void vlv_enable_pll(const struct intel_crtc_state *crtc_state);
+void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe);
+void i9xx_enable_pll(const struct intel_crtc_state *crtc_state);
void i9xx_disable_pll(const struct intel_crtc_state *crtc_state);
-void vlv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
-void chv_prepare_pll(struct intel_crtc *crtc,
- const struct intel_crtc_state *pipe_config);
bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
struct dpll *best_clock);
int chv_calc_dpll_params(int refclk, struct dpll *pll_clock);
+void assert_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
+
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 5c91d125a337..0a7e04db04be 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -26,6 +26,7 @@
#include "intel_dpio_phy.h"
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
+#include "intel_tc.h"
/**
* DOC: Display PLLs
@@ -185,34 +186,6 @@ intel_tc_pll_enable_reg(struct drm_i915_private *i915,
}
/**
- * intel_prepare_shared_dpll - call a dpll's prepare hook
- * @crtc_state: CRTC, and its state, which has a shared dpll
- *
- * This calls the PLL's prepare hook if it has one and if the PLL is not
- * already enabled. The prepare hook is platform specific.
- */
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- struct intel_shared_dpll *pll = crtc_state->shared_dpll;
-
- if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
- return;
-
- mutex_lock(&dev_priv->dpll.lock);
- drm_WARN_ON(&dev_priv->drm, !pll->state.pipe_mask);
- if (!pll->active_mask) {
- drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
- drm_WARN_ON(&dev_priv->drm, pll->on);
- assert_shared_dpll_disabled(dev_priv, pll);
-
- pll->info->funcs->prepare(dev_priv, pll);
- }
- mutex_unlock(&dev_priv->dpll.lock);
-}
-
-/**
* intel_enable_shared_dpll - enable a CRTC's shared DPLL
* @crtc_state: CRTC, and its state, which has a shared DPLL
*
@@ -451,15 +424,6 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
return val & DPLL_VCO_ENABLE;
}
-static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll)
-{
- const enum intel_dpll_id id = pll->info->id;
-
- intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
- intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
-}
-
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -481,6 +445,9 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
+
intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
@@ -558,7 +525,6 @@ static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
- .prepare = ibx_pch_dpll_prepare,
.enable = ibx_pch_dpll_enable,
.disable = ibx_pch_dpll_disable,
.get_hw_state = ibx_pch_dpll_get_hw_state,
@@ -3136,8 +3102,8 @@ static void icl_update_active_dpll(struct intel_atomic_state *state,
enc_to_dig_port(encoder);
if (primary_port &&
- (primary_port->tc_mode == TC_PORT_DP_ALT ||
- primary_port->tc_mode == TC_PORT_LEGACY))
+ (intel_tc_port_in_dp_alt_mode(primary_port) ||
+ intel_tc_port_in_legacy_mode(primary_port)))
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
icl_set_active_port_dpll(crtc_state, port_dpll_id);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 30e0aa5ca109..2f59d863be4c 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -256,16 +256,6 @@ struct intel_shared_dpll_state {
*/
struct intel_shared_dpll_funcs {
/**
- * @prepare:
- *
- * Optional hook to perform operations prior to enabling the PLL.
- * Called from intel_prepare_shared_dpll() function unless the PLL
- * is already enabled.
- */
- void (*prepare)(struct drm_i915_private *dev_priv,
- struct intel_shared_dpll *pll);
-
- /**
* @enable:
*
* Hook for enabling the pll, called from intel_enable_shared_dpll()
@@ -404,7 +394,6 @@ int intel_dpll_get_freq(struct drm_i915_private *i915,
bool intel_dpll_get_hw_state(struct drm_i915_private *i915,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
-void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
new file mode 100644
index 000000000000..8f7b1f7534a4
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_display_types.h"
+#include "intel_dpt.h"
+#include "intel_fb.h"
+#include "gt/gen8_ppgtt.h"
+
+struct i915_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct i915_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
+ GEM_BUG_ON(!i915_is_dpt(vm));
+ return container_of(vm, struct i915_dpt, vm);
+}
+
+#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, level, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma *vma,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ i = vma->node.start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma->pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
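For reference, the two insert hooks above reduce to computing a PTE slot from the GTT offset and writing one 64-bit entry per backing page. A minimal user-space sketch of that indexing, assuming 4 KiB GTT pages and a plain array standing in for the __iomem PTE window (the real encoding is done by gen8_ggtt_pte_encode; the encode helper below is a toy):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* Toy PTE encode: address bits plus a "present" flag in bit 0. */
	static uint64_t pte_encode(uint64_t addr)
	{
		return addr | 1u;
	}

	int main(void)
	{
		uint64_t ptes[8] = { 0 };                 /* stands in for dpt->iomem */
		uint64_t vma_start = 3 * PAGE_SIZE;       /* node.start of the mapping */
		uint64_t pages[2] = { 0x10000, 0x2a000 }; /* backing DMA addresses */
		size_t i, slot = vma_start / PAGE_SIZE;

		for (i = 0; i < 2; i++)   /* mirrors the for_each_sgt_daddr() loop */
			ptes[slot++] = pte_encode(pages[i]);

		for (i = 0; i < 8; i++)
			printf("pte[%zu] = 0x%llx\n", i, (unsigned long long)ptes[i]);
		return 0;
	}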
+
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma *vma,
+ enum i915_cache_level cache_level,
+ u32 flags)
+{
+ struct drm_i915_gem_object *obj = vma->obj;
+ u32 pte_flags;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
+ pte_flags |= PTE_READ_ONLY;
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
+
+ vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
+{
+ vm->clear_range(vm, vma->node.start, vma->size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
+{
+ struct drm_i915_private *i915 = vm->i915;
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+ intel_wakeref_t wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+ struct i915_gem_ww_ctx ww;
+ int err;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&i915->gpu_error.pending_fb_pin);
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(dpt->obj, &ww);
+ if (err)
+ continue;
+
+ vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
+ HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ continue;
+ }
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(iomem)) {
+ err = PTR_ERR(iomem);
+ continue;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+ }
+
+ atomic_dec(&i915->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err ? ERR_PTR(err) : vma;
+}
+
+void intel_dpt_unpin(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb)
+{
+ struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct i915_dpt *dpt;
+ size_t size;
+ int ret;
+
+ if (intel_fb_needs_pot_stride_remap(fb))
+ size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
+ else
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ if (HAS_LMEM(i915))
+ dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
+ else
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = &i915->gt;
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+ vm->vma_ops.set_pages = ggtt_set_pages;
+ vm->vma_ops.clear_pages = clear_pages;
+
+ vm->pte_encode = gen8_ggtt_pte_encode;
+
+ dpt->obj = dpt_obj;
+
+ return &dpt->vm;
+}
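As a sanity check of the sizing arithmetic in intel_dpt_create(), a small sketch with a hypothetical 16 MiB framebuffer object, assuming 4 KiB GTT pages and 8-byte PTEs:

	#include <stdint.h>
	#include <stdio.h>

	#define GTT_PAGE_SIZE 4096ull
	#define PTE_SIZE      8ull   /* sizeof(gen8_pte_t) */

	static uint64_t div_round_up(uint64_t n, uint64_t d) { return (n + d - 1) / d; }
	static uint64_t round_up(uint64_t n, uint64_t a)      { return div_round_up(n, a) * a; }

	int main(void)
	{
		uint64_t fb_size  = 16ull << 20;   /* hypothetical 16 MiB framebuffer object */
		uint64_t nptes    = div_round_up(fb_size, GTT_PAGE_SIZE);
		uint64_t dpt_obj  = round_up(nptes * PTE_SIZE, GTT_PAGE_SIZE);
		uint64_t vm_total = (dpt_obj / PTE_SIZE) * GTT_PAGE_SIZE;

		/* 4096 PTEs -> 32 KiB DPT object -> 16 MiB of mappable DPT space */
		printf("PTEs: %llu, DPT object: %llu bytes, vm->total: %llu bytes\n",
		       (unsigned long long)nptes,
		       (unsigned long long)dpt_obj,
		       (unsigned long long)vm_total);
		return 0;
	}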
+
+void intel_dpt_destroy(struct i915_address_space *vm)
+{
+ struct i915_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_vm_close(&dpt->vm);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
new file mode 100644
index 000000000000..45142b8f849f
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DPT_H__
+#define __INTEL_DPT_H__
+
+struct i915_address_space;
+struct i915_vma;
+struct intel_framebuffer;
+
+void intel_dpt_destroy(struct i915_address_space *vm);
+struct i915_vma *intel_dpt_pin(struct i915_address_space *vm);
+void intel_dpt_unpin(struct i915_address_space *vm);
+struct i915_address_space *
+intel_dpt_create(struct intel_framebuffer *fb);
+
+#endif /* __INTEL_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c
new file mode 100644
index 000000000000..c1439fcb5a95
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_de.h"
+#include "intel_display_types.h"
+#include "intel_drrs.h"
+#include "intel_panel.h"
+
+/**
+ * DOC: Display Refresh Rate Switching (DRRS)
+ *
+ * Display Refresh Rate Switching (DRRS) is a power conservation feature
+ * which enables switching between low and high refresh rates
+ * dynamically, based on the usage scenario. This feature is applicable
+ * for internal panels.
+ *
+ * Indication that the panel supports DRRS is given by the panel EDID, which
+ * would list multiple refresh rates for one resolution.
+ *
+ * DRRS is of 2 types - static and seamless.
+ * Static DRRS involves changing refresh rate (RR) by doing a full modeset
+ * (may appear as a blink on screen) and is used in dock-undock scenario.
+ * Seamless DRRS involves changing RR without any visual effect to the user
+ * and can be used during normal system usage. This is done by programming
+ * certain registers.
+ *
+ * Support for static/seamless DRRS may be indicated in the VBT based on
+ * inputs from the panel spec.
+ *
+ * DRRS saves power by switching to low RR based on usage scenarios.
+ *
+ * The implementation is based on frontbuffer tracking implementation. When
+ * there is a disturbance on the screen triggered by user activity or a periodic
+ * system activity, DRRS is disabled (RR is changed to high RR). When there is
+ * no movement on screen, after a timeout of 1 second, a switch to low RR is
+ * made.
+ *
+ * For integration with frontbuffer tracking code, intel_drrs_invalidate()
+ * and intel_drrs_flush() are called.
+ *
+ * DRRS can be further extended to support other internal panels and also
+ * the scenario of video playback wherein RR is set based on the rate
+ * requested by userspace.
+ */
+
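A toy model of the idleness policy described in the DOC comment above (any dirty frontbuffer forces the high refresh rate; once all bits clear, a 1 s deadline arms the switch to the low rate). The struct and the millisecond bookkeeping are illustrative stand-ins for the real delayed work, not driver API:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct drrs_model {
		uint32_t busy_bits;
		bool high_rr;
		int downclock_deadline_ms;  /* <0 means no downclock pending */
	};

	static void drrs_invalidate(struct drrs_model *m, uint32_t bits)
	{
		m->busy_bits |= bits;       /* rendering started: stay at high RR */
		m->high_rr = true;
		m->downclock_deadline_ms = -1;
	}

	static void drrs_flush(struct drrs_model *m, uint32_t bits, int now_ms)
	{
		m->busy_bits &= ~bits;
		m->high_rr = true;          /* a flush is also screen activity */
		if (!m->busy_bits)          /* everything quiescent: arm downclock */
			m->downclock_deadline_ms = now_ms + 1000;
	}

	int main(void)
	{
		struct drrs_model m = { 0, false, -1 };

		drrs_invalidate(&m, 0x1);   /* rendering starts on plane 0 */
		drrs_flush(&m, 0x1, 50);    /* flip completes at t=50ms */
		printf("high_rr=%d, downclock at t=%dms\n",
		       m.high_rr, m.downclock_deadline_ms);
		return 0;
	}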
+void
+intel_drrs_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n)
+{
+ struct intel_connector *intel_connector = intel_dp->attached_connector;
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ int pixel_clock;
+
+ if (pipe_config->vrr.enable)
+ return;
+
+ /*
+ * DRRS and PSR can't be enabled together, so give preference to PSR,
+ * as it allows more power savings by completely shutting down the
+ * display. To guarantee this, intel_drrs_compute_config() must be
+ * called after intel_psr_compute_config().
+ */
+ if (pipe_config->has_psr)
+ return;
+
+ if (!intel_connector->panel.downclock_mode ||
+ dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ pipe_config->has_drrs = true;
+
+ pixel_clock = intel_connector->panel.downclock_mode->clock;
+ if (pipe_config->splitter.enable)
+ pixel_clock /= pipe_config->splitter.link_count;
+
+ intel_link_compute_m_n(output_bpp, pipe_config->lane_count, pixel_clock,
+ pipe_config->port_clock, &pipe_config->dp_m2_n2,
+ constant_n, pipe_config->fec_enable);
+
+ /* FIXME: abstract this better */
+ if (pipe_config->splitter.enable)
+ pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count;
+}
+
+static void intel_drrs_set_state(struct drm_i915_private *dev_priv,
+ const struct intel_crtc_state *crtc_state,
+ enum drrs_refresh_rate_type refresh_type)
+{
+ struct intel_dp *intel_dp = dev_priv->drrs.dp;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_display_mode *mode;
+
+ if (!intel_dp) {
+ drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
+ return;
+ }
+
+ if (!crtc) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS: intel_crtc not initialized\n");
+ return;
+ }
+
+ if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
+ drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
+ return;
+ }
+
+ if (refresh_type == dev_priv->drrs.refresh_rate_type)
+ return;
+
+ if (!crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "eDP encoder disabled. CRTC not Active\n");
+ return;
+ }
+
+ if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
+ switch (refresh_type) {
+ case DRRS_HIGH_RR:
+ intel_dp_set_m_n(crtc_state, M1_N1);
+ break;
+ case DRRS_LOW_RR:
+ intel_dp_set_m_n(crtc_state, M2_N2);
+ break;
+ case DRRS_MAX_RR:
+ default:
+ drm_err(&dev_priv->drm,
+ "Unsupported refreshrate type\n");
+ }
+ } else if (DISPLAY_VER(dev_priv) > 6) {
+ i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
+ u32 val;
+
+ val = intel_de_read(dev_priv, reg);
+ if (refresh_type == DRRS_LOW_RR) {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val |= PIPECONF_EDP_RR_MODE_SWITCH;
+ } else {
+ if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
+ else
+ val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
+ }
+ intel_de_write(dev_priv, reg, val);
+ }
+
+ dev_priv->drrs.refresh_rate_type = refresh_type;
+
+ if (refresh_type == DRRS_LOW_RR)
+ mode = intel_dp->attached_connector->panel.downclock_mode;
+ else
+ mode = intel_dp->attached_connector->panel.fixed_mode;
+ drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+ drm_mode_vrefresh(mode));
+}
+
+static void
+intel_drrs_enable_locked(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ dev_priv->drrs.busy_frontbuffer_bits = 0;
+ dev_priv->drrs.dp = intel_dp;
+}
+
+/**
+ * intel_drrs_enable - init drrs struct if supported
+ * @intel_dp: DP struct
+ * @crtc_state: A pointer to the active crtc state.
+ *
+ * Initializes frontbuffer_bits and drrs.dp
+ */
+void intel_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!crtc_state->has_drrs)
+ return;
+
+ drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ if (dev_priv->drrs.dp) {
+ drm_warn(&dev_priv->drm, "DRRS already enabled\n");
+ goto unlock;
+ }
+
+ intel_drrs_enable_locked(intel_dp);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void
+intel_drrs_disable_locked(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ intel_drrs_set_state(dev_priv, crtc_state, DRRS_HIGH_RR);
+ dev_priv->drrs.dp = NULL;
+}
+
+/**
+ * intel_drrs_disable - Disable DRRS
+ * @intel_dp: DP struct
+ * @old_crtc_state: Pointer to old crtc_state.
+ *
+ */
+void intel_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *old_crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (!old_crtc_state->has_drrs)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+ if (!dev_priv->drrs.dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ intel_drrs_disable_locked(intel_dp, old_crtc_state);
+ mutex_unlock(&dev_priv->drrs.mutex);
+
+ cancel_delayed_work_sync(&dev_priv->drrs.work);
+}
+
+/**
+ * intel_drrs_update - Update DRRS state
+ * @intel_dp: Intel DP
+ * @crtc_state: new CRTC state
+ *
+ * This function will update DRRS states, disabling or enabling DRRS when
+ * executing fastsets. For full modeset, intel_drrs_disable() and
+ * intel_drrs_enable() should be called instead.
+ */
+void
+intel_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+
+ if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
+ return;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ /* New state matches current one? */
+ if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
+ goto unlock;
+
+ if (crtc_state->has_drrs)
+ intel_drrs_enable_locked(intel_dp);
+ else
+ intel_drrs_disable_locked(intel_dp, crtc_state);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void intel_drrs_downclock_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), drrs.work.work);
+ struct intel_dp *intel_dp;
+ struct drm_crtc *crtc;
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+
+ if (!intel_dp)
+ goto unlock;
+
+ /*
+ * The delayed work can race with an invalidate, hence we need to
+ * recheck.
+ */
+
+ if (dev_priv->drrs.busy_frontbuffer_bits)
+ goto unlock;
+
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config, DRRS_LOW_RR);
+
+unlock:
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+static void intel_drrs_frontbuffer_update(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits,
+ bool invalidate)
+{
+ struct intel_dp *intel_dp;
+ struct drm_crtc *crtc;
+ enum pipe pipe;
+
+ if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
+ return;
+
+ cancel_delayed_work(&dev_priv->drrs.work);
+
+ mutex_lock(&dev_priv->drrs.mutex);
+
+ intel_dp = dev_priv->drrs.dp;
+ if (!intel_dp) {
+ mutex_unlock(&dev_priv->drrs.mutex);
+ return;
+ }
+
+ crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
+ pipe = to_intel_crtc(crtc)->pipe;
+
+ frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
+ if (invalidate)
+ dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
+ else
+ dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
+
+ /* flush/invalidate means busy screen hence upclock */
+ if (frontbuffer_bits)
+ intel_drrs_set_state(dev_priv, to_intel_crtc(crtc)->config,
+ DRRS_HIGH_RR);
+
+ /*
+ * flush also means no more activity hence schedule downclock, if all
+ * other fbs are quiescent too
+ */
+ if (!invalidate && !dev_priv->drrs.busy_frontbuffer_bits)
+ schedule_delayed_work(&dev_priv->drrs.work,
+ msecs_to_jiffies(1000));
+ mutex_unlock(&dev_priv->drrs.mutex);
+}
+
+/**
+ * intel_drrs_invalidate - Disable Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes starts.
+ * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, true);
+}
+
+/**
+ * intel_drrs_flush - Restart Idleness DRRS
+ * @dev_priv: i915 device
+ * @frontbuffer_bits: frontbuffer plane tracking bits
+ *
+ * This function gets called every time rendering on the given planes has
+ * completed or a flip on a crtc has completed, so DRRS should be upclocked
+ * (LOW_RR -> HIGH_RR). Idleness detection should also be restarted,
+ * if no other planes are dirty.
+ *
+ * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
+ */
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits)
+{
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+}
+
+void intel_drrs_page_flip(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ unsigned int frontbuffer_bits = INTEL_FRONTBUFFER_ALL_MASK(crtc->pipe);
+
+ intel_drrs_frontbuffer_update(dev_priv, frontbuffer_bits, false);
+}
+
+/**
+ * intel_drrs_init - Init basic DRRS work and mutex.
+ * @connector: eDP connector
+ * @fixed_mode: preferred mode of panel
+ *
+ * This function is called only once at driver load to initialize basic
+ * DRRS stuff.
+ *
+ * Returns:
+ * Downclock mode if panel supports it, else return NULL.
+ * DRRS support is determined by the presence of downclock mode (apart
+ * from VBT setting).
+ */
+struct drm_display_mode *
+intel_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode)
+{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct drm_display_mode *downclock_mode = NULL;
+
+ INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work);
+ mutex_init(&dev_priv->drrs.mutex);
+
+ if (DISPLAY_VER(dev_priv) <= 6) {
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported for Gen7 and above\n");
+ return NULL;
+ }
+
+ if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
+ drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
+ return NULL;
+ }
+
+ downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
+ if (!downclock_mode) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Downclock mode is not found. DRRS not supported\n");
+ return NULL;
+ }
+
+ dev_priv->drrs.type = dev_priv->vbt.drrs_type;
+
+ dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
+ drm_dbg_kms(&dev_priv->drm,
+ "seamless DRRS supported for eDP panel.\n");
+ return downclock_mode;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_drrs.h b/drivers/gpu/drm/i915/display/intel_drrs.h
new file mode 100644
index 000000000000..9ec9c447211a
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_drrs.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_DRRS_H__
+#define __INTEL_DRRS_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_crtc;
+struct intel_crtc_state;
+struct intel_connector;
+struct intel_dp;
+
+void intel_drrs_enable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_disable(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_update(struct intel_dp *intel_dp,
+ const struct intel_crtc_state *crtc_state);
+void intel_drrs_invalidate(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_drrs_flush(struct drm_i915_private *dev_priv,
+ unsigned int frontbuffer_bits);
+void intel_drrs_page_flip(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_drrs_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *pipe_config,
+ int output_bpp, bool constant_n);
+struct drm_display_mode *intel_drrs_init(struct intel_connector *connector,
+ struct drm_display_mode *fixed_mode);
+
+#endif /* __INTEL_DRRS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c
index f453ceb8d149..6b0301ba046e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi.c
@@ -5,6 +5,7 @@
#include <drm/drm_mipi_dsi.h>
#include "intel_dsi.h"
+#include "intel_panel.h"
int intel_dsi_bitrate(const struct intel_dsi *intel_dsi)
{
@@ -60,20 +61,19 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
drm_dbg_kms(&dev_priv->drm, "\n");
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
- if (fixed_mode->clock > max_dotclk)
- return MODE_CLOCK_HIGH;
- }
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
+ if (fixed_mode->clock > max_dotclk)
+ return MODE_CLOCK_HIGH;
return intel_mode_valid_max_plane_size(dev_priv, mode, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi.h b/drivers/gpu/drm/i915/display/intel_dsi.h
index 50d6da0b2419..fbc40ffdc02e 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi.h
+++ b/drivers/gpu/drm/i915/display/intel_dsi.h
@@ -207,6 +207,9 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct intel_crtc_state *config);
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port);
+void assert_dsi_pll_enabled(struct drm_i915_private *i915);
+void assert_dsi_pll_disabled(struct drm_i915_private *i915);
+
/* intel_dsi_vbt.c */
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id);
void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on);
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index 584c14c4cbd0..f61ed82e8867 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -47,33 +47,42 @@ static u32 dcs_get_backlight(struct intel_connector *connector, enum pipe unused
{
struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+ struct intel_panel *panel = &connector->panel;
struct mipi_dsi_device *dsi_device;
- u8 data = 0;
+ u8 data[2] = {};
enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
- /* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_read(dsi_device, MIPI_DCS_GET_DISPLAY_BRIGHTNESS,
- &data, sizeof(data));
+ &data, len);
break;
}
- return data;
+ return (data[1] << 8) | data[0];
}
static void dcs_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_dsi *intel_dsi = enc_to_intel_dsi(to_intel_encoder(conn_state->best_encoder));
+ struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
struct mipi_dsi_device *dsi_device;
- u8 data = level;
+ u8 data[2] = {};
enum port port;
+ size_t len = panel->backlight.max > U8_MAX ? 2 : 1;
+
+ if (len == 1) {
+ data[0] = level;
+ } else {
+ data[0] = level >> 8;
+ data[1] = level;
+ }
- /* FIXME: Need to take care of 16 bit brightness level */
for_each_dsi_port(port, intel_dsi->dcs_backlight_ports) {
dsi_device = intel_dsi->dsi_hosts[port]->device;
mipi_dsi_dcs_write(dsi_device, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
- &data, sizeof(data));
+ &data, len);
}
}
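A standalone sketch of the payload handling in the hunk above: one byte when the panel exposes at most 8 bits of brightness precision, otherwise two bytes with the high byte first. pack_brightness() and the 10-bit maximum are hypothetical illustration values, not driver API:

	#include <stdint.h>
	#include <stdio.h>

	static size_t pack_brightness(uint32_t level, uint32_t max, uint8_t data[2])
	{
		size_t len = max > 0xff ? 2 : 1;

		if (len == 1) {
			data[0] = level;
		} else {
			data[0] = level >> 8;  /* high byte first, as in the hunk above */
			data[1] = level;
		}
		return len;
	}

	int main(void)
	{
		uint8_t data[2] = { 0 };
		uint32_t max = (1u << 10) - 1;  /* e.g. 10 bits of precision from the VBT */
		size_t len = pack_brightness(0x2a5, max, data);

		printf("len=%zu payload=%02x %02x\n", len, data[0], data[1]);
		return 0;
	}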
@@ -147,10 +156,16 @@ static void dcs_enable_backlight(const struct intel_crtc_state *crtc_state,
static int dcs_setup_backlight(struct intel_connector *connector,
enum pipe unused)
{
+ struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
- panel->backlight.max = PANEL_PWM_MAX_VALUE;
- panel->backlight.level = PANEL_PWM_MAX_VALUE;
+ if (dev_priv->vbt.backlight.brightness_precision_bits > 8)
+ panel->backlight.max = (1 << dev_priv->vbt.backlight.brightness_precision_bits) - 1;
+ else
+ panel->backlight.max = PANEL_PWM_MAX_VALUE;
+
+ panel->backlight.level = panel->backlight.max;
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index c2a2cd1f84dc..f241bedb8597 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -31,7 +31,6 @@
#include <linux/pinctrl/machine.h>
#include <linux/slab.h>
-#include <asm/intel-mid.h>
#include <asm/unaligned.h>
#include <drm/drm_crtc.h>
@@ -42,7 +41,7 @@
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#define MIPI_TRANSFER_MODE_SHIFT 0
#define MIPI_VIRTUAL_CHANNEL_SHIFT 1
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 77419f8c05e9..2eeb209afc64 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -223,9 +223,10 @@ static enum drm_mode_status
intel_dvo_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct intel_dvo *intel_dvo = intel_attached_dvo(to_intel_connector(connector));
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ struct intel_dvo *intel_dvo = intel_attached_dvo(intel_connector);
const struct drm_display_mode *fixed_mode =
- to_intel_connector(connector)->panel.fixed_mode;
+ intel_connector->panel.fixed_mode;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
@@ -235,10 +236,11 @@ intel_dvo_mode_valid(struct drm_connector *connector,
/* XXX: Validate clock range */
if (fixed_mode) {
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+ enum drm_mode_status status;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
target_clock = fixed_mode->clock;
}
@@ -254,6 +256,7 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
+ struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *fixed_mode =
intel_dvo->attached_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
@@ -264,8 +267,13 @@ static int intel_dvo_compute_config(struct intel_encoder *encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- if (fixed_mode)
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (fixed_mode) {
+ int ret;
+
+ ret = intel_panel_compute_config(connector, adjusted_mode);
+ if (ret)
+ return ret;
+ }
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index c60a81a81c09..fa1f375e696b 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -4,9 +4,11 @@
*/
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper.h>
#include "intel_display.h"
#include "intel_display_types.h"
+#include "intel_dpt.h"
#include "intel_fb.h"
#define check_array_bounds(i915, a, i) drm_WARN_ON(&(i915)->drm, (i) >= ARRAY_SIZE(a))
@@ -61,6 +63,38 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane)
return ccs_plane - fb->format->num_planes / 2;
}
+static unsigned int gen12_aligned_scanout_stride(const struct intel_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ unsigned int stride = fb->base.pitches[color_plane];
+
+ if (IS_ALDERLAKE_P(i915))
+ return roundup_pow_of_two(max(stride,
+ 8u * intel_tile_width_bytes(&fb->base, color_plane)));
+
+ return stride;
+}
+
+static unsigned int gen12_ccs_aux_stride(struct intel_framebuffer *fb, int ccs_plane)
+{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
+ int main_plane = skl_ccs_to_main_plane(&fb->base, ccs_plane);
+ unsigned int main_stride = fb->base.pitches[main_plane];
+ unsigned int main_tile_width = intel_tile_width_bytes(&fb->base, main_plane);
+
+ /*
+ * On ADL-P the AUX stride must align with a power-of-two aligned main
+ * surface stride. The stride of the allocated main surface object can
+ * be less than this POT stride, which is then autopadded to the POT
+ * size.
+ */
+ if (IS_ALDERLAKE_P(i915))
+ main_stride = gen12_aligned_scanout_stride(fb, main_plane);
+
+ return DIV_ROUND_UP(main_stride, 4 * main_tile_width) * 64;
+}
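A worked example of the AUX stride rule above, assuming a 128-byte Y-tile width and a 7680-pixel, 4-bpp main surface; the ADL-P branch first pads the main stride to the power-of-two scanout stride, as in gen12_aligned_scanout_stride():

	#include <stdint.h>
	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	static unsigned int roundup_pow_of_two(unsigned int v)
	{
		unsigned int r = 1;

		while (r < v)
			r <<= 1;
		return r;
	}

	int main(void)
	{
		unsigned int tile_width = 128;        /* assumed Y-tile width in bytes */
		unsigned int main_stride = 7680 * 4;  /* 30720 bytes */
		unsigned int adlp_stride = roundup_pow_of_two(main_stride > 8 * tile_width ?
							      main_stride : 8 * tile_width);
		unsigned int aux_tgl  = DIV_ROUND_UP(main_stride, 4 * tile_width) * 64;
		unsigned int aux_adlp = DIV_ROUND_UP(adlp_stride, 4 * tile_width) * 64;

		/* 3840 bytes on TGL, 4096 bytes on ADL-P (32 KiB POT main stride) */
		printf("TGL aux stride: %u bytes, ADL-P aux stride: %u bytes\n",
		       aux_tgl, aux_adlp);
		return 0;
	}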
+
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane)
{
struct drm_i915_private *i915 = to_i915(fb->dev);
@@ -79,16 +113,70 @@ unsigned int intel_tile_size(const struct drm_i915_private *i915)
return DISPLAY_VER(i915) == 2 ? 2048 : 4096;
}
-unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+unsigned int
+intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
{
- if (is_gen12_ccs_plane(fb, color_plane))
- return 1;
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ unsigned int cpp = fb->format->cpp[color_plane];
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_tile_size(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (DISPLAY_VER(dev_priv) == 2)
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 64;
+ fallthrough;
+ case I915_FORMAT_MOD_Y_TILED:
+ if (DISPLAY_VER(dev_priv) == 2 || HAS_128_BYTE_Y_TILING(dev_priv))
+ return 128;
+ else
+ return 512;
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ if (is_ccs_plane(fb, color_plane))
+ return 128;
+ fallthrough;
+ case I915_FORMAT_MOD_Yf_TILED:
+ switch (cpp) {
+ case 1:
+ return 64;
+ case 2:
+ case 4:
+ return 128;
+ case 8:
+ case 16:
+ return 256;
+ default:
+ MISSING_CASE(cpp);
+ return cpp;
+ }
+ break;
+ default:
+ MISSING_CASE(fb->modifier);
+ return cpp;
+ }
+}
+unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
+{
return intel_tile_size(to_i915(fb->dev)) /
intel_tile_width_bytes(fb, color_plane);
}
-/* Return the tile dimensions in pixel units */
+/*
+ * Return the tile dimensions in pixel units, based on the (2 or 4 kbyte) GTT
+ * page tile size.
+ */
static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
unsigned int *tile_width,
unsigned int *tile_height)
@@ -100,6 +188,21 @@ static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
*tile_height = intel_tile_height(fb, color_plane);
}
+/*
+ * Return the tile dimensions in pixel units, based on the tile block size.
+ * The block covers the full GTT page sized tile on all tiled surfaces and
+ * it's a 64 byte portion of the tile on TGL+ CCS surfaces.
+ */
+static void intel_tile_block_dims(const struct drm_framebuffer *fb, int color_plane,
+ unsigned int *tile_width,
+ unsigned int *tile_height)
+{
+ intel_tile_dims(fb, color_plane, tile_width, tile_height);
+
+ if (is_gen12_ccs_plane(fb, color_plane))
+ *tile_height = 1;
+}
+
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane)
{
unsigned int tile_width, tile_height;
@@ -109,6 +212,31 @@ unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_pla
return fb->pitches[color_plane] * tile_height;
}
+unsigned int
+intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height)
+{
+ unsigned int tile_height = intel_tile_height(fb, color_plane);
+
+ return ALIGN(height, tile_height);
+}
+
+static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
+{
+ switch (fb_modifier) {
+ case I915_FORMAT_MOD_X_TILED:
+ return I915_TILING_X;
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ return I915_TILING_Y;
+ default:
+ return I915_TILING_NONE;
+ }
+}
+
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
{
if (IS_I830(i915))
@@ -121,6 +249,70 @@ unsigned int intel_cursor_alignment(const struct drm_i915_private *i915)
return 4 * 1024;
}
+static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
+{
+ if (DISPLAY_VER(dev_priv) >= 9)
+ return 256 * 1024;
+ else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return 128 * 1024;
+ else if (DISPLAY_VER(dev_priv) >= 4)
+ return 4 * 1024;
+ else
+ return 0;
+}
+
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+
+ if (intel_fb_uses_dpt(fb))
+ return 512 * 4096;
+
+ /* AUX_DIST needs only 4K alignment */
+ if (is_ccs_plane(fb, color_plane))
+ return 4096;
+
+ if (is_semiplanar_uv_plane(fb, color_plane)) {
+ /*
+ * TODO: cross-check wrt. the bspec stride in bytes * 64 bytes
+ * alignment for linear UV planes on all platforms.
+ */
+ if (DISPLAY_VER(dev_priv) >= 12) {
+ if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
+ return intel_linear_alignment(dev_priv);
+
+ return intel_tile_row_size(fb, color_plane);
+ }
+
+ return 4096;
+ }
+
+ drm_WARN_ON(&dev_priv->drm, color_plane != 0);
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ return intel_linear_alignment(dev_priv);
+ case I915_FORMAT_MOD_X_TILED:
+ if (HAS_ASYNC_FLIPS(dev_priv))
+ return 256 * 1024;
+ return 0;
+ case I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS:
+ case I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS_CC:
+ return 16 * 1024;
+ case I915_FORMAT_MOD_Y_TILED_CCS:
+ case I915_FORMAT_MOD_Yf_TILED_CCS:
+ case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return 1 * 1024 * 1024;
+ default:
+ MISSING_CASE(fb->modifier);
+ return 0;
+ }
+}
+
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
int color_plane)
@@ -165,15 +357,29 @@ void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
static void intel_fb_plane_dims(const struct intel_framebuffer *fb, int color_plane, int *w, int *h)
{
+ struct drm_i915_private *i915 = to_i915(fb->base.dev);
int main_plane = is_ccs_plane(&fb->base, color_plane) ?
skl_ccs_to_main_plane(&fb->base, color_plane) : 0;
+ unsigned int main_width = fb->base.width;
+ unsigned int main_height = fb->base.height;
int main_hsub, main_vsub;
int hsub, vsub;
+ /*
+ * On ADL-P the CCS AUX surface layout always aligns with the
+ * power-of-two aligned main surface stride. The main surface
+ * stride in the allocated FB object may not be power-of-two
+ * sized, in which case it is auto-padded to the POT size.
+ */
+ if (IS_ALDERLAKE_P(i915) && is_ccs_plane(&fb->base, color_plane))
+ main_width = gen12_aligned_scanout_stride(fb, 0) /
+ fb->base.format->cpp[0];
+
intel_fb_plane_get_subsampling(&main_hsub, &main_vsub, &fb->base, main_plane);
intel_fb_plane_get_subsampling(&hsub, &vsub, &fb->base, color_plane);
- *w = fb->base.width / main_hsub / hsub;
- *h = fb->base.height / main_vsub / vsub;
+
+ *w = main_width / main_hsub / hsub;
+ *h = main_height / main_vsub / vsub;
}
static u32 intel_adjust_tile_offset(int *x, int *y,
@@ -355,17 +561,8 @@ static int intel_fb_offset_to_xy(int *x, int *y,
unsigned int height;
u32 alignment;
- /*
- * All DPT color planes must be 512*4k aligned (the amount mapped by a
- * single DPT page). For ADL_P CCS FBs this only works by requiring
- * the allocated offsets to be 2MB aligned. Once supoort to remap
- * such FBs is added we can remove this requirement, as then all the
- * planes can be remapped to an aligned offset.
- */
- if (IS_ALDERLAKE_P(i915) && is_ccs_modifier(fb->modifier))
- alignment = 512 * 4096;
- else if (DISPLAY_VER(i915) >= 12 &&
- is_semiplanar_uv_plane(fb, color_plane))
+ if (DISPLAY_VER(i915) >= 12 &&
+ is_semiplanar_uv_plane(fb, color_plane))
alignment = intel_tile_row_size(fb, color_plane);
else if (fb->modifier != DRM_FORMAT_MOD_LINEAR)
alignment = intel_tile_size(i915);
@@ -416,7 +613,12 @@ static int intel_fb_check_ccs_xy(const struct drm_framebuffer *fb, int ccs_plane
if (!is_ccs_plane(fb, ccs_plane) || is_gen12_ccs_cc_plane(fb, ccs_plane))
return 0;
- intel_tile_dims(fb, ccs_plane, &tile_width, &tile_height);
+ /*
+ * While all the tile dimensions are based on a 2k or 4k GTT page size
+ * here the main and CCS coordinates must match only within a (64 byte
+ * on TGL+) block inside the tile.
+ */
+ intel_tile_block_dims(fb, ccs_plane, &tile_width, &tile_height);
intel_fb_plane_get_subsampling(&hsub, &vsub, fb, ccs_plane);
tile_width *= hsub;
@@ -491,8 +693,7 @@ bool intel_fb_needs_pot_stride_remap(const struct intel_framebuffer *fb)
{
struct drm_i915_private *i915 = to_i915(fb->base.dev);
- return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR &&
- !is_ccs_modifier(fb->base.modifier);
+ return IS_ALDERLAKE_P(i915) && fb->base.modifier != DRM_FORMAT_MOD_LINEAR;
}
static int intel_fb_pitch(const struct intel_framebuffer *fb, int color_plane, unsigned int rotation)
@@ -612,14 +813,16 @@ static unsigned int
plane_view_dst_stride_tiles(const struct intel_framebuffer *fb, int color_plane,
unsigned int pitch_tiles)
{
- if (intel_fb_needs_pot_stride_remap(fb))
+ if (intel_fb_needs_pot_stride_remap(fb)) {
+ unsigned int min_stride = is_ccs_plane(&fb->base, color_plane) ? 2 : 8;
/*
* ADL_P, the only platform needing a POT stride has a minimum
- * of 8 stride tiles.
+ * of 8 main surface and 2 CCS AUX stride tiles.
*/
- return roundup_pow_of_two(max(pitch_tiles, 8u));
- else
+ return roundup_pow_of_two(max(pitch_tiles, min_stride));
+ } else {
return pitch_tiles;
+ }
}
static unsigned int
@@ -655,7 +858,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
unsigned int tile_height = dims->tile_height;
unsigned int tile_size = intel_tile_size(i915);
struct drm_rect r;
- u32 size;
+ u32 size = 0;
assign_chk_ovf(i915, remap_info->offset, obj_offset);
assign_chk_ovf(i915, remap_info->src_stride, plane_view_src_stride_tiles(fb, color_plane, dims));
@@ -680,7 +883,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->stride = remap_info->dst_stride * tile_height;
- size = remap_info->dst_stride * remap_info->width;
+ size += remap_info->dst_stride * remap_info->width;
/* rotate the tile dimensions to match the GTT view */
swap(tile_width, tile_height);
@@ -689,6 +892,14 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
check_array_bounds(i915, view->gtt.remapped.plane, color_plane);
+ if (view->gtt.remapped.plane_alignment) {
+ unsigned int aligned_offset = ALIGN(gtt_offset,
+ view->gtt.remapped.plane_alignment);
+
+ size += aligned_offset - gtt_offset;
+ gtt_offset = aligned_offset;
+ }
+
assign_chk_ovf(i915, remap_info->dst_stride,
plane_view_dst_stride_tiles(fb, color_plane, remap_info->width));
@@ -698,7 +909,7 @@ static u32 calc_plane_remap_info(const struct intel_framebuffer *fb, int color_p
color_plane_info->stride = remap_info->dst_stride * tile_width *
fb->base.format->cpp[color_plane];
- size = remap_info->dst_stride * remap_info->height;
+ size += remap_info->dst_stride * remap_info->height;
}
/*
@@ -745,10 +956,14 @@ calc_plane_normal_size(const struct intel_framebuffer *fb, int color_plane,
return tiles;
}
-static void intel_fb_view_init(struct intel_fb_view *view, enum i915_ggtt_view_type view_type)
+static void intel_fb_view_init(struct drm_i915_private *i915, struct intel_fb_view *view,
+ enum i915_ggtt_view_type view_type)
{
memset(view, 0, sizeof(*view));
view->gtt.type = view_type;
+
+ if (view_type == I915_GGTT_VIEW_REMAPPED && IS_ALDERLAKE_P(i915))
+ view->gtt.remapped.plane_alignment = SZ_2M / PAGE_SIZE;
}
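To see what the 2 MiB remapped-plane alignment means in practice, a small sketch in GTT-page units, assuming 4 KiB pages (so SZ_2M / PAGE_SIZE = 512); the padding pages are exactly what the calc_plane_remap_info() hunk above adds to the plane size:

	#include <stdio.h>

	#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))

	int main(void)
	{
		unsigned int page_size = 4096;
		unsigned int plane_alignment = (2u << 20) / page_size;  /* 512 GTT pages on ADL-P */
		unsigned int gtt_offset = 1000;  /* pages consumed by earlier planes */
		unsigned int aligned = ALIGN(gtt_offset, plane_alignment);

		/* 1000 -> 1024, i.e. 24 padding pages accounted into the plane size */
		printf("offset %u -> %u (+%u padding pages)\n",
		       gtt_offset, aligned, aligned - gtt_offset);
		return 0;
	}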
bool intel_fb_supports_90_270_rotation(const struct intel_framebuffer *fb)
@@ -769,16 +984,16 @@ int intel_fill_fb_info(struct drm_i915_private *i915, struct intel_framebuffer *
int i, num_planes = fb->base.format->num_planes;
unsigned int tile_size = intel_tile_size(i915);
- intel_fb_view_init(&fb->normal_view, I915_GGTT_VIEW_NORMAL);
+ intel_fb_view_init(i915, &fb->normal_view, I915_GGTT_VIEW_NORMAL);
drm_WARN_ON(&i915->drm,
intel_fb_supports_90_270_rotation(fb) &&
intel_fb_needs_pot_stride_remap(fb));
if (intel_fb_supports_90_270_rotation(fb))
- intel_fb_view_init(&fb->rotated_view, I915_GGTT_VIEW_ROTATED);
+ intel_fb_view_init(i915, &fb->rotated_view, I915_GGTT_VIEW_ROTATED);
if (intel_fb_needs_pot_stride_remap(fb))
- intel_fb_view_init(&fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
+ intel_fb_view_init(i915, &fb->remapped_view, I915_GGTT_VIEW_REMAPPED);
for (i = 0; i < num_planes; i++) {
struct fb_plane_view_dims view_dims;
@@ -856,7 +1071,7 @@ static void intel_plane_remap_gtt(struct intel_plane_state *plane_state)
unsigned int src_w, src_h;
u32 gtt_offset = 0;
- intel_fb_view_init(&plane_state->view,
+ intel_fb_view_init(i915, &plane_state->view,
drm_rotation_90_or_270(rotation) ? I915_GGTT_VIEW_ROTATED :
I915_GGTT_VIEW_REMAPPED);
@@ -918,6 +1133,79 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
*view = fb->normal_view;
}
+static
+u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
+ u32 pixel_format, u64 modifier)
+{
+ /*
+ * Arbitrary limit for gen4+ chosen to match the
+ * render engine max stride.
+ *
+ * The new CCS hash mode makes remapping impossible
+ */
+ if (DISPLAY_VER(dev_priv) < 4 || is_ccs_modifier(modifier) ||
+ intel_modifier_uses_dpt(dev_priv, modifier))
+ return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
+ else if (DISPLAY_VER(dev_priv) >= 7)
+ return 256 * 1024;
+ else
+ return 128 * 1024;
+}
+
+static u32
+intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(fb->dev);
+ u32 tile_width;
+
+ if (is_surface_linear(fb, color_plane)) {
+ u32 max_stride = intel_plane_fb_max_stride(dev_priv,
+ fb->format->format,
+ fb->modifier);
+
+ /*
+ * To make remapping with linear generally feasible
+ * we need the stride to be page aligned.
+ */
+ if (fb->pitches[color_plane] > max_stride &&
+ !is_ccs_modifier(fb->modifier))
+ return intel_tile_size(dev_priv);
+ else
+ return 64;
+ }
+
+ tile_width = intel_tile_width_bytes(fb, color_plane);
+ if (is_ccs_modifier(fb->modifier)) {
+ /*
+ * On ADL-P the stride must be either 8 tiles or a stride
+ * that is aligned to 16 tiles, required by the 16 tiles =
+ * 64 kbyte CCS AUX PTE granularity, allowing CCS FBs to be
+ * remapped.
+ */
+ if (IS_ALDERLAKE_P(dev_priv))
+ tile_width *= fb->pitches[0] <= tile_width * 8 ? 8 : 16;
+ /*
+ * On TGL the surface stride must be 4 tile aligned, mapped by
+ * one 64 byte cacheline on the CCS AUX surface.
+ */
+ else if (DISPLAY_VER(dev_priv) >= 12)
+ tile_width *= 4;
+ /*
+ * Display WA #0531: skl,bxt,kbl,glk
+ *
+ * Render decompression and plane width > 3840
+ * combined with horizontal panning requires the
+ * plane stride to be a multiple of 4. We'll just
+ * require the entire fb to accommodate that to avoid
+ * potential runtime errors at plane configuration time.
+ */
+ else if ((DISPLAY_VER(dev_priv) == 9 || IS_GEMINILAKE(dev_priv)) &&
+ color_plane == 0 && fb->width > 3840)
+ tile_width *= 4;
+ }
+ return tile_width;
+}
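A reduced sketch of the CCS stride-alignment branches above, assuming a 128-byte Y-tile width; only the TGL and ADL-P cases are modelled, and the helper name is purely illustrative:

	#include <stdio.h>

	static unsigned int ccs_stride_alignment(int is_adlp, unsigned int pitch0)
	{
		unsigned int tile_width = 128;  /* assumed Y-tile width in bytes */

		if (is_adlp)  /* either exactly 8 tiles or a multiple of 16 tiles */
			return tile_width * (pitch0 <= tile_width * 8 ? 8 : 16);
		return tile_width * 4;          /* TGL: one 64-byte CCS cacheline per 4 tiles */
	}

	int main(void)
	{
		/* 512 bytes on TGL; 1024 bytes for a narrow ADL-P fb, 2048 otherwise */
		printf("TGL: %u, ADL-P small fb: %u, ADL-P large fb: %u\n",
		       ccs_stride_alignment(0, 4096),
		       ccs_stride_alignment(1, 1024),
		       ccs_stride_alignment(1, 4096));
		return 0;
	}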
+
static int intel_plane_check_stride(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
@@ -981,3 +1269,257 @@ int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
return intel_plane_check_stride(plane_state);
}
+
+static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ if (intel_fb_uses_dpt(fb))
+ intel_dpt_destroy(intel_fb->dpt_vm);
+
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+
+ kfree(intel_fb);
+}
+
+static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int *handle)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ if (i915_gem_object_is_userptr(obj)) {
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
+ return -EINVAL;
+ }
+
+ return drm_gem_handle_create(file, &obj->base, handle);
+}
+
+static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file,
+ unsigned int flags, unsigned int color,
+ struct drm_clip_rect *clips,
+ unsigned int num_clips)
+{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+
+ i915_gem_object_flush_if_display(obj);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs intel_fb_funcs = {
+ .destroy = intel_user_framebuffer_destroy,
+ .create_handle = intel_user_framebuffer_create_handle,
+ .dirty = intel_user_framebuffer_dirty,
+};
+
+int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
+ struct drm_framebuffer *fb = &intel_fb->base;
+ u32 max_stride;
+ unsigned int tiling, stride;
+ int ret = -EINVAL;
+ int i;
+
+ intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ if (!intel_fb->frontbuffer)
+ return -ENOMEM;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ goto err;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
+ goto err;
+ }
+ }
+
+ if (!drm_any_plane_has_format(&dev_priv->drm,
+ mode_cmd->pixel_format,
+ mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %p4cc / modifier 0x%llx\n",
+ &mode_cmd->pixel_format, mode_cmd->modifier[0]);
+ goto err;
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (DISPLAY_VER(dev_priv) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ goto err;
+ }
+
+ max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
+ mode_cmd->modifier[0]);
+ if (mode_cmd->pitches[0] > max_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
+ goto err;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ goto err;
+ }
+
+ /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
+ if (mode_cmd->offsets[0] != 0) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
+ goto err;
+ }
+
+ drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ u32 stride_alignment;
+
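+		/* All color planes must be backed by the same GEM object */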
+ if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
+ goto err;
+ }
+
+ stride_alignment = intel_fb_stride_alignment(fb, i);
+ if (fb->pitches[i] & (stride_alignment - 1)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
+ goto err;
+ }
+
+ if (is_gen12_ccs_plane(fb, i) && !is_gen12_ccs_cc_plane(fb, i)) {
+ int ccs_aux_stride = gen12_ccs_aux_stride(intel_fb, i);
+
+ if (fb->pitches[i] != ccs_aux_stride) {
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
+ goto err;
+ }
+ }
+
+ fb->obj[i] = &obj->base;
+ }
+
+ ret = intel_fill_fb_info(dev_priv, intel_fb);
+ if (ret)
+ goto err;
+
+ if (intel_fb_uses_dpt(fb)) {
+ struct i915_address_space *vm;
+
+ vm = intel_dpt_create(intel_fb);
+ if (IS_ERR(vm)) {
+ ret = PTR_ERR(vm);
+ goto err;
+ }
+
+ intel_fb->dpt_vm = vm;
+ }
+
+ ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
+ if (ret) {
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ intel_frontbuffer_put(intel_fb->frontbuffer);
+ return ret;
+}
+
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd)
+{
+ struct drm_framebuffer *fb;
+ struct drm_i915_gem_object *obj;
+ struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
+ struct drm_i915_private *i915;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ i915 = to_i915(obj->base.dev);
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ return ERR_PTR(-EREMOTE);
+ }
+
+ fb = intel_framebuffer_create(obj, &mode_cmd);
+ i915_gem_object_put(obj);
+
+ return fb;
+}
+
+struct drm_framebuffer *
+intel_framebuffer_create(struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct intel_framebuffer *intel_fb;
+ int ret;
+
+ intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
+ if (!intel_fb)
+ return ERR_PTR(-ENOMEM);
+
+ ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
+ if (ret)
+ goto err;
+
+ return &intel_fb->base;
+
+err:
+ kfree(intel_fb);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb.h b/drivers/gpu/drm/i915/display/intel_fb.h
index 739d1b91754b..1cbdd84502bd 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fb.h
@@ -8,10 +8,12 @@
#include <linux/types.h>
+struct drm_device;
+struct drm_file;
struct drm_framebuffer;
-
+struct drm_i915_gem_object;
struct drm_i915_private;
-
+struct drm_mode_fb_cmd2;
struct intel_fb_view;
struct intel_framebuffer;
struct intel_plane_state;
@@ -28,10 +30,14 @@ int skl_ccs_to_main_plane(const struct drm_framebuffer *fb, int ccs_plane);
int skl_main_to_aux_plane(const struct drm_framebuffer *fb, int main_plane);
unsigned int intel_tile_size(const struct drm_i915_private *i915);
+unsigned int intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_height(const struct drm_framebuffer *fb, int color_plane);
unsigned int intel_tile_row_size(const struct drm_framebuffer *fb, int color_plane);
-
+unsigned int intel_fb_align_height(const struct drm_framebuffer *fb,
+ int color_plane, unsigned int height);
unsigned int intel_cursor_alignment(const struct drm_i915_private *i915);
+unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
+ int color_plane);
void intel_fb_plane_get_subsampling(int *hsub, int *vsub,
const struct drm_framebuffer *fb,
@@ -53,4 +59,12 @@ void intel_fb_fill_view(const struct intel_framebuffer *fb, unsigned int rotatio
struct intel_fb_view *view);
int intel_plane_compute_gtt(struct intel_plane_state *plane_state);
+int intel_framebuffer_init(struct intel_framebuffer *ifb,
+ struct drm_i915_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *
+intel_user_framebuffer_create(struct drm_device *dev,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
#endif /* __INTEL_FB_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
new file mode 100644
index 000000000000..3f77f3013584
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+/**
+ * DOC: display pinning helpers
+ */
+
+#include "intel_display_types.h"
+#include "intel_fb_pin.h"
+#include "intel_fb.h"
+
+#include "intel_dpt.h"
+
+#include "gem/i915_gem_object.h"
+
+static struct i915_vma *
+intel_pin_fb_obj_dpt(struct drm_framebuffer *fb,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags,
+ struct i915_address_space *vm)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_vma *vma;
+ u32 alignment;
+ int ret;
+
+ if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
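+	/* DPT framebuffer mappings use a fixed 2 MiB (4096 * 512) alignment */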
+ alignment = 4096 * 512;
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma = i915_vma_instance(obj, vm, view);
+ if (IS_ERR(vma))
+ goto err;
+
+ if (i915_vma_misplaced(vma, 0, alignment, 0)) {
+ ret = i915_vma_unbind(vma);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+ }
+
+ ret = i915_vma_pin(vma, 0, alignment, PIN_GLOBAL);
+ if (ret) {
+ vma = ERR_PTR(ret);
+ goto err;
+ }
+
+ vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
+
+ i915_gem_object_flush_if_display(obj);
+
+ i915_vma_get(vma);
+err:
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+
+ return vma;
+}
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags)
+{
+ struct drm_device *dev = fb->dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ intel_wakeref_t wakeref;
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ unsigned int pinctl;
+ u32 alignment;
+ int ret;
+
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
+ return ERR_PTR(-EINVAL);
+
+ if (phys_cursor)
+ alignment = intel_cursor_alignment(dev_priv);
+ else
+ alignment = intel_surf_alignment(fb, 0);
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
+ return ERR_PTR(-EINVAL);
+
+ /* Note that the w/a also requires 64 PTE of padding following the
+ * bo. We currently fill all unused PTE with the shadow page and so
+ * we should always have valid PTE following the scanout preventing
+ * the VT-d warning.
+ */
+ if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
+ alignment = 256 * 1024;
+
+ /*
+ * Global gtt pte registers are special registers which actually forward
+ * writes to a chunk of system memory. Which means that there is no risk
+ * that the register values disappear as soon as we call
+ * intel_runtime_pm_put(), so it is correct to wrap only the
+ * pin/unpin/fence and not more.
+ */
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
+
+ /*
+ * Valleyview is definitely limited to scanning out the first
+ * 512MiB. Let's presume this behaviour was inherited from the
+ * g4x display engine and that all earlier gen are similarly
+ * limited. Testing suggests that it is a little more
+ * complicated than this. For example, Cherryview appears quite
+ * happy to scanout from anywhere within its global aperture.
+ */
+ pinctl = 0;
+ if (HAS_GMCH(dev_priv))
+ pinctl |= PIN_MAPPABLE;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(obj, &ww);
+ if (!ret && phys_cursor)
+ ret = i915_gem_object_attach_phys(obj, alignment);
+ else if (!ret && HAS_LMEM(dev_priv))
+ ret = i915_gem_object_migrate(obj, &ww, INTEL_REGION_LMEM);
+ /* TODO: Do we need to sync when migration becomes async? */
+ if (!ret)
+ ret = i915_gem_object_pin_pages(obj);
+ if (ret)
+ goto err;
+
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(obj, &ww, alignment,
+ view, pinctl);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err_unpin;
+ }
+ }
+
+ if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
+ /*
+ * Install a fence for tiled scan-out. Pre-i965 always needs a
+ * fence, whereas 965+ only requires a fence if using
+ * framebuffer compression. For simplicity, we always, when
+ * possible, install a fence as the cost is not that onerous.
+ *
+ * If we fail to fence the tiled scanout, then either the
+ * modeset will reject the change (which is highly unlikely as
+ * the affected systems, all but one, do not have unmappable
+ * space) or we will not be able to enable full powersaving
+ * techniques (also likely not to apply due to various limits
+ * FBC and the like impose on the size of the buffer, which
+ * presumably we violated anyway with this unmappable buffer).
+ * Anyway, it is presumably better to stumble onwards with
+ * something and try to run the system in a "less than optimal"
+ * mode that matches the user configuration.
+ */
+ ret = i915_vma_pin_fence(vma);
+ if (ret != 0 && DISPLAY_VER(dev_priv) < 4) {
+ i915_vma_unpin(vma);
+ goto err_unpin;
+ }
+ ret = 0;
+
+ if (vma->fence)
+ *out_flags |= PLANE_HAS_FENCE;
+ }
+
+ i915_vma_get(vma);
+
+err_unpin:
+ i915_gem_object_unpin_pages(obj);
+err:
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ vma = ERR_PTR(ret);
+
+ atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+ return vma;
+}
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
+{
+ if (flags & PLANE_HAS_FENCE)
+ i915_vma_unpin_fence(vma);
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state)
+{
+ struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct i915_vma *vma;
+ bool phys_cursor =
+ plane->id == PLANE_CURSOR &&
+ INTEL_INFO(dev_priv)->display.cursor_needs_physical;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = intel_pin_and_fence_fb_obj(fb, phys_cursor,
+ &plane_state->view.gtt,
+ intel_plane_uses_fence(plane_state),
+ &plane_state->flags);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
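+		/* Pin the DPT itself into the GGTT first, then bind the fb through it */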
+ vma = intel_dpt_pin(intel_fb->dpt_vm);
+ if (IS_ERR(vma))
+ return PTR_ERR(vma);
+
+ plane_state->ggtt_vma = vma;
+
+ vma = intel_pin_fb_obj_dpt(fb, &plane_state->view.gtt, false,
+ &plane_state->flags, intel_fb->dpt_vm);
+ if (IS_ERR(vma)) {
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ plane_state->ggtt_vma = NULL;
+ return PTR_ERR(vma);
+ }
+
+ plane_state->dpt_vma = vma;
+
+ WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma);
+ }
+
+ return 0;
+}
+
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
+{
+ struct drm_framebuffer *fb = old_plane_state->hw.fb;
+ struct i915_vma *vma;
+
+ if (!intel_fb_uses_dpt(fb)) {
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+ } else {
+ struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
+
+ vma = fetch_and_zero(&old_plane_state->dpt_vma);
+ if (vma)
+ intel_unpin_fb_vma(vma, old_plane_state->flags);
+
+ vma = fetch_and_zero(&old_plane_state->ggtt_vma);
+ if (vma)
+ intel_dpt_unpin(intel_fb->dpt_vm);
+ }
+}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.h b/drivers/gpu/drm/i915/display/intel_fb_pin.h
new file mode 100644
index 000000000000..e4fcd0218d9d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_FB_PIN_H__
+#define __INTEL_FB_PIN_H__
+
+#include <linux/types.h>
+
+struct drm_framebuffer;
+struct i915_vma;
+struct intel_plane_state;
+struct i915_ggtt_view;
+
+struct i915_vma *
+intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
+ bool phys_cursor,
+ const struct i915_ggtt_view *view,
+ bool uses_fence,
+ unsigned long *out_flags);
+
+void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags);
+
+int intel_plane_pin_fb(struct intel_plane_state *plane_state);
+void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index ddfc17e21668..1f66de77a6b1 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -62,19 +62,84 @@ static void intel_fbc_get_plane_source_size(const struct intel_fbc_state_cache *
*height = cache->plane.src_h;
}
-static int intel_fbc_calculate_cfb_size(struct drm_i915_private *dev_priv,
- const struct intel_fbc_state_cache *cache)
+/* plane stride in pixels */
+static unsigned int intel_fbc_plane_stride(const struct intel_plane_state *plane_state)
{
- int lines;
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ unsigned int stride;
+
+ stride = plane_state->view.color_plane[0].stride;
+ if (!drm_rotation_90_or_270(plane_state->hw.rotation))
+ stride /= fb->format->cpp[0];
+
+ return stride;
+}
+
+/* plane stride based cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int _intel_fbc_cfb_stride(const struct intel_fbc_state_cache *cache)
+{
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+
+ return cache->fb.stride * cpp;
+}
+
+/* minimum acceptable cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int skl_fbc_min_cfb_stride(struct drm_i915_private *i915,
+ const struct intel_fbc_state_cache *cache)
+{
+ unsigned int limit = 4; /* 1:4 compression limit is the worst case */
+ unsigned int cpp = 4; /* FBC always 4 bytes per pixel */
+ unsigned int height = 4; /* FBC segment is 4 lines */
+ unsigned int stride;
+
+ /* minimum segment stride we can use */
+ stride = cache->plane.src_w * cpp * height / limit;
+
+ /*
+ * Wa_16011863758: icl+
+ * Avoid some hardware segment address miscalculation.
+ */
+ if (DISPLAY_VER(i915) >= 11)
+ stride += 64;
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Just do it always for simplicity.
+ */
+ stride = ALIGN(stride, 512);
+
+ /* convert back to single line equivalent with 1:1 compression limit */
+ return stride * limit / height;
+}
+
+/* properly aligned cfb stride in bytes, assuming 1:1 compression limit */
+static unsigned int intel_fbc_cfb_stride(struct drm_i915_private *i915,
+ const struct intel_fbc_state_cache *cache)
+{
+ unsigned int stride = _intel_fbc_cfb_stride(cache);
+
+ /*
+ * At least some of the platforms require each 4 line segment to
+ * be 512 byte aligned. Aligning each line to 512 bytes guarantees
+ * that holds regardless of the compression limit we choose later.
+ */
+ if (DISPLAY_VER(i915) >= 9)
+ return max(ALIGN(stride, 512), skl_fbc_min_cfb_stride(i915, cache));
+ else
+ return stride;
+}
+
+static unsigned int intel_fbc_cfb_size(struct drm_i915_private *dev_priv,
+ const struct intel_fbc_state_cache *cache)
+{
+ int lines = cache->plane.src_h;
- intel_fbc_get_plane_source_size(cache, NULL, &lines);
if (DISPLAY_VER(dev_priv) == 7)
lines = min(lines, 2048);
else if (DISPLAY_VER(dev_priv) >= 8)
lines = min(lines, 2560);
- /* Hardware needs the full buffer stride, not just the active area. */
- return lines * cache->fb.stride;
+ return lines * intel_fbc_cfb_stride(dev_priv, cache);
}
static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -99,15 +164,13 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_reg_params *params = &fbc->params;
int cfb_pitch;
int i;
u32 fbc_ctl;
- /* Note: fbc.limit == 1 for i8xx */
- cfb_pitch = params->cfb_size / FBC_LL_SIZE;
- if (params->fb.stride < cfb_pitch)
- cfb_pitch = params->fb.stride;
+ cfb_pitch = params->cfb_stride / fbc->limit;
/* FBC_CTL wants 32B or 64B units */
if (DISPLAY_VER(dev_priv) == 2)
@@ -150,15 +213,9 @@ static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
static u32 g4x_dpfc_ctl_limit(struct drm_i915_private *i915)
{
- const struct intel_fbc_reg_params *params = &i915->fbc.params;
- int limit = i915->fbc.limit;
-
- if (params->fb.format->cpp[0] == 2)
- limit <<= 1;
-
- switch (limit) {
+ switch (i915->fbc.limit) {
default:
- MISSING_CASE(limit);
+ MISSING_CASE(i915->fbc.limit);
fallthrough;
case 1:
return DPFC_CTL_LIMIT_1X;
@@ -232,16 +289,16 @@ static void i965_fbc_recompress(struct drm_i915_private *dev_priv)
/* This function forces a CFB recompression through the nuke operation. */
static void snb_fbc_recompress(struct drm_i915_private *dev_priv)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- trace_intel_fbc_nuke(fbc->crtc);
-
intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ trace_intel_fbc_nuke(fbc->crtc);
+
if (DISPLAY_VER(dev_priv) >= 6)
snb_fbc_recompress(dev_priv);
else if (DISPLAY_VER(dev_priv) >= 4)
@@ -280,8 +337,6 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
params->fence_y_offset);
/* enable it... */
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- intel_fbc_recompress(dev_priv);
}
static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -303,19 +358,29 @@ static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
{
- struct intel_fbc_reg_params *params = &dev_priv->fbc.params;
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ const struct intel_fbc_reg_params *params = &fbc->params;
u32 dpfc_ctl;
- /* Display WA #0529: skl, kbl, bxt. */
- if (DISPLAY_VER(dev_priv) == 9) {
- u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
+ if (DISPLAY_VER(dev_priv) >= 10) {
+ u32 val = 0;
- val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
+ if (params->override_cfb_stride)
+ val |= FBC_STRIDE_OVERRIDE |
+ FBC_STRIDE(params->override_cfb_stride / fbc->limit);
- if (params->gen9_wa_cfb_stride)
- val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
+ intel_de_write(dev_priv, GLK_FBC_STRIDE, val);
+ } else if (DISPLAY_VER(dev_priv) == 9) {
+ u32 val = 0;
- intel_de_write(dev_priv, CHICKEN_MISC_4, val);
+ /* Display WA #0529: skl, kbl, bxt. */
+ if (params->override_cfb_stride)
+ val |= CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE(params->override_cfb_stride / fbc->limit);
+
+ intel_de_rmw(dev_priv, CHICKEN_MISC_4,
+ CHICKEN_FBC_STRIDE_OVERRIDE |
+ CHICKEN_FBC_STRIDE_MASK, val);
}
dpfc_ctl = 0;
@@ -339,8 +404,6 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
dpfc_ctl |= FBC_CTL_FALSE_COLOR;
intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
-
- intel_fbc_recompress(dev_priv);
}
static bool intel_fbc_hw_is_active(struct drm_i915_private *dev_priv)
@@ -402,6 +465,12 @@ bool intel_fbc_is_active(struct drm_i915_private *dev_priv)
return dev_priv->fbc.active;
}
+static void intel_fbc_activate(struct drm_i915_private *dev_priv)
+{
+ intel_fbc_hw_activate(dev_priv);
+ intel_fbc_recompress(dev_priv);
+}
+
static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
const char *reason)
{
@@ -440,30 +509,32 @@ static u64 intel_fbc_stolen_end(struct drm_i915_private *dev_priv)
return min(end, intel_fbc_cfb_base_max(dev_priv));
}
-static int intel_fbc_max_limit(struct drm_i915_private *dev_priv, int fb_cpp)
+static int intel_fbc_min_limit(int fb_cpp)
{
- /*
- * FIXME: FBC1 can have arbitrary cfb stride,
- * so we could support different compression ratios.
- */
- if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv))
- return 1;
+ return fb_cpp == 2 ? 2 : 1;
+}
+
+static int intel_fbc_max_limit(struct drm_i915_private *dev_priv)
+{
/* WaFbcOnly1to1Ratio:ctg */
if (IS_G4X(dev_priv))
return 1;
- /* FBC2 can only do 1:1, 1:2, 1:4 */
- return fb_cpp == 2 ? 2 : 4;
+ /*
+ * FBC2 can only do 1:1, 1:2, 1:4, we limit
+ * FBC1 to the same out of convenience.
+ */
+ return 4;
}
static int find_compression_limit(struct drm_i915_private *dev_priv,
- unsigned int size,
- unsigned int fb_cpp)
+ unsigned int size, int min_limit)
{
struct intel_fbc *fbc = &dev_priv->fbc;
u64 end = intel_fbc_stolen_end(dev_priv);
- int ret, limit = 1;
+ int ret, limit = min_limit;
+
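+	/* Start with the CFB size needed at the minimum acceptable compression limit */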
+ size /= limit;
/* Try to over-allocate to reduce reallocations and fragmentation. */
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
@@ -471,7 +542,7 @@ static int find_compression_limit(struct drm_i915_private *dev_priv,
if (ret == 0)
return limit;
- for (; limit <= intel_fbc_max_limit(dev_priv, fb_cpp); limit <<= 1) {
+ for (; limit <= intel_fbc_max_limit(dev_priv); limit <<= 1) {
ret = i915_gem_stolen_insert_node_in_range(dev_priv, &fbc->compressed_fb,
size >>= 1, 4096, 0, end);
if (ret == 0)
@@ -482,7 +553,7 @@ static int find_compression_limit(struct drm_i915_private *dev_priv,
}
static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
- unsigned int size, unsigned int fb_cpp)
+ unsigned int size, int min_limit)
{
struct intel_fbc *fbc = &dev_priv->fbc;
int ret;
@@ -499,13 +570,12 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
goto err;
}
- ret = find_compression_limit(dev_priv, size, fb_cpp);
+ ret = find_compression_limit(dev_priv, size, min_limit);
if (!ret)
goto err_llb;
- else if (ret > 1) {
+ else if (ret > min_limit)
drm_info_once(&dev_priv->drm,
"Reducing the compressed framebuffer size. This may lead to less power savings than a non-reduced-size. Try to increase stolen memory size if available in BIOS.\n");
- }
fbc->limit = ret;
@@ -675,11 +745,10 @@ static bool tiling_is_valid(struct drm_i915_private *dev_priv,
{
switch (modifier) {
case DRM_FORMAT_MOD_LINEAR:
- if (DISPLAY_VER(dev_priv) >= 9)
- return true;
- return false;
- case I915_FORMAT_MOD_X_TILED:
case I915_FORMAT_MOD_Y_TILED:
+ case I915_FORMAT_MOD_Yf_TILED:
+ return DISPLAY_VER(dev_priv) >= 9;
+ case I915_FORMAT_MOD_X_TILED:
return true;
default:
return false;
@@ -718,11 +787,7 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.modifier = fb->modifier;
-
- /* FIXME is this correct? */
- cache->fb.stride = plane_state->view.color_plane[0].stride;
- if (drm_rotation_90_or_270(plane_state->hw.rotation))
- cache->fb.stride *= fb->format->cpp[0];
+ cache->fb.stride = intel_fbc_plane_stride(plane_state);
/* FBC1 compression interval: arbitrary choice of 1 second */
cache->interval = drm_mode_vrefresh(&crtc_state->hw.adjusted_mode);
@@ -745,27 +810,29 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- return intel_fbc_calculate_cfb_size(dev_priv, &fbc->state_cache) >
+ return intel_fbc_cfb_size(dev_priv, &fbc->state_cache) >
fbc->compressed_fb.size * fbc->limit;
}
-static u16 intel_fbc_gen9_wa_cfb_stride(struct drm_i915_private *dev_priv)
+static u16 intel_fbc_override_cfb_stride(struct drm_i915_private *dev_priv,
+ const struct intel_fbc_state_cache *cache)
{
- struct intel_fbc *fbc = &dev_priv->fbc;
- struct intel_fbc_state_cache *cache = &fbc->state_cache;
-
- if ((DISPLAY_VER(dev_priv) == 9) &&
- cache->fb.modifier != I915_FORMAT_MOD_X_TILED)
- return DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->limit) * 8;
- else
- return 0;
-}
+ unsigned int stride = _intel_fbc_cfb_stride(cache);
+ unsigned int stride_aligned = intel_fbc_cfb_stride(dev_priv, cache);
-static bool intel_fbc_gen9_wa_cfb_stride_changed(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
+ /*
+ * Override stride in 64 byte units per 4 line segment.
+ *
+ * Gen9 hw miscalculates cfb stride for linear as
+ * PLANE_STRIDE*512 instead of PLANE_STRIDE*64, so
+ * we always need to use the override there.
+ */
+ if (stride != stride_aligned ||
+ (DISPLAY_VER(dev_priv) == 9 &&
+ cache->fb.modifier == DRM_FORMAT_MOD_LINEAR))
+ return stride_aligned * 4 / 64;
- return fbc->params.gen9_wa_cfb_stride != intel_fbc_gen9_wa_cfb_stride(dev_priv);
+ return 0;
}
static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
@@ -860,7 +927,8 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return false;
}
- if (!stride_is_valid(dev_priv, cache->fb.modifier, cache->fb.stride)) {
+ if (!stride_is_valid(dev_priv, cache->fb.modifier,
+ cache->fb.stride * cache->fb.format->cpp[0])) {
fbc->no_fbc_reason = "framebuffer stride not supported";
return false;
}
@@ -948,9 +1016,9 @@ static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
params->fb.modifier = cache->fb.modifier;
params->fb.stride = cache->fb.stride;
- params->cfb_size = intel_fbc_calculate_cfb_size(dev_priv, cache);
-
- params->gen9_wa_cfb_stride = cache->gen9_wa_cfb_stride;
+ params->cfb_stride = intel_fbc_cfb_stride(dev_priv, cache);
+ params->cfb_size = intel_fbc_cfb_size(dev_priv, cache);
+ params->override_cfb_stride = intel_fbc_override_cfb_stride(dev_priv, cache);
params->plane_visible = cache->plane.visible;
}
@@ -981,10 +1049,13 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
if (params->fb.stride != cache->fb.stride)
return false;
- if (params->cfb_size != intel_fbc_calculate_cfb_size(dev_priv, cache))
+ if (params->cfb_stride != intel_fbc_cfb_stride(dev_priv, cache))
+ return false;
+
+ if (params->cfb_size != intel_fbc_cfb_size(dev_priv, cache))
return false;
- if (params->gen9_wa_cfb_stride != cache->gen9_wa_cfb_stride)
+ if (params->override_cfb_stride != intel_fbc_override_cfb_stride(dev_priv, cache))
return false;
return true;
@@ -1090,7 +1161,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
return;
if (!fbc->busy_bits)
- intel_fbc_hw_activate(dev_priv);
+ intel_fbc_activate(dev_priv);
else
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
@@ -1129,7 +1200,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
- if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
return;
mutex_lock(&fbc->lock);
@@ -1150,19 +1221,11 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
if (!HAS_FBC(dev_priv))
return;
- /*
- * GTT tracking does not nuke the entire cfb
- * so don't clear busy_bits set for some other
- * reason.
- */
- if (origin == ORIGIN_GTT)
- return;
-
mutex_lock(&fbc->lock);
fbc->busy_bits &= ~frontbuffer_bits;
- if (origin == ORIGIN_FLIP)
+ if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE)
goto out;
if (!fbc->busy_bits && fbc->crtc &&
@@ -1246,8 +1309,8 @@ out:
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+static void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_plane *plane = to_intel_plane(crtc->base.primary);
@@ -1257,16 +1320,22 @@ void intel_fbc_enable(struct intel_atomic_state *state,
intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ int min_limit;
if (!plane->has_fbc || !plane_state)
return;
+ min_limit = intel_fbc_min_limit(plane_state->hw.fb ?
+ plane_state->hw.fb->format->cpp[0] : 0);
+
mutex_lock(&fbc->lock);
if (fbc->crtc) {
- if (fbc->crtc != crtc ||
- (!intel_fbc_cfb_size_changed(dev_priv) &&
- !intel_fbc_gen9_wa_cfb_stride_changed(dev_priv)))
+ if (fbc->crtc != crtc)
+ goto out;
+
+ if (fbc->limit >= min_limit &&
+ !intel_fbc_cfb_size_changed(dev_priv))
goto out;
__intel_fbc_disable(dev_priv);
@@ -1281,15 +1350,12 @@ void intel_fbc_enable(struct intel_atomic_state *state,
goto out;
if (intel_fbc_alloc_cfb(dev_priv,
- intel_fbc_calculate_cfb_size(dev_priv, cache),
- plane_state->hw.fb->format->cpp[0])) {
+ intel_fbc_cfb_size(dev_priv, cache), min_limit)) {
cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
- cache->gen9_wa_cfb_stride = intel_fbc_gen9_wa_cfb_stride(dev_priv);
-
drm_dbg_kms(&dev_priv->drm, "Enabling FBC on pipe %c\n",
pipe_name(crtc->pipe));
fbc->no_fbc_reason = "FBC enabled but not active yet\n";
@@ -1323,6 +1389,28 @@ void intel_fbc_disable(struct intel_crtc *crtc)
}
/**
+ * intel_fbc_update - enable/disable FBC on the CRTC
+ * @state: atomic state
+ * @crtc: the CRTC
+ *
+ * This function checks if the given CRTC was chosen for FBC, then enables it if
+ * possible. Notice that it doesn't activate FBC. It is valid to call
+ * intel_fbc_update multiple times for the same pipe without an
+ * intel_fbc_disable in the middle.
+ */
+void intel_fbc_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (crtc_state->update_pipe && !crtc_state->enable_fbc)
+ intel_fbc_disable(crtc);
+ else
+ intel_fbc_enable(state, crtc);
+}
+
+/**
* intel_fbc_global_disable - globally disable FBC
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index 6dc1edefe81b..b97d908738e6 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -24,7 +24,7 @@ bool intel_fbc_pre_update(struct intel_atomic_state *state,
void intel_fbc_post_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_atomic_state *state,
+void intel_fbc_update(struct intel_atomic_state *state,
struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index df05d285f0bd..adc3a81be9f7 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -45,6 +45,8 @@
#include "i915_drv.h"
#include "intel_display_types.h"
+#include "intel_fb.h"
+#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "intel_frontbuffer.h"
@@ -229,8 +231,6 @@ static int intelfb_create(struct drm_fb_helper *helper,
goto out_unlock;
}
- intel_frontbuffer_flush(to_frontbuffer(ifbdev), ORIGIN_DIRTYFB);
-
info = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(info)) {
drm_err(&dev_priv->drm, "Failed to allocate fb_info\n");
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index e10b9cd8e86e..dd2cf0c59921 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -2,11 +2,112 @@
/*
* Copyright © 2020 Intel Corporation
*/
+
#include "intel_atomic.h"
#include "intel_ddi.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_fdi.h"
+#include "intel_sbi.h"
+
+static void assert_fdi_tx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ if (HAS_DDI(dev_priv)) {
+ /*
+ * DDI does not have a specific FDI_TX register.
+ *
+ * FDI is never fed from EDP transcoder
+ * so pipe->transcoder cast is fine here.
+ */
+ enum transcoder cpu_transcoder = (enum transcoder)pipe;
+ cur_state = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE;
+ } else {
+ cur_state = intel_de_read(dev_priv, FDI_TX_CTL(pipe)) & FDI_TX_ENABLE;
+ }
+ I915_STATE_WARN(cur_state != state,
+ "FDI TX state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, true);
+}
+
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_tx(i915, pipe, false);
+}
+
+static void assert_fdi_rx(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(dev_priv, FDI_RX_CTL(pipe)) & FDI_RX_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, true);
+}
+
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx(i915, pipe, false);
+}
+
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915,
+ enum pipe pipe)
+{
+ bool cur_state;
+
+ /* ILK FDI PLL is always enabled */
+ if (IS_IRONLAKE(i915))
+ return;
+
+ /* On Haswell, DDI ports are responsible for the FDI PLL setup */
+ if (HAS_DDI(i915))
+ return;
+
+ cur_state = intel_de_read(i915, FDI_TX_CTL(pipe)) & FDI_TX_PLL_ENABLE;
+ I915_STATE_WARN(!cur_state, "FDI TX PLL assertion failure, should be active but is disabled\n");
+}
+
+static void assert_fdi_rx_pll(struct drm_i915_private *i915,
+ enum pipe pipe, bool state)
+{
+ bool cur_state;
+
+ cur_state = intel_de_read(i915, FDI_RX_CTL(pipe)) & FDI_RX_PLL_ENABLE;
+ I915_STATE_WARN(cur_state != state,
+ "FDI RX PLL assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, true);
+}
+
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe)
+{
+ assert_fdi_rx_pll(i915, pipe, false);
+}
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ dev_priv->fdi_funcs->fdi_link_train(crtc, crtc_state);
+}
/* units of 100MHz */
static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
@@ -91,8 +192,34 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
}
return 0;
default:
- BUG();
+ MISSING_CASE(pipe);
+ return 0;
+ }
+}
+
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915)
+{
+ if (IS_IRONLAKE(i915)) {
+ u32 fdi_pll_clk =
+ intel_de_read(i915, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+
+ i915->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
+ } else if (IS_SANDYBRIDGE(i915) || IS_IVYBRIDGE(i915)) {
+ i915->fdi_pll_freq = 270000;
+ } else {
+ return;
}
+
+ drm_dbg(&i915->drm, "FDI PLL freq=%d\n", i915->fdi_pll_freq);
+}
+
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config)
+{
+ if (HAS_DDI(i915))
+ return pipe_config->port_clock; /* SPLL */
+ else
+ return i915->fdi_pll_freq;
}
int ilk_fdi_compute_config(struct intel_crtc *crtc,
@@ -140,11 +267,60 @@ retry:
}
if (needs_recompute)
- return I915_DISPLAY_CONFIG_RETRY;
+ return -EAGAIN;
return ret;
}
+static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
+{
+ u32 temp;
+
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
+ if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
+ return;
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
+
+ temp &= ~FDI_BC_BIFURCATION_SELECT;
+ if (enable)
+ temp |= FDI_BC_BIFURCATION_SELECT;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
+}
+
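+/* Pipe C always needs BC bifurcation; pipe B only if it uses at most 2 FDI lanes */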
+static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ switch (crtc->pipe) {
+ case PIPE_A:
+ break;
+ case PIPE_B:
+ if (crtc_state->fdi_lanes > 2)
+ cpt_set_fdi_bc_bifurcation(dev_priv, false);
+ else
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ case PIPE_C:
+ cpt_set_fdi_bc_bifurcation(dev_priv, true);
+
+ break;
+ default:
+ MISSING_CASE(crtc->pipe);
+ }
+}
+
void intel_fdi_normal_train(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -196,8 +372,15 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, tries;
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
/* FDI needs bits from pipe first */
- assert_pipe_enabled(dev_priv, crtc_state->cpu_transcoder);
+ assert_transcoder_enabled(dev_priv, crtc_state->cpu_transcoder);
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
@@ -299,6 +482,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, i, retry;
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
@@ -436,6 +626,15 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
i915_reg_t reg;
u32 temp, i, j;
+ ivb_update_fdi_bc_bifurcation(crtc_state);
+
+ /*
+ * Write the TU size bits before fdi link training, so that error
+ * detection works.
+ */
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+
	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
@@ -807,15 +1006,125 @@ void ilk_fdi_disable(struct intel_crtc *crtc)
udelay(100);
}
+static void lpt_fdi_reset_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
+
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
+ tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
+
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
+ FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
+}
+
+/* WaMPhyProgramming:hsw */
+void lpt_fdi_program_mphy(struct drm_i915_private *dev_priv)
+{
+ u32 tmp;
+
+ lpt_fdi_reset_mphy(dev_priv);
+
+ tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
+ tmp &= ~(0xFF << 24);
+ tmp |= (0x12 << 24);
+ intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
+ tmp |= (1 << 11);
+ intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
+ tmp |= (1 << 24) | (1 << 21) | (1 << 18);
+ intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
+ tmp &= ~(7 << 13);
+ tmp |= (5 << 13);
+ intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
+ tmp &= ~0xFF;
+ tmp |= 0x1C;
+ intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
+ tmp &= ~(0xFF << 16);
+ tmp |= (0x1C << 16);
+ intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
+ tmp |= (1 << 27);
+ intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
+
+ tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
+ tmp &= ~(0xF << 28);
+ tmp |= (4 << 28);
+ intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
+}
+
+static const struct intel_fdi_funcs ilk_funcs = {
+ .fdi_link_train = ilk_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs gen6_funcs = {
+ .fdi_link_train = gen6_fdi_link_train,
+};
+
+static const struct intel_fdi_funcs ivb_funcs = {
+ .fdi_link_train = ivb_manual_fdi_link_train,
+};
+
void
intel_fdi_init_hook(struct drm_i915_private *dev_priv)
{
if (IS_IRONLAKE(dev_priv)) {
- dev_priv->display.fdi_link_train = ilk_fdi_link_train;
+ dev_priv->fdi_funcs = &ilk_funcs;
} else if (IS_SANDYBRIDGE(dev_priv)) {
- dev_priv->display.fdi_link_train = gen6_fdi_link_train;
+ dev_priv->fdi_funcs = &gen6_funcs;
} else if (IS_IVYBRIDGE(dev_priv)) {
/* FIXME: detect B0+ stepping and use auto training */
- dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
+ dev_priv->fdi_funcs = &ivb_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h
index af01d2c173a8..640d6585c137 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.h
+++ b/drivers/gpu/drm/i915/display/intel_fdi.h
@@ -6,12 +6,14 @@
#ifndef _INTEL_FDI_H_
#define _INTEL_FDI_H_
+enum pipe;
struct drm_i915_private;
struct intel_crtc;
struct intel_crtc_state;
struct intel_encoder;
-#define I915_DISPLAY_CONFIG_RETRY 1
+int intel_fdi_link_freq(struct drm_i915_private *i915,
+ const struct intel_crtc_state *pipe_config);
int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config);
void intel_fdi_normal_train(struct intel_crtc *crtc);
@@ -21,5 +23,18 @@ void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state);
void intel_fdi_init_hook(struct drm_i915_private *dev_priv);
void hsw_fdi_link_train(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_fdi_pll_freq_update(struct drm_i915_private *i915);
+void lpt_fdi_program_mphy(struct drm_i915_private *i915);
+
+void intel_fdi_link_train(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+
+void assert_fdi_tx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_disabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_tx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_enabled(struct drm_i915_private *i915, enum pipe pipe);
+void assert_fdi_rx_pll_disabled(struct drm_i915_private *i915, enum pipe pipe);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 8e75debcce1a..0492446cd04a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -62,6 +62,7 @@
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_drrs.h"
#include "intel_psr.h"
/**
@@ -91,7 +92,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
trace_intel_frontbuffer_flush(frontbuffer_bits, origin);
might_sleep();
- intel_edp_drrs_flush(i915, frontbuffer_bits);
+ intel_drrs_flush(i915, frontbuffer_bits);
intel_psr_flush(i915, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
@@ -180,7 +181,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
might_sleep();
intel_psr_invalidate(i915, frontbuffer_bits, origin);
- intel_edp_drrs_invalidate(i915, frontbuffer_bits);
+ intel_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 6d41f5394425..a88441edc8f9 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -33,11 +33,11 @@
struct drm_i915_private;
enum fb_op_origin {
- ORIGIN_GTT,
- ORIGIN_CPU,
+ ORIGIN_CPU = 0,
ORIGIN_CS,
ORIGIN_FLIP,
ORIGIN_DIRTYFB,
+ ORIGIN_CURSOR_UPDATE,
};
struct intel_frontbuffer {
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index ebc2e32aec0b..4509fe7438e8 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -17,12 +17,12 @@
#include "i915_drv.h"
#include "i915_reg.h"
-#include "intel_display_power.h"
+#include "intel_connector.h"
#include "intel_de.h"
+#include "intel_display_power.h"
#include "intel_display_types.h"
#include "intel_hdcp.h"
-#include "intel_sideband.h"
-#include "intel_connector.h"
+#include "intel_pcode.h"
#define KEY_LOAD_TRIES 5
#define HDCP2_LC_RETRY_CNT 3
@@ -33,21 +33,6 @@ static int intel_conn_to_vcpi(struct intel_connector *connector)
return connector->port ? connector->port->vcpi.vcpi : 0;
}
-static bool
-intel_streams_type1_capable(struct intel_connector *connector)
-{
- const struct intel_hdcp_shim *shim = connector->hdcp.shim;
- bool capable = false;
-
- if (!shim)
- return capable;
-
- if (shim->streams_type1_capable)
- shim->streams_type1_capable(connector, &capable);
-
- return capable;
-}
-
/*
 * intel_hdcp_required_content_stream selects the highest common possible HDCP
* content_type for all streams in DP MST topology because security f/w doesn't
@@ -86,7 +71,7 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
if (conn_dig_port != dig_port)
continue;
- if (!enforce_type0 && !intel_streams_type1_capable(connector))
+ if (!enforce_type0 && !dig_port->hdcp_mst_type1_capable)
enforce_type0 = true;
data->streams[data->k].stream_id = intel_conn_to_vcpi(connector);
@@ -112,6 +97,25 @@ intel_hdcp_required_content_stream(struct intel_digital_port *dig_port)
return 0;
}
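+
+/* Fill the HDCP stream data: a single stream for SST, or every stream in the MST topology */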
+static int intel_hdcp_prepare_streams(struct intel_connector *connector)
+{
+ struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
+ struct hdcp_port_data *data = &dig_port->hdcp_port_data;
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ int ret;
+
+ if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
+ data->k = 1;
+ data->streams[0].stream_type = hdcp->content_type;
+ } else {
+ ret = intel_hdcp_required_content_stream(dig_port);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
static
bool intel_hdcp_is_ksv_valid(u8 *ksv)
{
@@ -1632,6 +1636,14 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
return -EINVAL;
}
+ /*
+ * MST topology is not Type 1 capable if it contains a downstream
+ * device that is only HDCP 1.x or Legacy HDCP 2.0/2.1 compliant.
+ */
+ dig_port->hdcp_mst_type1_capable =
+ !HDCP_2_2_HDCP1_DEVICE_CONNECTED(rx_info[1]) &&
+ !HDCP_2_2_HDCP_2_0_REP_CONNECTED(rx_info[1]);
+
/* Converting and Storing the seq_num_v to local variable as DWORD */
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
@@ -1876,6 +1888,14 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
for (i = 0; i < tries && !dig_port->hdcp_auth_status; i++) {
ret = hdcp2_authenticate_sink(connector);
if (!ret) {
+ ret = intel_hdcp_prepare_streams(connector);
+ if (ret) {
+ drm_dbg_kms(&i915->drm,
+ "Prepare streams failed (%d)\n",
+ ret);
+ break;
+ }
+
ret = hdcp2_propagate_stream_management_info(connector);
if (ret) {
drm_dbg_kms(&i915->drm,
@@ -1921,9 +1941,7 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
- struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct hdcp_port_data *data = &dig_port->hdcp_port_data;
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
@@ -1931,16 +1949,6 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
connector->base.name, connector->base.base.id,
hdcp->content_type);
- /* Stream which requires encryption */
- if (!intel_encoder_is_mst(intel_attached_encoder(connector))) {
- data->k = 1;
- data->streams[0].stream_type = hdcp->content_type;
- } else {
- ret = intel_hdcp_required_content_stream(dig_port);
- if (ret)
- return ret;
- }
-
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index b04685bb6439..d2e61f6c6e08 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -53,21 +53,20 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
+static struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
{
- return hdmi_to_dig_port(intel_hdmi)->base.base.dev;
+ return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
}
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
u32 enabled_bits;
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- drm_WARN(dev,
+ drm_WARN(&dev_priv->drm,
intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
@@ -1246,7 +1245,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
- struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
@@ -1703,7 +1702,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *dig_port,
drm_dbg_kms(&i915->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
- return -1;
+ return -EINVAL;
}
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
@@ -1830,7 +1829,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
bool has_hdmi_sink)
{
- struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
if (clock < 25000)
return MODE_CLOCK_LOW;
@@ -1946,8 +1945,7 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
@@ -2210,7 +2208,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
return ret;
if (pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) {
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 47c85ac97c87..955f6d07b0e1 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -215,8 +215,8 @@ intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
static void intel_hpd_irq_setup(struct drm_i915_private *i915)
{
- if (i915->display_irqs_enabled && i915->display.hpd_irq_setup)
- i915->display.hpd_irq_setup(i915);
+ if (i915->display_irqs_enabled && i915->hotplug_funcs)
+ i915->hotplug_funcs->hpd_irq_setup(i915);
}
static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index e0381b0fce91..9fced37bed70 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -40,9 +40,12 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_dpll.h"
+#include "intel_fdi.h"
#include "intel_gmbus.h"
#include "intel_lvds.h"
#include "intel_panel.h"
@@ -323,7 +326,7 @@ static void intel_enable_lvds(struct intel_atomic_state *state,
drm_err(&dev_priv->drm,
"timed out waiting for panel to power on\n");
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_backlight_enable(pipe_config, conn_state);
}
static void intel_disable_lvds(struct intel_atomic_state *state,
@@ -351,7 +354,7 @@ static void gmch_disable_lvds(struct intel_atomic_state *state,
const struct drm_connector_state *old_conn_state)
{
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
intel_disable_lvds(state, encoder, old_crtc_state, old_conn_state);
}
@@ -361,7 +364,7 @@ static void pch_disable_lvds(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
}
static void pch_post_disable_lvds(struct intel_atomic_state *state,
@@ -388,13 +391,15 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct intel_connector *intel_connector = to_intel_connector(connector);
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ enum drm_mode_status status;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+
+ status = intel_panel_mode_valid(intel_connector, mode);
+ if (status != MODE_OK)
+ return status;
+
if (fixed_mode->clock > max_pixclk)
return MODE_CLOCK_HIGH;
@@ -441,8 +446,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
* with the panel scaling set up to source from the H/VDisplay
* of the original mode.
*/
- intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
- adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -450,10 +456,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
if (HAS_PCH_SPLIT(dev_priv))
pipe_config->has_pch_encoder = true;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
+ ret = intel_panel_fitting(pipe_config, conn_state);
if (ret)
return ret;
@@ -906,7 +909,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
}
intel_encoder->get_hw_state = intel_lvds_get_hw_state;
intel_encoder->get_config = intel_lvds_get_config;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->shutdown = intel_lvds_shutdown;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -999,7 +1002,7 @@ out:
mutex_unlock(&dev->mode_config.mutex);
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index 3855fba70980..0065111593a6 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,10 +30,9 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include "display/intel_panel.h"
-
#include "i915_drv.h"
#include "intel_acpi.h"
+#include "intel_backlight.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
@@ -450,7 +449,7 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
- intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
+ intel_backlight_set_acpi(connector->base.state, bclp, 255);
drm_connector_list_iter_end(&conn_iter);
asle->cblv = DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID;
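A quick worked instance of the cblv value reported back through ASLE in the context line above (bclp chosen only for illustration):

        /* bclp = 128 (of 255):
         *   DIV_ROUND_UP(128 * 100, 255) = DIV_ROUND_UP(12800, 255) = 51
         *   asle->cblv = 51 | ASLE_CBLV_VALID
         */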
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7d7a60b4d2de..a0c8e43db5eb 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -28,26 +28,51 @@
* Chris Wilson <chris@chris-wilson.co.uk>
*/
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
#include <linux/kernel.h>
-#include <linux/moduleparam.h>
#include <linux/pwm.h>
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_types.h"
-#include "intel_dp_aux_backlight.h"
-#include "intel_dsi_dcs_backlight.h"
#include "intel_panel.h"
-void
-intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
- struct drm_display_mode *adjusted_mode)
+bool intel_panel_use_ssc(struct drm_i915_private *i915)
+{
+ if (i915->params.panel_use_ssc >= 0)
+ return i915->params.panel_use_ssc != 0;
+ return i915->vbt.lvds_use_ssc
+ && !(i915->quirks & QUIRK_LVDS_SSC_DISABLE);
+}
+
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode)
{
+ const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
+
+ if (!fixed_mode)
+ return 0;
+
+ /*
+ * We don't want to lie too much to the user about the refresh
+ * rate they're going to get. But we have to allow a bit of latitude
+ * for Xorg since it likes to automagically cook up modes with slightly
+ * off refresh rates.
+ */
+ if (abs(drm_mode_vrefresh(adjusted_mode) - drm_mode_vrefresh(fixed_mode)) > 1) {
+ drm_dbg_kms(connector->base.dev,
+ "[CONNECTOR:%d:%s] Requested mode vrefresh (%d Hz) does not match fixed mode vrefresh (%d Hz)\n",
+ connector->base.base.id, connector->base.name,
+ drm_mode_vrefresh(adjusted_mode), drm_mode_vrefresh(fixed_mode));
+
+ return -EINVAL;
+ }
+
drm_mode_copy(adjusted_mode, fixed_mode);
drm_mode_set_crtcinfo(adjusted_mode, 0);
+
+ return 0;
}
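As a usage sketch of the new tolerance check (hypothetical caller, numbers for illustration): a user mode that drm_mode_vrefresh() rounds to the same 60 Hz as the fixed mode passes, while a 48 Hz request differs by more than 1 Hz and is rejected:

        /* fixed mode: 60 Hz panel */
        ret = intel_panel_compute_config(connector, adjusted_mode);
        if (ret)        /* e.g. a 48 Hz request: |48 - 60| > 1, so -EINVAL */
                return ret;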
static bool is_downclock_mode(const struct drm_display_mode *downclock_mode,
@@ -175,8 +200,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
}
/* adjusted_mode has been preset to be the panel's fixed mode */
-int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static int pch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
@@ -381,8 +406,8 @@ static void i9xx_scale_aspect(struct intel_crtc_state *crtc_state,
}
}
-int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static int gmch_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -456,1783 +481,55 @@ out:
return 0;
}
-/**
- * scale - scale values from one range to another
- * @source_val: value in range [@source_min..@source_max]
- * @source_min: minimum legal value for @source_val
- * @source_max: maximum legal value for @source_val
- * @target_min: corresponding target value for @source_min
- * @target_max: corresponding target value for @source_max
- *
- * Return @source_val in range [@source_min..@source_max] scaled to range
- * [@target_min..@target_max].
- */
-static u32 scale(u32 source_val,
- u32 source_min, u32 source_max,
- u32 target_min, u32 target_max)
-{
- u64 target_val;
-
- WARN_ON(source_min > source_max);
- WARN_ON(target_min > target_max);
-
- /* defensive */
- source_val = clamp(source_val, source_min, source_max);
-
- /* avoid overflows */
- target_val = mul_u32_u32(source_val - source_min,
- target_max - target_min);
- target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min);
- target_val += target_min;
-
- return target_val;
-}
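A quick worked instance of the rounding behaviour of scale() above (values chosen only for illustration):

        /* scale(128, 0, 255, 100, 1000):
         *   (128 - 0) * (1000 - 100)           = 115200
         *   DIV_ROUND_CLOSEST(115200, 255 - 0) = 452
         *   452 + 100                          = 552
         */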
-
-/* Scale user_level in range [0..user_max] to [0..hw_max], clamping the result
- * to [hw_min..hw_max]. */
-static u32 clamp_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- hw_level = scale(user_level, 0, user_max, 0, panel->backlight.max);
- hw_level = clamp(hw_level, panel->backlight.min, panel->backlight.max);
-
- return hw_level;
-}
-
-/* Scale hw_level in range [hw_min..hw_max] to [0..user_max]. */
-static u32 scale_hw_to_user(struct intel_connector *connector,
- u32 hw_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(hw_level, panel->backlight.min, panel->backlight.max,
- 0, user_max);
-}
-
-u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- if (dev_priv->params.invert_brightness < 0)
- return val;
-
- if (dev_priv->params.invert_brightness > 0 ||
- dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
- return panel->backlight.pwm_level_max - val + panel->backlight.pwm_level_min;
- }
-
- return val;
-}
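The inversion above mirrors the level within [pwm_level_min..pwm_level_max]; a small worked instance with illustrative limits:

        /* pwm_level_min = 100, pwm_level_max = 1000, val = 300:
         *   1000 - 300 + 100 = 800
         * so a dim request becomes a bright one on inverted panels.
         */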
-
-void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_dbg_kms(&i915->drm, "set backlight PWM = %d\n", val);
- panel->backlight.pwm_funcs->set(conn_state, val);
-}
-
-u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
-
- val = scale(val, panel->backlight.min, panel->backlight.max,
- panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
-
- return intel_panel_invert_pwm_level(connector, val);
-}
-
-u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_WARN_ON_ONCE(&dev_priv->drm,
- panel->backlight.max == 0 || panel->backlight.pwm_level_max == 0);
-
- if (dev_priv->params.invert_brightness > 0 ||
- (dev_priv->params.invert_brightness == 0 && dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS))
- val = panel->backlight.pwm_level_max - (val - panel->backlight.pwm_level_min);
-
- return scale(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max,
- panel->backlight.min, panel->backlight.max);
-}
-
-static u32 lpt_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 pch_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 i9xx_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val;
-
- val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (DISPLAY_VER(dev_priv) < 4)
- val >>= 1;
-
- if (panel->backlight.combination_mode) {
- u8 lbpc;
-
- pci_read_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, &lbpc);
- val *= lbpc;
- }
-
- return val;
-}
-
-static u32 vlv_get_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return 0;
-
- return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
-}
-
-static u32 bxt_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- return intel_de_read(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller));
-}
-
-static u32 ext_pwm_get_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct intel_panel *panel = &connector->panel;
- struct pwm_state state;
-
- pwm_get_state(panel->backlight.pwm, &state);
- return pwm_get_relative_duty_cycle(&state, 100);
-}
-
-static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
-}
-
-static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
-}
-
-static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp, mask;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- if (panel->backlight.combination_mode) {
- u8 lbpc;
-
- lbpc = level * 0xfe / panel->backlight.pwm_level_max + 1;
- level /= lbpc;
- pci_write_config_byte(to_pci_dev(dev_priv->drm.dev), LBPC, lbpc);
- }
-
- if (DISPLAY_VER(dev_priv) == 4) {
- mask = BACKLIGHT_DUTY_CYCLE_MASK;
- } else {
- level <<= 1;
- mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
- }
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
- intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
-}
-
-static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
- u32 tmp;
-
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
-}
-
-static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
-}
-
-static void ext_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_panel *panel = &to_intel_connector(conn_state->connector)->panel;
-
- pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-static void
-intel_panel_actually_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- drm_dbg_kms(&i915->drm, "set backlight level = %d\n", level);
-
- panel->backlight.funcs->set(conn_state, level);
-}
-
-/* set backlight brightness to level in range [0..max], assuming hw min is
- * respected.
- */
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- /*
- * Lack of crtc may occur during driver init because
- * connection_mutex isn't held across the entire backlight
- * setup + modeset readout, and the BIOS can issue the
- * requests at any time.
- */
- if (!panel->backlight.present || !conn_state->crtc)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
-
- hw_level = clamp_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.device)
- panel->backlight.device->props.brightness =
- scale_hw_to_user(connector,
- panel->backlight.level,
- panel->backlight.device->props.max_brightness);
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static void lpt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, level);
-
- /*
- * Although we don't support or enable CPU PWM with LPT/SPT based
- * systems, it may have been enabled prior to loading the
- * driver. Disable to avoid warnings on LCPLL disable.
- *
- * This needs rework if we need to add support for CPU PWM on PCH split
- * platforms.
- */
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (tmp & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "cpu backlight was enabled, disabling\n");
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- tmp & ~BLM_PWM_ENABLE);
- }
-
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
-}
-
-static void pch_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
-}
-
-static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- intel_panel_set_pwm_level(old_conn_state, val);
-}
-
-static void i965_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct drm_i915_private *dev_priv = to_i915(old_conn_state->connector->dev);
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
-}
-
-static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum pipe pipe = to_intel_crtc(old_conn_state->crtc)->pipe;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- tmp & ~BLM_PWM_ENABLE);
-}
-
-static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
-
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
- }
-}
-
-static void cnp_disable_backlight(const struct drm_connector_state *old_conn_state, u32 val)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 tmp;
-
- intel_panel_set_pwm_level(old_conn_state, val);
-
- tmp = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
-}
-
-static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_state.enabled = false;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state)
-{
- struct intel_connector *connector = to_intel_connector(old_conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- if (!panel->backlight.present)
- return;
-
- /*
- * Do not disable backlight on the vga_switcheroo path. When switching
- * away from i915, the other client may depend on i915 to handle the
- * backlight. This will leave the backlight on unnecessarily when
- * another client is not activated.
- */
- if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- drm_dbg_kms(&dev_priv->drm,
- "Skipping backlight disable on vga switch\n");
- return;
- }
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_POWERDOWN;
- panel->backlight.enabled = false;
- panel->backlight.funcs->disable(old_conn_state, 0);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pch_ctl1, pch_ctl2, schicken;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
- pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- }
-
- if (HAS_PCH_LPT(dev_priv)) {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= LPT_PWM_GRANULARITY;
- else
- schicken &= ~LPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
- } else {
- schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
- if (panel->backlight.alternate_pwm_increment)
- schicken |= SPT_PWM_GRANULARITY;
- else
- schicken &= ~SPT_PWM_GRANULARITY;
- intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
- }
-
- pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
-
- pch_ctl1 = 0;
- if (panel->backlight.active_low_pwm)
- pch_ctl1 |= BLM_PCH_POLARITY;
-
- /* After LPT, override is the default. */
- if (HAS_PCH_LPT(dev_priv))
- pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
-
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
-
- /* This won't stick until the above enable. */
- intel_panel_set_pwm_level(conn_state, level);
-}
-
-static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2;
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- if (cpu_ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
- cpu_ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- }
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
- pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- }
-
- if (cpu_transcoder == TRANSCODER_EDP)
- cpu_ctl2 = BLM_TRANSCODER_EDP;
- else
- cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
-
- /* This won't stick until the above enable. */
- intel_panel_set_pwm_level(conn_state, level);
-
- pch_ctl2 = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
-
- pch_ctl1 = 0;
- if (panel->backlight.active_low_pwm)
- pch_ctl1 |= BLM_PCH_POLARITY;
-
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
- intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_PWM_ENABLE);
-}
-
-static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, freq;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- intel_de_write(dev_priv, BLC_PWM_CTL, 0);
- }
-
- freq = panel->backlight.pwm_level_max;
- if (panel->backlight.combination_mode)
- freq /= 0xff;
-
- ctl = freq << 17;
- if (panel->backlight.combination_mode)
- ctl |= BLM_LEGACY_MODE;
- if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
- ctl |= BLM_POLARITY_PNV;
-
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL);
-
- /* XXX: combine this into above write? */
- intel_panel_set_pwm_level(conn_state, level);
-
- /*
- * Needed to enable backlight on some 855gm models. BLC_HIST_CTL is
- * 855gm only, but checking for gen2 is safe, as 855gm is the only gen2
- * that has backlight.
- */
- if (DISPLAY_VER(dev_priv) == 2)
- intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
-}
-
-static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
- u32 ctl, ctl2, freq;
-
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
- if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- }
-
- freq = panel->backlight.pwm_level_max;
- if (panel->backlight.combination_mode)
- freq /= 0xff;
-
- ctl = freq << 16;
- intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
-
- ctl2 = BLM_PIPE(pipe);
- if (panel->backlight.combination_mode)
- ctl2 |= BLM_COMBINATION_MODE;
- if (panel->backlight.active_low_pwm)
- ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
- intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
- intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
-
- intel_panel_set_pwm_level(conn_state, level);
-}
-
-static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- u32 ctl, ctl2;
-
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- if (ctl2 & BLM_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- ctl2 &= ~BLM_PWM_ENABLE;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- }
-
- ctl = panel->backlight.pwm_level_max << 16;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
-
- /* XXX: combine this into above write? */
- intel_panel_set_pwm_level(conn_state, level);
-
- ctl2 = 0;
- if (panel->backlight.active_low_pwm)
- ctl2 |= BLM_POLARITY_I965;
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
- intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
- ctl2 | BLM_PWM_ENABLE);
-}
-
-static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- u32 pwm_ctl, val;
-
- /* Controller 1 uses the utility pin. */
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- if (val & UTIL_PIN_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
- "util pin already enabled\n");
- val &= ~UTIL_PIN_ENABLE;
- intel_de_write(dev_priv, UTIL_PIN_CTL, val);
- }
-
- val = 0;
- if (panel->backlight.util_pin_active_low)
- val |= UTIL_PIN_POLARITY;
- intel_de_write(dev_priv, UTIL_PIN_CTL,
- val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
- }
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- }
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.pwm_level_max);
-
- intel_panel_set_pwm_level(conn_state, level);
-
- pwm_ctl = 0;
- if (panel->backlight.active_low_pwm)
- pwm_ctl |= BXT_BLC_PWM_POLARITY;
-
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
-}
-
-static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
- pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- intel_de_write(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- }
-
- intel_de_write(dev_priv,
- BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.pwm_level_max);
-
- intel_panel_set_pwm_level(conn_state, level);
-
- pwm_ctl = 0;
- if (panel->backlight.active_low_pwm)
- pwm_ctl |= BXT_BLC_PWM_POLARITY;
-
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
- intel_de_posting_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
- intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
-}
-
-static void ext_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- pwm_set_relative_duty_cycle(&panel->backlight.pwm_state, level, 100);
- panel->backlight.pwm_state.enabled = true;
- pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-}
-
-static void __intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- WARN_ON(panel->backlight.max == 0);
-
- if (panel->backlight.level <= panel->backlight.min) {
- panel->backlight.level = panel->backlight.max;
- if (panel->backlight.device)
- panel->backlight.device->props.brightness =
- scale_hw_to_user(connector,
- panel->backlight.level,
- panel->backlight.device->props.max_brightness);
- }
-
- panel->backlight.funcs->enable(crtc_state, conn_state, panel->backlight.level);
- panel->backlight.enabled = true;
- if (panel->backlight.device)
- panel->backlight.device->props.power = FB_BLANK_UNBLANK;
-}
-
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- if (!panel->backlight.present)
- return;
-
- drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
-
- mutex_lock(&dev_priv->backlight_lock);
-
- __intel_panel_enable_backlight(crtc_state, conn_state);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-static u32 intel_panel_get_backlight(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 val = 0;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- if (panel->backlight.enabled)
- val = panel->backlight.funcs->get(connector, intel_connector_get_pipe(connector));
-
- mutex_unlock(&dev_priv->backlight_lock);
-
- drm_dbg_kms(&dev_priv->drm, "get backlight PWM = %d\n", val);
- return val;
-}
-
-/* Scale user_level in range [0..user_max] to [hw_min..hw_max]. */
-static u32 scale_user_to_hw(struct intel_connector *connector,
- u32 user_level, u32 user_max)
-{
- struct intel_panel *panel = &connector->panel;
-
- return scale(user_level, 0, user_max,
- panel->backlight.min, panel->backlight.max);
-}
-
-/* set backlight brightness to level in range [0..max], scaling wrt hw min */
-static void intel_panel_set_backlight(const struct drm_connector_state *conn_state,
- u32 user_level, u32 user_max)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 hw_level;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
-
- hw_level = scale_user_to_hw(connector, user_level, user_max);
- panel->backlight.level = hw_level;
-
- if (panel->backlight.enabled)
- intel_panel_actually_set_backlight(conn_state, hw_level);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-static int intel_backlight_device_update_status(struct backlight_device *bd)
-{
- struct intel_connector *connector = bl_get_data(bd);
- struct intel_panel *panel = &connector->panel;
- struct drm_device *dev = connector->base.dev;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
- DRM_DEBUG_KMS("updating intel_backlight, brightness=%d/%d\n",
- bd->props.brightness, bd->props.max_brightness);
- intel_panel_set_backlight(connector->base.state, bd->props.brightness,
- bd->props.max_brightness);
-
- /*
- * Allow flipping bl_power as a sub-state of enabled. Sadly the
- * backlight class device does not make it easy to differentiate
- * between callbacks for brightness and bl_power, so our backlight_power
- * callback needs to take this into account.
- */
- if (panel->backlight.enabled) {
- if (panel->backlight.power) {
- bool enable = bd->props.power == FB_BLANK_UNBLANK &&
- bd->props.brightness != 0;
- panel->backlight.power(connector, enable);
- }
- } else {
- bd->props.power = FB_BLANK_POWERDOWN;
- }
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- return 0;
-}
-
-static int intel_backlight_device_get_brightness(struct backlight_device *bd)
-{
- struct intel_connector *connector = bl_get_data(bd);
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- intel_wakeref_t wakeref;
- int ret = 0;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- u32 hw_level;
-
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
-
- hw_level = intel_panel_get_backlight(connector);
- ret = scale_hw_to_user(connector,
- hw_level, bd->props.max_brightness);
-
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
- }
-
- return ret;
-}
-
-static const struct backlight_ops intel_backlight_device_ops = {
- .update_status = intel_backlight_device_update_status,
- .get_brightness = intel_backlight_device_get_brightness,
-};
-
-int intel_backlight_device_register(struct intel_connector *connector)
-{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- struct backlight_properties props;
- struct backlight_device *bd;
- const char *name;
- int ret = 0;
-
- if (WARN_ON(panel->backlight.device))
- return -ENODEV;
-
- if (!panel->backlight.present)
- return 0;
-
- WARN_ON(panel->backlight.max == 0);
-
- memset(&props, 0, sizeof(props));
- props.type = BACKLIGHT_RAW;
-
- /*
- * Note: Everything should work even if the backlight device max
- * presented to the userspace is arbitrarily chosen.
- */
- props.max_brightness = panel->backlight.max;
- props.brightness = scale_hw_to_user(connector,
- panel->backlight.level,
- props.max_brightness);
-
- if (panel->backlight.enabled)
- props.power = FB_BLANK_UNBLANK;
- else
- props.power = FB_BLANK_POWERDOWN;
-
- name = kstrdup("intel_backlight", GFP_KERNEL);
- if (!name)
- return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
-
- /*
- * Using the same name independent of the drm device or connector
- * prevents registration of multiple backlight devices in the
- * driver. However, we need to use the default name for backward
- * compatibility. Use unique names for subsequent backlight devices as a
- * fallback when the default name already exists.
- */
- if (IS_ERR(bd) && PTR_ERR(bd) == -EEXIST) {
- kfree(name);
- name = kasprintf(GFP_KERNEL, "card%d-%s-backlight",
- i915->drm.primary->index, connector->base.name);
- if (!name)
- return -ENOMEM;
-
- bd = backlight_device_register(name, connector->base.kdev, connector,
- &intel_backlight_device_ops, &props);
- }
-
- if (IS_ERR(bd)) {
- drm_err(&i915->drm,
- "[CONNECTOR:%d:%s] backlight device %s register failed: %ld\n",
- connector->base.base.id, connector->base.name, name, PTR_ERR(bd));
- ret = PTR_ERR(bd);
- goto out;
- }
-
- panel->backlight.device = bd;
-
- drm_dbg_kms(&i915->drm,
- "[CONNECTOR:%d:%s] backlight device %s registered\n",
- connector->base.base.id, connector->base.name, name);
-
-out:
- kfree(name);
-
- return ret;
-}
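For illustration, the -EEXIST fallback above yields names of the following shape (hypothetical card index and connector name):

        /* drm.primary->index == 1, connector "eDP-1":
         *   "card1-eDP-1-backlight"
         */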
-
-void intel_backlight_device_unregister(struct intel_connector *connector)
-{
- struct intel_panel *panel = &connector->panel;
-
- if (panel->backlight.device) {
- backlight_device_unregister(panel->backlight.device);
- panel->backlight.device = NULL;
- }
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-
-/*
- * CNP: PWM clock frequency is 19.2 MHz or 24 MHz.
- * PWM increment = 1
- */
-static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
- pwm_freq_hz);
-}
-
-/*
- * BXT: PWM clock frequency = 19.2 MHz.
- */
-static u32 bxt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- return DIV_ROUND_CLOSEST(KHz(19200), pwm_freq_hz);
-}
-
-/*
- * SPT: This value represents the period of the PWM stream in clock periods
- * multiplied by 16 (default increment) or 128 (alternate increment selected in
- * SCHICKEN_1 bit 0). PWM clock is 24 MHz.
- */
-static u32 spt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct intel_panel *panel = &connector->panel;
- u32 mul;
-
- if (panel->backlight.alternate_pwm_increment)
- mul = 128;
- else
- mul = 16;
-
- return DIV_ROUND_CLOSEST(MHz(24), pwm_freq_hz * mul);
-}
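A worked instance of the SPT conversion above, using the 200 Hz fallback frequency that get_vbt_pwm_freq() applies later in this file (numbers only for illustration):

        /* default increment (mul = 16), pwm_freq_hz = 200:
         *   DIV_ROUND_CLOSEST(MHz(24), 200 * 16) = 24000000 / 3200 = 7500
         */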
-
-/*
- * LPT: This value represents the period of the PWM stream in clock periods
- * multiplied by 128 (default increment) or 16 (alternate increment, selected in
- * LPT SOUTH_CHICKEN2 register bit 5).
- */
-static u32 lpt_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 mul, clock;
-
- if (panel->backlight.alternate_pwm_increment)
- mul = 16;
- else
- mul = 128;
-
- if (HAS_PCH_LPT_H(dev_priv))
- clock = MHz(135); /* LPT:H */
- else
- clock = MHz(24); /* LPT:LP */
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
-}
-
-/*
- * ILK/SNB/IVB: This value represents the period of the PWM stream in PCH
- * display raw clocks multiplied by 128.
- */
-static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
-
- return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
- pwm_freq_hz * 128);
-}
-
-/*
- * Gen2: This field determines the number of time base events (display core
- * clock frequency/32) in total for a complete cycle of modulated backlight
- * control.
- *
- * Gen3: A time base event equals the display core clock ([DevPNV] HRAW clock)
- * divided by 32.
- */
-static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int clock;
-
- if (IS_PINEVIEW(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 32);
-}
-
-/*
- * Gen4: This value represents the period of the PWM stream in display core
- * clocks ([DevCTG] HRAW clocks) multiplied by 128.
- *
- */
-static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int clock;
-
- if (IS_G4X(dev_priv))
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- else
- clock = KHz(dev_priv->cdclk.hw.cdclk);
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * 128);
-}
-
-/*
- * VLV: This value represents the period of the PWM stream in display core
- * clocks ([DevCTG] 200MHz HRAW clocks) multiplied by 128 or 25MHz S0IX clocks
- * multiplied by 16. CHV uses a 19.2MHz S0IX clock.
- */
-static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- int mul, clock;
-
- if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
- if (IS_CHERRYVIEW(dev_priv))
- clock = KHz(19200);
- else
- clock = MHz(25);
- mul = 16;
- } else {
- clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
- mul = 128;
- }
-
- return DIV_ROUND_CLOSEST(clock, pwm_freq_hz * mul);
-}
-
-static u16 get_vbt_pwm_freq(struct drm_i915_private *dev_priv)
-{
- u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz;
-
- if (pwm_freq_hz) {
- drm_dbg_kms(&dev_priv->drm,
- "VBT defined backlight frequency %u Hz\n",
- pwm_freq_hz);
- } else {
- pwm_freq_hz = 200;
- drm_dbg_kms(&dev_priv->drm,
- "default backlight frequency %u Hz\n",
- pwm_freq_hz);
- }
-
- return pwm_freq_hz;
-}
-
-static u32 get_backlight_max_vbt(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u16 pwm_freq_hz = get_vbt_pwm_freq(dev_priv);
- u32 pwm;
-
- if (!panel->backlight.pwm_funcs->hz_to_pwm) {
- drm_dbg_kms(&dev_priv->drm,
- "backlight frequency conversion not supported\n");
- return 0;
- }
-
- pwm = panel->backlight.pwm_funcs->hz_to_pwm(connector, pwm_freq_hz);
- if (!pwm) {
- drm_dbg_kms(&dev_priv->drm,
- "backlight frequency conversion failed\n");
- return 0;
- }
-
- return pwm;
-}
-
-/*
- * Note: The setup hooks can't assume pipe is set!
- */
-static u32 get_backlight_min_vbt(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- int min;
-
- drm_WARN_ON(&dev_priv->drm, panel->backlight.pwm_level_max == 0);
-
- /*
- * XXX: If the vbt value is 255, it makes min equal to max, which leads
- * to problems. There are such machines out there. Either our
- * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
- * against this by letting the minimum be at most (arbitrarily chosen)
- * 25% of the max.
- */
- min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
- if (min != dev_priv->vbt.backlight.min_brightness) {
- drm_dbg_kms(&dev_priv->drm,
- "clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
- }
-
- /* vbt value is a coefficient in range [0..255] */
- return scale(min, 0, 255, 0, panel->backlight.pwm_level_max);
-}
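Concretely (illustrative values), a bogus VBT minimum of 255 is clamped to 64 and then rescaled against the PWM range, landing near the 25% ceiling the comment above describes:

        /* vbt min_brightness = 255, pwm_level_max = 7500:
         *   clamp_t(int, 255, 0, 64)   = 64
         *   scale(64, 0, 255, 0, 7500) = 1882   (~25% of 7500)
         */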
-
-static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
- bool alt, cpu_mode;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- if (HAS_PCH_LPT(dev_priv))
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ if (HAS_GMCH(i915))
+ return gmch_panel_fitting(crtc_state, conn_state);
else
- alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
- panel->backlight.alternate_pwm_increment = alt;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
-
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.pwm_level_max = pch_ctl2 >> 16;
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pch_ctl1 & BLM_PCH_PWM_ENABLE;
-
- cpu_mode = panel->backlight.pwm_enabled && HAS_PCH_LPT(dev_priv) &&
- !(pch_ctl1 & BLM_PCH_OVERRIDE_ENABLE) &&
- (cpu_ctl2 & BLM_PWM_ENABLE);
-
- if (cpu_mode) {
- val = pch_get_backlight(connector, unused);
-
- drm_dbg_kms(&dev_priv->drm,
- "CPU backlight register was enabled, switching to PCH override\n");
-
- /* Write converted CPU PWM value to PCH override register */
- lpt_set_backlight(connector->base.state, val);
- intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
- pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
-
- intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
- cpu_ctl2 & ~BLM_PWM_ENABLE);
- }
-
- return 0;
-}
-
-static int pch_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 cpu_ctl2, pch_ctl1, pch_ctl2;
-
- pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
- panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
-
- pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
- panel->backlight.pwm_level_max = pch_ctl2 >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
- panel->backlight.pwm_enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
- (pch_ctl1 & BLM_PCH_PWM_ENABLE);
-
- return 0;
-}
-
-static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, val;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
-
- if (DISPLAY_VER(dev_priv) == 2 || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
- panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
-
- if (IS_PINEVIEW(dev_priv))
- panel->backlight.active_low_pwm = ctl & BLM_POLARITY_PNV;
-
- panel->backlight.pwm_level_max = ctl >> 17;
-
- if (!panel->backlight.pwm_level_max) {
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
- panel->backlight.pwm_level_max >>= 1;
- }
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- if (panel->backlight.combination_mode)
- panel->backlight.pwm_level_max *= 0xff;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- val = i9xx_get_backlight(connector, unused);
- val = intel_panel_invert_pwm_level(connector, val);
- val = clamp(val, panel->backlight.pwm_level_min, panel->backlight.pwm_level_max);
-
- panel->backlight.pwm_enabled = val != 0;
-
- return 0;
-}
-
-static int i965_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2;
-
- ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
- panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
- panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
-
- ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
- panel->backlight.pwm_level_max = ctl >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- if (panel->backlight.combination_mode)
- panel->backlight.pwm_level_max *= 0xff;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
-
- return 0;
-}
-
-static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 ctl, ctl2;
-
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
- return -ENODEV;
-
- ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
- panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
-
- ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
- panel->backlight.pwm_level_max = ctl >> 16;
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = ctl2 & BLM_PWM_ENABLE;
-
- return 0;
-}
-
-static int
-bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl, val;
-
- panel->backlight.controller = dev_priv->vbt.backlight.controller;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
-
- /* Controller 1 uses the utility pin. */
- if (panel->backlight.controller == 1) {
- val = intel_de_read(dev_priv, UTIL_PIN_CTL);
- panel->backlight.util_pin_active_low =
- val & UTIL_PIN_POLARITY;
- }
-
- panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
-
- return 0;
-}
-
-static int
-cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
- u32 pwm_ctl;
-
- /*
- * CNP has the BXT implementation of backlight, but with only one
- * controller. TODO: ICP has multiple controllers but we only use
- * controller 0 for now.
- */
- panel->backlight.controller = 0;
-
- pwm_ctl = intel_de_read(dev_priv,
- BXT_BLC_PWM_CTL(panel->backlight.controller));
-
- panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
- panel->backlight.pwm_level_max =
- intel_de_read(dev_priv, BXT_BLC_PWM_FREQ(panel->backlight.controller));
-
- if (!panel->backlight.pwm_level_max)
- panel->backlight.pwm_level_max = get_backlight_max_vbt(connector);
-
- if (!panel->backlight.pwm_level_max)
- return -ENODEV;
-
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- panel->backlight.pwm_enabled = pwm_ctl & BXT_BLC_PWM_ENABLE;
-
- return 0;
-}
-
-static int ext_pwm_setup_backlight(struct intel_connector *connector,
- enum pipe pipe)
-{
- struct drm_device *dev = connector->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_panel *panel = &connector->panel;
- const char *desc;
- u32 level;
-
- /* Get the right PWM chip for DSI backlight according to VBT */
- if (dev_priv->vbt.dsi.config->pwm_blc == PPS_BLC_PMIC) {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_pmic_backlight");
- desc = "PMIC";
- } else {
- panel->backlight.pwm = pwm_get(dev->dev, "pwm_soc_backlight");
- desc = "SoC";
- }
-
- if (IS_ERR(panel->backlight.pwm)) {
- drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
- desc);
- panel->backlight.pwm = NULL;
- return -ENODEV;
- }
-
- panel->backlight.pwm_level_max = 100; /* 100% */
- panel->backlight.pwm_level_min = get_backlight_min_vbt(connector);
-
- if (pwm_is_enabled(panel->backlight.pwm)) {
- /* PWM is already enabled, use existing settings */
- pwm_get_state(panel->backlight.pwm, &panel->backlight.pwm_state);
-
- level = pwm_get_relative_duty_cycle(&panel->backlight.pwm_state,
- 100);
- level = intel_panel_invert_pwm_level(connector, level);
- panel->backlight.pwm_enabled = true;
-
- drm_dbg_kms(&dev_priv->drm, "PWM already enabled at freq %ld, VBT freq %d, level %d\n",
- NSEC_PER_SEC / (unsigned long)panel->backlight.pwm_state.period,
- get_vbt_pwm_freq(dev_priv), level);
- } else {
- /* Set period from VBT frequency, leave other settings at 0. */
- panel->backlight.pwm_state.period =
- NSEC_PER_SEC / get_vbt_pwm_freq(dev_priv);
- }
-
- drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
- desc);
- return 0;
-}
-
-static void intel_pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->set(conn_state,
- intel_panel_invert_pwm_level(connector, level));
+ return pch_panel_fitting(crtc_state, conn_state);
}
-static u32 intel_pwm_get_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct intel_panel *panel = &connector->panel;
-
- return intel_panel_invert_pwm_level(connector,
- panel->backlight.pwm_funcs->get(connector, pipe));
-}
-
-static void intel_pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->enable(crtc_state, conn_state,
- intel_panel_invert_pwm_level(connector, level));
-}
-
-static void intel_pwm_disable_backlight(const struct drm_connector_state *conn_state, u32 level)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_panel *panel = &connector->panel;
-
- panel->backlight.pwm_funcs->disable(conn_state,
- intel_panel_invert_pwm_level(connector, level));
-}
-
-static int intel_pwm_setup_backlight(struct intel_connector *connector, enum pipe pipe)
-{
- struct intel_panel *panel = &connector->panel;
- int ret = panel->backlight.pwm_funcs->setup(connector, pipe);
-
- if (ret < 0)
- return ret;
-
- panel->backlight.min = panel->backlight.pwm_level_min;
- panel->backlight.max = panel->backlight.pwm_level_max;
- panel->backlight.level = intel_pwm_get_backlight(connector, pipe);
- panel->backlight.enabled = panel->backlight.pwm_enabled;
-
- return 0;
-}
-
-void intel_panel_update_backlight(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_panel *panel = &connector->panel;
-
- if (!panel->backlight.present)
- return;
-
- mutex_lock(&dev_priv->backlight_lock);
- if (!panel->backlight.enabled)
- __intel_panel_enable_backlight(crtc_state, conn_state);
-
- mutex_unlock(&dev_priv->backlight_lock);
-}
-
-int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
+enum drm_connector_status
+intel_panel_detect(struct drm_connector *connector, bool force)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_connector *intel_connector = to_intel_connector(connector);
- struct intel_panel *panel = &intel_connector->panel;
- int ret;
-
- if (!dev_priv->vbt.backlight.present) {
- if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT, but present per quirk\n");
- } else {
- drm_dbg_kms(&dev_priv->drm,
- "no backlight present per VBT\n");
- return 0;
- }
- }
-
- /* ensure intel_panel has been initialized first */
- if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.funcs))
- return -ENODEV;
-
- /* set level and max in panel struct */
- mutex_lock(&dev_priv->backlight_lock);
- ret = panel->backlight.funcs->setup(intel_connector, pipe);
- mutex_unlock(&dev_priv->backlight_lock);
-
- if (ret) {
- drm_dbg_kms(&dev_priv->drm,
- "failed to setup backlight for connector %s\n",
- connector->name);
- return ret;
- }
-
- panel->backlight.present = true;
-
- drm_dbg_kms(&dev_priv->drm,
- "Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->name,
- enableddisabled(panel->backlight.enabled),
- panel->backlight.level, panel->backlight.max);
-
- return 0;
-}
+ struct drm_i915_private *i915 = to_i915(connector->dev);
-static void intel_panel_destroy_backlight(struct intel_panel *panel)
-{
- /* dispose of the pwm */
- if (panel->backlight.pwm)
- pwm_put(panel->backlight.pwm);
+ if (!INTEL_DISPLAY_ENABLED(i915))
+ return connector_status_disconnected;
- panel->backlight.present = false;
+ return connector_status_connected;
}
-static const struct intel_panel_bl_funcs bxt_pwm_funcs = {
- .setup = bxt_setup_backlight,
- .enable = bxt_enable_backlight,
- .disable = bxt_disable_backlight,
- .set = bxt_set_backlight,
- .get = bxt_get_backlight,
- .hz_to_pwm = bxt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs cnp_pwm_funcs = {
- .setup = cnp_setup_backlight,
- .enable = cnp_enable_backlight,
- .disable = cnp_disable_backlight,
- .set = bxt_set_backlight,
- .get = bxt_get_backlight,
- .hz_to_pwm = cnp_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs lpt_pwm_funcs = {
- .setup = lpt_setup_backlight,
- .enable = lpt_enable_backlight,
- .disable = lpt_disable_backlight,
- .set = lpt_set_backlight,
- .get = lpt_get_backlight,
- .hz_to_pwm = lpt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs spt_pwm_funcs = {
- .setup = lpt_setup_backlight,
- .enable = lpt_enable_backlight,
- .disable = lpt_disable_backlight,
- .set = lpt_set_backlight,
- .get = lpt_get_backlight,
- .hz_to_pwm = spt_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs pch_pwm_funcs = {
- .setup = pch_setup_backlight,
- .enable = pch_enable_backlight,
- .disable = pch_disable_backlight,
- .set = pch_set_backlight,
- .get = pch_get_backlight,
- .hz_to_pwm = pch_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs ext_pwm_funcs = {
- .setup = ext_pwm_setup_backlight,
- .enable = ext_pwm_enable_backlight,
- .disable = ext_pwm_disable_backlight,
- .set = ext_pwm_set_backlight,
- .get = ext_pwm_get_backlight,
-};
-
-static const struct intel_panel_bl_funcs vlv_pwm_funcs = {
- .setup = vlv_setup_backlight,
- .enable = vlv_enable_backlight,
- .disable = vlv_disable_backlight,
- .set = vlv_set_backlight,
- .get = vlv_get_backlight,
- .hz_to_pwm = vlv_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs i965_pwm_funcs = {
- .setup = i965_setup_backlight,
- .enable = i965_enable_backlight,
- .disable = i965_disable_backlight,
- .set = i9xx_set_backlight,
- .get = i9xx_get_backlight,
- .hz_to_pwm = i965_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs i9xx_pwm_funcs = {
- .setup = i9xx_setup_backlight,
- .enable = i9xx_enable_backlight,
- .disable = i9xx_disable_backlight,
- .set = i9xx_set_backlight,
- .get = i9xx_get_backlight,
- .hz_to_pwm = i9xx_hz_to_pwm,
-};
-
-static const struct intel_panel_bl_funcs pwm_bl_funcs = {
- .setup = intel_pwm_setup_backlight,
- .enable = intel_pwm_enable_backlight,
- .disable = intel_pwm_disable_backlight,
- .set = intel_pwm_set_backlight,
- .get = intel_pwm_get_backlight,
-};
-
-/* Set up chip specific backlight functions */
-static void
-intel_panel_init_backlight_funcs(struct intel_panel *panel)
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode)
{
- struct intel_connector *connector =
- container_of(panel, struct intel_connector, panel);
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ const struct drm_display_mode *fixed_mode = connector->panel.fixed_mode;
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI &&
- intel_dsi_dcs_init_backlight_funcs(connector) == 0)
- return;
-
- if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv)) {
- panel->backlight.pwm_funcs = &bxt_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP) {
- panel->backlight.pwm_funcs = &cnp_pwm_funcs;
- } else if (INTEL_PCH_TYPE(dev_priv) >= PCH_LPT) {
- if (HAS_PCH_LPT(dev_priv))
- panel->backlight.pwm_funcs = &lpt_pwm_funcs;
- else
- panel->backlight.pwm_funcs = &spt_pwm_funcs;
- } else if (HAS_PCH_SPLIT(dev_priv)) {
- panel->backlight.pwm_funcs = &pch_pwm_funcs;
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_DSI) {
- panel->backlight.pwm_funcs = &ext_pwm_funcs;
- } else {
- panel->backlight.pwm_funcs = &vlv_pwm_funcs;
- }
- } else if (DISPLAY_VER(dev_priv) == 4) {
- panel->backlight.pwm_funcs = &i965_pwm_funcs;
- } else {
- panel->backlight.pwm_funcs = &i9xx_pwm_funcs;
- }
-
- if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP &&
- intel_dp_aux_init_backlight_funcs(connector) == 0)
- return;
+ if (!fixed_mode)
+ return MODE_OK;
- /* We're using a standard PWM backlight interface */
- panel->backlight.funcs = &pwm_bl_funcs;
-}
+ if (mode->hdisplay != fixed_mode->hdisplay)
+ return MODE_PANEL;
-enum drm_connector_status
-intel_panel_detect(struct drm_connector *connector, bool force)
-{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ if (mode->vdisplay != fixed_mode->vdisplay)
+ return MODE_PANEL;
- if (!INTEL_DISPLAY_ENABLED(i915))
- return connector_status_disconnected;
+ if (drm_mode_vrefresh(mode) != drm_mode_vrefresh(fixed_mode))
+ return MODE_PANEL;
- return connector_status_connected;
+ return MODE_OK;
}
int intel_panel_init(struct intel_panel *panel,
struct drm_display_mode *fixed_mode,
struct drm_display_mode *downclock_mode)
{
- intel_panel_init_backlight_funcs(panel);
+ intel_backlight_init_funcs(panel);
panel->fixed_mode = fixed_mode;
panel->downclock_mode = downclock_mode;
@@ -2245,7 +542,7 @@ void intel_panel_fini(struct intel_panel *panel)
struct intel_connector *intel_connector =
container_of(panel, struct intel_connector, panel);
- intel_panel_destroy_backlight(panel);
+ intel_backlight_destroy(panel);
if (panel->fixed_mode)
drm_mode_destroy(intel_connector->base.dev, panel->fixed_mode);
diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h
index 1d340f77bffc..d50b3f7e9e58 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.h
+++ b/drivers/gpu/drm/i915/display/intel_panel.h
@@ -8,15 +8,13 @@
#include <linux/types.h>
-#include "intel_display.h"
-
+enum drm_connector_status;
struct drm_connector;
struct drm_connector_state;
struct drm_display_mode;
+struct drm_i915_private;
struct intel_connector;
-struct intel_crtc;
struct intel_crtc_state;
-struct intel_encoder;
struct intel_panel;
int intel_panel_init(struct intel_panel *panel,
@@ -25,23 +23,16 @@ int intel_panel_init(struct intel_panel *panel,
void intel_panel_fini(struct intel_panel *panel);
enum drm_connector_status
intel_panel_detect(struct drm_connector *connector, bool force);
-void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode,
+bool intel_panel_use_ssc(struct drm_i915_private *i915);
+void intel_panel_fixed_mode(const struct drm_display_mode *fixed_mode,
struct drm_display_mode *adjusted_mode);
-int intel_pch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-int intel_gmch_panel_fitting(struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state,
- u32 level, u32 max);
-int intel_panel_setup_backlight(struct drm_connector *connector,
- enum pipe pipe);
-void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_update_backlight(struct intel_atomic_state *state,
- struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
-void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_state);
+enum drm_mode_status
+intel_panel_mode_valid(struct intel_connector *connector,
+ const struct drm_display_mode *mode);
+int intel_panel_fitting(struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+int intel_panel_compute_config(struct intel_connector *connector,
+ struct drm_display_mode *adjusted_mode);
struct drm_display_mode *
intel_panel_edid_downclock_mode(struct intel_connector *connector,
const struct drm_display_mode *fixed_mode);
@@ -49,22 +40,5 @@ struct drm_display_mode *
intel_panel_edid_fixed_mode(struct intel_connector *connector);
struct drm_display_mode *
intel_panel_vbt_fixed_mode(struct intel_connector *connector);
-void intel_panel_set_pwm_level(const struct drm_connector_state *conn_state, u32 level);
-u32 intel_panel_invert_pwm_level(struct intel_connector *connector, u32 level);
-u32 intel_panel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
-u32 intel_panel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);
-
-#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
-int intel_backlight_device_register(struct intel_connector *connector);
-void intel_backlight_device_unregister(struct intel_connector *connector);
-#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static inline int intel_backlight_device_register(struct intel_connector *connector)
-{
- return 0;
-}
-static inline void intel_backlight_device_unregister(struct intel_connector *connector)
-{
-}
-#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
#endif /* __INTEL_PANEL_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c
new file mode 100644
index 000000000000..dcd698a02da2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "intel_display_types.h"
+#include "intel_plane_initial.h"
+#include "intel_atomic_plane.h"
+#include "intel_display.h"
+#include "intel_fb.h"
+
+static bool
+intel_reuse_initial_plane_obj(struct drm_i915_private *i915,
+ const struct intel_initial_plane_config *plane_config,
+ struct drm_framebuffer **fb,
+ struct i915_vma **vma)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+
+ if (!crtc_state->uapi.active)
+ continue;
+
+ if (!plane_state->ggtt_vma)
+ continue;
+
+ if (intel_plane_ggtt_offset(plane_state) == plane_config->base) {
+ *fb = plane_state->hw.fb;
+ *vma = plane_state->ggtt_vma;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
+ size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ /*
+ * Mark it WT ahead of time to avoid changing the
+ * cache_level during fbdev initialization. The
+ * unbind there would get stuck waiting for rcu.
+ */
+ i915_gem_object_set_cache_coherency(obj, HAS_WT(i915) ?
+ I915_CACHE_WT : I915_CACHE_NONE);
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, NULL, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
+static bool
+intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+ struct i915_vma *vma;
+
+ switch (fb->modifier) {
+ case DRM_FORMAT_MOD_LINEAR:
+ case I915_FORMAT_MOD_X_TILED:
+ case I915_FORMAT_MOD_Y_TILED:
+ break;
+ default:
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
+ return false;
+ }
+
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
+ return false;
+
+ mode_cmd.pixel_format = fb->format->format;
+ mode_cmd.width = fb->width;
+ mode_cmd.height = fb->height;
+ mode_cmd.pitches[0] = fb->pitches[0];
+ mode_cmd.modifier[0] = fb->modifier;
+ mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
+
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
+ }
+
+ plane_config->vma = vma;
+ return true;
+
+err_vma:
+ i915_vma_put(vma);
+ return false;
+}
+
+static void
+intel_find_initial_plane_obj(struct intel_crtc *crtc,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_device *dev = crtc->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_plane *plane =
+ to_intel_plane(crtc->base.primary);
+ struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ struct drm_framebuffer *fb;
+ struct i915_vma *vma;
+
+ /*
+ * TODO:
+ * Disable planes if get_initial_plane_config() failed.
+ * Make sure things work if the surface base is not page aligned.
+ */
+ if (!plane_config->fb)
+ return;
+
+ if (intel_alloc_initial_plane_obj(crtc, plane_config)) {
+ fb = &plane_config->fb->base;
+ vma = plane_config->vma;
+ goto valid_fb;
+ }
+
+ /*
+ * Failed to alloc the obj, check to see if we should share
+ * an fb with another CRTC instead
+ */
+ if (intel_reuse_initial_plane_obj(dev_priv, plane_config, &fb, &vma))
+ goto valid_fb;
+
+ /*
+ * We've failed to reconstruct the BIOS FB. Current display state
+ * indicates that the primary plane is visible, but has a NULL FB,
+ * which will lead to problems later if we don't fix it up. The
+ * simplest solution is to just disable the primary plane now and
+ * pretend the BIOS never had it enabled.
+ */
+ intel_plane_disable_noatomic(crtc, plane);
+ if (crtc_state->bigjoiner) {
+ struct intel_crtc *slave =
+ crtc_state->bigjoiner_linked_crtc;
+ intel_plane_disable_noatomic(slave, to_intel_plane(slave->base.primary));
+ }
+
+ return;
+
+valid_fb:
+ plane_state->uapi.rotation = plane_config->rotation;
+ intel_fb_fill_view(to_intel_framebuffer(fb),
+ plane_state->uapi.rotation, &plane_state->view);
+
+ __i915_vma_pin(vma);
+ plane_state->ggtt_vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(plane_state) &&
+ i915_vma_pin_fence(vma) == 0 && vma->fence)
+ plane_state->flags |= PLANE_HAS_FENCE;
+
+ plane_state->uapi.src_x = 0;
+ plane_state->uapi.src_y = 0;
+ plane_state->uapi.src_w = fb->width << 16;
+ plane_state->uapi.src_h = fb->height << 16;
+
+ plane_state->uapi.crtc_x = 0;
+ plane_state->uapi.crtc_y = 0;
+ plane_state->uapi.crtc_w = fb->width;
+ plane_state->uapi.crtc_h = fb->height;
+
+ if (plane_config->tiling)
+ dev_priv->preserve_bios_swizzle = true;
+
+ plane_state->uapi.fb = fb;
+ drm_framebuffer_get(fb);
+
+ plane_state->uapi.crtc = &crtc->base;
+ intel_plane_copy_uapi_to_hw_state(plane_state, plane_state, crtc);
+
+ atomic_or(plane->frontbuffer_bit, &to_intel_frontbuffer(fb)->bits);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_initial_plane_config plane_config = {};
+
+ /*
+ * Note that reserving the BIOS fb up front prevents us
+ * from stuffing other stolen allocations like the ring
+ * on top. This prevents some ugliness at boot time, and
+ * can even allow for smooth boot transitions if the BIOS
+ * fb is large enough for the active pipe configuration.
+ */
+ dev_priv->display->get_initial_plane_config(crtc, &plane_config);
+
+ /*
+ * If the fb is shared between multiple heads, we'll
+ * just get the first one.
+ */
+ intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
+}
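
As an illustrative aside (not part of the patch itself): initial_plane_vma() above widens the BIOS-reported framebuffer range so both ends land on a GTT alignment boundary before wrapping the range in a preallocated stolen-memory object. A minimal user-space sketch of that rounding arithmetic, assuming a hypothetical 4 KiB alignment in place of I915_GTT_MIN_ALIGNMENT and made-up base/size values:

#include <stdint.h>
#include <stdio.h>

#define ALIGNMENT 4096u   /* hypothetical stand-in for I915_GTT_MIN_ALIGNMENT */

/* Power-of-two rounding, equivalent to the kernel's round_down()/round_up(). */
static uint32_t align_down(uint32_t x, uint32_t a) { return x & ~(a - 1); }
static uint32_t align_up(uint32_t x, uint32_t a)   { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	uint32_t fb_base = 0x12345;   /* made-up BIOS framebuffer base */
	uint32_t fb_size = 0x30000;   /* made-up BIOS framebuffer size */

	/* Expand [fb_base, fb_base + fb_size) to alignment boundaries,
	 * mirroring the base/size computation in initial_plane_vma(). */
	uint32_t base = align_down(fb_base, ALIGNMENT);
	uint32_t size = align_up(fb_base + fb_size, ALIGNMENT) - base;

	printf("aligned base=0x%x size=0x%x\n", (unsigned)base, (unsigned)size);
	return 0;
}
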
diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.h b/drivers/gpu/drm/i915/display/intel_plane_initial.h
new file mode 100644
index 000000000000..c7e35ab3182b
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_plane_initial.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PLANE_INITIAL_H__
+#define __INTEL_PLANE_INITIAL_H__
+
+struct intel_crtc;
+
+void intel_crtc_initial_plane_config(struct intel_crtc *crtc);
+
+#endif
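
Another illustrative aside on the intel_plane_initial.c code above: intel_reuse_initial_plane_obj() is essentially a linear scan that matches the BIOS framebuffer against already-active primary planes by GGTT base address. A simplified, hedged sketch of that matching idea, using made-up types and values:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up miniature of a per-CRTC primary plane state. */
struct fake_plane {
	int active;
	uint32_t ggtt_offset;    /* where its framebuffer sits in the GGTT */
	const char *fb_name;
};

/* Return the framebuffer of the first active plane whose GGTT offset matches
 * the BIOS-reported base, or NULL when nothing can be shared. */
static const char *find_reusable_fb(const struct fake_plane *planes, size_t n,
				    uint32_t bios_base)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!planes[i].active)
			continue;
		if (planes[i].ggtt_offset == bios_base)
			return planes[i].fb_name;
	}
	return NULL;
}

int main(void)
{
	const struct fake_plane planes[] = {
		{ 1, 0x00100000, "pipe A fb" },
		{ 1, 0x00200000, "pipe B fb" },
	};
	const char *fb = find_reusable_fb(planes, 2, 0x00200000);

	printf("reuse: %s\n", fb ? fb : "none");
	return 0;
}
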
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index a36ec4a818ff..e9c679bb1b2e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -9,6 +9,7 @@
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpll.h"
+#include "intel_lvds.h"
#include "intel_pps.h"
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
@@ -1408,3 +1409,61 @@ void intel_pps_setup(struct drm_i915_private *i915)
else
i915->pps_mmio_base = PPS_BASE;
}
+
+void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ i915_reg_t pp_reg;
+ u32 val;
+ enum pipe panel_pipe = INVALID_PIPE;
+ bool locked = true;
+
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ return;
+
+ if (HAS_PCH_SPLIT(dev_priv)) {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ switch (port_sel) {
+ case PANEL_PORT_SELECT_LVDS:
+ intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPA:
+ g4x_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPC:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
+ break;
+ case PANEL_PORT_SELECT_DPD:
+ g4x_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
+ break;
+ default:
+ MISSING_CASE(port_sel);
+ break;
+ }
+ } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
+ /* presumably write lock depends on pipe, not port select */
+ pp_reg = PP_CONTROL(pipe);
+ panel_pipe = pipe;
+ } else {
+ u32 port_sel;
+
+ pp_reg = PP_CONTROL(0);
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
+ intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
+ }
+
+ val = intel_de_read(dev_priv, pp_reg);
+ if (!(val & PANEL_POWER_ON) ||
+ ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
+ locked = false;
+
+ I915_STATE_WARN(panel_pipe == pipe && locked,
+ "panel assertion failure, pipe %c regs locked\n",
+ pipe_name(pipe));
+}
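
Illustrative aside: assert_pps_unlocked() above treats the panel power sequencer registers as locked unless the panel is powered off or the unlock key is present in the control register. A hedged stand-alone sketch of that predicate, with hypothetical bit masks in place of the real PANEL_POWER_ON / PANEL_UNLOCK_MASK / PANEL_UNLOCK_REGS definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real register bit definitions. */
#define PP_POWER_ON    (1u << 0)
#define PP_UNLOCK_MASK 0xffff0000u
#define PP_UNLOCK_KEY  0xabcd0000u

/* Same shape as the check in assert_pps_unlocked(): the registers count as
 * unlocked when the panel is powered off or the unlock key is present. */
static bool pps_regs_locked(uint32_t pp_control)
{
	if (!(pp_control & PP_POWER_ON))
		return false;
	if ((pp_control & PP_UNLOCK_MASK) == PP_UNLOCK_KEY)
		return false;
	return true;
}

int main(void)
{
	printf("on, no key  -> locked=%d\n", pps_regs_locked(PP_POWER_ON));
	printf("on, key set -> locked=%d\n",
	       pps_regs_locked(PP_POWER_ON | PP_UNLOCK_KEY));
	return 0;
}
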
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index fbbcca782e7b..fbb47f6f453e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -10,6 +10,7 @@
#include "intel_wakeref.h"
+enum pipe;
struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
@@ -49,4 +50,6 @@ void vlv_pps_init(struct intel_encoder *encoder,
void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
void intel_pps_setup(struct drm_i915_private *i915);
+void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+
#endif /* __INTEL_PPS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 1b0daf649e82..7a205fd5023b 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -22,6 +22,7 @@
*/
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_damage_helper.h>
#include "display/intel_dp.h"
@@ -364,41 +365,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
}
}
-static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 aux_clock_divider, aux_ctl;
- int i;
- static const u8 aux_msg[] = {
- [0] = DP_AUX_NATIVE_WRITE << 4,
- [1] = DP_SET_POWER >> 8,
- [2] = DP_SET_POWER & 0xff,
- [3] = 1 - 1,
- [4] = DP_SET_POWER_D0,
- };
- u32 psr_aux_mask = EDP_PSR_AUX_CTL_TIME_OUT_MASK |
- EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK |
- EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
- EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
-
- BUILD_BUG_ON(sizeof(aux_msg) > 20);
- for (i = 0; i < sizeof(aux_msg); i += 4)
- intel_de_write(dev_priv,
- EDP_PSR_AUX_DATA(intel_dp->psr.transcoder, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
-
- aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
-
- /* Start with bits set for DDI_AUX_CTL register */
- aux_ctl = intel_dp->get_aux_send_ctl(intel_dp, sizeof(aux_msg),
- aux_clock_divider);
-
- /* Select only valid bits for SRD_AUX_CTL */
- aux_ctl &= psr_aux_mask;
- intel_de_write(dev_priv, EDP_PSR_AUX_CTL(intel_dp->psr.transcoder),
- aux_ctl);
-}
-
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -460,7 +426,7 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
val |= EDP_PSR_TP2_TP3_TIME_2500us;
check_tp3_sel:
- if (intel_dp_source_supports_hbr2(intel_dp) &&
+ if (intel_dp_source_supports_tps3(dev_priv) &&
drm_dp_tps3_supported(intel_dp->dpcd))
val |= EDP_PSR_TP1_TP3_SEL;
else
@@ -545,7 +511,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) <= 12)
val |= EDP_Y_COORDINATE_ENABLE;
- val |= EDP_PSR2_FRAME_BEFORE_SU(intel_dp->psr.sink_sync_latency + 1);
+ val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2));
val |= intel_psr2_get_tp_time(intel_dp);
/* Wa_22012278275:adl-p */
@@ -595,15 +561,16 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
val |= EDP_PSR2_SU_SDP_SCANLINE;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
+ u32 tmp;
+
/* Wa_1408330847 */
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
intel_de_rmw(dev_priv, CHICKEN_PAR1_1,
DIS_RAM_BYPASS_PSR2_MAN_TRACK,
DIS_RAM_BYPASS_PSR2_MAN_TRACK);
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder),
- PSR2_MAN_TRK_CTL_ENABLE);
+ tmp = intel_de_read(dev_priv, PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
} else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
intel_de_write(dev_priv,
PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0);
@@ -621,9 +588,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
static bool
transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans)
{
- if (DISPLAY_VER(dev_priv) < 9)
- return false;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ if (DISPLAY_VER(dev_priv) >= 12)
return trans == TRANSCODER_A;
else
return trans == TRANSCODER_EDP;
@@ -755,11 +720,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_plane_state *plane_state;
- struct intel_plane *plane;
- int i;
if (!dev_priv->params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
@@ -774,14 +735,6 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
return false;
}
- for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
- if (plane_state->uapi.rotation != DRM_MODE_ROTATE_0) {
- drm_dbg_kms(&dev_priv->drm,
- "PSR2 sel fetch not enabled, plane rotated\n");
- return false;
- }
- }
-
/* Wa_14010254185 Wa_14010103792 */
if (IS_TGL_DISPLAY_STEP(dev_priv, STEP_A0, STEP_C0)) {
drm_dbg_kms(&dev_priv->drm,
@@ -877,12 +830,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
}
- /*
- * We are missing the implementation of some workarounds to enabled PSR2
- * in Alderlake_P, until ready PSR2 should be kept disabled.
- */
- if (IS_ALDERLAKE_P(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 is missing the implementation of workarounds\n");
+ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
return false;
}
@@ -985,7 +934,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state)
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
const struct drm_display_mode *adjusted_mode =
@@ -1037,7 +987,10 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
crtc_state->has_psr = true;
crtc_state->has_psr2 = intel_psr2_config_valid(intel_dp, crtc_state);
+
crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
+ intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
+ &crtc_state->psr_vsc);
}
void intel_psr_get_config(struct intel_encoder *encoder,
@@ -1114,12 +1067,6 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
- /* Only HSW and BDW have PSR AUX registers that need to be setup. SKL+
- * use hardcoded values PSR AUX transactions
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hsw_psr_setup_aux(intel_dp);
-
if (intel_dp->psr.psr2_enabled && DISPLAY_VER(dev_priv) == 9) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
u32 chicken = intel_de_read(dev_priv, reg);
@@ -1130,6 +1077,16 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
}
/*
+ * Wa_16014451276:adlp
+ * All supported adlp panels have 1-based X granularity, this may
+ * cause issues if non-supported panels are used.
+ */
+ if (IS_ALDERLAKE_P(dev_priv) &&
+ intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ ADLP_1_BASED_X_GRANULARITY);
+
+ /*
* Per Spec: Avoid continuous PSR exit by masking MEMUP and HPD also
* mask LPSP to avoid dependency on other drivers that might block
* runtime_pm besides preventing other hw tracking issues now we
@@ -1174,6 +1131,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK,
TRANS_SET_CONTEXT_LATENCY_VALUE(1));
+
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -1208,8 +1170,7 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
}
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+ const struct intel_crtc_state *crtc_state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1236,9 +1197,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
intel_dp->psr.psr2_enabled ? "2" : "1");
- intel_dp_compute_psr_vsc_sdp(intel_dp, crtc_state, conn_state,
- &intel_dp->psr.vsc);
- intel_write_dp_vsc_sdp(encoder, crtc_state, &intel_dp->psr.vsc);
+ intel_write_dp_vsc_sdp(encoder, crtc_state, &crtc_state->psr_vsc);
intel_snps_phy_update_psr_power_state(dev_priv, phy, true);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp);
@@ -1248,33 +1207,6 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
intel_psr_activate(intel_dp);
}
-/**
- * intel_psr_enable - Enable PSR
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- * @conn_state: new CONNECTOR state
- *
- * This function can only be called after the pipe is fully trained and enabled.
- */
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
-{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
-
- if (!CAN_PSR(intel_dp))
- return;
-
- if (!crtc_state->has_psr)
- return;
-
- drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
-
- mutex_lock(&intel_dp->psr.lock);
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
- mutex_unlock(&intel_dp->psr.lock);
-}
-
static void intel_psr_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
@@ -1363,6 +1295,11 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
TRANS_SET_CONTEXT_LATENCY(intel_dp->psr.transcoder),
TRANS_SET_CONTEXT_LATENCY_MASK, 0);
+ /* Wa_16012604467:adlp */
+ if (IS_ALDERLAKE_P(dev_priv) && intel_dp->psr.psr2_enabled)
+ intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
+
intel_snps_phy_update_psr_power_state(dev_priv, phy, false);
/* Disable PSR on Sink */
@@ -1456,27 +1393,48 @@ unlock:
mutex_unlock(&psr->lock);
}
+static inline u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+{
+ return IS_ALDERLAKE_P(dev_priv) ?
+ ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
+ PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
+}
+
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 9)
- /*
- * Display WA #0884: skl+
- * This documented WA for bxt can be safely applied
- * broadly so we can force HW tracking to exit PSR
- * instead of disabling and re-enabling.
- * Workaround tells us to write 0 to CUR_SURFLIVE_A,
- * but it makes more sense write to the current active
- * pipe.
- */
- intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
- else
- /*
- * A write to CURSURFLIVE do not cause HW tracking to exit PSR
- * on older gens so doing the manual exit instead.
- */
- intel_psr_exit(intel_dp);
+ if (intel_dp->psr.psr2_sel_fetch_enabled)
+ intel_de_rmw(dev_priv,
+ PSR2_MAN_TRK_CTL(intel_dp->psr.transcoder), 0,
+ man_trk_ctl_single_full_frame_bit_get(dev_priv));
+
+ /*
+ * Display WA #0884: skl+
+ * This documented WA for bxt can be safely applied
+ * broadly so we can force HW tracking to exit PSR
+ * instead of disabling and re-enabling.
+ * Workaround tells us to write 0 to CUR_SURFLIVE_A,
+ * but it makes more sense write to the current active
+ * pipe.
+ *
+ * This workaround does not exist for platforms with display 10 or
+ * newer, but testing proved that it works up to display 13; newer
+ * platforms will need testing.
+ */
+ intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0);
+}
+
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ enum pipe pipe = plane->pipe;
+
+ if (!crtc_state->enable_psr2_sel_fetch)
+ return;
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), 0);
}
void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
@@ -1487,17 +1445,17 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
enum pipe pipe = plane->pipe;
const struct drm_rect *clip;
- u32 val, offset;
- int ret, x, y;
+ u32 val;
+ int x, y;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- val = plane_state ? plane_state->ctl : 0;
- val &= plane->id == PLANE_CURSOR ? val : PLANE_SEL_FETCH_CTL_ENABLE;
- intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id), val);
- if (!val || plane->id == PLANE_CURSOR)
+ if (plane->id == PLANE_CURSOR) {
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ plane_state->ctl);
return;
+ }
clip = &plane_state->psr2_sel_fetch_area;
@@ -1508,10 +1466,6 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
/* TODO: consider auxiliary surfaces */
x = plane_state->uapi.src.x1 >> 16;
y = (plane_state->uapi.src.y1 >> 16) + clip->y1;
- ret = skl_calc_main_surface_offset(plane_state, &x, &y, &offset);
- if (ret)
- drm_warn_once(&dev_priv->drm, "skl_calc_main_surface_offset() returned %i\n",
- ret);
val = y << 16 | x;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_OFFSET(pipe, plane->id),
val);
@@ -1520,14 +1474,16 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
val = (drm_rect_height(clip) - 1) << 16;
val |= (drm_rect_width(&plane_state->uapi.src) >> 16) - 1;
intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_SIZE(pipe, plane->id), val);
+
+ intel_de_write_fw(dev_priv, PLANE_SEL_FETCH_CTL(pipe, plane->id),
+ PLANE_SEL_FETCH_CTL_ENABLE);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- if (!HAS_PSR2_SEL_FETCH(dev_priv) ||
- !crtc_state->enable_psr2_sel_fetch)
+ if (!crtc_state->enable_psr2_sel_fetch)
return;
intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(crtc_state->cpu_transcoder),
@@ -1542,11 +1498,11 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
u32 val = PSR2_MAN_TRK_CTL_ENABLE;
if (full_update) {
- if (IS_ALDERLAKE_P(dev_priv))
- val |= ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
- else
- val |= PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
-
+ /*
+ * Not applying Wa_14014971508:adlp as we do not support the
+ * feature that requires this workaround.
+ */
+ val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
goto exit;
}
@@ -1555,7 +1511,7 @@ static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
if (IS_ALDERLAKE_P(dev_priv)) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(clip->y1);
- val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2);
+ val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(clip->y2 - 1);
} else {
drm_WARN_ON(crtc_state->uapi.crtc->dev, clip->y1 % 4 || clip->y2 % 4);
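
Illustrative aside on the ADL-P hunk above: the new "- 1" appears to account for drm_rect using an exclusive end coordinate while the SU region end-address field takes the last included scanline. A tiny hedged sketch of that convention:

#include <stdio.h>

/* A drm_rect-style clip with y1 = 0, y2 = 4 covers scanlines 0..3 (y2 is
 * exclusive). A register field that wants the last included line therefore
 * gets y2 - 1 programmed. */
static int last_included_line(int y2_exclusive)
{
	return y2_exclusive - 1;
}

int main(void)
{
	int y1 = 0, y2 = 4;    /* damage covers scanlines 0..3 */

	printf("region start=%d end=%d (inclusive)\n", y1, last_included_line(y2));
	return 0;
}
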
@@ -1597,6 +1553,45 @@ static void intel_psr2_sel_fetch_pipe_alignment(const struct intel_crtc_state *c
drm_warn(&dev_priv->drm, "Missing PSR2 sel fetch alignment with DSC\n");
}
+/*
+ * TODO: It is not clear how to handle planes with a negative position;
+ * planes are also not updated if they have a negative X position, so
+ * for now a full update is done in these cases.
+ *
+ * TODO: Multi-planar format handling is missing; until it is
+ * implemented, full frame updates are sent.
+ *
+ * Plane scaling and rotation are not supported by selective fetch, and
+ * both properties can change without a modeset, so they need to be
+ * checked at every atomic commit.
+ */
+static bool psr2_sel_fetch_plane_state_supported(const struct intel_plane_state *plane_state)
+{
+ if (plane_state->uapi.dst.y1 < 0 ||
+ plane_state->uapi.dst.x1 < 0 ||
+ plane_state->scaler_id >= 0 ||
+ plane_state->hw.fb->format->num_planes > 1 ||
+ plane_state->uapi.rotation != DRM_MODE_ROTATE_0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Check for pipe properties that are not supported by selective fetch.
+ *
+ * TODO: pipe scaling causes a modeset but skl_update_scaler_crtc() is executed
+ * after intel_psr_compute_config(), so for now keeping PSR2 selective fetch
+ * enabled and going to the full update path.
+ */
+static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *crtc_state)
+{
+ if (crtc_state->scaler_state.scaler_id >= 0)
+ return false;
+
+ return true;
+}
+
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
@@ -1610,9 +1605,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (!crtc_state->enable_psr2_sel_fetch)
return 0;
- ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
- if (ret)
- return ret;
+ if (!psr2_sel_fetch_pipe_state_supported(crtc_state)) {
+ full_update = true;
+ goto skip_sel_fetch_set_loop;
+ }
/*
* Calculate minimal selective fetch area of each plane and calculate
@@ -1623,8 +1619,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
for_each_oldnew_intel_plane_in_state(state, plane, old_plane_state,
new_plane_state, i) {
struct drm_rect src, damaged_area = { .y1 = -1 };
- struct drm_mode_rect *damaged_clips;
- u32 num_clips, j;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
if (new_plane_state->uapi.crtc != crtc_state->uapi.crtc)
continue;
@@ -1633,19 +1629,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
!old_plane_state->uapi.visible)
continue;
- /*
- * TODO: Not clear how to handle planes with negative position,
- * also planes are not updated if they have a negative X
- * position so for now doing a full update in this cases
- */
- if (new_plane_state->uapi.dst.y1 < 0 ||
- new_plane_state->uapi.dst.x1 < 0) {
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
full_update = true;
break;
}
- num_clips = drm_plane_get_damage_clips_count(&new_plane_state->uapi);
-
/*
* If visibility or plane moved, mark the whole plane area as
* damaged as it needs to be complete redraw in the new and old
@@ -1666,14 +1654,8 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
clip_area_update(&pipe_clip, &damaged_area);
}
continue;
- } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha ||
- (!num_clips &&
- new_plane_state->uapi.fb != old_plane_state->uapi.fb)) {
- /*
- * If the plane don't have damaged areas but the
- * framebuffer changed or alpha changed, mark the whole
- * plane area as damaged.
- */
+ } else if (new_plane_state->uapi.alpha != old_plane_state->uapi.alpha) {
+ /* If alpha changed, mark the whole plane area as damaged */
damaged_area.y1 = new_plane_state->uapi.dst.y1;
damaged_area.y2 = new_plane_state->uapi.dst.y2;
clip_area_update(&pipe_clip, &damaged_area);
@@ -1681,15 +1663,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
drm_rect_fp_to_int(&src, &new_plane_state->uapi.src);
- damaged_clips = drm_plane_get_damage_clips(&new_plane_state->uapi);
- for (j = 0; j < num_clips; j++) {
- struct drm_rect clip;
-
- clip.x1 = damaged_clips[j].x1;
- clip.y1 = damaged_clips[j].y1;
- clip.x2 = damaged_clips[j].x2;
- clip.y2 = damaged_clips[j].y2;
+ drm_atomic_helper_damage_iter_init(&iter,
+ &old_plane_state->uapi,
+ &new_plane_state->uapi);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
if (drm_rect_intersect(&clip, &src))
clip_area_update(&damaged_area, &clip);
}
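
Illustrative aside: the loop above intersects each damage clip with the plane source and folds the result into a per-plane area, which is later folded into a single per-pipe clip. The clip_area_update() helper's body is not shown in this diff; the sketch below assumes it simply grows a vertical band, since only y1/y2 of the accumulated clip are consumed here:

#include <stdio.h>

struct band { int y1, y2; };   /* vertical extent only; y1 == -1 means empty */

/* Grow *acc so it also covers *clip. Selective fetch regions are full-width,
 * so tracking the vertical extent is assumed to be enough here. */
static void band_union(struct band *acc, const struct band *clip)
{
	if (acc->y1 == -1) {
		*acc = *clip;
		return;
	}
	if (clip->y1 < acc->y1)
		acc->y1 = clip->y1;
	if (clip->y2 > acc->y2)
		acc->y2 = clip->y2;
}

int main(void)
{
	struct band pipe_clip = { -1, -1 };          /* nothing damaged yet */
	struct band a = { 16, 64 }, b = { 128, 200 };

	band_union(&pipe_clip, &a);
	band_union(&pipe_clip, &b);

	printf("pipe damage: lines %d..%d\n", pipe_clip.y1, pipe_clip.y2);
	return 0;
}
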
@@ -1705,6 +1683,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (full_update)
goto skip_sel_fetch_set_loop;
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
intel_psr2_sel_fetch_pipe_alignment(crtc_state, &pipe_clip);
/*
@@ -1723,6 +1705,11 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (!drm_rect_intersect(&inter, &new_plane_state->uapi.dst))
continue;
+ if (!psr2_sel_fetch_plane_state_supported(new_plane_state)) {
+ full_update = true;
+ break;
+ }
+
sel_fetch_area = &new_plane_state->psr2_sel_fetch_area;
sel_fetch_area->y1 = inter.y1 - new_plane_state->uapi.dst.y1;
sel_fetch_area->y2 = inter.y2 - new_plane_state->uapi.dst.y1;
@@ -1734,58 +1721,92 @@ skip_sel_fetch_set_loop:
return 0;
}
-/**
- * intel_psr_update - Update PSR state
- * @intel_dp: Intel DP
- * @crtc_state: new CRTC state
- * @conn_state: new CONNECTOR state
- *
- * This functions will update PSR states, disabling, enabling or switching PSR
- * version when executing fastsets. For full modeset, intel_psr_disable() and
- * intel_psr_enable() should be called instead.
- */
-void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state)
+static void _intel_psr_pre_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_psr *psr = &intel_dp->psr;
- bool enable, psr2_enable;
+ struct intel_encoder *encoder;
- if (!CAN_PSR(intel_dp))
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+ bool needs_to_disable = false;
+
+ mutex_lock(&psr->lock);
+
+ /*
+ * Reasons to disable:
+ * - PSR disabled in new state
+ * - All planes will go inactive
+ * - Changing between PSR versions
+ */
+ needs_to_disable |= !crtc_state->has_psr;
+ needs_to_disable |= !crtc_state->active_planes;
+ needs_to_disable |= crtc_state->has_psr2 != psr->psr2_enabled;
+
+ if (psr->enabled && needs_to_disable)
+ intel_psr_disable_locked(intel_dp);
+
+ mutex_unlock(&psr->lock);
+ }
+}
+
+void intel_psr_pre_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
+
+ if (!HAS_PSR(dev_priv))
return;
- mutex_lock(&intel_dp->psr.lock);
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_pre_plane_update(state, crtc_state);
+}
+
+static void _intel_psr_post_plane_update(const struct intel_atomic_state *state,
+ const struct intel_crtc_state *crtc_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_encoder *encoder;
+
+ if (!crtc_state->has_psr)
+ return;
+
+ for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
+ crtc_state->uapi.encoder_mask) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ struct intel_psr *psr = &intel_dp->psr;
+
+ mutex_lock(&psr->lock);
+
+ drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
- enable = crtc_state->has_psr;
- psr2_enable = crtc_state->has_psr2;
+ /* Only enable if there are active planes */
+ if (!psr->enabled && crtc_state->active_planes)
+ intel_psr_enable_locked(intel_dp, crtc_state);
- if (enable == psr->enabled && psr2_enable == psr->psr2_enabled &&
- crtc_state->enable_psr2_sel_fetch == psr->psr2_sel_fetch_enabled) {
/* Force a PSR exit when enabling CRC to avoid CRC timeouts */
if (crtc_state->crc_enabled && psr->enabled)
psr_force_hw_tracking_exit(intel_dp);
- else if (DISPLAY_VER(dev_priv) < 9 && psr->enabled) {
- /*
- * Activate PSR again after a force exit when enabling
- * CRC in older gens
- */
- if (!intel_dp->psr.active &&
- !intel_dp->psr.busy_frontbuffer_bits)
- schedule_work(&intel_dp->psr.work);
- }
- goto unlock;
+ mutex_unlock(&psr->lock);
}
+}
- if (psr->enabled)
- intel_psr_disable_locked(intel_dp);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+ int i;
- if (enable)
- intel_psr_enable_locked(intel_dp, crtc_state, conn_state);
+ if (!HAS_PSR(dev_priv))
+ return;
-unlock:
- mutex_unlock(&intel_dp->psr.lock);
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i)
+ _intel_psr_post_plane_update(state, crtc_state);
}
/**
@@ -2065,20 +2086,16 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
/*
* When we will be completely rely on PSR2 S/W tracking in future,
* intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP
- * event also therefore tgl_dc3co_flush() require to be changed
+ * event also therefore tgl_dc3co_flush_locked() require to be changed
* accordingly in future.
*/
static void
-tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
- enum fb_op_origin origin)
+tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
+ enum fb_op_origin origin)
{
- mutex_lock(&intel_dp->psr.lock);
-
- if (!intel_dp->psr.dc3co_exitline)
- goto unlock;
-
- if (!intel_dp->psr.psr2_enabled || !intel_dp->psr.active)
- goto unlock;
+ if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.psr2_enabled ||
+ !intel_dp->psr.active)
+ return;
/*
* At every frontbuffer flush flip event modified delay of delayed work,
@@ -2086,14 +2103,11 @@ tgl_dc3co_flush(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
*/
if (!(frontbuffer_bits &
INTEL_FRONTBUFFER_ALL_MASK(intel_dp->psr.pipe)))
- goto unlock;
+ return;
tgl_psr2_enable_dc3co(intel_dp);
mod_delayed_work(system_wq, &intel_dp->psr.dc3co_work,
intel_dp->psr.dc3co_exit_delay);
-
-unlock:
- mutex_unlock(&intel_dp->psr.lock);
}
/**
@@ -2118,11 +2132,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- if (origin == ORIGIN_FLIP) {
- tgl_dc3co_flush(intel_dp, frontbuffer_bits, origin);
- continue;
- }
-
mutex_lock(&intel_dp->psr.lock);
if (!intel_dp->psr.enabled) {
mutex_unlock(&intel_dp->psr.lock);
@@ -2143,6 +2152,14 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
continue;
}
+ if (origin == ORIGIN_FLIP ||
+ (origin == ORIGIN_CURSOR_UPDATE &&
+ !intel_dp->psr.psr2_sel_fetch_enabled)) {
+ tgl_dc3co_flush_locked(intel_dp, frontbuffer_bits, origin);
+ mutex_unlock(&intel_dp->psr.lock);
+ continue;
+ }
+
/* By definition flush = invalidate + flush */
if (pipe_frontbuffer_bits)
psr_force_hw_tracking_exit(intel_dp);
@@ -2186,23 +2203,12 @@ void intel_psr_init(struct intel_dp *intel_dp)
intel_dp->psr.source_support = true;
- if (IS_HASWELL(dev_priv))
- /*
- * HSW don't have PSR registers on the same space as transcoder
- * so set this to a value that when subtract to the register
- * in transcoder space results in the right offset for HSW
- */
- dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE;
-
if (dev_priv->params.enable_psr == -1)
- if (DISPLAY_VER(dev_priv) < 9 || !dev_priv->vbt.psr.enable)
+ if (!dev_priv->vbt.psr.enable)
dev_priv->params.enable_psr = 0;
/* Set link_standby x link_off defaults */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- /* HSW and BDW require workarounds that we don't implement. */
- intel_dp->psr.link_standby = false;
- else if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(dev_priv) < 12)
/* For new platforms up to TGL let's respect VBT back again */
intel_dp->psr.link_standby = dev_priv->vbt.psr.full_link;
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index 641521b101c8..facffbacd357 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -20,14 +20,10 @@ struct intel_plane;
struct intel_encoder;
void intel_psr_init_dpcd(struct intel_dp *intel_dp);
-void intel_psr_enable(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
+void intel_psr_pre_plane_update(const struct intel_atomic_state *state);
+void intel_psr_post_plane_update(const struct intel_atomic_state *state);
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
-void intel_psr_update(struct intel_dp *intel_dp,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
void intel_psr_invalidate(struct drm_i915_private *dev_priv,
unsigned frontbuffer_bits,
@@ -37,7 +33,8 @@ void intel_psr_flush(struct drm_i915_private *dev_priv,
enum fb_op_origin origin);
void intel_psr_init(struct intel_dp *intel_dp);
void intel_psr_compute_config(struct intel_dp *intel_dp,
- struct intel_crtc_state *crtc_state);
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config);
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir);
@@ -51,6 +48,8 @@ void intel_psr2_program_plane_sel_fetch(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
int color_plane);
+void intel_psr2_disable_plane_sel_fetch(struct intel_plane *plane,
+ const struct intel_crtc_state *crtc_state);
void intel_psr_pause(struct intel_dp *intel_dp);
void intel_psr_resume(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index 6cb27599ea03..2dc6c3742ba2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -1335,6 +1335,13 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
pipe_config->sdvo_tv_clock = true;
} else if (IS_LVDS(intel_sdvo_connector)) {
+ int ret;
+
+ ret = intel_panel_compute_config(&intel_sdvo_connector->base,
+ adjusted_mode);
+ if (ret)
+ return ret;
+
if (!intel_sdvo_set_output_timings_from_mode(intel_sdvo,
intel_sdvo_connector->base.panel.fixed_mode))
return -EINVAL;
@@ -1873,7 +1880,6 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
-
if (clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -1890,14 +1896,11 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
return MODE_CLOCK_HIGH;
if (IS_LVDS(intel_sdvo_connector)) {
- const struct drm_display_mode *fixed_mode =
- intel_sdvo_connector->base.panel.fixed_mode;
-
- if (mode->hdisplay > fixed_mode->hdisplay)
- return MODE_PANEL;
+ enum drm_mode_status status;
- if (mode->vdisplay > fixed_mode->vdisplay)
- return MODE_PANEL;
+ status = intel_panel_mode_valid(&intel_sdvo_connector->base, mode);
+ if (status != MODE_OK)
+ return status;
}
return MODE_OK;
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c
index 18b52b64af95..5e20f340730f 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c
@@ -5,6 +5,8 @@
#include <linux/util_macros.h>
+#include "intel_ddi.h"
+#include "intel_ddi_buf_trans.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_snps_phy.h"
@@ -50,58 +52,28 @@ void intel_snps_phy_update_psr_power_state(struct drm_i915_private *dev_priv,
SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR, val);
}
-static const u32 dg2_ddi_translations[] = {
- /* VS 0, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 26),
-
- /* VS 0, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 33) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 6),
-
- /* VS 0, pre-emph 2 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 38) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 12),
-
- /* VS 0, pre-emph 3 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 43) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 19),
-
- /* VS 1, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 39),
-
- /* VS 1, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 44) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 8),
-
- /* VS 1, pre-emph 2 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 47) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 15),
-
- /* VS 2, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 52),
-
- /* VS 2, pre-emph 1 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 51) |
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, 10),
-
- /* VS 3, pre-emph 0 */
- REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, 62),
-};
-
-void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- u32 level)
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ const struct intel_ddi_buf_trans *trans;
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
+ int level = intel_ddi_level(encoder, crtc_state, 0);
int n_entries, ln;
- n_entries = ARRAY_SIZE(dg2_ddi_translations);
- if (level >= n_entries)
- level = n_entries - 1;
+ trans = encoder->get_buf_trans(encoder, crtc_state, &n_entries);
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !trans))
+ return;
+
+ for (ln = 0; ln < 4; ln++) {
+ u32 val = 0;
+
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_MAIN, trans->entries[level].snps.vswing);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_PRE, trans->entries[level].snps.pre_cursor);
+ val |= REG_FIELD_PREP(SNPS_PHY_TX_EQ_POST, trans->entries[level].snps.post_cursor);
- for (ln = 0; ln < 4; ln++)
- intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy),
- dg2_ddi_translations[level]);
+ intel_de_write(dev_priv, SNPS_PHY_TX_EQ(ln, phy), val);
+ }
}
/*
@@ -198,11 +170,81 @@ static const struct intel_mpllb_state dg2_dp_hbr3_100 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
};
-static const struct intel_mpllb_state *dg2_dp_100_tables[] = {
+static const struct intel_mpllb_state dg2_dp_uhbr10_100 = {
+ .clock = 1000000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 4) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 21) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 368),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 58982),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 76101),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr13_100 = {
+ .clock = 1350000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 45) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 508),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 79626),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 102737),
+};
+
+static const struct intel_mpllb_state * const dg2_dp_100_tables[] = {
&dg2_dp_rbr_100,
&dg2_dp_hbr1_100,
&dg2_dp_hbr2_100,
&dg2_dp_hbr3_100,
+ &dg2_dp_uhbr10_100,
+ &dg2_dp_uhbr13_100,
NULL,
};
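For context, each of these tables is NULL-terminated and holds one pre-computed PLL state per link rate; the driver scans the table for an entry whose .clock matches the requested port clock (the same pattern appears in intel_snps_phy_check_hdmi_link_rate further down). A minimal sketch of that lookup, assuming the clock is passed in kHz as the .clock values above suggest; the helper name is illustrative, not part of the driver:

/* Hedged sketch: scan a NULL-terminated MPLLB state table for a link rate. */
static const struct intel_mpllb_state *
mpllb_find_state(const struct intel_mpllb_state * const *tables, int clock)
{
        int i;

        /* The trailing NULL entry terminates the table. */
        for (i = 0; tables[i]; i++) {
                if (tables[i]->clock == clock)
                        return tables[i];
        }

        return NULL; /* unsupported link rate */
}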
@@ -311,11 +353,88 @@ static const struct intel_mpllb_state dg2_dp_hbr3_38_4 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 61440),
};
-static const struct intel_mpllb_state *dg2_dp_38_4_tables[] = {
+static const struct intel_mpllb_state dg2_dp_uhbr10_38_4 = {
+ .clock = 1000000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 5) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 26) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 488),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 3),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 2) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 27306),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 76800),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 129024),
+};
+
+static const struct intel_mpllb_state dg2_dp_uhbr13_38_4 = {
+ .clock = 1350000,
+ .ref_control =
+ REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 1),
+ .mpllb_cp =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 56) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 65) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 127),
+ .mpllb_div =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_CLK_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV_MULTIPLIER, 8) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_WORD_DIV2_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_DP2_MODE, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 3),
+ .mpllb_div2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 670),
+ .mpllb_fracn1 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 1),
+ .mpllb_fracn2 =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 36864),
+
+ /*
+ * SSC will be enabled; DP UHBR has a minimum SSC requirement.
+ */
+ .mpllb_sscen =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_EN, 1) |
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_PEAK, 103680),
+ .mpllb_sscstep =
+ REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 174182),
+};
+
+static const struct intel_mpllb_state * const dg2_dp_38_4_tables[] = {
&dg2_dp_rbr_38_4,
&dg2_dp_hbr1_38_4,
&dg2_dp_hbr2_38_4,
&dg2_dp_hbr3_38_4,
+ &dg2_dp_uhbr10_38_4,
+ &dg2_dp_uhbr13_38_4,
NULL,
};
@@ -448,7 +567,7 @@ static const struct intel_mpllb_state dg2_edp_r432 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_STEPSIZE, 65752),
};
-static const struct intel_mpllb_state *dg2_edp_tables[] = {
+static const struct intel_mpllb_state * const dg2_edp_tables[] = {
&dg2_dp_rbr_100,
&dg2_edp_r216,
&dg2_edp_r243,
@@ -611,7 +730,7 @@ static const struct intel_mpllb_state dg2_hdmi_594 = {
REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1),
};
-static const struct intel_mpllb_state *dg2_hdmi_tables[] = {
+static const struct intel_mpllb_state * const dg2_hdmi_tables[] = {
&dg2_hdmi_25_175,
&dg2_hdmi_27_0,
&dg2_hdmi_74_25,
@@ -620,7 +739,7 @@ static const struct intel_mpllb_state *dg2_hdmi_tables[] = {
NULL,
};
-static const struct intel_mpllb_state **
+static const struct intel_mpllb_state * const *
intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -654,7 +773,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state,
int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_mpllb_state **tables;
+ const struct intel_mpllb_state * const *tables;
int i;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -850,7 +969,7 @@ void intel_mpllb_readout_hw_state(struct intel_encoder *encoder,
int intel_snps_phy_check_hdmi_link_rate(int clock)
{
- const struct intel_mpllb_state **tables = dg2_hdmi_tables;
+ const struct intel_mpllb_state * const *tables = dg2_hdmi_tables;
int i;
for (i = 0; tables[i]; i++) {
diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.h b/drivers/gpu/drm/i915/display/intel_snps_phy.h
index 6261ff88ef5c..11dcd6deb070 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_snps_phy.h
@@ -29,7 +29,7 @@ int intel_mpllb_calc_port_clock(struct intel_encoder *encoder,
const struct intel_mpllb_state *pll_state);
int intel_snps_phy_check_hdmi_link_rate(int clock);
-void intel_snps_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
- u32 level);
+void intel_snps_phy_set_signal_levels(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_SNPS_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 3ffece568ed9..40faa18947c9 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -12,44 +12,81 @@
static const char *tc_port_mode_name(enum tc_port_mode mode)
{
static const char * const names[] = {
+ [TC_PORT_DISCONNECTED] = "disconnected",
[TC_PORT_TBT_ALT] = "tbt-alt",
[TC_PORT_DP_ALT] = "dp-alt",
[TC_PORT_LEGACY] = "legacy",
};
if (WARN_ON(mode >= ARRAY_SIZE(names)))
- mode = TC_PORT_TBT_ALT;
+ mode = TC_PORT_DISCONNECTED;
return names[mode];
}
+static bool intel_tc_port_in_mode(struct intel_digital_port *dig_port,
+ enum tc_port_mode mode)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
+
+ return intel_phy_is_tc(i915, phy) && dig_port->tc_mode == mode;
+}
+
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_TBT_ALT);
+}
+
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_DP_ALT);
+}
+
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_in_mode(dig_port, TC_PORT_LEGACY);
+}
+
+bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
+{
+ struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+
+ return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
+ IS_ALDERLAKE_P(i915);
+}
+
static enum intel_display_power_domain
-tc_cold_get_power_domain(struct intel_digital_port *dig_port)
+tc_cold_get_power_domain(struct intel_digital_port *dig_port, enum tc_port_mode mode)
{
- if (intel_tc_cold_requires_aux_pw(dig_port))
- return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
- else
+ if (mode == TC_PORT_TBT_ALT || !intel_tc_cold_requires_aux_pw(dig_port))
return POWER_DOMAIN_TC_COLD_OFF;
+
+ return intel_legacy_aux_to_power_domain(dig_port->aux_ch);
}
static intel_wakeref_t
-tc_cold_block(struct intel_digital_port *dig_port)
+tc_cold_block_in_mode(struct intel_digital_port *dig_port, enum tc_port_mode mode,
+ enum intel_display_power_domain *domain)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum intel_display_power_domain domain;
- if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
- return 0;
+ *domain = tc_cold_get_power_domain(dig_port, mode);
- domain = tc_cold_get_power_domain(dig_port);
- return intel_display_power_get(i915, domain);
+ return intel_display_power_get(i915, *domain);
+}
+
+static intel_wakeref_t
+tc_cold_block(struct intel_digital_port *dig_port, enum intel_display_power_domain *domain)
+{
+ return tc_cold_block_in_mode(dig_port, dig_port->tc_mode, domain);
}
static void
-tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
+tc_cold_unblock(struct intel_digital_port *dig_port, enum intel_display_power_domain domain,
+ intel_wakeref_t wakeref)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- enum intel_display_power_domain domain;
/*
* wakeref == -1, means some error happened saving save_depot_stack but
@@ -59,8 +96,7 @@ tc_cold_unblock(struct intel_digital_port *dig_port, intel_wakeref_t wakeref)
if (wakeref == 0)
return;
- domain = tc_cold_get_power_domain(dig_port);
- intel_display_power_put_async(i915, domain, wakeref);
+ intel_display_power_put(i915, domain, wakeref);
}
static void
@@ -69,11 +105,9 @@ assert_tc_cold_blocked(struct intel_digital_port *dig_port)
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
bool enabled;
- if (DISPLAY_VER(i915) == 11 && !dig_port->tc_legacy_port)
- return;
-
enabled = intel_display_power_is_enabled(i915,
- tc_cold_get_power_domain(dig_port));
+ tc_cold_get_power_domain(dig_port,
+ dig_port->tc_mode));
drm_WARN_ON(&i915->drm, !enabled);
}
@@ -244,6 +278,11 @@ static u32 adl_tc_port_live_status_mask(struct intel_digital_port *dig_port)
struct intel_uncore *uncore = &i915->uncore;
u32 val, mask = 0;
+ /*
+ * On ADL-P, HW/FW will wake from TCCOLD to complete read accesses to
+ * registers in IOM. Note that this doesn't apply to PHY and FIA
+ * registers.
+ */
val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port));
if (val & TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT)
mask |= BIT(TC_PORT_DP_ALT);
@@ -270,6 +309,14 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
return icl_tc_port_live_status_mask(dig_port);
}
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when a DP-alt or legacy sink
+ * is connected and it's ready to switch the ownership to display. The flag
+ * will be left cleared when a TBT-alt sink is connected, where the PHY is
+ * owned by the TBT subsystem and so switching the ownership to display is not
+ * required.
+ */
static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -288,6 +335,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx);
}
+/*
+ * Return the PHY status complete flag indicating that display can acquire the
+ * PHY ownership. The IOM firmware sets this flag when it's ready to switch
+ * the ownership to display, regardless of what sink is connected (TBT-alt,
+ * DP-alt, legacy or nothing). For TBT-alt sinks the PHY is owned by the TBT
+ * subsystem and so switching the ownership to display is not required.
+ */
static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -339,11 +393,6 @@ static bool icl_tc_phy_take_ownership(struct intel_digital_port *dig_port,
intel_uncore_write(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
- if (!take && wait_for(!tc_phy_status_complete(dig_port), 10))
- drm_dbg_kms(&i915->drm,
- "Port %s: PHY complete clear timed out\n",
- dig_port->tc_port_name);
-
return true;
}
@@ -429,6 +478,7 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ u32 live_status_mask;
int max_lanes;
if (!tc_phy_status_complete(dig_port)) {
@@ -437,6 +487,13 @@ static void icl_tc_phy_connect(struct intel_digital_port *dig_port,
goto out_set_tbt_alt_mode;
}
+ live_status_mask = tc_port_live_status_mask(dig_port);
+ if (!(live_status_mask & (BIT(TC_PORT_DP_ALT) | BIT(TC_PORT_LEGACY)))) {
+ drm_dbg_kms(&i915->drm, "Port %s: PHY ownership not required (live status %02x)\n",
+ dig_port->tc_port_name, live_status_mask);
+ goto out_set_tbt_alt_mode;
+ }
+
if (!tc_phy_take_ownership(dig_port, true) &&
!drm_WARN_ON(&i915->drm, dig_port->tc_legacy_port))
goto out_set_tbt_alt_mode;
@@ -485,14 +542,13 @@ static void icl_tc_phy_disconnect(struct intel_digital_port *dig_port)
{
switch (dig_port->tc_mode) {
case TC_PORT_LEGACY:
- /* Nothing to do, we never disconnect from legacy mode */
- break;
case TC_PORT_DP_ALT:
tc_phy_take_ownership(dig_port, false);
- dig_port->tc_mode = TC_PORT_TBT_ALT;
- break;
+ fallthrough;
case TC_PORT_TBT_ALT:
- /* Nothing to do, we stay in TBT-alt mode */
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
+ fallthrough;
+ case TC_PORT_DISCONNECTED:
break;
default:
MISSING_CASE(dig_port->tc_mode);
@@ -509,6 +565,10 @@ static bool icl_tc_phy_is_connected(struct intel_digital_port *dig_port)
return dig_port->tc_mode == TC_PORT_TBT_ALT;
}
+ /* On ADL-P the PHY complete flag is set in TBT mode as well. */
+ if (IS_ALDERLAKE_P(i915) && dig_port->tc_mode == TC_PORT_TBT_ALT)
+ return true;
+
if (!tc_phy_is_owned(dig_port)) {
drm_dbg_kms(&i915->drm, "Port %s: PHY not owned\n",
dig_port->tc_port_name);
@@ -550,9 +610,7 @@ intel_tc_port_get_target_mode(struct intel_digital_port *dig_port)
if (live_status_mask)
return fls(live_status_mask) - 1;
- return tc_phy_status_complete(dig_port) &&
- dig_port->tc_legacy_port ? TC_PORT_LEGACY :
- TC_PORT_TBT_ALT;
+ return TC_PORT_TBT_ALT;
}
static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
@@ -581,6 +639,43 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
tc_port_mode_name(dig_port->tc_mode));
}
+static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
+{
+ return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
+}
+
+static void intel_tc_port_update_mode(struct intel_digital_port *dig_port,
+ int required_lanes, bool force_disconnect)
+{
+ enum intel_display_power_domain domain;
+ intel_wakeref_t wref;
+ bool needs_reset = force_disconnect;
+
+ if (!needs_reset) {
+ /* Get power domain required to check the hotplug live status. */
+ wref = tc_cold_block(dig_port, &domain);
+ needs_reset = intel_tc_port_needs_reset(dig_port);
+ tc_cold_unblock(dig_port, domain, wref);
+ }
+
+ if (!needs_reset)
+ return;
+
+ /* Get power domain required for resetting the mode. */
+ wref = tc_cold_block_in_mode(dig_port, TC_PORT_DISCONNECTED, &domain);
+
+ intel_tc_port_reset_mode(dig_port, required_lanes, force_disconnect);
+
+ /* Get power domain matching the new mode after reset. */
+ tc_cold_unblock(dig_port, dig_port->tc_lock_power_domain,
+ fetch_and_zero(&dig_port->tc_lock_wakeref));
+ if (dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
+
+ tc_cold_unblock(dig_port, domain, wref);
+}
+
static void
intel_tc_port_link_init_refcount(struct intel_digital_port *dig_port,
int refcount)
@@ -595,45 +690,42 @@ void intel_tc_port_sanitize(struct intel_digital_port *dig_port)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_encoder *encoder = &dig_port->base;
- intel_wakeref_t tc_cold_wref;
int active_links = 0;
mutex_lock(&dig_port->tc_lock);
- tc_cold_wref = tc_cold_block(dig_port);
- dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
if (dig_port->dp.is_mst)
active_links = intel_dp_mst_encoder_active_links(dig_port);
else if (encoder->base.crtc)
active_links = to_intel_crtc(encoder->base.crtc)->active;
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
if (active_links) {
+ enum intel_display_power_domain domain;
+ intel_wakeref_t tc_cold_wref = tc_cold_block(dig_port, &domain);
+
+ dig_port->tc_mode = intel_tc_port_get_current_mode(dig_port);
+
if (!icl_tc_phy_is_connected(dig_port))
drm_dbg_kms(&i915->drm,
"Port %s: PHY disconnected with %d active link(s)\n",
dig_port->tc_port_name, active_links);
intel_tc_port_link_init_refcount(dig_port, active_links);
- goto out;
- }
+ dig_port->tc_lock_wakeref = tc_cold_block(dig_port,
+ &dig_port->tc_lock_power_domain);
- if (dig_port->tc_legacy_port)
- icl_tc_phy_connect(dig_port, 1);
+ tc_cold_unblock(dig_port, domain, tc_cold_wref);
+ }
-out:
drm_dbg_kms(&i915->drm, "Port %s: sanitize mode (%s)\n",
dig_port->tc_port_name,
tc_port_mode_name(dig_port->tc_mode));
- tc_cold_unblock(dig_port, tc_cold_wref);
mutex_unlock(&dig_port->tc_lock);
}
-static bool intel_tc_port_needs_reset(struct intel_digital_port *dig_port)
-{
- return intel_tc_port_get_target_mode(dig_port) != dig_port->tc_mode;
-}
-
/*
* The type-C ports are different because even when they are connected, they may
* not be available/usable by the graphics driver: see the comment on
@@ -648,78 +740,79 @@ bool intel_tc_port_connected(struct intel_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
bool is_connected;
- intel_wakeref_t tc_cold_wref;
intel_tc_port_lock(dig_port);
- tc_cold_wref = tc_cold_block(dig_port);
is_connected = tc_port_live_status_mask(dig_port) &
BIT(dig_port->tc_mode);
- tc_cold_unblock(dig_port, tc_cold_wref);
intel_tc_port_unlock(dig_port);
return is_connected;
}
static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
- int required_lanes, bool force_disconnect)
+ int required_lanes)
{
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- intel_wakeref_t wakeref;
-
- wakeref = intel_display_power_get(i915, POWER_DOMAIN_DISPLAY_CORE);
mutex_lock(&dig_port->tc_lock);
- if (!dig_port->tc_link_refcount) {
- intel_wakeref_t tc_cold_wref;
-
- tc_cold_wref = tc_cold_block(dig_port);
+ cancel_delayed_work(&dig_port->tc_disconnect_phy_work);
- if (force_disconnect || intel_tc_port_needs_reset(dig_port))
- intel_tc_port_reset_mode(dig_port, required_lanes,
- force_disconnect);
-
- tc_cold_unblock(dig_port, tc_cold_wref);
- }
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, required_lanes,
+ false);
- drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
- dig_port->tc_lock_wakeref = wakeref;
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode == TC_PORT_DISCONNECTED);
+ drm_WARN_ON(&i915->drm, dig_port->tc_mode != TC_PORT_TBT_ALT &&
+ !tc_phy_is_owned(dig_port));
}
void intel_tc_port_lock(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1, false);
+ __intel_tc_port_lock(dig_port, 1);
}
-void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+/**
+ * intel_tc_port_disconnect_phy_work - disconnect TypeC PHY from display port
+ * @dig_port: digital port
+ *
+ * Disconnect the given digital port from its TypeC PHY (handing back
+ * control of the PHY to the TypeC subsystem). This happens in a delayed
+ * manner after each AUX transaction and modeset disable.
+ */
+static void intel_tc_port_disconnect_phy_work(struct work_struct *work)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
- intel_wakeref_t wakeref = fetch_and_zero(&dig_port->tc_lock_wakeref);
+ struct intel_digital_port *dig_port =
+ container_of(work, struct intel_digital_port, tc_disconnect_phy_work.work);
- mutex_unlock(&dig_port->tc_lock);
+ mutex_lock(&dig_port->tc_lock);
+
+ if (!dig_port->tc_link_refcount)
+ intel_tc_port_update_mode(dig_port, 1, true);
- intel_display_power_put_async(i915, POWER_DOMAIN_DISPLAY_CORE,
- wakeref);
+ mutex_unlock(&dig_port->tc_lock);
}
/**
- * intel_tc_port_disconnect_phy: disconnect TypeC PHY from display port
+ * intel_tc_port_flush_work - flush the work disconnecting the PHY
* @dig_port: digital port
*
- * Disconnect the given digital port from its TypeC PHY (handing back the
- * control of the PHY to the TypeC subsystem). The only purpose of this
- * function is to force the disconnect even with a TypeC display output still
- * plugged to the TypeC connector, which is required by the TypeC firmwares
- * during system suspend and shutdown. Otherwise - during the unplug event
- * handling - the PHY ownership is released automatically by
- * intel_tc_port_reset_mode(), when calling this function is not required.
+ * Flush the delayed work disconnecting an idle PHY.
*/
-void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port)
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port)
{
- __intel_tc_port_lock(dig_port, 1, true);
- intel_tc_port_unlock(dig_port);
+ flush_delayed_work(&dig_port->tc_disconnect_phy_work);
+}
+
+void intel_tc_port_unlock(struct intel_digital_port *dig_port)
+{
+ if (!dig_port->tc_link_refcount && dig_port->tc_mode != TC_PORT_DISCONNECTED)
+ queue_delayed_work(system_unbound_wq, &dig_port->tc_disconnect_phy_work,
+ msecs_to_jiffies(1000));
+
+ mutex_unlock(&dig_port->tc_lock);
}
bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
@@ -731,21 +824,30 @@ bool intel_tc_port_ref_held(struct intel_digital_port *dig_port)
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes)
{
- __intel_tc_port_lock(dig_port, required_lanes, false);
+ __intel_tc_port_lock(dig_port, required_lanes);
dig_port->tc_link_refcount++;
intel_tc_port_unlock(dig_port);
}
void intel_tc_port_put_link(struct intel_digital_port *dig_port)
{
- mutex_lock(&dig_port->tc_lock);
- dig_port->tc_link_refcount--;
- mutex_unlock(&dig_port->tc_lock);
+ intel_tc_port_lock(dig_port);
+ --dig_port->tc_link_refcount;
+ intel_tc_port_unlock(dig_port);
+
+ /*
+ * Disconnecting the PHY after the PHY's PLL gets disabled may
+ * hang the system on ADL-P, so disconnect the PHY here synchronously.
+ * TODO: remove this once the root cause of the ordering requirement
+ * is found/fixed.
+ */
+ intel_tc_port_flush_work(dig_port);
}
static bool
tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port)
{
+ enum intel_display_power_domain domain;
intel_wakeref_t wakeref;
u32 val;
@@ -753,9 +855,9 @@ tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig
return false;
mutex_lock(&dig_port->tc_lock);
- wakeref = tc_cold_block(dig_port);
+ wakeref = tc_cold_block(dig_port, &domain);
val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1));
- tc_cold_unblock(dig_port, wakeref);
+ tc_cold_unblock(dig_port, domain, wakeref);
mutex_unlock(&dig_port->tc_lock);
drm_WARN_ON(&i915->drm, val == 0xffffffff);
@@ -795,15 +897,9 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
"%c/TC#%d", port_name(port), tc_port + 1);
mutex_init(&dig_port->tc_lock);
+ INIT_DELAYED_WORK(&dig_port->tc_disconnect_phy_work, intel_tc_port_disconnect_phy_work);
dig_port->tc_legacy_port = is_legacy;
+ dig_port->tc_mode = TC_PORT_DISCONNECTED;
dig_port->tc_link_refcount = 0;
tc_port_load_fia_params(i915, dig_port);
}
-
-bool intel_tc_cold_requires_aux_pw(struct intel_digital_port *dig_port)
-{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
-
- return (DISPLAY_VER(i915) == 11 && dig_port->tc_legacy_port) ||
- IS_ALDERLAKE_P(i915);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h
index 0c881f645e27..6b47b29f551c 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.h
+++ b/drivers/gpu/drm/i915/display/intel_tc.h
@@ -12,8 +12,11 @@
struct intel_digital_port;
struct intel_encoder;
+bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port);
+bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port);
+
bool intel_tc_port_connected(struct intel_encoder *encoder);
-void intel_tc_port_disconnect_phy(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port);
u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port);
@@ -24,6 +27,7 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
void intel_tc_port_sanitize(struct intel_digital_port *dig_port);
void intel_tc_port_lock(struct intel_digital_port *dig_port);
void intel_tc_port_unlock(struct intel_digital_port *dig_port);
+void intel_tc_port_flush_work(struct intel_digital_port *dig_port);
void intel_tc_port_get_link(struct intel_digital_port *dig_port,
int required_lanes);
void intel_tc_port_put_link(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index d02f09f7e750..88a398df9621 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -1529,7 +1529,7 @@ static void intel_tv_pre_enable(struct intel_atomic_state *state,
intel_de_write(dev_priv, TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
- assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
+ assert_transcoder_disabled(dev_priv, pipe_config->cpu_transcoder);
/* Filter ctl must be set before TV_WIN_SIZE */
tv_filter_ctl = TV_AUTO_SCALE;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index df3286aa6999..2275f99ce9d7 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -357,11 +357,9 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
return false;
}
-static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
+static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_i915_private *i915 = to_i915(crtc->base.dev);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (DISPLAY_VER(i915) >= 12)
return true;
@@ -547,9 +545,8 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
}
enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *i915 = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
@@ -566,7 +563,7 @@ intel_dsc_power_domain(const struct intel_crtc_state *crtc_state)
*/
if (DISPLAY_VER(i915) == 12 && !IS_ROCKETLAKE(i915) && pipe == PIPE_A)
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
- else if (is_pipe_dsc(crtc_state))
+ else if (is_pipe_dsc(crtc, cpu_transcoder))
return POWER_DOMAIN_PIPE(pipe);
else
return POWER_DOMAIN_TRANSCODER_VDSC_PW2;
@@ -577,6 +574,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
u32 pps_val = 0;
u32 rc_buf_thresh_dword[4];
@@ -601,7 +599,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
pps_val);
/*
@@ -625,7 +623,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
pps_val);
/*
@@ -650,7 +648,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
pps_val);
/*
@@ -675,7 +673,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
pps_val);
/*
@@ -700,7 +698,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
pps_val);
/*
@@ -725,7 +723,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
pps_val);
/*
@@ -752,7 +750,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
pps_val);
/*
@@ -777,7 +775,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
pps_val);
/*
@@ -802,7 +800,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
pps_val);
/*
@@ -827,7 +825,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
pps_val |= DSC_RC_MODEL_SIZE(vdsc_cfg->rc_model_size) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
pps_val);
/*
@@ -854,7 +852,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
pps_val);
/*
@@ -882,7 +880,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
pps_val);
/*
@@ -911,7 +909,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
rc_buf_thresh_dword[0]);
intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
@@ -968,7 +966,7 @@ static void intel_dsc_pps_configure(const struct intel_crtc_state *crtc_state)
drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
- if (!is_pipe_dsc(crtc_state)) {
+ if (!is_pipe_dsc(crtc, cpu_transcoder)) {
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
rc_range_params_dword[0]);
intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
@@ -1095,18 +1093,16 @@ static void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
-static i915_reg_t dss_ctl1_reg(const struct intel_crtc_state *crtc_state)
+static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL1(pipe) : DSS_CTL1;
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL1(crtc->pipe) : DSS_CTL1;
}
-static i915_reg_t dss_ctl2_reg(const struct intel_crtc_state *crtc_state)
+static i915_reg_t dss_ctl2_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
- enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
-
- return is_pipe_dsc(crtc_state) ? ICL_PIPE_DSS_CTL2(pipe) : DSS_CTL2;
+ return is_pipe_dsc(crtc, cpu_transcoder) ?
+ ICL_PIPE_DSS_CTL2(crtc->pipe) : DSS_CTL2;
}
static struct intel_crtc *
@@ -1142,7 +1138,7 @@ void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state)
else
dss_ctl1_val |= UNCOMPRESSED_JOINER_MASTER;
- intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
}
}
@@ -1176,8 +1172,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
if (!crtc_state->bigjoiner_slave)
dss_ctl1_val |= MASTER_BIG_JOINER_ENABLE;
}
- intel_de_write(dev_priv, dss_ctl1_reg(crtc_state), dss_ctl1_val);
- intel_de_write(dev_priv, dss_ctl2_reg(crtc_state), dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder), dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, crtc_state->cpu_transcoder), dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1188,8 +1184,8 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
/* Disable only if either of them is enabled */
if (old_crtc_state->dsc.compression_enable ||
old_crtc_state->bigjoiner) {
- intel_de_write(dev_priv, dss_ctl1_reg(old_crtc_state), 0);
- intel_de_write(dev_priv, dss_ctl2_reg(old_crtc_state), 0);
+ intel_de_write(dev_priv, dss_ctl1_reg(crtc, old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv, dss_ctl2_reg(crtc, old_crtc_state->cpu_transcoder), 0);
}
}
@@ -1199,7 +1195,7 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 dss_ctl1;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, crtc_state->cpu_transcoder));
if (dss_ctl1 & UNCOMPRESSED_JOINER_MASTER) {
crtc_state->bigjoiner = true;
crtc_state->bigjoiner_linked_crtc = intel_dsc_get_bigjoiner_secondary(crtc);
@@ -1214,9 +1210,10 @@ void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state)
void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
{
- struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
enum pipe pipe = crtc->pipe;
enum intel_display_power_domain power_domain;
intel_wakeref_t wakeref;
@@ -1225,14 +1222,14 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!intel_dsc_source_support(crtc_state))
return;
- power_domain = intel_dsc_power_domain(crtc_state);
+ power_domain = intel_dsc_power_domain(crtc, cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref)
return;
- dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc_state));
- dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc_state));
+ dss_ctl1 = intel_de_read(dev_priv, dss_ctl1_reg(crtc, cpu_transcoder));
+ dss_ctl2 = intel_de_read(dev_priv, dss_ctl2_reg(crtc, cpu_transcoder));
crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
if (!crtc_state->dsc.compression_enable)
@@ -1256,7 +1253,7 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
/* FIXME: add more state readout as needed */
/* PPS1 */
- if (!is_pipe_dsc(crtc_state))
+ if (!is_pipe_dsc(crtc, cpu_transcoder))
val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
else
val = intel_de_read(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index dfb1fd38deb4..0c5d80a572da 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -8,8 +8,10 @@
#include <linux/types.h>
-struct intel_encoder;
+enum transcoder;
+struct intel_crtc;
struct intel_crtc_state;
+struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
@@ -21,7 +23,7 @@ int intel_dsc_compute_params(struct intel_encoder *encoder,
void intel_uncompressed_joiner_get_config(struct intel_crtc_state *crtc_state);
void intel_dsc_get_config(struct intel_crtc_state *crtc_state);
enum intel_display_power_domain
-intel_dsc_power_domain(const struct intel_crtc_state *crtc_state);
+intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder);
struct intel_crtc *intel_dsc_get_bigjoiner_secondary(const struct intel_crtc *primary_crtc);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 724e7b04f3b6..a0e53a3b267a 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -18,6 +18,7 @@
#include "intel_sprite.h"
#include "skl_scaler.h"
#include "skl_universal_plane.h"
+#include "pxp/intel_pxp.h"
static const u32 skl_plane_formats[] = {
DRM_FORMAT_C8,
@@ -656,6 +657,7 @@ skl_disable_plane(struct intel_plane *plane,
skl_write_plane_wm(plane, crtc_state);
+ intel_psr2_disable_plane_sel_fetch(plane, crtc_state);
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
@@ -993,6 +995,11 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
u32 offset = plane_state->view.color_plane[color_plane].offset;
if (intel_fb_uses_dpt(fb)) {
+ /*
+ * The DPT object contains only one VMA, so the VMA's offset
+ * within the DPT is always 0.
+ */
+ WARN_ON(plane_state->dpt_vma->node.start);
WARN_ON(offset & 0x1fffff);
return offset >> 9;
} else {
@@ -1001,6 +1008,33 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state,
}
}
+static void intel_load_plane_csc_black(struct intel_plane *intel_plane)
+{
+ struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
+ enum pipe pipe = intel_plane->pipe;
+ enum plane_id plane = intel_plane->id;
+ u16 postoff = 0;
+
+ drm_dbg_kms(&dev_priv->drm, "plane color CTM to black %s:%d\n",
+ intel_plane->base.name, plane);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 0), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 1), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 2), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 3), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 4), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_COEFF(pipe, plane, 5), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 0), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 1), 0);
+ intel_de_write_fw(dev_priv, PLANE_CSC_PREOFF(pipe, plane, 2), 0);
+
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 0), postoff);
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 1), postoff);
+ intel_de_write_fw(dev_priv, PLANE_CSC_POSTOFF(pipe, plane, 2), postoff);
+}
+
static void
skl_program_plane(struct intel_plane *plane,
const struct intel_crtc_state *crtc_state,
@@ -1024,7 +1058,7 @@ skl_program_plane(struct intel_plane *plane,
u8 alpha = plane_state->hw.alpha >> 8;
u32 plane_color_ctl = 0, aux_dist = 0;
unsigned long irqflags;
- u32 keymsk, keymax;
+ u32 keymsk, keymax, plane_surf;
u32 plane_ctl = plane_state->ctl;
plane_ctl |= skl_plane_ctl_crtc(crtc_state);
@@ -1096,8 +1130,7 @@ skl_program_plane(struct intel_plane *plane,
(plane_state->view.color_plane[1].y << 16) |
plane_state->view.color_plane[1].x);
- if (!drm_atomic_crtc_needs_modeset(&crtc_state->uapi))
- intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
+ intel_psr2_program_plane_sel_fetch(plane, crtc_state, plane_state, color_plane);
/*
* Enable the scaler before the plane so that we don't
@@ -1113,8 +1146,23 @@ skl_program_plane(struct intel_plane *plane,
* the control register just before the surface register.
*/
intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
- intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ plane_surf = intel_plane_ggtt_offset(plane_state) + surf_addr;
+ plane_color_ctl = intel_de_read_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id));
+
+ /*
+ * FIXME: PXP session invalidation can hit at any time, even at commit time
+ * or after the commit; the displayed content will then be garbage.
+ */
+ if (plane_state->decrypt) {
+ plane_surf |= PLANE_SURF_DECRYPT;
+ } else if (plane_state->force_black) {
+ intel_load_plane_csc_black(plane);
+ plane_color_ctl |= PLANE_COLOR_PLANE_CSC_ENABLE;
+ }
+
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), plane_surf);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 0ee4ff341e25..07584695514b 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
+#include "intel_backlight.h"
#include "intel_connector.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -39,8 +40,8 @@
#include "intel_dsi.h"
#include "intel_fifo_underrun.h"
#include "intel_panel.h"
-#include "intel_sideband.h"
#include "skl_scaler.h"
+#include "vlv_sideband.h"
/* return pixels in terms of txbyteclkhs */
static u16 txbyteclkhs(u16 pixels, int bpp, int lane_count,
@@ -270,23 +271,19 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = container_of(encoder, struct intel_dsi,
base);
struct intel_connector *intel_connector = intel_dsi->attached_connector;
- const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- if (fixed_mode) {
- intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ ret = intel_panel_compute_config(intel_connector, adjusted_mode);
+ if (ret)
+ return ret;
- if (HAS_GMCH(dev_priv))
- ret = intel_gmch_panel_fitting(pipe_config, conn_state);
- else
- ret = intel_pch_panel_fitting(pipe_config, conn_state);
- if (ret)
- return ret;
- }
+ ret = intel_panel_fitting(pipe_config, conn_state);
+ if (ret)
+ return ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
@@ -883,7 +880,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state,
intel_dsi_port_enable(encoder, pipe_config);
}
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_backlight_enable(pipe_config, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
@@ -913,7 +910,7 @@ static void intel_dsi_disable(struct intel_atomic_state *state,
drm_dbg_kms(&i915->drm, "\n");
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_OFF);
- intel_panel_disable_backlight(old_conn_state);
+ intel_backlight_disable(old_conn_state);
/*
* According to the spec we should send SHUTDOWN before
@@ -1633,25 +1630,21 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ u32 allowed_scalers;
- if (connector->panel.fixed_mode) {
- u32 allowed_scalers;
-
- allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
- if (!HAS_GMCH(dev_priv))
- allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
+ allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
+ if (!HAS_GMCH(dev_priv))
+ allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);
- drm_connector_attach_scaling_mode_property(&connector->base,
- allowed_scalers);
+ drm_connector_attach_scaling_mode_property(&connector->base,
+ allowed_scalers);
- connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
+ connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- drm_connector_set_panel_orientation_with_quirk(
- &connector->base,
- intel_dsi_get_panel_orientation(connector),
- connector->panel.fixed_mode->hdisplay,
- connector->panel.fixed_mode->vdisplay);
- }
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
+ connector->panel.fixed_mode->hdisplay,
+ connector->panel.fixed_mode->vdisplay);
}
#define NS_KHZ_RATIO 1000000
@@ -1876,7 +1869,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
intel_encoder->get_config = intel_dsi_get_config;
- intel_encoder->update_pipe = intel_panel_update_backlight;
+ intel_encoder->update_pipe = intel_backlight_update;
intel_encoder->shutdown = intel_dsi_shutdown;
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -1964,7 +1957,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
intel_panel_init(&intel_connector->panel, fixed_mode, NULL);
- intel_panel_setup_backlight(connector, INVALID_PIPE);
+ intel_backlight_setup(intel_connector, INVALID_PIPE);
vlv_dsi_add_properties(intel_connector);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 90185b219447..5413b52ab6ba 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -31,7 +31,7 @@
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
static const u16 lfsr_converts[] = {
426, 469, 234, 373, 442, 221, 110, 311, 411, /* 62 - 70 */
@@ -568,3 +568,26 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
}
intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
+
+static void assert_dsi_pll(struct drm_i915_private *i915, bool state)
+{
+ bool cur_state;
+
+ vlv_cck_get(i915);
+ cur_state = vlv_cck_read(i915, CCK_REG_DSI_PLL_CONTROL) & DSI_PLL_VCO_EN;
+ vlv_cck_put(i915);
+
+ I915_STATE_WARN(cur_state != state,
+ "DSI PLL state assertion failure (expected %s, current %s)\n",
+ onoff(state), onoff(cur_state));
+}
+
+void assert_dsi_pll_enabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, true);
+}
+
+void assert_dsi_pll_disabled(struct drm_i915_private *i915)
+{
+ assert_dsi_pll(i915, false);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
index 6234e17259c1..7358bebef15c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c
@@ -4,6 +4,8 @@
* Copyright © 2014-2016 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "gt/intel_engine.h"
#include "i915_gem_ioctls.h"
@@ -36,7 +38,7 @@ static __always_inline u32 __busy_write_id(u16 id)
}
static __always_inline unsigned int
-__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
+__busy_set_if_active(struct dma_fence *fence, u32 (*flag)(u16 id))
{
const struct i915_request *rq;
@@ -46,29 +48,60 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id))
* to eventually flush us, but to minimise latency just ask the
* hardware.
*
- * Note we only report on the status of native fences.
+ * Note that we only report on the status of native fences, of which we
+ * currently have two kinds:
+ *
+ * 1. A composite fence (dma_fence_array) constructed of i915 requests
+ * created during a parallel submission. In this case we deconstruct the
+ * composite fence into individual i915 requests and check the status of
+ * each request.
+ *
+ * 2. A single i915 request.
*/
- if (!dma_fence_is_i915(fence))
+ if (dma_fence_is_array(fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence **child = array->fences;
+ unsigned int nchild = array->num_fences;
+
+ do {
+ struct dma_fence *current_fence = *child++;
+
+ /* Not an i915 fence, can't be busy per above */
+ if (!dma_fence_is_i915(current_fence) ||
+ !test_bit(I915_FENCE_FLAG_COMPOSITE,
+ &current_fence->flags)) {
+ return 0;
+ }
+
+ rq = to_request(current_fence);
+ if (!i915_request_completed(rq))
+ return flag(rq->engine->uabi_class);
+ } while (--nchild);
+
+ /* All requests in array complete, not busy */
return 0;
+ } else {
+ if (!dma_fence_is_i915(fence))
+ return 0;
- /* opencode to_request() in order to avoid const warnings */
- rq = container_of(fence, const struct i915_request, fence);
- if (i915_request_completed(rq))
- return 0;
+ rq = to_request(fence);
+ if (i915_request_completed(rq))
+ return 0;
- /* Beware type-expansion follies! */
- BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
- return flag(rq->engine->uabi_class);
+ /* Beware type-expansion follies! */
+ BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class));
+ return flag(rq->engine->uabi_class);
+ }
}
static __always_inline unsigned int
-busy_check_reader(const struct dma_fence *fence)
+busy_check_reader(struct dma_fence *fence)
{
return __busy_set_if_active(fence, __busy_read_flag);
}
static __always_inline unsigned int
-busy_check_writer(const struct dma_fence *fence)
+busy_check_writer(struct dma_fence *fence)
{
if (!fence)
return 0;
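For context, here is a minimal userspace sketch of how the busy state produced by this path is typically consumed through DRM_IOCTL_I915_GEM_BUSY. The split of the busy word into a reader bitmask (upper 16 bits, one bit per engine class) and a writer id (lower 16 bits) is my reading of the i915 uapi and should be treated as an assumption; i915_drm.h documents the authoritative encoding:

/* Hedged sketch: query object busyness from userspace. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_print_busy(int drm_fd, uint32_t handle)
{
        struct drm_i915_gem_busy busy;

        memset(&busy, 0, sizeof(busy));
        busy.handle = handle;

        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
                return -1;

        if (!busy.busy)
                printf("object %u is idle\n", handle);
        else
                printf("object %u busy: readers 0x%04x, writer 0x%04x\n",
                       handle, busy.busy >> 16, busy.busy & 0xffff);

        return 0;
}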
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 166bb46408a9..fb33d0322960 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -77,6 +77,8 @@
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_gem_context.h"
#include "i915_trace.h"
#include "i915_user_extensions.h"
@@ -186,10 +188,13 @@ static int validate_priority(struct drm_i915_private *i915,
return 0;
}
-static void proto_context_close(struct i915_gem_proto_context *pc)
+static void proto_context_close(struct drm_i915_private *i915,
+ struct i915_gem_proto_context *pc)
{
int i;
+ if (pc->pxp_wakeref)
+ intel_runtime_pm_put(&i915->runtime_pm, pc->pxp_wakeref);
if (pc->vm)
i915_vm_put(pc->vm);
if (pc->user_engines) {
@@ -241,6 +246,35 @@ static int proto_context_set_persistence(struct drm_i915_private *i915,
return 0;
}
+static int proto_context_set_protected(struct drm_i915_private *i915,
+ struct i915_gem_proto_context *pc,
+ bool protected)
+{
+ int ret = 0;
+
+ if (!protected) {
+ pc->uses_protected_content = false;
+ } else if (!intel_pxp_is_enabled(&i915->gt.pxp)) {
+ ret = -ENODEV;
+ } else if ((pc->user_flags & BIT(UCONTEXT_RECOVERABLE)) ||
+ !(pc->user_flags & BIT(UCONTEXT_BANNABLE))) {
+ ret = -EPERM;
+ } else {
+ pc->uses_protected_content = true;
+
+ /*
+ * Protected context usage requires the PXP session to be up,
+ * which in turn requires the device to be active.
+ */
+ pc->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+
+ if (!intel_pxp_is_active(&i915->gt.pxp))
+ ret = intel_pxp_start(&i915->gt.pxp);
+ }
+
+ return ret;
+}
+
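For context, a minimal userspace sketch of creating a context with I915_CONTEXT_PARAM_PROTECTED_CONTENT. Per the checks above the context must be bannable and must not be recoverable, so RECOVERABLE is cleared first through a chained SETPARAM extension; the default flag values and the error handling are assumptions here, see i915_drm.h for the authoritative uapi:

/* Hedged sketch: create a protected (PXP-backed) GEM context. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_protected_context(int drm_fd, uint32_t *ctx_id)
{
        struct drm_i915_gem_context_create_ext_setparam p_protected = {
                .base = { .name = I915_CONTEXT_CREATE_EXT_SETPARAM },
                .param = {
                        .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
                        .value = 1,
                },
        };
        struct drm_i915_gem_context_create_ext_setparam p_norecover = {
                .base = {
                        .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
                        .next_extension = (uintptr_t)&p_protected,
                },
                .param = {
                        .param = I915_CONTEXT_PARAM_RECOVERABLE,
                        .value = 0,
                },
        };
        struct drm_i915_gem_context_create_ext create = {
                .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                .extensions = (uintptr_t)&p_norecover,
        };

        /* RECOVERABLE is cleared before PROTECTED_CONTENT is applied. */
        if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &create))
                return -1;

        *ctx_id = create.ctx_id;
        return 0;
}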
static struct i915_gem_proto_context *
proto_context_create(struct drm_i915_private *i915, unsigned int flags)
{
@@ -269,7 +303,7 @@ proto_context_create(struct drm_i915_private *i915, unsigned int flags)
return pc;
proto_close:
- proto_context_close(pc);
+ proto_context_close(i915, pc);
return err;
}
@@ -442,6 +476,13 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
u16 idx, num_bonds;
int err, n;
+ if (GRAPHICS_VER(i915) >= 12 && !IS_TIGERLAKE(i915) &&
+ !IS_ROCKETLAKE(i915) && !IS_ALDERLAKE_S(i915)) {
+ drm_dbg(&i915->drm,
+ "Bonding on gen12+ aside from TGL, RKL, and ADL_S not supported\n");
+ return -ENODEV;
+ }
+
if (get_user(idx, &ext->virtual_index))
return -EFAULT;
@@ -515,9 +556,147 @@ set_proto_ctx_engines_bond(struct i915_user_extension __user *base, void *data)
return 0;
}
+static int
+set_proto_ctx_engines_parallel_submit(struct i915_user_extension __user *base,
+ void *data)
+{
+ struct i915_context_engines_parallel_submit __user *ext =
+ container_of_user(base, typeof(*ext), base);
+ const struct set_proto_ctx_engines *set = data;
+ struct drm_i915_private *i915 = set->i915;
+ u64 flags;
+ int err = 0, n, i, j;
+ u16 slot, width, num_siblings;
+ struct intel_engine_cs **siblings = NULL;
+ intel_engine_mask_t prev_mask;
+
+ /* FIXME: This is NIY for execlists */
+ if (!(intel_uc_uses_guc_submission(&i915->gt.uc)))
+ return -ENODEV;
+
+ if (get_user(slot, &ext->engine_index))
+ return -EFAULT;
+
+ if (get_user(width, &ext->width))
+ return -EFAULT;
+
+ if (get_user(num_siblings, &ext->num_siblings))
+ return -EFAULT;
+
+ if (slot >= set->num_engines) {
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ slot, set->num_engines);
+ return -EINVAL;
+ }
+
+ if (set->engines[slot].type != I915_GEM_ENGINE_TYPE_INVALID) {
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", slot);
+ return -EINVAL;
+ }
+
+ if (get_user(flags, &ext->flags))
+ return -EFAULT;
+
+ if (flags) {
+ drm_dbg(&i915->drm, "Unknown flags 0x%02llx\n", flags);
+ return -EINVAL;
+ }
+
+ for (n = 0; n < ARRAY_SIZE(ext->mbz64); n++) {
+ err = check_user_mbz(&ext->mbz64[n]);
+ if (err)
+ return err;
+ }
+
+ if (width < 2) {
+ drm_dbg(&i915->drm, "Width (%d) < 2\n", width);
+ return -EINVAL;
+ }
+
+ if (num_siblings < 1) {
+ drm_dbg(&i915->drm, "Number of siblings (%d) < 1\n",
+ num_siblings);
+ return -EINVAL;
+ }
+
+ siblings = kmalloc_array(num_siblings * width,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return -ENOMEM;
+
+ /* Create contexts / engines */
+ for (i = 0; i < width; ++i) {
+ intel_engine_mask_t current_mask = 0;
+ struct i915_engine_class_instance prev_engine;
+
+ for (j = 0; j < num_siblings; ++j) {
+ struct i915_engine_class_instance ci;
+
+ n = i * num_siblings + j;
+ if (copy_from_user(&ci, &ext->engines[n], sizeof(ci))) {
+ err = -EFAULT;
+ goto out_err;
+ }
+
+ siblings[n] =
+ intel_engine_lookup_user(i915, ci.engine_class,
+ ci.engine_instance);
+ if (!siblings[n]) {
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
+ err = -EINVAL;
+ goto out_err;
+ }
+
+ if (n) {
+ if (prev_engine.engine_class !=
+ ci.engine_class) {
+ drm_dbg(&i915->drm,
+ "Mismatched class %d, %d\n",
+ prev_engine.engine_class,
+ ci.engine_class);
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+
+ prev_engine = ci;
+ current_mask |= siblings[n]->logical_mask;
+ }
+
+ if (i > 0) {
+ if (current_mask != prev_mask << 1) {
+ drm_dbg(&i915->drm,
+ "Non contiguous logical mask 0x%x, 0x%x\n",
+ prev_mask, current_mask);
+ err = -EINVAL;
+ goto out_err;
+ }
+ }
+ prev_mask = current_mask;
+ }
+
+ set->engines[slot].type = I915_GEM_ENGINE_TYPE_PARALLEL;
+ set->engines[slot].num_siblings = num_siblings;
+ set->engines[slot].width = width;
+ set->engines[slot].siblings = siblings;
+
+ return 0;
+
+out_err:
+ kfree(siblings);
+
+ return err;
+}
+
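As a reading aid (not part of the patch), the flattened ext->engines[] array consumed by the copy_from_user() loop above is laid out slot-major, i.e. sibling j of logical slot i sits at index i * num_siblings + j:

	/*
	 * Hypothetical width = 2, num_siblings = 2 layout:
	 *
	 *   engines[0] = slot 0, sibling 0    engines[1] = slot 0, sibling 1
	 *   engines[2] = slot 1, sibling 0    engines[3] = slot 1, sibling 1
	 *
	 * All siblings of a slot must share one engine class, and the combined
	 * logical mask of slot i must equal the mask of slot i - 1 shifted left
	 * by one, matching the checks in the loop above.
	 */
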
static const i915_user_extension_fn set_proto_ctx_engines_extensions[] = {
[I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE] = set_proto_ctx_engines_balance,
[I915_CONTEXT_ENGINES_EXT_BOND] = set_proto_ctx_engines_bond,
+ [I915_CONTEXT_ENGINES_EXT_PARALLEL_SUBMIT] =
+ set_proto_ctx_engines_parallel_submit,
};
static int set_proto_ctx_engines(struct drm_i915_file_private *fpriv,
@@ -686,6 +865,8 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
ret = -EPERM;
else if (args->value)
pc->user_flags |= BIT(UCONTEXT_BANNABLE);
+ else if (pc->uses_protected_content)
+ ret = -EPERM;
else
pc->user_flags &= ~BIT(UCONTEXT_BANNABLE);
break;
@@ -693,10 +874,12 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
- else if (args->value)
- pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
- else
+ else if (!args->value)
pc->user_flags &= ~BIT(UCONTEXT_RECOVERABLE);
+ else if (pc->uses_protected_content)
+ ret = -EPERM;
+ else
+ pc->user_flags |= BIT(UCONTEXT_RECOVERABLE);
break;
case I915_CONTEXT_PARAM_PRIORITY:
@@ -724,6 +907,11 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
args->value);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ ret = proto_context_set_protected(fpriv->dev_priv, pc,
+ args->value);
+ break;
+
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
@@ -735,44 +923,6 @@ static int set_proto_ctx_param(struct drm_i915_file_private *fpriv,
return ret;
}
-static struct i915_address_space *
-context_get_vm_rcu(struct i915_gem_context *ctx)
-{
- GEM_BUG_ON(!rcu_access_pointer(ctx->vm));
-
- do {
- struct i915_address_space *vm;
-
- /*
- * We do not allow downgrading from full-ppgtt [to a shared
- * global gtt], so ctx->vm cannot become NULL.
- */
- vm = rcu_dereference(ctx->vm);
- if (!kref_get_unless_zero(&vm->ref))
- continue;
-
- /*
- * This ppgtt may have be reallocated between
- * the read and the kref, and reassigned to a third
- * context. In order to avoid inadvertent sharing
- * of this ppgtt with that third context (and not
- * src), we have to confirm that we have the same
- * ppgtt after passing through the strong memory
- * barrier implied by a successful
- * kref_get_unless_zero().
- *
- * Once we have acquired the current ppgtt of ctx,
- * we no longer care if it is released from ctx, as
- * it cannot be reallocated elsewhere.
- */
-
- if (vm == rcu_access_pointer(ctx->vm))
- return rcu_pointer_handoff(vm);
-
- i915_vm_put(vm);
- } while (1);
-}
-
static int intel_context_set_gem(struct intel_context *ce,
struct i915_gem_context *ctx,
struct intel_sseu sseu)
@@ -782,25 +932,18 @@ static int intel_context_set_gem(struct intel_context *ce,
GEM_BUG_ON(rcu_access_pointer(ce->gem_context));
RCU_INIT_POINTER(ce->gem_context, ctx);
+ GEM_BUG_ON(intel_context_is_pinned(ce));
ce->ring_size = SZ_16K;
- if (rcu_access_pointer(ctx->vm)) {
- struct i915_address_space *vm;
-
- rcu_read_lock();
- vm = context_get_vm_rcu(ctx); /* hmm */
- rcu_read_unlock();
-
- i915_vm_put(ce->vm);
- ce->vm = vm;
- }
+ i915_vm_put(ce->vm);
+ ce->vm = i915_gem_context_get_eb_vm(ctx);
if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
intel_engine_has_timeslices(ce->engine) &&
intel_engine_has_semaphores(ce->engine))
__set_bit(CONTEXT_USE_SEMAPHORES, &ce->flags);
- if (IS_ACTIVE(CONFIG_DRM_I915_REQUEST_TIMEOUT) &&
+ if (CONFIG_DRM_I915_REQUEST_TIMEOUT &&
ctx->i915->params.request_timeout_ms) {
unsigned int timeout_ms = ctx->i915->params.request_timeout_ms;
@@ -814,6 +957,25 @@ static int intel_context_set_gem(struct intel_context *ce,
return ret;
}
+static void __unpin_engines(struct i915_gem_engines *e, unsigned int count)
+{
+ while (count--) {
+ struct intel_context *ce = e->engines[count], *child;
+
+ if (!ce || !test_bit(CONTEXT_PERMA_PIN, &ce->flags))
+ continue;
+
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+ }
+}
+
+static void unpin_engines(struct i915_gem_engines *e)
+{
+ __unpin_engines(e, e->num_engines);
+}
+
static void __free_engines(struct i915_gem_engines *e, unsigned int count)
{
while (count--) {
@@ -929,6 +1091,40 @@ free_engines:
return err;
}
+static int perma_pin_contexts(struct intel_context *ce)
+{
+ struct intel_context *child;
+ int i = 0, j = 0, ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ ret = intel_context_pin(ce);
+ if (unlikely(ret))
+ return ret;
+
+ for_each_child(ce, child) {
+ ret = intel_context_pin(child);
+ if (unlikely(ret))
+ goto unwind;
+ ++i;
+ }
+
+ set_bit(CONTEXT_PERMA_PIN, &ce->flags);
+
+ return 0;
+
+unwind:
+ intel_context_unpin(ce);
+ for_each_child(ce, child) {
+ if (j++ < i)
+ intel_context_unpin(child);
+ else
+ break;
+ }
+
+ return ret;
+}
+
static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
unsigned int num_engines,
struct i915_gem_proto_engine *pe)
@@ -942,7 +1138,7 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
e->num_engines = num_engines;
for (n = 0; n < num_engines; n++) {
- struct intel_context *ce;
+ struct intel_context *ce, *child;
int ret;
switch (pe[n].type) {
@@ -952,7 +1148,13 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
case I915_GEM_ENGINE_TYPE_BALANCED:
ce = intel_engine_create_virtual(pe[n].siblings,
- pe[n].num_siblings);
+ pe[n].num_siblings, 0);
+ break;
+
+ case I915_GEM_ENGINE_TYPE_PARALLEL:
+ ce = intel_engine_create_parallel(pe[n].siblings,
+ pe[n].num_siblings,
+ pe[n].width);
break;
case I915_GEM_ENGINE_TYPE_INVALID:
@@ -973,6 +1175,30 @@ static struct i915_gem_engines *user_engines(struct i915_gem_context *ctx,
err = ERR_PTR(ret);
goto free_engines;
}
+ for_each_child(ce, child) {
+ ret = intel_context_set_gem(child, ctx, pe->sseu);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_engines;
+ }
+ }
+
+ /*
+ * XXX: Must be done after calling intel_context_set_gem as that
+ * function changes the ring size. The ring is allocated when
+ * the context is pinned. If the ring size is changed after
+ * allocation, the ring sizes will not match and the context will
+ * hang. Presumably with a bit of reordering we
+ * could move the perma-pin step to the backend function
+ * intel_engine_create_parallel.
+ */
+ if (pe[n].type == I915_GEM_ENGINE_TYPE_PARALLEL) {
+ ret = perma_pin_contexts(ce);
+ if (ret) {
+ err = ERR_PTR(ret);
+ goto free_engines;
+ }
+ }
}
return e;
@@ -982,9 +1208,11 @@ free_engines:
return err;
}
-void i915_gem_context_release(struct kref *ref)
+static void i915_gem_context_release_work(struct work_struct *work)
{
- struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+ struct i915_gem_context *ctx = container_of(work, typeof(*ctx),
+ release_work);
+ struct i915_address_space *vm;
trace_i915_context_free(ctx);
GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
@@ -992,6 +1220,13 @@ void i915_gem_context_release(struct kref *ref)
if (ctx->syncobj)
drm_syncobj_put(ctx->syncobj);
+ vm = ctx->vm;
+ if (vm)
+ i915_vm_put(vm);
+
+ if (ctx->pxp_wakeref)
+ intel_runtime_pm_put(&ctx->i915->runtime_pm, ctx->pxp_wakeref);
+
mutex_destroy(&ctx->engines_mutex);
mutex_destroy(&ctx->lut_mutex);
@@ -1001,6 +1236,13 @@ void i915_gem_context_release(struct kref *ref)
kfree_rcu(ctx, rcu);
}
+void i915_gem_context_release(struct kref *ref)
+{
+ struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref);
+
+ queue_work(ctx->i915->wq, &ctx->release_work);
+}
+
static inline struct i915_gem_engines *
__context_engines_static(const struct i915_gem_context *ctx)
{
@@ -1199,6 +1441,7 @@ static void context_close(struct i915_gem_context *ctx)
/* Flush any concurrent set_engines() */
mutex_lock(&ctx->engines_mutex);
+ unpin_engines(__context_engines_static(ctx));
engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
mutex_unlock(&ctx->engines_mutex);
@@ -1207,9 +1450,16 @@ static void context_close(struct i915_gem_context *ctx)
set_closed_name(ctx);
- vm = i915_gem_context_vm(ctx);
- if (vm)
+ vm = ctx->vm;
+ if (vm) {
+ /* i915_vm_close drops the final reference, which is a bit too
+ * early and could result in surprises with concurrent
+ * operations racing with this ctx close. Keep a full reference
+ * until the end.
+ */
+ i915_vm_get(vm);
i915_vm_close(vm);
+ }
ctx->file_priv = ERR_PTR(-EBADF);
@@ -1280,49 +1530,6 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
return 0;
}
-static inline struct i915_gem_engines *
-__context_engines_await(const struct i915_gem_context *ctx,
- bool *user_engines)
-{
- struct i915_gem_engines *engines;
-
- rcu_read_lock();
- do {
- engines = rcu_dereference(ctx->engines);
- GEM_BUG_ON(!engines);
-
- if (user_engines)
- *user_engines = i915_gem_context_user_engines(ctx);
-
- /* successful await => strong mb */
- if (unlikely(!i915_sw_fence_await(&engines->fence)))
- continue;
-
- if (likely(engines == rcu_access_pointer(ctx->engines)))
- break;
-
- i915_sw_fence_complete(&engines->fence);
- } while (1);
- rcu_read_unlock();
-
- return engines;
-}
-
-static void
-context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
- void *data)
-{
- struct i915_gem_engines_iter it;
- struct i915_gem_engines *e;
- struct intel_context *ce;
-
- e = __context_engines_await(ctx, NULL);
- for_each_gem_engine(ce, e, it)
- fn(ce, data);
- i915_sw_fence_complete(&e->fence);
-}
-
static struct i915_gem_context *
i915_gem_create_context(struct drm_i915_private *i915,
const struct i915_gem_proto_context *pc)
@@ -1342,6 +1549,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
ctx->sched = pc->sched;
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->link);
+ INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
spin_lock_init(&ctx->stale.lock);
INIT_LIST_HEAD(&ctx->stale.engines);
@@ -1351,7 +1559,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
} else if (HAS_FULL_PPGTT(i915)) {
struct i915_ppgtt *ppgtt;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, 0);
if (IS_ERR(ppgtt)) {
drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
PTR_ERR(ppgtt));
@@ -1361,7 +1569,7 @@ i915_gem_create_context(struct drm_i915_private *i915,
vm = &ppgtt->vm;
}
if (vm) {
- RCU_INIT_POINTER(ctx->vm, i915_vm_open(vm));
+ ctx->vm = i915_vm_open(vm);
/* i915_vm_open() takes a reference */
i915_vm_put(vm);
@@ -1402,6 +1610,11 @@ i915_gem_create_context(struct drm_i915_private *i915,
goto err_engines;
}
+ if (pc->uses_protected_content) {
+ ctx->pxp_wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ ctx->uses_protected_content = true;
+ }
+
trace_i915_context_create(ctx);
return ctx;
@@ -1473,7 +1686,7 @@ int i915_gem_context_open(struct drm_i915_private *i915,
}
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto err;
@@ -1500,7 +1713,7 @@ void i915_gem_context_close(struct drm_file *file)
unsigned long idx;
xa_for_each(&file_priv->proto_context_xa, idx, pc)
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
xa_destroy(&file_priv->proto_context_xa);
mutex_destroy(&file_priv->proto_context_lock);
@@ -1529,7 +1742,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
if (args->flags)
return -EINVAL;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1584,18 +1797,15 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
int err;
u32 id;
- if (!rcu_access_pointer(ctx->vm))
+ if (!i915_gem_context_has_full_ppgtt(ctx))
return -ENODEV;
- rcu_read_lock();
- vm = context_get_vm_rcu(ctx);
- rcu_read_unlock();
- if (!vm)
- return -ENODEV;
+ vm = ctx->vm;
+ GEM_BUG_ON(!vm);
err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
if (err)
- goto err_put;
+ return err;
i915_vm_open(vm);
@@ -1603,8 +1813,6 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
args->value = id;
args->size = 0;
-err_put:
- i915_vm_put(vm);
return err;
}
@@ -1772,23 +1980,11 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
-{
- struct i915_gem_context *ctx = arg;
-
- if (!intel_engine_has_timeslices(ce->engine))
- return;
-
- if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
- intel_engine_has_semaphores(ce->engine))
- intel_context_set_use_semaphores(ce);
- else
- intel_context_clear_use_semaphores(ce);
-}
-
static int set_priority(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
int err;
err = validate_priority(ctx->i915, args);
@@ -1796,7 +1992,27 @@ static int set_priority(struct i915_gem_context *ctx,
return err;
ctx->sched.priority = args->value;
- context_apply_all(ctx, __apply_priority, ctx);
+
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ if (!intel_engine_has_timeslices(ce->engine))
+ continue;
+
+ if (ctx->sched.priority >= I915_PRIORITY_NORMAL &&
+ intel_engine_has_semaphores(ce->engine))
+ intel_context_set_use_semaphores(ce);
+ else
+ intel_context_clear_use_semaphores(ce);
+ }
+ i915_gem_context_unlock_engines(ctx);
+
+ return 0;
+}
+
+static int get_protected(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ args->size = 0;
+ args->value = i915_gem_context_uses_protected_content(ctx);
return 0;
}
@@ -1824,6 +2040,8 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = -EPERM;
else if (args->value)
i915_gem_context_set_bannable(ctx);
+ else if (i915_gem_context_uses_protected_content(ctx))
+ ret = -EPERM; /* can't clear this for protected contexts */
else
i915_gem_context_clear_bannable(ctx);
break;
@@ -1831,10 +2049,12 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
case I915_CONTEXT_PARAM_RECOVERABLE:
if (args->size)
ret = -EINVAL;
- else if (args->value)
- i915_gem_context_set_recoverable(ctx);
- else
+ else if (!args->value)
i915_gem_context_clear_recoverable(ctx);
+ else if (i915_gem_context_uses_protected_content(ctx))
+ ret = -EPERM; /* can't set this for protected contexts */
+ else
+ i915_gem_context_set_recoverable(ctx);
break;
case I915_CONTEXT_PARAM_PRIORITY:
@@ -1849,6 +2069,7 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_RINGSIZE:
@@ -1927,7 +2148,7 @@ finalize_create_context_locked(struct drm_i915_file_private *file_priv,
old = xa_erase(&file_priv->proto_context_xa, id);
GEM_BUG_ON(old != pc);
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
/* One for the xarray and one for the caller */
return i915_gem_context_get(ctx);
@@ -2013,7 +2234,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_pc;
}
- proto_context_close(ext_data.pc);
+ proto_context_close(i915, ext_data.pc);
gem_context_register(ctx, ext_data.fpriv, id);
} else {
ret = proto_context_register(ext_data.fpriv, ext_data.pc, &id);
@@ -2027,7 +2248,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
return 0;
err_pc:
- proto_context_close(ext_data.pc);
+ proto_context_close(i915, ext_data.pc);
return ret;
}
@@ -2058,7 +2279,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
GEM_WARN_ON(ctx && pc);
if (pc)
- proto_context_close(pc);
+ proto_context_close(file_priv->dev_priv, pc);
if (ctx)
context_close(ctx);
@@ -2127,6 +2348,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_context_param *args = data;
struct i915_gem_context *ctx;
+ struct i915_address_space *vm;
int ret = 0;
ctx = i915_gem_context_lookup(file_priv, args->ctx_id);
@@ -2136,12 +2358,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
switch (args->param) {
case I915_CONTEXT_PARAM_GTT_SIZE:
args->size = 0;
- rcu_read_lock();
- if (rcu_access_pointer(ctx->vm))
- args->value = rcu_dereference(ctx->vm)->total;
- else
- args->value = to_i915(dev)->ggtt.vm.total;
- rcu_read_unlock();
+ vm = i915_gem_context_get_eb_vm(ctx);
+ args->value = vm->total;
+ i915_vm_put(vm);
+
break;
case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE:
@@ -2177,6 +2397,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_PROTECTED_CONTENT:
+ ret = get_protected(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_NO_ZEROMAP:
case I915_CONTEXT_PARAM_BAN_PERIOD:
case I915_CONTEXT_PARAM_ENGINES:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 18060536b0c2..babfecb17ad1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -108,6 +108,12 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx)
clear_bit(CONTEXT_USER_ENGINES, &ctx->flags);
}
+static inline bool
+i915_gem_context_uses_protected_content(const struct i915_gem_context *ctx)
+{
+ return ctx->uses_protected_content;
+}
+
/* i915_gem_context.c */
void i915_gem_init__contexts(struct drm_i915_private *i915);
@@ -154,17 +160,22 @@ i915_gem_context_vm(struct i915_gem_context *ctx)
return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
}
+static inline bool i915_gem_context_has_full_ppgtt(struct i915_gem_context *ctx)
+{
+ GEM_BUG_ON(!!ctx->vm != HAS_FULL_PPGTT(ctx->i915));
+
+ return !!ctx->vm;
+}
+
static inline struct i915_address_space *
-i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx)
+i915_gem_context_get_eb_vm(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
- rcu_read_lock();
- vm = rcu_dereference(ctx->vm);
+ vm = ctx->vm;
if (!vm)
vm = &ctx->i915->ggtt.vm;
vm = i915_vm_get(vm);
- rcu_read_unlock();
return vm;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 94c03a97cb77..282cdb8a5c5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -78,13 +78,16 @@ enum i915_gem_engine_type {
/** @I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set */
I915_GEM_ENGINE_TYPE_BALANCED,
+
+ /** @I915_GEM_ENGINE_TYPE_PARALLEL: A parallel engine set */
+ I915_GEM_ENGINE_TYPE_PARALLEL,
};
/**
* struct i915_gem_proto_engine - prototype engine
*
* This struct describes an engine that a context may contain. Engines
- * have three types:
+ * have four types:
*
* - I915_GEM_ENGINE_TYPE_INVALID: Invalid engines can be created but they
* show up as a NULL in i915_gem_engines::engines[i] and any attempt to
@@ -97,6 +100,10 @@ enum i915_gem_engine_type {
*
* - I915_GEM_ENGINE_TYPE_BALANCED: A load-balanced engine set, described
* i915_gem_proto_engine::num_siblings and i915_gem_proto_engine::siblings.
+ *
+ * - I915_GEM_ENGINE_TYPE_PARALLEL: A parallel submission engine set, described
+ * by i915_gem_proto_engine::width, i915_gem_proto_engine::num_siblings, and
+ * i915_gem_proto_engine::siblings.
*/
struct i915_gem_proto_engine {
/** @type: Type of this engine */
@@ -105,10 +112,13 @@ struct i915_gem_proto_engine {
/** @engine: Engine, for physical */
struct intel_engine_cs *engine;
- /** @num_siblings: Number of balanced siblings */
+ /** @num_siblings: Number of balanced or parallel siblings */
unsigned int num_siblings;
- /** @siblings: Balanced siblings */
+ /** @width: Width of each sibling */
+ unsigned int width;
+
+ /** @siblings: Balanced siblings or num_siblings * width for parallel */
struct intel_engine_cs **siblings;
/** @sseu: Client-set SSEU parameters */
@@ -198,6 +208,12 @@ struct i915_gem_proto_context {
/** @single_timeline: See &i915_gem_context.syncobj */
bool single_timeline;
+
+ /** @uses_protected_content: See &i915_gem_context.uses_protected_content */
+ bool uses_protected_content;
+
+ /** @pxp_wakeref: See &i915_gem_context.pxp_wakeref */
+ intel_wakeref_t pxp_wakeref;
};
/**
@@ -262,7 +278,7 @@ struct i915_gem_context {
* In other modes, this is a NULL pointer with the expectation that
* the caller uses the shared global GTT.
*/
- struct i915_address_space __rcu *vm;
+ struct i915_address_space *vm;
/**
* @pid: process id of creator
@@ -289,6 +305,18 @@ struct i915_gem_context {
struct kref ref;
/**
+ * @release_work:
+ *
+ * Work item for deferred cleanup, since i915_gem_context_put() tends to
+ * be called from hardirq context.
+ *
+ * FIXME: The only real reason for this is &i915_gem_engines.fence, all
+ * other callers are from process context and need at most some mild
+ * shuffling to pull the i915_gem_context_put() call out of a spinlock.
+ */
+ struct work_struct release_work;
+
+ /**
* @rcu: rcu_head for deferred freeing.
*/
struct rcu_head rcu;
@@ -309,6 +337,28 @@ struct i915_gem_context {
#define CONTEXT_CLOSED 0
#define CONTEXT_USER_ENGINES 1
+ /**
+ * @uses_protected_content: context uses PXP-encrypted objects.
+ *
+ * This flag can only be set at ctx creation time and it's immutable for
+ * the lifetime of the context. See I915_CONTEXT_PARAM_PROTECTED_CONTENT
+ * in uapi/drm/i915_drm.h for more info on setting restrictions and
+ * expected behaviour of marked contexts.
+ */
+ bool uses_protected_content;
+
+ /**
+ * @pxp_wakeref: wakeref to keep the device awake when PXP is in use
+ *
+ * PXP sessions are invalidated when the device is suspended, which in
+ * turn invalidates all contexts and objects using it. To keep the
+ * flow simple, we keep the device awake when contexts using PXP objects
+ * are in use. It is expected that the userspace application only uses
+ * PXP when the display is on, so taking a wakeref here shouldn't worsen
+ * our power metrics.
+ */
+ intel_wakeref_t pxp_wakeref;
+
/** @mutex: guards everything that isn't engines or handles_vma */
struct mutex mutex;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_create.c b/drivers/gpu/drm/i915/gem/i915_gem_create.c
index 23fee13a3384..8955d6abcef1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_create.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_create.c
@@ -6,6 +6,7 @@
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_trace.h"
@@ -82,21 +83,11 @@ static int i915_gem_publish(struct drm_i915_gem_object *obj,
return 0;
}
-/**
- * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
- * @i915: i915 private
- * @size: size of the buffer, in bytes
- * @placements: possible placement regions, in priority order
- * @n_placements: number of possible placement regions
- *
- * This function is exposed primarily for selftests and does very little
- * error checking. It is assumed that the set of placement regions has
- * already been verified to be valid.
- */
-struct drm_i915_gem_object *
-__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
- struct intel_memory_region **placements,
- unsigned int n_placements)
+static struct drm_i915_gem_object *
+__i915_gem_object_create_user_ext(struct drm_i915_private *i915, u64 size,
+ struct intel_memory_region **placements,
+ unsigned int n_placements,
+ unsigned int ext_flags)
{
struct intel_memory_region *mr = placements[0];
struct drm_i915_gem_object *obj;
@@ -135,6 +126,9 @@ __i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
GEM_BUG_ON(size != obj->base.size);
+ /* Add any flag set by create_ext options */
+ obj->flags |= ext_flags;
+
trace_i915_gem_object_create(obj);
return obj;
@@ -145,6 +139,26 @@ object_free:
return ERR_PTR(ret);
}
+/**
+ * Creates a new object using the same path as DRM_I915_GEM_CREATE_EXT
+ * @i915: i915 private
+ * @size: size of the buffer, in bytes
+ * @placements: possible placement regions, in priority order
+ * @n_placements: number of possible placement regions
+ *
+ * This function is exposed primarily for selftests and does very little
+ * error checking. It is assumed that the set of placement regions has
+ * already been verified to be valid.
+ */
+struct drm_i915_gem_object *
+__i915_gem_object_create_user(struct drm_i915_private *i915, u64 size,
+ struct intel_memory_region **placements,
+ unsigned int n_placements)
+{
+ return __i915_gem_object_create_user_ext(i915, size, placements,
+ n_placements, 0);
+}
+
int
i915_gem_dumb_create(struct drm_file *file,
struct drm_device *dev,
@@ -224,6 +238,7 @@ struct create_ext {
struct drm_i915_private *i915;
struct intel_memory_region *placements[INTEL_REGION_UNKNOWN];
unsigned int n_placements;
+ unsigned long flags;
};
static void repr_placements(char *buf, size_t size,
@@ -347,17 +362,34 @@ static int ext_set_placements(struct i915_user_extension __user *base,
{
struct drm_i915_gem_create_ext_memory_regions ext;
- if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
- return -ENODEV;
-
if (copy_from_user(&ext, base, sizeof(ext)))
return -EFAULT;
return set_placements(&ext, data);
}
+static int ext_set_protected(struct i915_user_extension __user *base, void *data)
+{
+ struct drm_i915_gem_create_ext_protected_content ext;
+ struct create_ext *ext_data = data;
+
+ if (copy_from_user(&ext, base, sizeof(ext)))
+ return -EFAULT;
+
+ if (ext.flags)
+ return -EINVAL;
+
+ if (!intel_pxp_is_enabled(&ext_data->i915->gt.pxp))
+ return -ENODEV;
+
+ ext_data->flags |= I915_BO_PROTECTED;
+
+ return 0;
+}
+
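A hypothetical userspace counterpart (not from this patch) of the ext_set_protected() handler above, using the gem-create extension chain from uapi/drm/i915_drm.h; the size value is arbitrary.

	struct drm_i915_gem_create_ext_protected_content protected_ext = {
		.base = { .name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT },
		.flags = 0,	/* must be zero, as checked above */
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,	/* arbitrary example size */
		.extensions = (uintptr_t)&protected_ext,
	};

	drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
	/* the new buffer handle is returned in create.handle */
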
static const i915_user_extension_fn create_extensions[] = {
[I915_GEM_CREATE_EXT_MEMORY_REGIONS] = ext_set_placements,
+ [I915_GEM_CREATE_EXT_PROTECTED_CONTENT] = ext_set_protected,
};
/**
@@ -392,9 +424,10 @@ i915_gem_create_ext_ioctl(struct drm_device *dev, void *data,
ext_data.n_placements = 1;
}
- obj = __i915_gem_object_create_user(i915, args->size,
- ext_data.placements,
- ext_data.n_placements);
+ obj = __i915_gem_object_create_user_ext(i915, args->size,
+ ext_data.placements,
+ ext_data.n_placements,
+ ext_data.flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index abb854281347..eae09d323049 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -235,6 +235,7 @@ struct dma_buf *i915_gem_prime_export(struct drm_gem_object *gem_obj, int flags)
static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *pages;
unsigned int sg_page_sizes;
@@ -245,8 +246,11 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
if (IS_ERR(pages))
return PTR_ERR(pages);
- sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
+ /* XXX: consider doing a vmap flush or something */
+ if (!HAS_LLC(i915) || i915_gem_object_can_bypass_llc(obj))
+ wbinvd_on_all_cpus();
+ sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
return 0;
@@ -304,7 +308,8 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
}
drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
- i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class,
+ I915_BO_ALLOC_USER);
obj->base.import_attach = attach;
obj->base.resv = dma_buf->resv;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1aa249908b64..4d7da07442f2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -21,6 +21,8 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_ring.h"
+#include "pxp/intel_pxp.h"
+
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -244,17 +246,25 @@ struct i915_execbuffer {
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
struct eb_vma *vma;
- struct intel_engine_cs *engine; /** engine to queue the request to */
+ struct intel_gt *gt; /* gt for the execbuf */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
- struct i915_request *request; /** our request to build */
- struct eb_vma *batch; /** identity of the batch obj/vma */
+ /** our requests to build */
+ struct i915_request *requests[MAX_ENGINE_INSTANCE + 1];
+ /** identity of the batch obj/vma */
+ struct eb_vma *batches[MAX_ENGINE_INSTANCE + 1];
struct i915_vma *trampoline; /** trampoline used for chaining */
+ /** used for excl fence in dma_resv objects when > 1 BB submitted */
+ struct dma_fence *composite_fence;
+
/** actual size of execobj[] as we may extend it for the cmdparser */
unsigned int buffer_count;
+ /* number of batches in execbuf IOCTL */
+ unsigned int num_batches;
+
/** list of vma not yet bound during reservation phase */
struct list_head unbound;
@@ -281,7 +291,8 @@ struct i915_execbuffer {
u64 invalid_flags; /** Set of execobj.flags that are invalid */
- u64 batch_len; /** Length of batch within object */
+ /** Length of batch within object */
+ u64 batch_len[MAX_ENGINE_INSTANCE + 1];
u32 batch_start_offset; /** Location within object of batch */
u32 batch_flags; /** Flags composed for emit_bb_start() */
struct intel_gt_buffer_pool_node *batch_pool; /** pool node for batch buffer */
@@ -299,14 +310,13 @@ struct i915_execbuffer {
};
static int eb_parse(struct i915_execbuffer *eb);
-static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb,
- bool throttle);
+static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle);
static void eb_unpin_engine(struct i915_execbuffer *eb);
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
- return intel_engine_requires_cmd_parser(eb->engine) ||
- (intel_engine_using_cmd_parser(eb->engine) &&
+ return intel_engine_requires_cmd_parser(eb->context->engine) ||
+ (intel_engine_using_cmd_parser(eb->context->engine) &&
eb->args->batch_len);
}
@@ -533,11 +543,21 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static void
+static inline bool
+is_batch_buffer(struct i915_execbuffer *eb, unsigned int buffer_idx)
+{
+ return eb->args->flags & I915_EXEC_BATCH_FIRST ?
+ buffer_idx < eb->num_batches :
+ buffer_idx >= eb->args->buffer_count - eb->num_batches;
+}
+
+static int
eb_add_vma(struct i915_execbuffer *eb,
- unsigned int i, unsigned batch_idx,
+ unsigned int *current_batch,
+ unsigned int i,
struct i915_vma *vma)
{
+ struct drm_i915_private *i915 = eb->i915;
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
struct eb_vma *ev = &eb->vma[i];
@@ -564,15 +584,43 @@ eb_add_vma(struct i915_execbuffer *eb,
* Note that actual hangs have only been observed on gen7, but for
* paranoia do it everywhere.
*/
- if (i == batch_idx) {
+ if (is_batch_buffer(eb, i)) {
if (entry->relocation_count &&
!(ev->flags & EXEC_OBJECT_PINNED))
ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = ev;
+ eb->batches[*current_batch] = ev;
+
+ if (unlikely(ev->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
+ return -EINVAL;
+ }
+
+ if (range_overflows_t(u64,
+ eb->batch_start_offset,
+ eb->args->batch_len,
+ ev->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
+ return -EINVAL;
+ }
+
+ if (eb->args->batch_len == 0)
+ eb->batch_len[*current_batch] = ev->vma->size -
+ eb->batch_start_offset;
+ else
+ eb->batch_len[*current_batch] = eb->args->batch_len;
+ if (unlikely(eb->batch_len[*current_batch] == 0)) { /* impossible! */
+ drm_dbg(&i915->drm, "Invalid batch length\n");
+ return -EINVAL;
+ }
+
+ ++*current_batch;
}
+
+ return 0;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -716,14 +764,6 @@ static int eb_reserve(struct i915_execbuffer *eb)
} while (1);
}
-static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
-{
- if (eb->args->flags & I915_EXEC_BATCH_FIRST)
- return 0;
- else
- return eb->buffer_count - 1;
-}
-
static int eb_select_context(struct i915_execbuffer *eb)
{
struct i915_gem_context *ctx;
@@ -733,7 +773,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
return PTR_ERR(ctx);
eb->gem_context = ctx;
- if (rcu_access_pointer(ctx->vm))
+ if (i915_gem_context_has_full_ppgtt(ctx))
eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
return 0;
@@ -759,11 +799,7 @@ static int __eb_add_lut(struct i915_execbuffer *eb,
/* Check that the context hasn't been closed in the meantime */
err = -EINTR;
if (!mutex_lock_interruptible(&ctx->lut_mutex)) {
- struct i915_address_space *vm = rcu_access_pointer(ctx->vm);
-
- if (unlikely(vm && vma->vm != vm))
- err = -EAGAIN; /* user racing with ctx set-vm */
- else if (likely(!i915_gem_context_is_closed(ctx)))
+ if (likely(!i915_gem_context_is_closed(ctx)))
err = radix_tree_insert(&ctx->handles_vma, handle, vma);
else
err = -ENOENT;
@@ -814,6 +850,22 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
if (unlikely(!obj))
return ERR_PTR(-ENOENT);
+ /*
+ * If the user has opted in to protected-object tracking, make
+ * sure the object encryption can be used.
+ * We only need to do this when the object is first used with
+ * this context, because the context itself will be banned when
+ * the protected objects become invalid.
+ */
+ if (i915_gem_context_uses_protected_content(eb->gem_context) &&
+ i915_gem_object_is_protected(obj)) {
+ err = intel_pxp_key_check(&vm->gt->pxp, obj, true);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+ }
+
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
@@ -832,9 +884,7 @@ static struct i915_vma *eb_lookup_vma(struct i915_execbuffer *eb, u32 handle)
static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
- struct drm_i915_private *i915 = eb->i915;
- unsigned int batch = eb_batch_index(eb);
- unsigned int i;
+ unsigned int i, current_batch = 0;
int err = 0;
INIT_LIST_HEAD(&eb->relocs);
@@ -854,7 +904,9 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
goto err;
}
- eb_add_vma(eb, i, batch, vma);
+ err = eb_add_vma(eb, &current_batch, i, vma);
+ if (err)
+ return err;
if (i915_gem_object_is_userptr(vma->obj)) {
err = i915_gem_object_userptr_submit_init(vma->obj);
@@ -877,26 +929,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
}
}
- if (unlikely(eb->batch->flags & EXEC_OBJECT_WRITE)) {
- drm_dbg(&i915->drm,
- "Attempting to use self-modifying batch buffer\n");
- return -EINVAL;
- }
-
- if (range_overflows_t(u64,
- eb->batch_start_offset, eb->batch_len,
- eb->batch->vma->size)) {
- drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
- return -EINVAL;
- }
-
- if (eb->batch_len == 0)
- eb->batch_len = eb->batch->vma->size - eb->batch_start_offset;
- if (unlikely(eb->batch_len == 0)) { /* impossible! */
- drm_dbg(&i915->drm, "Invalid batch length\n");
- return -EINVAL;
- }
-
return 0;
err:
@@ -1629,8 +1661,7 @@ static int eb_reinit_userptr(struct i915_execbuffer *eb)
return 0;
}
-static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb,
- struct i915_request *rq)
+static noinline int eb_relocate_parse_slow(struct i915_execbuffer *eb)
{
bool have_copy = false;
struct eb_vma *ev;
@@ -1646,21 +1677,6 @@ repeat:
eb_release_vmas(eb, false);
i915_gem_ww_ctx_fini(&eb->ww);
- if (rq) {
- /* nonblocking is always false */
- if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- rq = NULL;
-
- err = -EINTR;
- goto err_relock;
- }
-
- i915_request_put(rq);
- rq = NULL;
- }
-
/*
* We take 3 passes through the slowpath.
*
@@ -1687,28 +1703,21 @@ repeat:
if (!err)
err = eb_reinit_userptr(eb);
-err_relock:
i915_gem_ww_ctx_init(&eb->ww, true);
if (err)
goto out;
/* reacquire the objects */
repeat_validate:
- rq = eb_pin_engine(eb, false);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- rq = NULL;
+ err = eb_pin_engine(eb, false);
+ if (err)
goto err;
- }
-
- /* We didn't throttle, should be NULL */
- GEM_WARN_ON(rq);
err = eb_validate_vmas(eb);
if (err)
goto err;
- GEM_BUG_ON(!eb->batch);
+ GEM_BUG_ON(!eb->batches[0]);
list_for_each_entry(ev, &eb->relocs, reloc_link) {
if (!have_copy) {
@@ -1772,46 +1781,23 @@ out:
}
}
- if (rq)
- i915_request_put(rq);
-
return err;
}
static int eb_relocate_parse(struct i915_execbuffer *eb)
{
int err;
- struct i915_request *rq = NULL;
bool throttle = true;
retry:
- rq = eb_pin_engine(eb, throttle);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- rq = NULL;
+ err = eb_pin_engine(eb, throttle);
+ if (err) {
if (err != -EDEADLK)
return err;
goto err;
}
- if (rq) {
- bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
-
- /* Need to drop all locks now for throttling, take slowpath */
- err = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, 0);
- if (err == -ETIME) {
- if (nonblock) {
- err = -EWOULDBLOCK;
- i915_request_put(rq);
- goto err;
- }
- goto slow;
- }
- i915_request_put(rq);
- rq = NULL;
- }
-
/* only throttle once, even if we didn't need to throttle */
throttle = false;
@@ -1851,7 +1837,7 @@ err:
return err;
slow:
- err = eb_relocate_parse_slow(eb, rq);
+ err = eb_relocate_parse_slow(eb);
if (err)
/*
* If the user expects the execobject.offset and
@@ -1865,11 +1851,40 @@ slow:
return err;
}
+/*
+ * Use two helper loops for the order in which requests / batches are created
+ * and added to the backend. Requests are created in order from the parent to
+ * the last child. Requests are added in the reverse order, from the last child
+ * to the parent. This is done for locking reasons: the timeline lock is
+ * acquired during request creation and released when the request is added to
+ * the backend. To make lockdep happy (see intel_context_timeline_lock) this
+ * must be the ordering.
+ */
+#define for_each_batch_create_order(_eb, _i) \
+ for ((_i) = 0; (_i) < (_eb)->num_batches; ++(_i))
+#define for_each_batch_add_order(_eb, _i) \
+ BUILD_BUG_ON(!typecheck(int, _i)); \
+ for ((_i) = (_eb)->num_batches - 1; (_i) >= 0; --(_i))
+
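A condensed sketch of how the two macros above are paired later in this patch (see eb_requests_create() and eb_requests_add() below); the helpers are the ones introduced by this change and the loop bodies are abbreviated.

	int i;

	for_each_batch_create_order(eb, i)	/* i = 0 .. num_batches - 1, parent first */
		eb->requests[i] = i915_request_create(eb_find_context(eb, i));

	for_each_batch_add_order(eb, i)		/* i = num_batches - 1 .. 0, last child first */
		eb_request_add(eb, eb->requests[i], err, i == 0);
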
+static struct i915_request *
+eb_find_first_request_added(struct i915_execbuffer *eb)
+{
+ int i;
+
+ for_each_batch_add_order(eb, i)
+ if (eb->requests[i])
+ return eb->requests[i];
+
+ GEM_BUG_ON("Request not found");
+
+ return NULL;
+}
+
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
unsigned int i = count;
- int err = 0;
+ int err = 0, j;
while (i--) {
struct eb_vma *ev = &eb->vma[i];
@@ -1882,11 +1897,17 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
if (flags & EXEC_OBJECT_CAPTURE) {
struct i915_capture_list *capture;
- capture = kmalloc(sizeof(*capture), GFP_KERNEL);
- if (capture) {
- capture->next = eb->request->capture_list;
- capture->vma = vma;
- eb->request->capture_list = capture;
+ for_each_batch_create_order(eb, j) {
+ if (!eb->requests[j])
+ break;
+
+ capture = kmalloc(sizeof(*capture), GFP_KERNEL);
+ if (capture) {
+ capture->next =
+ eb->requests[j]->capture_list;
+ capture->vma = vma;
+ eb->requests[j]->capture_list = capture;
+ }
}
}
@@ -1901,20 +1922,43 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
* !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
* but gcc's optimiser doesn't handle that as well and emits
* two jumps instead of one. Maybe one day...
+ *
+ * FIXME: There is also sync flushing in set_pages(), which
+ * serves a different purpose(some of the time at least).
+ * serves a different purpose (some of the time, at least).
+ * We should consider:
+ *
+ * 1. Rip out the async flush code.
+ *
+ * 2. Or make the sync flushing use the async clflush path
+ * using mandatory fences underneath. Currently the below
+ * async flush happens after we bind the object.
*/
if (unlikely(obj->cache_dirty & ~obj->cache_coherent)) {
if (i915_gem_clflush_object(obj, 0))
flags &= ~EXEC_OBJECT_ASYNC;
}
+ /* We only need to await on the first request */
if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
err = i915_request_await_object
- (eb->request, obj, flags & EXEC_OBJECT_WRITE);
+ (eb_find_first_request_added(eb), obj,
+ flags & EXEC_OBJECT_WRITE);
}
- if (err == 0)
- err = i915_vma_move_to_active(vma, eb->request,
- flags | __EXEC_OBJECT_NO_RESERVE);
+ for_each_batch_add_order(eb, j) {
+ if (err)
+ break;
+ if (!eb->requests[j])
+ continue;
+
+ err = _i915_vma_move_to_active(vma, eb->requests[j],
+ j ? NULL :
+ eb->composite_fence ?
+ eb->composite_fence :
+ &eb->requests[j]->fence,
+ flags | __EXEC_OBJECT_NO_RESERVE);
+ }
}
#ifdef CONFIG_MMU_NOTIFIER
@@ -1945,11 +1989,16 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
goto err_skip;
/* Unconditionally flush any chipset caches (for streaming writes). */
- intel_gt_chipset_flush(eb->engine->gt);
+ intel_gt_chipset_flush(eb->gt);
return 0;
err_skip:
- i915_request_set_error_once(eb->request, err);
+ for_each_batch_create_order(eb, j) {
+ if (!eb->requests[j])
+ break;
+
+ i915_request_set_error_once(eb->requests[j], err);
+ }
return err;
}
@@ -2044,14 +2093,17 @@ static int eb_parse(struct i915_execbuffer *eb)
int err;
if (!eb_use_cmdparser(eb)) {
- batch = eb_dispatch_secure(eb, eb->batch->vma);
+ batch = eb_dispatch_secure(eb, eb->batches[0]->vma);
if (IS_ERR(batch))
return PTR_ERR(batch);
goto secure_batch;
}
- len = eb->batch_len;
+ if (intel_context_is_parallel(eb->context))
+ return -EINVAL;
+
+ len = eb->batch_len[0];
if (!CMDPARSER_USES_GGTT(eb->i915)) {
/*
* ppGTT backed shadow buffers must be mapped RO, to prevent
@@ -2065,11 +2117,11 @@ static int eb_parse(struct i915_execbuffer *eb)
} else {
len += I915_CMD_PARSER_TRAMPOLINE_SIZE;
}
- if (unlikely(len < eb->batch_len)) /* last paranoid check of overflow */
+ if (unlikely(len < eb->batch_len[0])) /* last paranoid check of overflow */
return -EINVAL;
if (!pool) {
- pool = intel_gt_get_buffer_pool(eb->engine->gt, len,
+ pool = intel_gt_get_buffer_pool(eb->gt, len,
I915_MAP_WB);
if (IS_ERR(pool))
return PTR_ERR(pool);
@@ -2094,7 +2146,7 @@ static int eb_parse(struct i915_execbuffer *eb)
trampoline = shadow;
shadow = shadow_batch_pin(eb, pool->obj,
- &eb->engine->gt->ggtt->vm,
+ &eb->gt->ggtt->vm,
PIN_GLOBAL);
if (IS_ERR(shadow)) {
err = PTR_ERR(shadow);
@@ -2116,26 +2168,29 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- err = intel_engine_cmd_parser(eb->engine,
- eb->batch->vma,
+ err = intel_engine_cmd_parser(eb->context->engine,
+ eb->batches[0]->vma,
eb->batch_start_offset,
- eb->batch_len,
+ eb->batch_len[0],
shadow, trampoline);
if (err)
goto err_unpin_batch;
- eb->batch = &eb->vma[eb->buffer_count++];
- eb->batch->vma = i915_vma_get(shadow);
- eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batches[0] = &eb->vma[eb->buffer_count++];
+ eb->batches[0]->vma = i915_vma_get(shadow);
+ eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
secure_batch:
if (batch) {
- eb->batch = &eb->vma[eb->buffer_count++];
- eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
- eb->batch->vma = i915_vma_get(batch);
+ if (intel_context_is_parallel(eb->context))
+ return -EINVAL;
+
+ eb->batches[0] = &eb->vma[eb->buffer_count++];
+ eb->batches[0]->flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batches[0]->vma = i915_vma_get(batch);
}
return 0;
@@ -2151,19 +2206,18 @@ err:
return err;
}
-static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
+static int eb_request_submit(struct i915_execbuffer *eb,
+ struct i915_request *rq,
+ struct i915_vma *batch,
+ u64 batch_len)
{
int err;
- if (intel_context_nopreempt(eb->context))
- __set_bit(I915_FENCE_FLAG_NOPREEMPT, &eb->request->fence.flags);
-
- err = eb_move_to_gpu(eb);
- if (err)
- return err;
+ if (intel_context_nopreempt(rq->context))
+ __set_bit(I915_FENCE_FLAG_NOPREEMPT, &rq->fence.flags);
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
- err = i915_reset_gen7_sol_offsets(eb->request);
+ err = i915_reset_gen7_sol_offsets(rq);
if (err)
return err;
}
@@ -2174,26 +2228,26 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
* allows us to determine if the batch is still waiting on the GPU
* or actually running by checking the breadcrumb.
*/
- if (eb->engine->emit_init_breadcrumb) {
- err = eb->engine->emit_init_breadcrumb(eb->request);
+ if (rq->context->engine->emit_init_breadcrumb) {
+ err = rq->context->engine->emit_init_breadcrumb(rq);
if (err)
return err;
}
- err = eb->engine->emit_bb_start(eb->request,
- batch->node.start +
- eb->batch_start_offset,
- eb->batch_len,
- eb->batch_flags);
+ err = rq->context->engine->emit_bb_start(rq,
+ batch->node.start +
+ eb->batch_start_offset,
+ batch_len,
+ eb->batch_flags);
if (err)
return err;
if (eb->trampoline) {
+ GEM_BUG_ON(intel_context_is_parallel(rq->context));
GEM_BUG_ON(eb->batch_start_offset);
- err = eb->engine->emit_bb_start(eb->request,
- eb->trampoline->node.start +
- eb->batch_len,
- 0, 0);
+ err = rq->context->engine->emit_bb_start(rq,
+ eb->trampoline->node.start +
+ batch_len, 0, 0);
if (err)
return err;
}
@@ -2201,6 +2255,27 @@ static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
return 0;
}
+static int eb_submit(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+ int err;
+
+ err = eb_move_to_gpu(eb);
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ trace_i915_request_queue(eb->requests[i], eb->batch_flags);
+ if (!err)
+ err = eb_request_submit(eb, eb->requests[i],
+ eb->batches[i]->vma,
+ eb->batch_len[i]);
+ }
+
+ return err;
+}
+
static int num_vcs_engines(const struct drm_i915_private *i915)
{
return hweight_long(VDBOX_MASK(&i915->gt));
@@ -2266,26 +2341,11 @@ static struct i915_request *eb_throttle(struct i915_execbuffer *eb, struct intel
return i915_request_get(rq);
}
-static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
+static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce,
+ bool throttle)
{
- struct intel_context *ce = eb->context;
struct intel_timeline *tl;
struct i915_request *rq = NULL;
- int err;
-
- GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
-
- if (unlikely(intel_context_is_banned(ce)))
- return ERR_PTR(-EIO);
-
- /*
- * Pinning the contexts may generate requests in order to acquire
- * GGTT space, so do this first before we reserve a seqno for
- * ourselves.
- */
- err = intel_context_pin_ww(ce, &eb->ww);
- if (err)
- return ERR_PTR(err);
/*
* Take a local wakeref for preparing to dispatch the execbuf as
@@ -2296,33 +2356,108 @@ static struct i915_request *eb_pin_engine(struct i915_execbuffer *eb, bool throt
* taken on the engine, and the parent device.
*/
tl = intel_context_timeline_lock(ce);
- if (IS_ERR(tl)) {
- intel_context_unpin(ce);
- return ERR_CAST(tl);
- }
+ if (IS_ERR(tl))
+ return PTR_ERR(tl);
intel_context_enter(ce);
if (throttle)
rq = eb_throttle(eb, ce);
intel_context_timeline_unlock(tl);
+ if (rq) {
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout = nonblock ? 0 : MAX_SCHEDULE_TIMEOUT;
+
+ if (i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE,
+ timeout) < 0) {
+ i915_request_put(rq);
+
+ tl = intel_context_timeline_lock(ce);
+ intel_context_exit(ce);
+ intel_context_timeline_unlock(tl);
+
+ if (nonblock)
+ return -EWOULDBLOCK;
+ else
+ return -EINTR;
+ }
+ i915_request_put(rq);
+ }
+
+ return 0;
+}
+
+static int eb_pin_engine(struct i915_execbuffer *eb, bool throttle)
+{
+ struct intel_context *ce = eb->context, *child;
+ int err;
+ int i = 0, j = 0;
+
+ GEM_BUG_ON(eb->args->flags & __EXEC_ENGINE_PINNED);
+
+ if (unlikely(intel_context_is_banned(ce)))
+ return -EIO;
+
+ /*
+ * Pinning the contexts may generate requests in order to acquire
+ * GGTT space, so do this first before we reserve a seqno for
+ * ourselves.
+ */
+ err = intel_context_pin_ww(ce, &eb->ww);
+ if (err)
+ return err;
+ for_each_child(ce, child) {
+ err = intel_context_pin_ww(child, &eb->ww);
+ GEM_BUG_ON(err); /* perma-pinned should incr a counter */
+ }
+
+ for_each_child(ce, child) {
+ err = eb_pin_timeline(eb, child, throttle);
+ if (err)
+ goto unwind;
+ ++i;
+ }
+ err = eb_pin_timeline(eb, ce, throttle);
+ if (err)
+ goto unwind;
+
eb->args->flags |= __EXEC_ENGINE_PINNED;
- return rq;
+ return 0;
+
+unwind:
+ for_each_child(ce, child) {
+ if (j++ < i) {
+ mutex_lock(&child->timeline->mutex);
+ intel_context_exit(child);
+ mutex_unlock(&child->timeline->mutex);
+ }
+ }
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+ return err;
}
static void eb_unpin_engine(struct i915_execbuffer *eb)
{
- struct intel_context *ce = eb->context;
- struct intel_timeline *tl = ce->timeline;
+ struct intel_context *ce = eb->context, *child;
if (!(eb->args->flags & __EXEC_ENGINE_PINNED))
return;
eb->args->flags &= ~__EXEC_ENGINE_PINNED;
- mutex_lock(&tl->mutex);
+ for_each_child(ce, child) {
+ mutex_lock(&child->timeline->mutex);
+ intel_context_exit(child);
+ mutex_unlock(&child->timeline->mutex);
+
+ intel_context_unpin(child);
+ }
+
+ mutex_lock(&ce->timeline->mutex);
intel_context_exit(ce);
- mutex_unlock(&tl->mutex);
+ mutex_unlock(&ce->timeline->mutex);
intel_context_unpin(ce);
}
@@ -2373,7 +2508,7 @@ eb_select_legacy_ring(struct i915_execbuffer *eb)
static int
eb_select_engine(struct i915_execbuffer *eb)
{
- struct intel_context *ce;
+ struct intel_context *ce, *child;
unsigned int idx;
int err;
@@ -2386,6 +2521,20 @@ eb_select_engine(struct i915_execbuffer *eb)
if (IS_ERR(ce))
return PTR_ERR(ce);
+ if (intel_context_is_parallel(ce)) {
+ if (eb->buffer_count < ce->parallel.number_children + 1) {
+ intel_context_put(ce);
+ return -EINVAL;
+ }
+ if (eb->batch_start_offset || eb->args->batch_len) {
+ intel_context_put(ce);
+ return -EINVAL;
+ }
+ }
+ eb->num_batches = ce->parallel.number_children + 1;
+
+ for_each_child(ce, child)
+ intel_context_get(child);
intel_gt_pm_get(ce->engine->gt);
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
@@ -2393,6 +2542,13 @@ eb_select_engine(struct i915_execbuffer *eb)
if (err)
goto err;
}
+ for_each_child(ce, child) {
+ if (!test_bit(CONTEXT_ALLOC_BIT, &child->flags)) {
+ err = intel_context_alloc_state(child);
+ if (err)
+ goto err;
+ }
+ }
/*
* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
@@ -2403,7 +2559,7 @@ eb_select_engine(struct i915_execbuffer *eb)
goto err;
eb->context = ce;
- eb->engine = ce->engine;
+ eb->gt = ce->engine->gt;
/*
* Make sure engine pool stays alive even if we call intel_context_put
@@ -2414,6 +2570,8 @@ eb_select_engine(struct i915_execbuffer *eb)
err:
intel_gt_pm_put(ce->engine->gt);
+ for_each_child(ce, child)
+ intel_context_put(child);
intel_context_put(ce);
return err;
}
@@ -2421,7 +2579,11 @@ err:
static void
eb_put_engine(struct i915_execbuffer *eb)
{
- intel_gt_pm_put(eb->engine->gt);
+ struct intel_context *child;
+
+ intel_gt_pm_put(eb->gt);
+ for_each_child(eb->context, child)
+ intel_context_put(child);
intel_context_put(eb->context);
}
@@ -2644,7 +2806,8 @@ static void put_fence_array(struct eb_fence *fences, int num_fences)
}
static int
-await_fence_array(struct i915_execbuffer *eb)
+await_fence_array(struct i915_execbuffer *eb,
+ struct i915_request *rq)
{
unsigned int n;
int err;
@@ -2658,8 +2821,7 @@ await_fence_array(struct i915_execbuffer *eb)
if (!eb->fences[n].dma_fence)
continue;
- err = i915_request_await_dma_fence(eb->request,
- eb->fences[n].dma_fence);
+ err = i915_request_await_dma_fence(rq, eb->fences[n].dma_fence);
if (err < 0)
return err;
}
@@ -2667,9 +2829,9 @@ await_fence_array(struct i915_execbuffer *eb)
return 0;
}
-static void signal_fence_array(const struct i915_execbuffer *eb)
+static void signal_fence_array(const struct i915_execbuffer *eb,
+ struct dma_fence * const fence)
{
- struct dma_fence * const fence = &eb->request->fence;
unsigned int n;
for (n = 0; n < eb->num_fences; n++) {
@@ -2717,9 +2879,9 @@ static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
break;
}
-static int eb_request_add(struct i915_execbuffer *eb, int err)
+static int eb_request_add(struct i915_execbuffer *eb, struct i915_request *rq,
+ int err, bool last_parallel)
{
- struct i915_request *rq = eb->request;
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
struct i915_request *prev;
@@ -2741,6 +2903,17 @@ static int eb_request_add(struct i915_execbuffer *eb, int err)
err = -ENOENT; /* override any transient errors */
}
+ if (intel_context_is_parallel(eb->context)) {
+ if (err) {
+ __i915_request_skip(rq);
+ set_bit(I915_FENCE_FLAG_SKIP_PARALLEL,
+ &rq->fence.flags);
+ }
+ if (last_parallel)
+ set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+ &rq->fence.flags);
+ }
+
__i915_request_queue(rq, &attr);
/* Try to clean up the client's timeline after submitting the request */
@@ -2752,6 +2925,25 @@ static int eb_request_add(struct i915_execbuffer *eb, int err)
return err;
}
+static int eb_requests_add(struct i915_execbuffer *eb, int err)
+{
+ int i;
+
+ /*
+ * We iterate in the reverse order of creation to release the timeline
+ * mutexes in the same order.
+ */
+ for_each_batch_add_order(eb, i) {
+ struct i915_request *rq = eb->requests[i];
+
+ if (!rq)
+ continue;
+ err |= eb_request_add(eb, rq, err, i == 0);
+ }
+
+ return err;
+}
+
static const i915_user_extension_fn execbuf_extensions[] = {
[DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES] = parse_timeline_fences,
};
@@ -2778,6 +2970,185 @@ parse_execbuf2_extensions(struct drm_i915_gem_execbuffer2 *args,
eb);
}
+static void eb_requests_get(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ i915_request_get(eb->requests[i]);
+ }
+}
+
+static void eb_requests_put(struct i915_execbuffer *eb)
+{
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ if (!eb->requests[i])
+ break;
+
+ i915_request_put(eb->requests[i]);
+ }
+}
+
+static struct sync_file *
+eb_composite_fence_create(struct i915_execbuffer *eb, int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ struct dma_fence_array *fence_array;
+ struct dma_fence **fences;
+ unsigned int i;
+
+ GEM_BUG_ON(!intel_context_is_parent(eb->context));
+
+ fences = kmalloc_array(eb->num_batches, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_batch_create_order(eb, i) {
+ fences[i] = &eb->requests[i]->fence;
+ __set_bit(I915_FENCE_FLAG_COMPOSITE,
+ &eb->requests[i]->fence.flags);
+ }
+
+ fence_array = dma_fence_array_create(eb->num_batches,
+ fences,
+ eb->context->parallel.fence_context,
+ eb->context->parallel.seqno,
+ false);
+ if (!fence_array) {
+ kfree(fences);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Move ownership to the dma_fence_array created above */
+ for_each_batch_create_order(eb, i)
+ dma_fence_get(fences[i]);
+
+ if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&fence_array->base);
+ /* sync_file now owns fence_array, drop creation ref */
+ dma_fence_put(&fence_array->base);
+ if (!out_fence)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ eb->composite_fence = &fence_array->base;
+
+ return out_fence;
+}
+
+static struct sync_file *
+eb_fences_add(struct i915_execbuffer *eb, struct i915_request *rq,
+ struct dma_fence *in_fence, int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ int err;
+
+ if (unlikely(eb->gem_context->syncobj)) {
+ struct dma_fence *fence;
+
+ fence = drm_syncobj_fence_get(eb->gem_context->syncobj);
+ err = i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ if (in_fence) {
+ if (eb->args->flags & I915_EXEC_FENCE_SUBMIT)
+ err = i915_request_await_execution(rq, in_fence);
+ else
+ err = i915_request_await_dma_fence(rq, in_fence);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ if (eb->fences) {
+ err = await_fence_array(eb, rq);
+ if (err)
+ return ERR_PTR(err);
+ }
+
+ if (intel_context_is_parallel(eb->context)) {
+ out_fence = eb_composite_fence_create(eb, out_fence_fd);
+ if (IS_ERR(out_fence))
+ return ERR_PTR(-ENOMEM);
+ } else if (out_fence_fd != -1) {
+ out_fence = sync_file_create(&rq->fence);
+ if (!out_fence)
+ return ERR_PTR(-ENOMEM);
+ }
+
+ return out_fence;
+}
+
+static struct intel_context *
+eb_find_context(struct i915_execbuffer *eb, unsigned int context_number)
+{
+ struct intel_context *child;
+
+ if (likely(context_number == 0))
+ return eb->context;
+
+ for_each_child(eb->context, child)
+ if (!--context_number)
+ return child;
+
+ GEM_BUG_ON("Context not found");
+
+ return NULL;
+}
+
+static struct sync_file *
+eb_requests_create(struct i915_execbuffer *eb, struct dma_fence *in_fence,
+ int out_fence_fd)
+{
+ struct sync_file *out_fence = NULL;
+ unsigned int i;
+
+ for_each_batch_create_order(eb, i) {
+ /* Allocate a request for this batch buffer nice and early. */
+ eb->requests[i] = i915_request_create(eb_find_context(eb, i));
+ if (IS_ERR(eb->requests[i])) {
+ out_fence = ERR_PTR(PTR_ERR(eb->requests[i]));
+ eb->requests[i] = NULL;
+ return out_fence;
+ }
+
+ /*
+ * Only the first request added (committed to backend) has to
+ * take the in-fences into account, as all subsequent requests
+ * will have fences inserted between them.
+ */
+ if (i + 1 == eb->num_batches) {
+ out_fence = eb_fences_add(eb, eb->requests[i],
+ in_fence, out_fence_fd);
+ if (IS_ERR(out_fence))
+ return out_fence;
+ }
+
+ /*
+ * Whilst this request exists, batch_obj will be on the
+ * active_list, and so will hold the active reference. Only when
+ * this request is retired will the batch_obj be moved onto
+ * the inactive_list and lose its active reference. Hence we do
+ * not need to explicitly hold another reference here.
+ */
+ eb->requests[i]->batch = eb->batches[i]->vma;
+ if (eb->batch_pool) {
+ GEM_BUG_ON(intel_context_is_parallel(eb->context));
+ intel_gt_buffer_pool_mark_active(eb->batch_pool,
+ eb->requests[i]);
+ }
+ }
+
+ return out_fence;
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2788,7 +3159,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct i915_execbuffer eb;
struct dma_fence *in_fence = NULL;
struct sync_file *out_fence = NULL;
- struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2812,12 +3182,15 @@ i915_gem_do_execbuffer(struct drm_device *dev,
eb.buffer_count = args->buffer_count;
eb.batch_start_offset = args->batch_start_offset;
- eb.batch_len = args->batch_len;
eb.trampoline = NULL;
eb.fences = NULL;
eb.num_fences = 0;
+ memset(eb.requests, 0, sizeof(struct i915_request *) *
+ ARRAY_SIZE(eb.requests));
+ eb.composite_fence = NULL;
+
eb.batch_flags = 0;
if (args->flags & I915_EXEC_SECURE) {
if (GRAPHICS_VER(i915) >= 11)
@@ -2901,70 +3274,25 @@ i915_gem_do_execbuffer(struct drm_device *dev,
ww_acquire_done(&eb.ww.ctx);
- batch = eb.batch->vma;
-
- /* Allocate a request for this batch buffer nice and early. */
- eb.request = i915_request_create(eb.context);
- if (IS_ERR(eb.request)) {
- err = PTR_ERR(eb.request);
- goto err_vma;
- }
-
- if (unlikely(eb.gem_context->syncobj)) {
- struct dma_fence *fence;
-
- fence = drm_syncobj_fence_get(eb.gem_context->syncobj);
- err = i915_request_await_dma_fence(eb.request, fence);
- dma_fence_put(fence);
- if (err)
- goto err_ext;
- }
-
- if (in_fence) {
- if (args->flags & I915_EXEC_FENCE_SUBMIT)
- err = i915_request_await_execution(eb.request,
- in_fence);
- else
- err = i915_request_await_dma_fence(eb.request,
- in_fence);
- if (err < 0)
- goto err_request;
- }
-
- if (eb.fences) {
- err = await_fence_array(&eb);
- if (err)
- goto err_request;
- }
-
- if (out_fence_fd != -1) {
- out_fence = sync_file_create(&eb.request->fence);
- if (!out_fence) {
- err = -ENOMEM;
+ out_fence = eb_requests_create(&eb, in_fence, out_fence_fd);
+ if (IS_ERR(out_fence)) {
+ err = PTR_ERR(out_fence);
+ if (eb.requests[0])
goto err_request;
- }
+ else
+ goto err_vma;
}
- /*
- * Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- eb.request->batch = batch;
- if (eb.batch_pool)
- intel_gt_buffer_pool_mark_active(eb.batch_pool, eb.request);
-
- trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb, batch);
+ err = eb_submit(&eb);
err_request:
- i915_request_get(eb.request);
- err = eb_request_add(&eb, err);
+ eb_requests_get(&eb);
+ err = eb_requests_add(&eb, err);
if (eb.fences)
- signal_fence_array(&eb);
+ signal_fence_array(&eb, eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
if (out_fence) {
if (err == 0) {
@@ -2979,10 +3307,15 @@ err_request:
if (unlikely(eb.gem_context->syncobj)) {
drm_syncobj_replace_fence(eb.gem_context->syncobj,
- &eb.request->fence);
+ eb.composite_fence ?
+ eb.composite_fence :
+ &eb.requests[0]->fence);
}
- i915_request_put(eb.request);
+ if (!out_fence && eb.composite_fence)
+ dma_fence_put(eb.composite_fence);
+
+ eb_requests_put(&eb);
err_vma:
eb_release_vmas(&eb, true);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index e5ae9c06510c..a57a6b7013c2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -134,6 +134,8 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
internal_free_pages(pages);
obj->mm.dirty = false;
+
+ __start_cpu_write(obj);
}
static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index eb345305dc52..444f8268b9c5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -56,8 +56,8 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
* @obj: The object to check.
*
* This function is intended to be called from within the fence signaling
- * path where the fence keeps the object from being migrated. For example
- * during gpu reset or similar.
+ * path where the fence, or a pin, keeps the object from being migrated. For
+ * example during gpu reset or similar.
*
* Return: Whether the object is resident in lmem.
*/
@@ -66,7 +66,8 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
#ifdef CONFIG_LOCKDEP
- GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true));
+ GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+ i915_gem_object_evictable(obj));
#endif
return mr && (mr->type == INTEL_MEMORY_LOCAL ||
mr->type == INTEL_MEMORY_STOLEN_LOCAL);
@@ -104,6 +105,32 @@ __i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
}
struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size)
+{
+ struct drm_i915_gem_object *obj;
+ void *map;
+
+ obj = i915_gem_object_create_lmem(i915,
+ round_up(size, PAGE_SIZE),
+ I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(obj))
+ return obj;
+
+ map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
+ if (IS_ERR(map)) {
+ i915_gem_object_put(obj);
+ return map;
+ }
+
+ memcpy(map, data, size);
+
+ i915_gem_object_unpin_map(obj);
+
+ return obj;
+}
+
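A hedged usage sketch for the new helper (not part of the patch; blob and blob_size are hypothetical): the helper bounces the data through a temporary WC mapping of a contiguous lmem object and hands back the object with that mapping already unpinned.

static int sketch_upload_blob(struct drm_i915_private *i915,
			      const void *blob, size_t blob_size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_create_lmem_from_data(i915, blob, blob_size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* e.g. -ENOMEM */

	/* ... pin/bind and use the object here ... */

	i915_gem_object_put(obj);
	return 0;
}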
+struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
resource_size_t size,
unsigned int flags)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index 4ee81fc66302..1b88ea13435c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -24,6 +24,10 @@ bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
struct drm_i915_gem_object *
+i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
+ const void *data, size_t size);
+
+struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
resource_size_t size,
resource_size_t page_size,
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index 5130e8ed9564..65fc6ff5f59d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -395,7 +395,7 @@ retry:
/* Track the mmo associated with the fenced vma */
vma->mmo = mmo;
- if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND))
+ if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&i915->ggtt.userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 6fb9afb65034..1e426a42a36c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -25,6 +25,7 @@
#include <linux/sched/mm.h>
#include "display/intel_frontbuffer.h"
+#include "pxp/intel_pxp.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_gem_context.h"
@@ -90,6 +91,22 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
}
/**
+ * i915_gem_object_fini - Clean up a GEM object initialization
+ * @obj: The gem object to cleanup
+ *
+ * This function cleans up gem object fields that are set up by
+ * drm_gem_private_object_init() and i915_gem_object_init().
+ * It's primarily intended as a helper for backends that need to
+ * clean up the gem object in separate steps.
+ */
+void __i915_gem_object_fini(struct drm_i915_gem_object *obj)
+{
+ mutex_destroy(&obj->mm.get_page.lock);
+ mutex_destroy(&obj->mm.get_dma_page.lock);
+ dma_resv_fini(&obj->base._resv);
+}
+
+/**
* Mark up the object's coherency levels for a given cache_level
* @obj: #drm_i915_gem_object
* @cache_level: cache level
@@ -111,6 +128,32 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
}
+bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj)
+{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+ /*
+ * This is purely from a security perspective, so we simply don't care
+ * about non-userspace objects being able to bypass the LLC.
+ */
+ if (!(obj->flags & I915_BO_ALLOC_USER))
+ return false;
+
+ /*
+ * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
+ * possible for userspace to bypass the GTT caching bits set by the
+ * kernel, as per the given object cache_level. This is troublesome
+ * since the heavy flush we apply when first gathering the pages is
+ * skipped if the kernel thinks the object is coherent with the GPU. As
+ * a result it might be possible to bypass the cache and read the
+ * contents of the page directly, which could be stale data. If it's
+ * just a case of userspace shooting themselves in the foot then so be
+ * it, but since i915 takes the stance of always zeroing memory before
+ * handing it to userspace, we need to prevent this.
+ */
+ return IS_JSL_EHL(i915);
+}
+
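The intended call pattern, mirrored by the shmem and userptr hunks later in this patch, is for a backend's get_pages path to force the flush-on-acquire before publishing its pages; a condensed sketch (illustrative only):

/* In a backend's get_pages path, after building the sg_table @st: */
static void sketch_publish_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *st,
				 unsigned int sg_page_sizes)
{
	/* Make the clflush in set_pages unconditional where the LLC can be bypassed. */
	if (i915_gem_object_can_bypass_llc(obj))
		obj->cache_dirty = true;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
}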
static void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
@@ -174,7 +217,6 @@ void __i915_gem_free_object_rcu(struct rcu_head *head)
container_of(head, typeof(*obj), rcu);
struct drm_i915_private *i915 = to_i915(obj->base.dev);
- dma_resv_fini(&obj->base._resv);
i915_gem_object_free(obj);
GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@ -204,10 +246,17 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
}
}
-void __i915_gem_free_object(struct drm_i915_gem_object *obj)
+/**
+ * __i915_gem_object_pages_fini - Clean up pages use of a gem object
+ * @obj: The gem object to clean up
+ *
+ * This function cleans up usage of the object mm.pages member. It
+ * is intended for backends that need to clean up a gem object in
+ * separate steps and needs to be called when the object is idle before
+ * the object's backing memory is freed.
+ */
+void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
{
- trace_i915_gem_object_destroy(obj);
-
if (!list_empty(&obj->vma.list)) {
struct i915_vma *vma;
@@ -233,11 +282,17 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
__i915_gem_object_free_mmaps(obj);
- GEM_BUG_ON(!list_empty(&obj->lut_list));
-
atomic_set(&obj->mm.pages_pin_count, 0);
__i915_gem_object_put_pages(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
+}
+
+void __i915_gem_free_object(struct drm_i915_gem_object *obj)
+{
+ trace_i915_gem_object_destroy(obj);
+
+ GEM_BUG_ON(!list_empty(&obj->lut_list));
+
bitmap_free(obj->bit_17);
if (obj->base.import_attach)
@@ -253,6 +308,8 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
if (obj->shares_resv_from)
i915_vm_resv_put(obj->shares_resv_from);
+
+ __i915_gem_object_fini(obj);
}
static void __i915_gem_free_objects(struct drm_i915_private *i915,
@@ -266,6 +323,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
obj->ops->delayed_free(obj);
continue;
}
+ __i915_gem_object_pages_fini(obj);
__i915_gem_free_object(obj);
/* But keep the pointer alive for RCU-protected lookups */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index 48112b9d76df..59201801cec5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -58,6 +58,9 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops,
struct lock_class_key *key,
unsigned alloc_flags);
+
+void __i915_gem_object_fini(struct drm_i915_gem_object *obj);
+
struct drm_i915_gem_object *
i915_gem_object_create_shmem(struct drm_i915_private *i915,
resource_size_t size);
@@ -270,6 +273,12 @@ i915_gem_object_clear_tiling_quirk(struct drm_i915_gem_object *obj)
}
static inline bool
+i915_gem_object_is_protected(const struct drm_i915_gem_object *obj)
+{
+ return obj->flags & I915_BO_PROTECTED;
+}
+
+static inline bool
i915_gem_object_type_has(const struct drm_i915_gem_object *obj,
unsigned long flags)
{
@@ -503,25 +512,9 @@ i915_gem_object_finish_access(struct drm_i915_gem_object *obj)
i915_gem_object_unpin_pages(obj);
}
-static inline struct intel_engine_cs *
-i915_gem_object_last_write_engine(struct drm_i915_gem_object *obj)
-{
- struct intel_engine_cs *engine = NULL;
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_resv_get_excl_unlocked(obj->base.resv);
- rcu_read_unlock();
-
- if (fence && dma_fence_is_i915(fence) && !dma_fence_is_signaled(fence))
- engine = to_request(fence)->engine;
- dma_fence_put(fence);
-
- return engine;
-}
-
void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
unsigned int cache_level);
+bool i915_gem_object_can_bypass_llc(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
void i915_gem_object_flush_if_display_locked(struct drm_i915_gem_object *obj);
@@ -599,6 +592,8 @@ bool i915_gem_object_is_shmem(const struct drm_i915_gem_object *obj);
void __i915_gem_free_object_rcu(struct rcu_head *head);
+void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj);
+
void __i915_gem_free_object(struct drm_i915_gem_object *obj);
bool i915_gem_object_evictable(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 2471f36aaff3..da85169006d4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -288,17 +288,23 @@ struct drm_i915_gem_object {
I915_SELFTEST_DECLARE(struct list_head st_link);
unsigned long flags;
-#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
-#define I915_BO_ALLOC_VOLATILE BIT(1)
-#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
-#define I915_BO_ALLOC_USER BIT(3)
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_VOLATILE BIT(1)
+#define I915_BO_ALLOC_CPU_CLEAR BIT(2)
+#define I915_BO_ALLOC_USER BIT(3)
+/* Object is allowed to lose its contents on suspend / resume, even if pinned */
+#define I915_BO_ALLOC_PM_VOLATILE BIT(4)
+/* Object needs to be restored early using memcpy during resume */
+#define I915_BO_ALLOC_PM_EARLY BIT(5)
#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
I915_BO_ALLOC_VOLATILE | \
I915_BO_ALLOC_CPU_CLEAR | \
- I915_BO_ALLOC_USER)
-#define I915_BO_READONLY BIT(4)
-#define I915_TILING_QUIRK_BIT 5 /* unknown swizzling; do not release! */
-
+ I915_BO_ALLOC_USER | \
+ I915_BO_ALLOC_PM_VOLATILE | \
+ I915_BO_ALLOC_PM_EARLY)
+#define I915_BO_READONLY BIT(6)
+#define I915_TILING_QUIRK_BIT 7 /* unknown swizzling; do not release! */
+#define I915_BO_PROTECTED BIT(8)
/**
* @mem_flags - Mutable placement-related flags
*
@@ -421,6 +427,33 @@ struct drm_i915_gem_object {
* can freely bypass the CPU cache when touching the pages with the GPU,
* where the kernel is completely unaware. On such platform we need
* apply the sledgehammer-on-acquire regardless of the @cache_coherent.
+ *
+ * Special care is taken on non-LLC platforms, to prevent potential
+ * information leak. The driver currently ensures:
+ *
+ * 1. All userspace objects, by default, have @cache_level set as
+ * I915_CACHE_NONE. The only exception is userptr objects, where we
+ * instead force I915_CACHE_LLC, but we also don't allow userspace to
+ * ever change the @cache_level for such objects. Another special case
+ * is dma-buf, which doesn't rely on @cache_dirty, but there we
+ * always do a forced flush when acquiring the pages, if there is a
+ * chance that the pages can be read directly from main memory with
+ * the GPU.
+ *
+ * 2. All I915_CACHE_NONE objects have @cache_dirty initially true.
+ *
+ * 3. All swapped-out objects (i.e. shmem) have @cache_dirty set to
+ * true.
+ *
+ * 4. The @cache_dirty is never freely reset before the initial
+ * flush, even if userspace adjusts the @cache_level through the
+ * i915_gem_set_caching_ioctl.
+ *
+ * 5. All @cache_dirty objects (including swapped-in) are initially
+ * flushed with a synchronous call to drm_clflush_sg in
+ * __i915_gem_object_set_pages. The @cache_dirty can be freely reset
+ * at this point. All further asynchronous clfushes are never security
+ * critical, i.e userspace is free to race against itself.
*/
unsigned int cache_dirty:1;
@@ -534,9 +567,17 @@ struct drm_i915_gem_object {
struct {
struct sg_table *cached_io_st;
struct i915_gem_object_page_iter get_io_page;
+ struct drm_i915_gem_object *backup;
bool created:1;
} ttm;
+ /*
+ * Record which PXP key instance this object was created against (if
+ * any), so we can use it to determine if the encryption is valid by
+ * comparing against the current key instance.
+ */
+ u32 pxp_key_instance;
+
/** Record of address bit 17 of each page at last unbind. */
unsigned long *bit_17;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8b9d7d14c4bd..726b40e1fbb0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -5,6 +5,7 @@
*/
#include "gem/i915_gem_pm.h"
+#include "gem/i915_gem_ttm_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
@@ -39,6 +40,88 @@ void i915_gem_suspend(struct drm_i915_private *i915)
i915_gem_drain_freed_objects(i915);
}
+static int lmem_restore(struct drm_i915_private *i915, u32 flags)
+{
+ struct intel_memory_region *mr;
+ int ret = 0, id;
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == INTEL_MEMORY_LOCAL) {
+ ret = i915_ttm_restore_region(mr, flags);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int lmem_suspend(struct drm_i915_private *i915, u32 flags)
+{
+ struct intel_memory_region *mr;
+ int ret = 0, id;
+
+ for_each_memory_region(mr, i915, id) {
+ if (mr->type == INTEL_MEMORY_LOCAL) {
+ ret = i915_ttm_backup_region(mr, flags);
+ if (ret)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static void lmem_recover(struct drm_i915_private *i915)
+{
+ struct intel_memory_region *mr;
+ int id;
+
+ for_each_memory_region(mr, i915, id)
+ if (mr->type == INTEL_MEMORY_LOCAL)
+ i915_ttm_recover_region(mr);
+}
+
+int i915_gem_backup_suspend(struct drm_i915_private *i915)
+{
+ int ret;
+
+ /* Opportunistically try to evict unpinned objects */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU);
+ if (ret)
+ goto out_recover;
+
+ i915_gem_suspend(i915);
+
+ /*
+ * More objects may have become unpinned as requests were
+ * retired. Now try to evict again. The gt may be wedged here
+ * in which case we automatically fall back to memcpy.
+ * We also allow backing up pinned objects that have not been
+ * marked for early recovery, and that may contain, for example,
+ * page-tables for the migrate context.
+ */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_ALLOW_GPU |
+ I915_TTM_BACKUP_PINNED);
+ if (ret)
+ goto out_recover;
+
+ /*
+ * Remaining objects are backed up using memcpy once we've stopped
+ * using the migrate context.
+ */
+ ret = lmem_suspend(i915, I915_TTM_BACKUP_PINNED);
+ if (ret)
+ goto out_recover;
+
+ return 0;
+
+out_recover:
+ lmem_recover(i915);
+
+ return ret;
+}
+
void i915_gem_suspend_late(struct drm_i915_private *i915)
{
struct drm_i915_gem_object *obj;
@@ -128,12 +211,20 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
void i915_gem_resume(struct drm_i915_private *i915)
{
+ int ret;
+
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
+ ret = lmem_restore(i915, 0);
+ GEM_WARN_ON(ret);
+
/*
* As we didn't flush the kernel context before suspend, we cannot
* guarantee that the context image is complete. So let's just reset
* it and start again.
*/
intel_gt_resume(&i915->gt);
+
+ ret = lmem_restore(i915, I915_TTM_BACKUP_ALLOW_GPU);
+ GEM_WARN_ON(ret);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
index c9a66630e92e..bedf1e95941a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h
@@ -18,6 +18,7 @@ void i915_gem_idle_work_handler(struct work_struct *work);
void i915_gem_suspend(struct drm_i915_private *i915);
void i915_gem_suspend_late(struct drm_i915_private *i915);
+int i915_gem_backup_suspend(struct drm_i915_private *i915);
int i915_gem_freeze(struct drm_i915_private *i915);
int i915_gem_freeze_late(struct drm_i915_private *i915);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 1f557b2178ed..a016ccec36f3 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -80,3 +80,73 @@ err_object_free:
i915_gem_object_free(obj);
return ERR_PTR(err);
}
+
+/**
+ * i915_gem_process_region - Iterate over all objects of a region using ops
+ * to process and optionally skip objects
+ * @mr: The memory region
+ * @apply: ops and private data
+ *
+ * This function can be used to iterate over the region's object list,
+ * checking whether to skip objects, and, if not, lock the objects and
+ * process them using the supplied ops. Note that this function temporarily
+ * removes objects from the region list while iterating, so that if run
+ * concurrently with itself it may not iterate over all objects.
+ *
+ * Return: 0 if successful, negative error code on failure.
+ */
+int i915_gem_process_region(struct intel_memory_region *mr,
+ struct i915_gem_apply_to_region *apply)
+{
+ const struct i915_gem_apply_to_region_ops *ops = apply->ops;
+ struct drm_i915_gem_object *obj;
+ struct list_head still_in_list;
+ int ret = 0;
+
+ /*
+ * In the future, a non-NULL apply->ww could mean the caller is
+ * already in a locking transaction and provides its own context.
+ */
+ GEM_WARN_ON(apply->ww);
+
+ INIT_LIST_HEAD(&still_in_list);
+ mutex_lock(&mr->objects.lock);
+ for (;;) {
+ struct i915_gem_ww_ctx ww;
+
+ obj = list_first_entry_or_null(&mr->objects.list, typeof(*obj),
+ mm.region_link);
+ if (!obj)
+ break;
+
+ list_move_tail(&obj->mm.region_link, &still_in_list);
+ if (!kref_get_unless_zero(&obj->base.refcount))
+ continue;
+
+ /*
+ * Note: Someone else might be migrating the object at this
+ * point. The object's region is not stable until we lock
+ * the object.
+ */
+ mutex_unlock(&mr->objects.lock);
+ apply->ww = &ww;
+ for_i915_gem_ww(&ww, ret, apply->interruptible) {
+ ret = i915_gem_object_lock(obj, apply->ww);
+ if (ret)
+ continue;
+
+ if (obj->mm.region == mr)
+ ret = ops->process_obj(apply, obj);
+ /* Implicit object unlock */
+ }
+
+ i915_gem_object_put(obj);
+ mutex_lock(&mr->objects.lock);
+ if (ret)
+ break;
+ }
+ list_splice_tail(&still_in_list, &mr->objects.list);
+ mutex_unlock(&mr->objects.lock);
+
+ return ret;
+}
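A minimal usage sketch of the new iterator (illustrative only; the TTM suspend/resume code added later in this patch follows the same shape): supply a process_obj callback via struct i915_gem_apply_to_region and let i915_gem_process_region() handle the list walk and ww locking.

static int sketch_process_obj(struct i915_gem_apply_to_region *apply,
			      struct drm_i915_gem_object *obj)
{
	/* @obj is locked here; return 0, -EDEADLK to retry, or another error. */
	return 0;
}

static int sketch_walk_region(struct intel_memory_region *mr)
{
	static const struct i915_gem_apply_to_region_ops ops = {
		.process_obj = sketch_process_obj,
	};
	struct i915_gem_apply_to_region apply = {
		.ops = &ops,
		.interruptible = true,
	};

	return i915_gem_process_region(mr, &apply);
}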
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index 1008e580a89a..fcaa12d657d4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -12,6 +12,41 @@ struct intel_memory_region;
struct drm_i915_gem_object;
struct sg_table;
+struct i915_gem_apply_to_region;
+
+/**
+ * struct i915_gem_apply_to_region_ops - ops to use when iterating over all
+ * region objects.
+ */
+struct i915_gem_apply_to_region_ops {
+ /**
+ * process_obj - Process the current object
+ * @apply: Embed this for private data.
+ * @obj: The current object.
+ *
+ * Note that if this function is part of a ww transaction, and
+ * if it returns -EDEADLK for one of the objects, it may be
+ * rerun for that same object in the same pass.
+ */
+ int (*process_obj)(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj);
+};
+
+/**
+ * struct i915_gem_apply_to_region - Argument to the struct
+ * i915_gem_apply_to_region_ops functions.
+ * @ops: The ops for the operation.
+ * @ww: Locking context used for the transaction.
+ * @interruptible: Whether to perform object locking interruptibly.
+ *
+ * This structure is intended to be embedded in a private struct if needed.
+ */
+struct i915_gem_apply_to_region {
+ const struct i915_gem_apply_to_region_ops *ops;
+ struct i915_gem_ww_ctx *ww;
+ u32 interruptible:1;
+};
+
void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
struct intel_memory_region *mem);
void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
@@ -22,4 +57,6 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
resource_size_t page_size,
unsigned int flags);
+int i915_gem_process_region(struct intel_memory_region *mr,
+ struct i915_gem_apply_to_region *apply);
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index 11f072193f3b..d77da59fae04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -182,22 +182,7 @@ rebuild_st:
if (i915_gem_object_needs_bit17_swizzle(obj))
i915_gem_object_do_bit_17_swizzle(obj, st);
- /*
- * EHL and JSL add the 'Bypass LLC' MOCS entry, which should make it
- * possible for userspace to bypass the GTT caching bits set by the
- * kernel, as per the given object cache_level. This is troublesome
- * since the heavy flush we apply when first gathering the pages is
- * skipped if the kernel thinks the object is coherent with the GPU. As
- * a result it might be possible to bypass the cache and read the
- * contents of the page directly, which could be stale data. If it's
- * just a case of userspace shooting themselves in the foot then so be
- * it, but since i915 takes the stance of always zeroing memory before
- * handing it to userspace, we need to prevent this.
- *
- * By setting cache_dirty here we make the clflush in set_pages
- * unconditional on such platforms.
- */
- if (IS_JSL_EHL(i915) && obj->flags & I915_BO_ALLOC_USER)
+ if (i915_gem_object_can_bypass_llc(obj))
obj->cache_dirty = true;
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
@@ -301,6 +286,8 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
struct sg_table *pages,
bool needs_clflush)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
if (obj->mm.madv == I915_MADV_DONTNEED)
@@ -312,6 +299,16 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
drm_clflush_sg(pages);
__start_cpu_write(obj);
+ /*
+ * On non-LLC platforms, force the flush-on-acquire if this is ever
+ * swapped-in. Our async flush path is not trustworthy enough yet (and
+ * happens in the wrong order), and with some tricks it's conceivable
+ * for userspace to change the cache-level to I915_CACHE_NONE after the
+ * pages are swapped-in, and since execbuf binds the object before doing
+ * the async flush, we have a race window.
+ */
+ if (!HAS_LLC(i915))
+ obj->cache_dirty = true;
}
void i915_gem_object_put_pages_shmem(struct drm_i915_gem_object *obj, struct sg_table *pages)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index 6ea13159bffc..74a1ffd0d7dd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -10,18 +10,16 @@
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
+#include "gem/i915_gem_mman.h"
#include "gem/i915_gem_object.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_ttm.h"
-#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_ttm_pm.h"
-#include "gt/intel_migrate.h"
-#include "gt/intel_engine_pm.h"
-#define I915_PL_LMEM0 TTM_PL_PRIV
-#define I915_PL_SYSTEM TTM_PL_SYSTEM
-#define I915_PL_STOLEN TTM_PL_VRAM
-#define I915_PL_GGTT TTM_PL_TT
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gt.h"
+#include "gt/intel_migrate.h"
#define I915_TTM_PRIO_PURGE 0
#define I915_TTM_PRIO_NO_PAGES 1
@@ -64,6 +62,20 @@ static struct ttm_placement i915_sys_placement = {
.busy_placement = &sys_placement_flags,
};
+/**
+ * i915_ttm_sys_placement - Return the struct ttm_placement to be
+ * used for an object in system memory.
+ *
+ * Rather than making the backing struct extern, use this
+ * accessor function.
+ *
+ * Return: A pointer to a static variable for sys placement.
+ */
+struct ttm_placement *i915_ttm_sys_placement(void)
+{
+ return &i915_sys_placement;
+}
+
static int i915_ttm_err_to_gem(int err)
{
/* Fastpath */
@@ -182,7 +194,7 @@ static struct ttm_tt *i915_ttm_tt_create(struct ttm_buffer_object *bo,
if (obj->flags & I915_BO_ALLOC_CPU_CLEAR &&
man->use_tt)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
ret = ttm_tt_init(&i915_tt->ttm, bo, page_flags,
i915_ttm_select_tt_caching(obj));
@@ -214,7 +226,6 @@ static void i915_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(i915_tt);
}
@@ -356,8 +367,10 @@ static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- if (likely(obj))
+ if (likely(obj)) {
+ __i915_gem_object_pages_fini(obj);
i915_ttm_free_cached_io_st(obj);
+ }
}
static struct intel_memory_region *
@@ -427,7 +440,9 @@ i915_ttm_resource_get_st(struct drm_i915_gem_object *obj,
}
static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
+ bool clear,
struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
struct sg_table *dst_st)
{
struct drm_i915_private *i915 = container_of(bo->bdev, typeof(*i915),
@@ -437,21 +452,18 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct sg_table *src_st;
struct i915_request *rq;
- struct ttm_tt *ttm = bo->ttm;
+ struct ttm_tt *src_ttm = bo->ttm;
enum i915_cache_level src_level, dst_level;
int ret;
- if (!i915->gt.migrate.context)
+ if (!i915->gt.migrate.context || intel_gt_is_wedged(&i915->gt))
return -EINVAL;
- dst_level = i915_ttm_cache_level(i915, dst_mem, ttm);
- if (!ttm || !ttm_tt_is_populated(ttm)) {
+ dst_level = i915_ttm_cache_level(i915, dst_mem, dst_ttm);
+ if (clear) {
if (bo->type == ttm_bo_type_kernel)
return -EINVAL;
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
- return 0;
-
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_clear(i915->gt.migrate.context, NULL,
dst_st->sgl, dst_level,
@@ -464,10 +476,10 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
}
intel_engine_pm_put(i915->gt.migrate.context->engine);
} else {
- src_st = src_man->use_tt ? i915_ttm_tt_get_st(ttm) :
+ src_st = src_man->use_tt ? i915_ttm_tt_get_st(src_ttm) :
obj->ttm.cached_io_st;
- src_level = i915_ttm_cache_level(i915, bo->resource, ttm);
+ src_level = i915_ttm_cache_level(i915, bo->resource, src_ttm);
intel_engine_pm_get(i915->gt.migrate.context->engine);
ret = intel_context_migrate_copy(i915->gt.migrate.context,
NULL, src_st->sgl, src_level,
@@ -485,6 +497,44 @@ static int i915_ttm_accel_move(struct ttm_buffer_object *bo,
return ret;
}
+static void __i915_ttm_move(struct ttm_buffer_object *bo, bool clear,
+ struct ttm_resource *dst_mem,
+ struct ttm_tt *dst_ttm,
+ struct sg_table *dst_st,
+ bool allow_accel)
+{
+ int ret = -EINVAL;
+
+ if (allow_accel)
+ ret = i915_ttm_accel_move(bo, clear, dst_mem, dst_ttm, dst_st);
+ if (ret) {
+ struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
+ struct intel_memory_region *dst_reg, *src_reg;
+ union {
+ struct ttm_kmap_iter_tt tt;
+ struct ttm_kmap_iter_iomap io;
+ } _dst_iter, _src_iter;
+ struct ttm_kmap_iter *dst_iter, *src_iter;
+
+ dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
+ src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
+ GEM_BUG_ON(!dst_reg || !src_reg);
+
+ dst_iter = !cpu_maps_iomem(dst_mem) ?
+ ttm_kmap_iter_tt_init(&_dst_iter.tt, dst_ttm) :
+ ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
+ dst_st, dst_reg->region.start);
+
+ src_iter = !cpu_maps_iomem(bo->resource) ?
+ ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
+ ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
+ obj->ttm.cached_io_st,
+ src_reg->region.start);
+
+ ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
+ }
+}
+
static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct ttm_operation_ctx *ctx,
struct ttm_resource *dst_mem,
@@ -493,19 +543,11 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
struct ttm_resource_manager *dst_man =
ttm_manager_type(bo->bdev, dst_mem->mem_type);
- struct intel_memory_region *dst_reg, *src_reg;
- union {
- struct ttm_kmap_iter_tt tt;
- struct ttm_kmap_iter_iomap io;
- } _dst_iter, _src_iter;
- struct ttm_kmap_iter *dst_iter, *src_iter;
+ struct ttm_tt *ttm = bo->ttm;
struct sg_table *dst_st;
+ bool clear;
int ret;
- dst_reg = i915_ttm_region(bo->bdev, dst_mem->mem_type);
- src_reg = i915_ttm_region(bo->bdev, bo->resource->mem_type);
- GEM_BUG_ON(!dst_reg || !src_reg);
-
/* Sync for now. We could do the actual copy async. */
ret = ttm_bo_wait_ctx(bo, ctx);
if (ret)
@@ -522,9 +564,8 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
}
/* Populate ttm with pages if needed. Typically system memory. */
- if (bo->ttm && (dst_man->use_tt ||
- (bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED))) {
- ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx);
+ if (ttm && (dst_man->use_tt || (ttm->page_flags & TTM_TT_FLAG_SWAPPED))) {
+ ret = ttm_tt_populate(bo->bdev, ttm, ctx);
if (ret)
return ret;
}
@@ -533,23 +574,10 @@ static int i915_ttm_move(struct ttm_buffer_object *bo, bool evict,
if (IS_ERR(dst_st))
return PTR_ERR(dst_st);
- ret = i915_ttm_accel_move(bo, dst_mem, dst_st);
- if (ret) {
- /* If we start mapping GGTT, we can no longer use man::use_tt here. */
- dst_iter = !cpu_maps_iomem(dst_mem) ?
- ttm_kmap_iter_tt_init(&_dst_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_dst_iter.io, &dst_reg->iomap,
- dst_st, dst_reg->region.start);
+ clear = !cpu_maps_iomem(bo->resource) && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ __i915_ttm_move(bo, clear, dst_mem, bo->ttm, dst_st, true);
- src_iter = !cpu_maps_iomem(bo->resource) ?
- ttm_kmap_iter_tt_init(&_src_iter.tt, bo->ttm) :
- ttm_kmap_iter_iomap_init(&_src_iter.io, &src_reg->iomap,
- obj->ttm.cached_io_st,
- src_reg->region.start);
-
- ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
- }
- /* Below dst_mem becomes bo->resource. */
ttm_bo_move_sync_cleanup(bo, dst_mem);
i915_ttm_adjust_domains_after_move(obj);
i915_ttm_free_cached_io_st(obj);
@@ -787,12 +815,9 @@ static void i915_ttm_adjust_lru(struct drm_i915_gem_object *obj)
*/
static void i915_ttm_delayed_free(struct drm_i915_gem_object *obj)
{
- if (obj->ttm.created) {
- ttm_bo_put(i915_gem_to_ttm(obj));
- } else {
- __i915_gem_free_object(obj);
- call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
- }
+ GEM_BUG_ON(!obj->ttm.created);
+
+ ttm_bo_put(i915_gem_to_ttm(obj));
}
static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
@@ -872,14 +897,19 @@ void i915_ttm_bo_destroy(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
- /* This releases all gem object bindings to the backend. */
- __i915_gem_free_object(obj);
-
i915_gem_object_release_memory_region(obj);
mutex_destroy(&obj->ttm.get_io_page.lock);
- if (obj->ttm.created)
+ if (obj->ttm.created) {
+ i915_ttm_backup_free(obj);
+
+ /* This releases all gem object bindings to the backend. */
+ __i915_gem_free_object(obj);
+
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ } else {
+ __i915_gem_object_fini(obj);
+ }
}
/**
@@ -908,7 +938,11 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
drm_gem_private_object_init(&i915->drm, &obj->base, size);
i915_gem_object_init(obj, &i915_gem_ttm_obj_ops, &lock_class, flags);
- i915_gem_object_init_memory_region(obj, mem);
+
+ /* Don't put on a region list until we're either locked or fully initialized. */
+ obj->mm.region = intel_memory_region_get(mem);
+ INIT_LIST_HEAD(&obj->mm.region_link);
+
i915_gem_object_make_unshrinkable(obj);
INIT_RADIX_TREE(&obj->ttm.get_io_page.radix, GFP_KERNEL | __GFP_NOWARN);
mutex_init(&obj->ttm.get_io_page.lock);
@@ -935,6 +969,8 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
return i915_ttm_err_to_gem(ret);
obj->ttm.created = true;
+ i915_gem_object_release_memory_region(obj);
+ i915_gem_object_init_memory_region(obj, mem);
i915_ttm_adjust_domains_after_move(obj);
i915_ttm_adjust_gem_after_move(obj);
i915_gem_object_unlock(obj);
@@ -963,3 +999,50 @@ i915_gem_ttm_system_setup(struct drm_i915_private *i915,
intel_memory_region_set_name(mr, "system-ttm");
return mr;
}
+
+/**
+ * i915_gem_obj_copy_ttm - Copy the contents of one ttm-based gem object to
+ * another
+ * @dst: The destination object
+ * @src: The source object
+ * @allow_accel: Allow using the blitter. Otherwise TTM memcpy is used.
+ * @intr: Whether to perform waits interruptibly.
+ *
+ * Note: The caller is responsible for ensuring that the underlying
+ * TTM objects are populated if needed and locked.
+ *
+ * Return: Zero on success. Negative error code on error. If @intr == true,
+ * then it may return -ERESTARTSYS or -EINTR.
+ */
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr)
+{
+ struct ttm_buffer_object *dst_bo = i915_gem_to_ttm(dst);
+ struct ttm_buffer_object *src_bo = i915_gem_to_ttm(src);
+ struct ttm_operation_ctx ctx = {
+ .interruptible = intr,
+ };
+ struct sg_table *dst_st;
+ int ret;
+
+ assert_object_held(dst);
+ assert_object_held(src);
+
+ /*
+ * Sync for now. This will change with async moves.
+ */
+ ret = ttm_bo_wait_ctx(dst_bo, &ctx);
+ if (!ret)
+ ret = ttm_bo_wait_ctx(src_bo, &ctx);
+ if (ret)
+ return ret;
+
+ dst_st = gpu_binds_iomem(dst_bo->resource) ?
+ dst->ttm.cached_io_st : i915_ttm_tt_get_st(dst_bo->ttm);
+
+ __i915_ttm_move(src_bo, false, dst_bo->resource, dst_bo->ttm,
+ dst_st, allow_accel);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
index 40927f67b6d9..0b7291dd897c 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.h
@@ -46,4 +46,18 @@ int __i915_gem_ttm_object_init(struct intel_memory_region *mem,
resource_size_t size,
resource_size_t page_size,
unsigned int flags);
+
+int i915_gem_obj_copy_ttm(struct drm_i915_gem_object *dst,
+ struct drm_i915_gem_object *src,
+ bool allow_accel, bool intr);
+
+/* Internal I915 TTM declarations and definitions below. */
+
+#define I915_PL_LMEM0 TTM_PL_PRIV
+#define I915_PL_SYSTEM TTM_PL_SYSTEM
+#define I915_PL_STOLEN TTM_PL_VRAM
+#define I915_PL_GGTT TTM_PL_TT
+
+struct ttm_placement *i915_ttm_sys_placement(void);
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
new file mode 100644
index 000000000000..3b6d14b5c604
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.c
@@ -0,0 +1,206 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_tt.h>
+
+#include "i915_drv.h"
+#include "intel_memory_region.h"
+#include "intel_region_ttm.h"
+
+#include "gem/i915_gem_region.h"
+#include "gem/i915_gem_ttm.h"
+#include "gem/i915_gem_ttm_pm.h"
+
+/**
+ * i915_ttm_backup_free - Free any backup attached to this object
+ * @obj: The object whose backup is to be freed.
+ */
+void i915_ttm_backup_free(struct drm_i915_gem_object *obj)
+{
+ if (obj->ttm.backup) {
+ i915_gem_object_put(obj->ttm.backup);
+ obj->ttm.backup = NULL;
+ }
+}
+
+/**
+ * struct i915_gem_ttm_pm_apply - Apply-to-region subclass for backup/restore
+ * @base: The i915_gem_apply_to_region we derive from.
+ * @allow_gpu: Whether using the gpu blitter is allowed.
+ * @backup_pinned: On backup, backup also pinned objects.
+ */
+struct i915_gem_ttm_pm_apply {
+ struct i915_gem_apply_to_region base;
+ bool allow_gpu : 1;
+ bool backup_pinned : 1;
+};
+
+static int i915_ttm_backup(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ttm_pm_apply *pm_apply =
+ container_of(apply, typeof(*pm_apply), base);
+ struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
+ struct ttm_buffer_object *backup_bo;
+ struct drm_i915_private *i915 =
+ container_of(bo->bdev, typeof(*i915), bdev);
+ struct drm_i915_gem_object *backup;
+ struct ttm_operation_ctx ctx = {};
+ int err = 0;
+
+ if (bo->resource->mem_type == I915_PL_SYSTEM || obj->ttm.backup)
+ return 0;
+
+ if (pm_apply->allow_gpu && i915_gem_object_evictable(obj))
+ return ttm_bo_validate(bo, i915_ttm_sys_placement(), &ctx);
+
+ if (!pm_apply->backup_pinned ||
+ (pm_apply->allow_gpu && (obj->flags & I915_BO_ALLOC_PM_EARLY)))
+ return 0;
+
+ if (obj->flags & I915_BO_ALLOC_PM_VOLATILE)
+ return 0;
+
+ backup = i915_gem_object_create_shmem(i915, obj->base.size);
+ if (IS_ERR(backup))
+ return PTR_ERR(backup);
+
+ err = i915_gem_object_lock(backup, apply->ww);
+ if (err)
+ goto out_no_lock;
+
+ backup_bo = i915_gem_to_ttm(backup);
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (err)
+ goto out_no_populate;
+
+ err = i915_gem_obj_copy_ttm(backup, obj, pm_apply->allow_gpu, false);
+ GEM_WARN_ON(err);
+
+ obj->ttm.backup = backup;
+ return 0;
+
+out_no_populate:
+ i915_gem_ww_unlock_single(backup);
+out_no_lock:
+ i915_gem_object_put(backup);
+
+ return err;
+}
+
+static int i915_ttm_recover(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ i915_ttm_backup_free(obj);
+ return 0;
+}
+
+/**
+ * i915_ttm_recover_region - Free the backup of all objects of a region
+ * @mr: The memory region
+ *
+ * Checks all objects of a region if there is backup attached and if so
+ * frees that backup. Typically this is called to recover after a partially
+ * performed backup.
+ */
+void i915_ttm_recover_region(struct intel_memory_region *mr)
+{
+ static const struct i915_gem_apply_to_region_ops recover_ops = {
+ .process_obj = i915_ttm_recover,
+ };
+ struct i915_gem_apply_to_region apply = {.ops = &recover_ops};
+ int ret;
+
+ ret = i915_gem_process_region(mr, &apply);
+ GEM_WARN_ON(ret);
+}
+
+/**
+ * i915_ttm_backup_region - Back up all objects of a region to smem.
+ * @mr: The memory region
+ * @flags: TTM backup flags: I915_TTM_BACKUP_ALLOW_GPU allows the gpu
+ * blitter; I915_TTM_BACKUP_PINNED also backs up pinned objects.
+ *
+ * Loops over all objects of a region and either evicts them if they are
+ * evictable or backs them up using a backup object if they are pinned.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags)
+{
+ static const struct i915_gem_apply_to_region_ops backup_ops = {
+ .process_obj = i915_ttm_backup,
+ };
+ struct i915_gem_ttm_pm_apply pm_apply = {
+ .base = {.ops = &backup_ops},
+ .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
+ .backup_pinned = flags & I915_TTM_BACKUP_PINNED,
+ };
+
+ return i915_gem_process_region(mr, &pm_apply.base);
+}
+
+static int i915_ttm_restore(struct i915_gem_apply_to_region *apply,
+ struct drm_i915_gem_object *obj)
+{
+ struct i915_gem_ttm_pm_apply *pm_apply =
+ container_of(apply, typeof(*pm_apply), base);
+ struct drm_i915_gem_object *backup = obj->ttm.backup;
+ struct ttm_buffer_object *backup_bo = i915_gem_to_ttm(backup);
+ struct ttm_operation_ctx ctx = {};
+ int err;
+
+ if (!backup)
+ return 0;
+
+ if (!pm_apply->allow_gpu && !(obj->flags & I915_BO_ALLOC_PM_EARLY))
+ return 0;
+
+ err = i915_gem_object_lock(backup, apply->ww);
+ if (err)
+ return err;
+
+ /* Content may have been swapped. */
+ err = ttm_tt_populate(backup_bo->bdev, backup_bo->ttm, &ctx);
+ if (!err) {
+ err = i915_gem_obj_copy_ttm(obj, backup, pm_apply->allow_gpu,
+ false);
+ GEM_WARN_ON(err);
+
+ obj->ttm.backup = NULL;
+ err = 0;
+ }
+
+ i915_gem_ww_unlock_single(backup);
+
+ if (!err)
+ i915_gem_object_put(backup);
+
+ return err;
+}
+
+/**
+ * i915_ttm_restore_region - Restore backed-up objects of a region from smem.
+ * @mr: The memory region
+ * @flags: TTM backup flags; pass I915_TTM_BACKUP_ALLOW_GPU to allow the
+ * gpu blitter for the restore.
+ *
+ * Loops over all objects of a region and if they are backed-up, restores
+ * them from smem.
+ *
+ * Return: Zero on success. Negative error code on error.
+ */
+int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags)
+{
+ static const struct i915_gem_apply_to_region_ops restore_ops = {
+ .process_obj = i915_ttm_restore,
+ };
+ struct i915_gem_ttm_pm_apply pm_apply = {
+ .base = {.ops = &restore_ops},
+ .allow_gpu = flags & I915_TTM_BACKUP_ALLOW_GPU,
+ };
+
+ return i915_gem_process_region(mr, &pm_apply.base);
+}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h
new file mode 100644
index 000000000000..25ed67a31571
--- /dev/null
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_pm.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef _I915_GEM_TTM_PM_H_
+#define _I915_GEM_TTM_PM_H_
+
+#include <linux/types.h>
+
+struct intel_memory_region;
+struct drm_i915_gem_object;
+
+#define I915_TTM_BACKUP_ALLOW_GPU BIT(0)
+#define I915_TTM_BACKUP_PINNED BIT(1)
+
+int i915_ttm_backup_region(struct intel_memory_region *mr, u32 flags);
+
+void i915_ttm_recover_region(struct intel_memory_region *mr);
+
+int i915_ttm_restore_region(struct intel_memory_region *mr, u32 flags);
+
+/* Internal I915 TTM functions below. */
+void i915_ttm_backup_free(struct drm_i915_gem_object *obj);
+
+#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 8ea0fa665e53..3173c9f9a040 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -165,8 +165,11 @@ alloc_table:
goto err;
}
- sg_page_sizes = i915_sg_dma_sizes(st->sgl);
+ WARN_ON_ONCE(!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE));
+ if (i915_gem_object_can_bypass_llc(obj))
+ obj->cache_dirty = true;
+ sg_page_sizes = i915_sg_dma_sizes(st->sgl);
__i915_gem_object_set_pages(obj, st, sg_page_sizes);
return 0;
@@ -546,7 +549,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
return -ENOMEM;
drm_gem_private_object_init(dev, &obj->base, args->user_size);
- i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class, 0);
+ i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class,
+ I915_BO_ALLOC_USER);
obj->mem_flags = I915_BO_FLAG_STRUCT_PAGE;
obj->read_domains = I915_GEM_DOMAIN_CPU;
obj->write_domain = I915_GEM_DOMAIN_CPU;
diff --git a/drivers/gpu/drm/i915/gem/i915_gemfs.c b/drivers/gpu/drm/i915/gem/i915_gemfs.c
index 5e6e8c91ab38..dbdbdc344d87 100644
--- a/drivers/gpu/drm/i915/gem/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/gem/i915_gemfs.c
@@ -6,7 +6,6 @@
#include <linux/fs.h>
#include <linux/mount.h>
-#include <linux/pagemap.h>
#include "i915_drv.h"
#include "i915_gemfs.h"
@@ -15,6 +14,7 @@ int i915_gemfs_init(struct drm_i915_private *i915)
{
struct file_system_type *type;
struct vfsmount *gemfs;
+ char *opts;
type = get_fs_type("tmpfs");
if (!type)
@@ -26,10 +26,26 @@ int i915_gemfs_init(struct drm_i915_private *i915)
*
* One example, although it is probably better with a per-file
* control, is selecting huge page allocations ("huge=within_size").
- * Currently unused due to bandwidth issues (slow reads) on Broadwell+.
+ * However, we only do so to offset the overhead of iommu lookups
+ * due to bandwidth issues (slow reads) on Broadwell+.
*/
- gemfs = kern_mount(type);
+ opts = NULL;
+ if (intel_vtd_active()) {
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+ static char huge_opt[] = "huge=within_size"; /* r/w */
+
+ opts = huge_opt;
+ drm_info(&i915->drm,
+ "Transparent Hugepage mode '%s'\n",
+ opts);
+ } else {
+ drm_notice(&i915->drm,
+ "Transparent Hugepage support is recommended for optimal performance when IOMMU is enabled!\n");
+ }
+ }
+
+ gemfs = vfs_kern_mount(type, SB_KERNMOUNT, type->name, opts);
if (IS_ERR(gemfs))
return PTR_ERR(gemfs);
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index a094f3ce1a90..b2003133deaf 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -136,6 +136,8 @@ static void put_huge_pages(struct drm_i915_gem_object *obj,
huge_pages_free_pages(pages);
obj->mm.dirty = false;
+
+ __start_cpu_write(obj);
}
static const struct drm_i915_gem_object_ops huge_page_ops = {
@@ -152,6 +154,7 @@ huge_pages_object(struct drm_i915_private *i915,
{
static struct lock_class_key lock_class;
struct drm_i915_gem_object *obj;
+ unsigned int cache_level;
GEM_BUG_ON(!size);
GEM_BUG_ON(!IS_ALIGNED(size, BIT(__ffs(page_mask))));
@@ -173,7 +176,9 @@ huge_pages_object(struct drm_i915_private *i915,
obj->write_domain = I915_GEM_DOMAIN_CPU;
obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->cache_level = I915_CACHE_NONE;
+
+ cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
+ i915_gem_object_set_cache_coherency(obj, cache_level);
obj->mm.page_mask = page_mask;
@@ -1456,7 +1461,7 @@ static int igt_tmpfs_fallback(void *arg)
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
struct vfsmount *gemfs = i915->mm.gemfs;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
u32 *vaddr;
@@ -1512,13 +1517,14 @@ static int igt_shrink_thp(void *arg)
{
struct i915_gem_context *ctx = arg;
struct drm_i915_private *i915 = ctx->i915;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(ctx);
struct drm_i915_gem_object *obj;
struct i915_gem_engines_iter it;
struct intel_context *ce;
struct i915_vma *vma;
unsigned int flags = PIN_USER;
unsigned int n;
+ bool should_swap;
int err = 0;
/*
@@ -1567,23 +1573,39 @@ static int igt_shrink_thp(void *arg)
break;
}
i915_gem_context_unlock_engines(ctx);
+ /*
+ * Nuke everything *before* we unpin the pages so we can be reasonably
+ * sure that, when we later check get_nr_swap_pages(), some random
+ * leftover object doesn't steal the remaining swap space.
+ */
+ i915_gem_shrink(NULL, i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE);
i915_vma_unpin(vma);
if (err)
goto out_put;
/*
- * Now that the pages are *unpinned* shrink-all should invoke
- * shmem to truncate our pages.
+ * Now that the pages are *unpinned* shrinking should invoke
+ * shmem to truncate our pages, if we have available swap.
*/
- i915_gem_shrink_all(i915);
- if (i915_gem_object_has_pages(obj)) {
- pr_err("shrink-all didn't truncate the pages\n");
+ should_swap = get_nr_swap_pages() > 0;
+ i915_gem_shrink(NULL, i915, -1UL, NULL,
+ I915_SHRINK_BOUND |
+ I915_SHRINK_UNBOUND |
+ I915_SHRINK_ACTIVE |
+ I915_SHRINK_WRITEBACK);
+ if (should_swap == i915_gem_object_has_pages(obj)) {
+ pr_err("unexpected pages mismatch, should_swap=%s\n",
+ yesno(should_swap));
err = -EINVAL;
goto out_put;
}
- if (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys) {
- pr_err("residual page-size bits left\n");
+ if (should_swap == (obj->mm.page_sizes.sg || obj->mm.page_sizes.phys)) {
+ pr_err("unexpected residual page-size bits, should_swap=%s\n",
+ yesno(should_swap));
err = -EINVAL;
goto out_put;
}
@@ -1629,7 +1651,7 @@ int i915_gem_huge_page_mock_selftests(void)
mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL;
mkwrite_device_info(dev_priv)->ppgtt_size = 48;
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_unlock;
@@ -1688,11 +1710,9 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
goto out_file;
}
- mutex_lock(&ctx->mutex);
- vm = i915_gem_context_vm(ctx);
+ vm = ctx->vm;
if (vm)
WRITE_ONCE(vm->scrub_64K, true);
- mutex_unlock(&ctx->mutex);
err = i915_subtests(tests, ctx);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
index ecbcbb86ae1e..8402ed925a69 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c
@@ -17,13 +17,20 @@
#include "huge_gem_object.h"
#include "mock_context.h"
+enum client_tiling {
+ CLIENT_TILING_LINEAR,
+ CLIENT_TILING_X,
+ CLIENT_TILING_Y,
+ CLIENT_NUM_TILING_TYPES
+};
+
#define WIDTH 512
#define HEIGHT 32
struct blit_buffer {
struct i915_vma *vma;
u32 start_val;
- u32 tiling;
+ enum client_tiling tiling;
};
struct tiled_blits {
@@ -53,9 +60,9 @@ static int prepare_blit(const struct tiled_blits *t,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(BCS_SWCTRL);
cmd = (BCS_SRC_Y | BCS_DST_Y) << 16;
- if (src->tiling == I915_TILING_Y)
+ if (src->tiling == CLIENT_TILING_Y)
cmd |= BCS_SRC_Y;
- if (dst->tiling == I915_TILING_Y)
+ if (dst->tiling == CLIENT_TILING_Y)
cmd |= BCS_DST_Y;
*cs++ = cmd;
@@ -172,7 +179,7 @@ static int tiled_blits_create_buffers(struct tiled_blits *t,
t->buffers[i].vma = vma;
t->buffers[i].tiling =
- i915_prandom_u32_max_state(I915_TILING_Y + 1, prng);
+ i915_prandom_u32_max_state(CLIENT_TILING_Y + 1, prng);
}
return 0;
@@ -197,17 +204,17 @@ static u64 swizzle_bit(unsigned int bit, u64 offset)
static u64 tiled_offset(const struct intel_gt *gt,
u64 v,
unsigned int stride,
- unsigned int tiling)
+ enum client_tiling tiling)
{
unsigned int swizzle;
u64 x, y;
- if (tiling == I915_TILING_NONE)
+ if (tiling == CLIENT_TILING_LINEAR)
return v;
y = div64_u64_rem(v, stride, &x);
- if (tiling == I915_TILING_X) {
+ if (tiling == CLIENT_TILING_X) {
v = div64_u64_rem(y, 8, &y) * stride * 8;
v += y * 512;
v += div64_u64_rem(x, 512, &x) << 12;
@@ -244,12 +251,12 @@ static u64 tiled_offset(const struct intel_gt *gt,
return v;
}
-static const char *repr_tiling(int tiling)
+static const char *repr_tiling(enum client_tiling tiling)
{
switch (tiling) {
- case I915_TILING_NONE: return "linear";
- case I915_TILING_X: return "X";
- case I915_TILING_Y: return "Y";
+ case CLIENT_TILING_LINEAR: return "linear";
+ case CLIENT_TILING_X: return "X";
+ case CLIENT_TILING_Y: return "Y";
default: return "unknown";
}
}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 8eb5050f8cb3..b32f7fed2d9c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -27,12 +27,6 @@
#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
-static inline struct i915_address_space *ctx_vm(struct i915_gem_context *ctx)
-{
- /* single threaded, private ctx */
- return rcu_dereference_protected(ctx->vm, true);
-}
-
static int live_nop_switch(void *arg)
{
const unsigned int nctx = 1024;
@@ -94,7 +88,7 @@ static int live_nop_switch(void *arg)
rq = i915_request_get(this);
i915_request_add(this);
}
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ if (i915_request_wait(rq, 0, HZ) < 0) {
pr_err("Failed to populated %d contexts\n", nctx);
intel_gt_set_wedged(&i915->gt);
i915_request_put(rq);
@@ -704,7 +698,7 @@ static int igt_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -813,7 +807,7 @@ static int igt_shared_ctx_exec(void *arg)
struct i915_gem_context *ctx;
struct intel_context *ce;
- ctx = kernel_context(i915, ctx_vm(parent));
+ ctx = kernel_context(i915, parent->vm);
if (IS_ERR(ctx)) {
err = PTR_ERR(ctx);
goto out_test;
@@ -823,7 +817,7 @@ static int igt_shared_ctx_exec(void *arg)
GEM_BUG_ON(IS_ERR(ce));
if (!obj) {
- obj = create_test_object(ctx_vm(parent),
+ obj = create_test_object(parent->vm,
file, &objects);
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
@@ -838,7 +832,7 @@ static int igt_shared_ctx_exec(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
engine->name,
- yesno(!!rcu_access_pointer(ctx->vm)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
intel_context_put(ce);
kernel_context_close(ctx);
@@ -1380,7 +1374,7 @@ static int igt_ctx_readonly(void *arg)
goto out_file;
}
- vm = ctx_vm(ctx) ?: &i915->ggtt.alias->vm;
+ vm = ctx->vm ?: &i915->ggtt.alias->vm;
if (!vm || !vm->has_read_only) {
err = 0;
goto out_file;
@@ -1417,7 +1411,7 @@ static int igt_ctx_readonly(void *arg)
pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? %s], err=%d\n",
ndwords, dw, max_dwords(obj),
ce->engine->name,
- yesno(!!ctx_vm(ctx)),
+ yesno(i915_gem_context_has_full_ppgtt(ctx)),
err);
i915_gem_context_unlock_engines(ctx);
goto out_file;
@@ -1499,7 +1493,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
- err = check_scratch(ctx_vm(ctx), offset);
+ err = check_scratch(ctx->vm, offset);
if (err)
return err;
@@ -1528,7 +1522,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
intel_gt_chipset_flush(engine->gt);
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1596,7 +1590,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
- err = check_scratch(ctx_vm(ctx), offset);
+ err = check_scratch(ctx->vm, offset);
if (err)
return err;
@@ -1607,7 +1601,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
if (GRAPHICS_VER(i915) >= 8) {
const u32 GPR0 = engine->mmio_base + 0x600;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
@@ -1739,7 +1733,7 @@ static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
u32 *vaddr;
int err = 0;
- vm = ctx_vm(ctx);
+ vm = ctx->vm;
if (!vm)
return -ENODEV;
@@ -1801,7 +1795,7 @@ static int igt_vm_isolation(void *arg)
}
/* We can only test vm isolation, if the vm are distinct */
- if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
+ if (ctx_a->vm == ctx_b->vm)
goto out_file;
/* Read the initial state of the scratch page */
@@ -1813,8 +1807,8 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_file;
- vm_total = ctx_vm(ctx_a)->total;
- GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
+ vm_total = ctx_a->vm->total;
+ GEM_BUG_ON(ctx_b->vm->total != vm_total);
count = 0;
num_engines = 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
deleted file mode 100644
index 16162fc2782d..000000000000
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2020 Intel Corporation
- */
-
-#include "i915_selftest.h"
-
-#include "gt/intel_engine_pm.h"
-#include "selftests/igt_flush_test.h"
-
-static u64 read_reloc(const u32 *map, int x, const u64 mask)
-{
- u64 reloc;
-
- memcpy(&reloc, &map[x], sizeof(reloc));
- return reloc & mask;
-}
-
-static int __igt_gpu_reloc(struct i915_execbuffer *eb,
- struct drm_i915_gem_object *obj)
-{
- const unsigned int offsets[] = { 8, 3, 0 };
- const u64 mask =
- GENMASK_ULL(eb->reloc_cache.use_64bit_reloc ? 63 : 31, 0);
- const u32 *map = page_mask_bits(obj->mm.mapping);
- struct i915_request *rq;
- struct i915_vma *vma;
- int err;
- int i;
-
- vma = i915_vma_instance(obj, eb->context->vm, NULL);
- if (IS_ERR(vma))
- return PTR_ERR(vma);
-
- err = i915_gem_object_lock(obj, &eb->ww);
- if (err)
- return err;
-
- err = i915_vma_pin_ww(vma, &eb->ww, 0, 0, PIN_USER | PIN_HIGH);
- if (err)
- return err;
-
- /* 8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
- if (err <= 0)
- goto reloc_err;
-
- /* !8-Byte aligned */
- err = __reloc_entry_gpu(eb, vma, offsets[1] * sizeof(u32), 1);
- if (err <= 0)
- goto reloc_err;
-
- /* Skip to the end of the cmd page */
- i = PAGE_SIZE / sizeof(u32) - 1;
- i -= eb->reloc_cache.rq_size;
- memset32(eb->reloc_cache.rq_cmd + eb->reloc_cache.rq_size,
- MI_NOOP, i);
- eb->reloc_cache.rq_size += i;
-
- /* Force next batch */
- err = __reloc_entry_gpu(eb, vma, offsets[2] * sizeof(u32), 2);
- if (err <= 0)
- goto reloc_err;
-
- GEM_BUG_ON(!eb->reloc_cache.rq);
- rq = i915_request_get(eb->reloc_cache.rq);
- reloc_gpu_flush(eb, &eb->reloc_cache);
- GEM_BUG_ON(eb->reloc_cache.rq);
-
- err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
- if (err) {
- intel_gt_set_wedged(eb->engine->gt);
- goto put_rq;
- }
-
- if (!i915_request_completed(rq)) {
- pr_err("%s: did not wait for relocations!\n", eb->engine->name);
- err = -EINVAL;
- goto put_rq;
- }
-
- for (i = 0; i < ARRAY_SIZE(offsets); i++) {
- u64 reloc = read_reloc(map, offsets[i], mask);
-
- if (reloc != i) {
- pr_err("%s[%d]: map[%d] %llx != %x\n",
- eb->engine->name, i, offsets[i], reloc, i);
- err = -EINVAL;
- }
- }
- if (err)
- igt_hexdump(map, 4096);
-
-put_rq:
- i915_request_put(rq);
-unpin_vma:
- i915_vma_unpin(vma);
- return err;
-
-reloc_err:
- if (!err)
- err = -EIO;
- goto unpin_vma;
-}
-
-static int igt_gpu_reloc(void *arg)
-{
- struct i915_execbuffer eb;
- struct drm_i915_gem_object *scratch;
- int err = 0;
- u32 *map;
-
- eb.i915 = arg;
-
- scratch = i915_gem_object_create_internal(eb.i915, 4096);
- if (IS_ERR(scratch))
- return PTR_ERR(scratch);
-
- map = i915_gem_object_pin_map_unlocked(scratch, I915_MAP_WC);
- if (IS_ERR(map)) {
- err = PTR_ERR(map);
- goto err_scratch;
- }
-
- intel_gt_pm_get(&eb.i915->gt);
-
- for_each_uabi_engine(eb.engine, eb.i915) {
- if (intel_engine_requires_cmd_parser(eb.engine) ||
- intel_engine_using_cmd_parser(eb.engine))
- continue;
-
- reloc_cache_init(&eb.reloc_cache, eb.i915);
- memset(map, POISON_INUSE, 4096);
-
- intel_engine_pm_get(eb.engine);
- eb.context = intel_context_create(eb.engine);
- if (IS_ERR(eb.context)) {
- err = PTR_ERR(eb.context);
- goto err_pm;
- }
- eb.reloc_pool = NULL;
- eb.reloc_context = NULL;
-
- i915_gem_ww_ctx_init(&eb.ww, false);
-retry:
- err = intel_context_pin_ww(eb.context, &eb.ww);
- if (!err) {
- err = __igt_gpu_reloc(&eb, scratch);
-
- intel_context_unpin(eb.context);
- }
- if (err == -EDEADLK) {
- err = i915_gem_ww_ctx_backoff(&eb.ww);
- if (!err)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&eb.ww);
-
- if (eb.reloc_pool)
- intel_gt_buffer_pool_put(eb.reloc_pool);
- if (eb.reloc_context)
- intel_context_put(eb.reloc_context);
-
- intel_context_put(eb.context);
-err_pm:
- intel_engine_pm_put(eb.engine);
- if (err)
- break;
- }
-
- if (igt_flush_test(eb.i915))
- err = -EIO;
-
- intel_gt_pm_put(&eb.i915->gt);
-err_scratch:
- i915_gem_object_put(scratch);
- return err;
-}
-
-int i915_gem_execbuffer_live_selftests(struct drm_i915_private *i915)
-{
- static const struct i915_subtest tests[] = {
- SUBTEST(igt_gpu_reloc),
- };
-
- if (intel_gt_is_wedged(&i915->gt))
- return 0;
-
- return i915_live_subtests(tests, i915);
-}
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index a2c34e5a1c54..6d30cdfa80f3 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -903,7 +903,9 @@ static int __igt_mmap(struct drm_i915_private *i915,
pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);
+ mmap_read_lock(current->mm);
area = vma_lookup(current->mm, addr);
+ mmap_read_unlock(current->mm);
if (!area) {
pr_err("%s: Did not create a vm_area_struct for the mmap\n",
obj->mm.region->name);
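
The hunk above takes mmap_read_lock() around vma_lookup() because walking current->mm's VMA tree requires the mmap lock to be held. A hedged sketch of the same pattern; the addr value and the error code are assumptions for illustration:

	struct vm_area_struct *area;

	mmap_read_lock(current->mm);		/* required by vma_lookup() */
	area = vma_lookup(current->mm, addr);
	mmap_read_unlock(current->mm);
	if (!area)
		return -EINVAL;			/* mmap did not create a VMA */
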
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index fee070df1c97..c0a8ef368044 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,7 @@ mock_context(struct drm_i915_private *i915,
kref_init(&ctx->ref);
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ INIT_WORK(&ctx->release_work, i915_gem_context_release_work);
mutex_init(&ctx->mutex);
@@ -87,7 +88,7 @@ live_context(struct drm_i915_private *i915, struct file *file)
return ERR_CAST(pc);
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx))
return ctx;
@@ -162,7 +163,7 @@ kernel_context(struct drm_i915_private *i915,
}
ctx = i915_gem_create_context(i915, pc);
- proto_context_close(pc);
+ proto_context_close(i915, pc);
if (IS_ERR(ctx))
return ctx;
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.h b/drivers/gpu/drm/i915/gt/debugfs_engines.h
deleted file mode 100644
index f69257eaa1cc..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef DEBUGFS_ENGINES_H
-#define DEBUGFS_ENGINES_H
-
-struct intel_gt;
-struct dentry;
-
-void debugfs_engines_register(struct intel_gt *gt, struct dentry *root);
-
-#endif /* DEBUGFS_ENGINES_H */
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.c b/drivers/gpu/drm/i915/gt/debugfs_gt.c
deleted file mode 100644
index 591eb60785db..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.c
+++ /dev/null
@@ -1,47 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#include <linux/debugfs.h>
-
-#include "debugfs_engines.h"
-#include "debugfs_gt.h"
-#include "debugfs_gt_pm.h"
-#include "intel_sseu_debugfs.h"
-#include "uc/intel_uc_debugfs.h"
-#include "i915_drv.h"
-
-void debugfs_gt_register(struct intel_gt *gt)
-{
- struct dentry *root;
-
- if (!gt->i915->drm.primary->debugfs_root)
- return;
-
- root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
- if (IS_ERR(root))
- return;
-
- debugfs_engines_register(gt, root);
- debugfs_gt_pm_register(gt, root);
- intel_sseu_debugfs_register(gt, root);
-
- intel_uc_debugfs_register(&gt->uc, root);
-}
-
-void intel_gt_debugfs_register_files(struct dentry *root,
- const struct debugfs_gt_file *files,
- unsigned long count, void *data)
-{
- while (count--) {
- umode_t mode = files->fops->write ? 0644 : 0444;
-
- if (!files->eval || files->eval(data))
- debugfs_create_file(files->name,
- mode, root, data,
- files->fops);
-
- files++;
- }
-}
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h b/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
deleted file mode 100644
index 4cf5f5c9da7d..000000000000
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2019 Intel Corporation
- */
-
-#ifndef DEBUGFS_GT_PM_H
-#define DEBUGFS_GT_PM_H
-
-struct intel_gt;
-struct dentry;
-
-void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root);
-
-#endif /* DEBUGFS_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index 1aee5e6b1b23..890191f286e3 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -429,7 +429,7 @@ struct i915_ppgtt *gen6_ppgtt_create(struct intel_gt *gt)
mutex_init(&ppgtt->flush);
mutex_init(&ppgtt->pin_mutex);
- ppgtt_init(&ppgtt->base, gt);
+ ppgtt_init(&ppgtt->base, gt, 0);
ppgtt->base.vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen6_pte_t));
ppgtt->base.vm.top = 1;
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 6e0e52eeb87a..037a9a6e4889 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -548,6 +548,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
I915_GTT_PAGE_SIZE_2M)))) {
vaddr = px_vaddr(pd);
vaddr[maybe_64K] |= GEN8_PDE_IPS_64K;
+ clflush_cache_range(vaddr, PAGE_SIZE);
page_size = I915_GTT_PAGE_SIZE_64K;
/*
@@ -568,6 +569,7 @@ static void gen8_ppgtt_insert_huge(struct i915_vma *vma,
for (i = 1; i < index; i += 16)
memset64(vaddr + i, encode, 15);
+ clflush_cache_range(vaddr, PAGE_SIZE);
}
}
@@ -751,7 +753,8 @@ err_pd:
* space.
*
*/
-struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct i915_ppgtt *ppgtt;
int err;
@@ -760,7 +763,7 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
if (!ppgtt)
return ERR_PTR(-ENOMEM);
- ppgtt_init(ppgtt, gt);
+ ppgtt_init(ppgtt, gt, lmem_pt_obj_flags);
ppgtt->vm.top = i915_vm_is_4lvl(&ppgtt->vm) ? 3 : 2;
ppgtt->vm.pd_shift = ilog2(SZ_4K * SZ_4K / sizeof(gen8_pte_t));
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
index b9028c2ad3c7..f541d19264b4 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.h
@@ -12,7 +12,9 @@ struct i915_address_space;
struct intel_gt;
enum i915_cache_level;
-struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt);
+struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
+
u64 gen8_ggtt_pte_encode(dma_addr_t addr,
enum i915_cache_level level,
u32 flags);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 17ca4dc4d0cb..5634d14052bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -240,6 +240,8 @@ int __intel_context_do_pin_ww(struct intel_context *ce,
if (err)
goto err_post_unpin;
+ intel_engine_pm_might_get(ce->engine);
+
if (unlikely(intel_context_is_closed(ce))) {
err = -ENOENT;
goto err_unlock;
@@ -395,19 +397,22 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
spin_lock_init(&ce->guc_state.lock);
INIT_LIST_HEAD(&ce->guc_state.fences);
+ INIT_LIST_HEAD(&ce->guc_state.requests);
+
+ ce->guc_id.id = GUC_INVALID_LRC_ID;
+ INIT_LIST_HEAD(&ce->guc_id.link);
- spin_lock_init(&ce->guc_active.lock);
- INIT_LIST_HEAD(&ce->guc_active.requests);
+ INIT_LIST_HEAD(&ce->destroyed_link);
- ce->guc_id = GUC_INVALID_LRC_ID;
- INIT_LIST_HEAD(&ce->guc_id_link);
+ INIT_LIST_HEAD(&ce->parallel.child_list);
/*
* Initialize fence to be complete as this is expected to be complete
* unless there is a pending schedule disable outstanding.
*/
- i915_sw_fence_init(&ce->guc_blocked, sw_fence_dummy_notify);
- i915_sw_fence_commit(&ce->guc_blocked);
+ i915_sw_fence_init(&ce->guc_state.blocked,
+ sw_fence_dummy_notify);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
i915_active_init(&ce->active,
__intel_context_active, __intel_context_retire, 0);
@@ -415,13 +420,20 @@ intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
void intel_context_fini(struct intel_context *ce)
{
+ struct intel_context *child, *next;
+
if (ce->timeline)
intel_timeline_put(ce->timeline);
i915_vm_put(ce->vm);
+ /* Need to put the creation ref for the children */
+ if (intel_context_is_parent(ce))
+ for_each_child_safe(ce, child, next)
+ intel_context_put(child);
+
mutex_destroy(&ce->pin_mutex);
i915_active_fini(&ce->active);
- i915_sw_fence_fini(&ce->guc_blocked);
+ i915_sw_fence_fini(&ce->guc_state.blocked);
}
void i915_context_module_exit(void)
@@ -517,24 +529,53 @@ retry:
struct i915_request *intel_context_find_active_request(struct intel_context *ce)
{
+ struct intel_context *parent = intel_context_to_parent(ce);
struct i915_request *rq, *active = NULL;
unsigned long flags;
GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));
- spin_lock_irqsave(&ce->guc_active.lock, flags);
- list_for_each_entry_reverse(rq, &ce->guc_active.requests,
+ /*
+ * We search the parent list to find an active request on the submitted
+ * context. The parent list contains the requests for all the contexts
+	 * in the relationship, so we have to compare each request's
+	 * context.
+ */
+ spin_lock_irqsave(&parent->guc_state.lock, flags);
+ list_for_each_entry_reverse(rq, &parent->guc_state.requests,
sched.link) {
+ if (rq->context != ce)
+ continue;
if (i915_request_completed(rq))
break;
active = rq;
}
- spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+ spin_unlock_irqrestore(&parent->guc_state.lock, flags);
return active;
}
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child)
+{
+ /*
+	 * It is the caller's responsibility to validate that this function is
+	 * used correctly, but we use GEM_BUG_ON here to ensure that it is.
+ */
+ GEM_BUG_ON(!intel_engine_uses_guc(parent->engine));
+ GEM_BUG_ON(intel_context_is_pinned(parent));
+ GEM_BUG_ON(intel_context_is_child(parent));
+ GEM_BUG_ON(intel_context_is_pinned(child));
+ GEM_BUG_ON(intel_context_is_child(child));
+ GEM_BUG_ON(intel_context_is_parent(child));
+
+ parent->parallel.child_index = parent->parallel.number_children++;
+ list_add_tail(&child->parallel.child_link,
+ &parent->parallel.child_list);
+ child->parallel.parent = parent;
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif
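
intel_context_bind_parent_child() only links the structures; a hedged sketch of how a caller might build a 'width'-wide parallel context group, assuming GuC submission and an engines[] array gathered elsewhere. This mirrors, but is not, the actual GuC submission code:

	struct intel_context *parent, *child;
	unsigned int i;

	parent = intel_context_create(engines[0]);
	if (IS_ERR(parent))
		return PTR_ERR(parent);

	for (i = 1; i < width; i++) {
		child = intel_context_create(engines[i]);
		if (IS_ERR(child))
			return PTR_ERR(child);	/* cleanup omitted */

		/* the parent keeps the creation reference to each child */
		intel_context_bind_parent_child(parent, child);
	}
	/* parent->parallel.number_children is now width - 1 */
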
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index c41098950746..246c37d72cd7 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -44,6 +44,54 @@ void intel_context_free(struct intel_context *ce);
int intel_context_reconfigure_sseu(struct intel_context *ce,
const struct intel_sseu sseu);
+#define PARENT_SCRATCH_SIZE PAGE_SIZE
+
+static inline bool intel_context_is_child(struct intel_context *ce)
+{
+ return !!ce->parallel.parent;
+}
+
+static inline bool intel_context_is_parent(struct intel_context *ce)
+{
+ return !!ce->parallel.number_children;
+}
+
+static inline bool intel_context_is_pinned(struct intel_context *ce);
+
+static inline struct intel_context *
+intel_context_to_parent(struct intel_context *ce)
+{
+ if (intel_context_is_child(ce)) {
+ /*
+ * The parent holds ref count to the child so it is always safe
+ * for the parent to access the child, but the child has a
+ * pointer to the parent without a ref. To ensure this is safe
+ * the child should only access the parent pointer while the
+ * parent is pinned.
+ */
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+
+ return ce->parallel.parent;
+ } else {
+ return ce;
+ }
+}
+
+static inline bool intel_context_is_parallel(struct intel_context *ce)
+{
+ return intel_context_is_child(ce) || intel_context_is_parent(ce);
+}
+
+void intel_context_bind_parent_child(struct intel_context *parent,
+ struct intel_context *child);
+
+#define for_each_child(parent, ce)\
+ list_for_each_entry(ce, &(parent)->parallel.child_list,\
+ parallel.child_link)
+#define for_each_child_safe(parent, ce, cn)\
+ list_for_each_entry_safe(ce, cn, &(parent)->parallel.child_list,\
+ parallel.child_link)
+
/**
 * intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce - the context
@@ -193,7 +241,13 @@ intel_context_timeline_lock(struct intel_context *ce)
struct intel_timeline *tl = ce->timeline;
int err;
- err = mutex_lock_interruptible(&tl->mutex);
+ if (intel_context_is_parent(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex, 0);
+ else if (intel_context_is_child(ce))
+ err = mutex_lock_interruptible_nested(&tl->mutex,
+ ce->parallel.child_index + 1);
+ else
+ err = mutex_lock_interruptible(&tl->mutex);
if (err)
return ERR_PTR(err);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index e54351a170e2..9e0177dc5484 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -55,9 +55,13 @@ struct intel_context_ops {
void (*reset)(struct intel_context *ce);
void (*destroy)(struct kref *kref);
- /* virtual engine/context interface */
+ /* virtual/parallel engine/context interface */
struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
- unsigned int count);
+ unsigned int count,
+ unsigned long flags);
+ struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
unsigned int sibling);
};
@@ -112,6 +116,8 @@ struct intel_context {
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT 8
#define CONTEXT_LRCA_DIRTY 9
+#define CONTEXT_GUC_INIT 10
+#define CONTEXT_PERMA_PIN 11
struct {
u64 timeout_us;
@@ -152,52 +158,141 @@ struct intel_context {
/** sseu: Control eu/slice partitioning */
struct intel_sseu sseu;
+ /**
+ * pinned_contexts_link: List link for the engine's pinned contexts.
+	 * This is only used if this is a perma-pinned kernel context; the
+	 * list is assumed to be manipulated only during driver load or
+	 * unload, so no mutex protection is currently needed.
+ */
+ struct list_head pinned_contexts_link;
+
u8 wa_bb_page; /* if set, page num reserved for context workarounds */
struct {
- /** lock: protects everything in guc_state */
+ /** @lock: protects everything in guc_state */
spinlock_t lock;
/**
- * sched_state: scheduling state of this context using GuC
+ * @sched_state: scheduling state of this context using GuC
* submission
*/
- u16 sched_state;
+ u32 sched_state;
/*
- * fences: maintains of list of requests that have a submit
- * fence related to GuC submission
+ * @fences: maintains a list of requests that are currently
+ * being fenced until a GuC operation completes
*/
struct list_head fences;
+ /**
+ * @blocked: fence used to signal when the blocking of a
+ * context's submissions is complete.
+ */
+ struct i915_sw_fence blocked;
+ /** @number_committed_requests: number of committed requests */
+ int number_committed_requests;
+ /** @requests: list of active requests on this context */
+ struct list_head requests;
+ /** @prio: the context's current guc priority */
+ u8 prio;
+ /**
+ * @prio_count: a counter of the number requests in flight in
+ * each priority bucket
+ */
+ u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
} guc_state;
struct {
- /** lock: protects everything in guc_active */
- spinlock_t lock;
- /** requests: active requests on this context */
- struct list_head requests;
- } guc_active;
-
- /* GuC scheduling state flags that do not require a lock. */
- atomic_t guc_sched_state_no_lock;
-
- /* GuC LRC descriptor ID */
- u16 guc_id;
+ /**
+ * @id: handle which is used to uniquely identify this context
+ * with the GuC, protected by guc->submission_state.lock
+ */
+ u16 id;
+ /**
+ * @ref: the number of references to the guc_id, when
+ * transitioning in and out of zero protected by
+ * guc->submission_state.lock
+ */
+ atomic_t ref;
+ /**
+ * @link: in guc->guc_id_list when the guc_id has no refs but is
+ * still valid, protected by guc->submission_state.lock
+ */
+ struct list_head link;
+ } guc_id;
- /* GuC LRC descriptor reference count */
- atomic_t guc_id_ref;
+ /**
+ * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
+ * list when context is pending to be destroyed (deregistered with the
+ * GuC), protected by guc->submission_state.lock
+ */
+ struct list_head destroyed_link;
- /*
- * GuC ID link - in list when unpinned but guc_id still valid in GuC
+ /** @parallel: sub-structure for parallel submission members */
+ struct {
+ union {
+ /**
+			 * @child_list: parent's list of child
+			 * contexts; no protection needed as the list is
+			 * immutable after context creation
+ */
+ struct list_head child_list;
+ /**
+ * @child_link: child's link into parent's list of
+ * children
+ */
+ struct list_head child_link;
+ };
+ /** @parent: pointer to parent if child */
+ struct intel_context *parent;
+ /**
+ * @last_rq: last request submitted on a parallel context, used
+ * to insert submit fences between requests in the parallel
+ * context
+ */
+ struct i915_request *last_rq;
+ /**
+ * @fence_context: fence context composite fence when doing
+ * parallel submission
+ */
+ u64 fence_context;
+ /**
+ * @seqno: seqno for composite fence when doing parallel
+ * submission
+ */
+ u32 seqno;
+ /** @number_children: number of children if parent */
+ u8 number_children;
+ /** @child_index: index into child_list if child */
+ u8 child_index;
+ /** @guc: GuC specific members for parallel submission */
+ struct {
+ /** @wqi_head: head pointer in work queue */
+ u16 wqi_head;
+ /** @wqi_tail: tail pointer in work queue */
+ u16 wqi_tail;
+ /**
+ * @parent_page: page in context state (ce->state) used
+ * by parent for work queue, process descriptor
+ */
+ u8 parent_page;
+ } guc;
+ } parallel;
+
+#ifdef CONFIG_DRM_I915_SELFTEST
+ /**
+ * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
*/
- struct list_head guc_id_link;
+ bool drop_schedule_enable;
- /* GuC context blocked fence */
- struct i915_sw_fence guc_blocked;
+ /**
+ * @drop_schedule_disable: Force drop of schedule disable G2H for
+ * selftest
+ */
+ bool drop_schedule_disable;
- /*
- * GuC priority management
+ /**
+ * @drop_deregister: Force drop of deregister G2H for selftest
*/
- u8 guc_prio;
- u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
+ bool drop_deregister;
+#endif
};
#endif /* __INTEL_CONTEXT_TYPES__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 87579affb952..08559ace0ada 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -2,6 +2,7 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+#include <asm/cacheflush.h>
#include <drm/drm_util.h>
#include <linux/hashtable.h>
@@ -175,6 +176,8 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
#define I915_GEM_HWS_SEQNO 0x40
#define I915_GEM_HWS_SEQNO_ADDR (I915_GEM_HWS_SEQNO * sizeof(u32))
#define I915_GEM_HWS_MIGRATE (0x42 * sizeof(u32))
+#define I915_GEM_HWS_PXP 0x60
+#define I915_GEM_HWS_PXP_ADDR (I915_GEM_HWS_PXP * sizeof(u32))
#define I915_GEM_HWS_SCRATCH 0x80
#define I915_HWS_CSB_BUF0_INDEX 0x10
@@ -273,15 +276,25 @@ static inline bool intel_engine_uses_guc(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return false;
return intel_engine_has_preemption(engine);
}
+#define FORCE_VIRTUAL BIT(0)
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count);
+ unsigned int count, unsigned long flags);
+
+static inline struct intel_context *
+intel_engine_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_engines,
+ unsigned int width)
+{
+ GEM_BUG_ON(!engines[0]->cops->create_parallel);
+ return engines[0]->cops->create_parallel(engines, num_engines, width);
+}
static inline bool
intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
@@ -300,7 +313,7 @@ intel_virtual_engine_has_heartbeat(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return false;
if (intel_engine_is_virtual(engine))
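
With the new flags argument, a single sibling still yields a plain per-engine context unless FORCE_VIRTUAL is passed, and parallel contexts always go through the backend's create_parallel hook. A hedged usage sketch; siblings[], engines[], num_engines and width are assumed to come from elsewhere:

	struct intel_context *ce;

	/* one sibling, but force the virtual-engine path anyway */
	ce = intel_engine_create_virtual(siblings, 1, FORCE_VIRTUAL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	/* N-wide parallel submission; only valid if the backend provides it */
	ce = intel_engine_create_parallel(engines, num_engines, width);
	if (IS_ERR(ce))
		return PTR_ERR(ce);
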
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 0d9105a31d84..ff6753ccb129 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -290,7 +290,8 @@ static void nop_irq_handler(struct intel_engine_cs *engine, u16 iir)
GEM_DEBUG_WARN_ON(iir);
}
-static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
+static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id,
+ u8 logical_instance)
{
const struct engine_info *info = &intel_engines[id];
struct drm_i915_private *i915 = gt->i915;
@@ -320,6 +321,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES);
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
@@ -334,6 +336,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->class = info->class;
engine->instance = info->instance;
+ engine->logical_mask = BIT(logical_instance);
__sprint_engine_name(engine);
engine->props.heartbeat_interval_ms =
@@ -398,7 +401,8 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine)
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
} else if (engine->class == VIDEO_ENHANCEMENT_CLASS) {
- if (GRAPHICS_VER(i915) >= 9)
+ if (GRAPHICS_VER(i915) >= 9 &&
+ engine->gt->info.sfc_mask & BIT(engine->instance))
engine->uabi_capabilities |=
I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC;
}
@@ -474,18 +478,25 @@ void intel_engines_free(struct intel_gt *gt)
}
static
-bool gen11_vdbox_has_sfc(struct drm_i915_private *i915,
+bool gen11_vdbox_has_sfc(struct intel_gt *gt,
unsigned int physical_vdbox,
unsigned int logical_vdbox, u16 vdbox_mask)
{
+ struct drm_i915_private *i915 = gt->i915;
+
/*
* In Gen11, only even numbered logical VDBOXes are hooked
* up to an SFC (Scaler & Format Converter) unit.
* In Gen12, even-numbered physical instances are always connected
* to an SFC. Odd-numbered physical instances have an SFC only if
* the previous even instance is fused off.
+ *
+ * Starting with Xe_HP, there's also a dedicated SFC_ENABLE field
+ * in the fuse register that tells us whether a specific SFC is present.
*/
- if (GRAPHICS_VER(i915) == 12)
+ if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
+ return false;
+ else if (GRAPHICS_VER(i915) == 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
else if (GRAPHICS_VER(i915) == 11)
@@ -512,7 +523,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
struct intel_uncore *uncore = gt->uncore;
unsigned int logical_vdbox = 0;
unsigned int i;
- u32 media_fuse;
+ u32 media_fuse, fuse1;
u16 vdbox_mask;
u16 vebox_mask;
@@ -534,6 +545,13 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
+ gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
+ } else {
+ gt->info.sfc_mask = ~0;
+ }
+
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(gt, _VCS(i))) {
vdbox_mask &= ~BIT(i);
@@ -546,7 +564,7 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
continue;
}
- if (gen11_vdbox_has_sfc(i915, i, logical_vdbox, vdbox_mask))
+ if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
gt->info.vdbox_sfc_access |= BIT(i);
logical_vdbox++;
}
@@ -572,6 +590,37 @@ static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
return info->engine_mask;
}
+static void populate_logical_ids(struct intel_gt *gt, u8 *logical_ids,
+ u8 class, const u8 *map, u8 num_instances)
+{
+ int i, j;
+ u8 current_logical_id = 0;
+
+ for (j = 0; j < num_instances; ++j) {
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ if (!HAS_ENGINE(gt, i) ||
+ intel_engines[i].class != class)
+ continue;
+
+ if (intel_engines[i].instance == map[j]) {
+ logical_ids[intel_engines[i].instance] =
+ current_logical_id++;
+ break;
+ }
+ }
+ }
+}
+
+static void setup_logical_ids(struct intel_gt *gt, u8 *logical_ids, u8 class)
+{
+ int i;
+ u8 map[MAX_ENGINE_INSTANCE + 1];
+
+ for (i = 0; i < MAX_ENGINE_INSTANCE + 1; ++i)
+ map[i] = i;
+ populate_logical_ids(gt, logical_ids, class, map, ARRAY_SIZE(map));
+}
+
/**
* intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers
* @gt: pointer to struct intel_gt
@@ -583,7 +632,8 @@ int intel_engines_init_mmio(struct intel_gt *gt)
struct drm_i915_private *i915 = gt->i915;
const unsigned int engine_mask = init_engine_mask(gt);
unsigned int mask = 0;
- unsigned int i;
+ unsigned int i, class;
+ u8 logical_ids[MAX_ENGINE_INSTANCE + 1];
int err;
drm_WARN_ON(&i915->drm, engine_mask == 0);
@@ -593,15 +643,23 @@ int intel_engines_init_mmio(struct intel_gt *gt)
if (i915_inject_probe_failure(i915))
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(intel_engines); i++) {
- if (!HAS_ENGINE(gt, i))
- continue;
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ setup_logical_ids(gt, logical_ids, class);
- err = intel_engine_setup(gt, i);
- if (err)
- goto cleanup;
+ for (i = 0; i < ARRAY_SIZE(intel_engines); ++i) {
+ u8 instance = intel_engines[i].instance;
- mask |= BIT(i);
+ if (intel_engines[i].class != class ||
+ !HAS_ENGINE(gt, i))
+ continue;
+
+ err = intel_engine_setup(gt, i,
+ logical_ids[instance]);
+ if (err)
+ goto cleanup;
+
+ mask |= BIT(i);
+ }
}
/*
@@ -875,6 +933,8 @@ intel_engine_create_pinned_context(struct intel_engine_cs *engine,
return ERR_PTR(err);
}
+ list_add_tail(&ce->pinned_contexts_link, &engine->pinned_contexts_list);
+
/*
* Give our perma-pinned kernel timelines a separate lockdep class,
* so that we can use them from within the normal user timelines
@@ -897,6 +957,7 @@ void intel_engine_destroy_pinned_context(struct intel_context *ce)
list_del(&ce->timeline->engine_link);
mutex_unlock(&hwsp->vm->mutex);
+ list_del(&ce->pinned_contexts_link);
intel_context_unpin(ce);
intel_context_put(ce);
}
@@ -1163,16 +1224,16 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
u32 mmio_base = engine->mmio_base;
int slice;
int subslice;
+ int iter;
memset(instdone, 0, sizeof(*instdone));
- switch (GRAPHICS_VER(i915)) {
- default:
+ if (GRAPHICS_VER(i915) >= 8) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
- break;
+ return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
@@ -1182,21 +1243,39 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common_extra[1] =
intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
}
- for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
- instdone->sampler[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_SAMPLER_INSTDONE);
- instdone->row[slice][subslice] =
- read_subslice_reg(engine, slice, subslice,
- GEN7_ROW_INSTDONE);
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
+ } else {
+ for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
+ instdone->sampler[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_SAMPLER_INSTDONE);
+ instdone->row[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ GEN7_ROW_INSTDONE);
+ }
}
- break;
- case 7:
+
+ if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
+ for_each_instdone_gslice_dss_xehp(i915, sseu, iter, slice, subslice)
+ instdone->geom_svg[slice][subslice] =
+ read_subslice_reg(engine, slice, subslice,
+ XEHPG_INSTDONE_GEOM_SVG);
+ }
+ } else if (GRAPHICS_VER(i915) >= 7) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id != RCS0)
- break;
+ return;
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
@@ -1204,22 +1283,15 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
intel_uncore_read(uncore, GEN7_SAMPLER_INSTDONE);
instdone->row[0][0] =
intel_uncore_read(uncore, GEN7_ROW_INSTDONE);
-
- break;
- case 6:
- case 5:
- case 4:
+ } else if (GRAPHICS_VER(i915) >= 4) {
instdone->instdone =
intel_uncore_read(uncore, RING_INSTDONE(mmio_base));
if (engine->id == RCS0)
/* HACK: Using the wrong struct member */
instdone->slice_common =
intel_uncore_read(uncore, GEN4_INSTDONE1);
- break;
- case 3:
- case 2:
+ } else {
instdone->instdone = intel_uncore_read(uncore, GEN2_INSTDONE);
- break;
}
}
@@ -1881,16 +1953,16 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
struct intel_context *
intel_engine_create_virtual(struct intel_engine_cs **siblings,
- unsigned int count)
+ unsigned int count, unsigned long flags)
{
if (count == 0)
return ERR_PTR(-EINVAL);
- if (count == 1)
+ if (count == 1 && !(flags & FORCE_VIRTUAL))
return intel_context_create(siblings[0]);
GEM_BUG_ON(!siblings[0]->cops->create_virtual);
- return siblings[0]->cops->create_virtual(siblings, count);
+ return siblings[0]->cops->create_virtual(siblings, count, flags);
}
struct i915_request *
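
populate_logical_ids()/setup_logical_ids() above hand out consecutive logical IDs to the present engines of a class in physical-instance order, so fused-off instances leave no holes. A standalone sketch of that assignment over an assumed fuse mask (not real hardware data):

#include <stdio.h>

#define MAX_INSTANCE 8

int main(void)
{
	unsigned int present = 0x2d;	/* assume physical VCS 0, 2, 3, 5 present */
	unsigned char logical_ids[MAX_INSTANCE];
	unsigned int i, current_logical_id = 0;

	for (i = 0; i < MAX_INSTANCE; i++) {
		if (!(present & (1u << i)))
			continue;	/* fused off: no logical ID assigned */

		logical_ids[i] = current_logical_id++;
		printf("physical instance %u -> logical %u\n",
		       i, logical_ids[i]);
	}
	return 0;
}
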
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 74775ae961b2..a3698f611f45 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -207,7 +207,7 @@ out:
void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return;
next_heartbeat(engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 1f07ac4e0672..a1334b48dde7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -162,6 +162,19 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
unsigned long flags;
bool result = true;
+ /*
+ * This is execlist specific behaviour intended to ensure the GPU is
+ * idle by switching to a known 'safe' context. With GuC submission, the
+ * same idle guarantee is achieved by other means (disabling
+ * scheduling). Further, switching to a 'safe' context has no effect
+ * with GuC submission as the scheduler can just switch back again.
+ *
+ * FIXME: Move this backend scheduler specific behaviour into the
+ * scheduler backend.
+ */
+ if (intel_engine_uses_guc(engine))
+ return true;
+
/* GPU is pointing to the void, as good as in the kernel context. */
if (intel_gt_is_wedged(engine->gt))
return true;
@@ -298,6 +311,29 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
intel_engine_init_heartbeat(engine);
}
+/**
+ * intel_engine_reset_pinned_contexts - Reset the pinned contexts of
+ * an engine.
+ * @engine: The engine whose pinned contexts we want to reset.
+ *
+ * Typically the pinned context LMEM images lose their content or get
+ * corrupted on suspend. This function resets their images.
+ */
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link) {
+ /* kernel context gets reset at __engine_unpark() */
+ if (ce == engine->kernel_context)
+ continue;
+
+ dbg_poison_ce(ce);
+ ce->ops->reset(ce);
+ }
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_engine_pm.c"
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 70ea46d6cfb0..d68675925b79 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -6,9 +6,11 @@
#ifndef INTEL_ENGINE_PM_H
#define INTEL_ENGINE_PM_H
+#include "i915_drv.h"
#include "i915_request.h"
#include "intel_engine_types.h"
#include "intel_wakeref.h"
+#include "intel_gt_pm.h"
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
@@ -16,6 +18,11 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
return intel_wakeref_is_active(&engine->wakeref);
}
+static inline void __intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ __intel_wakeref_get(&engine->wakeref);
+}
+
static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
{
intel_wakeref_get(&engine->wakeref);
@@ -26,6 +33,21 @@ static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
return intel_wakeref_get_if_active(&engine->wakeref);
}
+static inline void intel_engine_pm_might_get(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_get(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_get(&tengine->wakeref);
+ }
+ intel_gt_pm_might_get(engine->gt);
+}
+
static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
{
intel_wakeref_put(&engine->wakeref);
@@ -47,6 +69,21 @@ static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
intel_wakeref_unlock_wait(&engine->wakeref);
}
+static inline void intel_engine_pm_might_put(struct intel_engine_cs *engine)
+{
+ if (!intel_engine_is_virtual(engine)) {
+ intel_wakeref_might_put(&engine->wakeref);
+ } else {
+ struct intel_gt *gt = engine->gt;
+ struct intel_engine_cs *tengine;
+ intel_engine_mask_t tmp, mask = engine->mask;
+
+ for_each_engine_masked(tengine, gt, mask, tmp)
+ intel_wakeref_might_put(&tengine->wakeref);
+ }
+ intel_gt_pm_might_put(engine->gt);
+}
+
static inline struct i915_request *
intel_engine_create_kernel_request(struct intel_engine_cs *engine)
{
@@ -69,4 +106,6 @@ intel_engine_create_kernel_request(struct intel_engine_cs *engine)
void intel_engine_init__pm(struct intel_engine_cs *engine);
+void intel_engine_reset_pinned_contexts(struct intel_engine_cs *engine);
+
#endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index ed91bcff20eb..e0f773585c29 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -67,8 +67,11 @@ struct intel_instdone {
/* The following exist only in the RCS engine */
u32 slice_common;
u32 slice_common_extra[2];
- u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
- u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
+ u32 sampler[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+ u32 row[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
+
+ /* Added in XeHPG */
+ u32 geom_svg[GEN_MAX_GSLICES][I915_MAX_SUBSLICES];
};
/*
@@ -266,6 +269,13 @@ struct intel_engine_cs {
unsigned int guc_id;
intel_engine_mask_t mask;
+ /**
+ * @logical_mask: logical mask of engine, reported to user space via
+ * query IOCTL and used to communicate with the GuC in logical space.
+ * The logical instance of a physical engine can change based on product
+ * and fusing.
+ */
+ intel_engine_mask_t logical_mask;
u8 class;
u8 instance;
@@ -304,6 +314,13 @@ struct intel_engine_cs {
struct intel_context *kernel_context; /* pinned */
+ /**
+ * pinned_contexts_list: List of pinned contexts. This list is only
+	 * assumed to be manipulated during driver load or unload time and
+	 * therefore does not have any additional protection.
+ */
+ struct list_head pinned_contexts_list;
+
intel_engine_mask_t saturated; /* submitting semaphores too late? */
struct {
@@ -546,7 +563,7 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
static inline bool
intel_engine_has_timeslices(const struct intel_engine_cs *engine)
{
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return false;
return engine->flags & I915_ENGINE_HAS_TIMESLICES;
@@ -578,4 +595,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
(instdone_has_subslice(dev_priv_, sseu_, slice_, \
subslice_)))
+
+#define for_each_instdone_gslice_dss_xehp(dev_priv_, sseu_, iter_, gslice_, dss_) \
+ for ((iter_) = 0, (gslice_) = 0, (dss_) = 0; \
+ (iter_) < GEN_MAX_SUBSLICES; \
+ (iter_)++, (gslice_) = (iter_) / GEN_DSS_PER_GSLICE, \
+ (dss_) = (iter_) % GEN_DSS_PER_GSLICE) \
+ for_each_if(intel_sseu_has_subslice((sseu_), 0, (iter_)))
+
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index de5f9c86b9a4..bedb80057046 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -201,7 +201,8 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
}
static struct intel_context *
-execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
static struct i915_request *
__active_request(const struct intel_timeline * const tl,
@@ -2140,10 +2141,6 @@ static void __execlists_unhold(struct i915_request *rq)
if (p->flags & I915_DEPENDENCY_WEAK)
continue;
- /* Propagate any change in error status */
- if (rq->fence.error)
- i915_request_set_error_once(w, rq->fence.error);
-
if (w->engine != rq->engine)
continue;
@@ -2565,7 +2562,7 @@ __execlists_context_pre_pin(struct intel_context *ce,
if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) {
lrc_init_state(ce, engine, *vaddr);
- __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
+ __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size);
}
return 0;
@@ -2791,6 +2788,8 @@ static void execlists_sanitize(struct intel_engine_cs *engine)
/* And scrub the dirty cachelines for the HWSP */
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void enable_error_interrupt(struct intel_engine_cs *engine)
@@ -3341,7 +3340,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
if (can_preempt(engine)) {
engine->flags |= I915_ENGINE_HAS_PREEMPTION;
- if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (CONFIG_DRM_I915_TIMESLICE_DURATION)
engine->flags |= I915_ENGINE_HAS_TIMESLICES;
}
}
@@ -3786,7 +3785,8 @@ unlock:
}
static struct intel_context *
-execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
{
struct virtual_engine *ve;
unsigned int n;
@@ -3879,6 +3879,7 @@ execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
ve->siblings[ve->num_siblings++] = sibling;
ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
/*
* All physical engines must be compatible for their emission
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index de3ac58fceec..f17383e76eb7 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -644,7 +644,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt)
struct i915_ppgtt *ppgtt;
int err;
- ppgtt = i915_ppgtt_create(ggtt->vm.gt);
+ ppgtt = i915_ppgtt_create(ggtt->vm.gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -727,7 +727,6 @@ static void ggtt_cleanup_hw(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, 0);
- rcu_barrier(); /* flush the RCU'ed__i915_vm_release */
flush_workqueue(ggtt->vm.i915->wq);
mutex_lock(&ggtt->vm.mutex);
@@ -814,6 +813,21 @@ static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
return 0;
}
+static unsigned int gen6_gttmmadr_size(struct drm_i915_private *i915)
+{
+ /*
+ * GEN6: GTTMMADR size is 4MB and GTTADR starts at 2MB offset
+ * GEN8: GTTMMADR size is 16MB and GTTADR starts at 8MB offset
+ */
+ GEM_BUG_ON(GRAPHICS_VER(i915) < 6);
+ return (GRAPHICS_VER(i915) < 8) ? SZ_4M : SZ_16M;
+}
+
+static unsigned int gen6_gttadr_offset(struct drm_i915_private *i915)
+{
+ return gen6_gttmmadr_size(i915) / 2;
+}
+
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
struct drm_i915_private *i915 = ggtt->vm.i915;
@@ -822,8 +836,8 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
u32 pte_flags;
int ret;
- /* For Modern GENs the PTEs and register space are split in the BAR */
- phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
+ GEM_WARN_ON(pci_resource_len(pdev, 0) != gen6_gttmmadr_size(i915));
+ phys_addr = pci_resource_start(pdev, 0) + gen6_gttadr_offset(i915);
/*
* On BXT+/ICL+ writes larger than 64 bit to the GTT pagetable range
@@ -910,6 +924,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
size = gen8_get_total_gtt_size(snb_gmch_ctl);
ggtt->vm.alloc_pt_dma = alloc_pt_dma;
+ ggtt->vm.lmem_pt_obj_flags = I915_BO_ALLOC_PM_EARLY;
ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
ggtt->vm.cleanup = gen6_gmch_remove;
@@ -1373,13 +1388,28 @@ err_st_alloc:
}
static struct scatterlist *
-remap_pages(struct drm_i915_gem_object *obj, unsigned int offset,
+remap_pages(struct drm_i915_gem_object *obj,
+ unsigned int offset, unsigned int alignment_pad,
unsigned int width, unsigned int height,
unsigned int src_stride, unsigned int dst_stride,
struct sg_table *st, struct scatterlist *sg)
{
unsigned int row;
+ if (alignment_pad) {
+ st->nents++;
+
+ /*
+		 * The DE ignores the PTEs for the padding tiles; the sg entry
+ * here is just a convenience to indicate how many padding PTEs
+ * to insert at this spot.
+ */
+ sg_set_page(sg, NULL, alignment_pad * 4096, 0);
+ sg_dma_address(sg) = 0;
+ sg_dma_len(sg) = alignment_pad * 4096;
+ sg = sg_next(sg);
+ }
+
for (row = 0; row < height; row++) {
unsigned int left = width * I915_GTT_PAGE_SIZE;
@@ -1439,6 +1469,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
+ unsigned int gtt_offset = 0;
int ret = -ENOMEM;
int i;
@@ -1455,10 +1486,19 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
sg = st->sgl;
for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) {
- sg = remap_pages(obj, rem_info->plane[i].offset,
+ unsigned int alignment_pad = 0;
+
+ if (rem_info->plane_alignment)
+ alignment_pad = ALIGN(gtt_offset, rem_info->plane_alignment) - gtt_offset;
+
+ sg = remap_pages(obj,
+ rem_info->plane[i].offset, alignment_pad,
rem_info->plane[i].width, rem_info->plane[i].height,
rem_info->plane[i].src_stride, rem_info->plane[i].dst_stride,
st, sg);
+
+ gtt_offset += alignment_pad +
+ rem_info->plane[i].dst_stride * rem_info->plane[i].height;
}
i915_sg_trim(st);
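
The padding added to remap_pages()/intel_remap_pages() above rounds each plane's start up to plane_alignment and emits the gap as dummy PTEs (an sg entry with a NULL page). A standalone sketch of the offset arithmetic, with assumed plane sizes and alignment:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned int plane_pages[] = { 300, 120 };	/* dst_stride * height, assumed */
	unsigned int plane_alignment = 256;		/* pages, assumed */
	unsigned int gtt_offset = 0, i;

	for (i = 0; i < 2; i++) {
		unsigned int pad = ALIGN_UP(gtt_offset, plane_alignment) - gtt_offset;

		printf("plane %u: pad %u pages (%u bytes), starts at page %u\n",
		       i, pad, pad * 4096, gtt_offset + pad);
		gtt_offset += pad + plane_pages[i];
	}
	return 0;
}
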
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 1c3af0fc0456..f8253012d166 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -28,10 +28,13 @@
#define INSTR_26_TO_24_MASK 0x7000000
#define INSTR_26_TO_24_SHIFT 24
+#define __INSTR(client) ((client) << INSTR_CLIENT_SHIFT)
+
/*
* Memory interface instructions used by the kernel
*/
-#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+#define MI_INSTR(opcode, flags) \
+ (__INSTR(INSTR_MI_CLIENT) | (opcode) << 23 | (flags))
/* Many MI commands use bit 22 of the header dword for GGTT vs PPGTT */
#define MI_GLOBAL_GTT (1<<22)
@@ -57,6 +60,7 @@
#define MI_SUSPEND_FLUSH MI_INSTR(0x0b, 0)
#define MI_SUSPEND_FLUSH_EN (1<<0)
#define MI_SET_APPID MI_INSTR(0x0e, 0)
+#define MI_SET_APPID_SESSION_ID(x) ((x) << 0)
#define MI_OVERLAY_FLIP MI_INSTR(0x11, 0)
#define MI_OVERLAY_CONTINUE (0x0<<21)
#define MI_OVERLAY_ON (0x1<<21)
@@ -146,6 +150,7 @@
#define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2)
#define MI_SRM_LRM_GLOBAL_GTT (1<<22)
#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
+#define MI_FLUSH_DW_PROTECTED_MEM_EN (1 << 22)
#define MI_FLUSH_DW_STORE_INDEX (1<<21)
#define MI_INVALIDATE_TLB (1<<18)
#define MI_FLUSH_DW_OP_STOREDW (1<<14)
@@ -273,6 +278,19 @@
#define MI_MATH_REG_CF 0x33
/*
+ * Media instructions used by the kernel
+ */
+#define MEDIA_INSTR(pipe, op, sub_op, flags) \
+ (__INSTR(INSTR_RC_CLIENT) | (pipe) << INSTR_SUBCLIENT_SHIFT | \
+ (op) << INSTR_26_TO_24_SHIFT | (sub_op) << 16 | (flags))
+
+#define MFX_WAIT MEDIA_INSTR(1, 0, 0, 0)
+#define MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG REG_BIT(8)
+#define MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG REG_BIT(9)
+
+#define CRYPTO_KEY_EXCHANGE MEDIA_INSTR(2, 6, 9, 0)
+
+/*
* Commands used only by the command parser
*/
#define MI_SET_PREDICATE MI_INSTR(0x01, 0)
@@ -328,8 +346,6 @@
#define GFX_OP_3DSTATE_BINDING_TABLE_EDIT_PS \
((0x3<<29)|(0x3<<27)|(0x0<<24)|(0x47<<16))
-#define MFX_WAIT ((0x3<<29)|(0x1<<27)|(0x0<<16))
-
#define COLOR_BLT ((0x2<<29)|(0x40<<22))
#define SRC_COPY_BLT ((0x2<<29)|(0x43<<22))
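
The MEDIA_INSTR() macro above rebuilds the previously hard-coded MFX_WAIT value from its client/subclient/opcode fields. A standalone check of that encoding; the client and shift constants are copied from the existing intel_gpu_commands.h definitions and should be treated as assumptions here:

#include <stdio.h>
#include <stdint.h>

#define INSTR_CLIENT_SHIFT	29
#define INSTR_RC_CLIENT		0x3
#define INSTR_SUBCLIENT_SHIFT	27
#define INSTR_26_TO_24_SHIFT	24

#define __INSTR(client) ((uint32_t)(client) << INSTR_CLIENT_SHIFT)
#define MEDIA_INSTR(pipe, op, sub_op, flags) \
	(__INSTR(INSTR_RC_CLIENT) | (uint32_t)(pipe) << INSTR_SUBCLIENT_SHIFT | \
	 (uint32_t)(op) << INSTR_26_TO_24_SHIFT | (uint32_t)(sub_op) << 16 | (flags))

int main(void)
{
	uint32_t old_mfx_wait = (0x3u << 29) | (0x1u << 27) | (0x0u << 16);

	/* both print 0x68000000: the new macro matches the removed constant */
	printf("old MFX_WAIT  = 0x%08x\n", old_mfx_wait);
	printf("MEDIA_INSTR() = 0x%08x\n", MEDIA_INSTR(1, 0, 0, 0));
	return 0;
}
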
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 62d40c986642..1cb1948ac959 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -3,7 +3,7 @@
* Copyright © 2019 Intel Corporation
*/
-#include "debugfs_gt.h"
+#include "intel_gt_debugfs.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
@@ -15,12 +15,13 @@
#include "intel_gt_requests.h"
#include "intel_migrate.h"
#include "intel_mocs.h"
+#include "intel_pm.h"
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_uncore.h"
-#include "intel_pm.h"
#include "shmem_utils.h"
+#include "pxp/intel_pxp.h"
void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
{
@@ -434,7 +435,7 @@ void intel_gt_driver_register(struct intel_gt *gt)
{
intel_rps_driver_register(&gt->rps);
- debugfs_gt_register(gt);
+ intel_gt_debugfs_register(gt);
}
static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
@@ -481,7 +482,7 @@ static void intel_gt_fini_scratch(struct intel_gt *gt)
static struct i915_address_space *kernel_vm(struct intel_gt *gt)
{
if (INTEL_PPGTT(gt->i915) > INTEL_PPGTT_ALIASING)
- return &i915_ppgtt_create(gt)->vm;
+ return &i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY)->vm;
else
return i915_vm_get(&gt->ggtt->vm);
}
@@ -660,6 +661,8 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
return err;
+ intel_gt_init_workarounds(gt);
+
/*
* This is just a security blanket to placate dragons.
* On some systems, we very sporadically observe that the first TLBs
@@ -682,6 +685,8 @@ int intel_gt_init(struct intel_gt *gt)
goto err_pm;
}
+ intel_set_mocs_index(gt);
+
err = intel_engines_init(gt);
if (err)
goto err_engines;
@@ -710,6 +715,8 @@ int intel_gt_init(struct intel_gt *gt)
intel_migrate_init(&gt->migrate, gt);
+ intel_pxp_init(&gt->pxp);
+
goto out_fw;
err_gt:
__intel_gt_disable(gt);
@@ -737,6 +744,8 @@ void intel_gt_driver_remove(struct intel_gt *gt)
intel_uc_driver_remove(&gt->uc);
intel_engines_release(gt);
+
+ intel_gt_flush_buffer_pool(gt);
}
void intel_gt_driver_unregister(struct intel_gt *gt)
@@ -745,12 +754,14 @@ void intel_gt_driver_unregister(struct intel_gt *gt)
intel_rps_driver_unregister(&gt->rps);
+ intel_pxp_fini(&gt->pxp);
+
/*
* Upon unregistering the device to prevent any new users, cancel
* all in-flight requests so that we can quickly unbind the active
* resources.
*/
- intel_gt_set_wedged(gt);
+ intel_gt_set_wedged_on_fini(gt);
/* Scrub all HW state upon release */
with_intel_runtime_pm(gt->uncore->rpm, wakeref)
@@ -765,6 +776,7 @@ void intel_gt_driver_release(struct intel_gt *gt)
if (vm) /* FIXME being called twice on error paths :( */
i915_vm_put(vm);
+ intel_wa_list_free(&gt->wa_list);
intel_gt_pm_fini(gt);
intel_gt_fini_scratch(gt);
intel_gt_fini_buffer_pool(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
index aa0a59c5b614..acc49c56a9f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_buffer_pool.c
@@ -245,8 +245,6 @@ void intel_gt_fini_buffer_pool(struct intel_gt *gt)
struct intel_gt_buffer_pool *pool = &gt->buffer_pool;
int n;
- intel_gt_flush_buffer_pool(gt);
-
for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
new file mode 100644
index 000000000000..f103664b71d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+
+#include "i915_drv.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
+#include "intel_gt_pm_debugfs.h"
+#include "intel_sseu_debugfs.h"
+#include "pxp/intel_pxp_debugfs.h"
+#include "uc/intel_uc_debugfs.h"
+
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val)
+{
+ int ret = intel_gt_terminally_wedged(gt);
+
+ switch (ret) {
+ case -EIO:
+ *val = 1;
+ return 0;
+ case 0:
+ *val = 0;
+ return 0;
+ default:
+ return ret;
+ }
+}
+
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val)
+{
+ /* Flush any previous reset before applying for a new one */
+ wait_event(gt->reset.queue,
+ !test_bit(I915_RESET_BACKOFF, &gt->reset.flags));
+
+ intel_gt_handle_error(gt, val, I915_ERROR_CAPTURE,
+ "Manually reset engine mask to %llx", val);
+ return 0;
+}
+
+/*
+ * Keep the interface clean: these wrappers exist only so that the
+ * exported helpers can take a 'struct intel_gt *' instead of 'void *'
+ */
+static int __intel_gt_debugfs_reset_show(void *data, u64 *val)
+{
+ return intel_gt_debugfs_reset_show(data, val);
+}
+
+static int __intel_gt_debugfs_reset_store(void *data, u64 val)
+{
+ return intel_gt_debugfs_reset_store(data, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(reset_fops, __intel_gt_debugfs_reset_show,
+ __intel_gt_debugfs_reset_store, "%llu\n");
+
+static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "reset", &reset_fops, NULL },
+ };
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
+}
+
+void intel_gt_debugfs_register(struct intel_gt *gt)
+{
+ struct dentry *root;
+
+ if (!gt->i915->drm.primary->debugfs_root)
+ return;
+
+ root = debugfs_create_dir("gt", gt->i915->drm.primary->debugfs_root);
+ if (IS_ERR(root))
+ return;
+
+ gt_debugfs_register(gt, root);
+
+ intel_gt_engines_debugfs_register(gt, root);
+ intel_gt_pm_debugfs_register(gt, root);
+ intel_sseu_debugfs_register(gt, root);
+
+ intel_uc_debugfs_register(&gt->uc, root);
+ intel_pxp_debugfs_register(&gt->pxp, root);
+}
+
+void intel_gt_debugfs_register_files(struct dentry *root,
+ const struct intel_gt_debugfs_file *files,
+ unsigned long count, void *data)
+{
+ while (count--) {
+ umode_t mode = files->fops->write ? 0644 : 0444;
+
+ if (!files->eval || files->eval(data))
+ debugfs_create_file(files->name,
+ mode, root, data,
+ files->fops);
+
+ files++;
+ }
+}
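The reset_show/reset_store pair above is exported so that upper-level (non-gt)
debugfs code can reuse the same logic. A minimal sketch of such a caller, not
part of the patch, assuming the primary GT is embedded as i915->gt in this
kernel version; the i915_wedged_* names are illustrative:

#include <linux/fs.h>

#include "i915_drv.h"
#include "gt/intel_gt_debugfs.h"

/* Hypothetical i915-level wrapper around the exported gt helpers. */
static int i915_wedged_get(void *data, u64 *val)
{
        struct drm_i915_private *i915 = data;

        return intel_gt_debugfs_reset_show(&i915->gt, val);
}

static int i915_wedged_set(void *data, u64 val)
{
        struct drm_i915_private *i915 = data;

        return intel_gt_debugfs_reset_store(&i915->gt, val);
}

DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, i915_wedged_get,
                        i915_wedged_set, "%llu\n");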
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt.h b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
index f77540f727e9..e307ceb99031 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.h
@@ -3,14 +3,14 @@
* Copyright © 2019 Intel Corporation
*/
-#ifndef DEBUGFS_GT_H
-#define DEBUGFS_GT_H
+#ifndef INTEL_GT_DEBUGFS_H
+#define INTEL_GT_DEBUGFS_H
#include <linux/file.h>
struct intel_gt;
-#define DEFINE_GT_DEBUGFS_ATTRIBUTE(__name) \
+#define DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
return single_open(file, __name ## _show, inode->i_private); \
@@ -23,16 +23,20 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
-void debugfs_gt_register(struct intel_gt *gt);
+void intel_gt_debugfs_register(struct intel_gt *gt);
-struct debugfs_gt_file {
+struct intel_gt_debugfs_file {
const char *name;
const struct file_operations *fops;
bool (*eval)(void *data);
};
void intel_gt_debugfs_register_files(struct dentry *root,
- const struct debugfs_gt_file *files,
+ const struct intel_gt_debugfs_file *files,
unsigned long count, void *data);
-#endif /* DEBUGFS_GT_H */
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_debugfs_reset_show(struct intel_gt *gt, u64 *val);
+int intel_gt_debugfs_reset_store(struct intel_gt *gt, u64 val);
+
+#endif /* INTEL_GT_DEBUGFS_H */
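The renamed DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE() macro only generates the
single_open() boilerplate and a <name>_fops; files are then published through
intel_gt_debugfs_register_files(), which derives the file mode from the
presence of a .write hook and honours the optional eval() predicate. A minimal
sketch of wiring up a new per-gt file, using hypothetical my_feature_* names
that are not part of the patch:

#include "i915_drv.h"
#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"

static int my_feature_show(struct seq_file *m, void *data)
{
        struct intel_gt *gt = m->private; /* set by single_open() via i_private */

        seq_printf(m, "gt awake: %d\n", intel_gt_pm_is_awake(gt));
        return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(my_feature);

static bool my_feature_eval(void *data)
{
        struct intel_gt *gt = data;

        return HAS_LLC(gt->i915); /* only create the file where it is meaningful */
}

static void my_feature_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
        /* read-only fops (no .write), so the file is created with mode 0444 */
        static const struct intel_gt_debugfs_file files[] = {
                { "my_feature", &my_feature_fops, my_feature_eval },
        };

        intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), gt);
}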
diff --git a/drivers/gpu/drm/i915/gt/debugfs_engines.c b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
index 5e3725e62241..8f9b874fdc9c 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.c
@@ -6,10 +6,10 @@
#include <drm/drm_print.h>
-#include "debugfs_engines.h"
-#include "debugfs_gt.h"
#include "i915_drv.h" /* for_each_engine! */
#include "intel_engine.h"
+#include "intel_gt_debugfs.h"
+#include "intel_gt_engines_debugfs.h"
static int engines_show(struct seq_file *m, void *data)
{
@@ -24,11 +24,11 @@ static int engines_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(engines);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(engines);
-void debugfs_engines_register(struct intel_gt *gt, struct dentry *root)
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "engines", &engines_fops },
};
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
new file mode 100644
index 000000000000..dda113452da9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_engines_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_ENGINES_DEBUGFS_H
+#define INTEL_GT_ENGINES_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+
+void intel_gt_engines_debugfs_register(struct intel_gt *gt, struct dentry *root);
+
+#endif /* INTEL_GT_ENGINES_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index b2de83be4d97..699a74582d32 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -13,6 +13,7 @@
#include "intel_lrc_reg.h"
#include "intel_uncore.h"
#include "intel_rps.h"
+#include "pxp/intel_pxp_irq.h"
static void guc_irq_handler(struct intel_guc *guc, u16 iir)
{
@@ -64,6 +65,9 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
+ if (instance == OTHER_KCR_INSTANCE)
+ return intel_pxp_irq_handler(&gt->pxp, iir);
+
WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n",
instance, iir);
}
@@ -196,6 +200,9 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
intel_uncore_write(uncore, GEN11_GPM_WGBOXPERF_INTR_MASK, ~0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_ENABLE, 0);
intel_uncore_write(uncore, GEN11_GUC_SG_INTR_MASK, ~0);
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, 0);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~0);
}
void gen11_gt_irq_postinstall(struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index dea8e2479897..524eaf678790 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -18,6 +18,9 @@
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_wakeref.h"
+#include "pxp/intel_pxp_pm.h"
+
+#define I915_GT_SUSPEND_IDLE_TIMEOUT (HZ / 2)
static void user_forcewake(struct intel_gt *gt, bool suspend)
{
@@ -262,6 +265,8 @@ int intel_gt_resume(struct intel_gt *gt)
intel_uc_resume(&gt->uc);
+ intel_pxp_resume(&gt->pxp);
+
user_forcewake(gt, false);
out_fw:
@@ -279,7 +284,7 @@ static void wait_for_suspend(struct intel_gt *gt)
if (!intel_gt_pm_is_awake(gt))
return;
- if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
+ if (intel_gt_wait_for_idle(gt, I915_GT_SUSPEND_IDLE_TIMEOUT) == -ETIME) {
/*
* Forcibly cancel outstanding work and leave
* the gpu quiet.
@@ -296,7 +301,7 @@ void intel_gt_suspend_prepare(struct intel_gt *gt)
user_forcewake(gt, true);
wait_for_suspend(gt);
- intel_uc_suspend(&gt->uc);
+ intel_pxp_suspend(&gt->pxp, false);
}
static suspend_state_t pm_suspend_target(void)
@@ -320,6 +325,8 @@ void intel_gt_suspend_late(struct intel_gt *gt)
GEM_BUG_ON(gt->awake);
+ intel_uc_suspend(&gt->uc);
+
/*
* On disabling the device, we want to turn off HW access to memory
* that we no longer own.
@@ -346,6 +353,7 @@ void intel_gt_suspend_late(struct intel_gt *gt)
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
+ intel_pxp_suspend(&gt->pxp, true);
intel_uc_runtime_suspend(&gt->uc);
GT_TRACE(gt, "\n");
@@ -353,11 +361,19 @@ void intel_gt_runtime_suspend(struct intel_gt *gt)
int intel_gt_runtime_resume(struct intel_gt *gt)
{
+ int ret;
+
GT_TRACE(gt, "\n");
intel_gt_init_swizzling(gt);
intel_ggtt_restore_fences(gt->ggtt);
- return intel_uc_runtime_resume(&gt->uc);
+ ret = intel_uc_runtime_resume(&gt->uc);
+ if (ret)
+ return ret;
+
+ intel_pxp_resume(&gt->pxp);
+
+ return 0;
}
static ktime_t __intel_gt_get_awake_time(const struct intel_gt *gt)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index d0588d8aaa44..bc898df7a48c 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -31,6 +31,11 @@ static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
return intel_wakeref_get_if_active(&gt->wakeref);
}
+static inline void intel_gt_pm_might_get(struct intel_gt *gt)
+{
+ intel_wakeref_might_get(&gt->wakeref);
+}
+
static inline void intel_gt_pm_put(struct intel_gt *gt)
{
intel_wakeref_put(&gt->wakeref);
@@ -41,6 +46,15 @@ static inline void intel_gt_pm_put_async(struct intel_gt *gt)
intel_wakeref_put_async(&gt->wakeref);
}
+static inline void intel_gt_pm_might_put(struct intel_gt *gt)
+{
+ intel_wakeref_might_put(&gt->wakeref);
+}
+
+#define with_intel_gt_pm(gt, tmp) \
+ for (tmp = 1, intel_gt_pm_get(gt); tmp; \
+ intel_gt_pm_put(gt), tmp = 0)
+
static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
{
return intel_wakeref_wait_for_idle(&gt->wakeref);
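The new with_intel_gt_pm() helper wraps a block in a GT wakeref get/put pair,
while intel_gt_pm_might_get()/intel_gt_pm_might_put() merely annotate paths
that may take or drop the wakeref. A minimal usage sketch, with a hypothetical
read_with_gt_awake() helper that is not part of the patch:

#include "i915_drv.h"
#include "intel_gt_pm.h"

static u32 read_with_gt_awake(struct intel_gt *gt, i915_reg_t reg)
{
        intel_wakeref_t tmp; /* only used as the macro's loop flag */
        u32 val = 0;

        with_intel_gt_pm(gt, tmp)
                val = intel_uncore_read(gt->uncore, reg);

        return val;
}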
diff --git a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index d6f5836396f8..404dfa7673c6 100644
--- a/drivers/gpu/drm/i915/gt/debugfs_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -6,18 +6,59 @@
#include <linux/seq_file.h>
-#include "debugfs_gt.h"
-#include "debugfs_gt_pm.h"
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_clock_utils.h"
+#include "intel_gt_debugfs.h"
#include "intel_gt_pm.h"
+#include "intel_gt_pm_debugfs.h"
#include "intel_llc.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
#include "intel_rps.h"
#include "intel_runtime_pm.h"
-#include "intel_sideband.h"
#include "intel_uncore.h"
+#include "vlv_sideband.h"
+
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt)
+{
+ atomic_inc(&gt->user_wakeref);
+ intel_gt_pm_get(gt);
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_get(gt->uncore);
+
+ return 0;
+}
+
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt)
+{
+ if (GRAPHICS_VER(gt->i915) >= 6)
+ intel_uncore_forcewake_user_put(gt->uncore);
+ intel_gt_pm_put(gt);
+ atomic_dec(&gt->user_wakeref);
+
+ return 0;
+}
+
+static int forcewake_user_open(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ return intel_gt_pm_debugfs_forcewake_user_open(gt);
+}
+
+static int forcewake_user_release(struct inode *inode, struct file *file)
+{
+ struct intel_gt *gt = inode->i_private;
+
+ return intel_gt_pm_debugfs_forcewake_user_release(gt);
+}
+
+static const struct file_operations forcewake_user_fops = {
+ .owner = THIS_MODULE,
+ .open = forcewake_user_open,
+ .release = forcewake_user_release,
+};
static int fw_domains_show(struct seq_file *m, void *data)
{
@@ -36,7 +77,7 @@ static int fw_domains_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(fw_domains);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(fw_domains);
static void print_rc6_res(struct seq_file *m,
const char *title,
@@ -238,11 +279,10 @@ static int drpc_show(struct seq_file *m, void *unused)
return err;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(drpc);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(drpc);
-static int frequency_show(struct seq_file *m, void *unused)
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *p)
{
- struct intel_gt *gt = m->private;
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
struct intel_rps *rps = &gt->rps;
@@ -254,21 +294,21 @@ static int frequency_show(struct seq_file *m, void *unused)
u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ drm_printf(p, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ drm_printf(p, "Requested VID: %d\n", rgvswctl & 0x3f);
+ drm_printf(p, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
+ drm_printf(p, "Current P-state: %d\n",
(rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
u32 rpmodectl, freq_sts;
rpmodectl = intel_uncore_read(uncore, GEN6_RP_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
+ drm_printf(p, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
+ drm_printf(p, "HW control enabled: %s\n",
yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
+ drm_printf(p, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
@@ -276,25 +316,25 @@ static int frequency_show(struct seq_file *m, void *unused)
freq_sts = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
vlv_punit_put(i915);
- seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
- seq_printf(m, "DDR freq: %d MHz\n", i915->mem_freq);
+ drm_printf(p, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
+ drm_printf(p, "DDR freq: %d MHz\n", i915->mem_freq);
- seq_printf(m, "actual GPU freq: %d MHz\n",
+ drm_printf(p, "actual GPU freq: %d MHz\n",
intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
- seq_printf(m, "current GPU freq: %d MHz\n",
+ drm_printf(p, "current GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "max GPU freq: %d MHz\n",
+ drm_printf(p, "max GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m, "min GPU freq: %d MHz\n",
+ drm_printf(p, "min GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "idle GPU freq: %d MHz\n",
+ drm_printf(p, "idle GPU freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "efficient (RPe) frequency: %d MHz\n",
+ drm_printf(p, "efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
} else if (GRAPHICS_VER(i915) >= 6) {
u32 rp_state_limits;
@@ -309,13 +349,11 @@ static int frequency_show(struct seq_file *m, void *unused)
int max_freq;
rp_state_limits = intel_uncore_read(uncore, GEN6_RP_STATE_LIMITS);
- if (IS_GEN9_LP(i915)) {
- rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ rp_state_cap = intel_rps_read_state_cap(rps);
+ if (IS_GEN9_LP(i915))
gt_perf_status = intel_uncore_read(uncore, BXT_GT_PERF_STATUS);
- } else {
- rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+ else
gt_perf_status = intel_uncore_read(uncore, GEN6_GT_PERF_STATUS);
- }
/* RPSTAT1 is in the GT power well */
intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
@@ -376,113 +414,121 @@ static int frequency_show(struct seq_file *m, void *unused)
}
pm_mask = intel_uncore_read(uncore, GEN6_PMINTRMSK);
- seq_printf(m, "Video Turbo Mode: %s\n",
+ drm_printf(p, "Video Turbo Mode: %s\n",
yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
+ drm_printf(p, "HW control enabled: %s\n",
yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
+ drm_printf(p, "SW control enabled: %s\n",
yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
GEN6_RP_MEDIA_SW_MODE));
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
+ drm_printf(p, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
pm_ier, pm_imr, pm_mask);
if (GRAPHICS_VER(i915) <= 10)
- seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
+ drm_printf(p, "PM ISR=0x%08x IIR=0x%08x\n",
pm_isr, pm_iir);
- seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
+ drm_printf(p, "pm_intrmsk_mbz: 0x%08x\n",
rps->pm_intrmsk_mbz);
- seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "Render p-state ratio: %d\n",
+ drm_printf(p, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ drm_printf(p, "Render p-state ratio: %d\n",
(gt_perf_status & (GRAPHICS_VER(i915) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
- seq_printf(m, "Render p-state VID: %d\n",
+ drm_printf(p, "Render p-state VID: %d\n",
gt_perf_status & 0xff);
- seq_printf(m, "Render p-state limit: %d\n",
+ drm_printf(p, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
- seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
- seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
- seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
- seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
- seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
- seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
+ drm_printf(p, "RPSTAT1: 0x%08x\n", rpstat);
+ drm_printf(p, "RPMODECTL: 0x%08x\n", rpmodectl);
+ drm_printf(p, "RPINCLIMIT: 0x%08x\n", rpinclimit);
+ drm_printf(p, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
+ drm_printf(p, "RPNSWREQ: %dMHz\n", reqf);
+ drm_printf(p, "CAGF: %dMHz\n", cagf);
+ drm_printf(p, "RP CUR UP EI: %d (%lldns)\n",
rpcurupei,
intel_gt_pm_interval_to_ns(gt, rpcurupei));
- seq_printf(m, "RP CUR UP: %d (%lldns)\n",
+ drm_printf(p, "RP CUR UP: %d (%lldns)\n",
rpcurup, intel_gt_pm_interval_to_ns(gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%lldns)\n",
+ drm_printf(p, "RP PREV UP: %d (%lldns)\n",
rpprevup, intel_gt_pm_interval_to_ns(gt, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
+ drm_printf(p, "Up threshold: %d%%\n",
rps->power.up_threshold);
- seq_printf(m, "RP UP EI: %d (%lldns)\n",
+ drm_printf(p, "RP UP EI: %d (%lldns)\n",
rpupei, intel_gt_pm_interval_to_ns(gt, rpupei));
- seq_printf(m, "RP UP THRESHOLD: %d (%lldns)\n",
+ drm_printf(p, "RP UP THRESHOLD: %d (%lldns)\n",
rpupt, intel_gt_pm_interval_to_ns(gt, rpupt));
- seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
+ drm_printf(p, "RP CUR DOWN EI: %d (%lldns)\n",
rpcurdownei,
intel_gt_pm_interval_to_ns(gt, rpcurdownei));
- seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
+ drm_printf(p, "RP CUR DOWN: %d (%lldns)\n",
rpcurdown,
intel_gt_pm_interval_to_ns(gt, rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
+ drm_printf(p, "RP PREV DOWN: %d (%lldns)\n",
rpprevdown,
intel_gt_pm_interval_to_ns(gt, rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
+ drm_printf(p, "Down threshold: %d%%\n",
rps->power.down_threshold);
- seq_printf(m, "RP DOWN EI: %d (%lldns)\n",
+ drm_printf(p, "RP DOWN EI: %d (%lldns)\n",
rpdownei, intel_gt_pm_interval_to_ns(gt, rpdownei));
- seq_printf(m, "RP DOWN THRESHOLD: %d (%lldns)\n",
+ drm_printf(p, "RP DOWN THRESHOLD: %d (%lldns)\n",
rpdownt, intel_gt_pm_interval_to_ns(gt, rpdownt));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 0 :
rp_state_cap >> 16) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ drm_printf(p, "Lowest (RPN) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (rp_state_cap & 0xff00) >> 8;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ drm_printf(p, "Nominal (RP1) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
max_freq = (IS_GEN9_LP(i915) ? rp_state_cap >> 16 :
rp_state_cap >> 0) & 0xff;
max_freq *= (IS_GEN9_BC(i915) ||
GRAPHICS_VER(i915) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ drm_printf(p, "Max non-overclocked (RP0) frequency: %dMHz\n",
intel_gpu_freq(rps, max_freq));
- seq_printf(m, "Max overclocked frequency: %dMHz\n",
+ drm_printf(p, "Max overclocked frequency: %dMHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m, "Current freq: %d MHz\n",
+ drm_printf(p, "Current freq: %d MHz\n",
intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "Actual freq: %d MHz\n", cagf);
- seq_printf(m, "Idle freq: %d MHz\n",
+ drm_printf(p, "Actual freq: %d MHz\n", cagf);
+ drm_printf(p, "Idle freq: %d MHz\n",
intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "Min freq: %d MHz\n",
+ drm_printf(p, "Min freq: %d MHz\n",
intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "Boost freq: %d MHz\n",
+ drm_printf(p, "Boost freq: %d MHz\n",
intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Max freq: %d MHz\n",
+ drm_printf(p, "Max freq: %d MHz\n",
intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m,
+ drm_printf(p,
"efficient (RPe) frequency: %d MHz\n",
intel_gpu_freq(rps, rps->efficient_freq));
} else {
- seq_puts(m, "no P-state info available\n");
+ drm_puts(p, "no P-state info available\n");
}
- seq_printf(m, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
- seq_printf(m, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
+ drm_printf(p, "Current CD clock frequency: %d kHz\n", i915->cdclk.hw.cdclk);
+ drm_printf(p, "Max CD clock frequency: %d kHz\n", i915->max_cdclk_freq);
+ drm_printf(p, "Max pixel clock frequency: %d kHz\n", i915->max_dotclk_freq);
intel_runtime_pm_put(uncore->rpm, wakeref);
+}
+
+static int frequency_show(struct seq_file *m, void *unused)
+{
+ struct intel_gt *gt = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ intel_gt_pm_frequency_dump(gt, &p);
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(frequency);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(frequency);
static int llc_show(struct seq_file *m, void *data)
{
@@ -535,7 +581,7 @@ static bool llc_eval(void *data)
return HAS_LLC(gt->i915);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(llc);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(llc);
static const char *rps_power_to_str(unsigned int power)
{
@@ -614,14 +660,15 @@ static bool rps_eval(void *data)
return HAS_RPS(gt->i915);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(rps_boost);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rps_boost);
-void debugfs_gt_pm_register(struct intel_gt *gt, struct dentry *root)
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "drpc", &drpc_fops, NULL },
{ "frequency", &frequency_fops, NULL },
{ "forcewake", &fw_domains_fops, NULL },
+ { "forcewake_user", &forcewake_user_fops, NULL},
{ "llc", &llc_fops, llc_eval },
{ "rps_boost", &rps_boost_fops, rps_eval },
};
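Moving the body of frequency_show() into intel_gt_pm_frequency_dump() decouples
the report from seq_file: any drm_printer can now receive it, with the debugfs
entry reduced to the drm_seq_file_printer() bridge shown above. A minimal
sketch of another, hypothetical consumer that sends the same dump to the
kernel log:

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "intel_gt_pm_debugfs.h"

static void log_gt_frequencies(struct intel_gt *gt)
{
        struct drm_printer p = drm_info_printer(gt->i915->drm.dev);

        intel_gt_pm_frequency_dump(gt, &p);
}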
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
new file mode 100644
index 000000000000..a8457887ec65
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_GT_PM_DEBUGFS_H
+#define INTEL_GT_PM_DEBUGFS_H
+
+struct intel_gt;
+struct dentry;
+struct drm_printer;
+
+void intel_gt_pm_debugfs_register(struct intel_gt *gt, struct dentry *root);
+void intel_gt_pm_frequency_dump(struct intel_gt *gt, struct drm_printer *m);
+
+/* functions that need to be accessed by the upper level non-gt interfaces */
+int intel_gt_pm_debugfs_forcewake_user_open(struct intel_gt *gt);
+int intel_gt_pm_debugfs_forcewake_user_release(struct intel_gt *gt);
+
+#endif /* INTEL_GT_PM_DEBUGFS_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index a81e21bf1bd1..14216cc471b1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -26,6 +26,7 @@
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
+#include "pxp/intel_pxp_types.h"
struct drm_i915_private;
struct i915_ggtt;
@@ -72,6 +73,8 @@ struct intel_gt {
struct intel_uc uc;
+ struct i915_wa_list wa_list;
+
struct intel_gt_timelines {
spinlock_t lock; /* protects active_list */
struct list_head active_list;
@@ -184,6 +187,9 @@ struct intel_gt {
u8 num_engines;
+ /* General presence of SFC units */
+ u8 sfc_mask;
+
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
@@ -192,6 +198,12 @@ struct intel_gt {
unsigned long mslice_mask;
} info;
+
+ struct {
+ u8 uc_index;
+ } mocs;
+
+ struct intel_pxp pxp;
};
enum intel_gt_scratch_field {
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index e137dd32b5b8..67d14afa6623 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -28,7 +28,8 @@ struct drm_i915_gem_object *alloc_pt_lmem(struct i915_address_space *vm, int sz)
* used the passed in size for the page size, which should ensure it
* also has the same alignment.
*/
- obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz, 0);
+ obj = __i915_gem_object_create_lmem_with_ps(vm->i915, sz, sz,
+ vm->lmem_pt_obj_flags);
/*
* Ensure all paging structures for this vm share the same dma-resv
* object underneath, with the idea that one object_lock() will lock
@@ -155,7 +156,7 @@ void i915_vm_resv_release(struct kref *kref)
static void __i915_vm_release(struct work_struct *work)
{
struct i915_address_space *vm =
- container_of(work, struct i915_address_space, rcu.work);
+ container_of(work, struct i915_address_space, release_work);
vm->cleanup(vm);
i915_address_space_fini(vm);
@@ -171,7 +172,7 @@ void i915_vm_release(struct kref *kref)
GEM_BUG_ON(i915_is_ggtt(vm));
trace_i915_ppgtt_release(vm);
- queue_rcu_work(vm->i915->wq, &vm->rcu);
+ queue_work(vm->i915->wq, &vm->release_work);
}
void i915_address_space_init(struct i915_address_space *vm, int subclass)
@@ -185,7 +186,7 @@ void i915_address_space_init(struct i915_address_space *vm, int subclass)
if (!kref_read(&vm->resv_ref))
kref_init(&vm->resv_ref);
- INIT_RCU_WORK(&vm->rcu, __i915_vm_release);
+ INIT_WORK(&vm->release_work, __i915_vm_release);
atomic_set(&vm->open, 1);
/*
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index bc7153018ebd..bc6750263359 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -213,7 +213,7 @@ struct i915_vma_ops {
struct i915_address_space {
struct kref ref;
- struct rcu_work rcu;
+ struct work_struct release_work;
struct drm_mm mm;
struct intel_gt *gt;
@@ -260,6 +260,9 @@ struct i915_address_space {
u8 pd_shift;
u8 scratch_order;
+ /* Flags used when creating page-table objects for this vm */
+ unsigned long lmem_pt_obj_flags;
+
struct drm_i915_gem_object *
(*alloc_pt_dma)(struct i915_address_space *vm, int sz);
@@ -519,7 +522,8 @@ i915_page_dir_dma_addr(const struct i915_ppgtt *ppgtt, const unsigned int n)
return __px_dma(pt ? px_base(pt) : ppgtt->vm.scratch[ppgtt->vm.top]);
}
-void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt);
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
int i915_ggtt_probe_hw(struct drm_i915_private *i915);
int i915_ggtt_init_hw(struct drm_i915_private *i915);
@@ -537,7 +541,8 @@ static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt)
int i915_ppgtt_init_hw(struct intel_gt *gt);
-struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags);
void i915_ggtt_suspend(struct i915_ggtt *gtt);
void i915_ggtt_resume(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index eb1a15deed22..08d7d5ae263a 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -3,12 +3,13 @@
* Copyright © 2019 Intel Corporation
*/
+#include <asm/tsc.h>
#include <linux/cpufreq.h>
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_llc.h"
-#include "intel_sideband.h"
+#include "intel_pcode.h"
struct ia_constants {
unsigned int min_gpu_freq;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index bb4af4977920..56156cf18c41 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -226,6 +226,40 @@ static const u8 gen12_xcs_offsets[] = {
END
};
+static const u8 dg2_xcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ END
+};
+
static const u8 gen8_rcs_offsets[] = {
NOP(1),
LRI(14, POSTED),
@@ -525,6 +559,49 @@ static const u8 xehp_rcs_offsets[] = {
END
};
+static const u8 dg2_rcs_offsets[] = {
+ NOP(1),
+ LRI(15, POSTED),
+ REG16(0x244),
+ REG(0x034),
+ REG(0x030),
+ REG(0x038),
+ REG(0x03c),
+ REG(0x168),
+ REG(0x140),
+ REG(0x110),
+ REG(0x1c0),
+ REG(0x1c4),
+ REG(0x1c8),
+ REG(0x180),
+ REG16(0x2b4),
+ REG(0x120),
+ REG(0x124),
+
+ NOP(1),
+ LRI(9, POSTED),
+ REG16(0x3a8),
+ REG16(0x28c),
+ REG16(0x288),
+ REG16(0x284),
+ REG16(0x280),
+ REG16(0x27c),
+ REG16(0x278),
+ REG16(0x274),
+ REG16(0x270),
+
+ LRI(3, POSTED),
+ REG(0x1b0),
+ REG16(0x5a8),
+ REG16(0x5ac),
+
+ NOP(6),
+ LRI(1, 0),
+ REG(0x0c8),
+
+ END
+};
+
#undef END
#undef REG16
#undef REG
@@ -543,7 +620,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
!intel_engine_has_relative_mmio(engine));
if (engine->class == RENDER_CLASS) {
- if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_rcs_offsets;
+ else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50))
return xehp_rcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_rcs_offsets;
@@ -554,7 +633,9 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
else
return gen8_rcs_offsets;
} else {
- if (GRAPHICS_VER(engine->i915) >= 12)
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ return dg2_xcs_offsets;
+ else if (GRAPHICS_VER(engine->i915) >= 12)
return gen12_xcs_offsets;
else if (GRAPHICS_VER(engine->i915) >= 9)
return gen9_xcs_offsets;
@@ -861,7 +942,13 @@ __lrc_alloc_state(struct intel_context *ce, struct intel_engine_cs *engine)
context_size += PAGE_SIZE;
}
- obj = i915_gem_object_create_lmem(engine->i915, context_size, 0);
+ if (intel_context_is_parent(ce) && intel_engine_uses_guc(engine)) {
+ ce->parallel.guc.parent_page = context_size / PAGE_SIZE;
+ context_size += PARENT_SCRATCH_SIZE;
+ }
+
+ obj = i915_gem_object_create_lmem(engine->i915, context_size,
+ I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj))
obj = i915_gem_object_create_shmem(engine->i915, context_size);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_migrate.c b/drivers/gpu/drm/i915/gt/intel_migrate.c
index 1dac21aa7e5c..afb1cce9a352 100644
--- a/drivers/gpu/drm/i915/gt/intel_migrate.c
+++ b/drivers/gpu/drm/i915/gt/intel_migrate.c
@@ -78,7 +78,7 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
* TODO: Add support for huge LMEM PTEs
*/
- vm = i915_ppgtt_create(gt);
+ vm = i915_ppgtt_create(gt, I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(vm))
return ERR_CAST(vm);
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index 582c4423b95d..15f9ada28a7a 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -22,6 +22,8 @@ struct drm_i915_mocs_table {
unsigned int size;
unsigned int n_entries;
const struct drm_i915_mocs_entry *table;
+ u8 uc_index;
+ u8 unused_entries_index;
};
/* Defines for the tables (XXX_MOCS_0 - XXX_MOCS_63) */
@@ -40,6 +42,8 @@ struct drm_i915_mocs_table {
#define L3_ESC(value) ((value) << 0)
#define L3_SCC(value) ((value) << 1)
#define _L3_CACHEABILITY(value) ((value) << 4)
+#define L3_GLBGO(value) ((value) << 6)
+#define L3_LKUP(value) ((value) << 7)
/* Helper defines */
#define GEN9_NUM_MOCS_ENTRIES 64 /* 63-64 are reserved, but configured. */
@@ -88,18 +92,25 @@ struct drm_i915_mocs_table {
*
* Entries not part of the following tables are undefined as far as
* userspace is concerned and shouldn't be relied upon. For Gen < 12
- * they will be initialized to PTE. Gen >= 12 onwards don't have a setting for
- * PTE and will be initialized to an invalid value.
+ * they will be initialized to PTE. Gen >= 12 has no PTE setting, so on
+ * those platforms (except TGL/RKL) undefined entries are initialized to
+ * L3 WB to catch accidental use of reserved and unused mocs indexes.
*
* The last few entries are reserved by the hardware. For ICL+ they
* should be initialized according to bspec and never used, for older
* platforms they should never be written to.
*
- * NOTE: These tables are part of bspec and defined as part of hardware
+ * NOTE1: These tables are part of bspec and defined as part of hardware
* interface for ICL+. For older platforms, they are part of kernel
* ABI. It is expected that, for specific hardware platform, existing
* entries will remain constant and the table will only be updated by
* adding new entries, filling unused positions.
+ *
+ * NOTE2: For GEN >= 12 except TGL and RKL, reserved and unspecified MOCS
+ * indices have been set to L3 WB. These reserved entries should never
+ * be used; they may be changed to lower-performance variants with better
+ * coherency in the future if more entries are needed.
+ * For TGL/RKL, all the unspecified MOCS indexes are mapped to L3 UC.
*/
#define GEN9_MOCS_ENTRIES \
MOCS_ENTRY(I915_MOCS_UNCACHED, \
@@ -282,17 +293,9 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
};
static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
- /* Error */
- MOCS_ENTRY(0, 0, L3_0_DIRECT),
/* UC */
MOCS_ENTRY(1, 0, L3_1_UC),
-
- /* Reserved */
- MOCS_ENTRY(2, 0, L3_0_DIRECT),
- MOCS_ENTRY(3, 0, L3_0_DIRECT),
- MOCS_ENTRY(4, 0, L3_0_DIRECT),
-
/* WB - L3 */
MOCS_ENTRY(5, 0, L3_3_WB),
/* WB - L3 50% */
@@ -314,6 +317,83 @@ static const struct drm_i915_mocs_entry dg1_mocs_table[] = {
MOCS_ENTRY(63, 0, L3_1_UC),
};
+static const struct drm_i915_mocs_entry gen12_mocs_table[] = {
+ GEN11_MOCS_ENTRIES,
+ /* Implicitly enable L1 - HDC:L1 + L3 + LLC */
+ MOCS_ENTRY(48,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + L3 */
+ MOCS_ENTRY(49,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+ /* Implicitly enable L1 - HDC:L1 + LLC */
+ MOCS_ENTRY(50,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* Implicitly enable L1 - HDC:L1 */
+ MOCS_ENTRY(51,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_1_UC),
+ /* HW Special Case (CCS) */
+ MOCS_ENTRY(60,
+ LE_3_WB | LE_TC_1_LLC | LE_LRUM(3),
+ L3_1_UC),
+ /* HW Special Case (Displayable) */
+ MOCS_ENTRY(61,
+ LE_1_UC | LE_TC_1_LLC,
+ L3_3_WB),
+};
+
+static const struct drm_i915_mocs_entry xehpsdv_mocs_table[] = {
+ /* wa_1608975824 */
+ MOCS_ENTRY(0, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(3, 0, L3_1_UC | L3_GLBGO(1)),
+ /* UC - Non-Coherent; GO:L3 */
+ MOCS_ENTRY(4, 0, L3_1_UC),
+
+ /* WB */
+ MOCS_ENTRY(5, 0, L3_3_WB | L3_LKUP(1)),
+
+ /* HW Reserved - SW program but never use. */
+ MOCS_ENTRY(48, 0, L3_3_WB | L3_LKUP(1)),
+ MOCS_ENTRY(49, 0, L3_1_UC | L3_LKUP(1)),
+ MOCS_ENTRY(60, 0, L3_1_UC),
+ MOCS_ENTRY(61, 0, L3_1_UC),
+ MOCS_ENTRY(62, 0, L3_1_UC),
+ MOCS_ENTRY(63, 0, L3_1_UC),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table[] = {
+ /* UC - Coherent; GO:L3 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
+static const struct drm_i915_mocs_entry dg2_mocs_table_g10_ax[] = {
+ /* Wa_14011441408: Set Go to Memory for MOCS#0 */
+ MOCS_ENTRY(0, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Coherent; GO:Memory */
+ MOCS_ENTRY(1, 0, L3_1_UC | L3_GLBGO(1) | L3_LKUP(1)),
+ /* UC - Non-Coherent; GO:Memory */
+ MOCS_ENTRY(2, 0, L3_1_UC | L3_GLBGO(1)),
+
+ /* WB - LC */
+ MOCS_ENTRY(3, 0, L3_3_WB | L3_LKUP(1)),
+};
+
enum {
HAS_GLOBAL_MOCS = BIT(0),
HAS_ENGINE_MOCS = BIT(1),
@@ -340,14 +420,45 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
{
unsigned int flags;
- if (IS_DG1(i915)) {
+ memset(table, 0, sizeof(struct drm_i915_mocs_table));
+
+ table->unused_entries_index = I915_MOCS_PTE;
+ if (IS_DG2(i915)) {
+ if (IS_DG2_GT_STEP(i915, G10, STEP_A0, STEP_B0)) {
+ table->size = ARRAY_SIZE(dg2_mocs_table_g10_ax);
+ table->table = dg2_mocs_table_g10_ax;
+ } else {
+ table->size = ARRAY_SIZE(dg2_mocs_table);
+ table->table = dg2_mocs_table;
+ }
+ table->uc_index = 1;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 3;
+ } else if (IS_XEHPSDV(i915)) {
+ table->size = ARRAY_SIZE(xehpsdv_mocs_table);
+ table->table = xehpsdv_mocs_table;
+ table->uc_index = 2;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->unused_entries_index = 5;
+ } else if (IS_DG1(i915)) {
table->size = ARRAY_SIZE(dg1_mocs_table);
table->table = dg1_mocs_table;
+ table->uc_index = 1;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
- } else if (GRAPHICS_VER(i915) >= 12) {
+ table->uc_index = 1;
+ table->unused_entries_index = 5;
+ } else if (IS_TIGERLAKE(i915) || IS_ROCKETLAKE(i915)) {
+ /* For TGL/RKL, this can't be changed now for ABI reasons */
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ } else if (GRAPHICS_VER(i915) >= 12) {
+ table->size = ARRAY_SIZE(gen12_mocs_table);
+ table->table = gen12_mocs_table;
+ table->n_entries = GEN9_NUM_MOCS_ENTRIES;
+ table->uc_index = 3;
+ table->unused_entries_index = 2;
} else if (GRAPHICS_VER(i915) == 11) {
table->size = ARRAY_SIZE(icl_mocs_table);
table->table = icl_mocs_table;
@@ -393,16 +504,16 @@ static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
}
/*
- * Get control_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get control_value from the MOCS entry. If the entry is not used, the
+ * value of the entry at unused_entries_index is returned instead
+ * (unused_entries_index itself defaults to I915_MOCS_PTE).
*/
static u32 get_entry_control(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].control_value;
-
- return table->table[I915_MOCS_PTE].control_value;
+ return table->table[table->unused_entries_index].control_value;
}
#define for_each_mocs(mocs, t, i) \
@@ -417,6 +528,8 @@ static void __init_mocs_table(struct intel_uncore *uncore,
unsigned int i;
u32 mocs;
+ drm_WARN_ONCE(&uncore->i915->drm, !table->unused_entries_index,
+ "Unused entries index should have been defined\n");
for_each_mocs(mocs, table, i)
intel_uncore_write_fw(uncore, _MMIO(addr + i * 4), mocs);
}
@@ -443,16 +556,16 @@ static void init_mocs_table(struct intel_engine_cs *engine,
}
/*
- * Get l3cc_value from MOCS entry taking into account when it's not used:
- * I915_MOCS_PTE's value is returned in this case.
+ * Get l3cc_value from the MOCS entry. If the entry is not used, the
+ * value of the entry at unused_entries_index is returned instead
+ * (unused_entries_index itself defaults to I915_MOCS_PTE).
*/
static u16 get_entry_l3cc(const struct drm_i915_mocs_table *table,
unsigned int index)
{
if (index < table->size && table->table[index].used)
return table->table[index].l3cc_value;
-
- return table->table[I915_MOCS_PTE].l3cc_value;
+ return table->table[table->unused_entries_index].l3cc_value;
}
static u32 l3cc_combine(u16 low, u16 high)
@@ -468,10 +581,9 @@ static u32 l3cc_combine(u16 low, u16 high)
0; \
i++)
-static void init_l3cc_table(struct intel_engine_cs *engine,
+static void init_l3cc_table(struct intel_uncore *uncore,
const struct drm_i915_mocs_table *table)
{
- struct intel_uncore *uncore = engine->uncore;
unsigned int i;
u32 l3cc;
@@ -496,7 +608,7 @@ void intel_mocs_init_engine(struct intel_engine_cs *engine)
init_mocs_table(engine, &table);
if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
- init_l3cc_table(engine, &table);
+ init_l3cc_table(engine->uncore, &table);
}
static u32 global_mocs_offset(void)
@@ -504,6 +616,14 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
+void intel_set_mocs_index(struct intel_gt *gt)
+{
+ struct drm_i915_mocs_table table;
+
+ get_mocs_settings(gt->i915, &table);
+ gt->mocs.uc_index = table.uc_index;
+}
+
void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
@@ -515,6 +635,14 @@ void intel_mocs_init(struct intel_gt *gt)
flags = get_mocs_settings(gt->i915, &table);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt->uncore, &table, global_mocs_offset());
+
+ /*
+ * Initialize the L3CC table as part of MOCS initialization to make
+ * sure the LNCFCMOCSx registers are programmed for the subsequent
+ * memory transactions, including GuC transactions.
+ */
+ if (flags & HAS_RENDER_L3CC)
+ init_l3cc_table(gt->uncore, &table);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h
index d83274f5163b..76db827210c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.h
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.h
@@ -36,5 +36,6 @@ struct intel_gt;
void intel_mocs_init(struct intel_gt *gt);
void intel_mocs_init_engine(struct intel_engine_cs *engine);
+void intel_set_mocs_index(struct intel_gt *gt);
#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ppgtt.c b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
index 886060f7e6fc..4396bfd630d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ppgtt.c
@@ -155,19 +155,20 @@ int i915_ppgtt_init_hw(struct intel_gt *gt)
}
static struct i915_ppgtt *
-__ppgtt_create(struct intel_gt *gt)
+__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
if (GRAPHICS_VER(gt->i915) < 8)
return gen6_ppgtt_create(gt);
else
- return gen8_ppgtt_create(gt);
+ return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}
-struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt)
+struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct i915_ppgtt *ppgtt;
- ppgtt = __ppgtt_create(gt);
+ ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
if (IS_ERR(ppgtt))
return ppgtt;
@@ -298,7 +299,8 @@ int ppgtt_set_pages(struct i915_vma *vma)
return 0;
}
-void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
+void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
+ unsigned long lmem_pt_obj_flags)
{
struct drm_i915_private *i915 = gt->i915;
@@ -306,6 +308,7 @@ void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt)
ppgtt->vm.i915 = i915;
ppgtt->vm.dma = i915->drm.dev;
ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
+ ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;
dma_resv_init(&ppgtt->vm._resv);
i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 799d382eea79..43093dd2d0c9 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -9,8 +9,8 @@
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
+#include "intel_pcode.h"
#include "intel_rc6.h"
-#include "intel_sideband.h"
/**
* DOC: RC6
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index a74b72f50cc9..afb35d2e5c73 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -32,7 +32,7 @@ static int init_fake_lmem_bar(struct intel_memory_region *mem)
mem->remap_addr = dma_map_resource(i915->drm.dev,
mem->region.start,
mem->fake_mappable.size,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
if (dma_mapping_error(i915->drm.dev, mem->remap_addr)) {
drm_mm_remove_node(&mem->fake_mappable);
@@ -62,7 +62,7 @@ static void release_fake_lmem_bar(struct intel_memory_region *mem)
dma_unmap_resource(mem->i915->drm.dev,
mem->remap_addr,
mem->fake_mappable.size,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_FORCE_CONTIGUOUS);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 7c4d5158e03b..2fdd52b62092 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -112,7 +112,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
- obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE);
+ obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_VOLATILE |
+ I915_BO_ALLOC_PM_VOLATILE);
if (IS_ERR(obj) && i915_ggtt_has_aperture(ggtt))
obj = i915_gem_object_create_stolen(i915, size);
if (IS_ERR(obj))
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 2958e2fae380..586dca1731ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -17,6 +17,7 @@
#include "intel_ring.h"
#include "shmem_utils.h"
#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
/* Rough estimate of the typical request size, performing a flush,
* set-context and then emitting the batch.
@@ -291,7 +292,9 @@ static void xcs_sanitize(struct intel_engine_cs *engine)
sanitize_hwsp(engine);
/* And scrub the dirty cachelines for the HWSP */
- clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+ drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void reset_prepare(struct intel_engine_cs *engine)
@@ -1265,7 +1268,7 @@ static struct i915_vma *gen7_ctx_vma(struct intel_engine_cs *engine)
int size, err;
if (GRAPHICS_VER(engine->i915) != 7 || engine->class != RENDER_CLASS)
- return 0;
+ return NULL;
err = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
if (err < 0)
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 0a03fbed9f9b..5e275f8dda8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -11,8 +11,9 @@
#include "intel_gt_clock_utils.h"
#include "intel_gt_irq.h"
#include "intel_gt_pm_irq.h"
+#include "intel_pcode.h"
#include "intel_rps.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
#define BUSY_MAX_EI 20u /* ms */
@@ -994,20 +995,16 @@ int intel_rps_set(struct intel_rps *rps, u8 val)
static void gen6_rps_init(struct intel_rps *rps)
{
struct drm_i915_private *i915 = rps_to_i915(rps);
- struct intel_uncore *uncore = rps_to_uncore(rps);
+ u32 rp_state_cap = intel_rps_read_state_cap(rps);
/* All of these values are in units of 50MHz */
/* static values from HW: RP0 > RP1 > RPn (min_freq) */
if (IS_GEN9_LP(i915)) {
- u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP);
-
rps->rp0_freq = (rp_state_cap >> 16) & 0xff;
rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
rps->min_freq = (rp_state_cap >> 0) & 0xff;
} else {
- u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
-
rps->rp0_freq = (rp_state_cap >> 0) & 0xff;
rps->rp1_freq = (rp_state_cap >> 8) & 0xff;
rps->min_freq = (rp_state_cap >> 16) & 0xff;
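gen6_rps_init() now reads the capability register through
intel_rps_read_state_cap() and only the field decode stays platform specific:
GEN9_LP parts keep RP0 in bits 23:16 and RPn in bits 7:0, other GEN6+ parts use
the opposite order, and all fields are in 50 MHz units. A hypothetical
restatement of that decode (rp_caps and decode_rp_state_cap are made-up names,
not from the patch):

#include <linux/types.h>

struct rp_caps { u8 rp0, rp1, rpn; };

static struct rp_caps decode_rp_state_cap(u32 rp_state_cap, bool gen9_lp)
{
        struct rp_caps caps;

        if (gen9_lp) {
                caps.rp0 = (rp_state_cap >> 16) & 0xff;
                caps.rp1 = (rp_state_cap >> 8) & 0xff;
                caps.rpn = (rp_state_cap >> 0) & 0xff;
        } else {
                caps.rp0 = (rp_state_cap >> 0) & 0xff;
                caps.rp1 = (rp_state_cap >> 8) & 0xff;
                caps.rpn = (rp_state_cap >> 16) & 0xff;
        }

        return caps;
}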
@@ -2144,6 +2141,19 @@ int intel_rps_set_min_frequency(struct intel_rps *rps, u32 val)
return set_min_freq(rps, val);
}
+u32 intel_rps_read_state_cap(struct intel_rps *rps)
+{
+ struct drm_i915_private *i915 = rps_to_i915(rps);
+ struct intel_uncore *uncore = rps_to_uncore(rps);
+
+ if (IS_XEHPSDV(i915))
+ return intel_uncore_read(uncore, XEHPSDV_RP_STATE_CAP);
+ else if (IS_GEN9_LP(i915))
+ return intel_uncore_read(uncore, BXT_RP_STATE_CAP);
+ else
+ return intel_uncore_read(uncore, GEN6_RP_STATE_CAP);
+}
+
/* External interface for intel_ips.ko */
static struct drm_i915_private __rcu *ips_mchdev;
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h
index 4213bcce1667..11960d64ca82 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.h
+++ b/drivers/gpu/drm/i915/gt/intel_rps.h
@@ -41,6 +41,7 @@ u32 intel_rps_get_rp1_frequency(struct intel_rps *rps);
u32 intel_rps_get_rpn_frequency(struct intel_rps *rps);
u32 intel_rps_read_punit_req(struct intel_rps *rps);
u32 intel_rps_read_punit_req_frequency(struct intel_rps *rps);
+u32 intel_rps_read_state_cap(struct intel_rps *rps);
void gen5_rps_irq_handler(struct intel_rps *rps);
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c
index bbd272943c3f..bdf09051b8a0 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.c
@@ -46,11 +46,11 @@ u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice)
}
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask)
+ u8 *subslice_mask, u32 ss_mask)
{
int offset = slice * sseu->ss_stride;
- memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride);
+ memcpy(&subslice_mask[offset], &ss_mask, sseu->ss_stride);
}
unsigned int
@@ -100,14 +100,24 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu)
return total;
}
-static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
- u8 s_en, u32 ss_en, u16 eu_en)
+static u32 get_ss_stride_mask(struct sseu_dev_info *sseu, u8 s, u32 ss_en)
+{
+ u32 ss_mask;
+
+ ss_mask = ss_en >> (s * sseu->max_subslices);
+ ss_mask &= GENMASK(sseu->max_subslices - 1, 0);
+
+ return ss_mask;
+}
+
+static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, u8 s_en,
+ u32 g_ss_en, u32 c_ss_en, u16 eu_en)
{
int s, ss;
- /* ss_en represents entire subslice mask across all slices */
+ /* g_ss_en/c_ss_en represent entire subslice mask across all slices */
GEM_BUG_ON(sseu->max_slices * sseu->max_subslices >
- sizeof(ss_en) * BITS_PER_BYTE);
+ sizeof(g_ss_en) * BITS_PER_BYTE);
for (s = 0; s < sseu->max_slices; s++) {
if ((s_en & BIT(s)) == 0)
@@ -115,7 +125,22 @@ static void gen11_compute_sseu_info(struct sseu_dev_info *sseu,
sseu->slice_mask |= BIT(s);
- intel_sseu_set_subslices(sseu, s, ss_en);
+ /*
+ * XeHP introduces the concept of compute vs geometry DSS. To
+ * reduce variation between GENs around subslice usage, store a
+ * mask for both the geometry and compute enabled masks since
+ * userspace will need to be able to query these masks
+ * independently. Also compute a total enabled subslice count
+ * for the purposes of selecting subslices to use in a
+ * particular GEM context.
+ */
+ intel_sseu_set_subslices(sseu, s, sseu->compute_subslice_mask,
+ get_ss_stride_mask(sseu, s, c_ss_en));
+ intel_sseu_set_subslices(sseu, s, sseu->geometry_subslice_mask,
+ get_ss_stride_mask(sseu, s, g_ss_en));
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ get_ss_stride_mask(sseu, s,
+ g_ss_en | c_ss_en));
for (ss = 0; ss < sseu->max_subslices; ss++)
if (intel_sseu_has_subslice(sseu, s, ss))
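get_ss_stride_mask() simply windows the global DSS enable mask down to one
slice before it is stored per-slice with intel_sseu_set_subslices(). A
hypothetical restatement of that windowing (slice_window is a made-up name):
with max_subslices == 8 and an enable mask of 0x0000f0ff, slice 0 resolves to
0xff and slice 1 to 0xf0.

#include <linux/bits.h>
#include <linux/types.h>

static u32 slice_window(u32 ss_en, unsigned int slice,
                        unsigned int max_subslices)
{
        return (ss_en >> (slice * max_subslices)) &
               GENMASK(max_subslices - 1, 0);
}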
@@ -129,7 +154,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
{
struct sseu_dev_info *sseu = &gt->info.sseu;
struct intel_uncore *uncore = gt->uncore;
- u32 dss_en;
+ u32 g_dss_en, c_dss_en = 0;
u16 eu_en = 0;
u8 eu_en_fuse;
u8 s_en;
@@ -160,7 +185,9 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
s_en = intel_uncore_read(uncore, GEN11_GT_SLICE_ENABLE) &
GEN11_GT_S_ENA_MASK;
- dss_en = intel_uncore_read(uncore, GEN12_GT_DSS_ENABLE);
+ g_dss_en = intel_uncore_read(uncore, GEN12_GT_GEOMETRY_DSS_ENABLE);
+ if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
+ c_dss_en = intel_uncore_read(uncore, GEN12_GT_COMPUTE_DSS_ENABLE);
/* one bit per pair of EUs */
if (GRAPHICS_VER_FULL(gt->i915) >= IP_VER(12, 50))
@@ -173,7 +200,7 @@ static void gen12_sseu_info_init(struct intel_gt *gt)
if (eu_en_fuse & BIT(eu))
eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1);
- gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en);
+ gen11_compute_sseu_info(sseu, s_en, g_dss_en, c_dss_en, eu_en);
/* TGL only supports slice-level power gating */
sseu->has_slice_pg = 1;
@@ -199,7 +226,7 @@ static void gen11_sseu_info_init(struct intel_gt *gt)
eu_en = ~(intel_uncore_read(uncore, GEN11_EU_DISABLE) &
GEN11_EU_DIS_MASK);
- gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en);
+ gen11_compute_sseu_info(sseu, s_en, ss_en, 0, eu_en);
/* ICL has no power gating restrictions. */
sseu->has_slice_pg = 1;
@@ -240,7 +267,7 @@ static void cherryview_sseu_info_init(struct intel_gt *gt)
sseu_set_eus(sseu, 0, 1, ~disabled_mask);
}
- intel_sseu_set_subslices(sseu, 0, subslice_mask);
+ intel_sseu_set_subslices(sseu, 0, sseu->subslice_mask, subslice_mask);
sseu->eu_total = compute_eu_total(sseu);
@@ -296,7 +323,8 @@ static void gen9_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
eu_disable = intel_uncore_read(uncore, GEN9_EU_DISABLE(s));
for (ss = 0; ss < sseu->max_subslices; ss++) {
@@ -408,7 +436,8 @@ static void bdw_sseu_info_init(struct intel_gt *gt)
/* skip disabled slice */
continue;
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
u8 eu_disabled_mask;
@@ -485,10 +514,9 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
}
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
- switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) {
+ switch (REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1)) {
default:
- MISSING_CASE((fuse1 & HSW_F1_EU_DIS_MASK) >>
- HSW_F1_EU_DIS_SHIFT);
+ MISSING_CASE(REG_FIELD_GET(HSW_F1_EU_DIS_MASK, fuse1));
fallthrough;
case HSW_F1_EU_DIS_10EUS:
sseu->eu_per_subslice = 10;
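The fuse decode above now uses REG_FIELD_GET() instead of the open-coded
mask-and-shift. A minimal sketch of the equivalence with a made-up two-bit
field at bits 17:16 (EXAMPLE_EU_DIS_MASK and example_eu_dis are not from the
patch):

#include "i915_reg.h"

#define EXAMPLE_EU_DIS_MASK REG_GENMASK(17, 16)

static u32 example_eu_dis(u32 fuse1)
{
        /* equivalent to (fuse1 & EXAMPLE_EU_DIS_MASK) >> 16 */
        return REG_FIELD_GET(EXAMPLE_EU_DIS_MASK, fuse1);
}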
@@ -506,7 +534,8 @@ static void hsw_sseu_info_init(struct intel_gt *gt)
sseu->eu_per_subslice);
for (s = 0; s < sseu->max_slices; s++) {
- intel_sseu_set_subslices(sseu, s, subslice_mask);
+ intel_sseu_set_subslices(sseu, s, sseu->subslice_mask,
+ subslice_mask);
for (ss = 0; ss < sseu->max_subslices; ss++) {
sseu_set_eus(sseu, s, ss,
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h
index 22fef98887c0..60882a74741e 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu.h
+++ b/drivers/gpu/drm/i915/gt/intel_sseu.h
@@ -26,9 +26,14 @@ struct drm_printer;
#define GEN_DSS_PER_CSLICE 8
#define GEN_DSS_PER_MSLICE 8
+#define GEN_MAX_GSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_GSLICE)
+#define GEN_MAX_CSLICES (GEN_MAX_SUBSLICES / GEN_DSS_PER_CSLICE)
+
struct sseu_dev_info {
u8 slice_mask;
u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 geometry_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
+ u8 compute_subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE];
u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE];
u16 eu_total;
u8 eu_per_subslice;
@@ -78,6 +83,10 @@ intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice,
u8 mask;
int ss_idx = subslice / BITS_PER_BYTE;
+ if (slice >= sseu->max_slices ||
+ subslice >= sseu->max_subslices)
+ return false;
+
GEM_BUG_ON(ss_idx >= sseu->ss_stride);
mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx];
@@ -97,7 +106,7 @@ intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice);
u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice);
void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice,
- u32 ss_mask);
+ u8 *subslice_mask, u32 ss_mask);
void intel_sseu_info_init(struct intel_gt *gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
index 1ba8b7da9d37..8bb3a91dad82 100644
--- a/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_sseu_debugfs.c
@@ -4,9 +4,9 @@
* Copyright © 2020 Intel Corporation
*/
-#include "debugfs_gt.h"
-#include "intel_sseu_debugfs.h"
#include "i915_drv.h"
+#include "intel_gt_debugfs.h"
+#include "intel_sseu_debugfs.h"
static void sseu_copy_subslices(const struct sseu_dev_info *sseu,
int slice, u8 *to_mask)
@@ -282,7 +282,7 @@ static int sseu_status_show(struct seq_file *m, void *unused)
return intel_sseu_status(m, gt);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(sseu_status);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(sseu_status);
static int rcs_topology_show(struct seq_file *m, void *unused)
{
@@ -293,11 +293,11 @@ static int rcs_topology_show(struct seq_file *m, void *unused)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(rcs_topology);
void intel_sseu_debugfs_register(struct intel_gt *gt, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "sseu_status", &sseu_status_fops, NULL },
{ "rcs_topology", &rcs_topology_fops, NULL },
};
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 1257f4f11e66..438bbc7b8147 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -64,7 +64,7 @@ intel_timeline_pin_map(struct intel_timeline *timeline)
timeline->hwsp_map = vaddr;
timeline->hwsp_seqno = memset(vaddr + ofs, 0, TIMELINE_SEQNO_BYTES);
- clflush(vaddr + ofs);
+ drm_clflush_virt_range(vaddr + ofs, TIMELINE_SEQNO_BYTES);
return 0;
}
@@ -225,7 +225,7 @@ void intel_timeline_reset_seqno(const struct intel_timeline *tl)
memset(hwsp_seqno + 1, 0, TIMELINE_SEQNO_BYTES - sizeof(*hwsp_seqno));
WRITE_ONCE(*hwsp_seqno, tl->seqno);
- clflush(hwsp_seqno);
+ drm_clflush_virt_range(hwsp_seqno, TIMELINE_SEQNO_BYTES);
}
void intel_timeline_enter(struct intel_timeline *tl)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index aae609d7d85d..e1f362530889 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -644,6 +644,72 @@ static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
}
+static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ /*
+ * This is a "fake" workaround defined by software to ensure we
+ * maintain reliable, backward-compatible behavior for userspace with
+ * regards to how nested MI_BATCH_BUFFER_START commands are handled.
+ *
+ * The per-context setting of MI_MODE[12] determines whether the bits
+ * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
+ * in the traditional manner or whether they should instead use a new
+ * tgl+ meaning that breaks backward compatibility, but allows nesting
+ * into 3rd-level batchbuffers. When this new capability was first
+ * added in TGL, it remained off by default unless a context
+ * intentionally opted in to the new behavior. However Xe_HPG now
+ * flips this on by default and requires that we explicitly opt out if
+ * we don't want the new behavior.
+ *
+ * From a SW perspective, we want to maintain the backward-compatible
+ * behavior for userspace, so we'll apply a fake workaround to set it
+ * back to the legacy behavior on platforms where the hardware default
+ * is to break compatibility. At the moment there is no Linux
+ * userspace that utilizes third-level batchbuffers, so sticking with
+ * the legacy meaning avoids the need for any userspace changes. If/when
+ * we have userspace consumers that want to utilize third-level batch
+ * nesting, we can provide a context parameter to allow them to opt in.
+ */
+ wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
+}
+
+static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+ * Some blitter commands do not have a field for MOCS; those
+ * commands will use the MOCS index pointed to by BLIT_CCTL.
+ * The BLIT_CCTL registers need to be programmed to un-cached.
+ */
+ if (engine->class == COPY_ENGINE_CLASS) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_write_clr_set(wal,
+ BLIT_CCTL(engine->mmio_base),
+ BLIT_CCTL_MASK,
+ BLIT_CCTL_MOCS(mocs, mocs));
+ }
+}
+
+/*
+ * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
+ * defined by the hardware team; it programs general context registers.
+ * Adding that context register programming to the context workaround
+ * list allows us to use the wa framework for proper application and
+ * validation.
+ */
+static void
+gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
+ struct i915_wa_list *wal)
+{
+ if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
+ fakewa_disable_nestedbb_mode(engine, wal);
+
+ gen12_ctx_gt_mocs_init(engine, wal);
+}
+
static void
__intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
struct i915_wa_list *wal,
@@ -651,11 +717,19 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
{
struct drm_i915_private *i915 = engine->i915;
- if (engine->class != RENDER_CLASS)
- return;
-
wa_init_start(wal, name, engine->name);
+ /* Applies to all engines */
+ /*
+ * Fake workarounds are not actual workarounds but the
+ * programming of context registers using the workaround framework.
+ */
+ if (GRAPHICS_VER(i915) >= 12)
+ gen12_ctx_gt_fake_wa_init(engine, wal);
+
+ if (engine->class != RENDER_CLASS)
+ goto done;
+
if (IS_DG1(i915))
dg1_ctx_workarounds_init(engine, wal);
else if (GRAPHICS_VER(i915) == 12)
@@ -685,6 +759,7 @@ __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
else
MISSING_CASE(GRAPHICS_VER(i915));
+done:
wa_init_finish(wal);
}
@@ -729,7 +804,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
}
static void
-gen4_gt_workarounds_init(struct drm_i915_private *i915,
+gen4_gt_workarounds_init(struct intel_gt *gt,
struct i915_wa_list *wal)
{
/* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
@@ -737,29 +812,29 @@ gen4_gt_workarounds_init(struct drm_i915_private *i915,
}
static void
-g4x_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen4_gt_workarounds_init(i915, wal);
+ gen4_gt_workarounds_init(gt, wal);
/* WaDisableRenderCachePipelinedFlush:g4x,ilk */
wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
}
static void
-ilk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- g4x_gt_workarounds_init(i915, wal);
+ g4x_gt_workarounds_init(gt, wal);
wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
}
static void
-snb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
}
static void
-ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
wa_masked_dis(wal,
@@ -775,7 +850,7 @@ ivb_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* WaForceL3Serialization:vlv */
wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
@@ -788,7 +863,7 @@ vlv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
/* L3 caching of data atomics doesn't work -- disable it. */
wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
@@ -803,8 +878,10 @@ hsw_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
/* WaDisableKillLogic:bxt,skl,kbl */
if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
wa_write_or(wal,
@@ -829,9 +906,9 @@ gen9_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal
}
static void
-skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:skl */
wa_write_or(wal,
@@ -839,19 +916,19 @@ skl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
/* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_GT_STEP(i915, STEP_A0, STEP_H0))
+ if (IS_SKL_GT_STEP(gt->i915, STEP_A0, STEP_H0))
wa_write_or(wal,
GEN9_GAMT_ECO_REG_RW_IA,
GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
}
static void
-kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_GT_STEP(i915, 0, STEP_C0))
+ if (IS_KBL_GT_STEP(gt->i915, 0, STEP_C0))
wa_write_or(wal,
GAMT_CHKN_BIT_REG,
GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
@@ -868,15 +945,15 @@ kbl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-glk_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
}
static void
-cfl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
/* WaDisableGafsUnitClkGating:cfl */
wa_write_or(wal,
@@ -901,21 +978,21 @@ static void __set_mcr_steering(struct i915_wa_list *wal,
wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
}
-static void __add_mcr_wa(struct drm_i915_private *i915, struct i915_wa_list *wal,
+static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
unsigned int slice, unsigned int subslice)
{
- drm_dbg(&i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
+ drm_dbg(&gt->i915->drm, "MCR slice=0x%x, subslice=0x%x\n", slice, subslice);
__set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
}
static void
-icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
+icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
- const struct sseu_dev_info *sseu = &i915->gt.info.sseu;
+ const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned int slice, subslice;
- GEM_BUG_ON(GRAPHICS_VER(i915) < 11);
+ GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
slice = 0;
@@ -935,16 +1012,15 @@ icl_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
* then we can just rely on the default steering and won't need to
* worry about explicitly re-steering L3BANK reads later.
*/
- if (i915->gt.info.l3bank_mask & BIT(subslice))
- i915->gt.steering_table[L3BANK] = NULL;
+ if (gt->info.l3bank_mask & BIT(subslice))
+ gt->steering_table[L3BANK] = NULL;
- __add_mcr_wa(i915, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, slice, subslice);
}
static void
xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
{
- struct drm_i915_private *i915 = gt->i915;
const struct sseu_dev_info *sseu = &gt->info.sseu;
unsigned long slice, subslice = 0, slice_mask = 0;
u64 dss_mask = 0;
@@ -1008,7 +1084,7 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
WARN_ON(subslice > GEN_DSS_PER_GSLICE);
WARN_ON(dss_mask >> (slice * GEN_DSS_PER_GSLICE) == 0);
- __add_mcr_wa(i915, wal, slice, subslice);
+ __add_mcr_wa(gt, wal, slice, subslice);
/*
* SQIDI ranges are special because they use different steering
@@ -1024,9 +1100,11 @@ xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
}
static void
-icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- icl_wa_init_mcr(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ icl_wa_init_mcr(gt, wal);
/* WaModifyGamTlbPartitioning:icl */
wa_write_clr_set(wal,
@@ -1077,10 +1155,9 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
* the engine-specific workaround list.
*/
static void
-wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
+wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
{
struct intel_engine_cs *engine;
- struct intel_gt *gt = &i915->gt;
int id;
for_each_engine(engine, gt, id) {
@@ -1094,22 +1171,23 @@ wa_14011060649(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-gen12_gt_workarounds_init(struct drm_i915_private *i915,
- struct i915_wa_list *wal)
+gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- icl_wa_init_mcr(i915, wal);
+ icl_wa_init_mcr(gt, wal);
/* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
- wa_14011060649(i915, wal);
+ wa_14011060649(gt, wal);
/* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
wa_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
}
static void
-tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+tgl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen12_gt_workarounds_init(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
/* Wa_1409420604:tgl */
if (IS_TGL_UY_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1130,9 +1208,11 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- gen12_gt_workarounds_init(i915, wal);
+ struct drm_i915_private *i915 = gt->i915;
+
+ gen12_gt_workarounds_init(gt, wal);
/* Wa_1607087056:dg1 */
if (IS_DG1_GT_STEP(i915, STEP_A0, STEP_B0))
@@ -1154,60 +1234,62 @@ dg1_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
}
static void
-xehpsdv_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
+xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
{
- xehp_init_mcr(&i915->gt, wal);
+ xehp_init_mcr(gt, wal);
}
static void
-gt_init_workarounds(struct drm_i915_private *i915, struct i915_wa_list *wal)
+gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
{
+ struct drm_i915_private *i915 = gt->i915;
+
if (IS_XEHPSDV(i915))
- xehpsdv_gt_workarounds_init(i915, wal);
+ xehpsdv_gt_workarounds_init(gt, wal);
else if (IS_DG1(i915))
- dg1_gt_workarounds_init(i915, wal);
+ dg1_gt_workarounds_init(gt, wal);
else if (IS_TIGERLAKE(i915))
- tgl_gt_workarounds_init(i915, wal);
+ tgl_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 12)
- gen12_gt_workarounds_init(i915, wal);
+ gen12_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 11)
- icl_gt_workarounds_init(i915, wal);
+ icl_gt_workarounds_init(gt, wal);
else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
- cfl_gt_workarounds_init(i915, wal);
+ cfl_gt_workarounds_init(gt, wal);
else if (IS_GEMINILAKE(i915))
- glk_gt_workarounds_init(i915, wal);
+ glk_gt_workarounds_init(gt, wal);
else if (IS_KABYLAKE(i915))
- kbl_gt_workarounds_init(i915, wal);
+ kbl_gt_workarounds_init(gt, wal);
else if (IS_BROXTON(i915))
- gen9_gt_workarounds_init(i915, wal);
+ gen9_gt_workarounds_init(gt, wal);
else if (IS_SKYLAKE(i915))
- skl_gt_workarounds_init(i915, wal);
+ skl_gt_workarounds_init(gt, wal);
else if (IS_HASWELL(i915))
- hsw_gt_workarounds_init(i915, wal);
+ hsw_gt_workarounds_init(gt, wal);
else if (IS_VALLEYVIEW(i915))
- vlv_gt_workarounds_init(i915, wal);
+ vlv_gt_workarounds_init(gt, wal);
else if (IS_IVYBRIDGE(i915))
- ivb_gt_workarounds_init(i915, wal);
+ ivb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 6)
- snb_gt_workarounds_init(i915, wal);
+ snb_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 5)
- ilk_gt_workarounds_init(i915, wal);
+ ilk_gt_workarounds_init(gt, wal);
else if (IS_G4X(i915))
- g4x_gt_workarounds_init(i915, wal);
+ g4x_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) == 4)
- gen4_gt_workarounds_init(i915, wal);
+ gen4_gt_workarounds_init(gt, wal);
else if (GRAPHICS_VER(i915) <= 8)
;
else
MISSING_CASE(GRAPHICS_VER(i915));
}
-void intel_gt_init_workarounds(struct drm_i915_private *i915)
+void intel_gt_init_workarounds(struct intel_gt *gt)
{
- struct i915_wa_list *wal = &i915->gt_wa_list;
+ struct i915_wa_list *wal = &gt->wa_list;
wa_init_start(wal, "GT", "global");
- gt_init_workarounds(i915, wal);
+ gt_init_workarounds(gt, wal);
wa_init_finish(wal);
}
@@ -1278,7 +1360,7 @@ wa_list_apply(struct intel_gt *gt, const struct i915_wa_list *wal)
void intel_gt_apply_workarounds(struct intel_gt *gt)
{
- wa_list_apply(gt, &gt->i915->gt_wa_list);
+ wa_list_apply(gt, &gt->wa_list);
}
static bool wa_list_verify(struct intel_gt *gt,
@@ -1310,7 +1392,7 @@ static bool wa_list_verify(struct intel_gt *gt,
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
{
- return wa_list_verify(gt, &gt->i915->gt_wa_list, from);
+ return wa_list_verify(gt, &gt->wa_list, from);
}
__maybe_unused
@@ -1604,6 +1686,31 @@ void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
i915_mmio_reg_offset(RING_NOPID(base)));
}
+/*
+ * engine_fake_wa_init() is a placeholder for programming registers
+ * that are not part of an official workaround defined by the
+ * hardware team.
+ * Adding the programming of those registers inside the workaround
+ * list allows utilizing the wa framework for proper application and
+ * verification.
+ */
+static void
+engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
+{
+ u8 mocs;
+
+ /*
+ * RING_CMD_CCTL needs to be programmed to un-cached for
+ * memory writes and reads output by the Command Streamers
+ * on Gen12 and later platforms.
+ */
+ if (GRAPHICS_VER(engine->i915) >= 12) {
+ mocs = engine->gt->mocs.uc_index;
+ wa_masked_field_set(wal,
+ RING_CMD_CCTL(engine->mmio_base),
+ CMD_CCTL_MOCS_MASK,
+ CMD_CCTL_MOCS_OVERRIDE(mocs, mocs));
+ }
+}
static void
rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
@@ -2044,6 +2151,8 @@ engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal
if (I915_SELFTEST_ONLY(GRAPHICS_VER(engine->i915) < 4))
return;
+ engine_fake_wa_init(engine, wal);
+
if (engine->class == RENDER_CLASS)
rcs_engine_wa_init(engine, wal);
else
@@ -2067,12 +2176,7 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->gt, &engine->wa_list);
}
-struct mcr_range {
- u32 start;
- u32 end;
-};
-
-static const struct mcr_range mcr_ranges_gen8[] = {
+static const struct i915_range mcr_ranges_gen8[] = {
{ .start = 0x5500, .end = 0x55ff },
{ .start = 0x7000, .end = 0x7fff },
{ .start = 0x9400, .end = 0x97ff },
@@ -2081,7 +2185,7 @@ static const struct mcr_range mcr_ranges_gen8[] = {
{},
};
-static const struct mcr_range mcr_ranges_gen12[] = {
+static const struct i915_range mcr_ranges_gen12[] = {
{ .start = 0x8150, .end = 0x815f },
{ .start = 0x9520, .end = 0x955f },
{ .start = 0xb100, .end = 0xb3ff },
@@ -2090,7 +2194,7 @@ static const struct mcr_range mcr_ranges_gen12[] = {
{},
};
-static const struct mcr_range mcr_ranges_xehp[] = {
+static const struct i915_range mcr_ranges_xehp[] = {
{ .start = 0x4000, .end = 0x4aff },
{ .start = 0x5200, .end = 0x52ff },
{ .start = 0x5400, .end = 0x7fff },
@@ -2109,7 +2213,7 @@ static const struct mcr_range mcr_ranges_xehp[] = {
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
- const struct mcr_range *mcr_ranges;
+ const struct i915_range *mcr_ranges;
int i;
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.h b/drivers/gpu/drm/i915/gt/intel_workarounds.h
index 15abb68b6c00..9beaab77c7f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.h
@@ -24,7 +24,7 @@ static inline void intel_wa_list_free(struct i915_wa_list *wal)
void intel_engine_init_ctx_wa(struct intel_engine_cs *engine);
int intel_engine_emit_ctx_wa(struct i915_request *rq);
-void intel_gt_init_workarounds(struct drm_i915_private *i915);
+void intel_gt_init_workarounds(struct intel_gt *gt);
void intel_gt_apply_workarounds(struct intel_gt *gt);
bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from);
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 2c1af030310c..8b89215afe46 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -376,6 +376,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
+ INIT_LIST_HEAD(&engine->pinned_contexts_list);
+
engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
if (!engine->sched_engine)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 317eebf086c3..6e6e4d747cca 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -290,7 +290,7 @@ static int live_heartbeat_fast(void *arg)
int err = 0;
/* Check that the heartbeat ticks at the desired rate. */
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
@@ -352,7 +352,7 @@ static int live_heartbeat_off(void *arg)
int err = 0;
/* Check that we can turn off heartbeat and not interrupt VIP */
- if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL))
+ if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
return 0;
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index f12ffe797639..b367ecfa42de 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -992,7 +992,7 @@ static int live_timeslice_preempt(void *arg)
* need to preempt the current task and replace it with another
* ready task.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -1122,7 +1122,7 @@ static int live_timeslice_rewind(void *arg)
* but only a few of those requests, forcing us to rewind the
* RING_TAIL of the original request.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
for_each_engine(engine, gt, id) {
@@ -1299,7 +1299,7 @@ static int live_timeslice_queue(void *arg)
* ELSP[1] is already occupied, so must rely on timeslicing to
* eject ELSP[0] in favour of the queue.)
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
@@ -1420,7 +1420,7 @@ static int live_timeslice_nopreempt(void *arg)
* We should not timeslice into a request that is marked with
* I915_REQUEST_NOPREEMPT.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ if (!CONFIG_DRM_I915_TIMESLICE_DURATION)
return 0;
if (igt_spinner_init(&spin, gt))
@@ -2260,7 +2260,7 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
int err;
/* Preempt cancel non-preemptible spinner in ELSP0 */
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(arg->engine->gt))
@@ -2316,7 +2316,7 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
struct i915_request *rq;
int err;
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(engine->gt))
@@ -3375,7 +3375,7 @@ static int live_preempt_timeout(void *arg)
* Check that we force preemption to occur by cancelling the previous
* context if it refuses to yield the GPU.
*/
- if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT))
+ if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
return 0;
if (!intel_has_reset_engine(gt))
@@ -3493,7 +3493,7 @@ static int smoke_submit(struct preempt_smoke *smoke,
if (batch) {
struct i915_address_space *vm;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = i915_vma_instance(batch, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma))
@@ -3733,7 +3733,7 @@ static int nop_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ve));
for (n = 0; n < nctx; n++) {
- ve[n] = intel_engine_create_virtual(siblings, nsibling);
+ ve[n] = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve[n])) {
err = PTR_ERR(ve[n]);
nctx = n;
@@ -3929,7 +3929,7 @@ static int mask_virtual_engine(struct intel_gt *gt,
* restrict it to our desired engine within the virtual engine.
*/
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_close;
@@ -4060,7 +4060,7 @@ static int slicein_virtual_engine(struct intel_gt *gt,
i915_request_add(rq);
}
- ce = intel_engine_create_virtual(siblings, nsibling);
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -4112,7 +4112,7 @@ static int sliceout_virtual_engine(struct intel_gt *gt,
/* XXX We do not handle oversubscription and fairness with normal rq */
for (n = 0; n < nsibling; n++) {
- ce = intel_engine_create_virtual(siblings, nsibling);
+ ce = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ce)) {
err = PTR_ERR(ce);
goto out;
@@ -4214,7 +4214,7 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (err)
goto out_scratch;
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_scratch;
@@ -4354,7 +4354,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
if (igt_spinner_init(&spin, gt))
return -ENOMEM;
- ve = intel_engine_create_virtual(siblings, nsibling);
+ ve = intel_engine_create_virtual(siblings, nsibling, 0);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
goto out_spin;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 2c1ed32ca5ac..7e2d99dd012d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -117,7 +117,7 @@ static struct i915_request *
hang_create_request(struct hang *h, struct intel_engine_cs *engine)
{
struct intel_gt *gt = h->gt;
- struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx);
+ struct i915_address_space *vm = i915_gem_context_get_eb_vm(h->ctx);
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct i915_vma *hws, *vma;
@@ -789,7 +789,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active)
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
}
skip:
@@ -1098,7 +1098,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
if (err)
pr_err("[%s] Wait for request %lld:%lld [0x%04X] failed: %d!\n",
engine->name, rq->fence.context,
- rq->fence.seqno, rq->context->guc_id, err);
+ rq->fence.seqno, rq->context->guc_id.id, err);
}
count++;
@@ -1108,7 +1108,7 @@ static int __igt_reset_engines(struct intel_gt *gt,
pr_err("i915_reset_engine(%s:%s): failed to reset request %lld:%lld [0x%04X]\n",
engine->name, test_name,
rq->fence.context,
- rq->fence.seqno, rq->context->guc_id);
+ rq->fence.seqno, rq->context->guc_id.id);
i915_request_put(rq);
GEM_TRACE_DUMP();
@@ -1596,7 +1596,7 @@ static int igt_reset_evict_ppgtt(void *arg)
if (INTEL_PPGTT(gt->i915) < INTEL_PPGTT_FULL)
return 0;
- ppgtt = i915_ppgtt_create(gt);
+ ppgtt = i915_ppgtt_create(gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index e623ac45f4aa..962e91ba3be4 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -66,7 +66,7 @@ reference_lists_init(struct intel_gt *gt, struct wa_lists *lists)
memset(lists, 0, sizeof(*lists));
wa_init_start(&lists->gt_wa_list, "GT_REF", "global");
- gt_init_workarounds(gt->i915, &lists->gt_wa_list);
+ gt_init_workarounds(gt, &lists->gt_wa_list);
wa_init_finish(&lists->gt_wa_list);
for_each_engine(engine, gt, id) {
diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
index 8ff582222aff..ba10bd374cee 100644
--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
+++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_actions_abi.h
@@ -142,6 +142,7 @@ enum intel_guc_action {
INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505,
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506,
INTEL_GUC_ACTION_DEREGISTER_CONTEXT_DONE = 0x4600,
+ INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC = 0x4601,
INTEL_GUC_ACTION_RESET_CLIENT = 0x5507,
INTEL_GUC_ACTION_LIMIT
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index fbfcae727d7f..6e228343e8cb 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -3,6 +3,7 @@
* Copyright © 2014-2019 Intel Corporation
*/
+#include "gem/i915_gem_lmem.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm_irq.h"
@@ -647,7 +648,14 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
u64 flags;
int ret;
- obj = i915_gem_object_create_shmem(gt->i915, size);
+ if (HAS_LMEM(gt->i915))
+ obj = i915_gem_object_create_lmem(gt->i915, size,
+ I915_BO_ALLOC_CPU_CLEAR |
+ I915_BO_ALLOC_CONTIGUOUS |
+ I915_BO_ALLOC_PM_EARLY);
+ else
+ obj = i915_gem_object_create_shmem(gt->i915, size);
+
if (IS_ERR(obj))
return ERR_CAST(obj);
@@ -748,3 +756,32 @@ void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p)
}
}
}
+
+void intel_guc_write_barrier(struct intel_guc *guc)
+{
+ struct intel_gt *gt = guc_to_gt(guc);
+
+ if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
+ /*
+ * Ensure intel_uncore_write_fw can be used rather than
+ * intel_uncore_write.
+ */
+ GEM_BUG_ON(guc->send_regs.fw_domains);
+
+ /*
+ * This register is used by the i915 and GuC for MMIO based
+ * communication. Once we are in this code CTBs are the only
+ * method the i915 uses to communicate with the GuC so it is
+ * safe to write to this register (a value of 0 is NOP for MMIO
+ * communication). If we ever start mixing CTBs and MMIOs a new
+ * register will have to be chosen. This function is also used
+ * to enforce ordering of a work queue item write and an update
+ * to the process descriptor. When a work queue is being used,
+ * CTBs are also the only mechanism of communication.
+ */
+ intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
+ } else {
+ /* wmb() sufficient for a barrier if in smem */
+ wmb();
+ }
+}
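For illustration, a minimal sketch of the intended call pattern around
intel_guc_write_barrier(); example_publish_h2g() and the desc_tail pointer
are placeholder names, not code from this patch:

static void example_publish_h2g(struct intel_guc *guc, u32 *desc_tail,
				u32 new_tail)
{
	/* 1) Write the H2G payload (and any LRC tail update) first. */

	/*
	 * 2) Order those writes: on LMEM this is the SOFT_SCRATCH(0) write,
	 *    otherwise a plain wmb(), as implemented above.
	 */
	intel_guc_write_barrier(guc);

	/* 3) Only then publish the message by moving the descriptor tail. */
	WRITE_ONCE(*desc_tail, new_tail);
}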
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 2e27fe59786b..31cf9fb48c7e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -22,74 +22,155 @@
struct __guc_ads_blob;
-/*
- * Top level structure of GuC. It handles firmware loading and manages client
- * pool. intel_guc owns a intel_guc_client to replace the legacy ExecList
- * submission.
+/**
+ * struct intel_guc - Top level structure of GuC.
+ *
+ * It handles firmware loading and manages client pool. intel_guc owns an
+ * i915_sched_engine for submission.
*/
struct intel_guc {
+ /** @fw: the GuC firmware */
struct intel_uc_fw fw;
+ /** @log: sub-structure containing GuC log related data and objects */
struct intel_guc_log log;
+ /** @ct: the command transport communication channel */
struct intel_guc_ct ct;
+ /** @slpc: sub-structure containing SLPC related data and objects */
struct intel_guc_slpc slpc;
- /* Global engine used to submit requests to GuC */
+ /** @sched_engine: Global engine used to submit requests to GuC */
struct i915_sched_engine *sched_engine;
+ /**
+ * @stalled_request: if GuC can't process a request for any reason, we
+ * save it until GuC restarts processing. No other request can be
+ * submitted until the stalled request is processed.
+ */
struct i915_request *stalled_request;
+ /**
+ * @submission_stall_reason: reason why submission is stalled
+ */
+ enum {
+ STALL_NONE,
+ STALL_REGISTER_CONTEXT,
+ STALL_MOVE_LRC_TAIL,
+ STALL_ADD_REQUEST,
+ } submission_stall_reason;
/* intel_guc_recv interrupt related state */
+ /** @irq_lock: protects GuC irq state */
spinlock_t irq_lock;
+ /**
+ * @msg_enabled_mask: mask of events that are processed when receiving
+ * an INTEL_GUC_ACTION_DEFAULT G2H message.
+ */
unsigned int msg_enabled_mask;
+ /**
+ * @outstanding_submission_g2h: number of outstanding GuC to Host
+ * responses related to GuC submission, used to determine if the GT is
+ * idle
+ */
atomic_t outstanding_submission_g2h;
+ /** @interrupts: pointers to GuC interrupt-managing functions. */
struct {
void (*reset)(struct intel_guc *guc);
void (*enable)(struct intel_guc *guc);
void (*disable)(struct intel_guc *guc);
} interrupts;
- /*
- * contexts_lock protects the pool of free guc ids and a linked list of
- * guc ids available to be stolen
+ /**
+ * @submission_state: sub-structure for submission state protected by
+ * single lock
+ */
+ struct {
+ /**
+ * @lock: protects everything in submission_state,
+ * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
+ * out of zero
+ */
+ spinlock_t lock;
+ /**
+ * @guc_ids: used to allocate new guc_ids, single-lrc
+ */
+ struct ida guc_ids;
+ /**
+ * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+ */
+ unsigned long *guc_ids_bitmap;
+ /**
+ * @guc_id_list: list of intel_context with valid guc_ids but no
+ * refs
+ */
+ struct list_head guc_id_list;
+ /**
+ * @destroyed_contexts: list of contexts waiting to be destroyed
+ * (deregistered with the GuC)
+ */
+ struct list_head destroyed_contexts;
+ /**
+ * @destroyed_worker: worker to deregister contexts, needed because
+ * we must take a GT PM reference and can't do so from the destroy
+ * function as it might be called in an atomic context (no sleeping)
+ */
+ struct work_struct destroyed_worker;
+ } submission_state;
+
+ /**
+ * @submission_supported: tracks whether we support GuC submission on
+ * the current platform
*/
- spinlock_t contexts_lock;
- struct ida guc_ids;
- struct list_head guc_id_list;
-
bool submission_supported;
+ /** @submission_selected: tracks whether the user enabled GuC submission */
bool submission_selected;
+ /**
+ * @rc_supported: tracks whether we support GuC rc on the current platform
+ */
bool rc_supported;
+ /** @rc_selected: tracks whether the user enabled GuC rc */
bool rc_selected;
+ /** @ads_vma: object allocated to hold the GuC ADS */
struct i915_vma *ads_vma;
+ /** @ads_blob: contents of the GuC ADS */
struct __guc_ads_blob *ads_blob;
+ /** @ads_regset_size: size of the save/restore regsets in the ADS */
u32 ads_regset_size;
+ /** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
u32 ads_golden_ctxt_size;
+ /** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
struct i915_vma *lrc_desc_pool;
+ /** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
void *lrc_desc_pool_vaddr;
- /* guc_id to intel_context lookup */
+ /**
+ * @context_lookup: used to resolve intel_context from guc_id, if a
+ * context is present in this structure it is registered with the GuC
+ */
struct xarray context_lookup;
- /* Control params for fw initialization */
+ /** @params: Control params for fw initialization */
u32 params[GUC_CTL_MAX_DWORDS];
- /* GuC's FW specific registers used in MMIO send */
+ /** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
struct {
u32 base;
unsigned int count;
enum forcewake_domains fw_domains;
} send_regs;
- /* register used to send interrupts to the GuC FW */
+ /** @notify_reg: register used to send interrupts to the GuC FW */
i915_reg_t notify_reg;
- /* Store msg (e.g. log flush) that we see while CTBs are disabled */
+ /**
+ * @mmio_msg: notification bitmask that the GuC writes in one of its
+ * registers when the CT channel is disabled, to be processed when the
+ * channel is back up.
+ */
u32 mmio_msg;
- /* To serialize the intel_guc_send actions */
+ /** @send_mutex: used to serialize the intel_guc_send actions */
struct mutex send_mutex;
};
@@ -295,4 +376,6 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc);
void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);
+void intel_guc_write_barrier(struct intel_guc *guc);
+
#endif
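As a sketch of the locking rule documented above (and only a sketch, not the
patch's implementation), reclaiming a guc_id from an unpinned context would
take guc->submission_state.lock around the guc_id_list access; the
guc_id.link member name is assumed here for illustration:

static struct intel_context *example_steal_guc_id(struct intel_guc *guc)
{
	struct intel_context *ce;
	unsigned long flags;

	spin_lock_irqsave(&guc->submission_state.lock, flags);
	/* Contexts on guc_id_list hold a valid guc_id but no refs. */
	ce = list_first_entry_or_null(&guc->submission_state.guc_id_list,
				      struct intel_context, guc_id.link);
	if (ce)
		list_del_init(&ce->guc_id.link);
	spin_unlock_irqrestore(&guc->submission_state.lock, flags);

	return ce;
}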
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
index 6926919bcac6..621c893a009f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c
@@ -176,7 +176,7 @@ static void guc_mapping_table_init(struct intel_gt *gt,
for_each_engine(engine, gt, id) {
u8 guc_class = engine_class_to_guc_class(engine->class);
- system_info->mapping_table[guc_class][engine->instance] =
+ system_info->mapping_table[guc_class][ilog2(engine->logical_mask)] =
engine->instance;
}
}
@@ -349,6 +349,8 @@ static void fill_engine_enable_masks(struct intel_gt *gt,
info->engine_enabled_masks[GUC_VIDEOENHANCE_CLASS] = VEBOX_MASK(gt);
}
+#define LR_HW_CONTEXT_SIZE (80 * sizeof(u32))
+#define LRC_SKIP_SIZE (LRC_PPHWSP_SZ * PAGE_SIZE + LR_HW_CONTEXT_SIZE)
static int guc_prep_golden_context(struct intel_guc *guc,
struct __guc_ads_blob *blob)
{
@@ -396,7 +398,18 @@ static int guc_prep_golden_context(struct intel_guc *guc,
if (!blob)
continue;
- blob->ads.eng_state_size[guc_class] = real_size;
+ /*
+ * This interface is slightly confusing. We need to pass the
+ * base address of the full golden context and the size of just
+ * the engine state, which is the section of the context image
+ * that starts after the execlists context. This is required to
+ * allow the GuC to restore just the engine state when a
+ * watchdog reset occurs.
+ * We calculate the engine state size by removing the size of
+ * what comes before it in the context image (which is identical
+ * on all engines).
+ */
+ blob->ads.eng_state_size[guc_class] = real_size - LRC_SKIP_SIZE;
blob->ads.golden_context_lrca[guc_class] = addr_ggtt;
addr_ggtt += alloc_size;
}
@@ -436,11 +449,6 @@ static void guc_init_golden_context(struct intel_guc *guc)
u8 engine_class, guc_class;
u8 *ptr;
- /* Skip execlist and PPGTT registers + HWSP */
- const u32 lr_hw_context_size = 80 * sizeof(u32);
- const u32 skip_size = LRC_PPHWSP_SZ * PAGE_SIZE +
- lr_hw_context_size;
-
if (!intel_uc_uses_guc_submission(&gt->uc))
return;
@@ -476,12 +484,12 @@ static void guc_init_golden_context(struct intel_guc *guc)
continue;
}
- GEM_BUG_ON(blob->ads.eng_state_size[guc_class] != real_size);
+ GEM_BUG_ON(blob->ads.eng_state_size[guc_class] !=
+ real_size - LRC_SKIP_SIZE);
GEM_BUG_ON(blob->ads.golden_context_lrca[guc_class] != addr_ggtt);
addr_ggtt += alloc_size;
- shmem_read(engine->default_state, skip_size, ptr + skip_size,
- real_size - skip_size);
+ shmem_read(engine->default_state, 0, ptr, real_size);
ptr += alloc_size;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 22b4733b55e2..a0cc34be7b56 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -168,12 +168,15 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc, u32 type,
FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_2_DESC_ADDR, desc_addr),
FIELD_PREP(HOST2GUC_REGISTER_CTB_REQUEST_MSG_3_BUFF_ADDR, buff_addr),
};
+ int ret;
GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
GEM_BUG_ON(size % SZ_4K);
/* CT registration must go over MMIO */
- return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? -EPROTO : ret;
}
static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
@@ -188,8 +191,8 @@ static int ct_register_buffer(struct intel_guc_ct *ct, u32 type,
err = guc_action_register_ct_buffer(ct_to_guc(ct), type,
desc_addr, buff_addr, size);
if (unlikely(err))
- CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
- guc_ct_buffer_type_to_str(type), err);
+ CT_ERROR(ct, "Failed to register %s buffer (%pe)\n",
+ guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
@@ -201,11 +204,14 @@ static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
FIELD_PREP(GUC_HXG_REQUEST_MSG_0_ACTION, GUC_ACTION_HOST2GUC_DEREGISTER_CTB),
FIELD_PREP(HOST2GUC_DEREGISTER_CTB_REQUEST_MSG_1_TYPE, type),
};
+ int ret;
GEM_BUG_ON(type != GUC_CTB_TYPE_HOST2GUC && type != GUC_CTB_TYPE_GUC2HOST);
/* CT deregistration must go over MMIO */
- return intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+ ret = intel_guc_send_mmio(guc, request, ARRAY_SIZE(request), NULL, 0);
+
+ return ret > 0 ? -EPROTO : ret;
}
static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
@@ -213,8 +219,8 @@ static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
if (unlikely(err))
- CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
- guc_ct_buffer_type_to_str(type), err);
+ CT_ERROR(ct, "Failed to deregister %s buffer (%pe)\n",
+ guc_ct_buffer_type_to_str(type), ERR_PTR(err));
return err;
}
@@ -377,28 +383,6 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
return ++ct->requests.last_fence;
}
-static void write_barrier(struct intel_guc_ct *ct)
-{
- struct intel_guc *guc = ct_to_guc(ct);
- struct intel_gt *gt = guc_to_gt(guc);
-
- if (i915_gem_object_is_lmem(guc->ct.vma->obj)) {
- GEM_BUG_ON(guc->send_regs.fw_domains);
- /*
- * This register is used by the i915 and GuC for MMIO based
- * communication. Once we are in this code CTBs are the only
- * method the i915 uses to communicate with the GuC so it is
- * safe to write to this register (a value of 0 is NOP for MMIO
- * communication). If we ever start mixing CTBs and MMIOs a new
- * register will have to be chosen.
- */
- intel_uncore_write_fw(gt->uncore, GEN11_SOFT_SCRATCH(0), 0);
- } else {
- /* wmb() sufficient for a barrier if in smem */
- wmb();
- }
-}
-
static int ct_write(struct intel_guc_ct *ct,
const u32 *action,
u32 len /* in dwords */,
@@ -468,7 +452,7 @@ static int ct_write(struct intel_guc_ct *ct,
* make sure H2G buffer update and LRC tail update (if this triggering a
* submission) are visible before updating the descriptor tail
*/
- write_barrier(ct);
+ intel_guc_write_barrier(ct_to_guc(ct));
/* update local copies */
ctb->tail = tail;
@@ -522,9 +506,6 @@ static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
err = wait_for(done, GUC_CTB_RESPONSE_TIMEOUT_LONG_MS);
#undef done
- if (unlikely(err))
- DRM_ERROR("CT: fence %u err %d\n", req->fence, err);
-
*status = req->status;
return err;
}
@@ -722,8 +703,11 @@ retry:
err = wait_for_ct_request_update(&request, status);
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- if (unlikely(err))
+ if (unlikely(err)) {
+ CT_ERROR(ct, "No response for request %#x (fence %u)\n",
+ action[0], request.fence);
goto unlink;
+ }
if (FIELD_GET(GUC_HXG_MSG_0_TYPE, *status) != GUC_HXG_TYPE_RESPONSE_SUCCESS) {
err = -EIO;
@@ -775,8 +759,8 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
- action[0], ret, status);
+ CT_ERROR(ct, "Sending action %#x failed (%pe) status=%#X\n",
+ action[0], ERR_PTR(ret), status);
} else if (unlikely(ret)) {
CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
action[0], ret, ret);
@@ -1042,9 +1026,9 @@ static void ct_incoming_request_worker_func(struct work_struct *w)
container_of(w, struct intel_guc_ct, requests.worker);
bool done;
- done = ct_process_incoming_requests(ct);
- if (!done)
- queue_work(system_unbound_wq, &ct->requests.worker);
+ do {
+ done = ct_process_incoming_requests(ct);
+ } while (!done);
}
static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *request)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
index 887c8c8f35db..25f09a420561 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_debugfs.c
@@ -5,14 +5,14 @@
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
+#include "gt/uc/intel_guc_ads.h"
+#include "gt/uc/intel_guc_ct.h"
+#include "gt/uc/intel_guc_slpc.h"
+#include "gt/uc/intel_guc_submission.h"
#include "intel_guc.h"
#include "intel_guc_debugfs.h"
#include "intel_guc_log_debugfs.h"
-#include "gt/uc/intel_guc_ct.h"
-#include "gt/uc/intel_guc_ads.h"
-#include "gt/uc/intel_guc_submission.h"
-#include "gt/uc/intel_guc_slpc.h"
static int guc_info_show(struct seq_file *m, void *data)
{
@@ -35,7 +35,7 @@ static int guc_info_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_info);
static int guc_registered_contexts_show(struct seq_file *m, void *data)
{
@@ -49,7 +49,7 @@ static int guc_registered_contexts_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_registered_contexts);
static int guc_slpc_info_show(struct seq_file *m, void *unused)
{
@@ -62,7 +62,7 @@ static int guc_slpc_info_show(struct seq_file *m, void *unused)
return intel_guc_slpc_print_info(slpc, &p);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_slpc_info);
static bool intel_eval_slpc_support(void *data)
{
@@ -73,7 +73,7 @@ static bool intel_eval_slpc_support(void *data)
void intel_guc_debugfs_register(struct intel_guc *guc, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "guc_info", &guc_info_fops, NULL },
{ "guc_registered_contexts", &guc_registered_contexts_fops, NULL },
{ "guc_slpc_info", &guc_slpc_info_fops, &intel_eval_slpc_support},
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
index 76fe766ad1bc..196424be0998 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fw.c
@@ -41,18 +41,21 @@ static void guc_prepare_xfer(struct intel_uncore *uncore)
}
/* Copy RSA signature from the fw image to HW for verification */
-static void guc_xfer_rsa(struct intel_uc_fw *guc_fw,
- struct intel_uncore *uncore)
+static int guc_xfer_rsa(struct intel_uc_fw *guc_fw,
+ struct intel_uncore *uncore)
{
u32 rsa[UOS_RSA_SCRATCH_COUNT];
size_t copied;
int i;
copied = intel_uc_fw_copy_rsa(guc_fw, rsa, sizeof(rsa));
- GEM_BUG_ON(copied < sizeof(rsa));
+ if (copied < sizeof(rsa))
+ return -ENOMEM;
for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
intel_uncore_write(uncore, UOS_RSA_SCRATCH(i), rsa[i]);
+
+ return 0;
}
/*
@@ -141,7 +144,9 @@ int intel_guc_fw_upload(struct intel_guc *guc)
* by the DMA engine in one operation, whereas the RSA signature is
* loaded via MMIO.
*/
- guc_xfer_rsa(&guc->fw, uncore);
+ ret = guc_xfer_rsa(&guc->fw, uncore);
+ if (ret)
+ goto out;
/*
* Current uCode expects the code to be loaded at 8k; locations below
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
index fa4be13c8854..722933e26347 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h
@@ -52,27 +52,27 @@
#define GUC_DOORBELL_INVALID 256
-#define GUC_WQ_SIZE (PAGE_SIZE * 2)
-
-/* Work queue item header definitions */
+/*
+ * Work queue item header definitions
+ *
+ * The work queue is a circular buffer used to submit complex (multi-lrc)
+ * submissions to the GuC. A work queue item is an entry in the circular buffer.
+ */
#define WQ_STATUS_ACTIVE 1
#define WQ_STATUS_SUSPENDED 2
#define WQ_STATUS_CMD_ERROR 3
#define WQ_STATUS_ENGINE_ID_NOT_USED 4
#define WQ_STATUS_SUSPENDED_FROM_RESET 5
-#define WQ_TYPE_SHIFT 0
-#define WQ_TYPE_BATCH_BUF (0x1 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_PSEUDO (0x2 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_INORDER (0x3 << WQ_TYPE_SHIFT)
-#define WQ_TYPE_NOOP (0x4 << WQ_TYPE_SHIFT)
-#define WQ_TARGET_SHIFT 10
-#define WQ_LEN_SHIFT 16
-#define WQ_NO_WCFLUSH_WAIT (1 << 27)
-#define WQ_PRESENT_WORKLOAD (1 << 28)
-
-#define WQ_RING_TAIL_SHIFT 20
-#define WQ_RING_TAIL_MAX 0x7FF /* 2^11 QWords */
-#define WQ_RING_TAIL_MASK (WQ_RING_TAIL_MAX << WQ_RING_TAIL_SHIFT)
+#define WQ_TYPE_BATCH_BUF 0x1
+#define WQ_TYPE_PSEUDO 0x2
+#define WQ_TYPE_INORDER 0x3
+#define WQ_TYPE_NOOP 0x4
+#define WQ_TYPE_MULTI_LRC 0x5
+#define WQ_TYPE_MASK GENMASK(7, 0)
+#define WQ_LEN_MASK GENMASK(26, 16)
+
+#define WQ_GUC_ID_MASK GENMASK(15, 0)
+#define WQ_RING_TAIL_MASK GENMASK(28, 18)
#define GUC_STAGE_DESC_ATTR_ACTIVE BIT(0)
#define GUC_STAGE_DESC_ATTR_PENDING_DB BIT(1)
@@ -186,7 +186,7 @@ struct guc_process_desc {
u32 wq_status;
u32 engine_presence;
u32 priority;
- u32 reserved[30];
+ u32 reserved[36];
} __packed;
#define CONTEXT_REGISTRATION_FLAG_KMD BIT(0)
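With the shift/mask pairs replaced by GENMASK() definitions, work queue item
dwords can be assembled with FIELD_PREP(). The helpers below are only a
usage sketch, not code from this patch, and the exact dword layout of a
multi-LRC work queue item is an assumption:

#include <linux/bitfield.h>

static u32 example_wq_header(u32 len_dw)
{
	return FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
	       FIELD_PREP(WQ_LEN_MASK, len_dw);
}

static u32 example_wq_dw1(u32 guc_id, u32 ring_tail)
{
	return FIELD_PREP(WQ_GUC_ID_MASK, guc_id) |
	       FIELD_PREP(WQ_RING_TAIL_MASK, ring_tail);
}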
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
index 64e0b86bf258..46026c2c1722 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log_debugfs.c
@@ -6,7 +6,7 @@
#include <linux/fs.h>
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_guc.h"
#include "intel_guc_log.h"
#include "intel_guc_log_debugfs.h"
@@ -17,7 +17,7 @@ static int guc_log_dump_show(struct seq_file *m, void *data)
return intel_guc_log_dump(m->private, &p, false);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_log_dump);
static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
{
@@ -25,7 +25,7 @@ static int guc_load_err_log_dump_show(struct seq_file *m, void *data)
return intel_guc_log_dump(m->private, &p, true);
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(guc_load_err_log_dump);
static int guc_log_level_get(void *data, u64 *val)
{
@@ -109,7 +109,7 @@ static const struct file_operations guc_log_relay_fops = {
void intel_guc_log_debugfs_register(struct intel_guc_log *log,
struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "guc_log_dump", &guc_log_dump_fops, NULL },
{ "guc_load_err_log_dump", &guc_load_err_log_dump_fops, NULL },
{ "guc_log_level", &guc_log_level_fops, NULL },
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 87d8dc8f51b9..d7710debcd47 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -11,6 +11,7 @@
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_heartbeat.h"
+#include "gt/intel_gpu_commands.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_irq.h"
#include "gt/intel_gt_pm.h"
@@ -28,21 +29,6 @@
/**
* DOC: GuC-based command submission
*
- * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC
- * firmware is moving to an updated submission interface and we plan to
- * turn submission back on when that lands. The below documentation (and related
- * code) matches the old submission model and will be updated as part of the
- * upgrade to the new flow.
- *
- * GuC stage descriptor:
- * During initialization, the driver allocates a static pool of 1024 such
- * descriptors, and shares them with the GuC. Currently, we only use one
- * descriptor. This stage descriptor lets the GuC know about the workqueue and
- * process descriptor. Theoretically, it also lets the GuC know about our HW
- * contexts (context ID, etc...), but we actually employ a kind of submission
- * where the GuC uses the LRCA sent via the work item instead. This is called
- * a "proxy" submission.
- *
* The Scratch registers:
* There are 16 MMIO-based registers start from 0xC180. The kernel driver writes
* a value to the action register (SOFT_SCRATCH_0) along with any data. It then
@@ -51,14 +37,85 @@
* processes the request. The kernel driver polls waiting for this update and
* then proceeds.
*
- * Work Items:
- * There are several types of work items that the host may place into a
- * workqueue, each with its own requirements and limitations. Currently only
- * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
- * represents in-order queue. The kernel driver packs ring tail pointer and an
- * ELSP context descriptor dword into Work Item.
- * See guc_add_request()
+ * Command Transport buffers (CTBs):
+ * Covered in detail in other sections but CTBs (Host to GuC - H2G, GuC to Host
+ * - G2H) are a message interface between the i915 and GuC.
+ *
+ * Context registration:
+ * Before a context can be submitted it must be registered with the GuC via a
+ * H2G. A unique guc_id is associated with each context. The context is either
+ * registered at request creation time (normal operation) or at submission time
+ * (abnormal operation, e.g. after a reset).
+ *
+ * Context submission:
+ * The i915 updates the LRC tail value in memory. The i915 must enable the
+ * scheduling of the context within the GuC for the GuC to actually consider it.
+ * Therefore, the first time a disabled context is submitted we use a schedule
+ * enable H2G, while follow up submissions are done via the context submit H2G,
+ * which informs the GuC that a previously enabled context has new work
+ * available.
+ *
+ * Context unpin:
+ * To unpin a context a H2G is used to disable scheduling. When the
+ * corresponding G2H returns indicating the scheduling disable operation has
+ * completed it is safe to unpin the context. While a disable is in flight it
+ * isn't safe to resubmit the context so a fence is used to stall all future
+ * requests of that context until the G2H is returned.
+ *
+ * Context deregistration:
+ * Before a context can be destroyed or if we steal its guc_id we must
+ * deregister the context with the GuC via H2G. If stealing the guc_id it isn't
+ * safe to submit anything to this guc_id until the deregister completes so a
+ * fence is used to stall all requests associated with this guc_id until the
+ * corresponding G2H returns indicating the guc_id has been deregistered.
+ *
+ * submission_state.guc_ids:
+ * Unique number associated with private GuC context data passed in during
+ * context registration / submission / deregistration. 64k available. Simple ida
+ * is used for allocation.
+ *
+ * Stealing guc_ids:
+ * If no guc_ids are available they can be stolen from another context at
+ * request creation time if that context is unpinned. If a guc_id can't be found
+ * we punt this problem to the user as we believe this is near impossible to hit
+ * during normal use cases.
+ *
+ * Locking:
+ * In the GuC submission code we have 3 basic spin locks which protect
+ * everything. Details about each below.
+ *
+ * sched_engine->lock
+ * This is the submission lock for all contexts that share an i915 schedule
+ * engine (sched_engine), thus only one of the contexts which share a
+ * sched_engine can be submitting at a time. Currently only one sched_engine is
+ * used for all of GuC submission but that could change in the future.
+ *
+ * guc->submission_state.lock
+ * Global lock for GuC submission state. Protects guc_ids and destroyed contexts
+ * list.
+ *
+ * ce->guc_state.lock
+ * Protects everything under ce->guc_state. Ensures that a context is in the
+ * correct state before issuing a H2G. e.g. We don't issue a schedule disable
+ * on a disabled context (bad idea), we don't issue a schedule enable when a
+ * schedule disable is in flight, etc... Also protects list of inflight requests
+ * on the context and the priority management state. Lock is individual to each
+ * context.
+ *
+ * Lock ordering rules:
+ * sched_engine->lock -> ce->guc_state.lock
+ * guc->submission_state.lock -> ce->guc_state.lock
*
+ * Reset races:
+ * When a full GT reset is triggered it is assumed that some G2H responses to
+ * H2Gs can be lost as the GuC is also reset. Losing these G2H can prove to be
+ * fatal as we do certain operations upon receiving a G2H (e.g. destroy
+ * contexts, release guc_ids, etc...). When this occurs we can scrub the
+ * context state and clean up appropriately, however this is quite racy.
+ * To avoid races, the reset code must disable submission before scrubbing for
+ * the missing G2H, while the submission code must check for submission being
+ * disabled and skip sending H2Gs and updating context states when it is. Both
+ * sides must also make sure to hold the relevant locks.
*/
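As a minimal userspace sketch of the lock ordering rules above (and only of those rules), the snippet below models the three spinlocks with pthread mutexes; the structures and helpers are hypothetical stand-ins for illustration, not the real i915 types.

#include <pthread.h>
#include <stdio.h>

struct sketch_context {
	pthread_mutex_t guc_state_lock;		/* models ce->guc_state.lock */
	unsigned int sched_state;
};

struct sketch_guc {
	pthread_mutex_t sched_engine_lock;	/* models sched_engine->lock */
	pthread_mutex_t submission_state_lock;	/* models guc->submission_state.lock */
};

/* Submission path: sched_engine->lock is taken first, then the context lock. */
static void sketch_submit(struct sketch_guc *guc, struct sketch_context *ce)
{
	pthread_mutex_lock(&guc->sched_engine_lock);
	pthread_mutex_lock(&ce->guc_state_lock);
	ce->sched_state |= 1u;			/* e.g. mark the context enabled */
	pthread_mutex_unlock(&ce->guc_state_lock);
	pthread_mutex_unlock(&guc->sched_engine_lock);
}

/* guc_id management path: submission_state lock first, then the context lock. */
static void sketch_steal_guc_id(struct sketch_guc *guc, struct sketch_context *ce)
{
	pthread_mutex_lock(&guc->submission_state_lock);
	pthread_mutex_lock(&ce->guc_state_lock);
	ce->sched_state &= ~2u;			/* e.g. clear the registered bit */
	pthread_mutex_unlock(&ce->guc_state_lock);
	pthread_mutex_unlock(&guc->submission_state_lock);
}

int main(void)
{
	struct sketch_guc guc = {
		.sched_engine_lock = PTHREAD_MUTEX_INITIALIZER,
		.submission_state_lock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct sketch_context ce = {
		.guc_state_lock = PTHREAD_MUTEX_INITIALIZER,
	};

	sketch_submit(&guc, &ce);
	sketch_steal_guc_id(&guc, &ce);
	printf("sched_state: %u\n", ce.sched_state);
	return 0;
}

Taking ce->guc_state.lock first and then either outer lock would invert the documented order; that is exactly the kind of nesting lockdep is expected to flag.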
/* GuC Virtual Engine */
@@ -68,91 +125,56 @@ struct guc_virtual_engine {
};
static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width);
#define GUC_REQUEST_SIZE 64 /* bytes */
/*
- * Below is a set of functions which control the GuC scheduling state which do
- * not require a lock as all state transitions are mutually exclusive. i.e. It
- * is not possible for the context pinning code and submission, for the same
- * context, to be executing simultaneously. We still need an atomic as it is
- * possible for some of the bits to changing at the same time though.
+ * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
+ * per the GuC submission interface. A different allocation algorithm is used
+ * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
+ * partition the guc_id space. We believe the number of multi-lrc contexts in
+ * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
+ * multi-lrc.
*/
-#define SCHED_STATE_NO_LOCK_ENABLED BIT(0)
-#define SCHED_STATE_NO_LOCK_PENDING_ENABLE BIT(1)
-#define SCHED_STATE_NO_LOCK_REGISTERED BIT(2)
-static inline bool context_enabled(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_ENABLED);
-}
-
-static inline void set_context_enabled(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_ENABLED, &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_enabled(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_ENABLED,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_pending_enable(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_PENDING_ENABLE);
-}
-
-static inline void set_context_pending_enable(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_PENDING_ENABLE,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_pending_enable(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_PENDING_ENABLE,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline bool context_registered(struct intel_context *ce)
-{
- return (atomic_read(&ce->guc_sched_state_no_lock) &
- SCHED_STATE_NO_LOCK_REGISTERED);
-}
-
-static inline void set_context_registered(struct intel_context *ce)
-{
- atomic_or(SCHED_STATE_NO_LOCK_REGISTERED,
- &ce->guc_sched_state_no_lock);
-}
-
-static inline void clr_context_registered(struct intel_context *ce)
-{
- atomic_and((u32)~SCHED_STATE_NO_LOCK_REGISTERED,
- &ce->guc_sched_state_no_lock);
-}
+#define NUMBER_MULTI_LRC_GUC_ID (GUC_MAX_LRC_DESCRIPTORS / 16)
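A standalone sketch of the guc_id partition described above: the low 1/16 of the id space is handed out from a bitmap (contiguous blocks for multi-lrc), the rest individually from an ida. GUC_MAX_LRC_DESCRIPTORS is assumed to be 64k here purely for illustration; the driver takes the real value from the GuC ABI headers.

#include <stdio.h>

#define SKETCH_GUC_MAX_LRC_DESCRIPTORS	65536u			/* assumed: "64k available" */
#define SKETCH_NUMBER_MULTI_LRC_GUC_ID	(SKETCH_GUC_MAX_LRC_DESCRIPTORS / 16)

int main(void)
{
	/* Multi-lrc ids: allocated as contiguous regions from a bitmap. */
	printf("multi-lrc ids:  [0, %u)\n", SKETCH_NUMBER_MULTI_LRC_GUC_ID);
	/* Single-lrc ids: allocated one at a time from an ida over the rest. */
	printf("single-lrc ids: [%u, %u)\n", SKETCH_NUMBER_MULTI_LRC_GUC_ID,
	       SKETCH_GUC_MAX_LRC_DESCRIPTORS);
	return 0;
}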
/*
* Below is a set of functions which control the GuC scheduling state which
- * require a lock, aside from the special case where the functions are called
- * from guc_lrc_desc_pin(). In that case it isn't possible for any other code
- * path to be executing on the context.
+ * require a lock.
*/
#define SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER BIT(0)
#define SCHED_STATE_DESTROYED BIT(1)
#define SCHED_STATE_PENDING_DISABLE BIT(2)
#define SCHED_STATE_BANNED BIT(3)
-#define SCHED_STATE_BLOCKED_SHIFT 4
+#define SCHED_STATE_ENABLED BIT(4)
+#define SCHED_STATE_PENDING_ENABLE BIT(5)
+#define SCHED_STATE_REGISTERED BIT(6)
+#define SCHED_STATE_BLOCKED_SHIFT 7
#define SCHED_STATE_BLOCKED BIT(SCHED_STATE_BLOCKED_SHIFT)
#define SCHED_STATE_BLOCKED_MASK (0xfff << SCHED_STATE_BLOCKED_SHIFT)
+
static inline void init_sched_state(struct intel_context *ce)
{
- /* Only should be called from guc_lrc_desc_pin() */
- atomic_set(&ce->guc_sched_state_no_lock, 0);
- ce->guc_state.sched_state = 0;
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= SCHED_STATE_BLOCKED_MASK;
+}
+
+__maybe_unused
+static bool sched_state_is_init(struct intel_context *ce)
+{
+ /*
+ * XXX: Kernel contexts can have SCHED_STATE_REGISTERED after
+ * suspend.
+ */
+ return !(ce->guc_state.sched_state &
+ ~(SCHED_STATE_BLOCKED_MASK | SCHED_STATE_REGISTERED));
}
static inline bool
@@ -165,7 +187,7 @@ context_wait_for_deregister_to_register(struct intel_context *ce)
static inline void
set_context_wait_for_deregister_to_register(struct intel_context *ce)
{
- /* Only should be called from guc_lrc_desc_pin() without lock */
+ lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state |=
SCHED_STATE_WAIT_FOR_DEREGISTER_TO_REGISTER;
}
@@ -225,6 +247,57 @@ static inline void clr_context_banned(struct intel_context *ce)
ce->guc_state.sched_state &= ~SCHED_STATE_BANNED;
}
+static inline bool context_enabled(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_ENABLED;
+}
+
+static inline void set_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_ENABLED;
+}
+
+static inline void clr_context_enabled(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_ENABLED;
+}
+
+static inline bool context_pending_enable(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void set_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline void clr_context_pending_enable(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_PENDING_ENABLE;
+}
+
+static inline bool context_registered(struct intel_context *ce)
+{
+ return ce->guc_state.sched_state & SCHED_STATE_REGISTERED;
+}
+
+static inline void set_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state |= SCHED_STATE_REGISTERED;
+}
+
+static inline void clr_context_registered(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ce->guc_state.sched_state &= ~SCHED_STATE_REGISTERED;
+}
+
static inline u32 context_blocked(struct intel_context *ce)
{
return (ce->guc_state.sched_state & SCHED_STATE_BLOCKED_MASK) >>
@@ -233,7 +306,6 @@ static inline u32 context_blocked(struct intel_context *ce)
static inline void incr_context_blocked(struct intel_context *ce)
{
- lockdep_assert_held(&ce->engine->sched_engine->lock);
lockdep_assert_held(&ce->guc_state.lock);
ce->guc_state.sched_state += SCHED_STATE_BLOCKED;
@@ -243,7 +315,6 @@ static inline void incr_context_blocked(struct intel_context *ce)
static inline void decr_context_blocked(struct intel_context *ce)
{
- lockdep_assert_held(&ce->engine->sched_engine->lock);
lockdep_assert_held(&ce->guc_state.lock);
GEM_BUG_ON(!context_blocked(ce)); /* Underflow check */
@@ -251,14 +322,39 @@ static inline void decr_context_blocked(struct intel_context *ce)
ce->guc_state.sched_state -= SCHED_STATE_BLOCKED;
}
+static inline bool context_has_committed_requests(struct intel_context *ce)
+{
+ return !!ce->guc_state.number_committed_requests;
+}
+
+static inline void incr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ ++ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static inline void decr_context_committed_requests(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+ --ce->guc_state.number_committed_requests;
+ GEM_BUG_ON(ce->guc_state.number_committed_requests < 0);
+}
+
+static struct intel_context *
+request_to_scheduling_context(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
static inline bool context_guc_id_invalid(struct intel_context *ce)
{
- return ce->guc_id == GUC_INVALID_LRC_ID;
+ return ce->guc_id.id == GUC_INVALID_LRC_ID;
}
static inline void set_context_guc_id_invalid(struct intel_context *ce)
{
- ce->guc_id = GUC_INVALID_LRC_ID;
+ ce->guc_id.id = GUC_INVALID_LRC_ID;
}
static inline struct intel_guc *ce_to_guc(struct intel_context *ce)
@@ -271,6 +367,104 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
+/*
+ * When using multi-lrc submission a scratch memory area is reserved in the
+ * parent's context state for the process descriptor, work queue, and handshake
+ * between the parent + children contexts to insert safe preemption points
+ * between each of the BBs. Currently the scratch area is sized to a page.
+ *
+ * The layout of this scratch area is below:
+ * 0 guc_process_desc
+ * + sizeof(struct guc_process_desc) child go
+ * + CACHELINE_BYTES child join[0]
+ * ...
+ * + CACHELINE_BYTES child join[n - 1]
+ * ... unused
+ * PARENT_SCRATCH_SIZE / 2 work queue start
+ * ... work queue
+ * PARENT_SCRATCH_SIZE - 1 work queue end
+ */
+#define WQ_SIZE (PARENT_SCRATCH_SIZE / 2)
+#define WQ_OFFSET (PARENT_SCRATCH_SIZE - WQ_SIZE)
+
+struct sync_semaphore {
+ u32 semaphore;
+ u8 unused[CACHELINE_BYTES - sizeof(u32)];
+};
+
+struct parent_scratch {
+ struct guc_process_desc pdesc;
+
+ struct sync_semaphore go;
+ struct sync_semaphore join[MAX_ENGINE_INSTANCE + 1];
+
+ u8 unused[WQ_OFFSET - sizeof(struct guc_process_desc) -
+ sizeof(struct sync_semaphore) * (MAX_ENGINE_INSTANCE + 2)];
+
+ u32 wq[WQ_SIZE / sizeof(u32)];
+};
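A standalone sketch checking the layout above with assumed sizes (4 KiB scratch page, 64-byte cachelines, 8 engine instances, and a placeholder 64-byte process descriptor); the real values come from the GuC ABI and platform headers, so the numbers printed here are illustrative only.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define SK_PARENT_SCRATCH_SIZE	4096u	/* assumed: one page */
#define SK_CACHELINE_BYTES	64u	/* assumed */
#define SK_MAX_ENGINE_INSTANCE	7u	/* assumed */
#define SK_WQ_SIZE		(SK_PARENT_SCRATCH_SIZE / 2)
#define SK_WQ_OFFSET		(SK_PARENT_SCRATCH_SIZE - SK_WQ_SIZE)

struct sk_process_desc { uint32_t dw[16]; };	/* placeholder descriptor */

struct sk_sync_semaphore {
	uint32_t semaphore;
	uint8_t unused[SK_CACHELINE_BYTES - sizeof(uint32_t)];
};

struct sk_parent_scratch {
	struct sk_process_desc pdesc;
	struct sk_sync_semaphore go;
	struct sk_sync_semaphore join[SK_MAX_ENGINE_INSTANCE + 1];
	uint8_t unused[SK_WQ_OFFSET - sizeof(struct sk_process_desc) -
		       sizeof(struct sk_sync_semaphore) * (SK_MAX_ENGINE_INSTANCE + 2)];
	uint32_t wq[SK_WQ_SIZE / sizeof(uint32_t)];
};

int main(void)
{
	printf("go offset:  %zu\n", offsetof(struct sk_parent_scratch, go));
	printf("wq offset:  %zu (expected %u)\n",
	       offsetof(struct sk_parent_scratch, wq), SK_WQ_OFFSET);
	printf("total size: %zu (expected %u)\n",
	       sizeof(struct sk_parent_scratch), SK_PARENT_SCRATCH_SIZE);
	return 0;
}

The padding mirrors the unused[] member of struct parent_scratch so the work queue starts exactly at the half-page boundary, matching WQ_OFFSET.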
+
+static u32 __get_parent_scratch_offset(struct intel_context *ce)
+{
+ GEM_BUG_ON(!ce->parallel.guc.parent_page);
+
+ return ce->parallel.guc.parent_page * PAGE_SIZE;
+}
+
+static u32 __get_wq_offset(struct intel_context *ce)
+{
+ BUILD_BUG_ON(offsetof(struct parent_scratch, wq) != WQ_OFFSET);
+
+ return __get_parent_scratch_offset(ce) + WQ_OFFSET;
+}
+
+static struct parent_scratch *
+__get_parent_scratch(struct intel_context *ce)
+{
+ BUILD_BUG_ON(sizeof(struct parent_scratch) != PARENT_SCRATCH_SIZE);
+ BUILD_BUG_ON(sizeof(struct sync_semaphore) != CACHELINE_BYTES);
+
+ /*
+ * Need to subtract LRC_STATE_OFFSET here as the
+ * parallel.guc.parent_page is the offset into ce->state while
+ * ce->lrc_reg_state is ce->state + LRC_STATE_OFFSET.
+ */
+ return (struct parent_scratch *)
+ (ce->lrc_reg_state +
+ ((__get_parent_scratch_offset(ce) -
+ LRC_STATE_OFFSET) / sizeof(u32)));
+}
+
+static struct guc_process_desc *
+__get_process_desc(struct intel_context *ce)
+{
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+
+ return &ps->pdesc;
+}
+
+static u32 *get_wq_pointer(struct guc_process_desc *desc,
+ struct intel_context *ce,
+ u32 wqi_size)
+{
+ /*
+ * Check for space in the work queue. The head pointer is cached in the
+ * intel_context structure in order to reduce the number of accesses to
+ * shared GPU memory, which may be across a PCIe bus.
+ */
+#define AVAILABLE_SPACE \
+ CIRC_SPACE(ce->parallel.guc.wqi_tail, ce->parallel.guc.wqi_head, WQ_SIZE)
+ if (wqi_size > AVAILABLE_SPACE) {
+ ce->parallel.guc.wqi_head = READ_ONCE(desc->head);
+
+ if (wqi_size > AVAILABLE_SPACE)
+ return NULL;
+ }
+#undef AVAILABLE_SPACE
+
+ return &__get_parent_scratch(ce)->wq[ce->parallel.guc.wqi_tail / sizeof(u32)];
+}
+
static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
{
struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
@@ -352,20 +546,29 @@ static inline void set_lrc_desc_registered(struct intel_guc *guc, u32 id,
xa_unlock_irqrestore(&guc->context_lookup, flags);
}
+static void decr_outstanding_submission_g2h(struct intel_guc *guc)
+{
+ if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
+ wake_up_all(&guc->ct.wq);
+}
+
static int guc_submission_send_busy_loop(struct intel_guc *guc,
const u32 *action,
u32 len,
u32 g2h_len_dw,
bool loop)
{
- int err;
-
- err = intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
+ /*
+ * We always loop when a send requires a reply (i.e. g2h_len_dw > 0),
+ * so we don't handle the case where we don't get a reply because we
+ * aborted the send due to the channel being busy.
+ */
+ GEM_BUG_ON(g2h_len_dw && !loop);
- if (!err && g2h_len_dw)
+ if (g2h_len_dw)
atomic_inc(&guc->outstanding_submission_g2h);
- return err;
+ return intel_guc_send_busy_loop(guc, action, len, g2h_len_dw, loop);
}
int intel_guc_wait_for_pending_msg(struct intel_guc *guc,
@@ -421,15 +624,17 @@ int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout)
static int guc_lrc_desc_pin(struct intel_context *ce, bool loop);
-static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+static int __guc_add_request(struct intel_guc *guc, struct i915_request *rq)
{
int err = 0;
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u32 action[3];
int len = 0;
u32 g2h_len_dw = 0;
bool enabled;
+ lockdep_assert_held(&rq->engine->sched_engine->lock);
+
/*
* Corner case where requests were sitting in the priority list or a
* request resubmitted after the context was banned.
@@ -437,41 +642,34 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
if (unlikely(intel_context_is_banned(ce))) {
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(ce->engine);
- goto out;
+ return 0;
}
- GEM_BUG_ON(!atomic_read(&ce->guc_id_ref));
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
GEM_BUG_ON(context_guc_id_invalid(ce));
- /*
- * Corner case where the GuC firmware was blown away and reloaded while
- * this context was pinned.
- */
- if (unlikely(!lrc_desc_registered(guc, ce->guc_id))) {
- err = guc_lrc_desc_pin(ce, false);
- if (unlikely(err))
- goto out;
- }
+ spin_lock(&ce->guc_state.lock);
/*
* The request / context will be run on the hardware when scheduling
- * gets enabled in the unblock.
+ * gets enabled in the unblock. For multi-lrc we still submit the
+ * context to move the LRC tails.
*/
- if (unlikely(context_blocked(ce)))
+ if (unlikely(context_blocked(ce) && !intel_context_is_parent(ce)))
goto out;
- enabled = context_enabled(ce);
+ enabled = context_enabled(ce) || context_blocked(ce);
if (!enabled) {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET;
- action[len++] = ce->guc_id;
+ action[len++] = ce->guc_id.id;
action[len++] = GUC_CONTEXT_ENABLE;
set_context_pending_enable(ce);
intel_context_get(ce);
g2h_len_dw = G2H_LEN_DW_SCHED_CONTEXT_MODE_SET;
} else {
action[len++] = INTEL_GUC_ACTION_SCHED_CONTEXT;
- action[len++] = ce->guc_id;
+ action[len++] = ce->guc_id.id;
}
err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
@@ -479,6 +677,18 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
trace_intel_context_sched_enable(ce);
atomic_inc(&guc->outstanding_submission_g2h);
set_context_enabled(ce);
+
+ /*
+ * Without multi-lrc KMD does the submission step (moving the
+ * lrc tail) so enabling scheduling is sufficient to submit the
+ * context. This isn't the case in multi-lrc submission as the
+ * GuC needs to move the tails, hence the need for another H2G
+ * to submit a multi-lrc context after enabling scheduling.
+ */
+ if (intel_context_is_parent(ce)) {
+ action[0] = INTEL_GUC_ACTION_SCHED_CONTEXT;
+ err = intel_guc_send_nb(guc, action, len - 1, 0);
+ }
} else if (!enabled) {
clr_context_pending_enable(ce);
intel_context_put(ce);
@@ -487,9 +697,22 @@ static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
trace_i915_request_guc_submit(rq);
out:
+ spin_unlock(&ce->guc_state.lock);
return err;
}
+static int guc_add_request(struct intel_guc *guc, struct i915_request *rq)
+{
+ int ret = __guc_add_request(guc, rq);
+
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_ADD_REQUEST;
+ }
+
+ return ret;
+}
+
static inline void guc_set_lrc_tail(struct i915_request *rq)
{
rq->context->lrc_reg_state[CTX_RING_TAIL] =
@@ -501,6 +724,135 @@ static inline int rq_prio(const struct i915_request *rq)
return rq->sched.attr.priority;
}
+static bool is_multi_lrc_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static bool can_merge_rq(struct i915_request *rq,
+ struct i915_request *last)
+{
+ return request_to_scheduling_context(rq) ==
+ request_to_scheduling_context(last);
+}
+
+static u32 wq_space_until_wrap(struct intel_context *ce)
+{
+ return (WQ_SIZE - ce->parallel.guc.wqi_tail);
+}
+
+static void write_wqi(struct guc_process_desc *desc,
+ struct intel_context *ce,
+ u32 wqi_size)
+{
+ BUILD_BUG_ON(!is_power_of_2(WQ_SIZE));
+
+ /*
+ * Ensure WQI are visible before updating tail
+ */
+ intel_guc_write_barrier(ce_to_guc(ce));
+
+ ce->parallel.guc.wqi_tail = (ce->parallel.guc.wqi_tail + wqi_size) &
+ (WQ_SIZE - 1);
+ WRITE_ONCE(desc->tail, ce->parallel.guc.wqi_tail);
+}
+
+static int guc_wq_noop_append(struct intel_context *ce)
+{
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ u32 *wqi = get_wq_pointer(desc, ce, wq_space_until_wrap(ce));
+ u32 len_dw = wq_space_until_wrap(ce) / sizeof(u32) - 1;
+
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_NOOP) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ ce->parallel.guc.wqi_tail = 0;
+
+ return 0;
+}
+
+static int __guc_wq_item_append(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ struct intel_context *child;
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ unsigned int wqi_size = (ce->parallel.number_children + 4) *
+ sizeof(u32);
+ u32 *wqi;
+ u32 len_dw = (wqi_size / sizeof(u32)) - 1;
+ int ret;
+
+ /* Ensure context is in correct state before updating work queue */
+ GEM_BUG_ON(!atomic_read(&ce->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(ce));
+ GEM_BUG_ON(context_wait_for_deregister_to_register(ce));
+ GEM_BUG_ON(!lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id));
+
+ /* Insert NOOP if this work queue item will wrap the tail pointer. */
+ if (wqi_size > wq_space_until_wrap(ce)) {
+ ret = guc_wq_noop_append(ce);
+ if (ret)
+ return ret;
+ }
+
+ wqi = get_wq_pointer(desc, ce, wqi_size);
+ if (!wqi)
+ return -EBUSY;
+
+ GEM_BUG_ON(!FIELD_FIT(WQ_LEN_MASK, len_dw));
+
+ *wqi++ = FIELD_PREP(WQ_TYPE_MASK, WQ_TYPE_MULTI_LRC) |
+ FIELD_PREP(WQ_LEN_MASK, len_dw);
+ *wqi++ = ce->lrc.lrca;
+ *wqi++ = FIELD_PREP(WQ_GUC_ID_MASK, ce->guc_id.id) |
+ FIELD_PREP(WQ_RING_TAIL_MASK, ce->ring->tail / sizeof(u64));
+ *wqi++ = 0; /* fence_id */
+ for_each_child(ce, child)
+ *wqi++ = child->ring->tail / sizeof(u64);
+
+ write_wqi(desc, ce, wqi_size);
+
+ return 0;
+}
+
+static int guc_wq_item_append(struct intel_guc *guc,
+ struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+ int ret = 0;
+
+ if (likely(!intel_context_is_banned(ce))) {
+ ret = __guc_wq_item_append(rq);
+
+ if (unlikely(ret == -EBUSY)) {
+ guc->stalled_request = rq;
+ guc->submission_stall_reason = STALL_MOVE_LRC_TAIL;
+ }
+ }
+
+ return ret;
+}
+
+static bool multi_lrc_submit(struct i915_request *rq)
+{
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ intel_ring_set_tail(rq->ring, rq->tail);
+
+ /*
+ * We expect the front end (execbuf IOCTL) to set this flag on the last
+ * request generated from a multi-BB submission. This indicates to the
+ * backend (GuC interface) that we should submit this context thus
+ * submitting all the requests generated in parallel.
+ */
+ return test_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL, &rq->fence.flags) ||
+ intel_context_is_banned(ce);
+}
+
static int guc_dequeue_one_context(struct intel_guc *guc)
{
struct i915_sched_engine * const sched_engine = guc->sched_engine;
@@ -514,7 +866,17 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
if (guc->stalled_request) {
submit = true;
last = guc->stalled_request;
- goto resubmit;
+
+ switch (guc->submission_stall_reason) {
+ case STALL_REGISTER_CONTEXT:
+ goto register_context;
+ case STALL_MOVE_LRC_TAIL:
+ goto move_lrc_tail;
+ case STALL_ADD_REQUEST:
+ goto add_request;
+ default:
+ MISSING_CASE(guc->submission_stall_reason);
+ }
}
while ((rb = rb_first_cached(&sched_engine->queue))) {
@@ -522,8 +884,8 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
struct i915_request *rq, *rn;
priolist_for_each_request_consume(rq, rn, p) {
- if (last && rq->context != last->context)
- goto done;
+ if (last && !can_merge_rq(rq, last))
+ goto register_context;
list_del_init(&rq->sched.link);
@@ -531,33 +893,84 @@ static int guc_dequeue_one_context(struct intel_guc *guc)
trace_i915_request_in(rq, 0);
last = rq;
- submit = true;
+
+ if (is_multi_lrc_rq(rq)) {
+ /*
+ * We need to coalesce all multi-lrc requests in
+ * a relationship into a single H2G. We are
+ * guaranteed that all of these requests will be
+ * submitted sequentially.
+ */
+ if (multi_lrc_submit(rq)) {
+ submit = true;
+ goto register_context;
+ }
+ } else {
+ submit = true;
+ }
}
rb_erase_cached(&p->node, &sched_engine->queue);
i915_priolist_free(p);
}
-done:
+
+register_context:
if (submit) {
- guc_set_lrc_tail(last);
-resubmit:
+ struct intel_context *ce = request_to_scheduling_context(last);
+
+ if (unlikely(!lrc_desc_registered(guc, ce->guc_id.id) &&
+ !intel_context_is_banned(ce))) {
+ ret = guc_lrc_desc_pin(ce, false);
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ guc->stalled_request = last;
+ guc->submission_stall_reason =
+ STALL_REGISTER_CONTEXT;
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ }
+
+move_lrc_tail:
+ if (is_multi_lrc_rq(last)) {
+ ret = guc_wq_item_append(guc, last);
+ if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
+ goto deadlk;
+ }
+ } else {
+ guc_set_lrc_tail(last);
+ }
+
+add_request:
ret = guc_add_request(guc, last);
- if (unlikely(ret == -EPIPE))
+ if (unlikely(ret == -EPIPE)) {
+ goto deadlk;
+ } else if (ret == -EBUSY) {
+ goto schedule_tasklet;
+ } else if (ret != 0) {
+ GEM_WARN_ON(ret); /* Unexpected */
goto deadlk;
- else if (ret == -EBUSY) {
- tasklet_schedule(&sched_engine->tasklet);
- guc->stalled_request = last;
- return false;
}
}
guc->stalled_request = NULL;
+ guc->submission_stall_reason = STALL_NONE;
return submit;
deadlk:
sched_engine->tasklet.callback = NULL;
tasklet_disable_nosync(&sched_engine->tasklet);
return false;
+
+schedule_tasklet:
+ tasklet_schedule(&sched_engine->tasklet);
+ return false;
}
static void guc_submission_tasklet(struct tasklet_struct *t)
@@ -596,10 +1009,18 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
unsigned long index, flags;
bool pending_disable, pending_enable, deregister, destroyed, banned;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- /* Flush context */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ /*
+ * Corner case where the ref count on the object is zero but a
+ * deregister G2H was lost. In this case we don't touch the ref
+ * count and finish the destroy of the context.
+ */
+ bool do_put = kref_get_unless_zero(&ce->ref);
+
+ xa_unlock(&guc->context_lookup);
+
+ spin_lock(&ce->guc_state.lock);
/*
* Once we are at this point submission_disabled() is guaranteed
@@ -615,11 +1036,16 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
banned = context_banned(ce);
init_sched_state(ce);
+ spin_unlock(&ce->guc_state.lock);
+
+ GEM_BUG_ON(!do_put && !destroyed);
+
if (pending_enable || destroyed || deregister) {
- atomic_dec(&guc->outstanding_submission_g2h);
+ decr_outstanding_submission_g2h(guc);
if (deregister)
guc_signal_context_fence(ce);
if (destroyed) {
+ intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -635,14 +1061,20 @@ static void scrub_guc_desc_for_outstanding_g2h(struct intel_guc *guc)
intel_engine_signal_breadcrumbs(ce->engine);
}
intel_context_sched_disable_unpin(ce);
- atomic_dec(&guc->outstanding_submission_g2h);
- spin_lock_irqsave(&ce->guc_state.lock, flags);
+ decr_outstanding_submission_g2h(guc);
+
+ spin_lock(&ce->guc_state.lock);
guc_blocked_fence_complete(ce);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ spin_unlock(&ce->guc_state.lock);
intel_context_put(ce);
}
+
+ if (do_put)
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
static inline bool
@@ -692,6 +1124,8 @@ static void guc_flush_submissions(struct intel_guc *guc)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
+static void guc_flush_destroyed_contexts(struct intel_guc *guc);
+
void intel_guc_submission_reset_prepare(struct intel_guc *guc)
{
int i;
@@ -710,6 +1144,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
spin_unlock_irq(&guc_to_gt(guc)->irq_lock);
guc_flush_submissions(guc);
+ guc_flush_destroyed_contexts(guc);
/*
* Handle any outstanding G2Hs before reset. Call IRQ handler directly
@@ -725,6 +1160,7 @@ void intel_guc_submission_reset_prepare(struct intel_guc *guc)
wait_for_reset(guc, &guc->outstanding_submission_g2h);
} while (!list_empty(&guc->ct.requests.incoming));
}
+
scrub_guc_desc_for_outstanding_g2h(guc);
}
@@ -796,16 +1232,14 @@ __unwind_incomplete_requests(struct intel_context *ce)
unsigned long flags;
spin_lock_irqsave(&sched_engine->lock, flags);
- spin_lock(&ce->guc_active.lock);
- list_for_each_entry_safe(rq, rn,
- &ce->guc_active.requests,
- sched.link) {
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry_safe_reverse(rq, rn,
+ &ce->guc_state.requests,
+ sched.link) {
if (i915_request_completed(rq))
continue;
list_del_init(&rq->sched.link);
- spin_unlock(&ce->guc_active.lock);
-
__i915_request_unsubmit(rq);
/* Push the request back into the queue for later resubmission. */
@@ -816,64 +1250,111 @@ __unwind_incomplete_requests(struct intel_context *ce)
}
GEM_BUG_ON(i915_sched_engine_is_empty(sched_engine));
- list_add_tail(&rq->sched.link, pl);
+ list_add(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
-
- spin_lock(&ce->guc_active.lock);
}
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
static void __guc_reset_context(struct intel_context *ce, bool stalled)
{
+ bool local_stalled;
struct i915_request *rq;
+ unsigned long flags;
u32 head;
+ int i, number_children = ce->parallel.number_children;
+ bool skip = false;
+ struct intel_context *parent = ce;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
intel_context_get(ce);
/*
- * GuC will implicitly mark the context as non-schedulable
- * when it sends the reset notification. Make sure our state
- * reflects this change. The context will be marked enabled
- * on resubmission.
+ * GuC will implicitly mark the context as non-schedulable when it sends
+ * the reset notification. Make sure our state reflects this change. The
+ * context will be marked enabled on resubmission.
+ *
+ * XXX: If the context is reset as a result of the request cancellation
+ * this G2H is received after the schedule disable complete G2H which is
+ * wrong as this creates a race between the request cancellation code
+ * re-submitting the context and this G2H handler. This is a bug in the
+ * GuC but can be worked around in the meantime by converting this to a
+ * NOP if a pending enable is in flight, as this indicates that a request
+ * cancellation has occurred.
*/
- clr_context_enabled(ce);
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ if (likely(!context_pending_enable(ce)))
+ clr_context_enabled(ce);
+ else
+ skip = true;
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(skip))
+ goto out_put;
- rq = intel_context_find_active_request(ce);
- if (!rq) {
- head = ce->ring->tail;
- stalled = false;
- goto out_replay;
- }
+ /*
+ * For each context in the relationship find the hanging request
+ * resetting each context / request as needed
+ */
+ for (i = 0; i < number_children + 1; ++i) {
+ if (!intel_context_is_pinned(ce))
+ goto next_context;
+
+ local_stalled = false;
+ rq = intel_context_find_active_request(ce);
+ if (!rq) {
+ head = ce->ring->tail;
+ goto out_replay;
+ }
- if (!i915_request_started(rq))
- stalled = false;
+ if (i915_request_started(rq))
+ local_stalled = true;
- GEM_BUG_ON(i915_active_is_idle(&ce->active));
- head = intel_ring_wrap(ce->ring, rq->head);
- __i915_request_reset(rq, stalled);
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ head = intel_ring_wrap(ce->ring, rq->head);
+ __i915_request_reset(rq, local_stalled && stalled);
out_replay:
- guc_reset_state(ce, head, stalled);
- __unwind_incomplete_requests(ce);
- intel_context_put(ce);
+ guc_reset_state(ce, head, local_stalled && stalled);
+next_context:
+ if (i != number_children)
+ ce = list_next_entry(ce, parallel.child_link);
+ }
+
+ __unwind_incomplete_requests(parent);
+out_put:
+ intel_context_put(parent);
}
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
if (unlikely(!guc_submission_initialized(guc))) {
/* Reset called during driver load? GuC not yet initialised! */
return;
}
- xa_for_each(&guc->context_lookup, index, ce)
- if (intel_context_is_pinned(ce))
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
+
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
__guc_reset_context(ce, stalled);
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
/* GuC is blown away, drop all references to contexts */
xa_destroy(&guc->context_lookup);
}
@@ -886,10 +1367,10 @@ static void guc_cancel_context_requests(struct intel_context *ce)
/* Mark all executing requests as skipped. */
spin_lock_irqsave(&sched_engine->lock, flags);
- spin_lock(&ce->guc_active.lock);
- list_for_each_entry(rq, &ce->guc_active.requests, sched.link)
+ spin_lock(&ce->guc_state.lock);
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
@@ -948,11 +1429,25 @@ void intel_guc_submission_cancel_requests(struct intel_guc *guc)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
+
+ xa_lock_irqsave(&guc->context_lookup, flags);
+ xa_for_each(&guc->context_lookup, index, ce) {
+ if (!kref_get_unless_zero(&ce->ref))
+ continue;
- xa_for_each(&guc->context_lookup, index, ce)
- if (intel_context_is_pinned(ce))
+ xa_unlock(&guc->context_lookup);
+
+ if (intel_context_is_pinned(ce) &&
+ !intel_context_is_child(ce))
guc_cancel_context_requests(ce);
+ intel_context_put(ce);
+
+ xa_lock(&guc->context_lookup);
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+
guc_cancel_sched_engine_requests(guc->sched_engine);
/* GuC is blown away, drop all references to contexts */
@@ -981,6 +1476,8 @@ void intel_guc_submission_reset_finish(struct intel_guc *guc)
intel_gt_unpark_heartbeats(guc_to_gt(guc));
}
+static void destroyed_worker_func(struct work_struct *w);
+
/*
* Set up the memory resources to be shared with the GuC (via the GGTT)
* at firmware loading time.
@@ -1003,9 +1500,17 @@ int intel_guc_submission_init(struct intel_guc *guc)
xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
- spin_lock_init(&guc->contexts_lock);
- INIT_LIST_HEAD(&guc->guc_id_list);
- ida_init(&guc->guc_ids);
+ spin_lock_init(&guc->submission_state.lock);
+ INIT_LIST_HEAD(&guc->submission_state.guc_id_list);
+ ida_init(&guc->submission_state.guc_ids);
+ INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
+ INIT_WORK(&guc->submission_state.destroyed_worker,
+ destroyed_worker_func);
+
+ guc->submission_state.guc_ids_bitmap =
+ bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID, GFP_KERNEL);
+ if (!guc->submission_state.guc_ids_bitmap)
+ return -ENOMEM;
return 0;
}
@@ -1015,8 +1520,10 @@ void intel_guc_submission_fini(struct intel_guc *guc)
if (!guc->lrc_desc_pool)
return;
+ guc_flush_destroyed_contexts(guc);
guc_lrc_desc_pool_destroy(guc);
i915_sched_engine_put(guc->sched_engine);
+ bitmap_free(guc->submission_state.guc_ids_bitmap);
}
static inline void queue_request(struct i915_sched_engine *sched_engine,
@@ -1027,21 +1534,28 @@ static inline void queue_request(struct i915_sched_engine *sched_engine,
list_add_tail(&rq->sched.link,
i915_sched_lookup_priolist(sched_engine, prio));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ tasklet_hi_schedule(&sched_engine->tasklet);
}
static int guc_bypass_tasklet_submit(struct intel_guc *guc,
struct i915_request *rq)
{
- int ret;
+ int ret = 0;
__i915_request_submit(rq);
trace_i915_request_in(rq, 0);
- guc_set_lrc_tail(rq);
- ret = guc_add_request(guc, rq);
- if (ret == -EBUSY)
- guc->stalled_request = rq;
+ if (is_multi_lrc_rq(rq)) {
+ if (multi_lrc_submit(rq)) {
+ ret = guc_wq_item_append(guc, rq);
+ if (!ret)
+ ret = guc_add_request(guc, rq);
+ }
+ } else {
+ guc_set_lrc_tail(rq);
+ ret = guc_add_request(guc, rq);
+ }
if (unlikely(ret == -EPIPE))
disable_submission(guc);
@@ -1049,6 +1563,16 @@ static int guc_bypass_tasklet_submit(struct intel_guc *guc,
return ret;
}
+static bool need_tasklet(struct intel_guc *guc, struct i915_request *rq)
+{
+ struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
+ struct intel_context *ce = request_to_scheduling_context(rq);
+
+ return submission_disabled(guc) || guc->stalled_request ||
+ !i915_sched_engine_is_empty(sched_engine) ||
+ !lrc_desc_registered(guc, ce->guc_id.id);
+}
+
static void guc_submit_request(struct i915_request *rq)
{
struct i915_sched_engine *sched_engine = rq->engine->sched_engine;
@@ -1058,8 +1582,7 @@ static void guc_submit_request(struct i915_request *rq)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&sched_engine->lock, flags);
- if (submission_disabled(guc) || guc->stalled_request ||
- !i915_sched_engine_is_empty(sched_engine))
+ if (need_tasklet(guc, rq))
queue_request(sched_engine, rq, rq_prio(rq));
else if (guc_bypass_tasklet_submit(guc, rq) == -EBUSY)
tasklet_hi_schedule(&sched_engine->tasklet);
@@ -1067,72 +1590,117 @@ static void guc_submit_request(struct i915_request *rq)
spin_unlock_irqrestore(&sched_engine->lock, flags);
}
-static int new_guc_id(struct intel_guc *guc)
+static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
- return ida_simple_get(&guc->guc_ids, 0,
- GUC_MAX_LRC_DESCRIPTORS, GFP_KERNEL |
- __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+ int ret;
+
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ if (intel_context_is_parent(ce))
+ ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+ NUMBER_MULTI_LRC_GUC_ID,
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ret = ida_simple_get(&guc->submission_state.guc_ids,
+ NUMBER_MULTI_LRC_GUC_ID,
+ GUC_MAX_LRC_DESCRIPTORS,
+ GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+ __GFP_NOWARN);
+ if (unlikely(ret < 0))
+ return ret;
+
+ ce->guc_id.id = ret;
+ return 0;
}
static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
+ GEM_BUG_ON(intel_context_is_child(ce));
+
if (!context_guc_id_invalid(ce)) {
- ida_simple_remove(&guc->guc_ids, ce->guc_id);
- reset_lrc_desc(guc, ce->guc_id);
+ if (intel_context_is_parent(ce))
+ bitmap_release_region(guc->submission_state.guc_ids_bitmap,
+ ce->guc_id.id,
+ order_base_2(ce->parallel.number_children
+ + 1));
+ else
+ ida_simple_remove(&guc->submission_state.guc_ids,
+ ce->guc_id.id);
+ reset_lrc_desc(guc, ce->guc_id.id);
set_context_guc_id_invalid(ce);
}
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
}
static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
- spin_lock_irqsave(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
__release_guc_id(guc, ce);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
}
-static int steal_guc_id(struct intel_guc *guc)
+static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
- struct intel_context *ce;
- int guc_id;
+ struct intel_context *cn;
- lockdep_assert_held(&guc->contexts_lock);
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+ GEM_BUG_ON(intel_context_is_parent(ce));
- if (!list_empty(&guc->guc_id_list)) {
- ce = list_first_entry(&guc->guc_id_list,
+ if (!list_empty(&guc->submission_state.guc_id_list)) {
+ cn = list_first_entry(&guc->submission_state.guc_id_list,
struct intel_context,
- guc_id_link);
+ guc_id.link);
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
- GEM_BUG_ON(context_guc_id_invalid(ce));
+ GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
+ GEM_BUG_ON(context_guc_id_invalid(cn));
+ GEM_BUG_ON(intel_context_is_child(cn));
+ GEM_BUG_ON(intel_context_is_parent(cn));
- list_del_init(&ce->guc_id_link);
- guc_id = ce->guc_id;
- clr_context_registered(ce);
- set_context_guc_id_invalid(ce);
- return guc_id;
+ list_del_init(&cn->guc_id.link);
+ ce->guc_id = cn->guc_id;
+
+ spin_lock(&ce->guc_state.lock);
+ clr_context_registered(cn);
+ spin_unlock(&ce->guc_state.lock);
+
+ set_context_guc_id_invalid(cn);
+
+ return 0;
} else {
return -EAGAIN;
}
}
-static int assign_guc_id(struct intel_guc *guc, u16 *out)
+static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
int ret;
- lockdep_assert_held(&guc->contexts_lock);
+ lockdep_assert_held(&guc->submission_state.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
- ret = new_guc_id(guc);
+ ret = new_guc_id(guc, ce);
if (unlikely(ret < 0)) {
- ret = steal_guc_id(guc);
+ if (intel_context_is_parent(ce))
+ return -ENOSPC;
+
+ ret = steal_guc_id(guc, ce);
if (ret < 0)
return ret;
}
- *out = ret;
+ if (intel_context_is_parent(ce)) {
+ struct intel_context *child;
+ int i = 1;
+
+ for_each_child(ce, child)
+ child->guc_id.id = ce->guc_id.id + i++;
+ }
+
return 0;
}
@@ -1142,26 +1710,28 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
int ret = 0;
unsigned long flags, tries = PIN_GUC_ID_TRIES;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref));
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
try_again:
- spin_lock_irqsave(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+
+ might_lock(&ce->guc_state.lock);
if (context_guc_id_invalid(ce)) {
- ret = assign_guc_id(guc, &ce->guc_id);
+ ret = assign_guc_id(guc, ce);
if (ret)
goto out_unlock;
ret = 1; /* Indicates newly assigned guc_id */
}
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
- atomic_inc(&ce->guc_id_ref);
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ atomic_inc(&ce->guc_id.ref);
out_unlock:
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
/*
- * -EAGAIN indicates no guc_ids are available, let's retire any
+ * -EAGAIN indicates no guc_id is available, let's retire any
* outstanding requests to see if that frees up a guc_id. If the first
* retire didn't help, insert a sleep with the timeslice duration before
* attempting to retire more requests. Double the sleep period each
@@ -1189,16 +1759,43 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
{
unsigned long flags;
- GEM_BUG_ON(atomic_read(&ce->guc_id_ref) < 0);
+ GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
+ GEM_BUG_ON(intel_context_is_child(ce));
- if (unlikely(context_guc_id_invalid(ce)))
+ if (unlikely(context_guc_id_invalid(ce) ||
+ intel_context_is_parent(ce)))
return;
- spin_lock_irqsave(&guc->contexts_lock, flags);
- if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id_link) &&
- !atomic_read(&ce->guc_id_ref))
- list_add_tail(&ce->guc_id_link, &guc->guc_id_list);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ if (!context_guc_id_invalid(ce) && list_empty(&ce->guc_id.link) &&
+ !atomic_read(&ce->guc_id.ref))
+ list_add_tail(&ce->guc_id.link,
+ &guc->submission_state.guc_id_list);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static int __guc_action_register_multi_lrc(struct intel_guc *guc,
+ struct intel_context *ce,
+ u32 guc_id,
+ u32 offset,
+ bool loop)
+{
+ struct intel_context *child;
+ u32 action[4 + MAX_ENGINE_INSTANCE];
+ int len = 0;
+
+ GEM_BUG_ON(ce->parallel.number_children > MAX_ENGINE_INSTANCE);
+
+ action[len++] = INTEL_GUC_ACTION_REGISTER_CONTEXT_MULTI_LRC;
+ action[len++] = guc_id;
+ action[len++] = ce->parallel.number_children + 1;
+ action[len++] = offset;
+ for_each_child(ce, child) {
+ offset += sizeof(struct guc_lrc_desc);
+ action[len++] = offset;
+ }
+
+ return guc_submission_send_busy_loop(guc, action, len, 0, loop);
}
static int __guc_action_register_context(struct intel_guc *guc,
@@ -1220,21 +1817,31 @@ static int register_context(struct intel_context *ce, bool loop)
{
struct intel_guc *guc = ce_to_guc(ce);
u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
- ce->guc_id * sizeof(struct guc_lrc_desc);
+ ce->guc_id.id * sizeof(struct guc_lrc_desc);
int ret;
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_register(ce);
- ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
- if (likely(!ret))
+ if (intel_context_is_parent(ce))
+ ret = __guc_action_register_multi_lrc(guc, ce, ce->guc_id.id,
+ offset, loop);
+ else
+ ret = __guc_action_register_context(guc, ce->guc_id.id, offset,
+ loop);
+ if (likely(!ret)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
set_context_registered(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ }
return ret;
}
static int __guc_action_deregister_context(struct intel_guc *guc,
- u32 guc_id,
- bool loop)
+ u32 guc_id)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_CONTEXT,
@@ -1243,33 +1850,38 @@ static int __guc_action_deregister_context(struct intel_guc *guc,
return guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
G2H_LEN_DW_DEREGISTER_CONTEXT,
- loop);
+ true);
}
-static int deregister_context(struct intel_context *ce, u32 guc_id, bool loop)
+static int deregister_context(struct intel_context *ce, u32 guc_id)
{
struct intel_guc *guc = ce_to_guc(ce);
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_deregister(ce);
- return __guc_action_deregister_context(guc, guc_id, loop);
+ return __guc_action_deregister_context(guc, guc_id);
}
-static intel_engine_mask_t adjust_engine_mask(u8 class, intel_engine_mask_t mask)
+static inline void clear_children_join_go_memory(struct intel_context *ce)
{
- switch (class) {
- case RENDER_CLASS:
- return mask >> RCS0;
- case VIDEO_ENHANCEMENT_CLASS:
- return mask >> VECS0;
- case VIDEO_DECODE_CLASS:
- return mask >> VCS0;
- case COPY_ENGINE_CLASS:
- return mask >> BCS0;
- default:
- MISSING_CASE(class);
- return 0;
- }
+ struct parent_scratch *ps = __get_parent_scratch(ce);
+ int i;
+
+ ps->go.semaphore = 0;
+ for (i = 0; i < ce->parallel.number_children + 1; ++i)
+ ps->join[i].semaphore = 0;
+}
+
+static inline u32 get_children_go_value(struct intel_context *ce)
+{
+ return __get_parent_scratch(ce)->go.semaphore;
+}
+
+static inline u32 get_children_join_value(struct intel_context *ce,
+ u8 child_index)
+{
+ return __get_parent_scratch(ce)->join[child_index].semaphore;
}
static void guc_context_policy_init(struct intel_engine_cs *engine,
@@ -1285,22 +1897,20 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
}
-static inline u8 map_i915_prio_to_guc_prio(int prio);
-
static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
{
struct intel_engine_cs *engine = ce->engine;
struct intel_runtime_pm *runtime_pm = engine->uncore->rpm;
struct intel_guc *guc = &engine->gt->uc.guc;
- u32 desc_idx = ce->guc_id;
+ u32 desc_idx = ce->guc_id.id;
struct guc_lrc_desc *desc;
- const struct i915_gem_context *ctx;
- int prio = I915_CONTEXT_DEFAULT_PRIORITY;
bool context_registered;
intel_wakeref_t wakeref;
+ struct intel_context *child;
int ret = 0;
GEM_BUG_ON(!engine->mask);
+ GEM_BUG_ON(!sched_state_is_init(ce));
/*
* Ensure LRC + CT vmas are is same region as write barrier is done
@@ -1311,25 +1921,53 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
context_registered = lrc_desc_registered(guc, desc_idx);
- rcu_read_lock();
- ctx = rcu_dereference(ce->gem_context);
- if (ctx)
- prio = ctx->sched.priority;
- rcu_read_unlock();
-
reset_lrc_desc(guc, desc_idx);
set_lrc_desc_registered(guc, desc_idx, ce);
desc = __get_lrc_desc(guc, desc_idx);
desc->engine_class = engine_class_to_guc_class(engine->class);
- desc->engine_submit_mask = adjust_engine_mask(engine->class,
- engine->mask);
+ desc->engine_submit_mask = engine->logical_mask;
desc->hw_context_desc = ce->lrc.lrca;
- ce->guc_prio = map_i915_prio_to_guc_prio(prio);
- desc->priority = ce->guc_prio;
+ desc->priority = ce->guc_state.prio;
desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
guc_context_policy_init(engine, desc);
- init_sched_state(ce);
+
+ /*
+ * If context is a parent, we need to register a process descriptor
+ * describing a work queue and register all child contexts.
+ */
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc *pdesc;
+
+ ce->parallel.guc.wqi_tail = 0;
+ ce->parallel.guc.wqi_head = 0;
+
+ desc->process_desc = i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce);
+ desc->wq_addr = i915_ggtt_offset(ce->state) +
+ __get_wq_offset(ce);
+ desc->wq_size = WQ_SIZE;
+
+ pdesc = __get_process_desc(ce);
+ memset(pdesc, 0, sizeof(*(pdesc)));
+ pdesc->stage_id = ce->guc_id.id;
+ pdesc->wq_base_addr = desc->wq_addr;
+ pdesc->wq_size_bytes = desc->wq_size;
+ pdesc->wq_status = WQ_STATUS_ACTIVE;
+
+ for_each_child(ce, child) {
+ desc = __get_lrc_desc(guc, child->guc_id.id);
+
+ desc->engine_class =
+ engine_class_to_guc_class(engine->class);
+ desc->hw_context_desc = child->lrc.lrca;
+ desc->priority = ce->guc_state.prio;
+ desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
+ guc_context_policy_init(engine, desc);
+ }
+
+ clear_children_join_go_memory(ce);
+ }
/*
* The context_lookup xarray is used to determine if the hardware
@@ -1340,26 +1978,23 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
* registering this context.
*/
if (context_registered) {
+ bool disabled;
+ unsigned long flags;
+
trace_intel_context_steal_guc_id(ce);
- if (!loop) {
+ GEM_BUG_ON(!loop);
+
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
set_context_wait_for_deregister_to_register(ce);
intel_context_get(ce);
- } else {
- bool disabled;
- unsigned long flags;
-
- /* Seal race with Reset */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- disabled = submission_disabled(guc);
- if (likely(!disabled)) {
- set_context_wait_for_deregister_to_register(ce);
- intel_context_get(ce);
- }
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(disabled)) {
- reset_lrc_desc(guc, desc_idx);
- return 0; /* Will get registered later */
- }
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ reset_lrc_desc(guc, desc_idx);
+ return 0; /* Will get registered later */
}
/*
@@ -1367,20 +2002,18 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
* context whose guc_id was stolen.
*/
with_intel_runtime_pm(runtime_pm, wakeref)
- ret = deregister_context(ce, ce->guc_id, loop);
- if (unlikely(ret == -EBUSY)) {
- clr_context_wait_for_deregister_to_register(ce);
- intel_context_put(ce);
- } else if (unlikely(ret == -ENODEV)) {
+ ret = deregister_context(ce, ce->guc_id.id);
+ if (unlikely(ret == -ENODEV))
ret = 0; /* Will get registered later */
- }
} else {
with_intel_runtime_pm(runtime_pm, wakeref)
ret = register_context(ce, loop);
- if (unlikely(ret == -EBUSY))
+ if (unlikely(ret == -EBUSY)) {
+ reset_lrc_desc(guc, desc_idx);
+ } else if (unlikely(ret == -ENODEV)) {
reset_lrc_desc(guc, desc_idx);
- else if (unlikely(ret == -ENODEV))
ret = 0; /* Will get registered later */
+ }
}
return ret;
@@ -1419,7 +2052,12 @@ static int guc_context_pre_pin(struct intel_context *ce,
static int guc_context_pin(struct intel_context *ce, void *vaddr)
{
- return __guc_context_pin(ce, ce->engine, vaddr);
+ int ret = __guc_context_pin(ce, ce->engine, vaddr);
+
+ if (likely(!ret && !intel_context_is_barrier(ce)))
+ intel_engine_pm_get(ce->engine);
+
+ return ret;
}
static void guc_context_unpin(struct intel_context *ce)
@@ -1428,6 +2066,9 @@ static void guc_context_unpin(struct intel_context *ce)
unpin_guc_id(guc, ce);
lrc_unpin(ce);
+
+ if (likely(!intel_context_is_barrier(ce)))
+ intel_engine_pm_put_async(ce->engine);
}
static void guc_context_post_unpin(struct intel_context *ce)
@@ -1440,7 +2081,7 @@ static void __guc_context_sched_enable(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
- ce->guc_id,
+ ce->guc_id.id,
GUC_CONTEXT_ENABLE
};
@@ -1456,12 +2097,13 @@ static void __guc_context_sched_disable(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SCHED_CONTEXT_MODE_SET,
- guc_id, /* ce->guc_id not stable */
+ guc_id, /* ce->guc_id.id not stable */
GUC_CONTEXT_DISABLE
};
GEM_BUG_ON(guc_id == GUC_INVALID_LRC_ID);
+ GEM_BUG_ON(intel_context_is_child(ce));
trace_intel_context_sched_disable(ce);
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action),
@@ -1472,24 +2114,24 @@ static void guc_blocked_fence_complete(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
- if (!i915_sw_fence_done(&ce->guc_blocked))
- i915_sw_fence_complete(&ce->guc_blocked);
+ if (!i915_sw_fence_done(&ce->guc_state.blocked))
+ i915_sw_fence_complete(&ce->guc_state.blocked);
}
static void guc_blocked_fence_reinit(struct intel_context *ce)
{
lockdep_assert_held(&ce->guc_state.lock);
- GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_blocked));
+ GEM_BUG_ON(!i915_sw_fence_done(&ce->guc_state.blocked));
/*
* This fence is always complete unless a pending schedule disable is
* outstanding. We arm the fence here and complete it when we receive
* the pending schedule disable complete message.
*/
- i915_sw_fence_fini(&ce->guc_blocked);
- i915_sw_fence_reinit(&ce->guc_blocked);
- i915_sw_fence_await(&ce->guc_blocked);
- i915_sw_fence_commit(&ce->guc_blocked);
+ i915_sw_fence_fini(&ce->guc_state.blocked);
+ i915_sw_fence_reinit(&ce->guc_state.blocked);
+ i915_sw_fence_await(&ce->guc_state.blocked);
+ i915_sw_fence_commit(&ce->guc_state.blocked);
}
static u16 prep_context_pending_disable(struct intel_context *ce)
@@ -1501,35 +2143,30 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
guc_blocked_fence_reinit(ce);
intel_context_get(ce);
- return ce->guc_id;
+ return ce->guc_id.id;
}
static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
- struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
u16 guc_id;
bool enabled;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
spin_lock_irqsave(&ce->guc_state.lock, flags);
- /*
- * Sync with submission path, increment before below changes to context
- * state.
- */
- spin_lock(&sched_engine->lock);
incr_context_blocked(ce);
- spin_unlock(&sched_engine->lock);
enabled = context_enabled(ce);
if (unlikely(!enabled || submission_disabled(guc))) {
if (enabled)
clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- return &ce->guc_blocked;
+ return &ce->guc_state.blocked;
}
/*
@@ -1545,26 +2182,41 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_sched_disable(guc, ce, guc_id);
- return &ce->guc_blocked;
+ return &ce->guc_state.blocked;
+}
+
+#define SCHED_STATE_MULTI_BLOCKED_MASK \
+ (SCHED_STATE_BLOCKED_MASK & ~SCHED_STATE_BLOCKED)
+#define SCHED_STATE_NO_UNBLOCK \
+ (SCHED_STATE_MULTI_BLOCKED_MASK | \
+ SCHED_STATE_PENDING_DISABLE | \
+ SCHED_STATE_BANNED)
+
+static bool context_cant_unblock(struct intel_context *ce)
+{
+ lockdep_assert_held(&ce->guc_state.lock);
+
+ return (ce->guc_state.sched_state & SCHED_STATE_NO_UNBLOCK) ||
+ context_guc_id_invalid(ce) ||
+ !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id) ||
+ !intel_context_is_pinned(ce);
}
static void guc_context_unblock(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
- struct i915_sched_engine *sched_engine = ce->engine->sched_engine;
unsigned long flags;
struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
intel_wakeref_t wakeref;
bool enable;
GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (unlikely(submission_disabled(guc) ||
- !intel_context_is_pinned(ce) ||
- context_pending_disable(ce) ||
- context_blocked(ce) > 1)) {
+ context_cant_unblock(ce))) {
enable = false;
} else {
enable = true;
@@ -1573,13 +2225,7 @@ static void guc_context_unblock(struct intel_context *ce)
intel_context_get(ce);
}
- /*
- * Sync with submission path, decrement after above changes to context
- * state.
- */
- spin_lock(&sched_engine->lock);
decr_context_blocked(ce);
- spin_unlock(&sched_engine->lock);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1592,16 +2238,29 @@ static void guc_context_unblock(struct intel_context *ce)
static void guc_context_cancel_request(struct intel_context *ce,
struct i915_request *rq)
{
+ struct intel_context *block_context =
+ request_to_scheduling_context(rq);
+
if (i915_sw_fence_signaled(&rq->submit)) {
- struct i915_sw_fence *fence = guc_context_block(ce);
+ struct i915_sw_fence *fence;
+ intel_context_get(ce);
+ fence = guc_context_block(block_context);
i915_sw_fence_wait(fence);
if (!i915_request_completed(rq)) {
__i915_request_skip(rq);
guc_reset_state(ce, intel_ring_wrap(ce->ring, rq->head),
true);
}
- guc_context_unblock(ce);
+
+ /*
+ * XXX: Racy if the context is reset, see comment in
+ * __guc_reset_context().
+ */
+ flush_work(&ce_to_guc(ce)->ct.requests.worker);
+
+ guc_context_unblock(block_context);
+ intel_context_put(ce);
}
}
@@ -1626,6 +2285,8 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
intel_wakeref_t wakeref;
unsigned long flags;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
guc_flush_submissions(guc);
spin_lock_irqsave(&ce->guc_state.lock, flags);
@@ -1662,7 +2323,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
if (!context_guc_id_invalid(ce))
with_intel_runtime_pm(runtime_pm, wakeref)
__guc_context_set_preemption_timeout(guc,
- ce->guc_id,
+ ce->guc_id.id,
1);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
}
@@ -1675,40 +2336,24 @@ static void guc_context_sched_disable(struct intel_context *ce)
struct intel_runtime_pm *runtime_pm = &ce->engine->gt->i915->runtime_pm;
intel_wakeref_t wakeref;
u16 guc_id;
- bool enabled;
- if (submission_disabled(guc) || context_guc_id_invalid(ce) ||
- !lrc_desc_registered(guc, ce->guc_id)) {
- clr_context_enabled(ce);
- goto unpin;
- }
-
- if (!context_enabled(ce))
- goto unpin;
+ GEM_BUG_ON(intel_context_is_child(ce));
spin_lock_irqsave(&ce->guc_state.lock, flags);
/*
- * We have to check if the context has been disabled by another thread.
- * We also have to check if the context has been pinned again as another
- * pin operation is allowed to pass this function. Checking the pin
- * count, within ce->guc_state.lock, synchronizes this function with
- * guc_request_alloc ensuring a request doesn't slip through the
- * 'context_pending_disable' fence. Checking within the spin lock (can't
- * sleep) ensures another process doesn't pin this context and generate
- * a request before we set the 'context_pending_disable' flag here.
+ * We have to check if the context has been disabled by another thread,
+ * check if submission has been disabled to seal a race with reset and
+ * finally check if any more requests have been committed to the
+ * context, ensuring that a request doesn't slip through the
+ * 'context_pending_disable' fence.
*/
- enabled = context_enabled(ce);
- if (unlikely(!enabled || submission_disabled(guc))) {
- if (enabled)
- clr_context_enabled(ce);
+ if (unlikely(!context_enabled(ce) || submission_disabled(guc) ||
+ context_has_committed_requests(ce))) {
+ clr_context_enabled(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
goto unpin;
}
- if (unlikely(atomic_add_unless(&ce->pin_count, -2, 2))) {
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- return;
- }
guc_id = prep_context_pending_disable(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
@@ -1724,21 +2369,40 @@ unpin:
static inline void guc_lrc_desc_unpin(struct intel_context *ce)
{
struct intel_guc *guc = ce_to_guc(ce);
+ struct intel_gt *gt = guc_to_gt(guc);
+ unsigned long flags;
+ bool disabled;
- GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id));
- GEM_BUG_ON(ce != __get_context(guc, ce->guc_id));
+ GEM_BUG_ON(!intel_gt_pm_is_awake(gt));
+ GEM_BUG_ON(!lrc_desc_registered(guc, ce->guc_id.id));
+ GEM_BUG_ON(ce != __get_context(guc, ce->guc_id.id));
GEM_BUG_ON(context_enabled(ce));
- clr_context_registered(ce);
- deregister_context(ce, ce->guc_id, true);
+ /* Seal race with Reset */
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
+ disabled = submission_disabled(guc);
+ if (likely(!disabled)) {
+ __intel_gt_pm_get(gt);
+ set_context_destroyed(ce);
+ clr_context_registered(ce);
+ }
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
+ if (unlikely(disabled)) {
+ release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ return;
+ }
+
+ deregister_context(ce, ce->guc_id.id);
}
static void __guc_context_destroy(struct intel_context *ce)
{
- GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
- ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+ ce->guc_state.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+ GEM_BUG_ON(ce->guc_state.number_committed_requests);
lrc_fini(ce);
intel_context_fini(ce);
@@ -1756,76 +2420,86 @@ static void __guc_context_destroy(struct intel_context *ce)
}
}
+static void guc_flush_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce, *cn;
+ unsigned long flags;
+
+ GEM_BUG_ON(!submission_disabled(guc) &&
+ guc_submission_initialized(guc));
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_for_each_entry_safe(ce, cn,
+ &guc->submission_state.destroyed_contexts,
+ destroyed_link) {
+ list_del_init(&ce->destroyed_link);
+ __release_guc_id(guc, ce);
+ __guc_context_destroy(ce);
+ }
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void deregister_destroyed_contexts(struct intel_guc *guc)
+{
+ struct intel_context *ce, *cn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ list_for_each_entry_safe(ce, cn,
+ &guc->submission_state.destroyed_contexts,
+ destroyed_link) {
+ list_del_init(&ce->destroyed_link);
+ guc_lrc_desc_unpin(ce);
+ }
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+}
+
+static void destroyed_worker_func(struct work_struct *w)
+{
+ struct intel_guc *guc = container_of(w, struct intel_guc,
+ submission_state.destroyed_worker);
+ struct intel_gt *gt = guc_to_gt(guc);
+ int tmp;
+
+ with_intel_gt_pm(gt, tmp)
+ deregister_destroyed_contexts(guc);
+}
+
static void guc_context_destroy(struct kref *kref)
{
struct intel_context *ce = container_of(kref, typeof(*ce), ref);
- struct intel_runtime_pm *runtime_pm = ce->engine->uncore->rpm;
struct intel_guc *guc = ce_to_guc(ce);
- intel_wakeref_t wakeref;
unsigned long flags;
- bool disabled;
+ bool destroy;
/*
* If the guc_id is invalid this context has been stolen and we can free
* it immediately. Also can be freed immediately if the context is not
* registered with the GuC or the GuC is in the middle of a reset.
*/
- if (context_guc_id_invalid(ce)) {
- __guc_context_destroy(ce);
- return;
- } else if (submission_disabled(guc) ||
- !lrc_desc_registered(guc, ce->guc_id)) {
- release_guc_id(guc, ce);
- __guc_context_destroy(ce);
- return;
- }
-
- /*
- * We have to acquire the context spinlock and check guc_id again, if it
- * is valid it hasn't been stolen and needs to be deregistered. We
- * delete this context from the list of unpinned guc_ids available to
- * steal to seal a race with guc_lrc_desc_pin(). When the G2H CTB
- * returns indicating this context has been deregistered the guc_id is
- * returned to the pool of available guc_ids.
- */
- spin_lock_irqsave(&guc->contexts_lock, flags);
- if (context_guc_id_invalid(ce)) {
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
- __guc_context_destroy(ce);
- return;
+ spin_lock_irqsave(&guc->submission_state.lock, flags);
+ destroy = submission_disabled(guc) || context_guc_id_invalid(ce) ||
+ !lrc_desc_registered(guc, ce->guc_id.id);
+ if (likely(!destroy)) {
+ if (!list_empty(&ce->guc_id.link))
+ list_del_init(&ce->guc_id.link);
+ list_add_tail(&ce->destroyed_link,
+ &guc->submission_state.destroyed_contexts);
+ } else {
+ __release_guc_id(guc, ce);
}
-
- if (!list_empty(&ce->guc_id_link))
- list_del_init(&ce->guc_id_link);
- spin_unlock_irqrestore(&guc->contexts_lock, flags);
-
- /* Seal race with Reset */
- spin_lock_irqsave(&ce->guc_state.lock, flags);
- disabled = submission_disabled(guc);
- if (likely(!disabled))
- set_context_destroyed(ce);
- spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- if (unlikely(disabled)) {
- release_guc_id(guc, ce);
+ spin_unlock_irqrestore(&guc->submission_state.lock, flags);
+ if (unlikely(destroy)) {
__guc_context_destroy(ce);
return;
}
/*
- * We defer GuC context deregistration until the context is destroyed
- * in order to save on CTBs. With this optimization ideally we only need
- * 1 CTB to register the context during the first pin and 1 CTB to
- * deregister the context when the context is destroyed. Without this
- * optimization, a CTB would be needed every pin & unpin.
- *
- * XXX: Need to acqiure the runtime wakeref as this can be triggered
- * from context_free_worker when runtime wakeref is not held.
- * guc_lrc_desc_unpin requires the runtime as a GuC register is written
- * in H2G CTB to deregister the context. A future patch may defer this
- * H2G CTB if the runtime wakeref is zero.
+ * We use a worker to issue the H2G to deregister the context, as doing so
+ * may take the GT PM for the first time, which isn't allowed from an
+ * atomic context.
*/
- with_intel_runtime_pm(runtime_pm, wakeref)
- guc_lrc_desc_unpin(ce);
+ queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
}
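/*
 * A rough sketch (not from the patch itself) of the deferred-destroy flow
 * wired up above, assuming destroyed_contexts and destroyed_worker are
 * initialised with the rest of guc->submission_state:
 *
 *   guc_context_destroy()                        // may run in atomic context
 *     list_add_tail(&ce->destroyed_link,
 *                   &guc->submission_state.destroyed_contexts);
 *     queue_work(system_unbound_wq,
 *                &guc->submission_state.destroyed_worker);
 *
 *   destroyed_worker_func()                      // process context
 *     with_intel_gt_pm(gt, tmp)                  // GT PM is safe to take here
 *       deregister_destroyed_contexts(guc);      // one H2G per parked context
 */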
static int guc_context_alloc(struct intel_context *ce)
@@ -1839,20 +2513,23 @@ static void guc_context_set_prio(struct intel_guc *guc,
{
u32 action[] = {
INTEL_GUC_ACTION_SET_CONTEXT_PRIORITY,
- ce->guc_id,
+ ce->guc_id.id,
prio,
};
GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
prio > GUC_CLIENT_PRIORITY_NORMAL);
+ lockdep_assert_held(&ce->guc_state.lock);
- if (ce->guc_prio == prio || submission_disabled(guc) ||
- !context_registered(ce))
+ if (ce->guc_state.prio == prio || submission_disabled(guc) ||
+ !context_registered(ce)) {
+ ce->guc_state.prio = prio;
return;
+ }
guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
- ce->guc_prio = prio;
+ ce->guc_state.prio = prio;
trace_intel_context_set_prio(ce);
}
@@ -1871,25 +2548,25 @@ static inline u8 map_i915_prio_to_guc_prio(int prio)
static inline void add_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
- lockdep_assert_held(&ce->guc_active.lock);
- GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
- ++ce->guc_prio_count[guc_prio];
+ ++ce->guc_state.prio_count[guc_prio];
/* Overflow protection */
- GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
}
static inline void sub_context_inflight_prio(struct intel_context *ce,
u8 guc_prio)
{
- lockdep_assert_held(&ce->guc_active.lock);
- GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+ lockdep_assert_held(&ce->guc_state.lock);
+ GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_state.prio_count));
/* Underflow protection */
- GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+ GEM_WARN_ON(!ce->guc_state.prio_count[guc_prio]);
- --ce->guc_prio_count[guc_prio];
+ --ce->guc_state.prio_count[guc_prio];
}
static inline void update_context_prio(struct intel_context *ce)
@@ -1900,10 +2577,10 @@ static inline void update_context_prio(struct intel_context *ce)
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH != 0);
BUILD_BUG_ON(GUC_CLIENT_PRIORITY_KMD_HIGH > GUC_CLIENT_PRIORITY_NORMAL);
- lockdep_assert_held(&ce->guc_active.lock);
+ lockdep_assert_held(&ce->guc_state.lock);
- for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
- if (ce->guc_prio_count[i]) {
+ for (i = 0; i < ARRAY_SIZE(ce->guc_state.prio_count); ++i) {
+ if (ce->guc_state.prio_count[i]) {
guc_context_set_prio(guc, ce, i);
break;
}
@@ -1918,13 +2595,14 @@ static inline bool new_guc_prio_higher(u8 old_guc_prio, u8 new_guc_prio)
static void add_to_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(rq_prio(rq));
+ GEM_BUG_ON(intel_context_is_child(ce));
GEM_BUG_ON(rq->guc_prio == GUC_PRIO_FINI);
- spin_lock(&ce->guc_active.lock);
- list_move_tail(&rq->sched.link, &ce->guc_active.requests);
+ spin_lock(&ce->guc_state.lock);
+ list_move_tail(&rq->sched.link, &ce->guc_state.requests);
if (rq->guc_prio == GUC_PRIO_INIT) {
rq->guc_prio = new_guc_prio;
@@ -1936,12 +2614,12 @@ static void add_to_context(struct i915_request *rq)
}
update_context_prio(ce);
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
{
- lockdep_assert_held(&ce->guc_active.lock);
+ lockdep_assert_held(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_INIT &&
rq->guc_prio != GUC_PRIO_FINI) {
@@ -1953,9 +2631,11 @@ static void guc_prio_fini(struct i915_request *rq, struct intel_context *ce)
static void remove_from_context(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
- spin_lock_irq(&ce->guc_active.lock);
+ GEM_BUG_ON(intel_context_is_child(ce));
+
+ spin_lock_irq(&ce->guc_state.lock);
list_del_init(&rq->sched.link);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -1965,9 +2645,11 @@ static void remove_from_context(struct i915_request *rq)
guc_prio_fini(rq, ce);
- spin_unlock_irq(&ce->guc_active.lock);
+ decr_context_committed_requests(ce);
+
+ spin_unlock_irq(&ce->guc_state.lock);
- atomic_dec(&ce->guc_id_ref);
+ atomic_dec(&ce->guc_id.ref);
i915_request_notify_execute_cb_imm(rq);
}
@@ -1992,19 +2674,35 @@ static const struct intel_context_ops guc_context_ops = {
.destroy = guc_context_destroy,
.create_virtual = guc_create_virtual,
+ .create_parallel = guc_create_parallel,
};
+static void submit_work_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq = container_of(wrk, typeof(*rq), submit_work);
+
+ might_lock(&rq->engine->sched_engine->lock);
+ i915_sw_fence_complete(&rq->submit);
+}
+
static void __guc_signal_context_fence(struct intel_context *ce)
{
- struct i915_request *rq;
+ struct i915_request *rq, *rn;
lockdep_assert_held(&ce->guc_state.lock);
if (!list_empty(&ce->guc_state.fences))
trace_intel_context_fence_release(ce);
- list_for_each_entry(rq, &ce->guc_state.fences, guc_fence_link)
- i915_sw_fence_complete(&rq->submit);
+ /*
+ * Use irq_work to ensure the locking order of sched_engine->lock ->
+ * ce->guc_state.lock is preserved.
+ */
+ list_for_each_entry_safe(rq, rn, &ce->guc_state.fences,
+ guc_fence_link) {
+ list_del(&rq->guc_fence_link);
+ irq_work_queue(&rq->submit_work);
+ }
INIT_LIST_HEAD(&ce->guc_state.fences);
}
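/*
 * A sketch (not part of the patch) of the lock inversion the irq_work above
 * avoids, assuming completing rq->submit can lead straight into submission:
 *
 *   submission path:                    completing the fence inline here:
 *     spin_lock(&sched_engine->lock);     spin_lock(&ce->guc_state.lock);
 *     spin_lock(&ce->guc_state.lock);     i915_sw_fence_complete(&rq->submit);
 *                                           -> spin_lock(&sched_engine->lock);
 *
 * Bouncing the completion through rq->submit_work keeps the documented
 * sched_engine->lock -> ce->guc_state.lock ordering.
 */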
@@ -2013,6 +2711,8 @@ static void guc_signal_context_fence(struct intel_context *ce)
{
unsigned long flags;
+ GEM_BUG_ON(intel_context_is_child(ce));
+
spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_wait_for_deregister_to_register(ce);
__guc_signal_context_fence(ce);
@@ -2022,13 +2722,28 @@ static void guc_signal_context_fence(struct intel_context *ce)
static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
{
return (new_guc_id || test_bit(CONTEXT_LRCA_DIRTY, &ce->flags) ||
- !lrc_desc_registered(ce_to_guc(ce), ce->guc_id)) &&
+ !lrc_desc_registered(ce_to_guc(ce), ce->guc_id.id)) &&
!submission_disabled(ce_to_guc(ce));
}
+static void guc_context_init(struct intel_context *ce)
+{
+ const struct i915_gem_context *ctx;
+ int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+
+ rcu_read_lock();
+ ctx = rcu_dereference(ce->gem_context);
+ if (ctx)
+ prio = ctx->sched.priority;
+ rcu_read_unlock();
+
+ ce->guc_state.prio = map_i915_prio_to_guc_prio(prio);
+ set_bit(CONTEXT_GUC_INIT, &ce->flags);
+}
+
static int guc_request_alloc(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
struct intel_guc *guc = ce_to_guc(ce);
unsigned long flags;
int ret;
@@ -2057,14 +2772,17 @@ static int guc_request_alloc(struct i915_request *rq)
rq->reserved_space -= GUC_REQUEST_SIZE;
+ if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
+ guc_context_init(ce);
+
/*
* Call pin_guc_id here rather than in the pinning step as with
* dma_resv, contexts can be repeatedly pinned / unpinned trashing the
- * guc_ids and creating horrible race conditions. This is especially bad
- * when guc_ids are being stolen due to over subscription. By the time
+ * guc_id and creating horrible race conditions. This is especially bad
+ * when guc_id are being stolen due to over subscription. By the time
* this function is reached, it is guaranteed that the guc_id will be
* persistent until the generated request is retired. Thus, sealing these
- * race conditions. It is still safe to fail here if guc_ids are
+ * race conditions. It is still safe to fail here if guc_id are
* exhausted and return -EAGAIN to the user indicating that they can try
* again in the future.
*
@@ -2074,7 +2792,7 @@ static int guc_request_alloc(struct i915_request *rq)
* decremented on each retire. When it is zero, a lock around the
* increment (in pin_guc_id) is needed to seal a race with unpin_guc_id.
*/
- if (atomic_add_unless(&ce->guc_id_ref, 1, 0))
+ if (atomic_add_unless(&ce->guc_id.ref, 1, 0))
goto out;
ret = pin_guc_id(guc, ce); /* returns 1 if new guc_id assigned */
@@ -2087,7 +2805,7 @@ static int guc_request_alloc(struct i915_request *rq)
disable_submission(guc);
goto out; /* GPU will be reset */
}
- atomic_dec(&ce->guc_id_ref);
+ atomic_dec(&ce->guc_id.ref);
unpin_guc_id(guc, ce);
return ret;
}
@@ -2102,22 +2820,16 @@ out:
* schedule enable or context registration if either G2H is pending
* respectively. Once a G2H returns, the fence is released that is
* blocking these requests (see guc_signal_context_fence).
- *
- * We can safely check the below fields outside of the lock as it isn't
- * possible for these fields to transition from being clear to set but
- * converse is possible, hence the need for the check within the lock.
*/
- if (likely(!context_wait_for_deregister_to_register(ce) &&
- !context_pending_disable(ce)))
- return 0;
-
spin_lock_irqsave(&ce->guc_state.lock, flags);
if (context_wait_for_deregister_to_register(ce) ||
context_pending_disable(ce)) {
+ init_irq_work(&rq->submit_work, submit_work_cb);
i915_sw_fence_await(&rq->submit);
list_add_tail(&rq->guc_fence_link, &ce->guc_state.fences);
}
+ incr_context_committed_requests(ce);
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
return 0;
@@ -2135,8 +2847,30 @@ static int guc_virtual_context_pre_pin(struct intel_context *ce,
static int guc_virtual_context_pin(struct intel_context *ce, void *vaddr)
{
struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ int ret = __guc_context_pin(ce, engine, vaddr);
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
- return __guc_context_pin(ce, engine, vaddr);
+ if (likely(!ret))
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_get(engine);
+
+ return ret;
+}
+
+static void guc_virtual_context_unpin(struct intel_context *ce)
+{
+ intel_engine_mask_t tmp, mask = ce->engine->mask;
+ struct intel_engine_cs *engine;
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+
+ for_each_engine_masked(engine, ce->engine->gt, mask, tmp)
+ intel_engine_pm_put_async(engine);
}
static void guc_virtual_context_enter(struct intel_context *ce)
@@ -2173,7 +2907,98 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.pre_pin = guc_virtual_context_pre_pin,
.pin = guc_virtual_context_pin,
- .unpin = guc_context_unpin,
+ .unpin = guc_virtual_context_unpin,
+ .post_unpin = guc_context_post_unpin,
+
+ .ban = guc_context_ban,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .sched_disable = guc_context_sched_disable,
+
+ .destroy = guc_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+static int guc_parent_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+ struct intel_guc *guc = ce_to_guc(ce);
+ int ret;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ ret = pin_guc_id(guc, ce);
+ if (unlikely(ret < 0))
+ return ret;
+
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static int guc_child_context_pin(struct intel_context *ce, void *vaddr)
+{
+ struct intel_engine_cs *engine = guc_virtual_get_sibling(ce->engine, 0);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ __intel_context_pin(ce->parallel.parent);
+ return __guc_context_pin(ce, engine, vaddr);
+}
+
+static void guc_parent_context_unpin(struct intel_context *ce)
+{
+ struct intel_guc *guc = ce_to_guc(ce);
+
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ if (ce->parallel.last_rq)
+ i915_request_put(ce->parallel.last_rq);
+ unpin_guc_id(guc, ce);
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(context_enabled(ce));
+ GEM_BUG_ON(intel_context_is_barrier(ce));
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_unpin(ce);
+}
+
+static void guc_child_context_post_unpin(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_child(ce));
+ GEM_BUG_ON(!intel_context_is_pinned(ce->parallel.parent));
+ GEM_BUG_ON(!intel_engine_is_virtual(ce->engine));
+
+ lrc_post_unpin(ce);
+ intel_context_unpin(ce->parallel.parent);
+}
+
+static void guc_child_context_destroy(struct kref *kref)
+{
+ struct intel_context *ce = container_of(kref, typeof(*ce), ref);
+
+ __guc_context_destroy(ce);
+}
+
+static const struct intel_context_ops virtual_parent_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_parent_context_pin,
+ .unpin = guc_parent_context_unpin,
.post_unpin = guc_context_post_unpin,
.ban = guc_context_ban,
@@ -2190,6 +3015,110 @@ static const struct intel_context_ops virtual_guc_context_ops = {
.get_sibling = guc_virtual_get_sibling,
};
+static const struct intel_context_ops virtual_child_context_ops = {
+ .alloc = guc_virtual_context_alloc,
+
+ .pre_pin = guc_context_pre_pin,
+ .pin = guc_child_context_pin,
+ .unpin = guc_child_context_unpin,
+ .post_unpin = guc_child_context_post_unpin,
+
+ .cancel_request = guc_context_cancel_request,
+
+ .enter = guc_virtual_context_enter,
+ .exit = guc_virtual_context_exit,
+
+ .destroy = guc_child_context_destroy,
+
+ .get_sibling = guc_virtual_get_sibling,
+};
+
+/*
+ * The below override of the breadcrumbs is enabled when the user configures a
+ * context for parallel submission (multi-lrc, parent-child).
+ *
+ * The overridden breadcrumbs implement an algorithm which allows the GuC to
+ * safely preempt all the hw contexts configured for parallel submission
+ * between each BB. The contract between the i915 and GuC is that if the parent
+ * context can be preempted, all the children can be preempted, and the GuC will
+ * always try to preempt the parent before the children. A handshake between the
+ * parent / children breadcrumbs ensures the i915 holds up its end of the deal,
+ * creating a window to preempt between each set of BBs.
+ */
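/*
 * Illustrative summary (not from the patch) of the handshake the emitters
 * below implement for each set of BBs:
 *
 *   parent ring:                            each child ring:
 *     wait(join[i] == PARENT_GO_BB)           write(join[i] = PARENT_GO_BB)
 *     MI_ARB_ON_OFF (disable preemption)      wait(go == CHILD_GO_BB)
 *     write(go = CHILD_GO_BB)                 MI_ARB_ON_OFF (disable preemption)
 *     jump to BB                              jump to BB
 *
 * The fini breadcrumbs re-enable arbitration and run the same join/go
 * handshake with the *_FINI_BREADCRUMB values, opening the window in which
 * the GuC may preempt the whole parent/child set.
 */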
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags);
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs);
+
+static struct intel_context *
+guc_create_parallel(struct intel_engine_cs **engines,
+ unsigned int num_siblings,
+ unsigned int width)
+{
+ struct intel_engine_cs **siblings = NULL;
+ struct intel_context *parent = NULL, *ce, *err;
+ int i, j;
+
+ siblings = kmalloc_array(num_siblings,
+ sizeof(*siblings),
+ GFP_KERNEL);
+ if (!siblings)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < width; ++i) {
+ for (j = 0; j < num_siblings; ++j)
+ siblings[j] = engines[i * num_siblings + j];
+
+ ce = intel_engine_create_virtual(siblings, num_siblings,
+ FORCE_VIRTUAL);
+ if (!ce) {
+ err = ERR_PTR(-ENOMEM);
+ goto unwind;
+ }
+
+ if (i == 0) {
+ parent = ce;
+ parent->ops = &virtual_parent_context_ops;
+ } else {
+ ce->ops = &virtual_child_context_ops;
+ intel_context_bind_parent_child(parent, ce);
+ }
+ }
+
+ parent->parallel.fence_context = dma_fence_context_alloc(1);
+
+ parent->engine->emit_bb_start =
+ emit_bb_start_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_parent_no_preempt_mid_batch;
+ parent->engine->emit_fini_breadcrumb_dw =
+ 12 + 4 * parent->parallel.number_children;
+ for_each_child(parent, ce) {
+ ce->engine->emit_bb_start =
+ emit_bb_start_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb =
+ emit_fini_breadcrumb_child_no_preempt_mid_batch;
+ ce->engine->emit_fini_breadcrumb_dw = 16;
+ }
+
+ kfree(siblings);
+ return parent;
+
+unwind:
+ if (parent)
+ intel_context_put(parent);
+ kfree(siblings);
+ return err;
+}
+
static bool
guc_irq_enable_breadcrumbs(struct intel_breadcrumbs *b)
{
@@ -2249,7 +3178,7 @@ static void guc_init_breadcrumbs(struct intel_engine_cs *engine)
static void guc_bump_inflight_request_prio(struct i915_request *rq,
int prio)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
u8 new_guc_prio = map_i915_prio_to_guc_prio(prio);
/* Short circuit function */
@@ -2259,7 +3188,7 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
!new_guc_prio_higher(rq->guc_prio, new_guc_prio)))
return;
- spin_lock(&ce->guc_active.lock);
+ spin_lock(&ce->guc_state.lock);
if (rq->guc_prio != GUC_PRIO_FINI) {
if (rq->guc_prio != GUC_PRIO_INIT)
sub_context_inflight_prio(ce, rq->guc_prio);
@@ -2267,16 +3196,16 @@ static void guc_bump_inflight_request_prio(struct i915_request *rq,
add_context_inflight_prio(ce, rq->guc_prio);
update_context_prio(ce);
}
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void guc_retire_inflight_request_prio(struct i915_request *rq)
{
- struct intel_context *ce = rq->context;
+ struct intel_context *ce = request_to_scheduling_context(rq);
- spin_lock(&ce->guc_active.lock);
+ spin_lock(&ce->guc_state.lock);
guc_prio_fini(rq, ce);
- spin_unlock(&ce->guc_active.lock);
+ spin_unlock(&ce->guc_state.lock);
}
static void sanitize_hwsp(struct intel_engine_cs *engine)
@@ -2310,6 +3239,8 @@ static void guc_sanitize(struct intel_engine_cs *engine)
/* And scrub the dirty cachelines for the HWSP */
clflush_cache_range(engine->status_page.addr, PAGE_SIZE);
+
+ intel_engine_reset_pinned_contexts(engine);
}
static void setup_hwsp(struct intel_engine_cs *engine)
@@ -2385,9 +3316,13 @@ static inline void guc_init_lrc_mapping(struct intel_guc *guc)
* and even it did this code would be run again.
*/
- for_each_engine(engine, gt, id)
- if (engine->kernel_context)
- guc_kernel_context_pin(guc, engine->kernel_context);
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+
+ list_for_each_entry(ce, &engine->pinned_contexts_list,
+ pinned_contexts_link)
+ guc_kernel_context_pin(guc, ce);
+ }
}
static void guc_release(struct intel_engine_cs *engine)
@@ -2580,13 +3515,13 @@ g2h_context_lookup(struct intel_guc *guc, u32 desc_idx)
return NULL;
}
- return ce;
-}
+ if (unlikely(intel_context_is_child(ce))) {
+ drm_err(&guc_to_gt(guc)->i915->drm,
+ "Context is child, desc_idx %u", desc_idx);
+ return NULL;
+ }
-static void decr_outstanding_submission_g2h(struct intel_guc *guc)
-{
- if (atomic_dec_and_test(&guc->outstanding_submission_g2h))
- wake_up_all(&guc->ct.wq);
+ return ce;
}
int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
@@ -2607,6 +3542,13 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
trace_intel_context_deregister_done(ce);
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_deregister)) {
+ ce->drop_deregister = false;
+ return 0;
+ }
+#endif
+
if (context_wait_for_deregister_to_register(ce)) {
struct intel_runtime_pm *runtime_pm =
&ce->engine->gt->i915->runtime_pm;
@@ -2622,6 +3564,7 @@ int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
intel_context_put(ce);
} else if (context_destroyed(ce)) {
/* Context has been destroyed */
+ intel_gt_pm_put_async(guc_to_gt(guc));
release_guc_id(guc, ce);
__guc_context_destroy(ce);
}
@@ -2652,8 +3595,7 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
(!context_pending_enable(ce) &&
!context_pending_disable(ce)))) {
drm_err(&guc_to_gt(guc)->i915->drm,
- "Bad context sched_state 0x%x, 0x%x, desc_idx %u",
- atomic_read(&ce->guc_sched_state_no_lock),
+ "Bad context sched_state 0x%x, desc_idx %u",
ce->guc_state.sched_state, desc_idx);
return -EPROTO;
}
@@ -2661,10 +3603,26 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
trace_intel_context_sched_done(ce);
if (context_pending_enable(ce)) {
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_enable)) {
+ ce->drop_schedule_enable = false;
+ return 0;
+ }
+#endif
+
+ spin_lock_irqsave(&ce->guc_state.lock, flags);
clr_context_pending_enable(ce);
+ spin_unlock_irqrestore(&ce->guc_state.lock, flags);
} else if (context_pending_disable(ce)) {
bool banned;
+#ifdef CONFIG_DRM_I915_SELFTEST
+ if (unlikely(ce->drop_schedule_disable)) {
+ ce->drop_schedule_disable = false;
+ return 0;
+ }
+#endif
+
/*
* Unpin must be done before __guc_signal_context_fence,
* otherwise a race exists between the requests getting
@@ -2721,7 +3679,12 @@ static void guc_handle_context_reset(struct intel_guc *guc,
{
trace_intel_context_reset(ce);
- if (likely(!intel_context_is_banned(ce))) {
+ /*
+ * XXX: Racey if request cancellation has occurred, see comment in
+ * __guc_reset_context().
+ */
+ if (likely(!intel_context_is_banned(ce) &&
+ !context_blocked(ce))) {
capture_error_state(guc, ce);
guc_context_replay(ce);
}
@@ -2797,33 +3760,47 @@ void intel_guc_find_hung_context(struct intel_engine_cs *engine)
struct intel_context *ce;
struct i915_request *rq;
unsigned long index;
+ unsigned long flags;
/* Reset called during driver load? GuC not yet initialised! */
if (unlikely(!guc_submission_initialized(guc)))
return;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- if (!intel_context_is_pinned(ce))
+ if (!kref_get_unless_zero(&ce->ref))
continue;
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
- continue;
+ goto next;
} else {
if (ce->engine != engine)
- continue;
+ goto next;
}
- list_for_each_entry(rq, &ce->guc_active.requests, sched.link) {
+ list_for_each_entry(rq, &ce->guc_state.requests, sched.link) {
if (i915_test_request_state(rq) != I915_REQUEST_ACTIVE)
continue;
intel_engine_set_hung_context(engine, ce);
/* Can only cope with one hang at a time... */
- return;
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
+ goto done;
}
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+done:
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
@@ -2839,23 +3816,34 @@ void intel_guc_dump_active_requests(struct intel_engine_cs *engine,
if (unlikely(!guc_submission_initialized(guc)))
return;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- if (!intel_context_is_pinned(ce))
+ if (!kref_get_unless_zero(&ce->ref))
continue;
+ xa_unlock(&guc->context_lookup);
+
+ if (!intel_context_is_pinned(ce))
+ goto next;
+
if (intel_engine_is_virtual(ce->engine)) {
if (!(ce->engine->mask & engine->mask))
- continue;
+ goto next;
} else {
if (ce->engine != engine)
- continue;
+ goto next;
}
- spin_lock_irqsave(&ce->guc_active.lock, flags);
- intel_engine_dump_active_requests(&ce->guc_active.requests,
+ spin_lock(&ce->guc_state.lock);
+ intel_engine_dump_active_requests(&ce->guc_state.requests,
hung_rq, m);
- spin_unlock_irqrestore(&ce->guc_active.lock, flags);
+ spin_unlock(&ce->guc_state.lock);
+
+next:
+ intel_context_put(ce);
+ xa_lock(&guc->context_lookup);
}
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
}
void intel_guc_submission_print_info(struct intel_guc *guc,
@@ -2881,7 +3869,7 @@ void intel_guc_submission_print_info(struct intel_guc *guc,
priolist_for_each_request(rq, pl)
drm_printf(p, "guc_id=%u, seqno=%llu\n",
- rq->context->guc_id,
+ rq->context->guc_id.id,
rq->fence.seqno);
}
spin_unlock_irqrestore(&sched_engine->lock, flags);
@@ -2893,46 +3881,348 @@ static inline void guc_log_context_priority(struct drm_printer *p,
{
int i;
- drm_printf(p, "\t\tPriority: %d\n",
- ce->guc_prio);
+ drm_printf(p, "\t\tPriority: %d\n", ce->guc_state.prio);
drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
i < GUC_CLIENT_PRIORITY_NUM; ++i) {
drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
- i, ce->guc_prio_count[i]);
+ i, ce->guc_state.prio_count[i]);
}
drm_printf(p, "\n");
}
+static inline void guc_log_context(struct drm_printer *p,
+ struct intel_context *ce)
+{
+ drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id.id);
+ drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
+ drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
+ ce->ring->head,
+ ce->lrc_reg_state[CTX_RING_HEAD]);
+ drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
+ ce->ring->tail,
+ ce->lrc_reg_state[CTX_RING_TAIL]);
+ drm_printf(p, "\t\tContext Pin Count: %u\n",
+ atomic_read(&ce->pin_count));
+ drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
+ atomic_read(&ce->guc_id.ref));
+ drm_printf(p, "\t\tSchedule State: 0x%x\n\n",
+ ce->guc_state.sched_state);
+}
+
void intel_guc_submission_print_context_info(struct intel_guc *guc,
struct drm_printer *p)
{
struct intel_context *ce;
unsigned long index;
+ unsigned long flags;
+ xa_lock_irqsave(&guc->context_lookup, flags);
xa_for_each(&guc->context_lookup, index, ce) {
- drm_printf(p, "GuC lrc descriptor %u:\n", ce->guc_id);
- drm_printf(p, "\tHW Context Desc: 0x%08x\n", ce->lrc.lrca);
- drm_printf(p, "\t\tLRC Head: Internal %u, Memory %u\n",
- ce->ring->head,
- ce->lrc_reg_state[CTX_RING_HEAD]);
- drm_printf(p, "\t\tLRC Tail: Internal %u, Memory %u\n",
- ce->ring->tail,
- ce->lrc_reg_state[CTX_RING_TAIL]);
- drm_printf(p, "\t\tContext Pin Count: %u\n",
- atomic_read(&ce->pin_count));
- drm_printf(p, "\t\tGuC ID Ref Count: %u\n",
- atomic_read(&ce->guc_id_ref));
- drm_printf(p, "\t\tSchedule State: 0x%x, 0x%x\n\n",
- ce->guc_state.sched_state,
- atomic_read(&ce->guc_sched_state_no_lock));
+ GEM_BUG_ON(intel_context_is_child(ce));
+ guc_log_context(p, ce);
guc_log_context_priority(p, ce);
+
+ if (intel_context_is_parent(ce)) {
+ struct guc_process_desc *desc = __get_process_desc(ce);
+ struct intel_context *child;
+
+ drm_printf(p, "\t\tNumber children: %u\n",
+ ce->parallel.number_children);
+ drm_printf(p, "\t\tWQI Head: %u\n",
+ READ_ONCE(desc->head));
+ drm_printf(p, "\t\tWQI Tail: %u\n",
+ READ_ONCE(desc->tail));
+ drm_printf(p, "\t\tWQI Status: %u\n\n",
+ READ_ONCE(desc->wq_status));
+
+ if (ce->engine->emit_bb_start ==
+ emit_bb_start_parent_no_preempt_mid_batch) {
+ u8 i;
+
+ drm_printf(p, "\t\tChildren Go: %u\n\n",
+ get_children_go_value(ce));
+ for (i = 0; i < ce->parallel.number_children; ++i)
+ drm_printf(p, "\t\tChildren Join: %u\n",
+ get_children_join_value(ce, i));
+ }
+
+ for_each_child(ce, child)
+ guc_log_context(p, child);
+ }
+ }
+ xa_unlock_irqrestore(&guc->context_lookup, flags);
+}
+
+static inline u32 get_children_go_addr(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, go.semaphore);
+}
+
+static inline u32 get_children_join_addr(struct intel_context *ce,
+ u8 child_index)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ return i915_ggtt_offset(ce->state) +
+ __get_parent_scratch_offset(ce) +
+ offsetof(struct parent_scratch, join[child_index].semaphore);
+}
+
+#define PARENT_GO_BB 1
+#define PARENT_GO_FINI_BREADCRUMB 0
+#define CHILD_GO_BB 1
+#define CHILD_GO_FINI_BREADCRUMB 0
+static int emit_bb_start_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ u32 *cs;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ cs = intel_ring_begin(rq, 10 + 4 * ce->parallel.number_children);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_BB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_BB,
+ get_children_go_addr(ce),
+ 0);
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int emit_bb_start_child_no_preempt_mid_batch(struct i915_request *rq,
+ u64 offset, u32 len,
+ const unsigned int flags)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+ u32 *cs;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ cs = intel_ring_begin(rq, 12);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_BB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+ /* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_BB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ /* Turn off preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+
+ /* Jump to batch */
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 |
+ (flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static u32 *
+__emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ u8 i;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /* Wait on children */
+ for (i = 0; i < ce->parallel.number_children; ++i) {
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = PARENT_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_join_addr(ce, i);
+ *cs++ = 0;
+ }
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Tell children go */
+ cs = gen8_emit_ggtt_write(cs,
+ CHILD_GO_FINI_BREADCRUMB,
+ get_children_go_addr(ce),
+ 0);
+
+ return cs;
+}
+
+/*
+ * If this is true, a submission of multi-lrc requests had an error and the
+ * requests need to be skipped. The front end (execbuf IOCTL) should've called
+ * i915_request_skip which squashes the BB but we still need to emit the fini
+ * breadcrumb seqno write. At this point we don't know how many of the
+ * requests in the multi-lrc submission were generated so we can't do the
+ * handshake between the parent and children (e.g. if 4 requests should be
+ * generated but the 2nd hit an error only 1 would be seen by the GuC backend).
+ * Simply skip the handshake, but still emit the breadcrumb seqno, if an error
+ * has occurred on any of the requests in the submission / relationship.
+ */
+static inline bool skip_handshake(struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_SKIP_PARALLEL, &rq->fence.flags);
+}
+
+static u32 *
+emit_fini_breadcrumb_parent_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+ * NOP everything in __emit_fini_breadcrumb_parent_no_preempt_mid_batch,
+ * the -6 comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - 6));
+ cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ } else {
+ cs = __emit_fini_breadcrumb_parent_no_preempt_mid_batch(rq, cs);
+ }
+
+ /* Emit fini breadcrumb */
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
+}
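/*
 * A worked check (not from the patch) of the "-6" in the skip paths,
 * assuming gen8_emit_ggtt_write() emits 4 dwords (consistent with the ring
 * accounting above): the emits after the skip decision are always 6 dwords
 * (4 for the seqno write plus MI_USER_INTERRUPT and MI_NOOP). The parent's
 * emit_fini_breadcrumb_dw is programmed in guc_create_parallel() as
 * 12 + 4 * number_children, so its skip path NOPs out 6 + 4 * number_children
 * dwords of handshake; the child's is 16, so it NOPs out 16 - 6 = 10 dwords.
 */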
+
+static u32 *
+__emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+ struct intel_context *parent = intel_context_to_parent(ce);
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ /* Turn on preemption */
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ /* Signal parent */
+ cs = gen8_emit_ggtt_write(cs,
+ PARENT_GO_FINI_BREADCRUMB,
+ get_children_join_addr(parent,
+ ce->parallel.child_index),
+ 0);
+
+ /* Wait on parent for go */
+ *cs++ = (MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD);
+ *cs++ = CHILD_GO_FINI_BREADCRUMB;
+ *cs++ = get_children_go_addr(parent);
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *
+emit_fini_breadcrumb_child_no_preempt_mid_batch(struct i915_request *rq,
+ u32 *cs)
+{
+ struct intel_context *ce = rq->context;
+
+ GEM_BUG_ON(!intel_context_is_child(ce));
+
+ if (unlikely(skip_handshake(rq))) {
+ /*
+ * NOP everything in __emit_fini_breadcrumb_child_no_preempt_mid_batch,
+ * the -6 comes from the length of the emits below.
+ */
+ memset(cs, 0, sizeof(u32) *
+ (ce->engine->emit_fini_breadcrumb_dw - 6));
+ cs += ce->engine->emit_fini_breadcrumb_dw - 6;
+ } else {
+ cs = __emit_fini_breadcrumb_child_no_preempt_mid_batch(rq, cs);
}
+
+ /* Emit fini breadcrumb */
+ cs = gen8_emit_ggtt_write(cs,
+ rq->fence.seqno,
+ i915_request_active_timeline(rq)->hwsp_offset,
+ 0);
+
+ /* User interrupt */
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
+
+ rq->tail = intel_ring_offset(rq, cs);
+
+ return cs;
}
static struct intel_context *
-guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
+guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count,
+ unsigned long flags)
{
struct guc_virtual_engine *ve;
struct intel_guc *guc;
@@ -2981,6 +4271,7 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count)
}
ve->base.mask |= sibling->mask;
+ ve->base.logical_mask |= sibling->logical_mask;
if (n != 0 && ve->base.class != sibling->class) {
DRM_DEBUG("invalid mixing of engine class, sibling %d, already %d\n",
@@ -3036,3 +4327,8 @@ bool intel_guc_virtual_engine_has_heartbeat(const struct intel_engine_cs *ve)
return false;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_guc.c"
+#include "selftest_guc_multi_lrc.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index fc5387b410a2..ff4b6869b80b 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -87,17 +87,25 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc)
vma->obj, true));
if (IS_ERR(vaddr)) {
i915_vma_unpin_and_release(&vma, 0);
- return PTR_ERR(vaddr);
+ err = PTR_ERR(vaddr);
+ goto unpin_out;
}
copied = intel_uc_fw_copy_rsa(&huc->fw, vaddr, vma->size);
- GEM_BUG_ON(copied < huc->fw.rsa_size);
-
i915_gem_object_unpin_map(vma->obj);
+ if (copied < huc->fw.rsa_size) {
+ err = -ENOMEM;
+ goto unpin_out;
+ }
+
huc->rsa_data = vma;
return 0;
+
+unpin_out:
+ i915_vma_unpin_and_release(&vma, 0);
+ return err;
}
static void intel_huc_rsa_data_destroy(struct intel_huc *huc)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
index 5733c15fd123..15998963b863 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_debugfs.c
@@ -5,7 +5,7 @@
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_huc.h"
#include "intel_huc_debugfs.h"
@@ -21,11 +21,11 @@ static int huc_info_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(huc_info);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(huc_info);
void intel_huc_debugfs_register(struct intel_huc *huc, struct dentry *root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "huc_info", &huc_info_fops, NULL },
};
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 86c318516e14..2fef3b0bbe95 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -35,7 +35,7 @@ static void uc_expand_default_options(struct intel_uc *uc)
}
/* Intermediate platforms are HuC authentication only */
- if (IS_DG1(i915) || IS_ALDERLAKE_S(i915)) {
+ if (IS_ALDERLAKE_S(i915)) {
i915->params.enable_guc = ENABLE_GUC_LOAD_HUC;
return;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
index 089d98662f46..c2f7924295e7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_debugfs.c
@@ -6,7 +6,7 @@
#include <linux/debugfs.h>
#include <drm/drm_print.h>
-#include "gt/debugfs_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
@@ -32,11 +32,11 @@ static int uc_usage_show(struct seq_file *m, void *data)
return 0;
}
-DEFINE_GT_DEBUGFS_ATTRIBUTE(uc_usage);
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage);
void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
- static const struct debugfs_gt_file files[] = {
+ static const struct intel_gt_debugfs_file files[] = {
{ "usage", &uc_usage_fops, NULL },
};
struct dentry *root;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 3a16d08608a5..3aa87be4f2e4 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -7,6 +7,7 @@
#include <linux/firmware.h>
#include <drm/drm_print.h>
+#include "gem/i915_gem_lmem.h"
#include "intel_uc_fw.h"
#include "intel_uc_fw_abi.h"
#include "i915_drv.h"
@@ -50,6 +51,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
fw_def(ALDERLAKE_P, 0, guc_def(adlp, 62, 0, 3), huc_def(tgl, 7, 9, 3)) \
fw_def(ALDERLAKE_S, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
+ fw_def(DG1, 0, guc_def(dg1, 62, 0, 0), huc_def(dg1, 7, 9, 3)) \
fw_def(ROCKETLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
fw_def(TIGERLAKE, 0, guc_def(tgl, 62, 0, 0), huc_def(tgl, 7, 9, 3)) \
fw_def(JASPERLAKE, 0, guc_def(ehl, 62, 0, 0), huc_def(ehl, 9, 0, 0)) \
@@ -370,7 +372,14 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
uc_fw->private_data_size = css->private_data_size;
- obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
+ if (!IS_ERR(obj))
+ obj->flags |= I915_BO_ALLOC_PM_EARLY;
+ } else {
+ obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
+ }
+
if (IS_ERR(obj)) {
err = PTR_ERR(obj);
goto fail;
@@ -413,20 +422,25 @@ static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
{
struct drm_i915_gem_object *obj = uc_fw->obj;
struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
- struct i915_vma dummy = {
- .node.start = uc_fw_ggtt_offset(uc_fw),
- .node.size = obj->base.size,
- .pages = obj->mm.pages,
- .vm = &ggtt->vm,
- };
+ struct i915_vma *dummy = &uc_fw->dummy;
+ u32 pte_flags = 0;
+
+ dummy->node.start = uc_fw_ggtt_offset(uc_fw);
+ dummy->node.size = obj->base.size;
+ dummy->pages = obj->mm.pages;
+ dummy->vm = &ggtt->vm;
GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
- GEM_BUG_ON(dummy.node.size > ggtt->uc_fw.size);
+ GEM_BUG_ON(dummy->node.size > ggtt->uc_fw.size);
/* uc_fw->obj cache domains were not controlled across suspend */
- drm_clflush_sg(dummy.pages);
+ if (i915_gem_object_has_struct_page(obj))
+ drm_clflush_sg(dummy->pages);
- ggtt->vm.insert_entries(&ggtt->vm, &dummy, I915_CACHE_NONE, 0);
+ if (i915_gem_object_is_lmem(obj))
+ pte_flags |= PTE_LM;
+
+ ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
}
static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
@@ -585,13 +599,68 @@ void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
*/
size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
{
- struct sg_table *pages = uc_fw->obj->mm.pages;
+ struct intel_memory_region *mr = uc_fw->obj->mm.region;
u32 size = min_t(u32, uc_fw->rsa_size, max_len);
u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
+ struct sgt_iter iter;
+ size_t count = 0;
+ int idx;
+ /* Called during reset handling, must be atomic [no fs_reclaim] */
GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
- return sg_pcopy_to_buffer(pages->sgl, pages->nents, dst, size, offset);
+ idx = offset >> PAGE_SHIFT;
+ offset = offset_in_page(offset);
+ if (i915_gem_object_has_struct_page(uc_fw->obj)) {
+ struct page *page;
+
+ for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = kmap_atomic(page);
+ memcpy(dst, vaddr + offset, len);
+ kunmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ } else {
+ dma_addr_t addr;
+
+ for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
+ u32 len = min_t(u32, size, PAGE_SIZE - offset);
+ void __iomem *vaddr;
+
+ if (idx > 0) {
+ idx--;
+ continue;
+ }
+
+ vaddr = io_mapping_map_atomic_wc(&mr->iomap,
+ addr - mr->region.start);
+ memcpy_fromio(dst, vaddr + offset, len);
+ io_mapping_unmap_atomic(vaddr);
+
+ offset = 0;
+ dst += len;
+ size -= len;
+ count += len;
+ if (!size)
+ break;
+ }
+ }
+
+ return count;
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 99bb1fe1af66..1e00bf65639e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -10,6 +10,7 @@
#include "intel_uc_fw_abi.h"
#include "intel_device_info.h"
#include "i915_gem.h"
+#include "i915_vma.h"
struct drm_printer;
struct drm_i915_private;
@@ -75,6 +76,14 @@ struct intel_uc_fw {
bool user_overridden;
size_t size;
struct drm_i915_gem_object *obj;
+ /**
+ * @dummy: A vma used in binding the uc fw to ggtt. We can't define this
+ * vma on the stack as it can lead to a stack overflow, so we define it
+ * here. Safe to have 1 copy per uc fw because the binding is single
+ * threaded as it is done during driver load (inherently single threaded)
+ * or during a GT reset (mutex guarantees single threaded).
+ */
+ struct i915_vma dummy;
/*
* The firmware build process will generate a version header file with major and
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
new file mode 100644
index 000000000000..fb0e4a7bd8ca
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "selftests/intel_scheduler_helpers.h"
+
+static struct i915_request *nop_user_request(struct intel_context *ce,
+ struct i915_request *from)
+{
+ struct i915_request *rq;
+ int ret;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ if (from) {
+ ret = i915_sw_fence_await_dma_fence(&rq->submit,
+ &from->fence, 0,
+ I915_FENCE_GFP);
+ if (ret < 0) {
+ i915_request_put(rq);
+ return ERR_PTR(ret);
+ }
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
+}
+
+static int intel_guc_scrub_ctbs(void *arg)
+{
+ struct intel_gt *gt = arg;
+ int ret = 0;
+ int i;
+ struct i915_request *last[3] = {NULL, NULL, NULL}, *rq;
+ intel_wakeref_t wakeref;
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
+ engine = intel_selftest_find_any_engine(gt);
+
+ /* Submit requests and inject errors forcing G2H to be dropped */
+ for (i = 0; i < 3; ++i) {
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ ret = PTR_ERR(ce);
+ pr_err("Failed to create context, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ switch (i) {
+ case 0:
+ ce->drop_schedule_enable = true;
+ break;
+ case 1:
+ ce->drop_schedule_disable = true;
+ break;
+ case 2:
+ ce->drop_deregister = true;
+ break;
+ }
+
+ rq = nop_user_request(ce, NULL);
+ intel_context_put(ce);
+
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed to create request, %d: %d\n", i, ret);
+ goto err;
+ }
+
+ last[i] = rq;
+ }
+
+ for (i = 0; i < 3; ++i) {
+ ret = i915_request_wait(last[i], 0, HZ);
+ if (ret < 0) {
+ pr_err("Last request failed to complete: %d\n", ret);
+ goto err;
+ }
+ i915_request_put(last[i]);
+ last[i] = NULL;
+ }
+
+ /* Force all H2G / G2H to be submitted / processed */
+ intel_gt_retire_requests(gt);
+ msleep(500);
+
+ /* Scrub missing G2H */
+ intel_gt_handle_error(engine->gt, -1, 0, "selftest reset");
+
+ /* GT will not idle if G2H are lost */
+ ret = intel_gt_wait_for_idle(gt, HZ);
+ if (ret < 0) {
+ pr_err("GT failed to idle: %d\n", ret);
+ goto err;
+ }
+
+err:
+ for (i = 0; i < 3; ++i)
+ if (last[i])
+ i915_request_put(last[i]);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+
+ return ret;
+}
+
+int intel_guc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_scrub_ctbs),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
new file mode 100644
index 000000000000..50953c8e8b53
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "selftests/igt_spinner.h"
+#include "selftests/igt_reset.h"
+#include "selftests/intel_scheduler_helpers.h"
+#include "gt/intel_engine_heartbeat.h"
+#include "gem/selftests/mock_context.h"
+
+static void logical_sort(struct intel_engine_cs **engines, int num_engines)
+{
+ struct intel_engine_cs *sorted[MAX_ENGINE_INSTANCE + 1];
+ int i, j;
+
+ for (i = 0; i < num_engines; ++i)
+ for (j = 0; j < MAX_ENGINE_INSTANCE + 1; ++j) {
+ if (engines[j]->logical_mask & BIT(i)) {
+ sorted[i] = engines[j];
+ break;
+ }
+ }
+
+ memcpy(*engines, *sorted,
+ sizeof(struct intel_engine_cs *) * num_engines);
+}
+
+static struct intel_context *
+multi_lrc_create_parent(struct intel_gt *gt, u8 class,
+ unsigned long flags)
+{
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int i = 0;
+
+ for_each_engine(engine, gt, id) {
+ if (engine->class != class)
+ continue;
+
+ siblings[i++] = engine;
+ }
+
+ if (i <= 1)
+ return ERR_PTR(0);
+
+ logical_sort(siblings, i);
+
+ return intel_engine_create_parallel(siblings, 1, i);
+}
+
+static void multi_lrc_context_unpin(struct intel_context *ce)
+{
+ struct intel_context *child;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ for_each_child(ce, child)
+ intel_context_unpin(child);
+ intel_context_unpin(ce);
+}
+
+static void multi_lrc_context_put(struct intel_context *ce)
+{
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ /*
+ * Only the parent gets the creation ref put in the uAPI, the parent
+ * itself is responsible for creation ref put on the children.
+ */
+ intel_context_put(ce);
+}
+
+static struct i915_request *
+multi_lrc_nop_request(struct intel_context *ce)
+{
+ struct intel_context *child;
+ struct i915_request *rq, *child_rq;
+ int i = 0;
+
+ GEM_BUG_ON(!intel_context_is_parent(ce));
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ for_each_child(ce, child) {
+ child_rq = intel_context_create_request(child);
+ if (IS_ERR(child_rq))
+ goto child_error;
+
+ if (++i == ce->parallel.number_children)
+ set_bit(I915_FENCE_FLAG_SUBMIT_PARALLEL,
+ &child_rq->fence.flags);
+ i915_request_add(child_rq);
+ }
+
+ return rq;
+
+child_error:
+ i915_request_put(rq);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int __intel_guc_multi_lrc_basic(struct intel_gt *gt, unsigned int class)
+{
+ struct intel_context *parent;
+ struct i915_request *rq;
+ int ret;
+
+ parent = multi_lrc_create_parent(gt, class, 0);
+ if (IS_ERR(parent)) {
+ pr_err("Failed creating contexts: %ld", PTR_ERR(parent));
+ return PTR_ERR(parent);
+ } else if (!parent) {
+ pr_debug("Not enough engines in class: %d", class);
+ return 0;
+ }
+
+ rq = multi_lrc_nop_request(parent);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ pr_err("Failed creating requests: %d", ret);
+ goto out;
+ }
+
+ ret = intel_selftest_wait_for_rq(rq);
+ if (ret)
+ pr_err("Failed waiting on request: %d", ret);
+
+ i915_request_put(rq);
+
+ if (ret >= 0) {
+ ret = intel_gt_wait_for_idle(gt, HZ * 5);
+ if (ret < 0)
+ pr_err("GT failed to idle: %d\n", ret);
+ }
+
+out:
+ multi_lrc_context_unpin(parent);
+ multi_lrc_context_put(parent);
+ return ret;
+}
+
+static int intel_guc_multi_lrc_basic(void *arg)
+{
+ struct intel_gt *gt = arg;
+ unsigned int class;
+ int ret;
+
+ for (class = 0; class < MAX_ENGINE_CLASS + 1; ++class) {
+ ret = __intel_guc_multi_lrc_basic(gt, class);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int intel_guc_multi_lrc_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(intel_guc_multi_lrc_basic),
+ };
+ struct intel_gt *gt = &i915->gt;
+
+ if (intel_gt_is_wedged(gt))
+ return 0;
+
+ if (!intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ return intel_gt_live_subtests(tests, gt);
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index e5c2fdfc20e3..53d0cb327539 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -745,7 +745,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
@@ -849,7 +849,7 @@ retry:
*/
spt->shadow_page.type = type;
daddr = dma_map_page(kdev, spt->shadow_page.page,
- 0, 4096, PCI_DMA_BIDIRECTIONAL);
+ 0, 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(kdev, daddr)) {
gvt_vgpu_err("fail to map dma addr\n");
ret = -EINVAL;
@@ -865,7 +865,7 @@ retry:
return spt;
err_unmap_dma:
- dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
err_free_spt:
free_spt(spt);
return ERR_PTR(ret);
@@ -2409,8 +2409,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
return -ENOMEM;
}
- daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
- 4096, PCI_DMA_BIDIRECTIONAL);
+ daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
gvt_vgpu_err("fail to dmamap scratch_pt\n");
__free_page(virt_to_page(scratch_pt));
@@ -2461,7 +2460,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
if (vgpu->gtt.scratch_pt[i].page != NULL) {
daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
I915_GTT_PAGE_SHIFT);
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(vgpu->gtt.scratch_pt[i].page);
vgpu->gtt.scratch_pt[i].page = NULL;
vgpu->gtt.scratch_pt[i].page_mfn = 0;
@@ -2741,7 +2740,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
}
daddr = dma_map_page(dev, virt_to_page(page), 0,
- 4096, PCI_DMA_BIDIRECTIONAL);
+ 4096, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, daddr)) {
gvt_err("fail to dmamap scratch ggtt page\n");
__free_page(virt_to_page(page));
@@ -2755,7 +2754,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
ret = setup_spt_oos(gvt);
if (ret) {
gvt_err("fail to initialize SPT oos\n");
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_page);
return ret;
}
@@ -2779,7 +2778,7 @@ void intel_gvt_clean_gtt(struct intel_gvt *gvt)
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
- dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
__free_page(gvt->gtt.scratch_page);
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 7efa386449d1..20b82fb036f8 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -328,7 +328,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
return ret;
/* Setup DMA mapping. */
- *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
+ *dma_addr = dma_map_page(dev, page, 0, size, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, *dma_addr)) {
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
page_to_pfn(page), ret);
@@ -344,7 +344,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
{
struct device *dev = vgpu->gvt->gt->i915->drm.dev;
- dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1bb1be5c48c8..6c804102528b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -1386,7 +1386,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
enum intel_engine_id i;
int ret;
- ppgtt = i915_ppgtt_create(&i915->gt);
+ ppgtt = i915_ppgtt_create(&i915->gt, I915_BO_ALLOC_PM_EARLY);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 7b274c51cac0..6e2ad68f8f3f 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -4,6 +4,7 @@
*/
#include <linux/kmemleak.h>
+#include <linux/sizes.h>
#include "i915_buddy.h"
@@ -82,6 +83,7 @@ int i915_buddy_init(struct i915_buddy_mm *mm, u64 size, u64 chunk_size)
size = round_down(size, chunk_size);
mm->size = size;
+ mm->avail = size;
mm->chunk_size = chunk_size;
mm->max_order = ilog2(size) - ilog2(chunk_size);
@@ -155,6 +157,8 @@ void i915_buddy_fini(struct i915_buddy_mm *mm)
i915_block_free(mm, mm->roots[i]);
}
+ GEM_WARN_ON(mm->avail != mm->size);
+
kfree(mm->roots);
kfree(mm->free_list);
}
@@ -230,6 +234,7 @@ void i915_buddy_free(struct i915_buddy_mm *mm,
struct i915_buddy_block *block)
{
GEM_BUG_ON(!i915_buddy_block_is_allocated(block));
+ mm->avail += i915_buddy_block_size(mm, block);
__i915_buddy_free(mm, block);
}
@@ -283,6 +288,7 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
}
mark_allocated(block);
+ mm->avail -= i915_buddy_block_size(mm, block);
kmemleak_update_trace(block);
return block;
@@ -368,6 +374,7 @@ int i915_buddy_alloc_range(struct i915_buddy_mm *mm,
}
mark_allocated(block);
+ mm->avail -= i915_buddy_block_size(mm, block);
list_add_tail(&block->link, &allocated);
continue;
}
@@ -402,6 +409,44 @@ err_free:
return err;
}
+void i915_buddy_block_print(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ struct drm_printer *p)
+{
+ u64 start = i915_buddy_block_offset(block);
+ u64 size = i915_buddy_block_size(mm, block);
+
+ drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
+}
+
+void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p)
+{
+ int order;
+
+ drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
+ mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);
+
+ for (order = mm->max_order; order >= 0; order--) {
+ struct i915_buddy_block *block;
+ u64 count = 0, free;
+
+ list_for_each_entry(block, &mm->free_list[order], link) {
+ GEM_BUG_ON(!i915_buddy_block_is_free(block));
+ count++;
+ }
+
+ drm_printf(p, "order-%d ", order);
+
+ free = count * (mm->chunk_size << order);
+ if (free < SZ_1M)
+ drm_printf(p, "free: %lluKiB", free >> 10);
+ else
+ drm_printf(p, "free: %lluMiB", free >> 20);
+
+ drm_printf(p, ", pages: %llu\n", count);
+ }
+}
+
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_buddy.c"
#endif
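For reference, the per-order free-space figure printed by i915_buddy_print() above is just count * (mm->chunk_size << order). A small worked example, assuming a 4 KiB minimum chunk size (illustrative only, not part of the patch):

	u64 chunk_size = SZ_4K;				/* minimum block size */
	int order = 4;					/* order-4 block = 4 KiB << 4 = 64 KiB */
	u64 count = 3;					/* free blocks found on this order's list */
	u64 free = count * (chunk_size << order);	/* 196608 bytes */
	/* printed as "order-4 free: 192KiB, pages: 3" since free < SZ_1M */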
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index 3940d632f208..7077742112ac 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -10,6 +10,8 @@
#include <linux/list.h>
#include <linux/slab.h>
+#include <drm/drm_print.h>
+
struct i915_buddy_block {
#define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
#define I915_BUDDY_HEADER_STATE GENMASK_ULL(11, 10)
@@ -69,6 +71,7 @@ struct i915_buddy_mm {
/* Must be at least PAGE_SIZE */
u64 chunk_size;
u64 size;
+ u64 avail;
};
static inline u64
@@ -129,6 +132,11 @@ void i915_buddy_free(struct i915_buddy_mm *mm, struct i915_buddy_block *block);
void i915_buddy_free_list(struct i915_buddy_mm *mm, struct list_head *objects);
+void i915_buddy_print(struct i915_buddy_mm *mm, struct drm_printer *p);
+void i915_buddy_block_print(struct i915_buddy_mm *mm,
+ struct i915_buddy_block *block,
+ struct drm_printer *p);
+
void i915_buddy_module_exit(void);
int i915_buddy_module_init(void);
diff --git a/drivers/gpu/drm/i915/i915_config.c b/drivers/gpu/drm/i915/i915_config.c
index b79b5f6d2cfa..afb828dab53b 100644
--- a/drivers/gpu/drm/i915/i915_config.c
+++ b/drivers/gpu/drm/i915/i915_config.c
@@ -8,7 +8,7 @@
unsigned long
i915_fence_context_timeout(const struct drm_i915_private *i915, u64 context)
{
- if (context && IS_ACTIVE(CONFIG_DRM_I915_FENCE_TIMEOUT))
+ if (CONFIG_DRM_I915_FENCE_TIMEOUT && context)
return msecs_to_jiffies_timeout(CONFIG_DRM_I915_FENCE_TIMEOUT);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 44969f5dde50..fe638b5da7c0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,13 +32,15 @@
#include <drm/drm_debugfs.h>
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt.h"
#include "gt/intel_gt_buffer_pool.h"
#include "gt/intel_gt_clock_utils.h"
-#include "gt/intel_gt.h"
+#include "gt/intel_gt_debugfs.h"
#include "gt/intel_gt_pm.h"
+#include "gt/intel_gt_pm_debugfs.h"
#include "gt/intel_gt_requests.h"
-#include "gt/intel_reset.h"
#include "gt/intel_rc6.h"
+#include "gt/intel_reset.h"
#include "gt/intel_rps.h"
#include "gt/intel_sseu_debugfs.h"
@@ -48,7 +50,6 @@
#include "i915_scheduler.h"
#include "i915_trace.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
{
@@ -139,7 +140,6 @@ void
i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct intel_engine_cs *engine;
struct i915_vma *vma;
int pin_count = 0;
@@ -229,15 +229,12 @@ i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
seq_printf(m, " (stolen: %08llx)", obj->stolen->start);
if (i915_gem_object_is_framebuffer(obj))
seq_printf(m, " (fb)");
-
- engine = i915_gem_object_last_write_engine(obj);
- if (engine)
- seq_printf(m, " (%s)", engine->name);
}
static int i915_gem_object_info(struct seq_file *m, void *data)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct drm_printer p = drm_seq_file_printer(m);
struct intel_memory_region *mr;
enum intel_region_id id;
@@ -246,8 +243,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
atomic_read(&i915->mm.free_count),
i915->mm.shrink_memory);
for_each_memory_region(mr, i915, id)
- seq_printf(m, "%s: total:%pa, available:%pa bytes\n",
- mr->name, &mr->total, &mr->avail);
+ intel_memory_region_debug(mr, &p);
return 0;
}
@@ -354,232 +350,12 @@ static const struct file_operations i915_error_state_fops = {
static int i915_frequency_info(struct seq_file *m, void *unused)
{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_uncore *uncore = &dev_priv->uncore;
- struct intel_rps *rps = &dev_priv->gt.rps;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- if (GRAPHICS_VER(dev_priv) == 5) {
- u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL);
- u16 rgvstat = intel_uncore_read16(uncore, MEMSTAT_ILK);
-
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
- } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- u32 rpmodectl, freq_sts;
-
- rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
-
- vlv_punit_get(dev_priv);
- freq_sts = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
- vlv_punit_put(dev_priv);
-
- seq_printf(m, "PUNIT_REG_GPU_FREQ_STS: 0x%08x\n", freq_sts);
- seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq);
-
- seq_printf(m, "actual GPU freq: %d MHz\n",
- intel_gpu_freq(rps, (freq_sts >> 8) & 0xff));
-
- seq_printf(m, "current GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->cur_freq));
-
- seq_printf(m, "max GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->max_freq));
-
- seq_printf(m, "min GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->min_freq));
-
- seq_printf(m, "idle GPU freq: %d MHz\n",
- intel_gpu_freq(rps, rps->idle_freq));
-
- seq_printf(m,
- "efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(rps, rps->efficient_freq));
- } else if (GRAPHICS_VER(dev_priv) >= 6) {
- u32 rp_state_limits;
- u32 gt_perf_status;
- u32 rp_state_cap;
- u32 rpmodectl, rpinclimit, rpdeclimit;
- u32 rpstat, cagf, reqf;
- u32 rpupei, rpcurup, rpprevup;
- u32 rpdownei, rpcurdown, rpprevdown;
- u32 pm_ier, pm_imr, pm_isr, pm_iir, pm_mask;
- int max_freq;
-
- rp_state_limits = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_LIMITS);
- if (IS_GEN9_LP(dev_priv)) {
- rp_state_cap = intel_uncore_read(&dev_priv->uncore, BXT_RP_STATE_CAP);
- gt_perf_status = intel_uncore_read(&dev_priv->uncore, BXT_GT_PERF_STATUS);
- } else {
- rp_state_cap = intel_uncore_read(&dev_priv->uncore, GEN6_RP_STATE_CAP);
- gt_perf_status = intel_uncore_read(&dev_priv->uncore, GEN6_GT_PERF_STATUS);
- }
-
- /* RPSTAT1 is in the GT power well */
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
-
- reqf = intel_uncore_read(&dev_priv->uncore, GEN6_RPNSWREQ);
- if (GRAPHICS_VER(dev_priv) >= 9)
- reqf >>= 23;
- else {
- reqf &= ~GEN6_TURBO_DISABLE;
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- reqf >>= 24;
- else
- reqf >>= 25;
- }
- reqf = intel_gpu_freq(rps, reqf);
-
- rpmodectl = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CONTROL);
- rpinclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_UP_THRESHOLD);
- rpdeclimit = intel_uncore_read(&dev_priv->uncore, GEN6_RP_DOWN_THRESHOLD);
-
- rpstat = intel_uncore_read(&dev_priv->uncore, GEN6_RPSTAT1);
- rpupei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
- rpcurup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
- rpprevup = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
- rpdownei = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
- rpcurdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
- rpprevdown = intel_uncore_read(&dev_priv->uncore, GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
- cagf = intel_rps_read_actual_frequency(rps);
-
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-
- if (GRAPHICS_VER(dev_priv) >= 11) {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_ENABLE);
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN11_GPM_WGBOXPERF_INTR_MASK);
- /*
- * The equivalent to the PM ISR & IIR cannot be read
- * without affecting the current state of the system
- */
- pm_isr = 0;
- pm_iir = 0;
- } else if (GRAPHICS_VER(dev_priv) >= 8) {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IER(2));
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IMR(2));
- pm_isr = intel_uncore_read(&dev_priv->uncore, GEN8_GT_ISR(2));
- pm_iir = intel_uncore_read(&dev_priv->uncore, GEN8_GT_IIR(2));
- } else {
- pm_ier = intel_uncore_read(&dev_priv->uncore, GEN6_PMIER);
- pm_imr = intel_uncore_read(&dev_priv->uncore, GEN6_PMIMR);
- pm_isr = intel_uncore_read(&dev_priv->uncore, GEN6_PMISR);
- pm_iir = intel_uncore_read(&dev_priv->uncore, GEN6_PMIIR);
- }
- pm_mask = intel_uncore_read(&dev_priv->uncore, GEN6_PMINTRMSK);
-
- seq_printf(m, "Video Turbo Mode: %s\n",
- yesno(rpmodectl & GEN6_RP_MEDIA_TURBO));
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rpmodectl & GEN6_RP_ENABLE));
- seq_printf(m, "SW control enabled: %s\n",
- yesno((rpmodectl & GEN6_RP_MEDIA_MODE_MASK) ==
- GEN6_RP_MEDIA_SW_MODE));
-
- seq_printf(m, "PM IER=0x%08x IMR=0x%08x, MASK=0x%08x\n",
- pm_ier, pm_imr, pm_mask);
- if (GRAPHICS_VER(dev_priv) <= 10)
- seq_printf(m, "PM ISR=0x%08x IIR=0x%08x\n",
- pm_isr, pm_iir);
- seq_printf(m, "pm_intrmsk_mbz: 0x%08x\n",
- rps->pm_intrmsk_mbz);
- seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
- seq_printf(m, "Render p-state ratio: %d\n",
- (gt_perf_status & (GRAPHICS_VER(dev_priv) >= 9 ? 0x1ff00 : 0xff00)) >> 8);
- seq_printf(m, "Render p-state VID: %d\n",
- gt_perf_status & 0xff);
- seq_printf(m, "Render p-state limit: %d\n",
- rp_state_limits & 0xff);
- seq_printf(m, "RPSTAT1: 0x%08x\n", rpstat);
- seq_printf(m, "RPMODECTL: 0x%08x\n", rpmodectl);
- seq_printf(m, "RPINCLIMIT: 0x%08x\n", rpinclimit);
- seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
- seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
- seq_printf(m, "CAGF: %dMHz\n", cagf);
- seq_printf(m, "RP CUR UP EI: %d (%lldns)\n",
- rpupei,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpupei));
- seq_printf(m, "RP CUR UP: %d (%lldun)\n",
- rpcurup,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpcurup));
- seq_printf(m, "RP PREV UP: %d (%lldns)\n",
- rpprevup,
- intel_gt_pm_interval_to_ns(&dev_priv->gt, rpprevup));
- seq_printf(m, "Up threshold: %d%%\n",
- rps->power.up_threshold);
-
- seq_printf(m, "RP CUR DOWN EI: %d (%lldns)\n",
- rpdownei,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpdownei));
- seq_printf(m, "RP CUR DOWN: %d (%lldns)\n",
- rpcurdown,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpcurdown));
- seq_printf(m, "RP PREV DOWN: %d (%lldns)\n",
- rpprevdown,
- intel_gt_pm_interval_to_ns(&dev_priv->gt,
- rpprevdown));
- seq_printf(m, "Down threshold: %d%%\n",
- rps->power.down_threshold);
-
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 0 :
- rp_state_cap >> 16) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (rp_state_cap & 0xff00) >> 8;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
-
- max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 :
- rp_state_cap >> 0) & 0xff;
- max_freq *= (IS_GEN9_BC(dev_priv) ||
- GRAPHICS_VER(dev_priv) >= 11 ? GEN9_FREQ_SCALER : 1);
- seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
- intel_gpu_freq(rps, max_freq));
- seq_printf(m, "Max overclocked frequency: %dMHz\n",
- intel_gpu_freq(rps, rps->max_freq));
-
- seq_printf(m, "Current freq: %d MHz\n",
- intel_gpu_freq(rps, rps->cur_freq));
- seq_printf(m, "Actual freq: %d MHz\n", cagf);
- seq_printf(m, "Idle freq: %d MHz\n",
- intel_gpu_freq(rps, rps->idle_freq));
- seq_printf(m, "Min freq: %d MHz\n",
- intel_gpu_freq(rps, rps->min_freq));
- seq_printf(m, "Boost freq: %d MHz\n",
- intel_gpu_freq(rps, rps->boost_freq));
- seq_printf(m, "Max freq: %d MHz\n",
- intel_gpu_freq(rps, rps->max_freq));
- seq_printf(m,
- "efficient (RPe) frequency: %d MHz\n",
- intel_gpu_freq(rps, rps->efficient_freq));
- } else {
- seq_puts(m, "no P-state info available\n");
- }
+ struct drm_i915_private *i915 = node_to_i915(m->private);
+ struct intel_gt *gt = &i915->gt;
+ struct drm_printer p = drm_seq_file_printer(m);
- seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk.hw.cdclk);
- seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq);
- seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq);
+ intel_gt_pm_frequency_dump(gt, &p);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
return 0;
}
@@ -778,36 +554,18 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
-static int
-i915_wedged_get(void *data, u64 *val)
+static int i915_wedged_get(void *data, u64 *val)
{
struct drm_i915_private *i915 = data;
- int ret = intel_gt_terminally_wedged(&i915->gt);
- switch (ret) {
- case -EIO:
- *val = 1;
- return 0;
- case 0:
- *val = 0;
- return 0;
- default:
- return ret;
- }
+ return intel_gt_debugfs_reset_show(&i915->gt, val);
}
-static int
-i915_wedged_set(void *data, u64 val)
+static int i915_wedged_set(void *data, u64 val)
{
struct drm_i915_private *i915 = data;
- /* Flush any previous reset before applying for a new one */
- wait_event(i915->gt.reset.queue,
- !test_bit(I915_RESET_BACKOFF, &i915->gt.reset.flags));
-
- intel_gt_handle_error(&i915->gt, val, I915_ERROR_CAPTURE,
- "Manually set wedged engine mask = %llx", val);
- return 0;
+ return intel_gt_debugfs_reset_store(&i915->gt, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops,
@@ -952,27 +710,15 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
static int i915_forcewake_open(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct intel_gt *gt = &i915->gt;
- atomic_inc(&gt->user_wakeref);
- intel_gt_pm_get(gt);
- if (GRAPHICS_VER(i915) >= 6)
- intel_uncore_forcewake_user_get(gt->uncore);
-
- return 0;
+ return intel_gt_pm_debugfs_forcewake_user_open(&i915->gt);
}
static int i915_forcewake_release(struct inode *inode, struct file *file)
{
struct drm_i915_private *i915 = inode->i_private;
- struct intel_gt *gt = &i915->gt;
- if (GRAPHICS_VER(i915) >= 6)
- intel_uncore_forcewake_user_put(&i915->uncore);
- intel_gt_pm_put(gt);
- atomic_dec(&gt->user_wakeref);
-
- return 0;
+ return intel_gt_pm_debugfs_forcewake_user_release(&i915->gt);
}
static const struct file_operations i915_forcewake_fops = {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 59fb4c710c8c..b18a250e5d2e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -67,6 +67,8 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_rc6.h"
+#include "pxp/intel_pxp_pm.h"
+
#include "i915_debugfs.h"
#include "i915_drv.h"
#include "i915_ioc32.h"
@@ -82,9 +84,9 @@
#include "intel_dram.h"
#include "intel_gvt.h"
#include "intel_memory_region.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
#include "intel_region_ttm.h"
-#include "intel_sideband.h"
#include "vlv_suspend.h"
static const struct drm_driver driver;
@@ -97,7 +99,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
drm_err(&dev_priv->drm, "bridge device not found\n");
- return -1;
+ return -EIO;
}
return 0;
}
@@ -409,8 +411,9 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv)
if (i915_inject_probe_failure(dev_priv))
return -ENODEV;
- if (i915_get_bridge_dev(dev_priv))
- return -EIO;
+ ret = i915_get_bridge_dev(dev_priv);
+ if (ret < 0)
+ return ret;
ret = intel_uncore_init_mmio(&dev_priv->uncore);
if (ret < 0)
@@ -588,8 +591,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
pci_set_master(pdev);
- intel_gt_init_workarounds(dev_priv);
-
/* On the 945G/GM, the chipset reports the MSI capability on the
* integrated graphics even though the support isn't actually there
* according to the published specs. It doesn't appear to function
@@ -1096,9 +1097,7 @@ static int i915_drm_prepare(struct drm_device *dev)
* split out that work and pull it forward so that after point,
* the GPU is not woken again.
*/
- i915_gem_suspend(i915);
-
- return 0;
+ return i915_gem_backup_suspend(i915);
}
static int i915_drm_suspend(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 005b1cec7007..12256218634f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -323,15 +323,15 @@ struct intel_crtc;
struct intel_limit;
struct dpll;
-struct drm_i915_display_funcs {
- void (*get_cdclk)(struct drm_i915_private *dev_priv,
- struct intel_cdclk_config *cdclk_config);
- void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_config *cdclk_config,
- enum pipe pipe);
- int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
- int (*get_fifo_size)(struct drm_i915_private *dev_priv,
- enum i9xx_plane_id i9xx_plane);
+/* functions used internally in intel_pm.c */
+struct drm_i915_clock_gating_funcs {
+ void (*init_clock_gating)(struct drm_i915_private *dev_priv);
+};
+
+/* functions used for watermark calcs for display. */
+struct drm_i915_wm_disp_funcs {
+ /* update_wm is for legacy wm management */
+ void (*update_wm)(struct drm_i915_private *dev_priv);
int (*compute_pipe_wm)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_intermediate_wm)(struct intel_atomic_state *state,
@@ -343,39 +343,9 @@ struct drm_i915_display_funcs {
void (*optimize_watermarks)(struct intel_atomic_state *state,
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
- void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
- u8 (*calc_voltage_level)(int cdclk);
- /* Returns the active state of the crtc, and if the crtc is active,
- * fills out the pipe-config with the hw state. */
- bool (*get_pipe_config)(struct intel_crtc *,
- struct intel_crtc_state *);
- void (*get_initial_plane_config)(struct intel_crtc *,
- struct intel_initial_plane_config *);
- int (*crtc_compute_clock)(struct intel_crtc *crtc,
- struct intel_crtc_state *crtc_state);
- void (*crtc_enable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*crtc_disable)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*commit_modeset_enables)(struct intel_atomic_state *state);
- void (*commit_modeset_disables)(struct intel_atomic_state *state);
- void (*audio_codec_enable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state,
- const struct drm_connector_state *conn_state);
- void (*audio_codec_disable)(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state);
- void (*fdi_link_train)(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state);
- void (*init_clock_gating)(struct drm_i915_private *dev_priv);
- void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
- /* clock updates for mode set */
- /* cursor updates */
- /* render clock increase/decrease */
- /* display clock increase/decrease */
- /* pll clock increase/decrease */
+};
+struct intel_color_funcs {
int (*color_check)(struct intel_crtc_state *crtc_state);
/*
* Program double buffered color management registers during
@@ -394,6 +364,53 @@ struct drm_i915_display_funcs {
void (*read_luts)(struct intel_crtc_state *crtc_state);
};
+struct intel_audio_funcs {
+ void (*audio_codec_enable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
+ void (*audio_codec_disable)(struct intel_encoder *encoder,
+ const struct intel_crtc_state *old_crtc_state,
+ const struct drm_connector_state *old_conn_state);
+};
+
+struct intel_cdclk_funcs {
+ void (*get_cdclk)(struct drm_i915_private *dev_priv,
+ struct intel_cdclk_config *cdclk_config);
+ void (*set_cdclk)(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *cdclk_config,
+ enum pipe pipe);
+ int (*bw_calc_min_cdclk)(struct intel_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
+ u8 (*calc_voltage_level)(int cdclk);
+};
+
+struct intel_hotplug_funcs {
+ void (*hpd_irq_setup)(struct drm_i915_private *dev_priv);
+};
+
+struct intel_fdi_funcs {
+ void (*fdi_link_train)(struct intel_crtc *crtc,
+ const struct intel_crtc_state *crtc_state);
+};
+
+struct intel_dpll_funcs {
+ int (*crtc_compute_clock)(struct intel_crtc_state *crtc_state);
+};
+
+struct drm_i915_display_funcs {
+ /* Returns the active state of the crtc, and if the crtc is active,
+ * fills out the pipe-config with the hw state. */
+ bool (*get_pipe_config)(struct intel_crtc *,
+ struct intel_crtc_state *);
+ void (*get_initial_plane_config)(struct intel_crtc *,
+ struct intel_initial_plane_config *);
+ void (*crtc_enable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*crtc_disable)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*commit_modeset_enables)(struct intel_atomic_state *state);
+};
+
#define I915_COLOR_UNEVICTABLE (-1) /* a non-vma sharing the address space */
@@ -454,7 +471,6 @@ struct intel_fbc {
} fb;
unsigned int fence_y_offset;
- u16 gen9_wa_cfb_stride;
u16 interval;
s8 fence_id;
bool psr2_active;
@@ -479,9 +495,10 @@ struct intel_fbc {
u64 modifier;
} fb;
- int cfb_size;
+ unsigned int cfb_stride;
+ unsigned int cfb_size;
unsigned int fence_y_offset;
- u16 gen9_wa_cfb_stride;
+ u16 override_cfb_stride;
u16 interval;
s8 fence_id;
bool plane_visible;
@@ -636,22 +653,6 @@ i915_fence_timeout(const struct drm_i915_private *i915)
/* Amount of PSF GV points, BSpec precisely defines this */
#define I915_NUM_PSF_GV_POINTS 3
-struct ddi_vbt_port_info {
- /* Non-NULL if port present. */
- struct intel_bios_encoder_data *devdata;
-
- int max_tmds_clock;
-
- /* This is an index in the HDMI/DVI DDI buffer translation table. */
- u8 hdmi_level_shift;
- u8 hdmi_level_shift_set:1;
-
- u8 alternate_aux_channel;
- u8 alternate_ddc_pin;
-
- int dp_max_link_rate; /* 0 for not limited by VBT */
-};
-
enum psr_lines_to_wait {
PSR_0_LINES_TO_WAIT = 0,
PSR_1_LINE_TO_WAIT,
@@ -706,6 +707,7 @@ struct intel_vbt_data {
struct {
u16 pwm_freq_hz;
+ u16 brightness_precision_bits;
bool present;
bool active_low_pwm;
u8 min_brightness; /* min_brightness/255 of max */
@@ -732,7 +734,7 @@ struct intel_vbt_data {
struct list_head display_devices;
- struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
+ struct intel_bios_encoder_data *ports[I915_MAX_PORTS]; /* Non-NULL if port present. */
struct sdvo_device_mapping sdvo_mappings[2];
};
@@ -886,8 +888,6 @@ struct drm_i915_private {
*/
u32 gpio_mmio_base;
- u32 hsw_psr_mmio_adjust;
-
/* MMIO base address for MIPI regs */
u32 mipi_mmio_base;
@@ -974,8 +974,32 @@ struct drm_i915_private {
/* unbound hipri wq for page flips/plane updates */
struct workqueue_struct *flip_wq;
+ /* pm private clock gating functions */
+ const struct drm_i915_clock_gating_funcs *clock_gating_funcs;
+
+ /* pm display functions */
+ const struct drm_i915_wm_disp_funcs *wm_disp;
+
+ /* irq display functions */
+ const struct intel_hotplug_funcs *hotplug_funcs;
+
+ /* fdi display functions */
+ const struct intel_fdi_funcs *fdi_funcs;
+
+ /* display pll funcs */
+ const struct intel_dpll_funcs *dpll_funcs;
+
/* Display functions */
- struct drm_i915_display_funcs display;
+ const struct drm_i915_display_funcs *display;
+
+ /* Display internal color functions */
+ const struct intel_color_funcs *color_funcs;
+
+ /* Display internal audio functions */
+ const struct intel_audio_funcs *audio_funcs;
+
+ /* Display CDCLK functions */
+ const struct intel_cdclk_funcs *cdclk_funcs;
/* PCH chipset type */
enum intel_pch pch_type;
@@ -1022,8 +1046,6 @@ struct drm_i915_private {
*/
u8 active_pipes;
- struct i915_wa_list gt_wa_list;
-
struct i915_frontbuffer_tracking fb_tracking;
struct intel_atomic_helper {
@@ -1665,6 +1687,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv))
#define HAS_DP_MST(dev_priv) (INTEL_INFO(dev_priv)->display.has_dp_mst)
+#define HAS_DP20(dev_priv) (IS_DG2(dev_priv))
#define HAS_CDCLK_CRAWL(dev_priv) (INTEL_INFO(dev_priv)->display.has_cdclk_crawl)
#define HAS_DDI(dev_priv) (INTEL_INFO(dev_priv)->display.has_ddi)
@@ -1702,6 +1725,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
+#define HAS_PXP(dev_priv) ((IS_ENABLED(CONFIG_DRM_I915_PXP) && \
+ INTEL_INFO(dev_priv)->has_pxp) && \
+ VDBOX_MASK(&dev_priv->gt))
#define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch)
@@ -1721,6 +1747,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 12)
+#define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5)
+
/* Only valid when HAS_DISPLAY() is true */
#define INTEL_DISPLAY_ENABLED(dev_priv) \
(drm_WARN_ON(&(dev_priv)->drm, !HAS_DISPLAY(dev_priv)), !(dev_priv)->params.disable_display)
@@ -1881,11 +1909,11 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id)
{
struct i915_address_space *vm;
- rcu_read_lock();
+ xa_lock(&file_priv->vm_xa);
vm = xa_load(&file_priv->vm_xa, id);
- if (vm && !kref_get_unless_zero(&vm->ref))
- vm = NULL;
- rcu_read_unlock();
+ if (vm)
+ kref_get(&vm->ref);
+ xa_unlock(&file_priv->vm_xa);
return vm;
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 590efc8b0265..981e383d1a5d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1139,8 +1139,6 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
{
intel_gt_driver_release(&dev_priv->gt);
- intel_wa_list_free(&dev_priv->gt_wa_list);
-
intel_uc_cleanup_firmwares(&dev_priv->gt.uc);
i915_gem_drain_freed_objects(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 36489be4896b..cd5f2348a187 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -30,7 +30,7 @@ int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
do {
if (dma_map_sg_attrs(obj->base.dev->dev,
pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL,
+ DMA_BIDIRECTIONAL,
DMA_ATTR_SKIP_CPU_SYNC |
DMA_ATTR_NO_KERNEL_MAPPING |
DMA_ATTR_NO_WARN))
@@ -64,7 +64,7 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
usleep_range(100, 250);
dma_unmap_sg(i915->drm.dev, pages->sgl, pages->nents,
- PCI_DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
}
/**
diff --git a/drivers/gpu/drm/i915/i915_gem_ww.h b/drivers/gpu/drm/i915/i915_gem_ww.h
index f6b1a796667b..86f0fe343de6 100644
--- a/drivers/gpu/drm/i915/i915_gem_ww.h
+++ b/drivers/gpu/drm/i915/i915_gem_ww.h
@@ -11,8 +11,7 @@ struct i915_gem_ww_ctx {
struct ww_acquire_ctx ctx;
struct list_head obj_list;
struct drm_i915_gem_object *contended;
- unsigned short intr;
- unsigned short loop;
+ bool intr;
};
void i915_gem_ww_ctx_init(struct i915_gem_ww_ctx *ctx, bool intr);
@@ -20,31 +19,23 @@ void i915_gem_ww_ctx_fini(struct i915_gem_ww_ctx *ctx);
int __must_check i915_gem_ww_ctx_backoff(struct i915_gem_ww_ctx *ctx);
void i915_gem_ww_unlock_single(struct drm_i915_gem_object *obj);
-/* Internal functions used by the inlines! Don't use. */
+/* Internal function used by the inlines! Don't use. */
static inline int __i915_gem_ww_fini(struct i915_gem_ww_ctx *ww, int err)
{
- ww->loop = 0;
if (err == -EDEADLK) {
err = i915_gem_ww_ctx_backoff(ww);
if (!err)
- ww->loop = 1;
+ err = -EDEADLK;
}
- if (!ww->loop)
+ if (err != -EDEADLK)
i915_gem_ww_ctx_fini(ww);
return err;
}
-static inline void
-__i915_gem_ww_init(struct i915_gem_ww_ctx *ww, bool intr)
-{
- i915_gem_ww_ctx_init(ww, intr);
- ww->loop = 1;
-}
-
-#define for_i915_gem_ww(_ww, _err, _intr) \
- for (__i915_gem_ww_init(_ww, _intr); (_ww)->loop; \
- _err = __i915_gem_ww_fini(_ww, _err))
-
+#define for_i915_gem_ww(_ww, _err, _intr) \
+ for (i915_gem_ww_ctx_init(_ww, _intr), (_err) = -EDEADLK; \
+ (_err) == -EDEADLK; \
+ (_err) = __i915_gem_ww_fini(_ww, _err))
#endif
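The rewritten for_i915_gem_ww() loop above keys entirely off the error code: the body re-runs while the previous pass ended in -EDEADLK, and __i915_gem_ww_fini() either backs off and re-arms the loop or finishes the ww context. A minimal usage sketch; the locked object and the work done under the lock are placeholders, not part of this patch:

	struct i915_gem_ww_ctx ww;
	int err;

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);	/* may return -EDEADLK */
		if (err)
			continue;	/* -EDEADLK backs off and retries, other errors exit */

		/* ... work while obj is ww-locked ... */
	}
	/* here err is 0 or the first non-EDEADLK error; the ww context is already finished */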
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9cf6ac575de1..2a2d7643b551 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -431,6 +431,7 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
const struct sseu_dev_info *sseu = &ee->engine->gt->info.sseu;
int slice;
int subslice;
+ int iter;
err_printf(m, " INSTDONE: 0x%08x\n",
ee->instdone.instdone);
@@ -444,19 +445,38 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
if (GRAPHICS_VER(m->i915) <= 6)
return;
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.sampler[slice][subslice]);
+ if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 50)) {
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
- for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
- err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
- slice, subslice,
- ee->instdone.row[slice][subslice]);
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+ } else {
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.sampler[slice][subslice]);
+
+ for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice)
+ err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.row[slice][subslice]);
+ }
if (GRAPHICS_VER(m->i915) < 12)
return;
+ if (GRAPHICS_VER_FULL(m->i915) >= IP_VER(12, 55)) {
+ for_each_instdone_gslice_dss_xehp(m->i915, sseu, iter, slice, subslice)
+ err_printf(m, " GEOM_SVGUNIT_INSTDONE[%d][%d]: 0x%08x\n",
+ slice, subslice,
+ ee->instdone.geom_svg[slice][subslice]);
+ }
+
err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
ee->instdone.slice_common_extra[0]);
err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
@@ -733,7 +753,8 @@ static void err_print_gt(struct drm_i915_error_state_buf *m,
* only exists if the corresponding VCS engine is
* present.
*/
- if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
+ !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
continue;
err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i,
@@ -1612,7 +1633,8 @@ static void gt_record_regs(struct intel_gt_coredump *gt)
* only exists if the corresponding VCS engine is
* present.
*/
- if (!HAS_ENGINE(gt->_gt, _VCS(i * 2)))
+ if ((gt->_gt->info.sfc_mask & BIT(i)) == 0 ||
+ !HAS_ENGINE(gt->_gt, _VCS(i * 2)))
continue;
gt->sfc_done[i] =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9bc4f4a8e12e..77680bca46ee 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -359,9 +359,8 @@ void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void ilk_update_display_irq(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask, u32 enabled_irq_mask)
{
u32 new_val;
@@ -380,6 +379,16 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
}
}
+void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits)
+{
+ ilk_update_display_irq(i915, bits, bits);
+}
+
+void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits)
+{
+ ilk_update_display_irq(i915, bits, 0);
+}
+
/**
* bdw_update_port_irq - update DE port interrupt
* @dev_priv: driver private
@@ -419,10 +428,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
+ enum pipe pipe, u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
u32 new_val;
@@ -444,15 +452,27 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
}
}
+void bdw_enable_pipe_irq(struct drm_i915_private *i915,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(i915, pipe, bits, bits);
+}
+
+void bdw_disable_pipe_irq(struct drm_i915_private *i915,
+ enum pipe pipe, u32 bits)
+{
+ bdw_update_pipe_irq(i915, pipe, bits, 0);
+}
+
/**
* ibx_display_interrupt_update - update SDEIMR
* @dev_priv: driver private
* @interrupt_mask: mask of interrupt bits to update
* @enabled_irq_mask: mask of interrupt bits to enable
*/
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask)
+static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
+ u32 interrupt_mask,
+ u32 enabled_irq_mask)
{
u32 sdeimr = intel_uncore_read(&dev_priv->uncore, SDEIMR);
sdeimr &= ~interrupt_mask;
@@ -469,6 +489,16 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
intel_uncore_posting_read(&dev_priv->uncore, SDEIMR);
}
+void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+{
+ ibx_display_interrupt_update(i915, bits, bits);
+}
+
+void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits)
+{
+ ibx_display_interrupt_update(i915, bits, 0);
+}
+
u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
@@ -2093,22 +2123,6 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
if (de_iir & DE_ERR_INT_IVB)
ivb_err_int_handler(dev_priv);
- if (de_iir & DE_EDP_PSR_INT_HSW) {
- struct intel_encoder *encoder;
-
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- u32 psr_iir = intel_uncore_read(&dev_priv->uncore,
- EDP_PSR_IIR);
-
- intel_psr_irq_handler(intel_dp, psr_iir);
- intel_uncore_write(&dev_priv->uncore,
- EDP_PSR_IIR, psr_iir);
- break;
- }
- }
-
if (de_iir & DE_AUX_CHANNEL_A_IVB)
dp_aux_irq_handler(dev_priv);
@@ -4331,6 +4345,20 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
return ret;
}
+#define HPD_FUNCS(platform) \
+static const struct intel_hotplug_funcs platform##_hpd_funcs = { \
+ .hpd_irq_setup = platform##_hpd_irq_setup, \
+}
+
+HPD_FUNCS(i915);
+HPD_FUNCS(dg1);
+HPD_FUNCS(gen11);
+HPD_FUNCS(bxt);
+HPD_FUNCS(icp);
+HPD_FUNCS(spt);
+HPD_FUNCS(ilk);
+#undef HPD_FUNCS
+
/**
* intel_irq_init - initializes irq support
* @dev_priv: i915 device instance
@@ -4381,20 +4409,20 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
if (HAS_GMCH(dev_priv)) {
if (I915_HAS_HOTPLUG(dev_priv))
- dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &i915_hpd_funcs;
} else {
if (HAS_PCH_DG1(dev_priv))
- dev_priv->display.hpd_irq_setup = dg1_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &dg1_hpd_funcs;
else if (DISPLAY_VER(dev_priv) >= 11)
- dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &gen11_hpd_funcs;
else if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv))
- dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &bxt_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP)
- dev_priv->display.hpd_irq_setup = icp_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &icp_hpd_funcs;
else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT)
- dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &spt_hpd_funcs;
else
- dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
+ dev_priv->hotplug_funcs = &ilk_hpd_funcs;
}
}
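For clarity, each HPD_FUNCS(platform) invocation added above just stamps out a one-member vtable around the existing setup routine; for example, HPD_FUNCS(i915) expands to (expansion shown for illustration only):

	static const struct intel_hotplug_funcs i915_hpd_funcs = {
		.hpd_irq_setup = i915_hpd_irq_setup,
	};

intel_irq_init() then picks the matching table for dev_priv->hotplug_funcs, replacing the old dev_priv->display.hpd_irq_setup function pointer.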
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index e43b6734f21b..0eb90d271fa7 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -9,9 +9,9 @@
#include <linux/ktime.h>
#include <linux/types.h>
-#include "display/intel_display.h"
#include "i915_reg.h"
+enum pipe;
struct drm_crtc;
struct drm_device;
struct drm_display_mode;
@@ -40,46 +40,15 @@ void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
u32 mask,
u32 bits);
-void ilk_update_display_irq(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ilk_enable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, bits);
-}
-static inline void
-ilk_disable_display_irq(struct drm_i915_private *dev_priv, u32 bits)
-{
- ilk_update_display_irq(dev_priv, bits, 0);
-}
-void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void bdw_enable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, bits);
-}
-static inline void bdw_disable_pipe_irq(struct drm_i915_private *dev_priv,
- enum pipe pipe, u32 bits)
-{
- bdw_update_pipe_irq(dev_priv, pipe, bits, 0);
-}
-void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
- u32 interrupt_mask,
- u32 enabled_irq_mask);
-static inline void
-ibx_enable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, bits);
-}
-static inline void
-ibx_disable_display_interrupt(struct drm_i915_private *dev_priv, u32 bits)
-{
- ibx_display_interrupt_update(dev_priv, bits, 0);
-}
+
+void ilk_enable_display_irq(struct drm_i915_private *i915, u32 bits);
+void ilk_disable_display_irq(struct drm_i915_private *i915, u32 bits);
+
+void bdw_enable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+void bdw_disable_pipe_irq(struct drm_i915_private *i915, enum pipe pipe, u32 bits);
+
+void ibx_enable_display_interrupt(struct drm_i915_private *i915, u32 bits);
+void ibx_disable_display_interrupt(struct drm_i915_private *i915, u32 bits);
void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, u32 mask);
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index d8b4482c69d0..ab2295dd4500 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -67,8 +67,8 @@ static const struct {
{ .init = i915_mock_selftests },
{ .init = i915_pmu_init,
.exit = i915_pmu_exit },
- { .init = i915_register_pci_driver,
- .exit = i915_unregister_pci_driver },
+ { .init = i915_pci_register_driver,
+ .exit = i915_pci_unregister_driver },
{ .init = i915_perf_sysctl_register,
.exit = i915_perf_sysctl_unregister },
};
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index f27eceb82c0f..8d725b64592d 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -55,7 +55,7 @@ struct drm_printer;
param(int, enable_fbc, -1, 0600) \
param(int, enable_psr, -1, 0600) \
param(bool, psr_safest_params, false, 0400) \
- param(bool, enable_psr2_sel_fetch, false, 0400) \
+ param(bool, enable_psr2_sel_fetch, true, 0400) \
param(int, disable_power_well, -1, 0400) \
param(int, enable_ips, 1, 0600) \
param(int, invert_brightness, 0, 0600) \
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 1bbd09ad5287..169837de395d 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -537,8 +537,6 @@ static const struct intel_device_info vlv_info = {
BIT(TRANSCODER_C) | BIT(TRANSCODER_EDP), \
.display.has_ddi = 1, \
.display.has_fpga_dbg = 1, \
- .display.has_psr = 1, \
- .display.has_psr_hw_tracking = 1, \
.display.has_dp_mst = 1, \
.has_rc6p = 0 /* RC6p removed-by HSW */, \
HSW_PIPE_OFFSETS, \
@@ -642,6 +640,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
+ .display.has_psr = 1, \
+ .display.has_psr_hw_tracking = 1, \
.dbuf.size = 896 - 4, /* 4 blocks for bypass path allocation */ \
.dbuf.slice_mask = BIT(DBUF_S1)
@@ -865,6 +865,7 @@ static const struct intel_device_info jsl_info = {
}, \
TGL_CURSOR_OFFSETS, \
.has_global_mocs = 1, \
+ .has_pxp = 1, \
.display.has_dsb = 1
static const struct intel_device_info tgl_info = {
@@ -891,10 +892,11 @@ static const struct intel_device_info rkl_info = {
#define DGFX_FEATURES \
.memory_regions = REGION_SMEM | REGION_LMEM | REGION_STOLEN_LMEM, \
.has_llc = 0, \
+ .has_pxp = 0, \
.has_snoop = 1, \
.is_dgfx = 1
-static const struct intel_device_info dg1_info __maybe_unused = {
+static const struct intel_device_info dg1_info = {
GEN12_FEATURES,
DGFX_FEATURES,
.graphics_rel = 10,
@@ -912,7 +914,6 @@ static const struct intel_device_info adl_s_info = {
GEN12_FEATURES,
PLATFORM(INTEL_ALDERLAKE_S),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_hti = 1,
.display.has_psr_hw_tracking = 0,
.platform_engine_mask =
@@ -1115,6 +1116,7 @@ static const struct pci_device_id pciidlist[] = {
INTEL_RKL_IDS(&rkl_info),
INTEL_ADLS_IDS(&adl_s_info),
INTEL_ADLP_IDS(&adl_p_info),
+ INTEL_DG1_IDS(&dg1_info),
{0, 0, 0}
};
MODULE_DEVICE_TABLE(pci, pciidlist);
@@ -1234,12 +1236,12 @@ static struct pci_driver i915_pci_driver = {
.driver.pm = &i915_pm_ops,
};
-int i915_register_pci_driver(void)
+int i915_pci_register_driver(void)
{
return pci_register_driver(&i915_pci_driver);
}
-void i915_unregister_pci_driver(void)
+void i915_pci_unregister_driver(void)
{
pci_unregister_driver(&i915_pci_driver);
}
diff --git a/drivers/gpu/drm/i915/i915_pci.h b/drivers/gpu/drm/i915/i915_pci.h
index b386f319f52e..ee048c238174 100644
--- a/drivers/gpu/drm/i915/i915_pci.h
+++ b/drivers/gpu/drm/i915/i915_pci.h
@@ -1,8 +1,12 @@
+/* SPDX-License-Identifier: MIT */
/*
- * SPDX-License-Identifier: MIT
- *
* Copyright © 2021 Intel Corporation
*/
-int i915_register_pci_driver(void);
-void i915_unregister_pci_driver(void);
+#ifndef __I915_PCI_H__
+#define __I915_PCI_H__
+
+int i915_pci_register_driver(void);
+void i915_pci_unregister_driver(void);
+
+#endif /* __I915_PCI_H__ */
diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c
index e49da36c62fb..51b368be0fc4 100644
--- a/drivers/gpu/drm/i915/i915_query.c
+++ b/drivers/gpu/drm/i915/i915_query.c
@@ -124,7 +124,9 @@ query_engine_info(struct drm_i915_private *i915,
for_each_uabi_engine(engine, i915) {
info.engine.engine_class = engine->uabi_class;
info.engine.engine_instance = engine->uabi_instance;
+ info.flags = I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE;
info.capabilities = engine->uabi_capabilities;
+ info.logical_instance = ilog2(engine->logical_mask);
if (copy_to_user(info_ptr, &info, sizeof(info)))
return -EFAULT;
@@ -432,9 +434,6 @@ static int query_memregion_info(struct drm_i915_private *i915,
u32 total_length;
int ret, id, i;
- if (!IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM))
- return -ENODEV;
-
if (query_item->flags != 0)
return -EINVAL;
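On the engine-info additions above: info.flags now advertises I915_ENGINE_INFO_HAS_LOGICAL_INSTANCE, and info.logical_instance is recovered with ilog2(), i.e. the bit index of the engine's position in logical_mask. A tiny illustration, assuming a single-bit mask (not part of the patch):

	/* an engine whose logical_mask is BIT(2) reports logical_instance == 2 */
	u8 logical_instance = ilog2(BIT(2));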
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 4037030f0984..da9055c3ebf0 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1968,7 +1968,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
_ICL_PORT_PCS_LN(ln) + 4 * (dw))
#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy))
#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy))
-#define ICL_PORT_PCS_DW1_LN0(phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, 0, phy))
+#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy))
#define DCC_MODE_SELECT_MASK (0x3 << 20)
#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20)
#define COMMON_KEEPER_EN (1 << 26)
@@ -1989,7 +1989,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy))
#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy))
-#define ICL_PORT_TX_DW2_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(2, 0, phy))
+#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy))
#define SWING_SEL_UPPER(x) (((x) >> 3) << 15)
#define SWING_SEL_UPPER_MASK (1 << 15)
#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11)
@@ -2001,7 +2001,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy))
#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy))
-#define ICL_PORT_TX_DW4_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(4, 0, phy))
#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy))
#define LOADGEN_SELECT (1 << 31)
#define POST_CURSOR_1(x) ((x) << 12)
@@ -2013,7 +2012,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy))
#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy))
-#define ICL_PORT_TX_DW5_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(5, 0, phy))
+#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy))
#define TX_TRAINING_EN (1 << 31)
#define TAP2_DISABLE (1 << 30)
#define TAP3_DISABLE (1 << 29)
@@ -2024,14 +2023,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy))
#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy))
-#define ICL_PORT_TX_DW7_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(7, 0, phy))
#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy))
#define N_SCALAR(x) ((x) << 24)
#define N_SCALAR_MASK (0x7F << 24)
#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy))
#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy))
-#define ICL_PORT_TX_DW8_LN0(phy) _MMIO(_ICL_PORT_TX_DW_LN(8, 0, phy))
+#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy))
#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31)
#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29)
#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1)
@@ -2237,10 +2235,14 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004)
#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31)
+#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30)
#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29)
#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26)
#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24)
+#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16)
#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10)
+#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9)
+#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8)
#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5)
#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008)
@@ -2551,6 +2553,32 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RING_HWS_PGA(base) _MMIO((base) + 0x80)
#define RING_ID(base) _MMIO((base) + 0x8c)
#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080)
+
+#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4)
+/*
+ * CMD_CCTL read/write fields take a MOCS value and _not_ a table index.
+ * The lsb of each can be considered a separate enabling bit for encryption.
+ * 6:0 == default MOCS value for reads => 6:1 == table index for reads.
+ * 13:7 == default MOCS value for writes => 13:8 == table index for writes.
+ * 15:14 == Reserved => 31:30 are set to 0.
+ */
+#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7)
+#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0)
+#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \
+ CMD_CCTL_READ_OVERRIDE_MASK)
+#define CMD_CCTL_MOCS_OVERRIDE(write, read) \
+ (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \
+ REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1))
+
+#define BLIT_CCTL(base) _MMIO((base) + 0x204)
+#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8)
+#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0)
+#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \
+ BLIT_CCTL_SRC_MOCS_MASK)
+#define BLIT_CCTL_MOCS(dst, src) \
+ (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \
+ REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1))
+
#define RING_RESET_CTL(base) _MMIO((base) + 0xd0)
#define RESET_CTL_CAT_ERROR REG_BIT(2)
#define RESET_CTL_READY_TO_RESET REG_BIT(1)
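A worked example of the MOCS-index shift described in the CMD_CCTL comment above (illustrative only, not part of the patch): because bit 0 of each 7-bit field is effectively an encryption enable, a table index is shifted left by one before being placed in the field, which is what CMD_CCTL_MOCS_OVERRIDE() and BLIT_CCTL_MOCS() do.

	/* MOCS table index 3 for both reads and writes, encryption bit left clear */
	u32 cmd_cctl = CMD_CCTL_MOCS_OVERRIDE(3, 3);
	/* write field: REG_FIELD_PREP(GENMASK(13, 7), 3 << 1) == 6 << 7 == 0x300 */
	/* read field:  REG_FIELD_PREP(GENMASK(6, 0),  3 << 1) == 6      == 0x006 */
	/* cmd_cctl == 0x306 */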
@@ -2686,6 +2714,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
+#define XEHPG_INSTDONE_GEOM_SVG _MMIO(0x666c)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
#define SF_MCR_SELECTOR _MMIO(0xfd8)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -2820,6 +2849,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_MODE _MMIO(0x209c)
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 12)
+# define TGL_NESTED_BB_EN (1 << 12)
# define ASYNC_FLIP_PERF_DISABLE (1 << 14)
# define MODE_IDLE (1 << 9)
# define STOP_RING (1 << 8)
@@ -3080,8 +3110,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Fuse readout registers for GT */
#define HSW_PAVP_FUSE1 _MMIO(0x911C)
-#define HSW_F1_EU_DIS_SHIFT 16
-#define HSW_F1_EU_DIS_MASK (0x3 << HSW_F1_EU_DIS_SHIFT)
+#define XEHP_SFC_ENABLE_MASK REG_GENMASK(27, 24)
+#define HSW_F1_EU_DIS_MASK REG_GENMASK(17, 16)
#define HSW_F1_EU_DIS_10EUS 0
#define HSW_F1_EU_DIS_8EUS 1
#define HSW_F1_EU_DIS_6EUS 2
@@ -3150,7 +3180,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C)
-#define GEN12_GT_DSS_ENABLE _MMIO(0x913C)
+#define GEN12_GT_GEOMETRY_DSS_ENABLE _MMIO(0x913C)
+#define GEN12_GT_COMPUTE_DSS_ENABLE _MMIO(0x9144)
#define XEHP_EU_ENABLE _MMIO(0x9134)
#define XEHP_EU_ENA_MASK 0xFF
@@ -3356,6 +3387,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define ILK_DPFC_DISABLE_DUMMY0 (1 << 8)
#define ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL (1 << 14)
#define ILK_DPFC_NUKE_ON_ANY_MODIFICATION (1 << 23)
+#define GLK_FBC_STRIDE _MMIO(0x43228)
+#define FBC_STRIDE_OVERRIDE REG_BIT(15)
+#define FBC_STRIDE_MASK REG_GENMASK(14, 0)
+#define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x))
#define ILK_FBC_RT_BASE _MMIO(0x2128)
#define ILK_FBC_RT_VALID (1 << 0)
#define SNB_FBC_FRONT_BUFFER (1 << 1)
@@ -4113,6 +4148,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define RPN_CAP_MASK REG_GENMASK(23, 16)
#define BXT_RP_STATE_CAP _MMIO(0x138170)
#define GEN9_RP_STATE_LIMITS _MMIO(0x138148)
+#define XEHPSDV_RP_STATE_CAP _MMIO(0x250014)
/*
* Logical Context regs
@@ -4231,6 +4267,7 @@ enum {
#define DUPS1_GATING_DIS (1 << 15)
#define DUPS2_GATING_DIS (1 << 19)
#define DUPS3_GATING_DIS (1 << 23)
+#define CURSOR_GATING_DIS REG_BIT(28)
#define DPF_GATING_DIS (1 << 10)
#define DPF_RAM_GATING_DIS (1 << 9)
#define DPFR_GATING_DIS (1 << 8)
@@ -4509,11 +4546,9 @@ enum {
* HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one
* instance of it
*/
-#define _HSW_EDP_PSR_BASE 0x64800
#define _SRD_CTL_A 0x60800
#define _SRD_CTL_EDP 0x6f800
-#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust)
-#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A))
+#define EDP_PSR_CTL(tran) _MMIO(_TRANS2(tran, _SRD_CTL_A))
#define EDP_PSR_ENABLE (1 << 31)
#define BDW_PSR_SINGLE_FRAME (1 << 30)
#define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */
@@ -4557,22 +4592,13 @@ enum {
#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans))
#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans))
-#define _SRD_AUX_CTL_A 0x60810
-#define _SRD_AUX_CTL_EDP 0x6f810
-#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A))
-#define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26)
-#define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
-#define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16)
-#define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11)
-#define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff)
-
#define _SRD_AUX_DATA_A 0x60814
#define _SRD_AUX_DATA_EDP 0x6f814
-#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
+#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_TRANS2(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */
#define _SRD_STATUS_A 0x60840
#define _SRD_STATUS_EDP 0x6f840
-#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A))
+#define EDP_PSR_STATUS(tran) _MMIO(_TRANS2(tran, _SRD_STATUS_A))
#define EDP_PSR_STATUS_STATE_MASK (7 << 29)
#define EDP_PSR_STATUS_STATE_SHIFT 29
#define EDP_PSR_STATUS_STATE_IDLE (0 << 29)
@@ -4599,13 +4625,13 @@ enum {
#define _SRD_PERF_CNT_A 0x60844
#define _SRD_PERF_CNT_EDP 0x6f844
-#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A))
+#define EDP_PSR_PERF_CNT(tran) _MMIO(_TRANS2(tran, _SRD_PERF_CNT_A))
#define EDP_PSR_PERF_CNT_MASK 0xffffff
/* PSR_MASK on SKL+ */
#define _SRD_DEBUG_A 0x60860
#define _SRD_DEBUG_EDP 0x6f860
-#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A))
+#define EDP_PSR_DEBUG(tran) _MMIO(_TRANS2(tran, _SRD_DEBUG_A))
#define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28)
#define EDP_PSR_DEBUG_MASK_LPSP (1 << 27)
#define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26)
@@ -7230,6 +7256,7 @@ enum {
#define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */
#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */
#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28)
+#define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */
#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */
#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */
#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17)
@@ -7353,6 +7380,7 @@ enum {
#define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B)
#define PLANE_SURF(pipe, plane) \
_MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe))
+#define PLANE_SURF_DECRYPT REG_BIT(2)
#define _PLANE_OFFSET_1_B 0x711a4
#define _PLANE_OFFSET_2_B 0x712a4
@@ -8094,6 +8122,7 @@ enum {
/* irq instances for OTHER_CLASS */
#define OTHER_GUC_INSTANCE 0
#define OTHER_GTPM_INSTANCE 1
+#define OTHER_KCR_INSTANCE 4
#define GEN11_INTR_IDENTITY_REG(x) _MMIO(0x190060 + ((x) * 4))
@@ -8176,8 +8205,9 @@ enum {
#define GLK_CL0_PWR_DOWN (1 << 10)
#define CHICKEN_MISC_4 _MMIO(0x4208c)
-#define FBC_STRIDE_OVERRIDE (1 << 13)
-#define FBC_STRIDE_MASK 0x1FFF
+#define CHICKEN_FBC_STRIDE_OVERRIDE REG_BIT(13)
+#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
+#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
#define _CHICKEN_PIPESL_1_A 0x420b0
#define _CHICKEN_PIPESL_1_B 0x420b4
@@ -8216,6 +8246,7 @@ enum {
#define VSC_DATA_SEL_SOFTWARE_CONTROL REG_BIT(25) /* GLK */
#define FECSTALL_DIS_DPTSTREAM_DPTTG REG_BIT(23)
#define DDI_TRAINING_OVERRIDE_ENABLE REG_BIT(19)
+#define ADLP_1_BASED_X_GRANULARITY REG_BIT(18)
#define DDI_TRAINING_OVERRIDE_VALUE REG_BIT(18)
#define DDIE_TRAINING_OVERRIDE_ENABLE REG_BIT(17) /* CHICKEN_TRANS_A only */
#define DDIE_TRAINING_OVERRIDE_VALUE REG_BIT(16) /* CHICKEN_TRANS_A only */
@@ -9101,6 +9132,29 @@ enum {
#define TRANS_DP_HSYNC_ACTIVE_LOW 0
#define TRANS_DP_SYNC_MASK (3 << 3)
+#define _TRANS_DP2_CTL_A 0x600a0
+#define _TRANS_DP2_CTL_B 0x610a0
+#define _TRANS_DP2_CTL_C 0x620a0
+#define _TRANS_DP2_CTL_D 0x630a0
+#define TRANS_DP2_CTL(trans) _MMIO_TRANS(trans, _TRANS_DP2_CTL_A, _TRANS_DP2_CTL_B)
+#define TRANS_DP2_128B132B_CHANNEL_CODING REG_BIT(31)
+#define TRANS_DP2_PANEL_REPLAY_ENABLE REG_BIT(30)
+#define TRANS_DP2_DEBUG_ENABLE REG_BIT(23)
+
+#define _TRANS_DP2_VFREQHIGH_A 0x600a4
+#define _TRANS_DP2_VFREQHIGH_B 0x610a4
+#define _TRANS_DP2_VFREQHIGH_C 0x620a4
+#define _TRANS_DP2_VFREQHIGH_D 0x630a4
+#define TRANS_DP2_VFREQHIGH(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQHIGH_A, _TRANS_DP2_VFREQHIGH_B)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK REG_GENMASK(31, 8)
+#define TRANS_DP2_VFREQ_PIXEL_CLOCK(clk_hz) REG_FIELD_PREP(TRANS_DP2_VFREQ_PIXEL_CLOCK_MASK, (clk_hz))
+
+#define _TRANS_DP2_VFREQLOW_A 0x600a8
+#define _TRANS_DP2_VFREQLOW_B 0x610a8
+#define _TRANS_DP2_VFREQLOW_C 0x620a8
+#define _TRANS_DP2_VFREQLOW_D 0x630a8
+#define TRANS_DP2_VFREQLOW(trans) _MMIO_TRANS(trans, _TRANS_DP2_VFREQLOW_A, _TRANS_DP2_VFREQLOW_B)
+
/* SNB eDP training params */
/* SNB A-stepping */
#define EDP_LINK_TRAIN_400MV_0DB_SNB_A (0x38 << 22)
@@ -9715,6 +9769,11 @@ enum {
#define AUDIO_CP_READY(trans) ((1 << 1) << ((trans) * 4))
#define AUDIO_ELD_VALID(trans) ((1 << 0) << ((trans) * 4))
+#define _AUD_TCA_DP_2DOT0_CTRL 0x650bc
+#define _AUD_TCB_DP_2DOT0_CTRL 0x651bc
+#define AUD_DP_2DOT0_CTRL(trans) _MMIO_TRANS(trans, _AUD_TCA_DP_2DOT0_CTRL, _AUD_TCB_DP_2DOT0_CTRL)
+#define AUD_ENABLE_SDP_SPLIT REG_BIT(31)
+
#define HSW_AUD_CHICKENBIT _MMIO(0x65f10)
#define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15)
@@ -10160,7 +10219,7 @@ enum skl_power_gate {
#define TRANS_DDI_MODE_SELECT_DVI (1 << 24)
#define TRANS_DDI_MODE_SELECT_DP_SST (2 << 24)
#define TRANS_DDI_MODE_SELECT_DP_MST (3 << 24)
-#define TRANS_DDI_MODE_SELECT_FDI (4 << 24)
+#define TRANS_DDI_MODE_SELECT_FDI_OR_128B132B (4 << 24)
#define TRANS_DDI_BPC_MASK (7 << 20)
#define TRANS_DDI_BPC_8 (0 << 20)
#define TRANS_DDI_BPC_10 (1 << 20)
@@ -10963,7 +11022,6 @@ enum skl_power_gate {
_DKL_TX_DPCNTL1)
#define _DKL_TX_DPCNTL2 0x2C8
-#define DKL_TX_LOADGEN_SHARING_PMD_DISABLE REG_BIT(12)
#define DKL_TX_DP20BITMODE (1 << 2)
#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \
_DKL_PHY1_BASE, \
@@ -11048,12 +11106,6 @@ enum skl_power_gate {
#define DC_STATE_DEBUG_MASK_CORES (1 << 0)
#define DC_STATE_DEBUG_MASK_MEMORY_UP (1 << 1)
-#define BXT_P_CR_MC_BIOS_REQ_0_0_0 _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x7114)
-#define BXT_REQ_DATA_MASK 0x3F
-#define BXT_DRAM_CHANNEL_ACTIVE_SHIFT 12
-#define BXT_DRAM_CHANNEL_ACTIVE_MASK (0xF << 12)
-#define BXT_MEMORY_FREQ_MULTIPLIER_HZ 133333333
-
#define BXT_D_CR_DRP0_DUNIT8 0x1000
#define BXT_D_CR_DRP0_DUNIT9 0x1200
#define BXT_D_CR_DRP0_DUNIT_START 8
@@ -11084,9 +11136,7 @@ enum skl_power_gate {
#define BXT_DRAM_TYPE_LPDDR4 (0x2 << 22)
#define BXT_DRAM_TYPE_DDR4 (0x4 << 22)
-#define SKL_MEMORY_FREQ_MULTIPLIER_HZ 266666666
#define SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5E04)
-#define SKL_REQ_DATA_MASK (0xF << 0)
#define DG1_GEAR_TYPE REG_BIT(16)
#define SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5000)
@@ -11350,6 +11400,51 @@ enum skl_power_gate {
_PAL_PREC_MULTI_SEG_DATA_A, \
_PAL_PREC_MULTI_SEG_DATA_B)
+#define _MMIO_PLANE_GAMC(plane, i, a, b) _MMIO(_PIPE(plane, a, b) + (i) * 4)
+
+/* Plane CSC Registers */
+#define _PLANE_CSC_RY_GY_1_A 0x70210
+#define _PLANE_CSC_RY_GY_2_A 0x70310
+
+#define _PLANE_CSC_RY_GY_1_B 0x71210
+#define _PLANE_CSC_RY_GY_2_B 0x71310
+
+#define _PLANE_CSC_RY_GY_1(pipe) _PIPE(pipe, _PLANE_CSC_RY_GY_1_A, \
+ _PLANE_CSC_RY_GY_1_B)
+#define _PLANE_CSC_RY_GY_2(pipe) _PIPE(pipe, _PLANE_INPUT_CSC_RY_GY_2_A, \
+ _PLANE_INPUT_CSC_RY_GY_2_B)
+#define PLANE_CSC_COEFF(pipe, plane, index) _MMIO_PLANE(plane, \
+ _PLANE_CSC_RY_GY_1(pipe) + (index) * 4, \
+ _PLANE_CSC_RY_GY_2(pipe) + (index) * 4)
+
+#define _PLANE_CSC_PREOFF_HI_1_A 0x70228
+#define _PLANE_CSC_PREOFF_HI_2_A 0x70328
+
+#define _PLANE_CSC_PREOFF_HI_1_B 0x71228
+#define _PLANE_CSC_PREOFF_HI_2_B 0x71328
+
+#define _PLANE_CSC_PREOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_1_A, \
+ _PLANE_CSC_PREOFF_HI_1_B)
+#define _PLANE_CSC_PREOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_PREOFF_HI_2_A, \
+ _PLANE_CSC_PREOFF_HI_2_B)
+#define PLANE_CSC_PREOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_PREOFF_HI_1(pipe) + \
+ (index) * 4, _PLANE_CSC_PREOFF_HI_2(pipe) + \
+ (index) * 4)
+
+#define _PLANE_CSC_POSTOFF_HI_1_A 0x70234
+#define _PLANE_CSC_POSTOFF_HI_2_A 0x70334
+
+#define _PLANE_CSC_POSTOFF_HI_1_B 0x71234
+#define _PLANE_CSC_POSTOFF_HI_2_B 0x71334
+
+#define _PLANE_CSC_POSTOFF_HI_1(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_1_A, \
+ _PLANE_CSC_POSTOFF_HI_1_B)
+#define _PLANE_CSC_POSTOFF_HI_2(pipe) _PIPE(pipe, _PLANE_CSC_POSTOFF_HI_2_A, \
+ _PLANE_CSC_POSTOFF_HI_2_B)
+#define PLANE_CSC_POSTOFF(pipe, plane, index) _MMIO_PLANE(plane, _PLANE_CSC_POSTOFF_HI_1(pipe) + \
+ (index) * 4, _PLANE_CSC_POSTOFF_HI_2(pipe) + \
+ (index) * 4)
+
/* pipe CSC & degamma/gamma LUTs on CHV */
#define _CGM_PIPE_A_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x67900)
#define _CGM_PIPE_A_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x67904)
@@ -11616,6 +11711,14 @@ enum skl_power_gate {
_ICL_DSI_IO_MODECTL_1)
#define COMBO_PHY_MODE_DSI (1 << 0)
+/* TGL DSI Chicken register */
+#define _TGL_DSI_CHKN_REG_0 0x6B0C0
+#define _TGL_DSI_CHKN_REG_1 0x6B8C0
+#define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \
+ _TGL_DSI_CHKN_REG_0, \
+ _TGL_DSI_CHKN_REG_1)
+#define TGL_DSI_CHKN_LSHS_GB REG_GENMASK(15, 12)
+
/* Display Stream Splitter Control */
#define DSS_CTL1 _MMIO(0x67400)
#define SPLITTER_ENABLE (1 << 31)
@@ -12739,4 +12842,7 @@ enum skl_power_gate {
#define CLKREQ_POLICY _MMIO(0x101038)
#define CLKREQ_POLICY_MEM_UP_OVRD REG_BIT(1)
+#define CLKGATE_DIS_MISC _MMIO(0x46534)
+#define CLKGATE_DIS_MISC_DMASC_GATING_DIS REG_BIT(21)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 79da5eca60af..2c3cd6e635b5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1145,6 +1145,12 @@ __emit_semaphore_wait(struct i915_request *to,
return 0;
}
+static bool
+can_use_semaphore_wait(struct i915_request *to, struct i915_request *from)
+{
+ return to->engine->gt->ggtt == from->engine->gt->ggtt;
+}
+
static int
emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
@@ -1153,6 +1159,9 @@ emit_semaphore_wait(struct i915_request *to,
const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
struct i915_sw_fence *wait = &to->submit;
+ if (!can_use_semaphore_wait(to, from))
+ goto await_fence;
+
if (!intel_context_use_semaphores(to->context))
goto await_fence;
@@ -1256,7 +1265,8 @@ __i915_request_await_execution(struct i915_request *to,
* immediate execution, and so we must wait until it reaches the
* active slot.
*/
- if (intel_engine_has_semaphores(to->engine) &&
+ if (can_use_semaphore_wait(to, from) &&
+ intel_engine_has_semaphores(to->engine) &&
!i915_request_has_initial_breadcrumb(to)) {
err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
if (err < 0)
@@ -1325,6 +1335,25 @@ i915_request_await_external(struct i915_request *rq, struct dma_fence *fence)
return err;
}
+static inline bool is_parallel_rq(struct i915_request *rq)
+{
+ return intel_context_is_parallel(rq->context);
+}
+
+static inline struct intel_context *request_to_parent(struct i915_request *rq)
+{
+ return intel_context_to_parent(rq->context);
+}
+
+static bool is_same_parallel_context(struct i915_request *to,
+ struct i915_request *from)
+{
+ if (is_parallel_rq(to))
+ return request_to_parent(to) == request_to_parent(from);
+
+ return false;
+}
+
int
i915_request_await_execution(struct i915_request *rq,
struct dma_fence *fence)
@@ -1356,11 +1385,14 @@ i915_request_await_execution(struct i915_request *rq,
* want to run our callback in all cases.
*/
- if (dma_fence_is_i915(fence))
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
ret = __i915_request_await_execution(rq,
to_request(fence));
- else
+ } else {
ret = i915_request_await_external(rq, fence);
+ }
if (ret < 0)
return ret;
} while (--nchild);
@@ -1461,10 +1493,13 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
fence))
continue;
- if (dma_fence_is_i915(fence))
+ if (dma_fence_is_i915(fence)) {
+ if (is_same_parallel_context(rq, to_request(fence)))
+ continue;
ret = i915_request_await_request(rq, to_request(fence));
- else
+ } else {
ret = i915_request_await_external(rq, fence);
+ }
if (ret < 0)
return ret;
@@ -1540,35 +1575,51 @@ i915_request_await_object(struct i915_request *to,
}
static struct i915_request *
-__i915_request_add_to_timeline(struct i915_request *rq)
+__i915_request_ensure_parallel_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
{
- struct intel_timeline *timeline = i915_request_timeline(rq);
struct i915_request *prev;
- /*
- * Dependency tracking and request ordering along the timeline
- * is special cased so that we can eliminate redundant ordering
- * operations while building the request (we know that the timeline
- * itself is ordered, and here we guarantee it).
- *
- * As we know we will need to emit tracking along the timeline,
- * we embed the hooks into our request struct -- at the cost of
- * having to have specialised no-allocation interfaces (which will
- * be beneficial elsewhere).
- *
- * A second benefit to open-coding i915_request_await_request is
- * that we can apply a slight variant of the rules specialised
- * for timelines that jump between engines (such as virtual engines).
- * If we consider the case of virtual engine, we must emit a dma-fence
- * to prevent scheduling of the second request until the first is
- * complete (to maximise our greedy late load balancing) and this
- * precludes optimising to use semaphores serialisation of a single
- * timeline across engines.
- */
+ GEM_BUG_ON(!is_parallel_rq(rq));
+
+ prev = request_to_parent(rq)->parallel.last_rq;
+ if (prev) {
+ if (!__i915_request_is_complete(prev)) {
+ i915_sw_fence_await_sw_fence(&rq->submit,
+ &prev->submit,
+ &rq->submitq);
+
+ if (rq->engine->sched_engine->schedule)
+ __i915_sched_node_add_dependency(&rq->sched,
+ &prev->sched,
+ &rq->dep,
+ 0);
+ }
+ i915_request_put(prev);
+ }
+
+ request_to_parent(rq)->parallel.last_rq = i915_request_get(rq);
+
+ return to_request(__i915_active_fence_set(&timeline->last_request,
+ &rq->fence));
+}
+
+static struct i915_request *
+__i915_request_ensure_ordering(struct i915_request *rq,
+ struct intel_timeline *timeline)
+{
+ struct i915_request *prev;
+
+ GEM_BUG_ON(is_parallel_rq(rq));
+
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
+
if (prev && !__i915_request_is_complete(prev)) {
bool uses_guc = intel_engine_uses_guc(rq->engine);
+ bool pow2 = is_power_of_2(READ_ONCE(prev->engine)->mask |
+ rq->engine->mask);
+ bool same_context = prev->context == rq->context;
/*
* The requests are supposed to be kept in order. However,
@@ -1576,13 +1627,11 @@ __i915_request_add_to_timeline(struct i915_request *rq)
* is used as a barrier for external modification to this
* context.
*/
- GEM_BUG_ON(prev->context == rq->context &&
+ GEM_BUG_ON(same_context &&
i915_seqno_passed(prev->fence.seqno,
rq->fence.seqno));
- if ((!uses_guc &&
- is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask)) ||
- (uses_guc && prev->context == rq->context))
+ if ((same_context && uses_guc) || (!uses_guc && pow2))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1597,6 +1646,50 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
+ return prev;
+}
+
+static struct i915_request *
+__i915_request_add_to_timeline(struct i915_request *rq)
+{
+ struct intel_timeline *timeline = i915_request_timeline(rq);
+ struct i915_request *prev;
+
+ /*
+ * Dependency tracking and request ordering along the timeline
+ * is special cased so that we can eliminate redundant ordering
+ * operations while building the request (we know that the timeline
+ * itself is ordered, and here we guarantee it).
+ *
+ * As we know we will need to emit tracking along the timeline,
+ * we embed the hooks into our request struct -- at the cost of
+ * having to have specialised no-allocation interfaces (which will
+ * be beneficial elsewhere).
+ *
+ * A second benefit to open-coding i915_request_await_request is
+ * that we can apply a slight variant of the rules specialised
+ * for timelines that jump between engines (such as virtual engines).
+ * If we consider the case of virtual engine, we must emit a dma-fence
+ * to prevent scheduling of the second request until the first is
+ * complete (to maximise our greedy late load balancing) and this
+ * precludes optimising to use semaphores serialisation of a single
+ * timeline across engines.
+ *
+ * We do not order parallel submission requests on the timeline as each
+ * parallel submission context has its own timeline and the ordering
+ * rules for parallel requests are that they must be submitted in the
+ * order received from the execbuf IOCTL. So rather than using the
+ * timeline we store a pointer to the last request submitted in the
+ * relationship in the gem context and insert a submission fence
+ * between that request and the request passed into this function, or
+ * alternatively we use a completion fence if the gem context has a single
+ * timeline and this is the first submission of an execbuf IOCTL.
+ */
+ if (likely(!is_parallel_rq(rq)))
+ prev = __i915_request_ensure_ordering(rq, timeline);
+ else
+ prev = __i915_request_ensure_parallel_ordering(rq, timeline);
+
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
@@ -1852,7 +1945,7 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ if (CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT &&
__i915_spin_request(rq, state))
goto out;
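
The comment in __i915_request_add_to_timeline() above describes two ordering paths: normal requests chain against the timeline's last request, while parallel (multi-lrc) requests chain against the last request stored in their parent context. A minimal userspace sketch of that decision, with all struct and function names invented for illustration; it models only the bookkeeping, not the i915 fence machinery:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for i915 objects; names are illustrative only. */
struct toy_parent;

struct toy_request {
	int seqno;
	const struct toy_request *ordered_after;	/* submission dependency */
	bool parallel;
	struct toy_parent *parent;			/* only for parallel requests */
};

struct toy_parent {
	struct toy_request *last_rq;	/* last request in the parallel relationship */
};

struct toy_timeline {
	struct toy_request *last_request;
};

/* Mirrors the split between the normal and parallel ordering helpers. */
static void toy_add_to_timeline(struct toy_request *rq, struct toy_timeline *tl)
{
	if (!rq->parallel) {
		rq->ordered_after = tl->last_request;
		tl->last_request = rq;
	} else {
		rq->ordered_after = rq->parent->last_rq;
		rq->parent->last_rq = rq;	/* timeline is left untouched */
	}
}

int main(void)
{
	struct toy_timeline tl = {0};
	struct toy_parent parent = {0};
	struct toy_request a = { .seqno = 1 };
	struct toy_request b = { .seqno = 2, .parallel = true, .parent = &parent };
	struct toy_request c = { .seqno = 3, .parallel = true, .parent = &parent };

	toy_add_to_timeline(&a, &tl);
	toy_add_to_timeline(&b, &tl);
	toy_add_to_timeline(&c, &tl);

	/* c is ordered after b (same parent), not after a (timeline). */
	printf("c after seqno %d\n", c.ordered_after->seqno);
	return 0;
}
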
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 1bc1349ba3c2..dc359242d1ae 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -139,6 +139,29 @@ enum {
* the GPU. Here we track such boost requests on a per-request basis.
*/
I915_FENCE_FLAG_BOOST,
+
+ /*
+ * I915_FENCE_FLAG_SUBMIT_PARALLEL - request with a context in a
+ * parent-child relationship (parallel submission, multi-lrc) should
+ * trigger a submission to the GuC rather than just moving the context
+ * tail.
+ */
+ I915_FENCE_FLAG_SUBMIT_PARALLEL,
+
+ /*
+ * I915_FENCE_FLAG_SKIP_PARALLEL - request with a context in a
+ * parent-child relationship (parallel submission, multi-lrc) that
+ * hit an error while generating requests in the execbuf IOCTL.
+ * Indicates this request should be skipped as another request in
+ * submission / relationship encountered an error.
+ */
+ I915_FENCE_FLAG_SKIP_PARALLEL,
+
+ /*
+ * I915_FENCE_FLAG_COMPOSITE - Indicates fence is part of a composite
+ * fence (dma_fence_array) that i915 generated for parallel submission.
+ */
+ I915_FENCE_FLAG_COMPOSITE,
};
/**
@@ -218,6 +241,11 @@ struct i915_request {
};
struct llist_head execute_cb;
struct i915_sw_fence semaphore;
+ /**
+ * @submit_work: complete submit fence from an IRQ if needed for
+ * locking hierarchy reasons.
+ */
+ struct irq_work submit_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -285,18 +313,23 @@ struct i915_request {
struct hrtimer timer;
} watchdog;
- /*
- * Requests may need to be stalled when using GuC submission waiting for
- * certain GuC operations to complete. If that is the case, stalled
- * requests are added to a per context list of stalled requests. The
- * below list_head is the link in that list.
+ /**
+ * @guc_fence_link: Requests may need to be stalled when using GuC
+ * submission waiting for certain GuC operations to complete. If that is
+ * the case, stalled requests are added to a per context list of stalled
+ * requests. The below list_head is the link in that list. Protected by
+ * ce->guc_state.lock.
*/
struct list_head guc_fence_link;
/**
- * Priority level while the request is inflight. Differs from i915
- * scheduler priority. See comment above
- * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details.
+ * @guc_prio: Priority level while the request is in flight. Differs
+ * from i915 scheduler priority. See comment above
+ * I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP for details. Protected by
+ * ce->guc_active.lock. Two special values (GUC_PRIO_INIT and
+ * GUC_PRIO_FINI) outside the GuC priority range are used to indicate
+ * if the priority has not been initialized yet or if no more updates
+ * are possible because the request has completed.
*/
#define GUC_PRIO_INIT 0xff
#define GUC_PRIO_FINI 0xfe
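
The kernel-doc above notes that GUC_PRIO_INIT and GUC_PRIO_FINI sit outside the valid GuC priority range and mark "never set" and "request completed, no more updates". A small standalone sketch of that sentinel pattern; the valid range, struct, and helper names here are assumptions for illustration only:

#include <stdint.h>
#include <stdio.h>

#define PRIO_INIT	0xff	/* priority never set */
#define PRIO_FINI	0xfe	/* request completed, no further updates */

struct toy_req {
	uint8_t prio;
};

static void toy_set_prio(struct toy_req *rq, uint8_t prio)
{
	if (rq->prio == PRIO_FINI)	/* completed: drop late updates */
		return;
	rq->prio = prio;
}

static void toy_complete(struct toy_req *rq)
{
	rq->prio = PRIO_FINI;
}

int main(void)
{
	struct toy_req rq = { .prio = PRIO_INIT };

	toy_set_prio(&rq, 2);
	toy_complete(&rq);
	toy_set_prio(&rq, 5);		/* ignored after completion */
	printf("final prio state: 0x%02x\n", rq.prio);
	return 0;
}
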
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index cdf0e9c6fd73..1804f4142740 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -37,7 +37,6 @@
#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 806ad688274b..8104981a6604 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -794,7 +794,6 @@ DECLARE_EVENT_CLASS(i915_request,
TP_STRUCT__entry(
__field(u32, dev)
__field(u64, ctx)
- __field(u32, guc_id)
__field(u16, class)
__field(u16, instance)
__field(u32, seqno)
@@ -805,16 +804,14 @@ DECLARE_EVENT_CLASS(i915_request,
__entry->dev = rq->engine->i915->drm.primary->index;
__entry->class = rq->engine->uabi_class;
__entry->instance = rq->engine->uabi_instance;
- __entry->guc_id = rq->context->guc_id;
__entry->ctx = rq->fence.context;
__entry->seqno = rq->fence.seqno;
__entry->tail = rq->tail;
),
- TP_printk("dev=%u, engine=%u:%u, guc_id=%u, ctx=%llu, seqno=%u, tail=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, tail=%u",
__entry->dev, __entry->class, __entry->instance,
- __entry->guc_id, __entry->ctx, __entry->seqno,
- __entry->tail)
+ __entry->ctx, __entry->seqno, __entry->tail)
);
DEFINE_EVENT(i915_request, i915_request_add,
@@ -903,23 +900,19 @@ DECLARE_EVENT_CLASS(intel_context,
__field(u32, guc_id)
__field(int, pin_count)
__field(u32, sched_state)
- __field(u32, guc_sched_state_no_lock)
__field(u8, guc_prio)
),
TP_fast_assign(
- __entry->guc_id = ce->guc_id;
+ __entry->guc_id = ce->guc_id.id;
__entry->pin_count = atomic_read(&ce->pin_count);
__entry->sched_state = ce->guc_state.sched_state;
- __entry->guc_sched_state_no_lock =
- atomic_read(&ce->guc_sched_state_no_lock);
- __entry->guc_prio = ce->guc_prio;
+ __entry->guc_prio = ce->guc_state.prio;
),
- TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x,0x%x, guc_prio=%u",
+ TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
__entry->guc_id, __entry->pin_count,
__entry->sched_state,
- __entry->guc_sched_state_no_lock,
__entry->guc_prio)
);
@@ -1246,7 +1239,7 @@ DECLARE_EVENT_CLASS(i915_context,
TP_fast_assign(
__entry->dev = ctx->i915->drm.primary->index;
__entry->ctx = ctx;
- __entry->vm = rcu_access_pointer(ctx->vm);
+ __entry->vm = ctx->vm;
),
TP_printk("dev=%u, ctx=%p, ctx_vm=%p",
diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index 6877362f6b85..d59fbb019032 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -126,12 +126,30 @@ static void i915_ttm_buddy_man_free(struct ttm_resource_manager *man,
kfree(bman_res);
}
+static void i915_ttm_buddy_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct i915_ttm_buddy_manager *bman = to_buddy_manager(man);
+ struct i915_buddy_block *block;
+
+ mutex_lock(&bman->lock);
+ drm_printf(printer, "default_page_size: %lluKiB\n",
+ bman->default_page_size >> 10);
+
+ i915_buddy_print(&bman->mm, printer);
+
+ drm_printf(printer, "reserved:\n");
+ list_for_each_entry(block, &bman->reserved, link)
+ i915_buddy_block_print(&bman->mm, block, printer);
+ mutex_unlock(&bman->lock);
+}
+
static const struct ttm_resource_manager_func i915_ttm_buddy_manager_func = {
.alloc = i915_ttm_buddy_man_alloc,
.free = i915_ttm_buddy_man_free,
+ .debug = i915_ttm_buddy_man_debug,
};
-
/**
* i915_ttm_buddy_man_init - Setup buddy allocator based ttm manager
* @bdev: The ttm device
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 5259edacde38..7a5925072466 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/workqueue.h>
+#include <linux/sched/clock.h>
struct drm_i915_private;
struct timer_list;
@@ -458,17 +459,4 @@ static inline bool timer_expired(const struct timer_list *t)
return timer_active(t) && !timer_pending(t);
}
-/*
- * This is a lookalike for IS_ENABLED() that takes a kconfig value,
- * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero
- * i.e. whether the configuration is active. Wrapping up the config inside
- * a boolean context prevents clang and smatch from complaining about potential
- * issues in confusing logical-&& with bitwise-& for constants.
- *
- * Sadly IS_ENABLED() itself does not work with kconfig values.
- *
- * Returns 0 if @config is 0, 1 if set to any value.
- */
-#define IS_ACTIVE(config) ((config) != 0)
-
#endif /* !__I915_UTILS_H */
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 4b7fc4647e46..90546fa58fc1 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -1234,9 +1234,10 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
return i915_active_add_request(&vma->active, rq);
}
-int i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags)
+int _i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned int flags)
{
struct drm_i915_gem_object *obj = vma->obj;
int err;
@@ -1257,9 +1258,11 @@ int i915_vma_move_to_active(struct i915_vma *vma,
intel_frontbuffer_put(front);
}
- dma_resv_add_excl_fence(vma->resv, &rq->fence);
- obj->write_domain = I915_GEM_DOMAIN_RENDER;
- obj->read_domains = 0;
+ if (fence) {
+ dma_resv_add_excl_fence(vma->resv, fence);
+ obj->write_domain = I915_GEM_DOMAIN_RENDER;
+ obj->read_domains = 0;
+ }
} else {
if (!(flags & __EXEC_OBJECT_NO_RESERVE)) {
err = dma_resv_reserve_shared(vma->resv, 1);
@@ -1267,8 +1270,10 @@ int i915_vma_move_to_active(struct i915_vma *vma,
return err;
}
- dma_resv_add_shared_fence(vma->resv, &rq->fence);
- obj->write_domain = 0;
+ if (fence) {
+ dma_resv_add_shared_fence(vma->resv, fence);
+ obj->write_domain = 0;
+ }
}
if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence)
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index ed69f66c7ab0..648dbe744c96 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -57,9 +57,16 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma)
int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
struct i915_request *rq);
-int __must_check i915_vma_move_to_active(struct i915_vma *vma,
- struct i915_request *rq,
- unsigned int flags);
+int __must_check _i915_vma_move_to_active(struct i915_vma *vma,
+ struct i915_request *rq,
+ struct dma_fence *fence,
+ unsigned int flags);
+static inline int __must_check
+i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq,
+ unsigned int flags)
+{
+ return _i915_vma_move_to_active(vma, rq, &rq->fence, flags);
+}
#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter)
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index 995b502d7e5d..80e93bf00f2e 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -105,8 +105,9 @@ struct intel_remapped_plane_info {
} __packed;
struct intel_remapped_info {
- struct intel_remapped_plane_info plane[2];
- u32 unused_mbz;
+ struct intel_remapped_plane_info plane[4];
+ /* in gtt pages */
+ u32 plane_alignment;
} __packed;
struct intel_rotation_info {
@@ -129,7 +130,7 @@ static inline void assert_i915_gem_gtt_types(void)
{
BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
- BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 3 * sizeof(u32) + 8 * sizeof(u16));
+ BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));
/* Check that rotation/remapped shares offsets for simplicity */
BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index d328bb95c49b..8e6f48d1eb7b 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -133,6 +133,7 @@ enum intel_ppgtt_type {
func(has_logical_ring_elsq); \
func(has_mslices); \
func(has_pooled_eu); \
+ func(has_pxp); \
func(has_rc6); \
func(has_rc6p); \
func(has_rps); \
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
index 91866520c173..84bb212bae4b 100644
--- a/drivers/gpu/drm/i915/intel_dram.c
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -5,7 +5,7 @@
#include "i915_drv.h"
#include "intel_dram.h"
-#include "intel_sideband.h"
+#include "intel_pcode.h"
struct dram_dimm_info {
u16 size;
@@ -244,7 +244,6 @@ static int
skl_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 mem_freq_khz, val;
int ret;
dram_info->type = skl_get_dram_type(i915);
@@ -255,17 +254,6 @@ skl_get_dram_info(struct drm_i915_private *i915)
if (ret)
return ret;
- val = intel_uncore_read(&i915->uncore,
- SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- if (dram_info->num_channels * mem_freq_khz == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -350,24 +338,10 @@ static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
static int bxt_get_dram_info(struct drm_i915_private *i915)
{
struct dram_info *dram_info = &i915->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels, valid_ranks = 0;
+ u32 val;
+ u8 valid_ranks = 0;
int i;
- val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- if (mem_freq_khz * num_active_channels == 0) {
- drm_info(&i915->drm,
- "Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
/*
* Now read each DUNIT8/9/10/11 to check the rank of each dimms.
*/
@@ -444,7 +418,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
break;
default:
MISSING_CASE(val & 0xf);
- return -1;
+ return -EINVAL;
}
} else {
switch (val & 0xf) {
@@ -462,7 +436,7 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv)
break;
default:
MISSING_CASE(val & 0xf);
- return -1;
+ return -EINVAL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 779eb2fa90b6..e7f7e6627750 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -78,6 +78,18 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
return i915_ttm_buddy_man_reserve(man, offset, size);
}
+void intel_memory_region_debug(struct intel_memory_region *mr,
+ struct drm_printer *printer)
+{
+ drm_printf(printer, "%s: ", mr->name);
+
+ if (mr->region_private)
+ ttm_resource_manager_debug(mr->region_private, printer);
+ else
+ drm_printf(printer, "total:%pa, available:%pa bytes\n",
+ &mr->total, &mr->avail);
+}
+
struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
resource_size_t start,
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index 1f2b96efa69d..3feae3353d33 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -15,6 +15,7 @@
struct drm_i915_private;
struct drm_i915_gem_object;
+struct drm_printer;
struct intel_memory_region;
struct sg_table;
struct ttm_resource;
@@ -127,6 +128,9 @@ int intel_memory_region_reserve(struct intel_memory_region *mem,
resource_size_t offset,
resource_size_t size);
+void intel_memory_region_debug(struct intel_memory_region *mr,
+ struct drm_printer *printer);
+
struct intel_memory_region *
i915_gem_ttm_system_setup(struct drm_i915_private *i915,
u16 type, u16 instance);
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
new file mode 100644
index 000000000000..e8c886e4e78d
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_pcode.h"
+
+static int gen6_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_UNIMPLEMENTED_CMD:
+ return -ENODEV;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ case GEN6_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
+
+static int gen7_check_mailbox_status(u32 mbox)
+{
+ switch (mbox & GEN6_PCODE_ERROR_MASK) {
+ case GEN6_PCODE_SUCCESS:
+ return 0;
+ case GEN6_PCODE_ILLEGAL_CMD:
+ return -ENXIO;
+ case GEN7_PCODE_TIMEOUT:
+ return -ETIMEDOUT;
+ case GEN7_PCODE_ILLEGAL_DATA:
+ return -EINVAL;
+ case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+ return -ENXIO;
+ case GEN11_PCODE_LOCKED:
+ return -EBUSY;
+ case GEN11_PCODE_REJECTED:
+ return -EACCES;
+ case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
+ return -EOVERFLOW;
+ default:
+ MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
+ return 0;
+ }
+}
+
+static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
+ u32 mbox, u32 *val, u32 *val1,
+ int fast_timeout_us,
+ int slow_timeout_ms,
+ bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+
+ lockdep_assert_held(&i915->sb_lock);
+
+ /*
+ * GEN6_PCODE_* are outside of the forcewake domain, we can use
+ * intel_uncore_read/write_fw variants to reduce the amount of work
+ * required when reading/writing.
+ */
+
+ if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
+ return -EAGAIN;
+
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
+ intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
+ intel_uncore_write_fw(uncore,
+ GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
+
+ if (__intel_wait_for_register_fw(uncore,
+ GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY, 0,
+ fast_timeout_us,
+ slow_timeout_ms,
+ &mbox))
+ return -ETIMEDOUT;
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
+ if (is_read && val1)
+ *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
+
+ if (GRAPHICS_VER(i915) > 6)
+ return gen7_check_mailbox_status(mbox);
+ else
+ return gen6_check_mailbox_status(mbox);
+}
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, val, val1,
+ 500, 20,
+ true);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ drm_dbg(&i915->drm,
+ "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
+ mbox, __builtin_return_address(0), err);
+ }
+
+ return err;
+}
+
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
+ u32 mbox, u32 val,
+ int fast_timeout_us,
+ int slow_timeout_ms)
+{
+ int err;
+
+ mutex_lock(&i915->sb_lock);
+ err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
+ fast_timeout_us, slow_timeout_ms,
+ false);
+ mutex_unlock(&i915->sb_lock);
+
+ if (err) {
+ drm_dbg(&i915->drm,
+ "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
+ val, mbox, __builtin_return_address(0), err);
+ }
+
+ return err;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
+ u32 request, u32 reply_mask, u32 reply,
+ u32 *status)
+{
+ *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
+ 500, 0,
+ true);
+
+ return *status || ((request & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @i915: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms; if this times out, polling is retried for another
+ * 50 ms with preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ u32 status;
+ int ret;
+
+ mutex_lock(&i915->sb_lock);
+
+#define COND \
+ skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
+
+ /*
+ * Prime the PCODE by doing a request first. Normally it guarantees
+ * that a subsequent request, at most @timeout_base_ms later, succeeds.
+ * _wait_for() doesn't guarantee when its passed condition is evaluated
+ * first, so send the first request explicitly.
+ */
+ if (COND) {
+ ret = 0;
+ goto out;
+ }
+ ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
+ if (!ret)
+ goto out;
+
+ /*
+ * The above can time out if the number of requests was low (2 in the
+ * worst case) _and_ PCODE was busy for some reason even after a
+ * (queued) request and @timeout_base_ms delay. As a workaround retry
+ * the poll with preemption disabled to maximize the number of
+ * requests. Increase the timeout from @timeout_base_ms to 50ms to
+ * account for interrupts that could reduce the number of these
+ * requests, and for any quirks of the PCODE firmware that delay
+ * the request completion.
+ */
+ drm_dbg_kms(&i915->drm,
+ "PCODE timeout, retrying with preemption disabled\n");
+ drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
+ preempt_disable();
+ ret = wait_for_atomic(COND, 50);
+ preempt_enable();
+
+out:
+ mutex_unlock(&i915->sb_lock);
+ return ret ? ret : status;
+#undef COND
+}
+
+int intel_pcode_init(struct drm_i915_private *i915)
+{
+ int ret = 0;
+
+ if (!IS_DGFX(i915))
+ return ret;
+
+ ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
+ DG1_UNCORE_GET_INIT_STATUS,
+ DG1_UNCORE_INIT_STATUS_COMPLETE,
+ DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
+
+ drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
+
+ if (ret)
+ drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
+
+ return ret;
+}
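
skl_pcode_request() above keeps resending a mailbox request until the masked reply matches, polling first with preemption enabled for @timeout_base_ms and then falling back to a short bounded retry. A hedged userspace sketch of the same two-phase retry pattern; the try_request() callback and timings are invented and nothing here touches real pcode:

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Invented stand-in for the acknowledgment check: returns true once the
 * request is considered acknowledged (here, on the 5th attempt). */
static bool try_request(int *attempts)
{
	return ++(*attempts) >= 5;
}

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/* Two-phase poll: a first window up to timeout_base_ms, then a short
 * second window standing in for the preemption-disabled retry. */
static int request_with_retry(int timeout_base_ms)
{
	long long deadline = now_ms() + timeout_base_ms;
	int attempts = 0;

	while (now_ms() < deadline)
		if (try_request(&attempts))
			return 0;

	deadline = now_ms() + 50;	/* bounded fallback window */
	while (now_ms() < deadline)
		if (try_request(&attempts))
			return 0;

	return -1;	/* timed out in both phases */
}

int main(void)
{
	printf("result: %d\n", request_with_retry(10));
	return 0;
}
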
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
new file mode 100644
index 000000000000..50806649d4b6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_PCODE_H_
+#define _INTEL_PCODE_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
+ u32 *val, u32 *val1);
+int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
+ u32 val, int fast_timeout_us,
+ int slow_timeout_ms);
+#define sandybridge_pcode_write(i915, mbox, val) \
+ sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
+
+int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
+int intel_pcode_init(struct drm_i915_private *i915);
+
+#endif /* _INTEL_PCODE_H */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a725792d5248..ecbb3d141632 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -47,8 +47,9 @@
#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
+#include "intel_pcode.h"
#include "intel_pm.h"
-#include "intel_sideband.h"
+#include "vlv_sideband.h"
#include "../../../platform/x86/intel_ips.h"
/* Stores plane specific WM parameters */
@@ -893,9 +894,8 @@ static struct intel_crtc *single_enabled_crtc(struct drm_i915_private *dev_priv)
return enabled;
}
-static void pnv_update_wm(struct intel_crtc *unused_crtc)
+static void pnv_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct cxsr_latency *latency;
u32 reg;
@@ -1164,17 +1164,13 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state,
cpp = plane_state->hw.fb->format->cpp[0];
/*
- * Not 100% sure which way ELK should go here as the
- * spec only says CL/CTG should assume 32bpp and BW
- * doesn't need to. But as these things followed the
- * mobile vs. desktop lines on gen3 as well, let's
- * assume ELK doesn't need this.
+ * WaUse32BppForSRWM:ctg,elk
*
- * The spec also fails to list such a restriction for
- * the HPLL watermark, which seems a little strange.
+ * The spec fails to list this restriction for the
+ * HPLL watermark, which seems a little strange.
* Let's use 32bpp for the HPLL watermark as well.
*/
- if (IS_GM45(dev_priv) && plane->id == PLANE_PRIMARY &&
+ if (plane->id == PLANE_PRIMARY &&
level != G4X_WM_LEVEL_NORMAL)
cpp = max(cpp, 4u);
@@ -1388,8 +1384,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal;
- int num_active_planes = hweight8(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
const struct g4x_pipe_wm *raw;
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -1429,7 +1424,7 @@ static int g4x_compute_pipe_wm(struct intel_atomic_state *state,
wm_state->sr.cursor = raw->plane[PLANE_CURSOR];
wm_state->sr.fbc = raw->fbc;
- wm_state->cxsr = num_active_planes == BIT(PLANE_PRIMARY);
+ wm_state->cxsr = active_planes == BIT(PLANE_PRIMARY);
level = G4X_WM_LEVEL_HPLL;
if (!g4x_raw_crtc_wm_is_valid(crtc_state, level))
@@ -1720,7 +1715,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state)
const struct g4x_pipe_wm *raw =
&crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2];
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
- unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
int num_active_planes = hweight8(active_planes);
const int fifo_size = 511;
int fifo_extra, fifo_left = fifo_size;
@@ -1912,8 +1907,8 @@ static int vlv_compute_pipe_wm(struct intel_atomic_state *state,
struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal;
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
- int num_active_planes = hweight8(crtc_state->active_planes &
- ~BIT(PLANE_CURSOR));
+ u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR);
+ int num_active_planes = hweight8(active_planes);
bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->uapi);
const struct intel_plane_state *old_plane_state;
const struct intel_plane_state *new_plane_state;
@@ -2265,9 +2260,8 @@ static void vlv_optimize_watermarks(struct intel_atomic_state *state,
mutex_unlock(&dev_priv->wm.wm_mutex);
}
-static void i965_update_wm(struct intel_crtc *unused_crtc)
+static void i965_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
int srwm = 1;
int cursor_sr = 16;
@@ -2341,9 +2335,8 @@ static void i965_update_wm(struct intel_crtc *unused_crtc)
#undef FW_WM
-static void i9xx_update_wm(struct intel_crtc *unused_crtc)
+static void i9xx_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
const struct intel_watermark_params *wm_info;
u32 fwater_lo;
u32 fwater_hi;
@@ -2359,7 +2352,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
else
wm_info = &i830_a_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_A);
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_A);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A);
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_A);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
@@ -2386,7 +2382,10 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
if (DISPLAY_VER(dev_priv) == 2)
wm_info = &i830_bc_wm_info;
- fifo_size = dev_priv->display.get_fifo_size(dev_priv, PLANE_B);
+ if (DISPLAY_VER(dev_priv) == 2)
+ fifo_size = i830_get_fifo_size(dev_priv, PLANE_B);
+ else
+ fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B);
crtc = intel_get_crtc_for_plane(dev_priv, PLANE_B);
if (intel_crtc_active(crtc)) {
const struct drm_display_mode *pipe_mode =
@@ -2487,9 +2486,8 @@ static void i9xx_update_wm(struct intel_crtc *unused_crtc)
intel_set_memory_cxsr(dev_priv, true);
}
-static void i845_update_wm(struct intel_crtc *unused_crtc)
+static void i845_update_wm(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(unused_crtc->base.dev);
struct intel_crtc *crtc;
const struct drm_display_mode *pipe_mode;
u32 fwater_lo;
@@ -2502,7 +2500,7 @@ static void i845_update_wm(struct intel_crtc *unused_crtc)
pipe_mode = &crtc->config->hw.pipe_mode;
planea_wm = intel_calculate_wm(pipe_mode->crtc_clock,
&i845_wm_info,
- dev_priv->display.get_fifo_size(dev_priv, PLANE_A),
+ i845_get_fifo_size(dev_priv, PLANE_A),
4, pessimal_latency_ns);
fwater_lo = intel_uncore_read(&dev_priv->uncore, FW_BLC) & ~0xfff;
fwater_lo |= (3<<8) | planea_wm;
@@ -2871,6 +2869,7 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u32 val;
int ret, i;
int level, max_level = ilk_wm_max_level(dev_priv);
+ int mult = IS_DG2(dev_priv) ? 2 : 1;
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
@@ -2884,13 +2883,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
return;
}
- wm[0] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[1] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[2] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[3] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[0] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[1] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[2] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[3] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
@@ -2903,13 +2902,13 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
return;
}
- wm[4] = val & GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[5] = (val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[6] = (val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
- wm[7] = (val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
- GEN9_MEM_LATENCY_LEVEL_MASK;
+ wm[4] = (val & GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[5] = ((val >> GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[6] = ((val >> GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
+ wm[7] = ((val >> GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT) &
+ GEN9_MEM_LATENCY_LEVEL_MASK) * mult;
/*
* If a level n (n > 1) has a 0us latency, all levels m (m >= n)
@@ -6844,7 +6843,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
for_each_plane_id_on_crtc(crtc, plane_id)
raw->plane[plane_id] = active->wm.plane[plane_id];
- if (++level > max_level)
+ level = G4X_WM_LEVEL_SR;
+ if (level > max_level)
goto out;
raw = &crtc_state->wm.g4x.raw[level];
@@ -6853,7 +6853,8 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
raw->plane[PLANE_SPRITE0] = 0;
raw->fbc = active->sr.fbc;
- if (++level > max_level)
+ level = G4X_WM_LEVEL_HPLL;
+ if (level > max_level)
goto out;
raw = &crtc_state->wm.g4x.raw[level];
@@ -6862,6 +6863,7 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv)
raw->plane[PLANE_SPRITE0] = 0;
raw->fbc = active->hpll.fbc;
+ level++;
out:
for_each_plane_id_on_crtc(crtc, plane_id)
g4x_raw_plane_wm_set(crtc_state, level,
@@ -7141,47 +7143,6 @@ void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv)
!(intel_uncore_read(&dev_priv->uncore, DISP_ARB_CTL) & DISP_FBC_WM_DIS);
}
-/**
- * intel_update_watermarks - update FIFO watermark values based on current modes
- * @crtc: the #intel_crtc on which to compute the WM
- *
- * Calculate watermark values for the various WM regs based on current mode
- * and plane configuration.
- *
- * There are several cases to deal with here:
- * - normal (i.e. non-self-refresh)
- * - self-refresh (SR) mode
- * - lines are large relative to FIFO size (buffer can hold up to 2)
- * - lines are small relative to FIFO size (buffer can hold more than 2
- * lines), so need to account for TLB latency
- *
- * The normal calculation is:
- * watermark = dotclock * bytes per pixel * latency
- * where latency is platform & configuration dependent (we assume pessimal
- * values here).
- *
- * The SR calculation is:
- * watermark = (trunc(latency/line time)+1) * surface width *
- * bytes per pixel
- * where
- * line time = htotal / dotclock
- * surface width = hdisplay for normal plane and 64 for cursor
- * and latency is assumed to be high, as above.
- *
- * The final value programmed to the register should always be rounded up,
- * and include an extra 2 entries to account for clock crossings.
- *
- * We don't use the sprite, so we can ignore that. And on Crestline we have
- * to set the non-SR watermarks to 8.
- */
-void intel_update_watermarks(struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
-
- if (dev_priv->display.update_wm)
- dev_priv->display.update_wm(crtc);
-}
-
void intel_enable_ipc(struct drm_i915_private *dev_priv)
{
u32 val;
@@ -7639,11 +7600,6 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe)) |
BDW_DPRS_MASK_VBLANK_SRD);
-
- /* Undocumented but fixes async flip + VT-d corruption */
- if (intel_vtd_active())
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
- HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
}
/* WaVSRefCountFullforceMissDisable:bdw */
@@ -7679,20 +7635,11 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv)
static void hsw_init_clock_gating(struct drm_i915_private *dev_priv)
{
- enum pipe pipe;
-
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
intel_uncore_write(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A),
intel_uncore_read(&dev_priv->uncore, CHICKEN_PIPESL_1(PIPE_A)) |
HSW_FBCQ_DIS);
- for_each_pipe(dev_priv, pipe) {
- /* Undocumented but fixes async flip + VT-d corruption */
- if (intel_vtd_active())
- intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe),
- HSW_PRI_STRETCH_MAX_MASK, HSW_PRI_STRETCH_MAX_X1);
- }
-
/* This is required by WaCatErrorRejectionIssue:hsw */
intel_uncore_write(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
intel_uncore_read(&dev_priv->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
@@ -7921,7 +7868,7 @@ static void i830_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating(struct drm_i915_private *dev_priv)
{
- dev_priv->display.init_clock_gating(dev_priv);
+ dev_priv->clock_gating_funcs->init_clock_gating(dev_priv);
}
void intel_suspend_hw(struct drm_i915_private *dev_priv)
@@ -7936,6 +7883,36 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
"No clock gating settings or workarounds applied.\n");
}
+#define CG_FUNCS(platform) \
+static const struct drm_i915_clock_gating_funcs platform##_clock_gating_funcs = { \
+ .init_clock_gating = platform##_init_clock_gating, \
+}
+
+CG_FUNCS(adlp);
+CG_FUNCS(dg1);
+CG_FUNCS(gen12lp);
+CG_FUNCS(icl);
+CG_FUNCS(cfl);
+CG_FUNCS(skl);
+CG_FUNCS(kbl);
+CG_FUNCS(bxt);
+CG_FUNCS(glk);
+CG_FUNCS(bdw);
+CG_FUNCS(chv);
+CG_FUNCS(hsw);
+CG_FUNCS(ivb);
+CG_FUNCS(vlv);
+CG_FUNCS(gen6);
+CG_FUNCS(ilk);
+CG_FUNCS(g4x);
+CG_FUNCS(i965gm);
+CG_FUNCS(i965g);
+CG_FUNCS(gen3);
+CG_FUNCS(i85x);
+CG_FUNCS(i830);
+CG_FUNCS(nop);
+#undef CG_FUNCS
+
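
CG_FUNCS() above stamps out one static const vtable per platform by pasting the platform prefix into both the struct name and the init_clock_gating callback it points at. A standalone sketch of the same token-pasting pattern, with invented names:

#include <stdio.h>

struct toy_clock_gating_funcs {
	void (*init_clock_gating)(void);
};

static void adl_init_clock_gating(void) { puts("adl clock gating"); }
static void skl_init_clock_gating(void) { puts("skl clock gating"); }

/* Same shape as CG_FUNCS(): the macro argument names both the generated
 * struct and the platform callback wired into it. */
#define TOY_CG_FUNCS(platform) \
static const struct toy_clock_gating_funcs platform##_clock_gating_funcs = { \
	.init_clock_gating = platform##_init_clock_gating, \
}

TOY_CG_FUNCS(adl);
TOY_CG_FUNCS(skl);
#undef TOY_CG_FUNCS

int main(void)
{
	adl_clock_gating_funcs.init_clock_gating();
	skl_clock_gating_funcs.init_clock_gating();
	return 0;
}
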
/**
* intel_init_clock_gating_hooks - setup the clock gating hooks
* @dev_priv: device private
@@ -7948,55 +7925,100 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv)
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
{
if (IS_ALDERLAKE_P(dev_priv))
- dev_priv->display.init_clock_gating = adlp_init_clock_gating;
+ dev_priv->clock_gating_funcs = &adlp_clock_gating_funcs;
else if (IS_DG1(dev_priv))
- dev_priv->display.init_clock_gating = dg1_init_clock_gating;
+ dev_priv->clock_gating_funcs = &dg1_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 12)
- dev_priv->display.init_clock_gating = gen12lp_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen12lp_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 11)
- dev_priv->display.init_clock_gating = icl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &icl_clock_gating_funcs;
else if (IS_COFFEELAKE(dev_priv) || IS_COMETLAKE(dev_priv))
- dev_priv->display.init_clock_gating = cfl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &cfl_clock_gating_funcs;
else if (IS_SKYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = skl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &skl_clock_gating_funcs;
else if (IS_KABYLAKE(dev_priv))
- dev_priv->display.init_clock_gating = kbl_init_clock_gating;
+ dev_priv->clock_gating_funcs = &kbl_clock_gating_funcs;
else if (IS_BROXTON(dev_priv))
- dev_priv->display.init_clock_gating = bxt_init_clock_gating;
+ dev_priv->clock_gating_funcs = &bxt_clock_gating_funcs;
else if (IS_GEMINILAKE(dev_priv))
- dev_priv->display.init_clock_gating = glk_init_clock_gating;
+ dev_priv->clock_gating_funcs = &glk_clock_gating_funcs;
else if (IS_BROADWELL(dev_priv))
- dev_priv->display.init_clock_gating = bdw_init_clock_gating;
+ dev_priv->clock_gating_funcs = &bdw_clock_gating_funcs;
else if (IS_CHERRYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = chv_init_clock_gating;
+ dev_priv->clock_gating_funcs = &chv_clock_gating_funcs;
else if (IS_HASWELL(dev_priv))
- dev_priv->display.init_clock_gating = hsw_init_clock_gating;
+ dev_priv->clock_gating_funcs = &hsw_clock_gating_funcs;
else if (IS_IVYBRIDGE(dev_priv))
- dev_priv->display.init_clock_gating = ivb_init_clock_gating;
+ dev_priv->clock_gating_funcs = &ivb_clock_gating_funcs;
else if (IS_VALLEYVIEW(dev_priv))
- dev_priv->display.init_clock_gating = vlv_init_clock_gating;
+ dev_priv->clock_gating_funcs = &vlv_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 6)
- dev_priv->display.init_clock_gating = gen6_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen6_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 5)
- dev_priv->display.init_clock_gating = ilk_init_clock_gating;
+ dev_priv->clock_gating_funcs = &ilk_clock_gating_funcs;
else if (IS_G4X(dev_priv))
- dev_priv->display.init_clock_gating = g4x_init_clock_gating;
+ dev_priv->clock_gating_funcs = &g4x_clock_gating_funcs;
else if (IS_I965GM(dev_priv))
- dev_priv->display.init_clock_gating = i965gm_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i965gm_clock_gating_funcs;
else if (IS_I965G(dev_priv))
- dev_priv->display.init_clock_gating = i965g_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i965g_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 3)
- dev_priv->display.init_clock_gating = gen3_init_clock_gating;
+ dev_priv->clock_gating_funcs = &gen3_clock_gating_funcs;
else if (IS_I85X(dev_priv) || IS_I865G(dev_priv))
- dev_priv->display.init_clock_gating = i85x_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i85x_clock_gating_funcs;
else if (GRAPHICS_VER(dev_priv) == 2)
- dev_priv->display.init_clock_gating = i830_init_clock_gating;
+ dev_priv->clock_gating_funcs = &i830_clock_gating_funcs;
else {
MISSING_CASE(INTEL_DEVID(dev_priv));
- dev_priv->display.init_clock_gating = nop_init_clock_gating;
+ dev_priv->clock_gating_funcs = &nop_clock_gating_funcs;
}
}
+static const struct drm_i915_wm_disp_funcs skl_wm_funcs = {
+ .compute_global_watermarks = skl_compute_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs ilk_wm_funcs = {
+ .compute_pipe_wm = ilk_compute_pipe_wm,
+ .compute_intermediate_wm = ilk_compute_intermediate_wm,
+ .initial_watermarks = ilk_initial_watermarks,
+ .optimize_watermarks = ilk_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs vlv_wm_funcs = {
+ .compute_pipe_wm = vlv_compute_pipe_wm,
+ .compute_intermediate_wm = vlv_compute_intermediate_wm,
+ .initial_watermarks = vlv_initial_watermarks,
+ .optimize_watermarks = vlv_optimize_watermarks,
+ .atomic_update_watermarks = vlv_atomic_update_fifo,
+};
+
+static const struct drm_i915_wm_disp_funcs g4x_wm_funcs = {
+ .compute_pipe_wm = g4x_compute_pipe_wm,
+ .compute_intermediate_wm = g4x_compute_intermediate_wm,
+ .initial_watermarks = g4x_initial_watermarks,
+ .optimize_watermarks = g4x_optimize_watermarks,
+};
+
+static const struct drm_i915_wm_disp_funcs pnv_wm_funcs = {
+ .update_wm = pnv_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i965_wm_funcs = {
+ .update_wm = i965_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i9xx_wm_funcs = {
+ .update_wm = i9xx_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs i845_wm_funcs = {
+ .update_wm = i845_update_wm,
+};
+
+static const struct drm_i915_wm_disp_funcs nop_funcs = {
+};
+
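The empty nop_funcs table ensures every platform, including the fall-through cases, ends up with a valid wm_disp pointer, so callers only need to NULL-check individual hooks. A minimal caller sketch (illustrative only; the real call sites live in the display code, not in this hunk):

	/* Each hook is optional; platforms populate only what they need. */
	if (dev_priv->wm_disp->update_wm)
		dev_priv->wm_disp->update_wm(crtc);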
/* Set up chip specific power management-related functions */
void intel_init_pm(struct drm_i915_private *dev_priv)
{
@@ -8012,7 +8034,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (DISPLAY_VER(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.compute_global_watermarks = skl_compute_wm;
+ dev_priv->wm_disp = &skl_wm_funcs;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
@@ -8020,31 +8042,19 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->wm.spr_latency[1] && dev_priv->wm.cur_latency[1]) ||
(DISPLAY_VER(dev_priv) != 5 && dev_priv->wm.pri_latency[0] &&
dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) {
- dev_priv->display.compute_pipe_wm = ilk_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm =
- ilk_compute_intermediate_wm;
- dev_priv->display.initial_watermarks =
- ilk_initial_watermarks;
- dev_priv->display.optimize_watermarks =
- ilk_optimize_watermarks;
+ dev_priv->wm_disp = &ilk_wm_funcs;
} else {
drm_dbg_kms(&dev_priv->drm,
"Failed to read display plane latency. "
"Disable CxSR\n");
+ dev_priv->wm_disp = &nop_funcs;
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
vlv_setup_wm_latency(dev_priv);
- dev_priv->display.compute_pipe_wm = vlv_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm = vlv_compute_intermediate_wm;
- dev_priv->display.initial_watermarks = vlv_initial_watermarks;
- dev_priv->display.optimize_watermarks = vlv_optimize_watermarks;
- dev_priv->display.atomic_update_watermarks = vlv_atomic_update_fifo;
+ dev_priv->wm_disp = &vlv_wm_funcs;
} else if (IS_G4X(dev_priv)) {
g4x_setup_wm_latency(dev_priv);
- dev_priv->display.compute_pipe_wm = g4x_compute_pipe_wm;
- dev_priv->display.compute_intermediate_wm = g4x_compute_intermediate_wm;
- dev_priv->display.initial_watermarks = g4x_initial_watermarks;
- dev_priv->display.optimize_watermarks = g4x_optimize_watermarks;
+ dev_priv->wm_disp = &g4x_wm_funcs;
} else if (IS_PINEVIEW(dev_priv)) {
if (!intel_get_cxsr_latency(!IS_MOBILE(dev_priv),
dev_priv->is_ddr3,
@@ -8058,25 +8068,22 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
dev_priv->fsb_freq, dev_priv->mem_freq);
/* Disable CxSR and never update its watermark again */
intel_set_memory_cxsr(dev_priv, false);
- dev_priv->display.update_wm = NULL;
+ dev_priv->wm_disp = &nop_funcs;
} else
- dev_priv->display.update_wm = pnv_update_wm;
+ dev_priv->wm_disp = &pnv_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 4) {
- dev_priv->display.update_wm = i965_update_wm;
+ dev_priv->wm_disp = &i965_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 3) {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
+ dev_priv->wm_disp = &i9xx_wm_funcs;
} else if (DISPLAY_VER(dev_priv) == 2) {
- if (INTEL_NUM_PIPES(dev_priv) == 1) {
- dev_priv->display.update_wm = i845_update_wm;
- dev_priv->display.get_fifo_size = i845_get_fifo_size;
- } else {
- dev_priv->display.update_wm = i9xx_update_wm;
- dev_priv->display.get_fifo_size = i830_get_fifo_size;
- }
+ if (INTEL_NUM_PIPES(dev_priv) == 1)
+ dev_priv->wm_disp = &i845_wm_funcs;
+ else
+ dev_priv->wm_disp = &i9xx_wm_funcs;
} else {
drm_err(&dev_priv->drm,
"unexpected fall-through in %s\n", __func__);
+ dev_priv->wm_disp = &nop_funcs;
}
}
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index 91f23b7f0af2..990cdcaf85ce 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -8,7 +8,6 @@
#include <linux/types.h>
-#include "display/intel_bw.h"
#include "display/intel_display.h"
#include "display/intel_global_state.h"
@@ -19,6 +18,7 @@ struct drm_device;
struct drm_i915_private;
struct i915_request;
struct intel_atomic_state;
+struct intel_bw_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
@@ -29,7 +29,6 @@ struct skl_wm_level;
void intel_init_clock_gating(struct drm_i915_private *dev_priv);
void intel_suspend_hw(struct drm_i915_private *dev_priv);
int ilk_wm_max_level(const struct drm_i915_private *dev_priv);
-void intel_update_watermarks(struct intel_crtc *crtc);
void intel_init_pm(struct drm_i915_private *dev_priv);
void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv);
void intel_pm_setup(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 183ea2b187fe..47a85fab4130 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include "display/intel_display.h"
-
#include "intel_wakeref.h"
#include "i915_utils.h"
diff --git a/drivers/gpu/drm/i915/intel_sbi.c b/drivers/gpu/drm/i915/intel_sbi.c
new file mode 100644
index 000000000000..5ba8490a31e6
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sbi.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ *
+ * LPT/WPT IOSF sideband.
+ */
+
+#include "i915_drv.h"
+#include "intel_sbi.h"
+
+/* SBI access */
+static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination,
+ u32 *val, bool is_read)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 cmd;
+
+ lockdep_assert_held(&i915->sb_lock);
+
+ if (intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100)) {
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to become ready\n");
+ return -EBUSY;
+ }
+
+ intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
+ intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
+
+ if (destination == SBI_ICLK)
+ cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
+ else
+ cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
+ if (!is_read)
+ cmd |= BIT(8);
+ intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ SBI_CTL_STAT, SBI_BUSY, 0,
+ 100, 100, &cmd)) {
+ drm_err(&i915->drm,
+ "timeout waiting for SBI to complete read\n");
+ return -ETIMEDOUT;
+ }
+
+ if (cmd & SBI_RESPONSE_FAIL) {
+ drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
+ return -ENXIO;
+ }
+
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, SBI_DATA);
+
+ return 0;
+}
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination)
+{
+ u32 result = 0;
+
+ intel_sbi_rw(i915, reg, destination, &result, true);
+
+ return result;
+}
+
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination)
+{
+ intel_sbi_rw(i915, reg, destination, &value, false);
+}
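Both helpers assume the caller already holds i915->sb_lock (enforced by the lockdep assertion in intel_sbi_rw()). A rough usage sketch for a read-modify-write of an iCLK register; SBI_SSCCTL6 and SBI_SSCCTL_DISABLE are existing i915 register/bit names used here purely for illustration:

	u32 tmp;

	mutex_lock(&i915->sb_lock);
	tmp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
	tmp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(i915, SBI_SSCCTL6, tmp, SBI_ICLK);
	mutex_unlock(&i915->sb_lock);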
diff --git a/drivers/gpu/drm/i915/intel_sbi.h b/drivers/gpu/drm/i915/intel_sbi.h
new file mode 100644
index 000000000000..f5a862210454
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sbi.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#ifndef _INTEL_SBI_H_
+#define _INTEL_SBI_H_
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+enum intel_sbi_destination {
+ SBI_ICLK,
+ SBI_MPHY,
+};
+
+u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
+ enum intel_sbi_destination destination);
+void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
+ enum intel_sbi_destination destination);
+
+#endif /* _INTEL_SBI_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
deleted file mode 100644
index e304bf44e1ff..000000000000
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ /dev/null
@@ -1,577 +0,0 @@
-/*
- * Copyright © 2013 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include <asm/iosf_mbi.h>
-
-#include "i915_drv.h"
-#include "intel_sideband.h"
-
-/*
- * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
- * VLV_VLV2_PUNIT_HAS_0.8.docx
- */
-
-/* Standard MMIO read, non-posted */
-#define SB_MRD_NP 0x00
-/* Standard MMIO write, non-posted */
-#define SB_MWR_NP 0x01
-/* Private register read, double-word addressing, non-posted */
-#define SB_CRRDDA_NP 0x06
-/* Private register write, double-word addressing, non-posted */
-#define SB_CRWRDA_NP 0x07
-
-static void ping(void *info)
-{
-}
-
-static void __vlv_punit_get(struct drm_i915_private *i915)
-{
- iosf_mbi_punit_acquire();
-
- /*
- * Prevent the cpu from sleeping while we use this sideband, otherwise
- * the punit may cause a machine hang. The issue appears to be isolated
- * with changing the power state of the CPU package while changing
- * the power state via the punit, and we have only observed it
- * reliably on 4-core Baytrail systems, suggesting the issue is in the
- * power delivery mechanism and likely to be board/function
- * specific. Hence we presume the workaround need only be applied
- * to the Valleyview P-unit and not all sideband communications.
- */
- if (IS_VALLEYVIEW(i915)) {
- cpu_latency_qos_update_request(&i915->sb_qos, 0);
- on_each_cpu(ping, NULL, 1);
- }
-}
-
-static void __vlv_punit_put(struct drm_i915_private *i915)
-{
- if (IS_VALLEYVIEW(i915))
- cpu_latency_qos_update_request(&i915->sb_qos,
- PM_QOS_DEFAULT_VALUE);
-
- iosf_mbi_punit_release();
-}
-
-void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
-{
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
- __vlv_punit_get(i915);
-
- mutex_lock(&i915->sb_lock);
-}
-
-void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
-{
- mutex_unlock(&i915->sb_lock);
-
- if (ports & BIT(VLV_IOSF_SB_PUNIT))
- __vlv_punit_put(i915);
-}
-
-static int vlv_sideband_rw(struct drm_i915_private *i915,
- u32 devfn, u32 port, u32 opcode,
- u32 addr, u32 *val)
-{
- struct intel_uncore *uncore = &i915->uncore;
- const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
- int err;
-
- lockdep_assert_held(&i915->sb_lock);
- if (port == IOSF_PORT_PUNIT)
- iosf_mbi_assert_punit_acquired();
-
- /* Flush the previous comms, just in case it failed last time. */
- if (intel_wait_for_register(uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 5)) {
- drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
- is_read ? "read" : "write");
- return -EAGAIN;
- }
-
- preempt_disable();
-
- intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
- intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
- intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
- (devfn << IOSF_DEVFN_SHIFT) |
- (opcode << IOSF_OPCODE_SHIFT) |
- (port << IOSF_PORT_SHIFT) |
- (0xf << IOSF_BYTE_ENABLES_SHIFT) |
- (0 << IOSF_BAR_SHIFT) |
- IOSF_SB_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
- 10000, 0, NULL) == 0) {
- if (is_read)
- *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
- err = 0;
- } else {
- drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
- is_read ? "read" : "write");
- err = -ETIMEDOUT;
- }
-
- preempt_enable();
-
- return err;
-}
-
-u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
-{
- return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
- SB_CRWRDA_NP, addr, &val);
-}
-
-u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
- SB_CRRDDA_NP, addr, &val);
-
- return val;
-}
-
-u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_iosf_sb_write(struct drm_i915_private *i915,
- u8 port, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
- SB_CRWRDA_NP, reg, &val);
-}
-
-u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRRDDA_NP, reg, &val);
-
- return val;
-}
-
-void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
- SB_CRWRDA_NP, reg, &val);
-}
-
-static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
-{
- /*
- * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
- * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
- */
- if (IS_CHERRYVIEW(i915))
- return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
- else
- return IOSF_PORT_DPIO;
-}
-
-u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
- u32 val = 0;
-
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
-
- /*
- * FIXME: There might be some registers where all 1's is a valid value,
- * so ideally we should check the register offset instead...
- */
- drm_WARN(&i915->drm, val == 0xffffffff,
- "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
-
- return val;
-}
-
-void vlv_dpio_write(struct drm_i915_private *i915,
- enum pipe pipe, int reg, u32 val)
-{
- u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
-
- vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
-}
-
-u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
-{
- u32 val = 0;
-
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
- reg, &val);
- return val;
-}
-
-void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
-{
- vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
- reg, &val);
-}
-
-/* SBI access */
-static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination,
- u32 *val, bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
- u32 cmd;
-
- lockdep_assert_held(&i915->sb_lock);
-
- if (intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to become ready\n");
- return -EBUSY;
- }
-
- intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
- intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);
-
- if (destination == SBI_ICLK)
- cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
- else
- cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
- if (!is_read)
- cmd |= BIT(8);
- intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);
-
- if (__intel_wait_for_register_fw(uncore,
- SBI_CTL_STAT, SBI_BUSY, 0,
- 100, 100, &cmd)) {
- drm_err(&i915->drm,
- "timeout waiting for SBI to complete read\n");
- return -ETIMEDOUT;
- }
-
- if (cmd & SBI_RESPONSE_FAIL) {
- drm_err(&i915->drm, "error during SBI read of reg %x\n", reg);
- return -ENXIO;
- }
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, SBI_DATA);
-
- return 0;
-}
-
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination)
-{
- u32 result = 0;
-
- intel_sbi_rw(i915, reg, destination, &result, true);
-
- return result;
-}
-
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination)
-{
- intel_sbi_rw(i915, reg, destination, &value, false);
-}
-
-static int gen6_check_mailbox_status(u32 mbox)
-{
- switch (mbox & GEN6_PCODE_ERROR_MASK) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_UNIMPLEMENTED_CMD:
- return -ENODEV;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- case GEN6_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- default:
- MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
- return 0;
- }
-}
-
-static int gen7_check_mailbox_status(u32 mbox)
-{
- switch (mbox & GEN6_PCODE_ERROR_MASK) {
- case GEN6_PCODE_SUCCESS:
- return 0;
- case GEN6_PCODE_ILLEGAL_CMD:
- return -ENXIO;
- case GEN7_PCODE_TIMEOUT:
- return -ETIMEDOUT;
- case GEN7_PCODE_ILLEGAL_DATA:
- return -EINVAL;
- case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
- return -ENXIO;
- case GEN11_PCODE_LOCKED:
- return -EBUSY;
- case GEN11_PCODE_REJECTED:
- return -EACCES;
- case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
- return -EOVERFLOW;
- default:
- MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
- return 0;
- }
-}
-
-static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
- u32 mbox, u32 *val, u32 *val1,
- int fast_timeout_us,
- int slow_timeout_ms,
- bool is_read)
-{
- struct intel_uncore *uncore = &i915->uncore;
-
- lockdep_assert_held(&i915->sb_lock);
-
- /*
- * GEN6_PCODE_* are outside of the forcewake domain, we can use
- * intel_uncore_read/write_fw variants to reduce the amount of work
- * required when reading/writing.
- */
-
- if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
- return -EAGAIN;
-
- intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
- intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
- intel_uncore_write_fw(uncore,
- GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);
-
- if (__intel_wait_for_register_fw(uncore,
- GEN6_PCODE_MAILBOX,
- GEN6_PCODE_READY, 0,
- fast_timeout_us,
- slow_timeout_ms,
- &mbox))
- return -ETIMEDOUT;
-
- if (is_read)
- *val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
- if (is_read && val1)
- *val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);
-
- if (GRAPHICS_VER(i915) > 6)
- return gen7_check_mailbox_status(mbox);
- else
- return gen6_check_mailbox_status(mbox);
-}
-
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
- u32 *val, u32 *val1)
-{
- int err;
-
- mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, val, val1,
- 500, 20,
- true);
- mutex_unlock(&i915->sb_lock);
-
- if (err) {
- drm_dbg(&i915->drm,
- "warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
- mbox, __builtin_return_address(0), err);
- }
-
- return err;
-}
-
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
- u32 mbox, u32 val,
- int fast_timeout_us,
- int slow_timeout_ms)
-{
- int err;
-
- mutex_lock(&i915->sb_lock);
- err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
- fast_timeout_us, slow_timeout_ms,
- false);
- mutex_unlock(&i915->sb_lock);
-
- if (err) {
- drm_dbg(&i915->drm,
- "warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
- val, mbox, __builtin_return_address(0), err);
- }
-
- return err;
-}
-
-static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
- u32 request, u32 reply_mask, u32 reply,
- u32 *status)
-{
- *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
- 500, 0,
- true);
-
- return *status || ((request & reply_mask) == reply);
-}
-
-/**
- * skl_pcode_request - send PCODE request until acknowledgment
- * @i915: device private
- * @mbox: PCODE mailbox ID the request is targeted for
- * @request: request ID
- * @reply_mask: mask used to check for request acknowledgment
- * @reply: value used to check for request acknowledgment
- * @timeout_base_ms: timeout for polling with preemption enabled
- *
- * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
- * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
- * The request is acknowledged once the PCODE reply dword equals @reply after
- * applying @reply_mask. Polling is first attempted with preemption enabled
- * for @timeout_base_ms and if this times out for another 50 ms with
- * preemption disabled.
- *
- * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
- * other error as reported by PCODE.
- */
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- u32 status;
- int ret;
-
- mutex_lock(&i915->sb_lock);
-
-#define COND \
- skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)
-
- /*
- * Prime the PCODE by doing a request first. Normally it guarantees
- * that a subsequent request, at most @timeout_base_ms later, succeeds.
- * _wait_for() doesn't guarantee when its passed condition is evaluated
- * first, so send the first request explicitly.
- */
- if (COND) {
- ret = 0;
- goto out;
- }
- ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
- if (!ret)
- goto out;
-
- /*
- * The above can time out if the number of requests was low (2 in the
- * worst case) _and_ PCODE was busy for some reason even after a
- * (queued) request and @timeout_base_ms delay. As a workaround retry
- * the poll with preemption disabled to maximize the number of
- * requests. Increase the timeout from @timeout_base_ms to 50ms to
- * account for interrupts that could reduce the number of these
- * requests, and for any quirks of the PCODE firmware that delays
- * the request completion.
- */
- drm_dbg_kms(&i915->drm,
- "PCODE timeout, retrying with preemption disabled\n");
- drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
- preempt_disable();
- ret = wait_for_atomic(COND, 50);
- preempt_enable();
-
-out:
- mutex_unlock(&i915->sb_lock);
- return ret ? ret : status;
-#undef COND
-}
-
-int intel_pcode_init(struct drm_i915_private *i915)
-{
- int ret = 0;
-
- if (!IS_DGFX(i915))
- return ret;
-
- ret = skl_pcode_request(i915, DG1_PCODE_STATUS,
- DG1_UNCORE_GET_INIT_STATUS,
- DG1_UNCORE_INIT_STATUS_COMPLETE,
- DG1_UNCORE_INIT_STATUS_COMPLETE, 180000);
-
- drm_dbg(&i915->drm, "PCODE init status %d\n", ret);
-
- if (ret)
- drm_err(&i915->drm, "Pcode did not report uncore initialization completion!\n");
-
- return ret;
-}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 6b38bc2811c1..e072054adac5 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -36,6 +36,12 @@
#define __raw_posting_read(...) ((void)__raw_uncore_read32(__VA_ARGS__))
+static void
+fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
+{
+ uncore->fw_get_funcs->force_wake_get(uncore, fw_domains);
+}
+
void
intel_uncore_mmio_debug_init_early(struct intel_uncore_mmio_debug *mmio_debug)
{
@@ -64,7 +70,7 @@ static void mmio_debug_resume(struct intel_uncore_mmio_debug *mmio_debug)
static const char * const forcewake_domain_names[] = {
"render",
- "blitter",
+ "gt",
"media",
"vdbox0",
"vdbox1",
@@ -248,7 +254,7 @@ fw_domain_put(const struct intel_uncore_forcewake_domain *d)
}
static void
-fw_domains_get(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
+fw_domains_get_normal(struct intel_uncore *uncore, enum forcewake_domains fw_domains)
{
struct intel_uncore_forcewake_domain *d;
unsigned int tmp;
@@ -340,7 +346,7 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
enum forcewake_domains fw_domains)
{
- fw_domains_get(uncore, fw_domains);
+ fw_domains_get_normal(uncore, fw_domains);
/* WaRsForcewakeWaitTC0:snb,ivb,hsw,bdw,vlv */
__gen6_gt_wait_for_thread_c0(uncore);
@@ -396,7 +402,7 @@ intel_uncore_fw_release_timer(struct hrtimer *timer)
GEM_BUG_ON(!domain->wake_count);
if (--domain->wake_count == 0)
- uncore->funcs.force_wake_put(uncore, domain->mask);
+ fw_domains_put(uncore, domain->mask);
spin_unlock_irqrestore(&uncore->lock, irqflags);
@@ -454,7 +460,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
fw = uncore->fw_domains_active;
if (fw)
- uncore->funcs.force_wake_put(uncore, fw);
+ fw_domains_put(uncore, fw);
fw_domains_reset(uncore, uncore->fw_domains);
assert_forcewakes_inactive(uncore);
@@ -562,7 +568,7 @@ static void forcewake_early_sanitize(struct intel_uncore *uncore,
intel_uncore_forcewake_reset(uncore);
if (restore_forcewake) {
spin_lock_irq(&uncore->lock);
- uncore->funcs.force_wake_get(uncore, restore_forcewake);
+ fw_domains_get(uncore, restore_forcewake);
if (intel_uncore_has_fifo(uncore))
uncore->fifo_count = fifo_free_entries(uncore);
@@ -623,7 +629,7 @@ static void __intel_uncore_forcewake_get(struct intel_uncore *uncore,
}
if (fw_domains)
- uncore->funcs.force_wake_get(uncore, fw_domains);
+ fw_domains_get(uncore, fw_domains);
}
/**
@@ -644,7 +650,7 @@ void intel_uncore_forcewake_get(struct intel_uncore *uncore,
{
unsigned long irqflags;
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
assert_rpm_wakelock_held(uncore->rpm);
@@ -711,7 +717,7 @@ void intel_uncore_forcewake_get__locked(struct intel_uncore *uncore,
{
lockdep_assert_held(&uncore->lock);
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
__intel_uncore_forcewake_get(uncore, fw_domains);
@@ -733,7 +739,7 @@ static void __intel_uncore_forcewake_put(struct intel_uncore *uncore,
continue;
}
- uncore->funcs.force_wake_put(uncore, domain->mask);
+ fw_domains_put(uncore, domain->mask);
}
}
@@ -750,7 +756,7 @@ void intel_uncore_forcewake_put(struct intel_uncore *uncore,
{
unsigned long irqflags;
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
spin_lock_irqsave(&uncore->lock, irqflags);
@@ -769,7 +775,7 @@ void intel_uncore_forcewake_flush(struct intel_uncore *uncore,
struct intel_uncore_forcewake_domain *domain;
unsigned int tmp;
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
fw_domains &= uncore->fw_domains;
@@ -793,7 +799,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
{
lockdep_assert_held(&uncore->lock);
- if (!uncore->funcs.force_wake_put)
+ if (!uncore->fw_get_funcs)
return;
__intel_uncore_forcewake_put(uncore, fw_domains);
@@ -801,7 +807,7 @@ void intel_uncore_forcewake_put__locked(struct intel_uncore *uncore,
void assert_forcewakes_inactive(struct intel_uncore *uncore)
{
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
@@ -818,7 +824,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
return;
- if (!uncore->funcs.force_wake_get)
+ if (!uncore->fw_get_funcs)
return;
spin_lock_irq(&uncore->lock);
@@ -851,16 +857,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
}
/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000)
-
-#define __gen6_reg_read_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (NEEDS_FORCE_WAKE(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else \
- __fwd = 0; \
- __fwd; \
+#define NEEDS_FORCE_WAKE(reg) ({ \
+ u32 __reg = (reg); \
+ __reg < 0x40000 || __reg >= GEN11_BSD_RING_BASE; \
})
static int fw_range_cmp(u32 offset, const struct intel_forcewake_range *entry)
@@ -942,125 +941,146 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = {
__fwd; \
})
-#define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \
- find_fw_domain(uncore, offset)
-
-#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \
- find_fw_domain(uncore, offset)
-
/* *Must* be sorted by offset! See intel_shadow_table_check(). */
-static const i915_reg_t gen8_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(GEN6_BSD_RING_BASE), /* 0x12000 (base) */
- RING_TAIL(VEBOX_RING_BASE), /* 0x1a000 (base) */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
+static const struct i915_range gen8_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0x12030, .end = 0x12030 },
+ { .start = 0x1a030, .end = 0x1a030 },
+ { .start = 0x22030, .end = 0x22030 },
/* TODO: Other registers are not yet used */
};
-static const i915_reg_t gen11_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range gen11_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2550, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22230, .end = 0x22230 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0230, .end = 0x1C0230 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4230, .end = 0x1C4230 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8230, .end = 0x1C8230 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0230, .end = 0x1D0230 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4230, .end = 0x1D4230 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8230, .end = 0x1D8230 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
};
-static const i915_reg_t gen12_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range gen12_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4D4, .end = 0xC4D4 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+
+ /*
+ * The rest of these ranges are specific to Xe_HP and beyond, but
+ * are reserved/unused ranges on earlier gen12 platforms, so they can
+ * be safely added to the gen12 table.
+ */
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
};
-static const i915_reg_t xehp_shadowed_regs[] = {
- RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */
- RING_EXECLIST_CONTROL(RENDER_RING_BASE), /* 0x2550 */
- GEN6_RPNSWREQ, /* 0xA008 */
- GEN6_RC_VIDEO_FREQ, /* 0xA00C */
- RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */
- RING_EXECLIST_CONTROL(BLT_RING_BASE), /* 0x22550 */
- RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD_RING_BASE), /* 0x1C0550 */
- RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD2_RING_BASE), /* 0x1C4550 */
- RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX_RING_BASE), /* 0x1C8550 */
- RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD3_RING_BASE), /* 0x1D0550 */
- RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */
- RING_EXECLIST_CONTROL(GEN11_BSD4_RING_BASE), /* 0x1D4550 */
- RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */
- RING_EXECLIST_CONTROL(GEN11_VEBOX2_RING_BASE), /* 0x1D8550 */
- RING_TAIL(XEHP_BSD5_RING_BASE), /* 0x1E0000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD5_RING_BASE), /* 0x1E0550 */
- RING_TAIL(XEHP_BSD6_RING_BASE), /* 0x1E4000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD6_RING_BASE), /* 0x1E4550 */
- RING_TAIL(XEHP_VEBOX3_RING_BASE), /* 0x1E8000 (base) */
- RING_EXECLIST_CONTROL(XEHP_VEBOX3_RING_BASE), /* 0x1E8550 */
- RING_TAIL(XEHP_BSD7_RING_BASE), /* 0x1F0000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD7_RING_BASE), /* 0x1F0550 */
- RING_TAIL(XEHP_BSD8_RING_BASE), /* 0x1F4000 (base) */
- RING_EXECLIST_CONTROL(XEHP_BSD8_RING_BASE), /* 0x1F4550 */
- RING_TAIL(XEHP_VEBOX4_RING_BASE), /* 0x1F8000 (base) */
- RING_EXECLIST_CONTROL(XEHP_VEBOX4_RING_BASE), /* 0x1F8550 */
- /* TODO: Other registers are not yet used */
+static const struct i915_range dg2_shadowed_regs[] = {
+ { .start = 0x2030, .end = 0x2030 },
+ { .start = 0x2510, .end = 0x2550 },
+ { .start = 0xA008, .end = 0xA00C },
+ { .start = 0xA188, .end = 0xA188 },
+ { .start = 0xA278, .end = 0xA278 },
+ { .start = 0xA540, .end = 0xA56C },
+ { .start = 0xC4C8, .end = 0xC4C8 },
+ { .start = 0xC4E0, .end = 0xC4E0 },
+ { .start = 0xC600, .end = 0xC600 },
+ { .start = 0xC658, .end = 0xC658 },
+ { .start = 0x22030, .end = 0x22030 },
+ { .start = 0x22510, .end = 0x22550 },
+ { .start = 0x1C0030, .end = 0x1C0030 },
+ { .start = 0x1C0510, .end = 0x1C0550 },
+ { .start = 0x1C4030, .end = 0x1C4030 },
+ { .start = 0x1C4510, .end = 0x1C4550 },
+ { .start = 0x1C8030, .end = 0x1C8030 },
+ { .start = 0x1C8510, .end = 0x1C8550 },
+ { .start = 0x1D0030, .end = 0x1D0030 },
+ { .start = 0x1D0510, .end = 0x1D0550 },
+ { .start = 0x1D4030, .end = 0x1D4030 },
+ { .start = 0x1D4510, .end = 0x1D4550 },
+ { .start = 0x1D8030, .end = 0x1D8030 },
+ { .start = 0x1D8510, .end = 0x1D8550 },
+ { .start = 0x1E0030, .end = 0x1E0030 },
+ { .start = 0x1E0510, .end = 0x1E0550 },
+ { .start = 0x1E4030, .end = 0x1E4030 },
+ { .start = 0x1E4510, .end = 0x1E4550 },
+ { .start = 0x1E8030, .end = 0x1E8030 },
+ { .start = 0x1E8510, .end = 0x1E8550 },
+ { .start = 0x1F0030, .end = 0x1F0030 },
+ { .start = 0x1F0510, .end = 0x1F0550 },
+ { .start = 0x1F4030, .end = 0x1F4030 },
+ { .start = 0x1F4510, .end = 0x1F4550 },
+ { .start = 0x1F8030, .end = 0x1F8030 },
+ { .start = 0x1F8510, .end = 0x1F8550 },
};
-static int mmio_reg_cmp(u32 key, const i915_reg_t *reg)
+static int mmio_range_cmp(u32 key, const struct i915_range *range)
{
- u32 offset = i915_mmio_reg_offset(*reg);
-
- if (key < offset)
+ if (key < range->start)
return -1;
- else if (key > offset)
+ else if (key > range->end)
return 1;
else
return 0;
}
-#define __is_X_shadowed(x) \
-static bool is_##x##_shadowed(u32 offset) \
-{ \
- const i915_reg_t *regs = x##_shadowed_regs; \
- return BSEARCH(offset, regs, ARRAY_SIZE(x##_shadowed_regs), \
- mmio_reg_cmp); \
-}
+static bool is_shadowed(struct intel_uncore *uncore, u32 offset)
+{
+ if (drm_WARN_ON(&uncore->i915->drm, !uncore->shadowed_reg_table))
+ return false;
-__is_X_shadowed(gen8)
-__is_X_shadowed(gen11)
-__is_X_shadowed(gen12)
-__is_X_shadowed(xehp)
+ return BSEARCH(offset,
+ uncore->shadowed_reg_table,
+ uncore->shadowed_reg_table_entries,
+ mmio_range_cmp);
+}
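The shadow lookup is a plain binary search over sorted [start, end] ranges. A standalone userspace sketch of the same idea, assuming libc bsearch() (the in-kernel BSEARCH helper is assumed to wrap bsearch() with an equivalent key/element comparator convention):

#include <stdlib.h>

struct range { unsigned int start, end; };

static int range_cmp(const void *key, const void *elt)
{
	unsigned int offset = *(const unsigned int *)key;
	const struct range *r = elt;

	if (offset < r->start)
		return -1;
	if (offset > r->end)
		return 1;
	return 0;
}

/* Returns non-zero if offset falls inside any range of the sorted table. */
static int in_table(const struct range *table, size_t n, unsigned int offset)
{
	return bsearch(&offset, table, n, sizeof(*table), range_cmp) != NULL;
}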
static enum forcewake_domains
gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
@@ -1068,15 +1088,9 @@ gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
return FORCEWAKE_RENDER;
}
-#define __gen8_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd; \
- if (NEEDS_FORCE_WAKE(offset) && !is_gen8_shadowed(offset)) \
- __fwd = FORCEWAKE_RENDER; \
- else \
- __fwd = 0; \
- __fwd; \
-})
+static const struct intel_forcewake_range __gen6_fw_ranges[] = {
+ GEN_FW_RANGE(0x0, 0x3ffff, FORCEWAKE_RENDER),
+};
/* *Must* be sorted by offset ranges! See intel_fw_table_check(). */
static const struct intel_forcewake_range __chv_fw_ranges[] = {
@@ -1101,34 +1115,8 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = {
#define __fwtable_reg_write_fw_domains(uncore, offset) \
({ \
enum forcewake_domains __fwd = 0; \
- if (NEEDS_FORCE_WAKE((offset)) && !is_gen8_shadowed(offset)) \
- __fwd = find_fw_domain(uncore, offset); \
- __fwd; \
-})
-
-#define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
const u32 __offset = (offset); \
- if (!is_gen11_shadowed(__offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (!is_gen12_shadowed(__offset)) \
- __fwd = find_fw_domain(uncore, __offset); \
- __fwd; \
-})
-
-#define __xehp_fwtable_reg_write_fw_domains(uncore, offset) \
-({ \
- enum forcewake_domains __fwd = 0; \
- const u32 __offset = (offset); \
- if (!is_xehp_shadowed(__offset)) \
+ if (NEEDS_FORCE_WAKE((__offset)) && !is_shadowed(uncore, __offset)) \
__fwd = find_fw_domain(uncore, __offset); \
__fwd; \
})
@@ -1605,7 +1593,7 @@ static noinline void ___force_wake_auto(struct intel_uncore *uncore,
for_each_fw_domain_masked(domain, fw_domains, uncore, tmp)
fw_domain_arm_timer(domain);
- uncore->funcs.force_wake_get(uncore, fw_domains);
+ fw_domains_get(uncore, fw_domains);
}
static inline void __force_wake_auto(struct intel_uncore *uncore,
@@ -1621,35 +1609,30 @@ static inline void __force_wake_auto(struct intel_uncore *uncore,
___force_wake_auto(uncore, fw_domains);
}
-#define __gen_read(func, x) \
+#define __gen_fwtable_read(x) \
static u##x \
-func##_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { \
+fwtable_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) \
+{ \
enum forcewake_domains fw_engine; \
GEN6_READ_HEADER(x); \
- fw_engine = __##func##_reg_read_fw_domains(uncore, offset); \
+ fw_engine = __fwtable_reg_read_fw_domains(uncore, offset); \
if (fw_engine) \
__force_wake_auto(uncore, fw_engine); \
val = __raw_uncore_read##x(uncore, reg); \
GEN6_READ_FOOTER; \
}
-#define __gen_reg_read_funcs(func) \
-static enum forcewake_domains \
-func##_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
- return __##func##_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
-} \
-\
-__gen_read(func, 8) \
-__gen_read(func, 16) \
-__gen_read(func, 32) \
-__gen_read(func, 64)
+static enum forcewake_domains
+fwtable_reg_read_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) {
+ return __fwtable_reg_read_fw_domains(uncore, i915_mmio_reg_offset(reg));
+}
-__gen_reg_read_funcs(gen12_fwtable);
-__gen_reg_read_funcs(gen11_fwtable);
-__gen_reg_read_funcs(fwtable);
-__gen_reg_read_funcs(gen6);
+__gen_fwtable_read(8)
+__gen_fwtable_read(16)
+__gen_fwtable_read(32)
+__gen_fwtable_read(64)
-#undef __gen_reg_read_funcs
+#undef __gen_fwtable_read
#undef GEN6_READ_FOOTER
#undef GEN6_READ_HEADER
@@ -1714,35 +1697,29 @@ __gen6_write(8)
__gen6_write(16)
__gen6_write(32)
-#define __gen_write(func, x) \
+#define __gen_fwtable_write(x) \
static void \
-func##_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
+fwtable_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { \
enum forcewake_domains fw_engine; \
GEN6_WRITE_HEADER; \
- fw_engine = __##func##_reg_write_fw_domains(uncore, offset); \
+ fw_engine = __fwtable_reg_write_fw_domains(uncore, offset); \
if (fw_engine) \
__force_wake_auto(uncore, fw_engine); \
__raw_uncore_write##x(uncore, reg, val); \
GEN6_WRITE_FOOTER; \
}
-#define __gen_reg_write_funcs(func) \
-static enum forcewake_domains \
-func##_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) { \
- return __##func##_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg)); \
-} \
-\
-__gen_write(func, 8) \
-__gen_write(func, 16) \
-__gen_write(func, 32)
+static enum forcewake_domains
+fwtable_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg)
+{
+ return __fwtable_reg_write_fw_domains(uncore, i915_mmio_reg_offset(reg));
+}
-__gen_reg_write_funcs(xehp_fwtable);
-__gen_reg_write_funcs(gen12_fwtable);
-__gen_reg_write_funcs(gen11_fwtable);
-__gen_reg_write_funcs(fwtable);
-__gen_reg_write_funcs(gen8);
+__gen_fwtable_write(8)
+__gen_fwtable_write(16)
+__gen_fwtable_write(32)
-#undef __gen_reg_write_funcs
+#undef __gen_fwtable_write
#undef GEN6_WRITE_FOOTER
#undef GEN6_WRITE_HEADER
@@ -1866,6 +1843,18 @@ static void intel_uncore_fw_domains_fini(struct intel_uncore *uncore)
fw_domain_fini(uncore, d->id);
}
+static const struct intel_uncore_fw_get uncore_get_fallback = {
+ .force_wake_get = fw_domains_get_with_fallback
+};
+
+static const struct intel_uncore_fw_get uncore_get_normal = {
+ .force_wake_get = fw_domains_get_normal,
+};
+
+static const struct intel_uncore_fw_get uncore_get_thread_status = {
+ .force_wake_get = fw_domains_get_with_thread_status
+};
+
static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
{
struct drm_i915_private *i915 = uncore->i915;
@@ -1881,8 +1870,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
intel_engine_mask_t emask = INTEL_INFO(i915)->platform_engine_mask;
int i;
- uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_fallback;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
@@ -1907,8 +1895,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE_ACK_MEDIA_VEBOX_GEN11(i));
}
} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
- uncore->funcs.force_wake_get = fw_domains_get_with_fallback;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_fallback;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_RENDER_GEN9,
FORCEWAKE_ACK_RENDER_GEN9);
@@ -1918,16 +1905,13 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_GEN9, FORCEWAKE_ACK_MEDIA_GEN9);
} else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
- uncore->funcs.force_wake_get = fw_domains_get;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_normal;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_VLV, FORCEWAKE_ACK_VLV);
fw_domain_init(uncore, FW_DOMAIN_ID_MEDIA,
FORCEWAKE_MEDIA_VLV, FORCEWAKE_ACK_MEDIA_VLV);
} else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE_MT, FORCEWAKE_ACK_HSW);
} else if (IS_IVYBRIDGE(i915)) {
@@ -1942,9 +1926,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
* (correctly) interpreted by the test below as MT
* forcewake being disabled.
*/
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
/* We need to init first for ECOBUS access and then
* determine later if we want to reinit, in case of MT access is
@@ -1975,9 +1957,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
FORCEWAKE, FORCEWAKE_ACK);
}
} else if (GRAPHICS_VER(i915) == 6) {
- uncore->funcs.force_wake_get =
- fw_domains_get_with_thread_status;
- uncore->funcs.force_wake_put = fw_domains_put;
+ uncore->fw_get_funcs = &uncore_get_thread_status;
fw_domain_init(uncore, FW_DOMAIN_ID_RENDER,
FORCEWAKE, FORCEWAKE_ACK);
}
@@ -2001,6 +1981,12 @@ out:
(uncore)->fw_domains_table_entries = ARRAY_SIZE((d)); \
}
+#define ASSIGN_SHADOW_TABLE(uncore, d) \
+{ \
+ (uncore)->shadowed_reg_table = d; \
+ (uncore)->shadowed_reg_table_entries = ARRAY_SIZE((d)); \
+}
+
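Like ASSIGN_FW_DOMAINS_TABLE above it, this is a straight pair of field assignments; ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs), for example, expands to:

{
	(uncore)->shadowed_reg_table = gen12_shadowed_regs;
	(uncore)->shadowed_reg_table_entries = ARRAY_SIZE((gen12_shadowed_regs));
}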
static int i915_pmic_bus_access_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
@@ -2111,40 +2097,42 @@ static int uncore_forcewake_init(struct intel_uncore *uncore)
return ret;
forcewake_early_sanitize(uncore, 0);
+ ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
+
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __dg2_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, dg2_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __xehp_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, xehp_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) >= 12) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen12_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) == 11) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges);
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable);
+ ASSIGN_SHADOW_TABLE(uncore, gen11_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_GRAPHICS_VER(i915, 9, 10)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_CHERRYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __chv_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (GRAPHICS_VER(i915) == 8) {
- ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen8);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
+ ASSIGN_SHADOW_TABLE(uncore, gen8_shadowed_regs);
+ ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_VALLEYVIEW(i915)) {
ASSIGN_FW_DOMAINS_TABLE(uncore, __vlv_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
- ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable);
} else if (IS_GRAPHICS_VER(i915, 6, 7)) {
+ ASSIGN_FW_DOMAINS_TABLE(uncore, __gen6_fw_ranges);
ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen6);
- ASSIGN_READ_MMIO_VFUNCS(uncore, gen6);
}
uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier;
@@ -2186,8 +2174,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore)
}
/* make sure fw funcs are set if and only if we have fw*/
- GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_get);
- GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.force_wake_put);
+ GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->fw_get_funcs);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains);
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains);
diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h
index 3c0b0a8b5250..3248e4e2c540 100644
--- a/drivers/gpu/drm/i915/intel_uncore.h
+++ b/drivers/gpu/drm/i915/intel_uncore.h
@@ -84,12 +84,12 @@ enum forcewake_domains {
FORCEWAKE_ALL = BIT(FW_DOMAIN_ID_COUNT) - 1,
};
-struct intel_uncore_funcs {
+struct intel_uncore_fw_get {
void (*force_wake_get)(struct intel_uncore *uncore,
enum forcewake_domains domains);
- void (*force_wake_put)(struct intel_uncore *uncore,
- enum forcewake_domains domains);
+};
+struct intel_uncore_funcs {
enum forcewake_domains (*read_fw_domains)(struct intel_uncore *uncore,
i915_reg_t r);
enum forcewake_domains (*write_fw_domains)(struct intel_uncore *uncore,
@@ -119,6 +119,12 @@ struct intel_forcewake_range {
enum forcewake_domains domains;
};
+/* Other register ranges (e.g., shadow tables, MCR tables, etc.) */
+struct i915_range {
+ u32 start;
+ u32 end;
+};
+
struct intel_uncore {
void __iomem *regs;
@@ -136,7 +142,15 @@ struct intel_uncore {
const struct intel_forcewake_range *fw_domains_table;
unsigned int fw_domains_table_entries;
+ /*
+ * Shadowed registers are special cases where we can safely write
+ * to the register *without* grabbing forcewake.
+ */
+ const struct i915_range *shadowed_reg_table;
+ unsigned int shadowed_reg_table_entries;
+
struct notifier_block pmic_bus_access_nb;
+ const struct intel_uncore_fw_get *fw_get_funcs;
struct intel_uncore_funcs funcs;
unsigned int fifo_count;
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index 545c8f277c46..4f4c2e15e736 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -123,6 +123,12 @@ enum {
__INTEL_WAKEREF_PUT_LAST_BIT__
};
+static inline void
+intel_wakeref_might_get(struct intel_wakeref *wf)
+{
+ might_lock(&wf->mutex);
+}
+
/**
* intel_wakeref_put_flags: Release the wakeref
* @wf: the wakeref
@@ -170,6 +176,12 @@ intel_wakeref_put_delay(struct intel_wakeref *wf, unsigned long delay)
FIELD_PREP(INTEL_WAKEREF_PUT_DELAY, delay));
}
+static inline void
+intel_wakeref_might_put(struct intel_wakeref *wf)
+{
+ might_lock(&wf->mutex);
+}
+
/**
* intel_wakeref_lock: Lock the wakeref (mutex)
* @wf: the wakeref
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
new file mode 100644
index 000000000000..e2314ad9546d
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "gem/i915_gem_context.h"
+#include "gt/intel_context.h"
+#include "i915_drv.h"
+
+/**
+ * DOC: PXP
+ *
+ * PXP (Protected Xe Path) is a feature available in Gen12 and newer platforms.
+ * It allows execution and flip to display of protected (i.e. encrypted)
+ * objects. The SW support is enabled via the CONFIG_DRM_I915_PXP kconfig.
+ *
+ * Objects can opt-in to PXP encryption at creation time via the
+ * I915_GEM_CREATE_EXT_PROTECTED_CONTENT create_ext flag. For objects to be
+ * correctly protected they must be used in conjunction with a context created
+ * with the I915_CONTEXT_PARAM_PROTECTED_CONTENT flag. See the documentation
+ * of those two uapi flags for details and restrictions.
+ *
+ * Protected objects are tied to a pxp session; currently we only support one
+ * session, which i915 manages and whose index is available in the uapi
+ * (I915_PROTECTED_CONTENT_DEFAULT_SESSION) for use in instructions targeting
+ * protected objects.
+ * The session is invalidated by the HW when certain events occur (e.g.
+ * suspend/resume). When this happens, all the objects that were used with the
+ * session are marked as invalid and all contexts marked as using protected
+ * content are banned. Any further attempt at using them in an execbuf call is
+ * rejected, while flips are converted to black frames.
+ *
+ * Some of the PXP setup operations are performed by the Management Engine,
+ * which is handled by the mei driver; communication between i915 and mei is
+ * performed via the mei_pxp component module.
+ */
+
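[Aside: a hedged userspace-style sketch of the create/context flow described in the DOC block above. The struct layouts and extension chaining follow the include/uapi/drm/i915_drm.h definitions referenced by this series, but should be verified against the real header rather than taken verbatim.]

    /* Illustrative userspace sketch (not part of this patch): create a
     * protected BO and a protected, non-recoverable context. Assumes libdrm's
     * drmIoctl() and the create_ext / context-create extension uapi. */
    #include <stdint.h>
    #include <xf86drm.h>
    #include <drm/i915_drm.h>

    static int create_protected_bo_and_ctx(int fd, uint32_t *bo, uint32_t *ctx)
    {
            struct drm_i915_gem_create_ext_protected_content protected_ext = {
                    .base.name = I915_GEM_CREATE_EXT_PROTECTED_CONTENT,
            };
            struct drm_i915_gem_create_ext create = {
                    .size = 4096,
                    .extensions = (uintptr_t)&protected_ext,
            };
            /* protected contexts must also be marked non-recoverable */
            struct drm_i915_gem_context_create_ext_setparam p_protected = {
                    .base.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
                    .param = {
                            .param = I915_CONTEXT_PARAM_PROTECTED_CONTENT,
                            .value = 1,
                    },
            };
            struct drm_i915_gem_context_create_ext_setparam p_norecover = {
                    .base = {
                            .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
                            .next_extension = (uintptr_t)&p_protected,
                    },
                    .param = {
                            .param = I915_CONTEXT_PARAM_RECOVERABLE,
                            .value = 0,
                    },
            };
            struct drm_i915_gem_context_create_ext ctx_create = {
                    .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
                    .extensions = (uintptr_t)&p_norecover,
            };

            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                    return -1;
            if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, &ctx_create))
                    return -1;

            *bo = create.handle;
            *ctx = ctx_create.ctx_id;
            return 0;
    }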
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp)
+{
+ return container_of(pxp, struct intel_gt, pxp);
+}
+
+bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return pxp->arb_is_valid;
+}
+
+/* KCR register definitions */
+#define KCR_INIT _MMIO(0x320f0)
+/* Setting KCR Init bit is required after system boot */
+#define KCR_INIT_ALLOW_DISPLAY_ME_WRITES REG_BIT(14)
+
+static void kcr_pxp_enable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static void kcr_pxp_disable(struct intel_gt *gt)
+{
+ intel_uncore_write(gt->uncore, KCR_INIT,
+ _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES));
+}
+
+static int create_vcs_context(struct intel_pxp *pxp)
+{
+ static struct lock_class_key pxp_lock;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct intel_engine_cs *engine;
+ struct intel_context *ce;
+ int i;
+
+ /*
+ * Find the first VCS engine present. We're guaranteed there is one
+ * if we're in this function due to the check in has_pxp
+ */
+ for (i = 0, engine = NULL; !engine; i++)
+ engine = gt->engine_class[VIDEO_DECODE_CLASS][i];
+
+ GEM_BUG_ON(!engine || engine->class != VIDEO_DECODE_CLASS);
+
+ ce = intel_engine_create_pinned_context(engine, engine->gt->vm, SZ_4K,
+ I915_GEM_HWS_PXP_ADDR,
+ &pxp_lock, "pxp_context");
+ if (IS_ERR(ce)) {
+ drm_err(&gt->i915->drm, "failed to create VCS ctx for PXP\n");
+ return PTR_ERR(ce);
+ }
+
+ pxp->ce = ce;
+
+ return 0;
+}
+
+static void destroy_vcs_context(struct intel_pxp *pxp)
+{
+ intel_engine_destroy_pinned_context(fetch_and_zero(&pxp->ce));
+}
+
+void intel_pxp_init(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ if (!HAS_PXP(gt->i915))
+ return;
+
+ mutex_init(&pxp->tee_mutex);
+
+ /*
+ * we'll use the completion to check if there is a termination pending,
+ * so we start it as completed and we reinit it when a termination
+ * is triggered.
+ */
+ init_completion(&pxp->termination);
+ complete_all(&pxp->termination);
+
+ mutex_init(&pxp->arb_mutex);
+ INIT_WORK(&pxp->session_work, intel_pxp_session_work);
+
+ ret = create_vcs_context(pxp);
+ if (ret)
+ return;
+
+ ret = intel_pxp_tee_component_init(pxp);
+ if (ret)
+ goto out_context;
+
+ drm_info(&gt->i915->drm, "Protected Xe Path (PXP) protected content support initialized\n");
+
+ return;
+
+out_context:
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_fini(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ intel_pxp_tee_component_fini(pxp);
+
+ destroy_vcs_context(pxp);
+}
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp)
+{
+ pxp->arb_is_valid = false;
+ reinit_completion(&pxp->termination);
+}
+
+static void pxp_queue_termination(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We want to get the same effect as if we received a termination
+ * interrupt, so just pretend that we did.
+ */
+ spin_lock_irq(&gt->irq_lock);
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST;
+ queue_work(system_unbound_wq, &pxp->session_work);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+/*
+ * the arb session is restarted from the irq work when we receive the
+ * termination completion interrupt
+ */
+int intel_pxp_start(struct intel_pxp *pxp)
+{
+ int ret = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return -ENODEV;
+
+ mutex_lock(&pxp->arb_mutex);
+
+ if (pxp->arb_is_valid)
+ goto unlock;
+
+ pxp_queue_termination(pxp);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(250))) {
+ ret = -ETIMEDOUT;
+ goto unlock;
+ }
+
+ /* make sure the compiler doesn't optimize the double access */
+ barrier();
+
+ if (!pxp->arb_is_valid)
+ ret = -EIO;
+
+unlock:
+ mutex_unlock(&pxp->arb_mutex);
+ return ret;
+}
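[Aside: a hedged sketch (hypothetical wrapper name) of the calling pattern implied by intel_pxp_start() above.]

    /* Illustrative only: the wrapper name is hypothetical. */
    static int example_require_arb_session(struct intel_pxp *pxp)
    {
            int err;

            /*
             * Queues a termination, waits up to 250ms for the arb session to
             * come back, and reports -ENODEV / -ETIMEDOUT / -EIO on failure.
             */
            err = intel_pxp_start(pxp);
            if (err)
                    return err;

            /* arb session is valid; protected objects may now be submitted */
            return 0;
    }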
+
+void intel_pxp_init_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_enable(pxp_to_gt(pxp));
+ intel_pxp_irq_enable(pxp);
+}
+
+void intel_pxp_fini_hw(struct intel_pxp *pxp)
+{
+ kcr_pxp_disable(pxp_to_gt(pxp));
+
+ intel_pxp_irq_disable(pxp);
+}
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ if (!i915_gem_object_is_protected(obj))
+ return -EINVAL;
+
+ GEM_BUG_ON(!pxp->key_instance);
+
+ /*
+ * If this is the first time we're using this object, it's not
+ * encrypted yet; it will be encrypted with the current key, so mark it
+ * as such. If the object is already encrypted, check instead if the
+ * used key is still valid.
+ */
+ if (!obj->pxp_key_instance && assign)
+ obj->pxp_key_instance = pxp->key_instance;
+
+ if (obj->pxp_key_instance != pxp->key_instance)
+ return -ENOEXEC;
+
+ return 0;
+}
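[Aside: a worked example of the check above. An object first submitted while pxp->key_instance is 3 gets stamped with pxp_key_instance = 3 (when assign is true); if a later termination bumps key_instance to 4, any further intel_pxp_key_check() on that object returns -ENOEXEC, since its contents were encrypted with a key that no longer exists.]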
+
+void intel_pxp_invalidate(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_gem_context *ctx, *cn;
+
+ /* ban all contexts marked as protected */
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) {
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ if (!kref_get_unless_zero(&ctx->ref))
+ continue;
+
+ if (likely(!i915_gem_context_uses_protected_content(ctx))) {
+ i915_gem_context_put(ctx);
+ continue;
+ }
+
+ spin_unlock_irq(&i915->gem.contexts.lock);
+
+ /*
+ * By the time we get here we are either going to suspend with
+ * quiesced execution or the HW keys are already long gone, in
+ * which case it is pointless to attempt to close the context and
+ * wait for its execution; it will hang the GPU if it has not
+ * already. So, as a fast mitigation, we can ban the context as
+ * quickly as we can. That might race with the execbuffer, but
+ * currently this is the best that can be done.
+ */
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
+ intel_context_ban(ce, NULL);
+ i915_gem_context_unlock_engines(ctx);
+
+ /*
+ * The context has been banned, no need to keep the wakeref.
+ * This is safe from races because the only other place this
+ * is touched is context_release and we're holding a ctx ref
+ */
+ if (ctx->pxp_wakeref) {
+ intel_runtime_pm_put(&i915->runtime_pm,
+ ctx->pxp_wakeref);
+ ctx->pxp_wakeref = 0;
+ }
+
+ spin_lock_irq(&i915->gem.contexts.lock);
+ list_safe_reset_next(ctx, cn, link);
+ i915_gem_context_put(ctx);
+ }
+ spin_unlock_irq(&i915->gem.contexts.lock);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.h b/drivers/gpu/drm/i915/pxp/intel_pxp.h
new file mode 100644
index 000000000000..aa262258d4d4
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_H__
+#define __INTEL_PXP_H__
+
+#include "intel_pxp_types.h"
+
+struct drm_i915_gem_object;
+
+static inline bool intel_pxp_is_enabled(const struct intel_pxp *pxp)
+{
+ return pxp->ce;
+}
+
+#ifdef CONFIG_DRM_I915_PXP
+struct intel_gt *pxp_to_gt(const struct intel_pxp *pxp);
+bool intel_pxp_is_active(const struct intel_pxp *pxp);
+
+void intel_pxp_init(struct intel_pxp *pxp);
+void intel_pxp_fini(struct intel_pxp *pxp);
+
+void intel_pxp_init_hw(struct intel_pxp *pxp);
+void intel_pxp_fini_hw(struct intel_pxp *pxp);
+
+void intel_pxp_mark_termination_in_progress(struct intel_pxp *pxp);
+
+int intel_pxp_start(struct intel_pxp *pxp);
+
+int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign);
+
+void intel_pxp_invalidate(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_init(struct intel_pxp *pxp)
+{
+}
+
+static inline void intel_pxp_fini(struct intel_pxp *pxp)
+{
+}
+
+static inline int intel_pxp_start(struct intel_pxp *pxp)
+{
+ return -ENODEV;
+}
+
+static inline bool intel_pxp_is_active(const struct intel_pxp *pxp)
+{
+ return false;
+}
+
+static inline int intel_pxp_key_check(struct intel_pxp *pxp,
+ struct drm_i915_gem_object *obj,
+ bool assign)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __INTEL_PXP_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
new file mode 100644
index 000000000000..f41e45763d0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.c
@@ -0,0 +1,141 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_context.h"
+#include "gt/intel_engine_pm.h"
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_trace.h"
+
+/* stall until prior PXP and MFX/HCP/HUC objects are completed */
+#define MFX_WAIT_PXP (MFX_WAIT | \
+ MFX_WAIT_DW0_PXP_SYNC_CONTROL_FLAG | \
+ MFX_WAIT_DW0_MFX_SYNC_CONTROL_FLAG)
+
+static u32 *pxp_emit_session_selection(u32 *cs, u32 idx)
+{
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp off */
+ *cs++ = MI_FLUSH_DW;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* select session */
+ *cs++ = MI_SET_APPID | MI_SET_APPID_SESSION_ID(idx);
+
+ *cs++ = MFX_WAIT_PXP;
+
+ /* pxp on */
+ *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_PROTECTED_MEM_EN |
+ MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
+ *cs++ = I915_GEM_HWS_PXP_ADDR | MI_FLUSH_DW_USE_GTT;
+ *cs++ = 0;
+
+ *cs++ = MFX_WAIT_PXP;
+
+ return cs;
+}
+
+static u32 *pxp_emit_inline_termination(u32 *cs)
+{
+ /* session inline termination */
+ *cs++ = CRYPTO_KEY_EXCHANGE;
+ *cs++ = 0;
+
+ return cs;
+}
+
+static u32 *pxp_emit_session_termination(u32 *cs, u32 idx)
+{
+ cs = pxp_emit_session_selection(cs, idx);
+ cs = pxp_emit_inline_termination(cs);
+
+ return cs;
+}
+
+static u32 *pxp_emit_wait(u32 *cs)
+{
+ /* wait for cmds to go through */
+ *cs++ = MFX_WAIT_PXP;
+ *cs++ = 0;
+
+ return cs;
+}
+
+/*
+ * if we ever need to terminate more than one session, we can submit multiple
+ * selections and terminations back-to-back with a single wait at the end
+ */
+#define SELECTION_LEN 10
+#define TERMINATION_LEN 2
+#define SESSION_TERMINATION_LEN(x) ((SELECTION_LEN + TERMINATION_LEN) * (x))
+#define WAIT_LEN 2
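[Aside: these lengths match the emitters above. pxp_emit_session_selection() writes 10 dwords (three MFX_WAIT_PXP dwords, two 3-dword MI_FLUSH_DW packets and one MI_SET_APPID), pxp_emit_inline_termination() writes 2 and pxp_emit_wait() writes 2, so intel_pxp_terminate_session() below reserves SESSION_TERMINATION_LEN(1) + WAIT_LEN = 14 dwords.]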
+
+static void pxp_request_commit(struct i915_request *rq)
+{
+ struct i915_sched_attr attr = { .priority = I915_PRIORITY_MAX };
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+ __i915_request_commit(rq);
+ __i915_request_queue(rq, &attr);
+
+ mutex_unlock(&tl->mutex);
+}
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 id)
+{
+ struct i915_request *rq;
+ struct intel_context *ce = pxp->ce;
+ u32 *cs;
+ int err = 0;
+
+ if (!intel_pxp_is_enabled(pxp))
+ return 0;
+
+ rq = i915_request_create(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ if (ce->engine->emit_init_breadcrumb) {
+ err = ce->engine->emit_init_breadcrumb(rq);
+ if (err)
+ goto out_rq;
+ }
+
+ cs = intel_ring_begin(rq, SESSION_TERMINATION_LEN(1) + WAIT_LEN);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto out_rq;
+ }
+
+ cs = pxp_emit_session_termination(cs, id);
+ cs = pxp_emit_wait(cs);
+
+ intel_ring_advance(rq, cs);
+
+out_rq:
+ i915_request_get(rq);
+
+ if (unlikely(err))
+ i915_request_set_error_once(rq, err);
+
+ pxp_request_commit(rq);
+
+ if (!err && i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+
+ i915_request_put(rq);
+
+ return err;
+}
+
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
new file mode 100644
index 000000000000..6d6299543578
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_cmd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_CMD_H__
+#define __INTEL_PXP_CMD_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+int intel_pxp_terminate_session(struct intel_pxp *pxp, u32 idx);
+
+#endif /* __INTEL_PXP_CMD_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
new file mode 100644
index 000000000000..10e1e45471f1
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <linux/debugfs.h>
+#include <drm/drm_print.h>
+
+#include "gt/intel_gt_debugfs.h"
+#include "pxp/intel_pxp.h"
+#include "pxp/intel_pxp_irq.h"
+#include "i915_drv.h"
+
+static int pxp_info_show(struct seq_file *m, void *data)
+{
+ struct intel_pxp *pxp = m->private;
+ struct drm_printer p = drm_seq_file_printer(m);
+ bool enabled = intel_pxp_is_enabled(pxp);
+
+ if (!enabled) {
+ drm_printf(&p, "pxp disabled\n");
+ return 0;
+ }
+
+ drm_printf(&p, "active: %s\n", yesno(intel_pxp_is_active(pxp)));
+ drm_printf(&p, "instance counter: %u\n", pxp->key_instance);
+
+ return 0;
+}
+DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(pxp_info);
+
+static int pxp_terminate_get(void *data, u64 *val)
+{
+ /* nothing to read */
+ return -EPERM;
+}
+
+static int pxp_terminate_set(void *data, u64 val)
+{
+ struct intel_pxp *pxp = data;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (!intel_pxp_is_active(pxp))
+ return -ENODEV;
+
+ /* simulate a termination interrupt */
+ spin_lock_irq(&gt->irq_lock);
+ intel_pxp_irq_handler(pxp, GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT);
+ spin_unlock_irq(&gt->irq_lock);
+
+ if (!wait_for_completion_timeout(&pxp->termination,
+ msecs_to_jiffies(100)))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set, "%llx\n");
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *gt_root)
+{
+ static const struct intel_gt_debugfs_file files[] = {
+ { "info", &pxp_info_fops, NULL },
+ { "terminate_state", &pxp_terminate_fops, NULL },
+ };
+ struct dentry *root;
+
+ if (!gt_root)
+ return;
+
+ if (!HAS_PXP((pxp_to_gt(pxp)->i915)))
+ return;
+
+ root = debugfs_create_dir("pxp", gt_root);
+ if (IS_ERR(root))
+ return;
+
+ intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), pxp);
+}
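[Aside, as a usage note (the exact debugfs root can vary by kernel version): this exposes something like /sys/kernel/debug/dri/0/gt/pxp/info to report whether PXP is active and the current key instance, and a termination can be forced with e.g. echo 1 > /sys/kernel/debug/dri/0/gt/pxp/terminate_state, which returns -ETIMEDOUT if the completion does not signal within 100ms.]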
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
new file mode 100644
index 000000000000..7e0c3d2f5d7e
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __INTEL_PXP_DEBUGFS_H__
+#define __INTEL_PXP_DEBUGFS_H__
+
+struct intel_pxp;
+struct dentry;
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root);
+#else
+static inline void
+intel_pxp_debugfs_register(struct intel_pxp *pxp, struct dentry *root)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
new file mode 100644
index 000000000000..8d5553772ded
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -0,0 +1,101 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+#include <linux/workqueue.h>
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_session.h"
+#include "gt/intel_gt_irq.h"
+#include "gt/intel_gt_types.h"
+#include "i915_irq.h"
+#include "i915_reg.h"
+#include "intel_runtime_pm.h"
+
+/**
+ * intel_pxp_irq_handler - Handles PXP interrupts.
+ * @pxp: pointer to pxp struct
+ * @iir: interrupt vector
+ */
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ if (GEM_WARN_ON(!intel_pxp_is_enabled(pxp)))
+ return;
+
+ lockdep_assert_held(&gt->irq_lock);
+
+ if (unlikely(!iir))
+ return;
+
+ if (iir & (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT |
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT)) {
+ /* immediately mark PXP as inactive on termination */
+ intel_pxp_mark_termination_in_progress(pxp);
+ pxp->session_events |= PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED;
+ }
+
+ if (iir & GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+ pxp->session_events |= PXP_TERMINATION_COMPLETE;
+
+ if (pxp->session_events)
+ queue_work(system_unbound_wq, &pxp->session_work);
+}
+
+static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
+{
+ struct intel_uncore *uncore = gt->uncore;
+ const u32 mask = interrupts << 16;
+
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_ENABLE, mask);
+ intel_uncore_write(uncore, GEN11_CRYPTO_RSVD_INTR_MASK, ~mask);
+}
+
+static inline void pxp_irq_reset(struct intel_gt *gt)
+{
+ spin_lock_irq(&gt->irq_lock);
+ gen11_gt_reset_one_iir(gt, 0, GEN11_KCR);
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void intel_pxp_irq_enable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ spin_lock_irq(&gt->irq_lock);
+
+ if (!pxp->irq_enabled)
+ WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_KCR));
+
+ __pxp_set_interrupts(gt, GEN12_PXP_INTERRUPTS);
+ pxp->irq_enabled = true;
+
+ spin_unlock_irq(&gt->irq_lock);
+}
+
+void intel_pxp_irq_disable(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /*
+ * We always need to submit a global termination when we re-enable the
+ * interrupts, so there is no need to make sure that the session state
+ * makes sense at the end of this function. Just make sure this is not
+ * called in a path where the driver considers the session valid and
+ * doesn't issue a termination on restart.
+ */
+ GEM_WARN_ON(intel_pxp_is_active(pxp));
+
+ spin_lock_irq(&gt->irq_lock);
+
+ pxp->irq_enabled = false;
+ __pxp_set_interrupts(gt, 0);
+
+ spin_unlock_irq(&gt->irq_lock);
+ intel_synchronize_irq(gt->i915);
+
+ pxp_irq_reset(gt);
+
+ flush_work(&pxp->session_work);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
new file mode 100644
index 000000000000..8b5793654844
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_IRQ_H__
+#define __INTEL_PXP_IRQ_H__
+
+#include <linux/types.h>
+
+struct intel_pxp;
+
+#define GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT BIT(1)
+#define GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT BIT(2)
+#define GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT BIT(3)
+
+#define GEN12_PXP_INTERRUPTS \
+ (GEN12_DISPLAY_PXP_STATE_TERMINATED_INTERRUPT | \
+ GEN12_DISPLAY_APP_TERMINATED_PER_FW_REQ_INTERRUPT | \
+ GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT)
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_irq_enable(struct intel_pxp *pxp);
+void intel_pxp_irq_disable(struct intel_pxp *pxp);
+void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir);
+#else
+static inline void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_IRQ_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
new file mode 100644
index 000000000000..23fd86de5a24
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include "intel_pxp.h"
+#include "intel_pxp_irq.h"
+#include "intel_pxp_pm.h"
+#include "intel_pxp_session.h"
+
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ pxp->arb_is_valid = false;
+
+ /*
+ * Contexts using protected objects keep a runtime PM reference, so we
+ * can only runtime suspend when all of them have been either closed
+ * or banned. Therefore, there is no need to invalidate in that
+ * scenario.
+ */
+ if (!runtime)
+ intel_pxp_invalidate(pxp);
+
+ intel_pxp_fini_hw(pxp);
+
+ pxp->hw_state_invalidated = false;
+}
+
+void intel_pxp_resume(struct intel_pxp *pxp)
+{
+ if (!intel_pxp_is_enabled(pxp))
+ return;
+
+ /*
+ * The PXP component gets automatically unbound when we go into S3 and
+ * re-bound after we come out, so in that scenario we can defer the
+ * hw init to the bind call.
+ */
+ if (!pxp->pxp_component)
+ return;
+
+ intel_pxp_init_hw(pxp);
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
new file mode 100644
index 000000000000..c89e97a0c3d0
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_pm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_PM_H__
+#define __INTEL_PXP_PM_H__
+
+#include "intel_pxp_types.h"
+
+#ifdef CONFIG_DRM_I915_PXP
+void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime);
+void intel_pxp_resume(struct intel_pxp *pxp);
+#else
+static inline void intel_pxp_suspend(struct intel_pxp *pxp, bool runtime)
+{
+}
+
+static inline void intel_pxp_resume(struct intel_pxp *pxp)
+{
+}
+#endif
+
+#endif /* __INTEL_PXP_PM_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.c b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
new file mode 100644
index 000000000000..d02732f04757
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#include "drm/i915_drm.h"
+#include "i915_drv.h"
+
+#include "intel_pxp.h"
+#include "intel_pxp_cmd.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_types.h"
+
+#define ARB_SESSION I915_PROTECTED_CONTENT_DEFAULT_SESSION /* shorter define */
+
+#define GEN12_KCR_SIP _MMIO(0x32260) /* KCR hwdrm session in play 0-31 */
+
+/* PXP global terminate register for session termination */
+#define PXP_GLOBAL_TERMINATE _MMIO(0x320f8)
+
+static bool intel_pxp_session_is_in_play(struct intel_pxp *pxp, u32 id)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 sip = 0;
+
+ /* if we're suspended the session is considered off */
+ with_intel_runtime_pm_if_in_use(uncore->rpm, wakeref)
+ sip = intel_uncore_read(uncore, GEN12_KCR_SIP);
+
+ return sip & BIT(id);
+}
+
+static int pxp_wait_for_session_state(struct intel_pxp *pxp, u32 id, bool in_play)
+{
+ struct intel_uncore *uncore = pxp_to_gt(pxp)->uncore;
+ intel_wakeref_t wakeref;
+ u32 mask = BIT(id);
+ int ret;
+
+ /* if we're suspended the session is considered off */
+ wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm);
+ if (!wakeref)
+ return in_play ? -ENODEV : 0;
+
+ ret = intel_wait_for_register(uncore,
+ GEN12_KCR_SIP,
+ mask,
+ in_play ? mask : 0,
+ 100);
+
+ intel_runtime_pm_put(uncore->rpm, wakeref);
+
+ return ret;
+}
+
+static int pxp_create_arb_session(struct intel_pxp *pxp)
+{
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ int ret;
+
+ pxp->arb_is_valid = false;
+
+ if (intel_pxp_session_is_in_play(pxp, ARB_SESSION)) {
+ drm_err(&gt->i915->drm, "arb session already in play at creation time\n");
+ return -EEXIST;
+ }
+
+ ret = intel_pxp_tee_cmd_create_arb_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "tee cmd for arb session creation failed\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, true);
+ if (ret) {
+ drm_err(&gt->i915->drm, "arb session failed to go in play\n");
+ return ret;
+ }
+
+ if (!++pxp->key_instance)
+ ++pxp->key_instance;
+
+ pxp->arb_is_valid = true;
+
+ return 0;
+}
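[Aside on the double increment above: intel_pxp_key_check() treats a pxp_key_instance of 0 as "not yet encrypted", so skipping 0 when the 32-bit counter wraps guarantees a fresh arb session can never be confused with an unused object.]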
+
+static int pxp_terminate_arb_session_and_global(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+
+ /* termination must be marked as in progress before calling this function */
+ GEM_WARN_ON(pxp->arb_is_valid);
+
+ /* terminate the hw sessions */
+ ret = intel_pxp_terminate_session(pxp, ARB_SESSION);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Failed to submit session termination\n");
+ return ret;
+ }
+
+ ret = pxp_wait_for_session_state(pxp, ARB_SESSION, false);
+ if (ret) {
+ drm_err(&gt->i915->drm, "Session state did not clear\n");
+ return ret;
+ }
+
+ intel_uncore_write(gt->uncore, PXP_GLOBAL_TERMINATE, 1);
+
+ return ret;
+}
+
+static void pxp_terminate(struct intel_pxp *pxp)
+{
+ int ret;
+
+ pxp->hw_state_invalidated = true;
+
+ /*
+ * if we fail to submit the termination there is no point in waiting for
+ * it to complete. PXP will be marked as non-active until the next
+ * termination is issued.
+ */
+ ret = pxp_terminate_arb_session_and_global(pxp);
+ if (ret)
+ complete_all(&pxp->termination);
+}
+
+static void pxp_terminate_complete(struct intel_pxp *pxp)
+{
+ /* Re-create the arb session after teardown handling completes */
+ if (fetch_and_zero(&pxp->hw_state_invalidated))
+ pxp_create_arb_session(pxp);
+
+ complete_all(&pxp->termination);
+}
+
+void intel_pxp_session_work(struct work_struct *work)
+{
+ struct intel_pxp *pxp = container_of(work, typeof(*pxp), session_work);
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ intel_wakeref_t wakeref;
+ u32 events = 0;
+
+ spin_lock_irq(&gt->irq_lock);
+ events = fetch_and_zero(&pxp->session_events);
+ spin_unlock_irq(&gt->irq_lock);
+
+ if (!events)
+ return;
+
+ if (events & PXP_INVAL_REQUIRED)
+ intel_pxp_invalidate(pxp);
+
+ /*
+ * If we're processing an event while suspending then don't bother,
+ * we're going to re-init everything on resume anyway.
+ */
+ wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
+ if (!wakeref)
+ return;
+
+ if (events & PXP_TERMINATION_REQUEST) {
+ events &= ~PXP_TERMINATION_COMPLETE;
+ pxp_terminate(pxp);
+ }
+
+ if (events & PXP_TERMINATION_COMPLETE)
+ pxp_terminate_complete(pxp);
+
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
+}
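[Aside, putting the pieces together: on a termination interrupt the irq handler marks termination in progress and sets PXP_TERMINATION_REQUEST | PXP_INVAL_REQUIRED; this worker then invalidates protected objects/contexts and submits the termination via pxp_terminate(); when the HW raises GEN12_DISPLAY_STATE_RESET_COMPLETE_INTERRUPT a second pass sees PXP_TERMINATION_COMPLETE and pxp_terminate_complete() re-creates the arb session and completes pxp->termination.]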
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_session.h b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
new file mode 100644
index 000000000000..ba4c9d2b94b7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_session.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_SESSION_H__
+#define __INTEL_PXP_SESSION_H__
+
+#include <linux/types.h>
+
+struct work_struct;
+
+void intel_pxp_session_work(struct work_struct *work);
+
+#endif /* __INTEL_PXP_SESSION_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
new file mode 100644
index 000000000000..49508f31dcb7
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c
@@ -0,0 +1,172 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#include <linux/component.h>
+#include "drm/i915_pxp_tee_interface.h"
+#include "drm/i915_component.h"
+#include "i915_drv.h"
+#include "intel_pxp.h"
+#include "intel_pxp_session.h"
+#include "intel_pxp_tee.h"
+#include "intel_pxp_tee_interface.h"
+
+static inline struct intel_pxp *i915_dev_to_pxp(struct device *i915_kdev)
+{
+ return &kdev_to_i915(i915_kdev)->gt.pxp;
+}
+
+static int intel_pxp_tee_io_message(struct intel_pxp *pxp,
+ void *msg_in, u32 msg_in_size,
+ void *msg_out, u32 msg_out_max_size,
+ u32 *msg_out_rcv_size)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct i915_pxp_component *pxp_component = pxp->pxp_component;
+ int ret = 0;
+
+ mutex_lock(&pxp->tee_mutex);
+
+ /*
+ * The binding of the component is asynchronous from i915 probe, so we
+ * can't be sure it has happened.
+ */
+ if (!pxp_component) {
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->send(pxp_component->tee_dev, msg_in, msg_in_size);
+ if (ret) {
+ drm_err(&i915->drm, "Failed to send PXP TEE message\n");
+ goto unlock;
+ }
+
+ ret = pxp_component->ops->recv(pxp_component->tee_dev, msg_out, msg_out_max_size);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to receive PXP TEE message\n");
+ goto unlock;
+ }
+
+ if (ret > msg_out_max_size) {
+ drm_err(&i915->drm,
+ "Failed to receive PXP TEE message due to unexpected output size\n");
+ ret = -ENOSPC;
+ goto unlock;
+ }
+
+ if (msg_out_rcv_size)
+ *msg_out_rcv_size = ret;
+
+ ret = 0;
+unlock:
+ mutex_unlock(&pxp->tee_mutex);
+ return ret;
+}
+
+/**
+ * i915_pxp_tee_component_bind - bind function to pass the function pointers to pxp_tee
+ * @i915_kdev: pointer to i915 kernel device
+ * @tee_kdev: pointer to tee kernel device
+ * @data: pointer to pxp_tee_master containing the function pointers
+ *
+ * This bind function is called during system boot or on resume from system sleep.
+ *
+ * Return: return 0 if successful.
+ */
+static int i915_pxp_tee_component_bind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+ intel_wakeref_t wakeref;
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = data;
+ pxp->pxp_component->tee_dev = tee_kdev;
+ mutex_unlock(&pxp->tee_mutex);
+
+ /* if we are suspended, the HW will be re-initialized on resume */
+ wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
+ if (!wakeref)
+ return 0;
+
+ /* the component is required to fully start the PXP HW */
+ intel_pxp_init_hw(pxp);
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void i915_pxp_tee_component_unbind(struct device *i915_kdev,
+ struct device *tee_kdev, void *data)
+{
+ struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev);
+
+ intel_pxp_fini_hw(pxp);
+
+ mutex_lock(&pxp->tee_mutex);
+ pxp->pxp_component = NULL;
+ mutex_unlock(&pxp->tee_mutex);
+}
+
+static const struct component_ops i915_pxp_tee_component_ops = {
+ .bind = i915_pxp_tee_component_bind,
+ .unbind = i915_pxp_tee_component_unbind,
+};
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp)
+{
+ int ret;
+ struct intel_gt *gt = pxp_to_gt(pxp);
+ struct drm_i915_private *i915 = gt->i915;
+
+ ret = component_add_typed(i915->drm.dev, &i915_pxp_tee_component_ops,
+ I915_COMPONENT_PXP);
+ if (ret < 0) {
+ drm_err(&i915->drm, "Failed to add PXP component (%d)\n", ret);
+ return ret;
+ }
+
+ pxp->pxp_component_added = true;
+
+ return 0;
+}
+
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+
+ if (!pxp->pxp_component_added)
+ return;
+
+ component_del(i915->drm.dev, &i915_pxp_tee_component_ops);
+ pxp->pxp_component_added = false;
+}
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id)
+{
+ struct drm_i915_private *i915 = pxp_to_gt(pxp)->i915;
+ struct pxp_tee_create_arb_in msg_in = {0};
+ struct pxp_tee_create_arb_out msg_out = {0};
+ int ret;
+
+ msg_in.header.api_version = PXP_TEE_APIVER;
+ msg_in.header.command_id = PXP_TEE_ARB_CMDID;
+ msg_in.header.buffer_len = sizeof(msg_in) - sizeof(msg_in.header);
+ msg_in.protection_mode = PXP_TEE_ARB_PROTECTION_MODE;
+ msg_in.session_id = arb_session_id;
+
+ ret = intel_pxp_tee_io_message(pxp,
+ &msg_in, sizeof(msg_in),
+ &msg_out, sizeof(msg_out),
+ NULL);
+
+ if (ret)
+ drm_err(&i915->drm, "Failed to send tee msg ret=[%d]\n", ret);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
new file mode 100644
index 000000000000..c136053ce340
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_H__
+#define __INTEL_PXP_TEE_H__
+
+#include "intel_pxp.h"
+
+int intel_pxp_tee_component_init(struct intel_pxp *pxp);
+void intel_pxp_tee_component_fini(struct intel_pxp *pxp);
+
+int intel_pxp_tee_cmd_create_arb_session(struct intel_pxp *pxp,
+ int arb_session_id);
+
+#endif /* __INTEL_PXP_TEE_H__ */
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
new file mode 100644
index 000000000000..36e9b0868f5c
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee_interface.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TEE_INTERFACE_H__
+#define __INTEL_PXP_TEE_INTERFACE_H__
+
+#include <linux/types.h>
+
+#define PXP_TEE_APIVER 0x40002
+#define PXP_TEE_ARB_CMDID 0x1e
+#define PXP_TEE_ARB_PROTECTION_MODE 0x2
+
+/* PXP TEE message header */
+struct pxp_tee_cmd_header {
+ u32 api_version;
+ u32 command_id;
+ u32 status;
+ /* Length of the message (excluding the header) */
+ u32 buffer_len;
+} __packed;
+
+/* PXP TEE message input to create an arbitrary session */
+struct pxp_tee_create_arb_in {
+ struct pxp_tee_cmd_header header;
+ u32 protection_mode;
+ u32 session_id;
+} __packed;
+
+/* PXP TEE message output to create an arbitrary session */
+struct pxp_tee_create_arb_out {
+ struct pxp_tee_cmd_header header;
+} __packed;
+
+#endif /* __INTEL_PXP_TEE_INTERFACE_H__ */
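[Aside: for the arb-session creation built in intel_pxp_tee_cmd_create_arb_session() above, this layout gives a 24-byte input message (16-byte header plus two u32 fields) with header.buffer_len = 8, and a header-only 16-byte output.]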
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_types.h b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
new file mode 100644
index 000000000000..73ef7d1754e1
--- /dev/null
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_types.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2020, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_PXP_TYPES_H__
+#define __INTEL_PXP_TYPES_H__
+
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+struct intel_context;
+struct i915_pxp_component;
+
+/**
+ * struct intel_pxp - pxp state
+ */
+struct intel_pxp {
+ /**
+ * @pxp_component: i915_pxp_component struct of the bound mei_pxp
+ * module. Only set and cleared inside component bind/unbind functions,
+ * which are protected by &tee_mutex.
+ */
+ struct i915_pxp_component *pxp_component;
+ /**
+ * @pxp_component_added: track if the pxp component has been added.
+ * Set and cleared in tee init and fini functions respectively.
+ */
+ bool pxp_component_added;
+
+ /** @ce: kernel-owned context used for PXP operations */
+ struct intel_context *ce;
+
+ /** @arb_mutex: protects arb session start */
+ struct mutex arb_mutex;
+ /**
+ * @arb_is_valid: tracks arb session status.
+ * After a teardown, the arb session can still be in play on the HW
+ * even if the keys are gone, so we can't rely on the HW state of the
+ * session to know if it's valid and need to track the status in SW.
+ */
+ bool arb_is_valid;
+
+ /**
+ * @key_instance: tracks which key instance we're on, so we can use it
+ * to determine if an object was created using the current key or a
+ * previous one.
+ */
+ u32 key_instance;
+
+ /** @tee_mutex: protects the tee channel binding and messaging. */
+ struct mutex tee_mutex;
+
+ /**
+ * @hw_state_invalidated: if the HW perceives an attack on the integrity
+ * of the encryption it will invalidate the keys and expect SW to
+ * re-initialize the session. We keep track of this state to make sure
+ * we only re-start the arb session when required.
+ */
+ bool hw_state_invalidated;
+
+ /** @irq_enabled: tracks the status of the kcr irqs */
+ bool irq_enabled;
+ /**
+ * @termination: tracks the status of a pending termination. Only
+ * re-initialized under gt->irq_lock and completed in &session_work.
+ */
+ struct completion termination;
+
+ /** @session_work: worker that manages session events. */
+ struct work_struct session_work;
+ /** @session_events: pending session events, protected with gt->irq_lock. */
+ u32 session_events;
+#define PXP_TERMINATION_REQUEST BIT(0)
+#define PXP_TERMINATION_COMPLETE BIT(1)
+#define PXP_INVAL_REQUIRED BIT(2)
+};
+
+#endif /* __INTEL_PXP_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
index f843a5040706..46f4236039a9 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
@@ -155,7 +155,7 @@ static int igt_ppgtt_alloc(void *arg)
if (!HAS_PPGTT(dev_priv))
return 0;
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt))
return PTR_ERR(ppgtt);
@@ -1053,7 +1053,7 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv,
if (IS_ERR(file))
return PTR_ERR(file);
- ppgtt = i915_ppgtt_create(&dev_priv->gt);
+ ppgtt = i915_ppgtt_create(&dev_priv->gt, 0);
if (IS_ERR(ppgtt)) {
err = PTR_ERR(ppgtt);
goto out_free;
@@ -1300,7 +1300,7 @@ static int exercise_mock(struct drm_i915_private *i915,
if (!ctx)
return -ENOMEM;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
err = func(vm, 0, min(vm->total, limit), end_time);
i915_vm_put(vm);
@@ -1848,7 +1848,7 @@ static int igt_cs_tlb(void *arg)
goto out_unlock;
}
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
if (i915_is_ggtt(vm))
goto out_vm;
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index cfa5c4165a4f..bdd290f2bf3c 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -47,5 +47,7 @@ selftest(execlists, intel_execlists_live_selftests)
selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
selftest(slpc, intel_slpc_live_selftests)
+selftest(guc, intel_guc_live_selftests)
+selftest(guc_multi_lrc, intel_guc_multi_lrc_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c
index dd0607254a95..1f10fe36619b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_vma.c
+++ b/drivers/gpu/drm/i915/selftests/i915_vma.c
@@ -39,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma,
{
bool ok = true;
- if (vma->vm != rcu_access_pointer(ctx->vm)) {
+ if (vma->vm != ctx->vm) {
pr_err("VMA created with wrong VM\n");
ok = false;
}
@@ -118,7 +118,7 @@ static int create_vmas(struct drm_i915_private *i915,
struct i915_vma *vma;
int err;
- vm = i915_gem_context_get_vm_rcu(ctx);
+ vm = i915_gem_context_get_eb_vm(ctx);
vma = checked_vma_instance(obj, vm, NULL);
i915_vm_put(vm);
if (IS_ERR(vma))
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
index 4b328346b48a..310fb83c527e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.c
@@ -14,6 +14,18 @@
#define REDUCED_PREEMPT 10
#define WAIT_FOR_RESET_TIME 10000
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt)
+{
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ for_each_engine(engine, gt, id)
+ return engine;
+
+ pr_err("No valid engine found!\n");
+ return NULL;
+}
+
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
u32 modify_type)
diff --git a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
index 35c098601ac0..ae60bb507f45 100644
--- a/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
+++ b/drivers/gpu/drm/i915/selftests/intel_scheduler_helpers.h
@@ -10,6 +10,7 @@
struct i915_request;
struct intel_engine_cs;
+struct intel_gt;
struct intel_selftest_saved_policy {
u32 flags;
@@ -23,6 +24,7 @@ enum selftest_scheduler_modify {
SELFTEST_SCHEDULER_MODIFY_FAST_RESET,
};
+struct intel_engine_cs *intel_selftest_find_any_engine(struct intel_gt *gt);
int intel_selftest_modify_policy(struct intel_engine_cs *engine,
struct intel_selftest_saved_policy *saved,
enum selftest_scheduler_modify modify_type);
diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c
index 720b60853f8b..bc8128170a99 100644
--- a/drivers/gpu/drm/i915/selftests/intel_uncore.c
+++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c
@@ -62,30 +62,40 @@ static int intel_fw_table_check(const struct intel_forcewake_range *ranges,
static int intel_shadow_table_check(void)
{
struct {
- const i915_reg_t *regs;
+ const struct i915_range *regs;
unsigned int size;
- } reg_lists[] = {
+ } range_lists[] = {
{ gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) },
{ gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) },
{ gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) },
- { xehp_shadowed_regs, ARRAY_SIZE(xehp_shadowed_regs) },
+ { dg2_shadowed_regs, ARRAY_SIZE(dg2_shadowed_regs) },
};
- const i915_reg_t *reg;
+ const struct i915_range *range;
unsigned int i, j;
s32 prev;
- for (j = 0; j < ARRAY_SIZE(reg_lists); ++j) {
- reg = reg_lists[j].regs;
- for (i = 0, prev = -1; i < reg_lists[j].size; i++, reg++) {
- u32 offset = i915_mmio_reg_offset(*reg);
+ for (j = 0; j < ARRAY_SIZE(range_lists); ++j) {
+ range = range_lists[j].regs;
+ for (i = 0, prev = -1; i < range_lists[j].size; i++, range++) {
+ if (range->end < range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) has end before start\n",
+ __func__, i, range->start, range->end);
+ return -EINVAL;
+ }
+
+ if (prev >= (s32)range->start) {
+ pr_err("%s: range[%d]:(%06x-%06x) is before end of previous (%06x)\n",
+ __func__, i, range->start, range->end, prev);
+ return -EINVAL;
+ }
- if (prev >= (s32)offset) {
- pr_err("%s: entry[%d]:(%x) is before previous (%x)\n",
- __func__, i, offset, prev);
+ if (range->start % 4) {
+ pr_err("%s: range[%d]:(%06x-%06x) has non-dword-aligned start\n",
+ __func__, i, range->start, range->end);
return -EINVAL;
}
- prev = offset;
+ prev = range->end;
}
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index efa86dffe3c6..75793008c4ef 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -6,8 +6,6 @@
#include <drm/ttm/ttm_placement.h>
#include <linux/scatterlist.h>
-#include <drm/ttm/ttm_placement.h>
-
#include "gem/i915_gem_region.h"
#include "intel_memory_region.h"
#include "intel_region_ttm.h"
diff --git a/drivers/gpu/drm/i915/vlv_sideband.c b/drivers/gpu/drm/i915/vlv_sideband.c
new file mode 100644
index 000000000000..35380738a951
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_sideband.c
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
+
+#include <asm/iosf_mbi.h>
+
+#include "i915_drv.h"
+#include "vlv_sideband.h"
+
+/*
+ * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
+ * VLV_VLV2_PUNIT_HAS_0.8.docx
+ */
+
+/* Standard MMIO read, non-posted */
+#define SB_MRD_NP 0x00
+/* Standard MMIO write, non-posted */
+#define SB_MWR_NP 0x01
+/* Private register read, double-word addressing, non-posted */
+#define SB_CRRDDA_NP 0x06
+/* Private register write, double-word addressing, non-posted */
+#define SB_CRWRDA_NP 0x07
+
+static void ping(void *info)
+{
+}
+
+static void __vlv_punit_get(struct drm_i915_private *i915)
+{
+ iosf_mbi_punit_acquire();
+
+ /*
+ * Prevent the cpu from sleeping while we use this sideband, otherwise
+ * the punit may cause a machine hang. The issue appears to be tied to
+ * changing the power state of the CPU package while changing the power
+ * state via the punit, and we have only observed it reliably on 4-core
+ * Baytrail systems, suggesting the issue is in the power delivery
+ * mechanism and likely to be board/function specific. Hence we presume
+ * the workaround need only be applied to the Valleyview P-unit and not
+ * all sideband communications.
+ */
+ if (IS_VALLEYVIEW(i915)) {
+ cpu_latency_qos_update_request(&i915->sb_qos, 0);
+ on_each_cpu(ping, NULL, 1);
+ }
+}
+
+static void __vlv_punit_put(struct drm_i915_private *i915)
+{
+ if (IS_VALLEYVIEW(i915))
+ cpu_latency_qos_update_request(&i915->sb_qos,
+ PM_QOS_DEFAULT_VALUE);
+
+ iosf_mbi_punit_release();
+}
+
+void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
+{
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_get(i915);
+
+ mutex_lock(&i915->sb_lock);
+}
+
+void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
+{
+ mutex_unlock(&i915->sb_lock);
+
+ if (ports & BIT(VLV_IOSF_SB_PUNIT))
+ __vlv_punit_put(i915);
+}
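[Aside: a hedged sketch of the expected calling pattern (function name hypothetical), using the vlv_punit_get()/vlv_punit_put() header wrappers around the get/put helpers above.]

    /* Illustrative only: shows the acquire/lock pattern, not real driver code. */
    static u32 example_punit_read(struct drm_i915_private *i915, u32 addr)
    {
            u32 val;

            vlv_punit_get(i915);            /* punit acquire + sb_lock */
            val = vlv_punit_read(i915, addr);
            vlv_punit_put(i915);            /* drop sb_lock, release punit */

            return val;
    }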
+
+static int vlv_sideband_rw(struct drm_i915_private *i915,
+ u32 devfn, u32 port, u32 opcode,
+ u32 addr, u32 *val)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
+ int err;
+
+ lockdep_assert_held(&i915->sb_lock);
+ if (port == IOSF_PORT_PUNIT)
+ iosf_mbi_assert_punit_acquired();
+
+ /* Flush the previous comms, just in case it failed last time. */
+ if (intel_wait_for_register(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 5)) {
+ drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ return -EAGAIN;
+ }
+
+ preempt_disable();
+
+ intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
+ intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
+ (devfn << IOSF_DEVFN_SHIFT) |
+ (opcode << IOSF_OPCODE_SHIFT) |
+ (port << IOSF_PORT_SHIFT) |
+ (0xf << IOSF_BYTE_ENABLES_SHIFT) |
+ (0 << IOSF_BAR_SHIFT) |
+ IOSF_SB_BUSY);
+
+ if (__intel_wait_for_register_fw(uncore,
+ VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
+ 10000, 0, NULL) == 0) {
+ if (is_read)
+ *val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
+ err = 0;
+ } else {
+ drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
+ is_read ? "read" : "write");
+ err = -ETIMEDOUT;
+ }
+
+ preempt_enable();
+
+ return err;
+}
+
+u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRRDDA_NP, addr, &val);
+
+ return val;
+}
+
+int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
+{
+ return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
+ SB_CRWRDA_NP, addr, &val);
+}
+
+u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
+ SB_CRRDDA_NP, addr, &val);
+
+ return val;
+}
+
+u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_iosf_sb_write(struct drm_i915_private *i915,
+ u8 port, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRRDDA_NP, reg, &val);
+
+ return val;
+}
+
+void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
+ SB_CRWRDA_NP, reg, &val);
+}
+
+static u32 vlv_dpio_phy_iosf_port(struct drm_i915_private *i915, enum dpio_phy phy)
+{
+ /*
+ * IOSF_PORT_DPIO: VLV x2 PHY (DP/HDMI B and C), CHV x1 PHY (DP/HDMI D)
+ * IOSF_PORT_DPIO_2: CHV x2 PHY (DP/HDMI B and C)
+ */
+ if (IS_CHERRYVIEW(i915))
+ return phy == DPIO_PHY0 ? IOSF_PORT_DPIO_2 : IOSF_PORT_DPIO;
+ else
+ return IOSF_PORT_DPIO;
+}
+
+u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
+{
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);
+
+ /*
+ * FIXME: There might be some registers where all 1's is a valid value,
+ * so ideally we should check the register offset instead...
+ */
+ drm_WARN(&i915->drm, val == 0xffffffff,
+ "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
+
+ return val;
+}
+
+void vlv_dpio_write(struct drm_i915_private *i915,
+ enum pipe pipe, int reg, u32 val)
+{
+ u32 port = vlv_dpio_phy_iosf_port(i915, DPIO_PHY(pipe));
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MWR_NP, reg, &val);
+}
+
+u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
+{
+ u32 val = 0;
+
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
+ reg, &val);
+ return val;
+}
+
+void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
+{
+ vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
+ reg, &val);
+}
diff --git a/drivers/gpu/drm/i915/intel_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h
index d1d14bcb8f56..d7732f612e7f 100644
--- a/drivers/gpu/drm/i915/intel_sideband.h
+++ b/drivers/gpu/drm/i915/vlv_sideband.h
@@ -1,18 +1,16 @@
/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2013-2021 Intel Corporation
+ */
-#ifndef _INTEL_SIDEBAND_H_
-#define _INTEL_SIDEBAND_H_
+#ifndef _VLV_SIDEBAND_H_
+#define _VLV_SIDEBAND_H_
#include <linux/bitops.h>
#include <linux/types.h>
-struct drm_i915_private;
enum pipe;
-
-enum intel_sbi_destination {
- SBI_ICLK,
- SBI_MPHY,
-};
+struct drm_i915_private;
enum {
VLV_IOSF_SB_BUNIT,
@@ -122,22 +120,4 @@ static inline void vlv_punit_put(struct drm_i915_private *i915)
vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_PUNIT));
}
-u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
- enum intel_sbi_destination destination);
-void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
- enum intel_sbi_destination destination);
-
-int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
- u32 *val, u32 *val1);
-int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox,
- u32 val, int fast_timeout_us,
- int slow_timeout_ms);
-#define sandybridge_pcode_write(i915, mbox, val) \
- sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0)
-
-int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
-int intel_pcode_init(struct drm_i915_private *i915);
-
-#endif /* _INTEL_SIDEBAND_H */
+#endif /* _VLV_SIDEBAND_H_ */
diff --git a/drivers/gpu/drm/kmb/kmb_crtc.c b/drivers/gpu/drm/kmb/kmb_crtc.c
index 44327bc629ca..06613ffeaaf8 100644
--- a/drivers/gpu/drm/kmb/kmb_crtc.c
+++ b/drivers/gpu/drm/kmb/kmb_crtc.c
@@ -66,7 +66,8 @@ static const struct drm_crtc_funcs kmb_crtc_funcs = {
.disable_vblank = kmb_crtc_disable_vblank,
};
-static void kmb_crtc_set_mode(struct drm_crtc *crtc)
+static void kmb_crtc_set_mode(struct drm_crtc *crtc,
+ struct drm_atomic_state *old_state)
{
struct drm_device *dev = crtc->dev;
struct drm_display_mode *m = &crtc->state->adjusted_mode;
@@ -75,7 +76,7 @@ static void kmb_crtc_set_mode(struct drm_crtc *crtc)
unsigned int val = 0;
/* Initialize mipi */
- kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz);
+ kmb_dsi_mode_set(kmb->kmb_dsi, m, kmb->sys_clk_mhz, old_state);
drm_info(dev,
"vfp= %d vbp= %d vsync_len=%d hfp=%d hbp=%d hsync_len=%d\n",
m->crtc_vsync_start - m->crtc_vdisplay,
@@ -138,7 +139,7 @@ static void kmb_crtc_atomic_enable(struct drm_crtc *crtc,
struct kmb_drm_private *kmb = crtc_to_kmb_priv(crtc);
clk_prepare_enable(kmb->kmb_clk.clk_lcd);
- kmb_crtc_set_mode(crtc);
+ kmb_crtc_set_mode(crtc, state);
drm_crtc_vblank_on(crtc);
}
@@ -185,11 +186,45 @@ static void kmb_crtc_atomic_flush(struct drm_crtc *crtc,
spin_unlock_irq(&crtc->dev->event_lock);
}
+static enum drm_mode_status
+ kmb_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int refresh;
+ struct drm_device *dev = crtc->dev;
+ int vfp = mode->vsync_start - mode->vdisplay;
+
+ if (mode->vdisplay < KMB_CRTC_MAX_HEIGHT) {
+ drm_dbg(dev, "height = %d less than %d",
+ mode->vdisplay, KMB_CRTC_MAX_HEIGHT);
+ return MODE_BAD_VVALUE;
+ }
+ if (mode->hdisplay < KMB_CRTC_MAX_WIDTH) {
+ drm_dbg(dev, "width = %d less than %d",
+ mode->hdisplay, KMB_CRTC_MAX_WIDTH);
+ return MODE_BAD_HVALUE;
+ }
+ refresh = drm_mode_vrefresh(mode);
+ if (refresh < KMB_MIN_VREFRESH || refresh > KMB_MAX_VREFRESH) {
+ drm_dbg(dev, "refresh = %d less than %d or greater than %d",
+ refresh, KMB_MIN_VREFRESH, KMB_MAX_VREFRESH);
+ return MODE_BAD;
+ }
+
+ if (vfp < KMB_CRTC_MIN_VFP) {
+ drm_dbg(dev, "vfp = %d less than %d", vfp, KMB_CRTC_MIN_VFP);
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
static const struct drm_crtc_helper_funcs kmb_crtc_helper_funcs = {
.atomic_begin = kmb_crtc_atomic_begin,
.atomic_enable = kmb_crtc_atomic_enable,
.atomic_disable = kmb_crtc_atomic_disable,
.atomic_flush = kmb_crtc_atomic_flush,
+ .mode_valid = kmb_crtc_mode_valid,
};
int kmb_setup_crtc(struct drm_device *drm)
diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c
index 12ce669650cc..961ac6fb5fcf 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.c
+++ b/drivers/gpu/drm/kmb/kmb_drv.c
@@ -380,7 +380,7 @@ static irqreturn_t handle_lcd_irq(struct drm_device *dev)
if (val & LAYER3_DMA_FIFO_UNDERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA UNDERFLOW val = 0x%lx", val);
- if (val & LAYER3_DMA_FIFO_UNDERFLOW)
+ if (val & LAYER3_DMA_FIFO_OVERFLOW)
drm_dbg(&kmb->drm,
"LAYER3:GL1 DMA OVERFLOW val = 0x%lx", val);
}
diff --git a/drivers/gpu/drm/kmb/kmb_drv.h b/drivers/gpu/drm/kmb/kmb_drv.h
index 69a62e2d03ff..bf085e95b28f 100644
--- a/drivers/gpu/drm/kmb/kmb_drv.h
+++ b/drivers/gpu/drm/kmb/kmb_drv.h
@@ -20,11 +20,18 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
+/* Platform definitions */
+#define KMB_CRTC_MIN_VFP 4
+#define KMB_CRTC_MAX_WIDTH 1920 /* max width in pixels */
+#define KMB_CRTC_MAX_HEIGHT 1080 /* max height in pixels */
+#define KMB_CRTC_MIN_WIDTH 1920
+#define KMB_CRTC_MIN_HEIGHT 1080
#define KMB_FB_MAX_WIDTH 1920
#define KMB_FB_MAX_HEIGHT 1080
#define KMB_FB_MIN_WIDTH 1
#define KMB_FB_MIN_HEIGHT 1
-
+#define KMB_MIN_VREFRESH 59 /* vertical refresh in Hz */
+#define KMB_MAX_VREFRESH 60 /* vertical refresh in Hz */
#define KMB_LCD_DEFAULT_CLK 200000000
#define KMB_SYS_CLK_MHZ 500
@@ -50,6 +57,7 @@ struct kmb_drm_private {
spinlock_t irq_lock;
int irq_lcd;
int sys_clk_mhz;
+ struct disp_cfg init_disp_cfg[KMB_MAX_PLANES];
struct layer_status plane_status[KMB_MAX_PLANES];
int kmb_under_flow;
int kmb_flush_done;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index 1793cd31b117..f6071882054c 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -482,6 +482,10 @@ static u32 mipi_tx_fg_section_cfg(struct kmb_dsi *kmb_dsi,
return 0;
}
+#define CLK_DIFF_LOW 50
+#define CLK_DIFF_HI 60
+#define SYSCLK_500 500
+
static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
struct mipi_tx_frame_timing_cfg *fg_cfg)
{
@@ -492,7 +496,12 @@ static void mipi_tx_fg_cfg_regs(struct kmb_dsi *kmb_dsi, u8 frame_gen,
/* 500 Mhz system clock minus 50 to account for the difference in
* MIPI clock speed in RTL tests
*/
- sysclk = kmb_dsi->sys_clk_mhz - 50;
+ if (kmb_dsi->sys_clk_mhz == SYSCLK_500) {
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_LOW;
+ } else {
+ /* 700 MHz clk */
+ sysclk = kmb_dsi->sys_clk_mhz - CLK_DIFF_HI;
+ }
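[Aside: with these values, sysclk works out to 450 for the 500 MHz system clock and 640 for the 700 MHz case.]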
/* PPL-Pixel Packing Layer, LLP-Low Level Protocol
* Frame generator timing parameters are clocked on the system clock,
@@ -1322,7 +1331,8 @@ static u32 mipi_tx_init_dphy(struct kmb_dsi *kmb_dsi,
return 0;
}
-static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
+static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi,
+ struct drm_atomic_state *old_state)
{
struct regmap *msscam;
@@ -1331,7 +1341,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
dev_dbg(kmb_dsi->dev, "failed to get msscam syscon");
return;
}
-
+ drm_atomic_bridge_chain_enable(adv_bridge, old_state);
/* DISABLE MIPI->CIF CONNECTION */
regmap_write(msscam, MSS_MIPI_CIF_CFG, 0);
@@ -1342,7 +1352,7 @@ static void connect_lcd_to_mipi(struct kmb_dsi *kmb_dsi)
}
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz)
+ int sys_clk_mhz, struct drm_atomic_state *old_state)
{
u64 data_rate;
@@ -1384,18 +1394,13 @@ int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
mipi_tx_init_cfg.lane_rate_mbps = data_rate;
}
- kmb_write_mipi(kmb_dsi, DPHY_ENABLE, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL0, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL1, 0);
- kmb_write_mipi(kmb_dsi, DPHY_INIT_CTRL2, 0);
-
/* Initialize mipi controller */
mipi_tx_init_cntrl(kmb_dsi, &mipi_tx_init_cfg);
/* Dphy initialization */
mipi_tx_init_dphy(kmb_dsi, &mipi_tx_init_cfg);
- connect_lcd_to_mipi(kmb_dsi);
+ connect_lcd_to_mipi(kmb_dsi, old_state);
dev_info(kmb_dsi->dev, "mipi hw initialized");
return 0;
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.h b/drivers/gpu/drm/kmb/kmb_dsi.h
index 66b7c500d9bc..09dc88743d77 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.h
+++ b/drivers/gpu/drm/kmb/kmb_dsi.h
@@ -380,7 +380,7 @@ int kmb_dsi_host_bridge_init(struct device *dev);
struct kmb_dsi *kmb_dsi_init(struct platform_device *pdev);
void kmb_dsi_host_unregister(struct kmb_dsi *kmb_dsi);
int kmb_dsi_mode_set(struct kmb_dsi *kmb_dsi, struct drm_display_mode *mode,
- int sys_clk_mhz);
+ int sys_clk_mhz, struct drm_atomic_state *old_state);
int kmb_dsi_map_mmio(struct kmb_dsi *kmb_dsi);
int kmb_dsi_clk_init(struct kmb_dsi *kmb_dsi);
int kmb_dsi_encoder_init(struct drm_device *dev, struct kmb_dsi *kmb_dsi);
diff --git a/drivers/gpu/drm/kmb/kmb_plane.c b/drivers/gpu/drm/kmb/kmb_plane.c
index 06b0c42c9e91..00404ba4126d 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.c
+++ b/drivers/gpu/drm/kmb/kmb_plane.c
@@ -67,8 +67,21 @@ static const u32 kmb_formats_v[] = {
static unsigned int check_pixel_format(struct drm_plane *plane, u32 format)
{
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
int i;
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
+ /* Due to HW limitations, changing pixel format after initial
+ * plane configuration is not supported.
+ */
+ if (init_disp_cfg.format && init_disp_cfg.format != format) {
+ drm_dbg(&kmb->drm, "Cannot change format after initial plane configuration");
+ return -EINVAL;
+ }
for (i = 0; i < plane->format_count; i++) {
if (plane->format_types[i] == format)
return 0;
@@ -81,11 +94,17 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
{
struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
plane);
+ struct kmb_drm_private *kmb;
+ struct kmb_plane *kmb_plane = to_kmb_plane(plane);
+ int plane_id = kmb_plane->id;
+ struct disp_cfg init_disp_cfg;
struct drm_framebuffer *fb;
int ret;
struct drm_crtc_state *crtc_state;
bool can_position;
+ kmb = to_kmb(plane->dev);
+ init_disp_cfg = kmb->init_disp_cfg[plane_id];
fb = new_plane_state->fb;
if (!fb || !new_plane_state->crtc)
return 0;
@@ -99,6 +118,16 @@ static int kmb_plane_atomic_check(struct drm_plane *plane,
new_plane_state->crtc_w < KMB_FB_MIN_WIDTH ||
new_plane_state->crtc_h < KMB_FB_MIN_HEIGHT)
return -EINVAL;
+
+ /* Due to HW limitations, changing plane height or width after
+ * initial plane configuration is not supported.
+ */
+ if ((init_disp_cfg.width && init_disp_cfg.height) &&
+ (init_disp_cfg.width != fb->width ||
+ init_disp_cfg.height != fb->height)) {
+ drm_dbg(&kmb->drm, "Cannot change plane height or width after initial configuration");
+ return -EINVAL;
+ }
can_position = (plane->type == DRM_PLANE_TYPE_OVERLAY);
crtc_state =
drm_atomic_get_existing_crtc_state(state,
@@ -335,6 +364,7 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
unsigned char plane_id;
int num_planes;
static dma_addr_t addr[MAX_SUB_PLANES];
+ struct disp_cfg *init_disp_cfg;
if (!plane || !new_plane_state || !old_plane_state)
return;
@@ -357,7 +387,8 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
}
spin_unlock_irq(&kmb->irq_lock);
- src_w = (new_plane_state->src_w >> 16);
+ init_disp_cfg = &kmb->init_disp_cfg[plane_id];
+ src_w = new_plane_state->src_w >> 16;
src_h = new_plane_state->src_h >> 16;
crtc_x = new_plane_state->crtc_x;
crtc_y = new_plane_state->crtc_y;
@@ -500,6 +531,16 @@ static void kmb_plane_atomic_update(struct drm_plane *plane,
/* Enable DMA */
kmb_write_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id), dma_cfg);
+
+ /* Save initial display config */
+ if (!init_disp_cfg->width ||
+ !init_disp_cfg->height ||
+ !init_disp_cfg->format) {
+ init_disp_cfg->width = width;
+ init_disp_cfg->height = height;
+ init_disp_cfg->format = fb->format->format;
+ }
+
drm_dbg(&kmb->drm, "dma_cfg=0x%x LCD_DMA_CFG=0x%x\n", dma_cfg,
kmb_read_lcd(kmb, LCD_LAYERn_DMA_CFG(plane_id)));
diff --git a/drivers/gpu/drm/kmb/kmb_plane.h b/drivers/gpu/drm/kmb/kmb_plane.h
index 6e8d22cf8819..b51144044fe8 100644
--- a/drivers/gpu/drm/kmb/kmb_plane.h
+++ b/drivers/gpu/drm/kmb/kmb_plane.h
@@ -63,6 +63,12 @@ struct layer_status {
u32 ctrl;
};
+struct disp_cfg {
+ unsigned int width;
+ unsigned int height;
+ unsigned int format;
+};
+
struct kmb_plane *kmb_plane_init(struct drm_device *drm);
void kmb_plane_destroy(struct drm_plane *plane);
#endif /* __KMB_PLANE_H__ */
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index de62966243cd..640acc060467 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -267,7 +267,9 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
if (explicit)
return 0;
- return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write);
+ return drm_sched_job_add_implicit_dependencies(&task->base,
+ &bo->base.base,
+ write);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
@@ -285,7 +287,7 @@ static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
if (err)
return err;
- err = drm_gem_fence_array_add(&submit->task->deps, fence);
+ err = drm_sched_job_add_dependency(&submit->task->base, fence);
if (err) {
dma_fence_put(fence);
return err;
@@ -359,8 +361,7 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
goto err_out2;
}
- fence = lima_sched_context_queue_task(
- submit->ctx->context + submit->pipe, submit->task);
+ fence = lima_sched_context_queue_task(submit->task);
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index dba8329937a3..99d5f6f1a882 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -129,27 +129,20 @@ int lima_sched_task_init(struct lima_sched_task *task,
return err;
}
+ drm_sched_job_arm(&task->base);
+
task->num_bos = num_bos;
task->vm = lima_vm_get(vm);
- xa_init_flags(&task->deps, XA_FLAGS_ALLOC);
-
return 0;
}
void lima_sched_task_fini(struct lima_sched_task *task)
{
- struct dma_fence *fence;
- unsigned long index;
int i;
drm_sched_job_cleanup(&task->base);
- xa_for_each(&task->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&task->deps);
-
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
drm_gem_object_put(&task->bos[i]->base.base);
@@ -175,27 +168,15 @@ void lima_sched_context_fini(struct lima_sched_pipe *pipe,
drm_sched_entity_fini(&context->base);
}
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task)
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task)
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
trace_lima_task_submit(task);
- drm_sched_entity_push_job(&task->base, &context->base);
+ drm_sched_entity_push_job(&task->base);
return fence;
}
-static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *entity)
-{
- struct lima_sched_task *task = to_lima_task(job);
-
- if (!xa_empty(&task->deps))
- return xa_erase(&task->deps, task->last_dep++);
-
- return NULL;
-}
-
static int lima_pm_busy(struct lima_device *ldev)
{
int ret;
@@ -471,7 +452,6 @@ static void lima_sched_free_job(struct drm_sched_job *job)
}
static const struct drm_sched_backend_ops lima_sched_ops = {
- .dependency = lima_sched_dependency,
.run_job = lima_sched_run_job,
.timedout_job = lima_sched_timedout_job,
.free_job = lima_sched_free_job,
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 90f03c48ef4a..6a11764d87b3 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -23,9 +23,6 @@ struct lima_sched_task {
struct lima_vm *vm;
void *frame;
- struct xarray deps;
- unsigned long last_dep;
-
struct lima_bo **bos;
int num_bos;
@@ -98,8 +95,7 @@ int lima_sched_context_init(struct lima_sched_pipe *pipe,
atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
-struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
- struct lima_sched_task *task);
+struct dma_fence *lima_sched_context_queue_task(struct lima_sched_task *task);
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index e60566a5739c..5b5afc6aaf8e 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -276,7 +276,6 @@ static int mcde_probe(struct platform_device *pdev)
struct drm_device *drm;
struct mcde *mcde;
struct component_match *match = NULL;
- struct resource *res;
u32 pid;
int irq;
int ret;
@@ -344,8 +343,7 @@ static int mcde_probe(struct platform_device *pdev)
goto clk_disable;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mcde->regs = devm_ioremap_resource(dev, res);
+ mcde->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mcde->regs)) {
dev_err(dev, "no MCDE regs\n");
ret = -EINVAL;
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index 180ebbccbeda..5651734ce977 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -1169,7 +1169,6 @@ static int mcde_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mcde_dsi *d;
struct mipi_dsi_host *host;
- struct resource *res;
u32 dsi_id;
int ret;
@@ -1187,8 +1186,7 @@ static int mcde_dsi_probe(struct platform_device *pdev)
return PTR_ERR(d->prcmu);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- d->regs = devm_ioremap_resource(dev, res);
+ d->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(d->regs))
return PTR_ERR(d->regs);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 93b40c245f00..5d90d2eb0019 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -11,6 +11,7 @@
#include <linux/of_platform.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
+#include <linux/reset.h>
#include <video/mipi_display.h>
#include <video/videomode.h>
@@ -980,8 +981,10 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data)
struct mtk_dsi *dsi = dev_get_drvdata(dev);
ret = mtk_dsi_encoder_init(drm, dsi);
+ if (ret)
+ return ret;
- return ret;
+ return device_reset_optional(dev);
}
static void mtk_dsi_unbind(struct device *dev, struct device *master,
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index bc0d60df04ae..7f41a33592c8 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -206,8 +206,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->compat = match->compat;
priv->afbcd.ops = match->afbcd_ops;
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vpu");
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource_byname(pdev, "vpu");
if (IS_ERR(regs)) {
ret = PTR_ERR(regs);
goto free_drm;
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 2ed87cfdd735..0afbd1e70bfc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -978,7 +978,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct dw_hdmi_plat_data *dw_plat_data;
struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
- struct resource *res;
int irq;
int ret;
@@ -1042,8 +1041,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(meson_dw_hdmi->hdmitx_phy);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- meson_dw_hdmi->hdmitx = devm_ioremap_resource(dev, res);
+ meson_dw_hdmi->hdmitx = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(meson_dw_hdmi->hdmitx))
return PTR_ERR(meson_dw_hdmi->hdmitx);
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
index 4fd4de16cd32..894472921c30 100644
--- a/drivers/gpu/drm/mga/mga_ioc32.c
+++ b/drivers/gpu/drm/mga/mga_ioc32.c
@@ -38,16 +38,18 @@
typedef struct drm32_mga_init {
int func;
u32 sarea_priv_offset;
- int chipset;
- int sgram;
- unsigned int maccess;
- unsigned int fb_cpp;
- unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ struct_group(always32bit,
+ int chipset;
+ int sgram;
+ unsigned int maccess;
+ unsigned int fb_cpp;
+ unsigned int front_offset, front_pitch;
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ );
u32 fb_offset;
u32 mmio_offset;
u32 status_offset;
@@ -67,9 +69,8 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
init.func = init32.func;
init.sarea_priv_offset = init32.sarea_priv_offset;
- memcpy(&init.chipset, &init32.chipset,
- offsetof(drm_mga_init_t, fb_offset) -
- offsetof(drm_mga_init_t, chipset));
+ memcpy(&init.always32bit, &init32.always32bit,
+ sizeof(init32.always32bit));
init.fb_offset = init32.fb_offset;
init.mmio_offset = init32.mmio_offset;
init.status_offset = init32.status_offset;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index 196f74a0834e..4368112023f7 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -224,8 +224,6 @@ struct mga_device {
enum mga_type type;
- int fb_mtrr;
-
union {
struct {
long ref_clk;
diff --git a/drivers/gpu/drm/mgag200/mgag200_mm.c b/drivers/gpu/drm/mgag200/mgag200_mm.c
index b667371b69a4..fa996d46feed 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mm.c
@@ -75,26 +75,12 @@ static size_t mgag200_probe_vram(struct mga_device *mdev, void __iomem *mem,
return offset - 65536;
}
-static void mgag200_mm_release(struct drm_device *dev, void *ptr)
-{
- struct mga_device *mdev = to_mga_device(dev);
- struct pci_dev *pdev = to_pci_dev(dev->dev);
-
- mdev->vram_fb_available = 0;
- iounmap(mdev->vram);
- arch_io_free_memtype_wc(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- arch_phys_wc_del(mdev->fb_mtrr);
- mdev->fb_mtrr = 0;
-}
-
int mgag200_mm_init(struct mga_device *mdev)
{
struct drm_device *dev = &mdev->base;
struct pci_dev *pdev = to_pci_dev(dev->dev);
u8 misc;
resource_size_t start, len;
- int ret;
WREG_ECRT(0x04, 0x00);
@@ -112,15 +98,13 @@ int mgag200_mm_init(struct mga_device *mdev)
return -ENXIO;
}
- arch_io_reserve_memtype_wc(start, len);
-
- mdev->fb_mtrr = arch_phys_wc_add(start, len);
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_io_reserve_memtype_wc(dev->dev, start, len);
+ devm_arch_phys_wc_add(dev->dev, start, len);
- mdev->vram = ioremap(start, len);
- if (!mdev->vram) {
- ret = -ENOMEM;
- goto err_arch_phys_wc_del;
- }
+ mdev->vram = devm_ioremap(dev->dev, start, len);
+ if (!mdev->vram)
+ return -ENOMEM;
mdev->mc.vram_size = mgag200_probe_vram(mdev, mdev->vram, len);
mdev->mc.vram_base = start;
@@ -128,10 +112,5 @@ int mgag200_mm_init(struct mga_device *mdev)
mdev->vram_fb_available = mdev->mc.vram_size;
- return drmm_add_action_or_reset(dev, mgag200_mm_release, NULL);
-
-err_arch_phys_wc_del:
- arch_phys_wc_del(mdev->fb_mtrr);
- arch_io_free_memtype_wc(start, len);
- return ret;
+ return 0;
}
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 3ddf739a6f9b..ae11061727ff 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,9 +3,9 @@
config DRM_MSM
tristate "MSM DRM"
depends on DRM
- depends on ARCH_QCOM || SOC_IMX5 || (ARM && COMPILE_TEST)
+ depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
depends on IOMMU_SUPPORT
- depends on OF && COMMON_CLK
+ depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
@@ -14,6 +14,8 @@ config DRM_MSM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
+ select DRM_BRIDGE
+ select DRM_PANEL_BRIDGE
select DRM_SCHED
select SHMEM
select TMPFS
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 904535eda0c4..40577f8856d8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -51,7 +51,6 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
- disp/dpu1/dpu_core_irq.o \
disp/dpu1/dpu_core_perf.o \
disp/dpu1/dpu_crtc.o \
disp/dpu1/dpu_encoder.o \
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index c9d11d57aed6..dd593ec2bc56 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -138,7 +138,7 @@ reset_set(void *data, u64 val)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
+DEFINE_DEBUGFS_ATTRIBUTE(reset_fops, NULL, reset_set, "%llx\n");
void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
@@ -154,6 +154,6 @@ void a5xx_debugfs_init(struct msm_gpu *gpu, struct drm_minor *minor)
ARRAY_SIZE(a5xx_debugfs_list),
minor->debugfs_root, minor);
- debugfs_create_file("reset", S_IWUGO, minor->debugfs_root, dev,
- &reset_fops);
+ debugfs_create_file_unsafe("reset", S_IWUGO, minor->debugfs_root, dev,
+ &reset_fops);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 8b73f70766a4..71e52b2b2025 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -516,11 +516,11 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct platform_device *pdev = to_platform_device(gmu->dev);
void __iomem *pdcptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
- void __iomem *seqptr;
+ void __iomem *seqptr = NULL;
uint32_t pdc_address_offset;
bool pdc_in_aop = false;
- if (!pdcptr)
+ if (IS_ERR(pdcptr))
goto err;
if (adreno_is_a650(adreno_gpu) || adreno_is_a660_family(adreno_gpu))
@@ -532,7 +532,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (!pdc_in_aop) {
seqptr = a6xx_gmu_get_mmio(pdev, "gmu_pdc_seq");
- if (!seqptr)
+ if (IS_ERR(seqptr))
goto err;
}
@@ -891,7 +891,7 @@ static void a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
- if (IS_ERR_OR_NULL(gpu_opp))
+ if (IS_ERR(gpu_opp))
return;
gmu->freq = 0; /* so a6xx_gmu_set_freq() doesn't exit early */
@@ -905,7 +905,7 @@ static void a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)
unsigned long gpu_freq = gmu->gpu_freqs[gmu->current_perf_index];
gpu_opp = dev_pm_opp_find_freq_exact(&gpu->pdev->dev, gpu_freq, true);
- if (IS_ERR_OR_NULL(gpu_opp))
+ if (IS_ERR(gpu_opp))
return;
dev_pm_opp_set_opp(&gpu->pdev->dev, gpu_opp);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 33da25b81615..267a880811d6 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -1838,6 +1838,13 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
adreno_cmp_rev(ADRENO_REV(6, 3, 5, ANY_ID), info->rev)))
adreno_gpu->base.hw_apriv = true;
+ /*
+ * For now only clamp to idle freq for devices where this is known not
+ * to cause power supply issues:
+ */
+ if (info && (info->revn == 618))
+ gpu->clamp_to_idle = true;
+
a6xx_llc_slices_init(pdev, a6xx_gpu);
ret = a6xx_set_supported_hw(&pdev->dev, config->rev);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index e8f65cd8eca6..7501849ed15d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -180,7 +180,7 @@ static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
msm_readl((ptr) + ((offset) << 2))
/* read a value from the CX debug bus */
-static int cx_debugbus_read(void *__iomem cxdbg, u32 block, u32 offset,
+static int cx_debugbus_read(void __iomem *cxdbg, u32 block, u32 offset,
u32 *data)
{
u32 reg = A6XX_CX_DBGC_CFG_DBGBUS_SEL_A_PING_INDEX(offset) |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
deleted file mode 100644
index d2457490930b..000000000000
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- */
-
-#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
-
-#include <linux/debugfs.h>
-#include <linux/irqdomain.h>
-#include <linux/irq.h>
-#include <linux/kthread.h>
-
-#include "dpu_core_irq.h"
-#include "dpu_trace.h"
-
-/**
- * dpu_core_irq_callback_handler - dispatch core interrupts
- * @arg: private data of callback handler
- * @irq_idx: interrupt index
- */
-static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
-{
- struct dpu_kms *dpu_kms = arg;
- struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
- struct dpu_irq_callback *cb;
-
- VERB("irq_idx=%d\n", irq_idx);
-
- if (list_empty(&irq_obj->irq_cb_tbl[irq_idx]))
- DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
-
- atomic_inc(&irq_obj->irq_counts[irq_idx]);
-
- /*
- * Perform registered function callback
- */
- list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
- if (cb->func)
- cb->func(cb->arg, irq_idx);
-}
-
-u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
-{
- if (!dpu_kms->hw_intr ||
- !dpu_kms->hw_intr->ops.get_interrupt_status)
- return 0;
-
- if (irq_idx < 0) {
- DPU_ERROR("[%pS] invalid irq_idx=%d\n",
- __builtin_return_address(0), irq_idx);
- return 0;
- }
-
- return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
- irq_idx, clear);
-}
-
-int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
-{
- unsigned long irq_flags;
-
- if (!dpu_kms->irq_obj.irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- list_add_tail(&register_irq_cb->list,
- &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
- if (list_is_first(&register_irq_cb->list,
- &dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
- int ret = dpu_kms->hw_intr->ops.enable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
- irq_idx);
- }
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- return 0;
-}
-
-int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
- struct dpu_irq_callback *register_irq_cb)
-{
- unsigned long irq_flags;
-
- if (!dpu_kms->irq_obj.irq_cb_tbl) {
- DPU_ERROR("invalid params\n");
- return -EINVAL;
- }
-
- if (!register_irq_cb || !register_irq_cb->func) {
- DPU_ERROR("invalid irq_cb:%d func:%d\n",
- register_irq_cb != NULL,
- register_irq_cb ?
- register_irq_cb->func != NULL : -1);
- return -EINVAL;
- }
-
- if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
- DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
- return -EINVAL;
- }
-
- VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
-
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
- list_del_init(&register_irq_cb->list);
- /* empty callback list but interrupt is still enabled */
- if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx])) {
- int ret = dpu_kms->hw_intr->ops.disable_irq_locked(
- dpu_kms->hw_intr,
- irq_idx);
- if (ret)
- DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
- irq_idx);
- VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
- }
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- return 0;
-}
-
-static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
-{
- if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.clear_all_irqs)
- return;
-
- dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
-}
-
-static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
-{
- if (!dpu_kms->hw_intr || !dpu_kms->hw_intr->ops.disable_all_irqs)
- return;
-
- dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
-}
-
-#ifdef CONFIG_DEBUG_FS
-static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
-{
- struct dpu_kms *dpu_kms = s->private;
- struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
- struct dpu_irq_callback *cb;
- unsigned long irq_flags;
- int i, irq_count, cb_count;
-
- if (WARN_ON(!irq_obj->irq_cb_tbl))
- return 0;
-
- for (i = 0; i < irq_obj->total_irqs; i++) {
- irq_flags = dpu_kms->hw_intr->ops.lock(dpu_kms->hw_intr);
- cb_count = 0;
- irq_count = atomic_read(&irq_obj->irq_counts[i]);
- list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
- cb_count++;
- dpu_kms->hw_intr->ops.unlock(dpu_kms->hw_intr, irq_flags);
-
- if (irq_count || cb_count)
- seq_printf(s, "idx:%d irq:%d cb:%d\n",
- i, irq_count, cb_count);
- }
-
- return 0;
-}
-
-DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
-
-void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
- struct dentry *parent)
-{
- debugfs_create_file("core_irq", 0600, parent, dpu_kms,
- &dpu_debugfs_core_irq_fops);
-}
-#endif
-
-void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
-{
- int i;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- dpu_clear_all_irqs(dpu_kms);
- dpu_disable_all_irqs(dpu_kms);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- /* Create irq callbacks for all possible irq_idx */
- dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->total_irqs;
- dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
- sizeof(struct list_head), GFP_KERNEL);
- dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
- sizeof(atomic_t), GFP_KERNEL);
- for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
- INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
- atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
- }
-}
-
-void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
-{
- int i;
-
- pm_runtime_get_sync(&dpu_kms->pdev->dev);
- for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
- if (!list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
- DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
-
- dpu_clear_all_irqs(dpu_kms);
- dpu_disable_all_irqs(dpu_kms);
- pm_runtime_put_sync(&dpu_kms->pdev->dev);
-
- kfree(dpu_kms->irq_obj.irq_cb_tbl);
- kfree(dpu_kms->irq_obj.irq_counts);
- dpu_kms->irq_obj.irq_cb_tbl = NULL;
- dpu_kms->irq_obj.irq_counts = NULL;
- dpu_kms->irq_obj.total_irqs = 0;
-}
-
-irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
-{
- /*
- * Dispatch to HW driver to handle interrupt lookup that is being
- * fired. When matching interrupt is located, HW driver will call to
- * dpu_core_irq_callback_handler with the irq_idx from the lookup table.
- * dpu_core_irq_callback_handler will perform the registered function
- * callback, and do the interrupt status clearing once the registered
- * callback is finished.
- * Function will also clear the interrupt status after reading.
- */
- dpu_kms->hw_intr->ops.dispatch_irqs(
- dpu_kms->hw_intr,
- dpu_core_irq_callback_handler,
- dpu_kms);
-
- return IRQ_HANDLED;
-}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 768012243b44..967245b8cc02 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -70,17 +70,147 @@ static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
return NULL;
}
-static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+static enum dpu_crtc_crc_source dpu_crtc_parse_crc_source(const char *src_name)
{
- struct drm_encoder *encoder;
+ if (!src_name ||
+ !strcmp(src_name, "none"))
+ return DPU_CRTC_CRC_SOURCE_NONE;
+ if (!strcmp(src_name, "auto") ||
+ !strcmp(src_name, "lm"))
+ return DPU_CRTC_CRC_SOURCE_LAYER_MIXER;
+
+ return DPU_CRTC_CRC_SOURCE_INVALID;
+}
- encoder = get_encoder_from_crtc(crtc);
+static int dpu_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name, size_t *values_cnt)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ struct dpu_crtc_state *crtc_state = to_dpu_crtc_state(crtc->state);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ if (source == DPU_CRTC_CRC_SOURCE_LAYER_MIXER)
+ *values_cnt = crtc_state->num_mixers;
+
+ return 0;
+}
+
+static int dpu_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ enum dpu_crtc_crc_source source = dpu_crtc_parse_crc_source(src_name);
+ enum dpu_crtc_crc_source current_source;
+ struct dpu_crtc_state *crtc_state;
+ struct drm_device *drm_dev = crtc->dev;
+ struct dpu_crtc_mixer *m;
+
+ bool was_enabled;
+ bool enable = false;
+ int i, ret = 0;
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Invalid CRC source %s for CRTC%d\n", src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ ret = drm_modeset_lock(&crtc->mutex, NULL);
+
+ if (ret)
+ return ret;
+
+ enable = (source != DPU_CRTC_CRC_SOURCE_NONE);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ spin_lock_irq(&drm_dev->event_lock);
+ current_source = crtc_state->crc_source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ was_enabled = (current_source != DPU_CRTC_CRC_SOURCE_NONE);
+
+ if (!was_enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+
+ if (ret)
+ goto cleanup;
+
+ } else if (was_enabled && !enable) {
+ drm_crtc_vblank_put(crtc);
+ }
+
+ spin_lock_irq(&drm_dev->event_lock);
+ crtc_state->crc_source = source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ crtc_state->crc_frame_skip_count = 0;
+
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ /* Calculate MISR over 1 frame */
+ m->hw_lm->ops.setup_misr(m->hw_lm, true, 1);
+ }
+
+
+cleanup:
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
+
+static u32 dpu_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder = get_encoder_from_crtc(crtc);
if (!encoder) {
DRM_ERROR("no encoder found for crtc %d\n", crtc->index);
- return false;
+ return 0;
+ }
+
+ return dpu_encoder_get_vsync_count(encoder);
+}
+
+
+static int dpu_crtc_get_crc(struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *crtc_state;
+ struct dpu_crtc_mixer *m;
+ u32 crcs[CRTC_DUAL_MIXERS];
+
+ int i = 0;
+ int rc = 0;
+
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ BUILD_BUG_ON(ARRAY_SIZE(crcs) != ARRAY_SIZE(crtc_state->mixers));
+
+ /* Skip first 2 frames in case of "uncooked" CRCs */
+ if (crtc_state->crc_frame_skip_count < 2) {
+ crtc_state->crc_frame_skip_count++;
+ return 0;
}
- return dpu_encoder_get_frame_count(encoder);
+ for (i = 0; i < crtc_state->num_mixers; ++i) {
+
+ m = &crtc_state->mixers[i];
+
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ rc = m->hw_lm->ops.collect_misr(m->hw_lm, &crcs[i]);
+
+ if (rc) {
+ DRM_DEBUG_DRIVER("MISR read failed\n");
+ return rc;
+ }
+ }
+
+ return drm_crtc_add_crc_entry(crtc, true,
+ drm_crtc_accurate_vblank_count(crtc), crcs);
}
static bool dpu_crtc_get_scanout_position(struct drm_crtc *crtc,
@@ -389,6 +519,9 @@ void dpu_crtc_vblank_callback(struct drm_crtc *crtc)
dpu_crtc->vblank_cb_time = ktime_get();
else
dpu_crtc->vblank_cb_count++;
+
+ dpu_crtc_get_crc(crtc);
+
drm_crtc_handle_vblank(crtc);
trace_dpu_crtc_vblank_cb(DRMID(crtc));
}
@@ -1332,6 +1465,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .verify_crc_source = dpu_crtc_verify_crc_source,
+ .set_crc_source = dpu_crtc_set_crc_source,
.enable_vblank = msm_crtc_enable_vblank,
.disable_vblank = msm_crtc_disable_vblank,
.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index cec3474340e8..ae9546ca1359 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2021 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*/
@@ -70,6 +70,19 @@ struct dpu_crtc_smmu_state_data {
};
/**
+ * enum dpu_crtc_crc_source: CRC source
+ * @DPU_CRTC_CRC_SOURCE_NONE: no source set
+ * @DPU_CRTC_CRC_SOURCE_LAYER_MIXER: CRC in layer mixer
+ * @DPU_CRTC_CRC_SOURCE_INVALID: Invalid source
+ */
+enum dpu_crtc_crc_source {
+ DPU_CRTC_CRC_SOURCE_NONE = 0,
+ DPU_CRTC_CRC_SOURCE_LAYER_MIXER,
+ DPU_CRTC_CRC_SOURCE_MAX,
+ DPU_CRTC_CRC_SOURCE_INVALID = -1
+};
+
+/**
* struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
* @hw_lm: LM HW Driver context
* @lm_ctl: CTL Path HW driver context
@@ -139,6 +152,7 @@ struct dpu_crtc_frame_event {
* @event_lock : Spinlock around event handling code
* @phandle: Pointer to power handler
* @cur_perf : current performance committed to clock/bandwidth driver
+ * @crc_source : CRC source
*/
struct dpu_crtc {
struct drm_crtc base;
@@ -210,6 +224,9 @@ struct dpu_crtc_state {
u32 num_ctls;
struct dpu_hw_ctl *hw_ctls[CRTC_DUAL_MIXERS];
+
+ enum dpu_crtc_crc_source crc_source;
+ int crc_frame_skip_count;
};
#define to_dpu_crtc_state(x) \
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index 0e9d3fa1544b..e7ee4cfb8461 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -168,6 +168,7 @@ enum dpu_enc_rc_states {
* @vsync_event_work: worker to handle vsync event for autorefresh
* @topology: topology of the display
* @idle_timeout: idle timeout duration in milliseconds
+ * @dp: msm_dp pointer, for DP encoders
*/
struct dpu_encoder_virt {
struct drm_encoder base;
@@ -206,6 +207,8 @@ struct dpu_encoder_virt {
struct msm_display_topology topology;
u32 idle_timeout;
+
+ struct msm_dp *dp;
};
#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
@@ -395,19 +398,11 @@ int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
return 0;
}
-int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc)
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc)
{
- struct dpu_encoder_virt *dpu_enc;
- struct dpu_encoder_phys *phys;
- int framecount = 0;
-
- dpu_enc = to_dpu_encoder_virt(drm_enc);
- phys = dpu_enc ? dpu_enc->cur_master : NULL;
-
- if (phys && phys->ops.get_frame_count)
- framecount = phys->ops.get_frame_count(phys);
-
- return framecount;
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ struct dpu_encoder_phys *phys = dpu_enc ? dpu_enc->cur_master : NULL;
+ return phys ? atomic_read(&phys->vsync_cnt) : 0;
}
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc)
@@ -1000,8 +995,8 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
trace_dpu_enc_mode_set(DRMID(drm_enc));
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp)
- msm_dp_display_mode_set(priv->dp, drm_enc, mode, adj_mode);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS)
+ msm_dp_display_mode_set(dpu_enc->dp, drm_enc, mode, adj_mode);
list_for_each_entry(conn_iter, connector_list, head)
if (conn_iter->encoder == drm_enc)
@@ -1182,9 +1177,8 @@ static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
_dpu_encoder_virt_enable_helper(drm_enc);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- ret = msm_dp_display_enable(priv->dp,
- drm_enc);
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ ret = msm_dp_display_enable(dpu_enc->dp, drm_enc);
if (ret) {
DPU_ERROR_ENC(dpu_enc, "dp display enable failed: %d\n",
ret);
@@ -1224,8 +1218,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
/* wait for idle */
dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_pre_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_pre_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display push idle failed\n");
}
@@ -1253,8 +1247,8 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
- if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS && priv->dp) {
- if (msm_dp_display_disable(priv->dp, drm_enc))
+ if (drm_enc->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ if (msm_dp_display_disable(dpu_enc->dp, drm_enc))
DPU_ERROR_ENC(dpu_enc, "dp display disable failed\n");
}
@@ -2170,7 +2164,8 @@ int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
timer_setup(&dpu_enc->vsync_event_timer,
dpu_encoder_vsync_event_handler,
0);
-
+ else if (disp_info->intf_type == DRM_MODE_ENCODER_TMDS)
+ dpu_enc->dp = priv->dp[disp_info->h_tile_instance[0]];
INIT_DELAYED_WORK(&dpu_enc->delayed_off_work,
dpu_encoder_off_work);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
index 99a5d73c9b88..e241914a9677 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -163,9 +163,9 @@ void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
int dpu_encoder_get_linecount(struct drm_encoder *drm_enc);
/**
- * dpu_encoder_get_frame_count - get interface frame count for the encoder.
+ * dpu_encoder_get_vsync_count - get vsync count for the encoder.
* @drm_enc: Pointer to previously created drm encoder structure
*/
-int dpu_encoder_get_frame_count(struct drm_encoder *drm_enc);
+int dpu_encoder_get_vsync_count(struct drm_encoder *drm_enc);
#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
index aa01698d6b25..34a6940d12c5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -42,7 +42,7 @@
static bool dpu_encoder_phys_cmd_is_master(struct dpu_encoder_phys *phys_enc)
{
- return (phys_enc->split_role != ENC_ROLE_SLAVE) ? true : false;
+ return (phys_enc->split_role != ENC_ROLE_SLAVE);
}
static bool dpu_encoder_phys_cmd_mode_fixup(
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 700d65e39feb..ce6f32a919e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -844,7 +844,7 @@ static const struct dpu_intf_cfg sdm845_intf[] = {
};
static const struct dpu_intf_cfg sc7180_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0, 24, INTF_SC7180_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
};
@@ -958,12 +958,6 @@ static const struct dpu_perf_cfg sdm845_perf_data = {
.min_core_ib = 2400000,
.min_llcc_ib = 800000,
.min_dram_ib = 800000,
- .core_ib_ff = "6.0",
- .core_clk_ff = "1.0",
- .comp_ratio_rt =
- "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
- .comp_ratio_nrt =
- "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
.undersized_prefill_lines = 2,
.xtra_prefill_lines = 2,
.dest_scale_prefill_lines = 3,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
index d2a945a27cfa..4ade44bbd37e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -676,10 +676,6 @@ struct dpu_perf_cdp_cfg {
* @min_core_ib minimum mnoc ib vote in kbps
* @min_llcc_ib minimum llcc ib vote in kbps
* @min_dram_ib minimum dram ib vote in kbps
- * @core_ib_ff core instantaneous bandwidth fudge factor
- * @core_clk_ff core clock fudge factor
- * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
- * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
* @undersized_prefill_lines undersized prefill in lines
* @xtra_prefill_lines extra prefill latency in lines
* @dest_scale_prefill_lines destination scaler latency in lines
@@ -702,10 +698,6 @@ struct dpu_perf_cfg {
u32 min_core_ib;
u32 min_llcc_ib;
u32 min_dram_ib;
- const char *core_ib_ff;
- const char *core_clk_ff;
- const char *comp_ratio_rt;
- const char *comp_ratio_nrt;
u32 undersized_prefill_lines;
u32 xtra_prefill_lines;
u32 dest_scale_prefill_lines;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index 2e816f232e85..d2b6dca487e3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -3,12 +3,15 @@
*/
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <linux/slab.h>
+#include "dpu_core_irq.h"
#include "dpu_kms.h"
#include "dpu_hw_interrupts.h"
#include "dpu_hw_util.h"
#include "dpu_hw_mdss.h"
+#include "dpu_trace.h"
/**
* Register offsets in MDSS register file for the interrupt registers
@@ -117,25 +120,33 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
#define DPU_IRQ_REG(irq_idx) (irq_idx / 32)
#define DPU_IRQ_MASK(irq_idx) (BIT(irq_idx % 32))
-static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
- int irq_idx)
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(struct dpu_kms *dpu_kms, int irq_idx)
{
- int reg_idx;
+ struct dpu_irq_callback *cb;
- if (!intr)
- return;
+ VERB("irq_idx=%d\n", irq_idx);
- reg_idx = DPU_IRQ_REG(irq_idx);
- DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off, DPU_IRQ_MASK(irq_idx));
+ if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx]))
+ DRM_ERROR("no registered cb, idx:%d\n", irq_idx);
- /* ensure register writes go through */
- wmb();
+ atomic_inc(&dpu_kms->hw_intr->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
}
-static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
- void (*cbfunc)(void *, int),
- void *arg)
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
int irq_idx;
u32 irq_status;
@@ -144,13 +155,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
unsigned long irq_flags;
if (!intr)
- return;
+ return IRQ_NONE;
- /*
- * The dispatcher will save the IRQ status before calling here.
- * Now need to go through each IRQ status and find matching
- * irq lookup index.
- */
spin_lock_irqsave(&intr->irq_lock, irq_flags);
for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
if (!test_bit(reg_idx, &intr->irq_mask))
@@ -178,17 +184,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
*/
while ((bit = ffs(irq_status)) != 0) {
irq_idx = DPU_IRQ_IDX(reg_idx, bit - 1);
- /*
- * Once a match on irq mask, perform a callback
- * to the given cbfunc. cbfunc will take care
- * the interrupt status clearing. If cbfunc is
- * not provided, then the interrupt clearing
- * is here.
- */
- if (cbfunc)
- cbfunc(arg, irq_idx);
- dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ dpu_core_irq_callback_handler(dpu_kms, irq_idx);
/*
* When callback finish, clear the irq_status
@@ -203,6 +200,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
wmb();
spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return IRQ_HANDLED;
}
static int dpu_hw_intr_enable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
@@ -303,12 +302,13 @@ static int dpu_hw_intr_disable_irq_locked(struct dpu_hw_intr *intr, int irq_idx)
return 0;
}
-static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+static void dpu_clear_irqs(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
- return -EINVAL;
+ return;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
@@ -318,16 +318,15 @@ static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
/* ensure register writes go through */
wmb();
-
- return 0;
}
-static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int i;
if (!intr)
- return -EINVAL;
+ return;
for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
if (test_bit(i, &intr->irq_mask))
@@ -337,13 +336,11 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
/* ensure register writes go through */
wmb();
-
- return 0;
}
-static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
- int irq_idx, bool clear)
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
{
+ struct dpu_hw_intr *intr = dpu_kms->hw_intr;
int reg_idx;
unsigned long irq_flags;
u32 intr_status;
@@ -351,6 +348,12 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
if (!intr)
return 0;
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
if (irq_idx < 0 || irq_idx >= intr->total_irqs) {
pr_err("invalid IRQ index: [%d]\n", irq_idx);
return 0;
@@ -374,32 +377,6 @@ static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
return intr_status;
}
-static unsigned long dpu_hw_intr_lock(struct dpu_hw_intr *intr)
-{
- unsigned long irq_flags;
-
- spin_lock_irqsave(&intr->irq_lock, irq_flags);
-
- return irq_flags;
-}
-
-static void dpu_hw_intr_unlock(struct dpu_hw_intr *intr, unsigned long irq_flags)
-{
- spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
-}
-
-static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
-{
- ops->enable_irq_locked = dpu_hw_intr_enable_irq_locked;
- ops->disable_irq_locked = dpu_hw_intr_disable_irq_locked;
- ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
- ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
- ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
- ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
- ops->lock = dpu_hw_intr_lock;
- ops->unlock = dpu_hw_intr_unlock;
-}
-
static void __intr_offset(struct dpu_mdss_cfg *m,
void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
{
@@ -421,7 +398,6 @@ struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
return ERR_PTR(-ENOMEM);
__intr_offset(m, addr, &intr->hw);
- __setup_intr_ops(&intr->ops);
intr->total_irqs = ARRAY_SIZE(dpu_intr_set) * 32;
@@ -443,7 +419,168 @@ void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
{
if (intr) {
kfree(intr->cache_irq_mask);
+
+ kfree(intr->irq_cb_tbl);
+ kfree(intr->irq_counts);
+
kfree(intr);
}
}
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms->hw_intr->irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->hw_intr->irq_cb_tbl[irq_idx]);
+ if (list_is_first(&register_irq_cb->list,
+ &dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
+ int ret = dpu_hw_intr_enable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+ }
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms->hw_intr->irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->total_irqs) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ VERB("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->hw_intr->irq_cb_tbl[irq_idx])) {
+ int ret = dpu_hw_intr_disable_irq_locked(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ VERB("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_kms *dpu_kms = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, cb_count;
+
+ if (WARN_ON(!dpu_kms->hw_intr->irq_cb_tbl))
+ return 0;
+
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ spin_lock_irqsave(&dpu_kms->hw_intr->irq_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&dpu_kms->hw_intr->irq_counts[i]);
+ list_for_each_entry(cb, &dpu_kms->hw_intr->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&dpu_kms->hw_intr->irq_lock, irq_flags);
+
+ if (irq_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d cb:%d\n",
+ i, irq_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dpu_debugfs_core_irq);
+
+void dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ debugfs_create_file("core_irq", 0600, parent, dpu_kms,
+ &dpu_debugfs_core_irq_fops);
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->hw_intr->irq_cb_tbl = kcalloc(dpu_kms->hw_intr->total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->hw_intr->irq_counts = kcalloc(dpu_kms->hw_intr->total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->hw_intr->irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->hw_intr->irq_counts[i], 0);
+ }
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ int i;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->hw_intr->total_irqs; i++)
+ if (!list_empty(&dpu_kms->hw_intr->irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index ac83c1159815..d50e78c9f148 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -32,92 +32,6 @@ enum dpu_hw_intr_reg {
#define DPU_IRQ_IDX(reg_idx, offset) (reg_idx * 32 + offset)
-struct dpu_hw_intr;
-
-/**
- * Interrupt operations.
- */
-struct dpu_hw_intr_ops {
-
- /**
- * enable_irq - Enable IRQ based on lookup IRQ index
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @return: 0 for success, otherwise failure
- */
- int (*enable_irq_locked)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
- * disable_irq - Disable IRQ based on lookup IRQ index
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @return: 0 for success, otherwise failure
- */
- int (*disable_irq_locked)(
- struct dpu_hw_intr *intr,
- int irq_idx);
-
- /**
- * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
- * any asserted IRQs). Useful during reset.
- * @intr: HW interrupt handle
- * @return: 0 for success, otherwise failure
- */
- int (*clear_all_irqs)(
- struct dpu_hw_intr *intr);
-
- /**
- * disable_all_irqs - Disables all the interrupts. Useful during reset.
- * @intr: HW interrupt handle
- * @return: 0 for success, otherwise failure
- */
- int (*disable_all_irqs)(
- struct dpu_hw_intr *intr);
-
- /**
- * dispatch_irqs - IRQ dispatcher will call the given callback
- * function when a matching interrupt status bit is
- * found in the irq mapping table.
- * @intr: HW interrupt handle
- * @cbfunc: Callback function pointer
- * @arg: Argument to pass back during callback
- */
- void (*dispatch_irqs)(
- struct dpu_hw_intr *intr,
- void (*cbfunc)(void *arg, int irq_idx),
- void *arg);
-
- /**
- * get_interrupt_status - Gets HW interrupt status, and clear if set,
- * based on given lookup IRQ index.
- * @intr: HW interrupt handle
- * @irq_idx: Lookup irq index return from irq_idx_lookup
- * @clear: True to clear irq after read
- */
- u32 (*get_interrupt_status)(
- struct dpu_hw_intr *intr,
- int irq_idx,
- bool clear);
-
- /**
- * lock - take the IRQ lock
- * @intr: HW interrupt handle
- * @return: irq_flags for the taken spinlock
- */
- unsigned long (*lock)(
- struct dpu_hw_intr *intr);
-
- /**
- * unlock - take the IRQ lock
- * @intr: HW interrupt handle
- * @irq_flags: the irq_flags returned from lock
- */
- void (*unlock)(
- struct dpu_hw_intr *intr, unsigned long irq_flags);
-};
-
/**
* struct dpu_hw_intr: hw interrupts handling data structure
* @hw: virtual address mapping
@@ -126,15 +40,19 @@ struct dpu_hw_intr_ops {
* @save_irq_status: array of IRQ status reg storage created during init
* @total_irqs: total number of irq_idx mapped in the hw_interrupts
* @irq_lock: spinlock for accessing IRQ resources
+ * @irq_cb_tbl: array of IRQ callbacks lists
+ * @irq_counts: array of IRQ counts
*/
struct dpu_hw_intr {
struct dpu_hw_blk_reg_map hw;
- struct dpu_hw_intr_ops ops;
u32 *cache_irq_mask;
u32 *save_irq_status;
u32 total_irqs;
spinlock_t irq_lock;
unsigned long irq_mask;
+
+ struct list_head *irq_cb_tbl;
+ atomic_t *irq_counts;
};
/**
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
index cb6bb7a22c15..86363c0ec834 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#include "dpu_kms.h"
@@ -24,6 +25,15 @@
#define LM_BLEND0_FG_ALPHA 0x04
#define LM_BLEND0_BG_ALPHA 0x08
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+#define LM_MISR_FRAME_COUNT_MASK 0xFF
+#define LM_MISR_CTRL_ENABLE BIT(8)
+#define LM_MISR_CTRL_STATUS BIT(9)
+#define LM_MISR_CTRL_STATUS_CLEAR BIT(10)
+#define LM_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+
static const struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
const struct dpu_mdss_cfg *m,
void __iomem *addr,
@@ -96,6 +106,48 @@ static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
}
}
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, LM_MISR_CTRL_STATUS_CLEAR);
+
+	/* Clear old MISR value (in case it's read before a new value is calculated) */
+ wmb();
+
+ if (enable) {
+ config = (frame_count & LM_MISR_FRAME_COUNT_MASK) |
+ LM_MISR_CTRL_ENABLE | LM_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+ } else {
+ DPU_REG_WRITE(c, LM_MISR_CTRL, 0);
+ }
+
+}
+
+static int dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx, u32 *misr_value)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 ctrl = 0;
+
+ if (!misr_value)
+ return -EINVAL;
+
+ ctrl = DPU_REG_READ(c, LM_MISR_CTRL);
+
+ if (!(ctrl & LM_MISR_CTRL_ENABLE))
+ return -EINVAL;
+
+ if (!(ctrl & LM_MISR_CTRL_STATUS))
+ return -EINVAL;
+
+ *misr_value = DPU_REG_READ(c, LM_MISR_SIGNATURE);
+
+ return 0;
+}
+
static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
{
@@ -158,6 +210,8 @@ static void _setup_mixer_ops(const struct dpu_mdss_cfg *m,
ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
ops->setup_alpha_out = dpu_hw_lm_setup_color3;
ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
}
struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
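
setup_misr()/collect_misr() follow an arm-then-poll contract: the MISR is cleared and armed, the hardware accumulates a CRC over the requested number of frames, and collect_misr() returns -EINVAL until LM_MISR_CTRL_STATUS is latched. A hedged usage sketch (the caller is invented; a real user would wait for vblanks between the calls):

	/* Illustrative only: arm the mixer MISR for one frame and read it back. */
	static int example_read_mixer_crc(struct dpu_hw_mixer *lm, u32 *crc)
	{
		int ret;

		lm->ops.setup_misr(lm, true, 1);	/* enable, frame_count = 1 */

		/* in practice: wait for at least one vblank here */

		ret = lm->ops.collect_misr(lm, crc);	/* -EINVAL until a CRC is ready */
		if (ret)
			return ret;

		lm->ops.setup_misr(lm, false, 0);	/* disable again */
		return 0;
	}
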
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
index 4a6b2de19ef6..d8052fb2d5da 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_LM_H
@@ -53,6 +54,16 @@ struct dpu_hw_lm_ops {
void (*setup_border_color)(struct dpu_hw_mixer *ctx,
struct dpu_mdss_color *color,
u8 border_en);
+
+ /**
+ * setup_misr: Enable/disable MISR
+ */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx, bool enable, u32 frame_count);
+
+ /**
+ * collect_misr: Read MISR signature
+ */
+ int (*collect_misr)(struct dpu_hw_mixer *ctx, u32 *misr_value);
};
struct dpu_hw_mixer {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index 69eed7932486..f9460672176a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -138,11 +138,13 @@ static int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
u32 *idx)
{
int rc = 0;
- const struct dpu_sspp_sub_blks *sblk = ctx->cap->sblk;
+ const struct dpu_sspp_sub_blks *sblk;
- if (!ctx)
+ if (!ctx || !ctx->cap || !ctx->cap->sblk)
return -EINVAL;
+ sblk = ctx->cap->sblk;
+
switch (s_id) {
case DPU_SSPP_SRC:
*idx = sblk->src_blk.base;
@@ -419,7 +421,7 @@ static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
(void)pe;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
- || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ || !scaler3_cfg)
return;
dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index ff3cffde84cd..6d4911957e33 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -1,5 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+/*
+ * Copyright (c) 2015-2021, The Linux Foundation. All rights reserved.
*/
#ifndef _DPU_HW_UTIL_H
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index ae48f41821cf..a15b26428280 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -188,6 +188,7 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
struct dentry *entry;
struct drm_device *dev;
struct msm_drm_private *priv;
+ int i;
if (!p)
return -EINVAL;
@@ -203,8 +204,10 @@ static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
dpu_debugfs_vbif_init(dpu_kms, entry);
dpu_debugfs_core_irq_init(dpu_kms, entry);
- if (priv->dp)
- msm_dp_debugfs_init(priv->dp, minor);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (priv->dp[i])
+ msm_dp_debugfs_init(priv->dp[i], minor);
+ }
return dpu_core_perf_debugfs_init(dpu_kms, entry);
}
@@ -544,35 +547,42 @@ static int _dpu_kms_initialize_displayport(struct drm_device *dev,
{
struct drm_encoder *encoder = NULL;
struct msm_display_info info;
- int rc = 0;
+ int rc;
+ int i;
- if (!priv->dp)
- return rc;
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
- encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
- if (IS_ERR(encoder)) {
- DPU_ERROR("encoder init failed for dsi display\n");
- return PTR_ERR(encoder);
- }
+ encoder = dpu_encoder_init(dev, DRM_MODE_ENCODER_TMDS);
+ if (IS_ERR(encoder)) {
+			DPU_ERROR("encoder init failed for dp display\n");
+ return PTR_ERR(encoder);
+ }
- memset(&info, 0, sizeof(info));
- rc = msm_dp_modeset_init(priv->dp, dev, encoder);
- if (rc) {
- DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
- drm_encoder_cleanup(encoder);
- return rc;
- }
+ memset(&info, 0, sizeof(info));
+ rc = msm_dp_modeset_init(priv->dp[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for DP, rc = %d\n", rc);
+ drm_encoder_cleanup(encoder);
+ return rc;
+ }
- priv->encoders[priv->num_encoders++] = encoder;
+ priv->encoders[priv->num_encoders++] = encoder;
- info.num_of_h_tiles = 1;
- info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
- info.intf_type = encoder->encoder_type;
- rc = dpu_encoder_setup(dev, encoder, &info);
- if (rc)
- DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
- encoder->base.id, rc);
- return rc;
+ info.num_of_h_tiles = 1;
+ info.h_tile_instance[0] = i;
+ info.capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ info.intf_type = encoder->encoder_type;
+ rc = dpu_encoder_setup(dev, encoder, &info);
+ if (rc) {
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+ return rc;
+ }
+ }
+
+ return 0;
}
/**
@@ -792,6 +802,7 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
{
struct msm_drm_private *priv;
struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int i;
if (!dpu_kms || !dpu_kms->dev)
return -EINVAL;
@@ -800,7 +811,8 @@ static int dpu_irq_postinstall(struct msm_kms *kms)
if (!priv)
return -EINVAL;
- msm_dp_irq_postinstall(priv->dp);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++)
+ msm_dp_irq_postinstall(priv->dp[i]);
return 0;
}
@@ -908,6 +920,10 @@ static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
return 0;
mmu = msm_iommu_new(dpu_kms->dev->dev, domain);
+ if (IS_ERR(mmu)) {
+ iommu_domain_free(domain);
+ return PTR_ERR(mmu);
+ }
aspace = msm_gem_address_space_create(mmu, "dpu1",
0x1000, 0x100000000 - 0x1000);
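
priv->dp is now an array rather than a single pointer, so every consumer iterates with ARRAY_SIZE() and skips unpopulated slots. A minimal sketch of that pattern (the helper name is an assumption):

	/* Illustrative only: visit every DP controller that actually probed. */
	static void example_for_each_dp(struct msm_drm_private *priv,
					void (*fn)(struct msm_dp *dp))
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
			if (!priv->dp[i])
				continue;	/* slot unused on this SoC */
			fn(priv->dp[i]);
		}
	}
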
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index 323a6bce9e64..775bcbda860f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -78,18 +78,6 @@ struct dpu_irq_callback {
void *arg;
};
-/**
- * struct dpu_irq: IRQ structure contains callback registration info
- * @total_irq: total number of irq_idx obtained from HW interrupts mapping
- * @irq_cb_tbl: array of IRQ callbacks setting
- * @debugfs_file: debugfs file for irq statistics
- */
-struct dpu_irq {
- u32 total_irqs;
- struct list_head *irq_cb_tbl;
- atomic_t *irq_counts;
-};
-
struct dpu_kms {
struct msm_kms base;
struct drm_device *dev;
@@ -104,7 +92,6 @@ struct dpu_kms {
struct regulator *venus;
struct dpu_hw_intr *hw_intr;
- struct dpu_irq irq_obj;
struct dpu_core_perf perf;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index c989621209aa..a3e3b9d1b82e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -1193,7 +1193,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (DPU_FORMAT_IS_YUV(fmt))
_dpu_plane_setup_csc(pdpu);
else
- pdpu->csc_ptr = 0;
+ pdpu->csc_ptr = NULL;
}
_dpu_plane_set_qos_lut(plane, fb);
@@ -1330,7 +1330,7 @@ static void dpu_plane_reset(struct drm_plane *plane)
/* remove previous state, if present */
if (plane->state) {
dpu_plane_destroy_state(plane, plane->state);
- plane->state = 0;
+ plane->state = NULL;
}
pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index cdcaf470f148..5a33bb148e9e 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -173,12 +173,9 @@ int mdp4_disable(struct mdp4_kms *mdp4_kms)
DBG("");
clk_disable_unprepare(mdp4_kms->clk);
- if (mdp4_kms->pclk)
- clk_disable_unprepare(mdp4_kms->pclk);
- if (mdp4_kms->lut_clk)
- clk_disable_unprepare(mdp4_kms->lut_clk);
- if (mdp4_kms->axi_clk)
- clk_disable_unprepare(mdp4_kms->axi_clk);
+ clk_disable_unprepare(mdp4_kms->pclk);
+ clk_disable_unprepare(mdp4_kms->lut_clk);
+ clk_disable_unprepare(mdp4_kms->axi_clk);
return 0;
}
@@ -188,12 +185,9 @@ int mdp4_enable(struct mdp4_kms *mdp4_kms)
DBG("");
clk_prepare_enable(mdp4_kms->clk);
- if (mdp4_kms->pclk)
- clk_prepare_enable(mdp4_kms->pclk);
- if (mdp4_kms->lut_clk)
- clk_prepare_enable(mdp4_kms->lut_clk);
- if (mdp4_kms->axi_clk)
- clk_prepare_enable(mdp4_kms->axi_clk);
+ clk_prepare_enable(mdp4_kms->pclk);
+ clk_prepare_enable(mdp4_kms->lut_clk);
+ clk_prepare_enable(mdp4_kms->axi_clk);
return 0;
}
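
The if (clk) guards are dropped because clk_prepare_enable() and clk_disable_unprepare() already treat a NULL struct clk * as a no-op, which is the usual way optional clocks are handled. A sketch of that convention (the device and clock name are made up; devm_clk_get_optional() is one way to end up with a NULL clock, the mdp drivers simply leave the pointer NULL when lookup fails):

	/* Illustrative only: an optional clock needs no NULL checks at use sites. */
	static int example_toggle_lut_clk(struct device *dev)
	{
		struct clk *lut_clk;

		lut_clk = devm_clk_get_optional(dev, "lut");	/* NULL if absent */
		if (IS_ERR(lut_clk))
			return PTR_ERR(lut_clk);

		clk_prepare_enable(lut_clk);	/* no-op for a NULL clock */
		clk_disable_unprepare(lut_clk);	/* likewise NULL-safe */

		return 0;
	}
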
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
index 9741544ffc35..1bf9ff5dbabc 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_cfg.c
@@ -752,6 +752,94 @@ const struct mdp5_cfg_hw msm8x76_config = {
.max_clk = 360000000,
};
+static const struct mdp5_cfg_hw msm8x53_config = {
+ .name = "msm8x53",
+ .mdp = {
+ .count = 1,
+ .caps = MDP_CAP_CDM |
+ MDP_CAP_SRC_SPLIT,
+ },
+ .ctl = {
+ .count = 3,
+ .base = { 0x01000, 0x01200, 0x01400 },
+ .flush_hw_mask = 0xffffffff,
+ },
+ .pipe_vig = {
+ .count = 1,
+ .base = { 0x04000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SCALE |
+ MDP_PIPE_CAP_CSC |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_rgb = {
+ .count = 2,
+ .base = { 0x14000, 0x16000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_DECIMATION |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_dma = {
+ .count = 1,
+ .base = { 0x24000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ 0,
+ },
+ .pipe_cursor = {
+ .count = 1,
+ .base = { 0x34000 },
+ .caps = MDP_PIPE_CAP_HFLIP |
+ MDP_PIPE_CAP_VFLIP |
+ MDP_PIPE_CAP_SW_PIX_EXT |
+ MDP_PIPE_CAP_CURSOR |
+ 0,
+ },
+
+ .lm = {
+ .count = 3,
+ .base = { 0x44000, 0x45000 },
+ .instances = {
+ { .id = 0, .pp = 0, .dspp = 0,
+ .caps = MDP_LM_CAP_DISPLAY |
+ MDP_LM_CAP_PAIR },
+ { .id = 1, .pp = 1, .dspp = -1,
+ .caps = MDP_LM_CAP_DISPLAY },
+ },
+ .nb_stages = 5,
+ .max_width = 2048,
+ .max_height = 0xFFFF,
+ },
+ .dspp = {
+ .count = 1,
+ .base = { 0x54000 },
+
+ },
+ .pp = {
+ .count = 2,
+ .base = { 0x70000, 0x70800 },
+ },
+ .cdm = {
+ .count = 1,
+ .base = { 0x79200 },
+ },
+ .intf = {
+ .base = { 0x6a000, 0x6a800, 0x6b000 },
+ .connect = {
+ [0] = INTF_DISABLED,
+ [1] = INTF_DSI,
+ [2] = INTF_DSI,
+ },
+ },
+ .max_clk = 400000000,
+};
+
static const struct mdp5_cfg_hw msm8917_config = {
.name = "msm8917",
.mdp = {
@@ -1151,6 +1239,7 @@ static const struct mdp5_cfg_handler cfg_handlers_v1[] = {
{ .revision = 7, .config = { .hw = &msm8x96_config } },
{ .revision = 11, .config = { .hw = &msm8x76_config } },
{ .revision = 15, .config = { .hw = &msm8917_config } },
+ { .revision = 16, .config = { .hw = &msm8x53_config } },
};
static const struct mdp5_cfg_handler cfg_handlers_v3[] = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index b3b42672b2d4..7b242246d4e7 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -295,15 +295,12 @@ static int mdp5_disable(struct mdp5_kms *mdp5_kms)
mdp5_kms->enable_count--;
WARN_ON(mdp5_kms->enable_count < 0);
- if (mdp5_kms->tbu_rt_clk)
- clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
- if (mdp5_kms->tbu_clk)
- clk_disable_unprepare(mdp5_kms->tbu_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_rt_clk);
+ clk_disable_unprepare(mdp5_kms->tbu_clk);
clk_disable_unprepare(mdp5_kms->ahb_clk);
clk_disable_unprepare(mdp5_kms->axi_clk);
clk_disable_unprepare(mdp5_kms->core_clk);
- if (mdp5_kms->lut_clk)
- clk_disable_unprepare(mdp5_kms->lut_clk);
+ clk_disable_unprepare(mdp5_kms->lut_clk);
return 0;
}
@@ -317,12 +314,9 @@ static int mdp5_enable(struct mdp5_kms *mdp5_kms)
clk_prepare_enable(mdp5_kms->ahb_clk);
clk_prepare_enable(mdp5_kms->axi_clk);
clk_prepare_enable(mdp5_kms->core_clk);
- if (mdp5_kms->lut_clk)
- clk_prepare_enable(mdp5_kms->lut_clk);
- if (mdp5_kms->tbu_clk)
- clk_prepare_enable(mdp5_kms->tbu_clk);
- if (mdp5_kms->tbu_rt_clk)
- clk_prepare_enable(mdp5_kms->tbu_rt_clk);
+ clk_prepare_enable(mdp5_kms->lut_clk);
+ clk_prepare_enable(mdp5_kms->tbu_clk);
+ clk_prepare_enable(mdp5_kms->tbu_rt_clk);
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 2f4895bcb0b0..0ea53420bc40 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -136,10 +136,8 @@ static int mdp5_mdss_enable(struct msm_mdss *mdss)
DBG("");
clk_prepare_enable(mdp5_mdss->ahb_clk);
- if (mdp5_mdss->axi_clk)
- clk_prepare_enable(mdp5_mdss->axi_clk);
- if (mdp5_mdss->vsync_clk)
- clk_prepare_enable(mdp5_mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
@@ -149,10 +147,8 @@ static int mdp5_mdss_disable(struct msm_mdss *mdss)
struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdp5_mdss->vsync_clk)
- clk_disable_unprepare(mdp5_mdss->vsync_clk);
- if (mdp5_mdss->axi_clk)
- clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
diff --git a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
index cabe15190ec1..2e1acb1bc390 100644
--- a/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
+++ b/drivers/gpu/drm/msm/disp/msm_disp_snapshot_util.c
@@ -126,8 +126,12 @@ void msm_disp_snapshot_capture_state(struct msm_disp_state *disp_state)
priv = drm_dev->dev_private;
kms = priv->kms;
- if (priv->dp)
- msm_dp_snapshot(disp_state, priv->dp);
+ for (i = 0; i < ARRAY_SIZE(priv->dp); i++) {
+ if (!priv->dp[i])
+ continue;
+
+ msm_dp_snapshot(disp_state, priv->dp[i]);
+ }
for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
if (!priv->dsi[i])
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index cc2bb8295329..6ae9b29044b6 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -24,15 +24,6 @@
#define DP_INTERRUPT_STATUS_ACK_SHIFT 1
#define DP_INTERRUPT_STATUS_MASK_SHIFT 2
-#define MSM_DP_CONTROLLER_AHB_OFFSET 0x0000
-#define MSM_DP_CONTROLLER_AHB_SIZE 0x0200
-#define MSM_DP_CONTROLLER_AUX_OFFSET 0x0200
-#define MSM_DP_CONTROLLER_AUX_SIZE 0x0200
-#define MSM_DP_CONTROLLER_LINK_OFFSET 0x0400
-#define MSM_DP_CONTROLLER_LINK_SIZE 0x0C00
-#define MSM_DP_CONTROLLER_P0_OFFSET 0x1000
-#define MSM_DP_CONTROLLER_P0_SIZE 0x0400
-
#define DP_INTERRUPT_STATUS1 \
(DP_INTR_AUX_I2C_DONE| \
DP_INTR_WRONG_ADDR | DP_INTR_TIMEOUT | \
@@ -66,82 +57,77 @@ void dp_catalog_snapshot(struct dp_catalog *dp_catalog, struct msm_disp_state *d
{
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *dss = &catalog->io->dp_controller;
- msm_disp_snapshot_add_block(disp_state, catalog->io->dp_controller.len,
- catalog->io->dp_controller.base, "dp_ctrl");
+ msm_disp_snapshot_add_block(disp_state, dss->ahb.len, dss->ahb.base, "dp_ahb");
+ msm_disp_snapshot_add_block(disp_state, dss->aux.len, dss->aux.base, "dp_aux");
+ msm_disp_snapshot_add_block(disp_state, dss->link.len, dss->link.base, "dp_link");
+ msm_disp_snapshot_add_block(disp_state, dss->p0.len, dss->p0.base, "dp_p0");
}
static inline u32 dp_read_aux(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_AUX_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.aux.base + offset);
}
static inline void dp_write_aux(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_AUX_OFFSET;
/*
	 * To make sure aux reg writes happen before any other operation,
	 * this function uses writel() instead of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.aux.base + offset);
}
static inline u32 dp_read_ahb(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_AHB_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_ahb(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_AHB_OFFSET;
/*
	 * To make sure phy reg writes happen before any other operation,
	 * this function uses writel() instead of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.ahb.base + offset);
}
static inline void dp_write_p0(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_P0_OFFSET;
/*
	 * To make sure interface reg writes happen before any other operation,
	 * this function uses writel() instead of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_p0(struct dp_catalog_private *catalog,
u32 offset)
{
- offset += MSM_DP_CONTROLLER_P0_OFFSET;
/*
	 * To make sure interface reg writes happen before any other operation,
	 * this function uses writel() instead of writel_relaxed()
*/
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.p0.base + offset);
}
static inline u32 dp_read_link(struct dp_catalog_private *catalog, u32 offset)
{
- offset += MSM_DP_CONTROLLER_LINK_OFFSET;
- return readl_relaxed(catalog->io->dp_controller.base + offset);
+ return readl_relaxed(catalog->io->dp_controller.link.base + offset);
}
static inline void dp_write_link(struct dp_catalog_private *catalog,
u32 offset, u32 data)
{
- offset += MSM_DP_CONTROLLER_LINK_OFFSET;
/*
	 * To make sure link reg writes happen before any other operation,
	 * this function uses writel() instead of writel_relaxed()
*/
- writel(data, catalog->io->dp_controller.base + offset);
+ writel(data, catalog->io->dp_controller.link.base + offset);
}
/* aux related catalog functions */
@@ -276,29 +262,21 @@ static void dump_regs(void __iomem *base, int len)
void dp_catalog_dump_regs(struct dp_catalog *dp_catalog)
{
- u32 offset, len;
struct dp_catalog_private *catalog = container_of(dp_catalog,
struct dp_catalog_private, dp_catalog);
+ struct dss_io_data *io = &catalog->io->dp_controller;
pr_info("AHB regs\n");
- offset = MSM_DP_CONTROLLER_AHB_OFFSET;
- len = MSM_DP_CONTROLLER_AHB_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->ahb.base, io->ahb.len);
pr_info("AUXCLK regs\n");
- offset = MSM_DP_CONTROLLER_AUX_OFFSET;
- len = MSM_DP_CONTROLLER_AUX_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->aux.base, io->aux.len);
pr_info("LCLK regs\n");
- offset = MSM_DP_CONTROLLER_LINK_OFFSET;
- len = MSM_DP_CONTROLLER_LINK_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->link.base, io->link.len);
pr_info("P0CLK regs\n");
- offset = MSM_DP_CONTROLLER_P0_OFFSET;
- len = MSM_DP_CONTROLLER_P0_SIZE;
- dump_regs(catalog->io->dp_controller.base + offset, len);
+ dump_regs(io->p0.base, io->p0.len);
}
u32 dp_catalog_aux_get_irq(struct dp_catalog *dp_catalog)
@@ -493,8 +471,7 @@ int dp_catalog_ctrl_set_pattern(struct dp_catalog *dp_catalog,
bit = BIT(pattern - 1) << DP_MAINLINK_READY_LINK_TRAINING_SHIFT;
/* Poll for mainlink ready status */
- ret = readx_poll_timeout(readl, catalog->io->dp_controller.base +
- MSM_DP_CONTROLLER_LINK_OFFSET +
+ ret = readx_poll_timeout(readl, catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & bit,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
@@ -541,8 +518,7 @@ bool dp_catalog_ctrl_mainlink_ready(struct dp_catalog *dp_catalog)
struct dp_catalog_private, dp_catalog);
/* Poll for mainlink ready status */
- ret = readl_poll_timeout(catalog->io->dp_controller.base +
- MSM_DP_CONTROLLER_LINK_OFFSET +
+ ret = readl_poll_timeout(catalog->io->dp_controller.link.base +
REG_DP_MAINLINK_READY,
data, data & DP_MAINLINK_READY_FOR_VIDEO,
POLLING_SLEEP_US, POLLING_TIMEOUT_US);
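
With dp_controller split into ahb/aux/link/p0 sub-regions (struct dss_io_region, see dp_parser.h further down), every accessor indexes its own base and the hard-coded MSM_DP_CONTROLLER_*_OFFSET arithmetic disappears. A generic restatement of the new accessor shape (illustrative helpers, not part of the diff):

	#include <linux/io.h>

	/* Illustrative only: each register block carries its own base and length. */
	static inline u32 example_region_read(const struct dss_io_region *region,
					      u32 offset)
	{
		return readl_relaxed(region->base + offset);
	}

	static inline void example_region_write(struct dss_io_region *region,
						u32 offset, u32 data)
	{
		/* ordered write, matching the existing dp_write_*() helpers */
		writel(data, region->base + offset);
	}
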
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index 2f6247e80e9d..da4323556ef3 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -24,240 +24,108 @@ struct dp_debug_private {
struct dp_usbpd *usbpd;
struct dp_link *link;
struct dp_panel *panel;
- struct drm_connector **connector;
+ struct drm_connector *connector;
struct device *dev;
struct drm_device *drm_dev;
struct dp_debug dp_debug;
};
-static int dp_debug_check_buffer_overflow(int rc, int *max_size, int *len)
-{
- if (rc >= *max_size) {
- DRM_ERROR("buffer overflow\n");
- return -EINVAL;
- }
- *len += rc;
- *max_size = SZ_4K - *len;
-
- return 0;
-}
-
-static ssize_t dp_debug_read_info(struct file *file, char __user *user_buff,
- size_t count, loff_t *ppos)
+static int dp_debug_show(struct seq_file *seq, void *p)
{
- struct dp_debug_private *debug = file->private_data;
- char *buf;
- u32 len = 0, rc = 0;
+ struct dp_debug_private *debug = seq->private;
u64 lclk = 0;
- u32 max_size = SZ_4K;
u32 link_params_rate;
- struct drm_display_mode *drm_mode;
+ const struct drm_display_mode *drm_mode;
if (!debug)
return -ENODEV;
- if (*ppos)
- return 0;
-
- buf = kzalloc(SZ_4K, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
drm_mode = &debug->panel->dp_mode.drm_mode;
- rc = snprintf(buf + len, max_size, "\tname = %s\n", DEBUG_NAME);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
+ seq_printf(seq, "\tname = %s\n", DEBUG_NAME);
+ seq_printf(seq, "\tdp_panel\n\t\tmax_pclk_khz = %d\n",
debug->panel->max_pclk_khz);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdrm_dp_link\n\t\trate = %u\n",
+ seq_printf(seq, "\tdrm_dp_link\n\t\trate = %u\n",
debug->panel->link_info.rate);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tnum_lanes = %u\n",
+ seq_printf(seq, "\t\tnum_lanes = %u\n",
debug->panel->link_info.num_lanes);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tcapabilities = %lu\n",
+ seq_printf(seq, "\t\tcapabilities = %lu\n",
debug->panel->link_info.capabilities);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\tdp_panel_info:\n\t\tactive = %dx%d\n",
+ seq_printf(seq, "\tdp_panel_info:\n\t\tactive = %dx%d\n",
drm_mode->hdisplay,
drm_mode->vdisplay);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tback_porch = %dx%d\n",
+ seq_printf(seq, "\t\tback_porch = %dx%d\n",
drm_mode->htotal - drm_mode->hsync_end,
drm_mode->vtotal - drm_mode->vsync_end);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tfront_porch = %dx%d\n",
+ seq_printf(seq, "\t\tfront_porch = %dx%d\n",
drm_mode->hsync_start - drm_mode->hdisplay,
drm_mode->vsync_start - drm_mode->vdisplay);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tsync_width = %dx%d\n",
+ seq_printf(seq, "\t\tsync_width = %dx%d\n",
drm_mode->hsync_end - drm_mode->hsync_start,
drm_mode->vsync_end - drm_mode->vsync_start);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tactive_low = %dx%d\n",
+ seq_printf(seq, "\t\tactive_low = %dx%d\n",
debug->panel->dp_mode.h_active_low,
debug->panel->dp_mode.v_active_low);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\th_skew = %d\n",
+ seq_printf(seq, "\t\th_skew = %d\n",
drm_mode->hskew);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\trefresh rate = %d\n",
+ seq_printf(seq, "\t\trefresh rate = %d\n",
drm_mode_vrefresh(drm_mode));
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tpixel clock khz = %d\n",
+ seq_printf(seq, "\t\tpixel clock khz = %d\n",
drm_mode->clock);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tbpp = %d\n",
+ seq_printf(seq, "\t\tbpp = %d\n",
debug->panel->dp_mode.bpp);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
/* Link Information */
- rc = snprintf(buf + len, max_size,
- "\tdp_link:\n\t\ttest_requested = %d\n",
+ seq_printf(seq, "\tdp_link:\n\t\ttest_requested = %d\n",
debug->link->sink_request);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tnum_lanes = %d\n",
+ seq_printf(seq, "\t\tnum_lanes = %d\n",
debug->link->link_params.num_lanes);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
link_params_rate = debug->link->link_params.rate;
- rc = snprintf(buf + len, max_size,
- "\t\tbw_code = %d\n",
+ seq_printf(seq, "\t\tbw_code = %d\n",
drm_dp_link_rate_to_bw_code(link_params_rate));
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
lclk = debug->link->link_params.rate * 1000;
- rc = snprintf(buf + len, max_size,
- "\t\tlclk = %lld\n", lclk);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tv_level = %d\n",
+ seq_printf(seq, "\t\tlclk = %lld\n", lclk);
+ seq_printf(seq, "\t\tv_level = %d\n",
debug->link->phy_params.v_level);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- rc = snprintf(buf + len, max_size,
- "\t\tp_level = %d\n",
+ seq_printf(seq, "\t\tp_level = %d\n",
debug->link->phy_params.p_level);
- if (dp_debug_check_buffer_overflow(rc, &max_size, &len))
- goto error;
-
- if (copy_to_user(user_buff, buf, len))
- goto error;
-
- *ppos += len;
- kfree(buf);
- return len;
- error:
- kfree(buf);
- return -EINVAL;
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(dp_debug);
static int dp_test_data_show(struct seq_file *m, void *data)
{
- struct drm_device *dev;
- struct dp_debug_private *debug;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
u32 bpc;
- debug = m->private;
- dev = debug->drm_dev;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- bpc = debug->link->test_video.test_bit_depth;
- seq_printf(m, "hdisplay: %d\n",
- debug->link->test_video.test_h_width);
- seq_printf(m, "vdisplay: %d\n",
- debug->link->test_video.test_v_height);
- seq_printf(m, "bpc: %u\n",
- dp_link_bit_depth_to_bpc(bpc));
- } else
- seq_puts(m, "0");
+ if (connector->status == connector_status_connected) {
+ bpc = debug->link->test_video.test_bit_depth;
+ seq_printf(m, "hdisplay: %d\n",
+ debug->link->test_video.test_h_width);
+ seq_printf(m, "vdisplay: %d\n",
+ debug->link->test_video.test_v_height);
+ seq_printf(m, "bpc: %u\n",
+ dp_link_bit_depth_to_bpc(bpc));
+ } else {
+ seq_puts(m, "0");
}
- drm_connector_list_iter_end(&conn_iter);
-
return 0;
}
DEFINE_SHOW_ATTRIBUTE(dp_test_data);
static int dp_test_type_show(struct seq_file *m, void *data)
{
- struct dp_debug_private *debug = m->private;
- struct drm_device *dev = debug->drm_dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug = m->private;
+ const struct drm_connector *connector = debug->connector;
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected)
- seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
- else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->status == connector_status_connected)
+ seq_printf(m, "%02x", DP_TEST_LINK_VIDEO_PATTERN);
+ else
+ seq_puts(m, "0");
return 0;
}
@@ -269,14 +137,12 @@ static ssize_t dp_test_active_write(struct file *file,
{
char *input_buffer;
int status = 0;
- struct dp_debug_private *debug;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
+ const struct dp_debug_private *debug;
+ const struct drm_connector *connector;
int val = 0;
debug = ((struct seq_file *)file->private_data)->private;
- dev = debug->drm_dev;
+ connector = debug->connector;
if (len == 0)
return 0;
@@ -287,30 +153,22 @@ static ssize_t dp_test_active_write(struct file *file,
DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- DRM_DEBUG_DRIVER("Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- debug->panel->video_test = true;
- else
- debug->panel->video_test = false;
+ if (connector->status == connector_status_connected) {
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0) {
+ kfree(input_buffer);
+ return status;
}
+ DRM_DEBUG_DRIVER("Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ debug->panel->video_test = true;
+ else
+ debug->panel->video_test = false;
}
- drm_connector_list_iter_end(&conn_iter);
kfree(input_buffer);
- if (status < 0)
- return status;
*offp += len;
return len;
@@ -319,25 +177,16 @@ static ssize_t dp_test_active_write(struct file *file,
static int dp_test_active_show(struct seq_file *m, void *data)
{
struct dp_debug_private *debug = m->private;
- struct drm_device *dev = debug->drm_dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- if (connector->status == connector_status_connected) {
- if (debug->panel->video_test)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
+ struct drm_connector *connector = debug->connector;
+
+ if (connector->status == connector_status_connected) {
+ if (debug->panel->video_test)
+ seq_puts(m, "1");
+ else
seq_puts(m, "0");
+ } else {
+ seq_puts(m, "0");
}
- drm_connector_list_iter_end(&conn_iter);
return 0;
}
@@ -349,11 +198,6 @@ static int dp_test_active_open(struct inode *inode,
inode->i_private);
}
-static const struct file_operations dp_debug_fops = {
- .open = simple_open,
- .read = dp_debug_read_info,
-};
-
static const struct file_operations test_active_fops = {
.owner = THIS_MODULE,
.open = dp_test_active_open,
@@ -391,7 +235,7 @@ static int dp_debug_init(struct dp_debug *dp_debug, struct drm_minor *minor)
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector, struct drm_minor *minor)
+ struct drm_connector *connector, struct drm_minor *minor)
{
int rc = 0;
struct dp_debug_private *debug;
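
The hand-rolled snprintf-into-a-4K-buffer read path is replaced by seq_file, where DEFINE_SHOW_ATTRIBUTE() generates the open/read/llseek/release boilerplate from a single show() callback and seq_printf() grows the buffer as needed. A minimal, self-contained sketch of the pattern (file name and printed field are illustrative):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_status_show(struct seq_file *m, void *unused)
	{
		const struct dp_debug_private *debug = m->private;

		/* no length bookkeeping or overflow checks needed */
		seq_printf(m, "connected: %d\n",
			   debug->connector->status == connector_status_connected);
		return 0;
	}
	DEFINE_SHOW_ATTRIBUTE(example_status);	/* emits example_status_fops */

	static void example_debugfs_init(struct dp_debug_private *debug,
					 struct dentry *parent)
	{
		debugfs_create_file("example_status", 0444, parent, debug,
				    &example_status_fops);
	}
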
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index 7eaedfbb149c..8c0d0b5178fd 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -43,7 +43,7 @@ struct dp_debug {
*/
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector,
+ struct drm_connector *connector,
struct drm_minor *minor);
/**
@@ -60,7 +60,7 @@ void dp_debug_put(struct dp_debug *dp_debug);
static inline
struct dp_debug *dp_debug_get(struct device *dev, struct dp_panel *panel,
struct dp_usbpd *usbpd, struct dp_link *link,
- struct drm_connector **connector, struct drm_minor *minor)
+ struct drm_connector *connector, struct drm_minor *minor)
{
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a0392e4d8134..aba8aa47ed76 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -10,6 +10,7 @@
#include <linux/component.h>
#include <linux/of_irq.h>
#include <linux/delay.h>
+#include <drm/drm_panel.h>
#include "msm_drv.h"
#include "msm_kms.h"
@@ -27,7 +28,6 @@
#include "dp_audio.h"
#include "dp_debug.h"
-static struct msm_dp *g_dp_display;
#define HPD_STRING_SIZE 30
enum {
@@ -79,6 +79,8 @@ struct dp_display_private {
char *name;
int irq;
+ unsigned int id;
+
/* state variables */
bool core_initialized;
bool hpd_irq_on;
@@ -116,11 +118,35 @@ struct dp_display_private {
struct dp_audio *audio;
};
+struct msm_dp_desc {
+ phys_addr_t io_start;
+ unsigned int connector_type;
+};
+
+struct msm_dp_config {
+ const struct msm_dp_desc *descs;
+ size_t num_descs;
+};
+
+static const struct msm_dp_config sc7180_dp_cfg = {
+ .descs = (const struct msm_dp_desc[]) {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ },
+ .num_descs = 1,
+};
+
static const struct of_device_id dp_dt_match[] = {
- {.compatible = "qcom,sc7180-dp"},
+ { .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
{}
};
+static struct dp_display_private *dev_get_dp_display_private(struct device *dev)
+{
+ struct msm_dp *dp = dev_get_drvdata(dev);
+
+ return container_of(dp, struct dp_display_private, dp_display);
+}
+
static int dp_add_event(struct dp_display_private *dp_priv, u32 event,
u32 data, u32 delay)
{
@@ -197,25 +223,24 @@ static int dp_display_bind(struct device *dev, struct device *master,
void *data)
{
int rc = 0;
- struct dp_display_private *dp;
- struct drm_device *drm;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct msm_drm_private *priv;
+ struct drm_device *drm;
drm = dev_get_drvdata(master);
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
-
dp->dp_display.drm_dev = drm;
priv = drm->dev_private;
- priv->dp = &(dp->dp_display);
+ priv->dp[dp->id] = &dp->dp_display;
- rc = dp->parser->parse(dp->parser);
+ rc = dp->parser->parse(dp->parser, dp->dp_display.connector_type);
if (rc) {
DRM_ERROR("device tree parsing failed\n");
goto end;
}
+ dp->dp_display.panel_bridge = dp->parser->panel_bridge;
+
dp->aux->drm_dev = drm;
rc = dp_aux_register(dp->aux);
if (rc) {
@@ -240,16 +265,13 @@ end:
static void dp_display_unbind(struct device *dev, struct device *master,
void *data)
{
- struct dp_display_private *dp;
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
struct drm_device *drm = dev_get_drvdata(master);
struct msm_drm_private *priv = drm->dev_private;
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
-
dp_power_client_deinit(dp->power);
dp_aux_unregister(dp->aux);
- priv->dp = NULL;
+ priv->dp[dp->id] = NULL;
}
static const struct component_ops dp_display_comp_ops = {
@@ -379,38 +401,17 @@ static void dp_display_host_deinit(struct dp_display_private *dp)
static int dp_display_usbpd_configure_cb(struct device *dev)
{
- int rc = 0;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- rc = -EINVAL;
- goto end;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_display_host_init(dp, false);
- rc = dp_display_process_hpd_high(dp);
-end:
- return rc;
+ return dp_display_process_hpd_high(dp);
}
static int dp_display_usbpd_disconnect_cb(struct device *dev)
{
int rc = 0;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- rc = -EINVAL;
- return rc;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
@@ -472,15 +473,7 @@ static int dp_display_usbpd_attention_cb(struct device *dev)
{
int rc = 0;
u32 sink_request;
- struct dp_display_private *dp;
-
- if (!dev) {
- DRM_ERROR("invalid dev\n");
- return -EINVAL;
- }
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(dev);
/* check for any test request issued by sink */
rc = dp_link_process_request(dp->link);
@@ -647,7 +640,7 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
DRM_DEBUG_DP("hpd_state=%d\n", state);
/* signal the disconnect event early to ensure proper teardown */
- dp_display_handle_plugged_change(g_dp_display, false);
+ dp_display_handle_plugged_change(&dp->dp_display, false);
/* enable HDP plug interrupt to prepare for next plugin */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, true);
@@ -834,7 +827,7 @@ static int dp_display_set_mode(struct msm_dp *dp_display,
return 0;
}
-static int dp_display_prepare(struct msm_dp *dp)
+static int dp_display_prepare(struct msm_dp *dp_display)
{
return 0;
}
@@ -842,9 +835,7 @@ static int dp_display_prepare(struct msm_dp *dp)
static int dp_display_enable(struct dp_display_private *dp, u32 data)
{
int rc = 0;
- struct msm_dp *dp_display;
-
- dp_display = g_dp_display;
+ struct msm_dp *dp_display = &dp->dp_display;
DRM_DEBUG_DP("sink_count=%d\n", dp->link->sink_count);
if (dp_display->power_on) {
@@ -880,9 +871,7 @@ static int dp_display_post_enable(struct msm_dp *dp_display)
static int dp_display_disable(struct dp_display_private *dp, u32 data)
{
- struct msm_dp *dp_display;
-
- dp_display = g_dp_display;
+ struct msm_dp *dp_display = &dp->dp_display;
if (!dp_display->power_on)
return 0;
@@ -912,7 +901,7 @@ static int dp_display_disable(struct dp_display_private *dp, u32 data)
return 0;
}
-static int dp_display_unprepare(struct msm_dp *dp)
+static int dp_display_unprepare(struct msm_dp *dp_display)
{
return 0;
}
@@ -1213,10 +1202,33 @@ int dp_display_request_irq(struct msm_dp *dp_display)
return 0;
}
+static const struct msm_dp_desc *dp_display_get_desc(struct platform_device *pdev,
+ unsigned int *id)
+{
+ const struct msm_dp_config *cfg = of_device_get_match_data(&pdev->dev);
+ struct resource *res;
+ int i;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
+
+ for (i = 0; i < cfg->num_descs; i++) {
+ if (cfg->descs[i].io_start == res->start) {
+ *id = i;
+ return &cfg->descs[i];
+ }
+ }
+
+ dev_err(&pdev->dev, "unknown displayport instance\n");
+ return NULL;
+}
+
static int dp_display_probe(struct platform_device *pdev)
{
int rc = 0;
struct dp_display_private *dp;
+ const struct msm_dp_desc *desc;
if (!pdev || !pdev->dev.of_node) {
DRM_ERROR("pdev not found\n");
@@ -1227,8 +1239,13 @@ static int dp_display_probe(struct platform_device *pdev)
if (!dp)
return -ENOMEM;
+ desc = dp_display_get_desc(pdev, &dp->id);
+ if (!desc)
+ return -EINVAL;
+
dp->pdev = pdev;
dp->name = "drm_dp";
+ dp->dp_display.connector_type = desc->connector_type;
rc = dp_init_sub_modules(dp);
if (rc) {
@@ -1237,14 +1254,13 @@ static int dp_display_probe(struct platform_device *pdev)
}
mutex_init(&dp->event_mutex);
- g_dp_display = &dp->dp_display;
/* Store DP audio handle inside DP display */
- g_dp_display->dp_audio = dp->audio;
+ dp->dp_display.dp_audio = dp->audio;
init_completion(&dp->audio_comp);
- platform_set_drvdata(pdev, g_dp_display);
+ platform_set_drvdata(pdev, &dp->dp_display);
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
@@ -1257,10 +1273,7 @@ static int dp_display_probe(struct platform_device *pdev)
static int dp_display_remove(struct platform_device *pdev)
{
- struct dp_display_private *dp;
-
- dp = container_of(g_dp_display,
- struct dp_display_private, dp_display);
+ struct dp_display_private *dp = dev_get_dp_display_private(&pdev->dev);
dp_display_deinit_sub_modules(dp);
@@ -1315,7 +1328,7 @@ static int dp_pm_resume(struct device *dev)
dp->dp_display.is_connected = true;
} else {
dp->dp_display.is_connected = false;
- dp_display_handle_plugged_change(g_dp_display, false);
+ dp_display_handle_plugged_change(dp_display, false);
}
DRM_DEBUG_DP("After, sink_count=%d is_connected=%d core_inited=%d power_on=%d\n",
@@ -1429,7 +1442,7 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor)
dev = &dp->pdev->dev;
dp->debug = dp_debug_get(dev, dp->panel, dp->usbpd,
- dp->link, &dp->dp_display.connector,
+ dp->link, dp->dp_display.connector,
minor);
if (IS_ERR(dp->debug)) {
rc = PTR_ERR(dp->debug);
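
Dropping g_dp_display relies on two long-standing kernel idioms: probe stores the public struct msm_dp via platform_set_drvdata(), and any callback recovers the private wrapper with container_of(), which is all dev_get_dp_display_private() does. A hedged restatement in isolation (the callback itself is invented):

	/* Illustrative only: per-device private data instead of a global. */
	static int example_callback(struct device *dev)
	{
		struct msm_dp *dp_display = dev_get_drvdata(dev);
		struct dp_display_private *dp =
			container_of(dp_display, struct dp_display_private, dp_display);

		/* 'dp' is the instance registered for this device, so several
		 * DP controllers can coexist without a global pointer. */
		return dp_add_event(dp, EV_USER_NOTIFICATION, false, 0);
	}
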
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 8b47cdabb67e..8e80e3bac394 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -15,9 +15,11 @@ struct msm_dp {
struct device *codec_dev;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ struct drm_bridge *panel_bridge;
bool is_connected;
bool audio_enabled;
bool power_on;
+ unsigned int connector_type;
hdmi_codec_plugged_cb plugged_cb;
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 764f4b81017e..76856c4ee1d6 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -5,6 +5,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_crtc.h>
#include "msm_drv.h"
@@ -147,7 +148,7 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
ret = drm_connector_init(dp_display->drm_dev, connector,
&dp_connector_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
+ dp_display->connector_type);
if (ret)
return ERR_PTR(ret);
@@ -160,5 +161,15 @@ struct drm_connector *dp_drm_connector_init(struct msm_dp *dp_display)
drm_connector_attach_encoder(connector, dp_display->encoder);
+ if (dp_display->panel_bridge) {
+ ret = drm_bridge_attach(dp_display->encoder,
+ dp_display->panel_bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ DRM_ERROR("failed to attach panel bridge: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+ }
+
return connector;
}
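
For eDP the sink is a fixed drm_panel wrapped in a panel bridge, and DRM_BRIDGE_ATTACH_NO_CONNECTOR keeps the driver's own connector as the one exposed to userspace. A condensed sketch of the whole chain as used here (function name and error handling are illustrative):

	/* Illustrative only: wrap a DT panel in a bridge and attach it. */
	static int example_attach_edp_panel(struct device *dev,
					    struct drm_encoder *encoder)
	{
		struct drm_panel *panel;
		struct drm_bridge *panel_bridge;
		int ret;

		/* port 1, endpoint 0: where the panel sits in the DP node */
		ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
		if (ret)
			return ret;

		panel_bridge = devm_drm_panel_bridge_add(dev, panel);
		if (IS_ERR(panel_bridge))
			return PTR_ERR(panel_bridge);

		/* the driver keeps its own connector; the bridge must not add one */
		return drm_bridge_attach(encoder, panel_bridge, NULL,
					 DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	}
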
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index 2181b60e1d1d..71db10c0f262 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -234,7 +234,7 @@ u32 dp_panel_get_mode_bpp(struct dp_panel *dp_panel,
u32 mode_edid_bpp, u32 mode_pclk_khz)
{
struct dp_panel_private *panel;
- u32 bpp = mode_edid_bpp;
+ u32 bpp;
if (!dp_panel || !mode_edid_bpp || !mode_pclk_khz) {
DRM_ERROR("invalid input\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index 0519dd3ac3c3..a7acc23f742b 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -6,11 +6,22 @@
#include <linux/of_gpio.h>
#include <linux/phy/phy.h>
+#include <drm/drm_of.h>
#include <drm/drm_print.h>
+#include <drm/drm_bridge.h>
#include "dp_parser.h"
#include "dp_reg.h"
+#define DP_DEFAULT_AHB_OFFSET 0x0000
+#define DP_DEFAULT_AHB_SIZE 0x0200
+#define DP_DEFAULT_AUX_OFFSET 0x0200
+#define DP_DEFAULT_AUX_SIZE 0x0200
+#define DP_DEFAULT_LINK_OFFSET 0x0400
+#define DP_DEFAULT_LINK_SIZE 0x0C00
+#define DP_DEFAULT_P0_OFFSET 0x1000
+#define DP_DEFAULT_P0_SIZE 0x0400
+
static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
.num = 2,
.regs = {
@@ -19,67 +30,73 @@ static const struct dp_regulator_cfg sdm845_dp_reg_cfg = {
},
};
-static int msm_dss_ioremap(struct platform_device *pdev,
- struct dss_io_data *io_data)
-{
- struct resource *res = NULL;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("%pS->%s: msm_dss_get_res failed\n",
- __builtin_return_address(0), __func__);
- return -ENODEV;
- }
-
- io_data->len = (u32)resource_size(res);
- io_data->base = ioremap(res->start, io_data->len);
- if (!io_data->base) {
- DRM_ERROR("%pS->%s: ioremap failed\n",
- __builtin_return_address(0), __func__);
- return -EIO;
- }
-
- return 0;
-}
-
-static void msm_dss_iounmap(struct dss_io_data *io_data)
+static void __iomem *dp_ioremap(struct platform_device *pdev, int idx, size_t *len)
{
- if (io_data->base) {
- iounmap(io_data->base);
- io_data->base = NULL;
- }
- io_data->len = 0;
-}
+ struct resource *res;
+ void __iomem *base;
-static void dp_parser_unmap_io_resources(struct dp_parser *parser)
-{
- struct dp_io *io = &parser->io;
+ base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
+ if (!IS_ERR(base))
+ *len = resource_size(res);
- msm_dss_iounmap(&io->dp_controller);
+ return base;
}
static int dp_parser_ctrl_res(struct dp_parser *parser)
{
- int rc = 0;
struct platform_device *pdev = parser->pdev;
struct dp_io *io = &parser->io;
+ struct dss_io_data *dss = &io->dp_controller;
+
+ dss->ahb.base = dp_ioremap(pdev, 0, &dss->ahb.len);
+ if (IS_ERR(dss->ahb.base))
+ return PTR_ERR(dss->ahb.base);
+
+ dss->aux.base = dp_ioremap(pdev, 1, &dss->aux.len);
+ if (IS_ERR(dss->aux.base)) {
+ /*
+ * The initial binding had a single reg, but in order to
+ * support variation in the sub-region sizes this was split.
+ * dp_ioremap() will fail with -EINVAL here if only a single
+ * reg is specified, so fill in the sub-region offsets and
+ * lengths based on this single region.
+ */
+ if (PTR_ERR(dss->aux.base) == -EINVAL) {
+ if (dss->ahb.len < DP_DEFAULT_P0_OFFSET + DP_DEFAULT_P0_SIZE) {
+ DRM_ERROR("legacy memory region not large enough\n");
+ return -EINVAL;
+ }
+
+ dss->ahb.len = DP_DEFAULT_AHB_SIZE;
+ dss->aux.base = dss->ahb.base + DP_DEFAULT_AUX_OFFSET;
+ dss->aux.len = DP_DEFAULT_AUX_SIZE;
+ dss->link.base = dss->ahb.base + DP_DEFAULT_LINK_OFFSET;
+ dss->link.len = DP_DEFAULT_LINK_SIZE;
+ dss->p0.base = dss->ahb.base + DP_DEFAULT_P0_OFFSET;
+ dss->p0.len = DP_DEFAULT_P0_SIZE;
+ } else {
+ DRM_ERROR("unable to remap aux region: %pe\n", dss->aux.base);
+ return PTR_ERR(dss->aux.base);
+ }
+ } else {
+ dss->link.base = dp_ioremap(pdev, 2, &dss->link.len);
+ if (IS_ERR(dss->link.base)) {
+ DRM_ERROR("unable to remap link region: %pe\n", dss->link.base);
+ return PTR_ERR(dss->link.base);
+ }
- rc = msm_dss_ioremap(pdev, &io->dp_controller);
- if (rc) {
- DRM_ERROR("unable to remap dp io resources, rc=%d\n", rc);
- goto err;
+ dss->p0.base = dp_ioremap(pdev, 3, &dss->p0.len);
+ if (IS_ERR(dss->p0.base)) {
+ DRM_ERROR("unable to remap p0 region: %pe\n", dss->p0.base);
+ return PTR_ERR(dss->p0.base);
+ }
}
io->phy = devm_phy_get(&pdev->dev, "dp");
- if (IS_ERR(io->phy)) {
- rc = PTR_ERR(io->phy);
- goto err;
- }
+ if (IS_ERR(io->phy))
+ return PTR_ERR(io->phy);
return 0;
-err:
- dp_parser_unmap_io_resources(parser);
- return rc;
}
static int dp_parser_misc(struct dp_parser *parser)
@@ -248,7 +265,28 @@ static int dp_parser_clock(struct dp_parser *parser)
return 0;
}
-static int dp_parser_parse(struct dp_parser *parser)
+static int dp_parser_find_panel(struct dp_parser *parser)
+{
+ struct device *dev = &parser->pdev->dev;
+ struct drm_panel *panel;
+ int rc;
+
+ rc = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, NULL);
+ if (rc) {
+ DRM_ERROR("failed to acquire DRM panel: %d\n", rc);
+ return rc;
+ }
+
+ parser->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(parser->panel_bridge)) {
+ DRM_ERROR("failed to create panel bridge\n");
+ return PTR_ERR(parser->panel_bridge);
+ }
+
+ return 0;
+}
+
+static int dp_parser_parse(struct dp_parser *parser, int connector_type)
{
int rc = 0;
@@ -269,6 +307,12 @@ static int dp_parser_parse(struct dp_parser *parser)
if (rc)
return rc;
+ if (connector_type == DRM_MODE_CONNECTOR_eDP) {
+ rc = dp_parser_find_panel(parser);
+ if (rc)
+ return rc;
+ }
+
/* Map the corresponding regulator information according to
* version. Currently, since we only have one supported platform,
* mapping the regulator directly.
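
dp_ioremap() wraps devm_platform_get_and_ioremap_resource(), which reports a missing reg index as -EINVAL; that is what lets the code above fall back to carving the four sub-regions out of a single legacy mapping. A sketch of that distinction on its own (the helper is an assumption and returns NULL for an absent region):

	/* Illustrative only: map an optional MEM resource by index. */
	static void __iomem *example_ioremap_optional(struct platform_device *pdev,
						      int idx, size_t *len)
	{
		struct resource *res;
		void __iomem *base;

		base = devm_platform_get_and_ioremap_resource(pdev, idx, &res);
		if (IS_ERR(base)) {
			if (PTR_ERR(base) == -EINVAL)
				return NULL;	/* reg index simply not present */
			return base;		/* genuine mapping failure (ERR_PTR) */
		}

		*len = resource_size(res);
		return base;
	}
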
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 34b49628bbaf..3172da089421 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -25,11 +25,18 @@ enum dp_pm_type {
DP_MAX_PM
};
-struct dss_io_data {
- u32 len;
+struct dss_io_region {
+ size_t len;
void __iomem *base;
};
+struct dss_io_data {
+ struct dss_io_region ahb;
+ struct dss_io_region aux;
+ struct dss_io_region link;
+ struct dss_io_region p0;
+};
+
static inline const char *dp_parser_pm_name(enum dp_pm_type module)
{
switch (module) {
@@ -116,8 +123,9 @@ struct dp_parser {
struct dp_display_data disp_data;
const struct dp_regulator_cfg *regulator_cfg;
u32 max_dp_lanes;
+ struct drm_bridge *panel_bridge;
- int (*parse)(struct dp_parser *parser);
+ int (*parse)(struct dp_parser *parser, int connector_type);
};
/**
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index b50db91cb8a7..569c8ff062ba 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -107,6 +107,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
u32 dma_base, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host);
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
struct msm_dsi_phy_shared_timings *phy_shared_timings,
bool is_bonded_dsi, struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index c86b5090fae6..f69a125f9559 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -106,7 +106,8 @@ struct msm_dsi_host {
phys_addr_t ctrl_size;
struct regulator_bulk_data supplies[DSI_DEV_REGULATOR_MAX];
- struct clk *bus_clks[DSI_BUS_CLK_MAX];
+ int num_bus_clks;
+ struct clk_bulk_data bus_clks[DSI_BUS_CLK_MAX];
struct clk *byte_clk;
struct clk *esc_clk;
@@ -115,16 +116,16 @@ struct msm_dsi_host {
struct clk *pixel_clk_src;
struct clk *byte_intf_clk;
- u32 byte_clk_rate;
- u32 pixel_clk_rate;
- u32 esc_clk_rate;
+ unsigned long byte_clk_rate;
+ unsigned long pixel_clk_rate;
+ unsigned long esc_clk_rate;
/* DSI v2 specific clocks */
struct clk *src_clk;
struct clk *esc_clk_src;
struct clk *dsi_clk_src;
- u32 src_clk_rate;
+ unsigned long src_clk_rate;
struct gpio_desc *disp_en_gpio;
struct gpio_desc *te_gpio;
@@ -374,15 +375,14 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
int i, ret = 0;
/* get bus clocks */
- for (i = 0; i < cfg->num_bus_clks; i++) {
- msm_host->bus_clks[i] = msm_clk_get(pdev,
- cfg->bus_clk_names[i]);
- if (IS_ERR(msm_host->bus_clks[i])) {
- ret = PTR_ERR(msm_host->bus_clks[i]);
- pr_err("%s: Unable to get %s clock, ret = %d\n",
- __func__, cfg->bus_clk_names[i], ret);
- goto exit;
- }
+ for (i = 0; i < cfg->num_bus_clks; i++)
+ msm_host->bus_clks[i].id = cfg->bus_clk_names[i];
+ msm_host->num_bus_clks = cfg->num_bus_clks;
+
+ ret = devm_clk_bulk_get(&pdev->dev, msm_host->num_bus_clks, msm_host->bus_clks);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to get clocks, ret = %d\n", ret);
+ goto exit;
}
/* get link and source clocks */
@@ -433,41 +433,6 @@ exit:
return ret;
}
-static int dsi_bus_clk_enable(struct msm_dsi_host *msm_host)
-{
- const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
- int i, ret;
-
- DBG("id=%d", msm_host->id);
-
- for (i = 0; i < cfg->num_bus_clks; i++) {
- ret = clk_prepare_enable(msm_host->bus_clks[i]);
- if (ret) {
- pr_err("%s: failed to enable bus clock %d ret %d\n",
- __func__, i, ret);
- goto err;
- }
- }
-
- return 0;
-err:
- while (--i >= 0)
- clk_disable_unprepare(msm_host->bus_clks[i]);
-
- return ret;
-}
-
-static void dsi_bus_clk_disable(struct msm_dsi_host *msm_host)
-{
- const struct msm_dsi_config *cfg = msm_host->cfg_hnd->cfg;
- int i;
-
- DBG("");
-
- for (i = cfg->num_bus_clks - 1; i >= 0; i--)
- clk_disable_unprepare(msm_host->bus_clks[i]);
-}
-
int msm_dsi_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -478,7 +443,7 @@ int msm_dsi_runtime_suspend(struct device *dev)
if (!msm_host->cfg_hnd)
return 0;
- dsi_bus_clk_disable(msm_host);
+ clk_bulk_disable_unprepare(msm_host->num_bus_clks, msm_host->bus_clks);
return 0;
}
@@ -493,15 +458,15 @@ int msm_dsi_runtime_resume(struct device *dev)
if (!msm_host->cfg_hnd)
return 0;
- return dsi_bus_clk_enable(msm_host);
+ return clk_bulk_prepare_enable(msm_host->num_bus_clks, msm_host->bus_clks);
}
int dsi_link_clk_set_rate_6g(struct msm_dsi_host *msm_host)
{
- u32 byte_intf_rate;
+ unsigned long byte_intf_rate;
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%d",
+ DBG("Set clk rates: pclk=%d, byteclk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate);
ret = dev_pm_opp_set_rate(&msm_host->pdev->dev,
@@ -558,13 +523,11 @@ int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto pixel_clk_err;
}
- if (msm_host->byte_intf_clk) {
- ret = clk_prepare_enable(msm_host->byte_intf_clk);
- if (ret) {
- pr_err("%s: Failed to enable byte intf clk\n",
- __func__);
- goto byte_intf_clk_err;
- }
+ ret = clk_prepare_enable(msm_host->byte_intf_clk);
+ if (ret) {
+ pr_err("%s: Failed to enable byte intf clk\n",
+ __func__);
+ goto byte_intf_clk_err;
}
return 0;
@@ -583,7 +546,7 @@ int dsi_link_clk_set_rate_v2(struct msm_dsi_host *msm_host)
{
int ret;
- DBG("Set clk rates: pclk=%d, byteclk=%d, esc_clk=%d, dsi_src_clk=%d",
+ DBG("Set clk rates: pclk=%d, byteclk=%lu, esc_clk=%lu, dsi_src_clk=%lu",
msm_host->mode->clock, msm_host->byte_clk_rate,
msm_host->esc_clk_rate, msm_host->src_clk_rate);
@@ -660,8 +623,7 @@ void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
dev_pm_opp_set_rate(&msm_host->pdev->dev, 0);
clk_disable_unprepare(msm_host->esc_clk);
clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_intf_clk);
clk_disable_unprepare(msm_host->byte_clk);
}
@@ -673,10 +635,10 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
clk_disable_unprepare(msm_host->byte_clk);
}
-static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
+static unsigned long dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
- u32 pclk_rate;
+ unsigned long pclk_rate;
pclk_rate = mode->clock * 1000;
@@ -696,7 +658,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
{
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
+ unsigned long pclk_rate = dsi_get_pclk_rate(msm_host, is_bonded_dsi);
u64 pclk_bpp = (u64)pclk_rate * bpp;
if (lanes == 0) {
@@ -713,7 +675,7 @@ static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
msm_host->pixel_clk_rate = pclk_rate;
msm_host->byte_clk_rate = pclk_bpp;
- DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ DBG("pclk=%lu, bclk=%lu", msm_host->pixel_clk_rate,
msm_host->byte_clk_rate);
}
@@ -772,7 +734,7 @@ int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ DBG("esc=%lu, src=%lu", msm_host->esc_clk_rate,
msm_host->src_clk_rate);
return 0;
@@ -1898,6 +1860,23 @@ int msm_dsi_host_init(struct msm_dsi *msm_dsi)
return ret;
}
+ msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (msm_host->irq < 0) {
+ ret = msm_host->irq;
+ dev_err(&pdev->dev, "failed to get irq: %d\n", ret);
+ return ret;
+ }
+
+ /* do not autoenable, will be enabled later */
+ ret = devm_request_irq(&pdev->dev, msm_host->irq, dsi_host_irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT | IRQF_NO_AUTOEN,
+ "dsi_isr", msm_host);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to request IRQ%u: %d\n",
+ msm_host->irq, ret);
+ return ret;
+ }
+
init_completion(&msm_host->dma_comp);
init_completion(&msm_host->video_comp);
mutex_init(&msm_host->dev_mutex);
@@ -1925,7 +1904,6 @@ void msm_dsi_host_destroy(struct mipi_dsi_host *host)
DBG("");
dsi_tx_buf_free(msm_host);
if (msm_host->workqueue) {
- flush_workqueue(msm_host->workqueue);
destroy_workqueue(msm_host->workqueue);
msm_host->workqueue = NULL;
}
@@ -1941,25 +1919,8 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- struct platform_device *pdev = msm_host->pdev;
int ret;
- msm_host->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (msm_host->irq < 0) {
- ret = msm_host->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get irq: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_irq(&pdev->dev, msm_host->irq,
- dsi_host_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "dsi_isr", msm_host);
- if (ret < 0) {
- DRM_DEV_ERROR(&pdev->dev, "failed to request IRQ%u: %d\n",
- msm_host->irq, ret);
- return ret;
- }
-
msm_host->dev = dev;
ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
@@ -2315,6 +2276,20 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
clk_req->escclk_rate = msm_host->esc_clk_rate;
}
+void msm_dsi_host_enable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ enable_irq(msm_host->irq);
+}
+
+void msm_dsi_host_disable_irq(struct mipi_dsi_host *host)
+{
+ struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+
+ disable_irq(msm_host->irq);
+}
+
int msm_dsi_host_enable(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
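The hunks above move the DSI host IRQ request into msm_dsi_host_init() with IRQF_NO_AUTOEN and add msm_dsi_host_enable_irq()/msm_dsi_host_disable_irq() so the line is only unmasked while the host is powered. Below is a minimal, generic sketch of that request-now/enable-later pattern; the names (my_isr, my_probe) are illustrative, and platform_get_irq() is used here because, unlike irq_of_parse_and_map(), it returns a negative errno on failure.

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t my_isr(int irq, void *data)
{
	/* read and acknowledge the hardware status here */
	return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* IRQF_NO_AUTOEN: the line stays masked until enable_irq() is called */
	ret = devm_request_irq(&pdev->dev, irq, my_isr,
			       IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN,
			       "my_isr", NULL);
	if (ret)
		return ret;

	/* later, once the block is powered and clocked: */
	enable_irq(irq);
	/* ... and again before it is powered back down: */
	disable_irq(irq);

	return 0;
}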
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index c41d39f5b7cf..20c4d650fd80 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -3,6 +3,8 @@
* Copyright (c) 2015, The Linux Foundation. All rights reserved.
*/
+#include "drm/drm_bridge_connector.h"
+
#include "msm_kms.h"
#include "dsi.h"
@@ -377,6 +379,14 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
}
}
+ /*
+ * Enable before preparing the panel, disable after unpreparing, so
+ * that the panel can communicate over the DSI link.
+ */
+ msm_dsi_host_enable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_enable_irq(msm_dsi1->host);
+
/* Always call panel functions once, because even for dual panels,
* there is only one drm_panel instance.
*/
@@ -411,6 +421,10 @@ host_en_fail:
if (panel)
drm_panel_unprepare(panel);
panel_prep_fail:
+ msm_dsi_host_disable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_disable_irq(msm_dsi1->host);
+
if (is_bonded_dsi && msm_dsi1)
msm_dsi_host_power_off(msm_dsi1->host);
host1_on_fail:
@@ -523,6 +537,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ msm_dsi_host_disable_irq(host);
+ if (is_bonded_dsi && msm_dsi1)
+ msm_dsi_host_disable_irq(msm_dsi1->host);
+
/* Save PHY status if it is a clock source */
msm_dsi_phy_pll_save_state(msm_dsi->phy);
@@ -688,10 +706,10 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_device *dev = msm_dsi->dev;
+ struct drm_connector *connector;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
- struct drm_connector *connector;
- struct list_head *connector_list;
+ int ret;
int_bridge = msm_dsi->bridge;
ext_bridge = msm_dsi->external_bridge =
@@ -699,22 +717,44 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
- /* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
-
/*
- * we need the drm_connector created by the external bridge
- * driver (or someone else) to feed it to our driver's
- * priv->connector[] list, mainly for msm_fbdev_init()
+ * Try first to create the bridge without it creating its own
+ * connector.. currently some bridges support this, and others
+ * do not (and some support both modes)
*/
- connector_list = &dev->mode_config.connector_list;
+ ret = drm_bridge_attach(encoder, ext_bridge, int_bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret == -EINVAL) {
+ struct drm_connector *connector;
+ struct list_head *connector_list;
+
+ /* link the internal dsi bridge to the external bridge */
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
+
+ /*
+ * we need the drm_connector created by the external bridge
+ * driver (or someone else) to feed it to our driver's
+ * priv->connector[] list, mainly for msm_fbdev_init()
+ */
+ connector_list = &dev->mode_config.connector_list;
+
+ list_for_each_entry(connector, connector_list, head) {
+ if (drm_connector_has_possible_encoder(connector, encoder))
+ return connector;
+ }
+
+ return ERR_PTR(-ENODEV);
+ }
- list_for_each_entry(connector, connector_list, head) {
- if (drm_connector_has_possible_encoder(connector, encoder))
- return connector;
+ connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_ERROR("Unable to create bridge connector\n");
+ return ERR_CAST(connector);
}
- return ERR_PTR(-ENODEV);
+ drm_connector_attach_encoder(connector, encoder);
+
+ return connector;
}
void msm_dsi_manager_bridge_destroy(struct drm_bridge *bridge)
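The rewritten msm_dsi_manager_ext_bridge_init() above first tries DRM_BRIDGE_ATTACH_NO_CONNECTOR and only falls back to the legacy bridge-created connector when the bridge driver rejects that flag with -EINVAL. A rough, self-contained sketch of that fallback flow follows; my_attach_ext_bridge and its arguments are illustrative, and the legacy-path connector lookup is trimmed.

#include <linux/err.h>
#include <drm/drm_bridge.h>
#include <drm/drm_bridge_connector.h>
#include <drm/drm_connector.h>

static struct drm_connector *my_attach_ext_bridge(struct drm_device *dev,
						  struct drm_encoder *encoder,
						  struct drm_bridge *ext_bridge)
{
	struct drm_connector *connector;
	int ret;

	/* preferred path: the bridge does not create its own connector */
	ret = drm_bridge_attach(encoder, ext_bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret == -EINVAL) {
		/* legacy bridge: let it register the connector itself */
		ret = drm_bridge_attach(encoder, ext_bridge, NULL, 0);
		if (ret)
			return ERR_PTR(ret);
		/* caller would then find the connector on
		 * dev->mode_config.connector_list, as the driver above does */
		return NULL;
	} else if (ret) {
		return ERR_PTR(ret);
	}

	/* build a connector on top of the bridge chain ourselves */
	connector = drm_bridge_connector_init(dev, encoder);
	if (IS_ERR(connector))
		return connector;

	drm_connector_attach_encoder(connector, encoder);
	return connector;
}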
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 8c65ef6968ca..9842e04b5858 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -627,6 +627,8 @@ static const struct of_device_id dsi_phy_dt_match[] = {
.data = &dsi_phy_14nm_cfgs },
{ .compatible = "qcom,dsi-phy-14nm-660",
.data = &dsi_phy_14nm_660_cfgs },
+ { .compatible = "qcom,dsi-phy-14nm-8953",
+ .data = &dsi_phy_14nm_8953_cfgs },
#endif
#ifdef CONFIG_DRM_MSM_DSI_10NM_PHY
{ .compatible = "qcom,dsi-phy-10nm",
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
index b91303ada74f..4c8257581bfc 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.h
@@ -48,6 +48,7 @@ extern const struct msm_dsi_phy_cfg dsi_phy_20nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_28nm_8960_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs;
+extern const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_10nm_8998_cfgs;
extern const struct msm_dsi_phy_cfg dsi_phy_7nm_cfgs;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
index 5b4e991f220d..7414966f198e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_14nm.c
@@ -213,9 +213,7 @@ static void pll_14nm_dec_frac_calc(struct dsi_pll_14nm *pll, struct dsi_pll_conf
DBG("vco_clk_rate=%lld ref_clk_rate=%lld", vco_clk_rate, fref);
dec_start_multiple = div_u64(vco_clk_rate * multiplier, fref);
- div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
-
- dec_start = div_u64(dec_start_multiple, multiplier);
+ dec_start = div_u64_rem(dec_start_multiple, multiplier, &div_frac_start);
pconf->dec_start = (u32)dec_start;
pconf->div_frac_start = div_frac_start;
@@ -1065,3 +1063,24 @@ const struct msm_dsi_phy_cfg dsi_phy_14nm_660_cfgs = {
.io_start = { 0xc994400, 0xc996000 },
.num_dsi_phy = 2,
};
+
+const struct msm_dsi_phy_cfg dsi_phy_14nm_8953_cfgs = {
+ .has_phy_lane = true,
+ .reg_cfg = {
+ .num = 1,
+ .regs = {
+ {"vcca", 17000, 32},
+ },
+ },
+ .ops = {
+ .enable = dsi_14nm_phy_enable,
+ .disable = dsi_14nm_phy_disable,
+ .pll_init = dsi_pll_14nm_init,
+ .save_pll_state = dsi_14nm_pll_save_state,
+ .restore_pll_state = dsi_14nm_pll_restore_state,
+ },
+ .min_pll_rate = VCO_MIN_RATE,
+ .max_pll_rate = VCO_MAX_RATE,
+ .io_start = { 0x1a94400, 0x1a96400 },
+ .num_dsi_phy = 2,
+};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index cb297b08458e..079613d2aaa9 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -114,9 +114,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
multiplier = 1 << FRAC_BITS;
dec_multiple = div_u64(pll_freq * multiplier, divider);
- div_u64_rem(dec_multiple, multiplier, &frac);
-
- dec = div_u64(dec_multiple, multiplier);
+ dec = div_u64_rem(dec_multiple, multiplier, &frac);
if (!(pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1))
config->pll_clock_inverters = 0x28;
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index fe1366b4c49f..a68a4a1867c1 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -1190,7 +1190,6 @@ void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
return;
if (ctrl->workqueue) {
- flush_workqueue(ctrl->workqueue);
destroy_workqueue(ctrl->workqueue);
ctrl->workqueue = NULL;
}
@@ -1243,8 +1242,6 @@ bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
struct drm_connector *connector, struct edid **edid)
{
- int ret = 0;
-
mutex_lock(&ctrl->dev_mutex);
if (ctrl->edid) {
@@ -1279,7 +1276,7 @@ disable_ret:
}
unlock_ret:
mutex_unlock(&ctrl->dev_mutex);
- return ret;
+ return 0;
}
int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 737453b6e596..75b64e6ae035 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -61,10 +61,8 @@ static void msm_hdmi_destroy(struct hdmi *hdmi)
* at this point, hpd has been disabled,
* after flush workq, it's safe to deinit hdcp
*/
- if (hdmi->workq) {
- flush_workqueue(hdmi->workq);
+ if (hdmi->workq)
destroy_workqueue(hdmi->workq);
- }
msm_hdmi_hdcp_destroy(hdmi);
if (hdmi->phy_dev) {
@@ -154,19 +152,13 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(&pdev->dev,
- config->hpd_reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
+ for (i = 0; i < config->hpd_reg_cnt; i++)
+ hdmi->hpd_regs[i].supply = config->hpd_reg_names[i];
- hdmi->hpd_regs[i] = reg;
+ ret = devm_regulator_bulk_get(&pdev->dev, config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get hpd regulator: %d\n", ret);
+ goto fail;
}
hdmi->pwr_regs = devm_kcalloc(&pdev->dev,
@@ -177,19 +169,11 @@ static struct hdmi *msm_hdmi_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(&pdev->dev,
- config->pwr_reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- goto fail;
- }
- hdmi->pwr_regs[i] = reg;
+ ret = devm_regulator_bulk_get(&pdev->dev, config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret) {
+ DRM_DEV_ERROR(&pdev->dev, "failed to get pwr regulator: %d\n", ret);
+ goto fail;
}
hdmi->hpd_clks = devm_kcalloc(&pdev->dev,
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0b84f0abee1..82261078c6b1 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -56,8 +56,8 @@ struct hdmi {
void __iomem *qfprom_mmio;
phys_addr_t mmio_phy_addr;
- struct regulator **hpd_regs;
- struct regulator **pwr_regs;
+ struct regulator_bulk_data *hpd_regs;
+ struct regulator_bulk_data *pwr_regs;
struct clk **hpd_clks;
struct clk **pwr_clks;
@@ -163,7 +163,7 @@ struct hdmi_phy {
void __iomem *mmio;
struct hdmi_phy_cfg *cfg;
const struct hdmi_phy_funcs *funcs;
- struct regulator **regs;
+ struct regulator_bulk_data *regs;
struct clk **clks;
};
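The regulator array conversions throughout this series all follow the same regulator_bulk pattern sketched below: name the supplies once in a regulator_bulk_data array, request them in one call, then enable/disable them as a set. The supply names ("vdda", "vddio") and my_* functions are placeholders.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data my_regs[] = {
	{ .supply = "vdda" },	/* illustrative supply names */
	{ .supply = "vddio" },
};

static int my_get_regulators(struct device *dev)
{
	/* typically done once at probe time */
	return devm_regulator_bulk_get(dev, ARRAY_SIZE(my_regs), my_regs);
}

static int my_power_on(void)
{
	return regulator_bulk_enable(ARRAY_SIZE(my_regs), my_regs);
}

static void my_power_off(void)
{
	regulator_bulk_disable(ARRAY_SIZE(my_regs), my_regs);
}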
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 6e380db9287b..f04eb4a70f0d 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -28,13 +28,9 @@ static void msm_hdmi_power_on(struct drm_bridge *bridge)
pm_runtime_get_sync(&hdmi->pdev->dev);
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- ret = regulator_enable(hdmi->pwr_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- }
- }
+ ret = regulator_bulk_enable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to enable pwr regulator: %d\n", ret);
if (config->pwr_clk_cnt > 0) {
DBG("pixclock: %lu", hdmi->pixclock);
@@ -70,13 +66,9 @@ static void power_off(struct drm_bridge *bridge)
for (i = 0; i < config->pwr_clk_cnt; i++)
clk_disable_unprepare(hdmi->pwr_clks[i]);
- for (i = 0; i < config->pwr_reg_cnt; i++) {
- ret = regulator_disable(hdmi->pwr_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %s (%d)\n",
- config->pwr_reg_names[i], ret);
- }
- }
+ ret = regulator_bulk_disable(config->pwr_reg_cnt, hdmi->pwr_regs);
+ if (ret)
+ DRM_DEV_ERROR(dev->dev, "failed to disable pwr regulator: %d\n", ret);
pm_runtime_put_autosuspend(&hdmi->pdev->dev);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index 58707a1f3878..a7f729cdec7b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -146,16 +146,13 @@ int msm_hdmi_hpd_enable(struct drm_connector *connector)
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
uint32_t hpd_ctrl;
- int i, ret;
+ int ret;
unsigned long flags;
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
+ ret = regulator_bulk_enable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable hpd regulators: %d\n", ret);
+ goto fail;
}
ret = pinctrl_pm_select_default_state(dev);
@@ -207,7 +204,7 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
struct device *dev = &hdmi->pdev->dev;
- int i, ret = 0;
+ int ret;
/* Disable HPD interrupt */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
@@ -225,12 +222,9 @@ static void hdp_disable(struct hdmi_connector *hdmi_connector)
if (ret)
dev_warn(dev, "pinctrl state chg failed: %d\n", ret);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret)
- dev_warn(dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- }
+ ret = regulator_bulk_disable(config->hpd_reg_cnt, hdmi->hpd_regs);
+ if (ret)
+ dev_warn(dev, "failed to disable hpd regulator: %d\n", ret);
}
static void
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 8a38d4b95102..16b0e8836d27 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -23,22 +23,15 @@ static int msm_hdmi_phy_resource_init(struct hdmi_phy *phy)
if (!phy->clks)
return -ENOMEM;
- for (i = 0; i < cfg->num_regs; i++) {
- struct regulator *reg;
-
- reg = devm_regulator_get(dev, cfg->reg_names[i]);
- if (IS_ERR(reg)) {
- ret = PTR_ERR(reg);
- if (ret != -EPROBE_DEFER) {
- DRM_DEV_ERROR(dev,
- "failed to get phy regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
- }
+ for (i = 0; i < cfg->num_regs; i++)
+ phy->regs[i].supply = cfg->reg_names[i];
- return ret;
- }
+ ret = devm_regulator_bulk_get(dev, cfg->num_regs, phy->regs);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to get phy regulators: %d\n", ret);
- phy->regs[i] = reg;
+ return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
@@ -66,11 +59,10 @@ int msm_hdmi_phy_resource_enable(struct hdmi_phy *phy)
pm_runtime_get_sync(dev);
- for (i = 0; i < cfg->num_regs; i++) {
- ret = regulator_enable(phy->regs[i]);
- if (ret)
- DRM_DEV_ERROR(dev, "failed to enable regulator: %s (%d)\n",
- cfg->reg_names[i], ret);
+ ret = regulator_bulk_enable(cfg->num_regs, phy->regs);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to enable regulators: (%d)\n", ret);
+ return ret;
}
for (i = 0; i < cfg->num_clks; i++) {
@@ -92,8 +84,7 @@ void msm_hdmi_phy_resource_disable(struct hdmi_phy *phy)
for (i = cfg->num_clks - 1; i >= 0; i--)
clk_disable_unprepare(phy->clks[i]);
- for (i = cfg->num_regs - 1; i >= 0; i--)
- regulator_disable(phy->regs[i]);
+ regulator_bulk_disable(cfg->num_regs, phy->regs);
pm_runtime_put_sync(dev);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
index a8f3b2cbfdc5..99c7853353fd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8996.c
@@ -682,7 +682,7 @@ static int hdmi_8996_pll_is_enabled(struct clk_hw *hw)
return pll_locked;
}
-static struct clk_ops hdmi_8996_pll_ops = {
+static const struct clk_ops hdmi_8996_pll_ops = {
.set_rate = hdmi_8996_pll_set_clk_rate,
.round_rate = hdmi_8996_pll_round_rate,
.recalc_rate = hdmi_8996_pll_recalc_rate,
@@ -695,7 +695,7 @@ static const char * const hdmi_pll_parents[] = {
"xo",
};
-static struct clk_init_data pll_init = {
+static const struct clk_init_data pll_init = {
.name = "hdmipll",
.ops = &hdmi_8996_pll_ops,
.parent_names = hdmi_pll_parents,
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index fab09e7c6efc..27c9ae563f2f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -116,20 +116,10 @@ out:
trace_msm_atomic_async_commit_finish(crtc_mask);
}
-static enum hrtimer_restart msm_atomic_pending_timer(struct hrtimer *t)
-{
- struct msm_pending_timer *timer = container_of(t,
- struct msm_pending_timer, timer);
-
- kthread_queue_work(timer->worker, &timer->work);
-
- return HRTIMER_NORESTART;
-}
-
static void msm_atomic_pending_work(struct kthread_work *work)
{
struct msm_pending_timer *timer = container_of(work,
- struct msm_pending_timer, work);
+ struct msm_pending_timer, work.work);
msm_atomic_async_commit(timer->kms, timer->crtc_idx);
}
@@ -139,8 +129,6 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
{
timer->kms = kms;
timer->crtc_idx = crtc_idx;
- hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- timer->timer.function = msm_atomic_pending_timer;
timer->worker = kthread_create_worker(0, "atomic-worker-%d", crtc_idx);
if (IS_ERR(timer->worker)) {
@@ -149,7 +137,10 @@ int msm_atomic_init_pending_timer(struct msm_pending_timer *timer,
return ret;
}
sched_set_fifo(timer->worker->task);
- kthread_init_work(&timer->work, msm_atomic_pending_work);
+
+ msm_hrtimer_work_init(&timer->work, timer->worker,
+ msm_atomic_pending_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
return 0;
}
@@ -258,7 +249,7 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
vsync_time = kms->funcs->vsync_time(kms, async_crtc);
wakeup_time = ktime_sub(vsync_time, ms_to_ktime(1));
- hrtimer_start(&timer->timer, wakeup_time,
+ msm_hrtimer_queue_work(&timer->work, wakeup_time,
HRTIMER_MODE_ABS);
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index d4e09703a87d..7936e8d498dd 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -58,7 +58,7 @@ static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
};
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
-static bool reglog = false;
+static bool reglog;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
module_param(reglog, bool, 0600);
#else
@@ -75,7 +75,7 @@ static char *vram = "16m";
MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)");
module_param(vram, charp, 0);
-bool dumpstate = false;
+bool dumpstate;
MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors");
module_param(dumpstate, bool, 0600);
@@ -200,6 +200,35 @@ void msm_rmw(void __iomem *addr, u32 mask, u32 or)
msm_writel(val | or, addr);
}
+static enum hrtimer_restart msm_hrtimer_worktimer(struct hrtimer *t)
+{
+ struct msm_hrtimer_work *work = container_of(t,
+ struct msm_hrtimer_work, timer);
+
+ kthread_queue_work(work->worker, &work->work);
+
+ return HRTIMER_NORESTART;
+}
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode)
+{
+ hrtimer_start(&work->timer, wakeup_time, mode);
+}
+
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode)
+{
+ hrtimer_init(&work->timer, clock_id, mode);
+ work->timer.function = msm_hrtimer_worktimer;
+ work->worker = worker;
+ kthread_init_work(&work->work, fn);
+}
+
static irqreturn_t msm_irq(int irq, void *arg)
{
struct drm_device *dev = arg;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index c552f0c3890c..69952b239384 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -60,6 +60,13 @@ enum msm_mdp_plane_property {
PLANE_PROP_MAX_NUM
};
+enum msm_dp_controller {
+ MSM_DP_CONTROLLER_0,
+ MSM_DP_CONTROLLER_1,
+ MSM_DP_CONTROLLER_2,
+ MSM_DP_CONTROLLER_COUNT,
+};
+
#define MSM_GPU_MAX_RINGS 4
#define MAX_H_TILES_PER_DISPLAY 2
@@ -153,7 +160,7 @@ struct msm_drm_private {
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
- struct msm_dp *dp;
+ struct msm_dp *dp[MSM_DP_CONTROLLER_COUNT];
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
@@ -480,6 +487,28 @@ void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
void msm_rmw(void __iomem *addr, u32 mask, u32 or);
+/**
+ * struct msm_hrtimer_work - a helper to combine an hrtimer with kthread_work
+ *
+ * @timer: hrtimer to control when the kthread work is triggered
+ * @work: the kthread work
+ * @worker: the kthread worker the work will be scheduled on
+ */
+struct msm_hrtimer_work {
+ struct hrtimer timer;
+ struct kthread_work work;
+ struct kthread_worker *worker;
+};
+
+void msm_hrtimer_queue_work(struct msm_hrtimer_work *work,
+ ktime_t wakeup_time,
+ enum hrtimer_mode mode);
+void msm_hrtimer_work_init(struct msm_hrtimer_work *work,
+ struct kthread_worker *worker,
+ kthread_work_func_t fn,
+ clockid_t clock_id,
+ enum hrtimer_mode mode);
+
#define DBG(fmt, ...) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
#define VERB(fmt, ...) if (0) DRM_DEBUG_DRIVER(fmt"\n", ##__VA_ARGS__)
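A small usage sketch for the msm_hrtimer_work helper declared above: the hrtimer callback only queues the kthread_work, so the real handler runs in the worker thread's process context and may sleep. The worker name, my_work_fn and the 5 ms delay are illustrative.

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/ktime.h>

#include "msm_drv.h"

static struct kthread_worker *my_worker;
static struct msm_hrtimer_work my_work;

static void my_work_fn(struct kthread_work *work)
{
	/* runs on my_worker in process context, may sleep */
}

static int my_setup(void)
{
	my_worker = kthread_create_worker(0, "my-worker");
	if (IS_ERR(my_worker))
		return PTR_ERR(my_worker);

	msm_hrtimer_work_init(&my_work, my_worker, my_work_fn,
			      CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	return 0;
}

static void my_kick(void)
{
	/* run my_work_fn on my_worker roughly 5 ms from now */
	msm_hrtimer_queue_work(&my_work, ms_to_ktime(5), HRTIMER_MODE_REL);
}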
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 22308a1b66fc..104fdfc14027 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -5,6 +5,7 @@
*/
#include <linux/dma-map-ops.h>
+#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
@@ -85,7 +86,7 @@ static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
paddr = physaddr(obj);
for (i = 0; i < npages; i++) {
- p[i] = phys_to_page(paddr);
+ p[i] = pfn_to_page(__phys_to_pfn(paddr));
paddr += PAGE_SIZE;
}
@@ -1132,6 +1133,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
+ INIT_LIST_HEAD(&msm_obj->node);
INIT_LIST_HEAD(&msm_obj->vmas);
*obj = &msm_obj->base;
@@ -1166,7 +1168,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32
ret = msm_gem_new_impl(dev, size, flags, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
msm_obj = to_msm_bo(obj);
@@ -1250,7 +1252,7 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
if (ret)
- goto fail;
+ return ERR_PTR(ret);
drm_gem_private_object_init(dev, obj, size);
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index e39a8e7ad843..54ca0817d807 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -309,11 +309,6 @@ struct msm_gem_submit {
struct ww_acquire_ctx ticket;
uint32_t seqno; /* Sequence number of the submit on the ring */
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* Hw fence, which is created when the scheduler executes the job, and
* is signaled when the hw finishes (via seqno write from cmdstream)
*/
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index 0f1b29ee04a9..4a1420b05e97 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -4,6 +4,8 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <linux/vmalloc.h>
+
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 151d19e4453c..3cb029f10925 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -52,8 +52,6 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
return ERR_PTR(ret);
}
- xa_init_flags(&submit->deps, XA_FLAGS_ALLOC);
-
kref_init(&submit->ref);
submit->dev = dev;
submit->aspace = queue->ctx->aspace;
@@ -72,8 +70,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
{
struct msm_gem_submit *submit =
container_of(kref, struct msm_gem_submit, ref);
- unsigned long index;
- struct dma_fence *fence;
unsigned i;
if (submit->fence_id) {
@@ -82,12 +78,6 @@ void __msm_gem_submit_destroy(struct kref *kref)
mutex_unlock(&submit->queue->lock);
}
- xa_for_each (&submit->deps, index, fence) {
- dma_fence_put(fence);
- }
-
- xa_destroy(&submit->deps);
-
dma_fence_put(submit->user_fence);
dma_fence_put(submit->hw_fence);
@@ -341,11 +331,13 @@ static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit)
return ret;
}
- if (no_implicit)
+ /* exclusive fences must be ordered */
+ if (no_implicit && !write)
continue;
- ret = drm_gem_fence_array_add_implicit(&submit->deps, obj,
- write);
+ ret = drm_sched_job_add_implicit_dependencies(&submit->base,
+ obj,
+ write);
if (ret)
break;
}
@@ -589,7 +581,7 @@ static struct drm_syncobj **msm_parse_deps(struct msm_gem_submit *submit,
if (ret)
break;
- ret = drm_gem_fence_array_add(&submit->deps, fence);
+ ret = drm_sched_job_add_dependency(&submit->base, fence);
if (ret)
break;
@@ -799,7 +791,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = drm_gem_fence_array_add(&submit->deps, in_fence);
+ ret = drm_sched_job_add_dependency(&submit->base, in_fence);
if (ret)
goto out_unlock;
}
@@ -879,6 +871,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
+ drm_sched_job_arm(&submit->base);
+
submit->user_fence = dma_fence_get(&submit->base.s_fence->finished);
/*
@@ -890,17 +884,16 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (submit->fence_id < 0) {
ret = submit->fence_id;

submit->fence_id = 0;
- goto out;
}
- if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
+ if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) {
struct sync_file *sync_file = sync_file_create(submit->user_fence);
if (!sync_file) {
ret = -ENOMEM;
- goto out;
+ } else {
+ fd_install(out_fence_fd, sync_file->file);
+ args->fence_fd = out_fence_fd;
}
- fd_install(out_fence_fd, sync_file->file);
- args->fence_fd = out_fence_fd;
}
submit_attach_object_fences(submit);
@@ -908,7 +901,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* The scheduler owns a ref now: */
msm_gem_submit_get(submit);
- drm_sched_entity_push_job(&submit->base, queue->entity);
+ drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
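For reference, the scheduler-side dependency API this file now uses instead of the driver-private xarray, condensed into one sketch. The function name and parameters are illustrative and error paths are trimmed; the calls themselves (drm_sched_job_add_dependency, drm_sched_job_add_implicit_dependencies, drm_sched_job_arm, drm_sched_entity_push_job) are the ones introduced in the hunks above.

#include <linux/dma-fence.h>
#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>

static int my_queue_job(struct drm_sched_job *job, struct dma_fence *in_fence,
			struct drm_gem_object *obj, bool write)
{
	int ret;

	/* explicit dependency, e.g. a fence from a sync_file or syncobj */
	ret = drm_sched_job_add_dependency(job, in_fence);
	if (ret)
		return ret;

	/* implicit dependencies pulled from the GEM object's reservation */
	ret = drm_sched_job_add_implicit_dependencies(job, obj, write);
	if (ret)
		return ret;

	drm_sched_job_arm(job);		/* creates the scheduler fences */
	drm_sched_entity_push_job(job);	/* entity was chosen at job init */

	return 0;
}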
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 8a3a592da3a4..2c46cd968ac4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -296,7 +296,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
state->bos = kcalloc(nr,
sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
- for (i = 0; i < submit->nr_bos; i++) {
+ for (i = 0; state->bos && i < submit->nr_bos; i++) {
if (should_dump(submit, i)) {
msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
submit->bos[i].iova, submit->bos[i].flags);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 030f82f149c2..59cdd00b69d0 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -112,6 +112,13 @@ struct msm_gpu_devfreq {
* it is inactive.
*/
unsigned long idle_freq;
+
+ /**
+ * idle_work:
+ *
+ * Used to delay clamping to idle freq on active->idle transition.
+ */
+ struct msm_hrtimer_work idle_work;
};
struct msm_gpu {
@@ -203,6 +210,10 @@ struct msm_gpu {
uint32_t suspend_count;
struct msm_gpu_state *crashstate;
+
+ /* Enable clamping to idle freq when inactive: */
+ bool clamp_to_idle;
+
/* True if the hardware supports expanded apriv (a650 and newer) */
bool hw_apriv;
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 84e98c07c900..8b7473f69cb8 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -88,8 +88,12 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
.get_cur_freq = msm_devfreq_get_cur_freq,
};
+static void msm_devfreq_idle_work(struct kthread_work *work);
+
void msm_devfreq_init(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
/* We need target support to do devfreq */
if (!gpu->funcs->gpu_busy)
return;
@@ -105,25 +109,27 @@ void msm_devfreq_init(struct msm_gpu *gpu)
msm_devfreq_profile.freq_table = NULL;
msm_devfreq_profile.max_state = 0;
- gpu->devfreq.devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
+ df->devfreq = devm_devfreq_add_device(&gpu->pdev->dev,
&msm_devfreq_profile, DEVFREQ_GOV_SIMPLE_ONDEMAND,
NULL);
- if (IS_ERR(gpu->devfreq.devfreq)) {
+ if (IS_ERR(df->devfreq)) {
DRM_DEV_ERROR(&gpu->pdev->dev, "Couldn't initialize GPU devfreq\n");
- gpu->devfreq.devfreq = NULL;
+ df->devfreq = NULL;
return;
}
- devfreq_suspend_device(gpu->devfreq.devfreq);
+ devfreq_suspend_device(df->devfreq);
- gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node,
- gpu->devfreq.devfreq);
+ gpu->cooling = of_devfreq_cooling_register(gpu->pdev->dev.of_node, df->devfreq);
if (IS_ERR(gpu->cooling)) {
DRM_DEV_ERROR(&gpu->pdev->dev,
"Couldn't register GPU cooling device\n");
gpu->cooling = NULL;
}
+
+ msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
void msm_devfreq_cleanup(struct msm_gpu *gpu)
@@ -155,6 +161,11 @@ void msm_devfreq_active(struct msm_gpu *gpu)
return;
/*
+ * Cancel any pending transition to idle frequency:
+ */
+ hrtimer_cancel(&df->idle_work.timer);
+
+ /*
* Hold devfreq lock to synchronize with get_dev_status()/
* target() callbacks
*/
@@ -184,9 +195,12 @@ void msm_devfreq_active(struct msm_gpu *gpu)
mutex_unlock(&df->devfreq->lock);
}
-void msm_devfreq_idle(struct msm_gpu *gpu)
+
+static void msm_devfreq_idle_work(struct kthread_work *work)
{
- struct msm_gpu_devfreq *df = &gpu->devfreq;
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, idle_work.work);
+ struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
unsigned long idle_freq, target_freq = 0;
if (!df->devfreq)
@@ -200,10 +214,19 @@ void msm_devfreq_idle(struct msm_gpu *gpu)
idle_freq = get_freq(gpu);
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ if (gpu->clamp_to_idle)
+ msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
df->idle_freq = idle_freq;
mutex_unlock(&df->devfreq->lock);
}
+
+void msm_devfreq_idle(struct msm_gpu *gpu)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
+ msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
+ HRTIMER_MODE_REL);
+}
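The devfreq changes above split idle handling in two: msm_devfreq_active() cancels any pending idle work, while msm_devfreq_idle() only schedules the clamp 1 ms into the future, so rapid active/idle toggling no longer thrashes the GPU clock. Condensed sketch below; the my_* wrappers are illustrative, and the hrtimer mode is relative because the 1 ms delay is measured from now.

#include "msm_gpu.h"

static void my_devfreq_active(struct msm_gpu_devfreq *df)
{
	/* a new submit arrived: drop any pending clamp to the idle freq */
	hrtimer_cancel(&df->idle_work.timer);
}

static void my_devfreq_idle(struct msm_gpu_devfreq *df)
{
	/* defer the clamp so short idle gaps do not bounce the clock */
	msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
			       HRTIMER_MODE_REL);
}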
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index de2bc3467bb5..6a42b819abc4 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -136,8 +136,7 @@ struct msm_kms;
* shortly before vblank to flush pending async updates.
*/
struct msm_pending_timer {
- struct hrtimer timer;
- struct kthread_work work;
+ struct msm_hrtimer_work work;
struct kthread_worker *worker;
struct msm_kms *kms;
unsigned crtc_idx;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index bd54c1412649..652b1dedd7c1 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -11,17 +11,6 @@ static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);
-static struct dma_fence *msm_job_dependency(struct drm_sched_job *job,
- struct drm_sched_entity *s_entity)
-{
- struct msm_gem_submit *submit = to_msm_submit(job);
-
- if (!xa_empty(&submit->deps))
- return xa_erase(&submit->deps, submit->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
struct msm_gem_submit *submit = to_msm_submit(job);
@@ -52,7 +41,6 @@ static void msm_job_free(struct drm_sched_job *job)
}
const struct drm_sched_backend_ops msm_sched_ops = {
- .dependency = msm_job_dependency,
.run_job = msm_job_run,
.free_job = msm_job_free
};
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
index b8621c6e0554..7cb158bcbcf6 100644
--- a/drivers/gpu/drm/msm/msm_submitqueue.c
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -101,6 +101,7 @@ get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
if (ret) {
+ mutex_unlock(&entity_lock);
kfree(entity);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
index ec0432fe1bdf..86d78634a979 100644
--- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c
+++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c
@@ -173,7 +173,11 @@ static void mxsfb_irq_disable(struct drm_device *drm)
struct mxsfb_drm_private *mxsfb = drm->dev_private;
mxsfb_enable_axi_clk(mxsfb);
- mxsfb->crtc.funcs->disable_vblank(&mxsfb->crtc);
+
+ /* Disable and clear VBLANK IRQ */
+ writel(CTRL1_CUR_FRAME_DONE_IRQ_EN, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+ writel(CTRL1_CUR_FRAME_DONE_IRQ, mxsfb->base + LCDC_CTRL1 + REG_CLR);
+
mxsfb_disable_axi_clk(mxsfb);
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index d7b9f7f8c9e3..8e28403ea9b1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1414,7 +1414,7 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
int ret;
NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name);
- ret = drm_dp_update_payload_part1(&mstm->mgr);
+ ret = drm_dp_update_payload_part1(&mstm->mgr, 1);
drm_for_each_encoder(encoder, mstm->outp->base.base.dev) {
if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 7c15f6448428..6140db756d06 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -364,7 +364,6 @@ void *
nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
{
struct acpi_device *acpidev;
- acpi_handle handle;
int type, ret;
void *edid;
@@ -377,12 +376,8 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
return NULL;
}
- handle = ACPI_HANDLE(dev->dev);
- if (!handle)
- return NULL;
-
- ret = acpi_bus_get_device(handle, &acpidev);
- if (ret)
+ acpidev = ACPI_COMPANION(dev->dev);
+ if (!acpidev)
return NULL;
ret = acpi_video_get_edid(acpidev, type, -1, &edid);
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index c58bcdba2c7a..12b107acb6ee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1250,7 +1250,7 @@ nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (ttm_tt_is_populated(ttm))
return 0;
@@ -1273,11 +1273,13 @@ nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
{
struct nouveau_drm *drm;
struct device *dev;
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (slave)
return;
+ nouveau_ttm_tt_unbind(bdev, ttm);
+
drm = nouveau_bdev(bdev);
dev = drm->dev->dev;
@@ -1291,8 +1293,6 @@ nouveau_ttm_tt_destroy(struct ttm_device *bdev,
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
if (drm->agp.bridge) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 256ec5b35473..85c03c83259b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -21,8 +21,6 @@ nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (ttm) {
- nouveau_sgdma_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(&nvbe->ttm);
kfree(nvbe);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index b0c3422cb01f..1a896a24288a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -992,7 +992,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index e7281da5bc6a..455e1a91f0e5 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_OMAP
tristate "OMAP DRM"
- depends on DRM
+ depends on DRM && OF
depends on ARCH_OMAP2PLUS || ARCH_MULTIPLATFORM
- select OMAP2_DSS
select DRM_KMS_HELPER
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 5f1722b040f4..503b5d4bf2c2 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -2094,7 +2094,7 @@ static int dsi_vc_send_long(struct dsi_data *dsi, int vc,
u8 b1, b2, b3, b4;
if (dsi->debug_write)
- DSSDBG("dsi_vc_send_long, %d bytes\n", msg->tx_len);
+ DSSDBG("dsi_vc_send_long, %zu bytes\n", msg->tx_len);
/* len + header */
if (dsi->vc[vc].tx_fifo_size * 32 * 4 < msg->tx_len + 4) {
@@ -2390,7 +2390,7 @@ static int dsi_vc_generic_read(struct omap_dss_device *dssdev, int vc,
return 0;
err:
- DSSERR("%s(vc %d, reqlen %d) failed\n", __func__, vc, msg->tx_len);
+ DSSERR("%s(vc %d, reqlen %zu) failed\n", __func__, vc, msg->tx_len);
return r;
}
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index f86e20578143..c05d3975cb31 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -572,7 +572,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
priv->dss->mgr_ops_priv = priv;
soc = soc_device_match(omapdrm_soc_devices);
- priv->omaprev = soc ? (unsigned int)soc->data : 0;
+ priv->omaprev = soc ? (uintptr_t)soc->data : 0;
priv->wq = alloc_ordered_workqueue("omapdrm", 0);
mutex_init(&priv->list_lock);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 418638e6e3b0..2cb8eba76af8 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -77,14 +77,26 @@ config DRM_PANEL_LVDS
backlight handling if the panel is attached to a backlight controller.
config DRM_PANEL_SIMPLE
- tristate "support for simple panels"
+ tristate "support for simple panels (other than eDP ones)"
+ depends on OF
+ depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
+ select VIDEOMODE_HELPERS
+ help
+ DRM panel driver for dumb non-eDP panels that need at most a regulator
+ and a GPIO to be powered up. Optionally a backlight can be attached so
+ that it can be automatically turned off when the panel goes into a
+ low power state.
+
+config DRM_PANEL_EDP
+ tristate "support for simple Embedded DisplayPort panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
depends on PM
select VIDEOMODE_HELPERS
select DRM_DP_AUX_BUS
help
- DRM panel driver for dumb panels that need at most a regulator and
+ DRM panel driver for dumb eDP panels that need at most a regulator and
a GPIO to be powered up. Optionally a backlight can be attached so
that it can be automatically turned off when the panel goes into a
low power state.
@@ -393,6 +405,17 @@ config DRM_PANEL_SAMSUNG_S6D16D0
depends on DRM_MIPI_DSI
select VIDEOMODE_HELPERS
+config DRM_PANEL_SAMSUNG_S6D27A1
+ tristate "Samsung S6D27A1 DPI panel driver"
+ depends on OF && SPI && GPIOLIB
+ select DRM_MIPI_DBI
+ help
+ Say Y here if you want to enable support for the Samsung
+ S6D27A1 DPI 480x800 panel.
+
+ This panel can be found in the Samsung Galaxy Ace 2
+ GT-I8160 mobile phone.
+
config DRM_PANEL_SAMSUNG_S6E3HA2
tristate "Samsung S6E3HA2 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index c8132050bcec..6e30640b9099 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -7,6 +7,7 @@ obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_DSI_CM) += panel-dsi-cm.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_EDP) += panel-edp.o
obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20) += panel-samsung-atna33xc20.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_DB7430) += panel-samsung-db7430.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_LD9040) += panel-samsung-ld9040.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D27A1) += panel-samsung-s6d27a1.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
index db9d0b86d542..529561b4fbbc 100644
--- a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -45,6 +45,7 @@ struct boe_panel {
const struct panel_desc *desc;
enum drm_panel_orientation orientation;
+ struct regulator *pp3300;
struct regulator *pp1800;
struct regulator *avee;
struct regulator *avdd;
@@ -74,6 +75,670 @@ struct panel_init_cmd {
.len = sizeof((char[]){__VA_ARGS__}), \
.data = (char[]){__VA_ARGS__} }
+static const struct panel_init_cmd boe_tv110c9m_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD9),
+ _INIT_DCS_CMD(0x07, 0x78),
+ _INIT_DCS_CMD(0x08, 0x5A),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x73),
+ _INIT_DCS_CMD(0x95, 0xEB),
+ _INIT_DCS_CMD(0x96, 0xEB),
+ _INIT_DCS_CMD(0x30, 0x11),
+ _INIT_DCS_CMD(0x6D, 0x66),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0x3B),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+ _INIT_DCS_CMD(0xB8, 0x00, 0x08, 0x00, 0x23, 0x00, 0x4D, 0x00, 0x6D, 0x00, 0x89, 0x00, 0xA1, 0x00, 0xB6, 0x00, 0xC9),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xDA, 0x01, 0x13, 0x01, 0x3C, 0x01, 0x7E, 0x01, 0xAB, 0x01, 0xF7, 0x02, 0x2F, 0x02, 0x31),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x67, 0x02, 0xA6, 0x02, 0xD1, 0x03, 0x08, 0x03, 0x2E, 0x03, 0x5B, 0x03, 0x6B, 0x03, 0x7B),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x8E, 0x03, 0xA2, 0x03, 0xB7, 0x03, 0xE7, 0x03, 0xFD, 0x03, 0xFF),
+
+ _INIT_DCS_CMD(0xFF, 0x21),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0xB0, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB1, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB2, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB3, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB4, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB5, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xB6, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xB7, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xB8, 0x00, 0x00, 0x00, 0x1B, 0x00, 0x45, 0x00, 0x65, 0x00, 0x81, 0x00, 0x99, 0x00, 0xAE, 0x00, 0xC1),
+ _INIT_DCS_CMD(0xB9, 0x00, 0xD2, 0x01, 0x0B, 0x01, 0x34, 0x01, 0x76, 0x01, 0xA3, 0x01, 0xEF, 0x02, 0x27, 0x02, 0x29),
+ _INIT_DCS_CMD(0xBA, 0x02, 0x5F, 0x02, 0x9E, 0x02, 0xC9, 0x03, 0x00, 0x03, 0x26, 0x03, 0x53, 0x03, 0x63, 0x03, 0x73),
+ _INIT_DCS_CMD(0xBB, 0x03, 0x86, 0x03, 0x9A, 0x03, 0xAF, 0x03, 0xDF, 0x03, 0xF5, 0x03, 0xF7),
+
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x01, 0x00),
+
+ _INIT_DCS_CMD(0x02, 0x1C),
+ _INIT_DCS_CMD(0x03, 0x1C),
+
+ _INIT_DCS_CMD(0x04, 0x1D),
+ _INIT_DCS_CMD(0x05, 0x1D),
+
+ _INIT_DCS_CMD(0x06, 0x04),
+ _INIT_DCS_CMD(0x07, 0x04),
+
+ _INIT_DCS_CMD(0x08, 0x0F),
+ _INIT_DCS_CMD(0x09, 0x0F),
+
+ _INIT_DCS_CMD(0x0A, 0x0E),
+ _INIT_DCS_CMD(0x0B, 0x0E),
+
+ _INIT_DCS_CMD(0x0C, 0x0D),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+
+ _INIT_DCS_CMD(0x0E, 0x0C),
+ _INIT_DCS_CMD(0x0F, 0x0C),
+
+ _INIT_DCS_CMD(0x10, 0x08),
+ _INIT_DCS_CMD(0x11, 0x08),
+
+ _INIT_DCS_CMD(0x12, 0x00),
+ _INIT_DCS_CMD(0x13, 0x00),
+ _INIT_DCS_CMD(0x14, 0x00),
+ _INIT_DCS_CMD(0x15, 0x00),
+
+ _INIT_DCS_CMD(0x16, 0x00),
+ _INIT_DCS_CMD(0x17, 0x00),
+
+ _INIT_DCS_CMD(0x18, 0x1C),
+ _INIT_DCS_CMD(0x19, 0x1C),
+
+ _INIT_DCS_CMD(0x1A, 0x1D),
+ _INIT_DCS_CMD(0x1B, 0x1D),
+
+ _INIT_DCS_CMD(0x1C, 0x04),
+ _INIT_DCS_CMD(0x1D, 0x04),
+
+ _INIT_DCS_CMD(0x1E, 0x0F),
+ _INIT_DCS_CMD(0x1F, 0x0F),
+
+ _INIT_DCS_CMD(0x20, 0x0E),
+ _INIT_DCS_CMD(0x21, 0x0E),
+
+ _INIT_DCS_CMD(0x22, 0x0D),
+ _INIT_DCS_CMD(0x23, 0x0D),
+
+ _INIT_DCS_CMD(0x24, 0x0C),
+ _INIT_DCS_CMD(0x25, 0x0C),
+
+ _INIT_DCS_CMD(0x26, 0x08),
+ _INIT_DCS_CMD(0x27, 0x08),
+
+ _INIT_DCS_CMD(0x28, 0x00),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x00),
+ _INIT_DCS_CMD(0x2B, 0x00),
+
+ _INIT_DCS_CMD(0x2D, 0x20),
+ _INIT_DCS_CMD(0x2F, 0x0A),
+ _INIT_DCS_CMD(0x30, 0x44),
+ _INIT_DCS_CMD(0x33, 0x0C),
+ _INIT_DCS_CMD(0x34, 0x32),
+
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x5D),
+ _INIT_DCS_CMD(0x3B, 0x60),
+ _INIT_DCS_CMD(0x3D, 0x42),
+ _INIT_DCS_CMD(0x3F, 0x06),
+ _INIT_DCS_CMD(0x43, 0x06),
+ _INIT_DCS_CMD(0x47, 0x66),
+ _INIT_DCS_CMD(0x4A, 0x5D),
+ _INIT_DCS_CMD(0x4B, 0x60),
+ _INIT_DCS_CMD(0x4C, 0x91),
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x51, 0x12),
+ _INIT_DCS_CMD(0x52, 0x34),
+ _INIT_DCS_CMD(0x55, 0x82, 0x02),
+ _INIT_DCS_CMD(0x56, 0x04),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x30),
+ _INIT_DCS_CMD(0x5A, 0x60),
+ _INIT_DCS_CMD(0x5B, 0x50),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x06),
+ _INIT_DCS_CMD(0x5F, 0x00),
+ _INIT_DCS_CMD(0x65, 0x82),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x82, 0x04),
+ _INIT_DCS_CMD(0x97, 0xC0),
+ _INIT_DCS_CMD(0xB6, 0x05, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0x05, 0x00, 0x00),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0xA9),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x96),
+ _INIT_DCS_CMD(0xD7, 0x55),
+ _INIT_DCS_CMD(0xDA, 0x0A),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDB, 0x05),
+ _INIT_DCS_CMD(0xDC, 0xA9),
+ _INIT_DCS_CMD(0xDD, 0x22),
+
+ _INIT_DCS_CMD(0xDF, 0x05),
+ _INIT_DCS_CMD(0xE0, 0xA9),
+ _INIT_DCS_CMD(0xE1, 0x05),
+ _INIT_DCS_CMD(0xE2, 0xA9),
+ _INIT_DCS_CMD(0xE3, 0x05),
+ _INIT_DCS_CMD(0xE4, 0xA9),
+ _INIT_DCS_CMD(0xE5, 0x05),
+ _INIT_DCS_CMD(0xE6, 0xA9),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x8D, 0x00),
+ _INIT_DCS_CMD(0x8E, 0x00),
+ _INIT_DCS_CMD(0xB5, 0x90),
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0x00),
+ _INIT_DCS_CMD(0x19, 0x07),
+ _INIT_DCS_CMD(0x1F, 0x60),
+ _INIT_DCS_CMD(0x20, 0x50),
+ _INIT_DCS_CMD(0x26, 0x60),
+ _INIT_DCS_CMD(0x27, 0x50),
+ _INIT_DCS_CMD(0x33, 0x60),
+ _INIT_DCS_CMD(0x34, 0x50),
+ _INIT_DCS_CMD(0x3F, 0xE0),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x44, 0x00),
+ _INIT_DCS_CMD(0x45, 0x40),
+ _INIT_DCS_CMD(0x48, 0x60),
+ _INIT_DCS_CMD(0x49, 0x50),
+ _INIT_DCS_CMD(0x5B, 0x00),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0xD0),
+ _INIT_DCS_CMD(0x61, 0x60),
+ _INIT_DCS_CMD(0x62, 0x50),
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x64, 0x16),
+ _INIT_DCS_CMD(0x67, 0x16),
+ _INIT_DCS_CMD(0x6A, 0x16),
+
+ _INIT_DCS_CMD(0x70, 0x30),
+
+ _INIT_DCS_CMD(0xA2, 0xF3),
+ _INIT_DCS_CMD(0xA3, 0xFF),
+ _INIT_DCS_CMD(0xA4, 0xFF),
+ _INIT_DCS_CMD(0xA5, 0xFF),
+
+ _INIT_DCS_CMD(0xD6, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0xA1),
+
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x65),
+ _INIT_DCS_CMD(0x1F, 0x65),
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x2F, 0x05),
+ _INIT_DCS_CMD(0x30, 0x65),
+ _INIT_DCS_CMD(0x31, 0x05),
+ _INIT_DCS_CMD(0x32, 0x7D),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x65),
+ _INIT_DCS_CMD(0x20, 0x01),
+ _INIT_DCS_CMD(0x33, 0x11),
+ _INIT_DCS_CMD(0x34, 0x78),
+ _INIT_DCS_CMD(0x35, 0x16),
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x80),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xA9, 0x4C),
+ _INIT_DCS_CMD(0xAA, 0x47),
+
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x44),
+
+ _INIT_DCS_CMD(0x00, 0x00),
+ _INIT_DCS_CMD(0x78, 0x00),
+ _INIT_DCS_CMD(0xC3, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+
+ _INIT_DCS_CMD(0xFF, 0xE0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x14, 0x60),
+ _INIT_DCS_CMD(0x16, 0xC0),
+
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x18, 0x40),
+
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x00, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DCS_CMD(0x55, 0x00),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
+static const struct panel_init_cmd inx_init_cmd[] = {
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x05, 0xD1),
+ _INIT_DCS_CMD(0x0D, 0x63),
+ _INIT_DCS_CMD(0x07, 0x8C),
+ _INIT_DCS_CMD(0x08, 0x4B),
+ _INIT_DCS_CMD(0x0E, 0x91),
+ _INIT_DCS_CMD(0x0F, 0x69),
+ _INIT_DCS_CMD(0x95, 0xFF),
+ _INIT_DCS_CMD(0x96, 0xFF),
+ _INIT_DCS_CMD(0x9D, 0x0A),
+ _INIT_DCS_CMD(0x9E, 0x0A),
+ _INIT_DCS_CMD(0x69, 0x98),
+ _INIT_DCS_CMD(0x75, 0xA2),
+ _INIT_DCS_CMD(0x77, 0xB3),
+ _INIT_DCS_CMD(0xFF, 0x24),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x91, 0x44),
+ _INIT_DCS_CMD(0x92, 0x7A),
+ _INIT_DCS_CMD(0x93, 0x1A),
+ _INIT_DCS_CMD(0x94, 0x40),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x60, 0x96),
+ _INIT_DCS_CMD(0x61, 0xD0),
+ _INIT_DCS_CMD(0x63, 0x70),
+ _INIT_DCS_CMD(0xC2, 0xCF),
+ _INIT_DCS_CMD(0x9B, 0x0F),
+ _INIT_DCS_CMD(0x9A, 0x08),
+ _INIT_DCS_CMD(0x00, 0x03),
+ _INIT_DCS_CMD(0x01, 0x03),
+ _INIT_DCS_CMD(0x02, 0x03),
+ _INIT_DCS_CMD(0x03, 0x03),
+ _INIT_DCS_CMD(0x04, 0x03),
+ _INIT_DCS_CMD(0x05, 0x03),
+ _INIT_DCS_CMD(0x06, 0x22),
+ _INIT_DCS_CMD(0x07, 0x06),
+ _INIT_DCS_CMD(0x08, 0x00),
+ _INIT_DCS_CMD(0x09, 0x1D),
+ _INIT_DCS_CMD(0x0A, 0x1C),
+ _INIT_DCS_CMD(0x0B, 0x13),
+ _INIT_DCS_CMD(0x0C, 0x12),
+ _INIT_DCS_CMD(0x0D, 0x11),
+ _INIT_DCS_CMD(0x0E, 0x10),
+ _INIT_DCS_CMD(0x0F, 0x0F),
+ _INIT_DCS_CMD(0x10, 0x0E),
+ _INIT_DCS_CMD(0x11, 0x0D),
+ _INIT_DCS_CMD(0x12, 0x0C),
+ _INIT_DCS_CMD(0x13, 0x04),
+ _INIT_DCS_CMD(0x14, 0x03),
+ _INIT_DCS_CMD(0x15, 0x03),
+ _INIT_DCS_CMD(0x16, 0x03),
+ _INIT_DCS_CMD(0x17, 0x03),
+ _INIT_DCS_CMD(0x18, 0x03),
+ _INIT_DCS_CMD(0x19, 0x03),
+ _INIT_DCS_CMD(0x1A, 0x03),
+ _INIT_DCS_CMD(0x1B, 0x03),
+ _INIT_DCS_CMD(0x1C, 0x22),
+ _INIT_DCS_CMD(0x1D, 0x06),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x1D),
+ _INIT_DCS_CMD(0x20, 0x1C),
+ _INIT_DCS_CMD(0x21, 0x13),
+ _INIT_DCS_CMD(0x22, 0x12),
+ _INIT_DCS_CMD(0x23, 0x11),
+ _INIT_DCS_CMD(0x24, 0x10),
+ _INIT_DCS_CMD(0x25, 0x0F),
+ _INIT_DCS_CMD(0x26, 0x0E),
+ _INIT_DCS_CMD(0x27, 0x0D),
+ _INIT_DCS_CMD(0x28, 0x0C),
+ _INIT_DCS_CMD(0x29, 0x04),
+ _INIT_DCS_CMD(0x2A, 0x03),
+ _INIT_DCS_CMD(0x2B, 0x03),
+
+ _INIT_DCS_CMD(0x2F, 0x06),
+ _INIT_DCS_CMD(0x30, 0x32),
+ _INIT_DCS_CMD(0x31, 0x43),
+ _INIT_DCS_CMD(0x33, 0x06),
+ _INIT_DCS_CMD(0x34, 0x32),
+ _INIT_DCS_CMD(0x35, 0x43),
+ _INIT_DCS_CMD(0x37, 0x44),
+ _INIT_DCS_CMD(0x38, 0x40),
+ _INIT_DCS_CMD(0x39, 0x00),
+ _INIT_DCS_CMD(0x3A, 0x01),
+ _INIT_DCS_CMD(0x3B, 0x48),
+ _INIT_DCS_CMD(0x3D, 0x93),
+ _INIT_DCS_CMD(0xAB, 0x44),
+ _INIT_DCS_CMD(0xAC, 0x40),
+
+ _INIT_DCS_CMD(0x4D, 0x21),
+ _INIT_DCS_CMD(0x4E, 0x43),
+ _INIT_DCS_CMD(0x4F, 0x65),
+ _INIT_DCS_CMD(0x50, 0x87),
+ _INIT_DCS_CMD(0x51, 0x78),
+ _INIT_DCS_CMD(0x52, 0x56),
+ _INIT_DCS_CMD(0x53, 0x34),
+ _INIT_DCS_CMD(0x54, 0x21),
+ _INIT_DCS_CMD(0x55, 0x83),
+ _INIT_DCS_CMD(0x56, 0x08),
+ _INIT_DCS_CMD(0x58, 0x21),
+ _INIT_DCS_CMD(0x59, 0x40),
+ _INIT_DCS_CMD(0x5A, 0x09),
+ _INIT_DCS_CMD(0x5B, 0x48),
+ _INIT_DCS_CMD(0x5E, 0x00, 0x10),
+ _INIT_DCS_CMD(0x5F, 0x00),
+
+ _INIT_DCS_CMD(0x7A, 0x00),
+ _INIT_DCS_CMD(0x7B, 0x00),
+ _INIT_DCS_CMD(0x7C, 0x00),
+ _INIT_DCS_CMD(0x7D, 0x00),
+ _INIT_DCS_CMD(0x7E, 0x20),
+ _INIT_DCS_CMD(0x7F, 0x3C),
+ _INIT_DCS_CMD(0x80, 0x00),
+ _INIT_DCS_CMD(0x81, 0x00),
+ _INIT_DCS_CMD(0x82, 0x08),
+ _INIT_DCS_CMD(0x97, 0x02),
+ _INIT_DCS_CMD(0xC5, 0x10),
+ _INIT_DCS_CMD(0xDA, 0x05),
+ _INIT_DCS_CMD(0xDB, 0x01),
+ _INIT_DCS_CMD(0xDC, 0x7A),
+ _INIT_DCS_CMD(0xDD, 0x55),
+ _INIT_DCS_CMD(0xDE, 0x27),
+ _INIT_DCS_CMD(0xDF, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x7A),
+ _INIT_DCS_CMD(0xE1, 0x01),
+ _INIT_DCS_CMD(0xE2, 0x7A),
+ _INIT_DCS_CMD(0xE3, 0x01),
+ _INIT_DCS_CMD(0xE4, 0x7A),
+ _INIT_DCS_CMD(0xE5, 0x01),
+ _INIT_DCS_CMD(0xE6, 0x7A),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xE9, 0x01),
+ _INIT_DCS_CMD(0xEA, 0x7A),
+ _INIT_DCS_CMD(0xEB, 0x01),
+ _INIT_DCS_CMD(0xEE, 0x7A),
+ _INIT_DCS_CMD(0xEF, 0x01),
+ _INIT_DCS_CMD(0xF0, 0x7A),
+
+ _INIT_DCS_CMD(0xFF, 0x25),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x05, 0x00),
+
+ _INIT_DCS_CMD(0xF1, 0x10),
+ _INIT_DCS_CMD(0x1E, 0x00),
+ _INIT_DCS_CMD(0x1F, 0x09),
+ _INIT_DCS_CMD(0x20, 0x46),
+ _INIT_DCS_CMD(0x25, 0x00),
+ _INIT_DCS_CMD(0x26, 0x09),
+ _INIT_DCS_CMD(0x27, 0x46),
+ _INIT_DCS_CMD(0x3F, 0x80),
+ _INIT_DCS_CMD(0x40, 0x00),
+ _INIT_DCS_CMD(0x43, 0x00),
+
+ _INIT_DCS_CMD(0x44, 0x09),
+ _INIT_DCS_CMD(0x45, 0x46),
+
+ _INIT_DCS_CMD(0x48, 0x09),
+ _INIT_DCS_CMD(0x49, 0x46),
+ _INIT_DCS_CMD(0x5B, 0x80),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x01),
+ _INIT_DCS_CMD(0x5E, 0x46),
+ _INIT_DCS_CMD(0x61, 0x01),
+ _INIT_DCS_CMD(0x62, 0x46),
+ _INIT_DCS_CMD(0x68, 0x10),
+ _INIT_DCS_CMD(0xFF, 0x26),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x00, 0xA1),
+ _INIT_DCS_CMD(0x02, 0x31),
+ _INIT_DCS_CMD(0x0A, 0xF2),
+ _INIT_DCS_CMD(0x04, 0x28),
+ _INIT_DCS_CMD(0x06, 0x30),
+ _INIT_DCS_CMD(0x0C, 0x16),
+ _INIT_DCS_CMD(0x0D, 0x0D),
+ _INIT_DCS_CMD(0x0F, 0x00),
+ _INIT_DCS_CMD(0x11, 0x00),
+ _INIT_DCS_CMD(0x12, 0x50),
+ _INIT_DCS_CMD(0x13, 0x56),
+ _INIT_DCS_CMD(0x14, 0x57),
+ _INIT_DCS_CMD(0x15, 0x00),
+ _INIT_DCS_CMD(0x16, 0x10),
+ _INIT_DCS_CMD(0x17, 0xA0),
+ _INIT_DCS_CMD(0x18, 0x86),
+ _INIT_DCS_CMD(0x22, 0x00),
+ _INIT_DCS_CMD(0x23, 0x00),
+ _INIT_DCS_CMD(0x19, 0x0D),
+ _INIT_DCS_CMD(0x1A, 0x7F),
+ _INIT_DCS_CMD(0x1B, 0x0C),
+ _INIT_DCS_CMD(0x1C, 0xBF),
+ _INIT_DCS_CMD(0x2A, 0x0D),
+ _INIT_DCS_CMD(0x2B, 0x7F),
+ _INIT_DCS_CMD(0x20, 0x00),
+
+ _INIT_DCS_CMD(0x1D, 0x00),
+ _INIT_DCS_CMD(0x1E, 0x78),
+ _INIT_DCS_CMD(0x1F, 0x78),
+
+ _INIT_DCS_CMD(0x2F, 0x03),
+ _INIT_DCS_CMD(0x30, 0x78),
+ _INIT_DCS_CMD(0x33, 0x78),
+ _INIT_DCS_CMD(0x34, 0x66),
+ _INIT_DCS_CMD(0x35, 0x11),
+
+ _INIT_DCS_CMD(0x39, 0x10),
+ _INIT_DCS_CMD(0x3A, 0x78),
+ _INIT_DCS_CMD(0x3B, 0x06),
+
+ _INIT_DCS_CMD(0xC8, 0x04),
+ _INIT_DCS_CMD(0xC9, 0x84),
+ _INIT_DCS_CMD(0xCA, 0x4E),
+ _INIT_DCS_CMD(0xCB, 0x00),
+
+ _INIT_DCS_CMD(0xA9, 0x50),
+ _INIT_DCS_CMD(0xAA, 0x4F),
+ _INIT_DCS_CMD(0xAB, 0x4D),
+ _INIT_DCS_CMD(0xAC, 0x4A),
+ _INIT_DCS_CMD(0xAD, 0x48),
+ _INIT_DCS_CMD(0xAE, 0x46),
+ _INIT_DCS_CMD(0xFF, 0x27),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x18),
+ _INIT_DCS_CMD(0xC1, 0x00),
+ _INIT_DCS_CMD(0xC2, 0x00),
+ _INIT_DCS_CMD(0x56, 0x06),
+ _INIT_DCS_CMD(0x58, 0x80),
+ _INIT_DCS_CMD(0x59, 0x75),
+ _INIT_DCS_CMD(0x5A, 0x00),
+ _INIT_DCS_CMD(0x5B, 0x02),
+ _INIT_DCS_CMD(0x5C, 0x00),
+ _INIT_DCS_CMD(0x5D, 0x00),
+ _INIT_DCS_CMD(0x5E, 0x20),
+ _INIT_DCS_CMD(0x5F, 0x10),
+ _INIT_DCS_CMD(0x60, 0x00),
+ _INIT_DCS_CMD(0x61, 0x2E),
+ _INIT_DCS_CMD(0x62, 0x00),
+ _INIT_DCS_CMD(0x63, 0x01),
+ _INIT_DCS_CMD(0x64, 0x43),
+ _INIT_DCS_CMD(0x65, 0x2D),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x67, 0x01),
+ _INIT_DCS_CMD(0x68, 0x43),
+ _INIT_DCS_CMD(0x98, 0x01),
+ _INIT_DCS_CMD(0xB4, 0x03),
+ _INIT_DCS_CMD(0x9B, 0xBD),
+ _INIT_DCS_CMD(0xA0, 0x90),
+ _INIT_DCS_CMD(0xAB, 0x1B),
+ _INIT_DCS_CMD(0xBC, 0x0C),
+ _INIT_DCS_CMD(0xBD, 0x28),
+
+ _INIT_DCS_CMD(0xFF, 0x2A),
+ _INIT_DCS_CMD(0xFB, 0x01),
+
+ _INIT_DCS_CMD(0x22, 0x2F),
+ _INIT_DCS_CMD(0x23, 0x08),
+
+ _INIT_DCS_CMD(0x24, 0x00),
+ _INIT_DCS_CMD(0x25, 0x65),
+ _INIT_DCS_CMD(0x26, 0xF8),
+ _INIT_DCS_CMD(0x27, 0x00),
+ _INIT_DCS_CMD(0x28, 0x1A),
+ _INIT_DCS_CMD(0x29, 0x00),
+ _INIT_DCS_CMD(0x2A, 0x1A),
+ _INIT_DCS_CMD(0x2B, 0x00),
+ _INIT_DCS_CMD(0x2D, 0x1A),
+
+ _INIT_DCS_CMD(0x64, 0x96),
+ _INIT_DCS_CMD(0x65, 0x00),
+ _INIT_DCS_CMD(0x66, 0x00),
+ _INIT_DCS_CMD(0x6A, 0x96),
+ _INIT_DCS_CMD(0x6B, 0x00),
+ _INIT_DCS_CMD(0x6C, 0x00),
+ _INIT_DCS_CMD(0x70, 0x92),
+ _INIT_DCS_CMD(0x71, 0x00),
+ _INIT_DCS_CMD(0x72, 0x00),
+ _INIT_DCS_CMD(0xA2, 0x33),
+ _INIT_DCS_CMD(0xA3, 0x30),
+ _INIT_DCS_CMD(0xA4, 0xC0),
+ _INIT_DCS_CMD(0xE8, 0x00),
+ _INIT_DCS_CMD(0xFF, 0xF0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x3A, 0x08),
+ _INIT_DCS_CMD(0xFF, 0xD0),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x33),
+ _INIT_DCS_CMD(0x02, 0x77),
+ _INIT_DCS_CMD(0x08, 0x01),
+ _INIT_DCS_CMD(0x09, 0xBF),
+ _INIT_DCS_CMD(0x28, 0x30),
+ _INIT_DCS_CMD(0x2F, 0x33),
+ _INIT_DCS_CMD(0xFF, 0x23),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x00, 0x80),
+ _INIT_DCS_CMD(0x07, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0x30, 0x00),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x01),
+ _INIT_DCS_CMD(0xFF, 0x20),
+ _INIT_DCS_CMD(0x18, 0x40),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xB9, 0x02),
+ _INIT_DCS_CMD(0xFF, 0x10),
+ _INIT_DCS_CMD(0xFB, 0x01),
+ _INIT_DCS_CMD(0xBB, 0x13),
+ _INIT_DCS_CMD(0x3B, 0x03, 0x96, 0x1A, 0x04, 0x04),
+ _INIT_DCS_CMD(0x35, 0x00),
+ _INIT_DCS_CMD(0x51, 0x0F, 0xFF),
+ _INIT_DCS_CMD(0x53, 0x24),
+ _INIT_DELAY_CMD(100),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(200),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(100),
+ {},
+};
+
static const struct panel_init_cmd boe_init_cmd[] = {
_INIT_DELAY_CMD(24),
_INIT_DCS_CMD(0xB0, 0x05),
@@ -511,13 +1176,15 @@ static int boe_panel_unprepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
} else {
gpiod_set_value(boe->enable_gpio, 0);
- usleep_range(500, 1000);
+ usleep_range(1000, 2000);
regulator_disable(boe->avee);
regulator_disable(boe->avdd);
usleep_range(5000, 7000);
regulator_disable(boe->pp1800);
+ regulator_disable(boe->pp3300);
}
boe->prepared = false;
@@ -536,6 +1203,10 @@ static int boe_panel_prepare(struct drm_panel *panel)
gpiod_set_value(boe->enable_gpio, 0);
usleep_range(1000, 1500);
+ ret = regulator_enable(boe->pp3300);
+ if (ret < 0)
+ return ret;
+
ret = regulator_enable(boe->pp1800);
if (ret < 0)
return ret;
@@ -549,7 +1220,7 @@ static int boe_panel_prepare(struct drm_panel *panel)
if (ret < 0)
goto poweroffavdd;
- usleep_range(5000, 10000);
+ usleep_range(10000, 11000);
gpiod_set_value(boe->enable_gpio, 1);
usleep_range(1000, 2000);
@@ -586,6 +1257,64 @@ static int boe_panel_enable(struct drm_panel *panel)
return 0;
}
+static const struct drm_display_mode boe_tv110c9m_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 2,
+ .vtotal = 2000 + 26 + 2 + 148,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv110c9m_desc = {
+ .modes = &boe_tv110c9m_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = boe_tv110c9m_init_cmd,
+};
+
+static const struct drm_display_mode inx_hj110iz_default_mode = {
+ .clock = 166594,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 40,
+ .hsync_end = 1200 + 40 + 8,
+ .htotal = 1200 + 40 + 8 + 28,
+ .vdisplay = 2000,
+ .vsync_start = 2000 + 26,
+ .vsync_end = 2000 + 26 + 1,
+ .vtotal = 2000 + 26 + 1 + 149,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc inx_hj110iz_desc = {
+ .modes = &inx_hj110iz_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 143,
+ .height_mm = 238,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_VIDEO
+ | MIPI_DSI_MODE_VIDEO_HSE
+ | MIPI_DSI_CLOCK_NON_CONTINUOUS
+ | MIPI_DSI_MODE_VIDEO_BURST,
+ .init_cmds = inx_init_cmd,
+};
+
static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
.clock = 159425,
.hdisplay = 1200,
@@ -767,6 +1496,10 @@ static int boe_panel_add(struct boe_panel *boe)
if (IS_ERR(boe->avee))
return PTR_ERR(boe->avee);
+ boe->pp3300 = devm_regulator_get(dev, "pp3300");
+ if (IS_ERR(boe->pp3300))
+ return PTR_ERR(boe->pp3300);
+
boe->pp1800 = devm_regulator_get(dev, "pp1800");
if (IS_ERR(boe->pp1800))
return PTR_ERR(boe->pp1800);
@@ -870,6 +1603,12 @@ static const struct of_device_id boe_of_match[] = {
{ .compatible = "boe,tv105wum-nw0",
.data = &boe_tv105wum_nw0_desc
},
+ { .compatible = "boe,tv110c9m-ll3",
+ .data = &boe_tv110c9m_desc
+ },
+ { .compatible = "innolux,hj110iz-01a",
+ .data = &inx_hj110iz_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, boe_of_match);
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
new file mode 100644
index 000000000000..fc03046de134
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -0,0 +1,1896 @@
+/*
+ * Copyright (C) 2013, NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/of_display_timing.h>
+#include <video/videomode.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_device.h>
+#include <drm/drm_dp_aux_bus.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+
+/**
+ * struct panel_delay - Describes delays for a simple panel.
+ */
+struct panel_delay {
+ /**
+ * @hpd_reliable: Time for HPD to be reliable
+ *
+ * The time (in milliseconds) that it takes after powering the panel
+ * before the HPD signal is reliable. Ideally this is 0 but some panels,
+ * board designs, or bad pulldown configs can cause a glitch here.
+ *
+ * NOTE: on some old panel data this number appears to be much too big.
+ * Presumably some old panels simply didn't have HPD hooked up and put
+ * the hpd_absent value here because this field predates
+ * hpd_absent. While that works, it's non-ideal.
+ */
+ unsigned int hpd_reliable;
+
+ /**
+ * @hpd_absent: Time to wait if HPD isn't hooked up.
+ *
+ * Add this to the prepare delay if we know Hot Plug Detect isn't used.
+ *
+ * This is T3-max on eDP timing diagrams or the delay from power on
+ * until HPD is guaranteed to be asserted.
+ */
+ unsigned int hpd_absent;
+
+ /**
+ * @prepare_to_enable: Time between prepare and enable.
+ *
+ * The minimum time, in milliseconds, that needs to have passed
+ * between when prepare finished and enable may begin. If at
+ * enable time less time has passed since prepare finished,
+ * the driver waits for the remaining time.
+ *
+ * If a fixed enable delay is also specified, we'll start
+ * counting before delaying for the fixed delay.
+ *
+ * If a fixed prepare delay is also specified, we won't start
+ * counting until after the fixed delay. We can't overlap this
+ * fixed delay with the min time because the fixed delay
+ * doesn't happen at the end of the function if a HPD GPIO was
+ * specified.
+ *
+ * In other words:
+ * prepare()
+ * ...
+ * // do fixed prepare delay
+ * // wait for HPD GPIO if applicable
+ * // start counting for prepare_to_enable
+ *
+ * enable()
+ * // do fixed enable delay
+ * // enforce prepare_to_enable min time
+ *
+ * This is not specified in a standard way on eDP timing diagrams.
+ * It is effectively the time from HPD going high till you can
+ * turn on the backlight.
+ */
+ unsigned int prepare_to_enable;
+
+ /**
+ * @enable: Time for the panel to display a valid frame.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * display the first valid frame after starting to receive
+ * video data.
+ *
+ * This is (T6-min + max(T7-max, T8-min)) on eDP timing diagrams or
+ * the delay after link training finishes until we can turn the
+ * backlight on and see valid data.
+ */
+ unsigned int enable;
+
+ /**
+ * @disable: Time for the panel to turn the display off.
+ *
+ * The time (in milliseconds) that it takes for the panel to
+ * turn the display off (no content is visible).
+ *
+ * This is T9-min (delay from backlight off to end of valid video
+ * data) on eDP timing diagrams. It is not common to set.
+ */
+ unsigned int disable;
+
+ /**
+ * @unprepare: Time to power down completely.
+ *
+ * The time (in milliseconds) that it takes for the panel
+ * to power itself down completely.
+ *
+ * This time is used to prevent a future "prepare" from
+ * starting until at least this many milliseconds has passed.
+ * If at prepare time less time has passed since unprepare
+ * finished, the driver waits for the remaining time.
+ *
+ * This is T12-min on eDP timing diagrams.
+ */
+ unsigned int unprepare;
+};
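
/*
 * Illustrative sketch only (hypothetical panel; the delay values are
 * assumptions, not taken from any datasheet): how the eDP timing-diagram
 * numbers referenced above would typically map onto a panel_delay
 * initializer.
 */
static const struct panel_delay example_panel_delay = {
	.hpd_absent = 200,	/* T3-max: power on until HPD is guaranteed asserted */
	.enable = 80,		/* T6-min + max(T7-max, T8-min) before backlight on */
	.unprepare = 500,	/* T12-min: minimum off time before the next prepare */
};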
+
+/**
+ * struct panel_desc - Describes a simple panel.
+ */
+struct panel_desc {
+ /**
+ * @modes: Pointer to array of fixed modes appropriate for this panel.
+ *
+ * If only one mode then this can just be the address of the mode.
+ * NOTE: cannot be used with "timings" and also if this is specified
+ * then you cannot override the mode in the device tree.
+ */
+ const struct drm_display_mode *modes;
+
+ /** @num_modes: Number of elements in modes array. */
+ unsigned int num_modes;
+
+ /**
+ * @timings: Pointer to array of display timings
+ *
+ * NOTE: cannot be used with "modes" and also these will be used to
+ * validate a device tree override if one is present.
+ */
+ const struct display_timing *timings;
+
+ /** @num_timings: Number of elements in timings array. */
+ unsigned int num_timings;
+
+ /** @bpc: Bits per color. */
+ unsigned int bpc;
+
+ /** @size: Structure containing the physical size of this panel. */
+ struct {
+ /**
+ * @size.width: Width (in mm) of the active display area.
+ */
+ unsigned int width;
+
+ /**
+ * @size.height: Height (in mm) of the active display area.
+ */
+ unsigned int height;
+ } size;
+
+ /** @delay: Structure containing various delay values for this panel. */
+ struct panel_delay delay;
+};
+
+/**
+ * struct edp_panel_entry - Maps panel ID to delay / panel name.
+ */
+struct edp_panel_entry {
+ /** @panel_id: 32-bit ID for panel, encoded with drm_edid_encode_panel_id(). */
+ u32 panel_id;
+
+ /** @delay: The power sequencing delays needed for this panel. */
+ const struct panel_delay *delay;
+
+ /** @name: Name of this panel (for printing to logs). */
+ const char *name;
+};
+
+struct panel_edp {
+ struct drm_panel base;
+ bool enabled;
+ bool no_hpd;
+
+ bool prepared;
+
+ ktime_t prepared_time;
+ ktime_t unprepared_time;
+
+ const struct panel_desc *desc;
+
+ struct regulator *supply;
+ struct i2c_adapter *ddc;
+ struct drm_dp_aux *aux;
+
+ struct gpio_desc *enable_gpio;
+ struct gpio_desc *hpd_gpio;
+
+ struct edid *edid;
+
+ struct drm_display_mode override_mode;
+
+ enum drm_panel_orientation orientation;
+};
+
+static inline struct panel_edp *to_panel_edp(struct drm_panel *panel)
+{
+ return container_of(panel, struct panel_edp, base);
+}
+
+static unsigned int panel_edp_get_timings_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+ struct videomode vm;
+
+ videomode_from_timing(dt, &vm);
+ mode = drm_mode_create(connector->dev);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u\n",
+ dt->hactive.typ, dt->vactive.typ);
+ continue;
+ }
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_timings == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ unsigned int i, num = 0;
+
+ for (i = 0; i < panel->desc->num_modes; i++) {
+ const struct drm_display_mode *m = &panel->desc->modes[i];
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay,
+ drm_mode_vrefresh(m));
+ continue;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER;
+
+ if (panel->desc->num_modes == 1)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+
+ drm_mode_probed_add(connector, mode);
+ num++;
+ }
+
+ return num;
+}
+
+static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+ bool has_override = panel->override_mode.type;
+ unsigned int num = 0;
+
+ if (!panel->desc)
+ return 0;
+
+ if (has_override) {
+ mode = drm_mode_duplicate(connector->dev,
+ &panel->override_mode);
+ if (mode) {
+ drm_mode_probed_add(connector, mode);
+ num = 1;
+ } else {
+ dev_err(panel->base.dev, "failed to add override mode\n");
+ }
+ }
+
+ /* Only add timings if override was not there or failed to validate */
+ if (num == 0 && panel->desc->num_timings)
+ num = panel_edp_get_timings_modes(panel, connector);
+
+ /*
+ * Only add fixed modes if timings/override added no mode.
+ *
+ * We should only ever have either the display timings specified
+ * or a fixed mode. Anything else is rather bogus.
+ */
+ WARN_ON(panel->desc->num_timings && panel->desc->num_modes);
+ if (num == 0)
+ num = panel_edp_get_display_modes(panel, connector);
+
+ connector->display_info.bpc = panel->desc->bpc;
+ connector->display_info.width_mm = panel->desc->size.width;
+ connector->display_info.height_mm = panel->desc->size.height;
+
+ return num;
+}
+
+static void panel_edp_wait(ktime_t start_ktime, unsigned int min_ms)
+{
+ ktime_t now_ktime, min_ktime;
+
+ if (!min_ms)
+ return;
+
+ min_ktime = ktime_add(start_ktime, ms_to_ktime(min_ms));
+ now_ktime = ktime_get();
+
+ if (ktime_before(now_ktime, min_ktime))
+ msleep(ktime_to_ms(ktime_sub(min_ktime, now_ktime)) + 1);
+}
+
+static int panel_edp_disable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+
+ if (!p->enabled)
+ return 0;
+
+ if (p->desc->delay.disable)
+ msleep(p->desc->delay.disable);
+
+ p->enabled = false;
+
+ return 0;
+}
+
+static int panel_edp_suspend(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return 0;
+}
+
+static int panel_edp_unprepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Unpreparing when already unprepared is a no-op */
+ if (!p->prepared)
+ return 0;
+
+ pm_runtime_mark_last_busy(panel->dev);
+ ret = pm_runtime_put_autosuspend(panel->dev);
+ if (ret < 0)
+ return ret;
+ p->prepared = false;
+
+ return 0;
+}
+
+static int panel_edp_get_hpd_gpio(struct device *dev, struct panel_edp *p)
+{
+ int err;
+
+ p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
+ if (IS_ERR(p->hpd_gpio)) {
+ err = PTR_ERR(p->hpd_gpio);
+
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
+
+ return err;
+ }
+
+ return 0;
+}
+
+static int panel_edp_prepare_once(struct panel_edp *p)
+{
+ struct device *dev = p->base.dev;
+ unsigned int delay;
+ int err;
+ int hpd_asserted;
+ unsigned long hpd_wait_us;
+
+ panel_edp_wait(p->unprepared_time, p->desc->delay.unprepare);
+
+ err = regulator_enable(p->supply);
+ if (err < 0) {
+ dev_err(dev, "failed to enable supply: %d\n", err);
+ return err;
+ }
+
+ gpiod_set_value_cansleep(p->enable_gpio, 1);
+
+ delay = p->desc->delay.hpd_reliable;
+ if (p->no_hpd)
+ delay = max(delay, p->desc->delay.hpd_absent);
+ if (delay)
+ msleep(delay);
+
+ if (p->hpd_gpio) {
+ if (p->desc->delay.hpd_absent)
+ hpd_wait_us = p->desc->delay.hpd_absent * 1000UL;
+ else
+ hpd_wait_us = 2000000;
+
+ err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
+ hpd_asserted, hpd_asserted,
+ 1000, hpd_wait_us);
+ if (hpd_asserted < 0)
+ err = hpd_asserted;
+
+ if (err) {
+ if (err != -ETIMEDOUT)
+ dev_err(dev,
+ "error waiting for hpd GPIO: %d\n", err);
+ goto error;
+ }
+ }
+
+ p->prepared_time = ktime_get();
+
+ return 0;
+
+error:
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ return err;
+}
+
+/*
+ * Some panels simply don't always come up and need to be power cycled to
+ * work properly. We'll allow for a handful of retries.
+ */
+#define MAX_PANEL_PREPARE_TRIES 5
+
+static int panel_edp_resume(struct device *dev)
+{
+ struct panel_edp *p = dev_get_drvdata(dev);
+ int ret;
+ int try;
+
+ for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
+ ret = panel_edp_prepare_once(p);
+ if (ret != -ETIMEDOUT)
+ break;
+ }
+
+ if (ret == -ETIMEDOUT)
+ dev_err(dev, "Prepare timeout after %d tries\n", try);
+ else if (try)
+ dev_warn(dev, "Prepare needed %d retries\n", try);
+
+ return ret;
+}
+
+static int panel_edp_prepare(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int ret;
+
+ /* Preparing when already prepared is a no-op */
+ if (p->prepared)
+ return 0;
+
+ ret = pm_runtime_get_sync(panel->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(panel->dev);
+ return ret;
+ }
+
+ p->prepared = true;
+
+ return 0;
+}
+
+static int panel_edp_enable(struct drm_panel *panel)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int delay;
+
+ if (p->enabled)
+ return 0;
+
+ delay = p->desc->delay.enable;
+
+ /*
+ * If there is a "prepare_to_enable" delay then that's supposed to be
+ * the delay from HPD going high until we can turn the backlight on.
+ * However, we can only count this if HPD is handled by the panel
+ * driver, not if it goes to a dedicated pin on the controller.
+ * If we aren't handling the HPD pin ourselves then the best we
+ * can do is assume that HPD went high immediately before we were
+ * called (and link training took zero time).
+ *
+ * NOTE: if we ever end up in this "if" statement then we're
+ * guaranteed that the panel_edp_wait() call below will do no delay.
+ * It already handles that case, though, so we don't need any special
+ * code for it.
+ */
+ if (p->desc->delay.prepare_to_enable && !p->hpd_gpio && !p->no_hpd)
+ delay = max(delay, p->desc->delay.prepare_to_enable);
+
+ if (delay)
+ msleep(delay);
+
+ panel_edp_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
+
+ p->enabled = true;
+
+ return 0;
+}
+
+static int panel_edp_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ int num = 0;
+
+ /* probe EDID if a DDC bus is available */
+ if (p->ddc) {
+ pm_runtime_get_sync(panel->dev);
+
+ if (!p->edid)
+ p->edid = drm_get_edid(connector, p->ddc);
+
+ if (p->edid)
+ num += drm_add_edid_modes(connector, p->edid);
+
+ pm_runtime_mark_last_busy(panel->dev);
+ pm_runtime_put_autosuspend(panel->dev);
+ }
+
+ /*
+ * Add hard-coded panel modes. Don't call this if there are no timings
+ * and no modes (the generic edp-panel case) because it will clobber
+ * the display_info that was already set by drm_add_edid_modes().
+ */
+ if (p->desc->num_timings || p->desc->num_modes)
+ num += panel_edp_get_non_edid_modes(p, connector);
+ else if (!num)
+ dev_warn(p->base.dev, "No display modes\n");
+
+ /* set up connector's "panel orientation" property */
+ drm_connector_set_panel_orientation(connector, p->orientation);
+
+ return num;
+}
+
+static int panel_edp_get_timings(struct drm_panel *panel,
+ unsigned int num_timings,
+ struct display_timing *timings)
+{
+ struct panel_edp *p = to_panel_edp(panel);
+ unsigned int i;
+
+ if (p->desc->num_timings < num_timings)
+ num_timings = p->desc->num_timings;
+
+ if (timings)
+ for (i = 0; i < num_timings; i++)
+ timings[i] = p->desc->timings[i];
+
+ return p->desc->num_timings;
+}
+
+static const struct drm_panel_funcs panel_edp_funcs = {
+ .disable = panel_edp_disable,
+ .unprepare = panel_edp_unprepare,
+ .prepare = panel_edp_prepare,
+ .enable = panel_edp_enable,
+ .get_modes = panel_edp_get_modes,
+ .get_timings = panel_edp_get_timings,
+};
+
+#define PANEL_EDP_BOUNDS_CHECK(to_check, bounds, field) \
+ (to_check->field.typ >= bounds->field.min && \
+ to_check->field.typ <= bounds->field.max)
+static void panel_edp_parse_panel_timing_node(struct device *dev,
+ struct panel_edp *panel,
+ const struct display_timing *ot)
+{
+ const struct panel_desc *desc = panel->desc;
+ struct videomode vm;
+ unsigned int i;
+
+ if (WARN_ON(desc->num_modes)) {
+ dev_err(dev, "Reject override mode: panel has a fixed mode\n");
+ return;
+ }
+ if (WARN_ON(!desc->num_timings)) {
+ dev_err(dev, "Reject override mode: no timings specified\n");
+ return;
+ }
+
+ for (i = 0; i < panel->desc->num_timings; i++) {
+ const struct display_timing *dt = &panel->desc->timings[i];
+
+ if (!PANEL_EDP_BOUNDS_CHECK(ot, dt, hactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, hsync_len) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vactive) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vfront_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vback_porch) ||
+ !PANEL_EDP_BOUNDS_CHECK(ot, dt, vsync_len))
+ continue;
+
+ if (ot->flags != dt->flags)
+ continue;
+
+ videomode_from_timing(ot, &vm);
+ drm_display_mode_from_videomode(&vm, &panel->override_mode);
+ panel->override_mode.type |= DRM_MODE_TYPE_DRIVER |
+ DRM_MODE_TYPE_PREFERRED;
+ break;
+ }
+
+ if (WARN_ON(!panel->override_mode.type))
+ dev_err(dev, "Reject override mode: No display_timing found\n");
+}
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id);
+
+static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel)
+{
+ const struct edp_panel_entry *edp_panel;
+ struct panel_desc *desc;
+ u32 panel_id;
+ char vend[4];
+ u16 product_id;
+ u32 reliable_ms = 0;
+ u32 absent_ms = 0;
+ int ret;
+
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+ panel->desc = desc;
+
+ /*
+ * Read the dts properties for the initial probe. These are used by
+ * the runtime resume code which will get called by the
+ * pm_runtime_get_sync() call below.
+ */
+ of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms);
+ desc->delay.hpd_reliable = reliable_ms;
+ of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms);
+ desc->delay.hpd_absent = absent_ms;
+
+ /* Power the panel on so we can read the EDID */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "Couldn't power on panel to read EDID: %d\n", ret);
+ goto exit;
+ }
+
+ panel_id = drm_edid_get_panel_id(panel->ddc);
+ if (!panel_id) {
+ dev_err(dev, "Couldn't identify panel via EDID\n");
+ ret = -EIO;
+ goto exit;
+ }
+ drm_edid_decode_panel_id(panel_id, vend, &product_id);
+
+ edp_panel = find_edp_panel(panel_id);
+
+ /*
+ * We're using non-optimized timings and want it really obvious that
+ * someone needs to add an entry to the table, so we'll do a WARN_ON
+ * splat.
+ */
+ if (WARN_ON(!edp_panel)) {
+ dev_warn(dev,
+ "Unknown panel %s %#06x, using conservative timings\n",
+ vend, product_id);
+
+ /*
+ * It's highly likely that the panel will work if we use very
+ * conservative timings, so let's do that. We already know that
+ * the HPD-related delays must have worked since we got this
+ * far, so we really just need the "unprepare" / "enable"
+ * delays. We don't need "prepare_to_enable" since that
+ * overlaps the "enable" delay anyway.
+ *
+ * Nearly all panels have an "unprepare" delay of 500 ms though
+ * there are a few with 1000. Let's stick 2000 in just to be
+ * super conservative.
+ *
+ * An "enable" delay of 80 ms seems the most common, but we'll
+ * throw in 200 ms to be safe.
+ */
+ desc->delay.unprepare = 2000;
+ desc->delay.enable = 200;
+ } else {
+ dev_info(dev, "Detected %s %s (%#06x)\n",
+ vend, edp_panel->name, product_id);
+
+ /* Update the delay; everything else comes from EDID */
+ desc->delay = *edp_panel->delay;
+ }
+
+ ret = 0;
+exit:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
+ return ret;
+}
+
+static int panel_edp_probe(struct device *dev, const struct panel_desc *desc,
+ struct drm_dp_aux *aux)
+{
+ struct panel_edp *panel;
+ struct display_timing dt;
+ struct device_node *ddc;
+ int err;
+
+ panel = devm_kzalloc(dev, sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return -ENOMEM;
+
+ panel->enabled = false;
+ panel->prepared_time = 0;
+ panel->desc = desc;
+ panel->aux = aux;
+
+ panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
+ if (!panel->no_hpd) {
+ err = panel_edp_get_hpd_gpio(dev, panel);
+ if (err)
+ return err;
+ }
+
+ panel->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(panel->supply))
+ return PTR_ERR(panel->supply);
+
+ panel->enable_gpio = devm_gpiod_get_optional(dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(panel->enable_gpio)) {
+ err = PTR_ERR(panel->enable_gpio);
+ if (err != -EPROBE_DEFER)
+ dev_err(dev, "failed to request GPIO: %d\n", err);
+ return err;
+ }
+
+ err = of_drm_get_panel_orientation(dev->of_node, &panel->orientation);
+ if (err) {
+ dev_err(dev, "%pOF: failed to get orientation %d\n", dev->of_node, err);
+ return err;
+ }
+
+ ddc = of_parse_phandle(dev->of_node, "ddc-i2c-bus", 0);
+ if (ddc) {
+ panel->ddc = of_find_i2c_adapter_by_node(ddc);
+ of_node_put(ddc);
+
+ if (!panel->ddc)
+ return -EPROBE_DEFER;
+ } else if (aux) {
+ panel->ddc = &aux->ddc;
+ }
+
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_edp_parse_panel_timing_node(dev, panel, &dt);
+
+ dev_set_drvdata(dev, panel);
+
+ drm_panel_init(&panel->base, dev, &panel_edp_funcs, DRM_MODE_CONNECTOR_eDP);
+
+ err = drm_panel_of_backlight(&panel->base);
+ if (err)
+ goto err_finished_ddc_init;
+
+ /*
+ * We use runtime PM for prepare / unprepare since those power the panel
+ * on and off and those can be very slow operations. This is important
+ * to optimize powering the panel on briefly to read the EDID before
+ * fully enabling the panel.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
+ if (of_device_is_compatible(dev->of_node, "edp-panel")) {
+ err = generic_edp_panel_probe(dev, panel);
+ if (err) {
+ dev_err_probe(dev, err,
+ "Couldn't detect panel nor find a fallback\n");
+ goto err_finished_pm_runtime;
+ }
+ /* generic_edp_panel_probe() replaces desc in the panel */
+ desc = panel->desc;
+ } else if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10) {
+ dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
+ }
+
+ if (!panel->base.backlight && panel->aux) {
+ pm_runtime_get_sync(dev);
+ err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ if (err)
+ goto err_finished_pm_runtime;
+ }
+
+ drm_panel_add(&panel->base);
+
+ return 0;
+
+err_finished_pm_runtime:
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+err_finished_ddc_init:
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ return err;
+}
+
+static int panel_edp_remove(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_remove(&panel->base);
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+
+ pm_runtime_dont_use_autosuspend(dev);
+ pm_runtime_disable(dev);
+ if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ put_device(&panel->ddc->dev);
+
+ kfree(panel->edid);
+ panel->edid = NULL;
+
+ return 0;
+}
+
+static void panel_edp_shutdown(struct device *dev)
+{
+ struct panel_edp *panel = dev_get_drvdata(dev);
+
+ drm_panel_disable(&panel->base);
+ drm_panel_unprepare(&panel->base);
+}
+
+static const struct display_timing auo_b101ean01_timing = {
+ .pixelclock = { 65300000, 72500000, 75000000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 18, 119, 119 },
+ .hback_porch = { 21, 21, 21 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 4, 4, 4 },
+ .vback_porch = { 8, 8, 8 },
+ .vsync_len = { 18, 20, 20 },
+};
+
+static const struct panel_desc auo_b101ean01 = {
+ .timings = &auo_b101ean01_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+};
+
+static const struct drm_display_mode auo_b116xak01_mode = {
+ .clock = 69300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 10,
+ .vdisplay = 768,
+ .vsync_start = 768 + 4,
+ .vsync_end = 768 + 4 + 6,
+ .vtotal = 768 + 4 + 6 + 15,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xak01 = {
+ .modes = &auo_b116xak01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode auo_b116xw03_mode = {
+ .clock = 70589,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 40,
+ .htotal = 1366 + 40 + 40 + 32,
+ .vdisplay = 768,
+ .vsync_start = 768 + 10,
+ .vsync_end = 768 + 10 + 12,
+ .vtotal = 768 + 10 + 12 + 6,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc auo_b116xw03 = {
+ .modes = &auo_b116xw03_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .enable = 400,
+ },
+};
+
+static const struct drm_display_mode auo_b133han05_mode = {
+ .clock = 142600,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 58,
+ .hsync_end = 1920 + 58 + 42,
+ .htotal = 1920 + 58 + 42 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 54,
+};
+
+static const struct panel_desc auo_b133han05 = {
+ .modes = &auo_b133han05_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133htn01_mode = {
+ .clock = 150660,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 172,
+ .hsync_end = 1920 + 172 + 80,
+ .htotal = 1920 + 172 + 80 + 60,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 25,
+ .vsync_end = 1080 + 25 + 10,
+ .vtotal = 1080 + 25 + 10 + 10,
+};
+
+static const struct panel_desc auo_b133htn01 = {
+ .modes = &auo_b133htn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_reliable = 105,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode auo_b133xtn01_mode = {
+ .clock = 69500,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 48,
+ .hsync_end = 1366 + 48 + 32,
+ .htotal = 1366 + 48 + 32 + 20,
+ .vdisplay = 768,
+ .vsync_start = 768 + 3,
+ .vsync_end = 768 + 3 + 6,
+ .vtotal = 768 + 3 + 6 + 13,
+};
+
+static const struct panel_desc auo_b133xtn01 = {
+ .modes = &auo_b133xtn01_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 293,
+ .height = 165,
+ },
+};
+
+static const struct drm_display_mode auo_b140han06_mode = {
+ .clock = 141000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 152,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 14,
+ .vtotal = 1080 + 3 + 14 + 19,
+};
+
+static const struct panel_desc auo_b140han06 = {
+ .modes = &auo_b140han06_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ .hpd_reliable = 100,
+ .enable = 20,
+ .unprepare = 50,
+ },
+};
+
+static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
+ {
+ .clock = 71900,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+ {
+ .clock = 57500,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 3,
+ .vsync_end = 800 + 3 + 5,
+ .vtotal = 800 + 3 + 5 + 24,
+ },
+};
+
+static const struct panel_desc boe_nv101wxmn51 = {
+ .modes = boe_nv101wxmn51_modes,
+ .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
+ .bpc = 8,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
+ {
+ .clock = 207800,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+ {
+ .clock = 138500,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 100,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 6,
+ .vtotal = 1440 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+ },
+};
+
+static const struct panel_desc boe_nv110wtm_n61 = {
+ .modes = boe_nv110wtm_n61_modes,
+ .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
+ .bpc = 8,
+ .size = {
+ .width = 233,
+ .height = 155,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .enable = 50,
+ .unprepare = 500,
+ },
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct drm_display_mode boe_nv133fhm_n61_modes = {
+ .clock = 147840,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 6,
+ .vtotal = 1080 + 3 + 6 + 31,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+/* Also used for boe_nv133fhm_n62 */
+static const struct panel_desc boe_nv133fhm_n61 = {
+ .modes = &boe_nv133fhm_n61_modes,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ /*
+ * When power is first given to the panel there's a short
+ * spike on the HPD line. It was explained that this spike
+ * was until the TCON data download was complete. On
+ * one system this was measured at 8 ms. We'll put 15 ms
+ * in the prepare delay just to be safe. That means:
+ * - If HPD isn't hooked up you still have 200 ms delay.
+ * - If HPD is hooked up we won't try to look at it for the
+ * first 15 ms.
+ */
+ .hpd_reliable = 15,
+ .hpd_absent = 200,
+
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
+ {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 2200,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1125,
+ },
+};
+
+static const struct panel_desc boe_nv140fhmn49 = {
+ .modes = boe_nv140fhmn49_modes,
+ .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
+ .bpc = 6,
+ .size = {
+ .width = 309,
+ .height = 174,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 210,
+ .enable = 50,
+ .unprepare = 160,
+ },
+};
+
+static const struct drm_display_mode innolux_n116bca_ea1_mode = {
+ .clock = 76420,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 136,
+ .hsync_end = 1366 + 136 + 30,
+ .htotal = 1366 + 136 + 30 + 60,
+ .vdisplay = 768,
+ .vsync_start = 768 + 8,
+ .vsync_end = 768 + 8 + 12,
+ .vtotal = 768 + 8 + 12 + 12,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
+static const struct panel_desc innolux_n116bca_ea1 = {
+ .modes = &innolux_n116bca_ea1_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .prepare_to_enable = 80,
+ .unprepare = 500,
+ },
+};
+
+/*
+ * Datasheet specifies that at 60 Hz refresh rate:
+ * - total horizontal time: { 1506, 1592, 1716 }
+ * - total vertical time: { 788, 800, 868 }
+ *
+ * ...but doesn't go into exactly how that should be split into a front
+ * porch, back porch, or sync length. For now we'll leave a single setting
+ * here which allows a bit of tweaking of the pixel clock at the expense of
+ * refresh rate.
+ */
+static const struct display_timing innolux_n116bge_timing = {
+ .pixelclock = { 72600000, 76420000, 80240000 },
+ .hactive = { 1366, 1366, 1366 },
+ .hfront_porch = { 136, 136, 136 },
+ .hback_porch = { 60, 60, 60 },
+ .hsync_len = { 30, 30, 30 },
+ .vactive = { 768, 768, 768 },
+ .vfront_porch = { 8, 8, 8 },
+ .vback_porch = { 12, 12, 12 },
+ .vsync_len = { 12, 12, 12 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
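
/*
 * A quick sanity check of the typical values above (illustrative arithmetic,
 * assuming exactly 60 Hz refresh):
 *   (1366 + 136 + 60 + 30) * (768 + 8 + 12 + 12) * 60
 *     = 1592 * 800 * 60 = 76,416,000 Hz,
 * which lines up with the 76.42 MHz typical pixelclock.
 */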
+
+static const struct panel_desc innolux_n116bge = {
+ .timings = &innolux_n116bge_timing,
+ .num_timings = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+};
+
+static const struct drm_display_mode innolux_n125hce_gn1_mode = {
+ .clock = 162000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 4,
+ .vtotal = 1080 + 4 + 4 + 24,
+};
+
+static const struct panel_desc innolux_n125hce_gn1 = {
+ .modes = &innolux_n125hce_gn1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 276,
+ .height = 155,
+ },
+};
+
+static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
+ .clock = 206016,
+ .hdisplay = 2160,
+ .hsync_start = 2160 + 48,
+ .hsync_end = 2160 + 48 + 32,
+ .htotal = 2160 + 48 + 32 + 80,
+ .vdisplay = 1440,
+ .vsync_start = 1440 + 3,
+ .vsync_end = 1440 + 3 + 10,
+ .vtotal = 1440 + 3 + 10 + 27,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc innolux_p120zdg_bf1 = {
+ .modes = &innolux_p120zdg_bf1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 254,
+ .height = 169,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
+ .clock = 138778,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 24,
+ .hsync_end = 1920 + 24 + 48,
+ .htotal = 1920 + 24 + 48 + 88,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 12,
+ .vtotal = 1080 + 3 + 12 + 17,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc ivo_m133nwf4_r0 = {
+ .modes = &ivo_m133nwf4_r0_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 294,
+ .height = 165,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
+ .clock = 81000,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 40,
+ .hsync_end = 1366 + 40 + 32,
+ .htotal = 1366 + 40 + 32 + 62,
+ .vdisplay = 768,
+ .vsync_start = 768 + 5,
+ .vsync_end = 768 + 5 + 5,
+ .vtotal = 768 + 5 + 5 + 122,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
+ .modes = &kingdisplay_kd116n21_30nv_a010_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 256,
+ .height = 144,
+ },
+ .delay = {
+ .hpd_absent = 200,
+ },
+};
+
+static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
+ .clock = 200000,
+ .hdisplay = 1536,
+ .hsync_start = 1536 + 12,
+ .hsync_end = 1536 + 12 + 16,
+ .htotal = 1536 + 12 + 16 + 48,
+ .vdisplay = 2048,
+ .vsync_start = 2048 + 8,
+ .vsync_end = 2048 + 8 + 4,
+ .vtotal = 2048 + 8 + 4 + 8,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc lg_lp079qx1_sp0v = {
+ .modes = &lg_lp079qx1_sp0v_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 129,
+ .height = 171,
+ },
+};
+
+static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
+ .clock = 205210,
+ .hdisplay = 2048,
+ .hsync_start = 2048 + 150,
+ .hsync_end = 2048 + 150 + 5,
+ .htotal = 2048 + 150 + 5 + 5,
+ .vdisplay = 1536,
+ .vsync_start = 1536 + 3,
+ .vsync_end = 1536 + 3 + 1,
+ .vtotal = 1536 + 3 + 1 + 9,
+};
+
+static const struct panel_desc lg_lp097qx1_spa1 = {
+ .modes = &lg_lp097qx1_spa1_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 208,
+ .height = 147,
+ },
+};
+
+static const struct drm_display_mode lg_lp120up1_mode = {
+ .clock = 162300,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 40,
+ .hsync_end = 1920 + 40 + 40,
+ .htotal = 1920 + 40 + 40 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 4,
+ .vsync_end = 1280 + 4 + 4,
+ .vtotal = 1280 + 4 + 4 + 12,
+};
+
+static const struct panel_desc lg_lp120up1 = {
+ .modes = &lg_lp120up1_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 267,
+ .height = 183,
+ },
+};
+
+static const struct drm_display_mode lg_lp129qe_mode = {
+ .clock = 285250,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1700,
+ .vsync_start = 1700 + 3,
+ .vsync_end = 1700 + 3 + 10,
+ .vtotal = 1700 + 3 + 10 + 36,
+};
+
+static const struct panel_desc lg_lp129qe = {
+ .modes = &lg_lp129qe_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 272,
+ .height = 181,
+ },
+};
+
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+};
+
+static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
+ .clock = 271560,
+ .hdisplay = 2560,
+ .hsync_start = 2560 + 48,
+ .hsync_end = 2560 + 48 + 32,
+ .htotal = 2560 + 48 + 32 + 80,
+ .vdisplay = 1600,
+ .vsync_start = 1600 + 2,
+ .vsync_end = 1600 + 2 + 5,
+ .vtotal = 1600 + 2 + 5 + 57,
+};
+
+static const struct panel_desc samsung_lsn122dl01_c01 = {
+ .modes = &samsung_lsn122dl01_c01_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+};
+
+static const struct drm_display_mode samsung_ltn140at29_301_mode = {
+ .clock = 76300,
+ .hdisplay = 1366,
+ .hsync_start = 1366 + 64,
+ .hsync_end = 1366 + 64 + 48,
+ .htotal = 1366 + 64 + 48 + 128,
+ .vdisplay = 768,
+ .vsync_start = 768 + 2,
+ .vsync_end = 768 + 2 + 5,
+ .vtotal = 768 + 2 + 5 + 17,
+};
+
+static const struct panel_desc samsung_ltn140at29_301 = {
+ .modes = &samsung_ltn140at29_301_mode,
+ .num_modes = 1,
+ .bpc = 6,
+ .size = {
+ .width = 320,
+ .height = 187,
+ },
+};
+
+static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
+ .clock = 168480,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 3,
+ .vsync_end = 1280 + 3 + 10,
+ .vtotal = 1280 + 3 + 10 + 57,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc sharp_ld_d5116z01b = {
+ .modes = &sharp_ld_d5116z01b_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 260,
+ .height = 120,
+ },
+};
+
+static const struct display_timing sharp_lq123p1jx31_timing = {
+ .pixelclock = { 252750000, 252750000, 266604720 },
+ .hactive = { 2400, 2400, 2400 },
+ .hfront_porch = { 48, 48, 48 },
+ .hback_porch = { 80, 80, 84 },
+ .hsync_len = { 32, 32, 32 },
+ .vactive = { 1600, 1600, 1600 },
+ .vfront_porch = { 3, 3, 3 },
+ .vback_porch = { 33, 33, 120 },
+ .vsync_len = { 10, 10, 10 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
+};
+
+static const struct panel_desc sharp_lq123p1jx31 = {
+ .timings = &sharp_lq123p1jx31_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 259,
+ .height = 173,
+ },
+ .delay = {
+ .hpd_reliable = 110,
+ .enable = 50,
+ .unprepare = 550,
+ },
+};
+
+static const struct drm_display_mode starry_kr122ea0sra_mode = {
+ .clock = 147000,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 16,
+ .hsync_end = 1920 + 16 + 16,
+ .htotal = 1920 + 16 + 16 + 32,
+ .vdisplay = 1200,
+ .vsync_start = 1200 + 15,
+ .vsync_end = 1200 + 15 + 2,
+ .vtotal = 1200 + 15 + 2 + 18,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static const struct panel_desc starry_kr122ea0sra = {
+ .modes = &starry_kr122ea0sra_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 263,
+ .height = 164,
+ },
+ .delay = {
+ /* TODO: should be hpd-absent and no-hpd should be set? */
+ .hpd_reliable = 10 + 200,
+ .enable = 50,
+ .unprepare = 10 + 500,
+ },
+};
+
+static const struct of_device_id platform_of_match[] = {
+ {
+ /* Must be first */
+ .compatible = "edp-panel",
+ }, {
+ .compatible = "auo,b101ean01",
+ .data = &auo_b101ean01,
+ }, {
+ .compatible = "auo,b116xa01",
+ .data = &auo_b116xak01,
+ }, {
+ .compatible = "auo,b116xw03",
+ .data = &auo_b116xw03,
+ }, {
+ .compatible = "auo,b133han05",
+ .data = &auo_b133han05,
+ }, {
+ .compatible = "auo,b133htn01",
+ .data = &auo_b133htn01,
+ }, {
+ .compatible = "auo,b133xtn01",
+ .data = &auo_b133xtn01,
+ }, {
+ .compatible = "auo,b140han06",
+ .data = &auo_b140han06,
+ }, {
+ .compatible = "boe,nv101wxmn51",
+ .data = &boe_nv101wxmn51,
+ }, {
+ .compatible = "boe,nv110wtm-n61",
+ .data = &boe_nv110wtm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n61",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv133fhm-n62",
+ .data = &boe_nv133fhm_n61,
+ }, {
+ .compatible = "boe,nv140fhmn49",
+ .data = &boe_nv140fhmn49,
+ }, {
+ .compatible = "innolux,n116bca-ea1",
+ .data = &innolux_n116bca_ea1,
+ }, {
+ .compatible = "innolux,n116bge",
+ .data = &innolux_n116bge,
+ }, {
+ .compatible = "innolux,n125hce-gn1",
+ .data = &innolux_n125hce_gn1,
+ }, {
+ .compatible = "innolux,p120zdg-bf1",
+ .data = &innolux_p120zdg_bf1,
+ }, {
+ .compatible = "ivo,m133nwf4-r0",
+ .data = &ivo_m133nwf4_r0,
+ }, {
+ .compatible = "kingdisplay,kd116n21-30nv-a010",
+ .data = &kingdisplay_kd116n21_30nv_a010,
+ }, {
+ .compatible = "lg,lp079qx1-sp0v",
+ .data = &lg_lp079qx1_sp0v,
+ }, {
+ .compatible = "lg,lp097qx1-spa1",
+ .data = &lg_lp097qx1_spa1,
+ }, {
+ .compatible = "lg,lp120up1",
+ .data = &lg_lp120up1,
+ }, {
+ .compatible = "lg,lp129qe",
+ .data = &lg_lp129qe,
+ }, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
+ .compatible = "samsung,lsn122dl01-c01",
+ .data = &samsung_lsn122dl01_c01,
+ }, {
+ .compatible = "samsung,ltn140at29-301",
+ .data = &samsung_ltn140at29_301,
+ }, {
+ .compatible = "sharp,ld-d5116z01b",
+ .data = &sharp_ld_d5116z01b,
+ }, {
+ .compatible = "sharp,lq123p1jx31",
+ .data = &sharp_lq123p1jx31,
+ }, {
+ .compatible = "starry,kr122ea0sra",
+ .data = &starry_kr122ea0sra,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, platform_of_match);
+
+static const struct panel_delay delay_200_500_p2e80 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 80,
+};
+
+static const struct panel_delay delay_200_500_p2e100 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .prepare_to_enable = 100,
+};
+
+static const struct panel_delay delay_200_500_e50 = {
+ .hpd_absent = 200,
+ .unprepare = 500,
+ .enable = 50,
+};
+
+#define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .delay = _delay \
+}
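
/*
 * Minimal usage sketch (hypothetical vendor, product ID and name, reusing the
 * delay_200_500_e50 table above): drm_edid_encode_panel_id() packs the
 * three-letter EDID vendor code and the 16-bit product ID into the single
 * u32 that find_edp_panel() matches against.
 */
static const struct edp_panel_entry example_edp_panel =
	EDP_PANEL_ENTRY('X', 'Y', 'Z', 0x1234, &delay_200_500_e50, "EXAMPLE1234");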
+
+/*
+ * This table is used to figure out power sequencing delays for panels that
+ * are detected by EDID. Entries here may point to entries in the
+ * platform_of_match table (if a panel is listed in both places).
+ *
+ * Sort first by vendor, then by product ID.
+ */
+static const struct edp_panel_entry edp_panels[] = {
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x405c, &auo_b116xak01.delay, "B116XAK01"),
+ EDP_PANEL_ENTRY('A', 'U', 'O', 0x615c, &delay_200_500_e50, "B116XAN06.1"),
+
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x0786, &delay_200_500_p2e80, "NV116WHM-T01"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x07d1, &boe_nv133fhm_n61.delay, "NV133FHM-N61"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x082d, &boe_nv133fhm_n61.delay, "NV133FHM-N62"),
+ EDP_PANEL_ENTRY('B', 'O', 'E', 0x098d, &boe_nv110wtm_n61.delay, "NV110WTM-N61"),
+
+ EDP_PANEL_ENTRY('C', 'M', 'N', 0x114c, &innolux_n116bca_ea1.delay, "N116BCA-EA1"),
+
+ EDP_PANEL_ENTRY('K', 'D', 'B', 0x0624, &kingdisplay_kd116n21_30nv_a010.delay, "116N21-30NV-A010"),
+
+ EDP_PANEL_ENTRY('S', 'H', 'P', 0x154c, &delay_200_500_p2e100, "LQ116M1JW10"),
+
+ { /* sentinel */ }
+};
+
+static const struct edp_panel_entry *find_edp_panel(u32 panel_id)
+{
+ const struct edp_panel_entry *panel;
+
+ if (!panel_id)
+ return NULL;
+
+ for (panel = edp_panels; panel->panel_id; panel++)
+ if (panel->panel_id == panel_id)
+ return panel;
+
+ return NULL;
+}
+
+static int panel_edp_platform_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *id;
+
+ /* Skip one since "edp-panel" is only supported on DP AUX bus */
+ id = of_match_node(platform_of_match + 1, pdev->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&pdev->dev, id->data, NULL);
+}
+
+static int panel_edp_platform_remove(struct platform_device *pdev)
+{
+ return panel_edp_remove(&pdev->dev);
+}
+
+static void panel_edp_platform_shutdown(struct platform_device *pdev)
+{
+ panel_edp_shutdown(&pdev->dev);
+}
+
+static const struct dev_pm_ops panel_edp_pm_ops = {
+ SET_RUNTIME_PM_OPS(panel_edp_suspend, panel_edp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
+static struct platform_driver panel_edp_platform_driver = {
+ .driver = {
+ .name = "panel-edp",
+ .of_match_table = platform_of_match,
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_platform_probe,
+ .remove = panel_edp_platform_remove,
+ .shutdown = panel_edp_platform_shutdown,
+};
+
+static int panel_edp_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
+{
+ const struct of_device_id *id;
+
+ id = of_match_node(platform_of_match, aux_ep->dev.of_node);
+ if (!id)
+ return -ENODEV;
+
+ return panel_edp_probe(&aux_ep->dev, id->data, aux_ep->aux);
+}
+
+static void panel_edp_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_remove(&aux_ep->dev);
+}
+
+static void panel_edp_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
+{
+ panel_edp_shutdown(&aux_ep->dev);
+}
+
+static struct dp_aux_ep_driver panel_edp_dp_aux_ep_driver = {
+ .driver = {
+ .name = "panel-simple-dp-aux",
+ .of_match_table = platform_of_match, /* Same as platform one! */
+ .pm = &panel_edp_pm_ops,
+ },
+ .probe = panel_edp_dp_aux_ep_probe,
+ .remove = panel_edp_dp_aux_ep_remove,
+ .shutdown = panel_edp_dp_aux_ep_shutdown,
+};
+
+static int __init panel_edp_init(void)
+{
+ int err;
+
+ err = platform_driver_register(&panel_edp_platform_driver);
+ if (err < 0)
+ return err;
+
+ err = dp_aux_dp_driver_register(&panel_edp_dp_aux_ep_driver);
+ if (err < 0)
+ goto err_did_platform_register;
+
+ return 0;
+
+err_did_platform_register:
+ platform_driver_unregister(&panel_edp_platform_driver);
+
+ return err;
+}
+module_init(panel_edp_init);
+
+static void __exit panel_edp_exit(void)
+{
+ dp_aux_dp_driver_unregister(&panel_edp_dp_aux_ep_driver);
+ platform_driver_unregister(&panel_edp_platform_driver);
+}
+module_exit(panel_edp_exit);
+
+MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
+MODULE_DESCRIPTION("DRM Driver for Simple eDP Panels");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
index 0145129d7c66..534dd7414d42 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c
@@ -590,14 +590,14 @@ static const struct drm_display_mode k101_im2byl02_default_mode = {
.clock = 69700,
.hdisplay = 800,
- .hsync_start = 800 + 6,
- .hsync_end = 800 + 6 + 15,
- .htotal = 800 + 6 + 15 + 16,
+ .hsync_start = 800 + 52,
+ .hsync_end = 800 + 52 + 8,
+ .htotal = 800 + 52 + 8 + 48,
.vdisplay = 1280,
- .vsync_start = 1280 + 8,
- .vsync_end = 1280 + 8 + 48,
- .vtotal = 1280 + 8 + 48 + 52,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 6,
+ .vtotal = 1280 + 16 + 6 + 15,
.width_mm = 135,
.height_mm = 217,
diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
index f80b44a8a700..dfb43b1374e7 100644
--- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
+++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
@@ -60,6 +60,9 @@
#define MCS_CMD2_ENA1 0xFF00 /* Enable Access Command2 "CMD2" */
#define MCS_CMD2_ENA2 0xFF80 /* Enable Access Orise Command2 */
+#define OTM8009A_HDISPLAY 480
+#define OTM8009A_VDISPLAY 800
+
struct otm8009a {
struct device *dev;
struct drm_panel panel;
@@ -70,19 +73,35 @@ struct otm8009a {
bool enabled;
};
-static const struct drm_display_mode default_mode = {
- .clock = 29700,
- .hdisplay = 480,
- .hsync_start = 480 + 98,
- .hsync_end = 480 + 98 + 32,
- .htotal = 480 + 98 + 32 + 98,
- .vdisplay = 800,
- .vsync_start = 800 + 15,
- .vsync_end = 800 + 15 + 10,
- .vtotal = 800 + 15 + 10 + 14,
- .flags = 0,
- .width_mm = 52,
- .height_mm = 86,
+static const struct drm_display_mode modes[] = {
+ { /* 50 Hz, preferred */
+ .clock = 29700,
+ .hdisplay = 480,
+ .hsync_start = 480 + 98,
+ .hsync_end = 480 + 98 + 32,
+ .htotal = 480 + 98 + 32 + 98,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 14,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
+ { /* 60 Hz */
+ .clock = 33000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 70,
+ .hsync_end = 480 + 70 + 32,
+ .htotal = 480 + 70 + 32 + 72,
+ .vdisplay = 800,
+ .vsync_start = 800 + 15,
+ .vsync_end = 800 + 15 + 10,
+ .vtotal = 800 + 15 + 10 + 16,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+ .width_mm = 52,
+ .height_mm = 86,
+ },
};
static inline struct otm8009a *panel_to_otm8009a(struct drm_panel *panel)
@@ -208,12 +227,11 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
/* Default portrait 480x800 rgb24 */
dcs_write_seq(ctx, MIPI_DCS_SET_ADDRESS_MODE, 0x00);
- ret = mipi_dsi_dcs_set_column_address(dsi, 0,
- default_mode.hdisplay - 1);
+ ret = mipi_dsi_dcs_set_column_address(dsi, 0, OTM8009A_HDISPLAY - 1);
if (ret)
return ret;
- ret = mipi_dsi_dcs_set_page_address(dsi, 0, default_mode.vdisplay - 1);
+ ret = mipi_dsi_dcs_set_page_address(dsi, 0, OTM8009A_VDISPLAY - 1);
if (ret)
return ret;
@@ -337,24 +355,33 @@ static int otm8009a_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &default_mode);
- if (!mode) {
- dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
- default_mode.hdisplay, default_mode.vdisplay,
- drm_mode_vrefresh(&default_mode));
- return -ENOMEM;
+ unsigned int num_modes = ARRAY_SIZE(modes);
+ unsigned int i;
+
+ for (i = 0; i < num_modes; i++) {
+ mode = drm_mode_duplicate(connector->dev, &modes[i]);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ modes[i].hdisplay,
+ modes[i].vdisplay,
+ drm_mode_vrefresh(&modes[i]));
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER;
+
+ /* Setting first mode as preferred */
+ if (!i)
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
}
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(connector, mode);
-
connector->display_info.width_mm = mode->width_mm;
connector->display_info.height_mm = mode->height_mm;
- return 1;
+ return num_modes;
}
static const struct drm_panel_funcs otm8009a_drm_funcs = {
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
new file mode 100644
index 000000000000..1696ceb36aa0
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6d27a1.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
+ * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
+ */
+
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#define S6D27A1_PASSWD_L2 0xF0 /* Password Command for Level 2 Control */
+#define S6D27A1_RESCTL 0xB3 /* Resolution Select Control */
+#define S6D27A1_PANELCTL2 0xB4 /* ASG Signal Control */
+#define S6D27A1_READID1 0xDA /* Read panel ID 1 */
+#define S6D27A1_READID2 0xDB /* Read panel ID 2 */
+#define S6D27A1_READID3 0xDC /* Read panel ID 3 */
+#define S6D27A1_DISPCTL 0xF2 /* Display Control */
+#define S6D27A1_MANPWR 0xF3 /* Manual Control */
+#define S6D27A1_PWRCTL1 0xF4 /* Power Control */
+#define S6D27A1_SRCCTL 0xF6 /* Source Control */
+#define S6D27A1_PANELCTL	0xF7	/* Panel Control */
+
+static const u8 s6d27a1_dbi_read_commands[] = {
+ S6D27A1_READID1,
+ S6D27A1_READID2,
+ S6D27A1_READID3,
+ 0, /* sentinel */
+};
+
+struct s6d27a1 {
+ struct device *dev;
+ struct mipi_dbi dbi;
+ struct drm_panel panel;
+ struct gpio_desc *reset;
+ struct regulator_bulk_data regulators[2];
+};
+
+static const struct drm_display_mode s6d27a1_480_800_mode = {
+ /*
+ * The vendor driver states that the S6D27A1 panel
+ * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
+ */
+ .clock = 24960,
+ .hdisplay = 480,
+ .hsync_start = 480 + 63,
+ .hsync_end = 480 + 63 + 2,
+ .htotal = 480 + 63 + 2 + 63,
+ .vdisplay = 800,
+ .vsync_start = 800 + 11,
+ .vsync_end = 800 + 11 + 2,
+ .vtotal = 800 + 11 + 2 + 10,
+ .width_mm = 50,
+ .height_mm = 84,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+};
+
+static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6d27a1, panel);
+}
+
+static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 1\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 2\n");
+ return;
+ }
+ ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
+ if (ret) {
+ dev_err(ctx->dev, "unable to read MTP ID 3\n");
+ return;
+ }
+ dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
+}
+
+static int s6d27a1_power_on(struct s6d27a1 *ctx)
+{
+ struct mipi_dbi *dbi = &ctx->dbi;
+ int ret;
+
+ /* Power up */
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret) {
+ dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ msleep(20);
+
+ /* Assert reset >=1 ms */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ usleep_range(1000, 5000);
+ /* De-assert reset */
+ gpiod_set_value_cansleep(ctx->reset, 0);
+ /* Wait >= 10 ms */
+ msleep(20);
+
+ /*
+ * Exit sleep mode and initialize display - some hammering is
+ * necessary.
+ */
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(120);
+
+ /* Magic to unlock level 2 control of the display */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);
+
+ /* Configure resolution to 480RGBx800 */
+ mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05, 0x08, 0x00, 0x0c);
+
+ mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08, 0x02, 0x00);
+
+ mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03, 0x41, 0x3F);
+
+ mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
+ 0x44, 0x05, 0x88, 0x4B, 0x50);
+
+ mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01, 0x16);
+
+ mipi_dbi_command(dbi, S6D27A1_PANELCTL, 0x00, 0x05, 0x06, 0x07, 0x08,
+ 0x01, 0x09, 0x0D, 0x0A, 0x0E,
+ 0x0B, 0x0F, 0x0C, 0x10, 0x01,
+ 0x11, 0x12, 0x13, 0x14, 0x05,
+ 0x06, 0x07, 0x08, 0x01, 0x09,
+ 0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
+ 0x0C, 0x10, 0x01, 0x11, 0x12,
+ 0x13, 0x14);
+
+ /* lock the level 2 control */
+ mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);
+
+ s6d27a1_read_mtp_id(ctx);
+
+ return 0;
+}
+
+static int s6d27a1_power_off(struct s6d27a1 *ctx)
+{
+ /* Go into RESET and disable regulators */
+ gpiod_set_value_cansleep(ctx->reset, 1);
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+}
+
+static int s6d27a1_unprepare(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
+ msleep(120);
+ return s6d27a1_power_off(to_s6d27a1(panel));
+}
+
+static int s6d27a1_disable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
+ msleep(25);
+
+ return 0;
+}
+
+static int s6d27a1_prepare(struct drm_panel *panel)
+{
+ return s6d27a1_power_on(to_s6d27a1(panel));
+}
+
+static int s6d27a1_enable(struct drm_panel *panel)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct mipi_dbi *dbi = &ctx->dbi;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+
+ return 0;
+}
+
+static int s6d27a1_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct s6d27a1 *ctx = to_s6d27a1(panel);
+ struct drm_display_mode *mode;
+ static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
+ if (!mode) {
+ dev_err(ctx->dev, "failed to add mode\n");
+ return -ENOMEM;
+ }
+
+ connector->display_info.bpc = 8;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ connector->display_info.bus_flags =
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
+ drm_display_info_set_bus_formats(&connector->display_info,
+ &bus_format, 1);
+
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6d27a1_drm_funcs = {
+ .disable = s6d27a1_disable,
+ .unprepare = s6d27a1_unprepare,
+ .prepare = s6d27a1_prepare,
+ .enable = s6d27a1_enable,
+ .get_modes = s6d27a1_get_modes,
+};
+
+static int s6d27a1_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct s6d27a1 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+
+ /*
+ * VCI is the analog voltage supply
+ * VCCIO is the digital I/O voltage supply
+ */
+ ctx->regulators[0].supply = "vci";
+ ctx->regulators[1].supply = "vccio";
+ ret = devm_regulator_bulk_get(dev,
+ ARRAY_SIZE(ctx->regulators),
+ ctx->regulators);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to get regulators\n");
+
+ ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset)) {
+ ret = PTR_ERR(ctx->reset);
+ return dev_err_probe(dev, ret, "no RESET GPIO\n");
+ }
+
+ ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
+ if (ret)
+ return dev_err_probe(dev, ret, "MIPI DBI init failed\n");
+
+ ctx->dbi.read_commands = s6d27a1_dbi_read_commands;
+
+ drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
+ DRM_MODE_CONNECTOR_DPI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to add backlight\n");
+
+ spi_set_drvdata(spi, ctx);
+
+ drm_panel_add(&ctx->panel);
+
+ return 0;
+}
+
+static int s6d27a1_remove(struct spi_device *spi)
+{
+ struct s6d27a1 *ctx = spi_get_drvdata(spi);
+
+ drm_panel_remove(&ctx->panel);
+ return 0;
+}
+
+static const struct of_device_id s6d27a1_match[] = {
+ { .compatible = "samsung,s6d27a1", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6d27a1_match);
+
+static struct spi_driver s6d27a1_driver = {
+ .probe = s6d27a1_probe,
+ .remove = s6d27a1_remove,
+ .driver = {
+ .name = "s6d27a1-panel",
+ .of_match_table = s6d27a1_match,
+ },
+};
+module_spi_driver(s6d27a1_driver);
+
+MODULE_AUTHOR("Markuss Broks <markuss.broks@gmail.com>");
+MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 9b6c4e6c38a1..7f3e1b84b5f5 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/gpio/consumer.h>
-#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
@@ -36,8 +35,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
-#include <drm/drm_dp_aux_bus.h>
-#include <drm/drm_dp_helper.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_panel.h>
@@ -95,44 +92,6 @@ struct panel_desc {
unsigned int prepare;
/**
- * @delay.hpd_absent_delay: Time to wait if HPD isn't hooked up.
- *
- * Add this to the prepare delay if we know Hot Plug Detect
- * isn't used.
- */
- unsigned int hpd_absent_delay;
-
- /**
- * @delay.prepare_to_enable: Time between prepare and enable.
- *
- * The minimum time, in milliseconds, that needs to have passed
- * between when prepare finished and enable may begin. If at
- * enable time less time has passed since prepare finished,
- * the driver waits for the remaining time.
- *
- * If a fixed enable delay is also specified, we'll start
- * counting before delaying for the fixed delay.
- *
- * If a fixed prepare delay is also specified, we won't start
- * counting until after the fixed delay. We can't overlap this
- * fixed delay with the min time because the fixed delay
- * doesn't happen at the end of the function if a HPD GPIO was
- * specified.
- *
- * In other words:
- * prepare()
- * ...
- * // do fixed prepare delay
- * // wait for HPD GPIO if applicable
- * // start counting for prepare_to_enable
- *
- * enable()
- * // do fixed enable delay
- * // enforce prepare_to_enable min time
- */
- unsigned int prepare_to_enable;
-
- /**
* @delay.enable: Time for the panel to display a valid frame.
*
* The time (in milliseconds) that it takes for the panel to
@@ -176,7 +135,6 @@ struct panel_desc {
struct panel_simple {
struct drm_panel base;
bool enabled;
- bool no_hpd;
bool prepared;
@@ -187,10 +145,8 @@ struct panel_simple {
struct regulator *supply;
struct i2c_adapter *ddc;
- struct drm_dp_aux *aux;
struct gpio_desc *enable_gpio;
- struct gpio_desc *hpd_gpio;
struct edid *edid;
@@ -374,30 +330,10 @@ static int panel_simple_unprepare(struct drm_panel *panel)
return 0;
}
-static int panel_simple_get_hpd_gpio(struct device *dev, struct panel_simple *p)
-{
- int err;
-
- p->hpd_gpio = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
- if (IS_ERR(p->hpd_gpio)) {
- err = PTR_ERR(p->hpd_gpio);
-
- if (err != -EPROBE_DEFER)
- dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
-
- return err;
- }
-
- return 0;
-}
-
-static int panel_simple_prepare_once(struct panel_simple *p)
+static int panel_simple_resume(struct device *dev)
{
- struct device *dev = p->base.dev;
- unsigned int delay;
+ struct panel_simple *p = dev_get_drvdata(dev);
int err;
- int hpd_asserted;
- unsigned long hpd_wait_us;
panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
@@ -409,68 +345,12 @@ static int panel_simple_prepare_once(struct panel_simple *p)
gpiod_set_value_cansleep(p->enable_gpio, 1);
- delay = p->desc->delay.prepare;
- if (p->no_hpd)
- delay += p->desc->delay.hpd_absent_delay;
- if (delay)
- msleep(delay);
-
- if (p->hpd_gpio) {
- if (p->desc->delay.hpd_absent_delay)
- hpd_wait_us = p->desc->delay.hpd_absent_delay * 1000UL;
- else
- hpd_wait_us = 2000000;
-
- err = readx_poll_timeout(gpiod_get_value_cansleep, p->hpd_gpio,
- hpd_asserted, hpd_asserted,
- 1000, hpd_wait_us);
- if (hpd_asserted < 0)
- err = hpd_asserted;
-
- if (err) {
- if (err != -ETIMEDOUT)
- dev_err(dev,
- "error waiting for hpd GPIO: %d\n", err);
- goto error;
- }
- }
+ if (p->desc->delay.prepare)
+ msleep(p->desc->delay.prepare);
p->prepared_time = ktime_get();
return 0;
-
-error:
- gpiod_set_value_cansleep(p->enable_gpio, 0);
- regulator_disable(p->supply);
- p->unprepared_time = ktime_get();
-
- return err;
-}
-
-/*
- * Some panels simply don't always come up and need to be power cycled to
- * work properly. We'll allow for a handful of retries.
- */
-#define MAX_PANEL_PREPARE_TRIES 5
-
-static int panel_simple_resume(struct device *dev)
-{
- struct panel_simple *p = dev_get_drvdata(dev);
- int ret;
- int try;
-
- for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
- ret = panel_simple_prepare_once(p);
- if (ret != -ETIMEDOUT)
- break;
- }
-
- if (ret == -ETIMEDOUT)
- dev_err(dev, "Prepare timeout after %d tries\n", try);
- else if (try)
- dev_warn(dev, "Prepare needed %d retries\n", try);
-
- return ret;
}
static int panel_simple_prepare(struct drm_panel *panel)
@@ -503,8 +383,6 @@ static int panel_simple_enable(struct drm_panel *panel)
if (p->desc->delay.enable)
msleep(p->desc->delay.enable);
- panel_simple_wait(p->prepared_time, p->desc->delay.prepare_to_enable);
-
p->enabled = true;
return 0;
@@ -660,8 +538,7 @@ static void panel_simple_parse_panel_timing_node(struct device *dev,
dev_err(dev, "Reject override mode: No display_timing found\n");
}
-static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
- struct drm_dp_aux *aux)
+static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
{
struct panel_simple *panel;
struct display_timing dt;
@@ -677,14 +554,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
panel->enabled = false;
panel->prepared_time = 0;
panel->desc = desc;
- panel->aux = aux;
-
- panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
- if (!panel->no_hpd) {
- err = panel_simple_get_hpd_gpio(dev, panel);
- if (err)
- return err;
- }
panel->supply = devm_regulator_get(dev, "power");
if (IS_ERR(panel->supply))
@@ -712,8 +581,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (!panel->ddc)
return -EPROBE_DEFER;
- } else if (aux) {
- panel->ddc = &aux->ddc;
}
if (desc == &panel_dpi) {
@@ -749,9 +616,9 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
desc->bpc != 8);
break;
case DRM_MODE_CONNECTOR_eDP:
- if (desc->bpc != 6 && desc->bpc != 8 && desc->bpc != 10)
- dev_warn(dev, "Expected bpc in {6,8,10} but got: %u\n", desc->bpc);
- break;
+ dev_warn(dev, "eDP panels moved to panel-edp\n");
+ err = -EINVAL;
+ goto free_ddc;
case DRM_MODE_CONNECTOR_DSI:
if (desc->bpc != 6 && desc->bpc != 8)
dev_warn(dev, "Expected bpc in {6,8} but got: %u\n", desc->bpc);
@@ -798,15 +665,6 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc,
if (err)
goto disable_pm_runtime;
- if (!panel->base.backlight && panel->aux) {
- pm_runtime_get_sync(dev);
- err = drm_panel_dp_aux_backlight(&panel->base, panel->aux);
- pm_runtime_mark_last_busy(dev);
- pm_runtime_put_autosuspend(dev);
- if (err)
- goto disable_pm_runtime;
- }
-
drm_panel_add(&panel->base);
return 0;
@@ -815,7 +673,7 @@ disable_pm_runtime:
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
free_ddc:
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return err;
@@ -831,7 +689,7 @@ static int panel_simple_remove(struct device *dev)
pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
- if (panel->ddc && (!panel->aux || panel->ddc != &panel->aux->ddc))
+ if (panel->ddc)
put_device(&panel->ddc->dev);
return 0;
@@ -970,28 +828,6 @@ static const struct panel_desc auo_b101aw03 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing auo_b101ean01_timing = {
- .pixelclock = { 65300000, 72500000, 75000000 },
- .hactive = { 1280, 1280, 1280 },
- .hfront_porch = { 18, 119, 119 },
- .hback_porch = { 21, 21, 21 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 800, 800, 800 },
- .vfront_porch = { 4, 4, 4 },
- .vback_porch = { 8, 8, 8 },
- .vsync_len = { 18, 20, 20 },
-};
-
-static const struct panel_desc auo_b101ean01 = {
- .timings = &auo_b101ean01_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 217,
- .height = 136,
- },
-};
-
static const struct drm_display_mode auo_b101xtn01_mode = {
.clock = 72000,
.hdisplay = 1366,
@@ -1015,172 +851,6 @@ static const struct panel_desc auo_b101xtn01 = {
},
};
-static const struct drm_display_mode auo_b116xak01_mode = {
- .clock = 69300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 10,
- .vdisplay = 768,
- .vsync_start = 768 + 4,
- .vsync_end = 768 + 4 + 6,
- .vtotal = 768 + 4 + 6 + 15,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xak01 = {
- .modes = &auo_b116xak01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b116xw03_mode = {
- .clock = 70589,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 40,
- .htotal = 1366 + 40 + 40 + 32,
- .vdisplay = 768,
- .vsync_start = 768 + 10,
- .vsync_end = 768 + 10 + 12,
- .vtotal = 768 + 10 + 12 + 6,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc auo_b116xw03 = {
- .modes = &auo_b116xw03_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .enable = 400,
- },
- .bus_flags = DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE,
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133xtn01_mode = {
- .clock = 69500,
- .hdisplay = 1366,
- .hsync_start = 1366 + 48,
- .hsync_end = 1366 + 48 + 32,
- .htotal = 1366 + 48 + 32 + 20,
- .vdisplay = 768,
- .vsync_start = 768 + 3,
- .vsync_end = 768 + 3 + 6,
- .vtotal = 768 + 3 + 6 + 13,
-};
-
-static const struct panel_desc auo_b133xtn01 = {
- .modes = &auo_b133xtn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
-};
-
-static const struct drm_display_mode auo_b133han05_mode = {
- .clock = 142600,
- .hdisplay = 1920,
- .hsync_start = 1920 + 58,
- .hsync_end = 1920 + 58 + 42,
- .htotal = 1920 + 58 + 42 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 54,
-};
-
-static const struct panel_desc auo_b133han05 = {
- .modes = &auo_b133han05_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode auo_b133htn01_mode = {
- .clock = 150660,
- .hdisplay = 1920,
- .hsync_start = 1920 + 172,
- .hsync_end = 1920 + 172 + 80,
- .htotal = 1920 + 172 + 80 + 60,
- .vdisplay = 1080,
- .vsync_start = 1080 + 25,
- .vsync_end = 1080 + 25 + 10,
- .vtotal = 1080 + 25 + 10 + 10,
-};
-
-static const struct panel_desc auo_b133htn01 = {
- .modes = &auo_b133htn01_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 293,
- .height = 165,
- },
- .delay = {
- .prepare = 105,
- .enable = 20,
- .unprepare = 50,
- },
-};
-
-static const struct drm_display_mode auo_b140han06_mode = {
- .clock = 141000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 152,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 14,
- .vtotal = 1080 + 3 + 14 + 19,
-};
-
-static const struct panel_desc auo_b140han06 = {
- .modes = &auo_b140han06_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 100,
- .enable = 20,
- .unprepare = 50,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing auo_g070vvn01_timings = {
.pixelclock = { 33300000, 34209000, 45000000 },
.hactive = { 800, 800, 800 },
@@ -1524,169 +1194,6 @@ static const struct panel_desc boe_hv070wsa = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode boe_nv101wxmn51_modes[] = {
- {
- .clock = 71900,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
- {
- .clock = 57500,
- .hdisplay = 1280,
- .hsync_start = 1280 + 48,
- .hsync_end = 1280 + 48 + 32,
- .htotal = 1280 + 48 + 32 + 80,
- .vdisplay = 800,
- .vsync_start = 800 + 3,
- .vsync_end = 800 + 3 + 5,
- .vtotal = 800 + 3 + 5 + 24,
- },
-};
-
-static const struct panel_desc boe_nv101wxmn51 = {
- .modes = boe_nv101wxmn51_modes,
- .num_modes = ARRAY_SIZE(boe_nv101wxmn51_modes),
- .bpc = 8,
- .size = {
- .width = 217,
- .height = 136,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
-};
-
-static const struct drm_display_mode boe_nv110wtm_n61_modes[] = {
- {
- .clock = 207800,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
- {
- .clock = 138500,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 100,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 6,
- .vtotal = 1440 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
- },
-};
-
-static const struct panel_desc boe_nv110wtm_n61 = {
- .modes = boe_nv110wtm_n61_modes,
- .num_modes = ARRAY_SIZE(boe_nv110wtm_n61_modes),
- .bpc = 8,
- .size = {
- .width = 233,
- .height = 155,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .enable = 50,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct drm_display_mode boe_nv133fhm_n61_modes = {
- .clock = 147840,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 6,
- .vtotal = 1080 + 3 + 6 + 31,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-/* Also used for boe_nv133fhm_n62 */
-static const struct panel_desc boe_nv133fhm_n61 = {
- .modes = &boe_nv133fhm_n61_modes,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- /*
- * When power is first given to the panel there's a short
- * spike on the HPD line. It was explained that this spike
- * was until the TCON data download was complete. On
- * one system this was measured at 8 ms. We'll put 15 ms
- * in the prepare delay just to be safe and take it away
- * from the hpd_absent_delay (which would otherwise be 200 ms)
- * to handle this. That means:
- * - If HPD isn't hooked up you still have 200 ms delay.
- * - If HPD is hooked up we won't try to look at it for the
- * first 15 ms.
- */
- .prepare = 15,
- .hpd_absent_delay = 185,
-
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode boe_nv140fhmn49_modes[] = {
- {
- .clock = 148500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 2200,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1125,
- },
-};
-
-static const struct panel_desc boe_nv140fhmn49 = {
- .modes = boe_nv140fhmn49_modes,
- .num_modes = ARRAY_SIZE(boe_nv140fhmn49_modes),
- .bpc = 6,
- .size = {
- .width = 309,
- .height = 174,
- },
- .delay = {
- .prepare = 210,
- .enable = 50,
- .unprepare = 160,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode cdtech_s043wq26h_ct7_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2609,96 +2116,6 @@ static const struct panel_desc innolux_g121x1_l03 = {
},
};
-static const struct drm_display_mode innolux_n116bca_ea1_mode = {
- .clock = 76420,
- .hdisplay = 1366,
- .hsync_start = 1366 + 136,
- .hsync_end = 1366 + 136 + 30,
- .htotal = 1366 + 136 + 30 + 60,
- .vdisplay = 768,
- .vsync_start = 768 + 8,
- .vsync_end = 768 + 8 + 12,
- .vtotal = 768 + 8 + 12 + 12,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
-static const struct panel_desc innolux_n116bca_ea1 = {
- .modes = &innolux_n116bca_ea1_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .prepare_to_enable = 80,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-/*
- * Datasheet specifies that at 60 Hz refresh rate:
- * - total horizontal time: { 1506, 1592, 1716 }
- * - total vertical time: { 788, 800, 868 }
- *
- * ...but doesn't go into exactly how that should be split into a front
- * porch, back porch, or sync length. For now we'll leave a single setting
- * here which allows a bit of tweaking of the pixel clock at the expense of
- * refresh rate.
- */
-static const struct display_timing innolux_n116bge_timing = {
- .pixelclock = { 72600000, 76420000, 80240000 },
- .hactive = { 1366, 1366, 1366 },
- .hfront_porch = { 136, 136, 136 },
- .hback_porch = { 60, 60, 60 },
- .hsync_len = { 30, 30, 30 },
- .vactive = { 768, 768, 768 },
- .vfront_porch = { 8, 8, 8 },
- .vback_porch = { 12, 12, 12 },
- .vsync_len = { 12, 12, 12 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc innolux_n116bge = {
- .timings = &innolux_n116bge_timing,
- .num_timings = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode innolux_n125hce_gn1_mode = {
- .clock = 162000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 4,
- .vsync_end = 1080 + 4 + 4,
- .vtotal = 1080 + 4 + 4 + 24,
-};
-
-static const struct panel_desc innolux_n125hce_gn1 = {
- .modes = &innolux_n125hce_gn1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 276,
- .height = 155,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode innolux_n156bge_l21_mode = {
.clock = 69300,
.hdisplay = 1366,
@@ -2724,33 +2141,6 @@ static const struct panel_desc innolux_n156bge_l21 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode innolux_p120zdg_bf1_mode = {
- .clock = 206016,
- .hdisplay = 2160,
- .hsync_start = 2160 + 48,
- .hsync_end = 2160 + 48 + 32,
- .htotal = 2160 + 48 + 32 + 80,
- .vdisplay = 1440,
- .vsync_start = 1440 + 3,
- .vsync_end = 1440 + 3 + 10,
- .vtotal = 1440 + 3 + 10 + 27,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc innolux_p120zdg_bf1 = {
- .modes = &innolux_p120zdg_bf1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 254,
- .height = 169,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
-};
-
static const struct drm_display_mode innolux_zj070na_01p_mode = {
.clock = 51501,
.hdisplay = 1024,
@@ -2773,64 +2163,6 @@ static const struct panel_desc innolux_zj070na_01p = {
},
};
-static const struct drm_display_mode ivo_m133nwf4_r0_mode = {
- .clock = 138778,
- .hdisplay = 1920,
- .hsync_start = 1920 + 24,
- .hsync_end = 1920 + 24 + 48,
- .htotal = 1920 + 24 + 48 + 88,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 12,
- .vtotal = 1080 + 3 + 12 + 17,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc ivo_m133nwf4_r0 = {
- .modes = &ivo_m133nwf4_r0_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 294,
- .height = 165,
- },
- .delay = {
- .hpd_absent_delay = 200,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode kingdisplay_kd116n21_30nv_a010_mode = {
- .clock = 81000,
- .hdisplay = 1366,
- .hsync_start = 1366 + 40,
- .hsync_end = 1366 + 40 + 32,
- .htotal = 1366 + 40 + 32 + 62,
- .vdisplay = 768,
- .vsync_start = 768 + 5,
- .vsync_end = 768 + 5 + 5,
- .vtotal = 768 + 5 + 5 + 122,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc kingdisplay_kd116n21_30nv_a010 = {
- .modes = &kingdisplay_kd116n21_30nv_a010_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 256,
- .height = 144,
- },
- .delay = {
- .hpd_absent_delay = 200,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct display_timing koe_tx14d24vm1bpa_timing = {
.pixelclock = { 5580000, 5850000, 6200000 },
.hactive = { 320, 320, 320 },
@@ -2982,94 +2314,6 @@ static const struct panel_desc lg_lb070wv8 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode lg_lp079qx1_sp0v_mode = {
- .clock = 200000,
- .hdisplay = 1536,
- .hsync_start = 1536 + 12,
- .hsync_end = 1536 + 12 + 16,
- .htotal = 1536 + 12 + 16 + 48,
- .vdisplay = 2048,
- .vsync_start = 2048 + 8,
- .vsync_end = 2048 + 8 + 4,
- .vtotal = 2048 + 8 + 4 + 8,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc lg_lp079qx1_sp0v = {
- .modes = &lg_lp079qx1_sp0v_mode,
- .num_modes = 1,
- .size = {
- .width = 129,
- .height = 171,
- },
-};
-
-static const struct drm_display_mode lg_lp097qx1_spa1_mode = {
- .clock = 205210,
- .hdisplay = 2048,
- .hsync_start = 2048 + 150,
- .hsync_end = 2048 + 150 + 5,
- .htotal = 2048 + 150 + 5 + 5,
- .vdisplay = 1536,
- .vsync_start = 1536 + 3,
- .vsync_end = 1536 + 3 + 1,
- .vtotal = 1536 + 3 + 1 + 9,
-};
-
-static const struct panel_desc lg_lp097qx1_spa1 = {
- .modes = &lg_lp097qx1_spa1_mode,
- .num_modes = 1,
- .size = {
- .width = 208,
- .height = 147,
- },
-};
-
-static const struct drm_display_mode lg_lp120up1_mode = {
- .clock = 162300,
- .hdisplay = 1920,
- .hsync_start = 1920 + 40,
- .hsync_end = 1920 + 40 + 40,
- .htotal = 1920 + 40 + 40+ 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 4,
- .vsync_end = 1280 + 4 + 4,
- .vtotal = 1280 + 4 + 4 + 12,
-};
-
-static const struct panel_desc lg_lp120up1 = {
- .modes = &lg_lp120up1_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 267,
- .height = 183,
- },
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
-static const struct drm_display_mode lg_lp129qe_mode = {
- .clock = 285250,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1700,
- .vsync_start = 1700 + 3,
- .vsync_end = 1700 + 3 + 10,
- .vtotal = 1700 + 3 + 10 + 36,
-};
-
-static const struct panel_desc lg_lp129qe = {
- .modes = &lg_lp129qe_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 272,
- .height = 181,
- },
-};
-
static const struct display_timing logictechno_lt161010_2nh_timing = {
.pixelclock = { 26400000, 33300000, 46800000 },
.hactive = { 800, 800, 800 },
@@ -3158,19 +2402,6 @@ static const struct panel_desc logictechno_lttd800480070_l6wh_rt = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
- .clock = 30400,
- .hdisplay = 800,
- .hsync_start = 800 + 0,
- .hsync_end = 800 + 1,
- .htotal = 800 + 0 + 1 + 160,
- .vdisplay = 480,
- .vsync_start = 480 + 0,
- .vsync_end = 480 + 48 + 1,
- .vtotal = 480 + 48 + 1 + 0,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
-};
-
static const struct drm_display_mode logicpd_type_28_mode = {
.clock = 9107,
.hdisplay = 480,
@@ -3205,6 +2436,19 @@ static const struct panel_desc logicpd_type_28 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
+ .clock = 30400,
+ .hdisplay = 800,
+ .hsync_start = 800 + 0,
+ .hsync_end = 800 + 1,
+ .htotal = 800 + 0 + 1 + 160,
+ .vdisplay = 480,
+ .vsync_start = 480 + 0,
+ .vsync_end = 480 + 48 + 1,
+ .vtotal = 480 + 48 + 1 + 0,
+ .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
+};
+
static const struct panel_desc mitsubishi_aa070mc01 = {
.modes = &mitsubishi_aa070mc01_mode,
.num_modes = 1,
@@ -3330,49 +2574,6 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
-static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
- {
- .clock = 138500,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }, {
- .clock = 110920,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1080,
- .vsync_start = 1080 + 3,
- .vsync_end = 1080 + 3 + 5,
- .vtotal = 1080 + 3 + 5 + 23,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
- }
-};
-
-static const struct panel_desc neweast_wjfh116008a = {
- .modes = neweast_wjfh116008a_modes,
- .num_modes = 2,
- .bpc = 6,
- .size = {
- .width = 260,
- .height = 150,
- },
- .delay = {
- .prepare = 110,
- .enable = 20,
- .unprepare = 500,
- },
- .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
- .connector_type = DRM_MODE_CONNECTOR_eDP,
-};
-
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -3783,27 +2984,6 @@ static const struct panel_desc rocktech_rk101ii01d_ct = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
- .clock = 271560,
- .hdisplay = 2560,
- .hsync_start = 2560 + 48,
- .hsync_end = 2560 + 48 + 32,
- .htotal = 2560 + 48 + 32 + 80,
- .vdisplay = 1600,
- .vsync_start = 1600 + 2,
- .vsync_end = 1600 + 2 + 5,
- .vtotal = 1600 + 2 + 5 + 57,
-};
-
-static const struct panel_desc samsung_lsn122dl01_c01 = {
- .modes = &samsung_lsn122dl01_c01_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
-};
-
static const struct drm_display_mode samsung_ltn101nt05_mode = {
.clock = 54030,
.hdisplay = 1024,
@@ -3829,28 +3009,6 @@ static const struct panel_desc samsung_ltn101nt05 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode samsung_ltn140at29_301_mode = {
- .clock = 76300,
- .hdisplay = 1366,
- .hsync_start = 1366 + 64,
- .hsync_end = 1366 + 64 + 48,
- .htotal = 1366 + 64 + 48 + 128,
- .vdisplay = 768,
- .vsync_start = 768 + 2,
- .vsync_end = 768 + 2 + 5,
- .vtotal = 768 + 2 + 5 + 17,
-};
-
-static const struct panel_desc samsung_ltn140at29_301 = {
- .modes = &samsung_ltn140at29_301_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 320,
- .height = 187,
- },
-};
-
static const struct display_timing satoz_sat050at40h12r2_timing = {
.pixelclock = {33300000, 33300000, 50000000},
.hactive = {800, 800, 800},
@@ -3875,31 +3033,6 @@ static const struct panel_desc satoz_sat050at40h12r2 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct drm_display_mode sharp_ld_d5116z01b_mode = {
- .clock = 168480,
- .hdisplay = 1920,
- .hsync_start = 1920 + 48,
- .hsync_end = 1920 + 48 + 32,
- .htotal = 1920 + 48 + 32 + 80,
- .vdisplay = 1280,
- .vsync_start = 1280 + 3,
- .vsync_end = 1280 + 3 + 10,
- .vtotal = 1280 + 3 + 10 + 57,
- .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
-};
-
-static const struct panel_desc sharp_ld_d5116z01b = {
- .modes = &sharp_ld_d5116z01b_mode,
- .num_modes = 1,
- .bpc = 8,
- .size = {
- .width = 260,
- .height = 120,
- },
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DATA_MSB_TO_LSB,
-};
-
static const struct drm_display_mode sharp_lq070y3dg3b_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -3974,34 +3107,6 @@ static const struct panel_desc sharp_lq101k1ly04 = {
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
-static const struct display_timing sharp_lq123p1jx31_timing = {
- .pixelclock = { 252750000, 252750000, 266604720 },
- .hactive = { 2400, 2400, 2400 },
- .hfront_porch = { 48, 48, 48 },
- .hback_porch = { 80, 80, 84 },
- .hsync_len = { 32, 32, 32 },
- .vactive = { 1600, 1600, 1600 },
- .vfront_porch = { 3, 3, 3 },
- .vback_porch = { 33, 33, 120 },
- .vsync_len = { 10, 10, 10 },
- .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW,
-};
-
-static const struct panel_desc sharp_lq123p1jx31 = {
- .timings = &sharp_lq123p1jx31_timing,
- .num_timings = 1,
- .bpc = 8,
- .size = {
- .width = 259,
- .height = 173,
- },
- .delay = {
- .prepare = 110,
- .enable = 50,
- .unprepare = 550,
- },
-};
-
static const struct drm_display_mode sharp_ls020b1dd01d_modes[] = {
{ /* 50 Hz */
.clock = 3000,
@@ -4090,33 +3195,6 @@ static const struct panel_desc starry_kr070pe2t = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
-static const struct drm_display_mode starry_kr122ea0sra_mode = {
- .clock = 147000,
- .hdisplay = 1920,
- .hsync_start = 1920 + 16,
- .hsync_end = 1920 + 16 + 16,
- .htotal = 1920 + 16 + 16 + 32,
- .vdisplay = 1200,
- .vsync_start = 1200 + 15,
- .vsync_end = 1200 + 15 + 2,
- .vtotal = 1200 + 15 + 2 + 18,
- .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
-};
-
-static const struct panel_desc starry_kr122ea0sra = {
- .modes = &starry_kr122ea0sra_mode,
- .num_modes = 1,
- .size = {
- .width = 263,
- .height = 164,
- },
- .delay = {
- .prepare = 10 + 200,
- .enable = 50,
- .unprepare = 10 + 500,
- },
-};
-
static const struct drm_display_mode tfc_s9700rtwv43tr_01b_mode = {
.clock = 30000,
.hdisplay = 800,
@@ -4484,30 +3562,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "auo,b101aw03",
.data = &auo_b101aw03,
}, {
- .compatible = "auo,b101ean01",
- .data = &auo_b101ean01,
- }, {
.compatible = "auo,b101xtn01",
.data = &auo_b101xtn01,
}, {
- .compatible = "auo,b116xa01",
- .data = &auo_b116xak01,
- }, {
- .compatible = "auo,b116xw03",
- .data = &auo_b116xw03,
- }, {
- .compatible = "auo,b133han05",
- .data = &auo_b133han05,
- }, {
- .compatible = "auo,b133htn01",
- .data = &auo_b133htn01,
- }, {
- .compatible = "auo,b140han06",
- .data = &auo_b140han06,
- }, {
- .compatible = "auo,b133xtn01",
- .data = &auo_b133xtn01,
- }, {
.compatible = "auo,g070vvn01",
.data = &auo_g070vvn01,
}, {
@@ -4547,21 +3604,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "boe,hv070wsa-100",
.data = &boe_hv070wsa
}, {
- .compatible = "boe,nv101wxmn51",
- .data = &boe_nv101wxmn51,
- }, {
- .compatible = "boe,nv110wtm-n61",
- .data = &boe_nv110wtm_n61,
- }, {
- .compatible = "boe,nv133fhm-n61",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv133fhm-n62",
- .data = &boe_nv133fhm_n61,
- }, {
- .compatible = "boe,nv140fhmn49",
- .data = &boe_nv140fhmn49,
- }, {
.compatible = "cdtech,s043wq26h-ct7",
.data = &cdtech_s043wq26h_ct7,
}, {
@@ -4673,30 +3715,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "innolux,g121x1-l03",
.data = &innolux_g121x1_l03,
}, {
- .compatible = "innolux,n116bca-ea1",
- .data = &innolux_n116bca_ea1,
- }, {
- .compatible = "innolux,n116bge",
- .data = &innolux_n116bge,
- }, {
- .compatible = "innolux,n125hce-gn1",
- .data = &innolux_n125hce_gn1,
- }, {
.compatible = "innolux,n156bge-l21",
.data = &innolux_n156bge_l21,
}, {
- .compatible = "innolux,p120zdg-bf1",
- .data = &innolux_p120zdg_bf1,
- }, {
.compatible = "innolux,zj070na-01p",
.data = &innolux_zj070na_01p,
}, {
- .compatible = "ivo,m133nwf4-r0",
- .data = &ivo_m133nwf4_r0,
- }, {
- .compatible = "kingdisplay,kd116n21-30nv-a010",
- .data = &kingdisplay_kd116n21_30nv_a010,
- }, {
.compatible = "koe,tx14d24vm1bpa",
.data = &koe_tx14d24vm1bpa,
}, {
@@ -4715,18 +3739,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "lg,lb070wv8",
.data = &lg_lb070wv8,
}, {
- .compatible = "lg,lp079qx1-sp0v",
- .data = &lg_lp079qx1_sp0v,
- }, {
- .compatible = "lg,lp097qx1-spa1",
- .data = &lg_lp097qx1_spa1,
- }, {
- .compatible = "lg,lp120up1",
- .data = &lg_lp120up1,
- }, {
- .compatible = "lg,lp129qe",
- .data = &lg_lp129qe,
- }, {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
@@ -4757,9 +3769,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
- .compatible = "neweast,wjfh116008a",
- .data = &neweast_wjfh116008a,
- }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -4808,21 +3817,12 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk101ii01d-ct",
.data = &rocktech_rk101ii01d_ct,
}, {
- .compatible = "samsung,lsn122dl01-c01",
- .data = &samsung_lsn122dl01_c01,
- }, {
.compatible = "samsung,ltn101nt05",
.data = &samsung_ltn101nt05,
}, {
- .compatible = "samsung,ltn140at29-301",
- .data = &samsung_ltn140at29_301,
- }, {
.compatible = "satoz,sat050at40h12r2",
.data = &satoz_sat050at40h12r2,
}, {
- .compatible = "sharp,ld-d5116z01b",
- .data = &sharp_ld_d5116z01b,
- }, {
.compatible = "sharp,lq035q7db03",
.data = &sharp_lq035q7db03,
}, {
@@ -4832,9 +3832,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq101k1ly04",
.data = &sharp_lq101k1ly04,
}, {
- .compatible = "sharp,lq123p1jx31",
- .data = &sharp_lq123p1jx31,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -4844,9 +3841,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "starry,kr070pe2t",
.data = &starry_kr070pe2t,
}, {
- .compatible = "starry,kr122ea0sra",
- .data = &starry_kr122ea0sra,
- }, {
.compatible = "tfc,s9700rtwv43tr-01b",
.data = &tfc_s9700rtwv43tr_01b,
}, {
@@ -4918,7 +3912,7 @@ static int panel_simple_platform_probe(struct platform_device *pdev)
if (!id)
return -ENODEV;
- return panel_simple_probe(&pdev->dev, id->data, NULL);
+ return panel_simple_probe(&pdev->dev, id->data);
}
static int panel_simple_platform_remove(struct platform_device *pdev)
@@ -5198,7 +4192,7 @@ static int panel_simple_dsi_probe(struct mipi_dsi_device *dsi)
desc = id->data;
- err = panel_simple_probe(&dsi->dev, &desc->desc, NULL);
+ err = panel_simple_probe(&dsi->dev, &desc->desc);
if (err < 0)
return err;
@@ -5243,38 +4237,6 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
.shutdown = panel_simple_dsi_shutdown,
};
-static int panel_simple_dp_aux_ep_probe(struct dp_aux_ep_device *aux_ep)
-{
- const struct of_device_id *id;
-
- id = of_match_node(platform_of_match, aux_ep->dev.of_node);
- if (!id)
- return -ENODEV;
-
- return panel_simple_probe(&aux_ep->dev, id->data, aux_ep->aux);
-}
-
-static void panel_simple_dp_aux_ep_remove(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_remove(&aux_ep->dev);
-}
-
-static void panel_simple_dp_aux_ep_shutdown(struct dp_aux_ep_device *aux_ep)
-{
- panel_simple_shutdown(&aux_ep->dev);
-}
-
-static struct dp_aux_ep_driver panel_simple_dp_aux_ep_driver = {
- .driver = {
- .name = "panel-simple-dp-aux",
- .of_match_table = platform_of_match, /* Same as platform one! */
- .pm = &panel_simple_pm_ops,
- },
- .probe = panel_simple_dp_aux_ep_probe,
- .remove = panel_simple_dp_aux_ep_remove,
- .shutdown = panel_simple_dp_aux_ep_shutdown,
-};
-
static int __init panel_simple_init(void)
{
int err;
@@ -5283,21 +4245,14 @@ static int __init panel_simple_init(void)
if (err < 0)
return err;
- err = dp_aux_dp_driver_register(&panel_simple_dp_aux_ep_driver);
- if (err < 0)
- goto err_did_platform_register;
-
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI)) {
err = mipi_dsi_driver_register(&panel_simple_dsi_driver);
if (err < 0)
- goto err_did_aux_ep_register;
+ goto err_did_platform_register;
}
return 0;
-err_did_aux_ep_register:
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
-
err_did_platform_register:
platform_driver_unregister(&panel_simple_platform_driver);
@@ -5310,7 +4265,6 @@ static void __exit panel_simple_exit(void)
if (IS_ENABLED(CONFIG_DRM_MIPI_DSI))
mipi_dsi_driver_unregister(&panel_simple_dsi_driver);
- dp_aux_dp_driver_unregister(&panel_simple_dp_aux_ep_driver);
platform_driver_unregister(&panel_simple_platform_driver);
}
module_exit(panel_simple_exit);
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index bd9b7be63b0f..7f51a4682ccb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -198,7 +198,6 @@ err:
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
- struct resource *res;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
@@ -236,8 +235,7 @@ int panfrost_device_init(struct panfrost_device *pfdev)
if (err)
goto out_reset;
- res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
- pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
+ pfdev->iomem = devm_platform_ioremap_resource(pfdev->pdev, 0);
if (IS_ERR(pfdev->iomem)) {
err = PTR_ERR(pfdev->iomem);
goto out_pm_domain;
@@ -400,8 +398,7 @@ void panfrost_device_reset(struct panfrost_device *pfdev)
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
panfrost_device_reset(pfdev);
panfrost_devfreq_resume(pfdev);
@@ -411,8 +408,7 @@ int panfrost_device_resume(struct device *dev)
int panfrost_device_suspend(struct device *dev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct panfrost_device *pfdev = platform_get_drvdata(pdev);
+ struct panfrost_device *pfdev = dev_get_drvdata(dev);
if (!panfrost_job_is_idle(pfdev))
return -EBUSY;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 1ffaef5ec5ff..82ad9a67f251 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -218,7 +218,7 @@ panfrost_copy_in_sync(struct drm_device *dev,
if (ret)
goto fail;
- ret = drm_gem_fence_array_add(&job->deps, fence);
+ ret = drm_sched_job_add_dependency(&job->base, fence);
if (ret)
goto fail;
@@ -236,7 +236,7 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
struct drm_panfrost_submit *args = data;
struct drm_syncobj *sync_out = NULL;
struct panfrost_job *job;
- int ret = 0;
+ int ret = 0, slot;
if (!args->jc)
return -EINVAL;
@@ -253,38 +253,47 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data,
job = kzalloc(sizeof(*job), GFP_KERNEL);
if (!job) {
ret = -ENOMEM;
- goto fail_out_sync;
+ goto out_put_syncout;
}
kref_init(&job->refcount);
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
job->pfdev = pfdev;
job->jc = args->jc;
job->requirements = args->requirements;
job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev);
job->file_priv = file->driver_priv;
+ slot = panfrost_job_get_slot(job);
+
+ ret = drm_sched_job_init(&job->base,
+ &job->file_priv->sched_entity[slot],
+ NULL);
+ if (ret)
+ goto out_put_job;
+
ret = panfrost_copy_in_sync(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_lookup_bos(dev, file, args, job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
ret = panfrost_job_push(job);
if (ret)
- goto fail_job;
+ goto out_cleanup_job;
/* Update the return sync object for the job */
if (sync_out)
drm_syncobj_replace_fence(sync_out, job->render_done_fence);
-fail_job:
+out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&job->base);
+out_put_job:
panfrost_job_put(job);
-fail_out_sync:
+out_put_syncout:
if (sync_out)
drm_syncobj_put(sync_out);
@@ -629,8 +638,8 @@ static const struct panfrost_compatible amlogic_data = {
.vendor_quirk = panfrost_gpu_amlogic_quirk,
};
-const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
-const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
+static const char * const mediatek_mt8183_supplies[] = { "mali", "sram" };
+static const char * const mediatek_mt8183_pm_domains[] = { "core0", "core1", "core2" };
static const struct panfrost_compatible mediatek_mt8183_data = {
.num_supplies = ARRAY_SIZE(mediatek_mt8183_supplies),
.supply_names = mediatek_mt8183_supplies,
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 71a72fb50e6b..908d79520853 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -102,7 +102,7 @@ static struct dma_fence *panfrost_fence_create(struct panfrost_device *pfdev, in
return &fence->base;
}
-static int panfrost_job_get_slot(struct panfrost_job *job)
+int panfrost_job_get_slot(struct panfrost_job *job)
{
/* JS0: fragment jobs.
* JS1: vertex/tiler jobs
@@ -137,8 +137,8 @@ static void panfrost_job_write_affinity(struct panfrost_device *pfdev,
*/
affinity = pfdev->features.shader_present;
- job_write(pfdev, JS_AFFINITY_NEXT_LO(js), affinity & 0xFFFFFFFF);
- job_write(pfdev, JS_AFFINITY_NEXT_HI(js), affinity >> 32);
+ job_write(pfdev, JS_AFFINITY_NEXT_LO(js), lower_32_bits(affinity));
+ job_write(pfdev, JS_AFFINITY_NEXT_HI(js), upper_32_bits(affinity));
}
static u32
@@ -203,8 +203,8 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu);
- job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF);
- job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32);
+ job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head));
+ job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head));
panfrost_job_write_affinity(pfdev, job->requirements, js);
@@ -242,13 +242,14 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js)
static int panfrost_acquire_object_fences(struct drm_gem_object **bos,
int bo_count,
- struct xarray *deps)
+ struct drm_sched_job *job)
{
int i, ret;
for (i = 0; i < bo_count; i++) {
/* panfrost always uses write mode in its current uapi */
- ret = drm_gem_fence_array_add_implicit(deps, bos[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(job, bos[i],
+ true);
if (ret)
return ret;
}
@@ -269,29 +270,21 @@ static void panfrost_attach_object_fences(struct drm_gem_object **bos,
int panfrost_job_push(struct panfrost_job *job)
{
struct panfrost_device *pfdev = job->pfdev;
- int slot = panfrost_job_get_slot(job);
- struct drm_sched_entity *entity = &job->file_priv->sched_entity[slot];
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
-
ret = drm_gem_lock_reservations(job->bos, job->bo_count,
&acquire_ctx);
if (ret)
return ret;
mutex_lock(&pfdev->sched_lock);
-
- ret = drm_sched_job_init(&job->base, entity, NULL);
- if (ret) {
- mutex_unlock(&pfdev->sched_lock);
- goto unlock;
- }
+ drm_sched_job_arm(&job->base);
job->render_done_fence = dma_fence_get(&job->base.s_fence->finished);
ret = panfrost_acquire_object_fences(job->bos, job->bo_count,
- &job->deps);
+ &job->base);
if (ret) {
mutex_unlock(&pfdev->sched_lock);
goto unlock;
@@ -299,7 +292,7 @@ int panfrost_job_push(struct panfrost_job *job)
kref_get(&job->refcount); /* put by scheduler job completion */
- drm_sched_entity_push_job(&job->base, entity);
+ drm_sched_entity_push_job(&job->base);
mutex_unlock(&pfdev->sched_lock);
@@ -316,15 +309,8 @@ static void panfrost_job_cleanup(struct kref *ref)
{
struct panfrost_job *job = container_of(ref, struct panfrost_job,
refcount);
- struct dma_fence *fence;
- unsigned long index;
unsigned int i;
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->done_fence);
dma_fence_put(job->render_done_fence);
@@ -363,17 +349,6 @@ static void panfrost_job_free(struct drm_sched_job *sched_job)
panfrost_job_put(job);
}
-static struct dma_fence *panfrost_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct panfrost_job *job = to_panfrost_job(sched_job);
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *panfrost_job_run(struct drm_sched_job *sched_job)
{
struct panfrost_job *job = to_panfrost_job(sched_job);
@@ -763,7 +738,6 @@ static void panfrost_reset_work(struct work_struct *work)
}
static const struct drm_sched_backend_ops panfrost_sched_ops = {
- .dependency = panfrost_job_dependency,
.run_job = panfrost_job_run,
.timedout_job = panfrost_job_timedout,
.free_job = panfrost_job_free
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h
index 82306a03b57e..77e6d0e6f612 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.h
+++ b/drivers/gpu/drm/panfrost/panfrost_job.h
@@ -19,10 +19,6 @@ struct panfrost_job {
struct panfrost_device *pfdev;
struct panfrost_file_priv *file_priv;
- /* Contains both explicit and implicit fences */
- struct xarray deps;
- unsigned long last_dep;
-
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
@@ -42,6 +38,7 @@ int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
+int panfrost_job_get_slot(struct panfrost_job *job);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index dfe5f1d29763..f51d3f791a17 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -58,21 +58,37 @@ static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
- u64 iova, u64 size)
+ u64 region_start, u64 size)
{
u8 region_width;
- u64 region = iova & PAGE_MASK;
+ u64 region;
+ u64 region_end = region_start + size;
- /* The size is encoded as ceil(log2) minus(1), which may be calculated
- * with fls. The size must be clamped to hardware bounds.
+ if (!size)
+ return;
+
+ /*
+ * The locked region is a naturally aligned power of 2 block encoded as
+ * log2 minus 1.
+ * Calculate the desired start/end and look for the highest bit which
+ * differs. The smallest naturally aligned block must include this bit
+ * change; the desired region starts with this bit (and subsequent bits)
+ * zeroed and ends with the bit (and subsequent bits) set to one.
*/
- size = max_t(u64, size, AS_LOCK_REGION_MIN_SIZE);
- region_width = fls64(size - 1) - 1;
- region |= region_width;
+ region_width = max(fls64(region_start ^ (region_end - 1)),
+ const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;
+
+ /*
+ * Mask off the low bits of region_start (which would be ignored by
+ * the hardware anyway)
+ */
+ region_start &= GENMASK_ULL(63, region_width);
+
+ region = region_width | region_start;
/* Lock the region that needs to be updated */
- mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
- mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
+ mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
+ mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
@@ -114,14 +130,14 @@ static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_m
mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);
- mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
- mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
+ mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
+ mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));
/* Need to revisit mem attrs.
* NC is the default, Mali driver is inner WT.
*/
- mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
- mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
+ mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
+ mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
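A worked example of the new lock_region() encoding above, rewritten with plain GCC builtins so it can be compiled standalone (it assumes AS_LOCK_REGION_MIN_SIZE is 32 KiB, i.e. a const_ilog2() of 15):

#include <stdint.h>
#include <stdio.h>

static int fls64(uint64_t x)		/* mirrors the kernel's fls64() */
{
	return x ? 64 - __builtin_clzll(x) : 0;
}

int main(void)
{
	uint64_t region_start = 0x200000, size = 0x30000;
	uint64_t region_end = region_start + size;
	int region_width;

	/* Highest differing bit between start and end - 1, clamped to the
	 * minimum lock size, then encoded as log2 minus 1. */
	region_width = fls64(region_start ^ (region_end - 1));
	if (region_width < 15)		/* ilog2(32 KiB) */
		region_width = 15;
	region_width -= 1;

	/* Drop the start bits the hardware ignores anyway. */
	region_start &= ~((1ULL << region_width) - 1);

	/* Prints: width=17 start=0x200000 block=0x40000 (256 KiB) */
	printf("width=%d start=%#llx block=%#llx\n", region_width,
	       (unsigned long long)region_start,
	       (unsigned long long)(1ULL << (region_width + 1)));
	return 0;
}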
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 5ab03d605f57..e116a4d9b8e5 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -51,8 +51,8 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev)
reinit_completion(&pfdev->perfcnt->dump_comp);
gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT;
- gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva);
- gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32);
+ gpu_write(pfdev, GPU_PERFCNT_BASE_LO, lower_32_bits(gpuva));
+ gpu_write(pfdev, GPU_PERFCNT_BASE_HI, upper_32_bits(gpuva));
gpu_write(pfdev, GPU_INT_CLEAR,
GPU_IRQ_CLEAN_CACHES_COMPLETED |
GPU_IRQ_PERFCNT_SAMPLE_COMPLETED);
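The register writes above (and the similar ones in panfrost_job.c and panfrost_mmu.c) only swap open-coded masks and shifts for the lower_32_bits()/upper_32_bits() helpers; the values written are unchanged. A standalone illustration, with the helpers re-implemented the way the kernel defines them (minus the kernel type plumbing):

#include <assert.h>
#include <stdint.h>

#define lower_32_bits(n)	((uint32_t)((n) & 0xffffffff))
#define upper_32_bits(n)	((uint32_t)(((n) >> 16) >> 16))

int main(void)
{
	uint64_t gpuva = 0x0000004512340000ULL;

	assert(lower_32_bits(gpuva) == (uint32_t)(gpuva & 0xFFFFFFFF));
	assert(upper_32_bits(gpuva) == (uint32_t)(gpuva >> 32));
	return 0;
}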
diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c
index b19f2f00b215..469979cd0341 100644
--- a/drivers/gpu/drm/qxl/qxl_release.c
+++ b/drivers/gpu/drm/qxl/qxl_release.c
@@ -36,10 +36,10 @@
/* manage releaseables */
/* stack them 16 high for now -drawable object is 191 */
#define RELEASE_SIZE 256
-#define RELEASES_PER_BO (4096 / RELEASE_SIZE)
+#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE)
/* put an alloc/dealloc surface cmd into one bo and round up to 128 */
#define SURFACE_RELEASE_SIZE 128
-#define SURFACE_RELEASES_PER_BO (4096 / SURFACE_RELEASE_SIZE)
+#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE)
static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE };
static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO };
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 37a1b6a6ad6d..b2e33d5ba5d0 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -101,7 +101,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
*/
static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
- ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
kfree(ttm);
}
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index d2a0f5394fef..dde0501aea68 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -99,7 +99,8 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
- pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_page(&pdev->dev, entry->busaddr[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@@ -134,7 +135,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
+ if (dma_set_mask(&pdev->dev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@@ -173,9 +174,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
- 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
+ entry->busaddr[i] = dma_map_page(&pdev->dev, entry->pagelist[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&pdev->dev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
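The ati_pcigart changes above are part of the treewide move from the legacy pci_* DMA wrappers to the generic DMA API: pci_map_page()/pci_unmap_page() become dma_map_page()/dma_unmap_page() on &pdev->dev, PCI_DMA_BIDIRECTIONAL becomes DMA_BIDIRECTIONAL, and pci_dma_mapping_error()/pci_set_dma_mask() become dma_mapping_error()/dma_set_mask(). A small sketch of the new-style calls against a generic struct pci_dev (not driver code from the patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Map one page for bidirectional DMA; returns DMA_MAPPING_ERROR on failure. */
static dma_addr_t demo_map_page(struct pci_dev *pdev, struct page *page)
{
	dma_addr_t addr;

	addr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&pdev->dev, addr))
		return DMA_MAPPING_ERROR;

	return addr;
}

static void demo_unmap_page(struct pci_dev *pdev, dma_addr_t addr)
{
	dma_unmap_page(&pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}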
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 83e8b8547f9b..bd5dc09e860f 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -5983,7 +5983,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
#else // not __cplusplus
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (offsetof(ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES, FieldName)/sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
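The GetIndexIntoMasterTable() change swaps the null-pointer-cast idiom for offsetof(), which yields the same USHORT index without relying on undefined pointer arithmetic. A small standalone check (the struct below is a stand-in, not the real ATOM table layout):

#include <stddef.h>
#include <stdio.h>

typedef unsigned short USHORT;

/* Stand-in for an ATOM_MASTER_LIST_OF_..._TABLES structure. */
struct demo_master_list {
	USHORT FirmwareInfo;
	USHORT PowerPlayInfo;
	USHORT DispOutInfo;
};

#define GET_INDEX(FieldName) \
	(offsetof(struct demo_master_list, FieldName) / sizeof(USHORT))

int main(void)
{
	printf("PowerPlayInfo index = %zu\n", GET_INDEX(PowerPlayInfo)); /* 1 */
	printf("DispOutInfo index   = %zu\n", GET_INDEX(DispOutInfo));   /* 2 */
	return 0;
}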
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f0cfb58da467..ac006bed4743 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -390,8 +390,7 @@ static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
struct ci_power_info *pi = ci_get_pi(rdev);
- u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
- u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
+ u16 hi_sidd, lo_sidd;
struct radeon_cac_tdp_table *cac_tdp_table =
rdev->pm.dpm.dyn_state.cac_tdp_table;
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 35b77c944701..9d2bcb9551e6 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -820,12 +820,12 @@ union fan_info {
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct radeon_clock_voltage_dependency_entry);
int i;
ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
- radeon_table->entries = kzalloc(size, GFP_KERNEL);
+ radeon_table->entries = kcalloc(atom_table->ucNumEntries,
+ sizeof(struct radeon_clock_voltage_dependency_entry),
+ GFP_KERNEL);
if (!radeon_table->entries)
return -ENOMEM;
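The kzalloc(size) to kcalloc(n, size, flags) conversion above lets the allocator do the element-count multiplication with overflow checking, so r600_parse_clk_voltage_dep_table() no longer computes ucNumEntries * sizeof(...) by hand. A minimal illustration of the pattern (the struct and helper names are hypothetical):

#include <linux/slab.h>

struct demo_entry {
	u32 clk;
	u16 v;
};

/* Hypothetical helper: allocate a zeroed array of num entries. */
static struct demo_entry *demo_alloc_entries(unsigned int num)
{
	/*
	 * kcalloc() returns NULL if num * sizeof(struct demo_entry) would
	 * overflow, whereas a hand-rolled kzalloc(num * sizeof(...), ...)
	 * would silently allocate a truncated buffer.
	 */
	return kcalloc(num, sizeof(struct demo_entry), GFP_KERNEL);
}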
@@ -1361,7 +1361,9 @@ u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
u8 r600_encode_pci_lane_width(u32 lanes)
{
- u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
+ static const u8 encoded_lanes[] = {
+ 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6
+ };
if (lanes > 16)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index ec867fa880a4..751c2c075e09 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -423,7 +423,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
drm_dp_mst_allocate_vcpi(&radeon_connector->mst_port->mst_mgr,
radeon_connector->port,
mst_enc->pbn, slots);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+ drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
radeon_dp_mst_set_be_cntl(primary, mst_enc,
radeon_connector->mst_port->hpd.hpd, true);
@@ -452,7 +452,7 @@ radeon_mst_encoder_dpms(struct drm_encoder *encoder, int mode)
return;
drm_dp_mst_reset_vcpi_slots(&radeon_connector->mst_port->mst_mgr, mst_enc->port);
- drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr);
+ drm_dp_update_payload_part1(&radeon_connector->mst_port->mst_mgr, 1);
drm_dp_check_act_status(&radeon_connector->mst_port->mst_mgr);
/* and this can also fail */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index e9c47ec28ade..73e3117420bf 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -176,18 +176,11 @@ static int radeon_fence_check_signaled(wait_queue_entry_t *wait, unsigned mode,
*/
seq = atomic64_read(&fence->rdev->fence_drv[fence->ring].last_seq);
if (seq >= fence->seq) {
- int ret = dma_fence_signal_locked(&fence->base);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->base, "was already signaled\n");
-
+ dma_fence_signal_locked(&fence->base);
radeon_irq_kms_sw_irq_put(fence->rdev, fence->ring);
__remove_wait_queue(&fence->rdev->fence_queue, &fence->fence_wake);
dma_fence_put(&fence->base);
- } else
- DMA_FENCE_TRACE(&fence->base, "pending\n");
+ }
return 0;
}
@@ -422,8 +415,6 @@ static bool radeon_fence_enable_signaling(struct dma_fence *f)
fence->fence_wake.func = radeon_fence_check_signaled;
__add_wait_queue(&rdev->fence_queue, &fence->fence_wake);
dma_fence_get(f);
-
- DMA_FENCE_TRACE(&fence->base, "armed on ring %i!\n", fence->ring);
return true;
}
@@ -441,11 +432,7 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
return true;
if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
- int ret;
-
- ret = dma_fence_signal(&fence->base);
- if (!ret)
- DMA_FENCE_TRACE(&fence->base, "signaled from radeon_fence_signaled\n");
+ dma_fence_signal(&fence->base);
return true;
}
return false;
@@ -550,7 +537,6 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
{
uint64_t seq[RADEON_NUM_RINGS] = {};
long r;
- int r_sig;
/*
* This function should not be called on !radeon fences.
@@ -567,9 +553,7 @@ long radeon_fence_wait_timeout(struct radeon_fence *fence, bool intr, long timeo
return r;
}
- r_sig = dma_fence_signal(&fence->base);
- if (!r_sig)
- DMA_FENCE_TRACE(&fence->base, "signaled from fence_wait\n");
+ dma_fence_signal(&fence->base);
return r;
}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index a06d4cc2fb1c..11b21d605584 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -488,9 +488,6 @@ static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *t
{
struct radeon_ttm_tt *gtt = (void *)ttm;
- radeon_ttm_backend_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
-
ttm_tt_fini(&gtt->ttm);
kfree(gtt);
}
@@ -548,14 +545,14 @@ static int radeon_ttm_tt_populate(struct ttm_device *bdev,
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
if (gtt && gtt->userptr) {
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
+ ttm->page_flags |= TTM_TT_FLAG_EXTERNAL;
return 0;
}
@@ -572,11 +569,13 @@ static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
- bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
+ bool slave = !!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL);
+
+ radeon_ttm_tt_unbind(bdev, ttm);
if (gtt && gtt->userptr) {
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
+ ttm->page_flags &= ~TTM_TT_FLAG_EXTERNAL;
return;
}
@@ -651,8 +650,6 @@ static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct radeon_device *rdev = radeon_get_rdev(bdev);
if (rdev->flags & RADEON_IS_AGP) {
- ttm_agp_unbind(ttm);
- ttm_tt_destroy_common(bdev, ttm);
ttm_agp_destroy(ttm);
return;
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index ea7e39d03545..5672830ca184 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -1206,7 +1206,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
int ret;
/* Get the CRTC clock and the optional external clock. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_CLOCK)) {
sprintf(clk_name, "du.%u", hwindex);
name = clk_name;
} else {
@@ -1243,7 +1243,10 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
rcrtc->group = rgrp;
rcrtc->mmio_offset = mmio_offsets[hwindex];
rcrtc->index = hwindex;
- rcrtc->dsysr = (rcrtc->index % 2 ? 0 : DSYSR_DRES) | DSYSR_TVM_TVSYNC;
+ rcrtc->dsysr = rcrtc->index % 2 ? 0 : DSYSR_DRES;
+
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_TVM_SYNC))
+ rcrtc->dsysr |= DSYSR_TVM_TVSYNC;
if (rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
primary = &rcrtc->vsp->planes[rcrtc->vsp_pipe].plane;
@@ -1269,7 +1272,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
drm_crtc_helper_add(crtc, &crtc_helper_funcs);
/* Register the interrupt handler. */
- if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ_CLOCK)) {
+ if (rcar_du_has(rcdu, RCAR_DU_FEATURE_CRTC_IRQ)) {
/* The IRQ's are associated with the CRTC (sw)index. */
irq = platform_get_irq(pdev, swindex);
irqflags = 0;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 5f2940c42225..66e8839db708 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -93,17 +93,6 @@ struct rcar_du_crtc_state {
#define to_rcar_crtc_state(s) container_of(s, struct rcar_du_crtc_state, state)
-enum rcar_du_output {
- RCAR_DU_OUTPUT_DPAD0,
- RCAR_DU_OUTPUT_DPAD1,
- RCAR_DU_OUTPUT_LVDS0,
- RCAR_DU_OUTPUT_LVDS1,
- RCAR_DU_OUTPUT_HDMI0,
- RCAR_DU_OUTPUT_HDMI1,
- RCAR_DU_OUTPUT_TCON,
- RCAR_DU_OUTPUT_MAX,
-};
-
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
unsigned int hwindex);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 4ac26d08ebb4..5612a9e7a905 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -8,6 +8,7 @@
*/
#include <linux/clk.h>
+#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
@@ -36,7 +37,8 @@
static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -58,7 +60,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7743_info = {
static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -79,7 +82,8 @@ static const struct rcar_du_device_info rzg1_du_r8a7745_info = {
static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -105,7 +109,8 @@ static const struct rcar_du_device_info rzg1_du_r8a77470_info = {
static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -134,7 +139,8 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = {
static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -163,7 +169,8 @@ static const struct rcar_du_device_info rcar_du_r8a774b1_info = {
static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
@@ -189,7 +196,8 @@ static const struct rcar_du_device_info rcar_du_r8a774c0_info = {
static const struct rcar_du_device_info rcar_du_r8a774e1_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -239,7 +247,8 @@ static const struct rcar_du_device_info rcar_du_r8a7779_info = {
static const struct rcar_du_device_info rcar_du_r8a7790_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.quirks = RCAR_DU_QUIRK_ALIGN_128B,
@@ -269,7 +278,8 @@ static const struct rcar_du_device_info rcar_du_r8a7790_info = {
/* M2-W (r8a7791) and M2-N (r8a7793) are identical */
static const struct rcar_du_device_info rcar_du_r8a7791_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -292,7 +302,8 @@ static const struct rcar_du_device_info rcar_du_r8a7791_info = {
static const struct rcar_du_device_info rcar_du_r8a7792_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -311,7 +322,8 @@ static const struct rcar_du_device_info rcar_du_r8a7792_info = {
static const struct rcar_du_device_info rcar_du_r8a7794_info = {
.gen = 2,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
.channels_mask = BIT(1) | BIT(0),
@@ -333,7 +345,8 @@ static const struct rcar_du_device_info rcar_du_r8a7794_info = {
static const struct rcar_du_device_info rcar_du_r8a7795_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -366,7 +379,8 @@ static const struct rcar_du_device_info rcar_du_r8a7795_info = {
static const struct rcar_du_device_info rcar_du_r8a7796_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -395,7 +409,8 @@ static const struct rcar_du_device_info rcar_du_r8a7796_info = {
static const struct rcar_du_device_info rcar_du_r8a77965_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -424,7 +439,8 @@ static const struct rcar_du_device_info rcar_du_r8a77965_info = {
static const struct rcar_du_device_info rcar_du_r8a77970_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE
| RCAR_DU_FEATURE_INTERLACED
| RCAR_DU_FEATURE_TVM_SYNC,
@@ -448,7 +464,8 @@ static const struct rcar_du_device_info rcar_du_r8a77970_info = {
static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
.gen = 3,
- .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_CRTC_CLOCK
| RCAR_DU_FEATURE_VSP1_SOURCE,
.channels_mask = BIT(1) | BIT(0),
.routes = {
@@ -473,6 +490,25 @@ static const struct rcar_du_device_info rcar_du_r8a7799x_info = {
.lvds_clk_mask = BIT(1) | BIT(0),
};
+static const struct rcar_du_device_info rcar_du_r8a779a0_info = {
+ .gen = 3,
+ .features = RCAR_DU_FEATURE_CRTC_IRQ
+ | RCAR_DU_FEATURE_VSP1_SOURCE,
+ .channels_mask = BIT(1) | BIT(0),
+ .routes = {
+ /* R8A779A0 has two MIPI DSI outputs. */
+ [RCAR_DU_OUTPUT_DSI0] = {
+ .possible_crtcs = BIT(0),
+ .port = 0,
+ },
+ [RCAR_DU_OUTPUT_DSI1] = {
+ .possible_crtcs = BIT(1),
+ .port = 1,
+ },
+ },
+ .dsi_clk_mask = BIT(1) | BIT(0),
+};
+
static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a7742", .data = &rcar_du_r8a7790_info },
{ .compatible = "renesas,du-r8a7743", .data = &rzg1_du_r8a7743_info },
@@ -497,11 +533,30 @@ static const struct of_device_id rcar_du_of_table[] = {
{ .compatible = "renesas,du-r8a77980", .data = &rcar_du_r8a77970_info },
{ .compatible = "renesas,du-r8a77990", .data = &rcar_du_r8a7799x_info },
{ .compatible = "renesas,du-r8a77995", .data = &rcar_du_r8a7799x_info },
+ { .compatible = "renesas,du-r8a779a0", .data = &rcar_du_r8a779a0_info },
{ }
};
MODULE_DEVICE_TABLE(of, rcar_du_of_table);
+const char *rcar_du_output_name(enum rcar_du_output output)
+{
+ static const char * const names[] = {
+ [RCAR_DU_OUTPUT_DPAD0] = "DPAD0",
+ [RCAR_DU_OUTPUT_DPAD1] = "DPAD1",
+ [RCAR_DU_OUTPUT_LVDS0] = "LVDS0",
+ [RCAR_DU_OUTPUT_LVDS1] = "LVDS1",
+ [RCAR_DU_OUTPUT_HDMI0] = "HDMI0",
+ [RCAR_DU_OUTPUT_HDMI1] = "HDMI1",
+ [RCAR_DU_OUTPUT_TCON] = "TCON",
+ };
+
+ if (output >= ARRAY_SIZE(names) || !names[output])
+ return "UNKNOWN";
+
+ return names[output];
+}
+
/* -----------------------------------------------------------------------------
* DRM operations
*/
@@ -510,7 +565,11 @@ DEFINE_DRM_GEM_CMA_FOPS(rcar_du_fops);
static const struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(rcar_du_dumb_create),
+ .dumb_create = rcar_du_dumb_create,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_import_sg_table = rcar_du_gem_prime_import_sg_table,
+ .gem_prime_mmap = drm_gem_prime_mmap,
.fops = &rcar_du_fops,
.name = "rcar-du",
.desc = "Renesas R-Car Display Unit",
@@ -570,7 +629,7 @@ static void rcar_du_shutdown(struct platform_device *pdev)
static int rcar_du_probe(struct platform_device *pdev)
{
struct rcar_du_device *rcdu;
- struct resource *mem;
+ unsigned int mask;
int ret;
/* Allocate and initialize the R-Car device structure. */
@@ -585,11 +644,20 @@ static int rcar_du_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rcdu);
/* I/O resources */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ rcdu->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcdu->mmio))
return PTR_ERR(rcdu->mmio);
+ /*
+ * Set the DMA coherent mask to reflect the DU 32-bit DMA address space
+ * limitations. When sourcing frames from a VSP the DU doesn't perform
+ * any memory access so set the mask to 40 bits to accept all buffers.
+ */
+ mask = rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE) ? 40 : 32;
+ ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(mask));
+ if (ret)
+ return ret;
+
/* DRM/KMS objects */
ret = rcar_du_modeset_init(rcdu);
if (ret < 0) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.h b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
index 02ca2d0e1b55..101f42df86ea 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.h
@@ -26,13 +26,27 @@ struct drm_bridge;
struct drm_property;
struct rcar_du_device;
-#define RCAR_DU_FEATURE_CRTC_IRQ_CLOCK BIT(0) /* Per-CRTC IRQ and clock */
-#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(1) /* Has inputs from VSP1 */
-#define RCAR_DU_FEATURE_INTERLACED BIT(2) /* HW supports interlaced */
-#define RCAR_DU_FEATURE_TVM_SYNC BIT(3) /* Has TV switch/sync modes */
+#define RCAR_DU_FEATURE_CRTC_IRQ BIT(0) /* Per-CRTC IRQ */
+#define RCAR_DU_FEATURE_CRTC_CLOCK BIT(1) /* Per-CRTC clock */
+#define RCAR_DU_FEATURE_VSP1_SOURCE BIT(2) /* Has inputs from VSP1 */
+#define RCAR_DU_FEATURE_INTERLACED BIT(3) /* HW supports interlaced */
+#define RCAR_DU_FEATURE_TVM_SYNC BIT(4) /* Has TV switch/sync modes */
#define RCAR_DU_QUIRK_ALIGN_128B BIT(0) /* Align pitches to 128 bytes */
+enum rcar_du_output {
+ RCAR_DU_OUTPUT_DPAD0,
+ RCAR_DU_OUTPUT_DPAD1,
+ RCAR_DU_OUTPUT_DSI0,
+ RCAR_DU_OUTPUT_DSI1,
+ RCAR_DU_OUTPUT_HDMI0,
+ RCAR_DU_OUTPUT_HDMI1,
+ RCAR_DU_OUTPUT_LVDS0,
+ RCAR_DU_OUTPUT_LVDS1,
+ RCAR_DU_OUTPUT_TCON,
+ RCAR_DU_OUTPUT_MAX,
+};
+
/*
* struct rcar_du_output_routing - Output routing specification
* @possible_crtcs: bitmask of possible CRTCs for the output
@@ -56,6 +70,7 @@ struct rcar_du_output_routing {
* @routes: array of CRTC to output routes, indexed by output (RCAR_DU_OUTPUT_*)
* @num_lvds: number of internal LVDS encoders
* @dpll_mask: bit mask of DU channels equipped with a DPLL
+ * @dsi_clk_mask: bitmask of channels that can use the DSI clock as dot clock
* @lvds_clk_mask: bitmask of channels that can use the LVDS clock as dot clock
*/
struct rcar_du_device_info {
@@ -66,6 +81,7 @@ struct rcar_du_device_info {
struct rcar_du_output_routing routes[RCAR_DU_OUTPUT_MAX];
unsigned int num_lvds;
unsigned int dpll_mask;
+ unsigned int dsi_clk_mask;
unsigned int lvds_clk_mask;
};
@@ -126,4 +142,6 @@ static inline void rcar_du_write(struct rcar_du_device *rcdu, u32 reg, u32 data)
iowrite32(data, rcdu->mmio + reg);
}
+const char *rcar_du_output_name(enum rcar_du_output output);
+
#endif /* __RCAR_DU_DRV_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 4bf4e25d7f01..3977aaa1ab5a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -103,8 +103,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
return -ENOLINK;
}
- dev_dbg(rcdu->dev, "initializing encoder %pOF for output %u\n",
- enc_node, output);
+ dev_dbg(rcdu->dev, "initializing encoder %pOF for output %s\n",
+ enc_node, rcar_du_output_name(output));
renc = drmm_encoder_alloc(&rcdu->ddev, struct rcar_du_encoder, base,
&rcar_du_encoder_funcs, DRM_MODE_ENCODER_NONE,
@@ -118,8 +118,9 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
ret = drm_bridge_attach(&renc->base, bridge, NULL,
DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret) {
- dev_err(rcdu->dev, "failed to attach bridge for output %u\n",
- output);
+ dev_err(rcdu->dev,
+ "failed to attach bridge %pOF for output %s (%d)\n",
+ bridge->of_node, rcar_du_output_name(output), ret);
return ret;
}
@@ -127,7 +128,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
connector = drm_bridge_connector_init(&rcdu->ddev, &renc->base);
if (IS_ERR(connector)) {
dev_err(rcdu->dev,
- "failed to created connector for output %u\n", output);
+ "failed to created connector for output %s (%ld)\n",
+ rcar_du_output_name(output), PTR_ERR(connector));
return PTR_ERR(connector);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_group.c b/drivers/gpu/drm/rcar-du/rcar_du_group.c
index 88a783ceb3e9..8665a1dd2186 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_group.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_group.c
@@ -122,10 +122,12 @@ static void rcar_du_group_setup_didsr(struct rcar_du_group *rgrp)
didsr = DIDSR_CODE;
for (i = 0; i < num_crtcs; ++i, ++rcrtc) {
if (rcdu->info->lvds_clk_mask & BIT(rcrtc->index))
- didsr |= DIDSR_LCDS_LVDS0(i)
+ didsr |= DIDSR_LDCS_LVDS0(i)
| DIDSR_PDCS_CLK(i, 0);
+ else if (rcdu->info->dsi_clk_mask & BIT(rcrtc->index))
+ didsr |= DIDSR_LDCS_DSI(i);
else
- didsr |= DIDSR_LCDS_DCLKIN(i)
+ didsr |= DIDSR_LDCS_DCLKIN(i)
| DIDSR_PDCS_CLK(i, 0);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index fdb8a0d127ad..eacb1f17f747 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -19,6 +19,7 @@
#include <drm/drm_vblank.h>
#include <linux/device.h>
+#include <linux/dma-buf.h>
#include <linux/of_graph.h>
#include <linux/of_platform.h>
#include <linux/wait.h>
@@ -325,6 +326,51 @@ const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
* Frame buffer
*/
+static const struct drm_gem_object_funcs rcar_du_gem_funcs = {
+ .free = drm_gem_cma_free_object,
+ .print_info = drm_gem_cma_print_info,
+ .get_sg_table = drm_gem_cma_get_sg_table,
+ .vmap = drm_gem_cma_vmap,
+ .mmap = drm_gem_cma_mmap,
+ .vm_ops = &drm_gem_cma_vm_ops,
+};
+
+struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct rcar_du_device *rcdu = to_rcar_du_device(dev);
+ struct drm_gem_cma_object *cma_obj;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ if (!rcar_du_has(rcdu, RCAR_DU_FEATURE_VSP1_SOURCE))
+ return drm_gem_cma_prime_import_sg_table(dev, attach, sgt);
+
+ /* Create a CMA GEM buffer. */
+ cma_obj = kzalloc(sizeof(*cma_obj), GFP_KERNEL);
+ if (!cma_obj)
+ return ERR_PTR(-ENOMEM);
+
+ gem_obj = &cma_obj->base;
+ gem_obj->funcs = &rcar_du_gem_funcs;
+
+ drm_gem_private_object_init(dev, gem_obj, attach->dmabuf->size);
+ cma_obj->map_noncoherent = false;
+
+ ret = drm_gem_create_mmap_offset(gem_obj);
+ if (ret) {
+ drm_gem_object_release(gem_obj);
+ kfree(cma_obj);
+ return ERR_PTR(ret);
+ }
+
+ cma_obj->paddr = 0;
+ cma_obj->sgt = sgt;
+
+ return gem_obj;
+}
+
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
@@ -513,8 +559,8 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
ret = rcar_du_encoder_init(rcdu, output, entity);
if (ret && ret != -EPROBE_DEFER && ret != -ENOLINK)
dev_warn(rcdu->dev,
- "failed to initialize encoder %pOF on output %u (%d), skipping\n",
- entity, output, ret);
+ "failed to initialize encoder %pOF on output %s (%d), skipping\n",
+ entity, rcar_du_output_name(output), ret);
of_node_put(entity);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.h b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
index 8f5fff176754..789154e19535 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.h
@@ -12,10 +12,13 @@
#include <linux/types.h>
+struct dma_buf_attachment;
struct drm_file;
struct drm_device;
+struct drm_gem_object;
struct drm_mode_create_dumb;
struct rcar_du_device;
+struct sg_table;
struct rcar_du_format_info {
u32 fourcc;
@@ -34,4 +37,8 @@ int rcar_du_modeset_init(struct rcar_du_device *rcdu);
int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct drm_gem_object *rcar_du_gem_prime_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
#endif /* __RCAR_DU_KMS_H__ */
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_regs.h b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
index fb9964949368..1cdaa51eb9ac 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_regs.h
@@ -257,10 +257,11 @@
#define DIDSR 0x20028
#define DIDSR_CODE (0x7790 << 16)
-#define DIDSR_LCDS_DCLKIN(n) (0 << (8 + (n) * 2))
-#define DIDSR_LCDS_LVDS0(n) (2 << (8 + (n) * 2))
-#define DIDSR_LCDS_LVDS1(n) (3 << (8 + (n) * 2))
-#define DIDSR_LCDS_MASK(n) (3 << (8 + (n) * 2))
+#define DIDSR_LDCS_DCLKIN(n) (0 << (8 + (n) * 2))
+#define DIDSR_LDCS_DSI(n) (2 << (8 + (n) * 2)) /* V3U only */
+#define DIDSR_LDCS_LVDS0(n) (2 << (8 + (n) * 2))
+#define DIDSR_LDCS_LVDS1(n) (3 << (8 + (n) * 2))
+#define DIDSR_LDCS_MASK(n) (3 << (8 + (n) * 2))
#define DIDSR_PDCS_CLK(n, clk) (clk << ((n) * 2))
#define DIDSR_PDCS_MASK(n) (3 << ((n) * 2))
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 23e41c83c875..b7fc5b069cbc 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -187,17 +187,43 @@ int rcar_du_vsp_map_fb(struct rcar_du_vsp *vsp, struct drm_framebuffer *fb,
struct sg_table sg_tables[3])
{
struct rcar_du_device *rcdu = vsp->dev;
- unsigned int i;
+ unsigned int i, j;
int ret;
for (i = 0; i < fb->format->num_planes; ++i) {
struct drm_gem_cma_object *gem = drm_fb_cma_get_gem_obj(fb, i);
struct sg_table *sgt = &sg_tables[i];
- ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr, gem->paddr,
- gem->base.size);
- if (ret)
- goto fail;
+ if (gem->sgt) {
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ /*
+ * If the GEM buffer has a scatter gather table, it has
+ * been imported from a dma-buf and has no physical
+ * address as it might not be physically contiguous.
+ * Copy the original scatter gather table to map it to
+ * the VSP.
+ */
+ ret = sg_alloc_table(sgt, gem->sgt->orig_nents,
+ GFP_KERNEL);
+ if (ret)
+ goto fail;
+
+ src = gem->sgt->sgl;
+ dst = sgt->sgl;
+ for (j = 0; j < gem->sgt->orig_nents; ++j) {
+ sg_set_page(dst, sg_page(src), src->length,
+ src->offset);
+ src = sg_next(src);
+ dst = sg_next(dst);
+ }
+ } else {
+ ret = dma_get_sgtable(rcdu->dev, sgt, gem->vaddr,
+ gem->paddr, gem->base.size);
+ if (ret)
+ goto fail;
+ }
ret = vsp1_du_map_sg(vsp->vsp, sgt);
if (ret) {
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index b672c5bd72ee..72a272cfc11e 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -813,7 +813,6 @@ static int rcar_lvds_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_lvds *lvds;
- struct resource *mem;
int ret;
lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL);
@@ -836,8 +835,7 @@ static int rcar_lvds_probe(struct platform_device *pdev)
lvds->bridge.funcs = &rcar_lvds_bridge_ops;
lvds->bridge.of_node = pdev->dev.of_node;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ lvds->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->mmio))
return PTR_ERR(lvds->mmio);
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 558f1b58bd69..9f1ecefc3933 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -9,7 +9,6 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
- select DRM_RGB if ROCKCHIP_RGB
select GENERIC_PHY if ROCKCHIP_DW_MIPI_DSI
select GENERIC_PHY_MIPI_DPHY if ROCKCHIP_DW_MIPI_DSI
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
diff --git a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
index ade2327a10e2..8abb5ac26807 100644
--- a/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
+++ b/drivers/gpu/drm/rockchip/analogix_dp-rockchip.c
@@ -467,6 +467,6 @@ struct platform_driver rockchip_dp_driver = {
.driver = {
.name = "rockchip-dp",
.pm = &rockchip_dp_pm_ops,
- .of_match_table = of_match_ptr(rockchip_dp_dt_ids),
+ .of_match_table = rockchip_dp_dt_ids,
},
};
diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c
index 13c6b857158f..16497c31d9f9 100644
--- a/drivers/gpu/drm/rockchip/cdn-dp-core.c
+++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c
@@ -697,7 +697,6 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
struct device *dev = dp->dev;
struct device_node *np = dev->of_node;
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
dp->grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
if (IS_ERR(dp->grf)) {
@@ -705,8 +704,7 @@ static int cdn_dp_parse_dt(struct cdn_dp_device *dp)
return PTR_ERR(dp->grf);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dp->regs = devm_ioremap_resource(dev, res);
+ dp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dp->regs)) {
DRM_DEV_ERROR(dev, "ioremap reg failed\n");
return PTR_ERR(dp->regs);
diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
index a2262bee5aa4..a9acbcc420d0 100644
--- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
+++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c
@@ -14,7 +14,6 @@
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/pm_runtime.h>
-#include <linux/phy/phy.h>
#include <linux/regmap.h>
#include <video/mipi_display.h>
@@ -643,7 +642,7 @@ struct hstt {
}
/* Table A-3 High-Speed Transition Times */
-struct hstt hstt_table[] = {
+static struct hstt hstt_table[] = {
HSTT( 90, 32, 20, 26, 13),
HSTT( 100, 35, 23, 28, 14),
HSTT( 110, 32, 22, 26, 13),
diff --git a/drivers/gpu/drm/rockchip/inno_hdmi.c b/drivers/gpu/drm/rockchip/inno_hdmi.c
index 7afdc54eb3ec..046e8ec2a71c 100644
--- a/drivers/gpu/drm/rockchip/inno_hdmi.c
+++ b/drivers/gpu/drm/rockchip/inno_hdmi.c
@@ -810,7 +810,6 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
struct platform_device *pdev = to_platform_device(dev);
struct drm_device *drm = data;
struct inno_hdmi *hdmi;
- struct resource *iores;
int irq;
int ret;
@@ -821,8 +820,7 @@ static int inno_hdmi_bind(struct device *dev, struct device *master,
hdmi->dev = dev;
hdmi->drm_dev = drm;
- iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->regs = devm_ioremap_resource(dev, iores);
+ hdmi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->regs))
return PTR_ERR(hdmi->regs);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index bfba9793d238..e4ebe60b3cc1 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -138,9 +138,6 @@ static int rockchip_drm_bind(struct device *dev)
drm_dev->dev_private = private;
- INIT_LIST_HEAD(&private->psr_list);
- mutex_init(&private->psr_list_lock);
-
ret = rockchip_drm_init_iommu(drm_dev);
if (ret)
goto err_free;
@@ -275,10 +272,17 @@ int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
return -ENODEV;
/* status disabled will prevent creation of platform-devices */
+ if (!of_device_is_available(node)) {
+ of_node_put(node);
+ return -ENODEV;
+ }
+
pdev = of_find_device_by_node(node);
of_node_put(node);
+
+ /* enabled non-platform-devices can immediately return here */
if (!pdev)
- return -ENODEV;
+ return false;
/*
* All rockchip subdrivers have probed at this point, so
@@ -370,7 +374,7 @@ static int rockchip_drm_platform_of_probe(struct device *dev)
}
iommu = of_parse_phandle(port->parent, "iommus", 0);
- if (!iommu || !of_device_is_available(iommu->parent)) {
+ if (!iommu || !of_device_is_available(iommu)) {
DRM_DEV_DEBUG(dev,
"no iommu attached for %pOF, using non-iommu buffers\n",
port->parent);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index e33c2dcd0d4b..aa0909e8edf9 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -48,8 +48,6 @@ struct rockchip_drm_private {
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
- struct list_head psr_list;
- struct mutex psr_list_lock;
};
int rockchip_drm_dma_attach_device(struct drm_device *drm_dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index 551653940e39..be74c87a8be4 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -19,6 +19,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -439,11 +440,9 @@ struct drm_encoder_helper_funcs px30_lvds_encoder_helper_funcs = {
static int rk3288_lvds_probe(struct platform_device *pdev,
struct rockchip_lvds *lvds)
{
- struct resource *res;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- lvds->regs = devm_ioremap_resource(lvds->dev, res);
+ lvds->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(lvds->regs))
return PTR_ERR(lvds->regs);
@@ -612,9 +611,9 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
}
drm_encoder_helper_add(encoder, lvds->soc_data->helper_funcs);
+ connector = &lvds->connector;
if (lvds->panel) {
- connector = &lvds->connector;
connector->dpms = DRM_MODE_DPMS_OFF;
ret = drm_connector_init(drm_dev, connector,
&rockchip_lvds_connector_funcs,
@@ -627,17 +626,27 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
drm_connector_helper_add(connector,
&rockchip_lvds_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret < 0) {
- DRM_DEV_ERROR(drm_dev->dev,
- "failed to attach encoder: %d\n", ret);
- goto err_free_connector;
- }
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+
+ connector = drm_bridge_connector_init(lvds->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
}
pm_runtime_enable(dev);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index d691d9bef8e7..09be9678f2bd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -10,6 +10,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
@@ -27,6 +28,7 @@ struct rockchip_rgb {
struct drm_device *drm_dev;
struct drm_bridge *bridge;
struct drm_encoder encoder;
+ struct drm_connector connector;
int output_mode;
};
@@ -80,6 +82,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
int ret = 0, child_count = 0;
struct drm_panel *panel;
struct drm_bridge *bridge;
+ struct drm_connector *connector;
rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
if (!rgb)
@@ -142,12 +145,32 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
goto err_free_encoder;
+ connector = &rgb->connector;
+ connector = drm_bridge_connector_init(rgb->drm_dev, encoder);
+ if (IS_ERR(connector)) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize bridge connector: %pe\n",
+ connector);
+ ret = PTR_ERR(connector);
+ goto err_free_encoder;
+ }
+
+ ret = drm_connector_attach_encoder(connector, encoder);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach encoder: %d\n", ret);
+ goto err_free_connector;
+ }
+
return rgb;
+err_free_connector:
+ drm_connector_cleanup(connector);
err_free_encoder:
drm_encoder_cleanup(encoder);
return ERR_PTR(ret);
@@ -157,6 +180,7 @@ EXPORT_SYMBOL_GPL(rockchip_rgb_init);
void rockchip_rgb_fini(struct rockchip_rgb *rgb)
{
drm_panel_bridge_remove(rgb->bridge);
+ drm_connector_cleanup(&rgb->connector);
drm_encoder_cleanup(&rgb->encoder);
}
EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index ca7cc82125cb..1f7353f0684a 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -1124,6 +1124,6 @@ struct platform_driver vop_platform_driver = {
.remove = vop_remove,
.driver = {
.name = "rockchip-vop",
- .of_match_table = of_match_ptr(vop_driver_dt_match),
+ .of_match_table = vop_driver_dt_match,
},
};
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 79554aa4dbb1..27e1573af96e 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -45,8 +45,14 @@
* @guilty: atomic_t set to 1 when a job on this queue
* is found to be guilty causing a timeout
*
- * Note: the sched_list should have at least one element to schedule
- * the entity
+ * Note that the &sched_list must have at least one element to schedule the entity.
+ *
+ * For changing @priority later on at runtime see
+ * drm_sched_entity_set_priority(). For changing the set of schedulers
+ * @sched_list at runtime see drm_sched_entity_modify_sched().
+ *
+ * An entity is cleaned up by calling drm_sched_entity_fini(). See also
+ * drm_sched_entity_destroy().
*
* Returns 0 on success or a negative error code on failure.
*/
@@ -92,6 +98,11 @@ EXPORT_SYMBOL(drm_sched_entity_init);
* @sched_list: the list of new drm scheds which will replace
* existing entity->sched_list
* @num_sched_list: number of drm sched in sched_list
+ *
+ * Note that this must be called under the same common lock for @entity as
+ * drm_sched_job_arm() and drm_sched_entity_push_job(), or the driver needs to
+ * guarantee through some other means that this is never called while new jobs
+ * can be pushed to @entity.
*/
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
struct drm_gpu_scheduler **sched_list,
@@ -104,13 +115,6 @@ void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_modify_sched);
-/**
- * drm_sched_entity_is_idle - Check if entity is idle
- *
- * @entity: scheduler entity
- *
- * Returns true if the entity does not have any unscheduled jobs.
- */
static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb(); /* for list_empty to work without lock */
@@ -123,13 +127,7 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_is_ready - Check if entity is ready
- *
- * @entity: scheduler entity
- *
- * Return true if entity could provide a job.
- */
+/* Return true if entity could provide a job. */
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
{
if (spsc_queue_peek(&entity->job_queue) == NULL)
@@ -192,14 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
}
EXPORT_SYMBOL(drm_sched_entity_flush);
-/**
- * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
- *
- * @f: signaled fence
- * @cb: our callback structure
- *
- * Signal the scheduler finished fence when the entity in question is killed.
- */
+/* Signal the scheduler finished fence when the entity in question is killed. */
static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -211,14 +202,19 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
job->sched->ops->free_job(job);
}
-/**
- * drm_sched_entity_kill_jobs - Make sure all remaining jobs are killed
- *
- * @entity: entity which is cleaned up
- *
- * Makes sure that all remaining jobs in an entity are killed before it is
- * destroyed.
- */
+static struct dma_fence *
+drm_sched_job_dependency(struct drm_sched_job *job,
+ struct drm_sched_entity *entity)
+{
+ if (!xa_empty(&job->dependencies))
+ return xa_erase(&job->dependencies, job->last_dependency++);
+
+ if (job->sched->ops->dependency)
+ return job->sched->ops->dependency(job, entity);
+
+ return NULL;
+}
+
static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
{
struct drm_sched_job *job;
@@ -229,7 +225,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
struct drm_sched_fence *s_fence = job->s_fence;
/* Wait for all dependencies to avoid data corruptions */
- while ((f = job->sched->ops->dependency(job, entity)))
+ while ((f = drm_sched_job_dependency(job, entity)))
dma_fence_wait(f, false);
drm_sched_fence_scheduled(s_fence);
@@ -260,9 +256,11 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
*
* @entity: scheduler entity
*
- * This should be called after @drm_sched_entity_do_release. It goes over the
- * entity and signals all jobs with an error code if the process was killed.
+ * Cleans up @entity which has been initialized by drm_sched_entity_init().
*
+ * If there are potentially jobs still in flight or getting newly queued,
+ * drm_sched_entity_flush() must be called first. This function then goes over
+ * the entity and signals all jobs with an error code if the process was killed.
*/
void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
@@ -302,10 +300,10 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
/**
* drm_sched_entity_destroy - Destroy a context entity
- *
* @entity: scheduler entity
*
- * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
+ * Calls drm_sched_entity_flush() and drm_sched_entity_fini() as a
+ * convenience wrapper.
*/
void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
@@ -314,9 +312,7 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity)
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
-/*
- * drm_sched_entity_clear_dep - callback to clear the entities dependency
- */
+/* drm_sched_entity_clear_dep - callback to clear the entity's dependency */
static void drm_sched_entity_clear_dep(struct dma_fence *f,
struct dma_fence_cb *cb)
{
@@ -358,11 +354,7 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
}
EXPORT_SYMBOL(drm_sched_entity_set_priority);
-/**
- * drm_sched_entity_add_dependency_cb - add callback for the entities dependency
- *
- * @entity: entity with dependency
- *
+/*
* Add a callback to the current dependency of the entity to wake up the
* scheduler when the entity becomes available.
*/
@@ -410,16 +402,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
return false;
}
-/**
- * drm_sched_entity_pop_job - get a ready to be scheduled job from the entity
- *
- * @entity: entity to get the job from
- *
- * Process all dependencies and try to get one job from the entities queue.
- */
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job;
sched_job = to_drm_sched_job(spsc_queue_peek(&entity->job_queue));
@@ -427,7 +411,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
return NULL;
while ((entity->dependency =
- sched->ops->dependency(sched_job, entity))) {
+ drm_sched_job_dependency(sched_job, entity))) {
trace_drm_sched_job_wait_dep(sched_job, entity->dependency);
if (drm_sched_entity_add_dependency_cb(entity))
@@ -439,30 +423,45 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
dma_fence_set_error(&sched_job->s_fence->finished, -ECANCELED);
dma_fence_put(entity->last_scheduled);
+
entity->last_scheduled = dma_fence_get(&sched_job->s_fence->finished);
+ /*
+ * If the queue is empty we allow drm_sched_entity_select_rq() to
+ * locklessly access ->last_scheduled. This only works if we set the
+ * pointer before we dequeue and if we add a write barrier here.
+ */
+ smp_wmb();
+
spsc_queue_pop(&entity->job_queue);
return sched_job;
}
-/**
- * drm_sched_entity_select_rq - select a new rq for the entity
- *
- * @entity: scheduler entity
- *
- * Check all prerequisites and select a new rq for the entity for load
- * balancing.
- */
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
- if (spsc_queue_count(&entity->job_queue) || !entity->sched_list)
+ /* single possible engine and already selected */
+ if (!entity->sched_list)
return;
- fence = READ_ONCE(entity->last_scheduled);
+ /* queue non-empty, stay on the same engine */
+ if (spsc_queue_count(&entity->job_queue))
+ return;
+
+ /*
+ * Only when the queue is empty are we guaranteed that the scheduler
+ * thread cannot change ->last_scheduled. To enforce ordering we need
+ * a read barrier here. See drm_sched_entity_pop_job() for the other
+ * side.
+ */
+ smp_rmb();
+
+ fence = entity->last_scheduled;
+
+ /* stay on the same engine if the previous job hasn't finished */
if (fence && !dma_fence_is_signaled(fence))
return;
@@ -481,19 +480,18 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
/**
* drm_sched_entity_push_job - Submit a job to the entity's job queue
- *
* @sched_job: job to submit
- * @entity: scheduler entity
*
- * Note: To guarantee that the order of insertion to queue matches
- * the job's fence sequence number this function should be
- * called with drm_sched_job_init under common lock.
+ * Note: To guarantee that the order of insertion into the queue matches the
+ * job's fence sequence number, this function should be called with
+ * drm_sched_job_arm() under a common lock for the struct drm_sched_entity that
+ * was set up for @sched_job in drm_sched_job_init().
*
* Returns 0 for success, negative error code otherwise.
*/
-void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
- struct drm_sched_entity *entity)
+void drm_sched_entity_push_job(struct drm_sched_job *sched_job)
{
+ struct drm_sched_entity *entity = sched_job->entity;
bool first;
trace_drm_sched_job(sched_job, entity);
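The locking rule spelled out above is easiest to see as a sketch: drm_sched_job_arm() and drm_sched_entity_push_job() have to run back to back under one lock so that the fence sequence numbers match the queue insertion order. A minimal sketch, assuming a hypothetical per-entity driver mutex called submit_lock:

	mutex_lock(&submit_lock);
	drm_sched_job_arm(job);			/* assigns the seqno, initializes s_fence */
	drm_sched_entity_push_job(job);		/* queues the job in the same order */
	mutex_unlock(&submit_lock);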
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 69de2c76731f..7fd869520ef2 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -50,26 +50,12 @@ static void __exit drm_sched_fence_slab_fini(void)
void drm_sched_fence_scheduled(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->scheduled);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->scheduled,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->scheduled,
- "was already signaled\n");
+ dma_fence_signal(&fence->scheduled);
}
void drm_sched_fence_finished(struct drm_sched_fence *fence)
{
- int ret = dma_fence_signal(&fence->finished);
-
- if (!ret)
- DMA_FENCE_TRACE(&fence->finished,
- "signaled from irq context\n");
- else
- DMA_FENCE_TRACE(&fence->finished,
- "was already signaled\n");
+ dma_fence_signal(&fence->finished);
}
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
@@ -83,19 +69,28 @@ static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
return (const char *)fence->sched->name;
}
+static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
+{
+ struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
+ struct drm_sched_fence *fence = to_drm_sched_fence(f);
+
+ if (!WARN_ON_ONCE(!fence))
+ kmem_cache_free(sched_fence_slab, fence);
+}
+
/**
- * drm_sched_fence_free - free up the fence memory
+ * drm_sched_fence_free - free up an uninitialized fence
*
- * @rcu: RCU callback head
+ * @fence: fence to free
*
- * Free up the fence memory after the RCU grace period.
+ * Free up the fence memory. Should only be used if drm_sched_fence_init()
+ * has not been called yet.
*/
-static void drm_sched_fence_free(struct rcu_head *rcu)
+void drm_sched_fence_free(struct drm_sched_fence *fence)
{
- struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
- struct drm_sched_fence *fence = to_drm_sched_fence(f);
-
- kmem_cache_free(sched_fence_slab, fence);
+ /* This function should not be called if the fence has been initialized. */
+ if (!WARN_ON_ONCE(fence->sched))
+ kmem_cache_free(sched_fence_slab, fence);
}
/**
@@ -111,7 +106,7 @@ static void drm_sched_fence_release_scheduled(struct dma_fence *f)
struct drm_sched_fence *fence = to_drm_sched_fence(f);
dma_fence_put(fence->parent);
- call_rcu(&fence->finished.rcu, drm_sched_fence_free);
+ call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}
/**
@@ -152,27 +147,32 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
}
EXPORT_SYMBOL(to_drm_sched_fence);
-struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
- void *owner)
+struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
+ void *owner)
{
struct drm_sched_fence *fence = NULL;
- unsigned seq;
fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
if (fence == NULL)
return NULL;
fence->owner = owner;
- fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
+ return fence;
+}
+
+void drm_sched_fence_init(struct drm_sched_fence *fence,
+ struct drm_sched_entity *entity)
+{
+ unsigned seq;
+
+ fence->sched = entity->rq->sched;
seq = atomic_inc_return(&entity->fence_seq);
dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
&fence->lock, entity->fence_context, seq);
dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
&fence->lock, entity->fence_context + 1, seq);
-
- return fence;
}
module_init(drm_sched_fence_slab_init);
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 67382621b429..042c16b5d54a 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -48,9 +48,11 @@
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/completion.h>
+#include <linux/dma-resv.h>
#include <uapi/linux/sched/types.h>
#include <drm/drm_print.h>
+#include <drm/drm_gem.h>
#include <drm/gpu_scheduler.h>
#include <drm/spsc_queue.h>
@@ -564,7 +566,6 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
/**
* drm_sched_job_init - init a scheduler job
- *
* @job: scheduler job to init
* @entity: scheduler entity to use
* @owner: job owner for debugging
@@ -572,43 +573,193 @@ EXPORT_SYMBOL(drm_sched_resubmit_jobs_ext);
* Refer to drm_sched_entity_push_job() documentation
* for locking considerations.
*
+ * Drivers must make sure to call drm_sched_job_cleanup() if this function
+ * returns successfully, even when @job is aborted before drm_sched_job_arm()
+ * is called.
+ *
+ * WARNING: amdgpu abuses &drm_sched.ready to signal when the hardware
+ * has died, which can mean that there's no valid runqueue for @entity.
+ * This function returns -ENOENT in this case (which probably should be -EIO as
+ * a more meaningful return value).
+ *
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner)
{
- struct drm_gpu_scheduler *sched;
-
drm_sched_entity_select_rq(entity);
if (!entity->rq)
return -ENOENT;
- sched = entity->rq->sched;
-
- job->sched = sched;
job->entity = entity;
- job->s_priority = entity->rq - sched->sched_rq;
- job->s_fence = drm_sched_fence_create(entity, owner);
+ job->s_fence = drm_sched_fence_alloc(entity, owner);
if (!job->s_fence)
return -ENOMEM;
- job->id = atomic64_inc_return(&sched->job_id_count);
INIT_LIST_HEAD(&job->list);
+ xa_init_flags(&job->dependencies, XA_FLAGS_ALLOC);
+
return 0;
}
EXPORT_SYMBOL(drm_sched_job_init);
/**
- * drm_sched_job_cleanup - clean up scheduler job resources
+ * drm_sched_job_arm - arm a scheduler job for execution
+ * @job: scheduler job to arm
+ *
+ * This arms a scheduler job for execution. Specifically it initializes the
+ * &drm_sched_job.s_fence of @job, so that it can be attached to struct dma_resv
+ * or other places that need to track the completion of this job.
+ *
+ * Refer to drm_sched_entity_push_job() documentation for locking
+ * considerations.
+ *
+ * This can only be called if drm_sched_job_init() succeeded.
+ */
+void drm_sched_job_arm(struct drm_sched_job *job)
+{
+ struct drm_gpu_scheduler *sched;
+ struct drm_sched_entity *entity = job->entity;
+
+ BUG_ON(!entity);
+
+ sched = entity->rq->sched;
+
+ job->sched = sched;
+ job->s_priority = entity->rq - sched->sched_rq;
+ job->id = atomic64_inc_return(&sched->job_id_count);
+
+ drm_sched_fence_init(job->s_fence, job->entity);
+}
+EXPORT_SYMBOL(drm_sched_job_arm);
+
+/**
+ * drm_sched_job_add_dependency - adds the fence as a job dependency
+ * @job: scheduler job to add the dependencies to
+ * @fence: the dma_fence to add to the list of dependencies.
+ *
+ * Note that @fence is consumed in both the success and error cases.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_dependency(struct drm_sched_job *job,
+ struct dma_fence *fence)
+{
+ struct dma_fence *entry;
+ unsigned long index;
+ u32 id = 0;
+ int ret;
+
+ if (!fence)
+ return 0;
+
+ /* Deduplicate if we already depend on a fence from the same context.
+ * This lets the size of the array of deps scale with the number of
+ * engines involved, rather than the number of BOs.
+ */
+ xa_for_each(&job->dependencies, index, entry) {
+ if (entry->context != fence->context)
+ continue;
+
+ if (dma_fence_is_later(fence, entry)) {
+ dma_fence_put(entry);
+ xa_store(&job->dependencies, index, fence, GFP_KERNEL);
+ } else {
+ dma_fence_put(fence);
+ }
+ return 0;
+ }
+
+ ret = xa_alloc(&job->dependencies, &id, fence, xa_limit_32b, GFP_KERNEL);
+ if (ret != 0)
+ dma_fence_put(fence);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_dependency);
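A minimal usage sketch, mirroring the v3d conversion later in this diff: look up a fence from a syncobj and hand it to the job. Because drm_sched_job_add_dependency() consumes the reference in both the success and the error case, the caller never needs a matching dma_fence_put(); file_priv and handle here are assumed caller context, and job is the caller's struct drm_sched_job:

	struct dma_fence *in_fence = NULL;
	int ret;

	ret = drm_syncobj_find_fence(file_priv, handle, 0, 0, &in_fence);
	if (ret)
		return ret;

	/* Takes ownership of in_fence, even on failure. */
	return drm_sched_job_add_dependency(job, in_fence);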
+
+/**
+ * drm_sched_job_add_implicit_dependencies - adds implicit dependencies as job
+ * dependencies
+ * @job: scheduler job to add the dependencies to
+ * @obj: the gem object to add new dependencies from.
+ * @write: whether the job might write the object (so we need to depend on
+ * shared fences in the reservation object).
*
+ * This should be called after drm_gem_lock_reservations() on your array of
+ * GEM objects used in the job but before updating the reservations with your
+ * own fences.
+ *
+ * Returns:
+ * 0 on success, or an error on failing to expand the array.
+ */
+int drm_sched_job_add_implicit_dependencies(struct drm_sched_job *job,
+ struct drm_gem_object *obj,
+ bool write)
+{
+ int ret;
+ struct dma_fence **fences;
+ unsigned int i, fence_count;
+
+ if (!write) {
+ struct dma_fence *fence = dma_resv_get_excl_unlocked(obj->resv);
+
+ return drm_sched_job_add_dependency(job, fence);
+ }
+
+ ret = dma_resv_get_fences(obj->resv, NULL, &fence_count, &fences);
+ if (ret || !fence_count)
+ return ret;
+
+ for (i = 0; i < fence_count; i++) {
+ ret = drm_sched_job_add_dependency(job, fences[i]);
+ if (ret)
+ break;
+ }
+
+ for (; i < fence_count; i++)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+ return ret;
+}
+EXPORT_SYMBOL(drm_sched_job_add_implicit_dependencies);
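The v3d hunk further down uses this exactly as described: with all reservations held, one call per BO, before the driver attaches its own fences. A trimmed sketch, where job->bo, job->bo_count and acquire_ctx are assumed driver-side state and the driver job embeds a struct drm_sched_job named base:

	ret = drm_gem_lock_reservations(job->bo, job->bo_count, &acquire_ctx);
	if (ret)
		return ret;

	for (i = 0; i < job->bo_count; i++) {
		ret = drm_sched_job_add_implicit_dependencies(&job->base,
							      job->bo[i],
							      true);
		if (ret) {
			drm_gem_unlock_reservations(job->bo, job->bo_count,
						    &acquire_ctx);
			return ret;
		}
	}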
+
+
+/**
+ * drm_sched_job_cleanup - clean up scheduler job resources
* @job: scheduler job to clean up
+ *
+ * Cleans up the resources allocated with drm_sched_job_init().
+ *
+ * Drivers should call this from their error unwind code if @job is aborted
+ * before drm_sched_job_arm() is called.
+ *
+ * After that point of no return, @job is committed to be executed by the
+ * scheduler, and this function should be called from the
+ * &drm_sched_backend_ops.free_job callback.
*/
void drm_sched_job_cleanup(struct drm_sched_job *job)
{
- dma_fence_put(&job->s_fence->finished);
+ struct dma_fence *fence;
+ unsigned long index;
+
+ if (kref_read(&job->s_fence->finished.refcount)) {
+ /* drm_sched_job_arm() has been called */
+ dma_fence_put(&job->s_fence->finished);
+ } else {
+ /* aborted job before committing to run it */
+ drm_sched_fence_free(job->s_fence);
+ }
+
job->s_fence = NULL;
+
+ xa_for_each(&job->dependencies, index, fence) {
+ dma_fence_put(fence);
+ }
+ xa_destroy(&job->dependencies);
+
}
EXPORT_SYMBOL(drm_sched_job_cleanup);
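Putting the init/arm/cleanup rules together, a driver submit path now looks roughly like the sketch below (the dependency fence and the error label are illustrative only). Before drm_sched_job_arm() an aborted job is unwound with drm_sched_job_cleanup(); after drm_sched_entity_push_job() the scheduler owns the job and drm_sched_job_cleanup() is instead called from the &drm_sched_backend_ops.free_job callback:

	ret = drm_sched_job_init(job, entity, owner);
	if (ret)
		return ret;

	ret = drm_sched_job_add_dependency(job, in_fence);
	if (ret)
		goto err_cleanup;		/* still before the point of no return */

	drm_sched_job_arm(job);			/* point of no return */
	drm_sched_entity_push_job(job);
	return 0;

err_cleanup:
	drm_sched_job_cleanup(job);		/* frees the unarmed fence and dependencies */
	return ret;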
@@ -676,15 +827,6 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job, *next;
- /*
- * Don't destroy jobs while the timeout worker is running OR thread
- * is being parked and hence assumed to not touch pending_list
- */
- if ((sched->timeout != MAX_SCHEDULE_TIMEOUT &&
- !cancel_delayed_work(&sched->work_tdr)) ||
- kthread_should_park())
- return NULL;
-
spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->pending_list,
@@ -693,17 +835,21 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
+
+ /* cancel this job's TO timer */
+ cancel_delayed_work(&sched->work_tdr);
/* make the scheduled timestamp more accurate */
next = list_first_entry_or_null(&sched->pending_list,
typeof(*next), list);
- if (next)
+
+ if (next) {
next->s_fence->scheduled.timestamp =
job->s_fence->finished.timestamp;
-
+ /* start TO timer for next job */
+ drm_sched_start_timeout(sched);
+ }
} else {
job = NULL;
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
}
spin_unlock(&sched->job_list_lock);
@@ -791,11 +937,8 @@ static int drm_sched_main(void *param)
(entity = drm_sched_select_entity(sched))) ||
kthread_should_stop());
- if (cleanup_job) {
+ if (cleanup_job)
sched->ops->free_job(cleanup_job);
- /* queue timeout for next job */
- drm_sched_start_timeout(sched);
- }
if (!entity)
continue;
diff --git a/drivers/gpu/drm/selftests/test-drm_damage_helper.c b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
index 1c19a5d3eefb..8d8d8e214c28 100644
--- a/drivers/gpu/drm/selftests/test-drm_damage_helper.c
+++ b/drivers/gpu/drm/selftests/test-drm_damage_helper.c
@@ -30,6 +30,7 @@ static void mock_setup(struct drm_plane_state *state)
mock_device.driver = &mock_driver;
mock_device.mode_config.prop_fb_damage_clips = &mock_prop;
mock_plane.dev = &mock_device;
+ mock_obj_props.count = 0;
mock_plane.base.properties = &mock_obj_props;
mock_prop.base.id = 1; /* 0 is an invalid id */
mock_prop.dev = &mock_device;
diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
index 7db01904d18d..80078a9fd7f6 100644
--- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c
+++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c
@@ -192,7 +192,6 @@ static int shmob_drm_probe(struct platform_device *pdev)
struct shmob_drm_platform_data *pdata = pdev->dev.platform_data;
struct shmob_drm_device *sdev;
struct drm_device *ddev;
- struct resource *res;
unsigned int i;
int ret;
@@ -215,8 +214,7 @@ static int shmob_drm_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, sdev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- sdev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ sdev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sdev->mmio))
return PTR_ERR(sdev->mmio);
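The devm_platform_ioremap_resource() conversions in this and the following sun4i/v3d hunks are mechanical; the helper simply wraps the two-step lookup, so

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);

becomes

	base = devm_platform_ioremap_resource(pdev, 0);

with the same ERR_PTR-based error handling for the caller.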
diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c
index d09b08995b12..3c61ba8b43e0 100644
--- a/drivers/gpu/drm/sti/sti_hqvdp.c
+++ b/drivers/gpu/drm/sti/sti_hqvdp.c
@@ -927,12 +927,12 @@ static void sti_hqvdp_start_xp70(struct sti_hqvdp *hqvdp)
header = (struct fw_header *)firmware->data;
if (firmware->size < sizeof(*header)) {
- DRM_ERROR("Invalid firmware size (%d)\n", firmware->size);
+ DRM_ERROR("Invalid firmware size (%zu)\n", firmware->size);
goto out;
}
if ((sizeof(*header) + header->rd_size + header->wr_size +
header->pmem_size + header->dmem_size) != firmware->size) {
- DRM_ERROR("Invalid fmw structure (%d+%d+%d+%d+%d != %d)\n",
+ DRM_ERROR("Invalid fmw structure (%zu+%d+%d+%d+%d != %zu)\n",
sizeof(*header), header->rd_size, header->wr_size,
header->pmem_size, header->dmem_size,
firmware->size);
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 195de30eb90c..dbdee954692a 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -845,7 +845,7 @@ static void ltdc_plane_atomic_update(struct drm_plane *plane,
LXCFBLR_CFBLL | LXCFBLR_CFBP, val);
/* Specifies the constant alpha value */
- val = CONSTA_MAX;
+ val = newstate->alpha >> 8;
reg_update_bits(ldev->regs, LTDC_L1CACR + lofs, LXCACR_CONSTA, val);
/* Specifies the blending factors */
@@ -997,6 +997,8 @@ static struct drm_plane *ltdc_plane_create(struct drm_device *ddev,
drm_plane_helper_add(plane, &ltdc_plane_helper_funcs);
+ drm_plane_create_alpha_property(plane);
+
DRM_DEBUG_DRIVER("plane:%d created\n", plane->base.id);
return plane;
@@ -1024,6 +1026,8 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
return -EINVAL;
}
+ drm_plane_create_zpos_immutable_property(primary, 0);
+
ret = drm_crtc_init_with_planes(ddev, crtc, primary, NULL,
&ltdc_crtc_funcs, NULL);
if (ret) {
@@ -1046,6 +1050,7 @@ static int ltdc_crtc_init(struct drm_device *ddev, struct drm_crtc *crtc)
DRM_ERROR("Can not create overlay plane %d\n", i);
goto cleanup;
}
+ drm_plane_create_zpos_immutable_property(overlay, i);
}
return 0;
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index bf8cfefa0365..f52ff4e6c662 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -782,7 +782,6 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_backend *backend;
const struct sun4i_backend_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int i, ret;
@@ -815,8 +814,7 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
if (IS_ERR(backend->frontend))
dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_frontend.c b/drivers/gpu/drm/sun4i/sun4i_frontend.c
index edb60ae0a9b7..56ae38389db0 100644
--- a/drivers/gpu/drm/sun4i/sun4i_frontend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_frontend.c
@@ -561,7 +561,6 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
struct sun4i_frontend *frontend;
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
- struct resource *res;
void __iomem *regs;
frontend = devm_kzalloc(dev, sizeof(*frontend), GFP_KERNEL);
@@ -576,8 +575,7 @@ static int sun4i_frontend_bind(struct device *dev, struct device *master,
if (!frontend->data)
return -ENODEV;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
index 2f2c9f0a1071..3799a745b7dd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
+++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c
@@ -489,7 +489,6 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
struct cec_connector_info conn_info;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_hdmi *hdmi;
- struct resource *res;
u32 reg;
int ret;
@@ -504,8 +503,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master,
if (!hdmi->variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->base = devm_ioremap_resource(dev, res);
+ hdmi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hdmi->base)) {
dev_err(dev, "Couldn't map the HDMI encoder registers\n");
return PTR_ERR(hdmi->base);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 9f06dec0fc61..88db2d2a9336 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -841,11 +841,9 @@ static int sun4i_tcon_init_regmap(struct device *dev,
struct sun4i_tcon *tcon)
{
struct platform_device *pdev = to_platform_device(dev);
- struct resource *res;
void __iomem *regs;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun4i_tv.c b/drivers/gpu/drm/sun4i/sun4i_tv.c
index cb91bc11a0c7..94883abe0dfd 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tv.c
@@ -538,7 +538,6 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun4i_tv *tv;
- struct resource *res;
void __iomem *regs;
int ret;
@@ -548,8 +547,7 @@ static int sun4i_tv_bind(struct device *dev, struct device *master,
tv->drv = drv;
dev_set_drvdata(dev, tv);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs)) {
dev_err(dev, "Couldn't map the TV encoder registers\n");
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index 4f5efcace68e..527c7b2474da 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -1104,7 +1104,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const char *bus_clk_name = NULL;
struct sun6i_dsi *dsi;
- struct resource *res;
void __iomem *base;
int ret;
@@ -1120,18 +1119,16 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
"allwinner,sun6i-a31-mipi-dsi"))
bus_clk_name = "bus";
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
dev_err(dev, "Couldn't map the DSI encoder registers\n");
return PTR_ERR(base);
}
dsi->regulator = devm_regulator_get(dev, "vcc-dsi");
- if (IS_ERR(dsi->regulator)) {
- dev_err(dev, "Couldn't get VCC-DSI supply\n");
- return PTR_ERR(dsi->regulator);
- }
+ if (IS_ERR(dsi->regulator))
+ return dev_err_probe(dev, PTR_ERR(dsi->regulator),
+ "Couldn't get VCC-DSI supply\n");
dsi->reset = devm_reset_control_get_shared(dev, NULL);
if (IS_ERR(dsi->reset)) {
@@ -1146,10 +1143,9 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
}
dsi->bus_clk = devm_clk_get(dev, bus_clk_name);
- if (IS_ERR(dsi->bus_clk)) {
- dev_err(dev, "Couldn't get the DSI bus clock\n");
- return PTR_ERR(dsi->bus_clk);
- }
+ if (IS_ERR(dsi->bus_clk))
+ return dev_err_probe(dev, PTR_ERR(dsi->bus_clk),
+ "Couldn't get the DSI bus clock\n");
ret = regmap_mmio_attach_clk(dsi->regs, dsi->bus_clk);
if (ret)
diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h
index a55a38ad849c..022cafa6c06c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_csc.h
+++ b/drivers/gpu/drm/sun4i/sun8i_csc.h
@@ -16,8 +16,8 @@ struct sun8i_mixer;
#define CCSC10_OFFSET 0xA0000
#define CCSC11_OFFSET 0xF0000
-#define SUN8I_CSC_CTRL(base) (base + 0x0)
-#define SUN8I_CSC_COEFF(base, i) (base + 0x10 + 4 * i)
+#define SUN8I_CSC_CTRL(base) ((base) + 0x0)
+#define SUN8I_CSC_COEFF(base, i) ((base) + 0x10 + 4 * (i))
#define SUN8I_CSC_CTRL_EN BIT(0)
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 016b877051da..a8d75fd7e9f4 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -153,22 +153,19 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return -EPROBE_DEFER;
hdmi->rst_ctrl = devm_reset_control_get(dev, "ctrl");
- if (IS_ERR(hdmi->rst_ctrl)) {
- dev_err(dev, "Could not get ctrl reset control\n");
- return PTR_ERR(hdmi->rst_ctrl);
- }
+ if (IS_ERR(hdmi->rst_ctrl))
+ return dev_err_probe(dev, PTR_ERR(hdmi->rst_ctrl),
+ "Could not get ctrl reset control\n");
hdmi->clk_tmds = devm_clk_get(dev, "tmds");
- if (IS_ERR(hdmi->clk_tmds)) {
- dev_err(dev, "Couldn't get the tmds clock\n");
- return PTR_ERR(hdmi->clk_tmds);
- }
+ if (IS_ERR(hdmi->clk_tmds))
+ return dev_err_probe(dev, PTR_ERR(hdmi->clk_tmds),
+ "Couldn't get the tmds clock\n");
hdmi->regulator = devm_regulator_get(dev, "hvcc");
- if (IS_ERR(hdmi->regulator)) {
- dev_err(dev, "Couldn't get regulator\n");
- return PTR_ERR(hdmi->regulator);
- }
+ if (IS_ERR(hdmi->regulator))
+ return dev_err_probe(dev, PTR_ERR(hdmi->regulator),
+ "Couldn't get regulator\n");
ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev);
if (!ret) {
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 5b42cf25cc86..f5e8aeaa3cdf 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -337,7 +337,6 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct sun4i_drv *drv = drm->dev_private;
struct sun8i_mixer *mixer;
- struct resource *res;
void __iomem *regs;
unsigned int base;
int plane_cnt;
@@ -390,8 +389,7 @@ static int sun8i_mixer_bind(struct device *dev, struct device *master,
if (!mixer->cfg)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 75d8e60c149d..1b9b8b48f4a7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -128,7 +128,6 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
struct clk_hw_onecell_data *clk_data;
struct sun8i_tcon_top *tcon_top;
const struct sun8i_tcon_top_quirks *quirks;
- struct resource *res;
void __iomem *regs;
int ret, i;
@@ -158,8 +157,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
return PTR_ERR(tcon_top->bus);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
tcon_top->regs = regs;
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index cae8b8cbe9dd..c04dda8353fd 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -44,7 +44,7 @@ int tegra_fb_get_tiling(struct drm_framebuffer *framebuffer,
{
uint64_t modifier = framebuffer->modifier;
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if ((modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) == 0)
tiling->sector_layout = TEGRA_BO_SECTOR_LAYOUT_TEGRA;
else
diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c
index e00ec3f40ec8..16a1cdc28657 100644
--- a/drivers/gpu/drm/tegra/plane.c
+++ b/drivers/gpu/drm/tegra/plane.c
@@ -113,7 +113,7 @@ static bool tegra_plane_format_mod_supported(struct drm_plane *plane,
return true;
/* check for the sector layout bit */
- if ((modifier >> 56) == DRM_FORMAT_MOD_VENDOR_NVIDIA) {
+ if (fourcc_mod_is_vendor(modifier, NVIDIA)) {
if (modifier & DRM_FORMAT_MOD_NVIDIA_SECTOR_LAYOUT) {
if (!tegra_plane_supports_sector_layout(plane))
return false;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 6b03f89a98d4..3ddb7c710a3d 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -186,10 +186,8 @@ static void tilcdc_fini(struct drm_device *dev)
if (priv->mmio)
iounmap(priv->mmio);
- if (priv->wq) {
- flush_workqueue(priv->wq);
+ if (priv->wq)
destroy_workqueue(priv->wq);
- }
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index d31be274a2bd..1ceb93fbdc50 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -44,7 +44,7 @@ config DRM_CIRRUS_QEMU
config DRM_GM12U320
tristate "GM12U320 driver for USB projectors"
- depends on DRM && USB
+ depends on DRM && USB && MMU
select DRM_KMS_HELPER
select DRM_GEM_SHMEM_HELPER
help
@@ -53,7 +53,7 @@ config DRM_GM12U320
config DRM_SIMPLEDRM
tristate "Simple framebuffer driver"
- depends on DRM
+ depends on DRM && MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c
index 73415fa9ae0f..2ce3bd903b70 100644
--- a/drivers/gpu/drm/tiny/bochs.c
+++ b/drivers/gpu/drm/tiny/bochs.c
@@ -63,6 +63,7 @@ MODULE_PARM_DESC(defy, "default y resolution");
enum bochs_types {
BOCHS_QEMU_STDVGA,
+ BOCHS_SIMICS,
BOCHS_UNKNOWN,
};
@@ -695,6 +696,13 @@ static const struct pci_device_id bochs_pci_tbl[] = {
.subdevice = PCI_ANY_ID,
.driver_data = BOCHS_UNKNOWN,
},
+ {
+ .vendor = 0x4321,
+ .device = 0x1111,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = BOCHS_SIMICS,
+ },
{ /* end of list */ }
};
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index bb9e02c31946..d62b2013c367 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -69,7 +69,17 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
}
}
-static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+static inline void ttm_bo_move_to_pinned(struct ttm_buffer_object *bo)
+{
+ struct ttm_device *bdev = bo->bdev;
+
+ list_move_tail(&bo->lru, &bdev->pinned);
+
+ if (bdev->funcs->del_from_lru_notify)
+ bdev->funcs->del_from_lru_notify(bo);
+}
+
+static inline void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_device *bdev = bo->bdev;
@@ -98,7 +108,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
dma_resv_assert_held(bo->base.resv);
if (bo->pin_count) {
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
return;
}
@@ -342,7 +352,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
return ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
list_del_init(&bo->ddestroy);
spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
@@ -914,57 +924,11 @@ out:
return ret;
}
-static bool ttm_bo_places_compat(const struct ttm_place *places,
- unsigned num_placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- unsigned i;
-
- if (mem->placement & TTM_PL_FLAG_TEMPORARY)
- return false;
-
- for (i = 0; i < num_placement; i++) {
- const struct ttm_place *heap = &places[i];
-
- if ((mem->start < heap->fpfn ||
- (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
- continue;
-
- *new_flags = heap->flags;
- if ((mem->mem_type == heap->mem_type) &&
- (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) ||
- (mem->placement & TTM_PL_FLAG_CONTIGUOUS)))
- return true;
- }
- return false;
-}
-
-bool ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_resource *mem,
- uint32_t *new_flags)
-{
- if (ttm_bo_places_compat(placement->placement, placement->num_placement,
- mem, new_flags))
- return true;
-
- if ((placement->busy_placement != placement->placement ||
- placement->num_busy_placement > placement->num_placement) &&
- ttm_bo_places_compat(placement->busy_placement,
- placement->num_busy_placement,
- mem, new_flags))
- return true;
-
- return false;
-}
-EXPORT_SYMBOL(ttm_bo_mem_compat);
-
int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
int ret;
- uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
@@ -977,7 +941,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- if (!ttm_bo_mem_compat(placement, bo->resource, &new_flags)) {
+ if (!ttm_resource_compat(bo->resource, placement)) {
ret = ttm_bo_move_buffer(bo, placement, ctx);
if (ret)
return ret;
@@ -1151,8 +1115,8 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return -EBUSY;
if (!bo->ttm || !ttm_tt_is_populated(bo->ttm) ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SG ||
- bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED ||
+ bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL ||
+ bo->ttm->page_flags & TTM_TT_FLAG_SWAPPED ||
!ttm_bo_get_unless_zero(bo)) {
if (locked)
dma_resv_unlock(bo->base.resv);
@@ -1165,7 +1129,7 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
return ret == -EBUSY ? -ENOSPC : ret;
}
- ttm_bo_del_from_lru(bo);
+ ttm_bo_move_to_pinned(bo);
/* TODO: Cleanup the locking */
spin_unlock(&bo->bdev->lru_lock);
@@ -1224,6 +1188,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo)
if (bo->ttm == NULL)
return;
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
ttm_tt_destroy(bo->bdev, bo->ttm);
bo->ttm = NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1c5ffe2935af..72a94301bc95 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -78,22 +78,21 @@ void ttm_mem_io_free(struct ttm_device *bdev,
/**
* ttm_move_memcpy - Helper to perform a memcpy ttm move operation.
- * @bo: The struct ttm_buffer_object.
- * @new_mem: The struct ttm_resource we're moving to (copy destination).
- * @new_iter: A struct ttm_kmap_iter representing the destination resource.
+ * @clear: Whether to clear rather than copy.
+ * @num_pages: Number of pages of the operation.
+ * @dst_iter: A struct ttm_kmap_iter representing the destination resource.
* @src_iter: A struct ttm_kmap_iter representing the source resource.
*
* This function is intended to be able to move out async under a
* dma-fence if desired.
*/
-void ttm_move_memcpy(struct ttm_buffer_object *bo,
+void ttm_move_memcpy(bool clear,
u32 num_pages,
struct ttm_kmap_iter *dst_iter,
struct ttm_kmap_iter *src_iter)
{
const struct ttm_kmap_iter_ops *dst_ops = dst_iter->ops;
const struct ttm_kmap_iter_ops *src_ops = src_iter->ops;
- struct ttm_tt *ttm = bo->ttm;
struct dma_buf_map src_map, dst_map;
pgoff_t i;
@@ -102,10 +101,7 @@ void ttm_move_memcpy(struct ttm_buffer_object *bo,
return;
/* Don't move nonexistent data. Clear destination instead. */
- if (src_ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm))) {
- if (ttm && !(ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC))
- return;
-
+ if (clear) {
for (i = 0; i < num_pages; ++i) {
dst_ops->map_local(dst_iter, &dst_map, i);
if (dst_map.is_iomem)
@@ -148,9 +144,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
struct ttm_kmap_iter_linear_io io;
} _dst_iter, _src_iter;
struct ttm_kmap_iter *dst_iter, *src_iter;
+ bool clear;
int ret = 0;
- if (ttm && ((ttm->page_flags & TTM_PAGE_FLAG_SWAPPED) ||
+ if (ttm && ((ttm->page_flags & TTM_TT_FLAG_SWAPPED) ||
dst_man->use_tt)) {
ret = ttm_tt_populate(bdev, ttm, ctx);
if (ret)
@@ -171,7 +168,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
goto out_src_iter;
}
- ttm_move_memcpy(bo, dst_mem->num_pages, dst_iter, src_iter);
+ clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm));
+ if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC)))
+ ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter);
if (!src_iter->ops->maps_tt)
ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem);
@@ -190,6 +189,7 @@ static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
struct ttm_transfer_obj *fbo;
fbo = container_of(bo, struct ttm_transfer_obj, base);
+ dma_resv_fini(&fbo->base.base._resv);
ttm_bo_put(fbo->bo);
kfree(fbo);
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index f56be5bc0861..33680c94127c 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -162,9 +162,11 @@ vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
* Refuse to fault imported pages. This should be handled
* (if at all) by redirecting mmap to the exporter.
*/
- if (bo->ttm && (bo->ttm->page_flags & TTM_PAGE_FLAG_SG)) {
- dma_resv_unlock(bo->base.resv);
- return VM_FAULT_SIGBUS;
+ if (bo->ttm && (bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
+ if (!(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE)) {
+ dma_resv_unlock(bo->base.resv);
+ return VM_FAULT_SIGBUS;
+ }
}
return 0;
@@ -346,8 +348,6 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
} else if (unlikely(!page)) {
break;
}
- page->index = drm_vma_node_start(&bo->base.vma_node) +
- page_offset;
pfn = page_to_pfn(page);
}
@@ -519,11 +519,6 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
- if (unlikely(bo->ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(bo->ttm);
- if (unlikely(ret != 0))
- return ret;
- }
fallthrough;
case TTM_PL_TT:
ret = ttm_bo_vm_access_kmap(bo, offset, buf, len, write);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 2df59b3c2ea1..be24bb6cefd0 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -220,6 +220,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
+ INIT_LIST_HEAD(&bdev->pinned);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
list_add_tail(&bdev->device_list, &glob->device_list);
@@ -257,3 +258,50 @@ void ttm_device_fini(struct ttm_device *bdev)
ttm_global_release();
}
EXPORT_SYMBOL(ttm_device_fini);
+
+void ttm_device_clear_dma_mappings(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man;
+ struct ttm_buffer_object *bo;
+ unsigned int i, j;
+
+ spin_lock(&bdev->lru_lock);
+ while (!list_empty(&bdev->pinned)) {
+ bo = list_first_entry(&bdev->pinned, struct ttm_buffer_object, lru);
+ /* Take ref against racing releases once lru_lock is unlocked */
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+
+ for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
+ man = ttm_manager_type(bdev, i);
+ if (!man || !man->use_tt)
+ continue;
+
+ for (j = 0; j < TTM_MAX_BO_PRIORITY; ++j) {
+ while (!list_empty(&man->lru[j])) {
+ bo = list_first_entry(&man->lru[j], struct ttm_buffer_object, lru);
+ if (ttm_bo_get_unless_zero(bo)) {
+ list_del_init(&bo->lru);
+ spin_unlock(&bdev->lru_lock);
+
+ if (bo->ttm)
+ ttm_tt_unpopulate(bo->bdev, bo->ttm);
+
+ ttm_bo_put(bo);
+ spin_lock(&bdev->lru_lock);
+ }
+ }
+ }
+ }
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_device_clear_dma_mappings);
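ttm_device_clear_dma_mappings() has no kerneldoc yet; judging from the implementation it is meant for device-teardown paths that must drop every cached DMA mapping (pinned BOs included) while the device is still reachable. A hypothetical removal sequence, with my_device and its embedded ttm_device purely illustrative:

	static void my_driver_device_fini(struct my_device *mdev)
	{
		/* Unpopulate all TTs so their DMA mappings are released ... */
		ttm_device_clear_dma_mappings(&mdev->bdev);
		/* ... before the TTM device itself goes away. */
		ttm_device_fini(&mdev->bdev);
	}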
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index 7fcdef278c74..0037eefe3239 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -40,6 +40,18 @@
#include "ttm_module.h"
/**
+ * DOC: TTM
+ *
+ * TTM is a memory manager for accelerator devices with dedicated memory.
+ *
+ * The basic idea is that resources are grouped together in buffer objects of
+ * certain size and TTM handles lifetime, movement and CPU mappings of those
+ * objects.
+ *
+ * TODO: Add more design background and information here.
+ */
+
+/**
* ttm_prot_from_caching - Modify the page protection according to the
* ttm caching mode
* @caching: The ttm caching mode
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 82cbb29a05aa..1bba0a0ed3f9 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -70,7 +70,7 @@ static struct ttm_pool_type global_uncached[MAX_ORDER];
static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
-static struct mutex shrinker_lock;
+static spinlock_t shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;
@@ -263,9 +263,9 @@ static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
spin_lock_init(&pt->lock);
INIT_LIST_HEAD(&pt->pages);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_add_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
}
/* Remove a pool_type from the global shrinker list and free all pages */
@@ -273,9 +273,9 @@ static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
list_del(&pt->shrinker_list);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
while ((p = ttm_pool_type_take(pt)))
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
@@ -313,24 +313,23 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
static unsigned int ttm_pool_shrink(void)
{
struct ttm_pool_type *pt;
- unsigned int num_freed;
+ unsigned int num_pages;
struct page *p;
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);
+ list_move_tail(&pt->shrinker_list, &shrinker_list);
+ spin_unlock(&shrinker_lock);
p = ttm_pool_type_take(pt);
if (p) {
ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
- num_freed = 1 << pt->order;
+ num_pages = 1 << pt->order;
} else {
- num_freed = 0;
+ num_pages = 0;
}
- list_move_tail(&pt->shrinker_list, &shrinker_list);
- mutex_unlock(&shrinker_lock);
-
- return num_freed;
+ return num_pages;
}
/* Return the allocation order based for a page */
@@ -372,7 +371,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
WARN_ON(!num_pages || ttm_tt_is_populated(tt));
WARN_ON(dma_addr && !pool->dev);
- if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ if (tt->page_flags & TTM_TT_FLAG_ZERO_ALLOC)
gfp_flags |= __GFP_ZERO;
if (ctx->gfp_retry_mayfail)
@@ -531,6 +530,11 @@ void ttm_pool_fini(struct ttm_pool *pool)
for (j = 0; j < MAX_ORDER; ++j)
ttm_pool_type_fini(&pool->caching[i].orders[j]);
}
+
+ /* We removed the pool types from the LRU, but we need to also make sure
+ * that no shrinker is concurrently freeing pages from the pool.
+ */
+ synchronize_shrinkers();
}
/* As long as pages are available make sure to release at least one */
@@ -605,7 +609,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
{
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
seq_puts(m, "wc\t:");
ttm_pool_debugfs_orders(global_write_combined, m);
seq_puts(m, "uc\t:");
@@ -614,7 +618,7 @@ static int ttm_pool_debugfs_globals_show(struct seq_file *m, void *data)
ttm_pool_debugfs_orders(global_dma32_write_combined, m);
seq_puts(m, "uc 32\t:");
ttm_pool_debugfs_orders(global_dma32_uncached, m);
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
@@ -641,7 +645,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
ttm_pool_debugfs_header(m);
- mutex_lock(&shrinker_lock);
+ spin_lock(&shrinker_lock);
for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
seq_puts(m, "DMA ");
switch (i) {
@@ -657,7 +661,7 @@ int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
}
ttm_pool_debugfs_orders(pool->caching[i].orders, m);
}
- mutex_unlock(&shrinker_lock);
+ spin_unlock(&shrinker_lock);
ttm_pool_debugfs_footer(m);
return 0;
@@ -694,7 +698,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
if (!page_pool_size)
page_pool_size = num_pages;
- mutex_init(&shrinker_lock);
+ spin_lock_init(&shrinker_lock);
INIT_LIST_HEAD(&shrinker_list);
for (i = 0; i < MAX_ORDER; ++i) {
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index f4b08a8705b3..67d68a4a8640 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -138,7 +138,7 @@ static const struct ttm_resource_manager_func ttm_range_manager_func = {
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_device *bdev,
+int ttm_range_man_init_nocheck(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size)
{
@@ -163,7 +163,7 @@ int ttm_range_man_init(struct ttm_device *bdev,
ttm_resource_manager_set_used(man, true);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_init);
+EXPORT_SYMBOL(ttm_range_man_init_nocheck);
/**
* ttm_range_man_fini
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(ttm_range_man_init);
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_device *bdev,
+int ttm_range_man_fini_nocheck(struct ttm_device *bdev,
unsigned type)
{
struct ttm_resource_manager *man = ttm_manager_type(bdev, type);
@@ -200,4 +200,4 @@ int ttm_range_man_fini(struct ttm_device *bdev,
kfree(rman);
return 0;
}
-EXPORT_SYMBOL(ttm_range_man_fini);
+EXPORT_SYMBOL(ttm_range_man_fini_nocheck);
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 2431717376e7..035d71332d18 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -67,6 +67,55 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res)
}
EXPORT_SYMBOL(ttm_resource_free);
+static bool ttm_resource_places_compat(struct ttm_resource *res,
+ const struct ttm_place *places,
+ unsigned num_placement)
+{
+ unsigned i;
+
+ if (res->placement & TTM_PL_FLAG_TEMPORARY)
+ return false;
+
+ for (i = 0; i < num_placement; i++) {
+ const struct ttm_place *heap = &places[i];
+
+ if (res->start < heap->fpfn || (heap->lpfn &&
+ (res->start + res->num_pages) > heap->lpfn))
+ continue;
+
+ if ((res->mem_type == heap->mem_type) &&
+ (!(heap->flags & TTM_PL_FLAG_CONTIGUOUS) ||
+ (res->placement & TTM_PL_FLAG_CONTIGUOUS)))
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ttm_resource_compat - check if resource is compatible with placement
+ *
+ * @res: the resource to check
+ * @placement: the placement to check against
+ *
+ * Returns true if the placement is compatible.
+ */
+bool ttm_resource_compat(struct ttm_resource *res,
+ struct ttm_placement *placement)
+{
+ if (ttm_resource_places_compat(res, placement->placement,
+ placement->num_placement))
+ return true;
+
+ if ((placement->busy_placement != placement->placement ||
+ placement->num_busy_placement > placement->num_placement) &&
+ ttm_resource_places_compat(res, placement->busy_placement,
+ placement->num_busy_placement))
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(ttm_resource_compat);
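ttm_resource_compat() takes over the check previously done by ttm_bo_mem_compat(), keyed on the resource rather than the placement; ttm_bo_validate() above is the canonical caller, and a driver can use it the same way to skip a validate when the buffer already sits in an acceptable placement (placement and ctx are assumed driver-side setup):

	/* Nothing to do if the current resource already matches the placement. */
	if (ttm_resource_compat(bo->resource, &placement))
		return 0;

	return ttm_bo_validate(bo, &placement, &ctx);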
+
/**
* ttm_resource_manager_init
*
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index d5cd8b5dc0bf..7e83c00a3f48 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -68,12 +68,12 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
switch (bo->type) {
case ttm_bo_type_device:
if (zero_alloc)
- page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
+ page_flags |= TTM_TT_FLAG_ZERO_ALLOC;
break;
case ttm_bo_type_kernel:
break;
case ttm_bo_type_sg:
- page_flags |= TTM_PAGE_FLAG_SG;
+ page_flags |= TTM_TT_FLAG_EXTERNAL;
break;
default:
pr_err("Illegal buffer object type\n");
@@ -84,6 +84,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc)
if (unlikely(bo->ttm == NULL))
return -ENOMEM;
+ WARN_ON(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL_MAPPABLE &&
+ !(bo->ttm->page_flags & TTM_TT_FLAG_EXTERNAL));
+
return 0;
}
@@ -122,17 +125,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm)
return 0;
}
-void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- ttm_tt_unpopulate(bdev, ttm);
-
- if (ttm->swap_storage)
- fput(ttm->swap_storage);
-
- ttm->swap_storage = NULL;
-}
-EXPORT_SYMBOL(ttm_tt_destroy_common);
-
void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
bdev->funcs->ttm_tt_destroy(bdev, ttm);
@@ -167,6 +159,12 @@ EXPORT_SYMBOL(ttm_tt_init);
void ttm_tt_fini(struct ttm_tt *ttm)
{
+ WARN_ON(ttm->page_flags & TTM_TT_FLAG_PRIV_POPULATED);
+
+ if (ttm->swap_storage)
+ fput(ttm->swap_storage);
+ ttm->swap_storage = NULL;
+
if (ttm->pages)
kvfree(ttm->pages);
else
@@ -183,7 +181,7 @@ int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo,
ttm_tt_init_fields(ttm, bo, page_flags, caching);
- if (page_flags & TTM_PAGE_FLAG_SG)
+ if (page_flags & TTM_TT_FLAG_EXTERNAL)
ret = ttm_sg_tt_alloc_page_directory(ttm);
else
ret = ttm_dma_tt_alloc_page_directory(ttm);
@@ -229,7 +227,7 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
fput(swap_storage);
ttm->swap_storage = NULL;
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags &= ~TTM_TT_FLAG_SWAPPED;
return 0;
@@ -284,7 +282,7 @@ int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
ttm_tt_unpopulate(bdev, ttm);
ttm->swap_storage = swap_storage;
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
+ ttm->page_flags |= TTM_TT_FLAG_SWAPPED;
return ttm->num_pages;
@@ -294,17 +292,6 @@ out_err:
return ret;
}
-static void ttm_tt_add_mapping(struct ttm_device *bdev, struct ttm_tt *ttm)
-{
- pgoff_t i;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i)
- ttm->pages[i]->mapping = bdev->dev_mapping;
-}
-
int ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
@@ -316,7 +303,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ttm_tt_is_populated(ttm))
return 0;
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_add(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_add(ttm->num_pages,
@@ -341,9 +328,8 @@ int ttm_tt_populate(struct ttm_device *bdev,
if (ret)
goto error;
- ttm_tt_add_mapping(bdev, ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ttm->page_flags |= TTM_TT_FLAG_PRIV_POPULATED;
+ if (unlikely(ttm->page_flags & TTM_TT_FLAG_SWAPPED)) {
ret = ttm_tt_swapin(ttm);
if (unlikely(ret != 0)) {
ttm_tt_unpopulate(bdev, ttm);
@@ -354,7 +340,7 @@ int ttm_tt_populate(struct ttm_device *bdev,
return 0;
error:
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
@@ -364,39 +350,24 @@ error:
}
EXPORT_SYMBOL(ttm_tt_populate);
-static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
-{
- pgoff_t i;
- struct page **page = ttm->pages;
-
- if (ttm->page_flags & TTM_PAGE_FLAG_SG)
- return;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- (*page)->mapping = NULL;
- (*page++)->index = 0;
- }
-}
-
void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
if (!ttm_tt_is_populated(ttm))
return;
- ttm_tt_clear_mapping(ttm);
if (bdev->funcs->ttm_tt_unpopulate)
bdev->funcs->ttm_tt_unpopulate(bdev, ttm);
else
ttm_pool_free(&bdev->pool, ttm);
- if (!(ttm->page_flags & TTM_PAGE_FLAG_SG)) {
+ if (!(ttm->page_flags & TTM_TT_FLAG_EXTERNAL)) {
atomic_long_sub(ttm->num_pages, &ttm_pages_allocated);
if (bdev->pool.use_dma32)
atomic_long_sub(ttm->num_pages,
&ttm_dma32_pages_allocated);
}
- ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
+ ttm->page_flags &= ~TTM_TT_FLAG_PRIV_POPULATED;
}
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/gpu/drm/udl/Kconfig b/drivers/gpu/drm/udl/Kconfig
index 1f497d8f1ae5..c744175c6992 100644
--- a/drivers/gpu/drm/udl/Kconfig
+++ b/drivers/gpu/drm/udl/Kconfig
@@ -4,6 +4,7 @@ config DRM_UDL
depends on DRM
depends on USB
depends on USB_ARCH_HAS_HCD
+ depends on MMU
select DRM_GEM_SHMEM_HELPER
select DRM_KMS_HELPER
help
diff --git a/drivers/gpu/drm/v3d/Kconfig b/drivers/gpu/drm/v3d/Kconfig
index 9a5c44606337..e973ec487484 100644
--- a/drivers/gpu/drm/v3d/Kconfig
+++ b/drivers/gpu/drm/v3d/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
config DRM_V3D
tristate "Broadcom V3D 3.x and newer"
- depends on ARCH_BCM || ARCH_BCMSTB || COMPILE_TEST
+ depends on ARCH_BCM || ARCH_BRCMSTB || COMPILE_TEST
depends on DRM
depends on COMMON_CLK
depends on MMU
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 9403c3b36aca..bd46396a1ae0 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -83,7 +83,6 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
return 0;
}
-
switch (args->param) {
case DRM_V3D_PARAM_SUPPORTS_TFU:
args->value = 1;
@@ -97,6 +96,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data,
case DRM_V3D_PARAM_SUPPORTS_PERFMON:
args->value = (v3d->ver >= 40);
return 0;
+ case DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT:
+ args->value = 1;
+ return 0;
default:
DRM_DEBUG("Unknown parameter %d\n", args->param);
return -EINVAL;
@@ -136,9 +138,8 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
- for (q = 0; q < V3D_MAX_QUEUES; q++) {
+ for (q = 0; q < V3D_MAX_QUEUES; q++)
drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
- }
v3d_perfmon_close_file(v3d_priv);
kfree(v3d_priv);
@@ -147,7 +148,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
DEFINE_DRM_GEM_FOPS(v3d_drm_fops);
/* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP
- * protection between clients. Note that render nodes would be be
+ * protection between clients. Note that render nodes would be
* able to submit CLs that could access BOs from clients authenticated
* with the master node. The TFU doesn't use the GMP, so it would
* need to stay DRM_AUTH until we do buffer size/offset validation.
@@ -206,10 +207,7 @@ MODULE_DEVICE_TABLE(of, v3d_of_match);
static int
map_regs(struct v3d_dev *v3d, void __iomem **regs, const char *name)
{
- struct resource *res =
- platform_get_resource_byname(v3d_to_pdev(v3d), IORESOURCE_MEM, name);
-
- *regs = devm_ioremap_resource(v3d->drm.dev, res);
+ *regs = devm_platform_ioremap_resource_byname(v3d_to_pdev(v3d), name);
return PTR_ERR_OR_ZERO(*regs);
}
@@ -222,7 +220,6 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
u32 mmu_debug;
u32 ident1;
-
v3d = devm_drm_dev_alloc(dev, &v3d_drm_driver, struct v3d_dev, drm);
if (IS_ERR(v3d))
return PTR_ERR(v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 270134779073..b74b1351bfc8 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -19,15 +19,6 @@ struct reset_control;
#define GMP_GRANULARITY (128 * 1024)
-/* Enum for each of the V3D queues. */
-enum v3d_queue {
- V3D_BIN,
- V3D_RENDER,
- V3D_TFU,
- V3D_CSD,
- V3D_CACHE_CLEAN,
-};
-
#define V3D_MAX_QUEUES (V3D_CACHE_CLEAN + 1)
struct v3d_queue_state {
@@ -234,11 +225,6 @@ struct v3d_job {
struct drm_gem_object **bo;
u32 bo_count;
- /* Array of struct dma_fence * to block on before submitting this job.
- */
- struct xarray deps;
- unsigned long last_dep;
-
/* v3d fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *irq_fence;
@@ -299,6 +285,21 @@ struct v3d_csd_job {
struct drm_v3d_submit_csd args;
};
+struct v3d_submit_outsync {
+ struct drm_syncobj *syncobj;
+};
+
+struct v3d_submit_ext {
+ u32 flags;
+ u32 wait_stage;
+
+ u32 in_sync_count;
+ u64 in_syncs;
+
+ u32 out_sync_count;
+ struct v3d_submit_outsync *out_syncs;
+};
+
/**
* __wait_for - magic wait macro
*
@@ -379,6 +380,7 @@ int v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+void v3d_job_cleanup(struct v3d_job *job);
void v3d_job_put(struct v3d_job *job);
void v3d_reset(struct v3d_dev *v3d);
void v3d_invalidate_caches(struct v3d_dev *v3d);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index 5689da118197..6a000d77c568 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -197,8 +197,8 @@ v3d_clean_caches(struct v3d_dev *v3d)
V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
- V3D_L2TCACTL_L2TFLS), 100)) {
- DRM_ERROR("Timeout waiting for L1T write combiner flush\n");
+ V3D_L2TCACTL_TMUWCF), 100)) {
+ DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
}
mutex_lock(&v3d->cache_clean_lock);
@@ -259,8 +259,8 @@ v3d_lock_bo_reservations(struct v3d_job *job,
return ret;
for (i = 0; i < job->bo_count; i++) {
- ret = drm_gem_fence_array_add_implicit(&job->deps,
- job->bo[i], true);
+ ret = drm_sched_job_add_implicit_dependencies(&job->base,
+ job->bo[i], true);
if (ret) {
drm_gem_unlock_reservations(job->bo, job->bo_count,
acquire_ctx);
@@ -356,8 +356,6 @@ static void
v3d_job_free(struct kref *ref)
{
struct v3d_job *job = container_of(ref, struct v3d_job, refcount);
- unsigned long index;
- struct dma_fence *fence;
int i;
for (i = 0; i < job->bo_count; i++) {
@@ -366,11 +364,6 @@ v3d_job_free(struct kref *ref)
}
kvfree(job->bo);
- xa_for_each(&job->deps, index, fence) {
- dma_fence_put(fence);
- }
- xa_destroy(&job->deps);
-
dma_fence_put(job->irq_fence);
dma_fence_put(job->done_fence);
@@ -397,6 +390,15 @@ v3d_render_job_free(struct kref *ref)
v3d_job_free(ref);
}
+void v3d_job_cleanup(struct v3d_job *job)
+{
+ if (!job)
+ return;
+
+ drm_sched_job_cleanup(&job->base);
+ v3d_job_put(job);
+}
+
void v3d_job_put(struct v3d_job *job)
{
kref_put(&job->refcount, job->free);
@@ -417,7 +419,7 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
ret = drm_gem_dma_resv_wait(file_priv, args->handle,
- true, timeout_jiffies);
+ true, timeout_jiffies);
/* Decrement the user's timeout, in case we got interrupted
* such that the ioctl will be restarted.
@@ -436,58 +438,97 @@ v3d_wait_bo_ioctl(struct drm_device *dev, void *data,
}
static int
-v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
- struct v3d_job *job, void (*free)(struct kref *ref),
- u32 in_sync)
+v3d_job_add_deps(struct drm_file *file_priv, struct v3d_job *job,
+ u32 in_sync, u32 point)
{
struct dma_fence *in_fence = NULL;
int ret;
+ ret = drm_syncobj_find_fence(file_priv, in_sync, point, 0, &in_fence);
+ if (ret == -EINVAL)
+ return ret;
+
+ return drm_sched_job_add_dependency(&job->base, in_fence);
+}
+
+static int
+v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
+ void **container, size_t size, void (*free)(struct kref *ref),
+ u32 in_sync, struct v3d_submit_ext *se, enum v3d_queue queue)
+{
+ struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
+ struct v3d_job *job;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
+ int ret, i;
+
+ *container = kcalloc(1, size, GFP_KERNEL);
+ if (!*container) {
+ DRM_ERROR("Cannot allocate memory for v3d job.");
+ return -ENOMEM;
+ }
+
+ job = *container;
job->v3d = v3d;
job->free = free;
ret = pm_runtime_get_sync(v3d->drm.dev);
if (ret < 0)
- return ret;
-
- xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
-
- ret = drm_syncobj_find_fence(file_priv, in_sync, 0, 0, &in_fence);
- if (ret == -EINVAL)
goto fail;
- ret = drm_gem_fence_array_add(&job->deps, in_fence);
+ ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
+ v3d_priv);
if (ret)
- goto fail;
+ goto fail_job;
+
+ if (has_multisync) {
+ if (se->in_sync_count && se->wait_stage == queue) {
+ struct drm_v3d_sem __user *handle = u64_to_user_ptr(se->in_syncs);
+
+ for (i = 0; i < se->in_sync_count; i++) {
+ struct drm_v3d_sem in;
+
+ ret = copy_from_user(&in, handle++, sizeof(in));
+ if (ret) {
+ DRM_DEBUG("Failed to copy wait dep handle.\n");
+ goto fail_deps;
+ }
+ ret = v3d_job_add_deps(file_priv, job, in.handle, 0);
+ if (ret)
+ goto fail_deps;
+ }
+ }
+ } else {
+ ret = v3d_job_add_deps(file_priv, job, in_sync, 0);
+ if (ret)
+ goto fail_deps;
+ }
kref_init(&job->refcount);
return 0;
-fail:
- xa_destroy(&job->deps);
+
+fail_deps:
+ drm_sched_job_cleanup(&job->base);
+fail_job:
pm_runtime_put_autosuspend(v3d->drm.dev);
+fail:
+ kfree(*container);
+ *container = NULL;
+
return ret;
}
-static int
-v3d_push_job(struct v3d_file_priv *v3d_priv,
- struct v3d_job *job, enum v3d_queue queue)
+static void
+v3d_push_job(struct v3d_job *job)
{
- int ret;
-
- ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue],
- v3d_priv);
- if (ret)
- return ret;
+ drm_sched_job_arm(&job->base);
job->done_fence = dma_fence_get(&job->base.s_fence->finished);
/* put by scheduler job completion */
kref_get(&job->refcount);
- drm_sched_entity_push_job(&job->base, &v3d_priv->sched_entity[queue]);
-
- return 0;
+ drm_sched_entity_push_job(&job->base);
}
static void
@@ -495,27 +536,173 @@ v3d_attach_fences_and_unlock_reservation(struct drm_file *file_priv,
struct v3d_job *job,
struct ww_acquire_ctx *acquire_ctx,
u32 out_sync,
+ struct v3d_submit_ext *se,
struct dma_fence *done_fence)
{
struct drm_syncobj *sync_out;
+ bool has_multisync = se && (se->flags & DRM_V3D_EXT_ID_MULTI_SYNC);
int i;
for (i = 0; i < job->bo_count; i++) {
/* XXX: Use shared fences for read-only objects. */
dma_resv_add_excl_fence(job->bo[i]->resv,
- job->done_fence);
+ job->done_fence);
}
drm_gem_unlock_reservations(job->bo, job->bo_count, acquire_ctx);
/* Update the return sync object for the job */
- sync_out = drm_syncobj_find(file_priv, out_sync);
- if (sync_out) {
- drm_syncobj_replace_fence(sync_out, done_fence);
- drm_syncobj_put(sync_out);
+ /* If it only supports a single signal semaphore */
+ if (!has_multisync) {
+ sync_out = drm_syncobj_find(file_priv, out_sync);
+ if (sync_out) {
+ drm_syncobj_replace_fence(sync_out, done_fence);
+ drm_syncobj_put(sync_out);
+ }
+ return;
+ }
+
+ /* If multiple semaphores extension is supported */
+ if (se->out_sync_count) {
+ for (i = 0; i < se->out_sync_count; i++) {
+ drm_syncobj_replace_fence(se->out_syncs[i].syncobj,
+ done_fence);
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ }
+ kvfree(se->out_syncs);
}
}
+static void
+v3d_put_multisync_post_deps(struct v3d_submit_ext *se)
+{
+ unsigned int i;
+
+ if (!(se && se->out_sync_count))
+ return;
+
+ for (i = 0; i < se->out_sync_count; i++)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+}
+
+static int
+v3d_get_multisync_post_deps(struct drm_file *file_priv,
+ struct v3d_submit_ext *se,
+ u32 count, u64 handles)
+{
+ struct drm_v3d_sem __user *post_deps;
+ int i, ret;
+
+ if (!count)
+ return 0;
+
+ se->out_syncs = (struct v3d_submit_outsync *)
+ kvmalloc_array(count,
+ sizeof(struct v3d_submit_outsync),
+ GFP_KERNEL);
+ if (!se->out_syncs)
+ return -ENOMEM;
+
+ post_deps = u64_to_user_ptr(handles);
+
+ for (i = 0; i < count; i++) {
+ struct drm_v3d_sem out;
+
+ ret = copy_from_user(&out, post_deps++, sizeof(out));
+ if (ret) {
+ DRM_DEBUG("Failed to copy post dep handles\n");
+ goto fail;
+ }
+
+ se->out_syncs[i].syncobj = drm_syncobj_find(file_priv,
+ out.handle);
+ if (!se->out_syncs[i].syncobj) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ }
+ se->out_sync_count = count;
+
+ return 0;
+
+fail:
+ for (i--; i >= 0; i--)
+ drm_syncobj_put(se->out_syncs[i].syncobj);
+ kvfree(se->out_syncs);
+
+ return ret;
+}
+
+/* Get data for synchronization with multiple binary semaphores. Parse the
+ * syncobjs to be signaled when the job completes (out_sync).
+ */
+static int
+v3d_get_multisync_submit_deps(struct drm_file *file_priv,
+ struct drm_v3d_extension __user *ext,
+ void *data)
+{
+ struct drm_v3d_multi_sync multisync;
+ struct v3d_submit_ext *se = data;
+ int ret;
+
+ ret = copy_from_user(&multisync, ext, sizeof(multisync));
+ if (ret)
+ return ret;
+
+ if (multisync.pad)
+ return -EINVAL;
+
+ ret = v3d_get_multisync_post_deps(file_priv, data, multisync.out_sync_count,
+ multisync.out_syncs);
+ if (ret)
+ return ret;
+
+ se->in_sync_count = multisync.in_sync_count;
+ se->in_syncs = multisync.in_syncs;
+ se->flags |= DRM_V3D_EXT_ID_MULTI_SYNC;
+ se->wait_stage = multisync.wait_stage;
+
+ return 0;
+}
+
+/* Whenever userspace sets ioctl extensions, v3d_get_extensions parses data
+ * according to the extension id (name).
+ */
+static int
+v3d_get_extensions(struct drm_file *file_priv,
+ u64 ext_handles,
+ void *data)
+{
+ struct drm_v3d_extension __user *user_ext;
+ int ret;
+
+ user_ext = u64_to_user_ptr(ext_handles);
+ while (user_ext) {
+ struct drm_v3d_extension ext;
+
+ if (copy_from_user(&ext, user_ext, sizeof(ext))) {
+ DRM_DEBUG("Failed to copy submit extension\n");
+ return -EFAULT;
+ }
+
+ switch (ext.id) {
+ case DRM_V3D_EXT_ID_MULTI_SYNC:
+ ret = v3d_get_multisync_submit_deps(file_priv, user_ext, data);
+ if (ret)
+ return ret;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("Unknown extension id: %d\n", ext.id);
+ return -EINVAL;
+ }
+
+ user_ext = u64_to_user_ptr(ext.next);
+ }
+
+ return 0;
+}
+
/**
* v3d_submit_cl_ioctl() - Submits a job (frame) to the V3D.
* @dev: DRM device
@@ -535,8 +722,9 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_cl *args = data;
+ struct v3d_submit_ext se = {0};
struct v3d_bin_job *bin = NULL;
- struct v3d_render_job *render;
+ struct v3d_render_job *render = NULL;
struct v3d_job *clean_job = NULL;
struct v3d_job *last_job;
struct ww_acquire_ctx acquire_ctx;
@@ -544,44 +732,38 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end);
- if (args->pad != 0)
+ if (args->pad)
return -EINVAL;
- if (args->flags != 0 &&
- args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
+ if (args->flags &&
+ args->flags & ~(DRM_V3D_SUBMIT_CL_FLUSH_CACHE |
+ DRM_V3D_SUBMIT_EXTENSION)) {
DRM_INFO("invalid flags: %d\n", args->flags);
return -EINVAL;
}
- render = kcalloc(1, sizeof(*render), GFP_KERNEL);
- if (!render)
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
+ }
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&render, sizeof(*render),
+ v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
+ if (ret)
+ goto fail;
render->start = args->rcl_start;
render->end = args->rcl_end;
INIT_LIST_HEAD(&render->unref_list);
- ret = v3d_job_init(v3d, file_priv, &render->base,
- v3d_render_job_free, args->in_sync_rcl);
- if (ret) {
- kfree(render);
- return ret;
- }
-
if (args->bcl_start != args->bcl_end) {
- bin = kcalloc(1, sizeof(*bin), GFP_KERNEL);
- if (!bin) {
- v3d_job_put(&render->base);
- return -ENOMEM;
- }
-
- ret = v3d_job_init(v3d, file_priv, &bin->base,
- v3d_job_free, args->in_sync_bcl);
- if (ret) {
- v3d_job_put(&render->base);
- kfree(bin);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&bin, sizeof(*bin),
+ v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
+ if (ret)
+ goto fail;
bin->start = args->bcl_start;
bin->end = args->bcl_end;
@@ -592,18 +774,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) {
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- ret = -ENOMEM;
- goto fail;
- }
-
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
- if (ret) {
- kfree(clean_job);
- clean_job = NULL;
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
goto fail;
- }
last_job = clean_job;
} else {
@@ -633,31 +807,26 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
if (bin) {
bin->base.perfmon = render->base.perfmon;
v3d_perfmon_get(bin->base.perfmon);
- ret = v3d_push_job(v3d_priv, &bin->base, V3D_BIN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&bin->base);
- ret = drm_gem_fence_array_add(&render->base.deps,
- dma_fence_get(bin->base.done_fence));
+ ret = drm_sched_job_add_dependency(&render->base.base,
+ dma_fence_get(bin->base.done_fence));
if (ret)
goto fail_unreserve;
}
- ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&render->base);
if (clean_job) {
struct dma_fence *render_fence =
dma_fence_get(render->base.done_fence);
- ret = drm_gem_fence_array_add(&clean_job->deps, render_fence);
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ render_fence);
if (ret)
goto fail_unreserve;
clean_job->perfmon = render->base.perfmon;
v3d_perfmon_get(clean_job->perfmon);
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
}
mutex_unlock(&v3d->sched_lock);
@@ -666,6 +835,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
last_job,
&acquire_ctx,
args->out_sync,
+ &se,
last_job->done_fence);
if (bin)
@@ -681,11 +851,10 @@ fail_unreserve:
drm_gem_unlock_reservations(last_job->bo,
last_job->bo_count, &acquire_ctx);
fail:
- if (bin)
- v3d_job_put(&bin->base);
- v3d_job_put(&render->base);
- if (clean_job)
- v3d_job_put(clean_job);
+ v3d_job_cleanup((void *)bin);
+ v3d_job_cleanup((void *)render);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -704,30 +873,37 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct v3d_dev *v3d = to_v3d_dev(dev);
- struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_tfu *args = data;
- struct v3d_tfu_job *job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_tfu_job *job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret = 0;
trace_v3d_submit_tfu_ioctl(&v3d->drm, args->iia);
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_DEBUG("invalid flags: %d\n", args->flags);
+ return -EINVAL;
+ }
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_TFU);
+ if (ret)
+ goto fail;
+
job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
sizeof(*job->base.bo), GFP_KERNEL);
if (!job->base.bo) {
- v3d_job_put(&job->base);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail;
}
job->args = *args;
@@ -761,26 +937,22 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
goto fail;
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_TFU);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
&job->base, &acquire_ctx,
args->out_sync,
+ &se,
job->base.done_fence);
v3d_job_put(&job->base);
return 0;
-fail_unreserve:
- mutex_unlock(&v3d->sched_lock);
- drm_gem_unlock_reservations(job->base.bo, job->base.bo_count,
- &acquire_ctx);
fail:
- v3d_job_put(&job->base);
+ v3d_job_cleanup((void *)job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -801,42 +973,44 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file_priv->driver_priv;
struct drm_v3d_submit_csd *args = data;
- struct v3d_csd_job *job;
- struct v3d_job *clean_job;
+ struct v3d_submit_ext se = {0};
+ struct v3d_csd_job *job = NULL;
+ struct v3d_job *clean_job = NULL;
struct ww_acquire_ctx acquire_ctx;
int ret;
trace_v3d_submit_csd_ioctl(&v3d->drm, args->cfg[5], args->cfg[6]);
+ if (args->pad)
+ return -EINVAL;
+
if (!v3d_has_csd(v3d)) {
DRM_DEBUG("Attempting CSD submit on non-CSD hardware\n");
return -EINVAL;
}
- job = kcalloc(1, sizeof(*job), GFP_KERNEL);
- if (!job)
- return -ENOMEM;
-
- ret = v3d_job_init(v3d, file_priv, &job->base,
- v3d_job_free, args->in_sync);
- if (ret) {
- kfree(job);
- return ret;
+ if (args->flags && !(args->flags & DRM_V3D_SUBMIT_EXTENSION)) {
+ DRM_INFO("invalid flags: %d\n", args->flags);
+ return -EINVAL;
}
- clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL);
- if (!clean_job) {
- v3d_job_put(&job->base);
- kfree(job);
- return -ENOMEM;
+ if (args->flags & DRM_V3D_SUBMIT_EXTENSION) {
+ ret = v3d_get_extensions(file_priv, args->extensions, &se);
+ if (ret) {
+ DRM_DEBUG("Failed to get extensions.\n");
+ return ret;
+ }
}
- ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0);
- if (ret) {
- v3d_job_put(&job->base);
- kfree(clean_job);
- return ret;
- }
+ ret = v3d_job_init(v3d, file_priv, (void *)&job, sizeof(*job),
+ v3d_job_free, args->in_sync, &se, V3D_CSD);
+ if (ret)
+ goto fail;
+
+ ret = v3d_job_init(v3d, file_priv, (void *)&clean_job, sizeof(*clean_job),
+ v3d_job_free, 0, 0, V3D_CACHE_CLEAN);
+ if (ret)
+ goto fail;
job->args = *args;
@@ -859,24 +1033,21 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data,
}
mutex_lock(&v3d->sched_lock);
- ret = v3d_push_job(v3d_priv, &job->base, V3D_CSD);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(&job->base);
- ret = drm_gem_fence_array_add(&clean_job->deps,
- dma_fence_get(job->base.done_fence));
+ ret = drm_sched_job_add_dependency(&clean_job->base,
+ dma_fence_get(job->base.done_fence));
if (ret)
goto fail_unreserve;
- ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN);
- if (ret)
- goto fail_unreserve;
+ v3d_push_job(clean_job);
mutex_unlock(&v3d->sched_lock);
v3d_attach_fences_and_unlock_reservation(file_priv,
clean_job,
&acquire_ctx,
args->out_sync,
+ &se,
clean_job->done_fence);
v3d_job_put(&job->base);
@@ -889,8 +1060,9 @@ fail_unreserve:
drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count,
&acquire_ctx);
fail:
- v3d_job_put(&job->base);
- v3d_job_put(clean_job);
+ v3d_job_cleanup((void *)job);
+ v3d_job_cleanup(clean_job);
+ v3d_put_multisync_post_deps(&se);
return ret;
}
@@ -924,8 +1096,7 @@ v3d_gem_init(struct drm_device *dev)
if (!v3d->pt) {
drm_mm_takedown(&v3d->mm);
dev_err(v3d->drm.dev,
- "Failed to allocate page tables. "
- "Please ensure you have CMA enabled.\n");
+ "Failed to allocate page tables. Please ensure you have CMA enabled.\n");
return -ENOMEM;
}
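
As a hedged illustration (not taken from the patch): a minimal userspace-style sketch of how a client might chain the multisync extension that v3d_get_extensions() walks via ext.next. Struct and field names follow the uapi added by this series; the embedded extension-header member is assumed to be named "base", the usual libdrm/uapi headers are assumed to be included, and the helper variables are placeholders.

	struct drm_v3d_sem in_sem  = { .handle = in_syncobj  };
	struct drm_v3d_sem out_sem = { .handle = out_syncobj };
	struct drm_v3d_multi_sync ms = {
		.base.id        = DRM_V3D_EXT_ID_MULTI_SYNC,
		.base.next      = 0,			/* end of the extension chain */
		.wait_stage     = V3D_RENDER,		/* queue that waits on in_syncs */
		.in_syncs       = (uintptr_t)&in_sem,
		.in_sync_count  = 1,
		.out_syncs      = (uintptr_t)&out_sem,
		.out_sync_count = 1,
	};
	struct drm_v3d_submit_cl submit = {
		/* ... CL start/end addresses, BO handles, etc. ... */
		.flags      = DRM_V3D_SUBMIT_EXTENSION,
		.extensions = (uintptr_t)&ms,
	};
	drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
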
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index dd7fcc36d726..e0cb7d0697a7 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -13,7 +13,7 @@
* jobs when bulk background jobs are queued up, we submit a new job
* to the HW only when it has completed the last one, instead of
* filling up the CT[01]Q FIFOs with jobs. Similarly, we use
- * v3d_job_dependency() to manage the dependency between bin and
+ * drm_sched_job_add_dependency() to manage the dependency between bin and
* render, instead of having the clients submit jobs using the HW's
* semaphores to interlock between them.
*/
@@ -55,12 +55,11 @@ to_csd_job(struct drm_sched_job *sched_job)
}
static void
-v3d_job_free(struct drm_sched_job *sched_job)
+v3d_sched_job_free(struct drm_sched_job *sched_job)
{
struct v3d_job *job = to_v3d_job(sched_job);
- drm_sched_job_cleanup(sched_job);
- v3d_job_put(job);
+ v3d_job_cleanup(job);
}
static void
@@ -73,28 +72,6 @@ v3d_switch_perfmon(struct v3d_dev *v3d, struct v3d_job *job)
v3d_perfmon_start(v3d, job->perfmon);
}
-/*
- * Returns the fences that the job depends on, one by one.
- *
- * If placed in the scheduler's .dependency method, the corresponding
- * .run_job won't be called until all of them have been signaled.
- */
-static struct dma_fence *
-v3d_job_dependency(struct drm_sched_job *sched_job,
- struct drm_sched_entity *s_entity)
-{
- struct v3d_job *job = to_v3d_job(sched_job);
-
- /* XXX: Wait on a fence for switching the GMP if necessary,
- * and then do so.
- */
-
- if (!xa_empty(&job->deps))
- return xa_erase(&job->deps, job->last_dep++);
-
- return NULL;
-}
-
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
{
struct v3d_bin_job *job = to_bin_job(sched_job);
@@ -373,38 +350,33 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
}
static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_bin_job_run,
.timedout_job = v3d_bin_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_render_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_render_job_run,
.timedout_job = v3d_render_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_tfu_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_tfu_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free,
+ .free_job = v3d_sched_job_free,
};
static const struct drm_sched_backend_ops v3d_csd_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_csd_job_run,
.timedout_job = v3d_csd_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = {
- .dependency = v3d_job_dependency,
.run_job = v3d_cache_clean_job_run,
.timedout_job = v3d_generic_job_timedout,
- .free_job = v3d_job_free
+ .free_job = v3d_sched_job_free
};
int
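
With the .dependency callback gone, waiting happens on dependencies that the submit paths attach to the drm_sched_job up front. Condensed from the v3d_job_init()/v3d_push_job() hunks above (error handling omitted), the driver-side flow is now:

	drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], v3d_priv);

	/* Explicit and implicit fences are recorded on the job itself ... */
	drm_sched_job_add_dependency(&job->base, in_fence);
	drm_sched_job_add_implicit_dependencies(&job->base, job->bo[i], true);

	/* ... and the scheduler waits for all of them before .run_job(): */
	drm_sched_job_arm(&job->base);
	job->done_fence = dma_fence_get(&job->base.s_fence->finished);
	drm_sched_entity_push_job(&job->base);
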
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index 2b81cb259d23..a6c81af37345 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -69,7 +69,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = vbox_mode_init(vbox);
if (ret)
- goto err_mm_fini;
+ goto err_hw_fini;
ret = vbox_irq_init(vbox);
if (ret)
@@ -87,8 +87,6 @@ err_irq_fini:
vbox_irq_fini(vbox);
err_mode_fini:
vbox_mode_fini(vbox);
-err_mm_fini:
- vbox_mm_fini(vbox);
err_hw_fini:
vbox_hw_fini(vbox);
return ret;
@@ -101,7 +99,6 @@ static void vbox_pci_remove(struct pci_dev *pdev)
drm_dev_unregister(&vbox->ddev);
vbox_irq_fini(vbox);
vbox_mode_fini(vbox);
- vbox_mm_fini(vbox);
vbox_hw_fini(vbox);
}
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h
index 4903b91d7fe4..e77bd6512eb1 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.h
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h
@@ -139,7 +139,6 @@ void vbox_mode_fini(struct vbox_private *vbox);
void vbox_report_caps(struct vbox_private *vbox);
int vbox_mm_init(struct vbox_private *vbox);
-void vbox_mm_fini(struct vbox_private *vbox);
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
index fd8a53a4d8d6..dc24c2172fd4 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c
@@ -13,22 +13,21 @@
int vbox_mm_init(struct vbox_private *vbox)
{
int ret;
+ resource_size_t base, size;
struct drm_device *dev = &vbox->ddev;
struct pci_dev *pdev = to_pci_dev(dev->dev);
- ret = drmm_vram_helper_init(dev, pci_resource_start(pdev, 0),
- vbox->available_vram_size);
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
+
+ /* Don't fail on errors, but performance might be reduced. */
+ devm_arch_phys_wc_add(&pdev->dev, base, size);
+
+ ret = drmm_vram_helper_init(dev, base, vbox->available_vram_size);
if (ret) {
DRM_ERROR("Error initializing VRAM MM; %d\n", ret);
return ret;
}
- vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
return 0;
}
-
-void vbox_mm_fini(struct vbox_private *vbox)
-{
- arch_phys_wc_del(vbox->fb_mtrr);
-}
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index a90f2545baee..c180eb60bee8 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -229,26 +229,19 @@ static const struct of_device_id vc4_dpi_dt_match[] = {
static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
{
struct device *dev = &dpi->pdev->dev;
- struct drm_panel *panel;
struct drm_bridge *bridge;
- int ret;
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &bridge);
- if (ret) {
+ bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(bridge)) {
/* If nothing was connected in the DT, that's not an
* error.
*/
- if (ret == -ENODEV)
+ if (PTR_ERR(bridge) == -ENODEV)
return 0;
else
- return ret;
+ return PTR_ERR(bridge);
}
- if (panel)
- bridge = drm_panel_bridge_add_typed(panel,
- DRM_MODE_CONNECTOR_DPI);
-
return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index f6c16c5aee68..16abc3a3d601 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -50,13 +50,11 @@
#define DRIVER_PATCHLEVEL 0
/* Helper function for mapping the regs on a platform device. */
-void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
+void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index)
{
- struct resource *res;
void __iomem *map;
- res = platform_get_resource(dev, IORESOURCE_MEM, index);
- map = devm_ioremap_resource(&dev->dev, res);
+ map = devm_platform_ioremap_resource(pdev, index);
if (IS_ERR(map))
return map;
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index a185027911ce..a229da58962a 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1497,7 +1497,6 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dsi *dsi = dev_get_drvdata(dev);
struct vc4_dsi_encoder *vc4_dsi_encoder;
- struct drm_panel *panel;
const struct of_device_id *match;
dma_cap_mask_t dma_mask;
int ret;
@@ -1609,27 +1608,9 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0,
- &panel, &dsi->bridge);
- if (ret) {
- /* If the bridge or panel pointed by dev->of_node is not
- * enabled, just return 0 here so that we don't prevent the DRM
- * dev from being registered. Of course that means the DSI
- * encoder won't be exposed, but that's not a problem since
- * nothing is connected to it.
- */
- if (ret == -ENODEV)
- return 0;
-
- return ret;
- }
-
- if (panel) {
- dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel,
- DRM_MODE_CONNECTOR_DSI);
- if (IS_ERR(dsi->bridge))
- return PTR_ERR(dsi->bridge);
- }
+ dsi->bridge = devm_drm_of_get_bridge(dev, dev->of_node, 0, 0);
+ if (IS_ERR(dsi->bridge))
+ return PTR_ERR(dsi->bridge);
/* The esc clock rate is supposed to always be 100Mhz. */
ret = clk_set_rate(dsi->escape_clock, 100 * 1000000);
@@ -1667,8 +1648,7 @@ static void vc4_dsi_unbind(struct device *dev, struct device *master,
{
struct vc4_dsi *dsi = dev_get_drvdata(dev);
- if (dsi->bridge)
- pm_runtime_disable(dev);
+ pm_runtime_disable(dev);
/*
* Restore the bridge_chain so the bridge detach procedure can happen
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index ed8a4b7f8b6e..b284623e2863 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -1556,10 +1556,11 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi)
static irqreturn_t vc4_hdmi_hpd_irq_thread(int irq, void *priv)
{
struct vc4_hdmi *vc4_hdmi = priv;
- struct drm_device *dev = vc4_hdmi->connector.dev;
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_device *dev = connector->dev;
if (dev && dev->registered)
- drm_kms_helper_hotplug_event(dev);
+ drm_connector_helper_hpd_irq_event(connector);
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index bf38a7e319d1..a87eafa89e9f 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -38,6 +38,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_prime.h>
@@ -50,87 +51,11 @@
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
-static const struct drm_gem_object_funcs vgem_gem_object_funcs;
-
static struct vgem_device {
struct drm_device drm;
struct platform_device *platform;
} *vgem_device;
-static void vgem_gem_free_object(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj);
-
- kvfree(vgem_obj->pages);
- mutex_destroy(&vgem_obj->pages_lock);
-
- if (obj->import_attach)
- drm_prime_gem_destroy(obj, vgem_obj->table);
-
- drm_gem_object_release(obj);
- kfree(vgem_obj);
-}
-
-static vm_fault_t vgem_gem_fault(struct vm_fault *vmf)
-{
- struct vm_area_struct *vma = vmf->vma;
- struct drm_vgem_gem_object *obj = vma->vm_private_data;
- /* We don't use vmf->pgoff since that has the fake offset */
- unsigned long vaddr = vmf->address;
- vm_fault_t ret = VM_FAULT_SIGBUS;
- loff_t num_pages;
- pgoff_t page_offset;
- page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
-
- num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE);
-
- if (page_offset >= num_pages)
- return VM_FAULT_SIGBUS;
-
- mutex_lock(&obj->pages_lock);
- if (obj->pages) {
- get_page(obj->pages[page_offset]);
- vmf->page = obj->pages[page_offset];
- ret = 0;
- }
- mutex_unlock(&obj->pages_lock);
- if (ret) {
- struct page *page;
-
- page = shmem_read_mapping_page(
- file_inode(obj->base.filp)->i_mapping,
- page_offset);
- if (!IS_ERR(page)) {
- vmf->page = page;
- ret = 0;
- } else switch (PTR_ERR(page)) {
- case -ENOSPC:
- case -ENOMEM:
- ret = VM_FAULT_OOM;
- break;
- case -EBUSY:
- ret = VM_FAULT_RETRY;
- break;
- case -EFAULT:
- case -EINVAL:
- ret = VM_FAULT_SIGBUS;
- break;
- default:
- WARN_ON(PTR_ERR(page));
- ret = VM_FAULT_SIGBUS;
- break;
- }
-
- }
- return ret;
-}
-
-static const struct vm_operations_struct vgem_gem_vm_ops = {
- .fault = vgem_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
static int vgem_open(struct drm_device *dev, struct drm_file *file)
{
struct vgem_file *vfile;
@@ -159,266 +84,30 @@ static void vgem_postclose(struct drm_device *dev, struct drm_file *file)
kfree(vfile);
}
-static struct drm_vgem_gem_object *__vgem_gem_create(struct drm_device *dev,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = kzalloc(sizeof(*obj), GFP_KERNEL);
- if (!obj)
- return ERR_PTR(-ENOMEM);
-
- obj->base.funcs = &vgem_gem_object_funcs;
-
- ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE));
- if (ret) {
- kfree(obj);
- return ERR_PTR(ret);
- }
-
- mutex_init(&obj->pages_lock);
-
- return obj;
-}
-
-static void __vgem_gem_destroy(struct drm_vgem_gem_object *obj)
-{
- drm_gem_object_release(&obj->base);
- kfree(obj);
-}
-
-static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
- struct drm_file *file,
- unsigned int *handle,
- unsigned long size)
-{
- struct drm_vgem_gem_object *obj;
- int ret;
-
- obj = __vgem_gem_create(dev, size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
-
- ret = drm_gem_handle_create(file, &obj->base, handle);
- if (ret) {
- drm_gem_object_put(&obj->base);
- return ERR_PTR(ret);
- }
-
- return &obj->base;
-}
-
-static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
- struct drm_mode_create_dumb *args)
-{
- struct drm_gem_object *gem_object;
- u64 pitch, size;
-
- pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
- size = args->height * pitch;
- if (size == 0)
- return -EINVAL;
-
- gem_object = vgem_gem_create(dev, file, &args->handle, size);
- if (IS_ERR(gem_object))
- return PTR_ERR(gem_object);
-
- args->size = gem_object->size;
- args->pitch = pitch;
-
- drm_gem_object_put(gem_object);
-
- DRM_DEBUG("Created object of size %llu\n", args->size);
-
- return 0;
-}
-
static struct drm_ioctl_desc vgem_ioctls[] = {
DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
};
-static int vgem_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long flags = vma->vm_flags;
- int ret;
-
- ret = drm_gem_mmap(filp, vma);
- if (ret)
- return ret;
-
- /* Keep the WC mmaping set by drm_gem_mmap() but our pages
- * are ordinary and not special.
- */
- vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP;
- return 0;
-}
+DEFINE_DRM_GEM_FOPS(vgem_driver_fops);
-static const struct file_operations vgem_driver_fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = vgem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .compat_ioctl = drm_compat_ioctl,
- .release = drm_release,
-};
-
-static struct page **vgem_pin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (bo->pages_pin_count++ == 0) {
- struct page **pages;
-
- pages = drm_gem_get_pages(&bo->base);
- if (IS_ERR(pages)) {
- bo->pages_pin_count--;
- mutex_unlock(&bo->pages_lock);
- return pages;
- }
-
- bo->pages = pages;
- }
- mutex_unlock(&bo->pages_lock);
-
- return bo->pages;
-}
-
-static void vgem_unpin_pages(struct drm_vgem_gem_object *bo)
-{
- mutex_lock(&bo->pages_lock);
- if (--bo->pages_pin_count == 0) {
- drm_gem_put_pages(&bo->base, bo->pages, true, true);
- bo->pages = NULL;
- }
- mutex_unlock(&bo->pages_lock);
-}
-
-static int vgem_prime_pin(struct drm_gem_object *obj)
+static struct drm_gem_object *vgem_gem_create_object(struct drm_device *dev, size_t size)
{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
+ struct drm_gem_shmem_object *obj;
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
- /* Flush the object from the CPU cache so that importers can rely
- * on coherent indirect access via the exported dma-address.
+ /*
+ * vgem doesn't have any begin/end cpu access ioctls, therefore it must use
+ * coherent memory, or dma-buf sharing just won't work.
*/
- drm_clflush_pages(pages, n_pages);
-
- return 0;
-}
-
-static void vgem_prime_unpin(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vgem_unpin_pages(bo);
-}
-
-static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- return drm_prime_pages_to_sg(obj->dev, bo->pages, bo->base.size >> PAGE_SHIFT);
-}
-
-static struct drm_gem_object* vgem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf)
-{
- struct vgem_device *vgem = container_of(dev, typeof(*vgem), drm);
-
- return drm_gem_prime_import_dev(dev, dma_buf, &vgem->platform->dev);
-}
-
-static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
- struct dma_buf_attachment *attach, struct sg_table *sg)
-{
- struct drm_vgem_gem_object *obj;
- int npages;
-
- obj = __vgem_gem_create(dev, attach->dmabuf->size);
- if (IS_ERR(obj))
- return ERR_CAST(obj);
+ obj->map_wc = true;
- npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;
-
- obj->table = sg;
- obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
- if (!obj->pages) {
- __vgem_gem_destroy(obj);
- return ERR_PTR(-ENOMEM);
- }
-
- obj->pages_pin_count++; /* perma-pinned */
- drm_prime_sg_to_page_array(obj->table, obj->pages, npages);
return &obj->base;
}
-static int vgem_prime_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
- long n_pages = obj->size >> PAGE_SHIFT;
- struct page **pages;
- void *vaddr;
-
- pages = vgem_pin_pages(bo);
- if (IS_ERR(pages))
- return PTR_ERR(pages);
-
- vaddr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL));
- if (!vaddr)
- return -ENOMEM;
- dma_buf_map_set_vaddr(map, vaddr);
-
- return 0;
-}
-
-static void vgem_prime_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
-{
- struct drm_vgem_gem_object *bo = to_vgem_bo(obj);
-
- vunmap(map->vaddr);
- vgem_unpin_pages(bo);
-}
-
-static int vgem_prime_mmap(struct drm_gem_object *obj,
- struct vm_area_struct *vma)
-{
- int ret;
-
- if (obj->size < vma->vm_end - vma->vm_start)
- return -EINVAL;
-
- if (!obj->filp)
- return -ENODEV;
-
- ret = call_mmap(obj->filp, vma);
- if (ret)
- return ret;
-
- vma_set_file(vma, obj->filp);
- vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
- return 0;
-}
-
-static const struct drm_gem_object_funcs vgem_gem_object_funcs = {
- .free = vgem_gem_free_object,
- .pin = vgem_prime_pin,
- .unpin = vgem_prime_unpin,
- .get_sg_table = vgem_prime_get_sg_table,
- .vmap = vgem_prime_vmap,
- .vunmap = vgem_prime_vunmap,
- .vm_ops = &vgem_gem_vm_ops,
-};
-
static const struct drm_driver vgem_driver = {
.driver_features = DRIVER_GEM | DRIVER_RENDER,
.open = vgem_open,
@@ -427,13 +116,8 @@ static const struct drm_driver vgem_driver = {
.num_ioctls = ARRAY_SIZE(vgem_ioctls),
.fops = &vgem_driver_fops,
- .dumb_create = vgem_gem_dumb_create,
-
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
- .gem_prime_import = vgem_prime_import,
- .gem_prime_import_sg_table = vgem_prime_import_sg_table,
- .gem_prime_mmap = vgem_prime_mmap,
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .gem_create_object = vgem_gem_create_object,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index c2b20e0ee030..b6954e2f75e6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -52,6 +52,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
vgdev->has_resource_assign_uuid);
virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob);
+ virtio_gpu_add_bool(m, "context init", vgdev->has_context_init);
virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets);
virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts);
if (vgdev->host_visible_region.len) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index ed85a7863256..749db18dcfa2 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -29,6 +29,8 @@
#include <linux/module.h>
#include <linux/console.h>
#include <linux/pci.h>
+#include <linux/poll.h>
+#include <linux/wait.h>
#include <drm/drm.h>
#include <drm/drm_aperture.h>
@@ -155,6 +157,35 @@ static void virtio_gpu_config_changed(struct virtio_device *vdev)
schedule_work(&vgdev->config_changed_work);
}
+static __poll_t virtio_gpu_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct drm_file *drm_file = filp->private_data;
+ struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct drm_device *dev = drm_file->minor->dev;
+ struct drm_pending_event *e = NULL;
+ __poll_t mask = 0;
+
+ if (!vfpriv->ring_idx_mask)
+ return drm_poll(filp, wait);
+
+ poll_wait(filp, &drm_file->event_wait, wait);
+
+ if (!list_empty(&drm_file->event_list)) {
+ spin_lock_irq(&dev->event_lock);
+ e = list_first_entry(&drm_file->event_list,
+ struct drm_pending_event, link);
+ drm_file->event_space += e->event->length;
+ list_del(&e->link);
+ spin_unlock_irq(&dev->event_lock);
+
+ kfree(e);
+ mask |= EPOLLIN | EPOLLRDNORM;
+ }
+
+ return mask;
+}
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -172,6 +203,7 @@ static unsigned int features[] = {
VIRTIO_GPU_F_EDID,
VIRTIO_GPU_F_RESOURCE_UUID,
VIRTIO_GPU_F_RESOURCE_BLOB,
+ VIRTIO_GPU_F_CONTEXT_INIT,
};
static struct virtio_driver virtio_gpu_driver = {
.feature_table = features,
@@ -193,7 +225,17 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>");
MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
MODULE_AUTHOR("Alon Levy");
-DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops);
+static const struct file_operations virtio_gpu_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .compat_ioctl = drm_compat_ioctl,
+ .poll = virtio_gpu_poll,
+ .read = drm_read,
+ .llseek = noop_llseek,
+ .mmap = drm_gem_mmap
+};
static const struct drm_driver driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC,
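
A hedged usage note: a process that opted into ring polling via VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK only needs poll() on the DRM fd, since virtio_gpu_poll() above consumes the internal fence event itself rather than handing it to read(). Variable names below are placeholders.

	struct pollfd pfd = { .fd = drm_fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN)) {
		/* A fence on one of the opted-in rings has signaled; this
		 * event type carries no payload to read back. */
	}
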
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index d4e610a44e12..e0265fe74aa5 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -26,6 +26,7 @@
#ifndef VIRTIO_DRV_H
#define VIRTIO_DRV_H
+#include <linux/dma-direction.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
@@ -54,6 +55,9 @@
#define STATE_OK 1
#define STATE_ERR 2
+#define MAX_CAPSET_ID 63
+#define MAX_RINGS 64
+
struct virtio_gpu_object_params {
unsigned long size;
bool dumb;
@@ -134,9 +138,18 @@ struct virtio_gpu_fence_driver {
spinlock_t lock;
};
+#define VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL 0x10000000
+struct virtio_gpu_fence_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+};
+
struct virtio_gpu_fence {
struct dma_fence f;
+ uint32_t ring_idx;
uint64_t fence_id;
+ bool emit_fence_info;
+ struct virtio_gpu_fence_event *e;
struct virtio_gpu_fence_driver *drv;
struct list_head node;
};
@@ -233,6 +246,7 @@ struct virtio_gpu_device {
bool has_resource_assign_uuid;
bool has_resource_blob;
bool has_host_visible;
+ bool has_context_init;
struct virtio_shm_region host_visible_region;
struct drm_mm host_visible_mm;
@@ -244,6 +258,7 @@ struct virtio_gpu_device {
struct virtio_gpu_drv_capset *capsets;
uint32_t num_capsets;
+ uint64_t capset_id_mask;
struct list_head cap_cache;
/* protects uuid state when exporting */
@@ -254,12 +269,16 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ uint32_t context_init;
bool context_created;
+ uint32_t num_rings;
+ uint64_t base_fence_ctx;
+ uint64_t ring_idx_mask;
struct mutex context_lock;
};
/* virtgpu_ioctl.c */
-#define DRM_VIRTIO_NUM_IOCTLS 11
+#define DRM_VIRTIO_NUM_IOCTLS 12
extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file);
@@ -337,7 +356,8 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
struct virtio_gpu_drv_cap_cache **cache_p);
int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev);
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name);
+ uint32_t context_init, uint32_t nlen,
+ const char *name);
void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t id);
void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
@@ -417,8 +437,9 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev,
int index);
/* virtgpu_fence.c */
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(
- struct virtio_gpu_device *vgdev);
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx);
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
struct virtio_gpu_ctrl_hdr *cmd_hdr,
struct virtio_gpu_fence *fence);
@@ -459,4 +480,11 @@ bool virtio_gpu_is_vram(struct virtio_gpu_object *bo);
int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr);
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir);
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir);
+
#endif
diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c
index d28e25e8409b..f28357dbde35 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fence.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fence.c
@@ -71,22 +71,29 @@ static const struct dma_fence_ops virtio_gpu_fence_ops = {
.timeline_value_str = virtio_gpu_timeline_value_str,
};
-struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev)
+struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
+ uint64_t base_fence_ctx,
+ uint32_t ring_idx)
{
+ uint64_t fence_context = base_fence_ctx + ring_idx;
struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence),
GFP_KERNEL);
+
if (!fence)
return fence;
fence->drv = drv;
+ fence->ring_idx = ring_idx;
+ fence->emit_fence_info = !(base_fence_ctx == drv->context);
/* This only partially initializes the fence because the seqno is
* unknown yet. The fence must not be used outside of the driver
* until virtio_gpu_fence_emit is called.
*/
- dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, drv->context,
- 0);
+
+ dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
+ fence_context, 0);
return fence;
}
@@ -108,6 +115,13 @@ void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);
+
+ /* Only currently defined fence param. */
+ if (fence->emit_fence_info) {
+ cmd_hdr->flags |=
+ cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
+ cmd_hdr->ring_idx = (u8)fence->ring_idx;
+ }
}
void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
@@ -138,11 +152,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
continue;
dma_fence_signal_locked(&curr->f);
+ if (curr->e) {
+ drm_send_event(vgdev->ddev, &curr->e->base);
+ curr->e = NULL;
+ }
+
list_del(&curr->node);
dma_fence_put(&curr->f);
}
dma_fence_signal_locked(&signaled->f);
+ if (signaled->e) {
+ drm_send_event(vgdev->ddev, &signaled->e->base);
+ signaled->e = NULL;
+ }
+
list_del(&signaled->node);
dma_fence_put(&signaled->f);
break;
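
The per-ring fence contexts keep fences from different rings of the same virtgpu context unordered relative to each other. An illustrative sketch of the mapping used by virtio_gpu_fence_alloc() above:

	/* Done once per virtgpu context, when NUM_RINGS is set: */
	u64 base_fence_ctx = dma_fence_context_alloc(num_rings);

	/* Each ring then gets its own dma-fence context ... */
	u64 fence_context = base_fence_ctx + ring_idx;

	/* ... and emit_fence_info is true exactly when this differs from the
	 * global drv->context, so the ring index is reported to the host. */
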
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 5c1ad1596889..5618a1d5879c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -38,20 +38,60 @@
VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \
VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)
+static int virtio_gpu_fence_event_create(struct drm_device *dev,
+ struct drm_file *file,
+ struct virtio_gpu_fence *fence,
+ uint32_t ring_idx)
+{
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct virtio_gpu_fence_event *e = NULL;
+ int ret;
+
+ if (!(vfpriv->ring_idx_mask & (1 << ring_idx)))
+ return 0;
+
+ e = kzalloc(sizeof(*e), GFP_KERNEL);
+ if (!e)
+ return -ENOMEM;
+
+ e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED_INTERNAL;
+ e->event.length = sizeof(e->event);
+
+ ret = drm_event_reserve_init(dev, file, &e->base, &e->event);
+ if (ret)
+ goto free;
+
+ fence->e = e;
+ return 0;
+free:
+ kfree(e);
+ return ret;
+}
+
+/* Must be called with &virtio_gpu_fpriv.context_lock held. */
+static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_fpriv *vfpriv)
+{
+ char dbgname[TASK_COMM_LEN];
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ vfpriv->context_init, strlen(dbgname),
+ dbgname);
+
+ vfpriv->context_created = true;
+}
+
void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
- char dbgname[TASK_COMM_LEN];
mutex_lock(&vfpriv->context_lock);
if (vfpriv->context_created)
goto out_unlock;
- get_task_comm(dbgname, current);
- virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
- strlen(dbgname), dbgname);
- vfpriv->context_created = true;
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
out_unlock:
mutex_unlock(&vfpriv->context_lock);
@@ -89,6 +129,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
int in_fence_fd = exbuf->fence_fd;
int out_fence_fd = -1;
void *buf;
+ uint64_t fence_ctx;
+ uint32_t ring_idx;
+
+ fence_ctx = vgdev->fence_drv.context;
+ ring_idx = 0;
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
@@ -96,6 +141,17 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS))
return -EINVAL;
+ if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) {
+ if (exbuf->ring_idx >= vfpriv->num_rings)
+ return -EINVAL;
+
+ if (!vfpriv->base_fence_ctx)
+ return -EINVAL;
+
+ fence_ctx = vfpriv->base_fence_ctx;
+ ring_idx = exbuf->ring_idx;
+ }
+
exbuf->fence_fd = -1;
virtio_gpu_create_context(dev, file);
@@ -163,12 +219,16 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_memdup;
}
- out_fence = virtio_gpu_fence_alloc(vgdev);
+ out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx);
if(!out_fence) {
ret = -ENOMEM;
goto out_unresv;
}
+ ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx);
+ if (ret)
+ goto out_unresv;
+
if (out_fence_fd >= 0) {
sync_file = sync_file_create(&out_fence->f);
if (!sync_file) {
@@ -226,6 +286,12 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
case VIRTGPU_PARAM_CROSS_DEVICE:
value = vgdev->has_resource_assign_uuid ? 1 : 0;
break;
+ case VIRTGPU_PARAM_CONTEXT_INIT:
+ value = vgdev->has_context_init ? 1 : 0;
+ break;
+ case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs:
+ value = vgdev->capset_id_mask;
+ break;
default:
return -EINVAL;
}
@@ -278,7 +344,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
if (params.size == 0)
params.size = PAGE_SIZE;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence)
return -ENOMEM;
ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence);
@@ -357,7 +423,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (ret != 0)
goto err_put_free;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0);
if (!fence) {
ret = -ENOMEM;
goto err_unlock;
@@ -417,7 +483,8 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
goto err_put_free;
ret = -ENOMEM;
- fence = virtio_gpu_fence_alloc(vgdev);
+ fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!fence)
goto err_unlock;
@@ -662,6 +729,113 @@ static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev,
return 0;
}
+static int virtio_gpu_context_init_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file)
+{
+ int ret = 0;
+ uint32_t num_params, i, param, value;
+ uint64_t valid_ring_mask;
+ size_t len;
+ struct drm_virtgpu_context_set_param *ctx_set_params = NULL;
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ struct drm_virtgpu_context_init *args = data;
+
+ num_params = args->num_params;
+ len = num_params * sizeof(struct drm_virtgpu_context_set_param);
+
+ if (!vgdev->has_context_init || !vgdev->has_virgl_3d)
+ return -EINVAL;
+
+ /* Number of unique parameters supported at this time. */
+ if (num_params > 3)
+ return -EINVAL;
+
+ ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params),
+ len);
+
+ if (IS_ERR(ctx_set_params))
+ return PTR_ERR(ctx_set_params);
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created) {
+ ret = -EEXIST;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < num_params; i++) {
+ param = ctx_set_params[i].param;
+ value = ctx_set_params[i].value;
+
+ switch (param) {
+ case VIRTGPU_CONTEXT_PARAM_CAPSET_ID:
+ if (value > MAX_CAPSET_ID) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if ((vgdev->capset_id_mask & (1 << value)) == 0) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ /* Context capset ID already set */
+ if (vfpriv->context_init &
+ VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->context_init |= value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_NUM_RINGS:
+ if (vfpriv->base_fence_ctx) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ if (value > MAX_RINGS) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->base_fence_ctx = dma_fence_context_alloc(value);
+ vfpriv->num_rings = value;
+ break;
+ case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK:
+ if (vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ vfpriv->ring_idx_mask = value;
+ break;
+ default:
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ if (vfpriv->ring_idx_mask) {
+ valid_ring_mask = 0;
+ for (i = 0; i < vfpriv->num_rings; i++)
+ valid_ring_mask |= 1 << i;
+
+ if (~valid_ring_mask & vfpriv->ring_idx_mask) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+ }
+
+ virtio_gpu_create_context_locked(vgdev, vfpriv);
+ virtio_gpu_notify(vgdev);
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+ kfree(ctx_set_params);
+ return ret;
+}
+
struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl,
DRM_RENDER_ALLOW),
@@ -698,4 +872,7 @@ struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = {
DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB,
virtio_gpu_resource_create_blob_ioctl,
DRM_RENDER_ALLOW),
+
+ DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl,
+ DRM_RENDER_ALLOW),
};
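
For reference, a hypothetical userspace call sequence for the new ioctl, run once per DRM fd before the first execbuffer. The param names and struct fields match the code above; the ioctl macro name and the capset value are assumptions.

	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID,       .value = capset_id },
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS,       .value = 4 },
		{ .param = VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK, .value = 0xf },
	};
	struct drm_virtgpu_context_init init = {
		.num_params     = 3,	/* at most 3 unique params are accepted */
		.ctx_set_params = (uintptr_t)params,
	};

	drmIoctl(fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
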
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index f3379059f324..21f410901694 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -65,6 +65,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
int num_capsets)
{
int i, ret;
+ bool invalid_capset_id = false;
vgdev->capsets = kcalloc(num_capsets,
sizeof(struct virtio_gpu_drv_capset),
@@ -78,19 +79,34 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
- if (ret == 0) {
+ /*
+ * Capability ids are defined in the virtio-gpu spec and are
+ * between 1 and 63, inclusive.
+ */
+ if (!vgdev->capsets[i].id ||
+ vgdev->capsets[i].id > MAX_CAPSET_ID)
+ invalid_capset_id = true;
+
+ if (ret == 0)
DRM_ERROR("timed out waiting for cap set %d\n", i);
+ else if (invalid_capset_id)
+ DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id);
+
+ if (ret == 0 || invalid_capset_id) {
spin_lock(&vgdev->display_info_lock);
kfree(vgdev->capsets);
vgdev->capsets = NULL;
spin_unlock(&vgdev->display_info_lock);
return;
}
+
+ vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id;
DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n",
i, vgdev->capsets[i].id,
vgdev->capsets[i].max_version,
vgdev->capsets[i].max_size);
}
+
vgdev->num_capsets = num_capsets;
}
@@ -175,13 +191,19 @@ int virtio_gpu_init(struct drm_device *dev)
(unsigned long)vgdev->host_visible_region.addr,
(unsigned long)vgdev->host_visible_region.len);
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) {
+ vgdev->has_context_init = true;
+ }
- DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible\n",
+ DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible",
vgdev->has_virgl_3d ? '+' : '-',
vgdev->has_edid ? '+' : '-',
vgdev->has_resource_blob ? '+' : '-',
vgdev->has_host_visible ? '+' : '-');
+ DRM_INFO("features: %ccontext_init\n",
+ vgdev->has_context_init ? '+' : '-');
+
ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL);
if (ret) {
DRM_ERROR("failed to find virt queues\n");
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a49fd9480381..6d3cc9e238a4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -256,7 +256,8 @@ static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane,
return 0;
if (bo->dumb && (plane->state->fb != new_state->fb)) {
- vgfb->fence = virtio_gpu_fence_alloc(vgdev);
+ vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context,
+ 0);
if (!vgfb->fence)
return -ENOMEM;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c
index e45dbf14b307..55d80b77d9b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_prime.c
+++ b/drivers/gpu/drm/virtio/virtgpu_prime.c
@@ -43,13 +43,41 @@ static int virtgpu_virtio_get_uuid(struct dma_buf *buf,
return 0;
}
+static struct sg_table *
+virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo))
+ return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir);
+
+ return drm_gem_map_dma_buf(attach, dir);
+}
+
+static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+ if (virtio_gpu_is_vram(bo)) {
+ virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir);
+ return;
+ }
+
+ drm_gem_unmap_dma_buf(attach, sgt, dir);
+}
+
static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = {
.ops = {
.cache_sgt_mapping = true,
.attach = virtio_dma_buf_attach,
.detach = drm_gem_map_detach,
- .map_dma_buf = drm_gem_map_dma_buf,
- .unmap_dma_buf = drm_gem_unmap_dma_buf,
+ .map_dma_buf = virtgpu_gem_map_dma_buf,
+ .unmap_dma_buf = virtgpu_gem_unmap_dma_buf,
.release = drm_gem_dmabuf_release,
.mmap = drm_gem_dmabuf_mmap,
.vmap = drm_gem_dmabuf_vmap,
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 2e71e91278b4..7c052efe8836 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -91,9 +91,7 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_vbuffer *vbuf;
- vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
- if (!vbuf)
- return ERR_PTR(-ENOMEM);
+ vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
BUG_ON(size > MAX_INLINE_CMD_SIZE ||
size < sizeof(struct virtio_gpu_ctrl_hdr));
@@ -147,10 +145,6 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
resp_size, resp_buf, cb);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
*vbuffer_p = vbuf;
return (struct virtio_gpu_command *)vbuf->buf;
}
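
Because __GFP_NOFAIL makes the slab allocation above block until it succeeds rather than return NULL, the ERR_PTR plumbing in both helpers becomes dead code, which is why it is removed. A hedged sketch of the resulting contract (the helper name is invented for illustration):

/* Sketch only: a __GFP_NOFAIL allocation never returns NULL, so callers
 * need no error path. Reasonable here because vbufs are small and bounded. */
static struct virtio_gpu_vbuffer *
virtio_gpu_get_vbuf_nofail(struct virtio_gpu_device *vgdev)
{
	return kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL);
}
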
@@ -205,7 +199,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
struct list_head reclaim_list;
struct virtio_gpu_vbuffer *entry, *tmp;
struct virtio_gpu_ctrl_hdr *resp;
- u64 fence_id = 0;
+ u64 fence_id;
INIT_LIST_HEAD(&reclaim_list);
spin_lock(&vgdev->ctrlq.qlock);
@@ -232,23 +226,14 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
- u64 f = le64_to_cpu(resp->fence_id);
-
- if (fence_id > f) {
- DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
- __func__, fence_id, f);
- } else {
- fence_id = f;
- }
+ fence_id = le64_to_cpu(resp->fence_id);
+ virtio_gpu_fence_event_process(vgdev, fence_id);
}
if (entry->resp_cb)
entry->resp_cb(vgdev, entry);
}
wake_up(&vgdev->ctrlq.ack_queue);
- if (fence_id)
- virtio_gpu_fence_event_process(vgdev, fence_id);
-
list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
if (entry->objs)
virtio_gpu_array_put_free_delayed(vgdev, entry->objs);
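
Fence completion is now reported per response inside the dequeue loop instead of once with the highest fence id seen, presumably because fence ids are no longer globally monotonic once contexts get their own timelines. A condensed sketch of the per-response handling, using only symbols visible in this hunk:

/* Condensed illustration of the per-response signalling above. */
if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE))
	virtio_gpu_fence_event_process(vgdev, le64_to_cpu(resp->fence_id));
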
@@ -917,7 +902,8 @@ int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
}
void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
- uint32_t nlen, const char *name)
+ uint32_t context_init, uint32_t nlen,
+ const char *name)
{
struct virtio_gpu_ctx_create *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -928,6 +914,7 @@ void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
cmd_p->hdr.ctx_id = cpu_to_le32(id);
cmd_p->nlen = cpu_to_le32(nlen);
+ cmd_p->context_init = cpu_to_le32(context_init);
strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
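
virtio_gpu_cmd_context_create() now forwards a context_init word to the host alongside the context id and debug name. In the context-init scheme the low bits of that word are expected to carry the capset id the context is created against; the mask name and wrapper below are assumptions for illustration, not taken from this hunk.

/* Hedged sketch: pack an assumed capset id into the low bits of context_init
 * before issuing the command shown above. Mask name/value are assumptions. */
#define VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK 0x000000ff

static void example_context_create(struct virtio_gpu_device *vgdev,
				   u32 ctx_id, u32 capset_id, const char *name)
{
	u32 context_init = capset_id & VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK;

	virtio_gpu_cmd_context_create(vgdev, ctx_id, context_init,
				      strlen(name), name);
}
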
diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c
index 5cc34e7330fa..6b45b0429fef 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vram.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vram.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
#include "virtgpu_drv.h"
+#include <linux/dma-mapping.h>
+
static void virtio_gpu_vram_free(struct drm_gem_object *obj)
{
struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@ -64,6 +66,65 @@ static int virtio_gpu_vram_mmap(struct drm_gem_object *obj,
return ret;
}
+struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo,
+ struct device *dev,
+ enum dma_data_direction dir)
+{
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+ struct sg_table *sgt;
+ dma_addr_t addr;
+ int ret;
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt)
+ return ERR_PTR(-ENOMEM);
+
+ if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) {
+ // Virtio devices can access the dma-buf via its UUID. Return a stub
+ // sg_table so the dma-buf API still works.
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) {
+ ret = -EIO;
+ goto out;
+ }
+ return sgt;
+ }
+
+ ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ addr = dma_map_resource(dev, vram->vram_node.start,
+ vram->vram_node.size, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ ret = dma_mapping_error(dev, addr);
+ if (ret)
+ goto out;
+
+ sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0);
+ sg_dma_address(sgt->sgl) = addr;
+ sg_dma_len(sgt->sgl) = vram->vram_node.size;
+
+ return sgt;
+out:
+ sg_free_table(sgt);
+ kfree(sgt);
+ return ERR_PTR(ret);
+}
+
+void virtio_gpu_vram_unmap_dma_buf(struct device *dev,
+ struct sg_table *sgt,
+ enum dma_data_direction dir)
+{
+ if (sgt->nents) {
+ dma_unmap_resource(dev, sg_dma_address(sgt->sgl),
+ sg_dma_len(sgt->sgl), dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ }
+ sg_free_table(sgt);
+ kfree(sgt);
+}
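
The VRAM-backed object has no struct pages to hand to an importer, so the exporter above maps the host-visible region with dma_map_resource() and returns a one-entry sg_table that carries only DMA addresses. A condensed sketch of that idiom, with the helper name invented for illustration:

/* Condensed illustration of the mapping idiom above: one dma_map_resource()
 * of the carved-out region, stored in a single-entry sg_table whose page
 * pointer stays NULL because the region has no struct pages. */
struct sg_table *example_map_vram(struct device *dev, phys_addr_t start,
				  size_t size, enum dma_data_direction dir)
{
	struct sg_table *sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	dma_addr_t addr;

	if (!sgt || sg_alloc_table(sgt, 1, GFP_KERNEL)) {
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	addr = dma_map_resource(dev, start, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr)) {
		sg_free_table(sgt);
		kfree(sgt);
		return ERR_PTR(-ENOMEM);
	}

	sg_set_page(sgt->sgl, NULL, size, 0);
	sg_dma_address(sgt->sgl) = addr;
	sg_dma_len(sgt->sgl) = size;
	return sgt;
}
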
+
static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = {
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
diff --git a/drivers/gpu/drm/vmwgfx/ttm_memory.c b/drivers/gpu/drm/vmwgfx/ttm_memory.c
index edd17c30d5a5..7f7fe35fc21d 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_memory.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_memory.c
@@ -468,7 +468,6 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
struct ttm_mem_zone *zone;
unsigned int i;
- flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
glob->swap_queue = NULL;
for (i = 0; i < glob->num_zones; ++i) {
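
The explicit flush before destroy_workqueue() was redundant: destroy_workqueue() already drains every queued work item before tearing the workqueue down, so the teardown order stays correct without it. A minimal illustration:

/* Illustrative teardown: destroy_workqueue() drains outstanding work itself,
 * so a preceding flush_workqueue() adds nothing. */
static void example_teardown(struct workqueue_struct *wq)
{
	destroy_workqueue(wq);
}
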
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 9e3e1429db94..fd007f1c1776 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -94,7 +94,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -103,8 +102,8 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, placement, &ctx);
@@ -136,7 +135,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
struct ttm_operation_ctx ctx = {interruptible, false };
struct ttm_buffer_object *bo = &buf->base;
int ret;
- uint32_t new_flags;
vmw_execbuf_release_pinned_bo(dev_priv);
@@ -145,8 +143,8 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
goto err;
if (buf->base.pin_count > 0) {
- ret = ttm_bo_mem_compat(&vmw_vram_gmr_placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &vmw_vram_gmr_placement)
+ ? 0 : -EINVAL;
goto out_unreserve;
}
@@ -208,7 +206,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
struct ttm_placement placement;
struct ttm_place place;
int ret = 0;
- uint32_t new_flags;
place = vmw_vram_placement.placement[0];
place.lpfn = bo->resource->num_pages;
@@ -236,8 +233,8 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
}
if (buf->base.pin_count > 0)
- ret = ttm_bo_mem_compat(&placement, bo->resource,
- &new_flags) == true ? 0 : -EINVAL;
+ ret = ttm_resource_compat(bo->resource, &placement)
+ ? 0 : -EINVAL;
else
ret = ttm_bo_validate(bo, &placement, &ctx);
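
All three pin helpers switch from the removed ttm_bo_mem_compat()/new_flags pair to ttm_resource_compat(), which simply asks whether the buffer's current resource already satisfies the requested placement. A sketch of the shared pattern (helper name invented for illustration):

/* Sketch of the pattern shared by the hunks above: an already-pinned BO is
 * only checked for placement compatibility; otherwise do a full validate. */
static int example_pin_or_check(struct ttm_buffer_object *bo,
				struct ttm_placement *placement,
				struct ttm_operation_ctx *ctx)
{
	if (bo->pin_count > 0)
		return ttm_resource_compat(bo->resource, placement) ?
			0 : -EINVAL;

	return ttm_bo_validate(bo, placement, ctx);
}
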
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index ab9a1750e1df..bfd71c86faa5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,7 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
@@ -666,7 +666,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
/* TTM currently doesn't fully support SEV encryption. */
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -EINVAL;
if (vmw_force_coherent)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index e50fb82a3030..2aceac7856e2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -28,7 +28,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/hypervisor.h>
#include <drm/drm_ioctl.h>
@@ -160,7 +160,7 @@ static unsigned long vmw_port_hb_out(struct rpc_channel *channel,
unsigned long msg_len = strlen(msg);
/* HB port can't access encrypted memory. */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_high;
u32 channel_id = (channel->channel_id << 16);
@@ -216,7 +216,7 @@ static unsigned long vmw_port_hb_in(struct rpc_channel *channel, char *reply,
unsigned long si, di, eax, ebx, ecx, edx;
/* HB port can't access encrypted memory */
- if (hb && !mem_encrypt_active()) {
+ if (hb && !cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
unsigned long bp = channel->cookie_low;
u32 channel_id = (channel->channel_id << 16);
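
Every mem_encrypt_active() call becomes a cc_platform_has(CC_ATTR_MEM_ENCRYPT) query, so the guards keep the same meaning: skip high-bandwidth backdoor I/O (and, in vmwgfx_drv.c, non-coherent DMA modes) when guest memory is encrypted. Roughly, with an assumed helper name:

#include <linux/cc_platform.h>

/* Illustrative guard (assumed helper name): true when the host side may read
 * guest memory directly, i.e. when memory encryption is not active. */
static bool example_host_can_read_guest_memory(void)
{
	return !cc_platform_has(CC_ATTR_MEM_ENCRYPT);
}
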
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 8b8991e3ed2d..e899a936a42a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -522,14 +522,8 @@ static void vmw_ttm_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
struct vmw_ttm_tt *vmw_be =
container_of(ttm, struct vmw_ttm_tt, dma_ttm);
- vmw_ttm_unbind(bdev, ttm);
- ttm_tt_destroy_common(bdev, ttm);
vmw_ttm_unmap_dma(vmw_be);
- if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
- ttm_tt_fini(&vmw_be->dma_ttm);
- else
- ttm_tt_fini(ttm);
-
+ ttm_tt_fini(ttm);
if (vmw_be->mob)
vmw_mob_destroy(vmw_be->mob);
@@ -574,6 +568,8 @@ static void vmw_ttm_unpopulate(struct ttm_device *bdev,
dma_ttm);
unsigned int i;
+ vmw_ttm_unbind(bdev, ttm);
+
if (vmw_tt->mob) {
vmw_mob_destroy(vmw_tt->mob);
vmw_tt->mob = NULL;
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
deleted file mode 100644
index aa8594190b50..000000000000
--- a/drivers/gpu/drm/zte/Kconfig
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-config DRM_ZTE
- tristate "DRM Support for ZTE SoCs"
- depends on DRM && ARCH_ZX
- select DRM_KMS_CMA_HELPER
- select DRM_KMS_HELPER
- select SND_SOC_HDMI_CODEC if SND_SOC
- select VIDEOMODE_HELPERS
- help
- Choose this option to enable DRM on ZTE ZX SoCs.
diff --git a/drivers/gpu/drm/zte/Makefile b/drivers/gpu/drm/zte/Makefile
deleted file mode 100644
index b6d966d849dd..000000000000
--- a/drivers/gpu/drm/zte/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0
-zxdrm-y := \
- zx_drm_drv.o \
- zx_hdmi.o \
- zx_plane.o \
- zx_tvenc.o \
- zx_vga.o \
- zx_vou.o
-
-obj-$(CONFIG_DRM_ZTE) += zxdrm.o
diff --git a/drivers/gpu/drm/zte/zx_common_regs.h b/drivers/gpu/drm/zte/zx_common_regs.h
deleted file mode 100644
index b7b996db129d..000000000000
--- a/drivers/gpu/drm/zte/zx_common_regs.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_COMMON_REGS_H__
-#define __ZX_COMMON_REGS_H__
-
-/* CSC registers */
-#define CSC_CTRL0 0x30
-#define CSC_COV_MODE_SHIFT 16
-#define CSC_COV_MODE_MASK (0xffff << CSC_COV_MODE_SHIFT)
-#define CSC_BT601_IMAGE_RGB2YCBCR 0
-#define CSC_BT601_IMAGE_YCBCR2RGB 1
-#define CSC_BT601_VIDEO_RGB2YCBCR 2
-#define CSC_BT601_VIDEO_YCBCR2RGB 3
-#define CSC_BT709_IMAGE_RGB2YCBCR 4
-#define CSC_BT709_IMAGE_YCBCR2RGB 5
-#define CSC_BT709_VIDEO_RGB2YCBCR 6
-#define CSC_BT709_VIDEO_YCBCR2RGB 7
-#define CSC_BT2020_IMAGE_RGB2YCBCR 8
-#define CSC_BT2020_IMAGE_YCBCR2RGB 9
-#define CSC_BT2020_VIDEO_RGB2YCBCR 10
-#define CSC_BT2020_VIDEO_YCBCR2RGB 11
-#define CSC_WORK_ENABLE BIT(0)
-
-#endif /* __ZX_COMMON_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.c b/drivers/gpu/drm/zte/zx_drm_drv.c
deleted file mode 100644
index 064056503ebb..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.c
+++ /dev/null
@@ -1,184 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/list.h>
-#include <linux/module.h>
-#include <linux/of_graph.h>
-#include <linux/of_platform.h>
-#include <linux/spinlock.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_gem_framebuffer_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vou.h"
-
-static const struct drm_mode_config_funcs zx_drm_mode_config_funcs = {
- .fb_create = drm_gem_fb_create,
- .atomic_check = drm_atomic_helper_check,
- .atomic_commit = drm_atomic_helper_commit,
-};
-
-DEFINE_DRM_GEM_CMA_FOPS(zx_drm_fops);
-
-static const struct drm_driver zx_drm_driver = {
- .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
- DRM_GEM_CMA_DRIVER_OPS,
- .fops = &zx_drm_fops,
- .name = "zx-vou",
- .desc = "ZTE VOU Controller DRM",
- .date = "20160811",
- .major = 1,
- .minor = 0,
-};
-
-static int zx_drm_bind(struct device *dev)
-{
- struct drm_device *drm;
- int ret;
-
- drm = drm_dev_alloc(&zx_drm_driver, dev);
- if (IS_ERR(drm))
- return PTR_ERR(drm);
-
- dev_set_drvdata(dev, drm);
-
- drm_mode_config_init(drm);
- drm->mode_config.min_width = 16;
- drm->mode_config.min_height = 16;
- drm->mode_config.max_width = 4096;
- drm->mode_config.max_height = 4096;
- drm->mode_config.funcs = &zx_drm_mode_config_funcs;
-
- ret = component_bind_all(dev, drm);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to bind all components: %d\n", ret);
- goto out_unregister;
- }
-
- ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to init vblank: %d\n", ret);
- goto out_unbind;
- }
-
- drm_mode_config_reset(drm);
- drm_kms_helper_poll_init(drm);
-
- ret = drm_dev_register(drm, 0);
- if (ret)
- goto out_poll_fini;
-
- drm_fbdev_generic_setup(drm, 32);
-
- return 0;
-
-out_poll_fini:
- drm_kms_helper_poll_fini(drm);
- drm_mode_config_cleanup(drm);
-out_unbind:
- component_unbind_all(dev, drm);
-out_unregister:
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
- return ret;
-}
-
-static void zx_drm_unbind(struct device *dev)
-{
- struct drm_device *drm = dev_get_drvdata(dev);
-
- drm_dev_unregister(drm);
- drm_kms_helper_poll_fini(drm);
- drm_atomic_helper_shutdown(drm);
- drm_mode_config_cleanup(drm);
- component_unbind_all(dev, drm);
- dev_set_drvdata(dev, NULL);
- drm_dev_put(drm);
-}
-
-static const struct component_master_ops zx_drm_master_ops = {
- .bind = zx_drm_bind,
- .unbind = zx_drm_unbind,
-};
-
-static int compare_of(struct device *dev, void *data)
-{
- return dev->of_node == data;
-}
-
-static int zx_drm_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct device_node *parent = dev->of_node;
- struct device_node *child;
- struct component_match *match = NULL;
- int ret;
-
- ret = devm_of_platform_populate(dev);
- if (ret)
- return ret;
-
- for_each_available_child_of_node(parent, child)
- component_match_add(dev, &match, compare_of, child);
-
- return component_master_add_with_match(dev, &zx_drm_master_ops, match);
-}
-
-static int zx_drm_remove(struct platform_device *pdev)
-{
- component_master_del(&pdev->dev, &zx_drm_master_ops);
- return 0;
-}
-
-static const struct of_device_id zx_drm_of_match[] = {
- { .compatible = "zte,zx296718-vou", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_drm_of_match);
-
-static struct platform_driver zx_drm_platform_driver = {
- .probe = zx_drm_probe,
- .remove = zx_drm_remove,
- .driver = {
- .name = "zx-drm",
- .of_match_table = zx_drm_of_match,
- },
-};
-
-static struct platform_driver *drivers[] = {
- &zx_crtc_driver,
- &zx_hdmi_driver,
- &zx_tvenc_driver,
- &zx_vga_driver,
- &zx_drm_platform_driver,
-};
-
-static int zx_drm_init(void)
-{
- return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_init(zx_drm_init);
-
-static void zx_drm_exit(void)
-{
- platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
-}
-module_exit(zx_drm_exit);
-
-MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
-MODULE_DESCRIPTION("ZTE ZX VOU DRM driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/zte/zx_drm_drv.h b/drivers/gpu/drm/zte/zx_drm_drv.h
deleted file mode 100644
index 80cdaf479c74..000000000000
--- a/drivers/gpu/drm/zte/zx_drm_drv.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_DRM_DRV_H__
-#define __ZX_DRM_DRV_H__
-
-extern struct platform_driver zx_crtc_driver;
-extern struct platform_driver zx_hdmi_driver;
-extern struct platform_driver zx_tvenc_driver;
-extern struct platform_driver zx_vga_driver;
-
-static inline u32 zx_readl(void __iomem *reg)
-{
- return readl_relaxed(reg);
-}
-
-static inline void zx_writel(void __iomem *reg, u32 val)
-{
- writel_relaxed(val, reg);
-}
-
-static inline void zx_writel_mask(void __iomem *reg, u32 mask, u32 val)
-{
- u32 tmp;
-
- tmp = zx_readl(reg);
- tmp = (tmp & ~mask) | (val & mask);
- zx_writel(reg, tmp);
-}
-
-#endif /* __ZX_DRM_DRV_H__ */
diff --git a/drivers/gpu/drm/zte/zx_hdmi.c b/drivers/gpu/drm/zte/zx_hdmi.c
deleted file mode 100644
index cd79ca0a92a9..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi.c
+++ /dev/null
@@ -1,760 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/hdmi.h>
-#include <linux/irq.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_edid.h>
-#include <drm/drm_of.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include <sound/hdmi-codec.h>
-
-#include "zx_hdmi_regs.h"
-#include "zx_vou.h"
-
-#define ZX_HDMI_INFOFRAME_SIZE 31
-#define DDC_SEGMENT_ADDR 0x30
-
-struct zx_hdmi_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_hdmi {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_hdmi_i2c *ddc;
- struct device *dev;
- struct drm_device *drm;
- void __iomem *mmio;
- struct clk *cec_clk;
- struct clk *osc_clk;
- struct clk *xclk;
- bool sink_is_hdmi;
- bool sink_has_audio;
- struct platform_device *audio_pdev;
-};
-
-#define to_zx_hdmi(x) container_of(x, struct zx_hdmi, x)
-
-static inline u8 hdmi_readb(struct zx_hdmi *hdmi, u16 offset)
-{
- return readl_relaxed(hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb(struct zx_hdmi *hdmi, u16 offset, u8 val)
-{
- writel_relaxed(val, hdmi->mmio + offset * 4);
-}
-
-static inline void hdmi_writeb_mask(struct zx_hdmi *hdmi, u16 offset,
- u8 mask, u8 val)
-{
- u8 tmp;
-
- tmp = hdmi_readb(hdmi, offset);
- tmp = (tmp & ~mask) | (val & mask);
- hdmi_writeb(hdmi, offset, tmp);
-}
-
-static int zx_hdmi_infoframe_trans(struct zx_hdmi *hdmi,
- union hdmi_infoframe *frame, u8 fsel)
-{
- u8 buffer[ZX_HDMI_INFOFRAME_SIZE];
- int num;
- int i;
-
- hdmi_writeb(hdmi, TPI_INFO_FSEL, fsel);
-
- num = hdmi_infoframe_pack(frame, buffer, ZX_HDMI_INFOFRAME_SIZE);
- if (num < 0) {
- DRM_DEV_ERROR(hdmi->dev, "failed to pack infoframe: %d\n", num);
- return num;
- }
-
- for (i = 0; i < num; i++)
- hdmi_writeb(hdmi, TPI_INFO_B0 + i, buffer[i]);
-
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_RPT,
- TPI_INFO_TRANS_RPT);
- hdmi_writeb_mask(hdmi, TPI_INFO_EN, TPI_INFO_TRANS_EN,
- TPI_INFO_TRANS_EN);
-
- return num;
-}
-
-static int zx_hdmi_config_video_vsi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get vendor infoframe: %d\n",
- ret);
- return ret;
- }
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_VSIF);
-}
-
-static int zx_hdmi_config_video_avi(struct zx_hdmi *hdmi,
- struct drm_display_mode *mode)
-{
- union hdmi_infoframe frame;
- int ret;
-
- ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
- &hdmi->connector,
- mode);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to get avi infoframe: %d\n",
- ret);
- return ret;
- }
-
- /* We always use YUV444 for HDMI output. */
- frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AVI);
-}
-
-static void zx_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- if (hdmi->sink_is_hdmi) {
- zx_hdmi_config_video_avi(hdmi, mode);
- zx_hdmi_config_video_vsi(hdmi, mode);
- }
-}
-
-static void zx_hdmi_phy_start(struct zx_hdmi *hdmi)
-{
- /* Copy from ZTE BSP code */
- hdmi_writeb(hdmi, 0x222, 0x0);
- hdmi_writeb(hdmi, 0x224, 0x4);
- hdmi_writeb(hdmi, 0x909, 0x0);
- hdmi_writeb(hdmi, 0x7b0, 0x90);
- hdmi_writeb(hdmi, 0x7b1, 0x00);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b8, 0xaa);
- hdmi_writeb(hdmi, 0x7b2, 0xa7);
- hdmi_writeb(hdmi, 0x7b3, 0x0f);
- hdmi_writeb(hdmi, 0x7b4, 0x0f);
- hdmi_writeb(hdmi, 0x7b5, 0x55);
- hdmi_writeb(hdmi, 0x7b7, 0x03);
- hdmi_writeb(hdmi, 0x7b9, 0x12);
- hdmi_writeb(hdmi, 0x7ba, 0x32);
- hdmi_writeb(hdmi, 0x7bc, 0x68);
- hdmi_writeb(hdmi, 0x7be, 0x40);
- hdmi_writeb(hdmi, 0x7bf, 0x84);
- hdmi_writeb(hdmi, 0x7c1, 0x0f);
- hdmi_writeb(hdmi, 0x7c8, 0x02);
- hdmi_writeb(hdmi, 0x7c9, 0x03);
- hdmi_writeb(hdmi, 0x7ca, 0x40);
- hdmi_writeb(hdmi, 0x7dc, 0x31);
- hdmi_writeb(hdmi, 0x7e2, 0x04);
- hdmi_writeb(hdmi, 0x7e0, 0x06);
- hdmi_writeb(hdmi, 0x7cb, 0x68);
- hdmi_writeb(hdmi, 0x7f9, 0x02);
- hdmi_writeb(hdmi, 0x7b6, 0x02);
- hdmi_writeb(hdmi, 0x7f3, 0x0);
-}
-
-static void zx_hdmi_hw_enable(struct zx_hdmi *hdmi)
-{
- /* Enable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, CLKPWD_PDIDCK);
-
- /* Enable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, FUNC_HDMI_EN);
-
- /* Enable deep color packet */
- hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
-
- /* Enable HDMI/MHL mode for output */
- hdmi_writeb_mask(hdmi, TEST_TXCTRL, TEST_TXCTRL_HDMI_MODE,
- TEST_TXCTRL_HDMI_MODE);
-
- /* Configure reg_qc_sel */
- hdmi_writeb(hdmi, HDMICTL4, 0x3);
-
- /* Enable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT,
- INTR1_MONITOR_DETECT);
-
- /* Start up phy */
- zx_hdmi_phy_start(hdmi);
-}
-
-static void zx_hdmi_hw_disable(struct zx_hdmi *hdmi)
-{
- /* Disable interrupt */
- hdmi_writeb_mask(hdmi, INTR1_MASK, INTR1_MONITOR_DETECT, 0);
-
- /* Disable deep color packet */
- hdmi_writeb_mask(hdmi, P2T_CTRL, P2T_DC_PKT_EN, P2T_DC_PKT_EN);
-
- /* Disable HDMI for TX */
- hdmi_writeb_mask(hdmi, FUNC_SEL, FUNC_HDMI_EN, 0);
-
- /* Disable pclk */
- hdmi_writeb_mask(hdmi, CLKPWD, CLKPWD_PDIDCK, 0);
-}
-
-static void zx_hdmi_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- clk_prepare_enable(hdmi->cec_clk);
- clk_prepare_enable(hdmi->osc_clk);
- clk_prepare_enable(hdmi->xclk);
-
- zx_hdmi_hw_enable(hdmi);
-
- vou_inf_enable(VOU_HDMI, encoder->crtc);
-}
-
-static void zx_hdmi_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(encoder);
-
- vou_inf_disable(VOU_HDMI, encoder->crtc);
-
- zx_hdmi_hw_disable(hdmi);
-
- clk_disable_unprepare(hdmi->xclk);
- clk_disable_unprepare(hdmi->osc_clk);
- clk_disable_unprepare(hdmi->cec_clk);
-}
-
-static const struct drm_encoder_helper_funcs zx_hdmi_encoder_helper_funcs = {
- .enable = zx_hdmi_encoder_enable,
- .disable = zx_hdmi_encoder_disable,
- .mode_set = zx_hdmi_encoder_mode_set,
-};
-
-static int zx_hdmi_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
- struct edid *edid;
- int ret;
-
- edid = drm_get_edid(connector, &hdmi->ddc->adap);
- if (!edid)
- return 0;
-
- hdmi->sink_is_hdmi = drm_detect_hdmi_monitor(edid);
- hdmi->sink_has_audio = drm_detect_monitor_audio(edid);
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_hdmi_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_hdmi_connector_helper_funcs = {
- .get_modes = zx_hdmi_connector_get_modes,
- .mode_valid = zx_hdmi_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_hdmi_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_hdmi *hdmi = to_zx_hdmi(connector);
-
- return (hdmi_readb(hdmi, TPI_HPD_RSEN) & TPI_HPD_CONNECTION) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_hdmi_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_hdmi_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_hdmi_register(struct drm_device *drm, struct zx_hdmi *hdmi)
-{
- struct drm_encoder *encoder = &hdmi->encoder;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TMDS);
- drm_encoder_helper_add(encoder, &zx_hdmi_encoder_helper_funcs);
-
- hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- drm_connector_init_with_ddc(drm, &hdmi->connector,
- &zx_hdmi_connector_funcs,
- DRM_MODE_CONNECTOR_HDMIA,
- &hdmi->ddc->adap);
- drm_connector_helper_add(&hdmi->connector,
- &zx_hdmi_connector_helper_funcs);
-
- drm_connector_attach_encoder(&hdmi->connector, encoder);
-
- return 0;
-}
-
-static irqreturn_t zx_hdmi_irq_thread(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
-
- drm_helper_hpd_irq_event(hdmi->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_hdmi_irq_handler(int irq, void *dev_id)
-{
- struct zx_hdmi *hdmi = dev_id;
- u8 lstat;
-
- lstat = hdmi_readb(hdmi, L1_INTR_STAT);
-
- /* Monitor detect/HPD interrupt */
- if (lstat & L1_INTR_STAT_INTR1) {
- u8 stat;
-
- stat = hdmi_readb(hdmi, INTR1_STAT);
- hdmi_writeb(hdmi, INTR1_STAT, stat);
-
- if (stat & INTR1_MONITOR_DETECT)
- return IRQ_WAKE_THREAD;
- }
-
- return IRQ_NONE;
-}
-
-static int zx_hdmi_audio_startup(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_encoder *encoder = &hdmi->encoder;
-
- vou_inf_hdmi_audio_sel(encoder->crtc, VOU_HDMI_AUD_SPDIF);
-
- return 0;
-}
-
-static void zx_hdmi_audio_shutdown(struct device *dev, void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- /* Disable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, 0);
-}
-
-static inline int zx_hdmi_audio_get_n(unsigned int fs)
-{
- unsigned int n;
-
- if (fs && (fs % 44100) == 0)
- n = 6272 * (fs / 44100);
- else
- n = fs * 128 / 1000;
-
- return n;
-}
-
-static int zx_hdmi_audio_hw_params(struct device *dev,
- void *data,
- struct hdmi_codec_daifmt *daifmt,
- struct hdmi_codec_params *params)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct hdmi_audio_infoframe *cea = &params->cea;
- union hdmi_infoframe frame;
- int n;
-
- /* We only support spdif for now */
- if (daifmt->fmt != HDMI_SPDIF) {
- DRM_DEV_ERROR(hdmi->dev, "invalid daifmt %d\n", daifmt->fmt);
- return -EINVAL;
- }
-
- switch (params->sample_width) {
- case 16:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_16BIT);
- break;
- case 20:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_20BIT);
- break;
- case 24:
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, SPDIF_SAMPLE_SIZE_MASK,
- SPDIF_SAMPLE_SIZE_24BIT);
- break;
- default:
- DRM_DEV_ERROR(hdmi->dev, "invalid sample width %d\n",
- params->sample_width);
- return -EINVAL;
- }
-
- /* CTS is calculated by hardware, and we only need to take care of N */
- n = zx_hdmi_audio_get_n(params->sample_rate);
- hdmi_writeb(hdmi, N_SVAL1, n & 0xff);
- hdmi_writeb(hdmi, N_SVAL2, (n >> 8) & 0xff);
- hdmi_writeb(hdmi, N_SVAL3, (n >> 16) & 0xf);
-
- /* Enable spdif mode */
- hdmi_writeb_mask(hdmi, AUD_MODE, SPDIF_EN, SPDIF_EN);
-
- /* Enable audio input */
- hdmi_writeb_mask(hdmi, AUD_EN, AUD_IN_EN, AUD_IN_EN);
-
- memcpy(&frame.audio, cea, sizeof(*cea));
-
- return zx_hdmi_infoframe_trans(hdmi, &frame, FSEL_AUDIO);
-}
-
-static int zx_hdmi_audio_mute(struct device *dev, void *data,
- bool enable, int direction)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- if (enable)
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE,
- TPI_AUD_MUTE);
- else
- hdmi_writeb_mask(hdmi, TPI_AUD_CONFIG, TPI_AUD_MUTE, 0);
-
- return 0;
-}
-
-static int zx_hdmi_audio_get_eld(struct device *dev, void *data,
- uint8_t *buf, size_t len)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
- struct drm_connector *connector = &hdmi->connector;
-
- memcpy(buf, connector->eld, min(sizeof(connector->eld), len));
-
- return 0;
-}
-
-static const struct hdmi_codec_ops zx_hdmi_codec_ops = {
- .audio_startup = zx_hdmi_audio_startup,
- .hw_params = zx_hdmi_audio_hw_params,
- .audio_shutdown = zx_hdmi_audio_shutdown,
- .mute_stream = zx_hdmi_audio_mute,
- .get_eld = zx_hdmi_audio_get_eld,
- .no_capture_mute = 1,
-};
-
-static struct hdmi_codec_pdata zx_hdmi_codec_pdata = {
- .ops = &zx_hdmi_codec_ops,
- .spdif = 1,
-};
-
-static int zx_hdmi_audio_register(struct zx_hdmi *hdmi)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(hdmi->dev, HDMI_CODEC_DRV_NAME,
- PLATFORM_DEVID_AUTO,
- &zx_hdmi_codec_pdata,
- sizeof(zx_hdmi_codec_pdata));
- if (IS_ERR(pdev))
- return PTR_ERR(pdev);
-
- hdmi->audio_pdev = pdev;
-
- return 0;
-}
-
-static int zx_hdmi_i2c_read(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- int retry = 0;
- int ret = 0;
-
- /* Bits [9:8] of bytes */
- hdmi_writeb(hdmi, ZX_DDC_DIN_CNT2, (len >> 8) & 0xff);
- /* Bits [7:0] of bytes */
- hdmi_writeb(hdmi, ZX_DDC_DIN_CNT1, len & 0xff);
-
- /* Clear FIFO */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK, DDC_CMD_CLEAR_FIFO);
-
- /* Kick off the read */
- hdmi_writeb_mask(hdmi, ZX_DDC_CMD, DDC_CMD_MASK,
- DDC_CMD_SEQUENTIAL_READ);
-
- while (len > 0) {
- int cnt, i;
-
- /* FIFO needs some time to get ready */
- usleep_range(500, 1000);
-
- cnt = hdmi_readb(hdmi, ZX_DDC_DOUT_CNT) & DDC_DOUT_CNT_MASK;
- if (cnt == 0) {
- if (++retry > 5) {
- DRM_DEV_ERROR(hdmi->dev,
- "DDC FIFO read timed out!");
- return -ETIMEDOUT;
- }
- continue;
- }
-
- for (i = 0; i < cnt; i++)
- *buf++ = hdmi_readb(hdmi, ZX_DDC_DATA);
- len -= cnt;
- }
-
- return ret;
-}
-
-static int zx_hdmi_i2c_write(struct zx_hdmi *hdmi, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
- if ((msg->len != 1) ||
- ((msg->addr != DDC_ADDR) && (msg->addr != DDC_SEGMENT_ADDR)))
- return -EINVAL;
-
- if (msg->addr == DDC_SEGMENT_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_SEGM, msg->addr << 1);
- else if (msg->addr == DDC_ADDR)
- hdmi_writeb(hdmi, ZX_DDC_ADDR, msg->addr << 1);
-
- hdmi_writeb(hdmi, ZX_DDC_OFFSET, msg->buf[0]);
-
- return 0;
-}
-
-static int zx_hdmi_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_hdmi *hdmi = i2c_get_adapdata(adap);
- struct zx_hdmi_i2c *ddc = hdmi->ddc;
- int i, ret = 0;
-
- mutex_lock(&ddc->lock);
-
- /* Enable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, HW_DDC_MASTER);
-
- for (i = 0; i < num; i++) {
- DRM_DEV_DEBUG(hdmi->dev,
- "xfer: num: %d/%d, len: %d, flags: %#x\n",
- i + 1, num, msgs[i].len, msgs[i].flags);
-
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_hdmi_i2c_read(hdmi, &msgs[i]);
- else
- ret = zx_hdmi_i2c_write(hdmi, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- /* Disable DDC master access */
- hdmi_writeb_mask(hdmi, TPI_DDC_MASTER_EN, HW_DDC_MASTER, 0);
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_hdmi_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm zx_hdmi_algorithm = {
- .master_xfer = zx_hdmi_i2c_xfer,
- .functionality = zx_hdmi_i2c_func,
-};
-
-static int zx_hdmi_ddc_register(struct zx_hdmi *hdmi)
-{
- struct i2c_adapter *adap;
- struct zx_hdmi_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(hdmi->dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- hdmi->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = hdmi->dev;
- adap->algo = &zx_hdmi_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx hdmi i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(hdmi->dev, "failed to add I2C adapter: %d\n",
- ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, hdmi);
-
- return 0;
-}
-
-static int zx_hdmi_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_hdmi *hdmi;
- int irq;
- int ret;
-
- hdmi = devm_kzalloc(dev, sizeof(*hdmi), GFP_KERNEL);
- if (!hdmi)
- return -ENOMEM;
-
- hdmi->dev = dev;
- hdmi->drm = drm;
-
- dev_set_drvdata(dev, hdmi);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hdmi->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(hdmi->mmio)) {
- ret = PTR_ERR(hdmi->mmio);
- DRM_DEV_ERROR(dev, "failed to remap hdmi region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- hdmi->cec_clk = devm_clk_get(hdmi->dev, "osc_cec");
- if (IS_ERR(hdmi->cec_clk)) {
- ret = PTR_ERR(hdmi->cec_clk);
- DRM_DEV_ERROR(dev, "failed to get cec_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->osc_clk = devm_clk_get(hdmi->dev, "osc_clk");
- if (IS_ERR(hdmi->osc_clk)) {
- ret = PTR_ERR(hdmi->osc_clk);
- DRM_DEV_ERROR(dev, "failed to get osc_clk: %d\n", ret);
- return ret;
- }
-
- hdmi->xclk = devm_clk_get(hdmi->dev, "xclk");
- if (IS_ERR(hdmi->xclk)) {
- ret = PTR_ERR(hdmi->xclk);
- DRM_DEV_ERROR(dev, "failed to get xclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_ddc_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_audio_register(hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register audio: %d\n", ret);
- return ret;
- }
-
- ret = zx_hdmi_register(drm, hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register hdmi: %d\n", ret);
- return ret;
- }
-
- ret = devm_request_threaded_irq(dev, irq, zx_hdmi_irq_handler,
- zx_hdmi_irq_thread, IRQF_SHARED,
- dev_name(dev), hdmi);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_hdmi_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_hdmi *hdmi = dev_get_drvdata(dev);
-
- hdmi->connector.funcs->destroy(&hdmi->connector);
- hdmi->encoder.funcs->destroy(&hdmi->encoder);
-
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
-}
-
-static const struct component_ops zx_hdmi_component_ops = {
- .bind = zx_hdmi_bind,
- .unbind = zx_hdmi_unbind,
-};
-
-static int zx_hdmi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_hdmi_component_ops);
-}
-
-static int zx_hdmi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_hdmi_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_hdmi_of_match[] = {
- { .compatible = "zte,zx296718-hdmi", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_hdmi_of_match);
-
-struct platform_driver zx_hdmi_driver = {
- .probe = zx_hdmi_probe,
- .remove = zx_hdmi_remove,
- .driver = {
- .name = "zx-hdmi",
- .of_match_table = zx_hdmi_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_hdmi_regs.h b/drivers/gpu/drm/zte/zx_hdmi_regs.h
deleted file mode 100644
index 397949e64eff..000000000000
--- a/drivers/gpu/drm/zte/zx_hdmi_regs.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_HDMI_REGS_H__
-#define __ZX_HDMI_REGS_H__
-
-#define FUNC_SEL 0x000b
-#define FUNC_HDMI_EN BIT(0)
-#define CLKPWD 0x000d
-#define CLKPWD_PDIDCK BIT(2)
-#define P2T_CTRL 0x0066
-#define P2T_DC_PKT_EN BIT(7)
-#define L1_INTR_STAT 0x007e
-#define L1_INTR_STAT_INTR1 BIT(0)
-#define INTR1_STAT 0x008f
-#define INTR1_MASK 0x0095
-#define INTR1_MONITOR_DETECT (BIT(5) | BIT(6))
-#define ZX_DDC_ADDR 0x00ed
-#define ZX_DDC_SEGM 0x00ee
-#define ZX_DDC_OFFSET 0x00ef
-#define ZX_DDC_DIN_CNT1 0x00f0
-#define ZX_DDC_DIN_CNT2 0x00f1
-#define ZX_DDC_CMD 0x00f3
-#define DDC_CMD_MASK 0xf
-#define DDC_CMD_CLEAR_FIFO 0x9
-#define DDC_CMD_SEQUENTIAL_READ 0x2
-#define ZX_DDC_DATA 0x00f4
-#define ZX_DDC_DOUT_CNT 0x00f5
-#define DDC_DOUT_CNT_MASK 0x1f
-#define TEST_TXCTRL 0x00f7
-#define TEST_TXCTRL_HDMI_MODE BIT(1)
-#define HDMICTL4 0x0235
-#define TPI_HPD_RSEN 0x063b
-#define TPI_HPD_CONNECTION (BIT(1) | BIT(2))
-#define TPI_INFO_FSEL 0x06bf
-#define FSEL_AVI 0
-#define FSEL_GBD 1
-#define FSEL_AUDIO 2
-#define FSEL_SPD 3
-#define FSEL_MPEG 4
-#define FSEL_VSIF 5
-#define TPI_INFO_B0 0x06c0
-#define TPI_INFO_EN 0x06df
-#define TPI_INFO_TRANS_EN BIT(7)
-#define TPI_INFO_TRANS_RPT BIT(6)
-#define TPI_DDC_MASTER_EN 0x06f8
-#define HW_DDC_MASTER BIT(7)
-#define N_SVAL1 0xa03
-#define N_SVAL2 0xa04
-#define N_SVAL3 0xa05
-#define AUD_EN 0xa13
-#define AUD_IN_EN BIT(0)
-#define AUD_MODE 0xa14
-#define SPDIF_EN BIT(1)
-#define TPI_AUD_CONFIG 0xa62
-#define SPDIF_SAMPLE_SIZE_SHIFT 6
-#define SPDIF_SAMPLE_SIZE_MASK (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_16BIT (0x1 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_20BIT (0x2 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define SPDIF_SAMPLE_SIZE_24BIT (0x3 << SPDIF_SAMPLE_SIZE_SHIFT)
-#define TPI_AUD_MUTE BIT(4)
-
-#endif /* __ZX_HDMI_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
deleted file mode 100644
index 93bcca428e35..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ /dev/null
@@ -1,537 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <drm/drm_atomic.h>
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_modeset_helper_vtables.h>
-#include <drm/drm_plane_helper.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_plane_regs.h"
-#include "zx_vou.h"
-
-static const uint32_t gl_formats[] = {
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ARGB4444,
-};
-
-static const uint32_t vl_formats[] = {
- DRM_FORMAT_NV12, /* Semi-planar YUV420 */
- DRM_FORMAT_YUV420, /* Planar YUV420 */
- DRM_FORMAT_YUYV, /* Packed YUV422 */
- DRM_FORMAT_YVYU,
- DRM_FORMAT_UYVY,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YUV444, /* YUV444 8bit */
- /*
- * TODO: add formats below that HW supports:
- * - YUV420 P010
- * - YUV420 Hantro
- * - YUV444 10bit
- */
-};
-
-#define FRAC_16_16(mult, div) (((mult) << 16) / (div))
-
-static int zx_vl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
- int min_scale = FRAC_16_16(1, 8);
- int max_scale = FRAC_16_16(8, 1);
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- min_scale, max_scale,
- true, true);
-}
-
-static int zx_vl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- return VL_FMT_YUV420;
- case DRM_FORMAT_YUV420:
- return VL_YUV420_PLANAR | VL_FMT_YUV420;
- case DRM_FORMAT_YUYV:
- return VL_YUV422_YUYV | VL_FMT_YUV422;
- case DRM_FORMAT_YVYU:
- return VL_YUV422_YVYU | VL_FMT_YUV422;
- case DRM_FORMAT_UYVY:
- return VL_YUV422_UYVY | VL_FMT_YUV422;
- case DRM_FORMAT_VYUY:
- return VL_YUV422_VYUY | VL_FMT_YUV422;
- case DRM_FORMAT_YUV444:
- return VL_FMT_YUV444_8BIT;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_vl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + VL_CTRL0, VL_UPDATE, VL_UPDATE);
-}
-
-static inline void zx_vl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_VL_ENABLE_CFG, 1);
-}
-
-static int zx_vl_rsz_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_NV12:
- case DRM_FORMAT_YUV420:
- return RSZ_VL_FMT_YCBCR420;
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return RSZ_VL_FMT_YCBCR422;
- case DRM_FORMAT_YUV444:
- return RSZ_VL_FMT_YCBCR444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline u32 rsz_step_value(u32 src, u32 dst)
-{
- u32 val = 0;
-
- if (src == dst)
- val = 0;
- else if (src < dst)
- val = RSZ_PARA_STEP((src << 16) / dst);
- else if (src > dst)
- val = RSZ_DATA_STEP(src / dst) |
- RSZ_PARA_STEP(((src << 16) / dst) & 0xffff);
-
- return val;
-}
-
-static void zx_vl_rsz_setup(struct zx_plane *zplane, uint32_t format,
- u32 src_w, u32 src_h, u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
- u32 src_chroma_w = src_w;
- u32 src_chroma_h = src_h;
- int fmt;
-
- /* Set up source and destination resolution */
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- /* Configure data format for VL RSZ */
- fmt = zx_vl_rsz_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(rsz + RSZ_VL_CTRL_CFG, RSZ_VL_FMT_MASK, fmt);
-
- /* Calculate Chroma height and width */
- if (fmt == RSZ_VL_FMT_YCBCR420) {
- src_chroma_w = src_w >> 1;
- src_chroma_h = src_h >> 1;
- } else if (fmt == RSZ_VL_FMT_YCBCR422) {
- src_chroma_w = src_w >> 1;
- }
-
- /* Set up Luma and Chroma step registers */
- zx_writel(rsz + RSZ_VL_LUMA_HOR, rsz_step_value(src_w, dst_w));
- zx_writel(rsz + RSZ_VL_LUMA_VER, rsz_step_value(src_h, dst_h));
- zx_writel(rsz + RSZ_VL_CHROMA_HOR, rsz_step_value(src_chroma_w, dst_w));
- zx_writel(rsz + RSZ_VL_CHROMA_VER, rsz_step_value(src_chroma_h, dst_h));
-
- zx_vl_rsz_set_update(zplane);
-}
-
-static void zx_vl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_rect *src = &new_state->src;
- struct drm_rect *dst = &new_state->dst;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *hbsc = zplane->hbsc;
- void __iomem *paddr_reg;
- dma_addr_t paddr;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- uint32_t format;
- int fmt;
- int i;
-
- if (!fb)
- return;
-
- format = fb->format->format;
-
- src_x = src->x1 >> 16;
- src_y = src->y1 >> 16;
- src_w = drm_rect_width(src) >> 16;
- src_h = drm_rect_height(src) >> 16;
-
- dst_x = dst->x1;
- dst_y = dst->y1;
- dst_w = drm_rect_width(dst);
- dst_h = drm_rect_height(dst);
-
- /* Set up data address registers for Y, Cb and Cr planes */
- paddr_reg = layer + VL_Y;
- for (i = 0; i < fb->format->num_planes; i++) {
- cma_obj = drm_fb_cma_get_gem_obj(fb, i);
- paddr = cma_obj->paddr + fb->offsets[i];
- paddr += src_y * fb->pitches[i];
- paddr += src_x * fb->format->cpp[i];
- zx_writel(paddr_reg, paddr);
- paddr_reg += 4;
- }
-
- /* Set up source height/width register */
- zx_writel(layer + VL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + VL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + VL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Strides of Cb and Cr planes should be identical */
- zx_writel(layer + VL_STRIDE, LUMA_STRIDE(fb->pitches[0]) |
- CHROMA_STRIDE(fb->pitches[1]));
-
- /* Set up video layer data format */
- fmt = zx_vl_get_fmt(format);
- if (fmt >= 0)
- zx_writel(layer + VL_CTRL1, fmt);
-
- /* Always use scaler since it exists (set for not bypass) */
- zx_writel_mask(layer + VL_CTRL2, VL_SCALER_BYPASS_MODE,
- VL_SCALER_BYPASS_MODE);
-
- zx_vl_rsz_setup(zplane, format, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_vl_set_update(zplane);
-}
-
-static void zx_plane_atomic_disable(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- void __iomem *hbsc = zplane->hbsc;
-
- zx_vou_layer_disable(plane, old_state);
-
- /* Disable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, 0);
-}
-
-static const struct drm_plane_helper_funcs zx_vl_plane_helper_funcs = {
- .atomic_check = zx_vl_plane_atomic_check,
- .atomic_update = zx_vl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static int zx_gl_plane_atomic_check(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct drm_framebuffer *fb = plane_state->fb;
- struct drm_crtc *crtc = plane_state->crtc;
- struct drm_crtc_state *crtc_state;
-
- if (!crtc || WARN_ON(!fb))
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state,
- crtc);
- if (WARN_ON(!crtc_state))
- return -EINVAL;
-
- /* nothing to check when disabling or disabled */
- if (!crtc_state->enable)
- return 0;
-
- /* plane must be enabled */
- if (!plane_state->crtc)
- return -EINVAL;
-
- return drm_atomic_helper_check_plane_state(plane_state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
-}
-
-static int zx_gl_get_fmt(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_ARGB8888:
- case DRM_FORMAT_XRGB8888:
- return GL_FMT_ARGB8888;
- case DRM_FORMAT_RGB888:
- return GL_FMT_RGB888;
- case DRM_FORMAT_RGB565:
- return GL_FMT_RGB565;
- case DRM_FORMAT_ARGB1555:
- return GL_FMT_ARGB1555;
- case DRM_FORMAT_ARGB4444:
- return GL_FMT_ARGB4444;
- default:
- WARN_ONCE(1, "invalid pixel format %d\n", format);
- return -EINVAL;
- }
-}
-
-static inline void zx_gl_set_update(struct zx_plane *zplane)
-{
- void __iomem *layer = zplane->layer;
-
- zx_writel_mask(layer + GL_CTRL0, GL_UPDATE, GL_UPDATE);
-}
-
-static inline void zx_gl_rsz_set_update(struct zx_plane *zplane)
-{
- zx_writel(zplane->rsz + RSZ_ENABLE_CFG, 1);
-}
-
-static void zx_gl_rsz_setup(struct zx_plane *zplane, u32 src_w, u32 src_h,
- u32 dst_w, u32 dst_h)
-{
- void __iomem *rsz = zplane->rsz;
-
- zx_writel(rsz + RSZ_SRC_CFG, RSZ_VER(src_h - 1) | RSZ_HOR(src_w - 1));
- zx_writel(rsz + RSZ_DEST_CFG, RSZ_VER(dst_h - 1) | RSZ_HOR(dst_w - 1));
-
- zx_gl_rsz_set_update(zplane);
-}
-
-static void zx_gl_plane_atomic_update(struct drm_plane *plane,
- struct drm_atomic_state *state)
-{
- struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
- plane);
- struct zx_plane *zplane = to_zx_plane(plane);
- struct drm_framebuffer *fb = new_state->fb;
- struct drm_gem_cma_object *cma_obj;
- void __iomem *layer = zplane->layer;
- void __iomem *csc = zplane->csc;
- void __iomem *hbsc = zplane->hbsc;
- u32 src_x, src_y, src_w, src_h;
- u32 dst_x, dst_y, dst_w, dst_h;
- unsigned int bpp;
- uint32_t format;
- dma_addr_t paddr;
- u32 stride;
- int fmt;
-
- if (!fb)
- return;
-
- format = fb->format->format;
- stride = fb->pitches[0];
-
- src_x = new_state->src_x >> 16;
- src_y = new_state->src_y >> 16;
- src_w = new_state->src_w >> 16;
- src_h = new_state->src_h >> 16;
-
- dst_x = new_state->crtc_x;
- dst_y = new_state->crtc_y;
- dst_w = new_state->crtc_w;
- dst_h = new_state->crtc_h;
-
- bpp = fb->format->cpp[0];
-
- cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
- paddr = cma_obj->paddr + fb->offsets[0];
- paddr += src_y * stride + src_x * bpp / 8;
- zx_writel(layer + GL_ADDR, paddr);
-
- /* Set up source height/width register */
- zx_writel(layer + GL_SRC_SIZE, GL_SRC_W(src_w) | GL_SRC_H(src_h));
-
- /* Set up start position register */
- zx_writel(layer + GL_POS_START, GL_POS_X(dst_x) | GL_POS_Y(dst_y));
-
- /* Set up end position register */
- zx_writel(layer + GL_POS_END,
- GL_POS_X(dst_x + dst_w) | GL_POS_Y(dst_y + dst_h));
-
- /* Set up stride register */
- zx_writel(layer + GL_STRIDE, stride & 0xffff);
-
- /* Set up graphic layer data format */
- fmt = zx_gl_get_fmt(format);
- if (fmt >= 0)
- zx_writel_mask(layer + GL_CTRL1, GL_DATA_FMT_MASK,
- fmt << GL_DATA_FMT_SHIFT);
-
- /* Initialize global alpha with a sane value */
- zx_writel_mask(layer + GL_CTRL2, GL_GLOBAL_ALPHA_MASK,
- 0xff << GL_GLOBAL_ALPHA_SHIFT);
-
- /* Setup CSC for the GL */
- if (dst_h > 720)
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- else
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT601_IMAGE_RGB2YCBCR << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, CSC_WORK_ENABLE);
-
- /* Always use scaler since it exists (set for not bypass) */
- zx_writel_mask(layer + GL_CTRL3, GL_SCALER_BYPASS_MODE,
- GL_SCALER_BYPASS_MODE);
-
- zx_gl_rsz_setup(zplane, src_w, src_h, dst_w, dst_h);
-
- /* Enable HBSC block */
- zx_writel_mask(hbsc + HBSC_CTRL0, HBSC_CTRL_EN, HBSC_CTRL_EN);
-
- zx_vou_layer_enable(plane);
-
- zx_gl_set_update(zplane);
-}
-
-static const struct drm_plane_helper_funcs zx_gl_plane_helper_funcs = {
- .atomic_check = zx_gl_plane_atomic_check,
- .atomic_update = zx_gl_plane_atomic_update,
- .atomic_disable = zx_plane_atomic_disable,
-};
-
-static const struct drm_plane_funcs zx_plane_funcs = {
- .update_plane = drm_atomic_helper_update_plane,
- .disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
-};
-
-void zx_plane_set_update(struct drm_plane *plane)
-{
- struct zx_plane *zplane = to_zx_plane(plane);
-
- /* Do nothing if the plane is not enabled */
- if (!plane->state->crtc)
- return;
-
- switch (plane->type) {
- case DRM_PLANE_TYPE_PRIMARY:
- zx_gl_rsz_set_update(zplane);
- zx_gl_set_update(zplane);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- zx_vl_rsz_set_update(zplane);
- zx_vl_set_update(zplane);
- break;
- default:
- WARN_ONCE(1, "unsupported plane type %d\n", plane->type);
- }
-}
-
-static void zx_plane_hbsc_init(struct zx_plane *zplane)
-{
- void __iomem *hbsc = zplane->hbsc;
-
- /*
- * Initialize HBSC block with a sane configuration per recommendation
- * from ZTE BSP code.
- */
- zx_writel(hbsc + HBSC_SATURATION, 0x200);
- zx_writel(hbsc + HBSC_HUE, 0x0);
- zx_writel(hbsc + HBSC_BRIGHT, 0x0);
- zx_writel(hbsc + HBSC_CONTRAST, 0x200);
-
- zx_writel(hbsc + HBSC_THRESHOLD_COL1, (0x3ac << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL2, (0x3c0 << 16) | 0x40);
- zx_writel(hbsc + HBSC_THRESHOLD_COL3, (0x3c0 << 16) | 0x40);
-}
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type)
-{
- const struct drm_plane_helper_funcs *helper;
- struct drm_plane *plane = &zplane->plane;
- struct device *dev = zplane->dev;
- const uint32_t *formats;
- unsigned int format_count;
- int ret;
-
- zx_plane_hbsc_init(zplane);
-
- switch (type) {
- case DRM_PLANE_TYPE_PRIMARY:
- helper = &zx_gl_plane_helper_funcs;
- formats = gl_formats;
- format_count = ARRAY_SIZE(gl_formats);
- break;
- case DRM_PLANE_TYPE_OVERLAY:
- helper = &zx_vl_plane_helper_funcs;
- formats = vl_formats;
- format_count = ARRAY_SIZE(vl_formats);
- break;
- default:
- return -ENODEV;
- }
-
- ret = drm_universal_plane_init(drm, plane, VOU_CRTC_MASK,
- &zx_plane_funcs, formats, format_count,
- NULL, type, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init universal plane: %d\n", ret);
- return ret;
- }
-
- drm_plane_helper_add(plane, helper);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/zte/zx_plane.h b/drivers/gpu/drm/zte/zx_plane.h
deleted file mode 100644
index 5a7cc8b3b985..000000000000
--- a/drivers/gpu/drm/zte/zx_plane.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_H__
-#define __ZX_PLANE_H__
-
-struct zx_plane {
- struct drm_plane plane;
- struct device *dev;
- void __iomem *layer;
- void __iomem *csc;
- void __iomem *hbsc;
- void __iomem *rsz;
- const struct vou_layer_bits *bits;
-};
-
-#define to_zx_plane(plane) container_of(plane, struct zx_plane, plane)
-
-int zx_plane_init(struct drm_device *drm, struct zx_plane *zplane,
- enum drm_plane_type type);
-void zx_plane_set_update(struct drm_plane *plane);
-
-#endif /* __ZX_PLANE_H__ */
diff --git a/drivers/gpu/drm/zte/zx_plane_regs.h b/drivers/gpu/drm/zte/zx_plane_regs.h
deleted file mode 100644
index ce830637a92d..000000000000
--- a/drivers/gpu/drm/zte/zx_plane_regs.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_PLANE_REGS_H__
-#define __ZX_PLANE_REGS_H__
-
-/* GL registers */
-#define GL_CTRL0 0x00
-#define GL_UPDATE BIT(5)
-#define GL_CTRL1 0x04
-#define GL_DATA_FMT_SHIFT 0
-#define GL_DATA_FMT_MASK (0xf << GL_DATA_FMT_SHIFT)
-#define GL_FMT_ARGB8888 0
-#define GL_FMT_RGB888 1
-#define GL_FMT_RGB565 2
-#define GL_FMT_ARGB1555 3
-#define GL_FMT_ARGB4444 4
-#define GL_CTRL2 0x08
-#define GL_GLOBAL_ALPHA_SHIFT 8
-#define GL_GLOBAL_ALPHA_MASK (0xff << GL_GLOBAL_ALPHA_SHIFT)
-#define GL_CTRL3 0x0c
-#define GL_SCALER_BYPASS_MODE BIT(0)
-#define GL_STRIDE 0x18
-#define GL_ADDR 0x1c
-#define GL_SRC_SIZE 0x38
-#define GL_SRC_W_SHIFT 16
-#define GL_SRC_W_MASK (0x3fff << GL_SRC_W_SHIFT)
-#define GL_SRC_H_SHIFT 0
-#define GL_SRC_H_MASK (0x3fff << GL_SRC_H_SHIFT)
-#define GL_POS_START 0x9c
-#define GL_POS_END 0xa0
-#define GL_POS_X_SHIFT 16
-#define GL_POS_X_MASK (0x1fff << GL_POS_X_SHIFT)
-#define GL_POS_Y_SHIFT 0
-#define GL_POS_Y_MASK (0x1fff << GL_POS_Y_SHIFT)
-
-#define GL_SRC_W(x) (((x) << GL_SRC_W_SHIFT) & GL_SRC_W_MASK)
-#define GL_SRC_H(x) (((x) << GL_SRC_H_SHIFT) & GL_SRC_H_MASK)
-#define GL_POS_X(x) (((x) << GL_POS_X_SHIFT) & GL_POS_X_MASK)
-#define GL_POS_Y(x) (((x) << GL_POS_Y_SHIFT) & GL_POS_Y_MASK)
-
-/* VL registers */
-#define VL_CTRL0 0x00
-#define VL_UPDATE BIT(3)
-#define VL_CTRL1 0x04
-#define VL_YUV420_PLANAR BIT(5)
-#define VL_YUV422_SHIFT 3
-#define VL_YUV422_YUYV (0 << VL_YUV422_SHIFT)
-#define VL_YUV422_YVYU (1 << VL_YUV422_SHIFT)
-#define VL_YUV422_UYVY (2 << VL_YUV422_SHIFT)
-#define VL_YUV422_VYUY (3 << VL_YUV422_SHIFT)
-#define VL_FMT_YUV420 0
-#define VL_FMT_YUV422 1
-#define VL_FMT_YUV420_P010 2
-#define VL_FMT_YUV420_HANTRO 3
-#define VL_FMT_YUV444_8BIT 4
-#define VL_FMT_YUV444_10BIT 5
-#define VL_CTRL2 0x08
-#define VL_SCALER_BYPASS_MODE BIT(0)
-#define VL_STRIDE 0x0c
-#define LUMA_STRIDE_SHIFT 16
-#define LUMA_STRIDE_MASK (0xffff << LUMA_STRIDE_SHIFT)
-#define CHROMA_STRIDE_SHIFT 0
-#define CHROMA_STRIDE_MASK (0xffff << CHROMA_STRIDE_SHIFT)
-#define VL_SRC_SIZE 0x10
-#define VL_Y 0x14
-#define VL_POS_START 0x30
-#define VL_POS_END 0x34
-
-#define LUMA_STRIDE(x) (((x) << LUMA_STRIDE_SHIFT) & LUMA_STRIDE_MASK)
-#define CHROMA_STRIDE(x) (((x) << CHROMA_STRIDE_SHIFT) & CHROMA_STRIDE_MASK)
-
-/* RSZ registers */
-#define RSZ_SRC_CFG 0x00
-#define RSZ_DEST_CFG 0x04
-#define RSZ_ENABLE_CFG 0x14
-
-#define RSZ_VL_LUMA_HOR 0x08
-#define RSZ_VL_LUMA_VER 0x0c
-#define RSZ_VL_CHROMA_HOR 0x10
-#define RSZ_VL_CHROMA_VER 0x14
-#define RSZ_VL_CTRL_CFG 0x18
-#define RSZ_VL_FMT_SHIFT 3
-#define RSZ_VL_FMT_MASK (0x3 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR420 (0x0 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR422 (0x1 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_FMT_YCBCR444 (0x2 << RSZ_VL_FMT_SHIFT)
-#define RSZ_VL_ENABLE_CFG 0x1c
-
-#define RSZ_VER_SHIFT 16
-#define RSZ_VER_MASK (0xffff << RSZ_VER_SHIFT)
-#define RSZ_HOR_SHIFT 0
-#define RSZ_HOR_MASK (0xffff << RSZ_HOR_SHIFT)
-
-#define RSZ_VER(x) (((x) << RSZ_VER_SHIFT) & RSZ_VER_MASK)
-#define RSZ_HOR(x) (((x) << RSZ_HOR_SHIFT) & RSZ_HOR_MASK)
-
-#define RSZ_DATA_STEP_SHIFT 16
-#define RSZ_DATA_STEP_MASK (0xffff << RSZ_DATA_STEP_SHIFT)
-#define RSZ_PARA_STEP_SHIFT 0
-#define RSZ_PARA_STEP_MASK (0xffff << RSZ_PARA_STEP_SHIFT)
-
-#define RSZ_DATA_STEP(x) (((x) << RSZ_DATA_STEP_SHIFT) & RSZ_DATA_STEP_MASK)
-#define RSZ_PARA_STEP(x) (((x) << RSZ_PARA_STEP_SHIFT) & RSZ_PARA_STEP_MASK)
-
-/* HBSC registers */
-#define HBSC_SATURATION 0x00
-#define HBSC_HUE 0x04
-#define HBSC_BRIGHT 0x08
-#define HBSC_CONTRAST 0x0c
-#define HBSC_THRESHOLD_COL1 0x10
-#define HBSC_THRESHOLD_COL2 0x14
-#define HBSC_THRESHOLD_COL3 0x18
-#define HBSC_CTRL0 0x28
-#define HBSC_CTRL_EN BIT(2)
-
-#endif /* __ZX_PLANE_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_tvenc.c b/drivers/gpu/drm/zte/zx_tvenc.c
deleted file mode 100644
index d8a89ba383bc..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc.c
+++ /dev/null
@@ -1,400 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_tvenc_regs.h"
-#include "zx_vou.h"
-
-struct zx_tvenc_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_tvenc {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct device *dev;
- void __iomem *mmio;
- const struct vou_inf *inf;
- struct zx_tvenc_pwrctrl pwrctrl;
-};
-
-#define to_zx_tvenc(x) container_of(x, struct zx_tvenc, x)
-
-struct zx_tvenc_mode {
- struct drm_display_mode mode;
- u32 video_info;
- u32 video_res;
- u32 field1_param;
- u32 field2_param;
- u32 burst_line_odd1;
- u32 burst_line_even1;
- u32 burst_line_odd2;
- u32 burst_line_even2;
- u32 line_timing_param;
- u32 weight_value;
- u32 blank_black_level;
- u32 burst_level;
- u32 control_param;
- u32 sub_carrier_phase1;
- u32 phase_line_incr_cvbs;
-};
-
-/*
- * The CRM cannot directly provide a suitable frequency, so we have to
- * request a multiplied rate from the CRM and use the divider in the VOU
- * to get the desired one.
- */
-#define TVENC_CLOCK_MULTIPLIER 4
-
-static const struct zx_tvenc_mode tvenc_mode_pal = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 12,
- .hsync_end = 720 + 12 + 2,
- .htotal = 720 + 12 + 2 + 130,
- .vdisplay = 576,
- .vsync_start = 576 + 2,
- .vsync_end = 576 + 2 + 2,
- .vtotal = 576 + 2 + 2 + 20,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040040,
- .video_res = 0x05a9c760,
- .field1_param = 0x0004d416,
- .field2_param = 0x0009b94f,
- .burst_line_odd1 = 0x0004d406,
- .burst_line_even1 = 0x0009b53e,
- .burst_line_odd2 = 0x0004d805,
- .burst_line_even2 = 0x0009b93f,
- .line_timing_param = 0x06a96fdf,
- .weight_value = 0x00c188a0,
- .blank_black_level = 0x0000fcfc,
- .burst_level = 0x00001595,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x1504c566,
- .phase_line_incr_cvbs = 0xc068db8c,
-};
-
-static const struct zx_tvenc_mode tvenc_mode_ntsc = {
- .mode = {
- .clock = 13500 * TVENC_CLOCK_MULTIPLIER,
- .hdisplay = 720,
- .hsync_start = 720 + 16,
- .hsync_end = 720 + 16 + 2,
- .htotal = 720 + 16 + 2 + 120,
- .vdisplay = 480,
- .vsync_start = 480 + 3,
- .vsync_end = 480 + 3 + 2,
- .vtotal = 480 + 3 + 2 + 17,
- .flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
- DRM_MODE_FLAG_INTERLACE,
- },
- .video_info = 0x00040080,
- .video_res = 0x05a8375a,
- .field1_param = 0x00041817,
- .field2_param = 0x0008351e,
- .burst_line_odd1 = 0x00041006,
- .burst_line_even1 = 0x0008290d,
- .burst_line_odd2 = 0x00000000,
- .burst_line_even2 = 0x00000000,
- .line_timing_param = 0x06a8ef9e,
- .weight_value = 0x00b68197,
- .blank_black_level = 0x0000f0f0,
- .burst_level = 0x0000009c,
- .control_param = 0x00000001,
- .sub_carrier_phase1 = 0x10f83e10,
- .phase_line_incr_cvbs = 0x80000000,
-};
-
-static const struct zx_tvenc_mode *tvenc_modes[] = {
- &tvenc_mode_pal,
- &tvenc_mode_ntsc,
-};
-
-static const struct zx_tvenc_mode *
-zx_tvenc_find_zmode(struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
-
- if (drm_mode_equal(mode, &zmode->mode))
- return zmode;
- }
-
- return NULL;
-}
-
-static void zx_tvenc_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adj_mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- const struct zx_tvenc_mode *zmode;
- struct vou_div_config configs[] = {
- { VOU_DIV_INF, VOU_DIV_4 },
- { VOU_DIV_TVENC, VOU_DIV_1 },
- { VOU_DIV_LAYER, VOU_DIV_2 },
- };
-
- zx_vou_config_dividers(encoder->crtc, configs, ARRAY_SIZE(configs));
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "failed to find zmode\n");
- return;
- }
-
- zx_writel(tvenc->mmio + VENC_VIDEO_INFO, zmode->video_info);
- zx_writel(tvenc->mmio + VENC_VIDEO_RES, zmode->video_res);
- zx_writel(tvenc->mmio + VENC_FIELD1_PARAM, zmode->field1_param);
- zx_writel(tvenc->mmio + VENC_FIELD2_PARAM, zmode->field2_param);
- zx_writel(tvenc->mmio + VENC_LINE_O_1, zmode->burst_line_odd1);
- zx_writel(tvenc->mmio + VENC_LINE_E_1, zmode->burst_line_even1);
- zx_writel(tvenc->mmio + VENC_LINE_O_2, zmode->burst_line_odd2);
- zx_writel(tvenc->mmio + VENC_LINE_E_2, zmode->burst_line_even2);
- zx_writel(tvenc->mmio + VENC_LINE_TIMING_PARAM,
- zmode->line_timing_param);
- zx_writel(tvenc->mmio + VENC_WEIGHT_VALUE, zmode->weight_value);
- zx_writel(tvenc->mmio + VENC_BLANK_BLACK_LEVEL,
- zmode->blank_black_level);
- zx_writel(tvenc->mmio + VENC_BURST_LEVEL, zmode->burst_level);
- zx_writel(tvenc->mmio + VENC_CONTROL_PARAM, zmode->control_param);
- zx_writel(tvenc->mmio + VENC_SUB_CARRIER_PHASE1,
- zmode->sub_carrier_phase1);
- zx_writel(tvenc->mmio + VENC_PHASE_LINE_INCR_CVBS,
- zmode->phase_line_incr_cvbs);
-}
-
-static void zx_tvenc_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- /* Set bit to power up TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_TV_ENC, encoder->crtc);
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 1);
-}
-
-static void zx_tvenc_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(encoder);
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
-
- zx_writel(tvenc->mmio + VENC_ENABLE, 0);
-
- vou_inf_disable(VOU_TV_ENC, encoder->crtc);
-
- /* Clear bit to power down TVENC DAC */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_tvenc_encoder_helper_funcs = {
- .enable = zx_tvenc_encoder_enable,
- .disable = zx_tvenc_encoder_disable,
- .mode_set = zx_tvenc_encoder_mode_set,
-};
-
-static int zx_tvenc_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- struct device *dev = tvenc->dev;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(tvenc_modes); i++) {
- const struct zx_tvenc_mode *zmode = tvenc_modes[i];
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &zmode->mode);
- if (!mode) {
- DRM_DEV_ERROR(dev, "failed to duplicate drm mode\n");
- continue;
- }
-
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return i;
-}
-
-static enum drm_mode_status
-zx_tvenc_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct zx_tvenc *tvenc = to_zx_tvenc(connector);
- const struct zx_tvenc_mode *zmode;
-
- zmode = zx_tvenc_find_zmode(mode);
- if (!zmode) {
- DRM_DEV_ERROR(tvenc->dev, "unsupported mode: %s\n", mode->name);
- return MODE_NOMODE;
- }
-
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_tvenc_connector_helper_funcs = {
- .get_modes = zx_tvenc_connector_get_modes,
- .mode_valid = zx_tvenc_connector_mode_valid,
-};
-
-static const struct drm_connector_funcs zx_tvenc_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_tvenc_register(struct drm_device *drm, struct zx_tvenc *tvenc)
-{
- struct drm_encoder *encoder = &tvenc->encoder;
- struct drm_connector *connector = &tvenc->connector;
-
- /*
-	 * The tvenc is designed to use the aux channel, as there is a
-	 * deflicker block for that channel.
- */
- encoder->possible_crtcs = BIT(1);
-
- drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_TVDAC);
- drm_encoder_helper_add(encoder, &zx_tvenc_encoder_helper_funcs);
-
- connector->interlace_allowed = true;
-
- drm_connector_init(drm, connector, &zx_tvenc_connector_funcs,
- DRM_MODE_CONNECTOR_Composite);
- drm_connector_helper_add(connector, &zx_tvenc_connector_helper_funcs);
-
- drm_connector_attach_encoder(connector, encoder);
-
- return 0;
-}
-
-static int zx_tvenc_pwrctrl_init(struct zx_tvenc *tvenc)
-{
- struct zx_tvenc_pwrctrl *pwrctrl = &tvenc->pwrctrl;
- struct device *dev = tvenc->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,tvenc-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_tvenc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_tvenc *tvenc;
- int ret;
-
- tvenc = devm_kzalloc(dev, sizeof(*tvenc), GFP_KERNEL);
- if (!tvenc)
- return -ENOMEM;
-
- tvenc->dev = dev;
- dev_set_drvdata(dev, tvenc);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- tvenc->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(tvenc->mmio)) {
- ret = PTR_ERR(tvenc->mmio);
- DRM_DEV_ERROR(dev, "failed to remap tvenc region: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_pwrctrl_init(tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_tvenc_register(drm, tvenc);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register tvenc: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void zx_tvenc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- /* Nothing to do */
-}
-
-static const struct component_ops zx_tvenc_component_ops = {
- .bind = zx_tvenc_bind,
- .unbind = zx_tvenc_unbind,
-};
-
-static int zx_tvenc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_tvenc_component_ops);
-}
-
-static int zx_tvenc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_tvenc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_tvenc_of_match[] = {
- { .compatible = "zte,zx296718-tvenc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_tvenc_of_match);
-
-struct platform_driver zx_tvenc_driver = {
- .probe = zx_tvenc_probe,
- .remove = zx_tvenc_remove,
- .driver = {
- .name = "zx-tvenc",
- .of_match_table = zx_tvenc_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_tvenc_regs.h b/drivers/gpu/drm/zte/zx_tvenc_regs.h
deleted file mode 100644
index 40f033109374..000000000000
--- a/drivers/gpu/drm/zte/zx_tvenc_regs.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2017 Linaro Ltd.
- * Copyright 2017 ZTE Corporation.
- */
-
-#ifndef __ZX_TVENC_REGS_H__
-#define __ZX_TVENC_REGS_H__
-
-#define VENC_VIDEO_INFO 0x04
-#define VENC_VIDEO_RES 0x08
-#define VENC_FIELD1_PARAM 0x10
-#define VENC_FIELD2_PARAM 0x14
-#define VENC_LINE_O_1 0x18
-#define VENC_LINE_E_1 0x1c
-#define VENC_LINE_O_2 0x20
-#define VENC_LINE_E_2 0x24
-#define VENC_LINE_TIMING_PARAM 0x28
-#define VENC_WEIGHT_VALUE 0x2c
-#define VENC_BLANK_BLACK_LEVEL 0x30
-#define VENC_BURST_LEVEL 0x34
-#define VENC_CONTROL_PARAM 0x3c
-#define VENC_SUB_CARRIER_PHASE1 0x40
-#define VENC_PHASE_LINE_INCR_CVBS 0x48
-#define VENC_ENABLE 0xa8
-
-#endif /* __ZX_TVENC_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vga.c b/drivers/gpu/drm/zte/zx_vga.c
deleted file mode 100644
index 0f9bbb7e3b8d..000000000000
--- a/drivers/gpu/drm/zte/zx_vga.c
+++ /dev/null
@@ -1,527 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/mfd/syscon.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regmap.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_simple_kms_helper.h>
-
-#include "zx_drm_drv.h"
-#include "zx_vga_regs.h"
-#include "zx_vou.h"
-
-struct zx_vga_pwrctrl {
- struct regmap *regmap;
- u32 reg;
- u32 mask;
-};
-
-struct zx_vga_i2c {
- struct i2c_adapter adap;
- struct mutex lock;
-};
-
-struct zx_vga {
- struct drm_connector connector;
- struct drm_encoder encoder;
- struct zx_vga_i2c *ddc;
- struct device *dev;
- void __iomem *mmio;
- struct clk *i2c_wclk;
- struct zx_vga_pwrctrl pwrctrl;
- struct completion complete;
- bool connected;
-};
-
-#define to_zx_vga(x) container_of(x, struct zx_vga, x)
-
-static void zx_vga_encoder_enable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- /* Set bit to power up VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask,
- pwrctrl->mask);
-
- vou_inf_enable(VOU_VGA, encoder->crtc);
-}
-
-static void zx_vga_encoder_disable(struct drm_encoder *encoder)
-{
- struct zx_vga *vga = to_zx_vga(encoder);
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
-
- vou_inf_disable(VOU_VGA, encoder->crtc);
-
- /* Clear bit to power down VGA DACs */
- regmap_update_bits(pwrctrl->regmap, pwrctrl->reg, pwrctrl->mask, 0);
-}
-
-static const struct drm_encoder_helper_funcs zx_vga_encoder_helper_funcs = {
- .enable = zx_vga_encoder_enable,
- .disable = zx_vga_encoder_disable,
-};
-
-static int zx_vga_connector_get_modes(struct drm_connector *connector)
-{
- struct zx_vga *vga = to_zx_vga(connector);
- struct edid *edid;
- int ret;
-
- /*
-	 * Clear both detection bits to switch the I2C bus from device
-	 * detection to EDID reading.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, 0);
-
- edid = drm_get_edid(connector, &vga->ddc->adap);
- if (!edid) {
- /*
-		 * If EDID reading fails, we set the device state to
-		 * disconnected. Locking is not required here, since the
-		 * VGA_AUTO_DETECT_SEL register write in the irq handler cannot
- * be triggered when both detection bits are cleared as above.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return 0;
- }
-
- /*
-	 * Since EDID reading succeeded, the device must be connected, so
-	 * we set up the detection bit for the unplug interrupt here.
- */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_HAS_DEVICE);
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
-
- return ret;
-}
-
-static enum drm_mode_status
-zx_vga_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- return MODE_OK;
-}
-
-static struct drm_connector_helper_funcs zx_vga_connector_helper_funcs = {
- .get_modes = zx_vga_connector_get_modes,
- .mode_valid = zx_vga_connector_mode_valid,
-};
-
-static enum drm_connector_status
-zx_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct zx_vga *vga = to_zx_vga(connector);
-
- return vga->connected ? connector_status_connected :
- connector_status_disconnected;
-}
-
-static const struct drm_connector_funcs zx_vga_connector_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .detect = zx_vga_connector_detect,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int zx_vga_register(struct drm_device *drm, struct zx_vga *vga)
-{
- struct drm_encoder *encoder = &vga->encoder;
- struct drm_connector *connector = &vga->connector;
- struct device *dev = vga->dev;
- int ret;
-
- encoder->possible_crtcs = VOU_CRTC_MASK;
-
- ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_DAC);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init encoder: %d\n", ret);
- return ret;
- }
-
- drm_encoder_helper_add(encoder, &zx_vga_encoder_helper_funcs);
-
- vga->connector.polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init_with_ddc(drm, connector,
- &zx_vga_connector_funcs,
- DRM_MODE_CONNECTOR_VGA,
- &vga->ddc->adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init connector: %d\n", ret);
- goto clean_encoder;
- }
-
- drm_connector_helper_add(connector, &zx_vga_connector_helper_funcs);
-
- ret = drm_connector_attach_encoder(connector, encoder);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to attach encoder: %d\n", ret);
- goto clean_connector;
- }
-
- return 0;
-
-clean_connector:
- drm_connector_cleanup(connector);
-clean_encoder:
- drm_encoder_cleanup(encoder);
- return ret;
-}
-
-static int zx_vga_pwrctrl_init(struct zx_vga *vga)
-{
- struct zx_vga_pwrctrl *pwrctrl = &vga->pwrctrl;
- struct device *dev = vga->dev;
- struct of_phandle_args out_args;
- struct regmap *regmap;
- int ret;
-
- ret = of_parse_phandle_with_fixed_args(dev->of_node,
- "zte,vga-power-control", 2, 0, &out_args);
- if (ret)
- return ret;
-
- regmap = syscon_node_to_regmap(out_args.np);
- if (IS_ERR(regmap)) {
- ret = PTR_ERR(regmap);
- goto out;
- }
-
- pwrctrl->regmap = regmap;
- pwrctrl->reg = out_args.args[0];
- pwrctrl->mask = out_args.args[1];
-
-out:
- of_node_put(out_args.np);
- return ret;
-}
-
-static int zx_vga_i2c_read(struct zx_vga *vga, struct i2c_msg *msg)
-{
- int len = msg->len;
- u8 *buf = msg->buf;
- u32 offset = 0;
- int i;
-
- reinit_completion(&vga->complete);
-
- /* Select combo write */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_COMBO, VGA_CMD_COMBO);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_RW, 0);
-
- while (len > 0) {
- u32 cnt;
-
- /* Clear RX FIFO */
- zx_writel_mask(vga->mmio + VGA_RXF_CTRL, VGA_RX_FIFO_CLEAR,
- VGA_RX_FIFO_CLEAR);
-
- /* Data offset to read from */
- zx_writel(vga->mmio + VGA_SUB_ADDR, offset);
-
- /* Kick off the transfer */
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS,
- VGA_CMD_TRANS);
-
- if (!wait_for_completion_timeout(&vga->complete,
- msecs_to_jiffies(1000))) {
- DRM_DEV_ERROR(vga->dev, "transfer timeout\n");
- return -ETIMEDOUT;
- }
-
- cnt = zx_readl(vga->mmio + VGA_RXF_STATUS);
- cnt = (cnt & VGA_RXF_COUNT_MASK) >> VGA_RXF_COUNT_SHIFT;
- /* FIFO status may report more data than we need to read */
- cnt = min_t(u32, len, cnt);
-
- for (i = 0; i < cnt; i++)
- *buf++ = zx_readl(vga->mmio + VGA_DATA);
-
- len -= cnt;
- offset += cnt;
- }
-
- return 0;
-}
-
-static int zx_vga_i2c_write(struct zx_vga *vga, struct i2c_msg *msg)
-{
- /*
- * The DDC I2C adapter is only for reading EDID data, so we assume
- * that the write to this adapter must be the EDID data offset.
- */
- if ((msg->len != 1) || ((msg->addr != DDC_ADDR)))
- return -EINVAL;
-
- /* Hardware will take care of the slave address shifting */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, msg->addr);
-
- return 0;
-}
-
-static int zx_vga_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num)
-{
- struct zx_vga *vga = i2c_get_adapdata(adap);
- struct zx_vga_i2c *ddc = vga->ddc;
- int ret = 0;
- int i;
-
- mutex_lock(&ddc->lock);
-
- for (i = 0; i < num; i++) {
- if (msgs[i].flags & I2C_M_RD)
- ret = zx_vga_i2c_read(vga, &msgs[i]);
- else
- ret = zx_vga_i2c_write(vga, &msgs[i]);
-
- if (ret < 0)
- break;
- }
-
- if (!ret)
- ret = num;
-
- mutex_unlock(&ddc->lock);
-
- return ret;
-}
-
-static u32 zx_vga_i2c_func(struct i2c_adapter *adapter)
-{
- return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
-}
-
-static const struct i2c_algorithm zx_vga_algorithm = {
- .master_xfer = zx_vga_i2c_xfer,
- .functionality = zx_vga_i2c_func,
-};
-
-static int zx_vga_ddc_register(struct zx_vga *vga)
-{
- struct device *dev = vga->dev;
- struct i2c_adapter *adap;
- struct zx_vga_i2c *ddc;
- int ret;
-
- ddc = devm_kzalloc(dev, sizeof(*ddc), GFP_KERNEL);
- if (!ddc)
- return -ENOMEM;
-
- vga->ddc = ddc;
- mutex_init(&ddc->lock);
-
- adap = &ddc->adap;
- adap->owner = THIS_MODULE;
- adap->class = I2C_CLASS_DDC;
- adap->dev.parent = dev;
- adap->algo = &zx_vga_algorithm;
- snprintf(adap->name, sizeof(adap->name), "zx vga i2c");
-
- ret = i2c_add_adapter(adap);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to add I2C adapter: %d\n", ret);
- return ret;
- }
-
- i2c_set_adapdata(adap, vga);
-
- return 0;
-}
-
-static irqreturn_t zx_vga_irq_thread(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
-
- drm_helper_hpd_irq_event(vga->connector.dev);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t zx_vga_irq_handler(int irq, void *dev_id)
-{
- struct zx_vga *vga = dev_id;
- u32 status;
-
- status = zx_readl(vga->mmio + VGA_I2C_STATUS);
-
- /* Clear interrupt status */
- zx_writel_mask(vga->mmio + VGA_I2C_STATUS, VGA_CLEAR_IRQ,
- VGA_CLEAR_IRQ);
-
- if (status & VGA_DEVICE_CONNECTED) {
- /*
-		 * Since the VGA_DETECT_SEL bits need to be reset to switch
-		 * the DDC bus from device detection to EDID read, rather than
-		 * setting the HAS_DEVICE bit here, we need to do that in the
-		 * .get_modes hook for unplug detection after the EDID read
-		 * succeeds.
- */
- vga->connected = true;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_DEVICE_DISCONNECTED) {
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL,
- VGA_DETECT_SEL_NO_DEVICE);
- vga->connected = false;
- return IRQ_WAKE_THREAD;
- }
-
- if (status & VGA_TRANS_DONE) {
- complete(&vga->complete);
- return IRQ_HANDLED;
- }
-
- return IRQ_NONE;
-}
-
-static void zx_vga_hw_init(struct zx_vga *vga)
-{
- unsigned long ref = clk_get_rate(vga->i2c_wclk);
- int div;
-
- /*
- * Set up I2C fast speed divider per formula below to get 400kHz.
- * scl = ref / ((div + 1) * 4)
- */
- div = DIV_ROUND_UP(ref / 1000, 400 * 4) - 1;
- zx_writel(vga->mmio + VGA_CLK_DIV_FS, div);
-
- /* Set up device detection */
- zx_writel(vga->mmio + VGA_AUTO_DETECT_PARA, 0x80);
- zx_writel(vga->mmio + VGA_AUTO_DETECT_SEL, VGA_DETECT_SEL_NO_DEVICE);
-
- /*
-	 * We need to poke the monitor via the DDC bus to get the
-	 * connection irq to start working.
- */
- zx_writel(vga->mmio + VGA_DEVICE_ADDR, DDC_ADDR);
- zx_writel_mask(vga->mmio + VGA_CMD_CFG, VGA_CMD_TRANS, VGA_CMD_TRANS);
-}
-
-static int zx_vga_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct resource *res;
- struct zx_vga *vga;
- int irq;
- int ret;
-
- vga = devm_kzalloc(dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
-
- vga->dev = dev;
- dev_set_drvdata(dev, vga);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vga->mmio = devm_ioremap_resource(dev, res);
- if (IS_ERR(vga->mmio))
- return PTR_ERR(vga->mmio);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vga->i2c_wclk = devm_clk_get(dev, "i2c_wclk");
- if (IS_ERR(vga->i2c_wclk)) {
- ret = PTR_ERR(vga->i2c_wclk);
- DRM_DEV_ERROR(dev, "failed to get i2c_wclk: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_pwrctrl_init(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init power control: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_ddc_register(vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register ddc: %d\n", ret);
- return ret;
- }
-
- ret = zx_vga_register(drm, vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to register vga: %d\n", ret);
- return ret;
- }
-
- init_completion(&vga->complete);
-
- ret = devm_request_threaded_irq(dev, irq, zx_vga_irq_handler,
- zx_vga_irq_thread, IRQF_SHARED,
- dev_name(dev), vga);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to request threaded irq: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vga->i2c_wclk);
- if (ret)
- return ret;
-
- zx_vga_hw_init(vga);
-
- return 0;
-}
-
-static void zx_vga_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vga *vga = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vga->i2c_wclk);
-}
-
-static const struct component_ops zx_vga_component_ops = {
- .bind = zx_vga_bind,
- .unbind = zx_vga_unbind,
-};
-
-static int zx_vga_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_vga_component_ops);
-}
-
-static int zx_vga_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_vga_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_vga_of_match[] = {
- { .compatible = "zte,zx296718-vga", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_vga_of_match);
-
-struct platform_driver zx_vga_driver = {
- .probe = zx_vga_probe,
- .remove = zx_vga_remove,
- .driver = {
- .name = "zx-vga",
- .of_match_table = zx_vga_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_vga_regs.h b/drivers/gpu/drm/zte/zx_vga_regs.h
deleted file mode 100644
index 1e8825ae70a5..000000000000
--- a/drivers/gpu/drm/zte/zx_vga_regs.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2017 Sanechips Technology Co., Ltd.
- * Copyright 2017 Linaro Ltd.
- */
-
-#ifndef __ZX_VGA_REGS_H__
-#define __ZX_VGA_REGS_H__
-
-#define VGA_CMD_CFG 0x04
-#define VGA_CMD_TRANS BIT(6)
-#define VGA_CMD_COMBO BIT(5)
-#define VGA_CMD_RW BIT(4)
-#define VGA_SUB_ADDR 0x0c
-#define VGA_DEVICE_ADDR 0x10
-#define VGA_CLK_DIV_FS 0x14
-#define VGA_RXF_CTRL 0x20
-#define VGA_RX_FIFO_CLEAR BIT(7)
-#define VGA_DATA 0x24
-#define VGA_I2C_STATUS 0x28
-#define VGA_DEVICE_DISCONNECTED BIT(7)
-#define VGA_DEVICE_CONNECTED BIT(6)
-#define VGA_CLEAR_IRQ BIT(4)
-#define VGA_TRANS_DONE BIT(0)
-#define VGA_RXF_STATUS 0x30
-#define VGA_RXF_COUNT_SHIFT 2
-#define VGA_RXF_COUNT_MASK GENMASK(7, 2)
-#define VGA_AUTO_DETECT_PARA 0x34
-#define VGA_AUTO_DETECT_SEL 0x38
-#define VGA_DETECT_SEL_HAS_DEVICE BIT(1)
-#define VGA_DETECT_SEL_NO_DEVICE BIT(0)
-
-#endif /* __ZX_VGA_REGS_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou.c b/drivers/gpu/drm/zte/zx_vou.c
deleted file mode 100644
index 904f62f3bfc1..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.c
+++ /dev/null
@@ -1,921 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#include <linux/clk.h>
-#include <linux/component.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/platform_device.h>
-
-#include <video/videomode.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fb_cma_helper.h>
-#include <drm/drm_fb_helper.h>
-#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_of.h>
-#include <drm/drm_plane_helper.h>
-#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
-
-#include "zx_common_regs.h"
-#include "zx_drm_drv.h"
-#include "zx_plane.h"
-#include "zx_vou.h"
-#include "zx_vou_regs.h"
-
-#define GL_NUM 2
-#define VL_NUM 3
-
-enum vou_chn_type {
- VOU_CHN_MAIN,
- VOU_CHN_AUX,
-};
-
-struct zx_crtc_regs {
- u32 fir_active;
- u32 fir_htiming;
- u32 fir_vtiming;
- u32 sec_vtiming;
- u32 timing_shift;
- u32 timing_pi_shift;
-};
-
-static const struct zx_crtc_regs main_crtc_regs = {
- .fir_active = FIR_MAIN_ACTIVE,
- .fir_htiming = FIR_MAIN_H_TIMING,
- .fir_vtiming = FIR_MAIN_V_TIMING,
- .sec_vtiming = SEC_MAIN_V_TIMING,
- .timing_shift = TIMING_MAIN_SHIFT,
- .timing_pi_shift = TIMING_MAIN_PI_SHIFT,
-};
-
-static const struct zx_crtc_regs aux_crtc_regs = {
- .fir_active = FIR_AUX_ACTIVE,
- .fir_htiming = FIR_AUX_H_TIMING,
- .fir_vtiming = FIR_AUX_V_TIMING,
- .sec_vtiming = SEC_AUX_V_TIMING,
- .timing_shift = TIMING_AUX_SHIFT,
- .timing_pi_shift = TIMING_AUX_PI_SHIFT,
-};
-
-struct zx_crtc_bits {
- u32 polarity_mask;
- u32 polarity_shift;
- u32 int_frame_mask;
- u32 tc_enable;
- u32 sec_vactive_shift;
- u32 sec_vactive_mask;
- u32 interlace_select;
- u32 pi_enable;
- u32 div_vga_shift;
- u32 div_pic_shift;
- u32 div_tvenc_shift;
- u32 div_hdmi_pnx_shift;
- u32 div_hdmi_shift;
- u32 div_inf_shift;
- u32 div_layer_shift;
-};
-
-static const struct zx_crtc_bits main_crtc_bits = {
- .polarity_mask = MAIN_POL_MASK,
- .polarity_shift = MAIN_POL_SHIFT,
- .int_frame_mask = TIMING_INT_MAIN_FRAME,
- .tc_enable = MAIN_TC_EN,
- .sec_vactive_shift = SEC_VACT_MAIN_SHIFT,
- .sec_vactive_mask = SEC_VACT_MAIN_MASK,
- .interlace_select = MAIN_INTERLACE_SEL,
- .pi_enable = MAIN_PI_EN,
- .div_vga_shift = VGA_MAIN_DIV_SHIFT,
- .div_pic_shift = PIC_MAIN_DIV_SHIFT,
- .div_tvenc_shift = TVENC_MAIN_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_MAIN_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_MAIN_DIV_SHIFT,
- .div_inf_shift = INF_MAIN_DIV_SHIFT,
- .div_layer_shift = LAYER_MAIN_DIV_SHIFT,
-};
-
-static const struct zx_crtc_bits aux_crtc_bits = {
- .polarity_mask = AUX_POL_MASK,
- .polarity_shift = AUX_POL_SHIFT,
- .int_frame_mask = TIMING_INT_AUX_FRAME,
- .tc_enable = AUX_TC_EN,
- .sec_vactive_shift = SEC_VACT_AUX_SHIFT,
- .sec_vactive_mask = SEC_VACT_AUX_MASK,
- .interlace_select = AUX_INTERLACE_SEL,
- .pi_enable = AUX_PI_EN,
- .div_vga_shift = VGA_AUX_DIV_SHIFT,
- .div_pic_shift = PIC_AUX_DIV_SHIFT,
- .div_tvenc_shift = TVENC_AUX_DIV_SHIFT,
- .div_hdmi_pnx_shift = HDMI_AUX_PNX_DIV_SHIFT,
- .div_hdmi_shift = HDMI_AUX_DIV_SHIFT,
- .div_inf_shift = INF_AUX_DIV_SHIFT,
- .div_layer_shift = LAYER_AUX_DIV_SHIFT,
-};
-
-struct zx_crtc {
- struct drm_crtc crtc;
- struct drm_plane *primary;
- struct zx_vou_hw *vou;
- void __iomem *chnreg;
- void __iomem *chncsc;
- void __iomem *dither;
- const struct zx_crtc_regs *regs;
- const struct zx_crtc_bits *bits;
- enum vou_chn_type chn_type;
- struct clk *pixclk;
-};
-
-#define to_zx_crtc(x) container_of(x, struct zx_crtc, crtc)
-
-struct vou_layer_bits {
- u32 enable;
- u32 chnsel;
- u32 clksel;
-};
-
-static const struct vou_layer_bits zx_gl_bits[GL_NUM] = {
- {
- .enable = OSD_CTRL0_GL0_EN,
- .chnsel = OSD_CTRL0_GL0_SEL,
- .clksel = VOU_CLK_GL0_SEL,
- }, {
- .enable = OSD_CTRL0_GL1_EN,
- .chnsel = OSD_CTRL0_GL1_SEL,
- .clksel = VOU_CLK_GL1_SEL,
- },
-};
-
-static const struct vou_layer_bits zx_vl_bits[VL_NUM] = {
- {
- .enable = OSD_CTRL0_VL0_EN,
- .chnsel = OSD_CTRL0_VL0_SEL,
- .clksel = VOU_CLK_VL0_SEL,
- }, {
- .enable = OSD_CTRL0_VL1_EN,
- .chnsel = OSD_CTRL0_VL1_SEL,
- .clksel = VOU_CLK_VL1_SEL,
- }, {
- .enable = OSD_CTRL0_VL2_EN,
- .chnsel = OSD_CTRL0_VL2_SEL,
- .clksel = VOU_CLK_VL2_SEL,
- },
-};
-
-struct zx_vou_hw {
- struct device *dev;
- void __iomem *osd;
- void __iomem *timing;
- void __iomem *vouctl;
- void __iomem *otfppu;
- void __iomem *dtrc;
- struct clk *axi_clk;
- struct clk *ppu_clk;
- struct clk *main_clk;
- struct clk *aux_clk;
- struct zx_crtc *main_crtc;
- struct zx_crtc *aux_crtc;
-};
-
-enum vou_inf_data_sel {
- VOU_YUV444 = 0,
- VOU_RGB_101010 = 1,
- VOU_RGB_888 = 2,
- VOU_RGB_666 = 3,
-};
-
-struct vou_inf {
- enum vou_inf_id id;
- enum vou_inf_data_sel data_sel;
- u32 clocks_en_bits;
- u32 clocks_sel_bits;
-};
-
-static struct vou_inf vou_infs[] = {
- [VOU_HDMI] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(24) | BIT(18) | BIT(6),
- .clocks_sel_bits = BIT(13) | BIT(2),
- },
- [VOU_TV_ENC] = {
- .data_sel = VOU_YUV444,
- .clocks_en_bits = BIT(15),
- .clocks_sel_bits = BIT(11) | BIT(0),
- },
- [VOU_VGA] = {
- .data_sel = VOU_RGB_888,
- .clocks_en_bits = BIT(1),
- .clocks_sel_bits = BIT(10),
- },
-};
-
-static inline struct zx_vou_hw *crtc_to_vou(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
-
- return zcrtc->vou;
-}
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
-
- zx_writel_mask(vou->vouctl + VOU_INF_HDMI_CTRL, VOU_HDMI_AUD_MASK, aud);
-}
-
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct vou_inf *inf = &vou_infs[id];
- void __iomem *dither = zcrtc->dither;
- void __iomem *csc = zcrtc->chncsc;
- bool is_main = zcrtc->chn_type == VOU_CHN_MAIN;
- u32 data_sel_shift = id << 1;
-
- if (inf->data_sel != VOU_YUV444) {
- /* Enable channel CSC for RGB output */
- zx_writel_mask(csc + CSC_CTRL0, CSC_COV_MODE_MASK,
- CSC_BT709_IMAGE_YCBCR2RGB << CSC_COV_MODE_SHIFT);
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE,
- CSC_WORK_ENABLE);
-
- /* Bypass Dither block for RGB output */
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS,
- DITHER_BYSPASS);
- } else {
- zx_writel_mask(csc + CSC_CTRL0, CSC_WORK_ENABLE, 0);
- zx_writel_mask(dither + OSD_DITHER_CTRL0, DITHER_BYSPASS, 0);
- }
-
- /* Select data format */
- zx_writel_mask(vou->vouctl + VOU_INF_DATA_SEL, 0x3 << data_sel_shift,
- inf->data_sel << data_sel_shift);
-
- /* Select channel */
- zx_writel_mask(vou->vouctl + VOU_INF_CH_SEL, 0x1 << id,
- zcrtc->chn_type << id);
-
- /* Select interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, inf->clocks_sel_bits,
- is_main ? 0 : inf->clocks_sel_bits);
-
- /* Enable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits,
- inf->clocks_en_bits);
-
- /* Enable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 1 << id);
-}
-
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc)
-{
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- struct vou_inf *inf = &vou_infs[id];
-
- /* Disable the device */
- zx_writel_mask(vou->vouctl + VOU_INF_EN, 1 << id, 0);
-
- /* Disable interface clocks */
- zx_writel_mask(vou->vouctl + VOU_CLK_EN, inf->clocks_en_bits, 0);
-}
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- int i;
-
- /* Clear update flag bit */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE, 0);
-
- for (i = 0; i < num; i++) {
- struct vou_div_config *cfg = configs + i;
- u32 reg, shift;
-
- switch (cfg->id) {
- case VOU_DIV_VGA:
- reg = VOU_CLK_SEL;
- shift = bits->div_vga_shift;
- break;
- case VOU_DIV_PIC:
- reg = VOU_CLK_SEL;
- shift = bits->div_pic_shift;
- break;
- case VOU_DIV_TVENC:
- reg = VOU_DIV_PARA;
- shift = bits->div_tvenc_shift;
- break;
- case VOU_DIV_HDMI_PNX:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_pnx_shift;
- break;
- case VOU_DIV_HDMI:
- reg = VOU_DIV_PARA;
- shift = bits->div_hdmi_shift;
- break;
- case VOU_DIV_INF:
- reg = VOU_DIV_PARA;
- shift = bits->div_inf_shift;
- break;
- case VOU_DIV_LAYER:
- reg = VOU_DIV_PARA;
- shift = bits->div_layer_shift;
- break;
- default:
- continue;
- }
-
- /* Each divider occupies 3 bits */
- zx_writel_mask(vou->vouctl + reg, 0x7 << shift,
- cfg->val << shift);
- }
-
-	/* Set the update flag bit to make the dividers take effect */
- zx_writel_mask(vou->vouctl + VOU_DIV_PARA, DIV_PARA_UPDATE,
- DIV_PARA_UPDATE);
-}
-
-static inline void vou_chn_set_update(struct zx_crtc *zcrtc)
-{
- zx_writel(zcrtc->chnreg + CHN_UPDATE, 1);
-}
-
-static void zx_crtc_atomic_enable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- bool interlaced = mode->flags & DRM_MODE_FLAG_INTERLACE;
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- const struct zx_crtc_regs *regs = zcrtc->regs;
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct videomode vm;
- u32 scan_mask;
- u32 pol = 0;
- u32 val;
- int ret;
-
- drm_display_mode_to_videomode(mode, &vm);
-
- /* Set up timing parameters */
- val = V_ACTIVE((interlaced ? vm.vactive / 2 : vm.vactive) - 1);
- val |= H_ACTIVE(vm.hactive - 1);
- zx_writel(vou->timing + regs->fir_active, val);
-
- val = SYNC_WIDE(vm.hsync_len - 1);
- val |= BACK_PORCH(vm.hback_porch - 1);
- val |= FRONT_PORCH(vm.hfront_porch - 1);
- zx_writel(vou->timing + regs->fir_htiming, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- val |= BACK_PORCH(vm.vback_porch - 1);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->fir_vtiming, val);
-
- if (interlaced) {
- u32 shift = bits->sec_vactive_shift;
- u32 mask = bits->sec_vactive_mask;
-
- val = zx_readl(vou->timing + SEC_V_ACTIVE);
- val &= ~mask;
- val |= ((vm.vactive / 2 - 1) << shift) & mask;
- zx_writel(vou->timing + SEC_V_ACTIVE, val);
-
- val = SYNC_WIDE(vm.vsync_len - 1);
- /*
-		 * The vback_porch for the second field needs to be one more
-		 * than the value used for the first field.
- */
- val |= BACK_PORCH(vm.vback_porch);
- val |= FRONT_PORCH(vm.vfront_porch - 1);
- zx_writel(vou->timing + regs->sec_vtiming, val);
- }
-
- /* Set up polarities */
- if (vm.flags & DISPLAY_FLAGS_VSYNC_LOW)
- pol |= 1 << POL_VSYNC_SHIFT;
- if (vm.flags & DISPLAY_FLAGS_HSYNC_LOW)
- pol |= 1 << POL_HSYNC_SHIFT;
-
- zx_writel_mask(vou->timing + TIMING_CTRL, bits->polarity_mask,
- pol << bits->polarity_shift);
-
-	/* Set up the SHIFT register by following what the ZTE BSP does */
- val = H_SHIFT_VAL;
- if (interlaced)
- val |= V_SHIFT_VAL << 16;
- zx_writel(vou->timing + regs->timing_shift, val);
- zx_writel(vou->timing + regs->timing_pi_shift, H_PI_SHIFT_VAL);
-
- /* Progressive or interlace scan select */
- scan_mask = bits->interlace_select | bits->pi_enable;
- zx_writel_mask(vou->timing + SCAN_CTRL, scan_mask,
- interlaced ? scan_mask : 0);
-
- /* Enable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable,
- bits->tc_enable);
-
- /* Configure channel screen size */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_W_MASK,
- vm.hactive << CHN_SCREEN_W_SHIFT);
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL1, CHN_SCREEN_H_MASK,
- vm.vactive << CHN_SCREEN_H_SHIFT);
-
- /* Configure channel interlace buffer control */
- zx_writel_mask(zcrtc->chnreg + CHN_INTERLACE_BUF_CTRL, CHN_INTERLACE_EN,
- interlaced ? CHN_INTERLACE_EN : 0);
-
- /* Update channel */
- vou_chn_set_update(zcrtc);
-
- /* Enable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, CHN_ENABLE);
-
- drm_crtc_vblank_on(crtc);
-
- ret = clk_set_rate(zcrtc->pixclk, mode->clock * 1000);
- if (ret) {
- DRM_DEV_ERROR(vou->dev, "failed to set pixclk rate: %d\n", ret);
- return;
- }
-
- ret = clk_prepare_enable(zcrtc->pixclk);
- if (ret)
- DRM_DEV_ERROR(vou->dev, "failed to enable pixclk: %d\n", ret);
-}
-
-static void zx_crtc_atomic_disable(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- const struct zx_crtc_bits *bits = zcrtc->bits;
- struct zx_vou_hw *vou = zcrtc->vou;
-
- clk_disable_unprepare(zcrtc->pixclk);
-
- drm_crtc_vblank_off(crtc);
-
- /* Disable channel */
- zx_writel_mask(zcrtc->chnreg + CHN_CTRL0, CHN_ENABLE, 0);
-
- /* Disable TIMING_CTRL */
- zx_writel_mask(vou->timing + TIMING_TC_ENABLE, bits->tc_enable, 0);
-}
-
-static void zx_crtc_atomic_flush(struct drm_crtc *crtc,
- struct drm_atomic_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (!event)
- return;
-
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- if (drm_crtc_vblank_get(crtc) == 0)
- drm_crtc_arm_vblank_event(crtc, event);
- else
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
-}
-
-static const struct drm_crtc_helper_funcs zx_crtc_helper_funcs = {
- .atomic_flush = zx_crtc_atomic_flush,
- .atomic_enable = zx_crtc_atomic_enable,
- .atomic_disable = zx_crtc_atomic_disable,
-};
-
-static int zx_vou_enable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
- u32 int_frame_mask = zcrtc->bits->int_frame_mask;
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL, int_frame_mask,
- int_frame_mask);
-
- return 0;
-}
-
-static void zx_vou_disable_vblank(struct drm_crtc *crtc)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(crtc);
- struct zx_vou_hw *vou = crtc_to_vou(crtc);
-
- zx_writel_mask(vou->timing + TIMING_INT_CTRL,
- zcrtc->bits->int_frame_mask, 0);
-}
-
-static const struct drm_crtc_funcs zx_crtc_funcs = {
- .destroy = drm_crtc_cleanup,
- .set_config = drm_atomic_helper_set_config,
- .page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
- .enable_vblank = zx_vou_enable_vblank,
- .disable_vblank = zx_vou_disable_vblank,
-};
-
-static int zx_crtc_init(struct drm_device *drm, struct zx_vou_hw *vou,
- enum vou_chn_type chn_type)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- struct zx_crtc *zcrtc;
- int ret;
-
- zcrtc = devm_kzalloc(dev, sizeof(*zcrtc), GFP_KERNEL);
- if (!zcrtc)
- return -ENOMEM;
-
- zcrtc->vou = vou;
- zcrtc->chn_type = chn_type;
-
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane)
- return -ENOMEM;
-
- zplane->dev = dev;
-
- if (chn_type == VOU_CHN_MAIN) {
- zplane->layer = vou->osd + MAIN_GL_OFFSET;
- zplane->csc = vou->osd + MAIN_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + MAIN_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + MAIN_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[0];
- zcrtc->chnreg = vou->osd + OSD_MAIN_CHN;
- zcrtc->chncsc = vou->osd + MAIN_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + MAIN_DITHER_OFFSET;
- zcrtc->regs = &main_crtc_regs;
- zcrtc->bits = &main_crtc_bits;
- } else {
- zplane->layer = vou->osd + AUX_GL_OFFSET;
- zplane->csc = vou->osd + AUX_GL_CSC_OFFSET;
- zplane->hbsc = vou->osd + AUX_HBSC_OFFSET;
- zplane->rsz = vou->otfppu + AUX_RSZ_OFFSET;
- zplane->bits = &zx_gl_bits[1];
- zcrtc->chnreg = vou->osd + OSD_AUX_CHN;
- zcrtc->chncsc = vou->osd + AUX_CHN_CSC_OFFSET;
- zcrtc->dither = vou->osd + AUX_DITHER_OFFSET;
- zcrtc->regs = &aux_crtc_regs;
- zcrtc->bits = &aux_crtc_bits;
- }
-
- zcrtc->pixclk = devm_clk_get(dev, (chn_type == VOU_CHN_MAIN) ?
- "main_wclk" : "aux_wclk");
- if (IS_ERR(zcrtc->pixclk)) {
- ret = PTR_ERR(zcrtc->pixclk);
- DRM_DEV_ERROR(dev, "failed to get pix clk: %d\n", ret);
- return ret;
- }
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_PRIMARY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init primary plane: %d\n", ret);
- return ret;
- }
-
- zcrtc->primary = &zplane->plane;
-
- ret = drm_crtc_init_with_planes(drm, &zcrtc->crtc, zcrtc->primary, NULL,
- &zx_crtc_funcs, NULL);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init drm crtc: %d\n", ret);
- return ret;
- }
-
- drm_crtc_helper_add(&zcrtc->crtc, &zx_crtc_helper_funcs);
-
- if (chn_type == VOU_CHN_MAIN)
- vou->main_crtc = zcrtc;
- else
- vou->aux_crtc = zcrtc;
-
- return 0;
-}
-
-void zx_vou_layer_enable(struct drm_plane *plane)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(plane->state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- if (zcrtc->chn_type == VOU_CHN_MAIN) {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel, 0);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel, 0);
- } else {
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->chnsel,
- bits->chnsel);
- zx_writel_mask(vou->vouctl + VOU_CLK_SEL, bits->clksel,
- bits->clksel);
- }
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, bits->enable);
-}
-
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state)
-{
- struct zx_crtc *zcrtc = to_zx_crtc(old_state->crtc);
- struct zx_vou_hw *vou = zcrtc->vou;
- struct zx_plane *zplane = to_zx_plane(plane);
- const struct vou_layer_bits *bits = zplane->bits;
-
- zx_writel_mask(vou->osd + OSD_CTRL0, bits->enable, 0);
-}
-
-static void zx_overlay_init(struct drm_device *drm, struct zx_vou_hw *vou)
-{
- struct device *dev = vou->dev;
- struct zx_plane *zplane;
- int i;
- int ret;
-
- /*
- * VL0 has some quirks on scaling support which need special handling.
- * Let's leave it out for now.
- */
- for (i = 1; i < VL_NUM; i++) {
- zplane = devm_kzalloc(dev, sizeof(*zplane), GFP_KERNEL);
- if (!zplane) {
- DRM_DEV_ERROR(dev, "failed to allocate zplane %d\n", i);
- return;
- }
-
- zplane->layer = vou->osd + OSD_VL_OFFSET(i);
- zplane->hbsc = vou->osd + HBSC_VL_OFFSET(i);
- zplane->rsz = vou->otfppu + RSZ_VL_OFFSET(i);
- zplane->bits = &zx_vl_bits[i];
-
- ret = zx_plane_init(drm, zplane, DRM_PLANE_TYPE_OVERLAY);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init overlay %d\n", i);
- continue;
- }
- }
-}
-
-static inline void zx_osd_int_update(struct zx_crtc *zcrtc)
-{
- struct drm_crtc *crtc = &zcrtc->crtc;
- struct drm_plane *plane;
-
- vou_chn_set_update(zcrtc);
-
- drm_for_each_plane_mask(plane, crtc->dev, crtc->state->plane_mask)
- zx_plane_set_update(plane);
-}
-
-static irqreturn_t vou_irq_handler(int irq, void *dev_id)
-{
- struct zx_vou_hw *vou = dev_id;
- u32 state;
-
- /* Handle TIMING_CTRL frame interrupts */
- state = zx_readl(vou->timing + TIMING_INT_STATE);
- zx_writel(vou->timing + TIMING_INT_STATE, state);
-
- if (state & TIMING_INT_MAIN_FRAME)
- drm_crtc_handle_vblank(&vou->main_crtc->crtc);
-
- if (state & TIMING_INT_AUX_FRAME)
- drm_crtc_handle_vblank(&vou->aux_crtc->crtc);
-
- /* Handle OSD interrupts */
- state = zx_readl(vou->osd + OSD_INT_STA);
- zx_writel(vou->osd + OSD_INT_CLRSTA, state);
-
- if (state & OSD_INT_MAIN_UPT)
- zx_osd_int_update(vou->main_crtc);
-
- if (state & OSD_INT_AUX_UPT)
- zx_osd_int_update(vou->aux_crtc);
-
- if (state & OSD_INT_ERROR)
- DRM_DEV_ERROR(vou->dev, "OSD ERROR: 0x%08x!\n", state);
-
- return IRQ_HANDLED;
-}
-
-static void vou_dtrc_init(struct zx_vou_hw *vou)
-{
- /* Clear bit for bypass by ID */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL,
- TILE2RASTESCAN_BYPASS_MODE, 0);
-
- /* Select ARIDR mode */
- zx_writel_mask(vou->dtrc + DTRC_DETILE_CTRL, DETILE_ARIDR_MODE_MASK,
- DETILE_ARID_IN_ARIDR);
-
- /* Bypass decompression for both frames */
- zx_writel_mask(vou->dtrc + DTRC_F0_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
- zx_writel_mask(vou->dtrc + DTRC_F1_CTRL, DTRC_DECOMPRESS_BYPASS,
- DTRC_DECOMPRESS_BYPASS);
-
- /* Set up ARID register */
- zx_writel(vou->dtrc + DTRC_ARID, DTRC_ARID3(0xf) | DTRC_ARID2(0xe) |
- DTRC_ARID1(0xf) | DTRC_ARID0(0xe));
-}
-
-static void vou_hw_init(struct zx_vou_hw *vou)
-{
- /* Release reset for all VOU modules */
- zx_writel(vou->vouctl + VOU_SOFT_RST, ~0);
-
- /* Enable all VOU module clocks */
- zx_writel(vou->vouctl + VOU_CLK_EN, ~0);
-
- /* Clear both OSD and TIMING_CTRL interrupt state */
- zx_writel(vou->osd + OSD_INT_CLRSTA, ~0);
- zx_writel(vou->timing + TIMING_INT_STATE, ~0);
-
-	/* Enable OSD and TIMING_CTRL interrupts */
- zx_writel(vou->osd + OSD_INT_MSK, OSD_INT_ENABLE);
- zx_writel(vou->timing + TIMING_INT_CTRL, TIMING_INT_ENABLE);
-
- /* Select GPC as input to gl/vl scaler as a sane default setting */
- zx_writel(vou->otfppu + OTFPPU_RSZ_DATA_SOURCE, 0x2a);
-
- /*
-	 * The channel and layer logic needs to be reset per frame, when
-	 * the frame starts, to get the VOU to work properly.
- */
- zx_writel_mask(vou->osd + OSD_RST_CLR, RST_PER_FRAME, RST_PER_FRAME);
-
- vou_dtrc_init(vou);
-}
-
-static int zx_crtc_bind(struct device *dev, struct device *master, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct drm_device *drm = data;
- struct zx_vou_hw *vou;
- struct resource *res;
- int irq;
- int ret;
-
- vou = devm_kzalloc(dev, sizeof(*vou), GFP_KERNEL);
- if (!vou)
- return -ENOMEM;
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "osd");
- vou->osd = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->osd)) {
- ret = PTR_ERR(vou->osd);
- DRM_DEV_ERROR(dev, "failed to remap osd region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "timing_ctrl");
- vou->timing = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->timing)) {
- ret = PTR_ERR(vou->timing);
- DRM_DEV_ERROR(dev, "failed to remap timing_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dtrc");
- vou->dtrc = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->dtrc)) {
- ret = PTR_ERR(vou->dtrc);
- DRM_DEV_ERROR(dev, "failed to remap dtrc region: %d\n", ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vou_ctrl");
- vou->vouctl = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->vouctl)) {
- ret = PTR_ERR(vou->vouctl);
- DRM_DEV_ERROR(dev, "failed to remap vou_ctrl region: %d\n",
- ret);
- return ret;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otfppu");
- vou->otfppu = devm_ioremap_resource(dev, res);
- if (IS_ERR(vou->otfppu)) {
- ret = PTR_ERR(vou->otfppu);
- DRM_DEV_ERROR(dev, "failed to remap otfppu region: %d\n", ret);
- return ret;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- vou->axi_clk = devm_clk_get(dev, "aclk");
- if (IS_ERR(vou->axi_clk)) {
- ret = PTR_ERR(vou->axi_clk);
- DRM_DEV_ERROR(dev, "failed to get axi_clk: %d\n", ret);
- return ret;
- }
-
- vou->ppu_clk = devm_clk_get(dev, "ppu_wclk");
- if (IS_ERR(vou->ppu_clk)) {
- ret = PTR_ERR(vou->ppu_clk);
- DRM_DEV_ERROR(dev, "failed to get ppu_clk: %d\n", ret);
- return ret;
- }
-
- ret = clk_prepare_enable(vou->axi_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable axi_clk: %d\n", ret);
- return ret;
- }
-
-	ret = clk_prepare_enable(vou->ppu_clk);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to enable ppu_clk: %d\n", ret);
- goto disable_axi_clk;
- }
-
- vou->dev = dev;
- dev_set_drvdata(dev, vou);
-
- vou_hw_init(vou);
-
- ret = devm_request_irq(dev, irq, vou_irq_handler, 0, "zx_vou", vou);
- if (ret < 0) {
- DRM_DEV_ERROR(dev, "failed to request vou irq: %d\n", ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_MAIN);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init main channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- ret = zx_crtc_init(drm, vou, VOU_CHN_AUX);
- if (ret) {
- DRM_DEV_ERROR(dev, "failed to init aux channel crtc: %d\n",
- ret);
- goto disable_ppu_clk;
- }
-
- zx_overlay_init(drm, vou);
-
- return 0;
-
-disable_ppu_clk:
- clk_disable_unprepare(vou->ppu_clk);
-disable_axi_clk:
- clk_disable_unprepare(vou->axi_clk);
- return ret;
-}
-
-static void zx_crtc_unbind(struct device *dev, struct device *master,
- void *data)
-{
- struct zx_vou_hw *vou = dev_get_drvdata(dev);
-
- clk_disable_unprepare(vou->axi_clk);
- clk_disable_unprepare(vou->ppu_clk);
-}
-
-static const struct component_ops zx_crtc_component_ops = {
- .bind = zx_crtc_bind,
- .unbind = zx_crtc_unbind,
-};
-
-static int zx_crtc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &zx_crtc_component_ops);
-}
-
-static int zx_crtc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &zx_crtc_component_ops);
- return 0;
-}
-
-static const struct of_device_id zx_crtc_of_match[] = {
- { .compatible = "zte,zx296718-dpc", },
- { /* end */ },
-};
-MODULE_DEVICE_TABLE(of, zx_crtc_of_match);
-
-struct platform_driver zx_crtc_driver = {
- .probe = zx_crtc_probe,
- .remove = zx_crtc_remove,
- .driver = {
- .name = "zx-crtc",
- .of_match_table = zx_crtc_of_match,
- },
-};
diff --git a/drivers/gpu/drm/zte/zx_vou.h b/drivers/gpu/drm/zte/zx_vou.h
deleted file mode 100644
index b25f34f865ae..000000000000
--- a/drivers/gpu/drm/zte/zx_vou.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_H__
-#define __ZX_VOU_H__
-
-#define VOU_CRTC_MASK 0x3
-
-/* VOU output interfaces */
-enum vou_inf_id {
- VOU_HDMI = 0,
- VOU_RGB_LCD = 1,
- VOU_TV_ENC = 2,
- VOU_MIPI_DSI = 3,
- VOU_LVDS = 4,
- VOU_VGA = 5,
-};
-
-enum vou_inf_hdmi_audio {
- VOU_HDMI_AUD_SPDIF = BIT(0),
- VOU_HDMI_AUD_I2S = BIT(1),
- VOU_HDMI_AUD_DSD = BIT(2),
- VOU_HDMI_AUD_HBR = BIT(3),
- VOU_HDMI_AUD_PARALLEL = BIT(4),
-};
-
-void vou_inf_hdmi_audio_sel(struct drm_crtc *crtc,
- enum vou_inf_hdmi_audio aud);
-void vou_inf_enable(enum vou_inf_id id, struct drm_crtc *crtc);
-void vou_inf_disable(enum vou_inf_id id, struct drm_crtc *crtc);
-
-enum vou_div_id {
- VOU_DIV_VGA,
- VOU_DIV_PIC,
- VOU_DIV_TVENC,
- VOU_DIV_HDMI_PNX,
- VOU_DIV_HDMI,
- VOU_DIV_INF,
- VOU_DIV_LAYER,
-};
-
-enum vou_div_val {
- VOU_DIV_1 = 0,
- VOU_DIV_2 = 1,
- VOU_DIV_4 = 3,
- VOU_DIV_8 = 7,
-};
-
-struct vou_div_config {
- enum vou_div_id id;
- enum vou_div_val val;
-};
-
-void zx_vou_config_dividers(struct drm_crtc *crtc,
- struct vou_div_config *configs, int num);
-
-void zx_vou_layer_enable(struct drm_plane *plane);
-void zx_vou_layer_disable(struct drm_plane *plane,
- struct drm_plane_state *old_state);
-
-#endif /* __ZX_VOU_H__ */
diff --git a/drivers/gpu/drm/zte/zx_vou_regs.h b/drivers/gpu/drm/zte/zx_vou_regs.h
deleted file mode 100644
index 2ddb199cb912..000000000000
--- a/drivers/gpu/drm/zte/zx_vou_regs.h
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright 2016 Linaro Ltd.
- * Copyright 2016 ZTE Corporation.
- */
-
-#ifndef __ZX_VOU_REGS_H__
-#define __ZX_VOU_REGS_H__
-
-/* Sub-module offset */
-#define MAIN_GL_OFFSET 0x130
-#define MAIN_GL_CSC_OFFSET 0x580
-#define MAIN_CHN_CSC_OFFSET 0x6c0
-#define MAIN_HBSC_OFFSET 0x820
-#define MAIN_DITHER_OFFSET 0x960
-#define MAIN_RSZ_OFFSET 0x600 /* OTFPPU sub-module */
-
-#define AUX_GL_OFFSET 0x200
-#define AUX_GL_CSC_OFFSET 0x5d0
-#define AUX_CHN_CSC_OFFSET 0x710
-#define AUX_HBSC_OFFSET 0x860
-#define AUX_DITHER_OFFSET 0x970
-#define AUX_RSZ_OFFSET 0x800
-
-#define OSD_VL0_OFFSET 0x040
-#define OSD_VL_OFFSET(i) (OSD_VL0_OFFSET + 0x050 * (i))
-
-#define HBSC_VL0_OFFSET 0x760
-#define HBSC_VL_OFFSET(i) (HBSC_VL0_OFFSET + 0x040 * (i))
-
-#define RSZ_VL1_U0 0xa00
-#define RSZ_VL_OFFSET(i) (RSZ_VL1_U0 + 0x200 * (i))
-
-/* OSD (GPC_GLOBAL) registers */
-#define OSD_INT_STA 0x04
-#define OSD_INT_CLRSTA 0x08
-#define OSD_INT_MSK 0x0c
-#define OSD_INT_AUX_UPT BIT(14)
-#define OSD_INT_MAIN_UPT BIT(13)
-#define OSD_INT_GL1_LBW BIT(10)
-#define OSD_INT_GL0_LBW BIT(9)
-#define OSD_INT_VL2_LBW BIT(8)
-#define OSD_INT_VL1_LBW BIT(7)
-#define OSD_INT_VL0_LBW BIT(6)
-#define OSD_INT_BUS_ERR BIT(3)
-#define OSD_INT_CFG_ERR BIT(2)
-#define OSD_INT_ERROR (\
- OSD_INT_GL1_LBW | OSD_INT_GL0_LBW | \
- OSD_INT_VL2_LBW | OSD_INT_VL1_LBW | OSD_INT_VL0_LBW | \
- OSD_INT_BUS_ERR | OSD_INT_CFG_ERR \
-)
-#define OSD_INT_ENABLE (OSD_INT_ERROR | OSD_INT_AUX_UPT | OSD_INT_MAIN_UPT)
-#define OSD_CTRL0 0x10
-#define OSD_CTRL0_VL0_EN BIT(13)
-#define OSD_CTRL0_VL0_SEL BIT(12)
-#define OSD_CTRL0_VL1_EN BIT(11)
-#define OSD_CTRL0_VL1_SEL BIT(10)
-#define OSD_CTRL0_VL2_EN BIT(9)
-#define OSD_CTRL0_VL2_SEL BIT(8)
-#define OSD_CTRL0_GL0_EN BIT(7)
-#define OSD_CTRL0_GL0_SEL BIT(6)
-#define OSD_CTRL0_GL1_EN BIT(5)
-#define OSD_CTRL0_GL1_SEL BIT(4)
-#define OSD_RST_CLR 0x1c
-#define RST_PER_FRAME BIT(19)
-
-/* Main/Aux channel registers */
-#define OSD_MAIN_CHN 0x470
-#define OSD_AUX_CHN 0x4d0
-#define CHN_CTRL0 0x00
-#define CHN_ENABLE BIT(0)
-#define CHN_CTRL1 0x04
-#define CHN_SCREEN_W_SHIFT 18
-#define CHN_SCREEN_W_MASK (0x1fff << CHN_SCREEN_W_SHIFT)
-#define CHN_SCREEN_H_SHIFT 5
-#define CHN_SCREEN_H_MASK (0x1fff << CHN_SCREEN_H_SHIFT)
-#define CHN_UPDATE 0x08
-#define CHN_INTERLACE_BUF_CTRL 0x24
-#define CHN_INTERLACE_EN BIT(2)
-
-/* Dither registers */
-#define OSD_DITHER_CTRL0 0x00
-#define DITHER_BYSPASS BIT(31)
-
-/* TIMING_CTRL registers */
-#define TIMING_TC_ENABLE 0x04
-#define AUX_TC_EN BIT(1)
-#define MAIN_TC_EN BIT(0)
-#define FIR_MAIN_ACTIVE 0x08
-#define FIR_AUX_ACTIVE 0x0c
-#define V_ACTIVE_SHIFT 16
-#define V_ACTIVE_MASK (0xffff << V_ACTIVE_SHIFT)
-#define H_ACTIVE_SHIFT 0
-#define H_ACTIVE_MASK (0xffff << H_ACTIVE_SHIFT)
-#define FIR_MAIN_H_TIMING 0x10
-#define FIR_MAIN_V_TIMING 0x14
-#define FIR_AUX_H_TIMING 0x18
-#define FIR_AUX_V_TIMING 0x1c
-#define SYNC_WIDE_SHIFT 22
-#define SYNC_WIDE_MASK (0x3ff << SYNC_WIDE_SHIFT)
-#define BACK_PORCH_SHIFT 11
-#define BACK_PORCH_MASK (0x7ff << BACK_PORCH_SHIFT)
-#define FRONT_PORCH_SHIFT 0
-#define FRONT_PORCH_MASK (0x7ff << FRONT_PORCH_SHIFT)
-#define TIMING_CTRL 0x20
-#define AUX_POL_SHIFT 3
-#define AUX_POL_MASK (0x7 << AUX_POL_SHIFT)
-#define MAIN_POL_SHIFT 0
-#define MAIN_POL_MASK (0x7 << MAIN_POL_SHIFT)
-#define POL_DE_SHIFT 2
-#define POL_VSYNC_SHIFT 1
-#define POL_HSYNC_SHIFT 0
-#define TIMING_INT_CTRL 0x24
-#define TIMING_INT_STATE 0x28
-#define TIMING_INT_AUX_FRAME BIT(3)
-#define TIMING_INT_MAIN_FRAME BIT(1)
-#define TIMING_INT_AUX_FRAME_SEL_VSW (0x2 << 10)
-#define TIMING_INT_MAIN_FRAME_SEL_VSW (0x2 << 6)
-#define TIMING_INT_ENABLE (\
- TIMING_INT_MAIN_FRAME_SEL_VSW | TIMING_INT_AUX_FRAME_SEL_VSW | \
- TIMING_INT_MAIN_FRAME | TIMING_INT_AUX_FRAME \
-)
-#define TIMING_MAIN_SHIFT 0x2c
-#define TIMING_AUX_SHIFT 0x30
-#define H_SHIFT_VAL 0x0048
-#define V_SHIFT_VAL 0x0001
-#define SCAN_CTRL 0x34
-#define AUX_PI_EN BIT(19)
-#define MAIN_PI_EN BIT(18)
-#define AUX_INTERLACE_SEL BIT(1)
-#define MAIN_INTERLACE_SEL BIT(0)
-#define SEC_V_ACTIVE 0x38
-#define SEC_VACT_MAIN_SHIFT 0
-#define SEC_VACT_MAIN_MASK (0xffff << SEC_VACT_MAIN_SHIFT)
-#define SEC_VACT_AUX_SHIFT 16
-#define SEC_VACT_AUX_MASK (0xffff << SEC_VACT_AUX_SHIFT)
-#define SEC_MAIN_V_TIMING 0x3c
-#define SEC_AUX_V_TIMING 0x40
-#define TIMING_MAIN_PI_SHIFT 0x68
-#define TIMING_AUX_PI_SHIFT 0x6c
-#define H_PI_SHIFT_VAL 0x000f
-
-#define V_ACTIVE(x) (((x) << V_ACTIVE_SHIFT) & V_ACTIVE_MASK)
-#define H_ACTIVE(x) (((x) << H_ACTIVE_SHIFT) & H_ACTIVE_MASK)
-
-#define SYNC_WIDE(x) (((x) << SYNC_WIDE_SHIFT) & SYNC_WIDE_MASK)
-#define BACK_PORCH(x) (((x) << BACK_PORCH_SHIFT) & BACK_PORCH_MASK)
-#define FRONT_PORCH(x) (((x) << FRONT_PORCH_SHIFT) & FRONT_PORCH_MASK)
-
-/* DTRC registers */
-#define DTRC_F0_CTRL 0x2c
-#define DTRC_F1_CTRL 0x5c
-#define DTRC_DECOMPRESS_BYPASS BIT(17)
-#define DTRC_DETILE_CTRL 0x68
-#define TILE2RASTESCAN_BYPASS_MODE BIT(30)
-#define DETILE_ARIDR_MODE_MASK (0x3 << 0)
-#define DETILE_ARID_ALL 0
-#define DETILE_ARID_IN_ARIDR 1
-#define DETILE_ARID_BYP_BUT_ARIDR 2
-#define DETILE_ARID_IN_ARIDR2 3
-#define DTRC_ARID 0x6c
-#define DTRC_ARID3_SHIFT 24
-#define DTRC_ARID3_MASK (0xff << DTRC_ARID3_SHIFT)
-#define DTRC_ARID2_SHIFT 16
-#define DTRC_ARID2_MASK (0xff << DTRC_ARID2_SHIFT)
-#define DTRC_ARID1_SHIFT 8
-#define DTRC_ARID1_MASK (0xff << DTRC_ARID1_SHIFT)
-#define DTRC_ARID0_SHIFT 0
-#define DTRC_ARID0_MASK (0xff << DTRC_ARID0_SHIFT)
-#define DTRC_DEC2DDR_ARID 0x70
-
-#define DTRC_ARID3(x) (((x) << DTRC_ARID3_SHIFT) & DTRC_ARID3_MASK)
-#define DTRC_ARID2(x) (((x) << DTRC_ARID2_SHIFT) & DTRC_ARID2_MASK)
-#define DTRC_ARID1(x) (((x) << DTRC_ARID1_SHIFT) & DTRC_ARID1_MASK)
-#define DTRC_ARID0(x) (((x) << DTRC_ARID0_SHIFT) & DTRC_ARID0_MASK)
-
-/* VOU_CTRL registers */
-#define VOU_INF_EN 0x00
-#define VOU_INF_CH_SEL 0x04
-#define VOU_INF_DATA_SEL 0x08
-#define VOU_SOFT_RST 0x14
-#define VOU_CLK_SEL 0x18
-#define VGA_AUX_DIV_SHIFT 29
-#define VGA_MAIN_DIV_SHIFT 26
-#define PIC_MAIN_DIV_SHIFT 23
-#define PIC_AUX_DIV_SHIFT 20
-#define VOU_CLK_VL2_SEL BIT(8)
-#define VOU_CLK_VL1_SEL BIT(7)
-#define VOU_CLK_VL0_SEL BIT(6)
-#define VOU_CLK_GL1_SEL BIT(5)
-#define VOU_CLK_GL0_SEL BIT(4)
-#define VOU_DIV_PARA 0x1c
-#define DIV_PARA_UPDATE BIT(31)
-#define TVENC_AUX_DIV_SHIFT 28
-#define HDMI_AUX_PNX_DIV_SHIFT 25
-#define HDMI_MAIN_PNX_DIV_SHIFT 22
-#define HDMI_AUX_DIV_SHIFT 19
-#define HDMI_MAIN_DIV_SHIFT 16
-#define TVENC_MAIN_DIV_SHIFT 13
-#define INF_AUX_DIV_SHIFT 9
-#define INF_MAIN_DIV_SHIFT 6
-#define LAYER_AUX_DIV_SHIFT 3
-#define LAYER_MAIN_DIV_SHIFT 0
-#define VOU_CLK_REQEN 0x20
-#define VOU_CLK_EN 0x24
-#define VOU_INF_HDMI_CTRL 0x30
-#define VOU_HDMI_AUD_MASK 0x1f
-
-/* OTFPPU_CTRL registers */
-#define OTFPPU_RSZ_DATA_SOURCE 0x04
-
-#endif /* __ZX_VOU_REGS_H__ */
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index 8ae301eef643..a9639d098893 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -259,10 +259,24 @@ static int mbus_code_to_bus_cfg(struct ipu_csi_bus_config *cfg, u32 mbus_code,
cfg->data_width = IPU_CSI_DATA_WIDTH_8;
break;
case MEDIA_BUS_FMT_UYVY8_1X16:
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_UYVY;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
+ cfg->mipi_dt = MIPI_DT_YUV422;
+ break;
case MEDIA_BUS_FMT_YUYV8_1X16:
- cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ if (mbus_type == V4L2_MBUS_BT656) {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_YUV422_YUYV;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_8;
+ } else {
+ cfg->data_fmt = CSI_SENS_CONF_DATA_FMT_BAYER;
+ cfg->data_width = IPU_CSI_DATA_WIDTH_16;
+ }
cfg->mipi_dt = MIPI_DT_YUV422;
- cfg->data_width = IPU_CSI_DATA_WIDTH_16;
break;
case MEDIA_BUS_FMT_SBGGR8_1X8:
case MEDIA_BUS_FMT_SGBRG8_1X8:
@@ -332,7 +346,7 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
const struct v4l2_mbus_config *mbus_cfg,
const struct v4l2_mbus_framefmt *mbus_fmt)
{
- int ret;
+ int ret, is_bt1120;
memset(csicfg, 0, sizeof(*csicfg));
@@ -353,11 +367,18 @@ static int fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
+ /* UYVY10_1X20 etc. should be supported as well */
+ is_bt1120 = mbus_fmt->code == MEDIA_BUS_FMT_UYVY8_1X16 ||
+ mbus_fmt->code == MEDIA_BUS_FMT_YUYV8_1X16;
if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
mbus_fmt->field == V4L2_FIELD_ALTERNATE)
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_INTERLACED_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
- csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
+ csicfg->clk_mode = is_bt1120 ?
+ IPU_CSI_CLK_MODE_CCIR1120_PROGRESSIVE_SDR :
+ IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
break;
case V4L2_MBUS_CSI2_DPHY:
/*
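
The two ipu-csi hunks above make the same decision in two places: a 16-bit UYVY/YUYV media-bus code arriving over a BT.656-style bus is really BT.1120, so the CSI must use the CCIR1120 clock modes and an 8-bit data width, while other bus types keep the 16-bit generic/Bayer path. Below is a condensed, stand-alone restatement of that selection; the enum and helper names are hypothetical and only the decision logic mirrors the patch.

#include <stdbool.h>

/* Hypothetical names; only the selection logic mirrors the hunks above. */
enum example_clk_mode {
	EX_CCIR656_PROGRESSIVE,
	EX_CCIR656_INTERLACED,
	EX_CCIR1120_PROGRESSIVE_SDR,
	EX_CCIR1120_INTERLACED_SDR,
};

static enum example_clk_mode example_pick_clk_mode(bool is_16bit_yuv,
						   bool interlaced)
{
	/* 16-bit UYVY/YUYV over a BT.656-style bus is treated as BT.1120. */
	if (is_16bit_yuv)
		return interlaced ? EX_CCIR1120_INTERLACED_SDR
				  : EX_CCIR1120_PROGRESSIVE_SDR;

	return interlaced ? EX_CCIR656_INTERLACED
			  : EX_CCIR656_PROGRESSIVE;
}
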
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 477baa30889c..ece147d1a278 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -129,10 +129,12 @@ struct cp2112_xfer_status_report {
struct cp2112_string_report {
u8 dummy; /* force .string to be aligned */
- u8 report; /* CP2112_*_STRING */
- u8 length; /* length in bytes of everyting after .report */
- u8 type; /* USB_DT_STRING */
- wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ struct_group_attr(contents, __packed,
+ u8 report; /* CP2112_*_STRING */
+ u8 length; /* length in bytes of everything after .report */
+ u8 type; /* USB_DT_STRING */
+ wchar_t string[30]; /* UTF16_LITTLE_ENDIAN string */
+ );
} __packed;
/* Number of times to request transfer status before giving up waiting for a
@@ -986,8 +988,8 @@ static ssize_t pstr_show(struct device *kdev,
u8 length;
int ret;
- ret = cp2112_hid_get(hdev, attr->report, &report.report,
- sizeof(report) - 1, HID_FEATURE_REPORT);
+ ret = cp2112_hid_get(hdev, attr->report, (u8 *)&report.contents,
+ sizeof(report.contents), HID_FEATURE_REPORT);
if (ret < 3) {
hid_err(hdev, "error reading %s string: %d\n", kattr->attr.name,
ret);
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 1ca64481145e..ea17abc7ad52 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -857,7 +857,7 @@ static int kone_raw_event(struct hid_device *hdev, struct hid_report *report,
memcpy(&kone->last_mouse_event, event,
sizeof(struct kone_mouse_event));
else
- memset(&event->tilt, 0, 5);
+ memset(&event->wipe, 0, sizeof(event->wipe));
kone_keep_values_up_to_date(kone, event);
diff --git a/drivers/hid/hid-roccat-kone.h b/drivers/hid/hid-roccat-kone.h
index 4a1a9cb76b08..65c800e3addc 100644
--- a/drivers/hid/hid-roccat-kone.h
+++ b/drivers/hid/hid-roccat-kone.h
@@ -152,11 +152,13 @@ struct kone_mouse_event {
uint16_t x;
uint16_t y;
uint8_t wheel; /* up = 1, down = -1 */
- uint8_t tilt; /* right = 1, left = -1 */
- uint8_t unknown;
- uint8_t event;
- uint8_t value; /* press = 0, release = 1 */
- uint8_t macro_key; /* 0 to 8 */
+ struct_group(wipe,
+ uint8_t tilt; /* right = 1, left = -1 */
+ uint8_t unknown;
+ uint8_t event;
+ uint8_t value; /* press = 0, release = 1 */
+ uint8_t macro_key; /* 0 to 8 */
+ );
} __attribute__ ((__packed__));
enum kone_mouse_events {
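
Both the cp2112 and roccat-kone hunks use struct_group()/struct_group_attr() from <linux/stddef.h>: the wrapped members become addressable through a named mirror, so a memset() or memcpy() across the whole run is expressed as a single, correctly sized field access instead of a write that appears to overflow the first member. A minimal sketch of the same pattern, with a hypothetical struct:

#include <linux/stddef.h>	/* struct_group() */
#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical example structure. */
struct example_event {
	u8 header;
	struct_group(payload,		/* addressable as one unit */
		u8 code;
		u8 value;
		u8 extra[4];
	);
} __packed;

static void example_clear_payload(struct example_event *ev)
{
	/* Clears code, value and extra[] with one bounded, field-sized write. */
	memset(&ev->payload, 0, sizeof(ev->payload));
}
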
diff --git a/drivers/hid/surface-hid/surface_hid.c b/drivers/hid/surface-hid/surface_hid.c
index a3a70e4f3f6c..d4aa8c81903a 100644
--- a/drivers/hid/surface-hid/surface_hid.c
+++ b/drivers/hid/surface-hid/surface_hid.c
@@ -209,7 +209,7 @@ static int surface_hid_probe(struct ssam_device *sdev)
shid->notif.base.priority = 1;
shid->notif.base.fn = ssam_hid_event_fn;
- shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG;
+ shid->notif.event.reg = SSAM_EVENT_REGISTRY_REG(sdev->uid.target);
shid->notif.event.id.target_category = sdev->uid.category;
shid->notif.event.id.instance = sdev->uid.instance;
shid->notif.event.mask = SSAM_EVENT_MASK_STRICT;
@@ -230,7 +230,7 @@ static void surface_hid_remove(struct ssam_device *sdev)
}
static const struct ssam_device_id surface_hid_match[] = {
- { SSAM_SDEV(HID, 0x02, SSAM_ANY_IID, 0x00) },
+ { SSAM_SDEV(HID, SSAM_ANY_TID, SSAM_ANY_IID, 0x00) },
{ },
};
MODULE_DEVICE_TABLE(ssam, surface_hid_match);
diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c
index 96d0eccca3aa..21f11a5b965b 100644
--- a/drivers/hsi/clients/ssi_protocol.c
+++ b/drivers/hsi/clients/ssi_protocol.c
@@ -1055,14 +1055,16 @@ static const struct net_device_ops ssip_pn_ops = {
static void ssip_pn_setup(struct net_device *dev)
{
+ static const u8 addr = PN_MEDIA_SOS;
+
dev->features = 0;
dev->netdev_ops = &ssip_pn_ops;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->mtu = SSIP_DEFAULT_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_SOS;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = SSIP_TXQUEUE_LEN;
dev->needs_free_netdev = true;
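
The ssi_protocol hunk stops writing dev->dev_addr[0] directly and goes through dev_addr_set() instead, the accessor to use now that a netdev's hardware address is meant to be modified only via the dev_addr helpers. A minimal sketch of the same move for a one-byte address (the setup callback is hypothetical):

#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
	static const u8 addr = 0x01;	/* example one-byte hardware address */

	dev->addr_len = 1;
	/* Copies dev->addr_len bytes into dev->dev_addr via the helper. */
	dev_addr_set(dev, &addr);
}
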
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index d1123ceb38f3..dd12af20e467 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -8,6 +8,7 @@ config HYPERV
|| (ARM64 && !CPU_BIG_ENDIAN))
select PARAVIRT
select X86_HV_CALLBACK_VECTOR if X86
+ select VMAP_PFN
help
Select this option to run Linux as a Hyper-V client operating
system.
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index f3761c73b074..dc5c35210c16 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -17,6 +17,7 @@
#include <linux/hyperv.h>
#include <linux/uio.h>
#include <linux/interrupt.h>
+#include <linux/set_memory.h>
#include <asm/page.h>
#include <asm/mshyperv.h>
@@ -456,7 +457,7 @@ nomem:
static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
enum hv_gpadl_type type, void *kbuffer,
u32 size, u32 send_offset,
- u32 *gpadl_handle)
+ struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
@@ -474,6 +475,15 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
if (ret)
return ret;
+ ret = set_memory_decrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+ if (ret) {
+ dev_warn(&channel->device_obj->device,
+ "Failed to set host visibility for new GPADL %d.\n",
+ ret);
+ return ret;
+ }
+
init_completion(&msginfo->waitevent);
msginfo->waiting_channel = channel;
@@ -537,7 +547,10 @@ static int __vmbus_establish_gpadl(struct vmbus_channel *channel,
}
/* At this point, we received the gpadl created msg */
- *gpadl_handle = gpadlmsg->gpadl;
+ gpadl->gpadl_handle = gpadlmsg->gpadl;
+ gpadl->buffer = kbuffer;
+ gpadl->size = size;
+
cleanup:
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
@@ -549,6 +562,11 @@ cleanup:
}
kfree(msginfo);
+
+ if (ret)
+ set_memory_encrypted((unsigned long)kbuffer,
+ PFN_UP(size));
+
return ret;
}
@@ -561,10 +579,10 @@ cleanup:
* @gpadl_handle: some funky thing
*/
int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
- u32 size, u32 *gpadl_handle)
+ u32 size, struct vmbus_gpadl *gpadl)
{
return __vmbus_establish_gpadl(channel, HV_GPADL_BUFFER, kbuffer, size,
- 0U, gpadl_handle);
+ 0U, gpadl);
}
EXPORT_SYMBOL_GPL(vmbus_establish_gpadl);
@@ -665,17 +683,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (!newchannel->max_pkt_size)
newchannel->max_pkt_size = VMBUS_DEFAULT_MAX_PKT_SIZE;
- err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages, 0);
- if (err)
- goto error_clean_ring;
-
- err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
- recv_pages, newchannel->max_pkt_size);
- if (err)
- goto error_clean_ring;
-
/* Establish the gpadl for the ring buffer */
- newchannel->ringbuffer_gpadlhandle = 0;
+ newchannel->ringbuffer_gpadlhandle.gpadl_handle = 0;
err = __vmbus_establish_gpadl(newchannel, HV_GPADL_RING,
page_address(newchannel->ringbuffer_page),
@@ -685,6 +694,16 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
if (err)
goto error_clean_ring;
+ err = hv_ringbuffer_init(&newchannel->outbound,
+ page, send_pages, 0);
+ if (err)
+ goto error_free_gpadl;
+
+ err = hv_ringbuffer_init(&newchannel->inbound, &page[send_pages],
+ recv_pages, newchannel->max_pkt_size);
+ if (err)
+ goto error_free_gpadl;
+
/* Create and init the channel open message */
open_info = kzalloc(sizeof(*open_info) +
sizeof(struct vmbus_channel_open_channel),
@@ -701,7 +720,8 @@ static int __vmbus_open(struct vmbus_channel *newchannel,
open_msg->header.msgtype = CHANNELMSG_OPENCHANNEL;
open_msg->openid = newchannel->offermsg.child_relid;
open_msg->child_relid = newchannel->offermsg.child_relid;
- open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
+ open_msg->ringbuffer_gpadlhandle
+ = newchannel->ringbuffer_gpadlhandle.gpadl_handle;
/*
* The unit of ->downstream_ringbuffer_pageoffset is HV_HYP_PAGE and
* the unit of ->ringbuffer_send_offset (i.e. send_pages) is PAGE, so
@@ -759,8 +779,7 @@ error_clean_msglist:
error_free_info:
kfree(open_info);
error_free_gpadl:
- vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
- newchannel->ringbuffer_gpadlhandle = 0;
+ vmbus_teardown_gpadl(newchannel, &newchannel->ringbuffer_gpadlhandle);
error_clean_ring:
hv_ringbuffer_cleanup(&newchannel->outbound);
hv_ringbuffer_cleanup(&newchannel->inbound);
@@ -806,7 +825,7 @@ EXPORT_SYMBOL_GPL(vmbus_open);
/*
* vmbus_teardown_gpadl -Teardown the specified GPADL handle
*/
-int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
+int vmbus_teardown_gpadl(struct vmbus_channel *channel, struct vmbus_gpadl *gpadl)
{
struct vmbus_channel_gpadl_teardown *msg;
struct vmbus_channel_msginfo *info;
@@ -825,7 +844,7 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
msg->header.msgtype = CHANNELMSG_GPADL_TEARDOWN;
msg->child_relid = channel->offermsg.child_relid;
- msg->gpadl = gpadl_handle;
+ msg->gpadl = gpadl->gpadl_handle;
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_add_tail(&info->msglistentry,
@@ -845,6 +864,8 @@ int vmbus_teardown_gpadl(struct vmbus_channel *channel, u32 gpadl_handle)
wait_for_completion(&info->waitevent);
+ gpadl->gpadl_handle = 0;
+
post_msg_err:
/*
* If the channel has been rescinded;
@@ -859,6 +880,12 @@ post_msg_err:
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
kfree(info);
+
+ ret = set_memory_encrypted((unsigned long)gpadl->buffer,
+ PFN_UP(gpadl->size));
+ if (ret)
+ pr_warn("Fail to set mem host visibility in GPADL teardown %d.\n", ret);
+
return ret;
}
EXPORT_SYMBOL_GPL(vmbus_teardown_gpadl);
@@ -933,9 +960,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
}
/* Tear down the gpadl for the channel's ring buffer */
- else if (channel->ringbuffer_gpadlhandle) {
- ret = vmbus_teardown_gpadl(channel,
- channel->ringbuffer_gpadlhandle);
+ else if (channel->ringbuffer_gpadlhandle.gpadl_handle) {
+ ret = vmbus_teardown_gpadl(channel, &channel->ringbuffer_gpadlhandle);
if (ret) {
pr_err("Close failed: teardown gpadl return %d\n", ret);
/*
@@ -943,8 +969,6 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
* it is perhaps better to leak memory.
*/
}
-
- channel->ringbuffer_gpadlhandle = 0;
}
if (!ret)
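
The channel.c changes track a GPADL as a struct vmbus_gpadl (handle plus the backing buffer and its size) instead of a bare u32 handle, so the teardown path can re-encrypt the memory that establish decrypted for the host. A hedged sketch of how a VMBus driver would use the reworked calls (the wrapper names are hypothetical, error handling trimmed):

#include <linux/hyperv.h>

static int example_share_buffer(struct vmbus_channel *chan, void *buf,
				u32 size, struct vmbus_gpadl *gpadl)
{
	/*
	 * Marks the pages host-visible (decrypted on isolated VMs) and
	 * registers them with the host; fills in *gpadl on success.
	 */
	return vmbus_establish_gpadl(chan, buf, size, gpadl);
}

static void example_unshare_buffer(struct vmbus_channel *chan,
				   struct vmbus_gpadl *gpadl)
{
	/* Revokes the GPADL and re-encrypts the backing pages. */
	vmbus_teardown_gpadl(chan, gpadl);
}
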
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 142308526ec6..2829575fd9b7 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -1581,21 +1581,6 @@ cleanup:
return ret;
}
-static void invoke_sc_cb(struct vmbus_channel *primary_channel)
-{
- struct list_head *cur, *tmp;
- struct vmbus_channel *cur_channel;
-
- if (primary_channel->sc_creation_callback == NULL)
- return;
-
- list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
- cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-
- primary_channel->sc_creation_callback(cur_channel);
- }
-}
-
void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
void (*sc_cr_cb)(struct vmbus_channel *new_sc))
{
@@ -1603,25 +1588,6 @@ void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
}
EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
-bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
-{
- bool ret;
-
- ret = !list_empty(&primary->sc_list);
-
- if (ret) {
- /*
- * Invoke the callback on sub-channel creation.
- * This will present a uniform interface to the
- * clients.
- */
- invoke_sc_cb(primary);
- }
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
-
void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
void (*chn_rescind_cb)(struct vmbus_channel *))
{
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 5e479d54918c..a3d8be8d6cfb 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -19,6 +19,8 @@
#include <linux/vmalloc.h>
#include <linux/hyperv.h>
#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/set_memory.h>
#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -102,8 +104,9 @@ int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version)
vmbus_connection.msg_conn_id = VMBUS_MESSAGE_CONNECTION_ID;
}
- msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
- msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ msg->monitor_page1 = vmbus_connection.monitor_pages_pa[0];
+ msg->monitor_page2 = vmbus_connection.monitor_pages_pa[1];
+
msg->target_vcpu = hv_cpu_number_to_vp_number(VMBUS_CONNECT_CPU);
/*
@@ -216,6 +219,65 @@ int vmbus_connect(void)
goto cleanup;
}
+ vmbus_connection.monitor_pages_original[0]
+ = vmbus_connection.monitor_pages[0];
+ vmbus_connection.monitor_pages_original[1]
+ = vmbus_connection.monitor_pages[1];
+ vmbus_connection.monitor_pages_pa[0]
+ = virt_to_phys(vmbus_connection.monitor_pages[0]);
+ vmbus_connection.monitor_pages_pa[1]
+ = virt_to_phys(vmbus_connection.monitor_pages[1]);
+
+ if (hv_is_isolation_supported()) {
+ ret = set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[0],
+ 1);
+ ret |= set_memory_decrypted((unsigned long)
+ vmbus_connection.monitor_pages[1],
+ 1);
+ if (ret)
+ goto cleanup;
+
+ /*
+ * An Isolation VM with AMD SNP needs to access the monitor pages
+ * via the address space above the shared GPA boundary.
+ */
+ if (hv_isolation_type_snp()) {
+ vmbus_connection.monitor_pages_pa[0] +=
+ ms_hyperv.shared_gpa_boundary;
+ vmbus_connection.monitor_pages_pa[1] +=
+ ms_hyperv.shared_gpa_boundary;
+
+ vmbus_connection.monitor_pages[0]
+ = memremap(vmbus_connection.monitor_pages_pa[0],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[0]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ vmbus_connection.monitor_pages[1]
+ = memremap(vmbus_connection.monitor_pages_pa[1],
+ HV_HYP_PAGE_SIZE,
+ MEMREMAP_WB);
+ if (!vmbus_connection.monitor_pages[1]) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
+
+ /*
+ * The "set memory host visibility" hypercall smears memory,
+ * so zero the monitor pages here.
+ */
+ memset(vmbus_connection.monitor_pages[0], 0x00,
+ HV_HYP_PAGE_SIZE);
+ memset(vmbus_connection.monitor_pages[1], 0x00,
+ HV_HYP_PAGE_SIZE);
+
+ }
+
msginfo = kzalloc(sizeof(*msginfo) +
sizeof(struct vmbus_channel_initiate_contact),
GFP_KERNEL);
@@ -303,10 +365,31 @@ void vmbus_disconnect(void)
vmbus_connection.int_page = NULL;
}
- hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[0]);
- hv_free_hyperv_page((unsigned long)vmbus_connection.monitor_pages[1]);
- vmbus_connection.monitor_pages[0] = NULL;
- vmbus_connection.monitor_pages[1] = NULL;
+ if (hv_is_isolation_supported()) {
+ /*
+ * memunmap() internally checks whether the address is an ioremap
+ * address. It is a no-op in a non-SNP CVM, so there is no need to
+ * check the CVM type here.
+ */
+ memunmap(vmbus_connection.monitor_pages[0]);
+ memunmap(vmbus_connection.monitor_pages[1]);
+
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[0],
+ 1);
+ set_memory_encrypted((unsigned long)
+ vmbus_connection.monitor_pages_original[1],
+ 1);
+ }
+
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[0]);
+ hv_free_hyperv_page((unsigned long)
+ vmbus_connection.monitor_pages_original[1]);
+ vmbus_connection.monitor_pages_original[0] =
+ vmbus_connection.monitor_pages[0] = NULL;
+ vmbus_connection.monitor_pages_original[1] =
+ vmbus_connection.monitor_pages[1] = NULL;
}
/*
@@ -447,6 +530,10 @@ void vmbus_set_event(struct vmbus_channel *channel)
++channel->sig_events;
- hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
+ if (hv_isolation_type_snp())
+ hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
+ NULL, sizeof(channel->sig_event));
+ else
+ hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
EXPORT_SYMBOL_GPL(vmbus_set_event);
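
The vmbus_set_event() hunk is one instance of a dispatch repeated in hv_post_message() below: an SEV-SNP Isolation VM cannot issue the hypercall directly, so it is routed through the GHCB via hv_ghcb_hypercall(), while every other configuration keeps the fast-hypercall path. The dispatch in isolation (the calling function is hypothetical):

#include <linux/hyperv.h>
#include <asm/mshyperv.h>

static void example_signal(struct vmbus_channel *channel)
{
	if (hv_isolation_type_snp())
		/* SEV-SNP isolation: go through the GHCB page. */
		hv_ghcb_hypercall(HVCALL_SIGNAL_EVENT, &channel->sig_event,
				  NULL, sizeof(channel->sig_event));
	else
		hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}
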
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index e83507f49676..4d6480d57546 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -8,6 +8,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
@@ -97,7 +98,13 @@ int hv_post_message(union hv_connection_id connection_id,
aligned_msg->payload_size = payload_size;
memcpy((void *)aligned_msg->payload, payload, payload_size);
- status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
+ if (hv_isolation_type_snp())
+ status = hv_ghcb_hypercall(HVCALL_POST_MESSAGE,
+ (void *)aligned_msg, NULL,
+ sizeof(*aligned_msg));
+ else
+ status = hv_do_hypercall(HVCALL_POST_MESSAGE,
+ aligned_msg, NULL);
/* Preemption must remain disabled until after the hypercall
* so some other thread can't get scheduled onto this cpu and
@@ -136,17 +143,24 @@ int hv_synic_alloc(void)
tasklet_init(&hv_cpu->msg_dpc,
vmbus_on_msg_dpc, (unsigned long) hv_cpu);
- hv_cpu->synic_message_page =
- (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_message_page == NULL) {
- pr_err("Unable to allocate SYNIC message page\n");
- goto err;
- }
+ /*
+ * SynIC message and event pages are allocated by the paravisor.
+ * Skip allocating these pages here.
+ */
+ if (!hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_message_page == NULL) {
+ pr_err("Unable to allocate SYNIC message page\n");
+ goto err;
+ }
- hv_cpu->synic_event_page = (void *)get_zeroed_page(GFP_ATOMIC);
- if (hv_cpu->synic_event_page == NULL) {
- pr_err("Unable to allocate SYNIC event page\n");
- goto err;
+ hv_cpu->synic_event_page =
+ (void *)get_zeroed_page(GFP_ATOMIC);
+ if (hv_cpu->synic_event_page == NULL) {
+ pr_err("Unable to allocate SYNIC event page\n");
+ goto err;
+ }
}
hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
@@ -201,16 +215,35 @@ void hv_synic_enable_regs(unsigned int cpu)
/* Setup the Synic's message page */
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
simp.simp_enabled = 1;
- simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
- >> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_message_page
+ = memremap(simp.base_simp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+ if (!hv_cpu->synic_message_page)
+ pr_err("Fail to map syinc message page.\n");
+ } else {
+ simp.base_simp_gpa = virt_to_phys(hv_cpu->synic_message_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
/* Setup the Synic's event page */
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 1;
- siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
- >> HV_HYP_PAGE_SHIFT;
+
+ if (hv_isolation_type_snp()) {
+ hv_cpu->synic_event_page =
+ memremap(siefp.base_siefp_gpa << HV_HYP_PAGE_SHIFT,
+ HV_HYP_PAGE_SIZE, MEMREMAP_WB);
+
+ if (!hv_cpu->synic_event_page)
+ pr_err("Fail to map syinc event page.\n");
+ } else {
+ siefp.base_siefp_gpa = virt_to_phys(hv_cpu->synic_event_page)
+ >> HV_HYP_PAGE_SHIFT;
+ }
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
@@ -257,6 +290,8 @@ int hv_synic_init(unsigned int cpu)
*/
void hv_synic_disable_regs(unsigned int cpu)
{
+ struct hv_per_cpu_context *hv_cpu
+ = per_cpu_ptr(hv_context.cpu_context, cpu);
union hv_synic_sint shared_sint;
union hv_synic_simp simp;
union hv_synic_siefp siefp;
@@ -273,14 +308,27 @@ void hv_synic_disable_regs(unsigned int cpu)
shared_sint.as_uint64);
simp.as_uint64 = hv_get_register(HV_REGISTER_SIMP);
+ /*
+ * In an Isolation VM, the SIMP and SIEFP pages are allocated
+ * by the paravisor. These pages will also be used by the kdump
+ * kernel, so just clear the enable bit here and keep the page
+ * addresses.
+ */
simp.simp_enabled = 0;
- simp.base_simp_gpa = 0;
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_message_page);
+ else
+ simp.base_simp_gpa = 0;
hv_set_register(HV_REGISTER_SIMP, simp.as_uint64);
siefp.as_uint64 = hv_get_register(HV_REGISTER_SIEFP);
siefp.siefp_enabled = 0;
- siefp.base_siefp_gpa = 0;
+
+ if (hv_isolation_type_snp())
+ memunmap(hv_cpu->synic_event_page);
+ else
+ siefp.base_siefp_gpa = 0;
hv_set_register(HV_REGISTER_SIEFP, siefp.as_uint64);
diff --git a/drivers/hv/hv_common.c b/drivers/hv/hv_common.c
index c0d9048a4112..7be173a99f27 100644
--- a/drivers/hv/hv_common.c
+++ b/drivers/hv/hv_common.c
@@ -249,6 +249,12 @@ bool __weak hv_is_isolation_supported(void)
}
EXPORT_SYMBOL_GPL(hv_is_isolation_supported);
+bool __weak hv_isolation_type_snp(void)
+{
+ return false;
+}
+EXPORT_SYMBOL_GPL(hv_isolation_type_snp);
+
void __weak hv_setup_vmbus_handler(void (*handler)(void))
{
}
@@ -283,3 +289,9 @@ void __weak hyperv_cleanup(void)
{
}
EXPORT_SYMBOL_GPL(hyperv_cleanup);
+
+u64 __weak hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size)
+{
+ return HV_STATUS_INVALID_PARAMETER;
+}
+EXPORT_SYMBOL_GPL(hv_ghcb_hypercall);
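
hv_isolation_type_snp() and hv_ghcb_hypercall() are added here as __weak stubs so that architecture-independent VMBus code can call them unconditionally; x86 supplies the strong definitions and other architectures silently fall back to the stubs. The same linker pattern in isolation, with hypothetical names:

#include <linux/compiler.h>	/* __weak */
#include <linux/types.h>

/* Common code: default that applies when no architecture overrides it. */
bool __weak example_feature_supported(void)
{
	return false;
}

/*
 * Architecture code would then provide the strong definition, e.g.:
 *
 *	bool example_feature_supported(void)
 *	{
 *		return probed_from_firmware;
 *	}
 *
 * and the linker prefers it over the weak stub at link time.
 */
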
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 42f3d9d123a1..3a1f007b678a 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -13,6 +13,7 @@
#define _HYPERV_VMBUS_H
#include <linux/list.h>
+#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
@@ -240,6 +241,8 @@ struct vmbus_connection {
* is child->parent notification
*/
struct hv_monitor_page *monitor_pages[2];
+ void *monitor_pages_original[2];
+ phys_addr_t monitor_pages_pa[2];
struct list_head chn_msg_list;
spinlock_t channelmsg_lock;
diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c
index 314015d9e912..71efacb90965 100644
--- a/drivers/hv/ring_buffer.c
+++ b/drivers/hv/ring_buffer.c
@@ -17,6 +17,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
+#include <linux/io.h>
+#include <asm/mshyperv.h>
#include "hyperv_vmbus.h"
@@ -183,8 +185,10 @@ void hv_ringbuffer_pre_init(struct vmbus_channel *channel)
int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
struct page *pages, u32 page_cnt, u32 max_pkt_size)
{
- int i;
struct page **pages_wraparound;
+ unsigned long *pfns_wraparound;
+ u64 pfn;
+ int i;
BUILD_BUG_ON((sizeof(struct hv_ring_buffer) != PAGE_SIZE));
@@ -192,23 +196,50 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
* First page holds struct hv_ring_buffer, do wraparound mapping for
* the rest.
*/
- pages_wraparound = kcalloc(page_cnt * 2 - 1, sizeof(struct page *),
- GFP_KERNEL);
- if (!pages_wraparound)
- return -ENOMEM;
+ if (hv_isolation_type_snp()) {
+ pfn = page_to_pfn(pages) +
+ PFN_DOWN(ms_hyperv.shared_gpa_boundary);
+
+ pfns_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(unsigned long), GFP_KERNEL);
+ if (!pfns_wraparound)
+ return -ENOMEM;
- pages_wraparound[0] = pages;
- for (i = 0; i < 2 * (page_cnt - 1); i++)
- pages_wraparound[i + 1] = &pages[i % (page_cnt - 1) + 1];
+ pfns_wraparound[0] = pfn;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pfns_wraparound[i + 1] = pfn + i % (page_cnt - 1) + 1;
- ring_info->ring_buffer = (struct hv_ring_buffer *)
- vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP, PAGE_KERNEL);
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap_pfn(pfns_wraparound, page_cnt * 2 - 1,
+ PAGE_KERNEL);
+ kfree(pfns_wraparound);
- kfree(pages_wraparound);
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+ /* Zero ring buffer after setting memory host visibility. */
+ memset(ring_info->ring_buffer, 0x00, PAGE_SIZE * page_cnt);
+ } else {
+ pages_wraparound = kcalloc(page_cnt * 2 - 1,
+ sizeof(struct page *),
+ GFP_KERNEL);
+ if (!pages_wraparound)
+ return -ENOMEM;
+
+ pages_wraparound[0] = pages;
+ for (i = 0; i < 2 * (page_cnt - 1); i++)
+ pages_wraparound[i + 1] =
+ &pages[i % (page_cnt - 1) + 1];
+
+ ring_info->ring_buffer = (struct hv_ring_buffer *)
+ vmap(pages_wraparound, page_cnt * 2 - 1, VM_MAP,
+ PAGE_KERNEL);
+
+ kfree(pages_wraparound);
+ if (!ring_info->ring_buffer)
+ return -ENOMEM;
+ }
- if (!ring_info->ring_buffer)
- return -ENOMEM;
ring_info->ring_buffer->read_index =
ring_info->ring_buffer->write_index = 0;
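
The ring_buffer.c hunk keeps the long-standing trick of mapping the data pages twice so a packet that wraps past the end of the ring is still virtually contiguous, but on SNP Isolation VMs it must do so by PFN: the PFNs are offset above shared_gpa_boundary and mapped with vmap_pfn() (hence the new "select VMAP_PFN" in the Hyper-V Kconfig above) rather than vmap(). A hedged condensation of just the double-mapping index math, with a hypothetical helper name:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *example_map_ring(unsigned long first_pfn, u32 page_cnt)
{
	unsigned long *pfns;
	void *va;
	int i;

	pfns = kcalloc(page_cnt * 2 - 1, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return NULL;

	pfns[0] = first_pfn;				/* header page, mapped once */
	for (i = 0; i < 2 * (page_cnt - 1); i++)	/* data pages, mapped twice */
		pfns[i + 1] = first_pfn + i % (page_cnt - 1) + 1;

	va = vmap_pfn(pfns, page_cnt * 2 - 1, PAGE_KERNEL);
	kfree(pfns);
	return va;
}
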
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index c4578e8f34bb..64bd3dfba2c4 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1032,6 +1032,16 @@ config SENSORS_MAX31730
This driver can also be built as a module. If so, the module
will be called max31730.
+config SENSORS_MAX6620
+ tristate "Maxim MAX6620 fan controller"
+ depends on I2C
+ help
+ If you say yes here you get support for the MAX6620
+ fan controller.
+
+ This driver can also be built as a module. If so, the module
+ will be called max6620.
+
config SENSORS_MAX6621
tristate "Maxim MAX6621 sensor chip"
depends on I2C
@@ -1317,7 +1327,7 @@ config SENSORS_LM90
Maxim MAX6646, MAX6647, MAX6648, MAX6649, MAX6654, MAX6657, MAX6658,
MAX6659, MAX6680, MAX6681, MAX6692, MAX6695, MAX6696,
ON Semiconductor NCT1008, Winbond/Nuvoton W83L771W/G/AWG/ASG,
- Philips SA56004, GMT G781, and Texas Instruments TMP451
+ Philips SA56004, GMT G781, Texas Instruments TMP451 and TMP461
sensor chips.
This driver can also be built as a module. If so, the module
@@ -1433,6 +1443,7 @@ config SENSORS_NCT6683
config SENSORS_NCT6775
tristate "Nuvoton NCT6775F and compatibles"
depends on !PPC
+ depends on ACPI_WMI || ACPI_WMI=n
select HWMON_VID
help
If you say yes here you get support for the hardware monitoring
@@ -1930,7 +1941,7 @@ config SENSORS_TMP401
depends on I2C
help
If you say yes here you get support for Texas Instruments TMP401,
- TMP411, TMP431, TMP432, TMP435, and TMP461 temperature sensor chips.
+ TMP411, TMP431, TMP432, and TMP435 temperature sensor chips.
This driver can also be built as a module. If so, the module
will be called tmp401.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 162940270661..baee6a8d4dd1 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -135,6 +135,7 @@ obj-$(CONFIG_SENSORS_MAX1668) += max1668.o
obj-$(CONFIG_SENSORS_MAX197) += max197.o
obj-$(CONFIG_SENSORS_MAX31722) += max31722.o
obj-$(CONFIG_SENSORS_MAX31730) += max31730.o
+obj-$(CONFIG_SENSORS_MAX6620) += max6620.o
obj-$(CONFIG_SENSORS_MAX6621) += max6621.o
obj-$(CONFIG_SENSORS_MAX6639) += max6639.o
obj-$(CONFIG_SENSORS_MAX6642) += max6642.o
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index 112dd0d9377c..8229ad30c909 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -145,7 +145,7 @@ struct abituguru3_data {
struct device *hwmon_dev; /* hwmon registered device */
struct mutex update_lock; /* protect access to data and uGuru */
unsigned short addr; /* uguru base address */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/*
@@ -1083,7 +1083,7 @@ static struct abituguru3_data *abituguru3_update_device(struct device *dev)
mutex_lock(&data->update_lock);
if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
/* Clear data->valid while updating */
- data->valid = 0;
+ data->valid = false;
/* Read alarms */
if (abituguru3_read_increment_offset(data,
ABIT_UGURU3_SETTINGS_BANK,
@@ -1117,7 +1117,7 @@ static struct abituguru3_data *abituguru3_update_device(struct device *dev)
goto LEAVE_UPDATE;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
LEAVE_UPDATE:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 014505b1faf7..c405a5869581 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -535,7 +535,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
sysfs_remove_link(resource->holders_dir,
kobject_name(&obj->dev.kobj));
- put_device(&obj->dev);
+ acpi_dev_put(obj);
}
kfree(resource->domain_devices);
@@ -597,18 +597,15 @@ static int read_domain_devices(struct acpi_power_meter_resource *resource)
continue;
/* Create a symlink to domain objects */
- resource->domain_devices[i] = NULL;
- if (acpi_bus_get_device(element->reference.handle,
- &resource->domain_devices[i]))
+ obj = acpi_bus_get_acpi_device(element->reference.handle);
+ resource->domain_devices[i] = obj;
+ if (!obj)
continue;
- obj = resource->domain_devices[i];
- get_device(&obj->dev);
-
res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj,
kobject_name(&obj->dev.kobj));
if (res) {
- put_device(&obj->dev);
+ acpi_dev_put(obj);
resource->domain_devices[i] = NULL;
}
}
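
The acpi_power_meter hunk replaces acpi_bus_get_device() plus a manual get_device() with acpi_bus_get_acpi_device(), which already returns a referenced struct acpi_device, and balances it with acpi_dev_put() on every error and teardown path. The resulting reference pattern, sketched with a hypothetical helper:

#include <linux/acpi.h>
#include <linux/errno.h>
#include <linux/sysfs.h>

static int example_link_domain(struct kobject *holders_dir, acpi_handle handle,
			       struct acpi_device **out)
{
	struct acpi_device *adev;
	int ret;

	adev = acpi_bus_get_acpi_device(handle);	/* takes a reference */
	if (!adev)
		return -ENODEV;

	ret = sysfs_create_link(holders_dir, &adev->dev.kobj,
				kobject_name(&adev->dev.kobj));
	if (ret) {
		acpi_dev_put(adev);	/* balance the reference on failure */
		adev = NULL;
	}

	/* On success the caller stores adev and drops it later with acpi_dev_put(). */
	*out = adev;
	return ret;
}
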
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 6a765755d061..0afb89c4629d 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -37,7 +37,7 @@ static u8 AD7414_REG_LIMIT[] = { AD7414_REG_T_HIGH, AD7414_REG_T_LOW };
struct ad7414_data {
struct i2c_client *client;
struct mutex lock; /* atomic read data updates */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long next_update; /* In jiffies */
s16 temp_input; /* Register values */
s8 temps[ARRAY_SIZE(AD7414_REG_LIMIT)];
@@ -95,7 +95,7 @@ static struct ad7414_data *ad7414_update_device(struct device *dev)
}
data->next_update = jiffies + HZ + HZ / 2;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->lock);
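
The ad7414 change is the first of a long run in this series converting the hwmon cache flag from char/int to bool. The flag always backs the same idiom: refresh the register cache under the update mutex at most once per interval, and only mark it valid after a successful read so a failed update forces a retry. A generic sketch of that idiom with a hypothetical driver structure and register read:

#include <linux/jiffies.h>
#include <linux/mutex.h>

struct example_data {
	struct mutex update_lock;
	bool valid;			/* true once the cache holds real data */
	unsigned long last_updated;	/* in jiffies */
	int temp;			/* cached register value */
};

static int example_read_temp(void)
{
	return 25000;	/* stand-in for an SMBus/I2C register read */
}

static struct example_data *example_update_device(struct example_data *data)
{
	mutex_lock(&data->update_lock);

	if (!data->valid || time_after(jiffies, data->last_updated + HZ)) {
		data->temp = example_read_temp();
		data->last_updated = jiffies;
		data->valid = true;
	}

	mutex_unlock(&data->update_lock);
	return data;
}
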
diff --git a/drivers/hwmon/ad7418.c b/drivers/hwmon/ad7418.c
index d618f6b2f382..22bdb7e5b9e0 100644
--- a/drivers/hwmon/ad7418.c
+++ b/drivers/hwmon/ad7418.c
@@ -46,7 +46,7 @@ struct ad7418_data {
enum chips type;
struct mutex lock;
int adc_max; /* number of ADC channels */
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values */
u16 in[4];
@@ -111,14 +111,14 @@ static int ad7418_update_device(struct device *dev)
goto abort;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->lock);
return 0;
abort:
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->lock);
return val;
}
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 71deb2cd20f5..38b447c6e8cd 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -72,7 +72,7 @@ struct adm1021_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
char low_power; /* !=0 if device in low power mode */
unsigned long last_updated; /* In jiffies */
@@ -135,7 +135,7 @@ static struct adm1021_data *adm1021_update_device(struct device *dev)
ADM1023_REG_REM_OFFSET_PREC);
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1025.c b/drivers/hwmon/adm1025.c
index de51e01c061b..4352f6a884e8 100644
--- a/drivers/hwmon/adm1025.c
+++ b/drivers/hwmon/adm1025.c
@@ -97,7 +97,7 @@ struct adm1025_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
u8 in[6]; /* register value */
@@ -148,7 +148,7 @@ static struct adm1025_data *adm1025_update_device(struct device *dev)
ADM1025_REG_VID4) & 0x01) << 4);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1026.c b/drivers/hwmon/adm1026.c
index 49cefbadb156..69b3ec752944 100644
--- a/drivers/hwmon/adm1026.c
+++ b/drivers/hwmon/adm1026.c
@@ -259,7 +259,7 @@ struct adm1026_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_reading; /* In jiffies */
unsigned long last_config; /* In jiffies */
@@ -459,7 +459,7 @@ static struct adm1026_data *adm1026_update_device(struct device *dev)
data->last_config = jiffies;
} /* last_config */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
return data;
}
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 50b1df7b008c..3e1999413f32 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -99,7 +99,7 @@ static const u8 ADM1029_REG_FAN_DIV[] = {
struct adm1029_data {
struct i2c_client *client;
struct mutex update_lock; /* protect register access */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values, signed for temperature, unsigned for other stuff */
@@ -143,7 +143,7 @@ static struct adm1029_data *adm1029_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index b538ace2d292..257ec53ae723 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -65,7 +65,7 @@ struct adm1031_data {
const struct attribute_group *groups[3];
struct mutex update_lock;
int chip_type;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned int update_interval; /* In milliseconds */
/*
@@ -187,7 +187,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
ADM1031_REG_PWM) >> (4 * chan)) & 0x0f;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -650,7 +650,7 @@ static ssize_t fan_div_store(struct device *dev,
data->fan_min[nr]);
/* Invalidate the cache: fan speed is no longer valid */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/adt7310.c b/drivers/hwmon/adt7310.c
index 9fad01191620..c40cac16af68 100644
--- a/drivers/hwmon/adt7310.c
+++ b/drivers/hwmon/adt7310.c
@@ -90,7 +90,8 @@ static int adt7310_spi_probe(struct spi_device *spi)
static int adt7310_spi_remove(struct spi_device *spi)
{
- return adt7x10_remove(&spi->dev, spi->irq);
+ adt7x10_remove(&spi->dev, spi->irq);
+ return 0;
}
static const struct spi_device_id adt7310_id[] = {
diff --git a/drivers/hwmon/adt7410.c b/drivers/hwmon/adt7410.c
index 9d80895d0266..973db057427b 100644
--- a/drivers/hwmon/adt7410.c
+++ b/drivers/hwmon/adt7410.c
@@ -50,7 +50,8 @@ static int adt7410_i2c_probe(struct i2c_client *client)
static int adt7410_i2c_remove(struct i2c_client *client)
{
- return adt7x10_remove(&client->dev, client->irq);
+ adt7x10_remove(&client->dev, client->irq);
+ return 0;
}
static const struct i2c_device_id adt7410_ids[] = {
diff --git a/drivers/hwmon/adt7x10.c b/drivers/hwmon/adt7x10.c
index 3f03b4cf5858..e9d33aa78a19 100644
--- a/drivers/hwmon/adt7x10.c
+++ b/drivers/hwmon/adt7x10.c
@@ -444,7 +444,7 @@ exit_restore:
}
EXPORT_SYMBOL_GPL(adt7x10_probe);
-int adt7x10_remove(struct device *dev, int irq)
+void adt7x10_remove(struct device *dev, int irq)
{
struct adt7x10_data *data = dev_get_drvdata(dev);
@@ -457,7 +457,6 @@ int adt7x10_remove(struct device *dev, int irq)
sysfs_remove_group(&dev->kobj, &adt7x10_group);
if (data->oldconfig != data->config)
adt7x10_write_byte(dev, ADT7X10_CONFIG, data->oldconfig);
- return 0;
}
EXPORT_SYMBOL_GPL(adt7x10_remove);
diff --git a/drivers/hwmon/adt7x10.h b/drivers/hwmon/adt7x10.h
index 21ad15ce3163..a1ae682eb32e 100644
--- a/drivers/hwmon/adt7x10.h
+++ b/drivers/hwmon/adt7x10.h
@@ -26,7 +26,7 @@ struct adt7x10_ops {
int adt7x10_probe(struct device *dev, const char *name, int irq,
const struct adt7x10_ops *ops);
-int adt7x10_remove(struct device *dev, int irq);
+void adt7x10_remove(struct device *dev, int irq);
#ifdef CONFIG_PM_SLEEP
extern const struct dev_pm_ops adt7x10_dev_pm_ops;
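
adt7x10_remove() has nothing useful to report, so it becomes void and the I2C/SPI remove callbacks above simply call it and return 0; returning an error from a bus ->remove() would not stop the device from being unbound anyway. The resulting shape of such a wrapper, sketched with hypothetical names:

#include <linux/spi/spi.h>

void example_core_remove(struct device *dev, int irq);	/* cannot fail */

static int example_spi_remove(struct spi_device *spi)
{
	example_core_remove(&spi->dev, spi->irq);
	/* Nothing to report: the device is going away regardless. */
	return 0;
}
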
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 6b1ce2242c61..0c16face3fd3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -141,7 +141,7 @@ static const u8 fan_reg_hi[] = {AMC6821_REG_TDATA_HI,
struct amc6821_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
@@ -258,7 +258,7 @@ static struct amc6821_data *amc6821_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
@@ -511,7 +511,7 @@ static ssize_t temp_auto_point_temp_store(struct device *dev,
}
mutex_lock(&data->update_lock);
- data->valid = 0;
+ data->valid = false;
switch (ix) {
case 0:
@@ -584,7 +584,7 @@ static ssize_t pwm1_auto_point_pwm_store(struct device *dev,
}
EXIT:
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index c31759794a29..fc6d6a9053ce 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -391,7 +391,7 @@ static const struct applesmc_entry *applesmc_get_entry_by_index(int index)
cache->len = info[0];
memcpy(cache->type, &info[1], 4);
cache->flags = info[5];
- cache->valid = 1;
+ cache->valid = true;
out:
mutex_unlock(&smcreg.mutex);
diff --git a/drivers/hwmon/asb100.c b/drivers/hwmon/asb100.c
index ba9fcf6f9264..8cf0bcb85eb4 100644
--- a/drivers/hwmon/asb100.c
+++ b/drivers/hwmon/asb100.c
@@ -186,7 +186,7 @@ struct asb100_data {
/* array of 2 pointers to subclients */
struct i2c_client *lm75[2];
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
u8 in[7]; /* Register value */
u8 in_max[7]; /* Register value */
u8 in_min[7]; /* Register value */
@@ -993,7 +993,7 @@ static struct asb100_data *asb100_update_device(struct device *dev)
(asb100_read_value(client, ASB100_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
dev_dbg(&client->dev, "... device update complete\n");
}
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index 600ffc7e1900..e835605a7456 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -77,7 +77,7 @@ struct asc7621_data {
struct i2c_client client;
struct device *class_dev;
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_high_reading; /* In jiffies */
unsigned long last_low_reading; /* In jiffies */
/*
@@ -1032,7 +1032,7 @@ static struct asc7621_data *asc7621_update_device(struct device *dev)
data->last_low_reading = jiffies;
} /* last_reading */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/atxp1.c b/drivers/hwmon/atxp1.c
index 1e08a5431f12..4fd8de8022bc 100644
--- a/drivers/hwmon/atxp1.c
+++ b/drivers/hwmon/atxp1.c
@@ -37,7 +37,7 @@ struct atxp1_data {
struct i2c_client *client;
struct mutex update_lock;
unsigned long last_updated;
- u8 valid;
+ bool valid;
struct {
u8 vid; /* VID output register */
u8 cpu_vid; /* VID input from CPU */
@@ -63,7 +63,7 @@ static struct atxp1_data *atxp1_update_device(struct device *dev)
data->reg.gpio1 = i2c_smbus_read_byte_data(client, ATXP1_GPIO1);
data->reg.gpio2 = i2c_smbus_read_byte_data(client, ATXP1_GPIO2);
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -136,7 +136,7 @@ static ssize_t cpu0_vid_store(struct device *dev,
ATXP1_VID, cvid | ATXP1_VIDENA);
}
- data->valid = 0;
+ data->valid = false;
return count;
}
@@ -180,7 +180,7 @@ static ssize_t gpio1_store(struct device *dev, struct device_attribute *attr,
i2c_smbus_write_byte_data(client, ATXP1_GPIO1, value);
- data->valid = 0;
+ data->valid = false;
}
return count;
@@ -224,7 +224,7 @@ static ssize_t gpio2_store(struct device *dev, struct device_attribute *attr,
i2c_smbus_write_byte_data(client, ATXP1_GPIO2, value);
- data->valid = 0;
+ data->valid = false;
}
return count;
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index bb9211215a68..ccf0af5b988a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -167,7 +167,7 @@ static ssize_t show_temp(struct device *dev,
* really help at all.
*/
tdata->temp = tdata->tjmax - ((eax >> 16) & 0x7f) * 1000;
- tdata->valid = 1;
+ tdata->valid = true;
tdata->last_updated = jiffies;
}
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 774c1b0715d9..eaace478f508 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -12,24 +12,24 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/capability.h>
#include <linux/cpu.h>
+#include <linux/ctype.h>
#include <linux/delay.h>
+#include <linux/dmi.h>
#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/platform_device.h>
-#include <linux/types.h>
-#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
-#include <linux/dmi.h>
-#include <linux/capability.h>
-#include <linux/mutex.h>
-#include <linux/hwmon.h>
-#include <linux/uaccess.h>
-#include <linux/io.h>
-#include <linux/sched.h>
-#include <linux/ctype.h>
+#include <linux/string.h>
#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
#include <linux/i8k.h>
@@ -76,6 +76,7 @@ struct dell_smm_data {
int temp_type[DELL_SMM_NO_TEMP];
bool fan[DELL_SMM_NO_FANS];
int fan_type[DELL_SMM_NO_FANS];
+ int *fan_nominal_speed[DELL_SMM_NO_FANS];
};
MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
@@ -326,7 +327,7 @@ static int i8k_enable_fan_auto_mode(const struct dell_smm_data *data, bool enabl
}
/*
- * Set the fan speed (off, low, high). Returns the new fan status.
+ * Set the fan speed (off, low, high, ...).
*/
static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
{
@@ -338,7 +339,7 @@ static int i8k_set_fan(const struct dell_smm_data *data, int fan, int speed)
speed = (speed < 0) ? 0 : ((speed > data->i8k_fan_max) ? data->i8k_fan_max : speed);
regs.ebx = (fan & 0xff) | (speed << 8);
- return i8k_smm(&regs) ? : i8k_get_fan_status(data, fan);
+ return i8k_smm(&regs);
}
static int __init i8k_get_temp_type(int sensor)
@@ -452,7 +453,7 @@ static int
i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd, unsigned long arg)
{
int val = 0;
- int speed;
+ int speed, err;
unsigned char buff[16];
int __user *argp = (int __user *)arg;
@@ -473,8 +474,7 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (restricted && !capable(CAP_SYS_ADMIN))
return -EPERM;
- memset(buff, 0, sizeof(buff));
- strscpy(buff, data->bios_machineid, sizeof(buff));
+ strscpy_pad(buff, data->bios_machineid, sizeof(buff));
break;
case I8K_FN_STATUS:
@@ -513,11 +513,15 @@ i8k_ioctl_unlocked(struct file *fp, struct dell_smm_data *data, unsigned int cmd
if (copy_from_user(&speed, argp + 1, sizeof(int)))
return -EFAULT;
- val = i8k_set_fan(data, val, speed);
+ err = i8k_set_fan(data, val, speed);
+ if (err < 0)
+ return err;
+
+ val = i8k_get_fan_status(data, val);
break;
default:
- return -EINVAL;
+ return -ENOIOCTLCMD;
}
if (val < 0)
@@ -674,6 +678,13 @@ static umode_t dell_smm_is_visible(const void *drvdata, enum hwmon_sensor_types
return 0444;
break;
+ case hwmon_fan_min:
+ case hwmon_fan_max:
+ case hwmon_fan_target:
+ if (data->fan_nominal_speed[channel])
+ return 0444;
+
+ break;
default:
break;
}
@@ -741,6 +752,25 @@ static int dell_smm_read(struct device *dev, enum hwmon_sensor_types type, u32 a
*val = ret;
return 0;
+ case hwmon_fan_min:
+ *val = data->fan_nominal_speed[channel][0];
+
+ return 0;
+ case hwmon_fan_max:
+ *val = data->fan_nominal_speed[channel][data->i8k_fan_max];
+
+ return 0;
+ case hwmon_fan_target:
+ ret = i8k_get_fan_status(data, channel);
+ if (ret < 0)
+ return ret;
+
+ if (ret > data->i8k_fan_max)
+ ret = data->i8k_fan_max;
+
+ *val = data->fan_nominal_speed[channel][ret];
+
+ return 0;
default:
break;
}
@@ -889,9 +919,12 @@ static const struct hwmon_channel_info *dell_smm_info[] = {
HWMON_T_INPUT | HWMON_T_LABEL
),
HWMON_CHANNEL_INFO(fan,
- HWMON_F_INPUT | HWMON_F_LABEL,
- HWMON_F_INPUT | HWMON_F_LABEL,
- HWMON_F_INPUT | HWMON_F_LABEL
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET,
+ HWMON_F_INPUT | HWMON_F_LABEL | HWMON_F_MIN | HWMON_F_MAX |
+ HWMON_F_TARGET
),
HWMON_CHANNEL_INFO(pwm,
HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
@@ -910,7 +943,7 @@ static int __init dell_smm_init_hwmon(struct device *dev)
{
struct dell_smm_data *data = dev_get_drvdata(dev);
struct device *dell_smm_hwmon_dev;
- int i, err;
+ int i, state, err;
for (i = 0; i < DELL_SMM_NO_TEMP; i++) {
data->temp_type[i] = i8k_get_temp_type(i);
@@ -926,8 +959,27 @@ static int __init dell_smm_init_hwmon(struct device *dev)
err = i8k_get_fan_status(data, i);
if (err < 0)
err = i8k_get_fan_type(data, i);
- if (err >= 0)
- data->fan[i] = true;
+
+ if (err < 0)
+ continue;
+
+ data->fan[i] = true;
+ data->fan_nominal_speed[i] = devm_kmalloc_array(dev, data->i8k_fan_max + 1,
+ sizeof(*data->fan_nominal_speed[i]),
+ GFP_KERNEL);
+ if (!data->fan_nominal_speed[i])
+ continue;
+
+ for (state = 0; state <= data->i8k_fan_max; state++) {
+ err = i8k_get_fan_nominal_speed(data, i, state);
+ if (err < 0) {
+ /* Mark nominal speed table as invalid in case of error */
+ devm_kfree(dev, data->fan_nominal_speed[i]);
+ data->fan_nominal_speed[i] = NULL;
+ break;
+ }
+ data->fan_nominal_speed[i][state] = err;
+ }
}
dell_smm_hwmon_dev = devm_hwmon_device_register_with_info(dev, "dell_smm", data,
@@ -948,6 +1000,11 @@ enum i8k_configs {
DELL_XPS,
};
+/*
+ * Only use this for machines which need some special configuration
+ * in order to work correctly (e.g. if autoconfig fails on these machines).
+ */
+
static const struct i8k_config_data i8k_config_data[] __initconst = {
[DELL_LATITUDE_D520] = {
.fan_mult = 1,
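
The dell-smm-hwmon change builds, for each fan, a table of nominal RPM values indexed by fan state (0..i8k_fan_max) and then reports the hwmon fan min/max/target attributes straight from that table; if any state cannot be queried, the table is freed and those attributes stay hidden for that fan. A hedged sketch of the table construction, with the SMM query abstracted behind a hypothetical read_nominal_rpm() helper:

#include <linux/device.h>
#include <linux/slab.h>

static int read_nominal_rpm(int fan, int state);	/* hypothetical SMM query */

static int *example_build_rpm_table(struct device *dev, int fan, int max_state)
{
	int *table, state, rpm;

	table = devm_kmalloc_array(dev, max_state + 1, sizeof(*table),
				   GFP_KERNEL);
	if (!table)
		return NULL;

	for (state = 0; state <= max_state; state++) {
		rpm = read_nominal_rpm(fan, state);
		if (rpm < 0) {
			/* Any failure invalidates the whole table. */
			devm_kfree(dev, table);
			return NULL;
		}
		table[state] = rpm;
	}

	return table;
}
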
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index c1e4cfb40c3d..e3ad4c2d0038 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -203,7 +203,7 @@ struct dme1737_data {
unsigned int addr; /* for ISA devices only */
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_update; /* in jiffies */
unsigned long last_vbat; /* in jiffies */
enum chips type;
@@ -778,7 +778,7 @@ static struct dme1737_data *dme1737_update_device(struct device *dev)
}
data->last_update = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ds1621.c b/drivers/hwmon/ds1621.c
index bf1c4b7ecb40..0886abf6ebab 100644
--- a/drivers/hwmon/ds1621.c
+++ b/drivers/hwmon/ds1621.c
@@ -109,7 +109,7 @@ static const u8 DS1621_REG_TEMP[3] = {
struct ds1621_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
enum chips kind; /* device type */
@@ -213,7 +213,7 @@ static struct ds1621_data *ds1621_update_client(struct device *dev)
new_conf);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 9ec722798c4a..82d7c3d58f49 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -56,7 +56,7 @@ static const u8 DS620_REG_TEMP[3] = {
struct ds620_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
s16 temp[3]; /* Register values, word */
@@ -118,7 +118,7 @@ static struct ds620_data *ds620_update_client(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index ec5c98702bf5..29082c8463f4 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -45,7 +45,7 @@ enum subfeature { input, min, max };
struct emc6w201_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -162,7 +162,7 @@ static struct emc6w201_data *emc6w201_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 67b47de8263a..7f20edb0677c 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -165,7 +165,7 @@ struct f71805f_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -404,7 +404,7 @@ static struct f71805f_data *f71805f_update_device(struct device *dev)
+ (f71805f_read8(data, F71805F_REG_STATUS(2)) << 16);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 4dec793fd07d..4673d403759a 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -253,7 +253,7 @@ struct f71882fg_data {
struct mutex update_lock;
int temp_start; /* temp numbering start (0 or 1) */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
char auto_point_temp_signed;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -1359,7 +1359,7 @@ static struct f71882fg_data *f71882fg_update_device(struct device *dev)
F71882FG_REG_IN(nr));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 3e567be60fb1..57c8a473698d 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -85,7 +85,7 @@ struct f75375_data {
const char *name;
int kind;
struct mutex update_lock; /* protect register access */
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
unsigned long last_limits; /* In jiffies */
@@ -228,7 +228,7 @@ static struct f75375_data *f75375_update_device(struct device *dev)
f75375_read8(client, F75375_REG_VOLT(nr));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index 5191cd85a8d1..c26195e3aad7 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -264,7 +264,7 @@ struct fschmd_data {
unsigned long watchdog_is_open;
char watchdog_expect_close;
char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* register values */
@@ -1356,7 +1356,7 @@ static struct fschmd_data *fschmd_update_device(struct device *dev)
FSCHMD_REG_VOLT[data->kind][i]);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index a692f7b2f6f7..36717b524dbd 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -95,7 +95,7 @@ static struct g760a_data *g760a_update_client(struct device *dev)
data->fan_sta = g760a_read_value(client, G760A_REG_FAN_STA);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl518sm.c b/drivers/hwmon/gl518sm.c
index 7aaee5a48243..dd683b0a648f 100644
--- a/drivers/hwmon/gl518sm.c
+++ b/drivers/hwmon/gl518sm.c
@@ -107,7 +107,7 @@ struct gl518_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 voltage_in[4]; /* Register values; [0] = VDD */
@@ -211,7 +211,7 @@ static struct gl518_data *gl518_update_device(struct device *dev)
gl518_read_value(client, GL518_REG_VIN3);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/gl520sm.c b/drivers/hwmon/gl520sm.c
index 4ae1295cc3ea..096ba9797211 100644
--- a/drivers/hwmon/gl520sm.c
+++ b/drivers/hwmon/gl520sm.c
@@ -64,7 +64,7 @@ struct gl520_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until the following fields are valid */
+ bool valid; /* false until the following fields are valid */
unsigned long last_updated; /* in jiffies */
u8 vid;
@@ -174,7 +174,7 @@ static struct gl520_data *gl520_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c
index 8d3b1dae31df..3501a3ead4ba 100644
--- a/drivers/hwmon/hwmon.c
+++ b/drivers/hwmon/hwmon.c
@@ -796,8 +796,10 @@ __hwmon_device_register(struct device *dev, const char *name, void *drvdata,
dev_set_drvdata(hdev, drvdata);
dev_set_name(hdev, HWMON_ID_FORMAT, id);
err = device_register(hdev);
- if (err)
- goto free_hwmon;
+ if (err) {
+ put_device(hdev);
+ goto ida_remove;
+ }
INIT_LIST_HEAD(&hwdev->tzdata);
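For background on the hwmon core fix above: once device_register() has been called, the struct device is reference counted, so a failed registration must be unwound with put_device() (letting the ->release() callback free the enclosing allocation) rather than by freeing the memory directly. A minimal sketch of that pattern outside the patch, with hypothetical demo_* names:

#include <linux/device.h>
#include <linux/slab.h>

struct demo_dev {
	struct device dev;
};

/* Final reference drop lands here and frees the container. */
static void demo_release(struct device *dev)
{
	kfree(container_of(dev, struct demo_dev, dev));
}

static int demo_register(struct demo_dev *d)
{
	int err;

	d->dev.release = demo_release;
	err = device_register(&d->dev);	/* device_initialize() + device_add() */
	if (err) {
		put_device(&d->dev);	/* not kfree(): release() does the freeing */
		return err;
	}
	return 0;
}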
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
index 360f5aee1394..05f68e9c9477 100644
--- a/drivers/hwmon/i5500_temp.c
+++ b/drivers/hwmon/i5500_temp.c
@@ -5,6 +5,7 @@
* Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
*/
+#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -12,7 +13,6 @@
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
@@ -29,69 +29,78 @@
#define REG_CTCTRL 0xF7
#define REG_TSTIMER 0xF8
-/*
- * Sysfs stuff
- */
-
-/* Sensor resolution : 0.5 degree C */
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static umode_t i5500_is_visible(const void *drvdata, enum hwmon_sensor_types type, u32 attr,
+ int channel)
{
- struct pci_dev *pdev = to_pci_dev(dev->parent);
- long temp;
- u16 tsthrhi;
- s8 tsfsc;
-
- pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
- pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
- temp = ((long)tsthrhi - tsfsc) * 500;
-
- return sprintf(buf, "%ld\n", temp);
+ return 0444;
}
-static ssize_t thresh_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
+static int i5500_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
{
struct pci_dev *pdev = to_pci_dev(dev->parent);
- int reg = to_sensor_dev_attr(devattr)->index;
- long temp;
u16 tsthr;
+ s8 tsfsc;
+ u8 ctsts;
- pci_read_config_word(pdev, reg, &tsthr);
- temp = tsthr * 500;
+ switch (type) {
+ case hwmon_temp:
+ switch (attr) {
+ /* Sensor resolution : 0.5 degree C */
+ case hwmon_temp_input:
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthr);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ *val = (tsthr - tsfsc) * 500;
+ return 0;
+ case hwmon_temp_max:
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_max_hyst:
+ pci_read_config_word(pdev, REG_TSTHRLO, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_crit:
+ pci_read_config_word(pdev, REG_TSTHRCATA, &tsthr);
+ *val = tsthr * 500;
+ return 0;
+ case hwmon_temp_max_alarm:
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ *val = !!(ctsts & BIT(1));
+ return 0;
+ case hwmon_temp_crit_alarm:
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ *val = !!(ctsts & BIT(0));
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
- return sprintf(buf, "%ld\n", temp);
+ return -EOPNOTSUPP;
}
-static ssize_t alarm_show(struct device *dev,
- struct device_attribute *devattr, char *buf)
-{
- struct pci_dev *pdev = to_pci_dev(dev->parent);
- int nr = to_sensor_dev_attr(devattr)->index;
- u8 ctsts;
-
- pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
- return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
-}
+static const struct hwmon_ops i5500_ops = {
+ .is_visible = i5500_is_visible,
+ .read = i5500_read,
+};
-static DEVICE_ATTR_RO(temp1_input);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit, thresh, 0xE2);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_hyst, thresh, 0xEC);
-static SENSOR_DEVICE_ATTR_RO(temp1_max, thresh, 0xEE);
-static SENSOR_DEVICE_ATTR_RO(temp1_crit_alarm, alarm, 0);
-static SENSOR_DEVICE_ATTR_RO(temp1_max_alarm, alarm, 1);
-
-static struct attribute *i5500_temp_attrs[] = {
- &dev_attr_temp1_input.attr,
- &sensor_dev_attr_temp1_crit.dev_attr.attr,
- &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+static const struct hwmon_channel_info *i5500_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST | HWMON_T_CRIT |
+ HWMON_T_MAX_ALARM | HWMON_T_CRIT_ALARM
+ ),
NULL
};
-ATTRIBUTE_GROUPS(i5500_temp);
+static const struct hwmon_chip_info i5500_chip_info = {
+ .ops = &i5500_ops,
+ .info = i5500_info,
+};
static const struct pci_device_id i5500_temp_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
@@ -121,9 +130,8 @@ static int i5500_temp_probe(struct pci_dev *pdev,
return -ENODEV;
}
- hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
- "intel5500", NULL,
- i5500_temp_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, "intel5500", NULL,
+ &i5500_chip_info, NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
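The i5500_temp conversion to the with_info API keeps the original scaling: the chip's registers count in 0.5 degree C steps while hwmon sysfs reports millidegrees, hence the "* 500" in i5500_read(). A standalone arithmetic check with made-up register contents (not datasheet values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical register reads, 0.5 degree C per LSB. */
	uint16_t tsthrhi = 200;	/* REG_TSTHRHI: 200 * 0.5 C = 100 C */
	int8_t tsfsc = -4;	/* REG_TSFSC: signed offset         */

	long temp1_input = ((long)tsthrhi - tsfsc) * 500;	/* millidegrees C */
	long temp1_max = (long)tsthrhi * 500;

	printf("temp1_input = %ld mC\n", temp1_input);	/* 102000 */
	printf("temp1_max   = %ld mC\n", temp1_max);	/* 100000 */
	return 0;
}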
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index a4ec85207782..de6baf6ca3d1 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -127,7 +127,7 @@ struct aem_data {
struct device *hwmon_dev;
struct platform_device *pdev;
struct mutex lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
u8 ver_major;
u8 ver_minor;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index b2ab83c9fd9a..f6ec165c0fa8 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -66,7 +66,7 @@ struct ibmpex_bmc_data {
struct device *hwmon_dev;
struct device *bmc_device;
struct mutex lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
struct ipmi_addr address;
@@ -239,7 +239,7 @@ static void ibmpex_update_device(struct ibmpex_bmc_data *data)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
out:
mutex_unlock(&data->lock);
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 1f93134afcb9..0e543dbe0a6b 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -519,7 +519,7 @@ struct it87_data {
unsigned short addr;
const char *name;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u16 in_scaled; /* Internal voltage sensors are scaled */
@@ -844,7 +844,7 @@ static struct it87_data *it87_update_device(struct device *dev)
data->vid &= 0x3f;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -980,7 +980,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
regval |= 0x80;
it87_write_value(data, IT87_REG_BEEP_ENABLE, regval);
}
- data->valid = 0;
+ data->valid = false;
reg = IT87_REG_TEMP_OFFSET[nr];
break;
}
@@ -1079,7 +1079,7 @@ static ssize_t set_temp_type(struct device *dev, struct device_attribute *attr,
it87_write_value(data, IT87_REG_TEMP_ENABLE, data->sensor);
if (has_temp_old_peci(data, nr))
it87_write_value(data, IT87_REG_TEMP_EXTRA, data->extra);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
@@ -1834,7 +1834,7 @@ static ssize_t clear_intrusion(struct device *dev,
config |= BIT(5);
it87_write_value(data, IT87_REG_CONFIG, config);
/* Invalidate cache to force re-read */
- data->valid = 0;
+ data->valid = false;
}
mutex_unlock(&data->update_lock);
@@ -3229,7 +3229,7 @@ static int __maybe_unused it87_resume(struct device *dev)
it87_start_monitoring(data);
/* force update */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lineage-pem.c b/drivers/hwmon/lineage-pem.c
index 1109fffa76fb..ef5a49cd9149 100644
--- a/drivers/hwmon/lineage-pem.c
+++ b/drivers/hwmon/lineage-pem.c
@@ -191,7 +191,7 @@ static struct pem_data *pem_update_device(struct device *dev)
i2c_smbus_write_byte(client, PEM_CLEAR_INFO_FLAGS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index c8f93c5d1ccc..339a145afc09 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -139,7 +139,7 @@ struct lm63_data {
struct i2c_client *client;
struct mutex update_lock;
const struct attribute_group *groups[5];
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
unsigned long lut_last_updated; /* in jiffies */
@@ -289,7 +289,7 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_ALERT_STATUS) & 0x7F;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
lm63_update_lut(data);
@@ -714,7 +714,7 @@ static ssize_t temp2_type_store(struct device *dev,
reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
reg | (data->trutherm ? 0x02 : 0x00));
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
diff --git a/drivers/hwmon/lm77.c b/drivers/hwmon/lm77.c
index 7570c9d50ddc..df6af85e170a 100644
--- a/drivers/hwmon/lm77.c
+++ b/drivers/hwmon/lm77.c
@@ -55,7 +55,7 @@ static const u8 temp_regs[t_num_temp] = {
struct lm77_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid;
+ bool valid;
unsigned long last_updated; /* In jiffies */
int temp[t_num_temp]; /* index using temp_index */
u8 alarms;
@@ -118,7 +118,7 @@ static struct lm77_data *lm77_update_device(struct device *dev)
data->alarms =
lm77_read_value(client, LM77_REG_TEMP) & 0x0007;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 1aa35ca0c6fe..5e129cbec1cb 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -117,7 +117,7 @@ struct lm78_data {
int isa_addr;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[7]; /* Register value */
@@ -772,7 +772,7 @@ static struct lm78_data *lm78_update_device(struct device *dev)
data->alarms = lm78_read_value(data, LM78_REG_ALARM1) +
(lm78_read_value(data, LM78_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
data->fan_div[2] = 1;
}
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 97ab491d2922..e85e062bbf32 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -117,7 +117,7 @@ struct lm80_data {
struct i2c_client *client;
struct mutex update_lock;
char error; /* !=0 if error occurred during last update */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[i_num_in][7]; /* Register value, 1st index is enum in_index */
@@ -236,14 +236,14 @@ static struct lm80_data *lm80_update_device(struct device *dev)
data->alarms = prev_rv + (rv << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
data->error = 0;
}
goto done;
abort:
ret = ERR_PTR(rv);
- data->valid = 0;
+ data->valid = false;
data->error = 1;
done:
diff --git a/drivers/hwmon/lm83.c b/drivers/hwmon/lm83.c
index 2ff5ecce608e..74fd7aa373a3 100644
--- a/drivers/hwmon/lm83.c
+++ b/drivers/hwmon/lm83.c
@@ -105,7 +105,7 @@ struct lm83_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -137,7 +137,7 @@ static struct lm83_data *lm83_update_device(struct device *dev)
<< 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index c7bf5de7b70f..88cf2012d34b 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -294,7 +294,7 @@ struct lm85_data {
bool has_vid5; /* true if VID5 is configured for ADT7463 or ADT7468 */
struct mutex update_lock;
- int valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_reading; /* In jiffies */
unsigned long last_config; /* In jiffies */
@@ -541,7 +541,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
data->last_config = jiffies;
} /* last_config */
- data->valid = 1;
+ data->valid = true;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm87.c b/drivers/hwmon/lm87.c
index b2d820125bb6..1750bc588856 100644
--- a/drivers/hwmon/lm87.c
+++ b/drivers/hwmon/lm87.c
@@ -141,7 +141,7 @@ static u8 LM87_REG_TEMP_LOW[3] = { 0x3A, 0x38, 0x2C };
struct lm87_data {
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 channel; /* register value */
@@ -251,7 +251,7 @@ static struct lm87_data *lm87_update_device(struct device *dev)
data->aout = lm87_read_value(client, LM87_REG_AOUT);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 567b7c521f38..618052c6cdb6 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -69,10 +69,10 @@
* This driver also supports the G781 from GMT. This device is compatible
* with the ADM1032.
*
- * This driver also supports TMP451 from Texas Instruments. This device is
- * supported in both compatibility and extended mode. It's mostly compatible
- * with ADT7461 except for local temperature low byte register and max
- * conversion rate.
+ * This driver also supports TMP451 and TMP461 from Texas Instruments.
+ * Those devices are supported in both compatibility and extended mode.
+ * They are mostly compatible with ADT7461 except for local temperature
+ * low byte register and max conversion rate.
*
* Since the LM90 was the first chipset supported by this driver, most
* comments will refer to this chipset, but are actually general and
@@ -112,7 +112,7 @@ static const unsigned short normal_i2c[] = {
0x4d, 0x4e, 0x4f, I2C_CLIENT_END };
enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
- max6646, w83l771, max6696, sa56004, g781, tmp451, max6654 };
+ max6646, w83l771, max6696, sa56004, g781, tmp451, tmp461, max6654 };
/*
* The LM90 registers
@@ -168,8 +168,12 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_MAX_CONVRATE_MS 16000 /* Maximum conversion rate in ms */
-/* TMP451 registers */
+/* TMP451/TMP461 registers */
#define TMP451_REG_R_LOCAL_TEMPL 0x15
+#define TMP451_REG_CONALERT 0x22
+
+#define TMP461_REG_CHEN 0x16
+#define TMP461_REG_DFC 0x24
/*
* Device flags
@@ -182,7 +186,8 @@ enum chips { lm90, adm1032, lm99, lm86, max6657, max6659, adt7461, max6680,
#define LM90_HAVE_EMERGENCY_ALARM (1 << 5)/* emergency alarm */
#define LM90_HAVE_TEMP3 (1 << 6) /* 3rd temperature sensor */
#define LM90_HAVE_BROKEN_ALERT (1 << 7) /* Broken alert */
-#define LM90_PAUSE_FOR_CONFIG (1 << 8) /* Pause conversion for config */
+#define LM90_HAVE_EXTENDED_TEMP	(1 << 8) /* extended temperature support */
+#define LM90_PAUSE_FOR_CONFIG (1 << 9) /* Pause conversion for config */
/* LM90 status */
#define LM90_STATUS_LTHRM (1 << 0) /* local THERM limit tripped */
@@ -229,6 +234,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "w83l771", w83l771 },
{ "sa56004", sa56004 },
{ "tmp451", tmp451 },
+ { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -326,6 +332,10 @@ static const struct of_device_id __maybe_unused lm90_of_match[] = {
.compatible = "ti,tmp451",
.data = (void *)tmp451
},
+ {
+ .compatible = "ti,tmp461",
+ .data = (void *)tmp461
+ },
{ },
};
MODULE_DEVICE_TABLE(of, lm90_of_match);
@@ -350,7 +360,7 @@ static const struct lm90_params lm90_params[] = {
},
[adt7461] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
.alert_alarms = 0x7c,
.max_convrate = 10,
},
@@ -422,7 +432,14 @@ static const struct lm90_params lm90_params[] = {
},
[tmp451] = {
.flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
- | LM90_HAVE_BROKEN_ALERT,
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
+ .alert_alarms = 0x7c,
+ .max_convrate = 9,
+ .reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
+ },
+ [tmp461] = {
+ .flags = LM90_HAVE_OFFSET | LM90_HAVE_REM_LIMIT_EXT
+ | LM90_HAVE_BROKEN_ALERT | LM90_HAVE_EXTENDED_TEMP,
.alert_alarms = 0x7c,
.max_convrate = 9,
.reg_local_ext = TMP451_REG_R_LOCAL_TEMPL,
@@ -998,7 +1015,7 @@ static int lm90_get_temp11(struct lm90_data *data, int index)
s16 temp11 = data->temp11[index];
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u16_adt7461(data, temp11);
else if (data->kind == max6646)
temp = temp_from_u16(temp11);
@@ -1035,7 +1052,7 @@ static int lm90_set_temp11(struct lm90_data *data, int index, long val)
val -= 16000;
}
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
data->temp11[index] = temp_to_u16_adt7461(data, val);
else if (data->kind == max6646)
data->temp11[index] = temp_to_u8(val) << 8;
@@ -1062,7 +1079,7 @@ static int lm90_get_temp8(struct lm90_data *data, int index)
s8 temp8 = data->temp8[index];
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, temp8);
else if (data->kind == max6646)
temp = temp_from_u8(temp8);
@@ -1098,7 +1115,7 @@ static int lm90_set_temp8(struct lm90_data *data, int index, long val)
val -= 16000;
}
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
data->temp8[index] = temp_to_u8_adt7461(data, val);
else if (data->kind == max6646)
data->temp8[index] = temp_to_u8(val);
@@ -1116,7 +1133,7 @@ static int lm90_get_temphyst(struct lm90_data *data, int index)
{
int temp;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, data->temp8[index]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[index]);
@@ -1136,7 +1153,7 @@ static int lm90_set_temphyst(struct lm90_data *data, long val)
int temp;
int err;
- if (data->kind == adt7461 || data->kind == tmp451)
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP)
temp = temp_from_u8_adt7461(data, data->temp8[LOCAL_CRIT]);
else if (data->kind == max6646)
temp = temp_from_u8(data->temp8[LOCAL_CRIT]);
@@ -1627,18 +1644,26 @@ static int lm90_detect(struct i2c_client *client,
&& convrate <= 0x08)
name = "g781";
} else
- if (address == 0x4C
- && man_id == 0x55) { /* Texas Instruments */
- int local_ext;
+ if (man_id == 0x55 && chip_id == 0x00 &&
+ (config1 & 0x1B) == 0x00 && convrate <= 0x09) {
+ int local_ext, conalert, chen, dfc;
local_ext = i2c_smbus_read_byte_data(client,
TMP451_REG_R_LOCAL_TEMPL);
-
- if (chip_id == 0x00 /* TMP451 */
- && (config1 & 0x1B) == 0x00
- && convrate <= 0x09
- && (local_ext & 0x0F) == 0x00)
- name = "tmp451";
+ conalert = i2c_smbus_read_byte_data(client,
+ TMP451_REG_CONALERT);
+ chen = i2c_smbus_read_byte_data(client, TMP461_REG_CHEN);
+ dfc = i2c_smbus_read_byte_data(client, TMP461_REG_DFC);
+
+ if ((local_ext & 0x0F) == 0x00 &&
+ (conalert & 0xf1) == 0x01 &&
+ (chen & 0xfc) == 0x00 &&
+ (dfc & 0xfc) == 0x00) {
+ if (address == 0x4c && !(chen & 0x03))
+ name = "tmp451";
+ else if (address >= 0x48 && address <= 0x4f)
+ name = "tmp461";
+ }
}
if (!name) { /* identification failed */
@@ -1685,7 +1710,7 @@ static int lm90_init_client(struct i2c_client *client, struct lm90_data *data)
lm90_set_convrate(client, data, 500); /* 500ms; 2Hz conversion rate */
/* Check Temperature Range Select */
- if (data->kind == adt7461 || data->kind == tmp451) {
+ if (data->flags & LM90_HAVE_EXTENDED_TEMP) {
if (config & 0x04)
data->flags |= LM90_FLAG_ADT7461_EXT;
}
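The repeated "kind == adt7461 || kind == tmp451" checks collapse into the new LM90_HAVE_EXTENDED_TEMP flag, so TMP461 picks up the same handling automatically. "Extended" here is the ADT7461-style offset-binary range, where raw values are shifted by 64 degrees so one byte covers roughly -64..+191 C; a hedged user-space sketch of that conversion (not the driver's exact temp_from_u8_adt7461() helper):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Sketch: 8-bit temperature register to millidegrees C. */
static int temp_from_u8(bool extended, uint8_t raw)
{
	if (extended)
		return ((int)raw - 64) * 1000;	/* offset binary, -64..+191 C */
	return (int8_t)raw * 1000;		/* plain two's complement     */
}

int main(void)
{
	printf("0x00 -> %d / %d\n", temp_from_u8(false, 0x00), temp_from_u8(true, 0x00));
	printf("0x7f -> %d / %d\n", temp_from_u8(false, 0x7f), temp_from_u8(true, 0x7f));
	return 0;
}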
diff --git a/drivers/hwmon/lm92.c b/drivers/hwmon/lm92.c
index 9bf278cf0bd0..5bae6eedcaf1 100644
--- a/drivers/hwmon/lm92.c
+++ b/drivers/hwmon/lm92.c
@@ -99,7 +99,7 @@ static const u8 regs[t_num_regs] = {
struct lm92_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -126,7 +126,7 @@ static struct lm92_data *lm92_update_device(struct device *dev)
i2c_smbus_read_word_swapped(client, regs[i]);
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 78d6dfaf145b..dc67bf954b21 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -202,7 +202,7 @@ struct lm93_data {
/* client update function */
void (*update)(struct lm93_data *, struct i2c_client *);
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
/* register values, arranged by block read groups */
struct block1_t block1;
@@ -917,7 +917,7 @@ static struct lm93_data *lm93_update_device(struct device *dev)
data->update(data, client);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 00dbc170c8c6..8ea46ff20be5 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -78,7 +78,7 @@ struct lm95241_data {
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
unsigned long interval; /* in milli-seconds */
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
/* registers values */
u8 temp[ARRAY_SIZE(lm95241_reg_address)];
u8 status, config, model, trutherm;
@@ -118,7 +118,7 @@ static struct lm95241_data *lm95241_update_device(struct device *dev)
data->status = i2c_smbus_read_byte_data(client,
LM95241_REG_R_STATUS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -257,7 +257,7 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
else
data->config &= ~R2DF_MASK;
}
- data->valid = 0;
+ data->valid = false;
ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
data->config);
break;
@@ -273,7 +273,7 @@ static int lm95241_write_temp(struct device *dev, u32 attr, int channel,
else
data->config &= ~R2DF_MASK;
}
- data->valid = 0;
+ data->valid = false;
ret = i2c_smbus_write_byte_data(client, LM95241_REG_RW_CONFIG,
data->config);
break;
diff --git a/drivers/hwmon/ltc4151.c b/drivers/hwmon/ltc4151.c
index 13b85367a21f..e3ac004c1ed1 100644
--- a/drivers/hwmon/ltc4151.c
+++ b/drivers/hwmon/ltc4151.c
@@ -77,7 +77,7 @@ static struct ltc4151_data *ltc4151_update_device(struct device *dev)
data->regs[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 1d18c212054f..fa43d26ddd4f 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -64,7 +64,7 @@ static struct ltc4215_data *ltc4215_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index b81e9c3d297b..b91cc4fe84e5 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -73,13 +73,13 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
"Failed to read ADC value: error %d\n",
val);
ret = ERR_PTR(val);
- data->valid = 0;
+ data->valid = false;
goto abort;
}
data->regs[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index ae3a6a7bdaa2..daa5d8af1e69 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -166,7 +166,7 @@ static struct max16065_data *max16065_update_device(struct device *dev)
= i2c_smbus_read_byte_data(client, MAX16065_FAULT(i));
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
return data;
diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c
index 8bd941cae4d1..eae9e68027bc 100644
--- a/drivers/hwmon/max1619.c
+++ b/drivers/hwmon/max1619.c
@@ -79,7 +79,7 @@ enum temp_index {
struct max1619_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -124,7 +124,7 @@ static struct max1619_data *max1619_update_device(struct device *dev)
data->alarms ^= 0x02;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 5c41c78f0458..78688e6cb87d 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -58,7 +58,7 @@ struct max1668_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* 1x local and 4x remote */
@@ -120,7 +120,7 @@ static struct max1668_data *max1668_update_device(struct device *dev)
data->alarms |= val;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max31722.c b/drivers/hwmon/max31722.c
index 613338cbcb17..4cf4fe6809a3 100644
--- a/drivers/hwmon/max31722.c
+++ b/drivers/hwmon/max31722.c
@@ -103,10 +103,16 @@ static int max31722_probe(struct spi_device *spi)
static int max31722_remove(struct spi_device *spi)
{
struct max31722_data *data = spi_get_drvdata(spi);
+ int ret;
hwmon_device_unregister(data->hwmon_dev);
- return max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ ret = max31722_set_mode(data, MAX31722_MODE_STANDBY);
+ if (ret)
+ /* There is nothing we can do about this ... */
+ dev_warn(&spi->dev, "Failed to put device in stand-by mode\n");
+
+ return 0;
}
static int __maybe_unused max31722_suspend(struct device *dev)
diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c
new file mode 100644
index 000000000000..202b6438179d
--- /dev/null
+++ b/drivers/hwmon/max6620.c
@@ -0,0 +1,514 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hardware monitoring driver for Maxim MAX6620
+ *
+ * Originally from L. Grunenberg.
+ * (C) 2012 by L. Grunenberg <contact@lgrunenberg.de>
+ *
+ * Copyright (c) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+ *
+ * based on code written by :
+ * 2007 by Hans J. Koch <hjk@hansjkoch.de>
+ * John Morris <john.morris@spirentcom.com>
+ * Copyright (c) 2003 Spirent Communications
+ * and Claus Gindhart <claus.gindhart@kontron.com>
+ *
+ * This module has only been tested with the MAX6620 chip.
+ *
+ * The datasheet was last seen at:
+ *
+ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf
+ *
+ */
+
+#include <linux/bits.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/i2c.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+/*
+ * MAX 6620 registers
+ */
+
+#define MAX6620_REG_CONFIG 0x00
+#define MAX6620_REG_FAULT 0x01
+#define MAX6620_REG_CONF_FAN0 0x02
+#define MAX6620_REG_CONF_FAN1 0x03
+#define MAX6620_REG_CONF_FAN2 0x04
+#define MAX6620_REG_CONF_FAN3 0x05
+#define MAX6620_REG_DYN_FAN0 0x06
+#define MAX6620_REG_DYN_FAN1 0x07
+#define MAX6620_REG_DYN_FAN2 0x08
+#define MAX6620_REG_DYN_FAN3 0x09
+#define MAX6620_REG_TACH0 0x10
+#define MAX6620_REG_TACH1 0x12
+#define MAX6620_REG_TACH2 0x14
+#define MAX6620_REG_TACH3 0x16
+#define MAX6620_REG_VOLT0 0x18
+#define MAX6620_REG_VOLT1 0x1A
+#define MAX6620_REG_VOLT2 0x1C
+#define MAX6620_REG_VOLT3 0x1E
+#define MAX6620_REG_TAR0 0x20
+#define MAX6620_REG_TAR1 0x22
+#define MAX6620_REG_TAR2 0x24
+#define MAX6620_REG_TAR3 0x26
+#define MAX6620_REG_DAC0 0x28
+#define MAX6620_REG_DAC1 0x2A
+#define MAX6620_REG_DAC2 0x2C
+#define MAX6620_REG_DAC3 0x2E
+
+/*
+ * Config register bits
+ */
+
+#define MAX6620_CFG_RUN BIT(7)
+#define MAX6620_CFG_POR BIT(6)
+#define MAX6620_CFG_TIMEOUT BIT(5)
+#define MAX6620_CFG_FULLFAN BIT(4)
+#define MAX6620_CFG_OSC BIT(3)
+#define MAX6620_CFG_WD_MASK (BIT(2) | BIT(1))
+#define MAX6620_CFG_WD_2 BIT(1)
+#define MAX6620_CFG_WD_6 BIT(2)
+#define MAX6620_CFG_WD10 (BIT(2) | BIT(1))
+#define MAX6620_CFG_WD BIT(0)
+
+/*
+ * Failure status register bits
+ */
+
+#define MAX6620_FAIL_TACH0 BIT(4)
+#define MAX6620_FAIL_TACH1 BIT(5)
+#define MAX6620_FAIL_TACH2 BIT(6)
+#define MAX6620_FAIL_TACH3 BIT(7)
+#define MAX6620_FAIL_MASK0 BIT(0)
+#define MAX6620_FAIL_MASK1 BIT(1)
+#define MAX6620_FAIL_MASK2 BIT(2)
+#define MAX6620_FAIL_MASK3 BIT(3)
+
+#define MAX6620_CLOCK_FREQ 8192 /* Clock frequency in Hz */
+#define MAX6620_PULSE_PER_REV 2 /* Tachometer pulses per revolution */
+
+/* Minimum and maximum values of the FAN-RPM */
+#define FAN_RPM_MIN 240
+#define FAN_RPM_MAX 30000
+
+static const u8 config_reg[] = {
+ MAX6620_REG_CONF_FAN0,
+ MAX6620_REG_CONF_FAN1,
+ MAX6620_REG_CONF_FAN2,
+ MAX6620_REG_CONF_FAN3,
+};
+
+static const u8 dyn_reg[] = {
+ MAX6620_REG_DYN_FAN0,
+ MAX6620_REG_DYN_FAN1,
+ MAX6620_REG_DYN_FAN2,
+ MAX6620_REG_DYN_FAN3,
+};
+
+static const u8 tach_reg[] = {
+ MAX6620_REG_TACH0,
+ MAX6620_REG_TACH1,
+ MAX6620_REG_TACH2,
+ MAX6620_REG_TACH3,
+};
+
+static const u8 target_reg[] = {
+ MAX6620_REG_TAR0,
+ MAX6620_REG_TAR1,
+ MAX6620_REG_TAR2,
+ MAX6620_REG_TAR3,
+};
+
+/*
+ * Client data (each client gets its own)
+ */
+
+struct max6620_data {
+ struct i2c_client *client;
+ struct mutex update_lock;
+ bool valid; /* false until following fields are valid */
+ unsigned long last_updated; /* in jiffies */
+
+ /* register values */
+ u8 fancfg[4];
+ u8 fandyn[4];
+ u8 fault;
+ u16 tach[4];
+ u16 target[4];
+};
+
+static u8 max6620_fan_div_from_reg(u8 val)
+{
+ return BIT((val & 0xE0) >> 5);
+}
+
+static u16 max6620_fan_rpm_to_tach(u8 div, int rpm)
+{
+ return (60 * div * MAX6620_CLOCK_FREQ) / (rpm * MAX6620_PULSE_PER_REV);
+}
+
+static int max6620_fan_tach_to_rpm(u8 div, u16 tach)
+{
+ return (60 * div * MAX6620_CLOCK_FREQ) / (tach * MAX6620_PULSE_PER_REV);
+}
+
+static int max6620_update_device(struct device *dev)
+{
+ struct max6620_data *data = dev_get_drvdata(dev);
+ struct i2c_client *client = data->client;
+ int i;
+ int ret = 0;
+
+ mutex_lock(&data->update_lock);
+
+ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ for (i = 0; i < 4; i++) {
+ ret = i2c_smbus_read_byte_data(client, config_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->fancfg[i] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, dyn_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->fandyn[i] = ret;
+
+ ret = i2c_smbus_read_byte_data(client, tach_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->tach[i] = (ret << 3) & 0x7f8;
+ ret = i2c_smbus_read_byte_data(client, tach_reg[i] + 1);
+ if (ret < 0)
+ goto error;
+ data->tach[i] |= (ret >> 5) & 0x7;
+
+ ret = i2c_smbus_read_byte_data(client, target_reg[i]);
+ if (ret < 0)
+ goto error;
+ data->target[i] = (ret << 3) & 0x7f8;
+ ret = i2c_smbus_read_byte_data(client, target_reg[i] + 1);
+ if (ret < 0)
+ goto error;
+ data->target[i] |= (ret >> 5) & 0x7;
+ }
+
+ /*
+ * Alarms are cleared on read in case the condition that
+ * caused the alarm is removed. Keep the value latched here
+ * for providing the register through different alarm files.
+ */
+ ret = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT);
+ if (ret < 0)
+ goto error;
+ data->fault |= (ret >> 4) & (ret & 0x0F);
+
+ data->last_updated = jiffies;
+ data->valid = true;
+ }
+
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static umode_t
+max6620_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_alarm:
+ case hwmon_fan_input:
+ return 0444;
+ case hwmon_fan_div:
+ case hwmon_fan_target:
+ return 0644;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int
+max6620_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct max6620_data *data;
+ struct i2c_client *client;
+ int ret;
+ u8 div;
+ u8 val1;
+ u8 val2;
+
+ ret = max6620_update_device(dev);
+ if (ret < 0)
+ return ret;
+ data = dev_get_drvdata(dev);
+ client = data->client;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_alarm:
+ mutex_lock(&data->update_lock);
+ *val = !!(data->fault & BIT(channel));
+
+ /* Setting TACH count to re-enable fan fault detection */
+ if (*val == 1) {
+ val1 = (data->target[channel] >> 3) & 0xff;
+ val2 = (data->target[channel] << 5) & 0xe0;
+ ret = i2c_smbus_write_byte_data(client,
+ target_reg[channel], val1);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+ ret = i2c_smbus_write_byte_data(client,
+ target_reg[channel] + 1, val2);
+ if (ret < 0) {
+ mutex_unlock(&data->update_lock);
+ return ret;
+ }
+
+ data->fault &= ~BIT(channel);
+ }
+ mutex_unlock(&data->update_lock);
+
+ break;
+ case hwmon_fan_div:
+ *val = max6620_fan_div_from_reg(data->fandyn[channel]);
+ break;
+ case hwmon_fan_input:
+ if (data->tach[channel] == 0) {
+ *val = 0;
+ } else {
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ *val = max6620_fan_tach_to_rpm(div, data->tach[channel]);
+ }
+ break;
+ case hwmon_fan_target:
+ if (data->target[channel] == 0) {
+ *val = 0;
+ } else {
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ *val = max6620_fan_tach_to_rpm(div, data->target[channel]);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int
+max6620_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long val)
+{
+ struct max6620_data *data;
+ struct i2c_client *client;
+ int ret;
+ u8 div;
+ u16 tach;
+ u8 val1;
+ u8 val2;
+
+ ret = max6620_update_device(dev);
+ if (ret < 0)
+ return ret;
+ data = dev_get_drvdata(dev);
+ client = data->client;
+ mutex_lock(&data->update_lock);
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_div:
+ switch (val) {
+ case 1:
+ div = 0;
+ break;
+ case 2:
+ div = 1;
+ break;
+ case 4:
+ div = 2;
+ break;
+ case 8:
+ div = 3;
+ break;
+ case 16:
+ div = 4;
+ break;
+ case 32:
+ div = 5;
+ break;
+ default:
+ ret = -EINVAL;
+ goto error;
+ }
+ data->fandyn[channel] &= 0x1F;
+ data->fandyn[channel] |= div << 5;
+ ret = i2c_smbus_write_byte_data(client, dyn_reg[channel],
+ data->fandyn[channel]);
+ break;
+ case hwmon_fan_target:
+ val = clamp_val(val, FAN_RPM_MIN, FAN_RPM_MAX);
+ div = max6620_fan_div_from_reg(data->fandyn[channel]);
+ tach = max6620_fan_rpm_to_tach(div, val);
+ val1 = (tach >> 3) & 0xff;
+ val2 = (tach << 5) & 0xe0;
+ ret = i2c_smbus_write_byte_data(client, target_reg[channel], val1);
+ if (ret < 0)
+ break;
+ ret = i2c_smbus_write_byte_data(client, target_reg[channel] + 1, val2);
+ if (ret < 0)
+ break;
+
+ /* Setting TACH count re-enables fan fault detection */
+ data->fault &= ~BIT(channel);
+
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+error:
+ mutex_unlock(&data->update_lock);
+ return ret;
+}
+
+static const struct hwmon_channel_info *max6620_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM,
+ HWMON_F_INPUT | HWMON_F_DIV | HWMON_F_TARGET | HWMON_F_ALARM),
+ NULL
+};
+
+static const struct hwmon_ops max6620_hwmon_ops = {
+ .read = max6620_read,
+ .write = max6620_write,
+ .is_visible = max6620_is_visible,
+};
+
+static const struct hwmon_chip_info max6620_chip_info = {
+ .ops = &max6620_hwmon_ops,
+ .info = max6620_info,
+};
+
+static int max6620_init_client(struct max6620_data *data)
+{
+ struct i2c_client *client = data->client;
+ int config;
+ int err;
+ int i;
+ int reg;
+
+ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG);
+ if (config < 0) {
+ dev_err(&client->dev, "Error reading config, aborting.\n");
+ return config;
+ }
+
+ /*
+ * Set bit 4, disable other fans from going full speed on a fan
+ * failure.
+ */
+ err = i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10);
+ if (err < 0) {
+ dev_err(&client->dev, "Config write error, aborting.\n");
+ return err;
+ }
+
+ for (i = 0; i < 4; i++) {
+ reg = i2c_smbus_read_byte_data(client, config_reg[i]);
+ if (reg < 0)
+ return reg;
+ data->fancfg[i] = reg;
+
+ /* Enable RPM mode */
+ data->fancfg[i] |= 0xa8;
+ err = i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]);
+ if (err < 0)
+ return err;
+
+ /* 2 counts (001) and Rate change 100 (0.125 secs) */
+ data->fandyn[i] = 0x30;
+ err = i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
+static int max6620_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max6620_data *data;
+ struct device *hwmon_dev;
+ int err;
+
+ data = devm_kzalloc(dev, sizeof(struct max6620_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->client = client;
+ mutex_init(&data->update_lock);
+
+ err = max6620_init_client(data);
+ if (err)
+ return err;
+
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ data,
+ &max6620_chip_info,
+ NULL);
+
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static const struct i2c_device_id max6620_id[] = {
+ { "max6620", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max6620_id);
+
+static struct i2c_driver max6620_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "max6620",
+ },
+ .probe_new = max6620_probe,
+ .id_table = max6620_id,
+};
+
+module_i2c_driver(max6620_driver);
+
+MODULE_AUTHOR("Lucas Grunenberg");
+MODULE_DESCRIPTION("MAX6620 sensor driver");
+MODULE_LICENSE("GPL");
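To make the fan math in the new max6620 driver concrete: the tachometer count is an 11-bit value split across two registers (8 high bits, then 3 bits in the top of the following register), and RPM = 60 * divider * 8192 / (count * 2), with the divider taken from bits 7:5 of the dynamic register. A standalone round trip with made-up register values:

#include <stdio.h>
#include <stdint.h>

#define CLOCK_FREQ	8192	/* Hz, as MAX6620_CLOCK_FREQ */
#define PULSE_PER_REV	2	/* as MAX6620_PULSE_PER_REV  */

static unsigned int div_from_reg(uint8_t dyn)
{
	return 1u << ((dyn & 0xE0) >> 5);	/* 1, 2, 4, ... 32 */
}

static int tach_to_rpm(unsigned int div, uint16_t tach)
{
	return (60 * div * CLOCK_FREQ) / (tach * PULSE_PER_REV);
}

int main(void)
{
	/* Hypothetical reads of TACHn, TACHn+1 and DYN_FANn. */
	uint8_t msb = 0x3a, lsb = 0x40, dyn = 0x30;

	uint16_t tach = ((msb << 3) & 0x7f8) | ((lsb >> 5) & 0x7);
	unsigned int div = div_from_reg(dyn);

	printf("count=%u div=%u -> %d RPM\n", (unsigned int)tach, div,
	       tach_to_rpm(div, tach));
	return 0;
}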
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index b71899c641fa..ccc0f047bd44 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -69,7 +69,7 @@ static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
struct max6639_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values sampled regularly */
@@ -141,7 +141,7 @@ static struct max6639_data *max6639_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 23d93142b0b3..699d265aae2e 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -181,7 +181,7 @@ static struct max6642_data *max6642_update_device(struct device *dev)
MAX6642_REG_R_STATUS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/mlxreg-fan.c b/drivers/hwmon/mlxreg-fan.c
index 89fe7b9fe26b..4a8becdb0d58 100644
--- a/drivers/hwmon/mlxreg-fan.c
+++ b/drivers/hwmon/mlxreg-fan.c
@@ -12,7 +12,9 @@
#include <linux/regmap.h>
#include <linux/thermal.h>
-#define MLXREG_FAN_MAX_TACHO 12
+#define MLXREG_FAN_MAX_TACHO 14
+#define MLXREG_FAN_MAX_PWM 4
+#define MLXREG_FAN_PWM_NOT_CONNECTED 0xff
#define MLXREG_FAN_MAX_STATE 10
#define MLXREG_FAN_MIN_DUTY 51 /* 20% */
#define MLXREG_FAN_MAX_DUTY 255 /* 100% */
@@ -61,6 +63,8 @@
MLXREG_FAN_MAX_DUTY, \
MLXREG_FAN_MAX_STATE))
+struct mlxreg_fan;
+
/*
* struct mlxreg_fan_tacho - tachometer data (internal use):
*
@@ -79,12 +83,18 @@ struct mlxreg_fan_tacho {
/*
* struct mlxreg_fan_pwm - PWM data (internal use):
*
+ * @fan: private data;
* @connected: indicates if PWM is connected;
* @reg: register offset;
+ * @cooling: cooling device levels;
+ * @cdev: cooling device;
*/
struct mlxreg_fan_pwm {
+ struct mlxreg_fan *fan;
bool connected;
u32 reg;
+ u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
+ struct thermal_cooling_device *cdev;
};
/*
@@ -97,20 +107,16 @@ struct mlxreg_fan_pwm {
* @tachos_per_drwr - number of tachometers per drawer;
* @samples: minimum allowed samples per pulse;
* @divider: divider value for tachometer RPM calculation;
- * @cooling: cooling device levels;
- * @cdev: cooling device;
*/
struct mlxreg_fan {
struct device *dev;
void *regmap;
struct mlxreg_core_platform_data *pdata;
struct mlxreg_fan_tacho tacho[MLXREG_FAN_MAX_TACHO];
- struct mlxreg_fan_pwm pwm;
+ struct mlxreg_fan_pwm pwm[MLXREG_FAN_MAX_PWM];
int tachos_per_drwr;
int samples;
int divider;
- u8 cooling_levels[MLXREG_FAN_MAX_STATE + 1];
- struct thermal_cooling_device *cdev;
};
static int
@@ -119,6 +125,7 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
{
struct mlxreg_fan *fan = dev_get_drvdata(dev);
struct mlxreg_fan_tacho *tacho;
+ struct mlxreg_fan_pwm *pwm;
u32 regval;
int err;
@@ -169,9 +176,10 @@ mlxreg_fan_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
+ pwm = &fan->pwm[channel];
switch (attr) {
case hwmon_pwm_input:
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err)
return err;
@@ -195,6 +203,7 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
int channel, long val)
{
struct mlxreg_fan *fan = dev_get_drvdata(dev);
+ struct mlxreg_fan_pwm *pwm;
switch (type) {
case hwmon_pwm:
@@ -203,7 +212,8 @@ mlxreg_fan_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
if (val < MLXREG_FAN_MIN_DUTY ||
val > MLXREG_FAN_MAX_DUTY)
return -EINVAL;
- return regmap_write(fan->regmap, fan->pwm.reg, val);
+ pwm = &fan->pwm[channel];
+ return regmap_write(fan->regmap, pwm->reg, val);
default:
return -EOPNOTSUPP;
}
@@ -235,7 +245,7 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
break;
case hwmon_pwm:
- if (!(((struct mlxreg_fan *)data)->pwm.connected))
+ if (!(((struct mlxreg_fan *)data)->pwm[channel].connected))
return 0;
switch (attr) {
@@ -253,6 +263,13 @@ mlxreg_fan_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr,
return 0;
}
+static char *mlxreg_fan_name[] = {
+ "mlxreg_fan",
+ "mlxreg_fan1",
+ "mlxreg_fan2",
+ "mlxreg_fan3",
+};
+
static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
HWMON_CHANNEL_INFO(fan,
HWMON_F_INPUT | HWMON_F_FAULT,
@@ -266,8 +283,13 @@ static const struct hwmon_channel_info *mlxreg_fan_hwmon_info[] = {
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
+ HWMON_F_INPUT | HWMON_F_FAULT,
HWMON_F_INPUT | HWMON_F_FAULT),
HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
HWMON_PWM_INPUT),
NULL
};
@@ -294,11 +316,12 @@ static int mlxreg_fan_get_cur_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
- struct mlxreg_fan *fan = cdev->devdata;
+ struct mlxreg_fan_pwm *pwm = cdev->devdata;
+ struct mlxreg_fan *fan = pwm->fan;
u32 regval;
int err;
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err) {
dev_err(fan->dev, "Failed to query PWM duty\n");
return err;
@@ -313,7 +336,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
- struct mlxreg_fan *fan = cdev->devdata;
+ struct mlxreg_fan_pwm *pwm = cdev->devdata;
+ struct mlxreg_fan *fan = pwm->fan;
unsigned long cur_state;
int i, config = 0;
u32 regval;
@@ -337,11 +361,11 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
config = 1;
state -= MLXREG_FAN_MAX_STATE;
for (i = 0; i < state; i++)
- fan->cooling_levels[i] = state;
+ pwm->cooling_levels[i] = state;
for (i = state; i <= MLXREG_FAN_MAX_STATE; i++)
- fan->cooling_levels[i] = i;
+ pwm->cooling_levels[i] = i;
- err = regmap_read(fan->regmap, fan->pwm.reg, &regval);
+ err = regmap_read(fan->regmap, pwm->reg, &regval);
if (err) {
dev_err(fan->dev, "Failed to query PWM duty\n");
return err;
@@ -358,8 +382,8 @@ static int mlxreg_fan_set_cur_state(struct thermal_cooling_device *cdev,
return -EINVAL;
/* Normalize the state to the valid speed range. */
- state = fan->cooling_levels[state];
- err = regmap_write(fan->regmap, fan->pwm.reg,
+ state = pwm->cooling_levels[state];
+ err = regmap_write(fan->regmap, pwm->reg,
MLXREG_FAN_PWM_STATE2DUTY(state));
if (err) {
dev_err(fan->dev, "Failed to write PWM duty\n");
@@ -390,6 +414,22 @@ static int mlxreg_fan_connect_verify(struct mlxreg_fan *fan,
return !!(regval & data->bit);
}
+static int mlxreg_pwm_connect_verify(struct mlxreg_fan *fan,
+ struct mlxreg_core_data *data)
+{
+ u32 regval;
+ int err;
+
+ err = regmap_read(fan->regmap, data->reg, &regval);
+ if (err) {
+ dev_err(fan->dev, "Failed to query pwm register 0x%08x\n",
+ data->reg);
+ return err;
+ }
+
+ return regval != MLXREG_FAN_PWM_NOT_CONNECTED;
+}
+
static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
struct mlxreg_core_data *data)
{
@@ -418,8 +458,8 @@ static int mlxreg_fan_speed_divider_get(struct mlxreg_fan *fan,
static int mlxreg_fan_config(struct mlxreg_fan *fan,
struct mlxreg_core_platform_data *pdata)
{
+ int tacho_num = 0, tacho_avail = 0, pwm_num = 0, i;
struct mlxreg_core_data *data = pdata->data;
- int tacho_num = 0, tacho_avail = 0, i;
bool configured = false;
int err;
@@ -449,13 +489,24 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
fan->tacho[tacho_num++].connected = true;
tacho_avail++;
} else if (strnstr(data->label, "pwm", sizeof(data->label))) {
- if (fan->pwm.connected) {
- dev_err(fan->dev, "duplicate pwm entry: %s\n",
+ if (pwm_num == MLXREG_FAN_MAX_PWM) {
+ dev_err(fan->dev, "too many pwm entries: %s\n",
data->label);
return -EINVAL;
}
- fan->pwm.reg = data->reg;
- fan->pwm.connected = true;
+
+ /* Validate if more than one PWM is connected. */
+ if (pwm_num) {
+ err = mlxreg_pwm_connect_verify(fan, data);
+ if (err < 0)
+ return err;
+ else if (!err)
+ continue;
+ }
+
+ fan->pwm[pwm_num].reg = data->reg;
+ fan->pwm[pwm_num].connected = true;
+ pwm_num++;
} else if (strnstr(data->label, "conf", sizeof(data->label))) {
if (configured) {
dev_err(fan->dev, "duplicate conf entry: %s\n",
@@ -508,11 +559,32 @@ static int mlxreg_fan_config(struct mlxreg_fan *fan,
fan->tachos_per_drwr = tacho_avail / drwr_avail;
}
- /* Init cooling levels per PWM state. */
- for (i = 0; i < MLXREG_FAN_SPEED_MIN_LEVEL; i++)
- fan->cooling_levels[i] = MLXREG_FAN_SPEED_MIN_LEVEL;
- for (i = MLXREG_FAN_SPEED_MIN_LEVEL; i <= MLXREG_FAN_MAX_STATE; i++)
- fan->cooling_levels[i] = i;
+ return 0;
+}
+
+static int mlxreg_fan_cooling_config(struct device *dev, struct mlxreg_fan *fan)
+{
+ int i, j;
+
+ for (i = 0; i < MLXREG_FAN_MAX_PWM; i++) {
+ struct mlxreg_fan_pwm *pwm = &fan->pwm[i];
+
+ if (!pwm->connected)
+ continue;
+ pwm->fan = fan;
+ pwm->cdev = devm_thermal_of_cooling_device_register(dev, NULL, mlxreg_fan_name[i],
+ pwm, &mlxreg_fan_cooling_ops);
+ if (IS_ERR(pwm->cdev)) {
+ dev_err(dev, "Failed to register cooling device\n");
+ return PTR_ERR(pwm->cdev);
+ }
+
+ /* Init cooling levels per PWM state. */
+ for (j = 0; j < MLXREG_FAN_SPEED_MIN_LEVEL; j++)
+ pwm->cooling_levels[j] = MLXREG_FAN_SPEED_MIN_LEVEL;
+ for (j = MLXREG_FAN_SPEED_MIN_LEVEL; j <= MLXREG_FAN_MAX_STATE; j++)
+ pwm->cooling_levels[j] = j;
+ }
return 0;
}
@@ -551,16 +623,10 @@ static int mlxreg_fan_probe(struct platform_device *pdev)
return PTR_ERR(hwm);
}
- if (IS_REACHABLE(CONFIG_THERMAL)) {
- fan->cdev = devm_thermal_of_cooling_device_register(dev,
- NULL, "mlxreg_fan", fan, &mlxreg_fan_cooling_ops);
- if (IS_ERR(fan->cdev)) {
- dev_err(dev, "Failed to register cooling device\n");
- return PTR_ERR(fan->cdev);
- }
- }
+ if (IS_REACHABLE(CONFIG_THERMAL))
+ err = mlxreg_fan_cooling_config(dev, fan);
- return 0;
+ return err;
}
static struct platform_driver mlxreg_fan_driver = {
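With one cooling device per connected PWM, each pwm[] slot now owns its cooling_levels[] table; entries below the minimum level are clamped up so the thermal core can never drive a fan below the 20% duty floor (MLXREG_FAN_MIN_DUTY 51 of 255). A standalone sketch of that normalization and the state-to-duty scaling; the minimum level value and the rounding used by MLXREG_FAN_PWM_STATE2DUTY are assumptions here, not taken from the patch:

#include <stdio.h>

#define MAX_STATE	10
#define MIN_LEVEL	2	/* assumed MLXREG_FAN_SPEED_MIN_LEVEL (20%) */
#define MAX_DUTY	255

int main(void)
{
	int cooling_levels[MAX_STATE + 1];
	int i;

	/* Init cooling levels per PWM state, mirroring mlxreg_fan_cooling_config(). */
	for (i = 0; i < MIN_LEVEL; i++)
		cooling_levels[i] = MIN_LEVEL;
	for (i = MIN_LEVEL; i <= MAX_STATE; i++)
		cooling_levels[i] = i;

	for (i = 0; i <= MAX_STATE; i++) {
		int state = cooling_levels[i];	/* normalized state */
		int duty = (state * MAX_DUTY + MAX_STATE / 2) / MAX_STATE; /* assumed rounding */

		printf("state %2d -> level %2d -> duty %3d\n", i, state, duty);
	}
	return 0;
}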
diff --git a/drivers/hwmon/nct6683.c b/drivers/hwmon/nct6683.c
index 35f8635dc7f3..6a9f420e7d32 100644
--- a/drivers/hwmon/nct6683.c
+++ b/drivers/hwmon/nct6683.c
@@ -174,6 +174,7 @@ superio_exit(int ioreg)
#define NCT6683_CUSTOMER_ID_MITAC 0xa0e
#define NCT6683_CUSTOMER_ID_MSI 0x201
#define NCT6683_CUSTOMER_ID_ASROCK 0xe2c
+#define NCT6683_CUSTOMER_ID_ASROCK2 0xe1b
#define NCT6683_REG_BUILD_YEAR 0x604
#define NCT6683_REG_BUILD_MONTH 0x605
@@ -1221,6 +1222,8 @@ static int nct6683_probe(struct platform_device *pdev)
break;
case NCT6683_CUSTOMER_ID_ASROCK:
break;
+ case NCT6683_CUSTOMER_ID_ASROCK2:
+ break;
default:
if (!force)
return -ENODEV;
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 5bd15622a85f..93dca471972e 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -55,6 +55,7 @@
#include <linux/dmi.h>
#include <linux/io.h>
#include <linux/nospec.h>
+#include <linux/wmi.h>
#include "lm75.h"
#define USE_ALTERNATE
@@ -132,31 +133,135 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal");
#define SIO_ID_MASK 0xFFF8
enum pwm_enable { off, manual, thermal_cruise, speed_cruise, sf3, sf4 };
+enum sensor_access { access_direct, access_asuswmi };
-static inline void
-superio_outb(int ioreg, int reg, int val)
+struct nct6775_sio_data {
+ int sioreg;
+ int ld;
+ enum kinds kind;
+ enum sensor_access access;
+
+ /* superio_() callbacks */
+ void (*sio_outb)(struct nct6775_sio_data *sio_data, int reg, int val);
+ int (*sio_inb)(struct nct6775_sio_data *sio_data, int reg);
+ void (*sio_select)(struct nct6775_sio_data *sio_data, int ld);
+ int (*sio_enter)(struct nct6775_sio_data *sio_data);
+ void (*sio_exit)(struct nct6775_sio_data *sio_data);
+};
+
+#define ASUSWMI_MONITORING_GUID "466747A0-70EC-11DE-8A39-0800200C9A66"
+#define ASUSWMI_METHODID_RSIO 0x5253494F
+#define ASUSWMI_METHODID_WSIO 0x5753494F
+#define ASUSWMI_METHODID_RHWM 0x5248574D
+#define ASUSWMI_METHODID_WHWM 0x5748574D
+#define ASUSWMI_UNSUPPORTED_METHOD 0xFFFFFFFE
+
+static int nct6775_asuswmi_evaluate_method(u32 method_id, u8 bank, u8 reg, u8 val, u32 *retval)
+{
+#if IS_ENABLED(CONFIG_ACPI_WMI)
+ u32 args = bank | (reg << 8) | (val << 16);
+ struct acpi_buffer input = { (acpi_size) sizeof(args), &args };
+ struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+ acpi_status status;
+ union acpi_object *obj;
+ u32 tmp = ASUSWMI_UNSUPPORTED_METHOD;
+
+ status = wmi_evaluate_method(ASUSWMI_MONITORING_GUID, 0,
+ method_id, &input, &output);
+
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ obj = output.pointer;
+ if (obj && obj->type == ACPI_TYPE_INTEGER)
+ tmp = obj->integer.value;
+
+ if (retval)
+ *retval = tmp;
+
+ kfree(obj);
+
+ if (tmp == ASUSWMI_UNSUPPORTED_METHOD)
+ return -ENODEV;
+ return 0;
+#else
+ return -EOPNOTSUPP;
+#endif
+}
+
+static inline int nct6775_asuswmi_write(u8 bank, u8 reg, u8 val)
+{
+ return nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_WHWM, bank,
+ reg, val, NULL);
+}
+
+static inline int nct6775_asuswmi_read(u8 bank, u8 reg, u8 *val)
+{
+ u32 ret, tmp = 0;
+
+ ret = nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RHWM, bank,
+ reg, 0, &tmp);
+ *val = tmp;
+ return ret;
+}
+
+static int superio_wmi_inb(struct nct6775_sio_data *sio_data, int reg)
+{
+ int tmp = 0;
+
+ nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_RSIO, sio_data->ld,
+ reg, 0, &tmp);
+ return tmp;
+}
+
+static void superio_wmi_outb(struct nct6775_sio_data *sio_data, int reg, int val)
{
+ nct6775_asuswmi_evaluate_method(ASUSWMI_METHODID_WSIO, sio_data->ld,
+ reg, val, NULL);
+}
+
+static void superio_wmi_select(struct nct6775_sio_data *sio_data, int ld)
+{
+ sio_data->ld = ld;
+}
+
+static int superio_wmi_enter(struct nct6775_sio_data *sio_data)
+{
+ return 0;
+}
+
+static void superio_wmi_exit(struct nct6775_sio_data *sio_data)
+{
+}
+
+static void superio_outb(struct nct6775_sio_data *sio_data, int reg, int val)
+{
+ int ioreg = sio_data->sioreg;
+
outb(reg, ioreg);
outb(val, ioreg + 1);
}
-static inline int
-superio_inb(int ioreg, int reg)
+static int superio_inb(struct nct6775_sio_data *sio_data, int reg)
{
+ int ioreg = sio_data->sioreg;
+
outb(reg, ioreg);
return inb(ioreg + 1);
}
-static inline void
-superio_select(int ioreg, int ld)
+static void superio_select(struct nct6775_sio_data *sio_data, int ld)
{
+ int ioreg = sio_data->sioreg;
+
outb(SIO_REG_LDSEL, ioreg);
outb(ld, ioreg + 1);
}
-static inline int
-superio_enter(int ioreg)
+static int superio_enter(struct nct6775_sio_data *sio_data)
{
+ int ioreg = sio_data->sioreg;
+
/*
* Try to reserve <ioreg> and <ioreg + 1> for exclusive access.
*/
@@ -169,9 +274,10 @@ superio_enter(int ioreg)
return 0;
}
-static inline void
-superio_exit(int ioreg)
+static void superio_exit(struct nct6775_sio_data *sio_data)
{
+ int ioreg = sio_data->sioreg;
+
outb(0xaa, ioreg);
outb(0x02, ioreg);
outb(0x02, ioreg + 1);
@@ -190,6 +296,7 @@ superio_exit(int ioreg)
#define NCT6775_REG_BANK 0x4E
#define NCT6775_REG_CONFIG 0x40
+#define NCT6775_PORT_CHIPID 0x58
/*
* Not currently used:
@@ -1215,11 +1322,10 @@ struct nct6775_data {
u8 fandiv1;
u8 fandiv2;
u8 sio_reg_enable;
-};
-struct nct6775_sio_data {
- int sioreg;
- enum kinds kind;
+ /* nct6775_*() callbacks */
+ u16 (*read_value)(struct nct6775_data *data, u16 reg);
+ int (*write_value)(struct nct6775_data *data, u16 reg, u16 value);
};
struct sensor_device_template {
@@ -1407,6 +1513,54 @@ static bool is_word_sized(struct nct6775_data *data, u16 reg)
return false;
}
+static inline void nct6775_wmi_set_bank(struct nct6775_data *data, u16 reg)
+{
+ u8 bank = reg >> 8;
+
+ data->bank = bank;
+}
+
+static u16 nct6775_wmi_read_value(struct nct6775_data *data, u16 reg)
+{
+ int res, err, word_sized = is_word_sized(data, reg);
+ u8 tmp = 0;
+
+ nct6775_wmi_set_bank(data, reg);
+
+ err = nct6775_asuswmi_read(data->bank, reg, &tmp);
+ if (err)
+ return 0;
+
+ res = tmp;
+ if (word_sized) {
+ err = nct6775_asuswmi_read(data->bank, (reg & 0xff) + 1, &tmp);
+ if (err)
+ return 0;
+
+ res = (res << 8) + tmp;
+ }
+ return res;
+}
+
+static int nct6775_wmi_write_value(struct nct6775_data *data, u16 reg, u16 value)
+{
+ int res, word_sized = is_word_sized(data, reg);
+
+ nct6775_wmi_set_bank(data, reg);
+
+ if (word_sized) {
+ res = nct6775_asuswmi_write(data->bank, reg & 0xff, value >> 8);
+ if (res)
+ return res;
+
+ res = nct6775_asuswmi_write(data->bank, (reg & 0xff) + 1, value);
+ } else {
+ res = nct6775_asuswmi_write(data->bank, reg & 0xff, value);
+ }
+
+ return res;
+}
+
/*
* On older chips, only registers 0x50-0x5f are banked.
* On more recent chips, all registers are banked.
@@ -1459,7 +1613,7 @@ static u16 nct6775_read_temp(struct nct6775_data *data, u16 reg)
{
u16 res;
- res = nct6775_read_value(data, reg);
+ res = data->read_value(data, reg);
if (!is_word_sized(data, reg))
res <<= 8;
@@ -1470,7 +1624,7 @@ static int nct6775_write_temp(struct nct6775_data *data, u16 reg, u16 value)
{
if (!is_word_sized(data, reg))
value >>= 8;
- return nct6775_write_value(data, reg, value);
+ return data->write_value(data, reg, value);
}
/* This function assumes that the caller holds data->update_lock */
@@ -1480,24 +1634,24 @@ static void nct6775_write_fan_div(struct nct6775_data *data, int nr)
switch (nr) {
case 0:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV1) & 0x70)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV1) & 0x70)
| (data->fan_div[0] & 0x7);
- nct6775_write_value(data, NCT6775_REG_FANDIV1, reg);
+ data->write_value(data, NCT6775_REG_FANDIV1, reg);
break;
case 1:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV1) & 0x7)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV1) & 0x7)
| ((data->fan_div[1] << 4) & 0x70);
- nct6775_write_value(data, NCT6775_REG_FANDIV1, reg);
+ data->write_value(data, NCT6775_REG_FANDIV1, reg);
break;
case 2:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV2) & 0x70)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV2) & 0x70)
| (data->fan_div[2] & 0x7);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, reg);
+ data->write_value(data, NCT6775_REG_FANDIV2, reg);
break;
case 3:
- reg = (nct6775_read_value(data, NCT6775_REG_FANDIV2) & 0x7)
+ reg = (data->read_value(data, NCT6775_REG_FANDIV2) & 0x7)
| ((data->fan_div[3] << 4) & 0x70);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, reg);
+ data->write_value(data, NCT6775_REG_FANDIV2, reg);
break;
}
}
@@ -1512,10 +1666,10 @@ static void nct6775_update_fan_div(struct nct6775_data *data)
{
u8 i;
- i = nct6775_read_value(data, NCT6775_REG_FANDIV1);
+ i = data->read_value(data, NCT6775_REG_FANDIV1);
data->fan_div[0] = i & 0x7;
data->fan_div[1] = (i & 0x70) >> 4;
- i = nct6775_read_value(data, NCT6775_REG_FANDIV2);
+ i = data->read_value(data, NCT6775_REG_FANDIV2);
data->fan_div[2] = i & 0x7;
if (data->has_fan & BIT(3))
data->fan_div[3] = (i & 0x70) >> 4;
@@ -1563,11 +1717,11 @@ static void nct6775_init_fan_common(struct device *dev,
*/
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
if (data->has_fan_min & BIT(i)) {
- reg = nct6775_read_value(data, data->REG_FAN_MIN[i]);
+ reg = data->read_value(data, data->REG_FAN_MIN[i]);
if (!reg)
- nct6775_write_value(data, data->REG_FAN_MIN[i],
- data->has_fan_div ? 0xff
- : 0xff1f);
+ data->write_value(data, data->REG_FAN_MIN[i],
+ data->has_fan_div ? 0xff
+ : 0xff1f);
}
}
}
@@ -1611,8 +1765,8 @@ static void nct6775_select_fan_div(struct device *dev,
}
if (fan_min != data->fan_min[nr]) {
data->fan_min[nr] = fan_min;
- nct6775_write_value(data, data->REG_FAN_MIN[nr],
- fan_min);
+ data->write_value(data, data->REG_FAN_MIN[nr],
+ fan_min);
}
}
data->fan_div[nr] = fan_div;
@@ -1632,16 +1786,15 @@ static void nct6775_update_pwm(struct device *dev)
continue;
duty_is_dc = data->REG_PWM_MODE[i] &&
- (nct6775_read_value(data, data->REG_PWM_MODE[i])
+ (data->read_value(data, data->REG_PWM_MODE[i])
& data->PWM_MODE_MASK[i]);
data->pwm_mode[i] = !duty_is_dc;
- fanmodecfg = nct6775_read_value(data, data->REG_FAN_MODE[i]);
+ fanmodecfg = data->read_value(data, data->REG_FAN_MODE[i]);
for (j = 0; j < ARRAY_SIZE(data->REG_PWM); j++) {
if (data->REG_PWM[j] && data->REG_PWM[j][i]) {
- data->pwm[j][i]
- = nct6775_read_value(data,
- data->REG_PWM[j][i]);
+ data->pwm[j][i] = data->read_value(data,
+ data->REG_PWM[j][i]);
}
}
@@ -1656,17 +1809,17 @@ static void nct6775_update_pwm(struct device *dev)
u8 t = fanmodecfg & 0x0f;
if (data->REG_TOLERANCE_H) {
- t |= (nct6775_read_value(data,
+ t |= (data->read_value(data,
data->REG_TOLERANCE_H[i]) & 0x70) >> 1;
}
data->target_speed_tolerance[i] = t;
}
data->temp_tolerance[1][i] =
- nct6775_read_value(data,
- data->REG_CRITICAL_TEMP_TOLERANCE[i]);
+ data->read_value(data,
+ data->REG_CRITICAL_TEMP_TOLERANCE[i]);
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[i]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[i]);
data->pwm_temp_sel[i] = reg & 0x1f;
/* If fan can stop, report floor as 0 */
if (reg & 0x80)
@@ -1675,7 +1828,7 @@ static void nct6775_update_pwm(struct device *dev)
if (!data->REG_WEIGHT_TEMP_SEL[i])
continue;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[i]);
data->pwm_weight_temp_sel[i] = reg & 0x1f;
/* If weight is disabled, report weight source as 0 */
if (!(reg & 0x80))
@@ -1683,9 +1836,8 @@ static void nct6775_update_pwm(struct device *dev)
/* Weight temp data */
for (j = 0; j < ARRAY_SIZE(data->weight_temp); j++) {
- data->weight_temp[j][i]
- = nct6775_read_value(data,
- data->REG_WEIGHT_TEMP[j][i]);
+ data->weight_temp[j][i] = data->read_value(data,
+ data->REG_WEIGHT_TEMP[j][i]);
}
}
}
@@ -1703,10 +1855,10 @@ static void nct6775_update_pwm_limits(struct device *dev)
for (j = 0; j < ARRAY_SIZE(data->fan_time); j++) {
data->fan_time[j][i] =
- nct6775_read_value(data, data->REG_FAN_TIME[j][i]);
+ data->read_value(data, data->REG_FAN_TIME[j][i]);
}
- reg_t = nct6775_read_value(data, data->REG_TARGET[i]);
+ reg_t = data->read_value(data, data->REG_TARGET[i]);
/* Update only in matching mode or if never updated */
if (!data->target_temp[i] ||
data->pwm_enable[i] == thermal_cruise)
@@ -1714,7 +1866,7 @@ static void nct6775_update_pwm_limits(struct device *dev)
if (!data->target_speed[i] ||
data->pwm_enable[i] == speed_cruise) {
if (data->REG_TOLERANCE_H) {
- reg_t |= (nct6775_read_value(data,
+ reg_t |= (data->read_value(data,
data->REG_TOLERANCE_H[i]) & 0x0f) << 8;
}
data->target_speed[i] = reg_t;
@@ -1722,21 +1874,21 @@ static void nct6775_update_pwm_limits(struct device *dev)
for (j = 0; j < data->auto_pwm_num; j++) {
data->auto_pwm[i][j] =
- nct6775_read_value(data,
- NCT6775_AUTO_PWM(data, i, j));
+ data->read_value(data,
+ NCT6775_AUTO_PWM(data, i, j));
data->auto_temp[i][j] =
- nct6775_read_value(data,
- NCT6775_AUTO_TEMP(data, i, j));
+ data->read_value(data,
+ NCT6775_AUTO_TEMP(data, i, j));
}
/* critical auto_pwm temperature data */
data->auto_temp[i][data->auto_pwm_num] =
- nct6775_read_value(data, data->REG_CRITICAL_TEMP[i]);
+ data->read_value(data, data->REG_CRITICAL_TEMP[i]);
switch (data->kind) {
case nct6775:
- reg = nct6775_read_value(data,
- NCT6775_REG_CRITICAL_ENAB[i]);
+ reg = data->read_value(data,
+ NCT6775_REG_CRITICAL_ENAB[i]);
data->auto_pwm[i][data->auto_pwm_num] =
(reg & 0x02) ? 0xff : 0x00;
break;
@@ -1753,10 +1905,10 @@ static void nct6775_update_pwm_limits(struct device *dev)
case nct6796:
case nct6797:
case nct6798:
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM_ENABLE[i]);
if (reg & data->CRITICAL_PWM_ENABLE_MASK)
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM[i]);
else
reg = 0xff;
@@ -1783,11 +1935,11 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!(data->have_in & BIT(i)))
continue;
- data->in[i][0] = nct6775_read_value(data,
- data->REG_VIN[i]);
- data->in[i][1] = nct6775_read_value(data,
+ data->in[i][0] = data->read_value(data,
+ data->REG_VIN[i]);
+ data->in[i][1] = data->read_value(data,
data->REG_IN_MINMAX[0][i]);
- data->in[i][2] = nct6775_read_value(data,
+ data->in[i][2] = data->read_value(data,
data->REG_IN_MINMAX[1][i]);
}
@@ -1798,18 +1950,18 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!(data->has_fan & BIT(i)))
continue;
- reg = nct6775_read_value(data, data->REG_FAN[i]);
+ reg = data->read_value(data, data->REG_FAN[i]);
data->rpm[i] = data->fan_from_reg(reg,
data->fan_div[i]);
if (data->has_fan_min & BIT(i))
- data->fan_min[i] = nct6775_read_value(data,
+ data->fan_min[i] = data->read_value(data,
data->REG_FAN_MIN[i]);
if (data->REG_FAN_PULSES[i]) {
data->fan_pulses[i] =
- (nct6775_read_value(data,
- data->REG_FAN_PULSES[i])
+ (data->read_value(data,
+ data->REG_FAN_PULSES[i])
>> data->FAN_PULSE_SHIFT[i]) & 0x03;
}
@@ -1825,15 +1977,14 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
continue;
for (j = 0; j < ARRAY_SIZE(data->reg_temp); j++) {
if (data->reg_temp[j][i])
- data->temp[j][i]
- = nct6775_read_temp(data,
- data->reg_temp[j][i]);
+ data->temp[j][i] = nct6775_read_temp(data,
+ data->reg_temp[j][i]);
}
if (i >= NUM_TEMP_FIXED ||
!(data->have_temp_fixed & BIT(i)))
continue;
- data->temp_offset[i]
- = nct6775_read_value(data, data->REG_TEMP_OFFSET[i]);
+ data->temp_offset[i] = data->read_value(data,
+ data->REG_TEMP_OFFSET[i]);
}
data->alarms = 0;
@@ -1842,7 +1993,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!data->REG_ALARM[i])
continue;
- alarm = nct6775_read_value(data, data->REG_ALARM[i]);
+ alarm = data->read_value(data, data->REG_ALARM[i]);
data->alarms |= ((u64)alarm) << (i << 3);
}
@@ -1852,7 +2003,7 @@ static struct nct6775_data *nct6775_update_device(struct device *dev)
if (!data->REG_BEEP[i])
continue;
- beep = nct6775_read_value(data, data->REG_BEEP[i]);
+ beep = data->read_value(data, data->REG_BEEP[i]);
data->beeps |= ((u64)beep) << (i << 3);
}
@@ -1894,8 +2045,8 @@ store_in_reg(struct device *dev, struct device_attribute *attr, const char *buf,
return err;
mutex_lock(&data->update_lock);
data->in[nr][index] = in_to_reg(val, nr);
- nct6775_write_value(data, data->REG_IN_MINMAX[index - 1][nr],
- data->in[nr][index]);
+ data->write_value(data, data->REG_IN_MINMAX[index - 1][nr],
+ data->in[nr][index]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -1919,8 +2070,8 @@ static int find_temp_source(struct nct6775_data *data, int index, int count)
for (nr = 0; nr < count; nr++) {
int src;
- src = nct6775_read_value(data,
- data->REG_TEMP_SOURCE[nr]) & 0x1f;
+ src = data->read_value(data,
+ data->REG_TEMP_SOURCE[nr]) & 0x1f;
if (src == source)
return nr;
}
@@ -1981,8 +2132,8 @@ store_beep(struct device *dev, struct device_attribute *attr, const char *buf,
data->beeps |= (1ULL << nr);
else
data->beeps &= ~(1ULL << nr);
- nct6775_write_value(data, data->REG_BEEP[regindex],
- (data->beeps >> (regindex << 3)) & 0xff);
+ data->write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2037,8 +2188,8 @@ store_temp_beep(struct device *dev, struct device_attribute *attr,
data->beeps |= (1ULL << bit);
else
data->beeps &= ~(1ULL << bit);
- nct6775_write_value(data, data->REG_BEEP[regindex],
- (data->beeps >> (regindex << 3)) & 0xff);
+ data->write_value(data, data->REG_BEEP[regindex],
+ (data->beeps >> (regindex << 3)) & 0xff);
mutex_unlock(&data->update_lock);
return count;
@@ -2205,7 +2356,7 @@ write_div:
}
write_min:
- nct6775_write_value(data, data->REG_FAN_MIN[nr], data->fan_min[nr]);
+ data->write_value(data, data->REG_FAN_MIN[nr], data->fan_min[nr]);
mutex_unlock(&data->update_lock);
return count;
@@ -2241,10 +2392,10 @@ store_fan_pulses(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->fan_pulses[nr] = val & 3;
- reg = nct6775_read_value(data, data->REG_FAN_PULSES[nr]);
+ reg = data->read_value(data, data->REG_FAN_PULSES[nr]);
reg &= ~(0x03 << data->FAN_PULSE_SHIFT[nr]);
reg |= (val & 3) << data->FAN_PULSE_SHIFT[nr];
- nct6775_write_value(data, data->REG_FAN_PULSES[nr], reg);
+ data->write_value(data, data->REG_FAN_PULSES[nr], reg);
mutex_unlock(&data->update_lock);
return count;
@@ -2378,7 +2529,7 @@ store_temp_offset(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->temp_offset[nr] = val;
- nct6775_write_value(data, data->REG_TEMP_OFFSET[nr], val);
+ data->write_value(data, data->REG_TEMP_OFFSET[nr], val);
mutex_unlock(&data->update_lock);
return count;
@@ -2417,8 +2568,8 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
data->temp_type[nr] = val;
vbit = 0x02 << nr;
dbit = data->DIODE_MASK << nr;
- vbat = nct6775_read_value(data, data->REG_VBAT) & ~vbit;
- diode = nct6775_read_value(data, data->REG_DIODE) & ~dbit;
+ vbat = data->read_value(data, data->REG_VBAT) & ~vbit;
+ diode = data->read_value(data, data->REG_DIODE) & ~dbit;
switch (val) {
case 1: /* CPU diode (diode, current mode) */
vbat |= vbit;
@@ -2430,8 +2581,8 @@ store_temp_type(struct device *dev, struct device_attribute *attr,
case 4: /* thermistor */
break;
}
- nct6775_write_value(data, data->REG_VBAT, vbat);
- nct6775_write_value(data, data->REG_DIODE, diode);
+ data->write_value(data, data->REG_VBAT, vbat);
+ data->write_value(data, data->REG_DIODE, diode);
mutex_unlock(&data->update_lock);
return count;
@@ -2555,11 +2706,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->pwm_mode[nr] = val;
- reg = nct6775_read_value(data, data->REG_PWM_MODE[nr]);
+ reg = data->read_value(data, data->REG_PWM_MODE[nr]);
reg &= ~data->PWM_MODE_MASK[nr];
if (!val)
reg |= data->PWM_MODE_MASK[nr];
- nct6775_write_value(data, data->REG_PWM_MODE[nr], reg);
+ data->write_value(data, data->REG_PWM_MODE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2578,7 +2729,7 @@ show_pwm(struct device *dev, struct device_attribute *attr, char *buf)
* Otherwise, show the configured value.
*/
if (index == 0 && data->pwm_enable[nr] > manual)
- pwm = nct6775_read_value(data, data->REG_PWM_READ[nr]);
+ pwm = data->read_value(data, data->REG_PWM_READ[nr]);
else
pwm = data->pwm[index][nr];
@@ -2607,13 +2758,13 @@ store_pwm(struct device *dev, struct device_attribute *attr, const char *buf,
mutex_lock(&data->update_lock);
data->pwm[index][nr] = val;
- nct6775_write_value(data, data->REG_PWM[index][nr], val);
+ data->write_value(data, data->REG_PWM[index][nr], val);
if (index == 2) { /* floor: disable if val == 0 */
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[nr]);
reg &= 0x7f;
if (val)
reg |= 0x80;
- nct6775_write_value(data, data->REG_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_TEMP_SEL[nr], reg);
}
mutex_unlock(&data->update_lock);
return count;
@@ -2652,29 +2803,29 @@ static void pwm_update_registers(struct nct6775_data *data, int nr)
case manual:
break;
case speed_cruise:
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
(data->target_speed_tolerance[nr] & data->tolerance_mask);
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
- nct6775_write_value(data, data->REG_TARGET[nr],
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_TARGET[nr],
data->target_speed[nr] & 0xff);
if (data->REG_TOLERANCE_H) {
reg = (data->target_speed[nr] >> 8) & 0x0f;
reg |= (data->target_speed_tolerance[nr] & 0x38) << 1;
- nct6775_write_value(data,
- data->REG_TOLERANCE_H[nr],
- reg);
+ data->write_value(data,
+ data->REG_TOLERANCE_H[nr],
+ reg);
}
break;
case thermal_cruise:
- nct6775_write_value(data, data->REG_TARGET[nr],
- data->target_temp[nr]);
+ data->write_value(data, data->REG_TARGET[nr],
+ data->target_temp[nr]);
fallthrough;
default:
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg = (reg & ~data->tolerance_mask) |
data->temp_tolerance[0][nr];
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
break;
}
}
@@ -2722,13 +2873,13 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
* turn off pwm control: select manual mode, set pwm to maximum
*/
data->pwm[0][nr] = 255;
- nct6775_write_value(data, data->REG_PWM[0][nr], 255);
+ data->write_value(data, data->REG_PWM[0][nr], 255);
}
pwm_update_registers(data, nr);
- reg = nct6775_read_value(data, data->REG_FAN_MODE[nr]);
+ reg = data->read_value(data, data->REG_FAN_MODE[nr]);
reg &= 0x0f;
reg |= pwm_enable_to_reg(val) << 4;
- nct6775_write_value(data, data->REG_FAN_MODE[nr], reg);
+ data->write_value(data, data->REG_FAN_MODE[nr], reg);
mutex_unlock(&data->update_lock);
return count;
}
@@ -2781,10 +2932,10 @@ store_pwm_temp_sel(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
src = data->temp_src[val - 1];
data->pwm_temp_sel[nr] = src;
- reg = nct6775_read_value(data, data->REG_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_TEMP_SEL[nr]);
reg &= 0xe0;
reg |= src;
- nct6775_write_value(data, data->REG_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_TEMP_SEL[nr], reg);
mutex_unlock(&data->update_lock);
return count;
@@ -2826,15 +2977,15 @@ store_pwm_weight_temp_sel(struct device *dev, struct device_attribute *attr,
if (val) {
src = data->temp_src[val - 1];
data->pwm_weight_temp_sel[nr] = src;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
reg &= 0xe0;
reg |= (src | 0x80);
- nct6775_write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
} else {
data->pwm_weight_temp_sel[nr] = 0;
- reg = nct6775_read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
+ reg = data->read_value(data, data->REG_WEIGHT_TEMP_SEL[nr]);
reg &= 0x7f;
- nct6775_write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
+ data->write_value(data, data->REG_WEIGHT_TEMP_SEL[nr], reg);
}
mutex_unlock(&data->update_lock);
@@ -2946,9 +3097,9 @@ store_temp_tolerance(struct device *dev, struct device_attribute *attr,
if (index)
pwm_update_registers(data, nr);
else
- nct6775_write_value(data,
- data->REG_CRITICAL_TEMP_TOLERANCE[nr],
- val);
+ data->write_value(data,
+ data->REG_CRITICAL_TEMP_TOLERANCE[nr],
+ val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3071,7 +3222,7 @@ store_weight_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->weight_temp[index][nr] = val;
- nct6775_write_value(data, data->REG_WEIGHT_TEMP[index][nr], val);
+ data->write_value(data, data->REG_WEIGHT_TEMP[index][nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3120,7 +3271,7 @@ store_fan_time(struct device *dev, struct device_attribute *attr,
val = step_time_to_reg(val, data->pwm_mode[nr]);
mutex_lock(&data->update_lock);
data->fan_time[index][nr] = val;
- nct6775_write_value(data, data->REG_FAN_TIME[index][nr], val);
+ data->write_value(data, data->REG_FAN_TIME[index][nr], val);
mutex_unlock(&data->update_lock);
return count;
}
@@ -3162,21 +3313,21 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->auto_pwm[nr][point] = val;
if (point < data->auto_pwm_num) {
- nct6775_write_value(data,
+ data->write_value(data,
NCT6775_AUTO_PWM(data, nr, point),
data->auto_pwm[nr][point]);
} else {
switch (data->kind) {
case nct6775:
/* disable if needed (pwm == 0) */
- reg = nct6775_read_value(data,
- NCT6775_REG_CRITICAL_ENAB[nr]);
+ reg = data->read_value(data,
+ NCT6775_REG_CRITICAL_ENAB[nr]);
if (val)
reg |= 0x02;
else
reg &= ~0x02;
- nct6775_write_value(data, NCT6775_REG_CRITICAL_ENAB[nr],
- reg);
+ data->write_value(data, NCT6775_REG_CRITICAL_ENAB[nr],
+ reg);
break;
case nct6776:
break; /* always enabled, nothing to do */
@@ -3190,17 +3341,17 @@ store_auto_pwm(struct device *dev, struct device_attribute *attr,
case nct6796:
case nct6797:
case nct6798:
- nct6775_write_value(data, data->REG_CRITICAL_PWM[nr],
+ data->write_value(data, data->REG_CRITICAL_PWM[nr],
val);
- reg = nct6775_read_value(data,
+ reg = data->read_value(data,
data->REG_CRITICAL_PWM_ENABLE[nr]);
if (val == 255)
reg &= ~data->CRITICAL_PWM_ENABLE_MASK;
else
reg |= data->CRITICAL_PWM_ENABLE_MASK;
- nct6775_write_value(data,
- data->REG_CRITICAL_PWM_ENABLE[nr],
- reg);
+ data->write_value(data,
+ data->REG_CRITICAL_PWM_ENABLE[nr],
+ reg);
break;
}
}
@@ -3243,11 +3394,11 @@ store_auto_temp(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
data->auto_temp[nr][point] = DIV_ROUND_CLOSEST(val, 1000);
if (point < data->auto_pwm_num) {
- nct6775_write_value(data,
+ data->write_value(data,
NCT6775_AUTO_TEMP(data, nr, point),
data->auto_temp[nr][point]);
} else {
- nct6775_write_value(data, data->REG_CRITICAL_TEMP[nr],
+ data->write_value(data, data->REG_CRITICAL_TEMP[nr],
data->auto_temp[nr][point]);
}
mutex_unlock(&data->update_lock);
@@ -3410,6 +3561,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct nct6775_data *data = dev_get_drvdata(dev);
+ struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
int nr = to_sensor_dev_attr(attr)->index - INTRUSION_ALARM_BASE;
unsigned long val;
u8 reg;
@@ -3425,19 +3577,19 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
* The CR registers are the same for all chips, and not all chips
* support clearing the caseopen status through "regular" registers.
*/
- ret = superio_enter(data->sioreg);
+ ret = sio_data->sio_enter(sio_data);
if (ret) {
count = ret;
goto error;
}
- superio_select(data->sioreg, NCT6775_LD_ACPI);
- reg = superio_inb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
+ sio_data->sio_select(sio_data, NCT6775_LD_ACPI);
+ reg = sio_data->sio_inb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr]);
reg |= NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
reg &= ~NCT6775_CR_CASEOPEN_CLR_MASK[nr];
- superio_outb(data->sioreg, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
- superio_exit(data->sioreg);
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_CASEOPEN_CLR[nr], reg);
+ sio_data->sio_exit(sio_data);
data->valid = false; /* Force cache refresh */
error:
@@ -3506,9 +3658,9 @@ static inline void nct6775_init_device(struct nct6775_data *data)
/* Start monitoring if needed */
if (data->REG_CONFIG) {
- tmp = nct6775_read_value(data, data->REG_CONFIG);
+ tmp = data->read_value(data, data->REG_CONFIG);
if (!(tmp & 0x01))
- nct6775_write_value(data, data->REG_CONFIG, tmp | 0x01);
+ data->write_value(data, data->REG_CONFIG, tmp | 0x01);
}
/* Enable temperature sensors if needed */
@@ -3517,18 +3669,18 @@ static inline void nct6775_init_device(struct nct6775_data *data)
continue;
if (!data->reg_temp_config[i])
continue;
- tmp = nct6775_read_value(data, data->reg_temp_config[i]);
+ tmp = data->read_value(data, data->reg_temp_config[i]);
if (tmp & 0x01)
- nct6775_write_value(data, data->reg_temp_config[i],
+ data->write_value(data, data->reg_temp_config[i],
tmp & 0xfe);
}
/* Enable VBAT monitoring if needed */
- tmp = nct6775_read_value(data, data->REG_VBAT);
+ tmp = data->read_value(data, data->REG_VBAT);
if (!(tmp & 0x01))
- nct6775_write_value(data, data->REG_VBAT, tmp | 0x01);
+ data->write_value(data, data->REG_VBAT, tmp | 0x01);
- diode = nct6775_read_value(data, data->REG_DIODE);
+ diode = data->read_value(data, data->REG_DIODE);
for (i = 0; i < data->temp_fixed_num; i++) {
if (!(data->have_temp_fixed & BIT(i)))
@@ -3542,29 +3694,28 @@ static inline void nct6775_init_device(struct nct6775_data *data)
}
static void
-nct6775_check_fan_inputs(struct nct6775_data *data)
+nct6775_check_fan_inputs(struct nct6775_data *data, struct nct6775_sio_data *sio_data)
{
bool fan3pin = false, fan4pin = false, fan4min = false;
bool fan5pin = false, fan6pin = false, fan7pin = false;
bool pwm3pin = false, pwm4pin = false, pwm5pin = false;
bool pwm6pin = false, pwm7pin = false;
- int sioreg = data->sioreg;
/* Store SIO_REG_ENABLE for use during resume */
- superio_select(sioreg, NCT6775_LD_HWM);
- data->sio_reg_enable = superio_inb(sioreg, SIO_REG_ENABLE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ data->sio_reg_enable = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
/* fan4 and fan5 share some pins with the GPIO and serial flash */
if (data->kind == nct6775) {
- int cr2c = superio_inb(sioreg, 0x2c);
+ int cr2c = sio_data->sio_inb(sio_data, 0x2c);
fan3pin = cr2c & BIT(6);
pwm3pin = cr2c & BIT(7);
/* On NCT6775, fan4 shares pins with the fdc interface */
- fan4pin = !(superio_inb(sioreg, 0x2A) & 0x80);
+ fan4pin = !(sio_data->sio_inb(sio_data, 0x2A) & 0x80);
} else if (data->kind == nct6776) {
- bool gpok = superio_inb(sioreg, 0x27) & 0x80;
+ bool gpok = sio_data->sio_inb(sio_data, 0x27) & 0x80;
const char *board_vendor, *board_name;
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -3580,7 +3731,7 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
if (!strcmp(board_name, "Z77 Pro4-M")) {
if ((data->sio_reg_enable & 0xe0) != 0xe0) {
data->sio_reg_enable |= 0xe0;
- superio_outb(sioreg, SIO_REG_ENABLE,
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE,
data->sio_reg_enable);
}
}
@@ -3589,32 +3740,32 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
if (data->sio_reg_enable & 0x80)
fan3pin = gpok;
else
- fan3pin = !(superio_inb(sioreg, 0x24) & 0x40);
+ fan3pin = !(sio_data->sio_inb(sio_data, 0x24) & 0x40);
if (data->sio_reg_enable & 0x40)
fan4pin = gpok;
else
- fan4pin = superio_inb(sioreg, 0x1C) & 0x01;
+ fan4pin = sio_data->sio_inb(sio_data, 0x1C) & 0x01;
if (data->sio_reg_enable & 0x20)
fan5pin = gpok;
else
- fan5pin = superio_inb(sioreg, 0x1C) & 0x02;
+ fan5pin = sio_data->sio_inb(sio_data, 0x1C) & 0x02;
fan4min = fan4pin;
pwm3pin = fan3pin;
} else if (data->kind == nct6106) {
- int cr24 = superio_inb(sioreg, 0x24);
+ int cr24 = sio_data->sio_inb(sio_data, 0x24);
fan3pin = !(cr24 & 0x80);
pwm3pin = cr24 & 0x08;
} else if (data->kind == nct6116) {
- int cr1a = superio_inb(sioreg, 0x1a);
- int cr1b = superio_inb(sioreg, 0x1b);
- int cr24 = superio_inb(sioreg, 0x24);
- int cr2a = superio_inb(sioreg, 0x2a);
- int cr2b = superio_inb(sioreg, 0x2b);
- int cr2f = superio_inb(sioreg, 0x2f);
+ int cr1a = sio_data->sio_inb(sio_data, 0x1a);
+ int cr1b = sio_data->sio_inb(sio_data, 0x1b);
+ int cr24 = sio_data->sio_inb(sio_data, 0x24);
+ int cr2a = sio_data->sio_inb(sio_data, 0x2a);
+ int cr2b = sio_data->sio_inb(sio_data, 0x2b);
+ int cr2f = sio_data->sio_inb(sio_data, 0x2f);
fan3pin = !(cr2b & 0x10);
fan4pin = (cr2b & 0x80) || // pin 1(2)
@@ -3630,24 +3781,24 @@ nct6775_check_fan_inputs(struct nct6775_data *data)
* NCT6779D, NCT6791D, NCT6792D, NCT6793D, NCT6795D, NCT6796D,
* NCT6797D, NCT6798D
*/
- int cr1a = superio_inb(sioreg, 0x1a);
- int cr1b = superio_inb(sioreg, 0x1b);
- int cr1c = superio_inb(sioreg, 0x1c);
- int cr1d = superio_inb(sioreg, 0x1d);
- int cr2a = superio_inb(sioreg, 0x2a);
- int cr2b = superio_inb(sioreg, 0x2b);
- int cr2d = superio_inb(sioreg, 0x2d);
- int cr2f = superio_inb(sioreg, 0x2f);
+ int cr1a = sio_data->sio_inb(sio_data, 0x1a);
+ int cr1b = sio_data->sio_inb(sio_data, 0x1b);
+ int cr1c = sio_data->sio_inb(sio_data, 0x1c);
+ int cr1d = sio_data->sio_inb(sio_data, 0x1d);
+ int cr2a = sio_data->sio_inb(sio_data, 0x2a);
+ int cr2b = sio_data->sio_inb(sio_data, 0x2b);
+ int cr2d = sio_data->sio_inb(sio_data, 0x2d);
+ int cr2f = sio_data->sio_inb(sio_data, 0x2f);
bool dsw_en = cr2f & BIT(3);
bool ddr4_en = cr2f & BIT(4);
int cre0;
int creb;
int cred;
- superio_select(sioreg, NCT6775_LD_12);
- cre0 = superio_inb(sioreg, 0xe0);
- creb = superio_inb(sioreg, 0xeb);
- cred = superio_inb(sioreg, 0xed);
+ sio_data->sio_select(sio_data, NCT6775_LD_12);
+ cre0 = sio_data->sio_inb(sio_data, 0xe0);
+ creb = sio_data->sio_inb(sio_data, 0xeb);
+ cred = sio_data->sio_inb(sio_data, 0xed);
fan3pin = !(cr1c & BIT(5));
fan4pin = !(cr1c & BIT(6));
@@ -3774,7 +3925,7 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
if (!regp[i])
continue;
- src = nct6775_read_value(data, regp[i]);
+ src = data->read_value(data, regp[i]);
src &= 0x1f;
if (!src || (*mask & BIT(src)))
continue;
@@ -3782,7 +3933,7 @@ static void add_temp_sensors(struct nct6775_data *data, const u16 *regp,
continue;
index = __ffs(*available);
- nct6775_write_value(data, data->REG_TEMP_SOURCE[index], src);
+ data->write_value(data, data->REG_TEMP_SOURCE[index], src);
*available &= ~BIT(index);
*mask |= BIT(src);
}
@@ -3805,10 +3956,12 @@ static int nct6775_probe(struct platform_device *pdev)
struct device *hwmon_dev;
int num_attr_groups = 0;
- res = platform_get_resource(pdev, IORESOURCE_IO, 0);
- if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
- DRVNAME))
- return -EBUSY;
+ if (sio_data->access == access_direct) {
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!devm_request_region(&pdev->dev, res->start, IOREGION_LENGTH,
+ DRVNAME))
+ return -EBUSY;
+ }
data = devm_kzalloc(&pdev->dev, sizeof(struct nct6775_data),
GFP_KERNEL);
@@ -3817,7 +3970,16 @@ static int nct6775_probe(struct platform_device *pdev)
data->kind = sio_data->kind;
data->sioreg = sio_data->sioreg;
- data->addr = res->start;
+
+ if (sio_data->access == access_direct) {
+ data->addr = res->start;
+ data->read_value = nct6775_read_value;
+ data->write_value = nct6775_write_value;
+ } else {
+ data->read_value = nct6775_wmi_read_value;
+ data->write_value = nct6775_wmi_write_value;
+ }
+
mutex_init(&data->update_lock);
data->name = nct6775_device_names[data->kind];
data->bank = 0xff; /* Force initial bank selection */
@@ -4337,7 +4499,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
if (!src || (mask & BIT(src)))
available |= BIT(i);
@@ -4357,7 +4519,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SOURCE[i]) & 0x1f;
if (!src || (mask & BIT(src)))
continue;
@@ -4417,7 +4579,7 @@ static int nct6775_probe(struct platform_device *pdev)
if (reg_temp_mon[i] == 0)
continue;
- src = nct6775_read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
+ src = data->read_value(data, data->REG_TEMP_SEL[i]) & 0x1f;
if (!src)
continue;
@@ -4502,11 +4664,11 @@ static int nct6775_probe(struct platform_device *pdev)
/* Initialize the chip */
nct6775_init_device(data);
- err = superio_enter(sio_data->sioreg);
+ err = sio_data->sio_enter(sio_data);
if (err)
return err;
- cr2a = superio_inb(sio_data->sioreg, 0x2a);
+ cr2a = sio_data->sio_inb(sio_data, 0x2a);
switch (data->kind) {
case nct6775:
data->have_vid = (cr2a & 0x40);
@@ -4532,17 +4694,17 @@ static int nct6775_probe(struct platform_device *pdev)
* We can get the VID input values directly at logical device D 0xe3.
*/
if (data->have_vid) {
- superio_select(sio_data->sioreg, NCT6775_LD_VID);
- data->vid = superio_inb(sio_data->sioreg, 0xe3);
+ sio_data->sio_select(sio_data, NCT6775_LD_VID);
+ data->vid = sio_data->sio_inb(sio_data, 0xe3);
data->vrm = vid_which_vrm();
}
if (fan_debounce) {
u8 tmp;
- superio_select(sio_data->sioreg, NCT6775_LD_HWM);
- tmp = superio_inb(sio_data->sioreg,
- NCT6775_REG_CR_FAN_DEBOUNCE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ tmp = sio_data->sio_inb(sio_data,
+ NCT6775_REG_CR_FAN_DEBOUNCE);
switch (data->kind) {
case nct6106:
case nct6116:
@@ -4565,15 +4727,15 @@ static int nct6775_probe(struct platform_device *pdev)
tmp |= 0x7e;
break;
}
- superio_outb(sio_data->sioreg, NCT6775_REG_CR_FAN_DEBOUNCE,
+ sio_data->sio_outb(sio_data, NCT6775_REG_CR_FAN_DEBOUNCE,
tmp);
dev_info(&pdev->dev, "Enabled fan debounce for chip %s\n",
data->name);
}
- nct6775_check_fan_inputs(data);
+ nct6775_check_fan_inputs(data, sio_data);
- superio_exit(sio_data->sioreg);
+ sio_data->sio_exit(sio_data);
/* Read fan clock dividers immediately */
nct6775_init_fan_common(dev, data);
@@ -4613,15 +4775,15 @@ static int nct6775_probe(struct platform_device *pdev)
return PTR_ERR_OR_ZERO(hwmon_dev);
}
-static void nct6791_enable_io_mapping(int sioaddr)
+static void nct6791_enable_io_mapping(struct nct6775_sio_data *sio_data)
{
int val;
- val = superio_inb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
+ val = sio_data->sio_inb(sio_data, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE);
if (val & 0x10) {
pr_info("Enabling hardware monitor logical device mappings.\n");
- superio_outb(sioaddr, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
- val & ~0x10);
+ sio_data->sio_outb(sio_data, NCT6791_REG_HM_IO_SPACE_LOCK_ENABLE,
+ val & ~0x10);
}
}
@@ -4630,10 +4792,10 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
struct nct6775_data *data = nct6775_update_device(dev);
mutex_lock(&data->update_lock);
- data->vbat = nct6775_read_value(data, data->REG_VBAT);
+ data->vbat = data->read_value(data, data->REG_VBAT);
if (data->kind == nct6775) {
- data->fandiv1 = nct6775_read_value(data, NCT6775_REG_FANDIV1);
- data->fandiv2 = nct6775_read_value(data, NCT6775_REG_FANDIV2);
+ data->fandiv1 = data->read_value(data, NCT6775_REG_FANDIV1);
+ data->fandiv2 = data->read_value(data, NCT6775_REG_FANDIV2);
}
mutex_unlock(&data->update_lock);
@@ -4643,47 +4805,47 @@ static int __maybe_unused nct6775_suspend(struct device *dev)
static int __maybe_unused nct6775_resume(struct device *dev)
{
struct nct6775_data *data = dev_get_drvdata(dev);
- int sioreg = data->sioreg;
+ struct nct6775_sio_data *sio_data = dev_get_platdata(dev);
int i, j, err = 0;
u8 reg;
mutex_lock(&data->update_lock);
data->bank = 0xff; /* Force initial bank selection */
- err = superio_enter(sioreg);
+ err = sio_data->sio_enter(sio_data);
if (err)
goto abort;
- superio_select(sioreg, NCT6775_LD_HWM);
- reg = superio_inb(sioreg, SIO_REG_ENABLE);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ reg = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
if (reg != data->sio_reg_enable)
- superio_outb(sioreg, SIO_REG_ENABLE, data->sio_reg_enable);
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE, data->sio_reg_enable);
if (data->kind == nct6791 || data->kind == nct6792 ||
data->kind == nct6793 || data->kind == nct6795 ||
data->kind == nct6796 || data->kind == nct6797 ||
data->kind == nct6798)
- nct6791_enable_io_mapping(sioreg);
+ nct6791_enable_io_mapping(sio_data);
- superio_exit(sioreg);
+ sio_data->sio_exit(sio_data);
/* Restore limits */
for (i = 0; i < data->in_num; i++) {
if (!(data->have_in & BIT(i)))
continue;
- nct6775_write_value(data, data->REG_IN_MINMAX[0][i],
- data->in[i][1]);
- nct6775_write_value(data, data->REG_IN_MINMAX[1][i],
- data->in[i][2]);
+ data->write_value(data, data->REG_IN_MINMAX[0][i],
+ data->in[i][1]);
+ data->write_value(data, data->REG_IN_MINMAX[1][i],
+ data->in[i][2]);
}
for (i = 0; i < ARRAY_SIZE(data->fan_min); i++) {
if (!(data->has_fan_min & BIT(i)))
continue;
- nct6775_write_value(data, data->REG_FAN_MIN[i],
- data->fan_min[i]);
+ data->write_value(data, data->REG_FAN_MIN[i],
+ data->fan_min[i]);
}
for (i = 0; i < NUM_TEMP; i++) {
@@ -4697,10 +4859,10 @@ static int __maybe_unused nct6775_resume(struct device *dev)
}
/* Restore other settings */
- nct6775_write_value(data, data->REG_VBAT, data->vbat);
+ data->write_value(data, data->REG_VBAT, data->vbat);
if (data->kind == nct6775) {
- nct6775_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
- nct6775_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
+ data->write_value(data, NCT6775_REG_FANDIV1, data->fandiv1);
+ data->write_value(data, NCT6775_REG_FANDIV2, data->fandiv2);
}
abort:
@@ -4728,12 +4890,15 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
int err;
int addr;
- err = superio_enter(sioaddr);
+ sio_data->access = access_direct;
+ sio_data->sioreg = sioaddr;
+
+ err = sio_data->sio_enter(sio_data);
if (err)
return err;
- val = (superio_inb(sioaddr, SIO_REG_DEVID) << 8) |
- superio_inb(sioaddr, SIO_REG_DEVID + 1);
+ val = (sio_data->sio_inb(sio_data, SIO_REG_DEVID) << 8) |
+ sio_data->sio_inb(sio_data, SIO_REG_DEVID + 1);
if (force_id && val != 0xffff)
val = force_id;
@@ -4777,38 +4942,37 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
default:
if (val != 0xffff)
pr_debug("unsupported chip ID: 0x%04x\n", val);
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
return -ENODEV;
}
/* We have a known chip, find the HWM I/O address */
- superio_select(sioaddr, NCT6775_LD_HWM);
- val = (superio_inb(sioaddr, SIO_REG_ADDR) << 8)
- | superio_inb(sioaddr, SIO_REG_ADDR + 1);
+ sio_data->sio_select(sio_data, NCT6775_LD_HWM);
+ val = (sio_data->sio_inb(sio_data, SIO_REG_ADDR) << 8)
+ | sio_data->sio_inb(sio_data, SIO_REG_ADDR + 1);
addr = val & IOREGION_ALIGNMENT;
if (addr == 0) {
pr_err("Refusing to enable a Super-I/O device with a base I/O port 0\n");
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
return -ENODEV;
}
/* Activate logical device if needed */
- val = superio_inb(sioaddr, SIO_REG_ENABLE);
+ val = sio_data->sio_inb(sio_data, SIO_REG_ENABLE);
if (!(val & 0x01)) {
pr_warn("Forcibly enabling Super-I/O. Sensor is probably unusable.\n");
- superio_outb(sioaddr, SIO_REG_ENABLE, val | 0x01);
+ sio_data->sio_outb(sio_data, SIO_REG_ENABLE, val | 0x01);
}
if (sio_data->kind == nct6791 || sio_data->kind == nct6792 ||
sio_data->kind == nct6793 || sio_data->kind == nct6795 ||
sio_data->kind == nct6796 || sio_data->kind == nct6797 ||
sio_data->kind == nct6798)
- nct6791_enable_io_mapping(sioaddr);
+ nct6791_enable_io_mapping(sio_data);
- superio_exit(sioaddr);
+ sio_data->sio_exit(sio_data);
pr_info("Found %s or compatible chip at %#x:%#x\n",
nct6775_sio_names[sio_data->kind], sioaddr, addr);
- sio_data->sioreg = sioaddr;
return addr;
}
@@ -4821,6 +4985,34 @@ static int __init nct6775_find(int sioaddr, struct nct6775_sio_data *sio_data)
*/
static struct platform_device *pdev[2];
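+/*
+ * Boards on which the driver will try to reach the sensor chip through
+ * Asus WMI instead of direct port I/O.
+ */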
+static const char * const asus_wmi_boards[] = {
+ "ProArt X570-CREATOR WIFI",
+ "Pro WS X570-ACE",
+ "PRIME B360-PLUS",
+ "PRIME B460-PLUS",
+ "PRIME X570-PRO",
+ "ROG CROSSHAIR VIII DARK HERO",
+ "ROG CROSSHAIR VIII FORMULA",
+ "ROG CROSSHAIR VIII HERO",
+ "ROG CROSSHAIR VIII IMPACT",
+ "ROG STRIX B550-E GAMING",
+ "ROG STRIX B550-F GAMING",
+ "ROG STRIX B550-F GAMING (WI-FI)",
+ "ROG STRIX B550-I GAMING",
+ "ROG STRIX X570-F GAMING",
+ "ROG STRIX Z390-E GAMING",
+ "ROG STRIX Z490-I GAMING",
+ "TUF GAMING B550M-PLUS",
+ "TUF GAMING B550M-PLUS (WI-FI)",
+ "TUF GAMING B550-PLUS",
+ "TUF GAMING B550-PRO",
+ "TUF GAMING X570-PLUS",
+ "TUF GAMING X570-PLUS (WI-FI)",
+ "TUF GAMING X570-PRO (WI-FI)",
+ "TUF GAMING Z490-PLUS",
+ "TUF GAMING Z490-PLUS (WI-FI)",
+};
+
static int __init sensors_nct6775_init(void)
{
int i, err;
@@ -4829,11 +5021,32 @@ static int __init sensors_nct6775_init(void)
struct resource res;
struct nct6775_sio_data sio_data;
int sioaddr[2] = { 0x2e, 0x4e };
+ enum sensor_access access = access_direct;
+ const char *board_vendor, *board_name;
+ u8 tmp;
err = platform_driver_register(&nct6775_driver);
if (err)
return err;
+ board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
+ board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (board_name && board_vendor &&
+ !strcmp(board_vendor, "ASUSTeK COMPUTER INC.")) {
+ err = match_string(asus_wmi_boards, ARRAY_SIZE(asus_wmi_boards),
+ board_name);
+ if (err >= 0) {
+ /* if reading chip id via WMI succeeds, use WMI */
+ if (!nct6775_asuswmi_read(0, NCT6775_PORT_CHIPID, &tmp)) {
+ pr_info("Using Asus WMI to access %#x chip.\n", tmp);
+ access = access_asuswmi;
+ } else {
+ pr_err("Can't read ChipID by Asus WMI.\n");
+ }
+ }
+ }
+
/*
* initialize sio_data->kind and sio_data->sioreg.
*
@@ -4842,12 +5055,28 @@ static int __init sensors_nct6775_init(void)
* nct6775 hardware monitor, and call probe()
*/
for (i = 0; i < ARRAY_SIZE(pdev); i++) {
+ sio_data.sio_outb = superio_outb;
+ sio_data.sio_inb = superio_inb;
+ sio_data.sio_select = superio_select;
+ sio_data.sio_enter = superio_enter;
+ sio_data.sio_exit = superio_exit;
+
address = nct6775_find(sioaddr[i], &sio_data);
if (address <= 0)
continue;
found = true;
+ sio_data.access = access;
+
+ if (access == access_asuswmi) {
+ sio_data.sio_outb = superio_wmi_outb;
+ sio_data.sio_inb = superio_wmi_inb;
+ sio_data.sio_select = superio_wmi_select;
+ sio_data.sio_enter = superio_wmi_enter;
+ sio_data.sio_exit = superio_wmi_exit;
+ }
+
pdev[i] = platform_device_alloc(DRVNAME, address);
if (!pdev[i]) {
err = -ENOMEM;
@@ -4859,23 +5088,25 @@ static int __init sensors_nct6775_init(void)
if (err)
goto exit_device_put;
- memset(&res, 0, sizeof(res));
- res.name = DRVNAME;
- res.start = address + IOREGION_OFFSET;
- res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
- res.flags = IORESOURCE_IO;
+ if (sio_data.access == access_direct) {
+ memset(&res, 0, sizeof(res));
+ res.name = DRVNAME;
+ res.start = address + IOREGION_OFFSET;
+ res.end = address + IOREGION_OFFSET + IOREGION_LENGTH - 1;
+ res.flags = IORESOURCE_IO;
+
+ err = acpi_check_resource_conflict(&res);
+ if (err) {
+ platform_device_put(pdev[i]);
+ pdev[i] = NULL;
+ continue;
+ }
- err = acpi_check_resource_conflict(&res);
- if (err) {
- platform_device_put(pdev[i]);
- pdev[i] = NULL;
- continue;
+ err = platform_device_add_resources(pdev[i], &res, 1);
+ if (err)
+ goto exit_device_put;
}
- err = platform_device_add_resources(pdev[i], &res, 1);
- if (err)
- goto exit_device_put;
-
/* platform_device_add calls probe() */
err = platform_device_add(pdev[i]);
if (err)
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 604af2f6103a..d1eeef02b6dc 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -52,6 +52,23 @@ static const u8 REG_VOLTAGE_LIMIT_MSB_SHIFT[2][5] = {
#define REG_VERSION_ID 0xff
/*
+ * Resistance temperature detector (RTD) modes according to 7.2.32 Mode
+ * Selection Register
+ */
+#define RTD_MODE_CURRENT 0x1
+#define RTD_MODE_THERMISTOR 0x2
+#define RTD_MODE_VOLTAGE 0x3
+
+#define MODE_RTD_MASK 0x3
+#define MODE_LTD_EN 0x40
+
+/*
+ * Bit offset for sensor modes in REG_MODE.
+ * Valid for index 0..2, indicating RTD1..3.
+ */
+#define MODE_BIT_OFFSET_RTD(index) ((index) * 2)
+
+/*
* Data structures and manipulation thereof
*/
@@ -1038,7 +1055,114 @@ static const struct regmap_config nct7802_regmap_config = {
.volatile_reg = nct7802_regmap_is_volatile,
};
-static int nct7802_init_chip(struct nct7802_data *data)
+static int nct7802_get_channel_config(struct device *dev,
+ struct device_node *node, u8 *mode_mask,
+ u8 *mode_val)
+{
+ u32 reg;
+ const char *type_str, *md_str;
+ u8 md;
+
+ if (!node->name || of_node_cmp(node->name, "channel"))
+ return 0;
+
+ if (of_property_read_u32(node, "reg", &reg)) {
+ dev_err(dev, "Could not read reg value for '%s'\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg > 3) {
+ dev_err(dev, "Invalid reg (%u) in '%s'\n", reg,
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg == 0) {
+ if (!of_device_is_available(node))
+ *mode_val &= ~MODE_LTD_EN;
+ else
+ *mode_val |= MODE_LTD_EN;
+ *mode_mask |= MODE_LTD_EN;
+ return 0;
+ }
+
+ /* At this point we have reg >= 1 && reg <= 3 */
+
+ if (!of_device_is_available(node)) {
+ *mode_val &= ~(MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1));
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+ return 0;
+ }
+
+ if (of_property_read_string(node, "sensor-type", &type_str)) {
+ dev_err(dev, "No type for '%s'\n", node->full_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(type_str, "voltage")) {
+ *mode_val |= (RTD_MODE_VOLTAGE & MODE_RTD_MASK)
+ << MODE_BIT_OFFSET_RTD(reg - 1);
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+ return 0;
+ }
+
+ if (strcmp(type_str, "temperature")) {
+ dev_err(dev, "Invalid type '%s' for '%s'\n", type_str,
+ node->full_name);
+ return -EINVAL;
+ }
+
+ if (reg == 3) {
+ /* RTD3 only supports thermistor mode */
+ md = RTD_MODE_THERMISTOR;
+ } else {
+ if (of_property_read_string(node, "temperature-mode",
+ &md_str)) {
+ dev_err(dev, "No mode for '%s'\n", node->full_name);
+ return -EINVAL;
+ }
+
+ if (!strcmp(md_str, "thermal-diode"))
+ md = RTD_MODE_CURRENT;
+ else if (!strcmp(md_str, "thermistor"))
+ md = RTD_MODE_THERMISTOR;
+ else {
+ dev_err(dev, "Invalid mode '%s' for '%s'\n", md_str,
+ node->full_name);
+ return -EINVAL;
+ }
+ }
+
+ *mode_val |= (md & MODE_RTD_MASK) << MODE_BIT_OFFSET_RTD(reg - 1);
+ *mode_mask |= MODE_RTD_MASK << MODE_BIT_OFFSET_RTD(reg - 1);
+
+ return 0;
+}
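+/*
+ * Illustrative only: a channel node accepted by the parser above could look
+ * like this (property names taken from the reads in nct7802_get_channel_config):
+ *
+ *	channel@2 {
+ *		reg = <2>;
+ *		sensor-type = "temperature";
+ *		temperature-mode = "thermistor";
+ *	};
+ */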
+
+static int nct7802_configure_channels(struct device *dev,
+ struct nct7802_data *data)
+{
+ /* Enable local temperature sensor by default */
+ u8 mode_mask = MODE_LTD_EN, mode_val = MODE_LTD_EN;
+ struct device_node *node;
+ int err;
+
+ if (dev->of_node) {
+ for_each_child_of_node(dev->of_node, node) {
+ err = nct7802_get_channel_config(dev, node, &mode_mask,
+ &mode_val);
+ if (err) {
+ of_node_put(node);
+ return err;
+ }
+ }
+ }
+
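+ /* Bits outside mode_mask keep their current hardware value, so unconfigured RTD channels are left untouched */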
+ return regmap_update_bits(data->regmap, REG_MODE, mode_mask, mode_val);
+}
+
+static int nct7802_init_chip(struct device *dev, struct nct7802_data *data)
{
int err;
@@ -1047,8 +1171,7 @@ static int nct7802_init_chip(struct nct7802_data *data)
if (err)
return err;
- /* Enable local temperature sensor */
- err = regmap_update_bits(data->regmap, REG_MODE, 0x40, 0x40);
+ err = nct7802_configure_channels(dev, data);
if (err)
return err;
@@ -1074,7 +1197,7 @@ static int nct7802_probe(struct i2c_client *client)
mutex_init(&data->access_lock);
mutex_init(&data->in_alarm_lock);
- ret = nct7802_init_chip(data);
+ ret = nct7802_init_chip(dev, data);
if (ret < 0)
return ret;
diff --git a/drivers/hwmon/pc87360.c b/drivers/hwmon/pc87360.c
index 6a9ba23cd302..0828436a1f6c 100644
--- a/drivers/hwmon/pc87360.c
+++ b/drivers/hwmon/pc87360.c
@@ -178,7 +178,7 @@ struct pc87360_data {
struct device *hwmon_dev;
struct mutex lock;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
int address[3];
@@ -1673,7 +1673,7 @@ static struct pc87360_data *pc87360_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c
index 53f7d1418bc9..e3294a1a54bb 100644
--- a/drivers/hwmon/pmbus/ibm-cffps.c
+++ b/drivers/hwmon/pmbus/ibm-cffps.c
@@ -18,6 +18,7 @@
#include "pmbus.h"
+#define CFFPS_MFG_ID_CMD 0x99
#define CFFPS_FRU_CMD 0x9A
#define CFFPS_PN_CMD 0x9B
#define CFFPS_HEADER_CMD 0x9C
@@ -34,7 +35,7 @@
#define CFFPS_INPUT_HISTORY_SIZE 100
#define CFFPS_CCIN_REVISION GENMASK(7, 0)
-#define CFFPS_CCIN_REVISION_LEGACY 0xde
+#define CFFPS_CCIN_REVISION_LEGACY 0xde
#define CFFPS_CCIN_VERSION GENMASK(15, 8)
#define CFFPS_CCIN_VERSION_1 0x2b
#define CFFPS_CCIN_VERSION_2 0x2e
@@ -57,6 +58,7 @@
enum {
CFFPS_DEBUGFS_INPUT_HISTORY = 0,
+ CFFPS_DEBUGFS_MFG_ID,
CFFPS_DEBUGFS_FRU,
CFFPS_DEBUGFS_PN,
CFFPS_DEBUGFS_HEADER,
@@ -158,6 +160,9 @@ static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf,
switch (idx) {
case CFFPS_DEBUGFS_INPUT_HISTORY:
return ibm_cffps_read_input_history(psu, buf, count, ppos);
+ case CFFPS_DEBUGFS_MFG_ID:
+ cmd = CFFPS_MFG_ID_CMD;
+ break;
case CFFPS_DEBUGFS_FRU:
cmd = CFFPS_FRU_CMD;
break;
@@ -503,16 +508,27 @@ static int ibm_cffps_probe(struct i2c_client *client)
u16 ccin_revision = 0;
u16 ccin_version = CFFPS_CCIN_VERSION_1;
int ccin = i2c_smbus_read_word_swapped(client, CFFPS_CCIN_CMD);
+ char mfg_id[I2C_SMBUS_BLOCK_MAX + 2] = { 0 };
if (ccin > 0) {
ccin_revision = FIELD_GET(CFFPS_CCIN_REVISION, ccin);
ccin_version = FIELD_GET(CFFPS_CCIN_VERSION, ccin);
}
+ rc = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, mfg_id);
+ if (rc < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return rc;
+ }
+
switch (ccin_version) {
default:
case CFFPS_CCIN_VERSION_1:
- vs = cffps1;
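+ /* Among version-1 CCIN supplies, only those reporting an ACBE or ARTE manufacturer ID use the cffps1 handling */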
+ if ((strncmp(mfg_id, "ACBE", 4) == 0) ||
+ (strncmp(mfg_id, "ARTE", 4) == 0))
+ vs = cffps1;
+ else
+ vs = cffps2;
break;
case CFFPS_CCIN_VERSION_2:
vs = cffps2;
@@ -564,6 +580,9 @@ static int ibm_cffps_probe(struct i2c_client *client)
debugfs_create_file("input_history", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_INPUT_HISTORY],
&ibm_cffps_fops);
+ debugfs_create_file("mfg_id", 0444, ibm_cffps_dir,
+ &psu->debugfs_entries[CFFPS_DEBUGFS_MFG_ID],
+ &ibm_cffps_fops);
debugfs_create_file("fru", 0444, ibm_cffps_dir,
&psu->debugfs_entries[CFFPS_DEBUGFS_FRU],
&ibm_cffps_fops);
diff --git a/drivers/hwmon/pmbus/lm25066.c b/drivers/hwmon/pmbus/lm25066.c
index d209e0afc2ca..8402b41520eb 100644
--- a/drivers/hwmon/pmbus/lm25066.c
+++ b/drivers/hwmon/pmbus/lm25066.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
+#include <linux/of_device.h>
#include "pmbus.h"
enum chips { lm25056, lm25066, lm5064, lm5066, lm5066i };
@@ -51,26 +52,31 @@ struct __coeff {
#define PSC_CURRENT_IN_L (PSC_NUM_CLASSES)
#define PSC_POWER_L (PSC_NUM_CLASSES + 1)
-static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
+static const struct __coeff lm25066_coeff[][PSC_NUM_CLASSES + 2] = {
[lm25056] = {
[PSC_VOLTAGE_IN] = {
.m = 16296,
+ .b = 1343,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 13797,
+ .b = -1833,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 6726,
+ .b = -537,
.R = -2,
},
[PSC_POWER] = {
.m = 5501,
+ .b = -2908,
.R = -3,
},
[PSC_POWER_L] = {
.m = 26882,
+ .b = -5646,
.R = -4,
},
[PSC_TEMPERATURE] = {
@@ -82,26 +88,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm25066] = {
[PSC_VOLTAGE_IN] = {
.m = 22070,
+ .b = -1800,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 22070,
+ .b = -1800,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 13661,
+ .b = -5200,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
- .m = 6852,
+ .m = 6854,
+ .b = -3100,
.R = -2,
},
[PSC_POWER] = {
.m = 736,
+ .b = -3300,
.R = -2,
},
[PSC_POWER_L] = {
.m = 369,
+ .b = -1900,
.R = -2,
},
[PSC_TEMPERATURE] = {
@@ -111,26 +123,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm5064] = {
[PSC_VOLTAGE_IN] = {
.m = 4611,
+ .b = -642,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 4621,
+ .b = 423,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 10742,
+ .b = 1552,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 5456,
+ .b = 2118,
.R = -2,
},
[PSC_POWER] = {
.m = 1204,
+ .b = 8524,
.R = -3,
},
[PSC_POWER_L] = {
.m = 612,
+ .b = 11202,
.R = -3,
},
[PSC_TEMPERATURE] = {
@@ -140,26 +158,32 @@ static struct __coeff lm25066_coeff[6][PSC_NUM_CLASSES + 2] = {
[lm5066] = {
[PSC_VOLTAGE_IN] = {
.m = 4587,
+ .b = -1200,
.R = -2,
},
[PSC_VOLTAGE_OUT] = {
.m = 4587,
+ .b = -2400,
.R = -2,
},
[PSC_CURRENT_IN] = {
.m = 10753,
+ .b = -1200,
.R = -2,
},
[PSC_CURRENT_IN_L] = {
.m = 5405,
+ .b = -600,
.R = -2,
},
[PSC_POWER] = {
.m = 1204,
+ .b = -6000,
.R = -3,
},
[PSC_POWER_L] = {
.m = 605,
+ .b = -8000,
.R = -3,
},
[PSC_TEMPERATURE] = {
@@ -211,8 +235,6 @@ struct lm25066_data {
#define to_lm25066_data(x) container_of(x, struct lm25066_data, info)
-static const struct i2c_device_id lm25066_id[];
-
static int lm25066_read_word_data(struct i2c_client *client, int page,
int phase, int reg)
{
@@ -413,12 +435,35 @@ static int lm25066_write_word_data(struct i2c_client *client, int page, int reg,
return ret;
}
+static const struct i2c_device_id lm25066_id[] = {
+ {"lm25056", lm25056},
+ {"lm25066", lm25066},
+ {"lm5064", lm5064},
+ {"lm5066", lm5066},
+ {"lm5066i", lm5066i},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, lm25066_id);
+
+static const struct of_device_id __maybe_unused lm25066_of_match[] = {
+ { .compatible = "ti,lm25056", .data = (void *)lm25056, },
+ { .compatible = "ti,lm25066", .data = (void *)lm25066, },
+ { .compatible = "ti,lm5064", .data = (void *)lm5064, },
+ { .compatible = "ti,lm5066", .data = (void *)lm5066, },
+ { .compatible = "ti,lm5066i", .data = (void *)lm5066i, },
+ { },
+};
+MODULE_DEVICE_TABLE(of, lm25066_of_match);
+
static int lm25066_probe(struct i2c_client *client)
{
int config;
+ u32 shunt;
struct lm25066_data *data;
struct pmbus_driver_info *info;
- struct __coeff *coeff;
+ const struct __coeff *coeff;
+ const struct of_device_id *of_id;
+ const struct i2c_device_id *i2c_id;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_BYTE_DATA))
@@ -433,7 +478,14 @@ static int lm25066_probe(struct i2c_client *client)
if (config < 0)
return config;
- data->id = i2c_match_id(lm25066_id, client)->driver_data;
+ i2c_id = i2c_match_id(lm25066_id, client);
+
+ of_id = of_match_device(lm25066_of_match, &client->dev);
+ if (of_id && (unsigned long)of_id->data != i2c_id->driver_data)
+ dev_notice(&client->dev, "Device mismatch: %s in device tree, %s detected\n",
+ of_id->name, i2c_id->name);
+
+ data->id = i2c_id->driver_data;
info = &data->info;
info->pages = 1;
@@ -483,25 +535,25 @@ static int lm25066_probe(struct i2c_client *client)
info->b[PSC_POWER] = coeff[PSC_POWER].b;
}
- return pmbus_do_probe(client, info);
-}
+ /*
+ * Values in the TI datasheets are normalized for a 1mOhm sense
+ * resistor; assume that unless DT specifies a value explicitly.
+ */
+ if (of_property_read_u32(client->dev.of_node, "shunt-resistor-micro-ohms", &shunt))
+ shunt = 1000;
-static const struct i2c_device_id lm25066_id[] = {
- {"lm25056", lm25056},
- {"lm25066", lm25066},
- {"lm5064", lm5064},
- {"lm5066", lm5066},
- {"lm5066i", lm5066i},
- { }
-};
+ info->m[PSC_CURRENT_IN] = info->m[PSC_CURRENT_IN] * shunt / 1000;
+ info->m[PSC_POWER] = info->m[PSC_POWER] * shunt / 1000;
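+ /* e.g. shunt-resistor-micro-ohms = <500> halves the current and power slope coefficients */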
-MODULE_DEVICE_TABLE(i2c, lm25066_id);
+ return pmbus_do_probe(client, info);
+}
/* This is the driver that will be inserted */
static struct i2c_driver lm25066_driver = {
.driver = {
.name = "lm25066",
- },
+ .of_match_table = of_match_ptr(lm25066_of_match),
+ },
.probe_new = lm25066_probe,
.id_table = lm25066_id,
};
diff --git a/drivers/hwmon/raspberrypi-hwmon.c b/drivers/hwmon/raspberrypi-hwmon.c
index 805d396aa81b..573f53d52912 100644
--- a/drivers/hwmon/raspberrypi-hwmon.c
+++ b/drivers/hwmon/raspberrypi-hwmon.c
@@ -53,7 +53,7 @@ static void rpi_firmware_get_throttled(struct rpi_hwmon_data *data)
else
dev_info(data->hwmon_dev, "Voltage normalised\n");
- sysfs_notify(&data->hwmon_dev->kobj, NULL, "in0_lcrit_alarm");
+ hwmon_notify_event(data->hwmon_dev, hwmon_in, hwmon_in_lcrit_alarm, 0);
}
static void get_values_poll(struct work_struct *work)
diff --git a/drivers/hwmon/sch5636.c b/drivers/hwmon/sch5636.c
index a5cd4de36575..39ff1c9b1df5 100644
--- a/drivers/hwmon/sch5636.c
+++ b/drivers/hwmon/sch5636.c
@@ -56,7 +56,7 @@ struct sch5636_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[SCH5636_NO_INS];
u8 temp_val[SCH5636_NO_TEMPS];
@@ -140,7 +140,7 @@ static struct sch5636_data *sch5636_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
abort:
mutex_unlock(&data->update_lock);
return ret;
diff --git a/drivers/hwmon/sht21.c b/drivers/hwmon/sht21.c
index 7d18ce5d3839..e23dbf287233 100644
--- a/drivers/hwmon/sht21.c
+++ b/drivers/hwmon/sht21.c
@@ -41,7 +41,7 @@ struct sht21 {
unsigned long last_update;
int temperature;
int humidity;
- char valid;
+ bool valid;
char eic[18];
};
@@ -105,7 +105,7 @@ static int sht21_update_measurements(struct device *dev)
goto out;
sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
sht21->last_update = jiffies;
- sht21->valid = 1;
+ sht21->valid = true;
}
out:
mutex_unlock(&sht21->lock);
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 0c6741f949f5..018cb5a7651f 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -172,7 +172,7 @@ struct sis5595_data {
struct mutex lock;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
char maxins; /* == 3 if temp enabled, otherwise == 4 */
u8 revision; /* Reg. value */
@@ -728,7 +728,7 @@ static struct sis5595_data *sis5595_update_device(struct device *dev)
sis5595_read_value(data, SIS5595_REG_ALARM1) |
(sis5595_read_value(data, SIS5595_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smm665.c b/drivers/hwmon/smm665.c
index 62906d9c4b86..8c4ed72e5d68 100644
--- a/drivers/hwmon/smm665.c
+++ b/drivers/hwmon/smm665.c
@@ -265,7 +265,7 @@ static struct smm665_data *smm665_update_device(struct device *dev)
data->adc[i] = val;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index f928b8d4ff48..c26d6eae0e4e 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -96,7 +96,7 @@ struct smsc47b397_data {
struct mutex update_lock;
unsigned long last_updated; /* in jiffies */
- int valid;
+ bool valid;
/* register values */
u16 fan[4];
@@ -137,7 +137,7 @@ static struct smsc47b397_data *smsc47b397_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
dev_dbg(dev, "... device update complete\n");
}
diff --git a/drivers/hwmon/smsc47m192.c b/drivers/hwmon/smsc47m192.c
index 03a87aa2017a..a5db15c087ae 100644
--- a/drivers/hwmon/smsc47m192.c
+++ b/drivers/hwmon/smsc47m192.c
@@ -86,7 +86,7 @@ struct smsc47m192_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[8]; /* Register value */
@@ -157,7 +157,7 @@ static struct smsc47m192_data *smsc47m192_update_device(struct device *dev)
SMSC47M192_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/thmc50.c b/drivers/hwmon/thmc50.c
index fde5e2d0825a..6a804f5036f4 100644
--- a/drivers/hwmon/thmc50.c
+++ b/drivers/hwmon/thmc50.c
@@ -62,7 +62,7 @@ struct thmc50_data {
enum chips type;
unsigned long last_updated; /* In jiffies */
char has_temp3; /* !=0 if it is ADM1022 in temp3 mode */
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
/* Register values */
s8 temp_input[3];
@@ -107,7 +107,7 @@ static struct thmc50_data *thmc50_update_device(struct device *dev)
data->alarms =
i2c_smbus_read_byte_data(client, THMC50_REG_INTR);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/tmp103.c b/drivers/hwmon/tmp103.c
index a7e202cc8323..5cab4436aa77 100644
--- a/drivers/hwmon/tmp103.c
+++ b/drivers/hwmon/tmp103.c
@@ -51,51 +51,92 @@ static inline u8 tmp103_mc_to_reg(int val)
return DIV_ROUND_CLOSEST(val, 1000);
}
-static ssize_t tmp103_temp_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int tmp103_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
unsigned int regval;
- int ret;
+ int err, reg;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ reg = TMP103_TEMP_REG;
+ break;
+ case hwmon_temp_min:
+ reg = TMP103_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP103_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- ret = regmap_read(regmap, sda->index, &regval);
- if (ret < 0)
- return ret;
+ err = regmap_read(regmap, reg, &regval);
+ if (err < 0)
+ return err;
+
+ *temp = tmp103_reg_to_mc(regval);
- return sprintf(buf, "%d\n", tmp103_reg_to_mc(regval));
+ return 0;
}
-static ssize_t tmp103_temp_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
+static int tmp103_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
{
- struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
struct regmap *regmap = dev_get_drvdata(dev);
- long val;
- int ret;
-
- if (kstrtol(buf, 10, &val) < 0)
- return -EINVAL;
+ int reg;
+
+ switch (attr) {
+ case hwmon_temp_min:
+ reg = TMP103_TLOW_REG;
+ break;
+ case hwmon_temp_max:
+ reg = TMP103_THIGH_REG;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
- val = clamp_val(val, -55000, 127000);
- ret = regmap_write(regmap, sda->index, tmp103_mc_to_reg(val));
- return ret ? ret : count;
+ temp = clamp_val(temp, -55000, 127000);
+ return regmap_write(regmap, reg, tmp103_mc_to_reg(temp));
}
-static SENSOR_DEVICE_ATTR_RO(temp1_input, tmp103_temp, TMP103_TEMP_REG);
+static umode_t tmp103_is_visible(const void *data, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ case hwmon_temp_min:
+ case hwmon_temp_max:
+ return 0644;
+ default:
+ return 0;
+ }
+}
-static SENSOR_DEVICE_ATTR_RW(temp1_min, tmp103_temp, TMP103_TLOW_REG);
+static const struct hwmon_channel_info *tmp103_info[] = {
+ HWMON_CHANNEL_INFO(chip,
+ HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN),
+ NULL
+};
-static SENSOR_DEVICE_ATTR_RW(temp1_max, tmp103_temp, TMP103_THIGH_REG);
+static const struct hwmon_ops tmp103_hwmon_ops = {
+ .is_visible = tmp103_is_visible,
+ .read = tmp103_read,
+ .write = tmp103_write,
+};
-static struct attribute *tmp103_attrs[] = {
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- NULL
+static const struct hwmon_chip_info tmp103_chip_info = {
+ .ops = &tmp103_hwmon_ops,
+ .info = tmp103_info,
};
-ATTRIBUTE_GROUPS(tmp103);
static bool tmp103_regmap_is_volatile(struct device *dev, unsigned int reg)
{
@@ -130,8 +171,10 @@ static int tmp103_probe(struct i2c_client *client)
}
i2c_set_clientdata(client, regmap);
- hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
- regmap, tmp103_groups);
+ hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name,
+ regmap,
+ &tmp103_chip_info,
+ NULL);
return PTR_ERR_OR_ZERO(hwmon_dev);
}
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index 9dc210b55e69..b31f4964f852 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -34,7 +34,7 @@
static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
0x4e, 0x4f, I2C_CLIENT_END };
-enum chips { tmp401, tmp411, tmp431, tmp432, tmp435, tmp461 };
+enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
/*
* The TMP401 registers, note some registers have different addresses for
@@ -56,7 +56,6 @@ static const u8 TMP401_TEMP_MSB_READ[7][2] = {
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
- { 0, 0x11 }, /* offset */
};
static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
@@ -66,7 +65,6 @@ static const u8 TMP401_TEMP_MSB_WRITE[7][2] = {
{ 0x20, 0x19 }, /* therm (crit) limit */
{ 0x30, 0x34 }, /* lowest */
{ 0x32, 0x36 }, /* highest */
- { 0, 0x11 }, /* offset */
};
static const u8 TMP432_TEMP_MSB_READ[4][3] = {
@@ -123,7 +121,6 @@ static const struct i2c_device_id tmp401_id[] = {
{ "tmp431", tmp431 },
{ "tmp432", tmp432 },
{ "tmp435", tmp435 },
- { "tmp461", tmp461 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp401_id);
@@ -136,7 +133,7 @@ struct tmp401_data {
struct i2c_client *client;
const struct attribute_group *groups[3];
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
enum chips kind;
@@ -267,7 +264,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev)
data->temp_crit_hyst = val;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
abort:
@@ -413,7 +410,7 @@ static ssize_t reset_temp_history_store(struct device *dev,
}
mutex_lock(&data->update_lock);
i2c_smbus_write_byte_data(client, TMP401_TEMP_MSB_WRITE[5][0], val);
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
@@ -571,21 +568,6 @@ static const struct attribute_group tmp432_group = {
};
/*
- * Additional features of the TMP461 chip.
- * The TMP461 temperature offset for the remote channel.
- */
-static SENSOR_DEVICE_ATTR_2_RW(temp2_offset, temp, 6, 1);
-
-static struct attribute *tmp461_attributes[] = {
- &sensor_dev_attr_temp2_offset.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group tmp461_group = {
- .attrs = tmp461_attributes,
-};
-
-/*
* Begin non sysfs callback code (aka Real code)
*/
@@ -686,7 +668,7 @@ static int tmp401_detect(struct i2c_client *client,
static int tmp401_probe(struct i2c_client *client)
{
static const char * const names[] = {
- "TMP401", "TMP411", "TMP431", "TMP432", "TMP435", "TMP461"
+ "TMP401", "TMP411", "TMP431", "TMP432", "TMP435"
};
struct device *dev = &client->dev;
struct device *hwmon_dev;
@@ -717,9 +699,6 @@ static int tmp401_probe(struct i2c_client *client)
if (data->kind == tmp432)
data->groups[groups++] = &tmp432_group;
- if (data->kind == tmp461)
- data->groups[groups++] = &tmp461_group;
-
hwmon_dev = devm_hwmon_device_register_with_groups(dev, client->name,
data, data->groups);
if (IS_ERR(hwmon_dev))
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index b963a369c5ab..1fd8d41d90c8 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -29,15 +29,20 @@ static const unsigned short normal_i2c[] = { 0x2a, 0x4c, 0x4d, 0x4e, 0x4f,
enum chips { tmp421, tmp422, tmp423, tmp441, tmp442 };
+#define MAX_CHANNELS 4
/* The TMP421 registers */
#define TMP421_STATUS_REG 0x08
#define TMP421_CONFIG_REG_1 0x09
+#define TMP421_CONFIG_REG_2 0x0A
+#define TMP421_CONFIG_REG_REN(x) (BIT(3 + (x)))
+#define TMP421_CONFIG_REG_REN_MASK GENMASK(6, 3)
#define TMP421_CONVERSION_RATE_REG 0x0B
+#define TMP421_N_FACTOR_REG_1 0x21
#define TMP421_MANUFACTURER_ID_REG 0xFE
#define TMP421_DEVICE_ID_REG 0xFF
-static const u8 TMP421_TEMP_MSB[4] = { 0x00, 0x01, 0x02, 0x03 };
-static const u8 TMP421_TEMP_LSB[4] = { 0x10, 0x11, 0x12, 0x13 };
+static const u8 TMP421_TEMP_MSB[MAX_CHANNELS] = { 0x00, 0x01, 0x02, 0x03 };
+static const u8 TMP421_TEMP_LSB[MAX_CHANNELS] = { 0x10, 0x11, 0x12, 0x13 };
/* Flags */
#define TMP421_CONFIG_SHUTDOWN 0x40
@@ -86,18 +91,24 @@ static const struct of_device_id __maybe_unused tmp421_of_match[] = {
};
MODULE_DEVICE_TABLE(of, tmp421_of_match);
+struct tmp421_channel {
+ const char *label;
+ bool enabled;
+ s16 temp;
+};
+
struct tmp421_data {
struct i2c_client *client;
struct mutex update_lock;
- u32 temp_config[5];
+ u32 temp_config[MAX_CHANNELS + 1];
struct hwmon_channel_info temp_info;
const struct hwmon_channel_info *info[2];
struct hwmon_chip_info chip;
- char valid;
+ bool valid;
unsigned long last_updated;
unsigned long channels;
u8 config;
- s16 temp[4];
+ struct tmp421_channel channel[MAX_CHANNELS];
};
static int temp_from_raw(u16 reg, bool extended)
@@ -132,28 +143,56 @@ static int tmp421_update_device(struct tmp421_data *data)
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_MSB[i]);
if (ret < 0)
goto exit;
- data->temp[i] = ret << 8;
+ data->channel[i].temp = ret << 8;
ret = i2c_smbus_read_byte_data(client, TMP421_TEMP_LSB[i]);
if (ret < 0)
goto exit;
- data->temp[i] |= ret;
+ data->channel[i].temp |= ret;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
exit:
mutex_unlock(&data->update_lock);
if (ret < 0) {
- data->valid = 0;
+ data->valid = false;
return ret;
}
return 0;
}
+static int tmp421_enable_channels(struct tmp421_data *data)
+{
+ int err;
+ struct i2c_client *client = data->client;
+ struct device *dev = &client->dev;
+ int old = i2c_smbus_read_byte_data(client, TMP421_CONFIG_REG_2);
+ int new, i;
+
+ if (old < 0) {
+ dev_err(dev, "error reading register, can't disable channels\n");
+ return old;
+ }
+
+ new = old & ~TMP421_CONFIG_REG_REN_MASK;
+ for (i = 0; i < data->channels; i++)
+ if (data->channel[i].enabled)
+ new |= TMP421_CONFIG_REG_REN(i);
+
+ if (new == old)
+ return 0;
+
+ err = i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_2, new);
+ if (err < 0)
+ dev_err(dev, "error writing register, can't disable channels\n");
+
+ return err;
+}
+
static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
@@ -166,15 +205,22 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
switch (attr) {
case hwmon_temp_input:
- *val = temp_from_raw(tmp421->temp[channel],
+ if (!tmp421->channel[channel].enabled)
+ return -ENODATA;
+ *val = temp_from_raw(tmp421->channel[channel].temp,
tmp421->config & TMP421_CONFIG_RANGE);
return 0;
case hwmon_temp_fault:
+ if (!tmp421->channel[channel].enabled)
+ return -ENODATA;
/*
* Any of OPEN or /PVLD bits indicate a hardware malfunction
* and the conversion result may be incorrect
*/
- *val = !!(tmp421->temp[channel] & 0x03);
+ *val = !!(tmp421->channel[channel].temp & 0x03);
+ return 0;
+ case hwmon_temp_enable:
+ *val = tmp421->channel[channel].enabled;
return 0;
default:
return -EOPNOTSUPP;
@@ -182,6 +228,34 @@ static int tmp421_read(struct device *dev, enum hwmon_sensor_types type,
}
+static int tmp421_read_string(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, const char **str)
+{
+ struct tmp421_data *data = dev_get_drvdata(dev);
+
+ *str = data->channel[channel].label;
+
+ return 0;
+}
+
+static int tmp421_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct tmp421_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ switch (attr) {
+ case hwmon_temp_enable:
+ data->channel[channel].enabled = val;
+ ret = tmp421_enable_channels(data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -189,14 +263,19 @@ static umode_t tmp421_is_visible(const void *data, enum hwmon_sensor_types type,
case hwmon_temp_fault:
case hwmon_temp_input:
return 0444;
+ case hwmon_temp_label:
+ return 0444;
+ case hwmon_temp_enable:
+ return 0644;
default:
return 0;
}
}
-static int tmp421_init_client(struct i2c_client *client)
+static int tmp421_init_client(struct tmp421_data *data)
{
int config, config_orig;
+ struct i2c_client *client = data->client;
/* Set the conversion rate to 2 Hz */
i2c_smbus_write_byte_data(client, TMP421_CONVERSION_RATE_REG, 0x05);
@@ -217,7 +296,7 @@ static int tmp421_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, TMP421_CONFIG_REG_1, config);
}
- return 0;
+ return tmp421_enable_channels(data);
}
static int tmp421_detect(struct i2c_client *client,
@@ -281,9 +360,78 @@ static int tmp421_detect(struct i2c_client *client,
return 0;
}
+static int tmp421_probe_child_from_dt(struct i2c_client *client,
+ struct device_node *child,
+ struct tmp421_data *data)
+
+{
+ struct device *dev = &client->dev;
+ u32 i;
+ s32 val;
+ int err;
+
+ err = of_property_read_u32(child, "reg", &i);
+ if (err) {
+ dev_err(dev, "missing reg property of %pOFn\n", child);
+ return err;
+ }
+
+ if (i >= data->channels) {
+ dev_err(dev, "invalid reg %d of %pOFn\n", i, child);
+ return -EINVAL;
+ }
+
+ of_property_read_string(child, "label", &data->channel[i].label);
+ if (data->channel[i].label)
+ data->temp_config[i] |= HWMON_T_LABEL;
+
+ data->channel[i].enabled = of_device_is_available(child);
+
+ err = of_property_read_s32(child, "ti,n-factor", &val);
+ if (!err) {
+ if (i == 0) {
+ dev_err(dev, "n-factor can't be set for internal channel\n");
+ return -EINVAL;
+ }
+
+ if (val > 127 || val < -128) {
+ dev_err(dev, "n-factor for channel %d invalid (%d)\n",
+ i, val);
+ return -EINVAL;
+ }
+ i2c_smbus_write_byte_data(client, TMP421_N_FACTOR_REG_1 + i - 1,
+ val);
+ }
+
+ return 0;
+}
+
+static int tmp421_probe_from_dt(struct i2c_client *client, struct tmp421_data *data)
+{
+ struct device *dev = &client->dev;
+ const struct device_node *np = dev->of_node;
+ struct device_node *child;
+ int err;
+
+ for_each_child_of_node(np, child) {
+ if (strcmp(child->name, "channel"))
+ continue;
+
+ err = tmp421_probe_child_from_dt(client, child, data);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static const struct hwmon_ops tmp421_ops = {
.is_visible = tmp421_is_visible,
.read = tmp421_read,
+ .read_string = tmp421_read_string,
+ .write = tmp421_write,
};
static int tmp421_probe(struct i2c_client *client)
@@ -305,12 +453,18 @@ static int tmp421_probe(struct i2c_client *client)
data->channels = i2c_match_id(tmp421_id, client)->driver_data;
data->client = client;
- err = tmp421_init_client(client);
+ for (i = 0; i < data->channels; i++) {
+ data->temp_config[i] = HWMON_T_INPUT | HWMON_T_FAULT | HWMON_T_ENABLE;
+ data->channel[i].enabled = true;
+ }
+
+ err = tmp421_probe_from_dt(client, data);
if (err)
return err;
- for (i = 0; i < data->channels; i++)
- data->temp_config[i] = HWMON_T_INPUT | HWMON_T_FAULT;
+ err = tmp421_init_client(data);
+ if (err)
+ return err;
data->chip.ops = &tmp421_ops;
data->chip.info = data->info;
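For the channel-enable support added above, TMP421_CONFIG_REG_REN(x) maps channel x to bit 3 + x of config register 2, and TMP421_CONFIG_REG_REN_MASK covers bits 6..3. A standalone sketch of that bit arithmetic, with the kernel macros re-defined locally so it builds on its own; the register values are illustrative, not taken from a datasheet:

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define GENMASK(h, l)		(((~0U) << (l)) & (~0U >> (31 - (h))))
#define CONFIG_REG_REN(x)	BIT(3 + (x))
#define CONFIG_REG_REN_MASK	GENMASK(6, 3)

int main(void)
{
	unsigned int old = 0x78;		/* illustrative register value: all four channels enabled */
	unsigned int new;
	int enabled[4] = { 1, 0, 1, 0 };	/* keep channels 0 and 2, as a DT might request */
	int i;

	new = old & ~CONFIG_REG_REN_MASK;	/* clear the REN field, keep the other bits */
	for (i = 0; i < 4; i++)
		if (enabled[i])
			new |= CONFIG_REG_REN(i);

	printf("config2: 0x%02x -> 0x%02x\n", old, new);	/* 0x78 -> 0x28 */
	return 0;
}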
diff --git a/drivers/hwmon/via686a.c b/drivers/hwmon/via686a.c
index a2eddd2c2538..55634110c2f9 100644
--- a/drivers/hwmon/via686a.c
+++ b/drivers/hwmon/via686a.c
@@ -304,7 +304,7 @@ struct via686a_data {
const char *name;
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[5]; /* Register value */
@@ -800,7 +800,7 @@ static struct via686a_data *via686a_update_device(struct device *dev)
VIA686A_REG_ALARM1) |
(via686a_read_value(data, VIA686A_REG_ALARM2) << 8);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
index 2fbdc532aed4..4a5e911d26eb 100644
--- a/drivers/hwmon/vt1211.c
+++ b/drivers/hwmon/vt1211.c
@@ -105,7 +105,7 @@ struct vt1211_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
@@ -319,7 +319,7 @@ static struct vt1211_data *vt1211_update_device(struct device *dev)
vt1211_read8(data, VT1211_REG_ALARM1);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 6603727e15a0..03275ac8ba72 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ struct vt8231_data {
struct mutex update_lock;
struct device *hwmon_dev;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[6]; /* Register value */
@@ -929,7 +929,7 @@ static struct vt8231_data *vt8231_update_device(struct device *dev)
data->alarms &= ~0x80;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 705a59663d42..af89b32a93a5 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -320,7 +320,7 @@ struct w83627ehf_data {
const u16 *scale_in;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* Register values */
@@ -688,7 +688,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev)
W83627EHF_REG_CASEOPEN_DET);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
@@ -1099,7 +1099,7 @@ clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel,
reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask);
w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg & ~mask);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return 0;
@@ -2004,7 +2004,7 @@ static int __maybe_unused w83627ehf_resume(struct device *dev)
w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat);
/* Force re-reading all values */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return 0;
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index a07b97400cba..9be277156ed2 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -355,7 +355,7 @@ struct w83627hf_data {
enum chips type;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[9]; /* Register value */
@@ -448,7 +448,7 @@ static int w83627hf_resume(struct device *dev)
w83627hf_write_value(data, W83781D_REG_SCFG2, data->scfg2);
/* Force re-reading all values */
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return 0;
@@ -1905,7 +1905,7 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
w83627hf_read_value(data, W83781D_REG_BEEP_INTS1) |
w83627hf_read_value(data, W83781D_REG_BEEP_INTS3) << 16;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index ce8e2c10e854..b3579721265f 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -203,7 +203,7 @@ struct w83781d_data {
int isa_addr;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
struct i2c_client *lm75[2]; /* for secondary I2C addresses */
@@ -1554,7 +1554,7 @@ static struct w83781d_data *w83781d_update_device(struct device *dev)
W83781D_REG_BEEP_INTS3) << 16;
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 3c1be2c11fdf..80a9a78d7ce9 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -270,7 +270,7 @@ struct w83791d_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
/* volts */
@@ -1596,7 +1596,7 @@ static struct w83791d_data *w83791d_update_device(struct device *dev)
<< 4;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index 1f175f381350..31a1cdc30877 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -261,7 +261,7 @@ struct w83792d_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
u8 in[9]; /* Register value */
@@ -740,7 +740,7 @@ intrusion0_alarm_store(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
reg = w83792d_read_value(client, W83792D_REG_CHASSIS_CLR);
w83792d_write_value(client, W83792D_REG_CHASSIS_CLR, reg | 0x80);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
@@ -1589,7 +1589,7 @@ static struct w83792d_data *w83792d_update_device(struct device *dev)
}
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 1d2854de1cfc..0a65d164c8f0 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -204,7 +204,7 @@ static inline s8 TEMP_TO_REG(long val, s8 min, s8 max)
struct w83793_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we update the
* nonvolatile registers
@@ -452,7 +452,7 @@ store_chassis_clear(struct device *dev,
mutex_lock(&data->update_lock);
reg = w83793_read_value(client, W83793_REG_CLR_CHASSIS);
w83793_write_value(client, W83793_REG_CLR_CHASSIS, reg | 0x80);
- data->valid = 0; /* Force cache refresh */
+ data->valid = false; /* Force cache refresh */
mutex_unlock(&data->update_lock);
return count;
}
@@ -2077,7 +2077,7 @@ static struct w83793_data *w83793_update_device(struct device *dev)
data->vid[1] = w83793_read_value(client, W83793_REG_VID_INB);
w83793_update_nonvolatile(dev);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
END:
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 621b05afa837..45b12c4287df 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -379,7 +379,7 @@ struct w83795_data {
u8 enable_beep;
u8 beeps[6]; /* Register value */
- char valid;
+ bool valid;
char valid_limits;
char valid_pwm_config;
};
@@ -684,7 +684,7 @@ static struct w83795_data *w83795_update_device(struct device *dev)
tmp & ~ALARM_CTRL_RTSACS);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
END:
mutex_unlock(&data->update_lock);
@@ -764,7 +764,7 @@ store_chassis_clear(struct device *dev,
/* Clear status and force cache refresh */
w83795_read(client, W83795_REG_ALARM(5));
- data->valid = 0;
+ data->valid = false;
mutex_unlock(&data->update_lock);
return count;
}
diff --git a/drivers/hwmon/w83l785ts.c b/drivers/hwmon/w83l785ts.c
index 656a77102ca6..a41f989d66e2 100644
--- a/drivers/hwmon/w83l785ts.c
+++ b/drivers/hwmon/w83l785ts.c
@@ -98,7 +98,7 @@ static struct i2c_driver w83l785ts_driver = {
struct w83l785ts_data {
struct device *hwmon_dev;
struct mutex update_lock;
- char valid; /* zero until following fields are valid */
+ bool valid; /* false until following fields are valid */
unsigned long last_updated; /* in jiffies */
/* registers values */
@@ -270,7 +270,7 @@ static struct w83l785ts_data *w83l785ts_update_device(struct device *dev)
W83L785TS_REG_TEMP_OVER, data->temp[1]);
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 542afff1423b..11ba23c1af85 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -113,7 +113,7 @@ DIV_TO_REG(long val)
struct w83l786ng_data {
struct i2c_client *client;
struct mutex update_lock;
- char valid; /* !=0 if following fields are valid */
+ bool valid; /* true if following fields are valid */
unsigned long last_updated; /* In jiffies */
unsigned long last_nonvolatile; /* In jiffies, last time we update the
* nonvolatile registers */
@@ -209,7 +209,7 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
data->tolerance[1] = (reg_tmp >> 4) & 0x0f;
data->last_updated = jiffies;
- data->valid = 1;
+ data->valid = true;
}
diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c
index 382ef0395d8e..30aae8642069 100644
--- a/drivers/hwmon/xgene-hwmon.c
+++ b/drivers/hwmon/xgene-hwmon.c
@@ -93,6 +93,7 @@ struct slimpro_resp_msg {
struct xgene_hwmon_dev {
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
@@ -652,7 +653,7 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
goto out_mbox_free;
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version;
@@ -671,26 +672,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
cl->rx_callback = xgene_hwmon_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev,
"PPC channel request failed\n");
rc = -ENODEV;
goto out_mbox_free;
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENODEV;
- goto out;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -702,16 +693,16 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_HWMON_V2)
ctx->pcc_comm_addr = (void __force *)ioremap(
ctx->comm_base_addr,
- cppc_ss->length);
+ pcc_chan->shmem_size);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -727,11 +718,11 @@ static int xgene_hwmon_probe(struct platform_device *pdev)
}
/*
- * cppc_ss->latency is just a Nominal value. In reality
+ * pcc_chan->latency is just a Nominal value. In reality
* the remote processor could be much slower to reply.
* So add an arbitrary amount of wait on top of Nominal.
*/
- ctx->usecs_lat = PCC_NUM_RETRIES * cppc_ss->latency;
+ ctx->usecs_lat = PCC_NUM_RETRIES * pcc_chan->latency;
}
ctx->hwmon_dev = hwmon_device_register_with_groups(ctx->dev,
@@ -757,7 +748,7 @@ out:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
out_mbox_free:
kfifo_free(&ctx->async_msg_fifo);
@@ -773,7 +764,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c
index f10a603b13fb..1ed4daa918a0 100644
--- a/drivers/i2c/busses/i2c-virtio.c
+++ b/drivers/i2c/busses/i2c-virtio.c
@@ -63,34 +63,32 @@ static int virtio_i2c_prepare_reqs(struct virtqueue *vq,
int outcnt = 0, incnt = 0;
/*
- * We don't support 0 length messages and so filter out
- * 0 length transfers by using i2c_adapter_quirks.
- */
- if (!msgs[i].len)
- break;
-
- /*
* Only 7-bit mode is supported at the moment. For the address
* format, please check the Virtio I2C Specification.
*/
reqs[i].out_hdr.addr = cpu_to_le16(msgs[i].addr << 1);
+ if (msgs[i].flags & I2C_M_RD)
+ reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_M_RD);
+
if (i != num - 1)
- reqs[i].out_hdr.flags = cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
+ reqs[i].out_hdr.flags |= cpu_to_le32(VIRTIO_I2C_FLAGS_FAIL_NEXT);
sg_init_one(&out_hdr, &reqs[i].out_hdr, sizeof(reqs[i].out_hdr));
sgs[outcnt++] = &out_hdr;
- reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
- if (!reqs[i].buf)
- break;
+ if (msgs[i].len) {
+ reqs[i].buf = i2c_get_dma_safe_msg_buf(&msgs[i], 1);
+ if (!reqs[i].buf)
+ break;
- sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
+ sg_init_one(&msg_buf, reqs[i].buf, msgs[i].len);
- if (msgs[i].flags & I2C_M_RD)
- sgs[outcnt + incnt++] = &msg_buf;
- else
- sgs[outcnt++] = &msg_buf;
+ if (msgs[i].flags & I2C_M_RD)
+ sgs[outcnt + incnt++] = &msg_buf;
+ else
+ sgs[outcnt++] = &msg_buf;
+ }
sg_init_one(&in_hdr, &reqs[i].in_hdr, sizeof(reqs[i].in_hdr));
sgs[outcnt + incnt++] = &in_hdr;
@@ -191,7 +189,7 @@ static int virtio_i2c_setup_vqs(struct virtio_i2c *vi)
static u32 virtio_i2c_func(struct i2c_adapter *adap)
{
- return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static struct i2c_algorithm virtio_algorithm = {
@@ -199,15 +197,16 @@ static struct i2c_algorithm virtio_algorithm = {
.functionality = virtio_i2c_func,
};
-static const struct i2c_adapter_quirks virtio_i2c_quirks = {
- .flags = I2C_AQ_NO_ZERO_LEN,
-};
-
static int virtio_i2c_probe(struct virtio_device *vdev)
{
struct virtio_i2c *vi;
int ret;
+ if (!virtio_has_feature(vdev, VIRTIO_I2C_F_ZERO_LENGTH_REQUEST)) {
+ dev_err(&vdev->dev, "Zero-length request feature is mandatory\n");
+ return -EINVAL;
+ }
+
vi = devm_kzalloc(&vdev->dev, sizeof(*vi), GFP_KERNEL);
if (!vi)
return -ENOMEM;
@@ -225,7 +224,6 @@ static int virtio_i2c_probe(struct virtio_device *vdev)
snprintf(vi->adap.name, sizeof(vi->adap.name),
"i2c_virtio at virtio bus %d", vdev->index);
vi->adap.algo = &virtio_algorithm;
- vi->adap.quirks = &virtio_i2c_quirks;
vi->adap.dev.parent = &vdev->dev;
vi->adap.dev.of_node = vdev->dev.of_node;
i2c_set_adapdata(&vi->adap, vi);
@@ -270,11 +268,17 @@ static int virtio_i2c_restore(struct virtio_device *vdev)
}
#endif
+static const unsigned int features[] = {
+ VIRTIO_I2C_F_ZERO_LENGTH_REQUEST,
+};
+
static struct virtio_driver virtio_i2c_driver = {
- .id_table = id_table,
- .probe = virtio_i2c_probe,
- .remove = virtio_i2c_remove,
- .driver = {
+ .feature_table = features,
+ .feature_table_size = ARRAY_SIZE(features),
+ .id_table = id_table,
+ .probe = virtio_i2c_probe,
+ .remove = virtio_i2c_remove,
+ .driver = {
.name = "i2c_virtio",
},
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c
index bba08cbce6e1..1a19ebad60ad 100644
--- a/drivers/i2c/busses/i2c-xgene-slimpro.c
+++ b/drivers/i2c/busses/i2c-xgene-slimpro.c
@@ -103,6 +103,7 @@ struct slimpro_i2c_dev {
struct i2c_adapter adapter;
struct device *dev;
struct mbox_chan *mbox_chan;
+ struct pcc_mbox_chan *pcc_chan;
struct mbox_client mbox_client;
int mbox_idx;
struct completion rd_complete;
@@ -466,7 +467,7 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
return PTR_ERR(ctx->mbox_chan);
}
} else {
- struct acpi_pcct_hw_reduced *cppc_ss;
+ struct pcc_mbox_chan *pcc_chan;
const struct acpi_device_id *acpi_id;
int version = XGENE_SLIMPRO_I2C_V1;
@@ -483,24 +484,14 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
cl->tx_block = false;
cl->rx_callback = slimpro_i2c_pcc_rx_cb;
- ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
- if (IS_ERR(ctx->mbox_chan)) {
+ pcc_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+ if (IS_ERR(pcc_chan)) {
dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
- return PTR_ERR(ctx->mbox_chan);
+ return PTR_ERR(pcc_chan);
}
- /*
- * The PCC mailbox controller driver should
- * have parsed the PCCT (global table of all
- * PCC channels) and stored pointers to the
- * subspace communication region in con_priv.
- */
- cppc_ss = ctx->mbox_chan->con_priv;
- if (!cppc_ss) {
- dev_err(&pdev->dev, "PPC subspace not found\n");
- rc = -ENOENT;
- goto mbox_err;
- }
+ ctx->pcc_chan = pcc_chan;
+ ctx->mbox_chan = pcc_chan->mchan;
if (!ctx->mbox_chan->mbox->txdone_irq) {
dev_err(&pdev->dev, "PCC IRQ not supported\n");
@@ -512,17 +503,17 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
* This is the shared communication region
* for the OS and Platform to communicate over.
*/
- ctx->comm_base_addr = cppc_ss->base_address;
+ ctx->comm_base_addr = pcc_chan->shmem_base_addr;
if (ctx->comm_base_addr) {
if (version == XGENE_SLIMPRO_I2C_V2)
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WT);
else
ctx->pcc_comm_addr = memremap(
ctx->comm_base_addr,
- cppc_ss->length,
+ pcc_chan->shmem_size,
MEMREMAP_WB);
} else {
dev_err(&pdev->dev, "Failed to get PCC comm region\n");
@@ -561,7 +552,7 @@ mbox_err:
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return rc;
}
@@ -575,7 +566,7 @@ static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
if (acpi_disabled)
mbox_free_channel(ctx->mbox_chan);
else
- pcc_mbox_free_channel(ctx->mbox_chan);
+ pcc_mbox_free_channel(ctx->pcc_chan);
return 0;
}
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index e6c543b5ee1d..0b66e25c0e2d 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -89,6 +89,12 @@ static struct cpuidle_state *cpuidle_state_table __initdata;
static unsigned int mwait_substates __initdata;
/*
+ * Enable interrupts before entering the C-state. On some platforms and for
+ * some C-states, this may measurably decrease interrupt latency.
+ */
+#define CPUIDLE_FLAG_IRQ_ENABLE BIT(14)
+
+/*
* Enable this state by default even if the ACPI _CST does not list it.
*/
#define CPUIDLE_FLAG_ALWAYS_ENABLE BIT(15)
@@ -127,6 +133,9 @@ static __cpuidle int intel_idle(struct cpuidle_device *dev,
unsigned long eax = flg2MWAIT(state->flags);
unsigned long ecx = 1; /* break on interrupt flag */
+ if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE)
+ local_irq_enable();
+
mwait_idle_with_hints(eax, ecx);
return index;
@@ -698,7 +707,7 @@ static struct cpuidle_state skx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
- .flags = MWAIT2flg(0x00),
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 2,
.target_residency = 2,
.enter = &intel_idle,
@@ -727,7 +736,7 @@ static struct cpuidle_state icx_cstates[] __initdata = {
{
.name = "C1",
.desc = "MWAIT 0x00",
- .flags = MWAIT2flg(0x00),
+ .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_IRQ_ENABLE,
.exit_latency = 1,
.target_residency = 1,
.enter = &intel_idle,
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 704ce595542c..835ac54d4a24 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -453,7 +453,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
id_priv->id.device = cma_dev->device;
id_priv->id.route.addr.dev_addr.transport =
rdma_node_get_transport(cma_dev->device->node_type);
- list_add_tail(&id_priv->list, &cma_dev->id_list);
+ list_add_tail(&id_priv->device_item, &cma_dev->id_list);
trace_cm_id_attach(id_priv, cma_dev->device);
}
@@ -470,7 +470,7 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv,
static void cma_release_dev(struct rdma_id_private *id_priv)
{
mutex_lock(&lock);
- list_del(&id_priv->list);
+ list_del_init(&id_priv->device_item);
cma_dev_put(id_priv->cma_dev);
id_priv->cma_dev = NULL;
id_priv->id.device = NULL;
@@ -854,6 +854,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler,
init_completion(&id_priv->comp);
refcount_set(&id_priv->refcount, 1);
mutex_init(&id_priv->handler_mutex);
+ INIT_LIST_HEAD(&id_priv->device_item);
INIT_LIST_HEAD(&id_priv->listen_list);
INIT_LIST_HEAD(&id_priv->mc_list);
get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
@@ -1647,7 +1648,7 @@ static struct rdma_id_private *cma_find_listener(
return id_priv;
list_for_each_entry(id_priv_dev,
&id_priv->listen_list,
- listen_list) {
+ listen_item) {
if (id_priv_dev->id.device == cm_id->device &&
cma_match_net_dev(&id_priv_dev->id,
net_dev, req))
@@ -1756,14 +1757,15 @@ static void _cma_cancel_listens(struct rdma_id_private *id_priv)
* Remove from listen_any_list to prevent added devices from spawning
* additional listen requests.
*/
- list_del(&id_priv->list);
+ list_del_init(&id_priv->listen_any_item);
while (!list_empty(&id_priv->listen_list)) {
- dev_id_priv = list_entry(id_priv->listen_list.next,
- struct rdma_id_private, listen_list);
+ dev_id_priv =
+ list_first_entry(&id_priv->listen_list,
+ struct rdma_id_private, listen_item);
/* sync with device removal to avoid duplicate destruction */
- list_del_init(&dev_id_priv->list);
- list_del(&dev_id_priv->listen_list);
+ list_del_init(&dev_id_priv->device_item);
+ list_del_init(&dev_id_priv->listen_item);
mutex_unlock(&lock);
rdma_destroy_id(&dev_id_priv->id);
@@ -2564,7 +2566,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv,
ret = rdma_listen(&dev_id_priv->id, id_priv->backlog);
if (ret)
goto err_listen;
- list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
+ list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list);
return 0;
err_listen:
/* Caller must destroy this after releasing lock */
@@ -2580,13 +2582,13 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv)
int ret;
mutex_lock(&lock);
- list_add_tail(&id_priv->list, &listen_any_list);
+ list_add_tail(&id_priv->listen_any_item, &listen_any_list);
list_for_each_entry(cma_dev, &dev_list, list) {
ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
if (ret) {
/* Prevent racing with cma_process_remove() */
if (to_destroy)
- list_del_init(&to_destroy->list);
+ list_del_init(&to_destroy->device_item);
goto err_listen;
}
}
@@ -4895,7 +4897,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event,
mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list)
- list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+ list_for_each_entry(id_priv, &cma_dev->id_list, device_item) {
ret = cma_netdev_change(ndev, id_priv);
if (ret)
goto out;
@@ -4955,10 +4957,10 @@ static void cma_process_remove(struct cma_device *cma_dev)
mutex_lock(&lock);
while (!list_empty(&cma_dev->id_list)) {
struct rdma_id_private *id_priv = list_first_entry(
- &cma_dev->id_list, struct rdma_id_private, list);
+ &cma_dev->id_list, struct rdma_id_private, device_item);
- list_del(&id_priv->listen_list);
- list_del_init(&id_priv->list);
+ list_del_init(&id_priv->listen_item);
+ list_del_init(&id_priv->device_item);
cma_id_get(id_priv);
mutex_unlock(&lock);
@@ -5035,7 +5037,7 @@ static int cma_add_one(struct ib_device *device)
mutex_lock(&lock);
list_add_tail(&cma_dev->list, &dev_list);
- list_for_each_entry(id_priv, &listen_any_list, list) {
+ list_for_each_entry(id_priv, &listen_any_list, listen_any_item) {
ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy);
if (ret)
goto free_listen;
diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h
index f92f101ea981..757a0ef79872 100644
--- a/drivers/infiniband/core/cma_priv.h
+++ b/drivers/infiniband/core/cma_priv.h
@@ -55,8 +55,15 @@ struct rdma_id_private {
struct rdma_bind_list *bind_list;
struct hlist_node node;
- struct list_head list; /* listen_any_list or cma_device.list */
- struct list_head listen_list; /* per device listens */
+ union {
+ struct list_head device_item; /* On cma_device->id_list */
+ struct list_head listen_any_item; /* On listen_any_list */
+ };
+ union {
+ /* On rdma_id_private->listen_list */
+ struct list_head listen_item;
+ struct list_head listen_list;
+ };
struct cma_device *cma_dev;
struct list_head mc_list;
diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c
index df9e6c5e4ddf..af59486fe418 100644
--- a/drivers/infiniband/core/counters.c
+++ b/drivers/infiniband/core/counters.c
@@ -106,6 +106,38 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter,
return ret;
}
+int rdma_counter_modify(struct ib_device *dev, u32 port,
+ unsigned int index, bool enable)
+{
+ struct rdma_hw_stats *stats;
+ int ret = 0;
+
+ if (!dev->ops.modify_hw_stat)
+ return -EOPNOTSUPP;
+
+ stats = ib_get_hw_stats_port(dev, port);
+ if (!stats || index >= stats->num_counters ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ mutex_lock(&stats->lock);
+
+ if (enable != test_bit(index, stats->is_disabled))
+ goto out;
+
+ ret = dev->ops.modify_hw_stat(dev, port, index, enable);
+ if (ret)
+ goto out;
+
+ if (enable)
+ clear_bit(index, stats->is_disabled);
+ else
+ set_bit(index, stats->is_disabled);
+out:
+ mutex_unlock(&stats->lock);
+ return ret;
+}
+
static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
struct ib_qp *qp,
enum rdma_nl_counter_mode mode)
@@ -165,7 +197,7 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port,
return counter;
err_mode:
- kfree(counter->stats);
+ rdma_free_hw_stats_struct(counter->stats);
err_stats:
rdma_restrack_put(&counter->res);
kfree(counter);
@@ -186,7 +218,7 @@ static void rdma_counter_free(struct rdma_counter *counter)
mutex_unlock(&port_counter->lock);
rdma_restrack_del(&counter->res);
- kfree(counter->stats);
+ rdma_free_hw_stats_struct(counter->stats);
kfree(counter);
}
@@ -618,7 +650,7 @@ void rdma_counter_init(struct ib_device *dev)
fail:
for (i = port; i >= rdma_start_port(dev); i--) {
port_counter = &dev->port_data[port].port_counter;
- kfree(port_counter->hstats);
+ rdma_free_hw_stats_struct(port_counter->hstats);
port_counter->hstats = NULL;
mutex_destroy(&port_counter->lock);
}
@@ -631,7 +663,7 @@ void rdma_counter_release(struct ib_device *dev)
rdma_for_each_port(dev, port) {
port_counter = &dev->port_data[port].port_counter;
- kfree(port_counter->hstats);
+ rdma_free_hw_stats_struct(port_counter->hstats);
mutex_destroy(&port_counter->lock);
}
}
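The new rdma_counter_modify() above gates optional counters through the per-port is_disabled bitmap, and the dump paths skip any counter whose bit is set. A standalone sketch of that convention, with the kernel bit helpers re-implemented locally so it compiles on its own:

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static void set_bit(unsigned int n, unsigned long *map)
{
	map[n / BITS_PER_LONG] |= 1UL << (n % BITS_PER_LONG);
}

static void clear_bit(unsigned int n, unsigned long *map)
{
	map[n / BITS_PER_LONG] &= ~(1UL << (n % BITS_PER_LONG));
}

static int test_bit(unsigned int n, const unsigned long *map)
{
	return !!(map[n / BITS_PER_LONG] & (1UL << (n % BITS_PER_LONG)));
}

int main(void)
{
	unsigned long is_disabled[1] = { 0 };	/* set bit => optional counter hidden */
	unsigned int i, num_counters = 4;

	set_bit(2, is_disabled);		/* disable counter 2, as a modify(..., false) would */

	for (i = 0; i < num_counters; i++) {
		if (test_bit(i, is_disabled))	/* same skip as the hwcounter dump paths */
			continue;
		printf("report counter %u\n", i);
	}

	clear_bit(2, is_disabled);		/* re-enable the counter */
	return 0;
}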
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c
index f4814bb7f082..22a4adda7981 100644
--- a/drivers/infiniband/core/device.c
+++ b/drivers/infiniband/core/device.c
@@ -2676,6 +2676,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
SET_DEVICE_OP(dev_ops, modify_cq);
SET_DEVICE_OP(dev_ops, modify_device);
SET_DEVICE_OP(dev_ops, modify_flow_action_esp);
+ SET_DEVICE_OP(dev_ops, modify_hw_stat);
SET_DEVICE_OP(dev_ops, modify_port);
SET_DEVICE_OP(dev_ops, modify_qp);
SET_DEVICE_OP(dev_ops, modify_srq);
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 54f4feb604d8..358a2db38d23 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -762,7 +762,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version)
{
struct sk_buff *skb = NULL;
struct nlmsghdr *nlh;
- const char *err_str = "";
+ const char *err_str;
int ret = -EINVAL;
skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client);
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index e9b4b2cccaa0..fedc0fa6ebf9 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -154,6 +154,8 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = {
[RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 },
[RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 },
[RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 },
+ [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 },
};
static int put_driver_name_print_type(struct sk_buff *msg, const char *name,
@@ -968,14 +970,21 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg,
if (!table_attr)
return -EMSGSIZE;
- for (i = 0; i < st->num_counters; i++)
- if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i]))
+ mutex_lock(&st->lock);
+ for (i = 0; i < st->num_counters; i++) {
+ if (test_bit(i, st->is_disabled))
+ continue;
+ if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name,
+ st->value[i]))
goto err;
+ }
+ mutex_unlock(&st->lock);
nla_nest_end(msg, table_attr);
return 0;
err:
+ mutex_unlock(&st->lock);
nla_nest_cancel(msg, table_attr);
return -EMSGSIZE;
}
@@ -1888,24 +1897,111 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
return err;
}
+static int nldev_stat_set_mode_doit(struct sk_buff *msg,
+ struct netlink_ext_ack *extack,
+ struct nlattr *tb[],
+ struct ib_device *device, u32 port)
+{
+ u32 mode, mask = 0, qpn, cntn = 0;
+ int ret;
+
+ /* Currently only counter for QP is supported */
+ if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ return -EINVAL;
+
+ mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
+ if (mode == RDMA_COUNTER_MODE_AUTO) {
+ if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
+ mask = nla_get_u32(
+ tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
+ return rdma_counter_set_auto_mode(device, port, mask, extack);
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ return -EINVAL;
+
+ qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
+ if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
+ cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
+ ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
+ if (ret)
+ return ret;
+ } else {
+ ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn);
+ if (ret)
+ return ret;
+ }
+
+ if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
+ ret = -EMSGSIZE;
+ goto err_fill;
+ }
+
+ return 0;
+
+err_fill:
+ rdma_counter_unbind_qpn(device, port, qpn, cntn);
+ return ret;
+}
+
+static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[],
+ struct ib_device *device,
+ u32 port)
+{
+ struct rdma_hw_stats *stats;
+ int rem, i, index, ret = 0;
+ struct nlattr *entry_attr;
+ unsigned long *target;
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats)
+ return -EINVAL;
+
+ target = kcalloc(BITS_TO_LONGS(stats->num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!target)
+ return -ENOMEM;
+
+ nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS],
+ rem) {
+ index = nla_get_u32(entry_attr);
+ if ((index >= stats->num_counters) ||
+ !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_bit(index, target);
+ }
+
+ for (i = 0; i < stats->num_counters; i++) {
+ if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL))
+ continue;
+
+ ret = rdma_counter_modify(device, port, i, test_bit(i, target));
+ if (ret)
+ goto out;
+ }
+
+out:
+ kfree(target);
+ return ret;
+}
+
static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
- u32 index, port, mode, mask = 0, qpn, cntn = 0;
struct nlattr *tb[RDMA_NLDEV_ATTR_MAX];
struct ib_device *device;
struct sk_buff *msg;
+ u32 index, port;
int ret;
- ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
- nldev_policy, extack);
- /* Currently only counter for QP is supported */
- if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] ||
- !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
- !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE])
- return -EINVAL;
-
- if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP)
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy,
+ extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
return -EINVAL;
index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
@@ -1916,59 +2012,49 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
if (!rdma_is_port_valid(device, port)) {
ret = -EINVAL;
- goto err;
+ goto err_put_device;
+ }
+
+ if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] &&
+ !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = -EINVAL;
+ goto err_put_device;
}
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg) {
ret = -ENOMEM;
- goto err;
+ goto err_put_device;
}
nlh = nlmsg_put(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
RDMA_NL_GET_TYPE(RDMA_NL_NLDEV,
RDMA_NLDEV_CMD_STAT_SET),
0, 0);
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) {
+ ret = -EMSGSIZE;
+ goto err_free_msg;
+ }
- mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]);
- if (mode == RDMA_COUNTER_MODE_AUTO) {
- if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK])
- mask = nla_get_u32(
- tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]);
- ret = rdma_counter_set_auto_mode(device, port, mask, extack);
- if (ret)
- goto err_msg;
- } else {
- if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
- goto err_msg;
- qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
- if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
- cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
- ret = rdma_counter_bind_qpn(device, port, qpn, cntn);
- } else {
- ret = rdma_counter_bind_qpn_alloc(device, port,
- qpn, &cntn);
- }
+ if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) {
+ ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port);
if (ret)
- goto err_msg;
+ goto err_free_msg;
+ }
- if (fill_nldev_handle(msg, device) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) ||
- nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) {
- ret = -EMSGSIZE;
- goto err_fill;
- }
+ if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) {
+ ret = nldev_stat_set_counter_dynamic_doit(tb, device, port);
+ if (ret)
+ goto err_free_msg;
}
nlmsg_end(msg, nlh);
ib_device_put(device);
return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
-err_fill:
- rdma_counter_unbind_qpn(device, port, qpn, cntn);
-err_msg:
+err_free_msg:
nlmsg_free(msg);
-err:
+err_put_device:
ib_device_put(device);
return ret;
}
@@ -2103,9 +2189,13 @@ static int stat_get_doit_default_counter(struct sk_buff *skb,
goto err_stats;
}
for (i = 0; i < num_cnts; i++) {
+ if (test_bit(i, stats->is_disabled))
+ continue;
+
v = stats->value[i] +
rdma_counter_get_hwstat_value(device, port, i);
- if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) {
+ if (rdma_nl_stat_hwcounter_entry(msg,
+ stats->descs[i].name, v)) {
ret = -EMSGSIZE;
goto err_table;
}
@@ -2253,6 +2343,99 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb,
return ret;
}
+static int nldev_stat_get_counter_status_doit(struct sk_buff *skb,
+ struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry;
+ struct rdma_hw_stats *stats;
+ struct ib_device *device;
+ struct sk_buff *msg;
+ u32 devid, port;
+ int ret, i;
+
+ ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
+ nldev_policy, extack);
+ if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] ||
+ !tb[RDMA_NLDEV_ATTR_PORT_INDEX])
+ return -EINVAL;
+
+ devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]);
+ device = ib_device_get_by_index(sock_net(skb->sk), devid);
+ if (!device)
+ return -EINVAL;
+
+ port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]);
+ if (!rdma_is_port_valid(device, port)) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ stats = ib_get_hw_stats_port(device, port);
+ if (!stats) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ nlh = nlmsg_put(
+ msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq,
+ RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS),
+ 0, 0);
+
+ ret = -EMSGSIZE;
+ if (fill_nldev_handle(msg, device) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port))
+ goto err_msg;
+
+ table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
+ if (!table)
+ goto err_msg;
+
+ mutex_lock(&stats->lock);
+ for (i = 0; i < stats->num_counters; i++) {
+ entry = nla_nest_start(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY);
+ if (!entry)
+ goto err_msg_table;
+
+ if (nla_put_string(msg,
+ RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME,
+ stats->descs[i].name) ||
+ nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i))
+ goto err_msg_entry;
+
+ if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) &&
+ (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC,
+ !test_bit(i, stats->is_disabled))))
+ goto err_msg_entry;
+
+ nla_nest_end(msg, entry);
+ }
+ mutex_unlock(&stats->lock);
+
+ nla_nest_end(msg, table);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+
+err_msg_entry:
+ nla_nest_cancel(msg, entry);
+err_msg_table:
+ mutex_unlock(&stats->lock);
+ nla_nest_cancel(msg, table);
+err_msg:
+ nlmsg_free(msg);
+err:
+ ib_device_put(device);
+ return ret;
+}
+
static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
[RDMA_NLDEV_CMD_GET] = {
.doit = nldev_get_doit,
@@ -2342,6 +2525,9 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = {
.dump = nldev_res_get_mr_raw_dumpit,
.flags = RDMA_NL_ADMIN_PERM,
},
+ [RDMA_NLDEV_CMD_STAT_GET_STATUS] = {
+ .doit = nldev_stat_get_counter_status_doit,
+ },
};
void __init nldev_init(void)
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 5221cce65675..5a3bd41b331c 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -282,15 +282,22 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
}
-static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
- u32 sg_cnt, enum dma_data_direction dir)
+static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt,
+ enum dma_data_direction dir)
{
- if (is_pci_p2pdma_page(sg_page(sg))) {
+ int nents;
+
+ if (is_pci_p2pdma_page(sg_page(sgt->sgl))) {
if (WARN_ON_ONCE(ib_uses_virt_dma(dev)))
return 0;
- return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl,
+ sgt->orig_nents, dir);
+ if (!nents)
+ return -EIO;
+ sgt->nents = nents;
+ return 0;
}
- return ib_dma_map_sg(dev, sg, sg_cnt, dir);
+ return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0);
}
/**
@@ -313,12 +320,16 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
struct ib_device *dev = qp->pd->device;
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
int ret;
- ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
- if (!ret)
- return -ENOMEM;
- sg_cnt = ret;
+ ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ if (ret)
+ return ret;
+ sg_cnt = sgt.nents;
/*
* Skip to the S/G entry that sg_offset falls into:
@@ -354,7 +365,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num,
return ret;
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -385,6 +396,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
struct ib_device *dev = qp->pd->device;
u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device,
qp->integrity_en);
+ struct sg_table sgt = {
+ .sgl = sg,
+ .orig_nents = sg_cnt,
+ };
+ struct sg_table prot_sgt = {
+ .sgl = prot_sg,
+ .orig_nents = prot_sg_cnt,
+ };
struct ib_rdma_wr *rdma_wr;
int count = 0, ret;
@@ -394,18 +413,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return -EINVAL;
}
- ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
- if (!ret)
- return -ENOMEM;
- sg_cnt = ret;
+ ret = rdma_rw_map_sgtable(dev, &sgt, dir);
+ if (ret)
+ return ret;
if (prot_sg_cnt) {
- ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir);
- if (!ret) {
- ret = -ENOMEM;
+ ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir);
+ if (ret)
goto out_unmap_sg;
- }
- prot_sg_cnt = ret;
}
ctx->type = RDMA_RW_SIG_MR;
@@ -426,10 +441,11 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs));
- ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg,
- prot_sg_cnt, NULL, SZ_4K);
+ ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg,
+ prot_sgt.nents, NULL, SZ_4K);
if (unlikely(ret)) {
- pr_err("failed to map PI sg (%u)\n", sg_cnt + prot_sg_cnt);
+ pr_err("failed to map PI sg (%u)\n",
+ sgt.nents + prot_sgt.nents);
goto out_destroy_sig_mr;
}
@@ -468,10 +484,10 @@ out_destroy_sig_mr:
out_free_ctx:
kfree(ctx->reg);
out_unmap_prot_sg:
- if (prot_sg_cnt)
- rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir);
+ if (prot_sgt.nents)
+ rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir);
out_unmap_sg:
- rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_signature_init);
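Editor's note, not part of the patch: an illustrative fragment, in the context of rw.c, of the calling-convention change above. The old helper returned the mapped entry count (0 meant failure); the new one returns 0 or -errno and reports counts through the sg_table itself. The function name my_map_example is hypothetical.

static int my_map_example(struct ib_device *dev, struct scatterlist *sg,
			  u32 sg_cnt, enum dma_data_direction dir)
{
	struct sg_table sgt = {
		.sgl = sg,		/* caller's scatterlist */
		.orig_nents = sg_cnt,	/* entries handed to the mapper */
	};
	int ret;

	ret = rdma_rw_map_sgtable(dev, &sgt, dir);
	if (ret)
		return ret;		/* nothing mapped, nothing to undo */

	/* ... use the sgt.nents mapped entries ... */

	rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir);
	return 0;
}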
diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c
index a20b8108e160..74ecd7456a11 100644
--- a/drivers/infiniband/core/sa_query.c
+++ b/drivers/infiniband/core/sa_query.c
@@ -706,8 +706,9 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
/* Construct the family header first */
header = skb_put(skb, NLMSG_ALIGN(sizeof(*header)));
- memcpy(header->device_name, dev_name(&query->port->agent->device->dev),
- LS_DEVICE_NAME_MAX);
+ strscpy_pad(header->device_name,
+ dev_name(&query->port->agent->device->dev),
+ LS_DEVICE_NAME_MAX);
header->port_num = query->port->port_num;
if ((comp_mask & IB_SA_PATH_REC_REVERSIBLE) &&
@@ -2261,7 +2262,6 @@ err1:
void ib_sa_cleanup(void)
{
cancel_delayed_work(&ib_nl_timed_work);
- flush_workqueue(ib_nl_wq);
destroy_workqueue(ib_nl_wq);
mcast_cleanup();
ib_unregister_client(&sa_client);
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 6146c3c1cbe5..a3f84b50c46a 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -755,9 +755,9 @@ static void ib_port_release(struct kobject *kobj)
for (i = 0; i != ARRAY_SIZE(port->groups); i++)
kfree(port->groups[i].attrs);
if (port->hw_stats_data)
- kfree(port->hw_stats_data->stats);
+ rdma_free_hw_stats_struct(port->hw_stats_data->stats);
kfree(port->hw_stats_data);
- kfree(port);
+ kvfree(port);
}
static void ib_port_gid_attr_release(struct kobject *kobj)
@@ -895,7 +895,7 @@ alloc_hw_stats_device(struct ib_device *ibdev)
stats = ibdev->ops.alloc_hw_device_stats(ibdev);
if (!stats)
return ERR_PTR(-ENOMEM);
- if (!stats->names || stats->num_counters <= 0)
+ if (!stats->descs || stats->num_counters <= 0)
goto err_free_stats;
/*
@@ -911,7 +911,6 @@ alloc_hw_stats_device(struct ib_device *ibdev)
if (!data->group.attrs)
goto err_free_data;
- mutex_init(&stats->lock);
data->group.name = "hw_counters";
data->stats = stats;
return data;
@@ -919,14 +918,14 @@ alloc_hw_stats_device(struct ib_device *ibdev)
err_free_data:
kfree(data);
err_free_stats:
- kfree(stats);
+ rdma_free_hw_stats_struct(stats);
return ERR_PTR(-ENOMEM);
}
void ib_device_release_hw_stats(struct hw_stats_device_data *data)
{
kfree(data->group.attrs);
- kfree(data->stats);
+ rdma_free_hw_stats_struct(data->stats);
kfree(data);
}
@@ -934,7 +933,8 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
{
struct hw_stats_device_attribute *attr;
struct hw_stats_device_data *data;
- int i, ret;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
data = alloc_hw_stats_device(ibdev);
if (IS_ERR(data)) {
@@ -955,16 +955,23 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
data->stats->timestamp = jiffies;
for (i = 0; i < data->stats->num_counters; i++) {
- attr = &data->attrs[i];
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
+
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
- attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.name = data->stats->descs[i].name;
attr->attr.attr.mode = 0444;
attr->attr.show = hw_stat_device_show;
attr->show = show_hw_stats;
- data->group.attrs[i] = &attr->attr.attr;
+ data->group.attrs[pos] = &attr->attr.attr;
+ pos++;
}
- attr = &data->attrs[i];
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
attr->attr.attr.name = "lifespan";
attr->attr.attr.mode = 0644;
@@ -972,7 +979,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev)
attr->show = show_stats_lifespan;
attr->attr.store = hw_stat_device_store;
attr->store = set_stats_lifespan;
- data->group.attrs[i] = &attr->attr.attr;
+ data->group.attrs[pos] = &attr->attr.attr;
for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++)
if (!ibdev->groups[i]) {
ibdev->groups[i] = &data->group;
@@ -994,7 +1001,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num);
if (!stats)
return ERR_PTR(-ENOMEM);
- if (!stats->names || stats->num_counters <= 0)
+ if (!stats->descs || stats->num_counters <= 0)
goto err_free_stats;
/*
@@ -1010,7 +1017,6 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
if (!group->attrs)
goto err_free_data;
- mutex_init(&stats->lock);
group->name = "hw_counters";
data->stats = stats;
return data;
@@ -1018,7 +1024,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group)
err_free_data:
kfree(data);
err_free_stats:
- kfree(stats);
+ rdma_free_hw_stats_struct(stats);
return ERR_PTR(-ENOMEM);
}
@@ -1027,7 +1033,8 @@ static int setup_hw_port_stats(struct ib_port *port,
{
struct hw_stats_port_attribute *attr;
struct hw_stats_port_data *data;
- int i, ret;
+ bool opstat_skipped = false;
+ int i, ret, pos = 0;
data = alloc_hw_stats_port(port, group);
if (IS_ERR(data))
@@ -1045,16 +1052,23 @@ static int setup_hw_port_stats(struct ib_port *port,
data->stats->timestamp = jiffies;
for (i = 0; i < data->stats->num_counters; i++) {
- attr = &data->attrs[i];
+ if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) {
+ opstat_skipped = true;
+ continue;
+ }
+
+ WARN_ON(opstat_skipped);
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
- attr->attr.attr.name = data->stats->names[i];
+ attr->attr.attr.name = data->stats->descs[i].name;
attr->attr.attr.mode = 0444;
attr->attr.show = hw_stat_port_show;
attr->show = show_hw_stats;
- group->attrs[i] = &attr->attr.attr;
+ group->attrs[pos] = &attr->attr.attr;
+ pos++;
}
- attr = &data->attrs[i];
+ attr = &data->attrs[pos];
sysfs_attr_init(&attr->attr.attr);
attr->attr.attr.name = "lifespan";
attr->attr.attr.mode = 0644;
@@ -1062,7 +1076,7 @@ static int setup_hw_port_stats(struct ib_port *port,
attr->show = show_stats_lifespan;
attr->attr.store = hw_stat_port_store;
attr->store = set_stats_lifespan;
- group->attrs[i] = &attr->attr.attr;
+ group->attrs[pos] = &attr->attr.attr;
port->hw_stats_data = data;
return 0;
@@ -1189,7 +1203,7 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num,
struct ib_port *p;
int ret;
- p = kzalloc(struct_size(p, attrs_list,
+ p = kvzalloc(struct_size(p, attrs_list,
attr->gid_tbl_len + attr->pkey_tbl_len),
GFP_KERNEL);
if (!p)
diff --git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c
index 2d14929543af..f0760741f281 100644
--- a/drivers/infiniband/core/umem_dmabuf.c
+++ b/drivers/infiniband/core/umem_dmabuf.c
@@ -166,12 +166,63 @@ out_release_dmabuf:
}
EXPORT_SYMBOL(ib_umem_dmabuf_get);
+static void
+ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach)
+{
+ struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv;
+
+ ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev,
+ "Invalidate callback should not be called when memory is pinned\n");
+}
+
+static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = {
+ .allow_peer2peer = true,
+ .move_notify = ib_umem_dmabuf_unsupported_move_notify,
+};
+
+struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
+ unsigned long offset,
+ size_t size, int fd,
+ int access)
+{
+ struct ib_umem_dmabuf *umem_dmabuf;
+ int err;
+
+ umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access,
+ &ib_umem_dmabuf_attach_pinned_ops);
+ if (IS_ERR(umem_dmabuf))
+ return umem_dmabuf;
+
+ dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL);
+ err = dma_buf_pin(umem_dmabuf->attach);
+ if (err)
+ goto err_release;
+ umem_dmabuf->pinned = 1;
+
+ err = ib_umem_dmabuf_map_pages(umem_dmabuf);
+ if (err)
+ goto err_unpin;
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+
+ return umem_dmabuf;
+
+err_unpin:
+ dma_buf_unpin(umem_dmabuf->attach);
+err_release:
+ dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv);
+ ib_umem_release(&umem_dmabuf->umem);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned);
+
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf)
{
struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf;
dma_resv_lock(dmabuf->resv, NULL);
ib_umem_dmabuf_unmap_pages(umem_dmabuf);
+ if (umem_dmabuf->pinned)
+ dma_buf_unpin(umem_dmabuf->attach);
dma_resv_unlock(dmabuf->resv);
dma_buf_detach(dmabuf, umem_dmabuf->attach);
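Editor's note, not part of the patch: a hedged sketch of how a driver without invalidation support might call the new pinned dma-buf helper from its ->reg_user_mr_dmabuf() hook. The my_* names are hypothetical; only ib_umem_dmabuf_get_pinned() and ERR_CAST() are real.

static struct ib_mr *my_reg_user_mr_dmabuf(struct ib_pd *pd, u64 offset,
					   u64 length, u64 iova, int fd,
					   int access, struct ib_udata *udata)
{
	struct ib_umem_dmabuf *umem_dmabuf;

	umem_dmabuf = ib_umem_dmabuf_get_pinned(pd->device, offset, length,
						fd, access);
	if (IS_ERR(umem_dmabuf))
		return ERR_CAST(umem_dmabuf);

	/*
	 * The pages stay pinned and mapped until ib_umem_release(), so the
	 * HW MR can be programmed from umem_dmabuf->umem without providing
	 * a move_notify handler.
	 */
	return my_create_hw_mr(pd, &umem_dmabuf->umem, iova, access);
}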
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 740e6b2efe0e..d1345d76d9b1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -837,11 +837,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
new_mr->device = new_pd->device;
new_mr->pd = new_pd;
new_mr->type = IB_MR_TYPE_USER;
- new_mr->dm = NULL;
- new_mr->sig_attrs = NULL;
new_mr->uobject = uobj;
atomic_inc(&new_pd->usecnt);
- new_mr->iova = cmd.hca_va;
new_uobj->object = new_mr;
rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 89a2b21976d6..692d5ff657df 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -2976,3 +2976,52 @@ bool __rdma_block_iter_next(struct ib_block_iter *biter)
return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
+
+/**
+ * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct
+ * for the drivers.
+ * @descs: array of static descriptors
+ * @num_counters: number of elements in array
+ * @lifespan: milliseconds between updates
+ */
+struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
+ const struct rdma_stat_desc *descs, int num_counters,
+ unsigned long lifespan)
+{
+ struct rdma_hw_stats *stats;
+
+ stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL);
+ if (!stats)
+ return NULL;
+
+ stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters),
+ sizeof(*stats->is_disabled), GFP_KERNEL);
+ if (!stats->is_disabled)
+ goto err;
+
+ stats->descs = descs;
+ stats->num_counters = num_counters;
+ stats->lifespan = msecs_to_jiffies(lifespan);
+ mutex_init(&stats->lock);
+
+ return stats;
+
+err:
+ kfree(stats);
+ return NULL;
+}
+EXPORT_SYMBOL(rdma_alloc_hw_stats_struct);
+
+/**
+ * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats
+ * @stats: statistics to release
+ */
+void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats)
+{
+ if (!stats)
+ return;
+
+ kfree(stats->is_disabled);
+ kfree(stats);
+}
+EXPORT_SYMBOL(rdma_free_hw_stats_struct);
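Editor's note, not part of the patch: a hedged driver-side sketch of the new allocation helper. The counter set and the my_* names are hypothetical; IB_STAT_FLAG_OPTIONAL marks counters that start disabled and are reported as dynamic by the status dump added earlier. The core releases the structure through rdma_free_hw_stats_struct(), so the driver only allocates.

static const struct rdma_stat_desc my_port_descs[] = {
	[0] = { .name = "rx_pkts" },
	[1] = { .name = "tx_pkts" },
	/* optional counters must follow all mandatory ones; see the
	 * WARN_ON(opstat_skipped) in the sysfs setup above */
	[2] = { .name = "rare_event", .flags = IB_STAT_FLAG_OPTIONAL },
};

static struct rdma_hw_stats *my_alloc_hw_port_stats(struct ib_device *ibdev,
						    u32 port_num)
{
	return rdma_alloc_hw_stats_struct(my_port_descs,
					  ARRAY_SIZE(my_port_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}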
diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
index ba26d8e6a9c2..79401e6c6aa9 100644
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -39,22 +39,13 @@
#ifndef __BNXT_RE_H__
#define __BNXT_RE_H__
+#include "hw_counters.h"
#define ROCE_DRV_MODULE_NAME "bnxt_re"
#define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver"
-#define BNXT_RE_PAGE_SHIFT_4K (12)
-#define BNXT_RE_PAGE_SHIFT_8K (13)
-#define BNXT_RE_PAGE_SHIFT_64K (16)
-#define BNXT_RE_PAGE_SHIFT_2M (21)
-#define BNXT_RE_PAGE_SHIFT_8M (23)
-#define BNXT_RE_PAGE_SHIFT_1G (30)
-#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K)
-#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K)
-#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K)
-#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M)
-#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M)
-#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G)
+#define BNXT_RE_PAGE_SHIFT_1G (30)
+#define BNXT_RE_PAGE_SIZE_SUPPORTED 0x7FFFF000 /* 4kb - 1G */
#define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G)
#define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39)
@@ -177,15 +168,17 @@ struct bnxt_re_dev {
atomic_t srq_count;
atomic_t mr_count;
atomic_t mw_count;
+ atomic_t ah_count;
+ atomic_t pd_count;
/* Max of 2 lossless traffic class supported per port */
u16 cosq[2];
/* QP for handling QP1 packets */
struct bnxt_re_gsi_context gsi_ctx;
+ struct bnxt_re_stats stats;
atomic_t nq_alloc_cnt;
u32 is_virtfn;
u32 num_vfs;
- struct bnxt_qplib_roce_stats stats;
};
#define to_bnxt_re_dev(ptr, member) \
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 7ba07797845c..825d512799d9 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -57,69 +57,208 @@
#include "bnxt_re.h"
#include "hw_counters.h"
-static const char * const bnxt_re_stat_name[] = {
- [BNXT_RE_ACTIVE_QP] = "active_qps",
- [BNXT_RE_ACTIVE_SRQ] = "active_srqs",
- [BNXT_RE_ACTIVE_CQ] = "active_cqs",
- [BNXT_RE_ACTIVE_MR] = "active_mrs",
- [BNXT_RE_ACTIVE_MW] = "active_mws",
- [BNXT_RE_RX_PKTS] = "rx_pkts",
- [BNXT_RE_RX_BYTES] = "rx_bytes",
- [BNXT_RE_TX_PKTS] = "tx_pkts",
- [BNXT_RE_TX_BYTES] = "tx_bytes",
- [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors",
- [BNXT_RE_RX_DROPS] = "rx_roce_drops",
- [BNXT_RE_RX_DISCARDS] = "rx_roce_discards",
- [BNXT_RE_TO_RETRANSMITS] = "to_retransmits",
- [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd",
- [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded",
- [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd",
- [BNXT_RE_MISSING_RESP] = "missing_resp",
- [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err",
- [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err",
- [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err",
- [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err",
- [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err",
- [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err",
- [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err",
- [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err",
- [BNXT_RE_DUP_REQ] = "dup_req",
- [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max",
- [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch",
- [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe",
- [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err",
- [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey",
- [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err",
- [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm",
- [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err",
- [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey",
- [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err",
- [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm",
- [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err",
- [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow",
- [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode",
- [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic",
- [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err",
- [BNXT_RE_RES_MEM_ERROR] = "res_mem_err",
- [BNXT_RE_RES_SRQ_ERR] = "res_srq_err",
- [BNXT_RE_RES_CMP_ERR] = "res_cmp_err",
- [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey",
- [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err",
- [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err",
- [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err",
- [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err",
- [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err",
- [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count"
+static const struct rdma_stat_desc bnxt_re_stat_descs[] = {
+ [BNXT_RE_ACTIVE_PD].name = "active_pds",
+ [BNXT_RE_ACTIVE_AH].name = "active_ahs",
+ [BNXT_RE_ACTIVE_QP].name = "active_qps",
+ [BNXT_RE_ACTIVE_SRQ].name = "active_srqs",
+ [BNXT_RE_ACTIVE_CQ].name = "active_cqs",
+ [BNXT_RE_ACTIVE_MR].name = "active_mrs",
+ [BNXT_RE_ACTIVE_MW].name = "active_mws",
+ [BNXT_RE_RX_PKTS].name = "rx_pkts",
+ [BNXT_RE_RX_BYTES].name = "rx_bytes",
+ [BNXT_RE_TX_PKTS].name = "tx_pkts",
+ [BNXT_RE_TX_BYTES].name = "tx_bytes",
+ [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors",
+ [BNXT_RE_RX_ERRORS].name = "rx_roce_errors",
+ [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards",
+ [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits",
+ [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd",
+ [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded",
+ [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd",
+ [BNXT_RE_MISSING_RESP].name = "missing_resp",
+ [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err",
+ [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err",
+ [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err",
+ [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err",
+ [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err",
+ [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err",
+ [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err",
+ [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err",
+ [BNXT_RE_DUP_REQ].name = "dup_req",
+ [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max",
+ [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch",
+ [BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe",
+ [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err",
+ [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey",
+ [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err",
+ [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm",
+ [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err",
+ [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey",
+ [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err",
+ [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm",
+ [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err",
+ [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow",
+ [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode",
+ [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic",
+ [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err",
+ [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err",
+ [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err",
+ [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err",
+ [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey",
+ [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err",
+ [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err",
+ [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err",
+ [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err",
+ [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err",
+ [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count",
+ [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req",
+ [BNXT_RE_TX_READ_REQ].name = "tx_read_req",
+ [BNXT_RE_TX_READ_RES].name = "tx_read_resp",
+ [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req",
+ [BNXT_RE_TX_SEND_REQ].name = "tx_send_req",
+ [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req",
+ [BNXT_RE_RX_READ_REQ].name = "rx_read_req",
+ [BNXT_RE_RX_READ_RESP].name = "rx_read_resp",
+ [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req",
+ [BNXT_RE_RX_SEND_REQ].name = "rx_send_req",
+ [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts",
+ [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes",
+ [BNXT_RE_OOB].name = "rx_out_of_buffer"
};
+static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_ext_stat *s)
+{
+ stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req;
+ stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req;
+ stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res;
+ stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req;
+ stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req;
+ stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req;
+ stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req;
+ stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res;
+ stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req;
+ stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts;
+ stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes;
+ stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer;
+}
+
+static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats)
+{
+ struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat;
+ u32 fid;
+ int rc;
+
+ fid = PCI_FUNC(rdev->en_dev->pdev->devfn);
+ rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat);
+ if (rc)
+ goto done;
+ bnxt_re_copy_ext_stats(rdev, stats, estat);
+
+done:
+ return rc;
+}
+
+static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev,
+ struct rdma_hw_stats *stats,
+ struct bnxt_qplib_roce_stats *err_s)
+{
+ stats->value[BNXT_RE_TO_RETRANSMITS] =
+ err_s->to_retransmits;
+ stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
+ err_s->seq_err_naks_rcvd;
+ stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
+ err_s->max_retry_exceeded;
+ stats->value[BNXT_RE_RNR_NAKS_RCVD] =
+ err_s->rnr_naks_rcvd;
+ stats->value[BNXT_RE_MISSING_RESP] =
+ err_s->missing_resp;
+ stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
+ err_s->unrecoverable_err;
+ stats->value[BNXT_RE_BAD_RESP_ERR] =
+ err_s->bad_resp_err;
+ stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
+ err_s->local_qp_op_err;
+ stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
+ err_s->local_protection_err;
+ stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
+ err_s->mem_mgmt_op_err;
+ stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
+ err_s->remote_invalid_req_err;
+ stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
+ err_s->remote_access_err;
+ stats->value[BNXT_RE_REMOTE_OP_ERR] =
+ err_s->remote_op_err;
+ stats->value[BNXT_RE_DUP_REQ] =
+ err_s->dup_req;
+ stats->value[BNXT_RE_RES_EXCEED_MAX] =
+ err_s->res_exceed_max;
+ stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
+ err_s->res_length_mismatch;
+ stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
+ err_s->res_exceeds_wqe;
+ stats->value[BNXT_RE_RES_OPCODE_ERR] =
+ err_s->res_opcode_err;
+ stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
+ err_s->res_rx_invalid_rkey;
+ stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
+ err_s->res_rx_domain_err;
+ stats->value[BNXT_RE_RES_RX_NO_PERM] =
+ err_s->res_rx_no_perm;
+ stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
+ err_s->res_rx_range_err;
+ stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
+ err_s->res_tx_invalid_rkey;
+ stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
+ err_s->res_tx_domain_err;
+ stats->value[BNXT_RE_RES_TX_NO_PERM] =
+ err_s->res_tx_no_perm;
+ stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
+ err_s->res_tx_range_err;
+ stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
+ err_s->res_irrq_oflow;
+ stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
+ err_s->res_unsup_opcode;
+ stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
+ err_s->res_unaligned_atomic;
+ stats->value[BNXT_RE_RES_REM_INV_ERR] =
+ err_s->res_rem_inv_err;
+ stats->value[BNXT_RE_RES_MEM_ERROR] =
+ err_s->res_mem_error;
+ stats->value[BNXT_RE_RES_SRQ_ERR] =
+ err_s->res_srq_err;
+ stats->value[BNXT_RE_RES_CMP_ERR] =
+ err_s->res_cmp_err;
+ stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
+ err_s->res_invalid_dup_rkey;
+ stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
+ err_s->res_wqe_format_err;
+ stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
+ err_s->res_cq_load_err;
+ stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
+ err_s->res_srq_load_err;
+ stats->value[BNXT_RE_RES_TX_PCI_ERR] =
+ err_s->res_tx_pci_err;
+ stats->value[BNXT_RE_RES_RX_PCI_ERR] =
+ err_s->res_rx_pci_err;
+ stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
+ err_s->res_oos_drop_count;
+}
+
int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
struct rdma_hw_stats *stats,
u32 port, int index)
{
struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
- struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma;
+ struct ctx_hw_stats *hw_stats = NULL;
+ struct bnxt_qplib_roce_stats *err_s = NULL;
int rc = 0;
+ hw_stats = rdev->qplib_ctx.stats.dma;
if (!port || !stats)
return -EINVAL;
@@ -128,118 +267,61 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count);
stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count);
stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count);
- if (bnxt_re_stats) {
+ stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->pd_count);
+ stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->ah_count);
+
+ if (hw_stats) {
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
- le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
- stats->value[BNXT_RE_RX_DROPS] =
- le64_to_cpu(bnxt_re_stats->rx_error_pkts);
+ le64_to_cpu(hw_stats->tx_bcast_pkts);
+ stats->value[BNXT_RE_RX_ERRORS] =
+ le64_to_cpu(hw_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
- le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
+ le64_to_cpu(hw_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
- le64_to_cpu(bnxt_re_stats->rx_ucast_pkts);
+ le64_to_cpu(hw_stats->rx_ucast_pkts);
stats->value[BNXT_RE_RX_BYTES] =
- le64_to_cpu(bnxt_re_stats->rx_ucast_bytes);
+ le64_to_cpu(hw_stats->rx_ucast_bytes);
stats->value[BNXT_RE_TX_PKTS] =
- le64_to_cpu(bnxt_re_stats->tx_ucast_pkts);
+ le64_to_cpu(hw_stats->tx_ucast_pkts);
stats->value[BNXT_RE_TX_BYTES] =
- le64_to_cpu(bnxt_re_stats->tx_ucast_bytes);
+ le64_to_cpu(hw_stats->tx_ucast_bytes);
}
+ err_s = &rdev->stats.rstat.errs;
if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) {
- rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats);
- if (rc)
+ rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s);
+ if (rc) {
clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
&rdev->flags);
- stats->value[BNXT_RE_TO_RETRANSMITS] =
- rdev->stats.to_retransmits;
- stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] =
- rdev->stats.seq_err_naks_rcvd;
- stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] =
- rdev->stats.max_retry_exceeded;
- stats->value[BNXT_RE_RNR_NAKS_RCVD] =
- rdev->stats.rnr_naks_rcvd;
- stats->value[BNXT_RE_MISSING_RESP] =
- rdev->stats.missing_resp;
- stats->value[BNXT_RE_UNRECOVERABLE_ERR] =
- rdev->stats.unrecoverable_err;
- stats->value[BNXT_RE_BAD_RESP_ERR] =
- rdev->stats.bad_resp_err;
- stats->value[BNXT_RE_LOCAL_QP_OP_ERR] =
- rdev->stats.local_qp_op_err;
- stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] =
- rdev->stats.local_protection_err;
- stats->value[BNXT_RE_MEM_MGMT_OP_ERR] =
- rdev->stats.mem_mgmt_op_err;
- stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] =
- rdev->stats.remote_invalid_req_err;
- stats->value[BNXT_RE_REMOTE_ACCESS_ERR] =
- rdev->stats.remote_access_err;
- stats->value[BNXT_RE_REMOTE_OP_ERR] =
- rdev->stats.remote_op_err;
- stats->value[BNXT_RE_DUP_REQ] =
- rdev->stats.dup_req;
- stats->value[BNXT_RE_RES_EXCEED_MAX] =
- rdev->stats.res_exceed_max;
- stats->value[BNXT_RE_RES_LENGTH_MISMATCH] =
- rdev->stats.res_length_mismatch;
- stats->value[BNXT_RE_RES_EXCEEDS_WQE] =
- rdev->stats.res_exceeds_wqe;
- stats->value[BNXT_RE_RES_OPCODE_ERR] =
- rdev->stats.res_opcode_err;
- stats->value[BNXT_RE_RES_RX_INVALID_RKEY] =
- rdev->stats.res_rx_invalid_rkey;
- stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] =
- rdev->stats.res_rx_domain_err;
- stats->value[BNXT_RE_RES_RX_NO_PERM] =
- rdev->stats.res_rx_no_perm;
- stats->value[BNXT_RE_RES_RX_RANGE_ERR] =
- rdev->stats.res_rx_range_err;
- stats->value[BNXT_RE_RES_TX_INVALID_RKEY] =
- rdev->stats.res_tx_invalid_rkey;
- stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] =
- rdev->stats.res_tx_domain_err;
- stats->value[BNXT_RE_RES_TX_NO_PERM] =
- rdev->stats.res_tx_no_perm;
- stats->value[BNXT_RE_RES_TX_RANGE_ERR] =
- rdev->stats.res_tx_range_err;
- stats->value[BNXT_RE_RES_IRRQ_OFLOW] =
- rdev->stats.res_irrq_oflow;
- stats->value[BNXT_RE_RES_UNSUP_OPCODE] =
- rdev->stats.res_unsup_opcode;
- stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] =
- rdev->stats.res_unaligned_atomic;
- stats->value[BNXT_RE_RES_REM_INV_ERR] =
- rdev->stats.res_rem_inv_err;
- stats->value[BNXT_RE_RES_MEM_ERROR] =
- rdev->stats.res_mem_error;
- stats->value[BNXT_RE_RES_SRQ_ERR] =
- rdev->stats.res_srq_err;
- stats->value[BNXT_RE_RES_CMP_ERR] =
- rdev->stats.res_cmp_err;
- stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] =
- rdev->stats.res_invalid_dup_rkey;
- stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] =
- rdev->stats.res_wqe_format_err;
- stats->value[BNXT_RE_RES_CQ_LOAD_ERR] =
- rdev->stats.res_cq_load_err;
- stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] =
- rdev->stats.res_srq_load_err;
- stats->value[BNXT_RE_RES_TX_PCI_ERR] =
- rdev->stats.res_tx_pci_err;
- stats->value[BNXT_RE_RES_RX_PCI_ERR] =
- rdev->stats.res_rx_pci_err;
- stats->value[BNXT_RE_OUT_OF_SEQ_ERR] =
- rdev->stats.res_oos_drop_count;
+ goto done;
+ }
+ if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) &&
+ !rdev->is_virtfn) {
+ rc = bnxt_re_get_ext_stat(rdev, stats);
+ if (rc) {
+ clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS,
+ &rdev->flags);
+ goto done;
+ }
+ }
+ bnxt_re_copy_err_stats(rdev, stats, err_s);
}
- return ARRAY_SIZE(bnxt_re_stat_name);
+done:
+ return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
+ BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS;
}
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS);
+ struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
+ int num_counters = 0;
+
+ if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx))
+ num_counters = BNXT_RE_NUM_EXT_COUNTERS;
+ else
+ num_counters = BNXT_RE_NUM_STD_COUNTERS;
- return rdma_alloc_hw_stats_struct(bnxt_re_stat_name,
- ARRAY_SIZE(bnxt_re_stat_name),
+ return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h
index 6f2d2f91d9ff..7943b2c393e4 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.h
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h
@@ -41,6 +41,8 @@
#define __BNXT_RE_HW_STATS_H__
enum bnxt_re_hw_stats {
+ BNXT_RE_ACTIVE_PD,
+ BNXT_RE_ACTIVE_AH,
BNXT_RE_ACTIVE_QP,
BNXT_RE_ACTIVE_SRQ,
BNXT_RE_ACTIVE_CQ,
@@ -51,7 +53,7 @@ enum bnxt_re_hw_stats {
BNXT_RE_TX_PKTS,
BNXT_RE_TX_BYTES,
BNXT_RE_RECOVERABLE_ERRORS,
- BNXT_RE_RX_DROPS,
+ BNXT_RE_RX_ERRORS,
BNXT_RE_RX_DISCARDS,
BNXT_RE_TO_RETRANSMITS,
BNXT_RE_SEQ_ERR_NAKS_RCVD,
@@ -93,7 +95,31 @@ enum bnxt_re_hw_stats {
BNXT_RE_RES_TX_PCI_ERR,
BNXT_RE_RES_RX_PCI_ERR,
BNXT_RE_OUT_OF_SEQ_ERR,
- BNXT_RE_NUM_COUNTERS
+ BNXT_RE_TX_ATOMIC_REQ,
+ BNXT_RE_TX_READ_REQ,
+ BNXT_RE_TX_READ_RES,
+ BNXT_RE_TX_WRITE_REQ,
+ BNXT_RE_TX_SEND_REQ,
+ BNXT_RE_RX_ATOMIC_REQ,
+ BNXT_RE_RX_READ_REQ,
+ BNXT_RE_RX_READ_RESP,
+ BNXT_RE_RX_WRITE_REQ,
+ BNXT_RE_RX_SEND_REQ,
+ BNXT_RE_RX_ROCE_GOOD_PKTS,
+ BNXT_RE_RX_ROCE_GOOD_BYTES,
+ BNXT_RE_OOB,
+ BNXT_RE_NUM_EXT_COUNTERS
+};
+
+#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1)
+
+struct bnxt_re_rstat {
+ struct bnxt_qplib_roce_stats errs;
+ struct bnxt_qplib_ext_stat ext_stat;
+};
+
+struct bnxt_re_stats {
+ struct bnxt_re_rstat rstat;
};
struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev,
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
index 408dfbcc47b5..29cc0d14399a 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c
@@ -41,6 +41,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
+#include <net/addrconf.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
@@ -130,10 +131,10 @@ int bnxt_re_query_device(struct ib_device *ibdev,
memcpy(&ib_attr->fw_ver, dev_attr->fw_ver,
min(sizeof(dev_attr->fw_ver),
sizeof(ib_attr->fw_ver)));
- bnxt_qplib_get_guid(rdev->netdev->dev_addr,
- (u8 *)&ib_attr->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid,
+ rdev->netdev->dev_addr);
ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE;
- ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M;
+ ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED;
ib_attr->vendor_id = rdev->en_dev->pdev->vendor;
ib_attr->vendor_part_id = rdev->en_dev->pdev->device;
@@ -541,9 +542,12 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata)
bnxt_re_destroy_fence_mr(pd);
- if (pd->qplib_pd.id)
- bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
- &pd->qplib_pd);
+ if (pd->qplib_pd.id) {
+ if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res,
+ &rdev->qplib_res.pd_tbl,
+ &pd->qplib_pd))
+ atomic_dec(&rdev->pd_count);
+ }
return 0;
}
@@ -595,6 +599,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (bnxt_re_create_fence_mr(pd))
ibdev_warn(&rdev->ibdev,
"Failed to create Fence-MR\n");
+ atomic_inc(&rdev->pd_count);
+
return 0;
dbfail:
bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@@ -611,6 +617,8 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags)
bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah,
!(flags & RDMA_DESTROY_AH_SLEEPABLE));
+ atomic_dec(&rdev->ah_count);
+
return 0;
}
@@ -695,15 +703,11 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr,
wmb(); /* make sure cache is updated. */
spin_unlock_irqrestore(&uctx->sh_lock, flag);
}
+ atomic_inc(&rdev->ah_count);
return 0;
}
-int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
-{
- return 0;
-}
-
int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr)
{
struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah);
@@ -760,6 +764,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
bnxt_qplib_destroy_ah(&rdev->qplib_res,
&gsi_sah->qplib_ah,
true);
+ atomic_dec(&rdev->ah_count);
bnxt_qplib_clean_qp(&qp->qplib_qp);
ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n");
@@ -1006,6 +1011,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah
"Failed to allocate HW AH for Shadow QP");
goto fail;
}
+ atomic_inc(&rdev->ah_count);
return ah;
@@ -2478,7 +2484,8 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr,
wqe->frmr.l_key = wr->key;
wqe->frmr.length = wr->mr->length;
- wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1;
+ wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K);
+ wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K);
wqe->frmr.va = wr->mr->iova;
return 0;
}
@@ -3354,8 +3361,11 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
struct ib_wc *wc,
struct bnxt_qplib_cqe *cqe)
{
+ struct bnxt_re_dev *rdev;
+ u16 vlan_id = 0;
u8 nw_type;
+ rdev = qp->rdev;
wc->opcode = IB_WC_RECV;
wc->status = __rc_to_ib_wc_status(cqe->status);
@@ -3367,9 +3377,12 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp,
memcpy(wc->smac, cqe->smac, ETH_ALEN);
wc->wc_flags |= IB_WC_WITH_SMAC;
if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) {
- wc->vlan_id = (cqe->cfa_meta & 0xFFF);
- if (wc->vlan_id < 0x1000)
- wc->wc_flags |= IB_WC_WITH_VLAN;
+ vlan_id = (cqe->cfa_meta & 0xFFF);
+ }
+ /* Mark only if vlan_id is non zero */
+ if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+ wc->vlan_id = vlan_id;
+ wc->wc_flags |= IB_WC_WITH_VLAN;
}
nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >>
CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT;
@@ -3798,7 +3811,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
mr->qplib_mr.va = virt_addr;
page_size = ib_umem_find_best_pgsz(
- umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr);
+ umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr);
if (!page_size) {
ibdev_err(&rdev->ibdev, "umem page size unsupported!");
rc = -EFAULT;
diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index b5c6e0f4f877..94326267f9bb 100644
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -166,7 +166,6 @@ int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata);
int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
struct ib_udata *udata);
-int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags);
int bnxt_re_create_srq(struct ib_srq *srq,
diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
index 66268e41b470..b44944fb9b24 100644
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -127,6 +127,8 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode)
rdev->qplib_res.cctx = rdev->chip_ctx;
rdev->rcfw.res = &rdev->qplib_res;
+ rdev->qplib_res.dattr = &rdev->dev_attr;
+ rdev->qplib_res.is_vf = BNXT_VF(bp);
bnxt_re_set_drv_mode(rdev, wqe_mode);
if (bnxt_qplib_determine_atomics(en_dev->pdev))
@@ -523,7 +525,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
u32 fw_stats_ctx_id)
{
struct bnxt_en_dev *en_dev = rdev->en_dev;
- struct hwrm_stat_ctx_free_input req = {0};
+ struct hwrm_stat_ctx_free_input req = {};
+ struct hwrm_stat_ctx_free_output resp = {};
struct bnxt_fw_msg fw_msg;
int rc = -EINVAL;
@@ -537,8 +540,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);
- bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req,
- sizeof(req), DFLT_HWRM_CMD_TIMEOUT);
+ bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp,
+ sizeof(resp), DFLT_HWRM_CMD_TIMEOUT);
rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg);
if (rc)
ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x",
@@ -693,7 +696,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
.get_port_immutable = bnxt_re_get_port_immutable,
.map_mr_sg = bnxt_re_map_mr_sg,
.mmap = bnxt_re_mmap,
- .modify_ah = bnxt_re_modify_ah,
.modify_qp = bnxt_re_modify_qp,
.modify_srq = bnxt_re_modify_srq,
.poll_cq = bnxt_re_poll_cq,
@@ -727,7 +729,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
strlen(BNXT_RE_DESC) + 5);
ibdev->phys_port_cnt = 1;
- bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid);
+ addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr);
ibdev->num_comp_vectors = rdev->num_msix - 1;
ibdev->dev.parent = &rdev->en_dev->pdev->dev;
@@ -777,6 +779,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev,
atomic_set(&rdev->srq_count, 0);
atomic_set(&rdev->mr_count, 0);
atomic_set(&rdev->mw_count, 0);
+ atomic_set(&rdev->ah_count, 0);
+ atomic_set(&rdev->pd_count, 0);
rdev->cosq[0] = 0xFFFF;
rdev->cosq[1] = 0xFFFF;
@@ -1725,7 +1729,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
}
if (sch_work) {
/* Allocate for the deferred task */
- re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC);
+ re_work = kzalloc(sizeof(*re_work), GFP_KERNEL);
if (re_work) {
get_device(&rdev->ibdev.dev);
re_work->rdev = rdev;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
index d4d4959c2434..ca88849559bf 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c
@@ -707,12 +707,13 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res,
int rc = 0;
RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags);
- req.srq_cid = cpu_to_le32(srq->id);
/* Configure the request */
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf)
return -ENOMEM;
+ req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
+ req.srq_cid = cpu_to_le32(srq->id);
sb = sbuf->sb;
rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
(void *)sbuf, 0);
@@ -1049,6 +1050,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION;
if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE)
qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED;
+ if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf)
+ qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED;
+
req.qp_flags = cpu_to_le32(qp_flags);
/* ORRQ and IRRQ */
@@ -2851,6 +2855,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
struct cq_base *hw_cqe;
u32 sw_cons, raw_cons;
int budget, rc = 0;
+ u8 type;
raw_cons = cq->hwq.cons;
budget = num_cqes;
@@ -2869,7 +2874,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
*/
dma_rmb();
/* From the device's respective CQE format to qplib_wc*/
- switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) {
+ type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK;
+ switch (type) {
case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe,
@@ -2916,8 +2922,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
/* Error while processing the CQE, just skip to the
* next one
*/
- dev_err(&cq->hwq.pdev->dev,
- "process_cqe error rc = 0x%x\n", rc);
+ if (type != CQ_BASE_CQE_TYPE_TERMINAL)
+ dev_err(&cq->hwq.pdev->dev,
+ "process_cqe error rc = 0x%x\n", rc);
}
raw_cons++;
}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
index 5d384def5e5f..3de854727460 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c
@@ -78,7 +78,7 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
if (!test_bit(cbit, cmdq->cmdq_bitmap))
goto done;
do {
- mdelay(1); /* 1m sec */
+ udelay(1);
bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet);
} while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
done:
@@ -848,13 +848,13 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
{
struct bnxt_qplib_rcfw_sbuf *sbuf;
- sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
+ sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL);
if (!sbuf)
return NULL;
sbuf->size = size;
sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size,
- &sbuf->dma_addr, GFP_ATOMIC);
+ &sbuf->dma_addr, GFP_KERNEL);
if (!sbuf->sb)
goto bail;
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
index 9474c0046582..82faa4e4cda8 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -96,7 +96,7 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
#define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000
-#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
+#define RCFW_BLOCKED_CMD_WAIT_COUNT 20000000UL /* 20 sec */
#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c
index 44282a8cdd4f..bc1ba4b51ba4 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c
@@ -228,15 +228,16 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
npages++;
}
- if (npages == MAX_PBL_LVL_0_PGS) {
+ if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) {
/* This request is Level 0, map PTE */
rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo);
if (rc)
goto fail;
hwq->level = PBL_LVL_0;
+ goto done;
}
- if (npages > MAX_PBL_LVL_0_PGS) {
+ if (npages >= MAX_PBL_LVL_0_PGS) {
if (npages > MAX_PBL_LVL_1_PGS) {
u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ?
0 : PTU_PTE_VALID;
@@ -571,23 +572,6 @@ fail:
return rc;
}
-/* GUID */
-void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid)
-{
- u8 mac[ETH_ALEN];
-
- /* MAC-48 to EUI-64 mapping */
- memcpy(mac, dev_addr, ETH_ALEN);
- guid[0] = mac[0] ^ 2;
- guid[1] = mac[1];
- guid[2] = mac[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = mac[3];
- guid[6] = mac[4];
- guid[7] = mac[5];
-}
-
static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl)
{
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h
index 91031502e8f5..e1411a2352a7 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -253,14 +253,15 @@ struct bnxt_qplib_ctx {
struct bnxt_qplib_res {
struct pci_dev *pdev;
struct bnxt_qplib_chip_ctx *cctx;
+ struct bnxt_qplib_dev_attr *dattr;
struct net_device *netdev;
-
struct bnxt_qplib_rcfw *rcfw;
struct bnxt_qplib_pd_tbl pd_tbl;
struct bnxt_qplib_sgid_tbl sgid_tbl;
struct bnxt_qplib_pkey_tbl pkey_tbl;
struct bnxt_qplib_dpi_tbl dpi_tbl;
bool prio;
+ bool is_vf;
};
static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx)
@@ -345,7 +346,6 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res,
struct bnxt_qplib_hwq *hwq);
int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
struct bnxt_qplib_hwq_attr *hwq_attr);
-void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid);
int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl,
struct bnxt_qplib_pd *pd);
int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res,
@@ -450,4 +450,10 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info,
else
bnxt_qplib_ring_db32(info, arm);
}
+
+static inline bool _is_ext_stats_supported(u16 dev_cap_flags)
+{
+ return dev_cap_flags &
+ CREQ_QUERY_FUNC_RESP_SB_EXT_STATS;
+}
#endif /* __BNXT_QPLIB_RES_H__ */
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
index 3d9259632eb3..379e715ebd30 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c
@@ -161,6 +161,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
attr->l2_db_size = (sb->l2_db_space_size + 1) *
(0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
+ attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags);
bnxt_qplib_query_version(rcfw, attr->fw_ver);
@@ -286,8 +287,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
}
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id,
- bool update, u32 *index)
+ struct bnxt_qplib_gid *gid, const u8 *smac,
+ u16 vlan_id, bool update, u32 *index)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -378,7 +379,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 gid_idx,
- u8 *smac)
+ const u8 *smac)
{
struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl,
struct bnxt_qplib_res,
@@ -869,3 +870,53 @@ bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
}
+
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat)
+{
+ struct creq_query_roce_stats_ext_resp resp = {};
+ struct creq_query_roce_stats_ext_resp_sb *sb;
+ struct cmdq_query_roce_stats_ext req = {};
+ struct bnxt_qplib_rcfw_sbuf *sbuf;
+ u16 cmd_flags = 0;
+ int rc;
+
+ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
+ if (!sbuf) {
+ dev_err(&rcfw->pdev->dev,
+ "SP: QUERY_ROCE_STATS_EXT alloc sb failed");
+ return -ENOMEM;
+ }
+
+ RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags);
+
+ req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS);
+ req.resp_addr = cpu_to_le64(sbuf->dma_addr);
+ req.function_id = cpu_to_le32(fid);
+ req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID);
+
+ rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
+ (void *)&resp, (void *)sbuf, 0);
+ if (rc)
+ goto bail;
+
+ sb = sbuf->sb;
+ estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts);
+ estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts);
+ estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts);
+ estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts);
+ estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts);
+ estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts);
+ estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts);
+ estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts);
+ estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts);
+ estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts);
+ estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts);
+ estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes);
+ estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts);
+ estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts);
+
+bail:
+ bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
+ return rc;
+}
diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
index 260104783691..a18f568cb23e 100644
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -71,6 +71,7 @@ struct bnxt_qplib_dev_attr {
u32 l2_db_size;
u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ];
bool is_atomic;
+ u16 dev_cap_flags;
};
struct bnxt_qplib_pd {
@@ -219,16 +220,41 @@ struct bnxt_qplib_roce_stats {
/* port 3 active qps */
};
+struct bnxt_qplib_ext_stat {
+ u64 tx_atomic_req;
+ u64 tx_read_req;
+ u64 tx_read_res;
+ u64 tx_write_req;
+ u64 tx_send_req;
+ u64 tx_roce_pkts;
+ u64 tx_roce_bytes;
+ u64 rx_atomic_req;
+ u64 rx_read_req;
+ u64 rx_read_res;
+ u64 rx_write_req;
+ u64 rx_send_req;
+ u64 rx_roce_pkts;
+ u64 rx_roce_bytes;
+ u64 rx_roce_good_pkts;
+ u64 rx_roce_good_bytes;
+ u64 rx_out_of_buffer;
+ u64 rx_out_of_sequence;
+ u64 tx_cnp;
+ u64 rx_cnp;
+ u64 rx_ecn_marked;
+};
+
int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res,
struct bnxt_qplib_sgid_tbl *sgid_tbl, int index,
struct bnxt_qplib_gid *gid);
int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
struct bnxt_qplib_gid *gid, u16 vlan_id, bool update);
int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id,
+ struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id,
bool update, u32 *index);
int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl,
- struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac);
+ struct bnxt_qplib_gid *gid, u16 gid_idx,
+ const u8 *smac);
int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res,
struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index,
u16 *pkey);
@@ -263,4 +289,7 @@ int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res,
int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids);
int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_roce_stats *stats);
+int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid,
+ struct bnxt_qplib_ext_stat *estat);
+
#endif /* __BNXT_QPLIB_SP_H__*/
diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
index 3e40e0d76efd..ecb719098b75 100644
--- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h
+++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h
@@ -1102,6 +1102,7 @@ struct cmdq_base {
#define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL
#define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL
#define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL
+ #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
u8 cmd_size;
__le16 flags;
__le16 cookie;
@@ -1127,6 +1128,10 @@ struct cmdq_create_qp {
#define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL
#define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL
#define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL
+ #define CMDQ_CREATE_QP_QP_FLAGS_LAST \
+ CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED
+
u8 type;
#define CMDQ_CREATE_QP_TYPE_RC 0x2UL
#define CMDQ_CREATE_QP_TYPE_UD 0x4UL
@@ -2848,6 +2853,7 @@ struct creq_query_func_resp_sb {
__le16 max_qp_wr;
__le16 dev_cap_flags;
#define CREQ_QUERY_FUNC_RESP_SB_DEV_CAP_FLAGS_RESIZE_QP 0x1UL
+ #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL
__le32 max_cq;
__le32 max_cqe;
__le32 max_pd;
@@ -3087,6 +3093,85 @@ struct creq_query_roce_stats_resp_sb {
__le64 active_qp_count_p3;
};
+/* cmdq_query_roce_stats_ext (size:192b/24B) */
+struct cmdq_query_roce_stats_ext {
+ u8 opcode;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \
+ CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS
+ u8 cmd_size;
+ __le16 flags;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL
+ __le16 cookie;
+ u8 resp_size;
+ u8 collection_id;
+ __le64 resp_addr;
+ __le32 function_id;
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8
+ #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL
+ __le32 reserved32;
+};
+
+/* creq_query_roce_stats_ext_resp (size:128b/16B) */
+struct creq_query_roce_stats_ext_resp {
+ u8 type;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT
+ u8 status;
+ __le16 cookie;
+ __le32 size;
+ u8 v;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL
+ u8 event;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT
+ u8 reserved48[6];
+};
+
+/* creq_query_roce_stats_ext_resp_sb (size:1536b/192B) */
+struct creq_query_roce_stats_ext_resp_sb {
+ u8 opcode;
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL
+ #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \
+ CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT
+ u8 status;
+ __le16 cookie;
+ __le16 flags;
+ u8 resp_size;
+ u8 rsvd;
+ __le64 tx_atomic_req_pkts;
+ __le64 tx_read_req_pkts;
+ __le64 tx_read_res_pkts;
+ __le64 tx_write_req_pkts;
+ __le64 tx_send_req_pkts;
+ __le64 tx_roce_pkts;
+ __le64 tx_roce_bytes;
+ __le64 rx_atomic_req_pkts;
+ __le64 rx_read_req_pkts;
+ __le64 rx_read_res_pkts;
+ __le64 rx_write_req_pkts;
+ __le64 rx_send_req_pkts;
+ __le64 rx_roce_pkts;
+ __le64 rx_roce_bytes;
+ __le64 rx_roce_good_pkts;
+ __le64 rx_roce_good_bytes;
+ __le64 rx_out_of_buffer_pkts;
+ __le64 rx_out_of_sequence_pkts;
+ __le64 tx_cnp_pkts;
+ __le64 rx_cnp_pkts;
+ __le64 rx_ecn_marked_pkts;
+ __le64 tx_cnp_bytes;
+ __le64 rx_cnp_bytes;
+};
+
/* QP error notification event (16 bytes) */
struct creq_qp_error_notification {
u8 type;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 291471d12197..913f39ee4416 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -4464,6 +4464,5 @@ int __init c4iw_cm_init(void)
void c4iw_cm_term(void)
{
WARN_ON(!list_empty(&timeout_list));
- flush_workqueue(workq);
destroy_workqueue(workq);
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 541dbcf22d0e..80970a1738f8 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -1562,7 +1562,6 @@ static void __exit c4iw_exit_module(void)
kfree(ctx);
}
mutex_unlock(&dev_mutex);
- flush_workqueue(reg_workq);
destroy_workqueue(reg_workq);
cxgb4_unregister_uld(CXGB4_ULD_RDMA);
c4iw_cm_term();
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index e7337662aff8..0c8fd5a85fcb 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -366,23 +366,23 @@ enum counters {
NR_COUNTERS
};
-static const char * const names[] = {
- [IP4INSEGS] = "ip4InSegs",
- [IP4OUTSEGS] = "ip4OutSegs",
- [IP4RETRANSSEGS] = "ip4RetransSegs",
- [IP4OUTRSTS] = "ip4OutRsts",
- [IP6INSEGS] = "ip6InSegs",
- [IP6OUTSEGS] = "ip6OutSegs",
- [IP6RETRANSSEGS] = "ip6RetransSegs",
- [IP6OUTRSTS] = "ip6OutRsts"
+static const struct rdma_stat_desc cxgb4_descs[] = {
+ [IP4INSEGS].name = "ip4InSegs",
+ [IP4OUTSEGS].name = "ip4OutSegs",
+ [IP4RETRANSSEGS].name = "ip4RetransSegs",
+ [IP4OUTRSTS].name = "ip4OutRsts",
+ [IP6INSEGS].name = "ip6InSegs",
+ [IP6OUTSEGS].name = "ip6OutSegs",
+ [IP6RETRANSSEGS].name = "ip6RetransSegs",
+ [IP6OUTRSTS].name = "ip6OutRsts"
};
static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev)
{
- BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS);
+ BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS);
/* FIXME: these look like port stats */
- return rdma_alloc_hw_stats_struct(names, NR_COUNTERS,
+ return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
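The bare string array becomes an array of struct rdma_stat_desc so each counter carries a descriptor rather than just a name; only the .name member is filled, via designated initializers. The same pattern for a hypothetical driver (enum and counter names are illustrative, not from this patch):

#include <rdma/ib_verbs.h>

enum { DEMO_TX_PKTS, DEMO_RX_PKTS, DEMO_NR_COUNTERS };

static const struct rdma_stat_desc demo_descs[] = {
	[DEMO_TX_PKTS].name = "tx_pkts",
	[DEMO_RX_PKTS].name = "rx_pkts",
};

static struct rdma_hw_stats *demo_alloc_stats(struct ib_device *ibdev)
{
	BUILD_BUG_ON(ARRAY_SIZE(demo_descs) != DEMO_NR_COUNTERS);
	return rdma_alloc_hw_stats_struct(demo_descs, DEMO_NR_COUNTERS,
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}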
diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h
index 87b1dadeb7fe..7352a1f5d811 100644
--- a/drivers/infiniband/hw/efa/efa.h
+++ b/drivers/infiniband/hw/efa/efa.h
@@ -20,14 +20,14 @@
#define EFA_IRQNAME_SIZE 40
-/* 1 for AENQ + ADMIN */
-#define EFA_NUM_MSIX_VEC 1
#define EFA_MGMNT_MSIX_VEC_IDX 0
+#define EFA_COMP_EQS_VEC_BASE 1
struct efa_irq {
irq_handler_t handler;
void *data;
u32 irqn;
+ u32 vector;
cpumask_t affinity_hint_mask;
char name[EFA_IRQNAME_SIZE];
};
@@ -61,6 +61,13 @@ struct efa_dev {
struct efa_irq admin_irq;
struct efa_stats stats;
+
+ /* Array of completion EQs */
+ struct efa_eq *eqs;
+ unsigned int neqs;
+
+ /* Only stores CQs with interrupts enabled */
+ struct xarray cqs_xa;
};
struct efa_ucontext {
@@ -84,8 +91,11 @@ struct efa_cq {
dma_addr_t dma_addr;
void *cpu_addr;
struct rdma_user_mmap_entry *mmap_entry;
+ struct rdma_user_mmap_entry *db_mmap_entry;
size_t size;
u16 cq_idx;
+ /* NULL when no interrupts requested */
+ struct efa_eq *eq;
};
struct efa_qp {
@@ -116,6 +126,11 @@ struct efa_ah {
u8 id[EFA_GID_SIZE];
};
+struct efa_eq {
+ struct efa_com_eq eeq;
+ struct efa_irq irq;
+};
+
int efa_query_device(struct ib_device *ibdev,
struct ib_device_attr *props,
struct ib_udata *udata);
@@ -139,6 +154,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
u64 virt_addr, int access_flags,
struct ib_udata *udata);
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata);
int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num,
struct ib_port_immutable *immutable);
diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
index fa38b34eddb8..0b0b93b529f3 100644
--- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h
@@ -28,7 +28,9 @@ enum efa_admin_aq_opcode {
EFA_ADMIN_DEALLOC_PD = 15,
EFA_ADMIN_ALLOC_UAR = 16,
EFA_ADMIN_DEALLOC_UAR = 17,
- EFA_ADMIN_MAX_OPCODE = 17,
+ EFA_ADMIN_CREATE_EQ = 18,
+ EFA_ADMIN_DESTROY_EQ = 19,
+ EFA_ADMIN_MAX_OPCODE = 19,
};
enum efa_admin_aq_feature_id {
@@ -38,6 +40,7 @@ enum efa_admin_aq_feature_id {
EFA_ADMIN_QUEUE_ATTR = 4,
EFA_ADMIN_HW_HINTS = 5,
EFA_ADMIN_HOST_INFO = 6,
+ EFA_ADMIN_EVENT_QUEUE_ATTR = 7,
};
/* QP transport type */
@@ -430,8 +433,8 @@ struct efa_admin_create_cq_cmd {
/*
* 4:0 : reserved5 - MBZ
* 5 : interrupt_mode_enabled - if set, cq operates
- * in interrupt mode (i.e. CQ events and MSI-X are
- * generated), otherwise - polling
+ * in interrupt mode (i.e. CQ events and EQ elements
+ * are generated), otherwise - polling
* 6 : virt - If set, ring base address is virtual
* (IOVA returned by MR registration)
* 7 : reserved6 - MBZ
@@ -448,8 +451,11 @@ struct efa_admin_create_cq_cmd {
/* completion queue depth in # of entries. must be power of 2 */
u16 cq_depth;
- /* msix vector assigned to this cq */
- u32 msix_vector_idx;
+ /* EQ number assigned to this cq */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
/*
* CQ ring base address, virtual or physical depending on 'virt'
@@ -480,6 +486,15 @@ struct efa_admin_create_cq_resp {
/* actual cq depth in number of entries */
u16 cq_actual_depth;
+
+ /* CQ doorbell address, as offset to PCIe DB BAR */
+ u32 db_offset;
+
+ /*
+ * 0 : db_valid - If set, doorbell offset is valid.
+ * Always set when interrupts are requested.
+ */
+ u32 flags;
};
struct efa_admin_destroy_cq_cmd {
@@ -669,6 +684,17 @@ struct efa_admin_feature_queue_attr_desc {
u16 max_tx_batch;
};
+struct efa_admin_event_queue_attr_desc {
+ /* The maximum number of event queues supported */
+ u32 max_eq;
+
+ /* Maximum number of EQEs per Event Queue */
+ u32 max_eq_depth;
+
+ /* Supported events bitmask */
+ u32 event_bitmask;
+};
+
struct efa_admin_feature_aenq_desc {
/* bitmask for AENQ groups the device can report */
u32 supported_groups;
@@ -727,6 +753,8 @@ struct efa_admin_get_feature_resp {
struct efa_admin_feature_queue_attr_desc queue_attr;
+ struct efa_admin_event_queue_attr_desc event_queue_attr;
+
struct efa_admin_hw_hints hw_hints;
} u;
};
@@ -810,6 +838,60 @@ struct efa_admin_dealloc_uar_resp {
struct efa_admin_acq_common_desc acq_common_desc;
};
+struct efa_admin_create_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* Size of the EQ in entries, must be power of 2 */
+ u16 depth;
+
+ /* MSI-X table entry index */
+ u8 msix_vec;
+
+ /*
+ * 4:0 : entry_size_words - size of EQ entry in
+ * 32-bit words
+ * 7:5 : reserved - MBZ
+ */
+ u8 caps;
+
+ /* EQ ring base address */
+ struct efa_common_mem_addr ba;
+
+ /*
+ * Enabled events on this EQ
+ * 0 : completion_events - Enable completion events
+ * 31:1 : reserved - MBZ
+ */
+ u32 event_bitmask;
+
+ /* MBZ */
+ u32 reserved;
+};
+
+struct efa_admin_create_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_cmd {
+ struct efa_admin_aq_common_desc aq_common_descriptor;
+
+ /* EQ number */
+ u16 eqn;
+
+ /* MBZ */
+ u16 reserved;
+};
+
+struct efa_admin_destroy_eq_resp {
+ struct efa_admin_acq_common_desc acq_common_desc;
+};
+
/* asynchronous event notification groups */
enum efa_admin_aenq_group {
EFA_ADMIN_FATAL_ERROR = 1,
@@ -899,10 +981,18 @@ struct efa_admin_host_info {
#define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6)
#define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+/* create_cq_resp */
+#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0)
+
/* feature_device_attr_desc */
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0)
#define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1)
+/* create_eq_cmd */
+#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0)
+#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK BIT(6)
+#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK BIT(0)
+
/* host_info */
#define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0)
#define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8)
diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h
index 78ff9389ae25..83f20c38a840 100644
--- a/drivers/infiniband/hw/efa/efa_admin_defs.h
+++ b/drivers/infiniband/hw/efa/efa_admin_defs.h
@@ -118,6 +118,43 @@ struct efa_admin_aenq_entry {
u32 inline_data_w4[12];
};
+enum efa_admin_eqe_event_type {
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION = 0,
+};
+
+/* Completion event */
+struct efa_admin_comp_event {
+ /* CQ number */
+ u16 cqn;
+
+ /* MBZ */
+ u16 reserved;
+
+ /* MBZ */
+ u32 reserved2;
+};
+
+/* Event Queue Element */
+struct efa_admin_eqe {
+ /*
+ * 0 : phase
+ * 8:1 : event_type - Event type
+ * 31:9 : reserved - MBZ
+ */
+ u32 common;
+
+ /* MBZ */
+ u32 reserved;
+
+ union {
+ /* Event data */
+ u32 event_data[2];
+
+ /* Completion Event */
+ struct efa_admin_comp_event comp_event;
+ } u;
+};
+
/* aq_common_desc */
#define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0)
#define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0)
@@ -131,4 +168,8 @@ struct efa_admin_aenq_entry {
/* aenq_common_desc */
#define EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+/* eqe */
+#define EFA_ADMIN_EQE_PHASE_MASK BIT(0)
+#define EFA_ADMIN_EQE_EVENT_TYPE_MASK GENMASK(8, 1)
+
#endif /* _EFA_ADMIN_H_ */
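The EQE packs a phase bit and an event type into its 32-bit common word, described by the masks above. A hedged sketch of decoding one word with the generic bitfield helpers; the driver itself wraps this in its EFA_GET() macro, and the function below is hypothetical and assumes this header is included:

#include <linux/bitfield.h>
#include <linux/types.h>

static bool demo_eqe_is_completion(u32 common, u8 expected_phase)
{
	/* The entry is only valid once the device has flipped the phase bit. */
	if (FIELD_GET(EFA_ADMIN_EQE_PHASE_MASK, common) != expected_phase)
		return false;

	return FIELD_GET(EFA_ADMIN_EQE_EVENT_TYPE_MASK, common) ==
	       EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION;
}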
diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c
index 0d523ad736c7..16a24a05fc2a 100644
--- a/drivers/infiniband/hw/efa/efa_com.c
+++ b/drivers/infiniband/hw/efa/efa_com.c
@@ -56,11 +56,19 @@ static const char *efa_com_cmd_str(u8 cmd)
EFA_CMD_STR_CASE(DEALLOC_PD);
EFA_CMD_STR_CASE(ALLOC_UAR);
EFA_CMD_STR_CASE(DEALLOC_UAR);
+ EFA_CMD_STR_CASE(CREATE_EQ);
+ EFA_CMD_STR_CASE(DESTROY_EQ);
default: return "unknown command opcode";
}
#undef EFA_CMD_STR_CASE
}
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
+{
+ *addr_low = lower_32_bits(addr);
+ *addr_high = upper_32_bits(addr);
+}
+
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
@@ -1081,3 +1089,159 @@ int efa_com_dev_reset(struct efa_com_dev *edev,
return 0;
}
+
+static int efa_com_create_eq(struct efa_com_dev *edev,
+ struct efa_com_create_eq_params *params,
+ struct efa_com_create_eq_result *result)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_create_eq_resp resp = {};
+ struct efa_admin_create_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
+ EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
+ params->entry_size_in_bytes / 4);
+ cmd.depth = params->depth;
+ cmd.event_bitmask = params->event_bitmask;
+ cmd.msix_vec = params->msix_vec;
+
+ efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
+ &cmd.ba.mem_addr_low);
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err) {
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to create eq[%d]\n", err);
+ return err;
+ }
+
+ result->eqn = resp.eqn;
+
+ return 0;
+}
+
+static void efa_com_destroy_eq(struct efa_com_dev *edev,
+ struct efa_com_destroy_eq_params *params)
+{
+ struct efa_com_admin_queue *aq = &edev->aq;
+ struct efa_admin_destroy_eq_resp resp = {};
+ struct efa_admin_destroy_eq_cmd cmd = {};
+ int err;
+
+ cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
+ cmd.eqn = params->eqn;
+
+ err = efa_com_cmd_exec(aq,
+ (struct efa_admin_aq_entry *)&cmd,
+ sizeof(cmd),
+ (struct efa_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (err)
+ ibdev_err_ratelimited(edev->efa_dev,
+ "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
+ err);
+}
+
+static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ u32 val = 0;
+
+ EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
+ EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);
+
+ writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
+}
+
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq)
+{
+ struct efa_admin_eqe *eqe;
+ u32 processed = 0;
+ u8 phase;
+ u32 ci;
+
+ ci = eeq->cc & (eeq->depth - 1);
+ phase = eeq->phase;
+ eqe = &eeq->eqes[ci];
+
+ /* Go over all the events */
+ while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
+ /*
+ * Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+ dma_rmb();
+
+ eeq->cb(eeq, eqe);
+
+ /* Get next event entry */
+ ci++;
+ processed++;
+
+ if (ci == eeq->depth) {
+ ci = 0;
+ phase = !phase;
+ }
+
+ eqe = &eeq->eqes[ci];
+ }
+
+ eeq->cc += processed;
+ eeq->phase = phase;
+ efa_com_arm_eq(eeq->edev, eeq);
+}
+
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
+{
+ struct efa_com_destroy_eq_params params = {
+ .eqn = eeq->eqn,
+ };
+
+ efa_com_destroy_eq(edev, &params);
+ dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
+ eeq->eqes, eeq->dma_addr);
+}
+
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec)
+{
+ struct efa_com_create_eq_params params = {};
+ struct efa_com_create_eq_result result = {};
+ int err;
+
+ params.depth = depth;
+ params.entry_size_in_bytes = sizeof(*eeq->eqes);
+ EFA_SET(&params.event_bitmask,
+ EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
+ params.msix_vec = msix_vec;
+
+ eeq->eqes = dma_alloc_coherent(edev->dmadev,
+ params.depth * sizeof(*eeq->eqes),
+ &params.dma_addr, GFP_KERNEL);
+ if (!eeq->eqes)
+ return -ENOMEM;
+
+ err = efa_com_create_eq(edev, &params, &result);
+ if (err)
+ goto err_free_coherent;
+
+ eeq->eqn = result.eqn;
+ eeq->edev = edev;
+ eeq->dma_addr = params.dma_addr;
+ eeq->phase = 1;
+ eeq->depth = params.depth;
+ eeq->cb = cb;
+ efa_com_arm_eq(edev, eeq);
+
+ return 0;
+
+err_free_coherent:
+ dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
+ eeq->eqes, params.dma_addr);
+ return err;
+}
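efa_com_eq_comp_intr_handler() above consumes the EQ ring by comparing each entry's phase bit against the expected phase, and it reads the payload only after dma_rmb() orders that read behind the check. A condensed, hypothetical version of the same consume loop for a generic descriptor ring (names are illustrative):

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_desc {
	u32 flags;		/* bit 0: phase, written last by the device */
	u32 payload;
};

/* Consume newly valid entries; depth must be a power of two. */
static u32 demo_poll_ring(struct demo_desc *ring, u32 depth, u32 *cc, u8 *phase)
{
	u32 ci = *cc & (depth - 1);
	u32 done = 0;

	while ((READ_ONCE(ring[ci].flags) & 1) == *phase) {
		dma_rmb();	/* read the payload only after the phase bit */
		/* ... handle ring[ci].payload ... */
		if (++ci == depth) {
			ci = 0;
			*phase = !*phase;
		}
		done++;
	}
	*cc += done;
	return done;
}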
diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h
index 5e4c88877ddb..77282234ce68 100644
--- a/drivers/infiniband/hw/efa/efa_com.h
+++ b/drivers/infiniband/hw/efa/efa_com.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_H_
@@ -80,6 +80,9 @@ struct efa_com_admin_queue {
};
struct efa_aenq_handlers;
+struct efa_com_eq;
+typedef void (*efa_eqe_handler)(struct efa_com_eq *eeq,
+ struct efa_admin_eqe *eqe);
struct efa_com_aenq {
struct efa_admin_aenq_entry *entries;
@@ -112,6 +115,33 @@ struct efa_com_dev {
struct efa_com_mmio_read mmio_read;
};
+struct efa_com_eq {
+ struct efa_com_dev *edev;
+ struct efa_admin_eqe *eqes;
+ dma_addr_t dma_addr;
+ u32 cc; /* Consumer counter */
+ u16 eqn;
+ u16 depth;
+ u8 phase;
+ efa_eqe_handler cb;
+};
+
+struct efa_com_create_eq_params {
+ dma_addr_t dma_addr;
+ u32 event_bitmask;
+ u16 depth;
+ u8 entry_size_in_bytes;
+ u8 msix_vec;
+};
+
+struct efa_com_create_eq_result {
+ u16 eqn;
+};
+
+struct efa_com_destroy_eq_params {
+ u16 eqn;
+};
+
typedef void (*efa_aenq_handler)(void *data,
struct efa_admin_aenq_entry *aenq_e);
@@ -121,9 +151,13 @@ struct efa_aenq_handlers {
efa_aenq_handler unimplemented_handler;
};
+void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_admin_init(struct efa_com_dev *edev,
struct efa_aenq_handlers *aenq_handlers);
void efa_com_admin_destroy(struct efa_com_dev *edev);
+int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
+ efa_eqe_handler cb, u16 depth, u8 msix_vec);
+void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq);
int efa_com_dev_reset(struct efa_com_dev *edev,
enum efa_regs_reset_reason_types reset_reason);
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling);
@@ -140,5 +174,7 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
struct efa_admin_acq_entry *comp,
size_t comp_size);
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data);
+void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
+ struct efa_com_eq *eeq);
#endif /* _EFA_COM_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c
index f752ef64159c..fb405da4e1db 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.c
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.c
@@ -1,17 +1,11 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#include "efa_com.h"
#include "efa_com_cmd.h"
-void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
-{
- *addr_low = lower_32_bits(addr);
- *addr_high = upper_32_bits(addr);
-}
-
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res)
@@ -157,7 +151,7 @@ int efa_com_create_cq(struct efa_com_dev *edev,
struct efa_com_create_cq_params *params,
struct efa_com_create_cq_result *result)
{
- struct efa_admin_create_cq_resp cmd_completion;
+ struct efa_admin_create_cq_resp cmd_completion = {};
struct efa_admin_create_cq_cmd create_cmd = {};
struct efa_com_admin_queue *aq = &edev->aq;
int err;
@@ -169,6 +163,11 @@ int efa_com_create_cq(struct efa_com_dev *edev,
create_cmd.cq_depth = params->cq_depth;
create_cmd.num_sub_cqs = params->num_sub_cqs;
create_cmd.uar = params->uarn;
+ if (params->interrupt_mode_enabled) {
+ EFA_SET(&create_cmd.cq_caps_1,
+ EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1);
+ create_cmd.eqn = params->eqn;
+ }
efa_com_set_dma_addr(params->dma_addr,
&create_cmd.cq_ba.mem_addr_high,
@@ -187,6 +186,9 @@ int efa_com_create_cq(struct efa_com_dev *edev,
result->cq_idx = cmd_completion.cq_idx;
result->actual_depth = params->cq_depth;
+ result->db_off = cmd_completion.db_offset;
+ result->db_valid = EFA_GET(&cmd_completion.flags,
+ EFA_ADMIN_CREATE_CQ_RESP_DB_VALID);
return 0;
}
@@ -497,6 +499,23 @@ int efa_com_get_device_attr(struct efa_com_dev *edev,
sizeof(resp.u.network_attr.addr));
result->mtu = resp.u.network_attr.mtu;
+ if (efa_com_check_supported_feature_id(edev,
+ EFA_ADMIN_EVENT_QUEUE_ATTR)) {
+ err = efa_com_get_feature(edev, &resp,
+ EFA_ADMIN_EVENT_QUEUE_ATTR);
+ if (err) {
+ ibdev_err_ratelimited(
+ edev->efa_dev,
+ "Failed to get event queue attributes %d\n",
+ err);
+ return err;
+ }
+
+ result->max_eq = resp.u.event_queue_attr.max_eq;
+ result->max_eq_depth = resp.u.event_queue_attr.max_eq_depth;
+ result->event_bitmask = resp.u.event_queue_attr.event_bitmask;
+ }
+
return 0;
}
diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h
index eea4ebfbe6ec..c33010bbf9e8 100644
--- a/drivers/infiniband/hw/efa/efa_com_cmd.h
+++ b/drivers/infiniband/hw/efa/efa_com_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_COM_CMD_H_
@@ -73,7 +73,9 @@ struct efa_com_create_cq_params {
u16 cq_depth;
u16 num_sub_cqs;
u16 uarn;
+ u16 eqn;
u8 entry_size_in_bytes;
+ bool interrupt_mode_enabled;
};
struct efa_com_create_cq_result {
@@ -81,6 +83,8 @@ struct efa_com_create_cq_result {
u16 cq_idx;
/* actual cq depth in # of entries */
u16 actual_depth;
+ u32 db_off;
+ bool db_valid;
};
struct efa_com_destroy_cq_params {
@@ -125,6 +129,9 @@ struct efa_com_get_device_attr_result {
u32 max_llq_size;
u32 max_rdma_size;
u32 device_caps;
+ u32 max_eq;
+ u32 max_eq_depth;
+ u32 event_bitmask; /* EQ events bitmask */
u16 sub_cqs_per_cq;
u16 max_sq_sge;
u16 max_rq_sge;
@@ -260,7 +267,6 @@ union efa_com_get_stats_result {
struct efa_com_rdma_read_stats rdma_read_stats;
};
-void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low);
int efa_com_create_qp(struct efa_com_dev *edev,
struct efa_com_create_qp_params *params,
struct efa_com_create_qp_result *res);
diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c
index 417dea5f90cf..94b94cca4870 100644
--- a/drivers/infiniband/hw/efa/efa_main.c
+++ b/drivers/infiniband/hw/efa/efa_main.c
@@ -67,6 +67,47 @@ static void efa_release_bars(struct efa_dev *dev, int bars_mask)
pci_release_selected_regions(pdev, release_bars);
}
+static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
+{
+ u16 cqn = eqe->u.comp_event.cqn;
+ struct efa_cq *cq;
+
+ /* Safe to load as we're in irq and removal calls synchronize_irq() */
+ cq = xa_load(&dev->cqs_xa, cqn);
+ if (unlikely(!cq)) {
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Completion event on non-existent CQ[%u]",
+ cqn);
+ return;
+ }
+
+ cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
+}
+
+static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
+{
+ struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);
+
+ if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
+ EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
+ efa_process_comp_eqe(dev, eqe);
+ else
+ ibdev_err_ratelimited(&dev->ibdev,
+ "Unknown event type received %lu",
+ EFA_GET(&eqe->common,
+ EFA_ADMIN_EQE_EVENT_TYPE));
+}
+
+static irqreturn_t efa_intr_msix_comp(int irq, void *data)
+{
+ struct efa_eq *eq = data;
+ struct efa_com_dev *edev = eq->eeq.edev;
+
+ efa_com_eq_comp_intr_handler(edev, &eq->eeq);
+
+ return IRQ_HANDLED;
+}
+
static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
struct efa_dev *dev = data;
@@ -77,26 +118,43 @@ static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
return IRQ_HANDLED;
}
-static int efa_request_mgmnt_irq(struct efa_dev *dev)
+static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
- struct efa_irq *irq;
int err;
- irq = &dev->admin_irq;
err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
if (err) {
- dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
- err);
+ dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
+ irq->name, err);
return err;
}
- dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
- nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn);
irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);
return 0;
}
+static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
+ int vector)
+{
+ u32 cpu;
+
+ cpu = vector - EFA_COMP_EQS_VEC_BASE;
+ snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
+ pci_name(dev->pdev));
+ eq->irq.handler = efa_intr_msix_comp;
+ eq->irq.data = eq;
+ eq->irq.vector = vector;
+ eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
+ cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
+}
+
+static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
+{
+ irq_set_affinity_hint(irq->irqn, NULL);
+ free_irq(irq->irqn, irq->data);
+}
+
static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
u32 cpu;
@@ -105,8 +163,9 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev)
"efa-mgmnt@pci:%s", pci_name(dev->pdev));
dev->admin_irq.handler = efa_intr_msix_mgmnt;
dev->admin_irq.data = dev;
- dev->admin_irq.irqn =
- pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
+ dev->admin_irq.vector = dev->admin_msix_vector_idx;
+ dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
+ dev->admin_msix_vector_idx);
cpu = cpumask_first(cpu_online_mask);
cpumask_set_cpu(cpu,
&dev->admin_irq.affinity_hint_mask);
@@ -115,20 +174,11 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev)
dev->admin_irq.name);
}
-static void efa_free_mgmnt_irq(struct efa_dev *dev)
-{
- struct efa_irq *irq;
-
- irq = &dev->admin_irq;
- irq_set_affinity_hint(irq->irqn, NULL);
- free_irq(irq->irqn, irq->data);
-}
-
static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
efa_setup_mgmnt_irq(dev);
- return efa_request_mgmnt_irq(dev);
+ return efa_request_irq(dev, &dev->admin_irq);
}
static int efa_request_doorbell_bar(struct efa_dev *dev)
@@ -234,6 +284,72 @@ static void efa_set_host_info(struct efa_dev *dev)
dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}
+static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
+{
+ efa_com_eq_destroy(&dev->edev, &eq->eeq);
+ efa_free_irq(dev, &eq->irq);
+}
+
+static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
+{
+ int err;
+
+ efa_setup_comp_irq(dev, eq, msix_vec);
+ err = efa_request_irq(dev, &eq->irq);
+ if (err)
+ return err;
+
+ err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
+ dev->dev_attr.max_eq_depth, msix_vec);
+ if (err)
+ goto err_free_comp_irq;
+
+ return 0;
+
+err_free_comp_irq:
+ efa_free_irq(dev, &eq->irq);
+ return err;
+}
+
+static int efa_create_eqs(struct efa_dev *dev)
+{
+ unsigned int neqs = dev->dev_attr.max_eq;
+ int err;
+ int i;
+
+ neqs = min_t(unsigned int, neqs, num_online_cpus());
+ dev->neqs = neqs;
+ dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
+ if (!dev->eqs)
+ return -ENOMEM;
+
+ for (i = 0; i < neqs; i++) {
+ err = efa_create_eq(dev, &dev->eqs[i],
+ i + EFA_COMP_EQS_VEC_BASE);
+ if (err)
+ goto err_destroy_eqs;
+ }
+
+ return 0;
+
+err_destroy_eqs:
+ for (i--; i >= 0; i--)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+ kfree(dev->eqs);
+
+ return err;
+}
+
+static void efa_destroy_eqs(struct efa_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->neqs; i++)
+ efa_destroy_eq(dev, &dev->eqs[i]);
+
+ kfree(dev->eqs);
+}
+
static const struct ib_device_ops efa_dev_ops = {
.owner = THIS_MODULE,
.driver_id = RDMA_DRIVER_EFA,
@@ -264,6 +380,7 @@ static const struct ib_device_ops efa_dev_ops = {
.query_port = efa_query_port,
.query_qp = efa_query_qp,
.reg_user_mr = efa_reg_mr,
+ .reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,
INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
@@ -300,23 +417,29 @@ static int efa_ib_device_add(struct efa_dev *dev)
if (err)
goto err_release_doorbell_bar;
+ err = efa_create_eqs(dev);
+ if (err)
+ goto err_release_doorbell_bar;
+
efa_set_host_info(dev);
dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
dev->ibdev.phys_port_cnt = 1;
- dev->ibdev.num_comp_vectors = 1;
+ dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
dev->ibdev.dev.parent = &pdev->dev;
ib_set_device_ops(&dev->ibdev, &efa_dev_ops);
err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
if (err)
- goto err_release_doorbell_bar;
+ goto err_destroy_eqs;
ibdev_info(&dev->ibdev, "IB device registered\n");
return 0;
+err_destroy_eqs:
+ efa_destroy_eqs(dev);
err_release_doorbell_bar:
efa_release_doorbell_bar(dev);
return err;
@@ -324,9 +447,10 @@ err_release_doorbell_bar:
static void efa_ib_device_remove(struct efa_dev *dev)
{
- efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
ibdev_info(&dev->ibdev, "Unregister ib device\n");
ib_unregister_device(&dev->ibdev);
+ efa_destroy_eqs(dev);
+ efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
efa_release_doorbell_bar(dev);
}
@@ -339,8 +463,12 @@ static int efa_enable_msix(struct efa_dev *dev)
{
int msix_vecs, irq_num;
- /* Reserve the max msix vectors we might need */
- msix_vecs = EFA_NUM_MSIX_VEC;
+ /*
+ * Reserve the max msix vectors we might need, one vector is reserved
+ * for admin.
+ */
+ msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
+ num_online_cpus() + 1);
dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
msix_vecs);
@@ -421,6 +549,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
edev->efa_dev = dev;
edev->dmadev = &pdev->dev;
dev->pdev = pdev;
+ xa_init(&dev->cqs_xa);
bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
@@ -476,7 +605,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
return dev;
err_free_mgmnt_irq:
- efa_free_mgmnt_irq(dev);
+ efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
efa_disable_msix(dev);
err_reg_read_destroy:
@@ -499,11 +628,12 @@ static void efa_remove_device(struct pci_dev *pdev)
edev = &dev->edev;
efa_com_admin_destroy(edev);
- efa_free_mgmnt_irq(dev);
+ efa_free_irq(dev, &dev->admin_irq);
efa_disable_msix(dev);
efa_com_mmio_reg_read_destroy(edev);
devm_iounmap(&pdev->dev, edev->reg_bar);
efa_release_bars(dev, EFA_BASE_BAR_MASK);
+ xa_destroy(&dev->cqs_xa);
ib_dealloc_device(&dev->ibdev);
pci_disable_device(pdev);
}
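efa_process_comp_eqe() looks the CQ up in dev->cqs_xa from hard-IRQ context without taking a lock; that is safe because the destroy path (efa_destroy_cq() later in this patch) erases the entry and then calls synchronize_irq() before the CQ memory goes away. A hypothetical sketch of that teardown ordering for any IRQ-visible object:

#include <linux/xarray.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

static void demo_teardown_irq_visible(struct xarray *xa, unsigned long id,
				      unsigned int irqn, void *obj)
{
	xa_erase(xa, id);	/* new interrupts can no longer find the object */
	synchronize_irq(irqn);	/* wait out a handler that already found it */
	kfree(obj);		/* only now is it safe to free */
}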
diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h
index 4017982fe13b..714ae6258800 100644
--- a/drivers/infiniband/hw/efa/efa_regs_defs.h
+++ b/drivers/infiniband/hw/efa/efa_regs_defs.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */
/*
- * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef _EFA_REGS_H_
@@ -42,6 +42,7 @@ enum efa_regs_reset_reason_types {
#define EFA_REGS_MMIO_REG_READ_OFF 0x5c
#define EFA_REGS_MMIO_RESP_LO_OFF 0x60
#define EFA_REGS_MMIO_RESP_HI_OFF 0x64
+#define EFA_REGS_EQ_DB_OFF 0x68
/* version register */
#define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff
@@ -93,4 +94,8 @@ enum efa_regs_reset_reason_types {
#define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff
#define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000
+/* eq_db register */
+#define EFA_REGS_EQ_DB_EQN_MASK 0xffff
+#define EFA_REGS_EQ_DB_ARM_MASK 0x80000000
+
#endif /* _EFA_REGS_H_ */
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index e5f9d90aad5e..ecfe70eb5efb 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -3,6 +3,8 @@
* Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
@@ -60,13 +62,14 @@ struct efa_user_mmap_entry {
op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \
#define EFA_STATS_ENUM(ename, name) ename,
-#define EFA_STATS_STR(ename, name) [ename] = name,
+#define EFA_STATS_STR(ename, nam) \
+ [ename].name = nam,
enum efa_hw_device_stats {
EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM)
};
-static const char *const efa_device_stats_names[] = {
+static const struct rdma_stat_desc efa_device_stats_descs[] = {
EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR)
};
@@ -74,7 +77,7 @@ enum efa_hw_port_stats {
EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM)
};
-static const char *const efa_port_stats_names[] = {
+static const struct rdma_stat_desc efa_port_stats_descs[] = {
EFA_DEFINE_PORT_STATS(EFA_STATS_STR)
};
@@ -245,6 +248,9 @@ int efa_query_device(struct ib_device *ibdev,
if (EFA_DEV_CAP(dev, RNR_RETRY))
resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY;
+ if (dev->neqs)
+ resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS;
+
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
@@ -984,6 +990,12 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx)
return efa_com_destroy_cq(&dev->edev, &params);
}
+static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq)
+{
+ rdma_user_mmap_entry_remove(cq->db_mmap_entry);
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+}
+
int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibcq->device);
@@ -993,15 +1005,25 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
"Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n",
cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr);
- rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_cq_user_mmap_entries_remove(cq);
efa_destroy_cq_idx(dev, cq->cq_idx);
+ if (cq->eq) {
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
+ synchronize_irq(cq->eq->irq.irqn);
+ }
efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size,
DMA_FROM_DEVICE);
return 0;
}
+static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec)
+{
+ return &dev->eqs[vec];
+}
+
static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
- struct efa_ibv_create_cq_resp *resp)
+ struct efa_ibv_create_cq_resp *resp,
+ bool db_valid)
{
resp->q_mmap_size = cq->size;
cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
@@ -1011,6 +1033,21 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq,
if (!cq->mmap_entry)
return -ENOMEM;
+ if (db_valid) {
+ cq->db_mmap_entry =
+ efa_user_mmap_entry_insert(&cq->ucontext->ibucontext,
+ dev->db_bar_addr + resp->db_off,
+ PAGE_SIZE, EFA_MMAP_IO_NC,
+ &resp->db_mmap_key);
+ if (!cq->db_mmap_entry) {
+ rdma_user_mmap_entry_remove(cq->mmap_entry);
+ return -ENOMEM;
+ }
+
+ resp->db_off &= ~PAGE_MASK;
+ resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF;
+ }
+
return 0;
}
@@ -1019,8 +1056,8 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
{
struct efa_ucontext *ucontext = rdma_udata_to_drv_context(
udata, struct efa_ucontext, ibucontext);
+ struct efa_com_create_cq_params params = {};
struct efa_ibv_create_cq_resp resp = {};
- struct efa_com_create_cq_params params;
struct efa_com_create_cq_result result;
struct ib_device *ibdev = ibcq->device;
struct efa_dev *dev = to_edev(ibdev);
@@ -1065,7 +1102,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
goto err_out;
}
- if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_50)) {
+ if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) {
ibdev_dbg(ibdev,
"Incompatible ABI params, unknown fields in udata\n");
err = -EINVAL;
@@ -1101,29 +1138,45 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
params.dma_addr = cq->dma_addr;
params.entry_size_in_bytes = cmd.cq_entry_size;
params.num_sub_cqs = cmd.num_sub_cqs;
+ if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) {
+ cq->eq = efa_vec2eq(dev, attr->comp_vector);
+ params.eqn = cq->eq->eeq.eqn;
+ params.interrupt_mode_enabled = true;
+ }
+
err = efa_com_create_cq(&dev->edev, &params, &result);
if (err)
goto err_free_mapped;
+ resp.db_off = result.db_off;
resp.cq_idx = result.cq_idx;
cq->cq_idx = result.cq_idx;
cq->ibcq.cqe = result.actual_depth;
WARN_ON_ONCE(entries != result.actual_depth);
- err = cq_mmap_entries_setup(dev, cq, &resp);
+ err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid);
if (err) {
ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n",
cq->cq_idx);
goto err_destroy_cq;
}
+ if (cq->eq) {
+ err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL));
+ if (err) {
+ ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n",
+ cq->cq_idx);
+ goto err_remove_mmap;
+ }
+ }
+
if (udata->outlen) {
err = ib_copy_to_udata(udata, &resp,
min(sizeof(resp), udata->outlen));
if (err) {
ibdev_dbg(ibdev,
"Failed to copy udata for create_cq\n");
- goto err_remove_mmap;
+ goto err_xa_erase;
}
}
@@ -1132,8 +1185,11 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
return 0;
+err_xa_erase:
+ if (cq->eq)
+ xa_erase(&dev->cqs_xa, cq->cq_idx);
err_remove_mmap:
- rdma_user_mmap_entry_remove(cq->mmap_entry);
+ efa_cq_user_mmap_entries_remove(cq);
err_destroy_cq:
efa_destroy_cq_idx(dev, cq->cq_idx);
err_free_mapped:
@@ -1490,26 +1546,18 @@ static int efa_create_pbl(struct efa_dev *dev,
return 0;
}
-struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
- u64 virt_addr, int access_flags,
- struct ib_udata *udata)
+static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags,
+ struct ib_udata *udata)
{
struct efa_dev *dev = to_edev(ibpd->device);
- struct efa_com_reg_mr_params params = {};
- struct efa_com_reg_mr_result result = {};
- struct pbl_context pbl;
int supp_access_flags;
- unsigned int pg_sz;
struct efa_mr *mr;
- int inline_size;
- int err;
if (udata && udata->inlen &&
!ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) {
ibdev_dbg(&dev->ibdev,
"Incompatible ABI params, udata not cleared\n");
- err = -EINVAL;
- goto err_out;
+ return ERR_PTR(-EINVAL);
}
supp_access_flags =
@@ -1521,23 +1569,26 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
ibdev_dbg(&dev->ibdev,
"Unsupported access flags[%#x], supported[%#x]\n",
access_flags, supp_access_flags);
- err = -EOPNOTSUPP;
- goto err_out;
+ return ERR_PTR(-EOPNOTSUPP);
}
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
- if (!mr) {
- err = -ENOMEM;
- goto err_out;
- }
+ if (!mr)
+ return ERR_PTR(-ENOMEM);
- mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
- if (IS_ERR(mr->umem)) {
- err = PTR_ERR(mr->umem);
- ibdev_dbg(&dev->ibdev,
- "Failed to pin and map user space memory[%d]\n", err);
- goto err_free;
- }
+ return mr;
+}
+
+static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start,
+ u64 length, u64 virt_addr, int access_flags)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_com_reg_mr_params params = {};
+ struct efa_com_reg_mr_result result = {};
+ struct pbl_context pbl;
+ unsigned int pg_sz;
+ int inline_size;
+ int err;
params.pd = to_epd(ibpd)->pdn;
params.iova = virt_addr;
@@ -1548,10 +1599,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
dev->dev_attr.page_size_cap,
virt_addr);
if (!pg_sz) {
- err = -EOPNOTSUPP;
ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n",
dev->dev_attr.page_size_cap);
- goto err_unmap;
+ return -EOPNOTSUPP;
}
params.page_shift = order_base_2(pg_sz);
@@ -1565,21 +1615,21 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
if (params.page_num <= inline_size) {
err = efa_create_inline_pbl(dev, mr, &params);
if (err)
- goto err_unmap;
+ return err;
err = efa_com_register_mr(&dev->edev, &params, &result);
if (err)
- goto err_unmap;
+ return err;
} else {
err = efa_create_pbl(dev, &pbl, mr, &params);
if (err)
- goto err_unmap;
+ return err;
err = efa_com_register_mr(&dev->edev, &params, &result);
pbl_destroy(dev, &pbl);
if (err)
- goto err_unmap;
+ return err;
}
mr->ibmr.lkey = result.l_key;
@@ -1587,9 +1637,78 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
mr->ibmr.length = length;
ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey);
+ return 0;
+}
+
+struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start,
+ u64 length, u64 virt_addr,
+ int fd, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct ib_umem_dmabuf *umem_dmabuf;
+ struct efa_mr *mr;
+ int err;
+
+ mr = efa_alloc_mr(ibpd, access_flags, udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd,
+ access_flags);
+ if (IS_ERR(umem_dmabuf)) {
+ err = PTR_ERR(umem_dmabuf);
+ ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err);
+ goto err_free;
+ }
+
+ mr->umem = &umem_dmabuf->umem;
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
+ return &mr->ibmr;
+
+err_release:
+ ib_umem_release(mr->umem);
+err_free:
+ kfree(mr);
+err_out:
+ atomic64_inc(&dev->stats.reg_mr_err);
+ return ERR_PTR(err);
+}
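efa_reg_user_mr_dmabuf() obtains the pages through ib_umem_dmabuf_get_pinned() instead of ib_umem_get() and then shares the rest of the path via efa_register_mr(). From user space the equivalent registration would go through rdma-core, roughly as below; this assumes rdma-core's ibv_reg_dmabuf_mr() with its (pd, offset, length, iova, fd, access) argument order, and the dma-buf fd is a placeholder obtained elsewhere:

#include <infiniband/verbs.h>

static struct ibv_mr *demo_reg_dmabuf(struct ibv_pd *pd, int dmabuf_fd,
				      size_t len)
{
	/* offset 0: start of the dma-buf; iova 0: virtual base of the MR */
	return ibv_reg_dmabuf_mr(pd, 0, len, 0, dmabuf_fd,
				 IBV_ACCESS_LOCAL_WRITE);
}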
+
+struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length,
+ u64 virt_addr, int access_flags,
+ struct ib_udata *udata)
+{
+ struct efa_dev *dev = to_edev(ibpd->device);
+ struct efa_mr *mr;
+ int err;
+
+ mr = efa_alloc_mr(ibpd, access_flags, udata);
+ if (IS_ERR(mr)) {
+ err = PTR_ERR(mr);
+ goto err_out;
+ }
+
+ mr->umem = ib_umem_get(ibpd->device, start, length, access_flags);
+ if (IS_ERR(mr->umem)) {
+ err = PTR_ERR(mr->umem);
+ ibdev_dbg(&dev->ibdev,
+ "Failed to pin and map user space memory[%d]\n", err);
+ goto err_free;
+ }
+
+ err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags);
+ if (err)
+ goto err_release;
+
return &mr->ibmr;
-err_unmap:
+err_release:
ib_umem_release(mr->umem);
err_free:
kfree(mr);
@@ -1906,15 +2025,15 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags)
struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- return rdma_alloc_hw_stats_struct(efa_port_stats_names,
- ARRAY_SIZE(efa_port_stats_names),
+ return rdma_alloc_hw_stats_struct(efa_port_stats_descs,
+ ARRAY_SIZE(efa_port_stats_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev)
{
- return rdma_alloc_hw_stats_struct(efa_device_stats_names,
- ARRAY_SIZE(efa_device_stats_names),
+ return rdma_alloc_hw_stats_struct(efa_device_stats_descs,
+ ARRAY_SIZE(efa_device_stats_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1939,7 +2058,7 @@ static int efa_fill_device_stats(struct efa_dev *dev,
stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err);
stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err);
- return ARRAY_SIZE(efa_device_stats_names);
+ return ARRAY_SIZE(efa_device_stats_descs);
}
static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
@@ -1988,7 +2107,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats,
stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err;
stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes;
- return ARRAY_SIZE(efa_port_stats_names);
+ return ARRAY_SIZE(efa_port_stats_descs);
}
int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats,
diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig
index 519866b30a13..6eb739052121 100644
--- a/drivers/infiniband/hw/hfi1/Kconfig
+++ b/drivers/infiniband/hw/hfi1/Kconfig
@@ -1,12 +1,12 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_HFI1
- tristate "Intel OPA Gen1 support"
+ tristate "Cornelis OPX Gen1 support"
depends on X86_64 && INFINIBAND_RDMAVT && I2C
select MMU_NOTIFIER
select CRC32
select I2C_ALGOBIT
help
- This is a low-level driver for Intel OPA Gen1 adapter.
+ This is a low-level driver for Cornelis OPX Gen1 adapter.
config HFI1_DEBUG_SDMA_ORDER
bool "HFI1 SDMA Order debug"
depends on INFINIBAND_HFI1
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c
index 37273dc0c03c..ec37f4fd8e96 100644
--- a/drivers/infiniband/hw/hfi1/chip.c
+++ b/drivers/infiniband/hw/hfi1/chip.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
/*
@@ -14918,7 +14919,7 @@ static int obtain_boardname(struct hfi1_devdata *dd)
{
/* generic board description */
const char generic[] =
- "Intel Omni-Path Host Fabric Interface Adapter 100 Series";
+ "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series";
unsigned long size;
int ret;
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c
index de411884386b..61f341c3005c 100644
--- a/drivers/infiniband/hw/hfi1/driver.c
+++ b/drivers/infiniband/hw/hfi1/driver.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015-2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
#include <linux/spinlock.h>
@@ -56,7 +57,7 @@ module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("Intel Omni-Path Architecture driver");
+MODULE_DESCRIPTION("Cornelis Omni-Path Express driver");
/*
 * MAX_PKT_RCV is the max # of packets processed per receive interrupt.
diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c
index f275dd1abed8..e8ed05516bf2 100644
--- a/drivers/infiniband/hw/hfi1/efivar.c
+++ b/drivers/infiniband/hw/hfi1/efivar.c
@@ -3,7 +3,9 @@
* Copyright(c) 2015, 2016 Intel Corporation.
*/
-#include <linux/ctype.h>
+#include <linux/string.h>
+#include <linux/string_helpers.h>
+
#include "efivar.h"
/* GUID for HFI1 variables in EFI */
@@ -112,7 +114,6 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
char prefix_name[64];
char name[64];
int result;
- int i;
/* create a common prefix */
snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x",
@@ -128,10 +129,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind,
* variable.
*/
if (result) {
- /* Converting to uppercase */
- for (i = 0; prefix_name[i]; i++)
- if (isalpha(prefix_name[i]))
- prefix_name[i] = toupper(prefix_name[i]);
+ string_upper(prefix_name, prefix_name);
snprintf(name, sizeof(name), "%s-%s", prefix_name, kind);
result = read_efi_var(name, size, return_data);
}
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index e3679d076eaa..dbd1c31830b9 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause
/*
* Copyright(c) 2015 - 2020 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*/
#include <linux/pci.h>
@@ -1342,7 +1343,7 @@ static void remove_one(struct pci_dev *);
static int init_one(struct pci_dev *, const struct pci_device_id *);
static void shutdown_one(struct pci_dev *);
-#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
+#define DRIVER_LOAD_MSG "Cornelis " DRIVER_NAME " loaded: "
#define PFX DRIVER_NAME ": "
const struct pci_device_id hfi1_pci_tbl[] = {
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 2cff38b105ac..909122934246 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -44,22 +44,52 @@ union hfi1_ipoib_flow {
};
/**
+ * struct ipoib_txreq - IPOIB transmit descriptor
+ * @txreq: sdma transmit request
+ * @sdma_hdr: 9b ib headers
+ * @sdma_status: status returned by sdma engine
+ * @complete: non-zero implies complete
+ * @priv: ipoib netdev private data
+ * @txq: txq on which skb was output
+ * @skb: skb to send
+ */
+struct ipoib_txreq {
+ struct sdma_txreq txreq;
+ struct hfi1_sdma_header sdma_hdr;
+ int sdma_status;
+ int complete;
+ struct hfi1_ipoib_dev_priv *priv;
+ struct hfi1_ipoib_txq *txq;
+ struct sk_buff *skb;
+};
+
+/**
* struct hfi1_ipoib_circ_buf - List of items to be processed
- * @items: ring of items
- * @head: ring head
- * @tail: ring tail
+ * @items: ring of items each a power of two size
* @max_items: max items + 1 that the ring can contain
- * @producer_lock: producer sync lock
- * @consumer_lock: consumer sync lock
+ * @shift: log2 of size for getting txreq
+ * @sent_txreqs: count of txreqs posted to sdma
+ * @tail: ring tail
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
+ * @complete_txreqs: count of txreqs completed by sdma
+ * @head: ring head
*/
-struct ipoib_txreq;
struct hfi1_ipoib_circ_buf {
- struct ipoib_txreq **items;
- unsigned long head;
- unsigned long tail;
- unsigned long max_items;
- spinlock_t producer_lock; /* head sync lock */
- spinlock_t consumer_lock; /* tail sync lock */
+ void *items;
+ u32 max_items;
+ u32 shift;
+ /* consumer cache line */
+ u64 ____cacheline_aligned_in_smp sent_txreqs;
+ u32 avail;
+ u32 tail;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
+ /* producer cache line */
+ u64 ____cacheline_aligned_in_smp complete_txreqs;
+ u32 head;
};
/**
@@ -68,33 +98,24 @@ struct hfi1_ipoib_circ_buf {
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
- * @stops: count of stops of queue
- * @ring_full: ring has been filled
- * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
* @wait: iowait structure
- * @complete_txreqs: count of txreqs completed by sdma
* @napi: pointer to tx napi interface
* @tx_ring: ring of ipoib txreqs to be reaped by napi callback
*/
struct hfi1_ipoib_txq {
+ struct napi_struct napi;
struct hfi1_ipoib_dev_priv *priv;
struct sdma_engine *sde;
struct list_head tx_list;
- u64 sent_txreqs;
- atomic_t stops;
- atomic_t ring_full;
- atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
struct iowait wait;
- atomic64_t ____cacheline_aligned_in_smp complete_txreqs;
- struct napi_struct *napi;
- struct hfi1_ipoib_circ_buf tx_ring;
+ struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring;
};
struct hfi1_ipoib_dev_priv {
@@ -102,15 +123,12 @@ struct hfi1_ipoib_dev_priv {
struct net_device *netdev;
struct ib_device *device;
struct hfi1_ipoib_txq *txqs;
- struct kmem_cache *txreq_cache;
- struct napi_struct *tx_napis;
+ const struct net_device_ops *netdev_ops;
+ struct rvt_qp *qp;
+ u32 qkey;
u16 pkey;
u16 pkey_index;
- u32 qkey;
u8 port_num;
-
- const struct net_device_ops *netdev_ops;
- struct rvt_qp *qp;
};
/* hfi1 ipoib rdma netdev's private data structure */
diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c
index e594a961f513..e1a2b02bbd91 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_main.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_main.c
@@ -11,7 +11,7 @@
#include "ipoib.h"
#include "hfi.h"
-static u32 qpn_from_mac(u8 *mac_arr)
+static u32 qpn_from_mac(const u8 *mac_arr)
{
return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
}
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 15b0cb0f363f..f4010890309f 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -22,24 +22,6 @@
#define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size)
#define CIRC_PREV(val, size) CIRC_ADD(val, -1, size)
-/**
- * struct ipoib_txreq - IPOIB transmit descriptor
- * @txreq: sdma transmit request
- * @sdma_hdr: 9b ib headers
- * @sdma_status: status returned by sdma engine
- * @priv: ipoib netdev private data
- * @txq: txq on which skb was output
- * @skb: skb to send
- */
-struct ipoib_txreq {
- struct sdma_txreq txreq;
- struct hfi1_sdma_header sdma_hdr;
- int sdma_status;
- struct hfi1_ipoib_dev_priv *priv;
- struct hfi1_ipoib_txq *txq;
- struct sk_buff *skb;
-};
-
struct ipoib_txparms {
struct hfi1_devdata *dd;
struct rdma_ah_attr *ah_attr;
@@ -51,28 +33,34 @@ struct ipoib_txparms {
u8 entropy;
};
-static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
+static struct ipoib_txreq *
+hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx)
+{
+ return (struct ipoib_txreq *)(r->items + (idx << r->shift));
+}
+
+static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
{
return sent - completed;
}
static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- return hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs));
+ return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs);
}
static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
{
trace_hfi1_txq_stop(txq);
- if (atomic_inc_return(&txq->stops) == 1)
+ if (atomic_inc_return(&txq->tx_ring.stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
{
trace_hfi1_txq_wake(txq);
- if (atomic_dec_and_test(&txq->stops))
+ if (atomic_dec_and_test(&txq->tx_ring.stops))
netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
}
@@ -90,9 +78,9 @@ static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
{
- ++txq->sent_txreqs;
+ ++txq->tx_ring.sent_txreqs;
if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
- !atomic_xchg(&txq->ring_full, 1)) {
+ !atomic_xchg(&txq->tx_ring.ring_full, 1)) {
trace_hfi1_txq_full(txq);
hfi1_ipoib_stop_txq(txq);
}
@@ -117,7 +105,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* to protect against ring overflow.
*/
if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
- atomic_xchg(&txq->ring_full, 0)) {
+ atomic_xchg(&txq->tx_ring.ring_full, 0)) {
trace_hfi1_txq_xmit_unstopped(txq);
hfi1_ipoib_wake_txq(txq);
}
@@ -125,7 +113,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
{
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
if (likely(!tx->sdma_status)) {
dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len);
@@ -139,97 +127,73 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
}
napi_consume_skb(tx->skb, budget);
+ tx->skb = NULL;
sdma_txclean(priv->dd, &tx->txreq);
- kmem_cache_free(priv->txreq_cache, tx);
}
-static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget)
+static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq)
{
struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
- unsigned long head;
- unsigned long tail;
- unsigned int max_tx;
- int work_done;
- int tx_count;
-
- spin_lock_bh(&tx_ring->consumer_lock);
-
- /* Read index before reading contents at that index. */
- head = smp_load_acquire(&tx_ring->head);
- tail = tx_ring->tail;
- max_tx = tx_ring->max_items;
-
- work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget);
+ int i;
+ struct ipoib_txreq *tx;
- for (tx_count = work_done; tx_count; tx_count--) {
- hfi1_ipoib_free_tx(tx_ring->items[tail], budget);
- tail = CIRC_NEXT(tail, max_tx);
+ for (i = 0; i < tx_ring->max_items; i++) {
+ tx = hfi1_txreq_from_idx(tx_ring, i);
+ tx->complete = 0;
+ dev_kfree_skb_any(tx->skb);
+ tx->skb = NULL;
+ sdma_txclean(txq->priv->dd, &tx->txreq);
}
+ tx_ring->head = 0;
+ tx_ring->tail = 0;
+ tx_ring->complete_txreqs = 0;
+ tx_ring->sent_txreqs = 0;
+ tx_ring->avail = hfi1_ipoib_ring_hwat(txq);
+}
- atomic64_add(work_done, &txq->complete_txreqs);
+static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget)
+{
+ struct hfi1_ipoib_txq *txq =
+ container_of(napi, struct hfi1_ipoib_txq, napi);
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 head = tx_ring->head;
+ u32 max_tx = tx_ring->max_items;
+ int work_done;
+ struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head);
- /* Finished freeing tx items so store the tail value. */
- smp_store_release(&tx_ring->tail, tail);
+ trace_hfi1_txq_poll(txq);
+ for (work_done = 0; work_done < budget; work_done++) {
+ /* See hfi1_ipoib_sdma_complete() */
+ if (!smp_load_acquire(&tx->complete))
+ break;
+ tx->complete = 0;
+ trace_hfi1_tx_produce(tx, head);
+ hfi1_ipoib_free_tx(tx, budget);
+ head = CIRC_NEXT(head, max_tx);
+ tx = hfi1_txreq_from_idx(tx_ring, head);
+ }
+ tx_ring->complete_txreqs += work_done;
- spin_unlock_bh(&tx_ring->consumer_lock);
+ /* Finished freeing tx items so store the head value. */
+ smp_store_release(&tx_ring->head, head);
hfi1_ipoib_check_queue_stopped(txq);
- return work_done;
-}
-
-static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget)
-{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev);
- struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis];
-
- int work_done = hfi1_ipoib_drain_tx_ring(txq, budget);
-
if (work_done < budget)
napi_complete_done(napi, work_done);
return work_done;
}
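The poller above pairs with hfi1_ipoib_sdma_complete(): the completion side publishes each descriptor with smp_store_release(&tx->complete, 1), and the napi side only touches a descriptor after smp_load_acquire() observes that flag, replacing the old producer/consumer spinlocks. A stripped-down, hypothetical sketch of that single-producer/single-consumer pairing:

#include <linux/compiler.h>
#include <asm/barrier.h>

struct demo_slot {
	int complete;		/* set by the completer, cleared by the poller */
	void *payload;
};

/* Completion side: make the payload visible before flagging the slot. */
static void demo_complete(struct demo_slot *slot, void *payload)
{
	slot->payload = payload;
	smp_store_release(&slot->complete, 1);
}

/* Poll side: only read the payload once the flag is observed set. */
static bool demo_poll_one(struct demo_slot *slot)
{
	if (!smp_load_acquire(&slot->complete))
		return false;
	slot->complete = 0;
	/* ... reap slot->payload ... */
	return true;
}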
-static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx)
-{
- struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring;
- unsigned long head;
- unsigned long tail;
- size_t max_tx;
-
- spin_lock(&tx_ring->producer_lock);
-
- head = tx_ring->head;
- tail = READ_ONCE(tx_ring->tail);
- max_tx = tx_ring->max_items;
-
- if (likely(CIRC_SPACE(head, tail, max_tx))) {
- tx_ring->items[head] = tx;
-
- /* Finish storing txreq before incrementing head. */
- smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx));
- napi_schedule_irqoff(tx->txq->napi);
- } else {
- struct hfi1_ipoib_txq *txq = tx->txq;
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
-
- /* Ring was full */
- hfi1_ipoib_free_tx(tx, 0);
- atomic64_inc(&txq->complete_txreqs);
- dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx);
- }
-
- spin_unlock(&tx_ring->producer_lock);
-}
-
static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status)
{
struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq);
+ trace_hfi1_txq_complete(tx->txq);
tx->sdma_status = status;
-
- hfi1_ipoib_add_tx(tx);
+ /* see hfi1_ipoib_poll_tx_ring */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule_irqoff(&tx->txq->napi);
}
static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx,
@@ -291,7 +255,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx,
static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
struct ipoib_txparms *txp)
{
- struct hfi1_ipoib_dev_priv *priv = tx->priv;
+ struct hfi1_ipoib_dev_priv *priv = tx->txq->priv;
struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr;
struct sk_buff *skb = tx->skb;
struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp);
@@ -362,7 +326,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx,
ohdr->bth[0] = cpu_to_be32(bth0);
ohdr->bth[1] = cpu_to_be32(txp->dqpn);
- ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs));
+ ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs));
/* Build the deth */
ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey);
@@ -385,19 +349,32 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
+ struct hfi1_ipoib_txq *txq = txp->txq;
struct ipoib_txreq *tx;
+ struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring;
+ u32 tail = tx_ring->tail;
int ret;
- tx = kmem_cache_alloc_node(priv->txreq_cache,
- GFP_ATOMIC,
- priv->dd->node);
- if (unlikely(!tx))
- return ERR_PTR(-ENOMEM);
+ if (unlikely(!tx_ring->avail)) {
+ u32 head;
+
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq))
+ /* This shouldn't happen with a stopped queue */
+ return ERR_PTR(-ENOMEM);
+ /* See hfi1_ipoib_poll_tx_ring() */
+ head = smp_load_acquire(&tx_ring->head);
+ tx_ring->avail =
+ min_t(u32, hfi1_ipoib_ring_hwat(txq),
+ CIRC_CNT(head, tail, tx_ring->max_items));
+ } else {
+ tx_ring->avail--;
+ }
+ tx = hfi1_txreq_from_idx(tx_ring, tail);
+ trace_hfi1_txq_alloc_tx(txq);
/* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
- tx->priv = priv;
- tx->txq = txp->txq;
+ tx->txq = txq;
tx->skb = skb;
INIT_LIST_HEAD(&tx->txreq.list);
@@ -405,21 +382,20 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
ret = hfi1_ipoib_build_tx_desc(tx, txp);
if (likely(!ret)) {
- if (txp->txq->flow.as_int != txp->flow.as_int) {
- txp->txq->flow.tx_queue = txp->flow.tx_queue;
- txp->txq->flow.sc5 = txp->flow.sc5;
- txp->txq->sde =
+ if (txq->flow.as_int != txp->flow.as_int) {
+ txq->flow.tx_queue = txp->flow.tx_queue;
+ txq->flow.sc5 = txp->flow.sc5;
+ txq->sde =
sdma_select_engine_sc(priv->dd,
txp->flow.tx_queue,
txp->flow.sc5);
- trace_hfi1_flow_switch(txp->txq);
+ trace_hfi1_flow_switch(txq);
}
return tx;
}
sdma_txclean(priv->dd, &tx->txreq);
- kmem_cache_free(priv->txreq_cache, tx);
return ERR_PTR(ret);
}
@@ -480,8 +456,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
struct sk_buff *skb,
struct ipoib_txparms *txp)
{
- struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
struct ipoib_txreq *tx;
int ret;
@@ -499,10 +475,14 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
return NETDEV_TX_OK;
}
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
tx_ok:
- trace_sdma_output_ibhdr(tx->priv->dd,
+ trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
hfi1_ipoib_check_queue_depth(txq);
@@ -514,9 +494,10 @@ tx_ok:
if (ret == -EBUSY || ret == -ECOMM)
goto tx_ok;
- sdma_txclean(priv->dd, &tx->txreq);
- dev_kfree_skb_any(skb);
- kmem_cache_free(priv->txreq_cache, tx);
+ /* mark complete and kick napi tx */
+ smp_store_release(&tx->complete, 1);
+ napi_schedule(&tx->txq->napi);
+
++dev->stats.tx_carrier_errors;
return NETDEV_TX_OK;
@@ -527,6 +508,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
struct ipoib_txparms *txp)
{
struct hfi1_ipoib_txq *txq = txp->txq;
+ struct hfi1_ipoib_circ_buf *tx_ring;
struct ipoib_txreq *tx;
/* Has the flow change ? */
@@ -556,11 +538,15 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
return NETDEV_TX_OK;
}
+ tx_ring = &txq->tx_ring;
+ trace_hfi1_tx_consume(tx, tx_ring->tail);
+ /* consume tx */
+ smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items));
list_add_tail(&tx->txreq.list, &txq->tx_list);
hfi1_ipoib_check_queue_depth(txq);
- trace_sdma_output_ibhdr(tx->priv->dd,
+ trace_sdma_output_ibhdr(txq->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
@@ -646,7 +632,7 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
if (list_empty(&txq->wait.list)) {
struct hfi1_ibport *ibp = &sde->ppd->ibport_data;
- if (!atomic_xchg(&txq->no_desc, 1)) {
+ if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) {
trace_hfi1_txq_queued(txq);
hfi1_ipoib_stop_txq(txq);
}
@@ -689,45 +675,29 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
if (likely(dev->reg_state == NETREG_REGISTERED) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- if (atomic_xchg(&txq->no_desc, 0))
+ if (atomic_xchg(&txq->tx_ring.no_desc, 0))
hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
{
struct net_device *dev = priv->netdev;
- char buf[HFI1_IPOIB_TXREQ_NAME_LEN];
- unsigned long tx_ring_size;
+ u32 tx_ring_size, tx_item_size;
int i;
/*
* Ring holds 1 less than tx_ring_size
* Round up to next power of 2 in order to hold at least tx_queue_len
*/
- tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1);
-
- snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit);
- priv->txreq_cache = kmem_cache_create(buf,
- sizeof(struct ipoib_txreq),
- 0,
- 0,
- NULL);
- if (!priv->txreq_cache)
- return -ENOMEM;
-
- priv->tx_napis = kcalloc_node(dev->num_tx_queues,
- sizeof(struct napi_struct),
- GFP_KERNEL,
- priv->dd->node);
- if (!priv->tx_napis)
- goto free_txreq_cache;
+ tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1);
+ tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq));
priv->txqs = kcalloc_node(dev->num_tx_queues,
sizeof(struct hfi1_ipoib_txq),
GFP_KERNEL,
priv->dd->node);
if (!priv->txqs)
- goto free_tx_napis;
+ return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
@@ -743,10 +713,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->priv = priv;
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
- atomic64_set(&txq->complete_txreqs, 0);
- atomic_set(&txq->stops, 0);
- atomic_set(&txq->ring_full, 0);
- atomic_set(&txq->no_desc, 0);
+ atomic_set(&txq->tx_ring.stops, 0);
+ atomic_set(&txq->tx_ring.ring_full, 0);
+ atomic_set(&txq->tx_ring.no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
@@ -756,19 +725,17 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
priv->dd->node);
txq->tx_ring.items =
- kcalloc_node(tx_ring_size,
- sizeof(struct ipoib_txreq *),
+ kcalloc_node(tx_ring_size, tx_item_size,
GFP_KERNEL, priv->dd->node);
if (!txq->tx_ring.items)
goto free_txqs;
- spin_lock_init(&txq->tx_ring.producer_lock);
- spin_lock_init(&txq->tx_ring.consumer_lock);
txq->tx_ring.max_items = tx_ring_size;
+ txq->tx_ring.shift = ilog2(tx_ring_size);
+ txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq);
- txq->napi = &priv->tx_napis[i];
- netif_tx_napi_add(dev, txq->napi,
- hfi1_ipoib_process_tx_ring,
+ netif_tx_napi_add(dev, &txq->napi,
+ hfi1_ipoib_poll_tx_ring,
NAPI_POLL_WEIGHT);
}
@@ -778,20 +745,12 @@ free_txqs:
for (i--; i >= 0; i--) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- netif_napi_del(txq->napi);
+ netif_napi_del(&txq->napi);
kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
priv->txqs = NULL;
-
-free_tx_napis:
- kfree(priv->tx_napis);
- priv->tx_napis = NULL;
-
-free_txreq_cache:
- kmem_cache_destroy(priv->txreq_cache);
- priv->txreq_cache = NULL;
return -ENOMEM;
}
@@ -799,7 +758,6 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
{
struct sdma_txreq *txreq;
struct sdma_txreq *txreq_tmp;
- atomic64_t *complete_txreqs = &txq->complete_txreqs;
list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) {
struct ipoib_txreq *tx =
@@ -808,16 +766,16 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
list_del(&txreq->list);
sdma_txclean(txq->priv->dd, &tx->txreq);
dev_kfree_skb_any(tx->skb);
- kmem_cache_free(txq->priv->txreq_cache, tx);
- atomic64_inc(complete_txreqs);
+ tx->skb = NULL;
+ txq->tx_ring.complete_txreqs++;
}
if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
- "txq %d not empty found %llu requests\n",
+ "txq %d not empty found %u requests\n",
txq->q_idx,
- hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(complete_txreqs)));
+ hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs,
+ txq->tx_ring.complete_txreqs));
}
void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
@@ -830,19 +788,13 @@ void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv)
iowait_cancel_work(&txq->wait);
iowait_sdma_drain(&txq->wait);
hfi1_ipoib_drain_tx_list(txq);
- netif_napi_del(txq->napi);
- (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ netif_napi_del(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
kfree(txq->tx_ring.items);
}
kfree(priv->txqs);
priv->txqs = NULL;
-
- kfree(priv->tx_napis);
- priv->tx_napis = NULL;
-
- kmem_cache_destroy(priv->txreq_cache);
- priv->txreq_cache = NULL;
}
void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
@@ -853,7 +805,7 @@ void hfi1_ipoib_napi_tx_enable(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- napi_enable(txq->napi);
+ napi_enable(&txq->napi);
}
}
@@ -865,8 +817,8 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev)
for (i = 0; i < dev->num_tx_queues; i++) {
struct hfi1_ipoib_txq *txq = &priv->txqs[i];
- napi_disable(txq->napi);
- (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items);
+ napi_disable(&txq->napi);
+ hfi1_ipoib_drain_tx_ring(txq);
}
}
@@ -874,23 +826,23 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q)
{
struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
struct hfi1_ipoib_txq *txq = &priv->txqs[q];
- u64 completed = atomic64_read(&txq->complete_txreqs);
dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n",
txq, q,
__netif_subqueue_stopped(dev, txq->q_idx),
- atomic_read(&txq->stops),
- atomic_read(&txq->no_desc),
- atomic_read(&txq->ring_full));
+ atomic_read(&txq->tx_ring.stops),
+ atomic_read(&txq->tx_ring.no_desc),
+ atomic_read(&txq->tx_ring.ring_full));
dd_dev_info(priv->dd, "sde %p engine %u\n",
txq->sde,
txq->sde ? txq->sde->this_idx : 0);
dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int);
dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n",
- txq->sent_txreqs, completed, hfi1_ipoib_used(txq));
- dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n",
+ txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs,
+ hfi1_ipoib_used(txq));
+ dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n",
dev->tx_queue_len, txq->tx_ring.max_items);
- dd_dev_info(priv->dd, "head %lu tail %lu\n",
+ dd_dev_info(priv->dd, "head %u tail %u\n",
txq->tx_ring.head, txq->tx_ring.tail);
dd_dev_info(priv->dd, "wait queued %u\n",
!list_empty(&txq->wait.list));
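
For reference, a minimal sketch of the single-producer/single-consumer ring discipline the reworked hfi1 IPoIB tx path relies on: one side advances head, the other advances tail, and an smp_store_release()/smp_load_acquire() pair orders the slot contents against the index update. All demo_* names below are illustrative, not the driver's; in the patch the roles are inverted (the xmit path claims slots at tail and NAPI poll retires them at head), but the ordering rules are the same.

#include <linux/circ_buf.h>
#include <linux/compiler.h>
#include <linux/errno.h>

struct demo_slot {
	void *payload;
};

struct demo_ring {
	unsigned int head;		/* written only by the producer */
	unsigned int tail;		/* written only by the consumer */
	unsigned int size;		/* must be a power of two */
	struct demo_slot *items;
};

/* producer side: fill the slot, then publish it by moving head */
static int demo_produce(struct demo_ring *r, void *payload)
{
	unsigned int head = r->head;
	unsigned int tail = READ_ONCE(r->tail);

	if (!CIRC_SPACE(head, tail, r->size))
		return -ENOBUFS;

	r->items[head].payload = payload;
	/* order the slot write before making the new head visible */
	smp_store_release(&r->head, (head + 1) & (r->size - 1));
	return 0;
}

/* consumer side: observe head with acquire, then read the slot */
static void *demo_consume(struct demo_ring *r)
{
	unsigned int tail = r->tail;
	unsigned int head = smp_load_acquire(&r->head);
	void *payload;

	if (!CIRC_CNT(head, tail, r->size))
		return NULL;

	payload = r->items[tail].payload;
	/* slot is free again once the new tail is published */
	smp_store_release(&r->tail, (tail + 1) & (r->size - 1));
	return payload;
}
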
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index 489b436f19bb..3d42bd2b36bd 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -878,6 +878,7 @@ void sc_disable(struct send_context *sc)
{
u64 reg;
struct pio_buf *pbuf;
+ LIST_HEAD(wake_list);
if (!sc)
return;
@@ -912,19 +913,21 @@ void sc_disable(struct send_context *sc)
spin_unlock(&sc->release_lock);
write_seqlock(&sc->waitlock);
- while (!list_empty(&sc->piowait)) {
+ if (!list_empty(&sc->piowait))
+ list_move(&sc->piowait, &wake_list);
+ write_sequnlock(&sc->waitlock);
+ while (!list_empty(&wake_list)) {
struct iowait *wait;
struct rvt_qp *qp;
struct hfi1_qp_priv *priv;
- wait = list_first_entry(&sc->piowait, struct iowait, list);
+ wait = list_first_entry(&wake_list, struct iowait, list);
qp = iowait_to_qp(wait);
priv = qp->priv;
list_del_init(&priv->s_iowait.list);
priv->s_iowait.lock = NULL;
hfi1_qp_wakeup(qp, RVT_S_WAIT_PIO | HFI1_S_WAIT_PIO_DRAIN);
}
- write_sequnlock(&sc->waitlock);
spin_unlock_irq(&sc->alloc_lock);
}
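
The sc_disable() change above follows the usual "detach the waiters under the lock, wake them after dropping it" idiom, so the wakeup callback never runs while sc->waitlock is held. A hedged sketch of that idiom with generic names (demo_waiter and demo_wake_all are illustrative; the driver transplants the list head with list_move() and uses a seqlock rather than a plain spinlock):

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_waiter {
	struct list_head list;
};

static void demo_wake(struct demo_waiter *w)
{
	/* placeholder for the real wakeup, e.g. a QP wakeup helper */
}

static void demo_wake_all(struct list_head *waitq, spinlock_t *lock)
{
	struct demo_waiter *w, *tmp;
	LIST_HEAD(wake_list);

	/* detach every waiter while holding the lock ... */
	spin_lock(lock);
	list_splice_init(waitq, &wake_list);
	spin_unlock(lock);

	/* ... and run the potentially lock-taking wakeups outside it */
	list_for_each_entry_safe(w, tmp, &wake_list, list) {
		list_del_init(&w->list);
		demo_wake(w);
	}
}
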
diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h
index 7318aa6150b5..ed1b9e1e4b17 100644
--- a/drivers/infiniband/hw/hfi1/trace_tx.h
+++ b/drivers/infiniband/hw/hfi1/trace_tx.h
@@ -917,20 +917,22 @@ DECLARE_EVENT_CLASS(/* AIP */
__entry->tail = txq->tx_ring.tail;
__entry->idx = txq->q_idx;
__entry->used =
- txq->sent_txreqs -
- atomic64_read(&txq->complete_txreqs);
+ txq->tx_ring.sent_txreqs -
+ txq->tx_ring.complete_txreqs;
__entry->flow = txq->flow.as_int;
- __entry->stops = atomic_read(&txq->stops);
- __entry->no_desc = atomic_read(&txq->no_desc);
+ __entry->stops = atomic_read(&txq->tx_ring.stops);
+ __entry->no_desc = atomic_read(&txq->tx_ring.no_desc);
__entry->stopped =
__netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
),
TP_printk(/* print */
- "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
+ "[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
__get_str(dev),
(unsigned long long)__entry->txq,
__entry->idx,
(unsigned long long)__entry->sde,
+ __entry->sde ? __entry->sde->this_idx : 0,
+ __entry->sde ? __entry->sde->cpu : 0,
__entry->head,
__entry->tail,
__entry->flow,
@@ -995,6 +997,65 @@ DEFINE_EVENT(/* xmit_unstopped */
TP_ARGS(txq)
);
+DECLARE_EVENT_CLASS(/* AIP */
+ hfi1_ipoib_tx_template,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx),
+ TP_STRUCT__entry(/* entry */
+ DD_DEV_ENTRY(tx->txq->priv->dd)
+ __field(struct ipoib_txreq *, tx)
+ __field(struct hfi1_ipoib_txq *, txq)
+ __field(struct sk_buff *, skb)
+ __field(ulong, idx)
+ ),
+ TP_fast_assign(/* assign */
+ DD_DEV_ASSIGN(tx->txq->priv->dd);
+ __entry->tx = tx;
+ __entry->skb = tx->skb;
+ __entry->txq = tx->txq;
+ __entry->idx = idx;
+ ),
+ TP_printk(/* print */
+ "[%s] tx %llx txq %llx,%u skb %llx idx %lu",
+ __get_str(dev),
+ (unsigned long long)__entry->tx,
+ (unsigned long long)__entry->txq,
+ __entry->txq ? __entry->txq->q_idx : 0,
+ (unsigned long long)__entry->skb,
+ __entry->idx
+ )
+);
+
+DEFINE_EVENT(/* produce */
+ hfi1_ipoib_tx_template, hfi1_tx_produce,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* consume */
+ hfi1_ipoib_tx_template, hfi1_tx_consume,
+ TP_PROTO(struct ipoib_txreq *tx, u32 idx),
+ TP_ARGS(tx, idx)
+);
+
+DEFINE_EVENT(/* alloc_tx */
+ hfi1_ipoib_txq_template, hfi1_txq_alloc_tx,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* poll */
+ hfi1_ipoib_txq_template, hfi1_txq_poll,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
+DEFINE_EVENT(/* complete */
+ hfi1_ipoib_txq_template, hfi1_txq_complete,
+ TP_PROTO(struct hfi1_ipoib_txq *txq),
+ TP_ARGS(txq)
+);
+
#endif /* __HFI1_TRACE_TX_H */
#undef TRACE_INCLUDE_PATH
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index 0c86e9d354f8..186d30291260 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -692,8 +692,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
* Allocate the node first so we can handle a potential
* failure before we've programmed anything.
*/
- node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
- GFP_KERNEL);
+ node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
if (!node)
return -ENOMEM;
@@ -713,7 +712,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd,
node->dma_addr = phys;
node->grp = grp;
node->freed = false;
- memcpy(node->pages, pages, sizeof(struct page *) * npages);
+ memcpy(node->pages, pages, flex_array_size(node, pages, npages));
if (fd->use_mn) {
ret = mmu_interval_notifier_insert(
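
The set_rcvarray_entry() hunk just above swaps an open-coded flexible-array size computation for the overflow-checked helpers from <linux/overflow.h>. A minimal sketch of that pattern, with a hypothetical demo_node type:

#include <linux/mm_types.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct demo_node {
	unsigned int npages;
	struct page *pages[];	/* flexible array member */
};

static struct demo_node *demo_node_alloc(struct page **src, unsigned int npages)
{
	struct demo_node *node;

	/*
	 * struct_size() computes sizeof(*node) + npages * sizeof(node->pages[0])
	 * with overflow checking; flex_array_size() does the same for the copy.
	 */
	node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL);
	if (!node)
		return NULL;

	node->npages = npages;
	memcpy(node->pages, src, flex_array_size(node, pages, npages));
	return node;
}
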
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 26bea51869bf..ed9fa0d84e9e 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1602,8 +1602,8 @@ static const char * const driver_cntr_names[] = {
};
static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */
-static const char **dev_cntr_names;
-static const char **port_cntr_names;
+static struct rdma_stat_desc *dev_cntr_descs;
+static struct rdma_stat_desc *port_cntr_descs;
int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names);
static int num_dev_cntrs;
static int num_port_cntrs;
@@ -1614,13 +1614,12 @@ static int cntr_names_initialized;
* strings. Optionally some entries can be reserved in the array to hold extra
* external strings.
*/
-static int init_cntr_names(const char *names_in,
- const size_t names_len,
- int num_extra_names,
- int *num_cntrs,
- const char ***cntr_names)
+static int init_cntr_names(const char *names_in, const size_t names_len,
+ int num_extra_names, int *num_cntrs,
+ struct rdma_stat_desc **cntr_descs)
{
- char *names_out, *p, **q;
+ struct rdma_stat_desc *q;
+ char *names_out, *p;
int i, n;
n = 0;
@@ -1628,26 +1627,28 @@ static int init_cntr_names(const char *names_in,
if (names_in[i] == '\n')
n++;
- names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len,
- GFP_KERNEL);
+ names_out =
+ kmalloc((n + num_extra_names) * sizeof(struct rdma_stat_desc) +
+ names_len,
+ GFP_KERNEL);
if (!names_out) {
*num_cntrs = 0;
- *cntr_names = NULL;
+ *cntr_descs = NULL;
return -ENOMEM;
}
- p = names_out + (n + num_extra_names) * sizeof(char *);
+ p = names_out + (n + num_extra_names) * sizeof(struct rdma_stat_desc);
memcpy(p, names_in, names_len);
- q = (char **)names_out;
+ q = (struct rdma_stat_desc *)names_out;
for (i = 0; i < n; i++) {
- q[i] = p;
+ q[i].name = p;
p = strchr(p, '\n');
*p++ = '\0';
}
*num_cntrs = n;
- *cntr_names = (const char **)names_out;
+ *cntr_descs = (struct rdma_stat_desc *)names_out;
return 0;
}
@@ -1661,18 +1662,18 @@ static int init_counters(struct ib_device *ibdev)
goto out_unlock;
err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs,
- &num_dev_cntrs, &dev_cntr_names);
+ &num_dev_cntrs, &dev_cntr_descs);
if (err)
goto out_unlock;
for (i = 0; i < num_driver_cntrs; i++)
- dev_cntr_names[num_dev_cntrs + i] = driver_cntr_names[i];
+ dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i];
err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0,
- &num_port_cntrs, &port_cntr_names);
+ &num_port_cntrs, &port_cntr_descs);
if (err) {
- kfree(dev_cntr_names);
- dev_cntr_names = NULL;
+ kfree(dev_cntr_descs);
+ dev_cntr_descs = NULL;
goto out_unlock;
}
cntr_names_initialized = 1;
@@ -1686,7 +1687,7 @@ static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev)
{
if (init_counters(ibdev))
return NULL;
- return rdma_alloc_hw_stats_struct(dev_cntr_names,
+ return rdma_alloc_hw_stats_struct(dev_cntr_descs,
num_dev_cntrs + num_driver_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1696,7 +1697,7 @@ static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev,
{
if (init_counters(ibdev))
return NULL;
- return rdma_alloc_hw_stats_struct(port_cntr_names, num_port_cntrs,
+ return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -1921,10 +1922,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd)
verbs_txreq_exit(dev);
mutex_lock(&cntr_names_lock);
- kfree(dev_cntr_names);
- kfree(port_cntr_names);
- dev_cntr_names = NULL;
- port_cntr_names = NULL;
+ kfree(dev_cntr_descs);
+ kfree(port_cntr_descs);
+ dev_cntr_descs = NULL;
+ port_cntr_descs = NULL;
cntr_names_initialized = 0;
mutex_unlock(&cntr_names_lock);
}
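
The init_cntr_names() rework keeps the original trick of a single allocation holding the descriptor array followed by a writable copy of the '\n'-separated name blob, only with struct rdma_stat_desc entries instead of bare char pointers. A sketch of that layout using a hypothetical demo_desc type, assuming (as the driver does) that every name in the blob is newline terminated:

#include <linux/slab.h>
#include <linux/string.h>

struct demo_desc {
	const char *name;
};

/* Returns n descriptors whose names point into the same allocation. */
static struct demo_desc *demo_parse_names(const char *blob, size_t blob_len,
					  int *num_out)
{
	struct demo_desc *descs;
	char *strings, *p;
	size_t i;
	int k, n = 0;

	for (i = 0; i < blob_len; i++)
		if (blob[i] == '\n')
			n++;

	/* one block: [n descriptors][copy of the name blob] */
	descs = kmalloc(n * sizeof(*descs) + blob_len, GFP_KERNEL);
	if (!descs)
		return NULL;

	strings = (char *)(descs + n);
	memcpy(strings, blob, blob_len);

	for (k = 0, p = strings; k < n; k++) {
		descs[k].name = p;
		p = strchr(p, '\n');
		*p++ = '\0';		/* terminate each name in place */
	}

	*num_out = n;
	return descs;
}
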
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 9467c39e3d28..43e17d61cb63 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -225,11 +225,24 @@ struct hns_roce_uar {
unsigned long logic_idx;
};
+enum hns_roce_mmap_type {
+ HNS_ROCE_MMAP_TYPE_DB = 1,
+ HNS_ROCE_MMAP_TYPE_TPTR,
+};
+
+struct hns_user_mmap_entry {
+ struct rdma_user_mmap_entry rdma_entry;
+ enum hns_roce_mmap_type mmap_type;
+ u64 address;
+};
+
struct hns_roce_ucontext {
struct ib_ucontext ibucontext;
struct hns_roce_uar uar;
struct list_head page_list;
struct mutex page_mutex;
+ struct hns_user_mmap_entry *db_mmap_entry;
+ struct hns_user_mmap_entry *tptr_mmap_entry;
};
struct hns_roce_pd {
@@ -898,7 +911,8 @@ struct hns_roce_hw {
bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy);
int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index,
const union ib_gid *gid, const struct ib_gid_attr *attr);
- int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
+ int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port,
+ const u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
@@ -1049,6 +1063,12 @@ static inline struct hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq)
return container_of(ibsrq, struct hns_roce_srq, ibsrq);
}
+static inline struct hns_user_mmap_entry *
+to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry);
+}
+
static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest)
{
writeq(*(u64 *)val, dest);
@@ -1259,4 +1279,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev);
void hns_roce_exit(struct hns_roce_dev *hr_dev);
int hns_roce_fill_res_cq_entry(struct sk_buff *msg,
struct ib_cq *ib_cq);
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type);
#endif /* _HNS_ROCE_DEVICE_H */
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index e0f59b8d7d5d..f4af3992ba95 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -90,11 +90,11 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
unsigned long flags = 0;
void *wqe = NULL;
__le32 doorbell[2];
+ const u8 *smac;
int ret = 0;
int loopback;
u32 wqe_idx;
int nreq;
- u8 *smac;
if (unlikely(ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_RC)) {
@@ -154,7 +154,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp,
UD_SEND_WQE_U32_8_DMAC_5_S,
ah->av.mac[5]);
- smac = (u8 *)hr_dev->dev_addr[qp->port];
+ smac = (const u8 *)hr_dev->dev_addr[qp->port];
loopback = ether_addr_equal_unaligned(ah->av.mac,
smac) ? 1 : 0;
roce_set_bit(ud_sq_wqe->u32_8,
@@ -1782,7 +1782,7 @@ static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port,
}
static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- u8 *addr)
+ const u8 *addr)
{
u32 reg_smac_l;
u16 reg_smac_h;
@@ -2743,12 +2743,12 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
__le32 doorbell[2] = {0};
u64 *mtts_2 = NULL;
int ret = -EINVAL;
+ const u8 *smac;
u64 sq_ba = 0;
u64 rq_ba = 0;
u32 port;
u32 port_num;
u8 *dmac;
- u8 *smac;
if (!check_qp_state(cur_state, new_state)) {
ibdev_err(ibqp->device,
@@ -2947,7 +2947,7 @@ static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) :
hr_qp->port;
- smac = (u8 *)hr_dev->dev_addr[port];
+ smac = (const u8 *)hr_dev->dev_addr[port];
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
hr_dev->loop_idc == 0x1)
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d5f3faa1627a..9bfbaddd1763 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -1165,32 +1165,22 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
{
int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(hr_dev->dev, size,
+ &ring->desc_dma_addr, GFP_KERNEL);
if (!ring->desc)
return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
-
- return -ENOMEM;
- }
-
return 0;
}
static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
struct hns_roce_v2_cmq_ring *ring)
{
- dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
- ring->desc_num * sizeof(struct hns_roce_cmq_desc),
- DMA_BIDIRECTIONAL);
+ dma_free_coherent(hr_dev->dev,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ ring->desc, ring->desc_dma_addr);
ring->desc_dma_addr = 0;
- kfree(ring->desc);
}
static int init_csq(struct hns_roce_dev *hr_dev,
@@ -2992,7 +2982,7 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
}
static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- u8 *addr)
+ const u8 *addr)
{
struct hns_roce_cmq_desc desc;
struct hns_roce_cfg_smac_tb *smac_tb =
@@ -3328,7 +3318,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
memset(cq_context, 0, sizeof(*cq_context));
hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
- hr_reg_write(cq_context, CQC_ARM_ST, REG_NXT_CEQE);
+ hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
@@ -4318,10 +4308,10 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
enum ib_mtu ib_mtu;
+ const u8 *smac;
u8 lp_pktn_ini;
u64 *mtts;
u8 *dmac;
- u8 *smac;
u32 port;
int mtu;
int ret;
@@ -4374,7 +4364,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : hr_qp->port;
- smac = (u8 *)hr_dev->dev_addr[port];
+ smac = (const u8 *)hr_dev->dev_addr[port];
dmac = (u8 *)attr->ah_attr.roce.dmac;
/* when dmac equals smac or loop_idc is 1, it should loopback */
if (ether_addr_equal_unaligned(dmac, smac) ||
@@ -4399,8 +4389,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtu = ib_mtu_enum_to_int(ib_mtu);
if (WARN_ON(mtu <= 0))
return -EINVAL;
-#define MAX_LP_MSG_LEN 65536
- /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */
+#define MAX_LP_MSG_LEN 16384
+ /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */
lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu);
if (WARN_ON(lp_pktn_ini >= 0xF))
return -EINVAL;
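
The hns_roce_alloc_cmq_desc()/hns_roce_free_cmq_desc() hunks above replace a kzalloc() plus dma_map_single() pair with a single coherent DMA allocation, so the descriptor ring is device-visible for its whole lifetime without a separate mapping step. A minimal sketch of the pattern with illustrative demo_* names:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct demo_cmq {
	void *desc;
	dma_addr_t desc_dma;
	size_t size;
};

static int demo_cmq_alloc(struct device *dev, struct demo_cmq *cmq, size_t size)
{
	/* one call returns zeroed, coherent memory plus its bus address */
	cmq->desc = dma_alloc_coherent(dev, size, &cmq->desc_dma, GFP_KERNEL);
	if (!cmq->desc)
		return -ENOMEM;
	cmq->size = size;
	return 0;
}

static void demo_cmq_free(struct device *dev, struct demo_cmq *cmq)
{
	dma_free_coherent(dev, cmq->size, cmq->desc, cmq->desc_dma);
	cmq->desc = NULL;
	cmq->desc_dma = 0;
}
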
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index 5d39bd08582a..4194b626f3c6 100644
--- a/drivers/infiniband/hw/hns/hns_roce_main.c
+++ b/drivers/infiniband/hw/hns/hns_roce_main.c
@@ -42,7 +42,8 @@
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
-static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr)
+static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port,
+ const u8 *addr)
{
u8 phy_port;
u32 i;
@@ -291,6 +292,79 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask,
return 0;
}
+struct hns_user_mmap_entry *
+hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
+ size_t length,
+ enum hns_roce_mmap_type mmap_type)
+{
+ struct hns_user_mmap_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return NULL;
+
+ entry->address = address;
+ entry->mmap_type = mmap_type;
+
+ ret = rdma_user_mmap_entry_insert_exact(
+ ucontext, &entry->rdma_entry, length,
+ mmap_type == HNS_ROCE_MMAP_TYPE_DB ? 0 : 1);
+ if (ret) {
+ kfree(entry);
+ return NULL;
+ }
+
+ return entry;
+}
+
+static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context)
+{
+ if (context->db_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->db_mmap_entry->rdma_entry);
+
+ if (context->tptr_mmap_entry)
+ rdma_user_mmap_entry_remove(
+ &context->tptr_mmap_entry->rdma_entry);
+}
+
+static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx)
+{
+ struct hns_roce_ucontext *context = to_hr_ucontext(uctx);
+ struct hns_roce_dev *hr_dev = to_hr_dev(uctx->device);
+ u64 address;
+ int ret;
+
+ address = context->uar.pfn << PAGE_SHIFT;
+ context->db_mmap_entry = hns_roce_user_mmap_entry_insert(
+ uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB);
+ if (!context->db_mmap_entry)
+ return -ENOMEM;
+
+ if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
+ return 0;
+
+ /*
+ * FIXME: using io_remap_pfn_range on the dma address returned
+ * by dma_alloc_coherent is totally wrong.
+ */
+ context->tptr_mmap_entry =
+ hns_roce_user_mmap_entry_insert(uctx, hr_dev->tptr_dma_addr,
+ hr_dev->tptr_size,
+ HNS_ROCE_MMAP_TYPE_TPTR);
+ if (!context->tptr_mmap_entry) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ return 0;
+
+err:
+ hns_roce_dealloc_uar_entry(context);
+ return ret;
+}
+
static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
struct ib_udata *udata)
{
@@ -309,6 +383,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
if (ret)
goto error_fail_uar_alloc;
+ ret = hns_roce_alloc_uar_entry(uctx);
+ if (ret)
+ goto error_fail_uar_entry;
+
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
INIT_LIST_HEAD(&context->page_list);
@@ -325,6 +403,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx,
return 0;
error_fail_copy_to_udata:
+ hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
error_fail_uar_alloc:
@@ -336,39 +417,43 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ hns_roce_dealloc_uar_entry(context);
+
ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
}
-static int hns_roce_mmap(struct ib_ucontext *context,
- struct vm_area_struct *vma)
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
- switch (vma->vm_pgoff) {
- case 0:
- return rdma_user_mmap_io(context, vma,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot),
- NULL);
-
- /* vm_pgoff: 1 -- TPTR */
- case 1:
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return -EINVAL;
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- return rdma_user_mmap_io(context, vma,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot,
- NULL);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct hns_user_mmap_entry *entry;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;
- default:
+ rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ if (!rdma_entry)
return -EINVAL;
- }
+
+ entry = to_hns_mmap(rdma_entry);
+ pfn = entry->address >> PAGE_SHIFT;
+ prot = vma->vm_page_prot;
+
+ if (entry->mmap_type != HNS_ROCE_MMAP_TYPE_TPTR)
+ prot = pgprot_noncached(prot);
+
+ ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+ kfree(entry);
}
static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
@@ -444,6 +529,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
.get_link_layer = hns_roce_get_link_layer,
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
+ .mmap_free = hns_roce_free_mmap,
.modify_device = hns_roce_modify_device,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index d03cd29333ea..3bf42728e9b7 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -159,14 +159,6 @@ enum irdma_cm_event_type {
IRDMA_CM_EVENT_ABORTED,
};
-struct irdma_bth { /* Base Trasnport Header */
- u8 opcode;
- u8 flags;
- __be16 pkey;
- __be32 qpn;
- __be32 apsn;
-};
-
struct ietf_mpa_v1 {
u8 key[IETF_MPA_KEY_SIZE];
u8 flags;
@@ -397,7 +389,7 @@ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
int irdma_destroy_listen(struct iw_cm_id *cm_id);
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac);
void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr,
struct irdma_cm_info *nfo,
bool disconnect_all);
@@ -406,7 +398,7 @@ int irdma_cm_stop(struct irdma_device *dev);
bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action);
+ const u8 *mac_addr, u32 action);
void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev,
u32 *ipaddr, bool ipv4, bool ifup);
bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index f1e5515256e0..7264f8c2f7d5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1420,44 +1420,6 @@ void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
}
/**
- * irdma_sc_send_lsmm_nostag - for privilege qp
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- */
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
-{
- __le64 *wqe;
- u64 hdr;
- struct irdma_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
-
- if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
- else
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
- set_64bit_val(wqe, 16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
- FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
- FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
- dma_wmb(); /* make sure WQE is written before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- print_hex_dump_debug("WQE: SEND_LSMM_NOSTAG WQE", DUMP_PREFIX_OFFSET,
- 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
-}
-
-/**
* irdma_sc_send_rtt - send last read0 or write0
* @qp: sc qp struct
* @read: Do read0 or write0
@@ -2501,7 +2463,6 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
struct irdma_cq_init_info *info)
{
- enum irdma_status_code ret_code;
u32 pble_obj_cnt;
pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
@@ -2513,9 +2474,7 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
cq->ceq_id = info->ceq_id;
info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
- ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
- if (ret_code)
- return ret_code;
+ irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
cq->virtual_map = info->virtual_map;
cq->pbl_chunk_size = info->pbl_chunk_size;
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7de525a5ccf8..4108dcabece2 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -1057,7 +1057,7 @@ static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
&iwdev->mac_ip_table_idx);
if (!status) {
status = irdma_add_local_mac_entry(iwdev->rf,
- (u8 *)iwdev->netdev->dev_addr,
+ (const u8 *)iwdev->netdev->dev_addr,
(u8)iwdev->mac_ip_table_idx);
if (status)
irdma_del_local_mac_entry(iwdev->rf,
@@ -2191,7 +2191,7 @@ void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
* @mac_addr: pointer to mac address
* @idx: the index of the mac ip address to add
*/
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
{
struct irdma_local_mac_entry_info *info;
struct irdma_cqp *iwcqp = &rf->cqp;
@@ -2362,7 +2362,8 @@ void irdma_del_apbvt(struct irdma_device *iwdev,
* @ipv4: flag inicating IPv4
* @action: add, delete or modify
*/
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
u32 *ip_addr, bool ipv4, u32 action)
{
struct irdma_add_arp_cache_entry_info *info;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index b678fe712447..91a497139ba3 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -467,7 +467,8 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
u32 *ip_addr, bool ipv4, u32 action);
struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
void irdma_del_apbvt(struct irdma_device *iwdev,
@@ -479,7 +480,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
void irdma_put_cqp_request(struct irdma_cqp *cqp,
struct irdma_cqp_request *cqp_request);
int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index b2ab52335ca6..63d8bb3a6903 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -37,7 +37,6 @@ struct irdma_hw;
struct irdma_pci_f;
struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
-u8 __iomem *irdma_get_hw_addr(void *dev);
void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index 78f598fdbccf..a17c0ffb0cc8 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -37,8 +37,6 @@ void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
enum irdma_status_code
irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd,
struct irdma_ws_node_info *node_info);
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info);
enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev,
struct irdma_sc_ceq *sc_ceq, u8 op);
enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
index bcf10ec427d6..f633fb343328 100644
--- a/drivers/infiniband/hw/irdma/trace_cm.h
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -144,7 +144,7 @@ DEFINE_EVENT(tos_template, irdma_dcb_tos,
DECLARE_EVENT_CLASS(qhash_template,
TP_PROTO(struct irdma_device *iwdev,
struct irdma_cm_listener *listener,
- char *dev_addr),
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr),
TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
__field(u16, lport)
@@ -173,12 +173,14 @@ DECLARE_EVENT_CLASS(qhash_template,
DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
TP_ARGS(iwdev, listener, dev_addr));
TRACE_EVENT(irdma_addr_resolve,
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 874bc25a938b..9483bb3e10ea 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -852,7 +852,6 @@ struct irdma_roce_offload_info {
u16 err_rq_idx;
u32 qkey;
u32 dest_qp;
- u32 local_qp;
u8 roce_tver;
u8 ack_credits;
u8 err_rq_idx_valid;
@@ -1256,7 +1255,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
u64 scratch, bool post_sq);
void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
irdma_stag stag);
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
+
void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
struct irdma_qp_host_ctx_info *info);
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 5fb92de1f015..57a9444e9ea7 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -13,16 +13,16 @@
* @sge: sge length and stag
* @valid: The wqe valid
*/
-static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8,
@@ -38,14 +38,14 @@ static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
* @valid: wqe valid flag
*/
static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ib_sge *sge, u8 valid)
{
if (sge) {
set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
set_64bit_val(wqe, offset + 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
} else {
set_64bit_val(wqe, offset, 0);
set_64bit_val(wqe, offset + 8, 0);
@@ -289,7 +289,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
read_fence |= info->read_fence;
@@ -310,7 +310,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
irdma_clr_wqes(qp, wqe_idx);
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
if (info->imm_data_valid) {
set_64bit_val(wqe, 0,
@@ -339,7 +339,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -391,7 +391,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
if (ret_code)
@@ -426,8 +426,8 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
++addl_frag_cnt;
}
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -477,7 +477,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
return IRDMA_ERR_INVALID_FRAG_COUNT;
for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
if (info->imm_data_valid)
frag_cnt = op_info->num_sges + 1;
@@ -705,9 +705,9 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
read_fence |= info->read_fence;
set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -826,7 +826,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
u64 hdr;
u32 wqe_idx;
bool local_fence = false;
- struct irdma_sge sge = {};
+ struct ib_sge sge = {};
info->push_wqe = qp->push_db ? true : false;
op_info = &info->op.inv_local_stag;
@@ -839,7 +839,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
irdma_clr_wqes(qp, wqe_idx);
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
set_64bit_val(wqe, 16, 0);
@@ -867,63 +867,6 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
}
/**
- * irdma_uk_mw_bind - bind Memory Window
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
-{
- __le64 *wqe;
- struct irdma_bind_window *op_info;
- u64 hdr;
- u32 wqe_idx;
- bool local_fence = false;
-
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.bind_window;
- local_fence |= info->local_fence;
-
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
- if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
-
- irdma_clr_wqes(qp, wqe_idx);
-
- qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
- FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
- ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
- FIELD_PREP(IRDMAQPSQ_VABASEDTO,
- (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
- (op_info->mem_window_type_1 ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
- FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
- FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- dma_wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
-
- return 0;
-}
-
-/**
* irdma_uk_post_receive - post receive wqe
* @qp: hw qp ptr
* @info: post rq information
@@ -1092,12 +1035,12 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, struct irdma_cq_poll_info *info)
if (cq->avoid_mem_cflct) {
ext_cqe = (__le64 *)((u8 *)cqe + 32);
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
} else {
peek_head = (cq->cq_ring.head + 1) % cq->cq_ring.size;
ext_cqe = cq->cq_base[peek_head].buf;
get_64bit_val(ext_cqe, 24, &qword7);
- polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword7);
if (!peek_head)
polarity ^= 1;
}
@@ -1503,8 +1446,8 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
* @cq: hw cq
* @info: hw cq initialization info
*/
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info)
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
{
cq->cq_base = info->cq_base;
cq->cq_id = info->cq_id;
@@ -1515,8 +1458,6 @@ enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
cq->avoid_mem_cflct = info->avoid_mem_cflct;
IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
cq->polarity = 1;
-
- return 0;
}
/**
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 3dcbb1fbf2c6..3c811fb88404 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -16,7 +16,6 @@
#define irdma_access_privileges u32
#define irdma_physical_fragment u64
#define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
#define IRDMA_MAX_MR_SIZE 0x200000000000ULL
@@ -151,12 +150,6 @@ struct irdma_cq_uk;
struct irdma_qp_uk_init_info;
struct irdma_cq_uk_init_info;
-struct irdma_sge {
- irdma_tagged_offset tag_off;
- u32 len;
- irdma_stag stag;
-};
-
struct irdma_ring {
u32 head;
u32 tail;
@@ -172,7 +165,7 @@ struct irdma_extended_cqe {
};
struct irdma_post_send {
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
u32 num_sges;
u32 qkey;
u32 dest_qp;
@@ -189,26 +182,26 @@ struct irdma_post_inline_send {
struct irdma_post_rq_info {
u64 wr_id;
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
u32 num_sges;
};
struct irdma_rdma_write {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_inline_rdma_write {
void *data;
u32 len;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_rdma_read {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
};
struct irdma_bind_window {
@@ -283,9 +276,7 @@ enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp,
struct irdma_post_sq_info *info,
bool post_sq);
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
+
enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id,
bool signaled, bool post_sq);
enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
@@ -306,7 +297,7 @@ enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
struct irdma_wqe_uk_ops {
void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
- void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
u8 valid);
void (*iw_set_mw_bind_wqe)(__le64 *wqe,
struct irdma_bind_window *op_info);
@@ -318,8 +309,8 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
enum irdma_cmpl_notify cq_notify);
void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info);
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
struct irdma_qp_uk_init_info *info);
struct irdma_sq_uk_wr_trk_info {
@@ -369,7 +360,6 @@ struct irdma_qp_uk {
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
void *back_qp;
- spinlock_t *lock;
u8 dbg_rq_flushed;
u8 sq_flush_seen;
u8 rq_flush_seen;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ac91ea5296db..8b42c43fc14f 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -11,7 +11,7 @@
* @action: modify, delete or add
*/
int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action)
+ const u8 *mac_addr, u32 action)
{
unsigned long flags;
int arp_index;
@@ -77,7 +77,7 @@ int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
* @ipv4: IPv4 flag
* @mac: MAC address
*/
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac)
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
{
int arpidx;
@@ -768,17 +768,6 @@ struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
}
/**
- * irdma_get_hw_addr - return hw addr
- * @par: points to shared dev
- */
-u8 __iomem *irdma_get_hw_addr(void *par)
-{
- struct irdma_sc_dev *dev = par;
-
- return dev->hw->hw_addr;
-}
-
-/**
* irdma_remove_cqp_head - return head entry and remove
* @dev: device
*/
@@ -2060,40 +2049,6 @@ exit:
}
/**
- * irdma_cqp_up_map_cmd - Set the up-up mapping
- * @dev: pointer to device structure
- * @cmd: map command
- * @map_info: pointer to up map info
- */
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info)
-{
- struct irdma_pci_f *rf = dev_to_rf(dev);
- struct irdma_cqp *iwcqp = &rf->cqp;
- struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
- if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
- cqp_info->cqp_cmd = cmd;
- cqp_info->post_sq = 1;
- cqp_info->in.u.up_map.info = *map_info;
- cqp_info->in.u.up_map.cqp = cqp;
- cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
* irdma_ah_cqp_op - perform an AH cqp operation
* @rf: RDMA PCI function
* @sc_ah: address handle
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 7110ebf834f9..0f66e809d418 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -833,7 +833,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
qp = &iwqp->sc_qp;
qp->qp_uk.back_qp = iwqp;
- qp->qp_uk.lock = &iwqp->lock;
qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
iwqp->iwdev = iwdev;
@@ -1198,7 +1197,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
av->attrs = attr->ah_attr;
rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
- roce_info->local_qp = ibqp->qp_num;
if (av->sgid_addr.saddr.sa_family == AF_INET6) {
__be32 *daddr =
av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
@@ -3041,24 +3039,6 @@ done:
}
/**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- */
-static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
- int num_sges)
-{
- unsigned int i;
-
- for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
-/**
* irdma_post_send - kernel application wr
* @ibqp: qp ptr for wr
* @ib_wr: work request ptr
@@ -3134,8 +3114,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
ret = irdma_uk_inline_send(ukqp, &info, false);
} else {
info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)
- ib_wr->sg_list;
+ info.op.send.sg_list = ib_wr->sg_list;
if (iwqp->ibqp.qp_type == IB_QPT_UD ||
iwqp->ibqp.qp_type == IB_QPT_GSI) {
ah = to_iwah(ud_wr(ib_wr)->ah);
@@ -3170,15 +3149,18 @@ static int irdma_post_send(struct ib_qp *ibqp,
if (ib_wr->send_flags & IB_SEND_INLINE) {
info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.inline_rdma_write.len =
+ ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.lkey =
+ rdma_wr(ib_wr)->rkey;
ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
} else {
info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
ret = irdma_uk_rdma_write(ukqp, &info, false);
}
@@ -3199,8 +3181,8 @@ static int irdma_post_send(struct ib_qp *ibqp,
break;
}
info.op_type = IRDMA_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
@@ -3287,7 +3269,6 @@ static int irdma_post_recv(struct ib_qp *ibqp,
struct irdma_qp *iwqp;
struct irdma_qp_uk *ukqp;
struct irdma_post_rq_info post_recv = {};
- struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
enum irdma_status_code ret = 0;
unsigned long flags;
int err = 0;
@@ -3302,8 +3283,7 @@ static int irdma_post_recv(struct ib_qp *ibqp,
while (ib_wr) {
post_recv.num_sges = ib_wr->num_sge;
post_recv.wr_id = ib_wr->wr_id;
- irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
+ post_recv.sg_list = ib_wr->sg_list;
ret = irdma_uk_post_receive(ukqp, &post_recv);
if (ret) {
ibdev_dbg(&iwqp->iwdev->ibdev,
@@ -3399,9 +3379,13 @@ static void irdma_process_cqe(struct ib_wc *entry,
}
if (cq_poll_info->ud_vlan_valid) {
- entry->vlan_id = cq_poll_info->ud_vlan & VLAN_VID_MASK;
- entry->wc_flags |= IB_WC_WITH_VLAN;
+ u16 vlan = cq_poll_info->ud_vlan & VLAN_VID_MASK;
+
entry->sl = cq_poll_info->ud_vlan >> VLAN_PRIO_SHIFT;
+ if (vlan) {
+ entry->vlan_id = vlan;
+ entry->wc_flags |= IB_WC_WITH_VLAN;
+ }
} else {
entry->sl = 0;
}
@@ -3647,89 +3631,89 @@ static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
return 0;
}
-static const char *const irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
/* 32bit names */
- [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
- [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
- [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
- [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
- [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
- [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
- [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
/* 64bit names */
- [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InPkts",
- [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip4OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
"tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
"iwRdmaInv",
- [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"RxUDP",
- [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
"TxUDP",
- [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
- "RxECNMrkd",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
+ .name = "RxECNMrkd",
};
static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
@@ -3753,10 +3737,10 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
IRDMA_HW_STAT_INDEX_MAX_64;
unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
+ BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
(IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
lifespan);
}
@@ -4326,7 +4310,7 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
static __be64 irdma_mac_to_guid(struct net_device *ndev)
{
- unsigned char *mac = ndev->dev_addr;
+ const unsigned char *mac = ndev->dev_addr;
__be64 guid;
unsigned char *dst = (unsigned char *)&guid;
diff --git a/drivers/infiniband/hw/irdma/ws.c b/drivers/infiniband/hw/irdma/ws.c
index b68c575eb78e..b0d6ee0739f5 100644
--- a/drivers/infiniband/hw/irdma/ws.c
+++ b/drivers/infiniband/hw/irdma/ws.c
@@ -330,8 +330,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
tc_node->enable = true;
ret = irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_MODIFY_NODE);
- if (ret)
+ if (ret) {
+ vsi->unregister_qset(vsi, tc_node);
goto reg_err;
+ }
}
ibdev_dbg(to_ibdev(vsi->dev),
"WS: Using node %d which represents VSI %d TC %d\n",
@@ -350,6 +352,10 @@ enum irdma_status_code irdma_ws_add(struct irdma_sc_vsi *vsi, u8 user_pri)
}
goto exit;
+reg_err:
+ irdma_ws_cqp_cmd(vsi, tc_node, IRDMA_OP_WS_DELETE_NODE);
+ list_del(&tc_node->siblings);
+ irdma_free_node(vsi, tc_node);
leaf_add_err:
if (list_empty(&vsi_node->child_list_head)) {
if (irdma_ws_cqp_cmd(vsi, vsi_node, IRDMA_OP_WS_DELETE_NODE))
@@ -369,11 +375,6 @@ vsi_add_err:
exit:
mutex_unlock(&vsi->dev->ws_mutex);
return ret;
-
-reg_err:
- mutex_unlock(&vsi->dev->ws_mutex);
- irdma_ws_remove(vsi, user_pri);
- return ret;
}
/**
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 571d9c542024..e2e1f5daddc4 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -822,10 +822,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
}
spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
}
- for (i = 0 ; i < dev->num_ports; i++) {
- flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+ for (i = 0 ; i < dev->num_ports; i++)
destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
- }
ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
kfree(dev->sriov.alias_guid.sa_client);
}
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f367f4a4abff..ceca05982f61 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2105,10 +2105,10 @@ mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name)
+ if (!diag[0].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -2118,10 +2118,10 @@ mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
struct mlx4_ib_dev *dev = to_mdev(ibdev);
struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name)
+ if (!diag[1].descs)
return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
@@ -2151,10 +2151,8 @@ static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
}
static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
- const char ***name,
- u32 **offset,
- u32 *num,
- bool port)
+ struct rdma_stat_desc **pdescs,
+ u32 **offset, u32 *num, bool port)
{
u32 num_counters;
@@ -2166,46 +2164,46 @@ static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
if (!port)
num_counters += ARRAY_SIZE(diag_device_only);
- *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL);
- if (!*name)
+ *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc),
+ GFP_KERNEL);
+ if (!*pdescs)
return -ENOMEM;
*offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL);
if (!*offset)
- goto err_name;
+ goto err;
*num = num_counters;
return 0;
-err_name:
- kfree(*name);
+err:
+ kfree(*pdescs);
return -ENOMEM;
}
static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev,
- const char **name,
- u32 *offset,
- bool port)
+ struct rdma_stat_desc *descs,
+ u32 *offset, bool port)
{
int i;
int j;
for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) {
- name[i] = diag_basic[i].name;
+ descs[i].name = diag_basic[i].name;
offset[i] = diag_basic[i].offset;
}
if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) {
for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) {
- name[j] = diag_ext[i].name;
+ descs[j].name = diag_ext[i].name;
offset[j] = diag_ext[i].offset;
}
}
if (!port) {
for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) {
- name[j] = diag_device_only[i].name;
+ descs[j].name = diag_device_only[i].name;
offset[j] = diag_device_only[i].offset;
}
}
@@ -2233,13 +2231,13 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
if (i && !per_port)
continue;
- ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name,
+ ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
&diag[i].offset,
&diag[i].num_counters, i);
if (ret)
goto err_alloc;
- mlx4_ib_fill_diag_counters(ibdev, diag[i].name,
+ mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
diag[i].offset, i);
}
@@ -2249,7 +2247,7 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev)
err_alloc:
if (i) {
- kfree(diag[i - 1].name);
+ kfree(diag[i - 1].descs);
kfree(diag[i - 1].offset);
}
@@ -2262,7 +2260,7 @@ static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev)
for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) {
kfree(ibdev->diag_counters[i].offset);
- kfree(ibdev->diag_counters[i].name);
+ kfree(ibdev->diag_counters[i].descs);
}
}
@@ -2275,7 +2273,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev,
u64 release_mac = MLX4_IB_INVALID_MAC;
struct mlx4_ib_qp *qp;
- new_smac = mlx4_mac_to_u64(dev->dev_addr);
+ new_smac = ether_addr_to_u64(dev->dev_addr);
atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
/* no need for update QP1 and mac registration in non-SRIOV */
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index c60f6e9ac640..d84023b4b1b8 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -601,7 +601,7 @@ struct mlx4_ib_counters {
#define MLX4_DIAG_COUNTERS_TYPES 2
struct mlx4_ib_diag_counters {
- const char **name;
+ struct rdma_stat_desc *descs;
u32 *offset;
u32 num_counters;
};
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 8662f462e2a5..b17d6ebc5b70 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1099,8 +1099,10 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr,
if (dev->steering_support ==
MLX4_STEERING_MODE_DEVICE_MANAGED)
qp->flags |= MLX4_IB_QP_NETIF;
- else
+ else {
+ err = -EINVAL;
goto err;
+ }
}
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp);
@@ -1853,7 +1855,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp,
u16 vlan_id, u8 *smac)
{
return _mlx4_set_path(dev, &qp->ah_attr,
- mlx4_mac_to_u64(smac),
+ ether_addr_to_u64(smac),
vlan_id,
path, &mqp->pri, port);
}
diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c
index a8db8a051170..ff3742b0460a 100644
--- a/drivers/infiniband/hw/mlx5/cmd.c
+++ b/drivers/infiniband/hw/mlx5/cmd.c
@@ -206,3 +206,29 @@ out:
kfree(in);
return err;
}
+
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid)
+{
+ u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
+ int err;
+
+ MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
+ MLX5_SET(alloc_uar_in, in, uid, uid);
+ err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
+}
+
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid)
+{
+ u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
+
+ MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR);
+ MLX5_SET(dealloc_uar_in, in, uar, uarn);
+ MLX5_SET(dealloc_uar_in, in, uid, uid);
+ return mlx5_cmd_exec_in(dev, dealloc_uar, in);
+}
diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h
index 66c96292ed43..ee46638db5de 100644
--- a/drivers/infiniband/hw/mlx5/cmd.h
+++ b/drivers/infiniband/hw/mlx5/cmd.h
@@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid);
int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid);
int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
u16 opmod, u8 port);
+int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid);
+int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid);
#endif /* MLX5_IB_CMD_H */
diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c
index 224ba36f2946..945758f39523 100644
--- a/drivers/infiniband/hw/mlx5/counters.c
+++ b/drivers/infiniband/hw/mlx5/counters.c
@@ -12,6 +12,7 @@
struct mlx5_ib_counter {
const char *name;
size_t offset;
+ u32 type;
};
#define INIT_Q_COUNTER(_name) \
@@ -75,6 +76,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = {
INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated),
};
+#define INIT_OP_COUNTER(_name, _type) \
+ { .name = #_name, .type = MLX5_IB_OPCOUNTER_##_type}
+
+static const struct mlx5_ib_counter basic_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_ce_pkts, CC_RX_CE_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmarx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_rx_cnp_pkts, CC_RX_CNP_PKTS),
+};
+
+static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = {
+ INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS),
+};
+
static int mlx5_ib_read_counters(struct ib_counters *counters,
struct ib_counters_read_attr *read_attr,
struct uverbs_attr_bundle *attrs)
@@ -161,17 +177,34 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num)
return cnts->set_id;
}
+static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts)
+{
+ struct rdma_hw_stats *stats;
+ u32 num_hw_counters;
+ int i;
+
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ stats = rdma_alloc_hw_stats_struct(cnts->descs,
+ num_hw_counters +
+ cnts->num_op_counters,
+ RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ if (!stats)
+ return NULL;
+
+ for (i = 0; i < cnts->num_op_counters; i++)
+ set_bit(num_hw_counters + i, stats->is_disabled);
+
+ return stats;
+}
+
static struct rdma_hw_stats *
mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = &dev->port[0].cnts;
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static struct rdma_hw_stats *
@@ -180,11 +213,7 @@ mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts;
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev,
@@ -241,9 +270,9 @@ free:
return ret;
}
-static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
- struct rdma_hw_stats *stats,
- u32 port_num, int index)
+static int do_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1);
@@ -295,6 +324,88 @@ done:
return num_counters;
}
+static int do_get_op_stat(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ const struct mlx5_ib_op_fc *opfcs;
+ u64 packets = 0, bytes;
+ u32 type;
+ int ret;
+
+ cnts = get_counters(dev, port_num - 1);
+ opfcs = cnts->opfcs;
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ if (!opfcs[type].fc)
+ goto out;
+
+ ret = mlx5_fc_query(dev->mdev, opfcs[type].fc,
+ &packets, &bytes);
+ if (ret)
+ return ret;
+
+out:
+ stats->value[index] = packets;
+ return index;
+}
+
+static int do_get_op_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num)
+{
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+ int index, ret, num_hw_counters;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ for (index = num_hw_counters;
+ index < (num_hw_counters + cnts->num_op_counters); index++) {
+ ret = do_get_op_stat(ibdev, stats, port_num, index);
+ if (ret != index)
+ return ret;
+ }
+
+ return cnts->num_op_counters;
+}
+
+static int mlx5_ib_get_hw_stats(struct ib_device *ibdev,
+ struct rdma_hw_stats *stats,
+ u32 port_num, int index)
+{
+ int num_counters, num_hw_counters, num_op_counters;
+ struct mlx5_ib_dev *dev = to_mdev(ibdev);
+ const struct mlx5_ib_counters *cnts;
+
+ cnts = get_counters(dev, port_num - 1);
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ num_counters = num_hw_counters + cnts->num_op_counters;
+
+ if (index < 0 || index > num_counters)
+ return -EINVAL;
+ else if (index > 0 && index < num_hw_counters)
+ return do_get_hw_stats(ibdev, stats, port_num, index);
+ else if (index >= num_hw_counters && index < num_counters)
+ return do_get_op_stat(ibdev, stats, port_num, index);
+
+ num_hw_counters = do_get_hw_stats(ibdev, stats, port_num, index);
+ if (num_hw_counters < 0)
+ return num_hw_counters;
+
+ num_op_counters = do_get_op_stats(ibdev, stats, port_num);
+ if (num_op_counters < 0)
+ return num_op_counters;
+
+ return num_hw_counters + num_op_counters;
+}
+
static struct rdma_hw_stats *
mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
{
@@ -302,11 +413,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter *counter)
const struct mlx5_ib_counters *cnts =
get_counters(dev, counter->port - 1);
- return rdma_alloc_hw_stats_struct(cnts->names,
- cnts->num_q_counters +
- cnts->num_cong_counters +
- cnts->num_ext_ppcnt_counters,
- RDMA_HW_STATS_DEFAULT_LIFESPAN);
+ return do_alloc_stats(cnts);
}
static int mlx5_ib_counter_update_stats(struct rdma_counter *counter)
@@ -371,67 +478,89 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp)
return mlx5_ib_qp_set_counter(qp, NULL);
}
-
static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev,
- const char **names,
- size_t *offsets)
+ struct rdma_stat_desc *descs, size_t *offsets)
{
int i;
int j = 0;
for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) {
- names[j] = basic_q_cnts[i].name;
+ descs[j].name = basic_q_cnts[i].name;
offsets[j] = basic_q_cnts[i].offset;
}
if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) {
for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) {
- names[j] = out_of_seq_q_cnts[i].name;
+ descs[j].name = out_of_seq_q_cnts[i].name;
offsets[j] = out_of_seq_q_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) {
for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) {
- names[j] = retrans_q_cnts[i].name;
+ descs[j].name = retrans_q_cnts[i].name;
offsets[j] = retrans_q_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) {
for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) {
- names[j] = extended_err_cnts[i].name;
+ descs[j].name = extended_err_cnts[i].name;
offsets[j] = extended_err_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, roce_accl)) {
for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) {
- names[j] = roce_accl_cnts[i].name;
+ descs[j].name = roce_accl_cnts[i].name;
offsets[j] = roce_accl_cnts[i].offset;
}
}
if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) {
for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) {
- names[j] = cong_cnts[i].name;
+ descs[j].name = cong_cnts[i].name;
offsets[j] = cong_cnts[i].offset;
}
}
if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) {
for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) {
- names[j] = ext_ppcnt_cnts[i].name;
+ descs[j].name = ext_ppcnt_cnts[i].name;
offsets[j] = ext_ppcnt_cnts[i].offset;
}
}
+
+ for (i = 0; i < ARRAY_SIZE(basic_op_cnts); i++, j++) {
+ descs[j].name = basic_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &basic_op_cnts[i].type;
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmarx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmarx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmarx_cnp_op_cnts[i].type;
+ }
+ }
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode)) {
+ for (i = 0; i < ARRAY_SIZE(rdmatx_cnp_op_cnts); i++, j++) {
+ descs[j].name = rdmatx_cnp_op_cnts[i].name;
+ descs[j].flags |= IB_STAT_FLAG_OPTIONAL;
+ descs[j].priv = &rdmatx_cnp_op_cnts[i].type;
+ }
+ }
}
static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
struct mlx5_ib_counters *cnts)
{
- u32 num_counters;
+ u32 num_counters, num_op_counters;
num_counters = ARRAY_SIZE(basic_q_cnts);
@@ -457,20 +586,34 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev,
cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts);
num_counters += ARRAY_SIZE(ext_ppcnt_cnts);
}
- cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL);
- if (!cnts->names)
+
+ num_op_counters = ARRAY_SIZE(basic_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts);
+
+ if (MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode))
+ num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts);
+
+ cnts->num_op_counters = num_op_counters;
+ num_counters += num_op_counters;
+ cnts->descs = kcalloc(num_counters,
+ sizeof(struct rdma_stat_desc), GFP_KERNEL);
+ if (!cnts->descs)
return -ENOMEM;
cnts->offsets = kcalloc(num_counters,
sizeof(*cnts->offsets), GFP_KERNEL);
if (!cnts->offsets)
- goto err_names;
+ goto err;
return 0;
-err_names:
- kfree(cnts->names);
- cnts->names = NULL;
+err:
+ kfree(cnts->descs);
+ cnts->descs = NULL;
return -ENOMEM;
}
@@ -478,7 +621,7 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
{
u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {};
int num_cnt_ports;
- int i;
+ int i, j;
num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports;
@@ -491,8 +634,20 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev)
dev->port[i].cnts.set_id);
mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in);
}
- kfree(dev->port[i].cnts.names);
+ kfree(dev->port[i].cnts.descs);
kfree(dev->port[i].cnts.offsets);
+
+ for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) {
+ if (!dev->port[i].cnts.opfcs[j].fc)
+ continue;
+
+ if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS))
+ mlx5_ib_fs_remove_op_fc(dev,
+ &dev->port[i].cnts.opfcs[j], j);
+ mlx5_fc_destroy(dev->mdev,
+ dev->port[i].cnts.opfcs[j].fc);
+ dev->port[i].cnts.opfcs[j].fc = NULL;
+ }
}
}
@@ -514,7 +669,7 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev)
if (err)
goto err_alloc;
- mlx5_ib_fill_counters(dev, dev->port[i].cnts.names,
+ mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs,
dev->port[i].cnts.offsets);
MLX5_SET(alloc_q_counter_in, in, uid,
@@ -672,6 +827,56 @@ void mlx5_ib_counters_clear_description(struct ib_counters *counters)
mutex_unlock(&mcounters->mcntrs_mutex);
}
+static int mlx5_ib_modify_stat(struct ib_device *device, u32 port,
+ unsigned int index, bool enable)
+{
+ struct mlx5_ib_dev *dev = to_mdev(device);
+ struct mlx5_ib_counters *cnts;
+ struct mlx5_ib_op_fc *opfc;
+ u32 num_hw_counters, type;
+ int ret;
+
+ cnts = &dev->port[port - 1].cnts;
+ num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters +
+ cnts->num_ext_ppcnt_counters;
+ if (index < num_hw_counters ||
+ index >= (num_hw_counters + cnts->num_op_counters))
+ return -EINVAL;
+
+ if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL))
+ return -EINVAL;
+
+ type = *(u32 *)cnts->descs[index].priv;
+ if (type >= MLX5_IB_OPCOUNTER_MAX)
+ return -EINVAL;
+
+ opfc = &cnts->opfcs[type];
+
+ if (enable) {
+ if (opfc->fc)
+ return -EEXIST;
+
+ opfc->fc = mlx5_fc_create(dev->mdev, false);
+ if (IS_ERR(opfc->fc))
+ return PTR_ERR(opfc->fc);
+
+ ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type);
+ if (ret) {
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ }
+ return ret;
+ }
+
+ if (!opfc->fc)
+ return -EINVAL;
+
+ mlx5_ib_fs_remove_op_fc(dev, opfc, type);
+ mlx5_fc_destroy(dev->mdev, opfc->fc);
+ opfc->fc = NULL;
+ return 0;
+}
+
static const struct ib_device_ops hw_stats_ops = {
.alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats,
.get_hw_stats = mlx5_ib_get_hw_stats,
@@ -680,6 +885,8 @@ static const struct ib_device_ops hw_stats_ops = {
.counter_dealloc = mlx5_ib_counter_dealloc,
.counter_alloc_stats = mlx5_ib_counter_alloc_stats,
.counter_update_stats = mlx5_ib_counter_update_stats,
+ .modify_hw_stat = IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) ?
+ mlx5_ib_modify_stat : NULL,
};
static const struct ib_device_ops hw_switchdev_stats_ops = {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index e95967aefe78..08b7f6bc56c3 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj,
struct mlx5_ib_dev *dev,
void *in, void *out)
{
- struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
- struct mlx5_core_mkey *mkey;
+ struct mlx5_ib_mkey *mkey = &obj->mkey;
void *mkc;
u8 key;
- mkey = &devx_mr->mmkey;
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
key = MLX5_GET(mkc, mkc, mkey_7_0);
mkey->key = mlx5_idx_to_mkey(
MLX5_GET(create_mkey_out, out, mkey_index)) | key;
mkey->type = MLX5_MKEY_INDIRECT_DEVX;
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
- devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
+ mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);
init_waitqueue_head(&mkey->wait);
return mlx5r_store_odp_mkey(dev, mkey);
@@ -1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
dev = mlx5_udata_to_mdev(&attrs->driver_udata);
if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY &&
xa_erase(&obj->ib_dev->odp_mkeys,
- mlx5_base_mkey(obj->devx_mr.mmkey.key)))
+ mlx5_base_mkey(obj->mkey.key)))
/*
* The pagefault_single_data_segment() does commands against
* the mmkey, we must wait for that to stop before freeing the
* mkey, as another allocation could get the same mkey #.
*/
- mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey);
+ mlx5r_deref_wait_odp_mkey(&obj->mkey);
if (obj->flags & DEVX_OBJ_FLAGS_DCT)
ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h
index 1f69866aed16..ee2213275fd6 100644
--- a/drivers/infiniband/hw/mlx5/devx.h
+++ b/drivers/infiniband/hw/mlx5/devx.h
@@ -16,7 +16,7 @@ struct devx_obj {
u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
u32 flags;
union {
- struct mlx5_ib_devx_mr devx_mr;
+ struct mlx5_ib_mkey mkey;
struct mlx5_core_dct core_dct;
struct mlx5_core_cq core_cq;
u32 flow_counter_bulk_size;
diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c
index 5fbc0a8454b9..b780185d9dc6 100644
--- a/drivers/infiniband/hw/mlx5/fs.c
+++ b/drivers/infiniband/hw/mlx5/fs.c
@@ -10,12 +10,14 @@
#include <rdma/uverbs_std_types.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
+#include <rdma/ib_hdrs.h>
#include <rdma/ib_umem.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/fs_helpers.h>
#include <linux/mlx5/accel.h>
#include <linux/mlx5/eswitch.h>
+#include <net/inet_ecn.h>
#include "mlx5_ib.h"
#include "counters.h"
#include "devx.h"
@@ -847,6 +849,191 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return prio;
}
+enum {
+ RDMA_RX_ECN_OPCOUNTER_PRIO,
+ RDMA_RX_CNP_OPCOUNTER_PRIO,
+};
+
+enum {
+ RDMA_TX_CNP_OPCOUNTER_PRIO,
+};
+
+static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.source_vhca_port) ||
+ !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev,
+ ft_field_support.source_vhca_port))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria,
+ misc_parameters.source_vhca_port);
+ MLX5_SET(fte_match_param, &spec->match_value,
+ misc_parameters.source_vhca_port, port_num);
+
+ return 0;
+}
+
+static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec, int ipv)
+{
+ if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev,
+ ft_field_support.outer_ip_version))
+ return -EOPNOTSUPP;
+
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_ecn);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn,
+ INET_ECN_CE);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.ip_version);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version,
+ ipv);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_flow_spec *spec)
+{
+ if (mlx5_core_mp_enabled(dev->mdev) &&
+ set_vhca_port_spec(dev, port_num, spec))
+ return -EOPNOTSUPP;
+
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters.bth_opcode);
+ MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode,
+ IB_BTH_OPCODE_CNP);
+
+ spec->match_criteria_enable =
+ get_match_criteria_enable(spec->match_criteria);
+
+ return 0;
+}
+
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ enum mlx5_flow_namespace_type fn_type;
+ int priority, i, err, spec_num;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_destination dst;
+ struct mlx5_flow_namespace *ns;
+ struct mlx5_ib_flow_prio *prio;
+ struct mlx5_flow_spec *spec;
+
+ spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ switch (type) {
+ case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS:
+ if (set_ecn_ce_spec(dev, port_num, &spec[0],
+ MLX5_FS_IPV4_VERSION) ||
+ set_ecn_ce_spec(dev, port_num, &spec[1],
+ MLX5_FS_IPV6_VERSION)) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 2;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_ECN_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_receive_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS;
+ priority = RDMA_RX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS:
+ if (!MLX5_CAP_FLOWTABLE(dev->mdev,
+ ft_field_support_2_nic_transmit_rdma.bth_opcode) ||
+ set_cnp_spec(dev, port_num, &spec[0])) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+ spec_num = 1;
+ fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS;
+ priority = RDMA_TX_CNP_OPCOUNTER_PRIO;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ ns = mlx5_get_flow_namespace(dev->mdev, fn_type);
+ if (!ns) {
+ err = -EOPNOTSUPP;
+ goto free;
+ }
+
+ prio = &dev->flow_db->opfcs[type];
+ if (!prio->flow_table) {
+ prio = _get_prio(ns, prio, priority,
+ dev->num_ports * MAX_OPFC_RULES, 1, 0);
+ if (IS_ERR(prio)) {
+ err = PTR_ERR(prio);
+ goto free;
+ }
+ }
+
+ dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dst.counter_id = mlx5_fc_id(opfc->fc);
+
+ flow_act.action =
+ MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
+
+ for (i = 0; i < spec_num; i++) {
+ opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i],
+ &flow_act, &dst, 1);
+ if (IS_ERR(opfc->rule[i])) {
+ err = PTR_ERR(opfc->rule[i]);
+ goto del_rules;
+ }
+ }
+ prio->refcount += spec_num;
+ kfree(spec);
+
+ return 0;
+
+del_rules:
+ for (i -= 1; i >= 0; i--)
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, prio, false);
+free:
+ kfree(spec);
+ return err;
+}
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type)
+{
+ int i;
+
+ for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) {
+ mlx5_del_flow_rules(opfc->rule[i]);
+ put_flow_table(dev, &dev->flow_db->opfcs[type], true);
+ }
+}
+
static void set_underlay_qp(struct mlx5_ib_dev *dev,
struct mlx5_flow_spec *spec,
u32 underlay_qpn)
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 8664bcf6d3f5..5ec8bd2f0b2f 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
bfregi = &context->bfregi;
for (i = 0; i < bfregi->num_static_sys_pages; i++) {
- err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i],
+ context->devx_uid);
if (err)
goto error;
@@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte
error:
for (--i; i >= 0; i--)
- if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]))
+ if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid))
mlx5_ib_warn(dev, "failed to free uar %d\n", i);
return err;
@@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev,
for (i = 0; i < bfregi->num_sys_pages; i++)
if (i < bfregi->num_static_sys_pages ||
bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX)
- mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]);
+ mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i],
+ context->devx_uid);
}
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp)
@@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
if (req.num_low_latency_bfregs > req.total_num_bfregs - 1)
return -EINVAL;
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
+ err = mlx5_ib_devx_create(dev, true);
+ if (err < 0)
+ goto out_ctx;
+ context->devx_uid = err;
+ }
+
lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR;
lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR;
bfregi = &context->bfregi;
@@ -1903,7 +1913,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
/* updates req->total_num_bfregs */
err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi);
if (err)
- goto out_ctx;
+ goto out_devx;
mutex_init(&bfregi->lock);
bfregi->lib_uar_4k = lib_uar_4k;
@@ -1911,7 +1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
GFP_KERNEL);
if (!bfregi->count) {
err = -ENOMEM;
- goto out_ctx;
+ goto out_devx;
}
bfregi->sys_pages = kcalloc(bfregi->num_sys_pages,
@@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx,
goto out_sys_pages;
uar_done:
- if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) {
- err = mlx5_ib_devx_create(dev, true);
- if (err < 0)
- goto out_uars;
- context->devx_uid = err;
- }
-
err = mlx5_ib_alloc_transport_domain(dev, &context->tdn,
context->devx_uid);
if (err)
- goto out_devx;
+ goto out_uars;
INIT_LIST_HEAD(&context->db_page_list);
mutex_init(&context->db_page_mutex);
@@ -1972,9 +1975,6 @@ uar_done:
out_mdev:
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
-out_devx:
- if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
- mlx5_ib_devx_destroy(dev, context->devx_uid);
out_uars:
deallocate_uars(dev, context);
@@ -1985,6 +1985,10 @@ out_sys_pages:
out_count:
kfree(bfregi->count);
+out_devx:
+ if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
+
out_ctx:
return err;
}
@@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
bfregi = &context->bfregi;
mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid);
- if (context->devx_uid)
- mlx5_ib_devx_destroy(dev, context->devx_uid);
-
deallocate_uars(dev, context);
kfree(bfregi->sys_pages);
kfree(bfregi->count);
+
+ if (context->devx_uid)
+ mlx5_ib_devx_destroy(dev, context->devx_uid);
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev,
@@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
struct mlx5_user_mmap_entry *mentry = to_mmmap(entry);
struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device);
struct mlx5_var_table *var_table = &dev->var_table;
+ struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext);
switch (mentry->mmap_flag) {
case MLX5_IB_MMAP_TYPE_MEMIC:
@@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry)
break;
case MLX5_IB_MMAP_TYPE_UAR_WC:
case MLX5_IB_MMAP_TYPE_UAR_NC:
- mlx5_cmd_free_uar(dev->mdev, mentry->page_idx);
+ mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx,
+ context->devx_uid);
kfree(mentry);
break;
default:
@@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd,
bfregi->count[bfreg_dyn_idx]++;
mutex_unlock(&bfregi->lock);
- err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index,
+ context->devx_uid);
if (err) {
mlx5_ib_warn(dev, "UAR alloc failed\n");
goto free_bfreg;
@@ -2240,7 +2247,7 @@ err:
if (!dyn_uar)
return err;
- mlx5_cmd_free_uar(dev->mdev, idx);
+ mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid);
free_bfreg:
mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx);
@@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return ERR_PTR(-ENOMEM);
dev = to_mdev(c->ibucontext.device);
- err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index);
+ err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid);
if (err)
goto end;
@@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c,
return entry;
err_insert:
- mlx5_cmd_free_uar(dev->mdev, uar_index);
+ mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid);
end:
kfree(entry);
return ERR_PTR(err);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index bf20a388eabe..e636e954f6bf 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -263,6 +263,14 @@ struct mlx5_ib_pp {
struct mlx5_core_dev *mdev;
};
+enum mlx5_ib_optional_counter_type {
+ MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
+ MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
+ MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,
+
+ MLX5_IB_OPCOUNTER_MAX,
+};
+
struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT];
@@ -271,6 +279,7 @@ struct mlx5_ib_flow_db {
struct mlx5_ib_flow_prio fdb;
struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT];
struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT];
+ struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX];
struct mlx5_flow_table *lag_demux_ft;
/* Protect flow steering bypass flow tables
* when add/del flow rules.
@@ -619,6 +628,20 @@ struct mlx5_user_mmap_entry {
u32 page_idx;
};
+enum mlx5_mkey_type {
+ MLX5_MKEY_MR = 1,
+ MLX5_MKEY_MW,
+ MLX5_MKEY_INDIRECT_DEVX,
+};
+
+struct mlx5_ib_mkey {
+ u32 key;
+ enum mlx5_mkey_type type;
+ unsigned int ndescs;
+ struct wait_queue_head wait;
+ refcount_t usecount;
+};
+
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)
#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\
@@ -637,7 +660,7 @@ struct mlx5_user_mmap_entry {
struct mlx5_ib_mr {
struct ib_mr ibmr;
- struct mlx5_core_mkey mmkey;
+ struct mlx5_ib_mkey mmkey;
/* User MR data */
struct mlx5_cache_ent *cache_ent;
@@ -659,7 +682,6 @@ struct mlx5_ib_mr {
void *descs_alloc;
dma_addr_t desc_map;
int max_descs;
- int ndescs;
int desc_size;
int access_mode;
@@ -713,13 +735,7 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
struct mlx5_ib_mw {
struct ib_mw ibmw;
- struct mlx5_core_mkey mmkey;
- int ndescs;
-};
-
-struct mlx5_ib_devx_mr {
- struct mlx5_core_mkey mmkey;
- int ndescs;
+ struct mlx5_ib_mkey mmkey;
};
struct mlx5_ib_umr_context {
@@ -797,15 +813,32 @@ struct mlx5_ib_resources {
struct mlx5_ib_port_resources ports[2];
};
+#define MAX_OPFC_RULES 2
+
+struct mlx5_ib_op_fc {
+ struct mlx5_fc *fc;
+ struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
+};
+
struct mlx5_ib_counters {
- const char **names;
+ struct rdma_stat_desc *descs;
size_t *offsets;
u32 num_q_counters;
u32 num_cong_counters;
u32 num_ext_ppcnt_counters;
+ u32 num_op_counters;
u16 set_id;
+ struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};
+int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
+void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_op_fc *opfc,
+ enum mlx5_ib_optional_counter_type type);
+
struct mlx5_ib_multiport_info;
struct mlx5_ib_multiport {
@@ -1579,7 +1612,7 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
}
static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mmkey)
+ struct mlx5_ib_mkey *mmkey)
{
refcount_set(&mmkey->usecount, 1);
@@ -1588,14 +1621,14 @@ static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
}
/* deref an mkey that can participate in ODP flow */
-static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
if (refcount_dec_and_test(&mmkey->usecount))
wake_up(&mmkey->wait);
}
/* deref an mkey that can participate in ODP flow and wait for relese */
-static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey)
+static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
mlx5r_deref_odp_mkey(mmkey);
wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 3be36ebbf67a..157d862fb864 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -88,9 +88,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr,
MLX5_SET64(mkc, mkc, start_addr, start_addr);
}
-static void
-assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in)
+static void assign_mkey_variant(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in)
{
u8 key = atomic_inc_return(&dev->mkey_var);
void *mkc;
@@ -100,17 +99,22 @@ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
mkey->key = key;
}
-static int
-mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_mkey *mkey, u32 *in, int inlen)
{
+ int ret;
+
assign_mkey_variant(dev, mkey, in);
- return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen);
+ ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen);
+ if (!ret)
+ init_waitqueue_head(&mkey->wait);
+
+ return ret;
}
static int
mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev,
- struct mlx5_core_mkey *mkey,
+ struct mlx5_ib_mkey *mkey,
struct mlx5_async_ctx *async_ctx,
u32 *in, int inlen, u32 *out, int outlen,
struct mlx5_async_work *context)
@@ -133,7 +137,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
{
WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
- return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
static void create_mkey_callback(int status, struct mlx5_async_work *context)
@@ -260,10 +264,11 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent)
goto free_in;
}
- err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
+ err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen);
if (err)
goto free_mr;
+ init_waitqueue_head(&mr->mmkey.wait);
mr->mmkey.type = MLX5_MKEY_MR;
WRITE_ONCE(ent->dev->cache.last_add, jiffies);
spin_lock_irq(&ent->lock);
@@ -290,7 +295,7 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key);
kfree(mr);
spin_lock_irq(&ent->lock);
}
@@ -600,29 +605,21 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
/* Return a MR already available in the cache */
static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent)
{
- struct mlx5_ib_dev *dev = req_ent->dev;
struct mlx5_ib_mr *mr = NULL;
struct mlx5_cache_ent *ent = req_ent;
- /* Try larger MR pools from the cache to satisfy the allocation */
- for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) {
- mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order,
- ent - dev->cache.ent);
-
- spin_lock_irq(&ent->lock);
- if (!list_empty(&ent->head)) {
- mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
- list);
- list_del(&mr->list);
- ent->available_mrs--;
- queue_adjust_cache_locked(ent);
- spin_unlock_irq(&ent->lock);
- mlx5_clear_mr(mr);
- return mr;
- }
+ spin_lock_irq(&ent->lock);
+ if (!list_empty(&ent->head)) {
+ mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
+ list_del(&mr->list);
+ ent->available_mrs--;
queue_adjust_cache_locked(ent);
spin_unlock_irq(&ent->lock);
+ mlx5_clear_mr(mr);
+ return mr;
}
+ queue_adjust_cache_locked(ent);
+ spin_unlock_irq(&ent->lock);
req_ent->miss++;
return NULL;
}
@@ -658,7 +655,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
ent->available_mrs--;
ent->total_mrs--;
spin_unlock_irq(&ent->lock);
- mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
}
list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
@@ -911,12 +908,13 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev,
}
static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
- u64 length, int access_flags)
+ u64 length, int access_flags, u64 iova)
{
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
mr->ibmr.length = length;
mr->ibmr.device = &dev->ib_dev;
+ mr->ibmr.iova = iova;
mr->access_flags = access_flags;
}
@@ -974,11 +972,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd,
mr->ibmr.pd = pd;
mr->umem = umem;
- mr->mmkey.iova = iova;
- mr->mmkey.size = umem->length;
- mr->mmkey.pd = to_mpd(pd)->pdn;
mr->page_shift = order_base_2(page_size);
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
return mr;
}
@@ -1087,8 +1082,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr,
wr->wr.opcode = MLX5_IB_WR_UMR;
wr->pd = mr->ibmr.pd;
wr->mkey = mr->mmkey.key;
- wr->length = mr->mmkey.size;
- wr->virt_addr = mr->mmkey.iova;
+ wr->length = mr->ibmr.length;
+ wr->virt_addr = mr->ibmr.iova;
wr->access_flags = mr->access_flags;
wr->page_shift = mr->page_shift;
wr->xlt_size = sg->length;
@@ -1339,9 +1334,8 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
goto err_2;
}
mr->mmkey.type = MLX5_MKEY_MR;
- mr->desc_size = sizeof(struct mlx5_mtt);
mr->umem = umem;
- set_mr_fields(dev, mr, umem->length, access_flags);
+ set_mr_fields(dev, mr, umem->length, access_flags, iova);
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1388,7 +1382,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr,
kfree(in);
- set_mr_fields(dev, mr, length, acc);
+ set_mr_fields(dev, mr, length, acc, start_addr);
return &mr->ibmr;
@@ -1533,6 +1527,7 @@ static struct ib_mr *create_user_odp_mr(struct ib_pd *pd, u64 start, u64 length,
ib_umem_release(&odp->umem);
return ERR_CAST(mr);
}
+ xa_init(&mr->implicit_children);
odp->private = mr;
err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
@@ -1709,7 +1704,6 @@ static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd,
return err;
mr->access_flags = access_flags;
- mr->mmkey.pd = to_mpd(pd)->pdn;
return 0;
}
@@ -1754,7 +1748,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
if (flags & IB_MR_REREG_PD) {
mr->ibmr.pd = pd;
- mr->mmkey.pd = to_mpd(pd)->pdn;
upd_flags |= MLX5_IB_UPD_XLT_PD;
}
if (flags & IB_MR_REREG_ACCESS) {
@@ -1763,8 +1756,8 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
}
mr->ibmr.length = new_umem->length;
- mr->mmkey.iova = iova;
- mr->mmkey.size = new_umem->length;
+ mr->ibmr.iova = iova;
+ mr->ibmr.length = new_umem->length;
mr->page_shift = order_base_2(page_size);
mr->umem = new_umem;
err = mlx5_ib_update_mr_pas(mr, upd_flags);
@@ -1834,7 +1827,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
mr->umem = NULL;
atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages);
- return create_real_mr(new_pd, umem, mr->mmkey.iova,
+ return create_real_mr(new_pd, umem, mr->ibmr.iova,
new_access_flags);
}
@@ -2263,9 +2256,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
struct mlx5_ib_dev *dev = to_mdev(ibmw->device);
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
struct mlx5_ib_mw *mw = to_mmw(ibmw);
+ unsigned int ndescs;
u32 *in = NULL;
void *mkc;
- int ndescs;
int err;
struct mlx5_ib_alloc_mw req = {};
struct {
@@ -2310,7 +2303,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
mw->mmkey.type = MLX5_MKEY_MW;
ibmw->rkey = mw->mmkey.key;
- mw->ndescs = ndescs;
+ mw->mmkey.ndescs = ndescs;
resp.response_length =
min(offsetofend(typeof(resp), response_length), udata->outlen);
@@ -2330,7 +2323,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
return 0;
free_mkey:
- mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey);
+ mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key);
free:
kfree(in);
return err;
@@ -2349,7 +2342,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw)
*/
mlx5r_deref_wait_odp_mkey(&mmw->mmkey);
- return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey);
+ return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key);
}
int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
@@ -2406,7 +2399,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
mr->meta_length = 0;
if (data_sg_nents == 1) {
n++;
- mr->ndescs = 1;
+ mr->mmkey.ndescs = 1;
if (data_sg_offset)
sg_offset = *data_sg_offset;
mr->data_length = sg_dma_len(data_sg) - sg_offset;
@@ -2459,7 +2452,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
if (sg_offset_p)
*sg_offset_p = sg_offset;
- mr->ndescs = i;
+ mr->mmkey.ndescs = i;
mr->data_length = mr->ibmr.length;
if (meta_sg_nents) {
@@ -2492,11 +2485,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
+ descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
}
@@ -2506,11 +2499,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr)
struct mlx5_ib_mr *mr = to_mmr(ibmr);
__be64 *descs;
- if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
+ if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
return -ENOMEM;
descs = mr->descs;
- descs[mr->ndescs + mr->meta_ndescs++] =
+ descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
return 0;
@@ -2526,7 +2519,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2560,7 +2553,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
* metadata offset at the first metadata page
*/
pi_mr->pi_iova = (iova & page_mask) +
- pi_mr->ndescs * ibmr->page_size +
+ pi_mr->mmkey.ndescs * ibmr->page_size +
(pi_mr->ibmr.iova & ~page_mask);
/*
* In order to use one MTT MR for data and metadata, we register
@@ -2591,7 +2584,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
struct mlx5_ib_mr *pi_mr = mr->klm_mr;
int n;
- pi_mr->ndescs = 0;
+ pi_mr->mmkey.ndescs = 0;
pi_mr->meta_ndescs = 0;
pi_mr->meta_length = 0;
@@ -2626,7 +2619,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY);
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
mr->data_length = 0;
mr->data_iova = 0;
mr->meta_ndescs = 0;
@@ -2682,7 +2675,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
struct mlx5_ib_mr *mr = to_mmr(ibmr);
int n;
- mr->ndescs = 0;
+ mr->mmkey.ndescs = 0;
ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
mr->desc_size * mr->max_descs,
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index d0d98e584ebc..91eb615b89ee 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
mr->umem = &odp->umem;
mr->ibmr.lkey = mr->mmkey.key;
mr->ibmr.rkey = mr->mmkey.key;
- mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE;
+ mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE;
mr->parent = imr;
odp->private = mr;
@@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
}
imr->ibmr.pd = &pd->ibpd;
- imr->mmkey.iova = 0;
+ imr->ibmr.iova = 0;
imr->umem = &umem_odp->umem;
imr->ibmr.lkey = imr->mmkey.key;
imr->ibmr.rkey = imr->mmkey.key;
@@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
{
struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
- if (unlikely(io_virt < mr->mmkey.iova))
+ if (unlikely(io_virt < mr->ibmr.iova))
return -EFAULT;
if (mr->umem->is_dmabuf)
@@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
if (!odp->is_implicit_odp) {
u64 user_va;
- if (check_add_overflow(io_virt - mr->mmkey.iova,
+ if (check_add_overflow(io_virt - mr->ibmr.iova,
(u64)odp->umem.address, &user_va))
return -EFAULT;
if (unlikely(user_va >= ib_umem_end(odp) ||
@@ -788,7 +788,7 @@ struct pf_frame {
int depth;
};
-static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
+static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key)
{
if (!mmkey)
return false;
@@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key)
return mmkey->key == key;
}
-static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey)
-{
- struct mlx5_ib_mw *mw;
- struct mlx5_ib_devx_mr *devx_mr;
-
- if (mmkey->type == MLX5_MKEY_MW) {
- mw = container_of(mmkey, struct mlx5_ib_mw, mmkey);
- return mw->ndescs;
- }
-
- devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr,
- mmkey);
- return devx_mr->ndescs;
-}
-
/*
* Handle a single data segment in a page-fault WQE or RDMA region.
*
@@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev,
{
int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0;
struct pf_frame *head = NULL, *frame;
- struct mlx5_core_mkey *mmkey;
+ struct mlx5_ib_mkey *mmkey;
struct mlx5_ib_mr *mr;
struct mlx5_klm *pklm;
u32 *out = NULL;
size_t offset;
- int ndescs;
io_virt += *bytes_committed;
bcnt -= *bytes_committed;
@@ -885,8 +869,6 @@ next_mr:
case MLX5_MKEY_MW:
case MLX5_MKEY_INDIRECT_DEVX:
- ndescs = get_indirect_num_descs(mmkey);
-
if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) {
mlx5_ib_dbg(dev, "indirection level exceeded\n");
ret = -EFAULT;
@@ -894,7 +876,7 @@ next_mr:
}
outlen = MLX5_ST_SZ_BYTES(query_mkey_out) +
- sizeof(*pklm) * (ndescs - 2);
+ sizeof(*pklm) * (mmkey->ndescs - 2);
if (outlen > cur_outlen) {
kfree(out);
@@ -909,14 +891,14 @@ next_mr:
pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out,
bsf0_klm0_pas_mtt0_1);
- ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen);
+ ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen);
if (ret)
goto end;
offset = io_virt - MLX5_GET64(query_mkey_out, out,
memory_key_mkey_entry.start_addr);
- for (i = 0; bcnt && i < ndescs; i++, pklm++) {
+ for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
if (offset >= be32_to_cpu(pklm->bcount)) {
offset -= be32_to_cpu(pklm->bcount);
continue;
@@ -1559,6 +1541,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq)
eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_IB_NUM_PF_EQE,
};
param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT;
@@ -1703,25 +1686,31 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
u32 lkey)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
- struct mlx5_core_mkey *mmkey;
struct mlx5_ib_mr *mr = NULL;
+ struct mlx5_ib_mkey *mmkey;
xa_lock(&dev->odp_mkeys);
mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey));
- if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR)
+ if (!mmkey || mmkey->key != lkey) {
+ mr = ERR_PTR(-ENOENT);
goto end;
+ }
+ if (mmkey->type != MLX5_MKEY_MR) {
+ mr = ERR_PTR(-EINVAL);
+ goto end;
+ }
mr = container_of(mmkey, struct mlx5_ib_mr, mmkey);
if (mr->ibmr.pd != pd) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
/* prefetch with write-access must be supported by the MR */
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE &&
!mr->umem->writable) {
- mr = NULL;
+ mr = ERR_PTR(-EPERM);
goto end;
}
@@ -1753,7 +1742,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w)
destroy_prefetch_work(work);
}
-static bool init_prefetch_work(struct ib_pd *pd,
+static int init_prefetch_work(struct ib_pd *pd,
enum ib_uverbs_advise_mr_advice advice,
u32 pf_flags, struct prefetch_mr_work *work,
struct ib_sge *sg_list, u32 num_sge)
@@ -1764,17 +1753,19 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->pf_flags = pf_flags;
for (i = 0; i < num_sge; ++i) {
- work->frags[i].io_virt = sg_list[i].addr;
- work->frags[i].length = sg_list[i].length;
- work->frags[i].mr =
- get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!work->frags[i].mr) {
+ struct mlx5_ib_mr *mr;
+
+ mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
+ if (IS_ERR(mr)) {
work->num_sge = i;
- return false;
+ return PTR_ERR(mr);
}
+ work->frags[i].io_virt = sg_list[i].addr;
+ work->frags[i].length = sg_list[i].length;
+ work->frags[i].mr = mr;
}
work->num_sge = num_sge;
- return true;
+ return 0;
}
static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
@@ -1790,8 +1781,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd,
struct mlx5_ib_mr *mr;
mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey);
- if (!mr)
- return -ENOENT;
+ if (IS_ERR(mr))
+ return PTR_ERR(mr);
ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length,
&bytes_mapped, pf_flags);
if (ret < 0) {
@@ -1811,6 +1802,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
{
u32 pf_flags = 0;
struct prefetch_mr_work *work;
+ int rc;
if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH)
pf_flags |= MLX5_PF_FLAGS_DOWNGRADE;
@@ -1826,9 +1818,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
if (!work)
return -ENOMEM;
- if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
+ rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge);
+ if (rc) {
destroy_prefetch_work(work);
- return -EINVAL;
+ return rc;
}
queue_work(system_unbound_wq, &work->work);
return 0;
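
The odp.c rework above stops collapsing every prefetch-lookup failure into NULL or a bool; get_prefetchable_mr() returns ERR_PTR() codes and the callers forward them with PTR_ERR(). A minimal sketch of the pattern, using a hypothetical lookup_obj()/use_obj() pair that is not part of this patch:

    #include <linux/err.h>
    #include <linux/errno.h>

    /* hypothetical lookup: each failure mode gets its own errno */
    static struct obj *lookup_obj(struct obj_pool *pool, u32 key)
    {
            struct obj *o = obj_pool_find(pool, key);       /* assumed helper */

            if (!o)
                    return ERR_PTR(-ENOENT);        /* no such object */
            if (!o->usable)
                    return ERR_PTR(-EPERM);         /* found, but not allowed */
            return o;
    }

    static int use_obj(struct obj_pool *pool, u32 key)
    {
            struct obj *o = lookup_obj(pool, key);

            if (IS_ERR(o))
                    return PTR_ERR(o);              /* caller sees the real reason */
            /* ... use o ... */
            return 0;
    }
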
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index b2fca110346c..e5abbcfc1d57 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -4458,6 +4458,8 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
MLX5_SET(dctc, dctc, mtu, attr->path_mtu);
MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
+ if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE)
+ MLX5_SET(dctc, dctc, eth_prio, attr->ah_attr.sl & 0x7);
err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
MLX5_ST_SZ_BYTES(create_dct_in), out,
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 8841620af82f..51e48ca9016e 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void)
static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr,
struct mlx5_ib_mr *mr, u8 flags, bool atomic)
{
- int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
memset(umr, 0, sizeof(*umr));
@@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg,
struct mlx5_ib_mr *mr,
u32 key, int access)
{
- int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1;
+ int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1;
memset(seg, 0, sizeof(*seg));
@@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg,
struct mlx5_ib_mr *mr,
struct mlx5_ib_pd *pd)
{
- int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs);
+ int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs);
dseg->addr = cpu_to_be64(mr->desc_map);
dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64));
@@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp,
struct mlx5_ib_mr *mr = to_mmr(wr->mr);
struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd);
struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device);
- int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size;
+ int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size;
bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD;
bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC;
u8 flags = 0;
@@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev,
memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr));
/* No UMR, use local_dma_lkey */
pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey;
- pa_pi_mr.ndescs = mr->ndescs;
+ pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs;
pa_pi_mr.data_length = mr->data_length;
pa_pi_mr.data_iova = mr->data_iova;
if (mr->meta_ndescs) {
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 755930be01b8..65ce6d0f1885 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -228,7 +228,6 @@ static const struct ib_device_ops qedr_dev_ops = {
.query_srq = qedr_query_srq,
.reg_user_mr = qedr_reg_user_mr,
.req_notify_cq = qedr_arm_cq,
- .resize_cq = qedr_resize_cq,
INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah),
INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq),
@@ -272,7 +271,7 @@ static int qedr_register_device(struct qedr_dev *dev)
static int qedr_alloc_mem_sb(struct qedr_dev *dev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 3cb4febaad0f..8def88cfa300 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -455,6 +455,7 @@ struct qedr_qp {
/* synchronization objects used with iwarp ep */
struct kref refcnt;
struct completion iwarp_cm_comp;
+ struct completion qp_rel_comp;
unsigned long iwarp_cm_flags; /* enum iwarp_cm_flags */
};
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 1715fbe0719d..a51fc6854984 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -83,7 +83,7 @@ static void qedr_iw_free_qp(struct kref *ref)
{
struct qedr_qp *qp = container_of(ref, struct qedr_qp, refcnt);
- kfree(qp);
+ complete(&qp->qp_rel_comp);
}
static void
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 3fbf172dbbef..9100009f0a23 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1052,16 +1052,6 @@ err0:
return -EINVAL;
}
-int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata)
-{
- struct qedr_dev *dev = get_qedr_dev(ibcq->device);
- struct qedr_cq *cq = get_qedr_cq(ibcq);
-
- DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq);
-
- return 0;
-}
-
#define QEDR_DESTROY_CQ_MAX_ITERATIONS (10)
#define QEDR_DESTROY_CQ_ITER_DURATION (10)
@@ -1357,6 +1347,7 @@ static void qedr_set_common_qp_params(struct qedr_dev *dev,
if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
kref_init(&qp->refcnt);
init_completion(&qp->iwarp_cm_comp);
+ init_completion(&qp->qp_rel_comp);
}
qp->pd = pd;
@@ -2743,15 +2734,18 @@ int qedr_query_qp(struct ib_qp *ibqp,
int rc = 0;
memset(&params, 0, sizeof(params));
-
- rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
- if (rc)
- goto err;
-
memset(qp_attr, 0, sizeof(*qp_attr));
memset(qp_init_attr, 0, sizeof(*qp_init_attr));
- qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ if (qp->qp_type != IB_QPT_GSI) {
+ rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params);
+ if (rc)
+ goto err;
+ qp_attr->qp_state = qedr_get_ibqp_state(params.state);
+ } else {
+ qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS);
+ }
+
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
@@ -2857,8 +2851,10 @@ int qedr_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
qedr_free_qp_resources(dev, qp, udata);
- if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ if (rdma_protocol_iwarp(&dev->ibdev, 1)) {
qedr_iw_qp_rem_ref(&qp->ibqp);
+ wait_for_completion(&qp->qp_rel_comp);
+ }
return 0;
}
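
The qedr hunks above add a qp_rel_comp completion so that destroy blocks until the last iwarp reference is dropped, rather than having the kref release callback free the QP. A hedged sketch of that wait-for-last-reference pattern with generic names (not the driver's exact code):

    #include <linux/kref.h>
    #include <linux/completion.h>
    #include <linux/slab.h>

    struct my_qp {
            struct kref refcnt;             /* kref_init() at create time */
            struct completion rel_comp;     /* init_completion() at create time */
    };

    static void my_qp_release(struct kref *ref)
    {
            struct my_qp *qp = container_of(ref, struct my_qp, refcnt);

            /* last reference gone: wake whoever is tearing the QP down */
            complete(&qp->rel_comp);
    }

    static void my_qp_destroy(struct my_qp *qp)
    {
            kref_put(&qp->refcnt, my_qp_release);
            wait_for_completion(&qp->rel_comp);     /* wait for other holders */
            kfree(qp);                              /* now it is safe to free */
    }
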
diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h
index 031687dafc61..081753df79ef 100644
--- a/drivers/infiniband/hw/qedr/verbs.h
+++ b/drivers/infiniband/hw/qedr/verbs.h
@@ -53,7 +53,6 @@ int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata);
int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct ib_udata *udata);
-int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index 84fc4dcc5399..bf3fa12fe935 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2021 Cornelis Networks. All rights reserved.
* Copyright (c) 2013 Intel Corporation. All rights reserved.
* Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -62,8 +63,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate,
"Attempt pre-IBTA 1.2 DDR speed negotiation");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel <ibsupport@intel.com>");
-MODULE_DESCRIPTION("Intel IB driver");
+MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>");
+MODULE_DESCRIPTION("Cornelis IB driver");
/*
* QIB_PIO_MAXIBHDR is the max IB header size allowed for in our
diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c
index a67599b5a550..ac11943a5ddb 100644
--- a/drivers/infiniband/hw/qib/qib_user_sdma.c
+++ b/drivers/infiniband/hw/qib/qib_user_sdma.c
@@ -602,7 +602,7 @@ done:
/*
* How many pages in this iovec element?
*/
-static int qib_user_sdma_num_pages(const struct iovec *iov)
+static size_t qib_user_sdma_num_pages(const struct iovec *iov)
{
const unsigned long addr = (unsigned long) iov->iov_base;
const unsigned long len = iov->iov_len;
@@ -658,7 +658,7 @@ static void qib_user_sdma_free_pkt_frag(struct device *dev,
static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
struct qib_user_sdma_queue *pq,
struct qib_user_sdma_pkt *pkt,
- unsigned long addr, int tlen, int npages)
+ unsigned long addr, int tlen, size_t npages)
{
struct page *pages[8];
int i, j;
@@ -722,7 +722,7 @@ static int qib_user_sdma_pin_pkt(const struct qib_devdata *dd,
unsigned long idx;
for (idx = 0; idx < niov; idx++) {
- const int npages = qib_user_sdma_num_pages(iov + idx);
+ const size_t npages = qib_user_sdma_num_pages(iov + idx);
const unsigned long addr = (unsigned long) iov[idx].iov_base;
ret = qib_user_sdma_pin_pages(dd, pq, pkt, addr,
@@ -824,8 +824,8 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
unsigned pktnw;
unsigned pktnwc;
int nfrags = 0;
- int npages = 0;
- int bytes_togo = 0;
+ size_t npages = 0;
+ size_t bytes_togo = 0;
int tiddma = 0;
int cfur;
@@ -885,7 +885,11 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
npages += qib_user_sdma_num_pages(&iov[idx]);
- bytes_togo += slen;
+ if (check_add_overflow(bytes_togo, slen, &bytes_togo) ||
+ bytes_togo > type_max(typeof(pkt->bytes_togo))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
pktnwc += slen >> 2;
idx++;
nfrags++;
@@ -904,8 +908,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
}
if (frag_size) {
- int tidsmsize, n;
- size_t pktsize;
+ size_t tidsmsize, n, pktsize, sz, addrlimit;
n = npages*((2*PAGE_SIZE/frag_size)+1);
pktsize = struct_size(pkt, addr, n);
@@ -923,14 +926,24 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
else
tidsmsize = 0;
- pkt = kmalloc(pktsize+tidsmsize, GFP_KERNEL);
+ if (check_add_overflow(pktsize, tidsmsize, &sz)) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt = kmalloc(sz, GFP_KERNEL);
if (!pkt) {
ret = -ENOMEM;
goto free_pbc;
}
pkt->largepkt = 1;
pkt->frag_size = frag_size;
- pkt->addrlimit = n + ARRAY_SIZE(pkt->addr);
+ if (check_add_overflow(n, ARRAY_SIZE(pkt->addr),
+ &addrlimit) ||
+ addrlimit > type_max(typeof(pkt->addrlimit))) {
+ ret = -EINVAL;
+ goto free_pbc;
+ }
+ pkt->addrlimit = addrlimit;
if (tiddma) {
char *tidsm = (char *)pkt + pktsize;
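
The qib_user_sdma changes swap unchecked int arithmetic for check_add_overflow() plus a type_max() bound, so oversized iovecs are rejected with -EINVAL instead of silently wrapping. A minimal sketch of the idiom, accumulating into a size_t and clamping to a narrower struct field (the struct is hypothetical):

    #include <linux/overflow.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    struct pkt_hdr {
            u16 bytes_togo;         /* narrower than the running total */
    };

    static int accumulate_len(struct pkt_hdr *pkt, size_t *total, size_t slen)
    {
            /* check_add_overflow() writes the sum and returns true on wrap */
            if (check_add_overflow(*total, slen, total) ||
                *total > type_max(typeof(pkt->bytes_togo)))
                    return -EINVAL;

            pkt->bytes_togo = *total;
            return 0;
    }
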
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 398c4c00b932..18a70850b738 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -103,7 +103,7 @@ void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev)
kfree(ufdev);
}
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN])
{
spin_lock(&ufdev->lock);
memcpy(&ufdev->mac, mac, sizeof(ufdev->mac));
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index f0b71d593da5..a91200886922 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -74,7 +74,7 @@ struct usnic_filter_action {
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
-void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
+void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]);
void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 49bdd78ac664..3305f2744bfa 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1223,7 +1223,7 @@ int rvt_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *init_attr,
spin_lock(&rdi->n_qps_lock);
if (rdi->n_qps_allocated == rdi->dparms.props.max_qp) {
spin_unlock(&rdi->n_qps_lock);
- ret = ENOMEM;
+ ret = -ENOMEM;
goto bail_ip;
}
diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c
index da2e867a1ed9..38c7b6fb39d7 100644
--- a/drivers/infiniband/sw/rxe/rxe_av.c
+++ b/drivers/infiniband/sw/rxe/rxe_av.c
@@ -101,11 +101,29 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr)
struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt)
{
+ struct rxe_ah *ah;
+ u32 ah_num;
+
if (!pkt || !pkt->qp)
return NULL;
if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC)
return &pkt->qp->pri_av;
- return (pkt->wqe) ? &pkt->wqe->av : NULL;
+ if (!pkt->wqe)
+ return NULL;
+
+ ah_num = pkt->wqe->wr.wr.ud.ah_num;
+ if (ah_num) {
+ /* only new user provider or kernel client */
+ ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num);
+ if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) {
+ pr_warn("Unable to find AH matching ah_num\n");
+ return NULL;
+ }
+ return &ah->av;
+ }
+
+ /* only old user provider for UD sends */

+ return &pkt->wqe->wr.wr.ud.av;
}
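
rxe_get_av() now looks the AH up by the index carried in the WQE and validates it before trusting it. A hedged sketch of that lookup-then-validate shape, mirroring the checks in the hunk (reference handling omitted; lookup_ah() itself is not part of the patch):

    /* sketch only: resolve an AH by index and verify it still matches */
    static struct rxe_ah *lookup_ah(struct rxe_dev *rxe, struct rxe_qp *qp,
                                    u32 ah_num)
    {
            struct rxe_ah *ah;

            if (!ah_num)
                    return NULL;    /* old ABI: AV embedded in the WQE */

            ah = rxe_pool_get_index(&rxe->ah_pool, ah_num);
            if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != qp->pd)
                    return NULL;    /* stale index or foreign PD */

            return ah;
    }
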
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index d2d802c776fd..d771ba8449a1 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -142,10 +142,7 @@ static inline enum comp_state get_wqe(struct rxe_qp *qp,
/* we come here whether or not we found a response packet to see if
* there are any posted WQEs
*/
- if (qp->is_user)
- wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER);
- else
- wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL);
+ wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
*wqe_p = wqe;
/* no WQE or requester has not started it yet */
@@ -383,30 +380,35 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp,
static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_cqe *cqe)
{
+ struct ib_wc *wc = &cqe->ibwc;
+ struct ib_uverbs_wc *uwc = &cqe->uibwc;
+
memset(cqe, 0, sizeof(*cqe));
if (!qp->is_user) {
- struct ib_wc *wc = &cqe->ibwc;
-
- wc->wr_id = wqe->wr.wr_id;
- wc->status = wqe->status;
- wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- wc->wc_flags = IB_WC_WITH_IMM;
- wc->byte_len = wqe->dma.length;
- wc->qp = &qp->ibqp;
+ wc->wr_id = wqe->wr.wr_id;
+ wc->status = wqe->status;
+ wc->qp = &qp->ibqp;
} else {
- struct ib_uverbs_wc *uwc = &cqe->uibwc;
-
- uwc->wr_id = wqe->wr.wr_id;
- uwc->status = wqe->status;
- uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
- if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
- wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
- uwc->wc_flags = IB_WC_WITH_IMM;
- uwc->byte_len = wqe->dma.length;
- uwc->qp_num = qp->ibqp.qp_num;
+ uwc->wr_id = wqe->wr.wr_id;
+ uwc->status = wqe->status;
+ uwc->qp_num = qp->ibqp.qp_num;
+ }
+
+ if (wqe->status == IB_WC_SUCCESS) {
+ if (!qp->is_user) {
+ wc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ wc->wc_flags = IB_WC_WITH_IMM;
+ wc->byte_len = wqe->dma.length;
+ } else {
+ uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode);
+ if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM ||
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM)
+ uwc->wc_flags = IB_WC_WITH_IMM;
+ uwc->byte_len = wqe->dma.length;
+ }
}
}
@@ -432,10 +434,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
if (post)
make_send_cqe(qp, wqe, &cqe);
- if (qp->is_user)
- advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER);
- else
- advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL);
+ queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT);
if (post)
rxe_cq_post(qp->scq, &cqe, 0);
@@ -539,7 +538,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
wqe->status = IB_WC_WR_FLUSH_ERR;
do_complete(qp, wqe);
} else {
- advance_consumer(q, q->type);
+ queue_advance_consumer(q, q->type);
}
}
}
diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c
index aef288f164fd..6848426c074f 100644
--- a/drivers/infiniband/sw/rxe/rxe_cq.c
+++ b/drivers/infiniband/sw/rxe/rxe_cq.c
@@ -25,11 +25,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq,
}
if (cq) {
- if (cq->is_user)
- count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
- else
- count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
-
+ count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (cqe < count) {
pr_warn("cqe(%d) < current # elements in queue (%d)",
cqe, count);
@@ -65,7 +61,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
int err;
enum queue_type type;
- type = uresp ? QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_TO_CLIENT;
cq->queue = rxe_queue_init(rxe, &cqe,
sizeof(struct rxe_cqe), type);
if (!cq->queue) {
@@ -81,8 +77,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
return err;
}
- if (uresp)
- cq->is_user = 1;
+ cq->is_user = uresp;
cq->is_dying = false;
@@ -117,11 +112,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
spin_lock_irqsave(&cq->cq_lock, flags);
- if (cq->is_user)
- full = queue_full(cq->queue, QUEUE_TYPE_TO_USER);
- else
- full = queue_full(cq->queue, QUEUE_TYPE_KERNEL);
-
+ full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT);
if (unlikely(full)) {
spin_unlock_irqrestore(&cq->cq_lock, flags);
if (cq->ibcq.event_handler) {
@@ -134,17 +125,10 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited)
return -EBUSY;
}
- if (cq->is_user)
- addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER);
- else
- addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL);
-
+ addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
memcpy(addr, cqe, sizeof(*cqe));
- if (cq->is_user)
- advance_producer(cq->queue, QUEUE_TYPE_TO_USER);
- else
- advance_producer(cq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
spin_unlock_irqrestore(&cq->cq_lock, flags);
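
rxe_cq_post() above collapses the is_user branches into a single QUEUE_TYPE_TO_CLIENT path. Condensed into one place, the producer side of the shared completion queue looks like the sketch below (helper names taken from the patch; the event-handler notification on overflow is omitted):

    static int cq_post_one(struct rxe_cq *cq, struct rxe_cqe *cqe)
    {
            unsigned long flags;
            void *addr;

            spin_lock_irqsave(&cq->cq_lock, flags);
            if (queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT)) {
                    spin_unlock_irqrestore(&cq->cq_lock, flags);
                    return -EBUSY;                          /* CQ overflow */
            }
            addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT);
            memcpy(addr, cqe, sizeof(*cqe));                /* fill slot first */
            queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT);
            spin_unlock_irqrestore(&cq->cq_lock, flags);
            return 0;
    }
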
diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
index d5ceb706d964..a012522b577a 100644
--- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c
+++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c
@@ -6,22 +6,22 @@
#include "rxe.h"
#include "rxe_hw_counters.h"
-static const char * const rxe_counter_name[] = {
- [RXE_CNT_SENT_PKTS] = "sent_pkts",
- [RXE_CNT_RCVD_PKTS] = "rcvd_pkts",
- [RXE_CNT_DUP_REQ] = "duplicate_request",
- [RXE_CNT_OUT_OF_SEQ_REQ] = "out_of_seq_request",
- [RXE_CNT_RCV_RNR] = "rcvd_rnr_err",
- [RXE_CNT_SND_RNR] = "send_rnr_err",
- [RXE_CNT_RCV_SEQ_ERR] = "rcvd_seq_err",
- [RXE_CNT_COMPLETER_SCHED] = "ack_deferred",
- [RXE_CNT_RETRY_EXCEEDED] = "retry_exceeded_err",
- [RXE_CNT_RNR_RETRY_EXCEEDED] = "retry_rnr_exceeded_err",
- [RXE_CNT_COMP_RETRY] = "completer_retry_err",
- [RXE_CNT_SEND_ERR] = "send_err",
- [RXE_CNT_LINK_DOWNED] = "link_downed",
- [RXE_CNT_RDMA_SEND] = "rdma_sends",
- [RXE_CNT_RDMA_RECV] = "rdma_recvs",
+static const struct rdma_stat_desc rxe_counter_descs[] = {
+ [RXE_CNT_SENT_PKTS].name = "sent_pkts",
+ [RXE_CNT_RCVD_PKTS].name = "rcvd_pkts",
+ [RXE_CNT_DUP_REQ].name = "duplicate_request",
+ [RXE_CNT_OUT_OF_SEQ_REQ].name = "out_of_seq_request",
+ [RXE_CNT_RCV_RNR].name = "rcvd_rnr_err",
+ [RXE_CNT_SND_RNR].name = "send_rnr_err",
+ [RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err",
+ [RXE_CNT_COMPLETER_SCHED].name = "ack_deferred",
+ [RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err",
+ [RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err",
+ [RXE_CNT_COMP_RETRY].name = "completer_retry_err",
+ [RXE_CNT_SEND_ERR].name = "send_err",
+ [RXE_CNT_LINK_DOWNED].name = "link_downed",
+ [RXE_CNT_RDMA_SEND].name = "rdma_sends",
+ [RXE_CNT_RDMA_RECV].name = "rdma_recvs",
};
int rxe_ib_get_hw_stats(struct ib_device *ibdev,
@@ -34,18 +34,18 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev,
if (!port || !stats)
return -EINVAL;
- for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_name); cnt++)
+ for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++)
stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]);
- return ARRAY_SIZE(rxe_counter_name);
+ return ARRAY_SIZE(rxe_counter_descs);
}
struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev,
u32 port_num)
{
- BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS);
+ BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS);
- return rdma_alloc_hw_stats_struct(rxe_counter_name,
- ARRAY_SIZE(rxe_counter_name),
+ return rdma_alloc_hw_stats_struct(rxe_counter_descs,
+ ARRAY_SIZE(rxe_counter_descs),
RDMA_HW_STATS_DEFAULT_LIFESPAN);
}
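
The counter table above moves from an array of strings to an array of struct rdma_stat_desc filled with designated initializers. A tiny self-contained sketch of the same C idiom, with made-up counters:

    /* illustrative only: one member filled per designated index */
    struct stat_desc {
            const char *name;
    };

    enum { CNT_TX, CNT_RX, NUM_CNT };

    static const struct stat_desc descs[] = {
            [CNT_TX].name = "tx_pkts",
            [CNT_RX].name = "rx_pkts",
    };

    /* ARRAY_SIZE(descs) == NUM_CNT, which BUILD_BUG_ON() can assert */
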
diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h
index f0c954575bde..1ca43b859d80 100644
--- a/drivers/infiniband/sw/rxe/rxe_loc.h
+++ b/drivers/infiniband/sw/rxe/rxe_loc.h
@@ -86,6 +86,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length);
int advance_dma_data(struct rxe_dma_info *dma, unsigned int length);
int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey);
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe);
+int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr);
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
void rxe_mr_cleanup(struct rxe_pool_entry *arg);
diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c
index 5890a8246216..53271df10e47 100644
--- a/drivers/infiniband/sw/rxe/rxe_mr.c
+++ b/drivers/infiniband/sw/rxe/rxe_mr.c
@@ -24,17 +24,22 @@ u8 rxe_get_next_key(u32 last_key)
int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length)
{
+ struct rxe_map_set *set = mr->cur_map_set;
+
switch (mr->type) {
- case RXE_MR_TYPE_DMA:
+ case IB_MR_TYPE_DMA:
return 0;
- case RXE_MR_TYPE_MR:
- if (iova < mr->iova || length > mr->length ||
- iova > mr->iova + mr->length - length)
+ case IB_MR_TYPE_USER:
+ case IB_MR_TYPE_MEM_REG:
+ if (iova < set->iova || length > set->length ||
+ iova > set->iova + set->length - length)
return -EFAULT;
return 0;
default:
+ pr_warn("%s: mr type (%d) not supported\n",
+ __func__, mr->type);
return -EFAULT;
}
}
@@ -48,48 +53,101 @@ static void rxe_mr_init(int access, struct rxe_mr *mr)
u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1);
u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0;
- mr->ibmr.lkey = lkey;
- mr->ibmr.rkey = rkey;
+ /* Set ibmr->lkey/rkey and also copy them into the private lkey/rkey.
+ * For user MRs these will always be the same; for cases where the
+ * caller 'owns' the key portion they may differ until the REG_MR
+ * WQE is executed.
+ */
+ mr->lkey = mr->ibmr.lkey = lkey;
+ mr->rkey = mr->ibmr.rkey = rkey;
+
mr->state = RXE_MR_STATE_INVALID;
- mr->type = RXE_MR_TYPE_NONE;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
}
-static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf)
+static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set)
{
int i;
- int num_map;
- struct rxe_map **map = mr->map;
- num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
+ for (i = 0; i < num_map; i++)
+ kfree(set->map[i]);
- mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL);
- if (!mr->map)
- goto err1;
+ kfree(set->map);
+ kfree(set);
+}
+
+static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp)
+{
+ int i;
+ struct rxe_map_set *set;
+
+ set = kmalloc(sizeof(*set), GFP_KERNEL);
+ if (!set)
+ goto err_out;
+
+ set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL);
+ if (!set->map)
+ goto err_free_set;
for (i = 0; i < num_map; i++) {
- mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL);
- if (!mr->map[i])
- goto err2;
+ set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL);
+ if (!set->map[i])
+ goto err_free_map;
}
+ *setp = set;
+
+ return 0;
+
+err_free_map:
+ for (i--; i >= 0; i--)
+ kfree(set->map[i]);
+
+ kfree(set->map);
+err_free_set:
+ kfree(set);
+err_out:
+ return -ENOMEM;
+}
+
+/**
+ * rxe_mr_alloc() - Allocate memory map array(s) for MR
+ * @mr: Memory region
+ * @num_buf: Number of buffer descriptors to support
+ * @both: If non-zero allocate both mr->cur_map_set and mr->next_map_set,
+ * else just allocate mr->cur_map_set. Used for fast MRs
+ *
+ * Return: 0 on success else an error
+ */
+static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both)
+{
+ int ret;
+ int num_map;
+
BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP));
+ num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP;
mr->map_shift = ilog2(RXE_BUF_PER_MAP);
mr->map_mask = RXE_BUF_PER_MAP - 1;
-
mr->num_buf = num_buf;
- mr->num_map = num_map;
mr->max_buf = num_map * RXE_BUF_PER_MAP;
+ mr->num_map = num_map;
- return 0;
+ ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set);
+ if (ret)
+ goto err_out;
-err2:
- for (i--; i >= 0; i--)
- kfree(mr->map[i]);
+ if (both) {
+ ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set);
+ if (ret) {
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
+ goto err_out;
+ }
+ }
- kfree(mr->map);
-err1:
+ return 0;
+
+err_out:
return -ENOMEM;
}
@@ -100,12 +158,13 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr)
mr->ibmr.pd = &pd->ibpd;
mr->access = access;
mr->state = RXE_MR_STATE_VALID;
- mr->type = RXE_MR_TYPE_DMA;
+ mr->type = IB_MR_TYPE_DMA;
}
int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int access, struct rxe_mr *mr)
{
+ struct rxe_map_set *set;
struct rxe_map **map;
struct rxe_phys_buf *buf = NULL;
struct ib_umem *umem;
@@ -113,7 +172,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
int num_buf;
void *vaddr;
int err;
- int i;
umem = ib_umem_get(pd->ibpd.device, start, length, access);
if (IS_ERR(umem)) {
@@ -127,18 +185,20 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
rxe_mr_init(access, mr);
- err = rxe_mr_alloc(mr, num_buf);
+ err = rxe_mr_alloc(mr, num_buf, 0);
if (err) {
pr_warn("%s: Unable to allocate memory for map\n",
__func__);
goto err_release_umem;
}
- mr->page_shift = PAGE_SHIFT;
- mr->page_mask = PAGE_SIZE - 1;
+ set = mr->cur_map_set;
+ set->page_shift = PAGE_SHIFT;
+ set->page_mask = PAGE_SIZE - 1;
+
+ num_buf = 0;
+ map = set->map;
- num_buf = 0;
- map = mr->map;
if (length > 0) {
buf = map[0]->buf;
@@ -161,26 +221,24 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova,
buf->size = PAGE_SIZE;
num_buf++;
buf++;
-
}
}
mr->ibmr.pd = &pd->ibpd;
mr->umem = umem;
mr->access = access;
- mr->length = length;
- mr->iova = iova;
- mr->va = start;
- mr->offset = ib_umem_offset(umem);
mr->state = RXE_MR_STATE_VALID;
- mr->type = RXE_MR_TYPE_MR;
+ mr->type = IB_MR_TYPE_USER;
+
+ set->length = length;
+ set->iova = iova;
+ set->va = start;
+ set->offset = ib_umem_offset(umem);
return 0;
err_cleanup_map:
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
- kfree(mr->map);
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
err_release_umem:
ib_umem_release(umem);
err_out:
@@ -191,19 +249,17 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr)
{
int err;
- rxe_mr_init(0, mr);
+ /* always allow remote access for FMRs */
+ rxe_mr_init(IB_ACCESS_REMOTE, mr);
- /* In fastreg, we also set the rkey */
- mr->ibmr.rkey = mr->ibmr.lkey;
-
- err = rxe_mr_alloc(mr, max_pages);
+ err = rxe_mr_alloc(mr, max_pages, 1);
if (err)
goto err1;
mr->ibmr.pd = &pd->ibpd;
mr->max_buf = max_pages;
mr->state = RXE_MR_STATE_FREE;
- mr->type = RXE_MR_TYPE_MR;
+ mr->type = IB_MR_TYPE_MEM_REG;
return 0;
@@ -214,21 +270,24 @@ err1:
static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
size_t *offset_out)
{
- size_t offset = iova - mr->iova + mr->offset;
+ struct rxe_map_set *set = mr->cur_map_set;
+ size_t offset = iova - set->iova + set->offset;
int map_index;
int buf_index;
u64 length;
+ struct rxe_map *map;
- if (likely(mr->page_shift)) {
- *offset_out = offset & mr->page_mask;
- offset >>= mr->page_shift;
+ if (likely(set->page_shift)) {
+ *offset_out = offset & set->page_mask;
+ offset >>= set->page_shift;
*n_out = offset & mr->map_mask;
*m_out = offset >> mr->map_shift;
} else {
map_index = 0;
buf_index = 0;
- length = mr->map[map_index]->buf[buf_index].size;
+ map = set->map[map_index];
+ length = map->buf[buf_index].size;
while (offset >= length) {
offset -= length;
@@ -238,7 +297,8 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out,
map_index++;
buf_index = 0;
}
- length = mr->map[map_index]->buf[buf_index].size;
+ map = set->map[map_index];
+ length = map->buf[buf_index].size;
}
*m_out = map_index;
@@ -259,7 +319,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
goto out;
}
- if (!mr->map) {
+ if (!mr->cur_map_set) {
addr = (void *)(uintptr_t)iova;
goto out;
}
@@ -272,13 +332,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length)
lookup_iova(mr, iova, &m, &n, &offset);
- if (offset + length > mr->map[m]->buf[n].size) {
+ if (offset + length > mr->cur_map_set->map[m]->buf[n].size) {
pr_warn("crosses page boundary\n");
addr = NULL;
goto out;
}
- addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset;
+ addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset;
out:
return addr;
@@ -302,7 +362,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
if (length == 0)
return 0;
- if (mr->type == RXE_MR_TYPE_DMA) {
+ if (mr->type == IB_MR_TYPE_DMA) {
u8 *src, *dest;
src = (dir == RXE_TO_MR_OBJ) ? addr : ((void *)(uintptr_t)iova);
@@ -314,7 +374,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
return 0;
}
- WARN_ON_ONCE(!mr->map);
+ WARN_ON_ONCE(!mr->cur_map_set);
err = mr_check_range(mr, iova, length);
if (err) {
@@ -324,7 +384,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length,
lookup_iova(mr, iova, &m, &i, &offset);
- map = mr->map + m;
+ map = mr->cur_map_set->map + m;
buf = map[0]->buf + i;
while (length > 0) {
@@ -507,8 +567,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key,
if (!mr)
return NULL;
- if (unlikely((type == RXE_LOOKUP_LOCAL && mr_lkey(mr) != key) ||
- (type == RXE_LOOKUP_REMOTE && mr_rkey(mr) != key) ||
+ if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) ||
+ (type == RXE_LOOKUP_REMOTE && mr->rkey != key) ||
mr_pd(mr) != pd || (access && !(access & mr->access)) ||
mr->state != RXE_MR_STATE_VALID)) {
rxe_drop_ref(mr);
@@ -531,9 +591,9 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
goto err;
}
- if (rkey != mr->ibmr.rkey) {
- pr_err("%s: rkey (%#x) doesn't match mr->ibmr.rkey (%#x)\n",
- __func__, rkey, mr->ibmr.rkey);
+ if (rkey != mr->rkey) {
+ pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n",
+ __func__, rkey, mr->rkey);
ret = -EINVAL;
goto err_drop_ref;
}
@@ -545,6 +605,12 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey)
goto err_drop_ref;
}
+ if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) {
+ pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type);
+ ret = -EINVAL;
+ goto err_drop_ref;
+ }
+
mr->state = RXE_MR_STATE_FREE;
ret = 0;
@@ -554,6 +620,67 @@ err:
return ret;
}
+/* The user can (re)register a fast MR by executing a REG_MR WQE.
+ * The user is expected to hold a reference on the ib mr until the
+ * WQE completes.
+ * Once a fast MR is created this is the only way to change the
+ * private keys. It is the user's responsibility to keep the
+ * ib mr keys in sync with the rxe mr keys.
+ */
+int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
+{
+ struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr);
+ u32 key = wqe->wr.wr.reg.key & 0xff;
+ u32 access = wqe->wr.wr.reg.access;
+ struct rxe_map_set *set;
+
+ /* user can only register MR in free state */
+ if (unlikely(mr->state != RXE_MR_STATE_FREE)) {
+ pr_warn("%s: mr->lkey = 0x%x not free\n",
+ __func__, mr->lkey);
+ return -EINVAL;
+ }
+
+ /* user can only register mr with qp in same protection domain */
+ if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) {
+ pr_warn("%s: qp->pd and mr->pd don't match\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ mr->access = access;
+ mr->lkey = (mr->lkey & ~0xff) | key;
+ mr->rkey = (access & IB_ACCESS_REMOTE) ? mr->lkey : 0;
+ mr->state = RXE_MR_STATE_VALID;
+
+ set = mr->cur_map_set;
+ mr->cur_map_set = mr->next_map_set;
+ mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova;
+ mr->next_map_set = set;
+
+ return 0;
+}
+
+int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr)
+{
+ struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map_set *set = mr->next_map_set;
+ struct rxe_map *map;
+ struct rxe_phys_buf *buf;
+
+ if (unlikely(set->nbuf == mr->num_buf))
+ return -ENOMEM;
+
+ map = set->map[set->nbuf / RXE_BUF_PER_MAP];
+ buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP];
+
+ buf->addr = addr;
+ buf->size = ibmr->page_size;
+ set->nbuf++;
+
+ return 0;
+}
+
int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
struct rxe_mr *mr = to_rmr(ibmr);
@@ -564,7 +691,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
return -EINVAL;
}
- mr->state = RXE_MR_STATE_ZOMBIE;
+ mr->state = RXE_MR_STATE_INVALID;
rxe_drop_ref(mr_pd(mr));
rxe_drop_index(mr);
rxe_drop_ref(mr);
@@ -575,14 +702,12 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
void rxe_mr_cleanup(struct rxe_pool_entry *arg)
{
struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem);
- int i;
ib_umem_release(mr->umem);
- if (mr->map) {
- for (i = 0; i < mr->num_map; i++)
- kfree(mr->map[i]);
+ if (mr->cur_map_set)
+ rxe_mr_free_map_set(mr->num_map, mr->cur_map_set);
- kfree(mr->map);
- }
+ if (mr->next_map_set)
+ rxe_mr_free_map_set(mr->num_map, mr->next_map_set);
}
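
Taken together, the rxe_mr.c changes keep two map sets per fast MR: rxe_mr_set_page() fills next_map_set, and rxe_reg_fast_mr() publishes it by swapping it into cur_map_set along with the new key, leaving the previous map intact for reuse. A reduced sketch of that swap (field names from the patch; locking and state checks omitted):

    /* sketch: fill the shadow map set, then publish it with a pointer swap */
    static void publish_map_set(struct rxe_mr *mr, u64 iova)
    {
            struct rxe_map_set *old = mr->cur_map_set;

            mr->cur_map_set = mr->next_map_set;     /* new pages become current */
            mr->cur_map_set->iova = iova;
            mr->next_map_set = old;                 /* reused by the next REG_MR */
    }
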
diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c
index 5ba77df7598e..9534a7fe1a98 100644
--- a/drivers/infiniband/sw/rxe/rxe_mw.c
+++ b/drivers/infiniband/sw/rxe/rxe_mw.c
@@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata)
}
rxe_add_index(mw);
- ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
+ mw->rkey = ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1);
mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ?
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID;
spin_lock_init(&mw->lock);
@@ -71,6 +71,8 @@ int rxe_dealloc_mw(struct ib_mw *ibmw)
static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
+
if (mw->ibmw.type == IB_MW_TYPE_1) {
if (unlikely(mw->state != RXE_MW_STATE_VALID)) {
pr_err_once(
@@ -108,7 +110,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
}
}
- if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) {
+ if (unlikely(key == (mw->rkey & 0xff))) {
pr_err_once("attempt to bind MW with same key\n");
return -EINVAL;
}
@@ -140,15 +142,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
/* C10-75 */
if (mw->access & IB_ZERO_BASED) {
- if (unlikely(wqe->wr.wr.mw.length > mr->length)) {
+ if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) {
pr_err_once(
"attempt to bind a ZB MW outside of the MR\n");
return -EINVAL;
}
} else {
- if (unlikely((wqe->wr.wr.mw.addr < mr->iova) ||
+ if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) ||
((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) >
- (mr->iova + mr->length)))) {
+ (mr->cur_map_set->iova + mr->cur_map_set->length)))) {
pr_err_once(
"attempt to bind a VA MW outside of the MR\n");
return -EINVAL;
@@ -161,13 +163,9 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
struct rxe_mw *mw, struct rxe_mr *mr)
{
- u32 rkey;
- u32 new_rkey;
-
- rkey = mw->ibmw.rkey;
- new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff);
+ u32 key = wqe->wr.wr.mw.rkey & 0xff;
- mw->ibmw.rkey = new_rkey;
+ mw->rkey = (mw->rkey & ~0xff) | key;
mw->access = wqe->wr.wr.mw.access;
mw->state = RXE_MW_STATE_VALID;
mw->addr = wqe->wr.wr.mw.addr;
@@ -197,29 +195,29 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
struct rxe_mw *mw;
struct rxe_mr *mr;
struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
+ u32 mw_rkey = wqe->wr.wr.mw.mw_rkey;
+ u32 mr_lkey = wqe->wr.wr.mw.mr_lkey;
unsigned long flags;
- mw = rxe_pool_get_index(&rxe->mw_pool,
- wqe->wr.wr.mw.mw_rkey >> 8);
+ mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8);
if (unlikely(!mw)) {
ret = -EINVAL;
goto err;
}
- if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) {
+ if (unlikely(mw->rkey != mw_rkey)) {
ret = -EINVAL;
goto err_drop_mw;
}
if (likely(wqe->wr.wr.mw.length)) {
- mr = rxe_pool_get_index(&rxe->mr_pool,
- wqe->wr.wr.mw.mr_lkey >> 8);
+ mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8);
if (unlikely(!mr)) {
ret = -EINVAL;
goto err_drop_mw;
}
- if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) {
+ if (unlikely(mr->lkey != mr_lkey)) {
ret = -EINVAL;
goto err_drop_mr;
}
@@ -292,7 +290,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey)
goto err;
}
- if (rkey != mw->ibmw.rkey) {
+ if (rkey != mw->rkey) {
ret = -EINVAL;
goto err_drop_ref;
}
@@ -323,7 +321,7 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey)
if (!mw)
return NULL;
- if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd ||
+ if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd ||
(mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) ||
(mw->length == 0) ||
(access && !(access & mw->access)) ||
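
Across the rxe_mw.c hunks the 32-bit key is split into a pool index in bits 8 and up and a user-chosen variant byte in the low 8 bits; bind replaces only the low byte. A worked sketch of that layout with invented values:

    static void key_layout_example(void)
    {
            u32 index = 0x1234;                     /* pool index of the MW/MR */
            u8 var = 0xab;                          /* variant byte picked at alloc */
            u32 rkey = (index << 8) | var;          /* 0x001234ab */

            u32 rebound = (rkey & ~0xff) | 0xcd;    /* 0x001234cd: index kept */
            u32 back = rkey >> 8;                   /* 0x1234: recovered on lookup */

            (void)rebound;
            (void)back;
    }
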
diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h
index e02f039b8c44..8f9aaaf260f2 100644
--- a/drivers/infiniband/sw/rxe/rxe_opcode.h
+++ b/drivers/infiniband/sw/rxe/rxe_opcode.h
@@ -22,7 +22,6 @@ enum rxe_wr_mask {
WR_LOCAL_OP_MASK = BIT(5),
WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK,
- WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK,
WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK,
WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK,
};
@@ -82,8 +81,9 @@ enum rxe_hdr_mask {
RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12),
- RXE_READ_OR_ATOMIC = (RXE_READ_MASK | RXE_ATOMIC_MASK),
- RXE_WRITE_OR_SEND = (RXE_WRITE_MASK | RXE_SEND_MASK),
+ RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK),
+ RXE_WRITE_OR_SEND_MASK = (RXE_WRITE_MASK | RXE_SEND_MASK),
+ RXE_READ_OR_WRITE_MASK = (RXE_READ_MASK | RXE_WRITE_MASK),
};
#define OPCODE_NONE (-1)
diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h
index 742e6ec93686..918270e34a35 100644
--- a/drivers/infiniband/sw/rxe/rxe_param.h
+++ b/drivers/infiniband/sw/rxe/rxe_param.h
@@ -9,6 +9,8 @@
#include <uapi/rdma/rdma_user_rxe.h>
+#define DEFAULT_MAX_VALUE (1 << 20)
+
static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu)
{
if (mtu < 256)
@@ -37,7 +39,7 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu)
enum rxe_device_param {
RXE_MAX_MR_SIZE = -1ull,
RXE_PAGE_SIZE_CAP = 0xfffff000,
- RXE_MAX_QP_WR = 0x4000,
+ RXE_MAX_QP_WR = DEFAULT_MAX_VALUE,
RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR
| IB_DEVICE_BAD_QKEY_CNTR
| IB_DEVICE_AUTO_PATH_MIG
@@ -58,42 +60,44 @@ enum rxe_device_param {
RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE -
sizeof(struct rxe_send_wqe),
RXE_MAX_SGE_RD = 32,
- RXE_MAX_CQ = 16384,
+ RXE_MAX_CQ = DEFAULT_MAX_VALUE,
RXE_MAX_LOG_CQE = 15,
- RXE_MAX_PD = 0x7ffc,
+ RXE_MAX_PD = DEFAULT_MAX_VALUE,
RXE_MAX_QP_RD_ATOM = 128,
RXE_MAX_RES_RD_ATOM = 0x3f000,
RXE_MAX_QP_INIT_RD_ATOM = 128,
RXE_MAX_MCAST_GRP = 8192,
RXE_MAX_MCAST_QP_ATTACH = 56,
RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000,
- RXE_MAX_AH = 100,
- RXE_MAX_SRQ_WR = 0x4000,
+ RXE_MAX_AH = (1<<15) - 1, /* 32Ki - 1 */
+ RXE_MIN_AH_INDEX = 1,
+ RXE_MAX_AH_INDEX = RXE_MAX_AH,
+ RXE_MAX_SRQ_WR = DEFAULT_MAX_VALUE,
RXE_MIN_SRQ_WR = 1,
RXE_MAX_SRQ_SGE = 27,
RXE_MIN_SRQ_SGE = 1,
RXE_MAX_FMR_PAGE_LIST_LEN = 512,
- RXE_MAX_PKEYS = 1,
+ RXE_MAX_PKEYS = 64,
RXE_LOCAL_CA_ACK_DELAY = 15,
- RXE_MAX_UCONTEXT = 512,
+ RXE_MAX_UCONTEXT = DEFAULT_MAX_VALUE,
RXE_NUM_PORT = 1,
- RXE_MAX_QP = 0x10000,
RXE_MIN_QP_INDEX = 16,
- RXE_MAX_QP_INDEX = 0x00020000,
+ RXE_MAX_QP_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_QP = DEFAULT_MAX_VALUE - RXE_MIN_QP_INDEX,
- RXE_MAX_SRQ = 0x00001000,
RXE_MIN_SRQ_INDEX = 0x00020001,
- RXE_MAX_SRQ_INDEX = 0x00040000,
+ RXE_MAX_SRQ_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX,
- RXE_MAX_MR = 0x00001000,
- RXE_MAX_MW = 0x00001000,
RXE_MIN_MR_INDEX = 0x00000001,
- RXE_MAX_MR_INDEX = 0x00010000,
+ RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE,
+ RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX,
RXE_MIN_MW_INDEX = 0x00010001,
RXE_MAX_MW_INDEX = 0x00020000,
+ RXE_MAX_MW = 0x00001000,
RXE_MAX_PKT_PER_ACK = 64,
@@ -113,7 +117,7 @@ enum rxe_device_param {
/* default/initial rxe port parameters */
enum rxe_port_param {
RXE_PORT_GID_TBL_LEN = 1024,
- RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP,
+ RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP,
RXE_PORT_MAX_MSG_SZ = 0x800000,
RXE_PORT_BAD_PKEY_CNTR = 0,
RXE_PORT_QKEY_VIOL_CNTR = 0,
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c
index ffa8420b4765..2e80bb6aa957 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.c
+++ b/drivers/infiniband/sw/rxe/rxe_pool.c
@@ -7,9 +7,17 @@
#include "rxe.h"
#include "rxe_loc.h"
-/* info about object pools
- */
-struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
+static const struct rxe_type_info {
+ const char *name;
+ size_t size;
+ size_t elem_offset;
+ void (*cleanup)(struct rxe_pool_entry *obj);
+ enum rxe_pool_flags flags;
+ u32 min_index;
+ u32 max_index;
+ size_t key_offset;
+ size_t key_size;
+} rxe_type_info[RXE_NUM_TYPES] = {
[RXE_TYPE_UC] = {
.name = "rxe-uc",
.size = sizeof(struct rxe_ucontext),
@@ -26,7 +34,9 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.name = "rxe-ah",
.size = sizeof(struct rxe_ah),
.elem_offset = offsetof(struct rxe_ah, pelem),
- .flags = RXE_POOL_NO_ALLOC,
+ .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
+ .min_index = RXE_MIN_AH_INDEX,
+ .max_index = RXE_MAX_AH_INDEX,
},
[RXE_TYPE_SRQ] = {
.name = "rxe-srq",
@@ -58,8 +68,8 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.elem_offset = offsetof(struct rxe_mr, pelem),
.cleanup = rxe_mr_cleanup,
.flags = RXE_POOL_INDEX,
- .max_index = RXE_MAX_MR_INDEX,
.min_index = RXE_MIN_MR_INDEX,
+ .max_index = RXE_MAX_MR_INDEX,
},
[RXE_TYPE_MW] = {
.name = "rxe-mw",
@@ -67,8 +77,8 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = {
.elem_offset = offsetof(struct rxe_mw, pelem),
.cleanup = rxe_mw_cleanup,
.flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC,
- .max_index = RXE_MAX_MW_INDEX,
.min_index = RXE_MIN_MW_INDEX,
+ .max_index = RXE_MAX_MW_INDEX,
},
[RXE_TYPE_MC_GRP] = {
.name = "rxe-mc_grp",
@@ -94,7 +104,6 @@ static inline const char *pool_name(struct rxe_pool *pool)
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
{
int err = 0;
- size_t size;
if ((max - min + 1) < pool->max_elem) {
pr_warn("not enough indices for max_elem\n");
@@ -105,16 +114,12 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)
pool->index.max_index = max;
pool->index.min_index = min;
- size = BITS_TO_LONGS(max - min + 1) * sizeof(long);
- pool->index.table = kmalloc(size, GFP_KERNEL);
+ pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL);
if (!pool->index.table) {
err = -ENOMEM;
goto out;
}
- pool->index.table_size = size;
- bitmap_zero(pool->index.table, max - min + 1);
-
out:
return err;
}
@@ -166,7 +171,7 @@ void rxe_pool_cleanup(struct rxe_pool *pool)
pr_warn("%s pool destroyed with unfree'd elem\n",
pool_name(pool));
- kfree(pool->index.table);
+ bitmap_free(pool->index.table);
}
static u32 alloc_index(struct rxe_pool *pool)
@@ -327,7 +332,7 @@ void __rxe_drop_index(struct rxe_pool_entry *elem)
void *rxe_alloc_locked(struct rxe_pool *pool)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
u8 *obj;
@@ -352,7 +357,7 @@ out_cnt:
void *rxe_alloc(struct rxe_pool *pool)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rxe_pool_entry *elem;
u8 *obj;
@@ -395,7 +400,7 @@ void rxe_elem_release(struct kref *kref)
struct rxe_pool_entry *elem =
container_of(kref, struct rxe_pool_entry, ref_cnt);
struct rxe_pool *pool = elem->pool;
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
u8 *obj;
if (pool->cleanup)
@@ -411,7 +416,7 @@ void rxe_elem_release(struct kref *kref)
void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
struct rxe_pool_entry *elem;
u8 *obj;
@@ -453,7 +458,7 @@ void *rxe_pool_get_index(struct rxe_pool *pool, u32 index)
void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key)
{
- struct rxe_type_info *info = &rxe_type_info[pool->type];
+ const struct rxe_type_info *info = &rxe_type_info[pool->type];
struct rb_node *node;
struct rxe_pool_entry *elem;
u8 *obj;
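
rxe_pool_init_index() now relies on bitmap_zalloc()/bitmap_free() rather than sizing the bitmap by hand with BITS_TO_LONGS() and clearing it with bitmap_zero(). A minimal sketch of the pair (hypothetical wrappers around the same kernel API):

    #include <linux/bitmap.h>
    #include <linux/gfp.h>

    static unsigned long *alloc_index_map(u32 min, u32 max)
    {
            /* allocates and zeroes enough longs to hold (max - min + 1) bits */
            return bitmap_zalloc(max - min + 1, GFP_KERNEL);
    }

    static void free_index_map(unsigned long *map)
    {
            bitmap_free(map);       /* NULL-safe, like kfree() */
    }
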
diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h
index 1feca1bffced..8ecd9f870aea 100644
--- a/drivers/infiniband/sw/rxe/rxe_pool.h
+++ b/drivers/infiniband/sw/rxe/rxe_pool.h
@@ -32,20 +32,6 @@ enum rxe_elem_type {
struct rxe_pool_entry;
-struct rxe_type_info {
- const char *name;
- size_t size;
- size_t elem_offset;
- void (*cleanup)(struct rxe_pool_entry *obj);
- enum rxe_pool_flags flags;
- u32 max_index;
- u32 min_index;
- size_t key_offset;
- size_t key_size;
-};
-
-extern struct rxe_type_info rxe_type_info[];
-
struct rxe_pool_entry {
struct rxe_pool *pool;
struct kref ref_cnt;
@@ -74,7 +60,6 @@ struct rxe_pool {
struct {
struct rb_root tree;
unsigned long *table;
- size_t table_size;
u32 last;
u32 max_index;
u32 min_index;
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 1ab6af7ddb25..975321812c87 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -190,8 +190,6 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp,
INIT_LIST_HEAD(&qp->grp_list);
- skb_queue_head_init(&qp->send_pkts);
-
spin_lock_init(&qp->grp_lock);
spin_lock_init(&qp->state_lock);
@@ -231,7 +229,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
qp->sq.max_inline = init->cap.max_inline_data = wqe_size;
wqe_size += sizeof(struct rxe_send_wqe);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr,
wqe_size, type);
if (!qp->sq.queue)
@@ -248,12 +246,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp,
return err;
}
- if (qp->is_user)
- qp->req.wqe_index = producer_index(qp->sq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->req.wqe_index = producer_index(qp->sq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->req.wqe_index = queue_get_producer(qp->sq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
qp->req.state = QP_STATE_RESET;
qp->req.opcode = -1;
@@ -293,7 +287,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n",
qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr,
wqe_size, type);
if (!qp->rq.queue)
@@ -313,8 +307,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp,
spin_lock_init(&qp->rq.producer_lock);
spin_lock_init(&qp->rq.consumer_lock);
- qp->rq.is_user = qp->is_user;
-
skb_queue_head_init(&qp->resp_pkts);
rxe_init_task(rxe, &qp->resp.task, qp,
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
index 72d95398e604..6e6e023c1b45 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.c
+++ b/drivers/infiniband/sw/rxe/rxe_queue.c
@@ -111,17 +111,33 @@ err1:
static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q,
unsigned int num_elem)
{
- if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type)))
+ enum queue_type type = q->type;
+ u32 prod;
+ u32 cons;
+
+ if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type)))
return -EINVAL;
- while (!queue_empty(q, q->type)) {
- memcpy(producer_addr(new_q, new_q->type),
- consumer_addr(q, q->type),
- new_q->elem_size);
- advance_producer(new_q, new_q->type);
- advance_consumer(q, q->type);
+ prod = queue_get_producer(new_q, type);
+ cons = queue_get_consumer(q, type);
+
+ while (!queue_empty(q, type)) {
+ memcpy(queue_addr_from_index(new_q, prod),
+ queue_addr_from_index(q, cons), new_q->elem_size);
+ prod = queue_next_index(new_q, prod);
+ cons = queue_next_index(q, cons);
}
+ new_q->buf->producer_index = prod;
+ q->buf->consumer_index = cons;
+
+ /* update private index copies */
+ if (type == QUEUE_TYPE_TO_CLIENT)
+ new_q->index = new_q->buf->producer_index;
+ else
+ q->index = q->buf->consumer_index;
+
+ /* exchange rxe_queue headers */
swap(*q, *new_q);
return 0;
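
resize_finish() now copies elements by explicit index before swapping the queue headers. Condensed from the hunk above (same helper names, checks omitted), the core of the copy is:

    u32 prod = queue_get_producer(new_q, type);
    u32 cons = queue_get_consumer(q, type);

    while (!queue_empty(q, type)) {
            memcpy(queue_addr_from_index(new_q, prod),
                   queue_addr_from_index(q, cons), new_q->elem_size);
            prod = queue_next_index(new_q, prod);
            cons = queue_next_index(q, cons);
    }
    new_q->buf->producer_index = prod;      /* publish the copied elements */
    q->buf->consumer_index = cons;          /* mark the old queue drained */
    swap(*q, *new_q);                       /* callers keep their q pointer */
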
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h
index 2702b0e55fc3..6227112ef7a2 100644
--- a/drivers/infiniband/sw/rxe/rxe_queue.h
+++ b/drivers/infiniband/sw/rxe/rxe_queue.h
@@ -10,34 +10,47 @@
/* for definition of shared struct rxe_queue_buf */
#include <uapi/rdma/rdma_user_rxe.h>
-/* implements a simple circular buffer that can optionally be
- * shared between user space and the kernel and can be resized
- * the requested element size is rounded up to a power of 2
- * and the number of elements in the buffer is also rounded
- * up to a power of 2. Since the queue is empty when the
- * producer and consumer indices match the maximum capacity
- * of the queue is one less than the number of element slots
+/* Implements a simple circular buffer that is shared between the user
+ * and the driver and can be resized. The requested element size is
+ * rounded up to a power of 2 and the number of elements in the buffer
+ * is also rounded up to a power of 2. Since the queue is empty when
+ * the producer and consumer indices match, the maximum capacity of the
+ * queue is one less than the number of element slots.
*
* Notes:
- * - Kernel space indices are always masked off to q->index_mask
- * before storing so do not need to be checked on reads.
- * - User space indices may be out of range and must be
- * masked before use when read.
- * - The kernel indices for shared queues must not be written
- * by user space so a local copy is used and a shared copy is
- * stored when the local copy changes.
+ * - The driver indices are always masked off to q->index_mask
+ *   before storing, so they do not need to be checked on reads.
+ * - The user, whether user space or a kernel client, is generally
+ *   not trusted, so its indices are masked to make sure they do
+ *   not address the queue out of bounds on reads.
+ * - The driver indices for queues must not be written by the
+ *   user, so a local copy is used and a shared copy is stored
+ *   when the local copy changes.
* - By passing the type in the parameter list separate from q
- * the compiler can eliminate the switch statement when the
- * actual queue type is known when the function is called.
- * In the performance path this is done. In less critical
- * paths just q->type is passed.
+ *	the compiler can eliminate the switch statement when the
+ *	actual queue type is known at compile time at the call
+ *	site.
+ * - These queues are lock free. The user and driver must protect
+ *   changes to their end of the queues with locks if more than one
+ *   CPU can be accessing them at the same time.
*/
-/* type of queue */
+/**
+ * enum queue_type - type of queue
+ * @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and
+ * read by client. Used by rxe driver only.
+ * @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and
+ * read by rxe driver. Used by rxe driver only.
+ * @QUEUE_TYPE_TO_DRIVER: Queue is written by client and
+ * read by rxe driver. Used by kernel client only.
+ * @QUEUE_TYPE_FROM_DRIVER: Queue is written by rxe driver and
+ * read by client. Used by kernel client only.
+ */
enum queue_type {
- QUEUE_TYPE_KERNEL,
- QUEUE_TYPE_TO_USER,
- QUEUE_TYPE_FROM_USER,
+ QUEUE_TYPE_TO_CLIENT,
+ QUEUE_TYPE_FROM_CLIENT,
+ QUEUE_TYPE_TO_DRIVER,
+ QUEUE_TYPE_FROM_DRIVER,
};
struct rxe_queue {
@@ -69,238 +82,171 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem,
int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p,
unsigned int elem_size, struct ib_udata *udata,
struct mminfo __user *outbuf,
- /* Protect producers while resizing queue */
- spinlock_t *producer_lock,
- /* Protect consumers while resizing queue */
- spinlock_t *consumer_lock);
+ spinlock_t *producer_lock, spinlock_t *consumer_lock);
void rxe_queue_cleanup(struct rxe_queue *queue);
-static inline int next_index(struct rxe_queue *q, int index)
+static inline u32 queue_next_index(struct rxe_queue *q, int index)
{
- return (index + 1) & q->buf->index_mask;
+ return (index + 1) & q->index_mask;
}
-static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_producer(const struct rxe_queue *q,
+ enum queue_type type)
{
u32 prod;
- u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
+ case QUEUE_TYPE_FROM_CLIENT:
+ /* protect user index */
prod = smp_load_acquire(&q->buf->producer_index);
- cons = q->index;
break;
- case QUEUE_TYPE_TO_USER:
+ case QUEUE_TYPE_TO_CLIENT:
prod = q->index;
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_FROM_DRIVER:
+ /* protect driver index */
+ prod = smp_load_acquire(&q->buf->producer_index);
+ break;
+ case QUEUE_TYPE_TO_DRIVER:
prod = q->buf->producer_index;
- cons = q->buf->consumer_index;
break;
}
- return ((prod - cons) & q->index_mask) == 0;
+ return prod;
}
-static inline int queue_full(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_get_consumer(const struct rxe_queue *q,
+ enum queue_type type)
{
- u32 prod;
u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
+ case QUEUE_TYPE_FROM_CLIENT:
cons = q->index;
break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- /* protect user space index */
+ case QUEUE_TYPE_TO_CLIENT:
+ /* protect user index */
cons = smp_load_acquire(&q->buf->consumer_index);
break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
+ case QUEUE_TYPE_FROM_DRIVER:
cons = q->buf->consumer_index;
break;
+ case QUEUE_TYPE_TO_DRIVER:
+ /* protect driver index */
+ cons = smp_load_acquire(&q->buf->consumer_index);
+ break;
}
- return ((prod + 1 - cons) & q->index_mask) == 0;
+ return cons;
}
-static inline unsigned int queue_count(const struct rxe_queue *q,
- enum queue_type type)
+static inline int queue_empty(struct rxe_queue *q, enum queue_type type)
{
- u32 prod;
- u32 cons;
-
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- cons = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- cons = q->buf->consumer_index;
- break;
- }
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- return (prod - cons) & q->index_mask;
+ return ((prod - cons) & q->index_mask) == 0;
}
-static inline void advance_producer(struct rxe_queue *q, enum queue_type type)
+static inline int queue_full(struct rxe_queue *q, enum queue_type type)
{
- u32 prod;
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- pr_warn_once("Normally kernel should not write user space index\n");
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod = (prod + 1) & q->index_mask;
- /* same */
- smp_store_release(&q->buf->producer_index, prod);
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- q->index = (prod + 1) & q->index_mask;
- q->buf->producer_index = q->index;
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- q->buf->producer_index = (prod + 1) & q->index_mask;
- break;
- }
+ return ((prod + 1 - cons) & q->index_mask) == 0;
}
-static inline void advance_consumer(struct rxe_queue *q, enum queue_type type)
+static inline u32 queue_count(const struct rxe_queue *q,
+ enum queue_type type)
{
- u32 cons;
+ u32 prod = queue_get_producer(q, type);
+ u32 cons = queue_get_consumer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- cons = q->index;
- q->index = (cons + 1) & q->index_mask;
- q->buf->consumer_index = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- pr_warn_once("Normally kernel should not write user space index\n");
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons = (cons + 1) & q->index_mask;
- /* same */
- smp_store_release(&q->buf->consumer_index, cons);
- break;
- case QUEUE_TYPE_KERNEL:
- cons = q->buf->consumer_index;
- q->buf->consumer_index = (cons + 1) & q->index_mask;
- break;
- }
+ return (prod - cons) & q->index_mask;
}
-static inline void *producer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_producer(struct rxe_queue *q,
+ enum queue_type type)
{
u32 prod;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod &= q->index_mask;
+ case QUEUE_TYPE_FROM_CLIENT:
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
break;
- case QUEUE_TYPE_TO_USER:
+ case QUEUE_TYPE_TO_CLIENT:
prod = q->index;
+ prod = (prod + 1) & q->index_mask;
+ q->index = prod;
+ /* protect user index */
+ smp_store_release(&q->buf->producer_index, prod);
+ break;
+ case QUEUE_TYPE_FROM_DRIVER:
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_TO_DRIVER:
prod = q->buf->producer_index;
+ prod = (prod + 1) & q->index_mask;
+ q->buf->producer_index = prod;
break;
}
-
- return q->buf->data + (prod << q->log2_elem_size);
}
-static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type)
+static inline void queue_advance_consumer(struct rxe_queue *q,
+ enum queue_type type)
{
u32 cons;
switch (type) {
- case QUEUE_TYPE_FROM_USER:
+ case QUEUE_TYPE_FROM_CLIENT:
cons = q->index;
+ cons = (cons + 1) & q->index_mask;
+ q->index = cons;
+ /* protect user index */
+ smp_store_release(&q->buf->consumer_index, cons);
break;
- case QUEUE_TYPE_TO_USER:
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons &= q->index_mask;
+ case QUEUE_TYPE_TO_CLIENT:
+ pr_warn("%s: attempt to advance client index\n",
+ __func__);
break;
- case QUEUE_TYPE_KERNEL:
+ case QUEUE_TYPE_FROM_DRIVER:
cons = q->buf->consumer_index;
+ cons = (cons + 1) & q->index_mask;
+ q->buf->consumer_index = cons;
+ break;
+ case QUEUE_TYPE_TO_DRIVER:
+ pr_warn("%s: attempt to advance driver index\n",
+ __func__);
break;
}
-
- return q->buf->data + (cons << q->log2_elem_size);
}
-static inline unsigned int producer_index(struct rxe_queue *q,
- enum queue_type type)
+static inline void *queue_producer_addr(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 prod;
+ u32 prod = queue_get_producer(q, type);
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- /* protect user space index */
- prod = smp_load_acquire(&q->buf->producer_index);
- prod &= q->index_mask;
- break;
- case QUEUE_TYPE_TO_USER:
- prod = q->index;
- break;
- case QUEUE_TYPE_KERNEL:
- prod = q->buf->producer_index;
- break;
- }
-
- return prod;
+ return q->buf->data + (prod << q->log2_elem_size);
}
-static inline unsigned int consumer_index(struct rxe_queue *q,
- enum queue_type type)
+static inline void *queue_consumer_addr(struct rxe_queue *q,
+ enum queue_type type)
{
- u32 cons;
-
- switch (type) {
- case QUEUE_TYPE_FROM_USER:
- cons = q->index;
- break;
- case QUEUE_TYPE_TO_USER:
- /* protect user space index */
- cons = smp_load_acquire(&q->buf->consumer_index);
- cons &= q->index_mask;
- break;
- case QUEUE_TYPE_KERNEL:
- cons = q->buf->consumer_index;
- break;
- }
+ u32 cons = queue_get_consumer(q, type);
- return cons;
+ return q->buf->data + (cons << q->log2_elem_size);
}
-static inline void *addr_from_index(struct rxe_queue *q,
- unsigned int index)
+static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index)
{
return q->buf->data + ((index & q->index_mask)
- << q->buf->log2_elem_size);
+ << q->log2_elem_size);
}
-static inline unsigned int index_from_addr(const struct rxe_queue *q,
+static inline u32 queue_index_from_addr(const struct rxe_queue *q,
const void *addr)
{
return (((u8 *)addr - q->buf->data) >> q->log2_elem_size)
@@ -309,7 +255,7 @@ static inline unsigned int index_from_addr(const struct rxe_queue *q,
static inline void *queue_head(struct rxe_queue *q, enum queue_type type)
{
- return queue_empty(q, type) ? NULL : consumer_addr(q, type);
+ return queue_empty(q, type) ? NULL : queue_consumer_addr(q, type);
}
#endif /* RXE_QUEUE_H */
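
The comment block added to rxe_queue.h above describes the shared ring: element sizes and slot counts rounded up to a power of two, indices masked with q->index_mask, and a usable capacity of one less than the slot count because equal indices mean an empty queue. The following is a minimal sketch of that index arithmetic only; the struct and helpers are illustrative and not part of the patch.

#include <linux/types.h>

struct ring {
	u32 prod;		/* producer index, masked on store */
	u32 cons;		/* consumer index, masked on store */
	u32 index_mask;		/* num_slots - 1, num_slots a power of 2 */
};

static int ring_empty(const struct ring *r)
{
	return ((r->prod - r->cons) & r->index_mask) == 0;
}

static int ring_full(const struct ring *r)
{
	/* one slot always stays unused, so capacity is num_slots - 1 */
	return ((r->prod + 1 - r->cons) & r->index_mask) == 0;
}

static u32 ring_count(const struct ring *r)
{
	return (r->prod - r->cons) & r->index_mask;
}

Because num_slots is a power of two, the subtract-and-mask form stays correct when the producer index has wrapped around past the consumer index.
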
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index 3894197a82f6..0c9d2af15f3d 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -49,21 +49,16 @@ static void req_retry(struct rxe_qp *qp)
unsigned int cons;
unsigned int prod;
- if (qp->is_user) {
- cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
- prod = producer_index(q, QUEUE_TYPE_FROM_USER);
- } else {
- cons = consumer_index(q, QUEUE_TYPE_KERNEL);
- prod = producer_index(q, QUEUE_TYPE_KERNEL);
- }
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
qp->req.wqe_index = cons;
qp->req.psn = qp->comp.psn;
qp->req.opcode = -1;
for (wqe_index = cons; wqe_index != prod;
- wqe_index = next_index(q, wqe_index)) {
- wqe = addr_from_index(qp->sq.queue, wqe_index);
+ wqe_index = queue_next_index(q, wqe_index)) {
+ wqe = queue_addr_from_index(qp->sq.queue, wqe_index);
mask = wr_opcode_mask(wqe->wr.opcode, qp);
if (wqe->state == wqe_state_posted)
@@ -121,15 +116,9 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
unsigned int cons;
unsigned int prod;
- if (qp->is_user) {
- wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
- cons = consumer_index(q, QUEUE_TYPE_FROM_USER);
- prod = producer_index(q, QUEUE_TYPE_FROM_USER);
- } else {
- wqe = queue_head(q, QUEUE_TYPE_KERNEL);
- cons = consumer_index(q, QUEUE_TYPE_KERNEL);
- prod = producer_index(q, QUEUE_TYPE_KERNEL);
- }
+ wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
+ cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT);
if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
/* check to see if we are drained;
@@ -170,7 +159,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
if (index == prod)
return NULL;
- wqe = addr_from_index(q, index);
+ wqe = queue_addr_from_index(q, index);
if (unlikely((qp->req.state == QP_STATE_DRAIN ||
qp->req.state == QP_STATE_DRAINED) &&
@@ -390,9 +379,8 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* length from start of bth to end of icrc */
paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE;
- /* pkt->hdr, rxe, port_num and mask are initialized in ifc
- * layer
- */
+ /* pkt->hdr, port_num and mask are initialized in ifc layer */
+ pkt->rxe = rxe;
pkt->opcode = opcode;
pkt->qp = qp;
pkt->psn = qp->req.psn;
@@ -402,6 +390,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp,
/* init skb */
av = rxe_get_av(pkt);
+ if (!av)
+ return NULL;
+
skb = rxe_init_packet(rxe, av, paylen, pkt);
if (unlikely(!skb))
return NULL;
@@ -472,7 +463,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
if (err)
return err;
- if (pkt->mask & RXE_WRITE_OR_SEND) {
+ if (pkt->mask & RXE_WRITE_OR_SEND_MASK) {
if (wqe->wr.send_flags & IB_SEND_INLINE) {
u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset];
@@ -560,7 +551,8 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
qp->req.opcode = pkt->opcode;
if (pkt->mask & RXE_END_MASK)
- qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
+ qp->req.wqe_index);
qp->need_req_skb = 0;
@@ -572,7 +564,6 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
{
u8 opcode = wqe->wr.opcode;
- struct rxe_mr *mr;
u32 rkey;
int ret;
@@ -590,14 +581,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
}
break;
case IB_WR_REG_MR:
- mr = to_rmr(wqe->wr.wr.reg.mr);
- rxe_add_ref(mr);
- mr->state = RXE_MR_STATE_VALID;
- mr->access = wqe->wr.wr.reg.access;
- mr->ibmr.lkey = wqe->wr.wr.reg.key;
- mr->ibmr.rkey = wqe->wr.wr.reg.key;
- mr->iova = wqe->wr.wr.reg.mr->iova;
- rxe_drop_ref(mr);
+ ret = rxe_reg_fast_mr(qp, wqe);
+ if (unlikely(ret)) {
+ wqe->status = IB_WC_LOC_QP_OP_ERR;
+ return ret;
+ }
break;
case IB_WR_BIND_MW:
ret = rxe_bind_mw(qp, wqe);
@@ -614,7 +602,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
- qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
+ qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index);
if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
qp->sq_sig_type == IB_SIGNAL_ALL_WR)
@@ -645,7 +633,8 @@ next_wqe:
goto exit;
if (unlikely(qp->req.state == QP_STATE_RESET)) {
- qp->req.wqe_index = consumer_index(q, q->type);
+ qp->req.wqe_index = queue_get_consumer(q,
+ QUEUE_TYPE_FROM_CLIENT);
qp->req.opcode = -1;
qp->req.need_rd_atomic = 0;
qp->req.wait_psn = 0;
@@ -691,13 +680,13 @@ next_wqe:
}
mask = rxe_opcode[opcode].mask;
- if (unlikely(mask & RXE_READ_OR_ATOMIC)) {
+ if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) {
if (check_init_depth(qp, wqe))
goto exit;
}
mtu = get_mtu(qp);
- payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0;
+ payload = (mask & RXE_WRITE_OR_SEND_MASK) ? wqe->dma.resid : 0;
if (payload > mtu) {
if (qp_type(qp) == IB_QPT_UD) {
/* C10-93.1.1: If the total sum of all the buffer lengths specified for a
@@ -711,7 +700,7 @@ next_wqe:
wqe->last_psn = qp->req.psn;
qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
- qp->req.wqe_index = next_index(qp->sq.queue,
+ qp->req.wqe_index = queue_next_index(qp->sq.queue,
qp->req.wqe_index);
wqe->state = wqe_state_done;
wqe->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c
index 5501227ddc65..e8f435fa6e4d 100644
--- a/drivers/infiniband/sw/rxe/rxe_resp.c
+++ b/drivers/infiniband/sw/rxe/rxe_resp.c
@@ -303,10 +303,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
spin_lock_bh(&srq->rq.consumer_lock);
- if (qp->is_user)
- wqe = queue_head(q, QUEUE_TYPE_FROM_USER);
- else
- wqe = queue_head(q, QUEUE_TYPE_KERNEL);
+ wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT);
if (!wqe) {
spin_unlock_bh(&srq->rq.consumer_lock);
return RESPST_ERR_RNR;
@@ -322,13 +319,8 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp)
memcpy(&qp->resp.srq_wqe, wqe, size);
qp->resp.wqe = &qp->resp.srq_wqe.wqe;
- if (qp->is_user) {
- advance_consumer(q, QUEUE_TYPE_FROM_USER);
- count = queue_count(q, QUEUE_TYPE_FROM_USER);
- } else {
- advance_consumer(q, QUEUE_TYPE_KERNEL);
- count = queue_count(q, QUEUE_TYPE_KERNEL);
- }
+ queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT);
+ count = queue_count(q, QUEUE_TYPE_FROM_CLIENT);
if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) {
srq->limit = 0;
@@ -357,12 +349,8 @@ static enum resp_states check_resource(struct rxe_qp *qp,
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
} else if (!srq) {
- if (qp->is_user)
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
if (qp->resp.wqe) {
qp->resp.status = IB_WC_WR_FLUSH_ERR;
return RESPST_COMPLETE;
@@ -374,7 +362,7 @@ static enum resp_states check_resource(struct rxe_qp *qp,
}
}
- if (pkt->mask & RXE_READ_OR_ATOMIC) {
+ if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) {
/* it is the requesters job to not send
* too many read/atomic ops, we just
* recycle the responder resource queue
@@ -389,12 +377,8 @@ static enum resp_states check_resource(struct rxe_qp *qp,
if (srq)
return get_srq_wqe(qp);
- if (qp->is_user)
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_FROM_USER);
- else
- qp->resp.wqe = queue_head(qp->rq.queue,
- QUEUE_TYPE_KERNEL);
+ qp->resp.wqe = queue_head(qp->rq.queue,
+ QUEUE_TYPE_FROM_CLIENT);
return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR;
}
@@ -429,7 +413,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
enum resp_states state;
int access;
- if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) {
+ if (pkt->mask & RXE_READ_OR_WRITE_MASK) {
if (pkt->mask & RXE_RETH_MASK) {
qp->resp.va = reth_va(pkt);
qp->resp.offset = 0;
@@ -450,7 +434,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp,
}
/* A zero-byte op is not required to set an addr or rkey. */
- if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) &&
+ if ((pkt->mask & RXE_READ_OR_WRITE_MASK) &&
(pkt->mask & RXE_RETH_MASK) &&
reth_len(pkt) == 0) {
return RESPST_EXECUTE;
@@ -876,7 +860,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
wc->opcode = (pkt->mask & RXE_IMMDT_MASK &&
pkt->mask & RXE_WRITE_MASK) ?
IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV;
- wc->vendor_err = 0;
wc->byte_len = (pkt->mask & RXE_IMMDT_MASK &&
pkt->mask & RXE_WRITE_MASK) ?
qp->resp.length : wqe->dma.length - wqe->dma.resid;
@@ -897,8 +880,6 @@ static enum resp_states do_complete(struct rxe_qp *qp,
uwc->ex.invalidate_rkey = ieth_rkey(pkt);
}
- uwc->qp_num = qp->ibqp.qp_num;
-
if (pkt->mask & RXE_DETH_MASK)
uwc->src_qp = deth_sqp(pkt);
@@ -930,18 +911,13 @@ static enum resp_states do_complete(struct rxe_qp *qp,
if (pkt->mask & RXE_DETH_MASK)
wc->src_qp = deth_sqp(pkt);
- wc->qp = &qp->ibqp;
wc->port_num = qp->attr.port_num;
}
}
/* have copy for srq and reference for !srq */
- if (!qp->srq) {
- if (qp->is_user)
- advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER);
- else
- advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL);
- }
+ if (!qp->srq)
+ queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT);
qp->resp.wqe = NULL;
@@ -1213,7 +1189,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify)
return;
while (!qp->srq && q && queue_head(q, q->type))
- advance_consumer(q, q->type);
+ queue_advance_consumer(q, q->type);
}
int rxe_responder(void *arg)
diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c
index 610c98d24b5c..eb1c4c3b3a78 100644
--- a/drivers/infiniband/sw/rxe/rxe_srq.c
+++ b/drivers/infiniband/sw/rxe/rxe_srq.c
@@ -86,14 +86,13 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq,
srq->srq_num = srq->pelem.index;
srq->rq.max_wr = init->attr.max_wr;
srq->rq.max_sge = init->attr.max_sge;
- srq->rq.is_user = srq->is_user;
srq_wqe_size = rcv_wqe_size(srq->rq.max_sge);
spin_lock_init(&srq->rq.producer_lock);
spin_lock_init(&srq->rq.consumer_lock);
- type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL;
+ type = QUEUE_TYPE_FROM_CLIENT;
q = rxe_queue_init(rxe, &srq->rq.max_wr,
srq_wqe_size, type);
if (!q) {
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
index 267b5a9c345d..0aa0d7e52773 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -29,13 +29,10 @@ static int rxe_query_port(struct ib_device *dev,
u32 port_num, struct ib_port_attr *attr)
{
struct rxe_dev *rxe = to_rdev(dev);
- struct rxe_port *port;
int rc;
- port = &rxe->port;
-
/* *attr being zeroed by the caller, avoid zeroing it here */
- *attr = port->attr;
+ *attr = rxe->port.attr;
mutex_lock(&rxe->usdev_lock);
rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
@@ -161,9 +158,19 @@ static int rxe_create_ah(struct ib_ah *ibah,
struct ib_udata *udata)
{
- int err;
struct rxe_dev *rxe = to_rdev(ibah->device);
struct rxe_ah *ah = to_rah(ibah);
+ struct rxe_create_ah_resp __user *uresp = NULL;
+ int err;
+
+ if (udata) {
+ /* test if new user provider */
+ if (udata->outlen >= sizeof(*uresp))
+ uresp = udata->outbuf;
+ ah->is_user = true;
+ } else {
+ ah->is_user = false;
+ }
err = rxe_av_chk_attr(rxe, init_attr->ah_attr);
if (err)
@@ -173,6 +180,24 @@ static int rxe_create_ah(struct ib_ah *ibah,
if (err)
return err;
+ /* create index > 0 */
+ rxe_add_index(ah);
+ ah->ah_num = ah->pelem.index;
+
+ if (uresp) {
+ /* only if new user provider */
+ err = copy_to_user(&uresp->ah_num, &ah->ah_num,
+ sizeof(uresp->ah_num));
+ if (err) {
+ rxe_drop_index(ah);
+ rxe_drop_ref(ah);
+ return -EFAULT;
+ }
+ } else if (ah->is_user) {
+ /* only if old user provider */
+ ah->ah_num = 0;
+ }
+
rxe_init_av(init_attr->ah_attr, &ah->av);
return 0;
}
@@ -205,6 +230,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
struct rxe_ah *ah = to_rah(ibah);
+ rxe_drop_index(ah);
rxe_drop_ref(ah);
return 0;
}
@@ -218,11 +244,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
int num_sge = ibwr->num_sge;
int full;
- if (rq->is_user)
- full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- full = queue_full(rq->queue, QUEUE_TYPE_KERNEL);
-
+ full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
err = -ENOMEM;
goto err1;
@@ -237,11 +259,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
for (i = 0; i < num_sge; i++)
length += ibwr->sg_list[i].length;
- if (rq->is_user)
- recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL);
-
+ recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER);
recv_wqe->wr_id = ibwr->wr_id;
recv_wqe->num_sge = num_sge;
@@ -254,10 +272,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
recv_wqe->dma.cur_sge = 0;
recv_wqe->dma.sge_offset = 0;
- if (rq->is_user)
- advance_producer(rq->queue, QUEUE_TYPE_FROM_USER);
- else
- advance_producer(rq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER);
return 0;
@@ -281,9 +296,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init,
if (udata->outlen < sizeof(*uresp))
return -EINVAL;
uresp = udata->outbuf;
- srq->is_user = true;
- } else {
- srq->is_user = false;
}
err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
@@ -522,8 +534,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
if (qp_type(qp) == IB_QPT_UD ||
qp_type(qp) == IB_QPT_SMI ||
qp_type(qp) == IB_QPT_GSI) {
+ struct ib_ah *ibah = ud_wr(ibwr)->ah;
+
wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
+ wr->wr.ud.ah_num = to_rah(ibah)->ah_num;
if (qp_type(qp) == IB_QPT_GSI)
wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
if (wr->opcode == IB_WR_SEND_WITH_IMM)
@@ -595,11 +610,6 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
return;
}
- if (qp_type(qp) == IB_QPT_UD ||
- qp_type(qp) == IB_QPT_SMI ||
- qp_type(qp) == IB_QPT_GSI)
- memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));
-
if (unlikely(ibwr->send_flags & IB_SEND_INLINE))
copy_inline_data_to_wqe(wqe, ibwr);
else
@@ -633,27 +643,17 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
spin_lock_irqsave(&qp->sq.sq_lock, flags);
- if (qp->is_user)
- full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- full = queue_full(sq->queue, QUEUE_TYPE_KERNEL);
+ full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER);
if (unlikely(full)) {
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
return -ENOMEM;
}
- if (qp->is_user)
- send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL);
-
+ send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER);
init_send_wqe(qp, ibwr, mask, length, send_wqe);
- if (qp->is_user)
- advance_producer(sq->queue, QUEUE_TYPE_FROM_USER);
- else
- advance_producer(sq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER);
spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
@@ -845,18 +845,12 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
spin_lock_irqsave(&cq->cq_lock, flags);
for (i = 0; i < num_entries; i++) {
- if (cq->is_user)
- cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER);
- else
- cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL);
+ cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if (!cqe)
break;
memcpy(wc++, &cqe->ibwc, sizeof(*wc));
- if (cq->is_user)
- advance_consumer(cq->queue, QUEUE_TYPE_TO_USER);
- else
- advance_consumer(cq->queue, QUEUE_TYPE_KERNEL);
+ queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER);
}
spin_unlock_irqrestore(&cq->cq_lock, flags);
@@ -868,10 +862,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
struct rxe_cq *cq = to_rcq(ibcq);
int count;
- if (cq->is_user)
- count = queue_count(cq->queue, QUEUE_TYPE_TO_USER);
- else
- count = queue_count(cq->queue, QUEUE_TYPE_KERNEL);
+ count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER);
return (count > wc_cnt) ? wc_cnt : count;
}
@@ -887,10 +878,7 @@ static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
if (cq->notify != IB_CQ_NEXT_COMP)
cq->notify = flags & IB_CQ_SOLICITED_MASK;
- if (cq->is_user)
- empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER);
- else
- empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL);
+ empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER);
if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty)
ret = 1;
@@ -987,41 +975,26 @@ err1:
return ERR_PTR(err);
}
-static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
-{
- struct rxe_mr *mr = to_rmr(ibmr);
- struct rxe_map *map;
- struct rxe_phys_buf *buf;
-
- if (unlikely(mr->nbuf == mr->num_buf))
- return -ENOMEM;
-
- map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
- buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];
-
- buf->addr = addr;
- buf->size = ibmr->page_size;
- mr->nbuf++;
-
- return 0;
-}
-
+/* Build next_map_set from the scatterlist.
+ * The IB_WR_REG_MR WR will swap the map sets.
+ */
static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
int sg_nents, unsigned int *sg_offset)
{
struct rxe_mr *mr = to_rmr(ibmr);
+ struct rxe_map_set *set = mr->next_map_set;
int n;
- mr->nbuf = 0;
+ set->nbuf = 0;
- n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);
+ n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page);
- mr->va = ibmr->iova;
- mr->iova = ibmr->iova;
- mr->length = ibmr->length;
- mr->page_shift = ilog2(ibmr->page_size);
- mr->page_mask = ibmr->page_size - 1;
- mr->offset = mr->iova & mr->page_mask;
+ set->va = ibmr->iova;
+ set->iova = ibmr->iova;
+ set->length = ibmr->length;
+ set->page_shift = ilog2(ibmr->page_size);
+ set->page_mask = ibmr->page_size - 1;
+ set->offset = set->iova & set->page_mask;
return n;
}
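
rxe_create_ah() above decides whether userspace is a newer provider by comparing udata->outlen with the size of the extended response struct, and only copies ah_num back when the buffer is large enough, so older providers keep working unchanged. A hedged sketch of that response-size probe follows; struct foo_uresp and foo_create() are hypothetical names, not part of the rxe ABI.

#include <linux/uaccess.h>
#include <rdma/ib_verbs.h>

struct foo_uresp {
	__u32 obj_num;		/* field only newer userspace understands */
};

static int foo_create(struct ib_udata *udata, u32 obj_num)
{
	struct foo_uresp __user *uresp = NULL;

	/* a new provider passes a response buffer big enough for obj_num */
	if (udata && udata->outlen >= sizeof(*uresp))
		uresp = udata->outbuf;

	if (uresp &&
	    copy_to_user(&uresp->obj_num, &obj_num, sizeof(obj_num)))
		return -EFAULT;

	/* an old provider still works, it just never sees obj_num */
	return 0;
}
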
diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h
index ac2a2148027f..35e041450090 100644
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -46,8 +46,9 @@ struct rxe_pd {
struct rxe_ah {
struct ib_ah ibah;
struct rxe_pool_entry pelem;
- struct rxe_pd *pd;
struct rxe_av av;
+ bool is_user;
+ int ah_num;
};
struct rxe_cqe {
@@ -64,7 +65,7 @@ struct rxe_cq {
spinlock_t cq_lock;
u8 notify;
bool is_dying;
- int is_user;
+ bool is_user;
struct tasklet_struct comp_task;
};
@@ -77,7 +78,6 @@ enum wqe_state {
};
struct rxe_sq {
- bool is_user;
int max_wr;
int max_sge;
int max_inline;
@@ -86,7 +86,6 @@ struct rxe_sq {
};
struct rxe_rq {
- bool is_user;
int max_wr;
int max_sge;
spinlock_t producer_lock; /* guard queue producer */
@@ -100,7 +99,6 @@ struct rxe_srq {
struct rxe_pd *pd;
struct rxe_rq rq;
u32 srq_num;
- bool is_user;
int limit;
int error;
@@ -240,7 +238,6 @@ struct rxe_qp {
struct sk_buff_head req_pkts;
struct sk_buff_head resp_pkts;
- struct sk_buff_head send_pkts;
struct rxe_req_info req;
struct rxe_comp_info comp;
@@ -267,18 +264,11 @@ struct rxe_qp {
};
enum rxe_mr_state {
- RXE_MR_STATE_ZOMBIE,
RXE_MR_STATE_INVALID,
RXE_MR_STATE_FREE,
RXE_MR_STATE_VALID,
};
-enum rxe_mr_type {
- RXE_MR_TYPE_NONE,
- RXE_MR_TYPE_DMA,
- RXE_MR_TYPE_MR,
-};
-
enum rxe_mr_copy_dir {
RXE_TO_MR_OBJ,
RXE_FROM_MR_OBJ,
@@ -300,6 +290,17 @@ struct rxe_map {
struct rxe_phys_buf buf[RXE_BUF_PER_MAP];
};
+struct rxe_map_set {
+ struct rxe_map **map;
+ u64 va;
+ u64 iova;
+ size_t length;
+ u32 offset;
+ u32 nbuf;
+ int page_shift;
+ int page_mask;
+};
+
static inline int rkey_is_mw(u32 rkey)
{
u32 index = rkey >> 8;
@@ -313,28 +314,24 @@ struct rxe_mr {
struct ib_umem *umem;
+ u32 lkey;
+ u32 rkey;
enum rxe_mr_state state;
- enum rxe_mr_type type;
- u64 va;
- u64 iova;
- size_t length;
- u32 offset;
+ enum ib_mr_type type;
int access;
- int page_shift;
- int page_mask;
int map_shift;
int map_mask;
u32 num_buf;
- u32 nbuf;
u32 max_buf;
u32 num_map;
atomic_t num_mw;
- struct rxe_map **map;
+ struct rxe_map_set *cur_map_set;
+ struct rxe_map_set *next_map_set;
};
enum rxe_mw_state {
@@ -350,6 +347,7 @@ struct rxe_mw {
enum rxe_mw_state state;
struct rxe_qp *qp; /* Type 2 only */
struct rxe_mr *mr;
+ u32 rkey;
int access;
u64 addr;
u64 length;
@@ -469,19 +467,14 @@ static inline struct rxe_mw *to_rmw(struct ib_mw *mw)
return mw ? container_of(mw, struct rxe_mw, ibmw) : NULL;
}
-static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
+static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah)
{
- return to_rpd(mr->ibmr.pd);
+ return to_rpd(ah->ibah.pd);
}
-static inline u32 mr_lkey(struct rxe_mr *mr)
-{
- return mr->ibmr.lkey;
-}
-
-static inline u32 mr_rkey(struct rxe_mr *mr)
+static inline struct rxe_pd *mr_pd(struct rxe_mr *mr)
{
- return mr->ibmr.rkey;
+ return to_rpd(mr->ibmr.pd);
}
static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
@@ -489,11 +482,6 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw)
return to_rpd(mw->ibmw.pd);
}
-static inline u32 rxe_mw_rkey(struct rxe_mw *mw)
-{
- return mw->ibmw.rkey;
-}
-
int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name);
void rxe_mc_cleanup(struct rxe_pool_entry *arg);
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 7a5ed86ffc9f..7acdd3c3a599 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1951,8 +1951,6 @@ int siw_cm_init(void)
void siw_cm_exit(void)
{
- if (siw_cm_wq) {
- flush_workqueue(siw_cm_wq);
+ if (siw_cm_wq)
destroy_workqueue(siw_cm_wq);
- }
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 684c2ddb16f5..fd9d7f2c4d64 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1583,6 +1583,7 @@ int ipoib_cm_dev_init(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
int max_srq_sge, i;
+ u8 addr;
INIT_LIST_HEAD(&priv->cm.passive_ids);
INIT_LIST_HEAD(&priv->cm.reap_list);
@@ -1636,7 +1637,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
}
}
- priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
+ addr = IPOIB_FLAGS_RC;
+ dev_addr_mod(dev, 0, &addr, 1);
return 0;
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index ceabfb0b0a83..2c3dca41d3bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -1057,13 +1057,11 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
{
union ib_gid search_gid;
union ib_gid gid0;
- union ib_gid *netdev_gid;
int err;
u16 index;
u32 port;
bool ret = false;
- netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4);
if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
return false;
@@ -1073,7 +1071,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
* to do it later
*/
priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix;
- netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix;
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix,
+ sizeof(gid0.global.subnet_prefix));
search_gid.global.subnet_prefix = gid0.global.subnet_prefix;
search_gid.global.interface_id = priv->local_gid.global.interface_id;
@@ -1135,8 +1134,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv)
if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) {
memcpy(&priv->local_gid, &gid0,
sizeof(priv->local_gid));
- memcpy(priv->dev->dev_addr + 4, &gid0,
- sizeof(priv->local_gid));
+ dev_addr_mod(priv->dev, 4, (u8 *)&gid0,
+ sizeof(priv->local_gid));
ret = true;
}
}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 0aa8629fdf62..9934b8bd7f56 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1696,6 +1696,7 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
static int ipoib_dev_init_default(struct net_device *dev)
{
struct ipoib_dev_priv *priv = ipoib_priv(dev);
+ u8 addr_mod[3];
ipoib_napi_add(dev);
@@ -1723,9 +1724,10 @@ static int ipoib_dev_init_default(struct net_device *dev)
}
/* after qp created set dev address */
- priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff;
- priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff;
- priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff;
+ addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff;
+ addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff;
+ addr_mod[2] = (priv->qp->qp_num) & 0xff;
+ dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod));
return 0;
@@ -1886,8 +1888,7 @@ static int ipoib_parent_init(struct net_device *ndev)
priv->ca->name, priv->port, result);
return result;
}
- memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw,
- sizeof(union ib_gid));
+ dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid));
SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
priv->dev->dev_port = priv->port - 1;
@@ -1908,8 +1909,8 @@ static void ipoib_child_init(struct net_device *ndev)
memcpy(&priv->local_gid, priv->dev->dev_addr + 4,
sizeof(priv->local_gid));
else {
- memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr,
- INFINIBAND_ALEN);
+ __dev_addr_set(priv->dev, ppriv->dev->dev_addr,
+ INFINIBAND_ALEN);
memcpy(&priv->local_gid, &ppriv->local_gid,
sizeof(priv->local_gid));
}
@@ -1997,7 +1998,6 @@ static void ipoib_ndo_uninit(struct net_device *dev)
if (priv->wq) {
/* See ipoib_mcast_carrier_on_task() */
WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
- flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
priv->wq = NULL;
}
@@ -2327,7 +2327,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid)
memcpy(&priv->local_gid.global.interface_id,
&gid->global.interface_id,
sizeof(gid->global.interface_id));
- memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid));
+ dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid));
clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags);
netif_addr_unlock_bh(netdev);
diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig
index e84248587187..4d43d055fa8e 100644
--- a/drivers/infiniband/ulp/opa_vnic/Kconfig
+++ b/drivers/infiniband/ulp/opa_vnic/Kconfig
@@ -1,9 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config INFINIBAND_OPA_VNIC
- tristate "Intel OPA VNIC support"
+ tristate "Cornelis OPX VNIC support"
depends on X86_64 && INFINIBAND
help
- This is Omni-Path (OPA) Virtual Network Interface Controller (VNIC)
+ This is Omni-Path Express (OPX) Virtual Network Interface Controller (VNIC)
driver for Ethernet over Omni-Path feature. It implements the HW
independent VNIC functionality. It interfaces with Linux stack for
data path and IB MAD for the control path.
diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile
index a8c21d140ccb..196183817cdc 100644
--- a/drivers/infiniband/ulp/opa_vnic/Makefile
+++ b/drivers/infiniband/ulp/opa_vnic/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0-only
-# Makefile - Intel Omni-Path Virtual Network Controller driver
+# Makefile - Cornelis Omni-Path Express Virtual Network Controller driver
# Copyright(c) 2017, Intel Corporation.
+# Copyright(c) 2021, Cornelis Networks.
#
obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o
diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
index cecf0f7cadf9..21c6cea8b1db 100644
--- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
+++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c
@@ -1,5 +1,6 @@
/*
* Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2021 Cornelis Networks.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -46,7 +47,7 @@
*/
/*
- * This file contains OPA Virtual Network Interface Controller (VNIC)
+ * This file contains OPX Virtual Network Interface Controller (VNIC)
* Ethernet Management Agent (EMA) driver
*/
@@ -1051,5 +1052,5 @@ static void opa_vnic_deinit(void)
module_exit(opa_vnic_deinit);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_AUTHOR("Intel Corporation");
-MODULE_DESCRIPTION("Intel OPA Virtual Network driver");
+MODULE_AUTHOR("Cornelis Networks");
+MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver");
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
index 5e780bdd763d..f7e459fe68be 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c
@@ -37,46 +37,50 @@ void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
s->rdma.failover_cnt++;
}
-int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
- char *buf, size_t len)
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
{
struct rtrs_clt_stats_pcpu *s;
size_t used;
int cpu;
- used = scnprintf(buf, len, " ");
- for_each_possible_cpu(cpu)
- used += scnprintf(buf + used, len - used, " CPU%u", cpu);
-
- used += scnprintf(buf + used, len - used, "\nfrom:");
+ used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
- used += scnprintf(buf + used, len - used, " %d",
+ used += sysfs_emit_at(buf, used, "%d ",
atomic_read(&s->cpu_migr.from));
}
- used += scnprintf(buf + used, len - used, "\nto :");
+ used += sysfs_emit_at(buf, used, "\n");
+
+ return used;
+}
+
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf)
+{
+ struct rtrs_clt_stats_pcpu *s;
+
+ size_t used;
+ int cpu;
+
+ used = 0;
for_each_possible_cpu(cpu) {
s = per_cpu_ptr(stats->pcpu_stats, cpu);
- used += scnprintf(buf + used, len - used, " %d",
- s->cpu_migr.to);
+ used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to);
}
- used += scnprintf(buf + used, len - used, "\n");
+
+ used += sysfs_emit_at(buf, used, "\n");
return used;
}
-int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len)
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf)
{
- return scnprintf(buf, len, "%d %d\n",
- stats->reconnects.successful_cnt,
- stats->reconnects.fail_cnt);
+ return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt,
+ stats->reconnects.fail_cnt);
}
-ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len)
+ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page)
{
struct rtrs_clt_stats_rdma sum;
struct rtrs_clt_stats_rdma *r;
@@ -94,16 +98,15 @@ ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
sum.failover_cnt += r->failover_cnt;
}
- return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
+ return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n",
sum.dir[READ].cnt, sum.dir[READ].size_total,
sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
atomic_read(&stats->inflight), sum.failover_cnt);
}
-ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
- char *page, size_t len)
+ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page)
{
- return scnprintf(page, len, "echo 1 to reset all statistics\n");
+ return sysfs_emit(page, "echo 1 to reset all statistics\n");
}
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
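
The rtrs-clt-stats.c helpers above switch from scnprintf() with an explicit length to sysfs_emit()/sysfs_emit_at(), which assume a PAGE_SIZE sysfs buffer and return the number of bytes appended. A rough sketch of the per-CPU emit loop in isolation; example_stat and example_show() are made up for illustration.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/sysfs.h>

static DEFINE_PER_CPU(int, example_stat);

static ssize_t example_show(char *buf)
{
	int cpu;
	int used = 0;

	for_each_possible_cpu(cpu)
		used += sysfs_emit_at(buf, used, "%d ",
				      per_cpu(example_stat, cpu));

	used += sysfs_emit_at(buf, used, "\n");

	return used;
}

Accumulating the sysfs_emit_at() return values yields the final length that the show callback hands back to sysfs, which is what the STAT_ATTR() wrappers changed in rtrs-pri.h now expect.
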
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
index 4ee592ccf979..0e69180c3771 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c
@@ -296,8 +296,12 @@ static struct kobj_attribute rtrs_clt_remove_path_attr =
__ATTR(remove_path, 0644, rtrs_clt_remove_path_show,
rtrs_clt_remove_path_store);
-STAT_ATTR(struct rtrs_clt_stats, cpu_migration,
- rtrs_clt_stats_migration_cnt_to_str,
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_from,
+ rtrs_clt_stats_migration_from_cnt_to_str,
+ rtrs_clt_reset_cpu_migr_stats);
+
+STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to,
+ rtrs_clt_stats_migration_to_cnt_to_str,
rtrs_clt_reset_cpu_migr_stats);
STAT_ATTR(struct rtrs_clt_stats, reconnects,
@@ -313,7 +317,8 @@ STAT_ATTR(struct rtrs_clt_stats, reset_all,
rtrs_clt_reset_all_stats);
static struct attribute *rtrs_clt_stats_attrs[] = {
- &cpu_migration_attr.attr,
+ &cpu_migration_from_attr.attr,
+ &cpu_migration_to_attr.attr,
&reconnects_attr.attr,
&rdma_attr.attr,
&reset_all_attr.attr,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
index bc8824b4ee0d..15c0077dd27e 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c
@@ -2788,6 +2788,12 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops,
struct rtrs_clt *clt;
int err, i;
+ if (strchr(sessname, '/') || strchr(sessname, '.')) {
+ pr_err("sessname cannot contain / and .\n");
+ err = -EINVAL;
+ goto out;
+ }
+
clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv,
ops->link_ev,
reconnect_delay_sec,
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
index 9dc819885ec7..9afffccff973 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h
@@ -224,19 +224,18 @@ void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);
int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
bool enable);
ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
-int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
+int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
+int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf);
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
-int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
- size_t len);
+int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf);
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
- char *page, size_t len);
+ char *page);
/* rtrs-clt-sysfs.c */
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
index d12ddfa50747..78eac9a4f703 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h
@@ -398,7 +398,7 @@ static ssize_t get_value##_show(struct kobject *kobj, \
{ \
type *stats = container_of(kobj, type, kobj_stats); \
\
- return print(stats, page, PAGE_SIZE); \
+ return print(stats, page); \
}
#define STAT_ATTR(type, stat, print, reset) \
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
index 12c374b5eb6e..44b1c1652131 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c
@@ -23,8 +23,7 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable)
return -EINVAL;
}
-ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
- char *page, size_t len)
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page)
{
struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats;
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
index 20efd44297fb..9c43ce5ba1c1 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c
@@ -102,7 +102,7 @@ static ssize_t rtrs_srv_src_addr_show(struct kobject *kobj,
sess = container_of(kobj, struct rtrs_srv_sess, kobj);
cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr,
page, PAGE_SIZE);
- return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n");
+ return cnt + sysfs_emit_at(page, cnt, "\n");
}
static struct kobj_attribute rtrs_srv_src_addr_attr =
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
index 716ef7b23558..7df71f8cf149 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c
@@ -803,6 +803,11 @@ static int process_info_req(struct rtrs_srv_con *con,
return err;
}
+ if (strchr(msg->sessname, '/') || strchr(msg->sessname, '.')) {
+ rtrs_err(s, "sessname cannot contain / and .\n");
+ return -EINVAL;
+ }
+
if (exist_sessname(sess->srv->ctx,
msg->sessname, &sess->srv->paths_uuid)) {
rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname);
@@ -1766,6 +1771,7 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv,
strscpy(sess->s.sessname, str, sizeof(sess->s.sessname));
sess->s.con_num = con_num;
+ sess->s.irq_con_num = con_num;
sess->s.recon_cnt = recon_cnt;
uuid_copy(&sess->s.uuid, uuid);
spin_lock_init(&sess->state_lock);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
index 9d8d2a91a235..7d403c12faf3 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h
+++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h
@@ -136,8 +136,7 @@ static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
/* functions which are implemented in rtrs-srv-stats.c */
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
-ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
- char *page, size_t len);
+ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
char *page, size_t len);
diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c
index ca542e477d38..37952c8e768c 100644
--- a/drivers/infiniband/ulp/rtrs/rtrs.c
+++ b/drivers/infiniband/ulp/rtrs/rtrs.c
@@ -222,13 +222,23 @@ static void qp_event_handler(struct ib_event *ev, void *ctx)
}
}
+static bool is_pollqueue(struct rtrs_con *con)
+{
+ return con->cid >= con->sess->irq_con_num;
+}
+
static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
enum ib_poll_context poll_ctx)
{
struct rdma_cm_id *cm_id = con->cm_id;
struct ib_cq *cq;
- cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
+ if (is_pollqueue(con))
+ cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
+ poll_ctx);
+ else
+ cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx);
+
if (IS_ERR(cq)) {
rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n",
PTR_ERR(cq));
@@ -269,6 +279,17 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd,
return ret;
}
+static void destroy_cq(struct rtrs_con *con)
+{
+ if (con->cq) {
+ if (is_pollqueue(con))
+ ib_free_cq(con->cq);
+ else
+ ib_cq_pool_put(con->cq, con->nr_cqe);
+ }
+ con->cq = NULL;
+}
+
int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
u32 max_send_sge, int cq_vector, int nr_cqe,
u32 max_send_wr, u32 max_recv_wr,
@@ -283,8 +304,7 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con,
err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr,
max_send_sge);
if (err) {
- ib_cq_pool_put(con->cq, con->nr_cqe);
- con->cq = NULL;
+ destroy_cq(con);
return err;
}
con->sess = sess;
@@ -299,10 +319,7 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con)
rdma_destroy_qp(con->cm_id);
con->qp = NULL;
}
- if (con->cq) {
- ib_cq_pool_put(con->cq, con->nr_cqe);
- con->cq = NULL;
- }
+ destroy_cq(con);
}
EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy);
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 9c6386b2af33..e09b1fae42e1 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/mfd/axp20x.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -255,41 +256,24 @@ static int axp20x_pek_probe_input_device(struct axp20x_pek *axp20x_pek,
return 0;
}
-#ifdef CONFIG_ACPI
-static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
- struct platform_device *pdev)
+static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek)
{
- unsigned long long hrv = 0;
- acpi_status status;
-
if (IS_ENABLED(CONFIG_INPUT_SOC_BUTTON_ARRAY) &&
axp20x_pek->axp20x->variant == AXP288_ID) {
- status = acpi_evaluate_integer(ACPI_HANDLE(pdev->dev.parent),
- "_HRV", NULL, &hrv);
- if (ACPI_FAILURE(status))
- dev_err(&pdev->dev, "Failed to get PMIC hardware revision\n");
-
/*
* On Cherry Trail platforms (hrv == 3), do not register the
* input device if there is an "INTCFD9" or "ACPI0011" gpio
* button ACPI device, as that handles the power button too,
* and otherwise we end up reporting all presses twice.
*/
- if (hrv == 3 && (acpi_dev_present("INTCFD9", NULL, -1) ||
+ if (soc_intel_is_cht() &&
+ (acpi_dev_present("INTCFD9", NULL, -1) ||
acpi_dev_present("ACPI0011", NULL, -1)))
return false;
-
}
return true;
}
-#else
-static bool axp20x_pek_should_register_input(struct axp20x_pek *axp20x_pek,
- struct platform_device *pdev)
-{
- return true;
-}
-#endif
static int axp20x_pek_probe(struct platform_device *pdev)
{
@@ -321,7 +305,7 @@ static int axp20x_pek_probe(struct platform_device *pdev)
axp20x_pek->irq_dbf = regmap_irq_get_virq(
axp20x_pek->axp20x->regmap_irqc, axp20x_pek->irq_dbf);
- if (axp20x_pek_should_register_input(axp20x_pek, pdev)) {
+ if (axp20x_pek_should_register_input(axp20x_pek)) {
error = axp20x_pek_probe_input_device(axp20x_pek, pdev);
if (error)
return error;
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 2a822b229bd0..1eacd43cb436 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -20,7 +20,7 @@
#include <linux/amd-iommu.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
-#include <linux/mem_encrypt.h>
+#include <linux/cc_platform.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/apic.h>
@@ -121,8 +121,10 @@ struct ivhd_entry {
u8 type;
u16 devid;
u8 flags;
- u32 ext;
- u32 hidh;
+ struct_group(ext_hid,
+ u32 ext;
+ u32 hidh;
+ );
u64 cid;
u8 uidf;
u8 uidl;
@@ -964,7 +966,7 @@ static bool copy_device_table(void)
pr_err("The address of old device table is above 4G, not trustworthy!\n");
return false;
}
- old_devtb = (sme_active() && is_kdump_kernel())
+ old_devtb = (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) && is_kdump_kernel())
? (__force void *)ioremap_encrypted(old_devtb_phys,
dev_table_size)
: memremap(old_devtb_phys, dev_table_size, MEMREMAP_WB);
@@ -1377,7 +1379,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
break;
}
- memcpy(hid, (u8 *)(&e->ext), ACPIHID_HID_LEN - 1);
+ BUILD_BUG_ON(sizeof(e->ext_hid) != ACPIHID_HID_LEN - 1);
+ memcpy(hid, &e->ext_hid, ACPIHID_HID_LEN - 1);
hid[ACPIHID_HID_LEN - 1] = '\0';
if (!(*hid)) {
@@ -3032,7 +3035,8 @@ static int __init amd_iommu_init(void)
static bool amd_iommu_sme_check(void)
{
- if (!sme_active() || (boot_cpu_data.x86 != 0x17))
+ if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT) ||
+ (boot_cpu_data.x86 != 0x17))
return true;
/* For Fam17h, a specific level of support is required */
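
The ivhd_entry change above wraps the adjacent ext and hidh words in struct_group(ext_hid, ...) so the ACPI HID bytes can be memcpy()'d as one named region, with BUILD_BUG_ON() tying the group size to ACPIHID_HID_LEN - 1. A small hedged sketch of the struct_group() idiom on a made-up structure; example_entry and copy_name() are not from this patch.

#include <linux/build_bug.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_entry {
	u8 type;
	struct_group(name_bytes,	/* one named region over both words */
		u32 lo;
		u32 hi;
	);
};

static void copy_name(char *dst, const struct example_entry *e)
{
	/* spans ->lo and ->hi without FORTIFY_SOURCE field-bounds warnings */
	BUILD_BUG_ON(sizeof(e->name_bytes) != 2 * sizeof(u32));
	memcpy(dst, &e->name_bytes, sizeof(e->name_bytes));
}

The individual members stay addressable as e->lo and e->hi, so existing field accesses elsewhere in the table parser do not need to change.
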
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 1722bb161841..9e5da037d949 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/io-pgtable.h>
+#include <linux/cc_platform.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
@@ -2238,7 +2239,7 @@ static int amd_iommu_def_domain_type(struct device *dev)
* active, because some of those devices (AMD GPUs) don't have the
* encryption bit in their DMA-mask and require remapping.
*/
- if (!mem_encrypt_active() && dev_data->iommu_v2)
+ if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT) && dev_data->iommu_v2)
return IOMMU_DOMAIN_IDENTITY;
return 0;
diff --git a/drivers/iommu/amd/iommu_v2.c b/drivers/iommu/amd/iommu_v2.c
index a9e568276c99..13cbeb997cc1 100644
--- a/drivers/iommu/amd/iommu_v2.c
+++ b/drivers/iommu/amd/iommu_v2.c
@@ -17,6 +17,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/gfp.h>
+#include <linux/cc_platform.h>
#include "amd_iommu.h"
@@ -742,7 +743,7 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
* When memory encryption is active the device is likely not in a
* direct-mapped domain. Forbid using IOMMUv2 functionality for now.
*/
- if (mem_encrypt_active())
+ if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
return -ENODEV;
if (!amd_iommu_v2_supported())
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 3303d707bab4..e80261d17a49 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -25,6 +25,7 @@
#include <linux/property.h>
#include <linux/fsl/mc.h>
#include <linux/module.h>
+#include <linux/cc_platform.h>
#include <trace/events/iommu.h>
static struct kset *iommu_group_kset;
@@ -130,7 +131,7 @@ static int __init iommu_subsys_init(void)
else
iommu_set_default_translated(false);
- if (iommu_default_passthrough() && mem_encrypt_active()) {
+ if (iommu_default_passthrough() && cc_platform_has(CC_ATTR_MEM_ENCRYPT)) {
pr_info("Memory encryption detected - Disabling default IOMMU Passthrough\n");
iommu_set_default_translated(false);
}
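All the mem_encrypt_active()/sme_active() checks in these hunks become cc_platform_has() queries, so a single call covers SME, SEV and any other confidential-computing scheme that sets the corresponding attribute. A minimal sketch of the calling convention, with a hypothetical helper name:

#include <linux/cc_platform.h>
#include <linux/types.h>

/* Hypothetical policy helper: is an identity (passthrough) DMA mapping safe? */
static bool my_drv_identity_map_allowed(void)
{
	/*
	 * With memory encryption active the device would have to carry the
	 * encryption bit in its DMA mask, so require a translated domain.
	 */
	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return false;

	return true;
}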
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index aca7b595c4c7..7038957f4a77 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -115,18 +115,24 @@ config BCM6345_L1_IRQ
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7038_L1_IRQ
- bool
+ tristate "Broadcom STB 7038-style L1/L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
select GENERIC_IRQ_EFFECTIVE_AFF_MASK
config BCM7120_L2_IRQ
- bool
+ tristate "Broadcom STB 7120-style L2 interrupt controller driver"
+ depends on ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
config BRCMSTB_L2_IRQ
- bool
+ tristate "Broadcom STB generic L2 interrupt controller driver"
+ depends on ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
+ default ARCH_BCM2835 || ARCH_BRCMSTB || BMIPS_GENERIC
select GENERIC_IRQ_CHIP
select IRQ_DOMAIN
@@ -400,8 +406,9 @@ config IRQ_UNIPHIER_AIDET
Support for the UniPhier AIDET (ARM Interrupt Detector).
config MESON_IRQ_GPIO
- bool "Meson GPIO Interrupt Multiplexer"
- depends on ARCH_MESON
+ tristate "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON || COMPILE_TEST
+ default ARCH_MESON
select IRQ_DOMAIN_HIERARCHY
help
Support Meson SoC Family GPIO Interrupt Multiplexer
@@ -602,4 +609,12 @@ config APPLE_AIC
Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
such as the M1.
+config MCHP_EIC
+ bool "Microchip External Interrupt Controller"
+ depends on ARCH_AT91 || COMPILE_TEST
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support for the Microchip External Interrupt Controller.
+
endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index f88cbf36a9d2..c1f611cbfbf8 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -116,3 +116,4 @@ obj-$(CONFIG_MACH_REALTEK_RTL) += irq-realtek-rtl.o
obj-$(CONFIG_WPCM450_AIC) += irq-wpcm450-aic.o
obj-$(CONFIG_IRQ_IDT3243X) += irq-idt3243x.o
obj-$(CONFIG_APPLE_AIC) += irq-apple-aic.o
+obj-$(CONFIG_MCHP_EIC) += irq-mchp-eic.o
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c
index 6fc145aacaf0..3759dc36cc8f 100644
--- a/drivers/irqchip/irq-apple-aic.c
+++ b/drivers/irqchip/irq-apple-aic.c
@@ -245,7 +245,7 @@ static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
irq = FIELD_GET(AIC_EVENT_NUM, event);
if (type == AIC_EVENT_TYPE_HW)
- handle_domain_irq(aic_irqc->hw_domain, irq, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain, irq);
else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
aic_handle_ipi(regs);
else if (event != 0)
@@ -392,25 +392,25 @@ static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
}
if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_PHYS);
if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL0_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL0_VIRT);
if (is_kernel_in_hyp_mode()) {
uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);
if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_PHYS, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_PHYS);
if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
- handle_domain_irq(aic_irqc->hw_domain,
- aic_irqc->nr_hw + AIC_TMR_EL02_VIRT, regs);
+ generic_handle_domain_irq(aic_irqc->hw_domain,
+ aic_irqc->nr_hw + AIC_TMR_EL02_VIRT);
}
if ((read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & (PMCR0_IMODE | PMCR0_IACT)) ==
@@ -674,7 +674,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
for_each_set_bit(i, &firing, AIC_NR_SWIPI)
- handle_domain_irq(aic_irqc->ipi_domain, i, regs);
+ generic_handle_domain_irq(aic_irqc->ipi_domain, i);
/*
* No ordering needed here; at worst this just changes the timing of
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 53e0fb0562c1..80906bfec845 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -589,12 +589,7 @@ static void armada_370_xp_handle_msi_irq(struct pt_regs *regs, bool is_chained)
irq = msinr - PCI_MSI_DOORBELL_START;
- if (is_chained)
- generic_handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq);
- else
- handle_domain_irq(armada_370_xp_msi_inner_domain,
- irq, regs);
+ generic_handle_domain_irq(armada_370_xp_msi_inner_domain, irq);
}
}
#else
@@ -646,8 +641,8 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
break;
if (irqnr > 1) {
- handle_domain_irq(armada_370_xp_mpic_domain,
- irqnr, regs);
+ generic_handle_domain_irq(armada_370_xp_mpic_domain,
+ irqnr);
continue;
}
@@ -666,7 +661,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
& IPI_DOORBELL_MASK;
for_each_set_bit(ipi, &ipimask, IPI_DOORBELL_END)
- handle_domain_irq(ipi_domain, ipi, regs);
+ generic_handle_domain_irq(ipi_domain, ipi);
}
#endif
diff --git a/drivers/irqchip/irq-aspeed-vic.c b/drivers/irqchip/irq-aspeed-vic.c
index 58717cd44f99..62ccf2c0c414 100644
--- a/drivers/irqchip/irq-aspeed-vic.c
+++ b/drivers/irqchip/irq-aspeed-vic.c
@@ -100,7 +100,7 @@ static void __exception_irq_entry avic_handle_irq(struct pt_regs *regs)
if (stat == 0)
break;
irq += ffs(stat) - 1;
- handle_domain_irq(vic->dom, irq, regs);
+ generic_handle_domain_irq(vic->dom, irq);
}
}
diff --git a/drivers/irqchip/irq-ativic32.c b/drivers/irqchip/irq-ativic32.c
index 476d6024aaf2..223dd2f97d28 100644
--- a/drivers/irqchip/irq-ativic32.c
+++ b/drivers/irqchip/irq-ativic32.c
@@ -5,11 +5,14 @@
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
+#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <nds32_intrinsic.h>
+#include <asm/irq_regs.h>
+
unsigned long wake_mask;
static void ativic32_ack_irq(struct irq_data *data)
@@ -103,10 +106,25 @@ static irq_hw_number_t get_intr_src(void)
- NDS32_VECTOR_offINTERRUPT;
}
-asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+static void ativic32_handle_irq(struct pt_regs *regs)
{
irq_hw_number_t hwirq = get_intr_src();
- handle_domain_irq(root_domain, hwirq, regs);
+ generic_handle_domain_irq(root_domain, hwirq);
+}
+
+/*
+ * TODO: convert nds32 to GENERIC_IRQ_MULTI_HANDLER so that this entry logic
+ * can live in arch code.
+ */
+asmlinkage void asm_do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ ativic32_handle_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit();
}
int __init ativic32_init_irq(struct device_node *node, struct device_node *parent)
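Most of the conversions that follow are the simple form of this change: the handler is installed with set_handle_irq(), the architecture entry code has already done irq_enter() and saved the register pointer, so handle_domain_irq(domain, hwirq, regs) collapses to generic_handle_domain_irq(domain, hwirq). Only drivers such as ativic32 above and the NVIC below, whose handler is itself the low-level entry point, have to open-code that bookkeeping. A hedged sketch of the common case for a hypothetical controller:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <asm/exception.h>

/* Hypothetical register layout and globals, for illustration only. */
#define MY_INTC_STATUS		0x10
static void __iomem *my_intc_base;
static struct irq_domain *my_intc_domain;

/* Installed with set_handle_irq(); regs were already saved by the arch entry code. */
static void __exception_irq_entry my_intc_handle_irq(struct pt_regs *regs)
{
	unsigned long stat = readl_relaxed(my_intc_base + MY_INTC_STATUS);
	unsigned int hwirq;

	for_each_set_bit(hwirq, &stat, 32)
		generic_handle_domain_irq(my_intc_domain, hwirq);
}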
diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c
index 2c999dc310c1..4631f6847953 100644
--- a/drivers/irqchip/irq-atmel-aic.c
+++ b/drivers/irqchip/irq-atmel-aic.c
@@ -71,7 +71,7 @@ aic_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(gc, 0, AT91_AIC_EOICR);
else
- handle_domain_irq(aic_domain, irqnr, regs);
+ generic_handle_domain_irq(aic_domain, irqnr);
}
static int aic_retrigger(struct irq_data *d)
diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c
index fb4ad2aaa727..145535bd7560 100644
--- a/drivers/irqchip/irq-atmel-aic5.c
+++ b/drivers/irqchip/irq-atmel-aic5.c
@@ -80,7 +80,7 @@ aic5_handle(struct pt_regs *regs)
if (!irqstat)
irq_reg_writel(bgc, 0, AT91_AIC5_EOICR);
else
- handle_domain_irq(aic5_domain, irqnr, regs);
+ generic_handle_domain_irq(aic5_domain, irqnr);
}
static void aic5_mask(struct irq_data *d)
diff --git a/drivers/irqchip/irq-bcm2835.c b/drivers/irqchip/irq-bcm2835.c
index adc1556ed332..e94e2882286c 100644
--- a/drivers/irqchip/irq-bcm2835.c
+++ b/drivers/irqchip/irq-bcm2835.c
@@ -246,7 +246,7 @@ static void __exception_irq_entry bcm2835_handle_irq(
u32 hwirq;
while ((hwirq = get_next_armctrl_hwirq()) != ~0)
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
static void bcm2836_chained_handle_irq(struct irq_desc *desc)
diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c
index 501facdb4570..51491c3c6fdd 100644
--- a/drivers/irqchip/irq-bcm2836.c
+++ b/drivers/irqchip/irq-bcm2836.c
@@ -143,7 +143,7 @@ __exception_irq_entry bcm2836_arm_irqchip_handle_irq(struct pt_regs *regs)
if (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(intc.domain, hwirq, regs);
+ generic_handle_domain_irq(intc.domain, hwirq);
}
}
diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c
index e3483789f4df..fd079215c17f 100644
--- a/drivers/irqchip/irq-bcm6345-l1.c
+++ b/drivers/irqchip/irq-bcm6345-l1.c
@@ -132,16 +132,12 @@ static void bcm6345_l1_irq_handle(struct irq_desc *desc)
int base = idx * IRQS_PER_WORD;
unsigned long pending;
irq_hw_number_t hwirq;
- unsigned int irq;
pending = __raw_readl(cpu->map_base + reg_status(intc, idx));
pending &= __raw_readl(cpu->map_base + reg_enable(intc, idx));
for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
- irq = irq_linear_revmap(intc->domain, base + hwirq);
- if (irq)
- do_IRQ(irq);
- else
+ if (generic_handle_domain_irq(intc->domain, base + hwirq))
spurious_interrupt();
}
}
diff --git a/drivers/irqchip/irq-bcm7038-l1.c b/drivers/irqchip/irq-bcm7038-l1.c
index a035c385ca7a..a62b96237b82 100644
--- a/drivers/irqchip/irq-bcm7038-l1.c
+++ b/drivers/irqchip/irq-bcm7038-l1.c
@@ -28,9 +28,6 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/syscore_ops.h>
-#ifdef CONFIG_ARM
-#include <asm/smp_plat.h>
-#endif
#define IRQS_PER_WORD 32
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 4)
@@ -127,7 +124,7 @@ static void bcm7038_l1_irq_handle(struct irq_desc *desc)
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int idx;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
cpu = intc->cpus[cpu_logical_map(smp_processor_id())];
#else
cpu = intc->cpus[0];
@@ -194,6 +191,7 @@ static void bcm7038_l1_mask(struct irq_data *d)
raw_spin_unlock_irqrestore(&intc->lock, flags);
}
+#if defined(CONFIG_MIPS) && defined(CONFIG_SMP)
static int bcm7038_l1_set_affinity(struct irq_data *d,
const struct cpumask *dest,
bool force)
@@ -220,32 +218,6 @@ static int bcm7038_l1_set_affinity(struct irq_data *d,
return 0;
}
-
-#ifdef CONFIG_SMP
-static void bcm7038_l1_cpu_offline(struct irq_data *d)
-{
- struct cpumask *mask = irq_data_get_affinity_mask(d);
- int cpu = smp_processor_id();
- cpumask_t new_affinity;
-
- /* This CPU was not on the affinity mask */
- if (!cpumask_test_cpu(cpu, mask))
- return;
-
- if (cpumask_weight(mask) > 1) {
- /*
- * Multiple CPU affinity, remove this CPU from the affinity
- * mask
- */
- cpumask_copy(&new_affinity, mask);
- cpumask_clear_cpu(cpu, &new_affinity);
- } else {
- /* Only CPU, put on the lowest online CPU */
- cpumask_clear(&new_affinity);
- cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
- }
- irq_set_affinity_locked(d, &new_affinity, false);
-}
#endif
static int __init bcm7038_l1_init_one(struct device_node *dn,
@@ -328,7 +300,7 @@ static int bcm7038_l1_suspend(void)
u32 val;
/* Wakeup interrupt should only come from the boot cpu */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -352,7 +324,7 @@ static void bcm7038_l1_resume(void)
struct bcm7038_l1_chip *intc;
int boot_cpu, word;
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
boot_cpu = cpu_logical_map(0);
#else
boot_cpu = 0;
@@ -395,9 +367,8 @@ static struct irq_chip bcm7038_l1_irq_chip = {
.name = "bcm7038-l1",
.irq_mask = bcm7038_l1_mask,
.irq_unmask = bcm7038_l1_unmask,
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS)
.irq_set_affinity = bcm7038_l1_set_affinity,
-#ifdef CONFIG_SMP
- .irq_cpu_offline = bcm7038_l1_cpu_offline,
#endif
#ifdef CONFIG_PM_SLEEP
.irq_set_wake = bcm7038_l1_set_wake,
@@ -416,7 +387,7 @@ static int bcm7038_l1_map(struct irq_domain *d, unsigned int virq,
irq_set_chip_and_handler(virq, &bcm7038_l1_irq_chip, handle_level_irq);
irq_set_chip_data(virq, d->host_data);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
+ irqd_set_single_target(irq_get_irq_data(virq));
return 0;
}
@@ -484,4 +455,8 @@ out_free:
return ret;
}
-IRQCHIP_DECLARE(bcm7038_l1, "brcm,bcm7038-l1-intc", bcm7038_l1_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7038_l1)
+IRQCHIP_MATCH("brcm,bcm7038-l1-intc", bcm7038_l1_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7038_l1)
+MODULE_DESCRIPTION("Broadcom STB 7038-style L1/L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
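The tristate conversions above rely on the IRQCHIP_PLATFORM_DRIVER_BEGIN/IRQCHIP_MATCH/IRQCHIP_PLATFORM_DRIVER_END helpers from <linux/irqchip.h>, which wrap an of_irq_init-style callback in a platform driver when the code is built as a module and keep IRQCHIP_DECLARE()-like behaviour when built in; note the callback can no longer be __init. A minimal sketch for a hypothetical controller:

#include <linux/irqchip.h>
#include <linux/module.h>
#include <linux/of.h>

/* Hypothetical init routine; a real one would map registers and add a domain. */
static int my_l2_intc_of_init(struct device_node *np, struct device_node *parent)
{
	return 0;
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(my_l2_intc)
IRQCHIP_MATCH("vendor,my-l2-intc", my_l2_intc_of_init)
IRQCHIP_PLATFORM_DRIVER_END(my_l2_intc)
MODULE_DESCRIPTION("Example modular L2 interrupt controller driver");
MODULE_LICENSE("GPL v2");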
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index f23d7651ea84..d80e67a6aad2 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -220,6 +220,7 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct bcm7120_l2_intc_data *data;
+ struct platform_device *pdev;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret = 0;
@@ -230,7 +231,13 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
if (!data)
return -ENOMEM;
- data->num_parent_irqs = of_irq_count(dn);
+ pdev = of_find_device_by_node(dn);
+ if (!pdev) {
+ ret = -ENODEV;
+ goto out_free_data;
+ }
+
+ data->num_parent_irqs = platform_irq_count(pdev);
if (data->num_parent_irqs <= 0) {
pr_err("invalid number of parent interrupts\n");
ret = -ENOMEM;
@@ -329,6 +336,7 @@ out_unmap:
if (data->map_base[idx])
iounmap(data->map_base[idx]);
}
+out_free_data:
kfree(data);
return ret;
}
@@ -347,8 +355,9 @@ static int __init bcm7120_l2_intc_probe_3380(struct device_node *dn,
"BCM3380 L2");
}
-IRQCHIP_DECLARE(bcm7120_l2_intc, "brcm,bcm7120-l2-intc",
- bcm7120_l2_intc_probe_7120);
-
-IRQCHIP_DECLARE(bcm3380_l2_intc, "brcm,bcm3380-l2-intc",
- bcm7120_l2_intc_probe_3380);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(bcm7120_l2)
+IRQCHIP_MATCH("brcm,bcm7120-l2-intc", bcm7120_l2_intc_probe_7120)
+IRQCHIP_MATCH("brcm,bcm3380-l2-intc", bcm7120_l2_intc_probe_3380)
+IRQCHIP_PLATFORM_DRIVER_END(bcm7120_l2)
+MODULE_DESCRIPTION("Broadcom STB 7120-style L2 interrupt controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 8e0911561f2d..e4efc08ac594 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -275,16 +275,18 @@ static int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
{
return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_hif_spi_l2_intc, "brcm,hif-spi-l2-intc",
- brcmstb_l2_edge_intc_of_init);
-IRQCHIP_DECLARE(brcmstb_upg_aux_aon_l2_intc, "brcm,upg-aux-aon-l2-intc",
- brcmstb_l2_edge_intc_of_init);
static int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
struct device_node *parent)
{
return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
}
-IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
- brcmstb_l2_lvl_intc_of_init);
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(brcmstb_l2)
+IRQCHIP_MATCH("brcm,l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,hif-spi-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,upg-aux-aon-l2-intc", brcmstb_l2_edge_intc_of_init)
+IRQCHIP_MATCH("brcm,bcm7271-l2-intc", brcmstb_l2_lvl_intc_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(brcmstb_l2)
+MODULE_DESCRIPTION("Broadcom STB generic L2 interrupt controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/irq-clps711x.c b/drivers/irqchip/irq-clps711x.c
index d0da29aeedc8..77ebe7e47e0e 100644
--- a/drivers/irqchip/irq-clps711x.c
+++ b/drivers/irqchip/irq-clps711x.c
@@ -77,14 +77,14 @@ static asmlinkage void __exception_irq_entry clps711x_irqh(struct pt_regs *regs)
irqstat = readw_relaxed(clps711x_intc->intmr[0]) &
readw_relaxed(clps711x_intc->intsr[0]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1);
irqstat = readw_relaxed(clps711x_intc->intmr[1]) &
readw_relaxed(clps711x_intc->intsr[1]);
if (irqstat)
- handle_domain_irq(clps711x_intc->domain,
- fls(irqstat) - 1 + 16, regs);
+ generic_handle_domain_irq(clps711x_intc->domain,
+ fls(irqstat) - 1 + 16);
} while (irqstat);
}
diff --git a/drivers/irqchip/irq-csky-apb-intc.c b/drivers/irqchip/irq-csky-apb-intc.c
index ab91afa86755..d36f536506ba 100644
--- a/drivers/irqchip/irq-csky-apb-intc.c
+++ b/drivers/irqchip/irq-csky-apb-intc.c
@@ -138,7 +138,7 @@ static inline bool handle_irq_perbit(struct pt_regs *regs, u32 hwirq,
if (hwirq == 0)
return 0;
- handle_domain_irq(root_domain, irq_base + __fls(hwirq), regs);
+ generic_handle_domain_irq(root_domain, irq_base + __fls(hwirq));
return 1;
}
diff --git a/drivers/irqchip/irq-csky-mpintc.c b/drivers/irqchip/irq-csky-mpintc.c
index a1534edef7fa..cb403c960ac0 100644
--- a/drivers/irqchip/irq-csky-mpintc.c
+++ b/drivers/irqchip/irq-csky-mpintc.c
@@ -74,8 +74,8 @@ static void csky_mpintc_handler(struct pt_regs *regs)
{
void __iomem *reg_base = this_cpu_read(intcl_reg);
- handle_domain_irq(root_domain,
- readl_relaxed(reg_base + INTCL_RDYIR), regs);
+ generic_handle_domain_irq(root_domain,
+ readl_relaxed(reg_base + INTCL_RDYIR));
}
static void csky_mpintc_enable(struct irq_data *d)
diff --git a/drivers/irqchip/irq-davinci-aintc.c b/drivers/irqchip/irq-davinci-aintc.c
index 810ccc4fe476..123eb7bfc117 100644
--- a/drivers/irqchip/irq-davinci-aintc.c
+++ b/drivers/irqchip/irq-davinci-aintc.c
@@ -73,7 +73,7 @@ davinci_aintc_handle_irq(struct pt_regs *regs)
irqnr >>= 2;
irqnr -= 1;
- handle_domain_irq(davinci_aintc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_aintc_irq_domain, irqnr);
}
/* ARM Interrupt Controller Initialization */
diff --git a/drivers/irqchip/irq-davinci-cp-intc.c b/drivers/irqchip/irq-davinci-cp-intc.c
index 276da2772e7f..7482c8ed34b2 100644
--- a/drivers/irqchip/irq-davinci-cp-intc.c
+++ b/drivers/irqchip/irq-davinci-cp-intc.c
@@ -135,7 +135,7 @@ davinci_cp_intc_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(davinci_cp_intc_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(davinci_cp_intc_irq_domain, irqnr);
}
static int davinci_cp_intc_host_map(struct irq_domain *h, unsigned int virq,
diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c
index fc38d2da11b9..3b0d78aac13b 100644
--- a/drivers/irqchip/irq-digicolor.c
+++ b/drivers/irqchip/irq-digicolor.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry digicolor_handle_irq(struct pt_regs *regs)
return;
}
- handle_domain_irq(digicolor_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(digicolor_irq_domain, hwirq);
} while (1);
}
diff --git a/drivers/irqchip/irq-dw-apb-ictl.c b/drivers/irqchip/irq-dw-apb-ictl.c
index a67266e44491..d5c1c750c8d2 100644
--- a/drivers/irqchip/irq-dw-apb-ictl.c
+++ b/drivers/irqchip/irq-dw-apb-ictl.c
@@ -42,7 +42,7 @@ static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs)
while (stat) {
u32 hwirq = ffs(stat) - 1;
- handle_domain_irq(d, hwirq, regs);
+ generic_handle_domain_irq(d, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-ftintc010.c b/drivers/irqchip/irq-ftintc010.c
index 0bf98425dca5..5cc268880f8e 100644
--- a/drivers/irqchip/irq-ftintc010.c
+++ b/drivers/irqchip/irq-ftintc010.c
@@ -134,7 +134,7 @@ asmlinkage void __exception_irq_entry ft010_irqchip_handle_irq(struct pt_regs *r
while ((status = readl(FT010_IRQ_STATUS(f->base)))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index fd4e9a37fea6..daec3309b014 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -660,7 +660,7 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
* PSR.I will be restored when we ERET to the
* interrupted context.
*/
- err = handle_domain_nmi(gic_data.domain, irqnr, regs);
+ err = generic_handle_domain_nmi(gic_data.domain, irqnr);
if (err)
gic_deactivate_unhandled(irqnr);
@@ -728,7 +728,7 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
else
isb();
- if (handle_domain_irq(gic_data.domain, irqnr, regs)) {
+ if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_deactivate_unhandled(irqnr);
}
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 5f22c9d65e57..b8bb46c65a97 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -369,7 +369,7 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
this_cpu_write(sgi_intid, irqstat);
}
- handle_domain_irq(gic->domain, irqnr, regs);
+ generic_handle_domain_irq(gic->domain, irqnr);
} while (1);
}
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 058ebaebe2c4..46161f6ff289 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -206,7 +206,7 @@ static void __exception_irq_entry hip04_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (irqnr <= HIP04_MAX_IRQS)
- handle_domain_irq(hip04_data.domain, irqnr, regs);
+ generic_handle_domain_irq(hip04_data.domain, irqnr);
} while (irqnr > HIP04_MAX_IRQS);
}
diff --git a/drivers/irqchip/irq-ixp4xx.c b/drivers/irqchip/irq-ixp4xx.c
index 37e0749215c7..fb68f8c59fbb 100644
--- a/drivers/irqchip/irq-ixp4xx.c
+++ b/drivers/irqchip/irq-ixp4xx.c
@@ -114,7 +114,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i, regs);
+ generic_handle_domain_irq(ixi->domain, i);
/*
* IXP465/IXP435 has an upper IRQ status register
@@ -122,7 +122,7 @@ asmlinkage void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs)
if (ixi->is_356) {
status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2);
for_each_set_bit(i, &status, 32)
- handle_domain_irq(ixi->domain, i + 32, regs);
+ generic_handle_domain_irq(ixi->domain, i + 32);
}
}
diff --git a/drivers/irqchip/irq-lpc32xx.c b/drivers/irqchip/irq-lpc32xx.c
index 5e6f6e25f2ae..a29357f39450 100644
--- a/drivers/irqchip/irq-lpc32xx.c
+++ b/drivers/irqchip/irq-lpc32xx.c
@@ -126,7 +126,7 @@ static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
while (hwirq) {
irq = __ffs(hwirq);
hwirq &= ~BIT(irq);
- handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
+ generic_handle_domain_irq(lpc32xx_mic_irqc->domain, irq);
}
}
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
new file mode 100644
index 000000000000..c726a19837d2
--- /dev/null
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Microchip External Interrupt Controller driver
+ *
+ * Copyright (C) 2021 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Claudiu Beznea <claudiu.beznea@microchip.com>
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define MCHP_EIC_GFCS (0x0)
+#define MCHP_EIC_SCFG(x) (0x4 + (x) * 0x4)
+#define MCHP_EIC_SCFG_EN BIT(16)
+#define MCHP_EIC_SCFG_LVL BIT(9)
+#define MCHP_EIC_SCFG_POL BIT(8)
+
+#define MCHP_EIC_NIRQ (2)
+
+/*
+ * struct mchp_eic - EIC private data structure
+ * @base: base address
+ * @clk: peripheral clock
+ * @domain: irq domain
+ * @irqs: irqs b/w eic and gic
+ * @scfg: backup for scfg registers (necessary for backup and self-refresh mode)
+ * @wakeup_source: wakeup source mask
+ */
+struct mchp_eic {
+ void __iomem *base;
+ struct clk *clk;
+ struct irq_domain *domain;
+ u32 irqs[MCHP_EIC_NIRQ];
+ u32 scfg[MCHP_EIC_NIRQ];
+ u32 wakeup_source;
+};
+
+static struct mchp_eic *eic;
+
+static void mchp_eic_irq_mask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_mask_parent(d);
+}
+
+static void mchp_eic_irq_unmask(struct irq_data *d)
+{
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp |= MCHP_EIC_SCFG_EN;
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ irq_chip_unmask_parent(d);
+}
+
+static int mchp_eic_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ unsigned int parent_irq_type;
+ unsigned int tmp;
+
+ tmp = readl_relaxed(eic->base + MCHP_EIC_SCFG(d->hwirq));
+ tmp &= ~(MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL);
+ switch (type) {
+ case IRQ_TYPE_LEVEL_HIGH:
+ tmp |= MCHP_EIC_SCFG_POL | MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ tmp |= MCHP_EIC_SCFG_LVL;
+ parent_irq_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case IRQ_TYPE_EDGE_RISING:
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ tmp |= MCHP_EIC_SCFG_POL;
+ parent_irq_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ writel_relaxed(tmp, eic->base + MCHP_EIC_SCFG(d->hwirq));
+
+ return irq_chip_set_type_parent(d, parent_irq_type);
+}
+
+static int mchp_eic_irq_set_wake(struct irq_data *d, unsigned int on)
+{
+ irq_set_irq_wake(eic->irqs[d->hwirq], on);
+ if (on)
+ eic->wakeup_source |= BIT(d->hwirq);
+ else
+ eic->wakeup_source &= ~BIT(d->hwirq);
+
+ return 0;
+}
+
+static int mchp_eic_irq_suspend(void)
+{
+ unsigned int hwirq;
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ eic->scfg[hwirq] = readl_relaxed(eic->base +
+ MCHP_EIC_SCFG(hwirq));
+
+ if (!eic->wakeup_source)
+ clk_disable_unprepare(eic->clk);
+
+ return 0;
+}
+
+static void mchp_eic_irq_resume(void)
+{
+ unsigned int hwirq;
+
+ if (!eic->wakeup_source)
+ clk_prepare_enable(eic->clk);
+
+ for (hwirq = 0; hwirq < MCHP_EIC_NIRQ; hwirq++)
+ writel_relaxed(eic->scfg[hwirq], eic->base +
+ MCHP_EIC_SCFG(hwirq));
+}
+
+static struct syscore_ops mchp_eic_syscore_ops = {
+ .suspend = mchp_eic_irq_suspend,
+ .resume = mchp_eic_irq_resume,
+};
+
+static struct irq_chip mchp_eic_chip = {
+ .name = "eic",
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SET_TYPE_MASKED,
+ .irq_mask = mchp_eic_irq_mask,
+ .irq_unmask = mchp_eic_irq_unmask,
+ .irq_set_type = mchp_eic_irq_set_type,
+ .irq_ack = irq_chip_ack_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+ .irq_set_wake = mchp_eic_irq_set_wake,
+};
+
+static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ irq_hw_number_t hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
+ if (ret || hwirq >= MCHP_EIC_NIRQ)
+ return ret;
+
+ switch (type) {
+ case IRQ_TYPE_EDGE_RISING:
+ case IRQ_TYPE_LEVEL_HIGH:
+ break;
+ case IRQ_TYPE_EDGE_FALLING:
+ type = IRQ_TYPE_EDGE_RISING;
+ break;
+ case IRQ_TYPE_LEVEL_LOW:
+ type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &mchp_eic_chip, eic);
+
+ parent_fwspec.fwnode = domain->parent->fwnode;
+ parent_fwspec.param_count = 3;
+ parent_fwspec.param[0] = GIC_SPI;
+ parent_fwspec.param[1] = eic->irqs[hwirq];
+ parent_fwspec.param[2] = type;
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
+}
+
+static const struct irq_domain_ops mchp_eic_domain_ops = {
+ .translate = irq_domain_translate_twocell,
+ .alloc = mchp_eic_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int mchp_eic_init(struct device_node *node, struct device_node *parent)
+{
+ struct irq_domain *parent_domain = NULL;
+ int ret, i;
+
+ eic = kzalloc(sizeof(*eic), GFP_KERNEL);
+ if (!eic)
+ return -ENOMEM;
+
+ eic->base = of_iomap(node, 0);
+ if (!eic->base) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ ret = -ENODEV;
+ goto unmap;
+ }
+
+ eic->clk = of_clk_get_by_name(node, "pclk");
+ if (IS_ERR(eic->clk)) {
+ ret = PTR_ERR(eic->clk);
+ goto unmap;
+ }
+
+ ret = clk_prepare_enable(eic->clk);
+ if (ret)
+ goto unmap;
+
+ for (i = 0; i < MCHP_EIC_NIRQ; i++) {
+ struct of_phandle_args irq;
+
+ /* Disable the interrupt source in case it was left enabled. */
+ writel_relaxed(0UL, eic->base + MCHP_EIC_SCFG(i));
+
+ ret = of_irq_parse_one(node, i, &irq);
+ if (ret)
+ goto clk_unprepare;
+
+ if (WARN_ON(irq.args_count != 3)) {
+ ret = -EINVAL;
+ goto clk_unprepare;
+ }
+
+ eic->irqs[i] = irq.args[1];
+ }
+
+ eic->domain = irq_domain_add_hierarchy(parent_domain, 0, MCHP_EIC_NIRQ,
+ node, &mchp_eic_domain_ops, eic);
+ if (!eic->domain) {
+ pr_err("%pOF: Failed to add domain\n", node);
+ ret = -ENODEV;
+ goto clk_unprepare;
+ }
+
+ register_syscore_ops(&mchp_eic_syscore_ops);
+
+ pr_info("%pOF: EIC registered, nr_irqs %u\n", node, MCHP_EIC_NIRQ);
+
+ return 0;
+
+clk_unprepare:
+ clk_disable_unprepare(eic->clk);
+unmap:
+ iounmap(eic->base);
+free:
+ kfree(eic);
+ return ret;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(mchp_eic)
+IRQCHIP_MATCH("microchip,sama7g5-eic", mchp_eic_init)
+IRQCHIP_PLATFORM_DRIVER_END(mchp_eic)
+
+MODULE_DESCRIPTION("Microchip External Interrupt Controller");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Claudiu Beznea <claudiu.beznea@microchip.com>");
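From a consumer's point of view the EIC lines behave like ordinary interrupts; requesting a falling-edge or low-level trigger makes the driver invert the polarity in MCHP_EIC_SCFG before handing the rising/high type to the parent GIC. A hypothetical peripheral-side sketch (the handler, "ext-wake" label and wake-up wiring are illustrative only):

#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/pm_wakeup.h>

static irqreturn_t ext_wake_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int ext_wake_probe(struct platform_device *pdev)
{
	int irq, ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* The EIC flips its POL bit so the GIC still sees a rising edge. */
	ret = devm_request_irq(&pdev->dev, irq, ext_wake_handler,
			       IRQF_TRIGGER_FALLING, "ext-wake", NULL);
	if (ret)
		return ret;

	/* Let the line wake the system; the EIC's irq_set_wake tracks this. */
	device_init_wakeup(&pdev->dev, true);
	return dev_pm_set_wake_irq(&pdev->dev, irq);
}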
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
index e50676ce2ec8..d90ff0b92480 100644
--- a/drivers/irqchip/irq-meson-gpio.c
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -436,8 +436,7 @@ static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
.translate = meson_gpio_irq_domain_translate,
};
-static int __init meson_gpio_irq_parse_dt(struct device_node *node,
- struct meson_gpio_irq_controller *ctl)
+static int meson_gpio_irq_parse_dt(struct device_node *node, struct meson_gpio_irq_controller *ctl)
{
const struct of_device_id *match;
int ret;
@@ -463,8 +462,7 @@ static int __init meson_gpio_irq_parse_dt(struct device_node *node,
return 0;
}
-static int __init meson_gpio_irq_of_init(struct device_node *node,
- struct device_node *parent)
+static int meson_gpio_irq_of_init(struct device_node *node, struct device_node *parent)
{
struct irq_domain *domain, *parent_domain;
struct meson_gpio_irq_controller *ctl;
@@ -521,5 +519,10 @@ free_ctl:
return ret;
}
-IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
- meson_gpio_irq_of_init);
+IRQCHIP_PLATFORM_DRIVER_BEGIN(meson_gpio_intc)
+IRQCHIP_MATCH("amlogic,meson-gpio-intc", meson_gpio_irq_of_init)
+IRQCHIP_PLATFORM_DRIVER_END(meson_gpio_intc)
+
+MODULE_AUTHOR("Jerome Brunet <jbrunet@baylibre.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:meson-gpio-intc");
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 54c7092cc61d..d02b05a067d9 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -381,24 +381,35 @@ static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
spin_unlock_irqrestore(&gic_lock, flags);
}
-static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+static void gic_all_vpes_irq_cpu_online(void)
{
- struct gic_all_vpes_chip_data *cd;
- unsigned int intr;
+ static const unsigned int local_intrs[] = {
+ GIC_LOCAL_INT_TIMER,
+ GIC_LOCAL_INT_PERFCTR,
+ GIC_LOCAL_INT_FDC,
+ };
+ unsigned long flags;
+ int i;
- intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- cd = irq_data_get_irq_chip_data(d);
+ spin_lock_irqsave(&gic_lock, flags);
- write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
- if (cd->mask)
- write_gic_vl_smask(BIT(intr));
+ for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
+ unsigned int intr = local_intrs[i];
+ struct gic_all_vpes_chip_data *cd;
+
+ cd = &gic_all_vpes_chip_data[intr];
+ write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+ }
+
+ spin_unlock_irqrestore(&gic_lock, flags);
}
static struct irq_chip gic_all_vpes_local_irq_controller = {
.name = "MIPS GIC Local",
.irq_mask = gic_mask_local_irq_all_vpes,
.irq_unmask = gic_unmask_local_irq_all_vpes,
- .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -477,6 +488,10 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
intr = GIC_HWIRQ_TO_LOCAL(hwirq);
map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+ /*
+ * If adding support for more per-cpu interrupts, keep the
+ * array in gic_all_vpes_irq_cpu_online() in sync.
+ */
switch (intr) {
case GIC_LOCAL_INT_TIMER:
/* CONFIG_MIPS_CMP workaround (see __gic_init) */
@@ -663,8 +678,8 @@ static int gic_cpu_startup(unsigned int cpu)
/* Clear all local IRQ masks (ie. disable all local interrupts) */
write_gic_vl_rmask(~0);
- /* Invoke irq_cpu_online callbacks to enable desired interrupts */
- irq_cpu_online();
+ /* Enable desired interrupts */
+ gic_all_vpes_irq_cpu_online();
return 0;
}
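With the irq_cpu_online() callback removed, the GIC restores its per-VP local routing directly from its existing CPU hotplug startup hook (gic_cpu_startup). For an irqchip that does not already have such a hook, the equivalent is a cpuhp state; a hedged sketch with hypothetical names:

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

/* Hypothetical per-CPU (re)initialisation, e.g. re-unmask local sources. */
static int my_intc_cpu_online(unsigned int cpu)
{
	return 0;
}

static int __init my_intc_register_hotplug(void)
{
	int ret;

	/*
	 * Dynamic AP state: the callback runs on each CPU as it comes online
	 * and is also invoked right away for the CPUs already online.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irqchip/my-intc:online",
				my_intc_cpu_online, NULL);
	return ret < 0 ? ret : 0;
}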
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 4a74ac7b7c42..83455ca72439 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -230,7 +230,7 @@ static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
@@ -241,7 +241,7 @@ static void __exception_irq_entry mmp2_handle_irq(struct pt_regs *regs)
if (!(hwirq & SEL_INT_PENDING))
return;
hwirq &= SEL_INT_NUM_MASK;
- handle_domain_irq(icu_data[0].domain, hwirq, regs);
+ generic_handle_domain_irq(icu_data[0].domain, hwirq);
}
/* MMP (ARMv5) */
diff --git a/drivers/irqchip/irq-mvebu-icu.c b/drivers/irqchip/irq-mvebu-icu.c
index 090bc3f4f7d8..3e7297fc5948 100644
--- a/drivers/irqchip/irq-mvebu-icu.c
+++ b/drivers/irqchip/irq-mvebu-icu.c
@@ -347,7 +347,6 @@ builtin_platform_driver(mvebu_icu_subset_driver);
static int mvebu_icu_probe(struct platform_device *pdev)
{
struct mvebu_icu *icu;
- struct resource *res;
int i;
icu = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_icu),
@@ -357,8 +356,7 @@ static int mvebu_icu_probe(struct platform_device *pdev)
icu->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- icu->base = devm_ioremap_resource(&pdev->dev, res);
+ icu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(icu->base))
return PTR_ERR(icu->base);
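This probe cleanup (repeated below in mvebu-pic, stm32-exti, ti-sci-inta and ts4800) collapses the platform_get_resource() + devm_ioremap_resource() pair into devm_platform_ioremap_resource(), which looks up MEM resource N, requests it and maps it, returning an ERR_PTR on failure. A minimal probe sketch with a hypothetical private structure:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct my_ic {				/* hypothetical driver-private data */
	void __iomem *base;
};

static int my_ic_probe(struct platform_device *pdev)
{
	struct my_ic *ic;

	ic = devm_kzalloc(&pdev->dev, sizeof(*ic), GFP_KERNEL);
	if (!ic)
		return -ENOMEM;

	/* Resource lookup, request_mem_region and ioremap in one managed call. */
	ic->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ic->base))
		return PTR_ERR(ic->base);

	platform_set_drvdata(pdev, ic);
	return 0;
}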
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
index dc1cee4b0fe1..870f9866b8da 100644
--- a/drivers/irqchip/irq-mvebu-pic.c
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -121,14 +121,12 @@ static int mvebu_pic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct mvebu_pic *pic;
struct irq_chip *irq_chip;
- struct resource *res;
pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
if (!pic)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pic->base = devm_ioremap_resource(&pdev->dev, res);
+ pic->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pic->base))
return PTR_ERR(pic->base);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index d1f5740cd575..55cb6b5a686e 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -136,7 +136,7 @@ asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
irqnr = __raw_readl(icoll_priv.stat);
__raw_writel(irqnr, icoll_priv.vector);
- handle_domain_irq(icoll_domain, irqnr, regs);
+ generic_handle_domain_irq(icoll_domain, irqnr);
}
static int icoll_irq_domain_map(struct irq_domain *d, unsigned int virq,
diff --git a/drivers/irqchip/irq-nvic.c b/drivers/irqchip/irq-nvic.c
index b31c4cff4d3a..63bac3f78863 100644
--- a/drivers/irqchip/irq-nvic.c
+++ b/drivers/irqchip/irq-nvic.c
@@ -37,10 +37,25 @@
static struct irq_domain *nvic_irq_domain;
+static void __nvic_handle_irq(irq_hw_number_t hwirq)
+{
+ generic_handle_domain_irq(nvic_irq_domain, hwirq);
+}
+
+/*
+ * TODO: restructure the ARMv7M entry logic so that this handler can live
+ * in arch code.
+ */
asmlinkage void __exception_irq_entry
nvic_handle_irq(irq_hw_number_t hwirq, struct pt_regs *regs)
{
- handle_domain_irq(nvic_irq_domain, hwirq, regs);
+ struct pt_regs *old_regs;
+
+ irq_enter();
+ old_regs = set_irq_regs(regs);
+ __nvic_handle_irq(hwirq);
+ set_irq_regs(old_regs);
+ irq_exit();
}
static int nvic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index d360a6eddd6d..dc82162ba763 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -357,7 +357,7 @@ omap_intc_handle_irq(struct pt_regs *regs)
}
irqnr &= ACTIVEIRQ_MASK;
- handle_domain_irq(domain, irqnr, regs);
+ generic_handle_domain_irq(domain, irqnr);
}
static int __init intc_of_init(struct device_node *node,
diff --git a/drivers/irqchip/irq-or1k-pic.c b/drivers/irqchip/irq-or1k-pic.c
index 03d2366118dd..49b47e787644 100644
--- a/drivers/irqchip/irq-or1k-pic.c
+++ b/drivers/irqchip/irq-or1k-pic.c
@@ -116,7 +116,7 @@ static void or1k_pic_handle_irq(struct pt_regs *regs)
int irq = -1;
while ((irq = pic_get_irq(irq + 1)) != NO_IRQ)
- handle_domain_irq(root_domain, irq, regs);
+ generic_handle_domain_irq(root_domain, irq);
}
static int or1k_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index b6868f7b805a..17c2c7a07f10 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -42,8 +42,8 @@ __exception_irq_entry orion_handle_irq(struct pt_regs *regs)
gc->mask_cache;
while (stat) {
u32 hwirq = __fls(stat);
- handle_domain_irq(orion_irq_domain,
- gc->irq_base + hwirq, regs);
+ generic_handle_domain_irq(orion_irq_domain,
+ gc->irq_base + hwirq);
stat &= ~(1 << hwirq);
}
}
diff --git a/drivers/irqchip/irq-rda-intc.c b/drivers/irqchip/irq-rda-intc.c
index 960168303b73..9f0144a73777 100644
--- a/drivers/irqchip/irq-rda-intc.c
+++ b/drivers/irqchip/irq-rda-intc.c
@@ -53,7 +53,7 @@ static void __exception_irq_entry rda_handle_irq(struct pt_regs *regs)
while (stat) {
hwirq = __fls(stat);
- handle_domain_irq(rda_irq_domain, hwirq, regs);
+ generic_handle_domain_irq(rda_irq_domain, hwirq);
stat &= ~BIT(hwirq);
}
}
diff --git a/drivers/irqchip/irq-riscv-intc.c b/drivers/irqchip/irq-riscv-intc.c
index 8017f6d32d52..b65bd8878d4f 100644
--- a/drivers/irqchip/irq-riscv-intc.c
+++ b/drivers/irqchip/irq-riscv-intc.c
@@ -37,7 +37,7 @@ static asmlinkage void riscv_intc_irq(struct pt_regs *regs)
break;
#endif
default:
- handle_domain_irq(intc_domain, cause, regs);
+ generic_handle_domain_irq(intc_domain, cause);
break;
}
}
diff --git a/drivers/irqchip/irq-sa11x0.c b/drivers/irqchip/irq-sa11x0.c
index dbccc7dafbf8..31c202a1ae62 100644
--- a/drivers/irqchip/irq-sa11x0.c
+++ b/drivers/irqchip/irq-sa11x0.c
@@ -140,8 +140,8 @@ sa1100_handle_irq(struct pt_regs *regs)
if (mask == 0)
break;
- handle_domain_irq(sa1100_normal_irqdomain,
- ffs(mask) - 1, regs);
+ generic_handle_domain_irq(sa1100_normal_irqdomain,
+ ffs(mask) - 1);
} while (1);
}
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 33c76710f845..b7cb2da71888 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -850,7 +850,6 @@ static int stm32_exti_probe(struct platform_device *pdev)
struct irq_domain *parent_domain, *domain;
struct stm32_exti_host_data *host_data;
const struct stm32_exti_drv_data *drv_data;
- struct resource *res;
host_data = devm_kzalloc(dev, sizeof(*host_data), GFP_KERNEL);
if (!host_data)
@@ -888,8 +887,7 @@ static int stm32_exti_probe(struct platform_device *pdev)
if (!host_data->chips_data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- host_data->base = devm_ioremap_resource(dev, res);
+ host_data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(host_data->base))
return PTR_ERR(host_data->base);
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 8a315d6a3399..dd506ebfdacb 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -195,7 +195,7 @@ static void __exception_irq_entry sun4i_handle_irq(struct pt_regs *regs)
return;
do {
- handle_domain_irq(irq_ic_data->irq_domain, hwirq, regs);
+ generic_handle_domain_irq(irq_ic_data->irq_domain, hwirq);
hwirq = readl(irq_ic_data->irq_base +
SUN4I_IRQ_VECTOR_REG) >> 2;
} while (hwirq != 0);
diff --git a/drivers/irqchip/irq-ti-sci-inta.c b/drivers/irqchip/irq-ti-sci-inta.c
index 97f454ec376b..8eba08db33b2 100644
--- a/drivers/irqchip/irq-ti-sci-inta.c
+++ b/drivers/irqchip/irq-ti-sci-inta.c
@@ -650,7 +650,6 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
struct device_node *parent_node, *node;
struct ti_sci_inta_irq_domain *inta;
struct device *dev = &pdev->dev;
- struct resource *res;
int ret;
node = dev_of_node(dev);
@@ -694,8 +693,7 @@ static int ti_sci_inta_irq_domain_probe(struct platform_device *pdev)
return PTR_ERR(inta->global_event);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- inta->base = devm_ioremap_resource(dev, res);
+ inta->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(inta->base))
return PTR_ERR(inta->base);
diff --git a/drivers/irqchip/irq-ts4800.c b/drivers/irqchip/irq-ts4800.c
index 34337a61b1ef..f032db23b30f 100644
--- a/drivers/irqchip/irq-ts4800.c
+++ b/drivers/irqchip/irq-ts4800.c
@@ -93,15 +93,13 @@ static int ts4800_ic_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct ts4800_irq_data *data;
struct irq_chip *irq_chip;
- struct resource *res;
int parent_irq;
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- data->base = devm_ioremap_resource(&pdev->dev, res);
+ data->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(data->base))
return PTR_ERR(data->base);
diff --git a/drivers/irqchip/irq-versatile-fpga.c b/drivers/irqchip/irq-versatile-fpga.c
index 75be350cf82f..f2757b6aecc8 100644
--- a/drivers/irqchip/irq-versatile-fpga.c
+++ b/drivers/irqchip/irq-versatile-fpga.c
@@ -105,7 +105,7 @@ static int handle_one_fpga(struct fpga_irq_data *f, struct pt_regs *regs)
while ((status = readl(f->base + IRQ_STATUS))) {
irq = ffs(status) - 1;
- handle_domain_irq(f->domain, irq, regs);
+ generic_handle_domain_irq(f->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vic.c b/drivers/irqchip/irq-vic.c
index 1e1f2d115257..9e3d5561e04e 100644
--- a/drivers/irqchip/irq-vic.c
+++ b/drivers/irqchip/irq-vic.c
@@ -208,7 +208,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
irq = ffs(stat) - 1;
- handle_domain_irq(vic->domain, irq, regs);
+ generic_handle_domain_irq(vic->domain, irq);
handled = 1;
}
diff --git a/drivers/irqchip/irq-vt8500.c b/drivers/irqchip/irq-vt8500.c
index 5bce936af5d9..e17dd3a8c2d5 100644
--- a/drivers/irqchip/irq-vt8500.c
+++ b/drivers/irqchip/irq-vt8500.c
@@ -183,7 +183,7 @@ static void __exception_irq_entry vt8500_handle_irq(struct pt_regs *regs)
continue;
}
- handle_domain_irq(intc[i].domain, irqnr, regs);
+ generic_handle_domain_irq(intc[i].domain, irqnr);
}
}
diff --git a/drivers/irqchip/irq-wpcm450-aic.c b/drivers/irqchip/irq-wpcm450-aic.c
index f3ac392d5bc8..0dcbeb1a05a1 100644
--- a/drivers/irqchip/irq-wpcm450-aic.c
+++ b/drivers/irqchip/irq-wpcm450-aic.c
@@ -69,7 +69,7 @@ static void __exception_irq_entry wpcm450_aic_handle_irq(struct pt_regs *regs)
/* Read IPER to signal that nIRQ can be de-asserted */
hwirq = readl(aic->regs + AIC_IPER) / 4;
- handle_domain_irq(aic->domain, hwirq, regs);
+ generic_handle_domain_irq(aic->domain, hwirq);
}
static void wpcm450_aic_eoi(struct irq_data *d)
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
index 84163f1ebfcf..7a72620fc478 100644
--- a/drivers/irqchip/irq-zevio.c
+++ b/drivers/irqchip/irq-zevio.c
@@ -50,7 +50,7 @@ static void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
while (readl(zevio_irq_io + IO_STATUS)) {
irqnr = readl(zevio_irq_io + IO_CURRENT);
- handle_domain_irq(zevio_irq_domain, irqnr, regs);
+ generic_handle_domain_irq(zevio_irq_domain, irqnr);
}
}
diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c
index e501cb03f211..bd087cca1c1d 100644
--- a/drivers/isdn/hardware/mISDN/hfcpci.c
+++ b/drivers/isdn/hardware/mISDN/hfcpci.c
@@ -1994,14 +1994,14 @@ setup_hw(struct hfc_pci *hc)
pci_set_master(hc->pdev);
if (!hc->irq) {
printk(KERN_WARNING "HFC-PCI: No IRQ for PCI card found\n");
- return 1;
+ return -EINVAL;
}
hc->hw.pci_io =
(char __iomem *)(unsigned long)hc->pdev->resource[1].start;
if (!hc->hw.pci_io) {
printk(KERN_WARNING "HFC-PCI: No IO-Mem for PCI card found\n");
- return 1;
+ return -ENOMEM;
}
/* Allocate memory for FIFOS */
/* the memory needs to be on a 32k boundary within the first 4G */
@@ -2012,7 +2012,7 @@ setup_hw(struct hfc_pci *hc)
if (!buffer) {
printk(KERN_WARNING
"HFC-PCI: Error allocating memory for FIFO!\n");
- return 1;
+ return -ENOMEM;
}
hc->hw.fifos = buffer;
pci_write_config_dword(hc->pdev, 0x80, hc->hw.dmahandle);
@@ -2022,7 +2022,7 @@ setup_hw(struct hfc_pci *hc)
"HFC-PCI: Error in ioremap for PCI!\n");
dma_free_coherent(&hc->pdev->dev, 0x8000, hc->hw.fifos,
hc->hw.dmahandle);
- return 1;
+ return -ENOMEM;
}
printk(KERN_INFO
diff --git a/drivers/leds/led-class-flash.c b/drivers/leds/led-class-flash.c
index 185e17055317..6fe9d700dfef 100644
--- a/drivers/leds/led-class-flash.c
+++ b/drivers/leds/led-class-flash.c
@@ -207,7 +207,7 @@ static ssize_t flash_fault_show(struct device *dev,
mask <<= 1;
}
- return sprintf(buf, "%s\n", buf);
+ return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(flash_fault);
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 4e7b78a84149..072491d3e17b 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -157,7 +157,6 @@ EXPORT_SYMBOL_GPL(led_trigger_read);
/* Caller must ensure led_cdev->trigger_lock held */
int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
{
- unsigned long flags;
char *event = NULL;
char *envp[2];
const char *name;
@@ -171,10 +170,13 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
/* Remove any existing trigger */
if (led_cdev->trigger) {
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock,
- flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+
+ /* ensure it's no longer visible on the led_cdevs list */
+ synchronize_rcu();
+
cancel_work_sync(&led_cdev->set_brightness_work);
led_stop_software_blink(led_cdev);
if (led_cdev->trigger->deactivate)
@@ -186,9 +188,9 @@ int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trig)
led_set_brightness(led_cdev, LED_OFF);
}
if (trig) {
- write_lock_irqsave(&trig->leddev_list_lock, flags);
- list_add_tail(&led_cdev->trig_list, &trig->led_cdevs);
- write_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ spin_lock(&trig->leddev_list_lock);
+ list_add_tail_rcu(&led_cdev->trig_list, &trig->led_cdevs);
+ spin_unlock(&trig->leddev_list_lock);
led_cdev->trigger = trig;
if (trig->activate)
@@ -223,9 +225,10 @@ err_add_groups:
trig->deactivate(led_cdev);
err_activate:
- write_lock_irqsave(&led_cdev->trigger->leddev_list_lock, flags);
- list_del(&led_cdev->trig_list);
- write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
+ spin_lock(&led_cdev->trigger->leddev_list_lock);
+ list_del_rcu(&led_cdev->trig_list);
+ spin_unlock(&led_cdev->trigger->leddev_list_lock);
+ synchronize_rcu();
led_cdev->trigger = NULL;
led_cdev->trigger_data = NULL;
led_set_brightness(led_cdev, LED_OFF);
@@ -285,7 +288,7 @@ int led_trigger_register(struct led_trigger *trig)
struct led_classdev *led_cdev;
struct led_trigger *_trig;
- rwlock_init(&trig->leddev_list_lock);
+ spin_lock_init(&trig->leddev_list_lock);
INIT_LIST_HEAD(&trig->led_cdevs);
down_write(&triggers_list_lock);
@@ -378,15 +381,14 @@ void led_trigger_event(struct led_trigger *trig,
enum led_brightness brightness)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list)
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list)
led_set_brightness(led_cdev, brightness);
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(led_trigger_event);
@@ -397,20 +399,19 @@ static void led_trigger_blink_setup(struct led_trigger *trig,
int invert)
{
struct led_classdev *led_cdev;
- unsigned long flags;
if (!trig)
return;
- read_lock_irqsave(&trig->leddev_list_lock, flags);
- list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) {
+ rcu_read_lock();
+ list_for_each_entry_rcu(led_cdev, &trig->led_cdevs, trig_list) {
if (oneshot)
led_blink_set_oneshot(led_cdev, delay_on, delay_off,
invert);
else
led_blink_set(led_cdev, delay_on, delay_off);
}
- read_unlock_irqrestore(&trig->leddev_list_lock, flags);
+ rcu_read_unlock();
}
void led_trigger_blink(struct led_trigger *trig,
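The trigger-list locking is converted from a read/write lock taken with IRQs disabled to RCU: writers (trigger set/clear, which run in process context) serialise on a spinlock and use the _rcu list helpers plus synchronize_rcu() before the removed entry may be reused, while readers such as led_trigger_event() only need rcu_read_lock() around the walk. A minimal sketch of the same pattern on a hypothetical list:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct my_node {			/* hypothetical list element */
	struct list_head list;
	int value;
};

static LIST_HEAD(my_list);
static DEFINE_SPINLOCK(my_list_lock);	/* serialises writers only */

static void my_list_add(struct my_node *n)
{
	spin_lock(&my_list_lock);
	list_add_tail_rcu(&n->list, &my_list);
	spin_unlock(&my_list_lock);
}

static void my_list_del(struct my_node *n)
{
	spin_lock(&my_list_lock);
	list_del_rcu(&n->list);
	spin_unlock(&my_list_lock);
	synchronize_rcu();		/* wait for readers before reusing n */
}

static int my_list_sum(void)		/* lock-free reader, safe in atomic context */
{
	struct my_node *n;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &my_list, list)
		sum += n->value;
	rcu_read_unlock();

	return sum;
}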
diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig
index 1f1d57288085..dc6816d36d06 100644
--- a/drivers/leds/trigger/Kconfig
+++ b/drivers/leds/trigger/Kconfig
@@ -64,6 +64,7 @@ config LEDS_TRIGGER_BACKLIGHT
config LEDS_TRIGGER_CPU
bool "LED CPU Trigger"
+ depends on !PREEMPT_RT
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index fe63d5ee201b..d33913d523c1 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -848,7 +848,8 @@ int smu_queue_i2c(struct smu_i2c_cmd *cmd)
cmd->read = cmd->info.devaddr & 0x01;
switch(cmd->info.type) {
case SMU_I2C_TRANSFER_SIMPLE:
- memset(&cmd->info.sublen, 0, 4);
+ cmd->info.sublen = 0;
+ memset(cmd->info.subaddr, 0, sizeof(cmd->info.subaddr));
break;
case SMU_I2C_TRANSFER_COMBINED:
cmd->info.devaddr &= 0xfe;
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c9fc06c7e685..d9cd3606040e 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -8,6 +8,18 @@ menuconfig MAILBOX
if MAILBOX
+config APPLE_MAILBOX
+ tristate "Apple Mailbox driver"
+ depends on ARCH_APPLE || (ARM64 && COMPILE_TEST)
+ default ARCH_APPLE
+ help
+ Apple SoCs have various co-processors required for certain
+ peripherals to work (NVMe, display controller, etc.). This
+ driver adds support for the mailbox controller used to
+ communicate with those.
+
+ Say Y here if you have an Apple SoC.
+
config ARM_MHU
tristate "ARM MHU Mailbox"
depends on ARM_AMBA
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index c2089f04887e..338cc05e5431 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -58,3 +58,5 @@ obj-$(CONFIG_SUN6I_MSGBOX) += sun6i-msgbox.o
obj-$(CONFIG_SPRD_MBOX) += sprd-mailbox.o
obj-$(CONFIG_QCOM_IPCC) += qcom-ipcc.o
+
+obj-$(CONFIG_APPLE_MAILBOX) += apple-mailbox.o
diff --git a/drivers/mailbox/apple-mailbox.c b/drivers/mailbox/apple-mailbox.c
new file mode 100644
index 000000000000..72942002a54a
--- /dev/null
+++ b/drivers/mailbox/apple-mailbox.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only OR MIT
+/*
+ * Apple mailbox driver
+ *
+ * Copyright (C) 2021 The Asahi Linux Contributors
+ *
+ * This driver adds support for two mailbox variants (called ASC and M3 by
+ * Apple) found in Apple SoCs such as the M1. It consists of two FIFOs used to
+ * exchange 64+32 bit messages between the main CPU and a co-processor.
+ * Various coprocessors implement different IPC protocols based on these simple
+ * messages and shared memory buffers.
+ *
+ * Both the main CPU and the co-processor see the same set of registers but
+ * the first FIFO (A2I) is always used to transfer messages from the application
+ * processor (us) to the I/O processor and the second one (I2A) for the
+ * other direction.
+ */
+
+#include <linux/apple-mailbox.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/mailbox_controller.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define APPLE_ASC_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_ASC_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_ASC_MBOX_A2I_CONTROL 0x110
+#define APPLE_ASC_MBOX_A2I_SEND0 0x800
+#define APPLE_ASC_MBOX_A2I_SEND1 0x808
+#define APPLE_ASC_MBOX_A2I_RECV0 0x810
+#define APPLE_ASC_MBOX_A2I_RECV1 0x818
+
+#define APPLE_ASC_MBOX_I2A_CONTROL 0x114
+#define APPLE_ASC_MBOX_I2A_SEND0 0x820
+#define APPLE_ASC_MBOX_I2A_SEND1 0x828
+#define APPLE_ASC_MBOX_I2A_RECV0 0x830
+#define APPLE_ASC_MBOX_I2A_RECV1 0x838
+
+#define APPLE_M3_MBOX_CONTROL_FULL BIT(16)
+#define APPLE_M3_MBOX_CONTROL_EMPTY BIT(17)
+
+#define APPLE_M3_MBOX_A2I_CONTROL 0x50
+#define APPLE_M3_MBOX_A2I_SEND0 0x60
+#define APPLE_M3_MBOX_A2I_SEND1 0x68
+#define APPLE_M3_MBOX_A2I_RECV0 0x70
+#define APPLE_M3_MBOX_A2I_RECV1 0x78
+
+#define APPLE_M3_MBOX_I2A_CONTROL 0x80
+#define APPLE_M3_MBOX_I2A_SEND0 0x90
+#define APPLE_M3_MBOX_I2A_SEND1 0x98
+#define APPLE_M3_MBOX_I2A_RECV0 0xa0
+#define APPLE_M3_MBOX_I2A_RECV1 0xa8
+
+#define APPLE_M3_MBOX_IRQ_ENABLE 0x48
+#define APPLE_M3_MBOX_IRQ_ACK 0x4c
+#define APPLE_M3_MBOX_IRQ_A2I_EMPTY BIT(0)
+#define APPLE_M3_MBOX_IRQ_A2I_NOT_EMPTY BIT(1)
+#define APPLE_M3_MBOX_IRQ_I2A_EMPTY BIT(2)
+#define APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY BIT(3)
+
+#define APPLE_MBOX_MSG1_OUTCNT GENMASK(56, 52)
+#define APPLE_MBOX_MSG1_INCNT GENMASK(51, 48)
+#define APPLE_MBOX_MSG1_OUTPTR GENMASK(47, 44)
+#define APPLE_MBOX_MSG1_INPTR GENMASK(43, 40)
+#define APPLE_MBOX_MSG1_MSG GENMASK(31, 0)
+
+struct apple_mbox_hw {
+ unsigned int control_full;
+ unsigned int control_empty;
+
+ unsigned int a2i_control;
+ unsigned int a2i_send0;
+ unsigned int a2i_send1;
+
+ unsigned int i2a_control;
+ unsigned int i2a_recv0;
+ unsigned int i2a_recv1;
+
+ bool has_irq_controls;
+ unsigned int irq_enable;
+ unsigned int irq_ack;
+ unsigned int irq_bit_recv_not_empty;
+ unsigned int irq_bit_send_empty;
+};
+
+struct apple_mbox {
+ void __iomem *regs;
+ const struct apple_mbox_hw *hw;
+
+ int irq_recv_not_empty;
+ int irq_send_empty;
+
+ struct mbox_chan chan;
+
+ struct device *dev;
+ struct mbox_controller controller;
+};
+
+static const struct of_device_id apple_mbox_of_match[];
+
+static bool apple_mbox_hw_can_send(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->a2i_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_full);
+}
+
+static int apple_mbox_hw_send(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_send(apple_mbox))
+ return -EBUSY;
+
+ dev_dbg(apple_mbox->dev, "> TX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ writeq_relaxed(msg->msg0, apple_mbox->regs + apple_mbox->hw->a2i_send0);
+ writeq_relaxed(FIELD_PREP(APPLE_MBOX_MSG1_MSG, msg->msg1),
+ apple_mbox->regs + apple_mbox->hw->a2i_send1);
+
+ return 0;
+}
+
+static bool apple_mbox_hw_can_recv(struct apple_mbox *apple_mbox)
+{
+ u32 mbox_ctrl =
+ readl_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_control);
+
+ return !(mbox_ctrl & apple_mbox->hw->control_empty);
+}
+
+static int apple_mbox_hw_recv(struct apple_mbox *apple_mbox,
+ struct apple_mbox_msg *msg)
+{
+ if (!apple_mbox_hw_can_recv(apple_mbox))
+ return -ENOMSG;
+
+ msg->msg0 = readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv0);
+ msg->msg1 = FIELD_GET(
+ APPLE_MBOX_MSG1_MSG,
+ readq_relaxed(apple_mbox->regs + apple_mbox->hw->i2a_recv1));
+
+ dev_dbg(apple_mbox->dev, "< RX %016llx %08x\n", msg->msg0, msg->msg1);
+
+ return 0;
+}
+
+static int apple_mbox_chan_send_data(struct mbox_chan *chan, void *data)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+ struct apple_mbox_msg *msg = data;
+ int ret;
+
+ ret = apple_mbox_hw_send(apple_mbox, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * The interrupt is level triggered and will keep firing as long as the
+ * FIFO is empty. It will also keep firing if the FIFO was empty
+ * at any point in the past until it has been acknowledged at the
+ * mailbox level. By acknowledging it here we can ensure that we will
+ * only get the interrupt once the FIFO has been emptied again.
+ * If the FIFO is already empty before the ack it will fire again
+ * immediately after the ack.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+ enable_irq(apple_mbox->irq_send_empty);
+
+ return 0;
+}
+
+static irqreturn_t apple_mbox_send_empty_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+
+ /*
+ * We don't need to acknowledge the interrupt at the mailbox level
+ * here even if supported by the hardware. It will keep firing but that
+ * doesn't matter since it's disabled at the main interrupt controller.
+ * apple_mbox_chan_send_data will acknowledge it before enabling
+ * it at the main controller again.
+ */
+ disable_irq_nosync(apple_mbox->irq_send_empty);
+ mbox_chan_txdone(&apple_mbox->chan, 0);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t apple_mbox_recv_irq(int irq, void *data)
+{
+ struct apple_mbox *apple_mbox = data;
+ struct apple_mbox_msg msg;
+
+ while (apple_mbox_hw_recv(apple_mbox, &msg) == 0)
+ mbox_chan_received_data(&apple_mbox->chan, (void *)&msg);
+
+ /*
+ * The interrupt will keep firing even if there are no more messages
+ * unless we also acknowledge it at the mailbox level here.
+ * There's no race if a message comes in between the check in the while
+ * loop above and the ack below: if a new message arrives in between the
+ * two, the interrupt will simply fire again immediately after the ack
+ * since it is level triggered.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_ack);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int apple_mbox_chan_startup(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ /*
+ * Only some variants of this mailbox HW provide interrupt control
+ * at the mailbox level. We therefore need to handle enabling/disabling
+ * interrupts at the main interrupt controller anyway for hardware that
+ * doesn't. Just always keep the interrupts we care about enabled at
+ * the mailbox level so that both hardware revisions behave almost
+ * the same.
+ */
+ if (apple_mbox->hw->has_irq_controls) {
+ writel_relaxed(apple_mbox->hw->irq_bit_recv_not_empty |
+ apple_mbox->hw->irq_bit_send_empty,
+ apple_mbox->regs + apple_mbox->hw->irq_enable);
+ }
+
+ enable_irq(apple_mbox->irq_recv_not_empty);
+ return 0;
+}
+
+static void apple_mbox_chan_shutdown(struct mbox_chan *chan)
+{
+ struct apple_mbox *apple_mbox = chan->con_priv;
+
+ disable_irq(apple_mbox->irq_recv_not_empty);
+}
+
+static const struct mbox_chan_ops apple_mbox_ops = {
+ .send_data = apple_mbox_chan_send_data,
+ .startup = apple_mbox_chan_startup,
+ .shutdown = apple_mbox_chan_shutdown,
+};
+
+static struct mbox_chan *apple_mbox_of_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *args)
+{
+ if (args->args_count != 0)
+ return ERR_PTR(-EINVAL);
+
+ return &mbox->chans[0];
+}
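+
+/*
+ * Devicetree sketch (illustrative; the unit address, reg and interrupt values
+ * and the consumer node are hypothetical - only the compatible string, the
+ * "send-empty"/"recv-not-empty" interrupt names and the zero-cell specifier
+ * come from this driver):
+ *
+ *	mbox: mailbox@77408000 {
+ *		compatible = "apple,t8103-asc-mailbox";
+ *		reg = <0x0 0x77408000 0x0 0x4000>;
+ *		interrupts = < ... >;
+ *		interrupt-names = "send-empty", "recv-not-empty";
+ *		#mbox-cells = <0>;
+ *	};
+ *
+ *	consumer {
+ *		mboxes = <&mbox>;
+ *	};
+ */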
+
+static int apple_mbox_probe(struct platform_device *pdev)
+{
+ int ret;
+ const struct of_device_id *match;
+ char *irqname;
+ struct apple_mbox *mbox;
+ struct device *dev = &pdev->dev;
+
+ match = of_match_node(apple_mbox_of_match, pdev->dev.of_node);
+ if (!match)
+ return -EINVAL;
+ if (!match->data)
+ return -EINVAL;
+
+ mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
+ if (!mbox)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, mbox);
+
+ mbox->dev = dev;
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mbox->regs))
+ return PTR_ERR(mbox->regs);
+
+ mbox->hw = match->data;
+ mbox->irq_recv_not_empty =
+ platform_get_irq_byname(pdev, "recv-not-empty");
+ if (mbox->irq_recv_not_empty < 0)
+ return -ENODEV;
+
+ mbox->irq_send_empty = platform_get_irq_byname(pdev, "send-empty");
+ if (mbox->irq_send_empty < 0)
+ return -ENODEV;
+
+ mbox->controller.dev = mbox->dev;
+ mbox->controller.num_chans = 1;
+ mbox->controller.chans = &mbox->chan;
+ mbox->controller.ops = &apple_mbox_ops;
+ mbox->controller.txdone_irq = true;
+ mbox->controller.of_xlate = apple_mbox_of_xlate;
+ mbox->chan.con_priv = mbox;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-recv", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_threaded_irq(dev, mbox->irq_recv_not_empty, NULL,
+ apple_mbox_recv_irq,
+ IRQF_NO_AUTOEN | IRQF_ONESHOT, irqname,
+ mbox);
+ if (ret)
+ return ret;
+
+ irqname = devm_kasprintf(dev, GFP_KERNEL, "%s-send", dev_name(dev));
+ if (!irqname)
+ return -ENOMEM;
+
+ ret = devm_request_irq(dev, mbox->irq_send_empty,
+ apple_mbox_send_empty_irq, IRQF_NO_AUTOEN,
+ irqname, mbox);
+ if (ret)
+ return ret;
+
+ return devm_mbox_controller_register(dev, &mbox->controller);
+}
+
+static const struct apple_mbox_hw apple_mbox_asc_hw = {
+ .control_full = APPLE_ASC_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_ASC_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_ASC_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_ASC_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_ASC_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_ASC_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_ASC_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_ASC_MBOX_I2A_RECV1,
+
+ .has_irq_controls = false,
+};
+
+static const struct apple_mbox_hw apple_mbox_m3_hw = {
+ .control_full = APPLE_M3_MBOX_CONTROL_FULL,
+ .control_empty = APPLE_M3_MBOX_CONTROL_EMPTY,
+
+ .a2i_control = APPLE_M3_MBOX_A2I_CONTROL,
+ .a2i_send0 = APPLE_M3_MBOX_A2I_SEND0,
+ .a2i_send1 = APPLE_M3_MBOX_A2I_SEND1,
+
+ .i2a_control = APPLE_M3_MBOX_I2A_CONTROL,
+ .i2a_recv0 = APPLE_M3_MBOX_I2A_RECV0,
+ .i2a_recv1 = APPLE_M3_MBOX_I2A_RECV1,
+
+ .has_irq_controls = true,
+ .irq_enable = APPLE_M3_MBOX_IRQ_ENABLE,
+ .irq_ack = APPLE_M3_MBOX_IRQ_ACK,
+ .irq_bit_recv_not_empty = APPLE_M3_MBOX_IRQ_I2A_NOT_EMPTY,
+ .irq_bit_send_empty = APPLE_M3_MBOX_IRQ_A2I_EMPTY,
+};
+
+static const struct of_device_id apple_mbox_of_match[] = {
+ { .compatible = "apple,t8103-asc-mailbox", .data = &apple_mbox_asc_hw },
+ { .compatible = "apple,t8103-m3-mailbox", .data = &apple_mbox_m3_hw },
+ {}
+};
+MODULE_DEVICE_TABLE(of, apple_mbox_of_match);
+
+static struct platform_driver apple_mbox_driver = {
+ .driver = {
+ .name = "apple-mailbox",
+ .of_match_table = apple_mbox_of_match,
+ },
+ .probe = apple_mbox_probe,
+};
+module_platform_driver(apple_mbox_driver);
+
+MODULE_LICENSE("Dual MIT/GPL");
+MODULE_AUTHOR("Sven Peter <sven@svenpeter.dev>");
+MODULE_DESCRIPTION("Apple Mailbox driver");
diff --git a/drivers/mailbox/bcm2835-mailbox.c b/drivers/mailbox/bcm2835-mailbox.c
index 86b7ce3549c5..fbfd0202047c 100644
--- a/drivers/mailbox/bcm2835-mailbox.c
+++ b/drivers/mailbox/bcm2835-mailbox.c
@@ -137,7 +137,6 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret = 0;
- struct resource *iomem;
struct bcm2835_mbox *mbox;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -153,8 +152,7 @@ static int bcm2835_mbox_probe(struct platform_device *pdev)
return -ENODEV;
}
- iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
return ret;
diff --git a/drivers/mailbox/hi3660-mailbox.c b/drivers/mailbox/hi3660-mailbox.c
index 395ddc250828..e41bd2f5ea46 100644
--- a/drivers/mailbox/hi3660-mailbox.c
+++ b/drivers/mailbox/hi3660-mailbox.c
@@ -240,7 +240,6 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct hi3660_mbox *mbox;
struct mbox_chan *chan;
- struct resource *res;
unsigned long ch;
int err;
@@ -248,8 +247,7 @@ static int hi3660_mbox_probe(struct platform_device *pdev)
if (!mbox)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->base))
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 560cd09538b1..fca61f5312d9 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -264,7 +264,6 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device *dev = &pdev->dev;
struct hi6220_mbox *mbox;
- struct resource *res;
int i, err;
mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
@@ -287,15 +286,13 @@ static int hi6220_mbox_probe(struct platform_device *pdev)
if (mbox->irq < 0)
return mbox->irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mbox->ipc = devm_ioremap_resource(dev, res);
+ mbox->ipc = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->ipc)) {
dev_err(dev, "ioremap ipc failed\n");
return PTR_ERR(mbox->ipc);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- mbox->base = devm_ioremap_resource(dev, res);
+ mbox->base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(mbox->base)) {
dev_err(dev, "ioremap buffer failed\n");
return PTR_ERR(mbox->base);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
index 0ce75c6b36b6..ffe36a6bef9e 100644
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/s4.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -18,6 +19,8 @@
#define IMX_MU_CHANS 16
/* TX0/RX0/RXDB[0-3] */
#define IMX_MU_SCU_CHANS 6
+/* TX0/RX0 */
+#define IMX_MU_S4_CHANS 2
#define IMX_MU_CHAN_NAME_SIZE 20
enum imx_mu_chan_type {
@@ -47,6 +50,11 @@ struct imx_sc_rpc_msg_max {
u32 data[7];
};
+struct imx_s4_rpc_msg_max {
+ struct imx_s4_rpc_msg hdr;
+ u32 data[254];
+};
+
struct imx_mu_con_priv {
unsigned int idx;
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
@@ -58,6 +66,7 @@ struct imx_mu_con_priv {
struct imx_mu_priv {
struct device *dev;
void __iomem *base;
+ void *msg;
spinlock_t xcr_lock; /* control register lock */
struct mbox_controller mbox;
@@ -75,7 +84,8 @@ struct imx_mu_priv {
enum imx_mu_type {
IMX_MU_V1,
- IMX_MU_V2,
+ IMX_MU_V2 = BIT(1),
+ IMX_MU_V2_S4 = BIT(15),
};
struct imx_mu_dcfg {
@@ -89,18 +99,18 @@ struct imx_mu_dcfg {
u32 xCR[4]; /* Control Registers */
};
-#define IMX_MU_xSR_GIPn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
-#define IMX_MU_xSR_RFn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
-#define IMX_MU_xSR_TEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xSR_GIPn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xSR_RFn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xSR_TEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Enable */
-#define IMX_MU_xCR_GIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
+#define IMX_MU_xCR_GIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(28 + (3 - (x))))
/* Receive Interrupt Enable */
-#define IMX_MU_xCR_RIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
+#define IMX_MU_xCR_RIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(24 + (3 - (x))))
/* Transmit Interrupt Enable */
-#define IMX_MU_xCR_TIEn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
+#define IMX_MU_xCR_TIEn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(20 + (3 - (x))))
/* General Purpose Interrupt Request */
-#define IMX_MU_xCR_GIRn(type, x) (type == IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
+#define IMX_MU_xCR_GIRn(type, x) (type & IMX_MU_V2 ? BIT(x) : BIT(16 + (3 - (x))))
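+/*
+ * Note (summary of the change above, not new code): IMX_MU_V2 and
+ * IMX_MU_V2_S4 are now individual flag bits, so an S4 configuration sets
+ * .type = IMX_MU_V2 | IMX_MU_V2_S4 and still satisfies the
+ * "type & IMX_MU_V2" register-layout checks in these macros.
+ */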
static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
@@ -167,14 +177,22 @@ static int imx_mu_generic_rx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_tx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp,
- void *data)
+static int imx_mu_specific_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp, void *data)
{
- struct imx_sc_rpc_msg_max *msg = data;
u32 *arg = data;
int i, ret;
u32 xsr;
+ u32 size, max_size, num_tr;
+
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ num_tr = 8;
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)data)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ num_tr = 4;
+ }
switch (cp->type) {
case IMX_MU_TYPE_TX:
@@ -183,27 +201,27 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
* sizeof yields bytes.
*/
- if (msg->hdr.size > sizeof(*msg) / 4) {
+ if (size > max_size / 4) {
/*
* The real message size can be different to
- * struct imx_sc_rpc_msg_max size
+ * struct imx_sc_rpc_msg_max/imx_s4_rpc_msg_max size
*/
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on TX; got: %i bytes\n", sizeof(*msg), msg->hdr.size << 2);
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on TX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 0; i < 4 && i < msg->hdr.size; i++)
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
- for (; i < msg->hdr.size; i++) {
+ for (i = 0; i < num_tr && i < size; i++)
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
+ for (; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_TSR],
xsr,
- xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % 4),
+ xsr & IMX_MU_xSR_TEn(priv->dcfg->type, i % num_tr),
0, 100);
if (ret) {
dev_err(priv->dev, "Send data index: %d timeout\n", i);
return ret;
}
- imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % 4) * 4);
+ imx_mu_write(priv, *arg++, priv->dcfg->xTR + (i % num_tr) * 4);
}
imx_mu_xcr_rmw(priv, IMX_MU_TCR, IMX_MU_xCR_TIEn(priv->dcfg->type, cp->idx), 0);
@@ -216,23 +234,32 @@ static int imx_mu_scu_tx(struct imx_mu_priv *priv,
return 0;
}
-static int imx_mu_scu_rx(struct imx_mu_priv *priv,
- struct imx_mu_con_priv *cp)
+static int imx_mu_specific_rx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp)
{
- struct imx_sc_rpc_msg_max msg;
- u32 *data = (u32 *)&msg;
+ u32 *data;
int i, ret;
u32 xsr;
+ u32 size, max_size;
+
+ data = (u32 *)priv->msg;
imx_mu_xcr_rmw(priv, IMX_MU_RCR, 0, IMX_MU_xCR_RIEn(priv->dcfg->type, 0));
*data++ = imx_mu_read(priv, priv->dcfg->xRR);
- if (msg.hdr.size > sizeof(msg) / 4) {
- dev_err(priv->dev, "Maximal message size (%zu bytes) exceeded on RX; got: %i bytes\n", sizeof(msg), msg.hdr.size << 2);
+ if (priv->dcfg->type & IMX_MU_V2_S4) {
+ size = ((struct imx_s4_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_s4_rpc_msg_max);
+ } else {
+ size = ((struct imx_sc_rpc_msg_max *)priv->msg)->hdr.size;
+ max_size = sizeof(struct imx_sc_rpc_msg_max);
+ }
+
+ if (size > max_size / 4) {
+ dev_err(priv->dev, "Maximal message size (%u bytes) exceeded on RX; got: %i bytes\n", max_size, size << 2);
return -EINVAL;
}
- for (i = 1; i < msg.hdr.size; i++) {
+ for (i = 1; i < size; i++) {
ret = readl_poll_timeout(priv->base + priv->dcfg->xSR[IMX_MU_RSR], xsr,
xsr & IMX_MU_xSR_RFn(priv->dcfg->type, i % 4), 0, 100);
if (ret) {
@@ -243,7 +270,7 @@ static int imx_mu_scu_rx(struct imx_mu_priv *priv,
}
imx_mu_xcr_rmw(priv, IMX_MU_RCR, IMX_MU_xCR_RIEn(priv->dcfg->type, 0), 0);
- mbox_chan_received_data(cp->chan, (void *)&msg);
+ mbox_chan_received_data(cp->chan, (void *)priv->msg);
return 0;
}
@@ -394,8 +421,8 @@ static const struct mbox_chan_ops imx_mu_ops = {
.shutdown = imx_mu_shutdown,
};
-static struct mbox_chan *imx_mu_scu_xlate(struct mbox_controller *mbox,
- const struct of_phandle_args *sp)
+static struct mbox_chan *imx_mu_specific_xlate(struct mbox_controller *mbox,
+ const struct of_phandle_args *sp)
{
u32 type, idx, chan;
@@ -478,11 +505,12 @@ static void imx_mu_init_generic(struct imx_mu_priv *priv)
imx_mu_write(priv, 0, priv->dcfg->xCR[i]);
}
-static void imx_mu_init_scu(struct imx_mu_priv *priv)
+static void imx_mu_init_specific(struct imx_mu_priv *priv)
{
unsigned int i;
+ int num_chans = priv->dcfg->type & IMX_MU_V2_S4 ? IMX_MU_S4_CHANS : IMX_MU_SCU_CHANS;
- for (i = 0; i < IMX_MU_SCU_CHANS; i++) {
+ for (i = 0; i < num_chans; i++) {
struct imx_mu_con_priv *cp = &priv->con_priv[i];
cp->idx = i < 2 ? 0 : i - 2;
@@ -493,8 +521,8 @@ static void imx_mu_init_scu(struct imx_mu_priv *priv)
"imx_mu_chan[%i-%i]", cp->type, cp->idx);
}
- priv->mbox.num_chans = IMX_MU_SCU_CHANS;
- priv->mbox.of_xlate = imx_mu_scu_xlate;
+ priv->mbox.num_chans = num_chans;
+ priv->mbox.of_xlate = imx_mu_specific_xlate;
/* Set default MU configuration */
for (i = 0; i < IMX_MU_xCR_MAX; i++)
@@ -508,6 +536,7 @@ static int imx_mu_probe(struct platform_device *pdev)
struct imx_mu_priv *priv;
const struct imx_mu_dcfg *dcfg;
int ret;
+ u32 size;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -528,6 +557,15 @@ static int imx_mu_probe(struct platform_device *pdev)
return -EINVAL;
priv->dcfg = dcfg;
+ if (priv->dcfg->type & IMX_MU_V2_S4)
+ size = sizeof(struct imx_s4_rpc_msg_max);
+ else
+ size = sizeof(struct imx_sc_rpc_msg_max);
+
+ priv->msg = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!priv->msg)
+ return -ENOMEM;
+
priv->clk = devm_clk_get(dev, NULL);
if (IS_ERR(priv->clk)) {
if (PTR_ERR(priv->clk) != -ENOENT)
@@ -623,10 +661,21 @@ static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp = {
.xCR = {0x110, 0x114, 0x120, 0x128},
};
+static const struct imx_mu_dcfg imx_mu_cfg_imx8ulp_s4 = {
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
+ .type = IMX_MU_V2 | IMX_MU_V2_S4,
+ .xTR = 0x200,
+ .xRR = 0x280,
+ .xSR = {0xC, 0x118, 0x124, 0x12C},
+ .xCR = {0x110, 0x114, 0x120, 0x128},
+};
+
static const struct imx_mu_dcfg imx_mu_cfg_imx8_scu = {
- .tx = imx_mu_scu_tx,
- .rx = imx_mu_scu_rx,
- .init = imx_mu_init_scu,
+ .tx = imx_mu_specific_tx,
+ .rx = imx_mu_specific_rx,
+ .init = imx_mu_init_specific,
.xTR = 0x0,
.xRR = 0x10,
.xSR = {0x20, 0x20, 0x20, 0x20},
@@ -637,6 +686,7 @@ static const struct of_device_id imx_mu_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-mu", .data = &imx_mu_cfg_imx7ulp },
{ .compatible = "fsl,imx6sx-mu", .data = &imx_mu_cfg_imx6sx },
{ .compatible = "fsl,imx8ulp-mu", .data = &imx_mu_cfg_imx8ulp },
+ { .compatible = "fsl,imx8ulp-mu-s4", .data = &imx_mu_cfg_imx8ulp_s4 },
{ .compatible = "fsl,imx8-mu-scu", .data = &imx_mu_cfg_imx8_scu },
{ },
};
diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c
index 75282666fb06..afb320e9d69c 100644
--- a/drivers/mailbox/mailbox-altera.c
+++ b/drivers/mailbox/mailbox-altera.c
@@ -285,7 +285,6 @@ static const struct mbox_chan_ops altera_mbox_ops = {
static int altera_mbox_probe(struct platform_device *pdev)
{
struct altera_mbox *mbox;
- struct resource *regs;
struct mbox_chan *chans;
int ret;
@@ -299,9 +298,7 @@ static int altera_mbox_probe(struct platform_device *pdev)
if (!chans)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- mbox->mbox_base = devm_ioremap_resource(&pdev->dev, regs);
+ mbox->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->mbox_base))
return PTR_ERR(mbox->mbox_base);
diff --git a/drivers/mailbox/mailbox-sti.c b/drivers/mailbox/mailbox-sti.c
index ab3a6ab762d3..823061dd8c8e 100644
--- a/drivers/mailbox/mailbox-sti.c
+++ b/drivers/mailbox/mailbox-sti.c
@@ -408,7 +408,6 @@ static int sti_mbox_probe(struct platform_device *pdev)
struct sti_mbox_device *mdev;
struct device_node *np = pdev->dev.of_node;
struct mbox_chan *chans;
- struct resource *res;
int irq;
int ret;
@@ -425,8 +424,7 @@ static int sti_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, mdev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->base = devm_ioremap_resource(&pdev->dev, res);
+ mdev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->base))
return PTR_ERR(mdev->base);
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index 5b3a2dcd5955..946ea773ec33 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -170,7 +170,6 @@ static const struct mbox_chan_ops slimpro_mbox_ops = {
static int slimpro_mbox_probe(struct platform_device *pdev)
{
struct slimpro_mbox *ctx;
- struct resource *regs;
void __iomem *mb_base;
int rc;
int i;
@@ -181,8 +180,7 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ctx);
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mb_base = devm_ioremap_resource(&pdev->dev, regs);
+ mb_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mb_base))
return PTR_ERR(mb_base);
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
index 64175a893312..a8845b162dbf 100644
--- a/drivers/mailbox/mtk-cmdq-mailbox.c
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -195,7 +195,6 @@ static void cmdq_task_exec_done(struct cmdq_task *task, int sta)
struct cmdq_task_cb *cb = &task->pkt->async_cb;
struct cmdq_cb_data data;
- WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
data.sta = sta;
data.data = cb->data;
data.pkt = task->pkt;
@@ -525,21 +524,20 @@ static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
static int cmdq_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct cmdq *cmdq;
int err, i;
struct gce_plat *plat_data;
struct device_node *phandle = dev->of_node;
struct device_node *node;
int alias_id = 0;
- char clk_name[4] = "gce";
+ static const char * const clk_name = "gce";
+ static const char * const clk_names[] = { "gce0", "gce1" };
cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
if (!cmdq)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cmdq->base = devm_ioremap_resource(dev, res);
+ cmdq->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cmdq->base))
return PTR_ERR(cmdq->base);
@@ -570,12 +568,9 @@ static int cmdq_probe(struct platform_device *pdev)
if (cmdq->gce_num > 1) {
for_each_child_of_node(phandle->parent, node) {
- char clk_id[8];
-
alias_id = of_alias_get_id(node, clk_name);
- if (alias_id < cmdq->gce_num) {
- snprintf(clk_id, sizeof(clk_id), "%s%d", clk_name, alias_id);
- cmdq->clocks[alias_id].id = clk_id;
+ if (alias_id >= 0 && alias_id < cmdq->gce_num) {
+ cmdq->clocks[alias_id].id = clk_names[alias_id];
cmdq->clocks[alias_id].clk = of_clk_get(node, 0);
if (IS_ERR(cmdq->clocks[alias_id].clk)) {
dev_err(dev, "failed to get gce clk: %d\n", alias_id);
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index 7295e3835e30..58f3d569f095 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -699,7 +699,6 @@ static struct mbox_chan *omap_mbox_of_xlate(struct mbox_controller *controller,
static int omap_mbox_probe(struct platform_device *pdev)
{
- struct resource *mem;
int ret;
struct mbox_chan *chnls;
struct omap_mbox **list, *mbox, *mboxblk;
@@ -776,8 +775,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
if (!mdev)
return -ENOMEM;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mdev->mbox_base = devm_ioremap_resource(&pdev->dev, mem);
+ mdev->mbox_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mdev->mbox_base))
return PTR_ERR(mdev->mbox_base);
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0296558f9e22..887a3704c12e 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -52,6 +52,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
+#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>
#include <linux/mailbox_client.h>
@@ -62,31 +63,48 @@
#define MBOX_IRQ_NAME "pcc-mbox"
-static struct mbox_chan *pcc_mbox_channels;
-
-/* Array of cached virtual address for doorbell registers */
-static void __iomem **pcc_doorbell_vaddr;
-/* Array of cached virtual address for doorbell ack registers */
-static void __iomem **pcc_doorbell_ack_vaddr;
-/* Array of doorbell interrupts */
-static int *pcc_doorbell_irq;
+/**
+ * struct pcc_chan_reg - PCC register bundle
+ *
+ * @vaddr: cached virtual address for this register
+ * @gas: pointer to the generic address structure for this register
+ * @preserve_mask: bitmask to preserve when writing to this register
+ * @set_mask: bitmask to set when writing to this register
+ * @status_mask: bitmask to determine and/or update the status for this register
+ */
+struct pcc_chan_reg {
+ void __iomem *vaddr;
+ struct acpi_generic_address *gas;
+ u64 preserve_mask;
+ u64 set_mask;
+ u64 status_mask;
+};
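+
+/*
+ * Worked example (mask values are illustrative): a doorbell described with
+ * preserve_mask = 0xfffffffe and set_mask = 0x1 is rung by reading the
+ * register, keeping every bit except bit 0 and then setting bit 0, i.e.
+ * new = (old & preserve_mask) | set_mask, which is exactly what
+ * pcc_chan_reg_read_modify_write() below implements.
+ */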
-static struct mbox_controller pcc_mbox_ctrl = {};
/**
- * get_pcc_channel - Given a PCC subspace idx, get
- * the respective mbox_channel.
- * @id: PCC subspace index.
+ * struct pcc_chan_info - PCC channel specific information
*
- * Return: ERR_PTR(errno) if error, else pointer
- * to mbox channel.
+ * @chan: PCC channel information with Shared Memory Region info
+ * @db: PCC register bundle for the doorbell register
+ * @plat_irq_ack: PCC register bundle for the platform interrupt acknowledge
+ * register
+ * @cmd_complete: PCC register bundle for the command complete check register
+ * @cmd_update: PCC register bundle for the command complete update register
+ * @error: PCC register bundle for the error status register
+ * @plat_irq: platform interrupt
*/
-static struct mbox_chan *get_pcc_channel(int id)
-{
- if (id < 0 || id >= pcc_mbox_ctrl.num_chans)
- return ERR_PTR(-ENOENT);
+struct pcc_chan_info {
+ struct pcc_mbox_chan chan;
+ struct pcc_chan_reg db;
+ struct pcc_chan_reg plat_irq_ack;
+ struct pcc_chan_reg cmd_complete;
+ struct pcc_chan_reg cmd_update;
+ struct pcc_chan_reg error;
+ int plat_irq;
+};
- return &pcc_mbox_channels[id];
-}
+#define to_pcc_chan_info(c) container_of(c, struct pcc_chan_info, chan)
+static struct pcc_chan_info *chan_info;
+static int pcc_chan_count;
/*
* PCC can be used with perf critical drivers such as CPPC
@@ -96,10 +114,8 @@ static struct mbox_chan *get_pcc_channel(int id)
* The below read_register and write_registers are used to read and
* write from perf critical registers such as PCC doorbell register
*/
-static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
+static void read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
*val = readb(vaddr);
@@ -113,19 +129,11 @@ static int read_register(void __iomem *vaddr, u64 *val, unsigned int bit_width)
case 64:
*val = readq(vaddr);
break;
- default:
- pr_debug("Error: Cannot read register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
}
-static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
+static void write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
{
- int ret_val = 0;
-
switch (bit_width) {
case 8:
writeb(val, vaddr);
@@ -139,13 +147,54 @@ static int write_register(void __iomem *vaddr, u64 val, unsigned int bit_width)
case 64:
writeq(val, vaddr);
break;
- default:
- pr_debug("Error: Cannot write register of %u bit width",
- bit_width);
- ret_val = -EFAULT;
- break;
}
- return ret_val;
+}
+
+static int pcc_chan_reg_read(struct pcc_chan_reg *reg, u64 *val)
+{
+ int ret = 0;
+
+ if (!reg->gas) {
+ *val = 0;
+ return 0;
+ }
+
+ if (reg->vaddr)
+ read_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_read(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_write(struct pcc_chan_reg *reg, u64 val)
+{
+ int ret = 0;
+
+ if (!reg->gas)
+ return 0;
+
+ if (reg->vaddr)
+ write_register(reg->vaddr, val, reg->gas->bit_width);
+ else
+ ret = acpi_write(val, reg->gas);
+
+ return ret;
+}
+
+static int pcc_chan_reg_read_modify_write(struct pcc_chan_reg *reg)
+{
+ int ret = 0;
+ u64 val;
+
+ ret = pcc_chan_reg_read(reg, &val);
+ if (ret)
+ return ret;
+
+ val &= reg->preserve_mask;
+ val |= reg->set_mask;
+
+ return pcc_chan_reg_write(reg, val);
}
/**
@@ -174,42 +223,42 @@ static int pcc_map_interrupt(u32 interrupt, u32 flags)
/**
* pcc_mbox_irq - PCC mailbox interrupt handler
+ * @irq: interrupt number
+ * @p: data/cookie passed from the caller to identify the channel
+ *
+ * Returns: IRQ_HANDLED if interrupt is handled or IRQ_NONE if not
*/
static irqreturn_t pcc_mbox_irq(int irq, void *p)
{
- struct acpi_generic_address *doorbell_ack;
- struct acpi_pcct_hw_reduced *pcct_ss;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan = p;
- u64 doorbell_ack_preserve;
- u64 doorbell_ack_write;
- u64 doorbell_ack_val;
+ u64 val;
int ret;
- pcct_ss = chan->con_priv;
+ pchan = chan->con_priv;
- mbox_chan_received_data(chan, NULL);
+ ret = pcc_chan_reg_read(&pchan->cmd_complete, &val);
+ if (ret)
+ return IRQ_NONE;
- if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced_type2 *pcct2_ss = chan->con_priv;
- u32 id = chan - pcc_mbox_channels;
+ val &= pchan->cmd_complete.status_mask;
+ if (!val)
+ return IRQ_NONE;
- doorbell_ack = &pcct2_ss->platform_ack_register;
- doorbell_ack_preserve = pcct2_ss->ack_preserve_mask;
- doorbell_ack_write = pcct2_ss->ack_write_mask;
+ ret = pcc_chan_reg_read(&pchan->error, &val);
+ if (ret)
+ return IRQ_NONE;
+ val &= pchan->error.status_mask;
+ if (val) {
+ val &= ~pchan->error.status_mask;
+ pcc_chan_reg_write(&pchan->error, val);
+ return IRQ_NONE;
+ }
- ret = read_register(pcc_doorbell_ack_vaddr[id],
- &doorbell_ack_val,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
+ if (pcc_chan_reg_read_modify_write(&pchan->plat_irq_ack))
+ return IRQ_NONE;
- ret = write_register(pcc_doorbell_ack_vaddr[id],
- (doorbell_ack_val & doorbell_ack_preserve)
- | doorbell_ack_write,
- doorbell_ack->bit_width);
- if (ret)
- return IRQ_NONE;
- }
+ mbox_chan_received_data(chan, NULL);
return IRQ_HANDLED;
}
@@ -224,29 +273,26 @@ static irqreturn_t pcc_mbox_irq(int irq, void *p)
* ACPI package. This is used to lookup the array of PCC
* subspaces as parsed by the PCC Mailbox controller.
*
- * Return: Pointer to the Mailbox Channel if successful or
- * ERR_PTR.
+ * Return: Pointer to the PCC Mailbox Channel if successful or ERR_PTR.
*/
-struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
- int subspace_id)
+struct pcc_mbox_chan *
+pcc_mbox_request_channel(struct mbox_client *cl, int subspace_id)
{
- struct device *dev = pcc_mbox_ctrl.dev;
+ struct pcc_chan_info *pchan;
struct mbox_chan *chan;
+ struct device *dev;
unsigned long flags;
- /*
- * Each PCC Subspace is a Mailbox Channel.
- * The PCC Clients get their PCC Subspace ID
- * from their own tables and pass it here.
- * This returns a pointer to the PCC subspace
- * for the Client to operate on.
- */
- chan = get_pcc_channel(subspace_id);
+ if (subspace_id < 0 || subspace_id >= pcc_chan_count)
+ return ERR_PTR(-ENOENT);
+ pchan = chan_info + subspace_id;
+ chan = pchan->chan.mchan;
if (IS_ERR(chan) || chan->cl) {
dev_err(dev, "Channel not found for idx: %d\n", subspace_id);
return ERR_PTR(-EBUSY);
}
+ dev = chan->mbox->dev;
spin_lock_irqsave(&chan->lock, flags);
chan->msg_free = 0;
@@ -260,44 +306,40 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
spin_unlock_irqrestore(&chan->lock, flags);
- if (pcc_doorbell_irq[subspace_id] > 0) {
+ if (pchan->plat_irq > 0) {
int rc;
- rc = devm_request_irq(dev, pcc_doorbell_irq[subspace_id],
- pcc_mbox_irq, 0, MBOX_IRQ_NAME, chan);
+ rc = devm_request_irq(dev, pchan->plat_irq, pcc_mbox_irq, 0,
+ MBOX_IRQ_NAME, chan);
if (unlikely(rc)) {
dev_err(dev, "failed to register PCC interrupt %d\n",
- pcc_doorbell_irq[subspace_id]);
- pcc_mbox_free_channel(chan);
- chan = ERR_PTR(rc);
+ pchan->plat_irq);
+ pcc_mbox_free_channel(&pchan->chan);
+ return ERR_PTR(rc);
}
}
- return chan;
+ return &pchan->chan;
}
EXPORT_SYMBOL_GPL(pcc_mbox_request_channel);
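+/*
+ * Typical caller sketch (illustrative; the subspace id variable and the shmem
+ * mapping below are hypothetical and the caller's responsibility). The
+ * returned pcc_mbox_chan now carries the Shared Memory Region parameters
+ * directly, so clients no longer need to parse the PCCT themselves:
+ *
+ *	struct pcc_mbox_chan *pchan;
+ *	void __iomem *shmem;
+ *
+ *	pchan = pcc_mbox_request_channel(&cl, my_subspace_id);
+ *	if (IS_ERR(pchan))
+ *		return PTR_ERR(pchan);
+ *	shmem = acpi_os_ioremap(pchan->shmem_base_addr, pchan->shmem_size);
+ */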
/**
* pcc_mbox_free_channel - Clients call this to free their Channel.
*
- * @chan: Pointer to the mailbox channel as returned by
- * pcc_mbox_request_channel()
+ * @pchan: Pointer to the PCC mailbox channel as returned by
+ * pcc_mbox_request_channel()
*/
-void pcc_mbox_free_channel(struct mbox_chan *chan)
+void pcc_mbox_free_channel(struct pcc_mbox_chan *pchan)
{
- u32 id = chan - pcc_mbox_channels;
+ struct pcc_chan_info *pchan_info = to_pcc_chan_info(pchan);
+ struct mbox_chan *chan = pchan->mchan;
unsigned long flags;
if (!chan || !chan->cl)
return;
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_mbox_free_channel: Invalid mbox_chan passed\n");
- return;
- }
-
- if (pcc_doorbell_irq[id] > 0)
- devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan);
+ if (pchan_info->plat_irq > 0)
+ devm_free_irq(chan->mbox->dev, pchan_info->plat_irq, chan);
spin_lock_irqsave(&chan->lock, flags);
chan->cl = NULL;
@@ -323,40 +365,14 @@ EXPORT_SYMBOL_GPL(pcc_mbox_free_channel);
*/
static int pcc_send_data(struct mbox_chan *chan, void *data)
{
- struct acpi_pcct_hw_reduced *pcct_ss = chan->con_priv;
- struct acpi_generic_address *doorbell;
- u64 doorbell_preserve;
- u64 doorbell_val;
- u64 doorbell_write;
- u32 id = chan - pcc_mbox_channels;
- int ret = 0;
-
- if (id >= pcc_mbox_ctrl.num_chans) {
- pr_debug("pcc_send_data: Invalid mbox_chan passed\n");
- return -ENOENT;
- }
+ int ret;
+ struct pcc_chan_info *pchan = chan->con_priv;
- doorbell = &pcct_ss->doorbell_register;
- doorbell_preserve = pcct_ss->preserve_mask;
- doorbell_write = pcct_ss->write_mask;
+ ret = pcc_chan_reg_read_modify_write(&pchan->cmd_update);
+ if (ret)
+ return ret;
- /* Sync notification from OS to Platform. */
- if (pcc_doorbell_vaddr[id]) {
- ret = read_register(pcc_doorbell_vaddr[id], &doorbell_val,
- doorbell->bit_width);
- if (ret)
- return ret;
- ret = write_register(pcc_doorbell_vaddr[id],
- (doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell->bit_width);
- } else {
- ret = acpi_read(&doorbell_val, doorbell);
- if (ret)
- return ret;
- ret = acpi_write((doorbell_val & doorbell_preserve) | doorbell_write,
- doorbell);
- }
- return ret;
+ return pcc_chan_reg_read_modify_write(&pchan->db);
}
static const struct mbox_chan_ops pcc_chan_ops = {
@@ -364,7 +380,7 @@ static const struct mbox_chan_ops pcc_chan_ops = {
};
/**
- * parse_pcc_subspaces -- Count PCC subspaces defined
+ * parse_pcc_subspace - Count PCC subspaces defined
* @header: Pointer to the ACPI subtable header under the PCCT.
* @end: End of subtable entry.
*
@@ -384,41 +400,172 @@ static int parse_pcc_subspace(union acpi_subtable_headers *header,
return -EINVAL;
}
+static int
+pcc_chan_reg_init(struct pcc_chan_reg *reg, struct acpi_generic_address *gas,
+ u64 preserve_mask, u64 set_mask, u64 status_mask, char *name)
+{
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
+ if (!(gas->bit_width >= 8 && gas->bit_width <= 64 &&
+ is_power_of_2(gas->bit_width))) {
+ pr_err("Error: Cannot access register of %u bit width",
+ gas->bit_width);
+ return -EFAULT;
+ }
+
+ reg->vaddr = acpi_os_ioremap(gas->address, gas->bit_width / 8);
+ if (!reg->vaddr) {
+ pr_err("Failed to ioremap PCC %s register\n", name);
+ return -ENOMEM;
+ }
+ }
+ reg->gas = gas;
+ reg->preserve_mask = preserve_mask;
+ reg->set_mask = set_mask;
+ reg->status_mask = status_mask;
+ return 0;
+}
+
/**
* pcc_parse_subspace_irq - Parse the PCC IRQ and PCC ACK register
- * There should be one entry per PCC client.
- * @id: PCC subspace index.
- * @pcct_ss: Pointer to the ACPI subtable header under the PCCT.
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
*
* Return: 0 for Success, else errno.
*
- * This gets called for each entry in the PCC table.
+ * There should be one entry per PCC channel. This gets called for each
+ * entry in the PCC table. It uses the PCCT Type 1 structure for all
+ * applicable types (Type 1-4) to fetch the platform interrupt.
*/
-static int pcc_parse_subspace_irq(int id,
- struct acpi_pcct_hw_reduced *pcct_ss)
+static int pcc_parse_subspace_irq(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
{
- pcc_doorbell_irq[id] = pcc_map_interrupt(pcct_ss->platform_interrupt,
- (u32)pcct_ss->flags);
- if (pcc_doorbell_irq[id] <= 0) {
+ int ret = 0;
+ struct acpi_pcct_hw_reduced *pcct_ss;
+
+ if (pcct_entry->type < ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
+ pcct_entry->type > ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE)
+ return 0;
+
+ pcct_ss = (struct acpi_pcct_hw_reduced *)pcct_entry;
+ pchan->plat_irq = pcc_map_interrupt(pcct_ss->platform_interrupt,
+ (u32)pcct_ss->flags);
+ if (pchan->plat_irq <= 0) {
pr_err("PCC GSI %d not registered\n",
pcct_ss->platform_interrupt);
return -EINVAL;
}
- if (pcct_ss->header.type
- == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ if (pcct_ss->header.type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
struct acpi_pcct_hw_reduced_type2 *pcct2_ss = (void *)pcct_ss;
- pcc_doorbell_ack_vaddr[id] = acpi_os_ioremap(
- pcct2_ss->platform_ack_register.address,
- pcct2_ss->platform_ack_register.bit_width / 8);
- if (!pcc_doorbell_ack_vaddr[id]) {
- pr_err("Failed to ioremap PCC ACK register\n");
- return -ENOMEM;
- }
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct2_ss->platform_ack_register,
+ pcct2_ss->ack_preserve_mask,
+ pcct2_ss->ack_write_mask, 0,
+ "PLAT IRQ ACK");
+
+ } else if (pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_MASTER_SUBSPACE ||
+ pcct_ss->header.type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE) {
+ struct acpi_pcct_ext_pcc_master *pcct_ext = (void *)pcct_ss;
+
+ ret = pcc_chan_reg_init(&pchan->plat_irq_ack,
+ &pcct_ext->platform_ack_register,
+ pcct_ext->ack_preserve_mask,
+ pcct_ext->ack_set_mask, 0,
+ "PLAT IRQ ACK");
}
- return 0;
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_db_reg - Parse the PCC doorbell register
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_parse_subspace_db_reg(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ int ret = 0;
+
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss;
+
+ pcct_ss = (struct acpi_pcct_subspace *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ss->doorbell_register,
+ pcct_ss->preserve_mask,
+ pcct_ss->write_mask, 0, "Doorbell");
+
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext;
+
+ pcct_ext = (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ ret = pcc_chan_reg_init(&pchan->db,
+ &pcct_ext->doorbell_register,
+ pcct_ext->preserve_mask,
+ pcct_ext->write_mask, 0, "Doorbell");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_complete,
+ &pcct_ext->cmd_complete_register,
+ 0, 0, pcct_ext->cmd_complete_mask,
+ "Command Complete Check");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->cmd_update,
+ &pcct_ext->cmd_update_register,
+ pcct_ext->cmd_update_preserve_mask,
+ pcct_ext->cmd_update_set_mask, 0,
+ "Command Complete Update");
+ if (ret)
+ return ret;
+
+ ret = pcc_chan_reg_init(&pchan->error,
+ &pcct_ext->error_status_register,
+ 0, 0, pcct_ext->error_status_mask,
+ "Error Status");
+ }
+ return ret;
+}
+
+/**
+ * pcc_parse_subspace_shmem - Parse the PCC Shared Memory Region information
+ *
+ * @pchan: Pointer to the PCC channel info structure.
+ * @pcct_entry: Pointer to the ACPI subtable header.
+ *
+ */
+static void pcc_parse_subspace_shmem(struct pcc_chan_info *pchan,
+ struct acpi_subtable_header *pcct_entry)
+{
+ if (pcct_entry->type <= ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
+ struct acpi_pcct_subspace *pcct_ss =
+ (struct acpi_pcct_subspace *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ss->base_address;
+ pchan->chan.shmem_size = pcct_ss->length;
+ pchan->chan.latency = pcct_ss->latency;
+ pchan->chan.max_access_rate = pcct_ss->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ss->min_turnaround_time;
+ } else {
+ struct acpi_pcct_ext_pcc_master *pcct_ext =
+ (struct acpi_pcct_ext_pcc_master *)pcct_entry;
+
+ pchan->chan.shmem_base_addr = pcct_ext->base_address;
+ pchan->chan.shmem_size = pcct_ext->length;
+ pchan->chan.latency = pcct_ext->latency;
+ pchan->chan.max_access_rate = pcct_ext->max_access_rate;
+ pchan->chan.min_turnaround_time = pcct_ext->min_turnaround_time;
+ }
}
/**
@@ -428,16 +575,12 @@ static int pcc_parse_subspace_irq(int id,
*/
static int __init acpi_pcc_probe(void)
{
+ int count, i, rc = 0;
+ acpi_status status;
struct acpi_table_header *pcct_tbl;
- struct acpi_subtable_header *pcct_entry;
- struct acpi_table_pcct *acpi_pcct_tbl;
struct acpi_subtable_proc proc[ACPI_PCCT_TYPE_RESERVED];
- int count, i, rc;
- acpi_status status = AE_OK;
- /* Search for PCCT */
status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
-
if (ACPI_FAILURE(status) || !pcct_tbl)
return -ENODEV;
@@ -459,33 +602,60 @@ static int __init acpi_pcc_probe(void)
pr_warn("Invalid PCCT: %d PCC subspaces\n", count);
rc = -EINVAL;
- goto err_put_pcct;
+ } else {
+ pcc_chan_count = count;
}
- pcc_mbox_channels = kcalloc(count, sizeof(struct mbox_chan),
- GFP_KERNEL);
- if (!pcc_mbox_channels) {
- pr_err("Could not allocate space for PCC mbox channels\n");
- rc = -ENOMEM;
- goto err_put_pcct;
- }
+ acpi_put_table(pcct_tbl);
+
+ return rc;
+}
+
+/**
+ * pcc_mbox_probe - Called when we find a match for the
+ * PCCT platform device. This is purely used to represent
+ * the PCCT as a virtual device for registering with the
+ * generic Mailbox framework.
+ *
+ * @pdev: Pointer to platform device returned when a match
+ * is found.
+ *
+ * Return: 0 for Success, else errno.
+ */
+static int pcc_mbox_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mbox_controller *pcc_mbox_ctrl;
+ struct mbox_chan *pcc_mbox_channels;
+ struct acpi_table_header *pcct_tbl;
+ struct acpi_subtable_header *pcct_entry;
+ struct acpi_table_pcct *acpi_pcct_tbl;
+ acpi_status status = AE_OK;
+ int i, rc, count = pcc_chan_count;
+
+ /* Search for PCCT */
+ status = acpi_get_table(ACPI_SIG_PCCT, 0, &pcct_tbl);
+
+ if (ACPI_FAILURE(status) || !pcct_tbl)
+ return -ENODEV;
- pcc_doorbell_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_vaddr) {
+ pcc_mbox_channels = devm_kcalloc(dev, count, sizeof(*pcc_mbox_channels),
+ GFP_KERNEL);
+ if (!pcc_mbox_channels) {
rc = -ENOMEM;
- goto err_free_mbox;
+ goto err;
}
- pcc_doorbell_ack_vaddr = kcalloc(count, sizeof(void *), GFP_KERNEL);
- if (!pcc_doorbell_ack_vaddr) {
+ chan_info = devm_kcalloc(dev, count, sizeof(*chan_info), GFP_KERNEL);
+ if (!chan_info) {
rc = -ENOMEM;
- goto err_free_db_vaddr;
+ goto err;
}
- pcc_doorbell_irq = kcalloc(count, sizeof(int), GFP_KERNEL);
- if (!pcc_doorbell_irq) {
+ pcc_mbox_ctrl = devm_kmalloc(dev, sizeof(*pcc_mbox_ctrl), GFP_KERNEL);
+ if (!pcc_mbox_ctrl) {
rc = -ENOMEM;
- goto err_free_db_ack_vaddr;
+ goto err;
}
/* Point to the first PCC subspace entry */
@@ -494,85 +664,55 @@ static int __init acpi_pcc_probe(void)
acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
- pcc_mbox_ctrl.txdone_irq = true;
+ pcc_mbox_ctrl->txdone_irq = true;
for (i = 0; i < count; i++) {
- struct acpi_generic_address *db_reg;
- struct acpi_pcct_subspace *pcct_ss;
- pcc_mbox_channels[i].con_priv = pcct_entry;
+ struct pcc_chan_info *pchan = chan_info + i;
- if (pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE ||
- pcct_entry->type == ACPI_PCCT_TYPE_HW_REDUCED_SUBSPACE_TYPE2) {
- struct acpi_pcct_hw_reduced *pcct_hrss;
+ pcc_mbox_channels[i].con_priv = pchan;
+ pchan->chan.mchan = &pcc_mbox_channels[i];
- pcct_hrss = (struct acpi_pcct_hw_reduced *) pcct_entry;
+ if (pcct_entry->type == ACPI_PCCT_TYPE_EXT_PCC_SLAVE_SUBSPACE &&
+ !pcc_mbox_ctrl->txdone_irq) {
+ pr_err("Plaform Interrupt flag must be set to 1");
+ rc = -EINVAL;
+ goto err;
+ }
- if (pcc_mbox_ctrl.txdone_irq) {
- rc = pcc_parse_subspace_irq(i, pcct_hrss);
- if (rc < 0)
- goto err;
- }
+ if (pcc_mbox_ctrl->txdone_irq) {
+ rc = pcc_parse_subspace_irq(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
}
- pcct_ss = (struct acpi_pcct_subspace *) pcct_entry;
+ rc = pcc_parse_subspace_db_reg(pchan, pcct_entry);
+ if (rc < 0)
+ goto err;
+
+ pcc_parse_subspace_shmem(pchan, pcct_entry);
- /* If doorbell is in system memory cache the virt address */
- db_reg = &pcct_ss->doorbell_register;
- if (db_reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- pcc_doorbell_vaddr[i] = acpi_os_ioremap(db_reg->address,
- db_reg->bit_width/8);
pcct_entry = (struct acpi_subtable_header *)
((unsigned long) pcct_entry + pcct_entry->length);
}
- pcc_mbox_ctrl.num_chans = count;
+ pcc_mbox_ctrl->num_chans = count;
- pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl.num_chans);
+ pr_info("Detected %d PCC Subspaces\n", pcc_mbox_ctrl->num_chans);
- return 0;
+ pcc_mbox_ctrl->chans = pcc_mbox_channels;
+ pcc_mbox_ctrl->ops = &pcc_chan_ops;
+ pcc_mbox_ctrl->dev = dev;
+ pr_info("Registering PCC driver as Mailbox controller\n");
+ rc = mbox_controller_register(pcc_mbox_ctrl);
+ if (rc)
+ pr_err("Err registering PCC as Mailbox controller: %d\n", rc);
+ else
+ return 0;
err:
- kfree(pcc_doorbell_irq);
-err_free_db_ack_vaddr:
- kfree(pcc_doorbell_ack_vaddr);
-err_free_db_vaddr:
- kfree(pcc_doorbell_vaddr);
-err_free_mbox:
- kfree(pcc_mbox_channels);
-err_put_pcct:
acpi_put_table(pcct_tbl);
return rc;
}
-/**
- * pcc_mbox_probe - Called when we find a match for the
- * PCCT platform device. This is purely used to represent
- * the PCCT as a virtual device for registering with the
- * generic Mailbox framework.
- *
- * @pdev: Pointer to platform device returned when a match
- * is found.
- *
- * Return: 0 for Success, else errno.
- */
-static int pcc_mbox_probe(struct platform_device *pdev)
-{
- int ret = 0;
-
- pcc_mbox_ctrl.chans = pcc_mbox_channels;
- pcc_mbox_ctrl.ops = &pcc_chan_ops;
- pcc_mbox_ctrl.dev = &pdev->dev;
-
- pr_info("Registering PCC driver as Mailbox controller\n");
- ret = mbox_controller_register(&pcc_mbox_ctrl);
-
- if (ret) {
- pr_err("Err registering PCC as Mailbox controller: %d\n", ret);
- ret = -ENODEV;
- }
-
- return ret;
-}
-
static struct platform_driver pcc_mbox_driver = {
.probe = pcc_mbox_probe,
.driver = {
diff --git a/drivers/mailbox/platform_mhu.c b/drivers/mailbox/platform_mhu.c
index b6e34952246b..a5922ac0b0bf 100644
--- a/drivers/mailbox/platform_mhu.c
+++ b/drivers/mailbox/platform_mhu.c
@@ -117,7 +117,6 @@ static int platform_mhu_probe(struct platform_device *pdev)
int i, err;
struct platform_mhu *mhu;
struct device *dev = &pdev->dev;
- struct resource *res;
int platform_mhu_reg[MHU_CHANS] = {
MHU_SEC_OFFSET, MHU_LP_OFFSET, MHU_HP_OFFSET
};
@@ -127,8 +126,7 @@ static int platform_mhu_probe(struct platform_device *pdev)
if (!mhu)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mhu->base = devm_ioremap_resource(dev, res);
+ mhu->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mhu->base)) {
dev_err(dev, "ioremap failed\n");
return PTR_ERR(mhu->base);
diff --git a/drivers/mailbox/qcom-apcs-ipc-mailbox.c b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
index 82ccfaf14b24..9325d2abc745 100644
--- a/drivers/mailbox/qcom-apcs-ipc-mailbox.c
+++ b/drivers/mailbox/qcom-apcs-ipc-mailbox.c
@@ -33,10 +33,6 @@ static const struct qcom_apcs_ipc_data ipq6018_apcs_data = {
.offset = 8, .clk_name = "qcom,apss-ipq6018-clk"
};
-static const struct qcom_apcs_ipc_data ipq8074_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data msm8916_apcs_data = {
.offset = 8, .clk_name = "qcom-apcs-msm8916-clk"
};
@@ -49,18 +45,6 @@ static const struct qcom_apcs_ipc_data msm8996_apcs_data = {
.offset = 16, .clk_name = NULL
};
-static const struct qcom_apcs_ipc_data msm8998_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sdm660_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
-static const struct qcom_apcs_ipc_data sm6125_apcs_data = {
- .offset = 8, .clk_name = NULL
-};
-
static const struct qcom_apcs_ipc_data apps_shared_apcs_data = {
.offset = 12, .clk_name = NULL
};
@@ -95,7 +79,6 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
struct qcom_apcs_ipc *apcs;
const struct qcom_apcs_ipc_data *apcs_data;
struct regmap *regmap;
- struct resource *res;
void __iomem *base;
unsigned long i;
int ret;
@@ -104,8 +87,7 @@ static int qcom_apcs_ipc_probe(struct platform_device *pdev)
if (!apcs)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -160,21 +142,22 @@ static int qcom_apcs_ipc_remove(struct platform_device *pdev)
/* .data is the offset of the ipc register within the global block */
static const struct of_device_id qcom_apcs_ipc_of_match[] = {
{ .compatible = "qcom,ipq6018-apcs-apps-global", .data = &ipq6018_apcs_data },
- { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &ipq8074_apcs_data },
+ { .compatible = "qcom,ipq8074-apcs-apps-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8916-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8939-apcs-kpss-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,msm8953-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8994-apcs-kpss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,msm8996-apcs-hmss-global", .data = &msm8996_apcs_data },
- { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8998_apcs_data },
+ { .compatible = "qcom,msm8998-apcs-hmss-global", .data = &msm8994_apcs_data },
+ { .compatible = "qcom,qcm2290-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,qcs404-apcs-apps-global", .data = &msm8916_apcs_data },
{ .compatible = "qcom,sc7180-apss-shared", .data = &apps_shared_apcs_data },
{ .compatible = "qcom,sc8180x-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sdm660-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdm845-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &sm6125_apcs_data },
+ { .compatible = "qcom,sm6125-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sm8150-apss-shared", .data = &apps_shared_apcs_data },
- { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &sdm660_apcs_data },
+ { .compatible = "qcom,sm6115-apcs-hmss-global", .data = &msm8994_apcs_data },
{ .compatible = "qcom,sdx55-apcs-gcc", .data = &sdx55_apcs_data },
{}
};
diff --git a/drivers/mailbox/stm32-ipcc.c b/drivers/mailbox/stm32-ipcc.c
index b84e0587937c..15d538fe2113 100644
--- a/drivers/mailbox/stm32-ipcc.c
+++ b/drivers/mailbox/stm32-ipcc.c
@@ -205,7 +205,6 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct stm32_ipcc *ipcc;
- struct resource *res;
unsigned long i;
int ret;
u32 ip_ver;
@@ -235,8 +234,7 @@ static int stm32_ipcc_probe(struct platform_device *pdev)
}
/* regs */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ipcc->reg_base = devm_ioremap_resource(dev, res);
+ ipcc->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ipcc->reg_base))
return PTR_ERR(ipcc->reg_base);
diff --git a/drivers/mailbox/sun6i-msgbox.c b/drivers/mailbox/sun6i-msgbox.c
index ccecf2e5941d..7f8d931042d3 100644
--- a/drivers/mailbox/sun6i-msgbox.c
+++ b/drivers/mailbox/sun6i-msgbox.c
@@ -197,7 +197,6 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct mbox_chan *chans;
struct reset_control *reset;
- struct resource *res;
struct sun6i_msgbox *mbox;
int i, ret;
@@ -246,13 +245,7 @@ static int sun6i_msgbox_probe(struct platform_device *pdev)
goto err_disable_unprepare;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENODEV;
- goto err_disable_unprepare;
- }
-
- mbox->regs = devm_ioremap_resource(&pdev->dev, res);
+ mbox->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mbox->regs)) {
ret = PTR_ERR(mbox->regs);
dev_err(dev, "Failed to map MMIO resource: %d\n", ret);
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 5fc989a6d452..9ed9c955add7 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -178,7 +178,6 @@
#define pr_fmt(fmt) "bcache: %s() " fmt, __func__
-#include <linux/bcache.h>
#include <linux/bio.h>
#include <linux/kobject.h>
#include <linux/list.h>
@@ -190,6 +189,7 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
+#include "bcache_ondisk.h"
#include "bset.h"
#include "util.h"
#include "closure.h"
@@ -395,8 +395,6 @@ struct cached_dev {
atomic_t io_errors;
unsigned int error_limit;
unsigned int offline_seconds;
-
- char backing_dev_name[BDEVNAME_SIZE];
};
enum alloc_reserve {
@@ -470,8 +468,6 @@ struct cache {
atomic_long_t meta_sectors_written;
atomic_long_t btree_sectors_written;
atomic_long_t sectors_written;
-
- char cache_dev_name[BDEVNAME_SIZE];
};
struct gc_stat {
diff --git a/drivers/md/bcache/bcache_ondisk.h b/drivers/md/bcache/bcache_ondisk.h
new file mode 100644
index 000000000000..97413586195b
--- /dev/null
+++ b/drivers/md/bcache/bcache_ondisk.h
@@ -0,0 +1,445 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _LINUX_BCACHE_H
+#define _LINUX_BCACHE_H
+
+/*
+ * Bcache on disk data structures
+ */
+
+#include <linux/types.h>
+
+#define BITMASK(name, type, field, offset, size) \
+static inline __u64 name(const type *k) \
+{ return (k->field >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(type *k, __u64 v) \
+{ \
+ k->field &= ~(~(~0ULL << size) << offset); \
+ k->field |= (v & ~(~0ULL << size)) << offset; \
+}
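+
+/*
+ * Example (derived from the KEY_FIELD() users below): BITMASK(KEY_DIRTY,
+ * struct bkey, high, 36, 1) generates KEY_DIRTY(k), which returns bit 36 of
+ * k->high, and SET_KEY_DIRTY(k, v), which updates that bit in place.
+ */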
+
+/* Btree keys - all units are in sectors */
+
+struct bkey {
+ __u64 high;
+ __u64 low;
+ __u64 ptr[];
+};
+
+#define KEY_FIELD(name, field, offset, size) \
+ BITMASK(name, struct bkey, field, offset, size)
+
+#define PTR_FIELD(name, offset, size) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
+{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
+ \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
+{ \
+ k->ptr[i] &= ~(~(~0ULL << size) << offset); \
+ k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
+}
+
+#define KEY_SIZE_BITS 16
+#define KEY_MAX_U64S 8
+
+KEY_FIELD(KEY_PTRS, high, 60, 3)
+KEY_FIELD(__PAD0, high, 58, 2)
+KEY_FIELD(KEY_CSUM, high, 56, 2)
+KEY_FIELD(__PAD1, high, 55, 1)
+KEY_FIELD(KEY_DIRTY, high, 36, 1)
+
+KEY_FIELD(KEY_SIZE, high, 20, KEY_SIZE_BITS)
+KEY_FIELD(KEY_INODE, high, 0, 20)
+
+/* Next time I change the on disk format, KEY_OFFSET() won't be 64 bits */
+
+static inline __u64 KEY_OFFSET(const struct bkey *k)
+{
+ return k->low;
+}
+
+static inline void SET_KEY_OFFSET(struct bkey *k, __u64 v)
+{
+ k->low = v;
+}
+
+/*
+ * The high bit being set is a relic from when we used it to do binary
+ * searches - it told you where a key started. It's not used anymore,
+ * and can probably be safely dropped.
+ */
+#define KEY(inode, offset, size) \
+((struct bkey) { \
+ .high = (1ULL << 63) | ((__u64) (size) << 20) | (inode), \
+ .low = (offset) \
+})
+
+#define ZERO_KEY KEY(0, 0, 0)
+
+#define MAX_KEY_INODE (~(~0 << 20))
+#define MAX_KEY_OFFSET (~0ULL >> 1)
+#define MAX_KEY KEY(MAX_KEY_INODE, MAX_KEY_OFFSET, 0)
+
+#define KEY_START(k) (KEY_OFFSET(k) - KEY_SIZE(k))
+#define START_KEY(k) KEY(KEY_INODE(k), KEY_START(k), 0)
+
+#define PTR_DEV_BITS 12
+
+PTR_FIELD(PTR_DEV, 51, PTR_DEV_BITS)
+PTR_FIELD(PTR_OFFSET, 8, 43)
+PTR_FIELD(PTR_GEN, 0, 8)
+
+#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
+
+#define MAKE_PTR(gen, offset, dev) \
+ ((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
+
+/* Bkey utility code */
+
+static inline unsigned long bkey_u64s(const struct bkey *k)
+{
+ return (sizeof(struct bkey) / sizeof(__u64)) + KEY_PTRS(k);
+}
+
+static inline unsigned long bkey_bytes(const struct bkey *k)
+{
+ return bkey_u64s(k) * sizeof(__u64);
+}
+
+#define bkey_copy(_dest, _src) memcpy(_dest, _src, bkey_bytes(_src))
+
+static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
+{
+ SET_KEY_INODE(dest, KEY_INODE(src));
+ SET_KEY_OFFSET(dest, KEY_OFFSET(src));
+}
+
+static inline struct bkey *bkey_next(const struct bkey *k)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + bkey_u64s(k));
+}
+
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
+{
+ __u64 *d = (void *) k;
+
+ return (struct bkey *) (d + nr_keys);
+}
+/* Enough for a key with 6 pointers */
+#define BKEY_PAD 8
+
+#define BKEY_PADDED(key) \
+ union { struct bkey key; __u64 key ## _pad[BKEY_PAD]; }
+
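Because keys are variable length (two header words plus KEY_PTRS() pointer
words), arrays of keys are walked with bkey_next() rather than with a fixed
stride. A sketch, assuming a struct bset *i (defined further down in this
header) and bset_bkey_last() from bset.h, which the btree.c hunk below uses
the same way:

struct bkey *k;

for (k = i->start; k < bset_bkey_last(i); k = bkey_next(k))
        pr_debug("key: inode %llu offset %llu size %llu\n",
                 KEY_INODE(k), KEY_OFFSET(k), KEY_SIZE(k));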
+/* Superblock */
+
+/* Version 0: Cache device
+ * Version 1: Backing device
+ * Version 2: Seed pointer into btree node checksum
+ * Version 3: Cache device with new UUID format
+ * Version 4: Backing device with data offset
+ */
+#define BCACHE_SB_VERSION_CDEV 0
+#define BCACHE_SB_VERSION_BDEV 1
+#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
+#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
+#define BCACHE_SB_VERSION_CDEV_WITH_FEATURES 5
+#define BCACHE_SB_VERSION_BDEV_WITH_FEATURES 6
+#define BCACHE_SB_MAX_VERSION 6
+
+#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
+#define SB_SIZE 4096
+#define SB_LABEL_SIZE 32
+#define SB_JOURNAL_BUCKETS 256U
+/* SB_JOURNAL_BUCKETS must be divisible by BITS_PER_LONG */
+#define MAX_CACHES_PER_SET 8
+
+#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+
+ __le64 feature_compat;
+ __le64 feature_incompat;
+ __le64 feature_ro_compat;
+
+ __le64 pad[5];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+ __le16 obso_bucket_size_hi; /* obsoleted */
+};
+
+/*
+ * This is for in-memory bcache super block.
+ * NOTE: cache_sb is NOT exactly mapping to cache_sb_disk, the member
+ * size, ordering and even whole struct size may be different
+ * from cache_sb_disk.
+ */
+struct cache_sb {
+ __u64 offset; /* sector where this sb was written */
+ __u64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __u64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __u64 flags;
+ __u64 seq;
+
+ __u64 feature_compat;
+ __u64 feature_incompat;
+ __u64 feature_ro_compat;
+
+ union {
+ struct {
+ /* Cache devices */
+ __u64 nbuckets; /* device size */
+
+ __u16 block_size; /* sectors */
+ __u16 nr_in_set;
+ __u16 nr_this_dev;
+ __u32 bucket_size; /* sectors */
+ };
+ struct {
+ /* Backing devices */
+ __u64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __u32 last_mount; /* time overflow in y2106 */
+
+ __u16 first_bucket;
+ union {
+ __u16 njournal_buckets;
+ __u16 keys;
+ };
+ __u64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
+static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
+{
+ return sb->version == BCACHE_SB_VERSION_BDEV
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
+ || sb->version == BCACHE_SB_VERSION_BDEV_WITH_FEATURES;
+}
+
+BITMASK(CACHE_SYNC, struct cache_sb, flags, 0, 1);
+BITMASK(CACHE_DISCARD, struct cache_sb, flags, 1, 1);
+BITMASK(CACHE_REPLACEMENT, struct cache_sb, flags, 2, 3);
+#define CACHE_REPLACEMENT_LRU 0U
+#define CACHE_REPLACEMENT_FIFO 1U
+#define CACHE_REPLACEMENT_RANDOM 2U
+
+BITMASK(BDEV_CACHE_MODE, struct cache_sb, flags, 0, 4);
+#define CACHE_MODE_WRITETHROUGH 0U
+#define CACHE_MODE_WRITEBACK 1U
+#define CACHE_MODE_WRITEAROUND 2U
+#define CACHE_MODE_NONE 3U
+BITMASK(BDEV_STATE, struct cache_sb, flags, 61, 2);
+#define BDEV_STATE_NONE 0U
+#define BDEV_STATE_CLEAN 1U
+#define BDEV_STATE_DIRTY 2U
+#define BDEV_STATE_STALE 3U
+
+/*
+ * Magic numbers
+ *
+ * The various other data structures have their own magic numbers, which are
+ * xored with the first part of the cache set's UUID
+ */
+
+#define JSET_MAGIC 0x245235c1a3625032ULL
+#define PSET_MAGIC 0x6750e15f87337f91ULL
+#define BSET_MAGIC 0x90135c78b99e07f5ULL
+
+static inline __u64 jset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ JSET_MAGIC;
+}
+
+static inline __u64 pset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ PSET_MAGIC;
+}
+
+static inline __u64 bset_magic(struct cache_sb *sb)
+{
+ return sb->set_magic ^ BSET_MAGIC;
+}
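A sketch of how these derived magics would be used when validating an on-disk
structure against its owning cache set (illustration only; the real checks
live in the journal and btree code):

static bool journal_entry_magic_ok(struct cache_sb *sb, const struct jset *j)
{
        return j->magic == jset_magic(sb);
}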
+
+/*
+ * Journal
+ *
+ * On disk format for a journal entry:
+ * seq is monotonically increasing; every journal entry has its own unique
+ * sequence number.
+ *
+ * last_seq is the sequence number of the oldest journal entry that still
+ * contains keys the btree hasn't flushed to disk yet.
+ *
+ * version is for on disk format changes.
+ */
+
+#define BCACHE_JSET_VERSION_UUIDv1 1
+#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
+#define BCACHE_JSET_VERSION 1
+
+struct jset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ __u64 last_seq;
+
+ BKEY_PADDED(uuid_bucket);
+ BKEY_PADDED(btree_root);
+ __u16 btree_level;
+ __u16 pad[3];
+
+ __u64 prio_bucket[MAX_CACHES_PER_SET];
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* Bucket prios/gens */
+
+struct prio_set {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 pad;
+
+ __u64 next_bucket;
+
+ struct bucket_disk {
+ __u16 prio;
+ __u8 gen;
+ } __attribute((packed)) data[];
+};
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry {
+ union {
+ struct {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg; /* time overflow in y2106 */
+ __u32 last_reg;
+ __u32 invalidated;
+
+ __u32 flags;
+ /* Size of flash only volumes */
+ __u64 sectors;
+ };
+
+ __u8 pad[128];
+ };
+};
+
+BITMASK(UUID_FLASH_ONLY, struct uuid_entry, flags, 0, 1);
+
+/* Btree nodes */
+
+/* Version 1: Seed pointer into btree node checksum */
+#define BCACHE_BSET_CSUM 1
+#define BCACHE_BSET_VERSION 1
+
+/*
+ * Btree nodes
+ *
+ * On disk a btree node is a list/log of these; within each set the keys are
+ * sorted
+ */
+struct bset {
+ __u64 csum;
+ __u64 magic;
+ __u64 seq;
+ __u32 version;
+ __u32 keys;
+
+ union {
+ struct bkey start[0];
+ __u64 d[0];
+ };
+};
+
+/* OBSOLETE */
+
+/* UUIDS - per backing device/flash only volume metadata */
+
+struct uuid_entry_v0 {
+ __u8 uuid[16];
+ __u8 label[32];
+ __u32 first_reg;
+ __u32 last_reg;
+ __u32 invalidated;
+ __u32 pad;
+};
+
+#endif /* _LINUX_BCACHE_H */
diff --git a/drivers/md/bcache/bset.h b/drivers/md/bcache/bset.h
index a50dcfda656f..d795c84246b0 100644
--- a/drivers/md/bcache/bset.h
+++ b/drivers/md/bcache/bset.h
@@ -2,10 +2,10 @@
#ifndef _BCACHE_BSET_H
#define _BCACHE_BSET_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
#include "util.h" /* for time_stats */
/*
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 0595559de174..93b67b8d31c3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -141,7 +141,7 @@ static uint64_t btree_csum_set(struct btree *b, struct bset *i)
uint64_t crc = b->key.ptr[0];
void *data = (void *) i + 8, *end = bset_bkey_last(i);
- crc = bch_crc64_update(crc, data, end - data);
+ crc = crc64_be(crc, data, end - data);
return crc ^ 0xffffffffffffffffULL;
}
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 116edda845c3..6230dfdd9286 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -127,21 +127,20 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
citer.bi_size = UINT_MAX;
bio_for_each_segment(bv, bio, iter) {
- void *p1 = kmap_atomic(bv.bv_page);
+ void *p1 = bvec_kmap_local(&bv);
void *p2;
cbv = bio_iter_iovec(check, citer);
- p2 = page_address(cbv.bv_page);
+ p2 = bvec_kmap_local(&cbv);
- cache_set_err_on(memcmp(p1 + bv.bv_offset,
- p2 + bv.bv_offset,
- bv.bv_len),
+ cache_set_err_on(memcmp(p1, p2, bv.bv_len),
dc->disk.c,
- "verify failed at dev %s sector %llu",
- dc->backing_dev_name,
+ "verify failed at dev %pg sector %llu",
+ dc->bdev,
(uint64_t) bio->bi_iter.bi_sector);
- kunmap_atomic(p1);
+ kunmap_local(p2);
+ kunmap_local(p1);
bio_advance_iter(check, &citer, bv.bv_len);
}
diff --git a/drivers/md/bcache/features.c b/drivers/md/bcache/features.c
index 6d2b7b84a7b7..634922c5601d 100644
--- a/drivers/md/bcache/features.c
+++ b/drivers/md/bcache/features.c
@@ -6,7 +6,7 @@
* Copyright 2020 Coly Li <colyli@suse.de>
*
*/
-#include <linux/bcache.h>
+#include "bcache_ondisk.h"
#include "bcache.h"
#include "features.h"
diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h
index d1c8fd3977fc..09161b89c63e 100644
--- a/drivers/md/bcache/features.h
+++ b/drivers/md/bcache/features.h
@@ -2,10 +2,11 @@
#ifndef _BCACHE_FEATURES_H
#define _BCACHE_FEATURES_H
-#include <linux/bcache.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include "bcache_ondisk.h"
+
#define BCH_FEATURE_COMPAT 0
#define BCH_FEATURE_RO_COMPAT 1
#define BCH_FEATURE_INCOMPAT 2
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index e4388fe3ab7e..9c6f9ec55b72 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -65,15 +65,15 @@ void bch_count_backing_io_errors(struct cached_dev *dc, struct bio *bio)
* we shouldn't count failed REQ_RAHEAD bio to dc->io_errors.
*/
if (bio->bi_opf & REQ_RAHEAD) {
- pr_warn_ratelimited("%s: Read-ahead I/O failed on backing device, ignore\n",
- dc->backing_dev_name);
+ pr_warn_ratelimited("%pg: Read-ahead I/O failed on backing device, ignore\n",
+ dc->bdev);
return;
}
errors = atomic_add_return(1, &dc->io_errors);
if (errors < dc->error_limit)
- pr_err("%s: IO error on backing device, unrecoverable\n",
- dc->backing_dev_name);
+ pr_err("%pg: IO error on backing device, unrecoverable\n",
+ dc->bdev);
else
bch_cached_dev_error(dc);
}
@@ -123,13 +123,13 @@ void bch_count_io_errors(struct cache *ca,
errors >>= IO_ERROR_SHIFT;
if (errors < ca->set->error_limit)
- pr_err("%s: IO error on %s%s\n",
- ca->cache_dev_name, m,
+ pr_err("%pg: IO error on %s%s\n",
+ ca->bdev, m,
is_read ? ", recovering." : ".");
else
bch_cache_set_error(ca->set,
- "%s: too many IO errors %s\n",
- ca->cache_dev_name, m);
+ "%pg: too many IO errors %s\n",
+ ca->bdev, m);
}
}
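These io.c changes (and the matching ones in super.c and sysfs.c further down)
drop the bdevname() copies that used to be cached in struct cached_dev and
struct cache, printing the device through the %pg printk specifier instead,
which takes the struct block_device pointer directly:

/* before: a name copied at registration time */
pr_err("%s: IO error on backing device\n", dc->backing_dev_name);

/* after: format the block_device pointer itself */
pr_err("%pg: IO error on backing device\n", dc->bdev);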
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 6d1de889baeb..d15aae6c51c1 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -46,7 +46,7 @@ static void bio_csum(struct bio *bio, struct bkey *k)
bio_for_each_segment(bv, bio, iter) {
void *d = kmap(bv.bv_page) + bv.bv_offset;
- csum = bch_crc64_update(csum, d, bv.bv_len);
+ csum = crc64_be(csum, d, bv.bv_len);
kunmap(bv.bv_page);
}
@@ -651,8 +651,8 @@ static void backing_request_endio(struct bio *bio)
*/
if (unlikely(s->iop.writeback &&
bio->bi_opf & REQ_PREFLUSH)) {
- pr_err("Can't flush %s: returned bi_status %i\n",
- dc->backing_dev_name, bio->bi_status);
+ pr_err("Can't flush %pg: returned bi_status %i\n",
+ dc->bdev, bio->bi_status);
} else {
/* set to orig_bio->bi_status in bio_complete() */
s->iop.status = bio->bi_status;
@@ -1163,7 +1163,7 @@ static void quit_max_writeback_rate(struct cache_set *c,
/* Cached devices - read & write stuff */
-blk_qc_t cached_dev_submit_bio(struct bio *bio)
+void cached_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct block_device *orig_bdev = bio->bi_bdev;
@@ -1176,7 +1176,7 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
dc->io_disable)) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (likely(d->c)) {
@@ -1222,8 +1222,6 @@ blk_qc_t cached_dev_submit_bio(struct bio *bio)
} else
/* I/O request sent to backing device */
detached_dev_do_request(d, bio, orig_bdev, start_time);
-
- return BLK_QC_T_NONE;
}
static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
@@ -1273,7 +1271,7 @@ static void flash_dev_nodata(struct closure *cl)
continue_at(cl, search_free, NULL);
}
-blk_qc_t flash_dev_submit_bio(struct bio *bio)
+void flash_dev_submit_bio(struct bio *bio)
{
struct search *s;
struct closure *cl;
@@ -1282,7 +1280,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
s = search_alloc(bio, d, bio->bi_bdev, bio_start_io_acct(bio));
@@ -1298,7 +1296,7 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
continue_at_nobarrier(&s->cl,
flash_dev_nodata,
bcache_wq);
- return BLK_QC_T_NONE;
+ return;
} else if (bio_data_dir(bio)) {
bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
&KEY(d->id, bio->bi_iter.bi_sector, 0),
@@ -1314,7 +1312,6 @@ blk_qc_t flash_dev_submit_bio(struct bio *bio)
}
continue_at(cl, search_free, NULL);
- return BLK_QC_T_NONE;
}
static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
diff --git a/drivers/md/bcache/request.h b/drivers/md/bcache/request.h
index 82b38366a95d..38ab4856eaab 100644
--- a/drivers/md/bcache/request.h
+++ b/drivers/md/bcache/request.h
@@ -37,10 +37,10 @@ unsigned int bch_get_congested(const struct cache_set *c);
void bch_data_insert(struct closure *cl);
void bch_cached_dev_request_init(struct cached_dev *dc);
-blk_qc_t cached_dev_submit_bio(struct bio *bio);
+void cached_dev_submit_bio(struct bio *bio);
void bch_flash_dev_request_init(struct bcache_device *d);
-blk_qc_t flash_dev_submit_bio(struct bio *bio);
+void flash_dev_submit_bio(struct bio *bio);
extern struct kmem_cache *bch_search_cache;
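The prototype change above reflects ->submit_bio in block_device_operations
now returning void, so bio-based drivers drop their BLK_QC_T_NONE returns on
early-exit paths. The resulting shape of such a method (the mydrv_* names are
placeholders, not anything in this series):

static void mydrv_submit_bio(struct bio *bio)
{
        struct mydrv *d = bio->bi_bdev->bd_disk->private_data;  /* placeholder driver struct */

        if (unlikely(d->io_disable)) {
                bio->bi_status = BLK_STS_IOERR;
                bio_endio(bio);
                return;                 /* previously "return BLK_QC_T_NONE;" */
        }
        /* ... remap or queue the bio ... */
}

static const struct block_device_operations mydrv_fops = {
        .owner          = THIS_MODULE,
        .submit_bio     = mydrv_submit_bio,
};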
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index f2874c77ff79..4a9a65dff95e 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1002,7 +1002,7 @@ static void calc_cached_dev_sectors(struct cache_set *c)
struct cached_dev *dc;
list_for_each_entry(dc, &c->cached_devs, list)
- sectors += bdev_sectors(dc->bdev);
+ sectors += bdev_nr_sectors(dc->bdev);
c->cached_dev_sectors = sectors;
}
@@ -1026,8 +1026,8 @@ static int cached_dev_status_update(void *arg)
dc->offline_seconds = 0;
if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
- pr_err("%s: device offline for %d seconds\n",
- dc->backing_dev_name,
+ pr_err("%pg: device offline for %d seconds\n",
+ dc->bdev,
BACKING_DEV_OFFLINE_TIMEOUT);
pr_err("%s: disable I/O request due to backing device offline\n",
dc->disk.name);
@@ -1058,15 +1058,13 @@ int bch_cached_dev_run(struct cached_dev *dc)
};
if (dc->io_disable) {
- pr_err("I/O disabled on cached dev %s\n",
- dc->backing_dev_name);
+ pr_err("I/O disabled on cached dev %pg\n", dc->bdev);
ret = -EIO;
goto out;
}
if (atomic_xchg(&dc->running, 1)) {
- pr_info("cached dev %s is running already\n",
- dc->backing_dev_name);
+ pr_info("cached dev %pg is running already\n", dc->bdev);
ret = -EBUSY;
goto out;
}
@@ -1082,7 +1080,9 @@ int bch_cached_dev_run(struct cached_dev *dc)
closure_sync(&cl);
}
- add_disk(d->disk);
+ ret = add_disk(d->disk);
+ if (ret)
+ goto out;
bd_link_disk_holder(dc->bdev, dc->disk.disk);
/*
* won't show up in the uevent file, use udevadm monitor -e instead
@@ -1154,16 +1154,16 @@ static void cached_dev_detach_finish(struct work_struct *w)
mutex_lock(&bch_register_lock);
- calc_cached_dev_sectors(dc->disk.c);
bcache_device_detach(&dc->disk);
list_move(&dc->list, &uncached_devices);
+ calc_cached_dev_sectors(dc->disk.c);
clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
mutex_unlock(&bch_register_lock);
- pr_info("Caching disabled for %s\n", dc->backing_dev_name);
+ pr_info("Caching disabled for %pg\n", dc->bdev);
/* Drop ref we took in cached_dev_detach() */
closure_put(&dc->disk.cl);
@@ -1203,29 +1203,27 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
return -ENOENT;
if (dc->disk.c) {
- pr_err("Can't attach %s: already attached\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: already attached\n", dc->bdev);
return -EINVAL;
}
if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
- pr_err("Can't attach %s: shutting down\n",
- dc->backing_dev_name);
+ pr_err("Can't attach %pg: shutting down\n", dc->bdev);
return -EINVAL;
}
if (dc->sb.block_size < c->cache->sb.block_size) {
/* Will die */
- pr_err("Couldn't attach %s: block size less than set's block size\n",
- dc->backing_dev_name);
+ pr_err("Couldn't attach %pg: block size less than set's block size\n",
+ dc->bdev);
return -EINVAL;
}
/* Check whether already attached */
list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
- pr_err("Tried to attach %s but duplicate UUID already attached\n",
- dc->backing_dev_name);
+ pr_err("Tried to attach %pg but duplicate UUID already attached\n",
+ dc->bdev);
return -EINVAL;
}
@@ -1243,15 +1241,13 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
if (!u) {
if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
- pr_err("Couldn't find uuid for %s in set\n",
- dc->backing_dev_name);
+ pr_err("Couldn't find uuid for %pg in set\n", dc->bdev);
return -ENOENT;
}
u = uuid_find_empty(c);
if (!u) {
- pr_err("Not caching %s, no room for UUID\n",
- dc->backing_dev_name);
+ pr_err("Not caching %pg, no room for UUID\n", dc->bdev);
return -EINVAL;
}
}
@@ -1319,8 +1315,7 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
*/
kthread_stop(dc->writeback_thread);
cancel_writeback_rate_update_dwork(dc);
- pr_err("Couldn't run cached device %s\n",
- dc->backing_dev_name);
+ pr_err("Couldn't run cached device %pg\n", dc->bdev);
return ret;
}
@@ -1336,8 +1331,8 @@ int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
/* Allow the writeback thread to proceed */
up_write(&dc->writeback_lock);
- pr_info("Caching %s as %s on set %pU\n",
- dc->backing_dev_name,
+ pr_info("Caching %pg as %s on set %pU\n",
+ dc->bdev,
dc->disk.disk->disk_name,
dc->disk.c->set_uuid);
return 0;
@@ -1461,7 +1456,6 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
struct cache_set *c;
int ret = -ENOMEM;
- bdevname(bdev, dc->backing_dev_name);
memcpy(&dc->sb, sb, sizeof(struct cache_sb));
dc->bdev = bdev;
dc->bdev->bd_holder = dc;
@@ -1476,7 +1470,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
goto err;
- pr_info("registered backing device %s\n", dc->backing_dev_name);
+ pr_info("registered backing device %pg\n", dc->bdev);
list_add(&dc->list, &uncached_devices);
/* attach to a matched cache set if it exists */
@@ -1493,7 +1487,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
return 0;
err:
- pr_notice("error %s: %s\n", dc->backing_dev_name, err);
+ pr_notice("error %pg: %s\n", dc->bdev, err);
bcache_device_stop(&dc->disk);
return ret;
}
@@ -1534,10 +1528,11 @@ static void flash_dev_flush(struct closure *cl)
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
+ int err = -ENOMEM;
struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
GFP_KERNEL);
if (!d)
- return -ENOMEM;
+ goto err_ret;
closure_init(&d->cl, NULL);
set_closure_fn(&d->cl, flash_dev_flush, system_wq);
@@ -1551,9 +1546,12 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
bcache_device_attach(d, c, u - c->uuids);
bch_sectors_dirty_init(d);
bch_flash_dev_request_init(d);
- add_disk(d->disk);
+ err = add_disk(d->disk);
+ if (err)
+ goto err;
- if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
+ err = kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache");
+ if (err)
goto err;
bcache_device_link(d, c, "volume");
@@ -1567,7 +1565,8 @@ static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
return 0;
err:
kobject_put(&d->kobj);
- return -ENOMEM;
+err_ret:
+ return err;
}
static int flash_devs_run(struct cache_set *c)
@@ -1621,8 +1620,8 @@ bool bch_cached_dev_error(struct cached_dev *dc)
/* make others know io_disable is true earlier */
smp_mb();
- pr_err("stop %s: too many IO errors on backing device %s\n",
- dc->disk.disk->disk_name, dc->backing_dev_name);
+ pr_err("stop %s: too many IO errors on backing device %pg\n",
+ dc->disk.disk->disk_name, dc->bdev);
bcache_device_stop(&dc->disk);
return true;
@@ -2338,7 +2337,7 @@ err_btree_alloc:
err_free:
module_put(THIS_MODULE);
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2348,7 +2347,6 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
const char *err = NULL; /* must be set for any error case */
int ret = 0;
- bdevname(bdev, ca->cache_dev_name);
memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca->bdev = bdev;
ca->bdev->bd_holder = ca;
@@ -2390,14 +2388,14 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
goto out;
}
- pr_info("registered cache device %s\n", ca->cache_dev_name);
+ pr_info("registered cache device %pg\n", ca->bdev);
out:
kobject_put(&ca->kobj);
err:
if (err)
- pr_notice("error %s: %s\n", ca->cache_dev_name, err);
+ pr_notice("error %pg: %s\n", ca->bdev, err);
return ret;
}
@@ -2617,8 +2615,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
if (SB_IS_BDEV(sb)) {
struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
- if (!dc)
+ if (!dc) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
mutex_lock(&bch_register_lock);
ret = register_bdev(sb, sb_disk, bdev, dc);
@@ -2629,11 +2630,15 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
} else {
struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
- if (!ca)
+ if (!ca) {
+ ret = -ENOMEM;
+ err = "cannot allocate memory";
goto out_put_sb_page;
+ }
/* blkdev_put() will be called in bch_cache_release() */
- if (register_cache(sb, sb_disk, bdev, ca) != 0)
+ ret = register_cache(sb, sb_disk, bdev, ca);
+ if (ret)
goto out_free_sb;
}
@@ -2750,7 +2755,7 @@ static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
* The reason bch_register_lock is not held to call
* bch_cache_set_stop() and bcache_device_stop() is to
* avoid potential deadlock during reboot, because cache
- * set or bcache device stopping process will acqurie
+ * set or bcache device stopping process will acquire
* bch_register_lock too.
*
* We are safe here because bcache_is_reboot sets to
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 05ac1d6fbbf3..1f0dce30fa75 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -271,7 +271,7 @@ SHOW(__bch_cached_dev)
}
if (attr == &sysfs_backing_dev_name) {
- snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
+ snprintf(buf, BDEVNAME_SIZE + 1, "%pg", dc->bdev);
strcat(buf, "\n");
return strlen(buf);
}
diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
index 215df32f567b..c1752ba2e05b 100644
--- a/drivers/md/bcache/sysfs.h
+++ b/drivers/md/bcache/sysfs.h
@@ -51,13 +51,27 @@ STORE(fn) \
#define sysfs_printf(file, fmt, ...) \
do { \
if (attr == &sysfs_ ## file) \
- return snprintf(buf, PAGE_SIZE, fmt "\n", __VA_ARGS__); \
+ return sysfs_emit(buf, fmt "\n", __VA_ARGS__); \
} while (0)
#define sysfs_print(file, var) \
do { \
if (attr == &sysfs_ ## file) \
- return snprint(buf, PAGE_SIZE, var); \
+ return sysfs_emit(buf, \
+ __builtin_types_compatible_p(typeof(var), int) \
+ ? "%i\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned int) \
+ ? "%u\n" : \
+ __builtin_types_compatible_p(typeof(var), long) \
+ ? "%li\n" : \
+ __builtin_types_compatible_p(typeof(var), unsigned long)\
+ ? "%lu\n" : \
+ __builtin_types_compatible_p(typeof(var), int64_t) \
+ ? "%lli\n" : \
+ __builtin_types_compatible_p(typeof(var), uint64_t) \
+ ? "%llu\n" : \
+ __builtin_types_compatible_p(typeof(var), const char *) \
+ ? "%s\n" : "%i\n", var); \
} while (0)
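The sysfs_print() rewrite above folds the old snprint() helper (removed from
util.h below) into sysfs_emit(); the __builtin_types_compatible_p() chain
selects the printf format at compile time from the variable's type. A
standalone illustration of that dispatch:

unsigned long nbuckets = 1024;
const char *fmt = __builtin_types_compatible_p(typeof(nbuckets), unsigned long)
                        ? "%lu\n" : "%i\n";

/* fmt is "%lu\n" here; in the macro the chosen format goes straight into
 * sysfs_emit(buf, ..., var). */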
#define sysfs_hprint(file, val) \
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index b64460a76267..6f3cb7c92130 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -340,23 +340,6 @@ static inline int bch_strtoul_h(const char *cp, long *res)
_r; \
})
-#define snprint(buf, size, var) \
- snprintf(buf, size, \
- __builtin_types_compatible_p(typeof(var), int) \
- ? "%i\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned int) \
- ? "%u\n" : \
- __builtin_types_compatible_p(typeof(var), long) \
- ? "%li\n" : \
- __builtin_types_compatible_p(typeof(var), unsigned long)\
- ? "%lu\n" : \
- __builtin_types_compatible_p(typeof(var), int64_t) \
- ? "%lli\n" : \
- __builtin_types_compatible_p(typeof(var), uint64_t) \
- ? "%llu\n" : \
- __builtin_types_compatible_p(typeof(var), const char *) \
- ? "%s\n" : "%i\n", var)
-
ssize_t bch_hprint(char *buf, int64_t v);
bool bch_is_zero(const char *p, size_t n);
@@ -548,14 +531,6 @@ static inline uint64_t bch_crc64(const void *p, size_t len)
return crc ^ 0xffffffffffffffffULL;
}
-static inline uint64_t bch_crc64_update(uint64_t crc,
- const void *p,
- size_t len)
-{
- crc = crc64_be(crc, p, len);
- return crc;
-}
-
/*
* A stepwise-linear pseudo-exponential. This returns 1 << (x >>
* frac_bits), with the less-significant bits filled in by linear
@@ -584,8 +559,4 @@ static inline unsigned int fract_exp_two(unsigned int x,
void bch_bio_map(struct bio *bio, void *base);
int bch_bio_alloc_pages(struct bio *bio, gfp_t gfp_mask);
-static inline sector_t bdev_sectors(struct block_device *bdev)
-{
- return bdev->bd_inode->i_size >> 9;
-}
#endif /* _BCACHE_UTIL_H */
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 8120da278161..c7560f66dca8 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -45,7 +45,7 @@ static uint64_t __calc_target_rate(struct cached_dev *dc)
* backing volume uses about 2% of the cache for dirty data.
*/
uint32_t bdev_share =
- div64_u64(bdev_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
+ div64_u64(bdev_nr_sectors(dc->bdev) << WRITEBACK_SHARE_SHIFT,
c->cached_dev_sectors);
uint64_t cache_dirty_target =
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index a3b71350eec8..745e3ab4aa0a 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -8,6 +8,7 @@
#define DM_BIO_RECORD_H
#include <linux/bio.h>
+#include <linux/blk-integrity.h>
/*
* There are lots of mutable fields in the bio struct that get
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 50f3e673729c..104ebc1f08dc 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1525,7 +1525,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
- sector_t s = i_size_read(c->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t s = bdev_nr_sectors(c->bdev);
if (s >= c->start)
s -= c->start;
else
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 89a73204dbf4..2874f222c313 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -334,7 +334,7 @@ static int __write_initial_superblock(struct dm_cache_metadata *cmd)
int r;
struct dm_block *sblock;
struct cache_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(cmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(cmd->bdev);
/* FIXME: see if we can lose the max sectors limit */
if (bdev_size > DM_CACHE_METADATA_MAX_SECTORS)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bdd500447dea..447d030036d1 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1940,7 +1940,7 @@ static void cache_dtr(struct dm_target *ti)
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*----------------------------------------------------------------*/
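get_dev_size() above shows the conversion repeated across the dm targets in
this series: open-coded arithmetic on the backing inode is replaced by block
layer helpers. The substitutions being made are, in effect:

sector_t sectors = bdev_nr_sectors(bdev);  /* replaces i_size_read(bdev->bd_inode) >> SECTOR_SHIFT */
loff_t bytes     = bdev_nr_bytes(bdev);    /* replaces i_size_read(bdev->bd_inode) */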
diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c
index edd22e4d65df..4599632d7a84 100644
--- a/drivers/md/dm-clone-target.c
+++ b/drivers/md/dm-clone-target.c
@@ -1514,7 +1514,7 @@ error:
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
/*---------------------------------------------------------------------------*/
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 55dccdfbcb22..b855fef4f38a 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,7 +13,7 @@
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <trace/events/block.h>
@@ -200,7 +200,7 @@ struct dm_table {
struct dm_md_mempools *mempools;
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
- struct blk_keyslot_manager *ksm;
+ struct blk_crypto_profile *crypto_profile;
#endif
};
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 916b7da16de2..292f7896f733 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -15,6 +15,7 @@
#include <linux/key.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
diff --git a/drivers/md/dm-dust.c b/drivers/md/dm-dust.c
index 3163e2b1418e..03672204b0e3 100644
--- a/drivers/md/dm-dust.c
+++ b/drivers/md/dm-dust.c
@@ -415,7 +415,7 @@ static int dust_message(struct dm_target *ti, unsigned int argc, char **argv,
char *result, unsigned int maxlen)
{
struct dust_device *dd = ti->private;
- sector_t size = i_size_read(dd->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t size = bdev_nr_sectors(dd->dev->bdev);
bool invalid_msg = false;
int r = -EINVAL;
unsigned long long tmp, block;
@@ -544,8 +544,7 @@ static int dust_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (dd->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (dd->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index d25989660a76..7ce5d509b940 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -416,7 +416,7 @@ static int ebs_prepare_ioctl(struct dm_target *ti, struct block_device **bdev)
* Only pass ioctls through if the device sizes match exactly.
*/
*bdev = dev->bdev;
- return !!(ec->start || ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ return !!(ec->start || ti->len != bdev_nr_sectors(dev->bdev));
}
static void ebs_io_hints(struct dm_target *ti, struct queue_limits *limits)
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 2a78f6874143..1f6bf152b3c7 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -1681,7 +1681,7 @@ static int era_message(struct dm_target *ti, unsigned argc, char **argv,
static sector_t get_dev_size(struct dm_dev *dev)
{
- return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(dev->bdev);
}
static int era_iterate_devices(struct dm_target *ti,
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index 3f4139ac1f60..b5f20eba3641 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -168,7 +168,7 @@ static inline void dm_consecutive_chunk_count_dec(struct dm_exception *e)
*/
static inline sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static inline chunk_t sector_to_chunk(struct dm_exception_store *store,
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index 4b94ffe6f2d4..345229d7e59c 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -456,8 +456,7 @@ static int flakey_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (fc->start ||
- ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (fc->start || ti->len != bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-ima.c b/drivers/md/dm-ima.c
index 2c5edfbd7711..957999998d70 100644
--- a/drivers/md/dm-ima.c
+++ b/drivers/md/dm-ima.c
@@ -12,6 +12,7 @@
#include "dm-ima.h"
#include <linux/ima.h>
+#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include <linux/crypto.h>
#include <crypto/hash_info.h>
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index dc03b70f6e65..d0f788e72abf 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -4113,11 +4113,11 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
}
- ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
if (!ic->meta_dev)
ic->meta_device_sectors = ic->data_device_sectors;
else
- ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
+ ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
if (!journal_sectors) {
journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
@@ -4367,7 +4367,7 @@ try_smaller_buffer:
DEBUG_print(" journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
DEBUG_print(" journal_entries %u\n", ic->journal_entries);
DEBUG_print(" log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
- DEBUG_print(" data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
+ DEBUG_print(" data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
DEBUG_print(" initial_sectors 0x%x\n", ic->initial_sectors);
DEBUG_print(" metadata_run 0x%x\n", ic->metadata_run);
DEBUG_print(" log2_metadata_run %d\n", ic->log2_metadata_run);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 679b4c0a2eea..66ba16713f69 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -135,8 +135,7 @@ static int linear_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (lc->start ||
- ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (lc->start || ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index d93a4db23512..46de085a9670 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -446,7 +446,7 @@ static int log_super(struct log_writes_c *lc)
static inline sector_t logdev_last_sector(struct log_writes_c *lc)
{
- return i_size_read(lc->logdev->bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(lc->logdev->bdev);
}
static int log_writes_kthread(void *arg)
@@ -851,7 +851,7 @@ static int log_writes_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (ti->len != bdev_nr_sectors(dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 1ecf75ef276a..06f328928a7f 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -447,7 +447,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti,
bdev_logical_block_size(lc->header_location.
bdev));
- if (buf_size > i_size_read(dev->bdev->bd_inode)) {
+ if (buf_size > bdev_nr_bytes(dev->bdev)) {
DMWARN("log device %s too small: need %llu bytes",
dev->name, (unsigned long long)buf_size);
kfree(lc);
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 694aaca4eea2..90dc9cc48881 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -530,7 +530,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
bdev = pgpath->path.dev->bdev;
q = bdev_get_queue(bdev);
- clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
+ clone = blk_mq_alloc_request(q, rq->cmd_flags | REQ_NOMERGE,
BLK_MQ_REQ_NOWAIT);
if (IS_ERR(clone)) {
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
@@ -579,7 +579,7 @@ static void multipath_release_clone(struct request *clone,
clone->io_start_time_ns);
}
- blk_put_request(clone);
+ blk_mq_free_request(clone);
}
/*
@@ -2061,7 +2061,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ if (!r && ti->len != bdev_nr_sectors((*bdev)))
return 1;
return r;
}
diff --git a/drivers/md/dm-ps-historical-service-time.c b/drivers/md/dm-ps-historical-service-time.c
index 1856a1b125cc..875bca30a0dd 100644
--- a/drivers/md/dm-ps-historical-service-time.c
+++ b/drivers/md/dm-ps-historical-service-time.c
@@ -27,6 +27,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/sched/clock.h>
#define DM_MSG_PREFIX "multipath historical-service-time"
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index d9ef52159a22..2b26435a6946 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -1261,7 +1261,7 @@ static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
md_rdev_init(jdev);
jdev->mddev = &rs->md;
jdev->bdev = rs->journal_dev.dev->bdev;
- jdev->sectors = to_sector(i_size_read(jdev->bdev->bd_inode));
+ jdev->sectors = bdev_nr_sectors(jdev->bdev);
if (jdev->sectors < MIN_RAID456_JOURNAL_SPACE) {
rs->ti->error = "No space for raid4/5/6 journal";
return -ENOSPC;
@@ -1607,7 +1607,7 @@ static int _check_data_dev_sectors(struct raid_set *rs)
rdev_for_each(rdev, &rs->md)
if (!test_bit(Journal, &rdev->flags) && rdev->bdev) {
- ds = min(ds, to_sector(i_size_read(rdev->bdev->bd_inode)));
+ ds = min(ds, bdev_nr_sectors(rdev->bdev));
if (ds < rs->md.dev_sectors) {
rs->ti->error = "Component device(s) too small";
return -EINVAL;
@@ -2662,7 +2662,7 @@ static int rs_adjust_data_offsets(struct raid_set *rs)
* Make sure we got a minimum amount of free sectors per device
*/
if (rs->data_offset &&
- to_sector(i_size_read(rdev->bdev->bd_inode)) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
+ bdev_nr_sectors(rdev->bdev) - rs->md.dev_sectors < MIN_FREE_RESHAPE_SPACE) {
rs->ti->error = data_offset ? "No space for forward reshape" :
"No space for backward reshape";
return -ENOSPC;
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index a896dea9750e..579ab6183d4d 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -7,7 +7,6 @@
#include "dm-core.h"
#include "dm-rq.h"
-#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "core-rq"
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index 028a92ff6d57..534dc2ca8bb0 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -529,7 +529,7 @@ static int switch_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
* Only pass ioctls through if the device sizes match exactly.
*/
if (ti->len + sctx->path_list[path_nr].start !=
- i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
+ bdev_nr_sectors((*bdev)))
return 1;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 2111daaacaba..bcddc5effd15 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -169,7 +170,7 @@ static void free_devices(struct list_head *devices, struct mapped_device *md)
}
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t);
+static void dm_table_destroy_crypto_profile(struct dm_table *t);
void dm_table_destroy(struct dm_table *t)
{
@@ -199,7 +200,7 @@ void dm_table_destroy(struct dm_table *t)
dm_free_md_mempools(t->mempools);
- dm_table_destroy_keyslot_manager(t);
+ dm_table_destroy_crypto_profile(t);
kfree(t);
}
@@ -226,8 +227,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
{
struct queue_limits *limits = data;
struct block_device *bdev = dev->bdev;
- sector_t dev_size =
- i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t dev_size = bdev_nr_sectors(bdev);
unsigned short logical_block_size_sectors =
limits->logical_block_size >> SECTOR_SHIFT;
char b[BDEVNAME_SIZE];
@@ -1186,8 +1186,8 @@ static int dm_table_register_integrity(struct dm_table *t)
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-struct dm_keyslot_manager {
- struct blk_keyslot_manager ksm;
+struct dm_crypto_profile {
+ struct blk_crypto_profile profile;
struct mapped_device *md;
};
@@ -1213,13 +1213,11 @@ static int dm_keyslot_evict_callback(struct dm_target *ti, struct dm_dev *dev,
* When an inline encryption key is evicted from a device-mapper device, evict
* it from all the underlying devices.
*/
-static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int dm_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key, unsigned int slot)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
- struct mapped_device *md = dksm->md;
+ struct mapped_device *md =
+ container_of(profile, struct dm_crypto_profile, profile)->md;
struct dm_keyslot_evict_args args = { key };
struct dm_table *t;
int srcu_idx;
@@ -1239,150 +1237,148 @@ static int dm_keyslot_evict(struct blk_keyslot_manager *ksm,
return args.err;
}
-static const struct blk_ksm_ll_ops dm_ksm_ll_ops = {
- .keyslot_evict = dm_keyslot_evict,
-};
-
-static int device_intersect_crypto_modes(struct dm_target *ti,
- struct dm_dev *dev, sector_t start,
- sector_t len, void *data)
+static int
+device_intersect_crypto_capabilities(struct dm_target *ti, struct dm_dev *dev,
+ sector_t start, sector_t len, void *data)
{
- struct blk_keyslot_manager *parent = data;
- struct blk_keyslot_manager *child = bdev_get_queue(dev->bdev)->ksm;
+ struct blk_crypto_profile *parent = data;
+ struct blk_crypto_profile *child =
+ bdev_get_queue(dev->bdev)->crypto_profile;
- blk_ksm_intersect_modes(parent, child);
+ blk_crypto_intersect_capabilities(parent, child);
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
- struct dm_keyslot_manager *dksm = container_of(ksm,
- struct dm_keyslot_manager,
- ksm);
+ struct dm_crypto_profile *dmcp = container_of(profile,
+ struct dm_crypto_profile,
+ profile);
- if (!ksm)
+ if (!profile)
return;
- blk_ksm_destroy(ksm);
- kfree(dksm);
+ blk_crypto_profile_destroy(profile);
+ kfree(dmcp);
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
- dm_destroy_keyslot_manager(t->ksm);
- t->ksm = NULL;
+ dm_destroy_crypto_profile(t->crypto_profile);
+ t->crypto_profile = NULL;
}
/*
- * Constructs and initializes t->ksm with a keyslot manager that
- * represents the common set of crypto capabilities of the devices
- * described by the dm_table. However, if the constructed keyslot
- * manager does not support a superset of the crypto capabilities
- * supported by the current keyslot manager of the mapped_device,
- * it returns an error instead, since we don't support restricting
- * crypto capabilities on table changes. Finally, if the constructed
- * keyslot manager doesn't actually support any crypto modes at all,
- * it just returns NULL.
+ * Constructs and initializes t->crypto_profile with a crypto profile that
+ * represents the common set of crypto capabilities of the devices described by
+ * the dm_table. However, if the constructed crypto profile doesn't support all
+ * crypto capabilities that are supported by the current mapped_device, it
+ * returns an error instead, since we don't support removing crypto capabilities
+ * on table changes. Finally, if the constructed crypto profile is "empty" (has
+ * no crypto capabilities at all), it just sets t->crypto_profile to NULL.
*/
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
- struct dm_keyslot_manager *dksm;
- struct blk_keyslot_manager *ksm;
+ struct dm_crypto_profile *dmcp;
+ struct blk_crypto_profile *profile;
struct dm_target *ti;
unsigned int i;
- bool ksm_is_empty = true;
+ bool empty_profile = true;
- dksm = kmalloc(sizeof(*dksm), GFP_KERNEL);
- if (!dksm)
+ dmcp = kmalloc(sizeof(*dmcp), GFP_KERNEL);
+ if (!dmcp)
return -ENOMEM;
- dksm->md = t->md;
+ dmcp->md = t->md;
- ksm = &dksm->ksm;
- blk_ksm_init_passthrough(ksm);
- ksm->ksm_ll_ops = dm_ksm_ll_ops;
- ksm->max_dun_bytes_supported = UINT_MAX;
- memset(ksm->crypto_modes_supported, 0xFF,
- sizeof(ksm->crypto_modes_supported));
+ profile = &dmcp->profile;
+ blk_crypto_profile_init(profile, 0);
+ profile->ll_ops.keyslot_evict = dm_keyslot_evict;
+ profile->max_dun_bytes_supported = UINT_MAX;
+ memset(profile->modes_supported, 0xFF,
+ sizeof(profile->modes_supported));
for (i = 0; i < dm_table_get_num_targets(t); i++) {
ti = dm_table_get_target(t, i);
if (!dm_target_passes_crypto(ti->type)) {
- blk_ksm_intersect_modes(ksm, NULL);
+ blk_crypto_intersect_capabilities(profile, NULL);
break;
}
if (!ti->type->iterate_devices)
continue;
- ti->type->iterate_devices(ti, device_intersect_crypto_modes,
- ksm);
+ ti->type->iterate_devices(ti,
+ device_intersect_crypto_capabilities,
+ profile);
}
- if (t->md->queue && !blk_ksm_is_superset(ksm, t->md->queue->ksm)) {
+ if (t->md->queue &&
+ !blk_crypto_has_capabilities(profile,
+ t->md->queue->crypto_profile)) {
DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!");
- dm_destroy_keyslot_manager(ksm);
+ dm_destroy_crypto_profile(profile);
return -EINVAL;
}
/*
- * If the new KSM doesn't actually support any crypto modes, we may as
- * well represent it with a NULL ksm.
+ * If the new profile doesn't actually support any crypto capabilities,
+ * we may as well represent it with a NULL profile.
*/
- ksm_is_empty = true;
- for (i = 0; i < ARRAY_SIZE(ksm->crypto_modes_supported); i++) {
- if (ksm->crypto_modes_supported[i]) {
- ksm_is_empty = false;
+ for (i = 0; i < ARRAY_SIZE(profile->modes_supported); i++) {
+ if (profile->modes_supported[i]) {
+ empty_profile = false;
break;
}
}
- if (ksm_is_empty) {
- dm_destroy_keyslot_manager(ksm);
- ksm = NULL;
+ if (empty_profile) {
+ dm_destroy_crypto_profile(profile);
+ profile = NULL;
}
/*
- * t->ksm is only set temporarily while the table is being set
- * up, and it gets set to NULL after the capabilities have
- * been transferred to the request_queue.
+ * t->crypto_profile is only set temporarily while the table is being
+ * set up, and it gets set to NULL after the profile has been
+ * transferred to the request_queue.
*/
- t->ksm = ksm;
+ t->crypto_profile = profile;
return 0;
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
- if (!t->ksm)
+ if (!t->crypto_profile)
return;
- /* Make the ksm less restrictive */
- if (!q->ksm) {
- blk_ksm_register(t->ksm, q);
+ /* Make the crypto profile less restrictive. */
+ if (!q->crypto_profile) {
+ blk_crypto_register(t->crypto_profile, q);
} else {
- blk_ksm_update_capabilities(q->ksm, t->ksm);
- dm_destroy_keyslot_manager(t->ksm);
+ blk_crypto_update_capabilities(q->crypto_profile,
+ t->crypto_profile);
+ dm_destroy_crypto_profile(t->crypto_profile);
}
- t->ksm = NULL;
+ t->crypto_profile = NULL;
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static int dm_table_construct_keyslot_manager(struct dm_table *t)
+static int dm_table_construct_crypto_profile(struct dm_table *t)
{
return 0;
}
-void dm_destroy_keyslot_manager(struct blk_keyslot_manager *ksm)
+void dm_destroy_crypto_profile(struct blk_crypto_profile *profile)
{
}
-static void dm_table_destroy_keyslot_manager(struct dm_table *t)
+static void dm_table_destroy_crypto_profile(struct dm_table *t)
{
}
-static void dm_update_keyslot_manager(struct request_queue *q,
- struct dm_table *t)
+static void dm_update_crypto_profile(struct request_queue *q,
+ struct dm_table *t)
{
}
@@ -1414,9 +1410,9 @@ int dm_table_complete(struct dm_table *t)
return r;
}
- r = dm_table_construct_keyslot_manager(t);
+ r = dm_table_construct_crypto_profile(t);
if (r) {
- DMERR("could not construct keyslot manager.");
+ DMERR("could not construct crypto profile.");
return r;
}
@@ -2070,7 +2066,7 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
return r;
}
- dm_update_keyslot_manager(q, t);
+ dm_update_crypto_profile(q, t);
disk_update_readahead(t->md->disk);
return 0;
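dm_table_construct_crypto_profile() above is a complete example of the renamed
inline-encryption API; reduced to its essentials, setting up and registering a
passthrough profile looks roughly like this (my_evict() and the surrounding
driver structure are placeholders):

static struct blk_crypto_profile my_profile;    /* normally embedded in a driver-private struct */

static void my_setup_crypto(struct request_queue *q)
{
        blk_crypto_profile_init(&my_profile, 0);        /* 0 keyslots: passthrough profile */
        my_profile.ll_ops.keyslot_evict = my_evict;     /* placeholder eviction callback */
        my_profile.max_dun_bytes_supported = UINT_MAX;
        memset(my_profile.modes_supported, 0xFF,
               sizeof(my_profile.modes_supported));

        blk_crypto_register(&my_profile, q);            /* as dm_update_crypto_profile() does */
}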
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index c88ed14d49e6..1a96a07cbf44 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -549,7 +549,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
int r;
struct dm_block *sblock;
struct thin_disk_superblock *disk_super;
- sector_t bdev_size = i_size_read(pmd->bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t bdev_size = bdev_nr_sectors(pmd->bdev);
if (bdev_size > THIN_METADATA_MAX_SECTORS)
bdev_size = THIN_METADATA_MAX_SECTORS;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4c67b77c23c1..ec119d2422d5 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -3212,7 +3212,7 @@ static int metadata_pre_commit_callback(void *context)
static sector_t get_dev_size(struct block_device *bdev)
{
- return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ return bdev_nr_sectors(bdev);
}
static void warn_if_metadata_device_too_big(struct block_device *bdev)
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 88288c8d6bc8..a7efe83aad29 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -18,6 +18,7 @@
#include "dm-verity-verify-sig.h"
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/scatterlist.h>
#define DM_MSG_PREFIX "verity"
@@ -833,8 +834,7 @@ static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev
*bdev = v->data_dev->bdev;
- if (v->data_start ||
- ti->len != i_size_read(v->data_dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ if (v->data_start || ti->len != bdev_nr_sectors(v->data_dev->bdev))
return 1;
return 0;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 18320444fb0a..017806096b91 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -2341,7 +2341,7 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Cache data device lookup failed";
goto bad;
}
- wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
+ wc->memory_map_size = bdev_nr_bytes(wc->ssd_dev->bdev);
/*
* Parse the cache block size
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index ae1bc48c0043..8dc21c09329f 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -733,7 +733,7 @@ static int dmz_get_zoned_device(struct dm_target *ti, char *path,
dev->dev_idx = idx;
(void)bdevname(dev->bdev, dev->name);
- dev->capacity = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ dev->capacity = bdev_nr_sectors(bdev);
if (ti->begin) {
ti->error = "Partial mapping is not supported";
goto err;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 76d9da49fda7..63aa52263658 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -29,7 +29,7 @@
#include <linux/refcount.h>
#include <linux/part_stat.h>
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#define DM_MSG_PREFIX "core"
@@ -1183,14 +1183,13 @@ static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
mutex_unlock(&md->swap_bios_lock);
}
-static blk_qc_t __map_bio(struct dm_target_io *tio)
+static void __map_bio(struct dm_target_io *tio)
{
int r;
sector_t sector;
struct bio *clone = &tio->clone;
struct dm_io *io = tio->io;
struct dm_target *ti = tio->ti;
- blk_qc_t ret = BLK_QC_T_NONE;
clone->bi_end_io = clone_endio;
@@ -1226,7 +1225,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */
trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
- ret = submit_bio_noacct(clone);
+ submit_bio_noacct(clone);
break;
case DM_MAPIO_KILL:
if (unlikely(swap_bios_limit(ti, clone))) {
@@ -1248,8 +1247,6 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
DMWARN("unimplemented target map return value: %d", r);
BUG();
}
-
- return ret;
}
static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
@@ -1336,7 +1333,7 @@ static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
}
}
-static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
+static void __clone_and_map_simple_bio(struct clone_info *ci,
struct dm_target_io *tio, unsigned *len)
{
struct bio *clone = &tio->clone;
@@ -1346,8 +1343,7 @@ static blk_qc_t __clone_and_map_simple_bio(struct clone_info *ci,
__bio_clone_fast(clone, ci->bio);
if (len)
bio_setup_sector(clone, ci->sector, *len);
-
- return __map_bio(tio);
+ __map_bio(tio);
}
static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
@@ -1361,7 +1357,7 @@ static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
while ((bio = bio_list_pop(&blist))) {
tio = container_of(bio, struct dm_target_io, clone);
- (void) __clone_and_map_simple_bio(ci, tio, len);
+ __clone_and_map_simple_bio(ci, tio, len);
}
}
@@ -1405,7 +1401,7 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
free_tio(tio);
return r;
}
- (void) __map_bio(tio);
+ __map_bio(tio);
return 0;
}
@@ -1520,11 +1516,10 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
/*
* Entry point to split a bio into clones and submit them to the targets.
*/
-static blk_qc_t __split_and_process_bio(struct mapped_device *md,
+static void __split_and_process_bio(struct mapped_device *md,
struct dm_table *map, struct bio *bio)
{
struct clone_info ci;
- blk_qc_t ret = BLK_QC_T_NONE;
int error = 0;
init_clone_info(&ci, md, map, bio);
@@ -1567,19 +1562,17 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
bio_chain(b, bio);
trace_block_split(b, bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
+ submit_bio_noacct(bio);
}
}
/* drop the extra reference count */
dm_io_dec_pending(ci.io, errno_to_blk_status(error));
- return ret;
}
-static blk_qc_t dm_submit_bio(struct bio *bio)
+static void dm_submit_bio(struct bio *bio)
{
struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
struct dm_table *map;
@@ -1609,10 +1602,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
if (is_abnormal_io(bio))
blk_queue_split(&bio);
- ret = __split_and_process_bio(md, map, bio);
+ __split_and_process_bio(md, map, bio);
out:
dm_put_live_table(md, srcu_idx);
- return ret;
}
/*-----------------------------------------------------------------
@@ -1671,14 +1663,14 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
-static void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
- dm_destroy_keyslot_manager(q->ksm);
+ dm_destroy_crypto_profile(q->crypto_profile);
}
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
-static inline void dm_queue_destroy_keyslot_manager(struct request_queue *q)
+static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
{
}
#endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
@@ -1704,7 +1696,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
dm_sysfs_exit(md);
del_gendisk(md->disk);
}
- dm_queue_destroy_keyslot_manager(md->queue);
+ dm_queue_destroy_crypto_profile(md->queue);
blk_cleanup_disk(md->disk);
}
@@ -2086,7 +2078,9 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
if (r)
return r;
- add_disk(md->disk);
+ r = add_disk(md->disk);
+ if (r)
+ return r;
r = dm_sysfs_init(md);
if (r) {
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6c0c3d0d905a..5111ed966947 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -41,6 +41,7 @@
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
@@ -51,6 +52,7 @@
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
+#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
@@ -352,7 +354,7 @@ static bool create_on_open = true;
*/
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
-void md_new_event(struct mddev *mddev)
+void md_new_event(void)
{
atomic_inc(&md_event_count);
wake_up(&md_event_waiters);
@@ -441,19 +443,19 @@ check_suspended:
}
EXPORT_SYMBOL(md_handle_request);
-static blk_qc_t md_submit_bio(struct bio *bio)
+static void md_submit_bio(struct bio *bio)
{
const int rw = bio_data_dir(bio);
struct mddev *mddev = bio->bi_bdev->bd_disk->private_data;
if (mddev == NULL || mddev->pers == NULL) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) {
bio_io_error(bio);
- return BLK_QC_T_NONE;
+ return;
}
blk_queue_split(&bio);
@@ -462,15 +464,13 @@ static blk_qc_t md_submit_bio(struct bio *bio)
if (bio_sectors(bio) != 0)
bio->bi_status = BLK_STS_IOERR;
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
}
/* bio could be mergeable after passing to underlayer */
bio->bi_opf &= ~REQ_NOMERGE;
md_handle_request(mddev, bio);
-
- return BLK_QC_T_NONE;
}
/* mddev_suspend makes sure no new requests are submitted
@@ -888,8 +888,7 @@ static struct md_personality *find_pers(int level, char *clevel)
/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
- sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
- return MD_NEW_SIZE_SECTORS(num_sectors);
+ return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev));
}
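
Aside (illustrative, assumes a valid struct block_device pointer): the conversions in this file replace open-coded inode size arithmetic with the bdev_nr_sectors()/bdev_nr_bytes() helpers.

/* Sketch, not from the patch: both forms give the size in 512-byte sectors;
 * the helper avoids reaching into bdev->bd_inode directly. */
static sector_t example_dev_sectors(struct block_device *bdev)
{
	return bdev_nr_sectors(bdev);	/* == i_size_read(bdev->bd_inode) >> 9 */
}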
static int alloc_disk_sb(struct md_rdev *rdev)
@@ -1631,8 +1630,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
*/
switch(minor_version) {
case 0:
- sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
- sb_start -= 8*2;
+ sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2;
sb_start &= ~(sector_t)(4*2-1);
break;
case 1:
@@ -1787,10 +1785,9 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
else
ret = 0;
}
- if (minor_version) {
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9);
- sectors -= rdev->data_offset;
- } else
+ if (minor_version)
+ sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
+ else
sectors = rdev->sb_start;
if (sectors < le64_to_cpu(sb->data_size))
return -EINVAL;
@@ -2168,8 +2165,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
return 0; /* too confusing */
if (rdev->sb_start < rdev->data_offset) {
/* minor versions 1 and 2; superblock before data */
- max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
- max_sectors -= rdev->data_offset;
+ max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset;
if (!num_sectors || num_sectors > max_sectors)
num_sectors = max_sectors;
} else if (rdev->mddev->bitmap_info.offset) {
@@ -2178,7 +2174,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
} else {
/* minor version 0; superblock after data */
sector_t sb_start, bm_space;
- sector_t dev_size = i_size_read(rdev->bdev->bd_inode) >> 9;
+ sector_t dev_size = bdev_nr_sectors(rdev->bdev);
/* 8K is for superblock */
sb_start = dev_size - 8*2;
@@ -2886,7 +2882,7 @@ static int add_bound_rdev(struct md_rdev *rdev)
if (mddev->degraded)
set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
- md_new_event(mddev);
+ md_new_event();
md_wakeup_thread(mddev->thread);
return 0;
}
@@ -2976,7 +2972,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
* -write_error - clears WriteErrorSeen
* {,-}failfast - set/clear FailFast
*/
+
+ struct mddev *mddev = rdev->mddev;
int err = -EINVAL;
+ bool need_update_sb = false;
+
if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
md_error(rdev->mddev, rdev);
if (test_bit(Faulty, &rdev->flags))
@@ -2991,7 +2991,6 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->raid_disk >= 0)
err = -EBUSY;
else {
- struct mddev *mddev = rdev->mddev;
err = 0;
if (mddev_is_clustered(mddev))
err = md_cluster_ops->remove_disk(mddev, rdev);
@@ -3002,16 +3001,18 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
md_wakeup_thread(mddev->thread);
}
- md_new_event(mddev);
+ md_new_event();
}
}
} else if (cmd_match(buf, "writemostly")) {
set_bit(WriteMostly, &rdev->flags);
mddev_create_serial_pool(rdev->mddev, rdev, false);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-writemostly")) {
mddev_destroy_serial_pool(rdev->mddev, rdev, false);
clear_bit(WriteMostly, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "blocked")) {
set_bit(Blocked, &rdev->flags);
@@ -3037,9 +3038,11 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
err = 0;
} else if (cmd_match(buf, "failfast")) {
set_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-failfast")) {
clear_bit(FailFast, &rdev->flags);
+ need_update_sb = true;
err = 0;
} else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 &&
!test_bit(Journal, &rdev->flags)) {
@@ -3118,6 +3121,8 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
clear_bit(ExternalBbl, &rdev->flags);
err = 0;
}
+ if (need_update_sb)
+ md_update_sb(mddev, 1);
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
return err ? err : len;
@@ -3382,7 +3387,7 @@ rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
if (!sectors)
return -EBUSY;
} else if (!sectors)
- sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
+ sectors = bdev_nr_sectors(rdev->bdev) -
rdev->data_offset;
if (!my_mddev->pers->resize)
/* Cannot change size for RAID0 or Linear etc */
@@ -3709,7 +3714,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe
kobject_init(&rdev->kobj, &rdev_ktype);
- size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
+ size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS;
if (!size) {
pr_warn("md: %s has zero or unknown size, marking faulty!\n",
bdevname(rdev->bdev,b));
@@ -4099,7 +4104,7 @@ level_store(struct mddev *mddev, const char *buf, size_t len)
if (!mddev->thread)
md_update_sb(mddev, 1);
sysfs_notify_dirent_safe(mddev->sysfs_level);
- md_new_event(mddev);
+ md_new_event();
rv = len;
out_unlock:
mddev_unlock(mddev);
@@ -4620,7 +4625,7 @@ new_dev_store(struct mddev *mddev, const char *buf, size_t len)
export_rdev(rdev);
mddev_unlock(mddev);
if (!err)
- md_new_event(mddev);
+ md_new_event();
return err ? err : len;
}
@@ -5490,6 +5495,10 @@ static struct attribute *md_default_attrs[] = {
NULL,
};
+static const struct attribute_group md_default_group = {
+ .attrs = md_default_attrs,
+};
+
static struct attribute *md_redundancy_attrs[] = {
&md_scan_mode.attr,
&md_last_scan_mode.attr,
@@ -5512,6 +5521,12 @@ static const struct attribute_group md_redundancy_group = {
.attrs = md_redundancy_attrs,
};
+static const struct attribute_group *md_attr_groups[] = {
+ &md_default_group,
+ &md_bitmap_group,
+ NULL,
+};
+
static ssize_t
md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
@@ -5587,7 +5602,7 @@ static const struct sysfs_ops md_sysfs_ops = {
static struct kobj_type md_ktype = {
.release = md_free,
.sysfs_ops = &md_sysfs_ops,
- .default_attrs = md_default_attrs,
+ .default_groups = md_attr_groups,
};
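
Aside, a generic sketch of the .default_groups pattern used above (all example_* names are illustrative): attributes are wrapped in an attribute_group and a NULL-terminated group array, which the kobj_type points at instead of a raw default_attrs list.

/* Sketch only; names are hypothetical. */
static struct attribute example_attr_foo = {
	.name = "foo",
	.mode = 0444,
};
static struct attribute *example_attrs[] = {
	&example_attr_foo,
	NULL,
};
static const struct attribute_group example_group = {
	.attrs = example_attrs,
};
static const struct attribute_group *example_groups[] = {
	&example_group,
	NULL,
};
static struct kobj_type example_ktype = {
	.default_groups = example_groups,
};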
int mdp_major = 0;
@@ -5596,7 +5611,6 @@ static void mddev_delayed_delete(struct work_struct *ws)
{
struct mddev *mddev = container_of(ws, struct mddev, del_work);
- sysfs_remove_group(&mddev->kobj, &md_bitmap_group);
kobject_del(&mddev->kobj);
kobject_put(&mddev->kobj);
}
@@ -5663,7 +5677,7 @@ static int md_alloc(dev_t dev, char *name)
strcmp(mddev2->gendisk->disk_name, name) == 0) {
spin_unlock(&all_mddevs_lock);
error = -EEXIST;
- goto abort;
+ goto out_unlock_disks_mutex;
}
spin_unlock(&all_mddevs_lock);
}
@@ -5676,7 +5690,7 @@ static int md_alloc(dev_t dev, char *name)
error = -ENOMEM;
disk = blk_alloc_disk(NUMA_NO_NODE);
if (!disk)
- goto abort;
+ goto out_unlock_disks_mutex;
disk->major = MAJOR(mddev->unit);
disk->first_minor = unit << shift;
@@ -5700,27 +5714,25 @@ static int md_alloc(dev_t dev, char *name)
disk->flags |= GENHD_FL_EXT_DEVT;
disk->events |= DISK_EVENT_MEDIA_CHANGE;
mddev->gendisk = disk;
- add_disk(disk);
+ error = add_disk(disk);
+ if (error)
+ goto out_cleanup_disk;
error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md");
- if (error) {
- /* This isn't possible, but as kobject_init_and_add is marked
- * __must_check, we must do something with the result
- */
- pr_debug("md: cannot register %s/md - name in use\n",
- disk->disk_name);
- error = 0;
- }
- if (mddev->kobj.sd &&
- sysfs_create_group(&mddev->kobj, &md_bitmap_group))
- pr_debug("pointless warning\n");
- abort:
+ if (error)
+ goto out_del_gendisk;
+
+ kobject_uevent(&mddev->kobj, KOBJ_ADD);
+ mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
+ mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
+ goto out_unlock_disks_mutex;
+
+out_del_gendisk:
+ del_gendisk(disk);
+out_cleanup_disk:
+ blk_cleanup_disk(disk);
+out_unlock_disks_mutex:
mutex_unlock(&disks_mutex);
- if (!error && mddev->kobj.sd) {
- kobject_uevent(&mddev->kobj, KOBJ_ADD);
- mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state");
- mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level");
- }
mddev_put(mddev);
return error;
}
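
Aside (hypothetical helper, mirroring the error path added above): add_disk() can now fail, so callers check its return value and unwind with the matching cleanup helpers.

/* Sketch only: error handling for the add_disk() that now returns int. */
static int example_register_disk(struct gendisk *disk)
{
	int err;

	err = add_disk(disk);
	if (err) {
		blk_cleanup_disk(disk);	/* undo blk_alloc_disk() */
		return err;
	}
	return 0;
}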
@@ -6034,7 +6046,7 @@ int md_run(struct mddev *mddev)
if (mddev->sb_flags)
md_update_sb(mddev, 0);
- md_new_event(mddev);
+ md_new_event();
return 0;
bitmap_abort:
@@ -6424,7 +6436,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
if (mddev->hold_active == UNTIL_STOP)
mddev->hold_active = 0;
}
- md_new_event(mddev);
+ md_new_event();
sysfs_notify_dirent_safe(mddev->sysfs_state);
return 0;
}
@@ -6880,7 +6892,7 @@ int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info)
if (!mddev->persistent) {
pr_debug("md: nonpersistent superblock ...\n");
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
} else
rdev->sb_start = calc_dev_sboffset(rdev);
rdev->sectors = rdev->sb_start;
@@ -6928,7 +6940,7 @@ kick_rdev:
md_wakeup_thread(mddev->thread);
else
md_update_sb(mddev, 1);
- md_new_event(mddev);
+ md_new_event();
return 0;
busy:
@@ -6967,7 +6979,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
if (mddev->persistent)
rdev->sb_start = calc_dev_sboffset(rdev);
else
- rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+ rdev->sb_start = bdev_nr_sectors(rdev->bdev);
rdev->sectors = rdev->sb_start;
@@ -7001,7 +7013,7 @@ static int hot_add_disk(struct mddev *mddev, dev_t dev)
*/
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
md_wakeup_thread(mddev->thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort_export:
@@ -7975,7 +7987,7 @@ void md_error(struct mddev *mddev, struct md_rdev *rdev)
md_wakeup_thread(mddev->thread);
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
- md_new_event(mddev);
+ md_new_event();
}
EXPORT_SYMBOL(md_error);
@@ -8859,7 +8871,7 @@ void md_do_sync(struct md_thread *thread)
mddev->curr_resync = 3; /* no longer delayed */
mddev->curr_resync_completed = j;
sysfs_notify_dirent_safe(mddev->sysfs_completed);
- md_new_event(mddev);
+ md_new_event();
update_time = jiffies;
blk_start_plug(&plug);
@@ -8930,7 +8942,7 @@ void md_do_sync(struct md_thread *thread)
/* this is the earliest that rebuild will be
* visible in /proc/mdstat
*/
- md_new_event(mddev);
+ md_new_event();
if (last_check + window > io_sectors || j == max_sectors)
continue;
@@ -9154,7 +9166,7 @@ static int remove_and_add_spares(struct mddev *mddev,
sysfs_link_rdev(mddev, rdev);
if (!test_bit(Journal, &rdev->flags))
spares++;
- md_new_event(mddev);
+ md_new_event();
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
}
}
@@ -9188,7 +9200,7 @@ static void md_start_sync(struct work_struct *ws)
} else
md_wakeup_thread(mddev->sync_thread);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
}
/*
@@ -9447,7 +9459,7 @@ void md_reap_sync_thread(struct mddev *mddev)
/* flag recovery needed just to double check */
set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
sysfs_notify_dirent_safe(mddev->sysfs_action);
- md_new_event(mddev);
+ md_new_event();
if (mddev->event_work.func)
queue_work(md_misc_wq, &mddev->event_work);
}
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 4c96c36bd01a..53ea7a6961de 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,7 +731,7 @@ extern int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct page *page, int op, int op_flags,
bool metadata_op);
extern void md_do_sync(struct md_thread *thread);
-extern void md_new_event(struct mddev *mddev);
+extern void md_new_event(void);
extern void md_allow_write(struct mddev *mddev);
extern void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev);
extern void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors);
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 19598bd38939..7dc8026cf6ee 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1496,7 +1496,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
if (!r1_bio->bios[i])
continue;
- if (first_clone) {
+ if (first_clone && test_bit(WriteMostly, &rdev->flags)) {
/* do behind I/O ?
* Not if there are too many, or cannot
* allocate memory, or a reader on WriteMostly
@@ -1529,13 +1529,12 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
r1_bio->bios[i] = mbio;
- mbio->bi_iter.bi_sector = (r1_bio->sector +
- conf->mirrors[i].rdev->data_offset);
- bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
+ mbio->bi_iter.bi_sector = (r1_bio->sector + rdev->data_offset);
+ bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
- if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
- !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
+ if (test_bit(FailFast, &rdev->flags) &&
+ !test_bit(WriteMostly, &rdev->flags) &&
conf->raid_disks - mddev->degraded > 1)
mbio->bi_opf |= MD_FAILFAST;
mbio->bi_private = r1_bio;
@@ -1546,7 +1545,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/
- mbio->bi_bdev = (void *)conf->mirrors[i].rdev;
+ mbio->bi_bdev = (void *)rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index aa2636582841..dde98f65bd04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4647,7 +4647,7 @@ out:
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
abort:
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 02ed53b20654..9c1a5877cf9f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -7732,10 +7732,7 @@ static int raid5_run(struct mddev *mddev)
* discard data disk but write parity disk
*/
stripe = stripe * PAGE_SIZE;
- /* Round up to power of 2, as discard handling
- * currently assumes that */
- while ((stripe-1) & stripe)
- stripe = (stripe | (stripe-1)) + 1;
+ stripe = roundup_pow_of_two(stripe);
mddev->queue->limits.discard_alignment = stripe;
mddev->queue->limits.discard_granularity = stripe;
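
Aside: roundup_pow_of_two() from linux/log2.h is equivalent to the removed open-coded loop for any non-zero value (a value that is already a power of two is left unchanged).

/* Sketch, illustrative name: e.g. 12288 -> 16384, 16384 -> 16384. */
static unsigned long example_round_stripe(unsigned long stripe)
{
	return roundup_pow_of_two(stripe);
}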
@@ -8282,7 +8279,7 @@ static int raid5_start_reshape(struct mddev *mddev)
}
conf->reshape_checkpoint = jiffies;
md_wakeup_thread(mddev->sync_thread);
- md_new_event(mddev);
+ md_new_event();
return 0;
}
diff --git a/drivers/media/cec/Kconfig b/drivers/media/cec/Kconfig
index 9ba3a00dce31..94ef3349b8d6 100644
--- a/drivers/media/cec/Kconfig
+++ b/drivers/media/cec/Kconfig
@@ -8,6 +8,8 @@ config CEC_NOTIFIER
config CEC_PIN
bool
+menu "CEC support"
+
config MEDIA_CEC_RC
bool "HDMI CEC RC integration"
depends on CEC_CORE && RC_CORE
@@ -37,3 +39,5 @@ source "drivers/media/cec/i2c/Kconfig"
source "drivers/media/cec/platform/Kconfig"
source "drivers/media/cec/usb/Kconfig"
endif
+
+endmenu
diff --git a/drivers/media/cec/core/cec-pin.c b/drivers/media/cec/core/cec-pin.c
index 8c613aa649c6..a60b6f03a6a1 100644
--- a/drivers/media/cec/core/cec-pin.c
+++ b/drivers/media/cec/core/cec-pin.c
@@ -957,7 +957,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
* so we can kick off the pending transmit.
*/
delta = ktime_us_delta(ts, pin->ts);
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time) {
pin->tx_nacked = false;
if (tx_custom_start(pin))
@@ -968,7 +968,7 @@ static enum hrtimer_restart cec_pin_timer(struct hrtimer *timer)
cec_pin_low(pin);
break;
}
- if (delta / CEC_TIM_DATA_BIT_TOTAL >
+ if (delta / CEC_TIM_DATA_BIT_TOTAL >=
pin->tx_signal_free_time - 1)
pin->state = CEC_ST_TX_WAIT;
break;
diff --git a/drivers/media/cec/platform/meson/ao-cec-g12a.c b/drivers/media/cec/platform/meson/ao-cec-g12a.c
index 891533060d49..68fe6d6a8178 100644
--- a/drivers/media/cec/platform/meson/ao-cec-g12a.c
+++ b/drivers/media/cec/platform/meson/ao-cec-g12a.c
@@ -633,7 +633,6 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
{
struct meson_ao_cec_g12a_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
void __iomem *base;
int ret, irq;
@@ -664,8 +663,7 @@ static int meson_ao_cec_g12a_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto out_probe_adapter;
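
Aside (hypothetical probe): this and the following CEC platform drivers fold the platform_get_resource() + devm_ioremap_resource() pair into a single devm_platform_ioremap_resource() call.

/* Sketch only. */
static int example_probe(struct platform_device *pdev)
{
	void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

	if (IS_ERR(base))
		return PTR_ERR(base);
	/* ... use base ... */
	return 0;
}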
diff --git a/drivers/media/cec/platform/meson/ao-cec.c b/drivers/media/cec/platform/meson/ao-cec.c
index 09aff82c3773..6b440f0635d9 100644
--- a/drivers/media/cec/platform/meson/ao-cec.c
+++ b/drivers/media/cec/platform/meson/ao-cec.c
@@ -602,7 +602,6 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
{
struct meson_ao_cec_device *ao_cec;
struct device *hdmi_dev;
- struct resource *res;
int ret, irq;
hdmi_dev = cec_notifier_parse_hdmi_phandle(&pdev->dev);
@@ -626,8 +625,7 @@ static int meson_ao_cec_probe(struct platform_device *pdev)
ao_cec->adap->owner = THIS_MODULE;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ao_cec->base = devm_ioremap_resource(&pdev->dev, res);
+ ao_cec->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ao_cec->base)) {
ret = PTR_ERR(ao_cec->base);
goto out_probe_adapter;
diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c
index 028a09a7531e..ce9a9d922f11 100644
--- a/drivers/media/cec/platform/s5p/s5p_cec.c
+++ b/drivers/media/cec/platform/s5p/s5p_cec.c
@@ -178,7 +178,6 @@ static int s5p_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device *hdmi_dev;
- struct resource *res;
struct s5p_cec_dev *cec;
bool needs_hpd = of_property_read_bool(pdev->dev.of_node, "needs-hpd");
int ret;
@@ -212,8 +211,7 @@ static int s5p_cec_probe(struct platform_device *pdev)
if (IS_ERR(cec->pmu))
return -EPROBE_DEFER;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->reg = devm_ioremap_resource(dev, res);
+ cec->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->reg))
return PTR_ERR(cec->reg);
diff --git a/drivers/media/cec/platform/sti/stih-cec.c b/drivers/media/cec/platform/sti/stih-cec.c
index f0c73e64b586..abf8e8bcbb34 100644
--- a/drivers/media/cec/platform/sti/stih-cec.c
+++ b/drivers/media/cec/platform/sti/stih-cec.c
@@ -299,7 +299,6 @@ static const struct cec_adap_ops sti_cec_adap_ops = {
static int stih_cec_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct stih_cec *cec;
struct device *hdmi_dev;
int ret;
@@ -315,8 +314,7 @@ static int stih_cec_probe(struct platform_device *pdev)
cec->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- cec->regs = devm_ioremap_resource(dev, res);
+ cec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(cec->regs))
return PTR_ERR(cec->regs);
diff --git a/drivers/media/cec/platform/stm32/stm32-cec.c b/drivers/media/cec/platform/stm32/stm32-cec.c
index 0ffd89712536..40db7911b437 100644
--- a/drivers/media/cec/platform/stm32/stm32-cec.c
+++ b/drivers/media/cec/platform/stm32/stm32-cec.c
@@ -255,7 +255,6 @@ static const struct regmap_config stm32_cec_regmap_cfg = {
static int stm32_cec_probe(struct platform_device *pdev)
{
u32 caps = CEC_CAP_DEFAULTS | CEC_CAP_PHYS_ADDR | CEC_MODE_MONITOR_ALL;
- struct resource *res;
struct stm32_cec *cec;
void __iomem *mmio;
int ret;
@@ -266,8 +265,7 @@ static int stm32_cec_probe(struct platform_device *pdev)
cec->dev = &pdev->dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- mmio = devm_ioremap_resource(&pdev->dev, res);
+ mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(mmio))
return PTR_ERR(mmio);
diff --git a/drivers/media/common/siano/smscoreapi.c b/drivers/media/common/siano/smscoreapi.c
index bceaf91faa15..7d4bc2733f2b 100644
--- a/drivers/media/common/siano/smscoreapi.c
+++ b/drivers/media/common/siano/smscoreapi.c
@@ -414,10 +414,10 @@ struct smscore_registry_entry_t {
static struct list_head g_smscore_notifyees;
static struct list_head g_smscore_devices;
-static struct mutex g_smscore_deviceslock;
+static DEFINE_MUTEX(g_smscore_deviceslock);
static struct list_head g_smscore_registry;
-static struct mutex g_smscore_registrylock;
+static DEFINE_MUTEX(g_smscore_registrylock);
static int default_mode = DEVICE_MODE_NONE;
@@ -2119,10 +2119,7 @@ static int __init smscore_module_init(void)
{
INIT_LIST_HEAD(&g_smscore_notifyees);
INIT_LIST_HEAD(&g_smscore_devices);
- mutex_init(&g_smscore_deviceslock);
-
INIT_LIST_HEAD(&g_smscore_registry);
- mutex_init(&g_smscore_registrylock);
return 0;
}
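
Aside (illustrative names): a statically defined mutex is initialized at build time, which is why the runtime mutex_init() calls in module init can be dropped.

/* Sketch only. */
static DEFINE_MUTEX(example_lock);

static int __init example_init(void)
{
	/* no mutex_init(&example_lock) needed here */
	return 0;
}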
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
index 773c68dcd158..b203c1e26353 100644
--- a/drivers/media/common/videobuf2/videobuf2-core.c
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -68,13 +68,13 @@ module_param(debug, int, 0644);
err; \
})
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
({ \
struct vb2_queue *_q = (vb)->vb2_queue; \
void *ptr; \
\
log_memop(vb, op); \
- ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL; \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \
if (!IS_ERR_OR_NULL(ptr)) \
(vb)->cnt_mem_ ## op++; \
ptr; \
@@ -144,9 +144,9 @@ module_param(debug, int, 0644);
((vb)->vb2_queue->mem_ops->op ? \
(vb)->vb2_queue->mem_ops->op(args) : 0)
-#define call_ptr_memop(vb, op, args...) \
+#define call_ptr_memop(op, vb, args...) \
((vb)->vb2_queue->mem_ops->op ? \
- (vb)->vb2_queue->mem_ops->op(args) : NULL)
+ (vb)->vb2_queue->mem_ops->op(vb, args) : NULL)
#define call_void_memop(vb, op, args...) \
do { \
@@ -230,9 +230,10 @@ static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
if (size < vb->planes[plane].length)
goto free;
- mem_priv = call_ptr_memop(vb, alloc,
- q->alloc_devs[plane] ? : q->dev,
- q->dma_attrs, size, q->dma_dir, q->gfp_flags);
+ mem_priv = call_ptr_memop(alloc,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ size);
if (IS_ERR_OR_NULL(mem_priv)) {
if (mem_priv)
ret = PTR_ERR(mem_priv);
@@ -326,12 +327,9 @@ static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
if (vb->synced)
return;
- if (vb->need_cache_sync_on_prepare) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, prepare,
- vb->planes[plane].mem_priv);
- }
vb->synced = 1;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
}
/*
@@ -345,12 +343,9 @@ static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
if (!vb->synced)
return;
- if (vb->need_cache_sync_on_finish) {
- for (plane = 0; plane < vb->num_planes; ++plane)
- call_void_memop(vb, finish,
- vb->planes[plane].mem_priv);
- }
vb->synced = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
}
/*
@@ -381,6 +376,27 @@ static void __setup_offsets(struct vb2_buffer *vb)
}
}
+static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ /*
+ * DMA exporter should take care of cache syncs, so we can avoid
+ * explicit ->prepare()/->finish() syncs. For other ->memory types
+ * we always need ->prepare() or/and ->finish() cache sync.
+ */
+ if (q->memory == VB2_MEMORY_DMABUF) {
+ vb->skip_cache_sync_on_finish = 1;
+ vb->skip_cache_sync_on_prepare = 1;
+ return;
+ }
+
+ /*
+ * ->finish() cache sync can be avoided when queue direction is
+ * TO_DEVICE.
+ */
+ if (q->dma_dir == DMA_TO_DEVICE)
+ vb->skip_cache_sync_on_finish = 1;
+}
+
/*
* __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
* video buffer memory for all buffers/planes on the queue and initializes the
@@ -414,17 +430,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
vb->index = q->num_buffers + buffer;
vb->type = q->type;
vb->memory = memory;
- /*
- * We need to set these flags here so that the videobuf2 core
- * will call ->prepare()/->finish() cache sync/flush on vb2
- * buffers when appropriate. However, we can avoid explicit
- * ->prepare() and ->finish() cache sync for DMABUF buffers,
- * because DMA exporter takes care of it.
- */
- if (q->memory != VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
- }
+ init_buffer_cache_hints(q, vb);
for (plane = 0; plane < num_planes; ++plane) {
vb->planes[plane].length = plane_sizes[plane];
vb->planes[plane].min_length = plane_sizes[plane];
@@ -732,11 +738,30 @@ int vb2_verify_memory_type(struct vb2_queue *q,
}
EXPORT_SYMBOL(vb2_verify_memory_type);
+static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
+{
+ q->non_coherent_mem = 0;
+
+ if (!vb2_queue_allows_cache_hints(q))
+ return;
+ q->non_coherent_mem = non_coherent_mem;
+}
+
+static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
+{
+ if (non_coherent_mem != q->non_coherent_mem) {
+ dprintk(q, 1, "memory coherency model mismatch\n");
+ return false;
+ }
+ return true;
+}
+
int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count)
+ unsigned int flags, unsigned int *count)
{
unsigned int num_buffers, allocated_buffers, num_planes = 0;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
unsigned int i;
int ret;
@@ -751,7 +776,8 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
}
if (*count == 0 || q->num_buffers != 0 ||
- (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
+ (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
+ !verify_coherency_flags(q, non_coherent_mem)) {
/*
* We already have buffers allocated, so first check if they
* are not in use and can be freed.
@@ -788,6 +814,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
+ set_queue_coherency(q, non_coherent_mem);
/*
* Ask the driver how many buffers and planes per buffer it requires.
@@ -872,12 +899,13 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
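
Aside (hypothetical helper): in-kernel callers pass 0 for the new flags argument; user space may request non-coherent MMAP buffers with V4L2_MEMORY_FLAG_NON_COHERENT, which is honoured only when the queue allows cache hints.

/* Sketch only. */
static int example_request_buffers(struct vb2_queue *q, unsigned int *count)
{
	return vb2_core_reqbufs(q, VB2_MEMORY_MMAP,
				V4L2_MEMORY_FLAG_NON_COHERENT, count);
}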
int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
- unsigned int *count,
+ unsigned int flags, unsigned int *count,
unsigned int requested_planes,
const unsigned int requested_sizes[])
{
unsigned int num_planes = 0, num_buffers, allocated_buffers;
unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
int ret;
if (q->num_buffers == VB2_MAX_FRAME) {
@@ -893,11 +921,14 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
q->memory = memory;
q->waiting_for_buffers = !q->is_output;
+ set_queue_coherency(q, non_coherent_mem);
} else {
if (q->memory != memory) {
dprintk(q, 1, "memory model mismatch\n");
return -EINVAL;
}
+ if (!verify_coherency_flags(q, non_coherent_mem))
+ return -EINVAL;
}
num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
@@ -975,7 +1006,7 @@ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
@@ -985,7 +1016,7 @@ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
- return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
+ return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
}
EXPORT_SYMBOL_GPL(vb2_plane_cookie);
@@ -1125,10 +1156,11 @@ static int __prepare_userptr(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, get_userptr,
- q->alloc_devs[plane] ? : q->dev,
- planes[plane].m.userptr,
- planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(get_userptr,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].m.userptr,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
plane);
@@ -1249,9 +1281,11 @@ static int __prepare_dmabuf(struct vb2_buffer *vb)
vb->planes[plane].data_offset = 0;
/* Acquire each plane's memory */
- mem_priv = call_ptr_memop(vb, attach_dmabuf,
- q->alloc_devs[plane] ? : q->dev,
- dbuf, planes[plane].length, q->dma_dir);
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ dbuf,
+ planes[plane].length);
if (IS_ERR(mem_priv)) {
dprintk(q, 1, "failed to attach dmabuf\n");
ret = PTR_ERR(mem_priv);
@@ -1421,9 +1455,19 @@ int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
static void vb2_req_queue(struct media_request_object *obj)
{
struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int err;
mutex_lock(vb->vb2_queue->lock);
- vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ /*
+ * There is no method to propagate an error from vb2_core_qbuf(),
+ * so if this returns a non-0 value, then WARN.
+ *
+ * The only exception is -EIO which is returned if q->error is
+ * set. We just ignore that, and expect this will be caught the
+ * next time vb2_req_prepare() is called.
+ */
+ err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ WARN_ON_ONCE(err && err != -EIO);
mutex_unlock(vb->vb2_queue->lock);
}
@@ -2187,8 +2231,10 @@ int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
vb_plane = &vb->planes[plane];
- dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv,
- flags & O_ACCMODE);
+ dbuf = call_ptr_memop(get_dmabuf,
+ vb,
+ vb_plane->mem_priv,
+ flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(q, 1, "failed to export buffer %d, plane %d\n",
index, plane);
@@ -2342,6 +2388,17 @@ int vb2_core_queue_init(struct vb2_queue *q)
if (WARN_ON(q->requires_requests && !q->supports_requests))
return -EINVAL;
+ /*
+ * This combination is not allowed since a non-zero value of
+ * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
+ * it has to call start_streaming(), and the Request API expects
+ * that queueing a request (and thus queueing a buffer contained
+ * in that request) will always succeed. There is no method of
+ * propagating an error back to userspace.
+ */
+ if (WARN_ON(q->supports_requests && q->min_buffers_needed))
+ return -EINVAL;
+
INIT_LIST_HEAD(&q->queued_list);
INIT_LIST_HEAD(&q->done_list);
spin_lock_init(&q->done_lock);
@@ -2576,7 +2633,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
fileio->memory = VB2_MEMORY_MMAP;
fileio->type = q->type;
q->fileio = fileio;
- ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
if (ret)
goto err_kfree;
@@ -2633,7 +2690,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read)
err_reqbufs:
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
err_kfree:
q->fileio = NULL;
@@ -2653,7 +2710,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q)
vb2_core_streamoff(q, q->type);
q->fileio = NULL;
fileio->count = 0;
- vb2_core_reqbufs(q, fileio->memory, &fileio->count);
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
kfree(fileio);
dprintk(q, 3, "file io emulator closed\n");
}
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
index 9a1a9baca2e4..556e42ba66e5 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -17,6 +17,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
@@ -40,6 +41,9 @@ struct vb2_dc_buf {
/* DMABUF related */
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+ bool non_coherent_mem;
};
/*********************************************/
@@ -66,24 +70,46 @@ static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
/* callbacks for all buffers */
/*********************************************/
-static void *vb2_dc_cookie(void *buf_priv)
+static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
return &buf->dma_addr;
}
-static void *vb2_dc_vaddr(void *buf_priv)
+/*
+ * This function may fail if:
+ *
+ * - dma_buf_vmap() fails
+ * E.g. due to lack of virtual mapping address space, or due to
+ * dmabuf->ops misconfiguration.
+ *
+ * - dma_vmap_noncontiguous() fails
+ * For instance, when requested buffer size is larger than totalram_pages().
+ * Relevant for buffers that use non-coherent memory.
+ *
+ * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
+ * Relevant for buffers that use coherent memory.
+ */
+static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dc_buf *buf = buf_priv;
- struct dma_buf_map map;
- int ret;
- if (!buf->vaddr && buf->db_attach) {
- ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
- buf->vaddr = ret ? NULL : map.vaddr;
+ if (buf->vaddr)
+ return buf->vaddr;
+
+ if (buf->db_attach) {
+ struct dma_buf_map map;
+
+ if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
+ buf->vaddr = map.vaddr;
+
+ return buf->vaddr;
}
+ if (buf->non_coherent_mem)
+ buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt);
return buf->vaddr;
}
@@ -99,10 +125,19 @@ static void vb2_dc_prepare(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_prepare)
return;
+ if (!buf->non_coherent_mem)
+ return;
+
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ flush_kernel_vmap_range(buf->vaddr, buf->size);
}
static void vb2_dc_finish(void *buf_priv)
@@ -110,10 +145,19 @@ static void vb2_dc_finish(void *buf_priv)
struct vb2_dc_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
- if (!sgt)
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ if (!buf->non_coherent_mem)
return;
+ /* For both USERPTR and non-coherent MMAP */
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ invalidate_kernel_vmap_range(buf->vaddr, buf->size);
}
/*********************************************/
@@ -127,21 +171,69 @@ static void vb2_dc_put(void *buf_priv)
if (!refcount_dec_and_test(&buf->refcount))
return;
- if (buf->sgt_base) {
- sg_free_table(buf->sgt_base);
- kfree(buf->sgt_base);
+ if (buf->non_coherent_mem) {
+ if (buf->vaddr)
+ dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
+ dma_free_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt, buf->dma_dir);
+ } else {
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_attrs(buf->dev, buf->size, buf->cookie,
+ buf->dma_addr, buf->attrs);
}
- dma_free_attrs(buf->dev, buf->size, buf->cookie, buf->dma_addr,
- buf->attrs);
put_device(buf->dev);
kfree(buf);
}
-static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->cookie = dma_alloc_attrs(buf->dev,
+ buf->size,
+ &buf->dma_addr,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->cookie)
+ return -ENOMEM;
+
+ if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return 0;
+
+ buf->vaddr = buf->cookie;
+ return 0;
+}
+
+static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
+ buf->size,
+ buf->dma_dir,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->dma_sgt)
+ return -ENOMEM;
+
+ buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
+
+ /*
+ * For non-coherent buffers the kernel mapping is created on demand
+ * in vb2_dc_vaddr().
+ */
+ return 0;
+}
+
+static void *vb2_dc_alloc(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size)
{
struct vb2_dc_buf *buf;
+ int ret;
if (WARN_ON(!dev))
return ERR_PTR(-EINVAL);
@@ -150,22 +242,25 @@ static void *vb2_dc_alloc(struct device *dev, unsigned long attrs,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->attrs = attrs;
- buf->cookie = dma_alloc_attrs(dev, size, &buf->dma_addr,
- GFP_KERNEL | gfp_flags, buf->attrs);
- if (!buf->cookie) {
- dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
- kfree(buf);
- return ERR_PTR(-ENOMEM);
- }
-
- if ((buf->attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
- buf->vaddr = buf->cookie;
+ buf->attrs = vb->vb2_queue->dma_attrs;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+ buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
+ buf->size = size;
/* Prevent the device from being released while the buffer is used */
buf->dev = get_device(dev);
- buf->size = size;
- buf->dma_dir = dma_dir;
+
+ if (buf->non_coherent_mem)
+ ret = vb2_dc_alloc_non_coherent(buf);
+ else
+ ret = vb2_dc_alloc_coherent(buf);
+
+ if (ret) {
+ dev_err(dev, "dma alloc of size %ld failed\n", size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dc_put;
@@ -186,9 +281,12 @@ static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
return -EINVAL;
}
- ret = dma_mmap_attrs(buf->dev, vma, buf->cookie,
- buf->dma_addr, buf->size, buf->attrs);
-
+ if (buf->non_coherent_mem)
+ ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
+ buf->dma_sgt);
+ else
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
if (ret) {
pr_err("Remapping memory failed, error: %d\n", ret);
return ret;
@@ -350,9 +448,15 @@ vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct dma_buf_map *map)
{
- struct vb2_dc_buf *buf = dbuf->priv;
+ struct vb2_dc_buf *buf;
+ void *vaddr;
+
+ buf = dbuf->priv;
+ vaddr = vb2_dc_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
- dma_buf_map_set_vaddr(map, buf->vaddr);
+ dma_buf_map_set_vaddr(map, vaddr);
return 0;
}
@@ -380,6 +484,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
int ret;
struct sg_table *sgt;
+ if (buf->non_coherent_mem)
+ return buf->dma_sgt;
+
sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt) {
dev_err(buf->dev, "failed to alloc sg table\n");
@@ -397,7 +504,9 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -459,8 +568,8 @@ static void vb2_dc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dc_buf *buf;
struct frame_vector *vec;
@@ -490,7 +599,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
offset = lower_32_bits(offset_in_page(vaddr));
vec = vb2_create_framevec(vaddr, size);
@@ -555,6 +665,8 @@ static void *vb2_dc_get_userptr(struct device *dev, unsigned long vaddr,
buf->dma_addr = sg_dma_address(sgt->sgl);
buf->dma_sgt = sgt;
+ buf->non_coherent_mem = 1;
+
out:
buf->size = size;
@@ -660,8 +772,8 @@ static void vb2_dc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dc_buf *buf;
struct dma_buf_attachment *dba;
@@ -677,6 +789,8 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dev = dev;
+ buf->vb = vb;
+
/* create attachment for the dmabuf with the user device */
dba = dma_buf_attach(dbuf, buf->dev);
if (IS_ERR(dba)) {
@@ -685,7 +799,7 @@ static void *vb2_dc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
index db90ebb8950f..1094575abf95 100644
--- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -51,6 +51,8 @@ struct vb2_dma_sg_buf {
struct vb2_vmarea_handler handler;
struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
};
static void vb2_dma_sg_put(void *buf_priv);
@@ -96,9 +98,8 @@ static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
return 0;
}
-static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -113,7 +114,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
return ERR_PTR(-ENOMEM);
buf->vaddr = NULL;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = 0;
buf->size = size;
/* size is already page aligned */
@@ -130,7 +131,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
if (!buf->pages)
goto fail_pages_array_alloc;
- ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
+ ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
if (ret)
goto fail_pages_alloc;
@@ -154,6 +155,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs,
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_dma_sg_put;
buf->handler.arg = buf;
+ buf->vb = vb;
refcount_set(&buf->refcount, 1);
@@ -202,6 +204,9 @@ static void vb2_dma_sg_prepare(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}
@@ -210,12 +215,14 @@ static void vb2_dma_sg_finish(void *buf_priv)
struct vb2_dma_sg_buf *buf = buf_priv;
struct sg_table *sgt = buf->dma_sgt;
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}
-static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct sg_table *sgt;
@@ -230,7 +237,7 @@ static void *vb2_dma_sg_get_userptr(struct device *dev, unsigned long vaddr,
buf->vaddr = NULL;
buf->dev = dev;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->offset = vaddr & ~PAGE_MASK;
buf->size = size;
buf->dma_sgt = &buf->sg_table;
@@ -292,7 +299,7 @@ static void vb2_dma_sg_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_dma_sg_vaddr(void *buf_priv)
+static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf_map map;
@@ -511,7 +518,9 @@ static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
.release = vb2_dma_sg_dmabuf_ops_release,
};
-static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_dma_sg_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -605,8 +614,8 @@ static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
{
struct vb2_dma_sg_buf *buf;
struct dma_buf_attachment *dba;
@@ -630,14 +639,14 @@ static void *vb2_dma_sg_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return dba;
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
buf->db_attach = dba;
return buf;
}
-static void *vb2_dma_sg_cookie(void *buf_priv)
+static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_dma_sg_buf *buf = buf_priv;
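
Aside, a sketch of a vb2_mem_ops callback under the reworked interface: every op now receives the owning vb2_buffer, so per-queue settings (dma_dir, gfp_flags, coherency) can be read from vb->vb2_queue instead of being passed as extra parameters. example_buf is a hypothetical allocator-private type.

/* Sketch only. */
static void *example_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct example_buf *buf = buf_priv;

	return buf->vaddr;
}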
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
index 2988bb38ceb1..6edf4508c636 100644
--- a/drivers/media/common/videobuf2/videobuf2-v4l2.c
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -345,24 +345,6 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
struct vb2_buffer *vb,
struct v4l2_buffer *b)
{
- /*
- * DMA exporter should take care of cache syncs, so we can avoid
- * explicit ->prepare()/->finish() syncs. For other ->memory types
- * we always need ->prepare() or/and ->finish() cache sync.
- */
- if (q->memory == VB2_MEMORY_DMABUF) {
- vb->need_cache_sync_on_finish = 0;
- vb->need_cache_sync_on_prepare = 0;
- return;
- }
-
- /*
- * Cache sync/invalidation flags are set by default in order to
- * preserve existing behaviour for old apps/drivers.
- */
- vb->need_cache_sync_on_prepare = 1;
- vb->need_cache_sync_on_finish = 1;
-
if (!vb2_queue_allows_cache_hints(q)) {
/*
* Clear buffer cache flags if queue does not support user
@@ -374,18 +356,11 @@ static void set_buffer_cache_hints(struct vb2_queue *q,
return;
}
- /*
- * ->finish() cache sync can be avoided when queue direction is
- * TO_DEVICE.
- */
- if (q->dma_dir == DMA_TO_DEVICE)
- vb->need_cache_sync_on_finish = 0;
-
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
- vb->need_cache_sync_on_finish = 0;
+ vb->skip_cache_sync_on_finish = 1;
if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
- vb->need_cache_sync_on_prepare = 0;
+ vb->skip_cache_sync_on_prepare = 1;
}
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
@@ -717,12 +692,32 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
#endif
}
+static void validate_memory_flags(struct vb2_queue *q,
+ int memory,
+ u32 *flags)
+{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+ * but in order to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+}
+
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ u32 flags = req->flags;
fill_buf_caps(q, &req->capabilities);
- return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count);
+ validate_memory_flags(q, req->memory, &flags);
+ req->flags = flags;
+ return ret ? ret : vb2_core_reqbufs(q, req->memory,
+ req->flags, &req->count);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);
@@ -754,6 +749,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
unsigned i;
fill_buf_caps(q, &create->capabilities);
+ validate_memory_flags(q, create->memory, &create->flags);
create->index = q->num_buffers;
if (create->count == 0)
return ret != -EBUSY ? ret : 0;
@@ -797,6 +793,7 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
if (requested_sizes[i] == 0)
return -EINVAL;
return ret ? ret : vb2_core_create_bufs(q, create->memory,
+ create->flags,
&create->count,
requested_planes,
requested_sizes);
@@ -993,13 +990,16 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
{
struct video_device *vdev = video_devdata(file);
int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ u32 flags = p->flags;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &flags);
+ p->flags = flags;
if (res)
return res;
if (vb2_queue_is_busy(vdev, file))
return -EBUSY;
- res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count);
+ res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
/* If count == 0, then the owner has released all buffers and he
is no longer owner of the queue. Otherwise we have a new owner. */
if (res == 0)
@@ -1017,6 +1017,7 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
p->index = vdev->queue->num_buffers;
fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &p->flags);
/*
* If count == 0, then just check if memory and type are valid.
* Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
index fa983897d0e9..0bbfea66554f 100644
--- a/drivers/media/common/videobuf2/videobuf2-vmalloc.c
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -34,13 +34,12 @@ struct vb2_vmalloc_buf {
static void vb2_vmalloc_put(void *buf_priv);
-static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
- unsigned long size, enum dma_data_direction dma_dir,
- gfp_t gfp_flags)
+static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
- buf = kzalloc(sizeof(*buf), GFP_KERNEL | gfp_flags);
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
if (!buf)
return ERR_PTR(-ENOMEM);
@@ -52,7 +51,7 @@ static void *vb2_vmalloc_alloc(struct device *dev, unsigned long attrs,
return ERR_PTR(-ENOMEM);
}
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->handler.refcount = &buf->refcount;
buf->handler.put = vb2_vmalloc_put;
buf->handler.arg = buf;
@@ -71,9 +70,8 @@ static void vb2_vmalloc_put(void *buf_priv)
}
}
-static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
- unsigned long size,
- enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
{
struct vb2_vmalloc_buf *buf;
struct frame_vector *vec;
@@ -84,7 +82,7 @@ static void *vb2_vmalloc_get_userptr(struct device *dev, unsigned long vaddr,
if (!buf)
return ERR_PTR(-ENOMEM);
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
offset = vaddr & ~PAGE_MASK;
buf->size = size;
vec = vb2_create_framevec(vaddr, size);
@@ -147,7 +145,7 @@ static void vb2_vmalloc_put_userptr(void *buf_priv)
kfree(buf);
}
-static void *vb2_vmalloc_vaddr(void *buf_priv)
+static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
struct vb2_vmalloc_buf *buf = buf_priv;
@@ -339,7 +337,9 @@ static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
.release = vb2_vmalloc_dmabuf_ops_release,
};
-static struct dma_buf *vb2_vmalloc_get_dmabuf(void *buf_priv, unsigned long flags)
+static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
{
struct vb2_vmalloc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -403,8 +403,10 @@ static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
kfree(buf);
}
-static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
- unsigned long size, enum dma_data_direction dma_dir)
+static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
+ struct device *dev,
+ struct dma_buf *dbuf,
+ unsigned long size)
{
struct vb2_vmalloc_buf *buf;
@@ -416,7 +418,7 @@ static void *vb2_vmalloc_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
return ERR_PTR(-ENOMEM);
buf->dbuf = dbuf;
- buf->dma_dir = dma_dir;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
buf->size = size;
return buf;
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index dddebea644bb..8a2febf33ce2 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -1008,7 +1008,7 @@ static u8 mask_promisc[6]={0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
static int dvb_net_filter_sec_set(struct net_device *dev,
struct dmx_section_filter **secfilter,
- u8 *mac, u8 *mac_mask)
+ const u8 *mac, u8 *mac_mask)
{
struct dvb_net_priv *priv = netdev_priv(dev);
int ret;
@@ -1052,7 +1052,7 @@ static int dvb_net_feed_start(struct net_device *dev)
int ret = 0, i;
struct dvb_net_priv *priv = netdev_priv(dev);
struct dmx_demux *demux = priv->demux;
- unsigned char *mac = (unsigned char *) dev->dev_addr;
+ const unsigned char *mac = (const unsigned char *) dev->dev_addr;
netdev_dbg(dev, "rx_mode %i\n", priv->rx_mode);
mutex_lock(&priv->mutex);
@@ -1272,7 +1272,7 @@ static int dvb_net_set_mac (struct net_device *dev, void *p)
struct dvb_net_priv *priv = netdev_priv(dev);
struct sockaddr *addr=p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
schedule_work(&priv->restart_net_feed_wq);
@@ -1367,7 +1367,7 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
dvbnet->dvbdev->adapter->num, if_num);
net->addr_len = 6;
- memcpy(net->dev_addr, dvbnet->dvbdev->adapter->proposed_mac, 6);
+ eth_hw_addr_set(net, dvbnet->dvbdev->adapter->proposed_mac);
dvbnet->device[if_num] = net;
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c
index 6974f1731529..959d110407a4 100644
--- a/drivers/media/dvb-core/dvb_vb2.c
+++ b/drivers/media/dvb-core/dvb_vb2.c
@@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req)
ctx->buf_siz = req->size;
ctx->buf_cnt = req->count;
- ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count);
+ ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count);
if (ret) {
ctx->state = DVB_VB2_STATE_NONE;
dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name,
diff --git a/drivers/media/dvb-frontends/cxd2099.c b/drivers/media/dvb-frontends/cxd2099.c
index f88b5355493e..1c8207ab8988 100644
--- a/drivers/media/dvb-frontends/cxd2099.c
+++ b/drivers/media/dvb-frontends/cxd2099.c
@@ -3,15 +3,6 @@
* cxd2099.c: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2013 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/slab.h>
diff --git a/drivers/media/dvb-frontends/cxd2099.h b/drivers/media/dvb-frontends/cxd2099.h
index 0c101bdef01d..5d4060007c46 100644
--- a/drivers/media/dvb-frontends/cxd2099.h
+++ b/drivers/media/dvb-frontends/cxd2099.h
@@ -3,15 +3,6 @@
* cxd2099.h: Driver for the Sony CXD2099AR Common Interface Controller
*
* Copyright (C) 2010-2011 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _CXD2099_H_
diff --git a/drivers/media/dvb-frontends/cxd2820r_priv.h b/drivers/media/dvb-frontends/cxd2820r_priv.h
index 7baf0162424f..09c42bcef971 100644
--- a/drivers/media/dvb-frontends/cxd2820r_priv.h
+++ b/drivers/media/dvb-frontends/cxd2820r_priv.h
@@ -13,7 +13,7 @@
#include <media/dvb_frontend.h>
#include <media/dvb_math.h>
#include "cxd2820r.h"
-#include <linux/gpio.h>
+#include <linux/gpio/driver.h> /* For gpio_chip */
#include <linux/math64.h>
#include <linux/regmap.h>
diff --git a/drivers/media/dvb-frontends/mb86a20s.c b/drivers/media/dvb-frontends/mb86a20s.c
index a7faf0cf8788..b74b9afed9a2 100644
--- a/drivers/media/dvb-frontends/mb86a20s.c
+++ b/drivers/media/dvb-frontends/mb86a20s.c
@@ -444,11 +444,11 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
unsigned layer)
{
int rc;
- int interleaving[] = {
+ static const int interleaving[] = {
0, 1, 2, 4, 8
};
- static unsigned char reg[] = {
+ static const unsigned char reg[] = {
[0] = 0x88, /* Layer A */
[1] = 0x8c, /* Layer B */
[2] = 0x90, /* Layer C */
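
The mb86a20s change is a small but common cleanup: lookup tables that are never modified become static const, so they live in read-only data once instead of being rebuilt on the stack on every call. A self-contained illustration with a hypothetical interleaving_for() helper:

static const int interleaving[] = { 0, 1, 2, 4, 8 };	/* built once, read-only */

static int interleaving_for(unsigned int idx)
{
	if (idx >= ARRAY_SIZE(interleaving))
		return -EINVAL;
	return interleaving[idx];
}
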
diff --git a/drivers/media/dvb-frontends/mn88443x.c b/drivers/media/dvb-frontends/mn88443x.c
index e4528784f847..fff212c0bf3b 100644
--- a/drivers/media/dvb-frontends/mn88443x.c
+++ b/drivers/media/dvb-frontends/mn88443x.c
@@ -204,11 +204,18 @@ struct mn88443x_priv {
struct regmap *regmap_t;
};
-static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
+static int mn88443x_cmn_power_on(struct mn88443x_priv *chip)
{
+ struct device *dev = &chip->client_s->dev;
struct regmap *r_t = chip->regmap_t;
+ int ret;
- clk_prepare_enable(chip->mclk);
+ ret = clk_prepare_enable(chip->mclk);
+ if (ret) {
+ dev_err(dev, "Failed to prepare and enable mclk: %d\n",
+ ret);
+ return ret;
+ }
gpiod_set_value_cansleep(chip->reset_gpio, 1);
usleep_range(100, 1000);
@@ -222,6 +229,8 @@ static void mn88443x_cmn_power_on(struct mn88443x_priv *chip)
} else {
regmap_write(r_t, HIZSET3, 0x8f);
}
+
+ return 0;
}
static void mn88443x_cmn_power_off(struct mn88443x_priv *chip)
@@ -738,7 +747,10 @@ static int mn88443x_probe(struct i2c_client *client,
chip->fe.demodulator_priv = chip;
i2c_set_clientdata(client, chip);
- mn88443x_cmn_power_on(chip);
+ ret = mn88443x_cmn_power_on(chip);
+ if (ret)
+ goto err_i2c_t;
+
mn88443x_s_sleep(chip);
mn88443x_t_sleep(chip);
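
The mn88443x hunks convert the power-up helper from void to int so that a clk_prepare_enable() failure is reported back to probe instead of being silently ignored. The usual shape of that pattern, sketched with hypothetical my_priv/my_power_on names:

static int my_power_on(struct my_priv *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->mclk);
	if (ret) {
		dev_err(chip->dev, "failed to enable mclk: %d\n", ret);	/* chip->dev is illustrative */
		return ret;
	}

	gpiod_set_value_cansleep(chip->reset_gpio, 1);
	return 0;
}
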
diff --git a/drivers/media/dvb-frontends/mxl5xx.c b/drivers/media/dvb-frontends/mxl5xx.c
index 0b00a23436ed..934d1c0b214a 100644
--- a/drivers/media/dvb-frontends/mxl5xx.c
+++ b/drivers/media/dvb-frontends/mxl5xx.c
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/mxl5xx.h b/drivers/media/dvb-frontends/mxl5xx.h
index 706a2f5d8f97..139e16b2ecfc 100644
--- a/drivers/media/dvb-frontends/mxl5xx.h
+++ b/drivers/media/dvb-frontends/mxl5xx.h
@@ -9,15 +9,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL5XX_H_
diff --git a/drivers/media/dvb-frontends/mxl5xx_defs.h b/drivers/media/dvb-frontends/mxl5xx_defs.h
index 1442af8dc176..097271f73740 100644
--- a/drivers/media/dvb-frontends/mxl5xx_defs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_defs.h
@@ -7,10 +7,6 @@
* based on code:
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
*/
enum MXL_BOOL_E {
diff --git a/drivers/media/dvb-frontends/mxl5xx_regs.h b/drivers/media/dvb-frontends/mxl5xx_regs.h
index 86d5317eba7a..b38a13847033 100644
--- a/drivers/media/dvb-frontends/mxl5xx_regs.h
+++ b/drivers/media/dvb-frontends/mxl5xx_regs.h
@@ -2,16 +2,6 @@
/*
* Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved
*
- * License type: GPLv2
- *
- * This program is free software; you can redistribute it and/or modify it under
- * the terms of the GNU General Public License as published by the Free Software
- * Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
- *
* This program may alternatively be licensed under a proprietary license from
* MaxLinear, Inc.
*
diff --git a/drivers/media/dvb-frontends/mxl692.c b/drivers/media/dvb-frontends/mxl692.c
index a246db683cdf..dd7954e8f553 100644
--- a/drivers/media/dvb-frontends/mxl692.c
+++ b/drivers/media/dvb-frontends/mxl692.c
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/mutex.h>
diff --git a/drivers/media/dvb-frontends/mxl692.h b/drivers/media/dvb-frontends/mxl692.h
index 45bc48f1f12f..77764a047c07 100644
--- a/drivers/media/dvb-frontends/mxl692.h
+++ b/drivers/media/dvb-frontends/mxl692.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _MXL692_H_
diff --git a/drivers/media/dvb-frontends/mxl692_defs.h b/drivers/media/dvb-frontends/mxl692_defs.h
index 776ac407b4e7..c603f3d6f27f 100644
--- a/drivers/media/dvb-frontends/mxl692_defs.h
+++ b/drivers/media/dvb-frontends/mxl692_defs.h
@@ -7,15 +7,6 @@
* based on code:
* Copyright (c) 2016 MaxLinear, Inc. All rights reserved
* which was released under GPL V2
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
/*****************************************************************************
diff --git a/drivers/media/dvb-frontends/rtl2832_sdr.c b/drivers/media/dvb-frontends/rtl2832_sdr.c
index 1a2f0d2adadf..6a4f2997d6f5 100644
--- a/drivers/media/dvb-frontends/rtl2832_sdr.c
+++ b/drivers/media/dvb-frontends/rtl2832_sdr.c
@@ -376,8 +376,11 @@ static int rtl2832_sdr_alloc_urbs(struct rtl2832_sdr_dev *dev)
dev_dbg(&pdev->dev, "alloc urb=%d\n", i);
dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!dev->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(dev->urb_list[j]);
+ dev->urb_list[j] = NULL;
+ }
+ dev->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(dev->urb_list[i],
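
The rtl2832_sdr hunk hardens the partial-failure path of URB allocation: everything allocated so far is freed, the pointers are cleared so a later teardown cannot free them again, and the bookkeeping counter is reset. A hedged sketch of the unwind idiom (N_URBS and the dev layout are stand-ins):

	for (i = 0; i < N_URBS; i++) {
		dev->urb_list[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urb_list[i]) {
			for (j = 0; j < i; j++) {
				usb_free_urb(dev->urb_list[j]);
				dev->urb_list[j] = NULL;	/* prevent a double free later */
			}
			return -ENOMEM;
		}
	}
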
diff --git a/drivers/media/dvb-frontends/stv0910.c b/drivers/media/dvb-frontends/stv0910.c
index 68d7c7b41071..e517ff757744 100644
--- a/drivers/media/dvb-frontends/stv0910.c
+++ b/drivers/media/dvb-frontends/stv0910.c
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv0910.h b/drivers/media/dvb-frontends/stv0910.h
index 24ecc6902235..0b6f02ad7910 100644
--- a/drivers/media/dvb-frontends/stv0910.h
+++ b/drivers/media/dvb-frontends/stv0910.h
@@ -5,15 +5,6 @@
* Copyright (C) 2014-2015 Ralph Metzler <rjkm@metzlerbros.de>
* Marcus Metzler <mocm@metzlerbros.de>
* developed for Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV0910_H_
diff --git a/drivers/media/dvb-frontends/stv6111.c b/drivers/media/dvb-frontends/stv6111.c
index d5035dac4574..2d0adb6fcb08 100644
--- a/drivers/media/dvb-frontends/stv6111.c
+++ b/drivers/media/dvb-frontends/stv6111.c
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/kernel.h>
diff --git a/drivers/media/dvb-frontends/stv6111.h b/drivers/media/dvb-frontends/stv6111.h
index 49e821ac9954..f172c3e3d886 100644
--- a/drivers/media/dvb-frontends/stv6111.h
+++ b/drivers/media/dvb-frontends/stv6111.h
@@ -3,15 +3,6 @@
* Driver for the ST STV6111 tuner
*
* Copyright (C) 2014 Digital Devices GmbH
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 only, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#ifndef _STV6111_H_
diff --git a/drivers/media/firewire/firedtv-avc.c b/drivers/media/firewire/firedtv-avc.c
index 2bf9467b917d..71991f8638e6 100644
--- a/drivers/media/firewire/firedtv-avc.c
+++ b/drivers/media/firewire/firedtv-avc.c
@@ -1165,7 +1165,11 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
read_pos += program_info_length;
write_pos += program_info_length;
}
- while (read_pos < length) {
+ while (read_pos + 4 < length) {
+ if (write_pos + 4 >= sizeof(c->operand) - 4) {
+ ret = -EINVAL;
+ goto out;
+ }
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
c->operand[write_pos++] = msg[read_pos++];
@@ -1177,13 +1181,17 @@ int avc_ca_pmt(struct firedtv *fdtv, char *msg, int length)
c->operand[write_pos++] = es_info_length >> 8;
c->operand[write_pos++] = es_info_length & 0xff;
if (es_info_length > 0) {
+ if (read_pos >= length) {
+ ret = -EINVAL;
+ goto out;
+ }
pmt_cmd_id = msg[read_pos++];
if (pmt_cmd_id != 1 && pmt_cmd_id != 4)
dev_err(fdtv->device, "invalid pmt_cmd_id %d at stream level\n",
pmt_cmd_id);
- if (es_info_length > sizeof(c->operand) - 4 -
- write_pos) {
+ if (es_info_length > sizeof(c->operand) - 4 - write_pos ||
+ es_info_length > length - read_pos) {
ret = -EINVAL;
goto out;
}
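
The avc_ca_pmt() changes add explicit bounds checks so a malformed CA PMT can neither read past the incoming message nor write past the fixed-size operand buffer. The same defensive pattern in standalone form, with names that are illustrative rather than the driver's:

/* Copy n bytes while refusing to over-read src or over-write dst. */
static int copy_bounded(unsigned char *dst, size_t dst_size, size_t *wpos,
			const unsigned char *src, size_t src_len, size_t *rpos,
			size_t n)
{
	if (*rpos + n > src_len || *wpos + n > dst_size)
		return -EINVAL;
	while (n--)
		dst[(*wpos)++] = src[(*rpos)++];
	return 0;
}
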
diff --git a/drivers/media/firewire/firedtv-ci.c b/drivers/media/firewire/firedtv-ci.c
index 9363d005e2b6..e0d57e09dab0 100644
--- a/drivers/media/firewire/firedtv-ci.c
+++ b/drivers/media/firewire/firedtv-ci.c
@@ -134,6 +134,8 @@ static int fdtv_ca_pmt(struct firedtv *fdtv, void *arg)
} else {
data_length = msg->msg[3];
}
+ if (data_length > sizeof(msg->msg) - data_pos)
+ return -EINVAL;
return avc_ca_pmt(fdtv, &msg->msg[data_pos], data_length);
}
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index 08feb3e8c1bf..d6a5d4ca439a 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -450,6 +450,7 @@ config VIDEO_TW9906
config VIDEO_TW9910
tristate "Techwell TW9910 video decoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for Techwell TW9910 NTSC/PAL/SECAM video decoder.
@@ -597,6 +598,7 @@ config VIDEO_AK881X
config VIDEO_THS8200
tristate "Texas Instruments THS8200 video encoder"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the Texas Instruments THS8200 video encoder.
@@ -610,6 +612,7 @@ menu "Video improvement chips"
config VIDEO_UPD64031A
tristate "NEC Electronics uPD64031A Ghost Reduction"
depends on VIDEO_V4L2 && I2C
+ select V4L2_ASYNC
help
Support for the NEC Electronics uPD64031A Ghost Reduction
video chip. It is most often found in NTSC TV cards made for
@@ -742,6 +745,19 @@ config VIDEO_HI556
To compile this driver as a module, choose M here: the
module will be called hi556.
+config VIDEO_HI846
+ tristate "Hynix Hi-846 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the Hynix
+ Hi-846 camera.
+
+ To compile this driver as a module, choose M here: the
+ module will be called hi846.
+
config VIDEO_IMX208
tristate "Sony IMX208 sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
@@ -1186,6 +1202,16 @@ config VIDEO_OV13858
This is a Video4Linux2 sensor driver for the OmniVision
OV13858 camera.
+config VIDEO_OV13B10
+ tristate "OmniVision OV13B10 sensor support"
+ depends on I2C && VIDEO_V4L2
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select V4L2_FWNODE
+ help
+ This is a Video4Linux2 sensor driver for the OmniVision
+ OV13B10 camera.
+
config VIDEO_VS6624
tristate "ST VS6624 sensor support"
depends on VIDEO_V4L2 && I2C
@@ -1229,6 +1255,7 @@ config VIDEO_MT9P031
select MEDIA_CONTROLLER
select VIDEO_V4L2_SUBDEV_API
select VIDEO_APTINA_PLL
+ select V4L2_FWNODE
help
This is a Video4Linux2 sensor driver for the Aptina
(Micron) mt9p031 5 Mpixel camera.
diff --git a/drivers/media/i2c/Makefile b/drivers/media/i2c/Makefile
index 83268f20aa3a..4d4fe08d7a6a 100644
--- a/drivers/media/i2c/Makefile
+++ b/drivers/media/i2c/Makefile
@@ -89,6 +89,7 @@ obj-$(CONFIG_VIDEO_OV9640) += ov9640.o
obj-$(CONFIG_VIDEO_OV9650) += ov9650.o
obj-$(CONFIG_VIDEO_OV9734) += ov9734.o
obj-$(CONFIG_VIDEO_OV13858) += ov13858.o
+obj-$(CONFIG_VIDEO_OV13B10) += ov13b10.o
obj-$(CONFIG_VIDEO_MT9M001) += mt9m001.o
obj-$(CONFIG_VIDEO_MT9M032) += mt9m032.o
obj-$(CONFIG_VIDEO_MT9M111) += mt9m111.o
@@ -117,6 +118,7 @@ obj-$(CONFIG_VIDEO_ML86V7667) += ml86v7667.o
obj-$(CONFIG_VIDEO_OV2659) += ov2659.o
obj-$(CONFIG_VIDEO_TC358743) += tc358743.o
obj-$(CONFIG_VIDEO_HI556) += hi556.o
+obj-$(CONFIG_VIDEO_HI846) += hi846.o
obj-$(CONFIG_VIDEO_IMX208) += imx208.o
obj-$(CONFIG_VIDEO_IMX214) += imx214.o
obj-$(CONFIG_VIDEO_IMX219) += imx219.o
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index 122e1fdccd96..44768b59a6ff 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -41,7 +41,7 @@ static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "debug level (0-2)");
-MODULE_DESCRIPTION("Analog Devices ADV7604 video decoder driver");
+MODULE_DESCRIPTION("Analog Devices ADV7604/10/11/12 video decoder driver");
MODULE_AUTHOR("Hans Verkuil <hans.verkuil@cisco.com>");
MODULE_AUTHOR("Mats Randgaard <mats.randgaard@cisco.com>");
MODULE_LICENSE("GPL");
@@ -77,7 +77,7 @@ MODULE_LICENSE("GPL");
enum adv76xx_type {
ADV7604,
- ADV7611,
+ ADV7611, // including ADV7610
ADV7612,
};
@@ -3176,6 +3176,7 @@ static const struct adv76xx_chip_info adv76xx_chip_info[] = {
static const struct i2c_device_id adv76xx_i2c_id[] = {
{ "adv7604", (kernel_ulong_t)&adv76xx_chip_info[ADV7604] },
+ { "adv7610", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7611", (kernel_ulong_t)&adv76xx_chip_info[ADV7611] },
{ "adv7612", (kernel_ulong_t)&adv76xx_chip_info[ADV7612] },
{ }
@@ -3183,6 +3184,7 @@ static const struct i2c_device_id adv76xx_i2c_id[] = {
MODULE_DEVICE_TABLE(i2c, adv76xx_i2c_id);
static const struct of_device_id adv76xx_of_id[] __maybe_unused = {
+ { .compatible = "adi,adv7610", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7611", .data = &adv76xx_chip_info[ADV7611] },
{ .compatible = "adi,adv7612", .data = &adv76xx_chip_info[ADV7612] },
{ }
@@ -3500,8 +3502,8 @@ static int adv76xx_probe(struct i2c_client *client,
return -ENODEV;
}
if (val != 0x68) {
- v4l2_err(sd, "not an adv7604 on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an ADV7604 on address 0x%x\n",
+ client->addr << 1);
return -ENODEV;
}
break;
@@ -3525,8 +3527,9 @@ static int adv76xx_probe(struct i2c_client *client,
val |= val2;
if ((state->info->type == ADV7611 && val != 0x2051) ||
(state->info->type == ADV7612 && val != 0x2041)) {
- v4l2_err(sd, "not an adv761x on address 0x%x\n",
- client->addr << 1);
+ v4l2_err(sd, "not an %s on address 0x%x\n",
+ state->info->type == ADV7611 ? "ADV7610/11" : "ADV7612",
+ client->addr << 1);
return -ENODEV;
}
break;
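
The adv7604 hunks add ADV7610 support purely through the match tables: both the new I2C id and the new OF compatible point at the existing ADV7611 chip-info entry, so no new code path is introduced. The general table idiom, with placeholder compatibles and data:

static const struct of_device_id my_of_match[] = {
	{ .compatible = "vendor,chip-a",  .data = &my_chip_info[CHIP_A] },	/* placeholders */
	{ .compatible = "vendor,chip-a2", .data = &my_chip_info[CHIP_A] },	/* same chip info */
	{ }
};
MODULE_DEVICE_TABLE(of, my_of_match);
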
diff --git a/drivers/media/i2c/dw9714.c b/drivers/media/i2c/dw9714.c
index c8b4292512dc..3863dfeb8293 100644
--- a/drivers/media/i2c/dw9714.c
+++ b/drivers/media/i2c/dw9714.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#define DW9714_NAME "dw9714"
#define DW9714_MAX_FOCUS_POS 1023
@@ -100,7 +101,15 @@ static const struct v4l2_subdev_internal_ops dw9714_int_ops = {
.close = dw9714_close,
};
-static const struct v4l2_subdev_ops dw9714_ops = { };
+static const struct v4l2_subdev_core_ops dw9714_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
+static const struct v4l2_subdev_ops dw9714_ops = {
+ .core = &dw9714_core_ops,
+};
static void dw9714_subdev_cleanup(struct dw9714_device *dw9714_dev)
{
@@ -137,7 +146,8 @@ static int dw9714_probe(struct i2c_client *client)
return -ENOMEM;
v4l2_i2c_subdev_init(&dw9714_dev->sd, client, &dw9714_ops);
- dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ dw9714_dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
dw9714_dev->sd.internal_ops = &dw9714_int_ops;
rval = dw9714_init_controls(dw9714_dev);
diff --git a/drivers/media/i2c/hi846.c b/drivers/media/i2c/hi846.c
new file mode 100644
index 000000000000..822ce3021fde
--- /dev/null
+++ b/drivers/media/i2c/hi846.c
@@ -0,0 +1,2190 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Purism SPC
+
+#include <asm/unaligned.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define HI846_MEDIA_BUS_FORMAT MEDIA_BUS_FMT_SGBRG10_1X10
+#define HI846_RGB_DEPTH 10
+
+/* Frame length lines / vertical timings */
+#define HI846_REG_FLL 0x0006
+#define HI846_FLL_MAX 0xffff
+
+/* Horizontal timing */
+#define HI846_REG_LLP 0x0008
+#define HI846_LINE_LENGTH 3800
+
+#define HI846_REG_BINNING_MODE 0x000c
+
+#define HI846_REG_IMAGE_ORIENTATION 0x000e
+
+#define HI846_REG_UNKNOWN_0022 0x0022
+
+#define HI846_REG_Y_ADDR_START_VACT_H 0x0026
+#define HI846_REG_Y_ADDR_START_VACT_L 0x0027
+#define HI846_REG_UNKNOWN_0028 0x0028
+
+#define HI846_REG_Y_ADDR_END_VACT_H 0x002c
+#define HI846_REG_Y_ADDR_END_VACT_L 0x002d
+
+#define HI846_REG_Y_ODD_INC_FOBP 0x002e
+#define HI846_REG_Y_EVEN_INC_FOBP 0x002f
+
+#define HI846_REG_Y_ODD_INC_VACT 0x0032
+#define HI846_REG_Y_EVEN_INC_VACT 0x0033
+
+#define HI846_REG_GROUPED_PARA_HOLD 0x0046
+
+#define HI846_REG_TG_ENABLE 0x004c
+
+#define HI846_REG_UNKNOWN_005C 0x005c
+
+#define HI846_REG_UNKNOWN_006A 0x006a
+
+/*
+ * Long exposure time. Actually, exposure is a 20 bit value that
+ * includes the lower 4 bits of 0x0073 too. Only 16 bits are used
+ * right now
+ */
+#define HI846_REG_EXPOSURE 0x0074
+#define HI846_EXPOSURE_MIN 6
+#define HI846_EXPOSURE_MAX_MARGIN 2
+#define HI846_EXPOSURE_STEP 1
+
+/* Analog gain controls from sensor */
+#define HI846_REG_ANALOG_GAIN 0x0077
+#define HI846_ANAL_GAIN_MIN 0
+#define HI846_ANAL_GAIN_MAX 240
+#define HI846_ANAL_GAIN_STEP 8
+
+/* Digital gain controls from sensor */
+#define HI846_REG_MWB_GR_GAIN_H 0x0078
+#define HI846_REG_MWB_GR_GAIN_L 0x0079
+#define HI846_REG_MWB_GB_GAIN_H 0x007a
+#define HI846_REG_MWB_GB_GAIN_L 0x007b
+#define HI846_REG_MWB_R_GAIN_H 0x007c
+#define HI846_REG_MWB_R_GAIN_L 0x007d
+#define HI846_REG_MWB_B_GAIN_H 0x007e
+#define HI846_REG_MWB_B_GAIN_L 0x007f
+#define HI846_DGTL_GAIN_MIN 512
+#define HI846_DGTL_GAIN_MAX 8191
+#define HI846_DGTL_GAIN_STEP 1
+#define HI846_DGTL_GAIN_DEFAULT 512
+
+#define HI846_REG_X_ADDR_START_HACT_H 0x0120
+#define HI846_REG_X_ADDR_END_HACT_H 0x0122
+
+#define HI846_REG_UNKNOWN_012A 0x012a
+
+#define HI846_REG_UNKNOWN_0200 0x0200
+
+#define HI846_REG_UNKNOWN_021C 0x021c
+#define HI846_REG_UNKNOWN_021E 0x021e
+
+#define HI846_REG_UNKNOWN_0402 0x0402
+#define HI846_REG_UNKNOWN_0404 0x0404
+#define HI846_REG_UNKNOWN_0408 0x0408
+#define HI846_REG_UNKNOWN_0410 0x0410
+#define HI846_REG_UNKNOWN_0412 0x0412
+#define HI846_REG_UNKNOWN_0414 0x0414
+
+#define HI846_REG_UNKNOWN_0418 0x0418
+
+#define HI846_REG_UNKNOWN_051E 0x051e
+
+/* Formatter */
+#define HI846_REG_X_START_H 0x0804
+#define HI846_REG_X_START_L 0x0805
+
+/* MIPI */
+#define HI846_REG_UNKNOWN_0900 0x0900
+#define HI846_REG_MIPI_TX_OP_EN 0x0901
+#define HI846_REG_MIPI_TX_OP_MODE 0x0902
+#define HI846_RAW8 BIT(5)
+
+#define HI846_REG_UNKNOWN_090C 0x090c
+#define HI846_REG_UNKNOWN_090E 0x090e
+
+#define HI846_REG_UNKNOWN_0914 0x0914
+#define HI846_REG_TLPX 0x0915
+#define HI846_REG_TCLK_PREPARE 0x0916
+#define HI846_REG_TCLK_ZERO 0x0917
+#define HI846_REG_UNKNOWN_0918 0x0918
+#define HI846_REG_THS_PREPARE 0x0919
+#define HI846_REG_THS_ZERO 0x091a
+#define HI846_REG_THS_TRAIL 0x091b
+#define HI846_REG_TCLK_POST 0x091c
+#define HI846_REG_TCLK_TRAIL_MIN 0x091d
+#define HI846_REG_UNKNOWN_091E 0x091e
+
+#define HI846_REG_UNKNOWN_0954 0x0954
+#define HI846_REG_UNKNOWN_0956 0x0956
+#define HI846_REG_UNKNOWN_0958 0x0958
+#define HI846_REG_UNKNOWN_095A 0x095a
+
+/* ISP Common */
+#define HI846_REG_MODE_SELECT 0x0a00
+#define HI846_MODE_STANDBY 0x00
+#define HI846_MODE_STREAMING 0x01
+#define HI846_REG_FAST_STANDBY_MODE 0x0a02
+#define HI846_REG_ISP_EN_H 0x0a04
+
+/* Test Pattern Control */
+#define HI846_REG_ISP 0x0a05
+#define HI846_REG_ISP_TPG_EN 0x01
+#define HI846_REG_TEST_PATTERN 0x020a /* 1-9 */
+
+#define HI846_REG_UNKNOWN_0A0C 0x0a0c
+
+/* Windowing */
+#define HI846_REG_X_OUTPUT_SIZE_H 0x0a12
+#define HI846_REG_X_OUTPUT_SIZE_L 0x0a13
+#define HI846_REG_Y_OUTPUT_SIZE_H 0x0a14
+#define HI846_REG_Y_OUTPUT_SIZE_L 0x0a15
+
+/* ISP Common */
+#define HI846_REG_PEDESTAL_EN 0x0a1a
+
+#define HI846_REG_UNKNOWN_0A1E 0x0a1e
+
+/* Horizontal Binning Mode */
+#define HI846_REG_HBIN_MODE 0x0a22
+
+#define HI846_REG_UNKNOWN_0A24 0x0a24
+#define HI846_REG_UNKNOWN_0B02 0x0b02
+#define HI846_REG_UNKNOWN_0B10 0x0b10
+#define HI846_REG_UNKNOWN_0B12 0x0b12
+#define HI846_REG_UNKNOWN_0B14 0x0b14
+
+/* BLC (Black Level Calibration) */
+#define HI846_REG_BLC_CTL0 0x0c00
+
+#define HI846_REG_UNKNOWN_0C06 0x0c06
+#define HI846_REG_UNKNOWN_0C10 0x0c10
+#define HI846_REG_UNKNOWN_0C12 0x0c12
+#define HI846_REG_UNKNOWN_0C14 0x0c14
+#define HI846_REG_UNKNOWN_0C16 0x0c16
+
+#define HI846_REG_UNKNOWN_0E04 0x0e04
+
+#define HI846_REG_CHIP_ID_L 0x0f16
+#define HI846_REG_CHIP_ID_H 0x0f17
+#define HI846_CHIP_ID_L 0x46
+#define HI846_CHIP_ID_H 0x08
+
+#define HI846_REG_UNKNOWN_0F04 0x0f04
+#define HI846_REG_UNKNOWN_0F08 0x0f08
+
+/* PLL */
+#define HI846_REG_PLL_CFG_MIPI2_H 0x0f2a
+#define HI846_REG_PLL_CFG_MIPI2_L 0x0f2b
+
+#define HI846_REG_UNKNOWN_0F30 0x0f30
+#define HI846_REG_PLL_CFG_RAMP1_H 0x0f32
+#define HI846_REG_UNKNOWN_0F36 0x0f36
+#define HI846_REG_PLL_CFG_MIPI1_H 0x0f38
+
+#define HI846_REG_UNKNOWN_2008 0x2008
+#define HI846_REG_UNKNOWN_326E 0x326e
+
+struct hi846_reg {
+ u16 address;
+ u16 val;
+};
+
+struct hi846_reg_list {
+ u32 num_of_regs;
+ const struct hi846_reg *regs;
+};
+
+struct hi846_mode {
+ /* Frame width in pixels */
+ u32 width;
+
+ /* Frame height in pixels */
+ u32 height;
+
+ /* Horizontal timing size */
+ u32 llp;
+
+ /* Link frequency needed for this resolution */
+ u8 link_freq_index;
+
+ u16 fps;
+
+ /* Vertical timing size */
+ u16 frame_len;
+
+ const struct hi846_reg_list reg_list_config;
+ const struct hi846_reg_list reg_list_2lane;
+ const struct hi846_reg_list reg_list_4lane;
+
+ /* Position inside of the 3264x2448 pixel array */
+ struct v4l2_rect crop;
+};
+
+static const struct hi846_reg hi846_init_2lane[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ /* regs below are unknown */
+ {0x2000, 0x100a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0007},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3248, 0xfca8},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+ /* regs above are unknown */
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_UNKNOWN_0E04, 0x0012},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x1111},
+ {HI846_REG_Y_ODD_INC_VACT, 0x1111},
+ {HI846_REG_UNKNOWN_0022, 0x0008},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_UNKNOWN_0028, 0x0017},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x2101},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_IMAGE_ORIENTATION, 0x0100},
+ {HI846_REG_BINNING_MODE, 0x0022},
+ {HI846_REG_HBIN_MODE, 0x0000},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0cc0},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x0990},
+ {HI846_REG_EXPOSURE, 0x09d8},
+ {HI846_REG_ANALOG_GAIN, 0x0000},
+ {HI846_REG_GROUPED_PARA_HOLD, 0x0000},
+ {HI846_REG_UNKNOWN_051E, 0x0000},
+ {HI846_REG_UNKNOWN_0200, 0x0400},
+ {HI846_REG_PEDESTAL_EN, 0x0c00},
+ {HI846_REG_UNKNOWN_0A0C, 0x0010},
+ {HI846_REG_UNKNOWN_0A1E, 0x0ccf},
+ {HI846_REG_UNKNOWN_0402, 0x0110},
+ {HI846_REG_UNKNOWN_0404, 0x00f4},
+ {HI846_REG_UNKNOWN_0408, 0x0000},
+ {HI846_REG_UNKNOWN_0410, 0x008d},
+ {HI846_REG_UNKNOWN_0412, 0x011a},
+ {HI846_REG_UNKNOWN_0414, 0x864c},
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+ {HI846_REG_BLC_CTL0, 0x9150},
+ {HI846_REG_UNKNOWN_0C06, 0x0021},
+ {HI846_REG_UNKNOWN_0C10, 0x0040},
+ {HI846_REG_UNKNOWN_0C12, 0x0040},
+ {HI846_REG_UNKNOWN_0C14, 0x0040},
+ {HI846_REG_UNKNOWN_0C16, 0x0040},
+ {HI846_REG_FAST_STANDBY_MODE, 0x0100},
+ {HI846_REG_ISP_EN_H, 0x014a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_012A, 0x03b4},
+ {HI846_REG_X_ADDR_START_HACT_H, 0x0046},
+ {HI846_REG_X_ADDR_END_HACT_H, 0x0376},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6821},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+ {HI846_REG_UNKNOWN_0900, 0x0320},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc31a},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0306},
+ {HI846_REG_THS_ZERO, 0x0b09},
+ {HI846_REG_TCLK_POST, 0x0c07},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x042a},
+ {HI846_REG_UNKNOWN_090E, 0x006b},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca00},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_UNKNOWN_0F08, 0x2f04},
+ {HI846_REG_UNKNOWN_0F30, 0x001f},
+ {HI846_REG_UNKNOWN_0F36, 0x001f},
+ {HI846_REG_UNKNOWN_0F04, 0x3a00},
+ {HI846_REG_PLL_CFG_RAMP1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI1_H, 0x025a},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x0024},
+ {HI846_REG_UNKNOWN_006A, 0x0100},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg hi846_init_4lane[] = {
+ {0x2000, 0x987a},
+ {0x2002, 0x00ff},
+ {0x2004, 0x0047},
+ {0x2006, 0x3fff},
+ {0x2008, 0x3fff},
+ {0x200a, 0xc216},
+ {0x200c, 0x1292},
+ {0x200e, 0xc01a},
+ {0x2010, 0x403d},
+ {0x2012, 0x000e},
+ {0x2014, 0x403e},
+ {0x2016, 0x0b80},
+ {0x2018, 0x403f},
+ {0x201a, 0x82ae},
+ {0x201c, 0x1292},
+ {0x201e, 0xc00c},
+ {0x2020, 0x4130},
+ {0x2022, 0x43e2},
+ {0x2024, 0x0180},
+ {0x2026, 0x4130},
+ {0x2028, 0x7400},
+ {0x202a, 0x5000},
+ {0x202c, 0x0253},
+ {0x202e, 0x0ad1},
+ {0x2030, 0x2360},
+ {0x2032, 0x0009},
+ {0x2034, 0x5020},
+ {0x2036, 0x000b},
+ {0x2038, 0x0002},
+ {0x203a, 0x0044},
+ {0x203c, 0x0016},
+ {0x203e, 0x1792},
+ {0x2040, 0x7002},
+ {0x2042, 0x154f},
+ {0x2044, 0x00d5},
+ {0x2046, 0x000b},
+ {0x2048, 0x0019},
+ {0x204a, 0x1698},
+ {0x204c, 0x000e},
+ {0x204e, 0x099a},
+ {0x2050, 0x0058},
+ {0x2052, 0x7000},
+ {0x2054, 0x1799},
+ {0x2056, 0x0310},
+ {0x2058, 0x03c3},
+ {0x205a, 0x004c},
+ {0x205c, 0x064a},
+ {0x205e, 0x0001},
+ {0x2060, 0x0007},
+ {0x2062, 0x0bc7},
+ {0x2064, 0x0055},
+ {0x2066, 0x7000},
+ {0x2068, 0x1550},
+ {0x206a, 0x158a},
+ {0x206c, 0x0004},
+ {0x206e, 0x1488},
+ {0x2070, 0x7010},
+ {0x2072, 0x1508},
+ {0x2074, 0x0004},
+ {0x2076, 0x0016},
+ {0x2078, 0x03d5},
+ {0x207a, 0x0055},
+ {0x207c, 0x08ca},
+ {0x207e, 0x2019},
+ {0x2080, 0x0007},
+ {0x2082, 0x7057},
+ {0x2084, 0x0fc7},
+ {0x2086, 0x5041},
+ {0x2088, 0x12c8},
+ {0x208a, 0x5060},
+ {0x208c, 0x5080},
+ {0x208e, 0x2084},
+ {0x2090, 0x12c8},
+ {0x2092, 0x7800},
+ {0x2094, 0x0802},
+ {0x2096, 0x040f},
+ {0x2098, 0x1007},
+ {0x209a, 0x0803},
+ {0x209c, 0x080b},
+ {0x209e, 0x3803},
+ {0x20a0, 0x0807},
+ {0x20a2, 0x0404},
+ {0x20a4, 0x0400},
+ {0x20a6, 0xffff},
+ {0x20a8, 0xf0b2},
+ {0x20aa, 0xffef},
+ {0x20ac, 0x0a84},
+ {0x20ae, 0x1292},
+ {0x20b0, 0xc02e},
+ {0x20b2, 0x4130},
+ {0x20b4, 0xf0b2},
+ {0x20b6, 0xffbf},
+ {0x20b8, 0x2004},
+ {0x20ba, 0x403f},
+ {0x20bc, 0x00c3},
+ {0x20be, 0x4fe2},
+ {0x20c0, 0x8318},
+ {0x20c2, 0x43cf},
+ {0x20c4, 0x0000},
+ {0x20c6, 0x9382},
+ {0x20c8, 0xc314},
+ {0x20ca, 0x2003},
+ {0x20cc, 0x12b0},
+ {0x20ce, 0xcab0},
+ {0x20d0, 0x4130},
+ {0x20d2, 0x12b0},
+ {0x20d4, 0xc90a},
+ {0x20d6, 0x4130},
+ {0x20d8, 0x42d2},
+ {0x20da, 0x8318},
+ {0x20dc, 0x00c3},
+ {0x20de, 0x9382},
+ {0x20e0, 0xc314},
+ {0x20e2, 0x2009},
+ {0x20e4, 0x120b},
+ {0x20e6, 0x120a},
+ {0x20e8, 0x1209},
+ {0x20ea, 0x1208},
+ {0x20ec, 0x1207},
+ {0x20ee, 0x1206},
+ {0x20f0, 0x4030},
+ {0x20f2, 0xc15e},
+ {0x20f4, 0x4130},
+ {0x20f6, 0x1292},
+ {0x20f8, 0xc008},
+ {0x20fa, 0x4130},
+ {0x20fc, 0x42d2},
+ {0x20fe, 0x82a1},
+ {0x2100, 0x00c2},
+ {0x2102, 0x1292},
+ {0x2104, 0xc040},
+ {0x2106, 0x4130},
+ {0x2108, 0x1292},
+ {0x210a, 0xc006},
+ {0x210c, 0x42a2},
+ {0x210e, 0x7324},
+ {0x2110, 0x9382},
+ {0x2112, 0xc314},
+ {0x2114, 0x2011},
+ {0x2116, 0x425f},
+ {0x2118, 0x82a1},
+ {0x211a, 0xf25f},
+ {0x211c, 0x00c1},
+ {0x211e, 0xf35f},
+ {0x2120, 0x2406},
+ {0x2122, 0x425f},
+ {0x2124, 0x00c0},
+ {0x2126, 0xf37f},
+ {0x2128, 0x522f},
+ {0x212a, 0x4f82},
+ {0x212c, 0x7324},
+ {0x212e, 0x425f},
+ {0x2130, 0x82d4},
+ {0x2132, 0xf35f},
+ {0x2134, 0x4fc2},
+ {0x2136, 0x01b3},
+ {0x2138, 0x93c2},
+ {0x213a, 0x829f},
+ {0x213c, 0x2421},
+ {0x213e, 0x403e},
+ {0x2140, 0xfffe},
+ {0x2142, 0x40b2},
+ {0x2144, 0xec78},
+ {0x2146, 0x831c},
+ {0x2148, 0x40b2},
+ {0x214a, 0xec78},
+ {0x214c, 0x831e},
+ {0x214e, 0x40b2},
+ {0x2150, 0xec78},
+ {0x2152, 0x8320},
+ {0x2154, 0xb3d2},
+ {0x2156, 0x008c},
+ {0x2158, 0x2405},
+ {0x215a, 0x4e0f},
+ {0x215c, 0x503f},
+ {0x215e, 0xffd8},
+ {0x2160, 0x4f82},
+ {0x2162, 0x831c},
+ {0x2164, 0x90f2},
+ {0x2166, 0x0003},
+ {0x2168, 0x008c},
+ {0x216a, 0x2401},
+ {0x216c, 0x4130},
+ {0x216e, 0x421f},
+ {0x2170, 0x831c},
+ {0x2172, 0x5e0f},
+ {0x2174, 0x4f82},
+ {0x2176, 0x831e},
+ {0x2178, 0x5e0f},
+ {0x217a, 0x4f82},
+ {0x217c, 0x8320},
+ {0x217e, 0x3ff6},
+ {0x2180, 0x432e},
+ {0x2182, 0x3fdf},
+ {0x2184, 0x421f},
+ {0x2186, 0x7100},
+ {0x2188, 0x4f0e},
+ {0x218a, 0x503e},
+ {0x218c, 0xffd8},
+ {0x218e, 0x4e82},
+ {0x2190, 0x7a04},
+ {0x2192, 0x421e},
+ {0x2194, 0x831c},
+ {0x2196, 0x5f0e},
+ {0x2198, 0x4e82},
+ {0x219a, 0x7a06},
+ {0x219c, 0x0b00},
+ {0x219e, 0x7304},
+ {0x21a0, 0x0050},
+ {0x21a2, 0x40b2},
+ {0x21a4, 0xd081},
+ {0x21a6, 0x0b88},
+ {0x21a8, 0x421e},
+ {0x21aa, 0x831e},
+ {0x21ac, 0x5f0e},
+ {0x21ae, 0x4e82},
+ {0x21b0, 0x7a0e},
+ {0x21b2, 0x521f},
+ {0x21b4, 0x8320},
+ {0x21b6, 0x4f82},
+ {0x21b8, 0x7a10},
+ {0x21ba, 0x0b00},
+ {0x21bc, 0x7304},
+ {0x21be, 0x007a},
+ {0x21c0, 0x40b2},
+ {0x21c2, 0x0081},
+ {0x21c4, 0x0b88},
+ {0x21c6, 0x4392},
+ {0x21c8, 0x7a0a},
+ {0x21ca, 0x0800},
+ {0x21cc, 0x7a0c},
+ {0x21ce, 0x0b00},
+ {0x21d0, 0x7304},
+ {0x21d2, 0x022b},
+ {0x21d4, 0x40b2},
+ {0x21d6, 0xd081},
+ {0x21d8, 0x0b88},
+ {0x21da, 0x0b00},
+ {0x21dc, 0x7304},
+ {0x21de, 0x0255},
+ {0x21e0, 0x40b2},
+ {0x21e2, 0x0081},
+ {0x21e4, 0x0b88},
+ {0x21e6, 0x4130},
+ {0x23fe, 0xc056},
+ {0x3232, 0xfc0c},
+ {0x3236, 0xfc22},
+ {0x3238, 0xfcfc},
+ {0x323a, 0xfd84},
+ {0x323c, 0xfd08},
+ {0x3246, 0xfcd8},
+ {0x3248, 0xfca8},
+ {0x324e, 0xfcb4},
+ {0x326a, 0x8302},
+ {0x326c, 0x830a},
+ {0x326e, 0x0000},
+ {0x32ca, 0xfc28},
+ {0x32cc, 0xc3bc},
+ {0x32ce, 0xc34c},
+ {0x32d0, 0xc35a},
+ {0x32d2, 0xc368},
+ {0x32d4, 0xc376},
+ {0x32d6, 0xc3c2},
+ {0x32d8, 0xc3e6},
+ {0x32da, 0x0003},
+ {0x32dc, 0x0003},
+ {0x32de, 0x00c7},
+ {0x32e0, 0x0031},
+ {0x32e2, 0x0031},
+ {0x32e4, 0x0031},
+ {0x32e6, 0xfc28},
+ {0x32e8, 0xc3bc},
+ {0x32ea, 0xc384},
+ {0x32ec, 0xc392},
+ {0x32ee, 0xc3a0},
+ {0x32f0, 0xc3ae},
+ {0x32f2, 0xc3c4},
+ {0x32f4, 0xc3e6},
+ {0x32f6, 0x0003},
+ {0x32f8, 0x0003},
+ {0x32fa, 0x00c7},
+ {0x32fc, 0x0031},
+ {0x32fe, 0x0031},
+ {0x3300, 0x0031},
+ {0x3302, 0x82ca},
+ {0x3304, 0xc164},
+ {0x3306, 0x82e6},
+ {0x3308, 0xc19c},
+ {0x330a, 0x001f},
+ {0x330c, 0x001a},
+ {0x330e, 0x0034},
+ {0x3310, 0x0000},
+ {0x3312, 0x0000},
+ {0x3314, 0xfc94},
+ {0x3316, 0xc3d8},
+
+ {0x0a00, 0x0000},
+ {0x0e04, 0x0012},
+ {0x002e, 0x1111},
+ {0x0032, 0x1111},
+ {0x0022, 0x0008},
+ {0x0026, 0x0040},
+ {0x0028, 0x0017},
+ {0x002c, 0x09cf},
+ {0x005c, 0x2101},
+ {0x0006, 0x09de},
+ {0x0008, 0x0ed8},
+ {0x000e, 0x0100},
+ {0x000c, 0x0022},
+ {0x0a22, 0x0000},
+ {0x0a24, 0x0000},
+ {0x0804, 0x0000},
+ {0x0a12, 0x0cc0},
+ {0x0a14, 0x0990},
+ {0x0074, 0x09d8},
+ {0x0076, 0x0000},
+ {0x051e, 0x0000},
+ {0x0200, 0x0400},
+ {0x0a1a, 0x0c00},
+ {0x0a0c, 0x0010},
+ {0x0a1e, 0x0ccf},
+ {0x0402, 0x0110},
+ {0x0404, 0x00f4},
+ {0x0408, 0x0000},
+ {0x0410, 0x008d},
+ {0x0412, 0x011a},
+ {0x0414, 0x864c},
+ /* for OTP */
+ {0x021c, 0x0003},
+ {0x021e, 0x0235},
+ /* for OTP */
+ {0x0c00, 0x9950},
+ {0x0c06, 0x0021},
+ {0x0c10, 0x0040},
+ {0x0c12, 0x0040},
+ {0x0c14, 0x0040},
+ {0x0c16, 0x0040},
+ {0x0a02, 0x0100},
+ {0x0a04, 0x015a},
+ {0x0418, 0x0000},
+ {0x0128, 0x0028},
+ {0x012a, 0xffff},
+ {0x0120, 0x0046},
+ {0x0122, 0x0376},
+ {0x012c, 0x0020},
+ {0x012e, 0xffff},
+ {0x0124, 0x0040},
+ {0x0126, 0x0378},
+ {0x0746, 0x0050},
+ {0x0748, 0x01d5},
+ {0x074a, 0x022b},
+ {0x074c, 0x03b0},
+ {0x0756, 0x043f},
+ {0x0758, 0x3f1d},
+ {0x0b02, 0xe04d},
+ {0x0b10, 0x6821},
+ {0x0b12, 0x0120},
+ {0x0b14, 0x0001},
+ {0x2008, 0x38fd},
+ {0x326e, 0x0000},
+ {0x0900, 0x0300},
+ {0x0902, 0xc319},
+ {0x0914, 0xc109},
+ {0x0916, 0x061a},
+ {0x0918, 0x0407},
+ {0x091a, 0x0a0b},
+ {0x091c, 0x0e08},
+ {0x091e, 0x0a00},
+ {0x090c, 0x0427},
+ {0x090e, 0x0059},
+ {0x0954, 0x0089},
+ {0x0956, 0x0000},
+ {0x0958, 0xca80},
+ {0x095a, 0x9240},
+ {0x0f08, 0x2f04},
+ {0x0f30, 0x001f},
+ {0x0f36, 0x001f},
+ {0x0f04, 0x3a00},
+ {0x0f32, 0x025a},
+ {0x0f38, 0x025a},
+ {0x0f2a, 0x4124},
+ {0x006a, 0x0100},
+ {0x004c, 0x0100},
+ {0x0044, 0x0001},
+};
+
+static const struct hi846_reg mode_640x480_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x7711},
+ {HI846_REG_Y_ODD_INC_VACT, 0x7711},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0148},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x08c7},
+ {HI846_REG_UNKNOWN_005C, 0x4404},
+ {HI846_REG_FLL, 0x0277},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0322},
+ {HI846_REG_HBIN_MODE, 0x0200},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0058},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0280},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x01e0},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0210},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x7021},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0001},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_640x480_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x009a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0238},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x07d7},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x034a},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x00b0},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0500},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x02d0},
+ {HI846_REG_EXPOSURE, 0x0344},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0410},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0145},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1280x720_mipi_4lane[] = {
+ /* 360Mbps */
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x008a},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_config[] = {
+ {HI846_REG_MODE_SELECT, 0x0000},
+ {HI846_REG_Y_ODD_INC_FOBP, 0x3311},
+ {HI846_REG_Y_ODD_INC_VACT, 0x3311},
+ {HI846_REG_Y_ADDR_START_VACT_H, 0x0040},
+ {HI846_REG_Y_ADDR_END_VACT_H, 0x09cf},
+ {HI846_REG_UNKNOWN_005C, 0x4202},
+ {HI846_REG_FLL, 0x09de},
+ {HI846_REG_LLP, 0x0ed8},
+ {HI846_REG_BINNING_MODE, 0x0122},
+ {HI846_REG_HBIN_MODE, 0x0100},
+ {HI846_REG_UNKNOWN_0A24, 0x0000},
+ {HI846_REG_X_START_H, 0x0000},
+ {HI846_REG_X_OUTPUT_SIZE_H, 0x0660},
+ {HI846_REG_Y_OUTPUT_SIZE_H, 0x04c8},
+ {HI846_REG_EXPOSURE, 0x09d8},
+
+ /* For OTP */
+ {HI846_REG_UNKNOWN_021C, 0x0003},
+ {HI846_REG_UNKNOWN_021E, 0x0235},
+
+ {HI846_REG_ISP_EN_H, 0x016a},
+ {HI846_REG_UNKNOWN_0418, 0x0000},
+ {HI846_REG_UNKNOWN_0B02, 0xe04d},
+ {HI846_REG_UNKNOWN_0B10, 0x6c21},
+ {HI846_REG_UNKNOWN_0B12, 0x0120},
+ {HI846_REG_UNKNOWN_0B14, 0x0005},
+ {HI846_REG_UNKNOWN_2008, 0x38fd},
+ {HI846_REG_UNKNOWN_326E, 0x0000},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_2lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0x4319},
+ {HI846_REG_UNKNOWN_0914, 0xc109},
+ {HI846_REG_TCLK_PREPARE, 0x061a},
+ {HI846_REG_UNKNOWN_0918, 0x0407},
+ {HI846_REG_THS_ZERO, 0x0a0b},
+ {HI846_REG_TCLK_POST, 0x0e08},
+ {HI846_REG_UNKNOWN_091E, 0x0a00},
+ {HI846_REG_UNKNOWN_090C, 0x0427},
+ {HI846_REG_UNKNOWN_090E, 0x0069},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4124},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const struct hi846_reg mode_1632x1224_mipi_4lane[] = {
+ {HI846_REG_UNKNOWN_0900, 0x0300},
+ {HI846_REG_MIPI_TX_OP_MODE, 0xc319},
+ {HI846_REG_UNKNOWN_0914, 0xc105},
+ {HI846_REG_TCLK_PREPARE, 0x030c},
+ {HI846_REG_UNKNOWN_0918, 0x0304},
+ {HI846_REG_THS_ZERO, 0x0708},
+ {HI846_REG_TCLK_POST, 0x0b04},
+ {HI846_REG_UNKNOWN_091E, 0x0500},
+ {HI846_REG_UNKNOWN_090C, 0x0208},
+ {HI846_REG_UNKNOWN_090E, 0x001c},
+ {HI846_REG_UNKNOWN_0954, 0x0089},
+ {HI846_REG_UNKNOWN_0956, 0x0000},
+ {HI846_REG_UNKNOWN_0958, 0xca80},
+ {HI846_REG_UNKNOWN_095A, 0x9240},
+ {HI846_REG_PLL_CFG_MIPI2_H, 0x4924},
+ {HI846_REG_TG_ENABLE, 0x0100},
+};
+
+static const char * const hi846_test_pattern_menu[] = {
+ "Disabled",
+ "Solid Colour",
+ "100% Colour Bars",
+ "Fade To Grey Colour Bars",
+ "PN9",
+ "Gradient Horizontal",
+ "Gradient Vertical",
+ "Check Board",
+ "Slant Pattern",
+ "Resolution Pattern",
+};
+
+#define FREQ_INDEX_640 0
+#define FREQ_INDEX_1280 1
+static const s64 hi846_link_freqs[] = {
+ [FREQ_INDEX_640] = 80000000,
+ [FREQ_INDEX_1280] = 200000000,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_2lane),
+ .regs = hi846_init_2lane,
+};
+
+static const struct hi846_reg_list hi846_init_regs_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(hi846_init_4lane),
+ .regs = hi846_init_4lane,
+};
+
+static const struct hi846_mode supported_modes[] = {
+ {
+ .width = 640,
+ .height = 480,
+ .link_freq_index = FREQ_INDEX_640,
+ .fps = 120,
+ .frame_len = 631,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_config),
+ .regs = mode_640x480_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_640x480_mipi_2lane),
+ .regs = mode_640x480_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = 0,
+ },
+ .crop = {
+ .left = 0x58,
+ .top = 0x148,
+ .width = 640 * 4,
+ .height = 480 * 4,
+ },
+ },
+ {
+ .width = 1280,
+ .height = 720,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 90,
+ .frame_len = 842,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_config),
+ .regs = mode_1280x720_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_2lane),
+ .regs = mode_1280x720_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1280x720_mipi_4lane),
+ .regs = mode_1280x720_mipi_4lane,
+ },
+ .crop = {
+ .left = 0xb0,
+ .top = 0x238,
+ .width = 1280 * 2,
+ .height = 720 * 2,
+ },
+ },
+ {
+ .width = 1632,
+ .height = 1224,
+ .link_freq_index = FREQ_INDEX_1280,
+ .fps = 30,
+ .frame_len = 2526,
+ .llp = HI846_LINE_LENGTH,
+ .reg_list_config = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_config),
+ .regs = mode_1632x1224_config,
+ },
+ .reg_list_2lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_2lane),
+ .regs = mode_1632x1224_mipi_2lane,
+ },
+ .reg_list_4lane = {
+ .num_of_regs = ARRAY_SIZE(mode_1632x1224_mipi_4lane),
+ .regs = mode_1632x1224_mipi_4lane,
+ },
+ .crop = {
+ .left = 0x0,
+ .top = 0x0,
+ .width = 1632 * 2,
+ .height = 1224 * 2,
+ },
+ }
+};
+
+struct hi846_datafmt {
+ u32 code;
+ enum v4l2_colorspace colorspace;
+};
+
+static const char * const hi846_supply_names[] = {
+ "vddio", /* Digital I/O (1.8V or 2.8V) */
+ "vdda", /* Analog (2.8V) */
+ "vddd", /* Digital Core (1.2V) */
+};
+
+#define HI846_NUM_SUPPLIES ARRAY_SIZE(hi846_supply_names)
+
+struct hi846 {
+ struct gpio_desc *rst_gpio;
+ struct gpio_desc *shutdown_gpio;
+ struct regulator_bulk_data supplies[HI846_NUM_SUPPLIES];
+ struct clk *clock;
+ const struct hi846_datafmt *fmt;
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+ struct v4l2_ctrl_handler ctrl_handler;
+ u8 nr_lanes;
+
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ struct mutex mutex; /* protect cur_mode, streaming and chip access */
+ const struct hi846_mode *cur_mode;
+ bool streaming;
+};
+
+static inline struct hi846 *to_hi846(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct hi846, sd);
+}
+
+static const struct hi846_datafmt hi846_colour_fmts[] = {
+ { HI846_MEDIA_BUS_FORMAT, V4L2_COLORSPACE_RAW },
+};
+
+static const struct hi846_datafmt *hi846_find_datafmt(u32 code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(hi846_colour_fmts); i++)
+ if (hi846_colour_fmts[i].code == code)
+ return &hi846_colour_fmts[i];
+
+ return NULL;
+}
+
+static inline u8 hi846_get_link_freq_index(struct hi846 *hi846)
+{
+ return hi846->cur_mode->link_freq_index;
+}
+
+static u64 hi846_get_link_freq(struct hi846 *hi846)
+{
+ u8 index = hi846_get_link_freq_index(hi846);
+
+ return hi846_link_freqs[index];
+}
+
+static u64 hi846_calc_pixel_rate(struct hi846 *hi846)
+{
+ u64 link_freq = hi846_get_link_freq(hi846);
+ u64 pixel_rate = link_freq * 2 * hi846->nr_lanes;
+
+ do_div(pixel_rate, HI846_RGB_DEPTH);
+
+ return pixel_rate;
+}
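
Aside: hi846_calc_pixel_rate() follows the usual MIPI CSI-2 relation, pixel rate = link frequency * 2 (double data rate) * lane count / bits per pixel. With the 200000000 Hz link frequency listed above, two lanes and 10-bit raw data this works out to 200000000 * 2 * 2 / 10 = 80000000 pixels per second, and twice that on four lanes; the 80 MHz link used by the 640x480 mode gives 32000000 pixels per second on two lanes.
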
+
+static int hi846_read_reg(struct hi846 *hi846, u16 reg, u8 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct i2c_msg msgs[2];
+ u8 addr_buf[2];
+ u8 data_buf[1] = {0};
+ int ret;
+
+ put_unaligned_be16(reg, addr_buf);
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = sizeof(addr_buf);
+ msgs[0].buf = addr_buf;
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = 1;
+ msgs[1].buf = data_buf;
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs)) {
+ dev_err(&client->dev, "i2c read error: %d\n", ret);
+ return -EIO;
+ }
+
+ *val = data_buf[0];
+
+ return 0;
+}
+
+static int hi846_write_reg(struct hi846 *hi846, u16 reg, u8 val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[3] = { reg >> 8, reg & 0xff, val };
+ struct i2c_msg msg[] = {
+ { .addr = client->addr, .flags = 0,
+ .len = ARRAY_SIZE(buf), .buf = buf },
+ };
+ int ret;
+
+ ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
+ if (ret != ARRAY_SIZE(msg)) {
+ dev_err(&client->dev, "i2c write error\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void hi846_write_reg_16(struct hi846 *hi846, u16 reg, u16 val, int *err)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u8 buf[4];
+ int ret;
+
+ if (*err < 0)
+ return;
+
+ put_unaligned_be16(reg, buf);
+ put_unaligned_be16(val, buf + 2);
+ ret = i2c_master_send(client, buf, sizeof(buf));
+ if (ret != sizeof(buf)) {
+ dev_err(&client->dev, "i2c_master_send != %zu: %d\n",
+ sizeof(buf), ret);
+ *err = -EIO;
+ }
+}
+
+static int hi846_write_reg_list(struct hi846 *hi846,
+ const struct hi846_reg_list *r_list)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ unsigned int i;
+ int ret = 0;
+
+ for (i = 0; i < r_list->num_of_regs; i++) {
+ hi846_write_reg_16(hi846, r_list->regs[i].address,
+ r_list->regs[i].val, &ret);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "failed to write reg 0x%4.4x: %d",
+ r_list->regs[i].address, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
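
hi846_write_reg_16() and hi846_write_reg_list() use the error-accumulator idiom: once *err is negative the helper becomes a no-op, so long register sequences can be written as straight-line code and only the first failure is kept. A compact sketch of the idiom, with do_hw_write() standing in for the real I2C transfer:

static void write_one(u16 reg, u16 val, int *err)
{
	if (*err < 0)
		return;			/* an earlier write already failed */
	*err = do_hw_write(reg, val);	/* hypothetical backend */
}

static int write_all(const struct hi846_reg *regs, unsigned int n)
{
	int err = 0;
	unsigned int i;

	for (i = 0; i < n; i++)
		write_one(regs[i].address, regs[i].val, &err);
	return err;	/* 0 on success, first failure otherwise */
}
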
+
+static int hi846_update_digital_gain(struct hi846 *hi846, u16 d_gain)
+{
+ int ret = 0;
+
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GR_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_GB_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_R_GAIN_H, d_gain, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_MWB_B_GAIN_H, d_gain, &ret);
+
+ return ret;
+}
+
+static int hi846_test_pattern(struct hi846 *hi846, u32 pattern)
+{
+ int ret;
+ u8 val;
+
+ if (pattern) {
+ ret = hi846_read_reg(hi846, HI846_REG_ISP, &val);
+ if (ret)
+ return ret;
+
+ ret = hi846_write_reg(hi846, HI846_REG_ISP,
+ val | HI846_REG_ISP_TPG_EN);
+ if (ret)
+ return ret;
+ }
+
+ return hi846_write_reg(hi846, HI846_REG_TEST_PATTERN, pattern);
+}
+
+static int hi846_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct hi846 *hi846 = container_of(ctrl->handler,
+ struct hi846, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ s64 exposure_max;
+ int ret = 0;
+ u32 shutter, frame_len;
+
+ /* Propagate change of current control to all related controls */
+ if (ctrl->id == V4L2_CID_VBLANK) {
+ /* Update max exposure while meeting expected vblanking */
+ exposure_max = hi846->cur_mode->height + ctrl->val -
+ HI846_EXPOSURE_MAX_MARGIN;
+ __v4l2_ctrl_modify_range(hi846->exposure,
+ hi846->exposure->minimum,
+ exposure_max, hi846->exposure->step,
+ exposure_max);
+ }
+
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = hi846_write_reg(hi846, HI846_REG_ANALOG_GAIN, ctrl->val);
+ break;
+
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = hi846_update_digital_gain(hi846, ctrl->val);
+ break;
+
+ case V4L2_CID_EXPOSURE:
+ shutter = ctrl->val;
+ frame_len = hi846->cur_mode->frame_len;
+
+ if (shutter > frame_len - 6) { /* margin */
+ frame_len = shutter + 6;
+ if (frame_len > 0xffff) { /* max frame len */
+ frame_len = 0xffff;
+ }
+ }
+
+ if (shutter < 6)
+ shutter = 6;
+ if (shutter > (0xffff - 6))
+ shutter = 0xffff - 6;
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_len, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_EXPOSURE, shutter, &ret);
+ break;
+
+ case V4L2_CID_VBLANK:
+ /* Update FLL that meets expected vertical blanking */
+ hi846_write_reg_16(hi846, HI846_REG_FLL,
+ hi846->cur_mode->height + ctrl->val, &ret);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = hi846_test_pattern(hi846, ctrl->val);
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops hi846_ctrl_ops = {
+ .s_ctrl = hi846_set_ctrl,
+};
+
+static int hi846_init_controls(struct hi846 *hi846)
+{
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max, h_blank;
+ int ret;
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ struct v4l2_fwnode_device_properties props;
+
+ ctrl_hdlr = &hi846->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ ctrl_hdlr->lock = &hi846->mutex;
+
+ hi846->link_freq =
+ v4l2_ctrl_new_int_menu(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ ARRAY_SIZE(hi846_link_freqs) - 1,
+ 0, hi846_link_freqs);
+ if (hi846->link_freq)
+ hi846->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ hi846->pixel_rate =
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_PIXEL_RATE, 0,
+ hi846_calc_pixel_rate(hi846), 1,
+ hi846_calc_pixel_rate(hi846));
+ hi846->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_VBLANK,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX -
+ hi846->cur_mode->height, 1,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ hi846->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_HBLANK, h_blank, h_blank, 1,
+ h_blank);
+ if (hi846->hblank)
+ hi846->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ HI846_ANAL_GAIN_MIN, HI846_ANAL_GAIN_MAX,
+ HI846_ANAL_GAIN_STEP, HI846_ANAL_GAIN_MIN);
+ v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ HI846_DGTL_GAIN_MIN, HI846_DGTL_GAIN_MAX,
+ HI846_DGTL_GAIN_STEP, HI846_DGTL_GAIN_DEFAULT);
+ exposure_max = hi846->cur_mode->frame_len - HI846_EXPOSURE_MAX_MARGIN;
+ hi846->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ HI846_EXPOSURE_MIN, exposure_max,
+ HI846_EXPOSURE_STEP,
+ exposure_max);
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &hi846_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(hi846_test_pattern_menu) - 1,
+ 0, 0, hi846_test_pattern_menu);
+ if (ctrl_hdlr->error) {
+ dev_err(&client->dev, "v4l ctrl handler error: %d\n",
+ ctrl_hdlr->error);
+ return ctrl_hdlr->error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ return ret;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &hi846_ctrl_ops,
+ &props);
+ if (ret)
+ return ret;
+
+ hi846->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+}
+
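+/*
+ * Derive the frame length (in lines) needed for the requested frame rate
+ * from the link frequency: frame_length = link_freq / (fps * HI846_LINE_LENGTH).
+ * If that exceeds the mode's default frame length, the difference is added
+ * as dummy lines; otherwise the default frame length is kept.
+ */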
+static int hi846_set_video_mode(struct hi846 *hi846, int fps)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ u64 frame_length;
+ int ret = 0;
+ int dummy_lines;
+ u64 link_freq = hi846_get_link_freq(hi846);
+
+ dev_dbg(&client->dev, "%s: link freq: %llu\n", __func__,
+ hi846_get_link_freq(hi846));
+
+ do_div(link_freq, fps);
+ frame_length = link_freq;
+ do_div(frame_length, HI846_LINE_LENGTH);
+
+ dummy_lines = (frame_length > hi846->cur_mode->frame_len) ?
+ (frame_length - hi846->cur_mode->frame_len) : 0;
+
+ frame_length = hi846->cur_mode->frame_len + dummy_lines;
+
+ dev_dbg(&client->dev, "%s: frame length calculated: %llu\n", __func__,
+ frame_length);
+
+ hi846_write_reg_16(hi846, HI846_REG_FLL, frame_length & 0xFFFF, &ret);
+ hi846_write_reg_16(hi846, HI846_REG_LLP,
+ HI846_LINE_LENGTH & 0xFFFF, &ret);
+
+ return ret;
+}
+
+static int hi846_start_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret = 0;
+ u8 val;
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846, &hi846_init_regs_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set plls: %d\n", ret);
+ return ret;
+ }
+
+ ret = hi846_write_reg_list(hi846, &hi846->cur_mode->reg_list_config);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mode: %d\n", ret);
+ return ret;
+ }
+
+ if (hi846->nr_lanes == 2)
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_2lane);
+ else
+ ret = hi846_write_reg_list(hi846,
+ &hi846->cur_mode->reg_list_4lane);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mipi mode: %d\n", ret);
+ return ret;
+ }
+
+ ret = hi846_set_video_mode(hi846, hi846->cur_mode->fps);
+ if (ret)
+ return ret;
+
+ ret = __v4l2_ctrl_handler_setup(hi846->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ /*
+ * Reading 0x0034 is done purely for debugging: the register is not
+ * documented in the DS and is only mentioned once:
+ * "If 0x0034[2] bit is disabled , Visible pixel width and height is 0."
+ * That sounds as if no image would be visible, but nothing more is
+ * known about it, so in that case only inform the user and do nothing
+ * else.
+ */
+ ret = hi846_read_reg(hi846, 0x0034, &val);
+ if (ret)
+ return ret;
+ if (!(val & BIT(2)))
+ dev_info(&client->dev, "visible pixel width and height is 0\n");
+
+ ret = hi846_write_reg(hi846, HI846_REG_MODE_SELECT,
+ HI846_MODE_STREAMING);
+ if (ret) {
+ dev_err(&client->dev, "failed to start stream");
+ return ret;
+ }
+
+ hi846->streaming = 1;
+
+ dev_dbg(&client->dev, "%s: started streaming successfully\n", __func__);
+
+ return ret;
+}
+
+static void hi846_stop_streaming(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+
+ if (hi846_write_reg(hi846, HI846_REG_MODE_SELECT, HI846_MODE_STANDBY))
+ dev_err(&client->dev, "failed to stop stream");
+
+ hi846->streaming = 0;
+}
+
+static int hi846_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ if (hi846->streaming == enable)
+ return 0;
+
+ mutex_lock(&hi846->mutex);
+
+ if (enable) {
+ ret = pm_runtime_get_sync(&client->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&client->dev);
+ goto out;
+ }
+
+ ret = hi846_start_streaming(hi846);
+ }
+
+ if (!enable || ret) {
+ hi846_stop_streaming(hi846);
+ pm_runtime_put(&client->dev);
+ }
+
+out:
+ mutex_unlock(&hi846->mutex);
+
+ return ret;
+}
+
+static int hi846_power_on(struct hi846 *hi846)
+{
+ int ret;
+
+ ret = regulator_bulk_enable(HI846_NUM_SUPPLIES, hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(hi846->clock);
+ if (ret < 0)
+ goto err_reg;
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 0);
+
+ /* 30 us = 2400 cycles at 80 MHz */
+ usleep_range(30, 60);
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 0);
+ usleep_range(30, 60);
+
+ return 0;
+
+err_reg:
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+
+ return ret;
+}
+
+static void hi846_power_off(struct hi846 *hi846)
+{
+ if (hi846->rst_gpio)
+ gpiod_set_value_cansleep(hi846->rst_gpio, 1);
+
+ if (hi846->shutdown_gpio)
+ gpiod_set_value_cansleep(hi846->shutdown_gpio, 1);
+
+ clk_disable_unprepare(hi846->clock);
+ regulator_bulk_disable(HI846_NUM_SUPPLIES, hi846->supplies);
+}
+
+static int __maybe_unused hi846_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ if (hi846->streaming)
+ hi846_stop_streaming(hi846);
+
+ hi846_power_off(hi846);
+
+ return 0;
+}
+
+static int __maybe_unused hi846_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+ int ret;
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ return ret;
+
+ if (hi846->streaming) {
+ ret = hi846_start_streaming(hi846);
+ if (ret) {
+ dev_err(dev, "%s: start streaming failed: %d\n",
+ __func__, ret);
+ goto error;
+ }
+ }
+
+ return 0;
+
+error:
+ hi846_power_off(hi846);
+ return ret;
+}
+
+static int hi846_set_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ const struct hi846_datafmt *fmt = hi846_find_datafmt(mf->code);
+ u32 tgt_fps;
+ s32 vblank_def, h_blank;
+
+ if (!fmt) {
+ mf->code = hi846_colour_fmts[0].code;
+ mf->colorspace = hi846_colour_fmts[0].colorspace;
+ fmt = &hi846_colour_fmts[0];
+ }
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *v4l2_subdev_get_try_format(sd, sd_state, format->pad) = *mf;
+ return 0;
+ }
+
+ if (hi846->nr_lanes == 2) {
+ if (!hi846->cur_mode->reg_list_2lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 2 lanes\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!hi846->cur_mode->reg_list_4lane.num_of_regs) {
+ dev_err(&client->dev,
+ "this mode is not supported for 4 lanes\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&hi846->mutex);
+
+ if (hi846->streaming) {
+ mutex_unlock(&hi846->mutex);
+ return -EBUSY;
+ }
+
+ hi846->fmt = fmt;
+
+ hi846->cur_mode =
+ v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height, mf->width, mf->height);
+ dev_dbg(&client->dev, "%s: found mode: %dx%d\n", __func__,
+ hi846->cur_mode->width, hi846->cur_mode->height);
+
+ tgt_fps = hi846->cur_mode->fps;
+ dev_dbg(&client->dev, "%s: target fps: %d\n", __func__, tgt_fps);
+
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->field = V4L2_FIELD_NONE;
+
+ __v4l2_ctrl_s_ctrl(hi846->link_freq, hi846_get_link_freq_index(hi846));
+ __v4l2_ctrl_s_ctrl_int64(hi846->pixel_rate,
+ hi846_calc_pixel_rate(hi846));
+
+ /* Update limits and set FPS to default */
+ vblank_def = hi846->cur_mode->frame_len - hi846->cur_mode->height;
+ __v4l2_ctrl_modify_range(hi846->vblank,
+ hi846->cur_mode->frame_len -
+ hi846->cur_mode->height,
+ HI846_FLL_MAX - hi846->cur_mode->height, 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(hi846->vblank, vblank_def);
+
+ h_blank = hi846->cur_mode->llp - hi846->cur_mode->width;
+
+ __v4l2_ctrl_modify_range(hi846->hblank, h_blank, h_blank, 1,
+ h_blank);
+
+ dev_dbg(&client->dev, "Set fmt w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height,
+ fmt->code, fmt->colorspace);
+
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static int hi846_get_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
+ format->format = *v4l2_subdev_get_try_format(&hi846->sd,
+ sd_state,
+ format->pad);
+ return 0;
+ }
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+ dev_dbg(&client->dev,
+ "Get format w=%d h=%d code=0x%x colorspace=0x%x\n",
+ mf->width, mf->height, mf->code, mf->colorspace);
+
+ return 0;
+}
+
+static int hi846_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ if (code->pad || code->index > 0)
+ return -EINVAL;
+
+ code->code = HI846_MEDIA_BUS_FORMAT;
+
+ return 0;
+}
+
+static int hi846_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+
+ if (fse->pad || fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != HI846_MEDIA_BUS_FORMAT) {
+ dev_err(&client->dev, "frame size enum not matching\n");
+ return -EINVAL;
+ }
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = supported_modes[fse->index].width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = supported_modes[fse->index].height;
+
+ dev_dbg(&client->dev, "%s: max width: %d max height: %d\n", __func__,
+ fse->max_width, fse->max_height);
+
+ return 0;
+}
+
+static int hi846_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_selection *sel)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ mutex_lock(&hi846->mutex);
+ switch (sel->which) {
+ case V4L2_SUBDEV_FORMAT_TRY:
+ v4l2_subdev_get_try_crop(sd, sd_state, sel->pad);
+ break;
+ case V4L2_SUBDEV_FORMAT_ACTIVE:
+ sel->r = hi846->cur_mode->crop;
+ break;
+ }
+ mutex_unlock(&hi846->mutex);
+ return 0;
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_NATIVE_SIZE:
+ sel->r.top = 0;
+ sel->r.left = 0;
+ sel->r.width = 3264;
+ sel->r.height = 2448;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hi846_init_cfg(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state)
+{
+ struct hi846 *hi846 = to_hi846(sd);
+ struct v4l2_mbus_framefmt *mf;
+
+ mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_lock(&hi846->mutex);
+ mf->code = HI846_MEDIA_BUS_FORMAT;
+ mf->colorspace = V4L2_COLORSPACE_RAW;
+ mf->field = V4L2_FIELD_NONE;
+ mf->width = hi846->cur_mode->width;
+ mf->height = hi846->cur_mode->height;
+ mutex_unlock(&hi846->mutex);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops hi846_video_ops = {
+ .s_stream = hi846_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops hi846_pad_ops = {
+ .init_cfg = hi846_init_cfg,
+ .enum_frame_size = hi846_enum_frame_size,
+ .enum_mbus_code = hi846_enum_mbus_code,
+ .set_fmt = hi846_set_format,
+ .get_fmt = hi846_get_format,
+ .get_selection = hi846_get_selection,
+};
+
+static const struct v4l2_subdev_ops hi846_subdev_ops = {
+ .video = &hi846_video_ops,
+ .pad = &hi846_pad_ops,
+};
+
+static const struct media_entity_operations hi846_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int hi846_identify_module(struct hi846 *hi846)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&hi846->sd);
+ int ret;
+ u8 hi, lo;
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_L, &lo);
+ if (ret)
+ return ret;
+
+ if (lo != HI846_CHIP_ID_L) {
+ dev_err(&client->dev, "wrong chip id low byte: %x", lo);
+ return -ENXIO;
+ }
+
+ ret = hi846_read_reg(hi846, HI846_REG_CHIP_ID_H, &hi);
+ if (ret)
+ return ret;
+
+ if (hi != HI846_CHIP_ID_H) {
+ dev_err(&client->dev, "wrong chip id high byte: %x", hi);
+ return -ENXIO;
+ }
+
+ dev_info(&client->dev, "chip id %02X %02X using %d mipi lanes\n",
+ hi, lo, hi846->nr_lanes);
+
+ return 0;
+}
+
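+/*
+ * Check that every link frequency the driver can use is listed in the
+ * endpoint's link-frequencies property. Returns the first missing
+ * frequency, or 0 if all of them are present.
+ */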
+static s64 hi846_check_link_freqs(struct hi846 *hi846,
+ struct v4l2_fwnode_endpoint *ep)
+{
+ const s64 *freqs = hi846_link_freqs;
+ int freqs_count = ARRAY_SIZE(hi846_link_freqs);
+ int i, j;
+
+ for (i = 0; i < freqs_count; i++) {
+ for (j = 0; j < ep->nr_of_link_frequencies; j++)
+ if (freqs[i] == ep->link_frequencies[j])
+ break;
+ if (j == ep->nr_of_link_frequencies)
+ return freqs[i];
+ }
+
+ return 0;
+}
+
+static int hi846_parse_dt(struct hi846 *hi846, struct device *dev)
+{
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ int ret;
+ s64 fq;
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep) {
+ dev_err(dev, "unable to find endpoint node\n");
+ return -ENXIO;
+ }
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret) {
+ dev_err(dev, "failed to parse endpoint node: %d\n", ret);
+ return ret;
+ }
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != 2 &&
+ bus_cfg.bus.mipi_csi2.num_data_lanes != 4) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ hi846->nr_lanes = bus_cfg.bus.mipi_csi2.num_data_lanes;
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "link-frequency property not found in DT\n");
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ /* Check that link frequencies for all the modes are in device tree */
+ fq = hi846_check_link_freqs(hi846, &bus_cfg);
+ if (fq) {
+ dev_err(dev, "Link frequency of %lld is not supported\n", fq);
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+ return -EINVAL;
+ }
+
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ hi846->rst_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->rst_gpio)) {
+ dev_err(dev, "failed to get reset gpio: %pe\n",
+ hi846->rst_gpio);
+ return PTR_ERR(hi846->rst_gpio);
+ }
+
+ hi846->shutdown_gpio = devm_gpiod_get_optional(dev, "shutdown",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(hi846->shutdown_gpio)) {
+ dev_err(dev, "failed to get shutdown gpio: %pe\n",
+ hi846->shutdown_gpio);
+ return PTR_ERR(hi846->shutdown_gpio);
+ }
+
+ return 0;
+}
+
+static int hi846_probe(struct i2c_client *client)
+{
+ struct hi846 *hi846;
+ int ret;
+ int i;
+ u32 mclk_freq;
+
+ hi846 = devm_kzalloc(&client->dev, sizeof(*hi846), GFP_KERNEL);
+ if (!hi846)
+ return -ENOMEM;
+
+ ret = hi846_parse_dt(hi846, &client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check HW configuration: %d",
+ ret);
+ return ret;
+ }
+
+ hi846->clock = devm_clk_get(&client->dev, NULL);
+ if (IS_ERR(hi846->clock)) {
+ dev_err(&client->dev, "failed to get clock: %pe\n",
+ hi846->clock);
+ return PTR_ERR(hi846->clock);
+ }
+
+ mclk_freq = clk_get_rate(hi846->clock);
+ if (mclk_freq != 25000000)
+ dev_warn(&client->dev,
+ "External clock freq should be 25000000, not %u.\n",
+ mclk_freq);
+
+ for (i = 0; i < HI846_NUM_SUPPLIES; i++)
+ hi846->supplies[i].supply = hi846_supply_names[i];
+
+ ret = devm_regulator_bulk_get(&client->dev, HI846_NUM_SUPPLIES,
+ hi846->supplies);
+ if (ret < 0)
+ return ret;
+
+ v4l2_i2c_subdev_init(&hi846->sd, client, &hi846_subdev_ops);
+
+ mutex_init(&hi846->mutex);
+
+ ret = hi846_power_on(hi846);
+ if (ret)
+ goto err_mutex;
+
+ ret = hi846_identify_module(hi846);
+ if (ret)
+ goto err_power_off;
+
+ hi846->cur_mode = &supported_modes[0];
+
+ ret = hi846_init_controls(hi846);
+ if (ret) {
+ dev_err(&client->dev, "failed to init controls: %d", ret);
+ goto err_power_off;
+ }
+
+ hi846->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ hi846->sd.entity.ops = &hi846_subdev_entity_ops;
+ hi846->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ hi846->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&hi846->sd.entity, 1, &hi846->pad);
+ if (ret) {
+ dev_err(&client->dev, "failed to init entity pads: %d", ret);
+ goto err_v4l2_ctrl_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&hi846->sd);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to register V4L2 subdev: %d",
+ ret);
+ goto err_media_entity_cleanup;
+ }
+
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+err_media_entity_cleanup:
+ media_entity_cleanup(&hi846->sd.entity);
+
+err_v4l2_ctrl_handler_free:
+ v4l2_ctrl_handler_free(hi846->sd.ctrl_handler);
+
+err_power_off:
+ hi846_power_off(hi846);
+
+err_mutex:
+ mutex_destroy(&hi846->mutex);
+
+ return ret;
+}
+
+static int hi846_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct hi846 *hi846 = to_hi846(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_ctrl_handler_free(sd->ctrl_handler);
+
+ pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ hi846_suspend(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+
+ mutex_destroy(&hi846->mutex);
+
+ return 0;
+}
+
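+/*
+ * UNIVERSAL_DEV_PM_OPS registers hi846_suspend()/hi846_resume() as both the
+ * system-sleep and the runtime-PM callbacks, so streaming is stopped and
+ * restarted across either kind of transition.
+ */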
+static UNIVERSAL_DEV_PM_OPS(hi846_pm_ops, hi846_suspend, hi846_resume, NULL);
+
+static const struct of_device_id hi846_of_match[] = {
+ { .compatible = "hynix,hi846", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, hi846_of_match);
+
+static struct i2c_driver hi846_i2c_driver = {
+ .driver = {
+ .name = "hi846",
+ .pm = &hi846_pm_ops,
+ .of_match_table = of_match_ptr(hi846_of_match),
+ },
+ .probe_new = hi846_probe,
+ .remove = hi846_remove,
+};
+
+module_i2c_driver(hi846_i2c_driver);
+
+MODULE_AUTHOR("Angus Ainslie <angus@akkea.ca>");
+MODULE_AUTHOR("Martin Kepplinger <martin.kepplinger@puri.sm>");
+MODULE_DESCRIPTION("Hynix HI846 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/imx258.c b/drivers/media/i2c/imx258.c
index 81cdf37216ca..c249507aa2db 100644
--- a/drivers/media/i2c/imx258.c
+++ b/drivers/media/i2c/imx258.c
@@ -1260,18 +1260,18 @@ static int imx258_probe(struct i2c_client *client)
return -ENOMEM;
imx258->clk = devm_clk_get_optional(&client->dev, NULL);
+ if (IS_ERR(imx258->clk))
+ return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
+ "error getting clock\n");
if (!imx258->clk) {
dev_dbg(&client->dev,
"no clock provided, using clock-frequency property\n");
device_property_read_u32(&client->dev, "clock-frequency", &val);
- if (val != IMX258_INPUT_CLOCK_FREQ)
- return -EINVAL;
- } else if (IS_ERR(imx258->clk)) {
- return dev_err_probe(&client->dev, PTR_ERR(imx258->clk),
- "error getting clock\n");
+ } else {
+ val = clk_get_rate(imx258->clk);
}
- if (clk_get_rate(imx258->clk) != IMX258_INPUT_CLOCK_FREQ) {
+ if (val != IMX258_INPUT_CLOCK_FREQ) {
dev_err(&client->dev, "input clock frequency not supported\n");
return -EINVAL;
}
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 92376592455e..56674173524f 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -791,6 +791,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
rc_proto = RC_PROTO_BIT_RC5 | RC_PROTO_BIT_RC6_MCE |
RC_PROTO_BIT_RC6_6A_32;
ir_codes = RC_MAP_HAUPPAUGE;
+ ir->polling_interval = 125;
probe_tx = true;
break;
}
diff --git a/drivers/media/i2c/max9286.c b/drivers/media/i2c/max9286.c
index 1aa2c58fd38c..7c663fd587bb 100644
--- a/drivers/media/i2c/max9286.c
+++ b/drivers/media/i2c/max9286.c
@@ -606,19 +606,18 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
if (!priv->nsources)
return 0;
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
for_each_source(priv, source) {
unsigned int i = to_index(priv, source);
struct max9286_asd *mas;
- mas = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier,
- source->fwnode,
- struct max9286_asd);
+ mas = v4l2_async_nf_add_fwnode(&priv->notifier, source->fwnode,
+ struct max9286_asd);
if (IS_ERR(mas)) {
dev_err(dev, "Failed to add subdev for source %u: %ld",
i, PTR_ERR(mas));
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return PTR_ERR(mas);
}
@@ -627,10 +626,10 @@ static int max9286_v4l2_notifier_register(struct max9286_priv *priv)
priv->notifier.ops = &max9286_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&priv->sd, &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret) {
dev_err(dev, "Failed to register subdev_notifier");
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -642,8 +641,8 @@ static void max9286_v4l2_notifier_unregister(struct max9286_priv *priv)
if (!priv->nsources)
return;
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
}
static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 6eb88ef99783..cbce8b88dbcf 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -27,6 +27,7 @@
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
@@ -75,38 +76,38 @@
#define MT9P031_PLL_CONFIG_1 0x11
#define MT9P031_PLL_CONFIG_2 0x12
#define MT9P031_PIXEL_CLOCK_CONTROL 0x0a
-#define MT9P031_PIXEL_CLOCK_INVERT (1 << 15)
+#define MT9P031_PIXEL_CLOCK_INVERT BIT(15)
#define MT9P031_PIXEL_CLOCK_SHIFT(n) ((n) << 8)
#define MT9P031_PIXEL_CLOCK_DIVIDE(n) ((n) << 0)
-#define MT9P031_FRAME_RESTART 0x0b
+#define MT9P031_RESTART 0x0b
+#define MT9P031_FRAME_PAUSE_RESTART BIT(1)
+#define MT9P031_FRAME_RESTART BIT(0)
#define MT9P031_SHUTTER_DELAY 0x0c
#define MT9P031_RST 0x0d
-#define MT9P031_RST_ENABLE 1
-#define MT9P031_RST_DISABLE 0
+#define MT9P031_RST_ENABLE BIT(0)
#define MT9P031_READ_MODE_1 0x1e
#define MT9P031_READ_MODE_2 0x20
-#define MT9P031_READ_MODE_2_ROW_MIR (1 << 15)
-#define MT9P031_READ_MODE_2_COL_MIR (1 << 14)
-#define MT9P031_READ_MODE_2_ROW_BLC (1 << 6)
+#define MT9P031_READ_MODE_2_ROW_MIR BIT(15)
+#define MT9P031_READ_MODE_2_COL_MIR BIT(14)
+#define MT9P031_READ_MODE_2_ROW_BLC BIT(6)
#define MT9P031_ROW_ADDRESS_MODE 0x22
#define MT9P031_COLUMN_ADDRESS_MODE 0x23
#define MT9P031_GLOBAL_GAIN 0x35
#define MT9P031_GLOBAL_GAIN_MIN 8
#define MT9P031_GLOBAL_GAIN_MAX 1024
#define MT9P031_GLOBAL_GAIN_DEF 8
-#define MT9P031_GLOBAL_GAIN_MULT (1 << 6)
+#define MT9P031_GLOBAL_GAIN_MULT BIT(6)
#define MT9P031_ROW_BLACK_TARGET 0x49
#define MT9P031_ROW_BLACK_DEF_OFFSET 0x4b
#define MT9P031_GREEN1_OFFSET 0x60
#define MT9P031_GREEN2_OFFSET 0x61
#define MT9P031_BLACK_LEVEL_CALIBRATION 0x62
-#define MT9P031_BLC_MANUAL_BLC (1 << 0)
+#define MT9P031_BLC_MANUAL_BLC BIT(0)
#define MT9P031_RED_OFFSET 0x63
#define MT9P031_BLUE_OFFSET 0x64
#define MT9P031_TEST_PATTERN 0xa0
#define MT9P031_TEST_PATTERN_SHIFT 3
-#define MT9P031_TEST_PATTERN_ENABLE (1 << 0)
-#define MT9P031_TEST_PATTERN_DISABLE (0 << 0)
+#define MT9P031_TEST_PATTERN_ENABLE BIT(0)
#define MT9P031_TEST_PATTERN_GREEN 0xa1
#define MT9P031_TEST_PATTERN_RED 0xa2
#define MT9P031_TEST_PATTERN_BLUE 0xa3
@@ -196,7 +197,7 @@ static int mt9p031_reset(struct mt9p031 *mt9p031)
ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_ENABLE);
if (ret < 0)
return ret;
- ret = mt9p031_write(client, MT9P031_RST, MT9P031_RST_DISABLE);
+ ret = mt9p031_write(client, MT9P031_RST, 0);
if (ret < 0)
return ret;
@@ -229,6 +230,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
struct i2c_client *client = v4l2_get_subdevdata(&mt9p031->subdev);
struct mt9p031_platform_data *pdata = mt9p031->pdata;
+ unsigned long ext_freq;
int ret;
mt9p031->clk = devm_clk_get(&client->dev, NULL);
@@ -239,13 +241,15 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
if (ret < 0)
return ret;
+ ext_freq = clk_get_rate(mt9p031->clk);
+
/* If the external clock frequency is out of bounds for the PLL use the
* pixel clock divider only and disable the PLL.
*/
- if (pdata->ext_freq > limits.ext_clock_max) {
+ if (ext_freq > limits.ext_clock_max) {
unsigned int div;
- div = DIV_ROUND_UP(pdata->ext_freq, pdata->target_freq);
+ div = DIV_ROUND_UP(ext_freq, pdata->target_freq);
div = roundup_pow_of_two(div) / 2;
mt9p031->clk_div = min_t(unsigned int, div, 64);
@@ -254,7 +258,7 @@ static int mt9p031_clk_setup(struct mt9p031 *mt9p031)
return 0;
}
- mt9p031->pll.ext_clock = pdata->ext_freq;
+ mt9p031->pll.ext_clock = ext_freq;
mt9p031->pll.pix_clock = pdata->target_freq;
mt9p031->use_pll = true;
@@ -369,6 +373,14 @@ static int __mt9p031_set_power(struct mt9p031 *mt9p031, bool on)
return ret;
}
+ /* Configure the pixel clock polarity */
+ if (mt9p031->pdata && mt9p031->pdata->pixclk_pol) {
+ ret = mt9p031_write(client, MT9P031_PIXEL_CLOCK_CONTROL,
+ MT9P031_PIXEL_CLOCK_INVERT);
+ if (ret < 0)
+ return ret;
+ }
+
return v4l2_ctrl_handler_setup(&mt9p031->ctrls);
}
@@ -444,9 +456,23 @@ static int mt9p031_set_params(struct mt9p031 *mt9p031)
static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
{
struct mt9p031 *mt9p031 = to_mt9p031(subdev);
+ struct i2c_client *client = v4l2_get_subdevdata(subdev);
+ int val;
int ret;
if (!enable) {
+ /* enable pause restart */
+ val = MT9P031_FRAME_PAUSE_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
+ /* enable restart + keep pause restart set */
+ val |= MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
/* Stop sensor readout */
ret = mt9p031_set_output_control(mt9p031,
MT9P031_OUTPUT_CONTROL_CEN, 0);
@@ -466,6 +492,16 @@ static int mt9p031_s_stream(struct v4l2_subdev *subdev, int enable)
if (ret < 0)
return ret;
+ /*
+ * - clear pause restart
+ * - don't clear restart as clearing restart manually can cause
+ * undefined behavior
+ */
+ val = MT9P031_FRAME_RESTART;
+ ret = mt9p031_write(client, MT9P031_RESTART, val);
+ if (ret < 0)
+ return ret;
+
return mt9p031_pll_enable(mt9p031);
}
@@ -756,8 +792,7 @@ static int mt9p031_s_ctrl(struct v4l2_ctrl *ctrl)
if (ret < 0)
return ret;
- return mt9p031_write(client, MT9P031_TEST_PATTERN,
- MT9P031_TEST_PATTERN_DISABLE);
+ return mt9p031_write(client, MT9P031_TEST_PATTERN, 0);
}
ret = mt9p031_write(client, MT9P031_TEST_PATTERN_GREEN, 0x05a0);
@@ -1011,8 +1046,11 @@ static const struct v4l2_subdev_internal_ops mt9p031_subdev_internal_ops = {
static struct mt9p031_platform_data *
mt9p031_get_pdata(struct i2c_client *client)
{
- struct mt9p031_platform_data *pdata;
+ struct mt9p031_platform_data *pdata = NULL;
struct device_node *np;
+ struct v4l2_fwnode_endpoint endpoint = {
+ .bus_type = V4L2_MBUS_PARALLEL
+ };
if (!IS_ENABLED(CONFIG_OF) || !client->dev.of_node)
return client->dev.platform_data;
@@ -1021,6 +1059,9 @@ mt9p031_get_pdata(struct i2c_client *client)
if (!np)
return NULL;
+ if (v4l2_fwnode_endpoint_parse(of_fwnode_handle(np), &endpoint) < 0)
+ goto done;
+
pdata = devm_kzalloc(&client->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
goto done;
@@ -1028,6 +1069,9 @@ mt9p031_get_pdata(struct i2c_client *client)
of_property_read_u32(np, "input-clock-frequency", &pdata->ext_freq);
of_property_read_u32(np, "pixel-clock-frequency", &pdata->target_freq);
+ pdata->pixclk_pol = !!(endpoint.bus.parallel.flags &
+ V4L2_MBUS_PCLK_SAMPLE_RISING);
+
done:
of_node_put(np);
return pdata;
diff --git a/drivers/media/i2c/ov13858.c b/drivers/media/i2c/ov13858.c
index 7fc70af53e45..b4d22f5d9933 100644
--- a/drivers/media/i2c/ov13858.c
+++ b/drivers/media/i2c/ov13858.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV13858_REG_VALUE_08BIT 1
@@ -1553,6 +1554,12 @@ static int ov13858_identify_module(struct ov13858 *ov13858)
return 0;
}
+static const struct v4l2_subdev_core_ops ov13858_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov13858_video_ops = {
.s_stream = ov13858_set_stream,
};
@@ -1569,6 +1576,7 @@ static const struct v4l2_subdev_sensor_ops ov13858_sensor_ops = {
};
static const struct v4l2_subdev_ops ov13858_subdev_ops = {
+ .core = &ov13858_core_ops,
.video = &ov13858_video_ops,
.pad = &ov13858_pad_ops,
.sensor = &ov13858_sensor_ops,
@@ -1724,7 +1732,8 @@ static int ov13858_probe(struct i2c_client *client,
/* Initialize subdev */
ov13858->sd.internal_ops = &ov13858_internal_ops;
- ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13858->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov13858->sd.entity.ops = &ov13858_subdev_entity_ops;
ov13858->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov13b10.c b/drivers/media/i2c/ov13b10.c
new file mode 100644
index 000000000000..7caeae641051
--- /dev/null
+++ b/drivers/media/i2c/ov13b10.c
@@ -0,0 +1,1491 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2021 Intel Corporation.
+
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-ctrls.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-fwnode.h>
+
+#define OV13B10_REG_VALUE_08BIT 1
+#define OV13B10_REG_VALUE_16BIT 2
+#define OV13B10_REG_VALUE_24BIT 3
+
+#define OV13B10_REG_MODE_SELECT 0x0100
+#define OV13B10_MODE_STANDBY 0x00
+#define OV13B10_MODE_STREAMING 0x01
+
+#define OV13B10_REG_SOFTWARE_RST 0x0103
+#define OV13B10_SOFTWARE_RST 0x01
+
+/* Chip ID */
+#define OV13B10_REG_CHIP_ID 0x300a
+#define OV13B10_CHIP_ID 0x560d42
+
+/* V_TIMING internal */
+#define OV13B10_REG_VTS 0x380e
+#define OV13B10_VTS_30FPS 0x0c7c
+#define OV13B10_VTS_60FPS 0x063e
+#define OV13B10_VTS_MAX 0x7fff
+
+/* HBLANK control - read only */
+#define OV13B10_PPL_560MHZ 4704
+
+/* Exposure control */
+#define OV13B10_REG_EXPOSURE 0x3500
+#define OV13B10_EXPOSURE_MIN 4
+#define OV13B10_EXPOSURE_STEP 1
+#define OV13B10_EXPOSURE_DEFAULT 0x40
+
+/* Analog gain control */
+#define OV13B10_REG_ANALOG_GAIN 0x3508
+#define OV13B10_ANA_GAIN_MIN 0x80
+#define OV13B10_ANA_GAIN_MAX 0x07c0
+#define OV13B10_ANA_GAIN_STEP 1
+#define OV13B10_ANA_GAIN_DEFAULT 0x80
+
+/* Digital gain control */
+#define OV13B10_REG_DGTL_GAIN_H 0x350a
+#define OV13B10_REG_DGTL_GAIN_M 0x350b
+#define OV13B10_REG_DGTL_GAIN_L 0x350c
+
+#define OV13B10_DGTL_GAIN_MIN 1024 /* Min = 1 X */
+#define OV13B10_DGTL_GAIN_MAX (4096 - 1) /* Max = 4 X */
+#define OV13B10_DGTL_GAIN_DEFAULT 2560 /* Default gain = 2.5 X */
+#define OV13B10_DGTL_GAIN_STEP 1 /* Each step = 1/1024 */
+
+#define OV13B10_DGTL_GAIN_L_SHIFT 6
+#define OV13B10_DGTL_GAIN_L_MASK 0x3
+#define OV13B10_DGTL_GAIN_M_SHIFT 2
+#define OV13B10_DGTL_GAIN_M_MASK 0xff
+#define OV13B10_DGTL_GAIN_H_SHIFT 10
+#define OV13B10_DGTL_GAIN_H_MASK 0x3
+
+/* Test Pattern Control */
+#define OV13B10_REG_TEST_PATTERN 0x5080
+#define OV13B10_TEST_PATTERN_ENABLE BIT(7)
+#define OV13B10_TEST_PATTERN_MASK 0xf3
+#define OV13B10_TEST_PATTERN_BAR_SHIFT 2
+
+/* Flip Control */
+#define OV13B10_REG_FORMAT1 0x3820
+#define OV13B10_REG_FORMAT2 0x3821
+
+/* Horizontal Window Offset */
+#define OV13B10_REG_H_WIN_OFFSET 0x3811
+
+/* Vertical Window Offset */
+#define OV13B10_REG_V_WIN_OFFSET 0x3813
+
+struct ov13b10_reg {
+ u16 address;
+ u8 val;
+};
+
+struct ov13b10_reg_list {
+ u32 num_of_regs;
+ const struct ov13b10_reg *regs;
+};
+
+/* Link frequency config */
+struct ov13b10_link_freq_config {
+ u32 pixels_per_line;
+
+ /* registers for this link frequency */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* Mode : resolution and related config&values */
+struct ov13b10_mode {
+ /* Frame width */
+ u32 width;
+ /* Frame height */
+ u32 height;
+
+ /* V-timing */
+ u32 vts_def;
+ u32 vts_min;
+
+ /* Index of Link frequency config to be used */
+ u32 link_freq_index;
+ /* Default register values */
+ struct ov13b10_reg_list reg_list;
+};
+
+/* 4208x3120 needs 1120Mbps/lane, 4 lanes */
+static const struct ov13b10_reg mipi_data_rate_1120mbps[] = {
+ {0x0103, 0x01},
+ {0x0303, 0x04},
+ {0x0305, 0xaf},
+ {0x0321, 0x00},
+ {0x0323, 0x04},
+ {0x0324, 0x01},
+ {0x0325, 0xa4},
+ {0x0326, 0x81},
+ {0x0327, 0x04},
+ {0x3012, 0x07},
+ {0x3013, 0x32},
+ {0x3107, 0x23},
+ {0x3501, 0x0c},
+ {0x3502, 0x10},
+ {0x3504, 0x08},
+ {0x3508, 0x07},
+ {0x3509, 0xc0},
+ {0x3600, 0x16},
+ {0x3601, 0x54},
+ {0x3612, 0x4e},
+ {0x3620, 0x00},
+ {0x3621, 0x68},
+ {0x3622, 0x66},
+ {0x3623, 0x03},
+ {0x3662, 0x92},
+ {0x3666, 0xbb},
+ {0x3667, 0x44},
+ {0x366e, 0xff},
+ {0x366f, 0xf3},
+ {0x3675, 0x44},
+ {0x3676, 0x00},
+ {0x367f, 0xe9},
+ {0x3681, 0x32},
+ {0x3682, 0x1f},
+ {0x3683, 0x0b},
+ {0x3684, 0x0b},
+ {0x3704, 0x0f},
+ {0x3706, 0x40},
+ {0x3708, 0x3b},
+ {0x3709, 0x72},
+ {0x370b, 0xa2},
+ {0x3714, 0x24},
+ {0x371a, 0x3e},
+ {0x3725, 0x42},
+ {0x3739, 0x12},
+ {0x3767, 0x00},
+ {0x377a, 0x0d},
+ {0x3789, 0x18},
+ {0x3790, 0x40},
+ {0x3791, 0xa2},
+ {0x37c2, 0x04},
+ {0x37c3, 0xf1},
+ {0x37d9, 0x0c},
+ {0x37da, 0x02},
+ {0x37dc, 0x02},
+ {0x37e1, 0x04},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3811, 0x0f},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3815, 0x01},
+ {0x3816, 0x01},
+ {0x3817, 0x01},
+ {0x381f, 0x08},
+ {0x3820, 0x88},
+ {0x3821, 0x00},
+ {0x3822, 0x14},
+ {0x382e, 0xe6},
+ {0x3c80, 0x00},
+ {0x3c87, 0x01},
+ {0x3c8c, 0x19},
+ {0x3c8d, 0x1c},
+ {0x3ca0, 0x00},
+ {0x3ca1, 0x00},
+ {0x3ca2, 0x00},
+ {0x3ca3, 0x00},
+ {0x3ca4, 0x50},
+ {0x3ca5, 0x11},
+ {0x3ca6, 0x01},
+ {0x3ca7, 0x00},
+ {0x3ca8, 0x00},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x400a, 0x01},
+ {0x400b, 0x19},
+ {0x4011, 0x21},
+ {0x4017, 0x08},
+ {0x4019, 0x04},
+ {0x401a, 0x58},
+ {0x4032, 0x1e},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x405e, 0x00},
+ {0x4066, 0x02},
+ {0x4501, 0x00},
+ {0x4502, 0x10},
+ {0x4505, 0x00},
+ {0x4800, 0x64},
+ {0x481b, 0x3e},
+ {0x481f, 0x30},
+ {0x4825, 0x34},
+ {0x4837, 0x0e},
+ {0x484b, 0x01},
+ {0x4883, 0x02},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+ {0x5045, 0x20},
+ {0x5046, 0x20},
+ {0x5047, 0xa4},
+ {0x5048, 0x20},
+ {0x5049, 0xa4},
+ {0x0100, 0x01},
+};
+
+static const struct ov13b10_reg mode_4208x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x70},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x0f},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x3120_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x0c},
+ {0x380b, 0x30},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x00},
+ {0x3813, 0x09},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_4160x2340_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x0c},
+ {0x3662, 0x92},
+ {0x3714, 0x24},
+ {0x3739, 0x12},
+ {0x37c2, 0x04},
+ {0x37d9, 0x0c},
+ {0x37e2, 0x0a},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x10},
+ {0x3809, 0x40},
+ {0x380a, 0x09},
+ {0x380b, 0x24},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x0c},
+ {0x380f, 0x7c},
+ {0x3810, 0x00},
+ {0x3811, 0x27},
+ {0x3812, 0x01},
+ {0x3813, 0x8f},
+ {0x3814, 0x01},
+ {0x3816, 0x01},
+ {0x3820, 0x88},
+ {0x3c8c, 0x19},
+ {0x4008, 0x02},
+ {0x4009, 0x0f},
+ {0x4050, 0x02},
+ {0x4051, 0x09},
+ {0x4501, 0x00},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xff},
+ {0x5001, 0x0f},
+};
+
+static const struct ov13b10_reg mode_2104x1560_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x38},
+ {0x380a, 0x06},
+ {0x380b, 0x18},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x07},
+ {0x3812, 0x00},
+ {0x3813, 0x05},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const struct ov13b10_reg mode_2080x1170_regs[] = {
+ {0x0305, 0xaf},
+ {0x3501, 0x06},
+ {0x3662, 0x88},
+ {0x3714, 0x28},
+ {0x3739, 0x10},
+ {0x37c2, 0x14},
+ {0x37d9, 0x06},
+ {0x37e2, 0x0c},
+ {0x3800, 0x00},
+ {0x3801, 0x00},
+ {0x3802, 0x00},
+ {0x3803, 0x08},
+ {0x3804, 0x10},
+ {0x3805, 0x8f},
+ {0x3806, 0x0c},
+ {0x3807, 0x47},
+ {0x3808, 0x08},
+ {0x3809, 0x20},
+ {0x380a, 0x04},
+ {0x380b, 0x92},
+ {0x380c, 0x04},
+ {0x380d, 0x98},
+ {0x380e, 0x06},
+ {0x380f, 0x3e},
+ {0x3810, 0x00},
+ {0x3811, 0x13},
+ {0x3812, 0x00},
+ {0x3813, 0xc9},
+ {0x3814, 0x03},
+ {0x3816, 0x03},
+ {0x3820, 0x8b},
+ {0x3c8c, 0x18},
+ {0x4008, 0x00},
+ {0x4009, 0x05},
+ {0x4050, 0x00},
+ {0x4051, 0x05},
+ {0x4501, 0x08},
+ {0x4505, 0x00},
+ {0x4837, 0x0e},
+ {0x5000, 0xfd},
+ {0x5001, 0x0d},
+};
+
+static const char * const ov13b10_test_pattern_menu[] = {
+ "Disabled",
+ "Vertical Color Bar Type 1",
+ "Vertical Color Bar Type 2",
+ "Vertical Color Bar Type 3",
+ "Vertical Color Bar Type 4"
+};
+
+/* Configurations for supported link frequencies */
+#define OV13B10_LINK_FREQ_560MHZ 560000000ULL
+#define OV13B10_LINK_FREQ_INDEX_0 0
+
+#define OV13B10_EXT_CLK 19200000
+#define OV13B10_DATA_LANES 4
+
+/*
+ * pixel_rate = link_freq * data-rate * nr_of_lanes / bits_per_sample
+ * data rate => double data rate; number of lanes => 4; bits per sample => 10
+ */
+static u64 link_freq_to_pixel_rate(u64 f)
+{
+ f *= 2 * OV13B10_DATA_LANES;
+ do_div(f, 10);
+
+ return f;
+}
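+/*
+ * For example, at the 560 MHz link frequency used below this gives
+ * 560000000 * 2 * 4 / 10 = 448000000 pixels per second.
+ */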
+
+/* Menu items for LINK_FREQ V4L2 control */
+static const s64 link_freq_menu_items[] = {
+ OV13B10_LINK_FREQ_560MHZ
+};
+
+/* Link frequency configs */
+static const struct ov13b10_link_freq_config
+ link_freq_configs[] = {
+ {
+ .pixels_per_line = OV13B10_PPL_560MHZ,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_rate_1120mbps),
+ .regs = mipi_data_rate_1120mbps,
+ }
+ }
+};
+
+/* Mode configs */
+static const struct ov13b10_mode supported_modes[] = {
+ {
+ .width = 4208,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4208x3120_regs),
+ .regs = mode_4208x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 3120,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x3120_regs),
+ .regs = mode_4160x3120_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 4160,
+ .height = 2340,
+ .vts_def = OV13B10_VTS_30FPS,
+ .vts_min = OV13B10_VTS_30FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_4160x2340_regs),
+ .regs = mode_4160x2340_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2104,
+ .height = 1560,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2104x1560_regs),
+ .regs = mode_2104x1560_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ },
+ {
+ .width = 2080,
+ .height = 1170,
+ .vts_def = OV13B10_VTS_60FPS,
+ .vts_min = OV13B10_VTS_60FPS,
+ .reg_list = {
+ .num_of_regs = ARRAY_SIZE(mode_2080x1170_regs),
+ .regs = mode_2080x1170_regs,
+ },
+ .link_freq_index = OV13B10_LINK_FREQ_INDEX_0,
+ }
+};
+
+struct ov13b10 {
+ struct v4l2_subdev sd;
+ struct media_pad pad;
+
+ struct v4l2_ctrl_handler ctrl_handler;
+ /* V4L2 Controls */
+ struct v4l2_ctrl *link_freq;
+ struct v4l2_ctrl *pixel_rate;
+ struct v4l2_ctrl *vblank;
+ struct v4l2_ctrl *hblank;
+ struct v4l2_ctrl *exposure;
+
+ /* Current mode */
+ const struct ov13b10_mode *cur_mode;
+
+ /* Mutex for serialized access */
+ struct mutex mutex;
+
+ /* Streaming on/off */
+ bool streaming;
+};
+
+#define to_ov13b10(_sd) container_of(_sd, struct ov13b10, sd)
+
+/* Read registers up to 4 at a time */
+static int ov13b10_read_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 *val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct i2c_msg msgs[2];
+ u8 *data_be_p;
+ int ret;
+ __be32 data_be = 0;
+ __be16 reg_addr_be = cpu_to_be16(reg);
+
+ if (len > 4)
+ return -EINVAL;
+
+ data_be_p = (u8 *)&data_be;
+ /* Write register address */
+ msgs[0].addr = client->addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = (u8 *)&reg_addr_be;
+
+ /* Read data from register */
+ msgs[1].addr = client->addr;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].len = len;
+ msgs[1].buf = &data_be_p[4 - len];
+
+ ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
+ if (ret != ARRAY_SIZE(msgs))
+ return -EIO;
+
+ *val = be32_to_cpu(data_be);
+
+ return 0;
+}
+
+/* Write registers up to 4 at a time */
+static int ov13b10_write_reg(struct ov13b10 *ov13b,
+ u16 reg, u32 len, u32 __val)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int buf_i, val_i;
+ u8 buf[6], *val_p;
+ __be32 val;
+
+ if (len > 4)
+ return -EINVAL;
+
+ buf[0] = reg >> 8;
+ buf[1] = reg & 0xff;
+
+ val = cpu_to_be32(__val);
+ val_p = (u8 *)&val;
+ buf_i = 2;
+ val_i = 4 - len;
+
+ while (val_i < 4)
+ buf[buf_i++] = val_p[val_i++];
+
+ if (i2c_master_send(client, buf, len + 2) != len + 2)
+ return -EIO;
+
+ return 0;
+}
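+/*
+ * Example: a 16-bit write of OV13B10_VTS_30FPS (0x0c7c) to OV13B10_REG_VTS
+ * (0x380e) sends the four bytes {0x38, 0x0e, 0x0c, 0x7c}: the register
+ * address big-endian first, followed by the last 'len' bytes of the
+ * big-endian value.
+ */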
+
+/* Write a list of registers */
+static int ov13b10_write_regs(struct ov13b10 *ov13b,
+ const struct ov13b10_reg *regs, u32 len)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 i;
+
+ for (i = 0; i < len; i++) {
+ ret = ov13b10_write_reg(ov13b, regs[i].address, 1,
+ regs[i].val);
+ if (ret) {
+ dev_err_ratelimited(&client->dev,
+ "Failed to write reg 0x%4.4x. error = %d\n",
+ regs[i].address, ret);
+
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ov13b10_write_reg_list(struct ov13b10 *ov13b,
+ const struct ov13b10_reg_list *r_list)
+{
+ return ov13b10_write_regs(ov13b, r_list->regs, r_list->num_of_regs);
+}
+
+/* Open sub-device */
+static int ov13b10_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ const struct ov13b10_mode *default_mode = &supported_modes[0];
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct v4l2_mbus_framefmt *try_fmt = v4l2_subdev_get_try_format(sd,
+ fh->state,
+ 0);
+
+ mutex_lock(&ov13b->mutex);
+
+ /* Initialize try_fmt */
+ try_fmt->width = default_mode->width;
+ try_fmt->height = default_mode->height;
+ try_fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ try_fmt->field = V4L2_FIELD_NONE;
+
+ /* No crop or compose */
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
+
+static int ov13b10_update_digital_gain(struct ov13b10 *ov13b, u32 d_gain)
+{
+ int ret;
+ u32 val;
+
+ /*
+ * The 12-bit digital gain is split across three registers:
+ * 0x350A[1:0] = d_gain[11:10], 0x350B[7:0] = d_gain[9:2],
+ * 0x350C[7:6] = d_gain[1:0].
+ */
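+ /*
+ * For example, the default gain of 2560 (0xa00, 2.5x) ends up as
+ * 0x350A = 0x02, 0x350B = 0x80 and 0x350C[7:6] = 0.
+ */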
+
+ val = (d_gain & OV13B10_DGTL_GAIN_L_MASK) << OV13B10_DGTL_GAIN_L_SHIFT;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_L,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_M_SHIFT) & OV13B10_DGTL_GAIN_M_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_M,
+ OV13B10_REG_VALUE_08BIT, val);
+ if (ret)
+ return ret;
+
+ val = (d_gain >> OV13B10_DGTL_GAIN_H_SHIFT) & OV13B10_DGTL_GAIN_H_MASK;
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_DGTL_GAIN_H,
+ OV13B10_REG_VALUE_08BIT, val);
+
+ return ret;
+}
+
+static int ov13b10_enable_test_pattern(struct ov13b10 *ov13b, u32 pattern)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ if (pattern) {
+ val &= OV13B10_TEST_PATTERN_MASK;
+ val |= ((pattern - 1) << OV13B10_TEST_PATTERN_BAR_SHIFT) |
+ OV13B10_TEST_PATTERN_ENABLE;
+ } else {
+ val &= ~OV13B10_TEST_PATTERN_ENABLE;
+ }
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_TEST_PATTERN,
+ OV13B10_REG_VALUE_08BIT, val);
+}
+
+static int ov13b10_set_ctrl_hflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val & ~BIT(3) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Apply a cropping offset to undo the change of Bayer order that
+ * mirroring the image would otherwise cause.
+ */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_H_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? ++val : val);
+}
+
+static int ov13b10_set_ctrl_vflip(struct ov13b10 *ov13b, u32 ctrl_val)
+{
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_FORMAT1,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? val | BIT(4) | BIT(5) : val);
+
+ if (ret)
+ return ret;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT, &val);
+ if (ret)
+ return ret;
+
+ /*
+ * Apply a cropping offset to undo the change of Bayer order that
+ * flipping the image would otherwise cause.
+ */
+ return ov13b10_write_reg(ov13b, OV13B10_REG_V_WIN_OFFSET,
+ OV13B10_REG_VALUE_08BIT,
+ ctrl_val ? --val : val);
+}
+
+static int ov13b10_set_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct ov13b10 *ov13b = container_of(ctrl->handler,
+ struct ov13b10, ctrl_handler);
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ s64 max;
+ int ret;
+
+ /* Propagate change of current control to all related controls */
+ switch (ctrl->id) {
+ case V4L2_CID_VBLANK:
+ /* Update max exposure while meeting expected vblanking */
+ max = ov13b->cur_mode->height + ctrl->val - 8;
+ __v4l2_ctrl_modify_range(ov13b->exposure,
+ ov13b->exposure->minimum,
+ max, ov13b->exposure->step, max);
+ break;
+ }
+
+ /*
+ * V4L2 control values are only applied while the sensor is powered
+ * up for streaming.
+ */
+ if (!pm_runtime_get_if_in_use(&client->dev))
+ return 0;
+
+ ret = 0;
+ switch (ctrl->id) {
+ case V4L2_CID_ANALOGUE_GAIN:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_ANALOG_GAIN,
+ OV13B10_REG_VALUE_16BIT,
+ ctrl->val << 1);
+ break;
+ case V4L2_CID_DIGITAL_GAIN:
+ ret = ov13b10_update_digital_gain(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_EXPOSURE:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_EXPOSURE,
+ OV13B10_REG_VALUE_24BIT,
+ ctrl->val);
+ break;
+ case V4L2_CID_VBLANK:
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_VTS,
+ OV13B10_REG_VALUE_16BIT,
+ ov13b->cur_mode->height
+ + ctrl->val);
+ break;
+ case V4L2_CID_TEST_PATTERN:
+ ret = ov13b10_enable_test_pattern(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_HFLIP:
+ ret = ov13b10_set_ctrl_hflip(ov13b, ctrl->val);
+ break;
+ case V4L2_CID_VFLIP:
+ ret = ov13b10_set_ctrl_vflip(ov13b, ctrl->val);
+ break;
+ default:
+ dev_info(&client->dev,
+ "ctrl(id:0x%x,val:0x%x) is not handled\n",
+ ctrl->id, ctrl->val);
+ break;
+ }
+
+ pm_runtime_put(&client->dev);
+
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops ov13b10_ctrl_ops = {
+ .s_ctrl = ov13b10_set_ctrl,
+};
+
+static int ov13b10_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ /* Only one bayer order(GRBG) is supported */
+ if (code->index > 0)
+ return -EINVAL;
+
+ code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ return 0;
+}
+
+static int ov13b10_enum_frame_size(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index >= ARRAY_SIZE(supported_modes))
+ return -EINVAL;
+
+ if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ return -EINVAL;
+
+ fse->min_width = supported_modes[fse->index].width;
+ fse->max_width = fse->min_width;
+ fse->min_height = supported_modes[fse->index].height;
+ fse->max_height = fse->min_height;
+
+ return 0;
+}
+
+static void ov13b10_update_pad_format(const struct ov13b10_mode *mode,
+ struct v4l2_subdev_format *fmt)
+{
+ fmt->format.width = mode->width;
+ fmt->format.height = mode->height;
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ fmt->format.field = V4L2_FIELD_NONE;
+}
+
+static int ov13b10_do_get_pad_format(struct ov13b10 *ov13b,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct v4l2_mbus_framefmt *framefmt;
+ struct v4l2_subdev *sd = &ov13b->sd;
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ fmt->format = *framefmt;
+ } else {
+ ov13b10_update_pad_format(ov13b->cur_mode, fmt);
+ }
+
+ return 0;
+}
+
+static int ov13b10_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ mutex_lock(&ov13b->mutex);
+ ret = ov13b10_do_get_pad_format(ov13b, sd_state, fmt);
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int
+ov13b10_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *fmt)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ const struct ov13b10_mode *mode;
+ struct v4l2_mbus_framefmt *framefmt;
+ s32 vblank_def;
+ s32 vblank_min;
+ s64 h_blank;
+ s64 pixel_rate;
+ s64 link_freq;
+
+ mutex_lock(&ov13b->mutex);
+
+ /* Only one raw bayer(GRBG) order is supported */
+ if (fmt->format.code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ fmt->format.code = MEDIA_BUS_FMT_SGRBG10_1X10;
+
+ mode = v4l2_find_nearest_size(supported_modes,
+ ARRAY_SIZE(supported_modes),
+ width, height,
+ fmt->format.width, fmt->format.height);
+ ov13b10_update_pad_format(mode, fmt);
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, fmt->pad);
+ *framefmt = fmt->format;
+ } else {
+ ov13b->cur_mode = mode;
+ __v4l2_ctrl_s_ctrl(ov13b->link_freq, mode->link_freq_index);
+ link_freq = link_freq_menu_items[mode->link_freq_index];
+ pixel_rate = link_freq_to_pixel_rate(link_freq);
+ __v4l2_ctrl_s_ctrl_int64(ov13b->pixel_rate, pixel_rate);
+
+ /* Update limits and set FPS to default */
+ vblank_def = ov13b->cur_mode->vts_def -
+ ov13b->cur_mode->height;
+ vblank_min = ov13b->cur_mode->vts_min -
+ ov13b->cur_mode->height;
+ __v4l2_ctrl_modify_range(ov13b->vblank, vblank_min,
+ OV13B10_VTS_MAX
+ - ov13b->cur_mode->height,
+ 1,
+ vblank_def);
+ __v4l2_ctrl_s_ctrl(ov13b->vblank, vblank_def);
+ h_blank =
+ link_freq_configs[mode->link_freq_index].pixels_per_line
+ - ov13b->cur_mode->width;
+ __v4l2_ctrl_modify_range(ov13b->hblank, h_blank,
+ h_blank, 1, h_blank);
+ }
+
+ mutex_unlock(&ov13b->mutex);
+
+ return 0;
+}
+
+static int ov13b10_start_streaming(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ const struct ov13b10_reg_list *reg_list;
+ int ret, link_freq_index;
+
+ /* Take the sensor out of software reset */
+ ret = ov13b10_write_reg(ov13b, OV13B10_REG_SOFTWARE_RST,
+ OV13B10_REG_VALUE_08BIT, OV13B10_SOFTWARE_RST);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set powerup registers\n",
+ __func__);
+ return ret;
+ }
+
+ link_freq_index = ov13b->cur_mode->link_freq_index;
+ reg_list = &link_freq_configs[link_freq_index].reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set plls\n", __func__);
+ return ret;
+ }
+
+ /* Apply default values of current mode */
+ reg_list = &ov13b->cur_mode->reg_list;
+ ret = ov13b10_write_reg_list(ov13b, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "%s failed to set mode\n", __func__);
+ return ret;
+ }
+
+ /* Apply customized values from user */
+ ret = __v4l2_ctrl_handler_setup(ov13b->sd.ctrl_handler);
+ if (ret)
+ return ret;
+
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT,
+ OV13B10_MODE_STREAMING);
+}
+
+/* Stop streaming */
+static int ov13b10_stop_streaming(struct ov13b10 *ov13b)
+{
+ return ov13b10_write_reg(ov13b, OV13B10_REG_MODE_SELECT,
+ OV13B10_REG_VALUE_08BIT, OV13B10_MODE_STANDBY);
+}
+
+static int ov13b10_set_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ struct i2c_client *client = v4l2_get_subdevdata(sd);
+ int ret = 0;
+
+ mutex_lock(&ov13b->mutex);
+ if (ov13b->streaming == enable) {
+ mutex_unlock(&ov13b->mutex);
+ return 0;
+ }
+
+ if (enable) {
+ ret = pm_runtime_resume_and_get(&client->dev);
+ if (ret < 0)
+ goto err_unlock;
+
+ /*
+ * Apply default & customized values
+ * and then start streaming.
+ */
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto err_rpm_put;
+ } else {
+ ov13b10_stop_streaming(ov13b);
+ pm_runtime_put(&client->dev);
+ }
+
+ ov13b->streaming = enable;
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+
+err_rpm_put:
+ pm_runtime_put(&client->dev);
+err_unlock:
+ mutex_unlock(&ov13b->mutex);
+
+ return ret;
+}
+
+static int __maybe_unused ov13b10_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ if (ov13b->streaming)
+ ov13b10_stop_streaming(ov13b);
+
+ return 0;
+}
+
+static int __maybe_unused ov13b10_resume(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+ int ret;
+
+ if (ov13b->streaming) {
+ ret = ov13b10_start_streaming(ov13b);
+ if (ret)
+ goto error;
+ }
+
+ return 0;
+
+error:
+ ov13b10_stop_streaming(ov13b);
+ ov13b->streaming = false;
+ return ret;
+}
+
+/* Verify chip ID */
+static int ov13b10_identify_module(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ int ret;
+ u32 val;
+
+ ret = ov13b10_read_reg(ov13b, OV13B10_REG_CHIP_ID,
+ OV13B10_REG_VALUE_24BIT, &val);
+ if (ret)
+ return ret;
+
+ if (val != OV13B10_CHIP_ID) {
+ dev_err(&client->dev, "chip id mismatch: %x!=%x\n",
+ OV13B10_CHIP_ID, val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct v4l2_subdev_video_ops ov13b10_video_ops = {
+ .s_stream = ov13b10_set_stream,
+};
+
+static const struct v4l2_subdev_pad_ops ov13b10_pad_ops = {
+ .enum_mbus_code = ov13b10_enum_mbus_code,
+ .get_fmt = ov13b10_get_pad_format,
+ .set_fmt = ov13b10_set_pad_format,
+ .enum_frame_size = ov13b10_enum_frame_size,
+};
+
+static const struct v4l2_subdev_ops ov13b10_subdev_ops = {
+ .video = &ov13b10_video_ops,
+ .pad = &ov13b10_pad_ops,
+};
+
+static const struct media_entity_operations ov13b10_subdev_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static const struct v4l2_subdev_internal_ops ov13b10_internal_ops = {
+ .open = ov13b10_open,
+};
+
+/* Initialize control handlers */
+static int ov13b10_init_controls(struct ov13b10 *ov13b)
+{
+ struct i2c_client *client = v4l2_get_subdevdata(&ov13b->sd);
+ struct v4l2_fwnode_device_properties props;
+ struct v4l2_ctrl_handler *ctrl_hdlr;
+ s64 exposure_max;
+ s64 vblank_def;
+ s64 vblank_min;
+ s64 hblank;
+ s64 pixel_rate_min;
+ s64 pixel_rate_max;
+ const struct ov13b10_mode *mode;
+ u32 max;
+ int ret;
+
+ ctrl_hdlr = &ov13b->ctrl_handler;
+ ret = v4l2_ctrl_handler_init(ctrl_hdlr, 10);
+ if (ret)
+ return ret;
+
+ mutex_init(&ov13b->mutex);
+ ctrl_hdlr->lock = &ov13b->mutex;
+ max = ARRAY_SIZE(link_freq_menu_items) - 1;
+ ov13b->link_freq = v4l2_ctrl_new_int_menu(ctrl_hdlr,
+ &ov13b10_ctrl_ops,
+ V4L2_CID_LINK_FREQ,
+ max,
+ 0,
+ link_freq_menu_items);
+ if (ov13b->link_freq)
+ ov13b->link_freq->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ pixel_rate_max = link_freq_to_pixel_rate(link_freq_menu_items[0]);
+ pixel_rate_min = 0;
+ /* By default, PIXEL_RATE is read only */
+ ov13b->pixel_rate = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_PIXEL_RATE,
+ pixel_rate_min, pixel_rate_max,
+ 1, pixel_rate_max);
+
+ mode = ov13b->cur_mode;
+ vblank_def = mode->vts_def - mode->height;
+ vblank_min = mode->vts_min - mode->height;
+ ov13b->vblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VBLANK,
+ vblank_min,
+ OV13B10_VTS_MAX - mode->height, 1,
+ vblank_def);
+
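+ /* Horizontal blanking is fixed for each mode, so the control is read-only */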
+ hblank = link_freq_configs[mode->link_freq_index].pixels_per_line -
+ mode->width;
+ ov13b->hblank = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HBLANK,
+ hblank, hblank, 1, hblank);
+ if (ov13b->hblank)
+ ov13b->hblank->flags |= V4L2_CTRL_FLAG_READ_ONLY;
+
+ exposure_max = mode->vts_def - 8;
+ ov13b->exposure = v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_EXPOSURE,
+ OV13B10_EXPOSURE_MIN,
+ exposure_max, OV13B10_EXPOSURE_STEP,
+ exposure_max);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_ANALOGUE_GAIN,
+ OV13B10_ANA_GAIN_MIN, OV13B10_ANA_GAIN_MAX,
+ OV13B10_ANA_GAIN_STEP, OV13B10_ANA_GAIN_DEFAULT);
+
+ /* Digital gain */
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops, V4L2_CID_DIGITAL_GAIN,
+ OV13B10_DGTL_GAIN_MIN, OV13B10_DGTL_GAIN_MAX,
+ OV13B10_DGTL_GAIN_STEP, OV13B10_DGTL_GAIN_DEFAULT);
+
+ v4l2_ctrl_new_std_menu_items(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_TEST_PATTERN,
+ ARRAY_SIZE(ov13b10_test_pattern_menu) - 1,
+ 0, 0, ov13b10_test_pattern_menu);
+
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ v4l2_ctrl_new_std(ctrl_hdlr, &ov13b10_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+
+ if (ctrl_hdlr->error) {
+ ret = ctrl_hdlr->error;
+ dev_err(&client->dev, "%s control init failed (%d)\n",
+ __func__, ret);
+ goto error;
+ }
+
+ ret = v4l2_fwnode_device_parse(&client->dev, &props);
+ if (ret)
+ goto error;
+
+ ret = v4l2_ctrl_new_fwnode_properties(ctrl_hdlr, &ov13b10_ctrl_ops,
+ &props);
+ if (ret)
+ goto error;
+
+ ov13b->sd.ctrl_handler = ctrl_hdlr;
+
+ return 0;
+
+error:
+ v4l2_ctrl_handler_free(ctrl_hdlr);
+ mutex_destroy(&ov13b->mutex);
+
+ return ret;
+}
+
+static void ov13b10_free_controls(struct ov13b10 *ov13b)
+{
+ v4l2_ctrl_handler_free(ov13b->sd.ctrl_handler);
+ mutex_destroy(&ov13b->mutex);
+}
+
+static int ov13b10_check_hwcfg(struct device *dev)
+{
+ struct v4l2_fwnode_endpoint bus_cfg = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY
+ };
+ struct fwnode_handle *ep;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
+ unsigned int i, j;
+ int ret;
+ u32 ext_clk;
+
+ if (!fwnode)
+ return -ENXIO;
+
+ ret = fwnode_property_read_u32(dev_fwnode(dev), "clock-frequency",
+ &ext_clk);
+ if (ret) {
+ dev_err(dev, "can't get clock frequency");
+ return ret;
+ }
+
+ if (ext_clk != OV13B10_EXT_CLK) {
+ dev_err(dev, "external clock %d is not supported",
+ ext_clk);
+ return -EINVAL;
+ }
+
+ ep = fwnode_graph_get_next_endpoint(fwnode, NULL);
+ if (!ep)
+ return -ENXIO;
+
+ ret = v4l2_fwnode_endpoint_alloc_parse(ep, &bus_cfg);
+ fwnode_handle_put(ep);
+ if (ret)
+ return ret;
+
+ if (bus_cfg.bus.mipi_csi2.num_data_lanes != OV13B10_DATA_LANES) {
+ dev_err(dev, "number of CSI2 data lanes %d is not supported",
+ bus_cfg.bus.mipi_csi2.num_data_lanes);
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+ if (!bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequencies defined");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
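+ /* Every link frequency used by the driver must be advertised in firmware */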
+ for (i = 0; i < ARRAY_SIZE(link_freq_menu_items); i++) {
+ for (j = 0; j < bus_cfg.nr_of_link_frequencies; j++) {
+ if (link_freq_menu_items[i] ==
+ bus_cfg.link_frequencies[j])
+ break;
+ }
+
+ if (j == bus_cfg.nr_of_link_frequencies) {
+ dev_err(dev, "no link frequency %lld supported",
+ link_freq_menu_items[i]);
+ ret = -EINVAL;
+ goto out_err;
+ }
+ }
+
+out_err:
+ v4l2_fwnode_endpoint_free(&bus_cfg);
+
+ return ret;
+}
+
+static int ov13b10_probe(struct i2c_client *client)
+{
+ struct ov13b10 *ov13b;
+ int ret;
+
+ /* Check HW config */
+ ret = ov13b10_check_hwcfg(&client->dev);
+ if (ret) {
+ dev_err(&client->dev, "failed to check hwcfg: %d", ret);
+ return ret;
+ }
+
+ ov13b = devm_kzalloc(&client->dev, sizeof(*ov13b), GFP_KERNEL);
+ if (!ov13b)
+ return -ENOMEM;
+
+ /* Initialize subdev */
+ v4l2_i2c_subdev_init(&ov13b->sd, client, &ov13b10_subdev_ops);
+
+ /* Check module identity */
+ ret = ov13b10_identify_module(ov13b);
+ if (ret) {
+ dev_err(&client->dev, "failed to find sensor: %d\n", ret);
+ return ret;
+ }
+
+ /* Set default mode to max resolution */
+ ov13b->cur_mode = &supported_modes[0];
+
+ ret = ov13b10_init_controls(ov13b);
+ if (ret)
+ return ret;
+
+ /* Set up subdev internal ops, flags and media entity */
+ ov13b->sd.internal_ops = &ov13b10_internal_ops;
+ ov13b->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov13b->sd.entity.ops = &ov13b10_subdev_entity_ops;
+ ov13b->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
+
+ /* Initialize source pad */
+ ov13b->pad.flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&ov13b->sd.entity, 1, &ov13b->pad);
+ if (ret) {
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+ goto error_handler_free;
+ }
+
+ ret = v4l2_async_register_subdev_sensor(&ov13b->sd);
+ if (ret < 0)
+ goto error_media_entity;
+
+ /*
+ * Device is already turned on by i2c-core with ACPI domain PM.
+ * Enable runtime PM and turn off the device.
+ */
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+ pm_runtime_idle(&client->dev);
+
+ return 0;
+
+error_media_entity:
+ media_entity_cleanup(&ov13b->sd.entity);
+
+error_handler_free:
+ ov13b10_free_controls(ov13b);
+ dev_err(&client->dev, "%s failed:%d\n", __func__, ret);
+
+ return ret;
+}
+
+static int ov13b10_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct ov13b10 *ov13b = to_ov13b10(sd);
+
+ v4l2_async_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ ov13b10_free_controls(ov13b);
+
+ pm_runtime_disable(&client->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ov13b10_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(ov13b10_suspend, ov13b10_resume)
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id ov13b10_acpi_ids[] = {
+ {"OVTIDB10"},
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(acpi, ov13b10_acpi_ids);
+#endif
+
+static struct i2c_driver ov13b10_i2c_driver = {
+ .driver = {
+ .name = "ov13b10",
+ .pm = &ov13b10_pm_ops,
+ .acpi_match_table = ACPI_PTR(ov13b10_acpi_ids),
+ },
+ .probe_new = ov13b10_probe,
+ .remove = ov13b10_remove,
+};
+
+module_i2c_driver(ov13b10_i2c_driver);
+
+MODULE_AUTHOR("Kao, Arec <arec.kao@intel.com>");
+MODULE_DESCRIPTION("Omnivision ov13b10 sensor driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/media/i2c/ov5670.c b/drivers/media/i2c/ov5670.c
index 49189926afd6..251f459ab484 100644
--- a/drivers/media/i2c/ov5670.c
+++ b/drivers/media/i2c/ov5670.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
#include <media/v4l2-fwnode.h>
#define OV5670_REG_CHIP_ID 0x300a
@@ -2420,6 +2421,12 @@ static int ov5670_identify_module(struct ov5670 *ov5670)
return 0;
}
+static const struct v4l2_subdev_core_ops ov5670_core_ops = {
+ .log_status = v4l2_ctrl_subdev_log_status,
+ .subscribe_event = v4l2_ctrl_subdev_subscribe_event,
+ .unsubscribe_event = v4l2_event_subdev_unsubscribe,
+};
+
static const struct v4l2_subdev_video_ops ov5670_video_ops = {
.s_stream = ov5670_set_stream,
};
@@ -2436,6 +2443,7 @@ static const struct v4l2_subdev_sensor_ops ov5670_sensor_ops = {
};
static const struct v4l2_subdev_ops ov5670_subdev_ops = {
+ .core = &ov5670_core_ops,
.video = &ov5670_video_ops,
.pad = &ov5670_pad_ops,
.sensor = &ov5670_sensor_ops,
@@ -2489,7 +2497,8 @@ static int ov5670_probe(struct i2c_client *client)
}
ov5670->sd.internal_ops = &ov5670_internal_ops;
- ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ ov5670->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE |
+ V4L2_SUBDEV_FL_HAS_EVENTS;
ov5670->sd.entity.ops = &ov5670_subdev_entity_ops;
ov5670->sd.entity.function = MEDIA_ENT_F_CAM_SENSOR;
diff --git a/drivers/media/i2c/ov8856.c b/drivers/media/i2c/ov8856.c
index aa74744b91c7..c6c6050cda1a 100644
--- a/drivers/media/i2c/ov8856.c
+++ b/drivers/media/i2c/ov8856.c
@@ -107,6 +107,11 @@ static const char * const ov8856_supply_names[] = {
"dvdd", /* Digital core power */
};
+enum {
+ OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
+ OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
+};
+
struct ov8856_reg {
u16 address;
u8 val;
@@ -145,6 +150,9 @@ struct ov8856_mode {
/* Number of data lanes */
u8 data_lanes;
+
+ /* Default MEDIA_BUS_FMT for this mode */
+ u32 default_mbus_index;
};
struct ov8856_mipi_data_rates {
@@ -1055,7 +1063,7 @@ static const struct ov8856_reg lane_4_mode_3264x2448[] = {
{0x3810, 0x00},
{0x3811, 0x04},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x01},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1259,7 +1267,7 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x3810, 0x00},
{0x3811, 0x02},
{0x3812, 0x00},
- {0x3813, 0x01},
+ {0x3813, 0x02},
{0x3814, 0x03},
{0x3815, 0x01},
{0x3816, 0x00},
@@ -1372,6 +1380,19 @@ static const struct ov8856_reg lane_4_mode_1632x1224[] = {
{0x5e10, 0xfc}
};
+static const struct ov8856_reg mipi_data_mbus_sbggr10_1x10[] = {
+ {0x3813, 0x02},
+};
+
+static const struct ov8856_reg mipi_data_mbus_sgrbg10_1x10[] = {
+ {0x3813, 0x01},
+};
+
+static const u32 ov8856_mbus_codes[] = {
+ MEDIA_BUS_FMT_SBGGR10_1X10,
+ MEDIA_BUS_FMT_SGRBG10_1X10
+};
+
static const char * const ov8856_test_pattern_menu[] = {
"Disabled",
"Standard Color Bar",
@@ -1380,6 +1401,17 @@ static const char * const ov8856_test_pattern_menu[] = {
"Bottom-Top Darker Color Bar"
};
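+/* Bayer-order specific register lists, indexed by media bus format */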
+static const struct ov8856_reg_list bayer_offset_configs[] = {
+ [OV8856_MEDIA_BUS_FMT_SBGGR10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sbggr10_1x10),
+ .regs = mipi_data_mbus_sbggr10_1x10,
+ },
+ [OV8856_MEDIA_BUS_FMT_SGRBG10_1X10] = {
+ .num_of_regs = ARRAY_SIZE(mipi_data_mbus_sgrbg10_1x10),
+ .regs = mipi_data_mbus_sgrbg10_1x10,
+ }
+};
+
struct ov8856 {
struct v4l2_subdev sd;
struct media_pad pad;
@@ -1399,6 +1431,9 @@ struct ov8856 {
/* Current mode */
const struct ov8856_mode *cur_mode;
+ /* Application specified mbus format */
+ u32 cur_mbus_index;
+
 /* To serialize asynchronous callbacks */
struct mutex mutex;
@@ -1450,6 +1485,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 0,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1464,6 +1500,7 @@ static const struct ov8856_lane_cfg lane_cfg_2 = {
},
.link_freq_index = 1,
.data_lanes = 2,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
}}
};
@@ -1499,6 +1536,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 1640,
@@ -1513,6 +1551,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SGRBG10_1X10,
},
{
.width = 3264,
@@ -1527,6 +1566,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 0,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
},
{
.width = 1632,
@@ -1541,6 +1581,7 @@ static const struct ov8856_lane_cfg lane_cfg_4 = {
},
.link_freq_index = 1,
.data_lanes = 4,
+ .default_mbus_index = OV8856_MEDIA_BUS_FMT_SBGGR10_1X10,
}}
};
@@ -1904,12 +1945,21 @@ static int ov8856_init_controls(struct ov8856 *ov8856)
return 0;
}
-static void ov8856_update_pad_format(const struct ov8856_mode *mode,
+static void ov8856_update_pad_format(struct ov8856 *ov8856,
+ const struct ov8856_mode *mode,
struct v4l2_mbus_framefmt *fmt)
{
+ int index;
+
fmt->width = mode->width;
fmt->height = mode->height;
- fmt->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (ov8856_mbus_codes[index] == fmt->code)
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
+ index = mode->default_mbus_index;
+ fmt->code = ov8856_mbus_codes[index];
+ ov8856->cur_mbus_index = index;
fmt->field = V4L2_FIELD_NONE;
}
@@ -1935,6 +1985,13 @@ static int ov8856_start_streaming(struct ov8856 *ov8856)
return ret;
}
+ reg_list = &bayer_offset_configs[ov8856->cur_mbus_index];
+ ret = ov8856_write_reg_list(ov8856, reg_list);
+ if (ret) {
+ dev_err(&client->dev, "failed to set mbus format");
+ return ret;
+ }
+
ret = __v4l2_ctrl_handler_setup(ov8856->sd.ctrl_handler);
if (ret)
return ret;
@@ -2096,7 +2153,7 @@ static int ov8856_set_format(struct v4l2_subdev *sd,
fmt->format.height);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, mode, &fmt->format);
if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
*v4l2_subdev_get_try_format(sd, sd_state, fmt->pad) = fmt->format;
} else {
@@ -2140,7 +2197,7 @@ static int ov8856_get_format(struct v4l2_subdev *sd,
sd_state,
fmt->pad);
else
- ov8856_update_pad_format(ov8856->cur_mode, &fmt->format);
+ ov8856_update_pad_format(ov8856, ov8856->cur_mode, &fmt->format);
mutex_unlock(&ov8856->mutex);
@@ -2151,11 +2208,10 @@ static int ov8856_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
{
- /* Only one bayer order GRBG is supported */
- if (code->index > 0)
+ if (code->index >= ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
- code->code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ code->code = ov8856_mbus_codes[code->index];
return 0;
}
@@ -2165,11 +2221,15 @@ static int ov8856_enum_frame_size(struct v4l2_subdev *sd,
struct v4l2_subdev_frame_size_enum *fse)
{
struct ov8856 *ov8856 = to_ov8856(sd);
+ int index;
if (fse->index >= ov8856->modes_size)
return -EINVAL;
- if (fse->code != MEDIA_BUS_FMT_SGRBG10_1X10)
+ for (index = 0; index < ARRAY_SIZE(ov8856_mbus_codes); ++index)
+ if (fse->code == ov8856_mbus_codes[index])
+ break;
+ if (index == ARRAY_SIZE(ov8856_mbus_codes))
return -EINVAL;
fse->min_width = ov8856->priv_lane->supported_modes[fse->index].width;
@@ -2185,7 +2245,7 @@ static int ov8856_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
struct ov8856 *ov8856 = to_ov8856(sd);
mutex_lock(&ov8856->mutex);
- ov8856_update_pad_format(&ov8856->priv_lane->supported_modes[0],
+ ov8856_update_pad_format(ov8856, &ov8856->priv_lane->supported_modes[0],
v4l2_subdev_get_try_format(sd, fh->state, 0));
mutex_unlock(&ov8856->mutex);
@@ -2426,6 +2486,7 @@ static int ov8856_probe(struct i2c_client *client)
mutex_init(&ov8856->mutex);
ov8856->cur_mode = &ov8856->priv_lane->supported_modes[0];
+ ov8856->cur_mbus_index = ov8856->cur_mode->default_mbus_index;
ret = ov8856_init_controls(ov8856);
if (ret) {
dev_err(&client->dev, "failed to init controls: %d", ret);
diff --git a/drivers/media/i2c/st-mipid02.c b/drivers/media/i2c/st-mipid02.c
index f630b88cbfaa..ef976d085d72 100644
--- a/drivers/media/i2c/st-mipid02.c
+++ b/drivers/media/i2c/st-mipid02.c
@@ -876,11 +876,10 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
bridge->rx = ep;
/* register async notifier so we get noticed when sensor is connected */
- v4l2_async_notifier_init(&bridge->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &bridge->notifier,
- of_fwnode_handle(ep_node),
- struct v4l2_async_subdev);
+ v4l2_async_nf_init(&bridge->notifier);
+ asd = v4l2_async_nf_add_fwnode_remote(&bridge->notifier,
+ of_fwnode_handle(ep_node),
+ struct v4l2_async_subdev);
of_node_put(ep_node);
if (IS_ERR(asd)) {
@@ -890,10 +889,9 @@ static int mipid02_parse_rx_ep(struct mipid02_dev *bridge)
}
bridge->notifier.ops = &mipid02_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&bridge->sd,
- &bridge->notifier);
+ ret = v4l2_async_subdev_nf_register(&bridge->sd, &bridge->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
return ret;
@@ -1031,8 +1029,8 @@ static int mipid02_probe(struct i2c_client *client)
return 0;
unregister_notifier:
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
power_off:
mipid02_set_power_off(bridge);
entity_cleanup:
@@ -1048,8 +1046,8 @@ static int mipid02_remove(struct i2c_client *client)
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct mipid02_dev *bridge = to_mipid02_dev(sd);
- v4l2_async_notifier_unregister(&bridge->notifier);
- v4l2_async_notifier_cleanup(&bridge->notifier);
+ v4l2_async_nf_unregister(&bridge->notifier);
+ v4l2_async_nf_cleanup(&bridge->notifier);
v4l2_async_unregister_subdev(&bridge->sd);
mipid02_set_power_off(bridge);
media_entity_cleanup(&bridge->sd.entity);
diff --git a/drivers/media/i2c/tda1997x.c b/drivers/media/i2c/tda1997x.c
index 6070aaf0b32e..8fafce26d62f 100644
--- a/drivers/media/i2c/tda1997x.c
+++ b/drivers/media/i2c/tda1997x.c
@@ -1092,67 +1092,82 @@ tda1997x_detect_std(struct tda1997x_state *state,
struct v4l2_dv_timings *timings)
{
struct v4l2_subdev *sd = &state->sd;
- u32 vper;
- u16 hper;
- u16 hsper;
- int i;
/*
* Read the FMT registers
- * REG_V_PER: Period of a frame (or two fields) in MCLK(27MHz) cycles
- * REG_H_PER: Period of a line in MCLK(27MHz) cycles
- * REG_HS_WIDTH: Period of horiz sync pulse in MCLK(27MHz) cycles
+ * REG_V_PER: Period of a frame (or field) in MCLK (27MHz) cycles
+ * REG_H_PER: Period of a line in MCLK (27MHz) cycles
+ * REG_HS_WIDTH: Period of horiz sync pulse in MCLK (27MHz) cycles
*/
- vper = io_read24(sd, REG_V_PER) & MASK_VPER;
- hper = io_read16(sd, REG_H_PER) & MASK_HPER;
- hsper = io_read16(sd, REG_HS_WIDTH) & MASK_HSWIDTH;
- v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+ u32 vper, vsync_pos;
+ u16 hper, hsync_pos, hsper, interlaced;
+ u16 htot, hact, hfront, hsync, hback;
+ u16 vtot, vact, vfront1, vfront2, vsync, vback1, vback2;
if (!state->input_detect[0] && !state->input_detect[1])
return -ENOLINK;
- for (i = 0; v4l2_dv_timings_presets[i].bt.width; i++) {
- const struct v4l2_bt_timings *bt;
- u32 lines, width, _hper, _hsper;
- u32 vmin, vmax, hmin, hmax, hsmin, hsmax;
- bool vmatch, hmatch, hsmatch;
-
- bt = &v4l2_dv_timings_presets[i].bt;
- width = V4L2_DV_BT_FRAME_WIDTH(bt);
- lines = V4L2_DV_BT_FRAME_HEIGHT(bt);
- _hper = (u32)bt->pixelclock / width;
- if (bt->interlaced)
- lines /= 2;
- /* vper +/- 0.7% */
- vmin = ((27000000 / 1000) * 993) / _hper * lines;
- vmax = ((27000000 / 1000) * 1007) / _hper * lines;
- /* hper +/- 1.0% */
- hmin = ((27000000 / 100) * 99) / _hper;
- hmax = ((27000000 / 100) * 101) / _hper;
- /* hsper +/- 2 (take care to avoid 32bit overflow) */
- _hsper = 27000 * bt->hsync / ((u32)bt->pixelclock/1000);
- hsmin = _hsper - 2;
- hsmax = _hsper + 2;
-
- /* vmatch matches the framerate */
- vmatch = ((vper <= vmax) && (vper >= vmin)) ? 1 : 0;
- /* hmatch matches the width */
- hmatch = ((hper <= hmax) && (hper >= hmin)) ? 1 : 0;
- /* hsmatch matches the hswidth */
- hsmatch = ((hsper <= hsmax) && (hsper >= hsmin)) ? 1 : 0;
- if (hmatch && vmatch && hsmatch) {
- v4l2_print_dv_timings(sd->name, "Detected format: ",
- &v4l2_dv_timings_presets[i],
- false);
- if (timings)
- *timings = v4l2_dv_timings_presets[i];
- return 0;
- }
- }
+ vper = io_read24(sd, REG_V_PER);
+ hper = io_read16(sd, REG_H_PER);
+ hsper = io_read16(sd, REG_HS_WIDTH);
+ vsync_pos = vper & MASK_VPER_SYNC_POS;
+ hsync_pos = hper & MASK_HPER_SYNC_POS;
+ interlaced = hsper & MASK_HSWIDTH_INTERLACED;
+ vper &= MASK_VPER;
+ hper &= MASK_HPER;
+ hsper &= MASK_HSWIDTH;
+ v4l2_dbg(1, debug, sd, "Signal Timings: %u/%u/%u\n", vper, hper, hsper);
+
+ htot = io_read16(sd, REG_FMT_H_TOT);
+ hact = io_read16(sd, REG_FMT_H_ACT);
+ hfront = io_read16(sd, REG_FMT_H_FRONT);
+ hsync = io_read16(sd, REG_FMT_H_SYNC);
+ hback = io_read16(sd, REG_FMT_H_BACK);
+
+ vtot = io_read16(sd, REG_FMT_V_TOT);
+ vact = io_read16(sd, REG_FMT_V_ACT);
+ vfront1 = io_read(sd, REG_FMT_V_FRONT_F1);
+ vfront2 = io_read(sd, REG_FMT_V_FRONT_F2);
+ vsync = io_read(sd, REG_FMT_V_SYNC);
+ vback1 = io_read(sd, REG_FMT_V_BACK_F1);
+ vback2 = io_read(sd, REG_FMT_V_BACK_F2);
+
+ v4l2_dbg(1, debug, sd, "Geometry: H %u %u %u %u %u Sync%c V %u %u %u %u %u %u %u Sync%c\n",
+ htot, hact, hfront, hsync, hback, hsync_pos ? '+' : '-',
+ vtot, vact, vfront1, vfront2, vsync, vback1, vback2, vsync_pos ? '+' : '-');
+
+ if (!timings)
+ return 0;
- v4l_err(state->client, "no resolution match for timings: %d/%d/%d\n",
- vper, hper, hsper);
- return -ERANGE;
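+ /* Fill the timings directly from the measured geometry and period registers */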
+ timings->type = V4L2_DV_BT_656_1120;
+ timings->bt.width = hact;
+ timings->bt.hfrontporch = hfront;
+ timings->bt.hsync = hsync;
+ timings->bt.hbackporch = hback;
+ timings->bt.height = vact;
+ timings->bt.vfrontporch = vfront1;
+ timings->bt.vsync = vsync;
+ timings->bt.vbackporch = vback1;
+ timings->bt.interlaced = interlaced ? V4L2_DV_INTERLACED : V4L2_DV_PROGRESSIVE;
+ timings->bt.polarities = vsync_pos ? V4L2_DV_VSYNC_POS_POL : 0;
+ timings->bt.polarities |= hsync_pos ? V4L2_DV_HSYNC_POS_POL : 0;
+
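+ /* Derive the pixel clock from the total frame size and the measured frame period */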
+ timings->bt.pixelclock = (u64)htot * vtot * 27000000;
+ if (interlaced) {
+ timings->bt.il_vfrontporch = vfront2;
+ timings->bt.il_vsync = timings->bt.vsync;
+ timings->bt.il_vbackporch = vback2;
+ do_div(timings->bt.pixelclock, vper * 2 /* full frame */);
+ } else {
+ timings->bt.il_vfrontporch = 0;
+ timings->bt.il_vsync = 0;
+ timings->bt.il_vbackporch = 0;
+ do_div(timings->bt.pixelclock, vper);
+ }
+ v4l2_find_dv_timings_cap(timings, &tda1997x_dv_timings_cap,
+ (u32)timings->bt.pixelclock / 500, NULL, NULL);
+ v4l2_print_dv_timings(sd->name, "Detected format: ", timings, false);
+ return 0;
}
/* some sort of errata workaround for chip revision 0 (N1) */
@@ -1248,13 +1263,13 @@ tda1997x_parse_infoframe(struct tda1997x_state *state, u16 addr)
{
struct v4l2_subdev *sd = &state->sd;
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
u8 reg;
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -1928,13 +1943,13 @@ static int tda1997x_log_infoframe(struct v4l2_subdev *sd, int addr)
{
struct tda1997x_state *state = to_state(sd);
union hdmi_infoframe frame;
- u8 buffer[40];
+ u8 buffer[40] = { 0 };
int len, err;
/* read data */
len = io_readn(sd, addr, sizeof(buffer), buffer);
v4l2_dbg(1, debug, sd, "infoframe: addr=%d len=%d\n", addr, len);
- err = hdmi_infoframe_unpack(&frame, buffer, sizeof(buffer));
+ err = hdmi_infoframe_unpack(&frame, buffer, len);
if (err) {
v4l_err(state->client,
"failed parsing %d byte infoframe: 0x%04x/0x%02x\n",
@@ -2450,7 +2465,8 @@ static const struct media_entity_operations tda1997x_media_ops = {
static int tda1997x_pcm_startup(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct tda1997x_state *state = snd_soc_dai_get_drvdata(dai);
+ struct v4l2_subdev *sd = snd_soc_dai_get_drvdata(dai);
+ struct tda1997x_state *state = to_state(sd);
struct snd_soc_component *component = dai->component;
struct snd_pcm_runtime *rtd = substream->runtime;
int rate, err;
@@ -2759,7 +2775,6 @@ static int tda1997x_probe(struct i2c_client *client,
dev_err(&client->dev, "register audio codec failed\n");
goto err_free_media;
}
- dev_set_drvdata(&state->client->dev, state);
v4l_info(state->client, "registered audio codec\n");
}
diff --git a/drivers/media/i2c/tda1997x_regs.h b/drivers/media/i2c/tda1997x_regs.h
index d9b3daada07d..115371ba33f0 100644
--- a/drivers/media/i2c/tda1997x_regs.h
+++ b/drivers/media/i2c/tda1997x_regs.h
@@ -117,9 +117,12 @@
#define REG_CURPAGE_00H 0xFF
#define MASK_VPER 0x3fffff
+#define MASK_VPER_SYNC_POS 0x800000
#define MASK_VHREF 0x3fff
#define MASK_HPER 0x0fff
+#define MASK_HPER_SYNC_POS 0x8000
#define MASK_HSWIDTH 0x03ff
+#define MASK_HSWIDTH_INTERLACED 0x8000
/* HPD Detection */
#define DETECT_UTIL BIT(7) /* utility of HDMI level */
diff --git a/drivers/media/i2c/video-i2c.c b/drivers/media/i2c/video-i2c.c
index de12f38f347c..cb660b4bfd4b 100644
--- a/drivers/media/i2c/video-i2c.c
+++ b/drivers/media/i2c/video-i2c.c
@@ -441,14 +441,15 @@ static void buffer_queue(struct vb2_buffer *vb)
static int video_i2c_thread_vid_cap(void *priv)
{
struct video_i2c_data *data = priv;
- unsigned int delay = mult_frac(HZ, data->frame_interval.numerator,
- data->frame_interval.denominator);
+ u32 delay = mult_frac(1000000UL, data->frame_interval.numerator,
+ data->frame_interval.denominator);
+ s64 end_us = ktime_to_us(ktime_get());
set_freezable();
do {
- unsigned long start_jiffies = jiffies;
struct video_i2c_buffer *vid_cap_buf = NULL;
+ s64 current_us;
int schedule_delay;
try_to_freeze();
@@ -475,12 +476,14 @@ static int video_i2c_thread_vid_cap(void *priv)
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
- schedule_delay = delay - (jiffies - start_jiffies);
-
- if (time_after(jiffies, start_jiffies + delay))
- schedule_delay = delay;
-
- schedule_timeout_interruptible(schedule_delay);
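+ /* Advance the frame deadline by one period so timing errors do not accumulate */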
+ end_us += delay;
+ current_us = ktime_to_us(ktime_get());
+ if (current_us < end_us) {
+ schedule_delay = end_us - current_us;
+ usleep_range(schedule_delay * 3 / 4, schedule_delay);
+ } else {
+ end_us = current_us;
+ }
} while (!kthread_should_stop());
return 0;
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
index 4815b9dde9af..375b09612981 100644
--- a/drivers/media/mc/Kconfig
+++ b/drivers/media/mc/Kconfig
@@ -16,13 +16,5 @@ config MEDIA_CONTROLLER_REQUEST_API
bool
depends on MEDIA_CONTROLLER
help
- DO NOT ENABLE THIS OPTION UNLESS YOU KNOW WHAT YOU'RE DOING.
-
This option enables the Request API for the Media controller and V4L2
interfaces. It is currently needed by a few stateless codec drivers.
-
- There is currently no intention to provide API or ABI stability for
- this new API as of yet.
-
-comment "Please notice that the enabled Media controller Request API is EXPERIMENTAL"
- depends on MEDIA_CONTROLLER_REQUEST_API
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 16af58f2f93c..74edcc76d12f 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -332,8 +332,8 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
}
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
- ret = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64))) {
+ ret = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (ret) {
cobalt_err("no suitable DMA available\n");
goto err_disable;
diff --git a/drivers/media/pci/cx18/cx18-driver.c b/drivers/media/pci/cx18/cx18-driver.c
index f2440eb38820..59497ba6bf1f 100644
--- a/drivers/media/pci/cx18/cx18-driver.c
+++ b/drivers/media/pci/cx18/cx18-driver.c
@@ -804,7 +804,7 @@ static int cx18_setup_pci(struct cx18 *cx, struct pci_dev *pci_dev,
CX18_ERR("Can't enable device %d!\n", cx->instance);
return -EIO;
}
- if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
CX18_ERR("No suitable DMA available, card %d\n", cx->instance);
return -EIO;
}
diff --git a/drivers/media/pci/cx18/cx18-ioctl.c b/drivers/media/pci/cx18/cx18-ioctl.c
index 4864def20676..ce3f0141f94e 100644
--- a/drivers/media/pci/cx18/cx18-ioctl.c
+++ b/drivers/media/pci/cx18/cx18-ioctl.c
@@ -276,7 +276,7 @@ static int cx18_s_fmt_vid_cap(struct file *file, void *fh,
s->pixelformat = fmt->fmt.pix.pixelformat;
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12) {
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16) {
s->vb_bytes_per_frame = h * 720 * 3 / 2;
s->vb_bytes_per_line = 720; /* First plane */
} else {
@@ -470,7 +470,7 @@ static int cx18_enum_fmt_vid_cap(struct file *file, void *fh,
.index = 0,
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:1:1)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
},
{
.index = 1,
diff --git a/drivers/media/pci/cx18/cx18-queue.c b/drivers/media/pci/cx18/cx18-queue.c
index 2f5df471dada..013694bfcb1c 100644
--- a/drivers/media/pci/cx18/cx18-queue.c
+++ b/drivers/media/pci/cx18/cx18-queue.c
@@ -325,8 +325,8 @@ void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
struct cx18_buffer *buf;
list_for_each_entry(buf, &mdl->buf_list, list)
- pci_dma_sync_single_for_device(pci_dev, buf->dma_handle,
- buf_size, dma);
+ dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
+ buf_size, dma);
}
int cx18_stream_alloc(struct cx18_stream *s)
@@ -385,8 +385,9 @@ int cx18_stream_alloc(struct cx18_stream *s)
cx18_enqueue(s, mdl, &s->q_idle);
INIT_LIST_HEAD(&buf->list);
- buf->dma_handle = pci_map_single(s->cx->pci_dev,
- buf->buf, s->buf_size, s->dma);
+ buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
+ buf->buf, s->buf_size,
+ s->dma);
cx18_buf_sync_for_cpu(s, buf);
list_add_tail(&buf->list, &s->buf_pool);
}
@@ -419,8 +420,8 @@ void cx18_stream_free(struct cx18_stream *s)
buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
list_del_init(&buf->list);
- pci_unmap_single(s->cx->pci_dev, buf->dma_handle,
- s->buf_size, s->dma);
+ dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
+ s->buf_size, s->dma);
kfree(buf->buf);
kfree(buf);
}
diff --git a/drivers/media/pci/cx18/cx18-streams.c b/drivers/media/pci/cx18/cx18-streams.c
index c41bae118415..87ff554bb2d2 100644
--- a/drivers/media/pci/cx18/cx18-streams.c
+++ b/drivers/media/pci/cx18/cx18-streams.c
@@ -49,44 +49,44 @@ static struct {
{ /* CX18_ENC_STREAM_TYPE_MPG */
"encoder MPEG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_TS */
"TS",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING | V4L2_CAP_AUDIO | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE |
V4L2_CAP_READWRITE | V4L2_CAP_TUNER
},
{ /* CX18_ENC_STREAM_TYPE_PCM */
"encoder PCM audio",
VFL_TYPE_VIDEO, CX18_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
},
{ /* CX18_ENC_STREAM_TYPE_IDX */
"encoder IDX",
VFL_TYPE_VIDEO, -1,
- PCI_DMA_FROMDEVICE,
+ DMA_FROM_DEVICE,
},
{ /* CX18_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE,
+ DMA_NONE,
V4L2_CAP_RADIO | V4L2_CAP_TUNER
},
};
@@ -133,7 +133,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -155,7 +155,7 @@ static int cx18_prepare_buffer(struct videobuf_queue *q,
/* HM12 YUV size is (Y=(h*720) + UV=(h*(720/2)))
UYUV YUV size is (Y=(h*720) + UV=(h*(720))) */
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->vb_bytes_per_frame = height * 720 * 3 / 2;
else
s->vb_bytes_per_frame = height * 720 * 2;
@@ -287,7 +287,7 @@ static void cx18_stream_init(struct cx18 *cx, int type)
s, &cx->serialize_lock);
/* Assume the previous pixel default */
- s->pixelformat = V4L2_PIX_FMT_HM12;
+ s->pixelformat = V4L2_PIX_FMT_NV12_16L16;
s->vb_bytes_per_frame = cx->cxhdl.height * 720 * 3 / 2;
s->vb_bytes_per_line = 720;
}
@@ -324,7 +324,7 @@ static int cx18_prep_dev(struct cx18 *cx, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (cx18_stream_info[type].dma != PCI_DMA_NONE &&
+ if (cx18_stream_info[type].dma != DMA_NONE &&
cx->stream_buffers[type] == 0) {
CX18_INFO("Disabled %s device\n", cx18_stream_info[type].name);
return 0;
@@ -733,7 +733,7 @@ static void cx18_stream_configure_mdls(struct cx18_stream *s)
* Set the MDL size to the exact size needed for one frame.
* Use enough buffers per MDL to cover the MDL size
*/
- if (s->pixelformat == V4L2_PIX_FMT_HM12)
+ if (s->pixelformat == V4L2_PIX_FMT_NV12_16L16)
s->mdl_size = 720 * s->cx->cxhdl.height * 3 / 2;
else
s->mdl_size = 720 * s->cx->cxhdl.height * 2;
diff --git a/drivers/media/pci/cx23885/cx23885-alsa.c b/drivers/media/pci/cx23885/cx23885-alsa.c
index ab14d35214aa..25dc8d4dc5b7 100644
--- a/drivers/media/pci/cx23885/cx23885-alsa.c
+++ b/drivers/media/pci/cx23885/cx23885-alsa.c
@@ -550,7 +550,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1,
THIS_MODULE, sizeof(struct cx23885_audio_dev), &card);
if (err < 0)
- goto error;
+ goto error_msg;
chip = (struct cx23885_audio_dev *) card->private_data;
chip->dev = dev;
@@ -576,6 +576,7 @@ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev)
error:
snd_card_free(card);
+error_msg:
pr_err("%s(): Failed to register analog audio adapter\n",
__func__);
diff --git a/drivers/media/pci/ddbridge/ddbridge-main.c b/drivers/media/pci/ddbridge/ddbridge-main.c
index 03dc9924fa2c..25d0d6745b52 100644
--- a/drivers/media/pci/ddbridge/ddbridge-main.c
+++ b/drivers/media/pci/ddbridge/ddbridge-main.c
@@ -180,8 +180,8 @@ static int ddb_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
return -ENODEV;
dev = vzalloc(sizeof(*dev));
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.c b/drivers/media/pci/intel/ipu3/cio2-bridge.c
index 30d29b96a339..67c467d3c81f 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.c
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.c
@@ -29,6 +29,7 @@ static const struct cio2_sensor_config cio2_supported_sensors[] = {
static const struct cio2_property_names prop_names = {
.clock_frequency = "clock-frequency",
.rotation = "rotation",
+ .orientation = "orientation",
.bus_type = "bus-type",
.data_lanes = "data-lanes",
.remote_endpoint = "remote-endpoint",
@@ -72,11 +73,51 @@ out_free_buff:
return ret;
}
+static u32 cio2_bridge_parse_rotation(struct cio2_sensor *sensor)
+{
+ switch (sensor->ssdb.degree) {
+ case CIO2_SENSOR_ROTATION_NORMAL:
+ return 0;
+ case CIO2_SENSOR_ROTATION_INVERTED:
+ return 180;
+ default:
+ dev_warn(&sensor->adev->dev,
+ "Unknown rotation %d. Assume 0 degree rotation\n",
+ sensor->ssdb.degree);
+ return 0;
+ }
+}
+
+static enum v4l2_fwnode_orientation cio2_bridge_parse_orientation(struct cio2_sensor *sensor)
+{
+ switch (sensor->pld->panel) {
+ case ACPI_PLD_PANEL_FRONT:
+ return V4L2_FWNODE_ORIENTATION_FRONT;
+ case ACPI_PLD_PANEL_BACK:
+ return V4L2_FWNODE_ORIENTATION_BACK;
+ case ACPI_PLD_PANEL_TOP:
+ case ACPI_PLD_PANEL_LEFT:
+ case ACPI_PLD_PANEL_RIGHT:
+ case ACPI_PLD_PANEL_UNKNOWN:
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ default:
+ dev_warn(&sensor->adev->dev, "Unknown _PLD panel value %d\n",
+ sensor->pld->panel);
+ return V4L2_FWNODE_ORIENTATION_EXTERNAL;
+ }
+}
+
static void cio2_bridge_create_fwnode_properties(
struct cio2_sensor *sensor,
struct cio2_bridge *bridge,
const struct cio2_sensor_config *cfg)
{
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+
+ rotation = cio2_bridge_parse_rotation(sensor);
+ orientation = cio2_bridge_parse_orientation(sensor);
+
sensor->prop_names = prop_names;
sensor->local_ref[0] = SOFTWARE_NODE_REFERENCE(&sensor->swnodes[SWNODE_CIO2_ENDPOINT]);
@@ -85,9 +126,12 @@ static void cio2_bridge_create_fwnode_properties(
sensor->dev_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.clock_frequency,
sensor->ssdb.mclkspeed);
- sensor->dev_properties[1] = PROPERTY_ENTRY_U8(
+ sensor->dev_properties[1] = PROPERTY_ENTRY_U32(
sensor->prop_names.rotation,
- sensor->ssdb.degree);
+ rotation);
+ sensor->dev_properties[2] = PROPERTY_ENTRY_U32(
+ sensor->prop_names.orientation,
+ orientation);
sensor->ep_properties[0] = PROPERTY_ENTRY_U32(
sensor->prop_names.bus_type,
@@ -159,6 +203,7 @@ static void cio2_bridge_unregister_sensors(struct cio2_bridge *bridge)
for (i = 0; i < bridge->n_sensors; i++) {
sensor = &bridge->sensors[i];
software_node_unregister_nodes(sensor->swnodes);
+ ACPI_FREE(sensor->pld);
acpi_dev_put(sensor->adev);
}
}
@@ -170,6 +215,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
struct fwnode_handle *fwnode;
struct cio2_sensor *sensor;
struct acpi_device *adev;
+ acpi_status status;
int ret;
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
@@ -191,11 +237,15 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
if (ret)
goto err_put_adev;
+ status = acpi_get_physical_device_location(adev->handle, &sensor->pld);
+ if (ACPI_FAILURE(status))
+ goto err_put_adev;
+
if (sensor->ssdb.lanes > CIO2_MAX_LANES) {
dev_err(&adev->dev,
"Number of lanes in SSDB is invalid\n");
ret = -EINVAL;
- goto err_put_adev;
+ goto err_free_pld;
}
cio2_bridge_create_fwnode_properties(sensor, bridge, cfg);
@@ -203,7 +253,7 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
ret = software_node_register_nodes(sensor->swnodes);
if (ret)
- goto err_put_adev;
+ goto err_free_pld;
fwnode = software_node_fwnode(&sensor->swnodes[
SWNODE_SENSOR_HID]);
@@ -225,6 +275,8 @@ static int cio2_bridge_connect_sensor(const struct cio2_sensor_config *cfg,
err_free_swnodes:
software_node_unregister_nodes(sensor->swnodes);
+err_free_pld:
+ ACPI_FREE(sensor->pld);
err_put_adev:
acpi_dev_put(adev);
return ret;
diff --git a/drivers/media/pci/intel/ipu3/cio2-bridge.h b/drivers/media/pci/intel/ipu3/cio2-bridge.h
index dd0ffcafa489..202c7d494f7a 100644
--- a/drivers/media/pci/intel/ipu3/cio2-bridge.h
+++ b/drivers/media/pci/intel/ipu3/cio2-bridge.h
@@ -12,6 +12,10 @@
#define CIO2_MAX_LANES 4
#define MAX_NUM_LINK_FREQS 3
+/* Values are educated guesses as we don't have a spec */
+#define CIO2_SENSOR_ROTATION_NORMAL 0
+#define CIO2_SENSOR_ROTATION_INVERTED 1
+
#define CIO2_SENSOR_CONFIG(_HID, _NR, ...) \
(const struct cio2_sensor_config) { \
.hid = _HID, \
@@ -80,6 +84,7 @@ struct cio2_sensor_ssdb {
struct cio2_property_names {
char clock_frequency[16];
char rotation[9];
+ char orientation[12];
char bus_type[9];
char data_lanes[11];
char remote_endpoint[16];
@@ -106,9 +111,11 @@ struct cio2_sensor {
struct cio2_node_names node_names;
struct cio2_sensor_ssdb ssdb;
+ struct acpi_pld_info *pld;
+
struct cio2_property_names prop_names;
struct property_entry ep_properties[5];
- struct property_entry dev_properties[3];
+ struct property_entry dev_properties[4];
struct property_entry cio2_properties[3];
struct software_node_ref_args local_ref[1];
struct software_node_ref_args remote_ref[1];
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
index 47db0ee0fcbf..356ea966cf8d 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c
@@ -11,6 +11,7 @@
* et al.
*/
+#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
@@ -102,26 +103,29 @@ static inline u32 cio2_bytesperline(const unsigned int width)
static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
+
if (cio2->dummy_lop) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_lop, cio2->dummy_lop_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_lop,
+ cio2->dummy_lop_bus_addr);
cio2->dummy_lop = NULL;
}
if (cio2->dummy_page) {
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
- cio2->dummy_page, cio2->dummy_page_bus_addr);
+ dma_free_coherent(dev, PAGE_SIZE, cio2->dummy_page,
+ cio2->dummy_page_bus_addr);
cio2->dummy_page = NULL;
}
}
static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
- cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_page = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_page_bus_addr,
GFP_KERNEL);
- cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ cio2->dummy_lop = dma_alloc_coherent(dev, PAGE_SIZE,
&cio2->dummy_lop_bus_addr,
GFP_KERNEL);
if (!cio2->dummy_page || !cio2->dummy_lop) {
@@ -497,6 +501,7 @@ static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
+ struct device *dev = &cio2->pci_dev->dev;
void __iomem *const base = cio2->base;
unsigned int i;
u32 value;
@@ -514,8 +519,7 @@ static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
value, value & CIO2_CDMAC0_DMA_HALTED,
4000, 2000000);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "DMA %i can not be halted\n", CIO2_DMA_CHAN);
+ dev_err(dev, "DMA %i can not be halted\n", CIO2_DMA_CHAN);
for (i = 0; i < CIO2_NUM_PORTS; i++) {
writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
@@ -539,8 +543,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
entry = &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID) {
- dev_warn(&cio2->pci_dev->dev,
- "no ready buffers found on DMA channel %u\n",
+ dev_warn(dev, "no ready buffers found on DMA channel %u\n",
dma_chan);
return;
}
@@ -557,8 +560,7 @@ static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
q->bufs[q->bufs_first] = NULL;
atomic_dec(&q->bufs_queued);
- dev_dbg(&cio2->pci_dev->dev,
- "buffer %i done\n", b->vbb.vb2_buf.index);
+ dev_dbg(dev, "buffer %i done\n", b->vbb.vb2_buf.index);
b->vbb.vb2_buf.timestamp = ns;
b->vbb.field = V4L2_FIELD_NONE;
@@ -612,6 +614,20 @@ static const char *const cio2_irq_errs[] = {
"non-matching Long Packet stalled",
};
+static void cio2_irq_log_irq_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long csi2_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &csi2_status, ARRAY_SIZE(cio2_irq_errs))
+ dev_err(dev, "CSI-2 receiver port %i: %s\n",
+ port, cio2_irq_errs[i]);
+
+ if (fls_long(csi2_status) >= ARRAY_SIZE(cio2_irq_errs))
+ dev_warn(dev, "unknown CSI2 error 0x%lx on port %i\n",
+ csi2_status, port);
+}
+
static const char *const cio2_port_errs[] = {
"ECC recoverable",
"DPHY not recoverable",
@@ -622,10 +638,19 @@ static const char *const cio2_port_errs[] = {
"PKT2LONG",
};
+static void cio2_irq_log_port_errs(struct device *dev, u8 port, u32 status)
+{
+ unsigned long port_status = status;
+ unsigned int i;
+
+ for_each_set_bit(i, &port_status, ARRAY_SIZE(cio2_port_errs))
+ dev_err(dev, "port %i error %s\n", port, cio2_port_errs[i]);
+}
+
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
- void __iomem *const base = cio2->base;
struct device *dev = &cio2->pci_dev->dev;
+ void __iomem *const base = cio2->base;
if (int_status & CIO2_INT_IOOE) {
/*
@@ -687,59 +712,32 @@ static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
/* CSI2 receiver (error) interrupt */
- u32 ie_status, ie_clear;
unsigned int port;
+ u32 ie_status;
- ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
- ie_status = ie_clear;
+ ie_status = readl(base + CIO2_REG_INT_STS_EXT_IE);
for (port = 0; port < CIO2_NUM_PORTS; port++) {
u32 port_status = (ie_status >> (port * 8)) & 0xff;
- u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
- void __iomem *const csi_rx_base =
- base + CIO2_REG_PIPE_BASE(port);
- unsigned int i;
-
- while (port_status & err_mask) {
- i = ffs(port_status) - 1;
- dev_err(dev, "port %i error %s\n",
- port, cio2_port_errs[i]);
- ie_status &= ~BIT(port * 8 + i);
- port_status &= ~BIT(i);
- }
+
+ cio2_irq_log_port_errs(dev, port, port_status);
if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
- u32 csi2_status, csi2_clear;
+ void __iomem *csi_rx_base =
+ base + CIO2_REG_PIPE_BASE(port);
+ u32 csi2_status;
csi2_status = readl(csi_rx_base +
CIO2_REG_IRQCTRL_STATUS);
- csi2_clear = csi2_status;
- err_mask =
- BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;
-
- while (csi2_status & err_mask) {
- i = ffs(csi2_status) - 1;
- dev_err(dev,
- "CSI-2 receiver port %i: %s\n",
- port, cio2_irq_errs[i]);
- csi2_status &= ~BIT(i);
- }
-
- writel(csi2_clear,
- csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
- if (csi2_status)
- dev_warn(dev,
- "unknown CSI2 error 0x%x on port %i\n",
- csi2_status, port);
- ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
+ cio2_irq_log_irq_errs(dev, port, csi2_status);
+
+ writel(csi2_status,
+ csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
}
}
- writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
- if (ie_status)
- dev_warn(dev, "unknown interrupt 0x%x on IE\n",
- ie_status);
+ writel(ie_status, base + CIO2_REG_INT_STS_EXT_IE);
int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
}
@@ -795,16 +793,21 @@ static int cio2_vb2_queue_setup(struct vb2_queue *vq,
struct device *alloc_devs[])
{
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
unsigned int i;
- *num_planes = q->format.num_planes;
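+ /* A caller-provided plane layout must be compatible with the current format */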
+ if (*num_planes && *num_planes < q->format.num_planes)
+ return -EINVAL;
- for (i = 0; i < *num_planes; ++i) {
+ for (i = 0; i < q->format.num_planes; ++i) {
+ if (*num_planes && sizes[i] < q->format.plane_fmt[i].sizeimage)
+ return -EINVAL;
sizes[i] = q->format.plane_fmt[i].sizeimage;
- alloc_devs[i] = &cio2->pci_dev->dev;
+ alloc_devs[i] = dev;
}
+ *num_planes = q->format.num_planes;
*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);
/* Initialize buffer queue */
@@ -824,8 +827,7 @@ static int cio2_vb2_buf_init(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
struct device *dev = &cio2->pci_dev->dev;
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int pages = PFN_UP(vb->planes[0].length);
unsigned int lops = DIV_ROUND_UP(pages + 1, CIO2_LOP_ENTRIES);
struct sg_table *sg;
@@ -879,17 +881,17 @@ fail:
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
+ struct device *dev = &cio2->pci_dev->dev;
struct cio2_queue *q =
container_of(vb->vb2_queue, struct cio2_queue, vbq);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct cio2_buffer *b = to_cio2_buffer(vb);
struct cio2_fbpt_entry *entry;
unsigned long flags;
unsigned int i, j, next = q->bufs_next;
int bufs_queued = atomic_inc_return(&q->bufs_queued);
u32 fbpt_rp;
- dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);
+ dev_dbg(dev, "queue buffer %d\n", vb->index);
/*
* This code queues the buffer to the CIO2 DMA engine, which starts
@@ -940,12 +942,12 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
return;
}
- dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
+ dev_dbg(dev, "entry %i was full!\n", next);
next = (next + 1) % CIO2_MAX_BUFFERS;
}
local_irq_restore(flags);
- dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
+ dev_err(dev, "error: all cio2 entries were full!\n");
atomic_dec(&q->bufs_queued);
vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
@@ -954,14 +956,14 @@ static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
{
struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
+ struct device *dev = &cio2->pci_dev->dev;
+ struct cio2_buffer *b = to_cio2_buffer(vb);
unsigned int i;
/* Free LOP table */
for (i = 0; i < CIO2_MAX_LOPS; i++) {
if (b->lop[i])
- dma_free_coherent(&cio2->pci_dev->dev, PAGE_SIZE,
+ dma_free_coherent(dev, PAGE_SIZE,
b->lop[i], b->lop_bus_addr[i]);
}
}
@@ -970,14 +972,15 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
int r;
cio2->cur_queue = q;
atomic_set(&q->frame_sequence, 0);
- r = pm_runtime_resume_and_get(&cio2->pci_dev->dev);
+ r = pm_runtime_resume_and_get(dev);
if (r < 0) {
- dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
+ dev_info(dev, "failed to set power %d\n", r);
return r;
}
@@ -1003,9 +1006,9 @@ fail_csi2_subdev:
fail_hw:
media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
- dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
+ dev_dbg(dev, "failed to start streaming (%d)\n", r);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
return r;
}
@@ -1014,16 +1017,16 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
{
struct cio2_queue *q = vb2q_to_cio2_queue(vq);
struct cio2_device *cio2 = vb2_get_drv_priv(vq);
+ struct device *dev = &cio2->pci_dev->dev;
if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
- dev_err(&cio2->pci_dev->dev,
- "failed to stop sensor streaming\n");
+ dev_err(dev, "failed to stop sensor streaming\n");
cio2_hw_exit(cio2, q);
synchronize_irq(cio2->pci_dev->irq);
cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
media_pipeline_stop(&q->vdev.entity);
- pm_runtime_put(&cio2->pci_dev->dev);
+ pm_runtime_put(dev);
cio2->streaming = false;
}
@@ -1311,16 +1314,16 @@ static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
static int cio2_video_link_validate(struct media_link *link)
{
- struct video_device *vd = container_of(link->sink->entity,
- struct video_device, entity);
+ struct media_entity *entity = link->sink->entity;
+ struct video_device *vd = media_entity_to_video_device(entity);
struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
struct cio2_device *cio2 = video_get_drvdata(vd);
+ struct device *dev = &cio2->pci_dev->dev;
struct v4l2_subdev_format source_fmt;
int ret;
- if (!media_entity_remote_pad(link->sink->entity->pads)) {
- dev_info(&cio2->pci_dev->dev,
- "video node %s pad not connected\n", vd->name);
+ if (!media_entity_remote_pad(entity->pads)) {
+ dev_info(dev, "video node %s pad not connected\n", vd->name);
return -ENOTCONN;
}
@@ -1330,8 +1333,7 @@ static int cio2_video_link_validate(struct media_link *link)
if (source_fmt.format.width != q->format.width ||
source_fmt.format.height != q->format.height) {
- dev_err(&cio2->pci_dev->dev,
- "Wrong width or height %ux%u (%ux%u expected)\n",
+ dev_err(dev, "Wrong width or height %ux%u (%ux%u expected)\n",
q->format.width, q->format.height,
source_fmt.format.width, source_fmt.format.height);
return -EINVAL;
@@ -1371,15 +1373,15 @@ struct sensor_async_subdev {
struct csi2_bus_info csi2;
};
+#define to_sensor_asd(asd) container_of(asd, struct sensor_async_subdev, asd)
+
/* The .bound() notifier callback when a match is found */
static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
struct cio2_queue *q;
if (cio2->queue[s_asd->csi2.port].sensor)
@@ -1399,10 +1401,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *sd,
struct v4l2_async_subdev *asd)
{
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct sensor_async_subdev *s_asd = to_sensor_asd(asd);
cio2->queue[s_asd->csi2.port].sensor = NULL;
}
@@ -1410,8 +1410,8 @@ static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
/* .complete() is called after all subdevices have been located */
static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
{
- struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
- notifier);
+ struct cio2_device *cio2 = to_cio2_device(notifier);
+ struct device *dev = &cio2->pci_dev->dev;
struct sensor_async_subdev *s_asd;
struct v4l2_async_subdev *asd;
struct cio2_queue *q;
@@ -1419,7 +1419,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
int ret;
list_for_each_entry(asd, &cio2->notifier.asd_list, asd_list) {
- s_asd = container_of(asd, struct sensor_async_subdev, asd);
+ s_asd = to_sensor_asd(asd);
q = &cio2->queue[s_asd->csi2.port];
for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
@@ -1428,8 +1428,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
break;
if (pad == q->sensor->entity.num_pads) {
- dev_err(&cio2->pci_dev->dev,
- "failed to find src pad for %s\n",
+ dev_err(dev, "failed to find src pad for %s\n",
q->sensor->name);
return -ENXIO;
}
@@ -1439,8 +1438,7 @@ static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
&q->subdev.entity, CIO2_PAD_SINK,
0);
if (ret) {
- dev_err(&cio2->pci_dev->dev,
- "failed to create link for %s\n",
+ dev_err(dev, "failed to create link for %s\n",
q->sensor->name);
return ret;
}
@@ -1457,6 +1455,7 @@ static const struct v4l2_async_notifier_operations cio2_async_ops = {
static int cio2_parse_firmware(struct cio2_device *cio2)
{
+ struct device *dev = &cio2->pci_dev->dev;
unsigned int i;
int ret;
@@ -1467,10 +1466,8 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
struct sensor_async_subdev *s_asd;
struct fwnode_handle *ep;
- ep = fwnode_graph_get_endpoint_by_id(
- dev_fwnode(&cio2->pci_dev->dev), i, 0,
- FWNODE_GRAPH_ENDPOINT_NEXT);
-
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(dev), i, 0,
+ FWNODE_GRAPH_ENDPOINT_NEXT);
if (!ep)
continue;
@@ -1478,8 +1475,9 @@ static int cio2_parse_firmware(struct cio2_device *cio2)
if (ret)
goto err_parse;
- s_asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &cio2->notifier, ep, struct sensor_async_subdev);
+ s_asd = v4l2_async_nf_add_fwnode_remote(&cio2->notifier, ep,
+ struct
+ sensor_async_subdev);
if (IS_ERR(s_asd)) {
ret = PTR_ERR(s_asd);
goto err_parse;
@@ -1502,10 +1500,9 @@ err_parse:
* suspend.
*/
cio2->notifier.ops = &cio2_async_ops;
- ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
+ ret = v4l2_async_nf_register(&cio2->v4l2_dev, &cio2->notifier);
if (ret)
- dev_err(&cio2->pci_dev->dev,
- "failed to register async notifier : %d\n", ret);
+ dev_err(dev, "failed to register async notifier : %d\n", ret);
return ret;
}
@@ -1524,7 +1521,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
static const u32 default_width = 1936;
static const u32 default_height = 1096;
const struct ipu3_cio2_fmt dflt_fmt = formats[0];
-
+ struct device *dev = &cio2->pci_dev->dev;
struct video_device *vdev = &q->vdev;
struct vb2_queue *vbq = &q->vbq;
struct v4l2_subdev *subdev = &q->subdev;
@@ -1566,8 +1563,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
subdev->internal_ops = &cio2_subdev_internal_ops;
r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize subdev media entity (%d)\n", r);
goto fail_subdev_media_entity;
}
@@ -1575,8 +1571,8 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vdev->entity.ops = &cio2_video_entity_ops;
r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize videodev media entity (%d)\n", r);
+ dev_err(dev, "failed initialize videodev media entity (%d)\n",
+ r);
goto fail_vdev_media_entity;
}
@@ -1590,8 +1586,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
v4l2_set_subdevdata(subdev, cio2);
r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed initialize subdev (%d)\n", r);
+ dev_err(dev, "failed initialize subdev (%d)\n", r);
goto fail_subdev;
}
@@ -1607,8 +1602,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
vbq->lock = &q->lock;
r = vb2_queue_init(vbq);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to initialize videobuf2 queue (%d)\n", r);
+ dev_err(dev, "failed to initialize videobuf2 queue (%d)\n", r);
goto fail_subdev;
}
@@ -1625,8 +1619,7 @@ static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
video_set_drvdata(vdev, cio2);
r = video_register_device(vdev, VFL_TYPE_VIDEO, -1);
if (r) {
- dev_err(&cio2->pci_dev->dev,
- "failed to register video device (%d)\n", r);
+ dev_err(dev, "failed to register video device (%d)\n", r);
goto fail_vdev;
}
@@ -1648,7 +1641,7 @@ fail_subdev:
fail_vdev_media_entity:
media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
- cio2_fbpt_exit(q, &cio2->pci_dev->dev);
+ cio2_fbpt_exit(q, dev);
fail_fbpt:
mutex_destroy(&q->subdev_lock);
mutex_destroy(&q->lock);
@@ -1715,11 +1708,12 @@ static int cio2_check_fwnode_graph(struct fwnode_handle *fwnode)
static int cio2_pci_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
{
- struct fwnode_handle *fwnode = dev_fwnode(&pci_dev->dev);
+ struct device *dev = &pci_dev->dev;
+ struct fwnode_handle *fwnode = dev_fwnode(dev);
struct cio2_device *cio2;
int r;
- cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
+ cio2 = devm_kzalloc(dev, sizeof(*cio2), GFP_KERNEL);
if (!cio2)
return -ENOMEM;
cio2->pci_dev = pci_dev;
@@ -1732,7 +1726,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = cio2_check_fwnode_graph(fwnode);
if (r) {
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary)) {
- dev_err(&pci_dev->dev, "fwnode graph has no endpoints connected\n");
+ dev_err(dev, "fwnode graph has no endpoints connected\n");
return -EINVAL;
}
@@ -1743,16 +1737,16 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
r = pcim_enable_device(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
+ dev_err(dev, "failed to enable device (%d)\n", r);
return r;
}
- dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
+ dev_info(dev, "device 0x%x (rev: 0x%x)\n",
pci_dev->device, pci_dev->revision);
r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
if (r) {
- dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
+ dev_err(dev, "failed to remap I/O memory (%d)\n", r);
return -ENODEV;
}
@@ -1762,15 +1756,15 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
+ r = dma_set_mask(&pci_dev->dev, CIO2_DMA_MASK);
if (r) {
- dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
+ dev_err(dev, "failed to set DMA mask (%d)\n", r);
return -ENODEV;
}
r = pci_enable_msi(pci_dev);
if (r) {
- dev_err(&pci_dev->dev, "failed to enable MSI (%d)\n", r);
+ dev_err(dev, "failed to enable MSI (%d)\n", r);
return r;
}
@@ -1780,7 +1774,7 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
mutex_init(&cio2->lock);
- cio2->media_dev.dev = &cio2->pci_dev->dev;
+ cio2->media_dev.dev = dev;
strscpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
sizeof(cio2->media_dev.model));
snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
@@ -1793,10 +1787,9 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
goto fail_mutex_destroy;
cio2->v4l2_dev.mdev = &cio2->media_dev;
- r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
+ r = v4l2_device_register(dev, &cio2->v4l2_dev);
if (r) {
- dev_err(&pci_dev->dev,
- "failed to register V4L2 device (%d)\n", r);
+ dev_err(dev, "failed to register V4L2 device (%d)\n", r);
goto fail_media_device_unregister;
}
@@ -1804,28 +1797,28 @@ static int cio2_pci_probe(struct pci_dev *pci_dev,
if (r)
goto fail_v4l2_device_unregister;
- v4l2_async_notifier_init(&cio2->notifier);
+ v4l2_async_nf_init(&cio2->notifier);
/* Register notifier for subdevices we care */
r = cio2_parse_firmware(cio2);
if (r)
goto fail_clean_notifier;
- r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
- IRQF_SHARED, CIO2_NAME, cio2);
+ r = devm_request_irq(dev, pci_dev->irq, cio2_irq, IRQF_SHARED,
+ CIO2_NAME, cio2);
if (r) {
- dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
+ dev_err(dev, "failed to request IRQ (%d)\n", r);
goto fail_clean_notifier;
}
- pm_runtime_put_noidle(&pci_dev->dev);
- pm_runtime_allow(&pci_dev->dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_allow(dev);
return 0;
fail_clean_notifier:
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -1844,8 +1837,8 @@ static void cio2_pci_remove(struct pci_dev *pci_dev)
struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
media_device_unregister(&cio2->media_dev);
- v4l2_async_notifier_unregister(&cio2->notifier);
- v4l2_async_notifier_cleanup(&cio2->notifier);
+ v4l2_async_nf_unregister(&cio2->notifier);
+ v4l2_async_nf_cleanup(&cio2->notifier);
cio2_queues_exit(cio2);
cio2_fbpt_exit_dummy(cio2);
v4l2_device_unregister(&cio2->v4l2_dev);
@@ -2005,10 +1998,9 @@ static int __maybe_unused cio2_resume(struct device *dev)
if (!cio2->streaming)
return 0;
/* Start stream */
- r = pm_runtime_force_resume(&cio2->pci_dev->dev);
+ r = pm_runtime_force_resume(dev);
if (r < 0) {
- dev_err(&cio2->pci_dev->dev,
- "failed to set power %d\n", r);
+ dev_err(dev, "failed to set power %d\n", r);
return r;
}
diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.h b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
index 3806d7f04d69..3a1f394e05aa 100644
--- a/drivers/media/pci/intel/ipu3/ipu3-cio2.h
+++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.h
@@ -338,6 +338,8 @@ struct cio2_buffer {
unsigned int offset;
};
+#define to_cio2_buffer(vb) container_of(vb, struct cio2_buffer, vbb.vb2_buf)
+
struct csi2_bus_info {
u32 port;
u32 lanes;
@@ -399,6 +401,8 @@ struct cio2_device {
dma_addr_t dummy_lop_bus_addr;
};
+#define to_cio2_device(n) container_of(n, struct cio2_device, notifier)
+
/**************** Virtual channel ****************/
/*
* This should come from sensor driver. No
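The new to_cio2_buffer()/to_cio2_device() helpers above are thin wrappers around container_of(), which recovers the enclosing structure from a pointer to an embedded member. A minimal userspace sketch of that pattern (the container_of definition is simplified and every demo_* name is hypothetical):

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of() macro. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier { int dummy; };

struct cio2_device_demo {
	int id;
	struct notifier notifier;	/* embedded member, as in struct cio2_device */
};

#define to_cio2_device_demo(n) container_of(n, struct cio2_device_demo, notifier)

int main(void)
{
	struct cio2_device_demo dev = { .id = 42 };
	struct notifier *n = &dev.notifier;	/* callbacks only receive this pointer */

	/* Recover the enclosing structure from the embedded member. */
	printf("id = %d\n", to_cio2_device_demo(n)->id);
	return 0;
}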
diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
index 8ebc97ebf1a2..57d4d5485d7a 100644
--- a/drivers/media/pci/ivtv/ivtv-driver.c
+++ b/drivers/media/pci/ivtv/ivtv-driver.c
@@ -837,7 +837,7 @@ static int ivtv_setup_pci(struct ivtv *itv, struct pci_dev *pdev,
IVTV_ERR("Can't enable device!\n");
return -EIO;
}
- if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
IVTV_ERR("No suitable DMA available.\n");
return -EIO;
}
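The one-line ivtv change above is the standard replacement of pci_set_dma_mask() with dma_set_mask() on &pdev->dev. A hedged probe-time sketch for a hypothetical PCI driver; dma_set_mask_and_coherent() is the usual alternative when the coherent mask should match the streaming mask:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int demo_set_dma_mask(struct pci_dev *pdev)
{
	int ret;

	/* Streaming mask only, as in the ivtv conversion above. */
	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Or set the streaming and coherent masks in one call. */
	return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
}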
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index da19b2e95e6c..0cdf6b3210c2 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -339,7 +339,7 @@ static int ivtv_g_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f
pixfmt->colorspace = V4L2_COLORSPACE_SMPTE170M;
pixfmt->field = V4L2_FIELD_INTERLACED;
if (id->type == IVTV_ENC_STREAM_TYPE_YUV) {
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
/* YUV size is (Y=(h*720) + UV=(h*(720/2))) */
pixfmt->sizeimage = pixfmt->height * 720 * 3 / 2;
pixfmt->bytesperline = 720;
@@ -417,7 +417,7 @@ static int ivtv_g_fmt_vid_out(struct file *file, void *fh, struct v4l2_format *f
pixfmt->field = V4L2_FIELD_ANY;
break;
}
- pixfmt->pixelformat = V4L2_PIX_FMT_HM12;
+ pixfmt->pixelformat = V4L2_PIX_FMT_NV12_16L16;
pixfmt->bytesperline = 720;
pixfmt->width = itv->yuv_info.v4l2_src_w;
pixfmt->height = itv->yuv_info.v4l2_src_h;
@@ -917,7 +917,7 @@ static int ivtv_enum_fmt_vid_cap(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
@@ -944,7 +944,7 @@ static int ivtv_enum_fmt_vid_out(struct file *file, void *fh, struct v4l2_fmtdes
static const struct v4l2_fmtdesc hm12 = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
.description = "HM12 (YUV 4:2:0)",
- .pixelformat = V4L2_PIX_FMT_HM12,
+ .pixelformat = V4L2_PIX_FMT_NV12_16L16,
};
static const struct v4l2_fmtdesc mpeg = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
diff --git a/drivers/media/pci/ivtv/ivtv-queue.c b/drivers/media/pci/ivtv/ivtv-queue.c
index 7ac4615e92ea..f9b192ab7e7c 100644
--- a/drivers/media/pci/ivtv/ivtv-queue.c
+++ b/drivers/media/pci/ivtv/ivtv-queue.c
@@ -188,7 +188,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return 0;
IVTV_DEBUG_INFO("Allocate %s%s stream: %d x %d buffers (%dkB total)\n",
- s->dma != PCI_DMA_NONE ? "DMA " : "",
+ s->dma != DMA_NONE ? "DMA " : "",
s->name, s->buffers, s->buf_size, s->buffers * s->buf_size / 1024);
s->sg_pending = kzalloc(SGsize, GFP_KERNEL|__GFP_NOWARN);
@@ -218,8 +218,9 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
return -ENOMEM;
}
if (ivtv_might_use_dma(s)) {
- s->sg_handle = pci_map_single(itv->pdev, s->sg_dma,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ s->sg_handle = dma_map_single(&itv->pdev->dev, s->sg_dma,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
ivtv_stream_sync_for_cpu(s);
}
@@ -237,7 +238,7 @@ int ivtv_stream_alloc(struct ivtv_stream *s)
}
INIT_LIST_HEAD(&buf->list);
if (ivtv_might_use_dma(s)) {
- buf->dma_handle = pci_map_single(s->itv->pdev,
+ buf->dma_handle = dma_map_single(&s->itv->pdev->dev,
buf->buf, s->buf_size + 256, s->dma);
ivtv_buf_sync_for_cpu(s, buf);
}
@@ -260,8 +261,8 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* empty q_free */
while ((buf = ivtv_dequeue(s, &s->q_free))) {
if (ivtv_might_use_dma(s))
- pci_unmap_single(s->itv->pdev, buf->dma_handle,
- s->buf_size + 256, s->dma);
+ dma_unmap_single(&s->itv->pdev->dev, buf->dma_handle,
+ s->buf_size + 256, s->dma);
kfree(buf->buf);
kfree(buf);
}
@@ -269,8 +270,9 @@ void ivtv_stream_free(struct ivtv_stream *s)
/* Free SG Array/Lists */
if (s->sg_dma != NULL) {
if (s->sg_handle != IVTV_DMA_UNMAPPED) {
- pci_unmap_single(s->itv->pdev, s->sg_handle,
- sizeof(struct ivtv_sg_element), PCI_DMA_TODEVICE);
+ dma_unmap_single(&s->itv->pdev->dev, s->sg_handle,
+ sizeof(struct ivtv_sg_element),
+ DMA_TO_DEVICE);
s->sg_handle = IVTV_DMA_UNMAPPED;
}
kfree(s->sg_pending);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index f04ee84bab5f..6e455948cc77 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -100,7 +100,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_MPG */
"encoder MPG",
VFL_TYPE_VIDEO, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -108,7 +108,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_YUV */
"encoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_YUV_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -116,7 +116,7 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_VBI */
"encoder VBI",
VFL_TYPE_VBI, 0,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_VBI_CAPTURE | V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_TUNER |
V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
@@ -124,42 +124,42 @@ static struct {
{ /* IVTV_ENC_STREAM_TYPE_PCM */
"encoder PCM",
VFL_TYPE_VIDEO, IVTV_V4L2_ENC_PCM_OFFSET,
- PCI_DMA_FROMDEVICE, 0,
+ DMA_FROM_DEVICE, 0,
V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_ENC_STREAM_TYPE_RAD */
"encoder radio",
VFL_TYPE_RADIO, 0,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_RADIO | V4L2_CAP_TUNER,
&ivtv_v4l2_radio_fops
},
{ /* IVTV_DEC_STREAM_TYPE_MPG */
"decoder MPG",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_MPG_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VBI */
"decoder VBI",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VBI_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_CAPTURE | V4L2_CAP_READWRITE,
&ivtv_v4l2_enc_fops
},
{ /* IVTV_DEC_STREAM_TYPE_VOUT */
"decoder VOUT",
VFL_TYPE_VBI, IVTV_V4L2_DEC_VOUT_OFFSET,
- PCI_DMA_NONE, 1,
+ DMA_NONE, 1,
V4L2_CAP_SLICED_VBI_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
},
{ /* IVTV_DEC_STREAM_TYPE_YUV */
"decoder YUV",
VFL_TYPE_VIDEO, IVTV_V4L2_DEC_YUV_OFFSET,
- PCI_DMA_TODEVICE, 0,
+ DMA_TO_DEVICE, 0,
V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE,
&ivtv_v4l2_dec_fops
}
@@ -179,7 +179,7 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
s->caps = ivtv_stream_info[type].v4l2_caps;
if (ivtv_stream_info[type].pio)
- s->dma = PCI_DMA_NONE;
+ s->dma = DMA_NONE;
else
s->dma = ivtv_stream_info[type].dma;
s->buf_size = itv->stream_buf_size[type];
@@ -217,7 +217,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
/* User explicitly selected 0 buffers for these streams, so don't
create them. */
- if (ivtv_stream_info[type].dma != PCI_DMA_NONE &&
+ if (ivtv_stream_info[type].dma != DMA_NONE &&
itv->options.kilobytes[type] == 0) {
IVTV_INFO("Disabled %s device\n", ivtv_stream_info[type].name);
return 0;
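For reference, the direction constants swapped in the stream table above map one-to-one onto enum dma_data_direction from <linux/dma-direction.h> (a summary, not part of the patch):

/*
 *   PCI_DMA_BIDIRECTIONAL  ->  DMA_BIDIRECTIONAL
 *   PCI_DMA_TODEVICE       ->  DMA_TO_DEVICE
 *   PCI_DMA_FROMDEVICE     ->  DMA_FROM_DEVICE
 *   PCI_DMA_NONE           ->  DMA_NONE
 */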
diff --git a/drivers/media/pci/ivtv/ivtv-udma.c b/drivers/media/pci/ivtv/ivtv-udma.c
index 0d8372cc364a..210be8290f24 100644
--- a/drivers/media/pci/ivtv/ivtv-udma.c
+++ b/drivers/media/pci/ivtv/ivtv-udma.c
@@ -81,8 +81,10 @@ void ivtv_udma_alloc(struct ivtv *itv)
{
if (itv->udma.SG_handle == 0) {
/* Map DMA Page Array Buffer */
- itv->udma.SG_handle = pci_map_single(itv->pdev, itv->udma.SGarray,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ itv->udma.SG_handle = dma_map_single(&itv->pdev->dev,
+ itv->udma.SGarray,
+ sizeof(itv->udma.SGarray),
+ DMA_TO_DEVICE);
ivtv_udma_sync_for_cpu(itv);
}
}
@@ -135,7 +137,8 @@ int ivtv_udma_setup(struct ivtv *itv, unsigned long ivtv_dest_addr,
}
/* Map SG List */
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array (dma, ivtv_dest_addr, 0, -1);
@@ -159,7 +162,8 @@ void ivtv_udma_unmap(struct ivtv *itv)
/* Unmap Scatterlist */
if (dma->SG_length) {
- pci_unmap_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, dma->SGlist, dma->page_count,
+ DMA_TO_DEVICE);
dma->SG_length = 0;
}
/* sync DMA */
@@ -175,13 +179,14 @@ void ivtv_udma_free(struct ivtv *itv)
/* Unmap SG Array */
if (itv->udma.SG_handle) {
- pci_unmap_single(itv->pdev, itv->udma.SG_handle,
- sizeof(itv->udma.SGarray), PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, itv->udma.SG_handle,
+ sizeof(itv->udma.SGarray), DMA_TO_DEVICE);
}
/* Unmap Scatterlist */
if (itv->udma.SG_length) {
- pci_unmap_sg(itv->pdev, itv->udma.SGlist, itv->udma.page_count, PCI_DMA_TODEVICE);
+ dma_unmap_sg(&itv->pdev->dev, itv->udma.SGlist,
+ itv->udma.page_count, DMA_TO_DEVICE);
}
for (i = 0; i < IVTV_DMA_SG_OSD_ENT; i++) {
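The ivtv_udma_setup()/ivtv_udma_unmap() hunks above follow the usual scatter-gather pattern of the generic DMA API. A hedged sketch with hypothetical demo_* names; note that dma_unmap_sg() takes the original nents, not the value returned by dma_map_sg():

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int demo_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	int mapped;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)			/* 0 means the mapping failed */
		return -ENOMEM;

	/* ... program the hardware using sg_dma_address()/sg_dma_len() ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}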
diff --git a/drivers/media/pci/ivtv/ivtv-yuv.c b/drivers/media/pci/ivtv/ivtv-yuv.c
index 5f7dc9771f8d..e79e8a5a744a 100644
--- a/drivers/media/pci/ivtv/ivtv-yuv.c
+++ b/drivers/media/pci/ivtv/ivtv-yuv.c
@@ -113,7 +113,8 @@ static int ivtv_yuv_prep_user_dma(struct ivtv *itv, struct ivtv_user_dma *dma,
dma->page_count = 0;
return -ENOMEM;
}
- dma->SG_length = pci_map_sg(itv->pdev, dma->SGlist, dma->page_count, PCI_DMA_TODEVICE);
+ dma->SG_length = dma_map_sg(&itv->pdev->dev, dma->SGlist,
+ dma->page_count, DMA_TO_DEVICE);
/* Fill SG Array with new values */
ivtv_udma_fill_sg_array(dma, y_buffer_offset, uv_buffer_offset, y_size);
@@ -920,7 +921,9 @@ static void ivtv_yuv_init(struct ivtv *itv)
/* We need a buffer for blanking when Y plane is offset - non-fatal if we can't get one */
yi->blanking_ptr = kzalloc(720 * 16, GFP_ATOMIC|__GFP_NOWARN);
if (yi->blanking_ptr) {
- yi->blanking_dmaptr = pci_map_single(itv->pdev, yi->blanking_ptr, 720*16, PCI_DMA_TODEVICE);
+ yi->blanking_dmaptr = dma_map_single(&itv->pdev->dev,
+ yi->blanking_ptr,
+ 720 * 16, DMA_TO_DEVICE);
} else {
yi->blanking_dmaptr = 0;
IVTV_DEBUG_WARN("Failed to allocate yuv blanking buffer\n");
@@ -1264,7 +1267,8 @@ void ivtv_yuv_close(struct ivtv *itv)
if (yi->blanking_ptr) {
kfree(yi->blanking_ptr);
yi->blanking_ptr = NULL;
- pci_unmap_single(itv->pdev, yi->blanking_dmaptr, 720*16, PCI_DMA_TODEVICE);
+ dma_unmap_single(&itv->pdev->dev, yi->blanking_dmaptr,
+ 720 * 16, DMA_TO_DEVICE);
}
/* Invalidate the old dimension information */
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index e2d56dca5be4..2c43ebf83966 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -36,7 +36,7 @@
#include <linux/fb.h>
#include <linux/ivtvfb.h>
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
#include <asm/memtype.h>
#endif
@@ -48,8 +48,8 @@ static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
-static int osd_yres;
-static int osd_xres;
+static unsigned int osd_yres;
+static unsigned int osd_xres;
module_param(ivtvfb_card_id, int, 0444);
module_param_named(debug,ivtvfb_debug, int, 0644);
@@ -58,8 +58,8 @@ module_param(osd_laced, bool, 0444);
module_param(osd_depth, int, 0444);
module_param(osd_upper, int, 0444);
module_param(osd_left, int, 0444);
-module_param(osd_yres, int, 0444);
-module_param(osd_xres, int, 0444);
+module_param(osd_yres, uint, 0444);
+module_param(osd_xres, uint, 0444);
MODULE_PARM_DESC(ivtvfb_card_id,
"Only use framebuffer of the specified ivtv card (0-31)\n"
@@ -1157,7 +1157,7 @@ static int ivtvfb_init_card(struct ivtv *itv)
{
int rc;
-#ifdef CONFIG_X86_64
+#if defined(CONFIG_X86_64) && !defined(CONFIG_UML)
if (pat_enabled()) {
if (ivtvfb_force_pat) {
pr_info("PAT is enabled. Write-combined framebuffer caching will be disabled.\n");
diff --git a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
index 6f3125c2d097..8287851b5ffd 100644
--- a/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
+++ b/drivers/media/pci/netup_unidvb/netup_unidvb_core.c
@@ -258,19 +258,24 @@ static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
/* IRQ is being signaled */
reg_isr = readw(ndev->bmmio0 + REG_ISR);
- if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
- iret = netup_i2c_interrupt(&ndev->i2c[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
- iret = netup_i2c_interrupt(&ndev->i2c[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_SPI)
iret = netup_spi_interrupt(ndev->spi);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
- iret = netup_dma_interrupt(&ndev->dma[0]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
- iret = netup_dma_interrupt(&ndev->dma[1]);
- } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
- iret = netup_ci_interrupt(ndev);
+ else if (!ndev->old_fw) {
+ if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
+ iret = netup_i2c_interrupt(&ndev->i2c[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
+ iret = netup_i2c_interrupt(&ndev->i2c[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
+ iret = netup_dma_interrupt(&ndev->dma[0]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
+ iret = netup_dma_interrupt(&ndev->dma[1]);
+ } else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
+ iret = netup_ci_interrupt(ndev);
+ } else {
+ goto err;
+ }
} else {
+err:
dev_err(&pci_dev->dev,
"%s(): unknown interrupt 0x%x\n",
__func__, reg_isr);
@@ -841,7 +846,7 @@ static int netup_unidvb_initdev(struct pci_dev *pci_dev,
"%s(): board vendor 0x%x, revision 0x%x\n",
__func__, board_vendor, board_revision);
pci_set_master(pci_dev);
- if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
+ if (dma_set_mask(&pci_dev->dev, 0xffffffff) < 0) {
dev_err(&pci_dev->dev,
"%s(): 32bit PCI DMA is not supported\n", __func__);
goto pci_detect_err;
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index f1f4793a4452..6ac9b9bd7435 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -228,16 +228,16 @@ static void pluto_set_dma_addr(struct pluto *pluto)
static int pluto_dma_map(struct pluto *pluto)
{
- pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ pluto->dma_addr = dma_map_single(&pluto->pdev->dev, pluto->dma_buf,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
- return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
+ return dma_mapping_error(&pluto->pdev->dev, pluto->dma_addr);
}
static void pluto_dma_unmap(struct pluto *pluto)
{
- pci_unmap_single(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&pluto->pdev->dev, pluto->dma_addr, TS_DMA_BYTES,
+ DMA_FROM_DEVICE);
}
static int pluto_start_feed(struct dvb_demux_feed *f)
@@ -276,8 +276,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
{
/* synchronize the DMA transfer with the CPU
* first so that we see updated contents. */
- pci_dma_sync_single_for_cpu(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
/* Workaround for broken hardware:
* [1] On startup NBPACKETS seems to contain an uninitialized value,
@@ -310,8 +310,8 @@ static void pluto_dma_end(struct pluto *pluto, unsigned int nbpackets)
pluto_set_dma_addr(pluto);
/* sync the buffer and give it back to the card */
- pci_dma_sync_single_for_device(pluto->pdev, pluto->dma_addr,
- TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(&pluto->pdev->dev, pluto->dma_addr,
+ TS_DMA_BYTES, DMA_FROM_DEVICE);
}
static irqreturn_t pluto_irq(int irq, void *dev_id)
@@ -595,7 +595,7 @@ static int pluto2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable interrupts */
pci_write_config_dword(pdev, 0x6c, 0x8000);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
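The pluto2 hunks above show the complete single-buffer streaming-DMA cycle after the conversion: map once, sync around each transfer, unmap at teardown. A hedged sketch for a hypothetical receive buffer (all demo_* names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>

/* Map a receive buffer once at setup time. */
static int demo_map_rx(struct pci_dev *pdev, void *buf, size_t len,
		       dma_addr_t *handle)
{
	*handle = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
	return dma_mapping_error(&pdev->dev, *handle) ? -ENOMEM : 0;
}

/* Per transfer: hand the buffer to the CPU, read it, give it back. */
static void demo_complete_rx(struct pci_dev *pdev, dma_addr_t handle,
			     size_t len)
{
	dma_sync_single_for_cpu(&pdev->dev, handle, len, DMA_FROM_DEVICE);
	/* ... consume the received data here ... */
	dma_sync_single_for_device(&pdev->dev, handle, len, DMA_FROM_DEVICE);
}

/* Unmap at teardown. */
static void demo_unmap_rx(struct pci_dev *pdev, dma_addr_t handle, size_t len)
{
	dma_unmap_single(&pdev->dev, handle, len, DMA_FROM_DEVICE);
}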
diff --git a/drivers/media/pci/pt1/pt1.c b/drivers/media/pci/pt1/pt1.c
index f2aa36814fba..121a4a92ea10 100644
--- a/drivers/media/pci/pt1/pt1.c
+++ b/drivers/media/pci/pt1/pt1.c
@@ -1340,7 +1340,7 @@ static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto err;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret < 0)
goto err_pci_disable_device;
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index ce449c941171..0d82a4b27d5b 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -5765,6 +5765,33 @@ struct saa7134_board saa7134_boards[] = {
.gpio = 0x0200000,
},
},
+ [SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H] = {
+ .name = "Leadtek Winfast HDTV200 H",
+ .audio_clock = 0x00187de7,
+ .tuner_type = TUNER_PHILIPS_TDA8290,
+ .radio_type = UNSET,
+ .tuner_addr = ADDR_UNSET,
+ .radio_addr = ADDR_UNSET,
+ .mpeg = SAA7134_MPEG_DVB,
+ .ts_type = SAA7134_MPEG_TS_PARALLEL,
+ .gpiomask = 0x00200700,
+ .inputs = { {
+ .type = SAA7134_INPUT_TV,
+ .vmux = 1,
+ .amux = TV,
+ .gpio = 0x00000300,
+ }, {
+ .type = SAA7134_INPUT_COMPOSITE,
+ .vmux = 3,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ }, {
+ .type = SAA7134_INPUT_SVIDEO,
+ .vmux = 8,
+ .amux = LINE1,
+ .gpio = 0x00200300,
+ } },
+ },
};
const unsigned int saa7134_bcount = ARRAY_SIZE(saa7134_boards);
@@ -7041,6 +7068,12 @@ struct pci_device_id saa7134_pci_tbl[] = {
.subdevice = 0x13cf,
.driver_data = SAA7134_BOARD_SNAZIO_TVPVR_PRO,
}, {
+ .vendor = PCI_VENDOR_ID_PHILIPS,
+ .device = PCI_DEVICE_ID_PHILIPS_SAA7133,
+ .subvendor = 0x107d,
+ .subdevice = 0x6f2e,
+ .driver_data = SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H,
+ }, {
/* --- boards without eeprom + subsystem ID --- */
.vendor = PCI_VENDOR_ID_PHILIPS,
.device = PCI_DEVICE_ID_PHILIPS_SAA7134,
@@ -7245,6 +7278,22 @@ static int saa7134_kworld_pc150u_toggle_agc(struct saa7134_dev *dev,
return 0;
}
+static int saa7134_leadtek_hdtv200h_toggle_agc(struct saa7134_dev *dev,
+ enum tda18271_mode mode)
+{
+ switch (mode) {
+ case TDA18271_ANALOG:
+ saa7134_set_gpio(dev, 10, 0);
+ break;
+ case TDA18271_DIGITAL:
+ saa7134_set_gpio(dev, 10, 1);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
int command, int arg)
{
@@ -7264,6 +7313,9 @@ static int saa7134_tda8290_18271_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PC150U:
ret = saa7134_kworld_pc150u_toggle_agc(dev, arg);
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ ret = saa7134_leadtek_hdtv200h_toggle_agc(dev, arg);
+ break;
default:
break;
}
@@ -7287,6 +7339,7 @@ static int saa7134_tda8290_callback(struct saa7134_dev *dev,
case SAA7134_BOARD_KWORLD_PCI_SBTVD_FULLSEG:
case SAA7134_BOARD_KWORLD_PC150U:
case SAA7134_BOARD_MAGICPRO_PROHDTV_PRO2:
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
/* tda8290 + tda18271 */
ret = saa7134_tda8290_18271_callback(dev, command, arg);
break;
diff --git a/drivers/media/pci/saa7134/saa7134-dvb.c b/drivers/media/pci/saa7134/saa7134-dvb.c
index f359cd5c006a..d17a1b15faee 100644
--- a/drivers/media/pci/saa7134/saa7134-dvb.c
+++ b/drivers/media/pci/saa7134/saa7134-dvb.c
@@ -1189,6 +1189,22 @@ static struct s5h1411_config kworld_s5h1411_config = {
S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
};
+static struct tda18271_config hdtv200h_tda18271_config = {
+ .gate = TDA18271_GATE_ANALOG,
+ .config = 3 /* Use tuner callback for AGC */
+};
+
+static struct s5h1411_config hdtv200h_s5h1411_config = {
+ .output_mode = S5H1411_PARALLEL_OUTPUT,
+ .gpio = S5H1411_GPIO_OFF,
+ .qam_if = S5H1411_IF_4000,
+ .vsb_if = S5H1411_IF_3250,
+ .inversion = S5H1411_INVERSION_ON,
+ .status_mode = S5H1411_DEMODLOCKING,
+ .mpeg_timing =
+ S5H1411_MPEGTIMING_CONTINUOUS_NONINVERTING_CLOCK,
+};
+
/* ==================================================================
* Core code
@@ -1854,6 +1870,19 @@ static int dvb_init(struct saa7134_dev *dev)
__func__);
}
break;
+ case SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H:
+ fe0->dvb.frontend = dvb_attach(s5h1411_attach,
+ &hdtv200h_s5h1411_config,
+ &dev->i2c_adap);
+ if (fe0->dvb.frontend) {
+ dvb_attach(tda829x_attach, fe0->dvb.frontend,
+ &dev->i2c_adap, 0x4b,
+ &tda829x_no_probe);
+ dvb_attach(tda18271_attach, fe0->dvb.frontend,
+ 0x60, &dev->i2c_adap,
+ &hdtv200h_tda18271_config);
+ }
+ break;
default:
pr_warn("Huh? unknown DVB card?\n");
break;
diff --git a/drivers/media/pci/saa7134/saa7134.h b/drivers/media/pci/saa7134/saa7134.h
index d29499cd7370..49fe0f6bacba 100644
--- a/drivers/media/pci/saa7134/saa7134.h
+++ b/drivers/media/pci/saa7134/saa7134.h
@@ -328,6 +328,7 @@ struct saa7134_card_ir {
#define SAA7134_BOARD_AVERMEDIA_505 194
#define SAA7134_BOARD_LEADTEK_WINFAST_TV2100_FM 195
#define SAA7134_BOARD_SNAZIO_TVPVR_PRO 196
+#define SAA7134_BOARD_LEADTEK_WINFAST_HDTV200_H 197
#define SAA7134_MAXBOARDS 32
#define SAA7134_INPUT_MAX 8
diff --git a/drivers/media/pci/saa7164/saa7164-api.c b/drivers/media/pci/saa7164/saa7164-api.c
index 4ddd0f5b50f1..5526bcc7a9bd 100644
--- a/drivers/media/pci/saa7164/saa7164-api.c
+++ b/drivers/media/pci/saa7164/saa7164-api.c
@@ -1057,8 +1057,6 @@ static int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len)
dprintk(DBGLVL_API, " numformats = 0x%x\n",
vcoutputtermhdr->numformats);
- t = (struct tmComResDescrHeader *)
- ((struct tmComResDMATermDescrHeader *)(buf + idx));
next_offset = idx + (vcoutputtermhdr->len);
for (i = 0; i < vcoutputtermhdr->numformats; i++) {
t = (struct tmComResDescrHeader *)
diff --git a/drivers/media/pci/tw5864/tw5864-core.c b/drivers/media/pci/tw5864/tw5864-core.c
index 282f7dfb7aaf..23d3cae54a5d 100644
--- a/drivers/media/pci/tw5864/tw5864-core.c
+++ b/drivers/media/pci/tw5864/tw5864-core.c
@@ -262,7 +262,7 @@ static int tw5864_initdev(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
- err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
+ err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32));
if (err) {
dev_err(&dev->pci->dev, "32 bit PCI DMA is not supported\n");
goto disable_pci;
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index 80321e03809a..cf4adc64c953 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -200,6 +200,22 @@ config VIDEO_TI_CAL_MC
endif # VIDEO_TI_CAL
+config VIDEO_RCAR_ISP
+ tristate "R-Car Image Signal Processor (ISP)"
+ depends on VIDEO_V4L2 && OF
+ depends on ARCH_RENESAS || COMPILE_TEST
+ select MEDIA_CONTROLLER
+ select VIDEO_V4L2_SUBDEV_API
+ select RESET_CONTROLLER
+ select V4L2_FWNODE
+ help
+ Support for the Renesas R-Car Image Signal Processor (ISP).
+ Enable this option to include the V4L2 subdevice driver for the
+ ISP found on R-Car SoCs.
+
+ To compile this driver as a module, choose M here: the
+ module will be called rcar-isp.
+
endif # V4L_PLATFORM_DRIVERS
menuconfig V4L_MEM2MEM_DRIVERS
@@ -314,6 +330,9 @@ config VIDEO_MEDIATEK_VCODEC
select V4L2_MEM2MEM_DEV
select VIDEO_MEDIATEK_VCODEC_VPU if VIDEO_MEDIATEK_VPU
select VIDEO_MEDIATEK_VCODEC_SCP if MTK_SCP
+ select V4L2_H264
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
help
Mediatek video codec driver provides HW capability to
encode and decode in a range of video formats on MT8173
@@ -635,6 +654,7 @@ config VIDEO_RCAR_DRIF
depends on VIDEO_V4L2
depends on ARCH_RENESAS || COMPILE_TEST
select VIDEOBUF2_VMALLOC
+ select V4L2_ASYNC
help
Say Y if you want to enable R-Car Gen3 DRIF support. DRIF is Digital
Radio Interface that interfaces with an RF front end chip. It is a
diff --git a/drivers/media/platform/Makefile b/drivers/media/platform/Makefile
index 73ce083c2fc6..a148553babfc 100644
--- a/drivers/media/platform/Makefile
+++ b/drivers/media/platform/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_VIDEO_AM437X_VPFE) += am437x/
obj-$(CONFIG_VIDEO_XILINX) += xilinx/
+obj-$(CONFIG_VIDEO_RCAR_ISP) += rcar-isp.o
obj-$(CONFIG_VIDEO_RCAR_VIN) += rcar-vin/
obj-$(CONFIG_VIDEO_ATMEL_ISC) += atmel/
diff --git a/drivers/media/platform/allegro-dvt/allegro-core.c b/drivers/media/platform/allegro-dvt/allegro-core.c
index 887b492e4ad1..c8156da33043 100644
--- a/drivers/media/platform/allegro-dvt/allegro-core.c
+++ b/drivers/media/platform/allegro-dvt/allegro-core.c
@@ -6,16 +6,20 @@
*/
#include <linux/bits.h>
+#include <linux/clk.h>
#include <linux/firmware.h>
#include <linux/gcd.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/log2.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/xlnx-vcu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -101,6 +105,12 @@
#define BETA_OFFSET_DIV_2 -1
#define TC_OFFSET_DIV_2 -1
+/*
+ * This control allows applications to explicitly disable the encoder buffer.
+ * This value is Allegro specific.
+ */
+#define V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0)
+
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
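The control defined above is meant to be toggled from user space. A hedged sketch of how an application might disable it; the macro is private to the driver, so mirroring it as V4L2_CID_USER_ALLEGRO_BASE + 0 from the uapi header is an assumption here, and the demo_* names are hypothetical:

#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Assumed to mirror the driver-private define above (BASE + 0). */
#define DEMO_CID_ALLEGRO_ENCODER_BUFFER (V4L2_CID_USER_ALLEGRO_BASE + 0)

static int demo_disable_encoder_buffer(int fd)
{
	struct v4l2_control ctrl = {
		.id = DEMO_CID_ALLEGRO_ENCODER_BUFFER,
		.value = 0,	/* boolean control: 0 = disabled, 1 = enabled */
	};

	return ioctl(fd, VIDIOC_S_CTRL, &ctrl);
}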
@@ -125,6 +135,13 @@ struct allegro_mbox {
struct mutex lock;
};
+struct allegro_encoder_buffer {
+ unsigned int size;
+ unsigned int color_depth;
+ unsigned int num_cores;
+ unsigned int clk_rate;
+};
+
struct allegro_dev {
struct v4l2_device v4l2_dev;
struct video_device video_dev;
@@ -136,12 +153,19 @@ struct allegro_dev {
struct regmap *regmap;
struct regmap *sram;
+ struct regmap *settings;
+
+ struct clk *clk_core;
+ struct clk *clk_mcu;
const struct fw_info *fw_info;
struct allegro_buffer firmware;
struct allegro_buffer suballocator;
+ bool has_encoder_buffer;
+ struct allegro_encoder_buffer encoder_buffer;
struct completion init_complete;
+ bool initialized;
/* The mailbox interface */
struct allegro_mbox *mbox_command;
@@ -257,6 +281,8 @@ struct allegro_channel {
struct v4l2_ctrl *mpeg_video_cpb_size;
struct v4l2_ctrl *mpeg_video_gop_size;
+ struct v4l2_ctrl *encoder_buffer;
+
/* user_id is used to identify the channel during CREATE_CHANNEL */
/* not sure, what to set here and if this is actually required */
int user_id;
@@ -921,6 +947,52 @@ out:
kfree(msg);
}
+static int allegro_encoder_buffer_init(struct allegro_dev *dev,
+ struct allegro_encoder_buffer *buffer)
+{
+ int err;
+ struct regmap *settings = dev->settings;
+ unsigned int supports_10_bit;
+ unsigned int memory_depth;
+ unsigned int num_cores;
+ unsigned int color_depth;
+ unsigned long clk_rate;
+
+ /* The encoder buffer is not supported before firmware version 2019.2 */
+ if (dev->fw_info->mailbox_version < MCU_MSG_VERSION_2019_2)
+ return -ENODEV;
+
+ if (!settings)
+ return -EINVAL;
+
+ err = regmap_read(settings, VCU_ENC_COLOR_DEPTH, &supports_10_bit);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_MEMORY_DEPTH, &memory_depth);
+ if (err < 0)
+ return err;
+ err = regmap_read(settings, VCU_NUM_CORE, &num_cores);
+ if (err < 0)
+ return err;
+
+ clk_rate = clk_get_rate(dev->clk_core);
+ if (clk_rate == 0)
+ return -EINVAL;
+
+ color_depth = supports_10_bit ? 10 : 8;
+ /* The firmware expects the encoder buffer size in bits. */
+ buffer->size = color_depth * 32 * memory_depth;
+ buffer->color_depth = color_depth;
+ buffer->num_cores = num_cores;
+ buffer->clk_rate = clk_rate;
+
+ v4l2_dbg(1, debug, &dev->v4l2_dev,
+ "using %d bits encoder buffer with %d-bit color depth\n",
+ buffer->size, color_depth);
+
+ return 0;
+}
+
static void allegro_mcu_send_init(struct allegro_dev *dev,
dma_addr_t suballoc_dma, size_t suballoc_size)
{
@@ -934,10 +1006,17 @@ static void allegro_mcu_send_init(struct allegro_dev *dev,
msg.suballoc_dma = to_mcu_addr(dev, suballoc_dma);
msg.suballoc_size = to_mcu_size(dev, suballoc_size);
- /* disable L2 cache */
- msg.l2_cache[0] = -1;
- msg.l2_cache[1] = -1;
- msg.l2_cache[2] = -1;
+ if (dev->has_encoder_buffer) {
+ msg.encoder_buffer_size = dev->encoder_buffer.size;
+ msg.encoder_buffer_color_depth = dev->encoder_buffer.color_depth;
+ msg.num_cores = dev->encoder_buffer.num_cores;
+ msg.clk_rate = dev->encoder_buffer.clk_rate;
+ } else {
+ msg.encoder_buffer_size = -1;
+ msg.encoder_buffer_color_depth = -1;
+ msg.num_cores = -1;
+ msg.clk_rate = -1;
+ }
allegro_mbox_send(dev->mbox_command, &msg);
}
@@ -1184,9 +1263,8 @@ static int fill_create_channel_param(struct allegro_channel *channel,
param->max_transfo_depth_intra = channel->max_transfo_depth_intra;
param->max_transfo_depth_inter = channel->max_transfo_depth_inter;
- param->prefetch_auto = 0;
- param->prefetch_mem_offset = 0;
- param->prefetch_mem_size = 0;
+ param->encoder_buffer_enabled = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
+ param->encoder_buffer_offset = 0;
param->rate_control_mode = channel->frame_rc_enable ?
v4l2_bitrate_mode_to_mcu_mode(bitrate_mode) : 0;
@@ -1311,6 +1389,7 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
u64 src_handle)
{
struct mcu_msg_encode_frame msg;
+ bool use_encoder_buffer = v4l2_ctrl_g_ctrl(channel->encoder_buffer);
memset(&msg, 0, sizeof(msg));
@@ -1319,6 +1398,8 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.channel_id = channel->mcu_channel_id;
msg.encoding_options = AL_OPT_FORCE_LOAD;
+ if (use_encoder_buffer)
+ msg.encoding_options |= AL_OPT_USE_L2;
msg.pps_qp = 26; /* qp are relative to 26 */
msg.user_param = 0; /* copied to mcu_msg_encode_frame_response */
/* src_handle is copied to mcu_msg_encode_frame_response */
@@ -1326,8 +1407,6 @@ static int allegro_mcu_send_encode_frame(struct allegro_dev *dev,
msg.src_y = to_codec_addr(dev, src_y);
msg.src_uv = to_codec_addr(dev, src_uv);
msg.stride = channel->stride;
- msg.ep2 = 0x0;
- msg.ep2_v = to_mcu_addr(dev, msg.ep2);
allegro_mbox_send(dev->mbox_command, &msg);
@@ -1509,14 +1588,14 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_profile);
level = v4l2_ctrl_g_ctrl(channel->mpeg_video_h264_level);
- sps->profile_idc = nal_h264_profile_from_v4l2(profile);
+ sps->profile_idc = nal_h264_profile(profile);
sps->constraint_set0_flag = 0;
sps->constraint_set1_flag = 1;
sps->constraint_set2_flag = 0;
sps->constraint_set3_flag = 0;
sps->constraint_set4_flag = 0;
sps->constraint_set5_flag = 0;
- sps->level_idc = nal_h264_level_from_v4l2(level);
+ sps->level_idc = nal_h264_level(level);
sps->seq_parameter_set_id = 0;
sps->log2_max_frame_num_minus4 = LOG2_MAX_FRAME_NUM - 4;
sps->pic_order_cnt_type = 0;
@@ -1541,13 +1620,17 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui_parameters_present_flag = 1;
sps->vui.aspect_ratio_info_present_flag = 0;
sps->vui.overscan_info_present_flag = 0;
+
sps->vui.video_signal_type_present_flag = 1;
- sps->vui.video_format = 1;
- sps->vui.video_full_range_flag = 0;
+ sps->vui.video_format = 5; /* unspecified */
+ sps->vui.video_full_range_flag = nal_h264_full_range(channel->quantization);
sps->vui.colour_description_present_flag = 1;
- sps->vui.colour_primaries = 5;
- sps->vui.transfer_characteristics = 5;
- sps->vui.matrix_coefficients = 5;
+ sps->vui.colour_primaries = nal_h264_color_primaries(channel->colorspace);
+ sps->vui.transfer_characteristics =
+ nal_h264_transfer_characteristics(channel->colorspace, channel->xfer_func);
+ sps->vui.matrix_coefficients =
+ nal_h264_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
sps->vui.chroma_loc_info_present_flag = 1;
sps->vui.chroma_sample_loc_type_top_field = 0;
sps->vui.chroma_sample_loc_type_bottom_field = 0;
@@ -1560,8 +1643,9 @@ static ssize_t allegro_h264_write_sps(struct allegro_channel *channel,
sps->vui.nal_hrd_parameters_present_flag = 0;
sps->vui.vcl_hrd_parameters_present_flag = 1;
sps->vui.vcl_hrd_parameters.cpb_cnt_minus1 = 0;
- sps->vui.vcl_hrd_parameters.bit_rate_scale = 0;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-53 */
+ sps->vui.vcl_hrd_parameters.bit_rate_scale =
+ ffs(channel->bitrate_peak) - 6;
sps->vui.vcl_hrd_parameters.bit_rate_value_minus1[0] =
channel->bitrate_peak / (1 << (6 + sps->vui.vcl_hrd_parameters.bit_rate_scale)) - 1;
/* See Rec. ITU-T H.264 (04/2017) p. 410 E-54 */
@@ -1654,12 +1738,12 @@ static ssize_t allegro_hevc_write_vps(struct allegro_channel *channel,
vps->temporal_id_nesting_flag = 1;
ptl = &vps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
vps->sub_layer_ordering_info_present_flag = 0;
vps->max_dec_pic_buffering_minus1[0] = num_ref_frames;
@@ -1678,7 +1762,10 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
struct allegro_dev *dev = channel->dev;
struct nal_hevc_sps *sps;
struct nal_hevc_profile_tier_level *ptl;
+ struct nal_hevc_vui_parameters *vui;
+ struct nal_hevc_hrd_parameters *hrd;
ssize_t size;
+ unsigned int cpb_size;
unsigned int num_ref_frames = channel->num_ref_idx_l0;
s32 profile = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_profile);
s32 level = v4l2_ctrl_g_ctrl(channel->mpeg_video_hevc_level);
@@ -1691,12 +1778,12 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->temporal_id_nesting_flag = 1;
ptl = &sps->profile_tier_level;
- ptl->general_profile_idc = nal_hevc_profile_from_v4l2(profile);
+ ptl->general_profile_idc = nal_hevc_profile(profile);
ptl->general_profile_compatibility_flag[ptl->general_profile_idc] = 1;
- ptl->general_tier_flag = nal_hevc_tier_from_v4l2(tier);
+ ptl->general_tier_flag = nal_hevc_tier(tier);
ptl->general_progressive_source_flag = 1;
ptl->general_frame_only_constraint_flag = 1;
- ptl->general_level_idc = nal_hevc_level_from_v4l2(level);
+ ptl->general_level_idc = nal_hevc_level(level);
sps->seq_parameter_set_id = 0;
sps->chroma_format_idc = 1; /* Only 4:2:0 sampling supported */
@@ -1731,6 +1818,50 @@ static ssize_t allegro_hevc_write_sps(struct allegro_channel *channel,
sps->sps_temporal_mvp_enabled_flag = channel->temporal_mvp_enable;
sps->strong_intra_smoothing_enabled_flag = channel->max_cu_size > 4;
+ sps->vui_parameters_present_flag = 1;
+ vui = &sps->vui;
+
+ vui->video_signal_type_present_flag = 1;
+ vui->video_format = 5; /* unspecified */
+ vui->video_full_range_flag = nal_hevc_full_range(channel->quantization);
+ vui->colour_description_present_flag = 1;
+ vui->colour_primaries = nal_hevc_color_primaries(channel->colorspace);
+ vui->transfer_characteristics = nal_hevc_transfer_characteristics(channel->colorspace,
+ channel->xfer_func);
+ vui->matrix_coeffs = nal_hevc_matrix_coeffs(channel->colorspace, channel->ycbcr_enc);
+
+ vui->chroma_loc_info_present_flag = 1;
+ vui->chroma_sample_loc_type_top_field = 0;
+ vui->chroma_sample_loc_type_bottom_field = 0;
+
+ vui->vui_timing_info_present_flag = 1;
+ vui->vui_num_units_in_tick = channel->framerate.denominator;
+ vui->vui_time_scale = channel->framerate.numerator;
+
+ vui->bitstream_restriction_flag = 1;
+ vui->motion_vectors_over_pic_boundaries_flag = 1;
+ vui->restricted_ref_pic_lists_flag = 1;
+ vui->log2_max_mv_length_horizontal = 15;
+ vui->log2_max_mv_length_vertical = 15;
+
+ vui->vui_hrd_parameters_present_flag = 1;
+ hrd = &vui->nal_hrd_parameters;
+ hrd->vcl_hrd_parameters_present_flag = 1;
+
+ hrd->initial_cpb_removal_delay_length_minus1 = 31;
+ hrd->au_cpb_removal_delay_length_minus1 = 30;
+ hrd->dpb_output_delay_length_minus1 = 30;
+
+ hrd->bit_rate_scale = ffs(channel->bitrate_peak) - 6;
+ hrd->vcl_hrd[0].bit_rate_value_minus1[0] =
+ (channel->bitrate_peak >> (6 + hrd->bit_rate_scale)) - 1;
+
+ cpb_size = v4l2_ctrl_g_ctrl(channel->mpeg_video_cpb_size) * 1000;
+ hrd->cpb_size_scale = ffs(cpb_size) - 4;
+ hrd->vcl_hrd[0].cpb_size_value_minus1[0] = (cpb_size >> (4 + hrd->cpb_size_scale)) - 1;
+
+ hrd->vcl_hrd[0].cbr_flag[0] = !v4l2_ctrl_g_ctrl(channel->mpeg_video_frame_rc_enable);
+
size = nal_hevc_write_sps(&dev->plat_dev->dev, dest, n, sps);
kfree(sps);
@@ -2185,6 +2316,15 @@ static irqreturn_t allegro_irq_thread(int irq, void *data)
{
struct allegro_dev *dev = data;
+ /*
+ * The firmware is initialized only after the mailbox is set up. We also
+ * check the AL5_ITC_CPU_IRQ_STA register to see whether the firmware
+ * actually triggered the interrupt. Although this should not happen,
+ * make sure to ignore interrupts while the mailbox is not initialized.
+ */
+ if (!dev->mbox_status)
+ return IRQ_NONE;
+
allegro_mbox_notify(dev->mbox_status);
return IRQ_HANDLED;
@@ -2384,6 +2524,8 @@ static void allegro_destroy_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, false);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, false);
+ v4l2_ctrl_grab(channel->encoder_buffer, false);
+
if (channel->user_id != -1) {
clear_bit(channel->user_id, &dev->channel_user_ids);
channel->user_id = -1;
@@ -2450,6 +2592,8 @@ static int allegro_create_channel(struct allegro_channel *channel)
v4l2_ctrl_grab(channel->mpeg_video_cpb_size, true);
v4l2_ctrl_grab(channel->mpeg_video_gop_size, true);
+ v4l2_ctrl_grab(channel->encoder_buffer, true);
+
reinit_completion(&channel->completion);
allegro_mcu_send_create_channel(dev, channel);
timeout = wait_for_completion_timeout(&channel->completion,
@@ -2833,6 +2977,10 @@ static int allegro_try_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
allegro_clamp_bitrate(channel, ctrl);
break;
+ case V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER:
+ if (!channel->dev->has_encoder_buffer)
+ ctrl->val = 0;
+ break;
}
return 0;
@@ -2873,6 +3021,16 @@ static const struct v4l2_ctrl_ops allegro_ctrl_ops = {
.s_ctrl = allegro_s_ctrl,
};
+static const struct v4l2_ctrl_config allegro_encoder_buffer_ctrl_config = {
+ .id = V4L2_CID_USER_ALLEGRO_ENCODER_BUFFER,
+ .name = "Encoder Buffer Enable",
+ .type = V4L2_CTRL_TYPE_BOOLEAN,
+ .min = 0,
+ .max = 1,
+ .step = 1,
+ .def = 1,
+};
+
static int allegro_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
@@ -3024,6 +3182,8 @@ static int allegro_open(struct file *file)
V4L2_CID_MPEG_VIDEO_GOP_SIZE,
0, ALLEGRO_GOP_SIZE_MAX,
1, ALLEGRO_GOP_SIZE_DEFAULT);
+ channel->encoder_buffer = v4l2_ctrl_new_custom(handler,
+ &allegro_encoder_buffer_ctrl_config, NULL);
v4l2_ctrl_new_std(handler,
&allegro_ctrl_ops,
V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
@@ -3504,6 +3664,11 @@ static int allegro_mcu_hw_init(struct allegro_dev *dev,
return -EIO;
}
+ err = allegro_encoder_buffer_init(dev, &dev->encoder_buffer);
+ dev->has_encoder_buffer = (err == 0);
+ if (!dev->has_encoder_buffer)
+ v4l2_info(&dev->v4l2_dev, "encoder buffer not available\n");
+
allegro_mcu_enable_interrupts(dev);
/* The mcu sends INIT after reset. */
@@ -3591,11 +3756,16 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
v4l2_info(&dev->v4l2_dev,
"using mcu firmware version '%s'\n", dev->fw_info->version);
+ pm_runtime_enable(&dev->plat_dev->dev);
+ err = pm_runtime_resume_and_get(&dev->plat_dev->dev);
+ if (err)
+ goto err_release_firmware_codec;
+
/* Ensure that the mcu is sleeping at the reset vector */
err = allegro_mcu_reset(dev);
if (err) {
v4l2_err(&dev->v4l2_dev, "failed to reset mcu\n");
- goto err_release_firmware_codec;
+ goto err_suspend;
}
allegro_copy_firmware(dev, fw->data, fw->size);
@@ -3623,6 +3793,8 @@ static void allegro_fw_callback(const struct firmware *fw, void *context)
"allegro codec registered as /dev/video%d\n",
dev->video_dev.num);
+ dev->initialized = true;
+
release_firmware(fw_codec);
release_firmware(fw);
@@ -3635,6 +3807,9 @@ err_mcu_hw_deinit:
allegro_mcu_hw_deinit(dev);
err_free_fw_codec:
allegro_free_fw_codec(dev);
+err_suspend:
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
err_release_firmware_codec:
release_firmware(fw_codec);
err_release_firmware:
@@ -3669,6 +3844,8 @@ static int allegro_probe(struct platform_device *pdev)
mutex_init(&dev->lock);
+ dev->initialized = false;
+
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!res) {
dev_err(&pdev->dev,
@@ -3707,6 +3884,18 @@ static int allegro_probe(struct platform_device *pdev)
return PTR_ERR(dev->sram);
}
+ dev->settings = syscon_regmap_lookup_by_compatible("xlnx,vcu-settings");
+ if (IS_ERR(dev->settings))
+ dev_warn(&pdev->dev, "failed to open settings\n");
+
+ dev->clk_core = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(dev->clk_core))
+ return PTR_ERR(dev->clk_core);
+
+ dev->clk_mcu = devm_clk_get(&pdev->dev, "mcu_clk");
+ if (IS_ERR(dev->clk_mcu))
+ return PTR_ERR(dev->clk_mcu);
+
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -3739,17 +3928,75 @@ static int allegro_remove(struct platform_device *pdev)
{
struct allegro_dev *dev = platform_get_drvdata(pdev);
- video_unregister_device(&dev->video_dev);
- if (dev->m2m_dev)
- v4l2_m2m_release(dev->m2m_dev);
- allegro_mcu_hw_deinit(dev);
- allegro_free_fw_codec(dev);
+ if (dev->initialized) {
+ video_unregister_device(&dev->video_dev);
+ if (dev->m2m_dev)
+ v4l2_m2m_release(dev->m2m_dev);
+ allegro_mcu_hw_deinit(dev);
+ allegro_free_fw_codec(dev);
+ }
+
+ pm_runtime_put(&dev->plat_dev->dev);
+ pm_runtime_disable(&dev->plat_dev->dev);
v4l2_device_unregister(&dev->v4l2_dev);
return 0;
}
+static int allegro_runtime_resume(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+ struct regmap *settings = dev->settings;
+ unsigned int clk_mcu;
+ unsigned int clk_core;
+ int err;
+
+ if (!settings)
+ return -EINVAL;
+
+#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
+
+ err = regmap_read(settings, VCU_CORE_CLK, &clk_core);
+ if (err < 0)
+ return err;
+ err = clk_set_rate(dev->clk_core, MHZ_TO_HZ(clk_core));
+ if (err < 0)
+ return err;
+ err = clk_prepare_enable(dev->clk_core);
+ if (err)
+ return err;
+
+ err = regmap_read(settings, VCU_MCU_CLK, &clk_mcu);
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_set_rate(dev->clk_mcu, MHZ_TO_HZ(clk_mcu));
+ if (err < 0)
+ goto disable_clk_core;
+ err = clk_prepare_enable(dev->clk_mcu);
+ if (err)
+ goto disable_clk_core;
+
+#undef MHZ_TO_HZ
+
+ return 0;
+
+disable_clk_core:
+ clk_disable_unprepare(dev->clk_core);
+
+ return err;
+}
+
+static int allegro_runtime_suspend(struct device *device)
+{
+ struct allegro_dev *dev = dev_get_drvdata(device);
+
+ clk_disable_unprepare(dev->clk_mcu);
+ clk_disable_unprepare(dev->clk_core);
+
+ return 0;
+}
+
static const struct of_device_id allegro_dt_ids[] = {
{ .compatible = "allegro,al5e-1.1" },
{ /* sentinel */ }
@@ -3757,12 +4004,18 @@ static const struct of_device_id allegro_dt_ids[] = {
MODULE_DEVICE_TABLE(of, allegro_dt_ids);
+static const struct dev_pm_ops allegro_pm_ops = {
+ .runtime_resume = allegro_runtime_resume,
+ .runtime_suspend = allegro_runtime_suspend,
+};
+
static struct platform_driver allegro_driver = {
.probe = allegro_probe,
.remove = allegro_remove,
.driver = {
.name = "allegro",
.of_match_table = of_match_ptr(allegro_dt_ids),
+ .pm = &allegro_pm_ops,
},
};
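The allegro changes above pair pm_runtime_enable() plus pm_runtime_resume_and_get() at firmware-load time with pm_runtime_put() plus pm_runtime_disable() on the teardown paths. A hedged sketch of that pairing for a hypothetical device; pm_runtime_resume_and_get() drops its reference itself on failure, so no extra put is needed in that branch:

#include <linux/pm_runtime.h>

static int demo_pm_bringup(struct device *dev)
{
	int err;

	pm_runtime_enable(dev);
	err = pm_runtime_resume_and_get(dev);	/* drops its own ref on failure */
	if (err) {
		pm_runtime_disable(dev);
		return err;
	}
	return 0;
}

static void demo_pm_teardown(struct device *dev)
{
	pm_runtime_put(dev);
	pm_runtime_disable(dev);
}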
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.c b/drivers/media/platform/allegro-dvt/allegro-mail.c
index 7e08c5050f2e..16effad10746 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.c
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.c
@@ -49,11 +49,11 @@ allegro_enc_init(u32 *dst, struct mcu_msg_init_request *msg)
dst[i++] = msg->reserved0;
dst[i++] = msg->suballoc_dma;
dst[i++] = msg->suballoc_size;
- dst[i++] = msg->l2_cache[0];
- dst[i++] = msg->l2_cache[1];
- dst[i++] = msg->l2_cache[2];
+ dst[i++] = msg->encoder_buffer_size;
+ dst[i++] = msg->encoder_buffer_color_depth;
+ dst[i++] = msg->num_cores;
if (version >= MCU_MSG_VERSION_2019_2) {
- dst[i++] = -1;
+ dst[i++] = msg->clk_rate;
dst[i++] = 0;
}
@@ -146,13 +146,10 @@ allegro_encode_config_blob(u32 *dst, struct create_channel_param *param)
FIELD_PREP(GENMASK(7, 0), param->tc_offset);
dst[i++] = param->unknown11;
dst[i++] = param->unknown12;
- if (version >= MCU_MSG_VERSION_2019_2)
- dst[i++] = param->num_slices;
- else
- dst[i++] = FIELD_PREP(GENMASK(31, 16), param->prefetch_auto) |
- FIELD_PREP(GENMASK(15, 0), param->num_slices);
- dst[i++] = param->prefetch_mem_offset;
- dst[i++] = param->prefetch_mem_size;
+ dst[i++] = param->num_slices;
+ dst[i++] = param->encoder_buffer_offset;
+ dst[i++] = param->encoder_buffer_enabled;
+
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->clip_vrt_range) |
FIELD_PREP(GENMASK(15, 0), param->clip_hrz_range);
dst[i++] = FIELD_PREP(GENMASK(31, 16), param->me_range[1]) |
@@ -429,8 +426,8 @@ allegro_dec_encode_frame(struct mcu_msg_encode_frame_response *msg, u32 *src)
msg->frame_tag_size = src[i++];
msg->stuffing = src[i++];
msg->filler = src[i++];
- msg->num_column = FIELD_GET(GENMASK(31, 16), src[i]);
- msg->num_row = FIELD_GET(GENMASK(15, 0), src[i++]);
+ msg->num_row = FIELD_GET(GENMASK(31, 16), src[i]);
+ msg->num_column = FIELD_GET(GENMASK(15, 0), src[i++]);
msg->num_ref_idx_l1 = FIELD_GET(GENMASK(31, 24), src[i]);
msg->num_ref_idx_l0 = FIELD_GET(GENMASK(23, 16), src[i]);
msg->qp = FIELD_GET(GENMASK(15, 0), src[i++]);
diff --git a/drivers/media/platform/allegro-dvt/allegro-mail.h b/drivers/media/platform/allegro-dvt/allegro-mail.h
index 2c7bc509eac3..a5686058d754 100644
--- a/drivers/media/platform/allegro-dvt/allegro-mail.h
+++ b/drivers/media/platform/allegro-dvt/allegro-mail.h
@@ -37,7 +37,10 @@ struct mcu_msg_init_request {
u32 reserved0; /* maybe a unused channel id */
u32 suballoc_dma;
u32 suballoc_size;
- s32 l2_cache[3];
+ s32 encoder_buffer_size;
+ s32 encoder_buffer_color_depth;
+ s32 num_cores;
+ s32 clk_rate;
};
struct mcu_msg_init_response {
@@ -79,9 +82,8 @@ struct create_channel_param {
u32 unknown11;
u32 unknown12;
u16 num_slices;
- u16 prefetch_auto;
- u32 prefetch_mem_offset;
- u32 prefetch_mem_size;
+ u32 encoder_buffer_offset;
+ u32 encoder_buffer_enabled;
u16 clip_hrz_range;
u16 clip_vrt_range;
u16 me_range[4];
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.c b/drivers/media/platform/allegro-dvt/nal-h264.c
index 0ab2fcbee1b9..32663766340f 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.c
+++ b/drivers/media/platform/allegro-dvt/nal-h264.c
@@ -34,80 +34,6 @@ enum nal_unit_type {
FILLER_DATA = 12,
};
-/**
- * nal_h264_profile_from_v4l2() - Get profile_idc for v4l2 h264 profile
- * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
- *
- * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
- * in Rec. ITU-T H.264 (04/2017) A.2.
- *
- * Return: the profile_idc for the passed level
- */
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
- return 66;
- case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
- return 77;
- case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
- return 88;
- case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
- return 100;
- default:
- return -EINVAL;
- }
-}
-
-/**
- * nal_h264_level_from_v4l2() - Get level_idc for v4l2 h264 level
- * @level: the level as &enum v4l2_mpeg_video_h264_level
- *
- * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
- * Rec. ITU-T H.264 (04/2017) A.3.2.
- *
- * Return: the level_idc for the passed level
- */
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level)
-{
- switch (level) {
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
- return 10;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
- return 9;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
- return 11;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
- return 12;
- case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
- return 13;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
- return 20;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
- return 21;
- case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
- return 22;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
- return 30;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
- return 31;
- case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
- return 32;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
- return 40;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
- return 41;
- case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
- return 42;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
- return 50;
- case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
- return 51;
- default:
- return -EINVAL;
- }
-}
-
static void nal_h264_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
diff --git a/drivers/media/platform/allegro-dvt/nal-h264.h b/drivers/media/platform/allegro-dvt/nal-h264.h
index a19634fe8c0b..34db07cda652 100644
--- a/drivers/media/platform/allegro-dvt/nal-h264.h
+++ b/drivers/media/platform/allegro-dvt/nal-h264.h
@@ -8,8 +8,11 @@
#ifndef __NAL_H264_H__
#define __NAL_H264_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
/*
* struct nal_h264_hrd_parameters - HRD parameters
@@ -187,8 +190,201 @@ struct nal_h264_pps {
};
};
-int nal_h264_profile_from_v4l2(enum v4l2_mpeg_video_h264_profile profile);
-int nal_h264_level_from_v4l2(enum v4l2_mpeg_video_h264_level level);
+/**
+ * nal_h264_profile() - Get profile_idc for v4l2 h264 profile
+ * @profile: the profile as &enum v4l2_mpeg_video_h264_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_profile to profile_idc as specified
+ * in Rec. ITU-T H.264 (04/2017) A.2.
+ *
+ * Return: the profile_idc for the passed level
+ */
+static inline int nal_h264_profile(enum v4l2_mpeg_video_h264_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE:
+ return 66;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN:
+ return 77;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED:
+ return 88;
+ case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH:
+ return 100;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_level() - Get level_idc for v4l2 h264 level
+ * @level: the level as &enum v4l2_mpeg_video_h264_level
+ *
+ * Convert the &enum v4l2_mpeg_video_h264_level to level_idc as specified in
+ * Rec. ITU-T H.264 (04/2017) A.3.2.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_h264_level(enum v4l2_mpeg_video_h264_level level)
+{
+ switch (level) {
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_0:
+ return 10;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1B:
+ return 9;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_1:
+ return 11;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_2:
+ return 12;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_1_3:
+ return 13;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_0:
+ return 20;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_1:
+ return 21;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_2_2:
+ return 22;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_0:
+ return 30;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_1:
+ return 31;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_3_2:
+ return 32;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_0:
+ return 40;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
+ return 41;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
+ return 42;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_0:
+ return 50;
+ case V4L2_MPEG_VIDEO_H264_LEVEL_5_1:
+ return 51;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_h264_full_range() - Get video_full_range_flag for v4l2 quantization
+ * @quantization: the quantization type as &enum v4l2_quantization
+ *
+ * Convert the &enum v4l2_quantization to video_full_range_flag as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the video_full_range_flag value for the passed quantization
+ */
+static inline int nal_h264_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * nal_h264_color_primaries() - Get color_primaries for v4l2 colorspace
+ * @colorspace: the color space as &enum v4l2_colorspace
+ *
+ * Convert the &enum v4l2_colorspace to color_primaries as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the color_primaries value for the passed colorspace
+ */
+static inline int nal_h264_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_transfer_characteristics() - Get transfer_characteristics for v4l2 xfer_func
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @xfer_func: the transfer function as &enum v4l2_xfer_func
+ *
+ * Convert the &enum v4l2_xfer_func to transfer_characteristics as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the transfer_characteristics value for the passed transfer function
+ */
+static inline int nal_h264_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+/**
+ * nal_h264_matrix_coeffs() - Get matrix_coefficients for v4l2 ycbcr_encoding
+ * @colorspace: the color space as &enum v4l2_colorspace
+ * @ycbcr_encoding: the ycbcr encoding as &enum v4l2_ycbcr_encoding
+ *
+ * Convert the &enum v4l2_ycbcr_encoding to matrix_coefficients as specified in
+ * Rec. ITU-T H.264 (04/2017) E.2.1.
+ *
+ * Return: the matrix_coefficients value for the passed encoding
+ */
+static inline int nal_h264_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
+
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
ssize_t nal_h264_write_sps(const struct device *dev,
void *dest, size_t n, struct nal_h264_sps *sps);
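
For reference, a minimal sketch of how an encoder driver could consume the new inline helpers when assembling an SPS; the function and parameter names below are invented for the example and do not exist in nal-h264.h:

static int example_sps_profile_level(enum v4l2_mpeg_video_h264_profile profile,
                                     enum v4l2_mpeg_video_h264_level level,
                                     unsigned int *profile_idc,
                                     unsigned int *level_idc)
{
        int p = nal_h264_profile(profile);      /* e.g. PROFILE_HIGH -> 100 */
        int l = nal_h264_level(level);          /* e.g. LEVEL_4_0 -> 40 */

        /* Both helpers return -EINVAL for values they cannot map. */
        if (p < 0 || l < 0)
                return -EINVAL;

        *profile_idc = p;
        *level_idc = l;
        return 0;
}

The colorimetry helpers nal_h264_full_range(), nal_h264_color_primaries(), nal_h264_transfer_characteristics() and nal_h264_matrix_coeffs() are consumed the same way when filling the VUI fields of the SPS.
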
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.c b/drivers/media/platform/allegro-dvt/nal-hevc.c
index 15a352e45831..9cdf2756e0a3 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.c
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.c
@@ -35,76 +35,6 @@ enum nal_unit_type {
FD_NUT = 38,
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile)
-{
- switch (profile) {
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
- return 1;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
- return 2;
- case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
- return 3;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_profile_from_v4l2);
-
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier)
-{
- switch (tier) {
- case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
- return 0;
- case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_tier_from_v4l2);
-
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level)
-{
- /*
- * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
- * shall be set equal to a value of 30 times the level number
- * specified in Table A.6.
- */
- int factor = 30 / 10;
-
- switch (level) {
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
- return factor * 10;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
- return factor * 20;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
- return factor * 21;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
- return factor * 30;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
- return factor * 31;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
- return factor * 40;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
- return factor * 41;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
- return factor * 50;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
- return factor * 51;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
- return factor * 52;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
- return factor * 60;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
- return factor * 61;
- case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
- return factor * 62;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(nal_hevc_level_from_v4l2);
-
static void nal_hevc_write_start_code_prefix(struct rbsp *rbsp)
{
u8 *p = rbsp->data + DIV_ROUND_UP(rbsp->pos, 8);
@@ -277,6 +207,136 @@ static void nal_hevc_rbsp_vps(struct rbsp *rbsp, struct nal_hevc_vps *vps)
rbsp_unsupported(rbsp);
}
+static void nal_hevc_rbsp_sub_layer_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_sub_layer_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int cpb_cnt = 1;
+
+ for (i = 0; i < cpb_cnt; i++) {
+ rbsp_uev(rbsp, &hrd->bit_rate_value_minus1[i]);
+ rbsp_uev(rbsp, &hrd->cpb_size_value_minus1[i]);
+ rbsp_bit(rbsp, &hrd->cbr_flag[i]);
+ }
+}
+
+static void nal_hevc_rbsp_hrd_parameters(struct rbsp *rbsp,
+ struct nal_hevc_hrd_parameters *hrd)
+{
+ unsigned int i;
+ unsigned int max_num_sub_layers_minus_1 = 0;
+
+ rbsp_bit(rbsp, &hrd->nal_hrd_parameters_present_flag);
+ rbsp_bit(rbsp, &hrd->vcl_hrd_parameters_present_flag);
+ if (hrd->nal_hrd_parameters_present_flag || hrd->vcl_hrd_parameters_present_flag) {
+ rbsp_bit(rbsp, &hrd->sub_pic_hrd_params_present_flag);
+ if (hrd->sub_pic_hrd_params_present_flag) {
+ rbsp_bits(rbsp, 8, &hrd->tick_divisor_minus2);
+ rbsp_bits(rbsp, 5, &hrd->du_cpb_removal_delay_increment_length_minus1);
+ rbsp_bit(rbsp, &hrd->sub_pic_cpb_params_in_pic_timing_sei_flag);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_du_length_minus1);
+ }
+ rbsp_bits(rbsp, 4, &hrd->bit_rate_scale);
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_scale);
+ if (hrd->sub_pic_hrd_params_present_flag)
+ rbsp_bits(rbsp, 4, &hrd->cpb_size_du_scale);
+ rbsp_bits(rbsp, 5, &hrd->initial_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->au_cpb_removal_delay_length_minus1);
+ rbsp_bits(rbsp, 5, &hrd->dpb_output_delay_length_minus1);
+ }
+ for (i = 0; i <= max_num_sub_layers_minus_1; i++) {
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_general_flag[i]);
+ if (!hrd->fixed_pic_rate_general_flag[i])
+ rbsp_bit(rbsp, &hrd->fixed_pic_rate_within_cvs_flag[i]);
+ if (hrd->fixed_pic_rate_within_cvs_flag[i])
+ rbsp_uev(rbsp, &hrd->elemental_duration_in_tc_minus1[i]);
+ else
+ rbsp_bit(rbsp, &hrd->low_delay_hrd_flag[i]);
+ if (!hrd->low_delay_hrd_flag[i])
+ rbsp_uev(rbsp, &hrd->cpb_cnt_minus1[i]);
+ if (hrd->nal_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ if (hrd->vcl_hrd_parameters_present_flag)
+ nal_hevc_rbsp_sub_layer_hrd_parameters(rbsp, &hrd->vcl_hrd[i]);
+ }
+}
+
+static void nal_hevc_rbsp_vui_parameters(struct rbsp *rbsp,
+ struct nal_hevc_vui_parameters *vui)
+{
+ if (!vui) {
+ rbsp->error = -EINVAL;
+ return;
+ }
+
+ rbsp_bit(rbsp, &vui->aspect_ratio_info_present_flag);
+ if (vui->aspect_ratio_info_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->aspect_ratio_idc);
+ if (vui->aspect_ratio_idc == 255) {
+ rbsp_bits(rbsp, 16, &vui->sar_width);
+ rbsp_bits(rbsp, 16, &vui->sar_height);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->overscan_info_present_flag);
+ if (vui->overscan_info_present_flag)
+ rbsp_bit(rbsp, &vui->overscan_appropriate_flag);
+
+ rbsp_bit(rbsp, &vui->video_signal_type_present_flag);
+ if (vui->video_signal_type_present_flag) {
+ rbsp_bits(rbsp, 3, &vui->video_format);
+ rbsp_bit(rbsp, &vui->video_full_range_flag);
+
+ rbsp_bit(rbsp, &vui->colour_description_present_flag);
+ if (vui->colour_description_present_flag) {
+ rbsp_bits(rbsp, 8, &vui->colour_primaries);
+ rbsp_bits(rbsp, 8, &vui->transfer_characteristics);
+ rbsp_bits(rbsp, 8, &vui->matrix_coeffs);
+ }
+ }
+
+ rbsp_bit(rbsp, &vui->chroma_loc_info_present_flag);
+ if (vui->chroma_loc_info_present_flag) {
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_top_field);
+ rbsp_uev(rbsp, &vui->chroma_sample_loc_type_bottom_field);
+ }
+
+ rbsp_bit(rbsp, &vui->neutral_chroma_indication_flag);
+ rbsp_bit(rbsp, &vui->field_seq_flag);
+ rbsp_bit(rbsp, &vui->frame_field_info_present_flag);
+ rbsp_bit(rbsp, &vui->default_display_window_flag);
+ if (vui->default_display_window_flag) {
+ rbsp_uev(rbsp, &vui->def_disp_win_left_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_right_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_top_offset);
+ rbsp_uev(rbsp, &vui->def_disp_win_bottom_offset);
+ }
+
+ rbsp_bit(rbsp, &vui->vui_timing_info_present_flag);
+ if (vui->vui_timing_info_present_flag) {
+ rbsp_bits(rbsp, 32, &vui->vui_num_units_in_tick);
+ rbsp_bits(rbsp, 32, &vui->vui_time_scale);
+ rbsp_bit(rbsp, &vui->vui_poc_proportional_to_timing_flag);
+ if (vui->vui_poc_proportional_to_timing_flag)
+ rbsp_uev(rbsp, &vui->vui_num_ticks_poc_diff_one_minus1);
+ rbsp_bit(rbsp, &vui->vui_hrd_parameters_present_flag);
+ if (vui->vui_hrd_parameters_present_flag)
+ nal_hevc_rbsp_hrd_parameters(rbsp, &vui->nal_hrd_parameters);
+ }
+
+ rbsp_bit(rbsp, &vui->bitstream_restriction_flag);
+ if (vui->bitstream_restriction_flag) {
+ rbsp_bit(rbsp, &vui->tiles_fixed_structure_flag);
+ rbsp_bit(rbsp, &vui->motion_vectors_over_pic_boundaries_flag);
+ rbsp_bit(rbsp, &vui->restricted_ref_pic_lists_flag);
+ rbsp_uev(rbsp, &vui->min_spatial_segmentation_idc);
+ rbsp_uev(rbsp, &vui->max_bytes_per_pic_denom);
+ rbsp_uev(rbsp, &vui->max_bits_per_min_cu_denom);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_horizontal);
+ rbsp_uev(rbsp, &vui->log2_max_mv_length_vertical);
+ }
+}
+
static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
{
unsigned int i;
@@ -345,7 +405,7 @@ static void nal_hevc_rbsp_sps(struct rbsp *rbsp, struct nal_hevc_sps *sps)
rbsp_bit(rbsp, &sps->strong_intra_smoothing_enabled_flag);
rbsp_bit(rbsp, &sps->vui_parameters_present_flag);
if (sps->vui_parameters_present_flag)
- rbsp_unsupported(rbsp);
+ nal_hevc_rbsp_vui_parameters(rbsp, &sps->vui);
rbsp_bit(rbsp, &sps->extension_present_flag);
if (sps->extension_present_flag) {
diff --git a/drivers/media/platform/allegro-dvt/nal-hevc.h b/drivers/media/platform/allegro-dvt/nal-hevc.h
index c09bbe5446aa..eb46f12aae80 100644
--- a/drivers/media/platform/allegro-dvt/nal-hevc.h
+++ b/drivers/media/platform/allegro-dvt/nal-hevc.h
@@ -8,9 +8,11 @@
#ifndef __NAL_HEVC_H__
#define __NAL_HEVC_H__
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <media/v4l2-ctrls.h>
+#include <linux/v4l2-controls.h>
+#include <linux/videodev2.h>
struct nal_hevc_profile_tier_level {
unsigned int general_profile_space;
@@ -318,16 +320,183 @@ struct nal_hevc_pps {
};
};
-int nal_hevc_profile_from_v4l2(enum v4l2_mpeg_video_hevc_profile profile);
-int nal_hevc_tier_from_v4l2(enum v4l2_mpeg_video_hevc_tier tier);
-int nal_hevc_level_from_v4l2(enum v4l2_mpeg_video_hevc_level level);
+/**
+ * nal_hevc_profile() - Get profile_idc for v4l2 hevc profile
+ * @profile: the profile as &enum v4l2_mpeg_video_hevc_profile
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_profile to profile_idc as specified
+ * in Rec. ITU-T H.265 (02/2018) A.3.
+ *
+ * Return: the profile_idc for the passed profile
+ */
+static inline int nal_hevc_profile(enum v4l2_mpeg_video_hevc_profile profile)
+{
+ switch (profile) {
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN:
+ return 1;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10:
+ return 2;
+ case V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_STILL_PICTURE:
+ return 3;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_tier() - Get tier_flag for v4l2 hevc tier
+ * @tier: the tier as &enum v4l2_mpeg_video_hevc_tier
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_tier to tier_flag as specified
+ * in Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the tier_flag for the passed tier
+ */
+static inline int nal_hevc_tier(enum v4l2_mpeg_video_hevc_tier tier)
+{
+ switch (tier) {
+ case V4L2_MPEG_VIDEO_HEVC_TIER_MAIN:
+ return 0;
+ case V4L2_MPEG_VIDEO_HEVC_TIER_HIGH:
+ return 1;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * nal_hevc_level() - Get level_idc for v4l2 hevc level
+ * @level: the level as &enum v4l2_mpeg_video_hevc_level
+ *
+ * Convert the &enum v4l2_mpeg_video_hevc_level to level_idc as specified in
+ * Rec. ITU-T H.265 (02/2018) A.4.1.
+ *
+ * Return: the level_idc for the passed level
+ */
+static inline int nal_hevc_level(enum v4l2_mpeg_video_hevc_level level)
+{
+ /*
+ * T-Rec-H.265 p. 280: general_level_idc and sub_layer_level_idc[ i ]
+ * shall be set equal to a value of 30 times the level number
+ * specified in Table A.6.
+ */
+ int factor = 30 / 10;
+
+ switch (level) {
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_1:
+ return factor * 10;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2:
+ return factor * 20;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_2_1:
+ return factor * 21;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3:
+ return factor * 30;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_3_1:
+ return factor * 31;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4:
+ return factor * 40;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_4_1:
+ return factor * 41;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5:
+ return factor * 50;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1:
+ return factor * 51;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_5_2:
+ return factor * 52;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6:
+ return factor * 60;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_1:
+ return factor * 61;
+ case V4L2_MPEG_VIDEO_HEVC_LEVEL_6_2:
+ return factor * 62;
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int nal_hevc_full_range(enum v4l2_quantization quantization)
+{
+ switch (quantization) {
+ case V4L2_QUANTIZATION_FULL_RANGE:
+ return 1;
+ case V4L2_QUANTIZATION_LIM_RANGE:
+ return 0;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static inline int nal_hevc_color_primaries(enum v4l2_colorspace colorspace)
+{
+ switch (colorspace) {
+ case V4L2_COLORSPACE_SMPTE170M:
+ return 6;
+ case V4L2_COLORSPACE_SMPTE240M:
+ return 7;
+ case V4L2_COLORSPACE_REC709:
+ return 1;
+ case V4L2_COLORSPACE_470_SYSTEM_M:
+ return 4;
+ case V4L2_COLORSPACE_JPEG:
+ case V4L2_COLORSPACE_SRGB:
+ case V4L2_COLORSPACE_470_SYSTEM_BG:
+ return 5;
+ case V4L2_COLORSPACE_BT2020:
+ return 9;
+ case V4L2_COLORSPACE_DEFAULT:
+ case V4L2_COLORSPACE_OPRGB:
+ case V4L2_COLORSPACE_RAW:
+ case V4L2_COLORSPACE_DCI_P3:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_transfer_characteristics(enum v4l2_colorspace colorspace,
+ enum v4l2_xfer_func xfer_func)
+{
+ if (xfer_func == V4L2_XFER_FUNC_DEFAULT)
+ xfer_func = V4L2_MAP_XFER_FUNC_DEFAULT(colorspace);
+
+ switch (xfer_func) {
+ case V4L2_XFER_FUNC_709:
+ return 6;
+ case V4L2_XFER_FUNC_SMPTE2084:
+ return 16;
+ case V4L2_XFER_FUNC_SRGB:
+ case V4L2_XFER_FUNC_OPRGB:
+ case V4L2_XFER_FUNC_NONE:
+ case V4L2_XFER_FUNC_DCI_P3:
+ case V4L2_XFER_FUNC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
+
+static inline int nal_hevc_matrix_coeffs(enum v4l2_colorspace colorspace,
+ enum v4l2_ycbcr_encoding ycbcr_encoding)
+{
+ if (ycbcr_encoding == V4L2_YCBCR_ENC_DEFAULT)
+ ycbcr_encoding = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);
-int nal_range_from_v4l2(enum v4l2_quantization quantization);
-int nal_color_primaries_from_v4l2(enum v4l2_colorspace colorspace);
-int nal_transfer_characteristics_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_xfer_func xfer_func);
-int nal_matrix_coeffs_from_v4l2(enum v4l2_colorspace colorspace,
- enum v4l2_ycbcr_encoding ycbcr_encoding);
+ switch (ycbcr_encoding) {
+ case V4L2_YCBCR_ENC_601:
+ case V4L2_YCBCR_ENC_XV601:
+ return 5;
+ case V4L2_YCBCR_ENC_709:
+ case V4L2_YCBCR_ENC_XV709:
+ return 1;
+ case V4L2_YCBCR_ENC_BT2020:
+ return 9;
+ case V4L2_YCBCR_ENC_BT2020_CONST_LUM:
+ return 10;
+ case V4L2_YCBCR_ENC_SMPTE240M:
+ default:
+ return 2;
+ }
+}
ssize_t nal_hevc_write_vps(const struct device *dev,
void *dest, size_t n, struct nal_hevc_vps *vps);
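
The HEVC level arithmetic is easiest to see with a worked value: V4L2_MPEG_VIDEO_HEVC_LEVEL_5_1 yields 3 * 51 = 153, i.e. 30 times the level number 5.1 from Table A.6. A minimal caller sketch, with function and parameter names invented for the example:

static int example_hevc_ptl(enum v4l2_mpeg_video_hevc_profile profile,
                            enum v4l2_mpeg_video_hevc_tier tier,
                            enum v4l2_mpeg_video_hevc_level level,
                            unsigned int *profile_idc,
                            unsigned int *tier_flag,
                            unsigned int *level_idc)
{
        int p = nal_hevc_profile(profile);      /* PROFILE_MAIN -> 1 */
        int t = nal_hevc_tier(tier);            /* TIER_HIGH -> 1 */
        int l = nal_hevc_level(level);          /* LEVEL_5_1 -> 3 * 51 = 153 */

        /* All three helpers return -EINVAL for values they cannot map. */
        if (p < 0 || t < 0 || l < 0)
                return -EINVAL;

        *profile_idc = p;
        *tier_flag = t;
        *level_idc = l;
        return 0;
}
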
diff --git a/drivers/media/platform/am437x/am437x-vpfe.c b/drivers/media/platform/am437x/am437x-vpfe.c
index 1c9cb9e05fdf..2dfae9bc0bba 100644
--- a/drivers/media/platform/am437x/am437x-vpfe.c
+++ b/drivers/media/platform/am437x/am437x-vpfe.c
@@ -2297,7 +2297,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
dev_dbg(dev, "vpfe_get_pdata\n");
- v4l2_async_notifier_init(&vpfe->notifier);
+ v4l2_async_nf_init(&vpfe->notifier);
if (!IS_ENABLED(CONFIG_OF) || !dev->of_node)
return dev->platform_data;
@@ -2365,9 +2365,10 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
goto cleanup;
}
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpfe->notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpfe->notifier,
+ of_fwnode_handle(rem),
+ struct
+ v4l2_async_subdev);
of_node_put(rem);
if (IS_ERR(pdata->asd[i]))
goto cleanup;
@@ -2377,7 +2378,7 @@ vpfe_get_pdata(struct vpfe_device *vpfe)
return pdata;
cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
of_node_put(endpoint);
return NULL;
}
@@ -2392,7 +2393,6 @@ static int vpfe_probe(struct platform_device *pdev)
struct vpfe_config *vpfe_cfg;
struct vpfe_device *vpfe;
struct vpfe_ccdc *ccdc;
- struct resource *res;
int ret;
vpfe = devm_kzalloc(&pdev->dev, sizeof(*vpfe), GFP_KERNEL);
@@ -2410,8 +2410,7 @@ static int vpfe_probe(struct platform_device *pdev)
vpfe->cfg = vpfe_cfg;
ccdc = &vpfe->ccdc;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ccdc->ccdc_cfg.base_addr = devm_ioremap_resource(&pdev->dev, res);
+ ccdc->ccdc_cfg.base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ccdc->ccdc_cfg.base_addr)) {
ret = PTR_ERR(ccdc->ccdc_cfg.base_addr);
goto probe_out_cleanup;
@@ -2465,7 +2464,7 @@ static int vpfe_probe(struct platform_device *pdev)
}
vpfe->notifier.ops = &vpfe_async_ops;
- ret = v4l2_async_notifier_register(&vpfe->v4l2_dev, &vpfe->notifier);
+ ret = v4l2_async_nf_register(&vpfe->v4l2_dev, &vpfe->notifier);
if (ret) {
vpfe_err(vpfe, "Error registering async notifier\n");
ret = -EINVAL;
@@ -2477,7 +2476,7 @@ static int vpfe_probe(struct platform_device *pdev)
probe_out_v4l2_unregister:
v4l2_device_unregister(&vpfe->v4l2_dev);
probe_out_cleanup:
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
return ret;
}
@@ -2490,8 +2489,8 @@ static int vpfe_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&vpfe->notifier);
- v4l2_async_notifier_cleanup(&vpfe->notifier);
+ v4l2_async_nf_unregister(&vpfe->notifier);
+ v4l2_async_nf_cleanup(&vpfe->notifier);
v4l2_device_unregister(&vpfe->v4l2_dev);
video_unregister_device(&vpfe->video_dev);
diff --git a/drivers/media/platform/aspeed-video.c b/drivers/media/platform/aspeed-video.c
index 7bb6babdcade..cad3f97515ae 100644
--- a/drivers/media/platform/aspeed-video.c
+++ b/drivers/media/platform/aspeed-video.c
@@ -23,6 +23,8 @@
#include <linux/videodev2.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
@@ -201,6 +203,14 @@ struct aspeed_video_buffer {
struct list_head link;
};
+struct aspeed_video_perf {
+ ktime_t last_sample;
+ u32 totaltime;
+ u32 duration;
+ u32 duration_min;
+ u32 duration_max;
+};
+
#define to_aspeed_video_buffer(x) \
container_of((x), struct aspeed_video_buffer, vb)
@@ -242,6 +252,8 @@ struct aspeed_video {
unsigned int frame_left;
unsigned int frame_right;
unsigned int frame_top;
+
+ struct aspeed_video_perf perf;
};
#define to_aspeed_video(x) container_of((x), struct aspeed_video, v4l2_dev)
@@ -422,6 +434,21 @@ static void aspeed_video_init_jpeg_table(u32 *table, bool yuv420)
}
}
+/* Update only the JPEG DCT table entries that depend on 4:2:0/4:4:4 subsampling */
+static void aspeed_video_update_jpeg_table(u32 *table, bool yuv420)
+{
+ int i;
+ unsigned int base;
+
+ for (i = 0; i < ASPEED_VIDEO_JPEG_NUM_QUALITIES; i++) {
+ base = 256 * i; /* AST HW requires this header spacing */
+ base += ASPEED_VIDEO_JPEG_HEADER_SIZE +
+ ASPEED_VIDEO_JPEG_DCT_SIZE;
+
+ table[base + 2] = (yuv420) ? 0x00220103 : 0x00110103;
+ }
+}
+
static void aspeed_video_update(struct aspeed_video *video, u32 reg, u32 clear,
u32 bits)
{
@@ -450,6 +477,16 @@ static void aspeed_video_write(struct aspeed_video *video, u32 reg, u32 val)
readl(video->base + reg));
}
+static void update_perf(struct aspeed_video_perf *p)
+{
+ p->duration =
+ ktime_to_ms(ktime_sub(ktime_get(), p->last_sample));
+ p->totaltime += p->duration;
+
+ p->duration_max = max(p->duration, p->duration_max);
+ p->duration_min = min(p->duration, p->duration_min);
+}
+
static int aspeed_video_start_frame(struct aspeed_video *video)
{
dma_addr_t addr;
@@ -488,6 +525,8 @@ static int aspeed_video_start_frame(struct aspeed_video *video)
aspeed_video_update(video, VE_INTERRUPT_CTRL, 0,
VE_INTERRUPT_COMP_COMPLETE);
+ video->perf.last_sample = ktime_get();
+
aspeed_video_update(video, VE_SEQ_CTRL, 0,
VE_SEQ_CTRL_TRIG_CAPTURE | VE_SEQ_CTRL_TRIG_COMP);
@@ -564,6 +603,12 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 sts = aspeed_video_read(video, VE_INTERRUPT_STATUS);
/*
+ * Hardware sometimes asserts interrupts that we haven't actually
+ * enabled; ignore them if so.
+ */
+ sts &= aspeed_video_read(video, VE_INTERRUPT_CTRL);
+
+ /*
* Resolution changed or signal was lost; reset the engine and
* re-initialize
*/
@@ -597,6 +642,8 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
u32 frame_size = aspeed_video_read(video,
video->comp_size_read);
+ update_perf(&video->perf);
+
spin_lock(&video->lock);
clear_bit(VIDEO_FRAME_INPRG, &video->flags);
buf = list_first_entry_or_null(&video->buffers,
@@ -629,16 +676,6 @@ static irqreturn_t aspeed_video_irq(int irq, void *arg)
aspeed_video_start_frame(video);
}
- /*
- * CAPTURE_COMPLETE and FRAME_COMPLETE interrupts come even when these
- * are disabled in the VE_INTERRUPT_CTRL register so clear them to
- * prevent unnecessary interrupt calls.
- */
- if (sts & VE_INTERRUPT_CAPTURE_COMPLETE)
- sts &= ~VE_INTERRUPT_CAPTURE_COMPLETE;
- if (sts & VE_INTERRUPT_FRAME_COMPLETE)
- sts &= ~VE_INTERRUPT_FRAME_COMPLETE;
-
return sts ? IRQ_NONE : IRQ_HANDLED;
}
@@ -764,6 +801,7 @@ static void aspeed_video_get_resolution(struct aspeed_video *video)
det->width = MIN_WIDTH;
det->height = MIN_HEIGHT;
video->v4l2_input_status = V4L2_IN_ST_NO_SIGNAL;
+ memset(&video->perf, 0, sizeof(video->perf));
do {
if (tries) {
@@ -1293,7 +1331,7 @@ static void aspeed_video_update_jpeg_quality(struct aspeed_video *video)
static void aspeed_video_update_subsampling(struct aspeed_video *video)
{
if (video->jpeg.virt)
- aspeed_video_init_jpeg_table(video->jpeg.virt, video->yuv420);
+ aspeed_video_update_jpeg_table(video->jpeg.virt, video->yuv420);
if (video->yuv420)
aspeed_video_update(video, VE_SEQ_CTRL, 0, VE_SEQ_CTRL_YUV420);
@@ -1454,6 +1492,8 @@ static int aspeed_video_start_streaming(struct vb2_queue *q,
struct aspeed_video *video = vb2_get_drv_priv(q);
video->sequence = 0;
+ video->perf.duration_max = 0;
+ video->perf.duration_min = 0xffffffff;
rc = aspeed_video_start_frame(video);
if (rc) {
@@ -1521,6 +1561,71 @@ static const struct vb2_ops aspeed_video_vb2_ops = {
.buf_queue = aspeed_video_buf_queue,
};
+#ifdef CONFIG_DEBUG_FS
+static int aspeed_video_debugfs_show(struct seq_file *s, void *data)
+{
+ struct aspeed_video *v = s->private;
+
+ seq_puts(s, "\n");
+
+ seq_printf(s, " %-20s:\t%s\n", "Signal",
+ v->v4l2_input_status ? "Unlock" : "Lock");
+ seq_printf(s, " %-20s:\t%d\n", "Width", v->pix_fmt.width);
+ seq_printf(s, " %-20s:\t%d\n", "Height", v->pix_fmt.height);
+ seq_printf(s, " %-20s:\t%d\n", "FRC", v->frame_rate);
+
+ seq_puts(s, "\n");
+
+ seq_puts(s, "Performance:\n");
+ seq_printf(s, " %-20s:\t%d\n", "Frame#", v->sequence);
+ seq_printf(s, " %-20s:\n", "Frame Duration(ms)");
+ seq_printf(s, " %-18s:\t%d\n", "Now", v->perf.duration);
+ seq_printf(s, " %-18s:\t%d\n", "Min", v->perf.duration_min);
+ seq_printf(s, " %-18s:\t%d\n", "Max", v->perf.duration_max);
+ seq_printf(s, " %-20s:\t%d\n", "FPS", 1000 / (v->perf.totaltime / v->sequence));
+
+ return 0;
+}
+
+static int aspeed_video_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aspeed_video_debugfs_show, inode->i_private);
+}
+
+static const struct file_operations aspeed_video_debugfs_ops = {
+ .owner = THIS_MODULE,
+ .open = aspeed_video_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static struct dentry *debugfs_entry;
+
+static void aspeed_video_debugfs_remove(struct aspeed_video *video)
+{
+ debugfs_remove_recursive(debugfs_entry);
+ debugfs_entry = NULL;
+}
+
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ debugfs_entry = debugfs_create_file(DEVICE_NAME, 0444, NULL,
+ video,
+ &aspeed_video_debugfs_ops);
+ if (!debugfs_entry)
+ aspeed_video_debugfs_remove(video);
+
+ return !debugfs_entry ? -EIO : 0;
+}
+#else
+static void aspeed_video_debugfs_remove(struct aspeed_video *video) { }
+static int aspeed_video_debugfs_create(struct aspeed_video *video)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
static int aspeed_video_setup_video(struct aspeed_video *video)
{
const u64 mask = ~(BIT(V4L2_JPEG_CHROMA_SUBSAMPLING_444) |
@@ -1725,6 +1830,10 @@ static int aspeed_video_probe(struct platform_device *pdev)
return rc;
}
+ rc = aspeed_video_debugfs_create(video);
+ if (rc)
+ dev_err(video->dev, "debugfs create failed\n");
+
return 0;
}
@@ -1736,6 +1845,8 @@ static int aspeed_video_remove(struct platform_device *pdev)
aspeed_video_off(video);
+ aspeed_video_debugfs_remove(video);
+
clk_unprepare(video->vclk);
clk_unprepare(video->eclk);
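
The FPS figure in the new debugfs output follows from the accumulated frame durations: the mean frame time is perf.totaltime / sequence in milliseconds, and 1000 divided by that mean gives frames per second (for example, 1500 ms spent on 100 frames is 15 ms per frame, about 66 fps). A standalone sketch of the same arithmetic, with zero guards added only so the example is safe for arbitrary inputs (the driver computes it inline without them):

static u32 example_average_fps(u32 totaltime_ms, u32 frames)
{
        u32 mean_ms;

        if (!frames)
                return 0;

        mean_ms = totaltime_ms / frames;        /* mean frame duration in ms */
        if (!mean_ms)
                return 0;

        return 1000 / mean_ms;                  /* frames per second */
}
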
diff --git a/drivers/media/platform/atmel/atmel-isc-base.c b/drivers/media/platform/atmel/atmel-isc-base.c
index 136ab7cf36ed..660cd0ab6749 100644
--- a/drivers/media/platform/atmel/atmel-isc-base.c
+++ b/drivers/media/platform/atmel/atmel-isc-base.c
@@ -123,11 +123,9 @@ static int isc_clk_prepare(struct clk_hw *hw)
struct isc_clk *isc_clk = to_isc_clk(hw);
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return ret;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return ret;
return isc_wait_clk_stable(hw);
}
@@ -138,8 +136,7 @@ static void isc_clk_unprepare(struct clk_hw *hw)
isc_wait_clk_stable(hw);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
}
static int isc_clk_enable(struct clk_hw *hw)
@@ -186,16 +183,13 @@ static int isc_clk_is_enabled(struct clk_hw *hw)
u32 status;
int ret;
- if (isc_clk->id == ISC_ISPCK) {
- ret = pm_runtime_resume_and_get(isc_clk->dev);
- if (ret < 0)
- return 0;
- }
+ ret = pm_runtime_resume_and_get(isc_clk->dev);
+ if (ret < 0)
+ return 0;
regmap_read(isc_clk->regmap, ISC_CLKSR, &status);
- if (isc_clk->id == ISC_ISPCK)
- pm_runtime_put_sync(isc_clk->dev);
+ pm_runtime_put_sync(isc_clk->dev);
return status & ISC_CLK(isc_clk->id) ? 1 : 0;
}
@@ -325,6 +319,9 @@ static int isc_clk_register(struct isc_device *isc, unsigned int id)
const char *parent_names[3];
int num_parents;
+ if (id == ISC_ISPCK && !isc->ispck_required)
+ return 0;
+
num_parents = of_clk_get_parent_count(np);
if (num_parents < 1 || num_parents > 3)
return -EINVAL;
@@ -2222,8 +2219,8 @@ void isc_subdev_cleanup(struct isc_device *isc)
struct isc_subdev_entity *subdev_entity;
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
- v4l2_async_notifier_unregister(&subdev_entity->notifier);
- v4l2_async_notifier_cleanup(&subdev_entity->notifier);
+ v4l2_async_nf_unregister(&subdev_entity->notifier);
+ v4l2_async_nf_cleanup(&subdev_entity->notifier);
}
INIT_LIST_HEAD(&isc->subdev_entities);
diff --git a/drivers/media/platform/atmel/atmel-isc.h b/drivers/media/platform/atmel/atmel-isc.h
index 19cc60dfcbe0..2bfcb135ef13 100644
--- a/drivers/media/platform/atmel/atmel-isc.h
+++ b/drivers/media/platform/atmel/atmel-isc.h
@@ -178,6 +178,7 @@ struct isc_reg_offsets {
* @hclock: Hclock clock input (refer datasheet)
* @ispck: iscpck clock (refer datasheet)
* @isc_clks: ISC clocks
+ * @ispck_required: ISC requires ISP Clock initialization
* @dcfg: DMA master configuration, architecture dependent
*
* @dev: Registered device driver
@@ -252,6 +253,7 @@ struct isc_device {
struct clk *hclock;
struct clk *ispck;
struct isc_clk isc_clks[2];
+ bool ispck_required;
u32 dcfg;
struct device *dev;
diff --git a/drivers/media/platform/atmel/atmel-isi.c b/drivers/media/platform/atmel/atmel-isi.c
index 095d80c4f59e..4d15814e4481 100644
--- a/drivers/media/platform/atmel/atmel-isi.c
+++ b/drivers/media/platform/atmel/atmel-isi.c
@@ -1159,12 +1159,11 @@ static int isi_graph_init(struct atmel_isi *isi)
if (!ep)
return -EINVAL;
- v4l2_async_notifier_init(&isi->notifier);
+ v4l2_async_nf_init(&isi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isi->notifier,
- of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&isi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
@@ -1172,10 +1171,10 @@ static int isi_graph_init(struct atmel_isi *isi)
isi->notifier.ops = &isi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&isi->v4l2_dev, &isi->notifier);
+ ret = v4l2_async_nf_register(&isi->v4l2_dev, &isi->notifier);
if (ret < 0) {
dev_err(isi->dev, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
return ret;
}
@@ -1327,8 +1326,8 @@ static int atmel_isi_remove(struct platform_device *pdev)
isi->p_fb_descriptors,
isi->fb_descriptors_phys);
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&isi->notifier);
- v4l2_async_notifier_cleanup(&isi->notifier);
+ v4l2_async_nf_unregister(&isi->notifier);
+ v4l2_async_nf_cleanup(&isi->notifier);
v4l2_device_unregister(&isi->v4l2_dev);
return 0;
diff --git a/drivers/media/platform/atmel/atmel-sama5d2-isc.c b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
index b66f1d174e9d..1b2063cce0f7 100644
--- a/drivers/media/platform/atmel/atmel-sama5d2-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama5d2-isc.c
@@ -454,6 +454,9 @@ static int atmel_isc_probe(struct platform_device *pdev)
/* sama5d2-isc - 8 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS8 | ISC_DCFG_CMBSIZE_BEATS8;
+ /* sama5d2-isc: ISPCK is mandatory */
+ isc->ispck_required = true;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -476,22 +479,6 @@ static int atmel_isc_probe(struct platform_device *pdev)
dev_err(dev, "failed to init isc clock: %d\n", ret);
goto unprepare_hclk;
}
-
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
@@ -512,13 +499,14 @@ static int atmel_isc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -530,8 +518,8 @@ static int atmel_isc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -545,19 +533,35 @@ static int atmel_isc_probe(struct platform_device *pdev)
pm_runtime_enable(dev);
pm_request_idle(dev);
+ isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
+
+ ret = clk_prepare_enable(isc->ispck);
+ if (ret) {
+ dev_err(dev, "failed to enable ispck: %d\n", ret);
+ goto cleanup_subdev;
+ }
+
+ /* ispck should be greater or equal to hclock */
+ ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
+ if (ret) {
+ dev_err(dev, "failed to set ispck rate: %d\n", ret);
+ goto unprepare_clk;
+ }
+
regmap_read(isc->regmap, ISC_VERSION + isc->offsets.version, &ver);
dev_info(dev, "Microchip ISC version %x\n", ver);
return 0;
+unprepare_clk:
+ clk_disable_unprepare(isc->ispck);
+
cleanup_subdev:
isc_subdev_cleanup(isc);
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
diff --git a/drivers/media/platform/atmel/atmel-sama7g5-isc.c b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
index f2785131ff56..5d1c76f680f3 100644
--- a/drivers/media/platform/atmel/atmel-sama7g5-isc.c
+++ b/drivers/media/platform/atmel/atmel-sama7g5-isc.c
@@ -447,6 +447,9 @@ static int microchip_xisc_probe(struct platform_device *pdev)
/* sama7g5-isc RAM access port is full AXI4 - 32 bits per beat */
isc->dcfg = ISC_DCFG_YMBSIZE_BEATS32 | ISC_DCFG_CMBSIZE_BEATS32;
+ /* sama7g5-isc : ISPCK does not exist, ISC is clocked by MCK */
+ isc->ispck_required = false;
+
ret = isc_pipeline_init(isc);
if (ret)
return ret;
@@ -470,25 +473,10 @@ static int microchip_xisc_probe(struct platform_device *pdev)
goto unprepare_hclk;
}
- isc->ispck = isc->isc_clks[ISC_ISPCK].clk;
-
- ret = clk_prepare_enable(isc->ispck);
- if (ret) {
- dev_err(dev, "failed to enable ispck: %d\n", ret);
- goto unprepare_hclk;
- }
-
- /* ispck should be greater or equal to hclock */
- ret = clk_set_rate(isc->ispck, clk_get_rate(isc->hclock));
- if (ret) {
- dev_err(dev, "failed to set ispck rate: %d\n", ret);
- goto unprepare_clk;
- }
-
ret = v4l2_device_register(dev, &isc->v4l2_dev);
if (ret) {
dev_err(dev, "unable to register v4l2 device.\n");
- goto unprepare_clk;
+ goto unprepare_hclk;
}
ret = xisc_parse_dt(dev, isc);
@@ -505,13 +493,14 @@ static int microchip_xisc_probe(struct platform_device *pdev)
list_for_each_entry(subdev_entity, &isc->subdev_entities, list) {
struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode =
+ of_fwnode_handle(subdev_entity->epn);
- v4l2_async_notifier_init(&subdev_entity->notifier);
+ v4l2_async_nf_init(&subdev_entity->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &subdev_entity->notifier,
- of_fwnode_handle(subdev_entity->epn),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&subdev_entity->notifier,
+ fwnode,
+ struct v4l2_async_subdev);
of_node_put(subdev_entity->epn);
subdev_entity->epn = NULL;
@@ -523,8 +512,8 @@ static int microchip_xisc_probe(struct platform_device *pdev)
subdev_entity->notifier.ops = &isc_async_ops;
- ret = v4l2_async_notifier_register(&isc->v4l2_dev,
- &subdev_entity->notifier);
+ ret = v4l2_async_nf_register(&isc->v4l2_dev,
+ &subdev_entity->notifier);
if (ret) {
dev_err(dev, "fail to register async notifier\n");
goto cleanup_subdev;
@@ -549,8 +538,6 @@ cleanup_subdev:
unregister_v4l2_device:
v4l2_device_unregister(&isc->v4l2_dev);
-unprepare_clk:
- clk_disable_unprepare(isc->ispck);
unprepare_hclk:
clk_disable_unprepare(isc->hclock);
diff --git a/drivers/media/platform/cadence/cdns-csi2rx.c b/drivers/media/platform/cadence/cdns-csi2rx.c
index f2b4ddd31177..cc3ebb0d96f6 100644
--- a/drivers/media/platform/cadence/cdns-csi2rx.c
+++ b/drivers/media/platform/cadence/cdns-csi2rx.c
@@ -279,13 +279,11 @@ static const struct v4l2_async_notifier_operations csi2rx_notifier_ops = {
static int csi2rx_get_resources(struct csi2rx_priv *csi2rx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned char i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2rx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2rx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2rx->base))
return PTR_ERR(csi2rx->base);
@@ -401,21 +399,19 @@ static int csi2rx_parse_dt(struct csi2rx_priv *csi2rx)
return -EINVAL;
}
- v4l2_async_notifier_init(&csi2rx->notifier);
+ v4l2_async_nf_init(&csi2rx->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi2rx->notifier,
- fwh,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2rx->notifier, fwh,
+ struct v4l2_async_subdev);
of_node_put(ep);
if (IS_ERR(asd))
return PTR_ERR(asd);
csi2rx->notifier.ops = &csi2rx_notifier_ops;
- ret = v4l2_async_subdev_notifier_register(&csi2rx->subdev,
- &csi2rx->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi2rx->subdev, &csi2rx->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
return ret;
}
@@ -471,7 +467,7 @@ static int csi2rx_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&csi2rx->notifier);
+ v4l2_async_nf_cleanup(&csi2rx->notifier);
err_free_priv:
kfree(csi2rx);
return ret;
diff --git a/drivers/media/platform/cadence/cdns-csi2tx.c b/drivers/media/platform/cadence/cdns-csi2tx.c
index 5a67fba73ddd..8f8c36056354 100644
--- a/drivers/media/platform/cadence/cdns-csi2tx.c
+++ b/drivers/media/platform/cadence/cdns-csi2tx.c
@@ -433,13 +433,11 @@ static const struct v4l2_subdev_ops csi2tx_subdev_ops = {
static int csi2tx_get_resources(struct csi2tx_priv *csi2tx,
struct platform_device *pdev)
{
- struct resource *res;
unsigned int i;
u32 dev_cfg;
int ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi2tx->base = devm_ioremap_resource(&pdev->dev, res);
+ csi2tx->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi2tx->base))
return PTR_ERR(csi2tx->base);
diff --git a/drivers/media/platform/coda/imx-vdoa.c b/drivers/media/platform/coda/imx-vdoa.c
index 8bc0d8371819..6996d4571e36 100644
--- a/drivers/media/platform/coda/imx-vdoa.c
+++ b/drivers/media/platform/coda/imx-vdoa.c
@@ -301,8 +301,7 @@ static int vdoa_probe(struct platform_device *pdev)
return PTR_ERR(vdoa->vdoa_clk);
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vdoa->regs = devm_ioremap_resource(vdoa->dev, res);
+ vdoa->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vdoa->regs))
return PTR_ERR(vdoa->regs);
diff --git a/drivers/media/platform/davinci/vpbe_venc.c b/drivers/media/platform/davinci/vpbe_venc.c
index bde241c26d79..4c8e31de12b1 100644
--- a/drivers/media/platform/davinci/vpbe_venc.c
+++ b/drivers/media/platform/davinci/vpbe_venc.c
@@ -621,7 +621,6 @@ static int venc_probe(struct platform_device *pdev)
{
const struct platform_device_id *pdev_id;
struct venc_state *venc;
- struct resource *res;
if (!pdev->dev.platform_data) {
dev_err(&pdev->dev, "No platform data for VENC sub device");
@@ -640,16 +639,12 @@ static int venc_probe(struct platform_device *pdev)
venc->pdev = &pdev->dev;
venc->pdata = pdev->dev.platform_data;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- venc->venc_base = devm_ioremap_resource(&pdev->dev, res);
+ venc->venc_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(venc->venc_base))
return PTR_ERR(venc->venc_base);
if (venc->venc_type != VPBE_VERSION_1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- venc->vdaccfg_reg = devm_ioremap_resource(&pdev->dev, res);
+ venc->vdaccfg_reg = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(venc->vdaccfg_reg))
return PTR_ERR(venc->vdaccfg_reg);
}
diff --git a/drivers/media/platform/davinci/vpif.c b/drivers/media/platform/davinci/vpif.c
index f1ce10828b8e..5a89d885d0e3 100644
--- a/drivers/media/platform/davinci/vpif.c
+++ b/drivers/media/platform/davinci/vpif.c
@@ -425,12 +425,11 @@ EXPORT_SYMBOL(vpif_channel_getfid);
static int vpif_probe(struct platform_device *pdev)
{
- static struct resource *res, *res_irq;
+ static struct resource *res_irq;
struct platform_device *pdev_capture, *pdev_display;
struct device_node *endpoint = NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vpif_base = devm_ioremap_resource(&pdev->dev, res);
+ vpif_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vpif_base))
return PTR_ERR(vpif_base);
diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
index c034e25dd9aa..ae92e2c206d0 100644
--- a/drivers/media/platform/davinci/vpif_capture.c
+++ b/drivers/media/platform/davinci/vpif_capture.c
@@ -1506,7 +1506,7 @@ vpif_capture_get_pdata(struct platform_device *pdev)
struct vpif_capture_chan_config *chan;
unsigned int i;
- v4l2_async_notifier_init(&vpif_obj.notifier);
+ v4l2_async_nf_init(&vpif_obj.notifier);
/*
* DT boot: OF node from parent device contains
@@ -1582,9 +1582,10 @@ vpif_capture_get_pdata(struct platform_device *pdev)
dev_dbg(&pdev->dev, "Remote device %pOF found\n", rem);
sdinfo->name = rem->full_name;
- pdata->asd[i] = v4l2_async_notifier_add_fwnode_subdev(
- &vpif_obj.notifier, of_fwnode_handle(rem),
- struct v4l2_async_subdev);
+ pdata->asd[i] = v4l2_async_nf_add_fwnode(&vpif_obj.notifier,
+ of_fwnode_handle(rem),
+ struct
+ v4l2_async_subdev);
if (IS_ERR(pdata->asd[i]))
goto err_cleanup;
@@ -1602,7 +1603,7 @@ done:
err_cleanup:
of_node_put(rem);
of_node_put(endpoint);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return NULL;
}
@@ -1692,8 +1693,8 @@ static __init int vpif_probe(struct platform_device *pdev)
goto probe_subdev_out;
} else {
vpif_obj.notifier.ops = &vpif_async_ops;
- err = v4l2_async_notifier_register(&vpif_obj.v4l2_dev,
- &vpif_obj.notifier);
+ err = v4l2_async_nf_register(&vpif_obj.v4l2_dev,
+ &vpif_obj.notifier);
if (err) {
vpif_err("Error registering async notifier\n");
err = -EINVAL;
@@ -1711,7 +1712,7 @@ vpif_unregister:
vpif_free:
free_vpif_objs();
cleanup:
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
return err;
}
@@ -1727,8 +1728,8 @@ static int vpif_remove(struct platform_device *device)
struct channel_obj *ch;
int i;
- v4l2_async_notifier_unregister(&vpif_obj.notifier);
- v4l2_async_notifier_cleanup(&vpif_obj.notifier);
+ v4l2_async_nf_unregister(&vpif_obj.notifier);
+ v4l2_async_nf_cleanup(&vpif_obj.notifier);
v4l2_device_unregister(&vpif_obj.v4l2_dev);
kfree(vpif_obj.sd);
diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
index 7000f0bf0b35..d15b991ab17c 100644
--- a/drivers/media/platform/davinci/vpss.c
+++ b/drivers/media/platform/davinci/vpss.c
@@ -392,7 +392,6 @@ EXPORT_SYMBOL(dm365_vpss_set_pg_frame_size);
static int vpss_probe(struct platform_device *pdev)
{
- struct resource *res;
char *platform_name;
if (!pdev->dev.platform_data) {
@@ -413,17 +412,12 @@ static int vpss_probe(struct platform_device *pdev)
}
dev_info(&pdev->dev, "%s vpss probed\n", platform_name);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- oper_cfg.vpss_regs_base0 = devm_ioremap_resource(&pdev->dev, res);
+ oper_cfg.vpss_regs_base0 = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(oper_cfg.vpss_regs_base0))
return PTR_ERR(oper_cfg.vpss_regs_base0);
if (oper_cfg.platform == DM355 || oper_cfg.platform == DM365) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
- oper_cfg.vpss_regs_base1 = devm_ioremap_resource(&pdev->dev,
- res);
+ oper_cfg.vpss_regs_base1 = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(oper_cfg.vpss_regs_base1))
return PTR_ERR(oper_cfg.vpss_regs_base1);
}
diff --git a/drivers/media/platform/exynos-gsc/gsc-core.c b/drivers/media/platform/exynos-gsc/gsc-core.c
index f49f3322f835..cfd6ae70b8d8 100644
--- a/drivers/media/platform/exynos-gsc/gsc-core.c
+++ b/drivers/media/platform/exynos-gsc/gsc-core.c
@@ -1137,8 +1137,7 @@ static int gsc_probe(struct platform_device *pdev)
spin_lock_init(&gsc->slock);
mutex_init(&gsc->lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- gsc->regs = devm_ioremap_resource(dev, res);
+ gsc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(gsc->regs))
return PTR_ERR(gsc->regs);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index fa648721eaab..544b54e428c9 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -464,9 +464,9 @@ static int fimc_md_parse_one_endpoint(struct fimc_md *fmd,
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &fmd->subdev_notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&fmd->subdev_notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -557,7 +557,7 @@ rpm_put:
cleanup:
of_node_put(ports);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
pm_runtime_put(fmd->pmf);
return ret;
}
@@ -1481,7 +1481,7 @@ static int fimc_md_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fmd);
- v4l2_async_notifier_init(&fmd->subdev_notifier);
+ v4l2_async_nf_init(&fmd->subdev_notifier);
ret = fimc_md_register_platform_entities(fmd, dev->of_node);
if (ret)
@@ -1509,8 +1509,8 @@ static int fimc_md_probe(struct platform_device *pdev)
fmd->subdev_notifier.ops = &subdev_notifier_ops;
fmd->num_sensors = 0;
- ret = v4l2_async_notifier_register(&fmd->v4l2_dev,
- &fmd->subdev_notifier);
+ ret = v4l2_async_nf_register(&fmd->v4l2_dev,
+ &fmd->subdev_notifier);
if (ret)
goto err_clk_p;
}
@@ -1522,7 +1522,7 @@ err_clk_p:
err_attr:
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
err_cleanup:
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
err_m_ent:
fimc_md_unregister_entities(fmd);
err_clk:
@@ -1542,8 +1542,8 @@ static int fimc_md_remove(struct platform_device *pdev)
return 0;
fimc_md_unregister_clk_provider(fmd);
- v4l2_async_notifier_unregister(&fmd->subdev_notifier);
- v4l2_async_notifier_cleanup(&fmd->subdev_notifier);
+ v4l2_async_nf_unregister(&fmd->subdev_notifier);
+ v4l2_async_nf_cleanup(&fmd->subdev_notifier);
v4l2_device_unregister(&fmd->v4l2_dev);
device_remove_file(&pdev->dev, &dev_attr_subdev_conf_mode);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index 32b23329b033..27a214936cb0 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -766,7 +766,6 @@ static int s5pcsis_probe(struct platform_device *pdev)
const struct of_device_id *of_id;
const struct csis_drvdata *drv_data;
struct device *dev = &pdev->dev;
- struct resource *mem_res;
struct csis_state *state;
int ret = -ENOMEM;
int i;
@@ -800,8 +799,7 @@ static int s5pcsis_probe(struct platform_device *pdev)
if (IS_ERR(state->phy))
return PTR_ERR(state->phy);
- mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- state->regs = devm_ioremap_resource(dev, mem_res);
+ state->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.c b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
index 755138063ee6..4ca96cf9def7 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.c
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.c
@@ -49,6 +49,7 @@
#include <linux/slab.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/string.h>
@@ -282,6 +283,20 @@ static const unsigned char jpeg_sos_maximal[] = {
0x11, 0x04, 0x11, 0x00, 0x3F, 0x00
};
+static const unsigned char jpeg_image_red[] = {
+ 0xFC, 0x5F, 0xA2, 0xBF, 0xCA, 0x73, 0xFE, 0xFE,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0,
+ 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00,
+ 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02,
+ 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00, 0x28,
+ 0xA0, 0x02, 0x8A, 0x00, 0x28, 0xA0, 0x02, 0x8A,
+ 0x00, 0x28, 0xA0, 0x02, 0x8A, 0x00
+};
+
static const unsigned char jpeg_eoi[] = {
0xFF, 0xD9
};
@@ -575,6 +590,10 @@ static irqreturn_t mxc_jpeg_dec_irq(int irq, void *priv)
dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
+ if (!dst_buf || !src_buf) {
+ dev_err(dev, "No source or destination buffer.\n");
+ goto job_unlock;
+ }
jpeg_src_buf = vb2_to_mxc_buf(&src_buf->vb2_buf);
if (dec_ret & SLOT_STATUS_ENC_CONFIG_ERR) {
@@ -760,6 +779,9 @@ static unsigned int mxc_jpeg_setup_cfg_stream(void *cfg_stream_vaddr,
sos = (struct mxc_jpeg_sos *)(cfg + offset);
offset += mxc_jpeg_fixup_sos(sos, fourcc);
+ memcpy(cfg + offset, jpeg_image_red, sizeof(jpeg_image_red));
+ offset += sizeof(jpeg_image_red);
+
memcpy(cfg + offset, jpeg_eoi, sizeof(jpeg_eoi));
offset += sizeof(jpeg_eoi);
@@ -795,6 +817,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
img_fmt = mxc_jpeg_fourcc_to_imgfmt(q_data_cap->fmt->fourcc);
desc->stm_ctrl &= ~STM_CTRL_IMAGE_FORMAT(0xF); /* clear image format */
desc->stm_ctrl |= STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->line_pitch = q_data_cap->bytesperline[0];
mxc_jpeg_addrs(desc, dst_buf, src_buf, 0);
mxc_jpeg_set_bufsize(desc, ALIGN(vb2_plane_size(src_buf, 0), 1024));
@@ -821,6 +844,7 @@ static void mxc_jpeg_config_dec_desc(struct vb2_buffer *out_buf,
cfg_desc->imgsize |= MXC_JPEG_MIN_HEIGHT;
cfg_desc->line_pitch = MXC_JPEG_MIN_WIDTH * 2;
cfg_desc->stm_ctrl = STM_CTRL_IMAGE_FORMAT(MXC_JPEG_YUV422);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
cfg_desc->stm_bufbase = cfg_stream_handle;
cfg_desc->stm_bufsize = ALIGN(*cfg_size, 1024);
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -864,6 +888,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
cfg_desc->stm_bufsize = 0x0;
cfg_desc->imgsize = 0;
cfg_desc->stm_ctrl = STM_CTRL_CONFIG_MOD(1);
+ cfg_desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
desc->next_descpt_ptr = 0; /* end of chain */
@@ -878,6 +903,7 @@ static void mxc_jpeg_config_enc_desc(struct vb2_buffer *out_buf,
dev_err(jpeg->dev, "No valid image format detected\n");
desc->stm_ctrl = STM_CTRL_CONFIG_MOD(0) |
STM_CTRL_IMAGE_FORMAT(img_fmt);
+ desc->stm_ctrl |= STM_CTRL_BITBUF_PTR_CLR(1);
mxc_jpeg_addrs(desc, src_buf, dst_buf, 0);
dev_dbg(jpeg->dev, "cfg_desc:\n");
print_descriptor_info(jpeg->dev, cfg_desc);
@@ -933,11 +959,6 @@ static void mxc_jpeg_device_run(void *priv)
return;
}
- /*
- * TODO: this reset should be removed, once we figure out
- * how to overcome hardware issues both on encoder and decoder
- */
- mxc_jpeg_sw_reset(reg);
mxc_jpeg_enable(reg);
mxc_jpeg_set_l_endian(reg, 1);
@@ -1058,10 +1079,17 @@ static int mxc_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mxc_jpeg_ctx *ctx = vb2_get_drv_priv(q);
struct mxc_jpeg_q_data *q_data = mxc_jpeg_get_q_data(ctx, q->type);
+ int ret;
dev_dbg(ctx->mxc_jpeg->dev, "Start streaming ctx=%p", ctx);
q_data->sequence = 0;
+ ret = pm_runtime_resume_and_get(ctx->mxc_jpeg->dev);
+ if (ret < 0) {
+ dev_err(ctx->mxc_jpeg->dev, "Failed to power up jpeg\n");
+ return ret;
+ }
+
return 0;
}
@@ -1079,9 +1107,10 @@ static void mxc_jpeg_stop_streaming(struct vb2_queue *q)
else
vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
if (!vbuf)
- return;
+ break;
v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
}
+ pm_runtime_put_sync(&ctx->mxc_jpeg->pdev->dev);
}
static int mxc_jpeg_valid_comp_id(struct device *dev,
@@ -1941,8 +1970,7 @@ static int mxc_jpeg_attach_pm_domains(struct mxc_jpeg_dev *jpeg)
jpeg->pd_link[i] = device_link_add(dev, jpeg->pd_dev[i],
DL_FLAG_STATELESS |
- DL_FLAG_PM_RUNTIME |
- DL_FLAG_RPM_ACTIVE);
+ DL_FLAG_PM_RUNTIME);
if (!jpeg->pd_link[i]) {
ret = -EINVAL;
goto fail;
@@ -1959,7 +1987,6 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
{
struct mxc_jpeg_dev *jpeg;
struct device *dev = &pdev->dev;
- struct resource *res;
int dec_irq;
int ret;
int mode;
@@ -1982,8 +2009,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
goto err_irq;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->base_reg = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->base_reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->base_reg))
return PTR_ERR(jpeg->base_reg);
@@ -2007,6 +2033,19 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dev = dev;
jpeg->mode = mode;
+ /* Get clocks */
+ jpeg->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(jpeg->clk_ipg)) {
+ dev_err(dev, "failed to get clock: ipg\n");
+ goto err_clk;
+ }
+
+ jpeg->clk_per = devm_clk_get(dev, "per");
+ if (IS_ERR(jpeg->clk_per)) {
+ dev_err(dev, "failed to get clock: per\n");
+ goto err_clk;
+ }
+
ret = mxc_jpeg_attach_pm_domains(jpeg);
if (ret < 0) {
dev_err(dev, "failed to attach power domains %d\n", ret);
@@ -2075,6 +2114,7 @@ static int mxc_jpeg_probe(struct platform_device *pdev)
jpeg->dec_vdev->minor);
platform_set_drvdata(pdev, jpeg);
+ pm_runtime_enable(dev);
return 0;
@@ -2088,10 +2128,55 @@ err_m2m:
v4l2_device_unregister(&jpeg->v4l2_dev);
err_register:
+ mxc_jpeg_detach_pm_domains(jpeg);
+
err_irq:
+err_clk:
return ret;
}
+#ifdef CONFIG_PM
+static int mxc_jpeg_runtime_resume(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+ int ret;
+
+ ret = clk_prepare_enable(jpeg->clk_ipg);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: ipg\n");
+ goto err_ipg;
+ }
+
+ ret = clk_prepare_enable(jpeg->clk_per);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable clock: per\n");
+ goto err_per;
+ }
+
+ return 0;
+
+err_per:
+ clk_disable_unprepare(jpeg->clk_ipg);
+err_ipg:
+ return ret;
+}
+
+static int mxc_jpeg_runtime_suspend(struct device *dev)
+{
+ struct mxc_jpeg_dev *jpeg = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(jpeg->clk_ipg);
+ clk_disable_unprepare(jpeg->clk_per);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mxc_jpeg_pm_ops = {
+ SET_RUNTIME_PM_OPS(mxc_jpeg_runtime_suspend,
+ mxc_jpeg_runtime_resume, NULL)
+};
+
static int mxc_jpeg_remove(struct platform_device *pdev)
{
unsigned int slot;
@@ -2100,6 +2185,7 @@ static int mxc_jpeg_remove(struct platform_device *pdev)
for (slot = 0; slot < MXC_MAX_SLOTS; slot++)
mxc_jpeg_free_slot_data(jpeg, slot);
+ pm_runtime_disable(&pdev->dev);
video_unregister_device(jpeg->dec_vdev);
v4l2_m2m_release(jpeg->m2m_dev);
v4l2_device_unregister(&jpeg->v4l2_dev);
@@ -2116,6 +2202,7 @@ static struct platform_driver mxc_jpeg_driver = {
.driver = {
.name = "mxc-jpeg",
.of_match_table = mxc_jpeg_match,
+ .pm = &mxc_jpeg_pm_ops,
},
};
module_platform_driver(mxc_jpeg_driver);
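
Taken together, the mxc-jpeg changes move clock control into the new runtime-PM callbacks: the "ipg" and "per" clocks are looked up with devm_clk_get() in probe, enabled in runtime_resume (with unwinding if the second enable fails) and disabled in runtime_suspend, while pm_runtime_enable()/pm_runtime_disable() bound the driver's lifetime. As a hedged aside, the same two clocks could also be managed with the clk_bulk helpers; the sketch below only illustrates that API, it is not what the patch does, and example_jpeg is a placeholder.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/kernel.h>

	struct example_jpeg {
		struct clk_bulk_data clks[2];
	};

	static int example_get_clks(struct device *dev, struct example_jpeg *jpeg)
	{
		jpeg->clks[0].id = "ipg";	/* same clock names as above */
		jpeg->clks[1].id = "per";
		return devm_clk_bulk_get(dev, ARRAY_SIZE(jpeg->clks), jpeg->clks);
	}

	static int example_runtime_resume(struct device *dev)
	{
		struct example_jpeg *jpeg = dev_get_drvdata(dev);

		/* enables both clocks, unwinding automatically on failure */
		return clk_bulk_prepare_enable(ARRAY_SIZE(jpeg->clks), jpeg->clks);
	}

	static int example_runtime_suspend(struct device *dev)
	{
		struct example_jpeg *jpeg = dev_get_drvdata(dev);

		clk_bulk_disable_unprepare(ARRAY_SIZE(jpeg->clks), jpeg->clks);
		return 0;
	}
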
diff --git a/drivers/media/platform/imx-jpeg/mxc-jpeg.h b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
index 4c210852e876..9fb2a5aaa941 100644
--- a/drivers/media/platform/imx-jpeg/mxc-jpeg.h
+++ b/drivers/media/platform/imx-jpeg/mxc-jpeg.h
@@ -109,6 +109,8 @@ struct mxc_jpeg_dev {
spinlock_t hw_lock; /* hardware access lock */
unsigned int mode;
struct mutex lock; /* v4l2 ioctls serialization */
+ struct clk *clk_ipg;
+ struct clk *clk_per;
struct platform_device *pdev;
struct device *dev;
void __iomem *base_reg;
diff --git a/drivers/media/platform/imx-pxp.c b/drivers/media/platform/imx-pxp.c
index 4321edc0c23d..723b096fedd1 100644
--- a/drivers/media/platform/imx-pxp.c
+++ b/drivers/media/platform/imx-pxp.c
@@ -1636,7 +1636,6 @@ static int pxp_soft_reset(struct pxp_dev *dev)
static int pxp_probe(struct platform_device *pdev)
{
struct pxp_dev *dev;
- struct resource *res;
struct video_device *vfd;
int irq;
int ret;
@@ -1652,8 +1651,7 @@ static int pxp_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->mmio = devm_ioremap_resource(&pdev->dev, res);
+ dev->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->mmio))
return PTR_ERR(dev->mmio);
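
Several drivers in this series get the same mechanical conversion: the two-step platform_get_resource() + devm_ioremap_resource() lookup becomes a single devm_platform_ioremap_resource() call with identical error semantics. A reference sketch of the before/after pair; example_map_old/new are illustrative names only.

	#include <linux/platform_device.h>
	#include <linux/io.h>
	#include <linux/err.h>

	static void __iomem *example_map_old(struct platform_device *pdev)
	{
		struct resource *res;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		return devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	}

	static void __iomem *example_map_new(struct platform_device *pdev)
	{
		/* same thing in one call; still returns ERR_PTR() on failure */
		return devm_platform_ioremap_resource(pdev, 0);
	}
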
diff --git a/drivers/media/platform/marvell-ccic/cafe-driver.c b/drivers/media/platform/marvell-ccic/cafe-driver.c
index 9aa374fa8b36..b61b9d9551af 100644
--- a/drivers/media/platform/marvell-ccic/cafe-driver.c
+++ b/drivers/media/platform/marvell-ccic/cafe-driver.c
@@ -544,12 +544,11 @@ static int cafe_pci_probe(struct pci_dev *pdev,
if (ret)
goto out_pdown;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_i2c_subdev(&mcam->notifier,
- i2c_adapter_id(cam->i2c_adapter),
- ov7670_info.addr,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&mcam->notifier,
+ i2c_adapter_id(cam->i2c_adapter),
+ ov7670_info.addr, struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out_smbus_shutdown;
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 58f9463f3b8c..ad4a7922d0d7 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -1877,7 +1877,7 @@ int mccic_register(struct mcam_camera *cam)
cam->mbus_code = mcam_def_mbus_code;
cam->notifier.ops = &mccic_notify_ops;
- ret = v4l2_async_notifier_register(&cam->v4l2_dev, &cam->notifier);
+ ret = v4l2_async_nf_register(&cam->v4l2_dev, &cam->notifier);
if (ret < 0) {
cam_warn(cam, "failed to register a sensor notifier");
goto out;
@@ -1914,9 +1914,9 @@ int mccic_register(struct mcam_camera *cam)
return 0;
out:
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
return ret;
}
EXPORT_SYMBOL_GPL(mccic_register);
@@ -1936,9 +1936,9 @@ void mccic_shutdown(struct mcam_camera *cam)
if (cam->buffer_mode == B_vmalloc)
mcam_free_dma_bufs(cam);
v4l2_ctrl_handler_free(&cam->ctrl_handler);
- v4l2_async_notifier_unregister(&cam->notifier);
+ v4l2_async_nf_unregister(&cam->notifier);
v4l2_device_unregister(&cam->v4l2_dev);
- v4l2_async_notifier_cleanup(&cam->notifier);
+ v4l2_async_nf_cleanup(&cam->notifier);
}
EXPORT_SYMBOL_GPL(mccic_shutdown);
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index f2f09cea751d..343ab4f7d807 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -239,10 +239,10 @@ static int mmpcam_probe(struct platform_device *pdev)
if (!ep)
return -ENODEV;
- v4l2_async_notifier_init(&mcam->notifier);
+ v4l2_async_nf_init(&mcam->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&mcam->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&mcam->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
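
The three marvell-ccic files only follow a tree-wide rename of the v4l2-async helpers (v4l2_async_notifier_* becomes v4l2_async_nf_*); behaviour is unchanged. A registration sequence using the new names, mirroring the calls above; example_register_sensor is a placeholder and error unwinding is shortened.

	#include <linux/err.h>
	#include <media/v4l2-async.h>
	#include <media/v4l2-device.h>

	static int example_register_sensor(struct v4l2_device *v4l2_dev,
					   struct v4l2_async_notifier *notifier,
					   struct fwnode_handle *ep)
	{
		struct v4l2_async_subdev *asd;

		v4l2_async_nf_init(notifier);	/* was v4l2_async_notifier_init() */

		asd = v4l2_async_nf_add_fwnode_remote(notifier, ep,
						      struct v4l2_async_subdev);
		if (IS_ERR(asd))
			return PTR_ERR(asd);

		/* was v4l2_async_notifier_register(); cleanup is v4l2_async_nf_cleanup() */
		return v4l2_async_nf_register(v4l2_dev, notifier);
	}
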
diff --git a/drivers/media/platform/meson/ge2d/ge2d.c b/drivers/media/platform/meson/ge2d/ge2d.c
index a1393fefa8ae..ccda18e5a377 100644
--- a/drivers/media/platform/meson/ge2d/ge2d.c
+++ b/drivers/media/platform/meson/ge2d/ge2d.c
@@ -779,11 +779,7 @@ static int ge2d_s_ctrl(struct v4l2_ctrl *ctrl)
* If the rotation parameter changes the OUTPUT frames
* parameters, take them in account
*/
- if (fmt.width != ctx->out.pix_fmt.width ||
- fmt.height != ctx->out.pix_fmt.width ||
- fmt.bytesperline > ctx->out.pix_fmt.bytesperline ||
- fmt.sizeimage > ctx->out.pix_fmt.sizeimage)
- ctx->out.pix_fmt = fmt;
+ ctx->out.pix_fmt = fmt;
break;
}
@@ -926,7 +922,6 @@ static int ge2d_probe(struct platform_device *pdev)
struct reset_control *rst;
struct video_device *vfd;
struct meson_ge2d *ge2d;
- struct resource *res;
void __iomem *regs;
int ret = 0;
int irq;
@@ -941,8 +936,7 @@ static int ge2d_probe(struct platform_device *pdev)
ge2d->dev = &pdev->dev;
mutex_init(&ge2d->mutex);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- regs = devm_ioremap_resource(ge2d->dev, res);
+ regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
return PTR_ERR(regs);
diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
index a89c7b206eef..af994b9913a6 100644
--- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
+++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
@@ -1341,7 +1341,6 @@ static inline void mtk_jpeg_clk_release(struct mtk_jpeg_dev *jpeg)
static int mtk_jpeg_probe(struct platform_device *pdev)
{
struct mtk_jpeg_dev *jpeg;
- struct resource *res;
int jpeg_irq;
int ret;
@@ -1355,8 +1354,7 @@ static int mtk_jpeg_probe(struct platform_device *pdev)
jpeg->variant = of_device_get_match_data(jpeg->dev);
INIT_DELAYED_WORK(&jpeg->job_timeout_work, mtk_jpeg_job_timeout_work);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpeg->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->reg_base)) {
ret = PTR_ERR(jpeg->reg_base);
return ret;
diff --git a/drivers/media/platform/mtk-vcodec/Makefile b/drivers/media/platform/mtk-vcodec/Makefile
index 4618d43dbbc8..ca8e9e7a9c4e 100644
--- a/drivers/media/platform/mtk-vcodec/Makefile
+++ b/drivers/media/platform/mtk-vcodec/Makefile
@@ -7,10 +7,13 @@ obj-$(CONFIG_VIDEO_MEDIATEK_VCODEC) += mtk-vcodec-dec.o \
mtk-vcodec-dec-y := vdec/vdec_h264_if.o \
vdec/vdec_vp8_if.o \
vdec/vdec_vp9_if.o \
+ vdec/vdec_h264_req_if.o \
mtk_vcodec_dec_drv.o \
vdec_drv_if.o \
vdec_vpu_if.o \
mtk_vcodec_dec.o \
+ mtk_vcodec_dec_stateful.o \
+ mtk_vcodec_dec_stateless.o \
mtk_vcodec_dec_pm.o \
mtk-vcodec-enc-y := venc/venc_vp8_if.o \
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
index 56d86e59421e..2b334a8a81c6 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.c
@@ -16,68 +16,18 @@
#include "vdec_drv_if.h"
#include "mtk_vcodec_dec_pm.h"
-#define OUT_FMT_IDX 0
-#define CAP_FMT_IDX 3
-
-#define MTK_VDEC_MIN_W 64U
-#define MTK_VDEC_MIN_H 64U
#define DFT_CFG_WIDTH MTK_VDEC_MIN_W
#define DFT_CFG_HEIGHT MTK_VDEC_MIN_H
-static const struct mtk_video_fmt mtk_video_formats[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .type = MTK_FMT_DEC,
- .num_planes = 1,
- .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
- },
- {
- .fourcc = V4L2_PIX_FMT_MT21C,
- .type = MTK_FMT_FRAME,
- .num_planes = 2,
- },
-};
-
-static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP8,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
- {
- .fourcc = V4L2_PIX_FMT_VP9,
- .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
- MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
- },
-};
-
-#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
-#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
-
-static const struct mtk_video_fmt *mtk_vdec_find_format(struct v4l2_format *f)
+static const struct mtk_video_fmt *
+mtk_vdec_find_format(struct v4l2_format *f,
+ const struct mtk_vcodec_dec_pdata *dec_pdata)
{
const struct mtk_video_fmt *fmt;
unsigned int k;
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
+ for (k = 0; k < dec_pdata->num_formats; k++) {
+ fmt = &dec_pdata->vdec_formats[k];
if (fmt->fourcc == f->fmt.pix_mp.pixelformat)
return fmt;
}
@@ -94,408 +44,17 @@ static struct mtk_q_data *mtk_vdec_get_q_data(struct mtk_vcodec_ctx *ctx,
return &ctx->q_data[MTK_Q_DATA_DST];
}
-/*
- * This function tries to clean all display buffers, the buffers will return
- * in display order.
- * Note the buffers returned from codec driver may still be in driver's
- * reference list.
- */
-static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vdec_fb *disp_frame_buffer = NULL;
- struct mtk_video_dec_buf *dstbuf;
- struct vb2_v4l2_buffer *vb;
-
- mtk_v4l2_debug(3, "[%d]", ctx->id);
- if (vdec_if_get_param(ctx,
- GET_PARAM_DISP_FRAME_BUFFER,
- &disp_frame_buffer)) {
- mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER",
- ctx->id);
- return NULL;
- }
-
- if (disp_frame_buffer == NULL) {
- mtk_v4l2_debug(3, "No display frame buffer");
- return NULL;
- }
-
- dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- vb2_set_plane_payload(&vb->vb2_buf, 0,
- ctx->picinfo.fb_sz[0]);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&vb->vb2_buf, 1,
- ctx->picinfo.fb_sz[1]);
-
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to done_list %d",
- ctx->id, disp_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
-
- v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
- ctx->decoded_frame_cnt++;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-/*
- * This function tries to clean all capture buffers that are not used as
- * reference buffers by codec driver any more
- * In this case, we need re-queue buffer to vb2 buffer if user space
- * already returns this buffer to v4l2 or this buffer is just the output of
- * previous sps/pps/resolution change decode, or do nothing if user
- * space still owns this buffer
- */
-static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct mtk_video_dec_buf *dstbuf;
- struct vdec_fb *free_frame_buffer = NULL;
- struct vb2_v4l2_buffer *vb;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_FREE_FRAME_BUFFER,
- &free_frame_buffer)) {
- mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
- return NULL;
- }
- if (free_frame_buffer == NULL) {
- mtk_v4l2_debug(3, " No free frame buffer");
- return NULL;
- }
-
- mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p",
- ctx->id, free_frame_buffer);
-
- dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
- frame_buffer);
- vb = &dstbuf->m2m_buf.vb;
-
- mutex_lock(&ctx->lock);
- if (dstbuf->used) {
- if ((dstbuf->queued_in_vb2) &&
- (dstbuf->queued_in_v4l2) &&
- (free_frame_buffer->status == FB_ST_FREE)) {
- /*
- * After decode sps/pps or non-display buffer, we don't
- * need to return capture buffer to user space, but
- * just re-queue this capture buffer to vb2 queue.
- * This reduce overheads that dq/q unused capture
- * buffer. In this case, queued_in_vb2 = true.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
- /*
- * If buffer in v4l2 driver but not in vb2 queue yet,
- * and we get this buffer from free_list, it means
- * that codec driver do not use this buffer as
- * reference buffer anymore. We should q buffer to vb2
- * queue, so later work thread could get this buffer
- * for decode. In this case, queued_in_vb2 = false
- * means this buffer is not from previous decode
- * output.
- */
- mtk_v4l2_debug(2,
- "[%d]status=%x queue id=%d to rdy_queue",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index);
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
- dstbuf->queued_in_vb2 = true;
- } else {
- /*
- * Codec driver do not need to reference this capture
- * buffer and this buffer is not in v4l2 driver.
- * Then we don't need to do any thing, just add log when
- * we need to debug buffer flow.
- * When this buffer q from user space, it could
- * directly q to vb2 buffer
- */
- mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
- ctx->id, free_frame_buffer->status,
- vb->vb2_buf.index,
- dstbuf->queued_in_vb2,
- dstbuf->queued_in_v4l2);
- }
- dstbuf->used = false;
- }
- mutex_unlock(&ctx->lock);
- return &vb->vb2_buf;
-}
-
-static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_display_buffer(ctx);
- } while (framptr);
-}
-
-static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
-{
- struct vb2_buffer *framptr;
-
- do {
- framptr = get_free_buffer(ctx);
- } while (framptr);
-}
-
-static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
-{
- static const struct v4l2_event ev_src_ch = {
- .type = V4L2_EVENT_SOURCE_CHANGE,
- .u.src_change.changes =
- V4L2_EVENT_SRC_CH_RESOLUTION,
- };
-
- mtk_v4l2_debug(1, "[%d]", ctx->id);
- v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
-}
-
-static void mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
-{
- bool res_chg;
- int ret = 0;
-
- ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
- if (ret)
- mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
-
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-}
-
-static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
- unsigned int pixelformat)
-{
- const struct mtk_video_fmt *fmt;
- struct mtk_q_data *dst_q_data;
- unsigned int k;
-
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (k = 0; k < NUM_FORMATS; k++) {
- fmt = &mtk_video_formats[k];
- if (fmt->fourcc == pixelformat) {
- mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
- dst_q_data->fmt->fourcc, pixelformat);
- dst_q_data->fmt = fmt;
- return;
- }
- }
-
- mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
-}
-
-static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
-{
- unsigned int dpbsize = 0;
- int ret;
-
- if (vdec_if_get_param(ctx,
- GET_PARAM_PIC_INFO,
- &ctx->last_decoded_picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.pic_w == 0 ||
- ctx->last_decoded_picinfo.pic_h == 0 ||
- ctx->last_decoded_picinfo.buf_w == 0 ||
- ctx->last_decoded_picinfo.buf_h == 0) {
- mtk_v4l2_err("Cannot get correct pic info");
- return -EINVAL;
- }
-
- if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
- ctx->picinfo.cap_fourcc != 0)
- mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
-
- if ((ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w) ||
- (ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h))
- return 0;
-
- mtk_v4l2_debug(1,
- "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)",
- ctx->id, ctx->last_decoded_picinfo.pic_w,
- ctx->last_decoded_picinfo.pic_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- ctx->last_decoded_picinfo.buf_w,
- ctx->last_decoded_picinfo.buf_h);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
-
- ctx->dpb_size = dpbsize;
-
- return ret;
-}
-
-static void mtk_vdec_worker(struct work_struct *work)
-{
- struct mtk_vcodec_ctx *ctx = container_of(work, struct mtk_vcodec_ctx,
- decode_work);
- struct mtk_vcodec_dev *dev = ctx->dev;
- struct vb2_v4l2_buffer *src_buf, *dst_buf;
- struct mtk_vcodec_mem buf;
- struct vdec_fb *pfb;
- bool res_chg = false;
- int ret;
- struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (src_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
- return;
- }
-
- dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
- if (dst_buf == NULL) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
- return;
- }
-
- src_buf_info = container_of(src_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
- dst_buf_info = container_of(dst_buf, struct mtk_video_dec_buf,
- m2m_buf.vb);
-
- pfb = &dst_buf_info->frame_buffer;
- pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
- pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
- pfb->base_y.size = ctx->picinfo.fb_sz[0];
-
- pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
- pfb->base_c.dma_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
- pfb->base_c.size = ctx->picinfo.fb_sz[1];
- pfb->status = 0;
- mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
-
- mtk_v4l2_debug(3,
- "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
- dst_buf->vb2_buf.index, pfb,
- pfb->base_y.va, &pfb->base_y.dma_addr,
- &pfb->base_c.dma_addr, pfb->base_y.size);
-
- if (src_buf_info->lastframe) {
- mtk_v4l2_debug(1, "Got empty flush input buffer.");
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
-
- /* update dst buf status */
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- mutex_lock(&ctx->lock);
- dst_buf_info->used = false;
- mutex_unlock(&ctx->lock);
-
- vdec_if_decode(ctx, NULL, NULL, &res_chg);
- clean_display_buffer(ctx);
- vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
- if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
- vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
- dst_buf->flags |= V4L2_BUF_FLAG_LAST;
- v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
- clean_free_buffer(ctx);
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- return;
- }
- buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- if (!buf.va) {
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
- mtk_v4l2_err("[%d] id=%d src_addr is NULL!!",
- ctx->id, src_buf->vb2_buf.index);
- return;
- }
- mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
- ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
- dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
- dst_buf->timecode = src_buf->timecode;
- mutex_lock(&ctx->lock);
- dst_buf_info->used = true;
- mutex_unlock(&ctx->lock);
- src_buf_info->used = true;
-
- ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
-
- if (ret) {
- mtk_v4l2_err(
- " <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
- ctx->id,
- src_buf->vb2_buf.index,
- buf.size,
- src_buf->vb2_buf.timestamp,
- dst_buf->vb2_buf.index,
- ret, res_chg);
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mutex_lock(&ctx->lock);
- src_buf_info->error = true;
- mutex_unlock(&ctx->lock);
- }
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else if (!res_chg) {
- /*
- * we only return src buffer with VB2_BUF_STATE_DONE
- * when decode success without resolution change
- */
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
-
- dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
- clean_display_buffer(ctx);
- clean_free_buffer(ctx);
-
- if (!ret && res_chg) {
- mtk_vdec_pic_info_update(ctx);
- /*
- * On encountering a resolution change in the stream.
- * The driver must first process and decode all
- * remaining buffers from before the resolution change
- * point, so call flush decode here
- */
- mtk_vdec_flush_decoder(ctx);
- /*
- * After all buffers containing decoded frames from
- * before the resolution change point ready to be
- * dequeued on the CAPTURE queue, the driver sends a
- * V4L2_EVENT_SOURCE_CHANGE event for source change
- * type V4L2_EVENT_SRC_CH_RESOLUTION
- */
- mtk_vdec_queue_res_chg_event(ctx);
- }
- v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
-}
-
static int vidioc_try_decoder_cmd(struct file *file, void *priv,
struct v4l2_decoder_cmd *cmd)
{
- switch (cmd->cmd) {
- case V4L2_DEC_CMD_STOP:
- case V4L2_DEC_CMD_START:
- if (cmd->flags != 0) {
- mtk_v4l2_err("cmd->flags=%u", cmd->flags);
- return -EINVAL;
- }
- break;
- default:
- return -EINVAL;
- }
- return 0;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_try_decoder_cmd(file, priv,
+ cmd);
+ else
+ return v4l2_m2m_ioctl_try_decoder_cmd(file, priv, cmd);
}
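
vidioc_try_decoder_cmd now defers to the v4l2-mem2mem core: the stateless helper when the platform data sets uses_stateless_api, the classic stateful helper otherwise. The run-time dispatch is needed here because one driver serves both models; a driver that is stateless-only could instead plug the helpers straight into its ioctl table, as in the hypothetical sketch below (not this driver's table).

	#include <media/v4l2-ioctl.h>
	#include <media/v4l2-mem2mem.h>

	static const struct v4l2_ioctl_ops example_stateless_ioctl_ops = {
		.vidioc_try_decoder_cmd	= v4l2_m2m_ioctl_stateless_try_decoder_cmd,
		.vidioc_decoder_cmd	= v4l2_m2m_ioctl_stateless_decoder_cmd,
		/* remaining ioctls elided */
	};
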
@@ -510,6 +69,10 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
if (ret)
return ret;
+ /* Use M2M stateless helper if relevant */
+ if (ctx->dev->vdec_pdata->uses_stateless_api)
+ return v4l2_m2m_ioctl_stateless_decoder_cmd(file, priv, cmd);
+
mtk_v4l2_debug(1, "decoder cmd=%u", cmd->cmd);
dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
@@ -525,8 +88,7 @@ static int vidioc_decoder_cmd(struct file *file, void *priv,
mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
return 0;
}
- v4l2_m2m_buf_queue(ctx->m2m_ctx,
- &ctx->empty_flush_buf->m2m_buf.vb);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
v4l2_m2m_try_schedule(ctx->m2m_ctx);
break;
@@ -561,10 +123,12 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
{
struct mtk_q_data *q_data;
+ ctx->dev->vdec_pdata->init_vdec_params(ctx);
+
ctx->m2m_ctx->q_lock = &ctx->dev->dev_mutex;
ctx->fh.m2m_ctx = ctx->m2m_ctx;
ctx->fh.ctrl_handler = &ctx->ctrl_hdl;
- INIT_WORK(&ctx->decode_work, mtk_vdec_worker);
+ INIT_WORK(&ctx->decode_work, ctx->dev->vdec_pdata->worker);
ctx->colorspace = V4L2_COLORSPACE_REC709;
ctx->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
ctx->quantization = V4L2_QUANTIZATION_DEFAULT;
@@ -574,7 +138,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
memset(q_data, 0, sizeof(struct mtk_q_data));
q_data->visible_width = DFT_CFG_WIDTH;
q_data->visible_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[OUT_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_out_fmt;
q_data->field = V4L2_FIELD_NONE;
q_data->sizeimage[0] = DFT_CFG_WIDTH * DFT_CFG_HEIGHT;
@@ -586,7 +150,7 @@ void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx)
q_data->visible_height = DFT_CFG_HEIGHT;
q_data->coded_width = DFT_CFG_WIDTH;
q_data->coded_height = DFT_CFG_HEIGHT;
- q_data->fmt = &mtk_video_formats[CAP_FMT_IDX];
+ q_data->fmt = ctx->dev->vdec_pdata->default_cap_fmt;
q_data->field = V4L2_FIELD_NONE;
v4l_bound_align_image(&q_data->coded_width,
@@ -660,19 +224,17 @@ static int vidioc_try_fmt(struct v4l2_format *f,
pix_fmt_mp->field = V4L2_FIELD_NONE;
+ pix_fmt_mp->width =
+ clamp(pix_fmt_mp->width, MTK_VDEC_MIN_W, MTK_VDEC_MAX_W);
+ pix_fmt_mp->height =
+ clamp(pix_fmt_mp->height, MTK_VDEC_MIN_H, MTK_VDEC_MAX_H);
+
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
pix_fmt_mp->num_planes = 1;
pix_fmt_mp->plane_fmt[0].bytesperline = 0;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
int tmp_w, tmp_h;
- pix_fmt_mp->height = clamp(pix_fmt_mp->height,
- MTK_VDEC_MIN_H,
- MTK_VDEC_MAX_H);
- pix_fmt_mp->width = clamp(pix_fmt_mp->width,
- MTK_VDEC_MIN_W,
- MTK_VDEC_MAX_W);
-
/*
* Find next closer width align 64, heign align 64, size align
* 64 rectangle
@@ -722,11 +284,14 @@ static int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
struct v4l2_format *f)
{
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
return vidioc_try_fmt(f, fmt);
@@ -737,11 +302,14 @@ static int vidioc_try_fmt_vid_out_mplane(struct file *file, void *priv,
{
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
const struct mtk_video_fmt *fmt;
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (!fmt) {
- f->fmt.pix.pixelformat = mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ f->fmt.pix.pixelformat =
+ ctx->q_data[MTK_Q_DATA_SRC].fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
if (pix_fmt_mp->plane_fmt[0].sizeimage == 0) {
@@ -831,6 +399,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret = 0;
const struct mtk_video_fmt *fmt;
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
mtk_v4l2_debug(3, "[%d]", ctx->id);
@@ -843,7 +412,8 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
* Setting OUTPUT format after OUTPUT buffers are allocated is invalid
* if using the stateful API.
*/
- if ((f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ if (!dec_pdata->uses_stateless_api &&
+ f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE &&
vb2_is_busy(&ctx->m2m_ctx->out_q_ctx.q)) {
mtk_v4l2_err("out_q_ctx buffers already requested");
ret = -EBUSY;
@@ -859,16 +429,16 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ret = -EBUSY;
}
- fmt = mtk_vdec_find_format(f);
+ fmt = mtk_vdec_find_format(f, dec_pdata);
if (fmt == NULL) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[OUT_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_out_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
} else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
f->fmt.pix.pixelformat =
- mtk_video_formats[CAP_FMT_IDX].fourcc;
- fmt = mtk_vdec_find_format(f);
+ dec_pdata->default_cap_fmt->fourcc;
+ fmt = mtk_vdec_find_format(f, dec_pdata);
}
}
if (fmt == NULL)
@@ -886,6 +456,7 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
ctx->quantization = pix_mp->quantization;
ctx->xfer_func = pix_mp->xfer_func;
+ ctx->current_codec = fmt->fourcc;
if (ctx->state == MTK_STATE_FREE) {
ret = vdec_if_init(ctx, q_data->fmt->fourcc);
if (ret) {
@@ -897,6 +468,48 @@ static int vidioc_vdec_s_fmt(struct file *file, void *priv,
}
}
+ /*
+ * If using the stateless API, S_FMT should have the effect of setting
+ * the CAPTURE queue resolution no matter which queue it was called on.
+ */
+ if (dec_pdata->uses_stateless_api) {
+ ctx->picinfo.pic_w = pix_mp->width;
+ ctx->picinfo.pic_h = pix_mp->height;
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo);
+ if (ret) {
+ mtk_v4l2_err("[%d]Error!! Get GET_PARAM_PICTURE_INFO Fail",
+ ctx->id);
+ return -EINVAL;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 1) {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0] +
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ } else {
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0] =
+ ctx->picinfo.fb_sz[0];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[0] =
+ ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1] =
+ ctx->picinfo.fb_sz[1];
+ ctx->q_data[MTK_Q_DATA_DST].bytesperline[1] =
+ ctx->picinfo.buf_w;
+ }
+
+ ctx->q_data[MTK_Q_DATA_DST].coded_width = ctx->picinfo.buf_w;
+ ctx->q_data[MTK_Q_DATA_DST].coded_height = ctx->picinfo.buf_h;
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() num_plane = %d wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, pix_mp->num_planes, ctx->picinfo.buf_w, ctx->picinfo.buf_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[0],
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1]);
+ }
return 0;
}
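
In the stateless path added above, S_FMT immediately queries GET_PARAM_PIC_INFO and derives the CAPTURE queue's plane sizes from it instead of waiting for a header parse at runtime. The derivation, restated as a standalone sketch; example_picinfo only mirrors the fb_sz[]/buf_w fields used above and is not the driver's struct.

	struct example_picinfo {
		unsigned int buf_w, buf_h;
		unsigned int fb_sz[2];	/* luma, chroma plane sizes from firmware */
	};

	static void example_fill_cap_sizes(const struct example_picinfo *pic,
					   unsigned int num_planes,
					   unsigned int sizeimage[2],
					   unsigned int bytesperline[2])
	{
		if (num_planes == 1) {
			/* one buffer carries both planes back to back */
			sizeimage[0] = pic->fb_sz[0] + pic->fb_sz[1];
			bytesperline[0] = pic->buf_w;
		} else {
			sizeimage[0] = pic->fb_sz[0];
			bytesperline[0] = pic->buf_w;
			sizeimage[1] = pic->fb_sz[1];
			bytesperline[1] = pic->buf_w;
		}
	}
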
@@ -905,16 +518,17 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
{
int i = 0;
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
if (fsize->index != 0)
return -EINVAL;
- for (i = 0; i < NUM_SUPPORTED_FRAMESIZE; ++i) {
- if (fsize->pixel_format != mtk_vdec_framesizes[i].fourcc)
+ for (i = 0; i < dec_pdata->num_framesizes; ++i) {
+ if (fsize->pixel_format != dec_pdata->vdec_framesizes[i].fourcc)
continue;
fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise = mtk_vdec_framesizes[i].stepwise;
+ fsize->stepwise = dec_pdata->vdec_framesizes[i].stepwise;
if (!(ctx->dev->dec_capability &
VCODEC_CAPABILITY_4K_DISABLED)) {
mtk_v4l2_debug(3, "4K is enabled");
@@ -937,16 +551,20 @@ static int vidioc_enum_framesizes(struct file *file, void *priv,
return -EINVAL;
}
-static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
+static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, void *priv,
+ bool output_queue)
{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ const struct mtk_vcodec_dec_pdata *dec_pdata = ctx->dev->vdec_pdata;
const struct mtk_video_fmt *fmt;
int i, j = 0;
- for (i = 0; i < NUM_FORMATS; i++) {
- if (output_queue && (mtk_video_formats[i].type != MTK_FMT_DEC))
+ for (i = 0; i < dec_pdata->num_formats; i++) {
+ if (output_queue &&
+ dec_pdata->vdec_formats[i].type != MTK_FMT_DEC)
continue;
if (!output_queue &&
- (mtk_video_formats[i].type != MTK_FMT_FRAME))
+ dec_pdata->vdec_formats[i].type != MTK_FMT_FRAME)
continue;
if (j == f->index)
@@ -954,10 +572,10 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
++j;
}
- if (i == NUM_FORMATS)
+ if (i == dec_pdata->num_formats)
return -EINVAL;
- fmt = &mtk_video_formats[i];
+ fmt = &dec_pdata->vdec_formats[i];
f->pixelformat = fmt->fourcc;
f->flags = fmt->flags;
@@ -967,13 +585,13 @@ static int vidioc_enum_fmt(struct v4l2_fmtdesc *f, bool output_queue)
static int vidioc_vdec_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, false);
+ return vidioc_enum_fmt(f, priv, false);
}
static int vidioc_vdec_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
- return vidioc_enum_fmt(f, true);
+ return vidioc_enum_fmt(f, priv, true);
}
static int vidioc_vdec_g_fmt(struct file *file, void *priv,
@@ -1064,11 +682,9 @@ static int vidioc_vdec_g_fmt(struct file *file, void *priv,
return 0;
}
-static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
- unsigned int *nbuffers,
- unsigned int *nplanes,
- unsigned int sizes[],
- struct device *alloc_devs[])
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[])
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vq);
struct mtk_q_data *q_data;
@@ -1088,7 +704,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
}
} else {
if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
- *nplanes = 2;
+ *nplanes = q_data->fmt->num_planes;
else
*nplanes = 1;
@@ -1104,7 +720,7 @@ static int vb2ops_vdec_queue_setup(struct vb2_queue *vq,
return 0;
}
-static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct mtk_q_data *q_data;
@@ -1126,128 +742,7 @@ static int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb)
return 0;
}
-static void vb2ops_vdec_buf_queue(struct vb2_buffer *vb)
-{
- struct vb2_v4l2_buffer *src_buf;
- struct mtk_vcodec_mem src_mem;
- bool res_chg = false;
- int ret = 0;
- unsigned int dpbsize = 1, i = 0;
- struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
- struct vb2_v4l2_buffer *vb2_v4l2 = NULL;
- struct mtk_video_dec_buf *buf = NULL;
- struct mtk_q_data *dst_q_data;
-
- mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p",
- ctx->id, vb->vb2_queue->type,
- vb->index, vb);
- /*
- * check if this buffer is ready to be used after decode
- */
- if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- vb2_v4l2 = to_vb2_v4l2_buffer(vb);
- buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
- m2m_buf.vb);
- mutex_lock(&ctx->lock);
- if (!buf->used) {
- v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
- buf->queued_in_vb2 = true;
- buf->queued_in_v4l2 = true;
- } else {
- buf->queued_in_vb2 = false;
- buf->queued_in_v4l2 = true;
- }
- mutex_unlock(&ctx->lock);
- return;
- }
-
- v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
-
- if (ctx->state != MTK_STATE_INIT) {
- mtk_v4l2_debug(3, "[%d] already init driver %d",
- ctx->id, ctx->state);
- return;
- }
-
- src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- if (!src_buf) {
- mtk_v4l2_err("No src buffer");
- return;
- }
- buf = container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (buf->lastframe) {
- /* This shouldn't happen. Just in case. */
- mtk_v4l2_err("Invalid flush buffer.");
- v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- return;
- }
-
- src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
- src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
- src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
- mtk_v4l2_debug(2,
- "[%d] buf id=%d va=%p dma=%pad size=%zx",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.va, &src_mem.dma_addr,
- src_mem.size);
-
- ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
- if (ret || !res_chg) {
- /*
- * fb == NULL means to parse SPS/PPS header or
- * resolution info in src_mem. Decode can fail
- * if there is no SPS header or picture info
- * in bs
- */
-
- src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
- if (ret == -EIO) {
- mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.",
- ctx->id);
- ctx->state = MTK_STATE_ABORT;
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
- } else {
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
- }
- mtk_v4l2_debug(ret ? 0 : 1,
- "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
- ctx->id, src_buf->vb2_buf.index,
- src_mem.size, ret, res_chg);
- return;
- }
-
- if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
- mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR",
- ctx->id);
- return;
- }
-
- ctx->last_decoded_picinfo = ctx->picinfo;
- dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
- for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
- dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
- dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
- }
-
- mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
- ctx->id,
- ctx->picinfo.buf_w, ctx->picinfo.buf_h,
- ctx->picinfo.pic_w, ctx->picinfo.pic_h,
- dst_q_data->sizeimage[0],
- dst_q_data->sizeimage[1]);
-
- ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
- if (dpbsize == 0)
- mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
-
- ctx->dpb_size = dpbsize;
- ctx->state = MTK_STATE_HEADER;
- mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
-
- mtk_vdec_queue_res_chg_event(ctx);
-}
-
-static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct vb2_v4l2_buffer *vb2_v4l2;
@@ -1270,7 +765,7 @@ static void vb2ops_vdec_buf_finish(struct vb2_buffer *vb)
}
}
-static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
{
struct vb2_v4l2_buffer *vb2_v4l2 = container_of(vb,
struct vb2_v4l2_buffer, vb2_buf);
@@ -1280,14 +775,12 @@ static int vb2ops_vdec_buf_init(struct vb2_buffer *vb)
if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
buf->used = false;
buf->queued_in_v4l2 = false;
- } else {
- buf->lastframe = false;
}
return 0;
}
-static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
@@ -1297,21 +790,25 @@ static int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
}
-static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
{
struct vb2_v4l2_buffer *src_buf = NULL, *dst_buf = NULL;
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
+ int ret;
mtk_v4l2_debug(3, "[%d] (%d) state=(%x) ctx->decoded_frame_cnt=%d",
ctx->id, q->type, ctx->state, ctx->decoded_frame_cnt);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
- struct mtk_video_dec_buf *buf_info = container_of(
- src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
- if (!buf_info->lastframe)
+ if (src_buf != &ctx->empty_flush_buf.vb) {
+ struct media_request *req =
+ src_buf->vb2_buf.req_obj.req;
v4l2_m2m_buf_done(src_buf,
VB2_BUF_STATE_ERROR);
+ if (req)
+ v4l2_ctrl_request_complete(req, &ctx->ctrl_hdl);
+ }
}
return;
}
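
stop_streaming on the OUTPUT queue now distinguishes the internal empty flush marker from real bitstream buffers and, for the stateless case, completes any media request bound to a discarded buffer so userspace is not left waiting on it. A condensed restatement of that loop follows; it relies on the driver's mtk_vcodec_ctx and headers, so it is a sketch rather than standalone code, and the function name is a placeholder.

	static void example_return_output_buffers(struct mtk_vcodec_ctx *ctx)
	{
		struct vb2_v4l2_buffer *src_buf;

		while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
			if (src_buf == &ctx->empty_flush_buf.vb)
				continue;	/* internal flush marker, no request attached */

			v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
			if (src_buf->vb2_buf.req_obj.req)
				v4l2_ctrl_request_complete(src_buf->vb2_buf.req_obj.req,
							   &ctx->ctrl_hdl);
		}
	}
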
@@ -1334,7 +831,9 @@ static void vb2ops_vdec_stop_streaming(struct vb2_queue *q)
ctx->last_decoded_picinfo.buf_w,
ctx->last_decoded_picinfo.buf_h);
- mtk_vdec_flush_decoder(ctx);
+ ret = ctx->dev->vdec_pdata->flush_decoder(ctx);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
}
ctx->state = MTK_STATE_FLUSH;
@@ -1381,75 +880,12 @@ static void m2mops_vdec_job_abort(void *priv)
ctx->state = MTK_STATE_ABORT;
}
-static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
-{
- struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
- int ret = 0;
-
- switch (ctrl->id) {
- case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
- if (ctx->state >= MTK_STATE_HEADER) {
- ctrl->val = ctx->dpb_size;
- } else {
- mtk_v4l2_debug(0, "Seqinfo not ready");
- ctrl->val = 0;
- }
- break;
- default:
- ret = -EINVAL;
- }
- return ret;
-}
-
-static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
- .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
-};
-
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
-{
- struct v4l2_ctrl *ctrl;
-
- v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
-
- ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MIN_BUFFERS_FOR_CAPTURE,
- 0, 32, 1, 1);
- ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
- v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl,
- &mtk_vcodec_dec_ctrl_ops,
- V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
- V4L2_MPEG_VIDEO_VP9_PROFILE_0,
- 0, V4L2_MPEG_VIDEO_VP9_PROFILE_0);
-
- if (ctx->ctrl_hdl.error) {
- mtk_v4l2_err("Adding control failed %d",
- ctx->ctrl_hdl.error);
- return ctx->ctrl_hdl.error;
- }
-
- v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
- return 0;
-}
-
const struct v4l2_m2m_ops mtk_vdec_m2m_ops = {
.device_run = m2mops_vdec_device_run,
.job_ready = m2mops_vdec_job_ready,
.job_abort = m2mops_vdec_job_abort,
};
-static const struct vb2_ops mtk_vdec_vb2_ops = {
- .queue_setup = vb2ops_vdec_queue_setup,
- .buf_prepare = vb2ops_vdec_buf_prepare,
- .buf_queue = vb2ops_vdec_buf_queue,
- .wait_prepare = vb2_ops_wait_prepare,
- .wait_finish = vb2_ops_wait_finish,
- .buf_init = vb2ops_vdec_buf_init,
- .buf_finish = vb2ops_vdec_buf_finish,
- .start_streaming = vb2ops_vdec_start_streaming,
- .stop_streaming = vb2ops_vdec_stop_streaming,
-};
-
const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops = {
.vidioc_streamon = v4l2_m2m_ioctl_streamon,
.vidioc_streamoff = v4l2_m2m_ioctl_streamoff,
@@ -1496,7 +932,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->io_modes = VB2_DMABUF | VB2_MMAP;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- src_vq->ops = &mtk_vdec_vb2_ops;
+ src_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
src_vq->lock = &ctx->dev->dev_mutex;
@@ -1511,7 +947,7 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_DMABUF | VB2_MMAP;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct mtk_video_dec_buf);
- dst_vq->ops = &mtk_vdec_vb2_ops;
+ dst_vq->ops = ctx->dev->vdec_pdata->vdec_vb2_ops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
dst_vq->lock = &ctx->dev->dev_mutex;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
index cf26b6c1486a..46783516b84a 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec.h
@@ -16,6 +16,8 @@
#define VCODEC_DEC_4K_CODED_HEIGHT 2304U
#define MTK_VDEC_MAX_W 2048U
#define MTK_VDEC_MAX_H 1088U
+#define MTK_VDEC_MIN_W 64U
+#define MTK_VDEC_MIN_H 64U
#define MTK_VDEC_IRQ_STATUS_DEC_SUCCESS 0x10000
@@ -40,9 +42,9 @@ struct vdec_fb {
* @queued_in_vb2: Capture buffer is queue in vb2
* @queued_in_v4l2: Capture buffer is in v4l2 driver, but not in vb2
* queue yet
- * @lastframe: Intput buffer is last buffer - EOS
* @error: An unrecoverable error occurs on this buffer.
* @frame_buffer: Decode status, and buffer information of Capture buffer
+ * @bs_buffer: Output buffer info
*
* Note : These status information help us track and debug buffer state
*/
@@ -52,13 +54,19 @@ struct mtk_video_dec_buf {
bool used;
bool queued_in_vb2;
bool queued_in_v4l2;
- bool lastframe;
bool error;
- struct vdec_fb frame_buffer;
+
+ union {
+ struct vdec_fb frame_buffer;
+ struct mtk_vcodec_mem bs_buffer;
+ };
};
extern const struct v4l2_ioctl_ops mtk_vdec_ioctl_ops;
extern const struct v4l2_m2m_ops mtk_vdec_m2m_ops;
+extern const struct media_device_ops mtk_vcodec_media_ops;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata;
+extern const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata;
/*
@@ -73,7 +81,18 @@ int mtk_vcodec_dec_queue_init(void *priv, struct vb2_queue *src_vq,
struct vb2_queue *dst_vq);
void mtk_vcodec_dec_set_default_params(struct mtk_vcodec_ctx *ctx);
void mtk_vcodec_dec_release(struct mtk_vcodec_ctx *ctx);
-int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx);
+
+/*
+ * VB2 ops
+ */
+int vb2ops_vdec_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
+ unsigned int *nplanes, unsigned int sizes[],
+ struct device *alloc_devs[]);
+int vb2ops_vdec_buf_prepare(struct vb2_buffer *vb);
+void vb2ops_vdec_buf_finish(struct vb2_buffer *vb);
+int vb2ops_vdec_buf_init(struct vb2_buffer *vb);
+int vb2ops_vdec_start_streaming(struct vb2_queue *q, unsigned int count);
+void vb2ops_vdec_stop_streaming(struct vb2_queue *q);
#endif /* _MTK_VCODEC_DEC_H_ */
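
The header now exports per-SoC platform data (mtk_vdec_8173_pdata / mtk_vdec_8183_pdata) plus the vb2 ops both variants share. The struct mtk_vcodec_dec_pdata itself lives in mtk_vcodec_drv.h and is not shown in this diff; the sketch below is inferred purely from the call sites in this series, so member order and any omitted fields are guesses.

	/* inferred shape, not copied from mtk_vcodec_drv.h */
	struct example_vdec_pdata {
		void (*init_vdec_params)(struct mtk_vcodec_ctx *ctx);
		int (*ctrls_setup)(struct mtk_vcodec_ctx *ctx);
		void (*worker)(struct work_struct *work);
		int (*flush_decoder)(struct mtk_vcodec_ctx *ctx);

		const struct mtk_video_fmt *vdec_formats;
		unsigned int num_formats;
		const struct mtk_video_fmt *default_out_fmt;
		const struct mtk_video_fmt *default_cap_fmt;

		const struct mtk_codec_framesizes *vdec_framesizes;
		unsigned int num_framesizes;

		const struct vb2_ops *vdec_vb2_ops;
		bool uses_stateless_api;
	};
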
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
index f87dc47d9e63..e6e6a8203eeb 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_drv.c
@@ -14,6 +14,7 @@
#include <media/v4l2-event.h>
#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-device.h>
#include "mtk_vcodec_drv.h"
#include "mtk_vcodec_dec.h"
@@ -81,21 +82,14 @@ static int fops_vcodec_open(struct file *file)
{
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
- struct mtk_video_dec_buf *mtk_buf = NULL;
int ret = 0;
struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
- mtk_buf = kzalloc(sizeof(*mtk_buf), GFP_KERNEL);
- if (!mtk_buf) {
- kfree(ctx);
- return -ENOMEM;
- }
mutex_lock(&dev->dev_mutex);
- ctx->empty_flush_buf = mtk_buf;
ctx->id = dev->id_counter++;
v4l2_fh_init(&ctx->fh, video_devdata(file));
file->private_data = &ctx->fh;
@@ -106,7 +100,7 @@ static int fops_vcodec_open(struct file *file)
mutex_init(&ctx->lock);
ctx->type = MTK_INST_DECODER;
- ret = mtk_vcodec_dec_ctrls_setup(ctx);
+ ret = dev->vdec_pdata->ctrls_setup(ctx);
if (ret) {
mtk_v4l2_err("Failed to setup mt vcodec controls");
goto err_ctrls_setup;
@@ -121,8 +115,7 @@ static int fops_vcodec_open(struct file *file)
}
src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
- ctx->empty_flush_buf->m2m_buf.vb.vb2_buf.vb2_queue = src_vq;
- ctx->empty_flush_buf->lastframe = true;
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_dec_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -162,7 +155,6 @@ err_m2m_ctx_init:
err_ctrls_setup:
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
@@ -193,7 +185,6 @@ static int fops_vcodec_release(struct file *file)
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
list_del_init(&ctx->list);
- kfree(ctx->empty_flush_buf);
kfree(ctx);
mutex_unlock(&dev->dev_mutex);
return 0;
@@ -224,6 +215,7 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&dev->ctx_list);
dev->plat_dev = pdev;
+ dev->vdec_pdata = of_device_get_match_data(&pdev->dev);
if (!of_property_read_u32(pdev->dev.of_node, "mediatek,vpu",
&rproc_phandle)) {
fw_type = VPU;
@@ -325,18 +317,47 @@ static int mtk_vcodec_probe(struct platform_device *pdev)
goto err_event_workq;
}
+ if (dev->vdec_pdata->uses_stateless_api) {
+ dev->mdev_dec.dev = &pdev->dev;
+ strscpy(dev->mdev_dec.model, MTK_VCODEC_DEC_NAME,
+ sizeof(dev->mdev_dec.model));
+
+ media_device_init(&dev->mdev_dec);
+ dev->mdev_dec.ops = &mtk_vcodec_media_ops;
+ dev->v4l2_dev.mdev = &dev->mdev_dec;
+
+ ret = v4l2_m2m_register_media_controller(dev->m2m_dev_dec, dev->vfd_dec,
+ MEDIA_ENT_F_PROC_VIDEO_DECODER);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media controller");
+ goto err_reg_cont;
+ }
+
+ ret = media_device_register(&dev->mdev_dec);
+ if (ret) {
+ mtk_v4l2_err("Failed to register media device");
+ goto err_media_reg;
+ }
+
+ mtk_v4l2_debug(0, "media registered as /dev/media%d", vfd_dec->minor);
+ }
ret = video_register_device(vfd_dec, VFL_TYPE_VIDEO, 0);
if (ret) {
mtk_v4l2_err("Failed to register video device");
goto err_dec_reg;
}
- mtk_v4l2_debug(0, "decoder registered as /dev/video%d",
- vfd_dec->num);
+ mtk_v4l2_debug(0, "decoder registered as /dev/video%d", vfd_dec->minor);
return 0;
err_dec_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ media_device_unregister(&dev->mdev_dec);
+err_media_reg:
+ if (dev->vdec_pdata->uses_stateless_api)
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+err_reg_cont:
destroy_workqueue(dev->decode_workqueue);
err_event_workq:
v4l2_m2m_release(dev->m2m_dev_dec);
@@ -352,7 +373,14 @@ err_dec_pm:
}
static const struct of_device_id mtk_vcodec_match[] = {
- {.compatible = "mediatek,mt8173-vcodec-dec",},
+ {
+ .compatible = "mediatek,mt8173-vcodec-dec",
+ .data = &mtk_vdec_8173_pdata,
+ },
+ {
+ .compatible = "mediatek,mt8183-vcodec-dec",
+ .data = &mtk_vdec_8183_pdata,
+ },
{},
};
@@ -364,6 +392,13 @@ static int mtk_vcodec_dec_remove(struct platform_device *pdev)
flush_workqueue(dev->decode_workqueue);
destroy_workqueue(dev->decode_workqueue);
+
+ if (media_devnode_is_registered(dev->mdev_dec.devnode)) {
+ media_device_unregister(&dev->mdev_dec);
+ v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
+ media_device_cleanup(&dev->mdev_dec);
+ }
+
if (dev->m2m_dev_dec)
v4l2_m2m_release(dev->m2m_dev_dec);
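
For the stateless (MT8183) variant the probe path above also brings up a media controller device so userspace can use the request API, and the remove path tears it down only when it was actually registered. A shortened sketch of that bring-up, using the same fields and constants as the patch; example_register_stateless is a placeholder, the driver's own headers are assumed, and full error unwinding is omitted.

	static int example_register_stateless(struct mtk_vcodec_dev *dev,
					      struct platform_device *pdev,
					      struct video_device *vfd)
	{
		int ret;

		dev->mdev_dec.dev = &pdev->dev;
		strscpy(dev->mdev_dec.model, MTK_VCODEC_DEC_NAME,
			sizeof(dev->mdev_dec.model));
		media_device_init(&dev->mdev_dec);
		dev->mdev_dec.ops = &mtk_vcodec_media_ops;
		dev->v4l2_dev.mdev = &dev->mdev_dec;

		/* exposes the m2m source/sink/proc entities for the decoder */
		ret = v4l2_m2m_register_media_controller(dev->m2m_dev_dec, vfd,
							 MEDIA_ENT_F_PROC_VIDEO_DECODER);
		if (ret)
			return ret;

		ret = media_device_register(&dev->mdev_dec);
		if (ret)
			v4l2_m2m_unregister_media_controller(dev->m2m_dev_dec);
		return ret;
	}
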
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
new file mode 100644
index 000000000000..bef49244e61b
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateful.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ .flags = V4L2_FMT_FLAG_DYN_RESOLUTION,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MT21C,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 3
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP8,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_VP9,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+/*
+ * This function tries to clean all display buffers, the buffers will return
+ * in display order.
+ * Note the buffers returned from codec driver may still be in driver's
+ * reference list.
+ */
+static struct vb2_buffer *get_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_fb *disp_frame_buffer = NULL;
+ struct mtk_video_dec_buf *dstbuf;
+ struct vb2_v4l2_buffer *vb;
+
+ mtk_v4l2_debug(3, "[%d]", ctx->id);
+ if (vdec_if_get_param(ctx, GET_PARAM_DISP_FRAME_BUFFER,
+ &disp_frame_buffer)) {
+ mtk_v4l2_err("[%d]Cannot get param : GET_PARAM_DISP_FRAME_BUFFER", ctx->id);
+ return NULL;
+ }
+
+ if (!disp_frame_buffer) {
+ mtk_v4l2_debug(3, "No display frame buffer");
+ return NULL;
+ }
+
+ dstbuf = container_of(disp_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ vb2_set_plane_payload(&vb->vb2_buf, 0, ctx->picinfo.fb_sz[0]);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&vb->vb2_buf, 1,
+ ctx->picinfo.fb_sz[1]);
+
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to done_list %d",
+ ctx->id, disp_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+
+ v4l2_m2m_buf_done(vb, VB2_BUF_STATE_DONE);
+ ctx->decoded_frame_cnt++;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+/*
+ * This function tries to clean all capture buffers that are not used as
+ * reference buffers by codec driver any more
+ * In this case, we need re-queue buffer to vb2 buffer if user space
+ * already returns this buffer to v4l2 or this buffer is just the output of
+ * previous sps/pps/resolution change decode, or do nothing if user
+ * space still owns this buffer
+ */
+static struct vb2_buffer *get_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ struct mtk_video_dec_buf *dstbuf;
+ struct vdec_fb *free_frame_buffer = NULL;
+ struct vb2_v4l2_buffer *vb;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_FREE_FRAME_BUFFER,
+ &free_frame_buffer)) {
+ mtk_v4l2_err("[%d] Error!! Cannot get param", ctx->id);
+ return NULL;
+ }
+ if (!free_frame_buffer) {
+ mtk_v4l2_debug(3, " No free frame buffer");
+ return NULL;
+ }
+
+ mtk_v4l2_debug(3, "[%d] tmp_frame_addr = 0x%p", ctx->id,
+ free_frame_buffer);
+
+ dstbuf = container_of(free_frame_buffer, struct mtk_video_dec_buf,
+ frame_buffer);
+ vb = &dstbuf->m2m_buf.vb;
+
+ mutex_lock(&ctx->lock);
+ if (dstbuf->used) {
+ if (dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2 &&
+ free_frame_buffer->status == FB_ST_FREE) {
+ /*
+ * After decode sps/pps or non-display buffer, we don't
+ * need to return capture buffer to user space, but
+ * just re-queue this capture buffer to vb2 queue.
+			 * This reduces the overhead of dq/q-ing an unused
+			 * capture buffer. In this case, queued_in_vb2 = true.
+ */
+ mtk_v4l2_debug(2, "[%d]status=%x queue id=%d to rdy_queue %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ } else if (!dstbuf->queued_in_vb2 && dstbuf->queued_in_v4l2) {
+ /*
+ * If buffer in v4l2 driver but not in vb2 queue yet,
+ * and we get this buffer from free_list, it means
+ * that codec driver do not use this buffer as
+			 * that the codec driver no longer uses this buffer
+			 * as a reference. We should queue it back to the vb2
+			 * queue so a later work thread can pick it up for
+			 * decoding. In this case, queued_in_vb2 = false
+ * output.
+ */
+ mtk_v4l2_debug(2,
+ "[%d]status=%x queue id=%d to rdy_queue",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
+ dstbuf->queued_in_vb2 = true;
+ } else {
+ /*
+			 * The codec driver no longer needs to reference this
+			 * capture buffer and the buffer is not held by the
+			 * v4l2 driver, so nothing needs to be done here; we
+			 * only log it when debugging buffer flow. When this
+			 * buffer is queued from user space, it can go
+			 * directly to the vb2 queue.
+			 */
+ mtk_v4l2_debug(3, "[%d]status=%x err queue id=%d %d %d",
+ ctx->id, free_frame_buffer->status,
+ vb->vb2_buf.index, dstbuf->queued_in_vb2,
+ dstbuf->queued_in_v4l2);
+ }
+ dstbuf->used = false;
+ }
+ mutex_unlock(&ctx->lock);
+ return &vb->vb2_buf;
+}
+
+static void clean_display_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_display_buffer(ctx))
+ ;
+}
+
+static void clean_free_buffer(struct mtk_vcodec_ctx *ctx)
+{
+ while (get_free_buffer(ctx))
+ ;
+}
+
+static void mtk_vdec_queue_res_chg_event(struct mtk_vcodec_ctx *ctx)
+{
+ static const struct v4l2_event ev_src_ch = {
+ .type = V4L2_EVENT_SOURCE_CHANGE,
+ .u.src_change.changes = V4L2_EVENT_SRC_CH_RESOLUTION,
+ };
+
+ mtk_v4l2_debug(1, "[%d]", ctx->id);
+ v4l2_event_queue_fh(&ctx->fh, &ev_src_ch);
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+ int ret;
+
+ ret = vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ if (ret)
+ mtk_v4l2_err("DecodeFinal failed, ret=%d", ret);
+
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ return 0;
+}
+
+static void mtk_vdec_update_fmt(struct mtk_vcodec_ctx *ctx,
+ unsigned int pixelformat)
+{
+ const struct mtk_video_fmt *fmt;
+ struct mtk_q_data *dst_q_data;
+ unsigned int k;
+
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (k = 0; k < NUM_FORMATS; k++) {
+ fmt = &mtk_video_formats[k];
+ if (fmt->fourcc == pixelformat) {
+ mtk_v4l2_debug(1, "Update cap fourcc(%d -> %d)",
+ dst_q_data->fmt->fourcc, pixelformat);
+ dst_q_data->fmt = fmt;
+ return;
+ }
+ }
+
+ mtk_v4l2_err("Cannot get fourcc(%d), using init value", pixelformat);
+}
+
+static int mtk_vdec_pic_info_update(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int dpbsize = 0;
+ int ret;
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO,
+ &ctx->last_decoded_picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.pic_w == 0 ||
+ ctx->last_decoded_picinfo.pic_h == 0 ||
+ ctx->last_decoded_picinfo.buf_w == 0 ||
+ ctx->last_decoded_picinfo.buf_h == 0) {
+ mtk_v4l2_err("Cannot get correct pic info");
+ return -EINVAL;
+ }
+
+ if (ctx->last_decoded_picinfo.cap_fourcc != ctx->picinfo.cap_fourcc &&
+ ctx->picinfo.cap_fourcc != 0)
+ mtk_vdec_update_fmt(ctx, ctx->picinfo.cap_fourcc);
+
+ if (ctx->last_decoded_picinfo.pic_w == ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h == ctx->picinfo.pic_h)
+ return 0;
+
+ mtk_v4l2_debug(1, "[%d]-> new(%d,%d), old(%d,%d), real(%d,%d)", ctx->id,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, ctx->last_decoded_picinfo.buf_w,
+ ctx->last_decoded_picinfo.buf_h);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("Incorrect dpb size, ret=%d", ret);
+
+ ctx->dpb_size = dpbsize;
+
+ return ret;
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *src_buf, *dst_buf;
+ struct mtk_vcodec_mem buf;
+ struct vdec_fb *pfb;
+ bool res_chg = false;
+ int ret;
+ struct mtk_video_dec_buf *dst_buf_info, *src_buf_info;
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] src_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!dst_buf) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] dst_buf empty!!", ctx->id);
+ return;
+ }
+
+ dst_buf_info =
+ container_of(dst_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ pfb = &dst_buf_info->frame_buffer;
+ pfb->base_y.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
+ pfb->base_y.size = ctx->picinfo.fb_sz[0];
+
+ pfb->base_c.va = vb2_plane_vaddr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 1);
+ pfb->base_c.size = ctx->picinfo.fb_sz[1];
+ pfb->status = 0;
+ mtk_v4l2_debug(3, "===>[%d] vdec_if_decode() ===>", ctx->id);
+
+ mtk_v4l2_debug(3,
+ "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx",
+ dst_buf->vb2_buf.index, pfb, pfb->base_y.va,
+ &pfb->base_y.dma_addr, &pfb->base_c.dma_addr, pfb->base_y.size);
+
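+ /*
+  * The empty flush buffer marks the end of the stream: drain the
+  * decoder, return the display buffers and send back a zero-payload
+  * CAPTURE buffer carrying the LAST flag.
+  */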
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ mtk_v4l2_debug(1, "Got empty flush input buffer.");
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /* update dst buf status */
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = false;
+ mutex_unlock(&ctx->lock);
+
+ vdec_if_decode(ctx, NULL, NULL, &res_chg);
+ clean_display_buffer(ctx);
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2)
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 1, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ clean_free_buffer(ctx);
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ return;
+ }
+
+ src_buf_info =
+ container_of(src_buf, struct mtk_video_dec_buf, m2m_buf.vb);
+
+ buf.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ buf.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ buf.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ if (!buf.va) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_err("[%d] id=%d src_addr is NULL!!", ctx->id,
+ src_buf->vb2_buf.index);
+ return;
+ }
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, buf.va, &buf.dma_addr, buf.size, src_buf);
+ dst_buf->vb2_buf.timestamp = src_buf->vb2_buf.timestamp;
+ dst_buf->timecode = src_buf->timecode;
+ mutex_lock(&ctx->lock);
+ dst_buf_info->used = true;
+ mutex_unlock(&ctx->lock);
+ src_buf_info->used = true;
+
+ ret = vdec_if_decode(ctx, &buf, pfb, &res_chg);
+
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu dst_buf[%d] vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, src_buf->vb2_buf.index, buf.size,
+ src_buf->vb2_buf.timestamp, dst_buf->vb2_buf.index, ret, res_chg);
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ src_buf_info->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else if (!res_chg) {
+ /*
+ * Only return the source buffer with VB2_BUF_STATE_DONE when
+ * decoding succeeded without a resolution change.
+ */
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+
+ dst_buf = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx);
+ clean_display_buffer(ctx);
+ clean_free_buffer(ctx);
+
+ if (!ret && res_chg) {
+ mtk_vdec_pic_info_update(ctx);
+ /*
+ * On encountering a resolution change in the stream, the
+ * driver must first process and decode all remaining
+ * buffers from before the resolution change point, so
+ * flush the decoder here.
+ */
+ mtk_vdec_flush_decoder(ctx);
+ /*
+ * Once all buffers containing decoded frames from before
+ * the resolution change point are ready to be dequeued on
+ * the CAPTURE queue, the driver sends a
+ * V4L2_EVENT_SOURCE_CHANGE event with change type
+ * V4L2_EVENT_SRC_CH_RESOLUTION.
+ */
+ mtk_vdec_queue_res_chg_event(ctx);
+ }
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+}
+
+static void vb2ops_vdec_stateful_buf_queue(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *src_buf;
+ struct mtk_vcodec_mem src_mem;
+ bool res_chg = false;
+ int ret;
+ unsigned int dpbsize = 1, i;
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ struct mtk_q_data *dst_q_data;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb->vb2_queue->type, vb->index, vb);
+ /*
+ * If this is a CAPTURE buffer, check whether the decoder is still
+ * using it before queueing it back to vb2.
+ */
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
+ struct mtk_video_dec_buf *buf;
+
+ vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+ buf = container_of(vb2_v4l2, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ mutex_lock(&ctx->lock);
+ if (!buf->used) {
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ buf->queued_in_vb2 = true;
+ buf->queued_in_v4l2 = true;
+ } else {
+ buf->queued_in_vb2 = false;
+ buf->queued_in_v4l2 = true;
+ }
+ mutex_unlock(&ctx->lock);
+ return;
+ }
+
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, to_vb2_v4l2_buffer(vb));
+
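+ /*
+  * In the INIT state the first queued OUTPUT buffer is parsed right
+  * away (decode with a NULL frame buffer) to obtain the picture info
+  * and DPB size before streaming starts.
+  */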
+ if (ctx->state != MTK_STATE_INIT) {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id,
+ ctx->state);
+ return;
+ }
+
+ src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!src_buf) {
+ mtk_v4l2_err("No src buffer");
+ return;
+ }
+
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ /* This shouldn't happen. Just in case. */
+ mtk_v4l2_err("Invalid flush buffer.");
+ v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ return;
+ }
+
+ src_mem.va = vb2_plane_vaddr(&src_buf->vb2_buf, 0);
+ src_mem.dma_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
+ src_mem.size = (size_t)src_buf->vb2_buf.planes[0].bytesused;
+ mtk_v4l2_debug(2, "[%d] buf id=%d va=%p dma=%pad size=%zx", ctx->id,
+ src_buf->vb2_buf.index, src_mem.va, &src_mem.dma_addr,
+ src_mem.size);
+
+ ret = vdec_if_decode(ctx, &src_mem, NULL, &res_chg);
+ if (ret || !res_chg) {
+ /*
+ * fb == NULL means the call only parses the SPS/PPS header
+ * and resolution info in src_mem. Decoding can fail if the
+ * bitstream contains no SPS header or picture info.
+ */
+
+ src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+ if (ret == -EIO) {
+ mtk_v4l2_err("[%d] Unrecoverable error in vdec_if_decode.", ctx->id);
+ ctx->state = MTK_STATE_ABORT;
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ } else {
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_DONE);
+ }
+ mtk_v4l2_debug(ret ? 0 : 1,
+ "[%d] vdec_if_decode() src_buf=%d, size=%zu, fail=%d, res_chg=%d",
+ ctx->id, src_buf->vb2_buf.index, src_mem.size, ret, res_chg);
+ return;
+ }
+
+ if (vdec_if_get_param(ctx, GET_PARAM_PIC_INFO, &ctx->picinfo)) {
+ mtk_v4l2_err("[%d]Error!! Cannot get param : GET_PARAM_PICTURE_INFO ERR", ctx->id);
+ return;
+ }
+
+ ctx->last_decoded_picinfo = ctx->picinfo;
+ dst_q_data = &ctx->q_data[MTK_Q_DATA_DST];
+ for (i = 0; i < dst_q_data->fmt->num_planes; i++) {
+ dst_q_data->sizeimage[i] = ctx->picinfo.fb_sz[i];
+ dst_q_data->bytesperline[i] = ctx->picinfo.buf_w;
+ }
+
+ mtk_v4l2_debug(2, "[%d] vdec_if_init() OK wxh=%dx%d pic wxh=%dx%d sz[0]=0x%x sz[1]=0x%x",
+ ctx->id, ctx->picinfo.buf_w, ctx->picinfo.buf_h, ctx->picinfo.pic_w,
+ ctx->picinfo.pic_h, dst_q_data->sizeimage[0], dst_q_data->sizeimage[1]);
+
+ ret = vdec_if_get_param(ctx, GET_PARAM_DPB_SIZE, &dpbsize);
+ if (dpbsize == 0)
+ mtk_v4l2_err("[%d] GET_PARAM_DPB_SIZE fail=%d", ctx->id, ret);
+
+ ctx->dpb_size = dpbsize;
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "[%d] dpbsize=%d", ctx->id, ctx->dpb_size);
+
+ mtk_vdec_queue_res_chg_event(ctx);
+}
+
+static int mtk_vdec_g_v_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mtk_vcodec_ctx *ctx = ctrl_to_ctx(ctrl);
+ int ret = 0;
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ if (ctx->state >= MTK_STATE_HEADER) {
+ ctrl->val = ctx->dpb_size;
+ } else {
+ mtk_v4l2_debug(0, "Seqinfo not ready");
+ ctrl->val = 0;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mtk_vcodec_dec_ctrl_ops = {
+ .g_volatile_ctrl = mtk_vdec_g_v_ctrl,
+};
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ struct v4l2_ctrl *ctrl;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, 1);
+
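+ /*
+  * V4L2_CID_MIN_BUFFERS_FOR_CAPTURE is marked volatile so that every
+  * read goes through mtk_vdec_g_v_ctrl() and reports the DPB size
+  * parsed from the current stream.
+  */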
+ ctrl = v4l2_ctrl_new_std(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 0, 32, 1, 1);
+ ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0, 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0);
+ /*
+ * H264. Baseline / Extended decoding is not supported.
+ */
+ v4l2_ctrl_new_std_menu(&ctx->ctrl_hdl, &mtk_vcodec_dec_ctrl_ops,
+ V4L2_CID_MPEG_VIDEO_H264_PROFILE, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
+
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control failed %d", ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+ return 0;
+}
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+}
+
+static struct vb2_ops mtk_vdec_frame_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+
+ .buf_queue = vb2ops_vdec_stateful_buf_queue,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8173_pdata = {
+ .chip = MTK_MT8173,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_frame_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
new file mode 100644
index 000000000000..8f4a1f0a0769
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_dec_stateless.c
@@ -0,0 +1,360 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-mem2mem.h>
+#include <linux/module.h>
+
+#include "mtk_vcodec_drv.h"
+#include "mtk_vcodec_dec.h"
+#include "mtk_vcodec_intr.h"
+#include "mtk_vcodec_util.h"
+#include "mtk_vcodec_dec_pm.h"
+#include "vdec_drv_if.h"
+
+/**
+ * struct mtk_stateless_control - CID control type
+ * @cfg: control configuration
+ * @codec_type: codec type (V4L2 pixel format) for CID control type
+ */
+struct mtk_stateless_control {
+ struct v4l2_ctrl_config cfg;
+ int codec_type;
+};
+
+static const struct mtk_stateless_control mtk_stateless_controls[] = {
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_PPS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_SCALING_MATRIX,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_PARAMS,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE,
+ .def = V4L2_MPEG_VIDEO_H264_PROFILE_MAIN,
+ .max = V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
+ .menu_skip_mask =
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE) |
+ BIT(V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED),
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_DECODE_MODE,
+ .min = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .def = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ .max = V4L2_STATELESS_H264_DECODE_MODE_FRAME_BASED,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ },
+ {
+ .cfg = {
+ .id = V4L2_CID_STATELESS_H264_START_CODE,
+ .min = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .def = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ .max = V4L2_STATELESS_H264_START_CODE_ANNEX_B,
+ },
+ .codec_type = V4L2_PIX_FMT_H264_SLICE,
+ }
+};
+
+#define NUM_CTRLS ARRAY_SIZE(mtk_stateless_controls)
+
+static const struct mtk_video_fmt mtk_video_formats[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .type = MTK_FMT_DEC,
+ .num_planes = 1,
+ },
+ {
+ .fourcc = V4L2_PIX_FMT_MM21,
+ .type = MTK_FMT_FRAME,
+ .num_planes = 2,
+ },
+};
+
+#define NUM_FORMATS ARRAY_SIZE(mtk_video_formats)
+#define DEFAULT_OUT_FMT_IDX 0
+#define DEFAULT_CAP_FMT_IDX 1
+
+static const struct mtk_codec_framesizes mtk_vdec_framesizes[] = {
+ {
+ .fourcc = V4L2_PIX_FMT_H264_SLICE,
+ .stepwise = { MTK_VDEC_MIN_W, MTK_VDEC_MAX_W, 16,
+ MTK_VDEC_MIN_H, MTK_VDEC_MAX_H, 16 },
+ },
+};
+
+#define NUM_SUPPORTED_FRAMESIZE ARRAY_SIZE(mtk_vdec_framesizes)
+
+static void mtk_vdec_stateless_set_dst_payload(struct mtk_vcodec_ctx *ctx,
+ struct vdec_fb *fb)
+{
+ struct mtk_video_dec_buf *vdec_frame_buf =
+ container_of(fb, struct mtk_video_dec_buf, frame_buffer);
+ struct vb2_v4l2_buffer *vb = &vdec_frame_buf->m2m_buf.vb;
+ unsigned int cap_y_size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 0, cap_y_size);
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ unsigned int cap_c_size =
+ ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+
+ vb2_set_plane_payload(&vb->vb2_buf, 1, cap_c_size);
+ }
+}
+
+static struct vdec_fb *vdec_get_cap_buffer(struct mtk_vcodec_ctx *ctx,
+ struct vb2_v4l2_buffer *vb2_v4l2)
+{
+ struct mtk_video_dec_buf *framebuf =
+ container_of(vb2_v4l2, struct mtk_video_dec_buf, m2m_buf.vb);
+ struct vdec_fb *pfb = &framebuf->frame_buffer;
+ struct vb2_buffer *dst_buf = &vb2_v4l2->vb2_buf;
+
+ pfb = &framebuf->frame_buffer;
+ pfb->base_y.va = NULL;
+ pfb->base_y.dma_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
+ pfb->base_y.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[0];
+
+ if (ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ pfb->base_c.va = NULL;
+ pfb->base_c.dma_addr =
+ vb2_dma_contig_plane_dma_addr(dst_buf, 1);
+ pfb->base_c.size = ctx->q_data[MTK_Q_DATA_DST].sizeimage[1];
+ }
+ mtk_v4l2_debug(1, "id=%d Framebuf pfb=%p VA=%p Y_DMA=%pad C_DMA=%pad Size=%zx frame_count = %d",
+ dst_buf->index, pfb, pfb->base_y.va, &pfb->base_y.dma_addr,
+ &pfb->base_c.dma_addr, pfb->base_y.size, ctx->decoded_frame_cnt);
+
+ return pfb;
+}
+
+static void vb2ops_vdec_buf_request_complete(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+
+ v4l2_ctrl_request_complete(vb->req_obj.req, &ctx->ctrl_hdl);
+}
+
+static void mtk_vdec_worker(struct work_struct *work)
+{
+ struct mtk_vcodec_ctx *ctx =
+ container_of(work, struct mtk_vcodec_ctx, decode_work);
+ struct mtk_vcodec_dev *dev = ctx->dev;
+ struct vb2_v4l2_buffer *vb2_v4l2_src, *vb2_v4l2_dst;
+ struct vb2_buffer *vb2_src;
+ struct mtk_vcodec_mem *bs_src;
+ struct mtk_video_dec_buf *dec_buf_src;
+ struct media_request *src_buf_req;
+ struct vdec_fb *dst_buf;
+ bool res_chg = false;
+ int ret;
+
+ vb2_v4l2_src = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_src) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available source buffer", ctx->id);
+ return;
+ }
+
+ vb2_v4l2_dst = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
+ if (!vb2_v4l2_dst) {
+ v4l2_m2m_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx);
+ mtk_v4l2_debug(1, "[%d] no available destination buffer", ctx->id);
+ return;
+ }
+
+ vb2_src = &vb2_v4l2_src->vb2_buf;
+ dec_buf_src = container_of(vb2_v4l2_src, struct mtk_video_dec_buf,
+ m2m_buf.vb);
+ bs_src = &dec_buf_src->bs_buffer;
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id,
+ vb2_src->vb2_queue->type, vb2_src->index, vb2_src);
+
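+ /* Only the DMA address of the bitstream is handed to the decoder; no CPU mapping is set up here. */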
+ bs_src->va = NULL;
+ bs_src->dma_addr = vb2_dma_contig_plane_dma_addr(vb2_src, 0);
+ bs_src->size = (size_t)vb2_src->planes[0].bytesused;
+
+ mtk_v4l2_debug(3, "[%d] Bitstream VA=%p DMA=%pad Size=%zx vb=%p",
+ ctx->id, bs_src->va, &bs_src->dma_addr, bs_src->size, vb2_src);
+ /* Apply request controls. */
+ src_buf_req = vb2_src->req_obj.req;
+ if (src_buf_req)
+ v4l2_ctrl_request_setup(src_buf_req, &ctx->ctrl_hdl);
+ else
+ mtk_v4l2_err("vb2 buffer media request is NULL");
+
+ dst_buf = vdec_get_cap_buffer(ctx, vb2_v4l2_dst);
+ v4l2_m2m_buf_copy_metadata(vb2_v4l2_src, vb2_v4l2_dst, true);
+ ret = vdec_if_decode(ctx, bs_src, dst_buf, &res_chg);
+ if (ret) {
+ mtk_v4l2_err(" <===[%d], src_buf[%d] sz=0x%zx pts=%llu vdec_if_decode() ret=%d res_chg=%d===>",
+ ctx->id, vb2_src->index, bs_src->size,
+ vb2_src->timestamp, ret, res_chg);
+ if (ret == -EIO) {
+ mutex_lock(&ctx->lock);
+ dec_buf_src->error = true;
+ mutex_unlock(&ctx->lock);
+ }
+ }
+
+ mtk_vdec_stateless_set_dst_payload(ctx, dst_buf);
+
+ v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev_dec, ctx->m2m_ctx,
+ ret ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
+
+ v4l2_ctrl_request_complete(src_buf_req, &ctx->ctrl_hdl);
+}
+
+static void vb2ops_vdec_stateless_buf_queue(struct vb2_buffer *vb)
+{
+ struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
+ struct vb2_v4l2_buffer *vb2_v4l2 = to_vb2_v4l2_buffer(vb);
+
+ mtk_v4l2_debug(3, "[%d] (%d) id=%d, vb=%p", ctx->id, vb->vb2_queue->type, vb->index, vb);
+
+ mutex_lock(&ctx->lock);
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, vb2_v4l2);
+ mutex_unlock(&ctx->lock);
+ if (vb->vb2_queue->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return;
+
+ /* If an OUTPUT buffer, we may need to update the state */
+ if (ctx->state == MTK_STATE_INIT) {
+ ctx->state = MTK_STATE_HEADER;
+ mtk_v4l2_debug(1, "Init driver from init to header.");
+ } else {
+ mtk_v4l2_debug(3, "[%d] already init driver %d", ctx->id, ctx->state);
+ }
+}
+
+static int mtk_vdec_flush_decoder(struct mtk_vcodec_ctx *ctx)
+{
+ bool res_chg;
+
+ return vdec_if_decode(ctx, NULL, NULL, &res_chg);
+}
+
+static int mtk_vcodec_dec_ctrls_setup(struct mtk_vcodec_ctx *ctx)
+{
+ unsigned int i;
+
+ v4l2_ctrl_handler_init(&ctx->ctrl_hdl, NUM_CTRLS);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("v4l2_ctrl_handler_init failed\n");
+ return ctx->ctrl_hdl.error;
+ }
+
+ for (i = 0; i < NUM_CTRLS; i++) {
+ struct v4l2_ctrl_config cfg = mtk_stateless_controls[i].cfg;
+
+ v4l2_ctrl_new_custom(&ctx->ctrl_hdl, &cfg, NULL);
+ if (ctx->ctrl_hdl.error) {
+ mtk_v4l2_err("Adding control %d failed %d", i, ctx->ctrl_hdl.error);
+ return ctx->ctrl_hdl.error;
+ }
+ }
+
+ v4l2_ctrl_handler_setup(&ctx->ctrl_hdl);
+
+ return 0;
+}
+
+static int fops_media_request_validate(struct media_request *mreq)
+{
+ const unsigned int buffer_cnt = vb2_request_buffer_cnt(mreq);
+
+ switch (buffer_cnt) {
+ case 1:
+ /* We expect exactly one buffer with the request */
+ break;
+ case 0:
+ mtk_v4l2_debug(1, "No buffer provided with the request");
+ return -ENOENT;
+ default:
+ mtk_v4l2_debug(1, "Too many buffers (%d) provided with the request",
+ buffer_cnt);
+ return -EINVAL;
+ }
+
+ return vb2_request_validate(mreq);
+}
+
+const struct media_device_ops mtk_vcodec_media_ops = {
+ .req_validate = fops_media_request_validate,
+ .req_queue = v4l2_m2m_request_queue,
+};
+
+static void mtk_init_vdec_params(struct mtk_vcodec_ctx *ctx)
+{
+ struct vb2_queue *src_vq;
+
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+
+ /* Support request api for output plane */
+ src_vq->supports_requests = true;
+ src_vq->requires_requests = true;
+}
+
+static int vb2ops_vdec_out_buf_validate(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->field = V4L2_FIELD_NONE;
+ return 0;
+}
+
+static struct vb2_ops mtk_vdec_request_vb2_ops = {
+ .queue_setup = vb2ops_vdec_queue_setup,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = vb2ops_vdec_start_streaming,
+ .stop_streaming = vb2ops_vdec_stop_streaming,
+
+ .buf_queue = vb2ops_vdec_stateless_buf_queue,
+ .buf_out_validate = vb2ops_vdec_out_buf_validate,
+ .buf_init = vb2ops_vdec_buf_init,
+ .buf_prepare = vb2ops_vdec_buf_prepare,
+ .buf_finish = vb2ops_vdec_buf_finish,
+ .buf_request_complete = vb2ops_vdec_buf_request_complete,
+};
+
+const struct mtk_vcodec_dec_pdata mtk_vdec_8183_pdata = {
+ .chip = MTK_MT8183,
+ .init_vdec_params = mtk_init_vdec_params,
+ .ctrls_setup = mtk_vcodec_dec_ctrls_setup,
+ .vdec_vb2_ops = &mtk_vdec_request_vb2_ops,
+ .vdec_formats = mtk_video_formats,
+ .num_formats = NUM_FORMATS,
+ .default_out_fmt = &mtk_video_formats[DEFAULT_OUT_FMT_IDX],
+ .default_cap_fmt = &mtk_video_formats[DEFAULT_CAP_FMT_IDX],
+ .vdec_framesizes = mtk_vdec_framesizes,
+ .num_framesizes = NUM_SUPPORTED_FRAMESIZE,
+ .uses_stateless_api = true,
+ .worker = mtk_vdec_worker,
+ .flush_decoder = mtk_vdec_flush_decoder,
+};
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
index c6c7672fecfb..581522177308 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_drv.h
@@ -13,6 +13,7 @@
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
#include <media/videobuf2-core.h>
#include "mtk_vcodec_util.h"
@@ -249,7 +250,10 @@ struct vdec_pic_info {
* @decode_work: worker for the decoding
* @encode_work: worker for the encoding
* @last_decoded_picinfo: pic information get from latest decode
- * @empty_flush_buf: a fake size-0 capture buffer that indicates flush
+ * @empty_flush_buf: a fake size-0 capture buffer that indicates flush. Only
+ * to be used with encoder and stateful decoder.
+ * @is_flushing: set to true if flushing is in progress.
+ * @current_codec: currently set input codec, in V4L2 pixel format
*
* @colorspace: enum v4l2_colorspace; supplemental to pixelformat
* @ycbcr_enc: enum v4l2_ycbcr_encoding, Y'CbCr encoding
@@ -288,7 +292,10 @@ struct mtk_vcodec_ctx {
struct work_struct decode_work;
struct work_struct encode_work;
struct vdec_pic_info last_decoded_picinfo;
- struct mtk_video_dec_buf *empty_flush_buf;
+ struct v4l2_m2m_buffer empty_flush_buf;
+ bool is_flushing;
+
+ u32 current_codec;
enum v4l2_colorspace colorspace;
enum v4l2_ycbcr_encoding ycbcr_enc;
@@ -304,6 +311,50 @@ enum mtk_chip {
MTK_MT8173,
MTK_MT8183,
MTK_MT8192,
+ MTK_MT8195,
+};
+
+/**
+ * struct mtk_vcodec_dec_pdata - compatible data for each IC
+ * @init_vdec_params: init vdec params
+ * @ctrls_setup: init vcodec dec ctrls
+ * @worker: worker to start a decode job
+ * @flush_decoder: function that flushes the decoder
+ *
+ * @vdec_vb2_ops: struct vb2_ops
+ *
+ * @vdec_formats: supported video decoder formats
+ * @num_formats: count of video decoder formats
+ * @default_out_fmt: default output buffer format
+ * @default_cap_fmt: default capture buffer format
+ *
+ * @vdec_framesizes: supported video decoder frame sizes
+ * @num_framesizes: count of video decoder frame sizes
+ *
+ * @chip: chip this decoder is compatible with
+ *
+ * @uses_stateless_api: whether the decoder uses the stateless API with requests
+ */
+
+struct mtk_vcodec_dec_pdata {
+ void (*init_vdec_params)(struct mtk_vcodec_ctx *ctx);
+ int (*ctrls_setup)(struct mtk_vcodec_ctx *ctx);
+ void (*worker)(struct work_struct *work);
+ int (*flush_decoder)(struct mtk_vcodec_ctx *ctx);
+
+ struct vb2_ops *vdec_vb2_ops;
+
+ const struct mtk_video_fmt *vdec_formats;
+ const int num_formats;
+ const struct mtk_video_fmt *default_out_fmt;
+ const struct mtk_video_fmt *default_cap_fmt;
+
+ const struct mtk_codec_framesizes *vdec_framesizes;
+ const int num_framesizes;
+
+ enum mtk_chip chip;
+
+ bool uses_stateless_api;
};
/**
@@ -339,6 +390,7 @@ struct mtk_vcodec_enc_pdata {
* struct mtk_vcodec_dev - driver data
* @v4l2_dev: V4L2 device to register video devices for.
* @vfd_dec: Video device for decoder
+ * @mdev_dec: Media device for decoder
* @vfd_enc: Video device for encoder.
*
* @m2m_dev_dec: m2m device for decoder
@@ -349,6 +401,7 @@ struct mtk_vcodec_enc_pdata {
* @curr_ctx: The context that is waiting for codec hardware
*
* @reg_base: Mapped address of MTK Vcodec registers.
+ * @vdec_pdata: decoder IC-specific data
* @venc_pdata: encoder IC-specific data
*
* @fw_handler: used to communicate with the firmware.
@@ -375,6 +428,7 @@ struct mtk_vcodec_enc_pdata {
struct mtk_vcodec_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd_dec;
+ struct media_device mdev_dec;
struct video_device *vfd_enc;
struct v4l2_m2m_dev *m2m_dev_dec;
@@ -384,6 +438,7 @@ struct mtk_vcodec_dev {
spinlock_t irqlock;
struct mtk_vcodec_ctx *curr_ctx;
void __iomem *reg_base[NUM_MAX_VCODEC_REG_BASE];
+ const struct mtk_vcodec_dec_pdata *vdec_pdata;
const struct mtk_vcodec_enc_pdata *venc_pdata;
struct mtk_vcodec_fw *fw_handler;
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
index 416f356af363..7457451ebff0 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc.c
@@ -672,6 +672,7 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ int ret;
if (ctx->state == MTK_STATE_ABORT) {
mtk_v4l2_err("[%d] Call on QBUF after unrecoverable error",
@@ -679,7 +680,83 @@ static int vidioc_venc_dqbuf(struct file *file, void *priv,
return -EIO;
}
- return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ ret = v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
+ if (ret)
+ return ret;
+
+ /*
+ * Complete flush if the user dequeued the 0-payload LAST buffer.
+ * We check the payload because a buffer with the LAST flag can also
+ * be seen during resolution changes. If we happen to be flushing at
+ * that time, the last buffer before the resolution change could be
+ * mistaken for the buffer generated by the flush, terminating the
+ * flush earlier than we want.
+ */
+ if (!V4L2_TYPE_IS_OUTPUT(buf->type) &&
+ buf->flags & V4L2_BUF_FLAG_LAST &&
+ buf->m.planes[0].bytesused == 0 &&
+ ctx->is_flushing) {
+ /*
+ * Last CAPTURE buffer is dequeued, we can allow another flush
+ * to take place.
+ */
+ ctx->is_flushing = false;
+ }
+
+ return 0;
+}
+
+static int vidioc_encoder_cmd(struct file *file, void *priv,
+ struct v4l2_encoder_cmd *cmd)
+{
+ struct mtk_vcodec_ctx *ctx = fh_to_ctx(priv);
+ struct vb2_queue *src_vq, *dst_vq;
+ int ret;
+
+ if (ctx->state == MTK_STATE_ABORT) {
+ mtk_v4l2_err("[%d] Call to CMD after unrecoverable error",
+ ctx->id);
+ return -EIO;
+ }
+
+ ret = v4l2_m2m_ioctl_try_encoder_cmd(file, priv, cmd);
+ if (ret)
+ return ret;
+
+ /* Calling START or STOP is invalid if a flush is in progress */
+ if (ctx->is_flushing)
+ return -EBUSY;
+
+ mtk_v4l2_debug(1, "encoder cmd=%u", cmd->cmd);
+
+ dst_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+ switch (cmd->cmd) {
+ case V4L2_ENC_CMD_STOP:
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (!vb2_is_streaming(src_vq)) {
+ mtk_v4l2_debug(1, "Output stream is off. No need to flush.");
+ return 0;
+ }
+ if (!vb2_is_streaming(dst_vq)) {
+ mtk_v4l2_debug(1, "Capture stream is off. No need to flush.");
+ return 0;
+ }
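+ /*
+ * Queue the empty flush buffer; when the worker reaches it, a
+ * zero-payload CAPTURE buffer with the LAST flag is returned and
+ * is_flushing is cleared once user space dequeues that buffer.
+ */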
+ ctx->is_flushing = true;
+ v4l2_m2m_buf_queue(ctx->m2m_ctx, &ctx->empty_flush_buf.vb);
+ v4l2_m2m_try_schedule(ctx->m2m_ctx);
+ break;
+
+ case V4L2_ENC_CMD_START:
+ vb2_clear_last_buffer_dequeued(dst_vq);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
}
const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
@@ -715,6 +792,9 @@ const struct v4l2_ioctl_ops mtk_venc_ioctl_ops = {
.vidioc_g_selection = vidioc_venc_g_selection,
.vidioc_s_selection = vidioc_venc_s_selection,
+
+ .vidioc_encoder_cmd = vidioc_encoder_cmd,
+ .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd,
};
static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
@@ -793,7 +873,7 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
{
struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(q);
struct venc_enc_param param;
- int ret;
+ int ret, pm_ret;
int i;
/* Once state turn into MTK_STATE_ABORT, we need stop_streaming
@@ -845,9 +925,9 @@ static int vb2ops_venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
err_set_param:
- ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
- if (ret < 0)
- mtk_v4l2_err("pm_runtime_put fail %d", ret);
+ pm_ret = pm_runtime_put(&ctx->dev->plat_dev->dev);
+ if (pm_ret < 0)
+ mtk_v4l2_err("pm_runtime_put fail %d", pm_ret);
err_start_stream:
for (i = 0; i < q->num_buffers; ++i) {
@@ -882,9 +962,38 @@ static void vb2ops_venc_stop_streaming(struct vb2_queue *q)
dst_buf->vb2_buf.planes[0].bytesused = 0;
v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_ERROR);
}
+ /* STREAMOFF on the CAPTURE queue completes any ongoing flush */
+ if (ctx->is_flushing) {
+ struct v4l2_m2m_buffer *b, *n;
+
+ mtk_v4l2_debug(1, "STREAMOFF called while flushing");
+ /*
+ * STREAMOFF could be called before the flush buffer is
+ * dequeued. Check whether empty flush buf is still in
+ * queue before removing it.
+ */
+ v4l2_m2m_for_each_src_buf_safe(ctx->m2m_ctx, b, n) {
+ if (b == &ctx->empty_flush_buf) {
+ v4l2_m2m_src_buf_remove_by_buf(ctx->m2m_ctx, &b->vb);
+ break;
+ }
+ }
+ ctx->is_flushing = false;
+ }
} else {
- while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx)))
- v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ while ((src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx))) {
+ if (src_buf != &ctx->empty_flush_buf.vb)
+ v4l2_m2m_buf_done(src_buf, VB2_BUF_STATE_ERROR);
+ }
+ if (ctx->is_flushing) {
+ /*
+ * If we are in the middle of a flush, put the flush
+ * buffer back into the queue so the next CAPTURE
+ * buffer gets returned with the LAST flag set.
+ */
+ v4l2_m2m_buf_queue(ctx->m2m_ctx,
+ &ctx->empty_flush_buf.vb);
+ }
}
if ((q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE &&
@@ -984,12 +1093,15 @@ static int mtk_venc_param_change(struct mtk_vcodec_ctx *ctx)
{
struct venc_enc_param enc_prm;
struct vb2_v4l2_buffer *vb2_v4l2 = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
- struct mtk_video_enc_buf *mtk_buf =
- container_of(vb2_v4l2, struct mtk_video_enc_buf,
- m2m_buf.vb);
-
+ struct mtk_video_enc_buf *mtk_buf;
int ret = 0;
+ /* Don't upcast the empty flush buffer */
+ if (vb2_v4l2 == &ctx->empty_flush_buf.vb)
+ return 0;
+
+ mtk_buf = container_of(vb2_v4l2, struct mtk_video_enc_buf, m2m_buf.vb);
+
memset(&enc_prm, 0, sizeof(enc_prm));
if (mtk_buf->param_change == MTK_ENCODE_PARAM_NONE)
return 0;
@@ -1075,6 +1187,20 @@ static void mtk_venc_worker(struct work_struct *work)
}
src_buf = v4l2_m2m_src_buf_remove(ctx->m2m_ctx);
+
+ /*
+ * If we see the flush buffer, send an empty buffer with the LAST flag
+ * to the client. is_flushing will be reset at the time the buffer
+ * is dequeued.
+ */
+ if (src_buf == &ctx->empty_flush_buf.vb) {
+ vb2_set_plane_payload(&dst_buf->vb2_buf, 0, 0);
+ dst_buf->flags |= V4L2_BUF_FLAG_LAST;
+ v4l2_m2m_buf_done(dst_buf, VB2_BUF_STATE_DONE);
+ v4l2_m2m_job_finish(ctx->dev->m2m_dev_enc, ctx->m2m_ctx);
+ return;
+ }
+
memset(&frm_buf, 0, sizeof(frm_buf));
for (i = 0; i < src_buf->vb2_buf.num_planes ; i++) {
frm_buf.fb_addr[i].dma_addr =
diff --git a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
index 45d1870c83dd..eed67394cf46 100644
--- a/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
+++ b/drivers/media/platform/mtk-vcodec/mtk_vcodec_enc_drv.c
@@ -26,7 +26,7 @@
module_param(mtk_v4l2_dbg_level, int, S_IRUGO | S_IWUSR);
module_param(mtk_vcodec_dbg, bool, S_IRUGO | S_IWUSR);
-static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
+static const struct mtk_video_fmt mtk_video_formats_output[] = {
{
.fourcc = V4L2_PIX_FMT_NV12M,
.type = MTK_FMT_FRAME,
@@ -49,7 +49,7 @@ static const struct mtk_video_fmt mtk_video_formats_output_mt8173[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_h264[] = {
{
.fourcc = V4L2_PIX_FMT_H264,
.type = MTK_FMT_ENC,
@@ -57,7 +57,7 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_avc[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
+static const struct mtk_video_fmt mtk_video_formats_capture_vp8[] = {
{
.fourcc = V4L2_PIX_FMT_VP8,
.type = MTK_FMT_ENC,
@@ -65,14 +65,6 @@ static const struct mtk_video_fmt mtk_video_formats_capture_mt8173_vp8[] = {
},
};
-static const struct mtk_video_fmt mtk_video_formats_capture_mt8183[] = {
- {
- .fourcc = V4L2_PIX_FMT_H264,
- .type = MTK_FMT_ENC,
- .num_planes = 1,
- },
-};
-
/* Wake up context wait_queue */
static void wake_up_ctx(struct mtk_vcodec_ctx *ctx, unsigned int reason)
{
@@ -131,6 +123,7 @@ static int fops_vcodec_open(struct file *file)
struct mtk_vcodec_dev *dev = video_drvdata(file);
struct mtk_vcodec_ctx *ctx = NULL;
int ret = 0;
+ struct vb2_queue *src_vq;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
@@ -157,13 +150,16 @@ static int fops_vcodec_open(struct file *file)
goto err_ctrls_setup;
}
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev_enc, ctx,
- &mtk_vcodec_enc_queue_init);
+ &mtk_vcodec_enc_queue_init);
if (IS_ERR((__force void *)ctx->m2m_ctx)) {
ret = PTR_ERR((__force void *)ctx->m2m_ctx);
mtk_v4l2_err("Failed to v4l2_m2m_ctx_init() (%d)",
ret);
goto err_m2m_ctx_init;
}
+ src_vq = v4l2_m2m_get_vq(ctx->m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ ctx->empty_flush_buf.vb.vb2_buf.vb2_queue = src_vq;
mtk_vcodec_enc_set_default_params(ctx);
if (v4l2_fh_is_singular(&ctx->fh)) {
@@ -392,34 +388,33 @@ err_enc_pm:
static const struct mtk_vcodec_enc_pdata mt8173_avc_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_avc,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_avc),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
- .min_bitrate = 1,
- .max_bitrate = 4000000,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 60000000,
.core_id = VENC_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8173_vp8_pdata = {
.chip = MTK_MT8173,
- .capture_formats = mtk_video_formats_capture_mt8173_vp8,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8173_vp8),
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_vp8,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_vp8),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
- .max_bitrate = 4000000,
+ .max_bitrate = 9000000,
.core_id = VENC_LT_SYS,
};
static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
.chip = MTK_MT8183,
.uses_ext = true,
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8183 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 40000000,
.core_id = VENC_SYS,
@@ -428,16 +423,27 @@ static const struct mtk_vcodec_enc_pdata mt8183_pdata = {
static const struct mtk_vcodec_enc_pdata mt8192_pdata = {
.chip = MTK_MT8192,
.uses_ext = true,
- /* MT8192 supports the same capture formats as MT8183 */
- .capture_formats = mtk_video_formats_capture_mt8183,
- .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_mt8183),
- /* MT8192 supports the same output formats as MT8173 */
- .output_formats = mtk_video_formats_output_mt8173,
- .num_output_formats = ARRAY_SIZE(mtk_video_formats_output_mt8173),
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
.min_bitrate = 64,
.max_bitrate = 100000000,
.core_id = VENC_SYS,
};
+
+static const struct mtk_vcodec_enc_pdata mt8195_pdata = {
+ .chip = MTK_MT8195,
+ .uses_ext = true,
+ .capture_formats = mtk_video_formats_capture_h264,
+ .num_capture_formats = ARRAY_SIZE(mtk_video_formats_capture_h264),
+ .output_formats = mtk_video_formats_output,
+ .num_output_formats = ARRAY_SIZE(mtk_video_formats_output),
+ .min_bitrate = 64,
+ .max_bitrate = 100000000,
+ .core_id = VENC_SYS,
+};
+
static const struct of_device_id mtk_vcodec_enc_match[] = {
{.compatible = "mediatek,mt8173-vcodec-enc",
.data = &mt8173_avc_pdata},
@@ -445,6 +451,7 @@ static const struct of_device_id mtk_vcodec_enc_match[] = {
.data = &mt8173_vp8_pdata},
{.compatible = "mediatek,mt8183-vcodec-enc", .data = &mt8183_pdata},
{.compatible = "mediatek,mt8192-vcodec-enc", .data = &mt8192_pdata},
+ {.compatible = "mediatek,mt8195-vcodec-enc", .data = &mt8195_pdata},
{},
};
MODULE_DEVICE_TABLE(of, mtk_vcodec_enc_match);
diff --git a/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
new file mode 100644
index 000000000000..946c23088308
--- /dev/null
+++ b/drivers/media/platform/mtk-vcodec/vdec/vdec_h264_req_if.c
@@ -0,0 +1,774 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/v4l2-h264.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "../mtk_vcodec_util.h"
+#include "../mtk_vcodec_dec.h"
+#include "../mtk_vcodec_intr.h"
+#include "../vdec_drv_base.h"
+#include "../vdec_drv_if.h"
+#include "../vdec_vpu_if.h"
+
+#define BUF_PREDICTION_SZ (64 * 4096)
+#define MB_UNIT_LEN 16
+
+/* get used parameters for sps/pps */
+#define GET_MTK_VDEC_FLAG(cond, flag) \
+ { dst_param->cond = ((src_param->flags & (flag)) ? (1) : (0)); }
+#define GET_MTK_VDEC_PARAM(param) \
+ { dst_param->param = src_param->param; }
+/* motion vector size (bytes) for every macro block */
+#define HW_MB_STORE_SZ 64
+
+#define H264_MAX_FB_NUM 17
+#define H264_MAX_MV_NUM 32
+#define HDR_PARSING_BUF_SZ 1024
+
+/**
+ * struct mtk_h264_dpb_info - h264 dpb information
+ * @y_dma_addr: Y bitstream physical address
+ * @c_dma_addr: CbCr bitstream physical address
+ * @reference_flag: reference picture flag (short/long term reference picture)
+ * @field: field picture flag
+ */
+struct mtk_h264_dpb_info {
+ dma_addr_t y_dma_addr;
+ dma_addr_t c_dma_addr;
+ int reference_flag;
+ int field;
+};
+
+/*
+ * struct mtk_h264_sps_param - parameters for sps
+ */
+struct mtk_h264_sps_param {
+ unsigned char chroma_format_idc;
+ unsigned char bit_depth_luma_minus8;
+ unsigned char bit_depth_chroma_minus8;
+ unsigned char log2_max_frame_num_minus4;
+ unsigned char pic_order_cnt_type;
+ unsigned char log2_max_pic_order_cnt_lsb_minus4;
+ unsigned char max_num_ref_frames;
+ unsigned char separate_colour_plane_flag;
+ unsigned short pic_width_in_mbs_minus1;
+ unsigned short pic_height_in_map_units_minus1;
+ unsigned int max_frame_nums;
+ unsigned char qpprime_y_zero_transform_bypass_flag;
+ unsigned char delta_pic_order_always_zero_flag;
+ unsigned char frame_mbs_only_flag;
+ unsigned char mb_adaptive_frame_field_flag;
+ unsigned char direct_8x8_inference_flag;
+ unsigned char reserved[3];
+};
+
+/*
+ * struct mtk_h264_pps_param - parameters for pps
+ */
+struct mtk_h264_pps_param {
+ unsigned char num_ref_idx_l0_default_active_minus1;
+ unsigned char num_ref_idx_l1_default_active_minus1;
+ unsigned char weighted_bipred_idc;
+ char pic_init_qp_minus26;
+ char chroma_qp_index_offset;
+ char second_chroma_qp_index_offset;
+ unsigned char entropy_coding_mode_flag;
+ unsigned char pic_order_present_flag;
+ unsigned char deblocking_filter_control_present_flag;
+ unsigned char constrained_intra_pred_flag;
+ unsigned char weighted_pred_flag;
+ unsigned char redundant_pic_cnt_present_flag;
+ unsigned char transform_8x8_mode_flag;
+ unsigned char scaling_matrix_present_flag;
+ unsigned char reserved[2];
+};
+
+struct slice_api_h264_scaling_matrix {
+ unsigned char scaling_list_4x4[6][16];
+ unsigned char scaling_list_8x8[6][64];
+};
+
+struct slice_h264_dpb_entry {
+ unsigned long long reference_ts;
+ unsigned short frame_num;
+ unsigned short pic_num;
+ /* Note that field is indicated by v4l2_buffer.field */
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */
+};
+
+/*
+ * struct slice_api_h264_decode_param - parameters for decode.
+ */
+struct slice_api_h264_decode_param {
+ struct slice_h264_dpb_entry dpb[16];
+ unsigned short num_slices;
+ unsigned short nal_ref_idc;
+ unsigned char ref_pic_list_p0[32];
+ unsigned char ref_pic_list_b0[32];
+ unsigned char ref_pic_list_b1[32];
+ int top_field_order_cnt;
+ int bottom_field_order_cnt;
+ unsigned int flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */
+};
+
+/*
+ * struct mtk_h264_dec_slice_param - parameters for decode current frame
+ */
+struct mtk_h264_dec_slice_param {
+ struct mtk_h264_sps_param sps;
+ struct mtk_h264_pps_param pps;
+ struct slice_api_h264_scaling_matrix scaling_matrix;
+ struct slice_api_h264_decode_param decode_params;
+ struct mtk_h264_dpb_info h264_dpb_info[16];
+};
+
+/**
+ * struct h264_fb - h264 decode frame buffer information
+ * @vdec_fb_va : virtual address of struct vdec_fb
+ * @y_fb_dma : dma address of Y frame buffer (luma)
+ * @c_fb_dma : dma address of C frame buffer (chroma)
+ * @poc : picture order count of frame buffer
+ * @reserved : for 8 bytes alignment
+ */
+struct h264_fb {
+ u64 vdec_fb_va;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ s32 poc;
+ u32 reserved;
+};
+
+/**
+ * struct vdec_h264_dec_info - decode information
+ * @dpb_sz : decoding picture buffer size
+ * @resolution_changed : resolution change happened
+ * @realloc_mv_buf : flag to notify driver to re-allocate mv buffer
+ * @cap_num_planes : number of planes of capture buffer
+ * @bs_dma : Input bit-stream buffer dma address
+ * @y_fb_dma : Y frame buffer dma address
+ * @c_fb_dma : C frame buffer dma address
+ * @vdec_fb_va : VDEC frame buffer struct virtual address
+ */
+struct vdec_h264_dec_info {
+ u32 dpb_sz;
+ u32 resolution_changed;
+ u32 realloc_mv_buf;
+ u32 cap_num_planes;
+ u64 bs_dma;
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ u64 vdec_fb_va;
+};
+
+/**
+ * struct vdec_h264_vsi - shared memory for decode information exchange
+ * between VPU and Host.
+ * The memory is allocated by the VPU and then mapped to the
+ * Host in vpu_dec_init(), and freed by the VPU in
+ * vpu_dec_deinit().
+ * AP-W/R : AP is writer/reader on this item
+ * VPU-W/R: VPU is writer/reader on this item
+ * @pred_buf_dma : HW working predication buffer dma address (AP-W, VPU-R)
+ * @mv_buf_dma : HW working motion vector buffer dma address (AP-W, VPU-R)
+ * @dec : decode information (AP-R, VPU-W)
+ * @pic : picture information (AP-R, VPU-W)
+ * @crop : crop information (AP-R, VPU-W)
+ * @h264_slice_params : the parameters that hardware use to decode
+ */
+struct vdec_h264_vsi {
+ u64 pred_buf_dma;
+ u64 mv_buf_dma[H264_MAX_MV_NUM];
+ struct vdec_h264_dec_info dec;
+ struct vdec_pic_info pic;
+ struct v4l2_rect crop;
+ struct mtk_h264_dec_slice_param h264_slice_params;
+};
+
+/**
+ * struct vdec_h264_slice_inst - h264 decoder instance
+ * @num_nalu : how many NAL units have been decoded
+ * @ctx : pointer to mtk_vcodec_ctx
+ * @pred_buf : HW working predication buffer
+ * @mv_buf : HW working motion vector buffer
+ * @vpu : VPU instance
+ * @vsi_ctx : Local VSI data for this decoding context
+ * @h264_slice_param : the parameters that hardware use to decode
+ * @dpb : decoded picture buffer used to store reference buffer information
+ */
+struct vdec_h264_slice_inst {
+ unsigned int num_nalu;
+ struct mtk_vcodec_ctx *ctx;
+ struct mtk_vcodec_mem pred_buf;
+ struct mtk_vcodec_mem mv_buf[H264_MAX_MV_NUM];
+ struct vdec_vpu_inst vpu;
+ struct vdec_h264_vsi vsi_ctx;
+ struct mtk_h264_dec_slice_param h264_slice_param;
+
+ struct v4l2_h264_dpb_entry dpb[16];
+};
+
+static void *get_ctrl_ptr(struct mtk_vcodec_ctx *ctx, int id)
+{
+ struct v4l2_ctrl *ctrl = v4l2_ctrl_find(&ctx->ctrl_hdl, id);
+
+ return ctrl->p_cur.p;
+}
+
+static void get_h264_dpb_list(struct vdec_h264_slice_inst *inst,
+ struct mtk_h264_dec_slice_param *slice_param)
+{
+ struct vb2_queue *vq;
+ struct vb2_buffer *vb;
+ struct vb2_v4l2_buffer *vb2_v4l2;
+ u64 index;
+
+ vq = v4l2_m2m_get_vq(inst->ctx->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
+
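+ /*
+  * Match each active DPB entry to a CAPTURE buffer by timestamp and
+  * record its reference type (short/long term) and plane addresses.
+  */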
+ for (index = 0; index < ARRAY_SIZE(slice_param->decode_params.dpb); index++) {
+ const struct slice_h264_dpb_entry *dpb;
+ int vb2_index;
+
+ dpb = &slice_param->decode_params.dpb[index];
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)) {
+ slice_param->h264_dpb_info[index].reference_flag = 0;
+ continue;
+ }
+
+ vb2_index = vb2_find_timestamp(vq, dpb->reference_ts, 0);
+ if (vb2_index < 0) {
+ mtk_vcodec_err(inst, "Reference invalid: dpb_index(%lld) reference_ts(%lld)",
+ index, dpb->reference_ts);
+ continue;
+ }
+ /* 1 for short term reference, 2 for long term reference */
+ if (!(dpb->flags & V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM))
+ slice_param->h264_dpb_info[index].reference_flag = 1;
+ else
+ slice_param->h264_dpb_info[index].reference_flag = 2;
+
+ vb = vq->bufs[vb2_index];
+ vb2_v4l2 = container_of(vb, struct vb2_v4l2_buffer, vb2_buf);
+ slice_param->h264_dpb_info[index].field = vb2_v4l2->field;
+
+ slice_param->h264_dpb_info[index].y_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 0);
+ if (inst->ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes == 2) {
+ slice_param->h264_dpb_info[index].c_dma_addr =
+ vb2_dma_contig_plane_dma_addr(vb, 1);
+ }
+ }
+}
+
+static void get_h264_sps_parameters(struct mtk_h264_sps_param *dst_param,
+ const struct v4l2_ctrl_h264_sps *src_param)
+{
+ GET_MTK_VDEC_PARAM(chroma_format_idc);
+ GET_MTK_VDEC_PARAM(bit_depth_luma_minus8);
+ GET_MTK_VDEC_PARAM(bit_depth_chroma_minus8);
+ GET_MTK_VDEC_PARAM(log2_max_frame_num_minus4);
+ GET_MTK_VDEC_PARAM(pic_order_cnt_type);
+ GET_MTK_VDEC_PARAM(log2_max_pic_order_cnt_lsb_minus4);
+ GET_MTK_VDEC_PARAM(max_num_ref_frames);
+ GET_MTK_VDEC_PARAM(pic_width_in_mbs_minus1);
+ GET_MTK_VDEC_PARAM(pic_height_in_map_units_minus1);
+
+ GET_MTK_VDEC_FLAG(separate_colour_plane_flag,
+ V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE);
+ GET_MTK_VDEC_FLAG(qpprime_y_zero_transform_bypass_flag,
+ V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS);
+ GET_MTK_VDEC_FLAG(delta_pic_order_always_zero_flag,
+ V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO);
+ GET_MTK_VDEC_FLAG(frame_mbs_only_flag,
+ V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY);
+ GET_MTK_VDEC_FLAG(mb_adaptive_frame_field_flag,
+ V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD);
+ GET_MTK_VDEC_FLAG(direct_8x8_inference_flag,
+ V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE);
+}
+
+static void get_h264_pps_parameters(struct mtk_h264_pps_param *dst_param,
+ const struct v4l2_ctrl_h264_pps *src_param)
+{
+ GET_MTK_VDEC_PARAM(num_ref_idx_l0_default_active_minus1);
+ GET_MTK_VDEC_PARAM(num_ref_idx_l1_default_active_minus1);
+ GET_MTK_VDEC_PARAM(weighted_bipred_idc);
+ GET_MTK_VDEC_PARAM(pic_init_qp_minus26);
+ GET_MTK_VDEC_PARAM(chroma_qp_index_offset);
+ GET_MTK_VDEC_PARAM(second_chroma_qp_index_offset);
+
+ GET_MTK_VDEC_FLAG(entropy_coding_mode_flag,
+ V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE);
+ GET_MTK_VDEC_FLAG(pic_order_present_flag,
+ V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT);
+ GET_MTK_VDEC_FLAG(weighted_pred_flag,
+ V4L2_H264_PPS_FLAG_WEIGHTED_PRED);
+ GET_MTK_VDEC_FLAG(deblocking_filter_control_present_flag,
+ V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT);
+ GET_MTK_VDEC_FLAG(constrained_intra_pred_flag,
+ V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED);
+ GET_MTK_VDEC_FLAG(redundant_pic_cnt_present_flag,
+ V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT);
+ GET_MTK_VDEC_FLAG(transform_8x8_mode_flag,
+ V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE);
+ GET_MTK_VDEC_FLAG(scaling_matrix_present_flag,
+ V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT);
+}
+
+static void
+get_h264_scaling_matrix(struct slice_api_h264_scaling_matrix *dst_matrix,
+ const struct v4l2_ctrl_h264_scaling_matrix *src_matrix)
+{
+ memcpy(dst_matrix->scaling_list_4x4, src_matrix->scaling_list_4x4,
+ sizeof(dst_matrix->scaling_list_4x4));
+
+ memcpy(dst_matrix->scaling_list_8x8, src_matrix->scaling_list_8x8,
+ sizeof(dst_matrix->scaling_list_8x8));
+}
+
+static void
+get_h264_decode_parameters(struct slice_api_h264_decode_param *dst_params,
+ const struct v4l2_ctrl_h264_decode_params *src_params,
+ const struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES])
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dst_params->dpb); i++) {
+ struct slice_h264_dpb_entry *dst_entry = &dst_params->dpb[i];
+ const struct v4l2_h264_dpb_entry *src_entry = &dpb[i];
+
+ dst_entry->reference_ts = src_entry->reference_ts;
+ dst_entry->frame_num = src_entry->frame_num;
+ dst_entry->pic_num = src_entry->pic_num;
+ dst_entry->top_field_order_cnt = src_entry->top_field_order_cnt;
+ dst_entry->bottom_field_order_cnt =
+ src_entry->bottom_field_order_cnt;
+ dst_entry->flags = src_entry->flags;
+ }
+
+ /*
+ * num_slices is a leftover from the old H.264 support and is ignored
+ * by the firmware.
+ */
+ dst_params->num_slices = 0;
+ dst_params->nal_ref_idc = src_params->nal_ref_idc;
+ dst_params->top_field_order_cnt = src_params->top_field_order_cnt;
+ dst_params->bottom_field_order_cnt = src_params->bottom_field_order_cnt;
+ dst_params->flags = src_params->flags;
+}
+
+static bool dpb_entry_match(const struct v4l2_h264_dpb_entry *a,
+ const struct v4l2_h264_dpb_entry *b)
+{
+ return a->top_field_order_cnt == b->top_field_order_cnt &&
+ a->bottom_field_order_cnt == b->bottom_field_order_cnt;
+}
+
+/*
+ * Move DPB entries of dec_param that refer to a frame already existing in dpb
+ * into the already existing slot in dpb, and move other entries into new slots.
+ *
+ * This function is an adaptation of the similarly-named function in
+ * hantro_h264.c.
+ */
+static void update_dpb(const struct v4l2_ctrl_h264_decode_params *dec_param,
+ struct v4l2_h264_dpb_entry *dpb)
+{
+ DECLARE_BITMAP(new, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(in_use, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ DECLARE_BITMAP(used, ARRAY_SIZE(dec_param->dpb)) = { 0, };
+ unsigned int i, j;
+
+ /* Disable all entries by default, and mark the ones in use. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ if (dpb[i].flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE)
+ set_bit(i, in_use);
+ dpb[i].flags &= ~V4L2_H264_DPB_ENTRY_FLAG_ACTIVE;
+ }
+
+ /* Try to match new DPB entries with existing ones by their POCs. */
+ for (i = 0; i < ARRAY_SIZE(dec_param->dpb); i++) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+
+ if (!(ndpb->flags & V4L2_H264_DPB_ENTRY_FLAG_ACTIVE))
+ continue;
+
+ /*
+ * To cut off some comparisons, iterate only on target DPB
+ * entries that were already in use.
+ */
+ for_each_set_bit(j, in_use, ARRAY_SIZE(dec_param->dpb)) {
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ cdpb = &dpb[j];
+ if (!dpb_entry_match(cdpb, ndpb))
+ continue;
+
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ /* Don't reiterate on this one. */
+ clear_bit(j, in_use);
+ break;
+ }
+
+ if (j == ARRAY_SIZE(dec_param->dpb))
+ set_bit(i, new);
+ }
+
+ /* For entries that could not be matched, use remaining free slots. */
+ for_each_set_bit(i, new, ARRAY_SIZE(dec_param->dpb)) {
+ const struct v4l2_h264_dpb_entry *ndpb = &dec_param->dpb[i];
+ struct v4l2_h264_dpb_entry *cdpb;
+
+ /*
+ * Both arrays are of the same size, so there is no way
+ * we can end up with no space in the target array, unless
+ * something is buggy.
+ */
+ j = find_first_zero_bit(used, ARRAY_SIZE(dec_param->dpb));
+ if (WARN_ON(j >= ARRAY_SIZE(dec_param->dpb)))
+ return;
+
+ cdpb = &dpb[j];
+ *cdpb = *ndpb;
+ set_bit(j, used);
+ }
+}
+
+/*
+ * The firmware expects unused reflist entries to have the value 0x20.
+ */
+static void fixup_ref_list(u8 *ref_list, size_t num_valid)
+{
+ memset(&ref_list[num_valid], 0x20, 32 - num_valid);
+}
+
+static void get_vdec_decode_parameters(struct vdec_h264_slice_inst *inst)
+{
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ const struct v4l2_ctrl_h264_sps *sps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SPS);
+ const struct v4l2_ctrl_h264_pps *pps =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_PPS);
+ const struct v4l2_ctrl_h264_scaling_matrix *scaling_matrix =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_SCALING_MATRIX);
+ struct mtk_h264_dec_slice_param *slice_param = &inst->h264_slice_param;
+ struct v4l2_h264_reflist_builder reflist_builder;
+ u8 *p0_reflist = slice_param->decode_params.ref_pic_list_p0;
+ u8 *b0_reflist = slice_param->decode_params.ref_pic_list_b0;
+ u8 *b1_reflist = slice_param->decode_params.ref_pic_list_b1;
+
+ update_dpb(dec_params, inst->dpb);
+
+ get_h264_sps_parameters(&slice_param->sps, sps);
+ get_h264_pps_parameters(&slice_param->pps, pps);
+ get_h264_scaling_matrix(&slice_param->scaling_matrix, scaling_matrix);
+ get_h264_decode_parameters(&slice_param->decode_params, dec_params,
+ inst->dpb);
+ get_h264_dpb_list(inst, slice_param);
+
+ /* Build the reference lists */
+ v4l2_h264_init_reflist_builder(&reflist_builder, dec_params, sps,
+ inst->dpb);
+ v4l2_h264_build_p_ref_list(&reflist_builder, p0_reflist);
+ v4l2_h264_build_b_ref_lists(&reflist_builder, b0_reflist, b1_reflist);
+ /* Adapt the built lists to the firmware's expectations */
+ fixup_ref_list(p0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b0_reflist, reflist_builder.num_valid);
+ fixup_ref_list(b1_reflist, reflist_builder.num_valid);
+
+ memcpy(&inst->vsi_ctx.h264_slice_params, slice_param,
+ sizeof(inst->vsi_ctx.h264_slice_params));
+}
+
+static unsigned int get_mv_buf_size(unsigned int width, unsigned int height)
+{
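+ /*
+  * Each 16x16 macroblock needs HW_MB_STORE_SZ bytes of motion vector
+  * storage; 8 extra units are added on top of the per-frame count.
+  */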
+ int unit_size = (width / MB_UNIT_LEN) * (height / MB_UNIT_LEN) + 8;
+
+ return HW_MB_STORE_SZ * unit_size;
+}
+
+static int allocate_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ int err;
+
+ inst->pred_buf.size = BUF_PREDICTION_SZ;
+ err = mtk_vcodec_mem_alloc(inst->ctx, &inst->pred_buf);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate ppl buf");
+ return err;
+ }
+
+ inst->vsi_ctx.pred_buf_dma = inst->pred_buf.dma_addr;
+ return 0;
+}
+
+static void free_predication_buf(struct vdec_h264_slice_inst *inst)
+{
+ struct mtk_vcodec_mem *mem = &inst->pred_buf;
+
+ mtk_vcodec_debug_enter(inst);
+
+ inst->vsi_ctx.pred_buf_dma = 0;
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+}
+
+static int alloc_mv_buf(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ int i;
+ int err;
+ struct mtk_vcodec_mem *mem = NULL;
+ unsigned int buf_sz = get_mv_buf_size(pic->buf_w, pic->buf_h);
+
+ mtk_v4l2_debug(3, "size = 0x%lx", buf_sz);
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ mem->size = buf_sz;
+ err = mtk_vcodec_mem_alloc(inst->ctx, mem);
+ if (err) {
+ mtk_vcodec_err(inst, "failed to allocate mv buf");
+ return err;
+ }
+ inst->vsi_ctx.mv_buf_dma[i] = mem->dma_addr;
+ }
+
+ return 0;
+}
+
+static void free_mv_buf(struct vdec_h264_slice_inst *inst)
+{
+ int i;
+ struct mtk_vcodec_mem *mem;
+
+ for (i = 0; i < H264_MAX_MV_NUM; i++) {
+ inst->vsi_ctx.mv_buf_dma[i] = 0;
+ mem = &inst->mv_buf[i];
+ if (mem->va)
+ mtk_vcodec_mem_free(inst->ctx, mem);
+ }
+}
+
+static void get_pic_info(struct vdec_h264_slice_inst *inst,
+ struct vdec_pic_info *pic)
+{
+ struct mtk_vcodec_ctx *ctx = inst->ctx;
+
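+ /* Align the luma buffer width to 16 and the height to 32; the chroma plane is half the luma size. */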
+ ctx->picinfo.buf_w = (ctx->picinfo.pic_w + 15) & 0xFFFFFFF0;
+ ctx->picinfo.buf_h = (ctx->picinfo.pic_h + 31) & 0xFFFFFFE0;
+ ctx->picinfo.fb_sz[0] = ctx->picinfo.buf_w * ctx->picinfo.buf_h;
+ ctx->picinfo.fb_sz[1] = ctx->picinfo.fb_sz[0] >> 1;
+ inst->vsi_ctx.dec.cap_num_planes =
+ ctx->q_data[MTK_Q_DATA_DST].fmt->num_planes;
+
+ *pic = ctx->picinfo;
+ mtk_vcodec_debug(inst, "pic(%d, %d), buf(%d, %d)",
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h,
+ ctx->picinfo.buf_w, ctx->picinfo.buf_h);
+ mtk_vcodec_debug(inst, "Y/C(%d, %d)", ctx->picinfo.fb_sz[0],
+ ctx->picinfo.fb_sz[1]);
+
+ if (ctx->last_decoded_picinfo.pic_w != ctx->picinfo.pic_w ||
+ ctx->last_decoded_picinfo.pic_h != ctx->picinfo.pic_h) {
+ inst->vsi_ctx.dec.resolution_changed = true;
+ if (ctx->last_decoded_picinfo.buf_w != ctx->picinfo.buf_w ||
+ ctx->last_decoded_picinfo.buf_h != ctx->picinfo.buf_h)
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ mtk_v4l2_debug(1, "ResChg: (%d %d) : old(%d, %d) -> new(%d, %d)",
+ inst->vsi_ctx.dec.resolution_changed,
+ inst->vsi_ctx.dec.realloc_mv_buf,
+ ctx->last_decoded_picinfo.pic_w,
+ ctx->last_decoded_picinfo.pic_h,
+ ctx->picinfo.pic_w, ctx->picinfo.pic_h);
+ }
+}
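
The capture geometry above rounds the coded width up to a multiple of 16 and the height up to a multiple of 32, then derives luma/chroma plane sizes for a 4:2:0 layout. A standalone sketch of that arithmetic for a 1920x1080 stream, using the same masks as the code above:

#include <stdio.h>

int main(void)
{
	unsigned int pic_w = 1920, pic_h = 1080;
	unsigned int buf_w = (pic_w + 15) & 0xFFFFFFF0;	/* round up to 16 -> 1920 */
	unsigned int buf_h = (pic_h + 31) & 0xFFFFFFE0;	/* round up to 32 -> 1088 */
	unsigned int luma = buf_w * buf_h;		/* 2088960 bytes */
	unsigned int chroma = luma >> 1;		/* 1044480 bytes, 4:2:0 */

	printf("buf %ux%u, Y=%u C=%u\n", buf_w, buf_h, luma, chroma);
	return 0;
}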
+
+static void get_crop_info(struct vdec_h264_slice_inst *inst, struct v4l2_rect *cr)
+{
+ cr->left = inst->vsi_ctx.crop.left;
+ cr->top = inst->vsi_ctx.crop.top;
+ cr->width = inst->vsi_ctx.crop.width;
+ cr->height = inst->vsi_ctx.crop.height;
+
+ mtk_vcodec_debug(inst, "l=%d, t=%d, w=%d, h=%d",
+ cr->left, cr->top, cr->width, cr->height);
+}
+
+static void get_dpb_size(struct vdec_h264_slice_inst *inst, unsigned int *dpb_sz)
+{
+ *dpb_sz = inst->vsi_ctx.dec.dpb_sz;
+ mtk_vcodec_debug(inst, "sz=%d", *dpb_sz);
+}
+
+static int vdec_h264_slice_init(struct mtk_vcodec_ctx *ctx)
+{
+ struct vdec_h264_slice_inst *inst;
+ int err;
+
+ inst = kzalloc(sizeof(*inst), GFP_KERNEL);
+ if (!inst)
+ return -ENOMEM;
+
+ inst->ctx = ctx;
+
+ inst->vpu.id = SCP_IPI_VDEC_H264;
+ inst->vpu.ctx = ctx;
+
+ err = vpu_dec_init(&inst->vpu);
+ if (err) {
+ mtk_vcodec_err(inst, "vdec_h264 init err=%d", err);
+ goto error_free_inst;
+ }
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ inst->vsi_ctx.dec.resolution_changed = true;
+ inst->vsi_ctx.dec.realloc_mv_buf = true;
+
+ err = allocate_predication_buf(inst);
+ if (err)
+ goto error_deinit;
+
+ mtk_vcodec_debug(inst, "struct size = %d,%d,%d,%d\n",
+ sizeof(struct mtk_h264_sps_param),
+ sizeof(struct mtk_h264_pps_param),
+ sizeof(struct mtk_h264_dec_slice_param),
+ sizeof(struct mtk_h264_dpb_info));
+
+ mtk_vcodec_debug(inst, "H264 Instance >> %p", inst);
+
+ ctx->drv_handle = inst;
+ return 0;
+
+error_deinit:
+ vpu_dec_deinit(&inst->vpu);
+
+error_free_inst:
+ kfree(inst);
+ return err;
+}
+
+static void vdec_h264_slice_deinit(void *h_vdec)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ mtk_vcodec_debug_enter(inst);
+
+ vpu_dec_deinit(&inst->vpu);
+ free_predication_buf(inst);
+ free_mv_buf(inst);
+
+ kfree(inst);
+}
+
+static int vdec_h264_slice_decode(void *h_vdec, struct mtk_vcodec_mem *bs,
+ struct vdec_fb *fb, bool *res_chg)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+ const struct v4l2_ctrl_h264_decode_params *dec_params =
+ get_ctrl_ptr(inst->ctx, V4L2_CID_STATELESS_H264_DECODE_PARAMS);
+ struct vdec_vpu_inst *vpu = &inst->vpu;
+ u32 data[2];
+ u64 y_fb_dma;
+ u64 c_fb_dma;
+ int err;
+
+ /* bs NULL means flush decoder */
+ if (!bs)
+ return vpu_dec_reset(vpu);
+
+ y_fb_dma = fb ? (u64)fb->base_y.dma_addr : 0;
+ c_fb_dma = fb ? (u64)fb->base_c.dma_addr : 0;
+
+ mtk_vcodec_debug(inst, "+ [%d] FB y_dma=%llx c_dma=%llx va=%p",
+ ++inst->num_nalu, y_fb_dma, c_fb_dma, fb);
+
+ inst->vsi_ctx.dec.bs_dma = (uint64_t)bs->dma_addr;
+ inst->vsi_ctx.dec.y_fb_dma = y_fb_dma;
+ inst->vsi_ctx.dec.c_fb_dma = c_fb_dma;
+ inst->vsi_ctx.dec.vdec_fb_va = (u64)(uintptr_t)fb;
+
+ get_vdec_decode_parameters(inst);
+ data[0] = bs->size;
+ /*
+ * Reconstruct the first byte of the NAL unit, as the firmware requests
+ * that information to be passed even though it is present in the stream
+ * itself...
+ */
+ data[1] = (dec_params->nal_ref_idc << 5) |
+ ((dec_params->flags & V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC)
+ ? 0x5 : 0x1);
+
+ *res_chg = inst->vsi_ctx.dec.resolution_changed;
+ if (*res_chg) {
+ mtk_vcodec_debug(inst, "- resolution changed -");
+ if (inst->vsi_ctx.dec.realloc_mv_buf) {
+ err = alloc_mv_buf(inst, &inst->ctx->picinfo);
+ inst->vsi_ctx.dec.realloc_mv_buf = false;
+ if (err)
+ goto err_free_fb_out;
+ }
+ *res_chg = false;
+ }
+
+ memcpy(inst->vpu.vsi, &inst->vsi_ctx, sizeof(inst->vsi_ctx));
+ err = vpu_dec_start(vpu, data, 2);
+ if (err)
+ goto err_free_fb_out;
+
+ /* wait decoder done interrupt */
+ err = mtk_vcodec_wait_for_done_ctx(inst->ctx,
+ MTK_INST_IRQ_RECEIVED,
+ WAIT_INTR_TIMEOUT_MS);
+ if (err)
+ goto err_free_fb_out;
+ vpu_dec_end(vpu);
+
+ memcpy(&inst->vsi_ctx, inst->vpu.vsi, sizeof(inst->vsi_ctx));
+ mtk_vcodec_debug(inst, "\n - NALU[%d]", inst->num_nalu);
+ return 0;
+
+err_free_fb_out:
+ mtk_vcodec_err(inst, "\n - NALU[%d] err=%d -\n", inst->num_nalu, err);
+ return err;
+}
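
The data[1] value handed to the firmware above is the first byte of the NAL unit header rebuilt from the V4L2 decode parameters: nal_ref_idc sits in bits 6:5 and the low five bits carry the NAL unit type (5 for an IDR slice, 1 for a non-IDR slice). A small standalone illustration of that packing:

#include <stdio.h>
#include <stdint.h>

/* forbidden_zero_bit(1) | nal_ref_idc(2) | nal_unit_type(5) */
static uint8_t nal_header_byte(unsigned int nal_ref_idc, int is_idr)
{
	return (uint8_t)((nal_ref_idc << 5) | (is_idr ? 0x5 : 0x1));
}

int main(void)
{
	printf("IDR, ref_idc=3     -> 0x%02x\n", nal_header_byte(3, 1));	/* 0x65 */
	printf("non-IDR, ref_idc=2 -> 0x%02x\n", nal_header_byte(2, 0));	/* 0x41 */
	return 0;
}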
+
+static int vdec_h264_slice_get_param(void *h_vdec, enum vdec_get_param_type type, void *out)
+{
+ struct vdec_h264_slice_inst *inst = h_vdec;
+
+ switch (type) {
+ case GET_PARAM_PIC_INFO:
+ get_pic_info(inst, out);
+ break;
+
+ case GET_PARAM_DPB_SIZE:
+ get_dpb_size(inst, out);
+ break;
+
+ case GET_PARAM_CROP_INFO:
+ get_crop_info(inst, out);
+ break;
+
+ default:
+ mtk_vcodec_err(inst, "invalid get parameter type=%d", type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct vdec_common_if vdec_h264_slice_if = {
+ .init = vdec_h264_slice_init,
+ .decode = vdec_h264_slice_decode,
+ .get_param = vdec_h264_slice_get_param,
+ .deinit = vdec_h264_slice_deinit,
+};
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
index b18743b906ea..42008243ceac 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.c
@@ -19,6 +19,9 @@ int vdec_if_init(struct mtk_vcodec_ctx *ctx, unsigned int fourcc)
int ret = 0;
switch (fourcc) {
+ case V4L2_PIX_FMT_H264_SLICE:
+ ctx->dec_if = &vdec_h264_slice_if;
+ break;
case V4L2_PIX_FMT_H264:
ctx->dec_if = &vdec_h264_if;
break;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
index ec8f4e8d3d23..d467e8af4a84 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_drv_if.h
@@ -55,6 +55,7 @@ struct vdec_fb_node {
};
extern const struct vdec_common_if vdec_h264_if;
+extern const struct vdec_common_if vdec_h264_slice_if;
extern const struct vdec_common_if vdec_vp8_if;
extern const struct vdec_common_if vdec_vp9_if;
diff --git a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
index 68e8d5cb16d7..5f45a537beb4 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_ipi_msg.h
@@ -29,11 +29,15 @@ enum vdec_ipi_msgid {
/**
* struct vdec_ap_ipi_cmd - generic AP to VPU ipi command format
* @msg_id : vdec_ipi_msgid
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
*/
struct vdec_ap_ipi_cmd {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
};
/**
@@ -63,7 +67,8 @@ struct vdec_ap_ipi_init {
/**
* struct vdec_ap_ipi_dec_start - for AP_IPIMSG_DEC_START
* @msg_id : AP_IPIMSG_DEC_START
- * @vpu_inst_addr : VPU decoder instance address
+ * @vpu_inst_addr : VPU decoder instance address. Used if ABI version < 2.
+ * @inst_id : instance ID. Used if the ABI version >= 2.
* @data : Header info
* H264 decoder [0]:buf_sz [1]:nal_start
* VP8 decoder [0]:width/height
@@ -72,7 +77,10 @@ struct vdec_ap_ipi_init {
*/
struct vdec_ap_ipi_dec_start {
uint32_t msg_id;
- uint32_t vpu_inst_addr;
+ union {
+ uint32_t vpu_inst_addr;
+ uint32_t inst_id;
+ };
uint32_t data[3];
uint32_t reserved;
};
@@ -83,12 +91,19 @@ struct vdec_ap_ipi_dec_start {
* @status : VPU execution result
* @ap_inst_addr : AP vcodec_vpu_inst instance address
* @vpu_inst_addr : VPU decoder instance address
+ * @vdec_abi_version: ABI version of the firmware. Kernel can use it to
+ * ensure that it is compatible with the firmware.
+ * This field is not valid for MT8173 and must not be
+ * accessed for this chip.
+ * @inst_id : instance ID. Valid only if the ABI version >= 2.
*/
struct vdec_vpu_ipi_init_ack {
uint32_t msg_id;
int32_t status;
uint64_t ap_inst_addr;
uint32_t vpu_inst_addr;
+ uint32_t vdec_abi_version;
+ uint32_t inst_id;
};
#endif
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
index 58b0e6fa8fd2..5dffc459a33d 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.c
@@ -24,6 +24,34 @@ static void handle_init_ack_msg(const struct vdec_vpu_ipi_init_ack *msg)
vpu->inst_addr = msg->vpu_inst_addr;
mtk_vcodec_debug(vpu, "- vpu_inst_addr = 0x%x", vpu->inst_addr);
+
+ /* Set default ABI version if dealing with unversioned firmware. */
+ vpu->fw_abi_version = 0;
+ /*
+ * Instance ID is only used if ABI version >= 2. Initialize it with
+ * garbage by default.
+ */
+ vpu->inst_id = 0xdeadbeef;
+
+ /* Firmware version field does not exist on MT8173. */
+ if (vpu->ctx->dev->vdec_pdata->chip == MTK_MT8173)
+ return;
+
+ /* Check firmware version. */
+ vpu->fw_abi_version = msg->vdec_abi_version;
+ mtk_vcodec_debug(vpu, "firmware version 0x%x\n", vpu->fw_abi_version);
+ switch (vpu->fw_abi_version) {
+ case 1:
+ break;
+ case 2:
+ vpu->inst_id = msg->inst_id;
+ break;
+ default:
+ mtk_vcodec_err(vpu, "unhandled firmware version 0x%x\n",
+ vpu->fw_abi_version);
+ vpu->failure = 1;
+ break;
+ }
}
/*
@@ -44,6 +72,9 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
mtk_vcodec_debug(vpu, "+ id=%X", msg->msg_id);
+ vpu->failure = msg->status;
+ vpu->signaled = 1;
+
if (msg->status == 0) {
switch (msg->msg_id) {
case VPU_IPIMSG_DEC_INIT_ACK:
@@ -63,8 +94,6 @@ static void vpu_dec_ipi_handler(void *data, unsigned int len, void *priv)
}
mtk_vcodec_debug(vpu, "- id=%X", msg->msg_id);
- vpu->failure = msg->status;
- vpu->signaled = 1;
}
static int vcodec_vpu_send_msg(struct vdec_vpu_inst *vpu, void *msg, int len)
@@ -96,7 +125,10 @@ static int vcodec_send_ap_ipi(struct vdec_vpu_inst *vpu, unsigned int msg_id)
memset(&msg, 0, sizeof(msg));
msg.msg_id = msg_id;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
err = vcodec_vpu_send_msg(vpu, &msg, sizeof(msg));
mtk_vcodec_debug(vpu, "- id=%X ret=%d", msg_id, err);
@@ -146,7 +178,10 @@ int vpu_dec_start(struct vdec_vpu_inst *vpu, uint32_t *data, unsigned int len)
memset(&msg, 0, sizeof(msg));
msg.msg_id = AP_IPIMSG_DEC_START;
- msg.vpu_inst_addr = vpu->inst_addr;
+ if (vpu->fw_abi_version < 2)
+ msg.vpu_inst_addr = vpu->inst_addr;
+ else
+ msg.inst_id = vpu->inst_id;
for (i = 0; i < len; i++)
msg.data[i] = data[i];
diff --git a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
index 85224eb7e34b..c2ed5b6cab8b 100644
--- a/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
+++ b/drivers/media/platform/mtk-vcodec/vdec_vpu_if.h
@@ -18,6 +18,9 @@ struct mtk_vcodec_ctx;
* for control and info share
* @failure : VPU execution result status, 0: success, others: fail
* @inst_addr : VPU decoder instance address
+ * @fw_abi_version : ABI version of the firmware.
+ * @inst_id : if fw_abi_version >= 2, contains the instance ID to be given
+ * in place of inst_addr in messages.
* @signaled : 1 - Host has received ack message from VPU, 0 - not received
* @ctx : context for v4l2 layer integration
* @dev : platform device of VPU
@@ -29,6 +32,8 @@ struct vdec_vpu_inst {
void *vsi;
int32_t failure;
uint32_t inst_addr;
+ uint32_t fw_abi_version;
+ uint32_t inst_id;
unsigned int signaled;
struct mtk_vcodec_ctx *ctx;
wait_queue_head_t wq;
diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c
index ec290dde59cf..7f1647da0ade 100644
--- a/drivers/media/platform/mtk-vpu/mtk_vpu.c
+++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c
@@ -848,7 +848,8 @@ static int mtk_vpu_probe(struct platform_device *pdev)
vpu->wdt.wq = create_singlethread_workqueue("vpu_wdt");
if (!vpu->wdt.wq) {
dev_err(dev, "initialize wdt workqueue failed\n");
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto clk_unprepare;
}
INIT_WORK(&vpu->wdt.ws, vpu_wdt_reset_func);
mutex_init(&vpu->vpu_mutex);
@@ -942,6 +943,8 @@ disable_vpu_clk:
vpu_clock_disable(vpu);
workqueue_destroy:
destroy_workqueue(vpu->wdt.wq);
+clk_unprepare:
+ clk_unprepare(vpu->clk);
return ret;
}
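
This hunk makes the failed workqueue allocation unwind the already-prepared clock instead of returning directly. A compact, self-contained sketch of the goto-based unwind ordering the fix restores; every example_* name is a placeholder, not part of the driver:

#include <errno.h>
#include <stdio.h>

/* Placeholder stand-ins for the clk/workqueue helpers; illustration only. */
static int  example_clk_prepare(void)       { return 0; }
static void example_clk_unprepare(void)     { puts("clk unprepared"); }
static void *example_create_workqueue(void) { return NULL; /* pretend it failed */ }
static void example_destroy_workqueue(void) { puts("wq destroyed"); }
static int  example_register(void)          { return 0; }

static int example_probe(void)
{
	int ret;

	ret = example_clk_prepare();
	if (ret)
		return ret;

	if (!example_create_workqueue()) {
		/* Without the fix, returning here would leak the prepared clock. */
		ret = -ENOMEM;
		goto clk_unprepare;
	}

	ret = example_register();
	if (ret)
		goto workqueue_destroy;

	return 0;

workqueue_destroy:
	example_destroy_workqueue();
clk_unprepare:
	example_clk_unprepare();
	return ret;
}

int main(void)
{
	printf("probe returned %d\n", example_probe());
	return 0;
}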
diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
index 08a5473b5610..3ce84d0f078c 100644
--- a/drivers/media/platform/mx2_emmaprp.c
+++ b/drivers/media/platform/mx2_emmaprp.c
@@ -804,7 +804,6 @@ static int emmaprp_probe(struct platform_device *pdev)
{
struct emmaprp_dev *pcdev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL);
@@ -822,8 +821,7 @@ static int emmaprp_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk_emma_ahb))
return PTR_ERR(pcdev->clk_emma_ahb);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pcdev->base_emma = devm_ioremap_resource(&pdev->dev, res);
+ pcdev->base_emma = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcdev->base_emma))
return PTR_ERR(pcdev->base_emma);
diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
index 21193f0b7f61..3e0d9af7ffec 100644
--- a/drivers/media/platform/omap/omap_vout.c
+++ b/drivers/media/platform/omap/omap_vout.c
@@ -277,7 +277,7 @@ static int video_mode_to_dss_mode(struct omap_vout_device *vout)
*/
static int omapvid_setup_overlay(struct omap_vout_device *vout,
struct omap_overlay *ovl, int posx, int posy, int outw,
- int outh, u32 addr)
+ int outh, dma_addr_t addr)
{
int ret = 0;
struct omap_overlay_info info;
@@ -352,7 +352,7 @@ setup_ovl_err:
/*
* Initialize the overlay structure
*/
-static int omapvid_init(struct omap_vout_device *vout, u32 addr)
+static int omapvid_init(struct omap_vout_device *vout, dma_addr_t addr)
{
int ret = 0, i;
struct v4l2_window *win;
@@ -479,7 +479,8 @@ err:
static void omap_vout_isr(void *arg, unsigned int irqstatus)
{
int ret, fid, mgr_id;
- u32 addr, irq;
+ dma_addr_t addr;
+ u32 irq;
struct omap_overlay *ovl;
u64 ts;
struct omapvideo_info *ovid;
@@ -543,7 +544,7 @@ static void omap_vout_isr(void *arg, unsigned int irqstatus)
struct omap_vout_buffer, queue);
list_del(&vout->next_frm->queue);
- addr = (unsigned long)vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->next_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
/* First save the configuration in overlay structure */
@@ -976,7 +977,7 @@ static int omap_vout_vb2_prepare(struct vb2_buffer *vb)
vb2_set_plane_payload(vb, 0, vout->pix.sizeimage);
voutbuf->vbuf.field = V4L2_FIELD_NONE;
- vout->queued_buf_addr[vb->index] = (u8 *)buf_phy_addr;
+ vout->queued_buf_addr[vb->index] = buf_phy_addr;
if (ovid->rotation_type == VOUT_ROT_VRFB)
return omap_vout_prepare_vrfb(vout, vb);
return 0;
@@ -995,7 +996,8 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
struct omap_vout_device *vout = vb2_get_drv_priv(vq);
struct omapvideo_info *ovid = &vout->vid_info;
struct omap_vout_buffer *buf, *tmp;
- u32 addr = 0, mask = 0;
+ dma_addr_t addr = 0;
+ u32 mask = 0;
int ret, j;
/* Get the next frame from the buffer queue */
@@ -1018,7 +1020,7 @@ static int omap_vout_vb2_start_streaming(struct vb2_queue *vq, unsigned int coun
goto out;
}
- addr = (unsigned long)vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ addr = vout->queued_buf_addr[vout->cur_frm->vbuf.vb2_buf.index]
+ vout->cropped_offset;
mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_EVEN | DISPC_IRQ_EVSYNC_ODD
@@ -1476,7 +1478,7 @@ static int __init omap_vout_create_video_devices(struct platform_device *pdev)
* To be precise: fbuf.base should match smem_start of
* struct fb_fix_screeninfo.
*/
- vout->fbuf.base = (void *)info.paddr;
+ vout->fbuf.base = (void *)(uintptr_t)info.paddr;
/* Set VRFB as rotation_type for omap2 and omap3 */
if (omap_vout_dss_omap24xx() || omap_vout_dss_omap34xx())
diff --git a/drivers/media/platform/omap/omap_vout_vrfb.c b/drivers/media/platform/omap/omap_vout_vrfb.c
index 6bd672cbdb62..0cfa0169875f 100644
--- a/drivers/media/platform/omap/omap_vout_vrfb.c
+++ b/drivers/media/platform/omap/omap_vout_vrfb.c
@@ -305,7 +305,7 @@ int omap_vout_prepare_vrfb(struct omap_vout_device *vout,
/* Store buffers physical address into an array. Addresses
* from this array will be used to configure DSS */
rotation = calc_rotation(vout);
- vout->queued_buf_addr[vb->index] = (u8 *)
+ vout->queued_buf_addr[vb->index] =
vout->vrfb_context[vb->index].paddr[rotation];
return 0;
}
diff --git a/drivers/media/platform/omap/omap_voutdef.h b/drivers/media/platform/omap/omap_voutdef.h
index 1cff6dea1879..b586193341d2 100644
--- a/drivers/media/platform/omap/omap_voutdef.h
+++ b/drivers/media/platform/omap/omap_voutdef.h
@@ -170,7 +170,7 @@ struct omap_vout_device {
struct omap_vout_buffer *cur_frm, *next_frm;
spinlock_t vbq_lock; /* spinlock for dma_queue */
struct list_head dma_queue;
- u8 *queued_buf_addr[VIDEO_MAX_FRAME];
+ dma_addr_t queued_buf_addr[VIDEO_MAX_FRAME];
u32 cropped_offset;
s32 tv_field1_offset;
void *isr_handle;
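
Storing the queued addresses as dma_addr_t means the bus address is never squeezed through a pointer or unsigned long, which can be narrower than dma_addr_t (for example on 32-bit ARM with LPAE). A small userspace sketch of the difference, with a uint64_t standing in for dma_addr_t:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for dma_addr_t, which can be 64-bit even on a 32-bit CPU. */
typedef uint64_t example_dma_addr_t;

int main(void)
{
	example_dma_addr_t base = 0x100000000ULL;	/* a buffer mapped above 4 GiB */
	uint32_t cropped_offset = 0x1000;

	/* Keeping the value in dma_addr_t preserves every bit of the bus address. */
	example_dma_addr_t addr = base + cropped_offset;

	/* A 32-bit round-trip, as with the old u8 * array, would drop the top bits. */
	uint32_t truncated = (uint32_t)base + cropped_offset;

	printf("full=0x%" PRIx64 " truncated=0x%" PRIx32 "\n", addr, truncated);
	return 0;
}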
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 20f59c59ff8a..6de377ce281d 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -2003,7 +2003,7 @@ static int isp_remove(struct platform_device *pdev)
{
struct isp_device *isp = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&isp->notifier);
+ v4l2_async_nf_unregister(&isp->notifier);
isp_unregister_entities(isp);
isp_cleanup_modules(isp);
isp_xclk_cleanup(isp);
@@ -2013,7 +2013,7 @@ static int isp_remove(struct platform_device *pdev)
__omap3isp_put(isp, false);
media_entity_enum_cleanup(&isp->crashed);
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
kfree(isp);
@@ -2172,8 +2172,9 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
ret = v4l2_fwnode_endpoint_parse(ep, &vep);
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+ isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
+ ep, struct
+ isp_async_subdev);
if (!IS_ERR(isd))
isp_parse_of_parallel_endpoint(isp->dev, &vep, &isd->bus);
}
@@ -2211,8 +2212,10 @@ static int isp_parse_of_endpoints(struct isp_device *isp)
}
if (!ret) {
- isd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &isp->notifier, ep, struct isp_async_subdev);
+ isd = v4l2_async_nf_add_fwnode_remote(&isp->notifier,
+ ep,
+ struct
+ isp_async_subdev);
if (!IS_ERR(isd)) {
switch (vep.bus_type) {
@@ -2289,7 +2292,7 @@ static int isp_probe(struct platform_device *pdev)
mutex_init(&isp->isp_mutex);
spin_lock_init(&isp->stat_lock);
- v4l2_async_notifier_init(&isp->notifier);
+ v4l2_async_nf_init(&isp->notifier);
isp->dev = &pdev->dev;
ret = isp_parse_of_endpoints(isp);
@@ -2418,7 +2421,7 @@ static int isp_probe(struct platform_device *pdev)
isp->notifier.ops = &isp_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&isp->v4l2_dev, &isp->notifier);
+ ret = v4l2_async_nf_register(&isp->v4l2_dev, &isp->notifier);
if (ret)
goto error_register_entities;
@@ -2437,7 +2440,7 @@ error_isp:
isp_xclk_cleanup(isp);
__omap3isp_put(isp, false);
error:
- v4l2_async_notifier_cleanup(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
mutex_destroy(&isp->isp_mutex);
error_release_isp:
kfree(isp);
diff --git a/drivers/media/platform/pxa_camera.c b/drivers/media/platform/pxa_camera.c
index ec4c010644ca..3ba00b0f9320 100644
--- a/drivers/media/platform/pxa_camera.c
+++ b/drivers/media/platform/pxa_camera.c
@@ -2249,10 +2249,9 @@ static int pxa_camera_pdata_from_dt(struct device *dev,
if (ep.bus.parallel.flags & V4L2_MBUS_PCLK_SAMPLE_FALLING)
pcdev->platform_flags |= PXA_CAMERA_PCLK_EN;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &pcdev->notifier,
- of_fwnode_handle(np),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&pcdev->notifier,
+ of_fwnode_handle(np),
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
out:
@@ -2289,7 +2288,7 @@ static int pxa_camera_probe(struct platform_device *pdev)
if (IS_ERR(pcdev->clk))
return PTR_ERR(pcdev->clk);
- v4l2_async_notifier_init(&pcdev->notifier);
+ v4l2_async_nf_init(&pcdev->notifier);
pcdev->res = res;
pcdev->pdata = pdev->dev.platform_data;
if (pcdev->pdata) {
@@ -2297,11 +2296,10 @@ static int pxa_camera_probe(struct platform_device *pdev)
pcdev->platform_flags = pcdev->pdata->flags;
pcdev->mclk = pcdev->pdata->mclk_10khz * 10000;
- asd = v4l2_async_notifier_add_i2c_subdev(
- &pcdev->notifier,
- pcdev->pdata->sensor_i2c_adapter_id,
- pcdev->pdata->sensor_i2c_address,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_i2c(&pcdev->notifier,
+ pcdev->pdata->sensor_i2c_adapter_id,
+ pcdev->pdata->sensor_i2c_address,
+ struct v4l2_async_subdev);
if (IS_ERR(asd))
err = PTR_ERR(asd);
} else if (pdev->dev.of_node) {
@@ -2402,13 +2400,13 @@ static int pxa_camera_probe(struct platform_device *pdev)
goto exit_notifier_cleanup;
pcdev->notifier.ops = &pxa_camera_sensor_ops;
- err = v4l2_async_notifier_register(&pcdev->v4l2_dev, &pcdev->notifier);
+ err = v4l2_async_nf_register(&pcdev->v4l2_dev, &pcdev->notifier);
if (err)
goto exit_notifier_cleanup;
return 0;
exit_notifier_cleanup:
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
exit_deactivate:
pxa_camera_deactivate(pcdev);
@@ -2432,8 +2430,8 @@ static int pxa_camera_remove(struct platform_device *pdev)
dma_release_channel(pcdev->dma_chans[1]);
dma_release_channel(pcdev->dma_chans[2]);
- v4l2_async_notifier_unregister(&pcdev->notifier);
- v4l2_async_notifier_cleanup(&pcdev->notifier);
+ v4l2_async_nf_unregister(&pcdev->notifier);
+ v4l2_async_nf_cleanup(&pcdev->notifier);
v4l2_device_unregister(&pcdev->v4l2_dev);
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-170.c b/drivers/media/platform/qcom/camss/camss-vfe-170.c
index 8594d275b41d..5c083d70d495 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-170.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-170.c
@@ -177,7 +177,7 @@
#define VFE_BUS_WM_FRAME_INC(n) (0x2258 + (n) * 0x100)
#define VFE_BUS_WM_BURST_LIMIT(n) (0x225c + (n) * 0x100)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_HW_VERSION);
@@ -185,7 +185,10 @@ static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
u32 rev = (hw_version >> 16) & 0xFFF;
u32 step = hw_version & 0xFFFF;
- dev_err(dev, "VFE HW Version = %u.%u.%u\n", gen, rev, step);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = %u.%u.%u\n",
+ gen, rev, step);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -771,7 +774,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_170 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
index 53c56a8d4545..42047b11ba52 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-1.c
@@ -210,11 +210,13 @@
#define MSM_VFE_VFE0_UB_SIZE 1023
#define MSM_VFE_VFE0_UB_SIZE_RDI (MSM_VFE_VFE0_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_dbg(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -288,22 +290,14 @@ static void vfe_wm_frame_based(struct vfe_device *vfe, u8 wm, u8 enable)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
- switch (pix->pixelformat) {
- case V4L2_PIX_FMT_NV12:
- case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
+ *width = pix->width;
+ *height = pix->height;
+ *bytesperline = pix->plane_fmt[0].bytesperline;
+
+ if (pix->pixelformat == V4L2_PIX_FMT_NV12 ||
+ pix->pixelformat == V4L2_PIX_FMT_NV21)
if (plane == 1)
*height /= 2;
- break;
- case V4L2_PIX_FMT_NV16:
- case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
- *bytesperline = pix->plane_fmt[0].bytesperline;
- break;
- }
}
static void vfe_wm_line_based(struct vfe_device *vfe, u32 wm,
@@ -1004,7 +998,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_1 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
index a59635217758..ab2d57bdf5e7 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-7.c
@@ -254,11 +254,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static u16 vfe_get_ub_size(u8 vfe_id)
@@ -368,30 +370,26 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
-
}
}
@@ -1196,7 +1194,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_7 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
index 998429dbb65c..7e6b62c930ac 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe-4-8.c
@@ -247,11 +247,13 @@
#define MSM_VFE_VFE1_UB_SIZE 1535
#define MSM_VFE_VFE1_UB_SIZE_RDI (MSM_VFE_VFE1_UB_SIZE / 3)
-static void vfe_hw_version_read(struct vfe_device *vfe, struct device *dev)
+static u32 vfe_hw_version(struct vfe_device *vfe)
{
u32 hw_version = readl_relaxed(vfe->base + VFE_0_HW_VERSION);
- dev_err(dev, "VFE HW Version = 0x%08x\n", hw_version);
+ dev_dbg(vfe->camss->dev, "VFE HW Version = 0x%08x\n", hw_version);
+
+ return hw_version;
}
static inline void vfe_reg_clr(struct vfe_device *vfe, u32 reg, u32 clr_bits)
@@ -341,27 +343,24 @@ static int vfe_word_per_line_by_bytes(u32 bytes_per_line)
static void vfe_get_wm_sizes(struct v4l2_pix_format_mplane *pix, u8 plane,
u16 *width, u16 *height, u16 *bytesperline)
{
+ *width = pix->width;
+ *height = pix->height;
+
switch (pix->pixelformat) {
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
if (plane == 1)
*height /= 2;
break;
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_NV61:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[0].bytesperline;
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_YVYU:
case V4L2_PIX_FMT_VYUY:
case V4L2_PIX_FMT_UYVY:
- *width = pix->width;
- *height = pix->height;
*bytesperline = pix->plane_fmt[plane].bytesperline;
break;
}
@@ -1180,7 +1179,7 @@ static void vfe_subdev_init(struct device *dev, struct vfe_device *vfe)
const struct vfe_hw_ops vfe_ops_4_8 = {
.global_reset = vfe_global_reset,
- .hw_version_read = vfe_hw_version_read,
+ .hw_version = vfe_hw_version,
.isr_read = vfe_isr_read,
.isr = vfe_isr,
.pm_domain_off = vfe_pm_domain_off,
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.c b/drivers/media/platform/qcom/camss/camss-vfe.c
index e0f3a36f3f3f..71f78b40e7f5 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.c
+++ b/drivers/media/platform/qcom/camss/camss-vfe.c
@@ -604,6 +604,8 @@ static int vfe_get(struct vfe_device *vfe)
vfe_reset_output_maps(vfe);
vfe_init_outputs(vfe);
+
+ vfe->ops->hw_version(vfe);
} else {
ret = vfe_check_clock_rates(vfe);
if (ret < 0)
@@ -713,8 +715,6 @@ static int vfe_set_power(struct v4l2_subdev *sd, int on)
ret = vfe_get(vfe);
if (ret < 0)
return ret;
-
- vfe->ops->hw_version_read(vfe, vfe->camss->dev);
} else {
vfe_put(vfe);
}
diff --git a/drivers/media/platform/qcom/camss/camss-vfe.h b/drivers/media/platform/qcom/camss/camss-vfe.h
index 844b9275031d..f166d176cb77 100644
--- a/drivers/media/platform/qcom/camss/camss-vfe.h
+++ b/drivers/media/platform/qcom/camss/camss-vfe.h
@@ -103,7 +103,7 @@ struct vfe_device;
struct vfe_hw_ops {
void (*enable_irq_common)(struct vfe_device *vfe);
void (*global_reset)(struct vfe_device *vfe);
- void (*hw_version_read)(struct vfe_device *vfe, struct device *dev);
+ u32 (*hw_version)(struct vfe_device *vfe);
irqreturn_t (*isr)(int irq, void *dev);
void (*isr_read)(struct vfe_device *vfe, u32 *value0, u32 *value1);
void (*pm_domain_off)(struct vfe_device *vfe);
diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
index ef100d5f7763..be091c50a3c0 100644
--- a/drivers/media/platform/qcom/camss/camss.c
+++ b/drivers/media/platform/qcom/camss/camss.c
@@ -886,9 +886,9 @@ static int camss_of_parse_ports(struct camss *camss)
goto err_cleanup;
}
- csd = v4l2_async_notifier_add_fwnode_subdev(
- &camss->notifier, of_fwnode_handle(remote),
- struct camss_async_subdev);
+ csd = v4l2_async_nf_add_fwnode(&camss->notifier,
+ of_fwnode_handle(remote),
+ struct camss_async_subdev);
of_node_put(remote);
if (IS_ERR(csd)) {
ret = PTR_ERR(csd);
@@ -1361,7 +1361,7 @@ static int camss_probe(struct platform_device *pdev)
goto err_free;
}
- v4l2_async_notifier_init(&camss->notifier);
+ v4l2_async_nf_init(&camss->notifier);
num_subdevs = camss_of_parse_ports(camss);
if (num_subdevs < 0) {
@@ -1397,8 +1397,8 @@ static int camss_probe(struct platform_device *pdev)
if (num_subdevs) {
camss->notifier.ops = &camss_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&camss->v4l2_dev,
- &camss->notifier);
+ ret = v4l2_async_nf_register(&camss->v4l2_dev,
+ &camss->notifier);
if (ret) {
dev_err(dev,
"Failed to register async subdev nodes: %d\n",
@@ -1436,7 +1436,7 @@ err_register_subdevs:
err_register_entities:
v4l2_device_unregister(&camss->v4l2_dev);
err_cleanup:
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
err_free:
kfree(camss);
@@ -1478,8 +1478,8 @@ static int camss_remove(struct platform_device *pdev)
{
struct camss *camss = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&camss->notifier);
- v4l2_async_notifier_cleanup(&camss->notifier);
+ v4l2_async_nf_unregister(&camss->notifier);
+ v4l2_async_nf_cleanup(&camss->notifier);
camss_unregister_entities(camss);
if (atomic_read(&camss->ref_count) == 0)
diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c
index 91b15842c555..f5fa81896012 100644
--- a/drivers/media/platform/qcom/venus/core.c
+++ b/drivers/media/platform/qcom/venus/core.c
@@ -65,7 +65,7 @@ static void venus_event_notify(struct venus_core *core, u32 event)
}
mutex_lock(&core->lock);
- core->sys_error = true;
+ set_bit(0, &core->sys_error);
list_for_each_entry(inst, &core->instances, list)
inst->ops->event_notify(inst, EVT_SESSION_ERROR, NULL);
mutex_unlock(&core->lock);
@@ -95,9 +95,8 @@ static void venus_sys_error_handler(struct work_struct *work)
failed = true;
}
- hfi_core_deinit(core, true);
-
- mutex_lock(&core->lock);
+ core->ops->core_deinit(core);
+ core->state = CORE_UNINIT;
for (i = 0; i < max_attempts; i++) {
if (!pm_runtime_active(core->dev_dec) && !pm_runtime_active(core->dev_enc))
@@ -105,6 +104,8 @@ static void venus_sys_error_handler(struct work_struct *work)
msleep(10);
}
+ mutex_lock(&core->lock);
+
venus_shutdown(core);
venus_coredump(core);
@@ -161,7 +162,8 @@ static void venus_sys_error_handler(struct work_struct *work)
dev_warn(core->dev, "system error has occurred (recovered)\n");
mutex_lock(&core->lock);
- core->sys_error = false;
+ clear_bit(0, &core->sys_error);
+ wake_up_all(&core->sys_err_done);
mutex_unlock(&core->lock);
}
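
With sys_error now a bit in an unsigned long and sys_err_done a waitqueue, blocking paths elsewhere in the driver (outside this hunk) can sleep until recovery completes. A hedged sketch of such a wait, assuming the struct venus_core definition from core.h; the function name is a placeholder:

#include <linux/bitops.h>
#include <linux/wait.h>

#include "core.h"	/* struct venus_core with sys_error / sys_err_done */

/* Sketch only: block until the recovery worker clears bit 0 and wakes us. */
static void example_wait_for_sys_error_recovery(struct venus_core *core)
{
	if (test_bit(0, &core->sys_error))
		wait_event(core->sys_err_done, !test_bit(0, &core->sys_error));
}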
@@ -267,7 +269,6 @@ static int venus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct venus_core *core;
- struct resource *r;
int ret;
core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
@@ -276,8 +277,7 @@ static int venus_probe(struct platform_device *pdev)
core->dev = dev;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- core->base = devm_ioremap_resource(dev, r);
+ core->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(core->base))
return PTR_ERR(core->base);
@@ -318,6 +318,7 @@ static int venus_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&core->instances);
mutex_init(&core->lock);
INIT_DELAYED_WORK(&core->work, venus_sys_error_handler);
+ init_waitqueue_head(&core->sys_err_done);
ret = devm_request_threaded_irq(dev, core->irq, hfi_isr, hfi_isr_thread,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
@@ -567,6 +568,69 @@ static const struct venus_resources msm8996_res = {
.fwname = "qcom/venus-4.2/venus.mdt",
};
+static const struct freq_tbl sdm660_freq_table[] = {
+ { 979200, 518400000 },
+ { 489600, 441600000 },
+ { 432000, 404000000 },
+ { 244800, 320000000 },
+ { 216000, 269330000 },
+ { 108000, 133330000 },
+};
+
+static const struct reg_val sdm660_reg_preset[] = {
+ { 0x80010, 0x001f001f },
+ { 0x80018, 0x00000156 },
+ { 0x8001c, 0x00000156 },
+};
+
+static const struct bw_tbl sdm660_bw_table_enc[] = {
+ { 979200, 1044000, 0, 2446336, 0 }, /* 4k UHD @ 30 */
+ { 864000, 887000, 0, 2108416, 0 }, /* 720p @ 240 */
+ { 489600, 666000, 0, 1207296, 0 }, /* 1080p @ 60 */
+ { 432000, 578000, 0, 1058816, 0 }, /* 720p @ 120 */
+ { 244800, 346000, 0, 616448, 0 }, /* 1080p @ 30 */
+ { 216000, 293000, 0, 534528, 0 }, /* 720p @ 60 */
+ { 108000, 151000, 0, 271360, 0 }, /* 720p @ 30 */
+};
+
+static const struct bw_tbl sdm660_bw_table_dec[] = {
+ { 979200, 2365000, 0, 1892000, 0 }, /* 4k UHD @ 30 */
+ { 864000, 1978000, 0, 1554000, 0 }, /* 720p @ 240 */
+ { 489600, 1133000, 0, 895000, 0 }, /* 1080p @ 60 */
+ { 432000, 994000, 0, 781000, 0 }, /* 720p @ 120 */
+ { 244800, 580000, 0, 460000, 0 }, /* 1080p @ 30 */
+ { 216000, 501000, 0, 301000, 0 }, /* 720p @ 60 */
+ { 108000, 255000, 0, 202000, 0 }, /* 720p @ 30 */
+};
+
+static const struct venus_resources sdm660_res = {
+ .freq_tbl = sdm660_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sdm660_freq_table),
+ .reg_tbl = sdm660_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sdm660_reg_preset),
+ .bw_tbl_enc = sdm660_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sdm660_bw_table_enc),
+ .bw_tbl_dec = sdm660_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sdm660_bw_table_dec),
+ .clks = {"core", "iface", "bus", "bus_throttle" },
+ .clks_num = 4,
+ .vcodec0_clks = { "vcodec0_core" },
+ .vcodec1_clks = { "vcodec0_core" },
+ .vcodec_clks_num = 1,
+ .vcodec_num = 1,
+ .max_load = 1036800,
+ .hfi_version = HFI_VERSION_3XX,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .cp_start = 0,
+ .cp_size = 0x79000000,
+ .cp_nonpixel_start = 0x1000000,
+ .cp_nonpixel_size = 0x28000000,
+ .dma_mask = 0xd9000000 - 1,
+ .fwname = "qcom/venus-4.4/venus.mdt",
+};
+
static const struct freq_tbl sdm845_freq_table[] = {
{ 3110400, 533000000 }, /* 4096x2160@90 */
{ 2073600, 444000000 }, /* 4096x2160@60 */
@@ -729,6 +793,7 @@ static const struct venus_resources sm8250_res = {
.vcodec_num = 1,
.max_load = 7833600,
.hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 4,
.vmem_id = VIDC_RESOURCE_NONE,
.vmem_size = 0,
.vmem_addr = 0,
@@ -736,12 +801,66 @@ static const struct venus_resources sm8250_res = {
.fwname = "qcom/vpu-1.0/venus.mdt",
};
+static const struct freq_tbl sc7280_freq_table[] = {
+ { 0, 460000000 },
+ { 0, 424000000 },
+ { 0, 335000000 },
+ { 0, 240000000 },
+ { 0, 133333333 },
+};
+
+static const struct bw_tbl sc7280_bw_table_enc[] = {
+ { 1944000, 1896000, 0, 3657000, 0 }, /* 3840x2160@60 */
+ { 972000, 968000, 0, 1848000, 0 }, /* 3840x2160@30 */
+ { 489600, 618000, 0, 941000, 0 }, /* 1920x1080@60 */
+ { 244800, 318000, 0, 480000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct bw_tbl sc7280_bw_table_dec[] = {
+ { 2073600, 2128000, 0, 3831000, 0 }, /* 4096x2160@60 */
+ { 1036800, 1085000, 0, 1937000, 0 }, /* 4096x2160@30 */
+ { 489600, 779000, 0, 998000, 0 }, /* 1920x1080@60 */
+ { 244800, 400000, 0, 509000, 0 }, /* 1920x1080@30 */
+};
+
+static const struct reg_val sm7280_reg_preset[] = {
+ { 0xb0088, 0 },
+};
+
+static const struct venus_resources sc7280_res = {
+ .freq_tbl = sc7280_freq_table,
+ .freq_tbl_size = ARRAY_SIZE(sc7280_freq_table),
+ .reg_tbl = sm7280_reg_preset,
+ .reg_tbl_size = ARRAY_SIZE(sm7280_reg_preset),
+ .bw_tbl_enc = sc7280_bw_table_enc,
+ .bw_tbl_enc_size = ARRAY_SIZE(sc7280_bw_table_enc),
+ .bw_tbl_dec = sc7280_bw_table_dec,
+ .bw_tbl_dec_size = ARRAY_SIZE(sc7280_bw_table_dec),
+ .clks = {"core", "bus", "iface"},
+ .clks_num = 3,
+ .vcodec0_clks = {"vcodec_core", "vcodec_bus"},
+ .vcodec_clks_num = 2,
+ .vcodec_pmdomains = { "venus", "vcodec0" },
+ .vcodec_pmdomains_num = 2,
+ .opp_pmdomain = (const char *[]) { "cx", NULL },
+ .vcodec_num = 1,
+ .hfi_version = HFI_VERSION_6XX,
+ .num_vpp_pipes = 1,
+ .vmem_id = VIDC_RESOURCE_NONE,
+ .vmem_size = 0,
+ .vmem_addr = 0,
+ .dma_mask = 0xe0000000 - 1,
+ .fwname = "qcom/vpu-2.0/venus.mbn",
+};
+
static const struct of_device_id venus_dt_match[] = {
{ .compatible = "qcom,msm8916-venus", .data = &msm8916_res, },
{ .compatible = "qcom,msm8996-venus", .data = &msm8996_res, },
+ { .compatible = "qcom,sdm660-venus", .data = &sdm660_res, },
{ .compatible = "qcom,sdm845-venus", .data = &sdm845_res, },
{ .compatible = "qcom,sdm845-venus-v2", .data = &sdm845_res_v2, },
{ .compatible = "qcom,sc7180-venus", .data = &sc7180_res, },
+ { .compatible = "qcom,sc7280-venus", .data = &sc7280_res, },
{ .compatible = "qcom,sm8250-venus", .data = &sm8250_res, },
{ }
};
diff --git a/drivers/media/platform/qcom/venus/core.h b/drivers/media/platform/qcom/venus/core.h
index 5ec851115eca..7c3bac01cd49 100644
--- a/drivers/media/platform/qcom/venus/core.h
+++ b/drivers/media/platform/qcom/venus/core.h
@@ -7,6 +7,7 @@
#ifndef __VENUS_CORE_H_
#define __VENUS_CORE_H_
+#include <linux/bitops.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-ctrls.h>
@@ -68,6 +69,7 @@ struct venus_resources {
const char * const resets[VIDC_RESETS_NUM_MAX];
unsigned int resets_num;
enum hfi_version hfi_version;
+ u8 num_vpp_pipes;
u32 max_load;
unsigned int vmem_id;
u32 vmem_size;
@@ -181,7 +183,8 @@ struct venus_core {
unsigned int state;
struct completion done;
unsigned int error;
- bool sys_error;
+ unsigned long sys_error;
+ wait_queue_head_t sys_err_done;
const struct hfi_core_ops *core_ops;
const struct venus_pm_ops *pm_ops;
struct mutex pm_lock;
@@ -334,6 +337,7 @@ enum venus_inst_modes {
* @registeredbufs: a list of registered capture buffers
* @delayed_process: a list of delayed buffers
* @delayed_process_work: a work_struct for process delayed buffers
+ * @nonblock: nonblocking flag
* @ctrl_handler: v4l control handler
* @controls: a union of decoder and encoder control parameters
* @fh: a holder of v4l file handle structure
@@ -397,6 +401,7 @@ struct venus_inst {
struct list_head registeredbufs;
struct list_head delayed_process;
struct work_struct delayed_process_work;
+ bool nonblock;
struct v4l2_ctrl_handler ctrl_handler;
union {
@@ -408,6 +413,7 @@ struct venus_inst {
u32 width;
u32 height;
struct v4l2_rect crop;
+ u32 fw_min_cnt;
u32 out_width;
u32 out_height;
u32 colorspace;
@@ -452,6 +458,7 @@ struct venus_inst {
bool next_buf_last;
bool drain_active;
enum venus_inst_modes flags;
+ struct ida dpb_ids;
};
#define IS_V1(core) ((core)->res->hfi_version == HFI_VERSION_1XX)
diff --git a/drivers/media/platform/qcom/venus/firmware.c b/drivers/media/platform/qcom/venus/firmware.c
index 227bd3b3f84c..14b6f1d05991 100644
--- a/drivers/media/platform/qcom/venus/firmware.c
+++ b/drivers/media/platform/qcom/venus/firmware.c
@@ -27,7 +27,12 @@
static void venus_reset_cpu(struct venus_core *core)
{
u32 fw_size = core->fw.mapped_mem_size;
- void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_base;
+
+ if (IS_V6(core))
+ wrapper_base = core->wrapper_tz_base;
+ else
+ wrapper_base = core->wrapper_base;
writel(0, wrapper_base + WRAPPER_FW_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_FW_END_ADDR);
@@ -35,11 +40,17 @@ static void venus_reset_cpu(struct venus_core *core)
writel(fw_size, wrapper_base + WRAPPER_CPA_END_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_START_ADDR);
writel(fw_size, wrapper_base + WRAPPER_NONPIX_END_ADDR);
- writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
- writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
- /* Bring ARM9 out of reset */
- writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ if (IS_V6(core)) {
+ /* Bring XTSS out of reset */
+ writel(0, wrapper_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ writel(0x0, wrapper_base + WRAPPER_CPU_CGC_DIS);
+ writel(0x0, wrapper_base + WRAPPER_CPU_CLOCK_CONFIG);
+
+ /* Bring ARM9 out of reset */
+ writel(0, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
}
int venus_set_hw_state(struct venus_core *core, bool resume)
@@ -56,7 +67,9 @@ int venus_set_hw_state(struct venus_core *core, bool resume)
if (resume) {
venus_reset_cpu(core);
} else {
- if (!IS_V6(core))
+ if (IS_V6(core))
+ writel(1, core->wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ else
writel(1, core->wrapper_base + WRAPPER_A9SS_SW_RESET);
}
@@ -162,12 +175,19 @@ static int venus_shutdown_no_tz(struct venus_core *core)
u32 reg;
struct device *dev = core->fw.dev;
void __iomem *wrapper_base = core->wrapper_base;
+ void __iomem *wrapper_tz_base = core->wrapper_tz_base;
- /* Assert the reset to ARM9 */
- reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
- reg |= WRAPPER_A9SS_SW_RESET_BIT;
- writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
-
+ if (IS_V6(core)) {
+ /* Assert the reset to XTSS */
+ reg = readl_relaxed(wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ reg |= WRAPPER_XTSS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_tz_base + WRAPPER_TZ_XTSS_SW_RESET);
+ } else {
+ /* Assert the reset to ARM9 */
+ reg = readl_relaxed(wrapper_base + WRAPPER_A9SS_SW_RESET);
+ reg |= WRAPPER_A9SS_SW_RESET_BIT;
+ writel_relaxed(reg, wrapper_base + WRAPPER_A9SS_SW_RESET);
+ }
/* Make sure reset is asserted before the mapping is removed */
mb();
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c
index 8012f5c7bf34..84c3a511ec31 100644
--- a/drivers/media/platform/qcom/venus/helpers.c
+++ b/drivers/media/platform/qcom/venus/helpers.c
@@ -3,6 +3,7 @@
* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
+#include <linux/idr.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
@@ -18,8 +19,13 @@
#include "hfi_platform.h"
#include "hfi_parser.h"
-#define NUM_MBS_720P (((1280 + 15) >> 4) * ((720 + 15) >> 4))
-#define NUM_MBS_4K (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
+#define NUM_MBS_720P (((ALIGN(1280, 16)) >> 4) * ((ALIGN(736, 16)) >> 4))
+#define NUM_MBS_4K (((ALIGN(4096, 16)) >> 4) * ((ALIGN(2304, 16)) >> 4))
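
For scale, 1280, 736, 4096 and 2304 are already multiples of 16, so the ALIGN() calls are no-ops here and the two thresholds work out to:

	NUM_MBS_720P = (1280 >> 4) * (736 >> 4)  =  80 * 46  =  3680 macroblocks
	NUM_MBS_4K   = (4096 >> 4) * (2304 >> 4) = 256 * 144 = 36864 macroblocks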
+
+enum dpb_buf_owner {
+ DRIVER,
+ FIRMWARE,
+};
struct intbuf {
struct list_head list;
@@ -28,6 +34,8 @@ struct intbuf {
void *va;
dma_addr_t da;
unsigned long attrs;
+ enum dpb_buf_owner owned_by;
+ u32 dpb_out_tag;
};
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt)
@@ -95,9 +103,16 @@ int venus_helper_queue_dpb_bufs(struct venus_inst *inst)
fdata.device_addr = buf->da;
fdata.buffer_type = buf->type;
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ fdata.clnt_data = buf->dpb_out_tag;
+
ret = hfi_session_process_buf(inst, &fdata);
if (ret)
goto fail;
+
+ buf->owned_by = FIRMWARE;
}
fail:
@@ -110,13 +125,19 @@ int venus_helper_free_dpb_bufs(struct venus_inst *inst)
struct intbuf *buf, *n;
list_for_each_entry_safe(buf, n, &inst->dpbbufs, list) {
+ if (buf->owned_by == FIRMWARE)
+ continue;
+
+ ida_free(&inst->dpb_ids, buf->dpb_out_tag);
+
list_del_init(&buf->list);
dma_free_attrs(inst->core->dev, buf->size, buf->va, buf->da,
buf->attrs);
kfree(buf);
}
- INIT_LIST_HEAD(&inst->dpbbufs);
+ if (list_empty(&inst->dpbbufs))
+ INIT_LIST_HEAD(&inst->dpbbufs);
return 0;
}
@@ -134,6 +155,7 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
unsigned int i;
u32 count;
int ret;
+ int id;
/* no need to allocate dpb buffers */
if (!inst->dpb_fmt)
@@ -171,6 +193,15 @@ int venus_helper_alloc_dpb_bufs(struct venus_inst *inst)
ret = -ENOMEM;
goto fail;
}
+ buf->owned_by = DRIVER;
+
+ id = ida_alloc_min(&inst->dpb_ids, VB2_MAX_FRAME, GFP_KERNEL);
+ if (id < 0) {
+ ret = id;
+ goto fail;
+ }
+
+ buf->dpb_out_tag = id;
list_add_tail(&buf->list, &inst->dpbbufs);
}
@@ -583,7 +614,7 @@ static int platform_get_bufreq(struct venus_inst *inst, u32 buftype,
return -EINVAL;
params.version = version;
- params.num_vpp_pipes = hfi_platform_num_vpp_pipes(version);
+ params.num_vpp_pipes = inst->core->res->num_vpp_pipes;
if (is_dec) {
params.width = inst->width;
@@ -623,9 +654,15 @@ int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
if (req)
memset(req, 0, sizeof(*req));
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ req->count_min = inst->fw_min_cnt;
+
ret = platform_get_bufreq(inst, type, req);
- if (!ret)
+ if (!ret) {
+ if (type == HFI_BUFFER_OUTPUT || type == HFI_BUFFER_OUTPUT2)
+ inst->fw_min_cnt = req->count_min;
return 0;
+ }
ret = hfi_session_get_property(inst, ptype, &hprop);
if (ret)
@@ -1365,6 +1402,24 @@ venus_helper_find_buf(struct venus_inst *inst, unsigned int type, u32 idx)
}
EXPORT_SYMBOL_GPL(venus_helper_find_buf);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 tag)
+{
+ struct intbuf *dpb_buf;
+
+ if (!V4L2_TYPE_IS_CAPTURE(type) ||
+ buf_type != inst->dpb_buftype)
+ return;
+
+ list_for_each_entry(dpb_buf, &inst->dpbbufs, list)
+ if (dpb_buf->dpb_out_tag == tag) {
+ dpb_buf->owned_by = DRIVER;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(venus_helper_change_dpb_owner);
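
Each DPB buffer now carries a tag allocated from the per-instance IDA and echoed back by the firmware in clnt_data, which is what the lookup above matches on before handing ownership back to the driver. A minimal sketch of the tag life cycle using the kernel IDA API; the dpb_example_* names are placeholders, and starting above VB2_MAX_FRAME keeps tags clear of vb2 buffer indices:

#include <linux/gfp.h>
#include <linux/idr.h>
#include <media/videobuf2-core.h>	/* VB2_MAX_FRAME */

static DEFINE_IDA(dpb_example_ids);

/* Allocate a tag above VB2_MAX_FRAME so it can never collide with a vb2 index. */
static int dpb_example_alloc_tag(void)
{
	return ida_alloc_min(&dpb_example_ids, VB2_MAX_FRAME, GFP_KERNEL);
}

static void dpb_example_free_tag(int tag)
{
	ida_free(&dpb_example_ids, tag);
}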
+
int venus_helper_vb2_buf_init(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
@@ -1480,7 +1535,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
ret |= venus_helper_intbufs_free(inst);
ret |= hfi_session_deinit(inst);
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
ret = -EIO;
if (ret)
@@ -1504,10 +1559,24 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q)
venus_pm_release_core(inst);
+ inst->session_error = 0;
+
mutex_unlock(&inst->lock);
}
EXPORT_SYMBOL_GPL(venus_helper_vb2_stop_streaming);
+void venus_helper_vb2_queue_error(struct venus_inst *inst)
+{
+ struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
+ struct vb2_queue *q;
+
+ q = v4l2_m2m_get_src_vq(m2m_ctx);
+ vb2_queue_error(q);
+ q = v4l2_m2m_get_dst_vq(m2m_ctx);
+ vb2_queue_error(q);
+}
+EXPORT_SYMBOL_GPL(venus_helper_vb2_queue_error);
+
int venus_helper_process_initial_cap_bufs(struct venus_inst *inst)
{
struct v4l2_m2m_ctx *m2m_ctx = inst->m2m_ctx;
diff --git a/drivers/media/platform/qcom/venus/helpers.h b/drivers/media/platform/qcom/venus/helpers.h
index e6269b4be3af..32619c3e8c97 100644
--- a/drivers/media/platform/qcom/venus/helpers.h
+++ b/drivers/media/platform/qcom/venus/helpers.h
@@ -14,6 +14,9 @@ struct venus_core;
bool venus_helper_check_codec(struct venus_inst *inst, u32 v4l2_pixfmt);
struct vb2_v4l2_buffer *venus_helper_find_buf(struct venus_inst *inst,
unsigned int type, u32 idx);
+void venus_helper_change_dpb_owner(struct venus_inst *inst,
+ struct vb2_v4l2_buffer *vbuf, unsigned int type,
+ unsigned int buf_type, u32 idx);
void venus_helper_buffers_done(struct venus_inst *inst, unsigned int type,
enum vb2_buffer_state state);
int venus_helper_vb2_buf_init(struct vb2_buffer *vb);
@@ -21,6 +24,7 @@ int venus_helper_vb2_buf_prepare(struct vb2_buffer *vb);
void venus_helper_vb2_buf_queue(struct vb2_buffer *vb);
void venus_helper_vb2_stop_streaming(struct vb2_queue *q);
int venus_helper_vb2_start_streaming(struct venus_inst *inst);
+void venus_helper_vb2_queue_error(struct venus_inst *inst);
void venus_helper_m2m_device_run(void *priv);
void venus_helper_m2m_job_abort(void *priv);
int venus_helper_get_bufreq(struct venus_inst *inst, u32 type,
diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c
index 0f2482367e06..4e2151fb47f0 100644
--- a/drivers/media/platform/qcom/venus/hfi.c
+++ b/drivers/media/platform/qcom/venus/hfi.c
@@ -187,6 +187,11 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
mutex_lock(&core->lock);
+ if (test_bit(0, &inst->core->sys_error)) {
+ ret = -EIO;
+ goto unlock;
+ }
+
max = atomic_add_unless(&core->insts_count, 1,
core->max_sessions_supported);
if (!max) {
@@ -196,6 +201,7 @@ int hfi_session_create(struct venus_inst *inst, const struct hfi_inst_ops *ops)
ret = 0;
}
+unlock:
mutex_unlock(&core->lock);
return ret;
@@ -214,7 +220,7 @@ int hfi_session_init(struct venus_inst *inst, u32 pixfmt)
* session_init() can't pass successfully
*/
mutex_lock(&core->lock);
- if (!core->ops || core->sys_error) {
+ if (!core->ops || test_bit(0, &inst->core->sys_error)) {
mutex_unlock(&core->lock);
return -EIO;
}
@@ -263,6 +269,9 @@ int hfi_session_deinit(struct venus_inst *inst)
if (inst->state < INST_INIT)
return -EINVAL;
+ if (test_bit(0, &inst->core->sys_error))
+ goto done;
+
reinit_completion(&inst->done);
ret = ops->session_end(inst);
@@ -273,6 +282,7 @@ int hfi_session_deinit(struct venus_inst *inst)
if (ret)
return ret;
+done:
inst->state = INST_UNINIT;
return 0;
@@ -284,6 +294,9 @@ int hfi_session_start(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_LOAD_RESOURCES)
return -EINVAL;
@@ -308,6 +321,9 @@ int hfi_session_stop(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_START)
return -EINVAL;
@@ -331,6 +347,9 @@ int hfi_session_continue(struct venus_inst *inst)
{
struct venus_core *core = inst->core;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (core->res->hfi_version == HFI_VERSION_1XX)
return 0;
@@ -343,6 +362,9 @@ int hfi_session_abort(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_abort(inst);
@@ -362,6 +384,9 @@ int hfi_session_load_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_INIT)
return -EINVAL;
@@ -385,6 +410,9 @@ int hfi_session_unload_res(struct venus_inst *inst)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state != INST_STOP)
return -EINVAL;
@@ -409,6 +437,9 @@ int hfi_session_flush(struct venus_inst *inst, u32 type, bool block)
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_flush(inst, type);
@@ -429,6 +460,9 @@ int hfi_session_set_buffers(struct venus_inst *inst, struct hfi_buffer_desc *bd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
return ops->session_set_buffers(inst, bd);
}
@@ -438,6 +472,9 @@ int hfi_session_unset_buffers(struct venus_inst *inst,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
reinit_completion(&inst->done);
ret = ops->session_unset_buffers(inst, bd);
@@ -460,6 +497,9 @@ int hfi_session_get_property(struct venus_inst *inst, u32 ptype,
const struct hfi_ops *ops = inst->core->ops;
int ret;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -483,6 +523,9 @@ int hfi_session_set_property(struct venus_inst *inst, u32 ptype, void *pdata)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (inst->state < INST_INIT || inst->state >= INST_STOP)
return -EINVAL;
@@ -494,6 +537,9 @@ int hfi_session_process_buf(struct venus_inst *inst, struct hfi_frame_data *fd)
{
const struct hfi_ops *ops = inst->core->ops;
+ if (test_bit(0, &inst->core->sys_error))
+ return -EIO;
+
if (fd->buffer_type == HFI_BUFFER_INPUT)
return ops->session_etb(inst, fd);
else if (fd->buffer_type == HFI_BUFFER_OUTPUT ||
diff --git a/drivers/media/platform/qcom/venus/hfi_cmds.c b/drivers/media/platform/qcom/venus/hfi_cmds.c
index 60f4b8e4b8d0..5aea07307e02 100644
--- a/drivers/media/platform/qcom/venus/hfi_cmds.c
+++ b/drivers/media/platform/qcom/venus/hfi_cmds.c
@@ -1299,6 +1299,13 @@ pkt_session_set_property_6xx(struct hfi_session_set_property_pkt *pkt,
pkt->shdr.hdr.size += sizeof(u32) + sizeof(*cq);
break;
}
+ case HFI_PROPERTY_PARAM_WORK_ROUTE: {
+ struct hfi_video_work_route *in = pdata, *wr = prop_data;
+
+ wr->video_work_route = in->video_work_route;
+ pkt->shdr.hdr.size += sizeof(u32) + sizeof(*wr);
+ break;
+ }
default:
return pkt_session_set_property_4xx(pkt, cookie, ptype, pdata);
}
diff --git a/drivers/media/platform/qcom/venus/hfi_helper.h b/drivers/media/platform/qcom/venus/hfi_helper.h
index bec4feb63ceb..2daa88e3df9f 100644
--- a/drivers/media/platform/qcom/venus/hfi_helper.h
+++ b/drivers/media/platform/qcom/venus/hfi_helper.h
@@ -167,6 +167,7 @@
#define HFI_PROPERTY_PARAM_VDEC_RECOVERY_POINT_SEI_EXTRADATA 0x120300c
#define HFI_PROPERTY_PARAM_VDEC_THUMBNAIL_MODE 0x120300d
#define HFI_PROPERTY_PARAM_VDEC_FRAME_ASSEMBLY 0x120300e
+#define HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS 0x120300e
#define HFI_PROPERTY_PARAM_VDEC_VC1_FRAMEDISP_EXTRADATA 0x1203011
#define HFI_PROPERTY_PARAM_VDEC_VC1_SEQDISP_EXTRADATA 0x1203012
#define HFI_PROPERTY_PARAM_VDEC_TIMESTAMP_EXTRADATA 0x1203013
@@ -448,6 +449,7 @@
#define HFI_PROPERTY_PARAM_MVC_BUFFER_LAYOUT 0x100f
#define HFI_PROPERTY_PARAM_MAX_SESSIONS_SUPPORTED 0x1010
#define HFI_PROPERTY_PARAM_WORK_MODE 0x1015
+#define HFI_PROPERTY_PARAM_WORK_ROUTE 0x1017
/*
* HFI_PROPERTY_CONFIG_COMMON_START
@@ -873,6 +875,10 @@ struct hfi_video_work_mode {
u32 video_work_mode;
};
+struct hfi_video_work_route {
+ u32 video_work_route;
+};
+
struct hfi_h264_vui_timing_info {
u32 enable;
u32 fixed_framerate;
@@ -910,6 +916,14 @@ struct hfi_extradata_input_crop {
u32 height;
};
+struct hfi_dpb_counts {
+ u32 max_dpb_count;
+ u32 max_ref_frames;
+ u32 max_dec_buffering;
+ u32 max_reorder_frames;
+ u32 fw_min_cnt;
+};
+
#define HFI_COLOR_FORMAT_MONOCHROME 0x01
#define HFI_COLOR_FORMAT_NV12 0x02
#define HFI_COLOR_FORMAT_NV21 0x03
diff --git a/drivers/media/platform/qcom/venus/hfi_msgs.c b/drivers/media/platform/qcom/venus/hfi_msgs.c
index 9a2bdb002edc..df96db3761a7 100644
--- a/drivers/media/platform/qcom/venus/hfi_msgs.c
+++ b/drivers/media/platform/qcom/venus/hfi_msgs.c
@@ -32,6 +32,7 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
struct hfi_colour_space *colour_info;
struct hfi_buffer_requirements *bufreq;
struct hfi_extradata_input_crop *crop;
+ struct hfi_dpb_counts *dpb_count;
u8 *data_ptr;
u32 ptype;
@@ -110,6 +111,12 @@ static void event_seq_changed(struct venus_core *core, struct venus_inst *inst,
event.input_crop.height = crop->height;
data_ptr += sizeof(*crop);
break;
+ case HFI_PROPERTY_PARAM_VDEC_DPB_COUNTS:
+ data_ptr += sizeof(u32);
+ dpb_count = (struct hfi_dpb_counts *)data_ptr;
+ event.buf_count = dpb_count->fw_min_cnt;
+ data_ptr += sizeof(*dpb_count);
+ break;
default:
break;
}
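The event_seq_changed() hunk follows the existing convention for sequence-change properties: the payload is a u32 property type followed by the property struct, so the handler skips sizeof(u32), reads the struct, then advances data_ptr past it. A standalone sketch of that walk for the new DPB-counts payload (struct layout mirrored from hfi_helper.h above; memcpy replaces the kernel's in-place cast to stay alignment-safe):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct dpb_counts_sketch {		/* mirrors struct hfi_dpb_counts */
	uint32_t max_dpb_count;
	uint32_t max_ref_frames;
	uint32_t max_dec_buffering;
	uint32_t max_reorder_frames;
	uint32_t fw_min_cnt;
};

static uint32_t parse_fw_min_cnt(const uint8_t *data_ptr)
{
	struct dpb_counts_sketch dpb;

	data_ptr += sizeof(uint32_t);		/* skip the property type word */
	memcpy(&dpb, data_ptr, sizeof(dpb));	/* read the payload */
	return dpb.fw_min_cnt;
}

int main(void)
{
	uint8_t buf[sizeof(uint32_t) + sizeof(struct dpb_counts_sketch)] = { 0 };
	struct dpb_counts_sketch dpb = { .fw_min_cnt = 11 };

	memcpy(buf + sizeof(uint32_t), &dpb, sizeof(dpb));
	printf("fw_min_cnt = %u\n", (unsigned int)parse_fw_min_cnt(buf));	/* 11 */
	return 0;
}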
diff --git a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
index 479178b0600d..ea25c451222b 100644
--- a/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_plat_bufs_v6.c
@@ -1164,7 +1164,7 @@ static int output_buffer_count(u32 session_type, u32 codec)
output_min_count = 6;
break;
case V4L2_PIX_FMT_VP9:
- output_min_count = 9;
+ output_min_count = 11;
break;
case V4L2_PIX_FMT_H264:
case V4L2_PIX_FMT_HEVC:
@@ -1213,6 +1213,8 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
}
out_min_count = output_buffer_count(VIDC_SESSION_TYPE_DEC, codec);
+ /* Max of driver and FW count */
+ out_min_count = max(out_min_count, bufreq->count_min);
bufreq->type = buftype;
bufreq->region_size = 0;
@@ -1237,7 +1239,7 @@ static int bufreq_dec(struct hfi_plat_buffers_params *params, u32 buftype,
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH(version)) {
bufreq->size = dec_ops->scratch(width, height, is_interlaced);
} else if (buftype == HFI_BUFFER_INTERNAL_SCRATCH_1(version)) {
- bufreq->size = dec_ops->scratch1(width, height, out_min_count,
+ bufreq->size = dec_ops->scratch1(width, height, VB2_MAX_FRAME,
is_secondary_output,
num_vpp_pipes);
} else if (buftype == HFI_BUFFER_INTERNAL_PERSIST_1) {
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.c b/drivers/media/platform/qcom/venus/hfi_platform.c
index f5b4e1f4764f..f16f8962273c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform.c
@@ -66,16 +66,3 @@ hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec, u32 session_
return freq;
}
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version)
-{
- const struct hfi_platform *plat;
-
- plat = hfi_platform_get(version);
- if (!plat)
- return 0;
-
- if (plat->num_vpp_pipes)
- return plat->num_vpp_pipes();
-
- return 0;
-}
diff --git a/drivers/media/platform/qcom/venus/hfi_platform.h b/drivers/media/platform/qcom/venus/hfi_platform.h
index 2dbe608c53af..1dcf4085928c 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform.h
+++ b/drivers/media/platform/qcom/venus/hfi_platform.h
@@ -52,7 +52,6 @@ struct hfi_platform {
unsigned long (*codec_lp_freq)(u32 session_type, u32 codec);
void (*codecs)(u32 *enc_codecs, u32 *dec_codecs, u32 *count);
const struct hfi_plat_caps *(*capabilities)(unsigned int *entries);
- u8 (*num_vpp_pipes)(void);
int (*bufreq)(struct hfi_plat_buffers_params *params, u32 session_type,
u32 buftype, struct hfi_buffer_requirements *bufreq);
};
@@ -67,5 +66,4 @@ unsigned long hfi_platform_get_codec_vsp_freq(enum hfi_version version, u32 code
u32 session_type);
unsigned long hfi_platform_get_codec_lp_freq(enum hfi_version version, u32 codec,
u32 session_type);
-u8 hfi_platform_num_vpp_pipes(enum hfi_version version);
#endif
diff --git a/drivers/media/platform/qcom/venus/hfi_platform_v6.c b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
index d8243b22568a..c10618e44f5d 100644
--- a/drivers/media/platform/qcom/venus/hfi_platform_v6.c
+++ b/drivers/media/platform/qcom/venus/hfi_platform_v6.c
@@ -322,17 +322,11 @@ static unsigned long codec_lp_freq(u32 session_type, u32 codec)
return 0;
}
-static u8 num_vpp_pipes(void)
-{
- return 4;
-}
-
const struct hfi_platform hfi_plat_v6 = {
.codec_vpp_freq = codec_vpp_freq,
.codec_vsp_freq = codec_vsp_freq,
.codec_lp_freq = codec_lp_freq,
.codecs = get_codecs,
.capabilities = get_capabilities,
- .num_vpp_pipes = num_vpp_pipes,
.bufreq = hfi_plat_bufreq_v6,
};
diff --git a/drivers/media/platform/qcom/venus/hfi_venus.c b/drivers/media/platform/qcom/venus/hfi_venus.c
index ce98c523b3c6..3a75a27632fb 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus.c
+++ b/drivers/media/platform/qcom/venus/hfi_venus.c
@@ -551,6 +551,9 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (IS_V6(hdev->core)) {
writel(0x3, cpu_cs_base + CPU_CS_X2RPMH_V6);
+ if (hdev->core->res->num_vpp_pipes == 1)
+ goto skip_aon_mvp_noc;
+
writel(0x1, aon_base + AON_WRAPPER_MVP_NOC_LPI_CONTROL);
ret = readl_poll_timeout(aon_base + AON_WRAPPER_MVP_NOC_LPI_STATUS,
val,
@@ -560,6 +563,7 @@ static int venus_halt_axi(struct venus_hfi_device *hdev)
if (ret)
return -ETIMEDOUT;
+skip_aon_mvp_noc:
mask_val = (BIT(2) | BIT(1) | BIT(0));
writel(mask_val, wrapper_base + WRAPPER_DEBUG_BRIDGE_LPI_CONTROL_V6);
diff --git a/drivers/media/platform/qcom/venus/hfi_venus_io.h b/drivers/media/platform/qcom/venus/hfi_venus_io.h
index 300c6e47e72f..9735a246ce36 100644
--- a/drivers/media/platform/qcom/venus/hfi_venus_io.h
+++ b/drivers/media/platform/qcom/venus/hfi_venus_io.h
@@ -149,6 +149,8 @@
/* Wrapper TZ 6xx */
#define WRAPPER_TZ_BASE_V6 0x000c0000
#define WRAPPER_TZ_CPU_STATUS_V6 0x10
+#define WRAPPER_TZ_XTSS_SW_RESET 0x1000
+#define WRAPPER_XTSS_SW_RESET_BIT BIT(0)
/* Venus AON */
#define AON_BASE_V6 0x000e0000
diff --git a/drivers/media/platform/qcom/venus/pm_helpers.c b/drivers/media/platform/qcom/venus/pm_helpers.c
index 3e2345eb47f7..cedc664ba755 100644
--- a/drivers/media/platform/qcom/venus/pm_helpers.c
+++ b/drivers/media/platform/qcom/venus/pm_helpers.c
@@ -1085,12 +1085,16 @@ static unsigned long calculate_inst_freq(struct venus_inst *inst,
if (inst->state != INST_START)
return 0;
- if (inst->session_type == VIDC_SESSION_TYPE_ENC)
+ if (inst->session_type == VIDC_SESSION_TYPE_ENC) {
vpp_freq_per_mb = inst->flags & VENUS_LOW_POWER ?
inst->clk_data.low_power_freq :
inst->clk_data.vpp_freq;
- vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ vpp_freq = mbs_per_sec * vpp_freq_per_mb;
+ } else {
+ vpp_freq = mbs_per_sec * inst->clk_data.vpp_freq;
+ }
+
/* 21 / 20 is overhead factor */
vpp_freq += vpp_freq / 20;
vsp_freq = mbs_per_sec * inst->clk_data.vsp_freq;
@@ -1139,9 +1143,10 @@ static int load_scale_v4(struct venus_inst *inst)
freq = max(freq_core1, freq_core2);
if (freq > table[0].freq) {
+ dev_dbg(dev, VDBGL "requested clock rate: %lu scaling clock rate: %lu\n",
+ freq, table[0].freq);
+
freq = table[0].freq;
- dev_warn(dev, "HW is overloaded, needed: %lu max: %lu\n",
- freq, table[0].freq);
goto set_freq;
}
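The calculate_inst_freq() hunk keeps the same load model: required cycles are macroblocks-per-second times a per-macroblock VPP cost, plus the 21/20 (5%) overhead factor noted in the comment. A standalone sketch of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned long mbs_per_sec = 8160 * 30;	/* e.g. 1080p (8160 MBs) at 30 fps */
	unsigned long vpp_freq_per_mb = 675;	/* illustrative cycles per macroblock */
	unsigned long vpp_freq;

	vpp_freq = mbs_per_sec * vpp_freq_per_mb;
	vpp_freq += vpp_freq / 20;		/* 21/20 overhead factor */

	printf("required VPP clock: %lu Hz\n", vpp_freq);
	return 0;
}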
diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c
index 198e47eb63f4..91da3f509724 100644
--- a/drivers/media/platform/qcom/venus/vdec.c
+++ b/drivers/media/platform/qcom/venus/vdec.c
@@ -332,8 +332,11 @@ static int vdec_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
inst->fmt_out = fmt;
- else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
inst->fmt_cap = fmt;
+ inst->output2_buf_size =
+ venus_helper_get_framesz(pixfmt_cap, orig_pixmp.width, orig_pixmp.height);
+ }
return 0;
}
@@ -653,6 +656,19 @@ static int vdec_set_properties(struct venus_inst *inst)
return 0;
}
+static int vdec_set_work_route(struct venus_inst *inst)
+{
+ u32 ptype = HFI_PROPERTY_PARAM_WORK_ROUTE;
+ struct hfi_video_work_route wr;
+
+ if (!IS_V6(inst->core))
+ return 0;
+
+ wr.video_work_route = inst->core->res->num_vpp_pipes;
+
+ return hfi_session_set_property(inst, ptype, &wr);
+}
+
#define is_ubwc_fmt(fmt) (!!((fmt) & HFI_COLOR_FORMAT_UBWC_BASE))
static int vdec_output_conf(struct venus_inst *inst)
@@ -830,6 +846,7 @@ static int vdec_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int in_num, out_num;
int ret = 0;
@@ -855,6 +872,16 @@ static int vdec_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
ret = vdec_pm_get(inst);
if (ret)
return ret;
@@ -970,23 +997,23 @@ reconfigure:
if (ret)
goto err;
+ venus_pm_load_scale(inst);
+
+ inst->next_buf_last = false;
+
ret = venus_helper_alloc_dpb_bufs(inst);
if (ret)
goto err;
- ret = venus_helper_queue_dpb_bufs(inst);
+ ret = hfi_session_continue(inst);
if (ret)
goto free_dpb_bufs;
- ret = venus_helper_process_initial_cap_bufs(inst);
+ ret = venus_helper_queue_dpb_bufs(inst);
if (ret)
goto free_dpb_bufs;
- venus_pm_load_scale(inst);
-
- inst->next_buf_last = false;
-
- ret = hfi_session_continue(inst);
+ ret = venus_helper_process_initial_cap_bufs(inst);
if (ret)
goto free_dpb_bufs;
@@ -1039,6 +1066,10 @@ static int vdec_start_output(struct venus_inst *inst)
if (ret)
return ret;
+ ret = vdec_set_work_route(inst);
+ if (ret)
+ return ret;
+
ret = vdec_output_conf(inst);
if (ret)
return ret;
@@ -1178,6 +1209,8 @@ static void vdec_stop_streaming(struct vb2_queue *q)
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_ERROR);
+ inst->session_error = 0;
+
if (ret)
goto unlock;
@@ -1211,7 +1244,7 @@ static void vdec_session_release(struct venus_inst *inst)
ret = hfi_session_deinit(inst);
abort = (ret && ret != -EINVAL) ? 1 : 0;
- if (inst->session_error || core->sys_error)
+ if (inst->session_error || test_bit(0, &core->sys_error))
abort = 1;
if (abort)
@@ -1306,8 +1339,10 @@ static void vdec_buf_done(struct venus_inst *inst, unsigned int buf_type,
type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
vbuf = venus_helper_find_buf(inst, type, tag);
- if (!vbuf)
+ if (!vbuf) {
+ venus_helper_change_dpb_owner(inst, vbuf, type, buf_type, tag);
return;
+ }
vbuf->flags = flags;
vbuf->field = V4L2_FIELD_NONE;
@@ -1389,6 +1424,11 @@ static void vdec_event_change(struct venus_inst *inst,
inst->crop.height = ev_data->height;
}
+ inst->fw_min_cnt = ev_data->buf_count;
+ /* Overwrite this to 11 for VP9 to work around a firmware bug */
+ if (inst->hfi_codec == HFI_VIDEO_CODEC_VP9)
+ inst->fw_min_cnt = 11;
+
inst->out_width = ev_data->width;
inst->out_height = ev_data->height;
@@ -1448,6 +1488,7 @@ static void vdec_event_notify(struct venus_inst *inst, u32 event,
switch (event) {
case EVT_SESSION_ERROR:
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "dec: event session error %x\n", inst->error);
break;
case EVT_SYS_EVENT_CHANGE:
@@ -1492,6 +1533,7 @@ static void vdec_inst_init(struct venus_inst *inst)
inst->crop.top = 0;
inst->crop.width = inst->width;
inst->crop.height = inst->height;
+ inst->fw_min_cnt = 8;
inst->out_width = frame_width_min(inst);
inst->out_height = frame_height_min(inst);
inst->fps = 30;
@@ -1568,6 +1610,8 @@ static int vdec_open(struct file *file)
inst->bit_depth = VIDC_BITDEPTH_8;
inst->pic_struct = HFI_INTERLACE_FRAME_PROGRESSIVE;
init_waitqueue_head(&inst->reconf_wait);
+ inst->nonblock = file->f_flags & O_NONBLOCK;
+
venus_helper_init_instance(inst);
ret = vdec_ctrl_init(inst);
@@ -1580,6 +1624,8 @@ static int vdec_open(struct file *file)
vdec_inst_init(inst);
+ ida_init(&inst->dpb_ids);
+
/*
* create m2m device for every instance, the m2m context scheduling
* is made by firmware side so we do not need to care about.
@@ -1625,6 +1671,7 @@ static int vdec_close(struct file *file)
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
vdec_ctrl_deinit(inst);
+ ida_destroy(&inst->dpb_ids);
hfi_session_destroy(inst);
mutex_destroy(&inst->lock);
v4l2_fh_del(&inst->fh);
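The vdec_queue_setup() hunk gates new streaming setup on firmware recovery: a non-blocking open gets -EAGAIN while bit 0 of core->sys_error is set, a blocking open sleeps on core->sys_err_done until the bit clears. A standalone sketch of just that decision (the wait is stubbed out and all names are illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool wait_for_recovery(void)
{
	/* stands in for wait_event_interruptible(core->sys_err_done, ...) */
	return true;	/* pretend recovery completed */
}

static int queue_setup_gate(bool sys_error, bool nonblock)
{
	if (!sys_error)
		return 0;
	if (nonblock)
		return -EAGAIN;
	return wait_for_recovery() ? 0 : -EINTR;
}

int main(void)
{
	printf("%d %d %d\n",
	       queue_setup_gate(false, false),	/* 0 */
	       queue_setup_gate(true, true),	/* -EAGAIN */
	       queue_setup_gate(true, false));	/* 0 once recovery is done */
	return 0;
}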
diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c
index bc1c42dd53c0..84bafc3118cc 100644
--- a/drivers/media/platform/qcom/venus/venc.c
+++ b/drivers/media/platform/qcom/venus/venc.c
@@ -538,6 +538,64 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = {
.vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};
+static int venc_pm_get(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+ ret = pm_runtime_resume_and_get(dev);
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_put(struct venus_inst *inst, bool autosuspend)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret;
+
+ mutex_lock(&core->pm_lock);
+
+ if (autosuspend)
+ ret = pm_runtime_put_autosuspend(dev);
+ else
+ ret = pm_runtime_put_sync(dev);
+
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static int venc_pm_get_put(struct venus_inst *inst)
+{
+ struct venus_core *core = inst->core;
+ struct device *dev = core->dev_enc;
+ int ret = 0;
+
+ mutex_lock(&core->pm_lock);
+
+ if (pm_runtime_suspended(dev)) {
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto error;
+
+ ret = pm_runtime_put_autosuspend(dev);
+ }
+
+error:
+ mutex_unlock(&core->pm_lock);
+
+ return ret < 0 ? ret : 0;
+}
+
+static void venc_pm_touch(struct venus_inst *inst)
+{
+ pm_runtime_mark_last_busy(inst->core->dev_enc);
+}
+
static int venc_set_properties(struct venus_inst *inst)
{
struct venc_controls *ctr = &inst->controls.enc;
@@ -908,6 +966,7 @@ static int venc_queue_setup(struct vb2_queue *q,
unsigned int sizes[], struct device *alloc_devs[])
{
struct venus_inst *inst = vb2_get_drv_priv(q);
+ struct venus_core *core = inst->core;
unsigned int num, min = 4;
int ret;
@@ -931,11 +990,29 @@ static int venc_queue_setup(struct vb2_queue *q,
return 0;
}
+ if (test_bit(0, &core->sys_error)) {
+ if (inst->nonblock)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(core->sys_err_done,
+ !test_bit(0, &core->sys_error));
+ if (ret)
+ return ret;
+ }
+
+ ret = venc_pm_get(inst);
+ if (ret)
+ return ret;
+
mutex_lock(&inst->lock);
ret = venc_init_session(inst);
mutex_unlock(&inst->lock);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, false);
+ if (ret)
return ret;
switch (q->type) {
@@ -971,6 +1048,9 @@ static int venc_queue_setup(struct vb2_queue *q,
}
return ret;
+put_power:
+ venc_pm_put(inst, false);
+ return ret;
}
static int venc_buf_init(struct vb2_buffer *vb)
@@ -986,6 +1066,8 @@ static void venc_release_session(struct venus_inst *inst)
{
int ret;
+ venc_pm_get(inst);
+
mutex_lock(&inst->lock);
ret = hfi_session_deinit(inst);
@@ -997,6 +1079,8 @@ static void venc_release_session(struct venus_inst *inst)
venus_pm_load_scale(inst);
INIT_LIST_HEAD(&inst->registeredbufs);
venus_pm_release_core(inst);
+
+ venc_pm_put(inst, false);
}
static void venc_buf_cleanup(struct vb2_buffer *vb)
@@ -1066,8 +1150,16 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
inst->sequence_cap = 0;
inst->sequence_out = 0;
+ ret = venc_pm_get(inst);
+ if (ret)
+ goto error;
+
ret = venus_pm_acquire_core(inst);
if (ret)
+ goto put_power;
+
+ ret = venc_pm_put(inst, true);
+ if (ret)
goto error;
ret = venc_set_properties(inst);
@@ -1091,6 +1183,8 @@ static int venc_start_streaming(struct vb2_queue *q, unsigned int count)
return 0;
+put_power:
+ venc_pm_put(inst, false);
error:
venus_helper_buffers_done(inst, q->type, VB2_BUF_STATE_QUEUED);
if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
@@ -1105,6 +1199,8 @@ static void venc_vb2_buf_queue(struct vb2_buffer *vb)
{
struct venus_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
+ venc_pm_get_put(inst);
+
mutex_lock(&inst->lock);
venus_helper_vb2_buf_queue(vb);
mutex_unlock(&inst->lock);
@@ -1128,6 +1224,8 @@ static void venc_buf_done(struct venus_inst *inst, unsigned int buf_type,
struct vb2_buffer *vb;
unsigned int type;
+ venc_pm_touch(inst);
+
if (buf_type == HFI_BUFFER_INPUT)
type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
else
@@ -1157,8 +1255,11 @@ static void venc_event_notify(struct venus_inst *inst, u32 event,
{
struct device *dev = inst->core->dev_enc;
+ venc_pm_touch(inst);
+
if (event == EVT_SESSION_ERROR) {
inst->session_error = true;
+ venus_helper_vb2_queue_error(inst);
dev_err(dev, "enc: event session error %x\n", inst->error);
}
}
@@ -1242,16 +1343,13 @@ static int venc_open(struct file *file)
inst->session_type = VIDC_SESSION_TYPE_ENC;
inst->clk_data.core_id = VIDC_CORE_ID_DEFAULT;
inst->core_acquired = false;
+ inst->nonblock = file->f_flags & O_NONBLOCK;
venus_helper_init_instance(inst);
- ret = pm_runtime_resume_and_get(core->dev_enc);
- if (ret < 0)
- goto err_free;
-
ret = venc_ctrl_init(inst);
if (ret)
- goto err_put_sync;
+ goto err_free;
ret = hfi_session_create(inst, &venc_hfi_ops);
if (ret)
@@ -1290,8 +1388,6 @@ err_session_destroy:
hfi_session_destroy(inst);
err_ctrl_deinit:
venc_ctrl_deinit(inst);
-err_put_sync:
- pm_runtime_put_sync(core->dev_enc);
err_free:
kfree(inst);
return ret;
@@ -1301,6 +1397,8 @@ static int venc_close(struct file *file)
{
struct venus_inst *inst = to_inst(file);
+ venc_pm_get(inst);
+
v4l2_m2m_ctx_release(inst->m2m_ctx);
v4l2_m2m_release(inst->m2m_dev);
venc_ctrl_deinit(inst);
@@ -1309,7 +1407,7 @@ static int venc_close(struct file *file)
v4l2_fh_del(&inst->fh);
v4l2_fh_exit(&inst->fh);
- pm_runtime_put_sync(inst->core->dev_enc);
+ venc_pm_put(inst, false);
kfree(inst);
return 0;
@@ -1366,6 +1464,8 @@ static int venc_probe(struct platform_device *pdev)
core->dev_enc = dev;
video_set_drvdata(vdev, core);
+ pm_runtime_set_autosuspend_delay(dev, 2000);
+ pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return 0;
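The venc_pm_* helpers above pair every firmware interaction with a runtime-PM get/put under pm_lock and rely on the new autosuspend delay for the idle case. A standalone model of that pairing discipline (made-up types stand in for the runtime-PM core; this is not how the kernel implements it):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct model_dev {
	int users;		/* like the runtime-PM usage count */
	bool suspended;
	bool autosuspend_armed;
};

static void model_get(struct model_dev *d)
{
	d->users++;			/* pm_runtime_resume_and_get() */
	d->suspended = false;
	d->autosuspend_armed = false;
}

static void model_put(struct model_dev *d, bool autosuspend)
{
	assert(d->users > 0);		/* an unbalanced put is a bug */
	if (--d->users)
		return;
	if (autosuspend)
		d->autosuspend_armed = true;	/* pm_runtime_put_autosuspend() */
	else
		d->suspended = true;		/* pm_runtime_put_sync() */
}

int main(void)
{
	struct model_dev enc = { .users = 0, .suspended = true };

	model_get(&enc);		/* as in venc_queue_setup() */
	/* ... create the session with the firmware ... */
	model_put(&enc, false);		/* done for now, suspend right away */

	model_get(&enc);		/* as in venc_start_streaming() */
	model_put(&enc, true);		/* keep warm, let autosuspend decide */

	printf("users=%d suspended=%d autosuspend_armed=%d\n",
	       enc.users, enc.suspended, enc.autosuspend_armed);
	return 0;
}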
diff --git a/drivers/media/platform/rcar-isp.c b/drivers/media/platform/rcar-isp.c
new file mode 100644
index 000000000000..2ffab30bc011
--- /dev/null
+++ b/drivers/media/platform/rcar-isp.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2021 Renesas Electronics Corp.
+ *
+ * Driver for Renesas R-Car ISP Channel Selector
+ *
+ * The ISP hardware is capable of more than just channel selection, features
+ * such as demosaicing, white balance control and color space conversion are
+ * also possible. These more advanced features are not supported by the driver
+ * due to lack of documentation.
+ */
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <media/v4l2-subdev.h>
+
+#define ISPINPUTSEL0_REG 0x0008
+#define ISPINPUTSEL0_SEL_CSI0 BIT(31)
+
+#define ISPSTART_REG 0x0014
+#define ISPSTART_START 0xffff
+#define ISPSTART_STOP 0x0000
+
+#define ISPPROCMODE_DT_REG(n) (0x1100 + (0x4 * (n)))
+#define ISPPROCMODE_DT_PROC_MODE_VC3(pm) (((pm) & 0x3f) << 24)
+#define ISPPROCMODE_DT_PROC_MODE_VC2(pm) (((pm) & 0x3f) << 16)
+#define ISPPROCMODE_DT_PROC_MODE_VC1(pm) (((pm) & 0x3f) << 8)
+#define ISPPROCMODE_DT_PROC_MODE_VC0(pm) ((pm) & 0x3f)
+
+#define ISPCS_FILTER_ID_CH_REG(n) (0x3000 + (0x0100 * (n)))
+
+#define ISPCS_DT_CODE03_CH_REG(n) (0x3008 + (0x100 * (n)))
+#define ISPCS_DT_CODE03_EN3 BIT(31)
+#define ISPCS_DT_CODE03_DT3(dt) (((dt) & 0x3f) << 24)
+#define ISPCS_DT_CODE03_EN2 BIT(23)
+#define ISPCS_DT_CODE03_DT2(dt) (((dt) & 0x3f) << 16)
+#define ISPCS_DT_CODE03_EN1 BIT(15)
+#define ISPCS_DT_CODE03_DT1(dt) (((dt) & 0x3f) << 8)
+#define ISPCS_DT_CODE03_EN0 BIT(7)
+#define ISPCS_DT_CODE03_DT0(dt) ((dt) & 0x3f)
+
+struct rcar_isp_format {
+ u32 code;
+ unsigned int datatype;
+ unsigned int procmode;
+};
+
+static const struct rcar_isp_format rcar_isp_formats[] = {
+ { .code = MEDIA_BUS_FMT_RGB888_1X24, .datatype = 0x24, .procmode = 0x15 },
+ { .code = MEDIA_BUS_FMT_Y10_1X10, .datatype = 0x2b, .procmode = 0x10 },
+ { .code = MEDIA_BUS_FMT_UYVY8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV8_1X16, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_UYVY8_2X8, .datatype = 0x1e, .procmode = 0x0c },
+ { .code = MEDIA_BUS_FMT_YUYV10_2X10, .datatype = 0x1e, .procmode = 0x0c },
+};
+
+static const struct rcar_isp_format *risp_code_to_fmt(unsigned int code)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(rcar_isp_formats); i++) {
+ if (rcar_isp_formats[i].code == code)
+ return &rcar_isp_formats[i];
+ }
+
+ return NULL;
+}
+
+enum rcar_isp_input {
+ RISP_CSI_INPUT0,
+ RISP_CSI_INPUT1,
+};
+
+enum rcar_isp_pads {
+ RCAR_ISP_SINK,
+ RCAR_ISP_PORT0,
+ RCAR_ISP_PORT1,
+ RCAR_ISP_PORT2,
+ RCAR_ISP_PORT3,
+ RCAR_ISP_PORT4,
+ RCAR_ISP_PORT5,
+ RCAR_ISP_PORT6,
+ RCAR_ISP_PORT7,
+ RCAR_ISP_NUM_PADS,
+};
+
+struct rcar_isp {
+ struct device *dev;
+ void __iomem *base;
+ struct reset_control *rstc;
+
+ enum rcar_isp_input csi_input;
+
+ struct v4l2_subdev subdev;
+ struct media_pad pads[RCAR_ISP_NUM_PADS];
+
+ struct v4l2_async_notifier notifier;
+ struct v4l2_subdev *remote;
+
+ struct mutex lock; /* Protects mf and stream_count. */
+ struct v4l2_mbus_framefmt mf;
+ int stream_count;
+};
+
+static inline struct rcar_isp *sd_to_isp(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct rcar_isp, subdev);
+}
+
+static inline struct rcar_isp *notifier_to_isp(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct rcar_isp, notifier);
+}
+
+static void risp_write(struct rcar_isp *isp, u32 offset, u32 value)
+{
+ iowrite32(value, isp->base + offset);
+}
+
+static u32 risp_read(struct rcar_isp *isp, u32 offset)
+{
+ return ioread32(isp->base + offset);
+}
+
+static int risp_power_on(struct rcar_isp *isp)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(isp->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = reset_control_deassert(isp->rstc);
+ if (ret < 0) {
+ pm_runtime_put(isp->dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void risp_power_off(struct rcar_isp *isp)
+{
+ reset_control_assert(isp->rstc);
+ pm_runtime_put(isp->dev);
+}
+
+static int risp_start(struct rcar_isp *isp)
+{
+ const struct rcar_isp_format *format;
+ unsigned int vc;
+ u32 sel_csi = 0;
+ int ret;
+
+ format = risp_code_to_fmt(isp->mf.code);
+ if (!format) {
+ dev_err(isp->dev, "Unsupported bus format\n");
+ return -EINVAL;
+ }
+
+ ret = risp_power_on(isp);
+ if (ret) {
+ dev_err(isp->dev, "Failed to power on ISP\n");
+ return ret;
+ }
+
+ /* Select CSI-2 input source. */
+ if (isp->csi_input == RISP_CSI_INPUT1)
+ sel_csi = ISPINPUTSEL0_SEL_CSI0;
+
+ risp_write(isp, ISPINPUTSEL0_REG,
+ risp_read(isp, ISPINPUTSEL0_REG) | sel_csi);
+
+ /* Configure Channel Selector. */
+ for (vc = 0; vc < 4; vc++) {
+ u8 ch = vc + 4;
+ u8 dt = format->datatype;
+
+ risp_write(isp, ISPCS_FILTER_ID_CH_REG(ch), BIT(vc));
+ risp_write(isp, ISPCS_DT_CODE03_CH_REG(ch),
+ ISPCS_DT_CODE03_EN3 | ISPCS_DT_CODE03_DT3(dt) |
+ ISPCS_DT_CODE03_EN2 | ISPCS_DT_CODE03_DT2(dt) |
+ ISPCS_DT_CODE03_EN1 | ISPCS_DT_CODE03_DT1(dt) |
+ ISPCS_DT_CODE03_EN0 | ISPCS_DT_CODE03_DT0(dt));
+ }
+
+ /* Setup processing method. */
+ risp_write(isp, ISPPROCMODE_DT_REG(format->datatype),
+ ISPPROCMODE_DT_PROC_MODE_VC3(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC2(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC1(format->procmode) |
+ ISPPROCMODE_DT_PROC_MODE_VC0(format->procmode));
+
+ /* Start ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_START);
+
+ ret = v4l2_subdev_call(isp->remote, video, s_stream, 1);
+ if (ret)
+ risp_power_off(isp);
+
+ return ret;
+}
+
+static void risp_stop(struct rcar_isp *isp)
+{
+ v4l2_subdev_call(isp->remote, video, s_stream, 0);
+
+ /* Stop ISP. */
+ risp_write(isp, ISPSTART_REG, ISPSTART_STOP);
+
+ risp_power_off(isp);
+}
+
+static int risp_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ int ret = 0;
+
+ mutex_lock(&isp->lock);
+
+ if (!isp->remote) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ if (enable && isp->stream_count == 0) {
+ ret = risp_start(isp);
+ if (ret)
+ goto out;
+ } else if (!enable && isp->stream_count == 1) {
+ risp_stop(isp);
+ }
+
+ isp->stream_count += enable ? 1 : -1;
+out:
+ mutex_unlock(&isp->lock);
+
+ return ret;
+}
+
+static const struct v4l2_subdev_video_ops risp_video_ops = {
+ .s_stream = risp_s_stream,
+};
+
+static int risp_set_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+ struct v4l2_mbus_framefmt *framefmt;
+
+ mutex_lock(&isp->lock);
+
+ if (!risp_code_to_fmt(format->format.code))
+ format->format.code = rcar_isp_formats[0].code;
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
+ isp->mf = format->format;
+ } else {
+ framefmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
+ *framefmt = format->format;
+ }
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static int risp_get_pad_format(struct v4l2_subdev *sd,
+ struct v4l2_subdev_state *sd_state,
+ struct v4l2_subdev_format *format)
+{
+ struct rcar_isp *isp = sd_to_isp(sd);
+
+ mutex_lock(&isp->lock);
+
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
+ format->format = isp->mf;
+ else
+ format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+
+ mutex_unlock(&isp->lock);
+
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops risp_pad_ops = {
+ .set_fmt = risp_set_pad_format,
+ .get_fmt = risp_get_pad_format,
+ .link_validate = v4l2_subdev_link_validate_default,
+};
+
+static const struct v4l2_subdev_ops rcar_isp_subdev_ops = {
+ .video = &risp_video_ops,
+ .pad = &risp_pad_ops,
+};
+
+/* -----------------------------------------------------------------------------
+ * Async handling and registration of subdevices and links
+ */
+
+static int risp_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+ int pad;
+
+ pad = media_entity_get_fwnode_pad(&subdev->entity, asd->match.fwnode,
+ MEDIA_PAD_FL_SOURCE);
+ if (pad < 0) {
+ dev_err(isp->dev, "Failed to find pad for %s\n", subdev->name);
+ return pad;
+ }
+
+ isp->remote = subdev;
+
+ dev_dbg(isp->dev, "Bound %s pad: %d\n", subdev->name, pad);
+
+ return media_create_pad_link(&subdev->entity, pad,
+ &isp->subdev.entity, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+}
+
+static void risp_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rcar_isp *isp = notifier_to_isp(notifier);
+
+ isp->remote = NULL;
+
+ dev_dbg(isp->dev, "Unbind %s\n", subdev->name);
+}
+
+static const struct v4l2_async_notifier_operations risp_notify_ops = {
+ .bound = risp_notify_bound,
+ .unbind = risp_notify_unbind,
+};
+
+static int risp_parse_dt(struct rcar_isp *isp)
+{
+ struct v4l2_async_subdev *asd;
+ struct fwnode_handle *fwnode;
+ struct fwnode_handle *ep;
+ unsigned int id;
+ int ret;
+
+ for (id = 0; id < 2; id++) {
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(isp->dev),
+ 0, id, 0);
+ if (ep)
+ break;
+ }
+
+ if (!ep) {
+ dev_err(isp->dev, "Not connected to subdevice\n");
+ return -EINVAL;
+ }
+
+ if (id == 1)
+ isp->csi_input = RISP_CSI_INPUT1;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ fwnode_handle_put(ep);
+
+ dev_dbg(isp->dev, "Found '%pOF'\n", to_of_node(fwnode));
+
+ v4l2_async_nf_init(&isp->notifier);
+ isp->notifier.ops = &risp_notify_ops;
+
+ asd = v4l2_async_nf_add_fwnode(&isp->notifier, fwnode,
+ struct v4l2_async_subdev);
+ fwnode_handle_put(fwnode);
+ if (IS_ERR(asd))
+ return PTR_ERR(asd);
+
+ ret = v4l2_async_subdev_nf_register(&isp->subdev, &isp->notifier);
+ if (ret)
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ return ret;
+}
+
+/* -----------------------------------------------------------------------------
+ * Platform Device Driver
+ */
+
+static const struct media_entity_operations risp_entity_ops = {
+ .link_validate = v4l2_subdev_link_validate,
+};
+
+static int risp_probe_resources(struct rcar_isp *isp,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ isp->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(isp->base))
+ return PTR_ERR(isp->base);
+
+ isp->rstc = devm_reset_control_get(&pdev->dev, NULL);
+
+ return PTR_ERR_OR_ZERO(isp->rstc);
+}
+
+static const struct of_device_id risp_of_id_table[] = {
+ { .compatible = "renesas,r8a779a0-isp" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, risp_of_id_table);
+
+static int risp_probe(struct platform_device *pdev)
+{
+ struct rcar_isp *isp;
+ unsigned int i;
+ int ret;
+
+ isp = devm_kzalloc(&pdev->dev, sizeof(*isp), GFP_KERNEL);
+ if (!isp)
+ return -ENOMEM;
+
+ isp->dev = &pdev->dev;
+
+ mutex_init(&isp->lock);
+
+ ret = risp_probe_resources(isp, pdev);
+ if (ret) {
+ dev_err(isp->dev, "Failed to get resources\n");
+ goto error_mutex;
+ }
+
+ platform_set_drvdata(pdev, isp);
+
+ pm_runtime_enable(&pdev->dev);
+
+ ret = risp_parse_dt(isp);
+ if (ret)
+ goto error_pm;
+
+ isp->subdev.owner = THIS_MODULE;
+ isp->subdev.dev = &pdev->dev;
+ v4l2_subdev_init(&isp->subdev, &rcar_isp_subdev_ops);
+ v4l2_set_subdevdata(&isp->subdev, &pdev->dev);
+ snprintf(isp->subdev.name, V4L2_SUBDEV_NAME_SIZE, "%s %s",
+ KBUILD_MODNAME, dev_name(&pdev->dev));
+ isp->subdev.flags = V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ isp->subdev.entity.function = MEDIA_ENT_F_VID_MUX;
+ isp->subdev.entity.ops = &risp_entity_ops;
+
+ isp->pads[RCAR_ISP_SINK].flags = MEDIA_PAD_FL_SINK;
+ for (i = RCAR_ISP_PORT0; i < RCAR_ISP_NUM_PADS; i++)
+ isp->pads[i].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&isp->subdev.entity, RCAR_ISP_NUM_PADS,
+ isp->pads);
+ if (ret)
+ goto error_notifier;
+
+ ret = v4l2_async_register_subdev(&isp->subdev);
+ if (ret < 0)
+ goto error_notifier;
+
+ dev_info(isp->dev, "Using CSI-2 input: %u\n", isp->csi_input);
+
+ return 0;
+error_notifier:
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+error_pm:
+ pm_runtime_disable(&pdev->dev);
+error_mutex:
+ mutex_destroy(&isp->lock);
+
+ return ret;
+}
+
+static int risp_remove(struct platform_device *pdev)
+{
+ struct rcar_isp *isp = platform_get_drvdata(pdev);
+
+ v4l2_async_nf_unregister(&isp->notifier);
+ v4l2_async_nf_cleanup(&isp->notifier);
+
+ v4l2_async_unregister_subdev(&isp->subdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ mutex_destroy(&isp->lock);
+
+ return 0;
+}
+
+static struct platform_driver rcar_isp_driver = {
+ .driver = {
+ .name = "rcar-isp",
+ .of_match_table = risp_of_id_table,
+ },
+ .probe = risp_probe,
+ .remove = risp_remove,
+};
+
+module_platform_driver(rcar_isp_driver);
+
+MODULE_AUTHOR("Niklas Söderlund <niklas.soderlund@ragnatech.se>");
+MODULE_DESCRIPTION("Renesas R-Car ISP Channel Selector driver");
+MODULE_LICENSE("GPL");
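For reference, the channel-selector programming in risp_start() writes one DT code register per channel; the value is built from the ISPCS_DT_CODE03_* bitfields defined near the top of the file. A standalone sketch that reproduces that bit layout for the 0x1e datatype used by the UYVY/YUYV entries of rcar_isp_formats[]:

#include <stdint.h>
#include <stdio.h>

/* Same bit layout as the ISPCS_DT_CODE03_* macros above. */
#define EN3		(1u << 31)
#define DT3(dt)		(((dt) & 0x3f) << 24)
#define EN2		(1u << 23)
#define DT2(dt)		(((dt) & 0x3f) << 16)
#define EN1		(1u << 15)
#define DT1(dt)		(((dt) & 0x3f) << 8)
#define EN0		(1u << 7)
#define DT0(dt)		((dt) & 0x3f)

int main(void)
{
	uint32_t dt = 0x1e;	/* datatype from the UYVY/YUYV format entries */
	uint32_t val = EN3 | DT3(dt) | EN2 | DT2(dt) |
		       EN1 | DT1(dt) | EN0 | DT0(dt);

	printf("ISPCS_DT_CODE03 value: 0x%08x\n", val);	/* 0x9e9e9e9e */
	return 0;
}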
diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
index 33957cc9118c..1d92cc8ede8f 100644
--- a/drivers/media/platform/rcar-vin/rcar-core.c
+++ b/drivers/media/platform/rcar-vin/rcar-core.c
@@ -45,188 +45,7 @@
#define v4l2_dev_to_vin(d) container_of(d, struct rvin_dev, v4l2_dev)
/* -----------------------------------------------------------------------------
- * Media Controller link notification
- */
-
-/* group lock should be held when calling this function. */
-static int rvin_group_entity_to_csi_id(struct rvin_group *group,
- struct media_entity *entity)
-{
- struct v4l2_subdev *sd;
- unsigned int i;
-
- sd = media_entity_to_v4l2_subdev(entity);
-
- for (i = 0; i < RVIN_CSI_MAX; i++)
- if (group->csi[i].subdev == sd)
- return i;
-
- return -ENODEV;
-}
-
-static unsigned int rvin_group_get_mask(struct rvin_dev *vin,
- enum rvin_csi_id csi_id,
- unsigned char channel)
-{
- const struct rvin_group_route *route;
- unsigned int mask = 0;
-
- for (route = vin->info->routes; route->mask; route++) {
- if (route->vin == vin->id &&
- route->csi == csi_id &&
- route->channel == channel) {
- vin_dbg(vin,
- "Adding route: vin: %d csi: %d channel: %d\n",
- route->vin, route->csi, route->channel);
- mask |= route->mask;
- }
- }
-
- return mask;
-}
-
-/*
- * Link setup for the links between a VIN and a CSI-2 receiver is a bit
- * complex. The reason for this is that the register controlling routing
- * is not present in each VIN instance. There are special VINs which
- * control routing for themselves and other VINs. There are not many
- * different possible links combinations that can be enabled at the same
- * time, therefor all already enabled links which are controlled by a
- * master VIN need to be taken into account when making the decision
- * if a new link can be enabled or not.
- *
- * 1. Find out which VIN the link the user tries to enable is connected to.
- * 2. Lookup which master VIN controls the links for this VIN.
- * 3. Start with a bitmask with all bits set.
- * 4. For each previously enabled link from the master VIN bitwise AND its
- * route mask (see documentation for mask in struct rvin_group_route)
- * with the bitmask.
- * 5. Bitwise AND the mask for the link the user tries to enable to the bitmask.
- * 6. If the bitmask is not empty at this point the new link can be enabled
- * while keeping all previous links enabled. Update the CHSEL value of the
- * master VIN and inform the user that the link could be enabled.
- *
- * Please note that no link can be enabled if any VIN in the group is
- * currently open.
- */
-static int rvin_group_link_notify(struct media_link *link, u32 flags,
- unsigned int notification)
-{
- struct rvin_group *group = container_of(link->graph_obj.mdev,
- struct rvin_group, mdev);
- unsigned int master_id, channel, mask_new, i;
- unsigned int mask = ~0;
- struct media_entity *entity;
- struct video_device *vdev;
- struct media_pad *csi_pad;
- struct rvin_dev *vin = NULL;
- int csi_id, ret;
-
- ret = v4l2_pipeline_link_notify(link, flags, notification);
- if (ret)
- return ret;
-
- /* Only care about link enablement for VIN nodes. */
- if (!(flags & MEDIA_LNK_FL_ENABLED) ||
- !is_media_entity_v4l2_video_device(link->sink->entity))
- return 0;
-
- /*
- * Don't allow link changes if any entity in the graph is
- * streaming, modifying the CHSEL register fields can disrupt
- * running streams.
- */
- media_device_for_each_entity(entity, &group->mdev)
- if (entity->stream_count)
- return -EBUSY;
-
- mutex_lock(&group->lock);
-
- /* Find the master VIN that controls the routes. */
- vdev = media_entity_to_video_device(link->sink->entity);
- vin = container_of(vdev, struct rvin_dev, vdev);
- master_id = rvin_group_id_to_master(vin->id);
-
- if (WARN_ON(!group->vin[master_id])) {
- ret = -ENODEV;
- goto out;
- }
-
- /* Build a mask for already enabled links. */
- for (i = master_id; i < master_id + 4; i++) {
- if (!group->vin[i])
- continue;
-
- /* Get remote CSI-2, if any. */
- csi_pad = media_entity_remote_pad(
- &group->vin[i]->vdev.entity.pads[0]);
- if (!csi_pad)
- continue;
-
- csi_id = rvin_group_entity_to_csi_id(group, csi_pad->entity);
- channel = rvin_group_csi_pad_to_channel(csi_pad->index);
-
- mask &= rvin_group_get_mask(group->vin[i], csi_id, channel);
- }
-
- /* Add the new link to the existing mask and check if it works. */
- csi_id = rvin_group_entity_to_csi_id(group, link->source->entity);
-
- if (csi_id == -ENODEV) {
- struct v4l2_subdev *sd;
-
- /*
- * Make sure the source entity subdevice is registered as
- * a parallel input of one of the enabled VINs if it is not
- * one of the CSI-2 subdevices.
- *
- * No hardware configuration required for parallel inputs,
- * we can return here.
- */
- sd = media_entity_to_v4l2_subdev(link->source->entity);
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (group->vin[i] &&
- group->vin[i]->parallel.subdev == sd) {
- group->vin[i]->is_csi = false;
- ret = 0;
- goto out;
- }
- }
-
- vin_err(vin, "Subdevice %s not registered to any VIN\n",
- link->source->entity->name);
- ret = -ENODEV;
- goto out;
- }
-
- channel = rvin_group_csi_pad_to_channel(link->source->index);
- mask_new = mask & rvin_group_get_mask(vin, csi_id, channel);
- vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
-
- if (!mask_new) {
- ret = -EMLINK;
- goto out;
- }
-
- /* New valid CHSEL found, set the new value. */
- ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
- if (ret)
- goto out;
-
- vin->is_csi = true;
-
-out:
- mutex_unlock(&group->lock);
-
- return ret;
-}
-
-static const struct media_device_ops rvin_media_ops = {
- .link_notify = rvin_group_link_notify,
-};
-
-/* -----------------------------------------------------------------------------
- * Gen3 CSI2 Group Allocator
+ * Gen3 Group Allocator
*/
/* FIXME: This should if we find a system that supports more
@@ -247,7 +66,9 @@ static void rvin_group_cleanup(struct rvin_group *group)
mutex_destroy(&group->lock);
}
-static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
+static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct media_device *mdev = &group->mdev;
const struct of_device_id *match;
@@ -263,8 +84,10 @@ static int rvin_group_init(struct rvin_group *group, struct rvin_dev *vin)
vin_dbg(vin, "found %u enabled VIN's in DT", group->count);
+ group->link_setup = link_setup;
+
mdev->dev = vin->dev;
- mdev->ops = &rvin_media_ops;
+ mdev->ops = ops;
match = of_match_node(vin->dev->driver->of_match_table,
vin->dev->of_node);
@@ -295,7 +118,9 @@ static void rvin_group_release(struct kref *kref)
mutex_unlock(&rvin_group_lock);
}
-static int rvin_group_get(struct rvin_dev *vin)
+static int rvin_group_get(struct rvin_dev *vin,
+ int (*link_setup)(struct rvin_dev *),
+ const struct media_device_ops *ops)
{
struct rvin_group *group;
u32 id;
@@ -327,7 +152,7 @@ static int rvin_group_get(struct rvin_dev *vin)
goto err_group;
}
- ret = rvin_group_init(group, vin);
+ ret = rvin_group_init(group, vin, link_setup, ops);
if (ret) {
kfree(group);
vin_err(vin, "Failed to initialize group\n");
@@ -383,6 +208,213 @@ out:
kref_put(&group->refcount, rvin_group_release);
}
+/* group lock should be held when calling this function. */
+static int rvin_group_entity_to_remote_id(struct rvin_group *group,
+ struct media_entity *entity)
+{
+ struct v4l2_subdev *sd;
+ unsigned int i;
+
+ sd = media_entity_to_v4l2_subdev(entity);
+
+ for (i = 0; i < RVIN_REMOTES_MAX; i++)
+ if (group->remotes[i].subdev == sd)
+ return i;
+
+ return -ENODEV;
+}
+
+static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+ int ret;
+
+ ret = media_device_register(&vin->group->mdev);
+ if (ret)
+ return ret;
+
+ ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
+ if (ret) {
+ vin_err(vin, "Failed to register subdev nodes\n");
+ return ret;
+ }
+
+ /* Register all video nodes for the group. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i] &&
+ !video_is_registered(&vin->group->vin[i]->vdev)) {
+ ret = rvin_v4l2_register(vin->group->vin[i]);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return vin->group->link_setup(vin);
+}
+
+static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ for (i = 0; i < RCAR_VIN_NUM; i++)
+ if (vin->group->vin[i])
+ rvin_v4l2_unregister(vin->group->vin[i]);
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = NULL;
+ vin_dbg(vin, "Unbind %s from slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ media_device_unregister(&vin->group->mdev);
+}
+
+static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
+{
+ struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
+ unsigned int i;
+
+ mutex_lock(&vin->group->lock);
+
+ for (i = 0; i < RVIN_CSI_MAX; i++) {
+ if (vin->group->remotes[i].asd != asd)
+ continue;
+ vin->group->remotes[i].subdev = subdev;
+ vin_dbg(vin, "Bound %s to slot %u\n", subdev->name, i);
+ break;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ return 0;
+}
+
+static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
+ .bound = rvin_group_notify_bound,
+ .unbind = rvin_group_notify_unbind,
+ .complete = rvin_group_notify_complete,
+};
+
+static int rvin_group_parse_of(struct rvin_dev *vin, unsigned int port,
+ unsigned int id)
+{
+ struct fwnode_handle *ep, *fwnode;
+ struct v4l2_fwnode_endpoint vep = {
+ .bus_type = V4L2_MBUS_CSI2_DPHY,
+ };
+ struct v4l2_async_subdev *asd;
+ int ret;
+
+ ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), port, id, 0);
+ if (!ep)
+ return 0;
+
+ fwnode = fwnode_graph_get_remote_endpoint(ep);
+ ret = v4l2_fwnode_endpoint_parse(ep, &vep);
+ fwnode_handle_put(ep);
+ if (ret) {
+ vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
+ ret = -EINVAL;
+ goto out;
+ }
+
+ asd = v4l2_async_nf_add_fwnode(&vin->group->notifier, fwnode,
+ struct v4l2_async_subdev);
+ if (IS_ERR(asd)) {
+ ret = PTR_ERR(asd);
+ goto out;
+ }
+
+ vin->group->remotes[vep.base.id].asd = asd;
+
+ vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
+ to_of_node(fwnode), vep.base.id);
+out:
+ fwnode_handle_put(fwnode);
+
+ return ret;
+}
+
+static void rvin_group_notifier_cleanup(struct rvin_dev *vin)
+{
+ mutex_lock(&vin->group->lock);
+ if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
+ v4l2_async_nf_unregister(&vin->group->notifier);
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ }
+ mutex_unlock(&vin->group->lock);
+}
+
+static int rvin_group_notifier_init(struct rvin_dev *vin, unsigned int port,
+ unsigned int max_id)
+{
+ unsigned int count = 0, vin_mask = 0;
+ unsigned int i, id;
+ int ret;
+
+ mutex_lock(&vin->group->lock);
+
+ /* If not all VIN's are registered don't register the notifier. */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (vin->group->vin[i]) {
+ count++;
+ vin_mask |= BIT(i);
+ }
+ }
+
+ if (vin->group->count != count) {
+ mutex_unlock(&vin->group->lock);
+ return 0;
+ }
+
+ mutex_unlock(&vin->group->lock);
+
+ v4l2_async_nf_init(&vin->group->notifier);
+
+ /*
+ * Some subdevices may overlap but the parser function can handle it and
+ * each subdevice will only be registered once with the group notifier.
+ */
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (!(vin_mask & BIT(i)))
+ continue;
+
+ for (id = 0; id < max_id; id++) {
+ if (vin->group->remotes[id].asd)
+ continue;
+
+ ret = rvin_group_parse_of(vin->group->vin[i], port, id);
+ if (ret)
+ return ret;
+ }
+ }
+
+ if (list_empty(&vin->group->notifier.asd_list))
+ return 0;
+
+ vin->group->notifier.ops = &rvin_group_notify_ops;
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->group->notifier);
+ if (ret < 0) {
+ vin_err(vin, "Notifier registration failed\n");
+ v4l2_async_nf_cleanup(&vin->group->notifier);
+ return ret;
+ }
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Controls
*/
@@ -405,6 +437,45 @@ static const struct v4l2_ctrl_ops rvin_ctrl_ops = {
.s_ctrl = rvin_s_ctrl,
};
+static void rvin_free_controls(struct rvin_dev *vin)
+{
+ v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ vin->vdev.ctrl_handler = NULL;
+}
+
+static int rvin_create_controls(struct rvin_dev *vin, struct v4l2_subdev *subdev)
+{
+ int ret;
+
+ ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ if (ret < 0)
+ return ret;
+
+ /* The VIN directly deals with alpha component. */
+ v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+
+ if (vin->ctrl_handler.error) {
+ ret = vin->ctrl_handler.error;
+ rvin_free_controls(vin);
+ return ret;
+ }
+
+ /* For the non-MC mode add controls from the subdevice. */
+ if (subdev) {
+ ret = v4l2_ctrl_add_handler(&vin->ctrl_handler,
+ subdev->ctrl_handler, NULL, true);
+ if (ret < 0) {
+ rvin_free_controls(vin);
+ return ret;
+ }
+ }
+
+ vin->vdev.ctrl_handler = &vin->ctrl_handler;
+
+ return 0;
+}
+
/* -----------------------------------------------------------------------------
* Async notifier
*/
@@ -490,28 +561,10 @@ static int rvin_parallel_subdevice_attach(struct rvin_dev *vin,
return ret;
/* Add the controls */
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 16);
+ ret = rvin_create_controls(vin, subdev);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
-
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- ret = v4l2_ctrl_add_handler(&vin->ctrl_handler, subdev->ctrl_handler,
- NULL, true);
- if (ret < 0) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
-
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
-
vin->parallel.subdev = subdev;
return 0;
@@ -522,10 +575,8 @@ static void rvin_parallel_subdevice_detach(struct rvin_dev *vin)
rvin_v4l2_unregister(vin);
vin->parallel.subdev = NULL;
- if (!vin->info->use_mc) {
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- vin->vdev.ctrl_handler = NULL;
- }
+ if (!vin->info->use_mc)
+ rvin_free_controls(vin);
}
static int rvin_parallel_notify_complete(struct v4l2_async_notifier *notifier)
@@ -641,8 +692,8 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin)
goto out;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&vin->notifier, fwnode,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -657,28 +708,33 @@ out:
return ret;
}
+static void rvin_parallel_cleanup(struct rvin_dev *vin)
+{
+ v4l2_async_nf_unregister(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
+}
+
static int rvin_parallel_init(struct rvin_dev *vin)
{
int ret;
- v4l2_async_notifier_init(&vin->notifier);
+ v4l2_async_nf_init(&vin->notifier);
ret = rvin_parallel_parse_of(vin);
if (ret)
return ret;
- /* If using mc, it's fine not to have any input registered. */
if (!vin->parallel.asd)
- return vin->info->use_mc ? 0 : -ENODEV;
+ return -ENODEV;
vin_dbg(vin, "Found parallel subdevice %pOF\n",
to_of_node(vin->parallel.asd->match.fwnode));
vin->notifier.ops = &rvin_parallel_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev, &vin->notifier);
+ ret = v4l2_async_nf_register(&vin->v4l2_dev, &vin->notifier);
if (ret < 0) {
vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->notifier);
+ v4l2_async_nf_cleanup(&vin->notifier);
return ret;
}
@@ -686,36 +742,175 @@ static int rvin_parallel_init(struct rvin_dev *vin)
}
/* -----------------------------------------------------------------------------
- * Group async notifier
+ * CSI-2
*/
-static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
+static unsigned int rvin_csi2_get_mask(struct rvin_dev *vin,
+ enum rvin_csi_id csi_id,
+ unsigned char channel)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
const struct rvin_group_route *route;
- unsigned int i;
- int ret;
+ unsigned int mask = 0;
- ret = media_device_register(&vin->group->mdev);
+ for (route = vin->info->routes; route->mask; route++) {
+ if (route->vin == vin->id &&
+ route->csi == csi_id &&
+ route->channel == channel) {
+ vin_dbg(vin,
+ "Adding route: vin: %d csi: %d channel: %d\n",
+ route->vin, route->csi, route->channel);
+ mask |= route->mask;
+ }
+ }
+
+ return mask;
+}
+
+/*
+ * Link setup for the links between a VIN and a CSI-2 receiver is a bit
+ * complex. The reason for this is that the register controlling routing
+ * is not present in each VIN instance. There are special VINs which
+ * control routing for themselves and other VINs. There are not many
+ * different possible link combinations that can be enabled at the same
+ * time, therefore all already enabled links which are controlled by a
+ * master VIN need to be taken into account when deciding whether a new
+ * link can be enabled.
+ *
+ * 1. Find out which VIN the link the user tries to enable is connected to.
+ * 2. Lookup which master VIN controls the links for this VIN.
+ * 3. Start with a bitmask with all bits set.
+ * 4. For each previously enabled link from the master VIN bitwise AND its
+ * route mask (see documentation for mask in struct rvin_group_route)
+ * with the bitmask.
+ * 5. Bitwise AND the mask for the link the user tries to enable to the bitmask.
+ * 6. If the bitmask is not empty at this point the new link can be enabled
+ * while keeping all previous links enabled. Update the CHSEL value of the
+ * master VIN and inform the user that the link could be enabled.
+ *
+ * Please note that no link can be enabled if any VIN in the group is
+ * currently open.
+ */
+static int rvin_csi2_link_notify(struct media_link *link, u32 flags,
+ unsigned int notification)
+{
+ struct rvin_group *group = container_of(link->graph_obj.mdev,
+ struct rvin_group, mdev);
+ unsigned int master_id, channel, mask_new, i;
+ unsigned int mask = ~0;
+ struct media_entity *entity;
+ struct video_device *vdev;
+ struct media_pad *csi_pad;
+ struct rvin_dev *vin = NULL;
+ int csi_id, ret;
+
+ ret = v4l2_pipeline_link_notify(link, flags, notification);
if (ret)
return ret;
- ret = v4l2_device_register_subdev_nodes(&vin->v4l2_dev);
- if (ret) {
- vin_err(vin, "Failed to register subdev nodes\n");
- return ret;
+ /* Only care about link enablement for VIN nodes. */
+ if (!(flags & MEDIA_LNK_FL_ENABLED) ||
+ !is_media_entity_v4l2_video_device(link->sink->entity))
+ return 0;
+
+ /*
+ * Don't allow link changes if any entity in the graph is
+ * streaming, modifying the CHSEL register fields can disrupt
+ * running streams.
+ */
+ media_device_for_each_entity(entity, &group->mdev)
+ if (entity->stream_count)
+ return -EBUSY;
+
+ mutex_lock(&group->lock);
+
+ /* Find the master VIN that controls the routes. */
+ vdev = media_entity_to_video_device(link->sink->entity);
+ vin = container_of(vdev, struct rvin_dev, vdev);
+ master_id = rvin_group_id_to_master(vin->id);
+
+ if (WARN_ON(!group->vin[master_id])) {
+ ret = -ENODEV;
+ goto out;
}
- /* Register all video nodes for the group. */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i] &&
- !video_is_registered(&vin->group->vin[i]->vdev)) {
- ret = rvin_v4l2_register(vin->group->vin[i]);
- if (ret)
- return ret;
+ /* Build a mask for already enabled links. */
+ for (i = master_id; i < master_id + 4; i++) {
+ if (!group->vin[i])
+ continue;
+
+ /* Get remote CSI-2, if any. */
+ csi_pad = media_entity_remote_pad(
+ &group->vin[i]->vdev.entity.pads[0]);
+ if (!csi_pad)
+ continue;
+
+ csi_id = rvin_group_entity_to_remote_id(group, csi_pad->entity);
+ channel = rvin_group_csi_pad_to_channel(csi_pad->index);
+
+ mask &= rvin_csi2_get_mask(group->vin[i], csi_id, channel);
+ }
+
+ /* Add the new link to the existing mask and check if it works. */
+ csi_id = rvin_group_entity_to_remote_id(group, link->source->entity);
+
+ if (csi_id == -ENODEV) {
+ struct v4l2_subdev *sd;
+
+ /*
+ * Make sure the source entity subdevice is registered as
+ * a parallel input of one of the enabled VINs if it is not
+ * one of the CSI-2 subdevices.
+ *
+ * No hardware configuration required for parallel inputs,
+ * we can return here.
+ */
+ sd = media_entity_to_v4l2_subdev(link->source->entity);
+ for (i = 0; i < RCAR_VIN_NUM; i++) {
+ if (group->vin[i] &&
+ group->vin[i]->parallel.subdev == sd) {
+ group->vin[i]->is_csi = false;
+ ret = 0;
+ goto out;
+ }
}
+
+ vin_err(vin, "Subdevice %s not registered to any VIN\n",
+ link->source->entity->name);
+ ret = -ENODEV;
+ goto out;
}
+ channel = rvin_group_csi_pad_to_channel(link->source->index);
+ mask_new = mask & rvin_csi2_get_mask(vin, csi_id, channel);
+ vin_dbg(vin, "Try link change mask: 0x%x new: 0x%x\n", mask, mask_new);
+
+ if (!mask_new) {
+ ret = -EMLINK;
+ goto out;
+ }
+
+ /* New valid CHSEL found, set the new value. */
+ ret = rvin_set_channel_routing(group->vin[master_id], __ffs(mask_new));
+ if (ret)
+ goto out;
+
+ vin->is_csi = true;
+
+out:
+ mutex_unlock(&group->lock);
+
+ return ret;
+}
+
+static const struct media_device_ops rvin_csi2_media_ops = {
+ .link_notify = rvin_csi2_link_notify,
+};
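The numbered algorithm in the comment above reduces to intersecting route masks: start with all bits set, AND in the mask of every already enabled link under the master VIN, AND in the mask of the link being enabled, and accept the link only if a bit survives. A standalone sketch of that decision (the masks are made up; POSIX ffs() models __ffs()):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int enabled_links[] = { 0x0c, 0x1c };	/* masks of links already enabled */
	unsigned int new_link = 0x14;			/* mask of the link to enable */
	unsigned int mask = ~0u;
	unsigned int i;

	for (i = 0; i < 2; i++)
		mask &= enabled_links[i];		/* step 4 */

	mask &= new_link;				/* step 5 */

	if (!mask) {
		printf("link rejected (-EMLINK)\n");	/* step 6, nothing survives */
		return 1;
	}

	printf("new CHSEL value: %d\n", ffs(mask) - 1);	/* lowest surviving bit */
	return 0;
}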
+
+static int rvin_csi2_setup_links(struct rvin_dev *vin)
+{
+ const struct rvin_group_route *route;
+ int ret = -EINVAL;
+
/* Create all media device links between VINs and CSI-2's. */
mutex_lock(&vin->group->lock);
for (route = vin->info->routes; route->mask; route++) {
@@ -732,10 +927,10 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
continue;
/* Check that CSI-2 is part of the group. */
- if (!vin->group->csi[route->csi].subdev)
+ if (!vin->group->remotes[route->csi].subdev)
continue;
- source = &vin->group->csi[route->csi].subdev->entity;
+ source = &vin->group->remotes[route->csi].subdev->entity;
source_idx = rvin_group_csi_channel_to_pad(route->channel);
source_pad = &source->pads[source_idx];
@@ -758,167 +953,107 @@ static int rvin_group_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
-static void rvin_group_notify_unbind(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
-{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- for (i = 0; i < RCAR_VIN_NUM; i++)
- if (vin->group->vin[i])
- rvin_v4l2_unregister(vin->group->vin[i]);
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = NULL;
- vin_dbg(vin, "Unbind CSI-2 %s from slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- media_device_unregister(&vin->group->mdev);
-}
-
-static int rvin_group_notify_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void rvin_csi2_cleanup(struct rvin_dev *vin)
{
- struct rvin_dev *vin = v4l2_dev_to_vin(notifier->v4l2_dev);
- unsigned int i;
-
- mutex_lock(&vin->group->lock);
-
- for (i = 0; i < RVIN_CSI_MAX; i++) {
- if (vin->group->csi[i].asd != asd)
- continue;
- vin->group->csi[i].subdev = subdev;
- vin_dbg(vin, "Bound CSI-2 %s to slot %u\n", subdev->name, i);
- break;
- }
-
- mutex_unlock(&vin->group->lock);
-
- return 0;
+ rvin_parallel_cleanup(vin);
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static const struct v4l2_async_notifier_operations rvin_group_notify_ops = {
- .bound = rvin_group_notify_bound,
- .unbind = rvin_group_notify_unbind,
- .complete = rvin_group_notify_complete,
-};
-
-static int rvin_mc_parse_of(struct rvin_dev *vin, unsigned int id)
+static int rvin_csi2_init(struct rvin_dev *vin)
{
- struct fwnode_handle *ep, *fwnode;
- struct v4l2_fwnode_endpoint vep = {
- .bus_type = V4L2_MBUS_CSI2_DPHY,
- };
- struct v4l2_async_subdev *asd;
int ret;
- ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(vin->dev), 1, id, 0);
- if (!ep)
- return 0;
+ vin->pad.flags = MEDIA_PAD_FL_SINK;
+ ret = media_entity_pads_init(&vin->vdev.entity, 1, &vin->pad);
+ if (ret)
+ return ret;
- fwnode = fwnode_graph_get_remote_endpoint(ep);
- ret = v4l2_fwnode_endpoint_parse(ep, &vep);
- fwnode_handle_put(ep);
- if (ret) {
- vin_err(vin, "Failed to parse %pOF\n", to_of_node(fwnode));
- ret = -EINVAL;
- goto out;
- }
+ ret = rvin_create_controls(vin, NULL);
+ if (ret < 0)
+ return ret;
- if (!of_device_is_available(to_of_node(fwnode))) {
- vin_dbg(vin, "OF device %pOF disabled, ignoring\n",
- to_of_node(fwnode));
- ret = -ENOTCONN;
- goto out;
- }
+ ret = rvin_group_get(vin, rvin_csi2_setup_links, &rvin_csi2_media_ops);
+ if (ret)
+ goto err_controls;
- asd = v4l2_async_notifier_add_fwnode_subdev(&vin->group->notifier,
- fwnode,
- struct v4l2_async_subdev);
- if (IS_ERR(asd)) {
- ret = PTR_ERR(asd);
- goto out;
- }
+ /* It's OK to not have a parallel subdevice. */
+ ret = rvin_parallel_init(vin);
+ if (ret && ret != -ENODEV)
+ goto err_group;
- vin->group->csi[vep.base.id].asd = asd;
+ ret = rvin_group_notifier_init(vin, 1, RVIN_CSI_MAX);
+ if (ret)
+ goto err_parallel;
- vin_dbg(vin, "Add group OF device %pOF to slot %u\n",
- to_of_node(fwnode), vep.base.id);
-out:
- fwnode_handle_put(fwnode);
+ return 0;
+err_parallel:
+ rvin_parallel_cleanup(vin);
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
-static int rvin_mc_parse_of_graph(struct rvin_dev *vin)
+/* -----------------------------------------------------------------------------
+ * ISP
+ */
+
+static int rvin_isp_setup_links(struct rvin_dev *vin)
{
- unsigned int count = 0, vin_mask = 0;
- unsigned int i, id;
- int ret;
+ unsigned int i;
+ int ret = -EINVAL;
+ /* Create all media device links between VINs and ISPs. */
mutex_lock(&vin->group->lock);
-
- /* If not all VIN's are registered don't register the notifier. */
for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (vin->group->vin[i]) {
- count++;
- vin_mask |= BIT(i);
- }
- }
+ struct media_pad *source_pad, *sink_pad;
+ struct media_entity *source, *sink;
+ unsigned int source_slot = i / 8;
+ unsigned int source_idx = i % 8 + 1;
- if (vin->group->count != count) {
- mutex_unlock(&vin->group->lock);
- return 0;
- }
+ if (!vin->group->vin[i])
+ continue;
- mutex_unlock(&vin->group->lock);
+ /* Check that ISP is part of the group. */
+ if (!vin->group->remotes[source_slot].subdev)
+ continue;
- v4l2_async_notifier_init(&vin->group->notifier);
+ source = &vin->group->remotes[source_slot].subdev->entity;
+ source_pad = &source->pads[source_idx];
- /*
- * Have all VIN's look for CSI-2 subdevices. Some subdevices will
- * overlap but the parser function can handle it, so each subdevice
- * will only be registered once with the group notifier.
- */
- for (i = 0; i < RCAR_VIN_NUM; i++) {
- if (!(vin_mask & BIT(i)))
- continue;
+ sink = &vin->group->vin[i]->vdev.entity;
+ sink_pad = &sink->pads[0];
- for (id = 0; id < RVIN_CSI_MAX; id++) {
- if (vin->group->csi[id].asd)
- continue;
+ /* Skip if link already exists. */
+ if (media_entity_find_link(source_pad, sink_pad))
+ continue;
- ret = rvin_mc_parse_of(vin->group->vin[i], id);
- if (ret)
- return ret;
+ ret = media_create_pad_link(source, source_idx, sink, 0,
+ MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ if (ret) {
+ vin_err(vin, "Error adding link from %s to %s\n",
+ source->name, sink->name);
+ break;
}
}
+ mutex_unlock(&vin->group->lock);
- if (list_empty(&vin->group->notifier.asd_list))
- return 0;
-
- vin->group->notifier.ops = &rvin_group_notify_ops;
- ret = v4l2_async_notifier_register(&vin->v4l2_dev,
- &vin->group->notifier);
- if (ret < 0) {
- vin_err(vin, "Notifier registration failed\n");
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- return ret;
- }
+ return ret;
+}
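/*
 * Editor's note, not part of the patch: rvin_isp_setup_links() derives the
 * ISP connection purely from the VIN index: ISP slot = vin_id / 8 and ISP
 * source pad = vin_id % 8 + 1 (pad 0 being the ISP sink).  A stand-alone
 * check of that arithmetic, assuming the eight-VINs-per-ISP layout implied
 * by the code above:
 */
#include <stdio.h>

int main(void)
{
	unsigned int vin_id;

	for (vin_id = 0; vin_id < 32; vin_id += 9)
		printf("VIN%u -> ISP slot %u, source pad %u\n",
		       vin_id, vin_id / 8, vin_id % 8 + 1);

	/*
	 * Prints: VIN0 -> slot 0 pad 1, VIN9 -> slot 1 pad 2,
	 *         VIN18 -> slot 2 pad 3, VIN27 -> slot 3 pad 4.
	 */
	return 0;
}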
- return 0;
+static void rvin_isp_cleanup(struct rvin_dev *vin)
+{
+ rvin_group_notifier_cleanup(vin);
+ rvin_group_put(vin);
+ rvin_free_controls(vin);
}
-static int rvin_mc_init(struct rvin_dev *vin)
+static int rvin_isp_init(struct rvin_dev *vin)
{
int ret;
@@ -927,28 +1062,23 @@ static int rvin_mc_init(struct rvin_dev *vin)
if (ret)
return ret;
- ret = rvin_group_get(vin);
- if (ret)
- return ret;
-
- ret = rvin_mc_parse_of_graph(vin);
- if (ret)
- rvin_group_put(vin);
-
- ret = v4l2_ctrl_handler_init(&vin->ctrl_handler, 1);
+ ret = rvin_create_controls(vin, NULL);
if (ret < 0)
return ret;
- v4l2_ctrl_new_std(&vin->ctrl_handler, &rvin_ctrl_ops,
- V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);
+ ret = rvin_group_get(vin, rvin_isp_setup_links, NULL);
+ if (ret)
+ goto err_controls;
- if (vin->ctrl_handler.error) {
- ret = vin->ctrl_handler.error;
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
- return ret;
- }
+ ret = rvin_group_notifier_init(vin, 2, RVIN_ISP_MAX);
+ if (ret)
+ goto err_group;
- vin->vdev.ctrl_handler = &vin->ctrl_handler;
+ return 0;
+err_group:
+ rvin_group_put(vin);
+err_controls:
+ rvin_free_controls(vin);
return ret;
}
@@ -1325,6 +1455,15 @@ static const struct rvin_info rcar_info_r8a77995 = {
.routes = rcar_info_r8a77995_routes,
};
+static const struct rvin_info rcar_info_r8a779a0 = {
+ .model = RCAR_GEN3,
+ .use_mc = true,
+ .use_isp = true,
+ .nv12 = true,
+ .max_width = 4096,
+ .max_height = 4096,
+};
+
static const struct of_device_id rvin_of_id_table[] = {
{
.compatible = "renesas,vin-r8a774a1",
@@ -1386,6 +1525,10 @@ static const struct of_device_id rvin_of_id_table[] = {
.compatible = "renesas,vin-r8a77995",
.data = &rcar_info_r8a77995,
},
+ {
+ .compatible = "renesas,vin-r8a779a0",
+ .data = &rcar_info_r8a779a0,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, rvin_of_id_table);
@@ -1434,38 +1577,22 @@ static int rcar_vin_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vin);
- if (vin->info->use_mc) {
- ret = rvin_mc_init(vin);
- if (ret)
- goto error_dma_unregister;
- }
+ if (vin->info->use_isp)
+ ret = rvin_isp_init(vin);
+ else if (vin->info->use_mc)
+ ret = rvin_csi2_init(vin);
+ else
+ ret = rvin_parallel_init(vin);
- ret = rvin_parallel_init(vin);
- if (ret)
- goto error_group_unregister;
+ if (ret) {
+ rvin_dma_unregister(vin);
+ return ret;
+ }
pm_suspend_ignore_children(&pdev->dev, true);
pm_runtime_enable(&pdev->dev);
return 0;
-
-error_group_unregister:
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
-
- if (vin->info->use_mc) {
- mutex_lock(&vin->group->lock);
- if (&vin->v4l2_dev == vin->group->notifier.v4l2_dev) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- }
- mutex_unlock(&vin->group->lock);
- rvin_group_put(vin);
- }
-
-error_dma_unregister:
- rvin_dma_unregister(vin);
-
- return ret;
}
static int rcar_vin_remove(struct platform_device *pdev)
@@ -1476,16 +1603,12 @@ static int rcar_vin_remove(struct platform_device *pdev)
rvin_v4l2_unregister(vin);
- v4l2_async_notifier_unregister(&vin->notifier);
- v4l2_async_notifier_cleanup(&vin->notifier);
-
- if (vin->info->use_mc) {
- v4l2_async_notifier_unregister(&vin->group->notifier);
- v4l2_async_notifier_cleanup(&vin->group->notifier);
- rvin_group_put(vin);
- }
-
- v4l2_ctrl_handler_free(&vin->ctrl_handler);
+ if (vin->info->use_isp)
+ rvin_isp_cleanup(vin);
+ else if (vin->info->use_mc)
+ rvin_csi2_cleanup(vin);
+ else
+ rvin_parallel_cleanup(vin);
rvin_dma_unregister(vin);
diff --git a/drivers/media/platform/rcar-vin/rcar-csi2.c b/drivers/media/platform/rcar-vin/rcar-csi2.c
index e28eff039688..11848d0c4a55 100644
--- a/drivers/media/platform/rcar-vin/rcar-csi2.c
+++ b/drivers/media/platform/rcar-vin/rcar-csi2.c
@@ -126,6 +126,12 @@ struct rcar_csi2;
#define PHTW_CWEN BIT(8)
#define PHTW_TESTDIN_CODE(n) ((n & 0xff))
+#define PHYFRX_REG 0x64
+#define PHYFRX_FORCERX_MODE_3 BIT(3)
+#define PHYFRX_FORCERX_MODE_2 BIT(2)
+#define PHYFRX_FORCERX_MODE_1 BIT(1)
+#define PHYFRX_FORCERX_MODE_0 BIT(0)
+
struct phtw_value {
u16 data;
u16 code;
@@ -136,6 +142,31 @@ struct rcsi2_mbps_reg {
u16 reg;
};
+static const struct rcsi2_mbps_reg phtw_mbps_v3u[] = {
+ { .mbps = 1500, .reg = 0xcc },
+ { .mbps = 1550, .reg = 0x1d },
+ { .mbps = 1600, .reg = 0x27 },
+ { .mbps = 1650, .reg = 0x30 },
+ { .mbps = 1700, .reg = 0x39 },
+ { .mbps = 1750, .reg = 0x42 },
+ { .mbps = 1800, .reg = 0x4b },
+ { .mbps = 1850, .reg = 0x55 },
+ { .mbps = 1900, .reg = 0x5e },
+ { .mbps = 1950, .reg = 0x67 },
+ { .mbps = 2000, .reg = 0x71 },
+ { .mbps = 2050, .reg = 0x79 },
+ { .mbps = 2100, .reg = 0x83 },
+ { .mbps = 2150, .reg = 0x8c },
+ { .mbps = 2200, .reg = 0x95 },
+ { .mbps = 2250, .reg = 0x9e },
+ { .mbps = 2300, .reg = 0xa7 },
+ { .mbps = 2350, .reg = 0xb0 },
+ { .mbps = 2400, .reg = 0xba },
+ { .mbps = 2450, .reg = 0xc3 },
+ { .mbps = 2500, .reg = 0xcc },
+ { /* sentinel */ },
+};
+
static const struct rcsi2_mbps_reg phtw_mbps_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x86 },
{ .mbps = 90, .reg = 0x86 },
@@ -200,6 +231,72 @@ static const struct rcsi2_mbps_reg phtw_mbps_v3m_e3[] = {
#define PHYPLL_REG 0x68
#define PHYPLL_HSFREQRANGE(n) ((n) << 16)
+static const struct rcsi2_mbps_reg hsfreqrange_v3u[] = {
+ { .mbps = 80, .reg = 0x00 },
+ { .mbps = 90, .reg = 0x10 },
+ { .mbps = 100, .reg = 0x20 },
+ { .mbps = 110, .reg = 0x30 },
+ { .mbps = 120, .reg = 0x01 },
+ { .mbps = 130, .reg = 0x11 },
+ { .mbps = 140, .reg = 0x21 },
+ { .mbps = 150, .reg = 0x31 },
+ { .mbps = 160, .reg = 0x02 },
+ { .mbps = 170, .reg = 0x12 },
+ { .mbps = 180, .reg = 0x22 },
+ { .mbps = 190, .reg = 0x32 },
+ { .mbps = 205, .reg = 0x03 },
+ { .mbps = 220, .reg = 0x13 },
+ { .mbps = 235, .reg = 0x23 },
+ { .mbps = 250, .reg = 0x33 },
+ { .mbps = 275, .reg = 0x04 },
+ { .mbps = 300, .reg = 0x14 },
+ { .mbps = 325, .reg = 0x25 },
+ { .mbps = 350, .reg = 0x35 },
+ { .mbps = 400, .reg = 0x05 },
+ { .mbps = 450, .reg = 0x16 },
+ { .mbps = 500, .reg = 0x26 },
+ { .mbps = 550, .reg = 0x37 },
+ { .mbps = 600, .reg = 0x07 },
+ { .mbps = 650, .reg = 0x18 },
+ { .mbps = 700, .reg = 0x28 },
+ { .mbps = 750, .reg = 0x39 },
+ { .mbps = 800, .reg = 0x09 },
+ { .mbps = 850, .reg = 0x19 },
+ { .mbps = 900, .reg = 0x29 },
+ { .mbps = 950, .reg = 0x3a },
+ { .mbps = 1000, .reg = 0x0a },
+ { .mbps = 1050, .reg = 0x1a },
+ { .mbps = 1100, .reg = 0x2a },
+ { .mbps = 1150, .reg = 0x3b },
+ { .mbps = 1200, .reg = 0x0b },
+ { .mbps = 1250, .reg = 0x1b },
+ { .mbps = 1300, .reg = 0x2b },
+ { .mbps = 1350, .reg = 0x3c },
+ { .mbps = 1400, .reg = 0x0c },
+ { .mbps = 1450, .reg = 0x1c },
+ { .mbps = 1500, .reg = 0x2c },
+ { .mbps = 1550, .reg = 0x3d },
+ { .mbps = 1600, .reg = 0x0d },
+ { .mbps = 1650, .reg = 0x1d },
+ { .mbps = 1700, .reg = 0x2e },
+ { .mbps = 1750, .reg = 0x3e },
+ { .mbps = 1800, .reg = 0x0e },
+ { .mbps = 1850, .reg = 0x1e },
+ { .mbps = 1900, .reg = 0x2f },
+ { .mbps = 1950, .reg = 0x3f },
+ { .mbps = 2000, .reg = 0x0f },
+ { .mbps = 2050, .reg = 0x40 },
+ { .mbps = 2100, .reg = 0x41 },
+ { .mbps = 2150, .reg = 0x42 },
+ { .mbps = 2200, .reg = 0x43 },
+ { .mbps = 2300, .reg = 0x45 },
+ { .mbps = 2350, .reg = 0x46 },
+ { .mbps = 2400, .reg = 0x47 },
+ { .mbps = 2450, .reg = 0x48 },
+ { .mbps = 2500, .reg = 0x49 },
+ { /* sentinel */ },
+};
+
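/*
 * Editor's sketch, not part of the patch: phtw_mbps_v3u[] and
 * hsfreqrange_v3u[] above are sentinel-terminated (mbps == 0) tables mapping
 * a D-PHY link rate to a register value.  The driver's lookup helpers
 * (rcsi2_set_phypll() / rcsi2_phtw_write_mbps(), not shown in this hunk) are
 * assumed to pick the first entry whose rate is at least the requested one;
 * a minimal version of such a scan would look like this:
 */
static int example_mbps_to_reg(const struct rcsi2_mbps_reg *table,
			       unsigned int mbps, u16 *reg)
{
	const struct rcsi2_mbps_reg *entry;

	for (entry = table; entry->mbps; entry++)
		if (entry->mbps >= mbps)
			break;

	if (!entry->mbps)
		return -ERANGE;		/* rate above the table maximum */

	*reg = entry->reg;
	return 0;
}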
static const struct rcsi2_mbps_reg hsfreqrange_h3_v3h_m3n[] = {
{ .mbps = 80, .reg = 0x00 },
{ .mbps = 90, .reg = 0x10 },
@@ -355,6 +452,7 @@ struct rcar_csi2_info {
unsigned int csi0clkfreqrange;
unsigned int num_channels;
bool clear_ulps;
+ bool use_isp;
};
struct rcar_csi2 {
@@ -370,9 +468,8 @@ struct rcar_csi2 {
struct v4l2_subdev *remote;
unsigned int remote_pad;
+ struct mutex lock; /* Protects mf and stream_count. */
struct v4l2_mbus_framefmt mf;
-
- struct mutex lock;
int stream_count;
unsigned short lanes;
@@ -553,6 +650,8 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
/* Code is validated in set_fmt. */
format = rcsi2_code_to_fmt(priv->mf.code);
+ if (!format)
+ return -EINVAL;
/*
* Enable all supported CSI-2 channels with virtual channel and
@@ -609,9 +708,12 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, PHTC_REG, 0);
/* Configure */
- rcsi2_write(priv, VCDT_REG, vcdt);
- if (vcdt2)
- rcsi2_write(priv, VCDT2_REG, vcdt2);
+ if (!priv->info->use_isp) {
+ rcsi2_write(priv, VCDT_REG, vcdt);
+ if (vcdt2)
+ rcsi2_write(priv, VCDT2_REG, vcdt2);
+ }
+
/* Lanes are zero indexed. */
rcsi2_write(priv, LSWAP_REG,
LSWAP_L0SEL(priv->lane_swap[0] - 1) |
@@ -636,6 +738,11 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
rcsi2_write(priv, CSI0CLKFCPR_REG,
CSI0CLKFREQRANGE(priv->info->csi0clkfreqrange));
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG,
+ PHYFRX_FORCERX_MODE_3 | PHYFRX_FORCERX_MODE_2 |
+ PHYFRX_FORCERX_MODE_1 | PHYFRX_FORCERX_MODE_0);
+
rcsi2_write(priv, PHYCNT_REG, phycnt);
rcsi2_write(priv, LINKCNT_REG, LINKCNT_MONITOR_EN |
LINKCNT_REG_MONI_PACT_EN | LINKCNT_ICLK_NONSTOP);
@@ -647,6 +754,9 @@ static int rcsi2_start_receiver(struct rcar_csi2 *priv)
if (ret)
return ret;
+ if (priv->info->use_isp)
+ rcsi2_write(priv, PHYFRX_REG, 0);
+
/* Run post PHY start initialization, if needed. */
if (priv->info->phy_post_init) {
ret = priv->info->phy_post_init(priv);
@@ -725,6 +835,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
struct rcar_csi2 *priv = sd_to_csi2(sd);
struct v4l2_mbus_framefmt *framefmt;
+ mutex_lock(&priv->lock);
+
if (!rcsi2_code_to_fmt(format->format.code))
format->format.code = rcar_csi2_formats[0].code;
@@ -735,6 +847,8 @@ static int rcsi2_set_pad_format(struct v4l2_subdev *sd,
*framefmt = format->format;
}
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -744,11 +858,15 @@ static int rcsi2_get_pad_format(struct v4l2_subdev *sd,
{
struct rcar_csi2 *priv = sd_to_csi2(sd);
+ mutex_lock(&priv->lock);
+
if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE)
format->format = priv->mf;
else
format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
+ mutex_unlock(&priv->lock);
+
return 0;
}
@@ -917,19 +1035,18 @@ static int rcsi2_parse_dt(struct rcar_csi2 *priv)
dev_dbg(priv->dev, "Found '%pOF'\n", to_of_node(fwnode));
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
priv->notifier.ops = &rcar_csi2_notify_ops;
- asd = v4l2_async_notifier_add_fwnode_subdev(&priv->notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&priv->notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
- ret = v4l2_async_subdev_notifier_register(&priv->subdev,
- &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->subdev, &priv->notifier);
if (ret)
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
return ret;
}
@@ -1063,6 +1180,62 @@ static int rcsi2_phy_post_init_v3m_e3(struct rcar_csi2 *priv)
return rcsi2_phtw_write_array(priv, step1);
}
+static int rcsi2_init_phtw_v3u(struct rcar_csi2 *priv,
+ unsigned int mbps)
+{
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step1[] = {
+ { .data = 0xcc, .code = 0xe2 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step2[] = {
+ { .data = 0x01, .code = 0xe3 },
+ { .data = 0x11, .code = 0xe4 },
+ { .data = 0x01, .code = 0xe5 },
+ { /* sentinel */ },
+ };
+
+ /* In case of 1500Mbps or less */
+ static const struct phtw_value step3[] = {
+ { .data = 0x38, .code = 0x08 },
+ { /* sentinel */ },
+ };
+
+ static const struct phtw_value step4[] = {
+ { .data = 0x01, .code = 0x00 },
+ { .data = 0x4b, .code = 0xac },
+ { .data = 0x03, .code = 0x00 },
+ { .data = 0x80, .code = 0x07 },
+ { /* sentinel */ },
+ };
+
+ int ret;
+
+ if (mbps != 0 && mbps <= 1500)
+ ret = rcsi2_phtw_write_array(priv, step1);
+ else
+ ret = rcsi2_phtw_write_mbps(priv, mbps, phtw_mbps_v3u, 0xe2);
+ if (ret)
+ return ret;
+
+ ret = rcsi2_phtw_write_array(priv, step2);
+ if (ret)
+ return ret;
+
+ if (mbps != 0 && mbps <= 1500) {
+ ret = rcsi2_phtw_write_array(priv, step3);
+ if (ret)
+ return ret;
+ }
+
+ ret = rcsi2_phtw_write_array(priv, step4);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
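/*
 * Editor's note, not part of the patch: rcsi2_init_phtw_v3u() above takes the
 * fixed-value path (step1, and later step3) only when the link rate is known
 * and at most 1500 Mbps; otherwise the value for test code 0xe2 comes from
 * the phtw_mbps_v3u[] table.  A stand-alone restatement of that decision:
 */
#include <stdbool.h>

static bool v3u_uses_low_rate_sequence(unsigned int mbps)
{
	/* mbps == 0 means the rate is unknown; the driver then uses the table. */
	return mbps != 0 && mbps <= 1500;
}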
/* -----------------------------------------------------------------------------
* Platform Device Driver.
*/
@@ -1074,11 +1247,9 @@ static const struct media_entity_operations rcar_csi2_entity_ops = {
static int rcsi2_probe_resources(struct rcar_csi2 *priv,
struct platform_device *pdev)
{
- struct resource *res;
int irq, ret;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(&pdev->dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
@@ -1155,6 +1326,14 @@ static const struct rcar_csi2_info rcar_csi2_info_r8a77990 = {
.num_channels = 2,
};
+static const struct rcar_csi2_info rcar_csi2_info_r8a779a0 = {
+ .init_phtw = rcsi2_init_phtw_v3u,
+ .hsfreqrange = hsfreqrange_v3u,
+ .csi0clkfreqrange = 0x20,
+ .clear_ulps = true,
+ .use_isp = true,
+};
+
static const struct of_device_id rcar_csi2_of_table[] = {
{
.compatible = "renesas,r8a774a1-csi2",
@@ -1200,6 +1379,10 @@ static const struct of_device_id rcar_csi2_of_table[] = {
.compatible = "renesas,r8a77990-csi2",
.data = &rcar_csi2_info_r8a77990,
},
+ {
+ .compatible = "renesas,r8a779a0-csi2",
+ .data = &rcar_csi2_info_r8a779a0,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, rcar_csi2_of_table);
@@ -1220,7 +1403,7 @@ static int rcsi2_probe(struct platform_device *pdev)
{
const struct soc_device_attribute *attr;
struct rcar_csi2 *priv;
- unsigned int i;
+ unsigned int i, num_pads;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
@@ -1245,14 +1428,14 @@ static int rcsi2_probe(struct platform_device *pdev)
ret = rcsi2_probe_resources(priv, pdev);
if (ret) {
dev_err(priv->dev, "Failed to get resources\n");
- return ret;
+ goto error_mutex;
}
platform_set_drvdata(pdev, priv);
ret = rcsi2_parse_dt(priv);
if (ret)
- return ret;
+ goto error_mutex;
priv->subdev.owner = THIS_MODULE;
priv->subdev.dev = &pdev->dev;
@@ -1265,28 +1448,32 @@ static int rcsi2_probe(struct platform_device *pdev)
priv->subdev.entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
priv->subdev.entity.ops = &rcar_csi2_entity_ops;
+ num_pads = priv->info->use_isp ? 2 : NR_OF_RCAR_CSI2_PAD;
+
priv->pads[RCAR_CSI2_SINK].flags = MEDIA_PAD_FL_SINK;
- for (i = RCAR_CSI2_SOURCE_VC0; i < NR_OF_RCAR_CSI2_PAD; i++)
+ for (i = RCAR_CSI2_SOURCE_VC0; i < num_pads; i++)
priv->pads[i].flags = MEDIA_PAD_FL_SOURCE;
- ret = media_entity_pads_init(&priv->subdev.entity, NR_OF_RCAR_CSI2_PAD,
+ ret = media_entity_pads_init(&priv->subdev.entity, num_pads,
priv->pads);
if (ret)
- goto error;
+ goto error_async;
pm_runtime_enable(&pdev->dev);
ret = v4l2_async_register_subdev(&priv->subdev);
if (ret < 0)
- goto error;
+ goto error_async;
dev_info(priv->dev, "%d lanes found\n", priv->lanes);
return 0;
-error:
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+error_async:
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
+error_mutex:
+ mutex_destroy(&priv->lock);
return ret;
}
@@ -1295,12 +1482,14 @@ static int rcsi2_remove(struct platform_device *pdev)
{
struct rcar_csi2 *priv = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(&priv->subdev);
pm_runtime_disable(&pdev->dev);
+ mutex_destroy(&priv->lock);
+
return 0;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-dma.c b/drivers/media/platform/rcar-vin/rcar-dma.c
index f5f722ab1d4e..25ead9333d00 100644
--- a/drivers/media/platform/rcar-vin/rcar-dma.c
+++ b/drivers/media/platform/rcar-vin/rcar-dma.c
@@ -114,6 +114,7 @@
/* Video n Data Mode Register bits */
#define VNDMR_A8BIT(n) (((n) & 0xff) << 24)
#define VNDMR_A8BIT_MASK (0xff << 24)
+#define VNDMR_YMODE_Y8 (1 << 12)
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_ABIT (1 << 2)
@@ -603,6 +604,7 @@ void rvin_crop_scale_comp(struct rvin_dev *vin)
case V4L2_PIX_FMT_SGBRG8:
case V4L2_PIX_FMT_SGRBG8:
case V4L2_PIX_FMT_SRGGB8:
+ case V4L2_PIX_FMT_GREY:
stride /= 2;
break;
default:
@@ -695,6 +697,7 @@ static int rvin_setup(struct rvin_dev *vin)
case MEDIA_BUS_FMT_SGBRG8_1X8:
case MEDIA_BUS_FMT_SGRBG8_1X8:
case MEDIA_BUS_FMT_SRGGB8_1X8:
+ case MEDIA_BUS_FMT_Y8_1X8:
vnmc |= VNMC_INF_RAW8;
break;
default:
@@ -774,6 +777,14 @@ static int rvin_setup(struct rvin_dev *vin)
case V4L2_PIX_FMT_SRGGB8:
dmr = 0;
break;
+ case V4L2_PIX_FMT_GREY:
+ if (input_is_yuv) {
+ dmr = VNDMR_DTMD_YCSEP | VNDMR_YMODE_Y8;
+ output_is_yuv = true;
+ } else {
+ dmr = 0;
+ }
+ break;
default:
vin_err(vin, "Invalid pixelformat (0x%x)\n",
vin->format.pixelformat);
@@ -783,16 +794,18 @@ static int rvin_setup(struct rvin_dev *vin)
/* Always update on field change */
vnmc |= VNMC_VUP;
- /* If input and output use the same colorspace, use bypass mode */
- if (input_is_yuv == output_is_yuv)
- vnmc |= VNMC_BPS;
-
- if (vin->info->model == RCAR_GEN3) {
- /* Select between CSI-2 and parallel input */
- if (vin->is_csi)
- vnmc &= ~VNMC_DPINE;
- else
- vnmc |= VNMC_DPINE;
+ if (!vin->info->use_isp) {
+ /* If input and output use the same colorspace, use bypass mode */
+ if (input_is_yuv == output_is_yuv)
+ vnmc |= VNMC_BPS;
+
+ if (vin->info->model == RCAR_GEN3) {
+ /* Select between CSI-2 and parallel input */
+ if (vin->is_csi)
+ vnmc &= ~VNMC_DPINE;
+ else
+ vnmc |= VNMC_DPINE;
+ }
}
/* Progressive or interlaced mode */
@@ -904,7 +917,8 @@ static void rvin_fill_hw_slot(struct rvin_dev *vin, int slot)
vin->format.sizeimage / 2;
break;
}
- } else if (vin->state != RUNNING || list_empty(&vin->buf_list)) {
+ } else if ((vin->state != STOPPED && vin->state != RUNNING) ||
+ list_empty(&vin->buf_list)) {
vin->buf_hw[slot].buffer = NULL;
vin->buf_hw[slot].type = FULL;
phys_addr = vin->scratch_phys;
@@ -1145,6 +1159,10 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd,
if (vin->format.pixelformat != V4L2_PIX_FMT_SRGGB8)
return -EPIPE;
break;
+ case MEDIA_BUS_FMT_Y8_1X8:
+ if (vin->format.pixelformat != V4L2_PIX_FMT_GREY)
+ return -EPIPE;
+ break;
default:
return -EPIPE;
}
diff --git a/drivers/media/platform/rcar-vin/rcar-v4l2.c b/drivers/media/platform/rcar-vin/rcar-v4l2.c
index 0d141155f0e3..a5bfa76fdac6 100644
--- a/drivers/media/platform/rcar-vin/rcar-v4l2.c
+++ b/drivers/media/platform/rcar-vin/rcar-v4l2.c
@@ -82,6 +82,10 @@ static const struct rvin_video_format rvin_formats[] = {
.fourcc = V4L2_PIX_FMT_SRGGB8,
.bpp = 1,
},
+ {
+ .fourcc = V4L2_PIX_FMT_GREY,
+ .bpp = 1,
+ },
};
const struct rvin_video_format *rvin_format_from_pixel(struct rvin_dev *vin,
@@ -523,6 +527,24 @@ static int rvin_s_selection(struct file *file, void *fh,
return 0;
}
+static int rvin_g_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_g_parm_cap(&vin->vdev, sd, parm);
+}
+
+static int rvin_s_parm(struct file *file, void *priv,
+ struct v4l2_streamparm *parm)
+{
+ struct rvin_dev *vin = video_drvdata(file);
+ struct v4l2_subdev *sd = vin_to_source(vin);
+
+ return v4l2_s_parm_cap(&vin->vdev, sd, parm);
+}
+
static int rvin_g_pixelaspect(struct file *file, void *priv,
int type, struct v4l2_fract *f)
{
@@ -739,6 +761,9 @@ static const struct v4l2_ioctl_ops rvin_ioctl_ops = {
.vidioc_g_selection = rvin_g_selection,
.vidioc_s_selection = rvin_s_selection,
+ .vidioc_g_parm = rvin_g_parm,
+ .vidioc_s_parm = rvin_s_parm,
+
.vidioc_g_pixelaspect = rvin_g_pixelaspect,
.vidioc_enum_input = rvin_enum_input,
diff --git a/drivers/media/platform/rcar-vin/rcar-vin.h b/drivers/media/platform/rcar-vin/rcar-vin.h
index b263ead4db2b..6c06320174a2 100644
--- a/drivers/media/platform/rcar-vin/rcar-vin.h
+++ b/drivers/media/platform/rcar-vin/rcar-vin.h
@@ -29,7 +29,7 @@
#define HW_BUFFER_MASK 0x7f
/* Max number on VIN instances that can be in a system */
-#define RCAR_VIN_NUM 8
+#define RCAR_VIN_NUM 32
struct rvin_group;
@@ -48,6 +48,18 @@ enum rvin_csi_id {
RVIN_CSI_MAX,
};
+enum rvin_isp_id {
+ RVIN_ISP0,
+ RVIN_ISP1,
+ RVIN_ISP2,
+ RVIN_ISP4,
+ RVIN_ISP_MAX,
+};
+
+#define RVIN_REMOTES_MAX \
+ (((unsigned int)RVIN_CSI_MAX) > ((unsigned int)RVIN_ISP_MAX) ? \
+ RVIN_CSI_MAX : RVIN_ISP_MAX)
+
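/*
 * Editor's sketch, not part of the patch: RVIN_REMOTES_MAX evaluates to the
 * larger of the two enum sizes so that the rvin_group remotes[] array (see
 * below) can describe either a CSI-2 group or an ISP group.  That property
 * can be checked at build time; static_assert is the C11 spelling, the
 * kernel itself would use BUILD_BUG_ON() inside a function:
 */
#include <assert.h>

static_assert(RVIN_REMOTES_MAX >= RVIN_CSI_MAX, "remotes[] too small for CSI-2");
static_assert(RVIN_REMOTES_MAX >= RVIN_ISP_MAX, "remotes[] too small for ISP");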
/**
* enum rvin_dma_state - DMA states
* @STOPPED: No operation in progress
@@ -147,6 +159,7 @@ struct rvin_group_route {
* struct rvin_info - Information about the particular VIN implementation
* @model: VIN model
* @use_mc: use media controller instead of controlling subdevice
+ * @use_isp: the VIN is connected to the ISP and not to the CSI-2
* @nv12: support outputing NV12 pixel format
* @max_width: max input width the VIN supports
* @max_height: max input height the VIN supports
@@ -156,6 +169,7 @@ struct rvin_group_route {
struct rvin_info {
enum model_id model;
bool use_mc;
+ bool use_isp;
bool nv12;
unsigned int max_width;
@@ -267,8 +281,9 @@ struct rvin_dev {
* @count: number of enabled VIN instances found in DT
* @notifier: group notifier for CSI-2 async subdevices
* @vin: VIN instances which are part of the group
- * @csi: array of pairs of fwnode and subdev pointers
- * to all CSI-2 subdevices.
+ * @link_setup: Callback to create all links for the media graph
+ * @remotes: array of pairs of fwnode and subdev pointers
+ * to all remote subdevices.
*/
struct rvin_group {
struct kref refcount;
@@ -280,10 +295,12 @@ struct rvin_group {
struct v4l2_async_notifier notifier;
struct rvin_dev *vin[RCAR_VIN_NUM];
+ int (*link_setup)(struct rvin_dev *vin);
+
struct {
struct v4l2_async_subdev *asd;
struct v4l2_subdev *subdev;
- } csi[RVIN_CSI_MAX];
+ } remotes[RVIN_REMOTES_MAX];
};
int rvin_dma_register(struct rvin_dev *vin, int irq);
diff --git a/drivers/media/platform/rcar_drif.c b/drivers/media/platform/rcar_drif.c
index 1e3b68a8743a..9a0982fa5c6b 100644
--- a/drivers/media/platform/rcar_drif.c
+++ b/drivers/media/platform/rcar_drif.c
@@ -1212,7 +1212,7 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
struct fwnode_handle *fwnode, *ep;
struct v4l2_async_subdev *asd;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
ep = fwnode_graph_get_next_endpoint(of_fwnode_handle(sdr->dev->of_node),
NULL);
@@ -1229,8 +1229,8 @@ static int rcar_drif_parse_subdevs(struct rcar_drif_sdr *sdr)
return -EINVAL;
}
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd))
return PTR_ERR(asd);
@@ -1346,7 +1346,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
sdr->notifier.ops = &rcar_drif_notify_ops;
/* Register notifier */
- ret = v4l2_async_notifier_register(&sdr->v4l2_dev, &sdr->notifier);
+ ret = v4l2_async_nf_register(&sdr->v4l2_dev, &sdr->notifier);
if (ret < 0) {
dev_err(sdr->dev, "failed: notifier register ret %d\n", ret);
goto cleanup;
@@ -1355,7 +1355,7 @@ static int rcar_drif_sdr_probe(struct rcar_drif_sdr *sdr)
return ret;
cleanup:
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
error:
v4l2_device_unregister(&sdr->v4l2_dev);
@@ -1365,8 +1365,8 @@ error:
/* V4L2 SDR device remove */
static void rcar_drif_sdr_remove(struct rcar_drif_sdr *sdr)
{
- v4l2_async_notifier_unregister(&sdr->notifier);
- v4l2_async_notifier_cleanup(&sdr->notifier);
+ v4l2_async_nf_unregister(&sdr->notifier);
+ v4l2_async_nf_cleanup(&sdr->notifier);
v4l2_device_unregister(&sdr->v4l2_dev);
}
@@ -1395,8 +1395,7 @@ static int rcar_drif_probe(struct platform_device *pdev)
}
/* Register map */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ch->base = devm_ioremap_resource(&pdev->dev, res);
+ ch->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ch->base))
return PTR_ERR(ch->base);
diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
index 89aac60066d9..19de3c19bcca 100644
--- a/drivers/media/platform/rcar_fdp1.c
+++ b/drivers/media/platform/rcar_fdp1.c
@@ -2256,7 +2256,6 @@ static int fdp1_probe(struct platform_device *pdev)
struct fdp1_dev *fdp1;
struct video_device *vfd;
struct device_node *fcp_node;
- struct resource *res;
struct clk *clk;
unsigned int i;
@@ -2283,8 +2282,7 @@ static int fdp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, fdp1);
/* Memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
+ fdp1->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(fdp1->regs))
return PTR_ERR(fdp1->regs);
diff --git a/drivers/media/platform/rcar_jpu.c b/drivers/media/platform/rcar_jpu.c
index f57158bf2b11..56bb464629ed 100644
--- a/drivers/media/platform/rcar_jpu.c
+++ b/drivers/media/platform/rcar_jpu.c
@@ -1590,7 +1590,6 @@ MODULE_DEVICE_TABLE(of, jpu_dt_ids);
static int jpu_probe(struct platform_device *pdev)
{
struct jpu *jpu;
- struct resource *res;
int ret;
unsigned int i;
@@ -1603,8 +1602,7 @@ static int jpu_probe(struct platform_device *pdev)
jpu->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- jpu->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpu->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpu->regs))
return PTR_ERR(jpu->regs);
diff --git a/drivers/media/platform/renesas-ceu.c b/drivers/media/platform/renesas-ceu.c
index f432032c7084..2e8dbacc414e 100644
--- a/drivers/media/platform/renesas-ceu.c
+++ b/drivers/media/platform/renesas-ceu.c
@@ -1513,12 +1513,12 @@ static int ceu_parse_platform_data(struct ceu_device *ceudev,
/* Setup the ceu subdevice and the async subdevice. */
async_sd = &pdata->subdevs[i];
- ceu_sd = v4l2_async_notifier_add_i2c_subdev(&ceudev->notifier,
- async_sd->i2c_adapter_id,
- async_sd->i2c_address,
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_i2c(&ceudev->notifier,
+ async_sd->i2c_adapter_id,
+ async_sd->i2c_address,
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
return PTR_ERR(ceu_sd);
}
ceu_sd->mbus_flags = async_sd->flags;
@@ -1576,9 +1576,9 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
}
/* Setup the ceu subdevice and the async subdevice. */
- ceu_sd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &ceudev->notifier, of_fwnode_handle(ep),
- struct ceu_subdev);
+ ceu_sd = v4l2_async_nf_add_fwnode_remote(&ceudev->notifier,
+ of_fwnode_handle(ep),
+ struct ceu_subdev);
if (IS_ERR(ceu_sd)) {
ret = PTR_ERR(ceu_sd);
goto error_cleanup;
@@ -1592,7 +1592,7 @@ static int ceu_parse_dt(struct ceu_device *ceudev)
return num_ep;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
of_node_put(ep);
return ret;
}
@@ -1628,7 +1628,6 @@ static int ceu_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct ceu_data *ceu_data;
struct ceu_device *ceudev;
- struct resource *res;
unsigned int irq;
int num_subdevs;
int ret;
@@ -1644,8 +1643,7 @@ static int ceu_probe(struct platform_device *pdev)
spin_lock_init(&ceudev->lock);
mutex_init(&ceudev->mlock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ceudev->base = devm_ioremap_resource(dev, res);
+ ceudev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ceudev->base)) {
ret = PTR_ERR(ceudev->base);
goto error_free_ceudev;
@@ -1669,7 +1667,7 @@ static int ceu_probe(struct platform_device *pdev)
if (ret)
goto error_pm_disable;
- v4l2_async_notifier_init(&ceudev->notifier);
+ v4l2_async_nf_init(&ceudev->notifier);
if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
ceu_data = of_device_get_match_data(dev);
@@ -1691,8 +1689,7 @@ static int ceu_probe(struct platform_device *pdev)
ceudev->notifier.v4l2_dev = &ceudev->v4l2_dev;
ceudev->notifier.ops = &ceu_notify_ops;
- ret = v4l2_async_notifier_register(&ceudev->v4l2_dev,
- &ceudev->notifier);
+ ret = v4l2_async_nf_register(&ceudev->v4l2_dev, &ceudev->notifier);
if (ret)
goto error_cleanup;
@@ -1701,7 +1698,7 @@ static int ceu_probe(struct platform_device *pdev)
return 0;
error_cleanup:
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
error_v4l2_unregister:
v4l2_device_unregister(&ceudev->v4l2_dev);
error_pm_disable:
@@ -1718,9 +1715,9 @@ static int ceu_remove(struct platform_device *pdev)
pm_runtime_disable(ceudev->dev);
- v4l2_async_notifier_unregister(&ceudev->notifier);
+ v4l2_async_nf_unregister(&ceudev->notifier);
- v4l2_async_notifier_cleanup(&ceudev->notifier);
+ v4l2_async_nf_cleanup(&ceudev->notifier);
v4l2_device_unregister(&ceudev->v4l2_dev);
diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
index 6759091b15e0..4de5e8d2b261 100644
--- a/drivers/media/platform/rockchip/rga/rga.c
+++ b/drivers/media/platform/rockchip/rga/rga.c
@@ -800,7 +800,6 @@ static int rga_probe(struct platform_device *pdev)
{
struct rockchip_rga *rga;
struct video_device *vfd;
- struct resource *res;
int ret = 0;
int irq;
@@ -821,9 +820,7 @@ static int rga_probe(struct platform_device *pdev)
pm_runtime_enable(rga->dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rga->regs = devm_ioremap_resource(rga->dev, res);
+ rga->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rga->regs)) {
ret = PTR_ERR(rga->regs);
goto err_put_clk;
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
index 41988eb0ec0a..768987d5f2dd 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c
@@ -685,12 +685,17 @@ static void rkisp1_handle_buffer(struct rkisp1_capture *cap)
spin_unlock(&cap->buf.lock);
}
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
unsigned int i;
u32 status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);
+ if (!status)
+ return IRQ_NONE;
+
rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);
for (i = 0; i < ARRAY_SIZE(rkisp1->capture_devs); ++i) {
@@ -718,6 +723,8 @@ void rkisp1_capture_isr(struct rkisp1_device *rkisp1)
cap->is_streaming = false;
wake_up(&cap->done);
}
+
+ return IRQ_HANDLED;
}
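/*
 * Editor's note, not part of the patch: each per-unit handler now reads its
 * own status register first and returns IRQ_NONE when no bit is set.  That
 * lets the same function serve both as a dedicated handler (PX30, one IRQ
 * line per unit) and as a callee of the combined rkisp1_isr() on RK3399,
 * where the line is requested with IRQF_SHARED.  Minimal shape of such a
 * handler; rkisp1_read()/rkisp1_write() and the register names are the
 * driver's own, the rest is the generic shared-IRQ pattern:
 */
static irqreturn_t example_unit_isr(int irq, void *ctx)
{
	struct device *dev = ctx;
	struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
	u32 status = rkisp1_read(rkisp1, RKISP1_CIF_MI_MIS);

	if (!status)
		return IRQ_NONE;	/* not ours: let other sharers run */

	rkisp1_write(rkisp1, status, RKISP1_CIF_MI_ICR);	/* acknowledge */
	/* ... handle the acknowledged events ... */

	return IRQ_HANDLED;
}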
/* ----------------------------------------------------------------------------
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
index bb73f4e17b66..d8fa3f1a5a85 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h
@@ -12,6 +12,7 @@
#define _RKISP1_COMMON_H
#include <linux/clk.h>
+#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/rkisp1-config.h>
#include <media/media-device.h>
@@ -231,6 +232,16 @@ struct rkisp1_capture {
} pix;
};
+struct rkisp1_stats;
+struct rkisp1_stats_ops {
+ void (*get_awb_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_aec_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+ void (*get_hst_meas)(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf);
+};
+
/*
* struct rkisp1_stats - ISP Statistics device
*
@@ -243,17 +254,42 @@ struct rkisp1_capture {
struct rkisp1_stats {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_stats_ops *ops;
spinlock_t lock; /* locks the buffers list 'stats' */
struct list_head stat;
struct v4l2_format vdev_fmt;
};
+struct rkisp1_params;
+struct rkisp1_params_ops {
+ void (*lsc_matrix_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig);
+ void (*goc_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg);
+ void (*awb_meas_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg);
+ void (*awb_meas_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en);
+ void (*awb_gain_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg);
+ void (*aec_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg);
+ void (*hst_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg);
+ void (*hst_enable)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en);
+ void (*afm_config)(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg);
+};
+
/*
* struct rkisp1_params - ISP input parameters device
*
* @vnode: video node
* @rkisp1: pointer to the rkisp1 device
+ * @ops: pointer to the variant-specific operations
* @config_lock: locks the buffer list 'params'
* @params: queue of rkisp1_buffer
* @vdev_fmt: v4l2_format of the metadata format
@@ -263,6 +299,7 @@ struct rkisp1_stats {
struct rkisp1_params {
struct rkisp1_vdev_node vnode;
struct rkisp1_device *rkisp1;
+ const struct rkisp1_params_ops *ops;
spinlock_t config_lock; /* locks the buffers list 'params' */
struct list_head params;
@@ -348,7 +385,6 @@ struct rkisp1_debug {
*/
struct rkisp1_device {
void __iomem *base_addr;
- int irq;
struct device *dev;
unsigned int clk_size;
struct clk_bulk_data clks[RKISP1_MAX_BUS_CLK];
@@ -456,9 +492,9 @@ void rkisp1_params_configure(struct rkisp1_params *params,
void rkisp1_params_disable(struct rkisp1_params *params);
/* irq handlers */
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1);
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1);
-void rkisp1_capture_isr(struct rkisp1_device *rkisp1);
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx);
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx);
+irqreturn_t rkisp1_capture_isr(int irq, void *ctx);
void rkisp1_stats_isr(struct rkisp1_stats *stats, u32 isp_ris);
void rkisp1_params_isr(struct rkisp1_device *rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
index 7474150b94ed..50b166c49a03 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c
@@ -101,9 +101,16 @@
* +-----------+ +-----------+
*/
+struct rkisp1_isr_data {
+ const char *name;
+ irqreturn_t (*isr)(int irq, void *ctx);
+};
+
struct rkisp1_match_data {
const char * const *clks;
- unsigned int size;
+ unsigned int clk_size;
+ const struct rkisp1_isr_data *isrs;
+ unsigned int isr_size;
enum rkisp1_cif_isp_version isp_ver;
};
@@ -246,7 +253,7 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
unsigned int next_id = 0;
int ret;
- v4l2_async_notifier_init(ntf);
+ v4l2_async_nf_init(ntf);
while (1) {
struct v4l2_fwnode_endpoint vep = {
@@ -265,8 +272,9 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
if (ret)
goto err_parse;
- rk_asd = v4l2_async_notifier_add_fwnode_remote_subdev(ntf, ep,
- struct rkisp1_sensor_async);
+ rk_asd = v4l2_async_nf_add_fwnode_remote(ntf, ep,
+ struct
+ rkisp1_sensor_async);
if (IS_ERR(rk_asd)) {
ret = PTR_ERR(rk_asd);
goto err_parse;
@@ -286,16 +294,16 @@ static int rkisp1_subdev_notifier(struct rkisp1_device *rkisp1)
continue;
err_parse:
fwnode_handle_put(ep);
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
if (next_id == 0)
dev_dbg(rkisp1->dev, "no remote subdevice found\n");
ntf->ops = &rkisp1_subdev_notifier_ops;
- ret = v4l2_async_notifier_register(&rkisp1->v4l2_dev, ntf);
+ ret = v4l2_async_nf_register(&rkisp1->v4l2_dev, ntf);
if (ret) {
- v4l2_async_notifier_cleanup(ntf);
+ v4l2_async_nf_cleanup(ntf);
return ret;
}
return 0;
@@ -385,36 +393,64 @@ err_unreg_isp_subdev:
static irqreturn_t rkisp1_isr(int irq, void *ctx)
{
- struct device *dev = ctx;
- struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
-
/*
* Call rkisp1_capture_isr() first to handle the frame that
* potentially completed using the current frame_sequence number before
* it is potentially incremented by rkisp1_isp_isr() in the vertical
* sync.
*/
- rkisp1_capture_isr(rkisp1);
- rkisp1_isp_isr(rkisp1);
- rkisp1_mipi_isr(rkisp1);
+ rkisp1_capture_isr(irq, ctx);
+ rkisp1_isp_isr(irq, ctx);
+ rkisp1_mipi_isr(irq, ctx);
return IRQ_HANDLED;
}
+static const char * const px30_isp_clks[] = {
+ "isp",
+ "aclk",
+ "hclk",
+ "pclk",
+};
+
+static const struct rkisp1_isr_data px30_isp_isrs[] = {
+ { "isp", rkisp1_isp_isr },
+ { "mi", rkisp1_capture_isr },
+ { "mipi", rkisp1_mipi_isr },
+};
+
+static const struct rkisp1_match_data px30_isp_match_data = {
+ .clks = px30_isp_clks,
+ .clk_size = ARRAY_SIZE(px30_isp_clks),
+ .isrs = px30_isp_isrs,
+ .isr_size = ARRAY_SIZE(px30_isp_isrs),
+ .isp_ver = RKISP1_V12,
+};
+
static const char * const rk3399_isp_clks[] = {
"isp",
"aclk",
"hclk",
};
+static const struct rkisp1_isr_data rk3399_isp_isrs[] = {
+ { NULL, rkisp1_isr },
+};
+
static const struct rkisp1_match_data rk3399_isp_match_data = {
.clks = rk3399_isp_clks,
- .size = ARRAY_SIZE(rk3399_isp_clks),
+ .clk_size = ARRAY_SIZE(rk3399_isp_clks),
+ .isrs = rk3399_isp_isrs,
+ .isr_size = ARRAY_SIZE(rk3399_isp_isrs),
.isp_ver = RKISP1_V10,
};
static const struct of_device_id rkisp1_of_match[] = {
{
+ .compatible = "rockchip,px30-cif-isp",
+ .data = &px30_isp_match_data,
+ },
+ {
.compatible = "rockchip,rk3399-cif-isp",
.data = &rk3399_isp_match_data,
},
@@ -478,25 +514,27 @@ static int rkisp1_probe(struct platform_device *pdev)
if (IS_ERR(rkisp1->base_addr))
return PTR_ERR(rkisp1->base_addr);
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
-
- ret = devm_request_irq(dev, irq, rkisp1_isr, IRQF_SHARED,
- dev_driver_string(dev), dev);
- if (ret) {
- dev_err(dev, "request irq failed: %d\n", ret);
- return ret;
+ for (i = 0; i < match_data->isr_size; i++) {
+ irq = (match_data->isrs[i].name) ?
+ platform_get_irq_byname(pdev, match_data->isrs[i].name) :
+ platform_get_irq(pdev, i);
+ if (irq < 0)
+ return irq;
+
+ ret = devm_request_irq(dev, irq, match_data->isrs[i].isr, IRQF_SHARED,
+ dev_driver_string(dev), dev);
+ if (ret) {
+ dev_err(dev, "request irq failed: %d\n", ret);
+ return ret;
+ }
}
- rkisp1->irq = irq;
-
- for (i = 0; i < match_data->size; i++)
+ for (i = 0; i < match_data->clk_size; i++)
rkisp1->clks[i].id = match_data->clks[i];
- ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks);
+ ret = devm_clk_bulk_get(dev, match_data->clk_size, rkisp1->clks);
if (ret)
return ret;
- rkisp1->clk_size = match_data->size;
+ rkisp1->clk_size = match_data->clk_size;
pm_runtime_enable(&pdev->dev);
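/*
 * Editor's sketch, not part of the patch: the request loop above takes one
 * interrupt per rkisp1_isr_data entry, looking the line up by name when one
 * is given and by index otherwise.  A hypothetical future variant with two
 * named lines would therefore only need match data along these lines (all
 * "example_*" names are invented for illustration):
 */
static const struct rkisp1_isr_data example_isp_isrs[] = {
	{ "isp", rkisp1_isp_isr },	/* platform_get_irq_byname(pdev, "isp") */
	{ "mi",  rkisp1_capture_isr },	/* platform_get_irq_byname(pdev, "mi")  */
};

static const struct rkisp1_match_data example_isp_match_data = {
	.clks = rk3399_isp_clks,		/* reusing an existing clock list */
	.clk_size = ARRAY_SIZE(rk3399_isp_clks),
	.isrs = example_isp_isrs,
	.isr_size = ARRAY_SIZE(example_isp_isrs),
	.isp_ver = RKISP1_V10,			/* placeholder */
};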
@@ -542,8 +580,8 @@ static int rkisp1_remove(struct platform_device *pdev)
{
struct rkisp1_device *rkisp1 = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&rkisp1->notifier);
- v4l2_async_notifier_cleanup(&rkisp1->notifier);
+ v4l2_async_nf_unregister(&rkisp1->notifier);
+ v4l2_async_nf_cleanup(&rkisp1->notifier);
rkisp1_params_unregister(rkisp1);
rkisp1_stats_unregister(rkisp1);
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
index d596bc040005..2a35bf24e54e 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c
@@ -414,6 +414,10 @@ static int rkisp1_config_mipi(struct rkisp1_device *rkisp1)
rkisp1_write(rkisp1, mipi_ctrl, RKISP1_CIF_MIPI_CTRL);
+ /* V12 could also use a newer csi2-host, but we don't want that yet */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12)
+ rkisp1_write(rkisp1, 0, RKISP1_CIF_ISP_CSI0_CTRL0);
+
/* Configure Data Type and Virtual Channel */
rkisp1_write(rkisp1,
RKISP1_CIF_MIPI_DATA_SEL_DT(sink_fmt->mipi_dt) |
@@ -533,6 +537,15 @@ static void rkisp1_config_clk(struct rkisp1_device *rkisp1)
RKISP1_CIF_ICCL_DCROP_CLK;
rkisp1_write(rkisp1, val, RKISP1_CIF_ICCL);
+
+ /* ensure sp and mp can run at the same time in V12 */
+ if (rkisp1->media_dev.hw_revision == RKISP1_V12) {
+ val = RKISP1_CIF_CLK_CTRL_MI_Y12 | RKISP1_CIF_CLK_CTRL_MI_SP |
+ RKISP1_CIF_CLK_CTRL_MI_RAW0 | RKISP1_CIF_CLK_CTRL_MI_RAW1 |
+ RKISP1_CIF_CLK_CTRL_MI_READ | RKISP1_CIF_CLK_CTRL_MI_RAWRD |
+ RKISP1_CIF_CLK_CTRL_CP | RKISP1_CIF_CLK_CTRL_IE;
+ rkisp1_write(rkisp1, val, RKISP1_CIF_VI_ISP_CLK_CTRL_V12);
+ }
}
static void rkisp1_isp_start(struct rkisp1_device *rkisp1)
@@ -1106,13 +1119,15 @@ void rkisp1_isp_unregister(struct rkisp1_device *rkisp1)
* Interrupt handlers
*/
-void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_mipi_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 val, status;
status = rkisp1_read(rkisp1, RKISP1_CIF_MIPI_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_MIPI_ICR);
@@ -1147,6 +1162,8 @@ void rkisp1_mipi_isr(struct rkisp1_device *rkisp1)
} else {
rkisp1->debug.mipi_error++;
}
+
+ return IRQ_HANDLED;
}
static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
@@ -1159,13 +1176,15 @@ static void rkisp1_isp_queue_event_sof(struct rkisp1_isp *isp)
v4l2_event_queue(isp->sd.devnode, &event);
}
-void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
+irqreturn_t rkisp1_isp_isr(int irq, void *ctx)
{
+ struct device *dev = ctx;
+ struct rkisp1_device *rkisp1 = dev_get_drvdata(dev);
u32 status, isp_err;
status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_MIS);
if (!status)
- return;
+ return IRQ_NONE;
rkisp1_write(rkisp1, status, RKISP1_CIF_ISP_ICR);
@@ -1207,4 +1226,6 @@ void rkisp1_isp_isr(struct rkisp1_device *rkisp1)
*/
rkisp1_params_isr(rkisp1);
}
+
+ return IRQ_HANDLED;
}
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
index 8fa5b0abf1f9..8f62f09e635f 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c
@@ -185,8 +185,8 @@ static void rkisp1_bls_config(struct rkisp1_params *params,
/* ISP LS correction interface function */
static void
-rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_lsc_config *pconfig)
+rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
{
unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
@@ -212,39 +212,111 @@ rkisp1_lsc_correct_matrix_config(struct rkisp1_params *params,
* DWORDs (2nd value of last DWORD unused)
*/
for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j],
- pconfig->r_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j],
- pconfig->gr_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j],
- pconfig->gb_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j],
- pconfig->b_data_tbl[i][j + 1]);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->r_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gr_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->gb_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
- data = RKISP1_CIF_ISP_LSC_TABLE_DATA(pconfig->b_data_tbl[i][j], 0);
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+ isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_1;
+ rkisp1_write(params->rkisp1, isp_lsc_table_sel,
+ RKISP1_CIF_ISP_LSC_TABLE_SEL);
+}
+
+static void
+rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_lsc_config *pconfig)
+{
+ unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data;
+
+ isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS);
+
+ /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */
+ sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ?
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 :
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153;
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR);
+ rkisp1_write(params->rkisp1, sram_addr, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR);
+
+ /* program data tables (table size is 9 * 17 = 153) */
+ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) {
+ /*
+ * 17 sectors with 2 values in one DWORD = 9
+ * DWORDs (2nd value of last DWORD unused)
+ */
+ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) {
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->r_data_tbl[i][j],
+ pconfig->r_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gr_data_tbl[i][j],
+ pconfig->gr_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->gb_data_tbl[i][j],
+ pconfig->gb_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(
+ pconfig->b_data_tbl[i][j],
+ pconfig->b_data_tbl[i][j + 1]);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
+ }
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_R_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GR_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0);
+ rkisp1_write(params->rkisp1, data,
+ RKISP1_CIF_ISP_LSC_GB_TABLE_DATA);
+
+ data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0);
rkisp1_write(params->rkisp1, data,
RKISP1_CIF_ISP_LSC_B_TABLE_DATA);
}
@@ -265,7 +337,7 @@ static void rkisp1_lsc_config(struct rkisp1_params *params,
lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL,
RKISP1_CIF_ISP_LSC_CTRL_ENA);
- rkisp1_lsc_correct_matrix_config(params, arg);
+ params->ops->lsc_matrix_config(params, arg);
for (i = 0; i < RKISP1_CIF_ISP_LSC_SECTORS_TBL_SIZE / 2; i++) {
/* program x size tables */
@@ -382,18 +454,37 @@ static void rkisp1_sdg_config(struct rkisp1_params *params,
}
/* ISP GAMMA correction interface function */
-static void rkisp1_goc_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_goc_config *arg)
+static void rkisp1_goc_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
{
unsigned int i;
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
- rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10);
for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++)
rkisp1_write(params->rkisp1, arg->gamma_y[i],
- RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4);
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 + i * 4);
+}
+
+static void rkisp1_goc_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_goc_config *arg)
+{
+ unsigned int i;
+ u32 value;
+
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA);
+ rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12);
+
+ for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V12 / 2; i++) {
+ value = RKISP1_CIF_ISP_GAMMA_VALUE_V12(
+ arg->gamma_y[2 * i + 1],
+ arg->gamma_y[2 * i]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 + i * 4);
+ }
}
/* ISP Cross Talk */
@@ -433,8 +524,8 @@ static void rkisp1_ctk_enable(struct rkisp1_params *params, bool en)
}
/* ISP White Balance Mode */
-static void rkisp1_awb_meas_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg)
+static void rkisp1_awb_meas_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
{
u32 reg_val = 0;
/* based on the mode,configure the awb module */
@@ -442,43 +533,111 @@ static void rkisp1_awb_meas_config(struct rkisp1_params *params,
/* Reference Cb and Cr */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
- arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF);
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V10);
/* Yc Threshold */
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
- arg->min_c, RKISP1_CIF_ISP_AWB_THRESH);
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V10);
}
- reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
if (arg->enable_ymax_cmp)
reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
else
reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
/* window offset */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS);
+ arg->awb_wnd.v_offs, RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS);
+ arg->awb_wnd.h_offs, RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10);
/* AWB window size */
rkisp1_write(params->rkisp1,
- arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE);
+ arg->awb_wnd.v_size, RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10);
rkisp1_write(params->rkisp1,
- arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE);
+ arg->awb_wnd.h_size, RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10);
/* Number of frames */
rkisp1_write(params->rkisp1,
- arg->frames, RKISP1_CIF_ISP_AWB_FRAMES);
+ arg->frames, RKISP1_CIF_ISP_AWB_FRAMES_V10);
+}
+
+static void rkisp1_awb_meas_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg)
+{
+ u32 reg_val = 0;
+ /* based on the mode, configure the awb module */
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_YCBCR) {
+ /* Reference Cb and Cr */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_REF_CR_SET(arg->awb_ref_cr) |
+ arg->awb_ref_cb, RKISP1_CIF_ISP_AWB_REF_V12);
+ /* Yc Threshold */
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_MAX_Y_SET(arg->max_y) |
+ RKISP1_CIF_ISP_AWB_MIN_Y_SET(arg->min_y) |
+ RKISP1_CIF_ISP_AWB_MAX_CS_SET(arg->max_csum) |
+ arg->min_c, RKISP1_CIF_ISP_AWB_THRESH_V12);
+ }
+
+ reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
+ if (arg->enable_ymax_cmp)
+ reg_val |= RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ else
+ reg_val &= ~RKISP1_CIF_ISP_AWB_YMAX_CMP_EN;
+ reg_val &= ~RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12;
+ reg_val |= RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(arg->frames);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
+
+ /* window offset */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_offs << 16 |
+ arg->awb_wnd.h_offs,
+ RKISP1_CIF_ISP_AWB_OFFS_V12);
+ /* AWB window size */
+ rkisp1_write(params->rkisp1,
+ arg->awb_wnd.v_size << 16 |
+ arg->awb_wnd.h_size,
+ RKISP1_CIF_ISP_AWB_SIZE_V12);
+}
+
+static void
+rkisp1_awb_meas_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
+{
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* switch off */
+ reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
+
+ if (en) {
+ if (arg->awb_mode == RKISP1_CIF_ISP_AWB_MODE_RGB)
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_RGB_EN;
+ else
+ reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
+
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+
+ /* Measurements require AWB block be active. */
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ } else {
+ rkisp1_write(params->rkisp1,
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V10);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
+ RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
+ }
}
static void
-rkisp1_awb_meas_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_meas_config *arg,
- bool en)
+rkisp1_awb_meas_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_meas_config *arg,
+ bool en)
{
- u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP);
+ u32 reg_val = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AWB_PROP_V12);
/* switch off */
reg_val &= RKISP1_CIF_ISP_AWB_MODE_MASK_NONE;
@@ -489,34 +648,47 @@ rkisp1_awb_meas_enable(struct rkisp1_params *params,
else
reg_val |= RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN;
- rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ rkisp1_write(params->rkisp1, reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
/* Measurements require AWB block be active. */
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
} else {
rkisp1_write(params->rkisp1,
- reg_val, RKISP1_CIF_ISP_AWB_PROP);
+ reg_val, RKISP1_CIF_ISP_AWB_PROP_V12);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
}
}
static void
-rkisp1_awb_gain_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_awb_gain_config *arg)
+rkisp1_awb_gain_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
+{
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V10);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V10);
+}
+
+static void
+rkisp1_awb_gain_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_awb_gain_config *arg)
{
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_green_r) |
- arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G);
+ arg->gain_green_b, RKISP1_CIF_ISP_AWB_GAIN_G_V12);
rkisp1_write(params->rkisp1,
RKISP1_CIF_ISP_AWB_GAIN_R_SET(arg->gain_red) |
- arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB);
+ arg->gain_blue, RKISP1_CIF_ISP_AWB_GAIN_RB_V12);
}
-static void rkisp1_aec_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_aec_config *arg)
+static void rkisp1_aec_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
{
unsigned int block_hsize, block_vsize;
u32 exp_ctrl;
@@ -531,21 +703,53 @@ static void rkisp1_aec_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
rkisp1_write(params->rkisp1,
- arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET);
+ arg->meas_window.h_offs, RKISP1_CIF_ISP_EXP_H_OFFSET_V10);
rkisp1_write(params->rkisp1,
- arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET);
+ arg->meas_window.v_offs, RKISP1_CIF_ISP_EXP_V_OFFSET_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_EXP_COLUMN_NUM - 1;
+ RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 - 1;
block_vsize = arg->meas_window.v_size /
- RKISP1_CIF_ISP_EXP_ROW_NUM - 1;
+ RKISP1_CIF_ISP_EXP_ROW_NUM_V10 - 1;
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_H_SIZE_SET(block_hsize),
- RKISP1_CIF_ISP_EXP_H_SIZE);
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(block_hsize),
+ RKISP1_CIF_ISP_EXP_H_SIZE_V10);
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_EXP_V_SIZE_SET(block_vsize),
- RKISP1_CIF_ISP_EXP_V_SIZE);
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(block_vsize),
+ RKISP1_CIF_ISP_EXP_V_SIZE_V10);
+}
+
+static void rkisp1_aec_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_aec_config *arg)
+{
+ u32 exp_ctrl;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx = 1;
+ const u32 ae_wnd_num[] = { 5, 9, 15, 15 };
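+ /* Index 1 in ae_wnd_num selects the 9x9 measurement grid. */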
+
+ /* avoid overriding the old enable value */
+ exp_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_EXP_CTRL);
+ exp_ctrl &= RKISP1_CIF_ISP_EXP_ENA;
+ if (arg->autostop)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP;
+ if (arg->mode == RKISP1_CIF_ISP_EXP_MEASURING_MODE_1)
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1;
+ exp_ctrl |= RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(wnd_num_idx);
+ rkisp1_write(params->rkisp1, exp_ctrl, RKISP1_CIF_ISP_EXP_CTRL);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(arg->meas_window.v_offs) |
+ RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(arg->meas_window.h_offs),
+ RKISP1_CIF_ISP_EXP_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / ae_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / ae_wnd_num[wnd_num_idx] - 1;
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(block_vsize) |
+ RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(block_hsize),
+ RKISP1_CIF_ISP_EXP_SIZE_V12);
}
static void rkisp1_cproc_config(struct rkisp1_params *params,
@@ -578,73 +782,151 @@ static void rkisp1_cproc_config(struct rkisp1_params *params,
}
}
-static void rkisp1_hst_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg)
+static void rkisp1_hst_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
{
unsigned int block_hsize, block_vsize;
static const u32 hist_weight_regs[] = {
- RKISP1_CIF_ISP_HIST_WEIGHT_00TO30,
- RKISP1_CIF_ISP_HIST_WEIGHT_40TO21,
- RKISP1_CIF_ISP_HIST_WEIGHT_31TO12,
- RKISP1_CIF_ISP_HIST_WEIGHT_22TO03,
- RKISP1_CIF_ISP_HIST_WEIGHT_13TO43,
- RKISP1_CIF_ISP_HIST_WEIGHT_04TO34,
+ RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10,
+ RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10,
};
const u8 *weight;
unsigned int i;
u32 hist_prop;
/* avoid to override the old enable value */
- hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP);
- hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
- hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET(arg->histogram_predivider);
- rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP);
+ hist_prop = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_PROP_V10);
+ hist_prop &= RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
+ hist_prop |= RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_prop, RKISP1_CIF_ISP_HIST_PROP_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.h_offs,
- RKISP1_CIF_ISP_HIST_H_OFFS);
+ RKISP1_CIF_ISP_HIST_H_OFFS_V10);
rkisp1_write(params->rkisp1,
arg->meas_window.v_offs,
- RKISP1_CIF_ISP_HIST_V_OFFS);
+ RKISP1_CIF_ISP_HIST_V_OFFS_V10);
block_hsize = arg->meas_window.h_size /
- RKISP1_CIF_ISP_HIST_COLUMN_NUM - 1;
- block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM - 1;
+ RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 - 1;
+ block_vsize = arg->meas_window.v_size / RKISP1_CIF_ISP_HIST_ROW_NUM_V10 - 1;
- rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE);
- rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE);
+ rkisp1_write(params->rkisp1, block_hsize, RKISP1_CIF_ISP_HIST_H_SIZE_V10);
+ rkisp1_write(params->rkisp1, block_vsize, RKISP1_CIF_ISP_HIST_V_SIZE_V10);
weight = arg->hist_weight;
for (i = 0; i < ARRAY_SIZE(hist_weight_regs); ++i, weight += 4)
rkisp1_write(params->rkisp1,
- RKISP1_CIF_ISP_HIST_WEIGHT_SET(weight[0],
+ RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(weight[0],
weight[1],
weight[2],
weight[3]),
hist_weight_regs[i]);
- rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44);
+ rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44_V10);
+}
+
+static void rkisp1_hst_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg)
+{
+ unsigned int i, j;
+ u32 block_hsize, block_vsize;
+ u32 wnd_num_idx, hist_weight_num, hist_ctrl, value;
+ u8 weight15x15[RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12];
+ const u32 hist_wnd_num[] = { 5, 9, 15, 15 };
+
+ /* only the 9x9 window is supported for now */
+ wnd_num_idx = 1;
+ memset(weight15x15, 0x00, sizeof(weight15x15));
+ /* avoid overriding the old enable value */
+ hist_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_HIST_CTRL_V12);
+ hist_ctrl &= RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12;
+ hist_ctrl = hist_ctrl |
+ RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(0) |
+ RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(1) |
+ RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(arg->histogram_predivider);
+ rkisp1_write(params->rkisp1, hist_ctrl, RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_OFFS_SET_V12(arg->meas_window.h_offs,
+ arg->meas_window.v_offs),
+ RKISP1_CIF_ISP_HIST_OFFS_V12);
+
+ block_hsize = arg->meas_window.h_size / hist_wnd_num[wnd_num_idx] - 1;
+ block_vsize = arg->meas_window.v_size / hist_wnd_num[wnd_num_idx] - 1;
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_SIZE_SET_V12(block_hsize, block_vsize),
+ RKISP1_CIF_ISP_HIST_SIZE_V12);
+
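+ /* Copy the 9x9 user weight grid into the 15x15 register layout, row by row. */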
+ for (i = 0; i < hist_wnd_num[wnd_num_idx]; i++) {
+ for (j = 0; j < hist_wnd_num[wnd_num_idx]; j++) {
+ weight15x15[i * RKISP1_CIF_ISP_HIST_ROW_NUM_V12 + j] =
+ arg->hist_weight[i * hist_wnd_num[wnd_num_idx] + j];
+ }
+ }
+
+ hist_weight_num = RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12;
+ for (i = 0; i < (hist_weight_num / 4); i++) {
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(
+ weight15x15[4 * i + 0],
+ weight15x15[4 * i + 1],
+ weight15x15[4 * i + 2],
+ weight15x15[4 * i + 3]);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
+ }
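+ /* 225 weights fill 56 registers of four values; write the single leftover weight separately. */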
+ value = RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(weight15x15[4 * i + 0], 0, 0, 0);
+ rkisp1_write(params->rkisp1, value,
+ RKISP1_CIF_ISP_HIST_WEIGHT_V12 + 4 * i);
}
static void
-rkisp1_hst_enable(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_hst_config *arg, bool en)
+rkisp1_hst_enable_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
{
if (en) {
u32 hist_prop = rkisp1_read(params->rkisp1,
- RKISP1_CIF_ISP_HIST_PROP);
+ RKISP1_CIF_ISP_HIST_PROP_V10);
- hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK;
+ hist_prop &= ~RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10;
hist_prop |= arg->mode;
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
hist_prop);
} else {
- rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP,
- RKISP1_CIF_ISP_HIST_PROP_MODE_MASK);
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
+ RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10);
+ }
+}
+
+static void
+rkisp1_hst_enable_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_hst_config *arg, bool en)
+{
+ if (en) {
+ u32 hist_ctrl = rkisp1_read(params->rkisp1,
+ RKISP1_CIF_ISP_HIST_CTRL_V12);
+
+ hist_ctrl &= ~RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12;
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(arg->mode);
+ hist_ctrl |= RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(1);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ hist_ctrl);
+ } else {
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_HIST_CTRL_V12,
+ RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 |
+ RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12);
}
}
-static void rkisp1_afm_config(struct rkisp1_params *params,
- const struct rkisp1_cif_isp_afc_config *arg)
+static void rkisp1_afm_config_v10(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
{
size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
arg->num_afm_win);
@@ -674,6 +956,45 @@ static void rkisp1_afm_config(struct rkisp1_params *params,
rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
}
+static void rkisp1_afm_config_v12(struct rkisp1_params *params,
+ const struct rkisp1_cif_isp_afc_config *arg)
+{
+ size_t num_of_win = min_t(size_t, ARRAY_SIZE(arg->afm_win),
+ arg->num_afm_win);
+ u32 afm_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_AFM_CTRL);
+ u32 lum_var_shift, afm_var_shift;
+ unsigned int i;
+
+ /* Switch off to configure. */
+ rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
+ RKISP1_CIF_ISP_AFM_ENA);
+
+ for (i = 0; i < num_of_win; i++) {
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_LT_A + i * 8);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_WINDOW_X(arg->afm_win[i].h_size +
+ arg->afm_win[i].h_offs) |
+ RKISP1_CIF_ISP_AFM_WINDOW_Y(arg->afm_win[i].v_size +
+ arg->afm_win[i].v_offs),
+ RKISP1_CIF_ISP_AFM_RB_A + i * 8);
+ }
+ rkisp1_write(params->rkisp1, arg->thres, RKISP1_CIF_ISP_AFM_THRES);
+
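+ /* var_shift packs both the luminance and AFM shifts; program the same values for all three windows. */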
+ lum_var_shift = RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(arg->var_shift);
+ afm_var_shift = RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(arg->var_shift);
+ rkisp1_write(params->rkisp1,
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(lum_var_shift, afm_var_shift) |
+ RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(lum_var_shift, afm_var_shift),
+ RKISP1_CIF_ISP_AFM_VAR_SHIFT);
+
+ /* Restore the previous AFM control state. */
+ rkisp1_write(params->rkisp1, afm_ctrl, RKISP1_CIF_ISP_AFM_CTRL);
+}
+
static void rkisp1_ie_config(struct rkisp1_params *params,
const struct rkisp1_cif_isp_ie_config *arg)
{
@@ -955,7 +1276,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update awb gains */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
- rkisp1_awb_gain_config(params, &new_params->others.awb_gain_config);
+ params->ops->awb_gain_config(params, &new_params->others.awb_gain_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AWB_GAIN)
@@ -1010,8 +1331,7 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params,
/* update goc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_GOC)
- rkisp1_goc_config(params,
- &new_params->others.goc_config);
+ params->ops->goc_config(params, &new_params->others.goc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_GOC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_GOC)
@@ -1081,17 +1401,17 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update awb config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_config(params, &new_params->meas.awb_meas_config);
+ params->ops->awb_meas_config(params, &new_params->meas.awb_meas_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AWB)
- rkisp1_awb_meas_enable(params,
- &new_params->meas.awb_meas_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
+ params->ops->awb_meas_enable(params,
+ &new_params->meas.awb_meas_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_AWB));
/* update afc config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AFC)
- rkisp1_afm_config(params,
- &new_params->meas.afc_config);
+ params->ops->afm_config(params,
+ &new_params->meas.afc_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AFC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AFC)
@@ -1106,18 +1426,18 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params,
/* update hst config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_config(params,
- &new_params->meas.hst_config);
+ params->ops->hst_config(params,
+ &new_params->meas.hst_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_HST)
- rkisp1_hst_enable(params,
- &new_params->meas.hst_config,
- !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
+ params->ops->hst_enable(params,
+ &new_params->meas.hst_config,
+ !!(module_ens & RKISP1_CIF_ISP_MODULE_HST));
/* update aec config */
if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AEC)
- rkisp1_aec_config(params,
- &new_params->meas.aec_config);
+ params->ops->aec_config(params,
+ &new_params->meas.aec_config);
if (module_en_update & RKISP1_CIF_ISP_MODULE_AEC) {
if (module_ens & RKISP1_CIF_ISP_MODULE_AEC)
@@ -1218,21 +1538,21 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params)
{
struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config;
- rkisp1_awb_meas_config(params, &rkisp1_awb_params_default_config);
- rkisp1_awb_meas_enable(params, &rkisp1_awb_params_default_config,
- true);
+ params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config);
+ params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config,
+ true);
- rkisp1_aec_config(params, &rkisp1_aec_params_default_config);
+ params->ops->aec_config(params, &rkisp1_aec_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
RKISP1_CIF_ISP_EXP_ENA);
- rkisp1_afm_config(params, &rkisp1_afc_params_default_config);
+ params->ops->afm_config(params, &rkisp1_afc_params_default_config);
rkisp1_param_set_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
memset(hst.hist_weight, 0x01, sizeof(hst.hist_weight));
- rkisp1_hst_config(params, &hst);
- rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP,
+ params->ops->hst_config(params, &hst);
+ rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10,
rkisp1_hst_params_default_config.mode);
/* set the range */
@@ -1278,7 +1598,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DEMOSAIC_BYPASS);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_FILT_MODE,
RKISP1_CIF_ISP_FLT_ENA);
- rkisp1_awb_meas_enable(params, NULL, false);
+ params->ops->awb_meas_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL,
RKISP1_CIF_ISP_CTRL_ISP_AWB_ENA);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_EXP_CTRL,
@@ -1286,7 +1606,7 @@ void rkisp1_params_disable(struct rkisp1_params *params)
rkisp1_ctk_enable(params, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_C_PROC_CTRL,
RKISP1_CIF_C_PROC_CTR_ENABLE);
- rkisp1_hst_enable(params, NULL, false);
+ params->ops->hst_enable(params, NULL, false);
rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_AFM_CTRL,
RKISP1_CIF_ISP_AFM_ENA);
rkisp1_ie_enable(params, false);
@@ -1294,6 +1614,30 @@ void rkisp1_params_disable(struct rkisp1_params *params)
RKISP1_CIF_ISP_DPF_MODE_EN);
}
+static const struct rkisp1_params_ops rkisp1_v10_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v10,
+ .goc_config = rkisp1_goc_config_v10,
+ .awb_meas_config = rkisp1_awb_meas_config_v10,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v10,
+ .awb_gain_config = rkisp1_awb_gain_config_v10,
+ .aec_config = rkisp1_aec_config_v10,
+ .hst_config = rkisp1_hst_config_v10,
+ .hst_enable = rkisp1_hst_enable_v10,
+ .afm_config = rkisp1_afm_config_v10,
+};
+
+static struct rkisp1_params_ops rkisp1_v12_params_ops = {
+ .lsc_matrix_config = rkisp1_lsc_matrix_config_v12,
+ .goc_config = rkisp1_goc_config_v12,
+ .awb_meas_config = rkisp1_awb_meas_config_v12,
+ .awb_meas_enable = rkisp1_awb_meas_enable_v12,
+ .awb_gain_config = rkisp1_awb_gain_config_v12,
+ .aec_config = rkisp1_aec_config_v12,
+ .hst_config = rkisp1_hst_config_v12,
+ .hst_enable = rkisp1_hst_enable_v12,
+ .afm_config = rkisp1_afm_config_v12,
+};
+
static int rkisp1_params_enum_fmt_meta_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
@@ -1459,6 +1803,11 @@ static void rkisp1_init_params(struct rkisp1_params *params)
V4L2_META_FMT_RK_ISP1_PARAMS;
params->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_params_cfg);
+
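+ /* Select the parameter ops matching the ISP hardware revision. */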
+ if (params->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ params->ops = &rkisp1_v12_params_ops;
+ else
+ params->ops = &rkisp1_v10_params_ops;
}
int rkisp1_params_register(struct rkisp1_device *rkisp1)
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
index fa33080f51db..d326214c7e07 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h
@@ -212,6 +212,35 @@
/* CCL */
#define RKISP1_CIF_CCL_CIF_CLK_DIS BIT(2)
+/* VI_ISP_CLK_CTRL */
+#define RKISP1_CIF_CLK_CTRL_ISP_RAW BIT(0)
+#define RKISP1_CIF_CLK_CTRL_ISP_RGB BIT(1)
+#define RKISP1_CIF_CLK_CTRL_ISP_YUV BIT(2)
+#define RKISP1_CIF_CLK_CTRL_ISP_3A BIT(3)
+#define RKISP1_CIF_CLK_CTRL_MIPI_RAW BIT(4)
+#define RKISP1_CIF_CLK_CTRL_ISP_IE BIT(5)
+#define RKISP1_CIF_CLK_CTRL_RSZ_RAM BIT(6)
+#define RKISP1_CIF_CLK_CTRL_JPEG_RAM BIT(7)
+#define RKISP1_CIF_CLK_CTRL_ACLK_ISP BIT(8)
+#define RKISP1_CIF_CLK_CTRL_MI_IDC BIT(9)
+#define RKISP1_CIF_CLK_CTRL_MI_MP BIT(10)
+#define RKISP1_CIF_CLK_CTRL_MI_JPEG BIT(11)
+#define RKISP1_CIF_CLK_CTRL_MI_DP BIT(12)
+#define RKISP1_CIF_CLK_CTRL_MI_Y12 BIT(13)
+#define RKISP1_CIF_CLK_CTRL_MI_SP BIT(14)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW0 BIT(15)
+#define RKISP1_CIF_CLK_CTRL_MI_RAW1 BIT(16)
+#define RKISP1_CIF_CLK_CTRL_MI_READ BIT(17)
+#define RKISP1_CIF_CLK_CTRL_MI_RAWRD BIT(18)
+#define RKISP1_CIF_CLK_CTRL_CP BIT(19)
+#define RKISP1_CIF_CLK_CTRL_IE BIT(20)
+#define RKISP1_CIF_CLK_CTRL_SI BIT(21)
+#define RKISP1_CIF_CLK_CTRL_RSZM BIT(22)
+#define RKISP1_CIF_CLK_CTRL_DPMUX BIT(23)
+#define RKISP1_CIF_CLK_CTRL_JPEG BIT(24)
+#define RKISP1_CIF_CLK_CTRL_RSZS BIT(25)
+#define RKISP1_CIF_CLK_CTRL_MIPI BIT(26)
+#define RKISP1_CIF_CLK_CTRL_MARVINMI BIT(27)
/* ICCL */
#define RKISP1_CIF_ICCL_ISP_CLK BIT(0)
#define RKISP1_CIF_ICCL_CP_CLK BIT(1)
@@ -346,26 +375,58 @@
#define RKISP1_CIF_SUPER_IMP_CTRL_TRANSP_DIS BIT(2)
/* ISP HISTOGRAM CALCULATION : ISP_HIST_PROP */
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS (0 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB BIT(0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED (2 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN (3 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE (4 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM (5 << 0)
-#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK 0x7
-#define RKISP1_CIF_ISP_HIST_PREDIV_SET(x) (((x) & 0x7F) << 3)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_SET(v0, v1, v2, v3) \
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_DIS_V10 (0 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RGB_V10 BIT(0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_RED_V10 (2 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_GREEN_V10 (3 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_BLUE_V10 (4 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_LUM_V10 (5 << 0)
+#define RKISP1_CIF_ISP_HIST_PROP_MODE_MASK_V10 0x7
+#define RKISP1_CIF_ISP_HIST_PREDIV_SET_V10(x) (((x) & 0x7F) << 3)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V10(v0, v1, v2, v3) \
(((v0) & 0x1F) | (((v1) & 0x1F) << 8) |\
(((v2) & 0x1F) << 16) | \
(((v3) & 0x1F) << 24))
-#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED 0xFFFFF000
-#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED 0xFFFFF800
-#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED 0xE0E0E0E0
-#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F
-#define RKISP1_CIF_ISP_HIST_ROW_NUM 5
-#define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF)
+#define RKISP1_CIF_ISP_HIST_WINDOW_OFFSET_RESERVED_V10 0xFFFFF000
+#define RKISP1_CIF_ISP_HIST_WINDOW_SIZE_RESERVED_V10 0xFFFFF800
+#define RKISP1_CIF_ISP_HIST_WEIGHT_RESERVED_V10 0xE0E0E0E0
+#define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER_V10 0x0000007F
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_HIST_GET_BIN_V10(x) ((x) & 0x000FFFFF)
+
+/* ISP HISTOGRAM CALCULATION : CIF_ISP_HIST */
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(x) (((x) & 0x01) << 0)
+#define RKISP1_CIF_ISP_HIST_CTRL_EN_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_EN_SET_V12(0x01)
+#define RKISP1_CIF_ISP_HIST_CTRL_STEPSIZE_SET_V12(x) (((x) & 0x7F) << 1)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(x) (((x) & 0x07) << 8)
+#define RKISP1_CIF_ISP_HIST_CTRL_MODE_MASK_V12 RKISP1_CIF_ISP_HIST_CTRL_MODE_SET_V12(0x07)
+#define RKISP1_CIF_ISP_HIST_CTRL_AUTOSTOP_SET_V12(x) (((x) & 0x01) << 11)
+#define RKISP1_CIF_ISP_HIST_CTRL_WATERLINE_SET_V12(x) (((x) & 0xFFF) << 12)
+#define RKISP1_CIF_ISP_HIST_CTRL_DATASEL_SET_V12(x) (((x) & 0x07) << 24)
+#define RKISP1_CIF_ISP_HIST_CTRL_INTRSEL_SET_V12(x) (((x) & 0x01) << 27)
+#define RKISP1_CIF_ISP_HIST_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 28)
+#define RKISP1_CIF_ISP_HIST_CTRL_DBGEN_SET_V12(x) (((x) & 0x01) << 30)
+#define RKISP1_CIF_ISP_HIST_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_HIST_WEIGHT_REG_SIZE_V12 \
+ (RKISP1_CIF_ISP_HIST_ROW_NUM_V12 * RKISP1_CIF_ISP_HIST_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_HIST_WEIGHT_SET_V12(v0, v1, v2, v3) \
+ (((v0) & 0x3F) | (((v1) & 0x3F) << 8) |\
+ (((v2) & 0x3F) << 16) |\
+ (((v3) & 0x3F) << 24))
+
+#define RKISP1_CIF_ISP_HIST_OFFS_SET_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 16))
+#define RKISP1_CIF_ISP_HIST_SIZE_SET_V12(v0, v1) \
+ (((v0) & 0x7FF) | (((v1) & 0x7FF) << 16))
+
+#define RKISP1_CIF_ISP_HIST_GET_BIN0_V12(x) \
+ ((x) & 0xFFFF)
+#define RKISP1_CIF_ISP_HIST_GET_BIN1_V12(x) \
+ (((x) >> 16) & 0xFFFF)
/* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */
#define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0)
@@ -401,6 +462,8 @@
#define RKISP1_CIF_ISP_AWB_MODE_YCBCR_EN ((0 << 31) | (0x2 << 0))
#define RKISP1_CIF_ISP_AWB_MODE_MASK_NONE 0xFFFFFFFC
#define RKISP1_CIF_ISP_AWB_MODE_READ(x) ((x) & 3)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(x) (((x) & 0x07) << 28)
+#define RKISP1_CIF_ISP_AWB_SET_FRAMES_MASK_V12 RKISP1_CIF_ISP_AWB_SET_FRAMES_V12(0x07)
/* ISP_AWB_GAIN_RB, ISP_AWB_GAIN_G */
#define RKISP1_CIF_ISP_AWB_GAIN_R_SET(x) (((x) & 0x3FF) << 16)
#define RKISP1_CIF_ISP_AWB_GAIN_R_READ(x) (((x) >> 16) & 0x3FF)
@@ -435,6 +498,7 @@
/* ISP_EXP_CTRL */
#define RKISP1_CIF_ISP_EXP_ENA BIT(0)
#define RKISP1_CIF_ISP_EXP_CTRL_AUTOSTOP BIT(1)
+#define RKISP1_CIF_ISP_EXP_CTRL_WNDNUM_SET_V12(x) (((x) & 0x03) << 2)
/*
*'1' luminance calculation according to Y=(R+G+B) x 0.332 (85/256)
*'0' luminance calculation according to Y=16+0.25R+0.5G+0.1094B
@@ -442,42 +506,76 @@
#define RKISP1_CIF_ISP_EXP_CTRL_MEASMODE_1 BIT(31)
/* ISP_EXP_H_SIZE */
-#define RKISP1_CIF_ISP_EXP_H_SIZE_SET(x) ((x) & 0x7FF)
-#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V10(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V10 0x000007FF
+#define RKISP1_CIF_ISP_EXP_H_SIZE_SET_V12(x) ((x) & 0x7FF)
+#define RKISP1_CIF_ISP_EXP_HEIGHT_MASK_V12 0x000007FF
/* ISP_EXP_V_SIZE: vertical size must be a multiple of 2. */
-#define RKISP1_CIF_ISP_EXP_V_SIZE_SET(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V10(x) ((x) & 0x7FE)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_SET_V12(x) (((x) & 0x7FE) << 16)
/* ISP_EXP_H_OFFSET */
-#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_HOFFS 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V10 2424
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_SET_V12(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_HOFFS_V12 0x1FFF
/* ISP_EXP_V_OFFSET */
-#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET(x) ((x) & 0x1FFF)
-#define RKISP1_CIF_ISP_EXP_MAX_VOFFS 1806
-
-#define RKISP1_CIF_ISP_EXP_ROW_NUM 5
-#define RKISP1_CIF_ISP_EXP_COLUMN_NUM 5
-#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS \
- (RKISP1_CIF_ISP_EXP_ROW_NUM * RKISP1_CIF_ISP_EXP_COLUMN_NUM)
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE 516
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE 35
-#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE 390
-#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE 28
-#define RKISP1_CIF_ISP_EXP_MAX_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_HSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE * RKISP1_CIF_ISP_EXP_COLUMN_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MAX_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
-#define RKISP1_CIF_ISP_EXP_MIN_VSIZE \
- (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE * RKISP1_CIF_ISP_EXP_ROW_NUM + 1)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V10(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V10 1806
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_SET_V12(x) (((x) & 0x1FFF) << 16)
+#define RKISP1_CIF_ISP_EXP_MAX_VOFFS_V12 0x1FFF
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 5
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V10 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10)
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 516
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 35
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 390
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 28
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V10 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V10 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V10 * RKISP1_CIF_ISP_EXP_ROW_NUM_V10 + 1)
+
+#define RKISP1_CIF_ISP_EXP_ROW_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 15
+#define RKISP1_CIF_ISP_EXP_NUM_LUMA_REGS_V12 \
+ (RKISP1_CIF_ISP_EXP_ROW_NUM_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12)
+
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 0x7FF
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 0x7FE
+#define RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 0xE
+#define RKISP1_CIF_ISP_EXP_MAX_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_HSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_HSIZE_V12 * RKISP1_CIF_ISP_EXP_COLUMN_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MAX_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MAX_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+#define RKISP1_CIF_ISP_EXP_MIN_VSIZE_V12 \
+ (RKISP1_CIF_ISP_EXP_BLOCK_MIN_VSIZE_V12 * RKISP1_CIF_ISP_EXP_ROW_NUM_V12 + 1)
+
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(x) ((x) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(x) (((x) >> 8) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(x) (((x) >> 16) & 0xFF)
+#define RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(x) (((x) >> 24) & 0xFF)
/* LSC: ISP_LSC_CTRL */
#define RKISP1_CIF_ISP_LSC_CTRL_ENA BIT(0)
#define RKISP1_CIF_ISP_LSC_SECT_SIZE_RESERVED 0xFC00FC00
-#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED 0xF000F000
-#define RKISP1_CIF_ISP_LSC_TABLE_DATA(v0, v1) \
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V10 0xF000F000
+#define RKISP1_CIF_ISP_LSC_GRAD_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_SAMPLE_RESERVED_V12 0xE000E000
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 12))
+#define RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(v0, v1) \
+ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13))
#define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \
(((v0) & 0xFFF) | (((v1) & 0xFFF) << 16))
#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \
@@ -550,6 +648,10 @@
(1 << 15) | (1 << 11) | (1 << 7) | (1 << 3))
#define RKISP1_CIFISP_DEGAMMA_Y_RESERVED 0xFFFFF000
+/* GAMMA-OUT */
+#define RKISP1_CIF_ISP_GAMMA_VALUE_V12(x, y) \
+ (((x) & 0xFFF) << 16 | ((y) & 0xFFF) << 0)
+
/* AFM */
#define RKISP1_CIF_ISP_AFM_ENA BIT(0)
#define RKISP1_CIF_ISP_AFM_THRES_RESERVED 0xFFFF0000
@@ -560,6 +662,11 @@
#define RKISP1_CIF_ISP_AFM_WINDOW_Y_MIN 0x2
#define RKISP1_CIF_ISP_AFM_WINDOW_X(x) (((x) & 0x1FFF) << 16)
#define RKISP1_CIF_ISP_AFM_WINDOW_Y(x) ((x) & 0x1FFF)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_a_V12(x, y) (((x) & 0x7) << 16 | ((y) & 0x7) << 0)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_b_V12(x, y) (((x) & 0x7) << 20 | ((y) & 0x7) << 4)
+#define RKISP1_CIF_ISP_AFM_SET_SHIFT_c_V12(x, y) (((x) & 0x7) << 24 | ((y) & 0x7) << 8)
+#define RKISP1_CIF_ISP_AFM_GET_LUM_SHIFT_a_V12(x) (((x) & 0x70000) >> 16)
+#define RKISP1_CIF_ISP_AFM_GET_AFM_SHIFT_a_V12(x) ((x) & 0x7)
/* DPF */
#define RKISP1_CIF_ISP_DPF_MODE_EN BIT(0)
@@ -582,6 +689,7 @@
#define RKISP1_CIF_CTRL_BASE 0x00000000
#define RKISP1_CIF_CCL (RKISP1_CIF_CTRL_BASE + 0x00000000)
#define RKISP1_CIF_VI_ID (RKISP1_CIF_CTRL_BASE + 0x00000008)
+#define RKISP1_CIF_VI_ISP_CLK_CTRL_V12 (RKISP1_CIF_CTRL_BASE + 0x0000000C)
#define RKISP1_CIF_ICCL (RKISP1_CIF_CTRL_BASE + 0x00000010)
#define RKISP1_CIF_IRCL (RKISP1_CIF_CTRL_BASE + 0x00000014)
#define RKISP1_CIF_VI_DPCL (RKISP1_CIF_CTRL_BASE + 0x00000018)
@@ -667,18 +775,35 @@
#define RKISP1_CIF_ISP_GAMMA_B_Y14 (RKISP1_CIF_ISP_BASE + 0x000000E4)
#define RKISP1_CIF_ISP_GAMMA_B_Y15 (RKISP1_CIF_ISP_BASE + 0x000000E8)
#define RKISP1_CIF_ISP_GAMMA_B_Y16 (RKISP1_CIF_ISP_BASE + 0x000000EC)
-#define RKISP1_CIF_ISP_AWB_PROP (RKISP1_CIF_ISP_BASE + 0x00000110)
-#define RKISP1_CIF_ISP_AWB_WND_H_OFFS (RKISP1_CIF_ISP_BASE + 0x00000114)
-#define RKISP1_CIF_ISP_AWB_WND_V_OFFS (RKISP1_CIF_ISP_BASE + 0x00000118)
-#define RKISP1_CIF_ISP_AWB_WND_H_SIZE (RKISP1_CIF_ISP_BASE + 0x0000011C)
-#define RKISP1_CIF_ISP_AWB_WND_V_SIZE (RKISP1_CIF_ISP_BASE + 0x00000120)
-#define RKISP1_CIF_ISP_AWB_FRAMES (RKISP1_CIF_ISP_BASE + 0x00000124)
-#define RKISP1_CIF_ISP_AWB_REF (RKISP1_CIF_ISP_BASE + 0x00000128)
-#define RKISP1_CIF_ISP_AWB_THRESH (RKISP1_CIF_ISP_BASE + 0x0000012C)
-#define RKISP1_CIF_ISP_AWB_GAIN_G (RKISP1_CIF_ISP_BASE + 0x00000138)
-#define RKISP1_CIF_ISP_AWB_GAIN_RB (RKISP1_CIF_ISP_BASE + 0x0000013C)
-#define RKISP1_CIF_ISP_AWB_WHITE_CNT (RKISP1_CIF_ISP_BASE + 0x00000140)
-#define RKISP1_CIF_ISP_AWB_MEAN (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V10 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_WND_H_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_WND_V_OFFS_V10 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_WND_H_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_WND_V_SIZE_V10 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_AWB_FRAMES_V10 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_AWB_REF_V10 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_THRESH_V10 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V10 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V10 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V10 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_AWB_MEAN_V10 (RKISP1_CIF_ISP_BASE + 0x00000144)
+#define RKISP1_CIF_ISP_AWB_PROP_V12 (RKISP1_CIF_ISP_BASE + 0x00000110)
+#define RKISP1_CIF_ISP_AWB_SIZE_V12 (RKISP1_CIF_ISP_BASE + 0x00000114)
+#define RKISP1_CIF_ISP_AWB_OFFS_V12 (RKISP1_CIF_ISP_BASE + 0x00000118)
+#define RKISP1_CIF_ISP_AWB_REF_V12 (RKISP1_CIF_ISP_BASE + 0x0000011C)
+#define RKISP1_CIF_ISP_AWB_THRESH_V12 (RKISP1_CIF_ISP_BASE + 0x00000120)
+#define RKISP1_CIF_ISP_X_COOR12_V12 (RKISP1_CIF_ISP_BASE + 0x00000124)
+#define RKISP1_CIF_ISP_X_COOR34_V12 (RKISP1_CIF_ISP_BASE + 0x00000128)
+#define RKISP1_CIF_ISP_AWB_WHITE_CNT_V12 (RKISP1_CIF_ISP_BASE + 0x0000012C)
+#define RKISP1_CIF_ISP_AWB_MEAN_V12 (RKISP1_CIF_ISP_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_DEGAIN_V12 (RKISP1_CIF_ISP_BASE + 0x00000134)
+#define RKISP1_CIF_ISP_AWB_GAIN_G_V12 (RKISP1_CIF_ISP_BASE + 0x00000138)
+#define RKISP1_CIF_ISP_AWB_GAIN_RB_V12 (RKISP1_CIF_ISP_BASE + 0x0000013C)
+#define RKISP1_CIF_ISP_REGION_LINE_V12 (RKISP1_CIF_ISP_BASE + 0x00000140)
+#define RKISP1_CIF_ISP_WP_CNT_REGION0_V12 (RKISP1_CIF_ISP_BASE + 0x00000160)
+#define RKISP1_CIF_ISP_WP_CNT_REGION1_V12 (RKISP1_CIF_ISP_BASE + 0x00000164)
+#define RKISP1_CIF_ISP_WP_CNT_REGION2_V12 (RKISP1_CIF_ISP_BASE + 0x00000168)
+#define RKISP1_CIF_ISP_WP_CNT_REGION3_V12 (RKISP1_CIF_ISP_BASE + 0x0000016C)
#define RKISP1_CIF_ISP_CC_COEFF_0 (RKISP1_CIF_ISP_BASE + 0x00000170)
#define RKISP1_CIF_ISP_CC_COEFF_1 (RKISP1_CIF_ISP_BASE + 0x00000174)
#define RKISP1_CIF_ISP_CC_COEFF_2 (RKISP1_CIF_ISP_BASE + 0x00000178)
@@ -712,30 +837,32 @@
#define RKISP1_CIF_ISP_CT_COEFF_6 (RKISP1_CIF_ISP_BASE + 0x000001E8)
#define RKISP1_CIF_ISP_CT_COEFF_7 (RKISP1_CIF_ISP_BASE + 0x000001EC)
#define RKISP1_CIF_ISP_CT_COEFF_8 (RKISP1_CIF_ISP_BASE + 0x000001F0)
-#define RKISP1_CIF_ISP_GAMMA_OUT_MODE (RKISP1_CIF_ISP_BASE + 0x000001F4)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0 (RKISP1_CIF_ISP_BASE + 0x000001F8)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1 (RKISP1_CIF_ISP_BASE + 0x000001FC)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2 (RKISP1_CIF_ISP_BASE + 0x00000200)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3 (RKISP1_CIF_ISP_BASE + 0x00000204)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4 (RKISP1_CIF_ISP_BASE + 0x00000208)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5 (RKISP1_CIF_ISP_BASE + 0x0000020C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6 (RKISP1_CIF_ISP_BASE + 0x00000210)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7 (RKISP1_CIF_ISP_BASE + 0x00000214)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8 (RKISP1_CIF_ISP_BASE + 0x00000218)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9 (RKISP1_CIF_ISP_BASE + 0x0000021C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10 (RKISP1_CIF_ISP_BASE + 0x00000220)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11 (RKISP1_CIF_ISP_BASE + 0x00000224)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12 (RKISP1_CIF_ISP_BASE + 0x00000228)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13 (RKISP1_CIF_ISP_BASE + 0x0000022C)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14 (RKISP1_CIF_ISP_BASE + 0x00000230)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15 (RKISP1_CIF_ISP_BASE + 0x00000234)
-#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16 (RKISP1_CIF_ISP_BASE + 0x00000238)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V10 (RKISP1_CIF_ISP_BASE + 0x000001F4)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V10 (RKISP1_CIF_ISP_BASE + 0x000001F8)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_1_V10 (RKISP1_CIF_ISP_BASE + 0x000001FC)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_2_V10 (RKISP1_CIF_ISP_BASE + 0x00000200)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_3_V10 (RKISP1_CIF_ISP_BASE + 0x00000204)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_4_V10 (RKISP1_CIF_ISP_BASE + 0x00000208)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_5_V10 (RKISP1_CIF_ISP_BASE + 0x0000020C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_6_V10 (RKISP1_CIF_ISP_BASE + 0x00000210)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_7_V10 (RKISP1_CIF_ISP_BASE + 0x00000214)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_8_V10 (RKISP1_CIF_ISP_BASE + 0x00000218)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_9_V10 (RKISP1_CIF_ISP_BASE + 0x0000021C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_10_V10 (RKISP1_CIF_ISP_BASE + 0x00000220)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_11_V10 (RKISP1_CIF_ISP_BASE + 0x00000224)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_12_V10 (RKISP1_CIF_ISP_BASE + 0x00000228)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_13_V10 (RKISP1_CIF_ISP_BASE + 0x0000022C)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_14_V10 (RKISP1_CIF_ISP_BASE + 0x00000230)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_15_V10 (RKISP1_CIF_ISP_BASE + 0x00000234)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_16_V10 (RKISP1_CIF_ISP_BASE + 0x00000238)
#define RKISP1_CIF_ISP_ERR (RKISP1_CIF_ISP_BASE + 0x0000023C)
#define RKISP1_CIF_ISP_ERR_CLR (RKISP1_CIF_ISP_BASE + 0x00000240)
#define RKISP1_CIF_ISP_FRAME_COUNT (RKISP1_CIF_ISP_BASE + 0x00000244)
#define RKISP1_CIF_ISP_CT_OFFSET_R (RKISP1_CIF_ISP_BASE + 0x00000248)
#define RKISP1_CIF_ISP_CT_OFFSET_G (RKISP1_CIF_ISP_BASE + 0x0000024C)
#define RKISP1_CIF_ISP_CT_OFFSET_B (RKISP1_CIF_ISP_BASE + 0x00000250)
+#define RKISP1_CIF_ISP_GAMMA_OUT_MODE_V12 (RKISP1_CIF_ISP_BASE + 0x00000300)
+#define RKISP1_CIF_ISP_GAMMA_OUT_Y_0_V12 (RKISP1_CIF_ISP_BASE + 0x00000304)
#define RKISP1_CIF_ISP_FLASH_BASE 0x00000660
#define RKISP1_CIF_ISP_FLASH_CMD (RKISP1_CIF_ISP_FLASH_BASE + 0x00000000)
@@ -1005,36 +1132,35 @@
#define RKISP1_CIF_ISP_IS_H_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x0000002C)
#define RKISP1_CIF_ISP_IS_V_SIZE_SHD (RKISP1_CIF_ISP_IS_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BASE 0x00002400
-
-#define RKISP1_CIF_ISP_HIST_PROP (RKISP1_CIF_ISP_HIST_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_HIST_H_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_HIST_V_OFFS (RKISP1_CIF_ISP_HIST_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_HIST_H_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_HIST_V_SIZE (RKISP1_CIF_ISP_HIST_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_HIST_BIN_0 (RKISP1_CIF_ISP_HIST_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_HIST_BIN_1 (RKISP1_CIF_ISP_HIST_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_HIST_BIN_2 (RKISP1_CIF_ISP_HIST_BASE + 0x0000001C)
-#define RKISP1_CIF_ISP_HIST_BIN_3 (RKISP1_CIF_ISP_HIST_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_HIST_BIN_4 (RKISP1_CIF_ISP_HIST_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_HIST_BIN_5 (RKISP1_CIF_ISP_HIST_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_HIST_BIN_6 (RKISP1_CIF_ISP_HIST_BASE + 0x0000002C)
-#define RKISP1_CIF_ISP_HIST_BIN_7 (RKISP1_CIF_ISP_HIST_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_HIST_BIN_8 (RKISP1_CIF_ISP_HIST_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_HIST_BIN_9 (RKISP1_CIF_ISP_HIST_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_HIST_BIN_10 (RKISP1_CIF_ISP_HIST_BASE + 0x0000003C)
-#define RKISP1_CIF_ISP_HIST_BIN_11 (RKISP1_CIF_ISP_HIST_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_HIST_BIN_12 (RKISP1_CIF_ISP_HIST_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_HIST_BIN_13 (RKISP1_CIF_ISP_HIST_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_HIST_BIN_14 (RKISP1_CIF_ISP_HIST_BASE + 0x0000004C)
-#define RKISP1_CIF_ISP_HIST_BIN_15 (RKISP1_CIF_ISP_HIST_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30 (RKISP1_CIF_ISP_HIST_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21 (RKISP1_CIF_ISP_HIST_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12 (RKISP1_CIF_ISP_HIST_BASE + 0x0000005C)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03 (RKISP1_CIF_ISP_HIST_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43 (RKISP1_CIF_ISP_HIST_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34 (RKISP1_CIF_ISP_HIST_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_HIST_WEIGHT_44 (RKISP1_CIF_ISP_HIST_BASE + 0x0000006C)
+#define RKISP1_CIF_ISP_HIST_BASE_V10 0x00002400
+#define RKISP1_CIF_ISP_HIST_PROP_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_H_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_V_OFFS_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_H_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_V_SIZE_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000010)
+#define RKISP1_CIF_ISP_HIST_BIN_0_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000014)
+#define RKISP1_CIF_ISP_HIST_BIN_1_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000018)
+#define RKISP1_CIF_ISP_HIST_BIN_2_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_BIN_3_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000020)
+#define RKISP1_CIF_ISP_HIST_BIN_4_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000024)
+#define RKISP1_CIF_ISP_HIST_BIN_5_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000028)
+#define RKISP1_CIF_ISP_HIST_BIN_6_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_BIN_7_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000030)
+#define RKISP1_CIF_ISP_HIST_BIN_8_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000034)
+#define RKISP1_CIF_ISP_HIST_BIN_9_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000038)
+#define RKISP1_CIF_ISP_HIST_BIN_10_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_11_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000040)
+#define RKISP1_CIF_ISP_HIST_BIN_12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000044)
+#define RKISP1_CIF_ISP_HIST_BIN_13_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000048)
+#define RKISP1_CIF_ISP_HIST_BIN_14_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000004C)
+#define RKISP1_CIF_ISP_HIST_BIN_15_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000050)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_00TO30_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000054)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_40TO21_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000058)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_31TO12_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000005C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_22TO03_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000060)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_13TO43_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000064)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_04TO34_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x00000068)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_44_V10 (RKISP1_CIF_ISP_HIST_BASE_V10 + 0x0000006C)
#define RKISP1_CIF_ISP_FILT_BASE 0x00002500
#define RKISP1_CIF_ISP_FILT_MODE (RKISP1_CIF_ISP_FILT_BASE + 0x00000000)
@@ -1060,35 +1186,38 @@
#define RKISP1_CIF_ISP_EXP_BASE 0x00002600
#define RKISP1_CIF_ISP_EXP_CTRL (RKISP1_CIF_ISP_EXP_BASE + 0x00000000)
-#define RKISP1_CIF_ISP_EXP_H_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
-#define RKISP1_CIF_ISP_EXP_V_OFFSET (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
-#define RKISP1_CIF_ISP_EXP_H_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
-#define RKISP1_CIF_ISP_EXP_V_SIZE (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
-#define RKISP1_CIF_ISP_EXP_MEAN_00 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
-#define RKISP1_CIF_ISP_EXP_MEAN_10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
-#define RKISP1_CIF_ISP_EXP_MEAN_20 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
-#define RKISP1_CIF_ISP_EXP_MEAN_30 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
-#define RKISP1_CIF_ISP_EXP_MEAN_40 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
-#define RKISP1_CIF_ISP_EXP_MEAN_01 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
-#define RKISP1_CIF_ISP_EXP_MEAN_11 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
-#define RKISP1_CIF_ISP_EXP_MEAN_21 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
-#define RKISP1_CIF_ISP_EXP_MEAN_31 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
-#define RKISP1_CIF_ISP_EXP_MEAN_41 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
-#define RKISP1_CIF_ISP_EXP_MEAN_02 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
-#define RKISP1_CIF_ISP_EXP_MEAN_12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
-#define RKISP1_CIF_ISP_EXP_MEAN_22 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
-#define RKISP1_CIF_ISP_EXP_MEAN_32 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
-#define RKISP1_CIF_ISP_EXP_MEAN_42 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
-#define RKISP1_CIF_ISP_EXP_MEAN_03 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
-#define RKISP1_CIF_ISP_EXP_MEAN_13 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
-#define RKISP1_CIF_ISP_EXP_MEAN_23 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
-#define RKISP1_CIF_ISP_EXP_MEAN_33 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
-#define RKISP1_CIF_ISP_EXP_MEAN_43 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
-#define RKISP1_CIF_ISP_EXP_MEAN_04 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
-#define RKISP1_CIF_ISP_EXP_MEAN_14 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
-#define RKISP1_CIF_ISP_EXP_MEAN_24 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
-#define RKISP1_CIF_ISP_EXP_MEAN_34 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
-#define RKISP1_CIF_ISP_EXP_MEAN_44 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_H_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_V_OFFSET_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_H_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000C)
+#define RKISP1_CIF_ISP_EXP_V_SIZE_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000010)
+#define RKISP1_CIF_ISP_EXP_MEAN_00_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000014)
+#define RKISP1_CIF_ISP_EXP_MEAN_10_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000018)
+#define RKISP1_CIF_ISP_EXP_MEAN_20_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000001c)
+#define RKISP1_CIF_ISP_EXP_MEAN_30_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_EXP_MEAN_40_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000024)
+#define RKISP1_CIF_ISP_EXP_MEAN_01_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000028)
+#define RKISP1_CIF_ISP_EXP_MEAN_11_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000002c)
+#define RKISP1_CIF_ISP_EXP_MEAN_21_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000030)
+#define RKISP1_CIF_ISP_EXP_MEAN_31_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000034)
+#define RKISP1_CIF_ISP_EXP_MEAN_41_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000038)
+#define RKISP1_CIF_ISP_EXP_MEAN_02_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000003c)
+#define RKISP1_CIF_ISP_EXP_MEAN_12_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000040)
+#define RKISP1_CIF_ISP_EXP_MEAN_22_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000044)
+#define RKISP1_CIF_ISP_EXP_MEAN_32_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000048)
+#define RKISP1_CIF_ISP_EXP_MEAN_42_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000004c)
+#define RKISP1_CIF_ISP_EXP_MEAN_03_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000050)
+#define RKISP1_CIF_ISP_EXP_MEAN_13_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000054)
+#define RKISP1_CIF_ISP_EXP_MEAN_23_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000058)
+#define RKISP1_CIF_ISP_EXP_MEAN_33_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000005c)
+#define RKISP1_CIF_ISP_EXP_MEAN_43_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000060)
+#define RKISP1_CIF_ISP_EXP_MEAN_04_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000064)
+#define RKISP1_CIF_ISP_EXP_MEAN_14_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000068)
+#define RKISP1_CIF_ISP_EXP_MEAN_24_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x0000006c)
+#define RKISP1_CIF_ISP_EXP_MEAN_34_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000070)
+#define RKISP1_CIF_ISP_EXP_MEAN_44_V10 (RKISP1_CIF_ISP_EXP_BASE + 0x00000074)
+#define RKISP1_CIF_ISP_EXP_SIZE_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000004)
+#define RKISP1_CIF_ISP_EXP_OFFS_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x00000008)
+#define RKISP1_CIF_ISP_EXP_MEAN_V12 (RKISP1_CIF_ISP_EXP_BASE + 0x0000000c)
#define RKISP1_CIF_ISP_BLS_BASE 0x00002700
#define RKISP1_CIF_ISP_BLS_CTRL (RKISP1_CIF_ISP_BLS_BASE + 0x00000000)
@@ -1249,6 +1378,16 @@
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_31_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x0000012C)
#define RKISP1_CIF_ISP_WDR_TONECURVE_YM_32_SHD (RKISP1_CIF_ISP_WDR_BASE + 0x00000130)
+#define RKISP1_CIF_ISP_HIST_BASE_V12 0x00002C00
+#define RKISP1_CIF_ISP_HIST_CTRL_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000000)
+#define RKISP1_CIF_ISP_HIST_SIZE_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000004)
+#define RKISP1_CIF_ISP_HIST_OFFS_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000008)
+#define RKISP1_CIF_ISP_HIST_DBG1_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000000C)
+#define RKISP1_CIF_ISP_HIST_DBG2_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000001C)
+#define RKISP1_CIF_ISP_HIST_DBG3_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000002C)
+#define RKISP1_CIF_ISP_HIST_WEIGHT_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x0000003C)
+#define RKISP1_CIF_ISP_HIST_BIN_V12 (RKISP1_CIF_ISP_HIST_BASE_V12 + 0x00000120)
+
#define RKISP1_CIF_ISP_VSM_BASE 0x00002F00
#define RKISP1_CIF_ISP_VSM_MODE (RKISP1_CIF_ISP_VSM_BASE + 0x00000000)
#define RKISP1_CIF_ISP_VSM_H_OFFS (RKISP1_CIF_ISP_VSM_BASE + 0x00000004)
@@ -1260,4 +1399,7 @@
#define RKISP1_CIF_ISP_VSM_DELTA_H (RKISP1_CIF_ISP_VSM_BASE + 0x0000001C)
#define RKISP1_CIF_ISP_VSM_DELTA_V (RKISP1_CIF_ISP_VSM_BASE + 0x00000020)
+#define RKISP1_CIF_ISP_CSI0_BASE 0x00007000
+#define RKISP1_CIF_ISP_CSI0_CTRL0 (RKISP1_CIF_ISP_CSI0_BASE + 0x00000000)
+
#endif /* _RKISP1_REGS_H */
diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
index e88bdd612d71..be5777c65bfb 100644
--- a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
+++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c
@@ -174,18 +174,18 @@ rkisp1_stats_init_vb2_queue(struct vb2_queue *q, struct rkisp1_stats *stats)
return vb2_queue_init(q);
}
-static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
/* Protect against concurrent access from ISR? */
struct rkisp1_device *rkisp1 = stats->rkisp1;
u32 reg_val;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V10);
pbuf->params.awb.awb_mean[0].cnt =
RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
- reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V10);
pbuf->params.awb.awb_mean[0].mean_cr_or_r =
RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
@@ -195,8 +195,29 @@ static void rkisp1_stats_get_awb_meas(struct rkisp1_stats *stats,
RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
}
-static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_awb_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ /* Protect against concurrent access from ISR? */
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 reg_val;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AWB;
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_WHITE_CNT_V12);
+ pbuf->params.awb.awb_mean[0].cnt =
+ RKISP1_CIF_ISP_AWB_GET_PIXEL_CNT(reg_val);
+ reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AWB_MEAN_V12);
+
+ pbuf->params.awb.awb_mean[0].mean_cr_or_r =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CR_R(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_cb_or_b =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_CB_B(reg_val);
+ pbuf->params.awb.awb_mean[0].mean_y_or_g =
+ RKISP1_CIF_ISP_AWB_GET_MEAN_Y_G(reg_val);
+}
+
+static void rkisp1_stats_get_aec_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
@@ -205,7 +226,31 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats,
for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++)
pbuf->params.ae.exp_mean[i] =
(u8)rkisp1_read(rkisp1,
- RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4);
+ RKISP1_CIF_ISP_EXP_MEAN_00_V10 + i * 4);
+}
+
+static void rkisp1_stats_get_aec_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP;
+ for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V12 / 4; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 1] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy1_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 2] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy2_V12(value);
+ pbuf->params.ae.exp_mean[4 * i + 3] =
+ RKISP1_CIF_ISP_EXP_GET_MEAN_xy3_V12(value);
+ }
+
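+ /* The last mean value does not fill a full register; read it separately. */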
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_V12 + i * 4);
+ pbuf->params.ae.exp_mean[4 * i + 0] = RKISP1_CIF_ISP_EXP_GET_MEAN_xy0_V12(value);
}
static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
@@ -225,17 +270,34 @@ static void rkisp1_stats_get_afc_meas(struct rkisp1_stats *stats,
af->window[2].lum = rkisp1_read(rkisp1, RKISP1_CIF_ISP_AFM_LUM_C);
}
-static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats,
- struct rkisp1_stat_buffer *pbuf)
+static void rkisp1_stats_get_hst_meas_v10(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
{
struct rkisp1_device *rkisp1 = stats->rkisp1;
unsigned int i;
pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) {
- u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4);
+ u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0_V10 + i * 4);
+
+ pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN_V10(reg_val);
+ }
+}
- pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val);
+static void rkisp1_stats_get_hst_meas_v12(struct rkisp1_stats *stats,
+ struct rkisp1_stat_buffer *pbuf)
+{
+ struct rkisp1_device *rkisp1 = stats->rkisp1;
+ u32 value;
+ int i;
+
+ pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST;
+ for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V12 / 2; i++) {
+ value = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_V12 + i * 4);
+ pbuf->params.hist.hist_bins[2 * i] =
+ RKISP1_CIF_ISP_HIST_GET_BIN0_V12(value);
+ pbuf->params.hist.hist_bins[2 * i + 1] =
+ RKISP1_CIF_ISP_HIST_GET_BIN1_V12(value);
}
}
@@ -286,6 +348,18 @@ static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats,
}
}
+static const struct rkisp1_stats_ops rkisp1_v10_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v10,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v10,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v10,
+};
+
+static struct rkisp1_stats_ops rkisp1_v12_stats_ops = {
+ .get_awb_meas = rkisp1_stats_get_awb_meas_v12,
+ .get_aec_meas = rkisp1_stats_get_aec_meas_v12,
+ .get_hst_meas = rkisp1_stats_get_hst_meas_v12,
+};
+
static void
rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
{
@@ -307,18 +381,18 @@ rkisp1_stats_send_measurement(struct rkisp1_stats *stats, u32 isp_ris)
cur_stat_buf = (struct rkisp1_stat_buffer *)
vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0);
if (isp_ris & RKISP1_CIF_ISP_AWB_DONE)
- rkisp1_stats_get_awb_meas(stats, cur_stat_buf);
+ stats->ops->get_awb_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_AFM_FIN)
rkisp1_stats_get_afc_meas(stats, cur_stat_buf);
if (isp_ris & RKISP1_CIF_ISP_EXP_END) {
- rkisp1_stats_get_aec_meas(stats, cur_stat_buf);
+ stats->ops->get_aec_meas(stats, cur_stat_buf);
rkisp1_stats_get_bls_meas(stats, cur_stat_buf);
}
if (isp_ris & RKISP1_CIF_ISP_HIST_MEASURE_RDY)
- rkisp1_stats_get_hst_meas(stats, cur_stat_buf);
+ stats->ops->get_hst_meas(stats, cur_stat_buf);
vb2_set_plane_payload(&cur_buf->vb.vb2_buf, 0,
sizeof(struct rkisp1_stat_buffer));
@@ -352,6 +426,11 @@ static void rkisp1_init_stats(struct rkisp1_stats *stats)
V4L2_META_FMT_RK_ISP1_STAT_3A;
stats->vdev_fmt.fmt.meta.buffersize =
sizeof(struct rkisp1_stat_buffer);
+
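+ /* Select the stats readout ops matching the ISP hardware revision. */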
+ if (stats->rkisp1->media_dev.hw_revision == RKISP1_V12)
+ stats->ops = &rkisp1_v12_stats_ops;
+ else
+ stats->ops = &rkisp1_v10_stats_ops;
}
int rkisp1_stats_register(struct rkisp1_device *rkisp1)
diff --git a/drivers/media/platform/s3c-camif/camif-core.c b/drivers/media/platform/s3c-camif/camif-core.c
index e1d51fd3e700..32892ab359ee 100644
--- a/drivers/media/platform/s3c-camif/camif-core.c
+++ b/drivers/media/platform/s3c-camif/camif-core.c
@@ -23,7 +23,6 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <media/media-device.h>
#include <media/v4l2-ctrls.h>
@@ -402,7 +401,6 @@ static int s3c_camif_probe(struct platform_device *pdev)
struct s3c_camif_plat_data *pdata = dev->platform_data;
struct s3c_camif_drvdata *drvdata;
struct camif_dev *camif;
- struct resource *mres;
int ret = 0;
camif = devm_kzalloc(dev, sizeof(*camif), GFP_KERNEL);
@@ -423,9 +421,7 @@ static int s3c_camif_probe(struct platform_device *pdev)
drvdata = (void *)platform_get_device_id(pdev)->driver_data;
camif->variant = drvdata->variant;
- mres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- camif->io_base = devm_ioremap_resource(dev, mres);
+ camif->io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(camif->io_base))
return PTR_ERR(camif->io_base);
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
index 1cb5eaabf340..fa0bb31bd2b9 100644
--- a/drivers/media/platform/s5p-g2d/g2d.c
+++ b/drivers/media/platform/s5p-g2d/g2d.c
@@ -635,9 +635,7 @@ static int g2d_probe(struct platform_device *pdev)
mutex_init(&dev->mutex);
atomic_set(&dev->num_inst, 0);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- dev->regs = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs))
return PTR_ERR(dev->regs);
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 7d0ab19c38bb..ebdfd24e9cd5 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -2850,7 +2850,6 @@ static void *jpeg_get_drv_data(struct device *dev);
static int s5p_jpeg_probe(struct platform_device *pdev)
{
struct s5p_jpeg *jpeg;
- struct resource *res;
int i, ret;
/* JPEG IP abstraction struct */
@@ -2867,9 +2866,7 @@ static int s5p_jpeg_probe(struct platform_device *pdev)
jpeg->dev = &pdev->dev;
/* memory-mapped registers */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- jpeg->regs = devm_ioremap_resource(&pdev->dev, res);
+ jpeg->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(jpeg->regs))
return PTR_ERR(jpeg->regs);
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index eba2b9f040df..fc85e4e2d020 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -1283,14 +1283,17 @@ static int s5p_mfc_probe(struct platform_device *pdev)
spin_lock_init(&dev->condlock);
dev->plat_dev = pdev;
if (!dev->plat_dev) {
- dev_err(&pdev->dev, "No platform data specified\n");
+ mfc_err("No platform data specified\n");
return -ENODEV;
}
dev->variant = of_device_get_match_data(&pdev->dev);
+ if (!dev->variant) {
+ dev_err(&pdev->dev, "Failed to get device MFC hardware variant information\n");
+ return -ENOENT;
+ }
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ dev->regs_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->regs_base))
return PTR_ERR(dev->regs_base);
diff --git a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
index 6413cd279125..7d467f2ba072 100644
--- a/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
+++ b/drivers/media/platform/sti/bdisp/bdisp-v4l2.c
@@ -1315,8 +1315,7 @@ static int bdisp_probe(struct platform_device *pdev)
mutex_init(&bdisp->lock);
/* get resources */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- bdisp->regs = devm_ioremap_resource(dev, res);
+ bdisp->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(bdisp->regs)) {
ret = PTR_ERR(bdisp->regs);
goto err_wq;
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
index 338b205ae3a7..02dc78bd7fab 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-core.c
@@ -28,7 +28,6 @@
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/time.h>
-#include <linux/version.h>
#include <linux/wait.h>
#include <linux/pinctrl/pinctrl.h>
diff --git a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
index 0560a9cb004b..feb48cb546d7 100644
--- a/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
+++ b/drivers/media/platform/sti/c8sectpfe/c8sectpfe-dvb.c
@@ -11,7 +11,6 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
-#include <linux/version.h>
#include <dt-bindings/media/c8sectpfe.h>
diff --git a/drivers/media/platform/sti/hva/hva-hw.c b/drivers/media/platform/sti/hva/hva-hw.c
index 30fb1aa4a351..15e8f83b1b56 100644
--- a/drivers/media/platform/sti/hva/hva-hw.c
+++ b/drivers/media/platform/sti/hva/hva-hw.c
@@ -298,15 +298,13 @@ static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
struct device *dev = &pdev->dev;
- struct resource *regs;
struct resource *esram;
int ret;
WARN_ON(!hva);
/* get memory for registers */
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hva->regs = devm_ioremap_resource(dev, regs);
+ hva->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(hva->regs)) {
dev_err(dev, "%s failed to get regs\n", HVA_PREFIX);
return PTR_ERR(hva->regs);
diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
index d914ccef9831..e1b17c05229c 100644
--- a/drivers/media/platform/stm32/stm32-dcmi.c
+++ b/drivers/media/platform/stm32/stm32-dcmi.c
@@ -128,6 +128,7 @@ struct stm32_dcmi {
int sequence;
struct list_head buffers;
struct dcmi_buf *active;
+ int irq;
struct v4l2_device v4l2_dev;
struct video_device *vdev;
@@ -1759,6 +1760,14 @@ static int dcmi_graph_notify_complete(struct v4l2_async_notifier *notifier)
return ret;
}
+ ret = devm_request_threaded_irq(dcmi->dev, dcmi->irq, dcmi_irq_callback,
+ dcmi_irq_thread, IRQF_ONESHOT,
+ dev_name(dcmi->dev), dcmi);
+ if (ret) {
+ dev_err(dcmi->dev, "Unable to request irq %d\n", dcmi->irq);
+ return ret;
+ }
+
return 0;
}
@@ -1824,11 +1833,11 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
return -EINVAL;
}
- v4l2_async_notifier_init(&dcmi->notifier);
+ v4l2_async_nf_init(&dcmi->notifier);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &dcmi->notifier, of_fwnode_handle(ep),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&dcmi->notifier,
+ of_fwnode_handle(ep),
+ struct v4l2_async_subdev);
of_node_put(ep);
@@ -1839,10 +1848,10 @@ static int dcmi_graph_init(struct stm32_dcmi *dcmi)
dcmi->notifier.ops = &dcmi_graph_notify_ops;
- ret = v4l2_async_notifier_register(&dcmi->v4l2_dev, &dcmi->notifier);
+ ret = v4l2_async_nf_register(&dcmi->v4l2_dev, &dcmi->notifier);
if (ret < 0) {
dev_err(dcmi->dev, "Failed to register notifier\n");
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
return ret;
}
@@ -1914,6 +1923,8 @@ static int dcmi_probe(struct platform_device *pdev)
if (irq <= 0)
return irq ? irq : -ENXIO;
+ dcmi->irq = irq;
+
dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!dcmi->res) {
dev_err(&pdev->dev, "Could not get resource\n");
@@ -1926,14 +1937,6 @@ static int dcmi_probe(struct platform_device *pdev)
return PTR_ERR(dcmi->regs);
}
- ret = devm_request_threaded_irq(&pdev->dev, irq, dcmi_irq_callback,
- dcmi_irq_thread, IRQF_ONESHOT,
- dev_name(&pdev->dev), dcmi);
- if (ret) {
- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
- return ret;
- }
-
mclk = devm_clk_get(&pdev->dev, "mclk");
if (IS_ERR(mclk)) {
if (PTR_ERR(mclk) != -EPROBE_DEFER)
@@ -2060,7 +2063,7 @@ static int dcmi_probe(struct platform_device *pdev)
return 0;
err_cleanup:
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
err_media_entity_cleanup:
media_entity_cleanup(&dcmi->vdev->entity);
err_device_release:
@@ -2080,8 +2083,8 @@ static int dcmi_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
- v4l2_async_notifier_unregister(&dcmi->notifier);
- v4l2_async_notifier_cleanup(&dcmi->notifier);
+ v4l2_async_nf_unregister(&dcmi->notifier);
+ v4l2_async_nf_cleanup(&dcmi->notifier);
media_entity_cleanup(&dcmi->vdev->entity);
v4l2_device_unregister(&dcmi->v4l2_dev);
media_device_cleanup(&dcmi->mdev);
diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
index 8d40a7acba9c..80a10f238bbe 100644
--- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
+++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_csi.c
@@ -122,7 +122,7 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -135,8 +135,8 @@ static int sun4i_csi_notifier_init(struct sun4i_csi *csi)
csi->bus = vep.bus.parallel;
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&csi->notifier, ep,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto out;
@@ -154,7 +154,6 @@ static int sun4i_csi_probe(struct platform_device *pdev)
struct v4l2_subdev *subdev;
struct video_device *vdev;
struct sun4i_csi *csi;
- struct resource *res;
int ret;
int irq;
@@ -179,8 +178,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
media_device_init(&csi->mdev);
csi->v4l.mdev = &csi->mdev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- csi->regs = devm_ioremap_resource(&pdev->dev, res);
+ csi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(csi->regs))
return PTR_ERR(csi->regs);
@@ -244,7 +242,7 @@ static int sun4i_csi_probe(struct platform_device *pdev)
if (ret)
goto err_unregister_media;
- ret = v4l2_async_notifier_register(&csi->v4l, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l, &csi->notifier);
if (ret) {
dev_err(csi->dev, "Couldn't register our notifier.\n");
goto err_unregister_media;
@@ -268,8 +266,8 @@ static int sun4i_csi_remove(struct platform_device *pdev)
{
struct sun4i_csi *csi = platform_get_drvdata(pdev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
vb2_video_unregister_device(&csi->vdev);
media_device_unregister(&csi->mdev);
sun4i_csi_dma_unregister(csi);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
index 27935f1e9555..fc96921b0583 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c
@@ -61,7 +61,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
|| sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656)
&& sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) {
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -124,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi,
case V4L2_PIX_FMT_VYUY:
return (mbus_code == MEDIA_BUS_FMT_VYUY8_2X8);
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -269,7 +269,7 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev,
case V4L2_PIX_FMT_VYUY:
return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8;
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
return buf_interlaced ? CSI_FRAME_MB_YUV420 :
CSI_FIELD_MB_YUV420;
case V4L2_PIX_FMT_NV12:
@@ -311,7 +311,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev,
return 0;
switch (pixformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV16:
case V4L2_PIX_FMT_YUV420:
@@ -526,7 +526,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev)
planar_offset[0] = 0;
switch (config->pixelformat) {
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_NV16:
@@ -717,8 +717,8 @@ static int sun6i_csi_fwnode_parse(struct device *dev,
static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi)
{
media_device_unregister(&csi->media_dev);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
sun6i_video_cleanup(&csi->video);
v4l2_device_unregister(&csi->v4l2_dev);
v4l2_ctrl_handler_free(&csi->ctrl_handler);
@@ -737,7 +737,7 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
"platform:%s", dev_name(csi->dev));
media_device_init(&csi->media_dev);
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0);
if (ret) {
@@ -759,16 +759,17 @@ static int sun6i_csi_v4l2_init(struct sun6i_csi *csi)
if (ret)
goto unreg_v4l2;
- ret = v4l2_async_notifier_parse_fwnode_endpoints(csi->dev,
- &csi->notifier,
- sizeof(struct v4l2_async_subdev),
- sun6i_csi_fwnode_parse);
+ ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev,
+ &csi->notifier,
+ sizeof(struct
+ v4l2_async_subdev),
+ sun6i_csi_fwnode_parse);
if (ret)
goto clean_video;
csi->notifier.ops = &sun6i_csi_async_ops;
- ret = v4l2_async_notifier_register(&csi->v4l2_dev, &csi->notifier);
+ ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier);
if (ret) {
dev_err(csi->dev, "notifier registration failed\n");
goto clean_video;
@@ -783,7 +784,7 @@ unreg_v4l2:
free_ctrl:
v4l2_ctrl_handler_free(&csi->ctrl_handler);
clean_media:
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
media_device_cleanup(&csi->media_dev);
return ret;
@@ -832,13 +833,11 @@ static const struct regmap_config sun6i_csi_regmap_config = {
static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev,
struct platform_device *pdev)
{
- struct resource *res;
void __iomem *io_base;
int ret;
int irq;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- io_base = devm_ioremap_resource(&pdev->dev, res);
+ io_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(io_base))
return PTR_ERR(io_base);
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
index c626821aaedb..3a38d107ae3f 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h
@@ -105,7 +105,7 @@ static inline int sun6i_csi_get_bpp(unsigned int pixformat)
case V4L2_PIX_FMT_SGBRG12:
case V4L2_PIX_FMT_SGRBG12:
case V4L2_PIX_FMT_SRGGB12:
- case V4L2_PIX_FMT_HM12:
+ case V4L2_PIX_FMT_NV12_16L16:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
case V4L2_PIX_FMT_YUV420:
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
index 07b2161392d2..607a8d39fbe2 100644
--- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
+++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c
@@ -48,7 +48,7 @@ static const u32 supported_pixformats[] = {
V4L2_PIX_FMT_YVYU,
V4L2_PIX_FMT_UYVY,
V4L2_PIX_FMT_VYUY,
- V4L2_PIX_FMT_HM12,
+ V4L2_PIX_FMT_NV12_16L16,
V4L2_PIX_FMT_NV12,
V4L2_PIX_FMT_NV21,
V4L2_PIX_FMT_YUV420,
@@ -467,7 +467,7 @@ static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = {
static int sun6i_video_open(struct file *file)
{
struct sun6i_video *video = video_drvdata(file);
- int ret;
+ int ret = 0;
if (mutex_lock_interruptible(&video->lock))
return -ERESTARTSYS;
@@ -481,10 +481,8 @@ static int sun6i_video_open(struct file *file)
goto fh_release;
/* check if already powered */
- if (!v4l2_fh_is_singular_file(file)) {
- ret = -EBUSY;
+ if (!v4l2_fh_is_singular_file(file))
goto unlock;
- }
ret = sun6i_csi_set_power(video->csi, true);
if (ret < 0)
diff --git a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
index 671e4a928993..aa65d70b6270 100644
--- a/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
+++ b/drivers/media/platform/sunxi/sun8i-di/sun8i-di.c
@@ -803,7 +803,6 @@ static int deinterlace_probe(struct platform_device *pdev)
{
struct deinterlace_dev *dev;
struct video_device *vfd;
- struct resource *res;
int irq, ret;
dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
@@ -825,8 +824,7 @@ static int deinterlace_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dev->base = devm_ioremap_resource(&pdev->dev, res);
+ dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dev->base))
return PTR_ERR(dev->base);
diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
index 8e469d518a74..4a4a6c5983f7 100644
--- a/drivers/media/platform/ti-vpe/cal.c
+++ b/drivers/media/platform/ti-vpe/cal.c
@@ -781,7 +781,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&cal->notifier);
+ v4l2_async_nf_init(&cal->notifier);
cal->notifier.ops = &cal_async_notifier_ops;
for (i = 0; i < cal->data->num_csi2_phy; ++i) {
@@ -793,9 +793,9 @@ static int cal_async_notifier_register(struct cal_dev *cal)
continue;
fwnode = of_fwnode_handle(phy->source_node);
- casd = v4l2_async_notifier_add_fwnode_subdev(&cal->notifier,
- fwnode,
- struct cal_v4l2_async_subdev);
+ casd = v4l2_async_nf_add_fwnode(&cal->notifier,
+ fwnode,
+ struct cal_v4l2_async_subdev);
if (IS_ERR(casd)) {
phy_err(phy, "Failed to add subdev to notifier\n");
ret = PTR_ERR(casd);
@@ -805,7 +805,7 @@ static int cal_async_notifier_register(struct cal_dev *cal)
casd->phy = phy;
}
- ret = v4l2_async_notifier_register(&cal->v4l2_dev, &cal->notifier);
+ ret = v4l2_async_nf_register(&cal->v4l2_dev, &cal->notifier);
if (ret) {
cal_err(cal, "Error registering async notifier\n");
goto error;
@@ -814,14 +814,14 @@ static int cal_async_notifier_register(struct cal_dev *cal)
return 0;
error:
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
return ret;
}
static void cal_async_notifier_unregister(struct cal_dev *cal)
{
- v4l2_async_notifier_unregister(&cal->notifier);
- v4l2_async_notifier_cleanup(&cal->notifier);
+ v4l2_async_nf_unregister(&cal->notifier);
+ v4l2_async_nf_cleanup(&cal->notifier);
}
/* ------------------------------------------------------------------
diff --git a/drivers/media/platform/via-camera.c b/drivers/media/platform/via-camera.c
index 3655573e8581..95483c84c3f2 100644
--- a/drivers/media/platform/via-camera.c
+++ b/drivers/media/platform/via-camera.c
@@ -132,11 +132,11 @@ static struct via_camera *via_cam_info;
* Debugging and related.
*/
#define cam_err(cam, fmt, arg...) \
- dev_err(&(cam)->platdev->dev, fmt, ##arg);
+ dev_err(&(cam)->platdev->dev, fmt, ##arg)
#define cam_warn(cam, fmt, arg...) \
- dev_warn(&(cam)->platdev->dev, fmt, ##arg);
+ dev_warn(&(cam)->platdev->dev, fmt, ##arg)
#define cam_dbg(cam, fmt, arg...) \
- dev_dbg(&(cam)->platdev->dev, fmt, ##arg);
+ dev_dbg(&(cam)->platdev->dev, fmt, ##arg)
/*
* Format handling. This is ripped almost directly from Hans's changes
diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
index 905005e271ca..fda8fc0e4814 100644
--- a/drivers/media/platform/video-mux.c
+++ b/drivers/media/platform/video-mux.c
@@ -360,7 +360,7 @@ static int video_mux_async_register(struct video_mux *vmux,
unsigned int i;
int ret;
- v4l2_async_notifier_init(&vmux->notifier);
+ v4l2_async_nf_init(&vmux->notifier);
for (i = 0; i < num_input_pads; i++) {
struct v4l2_async_subdev *asd;
@@ -380,8 +380,8 @@ static int video_mux_async_register(struct video_mux *vmux,
}
fwnode_handle_put(remote_ep);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &vmux->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&vmux->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -395,8 +395,7 @@ static int video_mux_async_register(struct video_mux *vmux,
vmux->notifier.ops = &video_mux_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&vmux->subdev,
- &vmux->notifier);
+ ret = v4l2_async_subdev_nf_register(&vmux->subdev, &vmux->notifier);
if (ret)
return ret;
@@ -477,8 +476,8 @@ static int video_mux_probe(struct platform_device *pdev)
ret = video_mux_async_register(vmux, num_pads - 1);
if (ret) {
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
}
return ret;
@@ -489,8 +488,8 @@ static int video_mux_remove(struct platform_device *pdev)
struct video_mux *vmux = platform_get_drvdata(pdev);
struct v4l2_subdev *sd = &vmux->subdev;
- v4l2_async_notifier_unregister(&vmux->notifier);
- v4l2_async_notifier_cleanup(&vmux->notifier);
+ v4l2_async_nf_unregister(&vmux->notifier);
+ v4l2_async_nf_cleanup(&vmux->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
diff --git a/drivers/media/platform/vsp1/vsp1_drm.c b/drivers/media/platform/vsp1/vsp1_drm.c
index 06f74d410973..0c2507dc03d6 100644
--- a/drivers/media/platform/vsp1/vsp1_drm.c
+++ b/drivers/media/platform/vsp1/vsp1_drm.c
@@ -455,6 +455,10 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
dev_err(vsp1->dev, "%s: failed to setup UIF after %s\n",
__func__, BRX_NAME(pipe->brx));
+ /* If the DRM pipe does not have a UIF there is nothing we can update. */
+ if (!drm_pipe->uif)
+ return 0;
+
/*
* If the UIF is not in use schedule it for removal by setting its pipe
* pointer to NULL, vsp1_du_pipeline_configure() will remove it from the
@@ -462,9 +466,9 @@ static int vsp1_du_pipeline_setup_inputs(struct vsp1_device *vsp1,
* make sure it is present in the pipeline's list of entities if it
* wasn't already.
*/
- if (drm_pipe->uif && !use_uif) {
+ if (!use_uif) {
drm_pipe->uif->pipe = NULL;
- } else if (drm_pipe->uif && !drm_pipe->uif->pipe) {
+ } else if (!drm_pipe->uif->pipe) {
drm_pipe->uif->pipe = pipe;
list_add_tail(&drm_pipe->uif->list_pipe, &pipe->entities);
}
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index de442d6c9926..c9044785b903 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -44,7 +44,7 @@
static irqreturn_t vsp1_irq_handler(int irq, void *data)
{
- u32 mask = VI6_WFP_IRQ_STA_DFE | VI6_WFP_IRQ_STA_FRE;
+ u32 mask = VI6_WPF_IRQ_STA_DFE | VI6_WPF_IRQ_STA_FRE;
struct vsp1_device *vsp1 = data;
irqreturn_t ret = IRQ_NONE;
unsigned int i;
@@ -59,7 +59,7 @@ static irqreturn_t vsp1_irq_handler(int irq, void *data)
status = vsp1_read(vsp1, VI6_WPF_IRQ_STA(i));
vsp1_write(vsp1, VI6_WPF_IRQ_STA(i), ~status & mask);
- if (status & VI6_WFP_IRQ_STA_DFE) {
+ if (status & VI6_WPF_IRQ_STA_DFE) {
vsp1_pipeline_frame_end(wpf->entity.pipe);
ret = IRQ_HANDLED;
}
@@ -777,6 +777,16 @@ static const struct vsp1_device_info vsp1_device_infos[] = {
.uif_count = 2,
.wpf_count = 2,
.num_bru_inputs = 5,
+ }, {
+ .version = VI6_IP_VERSION_MODEL_VSPD_V3U,
+ .model = "VSP2-D",
+ .gen = 3,
+ .features = VSP1_HAS_BRU | VSP1_HAS_EXT_DL,
+ .lif_count = 1,
+ .rpf_count = 5,
+ .uif_count = 2,
+ .wpf_count = 1,
+ .num_bru_inputs = 5,
},
};
@@ -785,7 +795,6 @@ static int vsp1_probe(struct platform_device *pdev)
struct vsp1_device *vsp1;
struct device_node *fcp_node;
struct resource *irq;
- struct resource *io;
unsigned int i;
int ret;
@@ -800,8 +809,7 @@ static int vsp1_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, vsp1);
/* I/O and IRQ resources (clock managed by the clock PM domain). */
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- vsp1->mmio = devm_ioremap_resource(&pdev->dev, io);
+ vsp1->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(vsp1->mmio))
return PTR_ERR(vsp1->mmio);
diff --git a/drivers/media/platform/vsp1/vsp1_regs.h b/drivers/media/platform/vsp1/vsp1_regs.h
index fe3130db1fa2..fae7286eb01e 100644
--- a/drivers/media/platform/vsp1/vsp1_regs.h
+++ b/drivers/media/platform/vsp1/vsp1_regs.h
@@ -32,12 +32,12 @@
#define VI6_STATUS_SYS_ACT(n) BIT((n) + 8)
#define VI6_WPF_IRQ_ENB(n) (0x0048 + (n) * 12)
-#define VI6_WFP_IRQ_ENB_DFEE BIT(1)
-#define VI6_WFP_IRQ_ENB_FREE BIT(0)
+#define VI6_WPF_IRQ_ENB_DFEE BIT(1)
+#define VI6_WPF_IRQ_ENB_FREE BIT(0)
#define VI6_WPF_IRQ_STA(n) (0x004c + (n) * 12)
-#define VI6_WFP_IRQ_STA_DFE BIT(1)
-#define VI6_WFP_IRQ_STA_FRE BIT(0)
+#define VI6_WPF_IRQ_STA_DFE BIT(1)
+#define VI6_WPF_IRQ_STA_FRE BIT(0)
#define VI6_DISP_IRQ_ENB(n) (0x0078 + (n) * 60)
#define VI6_DISP_IRQ_ENB_DSTE BIT(8)
@@ -766,6 +766,8 @@
#define VI6_IP_VERSION_MODEL_VSPD_V3 (0x18 << 8)
#define VI6_IP_VERSION_MODEL_VSPDL_GEN3 (0x19 << 8)
#define VI6_IP_VERSION_MODEL_VSPBS_GEN3 (0x1a << 8)
+#define VI6_IP_VERSION_MODEL_VSPD_V3U (0x1c << 8)
+
#define VI6_IP_VERSION_SOC_MASK (0xff << 0)
#define VI6_IP_VERSION_SOC_H2 (0x01 << 0)
#define VI6_IP_VERSION_SOC_V2H (0x01 << 0)
@@ -777,6 +779,7 @@
#define VI6_IP_VERSION_SOC_D3 (0x04 << 0)
#define VI6_IP_VERSION_SOC_M3N (0x04 << 0)
#define VI6_IP_VERSION_SOC_E3 (0x04 << 0)
+#define VI6_IP_VERSION_SOC_V3U (0x05 << 0)
/* -----------------------------------------------------------------------------
* RPF CLUT Registers
diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
index 208498fa6ed7..94e91d7bb56c 100644
--- a/drivers/media/platform/vsp1/vsp1_wpf.c
+++ b/drivers/media/platform/vsp1/vsp1_wpf.c
@@ -342,7 +342,7 @@ static void wpf_configure_stream(struct vsp1_entity *entity,
/* Enable interrupts. */
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_STA(index), 0);
vsp1_dl_body_write(dlb, VI6_WPF_IRQ_ENB(index),
- VI6_WFP_IRQ_ENB_DFEE);
+ VI6_WPF_IRQ_ENB_DFEE);
/*
* Configure writeback for display pipelines (the wpf writeback flag is
diff --git a/drivers/media/platform/xilinx/xilinx-vip.c b/drivers/media/platform/xilinx/xilinx-vip.c
index 425a32dd5d19..a0073122798f 100644
--- a/drivers/media/platform/xilinx/xilinx-vip.c
+++ b/drivers/media/platform/xilinx/xilinx-vip.c
@@ -205,10 +205,8 @@ EXPORT_SYMBOL_GPL(xvip_clr_and_set);
int xvip_init_resources(struct xvip_device *xvip)
{
struct platform_device *pdev = to_platform_device(xvip->dev);
- struct resource *res;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- xvip->iomem = devm_ioremap_resource(xvip->dev, res);
+ xvip->iomem = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xvip->iomem))
return PTR_ERR(xvip->iomem);
diff --git a/drivers/media/platform/xilinx/xilinx-vipp.c b/drivers/media/platform/xilinx/xilinx-vipp.c
index 2ce31d7ce1a6..f34f8b077e03 100644
--- a/drivers/media/platform/xilinx/xilinx-vipp.c
+++ b/drivers/media/platform/xilinx/xilinx-vipp.c
@@ -382,9 +382,8 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
continue;
}
- xge = v4l2_async_notifier_add_fwnode_subdev(
- &xdev->notifier, remote,
- struct xvip_graph_entity);
+ xge = v4l2_async_nf_add_fwnode(&xdev->notifier, remote,
+ struct xvip_graph_entity);
fwnode_handle_put(remote);
if (IS_ERR(xge)) {
ret = PTR_ERR(xge);
@@ -395,7 +394,7 @@ static int xvip_graph_parse_one(struct xvip_composite_device *xdev,
return 0;
err_notifier_cleanup:
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
fwnode_handle_put(ep);
return ret;
}
@@ -420,7 +419,7 @@ static int xvip_graph_parse(struct xvip_composite_device *xdev)
entity = to_xvip_entity(asd);
ret = xvip_graph_parse_one(xdev, entity->asd.match.fwnode);
if (ret < 0) {
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
break;
}
}
@@ -496,8 +495,8 @@ static void xvip_graph_cleanup(struct xvip_composite_device *xdev)
struct xvip_dma *dmap;
struct xvip_dma *dma;
- v4l2_async_notifier_unregister(&xdev->notifier);
- v4l2_async_notifier_cleanup(&xdev->notifier);
+ v4l2_async_nf_unregister(&xdev->notifier);
+ v4l2_async_nf_cleanup(&xdev->notifier);
list_for_each_entry_safe(dma, dmap, &xdev->dmas, list) {
xvip_dma_cleanup(dma);
@@ -532,7 +531,7 @@ static int xvip_graph_init(struct xvip_composite_device *xdev)
/* Register the subdevices notifier. */
xdev->notifier.ops = &xvip_graph_notify_ops;
- ret = v4l2_async_notifier_register(&xdev->v4l2_dev, &xdev->notifier);
+ ret = v4l2_async_nf_register(&xdev->v4l2_dev, &xdev->notifier);
if (ret < 0) {
dev_err(xdev->dev, "notifier registration failed\n");
goto done;
@@ -596,7 +595,7 @@ static int xvip_composite_probe(struct platform_device *pdev)
xdev->dev = &pdev->dev;
INIT_LIST_HEAD(&xdev->dmas);
- v4l2_async_notifier_init(&xdev->notifier);
+ v4l2_async_nf_init(&xdev->notifier);
ret = xvip_composite_v4l2_init(xdev);
if (ret < 0)
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c
index 112376873167..484046471c03 100644
--- a/drivers/media/radio/radio-wl1273.c
+++ b/drivers/media/radio/radio-wl1273.c
@@ -1279,7 +1279,7 @@ static int wl1273_fm_vidioc_querycap(struct file *file, void *priv,
strscpy(capability->driver, WL1273_FM_DRIVER_NAME,
sizeof(capability->driver));
- strscpy(capability->card, "Texas Instruments Wl1273 FM Radio",
+ strscpy(capability->card, "TI Wl1273 FM Radio",
sizeof(capability->card));
strscpy(capability->bus_info, radio->bus_type,
sizeof(capability->bus_info));
diff --git a/drivers/media/radio/si470x/radio-si470x-i2c.c b/drivers/media/radio/si470x/radio-si470x-i2c.c
index f491420d7b53..a972c0705ac7 100644
--- a/drivers/media/radio/si470x/radio-si470x-i2c.c
+++ b/drivers/media/radio/si470x/radio-si470x-i2c.c
@@ -11,7 +11,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Joonyoung Shim <jy0922.shim@samsung.com>";
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "I2C radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.2"
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index fedff68d8c49..3f8634a46573 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -16,7 +16,7 @@
/* driver definitions */
#define DRIVER_AUTHOR "Tobias Lorenz <tobias.lorenz@gmx.net>"
-#define DRIVER_CARD "Silicon Labs Si470x FM Radio Receiver"
+#define DRIVER_CARD "Silicon Labs Si470x FM Radio"
#define DRIVER_DESC "USB radio driver for Si470x FM Radio Receivers"
#define DRIVER_VERSION "1.0.10"
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig
index fd5a7a058714..9506baf3c4c1 100644
--- a/drivers/media/rc/Kconfig
+++ b/drivers/media/rc/Kconfig
@@ -453,14 +453,6 @@ config IR_SERIAL_TRANSMITTER
help
Serial Port Transmitter support
-config IR_SIR
- tristate "Built-in SIR IrDA port"
- help
- Say Y if you want to use a IrDA SIR port Transceivers.
-
- To compile this driver as a module, choose M here: the module will
- be called sir-ir.
-
config RC_XBOX_DVD
tristate "Xbox DVD Movie Playback Kit"
depends on USB
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile
index 0db51fad27d6..378d62d21e06 100644
--- a/drivers/media/rc/Makefile
+++ b/drivers/media/rc/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_RC_ST) += st_rc.o
obj-$(CONFIG_IR_SUNXI) += sunxi-cir.o
obj-$(CONFIG_IR_IMG) += img-ir/
obj-$(CONFIG_IR_SERIAL) += serial_ir.o
-obj-$(CONFIG_IR_SIR) += sir_ir.o
obj-$(CONFIG_IR_MTK) += mtk-cir.o
obj-$(CONFIG_RC_XBOX_DVD) += xbox_remote.o
obj-$(CONFIG_IR_TOY) += ir_toy.o
diff --git a/drivers/media/rc/img-ir/img-ir-core.c b/drivers/media/rc/img-ir/img-ir-core.c
index 094aa6a06315..6f8464872033 100644
--- a/drivers/media/rc/img-ir/img-ir-core.c
+++ b/drivers/media/rc/img-ir/img-ir-core.c
@@ -76,7 +76,6 @@ static void img_ir_ident(struct img_ir_priv *priv)
static int img_ir_probe(struct platform_device *pdev)
{
struct img_ir_priv *priv;
- struct resource *res_regs;
int irq, error, error2;
/* Get resources from platform device */
@@ -94,8 +93,7 @@ static int img_ir_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
/* Ioremap the registers */
- res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->reg_base = devm_ioremap_resource(&pdev->dev, res_regs);
+ priv->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->reg_base))
return PTR_ERR(priv->reg_base);
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 2ca4e86c7b9f..54da6f60079b 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2358,8 +2358,10 @@ urb_submit_failed:
touch_setup_failed:
find_endpoint_failed:
usb_put_dev(ictx->usbdev_intf1);
+ ictx->usbdev_intf1 = NULL;
mutex_unlock(&ictx->lock);
usb_free_urb(rx_urb);
+ ictx->rx_urb_intf1 = NULL;
rx_urb_alloc_failed:
dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret);
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 4609fb4519e9..e0be6471afe5 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -249,7 +249,6 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
{
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct hix5hd2_ir_priv *priv;
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *of_id;
@@ -274,8 +273,7 @@ static int hix5hd2_ir_probe(struct platform_device *pdev)
priv->regmap = NULL;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->base = devm_ioremap_resource(dev, res);
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base))
return PTR_ERR(priv->base);
diff --git a/drivers/media/rc/ir_toy.c b/drivers/media/rc/ir_toy.c
index 48d52baec1a1..7e98e7e3aace 100644
--- a/drivers/media/rc/ir_toy.c
+++ b/drivers/media/rc/ir_toy.c
@@ -4,7 +4,9 @@
* Infrared Toy and IR Droid RC core driver
*
* Copyright (C) 2020 Sean Young <sean@mess.org>
-
+ *
+ * http://dangerousprototypes.com/docs/USB_IR_Toy:_Sampling_mode
+ *
* This driver is based on the lirc driver which can be found here:
* https://sourceforge.net/p/lirc/git/ci/master/tree/plugins/irtoy.c
* Copyright (C) 2011 Peter Kooiman <pkooiman@gmail.com>
@@ -46,7 +48,7 @@ static const u8 COMMAND_TXSTART[] = { 0x26, 0x24, 0x25, 0x03 };
enum state {
STATE_IRDATA,
- STATE_RESET,
+ STATE_COMMAND_NO_RESP,
STATE_COMMAND,
STATE_TX,
};
@@ -121,6 +123,7 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
len, irtoy->in);
}
break;
+ case STATE_COMMAND_NO_RESP:
case STATE_IRDATA: {
struct ir_raw_event rawir = { .pulse = irtoy->pulse };
__be16 *in = (__be16 *)irtoy->in;
@@ -166,10 +169,8 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
int err;
if (len != 1 || space > MAX_PACKET || space == 0) {
- dev_err(irtoy->dev, "packet length expected: %*phN\n",
+ dev_dbg(irtoy->dev, "packet length expected: %*phN\n",
len, irtoy->in);
- irtoy->state = STATE_IRDATA;
- complete(&irtoy->command_done);
break;
}
@@ -193,9 +194,6 @@ static void irtoy_response(struct irtoy *irtoy, u32 len)
irtoy->tx_len -= buf_len;
}
break;
- case STATE_RESET:
- dev_err(irtoy->dev, "unexpected response to reset: %*phN\n",
- len, irtoy->in);
}
}
@@ -204,7 +202,7 @@ static void irtoy_out_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
if (urb->status == 0) {
- if (irtoy->state == STATE_RESET)
+ if (irtoy->state == STATE_COMMAND_NO_RESP)
complete(&irtoy->command_done);
} else {
dev_warn(irtoy->dev, "out urb status: %d\n", urb->status);
@@ -216,10 +214,20 @@ static void irtoy_in_callback(struct urb *urb)
struct irtoy *irtoy = urb->context;
int ret;
- if (urb->status == 0)
+ switch (urb->status) {
+ case 0:
irtoy_response(irtoy, urb->actual_length);
- else
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ case -EPROTO:
+ case -EPIPE:
+ usb_unlink_urb(urb);
+ return;
+ default:
dev_dbg(irtoy->dev, "in urb status: %d\n", urb->status);
+ }
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret && ret != -ENODEV)
@@ -256,7 +264,7 @@ static int irtoy_setup(struct irtoy *irtoy)
int err;
err = irtoy_command(irtoy, COMMAND_RESET, sizeof(COMMAND_RESET),
- STATE_RESET);
+ STATE_COMMAND_NO_RESP);
if (err != 0) {
dev_err(irtoy->dev, "could not write reset command: %d\n",
err);
@@ -310,7 +318,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
buf[i] = cpu_to_be16(v);
}
- buf[count] = 0xffff;
+ buf[count] = cpu_to_be16(0xffff);
irtoy->tx_buf = buf;
irtoy->tx_len = size;
@@ -321,7 +329,7 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
// with its led on. It does not respond to any command when this
// happens. To work around this, re-enter sample mode.
err = irtoy_command(irtoy, COMMAND_SMODE_EXIT,
- sizeof(COMMAND_SMODE_EXIT), STATE_RESET);
+ sizeof(COMMAND_SMODE_EXIT), STATE_COMMAND_NO_RESP);
if (err) {
dev_err(irtoy->dev, "exit sample mode: %d\n", err);
return err;
@@ -357,6 +365,27 @@ static int irtoy_tx(struct rc_dev *rc, uint *txbuf, uint count)
return count;
}
+static int irtoy_tx_carrier(struct rc_dev *rc, uint32_t carrier)
+{
+ struct irtoy *irtoy = rc->priv;
+ u8 buf[3];
+ int err;
+
+ if (carrier < 11800)
+ return -EINVAL;
+
+ buf[0] = 0x06;
+ buf[1] = DIV_ROUND_CLOSEST(48000000, 16 * carrier) - 1;
+ buf[2] = 0;
+
+ err = irtoy_command(irtoy, buf, sizeof(buf), STATE_COMMAND_NO_RESP);
+ if (err)
+ dev_err(irtoy->dev, "could not write carrier command: %d\n",
+ err);
+
+ return err;
+}
+
static int irtoy_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
@@ -436,8 +465,9 @@ static int irtoy_probe(struct usb_interface *intf,
if (err)
goto free_rcdev;
- dev_info(irtoy->dev, "version: hardware %u, firmware %u, protocol %u",
- irtoy->hw_version, irtoy->sw_version, irtoy->proto_version);
+ dev_info(irtoy->dev, "version: hardware %u, firmware %u.%u, protocol %u",
+ irtoy->hw_version, irtoy->sw_version / 10,
+ irtoy->sw_version % 10, irtoy->proto_version);
if (irtoy->sw_version < MIN_FW_VERSION) {
dev_err(irtoy->dev, "need firmware V%02u or higher",
@@ -455,6 +485,7 @@ static int irtoy_probe(struct usb_interface *intf,
rc->dev.parent = &intf->dev;
rc->priv = irtoy;
rc->tx_ir = irtoy_tx;
+ rc->s_tx_carrier = irtoy_tx_carrier;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rc->map_name = RC_MAP_RC6_MCE;
rc->rx_resolution = UNIT_US;
diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c
index 5bc23e8c6d91..4f77d4ebacdc 100644
--- a/drivers/media/rc/ite-cir.c
+++ b/drivers/media/rc/ite-cir.c
@@ -242,7 +242,7 @@ static irqreturn_t ite_cir_isr(int irq, void *data)
}
/* check for the receive interrupt */
- if (iflags & ITE_IRQ_RX_FIFO) {
+ if (iflags & (ITE_IRQ_RX_FIFO | ITE_IRQ_RX_FIFO_OVERRUN)) {
/* read the FIFO bytes */
rx_bytes = dev->params->get_rx_bytes(dev, rx_buf,
ITE_RX_FIFO_LEN);
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index e03dd1f0144f..d09bee82c04c 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1386,6 +1386,7 @@ static void mceusb_dev_recv(struct urb *urb)
case -ECONNRESET:
case -ENOENT:
case -EILSEQ:
+ case -EPROTO:
case -ESHUTDOWN:
usb_unlink_urb(urb);
return;
@@ -1612,6 +1613,7 @@ static struct rc_dev *mceusb_init_rc_dev(struct mceusb_dev *ir)
rc->dev.parent = dev;
rc->priv = ir;
rc->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
+ rc->rx_resolution = MCE_TIME_UNIT;
rc->min_timeout = MCE_TIME_UNIT;
rc->timeout = MS_TO_US(100);
if (!mceusb_model[ir->model].broken_irtimeout) {
diff --git a/drivers/media/rc/meson-ir-tx.c b/drivers/media/rc/meson-ir-tx.c
index 3055f8e1b6ff..c22cd26a5c07 100644
--- a/drivers/media/rc/meson-ir-tx.c
+++ b/drivers/media/rc/meson-ir-tx.c
@@ -395,7 +395,6 @@ static struct platform_driver meson_irtx_pd = {
.remove = meson_irtx_remove,
.driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.of_match_table = meson_irtx_dt_match,
},
};
diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c
index dad55950dfc6..4b769111f78e 100644
--- a/drivers/media/rc/meson-ir.c
+++ b/drivers/media/rc/meson-ir.c
@@ -102,7 +102,6 @@ static int meson_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
- struct resource *res;
const char *map_name;
struct meson_ir *ir;
int irq, ret;
@@ -111,8 +110,7 @@ static int meson_ir_probe(struct platform_device *pdev)
if (!ir)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->reg = devm_ioremap_resource(dev, res);
+ ir->reg = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->reg))
return PTR_ERR(ir->reg);
diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
index 65a136c0fac2..840e7aec5c21 100644
--- a/drivers/media/rc/mtk-cir.c
+++ b/drivers/media/rc/mtk-cir.c
@@ -292,7 +292,6 @@ static int mtk_ir_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
- struct resource *res;
struct mtk_ir *ir;
u32 val;
int ret = 0;
@@ -320,8 +319,7 @@ static int mtk_ir_probe(struct platform_device *pdev)
ir->bus = ir->clk;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base))
return PTR_ERR(ir->base);
diff --git a/drivers/media/rc/sir_ir.c b/drivers/media/rc/sir_ir.c
deleted file mode 100644
index 6ec96dc34586..000000000000
--- a/drivers/media/rc/sir_ir.c
+++ /dev/null
@@ -1,438 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * IR SIR driver, (C) 2000 Milan Pikula <www@fornax.sk>
- *
- * sir_ir - Device driver for use with SIR (serial infra red)
- * mode of IrDA on many notebooks.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/serial_reg.h>
-#include <linux/ktime.h>
-#include <linux/delay.h>
-#include <linux/platform_device.h>
-
-#include <media/rc-core.h>
-
-/* SECTION: Definitions */
-#define PULSE '['
-
-/* 9bit * 1s/115200bit in milli seconds = 78.125ms*/
-#define TIME_CONST (9000000ul / 115200ul)
-
-/* timeout for sequences in jiffies (=5/100s), must be longer than TIME_CONST */
-#define SIR_TIMEOUT (HZ * 5 / 100)
-
-/* onboard sir ports are typically com3 */
-static int io = 0x3e8;
-static int irq = 4;
-static int threshold = 3;
-
-static DEFINE_SPINLOCK(timer_lock);
-static struct timer_list timerlist;
-/* time of last signal change detected */
-static ktime_t last;
-/* time of last UART data ready interrupt */
-static ktime_t last_intr_time;
-static int last_value;
-static struct rc_dev *rcdev;
-
-static struct platform_device *sir_ir_dev;
-
-static DEFINE_SPINLOCK(hardware_lock);
-
-/* SECTION: Prototypes */
-
-/* Communication with user-space */
-static void add_read_queue(int flag, unsigned long val);
-/* Hardware */
-static irqreturn_t sir_interrupt(int irq, void *dev_id);
-static void send_space(unsigned long len);
-static void send_pulse(unsigned long len);
-static int init_hardware(void);
-static void drop_hardware(void);
-/* Initialisation */
-
-static inline unsigned int sinp(int offset)
-{
- return inb(io + offset);
-}
-
-static inline void soutp(int offset, int value)
-{
- outb(value, io + offset);
-}
-
-/* SECTION: Communication with user-space */
-static int sir_tx_ir(struct rc_dev *dev, unsigned int *tx_buf,
- unsigned int count)
-{
- unsigned long flags;
- int i;
-
- local_irq_save(flags);
- for (i = 0; i < count;) {
- if (tx_buf[i])
- send_pulse(tx_buf[i]);
- i++;
- if (i >= count)
- break;
- if (tx_buf[i])
- send_space(tx_buf[i]);
- i++;
- }
- local_irq_restore(flags);
-
- return count;
-}
-
-static void add_read_queue(int flag, unsigned long val)
-{
- struct ir_raw_event ev = {};
-
- pr_debug("add flag %d with val %lu\n", flag, val);
-
- /*
- * statistically, pulses are ~TIME_CONST/2 too long. we could
- * maybe make this more exact, but this is good enough
- */
- if (flag) {
- /* pulse */
- if (val > TIME_CONST / 2)
- val -= TIME_CONST / 2;
- else /* should not ever happen */
- val = 1;
- ev.pulse = true;
- } else {
- val += TIME_CONST / 2;
- }
- ev.duration = val;
-
- ir_raw_event_store_with_filter(rcdev, &ev);
-}
-
-/* SECTION: Hardware */
-static void sir_timeout(struct timer_list *unused)
-{
- /*
- * if last received signal was a pulse, but receiving stopped
- * within the 9 bit frame, we need to finish this pulse and
- * simulate a signal change to from pulse to space. Otherwise
- * upper layers will receive two sequences next time.
- */
-
- unsigned long flags;
- unsigned long pulse_end;
-
- /* avoid interference with interrupt */
- spin_lock_irqsave(&timer_lock, flags);
- if (last_value) {
- /* clear unread bits in UART and restart */
- outb(UART_FCR_CLEAR_RCVR, io + UART_FCR);
- /* determine 'virtual' pulse end: */
- pulse_end = min_t(unsigned long,
- ktime_us_delta(last, last_intr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "timeout add %d for %lu usec\n",
- last_value, pulse_end);
- add_read_queue(last_value, pulse_end);
- last_value = 0;
- last = last_intr_time;
- }
- spin_unlock_irqrestore(&timer_lock, flags);
- ir_raw_event_handle(rcdev);
-}
-
-static irqreturn_t sir_interrupt(int irq, void *dev_id)
-{
- unsigned char data;
- ktime_t curr_time;
- unsigned long delt;
- unsigned long deltintr;
- unsigned long flags;
- int counter = 0;
- int iir, lsr;
-
- while ((iir = inb(io + UART_IIR) & UART_IIR_ID)) {
- if (++counter > 256) {
- dev_err(&sir_ir_dev->dev, "Trapped in interrupt");
- break;
- }
-
- switch (iir & UART_IIR_ID) { /* FIXME: this needs to be thinned out */
- case UART_IIR_MSI:
- (void)inb(io + UART_MSR);
- break;
- case UART_IIR_RLSI:
- case UART_IIR_THRI:
- (void)inb(io + UART_LSR);
- break;
- case UART_IIR_RDI:
- /* avoid interference with timer */
- spin_lock_irqsave(&timer_lock, flags);
- do {
- del_timer(&timerlist);
- data = inb(io + UART_RX);
- curr_time = ktime_get();
- delt = min_t(unsigned long,
- ktime_us_delta(last, curr_time),
- IR_MAX_DURATION);
- deltintr = min_t(unsigned long,
- ktime_us_delta(last_intr_time,
- curr_time),
- IR_MAX_DURATION);
- dev_dbg(&sir_ir_dev->dev, "t %lu, d %d\n",
- deltintr, (int)data);
- /*
- * if nothing came in last X cycles,
- * it was gap
- */
- if (deltintr > TIME_CONST * threshold) {
- if (last_value) {
- dev_dbg(&sir_ir_dev->dev, "GAP\n");
- /* simulate signal change */
- add_read_queue(last_value,
- delt -
- deltintr);
- last_value = 0;
- last = last_intr_time;
- delt = deltintr;
- }
- }
- data = 1;
- if (data ^ last_value) {
- /*
- * deltintr > 2*TIME_CONST, remember?
- * the other case is timeout
- */
- add_read_queue(last_value,
- delt - TIME_CONST);
- last_value = data;
- last = curr_time;
- last = ktime_sub_us(last,
- TIME_CONST);
- }
- last_intr_time = curr_time;
- if (data) {
- /*
- * start timer for end of
- * sequence detection
- */
- timerlist.expires = jiffies +
- SIR_TIMEOUT;
- add_timer(&timerlist);
- }
-
- lsr = inb(io + UART_LSR);
- } while (lsr & UART_LSR_DR); /* data ready */
- spin_unlock_irqrestore(&timer_lock, flags);
- break;
- default:
- break;
- }
- }
- ir_raw_event_handle(rcdev);
- return IRQ_RETVAL(IRQ_HANDLED);
-}
-
-static void send_space(unsigned long len)
-{
- usleep_range(len, len + 25);
-}
-
-static void send_pulse(unsigned long len)
-{
- long bytes_out = len / TIME_CONST;
-
- if (bytes_out == 0)
- bytes_out++;
-
- while (bytes_out--) {
- outb(PULSE, io + UART_TX);
- /* FIXME: proper waiting is needed, as in char/serial.c */
- while (!(inb(io + UART_LSR) & UART_LSR_THRE))
- ;
- }
-}
-
-static int init_hardware(void)
-{
- u8 scratch, scratch2, scratch3;
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /*
- * This is a simple port existence test, borrowed from the autoconfig
- * function in drivers/tty/serial/8250/8250_port.c
- */
- scratch = sinp(UART_IER);
- soutp(UART_IER, 0);
-#ifdef __i386__
- outb(0xff, 0x080);
-#endif
- scratch2 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, 0x0f);
-#ifdef __i386__
- outb(0x00, 0x080);
-#endif
- scratch3 = sinp(UART_IER) & 0x0f;
- soutp(UART_IER, scratch);
- if (scratch2 != 0 || scratch3 != 0x0f) {
- /* we fail, there's nothing here */
- spin_unlock_irqrestore(&hardware_lock, flags);
- pr_err("port existence test failed, cannot continue\n");
- return -ENODEV;
- }
-
- /* reset UART */
- outb(0, io + UART_MCR);
- outb(0, io + UART_IER);
- /* init UART */
- /* set DLAB, speed = 115200 */
- outb(UART_LCR_DLAB | UART_LCR_WLEN7, io + UART_LCR);
- outb(1, io + UART_DLL); outb(0, io + UART_DLM);
- /* 7N1+start = 9 bits at 115200 ~ 3 bits at 44000 */
- outb(UART_LCR_WLEN7, io + UART_LCR);
- /* FIFO operation */
- outb(UART_FCR_ENABLE_FIFO, io + UART_FCR);
- /* interrupts */
- /* outb(UART_IER_RLSI|UART_IER_RDI|UART_IER_THRI, io + UART_IER); */
- outb(UART_IER_RDI, io + UART_IER);
- /* turn on UART */
- outb(UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2, io + UART_MCR);
- spin_unlock_irqrestore(&hardware_lock, flags);
-
- return 0;
-}
-
-static void drop_hardware(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&hardware_lock, flags);
-
- /* turn off interrupts */
- outb(0, io + UART_IER);
-
- spin_unlock_irqrestore(&hardware_lock, flags);
-}
-
-/* SECTION: Initialisation */
-static int sir_ir_probe(struct platform_device *dev)
-{
- int retval;
-
- rcdev = devm_rc_allocate_device(&sir_ir_dev->dev, RC_DRIVER_IR_RAW);
- if (!rcdev)
- return -ENOMEM;
-
- rcdev->device_name = "SIR IrDA port";
- rcdev->input_phys = KBUILD_MODNAME "/input0";
- rcdev->input_id.bustype = BUS_HOST;
- rcdev->input_id.vendor = 0x0001;
- rcdev->input_id.product = 0x0001;
- rcdev->input_id.version = 0x0100;
- rcdev->tx_ir = sir_tx_ir;
- rcdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
- rcdev->driver_name = KBUILD_MODNAME;
- rcdev->map_name = RC_MAP_RC6_MCE;
- rcdev->timeout = IR_DEFAULT_TIMEOUT;
- rcdev->dev.parent = &sir_ir_dev->dev;
-
- timer_setup(&timerlist, sir_timeout, 0);
-
- /* get I/O port access and IRQ line */
- if (!devm_request_region(&sir_ir_dev->dev, io, 8, KBUILD_MODNAME)) {
- pr_err("i/o port 0x%.4x already in use.\n", io);
- return -EBUSY;
- }
- retval = devm_request_irq(&sir_ir_dev->dev, irq, sir_interrupt, 0,
- KBUILD_MODNAME, NULL);
- if (retval < 0) {
- pr_err("IRQ %d already in use.\n", irq);
- return retval;
- }
-
- retval = init_hardware();
- if (retval) {
- del_timer_sync(&timerlist);
- return retval;
- }
-
- pr_info("I/O port 0x%.4x, IRQ %d.\n", io, irq);
-
- retval = devm_rc_register_device(&sir_ir_dev->dev, rcdev);
- if (retval < 0)
- return retval;
-
- return 0;
-}
-
-static int sir_ir_remove(struct platform_device *dev)
-{
- drop_hardware();
- del_timer_sync(&timerlist);
- return 0;
-}
-
-static struct platform_driver sir_ir_driver = {
- .probe = sir_ir_probe,
- .remove = sir_ir_remove,
- .driver = {
- .name = "sir_ir",
- },
-};
-
-static int __init sir_ir_init(void)
-{
- int retval;
-
- retval = platform_driver_register(&sir_ir_driver);
- if (retval)
- return retval;
-
- sir_ir_dev = platform_device_alloc("sir_ir", 0);
- if (!sir_ir_dev) {
- retval = -ENOMEM;
- goto pdev_alloc_fail;
- }
-
- retval = platform_device_add(sir_ir_dev);
- if (retval)
- goto pdev_add_fail;
-
- return 0;
-
-pdev_add_fail:
- platform_device_put(sir_ir_dev);
-pdev_alloc_fail:
- platform_driver_unregister(&sir_ir_driver);
- return retval;
-}
-
-static void __exit sir_ir_exit(void)
-{
- platform_device_unregister(sir_ir_dev);
- platform_driver_unregister(&sir_ir_driver);
-}
-
-module_init(sir_ir_init);
-module_exit(sir_ir_exit);
-
-MODULE_DESCRIPTION("Infrared receiver driver for SIR type serial ports");
-MODULE_AUTHOR("Milan Pikula");
-MODULE_LICENSE("GPL");
-
-module_param_hw(io, int, ioport, 0444);
-MODULE_PARM_DESC(io, "I/O address base (0x3f8 or 0x2f8)");
-
-module_param_hw(irq, int, irq, 0444);
-MODULE_PARM_DESC(irq, "Interrupt (4 or 3)");
-
-module_param(threshold, int, 0444);
-MODULE_PARM_DESC(threshold, "space detection threshold (3)");
diff --git a/drivers/media/rc/st_rc.c b/drivers/media/rc/st_rc.c
index d79d1e3996b2..4e419dbbacd3 100644
--- a/drivers/media/rc/st_rc.c
+++ b/drivers/media/rc/st_rc.c
@@ -231,7 +231,6 @@ static int st_rc_probe(struct platform_device *pdev)
int ret = -EINVAL;
struct rc_dev *rdev;
struct device *dev = &pdev->dev;
- struct resource *res;
struct st_rc_device *rc_dev;
struct device_node *np = pdev->dev.of_node;
const char *rx_mode;
@@ -274,9 +273,7 @@ static int st_rc_probe(struct platform_device *pdev)
goto err;
}
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-
- rc_dev->base = devm_ioremap_resource(dev, res);
+ rc_dev->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rc_dev->base)) {
ret = PTR_ERR(rc_dev->base);
goto err;
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index 9cd765e31c49..1cc5ebb85b6c 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -293,6 +293,7 @@ static struct rc_dev *streamzap_init_rc_dev(struct streamzap_ir *sz)
rdev->allowed_protocols = RC_PROTO_BIT_ALL_IR_DECODER;
rdev->driver_name = DRIVER_NAME;
rdev->map_name = RC_MAP_STREAMZAP;
+ rdev->rx_resolution = SZ_RESOLUTION;
ret = rc_register_device(rdev);
if (ret < 0) {
diff --git a/drivers/media/rc/sunxi-cir.c b/drivers/media/rc/sunxi-cir.c
index 168e1d2c876a..391a591c1b75 100644
--- a/drivers/media/rc/sunxi-cir.c
+++ b/drivers/media/rc/sunxi-cir.c
@@ -255,7 +255,6 @@ static int sunxi_ir_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *dn = dev->of_node;
const struct sunxi_ir_quirks *quirks;
- struct resource *res;
struct sunxi_ir *ir;
u32 b_clk_freq = SUNXI_IR_BASE_CLK;
@@ -301,8 +300,7 @@ static int sunxi_ir_probe(struct platform_device *pdev)
dev_dbg(dev, "set base clock frequency to %d Hz.\n", b_clk_freq);
/* IO */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- ir->base = devm_ioremap_resource(dev, res);
+ ir->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(ir->base)) {
return PTR_ERR(ir->base);
}
diff --git a/drivers/media/spi/cxd2880-spi.c b/drivers/media/spi/cxd2880-spi.c
index b91a1e845b97..506f52c1af10 100644
--- a/drivers/media/spi/cxd2880-spi.c
+++ b/drivers/media/spi/cxd2880-spi.c
@@ -618,7 +618,7 @@ fail_frontend:
fail_attach:
dvb_unregister_adapter(&dvb_spi->adapter);
fail_adapter:
- if (!dvb_spi->vcc_supply)
+ if (dvb_spi->vcc_supply)
regulator_disable(dvb_spi->vcc_supply);
fail_regulator:
kfree(dvb_spi);
diff --git a/drivers/media/test-drivers/vidtv/vidtv_bridge.c b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
index 75617709c8ce..82620613d56b 100644
--- a/drivers/media/test-drivers/vidtv/vidtv_bridge.c
+++ b/drivers/media/test-drivers/vidtv/vidtv_bridge.c
@@ -564,6 +564,10 @@ static int vidtv_bridge_remove(struct platform_device *pdev)
static void vidtv_bridge_dev_release(struct device *dev)
{
+ struct vidtv_dvb *dvb;
+
+ dvb = dev_get_drvdata(dev);
+ kfree(dvb);
}
static struct platform_device vidtv_bridge_dev = {
diff --git a/drivers/media/test-drivers/vim2m.c b/drivers/media/test-drivers/vim2m.c
index d714fe50afe5..47575490e74a 100644
--- a/drivers/media/test-drivers/vim2m.c
+++ b/drivers/media/test-drivers/vim2m.c
@@ -12,11 +12,6 @@
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the
- * License, or (at your option) any later version
*/
#include <linux/module.h>
#include <linux/delay.h>
diff --git a/drivers/media/test-drivers/vimc/vimc-scaler.c b/drivers/media/test-drivers/vimc/vimc-scaler.c
index 06880dd0b6ac..820b8f5b502f 100644
--- a/drivers/media/test-drivers/vimc/vimc-scaler.c
+++ b/drivers/media/test-drivers/vimc/vimc-scaler.c
@@ -6,6 +6,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/v4l2-mediabus.h>
#include <media/v4l2-rect.h>
@@ -13,11 +14,11 @@
#include "vimc-common.h"
-static unsigned int sca_mult = 3;
-module_param(sca_mult, uint, 0000);
-MODULE_PARM_DESC(sca_mult, " the image size multiplier");
-
-#define MAX_ZOOM 8
+/* Pad identifier */
+enum vic_sca_pad {
+ VIMC_SCA_SINK = 0,
+ VIMC_SCA_SRC = 1,
+};
#define VIMC_SCA_FMT_WIDTH_DEFAULT 640
#define VIMC_SCA_FMT_HEIGHT_DEFAULT 480
@@ -25,19 +26,16 @@ MODULE_PARM_DESC(sca_mult, " the image size multiplier");
struct vimc_sca_device {
struct vimc_ent_device ved;
struct v4l2_subdev sd;
- /* NOTE: the source fmt is the same as the sink
- * with the width and hight multiplied by mult
- */
- struct v4l2_mbus_framefmt sink_fmt;
struct v4l2_rect crop_rect;
+ /* Frame format for both sink and src pad */
+ struct v4l2_mbus_framefmt fmt[2];
/* Values calculated when the stream starts */
u8 *src_frame;
- unsigned int src_line_size;
unsigned int bpp;
struct media_pad pads[2];
};
-static const struct v4l2_mbus_framefmt sink_fmt_default = {
+static const struct v4l2_mbus_framefmt fmt_default = {
.width = VIMC_SCA_FMT_WIDTH_DEFAULT,
.height = VIMC_SCA_FMT_HEIGHT_DEFAULT,
.code = MEDIA_BUS_FMT_RGB888_1X24,
@@ -72,17 +70,6 @@ vimc_sca_get_crop_bound_sink(const struct v4l2_mbus_framefmt *sink_fmt)
return r;
}
-static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
- const struct v4l2_mbus_framefmt *sink_fmt)
-{
- const struct v4l2_rect sink_rect =
- vimc_sca_get_crop_bound_sink(sink_fmt);
-
- /* Disallow rectangles smaller than the minimal one. */
- v4l2_rect_set_min_size(r, &crop_rect_min);
- v4l2_rect_map_inside(r, &sink_rect);
-}
-
static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state)
{
@@ -90,19 +77,14 @@ static int vimc_sca_init_cfg(struct v4l2_subdev *sd,
struct v4l2_rect *r;
unsigned int i;
- mf = v4l2_subdev_get_try_format(sd, sd_state, 0);
- *mf = sink_fmt_default;
-
- r = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- *r = crop_rect_default;
-
- for (i = 1; i < sd->entity.num_pads; i++) {
+ for (i = 0; i < sd->entity.num_pads; i++) {
mf = v4l2_subdev_get_try_format(sd, sd_state, i);
- *mf = sink_fmt_default;
- mf->width = mf->width * sca_mult;
- mf->height = mf->height * sca_mult;
+ *mf = fmt_default;
}
+ r = v4l2_subdev_get_try_crop(sd, sd_state, VIMC_SCA_SINK);
+ *r = crop_rect_default;
+
return 0;
}
@@ -144,112 +126,108 @@ static int vimc_sca_enum_frame_size(struct v4l2_subdev *sd,
fse->min_width = VIMC_FRAME_MIN_WIDTH;
fse->min_height = VIMC_FRAME_MIN_HEIGHT;
- if (VIMC_IS_SINK(fse->pad)) {
- fse->max_width = VIMC_FRAME_MAX_WIDTH;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT;
- } else {
- fse->max_width = VIMC_FRAME_MAX_WIDTH * MAX_ZOOM;
- fse->max_height = VIMC_FRAME_MAX_HEIGHT * MAX_ZOOM;
- }
+ fse->max_width = VIMC_FRAME_MAX_WIDTH;
+ fse->max_height = VIMC_FRAME_MAX_HEIGHT;
return 0;
}
+static struct v4l2_mbus_framefmt *
+vimc_sca_pad_format(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state, u32 pad,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_format(&vsca->sd, sd_state, pad);
+ else
+ return &vsca->fmt[pad];
+}
+
+static struct v4l2_rect *
+vimc_sca_pad_crop(struct vimc_sca_device *vsca,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which)
+{
+ if (which == V4L2_SUBDEV_FORMAT_TRY)
+ return v4l2_subdev_get_try_crop(&vsca->sd, sd_state,
+ VIMC_SCA_SINK);
+ else
+ return &vsca->crop_rect;
+}
+
static int vimc_sca_get_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_rect *crop_rect;
-
- /* Get the current sink format */
- if (format->which == V4L2_SUBDEV_FORMAT_TRY) {
- format->format = *v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- } else {
- format->format = vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- }
-
- /* Scale the frame size for the source pad */
- if (VIMC_IS_SRC(format->pad)) {
- format->format.width = crop_rect->width * sca_mult;
- format->format.height = crop_rect->height * sca_mult;
- }
+ format->format = *vimc_sca_pad_format(vsca, sd_state, format->pad,
+ format->which);
return 0;
}
-static void vimc_sca_adjust_sink_fmt(struct v4l2_mbus_framefmt *fmt)
-{
- const struct vimc_pix_map *vpix;
-
- /* Only accept code in the pix map table in non bayer format */
- vpix = vimc_pix_map_by_code(fmt->code);
- if (!vpix || vpix->bayer)
- fmt->code = sink_fmt_default.code;
-
- fmt->width = clamp_t(u32, fmt->width, VIMC_FRAME_MIN_WIDTH,
- VIMC_FRAME_MAX_WIDTH) & ~1;
- fmt->height = clamp_t(u32, fmt->height, VIMC_FRAME_MIN_HEIGHT,
- VIMC_FRAME_MAX_HEIGHT) & ~1;
-
- if (fmt->field == V4L2_FIELD_ANY)
- fmt->field = sink_fmt_default.field;
-
- vimc_colorimetry_clamp(fmt);
-}
-
static int vimc_sca_set_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
- struct v4l2_subdev_format *fmt)
+ struct v4l2_subdev_format *format)
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
- struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
+ struct v4l2_mbus_framefmt *fmt;
- if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ /* Do not change the active format while stream is on */
+ if (format->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
+ fmt = vimc_sca_pad_format(vsca, sd_state, format->pad, format->which);
+
+ /*
+ * The media bus code and colorspace can only be changed on the sink
+ * pad, the source pad only follows.
+ */
+ if (format->pad == VIMC_SCA_SINK) {
+ const struct vimc_pix_map *vpix;
+
+ /* Only accept code in the pix map table in non bayer format. */
+ vpix = vimc_pix_map_by_code(format->format.code);
+ if (vpix && !vpix->bayer)
+ fmt->code = format->format.code;
+ else
+ fmt->code = fmt_default.code;
+
+ /* Clamp the colorspace to valid values. */
+ fmt->colorspace = format->format.colorspace;
+ fmt->ycbcr_enc = format->format.ycbcr_enc;
+ fmt->quantization = format->format.quantization;
+ fmt->xfer_func = format->format.xfer_func;
+ vimc_colorimetry_clamp(fmt);
}
+ /* Clamp and align the width and height */
+ fmt->width = clamp_t(u32, format->format.width, VIMC_FRAME_MIN_WIDTH,
+ VIMC_FRAME_MAX_WIDTH) & ~1;
+ fmt->height = clamp_t(u32, format->format.height, VIMC_FRAME_MIN_HEIGHT,
+ VIMC_FRAME_MAX_HEIGHT) & ~1;
+
/*
- * Do not change the format of the source pad,
- * it is propagated from the sink
+ * Propagate the sink pad format to the crop rectangle and the source
+ * pad.
*/
- if (VIMC_IS_SRC(fmt->pad)) {
- fmt->format = *sink_fmt;
- fmt->format.width = crop_rect->width * sca_mult;
- fmt->format.height = crop_rect->height * sca_mult;
- } else {
- /* Set the new format in the sink pad */
- vimc_sca_adjust_sink_fmt(&fmt->format);
-
- dev_dbg(vsca->ved.dev, "%s: sink format update: "
- "old:%dx%d (0x%x, %d, %d, %d, %d) "
- "new:%dx%d (0x%x, %d, %d, %d, %d)\n", vsca->sd.name,
- /* old */
- sink_fmt->width, sink_fmt->height, sink_fmt->code,
- sink_fmt->colorspace, sink_fmt->quantization,
- sink_fmt->xfer_func, sink_fmt->ycbcr_enc,
- /* new */
- fmt->format.width, fmt->format.height, fmt->format.code,
- fmt->format.colorspace, fmt->format.quantization,
- fmt->format.xfer_func, fmt->format.ycbcr_enc);
-
- *sink_fmt = fmt->format;
-
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(crop_rect, sink_fmt);
+ if (format->pad == VIMC_SCA_SINK) {
+ struct v4l2_mbus_framefmt *src_fmt;
+ struct v4l2_rect *crop;
+
+ crop = vimc_sca_pad_crop(vsca, sd_state, format->which);
+ crop->width = fmt->width;
+ crop->height = fmt->height;
+ crop->top = 0;
+ crop->left = 0;
+
+ src_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SRC,
+ format->which);
+ *src_fmt = *fmt;
}
+ format->format = *fmt;
+
return 0;
}
@@ -259,24 +237,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
{
struct vimc_sca_device *vsca = v4l2_get_subdevdata(sd);
struct v4l2_mbus_framefmt *sink_fmt;
- struct v4l2_rect *crop_rect;
if (VIMC_IS_SRC(sel->pad))
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- sink_fmt = &vsca->sink_fmt;
- crop_rect = &vsca->crop_rect;
- } else {
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- }
-
switch (sel->target) {
case V4L2_SEL_TGT_CROP:
- sel->r = *crop_rect;
+ sel->r = *vimc_sca_pad_crop(vsca, sd_state, sel->which);
break;
case V4L2_SEL_TGT_CROP_BOUNDS:
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
sel->r = vimc_sca_get_crop_bound_sink(sink_fmt);
break;
default:
@@ -286,6 +257,17 @@ static int vimc_sca_get_selection(struct v4l2_subdev *sd,
return 0;
}
+static void vimc_sca_adjust_sink_crop(struct v4l2_rect *r,
+ const struct v4l2_mbus_framefmt *sink_fmt)
+{
+ const struct v4l2_rect sink_rect =
+ vimc_sca_get_crop_bound_sink(sink_fmt);
+
+ /* Disallow rectangles smaller than the minimal one. */
+ v4l2_rect_set_min_size(r, &crop_rect_min);
+ v4l2_rect_map_inside(r, &sink_rect);
+}
+
static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_selection *sel)
@@ -294,30 +276,18 @@ static int vimc_sca_set_selection(struct v4l2_subdev *sd,
struct v4l2_mbus_framefmt *sink_fmt;
struct v4l2_rect *crop_rect;
- if (VIMC_IS_SRC(sel->pad))
+ /* Only support setting the crop of the sink pad */
+ if (VIMC_IS_SRC(sel->pad) || sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
- if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
- /* Do not change the format while stream is on */
- if (vsca->src_frame)
- return -EBUSY;
+ if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE && vsca->src_frame)
+ return -EBUSY;
- crop_rect = &vsca->crop_rect;
- sink_fmt = &vsca->sink_fmt;
- } else {
- crop_rect = v4l2_subdev_get_try_crop(sd, sd_state, 0);
- sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, 0);
- }
-
- switch (sel->target) {
- case V4L2_SEL_TGT_CROP:
- /* Do the crop, but respect the current bounds */
- vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
- *crop_rect = sel->r;
- break;
- default:
- return -EINVAL;
- }
+ crop_rect = vimc_sca_pad_crop(vsca, sd_state, sel->which);
+ sink_fmt = vimc_sca_pad_format(vsca, sd_state, VIMC_SCA_SINK,
+ sel->which);
+ vimc_sca_adjust_sink_crop(&sel->r, sink_fmt);
+ *crop_rect = sel->r;
return 0;
}
@@ -344,16 +314,12 @@ static int vimc_sca_s_stream(struct v4l2_subdev *sd, int enable)
return 0;
/* Save the bytes per pixel of the sink */
- vpix = vimc_pix_map_by_code(vsca->sink_fmt.code);
+ vpix = vimc_pix_map_by_code(vsca->fmt[VIMC_SCA_SINK].code);
vsca->bpp = vpix->bpp;
- /* Calculate the width in bytes of the src frame */
- vsca->src_line_size = vsca->crop_rect.width *
- sca_mult * vsca->bpp;
-
/* Calculate the frame size of the source pad */
- frame_size = vsca->src_line_size * vsca->crop_rect.height *
- sca_mult;
+ frame_size = vsca->fmt[VIMC_SCA_SRC].width
+ * vsca->fmt[VIMC_SCA_SRC].height * vsca->bpp;
/* Allocate the frame buffer. Use vmalloc to be able to
* allocate a large amount of memory
@@ -382,77 +348,32 @@ static const struct v4l2_subdev_ops vimc_sca_ops = {
.video = &vimc_sca_video_ops,
};
-static void vimc_sca_fill_pix(u8 *const ptr,
- const u8 *const pixel,
- const unsigned int bpp)
-{
- unsigned int i;
-
- /* copy the pixel to the pointer */
- for (i = 0; i < bpp; i++)
- ptr[i] = pixel[i];
-}
-
-static void vimc_sca_scale_pix(const struct vimc_sca_device *const vsca,
- unsigned int lin, unsigned int col,
- const u8 *const sink_frame)
-{
- const struct v4l2_rect crop_rect = vsca->crop_rect;
- unsigned int i, j, index;
- const u8 *pixel;
-
- /* Point to the pixel value in position (lin, col) in the sink frame */
- index = VIMC_FRAME_INDEX(lin, col,
- vsca->sink_fmt.width,
- vsca->bpp);
- pixel = &sink_frame[index];
-
- dev_dbg(vsca->ved.dev,
- "sca: %s: --- scale_pix sink pos %dx%d, index %d ---\n",
- vsca->sd.name, lin, col, index);
-
- /* point to the place we are going to put the first pixel
- * in the scaled src frame
- */
- lin -= crop_rect.top;
- col -= crop_rect.left;
- index = VIMC_FRAME_INDEX(lin * sca_mult, col * sca_mult,
- crop_rect.width * sca_mult, vsca->bpp);
-
- dev_dbg(vsca->ved.dev, "sca: %s: scale_pix src pos %dx%d, index %d\n",
- vsca->sd.name, lin * sca_mult, col * sca_mult, index);
-
- /* Repeat this pixel mult times */
- for (i = 0; i < sca_mult; i++) {
- /* Iterate through each beginning of a
- * pixel repetition in a line
- */
- for (j = 0; j < sca_mult * vsca->bpp; j += vsca->bpp) {
- dev_dbg(vsca->ved.dev,
- "sca: %s: sca: scale_pix src pos %d\n",
- vsca->sd.name, index + j);
-
- /* copy the pixel to the position index + j */
- vimc_sca_fill_pix(&vsca->src_frame[index + j],
- pixel, vsca->bpp);
- }
-
- /* move the index to the next line */
- index += vsca->src_line_size;
- }
-}
-
static void vimc_sca_fill_src_frame(const struct vimc_sca_device *const vsca,
const u8 *const sink_frame)
{
- const struct v4l2_rect r = vsca->crop_rect;
- unsigned int i, j;
-
- /* Scale each pixel from the original sink frame */
- /* TODO: implement scale down, only scale up is supported for now */
- for (i = r.top; i < r.top + r.height; i++)
- for (j = r.left; j < r.left + r.width; j++)
- vimc_sca_scale_pix(vsca, i, j, sink_frame);
+ const struct v4l2_mbus_framefmt *src_fmt = &vsca->fmt[VIMC_SCA_SRC];
+ const struct v4l2_rect *r = &vsca->crop_rect;
+ unsigned int snk_width = vsca->fmt[VIMC_SCA_SINK].width;
+ unsigned int src_x, src_y;
+ u8 *walker = vsca->src_frame;
+
+ /* Set each pixel at the src_frame to its sink_frame equivalent */
+ for (src_y = 0; src_y < src_fmt->height; src_y++) {
+ unsigned int snk_y, y_offset;
+
+ snk_y = (src_y * r->height) / src_fmt->height + r->top;
+ y_offset = snk_y * snk_width * vsca->bpp;
+
+ for (src_x = 0; src_x < src_fmt->width; src_x++) {
+ unsigned int snk_x, x_offset, index;
+
+ snk_x = (src_x * r->width) / src_fmt->width + r->left;
+ x_offset = snk_x * vsca->bpp;
+ index = y_offset + x_offset;
+ memcpy(walker, &sink_frame[index], vsca->bpp);
+ walker += vsca->bpp;
+ }
+ }
}
static void *vimc_sca_process_frame(struct vimc_ent_device *ved,
@@ -492,8 +413,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
return ERR_PTR(-ENOMEM);
/* Initialize ved and sd */
- vsca->pads[0].flags = MEDIA_PAD_FL_SINK;
- vsca->pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ vsca->pads[VIMC_SCA_SINK].flags = MEDIA_PAD_FL_SINK;
+ vsca->pads[VIMC_SCA_SRC].flags = MEDIA_PAD_FL_SOURCE;
ret = vimc_ent_sd_register(&vsca->ved, &vsca->sd, v4l2_dev,
vcfg_name,
@@ -508,7 +429,8 @@ static struct vimc_ent_device *vimc_sca_add(struct vimc_device *vimc,
vsca->ved.dev = vimc->mdev.dev;
/* Initialize the frame format */
- vsca->sink_fmt = sink_fmt_default;
+ vsca->fmt[VIMC_SCA_SINK] = fmt_default;
+ vsca->fmt[VIMC_SCA_SRC] = fmt_default;
/* Initialize the crop selection */
vsca->crop_rect = crop_rect_default;
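
The new vimc_sca_fill_src_frame() walks every source-pad pixel and copies the sink-pad pixel that the crop rectangle maps it to, which is plain nearest-neighbour scaling in either direction. A minimal userspace sketch of that index arithmetic, assuming an illustrative one-byte pixel format and made-up frame sizes (the driver operates on the real media-bus formats and frame buffers instead):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rect { unsigned int left, top, width, height; };

/*
 * Nearest-neighbour copy: for every (src_x, src_y) in the destination,
 * pick the sink pixel the crop rectangle maps it to.  Mirrors the index
 * arithmetic in vimc_sca_fill_src_frame(); sizes here are illustrative.
 */
static void scale_nn(uint8_t *dst, unsigned int dst_w, unsigned int dst_h,
                     const uint8_t *sink, unsigned int sink_w,
                     const struct rect *r, unsigned int bpp)
{
        uint8_t *walker = dst;

        for (unsigned int src_y = 0; src_y < dst_h; src_y++) {
                unsigned int snk_y = (src_y * r->height) / dst_h + r->top;
                unsigned int y_offset = snk_y * sink_w * bpp;

                for (unsigned int src_x = 0; src_x < dst_w; src_x++) {
                        unsigned int snk_x = (src_x * r->width) / dst_w + r->left;

                        memcpy(walker, &sink[y_offset + snk_x * bpp], bpp);
                        walker += bpp;
                }
        }
}

int main(void)
{
        /* 4x4 sink frame, 1 byte per pixel, numbered 0..15 for visibility. */
        uint8_t sink[16];
        uint8_t dst[8 * 8];
        const struct rect crop = { .left = 1, .top = 1, .width = 2, .height = 2 };

        for (int i = 0; i < 16; i++)
                sink[i] = (uint8_t)i;

        /* Upscale the 2x2 crop to 8x8. */
        scale_nn(dst, 8, 8, sink, 4, &crop, 1);

        for (int y = 0; y < 8; y++) {
                for (int x = 0; x < 8; x++)
                        printf("%2u ", dst[y * 8 + x]);
                printf("\n");
        }
        return 0;
}

Because the mapping divides by the destination size, the same loop also downscales: it simply skips sink pixels instead of repeating them, which is why the driver no longer needs sca_mult or MAX_ZOOM.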
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.c b/drivers/media/test-drivers/vivid/vivid-cec.c
index 55ea039fe5b2..1f7469ff04d5 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.c
+++ b/drivers/media/test-drivers/vivid/vivid-cec.c
@@ -5,40 +5,23 @@
* Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
*/
+#include <linux/delay.h>
#include <media/cec.h>
#include "vivid-core.h"
#include "vivid-cec.h"
-#define CEC_TIM_START_BIT_TOTAL 4500
-#define CEC_TIM_START_BIT_LOW 3700
-#define CEC_TIM_START_BIT_HIGH 800
-#define CEC_TIM_DATA_BIT_TOTAL 2400
-#define CEC_TIM_DATA_BIT_0_LOW 1500
-#define CEC_TIM_DATA_BIT_0_HIGH 900
-#define CEC_TIM_DATA_BIT_1_LOW 600
-#define CEC_TIM_DATA_BIT_1_HIGH 1800
+#define CEC_START_BIT_US 4500
+#define CEC_DATA_BIT_US 2400
+#define CEC_MARGIN_US 350
-void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
- spin_lock(&dev->cec_slock);
- while (!list_empty(&dev->cec_work_list)) {
- struct vivid_cec_work *cw =
- list_first_entry(&dev->cec_work_list,
- struct vivid_cec_work, list);
-
- spin_unlock(&dev->cec_slock);
- cancel_delayed_work_sync(&cw->work);
- spin_lock(&dev->cec_slock);
- list_del(&cw->list);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_LOW_DRIVE);
- kfree(cw);
- }
- spin_unlock(&dev->cec_slock);
-}
+struct xfer_on_bus {
+ struct cec_adapter *adap;
+ u8 status;
+};
-static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
- struct cec_adapter *adap, u8 dest)
+static bool find_dest_adap(struct vivid_dev *dev,
+ struct cec_adapter *adap, u8 dest)
{
unsigned int i;
@@ -61,116 +44,187 @@ static bool vivid_cec_find_dest_adap(struct vivid_dev *dev,
return false;
}
-static void vivid_cec_pin_adap_events(struct cec_adapter *adap, ktime_t ts,
- const struct cec_msg *msg, bool nacked)
+static bool xfer_ready(struct vivid_dev *dev)
{
- unsigned int len = nacked ? 1 : msg->len;
unsigned int i;
- bool bit;
-
- if (adap == NULL)
- return;
+ bool ready = false;
- /*
- * Suffix ULL on constant 10 makes the expression
- * CEC_TIM_START_BIT_TOTAL + 10ULL * len * CEC_TIM_DATA_BIT_TOTAL
- * to be evaluated using 64-bit unsigned arithmetic (u64), which
- * is what ktime_sub_us expects as second argument.
- */
- ts = ktime_sub_us(ts, CEC_TIM_START_BIT_TOTAL +
- 10ULL * len * CEC_TIM_DATA_BIT_TOTAL);
- cec_queue_pin_cec_event(adap, false, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- ts = ktime_add_us(ts, CEC_TIM_START_BIT_HIGH);
-
- for (i = 0; i < 10 * len; i++) {
- switch (i % 10) {
- case 0 ... 7:
- bit = msg->msg[i / 10] & (0x80 >> (i % 10));
- break;
- case 8: /* EOM */
- bit = i / 10 == msg->len - 1;
- break;
- case 9: /* ACK */
- bit = cec_msg_is_broadcast(msg) ^ nacked;
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft &&
+ dev->xfers[i].sft <= dev->cec_sft) {
+ ready = true;
break;
}
- cec_queue_pin_cec_event(adap, false, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_LOW);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_LOW);
- cec_queue_pin_cec_event(adap, true, false, ts);
- if (bit)
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_1_HIGH);
- else
- ts = ktime_add_us(ts, CEC_TIM_DATA_BIT_0_HIGH);
}
-}
+ spin_unlock(&dev->cec_xfers_slock);
-static void vivid_cec_pin_events(struct vivid_dev *dev,
- const struct cec_msg *msg, bool nacked)
-{
- ktime_t ts = ktime_get();
- unsigned int i;
-
- vivid_cec_pin_adap_events(dev->cec_rx_adap, ts, msg, nacked);
- for (i = 0; i < MAX_OUTPUTS; i++)
- vivid_cec_pin_adap_events(dev->cec_tx_adap[i], ts, msg, nacked);
+ return ready;
}
-static void vivid_cec_xfer_done_worker(struct work_struct *work)
+/*
+ * If an adapter tries to send successive messages, it must wait for the
+ * longest signal-free time between its transmissions. But, if another
+ * adapter sends a message in the interim, then the wait can be reduced
+ * because the messages are no longer successive. Make these adjustments
+ * if necessary. Should be called holding cec_xfers_slock.
+ */
+static void adjust_sfts(struct vivid_dev *dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
- struct cec_adapter *adap = cw->adap;
- u8 dest = cec_msg_destination(&cw->msg);
- bool valid_dest;
unsigned int i;
+ u8 initiator;
- valid_dest = cec_msg_is_broadcast(&cw->msg);
- if (!valid_dest)
- valid_dest = vivid_cec_find_dest_adap(dev, adap, dest);
-
- cw->tx_status = valid_dest ? CEC_TX_STATUS_OK : CEC_TX_STATUS_NACK;
- spin_lock(&dev->cec_slock);
- dev->cec_xfer_time_jiffies = 0;
- dev->cec_xfer_start_jiffies = 0;
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- vivid_cec_pin_events(dev, &cw->msg, !valid_dest);
- cec_transmit_attempt_done(cw->adap, cw->tx_status);
-
- /* Broadcast message */
- if (adap != dev->cec_rx_adap)
- cec_received_msg(dev->cec_rx_adap, &cw->msg);
- for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
- if (adap != dev->cec_tx_adap[i])
- cec_received_msg(dev->cec_tx_adap[i], &cw->msg);
- kfree(cw);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (dev->xfers[i].sft <= CEC_SIGNAL_FREE_TIME_RETRY)
+ continue;
+ initiator = dev->xfers[i].msg[0] >> 4;
+ if (initiator == dev->last_initiator)
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[i].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
+ }
}
-static void vivid_cec_xfer_try_worker(struct work_struct *work)
+/*
+ * The main emulation of the bus on which CEC adapters attempt to send
+ * messages to each other. The bus keeps track of how long it has been
+ * signal-free and accepts a pending transmission only if the state of
+ * the bus matches the transmission's signal-free requirements. It calls
+ * cec_transmit_attempt_done() for all transmits that enter the bus and
+ * cec_received_msg() for successful transmits.
+ */
+int vivid_cec_bus_thread(void *_dev)
{
- struct vivid_cec_work *cw =
- container_of(work, struct vivid_cec_work, work.work);
- struct vivid_dev *dev = cw->dev;
-
- spin_lock(&dev->cec_slock);
- if (dev->cec_xfer_time_jiffies) {
- list_del(&cw->list);
- spin_unlock(&dev->cec_slock);
- cec_transmit_attempt_done(cw->adap, CEC_TX_STATUS_ARB_LOST);
- kfree(cw);
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, dev->cec_xfer_time_jiffies);
+ u32 last_sft;
+ unsigned int i;
+ unsigned int dest;
+ ktime_t start, end;
+ s64 delta_us, retry_us;
+ struct vivid_dev *dev = _dev;
+
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ for (;;) {
+ bool first = true;
+ int wait_xfer_us = 0;
+ bool valid_dest = false;
+ int wait_arb_lost_us = 0;
+ unsigned int first_idx = 0;
+ unsigned int first_status = 0;
+ struct cec_msg first_msg = {};
+ struct xfer_on_bus xfers_on_bus[MAX_OUTPUTS] = {};
+
+ wait_event_interruptible(dev->kthread_waitq_cec, xfer_ready(dev) ||
+ kthread_should_stop());
+ if (kthread_should_stop())
+ break;
+ last_sft = dev->cec_sft;
+ dev->cec_sft = 0;
+ /*
+ * Move the messages that are ready onto the bus. The adapter with
+ * the most leading zeros will win control of the bus and any other
+ * adapters will lose arbitration.
+ */
+ spin_lock(&dev->cec_xfers_slock);
+ for (i = 0; i < ARRAY_SIZE(dev->xfers); i++) {
+ if (!dev->xfers[i].sft || dev->xfers[i].sft > last_sft)
+ continue;
+ if (first) {
+ first = false;
+ first_idx = i;
+ xfers_on_bus[first_idx].adap = dev->xfers[i].adap;
+ memcpy(first_msg.msg, dev->xfers[i].msg, dev->xfers[i].len);
+ first_msg.len = dev->xfers[i].len;
+ } else {
+ xfers_on_bus[i].adap = dev->xfers[i].adap;
+ xfers_on_bus[i].status = CEC_TX_STATUS_ARB_LOST;
+ /*
+ * For simplicity wait for all 4 bits of the initiator's
+ * address even though HDMI specification uses bit-level
+ * precision.
+ */
+ wait_arb_lost_us = 4 * CEC_DATA_BIT_US + CEC_START_BIT_US;
+ }
+ dev->xfers[i].sft = 0;
+ }
+ dev->last_initiator = cec_msg_initiator(&first_msg);
+ adjust_sfts(dev);
+ spin_unlock(&dev->cec_xfers_slock);
+
+ dest = cec_msg_destination(&first_msg);
+ valid_dest = cec_msg_is_broadcast(&first_msg);
+ if (!valid_dest)
+ valid_dest = find_dest_adap(dev, xfers_on_bus[first_idx].adap, dest);
+ if (valid_dest) {
+ first_status = CEC_TX_STATUS_OK;
+ /*
+ * Message length is in bytes, but each byte is transmitted in
+ * a block of 10 bits.
+ */
+ wait_xfer_us = first_msg.len * 10 * CEC_DATA_BIT_US;
+ } else {
+ first_status = CEC_TX_STATUS_NACK;
+ /*
+ * A message that is not acknowledged stops transmitting after
+ * the header block of 10 bits.
+ */
+ wait_xfer_us = 10 * CEC_DATA_BIT_US;
+ }
+ wait_xfer_us += CEC_START_BIT_US;
+ xfers_on_bus[first_idx].status = first_status;
+
+ /* Sleep as if sending messages on a real hardware bus. */
+ start = ktime_get();
+ if (wait_arb_lost_us) {
+ usleep_range(wait_arb_lost_us - CEC_MARGIN_US, wait_arb_lost_us);
+ for (i = 0; i < ARRAY_SIZE(xfers_on_bus); i++) {
+ if (xfers_on_bus[i].status != CEC_TX_STATUS_ARB_LOST)
+ continue;
+ cec_transmit_attempt_done(xfers_on_bus[i].adap,
+ CEC_TX_STATUS_ARB_LOST);
+ }
+ if (kthread_should_stop())
+ break;
+ }
+ wait_xfer_us -= wait_arb_lost_us;
+ usleep_range(wait_xfer_us - CEC_MARGIN_US, wait_xfer_us);
+ cec_transmit_attempt_done(xfers_on_bus[first_idx].adap, first_status);
+ if (kthread_should_stop())
+ break;
+ if (first_status == CEC_TX_STATUS_OK) {
+ if (xfers_on_bus[first_idx].adap != dev->cec_rx_adap)
+ cec_received_msg(dev->cec_rx_adap, &first_msg);
+ for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
+ if (xfers_on_bus[first_idx].adap != dev->cec_tx_adap[i])
+ cec_received_msg(dev->cec_tx_adap[i], &first_msg);
+ }
+ end = ktime_get();
+ /*
+ * If the emulated transfer took more or less time than it should
+ * have, then compensate by adjusting the wait time needed for the
+ * bus to be signal-free for 3 bit periods (the retry time).
+ */
+ delta_us = div_s64(end - start, 1000);
+ delta_us -= wait_xfer_us + wait_arb_lost_us;
+ retry_us = CEC_SIGNAL_FREE_TIME_RETRY * CEC_DATA_BIT_US - delta_us;
+ if (retry_us > CEC_MARGIN_US)
+ usleep_range(retry_us - CEC_MARGIN_US, retry_us);
+ dev->cec_sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ /*
+ * If there are no messages that need to be retried, check if any
+ * adapters that did not just transmit a message are ready to
+ * transmit. If none of these adapters are ready, then increase
+ * the signal-free time so that the bus is available to all
+ * adapters and go back to waiting for a transmission.
+ */
+ while (dev->cec_sft >= CEC_SIGNAL_FREE_TIME_RETRY &&
+ dev->cec_sft < CEC_SIGNAL_FREE_TIME_NEXT_XFER &&
+ !xfer_ready(dev) && !kthread_should_stop()) {
+ usleep_range(2 * CEC_DATA_BIT_US - CEC_MARGIN_US,
+ 2 * CEC_DATA_BIT_US);
+ dev->cec_sft += 2;
+ }
}
+ return 0;
}
static int vivid_cec_adap_enable(struct cec_adapter *adap, bool enable)
@@ -184,41 +238,26 @@ static int vivid_cec_adap_log_addr(struct cec_adapter *adap, u8 log_addr)
return 0;
}
-/*
- * One data bit takes 2400 us, each byte needs 10 bits so that's 24000 us
- * per byte.
- */
-#define USECS_PER_BYTE 24000
-
static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg)
{
struct vivid_dev *dev = cec_get_drvdata(adap);
- struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
- long delta_jiffies = 0;
-
- if (cw == NULL)
- return -ENOMEM;
- cw->dev = dev;
- cw->adap = adap;
- cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
- msg->len * USECS_PER_BYTE;
- cw->msg = *msg;
-
- spin_lock(&dev->cec_slock);
- list_add(&cw->list, &dev->cec_work_list);
- if (dev->cec_xfer_time_jiffies == 0) {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
- dev->cec_xfer_start_jiffies = jiffies;
- dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
- delta_jiffies = dev->cec_xfer_time_jiffies;
- } else {
- INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
- delta_jiffies = dev->cec_xfer_start_jiffies +
- dev->cec_xfer_time_jiffies - jiffies;
+ u8 idx = cec_msg_initiator(msg);
+
+ spin_lock(&dev->cec_xfers_slock);
+ dev->xfers[idx].adap = adap;
+ memcpy(dev->xfers[idx].msg, msg->msg, CEC_MAX_MSG_SIZE);
+ dev->xfers[idx].len = msg->len;
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_RETRY;
+ if (signal_free_time > CEC_SIGNAL_FREE_TIME_RETRY) {
+ if (idx == dev->last_initiator)
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEXT_XFER;
+ else
+ dev->xfers[idx].sft = CEC_SIGNAL_FREE_TIME_NEW_INITIATOR;
}
- spin_unlock(&dev->cec_slock);
- schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
+ spin_unlock(&dev->cec_xfers_slock);
+ wake_up_interruptible(&dev->kthread_waitq_cec);
+
return 0;
}
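
The bus thread admits a pending transmission only once the bus has been signal-free for that transmission's required number of bit periods, and adjust_sfts() relaxes the waits of the losers after every transfer. A compact userspace model of those two rules, assuming the standard CEC signal-free times of 3/5/7 data-bit periods and a flat array in place of the driver's per-adapter bookkeeping:

#include <stdbool.h>
#include <stdio.h>

/* CEC signal-free times, in data-bit periods (per the CEC specification). */
#define SFT_RETRY          3
#define SFT_NEW_INITIATOR  5
#define SFT_NEXT_XFER      7

struct pending_xfer {
        unsigned int sft;       /* 0 means "no message queued" */
        unsigned int initiator; /* logical address of the sender */
};

/* A transfer may enter the bus once the bus has been idle long enough. */
static bool xfer_ready(const struct pending_xfer *x, unsigned int n,
                       unsigned int bus_sft)
{
        for (unsigned int i = 0; i < n; i++)
                if (x[i].sft && x[i].sft <= bus_sft)
                        return true;
        return false;
}

/*
 * After someone transmits, messages from other initiators are no longer
 * "successive", so their wait drops from NEXT_XFER to NEW_INITIATOR;
 * retries keep the short 3-bit wait.
 */
static void adjust_sfts(struct pending_xfer *x, unsigned int n,
                        unsigned int last_initiator)
{
        for (unsigned int i = 0; i < n; i++) {
                if (!x[i].sft || x[i].sft <= SFT_RETRY)
                        continue;
                x[i].sft = (x[i].initiator == last_initiator) ?
                           SFT_NEXT_XFER : SFT_NEW_INITIATOR;
        }
}

int main(void)
{
        struct pending_xfer xfers[2] = {
                { .sft = SFT_NEXT_XFER,     .initiator = 4 },
                { .sft = SFT_NEW_INITIATOR, .initiator = 8 },
        };

        printf("bus idle 5 bits, ready? %d\n", xfer_ready(xfers, 2, 5)); /* 1 */
        xfers[1].sft = 0;       /* adapter 8's message went out on the bus */
        adjust_sfts(xfers, 2, 8);
        printf("adapter 4 now waits %u bit periods\n", xfers[0].sft);    /* 5 */
        return 0;
}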
diff --git a/drivers/media/test-drivers/vivid/vivid-cec.h b/drivers/media/test-drivers/vivid/vivid-cec.h
index 7524ed48a914..b2bcddb50b83 100644
--- a/drivers/media/test-drivers/vivid/vivid-cec.h
+++ b/drivers/media/test-drivers/vivid/vivid-cec.h
@@ -9,12 +9,5 @@
struct cec_adapter *vivid_cec_alloc_adap(struct vivid_dev *dev,
unsigned int idx,
bool is_source);
-void vivid_cec_bus_free_work(struct vivid_dev *dev);
-
-#else
-
-static inline void vivid_cec_bus_free_work(struct vivid_dev *dev)
-{
-}
-
+int vivid_cec_bus_thread(void *_dev);
#endif
diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c
index d2bd2653cf54..04b75666bad4 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.c
+++ b/drivers/media/test-drivers/vivid/vivid-core.c
@@ -177,6 +177,15 @@ MODULE_PARM_DESC(cache_hints, " user-space cache hints, default is 0.\n"
"\t\t 0 == forbid\n"
"\t\t 1 == allow");
+static unsigned int supports_requests[VIVID_MAX_DEVS] = {
+ [0 ... (VIVID_MAX_DEVS - 1)] = 1
+};
+module_param_array(supports_requests, uint, NULL, 0444);
+MODULE_PARM_DESC(supports_requests, " support for requests, default is 1.\n"
+ "\t\t 0 == no support\n"
+ "\t\t 1 == supports requests\n"
+ "\t\t 2 == requires requests");
+
static struct vivid_dev *vivid_devs[VIVID_MAX_DEVS];
const struct v4l2_rect vivid_min_rect = {
@@ -883,10 +892,11 @@ static int vivid_create_queue(struct vivid_dev *dev,
q->mem_ops = allocators[dev->inst] == 1 ? &vb2_dma_contig_memops :
&vb2_vmalloc_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- q->min_buffers_needed = min_buffers_needed;
+ q->min_buffers_needed = supports_requests[dev->inst] ? 0 : min_buffers_needed;
q->lock = &dev->mutex;
q->dev = dev->v4l2_dev.dev;
- q->supports_requests = true;
+ q->supports_requests = supports_requests[dev->inst];
+ q->requires_requests = supports_requests[dev->inst] >= 2;
q->allow_cache_hints = (cache_hints[dev->inst] == 1);
return vb2_queue_init(q);
@@ -1878,18 +1888,7 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
INIT_LIST_HEAD(&dev->meta_out_active);
INIT_LIST_HEAD(&dev->touch_cap_active);
- INIT_LIST_HEAD(&dev->cec_work_list);
- spin_lock_init(&dev->cec_slock);
- /*
- * Same as create_singlethread_workqueue, but now I can use the
- * string formatting of alloc_ordered_workqueue.
- */
- dev->cec_workqueue = alloc_ordered_workqueue("vivid-%03d-cec",
- WQ_MEM_RECLAIM, inst);
- if (!dev->cec_workqueue) {
- ret = -ENOMEM;
- goto unreg_dev;
- }
+ spin_lock_init(&dev->cec_xfers_slock);
if (allocators[inst] == 1)
dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
@@ -1929,6 +1928,19 @@ static int vivid_create_instance(struct platform_device *pdev, int inst)
cec_tx_bus_cnt++;
}
}
+
+ if (dev->cec_rx_adap || cec_tx_bus_cnt) {
+ init_waitqueue_head(&dev->kthread_waitq_cec);
+ dev->kthread_cec = kthread_run(vivid_cec_bus_thread, dev,
+ "vivid_cec-%s", dev->v4l2_dev.name);
+ if (IS_ERR(dev->kthread_cec)) {
+ ret = PTR_ERR(dev->kthread_cec);
+ dev->kthread_cec = NULL;
+ v4l2_err(&dev->v4l2_dev, "kernel_thread() failed\n");
+ goto unreg_dev;
+ }
+ }
+
#endif
v4l2_ctrl_handler_setup(&dev->ctrl_hdl_vid_cap);
@@ -1968,10 +1980,8 @@ unreg_dev:
cec_unregister_adapter(dev->cec_rx_adap);
for (i = 0; i < MAX_OUTPUTS; i++)
cec_unregister_adapter(dev->cec_tx_adap[i]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
free_dev:
v4l2_device_put(&dev->v4l2_dev);
return ret;
@@ -2093,10 +2103,8 @@ static int vivid_remove(struct platform_device *pdev)
cec_unregister_adapter(dev->cec_rx_adap);
for (j = 0; j < MAX_OUTPUTS; j++)
cec_unregister_adapter(dev->cec_tx_adap[j]);
- if (dev->cec_workqueue) {
- vivid_cec_bus_free_work(dev);
- destroy_workqueue(dev->cec_workqueue);
- }
+ if (dev->kthread_cec)
+ kthread_stop(dev->kthread_cec);
v4l2_device_put(&dev->v4l2_dev);
vivid_devs[i] = NULL;
}
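
vivid-core.c swaps the per-device ordered workqueue for a single kthread that sleeps on a waitqueue until woken by a transmit request and is torn down with kthread_stop(). A bare-bones kernel-module skeleton of that lifecycle, with hypothetical names and the real work elided (a producer would set the flag and call wake_up_interruptible()):

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/wait.h>

static struct task_struct *worker;
static DECLARE_WAIT_QUEUE_HEAD(waitq);
static bool work_pending_flag;

static int worker_thread(void *data)
{
        for (;;) {
                /* Sleep until there is work or we are asked to stop. */
                wait_event_interruptible(waitq,
                                         work_pending_flag ||
                                         kthread_should_stop());
                if (kthread_should_stop())
                        break;
                work_pending_flag = false;
                /* ... emulate one bus transfer here ... */
        }
        return 0;
}

static int __init demo_init(void)
{
        worker = kthread_run(worker_thread, NULL, "demo-worker");
        return PTR_ERR_OR_ZERO(worker);
}

static void __exit demo_exit(void)
{
        kthread_stop(worker);   /* wakes the sleeper and waits for it to exit */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");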
diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h
index 1e3c4f5a9413..45f96706edde 100644
--- a/drivers/media/test-drivers/vivid/vivid-core.h
+++ b/drivers/media/test-drivers/vivid/vivid-core.h
@@ -110,15 +110,11 @@ enum vivid_colorspace {
#define VIVID_INVALID_SIGNAL(mode) \
((mode) == NO_SIGNAL || (mode) == NO_LOCK || (mode) == OUT_OF_RANGE)
-struct vivid_cec_work {
- struct list_head list;
- struct delayed_work work;
+struct vivid_cec_xfer {
struct cec_adapter *adap;
- struct vivid_dev *dev;
- unsigned int usecs;
- unsigned int timeout_ms;
- u8 tx_status;
- struct cec_msg msg;
+ u8 msg[CEC_MAX_MSG_SIZE];
+ u32 len;
+ u32 sft;
};
struct vivid_dev {
@@ -560,12 +556,13 @@ struct vivid_dev {
/* CEC */
struct cec_adapter *cec_rx_adap;
struct cec_adapter *cec_tx_adap[MAX_OUTPUTS];
- struct workqueue_struct *cec_workqueue;
- spinlock_t cec_slock;
- struct list_head cec_work_list;
- unsigned int cec_xfer_time_jiffies;
- unsigned long cec_xfer_start_jiffies;
u8 cec_output2bus_map[MAX_OUTPUTS];
+ struct task_struct *kthread_cec;
+ wait_queue_head_t kthread_waitq_cec;
+ struct vivid_cec_xfer xfers[MAX_OUTPUTS];
+ spinlock_t cec_xfers_slock; /* read and write cec messages */
+ u32 cec_sft; /* bus signal free time, in bit periods */
+ u8 last_initiator;
/* CEC OSD String */
char osd[14];
diff --git a/drivers/media/tuners/mxl5007t.c b/drivers/media/tuners/mxl5007t.c
index 26a277975cb1..03c46a62bf26 100644
--- a/drivers/media/tuners/mxl5007t.c
+++ b/drivers/media/tuners/mxl5007t.c
@@ -172,7 +172,6 @@ static void set_reg_bits(struct reg_pair_t *reg_pair, u8 reg, u8 mask, u8 val)
i++;
}
- return;
}
static void copy_reg_bits(struct reg_pair_t *reg_pair1,
@@ -193,7 +192,6 @@ static void copy_reg_bits(struct reg_pair_t *reg_pair1,
}
i++;
}
- return;
}
/* ------------------------------------------------------------------------- */
@@ -221,7 +219,6 @@ static void mxl5007t_set_mode_bits(struct mxl5007t_state *state,
default:
mxl_fail(-EINVAL);
}
- return;
}
static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
@@ -274,8 +271,6 @@ static void mxl5007t_set_if_freq_bits(struct mxl5007t_state *state,
set_reg_bits(state->tab_init, 0x02, 0x10, invert_if ? 0x10 : 0x00);
state->if_freq = if_freq;
-
- return;
}
static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
@@ -343,8 +338,6 @@ static void mxl5007t_set_xtal_freq_bits(struct mxl5007t_state *state,
mxl_fail(-EINVAL);
return;
}
-
- return;
}
static struct reg_pair_t *mxl5007t_calc_init_regs(struct mxl5007t_state *state,
@@ -398,8 +391,6 @@ static void mxl5007t_set_bw_bits(struct mxl5007t_state *state,
return;
}
set_reg_bits(state->tab_rftune, 0x0c, 0x3f, val);
-
- return;
}
static struct
diff --git a/drivers/media/tuners/tuner-types.c b/drivers/media/tuners/tuner-types.c
index 01f61ebabd56..0ed2c5bc082e 100644
--- a/drivers/media/tuners/tuner-types.c
+++ b/drivers/media/tuners/tuner-types.c
@@ -1942,6 +1942,10 @@ struct tunertype tuners[] = {
.params = tuner_sony_btf_pg463z_params,
.count = ARRAY_SIZE(tuner_sony_btf_pg463z_params),
},
+ [TUNER_SI2157] = {
+ .name = "Silicon Labs Si2157 tuner",
+ /* see si2157.c for details */
+ },
};
EXPORT_SYMBOL(tuners);
diff --git a/drivers/media/usb/airspy/airspy.c b/drivers/media/usb/airspy/airspy.c
index 7a81be7970b2..d568452618d1 100644
--- a/drivers/media/usb/airspy/airspy.c
+++ b/drivers/media/usb/airspy/airspy.c
@@ -415,8 +415,11 @@ static int airspy_alloc_urbs(struct airspy *s)
dev_dbg(s->dev, "alloc urb=%d\n", i);
s->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
if (!s->urb_list[i]) {
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
usb_free_urb(s->urb_list[j]);
+ s->urb_list[j] = NULL;
+ }
+ s->urbs_initialized = 0;
return -ENOMEM;
}
usb_fill_bulk_urb(s->urb_list[i],
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 7865fa0a8295..cd5861a30b6f 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -931,8 +931,6 @@ static int mxl111sf_init(struct dvb_usb_device *d)
.len = sizeof(eeprom), .buf = eeprom },
};
- mutex_init(&state->msg_lock);
-
ret = get_chip_info(state);
if (mxl_fail(ret))
pr_err("failed to get chip info during probe");
@@ -1074,6 +1072,14 @@ static int mxl111sf_get_stream_config_dvbt(struct dvb_frontend *fe,
return 0;
}
+static int mxl111sf_probe(struct dvb_usb_device *dev)
+{
+ struct mxl111sf_state *state = d_to_priv(dev);
+
+ mutex_init(&state->msg_lock);
+ return 0;
+}
+
static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.driver_name = KBUILD_MODNAME,
.owner = THIS_MODULE,
@@ -1083,6 +1089,7 @@ static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_dvbt,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1124,6 +1131,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1165,6 +1173,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1233,6 +1242,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_atsc_mh,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1311,6 +1321,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury,
.tuner_attach = mxl111sf_attach_tuner,
@@ -1381,6 +1392,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury_mh = {
.generic_bulk_ctrl_endpoint = 0x02,
.generic_bulk_ctrl_endpoint_response = 0x81,
+ .probe = mxl111sf_probe,
.i2c_algo = &mxl111sf_i2c_algo,
.frontend_attach = mxl111sf_frontend_attach_mercury_mh,
.tuner_attach = mxl111sf_attach_tuner,
diff --git a/drivers/media/usb/dvb-usb/az6027.c b/drivers/media/usb/dvb-usb/az6027.c
index 1c39b61cde29..86788771175b 100644
--- a/drivers/media/usb/dvb-usb/az6027.c
+++ b/drivers/media/usb/dvb-usb/az6027.c
@@ -391,6 +391,7 @@ static struct rc_map_table rc_map_az6027_table[] = {
/* remote control stuff (does not work with my box) */
static int az6027_rc_query(struct dvb_usb_device *d, u32 *event, int *state)
{
+ *state = REMOTE_NO_KEY_PRESSED;
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
index 02b51d1a1b67..aff60c10cb0b 100644
--- a/drivers/media/usb/dvb-usb/dibusb-common.c
+++ b/drivers/media/usb/dvb-usb/dibusb-common.c
@@ -223,7 +223,7 @@ int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
u8 *buf;
int rc;
- buf = kmalloc(2, GFP_KERNEL);
+ buf = kzalloc(2, GFP_KERNEL);
if (!buf)
return -ENOMEM;
diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
index c1e0dccb7408..b207f34af5c6 100644
--- a/drivers/media/usb/em28xx/em28xx-cards.c
+++ b/drivers/media/usb/em28xx/em28xx-cards.c
@@ -2508,12 +2508,17 @@ const struct em28xx_board em28xx_boards[] = {
.def_i2c_bus = 1,
.i2c_speed = EM28XX_I2C_CLK_WAIT_ENABLE |
EM28XX_I2C_FREQ_400_KHZ,
- .tuner_type = TUNER_ABSENT,
+ .tuner_type = TUNER_SI2157,
.tuner_gpio = hauppauge_dualhd_dvb,
.has_dvb = 1,
.has_dual_ts = 1,
.ir_codes = RC_MAP_HAUPPAUGE,
.leds = hauppauge_dualhd_leds,
+ .input = { {
+ .type = EM28XX_VMUX_COMPOSITE,
+ .vmux = TVP5150_COMPOSITE1,
+ .amux = EM28XX_AMUX_LINE_IN,
+ } },
},
/*
* 2040:026d Hauppauge WinTV-dualHD (model 01595 - ATSC/QAM) Isoc.
@@ -4139,8 +4144,11 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
em28xx_close_extension(dev);
- if (dev->dev_next)
+ if (dev->dev_next) {
+ em28xx_close_extension(dev->dev_next);
em28xx_release_resources(dev->dev_next);
+ }
+
em28xx_release_resources(dev);
if (dev->dev_next) {
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index 584fa400cd7d..acc0bf7dbe2b 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -1154,8 +1154,9 @@ int em28xx_suspend_extension(struct em28xx *dev)
dev_info(&dev->intf->dev, "Suspending extensions\n");
mutex_lock(&em28xx_devlist_mutex);
list_for_each_entry(ops, &em28xx_extension_devlist, next) {
- if (ops->suspend)
- ops->suspend(dev);
+ if (!ops->suspend)
+ continue;
+ ops->suspend(dev);
if (dev->dev_next)
ops->suspend(dev->dev_next);
}
diff --git a/drivers/media/usb/gspca/gl860/gl860-mi1320.c b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
index 0749fe13160f..d6a540ed378c 100644
--- a/drivers/media/usb/gspca/gl860/gl860-mi1320.c
+++ b/drivers/media/usb/gspca/gl860/gl860-mi1320.c
@@ -50,42 +50,69 @@ static struct validx tbl_post_unset_alt[] = {
};
static u8 *tbl_1280[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x00" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x04\xf1\x00\xa1\x05\xf1\x00" "\xa4\x04\xf1\x00\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x28\xd5\x01\xd0\x02" "\xd1\x18\xd2\xc1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x00,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x04, 0xf1, 0x00, 0xa1, 0x05, 0xf1, 0x00,
+ 0xa4, 0x04, 0xf1, 0x00, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x28, 0xd5, 0x01, 0xd0, 0x02,
+ 0xd1, 0x18, 0xd2, 0xc1
+ }
};
static u8 *tbl_800[] = {
- "\x0d\x80\xf1\x08\x03\x03\xf1\xc0" "\x04\x05\xf1\x02\x05\x00\xf1\xf1"
- "\x06\x00\xf1\x0d\x20\x01\xf1\x00" "\x21\x84\xf1\x00\x0d\x00\xf1\x08"
- "\xf0\x00\xf1\x01\x34\x00\xf1\x00" "\x9b\x43\xf1\x00\xa6\x05\xf1\x00"
- "\xa9\x03\xf1\xc0\xa1\x03\xf1\x20" "\xa4\x02\xf1\x5a\xae\x0a\xf1\x08"
- ,
- "\xf0\x00\xf1\x02\x3a\x05\xf1\xf1" "\x3c\x05\xf1\xf1\x59\x01\xf1\x47"
- "\x5a\x01\xf1\x88\x5c\x0a\xf1\x06" "\x5d\x0e\xf1\x0a\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\xcf\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x18\xd5\x21\xd0\x02" "\xd1\x10\xd2\x59"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x03, 0xf1, 0xc0,
+ 0x04, 0x05, 0xf1, 0x02, 0x05, 0x00, 0xf1, 0xf1,
+ 0x06, 0x00, 0xf1, 0x0d, 0x20, 0x01, 0xf1, 0x00,
+ 0x21, 0x84, 0xf1, 0x00, 0x0d, 0x00, 0xf1, 0x08,
+ 0xf0, 0x00, 0xf1, 0x01, 0x34, 0x00, 0xf1, 0x00,
+ 0x9b, 0x43, 0xf1, 0x00, 0xa6, 0x05, 0xf1, 0x00,
+ 0xa9, 0x03, 0xf1, 0xc0, 0xa1, 0x03, 0xf1, 0x20,
+ 0xa4, 0x02, 0xf1, 0x5a, 0xae, 0x0a, 0xf1, 0x08
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x3a, 0x05, 0xf1, 0xf1,
+ 0x3c, 0x05, 0xf1, 0xf1, 0x59, 0x01, 0xf1, 0x47,
+ 0x5a, 0x01, 0xf1, 0x88, 0x5c, 0x0a, 0xf1, 0x06,
+ 0x5d, 0x0e, 0xf1, 0x0a, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0xcf, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x18, 0xd5, 0x21, 0xd0, 0x02,
+ 0xd1, 0x10, 0xd2, 0x59
+ }
};
static u8 *tbl_640[] = {
- "\x0d\x80\xf1\x08\x03\x04\xf1\x04" "\x04\x05\xf1\x02\x07\x01\xf1\x7c"
- "\x08\x00\xf1\x0e\x21\x80\xf1\x00" "\x0d\x00\xf1\x08\xf0\x00\xf1\x01"
- "\x34\x10\xf1\x10\x3a\x43\xf1\x00" "\xa6\x05\xf1\x02\xa9\x04\xf1\x04"
- "\xa7\x02\xf1\x81\xaa\x01\xf1\xe2" "\xae\x0c\xf1\x09"
- ,
- "\xf0\x00\xf1\x02\x39\x03\xf1\xfc" "\x3b\x04\xf1\x04\x57\x01\xf1\xb6"
- "\x58\x02\xf1\x0d\x5c\x1f\xf1\x19" "\x5d\x24\xf1\x1e\x64\x5e\xf1\x1c"
- "\xd2\x00\xf1\x00\xcb\x00\xf1\x01"
- ,
- "\xd3\x02\xd4\x10\xd5\x81\xd0\x02" "\xd1\x08\xd2\xe1"
+ (u8[]){
+ 0x0d, 0x80, 0xf1, 0x08, 0x03, 0x04, 0xf1, 0x04,
+ 0x04, 0x05, 0xf1, 0x02, 0x07, 0x01, 0xf1, 0x7c,
+ 0x08, 0x00, 0xf1, 0x0e, 0x21, 0x80, 0xf1, 0x00,
+ 0x0d, 0x00, 0xf1, 0x08, 0xf0, 0x00, 0xf1, 0x01,
+ 0x34, 0x10, 0xf1, 0x10, 0x3a, 0x43, 0xf1, 0x00,
+ 0xa6, 0x05, 0xf1, 0x02, 0xa9, 0x04, 0xf1, 0x04,
+ 0xa7, 0x02, 0xf1, 0x81, 0xaa, 0x01, 0xf1, 0xe2,
+ 0xae, 0x0c, 0xf1, 0x09
+ }, (u8[]){
+ 0xf0, 0x00, 0xf1, 0x02, 0x39, 0x03, 0xf1, 0xfc,
+ 0x3b, 0x04, 0xf1, 0x04, 0x57, 0x01, 0xf1, 0xb6,
+ 0x58, 0x02, 0xf1, 0x0d, 0x5c, 0x1f, 0xf1, 0x19,
+ 0x5d, 0x24, 0xf1, 0x1e, 0x64, 0x5e, 0xf1, 0x1c,
+ 0xd2, 0x00, 0xf1, 0x00, 0xcb, 0x00, 0xf1, 0x01
+ }, (u8[]){
+ 0xd3, 0x02, 0xd4, 0x10, 0xd5, 0x81, 0xd0, 0x02,
+ 0xd1, 0x08, 0xd2, 0xe1
+ }
};
static s32 tbl_sat[] = {0x25, 0x1d, 0x15, 0x0d, 0x05, 0x4d, 0x55, 0x5d, 0x2d};
diff --git a/drivers/media/usb/gspca/gl860/gl860-ov9655.c b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
index 59b87d066187..766677ebcb34 100644
--- a/drivers/media/usb/gspca/gl860/gl860-ov9655.c
+++ b/drivers/media/usb/gspca/gl860/gl860-ov9655.c
@@ -25,69 +25,118 @@ static struct validx tbl_commmon[] = {
static s32 tbl_length[] = {12, 56, 52, 54, 56, 42, 32, 12};
static u8 *tbl_640[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x03\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x01\x12\x60\x13\x00" "\x14\x3a\x16\x24\x17\x14\x18\x00"
- "\x19\x01\x1a\x3d\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xff\x33\x00\x34\x3d\x35\x00" "\x36\xfa\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc1" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xee\x4b\xe7\x4c\xe7"
- "\x4d\xe7\x4e\xe7"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x0a\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x15\x71\x78\x72\x00\x73\x00" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x8d\x90\x7c\x91\x7b"
- ,
- "\x9d\x02\x9e\x02\x9f\x74\xa0\x73" "\xa1\x40\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x4f\xbd\x4e\xbe\x6a" "\xbf\x68\xc0\xaa\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x01\xd1\x08\xd2\xe0\xd3\x01" "\xd4\x10\xd5\x80"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ }, (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x03, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x01, 0x12, 0x60, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x14, 0x18, 0x00,
+ 0x19, 0x01, 0x1a, 0x3d, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ }, (u8[]){
+ 0x32, 0xff, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xfa, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc1,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xee, 0x4b, 0xe7, 0x4c, 0xe7,
+ 0x4d, 0xe7, 0x4e, 0xe7
+ }, (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x0a, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ }, (u8[]){
+ 0x70, 0x15, 0x71, 0x78, 0x72, 0x00, 0x73, 0x00,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x8d, 0x90, 0x7c, 0x91, 0x7b
+ }, (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x74, 0xa0, 0x73,
+ 0xa1, 0x40, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ }, (u8[]){
+ 0xbb, 0xae, 0xbc, 0x4f, 0xbd, 0x4e, 0xbe, 0x6a,
+ 0xbf, 0x68, 0xc0, 0xaa, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ }, (u8[]){
+ 0xd0, 0x01, 0xd1, 0x08, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x10, 0xd5, 0x80
+ }
};
static u8 *tbl_1280[] = {
- "\x00\x40\x07\x6a\x06\xf3\x0d\x6a" "\x10\x10\xc1\x01"
- ,
- "\x12\x80\x00\x00\x01\x98\x02\x80" "\x03\x12\x04\x01\x0b\x57\x0e\x61"
- "\x0f\x42\x11\x00\x12\x00\x13\x00" "\x14\x3a\x16\x24\x17\x1b\x18\xbb"
- "\x19\x01\x1a\x81\x1e\x04\x24\x3c" "\x25\x36\x26\x72\x27\x08\x28\x08"
- "\x29\x15\x2a\x00\x2b\x00\x2c\x08"
- ,
- "\x32\xa4\x33\x00\x34\x3d\x35\x00" "\x36\xf8\x38\x72\x39\x57\x3a\x00"
- "\x3b\x0c\x3d\x99\x3e\x0c\x3f\xc2" "\x40\xc0\x41\x00\x42\xc0\x43\x0a"
- "\x44\xf0\x45\x46\x46\x62\x47\x2a" "\x48\x3c\x4a\xec\x4b\xe8\x4c\xe8"
- "\x4d\xe8\x4e\xe8"
- ,
- "\x4f\x98\x50\x98\x51\x00\x52\x28" "\x53\x70\x54\x98\x58\x1a\x59\x85"
- "\x5a\xa9\x5b\x64\x5c\x84\x5d\x53" "\x5e\x0e\x5f\xf0\x60\xf0\x61\xf0"
- "\x62\x00\x63\x00\x64\x02\x65\x20" "\x66\x00\x69\x02\x6b\x5a\x6c\x04"
- "\x6d\x55\x6e\x00\x6f\x9d"
- ,
- "\x70\x08\x71\x78\x72\x00\x73\x01" "\x74\x3a\x75\x35\x76\x01\x77\x02"
- "\x7a\x24\x7b\x04\x7c\x07\x7d\x10" "\x7e\x28\x7f\x36\x80\x44\x81\x52"
- "\x82\x60\x83\x6c\x84\x78\x85\x8c" "\x86\x9e\x87\xbb\x88\xd2\x89\xe5"
- "\x8a\x23\x8c\x0d\x90\x90\x91\x90"
- ,
- "\x9d\x02\x9e\x02\x9f\x94\xa0\x94" "\xa1\x01\xa4\x50\xa5\x68\xa6\x70"
- "\xa8\xc1\xa9\xef\xaa\x92\xab\x04" "\xac\x80\xad\x80\xae\x80\xaf\x80"
- "\xb2\xf2\xb3\x20\xb4\x20\xb5\x00" "\xb6\xaf"
- ,
- "\xbb\xae\xbc\x38\xbd\x39\xbe\x01" "\xbf\x01\xc0\xe2\xc1\xc0\xc2\x01"
- "\xc3\x4e\xc6\x85\xc7\x81\xc9\xe0" "\xca\xe8\xcb\xf0\xcc\xd8\xcd\x93"
- ,
- "\xd0\x21\xd1\x18\xd2\xe0\xd3\x01" "\xd4\x28\xd5\x00"
+ (u8[]){
+ 0x00, 0x40, 0x07, 0x6a, 0x06, 0xf3, 0x0d, 0x6a,
+ 0x10, 0x10, 0xc1, 0x01
+ },
+ (u8[]){
+ 0x12, 0x80, 0x00, 0x00, 0x01, 0x98, 0x02, 0x80,
+ 0x03, 0x12, 0x04, 0x01, 0x0b, 0x57, 0x0e, 0x61,
+ 0x0f, 0x42, 0x11, 0x00, 0x12, 0x00, 0x13, 0x00,
+ 0x14, 0x3a, 0x16, 0x24, 0x17, 0x1b, 0x18, 0xbb,
+ 0x19, 0x01, 0x1a, 0x81, 0x1e, 0x04, 0x24, 0x3c,
+ 0x25, 0x36, 0x26, 0x72, 0x27, 0x08, 0x28, 0x08,
+ 0x29, 0x15, 0x2a, 0x00, 0x2b, 0x00, 0x2c, 0x08
+ },
+ (u8[]){
+ 0x32, 0xa4, 0x33, 0x00, 0x34, 0x3d, 0x35, 0x00,
+ 0x36, 0xf8, 0x38, 0x72, 0x39, 0x57, 0x3a, 0x00,
+ 0x3b, 0x0c, 0x3d, 0x99, 0x3e, 0x0c, 0x3f, 0xc2,
+ 0x40, 0xc0, 0x41, 0x00, 0x42, 0xc0, 0x43, 0x0a,
+ 0x44, 0xf0, 0x45, 0x46, 0x46, 0x62, 0x47, 0x2a,
+ 0x48, 0x3c, 0x4a, 0xec, 0x4b, 0xe8, 0x4c, 0xe8,
+ 0x4d, 0xe8, 0x4e, 0xe8
+ },
+ (u8[]){
+ 0x4f, 0x98, 0x50, 0x98, 0x51, 0x00, 0x52, 0x28,
+ 0x53, 0x70, 0x54, 0x98, 0x58, 0x1a, 0x59, 0x85,
+ 0x5a, 0xa9, 0x5b, 0x64, 0x5c, 0x84, 0x5d, 0x53,
+ 0x5e, 0x0e, 0x5f, 0xf0, 0x60, 0xf0, 0x61, 0xf0,
+ 0x62, 0x00, 0x63, 0x00, 0x64, 0x02, 0x65, 0x20,
+ 0x66, 0x00, 0x69, 0x02, 0x6b, 0x5a, 0x6c, 0x04,
+ 0x6d, 0x55, 0x6e, 0x00, 0x6f, 0x9d
+ },
+ (u8[]){
+ 0x70, 0x08, 0x71, 0x78, 0x72, 0x00, 0x73, 0x01,
+ 0x74, 0x3a, 0x75, 0x35, 0x76, 0x01, 0x77, 0x02,
+ 0x7a, 0x24, 0x7b, 0x04, 0x7c, 0x07, 0x7d, 0x10,
+ 0x7e, 0x28, 0x7f, 0x36, 0x80, 0x44, 0x81, 0x52,
+ 0x82, 0x60, 0x83, 0x6c, 0x84, 0x78, 0x85, 0x8c,
+ 0x86, 0x9e, 0x87, 0xbb, 0x88, 0xd2, 0x89, 0xe5,
+ 0x8a, 0x23, 0x8c, 0x0d, 0x90, 0x90, 0x91, 0x90
+ },
+ (u8[]){
+ 0x9d, 0x02, 0x9e, 0x02, 0x9f, 0x94, 0xa0, 0x94,
+ 0xa1, 0x01, 0xa4, 0x50, 0xa5, 0x68, 0xa6, 0x70,
+ 0xa8, 0xc1, 0xa9, 0xef, 0xaa, 0x92, 0xab, 0x04,
+ 0xac, 0x80, 0xad, 0x80, 0xae, 0x80, 0xaf, 0x80,
+ 0xb2, 0xf2, 0xb3, 0x20, 0xb4, 0x20, 0xb5, 0x00,
+ 0xb6, 0xaf
+ },
+ (u8[]){
+ 0xbb, 0xae, 0xbc, 0x38, 0xbd, 0x39, 0xbe, 0x01,
+ 0xbf, 0x01, 0xc0, 0xe2, 0xc1, 0xc0, 0xc2, 0x01,
+ 0xc3, 0x4e, 0xc6, 0x85, 0xc7, 0x81, 0xc9, 0xe0,
+ 0xca, 0xe8, 0xcb, 0xf0, 0xcc, 0xd8, 0xcd, 0x93
+ },
+ (u8[]){
+ 0xd0, 0x21, 0xd1, 0x18, 0xd2, 0xe0, 0xd3, 0x01,
+ 0xd4, 0x28, 0xd5, 0x00
+ }
};
static u8 c04[] = {0x04};
diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
index 47d8f28bfdfc..770714c34295 100644
--- a/drivers/media/usb/gspca/gspca.c
+++ b/drivers/media/usb/gspca/gspca.c
@@ -444,6 +444,8 @@ void gspca_frame_add(struct gspca_dev *gspca_dev,
* next first packet, wake up the application and advance
* in the queue */
if (packet_type == LAST_PACKET) {
+ if (gspca_dev->image_len > gspca_dev->pixfmt.sizeimage)
+ gspca_dev->image_len = gspca_dev->pixfmt.sizeimage;
spin_lock_irqsave(&gspca_dev->qlock, flags);
list_del(&buf->list);
spin_unlock_irqrestore(&gspca_dev->qlock, flags);
diff --git a/drivers/media/usb/gspca/m5602/m5602_ov7660.h b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
index d60247e10c2c..6146e8ef17c0 100644
--- a/drivers/media/usb/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/usb/gspca/m5602/m5602_ov7660.h
@@ -86,7 +86,6 @@ extern bool dump_sensor;
int ov7660_probe(struct sd *sd);
int ov7660_init(struct sd *sd);
-int ov7660_init(struct sd *sd);
int ov7660_init_controls(struct sd *sd);
int ov7660_start(struct sd *sd);
int ov7660_stop(struct sd *sd);
diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
index bfd194c61819..da916127a896 100644
--- a/drivers/media/usb/gspca/sn9c20x.c
+++ b/drivers/media/usb/gspca/sn9c20x.c
@@ -50,6 +50,7 @@ MODULE_LICENSE("GPL");
#define HAS_NO_BUTTON 0x1
#define LED_REVERSE 0x2 /* some cameras unset gpio to turn on leds */
#define FLIP_DETECT 0x4
+#define HAS_LED_TORCH 0x8
/* specific webcam descriptor */
struct sd {
@@ -77,6 +78,8 @@ struct sd {
};
struct v4l2_ctrl *jpegqual;
+ struct v4l2_ctrl *led_mode;
+
struct work_struct work;
u32 pktsz; /* (used by pkt_scan) */
@@ -1533,6 +1536,12 @@ static void set_gain(struct gspca_dev *gspca_dev, s32 g)
i2c_w(gspca_dev, gain);
}
+static void set_led_mode(struct gspca_dev *gspca_dev, s32 val)
+{
+ reg_w1(gspca_dev, 0x1007, 0x60);
+ reg_w1(gspca_dev, 0x1006, val ? 0x40 : 0x00);
+}
+
static void set_quality(struct gspca_dev *gspca_dev, s32 val)
{
struct sd *sd = (struct sd *) gspca_dev;
@@ -1699,6 +1708,9 @@ static int sd_s_ctrl(struct v4l2_ctrl *ctrl)
case V4L2_CID_JPEG_COMPRESSION_QUALITY:
set_quality(gspca_dev, ctrl->val);
break;
+ case V4L2_CID_FLASH_LED_MODE:
+ set_led_mode(gspca_dev, ctrl->val);
+ break;
}
return gspca_dev->usb_err;
}
@@ -1757,6 +1769,12 @@ static int sd_init_controls(struct gspca_dev *gspca_dev)
sd->jpegqual = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops,
V4L2_CID_JPEG_COMPRESSION_QUALITY, 50, 90, 1, 80);
+
+ if (sd->flags & HAS_LED_TORCH)
+ sd->led_mode = v4l2_ctrl_new_std_menu(hdl, &sd_ctrl_ops,
+ V4L2_CID_FLASH_LED_MODE, V4L2_FLASH_LED_MODE_TORCH,
+ ~0x5, V4L2_FLASH_LED_MODE_NONE);
+
if (hdl->error) {
pr_err("Could not initialize controls\n");
return hdl->error;
@@ -2048,6 +2066,8 @@ static int sd_start(struct gspca_dev *gspca_dev)
sd->pktsz = sd->npkt = 0;
sd->nchg = 0;
}
+ if (sd->led_mode)
+ v4l2_ctrl_s_ctrl(sd->led_mode, 0);
return gspca_dev->usb_err;
}
@@ -2325,7 +2345,7 @@ static const struct sd_desc sd_desc = {
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0c45, 0x6240), SN9C20X(MT9M001, 0x5d, 0)},
- {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, 0)},
+ {USB_DEVICE(0x0c45, 0x6242), SN9C20X(MT9M111, 0x5d, HAS_LED_TORCH)},
{USB_DEVICE(0x0c45, 0x6248), SN9C20X(OV9655, 0x30, 0)},
{USB_DEVICE(0x0c45, 0x624c), SN9C20X(MT9M112, 0x5d, 0)},
{USB_DEVICE(0x0c45, 0x624e), SN9C20X(SOI968, 0x30, LED_REVERSE)},
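
The torch control uses v4l2_ctrl_new_std_menu(), whose mask argument is a skip mask: set bits hide menu items, so ~0x5 leaves only items 0 and 2 (V4L2_FLASH_LED_MODE_NONE and V4L2_FLASH_LED_MODE_TORCH) selectable. A tiny sketch of that mask arithmetic, with the item names reduced to plain labels:

#include <stdio.h>

int main(void)
{
        const char *items[] = { "NONE", "FLASH", "TORCH" };
        unsigned long skip_mask = ~0x5UL;  /* hide everything except bits 0 and 2 */

        for (unsigned int i = 0; i < 3; i++)
                if (!(skip_mask & (1UL << i)))
                        printf("menu item %u (%s) is selectable\n", i, items[i]);
        return 0;
}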
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
index 9f71d8c2a3c6..8ae3ad80cccb 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-ctrl.c
@@ -355,11 +355,8 @@ static int parse_token(const char *ptr,unsigned int len,
int *valptr,
const char * const *names, unsigned int namecnt)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- int negfl;
- char *p2;
*valptr = 0;
if (!names) namecnt = 0;
for (idx = 0; idx < namecnt; idx++) {
@@ -370,18 +367,7 @@ static int parse_token(const char *ptr,unsigned int len,
*valptr = idx;
return 0;
}
- negfl = 0;
- if ((*ptr == '-') || (*ptr == '+')) {
- negfl = (*ptr == '-');
- ptr++; len--;
- }
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (negfl) *valptr = -(*valptr);
- if (*p2) return -EINVAL;
- return 1;
+ return kstrtoint(ptr, 0, valptr) ? -EINVAL : 1;
}
@@ -389,10 +375,8 @@ static int parse_mtoken(const char *ptr,unsigned int len,
int *valptr,
const char **names,int valid_bits)
{
- char buf[33];
unsigned int slen;
unsigned int idx;
- char *p2;
int msk;
*valptr = 0;
for (idx = 0, msk = 1; valid_bits; idx++, msk <<= 1) {
@@ -405,12 +389,7 @@ static int parse_mtoken(const char *ptr,unsigned int len,
*valptr = msk;
return 0;
}
- if (len >= sizeof(buf)) return -EINVAL;
- memcpy(buf,ptr,len);
- buf[len] = 0;
- *valptr = simple_strtol(buf,&p2,0);
- if (*p2) return -EINVAL;
- return 0;
+ return kstrtoint(ptr, 0, valptr);
}
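
The rewritten parse_token() keeps the name-table lookup but hands the numeric case to kstrtoint(), which already handles the sign and rejects trailing characters, so the bounded copy and manual negation disappear. A userspace approximation of the resulting semantics, with strtol() plus a full-consumption check standing in for kstrtoint():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

/*
 * Return 0 and set *val when the token matches a name, 1 on a clean
 * integer parse, -EINVAL otherwise -- mirroring the reworked parse_token().
 */
static int parse_token(const char *tok, int *val,
                       const char *const *names, unsigned int namecnt)
{
        char *end;
        long v;

        *val = 0;
        for (unsigned int i = 0; i < namecnt; i++) {
                if (names[i] && !strcasecmp(tok, names[i])) {
                        *val = (int)i;
                        return 0;
                }
        }

        errno = 0;
        v = strtol(tok, &end, 0);
        if (errno || end == tok || *end != '\0')
                return -EINVAL; /* no digits, overflow, or trailing junk */
        *val = (int)v;
        return 1;
}

int main(void)
{
        static const char *const names[] = { "off", "on", "auto" };
        int v;

        printf("%d %d\n", parse_token("auto", &v, names, 3), v);  /* 0 2   */
        printf("%d %d\n", parse_token("-42", &v, names, 3), v);   /* 1 -42 */
        printf("%d\n", parse_token("12abc", &v, names, 3));       /* -22   */
        return 0;
}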
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 9657c1883311..c04ab7258d64 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -640,10 +640,6 @@ static int pvr2_s_ext_ctrls(struct file *file, void *priv,
unsigned int idx;
int ret;
- /* Default value cannot be changed */
- if (ctls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
-
ret = 0;
for (idx = 0; idx < ctls->count; idx++) {
ctrl = ctls->controls + idx;
diff --git a/drivers/media/usb/stkwebcam/stk-webcam.c b/drivers/media/usb/stkwebcam/stk-webcam.c
index 0e231e576dc3..9f445e6ab5fa 100644
--- a/drivers/media/usb/stkwebcam/stk-webcam.c
+++ b/drivers/media/usb/stkwebcam/stk-webcam.c
@@ -1234,6 +1234,11 @@ static void stk_v4l_dev_release(struct video_device *vd)
if (dev->sio_bufs != NULL || dev->isobufs != NULL)
pr_err("We are leaking memory\n");
usb_put_intf(dev->interface);
+ usb_put_dev(dev->udev);
+
+ v4l2_ctrl_handler_free(&dev->hdl);
+ v4l2_device_unregister(&dev->v4l2_dev);
+ kfree(dev);
}
static const struct video_device stk_v4l_data = {
@@ -1309,7 +1314,7 @@ static int stk_camera_probe(struct usb_interface *interface,
init_waitqueue_head(&dev->wait_frame);
dev->first_init = 1; /* webcam LED management */
- dev->udev = udev;
+ dev->udev = usb_get_dev(udev);
dev->interface = interface;
usb_get_intf(interface);
@@ -1365,6 +1370,7 @@ static int stk_camera_probe(struct usb_interface *interface,
error_put:
usb_put_intf(interface);
+ usb_put_dev(dev->udev);
error:
v4l2_ctrl_handler_free(hdl);
v4l2_device_unregister(&dev->v4l2_dev);
@@ -1385,9 +1391,6 @@ static void stk_camera_disconnect(struct usb_interface *interface)
video_device_node_name(&dev->vdev));
video_unregister_device(&dev->vdev);
- v4l2_ctrl_handler_free(&dev->hdl);
- v4l2_device_unregister(&dev->v4l2_dev);
- kfree(dev);
}
#ifdef CONFIG_PM
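
The reshuffle above follows the usual V4L2 lifetime rule: everything reachable from an open file handle must stay allocated until the video_device release callback runs, which can be long after USB disconnect, and the struct usb_device has to be pinned with usb_get_dev() for the same window. A condensed sketch of that release ordering, with hypothetical structure and function names:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>

/* Hypothetical per-camera state, mirroring the fields freed at release. */
struct cam {
	struct video_device vdev;
	struct v4l2_device v4l2_dev;
	struct v4l2_ctrl_handler hdl;
	struct usb_device *udev;	/* pinned with usb_get_dev() in probe */
	struct usb_interface *intf;	/* pinned with usb_get_intf() in probe */
};

/* Runs when the last reference on &cam->vdev is dropped, not at disconnect. */
static void cam_release(struct video_device *vdev)
{
	struct cam *cam = container_of(vdev, struct cam, vdev);

	usb_put_intf(cam->intf);
	usb_put_dev(cam->udev);
	v4l2_ctrl_handler_free(&cam->hdl);
	v4l2_device_unregister(&cam->v4l2_dev);
	kfree(cam);
}
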
diff --git a/drivers/media/usb/tm6000/tm6000-video.c b/drivers/media/usb/tm6000/tm6000-video.c
index 3f650ede0c3d..e293f6f3d1bc 100644
--- a/drivers/media/usb/tm6000/tm6000-video.c
+++ b/drivers/media/usb/tm6000/tm6000-video.c
@@ -852,8 +852,7 @@ static int vidioc_querycap(struct file *file, void *priv,
struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev;
strscpy(cap->driver, "tm6000", sizeof(cap->driver));
- strscpy(cap->card, "Trident TVMaster TM5600/6000/6010",
- sizeof(cap->card));
+ strscpy(cap->card, "Trident TM5600/6000/6010", sizeof(cap->card));
usb_make_path(dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_DEVICE_CAPS;
diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
index bfda46a36dc5..38822cedd93a 100644
--- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
@@ -327,7 +327,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
result = mutex_lock_interruptible(&dec->usb_mutex);
if (result) {
printk("%s: Failed to lock usb mutex.\n", __func__);
- goto err;
+ goto err_free;
}
b[0] = 0xaa;
@@ -349,7 +349,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: command bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
}
result = usb_bulk_msg(dec->udev, dec->result_pipe, b,
@@ -358,7 +358,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
if (result) {
printk("%s: result bulk message failed: error %d\n",
__func__, result);
- goto err;
+ goto err_mutex_unlock;
} else {
if (debug) {
printk(KERN_DEBUG "%s: result: %*ph\n",
@@ -371,9 +371,9 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
memcpy(cmd_result, &b[4], b[3]);
}
-err:
+err_mutex_unlock:
mutex_unlock(&dec->usb_mutex);
-
+err_free:
kfree(b);
return result;
}
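
The relabelled error paths above implement the standard "unwind in reverse order" pattern: the mutex is only unlocked on paths where it was actually taken, while the command buffer is freed on every exit. A compact sketch of the same shape with hypothetical names:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Illustrative only: staged cleanup labels, one per acquired resource. */
static int send_command(struct mutex *lock, size_t len)
{
	u8 *buf;
	int result;

	buf = kzalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	result = mutex_lock_interruptible(lock);
	if (result)
		goto err_free;		/* lock never taken: skip the unlock */

	if (len < 4) {			/* stand-in for a failed bulk transfer */
		result = -EIO;
		goto err_unlock;
	}

	/* ... the happy path falls through the same labels ... */
err_unlock:
	mutex_unlock(lock);
err_free:
	kfree(buf);
	return result;
}
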
diff --git a/drivers/media/usb/uvc/uvc_ctrl.c b/drivers/media/usb/uvc/uvc_ctrl.c
index b3dde98499f4..30bfe9069a1f 100644
--- a/drivers/media/usb/uvc/uvc_ctrl.c
+++ b/drivers/media/usb/uvc/uvc_ctrl.c
@@ -357,6 +357,11 @@ static const struct uvc_control_info uvc_ctrls[] = {
},
};
+static const u32 uvc_control_classes[] = {
+ V4L2_CID_CAMERA_CLASS,
+ V4L2_CID_USER_CLASS,
+};
+
static const struct uvc_menu_info power_line_frequency_controls[] = {
{ 0, "Disabled" },
{ 1, "50 Hz" },
@@ -427,7 +432,6 @@ static void uvc_ctrl_set_rel_speed(struct uvc_control_mapping *mapping,
static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
{
.id = V4L2_CID_BRIGHTNESS,
- .name = "Brightness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BRIGHTNESS_CONTROL,
.size = 16,
@@ -437,7 +441,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_CONTRAST,
- .name = "Contrast",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_CONTRAST_CONTROL,
.size = 16,
@@ -447,7 +450,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE,
- .name = "Hue",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_CONTROL,
.size = 16,
@@ -459,7 +461,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SATURATION,
- .name = "Saturation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SATURATION_CONTROL,
.size = 16,
@@ -469,7 +470,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_SHARPNESS,
- .name = "Sharpness",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_SHARPNESS_CONTROL,
.size = 16,
@@ -479,7 +479,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAMMA,
- .name = "Gamma",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAMMA_CONTROL,
.size = 16,
@@ -489,7 +488,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BACKLIGHT_COMPENSATION,
- .name = "Backlight Compensation",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_BACKLIGHT_COMPENSATION_CONTROL,
.size = 16,
@@ -499,7 +497,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_GAIN,
- .name = "Gain",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_GAIN_CONTROL,
.size = 16,
@@ -509,7 +506,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_POWER_LINE_FREQUENCY,
- .name = "Power Line Frequency",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_POWER_LINE_FREQUENCY_CONTROL,
.size = 2,
@@ -521,7 +517,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_HUE_AUTO,
- .name = "Hue, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_HUE_AUTO_CONTROL,
.size = 1,
@@ -532,7 +527,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO,
- .name = "Exposure, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_MODE_CONTROL,
.size = 4,
@@ -545,7 +539,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_AUTO_PRIORITY,
- .name = "Exposure, Auto Priority",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_AE_PRIORITY_CONTROL,
.size = 1,
@@ -555,7 +548,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_EXPOSURE_ABSOLUTE,
- .name = "Exposure (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_EXPOSURE_TIME_ABSOLUTE_CONTROL,
.size = 32,
@@ -567,7 +559,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Temperature, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_AUTO_CONTROL,
.size = 1,
@@ -578,7 +569,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_WHITE_BALANCE_TEMPERATURE,
- .name = "White Balance Temperature",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_TEMPERATURE_CONTROL,
.size = 16,
@@ -590,7 +580,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_AUTO_WHITE_BALANCE,
- .name = "White Balance Component, Auto",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_AUTO_CONTROL,
.size = 1,
@@ -602,7 +591,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_BLUE_BALANCE,
- .name = "White Balance Blue Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -614,7 +602,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_RED_BALANCE,
- .name = "White Balance Red Component",
.entity = UVC_GUID_UVC_PROCESSING,
.selector = UVC_PU_WHITE_BALANCE_COMPONENT_CONTROL,
.size = 16,
@@ -626,7 +613,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_ABSOLUTE,
- .name = "Focus (absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_ABSOLUTE_CONTROL,
.size = 16,
@@ -638,7 +624,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_FOCUS_AUTO,
- .name = "Focus, Auto",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_FOCUS_AUTO_CONTROL,
.size = 1,
@@ -649,7 +634,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_ABSOLUTE,
- .name = "Iris, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_ABSOLUTE_CONTROL,
.size = 16,
@@ -659,7 +643,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_IRIS_RELATIVE,
- .name = "Iris, Relative",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_IRIS_RELATIVE_CONTROL,
.size = 8,
@@ -669,7 +652,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_ABSOLUTE,
- .name = "Zoom, Absolute",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_ABSOLUTE_CONTROL,
.size = 16,
@@ -679,7 +661,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_ZOOM_CONTINUOUS,
- .name = "Zoom, Continuous",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_ZOOM_RELATIVE_CONTROL,
.size = 0,
@@ -691,7 +672,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_ABSOLUTE,
- .name = "Pan (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -701,7 +681,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_ABSOLUTE,
- .name = "Tilt (Absolute)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_ABSOLUTE_CONTROL,
.size = 32,
@@ -711,7 +690,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PAN_SPEED,
- .name = "Pan (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -723,7 +701,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_TILT_SPEED,
- .name = "Tilt (Speed)",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PANTILT_RELATIVE_CONTROL,
.size = 16,
@@ -735,7 +712,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_UVC_CAMERA,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -745,7 +721,6 @@ static const struct uvc_control_mapping uvc_ctrl_mappings[] = {
},
{
.id = V4L2_CID_PRIVACY,
- .name = "Privacy",
.entity = UVC_GUID_EXT_GPIO_CONTROLLER,
.selector = UVC_CT_PRIVACY_CONTROL,
.size = 1,
@@ -1024,6 +999,85 @@ static int __uvc_ctrl_get(struct uvc_video_chain *chain,
return 0;
}
+static int __uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id)
+{
+ bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
+ unsigned int i;
+
+ req_id &= V4L2_CTRL_ID_MASK;
+
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (!(chain->ctrl_class_bitmap & BIT(i)))
+ continue;
+ if (!find_next) {
+ if (uvc_control_classes[i] == req_id)
+ return i;
+ continue;
+ }
+ if (uvc_control_classes[i] > req_id &&
+ uvc_control_classes[i] < found_id)
+ return i;
+ }
+
+ return -ENODEV;
+}
+
+static int uvc_query_v4l2_class(struct uvc_video_chain *chain, u32 req_id,
+ u32 found_id, struct v4l2_queryctrl *v4l2_ctrl)
+{
+ int idx;
+
+ idx = __uvc_query_v4l2_class(chain, req_id, found_id);
+ if (idx < 0)
+ return -ENODEV;
+
+ memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
+ v4l2_ctrl->id = uvc_control_classes[idx];
+ strscpy(v4l2_ctrl->name, v4l2_ctrl_get_name(v4l2_ctrl->id),
+ sizeof(v4l2_ctrl->name));
+ v4l2_ctrl->type = V4L2_CTRL_TYPE_CTRL_CLASS;
+ v4l2_ctrl->flags = V4L2_CTRL_FLAG_WRITE_ONLY
+ | V4L2_CTRL_FLAG_READ_ONLY;
+ return 0;
+}
+
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl;
+
+ if (__uvc_query_v4l2_class(chain, v4l2_id, 0) >= 0)
+ return -EACCES;
+
+ ctrl = uvc_find_control(chain, v4l2_id, &mapping);
+ if (!ctrl)
+ return -EINVAL;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR) && read)
+ return -EACCES;
+
+ if (!(ctrl->info.flags & UVC_CTRL_FLAG_SET_CUR) && !read)
+ return -EACCES;
+
+ return 0;
+}
+
+static const char *uvc_map_get_name(const struct uvc_control_mapping *map)
+{
+ const char *name;
+
+ if (map->name)
+ return map->name;
+
+ name = v4l2_ctrl_get_name(map->id);
+ if (name)
+ return name;
+
+ return "Unknown Control";
+}
+
static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
struct uvc_control *ctrl,
struct uvc_control_mapping *mapping,
@@ -1037,7 +1091,8 @@ static int __uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
memset(v4l2_ctrl, 0, sizeof(*v4l2_ctrl));
v4l2_ctrl->id = mapping->id;
v4l2_ctrl->type = mapping->v4l2_type;
- strscpy(v4l2_ctrl->name, mapping->name, sizeof(v4l2_ctrl->name));
+ strscpy(v4l2_ctrl->name, uvc_map_get_name(mapping),
+ sizeof(v4l2_ctrl->name));
v4l2_ctrl->flags = 0;
if (!(ctrl->info.flags & UVC_CTRL_FLAG_GET_CUR))
@@ -1127,12 +1182,31 @@ int uvc_query_v4l2_ctrl(struct uvc_video_chain *chain,
if (ret < 0)
return -ERESTARTSYS;
+ /* Check if the ctrl is a known class */
+ if (!(v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL)) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, 0, v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ctrl = uvc_find_control(chain, v4l2_ctrl->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
goto done;
}
+ /*
+ * If we're enumerating controls with V4L2_CTRL_FLAG_NEXT_CTRL, check if
+ * a class should be inserted between the previous control and the one
+ * we have just found.
+ */
+ if (v4l2_ctrl->id & V4L2_CTRL_FLAG_NEXT_CTRL) {
+ ret = uvc_query_v4l2_class(chain, v4l2_ctrl->id, mapping->id,
+ v4l2_ctrl);
+ if (!ret)
+ goto done;
+ }
+
ret = __uvc_query_v4l2_ctrl(chain, ctrl, mapping, v4l2_ctrl);
done:
mutex_unlock(&chain->ctrl_mutex);
@@ -1426,6 +1500,11 @@ static int uvc_ctrl_add_event(struct v4l2_subscribed_event *sev, unsigned elems)
if (ret < 0)
return -ERESTARTSYS;
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0) {
+ ret = 0;
+ goto done;
+ }
+
ctrl = uvc_find_control(handle->chain, sev->id, &mapping);
if (ctrl == NULL) {
ret = -EINVAL;
@@ -1459,7 +1538,10 @@ static void uvc_ctrl_del_event(struct v4l2_subscribed_event *sev)
struct uvc_fh *handle = container_of(sev->fh, struct uvc_fh, vfh);
mutex_lock(&handle->chain->ctrl_mutex);
+ if (__uvc_query_v4l2_class(handle->chain, sev->id, 0) >= 0)
+ goto done;
list_del(&sev->node);
+done:
mutex_unlock(&handle->chain->ctrl_mutex);
}
@@ -1500,7 +1582,7 @@ int uvc_ctrl_begin(struct uvc_video_chain *chain)
}
static int uvc_ctrl_commit_entity(struct uvc_device *dev,
- struct uvc_entity *entity, int rollback)
+ struct uvc_entity *entity, int rollback, struct uvc_control **err_ctrl)
{
struct uvc_control *ctrl;
unsigned int i;
@@ -1542,31 +1624,59 @@ static int uvc_ctrl_commit_entity(struct uvc_device *dev,
ctrl->dirty = 0;
- if (ret < 0)
+ if (ret < 0) {
+ if (err_ctrl)
+ *err_ctrl = ctrl;
return ret;
+ }
}
return 0;
}
+static int uvc_ctrl_find_ctrl_idx(struct uvc_entity *entity,
+ struct v4l2_ext_controls *ctrls,
+ struct uvc_control *uvc_control)
+{
+ struct uvc_control_mapping *mapping;
+ struct uvc_control *ctrl_found;
+ unsigned int i;
+
+ if (!entity)
+ return ctrls->count;
+
+ for (i = 0; i < ctrls->count; i++) {
+ __uvc_find_control(entity, ctrls->controls[i].id, &mapping,
+ &ctrl_found, 0);
+ if (uvc_control == ctrl_found)
+ return i;
+ }
+
+ return ctrls->count;
+}
+
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
struct uvc_video_chain *chain = handle->chain;
+ struct uvc_control *err_ctrl;
struct uvc_entity *entity;
int ret = 0;
/* Find the control. */
list_for_each_entry(entity, &chain->entities, chain) {
- ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback);
+ ret = uvc_ctrl_commit_entity(chain->dev, entity, rollback,
+ &err_ctrl);
if (ret < 0)
goto done;
}
if (!rollback)
- uvc_ctrl_send_events(handle, xctrls, xctrls_count);
+ uvc_ctrl_send_events(handle, ctrls->controls, ctrls->count);
done:
+ if (ret < 0 && ctrls)
+ ctrls->error_idx = uvc_ctrl_find_ctrl_idx(entity, ctrls,
+ err_ctrl);
mutex_unlock(&chain->ctrl_mutex);
return ret;
}
@@ -1577,6 +1687,9 @@ int uvc_ctrl_get(struct uvc_video_chain *chain,
struct uvc_control *ctrl;
struct uvc_control_mapping *mapping;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -1596,6 +1709,9 @@ int uvc_ctrl_set(struct uvc_fh *handle,
s32 max;
int ret;
+ if (__uvc_query_v4l2_class(chain, xctrl->id, 0) >= 0)
+ return -EACCES;
+
ctrl = uvc_find_control(chain, xctrl->id, &mapping);
if (ctrl == NULL)
return -EINVAL;
@@ -2011,14 +2127,14 @@ int uvc_ctrl_restore_values(struct uvc_device *dev)
if (!ctrl->initialized || !ctrl->modified ||
(ctrl->info.flags & UVC_CTRL_FLAG_RESTORE) == 0)
continue;
- dev_info(&dev->udev->dev,
- "restoring control %pUl/%u/%u\n",
- ctrl->info.entity, ctrl->info.index,
- ctrl->info.selector);
+ dev_dbg(&dev->udev->dev,
+ "restoring control %pUl/%u/%u\n",
+ ctrl->info.entity, ctrl->info.index,
+ ctrl->info.selector);
ctrl->dirty = 1;
}
- ret = uvc_ctrl_commit_entity(dev, entity, 0);
+ ret = uvc_ctrl_commit_entity(dev, entity, 0, NULL);
if (ret < 0)
return ret;
}
@@ -2057,11 +2173,12 @@ static int uvc_ctrl_add_info(struct uvc_device *dev, struct uvc_control *ctrl,
/*
* Add a control mapping to a given control.
*/
-static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
+static int __uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
struct uvc_control *ctrl, const struct uvc_control_mapping *mapping)
{
struct uvc_control_mapping *map;
unsigned int size;
+ unsigned int i;
/* Most mappings come from static kernel data and need to be duplicated.
* Mappings that come from userspace will be unnecessarily duplicated,
@@ -2085,9 +2202,18 @@ static int __uvc_ctrl_add_mapping(struct uvc_device *dev,
if (map->set == NULL)
map->set = uvc_set_le_value;
+ for (i = 0; i < ARRAY_SIZE(uvc_control_classes); i++) {
+ if (V4L2_CTRL_ID2WHICH(uvc_control_classes[i]) ==
+ V4L2_CTRL_ID2WHICH(map->id)) {
+ chain->ctrl_class_bitmap |= BIT(i);
+ break;
+ }
+ }
+
list_add_tail(&map->list, &ctrl->info.mappings);
- uvc_dbg(dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
- map->name, ctrl->info.entity, ctrl->info.selector);
+ uvc_dbg(chain->dev, CONTROL, "Adding mapping '%s' to control %pUl/%u\n",
+ uvc_map_get_name(map), ctrl->info.entity,
+ ctrl->info.selector);
return 0;
}
@@ -2105,7 +2231,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id & ~V4L2_CTRL_ID_MASK) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x is invalid\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
return -EINVAL;
}
@@ -2152,7 +2278,7 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
if (mapping->id == map->id) {
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', control id 0x%08x already exists\n",
- mapping->name, mapping->id);
+ uvc_map_get_name(mapping), mapping->id);
ret = -EEXIST;
goto done;
}
@@ -2163,12 +2289,12 @@ int uvc_ctrl_add_mapping(struct uvc_video_chain *chain,
atomic_dec(&dev->nmappings);
uvc_dbg(dev, CONTROL,
"Can't add mapping '%s', maximum mappings count (%u) exceeded\n",
- mapping->name, UVC_MAX_CONTROL_MAPPINGS);
+ uvc_map_get_name(mapping), UVC_MAX_CONTROL_MAPPINGS);
ret = -ENOMEM;
goto done;
}
- ret = __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ ret = __uvc_ctrl_add_mapping(chain, ctrl, mapping);
if (ret < 0)
atomic_dec(&dev->nmappings);
@@ -2244,7 +2370,8 @@ static void uvc_ctrl_prune_entity(struct uvc_device *dev,
* Add control information and hardcoded stock control mappings to the given
* device.
*/
-static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
+static void uvc_ctrl_init_ctrl(struct uvc_video_chain *chain,
+ struct uvc_control *ctrl)
{
const struct uvc_control_info *info = uvc_ctrls;
const struct uvc_control_info *iend = info + ARRAY_SIZE(uvc_ctrls);
@@ -2263,14 +2390,14 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; info < iend; ++info) {
if (uvc_entity_match_guid(ctrl->entity, info->entity) &&
ctrl->index == info->index) {
- uvc_ctrl_add_info(dev, ctrl, info);
+ uvc_ctrl_add_info(chain->dev, ctrl, info);
/*
* Retrieve control flags from the device. Ignore errors
* and work with default flag values from the uvc_ctrl
* array when the device doesn't properly implement
* GET_INFO on standard controls.
*/
- uvc_ctrl_get_flags(dev, ctrl, &ctrl->info);
+ uvc_ctrl_get_flags(chain->dev, ctrl, &ctrl->info);
break;
}
}
@@ -2281,22 +2408,20 @@ static void uvc_ctrl_init_ctrl(struct uvc_device *dev, struct uvc_control *ctrl)
for (; mapping < mend; ++mapping) {
if (uvc_entity_match_guid(ctrl->entity, mapping->entity) &&
ctrl->info.selector == mapping->selector)
- __uvc_ctrl_add_mapping(dev, ctrl, mapping);
+ __uvc_ctrl_add_mapping(chain, ctrl, mapping);
}
}
/*
* Initialize device controls.
*/
-int uvc_ctrl_init_device(struct uvc_device *dev)
+static int uvc_ctrl_init_chain(struct uvc_video_chain *chain)
{
struct uvc_entity *entity;
unsigned int i;
- INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
-
/* Walk the entities list and instantiate controls */
- list_for_each_entry(entity, &dev->entities, list) {
+ list_for_each_entry(entity, &chain->entities, chain) {
struct uvc_control *ctrl;
unsigned int bControlSize = 0, ncontrols;
u8 *bmControls = NULL;
@@ -2316,7 +2441,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
}
/* Remove bogus/blacklisted controls */
- uvc_ctrl_prune_entity(dev, entity);
+ uvc_ctrl_prune_entity(chain->dev, entity);
/* Count supported controls and allocate the controls array */
ncontrols = memweight(bmControls, bControlSize);
@@ -2338,7 +2463,7 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
ctrl->entity = entity;
ctrl->index = i;
- uvc_ctrl_init_ctrl(dev, ctrl);
+ uvc_ctrl_init_ctrl(chain, ctrl);
ctrl++;
}
}
@@ -2346,6 +2471,22 @@ int uvc_ctrl_init_device(struct uvc_device *dev)
return 0;
}
+int uvc_ctrl_init_device(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ int ret;
+
+ INIT_WORK(&dev->async_ctrl.work, uvc_ctrl_status_event_work);
+
+ list_for_each_entry(chain, &dev->chains, list) {
+ ret = uvc_ctrl_init_chain(chain);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
/*
* Cleanup device controls.
*/
@@ -2357,6 +2498,7 @@ static void uvc_ctrl_cleanup_mappings(struct uvc_device *dev,
list_for_each_entry_safe(mapping, nm, &ctrl->info.mappings, list) {
list_del(&mapping->list);
kfree(mapping->menu_info);
+ kfree(mapping->name);
kfree(mapping);
}
}
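
The core of the UVC changes above is the synthetic class-control lookup: each chain records in ctrl_class_bitmap which V4L2 control classes its mappings touch, and __uvc_query_v4l2_class() reports a class entry either on an exact ID match or, under V4L2_CTRL_FLAG_NEXT_CTRL, when a class ID falls between the requested ID and the next real control. A self-contained sketch of that lookup, built against the UAPI header with a hard-coded bitmap (names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>
#include <linux/videodev2.h>

static const __u32 classes[] = { V4L2_CID_CAMERA_CLASS, V4L2_CID_USER_CLASS };

/* Return the matching class index, or -1 when no class applies. */
static int query_class(__u32 req_id, __u32 found_id, unsigned int bitmap)
{
	bool find_next = req_id & V4L2_CTRL_FLAG_NEXT_CTRL;
	unsigned int i;

	req_id &= V4L2_CTRL_ID_MASK;

	for (i = 0; i < sizeof(classes) / sizeof(classes[0]); i++) {
		if (!(bitmap & (1u << i)))
			continue;	/* class not present on this chain */
		if (!find_next) {
			if (classes[i] == req_id)
				return i;
			continue;
		}
		/* NEXT_CTRL: insert the class before the next real control. */
		if (classes[i] > req_id && classes[i] < found_id)
			return i;
	}
	return -1;
}

int main(void)
{
	__u32 req = (V4L2_CID_CAMERA_CLASS - 1) | V4L2_CTRL_FLAG_NEXT_CTRL;

	/* Both classes advertised (bitmap 0x3); expect index 0 (camera). */
	printf("%d\n", query_class(req, V4L2_CID_CAMERA_CLASS + 10, 0x3));
	return 0;
}
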
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 9a791d8ef200..7c007426e082 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -16,7 +16,6 @@
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
-#include <linux/version.h>
#include <asm/unaligned.h>
#include <media/v4l2-common.h>
@@ -2194,6 +2193,7 @@ int uvc_register_video_device(struct uvc_device *dev,
const struct v4l2_file_operations *fops,
const struct v4l2_ioctl_ops *ioctl_ops)
{
+ const char *name;
int ret;
/* Initialize the video buffers queue. */
@@ -2222,16 +2222,20 @@ int uvc_register_video_device(struct uvc_device *dev,
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
default:
vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Video Capture";
break;
case V4L2_BUF_TYPE_VIDEO_OUTPUT:
vdev->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ name = "Video Output";
break;
case V4L2_BUF_TYPE_META_CAPTURE:
vdev->device_caps = V4L2_CAP_META_CAPTURE | V4L2_CAP_STREAMING;
+ name = "Metadata";
break;
}
- strscpy(vdev->name, dev->name, sizeof(vdev->name));
+ snprintf(vdev->name, sizeof(vdev->name), "%s %u", name,
+ stream->header.bTerminalLink);
/*
* Set the driver data before calling video_register_device, otherwise
@@ -2455,14 +2459,14 @@ static int uvc_probe(struct usb_interface *intf,
if (v4l2_device_register(&intf->dev, &dev->vdev) < 0)
goto error;
- /* Initialize controls. */
- if (uvc_ctrl_init_device(dev) < 0)
- goto error;
-
/* Scan the device for video chains. */
if (uvc_scan_device(dev) < 0)
goto error;
+ /* Initialize controls. */
+ if (uvc_ctrl_init_device(dev) < 0)
+ goto error;
+
/* Register video device nodes. */
if (uvc_register_chains(dev) < 0)
goto error;
diff --git a/drivers/media/usb/uvc/uvc_metadata.c b/drivers/media/usb/uvc/uvc_metadata.c
index b6279ad7ac84..82de7781f5b6 100644
--- a/drivers/media/usb/uvc/uvc_metadata.c
+++ b/drivers/media/usb/uvc/uvc_metadata.c
@@ -30,7 +30,7 @@ static int uvc_meta_v4l2_querycap(struct file *file, void *fh,
struct uvc_video_chain *chain = stream->chain;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vfh->vdev->name, sizeof(cap->card));
+ strscpy(cap->card, stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 6acb8013de08..f4e4aff8ddf7 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -40,7 +40,13 @@ static int uvc_ioctl_ctrl_map(struct uvc_video_chain *chain,
return -ENOMEM;
map->id = xmap->id;
- memcpy(map->name, xmap->name, sizeof(map->name));
+ /* Non-standard control ID. */
+ if (v4l2_ctrl_get_name(map->id) == NULL) {
+ map->name = kmemdup(xmap->name, sizeof(xmap->name),
+ GFP_KERNEL);
+ if (!map->name)
+ return -ENOMEM;
+ }
memcpy(map->entity, xmap->entity, sizeof(map->entity));
map->selector = xmap->selector;
map->size = xmap->size;
@@ -472,10 +478,13 @@ static int uvc_v4l2_set_streamparm(struct uvc_streaming *stream,
uvc_simplify_fraction(&timeperframe.numerator,
&timeperframe.denominator, 8, 333);
- if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
+ if (parm->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
parm->parm.capture.timeperframe = timeperframe;
- else
+ parm->parm.capture.capability = V4L2_CAP_TIMEPERFRAME;
+ } else {
parm->parm.output.timeperframe = timeperframe;
+ parm->parm.output.capability = V4L2_CAP_TIMEPERFRAME;
+ }
return 0;
}
@@ -614,13 +623,12 @@ static int uvc_v4l2_release(struct file *file)
static int uvc_ioctl_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct video_device *vdev = video_devdata(file);
struct uvc_fh *handle = file->private_data;
struct uvc_video_chain *chain = handle->chain;
struct uvc_streaming *stream = handle->stream;
strscpy(cap->driver, "uvcvideo", sizeof(cap->driver));
- strscpy(cap->card, vdev->name, sizeof(cap->card));
+ strscpy(cap->card, handle->stream->dev->name, sizeof(cap->card));
usb_make_path(stream->dev->udev, cap->bus_info, sizeof(cap->bus_info));
cap->capabilities = V4L2_CAP_DEVICE_CAPS | V4L2_CAP_STREAMING
| chain->caps;
@@ -995,58 +1003,24 @@ static int uvc_ioctl_query_ext_ctrl(struct file *file, void *fh,
return 0;
}
-static int uvc_ioctl_g_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
+static int uvc_ctrl_check_access(struct uvc_video_chain *chain,
+ struct v4l2_ext_controls *ctrls,
+ unsigned long ioctl)
{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
-
- ret = uvc_ctrl_get(chain, &xctrl);
- uvc_ctrl_rollback(handle);
- if (ret < 0)
- return ret;
-
- ctrl->value = xctrl.value;
- return 0;
-}
-
-static int uvc_ioctl_s_ctrl(struct file *file, void *fh,
- struct v4l2_control *ctrl)
-{
- struct uvc_fh *handle = fh;
- struct uvc_video_chain *chain = handle->chain;
- struct v4l2_ext_control xctrl;
- int ret;
-
- memset(&xctrl, 0, sizeof(xctrl));
- xctrl.id = ctrl->id;
- xctrl.value = ctrl->value;
-
- ret = uvc_ctrl_begin(chain);
- if (ret < 0)
- return ret;
+ struct v4l2_ext_control *ctrl = ctrls->controls;
+ unsigned int i;
+ int ret = 0;
- ret = uvc_ctrl_set(handle, &xctrl);
- if (ret < 0) {
- uvc_ctrl_rollback(handle);
- return ret;
+ for (i = 0; i < ctrls->count; ++ctrl, ++i) {
+ ret = uvc_ctrl_is_accessible(chain, ctrl->id,
+ ioctl == VIDIOC_G_EXT_CTRLS);
+ if (ret)
+ break;
}
- ret = uvc_ctrl_commit(handle, &xctrl, 1);
- if (ret < 0)
- return ret;
+ ctrls->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i : ctrls->count;
- ctrl->value = xctrl.value;
- return 0;
+ return ret;
}
static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
@@ -1058,6 +1032,10 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
unsigned int i;
int ret;
+ ret = uvc_ctrl_check_access(chain, ctrls, VIDIOC_G_EXT_CTRLS);
+ if (ret < 0)
+ return ret;
+
if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL) {
for (i = 0; i < ctrls->count; ++ctrl, ++i) {
struct v4l2_queryctrl qc = { .id = ctrl->id };
@@ -1094,16 +1072,16 @@ static int uvc_ioctl_g_ext_ctrls(struct file *file, void *fh,
static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
struct v4l2_ext_controls *ctrls,
- bool commit)
+ unsigned long ioctl)
{
struct v4l2_ext_control *ctrl = ctrls->controls;
struct uvc_video_chain *chain = handle->chain;
unsigned int i;
int ret;
- /* Default value cannot be changed */
- if (ctrls->which == V4L2_CTRL_WHICH_DEF_VAL)
- return -EINVAL;
+ ret = uvc_ctrl_check_access(chain, ctrls, ioctl);
+ if (ret < 0)
+ return ret;
ret = uvc_ctrl_begin(chain);
if (ret < 0)
@@ -1113,15 +1091,16 @@ static int uvc_ioctl_s_try_ext_ctrls(struct uvc_fh *handle,
ret = uvc_ctrl_set(handle, ctrl);
if (ret < 0) {
uvc_ctrl_rollback(handle);
- ctrls->error_idx = commit ? ctrls->count : i;
+ ctrls->error_idx = ioctl == VIDIOC_S_EXT_CTRLS ?
+ ctrls->count : i;
return ret;
}
}
ctrls->error_idx = 0;
- if (commit)
- return uvc_ctrl_commit(handle, ctrls->controls, ctrls->count);
+ if (ioctl == VIDIOC_S_EXT_CTRLS)
+ return uvc_ctrl_commit(handle, ctrls);
else
return uvc_ctrl_rollback(handle);
}
@@ -1131,7 +1110,7 @@ static int uvc_ioctl_s_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, true);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_S_EXT_CTRLS);
}
static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
@@ -1139,7 +1118,7 @@ static int uvc_ioctl_try_ext_ctrls(struct file *file, void *fh,
{
struct uvc_fh *handle = fh;
- return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, false);
+ return uvc_ioctl_s_try_ext_ctrls(handle, ctrls, VIDIOC_TRY_EXT_CTRLS);
}
static int uvc_ioctl_querymenu(struct file *file, void *fh,
@@ -1538,8 +1517,6 @@ const struct v4l2_ioctl_ops uvc_ioctl_ops = {
.vidioc_s_input = uvc_ioctl_s_input,
.vidioc_queryctrl = uvc_ioctl_queryctrl,
.vidioc_query_ext_ctrl = uvc_ioctl_query_ext_ctrl,
- .vidioc_g_ctrl = uvc_ioctl_g_ctrl,
- .vidioc_s_ctrl = uvc_ioctl_s_ctrl,
.vidioc_g_ext_ctrls = uvc_ioctl_g_ext_ctrls,
.vidioc_s_ext_ctrls = uvc_ioctl_s_ext_ctrls,
.vidioc_try_ext_ctrls = uvc_ioctl_try_ext_ctrls,
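
With the dedicated VIDIOC_G_CTRL/S_CTRL handlers gone, all control I/O funnels through the ext-controls ioctls, and uvc_ctrl_check_access() sets error_idx per the V4L2 contract: when validation fails, G/S report count while TRY points at the offending control. A short userspace sketch of how a caller reads that back (hypothetical wrapper name):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/*
 * Illustrative caller: inspect error_idx after VIDIOC_TRY_EXT_CTRLS.  A
 * value below count names the rejected control; count itself means the
 * request failed validation as a whole (the convention the G/S paths
 * above use).
 */
static int try_controls(int fd, struct v4l2_ext_control *array,
			unsigned int count)
{
	struct v4l2_ext_controls ctrls;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = count;
	ctrls.controls = array;

	if (ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) < 0) {
		if (ctrls.error_idx < count)
			fprintf(stderr, "control index %u rejected\n",
				ctrls.error_idx);
		return -1;
	}
	return 0;
}
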
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index e16464606b14..9f37eaf28ce7 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -115,6 +115,11 @@ int uvc_query_ctrl(struct uvc_device *dev, u8 query, u8 unit,
case 5: /* Invalid unit */
case 6: /* Invalid control */
case 7: /* Invalid Request */
+ /*
+ * The firmware has not properly implemented
+ * the control or there has been a HW error.
+ */
+ return -EIO;
case 8: /* Invalid value within range */
return -EINVAL;
default: /* reserved or unknown */
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index cce5e38133cd..2e5366143b81 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -241,7 +241,7 @@ struct uvc_control_mapping {
struct list_head ev_subs;
u32 id;
- u8 name[32];
+ char *name;
u8 entity[16];
u8 selector;
@@ -476,6 +476,7 @@ struct uvc_video_chain {
struct v4l2_prio_state prio; /* V4L2 priority state */
u32 caps; /* V4L2 chain-wide caps */
+ u8 ctrl_class_bitmap; /* Bitmap of valid classes */
};
struct uvc_stats_frame {
@@ -523,7 +524,7 @@ struct uvc_stats_stream {
unsigned int max_sof; /* Maximum STC.SOF value */
};
-#define UVC_METADATA_BUF_SIZE 1024
+#define UVC_METADATA_BUF_SIZE 10240
/**
* struct uvc_copy_op: Context structure to schedule asynchronous memcpy
@@ -885,21 +886,21 @@ void uvc_ctrl_status_event(struct uvc_video_chain *chain,
int uvc_ctrl_begin(struct uvc_video_chain *chain);
int __uvc_ctrl_commit(struct uvc_fh *handle, int rollback,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count);
+ struct v4l2_ext_controls *ctrls);
static inline int uvc_ctrl_commit(struct uvc_fh *handle,
- const struct v4l2_ext_control *xctrls,
- unsigned int xctrls_count)
+ struct v4l2_ext_controls *ctrls)
{
- return __uvc_ctrl_commit(handle, 0, xctrls, xctrls_count);
+ return __uvc_ctrl_commit(handle, 0, ctrls);
}
static inline int uvc_ctrl_rollback(struct uvc_fh *handle)
{
- return __uvc_ctrl_commit(handle, 1, NULL, 0);
+ return __uvc_ctrl_commit(handle, 1, NULL);
}
int uvc_ctrl_get(struct uvc_video_chain *chain, struct v4l2_ext_control *xctrl);
int uvc_ctrl_set(struct uvc_fh *handle, struct v4l2_ext_control *xctrl);
+int uvc_ctrl_is_accessible(struct uvc_video_chain *chain, u32 v4l2_id,
+ bool read);
int uvc_xu_ctrl_query(struct uvc_video_chain *chain,
struct uvc_xu_control_query *xqry);
diff --git a/drivers/media/v4l2-core/v4l2-async.c b/drivers/media/v4l2-core/v4l2-async.c
index cd9e78c63791..0404267f1ae4 100644
--- a/drivers/media/v4l2-core/v4l2-async.c
+++ b/drivers/media/v4l2-core/v4l2-async.c
@@ -24,9 +24,9 @@
#include <media/v4l2-fwnode.h>
#include <media/v4l2-subdev.h>
-static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static int v4l2_async_nf_call_bound(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->bound)
return 0;
@@ -34,9 +34,9 @@ static int v4l2_async_notifier_call_bound(struct v4l2_async_notifier *n,
return n->ops->bound(n, subdev, asd);
}
-static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
- struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd)
+static void v4l2_async_nf_call_unbind(struct v4l2_async_notifier *n,
+ struct v4l2_subdev *subdev,
+ struct v4l2_async_subdev *asd)
{
if (!n->ops || !n->ops->unbind)
return;
@@ -44,7 +44,7 @@ static void v4l2_async_notifier_call_unbind(struct v4l2_async_notifier *n,
n->ops->unbind(n, subdev, asd);
}
-static int v4l2_async_notifier_call_complete(struct v4l2_async_notifier *n)
+static int v4l2_async_nf_call_complete(struct v4l2_async_notifier *n)
{
if (!n->ops || !n->ops->complete)
return 0;
@@ -215,7 +215,7 @@ v4l2_async_find_subdev_notifier(struct v4l2_subdev *sd)
/* Get v4l2_device related to the notifier if one can be found. */
static struct v4l2_device *
-v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_find_v4l2_dev(struct v4l2_async_notifier *notifier)
{
while (notifier->parent)
notifier = notifier->parent;
@@ -227,7 +227,7 @@ v4l2_async_notifier_find_v4l2_dev(struct v4l2_async_notifier *notifier)
* Return true if all child sub-device notifiers are complete, false otherwise.
*/
static bool
-v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_can_complete(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd;
@@ -239,7 +239,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier &&
- !v4l2_async_notifier_can_complete(subdev_notifier))
+ !v4l2_async_nf_can_complete(subdev_notifier))
return false;
}
@@ -251,7 +251,7 @@ v4l2_async_notifier_can_complete(struct v4l2_async_notifier *notifier)
* sub-devices have been bound; v4l2_device is also available then.
*/
static int
-v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_complete(struct v4l2_async_notifier *notifier)
{
/* Quick check whether there are still more sub-devices here. */
if (!list_empty(&notifier->waiting))
@@ -266,14 +266,14 @@ v4l2_async_notifier_try_complete(struct v4l2_async_notifier *notifier)
return 0;
/* Is everything ready? */
- if (!v4l2_async_notifier_can_complete(notifier))
+ if (!v4l2_async_nf_can_complete(notifier))
return 0;
- return v4l2_async_notifier_call_complete(notifier);
+ return v4l2_async_nf_call_complete(notifier);
}
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier);
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier);
static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
struct v4l2_device *v4l2_dev,
@@ -287,7 +287,7 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
if (ret < 0)
return ret;
- ret = v4l2_async_notifier_call_bound(notifier, sd, asd);
+ ret = v4l2_async_nf_call_bound(notifier, sd, asd);
if (ret < 0) {
v4l2_device_unregister_subdev(sd);
return ret;
@@ -315,15 +315,15 @@ static int v4l2_async_match_notify(struct v4l2_async_notifier *notifier,
*/
subdev_notifier->parent = notifier;
- return v4l2_async_notifier_try_all_subdevs(subdev_notifier);
+ return v4l2_async_nf_try_all_subdevs(subdev_notifier);
}
/* Test all async sub-devices in a notifier for a match. */
static int
-v4l2_async_notifier_try_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_try_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_subdev *sd;
if (!v4l2_dev)
@@ -367,7 +367,7 @@ static void v4l2_async_cleanup(struct v4l2_subdev *sd)
/* Unbind all sub-devices in the notifier tree. */
static void
-v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
{
struct v4l2_subdev *sd, *tmp;
@@ -376,9 +376,9 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
list_move(&sd->async_list, &subdev_list);
@@ -389,8 +389,8 @@ v4l2_async_notifier_unbind_all_subdevs(struct v4l2_async_notifier *notifier)
/* See if an async sub-device can be found in a notifier's lists. */
static bool
-__v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+__v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
struct v4l2_async_subdev *asd_y;
struct v4l2_subdev *sd;
@@ -416,9 +416,8 @@ __v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
* If @this_index < 0, search the notifier's entire @asd_list.
*/
static bool
-v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+v4l2_async_nf_has_async_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd, int this_index)
{
struct v4l2_async_subdev *asd_y;
int j = 0;
@@ -435,15 +434,15 @@ v4l2_async_notifier_has_async_subdev(struct v4l2_async_notifier *notifier,
/* Check that an asd does not exist in other notifiers. */
list_for_each_entry(notifier, &notifier_list, list)
- if (__v4l2_async_notifier_has_async_subdev(notifier, asd))
+ if (__v4l2_async_nf_has_async_subdev(notifier, asd))
return true;
return false;
}
-static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd,
- int this_index)
+static int v4l2_async_nf_asd_valid(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd,
+ int this_index)
{
struct device *dev =
notifier->v4l2_dev ? notifier->v4l2_dev->dev : NULL;
@@ -454,8 +453,7 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
switch (asd->match_type) {
case V4L2_ASYNC_MATCH_I2C:
case V4L2_ASYNC_MATCH_FWNODE:
- if (v4l2_async_notifier_has_async_subdev(notifier, asd,
- this_index)) {
+ if (v4l2_async_nf_has_async_subdev(notifier, asd, this_index)) {
dev_dbg(dev, "subdev descriptor already listed in this or other notifiers\n");
return -EEXIST;
}
@@ -469,13 +467,13 @@ static int v4l2_async_notifier_asd_valid(struct v4l2_async_notifier *notifier,
return 0;
}
-void v4l2_async_notifier_init(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier)
{
INIT_LIST_HEAD(&notifier->asd_list);
}
-EXPORT_SYMBOL(v4l2_async_notifier_init);
+EXPORT_SYMBOL(v4l2_async_nf_init);
-static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
+static int __v4l2_async_nf_register(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd;
int ret, i = 0;
@@ -486,18 +484,18 @@ static int __v4l2_async_notifier_register(struct v4l2_async_notifier *notifier)
mutex_lock(&list_lock);
list_for_each_entry(asd, &notifier->asd_list, asd_list) {
- ret = v4l2_async_notifier_asd_valid(notifier, asd, i++);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, i++);
if (ret)
goto err_unlock;
list_add_tail(&asd->list, &notifier->waiting);
}
- ret = v4l2_async_notifier_try_all_subdevs(notifier);
+ ret = v4l2_async_nf_try_all_subdevs(notifier);
if (ret < 0)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret < 0)
goto err_unbind;
@@ -512,7 +510,7 @@ err_unbind:
/*
* On failure, unbind all sub-devices registered through this notifier.
*/
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
err_unlock:
mutex_unlock(&list_lock);
@@ -520,8 +518,8 @@ err_unlock:
return ret;
}
-int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -530,16 +528,16 @@ int v4l2_async_notifier_register(struct v4l2_device *v4l2_dev,
notifier->v4l2_dev = v4l2_dev;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->v4l2_dev = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_notifier_register);
+EXPORT_SYMBOL(v4l2_async_nf_register);
-int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier)
+int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
+ struct v4l2_async_notifier *notifier)
{
int ret;
@@ -548,21 +546,21 @@ int v4l2_async_subdev_notifier_register(struct v4l2_subdev *sd,
notifier->sd = sd;
- ret = __v4l2_async_notifier_register(notifier);
+ ret = __v4l2_async_nf_register(notifier);
if (ret)
notifier->sd = NULL;
return ret;
}
-EXPORT_SYMBOL(v4l2_async_subdev_notifier_register);
+EXPORT_SYMBOL(v4l2_async_subdev_nf_register);
static void
-__v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+__v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
if (!notifier || (!notifier->v4l2_dev && !notifier->sd))
return;
- v4l2_async_notifier_unbind_all_subdevs(notifier);
+ v4l2_async_nf_unbind_all_subdevs(notifier);
notifier->sd = NULL;
notifier->v4l2_dev = NULL;
@@ -570,17 +568,17 @@ __v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
list_del(&notifier->list);
}
-void v4l2_async_notifier_unregister(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(notifier);
+ __v4l2_async_nf_unregister(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL(v4l2_async_notifier_unregister);
+EXPORT_SYMBOL(v4l2_async_nf_unregister);
-static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+static void __v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
struct v4l2_async_subdev *asd, *tmp;
@@ -601,24 +599,24 @@ static void __v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
}
}
-void v4l2_async_notifier_cleanup(struct v4l2_async_notifier *notifier)
+void v4l2_async_nf_cleanup(struct v4l2_async_notifier *notifier)
{
mutex_lock(&list_lock);
- __v4l2_async_notifier_cleanup(notifier);
+ __v4l2_async_nf_cleanup(notifier);
mutex_unlock(&list_lock);
}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_cleanup);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_cleanup);
-int __v4l2_async_notifier_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd)
+int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
+ struct v4l2_async_subdev *asd)
{
int ret;
mutex_lock(&list_lock);
- ret = v4l2_async_notifier_asd_valid(notifier, asd, -1);
+ ret = v4l2_async_nf_asd_valid(notifier, asd, -1);
if (ret)
goto unlock;
@@ -628,12 +626,12 @@ unlock:
mutex_unlock(&list_lock);
return ret;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_subdev);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
- struct fwnode_handle *fwnode,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *fwnode,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -645,7 +643,7 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
asd->match_type = V4L2_ASYNC_MATCH_FWNODE;
asd->match.fwnode = fwnode_handle_get(fwnode);
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
fwnode_handle_put(fwnode);
kfree(asd);
@@ -654,12 +652,12 @@ __v4l2_async_notifier_add_fwnode_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
struct fwnode_handle *remote;
@@ -668,21 +666,19 @@ __v4l2_async_notifier_add_fwnode_remote_subdev(struct v4l2_async_notifier *notif
if (!remote)
return ERR_PTR(-ENOTCONN);
- asd = __v4l2_async_notifier_add_fwnode_subdev(notif, remote,
- asd_struct_size);
+ asd = __v4l2_async_nf_add_fwnode(notif, remote, asd_struct_size);
/*
- * Calling __v4l2_async_notifier_add_fwnode_subdev grabs a refcount,
+ * Calling __v4l2_async_nf_add_fwnode grabs a refcount,
* so drop the one we got in fwnode_graph_get_remote_port_parent.
*/
fwnode_handle_put(remote);
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_fwnode_remote_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_fwnode_remote);
struct v4l2_async_subdev *
-__v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
- int adapter_id, unsigned short address,
- unsigned int asd_struct_size)
+__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier, int adapter_id,
+ unsigned short address, unsigned int asd_struct_size)
{
struct v4l2_async_subdev *asd;
int ret;
@@ -695,7 +691,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
asd->match.i2c.adapter_id = adapter_id;
asd->match.i2c.address = address;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret) {
kfree(asd);
return ERR_PTR(ret);
@@ -703,7 +699,7 @@ __v4l2_async_notifier_add_i2c_subdev(struct v4l2_async_notifier *notifier,
return asd;
}
-EXPORT_SYMBOL_GPL(__v4l2_async_notifier_add_i2c_subdev);
+EXPORT_SYMBOL_GPL(__v4l2_async_nf_add_i2c);
int v4l2_async_register_subdev(struct v4l2_subdev *sd)
{
@@ -725,7 +721,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
list_for_each_entry(notifier, &notifier_list, list) {
struct v4l2_device *v4l2_dev =
- v4l2_async_notifier_find_v4l2_dev(notifier);
+ v4l2_async_nf_find_v4l2_dev(notifier);
struct v4l2_async_subdev *asd;
if (!v4l2_dev)
@@ -739,7 +735,7 @@ int v4l2_async_register_subdev(struct v4l2_subdev *sd)
if (ret)
goto err_unbind;
- ret = v4l2_async_notifier_try_complete(notifier);
+ ret = v4l2_async_nf_try_complete(notifier);
if (ret)
goto err_unbind;
@@ -761,10 +757,10 @@ err_unbind:
*/
subdev_notifier = v4l2_async_find_subdev_notifier(sd);
if (subdev_notifier)
- v4l2_async_notifier_unbind_all_subdevs(subdev_notifier);
+ v4l2_async_nf_unbind_all_subdevs(subdev_notifier);
if (sd->asd)
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
v4l2_async_cleanup(sd);
mutex_unlock(&list_lock);
@@ -780,8 +776,8 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
mutex_lock(&list_lock);
- __v4l2_async_notifier_unregister(sd->subdev_notifier);
- __v4l2_async_notifier_cleanup(sd->subdev_notifier);
+ __v4l2_async_nf_unregister(sd->subdev_notifier);
+ __v4l2_async_nf_cleanup(sd->subdev_notifier);
kfree(sd->subdev_notifier);
sd->subdev_notifier = NULL;
@@ -790,7 +786,7 @@ void v4l2_async_unregister_subdev(struct v4l2_subdev *sd)
list_add(&sd->asd->list, &notifier->waiting);
- v4l2_async_notifier_call_unbind(notifier, sd, sd->asd);
+ v4l2_async_nf_call_unbind(notifier, sd, sd->asd);
}
v4l2_async_cleanup(sd);
@@ -825,7 +821,7 @@ static void print_waiting_subdev(struct seq_file *s,
}
static const char *
-v4l2_async_notifier_name(struct v4l2_async_notifier *notifier)
+v4l2_async_nf_name(struct v4l2_async_notifier *notifier)
{
if (notifier->v4l2_dev)
return notifier->v4l2_dev->name;
@@ -843,7 +839,7 @@ static int pending_subdevs_show(struct seq_file *s, void *data)
mutex_lock(&list_lock);
list_for_each_entry(notif, &notifier_list, list) {
- seq_printf(s, "%s:\n", v4l2_async_notifier_name(notif));
+ seq_printf(s, "%s:\n", v4l2_async_nf_name(notif));
list_for_each_entry(asd, &notif->waiting, list)
print_waiting_subdev(s, asd);
}
diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c
index 04af03285a20..df34b2a283bc 100644
--- a/drivers/media/v4l2-core/v4l2-common.c
+++ b/drivers/media/v4l2-core/v4l2-common.c
@@ -275,6 +275,9 @@ const struct v4l2_format_info *v4l2_format_info(u32 format)
{ .format = V4L2_PIX_FMT_YUV422P, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 1 },
{ .format = V4L2_PIX_FMT_GREY, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 1, .bpp = { 1, 0, 0, 0 }, .hdiv = 1, .vdiv = 1 },
+ /* Tiled YUV formats */
+ { .format = V4L2_PIX_FMT_NV12_4L4, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 1, .comp_planes = 2, .bpp = { 1, 2, 0, 0 }, .hdiv = 2, .vdiv = 2 },
+
/* YUV planar formats, non contiguous variant */
{ .format = V4L2_PIX_FMT_YUV420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
{ .format = V4L2_PIX_FMT_YVU420M, .pixel_enc = V4L2_PIXEL_ENC_YUV, .mem_planes = 3, .comp_planes = 3, .bpp = { 1, 1, 1, 0 }, .hdiv = 2, .vdiv = 2 },
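
The new entry describes NV12 in 4x4 tiles as a single memory plane carrying two colour planes with bpp {1, 2} and 2x2 chroma subsampling. A hedged sketch of how those fields translate into per-plane byte counts; real drivers must additionally pad the dimensions to the tile geometry, which this deliberately ignores:

#include <stdio.h>

/* Plane size from v4l2_format_info-style fields (no tile padding applied). */
static unsigned int plane_bytes(unsigned int width, unsigned int height,
				unsigned int bpp, unsigned int hdiv,
				unsigned int vdiv)
{
	return (width / hdiv) * (height / vdiv) * bpp;
}

int main(void)
{
	unsigned int w = 64, h = 64;

	/* Luma: bpp 1 at full resolution; CbCr: bpp 2, halved in both axes. */
	printf("Y: %u bytes, CbCr: %u bytes\n",
	       plane_bytes(w, h, 1, 1, 1), plane_bytes(w, h, 2, 2, 2));
	return 0;
}
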
diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
index 47aff3b19742..8176769a89fa 100644
--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
@@ -126,6 +126,9 @@ struct v4l2_format32 {
* @memory: buffer memory type
* @format: frame format, for which buffers are requested
* @capabilities: capabilities of this buffer type.
+ * @flags: additional buffer management attributes (ignored unless the
+ * queue has the V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and is
+ * configured for MMAP streaming I/O).
* @reserved: future extensions
*/
struct v4l2_create_buffers32 {
@@ -134,7 +137,8 @@ struct v4l2_create_buffers32 {
__u32 memory; /* enum v4l2_memory */
struct v4l2_format32 format;
__u32 capabilities;
- __u32 reserved[7];
+ __u32 flags;
+ __u32 reserved[6];
};
static int get_v4l2_format32(struct v4l2_format *p64,
@@ -182,6 +186,8 @@ static int get_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_from_user(p64, p32,
offsetof(struct v4l2_create_buffers32, format)))
return -EFAULT;
+ if (copy_from_user(&p64->flags, &p32->flags, sizeof(p32->flags)))
+ return -EFAULT;
return get_v4l2_format32(&p64->format, &p32->format);
}
@@ -227,6 +233,7 @@ static int put_v4l2_create32(struct v4l2_create_buffers *p64,
if (copy_to_user(p32, p64,
offsetof(struct v4l2_create_buffers32, format)) ||
put_user(p64->capabilities, &p32->capabilities) ||
+ put_user(p64->flags, &p32->flags) ||
copy_to_user(p32->reserved, p64->reserved, sizeof(p64->reserved)))
return -EFAULT;
return put_v4l2_format32(&p64->format, &p32->format);
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c
index c4b5082849b6..70adfc1b9c81 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-core.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c
@@ -687,6 +687,9 @@ static int std_validate_compound(const struct v4l2_ctrl *ctrl, u32 idx,
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ break;
+
case V4L2_CTRL_TYPE_AREA:
area = p;
if (!area->width || !area->height)
@@ -1240,6 +1243,9 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl,
case V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_slice_params);
break;
+ case V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX:
+ elem_size = sizeof(struct v4l2_ctrl_hevc_scaling_matrix);
+ break;
case V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS:
elem_size = sizeof(struct v4l2_ctrl_hevc_decode_params);
break;
diff --git a/drivers/media/v4l2-core/v4l2-ctrls-defs.c b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
index 421300e13a41..ebe82b6ba6e6 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls-defs.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls-defs.c
@@ -997,6 +997,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_MPEG_VIDEO_HEVC_SPS: return "HEVC Sequence Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_PPS: return "HEVC Picture Parameter Set";
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS: return "HEVC Slice Parameters";
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX: return "HEVC Scaling Matrix";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS: return "HEVC Decode Parameters";
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE: return "HEVC Decode Mode";
case V4L2_CID_MPEG_VIDEO_HEVC_START_CODE: return "HEVC Start Code";
@@ -1107,6 +1108,7 @@ const char *v4l2_ctrl_get_name(u32 id)
case V4L2_CID_TEST_PATTERN_GREENR: return "Green (Red) Pixel Value";
case V4L2_CID_TEST_PATTERN_BLUE: return "Blue Pixel Value";
case V4L2_CID_TEST_PATTERN_GREENB: return "Green (Blue) Pixel Value";
+ case V4L2_CID_NOTIFY_GAINS: return "Notify Gains";
/* Image processing controls */
/* Keep the order of the 'case's the same as in v4l2-controls.h! */
@@ -1490,6 +1492,9 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
case V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS;
break;
+ case V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX:
+ *type = V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX;
+ break;
case V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS:
*type = V4L2_CTRL_TYPE_HEVC_DECODE_PARAMS;
break;
diff --git a/drivers/media/v4l2-core/v4l2-fwnode.c b/drivers/media/v4l2-core/v4l2-fwnode.c
index 843259c304bb..00457e1e93f6 100644
--- a/drivers/media/v4l2-core/v4l2-fwnode.c
+++ b/drivers/media/v4l2-core/v4l2-fwnode.c
@@ -780,11 +780,11 @@ int v4l2_fwnode_device_parse(struct device *dev,
EXPORT_SYMBOL_GPL(v4l2_fwnode_device_parse);
static int
-v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
- struct v4l2_async_notifier *notifier,
- struct fwnode_handle *endpoint,
- unsigned int asd_struct_size,
- parse_endpoint_func parse_endpoint)
+v4l2_async_nf_fwnode_parse_endpoint(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ struct fwnode_handle *endpoint,
+ unsigned int asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct v4l2_fwnode_endpoint vep = { .bus_type = 0 };
struct v4l2_async_subdev *asd;
@@ -822,7 +822,7 @@ v4l2_async_notifier_fwnode_parse_endpoint(struct device *dev,
if (ret < 0)
goto out_err;
- ret = __v4l2_async_notifier_add_subdev(notifier, asd);
+ ret = __v4l2_async_nf_add_subdev(notifier, asd);
if (ret < 0) {
/* not an error if asd already exists */
if (ret == -EEXIST)
@@ -839,13 +839,11 @@ out_err:
return ret == -ENOTCONN ? 0 : ret;
}
-static int
-__v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- unsigned int port,
- bool has_port,
- parse_endpoint_func parse_endpoint)
+int
+v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
+ struct v4l2_async_notifier *notifier,
+ size_t asd_struct_size,
+ parse_endpoint_func parse_endpoint)
{
struct fwnode_handle *fwnode;
int ret = 0;
@@ -863,22 +861,11 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
if (!is_available)
continue;
- if (has_port) {
- struct fwnode_endpoint ep;
-
- ret = fwnode_graph_parse_endpoint(fwnode, &ep);
- if (ret)
- break;
-
- if (ep.port != port)
- continue;
- }
- ret = v4l2_async_notifier_fwnode_parse_endpoint(dev,
- notifier,
- fwnode,
- asd_struct_size,
- parse_endpoint);
+ ret = v4l2_async_nf_fwnode_parse_endpoint(dev, notifier,
+ fwnode,
+ asd_struct_size,
+ parse_endpoint);
if (ret < 0)
break;
}
@@ -887,18 +874,7 @@ __v4l2_async_notifier_parse_fwnode_ep(struct device *dev,
return ret;
}
-
-int
-v4l2_async_notifier_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint)
-{
- return __v4l2_async_notifier_parse_fwnode_ep(dev, notifier,
- asd_struct_size, 0,
- false, parse_endpoint);
-}
-EXPORT_SYMBOL_GPL(v4l2_async_notifier_parse_fwnode_endpoints);
+EXPORT_SYMBOL_GPL(v4l2_async_nf_parse_fwnode_endpoints);
/*
* v4l2_fwnode_reference_parse - parse references for async sub-devices
@@ -942,9 +918,8 @@ static int v4l2_fwnode_reference_parse(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier,
- args.fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, args.fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(args.fwnode);
if (IS_ERR(asd)) {
/* not an error if asd already exists */
@@ -1243,8 +1218,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
index++) {
struct v4l2_async_subdev *asd;
- asd = v4l2_async_notifier_add_fwnode_subdev(notifier, fwnode,
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(notifier, fwnode,
+ struct v4l2_async_subdev);
fwnode_handle_put(fwnode);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
@@ -1260,7 +1235,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
}
/**
- * v4l2_async_notifier_parse_fwnode_sensor - parse common references on
+ * v4l2_async_nf_parse_fwnode_sensor - parse common references on
* sensors for async sub-devices
* @dev: the device node the properties of which are parsed for references
* @notifier: the async notifier where the async subdevs will be added
@@ -1269,7 +1244,7 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* sensor and set up async sub-devices for them.
*
* Any notifier populated using this function must be released with a call to
- * v4l2_async_notifier_release() after it has been unregistered and the async
+ * v4l2_async_nf_release() after it has been unregistered and the async
* sub-devices are no longer in use, even in the case the function returned an
* error.
*
@@ -1278,8 +1253,8 @@ v4l2_fwnode_reference_parse_int_props(struct device *dev,
* -EINVAL if property parsing failed
*/
static int
-v4l2_async_notifier_parse_fwnode_sensor(struct device *dev,
- struct v4l2_async_notifier *notifier)
+v4l2_async_nf_parse_fwnode_sensor(struct device *dev,
+ struct v4l2_async_notifier *notifier)
{
static const char * const led_props[] = { "led" };
static const struct v4l2_fwnode_int_props props[] = {
@@ -1320,13 +1295,13 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
if (!notifier)
return -ENOMEM;
- v4l2_async_notifier_init(notifier);
+ v4l2_async_nf_init(notifier);
- ret = v4l2_async_notifier_parse_fwnode_sensor(sd->dev, notifier);
+ ret = v4l2_async_nf_parse_fwnode_sensor(sd->dev, notifier);
if (ret < 0)
goto out_cleanup;
- ret = v4l2_async_subdev_notifier_register(sd, notifier);
+ ret = v4l2_async_subdev_nf_register(sd, notifier);
if (ret < 0)
goto out_cleanup;
@@ -1339,10 +1314,10 @@ int v4l2_async_register_subdev_sensor(struct v4l2_subdev *sd)
return 0;
out_unregister:
- v4l2_async_notifier_unregister(notifier);
+ v4l2_async_nf_unregister(notifier);
out_cleanup:
- v4l2_async_notifier_cleanup(notifier);
+ v4l2_async_nf_cleanup(notifier);
kfree(notifier);
return ret;
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index 05d5db3d85e5..31d0109ce5a8 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -869,7 +869,7 @@ static void v4l_print_default(const void *arg, bool write_only)
pr_cont("driver-specific ioctl\n");
}
-static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
+static bool check_ext_ctrls(struct v4l2_ext_controls *c, unsigned long ioctl)
{
__u32 i;
@@ -878,23 +878,41 @@ static int check_ext_ctrls(struct v4l2_ext_controls *c, int allow_priv)
for (i = 0; i < c->count; i++)
c->controls[i].reserved2[0] = 0;
- /* V4L2_CID_PRIVATE_BASE cannot be used as control class
- when using extended controls.
- Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
- is it allowed for backwards compatibility.
- */
- if (!allow_priv && c->which == V4L2_CID_PRIVATE_BASE)
- return 0;
- if (!c->which)
- return 1;
+ switch (c->which) {
+ case V4L2_CID_PRIVATE_BASE:
+ /*
+ * V4L2_CID_PRIVATE_BASE cannot be used as control class
+ * when using extended controls.
+ * Only when passed in through VIDIOC_G_CTRL and VIDIOC_S_CTRL
+ * is it allowed for backwards compatibility.
+ */
+ if (ioctl == VIDIOC_G_CTRL || ioctl == VIDIOC_S_CTRL)
+ return false;
+ break;
+ case V4L2_CTRL_WHICH_DEF_VAL:
+ /* Default value cannot be changed */
+ if (ioctl == VIDIOC_S_EXT_CTRLS ||
+ ioctl == VIDIOC_TRY_EXT_CTRLS) {
+ c->error_idx = c->count;
+ return false;
+ }
+ return true;
+ case V4L2_CTRL_WHICH_CUR_VAL:
+ return true;
+ case V4L2_CTRL_WHICH_REQUEST_VAL:
+ c->error_idx = c->count;
+ return false;
+ }
+
/* Check that all controls are from the same control class. */
for (i = 0; i < c->count; i++) {
if (V4L2_CTRL_ID2WHICH(c->controls[i].id) != c->which) {
- c->error_idx = i;
- return 0;
+ c->error_idx = ioctl == VIDIOC_TRY_EXT_CTRLS ? i :
+ c->count;
+ return false;
}
}
- return 1;
+ return true;
}
static int check_fmt(struct file *file, enum v4l2_buf_type type)
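With allow_priv replaced by the ioctl number, the `which` rules are now explicit in one place: V4L2_CTRL_WHICH_DEF_VAL may only be read, V4L2_CTRL_WHICH_REQUEST_VAL is never valid on these paths, and a class mismatch reports error_idx as the offending index for TRY but as count for G/S. A small userspace sketch of the read-only default-value path (the control ID is a placeholder):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Illustrative only: fetch a control's default value via the ext-ctrls API. */
static int get_default_value(int fd, unsigned int id, int *value)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));

	ctrl.id = id;
	ctrls.which = V4L2_CTRL_WHICH_DEF_VAL;	/* allowed for G, rejected for S/TRY */
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
		return -1;

	*value = ctrl.value;
	return 0;
}

Passing the same V4L2_CTRL_WHICH_DEF_VAL request to VIDIOC_S_EXT_CTRLS or VIDIOC_TRY_EXT_CTRLS now fails with error_idx set to count, per the switch above.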
@@ -1274,7 +1292,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_YUV410: descr = "Planar YUV 4:1:0"; break;
case V4L2_PIX_FMT_YUV420: descr = "Planar YUV 4:2:0"; break;
case V4L2_PIX_FMT_HI240: descr = "8-bit Dithered RGB (BTTV)"; break;
- case V4L2_PIX_FMT_HM12: descr = "YUV 4:2:0 (16x16 Macroblocks)"; break;
case V4L2_PIX_FMT_M420: descr = "YUV 4:2:0 (M420)"; break;
case V4L2_PIX_FMT_NV12: descr = "Y/CbCr 4:2:0"; break;
case V4L2_PIX_FMT_NV21: descr = "Y/CrCb 4:2:0"; break;
@@ -1282,6 +1299,9 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_NV61: descr = "Y/CrCb 4:2:2"; break;
case V4L2_PIX_FMT_NV24: descr = "Y/CbCr 4:4:4"; break;
case V4L2_PIX_FMT_NV42: descr = "Y/CrCb 4:4:4"; break;
+ case V4L2_PIX_FMT_NV12_4L4: descr = "Y/CbCr 4:2:0 (4x4 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_16L16: descr = "Y/CbCr 4:2:0 (16x16 Linear)"; break;
+ case V4L2_PIX_FMT_NV12_32L32: descr = "Y/CbCr 4:2:0 (32x32 Linear)"; break;
case V4L2_PIX_FMT_NV12M: descr = "Y/CbCr 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV21M: descr = "Y/CrCb 4:2:0 (N-C)"; break;
case V4L2_PIX_FMT_NV16M: descr = "Y/CbCr 4:2:2 (N-C)"; break;
@@ -1346,6 +1366,7 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_TM6000: descr = "A/V + VBI Mux Packet"; break;
case V4L2_PIX_FMT_CIT_YYVYUY: descr = "GSPCA CIT YYVYUY"; break;
case V4L2_PIX_FMT_KONICA420: descr = "GSPCA KONICA420"; break;
+ case V4L2_PIX_FMT_MM21: descr = "Mediatek 8-bit Block Format"; break;
case V4L2_PIX_FMT_HSV24: descr = "24-bit HSV 8-8-8"; break;
case V4L2_PIX_FMT_HSV32: descr = "32-bit XHSV 8-8-8-8"; break;
case V4L2_SDR_FMT_CU8: descr = "Complex U8"; break;
@@ -1415,7 +1436,6 @@ static void v4l_fill_fmtdesc(struct v4l2_fmtdesc *fmt)
case V4L2_PIX_FMT_SE401: descr = "GSPCA SE401"; break;
case V4L2_PIX_FMT_S5C_UYVY_JPG: descr = "S5C73MX interleaved UYVY/JPEG"; break;
case V4L2_PIX_FMT_MT21C: descr = "Mediatek Compressed Format"; break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12: descr = "Sunxi Tiled NV12 Format"; break;
default:
if (fmt->description[0])
return;
@@ -2004,7 +2024,7 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(p, capabilities);
+ CLEAR_AFTER_FIELD(p, flags);
return ops->vidioc_reqbufs(file, fh, p);
}
@@ -2045,7 +2065,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops,
if (ret)
return ret;
- CLEAR_AFTER_FIELD(create, capabilities);
+ CLEAR_AFTER_FIELD(create, flags);
v4l_sanitize_format(&create->format);
@@ -2187,7 +2207,7 @@ static int v4l_g_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1)) {
+ if (check_ext_ctrls(&ctrls, VIDIOC_G_CTRL)) {
int ret = ops->vidioc_g_ext_ctrls(file, fh, &ctrls);
if (ret == 0)
@@ -2206,6 +2226,7 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags) ? fh : NULL;
struct v4l2_ext_controls ctrls;
struct v4l2_ext_control ctrl;
+ int ret;
if (vfh && vfh->ctrl_handler)
return v4l2_s_ctrl(vfh, vfh->ctrl_handler, p);
@@ -2221,9 +2242,11 @@ static int v4l_s_ctrl(const struct v4l2_ioctl_ops *ops,
ctrls.controls = &ctrl;
ctrl.id = p->id;
ctrl.value = p->value;
- if (check_ext_ctrls(&ctrls, 1))
- return ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
- return -EINVAL;
+ if (!check_ext_ctrls(&ctrls, VIDIOC_S_CTRL))
+ return -EINVAL;
+ ret = ops->vidioc_s_ext_ctrls(file, fh, &ctrls);
+ p->value = ctrl.value;
+ return ret;
}
static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2243,8 +2266,8 @@ static int v4l_g_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_g_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_g_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_G_EXT_CTRLS) ?
+ ops->vidioc_g_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2264,8 +2287,8 @@ static int v4l_s_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_s_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_s_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_S_EXT_CTRLS) ?
+ ops->vidioc_s_ext_ctrls(file, fh, p) : -EINVAL;
}
static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
@@ -2285,8 +2308,8 @@ static int v4l_try_ext_ctrls(const struct v4l2_ioctl_ops *ops,
vfd, vfd->v4l2_dev->mdev, p);
if (ops->vidioc_try_ext_ctrls == NULL)
return -ENOTTY;
- return check_ext_ctrls(p, 0) ? ops->vidioc_try_ext_ctrls(file, fh, p) :
- -EINVAL;
+ return check_ext_ctrls(p, VIDIOC_TRY_EXT_CTRLS) ?
+ ops->vidioc_try_ext_ctrls(file, fh, p) : -EINVAL;
}
/*
diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig
index 72c0df129d5c..30bff6cb1b8d 100644
--- a/drivers/memory/Kconfig
+++ b/drivers/memory/Kconfig
@@ -55,8 +55,8 @@ config ATMEL_EBI
SRAMs, ATA devices, etc.
config BRCMSTB_DPFE
- bool "Broadcom STB DPFE driver" if COMPILE_TEST
- default y if ARCH_BRCMSTB
+ tristate "Broadcom STB DPFE driver"
+ default ARCH_BRCMSTB
depends on ARCH_BRCMSTB || COMPILE_TEST
help
This driver provides access to the DPFE interface of Broadcom
@@ -210,6 +210,7 @@ config RENESAS_RPCIF
tristate "Renesas RPC-IF driver"
depends on ARCH_RENESAS || COMPILE_TEST
select REGMAP_MMIO
+ select RESET_CONTROLLER
help
This supports Renesas R-Car Gen3 or RZ/G2 RPC-IF which provides
either SPI host or HyperFlash. You'll have to select individual
diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c
index d062c2f8250f..75a8c38df939 100644
--- a/drivers/memory/fsl_ifc.c
+++ b/drivers/memory/fsl_ifc.c
@@ -263,7 +263,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev);
if (ret < 0)
- goto err;
+ goto err_unmap_nandirq;
init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait);
@@ -272,7 +272,7 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->irq);
- goto err_irq;
+ goto err_unmap_nandirq;
}
if (fsl_ifc_ctrl_dev->nand_irq) {
@@ -281,17 +281,16 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev)
if (ret != 0) {
dev_err(&dev->dev, "failed to install irq (%d)\n",
fsl_ifc_ctrl_dev->nand_irq);
- goto err_nandirq;
+ goto err_free_irq;
}
}
return 0;
-err_nandirq:
- free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev);
- irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
-err_irq:
+err_free_irq:
free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev);
+err_unmap_nandirq:
+ irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq);
irq_dispose_mapping(fsl_ifc_ctrl_dev->irq);
err:
iounmap(fsl_ifc_ctrl_dev->gregs);
diff --git a/drivers/memory/jedec_ddr.h b/drivers/memory/jedec_ddr.h
index e59ccbd982d0..6cd508478b14 100644
--- a/drivers/memory/jedec_ddr.h
+++ b/drivers/memory/jedec_ddr.h
@@ -112,6 +112,26 @@
#define NUM_DDR_ADDR_TABLE_ENTRIES 11
#define NUM_DDR_TIMING_TABLE_ENTRIES 4
+#define LPDDR2_MANID_SAMSUNG 1
+#define LPDDR2_MANID_QIMONDA 2
+#define LPDDR2_MANID_ELPIDA 3
+#define LPDDR2_MANID_ETRON 4
+#define LPDDR2_MANID_NANYA 5
+#define LPDDR2_MANID_HYNIX 6
+#define LPDDR2_MANID_MOSEL 7
+#define LPDDR2_MANID_WINBOND 8
+#define LPDDR2_MANID_ESMT 9
+#define LPDDR2_MANID_SPANSION 11
+#define LPDDR2_MANID_SST 12
+#define LPDDR2_MANID_ZMOS 13
+#define LPDDR2_MANID_INTEL 14
+#define LPDDR2_MANID_NUMONYX 254
+#define LPDDR2_MANID_MICRON 255
+
+#define LPDDR2_TYPE_S4 0
+#define LPDDR2_TYPE_S2 1
+#define LPDDR2_TYPE_NVM 2
+
/* Structure for DDR addressing info from the JEDEC spec */
struct lpddr2_addressing {
u32 num_banks;
@@ -170,6 +190,33 @@ extern const struct lpddr2_timings
lpddr2_jedec_timings[NUM_DDR_TIMING_TABLE_ENTRIES];
extern const struct lpddr2_min_tck lpddr2_jedec_min_tck;
+/* Structure of MR8 */
+union lpddr2_basic_config4 {
+ u32 value;
+
+ struct {
+ unsigned int arch_type : 2;
+ unsigned int density : 4;
+ unsigned int io_width : 2;
+ } __packed;
+};
+
+/*
+ * Structure for information about LPDDR2 chip. All parameters are
+ * matching raw values of standard mode register bitfields or set to
+ * -ENOENT if info unavailable.
+ */
+struct lpddr2_info {
+ int arch_type;
+ int density;
+ int io_width;
+ int manufacturer_id;
+ int revision_id1;
+ int revision_id2;
+};
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id);
+
/*
* Structure for timings for LPDDR3 based on LPDDR2 plus additional fields.
* All parameters are in pico seconds(ps) excluding max_freq, min_freq which
diff --git a/drivers/memory/jedec_ddr_data.c b/drivers/memory/jedec_ddr_data.c
index ed601d813175..2cca4fa188f9 100644
--- a/drivers/memory/jedec_ddr_data.c
+++ b/drivers/memory/jedec_ddr_data.c
@@ -131,3 +131,44 @@ const struct lpddr2_min_tck lpddr2_jedec_min_tck = {
.tFAW = 8
};
EXPORT_SYMBOL_GPL(lpddr2_jedec_min_tck);
+
+const char *lpddr2_jedec_manufacturer(unsigned int manufacturer_id)
+{
+ switch (manufacturer_id) {
+ case LPDDR2_MANID_SAMSUNG:
+ return "Samsung";
+ case LPDDR2_MANID_QIMONDA:
+ return "Qimonda";
+ case LPDDR2_MANID_ELPIDA:
+ return "Elpida";
+ case LPDDR2_MANID_ETRON:
+ return "Etron";
+ case LPDDR2_MANID_NANYA:
+ return "Nanya";
+ case LPDDR2_MANID_HYNIX:
+ return "Hynix";
+ case LPDDR2_MANID_MOSEL:
+ return "Mosel";
+ case LPDDR2_MANID_WINBOND:
+ return "Winbond";
+ case LPDDR2_MANID_ESMT:
+ return "ESMT";
+ case LPDDR2_MANID_SPANSION:
+ return "Spansion";
+ case LPDDR2_MANID_SST:
+ return "SST";
+ case LPDDR2_MANID_ZMOS:
+ return "ZMOS";
+ case LPDDR2_MANID_INTEL:
+ return "Intel";
+ case LPDDR2_MANID_NUMONYX:
+ return "Numonyx";
+ case LPDDR2_MANID_MICRON:
+ return "Micron";
+ default:
+ break;
+ }
+
+ return "invalid";
+}
+EXPORT_SYMBOL_GPL(lpddr2_jedec_manufacturer);
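These helpers deal in the raw LPDDR2 mode-register encodings: MR5 holds the manufacturer ID translated by lpddr2_jedec_manufacturer(), while MR8 packs type, density and I/O width into the bitfields of union lpddr2_basic_config4 declared above. A stand-alone sketch of the same MR8 decode using plain shifts and masks (the sample register value is made up):

#include <stdio.h>

/* Decode LPDDR2 MR8: bits [1:0] arch type, [5:2] density code, [7:6] I/O width code. */
static void decode_mr8(unsigned int mr8)
{
	unsigned int arch_type = mr8 & 0x3;
	unsigned int density = (mr8 >> 2) & 0xf;
	unsigned int io_width = (mr8 >> 6) & 0x3;

	printf("arch-type=%u density-code=%u io-width-code=%u\n",
	       arch_type, density, io_width);
}

int main(void)
{
	decode_mr8(0x14);	/* example: S4 device, density code 5, I/O width code 0 */
	return 0;
}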
diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c
index c5fb51f73b34..b883dcc0bbfa 100644
--- a/drivers/memory/mtk-smi.c
+++ b/drivers/memory/mtk-smi.c
@@ -17,13 +17,33 @@
#include <dt-bindings/memory/mt2701-larb-port.h>
#include <dt-bindings/memory/mtk-memory-port.h>
-/* mt8173 */
-#define SMI_LARB_MMU_EN 0xf00
+/* SMI COMMON */
+#define SMI_L1LEN 0x100
-/* mt8167 */
-#define MT8167_SMI_LARB_MMU_EN 0xfc0
+#define SMI_BUS_SEL 0x220
+#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
+/* All larbs go to MMU0 by default; only specialize MMU1 here. */
+#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+
+#define SMI_M4U_TH 0x234
+#define SMI_FIFO_TH1 0x238
+#define SMI_FIFO_TH2 0x23c
+#define SMI_DCM 0x300
+#define SMI_DUMMY 0x444
-/* mt2701 */
+/* SMI LARB */
+#define SMI_LARB_CMD_THRT_CON 0x24
+#define SMI_LARB_THRT_RD_NU_LMT_MSK GENMASK(7, 4)
+#define SMI_LARB_THRT_RD_NU_LMT (5 << 4)
+
+#define SMI_LARB_SW_FLAG 0x40
+#define SMI_LARB_SW_FLAG_1 0x1
+
+#define SMI_LARB_OSTDL_PORT 0x200
+#define SMI_LARB_OSTDL_PORTx(id) (SMI_LARB_OSTDL_PORT + (((id) & 0x1f) << 2))
+
+/* Below are the mmu enable registers; they differ between SoCs */
+/* gen1: mt2701 */
#define REG_SMI_SECUR_CON_BASE 0x5c0
/* every register control 8 port, register offset 0x4 */
@@ -41,99 +61,94 @@
/* mt2701 domain should be set to 3 */
#define SMI_SECUR_CON_VAL_DOMAIN(id) (0x3 << ((((id) & 0x7) << 2) + 1))
-/* mt2712 */
-#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
-#define F_MMU_EN BIT(0)
-#define BANK_SEL(id) ({ \
+/* gen2: */
+/* mt8167 */
+#define MT8167_SMI_LARB_MMU_EN 0xfc0
+
+/* mt8173 */
+#define MT8173_SMI_LARB_MMU_EN 0xf00
+
+/* general */
+#define SMI_LARB_NONSEC_CON(id) (0x380 + ((id) * 4))
+#define F_MMU_EN BIT(0)
+#define BANK_SEL(id) ({ \
u32 _id = (id) & 0x3; \
(_id << 8 | _id << 10 | _id << 12 | _id << 14); \
})
-/* SMI COMMON */
-#define SMI_BUS_SEL 0x220
-#define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1)
-/* All are MMU0 defaultly. Only specialize mmu1 here. */
-#define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid))
+#define SMI_COMMON_INIT_REGS_NR 6
+#define SMI_LARB_PORT_NR_MAX 32
+
+#define MTK_SMI_FLAG_THRT_UPDATE BIT(0)
+#define MTK_SMI_FLAG_SW_FLAG BIT(1)
+#define MTK_SMI_CAPS(flags, _x) (!!((flags) & (_x)))
+
+struct mtk_smi_reg_pair {
+ unsigned int offset;
+ u32 value;
+};
-enum mtk_smi_gen {
+enum mtk_smi_type {
MTK_SMI_GEN1,
- MTK_SMI_GEN2
+ MTK_SMI_GEN2, /* gen2 smi common */
+ MTK_SMI_GEN2_SUB_COMM, /* gen2 smi sub common */
};
+#define MTK_SMI_CLK_NR_MAX 4
+
+/* larbs: Require apb/smi clocks while gals is optional. */
+static const char * const mtk_smi_larb_clks[] = {"apb", "smi", "gals"};
+#define MTK_SMI_LARB_REQ_CLK_NR 2
+#define MTK_SMI_LARB_OPT_CLK_NR 1
+
+/*
+ * common: Require these four clocks in has_gals case. Otherwise, only apb/smi are required.
+ * sub common: Require apb/smi/gals0 clocks in has_gals case. Otherwise, only apb/smi are required.
+ */
+static const char * const mtk_smi_common_clks[] = {"apb", "smi", "gals0", "gals1"};
+#define MTK_SMI_COM_REQ_CLK_NR 2
+#define MTK_SMI_COM_GALS_REQ_CLK_NR MTK_SMI_CLK_NR_MAX
+#define MTK_SMI_SUB_COM_GALS_REQ_CLK_NR 3
+
struct mtk_smi_common_plat {
- enum mtk_smi_gen gen;
- bool has_gals;
- u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+ enum mtk_smi_type type;
+ bool has_gals;
+ u32 bus_sel; /* Balance some larbs to enter mmu0 or mmu1 */
+
+ const struct mtk_smi_reg_pair *init;
};
struct mtk_smi_larb_gen {
int port_in_larb[MTK_LARB_NR_MAX + 1];
void (*config_port)(struct device *dev);
unsigned int larb_direct_to_common_mask;
- bool has_gals;
+ unsigned int flags_general;
+ const u8 (*ostd)[SMI_LARB_PORT_NR_MAX];
};
struct mtk_smi {
struct device *dev;
- struct clk *clk_apb, *clk_smi;
- struct clk *clk_gals0, *clk_gals1;
+ unsigned int clk_num;
+ struct clk_bulk_data clks[MTK_SMI_CLK_NR_MAX];
struct clk *clk_async; /*only needed by mt2701*/
union {
void __iomem *smi_ao_base; /* only for gen1 */
void __iomem *base; /* only for gen2 */
};
+ struct device *smi_common_dev; /* for sub common */
const struct mtk_smi_common_plat *plat;
};
struct mtk_smi_larb { /* larb: local arbiter */
struct mtk_smi smi;
void __iomem *base;
- struct device *smi_common_dev;
+ struct device *smi_common_dev; /* common or sub-common dev */
const struct mtk_smi_larb_gen *larb_gen;
int larbid;
u32 *mmu;
unsigned char *bank;
};
-static int mtk_smi_clk_enable(const struct mtk_smi *smi)
-{
- int ret;
-
- ret = clk_prepare_enable(smi->clk_apb);
- if (ret)
- return ret;
-
- ret = clk_prepare_enable(smi->clk_smi);
- if (ret)
- goto err_disable_apb;
-
- ret = clk_prepare_enable(smi->clk_gals0);
- if (ret)
- goto err_disable_smi;
-
- ret = clk_prepare_enable(smi->clk_gals1);
- if (ret)
- goto err_disable_gals0;
-
- return 0;
-
-err_disable_gals0:
- clk_disable_unprepare(smi->clk_gals0);
-err_disable_smi:
- clk_disable_unprepare(smi->clk_smi);
-err_disable_apb:
- clk_disable_unprepare(smi->clk_apb);
- return ret;
-}
-
-static void mtk_smi_clk_disable(const struct mtk_smi *smi)
-{
- clk_disable_unprepare(smi->clk_gals1);
- clk_disable_unprepare(smi->clk_gals0);
- clk_disable_unprepare(smi->clk_smi);
- clk_disable_unprepare(smi->clk_apb);
-}
-
int mtk_smi_larb_get(struct device *larbdev)
{
int ret = pm_runtime_resume_and_get(larbdev);
@@ -166,36 +181,16 @@ mtk_smi_larb_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
}
-static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- u32 reg;
- int i;
-
- if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
- return;
-
- for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
- reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
- reg |= F_MMU_EN;
- reg |= BANK_SEL(larb->bank[i]);
- writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
- }
-}
-
-static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+static void
+mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + SMI_LARB_MMU_EN);
+ /* Do nothing as the iommu is always enabled. */
}
-static void mtk_smi_larb_config_port_mt8167(struct device *dev)
-{
- struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-
- writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
-}
+static const struct component_ops mtk_smi_larb_component_ops = {
+ .bind = mtk_smi_larb_bind,
+ .unbind = mtk_smi_larb_unbind,
+};
static void mtk_smi_larb_config_port_gen1(struct device *dev)
{
@@ -228,25 +223,94 @@ static void mtk_smi_larb_config_port_gen1(struct device *dev)
}
}
-static void
-mtk_smi_larb_unbind(struct device *dev, struct device *master, void *data)
+static void mtk_smi_larb_config_port_mt8167(struct device *dev)
{
- /* Do nothing as the iommu is always enabled. */
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+
+ writel(*larb->mmu, larb->base + MT8167_SMI_LARB_MMU_EN);
}
-static const struct component_ops mtk_smi_larb_component_ops = {
- .bind = mtk_smi_larb_bind,
- .unbind = mtk_smi_larb_unbind,
-};
+static void mtk_smi_larb_config_port_mt8173(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
- /* mt8173 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8173,
-};
+ writel(*larb->mmu, larb->base + MT8173_SMI_LARB_MMU_EN);
+}
-static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
- /* mt8167 do not need the port in larb */
- .config_port = mtk_smi_larb_config_port_mt8167,
+static void mtk_smi_larb_config_port_gen2_general(struct device *dev)
+{
+ struct mtk_smi_larb *larb = dev_get_drvdata(dev);
+ u32 reg, flags_general = larb->larb_gen->flags_general;
+ const u8 *larbostd = larb->larb_gen->ostd[larb->larbid];
+ int i;
+
+ if (BIT(larb->larbid) & larb->larb_gen->larb_direct_to_common_mask)
+ return;
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_THRT_UPDATE)) {
+ reg = readl_relaxed(larb->base + SMI_LARB_CMD_THRT_CON);
+ reg &= ~SMI_LARB_THRT_RD_NU_LMT_MSK;
+ reg |= SMI_LARB_THRT_RD_NU_LMT;
+ writel_relaxed(reg, larb->base + SMI_LARB_CMD_THRT_CON);
+ }
+
+ if (MTK_SMI_CAPS(flags_general, MTK_SMI_FLAG_SW_FLAG))
+ writel_relaxed(SMI_LARB_SW_FLAG_1, larb->base + SMI_LARB_SW_FLAG);
+
+ for (i = 0; i < SMI_LARB_PORT_NR_MAX && larbostd && !!larbostd[i]; i++)
+ writel_relaxed(larbostd[i], larb->base + SMI_LARB_OSTDL_PORTx(i));
+
+ for_each_set_bit(i, (unsigned long *)larb->mmu, 32) {
+ reg = readl_relaxed(larb->base + SMI_LARB_NONSEC_CON(i));
+ reg |= F_MMU_EN;
+ reg |= BANK_SEL(larb->bank[i]);
+ writel(reg, larb->base + SMI_LARB_NONSEC_CON(i));
+ }
+}
+
+static const u8 mtk_smi_larb_mt8195_ostd[][SMI_LARB_PORT_NR_MAX] = {
+ [0] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb0 */
+ [1] = {0x0a, 0xc, 0x22, 0x22, 0x01, 0x0a,}, /* larb1 */
+ [2] = {0x12, 0x12, 0x12, 0x12, 0x0a,}, /* ... */
+ [3] = {0x12, 0x12, 0x12, 0x12, 0x28, 0x28, 0x0a,},
+ [4] = {0x06, 0x01, 0x17, 0x06, 0x0a,},
+ [5] = {0x06, 0x01, 0x17, 0x06, 0x06, 0x01, 0x06, 0x0a,},
+ [6] = {0x06, 0x01, 0x06, 0x0a,},
+ [7] = {0x0c, 0x0c, 0x12,},
+ [8] = {0x0c, 0x0c, 0x12,},
+ [9] = {0x0a, 0x08, 0x04, 0x06, 0x01, 0x01, 0x10, 0x18, 0x11, 0x0a,
+ 0x08, 0x04, 0x11, 0x06, 0x02, 0x06, 0x01, 0x11, 0x11, 0x06,},
+ [10] = {0x18, 0x08, 0x01, 0x01, 0x20, 0x12, 0x18, 0x06, 0x05, 0x10,
+ 0x08, 0x08, 0x10, 0x08, 0x08, 0x18, 0x0c, 0x09, 0x0b, 0x0d,
+ 0x0d, 0x06, 0x10, 0x10,},
+ [11] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x01, 0x01, 0x01, 0x01,},
+ [12] = {0x09, 0x09, 0x05, 0x05, 0x0c, 0x18, 0x02, 0x02, 0x04, 0x02,},
+ [13] = {0x02, 0x02, 0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x08, 0x01,},
+ [14] = {0x12, 0x12, 0x02, 0x02, 0x02, 0x02, 0x16, 0x01, 0x16, 0x01,
+ 0x01, 0x02, 0x02, 0x08, 0x02,},
+ [15] = {},
+ [16] = {0x28, 0x02, 0x02, 0x12, 0x02, 0x12, 0x10, 0x02, 0x02, 0x0a,
+ 0x12, 0x02, 0x0a, 0x16, 0x02, 0x04,},
+ [17] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
+ [18] = {0x12, 0x06, 0x12, 0x06,},
+ [19] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [20] = {0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x04, 0x01,
+ 0x01, 0x01, 0x04, 0x0a, 0x06, 0x01, 0x01, 0x01, 0x0a, 0x06,
+ 0x01, 0x01, 0x05, 0x03, 0x03, 0x04, 0x01,},
+ [21] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [22] = {0x28, 0x19, 0x0c, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04,},
+ [23] = {0x18, 0x01,},
+ [24] = {0x01, 0x01, 0x04, 0x01, 0x01, 0x01, 0x01, 0x01, 0x04, 0x01,
+ 0x01, 0x01,},
+ [25] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [26] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [27] = {0x02, 0x02, 0x02, 0x28, 0x16, 0x02, 0x02, 0x02, 0x12, 0x16,
+ 0x02, 0x01,},
+ [28] = {0x1a, 0x0e, 0x0a, 0x0a, 0x0c, 0x0e, 0x10,},
};
static const struct mtk_smi_larb_gen mtk_smi_larb_mt2701 = {
@@ -269,8 +333,17 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt6779 = {
/* DUMMY | IPU0 | IPU1 | CCU | MDLA */
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8167 = {
+ /* mt8167 do not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8167,
+};
+
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8173 = {
+ /* mt8173 do not need the port in larb */
+ .config_port = mtk_smi_larb_config_port_mt8173,
+};
+
static const struct mtk_smi_larb_gen mtk_smi_larb_mt8183 = {
- .has_gals = true,
.config_port = mtk_smi_larb_config_port_gen2_general,
.larb_direct_to_common_mask = BIT(2) | BIT(3) | BIT(7),
/* IPU0 | IPU1 | CCU */
@@ -280,99 +353,114 @@ static const struct mtk_smi_larb_gen mtk_smi_larb_mt8192 = {
.config_port = mtk_smi_larb_config_port_gen2_general,
};
+static const struct mtk_smi_larb_gen mtk_smi_larb_mt8195 = {
+ .config_port = mtk_smi_larb_config_port_gen2_general,
+ .flags_general = MTK_SMI_FLAG_THRT_UPDATE | MTK_SMI_FLAG_SW_FLAG,
+ .ostd = mtk_smi_larb_mt8195_ostd,
+};
+
static const struct of_device_id mtk_smi_larb_of_ids[] = {
- {
- .compatible = "mediatek,mt8167-smi-larb",
- .data = &mtk_smi_larb_mt8167
- },
- {
- .compatible = "mediatek,mt8173-smi-larb",
- .data = &mtk_smi_larb_mt8173
- },
- {
- .compatible = "mediatek,mt2701-smi-larb",
- .data = &mtk_smi_larb_mt2701
- },
- {
- .compatible = "mediatek,mt2712-smi-larb",
- .data = &mtk_smi_larb_mt2712
- },
- {
- .compatible = "mediatek,mt6779-smi-larb",
- .data = &mtk_smi_larb_mt6779
- },
- {
- .compatible = "mediatek,mt8183-smi-larb",
- .data = &mtk_smi_larb_mt8183
- },
- {
- .compatible = "mediatek,mt8192-smi-larb",
- .data = &mtk_smi_larb_mt8192
- },
+ {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701},
+ {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712},
+ {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779},
+ {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167},
+ {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173},
+ {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183},
+ {.compatible = "mediatek,mt8192-smi-larb", .data = &mtk_smi_larb_mt8192},
+ {.compatible = "mediatek,mt8195-smi-larb", .data = &mtk_smi_larb_mt8195},
{}
};
-static int mtk_smi_larb_probe(struct platform_device *pdev)
+static int mtk_smi_device_link_common(struct device *dev, struct device **com_dev)
{
- struct mtk_smi_larb *larb;
- struct resource *res;
- struct device *dev = &pdev->dev;
- struct device_node *smi_node;
- struct platform_device *smi_pdev;
+ struct platform_device *smi_com_pdev;
+ struct device_node *smi_com_node;
+ struct device *smi_com_dev;
struct device_link *link;
- larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
- if (!larb)
- return -ENOMEM;
-
- larb->larb_gen = of_device_get_match_data(dev);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- larb->base = devm_ioremap_resource(dev, res);
- if (IS_ERR(larb->base))
- return PTR_ERR(larb->base);
-
- larb->smi.clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(larb->smi.clk_apb))
- return PTR_ERR(larb->smi.clk_apb);
-
- larb->smi.clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(larb->smi.clk_smi))
- return PTR_ERR(larb->smi.clk_smi);
-
- if (larb->larb_gen->has_gals) {
- /* The larbs may still haven't gals even if the SoC support.*/
- larb->smi.clk_gals0 = devm_clk_get(dev, "gals");
- if (PTR_ERR(larb->smi.clk_gals0) == -ENOENT)
- larb->smi.clk_gals0 = NULL;
- else if (IS_ERR(larb->smi.clk_gals0))
- return PTR_ERR(larb->smi.clk_gals0);
- }
- larb->smi.dev = dev;
-
- smi_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
- if (!smi_node)
+ smi_com_node = of_parse_phandle(dev->of_node, "mediatek,smi", 0);
+ if (!smi_com_node)
return -EINVAL;
- smi_pdev = of_find_device_by_node(smi_node);
- of_node_put(smi_node);
- if (smi_pdev) {
- if (!platform_get_drvdata(smi_pdev))
+ smi_com_pdev = of_find_device_by_node(smi_com_node);
+ of_node_put(smi_com_node);
+ if (smi_com_pdev) {
+ /* smi-common is the supplier; make sure it is ready before linking */
+ if (!platform_get_drvdata(smi_com_pdev))
return -EPROBE_DEFER;
- larb->smi_common_dev = &smi_pdev->dev;
- link = device_link_add(dev, larb->smi_common_dev,
+ smi_com_dev = &smi_com_pdev->dev;
+ link = device_link_add(dev, smi_com_dev,
DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS);
if (!link) {
dev_err(dev, "Unable to link smi-common dev\n");
return -ENODEV;
}
+ *com_dev = smi_com_dev;
} else {
dev_err(dev, "Failed to get the smi_common device\n");
return -EINVAL;
}
+ return 0;
+}
+
+static int mtk_smi_dts_clk_init(struct device *dev, struct mtk_smi *smi,
+ const char * const clks[],
+ unsigned int clk_nr_required,
+ unsigned int clk_nr_optional)
+{
+ int i, ret;
+
+ for (i = 0; i < clk_nr_required; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get(dev, clk_nr_required, smi->clks);
+ if (ret)
+ return ret;
+
+ for (i = clk_nr_required; i < clk_nr_required + clk_nr_optional; i++)
+ smi->clks[i].id = clks[i];
+ ret = devm_clk_bulk_get_optional(dev, clk_nr_optional,
+ smi->clks + clk_nr_required);
+ smi->clk_num = clk_nr_required + clk_nr_optional;
+ return ret;
+}
+
+static int mtk_smi_larb_probe(struct platform_device *pdev)
+{
+ struct mtk_smi_larb *larb;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ larb = devm_kzalloc(dev, sizeof(*larb), GFP_KERNEL);
+ if (!larb)
+ return -ENOMEM;
+
+ larb->larb_gen = of_device_get_match_data(dev);
+ larb->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(larb->base))
+ return PTR_ERR(larb->base);
+
+ ret = mtk_smi_dts_clk_init(dev, &larb->smi, mtk_smi_larb_clks,
+ MTK_SMI_LARB_REQ_CLK_NR, MTK_SMI_LARB_OPT_CLK_NR);
+ if (ret)
+ return ret;
+
+ larb->smi.dev = dev;
+
+ ret = mtk_smi_device_link_common(dev, &larb->smi_common_dev);
+ if (ret < 0)
+ return ret;
pm_runtime_enable(dev);
platform_set_drvdata(pdev, larb);
- return component_add(dev, &mtk_smi_larb_component_ops);
+ ret = component_add(dev, &mtk_smi_larb_component_ops);
+ if (ret)
+ goto err_pm_disable;
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(dev);
+ device_link_remove(dev, larb->smi_common_dev);
+ return ret;
}
static int mtk_smi_larb_remove(struct platform_device *pdev)
@@ -391,11 +479,9 @@ static int __maybe_unused mtk_smi_larb_resume(struct device *dev)
const struct mtk_smi_larb_gen *larb_gen = larb->larb_gen;
int ret;
- ret = mtk_smi_clk_enable(&larb->smi);
- if (ret < 0) {
- dev_err(dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(larb->smi.clk_num, larb->smi.clks);
+ if (ret < 0)
return ret;
- }
/* Configure the basic setting for this larb */
larb_gen->config_port(dev);
@@ -407,7 +493,7 @@ static int __maybe_unused mtk_smi_larb_suspend(struct device *dev)
{
struct mtk_smi_larb *larb = dev_get_drvdata(dev);
- mtk_smi_clk_disable(&larb->smi);
+ clk_bulk_disable_unprepare(larb->smi.clk_num, larb->smi.clks);
return 0;
}
@@ -427,64 +513,75 @@ static struct platform_driver mtk_smi_larb_driver = {
}
};
+static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = {
+ {SMI_L1LEN, 0xb},
+ {SMI_M4U_TH, 0xe100e10},
+ {SMI_FIFO_TH1, 0x506090a},
+ {SMI_FIFO_TH2, 0x506090a},
+ {SMI_DCM, 0x4f1},
+ {SMI_DUMMY, 0x1},
+};
+
static const struct mtk_smi_common_plat mtk_smi_common_gen1 = {
- .gen = MTK_SMI_GEN1,
+ .type = MTK_SMI_GEN1,
};
static const struct mtk_smi_common_plat mtk_smi_common_gen2 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
};
static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = {
- .gen = MTK_SMI_GEN2,
- .has_gals = true,
- .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
- F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(4) |
+ F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(7),
};
static const struct mtk_smi_common_plat mtk_smi_common_mt8192 = {
- .gen = MTK_SMI_GEN2,
+ .type = MTK_SMI_GEN2,
.has_gals = true,
.bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(5) |
F_MMU1_LARB(6),
};
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vdo = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(3) | F_MMU1_LARB(5) |
+ F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_common_mt8195_vpp = {
+ .type = MTK_SMI_GEN2,
+ .has_gals = true,
+ .bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) | F_MMU1_LARB(7),
+ .init = mtk_smi_common_mt8195_init,
+};
+
+static const struct mtk_smi_common_plat mtk_smi_sub_common_mt8195 = {
+ .type = MTK_SMI_GEN2_SUB_COMM,
+ .has_gals = true,
+};
+
static const struct of_device_id mtk_smi_common_of_ids[] = {
- {
- .compatible = "mediatek,mt8173-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt8167-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt2701-smi-common",
- .data = &mtk_smi_common_gen1,
- },
- {
- .compatible = "mediatek,mt2712-smi-common",
- .data = &mtk_smi_common_gen2,
- },
- {
- .compatible = "mediatek,mt6779-smi-common",
- .data = &mtk_smi_common_mt6779,
- },
- {
- .compatible = "mediatek,mt8183-smi-common",
- .data = &mtk_smi_common_mt8183,
- },
- {
- .compatible = "mediatek,mt8192-smi-common",
- .data = &mtk_smi_common_mt8192,
- },
+ {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1},
+ {.compatible = "mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779},
+ {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2},
+ {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183},
+ {.compatible = "mediatek,mt8192-smi-common", .data = &mtk_smi_common_mt8192},
+ {.compatible = "mediatek,mt8195-smi-common-vdo", .data = &mtk_smi_common_mt8195_vdo},
+ {.compatible = "mediatek,mt8195-smi-common-vpp", .data = &mtk_smi_common_mt8195_vpp},
+ {.compatible = "mediatek,mt8195-smi-sub-common", .data = &mtk_smi_sub_common_mt8195},
{}
};
@@ -492,8 +589,7 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct mtk_smi *common;
- struct resource *res;
- int ret;
+ int ret, clk_required = MTK_SMI_COM_REQ_CLK_NR;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -501,23 +597,15 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
common->dev = dev;
common->plat = of_device_get_match_data(dev);
- common->clk_apb = devm_clk_get(dev, "apb");
- if (IS_ERR(common->clk_apb))
- return PTR_ERR(common->clk_apb);
-
- common->clk_smi = devm_clk_get(dev, "smi");
- if (IS_ERR(common->clk_smi))
- return PTR_ERR(common->clk_smi);
-
if (common->plat->has_gals) {
- common->clk_gals0 = devm_clk_get(dev, "gals0");
- if (IS_ERR(common->clk_gals0))
- return PTR_ERR(common->clk_gals0);
-
- common->clk_gals1 = devm_clk_get(dev, "gals1");
- if (IS_ERR(common->clk_gals1))
- return PTR_ERR(common->clk_gals1);
+ if (common->plat->type == MTK_SMI_GEN2)
+ clk_required = MTK_SMI_COM_GALS_REQ_CLK_NR;
+ else if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ clk_required = MTK_SMI_SUB_COM_GALS_REQ_CLK_NR;
}
+ ret = mtk_smi_dts_clk_init(dev, common, mtk_smi_common_clks, clk_required, 0);
+ if (ret)
+ return ret;
/*
* for mtk smi gen 1, we need to get the ao(always on) base to config
@@ -525,9 +613,8 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
* clock into emi clock domain, but for mtk smi gen2, there's no smi ao
* base.
*/
- if (common->plat->gen == MTK_SMI_GEN1) {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->smi_ao_base = devm_ioremap_resource(dev, res);
+ if (common->plat->type == MTK_SMI_GEN1) {
+ common->smi_ao_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->smi_ao_base))
return PTR_ERR(common->smi_ao_base);
@@ -539,11 +626,18 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
if (ret)
return ret;
} else {
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- common->base = devm_ioremap_resource(dev, res);
+ common->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(common->base))
return PTR_ERR(common->base);
}
+
+ /* link its smi-common if this is smi-sub-common */
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM) {
+ ret = mtk_smi_device_link_common(dev, &common->smi_common_dev);
+ if (ret < 0)
+ return ret;
+ }
+
pm_runtime_enable(dev);
platform_set_drvdata(pdev, common);
return 0;
@@ -551,6 +645,10 @@ static int mtk_smi_common_probe(struct platform_device *pdev)
static int mtk_smi_common_remove(struct platform_device *pdev)
{
+ struct mtk_smi *common = dev_get_drvdata(&pdev->dev);
+
+ if (common->plat->type == MTK_SMI_GEN2_SUB_COMM)
+ device_link_remove(&pdev->dev, common->smi_common_dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
@@ -558,17 +656,21 @@ static int mtk_smi_common_remove(struct platform_device *pdev)
static int __maybe_unused mtk_smi_common_resume(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- u32 bus_sel = common->plat->bus_sel;
- int ret;
+ const struct mtk_smi_reg_pair *init = common->plat->init;
+ u32 bus_sel = common->plat->bus_sel; /* default is 0 */
+ int ret, i;
- ret = mtk_smi_clk_enable(common);
- if (ret) {
- dev_err(common->dev, "Failed to enable clock(%d).\n", ret);
+ ret = clk_bulk_prepare_enable(common->clk_num, common->clks);
+ if (ret)
return ret;
- }
- if (common->plat->gen == MTK_SMI_GEN2 && bus_sel)
- writel(bus_sel, common->base + SMI_BUS_SEL);
+ if (common->plat->type != MTK_SMI_GEN2)
+ return 0;
+
+ for (i = 0; i < SMI_COMMON_INIT_REGS_NR && init && init[i].offset; i++)
+ writel_relaxed(init[i].value, common->base + init[i].offset);
+
+ writel(bus_sel, common->base + SMI_BUS_SEL);
return 0;
}
@@ -576,7 +678,7 @@ static int __maybe_unused mtk_smi_common_suspend(struct device *dev)
{
struct mtk_smi *common = dev_get_drvdata(dev);
- mtk_smi_clk_disable(common);
+ clk_bulk_disable_unprepare(common->clk_num, common->clks);
return 0;
}
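The bus_sel values in the platform data above are simple per-larb bit pairs: F_MMU1_LARB(larbid) sets bit (larbid * 2) in SMI_BUS_SEL, steering that larb to MMU1 while all others stay on MMU0. A quick stand-alone check of the mt8183 value (arithmetic only, no hardware access):

#include <stdio.h>

#define SMI_BUS_LARB_SHIFT(larbid)	((larbid) << 1)
#define F_MMU1_LARB(larbid)		(0x1 << SMI_BUS_LARB_SHIFT(larbid))

int main(void)
{
	/* mt8183 routes larbs 1, 2, 5 and 7 to MMU1 */
	unsigned int bus_sel = F_MMU1_LARB(1) | F_MMU1_LARB(2) |
			       F_MMU1_LARB(5) | F_MMU1_LARB(7);

	printf("SMI_BUS_SEL = 0x%x\n", bus_sel);	/* prints 0x4414 */
	return 0;
}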
diff --git a/drivers/memory/of_memory.c b/drivers/memory/of_memory.c
index d9f5437d3bce..b94408954d85 100644
--- a/drivers/memory/of_memory.c
+++ b/drivers/memory/of_memory.c
@@ -298,3 +298,90 @@ default_timings:
return NULL;
}
EXPORT_SYMBOL(of_lpddr3_get_ddr_timings);
+
+/**
+ * of_lpddr2_get_info() - extracts information about the lpddr2 chip.
+ * @np: Pointer to device tree node containing lpddr2 info
+ * @dev: Device requesting info
+ *
+ * Populates an lpddr2_info structure by extracting data from the device
+ * tree node. Returns a pointer to the populated structure, or NULL if an
+ * error occurred while populating it. If a property is missing from the
+ * device tree, the corresponding value is set to -ENOENT.
+ */
+const struct lpddr2_info
+*of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ struct lpddr2_info *ret_info, info = {};
+ struct property *prop;
+ const char *cp;
+ int err;
+
+ err = of_property_read_u32(np, "revision-id1", &info.revision_id1);
+ if (err)
+ info.revision_id1 = -ENOENT;
+
+ err = of_property_read_u32(np, "revision-id2", &info.revision_id2);
+ if (err)
+ info.revision_id2 = -ENOENT;
+
+ err = of_property_read_u32(np, "io-width", &info.io_width);
+ if (err)
+ return NULL;
+
+ info.io_width = 32 / info.io_width - 1;
+
+ err = of_property_read_u32(np, "density", &info.density);
+ if (err)
+ return NULL;
+
+ info.density = ffs(info.density) - 7;
+
+ if (of_device_is_compatible(np, "jedec,lpddr2-s4"))
+ info.arch_type = LPDDR2_TYPE_S4;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-s2"))
+ info.arch_type = LPDDR2_TYPE_S2;
+ else if (of_device_is_compatible(np, "jedec,lpddr2-nvm"))
+ info.arch_type = LPDDR2_TYPE_NVM;
+ else
+ return NULL;
+
+ prop = of_find_property(np, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp)) {
+
+#define OF_LPDDR2_VENDOR_CMP(compat, ID) \
+ if (!of_compat_cmp(cp, compat ",", strlen(compat ","))) { \
+ info.manufacturer_id = LPDDR2_MANID_##ID; \
+ break; \
+ }
+
+ OF_LPDDR2_VENDOR_CMP("samsung", SAMSUNG)
+ OF_LPDDR2_VENDOR_CMP("qimonda", QIMONDA)
+ OF_LPDDR2_VENDOR_CMP("elpida", ELPIDA)
+ OF_LPDDR2_VENDOR_CMP("etron", ETRON)
+ OF_LPDDR2_VENDOR_CMP("nanya", NANYA)
+ OF_LPDDR2_VENDOR_CMP("hynix", HYNIX)
+ OF_LPDDR2_VENDOR_CMP("mosel", MOSEL)
+ OF_LPDDR2_VENDOR_CMP("winbond", WINBOND)
+ OF_LPDDR2_VENDOR_CMP("esmt", ESMT)
+ OF_LPDDR2_VENDOR_CMP("spansion", SPANSION)
+ OF_LPDDR2_VENDOR_CMP("sst", SST)
+ OF_LPDDR2_VENDOR_CMP("zmos", ZMOS)
+ OF_LPDDR2_VENDOR_CMP("intel", INTEL)
+ OF_LPDDR2_VENDOR_CMP("numonyx", NUMONYX)
+ OF_LPDDR2_VENDOR_CMP("micron", MICRON)
+
+#undef OF_LPDDR2_VENDOR_CMP
+ }
+
+ if (!info.manufacturer_id)
+ info.manufacturer_id = -ENOENT;
+
+ ret_info = devm_kzalloc(dev, sizeof(*ret_info), GFP_KERNEL);
+ if (ret_info)
+ *ret_info = info;
+
+ return ret_info;
+}
+EXPORT_SYMBOL(of_lpddr2_get_info);
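The two conversions above map device-tree properties onto the mode-register style codes used for matching later: an `io-width` of W bits becomes 32 / W - 1, and a `density` of D megabits becomes ffs(D) - 7. A worked check of both formulas for a few typical parts (ffs() approximated with the GCC builtin; the part sizes are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned int widths[] = { 32, 16 };		/* io-width in bits */
	unsigned int densities[] = { 64, 512, 2048 };	/* density in Mb */
	unsigned int i;

	for (i = 0; i < 2; i++)	/* prints codes 0 and 1 */
		printf("io-width %2u -> code %u\n", widths[i], 32 / widths[i] - 1);

	for (i = 0; i < 3; i++)	/* prints codes 0, 3 and 5 */
		printf("density %4u Mb -> code %d\n",
		       densities[i], __builtin_ffs(densities[i]) - 7);

	return 0;
}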
diff --git a/drivers/memory/of_memory.h b/drivers/memory/of_memory.h
index 4a99b232ab0a..1c4e47fede8a 100644
--- a/drivers/memory/of_memory.h
+++ b/drivers/memory/of_memory.h
@@ -20,6 +20,9 @@ const struct lpddr3_min_tck *of_lpddr3_get_min_tck(struct device_node *np,
const struct lpddr3_timings *
of_lpddr3_get_ddr_timings(struct device_node *np_ddr,
struct device *dev, u32 device_type, u32 *nr_frequencies);
+
+const struct lpddr2_info *of_lpddr2_get_info(struct device_node *np,
+ struct device *dev);
#else
static inline const struct lpddr2_min_tck
*of_get_min_tck(struct device_node *np, struct device *dev)
@@ -46,6 +49,12 @@ static inline const struct lpddr3_timings
{
return NULL;
}
+
+static inline const struct lpddr2_info
+ *of_lpddr2_get_info(struct device_node *np, struct device *dev)
+{
+ return NULL;
+}
#endif /* CONFIG_OF && CONFIG_DDR */
#endif /* __LINUX_MEMORY_OF_REG_ */
diff --git a/drivers/memory/renesas-rpc-if.c b/drivers/memory/renesas-rpc-if.c
index 45eed659b0c6..7435baad0007 100644
--- a/drivers/memory/renesas-rpc-if.c
+++ b/drivers/memory/renesas-rpc-if.c
@@ -160,10 +160,61 @@ static const struct regmap_access_table rpcif_volatile_table = {
.n_yes_ranges = ARRAY_SIZE(rpcif_volatile_ranges),
};
+
+/*
+ * Custom accessor functions to ensure SMRDR0 and SMWDR0 are always accessed
+ * with proper width. Requires SMENR_SPIDE to be correctly set before!
+ */
+static int rpcif_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ *val = readb(rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ *val = readw(rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ *val = readl(rpc->base + reg);
+ return 0;
+}
+
+static int rpcif_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ struct rpcif *rpc = context;
+
+ if (reg == RPCIF_SMRDR0 || reg == RPCIF_SMWDR0) {
+ u32 spide = readl(rpc->base + RPCIF_SMENR) & RPCIF_SMENR_SPIDE(0xF);
+
+ if (spide == 0x8) {
+ writeb(val, rpc->base + reg);
+ return 0;
+ } else if (spide == 0xC) {
+ writew(val, rpc->base + reg);
+ return 0;
+ } else if (spide != 0xF) {
+ return -EILSEQ;
+ }
+ }
+
+ writel(val, rpc->base + reg);
+ return 0;
+}
+
static const struct regmap_config rpcif_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .reg_read = rpcif_reg_read,
+ .reg_write = rpcif_reg_write,
.fast_io = true,
.max_register = RPCIF_PHYINT,
.volatile_table = &rpcif_volatile_table,
@@ -173,17 +224,15 @@ int rpcif_sw_init(struct rpcif *rpc, struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct resource *res;
- void __iomem *base;
rpc->dev = dev;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
- base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(base))
- return PTR_ERR(base);
+ rpc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rpc->base))
+ return PTR_ERR(rpc->base);
- rpc->regmap = devm_regmap_init_mmio(&pdev->dev, base,
- &rpcif_regmap_config);
+ rpc->regmap = devm_regmap_init(&pdev->dev, NULL, rpc, &rpcif_regmap_config);
if (IS_ERR(rpc->regmap)) {
dev_err(&pdev->dev,
"failed to init regmap for rpcif, error %ld\n",
@@ -354,20 +403,16 @@ void rpcif_prepare(struct rpcif *rpc, const struct rpcif_op *op, u64 *offs,
nbytes = op->data.nbytes;
rpc->xferlen = nbytes;
- rpc->enable |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes)) |
- RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
+ rpc->enable |= RPCIF_SMENR_SPIDB(rpcif_bit_size(op->data.buswidth));
}
}
EXPORT_SYMBOL(rpcif_prepare);
int rpcif_manual_xfer(struct rpcif *rpc)
{
- u32 smenr, smcr, pos = 0, max = 4;
+ u32 smenr, smcr, pos = 0, max = rpc->bus_size == 2 ? 8 : 4;
int ret = 0;
- if (rpc->bus_size == 2)
- max = 8;
-
pm_runtime_get_sync(rpc->dev);
regmap_update_bits(rpc->regmap, RPCIF_PHYCNT,
@@ -378,37 +423,36 @@ int rpcif_manual_xfer(struct rpcif *rpc)
regmap_write(rpc->regmap, RPCIF_SMOPR, rpc->option);
regmap_write(rpc->regmap, RPCIF_SMDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_SMDRENR, rpc->ddr);
+ regmap_write(rpc->regmap, RPCIF_SMADR, rpc->smadr);
smenr = rpc->enable;
switch (rpc->dir) {
case RPCIF_DATA_OUT:
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
smcr = rpc->smcr | RPCIF_SMCR_SPIE;
- if (nbytes > max) {
- nbytes = max;
+
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
+ if (bytes_left > nbytes)
smcr |= RPCIF_SMCR_SSLKP;
- }
+
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
+ regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
memcpy(data, rpc->buffer + pos, nbytes);
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_write(rpc->regmap, RPCIF_SMWDR1,
data[0]);
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[1]);
- } else if (nbytes > 2) {
+ } else {
regmap_write(rpc->regmap, RPCIF_SMWDR0,
data[0]);
- } else {
- regmap_write(rpc->regmap, RPCIF_SMWDR0,
- data[0] << 16);
}
- regmap_write(rpc->regmap, RPCIF_SMADR,
- rpc->smadr + pos);
- regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR, smcr);
ret = wait_msg_xfer_end(rpc);
if (ret)
@@ -448,14 +492,16 @@ int rpcif_manual_xfer(struct rpcif *rpc)
break;
}
while (pos < rpc->xferlen) {
- u32 nbytes = rpc->xferlen - pos;
- u32 data[2];
+ u32 bytes_left = rpc->xferlen - pos;
+ u32 nbytes, data[2];
- if (nbytes > max)
- nbytes = max;
+ /* nbytes may only be 1, 2, 4, or 8 */
+ nbytes = bytes_left >= max ? max : (1 << ilog2(bytes_left));
regmap_write(rpc->regmap, RPCIF_SMADR,
rpc->smadr + pos);
+ smenr &= ~RPCIF_SMENR_SPIDE(0xF);
+ smenr |= RPCIF_SMENR_SPIDE(rpcif_bits_set(rpc, nbytes));
regmap_write(rpc->regmap, RPCIF_SMENR, smenr);
regmap_write(rpc->regmap, RPCIF_SMCR,
rpc->smcr | RPCIF_SMCR_SPIE);
@@ -463,18 +509,14 @@ int rpcif_manual_xfer(struct rpcif *rpc)
if (ret)
goto err_out;
- if (nbytes > 4) {
+ if (nbytes == 8) {
regmap_read(rpc->regmap, RPCIF_SMRDR1,
&data[0]);
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[1]);
- } else if (nbytes > 2) {
- regmap_read(rpc->regmap, RPCIF_SMRDR0,
- &data[0]);
- } else {
+ } else {
regmap_read(rpc->regmap, RPCIF_SMRDR0,
&data[0]);
- data[0] >>= 16;
}
memcpy(rpc->buffer + pos, data, nbytes);
@@ -502,6 +544,48 @@ err_out:
}
EXPORT_SYMBOL(rpcif_manual_xfer);
+static void memcpy_fromio_readw(void *to,
+ const void __iomem *from,
+ size_t count)
+{
+ const int maxw = (IS_ENABLED(CONFIG_64BIT)) ? 8 : 4;
+ u8 buf[2];
+
+ if (count && ((unsigned long)from & 1)) {
+ *(u16 *)buf = __raw_readw((void __iomem *)((unsigned long)from & ~1));
+ *(u8 *)to = buf[1];
+ from++;
+ to++;
+ count--;
+ }
+ while (count >= 2 && !IS_ALIGNED((unsigned long)from, maxw)) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ while (count >= maxw) {
+#ifdef CONFIG_64BIT
+ *(u64 *)to = __raw_readq(from);
+#else
+ *(u32 *)to = __raw_readl(from);
+#endif
+ from += maxw;
+ to += maxw;
+ count -= maxw;
+ }
+ while (count >= 2) {
+ *(u16 *)to = __raw_readw(from);
+ from += 2;
+ to += 2;
+ count -= 2;
+ }
+ if (count) {
+ *(u16 *)buf = __raw_readw(from);
+ *(u8 *)to = buf[0];
+ }
+}
+
ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
{
loff_t from = offs & (RPCIF_DIRMAP_SIZE - 1);
@@ -523,7 +607,10 @@ ssize_t rpcif_dirmap_read(struct rpcif *rpc, u64 offs, size_t len, void *buf)
regmap_write(rpc->regmap, RPCIF_DRDMCR, rpc->dummy);
regmap_write(rpc->regmap, RPCIF_DRDRENR, rpc->ddr);
- memcpy_fromio(buf, rpc->dirmap + from, len);
+ if (rpc->bus_size == 2)
+ memcpy_fromio_readw(buf, rpc->dirmap + from, len);
+ else
+ memcpy_fromio(buf, rpc->dirmap + from, len);
pm_runtime_put(rpc->dev);
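Both manual-transfer loops above now size each beat the same way: use the full bus width while enough bytes remain, otherwise round the remainder down to a power of two so SPIDE can always be programmed for a 1, 2, 4 or 8 byte access. The selection logic in isolation (ilog2() approximated with a GCC builtin; max = 8 corresponds to the 2-byte-wide bus case):

#include <stdio.h>

/* Round a non-zero value down to a power of two, i.e. 1 << ilog2(x). */
static unsigned int pow2_floor(unsigned int x)
{
	return 1u << (31 - __builtin_clz(x));
}

static unsigned int next_chunk(unsigned int bytes_left, unsigned int max)
{
	return bytes_left >= max ? max : pow2_floor(bytes_left);
}

int main(void)
{
	unsigned int pos = 0, xferlen = 13, max = 8;	/* example transfer length */

	while (pos < xferlen) {
		unsigned int nbytes = next_chunk(xferlen - pos, max);

		printf("pos=%u nbytes=%u\n", pos, nbytes);	/* 8, then 4, then 1 */
		pos += nbytes;
	}

	return 0;
}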
diff --git a/drivers/memory/samsung/Kconfig b/drivers/memory/samsung/Kconfig
index 8e240f078afc..7fb70f573031 100644
--- a/drivers/memory/samsung/Kconfig
+++ b/drivers/memory/samsung/Kconfig
@@ -14,11 +14,12 @@ config EXYNOS5422_DMC
depends on DEVFREQ_GOV_SIMPLE_ONDEMAND
depends on (PM_DEVFREQ && PM_DEVFREQ_EVENT)
help
- This adds driver for Exynos5422 DMC (Dynamic Memory Controller).
- The driver provides support for Dynamic Voltage and Frequency Scaling in
- DMC and DRAM. It also supports changing timings of DRAM running with
- different frequency. The timings are calculated based on DT memory
- information.
+ This adds the driver for the Samsung Exynos5422 SoC DMC (Dynamic
+ Memory Controller). The driver provides support for Dynamic Voltage
+ and Frequency Scaling in the DMC and DRAM. It also supports changing
+ the timings of DRAM running at different frequencies. The timings are
+ calculated based on DT memory information.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
config EXYNOS_SROM
bool "Exynos SROM controller driver" if COMPILE_TEST
@@ -29,6 +30,6 @@ config EXYNOS_SROM
during suspend. If however appropriate device tree configuration
is provided, the driver enables support for external memory
or external devices.
- If unsure, say Y on devices with Samsung Exynos SocS.
+ If unsure, say Y on devices with Samsung Exynos SoCs.
endif
diff --git a/drivers/memory/tegra/Kconfig b/drivers/memory/tegra/Kconfig
index f9bae36c03a3..7951764b4efe 100644
--- a/drivers/memory/tegra/Kconfig
+++ b/drivers/memory/tegra/Kconfig
@@ -16,6 +16,7 @@ config TEGRA20_EMC
depends on ARCH_TEGRA_2x_SOC || COMPILE_TEST
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select PM_DEVFREQ
+ select DDR
help
This driver is for the External Memory Controller (EMC) found on
Tegra20 chips. The EMC controls the external DRAM on the board.
diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index 3c5aae7abf35..44b4a4080920 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -87,11 +87,9 @@ struct tegra_mc *devm_tegra_memory_controller_get(struct device *dev)
return ERR_PTR(-EPROBE_DEFER);
}
- err = devm_add_action(dev, tegra_mc_devm_action_put_device, mc);
- if (err) {
- put_device(mc->dev);
+ err = devm_add_action_or_reset(dev, tegra_mc_devm_action_put_device, mc);
+ if (err)
return ERR_PTR(err);
- }
return mc;
}
@@ -706,15 +704,6 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
goto remove_nodes;
}
- /*
- * MC driver is registered too early, so early that generic driver
- * syncing doesn't work for the MC. But it doesn't really matter
- * since syncing works for the EMC drivers, hence we can sync the
- * MC driver by ourselves and then EMC will complete syncing of
- * the whole ICC state.
- */
- icc_sync_state(mc->dev);
-
return 0;
remove_nodes:
@@ -835,6 +824,15 @@ static int __maybe_unused tegra_mc_resume(struct device *dev)
return 0;
}
+static void tegra_mc_sync_state(struct device *dev)
+{
+ struct tegra_mc *mc = dev_get_drvdata(dev);
+
+ /* check whether ICC provider is registered */
+ if (mc->provider.dev == dev)
+ icc_sync_state(dev);
+}
+
static const struct dev_pm_ops tegra_mc_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(tegra_mc_suspend, tegra_mc_resume)
};
@@ -845,6 +843,7 @@ static struct platform_driver tegra_mc_driver = {
.of_match_table = tegra_mc_of_match,
.pm = &tegra_mc_pm_ops,
.suppress_bind_attrs = true,
+ .sync_state = tegra_mc_sync_state,
},
.prevent_deferred_probe = true,
.probe = tegra_mc_probe,
diff --git a/drivers/memory/tegra/tegra186-emc.c b/drivers/memory/tegra/tegra186-emc.c
index d65e7c2a580b..746c4ef2c0af 100644
--- a/drivers/memory/tegra/tegra186-emc.c
+++ b/drivers/memory/tegra/tegra186-emc.c
@@ -197,6 +197,11 @@ static int tegra186_emc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failed to EMC DVFS pairs: %d\n", err);
goto put_bpmp;
}
+ if (msg.rx.ret < 0) {
+ err = -EINVAL;
+ dev_err(&pdev->dev, "EMC DVFS MRQ failed: %d (BPMP error code)\n", msg.rx.ret);
+ goto put_bpmp;
+ }
emc->debugfs.min_rate = ULONG_MAX;
emc->debugfs.max_rate = 0;
diff --git a/drivers/memory/tegra/tegra20-emc.c b/drivers/memory/tegra/tegra20-emc.c
index c3462dbc8c22..497b6edbf3ca 100644
--- a/drivers/memory/tegra/tegra20-emc.c
+++ b/drivers/memory/tegra/tegra20-emc.c
@@ -5,6 +5,7 @@
* Author: Dmitry Osipenko <digetx@gmail.com>
*/
+#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/debugfs.h>
@@ -27,11 +28,15 @@
#include <soc/tegra/common.h>
#include <soc/tegra/fuse.h>
+#include "../jedec_ddr.h"
+#include "../of_memory.h"
+
#include "mc.h"
#define EMC_INTSTATUS 0x000
#define EMC_INTMASK 0x004
#define EMC_DBG 0x008
+#define EMC_ADR_CFG_0 0x010
#define EMC_TIMING_CONTROL 0x028
#define EMC_RC 0x02c
#define EMC_RFC 0x030
@@ -68,6 +73,7 @@
#define EMC_QUSE_EXTRA 0x0ac
#define EMC_ODT_WRITE 0x0b0
#define EMC_ODT_READ 0x0b4
+#define EMC_MRR 0x0ec
#define EMC_FBIO_CFG5 0x104
#define EMC_FBIO_CFG6 0x114
#define EMC_STAT_CONTROL 0x160
@@ -94,6 +100,7 @@
#define EMC_REFRESH_OVERFLOW_INT BIT(3)
#define EMC_CLKCHANGE_COMPLETE_INT BIT(4)
+#define EMC_MRR_DIVLD_INT BIT(5)
#define EMC_DBG_READ_MUX_ASSEMBLY BIT(0)
#define EMC_DBG_WRITE_MUX_ACTIVE BIT(1)
@@ -102,11 +109,25 @@
#define EMC_DBG_CFG_PRIORITY BIT(24)
#define EMC_FBIO_CFG5_DRAM_WIDTH_X16 BIT(4)
+#define EMC_FBIO_CFG5_DRAM_TYPE GENMASK(1, 0)
+
+#define EMC_MRR_DEV_SELECTN GENMASK(31, 30)
+#define EMC_MRR_MRR_MA GENMASK(23, 16)
+#define EMC_MRR_MRR_DATA GENMASK(15, 0)
+
+#define EMC_ADR_CFG_0_EMEM_NUMDEV GENMASK(25, 24)
#define EMC_PWR_GATHER_CLEAR (1 << 8)
#define EMC_PWR_GATHER_DISABLE (2 << 8)
#define EMC_PWR_GATHER_ENABLE (3 << 8)
+enum emc_dram_type {
+ DRAM_TYPE_RESERVED,
+ DRAM_TYPE_DDR1,
+ DRAM_TYPE_LPDDR2,
+ DRAM_TYPE_DDR2,
+};
+
static const u16 emc_timing_registers[] = {
EMC_RC,
EMC_RFC,
@@ -201,6 +222,14 @@ struct tegra_emc {
struct mutex rate_lock;
struct devfreq_simple_ondemand_data ondemand_data;
+
+ /* memory chip identity information */
+ union lpddr2_basic_config4 basic_conf4;
+ unsigned int manufacturer_id;
+ unsigned int revision_id1;
+ unsigned int revision_id2;
+
+ bool mrr_error;
};
static irqreturn_t tegra_emc_isr(int irq, void *data)
@@ -397,15 +426,19 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
if (!emc->timings)
return -ENOMEM;
- emc->num_timings = child_count;
timing = emc->timings;
for_each_child_of_node(node, child) {
+ if (of_node_name_eq(child, "lpddr2"))
+ continue;
+
err = load_one_timing_from_dt(emc, timing++, child);
if (err) {
of_node_put(child);
return err;
}
+
+ emc->num_timings++;
}
sort(emc->timings, emc->num_timings, sizeof(*timing), cmp_timings,
@@ -422,12 +455,18 @@ static int tegra_emc_load_timings_from_dt(struct tegra_emc *emc,
}
static struct device_node *
-tegra_emc_find_node_by_ram_code(struct device *dev)
+tegra_emc_find_node_by_ram_code(struct tegra_emc *emc)
{
+ struct device *dev = emc->dev;
struct device_node *np;
u32 value, ram_code;
int err;
+ if (emc->mrr_error) {
+ dev_warn(dev, "memory timings skipped due to MRR error\n");
+ return NULL;
+ }
+
if (of_get_child_count(dev->of_node) == 0) {
dev_info_once(dev, "device-tree doesn't have memory timings\n");
return NULL;
@@ -442,8 +481,49 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
np = of_find_node_by_name(np, "emc-tables")) {
err = of_property_read_u32(np, "nvidia,ram-code", &value);
if (err || value != ram_code) {
- of_node_put(np);
- continue;
+ struct device_node *lpddr2_np;
+ bool cfg_mismatches = false;
+
+ lpddr2_np = of_find_node_by_name(np, "lpddr2");
+ if (lpddr2_np) {
+ const struct lpddr2_info *info;
+
+ info = of_lpddr2_get_info(lpddr2_np, dev);
+ if (info) {
+ if (info->manufacturer_id >= 0 &&
+ info->manufacturer_id != emc->manufacturer_id)
+ cfg_mismatches = true;
+
+ if (info->revision_id1 >= 0 &&
+ info->revision_id1 != emc->revision_id1)
+ cfg_mismatches = true;
+
+ if (info->revision_id2 >= 0 &&
+ info->revision_id2 != emc->revision_id2)
+ cfg_mismatches = true;
+
+ if (info->density != emc->basic_conf4.density)
+ cfg_mismatches = true;
+
+ if (info->io_width != emc->basic_conf4.io_width)
+ cfg_mismatches = true;
+
+ if (info->arch_type != emc->basic_conf4.arch_type)
+ cfg_mismatches = true;
+ } else {
+ dev_err(dev, "failed to parse %pOF\n", lpddr2_np);
+ cfg_mismatches = true;
+ }
+
+ of_node_put(lpddr2_np);
+ } else {
+ cfg_mismatches = true;
+ }
+
+ if (cfg_mismatches) {
+ of_node_put(np);
+ continue;
+ }
}
return np;
@@ -455,10 +535,72 @@ tegra_emc_find_node_by_ram_code(struct device *dev)
return NULL;
}
+static int emc_read_lpddr_mode_register(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ unsigned int register_addr,
+ unsigned int *register_data)
+{
+ u32 memory_dev = emem_dev + 1;
+ u32 val, mr_mask = 0xff;
+ int err;
+
+ /* clear data-valid interrupt status */
+ writel_relaxed(EMC_MRR_DIVLD_INT, emc->regs + EMC_INTSTATUS);
+
+ /* issue mode register read request */
+ val = FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev);
+ val |= FIELD_PREP(EMC_MRR_MRR_MA, register_addr);
+
+ writel_relaxed(val, emc->regs + EMC_MRR);
+
+ /* wait for the LPDDR2 data-valid interrupt */
+ err = readl_relaxed_poll_timeout_atomic(emc->regs + EMC_INTSTATUS, val,
+ val & EMC_MRR_DIVLD_INT,
+ 1, 100);
+ if (err) {
+ dev_err(emc->dev, "mode register %u read failed: %d\n",
+ register_addr, err);
+ emc->mrr_error = true;
+ return err;
+ }
+
+ /* read out mode register data */
+ val = readl_relaxed(emc->regs + EMC_MRR);
+ *register_data = FIELD_GET(EMC_MRR_MRR_DATA, val) & mr_mask;
+
+ return 0;
+}
+
+static void emc_read_lpddr_sdram_info(struct tegra_emc *emc,
+ unsigned int emem_dev,
+ bool print_out)
+{
+ /* these registers are standard for all LPDDR JEDEC memory chips */
+ emc_read_lpddr_mode_register(emc, emem_dev, 5, &emc->manufacturer_id);
+ emc_read_lpddr_mode_register(emc, emem_dev, 6, &emc->revision_id1);
+ emc_read_lpddr_mode_register(emc, emem_dev, 7, &emc->revision_id2);
+ emc_read_lpddr_mode_register(emc, emem_dev, 8, &emc->basic_conf4.value);
+
+ if (!print_out)
+ return;
+
+ dev_info(emc->dev, "SDRAM[dev%u]: manufacturer: 0x%x (%s) rev1: 0x%x rev2: 0x%x prefetch: S%u density: %uMbit iowidth: %ubit\n",
+ emem_dev, emc->manufacturer_id,
+ lpddr2_jedec_manufacturer(emc->manufacturer_id),
+ emc->revision_id1, emc->revision_id2,
+ 4 >> emc->basic_conf4.arch_type,
+ 64 << emc->basic_conf4.density,
+ 32 >> emc->basic_conf4.io_width);
+}
+
static int emc_setup_hw(struct tegra_emc *emc)
{
+ u32 emc_cfg, emc_dbg, emc_fbio, emc_adr_cfg;
u32 intmask = EMC_REFRESH_OVERFLOW_INT;
- u32 emc_cfg, emc_dbg, emc_fbio;
+ static bool print_sdram_info_once;
+ enum emc_dram_type dram_type;
+ const char *dram_type_str;
+ unsigned int emem_numdev;
emc_cfg = readl_relaxed(emc->regs + EMC_CFG_2);
@@ -496,7 +638,36 @@ static int emc_setup_hw(struct tegra_emc *emc)
else
emc->dram_bus_width = 32;
- dev_info_once(emc->dev, "%ubit DRAM bus\n", emc->dram_bus_width);
+ dram_type = FIELD_GET(EMC_FBIO_CFG5_DRAM_TYPE, emc_fbio);
+
+ switch (dram_type) {
+ case DRAM_TYPE_RESERVED:
+ dram_type_str = "INVALID";
+ break;
+ case DRAM_TYPE_DDR1:
+ dram_type_str = "DDR1";
+ break;
+ case DRAM_TYPE_LPDDR2:
+ dram_type_str = "LPDDR2";
+ break;
+ case DRAM_TYPE_DDR2:
+ dram_type_str = "DDR2";
+ break;
+ }
+
+ emc_adr_cfg = readl_relaxed(emc->regs + EMC_ADR_CFG_0);
+ emem_numdev = FIELD_GET(EMC_ADR_CFG_0_EMEM_NUMDEV, emc_adr_cfg) + 1;
+
+ dev_info_once(emc->dev, "%ubit DRAM bus, %u %s %s attached\n",
+ emc->dram_bus_width, emem_numdev, dram_type_str,
+ emem_numdev == 2 ? "devices" : "device");
+
+ if (dram_type == DRAM_TYPE_LPDDR2) {
+ while (emem_numdev--)
+ emc_read_lpddr_sdram_info(emc, emem_numdev,
+ !print_sdram_info_once);
+ print_sdram_info_once = true;
+ }
return 0;
}
@@ -1049,14 +1220,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
emc->clk_nb.notifier_call = tegra_emc_clk_change_notify;
emc->dev = &pdev->dev;
- np = tegra_emc_find_node_by_ram_code(&pdev->dev);
- if (np) {
- err = tegra_emc_load_timings_from_dt(emc, np);
- of_node_put(np);
- if (err)
- return err;
- }
-
emc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(emc->regs))
return PTR_ERR(emc->regs);
@@ -1065,6 +1228,14 @@ static int tegra_emc_probe(struct platform_device *pdev)
if (err)
return err;
+ np = tegra_emc_find_node_by_ram_code(emc);
+ if (np) {
+ err = tegra_emc_load_timings_from_dt(emc, np);
+ of_node_put(np);
+ if (err)
+ return err;
+ }
+
err = devm_request_irq(&pdev->dev, irq, tegra_emc_isr, 0,
dev_name(&pdev->dev), emc);
if (err) {
@@ -1117,4 +1288,5 @@ module_platform_driver(tegra_emc_driver);
MODULE_AUTHOR("Dmitry Osipenko <digetx@gmail.com>");
MODULE_DESCRIPTION("NVIDIA Tegra20 EMC driver");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
MODULE_LICENSE("GPL v2");
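The new EMC_MRR accessors above are plain bitfield packing with FIELD_PREP()/FIELD_GET() from <linux/bitfield.h>. A short sketch of that part alone, reusing the masks this patch defines (function names are illustrative; the data-valid interrupt polling done in emc_read_lpddr_mode_register() is left out):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EMC_MRR_DEV_SELECTN	GENMASK(31, 30)	/* masks as defined in tegra20-emc.c above */
#define EMC_MRR_MRR_MA		GENMASK(23, 16)
#define EMC_MRR_MRR_DATA	GENMASK(15, 0)

/* compose the EMC_MRR value that requests mode register @mr_addr from @memory_dev */
static u32 example_mrr_request(unsigned int memory_dev, unsigned int mr_addr)
{
	return FIELD_PREP(EMC_MRR_DEV_SELECTN, memory_dev) |
	       FIELD_PREP(EMC_MRR_MRR_MA, mr_addr);
}

/* pull the 16-bit read-back data out of the EMC_MRR register value */
static unsigned int example_mrr_data(u32 mrr)
{
	return FIELD_GET(EMC_MRR_MRR_DATA, mrr);
}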
diff --git a/drivers/memory/tegra/tegra210-emc-cc-r21021.c b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
index 0ebfa8eccf0c..cc76adb8d7e8 100644
--- a/drivers/memory/tegra/tegra210-emc-cc-r21021.c
+++ b/drivers/memory/tegra/tegra210-emc-cc-r21021.c
@@ -478,7 +478,7 @@ static u32 periodic_compensation_handler(struct tegra210_emc *emc, u32 type,
static u32 tegra210_emc_r21021_periodic_compensation(struct tegra210_emc *emc)
{
u32 emc_cfg, emc_cfg_o, emc_cfg_update, del, value;
- u32 list[] = {
+ static const u32 list[] = {
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_0,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_1,
EMC_PMACRO_OB_DDLL_LONG_DQ_RANK0_2,
diff --git a/drivers/memory/tegra/tegra210-emc-core.c b/drivers/memory/tegra/tegra210-emc-core.c
index 06c0f17fa429..13584f9317a4 100644
--- a/drivers/memory/tegra/tegra210-emc-core.c
+++ b/drivers/memory/tegra/tegra210-emc-core.c
@@ -1662,7 +1662,7 @@ static int tegra210_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_min_rate_fops,
tegra210_emc_debug_min_rate_get,
tegra210_emc_debug_min_rate_set, "%llu\n");
@@ -1692,7 +1692,7 @@ static int tegra210_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_max_rate_fops,
tegra210_emc_debug_max_rate_get,
tegra210_emc_debug_max_rate_set, "%llu\n");
@@ -1723,7 +1723,7 @@ static int tegra210_emc_debug_temperature_set(void *data, u64 temperature)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra210_emc_debug_temperature_fops,
tegra210_emc_debug_temperature_get,
tegra210_emc_debug_temperature_set, "%llu\n");
diff --git a/drivers/memory/tegra/tegra30-emc.c b/drivers/memory/tegra/tegra30-emc.c
index 7e21a852f2e1..80f98d717e13 100644
--- a/drivers/memory/tegra/tegra30-emc.c
+++ b/drivers/memory/tegra/tegra30-emc.c
@@ -1289,7 +1289,7 @@ static int tegra_emc_debug_min_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_min_rate_fops,
tegra_emc_debug_min_rate_get,
tegra_emc_debug_min_rate_set, "%llu\n");
@@ -1319,7 +1319,7 @@ static int tegra_emc_debug_max_rate_set(void *data, u64 rate)
return 0;
}
-DEFINE_SIMPLE_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
+DEFINE_DEBUGFS_ATTRIBUTE(tegra_emc_debug_max_rate_fops,
tegra_emc_debug_max_rate_get,
tegra_emc_debug_max_rate_set, "%llu\n");
diff --git a/drivers/memstick/core/ms_block.c b/drivers/memstick/core/ms_block.c
index acf36676e388..0cda6c6baefc 100644
--- a/drivers/memstick/core/ms_block.c
+++ b/drivers/memstick/core/ms_block.c
@@ -1736,7 +1736,7 @@ static int msb_init_card(struct memstick_dev *card)
msb->pages_in_block = boot_block->attr.block_size * 2;
msb->block_size = msb->page_size * msb->pages_in_block;
- if (msb->page_size > PAGE_SIZE) {
+ if ((size_t)msb->page_size > PAGE_SIZE) {
/* this isn't supported by linux at all, anyway*/
dbg("device page %d size isn't supported", msb->page_size);
return -EINVAL;
@@ -2156,10 +2156,14 @@ static int msb_init_disk(struct memstick_dev *card)
set_disk_ro(msb->disk, 1);
msb_start(card);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
dbg("Disk added");
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c
index 22778d0e24f5..c0450397b673 100644
--- a/drivers/memstick/core/mspro_block.c
+++ b/drivers/memstick/core/mspro_block.c
@@ -1239,10 +1239,14 @@ static int mspro_block_init_disk(struct memstick_dev *card)
set_capacity(msb->disk, capacity);
dev_dbg(&card->dev, "capacity set %ld\n", capacity);
- device_add_disk(&card->dev, msb->disk, NULL);
+ rc = device_add_disk(&card->dev, msb->disk, NULL);
+ if (rc)
+ goto out_cleanup_disk;
msb->active = 1;
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(msb->disk);
out_free_tag_set:
blk_mq_free_tag_set(&msb->tag_set);
out_release_id:
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index f9a93b0565e1..21cb2a786058 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -882,7 +882,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt)
iounmap(host->addr);
err_out_free:
- kfree(msh);
+ memstick_free_host(msh);
return NULL;
}
@@ -927,8 +927,7 @@ static int jmb38x_ms_probe(struct pci_dev *pdev,
goto err_out_int;
}
- jm = kzalloc(sizeof(struct jmb38x_ms)
- + cnt * sizeof(struct memstick_host *), GFP_KERNEL);
+ jm = kzalloc(struct_size(jm, hosts, cnt), GFP_KERNEL);
if (!jm) {
rc = -ENOMEM;
goto err_out_int;
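The kzalloc() change above switches to struct_size() from <linux/overflow.h>, which sizes a structure ending in a flexible array and saturates instead of wrapping on multiplication overflow. A minimal sketch of the idiom with an illustrative structure (not the driver's own):

#include <linux/overflow.h>
#include <linux/slab.h>

struct example_hosts {
	int count;
	struct memstick_host *hosts[];	/* flexible array member */
};

static struct example_hosts *example_alloc_hosts(int cnt)
{
	struct example_hosts *eh;

	/*
	 * struct_size(eh, hosts, cnt) evaluates to
	 * sizeof(*eh) + cnt * sizeof(eh->hosts[0]) and returns SIZE_MAX on
	 * overflow, so the allocation fails rather than being undersized.
	 */
	eh = kzalloc(struct_size(eh, hosts, cnt), GFP_KERNEL);
	if (eh)
		eh->count = cnt;

	return eh;
}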
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index e79a0218c492..1d35d147552d 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -838,15 +838,15 @@ static void r592_remove(struct pci_dev *pdev)
}
memstick_remove_host(dev->host);
+ if (dev->dummy_dma_page)
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
+ dev->dummy_dma_page_physical_address);
+
free_irq(dev->irq, dev);
iounmap(dev->mmio);
pci_release_regions(pdev);
pci_disable_device(pdev);
memstick_free_host(dev->host);
-
- if (dev->dummy_dma_page)
- dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->dummy_dma_page,
- dev->dummy_dma_page_physical_address);
}
#ifdef CONFIG_PM_SLEEP
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
index 3261cac762de..acdc257a900e 100644
--- a/drivers/message/fusion/mptlan.c
+++ b/drivers/message/fusion/mptlan.c
@@ -1350,7 +1350,7 @@ mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
HWaddr[5] = a[0];
dev->addr_len = FC_ALEN;
- memcpy(dev->dev_addr, HWaddr, FC_ALEN);
+ dev_addr_set(dev, HWaddr);
memset(dev->broadcast, 0xff, FC_ALEN);
/* The Tx queue is 127 deep on the 909.
diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig
index f5fd5b786607..0e0bcd0da852 100644
--- a/drivers/misc/mei/Kconfig
+++ b/drivers/misc/mei/Kconfig
@@ -47,3 +47,5 @@ config INTEL_MEI_TXE
Intel Bay Trail
source "drivers/misc/mei/hdcp/Kconfig"
+source "drivers/misc/mei/pxp/Kconfig"
+
diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile
index f1c76f7ee804..d8e5165917f2 100644
--- a/drivers/misc/mei/Makefile
+++ b/drivers/misc/mei/Makefile
@@ -26,3 +26,4 @@ mei-$(CONFIG_EVENT_TRACING) += mei-trace.o
CFLAGS_mei-trace.o = -I$(src)
obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/
+obj-$(CONFIG_INTEL_MEI_PXP) += pxp/
diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig
new file mode 100644
index 000000000000..4029b96afc04
--- /dev/null
+++ b/drivers/misc/mei/pxp/Kconfig
@@ -0,0 +1,13 @@
+
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+config INTEL_MEI_PXP
+ tristate "Intel PXP services of ME Interface"
+ select INTEL_MEI_ME
+ depends on DRM_I915
+ help
+ MEI Support for PXP Services on Intel platforms.
+
+ Enables the ME FW services required for PXP support through
+	  the Intel i915 display driver.
diff --git a/drivers/misc/mei/pxp/Makefile b/drivers/misc/mei/pxp/Makefile
new file mode 100644
index 000000000000..0329950d5794
--- /dev/null
+++ b/drivers/misc/mei/pxp/Makefile
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Copyright (c) 2020, Intel Corporation. All rights reserved.
+#
+# Makefile - PXP client driver for Intel MEI Bus Driver.
+
+obj-$(CONFIG_INTEL_MEI_PXP) += mei_pxp.o
diff --git a/drivers/misc/mei/pxp/mei_pxp.c b/drivers/misc/mei/pxp/mei_pxp.c
new file mode 100644
index 000000000000..f7380d387bab
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.c
@@ -0,0 +1,229 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2020 - 2021 Intel Corporation
+ */
+
+/**
+ * DOC: MEI_PXP Client Driver
+ *
+ * The mei_pxp driver acts as a translation layer between PXP
+ * protocol implementer (I915) and ME FW by translating PXP
+ * negotiation messages to ME FW command payloads and vice versa.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uuid.h>
+#include <linux/mei_cl_bus.h>
+#include <linux/component.h>
+#include <drm/drm_connector.h>
+#include <drm/i915_component.h>
+#include <drm/i915_pxp_tee_interface.h>
+
+#include "mei_pxp.h"
+
+/**
+ * mei_pxp_send_message() - Sends a PXP message to ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @message: a message buffer to send
+ * @size: size of the message
+ * Return: 0 on Success, <0 on Failure
+ */
+static int
+mei_pxp_send_message(struct device *dev, const void *message, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !message)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+	/* temporarily drop the const qualifier until the API is fixed */
+ byte = mei_cldev_send(cldev, (u8 *)message, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_send failed. %zd\n", byte);
+ return byte;
+ }
+
+ return 0;
+}
+
+/**
+ * mei_pxp_receive_message() - Receives a PXP message from ME FW.
+ * @dev: device corresponding to the mei_cl_device
+ * @buffer: a message buffer to contain the received message
+ * @size: size of the buffer
+ * Return: bytes received on Success, <0 on Failure
+ */
+static int
+mei_pxp_receive_message(struct device *dev, void *buffer, size_t size)
+{
+ struct mei_cl_device *cldev;
+ ssize_t byte;
+
+ if (!dev || !buffer)
+ return -EINVAL;
+
+ cldev = to_mei_cl_device(dev);
+
+ byte = mei_cldev_recv(cldev, buffer, size);
+ if (byte < 0) {
+ dev_dbg(dev, "mei_cldev_recv failed. %zd\n", byte);
+ return byte;
+ }
+
+ return byte;
+}
+
+static const struct i915_pxp_component_ops mei_pxp_ops = {
+ .owner = THIS_MODULE,
+ .send = mei_pxp_send_message,
+ .recv = mei_pxp_receive_message,
+};
+
+static int mei_component_master_bind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ comp_master->ops = &mei_pxp_ops;
+ comp_master->tee_dev = dev;
+ ret = component_bind_all(dev, comp_master);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mei_component_master_unbind(struct device *dev)
+{
+ struct mei_cl_device *cldev = to_mei_cl_device(dev);
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+
+ component_unbind_all(dev, comp_master);
+}
+
+static const struct component_master_ops mei_component_master_ops = {
+ .bind = mei_component_master_bind,
+ .unbind = mei_component_master_unbind,
+};
+
+/**
+ * mei_pxp_component_match - compare function for matching mei pxp.
+ *
+ * The function checks that the driver is i915, that the subcomponent is
+ * PXP, and that the grandparent of the pxp device and the parent of the
+ * i915 device are the same PCH device.
+ *
+ * @dev: master device
+ * @subcomponent: subcomponent to match (I915_COMPONENT_PXP)
+ * @data: compare data (mei pxp device)
+ *
+ * Return:
+ * * 1 - if components match
+ * * 0 - otherwise
+ */
+static int mei_pxp_component_match(struct device *dev, int subcomponent,
+ void *data)
+{
+ struct device *base = data;
+
+ if (strcmp(dev->driver->name, "i915") ||
+ subcomponent != I915_COMPONENT_PXP)
+ return 0;
+
+ base = base->parent;
+ if (!base)
+ return 0;
+
+ base = base->parent;
+ dev = dev->parent;
+
+ return (base && dev && dev == base);
+}
+
+static int mei_pxp_probe(struct mei_cl_device *cldev,
+ const struct mei_cl_device_id *id)
+{
+ struct i915_pxp_component *comp_master;
+ struct component_match *master_match;
+ int ret;
+
+ ret = mei_cldev_enable(cldev);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "mei_cldev_enable Failed. %d\n", ret);
+ goto enable_err_exit;
+ }
+
+ comp_master = kzalloc(sizeof(*comp_master), GFP_KERNEL);
+ if (!comp_master) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ master_match = NULL;
+ component_match_add_typed(&cldev->dev, &master_match,
+ mei_pxp_component_match, &cldev->dev);
+ if (IS_ERR_OR_NULL(master_match)) {
+ ret = -ENOMEM;
+ goto err_exit;
+ }
+
+ mei_cldev_set_drvdata(cldev, comp_master);
+ ret = component_master_add_with_match(&cldev->dev,
+ &mei_component_master_ops,
+ master_match);
+ if (ret < 0) {
+ dev_err(&cldev->dev, "Master comp add failed %d\n", ret);
+ goto err_exit;
+ }
+
+ return 0;
+
+err_exit:
+ mei_cldev_set_drvdata(cldev, NULL);
+ kfree(comp_master);
+ mei_cldev_disable(cldev);
+enable_err_exit:
+ return ret;
+}
+
+static void mei_pxp_remove(struct mei_cl_device *cldev)
+{
+ struct i915_pxp_component *comp_master = mei_cldev_get_drvdata(cldev);
+ int ret;
+
+ component_master_del(&cldev->dev, &mei_component_master_ops);
+ kfree(comp_master);
+ mei_cldev_set_drvdata(cldev, NULL);
+
+ ret = mei_cldev_disable(cldev);
+ if (ret)
+ dev_warn(&cldev->dev, "mei_cldev_disable() failed\n");
+}
+
+/* fbf6fcf1-96cf-4e2e-a6a6-1bab8cbe36b1 : PAVP GUID */
+#define MEI_GUID_PXP GUID_INIT(0xfbf6fcf1, 0x96cf, 0x4e2e, 0xA6, \
+ 0xa6, 0x1b, 0xab, 0x8c, 0xbe, 0x36, 0xb1)
+
+static struct mei_cl_device_id mei_pxp_tbl[] = {
+ { .uuid = MEI_GUID_PXP, .version = MEI_CL_VERSION_ANY },
+ { }
+};
+MODULE_DEVICE_TABLE(mei, mei_pxp_tbl);
+
+static struct mei_cl_driver mei_pxp_driver = {
+ .id_table = mei_pxp_tbl,
+ .name = KBUILD_MODNAME,
+ .probe = mei_pxp_probe,
+ .remove = mei_pxp_remove,
+};
+
+module_mei_cl_driver(mei_pxp_driver);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("MEI PXP");
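Once the component master above binds, the i915 side reaches the ME firmware only through the ops and tee_dev fields filled in by mei_component_master_bind(). A hedged sketch of what a consumer call could look like; the helper name, buffers and error handling are hypothetical, while the .send/.recv signatures are the ones declared in this file:

/* hypothetical consumer helper; @comp is a bound struct i915_pxp_component */
static int example_pxp_roundtrip(struct i915_pxp_component *comp,
				 const void *msg, size_t msg_size,
				 void *reply, size_t reply_size)
{
	int ret;

	ret = comp->ops->send(comp->tee_dev, msg, msg_size);
	if (ret)
		return ret;

	/* recv returns the number of bytes received or a negative error */
	ret = comp->ops->recv(comp->tee_dev, reply, reply_size);
	if (ret < 0)
		return ret;

	return 0;
}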
diff --git a/drivers/misc/mei/pxp/mei_pxp.h b/drivers/misc/mei/pxp/mei_pxp.h
new file mode 100644
index 000000000000..e7b15373fefd
--- /dev/null
+++ b/drivers/misc/mei/pxp/mei_pxp.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Authors:
+ * Vitaly Lubart <vitaly.lubart@intel.com>
+ */
+
+#ifndef __MEI_PXP_H__
+#define __MEI_PXP_H__
+
+/* me_pxp_status: Enumeration of all PXP Status Codes */
+enum me_pxp_status {
+ ME_PXP_STATUS_SUCCESS = 0x0000,
+
+};
+
+#endif /* __MEI_PXP_H__ */
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 2508f83bdc3f..dab7b92db790 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -514,6 +514,7 @@ static const struct net_device_ops xpnet_netdev_ops = {
static int __init
xpnet_init(void)
{
+ u8 addr[ETH_ALEN];
int result;
if (!is_uv_system())
@@ -545,15 +546,17 @@ xpnet_init(void)
xpnet_device->min_mtu = XPNET_MIN_MTU;
xpnet_device->max_mtu = XPNET_MAX_MTU;
+ memset(addr, 0, sizeof(addr));
/*
* Multicast assumes the LSB of the first octet is set for multicast
* MAC addresses. We chose the first octet of the MAC to be unlikely
* to collide with any vendor's officially issued MAC.
*/
- xpnet_device->dev_addr[0] = 0x02; /* locally administered, no OUI */
+ addr[0] = 0x02; /* locally administered, no OUI */
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
- xpnet_device->dev_addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ addr[XPNET_PARTID_OCTET + 1] = xp_partition_id;
+ addr[XPNET_PARTID_OCTET + 0] = (xp_partition_id >> 8);
+ eth_hw_addr_set(xpnet_device, addr);
/*
* ether_setup() sets this to a multicast device. We are
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 431af5e8be2f..90e1bcd03b46 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -258,7 +258,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
mq = &md->queue;
/* Dispatch locking to the block layer */
- req = blk_get_request(mq->queue, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
count = PTR_ERR(req);
goto out_put;
@@ -266,7 +266,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
blk_execute_rq(NULL, req, 0);
ret = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -646,7 +646,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl() into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -660,7 +660,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md,
blk_execute_rq(NULL, req, 0);
ioc_err = req_to_mmc_queue_req(req)->drv_op_result;
err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_done:
kfree(idata->buf);
@@ -716,7 +716,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
* Dispatch the ioctl()s into the block request queue.
*/
mq = &md->queue;
- req = blk_get_request(mq->queue,
+ req = blk_mq_alloc_request(mq->queue,
idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@@ -733,7 +733,7 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md,
for (i = 0; i < num_of_cmds && !err; i++)
err = mmc_blk_ioctl_copy_to_user(&cmds[i], idata[i]);
- blk_put_request(req);
+ blk_mq_free_request(req);
cmd_err:
for (i = 0; i < num_of_cmds; i++) {
@@ -2442,9 +2442,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
/* used in ->open, must be set before add_disk: */
if (area_type == MMC_BLK_DATA_AREA_MAIN)
dev_set_drvdata(&card->dev, md);
- device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ ret = device_add_disk(md->parent, md->disk, mmc_disk_attr_groups);
+ if (ret)
+ goto err_cleanup_queue;
return md;
+ err_cleanup_queue:
+ blk_cleanup_queue(md->disk->queue);
+ blk_mq_free_tag_set(&md->queue.tag_set);
err_kfree:
kfree(md);
out:
@@ -2730,7 +2735,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
int ret;
/* Ask the block layer about the card status */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return PTR_ERR(req);
req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
@@ -2740,7 +2745,7 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)
*val = ret;
ret = 0;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -2766,7 +2771,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
return -ENOMEM;
/* Ask the block layer for the EXT CSD */
- req = blk_get_request(mq->queue, REQ_OP_DRV_IN, 0);
+ req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_free;
@@ -2775,7 +2780,7 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)
req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
blk_execute_rq(NULL, req, 0);
err = req_to_mmc_queue_req(req)->drv_op_result;
- blk_put_request(req);
+ blk_mq_free_request(req);
if (err) {
pr_err("FAILED %d\n", err);
goto out_free;
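Every block.c hunk above is the same dispatch pattern with the request helpers renamed: allocate a driver-private request, describe the operation, execute it synchronously, read the result, free the request. Condensed into one hypothetical wrapper that uses only calls and fields already appearing in these hunks:

static int example_get_card_status(struct mmc_queue *mq, u32 *status)
{
	struct request *req;
	int ret;

	req = blk_mq_alloc_request(mq->queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* describe the driver-private operation and run it through the queue */
	req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
	blk_execute_rq(NULL, req, 0);

	ret = req_to_mmc_queue_req(req)->drv_op_result;
	if (ret >= 0) {
		*status = ret;
		ret = 0;
	}

	blk_mq_free_request(req);
	return ret;
}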
diff --git a/drivers/mmc/core/crypto.c b/drivers/mmc/core/crypto.c
index 67557808cada..fec4fbf16a5b 100644
--- a/drivers/mmc/core/crypto.c
+++ b/drivers/mmc/core/crypto.c
@@ -16,13 +16,13 @@ void mmc_crypto_set_initial_state(struct mmc_host *host)
{
/* Reset might clear all keys, so reprogram all the keys. */
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_reprogram_all_keys(&host->ksm);
+ blk_crypto_reprogram_all_keys(&host->crypto_profile);
}
void mmc_crypto_setup_queue(struct request_queue *q, struct mmc_host *host)
{
if (host->caps2 & MMC_CAP2_CRYPTO)
- blk_ksm_register(&host->ksm, q);
+ blk_crypto_register(&host->crypto_profile, q);
}
EXPORT_SYMBOL_GPL(mmc_crypto_setup_queue);
@@ -30,12 +30,15 @@ void mmc_crypto_prepare_req(struct mmc_queue_req *mqrq)
{
struct request *req = mmc_queue_req_to_req(mqrq);
struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct blk_crypto_keyslot *keyslot;
if (!req->crypt_ctx)
return;
mrq->crypto_ctx = req->crypt_ctx;
- if (req->crypt_keyslot)
- mrq->crypto_key_slot = blk_ksm_get_slot_idx(req->crypt_keyslot);
+
+ keyslot = req->crypt_keyslot;
+ if (keyslot)
+ mrq->crypto_key_slot = blk_crypto_keyslot_index(keyslot);
}
EXPORT_SYMBOL_GPL(mmc_crypto_prepare_req);
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 29e58ffae379..b1c1716dacf0 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1224,6 +1224,14 @@ static int mmc_select_hs400(struct mmc_card *card)
mmc_set_timing(host, MMC_TIMING_MMC_HS400);
mmc_set_bus_speed(card);
+ if (host->ops->execute_hs400_tuning) {
+ mmc_retune_disable(host);
+ err = host->ops->execute_hs400_tuning(host, card);
+ mmc_retune_enable(host);
+ if (err)
+ goto out_err;
+ }
+
if (host->ops->hs400_complete)
host->ops->hs400_complete(host);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index ae25ffc2e870..e5e94567a9a9 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -38,7 +38,6 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);
int mmc_spi_set_crc(struct mmc_host *host, int use_crc);
int mmc_bus_test(struct mmc_card *card, u8 bus_width);
int mmc_can_ext_csd(struct mmc_card *card);
-int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
int mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
bool mmc_prepare_busy_cmd(struct mmc_host *host, struct mmc_command *cmd,
unsigned int timeout_ms);
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 4646b7a03db6..c9db24e16af1 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index 05e907451df9..dd2a4b6ab6ad 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -39,24 +39,24 @@ static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
int mmc_gpio_alloc(struct mmc_host *host)
{
- struct mmc_gpio *ctx = devm_kzalloc(host->parent,
- sizeof(*ctx), GFP_KERNEL);
-
- if (ctx) {
- ctx->cd_debounce_delay_ms = 200;
- ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s cd", dev_name(host->parent));
- if (!ctx->cd_label)
- return -ENOMEM;
- ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL,
- "%s ro", dev_name(host->parent));
- if (!ctx->ro_label)
- return -ENOMEM;
- host->slot.handler_priv = ctx;
- host->slot.cd_irq = -EINVAL;
- }
+ const char *devname = dev_name(host->parent);
+ struct mmc_gpio *ctx;
+
+ ctx = devm_kzalloc(host->parent, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->cd_debounce_delay_ms = 200;
+ ctx->cd_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s cd", devname);
+ if (!ctx->cd_label)
+ return -ENOMEM;
+ ctx->ro_label = devm_kasprintf(host->parent, GFP_KERNEL, "%s ro", devname);
+ if (!ctx->ro_label)
+ return -ENOMEM;
+ host->slot.handler_priv = ctx;
+ host->slot.cd_irq = -EINVAL;
- return ctx ? 0 : -ENOMEM;
+ return 0;
}
int mmc_gpio_get_ro(struct mmc_host *host)
@@ -178,6 +178,10 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->cd_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
@@ -226,6 +230,10 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
if (IS_ERR(desc))
return PTR_ERR(desc);
+ /* Update default label if no con_id provided */
+ if (!con_id)
+ gpiod_set_consumer_name(desc, ctx->ro_label);
+
if (debounce) {
ret = gpiod_set_debounce(desc, debounce);
if (ret < 0)
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 95b3511b0560..5af8494c31b5 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -315,15 +315,17 @@ config MMC_SDHCI_TEGRA
If unsure, say N.
config MMC_SDHCI_S3C
- tristate "SDHCI support on Samsung S3C SoC"
+ tristate "SDHCI support on Samsung S3C/S5P/Exynos SoC"
depends on MMC_SDHCI
depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
This selects the Secure Digital Host Controller Interface (SDHCI)
often referrered to as the HSMMC block in some of the Samsung S3C
- range of SoC.
+	  (S3C2416, S3C2443, S3C6410), S5Pv210 and Exynos (Exynos4210,
+ Exynos4412) SoCs.
- If you have a controller with this interface, say Y or M here.
+	  If you have a controller with this interface (therefore you build for
+	  such a Samsung SoC), say Y or M here.
If unsure, say N.
@@ -506,7 +508,7 @@ config MMC_OMAP_HS
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
- depends on ISA_DMA_API
+ depends on ISA_DMA_API && !M68K
help
This selects the Winbond(R) W83L51xD Secure digital and
Multimedia card Interface.
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 14004cc09aaa..ea36d379bd3c 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
sdhci-pci-y += sdhci-pci-core.o sdhci-pci-o2micro.o sdhci-pci-arasan.o \
sdhci-pci-dwc-mshc.o sdhci-pci-gli.o
-obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
diff --git a/drivers/mmc/host/cqhci-core.c b/drivers/mmc/host/cqhci-core.c
index 38559a956330..b0d30c35c390 100644
--- a/drivers/mmc/host/cqhci-core.c
+++ b/drivers/mmc/host/cqhci-core.c
@@ -282,6 +282,9 @@ static void __cqhci_enable(struct cqhci_host *cq_host)
cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
+ if (cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT)
+ cqhci_writel(cq_host, 0, CQHCI_CTL);
+
mmc->cqe_on = true;
if (cq_host->ops->enable)
@@ -899,8 +902,8 @@ static bool cqhci_timeout(struct mmc_host *mmc, struct mmc_request *mrq,
spin_unlock_irqrestore(&cq_host->lock, flags);
if (timed_out) {
- pr_err("%s: cqhci: timeout for tag %d\n",
- mmc_hostname(mmc), tag);
+ pr_err("%s: cqhci: timeout for tag %d, qcnt %d\n",
+ mmc_hostname(mmc), tag, cq_host->qcnt);
cqhci_dumpregs(cq_host);
}
diff --git a/drivers/mmc/host/cqhci-crypto.c b/drivers/mmc/host/cqhci-crypto.c
index 6419cfbb4ab7..d5f4b6972f63 100644
--- a/drivers/mmc/host/cqhci-crypto.c
+++ b/drivers/mmc/host/cqhci-crypto.c
@@ -6,7 +6,7 @@
*/
#include <linux/blk-crypto.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include <linux/mmc/host.h>
#include "cqhci-crypto.h"
@@ -23,9 +23,10 @@ static const struct cqhci_crypto_alg_entry {
};
static inline struct cqhci_host *
-cqhci_host_from_ksm(struct blk_keyslot_manager *ksm)
+cqhci_host_from_crypto_profile(struct blk_crypto_profile *profile)
{
- struct mmc_host *mmc = container_of(ksm, struct mmc_host, ksm);
+ struct mmc_host *mmc =
+ container_of(profile, struct mmc_host, crypto_profile);
return mmc->cqe_private;
}
@@ -57,12 +58,12 @@ static int cqhci_crypto_program_key(struct cqhci_host *cq_host,
return 0;
}
-static int cqhci_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
const union cqhci_crypto_cap_entry *ccap_array =
cq_host->crypto_cap_array;
const struct cqhci_crypto_alg_entry *alg =
@@ -115,11 +116,11 @@ static int cqhci_crypto_clear_keyslot(struct cqhci_host *cq_host, int slot)
return cqhci_crypto_program_key(cq_host, &cfg, slot);
}
-static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int cqhci_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct cqhci_host *cq_host = cqhci_host_from_ksm(ksm);
+ struct cqhci_host *cq_host = cqhci_host_from_crypto_profile(profile);
return cqhci_crypto_clear_keyslot(cq_host, slot);
}
@@ -132,7 +133,7 @@ static int cqhci_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
* "enabled" when these are called, i.e. CQHCI_ENABLE might not be set in the
* CQHCI_CFG register. But the hardware allows that.
*/
-static const struct blk_ksm_ll_ops cqhci_ksm_ops = {
+static const struct blk_crypto_ll_ops cqhci_crypto_ops = {
.keyslot_program = cqhci_crypto_keyslot_program,
.keyslot_evict = cqhci_crypto_keyslot_evict,
};
@@ -157,8 +158,8 @@ cqhci_find_blk_crypto_mode(union cqhci_crypto_cap_entry cap)
*
* If the driver previously set MMC_CAP2_CRYPTO and the CQE declares
* CQHCI_CAP_CS, initialize the crypto support. This involves reading the
- * crypto capability registers, initializing the keyslot manager, clearing all
- * keyslots, and enabling 128-bit task descriptors.
+ * crypto capability registers, initializing the blk_crypto_profile, clearing
+ * all keyslots, and enabling 128-bit task descriptors.
*
* Return: 0 if crypto was initialized or isn't supported; whether
* MMC_CAP2_CRYPTO remains set indicates which one of those cases it is.
@@ -168,7 +169,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
struct device *dev = mmc_dev(mmc);
- struct blk_keyslot_manager *ksm = &mmc->ksm;
+ struct blk_crypto_profile *profile = &mmc->crypto_profile;
unsigned int num_keyslots;
unsigned int cap_idx;
enum blk_crypto_mode_num blk_mode_num;
@@ -199,15 +200,15 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
*/
num_keyslots = cq_host->crypto_capabilities.config_count + 1;
- err = devm_blk_ksm_init(dev, ksm, num_keyslots);
+ err = devm_blk_crypto_profile_init(dev, profile, num_keyslots);
if (err)
goto out;
- ksm->ksm_ll_ops = cqhci_ksm_ops;
- ksm->dev = dev;
+ profile->ll_ops = cqhci_crypto_ops;
+ profile->dev = dev;
/* Unfortunately, CQHCI crypto only supports 32 DUN bits. */
- ksm->max_dun_bytes_supported = 4;
+ profile->max_dun_bytes_supported = 4;
/*
* Cache all the crypto capabilities and advertise the supported crypto
@@ -223,7 +224,7 @@ int cqhci_crypto_init(struct cqhci_host *cq_host)
cq_host->crypto_cap_array[cap_idx]);
if (blk_mode_num == BLK_ENCRYPTION_MODE_INVALID)
continue;
- ksm->crypto_modes_supported[blk_mode_num] |=
+ profile->modes_supported[blk_mode_num] |=
cq_host->crypto_cap_array[cap_idx].sdus_mask * 512;
}
diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c
index 0c75810812a0..c2dd29ef45c6 100644
--- a/drivers/mmc/host/dw_mmc-exynos.c
+++ b/drivers/mmc/host/dw_mmc-exynos.c
@@ -442,14 +442,14 @@ static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host)
return sample;
}
-static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
+static s8 dw_mci_exynos_get_best_clksmpl(u8 candidates)
{
const u8 iter = 8;
u8 __c;
s8 i, loc = -1;
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0xc7) == 0xc7) {
loc = i;
goto out;
@@ -457,13 +457,25 @@ static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates)
}
for (i = 0; i < iter; i++) {
- __c = ror8(candiates, i);
+ __c = ror8(candidates, i);
if ((__c & 0x83) == 0x83) {
loc = i;
goto out;
}
}
+ /*
+	 * If there is no candidate value, then it needs to return -EIO.
+	 * If there are candidate values but the best clk sample value is not
+	 * found, then use the first candidate clk sample value.
+ */
+ for (i = 0; i < iter; i++) {
+ __c = ror8(candidates, i);
+ if ((__c & 0x1) == 0x1) {
+ loc = i;
+ goto out;
+ }
+ }
out:
return loc;
}
@@ -473,7 +485,7 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
struct dw_mci *host = slot->host;
struct dw_mci_exynos_priv_data *priv = host->priv;
struct mmc_host *mmc = slot->mmc;
- u8 start_smpl, smpl, candiates = 0;
+ u8 start_smpl, smpl, candidates = 0;
s8 found;
int ret = 0;
@@ -484,16 +496,18 @@ static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode)
smpl = dw_mci_exynos_move_next_clksmpl(host);
if (!mmc_send_tuning(mmc, opcode, NULL))
- candiates |= (1 << smpl);
+ candidates |= (1 << smpl);
} while (start_smpl != smpl);
- found = dw_mci_exynos_get_best_clksmpl(candiates);
+ found = dw_mci_exynos_get_best_clksmpl(candidates);
if (found >= 0) {
dw_mci_exynos_set_clksmpl(host, found);
priv->tuned_sample = found;
} else {
ret = -EIO;
+ dev_warn(&mmc->class_dev,
+ "There is no candidates value about clksmpl!\n");
}
return ret;
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 380f9aa56eb2..d977f34f6b55 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1611,37 +1611,32 @@ static void dw_mci_hw_reset(struct mmc_host *mmc)
usleep_range(200, 300);
}
-static void dw_mci_init_card(struct mmc_host *mmc, struct mmc_card *card)
+static void dw_mci_prepare_sdio_irq(struct dw_mci_slot *slot, bool prepare)
{
- struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
+ u32 clk_en_a_old;
+ u32 clk_en_a;
/*
* Low power mode will stop the card clock when idle. According to the
* description of the CLKENA register we should disable low power mode
* for SDIO cards if we need SDIO interrupts to work.
*/
- if (mmc->caps & MMC_CAP_SDIO_IRQ) {
- const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id;
- u32 clk_en_a_old;
- u32 clk_en_a;
- clk_en_a_old = mci_readl(host, CLKENA);
-
- if (card->type == MMC_TYPE_SDIO ||
- card->type == MMC_TYPE_SD_COMBO) {
- set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old & ~clken_low_pwr;
- } else {
- clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
- clk_en_a = clk_en_a_old | clken_low_pwr;
- }
+ clk_en_a_old = mci_readl(host, CLKENA);
+ if (prepare) {
+ set_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old & ~clken_low_pwr;
+ } else {
+ clear_bit(DW_MMC_CARD_NO_LOW_PWR, &slot->flags);
+ clk_en_a = clk_en_a_old | clken_low_pwr;
+ }
- if (clk_en_a != clk_en_a_old) {
- mci_writel(host, CLKENA, clk_en_a);
- mci_send_cmd(slot, SDMMC_CMD_UPD_CLK |
- SDMMC_CMD_PRV_DAT_WAIT, 0);
- }
+ if (clk_en_a != clk_en_a_old) {
+ mci_writel(host, CLKENA, clk_en_a);
+ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT,
+ 0);
}
}
@@ -1669,6 +1664,7 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
struct dw_mci_slot *slot = mmc_priv(mmc);
struct dw_mci *host = slot->host;
+ dw_mci_prepare_sdio_irq(slot, enb);
__dw_mci_enable_sdio_irq(slot, enb);
/* Avoid runtime suspending the device when SDIO IRQ is enabled */
@@ -1790,7 +1786,6 @@ static const struct mmc_host_ops dw_mci_ops = {
.execute_tuning = dw_mci_execute_tuning,
.card_busy = dw_mci_card_busy,
.start_signal_voltage_switch = dw_mci_switch_voltage,
- .init_card = dw_mci_init_card,
.prepare_hs400_tuning = dw_mci_prepare_hs400_tuning,
};
@@ -2086,7 +2081,8 @@ static void dw_mci_tasklet_func(struct tasklet_struct *t)
* delayed. Allowing the transfer to take place
* avoids races and keeps things simple.
*/
- if (err != -ETIMEDOUT) {
+ if (err != -ETIMEDOUT &&
+ host->dir_status == DW_MCI_RECV_STATUS) {
state = STATE_SENDING_DATA;
continue;
}
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 3765e2f4ad98..c9cacd4d5b22 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1394,6 +1394,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
} else if (host->variant->busy_timeout && busy_resp &&
status & MCI_DATATIMEOUT) {
cmd->error = -ETIMEDOUT;
+ /*
+ * This will wake up mmci_irq_thread() which will issue
+ * a hardware reset of the MMCI block.
+ */
host->irq_action = IRQ_WAKE_THREAD;
} else {
cmd->resp[0] = readl(base + MMCIRESPONSE0);
diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c
index 6c9d38132f74..16d1c7a43d33 100644
--- a/drivers/mmc/host/moxart-mmc.c
+++ b/drivers/mmc/host/moxart-mmc.c
@@ -566,37 +566,37 @@ static int moxart_probe(struct platform_device *pdev)
if (!mmc) {
dev_err(dev, "mmc_alloc_host failed\n");
ret = -ENOMEM;
- goto out;
+ goto out_mmc;
}
ret = of_address_to_resource(node, 0, &res_mmc);
if (ret) {
dev_err(dev, "of_address_to_resource failed\n");
- goto out;
+ goto out_mmc;
}
irq = irq_of_parse_and_map(node, 0);
if (irq <= 0) {
dev_err(dev, "irq_of_parse_and_map failed\n");
ret = -EINVAL;
- goto out;
+ goto out_mmc;
}
clk = devm_clk_get(dev, NULL);
if (IS_ERR(clk)) {
ret = PTR_ERR(clk);
- goto out;
+ goto out_mmc;
}
reg_mmc = devm_ioremap_resource(dev, &res_mmc);
if (IS_ERR(reg_mmc)) {
ret = PTR_ERR(reg_mmc);
- goto out;
+ goto out_mmc;
}
ret = mmc_of_parse(mmc);
if (ret)
- goto out;
+ goto out_mmc;
host = mmc_priv(mmc);
host->mmc = mmc;
@@ -621,6 +621,14 @@ static int moxart_probe(struct platform_device *pdev)
ret = -EPROBE_DEFER;
goto out;
}
+ if (!IS_ERR(host->dma_chan_tx)) {
+ dma_release_channel(host->dma_chan_tx);
+ host->dma_chan_tx = NULL;
+ }
+ if (!IS_ERR(host->dma_chan_rx)) {
+ dma_release_channel(host->dma_chan_rx);
+ host->dma_chan_rx = NULL;
+ }
dev_dbg(dev, "PIO mode transfer enabled\n");
host->have_dma = false;
} else {
@@ -675,6 +683,11 @@ static int moxart_probe(struct platform_device *pdev)
return 0;
out:
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
+ dma_release_channel(host->dma_chan_tx);
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
+ dma_release_channel(host->dma_chan_rx);
+out_mmc:
if (mmc)
mmc_free_host(mmc);
return ret;
@@ -687,9 +700,9 @@ static int moxart_remove(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, NULL);
- if (!IS_ERR(host->dma_chan_tx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_tx))
dma_release_channel(host->dma_chan_tx);
- if (!IS_ERR(host->dma_chan_rx))
+ if (!IS_ERR_OR_NULL(host->dma_chan_rx))
dma_release_channel(host->dma_chan_rx);
mmc_remove_host(mmc);
mmc_free_host(mmc);
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 4dfc246c5f95..943940b44e83 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -8,6 +8,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/of_address.h>
@@ -258,6 +259,7 @@
#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */
#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */
+#define PAD_DS_TUNE_DLY_SEL (0x1 << 0) /* RW */
#define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */
#define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */
#define PAD_DS_TUNE_DLY3 (0x1f << 12) /* RW */
@@ -301,6 +303,11 @@
#define PAD_CMD_RD_RXDLY_SEL (0x1 << 11) /* RW */
#define PAD_CMD_TX_DLY (0x1f << 12) /* RW */
+/* EMMC50_PAD_DS_TUNE mask */
+#define PAD_DS_DLY_SEL (0x1 << 16) /* RW */
+#define PAD_DS_DLY1 (0x1f << 10) /* RW */
+#define PAD_DS_DLY3 (0x1f << 0) /* RW */
+
#define REQ_CMD_EIO (0x1 << 0)
#define REQ_CMD_TMO (0x1 << 1)
#define REQ_DAT_ERR (0x1 << 2)
@@ -448,11 +455,13 @@ struct msdc_host {
bool vqmmc_enabled;
u32 latch_ck;
u32 hs400_ds_delay;
+ u32 hs400_ds_dly3;
u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */
u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */
bool hs400_cmd_resp_sel_rising;
/* cmd response sample selection for HS400 */
bool hs400_mode; /* current eMMC will run at hs400 mode */
+ bool hs400_tuning; /* hs400 mode online tuning */
bool internal_cd; /* Use internal card-detect logic */
bool cqhci; /* support eMMC hw cmdq */
struct msdc_save_para save_para; /* used when gate HCLK */
@@ -961,7 +970,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz)
}
static inline u32 msdc_cmd_find_resp(struct msdc_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_command *cmd)
{
u32 resp;
@@ -997,7 +1006,7 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
* stop << 14 | rw << 13 | dtype << 11 | rsptyp << 7 | brk << 6 | opcode
*/
u32 opcode = cmd->opcode;
- u32 resp = msdc_cmd_find_resp(host, mrq, cmd);
+ u32 resp = msdc_cmd_find_resp(host, cmd);
u32 rawcmd = (opcode & 0x3f) | ((resp & 0x7) << 7);
host->cmd_rsp = resp;
@@ -1043,8 +1052,8 @@ static inline u32 msdc_cmd_prepare_raw_cmd(struct msdc_host *host,
return rawcmd;
}
-static void msdc_start_data(struct msdc_host *host, struct mmc_request *mrq,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_start_data(struct msdc_host *host, struct mmc_command *cmd,
+ struct mmc_data *data)
{
bool read;
@@ -1112,8 +1121,7 @@ static void msdc_recheck_sdio_irq(struct msdc_host *host)
}
}
-static void msdc_track_cmd_data(struct msdc_host *host,
- struct mmc_command *cmd, struct mmc_data *data)
+static void msdc_track_cmd_data(struct msdc_host *host, struct mmc_command *cmd)
{
if (host->error)
dev_dbg(host->dev, "%s: cmd=%d arg=%08X; host->error=0x%08X\n",
@@ -1134,7 +1142,7 @@ static void msdc_request_done(struct msdc_host *host, struct mmc_request *mrq)
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
- msdc_track_cmd_data(host, mrq->cmd, mrq->data);
+ msdc_track_cmd_data(host, mrq->cmd);
if (mrq->data)
msdc_unprepare_data(host, mrq->data);
if (host->error)
@@ -1190,7 +1198,8 @@ static bool msdc_cmd_done(struct msdc_host *host, int events,
if (!sbc_error && !(events & MSDC_INT_CMDRDY)) {
if (events & MSDC_INT_CMDTMO ||
(cmd->opcode != MMC_SEND_TUNING_BLOCK &&
- cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200))
+ cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200 &&
+ !host->hs400_tuning))
/*
* should not clear fifo/interrupt as the tune data
* may have alreay come when cmd19/cmd21 gets response
@@ -1287,7 +1296,8 @@ static void msdc_cmd_next(struct msdc_host *host,
if ((cmd->error &&
!(cmd->error == -EILSEQ &&
(cmd->opcode == MMC_SEND_TUNING_BLOCK ||
- cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200))) ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200 ||
+ host->hs400_tuning))) ||
(mrq->sbc && mrq->sbc->error))
msdc_request_done(host, mrq);
else if (cmd == mrq->sbc)
@@ -1295,7 +1305,7 @@ static void msdc_cmd_next(struct msdc_host *host,
else if (!cmd->data)
msdc_request_done(host, mrq);
else
- msdc_start_data(host, mrq, cmd, cmd->data);
+ msdc_start_data(host, cmd, cmd->data);
}
static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -2251,6 +2261,67 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
return 0;
}
+static int msdc_execute_hs400_tuning(struct mmc_host *mmc, struct mmc_card *card)
+{
+ struct msdc_host *host = mmc_priv(mmc);
+ struct msdc_delay_phase dly1_delay;
+ u32 val, result_dly1 = 0;
+ u8 *ext_csd;
+ int i, ret;
+
+ if (host->top_base) {
+ sdr_set_bits(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY3, host->hs400_ds_dly3);
+ } else {
+ sdr_set_bits(host->base + PAD_DS_TUNE, PAD_DS_TUNE_DLY_SEL);
+ if (host->hs400_ds_dly3)
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY3, host->hs400_ds_dly3);
+ }
+
+ host->hs400_tuning = true;
+ for (i = 0; i < PAD_DELAY_MAX; i++) {
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, i);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, i);
+ ret = mmc_get_ext_csd(card, &ext_csd);
+ if (!ret)
+ result_dly1 |= (1 << i);
+ }
+ host->hs400_tuning = false;
+
+ dly1_delay = get_best_delay(host, result_dly1);
+ if (dly1_delay.maxlen == 0) {
+ dev_err(host->dev, "Failed to get DLY1 delay!\n");
+ goto fail;
+ }
+ if (host->top_base)
+ sdr_set_field(host->top_base + EMMC50_PAD_DS_TUNE,
+ PAD_DS_DLY1, dly1_delay.final_phase);
+ else
+ sdr_set_field(host->base + PAD_DS_TUNE,
+ PAD_DS_TUNE_DLY1, dly1_delay.final_phase);
+
+ if (host->top_base)
+ val = readl(host->top_base + EMMC50_PAD_DS_TUNE);
+ else
+ val = readl(host->base + PAD_DS_TUNE);
+
+ dev_info(host->dev, "Fianl PAD_DS_TUNE: 0x%x\n", val);
+
+ return 0;
+
+fail:
+ dev_err(host->dev, "Failed to tuning DS pin delay!\n");
+ return -EIO;
+}
+
static void msdc_hw_reset(struct mmc_host *mmc)
{
struct msdc_host *host = mmc_priv(mmc);
@@ -2330,6 +2401,7 @@ static void msdc_cqe_enable(struct mmc_host *mmc)
static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
{
struct msdc_host *host = mmc_priv(mmc);
+ unsigned int val = 0;
/* disable cmdq irq */
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INT_CMDQ);
@@ -2339,6 +2411,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery)
if (recovery) {
sdr_set_field(host->base + MSDC_DMA_CTRL,
MSDC_DMA_CTRL_STOP, 1);
+ if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val,
+ !(val & MSDC_DMA_CFG_STS), 1, 3000)))
+ return;
msdc_reset_hw(host);
}
}
@@ -2377,6 +2452,7 @@ static const struct mmc_host_ops mt_msdc_ops = {
.card_busy = msdc_card_busy,
.execute_tuning = msdc_execute_tuning,
.prepare_hs400_tuning = msdc_prepare_hs400_tuning,
+ .execute_hs400_tuning = msdc_execute_hs400_tuning,
.hw_reset = msdc_hw_reset,
};
@@ -2396,6 +2472,9 @@ static void msdc_of_property_parse(struct platform_device *pdev,
of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay",
&host->hs400_ds_delay);
+ of_property_read_u32(pdev->dev.of_node, "mediatek,hs400-ds-dly3",
+ &host->hs400_ds_dly3);
+
of_property_read_u32(pdev->dev.of_node, "mediatek,hs200-cmd-int-delay",
&host->hs200_cmd_int_delay);
@@ -2577,6 +2656,25 @@ static int msdc_drv_probe(struct platform_device *pdev)
host->dma_mask = DMA_BIT_MASK(32);
mmc_dev(mmc)->dma_mask = &host->dma_mask;
+ host->timeout_clks = 3 * 1048576;
+ host->dma.gpd = dma_alloc_coherent(&pdev->dev,
+ 2 * sizeof(struct mt_gpdma_desc),
+ &host->dma.gpd_addr, GFP_KERNEL);
+ host->dma.bd = dma_alloc_coherent(&pdev->dev,
+ MAX_BD_NUM * sizeof(struct mt_bdma_desc),
+ &host->dma.bd_addr, GFP_KERNEL);
+ if (!host->dma.gpd || !host->dma.bd) {
+ ret = -ENOMEM;
+ goto release_mem;
+ }
+ msdc_init_gpd_bd(host, &host->dma);
+ INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
+ spin_lock_init(&host->lock);
+
+ platform_set_drvdata(pdev, mmc);
+ msdc_ungate_clock(host);
+ msdc_init_hw(host);
+
if (mmc->caps2 & MMC_CAP2_CQE) {
host->cq_host = devm_kzalloc(mmc->parent,
sizeof(*host->cq_host),
@@ -2597,25 +2695,6 @@ static int msdc_drv_probe(struct platform_device *pdev)
mmc->max_seg_size = 64 * 1024;
}
- host->timeout_clks = 3 * 1048576;
- host->dma.gpd = dma_alloc_coherent(&pdev->dev,
- 2 * sizeof(struct mt_gpdma_desc),
- &host->dma.gpd_addr, GFP_KERNEL);
- host->dma.bd = dma_alloc_coherent(&pdev->dev,
- MAX_BD_NUM * sizeof(struct mt_bdma_desc),
- &host->dma.bd_addr, GFP_KERNEL);
- if (!host->dma.gpd || !host->dma.bd) {
- ret = -ENOMEM;
- goto release_mem;
- }
- msdc_init_gpd_bd(host, &host->dma);
- INIT_DELAYED_WORK(&host->req_timeout, msdc_request_timeout);
- spin_lock_init(&host->lock);
-
- platform_set_drvdata(pdev, mmc);
- msdc_ungate_clock(host);
- msdc_init_hw(host);
-
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
IRQF_TRIGGER_NONE, pdev->name, host);
if (ret)
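msdc_execute_hs400_tuning() above sweeps the DS line delay: for each of PAD_DELAY_MAX taps it re-reads EXT_CSD and sets a bit in result_dly1 when the read succeeds, then get_best_delay() (not shown in this hunk) turns that pass map into a final phase. As an illustration only, not the driver's actual get_best_delay(), a typical window selection picks the centre of the longest run of passing taps:

#include <linux/bits.h>

struct example_delay_phase {
	u8 maxlen;		/* length of the widest passing window */
	u8 final_phase;		/* tap at the centre of that window */
};

static struct example_delay_phase example_get_best_delay(u32 pass_map, int ntaps)
{
	struct example_delay_phase best = { 0, 0 };
	int i, start = 0, len = 0;

	for (i = 0; i <= ntaps; i++) {
		if (i < ntaps && (pass_map & BIT(i))) {
			if (!len)
				start = i;	/* a new passing window begins */
			len++;
		} else if (len) {
			if (len > best.maxlen) {
				best.maxlen = len;
				best.final_phase = start + len / 2;
			}
			len = 0;	/* window closed by a failing tap or the end */
		}
	}

	return best;
}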
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 947581de7860..8c3655d3be96 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -552,6 +552,11 @@ static const struct of_device_id mxs_mmc_dt_ids[] = {
};
MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
+static void mxs_mmc_regulator_disable(void *regulator)
+{
+ regulator_disable(regulator);
+}
+
static int mxs_mmc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -591,6 +596,11 @@ static int mxs_mmc_probe(struct platform_device *pdev)
"Failed to enable vmmc regulator: %d\n", ret);
goto out_mmc_free;
}
+
+ ret = devm_add_action_or_reset(&pdev->dev, mxs_mmc_regulator_disable,
+ reg_vmmc);
+ if (ret)
+ goto out_mmc_free;
}
ssp->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 2f8038d69f67..9dafcbf969d9 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -702,11 +702,6 @@ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
#else
-static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)
-{
- return 0;
-}
-
static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)
{
}
@@ -1515,7 +1510,7 @@ static void omap_hsmmc_init_card(struct mmc_host *mmc, struct mmc_card *card)
* REVISIT: should be moved to sdio core and made more
* general e.g. by expanding the DT bindings of child nodes
* to provide a mechanism to provide this information:
- * Documentation/devicetree/bindings/mmc/mmc-card.txt
+ * Documentation/devicetree/bindings/mmc/mmc-card.yaml
*/
np = of_get_compatible_child(np, "ti,wl1251");
@@ -2086,6 +2081,7 @@ static int omap_hsmmc_resume(struct device *dev)
}
#endif
+#ifdef CONFIG_PM
static int omap_hsmmc_runtime_suspend(struct device *dev)
{
struct omap_hsmmc_host *host;
@@ -2153,11 +2149,11 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
spin_unlock_irqrestore(&host->irq_lock, flags);
return 0;
}
+#endif
static const struct dev_pm_ops omap_hsmmc_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(omap_hsmmc_suspend, omap_hsmmc_resume)
- .runtime_suspend = omap_hsmmc_runtime_suspend,
- .runtime_resume = omap_hsmmc_runtime_resume,
+ SET_RUNTIME_PM_OPS(omap_hsmmc_runtime_suspend, omap_hsmmc_runtime_resume, NULL)
};
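SET_RUNTIME_PM_OPS() only references its callbacks when CONFIG_PM is enabled, which is why the runtime handlers above gain an #ifdef CONFIG_PM guard: without it, !CONFIG_PM builds would warn about defined-but-unused functions. An alternative pattern (a sketch, not what this driver does) drops the #ifdef and marks the callbacks __maybe_unused instead:

        static int __maybe_unused example_runtime_suspend(struct device *dev)
        {
                /* save context, gate clocks, ... */
                return 0;
        }

        static int __maybe_unused example_runtime_resume(struct device *dev)
        {
                /* ungate clocks, restore context, ... */
                return 0;
        }

        static const struct dev_pm_ops example_pm_ops = {
                SET_RUNTIME_PM_OPS(example_runtime_suspend,
                                   example_runtime_resume, NULL)
        };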
static struct platform_driver omap_hsmmc_driver = {
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 8fe65f172a61..f1ef0d28b0dd 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -362,23 +362,11 @@ static inline bool sdhci_acpi_no_fixup_child_power(struct acpi_device *adev)
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
-
- return ret;
+ return sdhci_get_cd_nogpio(mmc);
}
static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev)
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index f18d169bc8ff..afaf33707d46 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -196,6 +196,9 @@
*/
#define ESDHC_FLAG_BROKEN_AUTO_CMD23 BIT(16)
+/* ERR004536 is not applicable for the IP */
+#define ESDHC_FLAG_SKIP_ERR004536 BIT(17)
+
enum wp_types {
ESDHC_WP_NONE, /* no WP, neither controller nor gpio */
ESDHC_WP_CONTROLLER, /* mmc controller internal WP */
@@ -289,6 +292,13 @@ static const struct esdhc_soc_data usdhc_imx7d_data = {
| ESDHC_FLAG_BROKEN_AUTO_CMD23,
};
+static struct esdhc_soc_data usdhc_s32g2_data = {
+ .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING
+ | ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
+ | ESDHC_FLAG_HS400 | ESDHC_FLAG_HS400_ES
+ | ESDHC_FLAG_SKIP_ERR004536,
+};
+
static struct esdhc_soc_data usdhc_imx7ulp_data = {
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
@@ -347,6 +357,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
{ .compatible = "fsl,imx8mm-usdhc", .data = &usdhc_imx8mm_data, },
+ { .compatible = "nxp,s32g2-usdhc", .data = &usdhc_s32g2_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids);
@@ -1187,6 +1198,7 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
u32 ctrl;
+ int ret;
/* Reset the tuning circuit */
if (esdhc_is_usdhc(imx_data)) {
@@ -1199,7 +1211,22 @@ static void esdhc_reset_tuning(struct sdhci_host *host)
} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) {
ctrl = readl(host->ioaddr + SDHCI_AUTO_CMD_STATUS);
ctrl &= ~ESDHC_MIX_CTRL_SMPCLK_SEL;
+ ctrl &= ~ESDHC_MIX_CTRL_EXE_TUNE;
writel(ctrl, host->ioaddr + SDHCI_AUTO_CMD_STATUS);
+ /* Make sure ESDHC_MIX_CTRL_EXE_TUNE cleared */
+ ret = readl_poll_timeout(host->ioaddr + SDHCI_AUTO_CMD_STATUS,
+ ctrl, !(ctrl & ESDHC_MIX_CTRL_EXE_TUNE), 1, 50);
+ if (ret == -ETIMEDOUT)
+ dev_warn(mmc_dev(host->mmc),
+ "Warning! clear execute tuning bit failed\n");
+ /*
+ * SDHCI_INT_DATA_AVAIL is a W1C bit; setting it clears the uSDHC
+ * IP's internal execute_tuning_with_clr_buf flag, which in turn
+ * ensures that the normal data transfer logic works correctly.
+ */
+ ctrl = readl(host->ioaddr + SDHCI_INT_STATUS);
+ ctrl |= SDHCI_INT_DATA_AVAIL;
+ writel(ctrl, host->ioaddr + SDHCI_INT_STATUS);
}
}
}
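readl_poll_timeout() from <linux/iopoll.h>, used above to wait for ESDHC_MIX_CTRL_EXE_TUNE to clear, takes the MMIO address, a variable that receives each read, the exit condition, and the poll interval and total timeout in microseconds; it returns -ETIMEDOUT if the condition never becomes true. A minimal sketch of the same polling pattern (the register and bit names are illustrative):

        #include <linux/iopoll.h>

        /* Poll every 1 us, for at most 50 us, until EXAMPLE_BUSY clears */
        static int example_wait_idle(void __iomem *regs)
        {
                u32 val;

                return readl_poll_timeout(regs + EXAMPLE_STATUS, val,
                                          !(val & EXAMPLE_BUSY), 1, 50);
        }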
@@ -1359,8 +1386,10 @@ static void sdhci_esdhc_imx_hwinit(struct sdhci_host *host)
* erratum ESDHC_FLAG_ERR004536 fix for MX6Q TO1.2 and MX6DL
* TO1.1, it's harmless for MX6SL
*/
- writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
- host->ioaddr + 0x6c);
+ if (!(imx_data->socdata->flags & ESDHC_FLAG_SKIP_ERR004536)) {
+ writel(readl(host->ioaddr + 0x6c) & ~BIT(7),
+ host->ioaddr + 0x6c);
+ }
/* disable DLL_CTRL delay line settings */
writel(0x0, host->ioaddr + ESDHC_DLL_CTRL);
diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c
index 737e2bfdedc2..6a2e5a468424 100644
--- a/drivers/mmc/host/sdhci-of-arasan.c
+++ b/drivers/mmc/host/sdhci-of-arasan.c
@@ -191,6 +191,13 @@ static const struct sdhci_arasan_soc_ctl_map intel_lgm_sdxc_soc_ctl_map = {
.hiword_update = false,
};
+static const struct sdhci_arasan_soc_ctl_map thunderbay_soc_ctl_map = {
+ .baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
+ .clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
+ .support64b = { .reg = 0x4, .width = 1, .shift = 24 },
+ .hiword_update = false,
+};
+
static const struct sdhci_arasan_soc_ctl_map intel_keembay_soc_ctl_map = {
.baseclkfreq = { .reg = 0x0, .width = 8, .shift = 14 },
.clockmultiplier = { .reg = 0x4, .width = 8, .shift = 14 },
@@ -456,6 +463,15 @@ static const struct sdhci_pltfm_data sdhci_arasan_cqe_pdata = {
SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN,
};
+static const struct sdhci_pltfm_data sdhci_arasan_thunderbay_pdata = {
+ .ops = &sdhci_arasan_cqe_ops,
+ .quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
+ .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN |
+ SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN |
+ SDHCI_QUIRK2_STOP_WITH_TC |
+ SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400,
+};
+
#ifdef CONFIG_PM_SLEEP
/**
* sdhci_arasan_suspend - Suspend method for the driver
@@ -1132,6 +1148,12 @@ static struct sdhci_arasan_of_data sdhci_arasan_generic_data = {
.clk_ops = &arasan_clk_ops,
};
+static const struct sdhci_arasan_of_data sdhci_arasan_thunderbay_data = {
+ .soc_ctl_map = &thunderbay_soc_ctl_map,
+ .pdata = &sdhci_arasan_thunderbay_pdata,
+ .clk_ops = &arasan_clk_ops,
+};
+
static const struct sdhci_pltfm_data sdhci_keembay_emmc_pdata = {
.ops = &sdhci_arasan_cqe_ops,
.quirks = SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN |
@@ -1265,6 +1287,10 @@ static const struct of_device_id sdhci_arasan_of_match[] = {
.compatible = "intel,keembay-sdhci-5.1-sdio",
.data = &intel_keembay_sdio_data,
},
+ {
+ .compatible = "intel,thunderbay-sdhci-5.1",
+ .data = &sdhci_arasan_thunderbay_data,
+ },
/* Generic compatible below here */
{
.compatible = "arasan,sdhci-8.9a",
@@ -1626,7 +1652,8 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
if (of_device_is_compatible(np, "intel,keembay-sdhci-5.1-emmc") ||
of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sd") ||
- of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio")) {
+ of_device_is_compatible(np, "intel,keembay-sdhci-5.1-sdio") ||
+ of_device_is_compatible(np, "intel,thunderbay-sdhci-5.1")) {
sdhci_arasan_update_clockmultiplier(host, 0x0);
sdhci_arasan_update_support64b(host, 0x0);
diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c
index 8f4d1f003f65..64e27c2821f9 100644
--- a/drivers/mmc/host/sdhci-omap.c
+++ b/drivers/mmc/host/sdhci-omap.c
@@ -12,8 +12,10 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
+#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regulator/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/sys_soc.h>
@@ -21,7 +23,14 @@
#include "sdhci-pltfm.h"
-#define SDHCI_OMAP_CON 0x12c
+/*
+ * Note that the register offsets used here are from omap_regs
+ * base which is 0x100 for omap4 and later, and 0 for omap3 and
+ * earlier.
+ */
+#define SDHCI_OMAP_SYSCONFIG 0x10
+
+#define SDHCI_OMAP_CON 0x2c
#define CON_DW8 BIT(5)
#define CON_DMA_MASTER BIT(20)
#define CON_DDR BIT(19)
@@ -31,20 +40,20 @@
#define CON_INIT BIT(1)
#define CON_OD BIT(0)
-#define SDHCI_OMAP_DLL 0x0134
+#define SDHCI_OMAP_DLL 0x34
#define DLL_SWT BIT(20)
#define DLL_FORCE_SR_C_SHIFT 13
#define DLL_FORCE_SR_C_MASK (0x7f << DLL_FORCE_SR_C_SHIFT)
#define DLL_FORCE_VALUE BIT(12)
#define DLL_CALIB BIT(1)
-#define SDHCI_OMAP_CMD 0x20c
+#define SDHCI_OMAP_CMD 0x10c
-#define SDHCI_OMAP_PSTATE 0x0224
+#define SDHCI_OMAP_PSTATE 0x124
#define PSTATE_DLEV_DAT0 BIT(20)
#define PSTATE_DATI BIT(1)
-#define SDHCI_OMAP_HCTL 0x228
+#define SDHCI_OMAP_HCTL 0x128
#define HCTL_SDBP BIT(8)
#define HCTL_SDVS_SHIFT 9
#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT)
@@ -52,26 +61,28 @@
#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT)
#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT)
-#define SDHCI_OMAP_SYSCTL 0x22c
+#define SDHCI_OMAP_SYSCTL 0x12c
#define SYSCTL_CEN BIT(2)
#define SYSCTL_CLKD_SHIFT 6
#define SYSCTL_CLKD_MASK 0x3ff
-#define SDHCI_OMAP_STAT 0x230
+#define SDHCI_OMAP_STAT 0x130
-#define SDHCI_OMAP_IE 0x234
+#define SDHCI_OMAP_IE 0x134
#define INT_CC_EN BIT(0)
-#define SDHCI_OMAP_AC12 0x23c
+#define SDHCI_OMAP_ISE 0x138
+
+#define SDHCI_OMAP_AC12 0x13c
#define AC12_V1V8_SIGEN BIT(19)
#define AC12_SCLK_SEL BIT(23)
-#define SDHCI_OMAP_CAPA 0x240
+#define SDHCI_OMAP_CAPA 0x140
#define CAPA_VS33 BIT(24)
#define CAPA_VS30 BIT(25)
#define CAPA_VS18 BIT(26)
-#define SDHCI_OMAP_CAPA2 0x0244
+#define SDHCI_OMAP_CAPA2 0x144
#define CAPA2_TSDR50 BIT(13)
#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */
@@ -89,7 +100,8 @@
#define SDHCI_OMAP_SPECIAL_RESET BIT(1)
struct sdhci_omap_data {
- u32 offset;
+ int omap_offset; /* Offset for omap regs from base */
+ u32 offset; /* Offset for SDHCI regs from base */
u8 flags;
};
@@ -107,12 +119,19 @@ struct sdhci_omap_host {
struct pinctrl *pinctrl;
struct pinctrl_state **pinctrl_state;
+ int wakeirq;
bool is_tuning;
+
+ /* Offset for omap specific registers from base */
+ int omap_offset;
+
/* Omap specific context save */
u32 con;
u32 hctl;
u32 sysctl;
u32 capa;
+ u32 ie;
+ u32 ise;
};
static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host);
@@ -121,13 +140,13 @@ static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host);
static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host,
unsigned int offset)
{
- return readl(host->base + offset);
+ return readl(host->base + host->omap_offset + offset);
}
static inline void sdhci_omap_writel(struct sdhci_omap_host *host,
unsigned int offset, u32 data)
{
- writel(data, host->base + offset);
+ writel(data, host->base + host->omap_offset + offset);
}
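With omap_offset folded into the accessors, the same SDHCI_OMAP_* constants now work for every SoC generation: SDHCI_OMAP_CON (0x2c) resolves to base + 0x2c on omap3 and earlier (omap_offset 0) and to base + 0x12c on omap4 and later (omap_offset 0x100), which matches the value that used to be hard-coded. A short illustration of the address calculation:

        /* Illustrative only: effective MMIO address of the CON register */
        void __iomem *con = omap_host->base + omap_host->omap_offset + SDHCI_OMAP_CON;
        /*
         * omap3 and earlier: base + 0x000 + 0x2c = base + 0x2c
         * omap4 and later:   base + 0x100 + 0x2c = base + 0x12c (the old hard-coded offset)
         */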
static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
@@ -172,7 +191,7 @@ static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host,
}
static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
- unsigned int iov)
+ unsigned int iov_pbias)
{
int ret;
struct sdhci_host *host = omap_host->host;
@@ -183,14 +202,15 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
return ret;
if (!IS_ERR(mmc->supply.vqmmc)) {
- ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov);
- if (ret) {
+ /* Pick the right voltage to allow 3.0V for 3.3V nominal PBIAS */
+ ret = mmc_regulator_set_vqmmc(mmc, &mmc->ios);
+ if (ret < 0) {
dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n");
return ret;
}
}
- ret = sdhci_omap_set_pbias(omap_host, true, iov);
+ ret = sdhci_omap_set_pbias(omap_host, true, iov_pbias);
if (ret)
return ret;
@@ -200,16 +220,28 @@ static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host,
static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host,
unsigned char signal_voltage)
{
- u32 reg;
+ u32 reg, capa;
ktime_t timeout;
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
reg &= ~HCTL_SDVS_MASK;
- if (signal_voltage == MMC_SIGNAL_VOLTAGE_330)
- reg |= HCTL_SDVS_33;
- else
+ switch (signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ if (capa & CAPA_VS33)
+ reg |= HCTL_SDVS_33;
+ else if (capa & CAPA_VS30)
+ reg |= HCTL_SDVS_30;
+ else
+ dev_warn(omap_host->dev, "misconfigured CAPA: %08x\n",
+ capa);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ default:
reg |= HCTL_SDVS_18;
+ break;
+ }
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg);
@@ -527,28 +559,32 @@ static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc,
if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
- if (!(reg & CAPA_VS33))
+ if (!(reg & (CAPA_VS30 | CAPA_VS33)))
return -EOPNOTSUPP;
+ if (reg & CAPA_VS30)
+ iov = IOV_3V0;
+ else
+ iov = IOV_3V3;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg &= ~AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
- iov = IOV_3V3;
} else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) {
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
if (!(reg & CAPA_VS18))
return -EOPNOTSUPP;
+ iov = IOV_1V8;
+
sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage);
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12);
reg |= AC12_V1V8_SIGEN;
sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg);
-
- iov = IOV_1V8;
} else {
return -EOPNOTSUPP;
}
@@ -682,7 +718,24 @@ static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode,
{
struct mmc_host *mmc = host->mmc;
- mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
+}
+
+/*
+ * MMCHS_HL_HWINFO has the MADMA_EN bit set if the controller instance
+ * is connected to L3 interconnect and is bus master capable. Note that
+ * the MMCHS_HL_HWINFO register is in the module registers before the
+ * omap registers and sdhci registers. The offset can vary for omap
+ * registers depending on the SoC. Do not use sdhci_omap_readl() here.
+ */
+static bool sdhci_omap_has_adma(struct sdhci_omap_host *omap_host, int offset)
+{
+ /* MMCHS_HL_HWINFO register is only available on omap4 and later */
+ if (offset < 0x200)
+ return false;
+
+ return readl(omap_host->base + 4) & 1;
}
static int sdhci_omap_enable_dma(struct sdhci_host *host)
@@ -792,6 +845,11 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
unsigned long limit = MMC_TIMEOUT_US;
unsigned long i = 0;
+ u32 sysc;
+
+ /* Save target module sysconfig configured by SoC PM layer */
+ if (mask & SDHCI_RESET_ALL)
+ sysc = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCONFIG);
/* Don't reset data lines during tuning operation */
if (omap_host->is_tuning)
@@ -811,10 +869,15 @@ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
dev_err(mmc_dev(host->mmc),
"Timeout waiting on controller reset in %s\n",
__func__);
- return;
+
+ goto restore_sysc;
}
sdhci_reset(host, mask);
+
+restore_sysc:
+ if (mask & SDHCI_RESET_ALL)
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCONFIG, sysc);
}
#define CMD_ERR_MASK (SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX |\
@@ -877,34 +940,73 @@ static struct sdhci_ops sdhci_omap_ops = {
.set_timeout = sdhci_omap_set_timeout,
};
-static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host)
+static unsigned int sdhci_omap_regulator_get_caps(struct device *dev,
+ const char *name)
{
- u32 reg;
- int ret = 0;
+ struct regulator *reg;
+ unsigned int caps = 0;
+
+ reg = regulator_get(dev, name);
+ if (IS_ERR(reg))
+ return ~0U;
+
+ if (regulator_is_supported_voltage(reg, 1700000, 1950000))
+ caps |= SDHCI_CAN_VDD_180;
+ if (regulator_is_supported_voltage(reg, 2700000, 3150000))
+ caps |= SDHCI_CAN_VDD_300;
+ if (regulator_is_supported_voltage(reg, 3150000, 3600000))
+ caps |= SDHCI_CAN_VDD_330;
+
+ regulator_put(reg);
+
+ return caps;
+}
+
+static int sdhci_omap_set_capabilities(struct sdhci_host *host)
+{
+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+ struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
struct device *dev = omap_host->dev;
- struct regulator *vqmmc;
+ const u32 mask = SDHCI_CAN_VDD_180 | SDHCI_CAN_VDD_300 | SDHCI_CAN_VDD_330;
+ unsigned int pbias, vqmmc, caps = 0;
+ u32 reg;
- vqmmc = regulator_get(dev, "vqmmc");
- if (IS_ERR(vqmmc)) {
- ret = PTR_ERR(vqmmc);
- goto reg_put;
- }
+ pbias = sdhci_omap_regulator_get_caps(dev, "pbias");
+ vqmmc = sdhci_omap_regulator_get_caps(dev, "vqmmc");
+ caps = pbias & vqmmc;
+
+ if (pbias != ~0U && vqmmc == ~0U)
+ dev_warn(dev, "vqmmc regulator missing for pbias\n");
+ else if (caps == ~0U)
+ return 0;
+
+ /*
+ * Quirk handling to allow 3.0V vqmmc with a valid 3.3V PBIAS. This is
+ * needed for 3.0V ldo9_reg on omap5 at least.
+ */
+ if (pbias != ~0U && (pbias & SDHCI_CAN_VDD_330) &&
+ (vqmmc & SDHCI_CAN_VDD_300))
+ caps |= SDHCI_CAN_VDD_330;
/* voltage capabilities might be set by boot loader, clear it */
reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33);
- if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3))
- reg |= CAPA_VS33;
- if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8))
+ if (caps & SDHCI_CAN_VDD_180)
reg |= CAPA_VS18;
+ if (caps & SDHCI_CAN_VDD_300)
+ reg |= CAPA_VS30;
+
+ if (caps & SDHCI_CAN_VDD_330)
+ reg |= CAPA_VS33;
+
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg);
-reg_put:
- regulator_put(vqmmc);
+ host->caps &= ~mask;
+ host->caps |= caps;
- return ret;
+ return 0;
}
static const struct sdhci_pltfm_data sdhci_omap_pdata = {
@@ -920,26 +1022,56 @@ static const struct sdhci_pltfm_data sdhci_omap_pdata = {
.ops = &sdhci_omap_ops,
};
+static const struct sdhci_omap_data omap2430_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap3_data = {
+ .omap_offset = 0,
+ .offset = 0x100,
+};
+
+static const struct sdhci_omap_data omap4_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
+static const struct sdhci_omap_data omap5_data = {
+ .omap_offset = 0x100,
+ .offset = 0x200,
+ .flags = SDHCI_OMAP_SPECIAL_RESET,
+};
+
static const struct sdhci_omap_data k2g_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
};
static const struct sdhci_omap_data am335_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data am437_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_SPECIAL_RESET,
};
static const struct sdhci_omap_data dra7_data = {
+ .omap_offset = 0x100,
.offset = 0x200,
.flags = SDHCI_OMAP_REQUIRE_IODELAY,
};
static const struct of_device_id omap_sdhci_match[] = {
+ { .compatible = "ti,omap2430-sdhci", .data = &omap2430_data },
+ { .compatible = "ti,omap3-sdhci", .data = &omap3_data },
+ { .compatible = "ti,omap4-sdhci", .data = &omap4_data },
+ { .compatible = "ti,omap5-sdhci", .data = &omap5_data },
{ .compatible = "ti,dra7-sdhci", .data = &dra7_data },
{ .compatible = "ti,k2g-sdhci", .data = &k2g_data },
{ .compatible = "ti,am335-sdhci", .data = &am335_data },
@@ -1122,6 +1254,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
omap_host->power_mode = MMC_POWER_UNDEFINED;
omap_host->timing = MMC_TIMING_LEGACY;
omap_host->flags = data->flags;
+ omap_host->omap_offset = data->omap_offset;
+ omap_host->con = -EINVAL; /* Prevent invalid restore on first resume */
host->ioaddr += offset;
host->mapbase = regs->start + offset;
@@ -1172,6 +1306,8 @@ static int sdhci_omap_probe(struct platform_device *pdev)
* SYSCONFIG register of omap devices. The callback will be invoked
* as part of pm_runtime_get_sync.
*/
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_autosuspend_delay(dev, 50);
pm_runtime_enable(dev);
ret = pm_runtime_resume_and_get(dev);
if (ret) {
@@ -1179,10 +1315,10 @@ static int sdhci_omap_probe(struct platform_device *pdev)
goto err_rpm_disable;
}
- ret = sdhci_omap_set_capabilities(omap_host);
+ ret = sdhci_omap_set_capabilities(host);
if (ret) {
dev_err(dev, "failed to set system capabilities\n");
- goto err_put_sync;
+ goto err_rpm_put;
}
host->mmc_host_ops.start_signal_voltage_switch =
@@ -1192,16 +1328,28 @@ static int sdhci_omap_probe(struct platform_device *pdev)
host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning;
host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq;
- /* Switch to external DMA only if there is the "dmas" property */
- if (of_find_property(dev->of_node, "dmas", NULL))
+ /*
+ * Switch to external DMA only if there is the "dmas" property and
+ * ADMA is not available on the controller instance.
+ */
+ if (device_property_present(dev, "dmas") &&
+ !sdhci_omap_has_adma(omap_host, offset))
sdhci_switch_external_dma(host, true);
+ if (device_property_read_bool(dev, "ti,non-removable")) {
+ dev_warn_once(dev, "using old ti,non-removable property\n");
+ mmc->caps |= MMC_CAP_NONREMOVABLE;
+ }
+
/* R1B responses is required to properly manage HW busy detection. */
mmc->caps |= MMC_CAP_NEED_RSP_BUSY;
+ /* Allow card power off and runtime PM for eMMC/SD card devices */
+ mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_AGGRESSIVE_PM;
+
ret = sdhci_setup_host(host);
if (ret)
- goto err_put_sync;
+ goto err_rpm_put;
ret = sdhci_omap_config_iodelay_pinctrl_state(omap_host);
if (ret)
@@ -1211,15 +1359,38 @@ static int sdhci_omap_probe(struct platform_device *pdev)
if (ret)
goto err_cleanup_host;
+ /*
+ * SDIO devices can use the dat1 pin as a wake-up interrupt. Some
+ * devices, like wl1xxx, use an out-of-band GPIO interrupt instead.
+ */
+ omap_host->wakeirq = of_irq_get_byname(dev->of_node, "wakeup");
+ if (omap_host->wakeirq == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_cleanup_host;
+ }
+ if (omap_host->wakeirq > 0) {
+ device_init_wakeup(dev, true);
+ ret = dev_pm_set_dedicated_wake_irq(dev, omap_host->wakeirq);
+ if (ret) {
+ device_init_wakeup(dev, false);
+ goto err_cleanup_host;
+ }
+ host->mmc->pm_caps |= MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+ }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+
return 0;
err_cleanup_host:
sdhci_cleanup_host(host);
-err_put_sync:
- pm_runtime_put_sync(dev);
-
+err_rpm_put:
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
err_rpm_disable:
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_disable(dev);
err_pltfm_free:
@@ -1232,64 +1403,81 @@ static int sdhci_omap_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct sdhci_host *host = platform_get_drvdata(pdev);
+ pm_runtime_get_sync(dev);
sdhci_remove_host(host, true);
+ device_init_wakeup(dev, false);
+ dev_pm_clear_wake_irq(dev);
+ pm_runtime_dont_use_autosuspend(dev);
pm_runtime_put_sync(dev);
- pm_runtime_disable(dev);
+ /* Ensure device gets disabled despite userspace sysfs config */
+ pm_runtime_force_suspend(dev);
sdhci_pltfm_free(pdev);
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static void sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
+
+#ifdef CONFIG_PM
+static void __maybe_unused sdhci_omap_context_save(struct sdhci_omap_host *omap_host)
{
omap_host->con = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON);
omap_host->hctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL);
+ omap_host->sysctl = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL);
omap_host->capa = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA);
+ omap_host->ie = sdhci_omap_readl(omap_host, SDHCI_OMAP_IE);
+ omap_host->ise = sdhci_omap_readl(omap_host, SDHCI_OMAP_ISE);
}
-static void sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
+/* Order matters here, HCTL must be restored in two phases */
+static void __maybe_unused sdhci_omap_context_restore(struct sdhci_omap_host *omap_host)
{
- sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, omap_host->capa);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, omap_host->hctl);
+
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, omap_host->sysctl);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, omap_host->con);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_IE, omap_host->ie);
+ sdhci_omap_writel(omap_host, SDHCI_OMAP_ISE, omap_host->ise);
}
-static int __maybe_unused sdhci_omap_suspend(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_suspend_host(host);
+ sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
pinctrl_pm_select_idle_state(dev);
- pm_runtime_force_suspend(dev);
-
return 0;
}
-static int __maybe_unused sdhci_omap_resume(struct device *dev)
+static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
{
struct sdhci_host *host = dev_get_drvdata(dev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- pm_runtime_force_resume(dev);
-
pinctrl_pm_select_default_state(dev);
- sdhci_omap_context_restore(omap_host);
+ if (omap_host->con != -EINVAL)
+ sdhci_omap_context_restore(omap_host);
- sdhci_resume_host(host);
+ sdhci_runtime_resume_host(host, 0);
return 0;
}
#endif
-static SIMPLE_DEV_PM_OPS(sdhci_omap_dev_pm_ops, sdhci_omap_suspend,
- sdhci_omap_resume);
+
+static const struct dev_pm_ops sdhci_omap_dev_pm_ops = {
+ SET_RUNTIME_PM_OPS(sdhci_omap_runtime_suspend,
+ sdhci_omap_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
static struct platform_driver sdhci_omap_driver = {
.probe = sdhci_omap_probe,
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index be19785227fe..6f9877546830 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -17,8 +17,6 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/mmc/host.h>
-#include <linux/mmc/mmc.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -26,11 +24,13 @@
#include <linux/pm_runtime.h>
#include <linux/pm_qos.h>
#include <linux/debugfs.h>
-#include <linux/mmc/slot-gpio.h>
-#include <linux/mmc/sdhci-pci-data.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+
#ifdef CONFIG_X86
#include <asm/iosf_mbi.h>
#endif
@@ -345,73 +345,6 @@ static int pch_hc_probe_slot(struct sdhci_pci_slot *slot)
return 0;
}
-#ifdef CONFIG_PM
-
-static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
-{
- struct sdhci_pci_slot *slot = dev_id;
- struct sdhci_host *host = slot->host;
-
- mmc_detect_change(host->mmc, msecs_to_jiffies(200));
- return IRQ_HANDLED;
-}
-
-static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
- int err, irq, gpio = slot->cd_gpio;
-
- slot->cd_gpio = -EINVAL;
- slot->cd_irq = -EINVAL;
-
- if (!gpio_is_valid(gpio))
- return;
-
- err = devm_gpio_request(&slot->chip->pdev->dev, gpio, "sd_cd");
- if (err < 0)
- goto out;
-
- err = gpio_direction_input(gpio);
- if (err < 0)
- goto out_free;
-
- irq = gpio_to_irq(gpio);
- if (irq < 0)
- goto out_free;
-
- err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING, "sd_cd", slot);
- if (err)
- goto out_free;
-
- slot->cd_gpio = gpio;
- slot->cd_irq = irq;
-
- return;
-
-out_free:
- devm_gpio_free(&slot->chip->pdev->dev, gpio);
-out:
- dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
-}
-
-static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
- if (slot->cd_irq >= 0)
- free_irq(slot->cd_irq, slot);
-}
-
-#else
-
-static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
-{
-}
-
-#endif
-
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
@@ -619,23 +552,16 @@ static int intel_select_drive_strength(struct mmc_card *card,
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
- struct sdhci_host *host = mmc_priv(mmc);
- unsigned long flags;
- int ret = 0;
if (!gpio_cd)
return 0;
- spin_lock_irqsave(&host->lock, flags);
-
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
-
- ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
-out:
- spin_unlock_irqrestore(&host->lock, flags);
+ return sdhci_get_cd_nogpio(mmc);
+}
- return ret;
+static int mrfld_get_cd(struct mmc_host *mmc)
+{
+ return sdhci_get_cd_nogpio(mmc);
}
#define SDHCI_INTEL_PWR_TIMEOUT_CNT 20
@@ -1341,6 +1267,14 @@ static int intel_mrfld_mmc_probe_slot(struct sdhci_pci_slot *slot)
MMC_CAP_1_8V_DDR;
break;
case INTEL_MRFLD_SD:
+ slot->cd_idx = 0;
+ slot->cd_override_level = true;
+ /*
+ * There are two PCB designs of SD card slot with the opposite
+ * card detection sense. Quirk this out by ignoring GPIO state
+ * completely in the custom ->get_cd() callback.
+ */
+ slot->host->mmc_host_ops.get_cd = mrfld_get_cd;
slot->host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
break;
case INTEL_MRFLD_SDIO:
@@ -1981,21 +1915,6 @@ int sdhci_pci_enable_dma(struct sdhci_host *host)
return 0;
}
-static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host)
-{
- struct sdhci_pci_slot *slot = sdhci_priv(host);
- int rst_n_gpio = slot->rst_n_gpio;
-
- if (!gpio_is_valid(rst_n_gpio))
- return;
- gpio_set_value_cansleep(rst_n_gpio, 0);
- /* For eMMC, minimum is 1us but give it 10us for good measure */
- udelay(10);
- gpio_set_value_cansleep(rst_n_gpio, 1);
- /* For eMMC, minimum is 200us but give it 300us for good measure */
- usleep_range(300, 1000);
-}
-
static void sdhci_pci_hw_reset(struct sdhci_host *host)
{
struct sdhci_pci_slot *slot = sdhci_priv(host);
@@ -2126,26 +2045,8 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
slot->chip = chip;
slot->host = host;
- slot->rst_n_gpio = -EINVAL;
- slot->cd_gpio = -EINVAL;
slot->cd_idx = -1;
- /* Retrieve platform data if there is any */
- if (*sdhci_pci_get_data)
- slot->data = sdhci_pci_get_data(pdev, slotno);
-
- if (slot->data) {
- if (slot->data->setup) {
- ret = slot->data->setup(slot->data);
- if (ret) {
- dev_err(&pdev->dev, "platform setup failed\n");
- goto free;
- }
- }
- slot->rst_n_gpio = slot->data->rst_n_gpio;
- slot->cd_gpio = slot->data->cd_gpio;
- }
-
host->hw_name = "PCI";
host->ops = chip->fixes && chip->fixes->ops ?
chip->fixes->ops :
@@ -2169,17 +2070,6 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
goto cleanup;
}
- if (gpio_is_valid(slot->rst_n_gpio)) {
- if (!devm_gpio_request(&pdev->dev, slot->rst_n_gpio, "eMMC_reset")) {
- gpio_direction_output(slot->rst_n_gpio, 1);
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- slot->hw_reset = sdhci_pci_gpio_hw_reset;
- } else {
- dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
- slot->rst_n_gpio = -EINVAL;
- }
- }
-
host->mmc->pm_caps = MMC_PM_KEEP_POWER;
host->mmc->slotno = slotno;
host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;
@@ -2214,15 +2104,11 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot(
if (ret)
goto remove;
- sdhci_pci_add_own_cd(slot);
-
/*
* Check if the chip needs a separate GPIO for card detect to wake up
* from runtime suspend. If it is not there, don't allow runtime PM.
- * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure.
*/
- if (chip->fixes && chip->fixes->own_cd_for_runtime_pm &&
- !gpio_is_valid(slot->cd_gpio) && slot->cd_idx < 0)
+ if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && slot->cd_idx < 0)
chip->allow_runtime_pm = false;
return slot;
@@ -2232,10 +2118,6 @@ remove:
chip->fixes->remove_slot(slot, 0);
cleanup:
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
-free:
sdhci_free_host(host);
return ERR_PTR(ret);
@@ -2246,8 +2128,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
- sdhci_pci_remove_own_cd(slot);
-
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
@@ -2258,9 +2138,6 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
- if (slot->data && slot->data->cleanup)
- slot->data->cleanup(slot->data);
-
sdhci_free_host(slot->host);
}
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
deleted file mode 100644
index 18638fb363d8..000000000000
--- a/drivers/mmc/host/sdhci-pci-data.c
+++ /dev/null
@@ -1,6 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-#include <linux/module.h>
-#include <linux/mmc/sdhci-pci-data.h>
-
-struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
-EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c
index 51d55a87aebe..f045c1ee4667 100644
--- a/drivers/mmc/host/sdhci-pci-o2micro.c
+++ b/drivers/mmc/host/sdhci-pci-o2micro.c
@@ -489,7 +489,7 @@ static void sdhci_pci_o2_enable_msi(struct sdhci_pci_chip *chip,
ret = pci_find_capability(chip->pdev, PCI_CAP_ID_MSI);
if (!ret) {
- pr_info("%s: unsupport msi, use INTx irq\n",
+ pr_info("%s: unsupported MSI, use INTx irq\n",
mmc_hostname(host->mmc));
return;
}
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index 8f90c4163bb5..5e3193278ff9 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -156,11 +156,6 @@ struct sdhci_pci_fixes {
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
- struct sdhci_pci_data *data;
-
- int rst_n_gpio;
- int cd_gpio;
- int cd_irq;
int cd_idx;
bool cd_override_level;
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 862f033d235d..9085f3932443 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -791,4 +791,3 @@ module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c-sdhci");
diff --git a/drivers/mmc/host/sdhci-sprd.c b/drivers/mmc/host/sdhci-sprd.c
index 11e375579cfb..f33e9349e4e6 100644
--- a/drivers/mmc/host/sdhci-sprd.c
+++ b/drivers/mmc/host/sdhci-sprd.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
+#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -39,6 +40,9 @@
#define SDHCI_SPRD_BIT_POSRD_DLY_INV BIT(21)
#define SDHCI_SPRD_BIT_NEGRD_DLY_INV BIT(29)
+#define SDHCI_SPRD_REG_32_DLL_STS0 0x210
+#define SDHCI_SPRD_DLL_LOCKED BIT(18)
+
#define SDHCI_SPRD_REG_32_BUSY_POSI 0x250
#define SDHCI_SPRD_BIT_OUTR_CLK_AUTO_EN BIT(25)
#define SDHCI_SPRD_BIT_INNR_CLK_AUTO_EN BIT(24)
@@ -256,6 +260,15 @@ static void sdhci_sprd_enable_phy_dll(struct sdhci_host *host)
sdhci_writel(host, tmp, SDHCI_SPRD_REG_32_DLL_CFG);
/* wait 1ms */
usleep_range(1000, 1250);
+
+ if (read_poll_timeout(sdhci_readl, tmp, (tmp & SDHCI_SPRD_DLL_LOCKED),
+ 2000, USEC_PER_SEC, false, host, SDHCI_SPRD_REG_32_DLL_STS0)) {
+ pr_err("%s: DLL locked fail!\n", mmc_hostname(host->mmc));
+ pr_info("%s: DLL_STS0 : 0x%x, DLL_CFG : 0x%x\n",
+ mmc_hostname(host->mmc),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_STS0),
+ sdhci_readl(host, SDHCI_SPRD_REG_32_DLL_CFG));
+ }
}
static void sdhci_sprd_set_clock(struct sdhci_host *host, unsigned int clock)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 8eefa7d5fe85..269c86569402 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -930,7 +930,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
struct mmc_data *data;
unsigned target_timeout, current_timeout;
- *too_big = true;
+ *too_big = false;
/*
* If the host controller provides us with an incorrect timeout
@@ -941,7 +941,7 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
return host->max_timeout_count;
- /* Unspecified command, asume max */
+ /* Unspecified command, assume max */
if (cmd == NULL)
return host->max_timeout_count;
@@ -968,17 +968,14 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
while (current_timeout < target_timeout) {
count++;
current_timeout <<= 1;
- if (count > host->max_timeout_count)
+ if (count > host->max_timeout_count) {
+ if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
+ DBG("Too large timeout 0x%x requested for CMD%d!\n",
+ count, cmd->opcode);
+ count = host->max_timeout_count;
+ *too_big = true;
break;
- }
-
- if (count > host->max_timeout_count) {
- if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
- DBG("Too large timeout 0x%x requested for CMD%d!\n",
- count, cmd->opcode);
- count = host->max_timeout_count;
- } else {
- *too_big = false;
+ }
}
return count;
@@ -2042,6 +2039,12 @@ void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
break;
case MMC_VDD_32_33:
case MMC_VDD_33_34:
+ /*
+ * 3.4 ~ 3.6V are valid only for those platforms where it's
+ * known that the voltage range is supported by hardware.
+ */
+ case MMC_VDD_34_35:
+ case MMC_VDD_35_36:
pwr = SDHCI_POWER_330;
break;
default:
@@ -2422,6 +2425,25 @@ static int sdhci_get_cd(struct mmc_host *mmc)
return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
}
+int sdhci_get_cd_nogpio(struct mmc_host *mmc)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(sdhci_get_cd_nogpio);
+
static int sdhci_check_ro(struct sdhci_host *host)
{
unsigned long flags;
@@ -3232,7 +3254,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
-ETIMEDOUT :
-EILSEQ;
- if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
+ if (sdhci_auto_cmd23(host, mrq)) {
mrq->sbc->error = err;
__sdhci_finish_mrq(host, mrq);
return;
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index e8d04e42a5af..bb883553d3b4 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -750,7 +750,6 @@ static inline void *sdhci_priv(struct sdhci_host *host)
return host->private;
}
-void sdhci_card_detect(struct sdhci_host *host);
void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
const u32 *caps, const u32 *caps1);
int sdhci_setup_host(struct sdhci_host *host);
@@ -775,6 +774,7 @@ void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
unsigned short vdd);
void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
unsigned short vdd);
+int sdhci_get_cd_nogpio(struct mmc_host *mmc);
void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq);
int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq);
void sdhci_set_bus_width(struct sdhci_host *host, int width);
diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
index 7dfc26f48c18..e2affa52ef46 100644
--- a/drivers/mmc/host/tmio_mmc_core.c
+++ b/drivers/mmc/host/tmio_mmc_core.c
@@ -195,6 +195,10 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host)
sd_ctrl_write32_as_16_and_16(host, CTL_IRQ_MASK, host->sdcard_irq_mask_all);
host->sdcard_irq_mask = host->sdcard_irq_mask_all;
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
+
tmio_mmc_set_bus_width(host, host->mmc->ios.bus_width);
if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) {
@@ -956,8 +960,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
case MMC_POWER_OFF:
tmio_mmc_power_off(host);
/* For R-Car Gen2+, we need to reset SDHI specific SCC */
- if (host->pdata->flags & TMIO_MMC_MIN_RCAR2)
+ if (host->pdata->flags & TMIO_MMC_MIN_RCAR2) {
host->reset(host);
+
+ if (host->native_hotplug)
+ tmio_mmc_enable_mmc_irqs(host,
+ TMIO_STAT_CARD_REMOVE |
+ TMIO_STAT_CARD_INSERT);
+ }
+
host->set_clock(host, 0);
break;
case MMC_POWER_UP:
@@ -1185,10 +1196,6 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
_host->set_clock(_host, 0);
tmio_mmc_reset(_host);
- if (_host->native_hotplug)
- tmio_mmc_enable_mmc_irqs(_host,
- TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT);
-
spin_lock_init(&_host->lock);
mutex_init(&_host->ios_lock);
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 4950d10d3a19..97beece62fec 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -576,7 +576,7 @@ static void check_vub300_port_status(struct vub300_mmc_host *vub300)
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (sizeof(vub300->system_port_status) == retval)
new_system_port_status(vub300);
}
@@ -1241,7 +1241,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_INTERRUPT_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1284,7 +1284,7 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300,
SET_TRANSFER_PSEUDOCODE,
USB_DIR_OUT | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0x0000, 0x0000,
- xfer_buffer, xfer_length, HZ);
+ xfer_buffer, xfer_length, 1000);
kfree(xfer_buffer);
if (retval < 0)
goto copy_error_message;
@@ -1991,7 +1991,7 @@ static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8],
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_CLOCK_SPEED,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x00, 0x00, buf, buf_array_size, HZ);
+ 0x00, 0x00, buf, buf_array_size, 1000);
if (retval != 8) {
dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED"
" %dkHz failed with retval=%d\n", kHzClock, retval);
@@ -2013,14 +2013,14 @@ static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0000, 0x0000, NULL, 0, HZ);
+ 0x0000, 0x0000, NULL, 0, 1000);
/* must wait for the VUB300 u-proc to boot up */
msleep(600);
} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) {
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_SD_POWER,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- 0x0001, 0x0000, NULL, 0, HZ);
+ 0x0001, 0x0000, NULL, 0, 1000);
msleep(600);
vub300->card_powered = 1;
} else if (ios->power_mode == MMC_POWER_ON) {
@@ -2275,14 +2275,14 @@ static int vub300_probe(struct usb_interface *interface,
GET_HC_INF0,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->hc_info,
- sizeof(vub300->hc_info), HZ);
+ sizeof(vub300->hc_info), 1000);
if (retval < 0)
goto error5;
retval =
usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0),
SET_ROM_WAIT_STATES,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
- firmware_rom_wait_states, 0x0000, NULL, 0, HZ);
+ firmware_rom_wait_states, 0x0000, NULL, 0, 1000);
if (retval < 0)
goto error5;
dev_info(&vub300->udev->dev,
@@ -2297,7 +2297,7 @@ static int vub300_probe(struct usb_interface *interface,
GET_SYSTEM_PORT_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0x0000, 0x0000, &vub300->system_port_status,
- sizeof(vub300->system_port_status), HZ);
+ sizeof(vub300->system_port_status), 1000);
if (retval < 0) {
goto error4;
} else if (sizeof(vub300->system_port_status) == retval) {
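The HZ to 1000 conversions in this file reflect that the last argument of usb_control_msg() is a timeout in milliseconds, not jiffies; passing HZ only gave the intended one-second timeout on kernels built with HZ=1000, and shrank to 250 ms on HZ=250 configurations. A hedged sketch of a control transfer with an explicit millisecond timeout (the request constant is illustrative):

        /* Sketch: vendor control request with a 1000 ms (not jiffies) timeout */
        retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                 EXAMPLE_VENDOR_REQUEST,
                                 USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
                                 0x0000, 0x0000, NULL, 0, 1000);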
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index b8ae1ec14e17..4eaba6f4ec68 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -384,7 +384,9 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
if (new->readonly)
set_disk_ro(gd, 1);
- device_add_disk(&new->mtd->dev, gd, NULL);
+ ret = device_add_disk(&new->mtd->dev, gd, NULL);
+ if (ret)
+ goto out_cleanup_disk;
if (new->disk_attributes) {
ret = sysfs_create_group(&disk_to_dev(gd)->kobj,
@@ -393,6 +395,8 @@ int add_mtd_blktrans_dev(struct mtd_blktrans_dev *new)
}
return 0;
+out_cleanup_disk:
+ blk_cleanup_disk(new->disk);
out_free_tag_set:
blk_mq_free_tag_set(new->tag_set);
out_kfree_tag_set:
diff --git a/drivers/mtd/mtdsuper.c b/drivers/mtd/mtdsuper.c
index 38b6aa849c63..5ff001140ef4 100644
--- a/drivers/mtd/mtdsuper.c
+++ b/drivers/mtd/mtdsuper.c
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/major.h>
#include <linux/backing-dev.h>
+#include <linux/blkdev.h>
#include <linux/fs_context.h>
#include "mtdcore.h"
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f37b1c56f7c4..034dbd487c33 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -150,7 +150,7 @@ config NET_FC
config IFB
tristate "Intermediate Functional Block support"
- depends on NET_CLS_ACT
+ depends on NET_ACT_MIRRED || NFT_FWD_NETDEV
select NET_REDIRECT
help
This is an intermediate driver that allows sharing of
@@ -291,6 +291,22 @@ config GTP
To compile this drivers as a module, choose M here: the module
will be called gtp.
+config AMT
+ tristate "Automatic Multicast Tunneling (AMT)"
+ depends on INET && IP_MULTICAST
+ select NET_UDP_TUNNEL
+ help
+ This allows one to create AMT (Automatic Multicast Tunneling)
+ virtual interfaces that provide multicast tunneling.
+ There are two roles: Gateway and Relay.
+ The Gateway encapsulates IGMP/MLD traffic from listeners to the Relay.
+ The Gateway decapsulates multicast traffic from the Relay to listeners.
+ The Relay encapsulates multicast traffic from sources to the Gateway.
+ The Relay decapsulates IGMP/MLD traffic from the Gateway.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amt.
+
config MACSEC
tristate "IEEE 802.1AE MAC-level encryption (MACsec)"
select CRYPTO
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 739838623cf6..50b23e71065f 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_WIREGUARD) += wireguard/
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACSEC) += macsec.o
+obj-$(CONFIG_AMT) += amt.o
obj-$(CONFIG_MACVLAN) += macvlan.o
obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_MII) += mii.o
diff --git a/drivers/net/amt.c b/drivers/net/amt.c
new file mode 100644
index 000000000000..60a7053a9cf7
--- /dev/null
+++ b/drivers/net/amt.c
@@ -0,0 +1,3296 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2021 Taehee Yoo <ap420073@gmail.com> */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/if_tunnel.h>
+#include <linux/net.h>
+#include <linux/igmp.h>
+#include <linux/workqueue.h>
+#include <net/net_namespace.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/icmp.h>
+#include <net/mld.h>
+#include <net/amt.h>
+#include <uapi/linux/amt.h>
+#include <linux/security.h>
+#include <net/gro_cells.h>
+#include <net/ipv6.h>
+#include <net/protocol.h>
+#include <net/if_inet6.h>
+#include <net/ndisc.h>
+#include <net/addrconf.h>
+#include <net/ip6_route.h>
+#include <net/inet_common.h>
+#include <net/ip6_checksum.h>
+
+static struct workqueue_struct *amt_wq;
+
+static HLIST_HEAD(source_gc_list);
+/* Lock for source_gc_list */
+static spinlock_t source_gc_lock;
+static struct delayed_work source_gc_wq;
+static char *status_str[] = {
+ "AMT_STATUS_INIT",
+ "AMT_STATUS_SENT_DISCOVERY",
+ "AMT_STATUS_RECEIVED_DISCOVERY",
+ "AMT_STATUS_SENT_ADVERTISEMENT",
+ "AMT_STATUS_RECEIVED_ADVERTISEMENT",
+ "AMT_STATUS_SENT_REQUEST",
+ "AMT_STATUS_RECEIVED_REQUEST",
+ "AMT_STATUS_SENT_QUERY",
+ "AMT_STATUS_RECEIVED_QUERY",
+ "AMT_STATUS_SENT_UPDATE",
+ "AMT_STATUS_RECEIVED_UPDATE",
+};
+
+static char *type_str[] = {
+ "AMT_MSG_DISCOVERY",
+ "AMT_MSG_ADVERTISEMENT",
+ "AMT_MSG_REQUEST",
+ "AMT_MSG_MEMBERSHIP_QUERY",
+ "AMT_MSG_MEMBERSHIP_UPDATE",
+ "AMT_MSG_MULTICAST_DATA",
+ "AMT_MSG_TEARDOWM",
+};
+
+static char *action_str[] = {
+ "AMT_ACT_GMI",
+ "AMT_ACT_GMI_ZERO",
+ "AMT_ACT_GT",
+ "AMT_ACT_STATUS_FWD_NEW",
+ "AMT_ACT_STATUS_D_FWD_NEW",
+ "AMT_ACT_STATUS_NONE_NEW",
+};
+
+static struct igmpv3_grec igmpv3_zero_grec;
+
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLD2_ALL_NODE_INIT { { { 0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01 } } }
+static struct in6_addr mld2_all_node = MLD2_ALL_NODE_INIT;
+static struct mld2_grec mldv2_zero_grec;
+#endif
+
+static struct amt_skb_cb *amt_skb_cb(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct amt_skb_cb) + sizeof(struct qdisc_skb_cb) >
+ sizeof_field(struct sk_buff, cb));
+
+ return (struct amt_skb_cb *)((void *)skb->cb +
+ sizeof(struct qdisc_skb_cb));
+}
+
+static void __amt_source_gc_work(void)
+{
+ struct amt_source_node *snode;
+ struct hlist_head gc_list;
+ struct hlist_node *t;
+
+ spin_lock_bh(&source_gc_lock);
+ hlist_move_list(&source_gc_list, &gc_list);
+ spin_unlock_bh(&source_gc_lock);
+
+ hlist_for_each_entry_safe(snode, t, &gc_list, node) {
+ hlist_del_rcu(&snode->node);
+ kfree_rcu(snode, rcu);
+ }
+}
+
+static void amt_source_gc_work(struct work_struct *work)
+{
+ __amt_source_gc_work();
+
+ spin_lock_bh(&source_gc_lock);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static bool amt_addr_equal(union amt_addr *a, union amt_addr *b)
+{
+ return !memcmp(a, b, sizeof(union amt_addr));
+}
+
+static u32 amt_source_hash(struct amt_tunnel_list *tunnel, union amt_addr *src)
+{
+ u32 hash = jhash(src, sizeof(*src), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
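amt_source_hash() (and amt_group_hash() below) pair jhash() with reciprocal_scale(), which maps the full 32-bit hash onto [0, hash_buckets) with a multiply-and-shift instead of a modulo. A minimal sketch of the same bucket-selection pattern, with illustrative names:

        /* Sketch: pick a bucket for an arbitrary key; seed is a random per-table value */
        static u32 example_bucket(const void *key, u32 keylen, u32 seed, u32 nbuckets)
        {
                u32 hash = jhash(key, keylen, seed);

                return reciprocal_scale(hash, nbuckets);
        }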
+
+static bool amt_status_filter(struct amt_source_node *snode,
+ enum amt_filter filter)
+{
+ bool rc = false;
+
+ switch (filter) {
+ case AMT_FILTER_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_D_FWD_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_D_FWD &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_ALL:
+ rc = true;
+ break;
+ case AMT_FILTER_NONE_NEW:
+ if (snode->status == AMT_SOURCE_STATUS_NONE &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_OLD)
+ rc = true;
+ break;
+ case AMT_FILTER_BOTH_NEW:
+ if ((snode->status == AMT_SOURCE_STATUS_D_FWD ||
+ snode->status == AMT_SOURCE_STATUS_FWD) &&
+ snode->flags == AMT_SOURCE_NEW)
+ rc = true;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
+ }
+
+ return rc;
+}
+
+static struct amt_source_node *amt_lookup_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ enum amt_filter filter,
+ union amt_addr *src)
+{
+ u32 hash = amt_source_hash(tunnel, src);
+ struct amt_source_node *snode;
+
+ hlist_for_each_entry_rcu(snode, &gnode->sources[hash], node)
+ if (amt_status_filter(snode, filter) &&
+ amt_addr_equal(&snode->source_addr, src))
+ return snode;
+
+ return NULL;
+}
+
+static u32 amt_group_hash(struct amt_tunnel_list *tunnel, union amt_addr *group)
+{
+ u32 hash = jhash(group, sizeof(*group), tunnel->amt->hash_seed);
+
+ return reciprocal_scale(hash, tunnel->amt->hash_buckets);
+}
+
+static struct amt_group_node *amt_lookup_group(struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ u32 hash = amt_group_hash(tunnel, group);
+ struct amt_group_node *gnode;
+
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash], node) {
+ if (amt_addr_equal(&gnode->group_addr, group) &&
+ amt_addr_equal(&gnode->host_addr, host) &&
+ gnode->v6 == v6)
+ return gnode;
+ }
+
+ return NULL;
+}
+
+static void amt_destroy_source(struct amt_source_node *snode)
+{
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+
+ if (!gnode->v6) {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ netdev_dbg(snode->gnode->amt->dev,
+ "Delete source %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+
+ cancel_delayed_work(&snode->source_timer);
+ hlist_del_init_rcu(&snode->node);
+ tunnel->nr_sources--;
+ gnode->nr_sources--;
+ spin_lock_bh(&source_gc_lock);
+ hlist_add_head_rcu(&snode->node, &source_gc_list);
+ spin_unlock_bh(&source_gc_lock);
+}
+
+static void amt_del_group(struct amt_dev *amt, struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ if (cancel_delayed_work(&gnode->group_timer))
+ dev_put(amt->dev);
+ hlist_del_rcu(&gnode->node);
+ gnode->tunnel_list->nr_groups--;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Leave group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Leave group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node)
+ amt_destroy_source(snode);
+
+ /* tunnel->lock was acquired outside of amt_del_group(),
+ * but rcu_read_lock() was acquired too, so it's safe.
+ */
+ kfree_rcu(gnode, rcu);
+}
+
+/* If a source timer expires with a router filter-mode for the group of
+ * INCLUDE, the router concludes that traffic from this particular
+ * source is no longer desired on the attached network, and deletes the
+ * associated source record.
+ */
+static void amt_source_work(struct work_struct *work)
+{
+ struct amt_source_node *snode = container_of(to_delayed_work(work),
+ struct amt_source_node,
+ source_timer);
+ struct amt_group_node *gnode = snode->gnode;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_tunnel_list *tunnel;
+
+ tunnel = gnode->tunnel_list;
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ amt_destroy_source(snode);
+ if (!gnode->nr_sources)
+ amt_del_group(amt, gnode);
+ } else {
+ /* When a router filter-mode for a group is EXCLUDE,
+ * source records are only deleted when the group timer expires
+ */
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ }
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_act_src(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ struct amt_source_node *snode,
+ enum amt_act act)
+{
+ struct amt_dev *amt = tunnel->amt;
+
+ switch (act) {
+ case AMT_ACT_GMI:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ msecs_to_jiffies(amt_gmi(amt)));
+ break;
+ case AMT_ACT_GMI_ZERO:
+ cancel_delayed_work(&snode->source_timer);
+ break;
+ case AMT_ACT_GT:
+ mod_delayed_work(amt_wq, &snode->source_timer,
+ gnode->group_timer.timer.expires);
+ break;
+ case AMT_ACT_STATUS_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_D_FWD_NEW:
+ snode->status = AMT_SOURCE_STATUS_D_FWD;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ case AMT_ACT_STATUS_NONE_NEW:
+ cancel_delayed_work(&snode->source_timer);
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return;
+ }
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Source %pI4 from %pI4 Acted %s\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4,
+ action_str[act]);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Source %pI6 from %pI6 Acted %s\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6,
+ action_str[act]);
+#endif
+}
+
+static struct amt_source_node *amt_alloc_snode(struct amt_group_node *gnode,
+ union amt_addr *src)
+{
+ struct amt_source_node *snode;
+
+ snode = kzalloc(sizeof(*snode), GFP_ATOMIC);
+ if (!snode)
+ return NULL;
+
+ memcpy(&snode->source_addr, src, sizeof(union amt_addr));
+ snode->gnode = gnode;
+ snode->status = AMT_SOURCE_STATUS_NONE;
+ snode->flags = AMT_SOURCE_NEW;
+ INIT_HLIST_NODE(&snode->node);
+ INIT_DELAYED_WORK(&snode->source_timer, amt_source_work);
+
+ return snode;
+}
+
+/* RFC 3810 - 7.2.2. Definition of Filter Timers
+ *
+ * Router Mode Filter Timer Actions/Comments
+ * ----------- ----------------- ----------------
+ *
+ * INCLUDE Not Used All listeners in
+ * INCLUDE mode.
+ *
+ * EXCLUDE Timer > 0 At least one listener
+ * in EXCLUDE mode.
+ *
+ * EXCLUDE Timer == 0 No more listeners in
+ * EXCLUDE mode for the
+ * multicast address.
+ * If the Requested List
+ * is empty, delete
+ * Multicast Address
+ * Record. If not, switch
+ * to INCLUDE filter mode;
+ * the sources in the
+ * Requested List are
+ * moved to the Include
+ * List, and the Exclude
+ * List is deleted.
+ */
+static void amt_group_work(struct work_struct *work)
+{
+ struct amt_group_node *gnode = container_of(to_delayed_work(work),
+ struct amt_group_node,
+ group_timer);
+ struct amt_tunnel_list *tunnel = gnode->tunnel_list;
+ struct amt_dev *amt = gnode->amt;
+ struct amt_source_node *snode;
+ bool delete_group = true;
+ struct hlist_node *t;
+ int i, buckets;
+
+ buckets = amt->hash_buckets;
+
+ spin_lock_bh(&tunnel->lock);
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+ /* Not Used */
+ spin_unlock_bh(&tunnel->lock);
+ goto out;
+ }
+
+ rcu_read_lock();
+ for (i = 0; i < buckets; i++) {
+ hlist_for_each_entry_safe(snode, t,
+ &gnode->sources[i], node) {
+ if (!delayed_work_pending(&snode->source_timer) ||
+ snode->status == AMT_SOURCE_STATUS_D_FWD) {
+ amt_destroy_source(snode);
+ } else {
+ delete_group = false;
+ snode->status = AMT_SOURCE_STATUS_FWD;
+ }
+ }
+ }
+ if (delete_group)
+ amt_del_group(amt, gnode);
+ else
+ gnode->filter_mode = MCAST_INCLUDE;
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+out:
+ dev_put(amt->dev);
+}
+
+/* A non-existent group is created as INCLUDE {empty}:
+ *
+ * RFC 3376 - 5.1. Action on Change of Interface State
+ *
+ * If no interface state existed for that multicast address before
+ * the change (i.e., the change consisted of creating a new
+ * per-interface record), or if no state exists after the change
+ * (i.e., the change consisted of deleting a per-interface record),
+ * then the "non-existent" state is considered to have a filter mode
+ * of INCLUDE and an empty source list.
+ */
+static struct amt_group_node *amt_add_group(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ union amt_addr *group,
+ union amt_addr *host,
+ bool v6)
+{
+ struct amt_group_node *gnode;
+ u32 hash;
+ int i;
+
+ if (tunnel->nr_groups >= amt->max_groups)
+ return ERR_PTR(-ENOSPC);
+
+ gnode = kzalloc(sizeof(*gnode) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (unlikely(!gnode))
+ return ERR_PTR(-ENOMEM);
+
+ gnode->amt = amt;
+ gnode->group_addr = *group;
+ gnode->host_addr = *host;
+ gnode->v6 = v6;
+ gnode->tunnel_list = tunnel;
+ gnode->filter_mode = MCAST_INCLUDE;
+ INIT_HLIST_NODE(&gnode->node);
+ INIT_DELAYED_WORK(&gnode->group_timer, amt_group_work);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&gnode->sources[i]);
+
+ hash = amt_group_hash(tunnel, group);
+ hlist_add_head_rcu(&gnode->node, &tunnel->groups[hash]);
+ tunnel->nr_groups++;
+
+ if (!gnode->v6)
+ netdev_dbg(amt->dev, "Join group %pI4\n",
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(amt->dev, "Join group %pI6\n",
+ &gnode->group_addr.ip6);
+#endif
+
+ return gnode;
+}
+
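+/* Build an IGMPv3 general query for transmission toward a gateway.
+ * The skb is laid out as Ethernet + IPv4 (with a Router Alert option,
+ * AMT_IPHDR_OPTS bytes) + igmpv3_query, addressed to 224.0.0.1 with
+ * TTL 1, and both the IGMP and IP checksums are filled in here.
+ */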
+static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IPHDR_OPTS] = { IPOPT_RA, 4, 0, 0 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct igmpv3_query *ihv3;
+ void *csum_start = NULL;
+ __sum16 *csum = NULL;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ unsigned int len;
+ int offset;
+
+ len = hlen + tlen + sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb->protocol = htons(ETH_P_IP);
+ skb_reset_mac_header(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb_put(skb, sizeof(*iph));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put(skb, sizeof(*ihv3));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr) + AMT_IPHDR_OPTS) >> 2;
+ iph->tos = AMT_TOS;
+ iph->tot_len = htons(sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3));
+ iph->frag_off = htons(IP_DF);
+ iph->ttl = 1;
+ iph->id = 0;
+ iph->protocol = IPPROTO_IGMP;
+ iph->daddr = htonl(INADDR_ALLHOSTS_GROUP);
+ iph->saddr = htonl(INADDR_ANY);
+ ip_send_check(iph);
+
+ eth = eth_hdr(skb);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ip_eth_mc_map(htonl(INADDR_ALLHOSTS_GROUP), eth->h_dest);
+ eth->h_proto = htons(ETH_P_IP);
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ ihv3->type = IGMP_HOST_MEMBERSHIP_QUERY;
+ ihv3->code = 1;
+ ihv3->group = 0;
+ ihv3->qqic = amt->qi;
+ ihv3->nsrcs = 0;
+ ihv3->resv = 0;
+ ihv3->suppress = false;
+ ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ ihv3->csum = 0;
+ csum = &ihv3->csum;
+ csum_start = (void *)ihv3;
+ *csum = ip_compute_csum(csum_start, sizeof(*ihv3));
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_push(skb, sizeof(*eth) + sizeof(*iph) + AMT_IPHDR_OPTS);
+
+ return skb;
+}
+
+static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
+{
+ if (validate && amt->status >= status)
+ return;
+ netdev_dbg(amt->dev, "Update GW status %s -> %s",
+ status_str[amt->status], status_str[status]);
+ amt->status = status;
+}
+
+static void __amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status,
+ bool validate)
+{
+ if (validate && tunnel->status >= status)
+ return;
+ netdev_dbg(tunnel->amt->dev,
+ "Update Tunnel(IP = %pI4, PORT = %u) status %s -> %s",
+ &tunnel->ip4, ntohs(tunnel->source_port),
+ status_str[tunnel->status], status_str[status]);
+ tunnel->status = status;
+}
+
+static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status,
+ bool validate)
+{
+ spin_lock_bh(&amt->lock);
+ __amt_update_gw_status(amt, status, validate);
+ spin_unlock_bh(&amt->lock);
+}
+
+static void amt_update_relay_status(struct amt_tunnel_list *tunnel,
+ enum amt_status status, bool validate)
+{
+ spin_lock_bh(&tunnel->lock);
+ __amt_update_relay_status(tunnel, status, validate);
+ spin_unlock_bh(&tunnel->lock);
+}
+
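+/* Gateway side: build and send an AMT Discovery message (IPv4 + UDP +
+ * amt_header_discovery) to the configured discovery address.  The
+ * current nonce is included so the relay's Advertisement can be
+ * matched to this Discovery, and the gateway status is advanced to
+ * SENT_DISCOVERY.
+ */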
+static void amt_send_discovery(struct amt_dev *amt)
+{
+ struct amt_header_discovery *amtd;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->discovery_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtd);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtd = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtd->version = 0;
+ amtd->type = AMT_MSG_DISCOVERY;
+ amtd->reserved = 0;
+ amtd->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*udph) + sizeof(*amtd));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->discovery_ip,
+ sizeof(*udph) + sizeof(*amtd),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->discovery_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+ spin_lock_bh(&amt->lock);
+ __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true);
+ spin_unlock_bh(&amt->lock);
+out:
+ rcu_read_unlock();
+}
+
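+/* Gateway side: build and send an AMT Request message to the relay
+ * learned from the Advertisement (amt->remote_ip).  The 'p' flag
+ * selects whether the relay should reply with an IGMP (v6 == false)
+ * or MLD (v6 == true) general query in its Membership Query.
+ */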
+static void amt_send_request(struct amt_dev *amt, bool v6)
+{
+ struct amt_header_request *amtrh;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ amt->remote_ip, amt->local_ip,
+ amt->gw_port, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amtrh);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amtrh = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amtrh->version = 0;
+ amtrh->type = AMT_MSG_REQUEST;
+ amtrh->reserved1 = 0;
+ amtrh->p = v6;
+ amtrh->reserved2 = 0;
+ amtrh->nonce = amt->nonce;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->gw_port;
+ udph->dest = amt->relay_port;
+ udph->len = htons(sizeof(*amtrh) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, amt->remote_ip,
+ sizeof(*udph) + sizeof(*amtrh),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = amt->remote_ip;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
+static void amt_send_igmp_gq(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_igmp_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static struct sk_buff *amt_build_mld_gq(struct amt_dev *amt)
+{
+ u8 ra[AMT_IP6HDR_OPTS] = { IPPROTO_ICMPV6, 0, IPV6_TLV_ROUTERALERT,
+ 2, 0, 0, IPV6_TLV_PAD1, IPV6_TLV_PAD1 };
+ int hlen = LL_RESERVED_SPACE(amt->dev);
+ int tlen = amt->dev->needed_tailroom;
+ struct mld2_query *mld2q;
+ void *csum_start = NULL;
+ struct ipv6hdr *ip6h;
+ struct sk_buff *skb;
+ struct ethhdr *eth;
+ u32 len;
+
+ len = hlen + tlen + sizeof(*ip6h) + sizeof(ra) + sizeof(*mld2q);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, hlen);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->priority = TC_PRIO_CONTROL;
+ skb->protocol = htons(ETH_P_IPV6);
+ skb_put_zero(skb, sizeof(*ip6h));
+ skb_put_data(skb, ra, sizeof(ra));
+ skb_put_zero(skb, sizeof(*mld2q));
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ ip6h = ipv6_hdr(skb);
+ ip6h->payload_len = htons(sizeof(ra) + sizeof(*mld2q));
+ ip6h->nexthdr = NEXTHDR_HOP;
+ ip6h->hop_limit = 1;
+ ip6h->daddr = mld2_all_node;
+ ip6_flow_hdr(ip6h, 0, 0);
+
+ if (ipv6_dev_get_saddr(amt->net, amt->dev, &ip6h->daddr, 0,
+ &ip6h->saddr)) {
+ amt->dev->stats.tx_errors++;
+ kfree_skb(skb);
+ return NULL;
+ }
+
+ eth->h_proto = htons(ETH_P_IPV6);
+ ether_addr_copy(eth->h_source, amt->dev->dev_addr);
+ ipv6_eth_mc_map(&mld2_all_node, eth->h_dest);
+
+ skb_pull(skb, sizeof(*ip6h) + sizeof(ra));
+ skb_reset_transport_header(skb);
+ mld2q = (struct mld2_query *)icmp6_hdr(skb);
+ mld2q->mld2q_mrc = htons(1);
+ mld2q->mld2q_type = ICMPV6_MGM_QUERY;
+ mld2q->mld2q_code = 0;
+ mld2q->mld2q_cksum = 0;
+ mld2q->mld2q_resv1 = 0;
+ mld2q->mld2q_resv2 = 0;
+ mld2q->mld2q_suppress = 0;
+ mld2q->mld2q_qrv = amt->qrv;
+ mld2q->mld2q_nsrcs = 0;
+ mld2q->mld2q_qqic = amt->qi;
+ csum_start = (void *)mld2q;
+ mld2q->mld2q_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ sizeof(*mld2q),
+ IPPROTO_ICMPV6,
+ csum_partial(csum_start,
+ sizeof(*mld2q), 0));
+
+ skb->ip_summed = CHECKSUM_NONE;
+ skb_push(skb, sizeof(*eth) + sizeof(*ip6h) + sizeof(ra));
+ return skb;
+}
+
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+ struct sk_buff *skb;
+
+ skb = amt_build_mld_gq(amt);
+ if (!skb)
+ return;
+
+ amt_skb_cb(skb)->tunnel = tunnel;
+ dev_queue_xmit(skb);
+}
+#else
+static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel)
+{
+}
+#endif
+
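+/* Periodically regenerate the private siphash key (amt->key) used to
+ * derive response MACs for gateways, so previously issued MACs age
+ * out after roughly AMT_SECRET_TIMEOUT.
+ */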
+static void amt_secret_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ secret_wq);
+
+ spin_lock_bh(&amt->lock);
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+ spin_unlock_bh(&amt->lock);
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+}
+
+static void amt_discovery_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ discovery_wq);
+
+ spin_lock_bh(&amt->lock);
+ if (amt->status > AMT_STATUS_SENT_DISCOVERY)
+ goto out;
+ get_random_bytes(&amt->nonce, sizeof(__be32));
+ spin_unlock_bh(&amt->lock);
+
+ amt_send_discovery(amt);
+ spin_lock_bh(&amt->lock);
+out:
+ mod_delayed_work(amt_wq, &amt->discovery_wq,
+ msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT));
+ spin_unlock_bh(&amt->lock);
+}
+
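+/* Gateway side: retransmit Request messages until a Membership Query
+ * arrives.  The interval grows exponentially as
+ * exp = min(2^req_cnt, AMT_MAX_REQ_TIMEOUT) seconds (1, 2, 4, 8, ...),
+ * and after more than AMT_MAX_REQ_COUNT unanswered Requests the
+ * gateway state is reset to INIT so discovery starts over.
+ */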
+static void amt_req_work(struct work_struct *work)
+{
+ struct amt_dev *amt = container_of(to_delayed_work(work),
+ struct amt_dev,
+ req_wq);
+ u32 exp;
+
+ spin_lock_bh(&amt->lock);
+ if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ goto out;
+
+ if (amt->req_cnt++ > AMT_MAX_REQ_COUNT) {
+ netdev_dbg(amt->dev, "Gateway is not ready");
+ amt->qi = AMT_INIT_REQ_TIMEOUT;
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->remote_ip = 0;
+ __amt_update_gw_status(amt, AMT_STATUS_INIT, false);
+ amt->req_cnt = 0;
+ }
+ spin_unlock_bh(&amt->lock);
+
+ amt_send_request(amt, false);
+ amt_send_request(amt, true);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true);
+ spin_lock_bh(&amt->lock);
+out:
+ exp = min_t(u32, (1 << amt->req_cnt), AMT_MAX_REQ_TIMEOUT);
+ mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000));
+ spin_unlock_bh(&amt->lock);
+}
+
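+/* Gateway side: encapsulate a locally generated IGMP/MLD report into
+ * an AMT Membership Update (amt_header_membership_update carrying the
+ * current nonce and the response MAC learned from the last Membership
+ * Query) and send it to the relay over the UDP tunnel.  Returns true
+ * on failure so the caller drops the skb.
+ */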
+static bool amt_send_membership_update(struct amt_dev *amt,
+ struct sk_buff *skb,
+ bool v6)
+{
+ struct amt_header_membership_update *amtmu;
+ struct socket *sock;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmu) +
+ sizeof(*iph) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = amt->remote_ip;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &amt->remote_ip);
+ return true;
+ }
+
+ amtmu = skb_push(skb, sizeof(*amtmu));
+ amtmu->version = 0;
+ amtmu->type = AMT_MSG_MEMBERSHIP_UPDATE;
+ amtmu->reserved = 0;
+ amtmu->nonce = amt->nonce;
+ amtmu->response_mac = amt->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->gw_port,
+ amt->relay_port,
+ false,
+ false);
+ amt_update_gw_status(amt, AMT_STATUS_SENT_UPDATE, true);
+ return false;
+}
+
+static void amt_send_multicast_data(struct amt_dev *amt,
+ const struct sk_buff *oskb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_mcast_data *amtmd;
+ struct socket *sock;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct flowi4 fl4;
+ struct rtable *rt;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return;
+
+ skb = skb_copy_expand(oskb, sizeof(*amtmd) + sizeof(*iph) +
+ sizeof(struct udphdr), 0, GFP_ATOMIC);
+ if (!skb)
+ return;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ kfree_skb(skb);
+ return;
+ }
+
+ amtmd = skb_push(skb, sizeof(*amtmd));
+ amtmd->version = 0;
+ amtmd->reserved = 0;
+ amtmd->type = AMT_MSG_MULTICAST_DATA;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false);
+}
+
+static bool amt_send_membership_query(struct amt_dev *amt,
+ struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel,
+ bool v6)
+{
+ struct amt_header_membership_query *amtmq;
+ struct socket *sock;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ int err;
+
+ sock = rcu_dereference_bh(amt->sock);
+ if (!sock)
+ return true;
+
+ err = skb_cow_head(skb, LL_RESERVED_SPACE(amt->dev) + sizeof(*amtmq) +
+ sizeof(struct iphdr) + sizeof(struct udphdr));
+ if (err)
+ return true;
+
+ skb_reset_inner_headers(skb);
+ memset(&fl4, 0, sizeof(struct flowi4));
+ fl4.flowi4_oif = amt->stream_dev->ifindex;
+ fl4.daddr = tunnel->ip4;
+ fl4.saddr = amt->local_ip;
+ fl4.flowi4_tos = AMT_TOS;
+ fl4.flowi4_proto = IPPROTO_UDP;
+ rt = ip_route_output_key(amt->net, &fl4);
+ if (IS_ERR(rt)) {
+ netdev_dbg(amt->dev, "no route to %pI4\n", &tunnel->ip4);
+ return true;
+ }
+
+ amtmq = skb_push(skb, sizeof(*amtmq));
+ amtmq->version = 0;
+ amtmq->type = AMT_MSG_MEMBERSHIP_QUERY;
+ amtmq->reserved = 0;
+ amtmq->l = 0;
+ amtmq->g = 0;
+ amtmq->nonce = tunnel->nonce;
+ amtmq->response_mac = tunnel->mac;
+
+ if (!v6)
+ skb_set_inner_protocol(skb, htons(ETH_P_IP));
+ else
+ skb_set_inner_protocol(skb, htons(ETH_P_IPV6));
+ udp_tunnel_xmit_skb(rt, sock->sk, skb,
+ fl4.saddr,
+ fl4.daddr,
+ AMT_TOS,
+ ip4_dst_hoplimit(&rt->dst),
+ 0,
+ amt->relay_port,
+ tunnel->source_port,
+ false,
+ false);
+ amt_update_relay_status(tunnel, AMT_STATUS_SENT_QUERY, true);
+ return false;
+}
+
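+/* ndo_start_xmit handler.  In gateway mode only IGMP/MLD reports are
+ * accepted and forwarded to the relay as Membership Updates.  In
+ * relay mode, locally built general queries are tunnelled back to the
+ * originating gateway as Membership Queries, and multicast data is
+ * replicated to every tunnel that has a matching group entry.
+ */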
+static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel;
+ struct amt_group_node *gnode;
+ union amt_addr group = {0,};
+#if IS_ENABLED(CONFIG_IPV6)
+ struct ipv6hdr *ip6h;
+ struct mld_msg *mld;
+#endif
+ bool report = false;
+ struct igmphdr *ih;
+ bool query = false;
+ struct iphdr *iph;
+ bool data = false;
+ bool v6 = false;
+ u32 hash;
+
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ goto free;
+
+ if (!ip_mc_check_igmp(skb)) {
+ ih = igmp_hdr(skb);
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ case IGMP_HOST_MEMBERSHIP_REPORT:
+ report = true;
+ break;
+ case IGMP_HOST_MEMBERSHIP_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = false;
+ group.ip4 = iph->daddr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ goto free;
+
+ if (!ipv6_mc_check_mld(skb)) {
+ mld = (struct mld_msg *)skb_transport_header(skb);
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ case ICMPV6_MLD2_REPORT:
+ report = true;
+ break;
+ case ICMPV6_MGM_QUERY:
+ query = true;
+ break;
+ default:
+ goto free;
+ }
+ } else {
+ data = true;
+ }
+ v6 = true;
+ group.ip6 = ip6h->daddr;
+#endif
+ } else {
+ dev->stats.tx_errors++;
+ goto free;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(struct ethhdr)))
+ goto free;
+
+ skb_pull(skb, sizeof(struct ethhdr));
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ /* Gateway only passes IGMP/MLD packets */
+ if (!report)
+ goto free;
+ if ((!v6 && !amt->ready4) || (v6 && !amt->ready6))
+ goto free;
+ if (amt_send_membership_update(amt, skb, v6))
+ goto free;
+ goto unlock;
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ if (query) {
+ tunnel = amt_skb_cb(skb)->tunnel;
+ if (!tunnel) {
+ WARN_ON(1);
+ goto free;
+ }
+
+ /* Do not forward unexpected query */
+ if (amt_send_membership_query(amt, skb, tunnel, v6))
+ goto free;
+ goto unlock;
+ }
+
+ if (!data)
+ goto free;
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ hash = amt_group_hash(tunnel, &group);
+ hlist_for_each_entry_rcu(gnode, &tunnel->groups[hash],
+ node) {
+ if (!v6) {
+ if (gnode->group_addr.ip4 == iph->daddr)
+ goto found;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ if (ipv6_addr_equal(&gnode->group_addr.ip6,
+ &ip6h->daddr))
+ goto found;
+#endif
+ }
+ }
+ continue;
+found:
+ amt_send_multicast_data(amt, skb, tunnel, v6);
+ }
+ }
+
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+free:
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+unlock:
+ return NETDEV_TX_OK;
+}
+
+static int amt_parse_type(struct sk_buff *skb)
+{
+ struct amt_header *amth;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr) +
+ sizeof(struct amt_header)))
+ return -1;
+
+ amth = (struct amt_header *)(udp_hdr(skb) + 1);
+
+ if (amth->version != 0)
+ return -1;
+
+ if (amth->type >= __AMT_MSG_MAX || !amth->type)
+ return -1;
+ return amth->type;
+}
+
+static void amt_clear_groups(struct amt_tunnel_list *tunnel)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_group_node *gnode;
+ struct hlist_node *t;
+ int i;
+
+ spin_lock_bh(&tunnel->lock);
+ rcu_read_lock();
+ for (i = 0; i < amt->hash_buckets; i++)
+ hlist_for_each_entry_safe(gnode, t, &tunnel->groups[i], node)
+ amt_del_group(amt, gnode);
+ rcu_read_unlock();
+ spin_unlock_bh(&tunnel->lock);
+}
+
+static void amt_tunnel_expire(struct work_struct *work)
+{
+ struct amt_tunnel_list *tunnel = container_of(to_delayed_work(work),
+ struct amt_tunnel_list,
+ gc_wq);
+ struct amt_dev *amt = tunnel->amt;
+
+ spin_lock_bh(&amt->lock);
+ rcu_read_lock();
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ amt_clear_groups(tunnel);
+ rcu_read_unlock();
+ spin_unlock_bh(&amt->lock);
+ kfree_rcu(tunnel, rcu);
+}
+
+static void amt_cleanup_srcs(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode)
+{
+ struct amt_source_node *snode;
+ struct hlist_node *t;
+ int i;
+
+ /* Delete old sources */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i], node) {
+ if (snode->flags == AMT_SOURCE_OLD)
+ amt_destroy_source(snode);
+ }
+ }
+
+ /* switch from new to old */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_rcu(snode, &gnode->sources[i], node) {
+ snode->flags = AMT_SOURCE_OLD;
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as OLD %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
+static void amt_add_srcs(struct amt_dev *amt, struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode, void *grec,
+ bool v6)
+{
+ struct igmpv3_grec *igmp_grec;
+ struct amt_source_node *snode;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ u16 nsrcs;
+ u32 hash;
+ int i;
+
+ if (!v6) {
+ igmp_grec = (struct igmpv3_grec *)grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = (struct mld2_grec *)grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (tunnel->nr_sources >= amt->max_sources)
+ return;
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL, &src))
+ continue;
+
+ snode = amt_alloc_snode(gnode, &src);
+ if (snode) {
+ hash = amt_source_hash(tunnel, &snode->source_addr);
+ hlist_add_head_rcu(&snode->node, &gnode->sources[hash]);
+ tunnel->nr_sources++;
+ gnode->nr_sources++;
+
+ if (!gnode->v6)
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI4 from %pI4\n",
+ &snode->source_addr.ip4,
+ &gnode->group_addr.ip4);
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ netdev_dbg(snode->gnode->amt->dev,
+ "Add source as NEW %pI6 from %pI6\n",
+ &snode->source_addr.ip6,
+ &gnode->group_addr.ip6);
+#endif
+ }
+ }
+}
+
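+/* amt_lookup_act_srcs() below applies an action to a set of sources,
+ * where A is the set of sources currently recorded for the group and
+ * B is the set of sources listed in the received group record:
+ *   AMT_OPS_INT     - A*B (intersection)
+ *   AMT_OPS_UNI     - A+B (union)
+ *   AMT_OPS_SUB     - A-B (in A but not in B)
+ *   AMT_OPS_SUB_REV - B-A (in B but not in A)
+ * Only sources matching the given filter are acted on.
+ */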
+/* Router State Report Rec'd New Router State
+ * ------------ ------------ ----------------
+ * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A)
+ *
+ * -----------+-----------+-----------+
+ * | OLD | NEW |
+ * -----------+-----------+-----------+
+ * FWD | X | X+A |
+ * -----------+-----------+-----------+
+ * D_FWD | Y | Y-A |
+ * -----------+-----------+-----------+
+ * NONE | | A |
+ * -----------+-----------+-----------+
+ *
+ * a) Received sources are NONE/NEW
+ * b) All NONE will be deleted by amt_cleanup_srcs().
+ * c) All OLD will be deleted by amt_cleanup_srcs().
+ * d) After delete, NEW source will be switched to OLD.
+ */
+static void amt_lookup_act_srcs(struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec,
+ enum amt_ops ops,
+ enum amt_filter filter,
+ enum amt_act act,
+ bool v6)
+{
+ struct amt_dev *amt = tunnel->amt;
+ struct amt_source_node *snode;
+ struct igmpv3_grec *igmp_grec;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct mld2_grec *mld_grec;
+#endif
+ union amt_addr src = {0,};
+ struct hlist_node *t;
+ u16 nsrcs;
+ int i, j;
+
+ if (!v6) {
+ igmp_grec = (struct igmpv3_grec *)grec;
+ nsrcs = ntohs(igmp_grec->grec_nsrcs);
+ } else {
+#if IS_ENABLED(CONFIG_IPV6)
+ mld_grec = (struct mld2_grec *)grec;
+ nsrcs = ntohs(mld_grec->grec_nsrcs);
+#else
+ return;
+#endif
+ }
+
+ memset(&src, 0, sizeof(union amt_addr));
+ switch (ops) {
+ case AMT_OPS_INT:
+ /* A*B */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_UNI:
+ /* A+B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (amt_status_filter(snode, filter))
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, filter, &src);
+ if (!snode)
+ continue;
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ break;
+ case AMT_OPS_SUB:
+ /* A-B */
+ for (i = 0; i < amt->hash_buckets; i++) {
+ hlist_for_each_entry_safe(snode, t, &gnode->sources[i],
+ node) {
+ if (!amt_status_filter(snode, filter))
+ continue;
+ for (j = 0; j < nsrcs; j++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[j];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6,
+ &mld_grec->grec_src[j],
+ sizeof(struct in6_addr));
+#endif
+ if (amt_addr_equal(&snode->source_addr,
+ &src))
+ goto out_sub;
+ }
+ amt_act_src(tunnel, gnode, snode, act);
+ continue;
+out_sub:;
+ }
+ }
+ break;
+ case AMT_OPS_SUB_REV:
+ /* B-A */
+ for (i = 0; i < nsrcs; i++) {
+ if (!v6)
+ src.ip4 = igmp_grec->grec_src[i];
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ memcpy(&src.ip6, &mld_grec->grec_src[i],
+ sizeof(struct in6_addr));
+#endif
+ snode = amt_lookup_src(tunnel, gnode, AMT_FILTER_ALL,
+ &src);
+ if (!snode) {
+ snode = amt_lookup_src(tunnel, gnode,
+ filter, &src);
+ if (snode)
+ amt_act_src(tunnel, gnode, snode, act);
+ }
+ }
+ break;
+ default:
+ netdev_dbg(amt->dev, "Invalid type\n");
+ return;
+ }
+}
+
+static void amt_mcast_is_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) IS_IN (B) INCLUDE (A+B) (B)=GMI
+ */
+ /* Update IS_IN (B) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) IS_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ */
+ /* Update (A) in (X, Y) as NONE/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_BOTH,
+ AMT_ACT_STATUS_NONE_NEW,
+ v6);
+ /* Update FWD/OLD as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update IS_IN (A) as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE (, Y-A) as D_FWD_NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
+
+static void amt_mcast_is_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) IS_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE(A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE(, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) IS_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=GMI
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A-X-Y)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH_NEW,
+ AMT_ACT_GMI,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_to_in_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) TO_IN (B) INCLUDE (A+B) (B)=GMI
+ * Send Q(G,A-B)
+ */
+ /* Update TO_IN (B) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update INCLUDE (A) sources as NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) TO_IN (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ * Send Q(G,X-A)
+ * Send Q(G)
+ */
+ /* Update TO_IN (A) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_NONE_NEW,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* Update EXCLUDE(X,) sources as FWD/NEW */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A)
+ * (A) are already switched to FWD_NEW.
+ * So, D_FWD/OLD -> D_FWD/NEW is okay.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * Only FWD_NEW will have (A) sources.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_to_ex_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) TO_EX (B) EXCLUDE (A*B,B-A) (B-A)=0
+ * Delete (A-B)
+ * Send Q(G,A*B)
+ * Group Timer=GMI
+ */
+ /* EXCLUDE (A*B, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, B-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (B-A)=0 */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD_NEW,
+ AMT_ACT_GMI_ZERO,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ gnode->filter_mode = MCAST_EXCLUDE;
+ /* Delete (A-B) will be worked by amt_cleanup_srcs(). */
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) TO_EX (A) EXCLUDE (A-Y,Y*A) (A-X-Y)=Group Timer
+ * Delete (X-A)
+ * Delete (Y-A)
+ * Send Q(G,A-Y)
+ * Group Timer=GMI
+ */
+ /* Update (A-X-Y) as NONE/OLD */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (A-Y, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y*A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* Group Timer=GMI */
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ /* Delete (X-A), (Y-A) will be worked by amt_cleanup_srcs(). */
+ }
+}
+
+static void amt_mcast_allow_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) ALLOW (B) INCLUDE (A+B) (B)=GMI
+ */
+ /* INCLUDE (A+B) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* (B)=GMI */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) ALLOW (A) EXCLUDE (X+A,Y-A) (A)=GMI
+ */
+ /* EXCLUDE (X+A, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y-A) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ /* (A)=GMI
+ * All (A) sources are now in FWD/NEW status.
+ */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_INT,
+ AMT_FILTER_FWD_NEW,
+ AMT_ACT_GMI,
+ v6);
+ }
+}
+
+static void amt_mcast_block_handler(struct amt_dev *amt,
+ struct amt_tunnel_list *tunnel,
+ struct amt_group_node *gnode,
+ void *grec, void *zero_grec, bool v6)
+{
+ if (gnode->filter_mode == MCAST_INCLUDE) {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * INCLUDE (A) BLOCK (B) INCLUDE (A) Send Q(G,A*B)
+ */
+ /* INCLUDE (A) */
+ amt_lookup_act_srcs(tunnel, gnode, zero_grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ } else {
+/* Router State Report Rec'd New Router State Actions
+ * ------------ ------------ ---------------- -------
+ * EXCLUDE (X,Y) BLOCK (A) EXCLUDE (X+(A-Y),Y) (A-X-Y)=Group Timer
+ * Send Q(G,A-Y)
+ */
+ /* (A-X-Y)=Group Timer */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_BOTH,
+ AMT_ACT_GT,
+ v6);
+ /* EXCLUDE (X, ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (X+(A-Y), ) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_SUB_REV,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_FWD_NEW,
+ v6);
+ /* EXCLUDE (, Y) */
+ amt_lookup_act_srcs(tunnel, gnode, grec, AMT_OPS_UNI,
+ AMT_FILTER_D_FWD,
+ AMT_ACT_STATUS_D_FWD_NEW,
+ v6);
+ }
+}
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+static void amt_igmpv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, false);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
+
+/* RFC 3376
+ * 7.3.2. In the Presence of Older Version Group Members
+ *
+ * When Group Compatibility Mode is IGMPv2, a router internally
+ * translates the following IGMPv2 messages for that group to their
+ * IGMPv3 equivalents:
+ *
+ * IGMPv2 Message IGMPv3 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Leave TO_IN( {} )
+ */
+static void amt_igmpv2_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = ih->group;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
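+/* Walk the group records of an IGMPv3 report.  Each record is length
+ * checked with ip_mc_may_pull() before use, the group node is looked
+ * up or created, the listed sources are added as NEW, the record type
+ * is dispatched to the matching RFC 3376 state handler, and finally
+ * amt_cleanup_srcs() deletes sources still flagged OLD and re-flags
+ * the remaining NEW sources as OLD.
+ */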
+static void amt_igmpv3_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmpv3_report *ihrv3 = igmpv3_report_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*ihrv3);
+ void *zero_grec = (void *)&igmpv3_zero_grec;
+ struct iphdr *iph = ip_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct igmpv3_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(ihrv3->ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(__be32);
+ if (!ip_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip4 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip4 = iph->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, false);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ false);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, false);
+ switch (grec->grec_type) {
+ case IGMPV3_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ case IGMPV3_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, false);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* Caller must hold tunnel->lock. */
+static void amt_igmp_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct igmphdr *ih = igmp_hdr(skb);
+
+ switch (ih->type) {
+ case IGMPV3_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv3_report_handler(amt, skb, tunnel);
+ break;
+ case IGMPV2_HOST_MEMBERSHIP_REPORT:
+ amt_igmpv2_report_handler(amt, skb, tunnel);
+ break;
+ case IGMP_HOST_LEAVE_MESSAGE:
+ amt_igmpv2_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message MLDv2 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Done TO_IN( {} )
+ */
+static void amt_mldv1_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host, true);
+ if (!IS_ERR(gnode)) {
+ gnode->filter_mode = MCAST_EXCLUDE;
+ if (!mod_delayed_work(amt_wq, &gnode->group_timer,
+ msecs_to_jiffies(amt_gmi(amt))))
+ dev_hold(amt->dev);
+ }
+ }
+}
+
+/* RFC 3810
+ * 8.3.2. In the Presence of MLDv1 Multicast Address Listeners
+ *
+ * When Multicast Address Compatibility Mode is MLDv2, a router acts
+ * using the MLDv2 protocol for that multicast address. When Multicast
+ * Address Compatibility Mode is MLDv1, a router internally translates
+ * the following MLDv1 messages for that multicast address to their
+ * MLDv2 equivalents:
+ *
+ * MLDv1 Message MLDv2 Equivalent
+ * -------------- -----------------
+ * Report IS_EX( {} )
+ * Done TO_IN( {} )
+ */
+static void amt_mldv1_leave_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+
+ memcpy(&group.ip6, &mld->mld_mca, sizeof(struct in6_addr));
+ memcpy(&host.ip6, &ip6h->saddr, sizeof(struct in6_addr));
+
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (gnode)
+ amt_del_group(amt, gnode);
+}
+
+static void amt_mldv2_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld2_report *mld2r = (struct mld2_report *)icmp6_hdr(skb);
+ int len = skb_transport_offset(skb) + sizeof(*mld2r);
+ void *zero_grec = (void *)&mldv2_zero_grec;
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct amt_group_node *gnode;
+ union amt_addr group, host;
+ struct mld2_grec *grec;
+ u16 nsrcs;
+ int i;
+
+ for (i = 0; i < ntohs(mld2r->mld2r_ngrec); i++) {
+ len += sizeof(*grec);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ grec = (void *)(skb->data + len - sizeof(*grec));
+ nsrcs = ntohs(grec->grec_nsrcs);
+
+ len += nsrcs * sizeof(struct in6_addr);
+ if (!ipv6_mc_may_pull(skb, len))
+ break;
+
+ memset(&group, 0, sizeof(union amt_addr));
+ group.ip6 = grec->grec_mca;
+ memset(&host, 0, sizeof(union amt_addr));
+ host.ip6 = ip6h->saddr;
+ gnode = amt_lookup_group(tunnel, &group, &host, true);
+ if (!gnode) {
+ gnode = amt_add_group(amt, tunnel, &group, &host,
+ true);
+ if (IS_ERR(gnode))
+ continue;
+ }
+
+ amt_add_srcs(amt, tunnel, gnode, grec, true);
+ switch (grec->grec_type) {
+ case MLD2_MODE_IS_INCLUDE:
+ amt_mcast_is_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_MODE_IS_EXCLUDE:
+ amt_mcast_is_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_INCLUDE:
+ amt_mcast_to_in_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_CHANGE_TO_EXCLUDE:
+ amt_mcast_to_ex_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_ALLOW_NEW_SOURCES:
+ amt_mcast_allow_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ case MLD2_BLOCK_OLD_SOURCES:
+ amt_mcast_block_handler(amt, tunnel, gnode, grec,
+ zero_grec, true);
+ break;
+ default:
+ break;
+ }
+ amt_cleanup_srcs(amt, tunnel, gnode);
+ }
+}
+
+/* Caller must hold tunnel->lock. */
+static void amt_mld_report_handler(struct amt_dev *amt, struct sk_buff *skb,
+ struct amt_tunnel_list *tunnel)
+{
+ struct mld_msg *mld = (struct mld_msg *)icmp6_hdr(skb);
+
+ switch (mld->mld_type) {
+ case ICMPV6_MGM_REPORT:
+ amt_mldv1_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MLD2_REPORT:
+ amt_mldv2_report_handler(amt, skb, tunnel);
+ break;
+ case ICMPV6_MGM_REDUCTION:
+ amt_mldv1_leave_handler(amt, skb, tunnel);
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
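+/* Gateway side: handle a relay Advertisement.  The advertised relay
+ * address is sanity checked (non-zero and not loopback, multicast or
+ * zeronet), stored as amt->remote_ip, and the Request work is kicked
+ * immediately.  Returns true if the packet should be dropped.
+ */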
+static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_advertisement *amta;
+ int hdr_size;
+
+ hdr_size = sizeof(*amta) - sizeof(struct amt_header);
+
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amta = (struct amt_header_advertisement *)(udp_hdr(skb) + 1);
+ if (!amta->ip4)
+ return true;
+
+ if (amta->reserved || amta->version)
+ return true;
+
+ if (ipv4_is_loopback(amta->ip4) || ipv4_is_multicast(amta->ip4) ||
+ ipv4_is_zeronet(amta->ip4))
+ return true;
+
+ amt->remote_ip = amta->ip4;
+ netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_ADVERTISEMENT, true);
+ return false;
+}
+
+static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_mcast_data *amtmd;
+ int hdr_size, len, err;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1);
+ if (amtmd->reserved || amtmd->version)
+ return true;
+
+ hdr_size = sizeof(*amtmd) + sizeof(struct udphdr);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false))
+ return true;
+ skb_reset_network_header(skb);
+ skb_push(skb, sizeof(*eth));
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ eth = eth_hdr(skb);
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h;
+
+ ip6h = ipv6_hdr(skb);
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ err = gro_cells_receive(&amt->gro_cells, skb);
+ if (likely(err == NET_RX_SUCCESS))
+ dev_sw_netstats_rx_add(amt->dev, len);
+ else
+ amt->dev->stats.rx_dropped++;
+
+ return false;
+}
+
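+/* Gateway side: handle a Membership Query from the relay.  The inner
+ * IGMPv3/MLDv2 general query is unpacked, the response MAC and QQIC
+ * are cached for later Membership Updates, ready4/ready6 is set, and
+ * the query is handed to the local IP stack via netif_rx() so the
+ * kernel's IGMP/MLD code can answer it with a report.
+ */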
+static bool amt_membership_query_handler(struct amt_dev *amt,
+ struct sk_buff *skb)
+{
+ struct amt_header_membership_query *amtmq;
+ struct igmpv3_query *ihv3;
+ struct ethhdr *eth, *oeth;
+ struct iphdr *iph;
+ int hdr_size, len;
+
+ hdr_size = sizeof(*amtmq) - sizeof(struct amt_header);
+
+ if (!pskb_may_pull(skb, hdr_size))
+ return true;
+
+ amtmq = (struct amt_header_membership_query *)(udp_hdr(skb) + 1);
+ if (amtmq->reserved || amtmq->version)
+ return true;
+
+ hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth);
+ if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false))
+ return true;
+ oeth = eth_hdr(skb);
+ skb_reset_mac_header(skb);
+ skb_pull(skb, sizeof(*eth));
+ skb_reset_network_header(skb);
+ eth = eth_hdr(skb);
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (!ipv4_is_multicast(iph->daddr))
+ return true;
+ if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS +
+ sizeof(*ihv3)))
+ return true;
+
+ ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS);
+ spin_lock_bh(&amt->lock);
+ amt->ready4 = true;
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = ihv3->qqic;
+ spin_unlock_bh(&amt->lock);
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+ struct mld2_query *mld2q;
+
+ if (!ipv6_addr_is_multicast(&ip6h->daddr))
+ return true;
+ if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS +
+ sizeof(*mld2q)))
+ return true;
+
+ mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ skb_reset_transport_header(skb);
+ skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS);
+ spin_lock_bh(&amt->lock);
+ amt->ready6 = true;
+ amt->mac = amtmq->response_mac;
+ amt->req_cnt = 0;
+ amt->qi = mld2q->mld2q_qqic;
+ spin_unlock_bh(&amt->lock);
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ return true;
+ }
+
+ ether_addr_copy(eth->h_source, oeth->h_source);
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+
+ return false;
+}
+
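+/* Relay side: handle a Membership Update from a gateway.  The update
+ * is accepted only if it arrives from a known tunnel endpoint and its
+ * nonce and response MAC match what the relay handed out; the tunnel
+ * GC timer is then extended, the inner IGMP/MLD report updates the
+ * per-tunnel group/source state, and the report is re-injected
+ * locally via netif_rx().
+ */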
+static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_membership_update *amtmu;
+ struct amt_tunnel_list *tunnel;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+ int len;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+
+ if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol,
+ false, false))
+ return true;
+
+ amtmu = (struct amt_header_membership_update *)skb->data;
+ if (amtmu->reserved || amtmu->version)
+ return true;
+
+ skb_pull(skb, sizeof(*amtmu));
+ skb_reset_network_header(skb);
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) {
+ if (tunnel->ip4 == iph->saddr) {
+ if (amtmu->nonce == tunnel->nonce &&
+ amtmu->response_mac == tunnel->mac) {
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt))
+ * 3);
+ goto report;
+ } else {
+ netdev_dbg(amt->dev, "Invalid MAC\n");
+ return true;
+ }
+ }
+ }
+
+ return false;
+
+report:
+ iph = ip_hdr(skb);
+ if (iph->version == 4) {
+ if (ip_mc_check_igmp(skb)) {
+ netdev_dbg(amt->dev, "Invalid IGMP\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_igmp_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IP);
+ eth->h_proto = htons(ETH_P_IP);
+ ip_eth_mc_map(iph->daddr, eth->h_dest);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (iph->version == 6) {
+ struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+ if (ipv6_mc_check_mld(skb)) {
+ netdev_dbg(amt->dev, "Invalid MLD\n");
+ return true;
+ }
+
+ spin_lock_bh(&tunnel->lock);
+ amt_mld_report_handler(amt, skb, tunnel);
+ spin_unlock_bh(&tunnel->lock);
+
+ skb_push(skb, sizeof(struct ethhdr));
+ skb_reset_mac_header(skb);
+ eth = eth_hdr(skb);
+ skb->protocol = htons(ETH_P_IPV6);
+ eth->h_proto = htons(ETH_P_IPV6);
+ ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
+#endif
+ } else {
+ netdev_dbg(amt->dev, "Unsupported Protocol\n");
+ return true;
+ }
+
+ skb_pull(skb, sizeof(struct ethhdr));
+ skb->pkt_type = PACKET_MULTICAST;
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ if (netif_rx(skb) == NET_RX_SUCCESS) {
+ amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_UPDATE,
+ true);
+ dev_sw_netstats_rx_add(amt->dev, len);
+ } else {
+ amt->dev->stats.rx_dropped++;
+ }
+
+ return false;
+}
+
+static void amt_send_advertisement(struct amt_dev *amt, __be32 nonce,
+ __be32 daddr, __be16 dport)
+{
+ struct amt_header_advertisement *amta;
+ int hlen, tlen, offset;
+ struct socket *sock;
+ struct udphdr *udph;
+ struct sk_buff *skb;
+ struct iphdr *iph;
+ struct rtable *rt;
+ struct flowi4 fl4;
+ u32 len;
+ int err;
+
+ rcu_read_lock();
+ sock = rcu_dereference(amt->sock);
+ if (!sock)
+ goto out;
+
+ if (!netif_running(amt->stream_dev) || !netif_running(amt->dev))
+ goto out;
+
+ rt = ip_route_output_ports(amt->net, &fl4, sock->sk,
+ daddr, amt->local_ip,
+ dport, amt->relay_port,
+ IPPROTO_UDP, 0,
+ amt->stream_dev->ifindex);
+ if (IS_ERR(rt)) {
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ hlen = LL_RESERVED_SPACE(amt->dev);
+ tlen = amt->dev->needed_tailroom;
+ len = hlen + tlen + sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb = netdev_alloc_skb_ip_align(amt->dev, len);
+ if (!skb) {
+ ip_rt_put(rt);
+ amt->dev->stats.tx_errors++;
+ goto out;
+ }
+
+ skb->priority = TC_PRIO_CONTROL;
+ skb_dst_set(skb, &rt->dst);
+
+ len = sizeof(*iph) + sizeof(*udph) + sizeof(*amta);
+ skb_reset_network_header(skb);
+ skb_put(skb, len);
+ amta = skb_pull(skb, sizeof(*iph) + sizeof(*udph));
+ amta->version = 0;
+ amta->type = AMT_MSG_ADVERTISEMENT;
+ amta->reserved = 0;
+ amta->nonce = nonce;
+ amta->ip4 = amt->local_ip;
+ skb_push(skb, sizeof(*udph));
+ skb_reset_transport_header(skb);
+ udph = udp_hdr(skb);
+ udph->source = amt->relay_port;
+ udph->dest = dport;
+ udph->len = htons(sizeof(*amta) + sizeof(*udph));
+ udph->check = 0;
+ offset = skb_transport_offset(skb);
+ skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ udph->check = csum_tcpudp_magic(amt->local_ip, daddr,
+ sizeof(*udph) + sizeof(*amta),
+ IPPROTO_UDP, skb->csum);
+
+ skb_push(skb, sizeof(*iph));
+ iph = ip_hdr(skb);
+ iph->version = 4;
+ iph->ihl = (sizeof(struct iphdr)) >> 2;
+ iph->tos = AMT_TOS;
+ iph->frag_off = 0;
+ iph->ttl = ip4_dst_hoplimit(&rt->dst);
+ iph->daddr = daddr;
+ iph->saddr = amt->local_ip;
+ iph->protocol = IPPROTO_UDP;
+ iph->tot_len = htons(len);
+
+ skb->ip_summed = CHECKSUM_NONE;
+ ip_select_ident(amt->net, skb, NULL);
+ ip_send_check(iph);
+ err = ip_local_out(amt->net, sock->sk, skb);
+ if (unlikely(net_xmit_eval(err)))
+ amt->dev->stats.tx_errors++;
+
+out:
+ rcu_read_unlock();
+}
+
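+/* Relay side: validate an incoming Discovery and answer it with an
+ * Advertisement carrying the same nonce. Returns true when the packet
+ * should be dropped.
+ */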
+static bool amt_discovery_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_discovery *amtd;
+ struct udphdr *udph;
+ struct iphdr *iph;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtd)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtd = (struct amt_header_discovery *)(udp_hdr(skb) + 1);
+
+ if (amtd->reserved || amtd->version)
+ return true;
+
+ amt_send_advertisement(amt, amtd->nonce, iph->saddr, udph->source);
+
+ return false;
+}
+
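+/* Relay side: handle a Request. Reuse the tunnel entry of a known gateway
+ * address or allocate a new one (bounded by max_tunnels), derive the
+ * response MAC with siphash and answer with an IGMP or MLD general query.
+ */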
+static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
+{
+ struct amt_header_request *amtrh;
+ struct amt_tunnel_list *tunnel;
+ unsigned long long key;
+ struct udphdr *udph;
+ struct iphdr *iph;
+ u64 mac;
+ int i;
+
+ if (!pskb_may_pull(skb, sizeof(*udph) + sizeof(*amtrh)))
+ return true;
+
+ iph = ip_hdr(skb);
+ udph = udp_hdr(skb);
+ amtrh = (struct amt_header_request *)(udp_hdr(skb) + 1);
+
+ if (amtrh->reserved1 || amtrh->reserved2 || amtrh->version)
+ return true;
+
+ list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list)
+ if (tunnel->ip4 == iph->saddr)
+ goto send;
+
+ if (amt->nr_tunnels >= amt->max_tunnels) {
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ return true;
+ }
+
+ tunnel = kzalloc(sizeof(*tunnel) +
+ (sizeof(struct hlist_head) * amt->hash_buckets),
+ GFP_ATOMIC);
+ if (!tunnel)
+ return true;
+
+ tunnel->source_port = udph->source;
+ tunnel->ip4 = iph->saddr;
+
+ memcpy(&key, &tunnel->key, sizeof(unsigned long long));
+ tunnel->amt = amt;
+ spin_lock_init(&tunnel->lock);
+ for (i = 0; i < amt->hash_buckets; i++)
+ INIT_HLIST_HEAD(&tunnel->groups[i]);
+
+ INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
+
+ spin_lock_bh(&amt->lock);
+ list_add_tail_rcu(&tunnel->list, &amt->tunnel_list);
+ tunnel->key = amt->key;
+ amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true);
+ amt->nr_tunnels++;
+ mod_delayed_work(amt_wq, &tunnel->gc_wq,
+ msecs_to_jiffies(amt_gmi(amt)));
+ spin_unlock_bh(&amt->lock);
+
+send:
+ tunnel->nonce = amtrh->nonce;
+ mac = siphash_3u32((__force u32)tunnel->ip4,
+ (__force u32)tunnel->source_port,
+ (__force u32)tunnel->nonce,
+ &tunnel->key);
+ tunnel->mac = mac >> 16;
+
+ if (!netif_running(amt->dev) || !netif_running(amt->stream_dev))
+ return true;
+
+ if (!amtrh->p)
+ amt_send_igmp_gq(amt, tunnel);
+ else
+ amt_send_mld_gq(amt, tunnel);
+
+ return false;
+}
+
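+/* encap_rcv() handler: dispatch received AMT messages according to the
+ * device mode (gateway or relay) and the parsed message type.
+ */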
+static int amt_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ struct iphdr *iph;
+ int type;
+ bool err;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+	if (!amt) {
+		/* no amt device bound to this socket: free the skb here */
+		kfree_skb(skb);
+		goto out;
+	}
+
+ skb->dev = amt->dev;
+ iph = ip_hdr(skb);
+ type = amt_parse_type(skb);
+ if (type == -1) {
+ err = true;
+ goto drop;
+ }
+
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ switch (type) {
+ case AMT_MSG_ADVERTISEMENT:
+ if (iph->saddr != amt->discovery_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ if (amt_advertisement_handler(amt, skb))
+ amt->dev->stats.rx_dropped++;
+ goto out;
+ case AMT_MSG_MULTICAST_DATA:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_multicast_data_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ case AMT_MSG_MEMBERSHIP_QUERY:
+ if (iph->saddr != amt->remote_ip) {
+ netdev_dbg(amt->dev, "Invalid Relay IP\n");
+ err = true;
+ goto drop;
+ }
+ err = amt_membership_query_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of Gateway\n");
+ break;
+ }
+ } else {
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ err = amt_discovery_handler(amt, skb);
+ break;
+ case AMT_MSG_REQUEST:
+ err = amt_request_handler(amt, skb);
+ break;
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ err = amt_update_handler(amt, skb);
+ if (err)
+ goto drop;
+ else
+ goto out;
+ default:
+ err = true;
+ netdev_dbg(amt->dev, "Invalid type of relay\n");
+ break;
+ }
+ }
+drop:
+ if (err) {
+ amt->dev->stats.rx_dropped++;
+ kfree_skb(skb);
+ } else {
+ consume_skb(skb);
+ }
+out:
+ rcu_read_unlock_bh();
+ return 0;
+}
+
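+/* encap_err_lookup() handler: on an ICMP unreachable for a Request or
+ * Membership Update, reschedule the request work immediately.
+ */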
+static int amt_err_lookup(struct sock *sk, struct sk_buff *skb)
+{
+ struct amt_dev *amt;
+ int type;
+
+ rcu_read_lock_bh();
+ amt = rcu_dereference_sk_user_data(sk);
+ if (!amt)
+ goto drop;
+
+ if (amt->mode != AMT_MODE_GATEWAY)
+ goto drop;
+
+ type = amt_parse_type(skb);
+ if (type == -1)
+ goto drop;
+
+	netdev_dbg(amt->dev, "Received ICMP Unreachable of %s\n",
+		   type_str[type]);
+ switch (type) {
+ case AMT_MSG_DISCOVERY:
+ break;
+ case AMT_MSG_REQUEST:
+ case AMT_MSG_MEMBERSHIP_UPDATE:
+ if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT)
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ break;
+ default:
+ goto drop;
+ }
+ rcu_read_unlock_bh();
+ return 0;
+drop:
+	rcu_read_unlock_bh();
+	/* amt may be NULL if no device is bound to the socket yet */
+	if (amt)
+		amt->dev->stats.rx_dropped++;
+	return 0;
+}
+
+static struct socket *amt_create_sock(struct net *net, __be16 port)
+{
+ struct udp_port_cfg udp_conf;
+ struct socket *sock;
+ int err;
+
+ memset(&udp_conf, 0, sizeof(udp_conf));
+ udp_conf.family = AF_INET;
+ udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
+
+ udp_conf.local_udp_port = port;
+
+ err = udp_sock_create(net, &udp_conf, &sock);
+ if (err < 0)
+ return ERR_PTR(err);
+
+ return sock;
+}
+
+static int amt_socket_create(struct amt_dev *amt)
+{
+ struct udp_tunnel_sock_cfg tunnel_cfg;
+ struct socket *sock;
+
+ sock = amt_create_sock(amt->net, amt->relay_port);
+ if (IS_ERR(sock))
+ return PTR_ERR(sock);
+
+ /* Mark socket as an encapsulation socket */
+ memset(&tunnel_cfg, 0, sizeof(tunnel_cfg));
+ tunnel_cfg.sk_user_data = amt;
+ tunnel_cfg.encap_type = 1;
+ tunnel_cfg.encap_rcv = amt_rcv;
+ tunnel_cfg.encap_err_lookup = amt_err_lookup;
+ tunnel_cfg.encap_destroy = NULL;
+ setup_udp_tunnel_sock(amt->net, sock, &tunnel_cfg);
+
+ rcu_assign_pointer(amt->sock, sock);
+ return 0;
+}
+
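+/* ndo_open: create the UDP socket, reset the gateway state and kick off
+ * the discovery and request work (gateway mode) or the secret work
+ * (relay mode).
+ */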
+static int amt_dev_open(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+
+ err = amt_socket_create(amt);
+ if (err)
+ return err;
+
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+ get_random_bytes(&amt->key, sizeof(siphash_key_t));
+
+ amt->status = AMT_STATUS_INIT;
+ if (amt->mode == AMT_MODE_GATEWAY) {
+ mod_delayed_work(amt_wq, &amt->discovery_wq, 0);
+ mod_delayed_work(amt_wq, &amt->req_wq, 0);
+ } else if (amt->mode == AMT_MODE_RELAY) {
+ mod_delayed_work(amt_wq, &amt->secret_wq,
+ msecs_to_jiffies(AMT_SECRET_TIMEOUT));
+ }
+ return err;
+}
+
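+/* ndo_stop: cancel the pending work items, release the UDP socket and
+ * tear down every relay tunnel.
+ */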
+static int amt_dev_stop(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ struct amt_tunnel_list *tunnel, *tmp;
+ struct socket *sock;
+
+ cancel_delayed_work_sync(&amt->req_wq);
+ cancel_delayed_work_sync(&amt->discovery_wq);
+ cancel_delayed_work_sync(&amt->secret_wq);
+
+ /* shutdown */
+ sock = rtnl_dereference(amt->sock);
+ RCU_INIT_POINTER(amt->sock, NULL);
+ synchronize_net();
+ if (sock)
+ udp_tunnel_sock_release(sock);
+
+ amt->ready4 = false;
+ amt->ready6 = false;
+ amt->req_cnt = 0;
+ amt->remote_ip = 0;
+
+ list_for_each_entry_safe(tunnel, tmp, &amt->tunnel_list, list) {
+ list_del_rcu(&tunnel->list);
+ amt->nr_tunnels--;
+ cancel_delayed_work_sync(&tunnel->gc_wq);
+ amt_clear_groups(tunnel);
+ kfree_rcu(tunnel, rcu);
+ }
+
+ return 0;
+}
+
+static const struct device_type amt_type = {
+ .name = "amt",
+};
+
+static int amt_dev_init(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err;
+
+ amt->dev = dev;
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
+ if (!dev->tstats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&amt->gro_cells, dev);
+ if (err) {
+ free_percpu(dev->tstats);
+ return err;
+ }
+
+ return 0;
+}
+
+static void amt_dev_uninit(struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ gro_cells_destroy(&amt->gro_cells);
+ free_percpu(dev->tstats);
+}
+
+static const struct net_device_ops amt_netdev_ops = {
+ .ndo_init = amt_dev_init,
+ .ndo_uninit = amt_dev_uninit,
+ .ndo_open = amt_dev_open,
+ .ndo_stop = amt_dev_stop,
+ .ndo_start_xmit = amt_dev_xmit,
+ .ndo_get_stats64 = dev_get_tstats64,
+};
+
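+/* The amt device is a point-to-point, ARP-less virtual interface; its
+ * traffic is encapsulated over the underlying stream device.
+ */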
+static void amt_link_setup(struct net_device *dev)
+{
+ dev->netdev_ops = &amt_netdev_ops;
+ dev->needs_free_netdev = true;
+ SET_NETDEV_DEVTYPE(dev, &amt_type);
+ dev->min_mtu = ETH_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
+ dev->type = ARPHRD_NONE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->priv_flags |= IFF_NO_QUEUE;
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GSO_SOFTWARE;
+ dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->hw_features |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ dev->hw_features |= NETIF_F_FRAGLIST | NETIF_F_RXCSUM;
+ dev->hw_features |= NETIF_F_GSO_SOFTWARE;
+ eth_hw_addr_random(dev);
+ eth_zero_addr(dev->broadcast);
+ ether_setup(dev);
+}
+
+static const struct nla_policy amt_policy[IFLA_AMT_MAX + 1] = {
+ [IFLA_AMT_MODE] = { .type = NLA_U32 },
+ [IFLA_AMT_RELAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_GATEWAY_PORT] = { .type = NLA_U16 },
+ [IFLA_AMT_LINK] = { .type = NLA_U32 },
+ [IFLA_AMT_LOCAL_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_REMOTE_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_DISCOVERY_IP] = { .len = sizeof_field(struct iphdr, daddr) },
+ [IFLA_AMT_MAX_TUNNELS] = { .type = NLA_U32 },
+};
+
+static int amt_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ if (!data)
+ return -EINVAL;
+
+ if (!data[IFLA_AMT_LINK]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LINK],
+ "Link attribute is required");
+ return -EINVAL;
+ }
+
+ if (!data[IFLA_AMT_MODE]) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is required");
+ return -EINVAL;
+ }
+
+ if (nla_get_u32(data[IFLA_AMT_MODE]) > AMT_MODE_MAX) {
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_MODE],
+ "Mode attribute is not valid");
+ return -EINVAL;
+ }
+
+	if (!data[IFLA_AMT_LOCAL_IP]) {
+		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_LOCAL_IP],
+				    "Local attribute is required");
+		return -EINVAL;
+	}
+
+	if (!data[IFLA_AMT_DISCOVERY_IP] &&
+	    nla_get_u32(data[IFLA_AMT_MODE]) == AMT_MODE_GATEWAY) {
+		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_AMT_DISCOVERY_IP],
+				    "Discovery attribute is required");
+		return -EINVAL;
+	}
+
+ return 0;
+}
+
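+/* newlink: validate the attributes, resolve the underlying stream device
+ * and derive mode-dependent MTU/headroom before registering the device.
+ */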
+static int amt_newlink(struct net *net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+ int err = -EINVAL;
+
+ amt->net = net;
+ amt->mode = nla_get_u32(data[IFLA_AMT_MODE]);
+
+ if (data[IFLA_AMT_MAX_TUNNELS] &&
+ nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]))
+ amt->max_tunnels = nla_get_u32(data[IFLA_AMT_MAX_TUNNELS]);
+ else
+ amt->max_tunnels = AMT_MAX_TUNNELS;
+
+ spin_lock_init(&amt->lock);
+ amt->max_groups = AMT_MAX_GROUP;
+ amt->max_sources = AMT_MAX_SOURCE;
+ amt->hash_buckets = AMT_HSIZE;
+ amt->nr_tunnels = 0;
+ get_random_bytes(&amt->hash_seed, sizeof(amt->hash_seed));
+ amt->stream_dev = dev_get_by_index(net,
+ nla_get_u32(data[IFLA_AMT_LINK]));
+ if (!amt->stream_dev) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Can't find stream device");
+ return -ENODEV;
+ }
+
+ if (amt->stream_dev->type != ARPHRD_ETHER) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LINK],
+ "Invalid stream device type");
+ goto err;
+ }
+
+ amt->local_ip = nla_get_in_addr(data[IFLA_AMT_LOCAL_IP]);
+ if (ipv4_is_loopback(amt->local_ip) ||
+ ipv4_is_zeronet(amt->local_ip) ||
+ ipv4_is_multicast(amt->local_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_LOCAL_IP],
+ "Invalid Local address");
+ goto err;
+ }
+
+ if (data[IFLA_AMT_RELAY_PORT])
+ amt->relay_port = nla_get_be16(data[IFLA_AMT_RELAY_PORT]);
+ else
+ amt->relay_port = htons(IANA_AMT_UDP_PORT);
+
+ if (data[IFLA_AMT_GATEWAY_PORT])
+ amt->gw_port = nla_get_be16(data[IFLA_AMT_GATEWAY_PORT]);
+ else
+ amt->gw_port = htons(IANA_AMT_UDP_PORT);
+
+ if (!amt->relay_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "relay port must not be 0");
+ goto err;
+ }
+ if (amt->mode == AMT_MODE_RELAY) {
+ amt->qrv = amt->net->ipv4.sysctl_igmp_qrv;
+ amt->qri = 10;
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_RELAY_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_RELAY_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_RELAY_HLEN;
+ } else {
+ if (!data[IFLA_AMT_DISCOVERY_IP]) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be set in gateway mode");
+ goto err;
+ }
+ if (!amt->gw_port) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "gateway port must not be 0");
+ goto err;
+ }
+ amt->remote_ip = 0;
+ amt->discovery_ip = nla_get_in_addr(data[IFLA_AMT_DISCOVERY_IP]);
+ if (ipv4_is_loopback(amt->discovery_ip) ||
+ ipv4_is_zeronet(amt->discovery_ip) ||
+ ipv4_is_multicast(amt->discovery_ip)) {
+ NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_AMT_DISCOVERY_IP],
+ "discovery must be unicast");
+ goto err;
+ }
+
+ dev->needed_headroom = amt->stream_dev->needed_headroom +
+ AMT_GW_HLEN;
+ dev->mtu = amt->stream_dev->mtu - AMT_GW_HLEN;
+ dev->max_mtu = dev->mtu;
+ dev->min_mtu = ETH_MIN_MTU + AMT_GW_HLEN;
+ }
+ amt->qi = AMT_INIT_QUERY_INTERVAL;
+
+ err = register_netdevice(dev);
+ if (err < 0) {
+ netdev_dbg(dev, "failed to register new netdev %d\n", err);
+ goto err;
+ }
+
+ err = netdev_upper_dev_link(amt->stream_dev, dev, extack);
+ if (err < 0) {
+ unregister_netdevice(dev);
+ goto err;
+ }
+
+ INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work);
+ INIT_DELAYED_WORK(&amt->req_wq, amt_req_work);
+ INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work);
+ INIT_LIST_HEAD(&amt->tunnel_list);
+
+ return 0;
+err:
+ dev_put(amt->stream_dev);
+ return err;
+}
+
+static void amt_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ unregister_netdevice_queue(dev, head);
+ netdev_upper_dev_unlink(amt->stream_dev, dev);
+ dev_put(amt->stream_dev);
+}
+
+static size_t amt_get_size(const struct net_device *dev)
+{
+ return nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MODE */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_RELAY_PORT */
+ nla_total_size(sizeof(__u16)) + /* IFLA_AMT_GATEWAY_PORT */
+ nla_total_size(sizeof(__u32)) + /* IFLA_AMT_LINK */
+	       nla_total_size(sizeof(__u32)) + /* IFLA_AMT_MAX_TUNNELS */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_DISCOVERY_IP */
+ nla_total_size(sizeof(struct iphdr)) + /* IFLA_AMT_REMOTE_IP */
+ nla_total_size(sizeof(struct iphdr)); /* IFLA_AMT_LOCAL_IP */
+}
+
+static int amt_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct amt_dev *amt = netdev_priv(dev);
+
+ if (nla_put_u32(skb, IFLA_AMT_MODE, amt->mode))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_RELAY_PORT, amt->relay_port))
+ goto nla_put_failure;
+ if (nla_put_be16(skb, IFLA_AMT_GATEWAY_PORT, amt->gw_port))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_LINK, amt->stream_dev->ifindex))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_LOCAL_IP, amt->local_ip))
+ goto nla_put_failure;
+ if (nla_put_in_addr(skb, IFLA_AMT_DISCOVERY_IP, amt->discovery_ip))
+ goto nla_put_failure;
+ if (amt->remote_ip)
+ if (nla_put_in_addr(skb, IFLA_AMT_REMOTE_IP, amt->remote_ip))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_AMT_MAX_TUNNELS, amt->max_tunnels))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+static struct rtnl_link_ops amt_link_ops __read_mostly = {
+ .kind = "amt",
+ .maxtype = IFLA_AMT_MAX,
+ .policy = amt_policy,
+ .priv_size = sizeof(struct amt_dev),
+ .setup = amt_link_setup,
+ .validate = amt_validate,
+ .newlink = amt_newlink,
+ .dellink = amt_dellink,
+ .get_size = amt_get_size,
+ .fill_info = amt_fill_info,
+};
+
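+/* Find the amt device stacked on top of the given stream device, if any. */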
+static struct net_device *amt_lookup_upper_dev(struct net_device *dev)
+{
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+
+ for_each_netdev(dev_net(dev), upper_dev) {
+ if (netif_is_amt(upper_dev)) {
+ amt = netdev_priv(upper_dev);
+ if (amt->stream_dev == dev)
+ return upper_dev;
+ }
+ }
+
+ return NULL;
+}
+
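+/* Follow the stream device: unregister the amt device together with it and
+ * propagate MTU changes minus the AMT encapsulation overhead.
+ */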
+static int amt_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net_device *upper_dev;
+ struct amt_dev *amt;
+ LIST_HEAD(list);
+ int new_mtu;
+
+ upper_dev = amt_lookup_upper_dev(dev);
+ if (!upper_dev)
+ return NOTIFY_DONE;
+ amt = netdev_priv(upper_dev);
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ amt_dellink(amt->dev, &list);
+ unregister_netdevice_many(&list);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (amt->mode == AMT_MODE_RELAY)
+ new_mtu = dev->mtu - AMT_RELAY_HLEN;
+ else
+ new_mtu = dev->mtu - AMT_GW_HLEN;
+
+ dev_set_mtu(amt->dev, new_mtu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block amt_notifier_block __read_mostly = {
+ .notifier_call = amt_device_event,
+};
+
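+/* Module init: register the netdevice notifier and rtnl ops, create the
+ * workqueue shared by all amt devices and start the periodic source GC work.
+ */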
+static int __init amt_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&amt_notifier_block);
+ if (err < 0)
+ goto err;
+
+ err = rtnl_link_register(&amt_link_ops);
+ if (err < 0)
+ goto unregister_notifier;
+
+	amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1);
+	if (!amt_wq) {
+		err = -ENOMEM;
+		goto rtnl_unregister;
+	}
+
+ spin_lock_init(&source_gc_lock);
+ spin_lock_bh(&source_gc_lock);
+ INIT_DELAYED_WORK(&source_gc_wq, amt_source_gc_work);
+ mod_delayed_work(amt_wq, &source_gc_wq,
+ msecs_to_jiffies(AMT_GC_INTERVAL));
+ spin_unlock_bh(&source_gc_lock);
+
+ return 0;
+
+rtnl_unregister:
+ rtnl_link_unregister(&amt_link_ops);
+unregister_notifier:
+ unregister_netdevice_notifier(&amt_notifier_block);
+err:
+	pr_err("error loading AMT module\n");
+ return err;
+}
+late_initcall(amt_init);
+
+static void __exit amt_fini(void)
+{
+ rtnl_link_unregister(&amt_link_ops);
+ unregister_netdevice_notifier(&amt_notifier_block);
+ flush_delayed_work(&source_gc_wq);
+ __amt_source_gc_work();
+ destroy_workqueue(amt_wq);
+}
+module_exit(amt_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Taehee Yoo <ap420073@gmail.com>");
+MODULE_ALIAS_RTNL_LINK("amt");
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index f0695d68c47e..97f254bdbb16 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -945,8 +945,8 @@ static int cops_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
dev->broadcast[0] = 0xFF;
/* Set hardware address. */
- dev->dev_addr[0] = aa->s_node;
dev->addr_len = 1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
case SIOCGIFADDR:
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index 1f8925e75b3f..388d7b3bd4c2 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -846,9 +846,8 @@ static int ltpc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
set_30 (dev,ltflags);
dev->broadcast[0] = 0xFF;
- dev->dev_addr[0] = aa->s_node;
-
dev->addr_len=1;
+ dev_addr_set(dev, &aa->s_node);
return 0;
diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c
index 12d085405bd0..8c3ccc7c83cd 100644
--- a/drivers/net/arcnet/arc-rimi.c
+++ b/drivers/net/arcnet/arc-rimi.c
@@ -207,7 +207,8 @@ static int __init arcrimi_found(struct net_device *dev)
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
arc_printk(D_NORMAL, dev, "ARCnet RIM I: station %02Xh found at IRQ %d, ShMem %lXh (%ld*%d bytes)\n",
dev->dev_addr[0],
@@ -324,7 +325,7 @@ static int __init arc_rimi_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->mem_start = io;
dev->irq = irq;
diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h
index 5d4a4c7efbbf..19e996a829c9 100644
--- a/drivers/net/arcnet/arcdevice.h
+++ b/drivers/net/arcnet/arcdevice.h
@@ -364,6 +364,11 @@ netdev_tx_t arcnet_send_packet(struct sk_buff *skb,
struct net_device *dev);
void arcnet_timeout(struct net_device *dev, unsigned int txqueue);
+static inline void arcnet_set_addr(struct net_device *dev, u8 addr)
+{
+ dev_addr_set(dev, &addr);
+}
+
/* I/O equivalents */
#ifdef CONFIG_SA1100_CT6001
diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c
index be618e4b9ed5..293a621e654c 100644
--- a/drivers/net/arcnet/com20020-isa.c
+++ b/drivers/net/arcnet/com20020-isa.c
@@ -151,7 +151,7 @@ static int __init com20020_init(void)
return -ENOMEM;
if (node && node != 0xff)
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->netdev_ops = &com20020_netdev_ops;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index 3c8f665c1558..6382e1937cca 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -194,7 +194,7 @@ static int com20020pci_probe(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
dev->base_addr = ioaddr;
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
dev->sysfs_groups[0] = &com20020_state_group;
dev->irq = pdev->irq;
lp->card_name = "PCI COM20020";
diff --git a/drivers/net/arcnet/com20020.c b/drivers/net/arcnet/com20020.c
index 78043a9c5981..06e1651b594b 100644
--- a/drivers/net/arcnet/com20020.c
+++ b/drivers/net/arcnet/com20020.c
@@ -157,7 +157,7 @@ static int com20020_set_hwaddr(struct net_device *dev, void *addr)
struct arcnet_local *lp = netdev_priv(dev);
struct sockaddr *hwaddr = addr;
- memcpy(dev->dev_addr, hwaddr->sa_data, 1);
+ dev_addr_set(dev, hwaddr->sa_data);
com20020_set_subaddress(lp, ioaddr, SUB_NODE);
arcnet_outb(dev->dev_addr[0], ioaddr, COM20020_REG_W_XREG);
@@ -220,7 +220,7 @@ int com20020_found(struct net_device *dev, int shared)
/* FIXME: do this some other way! */
if (!dev->dev_addr[0])
- dev->dev_addr[0] = arcnet_inb(ioaddr, 8);
+ arcnet_set_addr(dev, arcnet_inb(ioaddr, 8));
com20020_set_subaddress(lp, ioaddr, SUB_SETUP1);
arcnet_outb(lp->setup, ioaddr, COM20020_REG_W_XREG);
diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c
index b88a109b3b15..24150c933fcb 100644
--- a/drivers/net/arcnet/com20020_cs.c
+++ b/drivers/net/arcnet/com20020_cs.c
@@ -133,7 +133,7 @@ static int com20020_probe(struct pcmcia_device *p_dev)
lp->hw.owner = THIS_MODULE;
/* fill in our module parameters as defaults */
- dev->dev_addr[0] = node;
+ arcnet_set_addr(dev, node);
p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
p_dev->resource[0]->end = 16;
diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c
index 3856b447d38e..37b47749fc8b 100644
--- a/drivers/net/arcnet/com90io.c
+++ b/drivers/net/arcnet/com90io.c
@@ -252,7 +252,7 @@ static int __init com90io_found(struct net_device *dev)
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = get_buffer_byte(dev, 1);
+ arcnet_set_addr(dev, get_buffer_byte(dev, 1));
err = register_netdev(dev);
if (err) {
diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c
index d8dfb9ea0de8..f49dae194284 100644
--- a/drivers/net/arcnet/com90xx.c
+++ b/drivers/net/arcnet/com90xx.c
@@ -531,7 +531,8 @@ static int __init com90xx_found(int ioaddr, int airq, u_long shmem,
}
/* get and check the station ID from offset 1 in shmem */
- dev->dev_addr[0] = arcnet_readb(lp->mem_start, COM9026_REG_R_STATION);
+ arcnet_set_addr(dev, arcnet_readb(lp->mem_start,
+ COM9026_REG_R_STATION));
dev->base_addr = ioaddr;
diff --git a/drivers/net/bareudp.c b/drivers/net/bareudp.c
index 54e321a695ce..edffc3489a12 100644
--- a/drivers/net/bareudp.c
+++ b/drivers/net/bareudp.c
@@ -577,11 +577,8 @@ static int bareudp2info(struct nlattr *data[], struct bareudp_conf *conf,
return -EINVAL;
}
- if (data[IFLA_BAREUDP_PORT])
- conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
-
- if (data[IFLA_BAREUDP_ETHERTYPE])
- conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
+ conf->port = nla_get_u16(data[IFLA_BAREUDP_PORT]);
+ conf->ethertype = nla_get_u16(data[IFLA_BAREUDP_ETHERTYPE]);
if (data[IFLA_BAREUDP_SRCPORT_MIN])
conf->sport_min = nla_get_u16(data[IFLA_BAREUDP_SRCPORT_MIN]);
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 7d3752cbf761..2ec8e015c7b3 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -50,7 +50,7 @@ struct arp_pkt {
#pragma pack()
/* Forward declaration */
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match);
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp);
static void rlb_src_unlink(struct bonding *bond, u32 index);
@@ -353,7 +353,8 @@ static struct slave *rlb_next_rx_slave(struct bonding *bond)
*
* Caller must hold RTNL
*/
-static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
+static void rlb_teach_disabled_mac_on_primary(struct bonding *bond,
+ const u8 addr[])
{
struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
@@ -904,7 +905,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
/*********************** tlb/rlb shared functions *********************/
-static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
+static void alb_send_lp_vid(struct slave *slave, const u8 mac_addr[],
__be16 vlan_proto, u16 vid)
{
struct learning_pkt pkt;
@@ -940,7 +941,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
struct alb_walk_data {
struct bonding *bond;
struct slave *slave;
- u8 *mac_addr;
+ const u8 *mac_addr;
bool strict_match;
};
@@ -949,9 +950,9 @@ static int alb_upper_dev_walk(struct net_device *upper,
{
struct alb_walk_data *data = (struct alb_walk_data *)priv->data;
bool strict_match = data->strict_match;
+ const u8 *mac_addr = data->mac_addr;
struct bonding *bond = data->bond;
struct slave *slave = data->slave;
- u8 *mac_addr = data->mac_addr;
struct bond_vlan_tag *tags;
if (is_vlan_dev(upper) &&
@@ -982,7 +983,7 @@ static int alb_upper_dev_walk(struct net_device *upper,
return 0;
}
-static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
+static void alb_send_learning_packets(struct slave *slave, const u8 mac_addr[],
bool strict_match)
{
struct bonding *bond = bond_get_bond_by_slave(slave);
@@ -1006,14 +1007,14 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
rcu_read_unlock();
}
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
+static int alb_set_slave_mac_addr(struct slave *slave, const u8 addr[],
unsigned int len)
{
struct net_device *dev = slave->dev;
struct sockaddr_storage ss;
if (BOND_MODE(slave->bond) == BOND_MODE_TLB) {
- memcpy(dev->dev_addr, addr, len);
+ __dev_addr_set(dev, addr, len);
return 0;
}
@@ -1242,8 +1243,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
res = dev_set_mac_address(slave->dev, addr, NULL);
/* restore net_device's hw address */
- bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
- slave->dev->addr_len);
+ dev_addr_set(slave->dev, tmp_addr);
if (res)
goto unwind;
@@ -1263,8 +1263,7 @@ unwind:
rollback_slave->dev->addr_len);
dev_set_mac_address(rollback_slave->dev,
(struct sockaddr *)&ss, NULL);
- bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
- rollback_slave->dev->addr_len);
+ dev_addr_set(rollback_slave->dev, tmp_addr);
}
return res;
@@ -1727,8 +1726,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
NULL);
- bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
- new_slave->dev->addr_len);
+ dev_addr_set(new_slave->dev, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1761,7 +1759,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
if (res)
return res;
- bond_hw_addr_copy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
/* If there is no curr_active_slave there is nothing else to do.
* Otherwise we'll need to pass the new address to it and handle
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 77dc79a7f574..ff8da720a33a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -923,7 +923,7 @@ static int bond_set_dev_addr(struct net_device *bond_dev,
if (err)
return err;
- memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
+ __dev_addr_set(bond_dev, slave_dev->dev_addr, slave_dev->addr_len);
bond_dev->addr_assign_type = NET_ADDR_STOLEN;
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
return 0;
@@ -4414,7 +4414,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
}
/* success */
- memcpy(bond_dev->dev_addr, ss->__data, bond_dev->addr_len);
+ dev_addr_set(bond_dev, ss->__data);
return 0;
unwind:
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index b9e9842fed94..c48b77167fab 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -811,8 +811,8 @@ int bond_create_sysfs(struct bond_net *bn)
*/
if (ret == -EEXIST) {
/* Is someone being kinky and naming a device bonding_master? */
- if (__dev_get_by_name(bn->net,
- class_attr_bonding_masters.attr.name))
+ if (netdev_name_in_use(bn->net,
+ class_attr_bonding_masters.attr.name))
pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index b06af90a9964..3aea32c9b108 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1170,9 +1170,9 @@ static ssize_t mb0_id_show(struct device *dev,
struct at91_priv *priv = netdev_priv(to_net_dev(dev));
if (priv->mb0_id & CAN_EFF_FLAG)
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", priv->mb0_id);
+ return sysfs_emit(buf, "0x%08x\n", priv->mb0_id);
else
- return snprintf(buf, PAGE_SIZE, "0x%03x\n", priv->mb0_id);
+ return sysfs_emit(buf, "0x%03x\n", priv->mb0_id);
}
static ssize_t mb0_id_store(struct device *dev,
diff --git a/drivers/net/can/dev/bittiming.c b/drivers/net/can/dev/bittiming.c
index f49170eadd54..0509625c3082 100644
--- a/drivers/net/can/dev/bittiming.c
+++ b/drivers/net/can/dev/bittiming.c
@@ -175,27 +175,29 @@ int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
-void can_calc_tdco(struct net_device *dev)
-{
- struct can_priv *priv = netdev_priv(dev);
- const struct can_bittiming *dbt = &priv->data_bittiming;
- struct can_tdc *tdc = &priv->tdc;
- const struct can_tdc_const *tdc_const = priv->tdc_const;
+void can_calc_tdco(struct can_tdc *tdc, const struct can_tdc_const *tdc_const,
+ const struct can_bittiming *dbt,
+ u32 *ctrlmode, u32 ctrlmode_supported)
- if (!tdc_const)
+{
+ if (!tdc_const || !(ctrlmode_supported & CAN_CTRLMODE_TDC_AUTO))
return;
+ *ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+
/* As specified in ISO 11898-1 section 11.3.3 "Transmitter
* delay compensation" (TDC) is only applicable if data BRP is
* one or two.
*/
if (dbt->brp == 1 || dbt->brp == 2) {
- /* Reuse "normal" sample point and convert it to time quanta */
- u32 sample_point_in_tq = can_bit_time(dbt) * dbt->sample_point / 1000;
-
- tdc->tdco = min(sample_point_in_tq, tdc_const->tdco_max);
- } else {
- tdc->tdco = 0;
+ /* Sample point in clock periods */
+ u32 sample_point_in_tc = (CAN_SYNC_SEG + dbt->prop_seg +
+ dbt->phase_seg1) * dbt->brp;
+
+ if (sample_point_in_tc < tdc_const->tdco_min)
+ return;
+ tdc->tdco = min(sample_point_in_tc, tdc_const->tdco_max);
+ *ctrlmode |= CAN_CTRLMODE_TDC_AUTO;
}
}
#endif /* CONFIG_CAN_CALC_BITTIMING */
@@ -209,7 +211,7 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
const struct can_bittiming_const *btc)
{
struct can_priv *priv = netdev_priv(dev);
- int tseg1, alltseg;
+ unsigned int tseg1, alltseg;
u64 brp64;
tseg1 = bt->prop_seg + bt->phase_seg1;
diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c
index 80425636049d..95cca4e5251f 100644
--- a/drivers/net/can/dev/netlink.c
+++ b/drivers/net/can/dev/netlink.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
* Copyright (C) 2006 Andrey Volkov, Varma Electronics
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2021 Vincent Mailhol <mailhol.vincent@wanadoo.fr>
*/
#include <linux/can/dev.h>
@@ -19,6 +20,19 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_DATA_BITTIMING] = { .len = sizeof(struct can_bittiming) },
[IFLA_CAN_DATA_BITTIMING_CONST] = { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_TERMINATION] = { .type = NLA_U16 },
+ [IFLA_CAN_TDC] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy can_tdc_policy[IFLA_CAN_TDC_MAX + 1] = {
+ [IFLA_CAN_TDC_TDCV_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MIN] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF_MAX] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCV] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCO] = { .type = NLA_U32 },
+ [IFLA_CAN_TDC_TDCF] = { .type = NLA_U32 },
};
static int can_validate(struct nlattr *tb[], struct nlattr *data[],
@@ -30,6 +44,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
* - nominal/arbitration bittiming
* - data bittiming
* - control mode with CAN_CTRLMODE_FD set
+ * - TDC parameters are coherent (details below)
*/
if (!data)
@@ -37,8 +52,43 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
if (data[IFLA_CAN_CTRLMODE]) {
struct can_ctrlmode *cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ u32 tdc_flags = cm->flags & CAN_CTRLMODE_TDC_MASK;
is_can_fd = cm->flags & cm->mask & CAN_CTRLMODE_FD;
+
+ /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually exclusive */
+ if (tdc_flags == CAN_CTRLMODE_TDC_MASK)
+ return -EOPNOTSUPP;
+ /* If one of the CAN_CTRLMODE_TDC_* flag is set then
+ * TDC must be set and vice-versa
+ */
+ if (!!tdc_flags != !!data[IFLA_CAN_TDC])
+ return -EOPNOTSUPP;
+ /* If providing TDC parameters, at least TDCO is
+ * needed. TDCV is needed if and only if
+ * CAN_CTRLMODE_TDC_MANUAL is set
+ */
+ if (data[IFLA_CAN_TDC]) {
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ int err;
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX,
+ data[IFLA_CAN_TDC],
+ can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ if (tdc_flags & CAN_CTRLMODE_TDC_AUTO)
+ return -EOPNOTSUPP;
+ } else {
+ if (tdc_flags & CAN_CTRLMODE_TDC_MANUAL)
+ return -EOPNOTSUPP;
+ }
+
+ if (!tb_tdc[IFLA_CAN_TDC_TDCO])
+ return -EOPNOTSUPP;
+ }
}
if (is_can_fd) {
@@ -46,7 +96,7 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return -EOPNOTSUPP;
}
- if (data[IFLA_CAN_DATA_BITTIMING]) {
+ if (data[IFLA_CAN_DATA_BITTIMING] || data[IFLA_CAN_TDC]) {
if (!is_can_fd)
return -EOPNOTSUPP;
}
@@ -54,11 +104,60 @@ static int can_validate(struct nlattr *tb[], struct nlattr *data[],
return 0;
}
+static int can_tdc_changelink(struct can_priv *priv, const struct nlattr *nla,
+ struct netlink_ext_ack *extack)
+{
+ struct nlattr *tb_tdc[IFLA_CAN_TDC_MAX + 1];
+ struct can_tdc tdc = { 0 };
+ const struct can_tdc_const *tdc_const = priv->tdc_const;
+ int err;
+
+ if (!tdc_const || !can_tdc_is_enabled(priv))
+ return -EOPNOTSUPP;
+
+ err = nla_parse_nested(tb_tdc, IFLA_CAN_TDC_MAX, nla,
+ can_tdc_policy, extack);
+ if (err)
+ return err;
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCV]) {
+ u32 tdcv = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCV]);
+
+ if (tdcv < tdc_const->tdcv_min || tdcv > tdc_const->tdcv_max)
+ return -EINVAL;
+
+ tdc.tdcv = tdcv;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCO]) {
+ u32 tdco = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCO]);
+
+ if (tdco < tdc_const->tdco_min || tdco > tdc_const->tdco_max)
+ return -EINVAL;
+
+ tdc.tdco = tdco;
+ }
+
+ if (tb_tdc[IFLA_CAN_TDC_TDCF]) {
+ u32 tdcf = nla_get_u32(tb_tdc[IFLA_CAN_TDC_TDCF]);
+
+ if (tdcf < tdc_const->tdcf_min || tdcf > tdc_const->tdcf_max)
+ return -EINVAL;
+
+ tdc.tdcf = tdcf;
+ }
+
+ priv->tdc = tdc;
+
+ return 0;
+}
+
static int can_changelink(struct net_device *dev, struct nlattr *tb[],
struct nlattr *data[],
struct netlink_ext_ack *extack)
{
struct can_priv *priv = netdev_priv(dev);
+ u32 tdc_mask = 0;
int err;
/* We need synchronization with dev->stop() */
@@ -138,7 +237,16 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
dev->mtu = CAN_MTU;
memset(&priv->data_bittiming, 0,
sizeof(priv->data_bittiming));
+ priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ memset(&priv->tdc, 0, sizeof(priv->tdc));
}
+
+ tdc_mask = cm->mask & CAN_CTRLMODE_TDC_MASK;
+ /* CAN_CTRLMODE_TDC_{AUTO,MANUAL} are mutually
+ * exclusive: make sure to turn the other one off
+ */
+ if (tdc_mask)
+ priv->ctrlmode &= cm->flags | ~CAN_CTRLMODE_TDC_MASK;
}
if (data[IFLA_CAN_RESTART_MS]) {
@@ -187,9 +295,26 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return -EINVAL;
}
- memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
+ memset(&priv->tdc, 0, sizeof(priv->tdc));
+ if (data[IFLA_CAN_TDC]) {
+ /* TDC parameters are provided: use them */
+ err = can_tdc_changelink(priv, data[IFLA_CAN_TDC],
+ extack);
+ if (err) {
+ priv->ctrlmode &= ~CAN_CTRLMODE_TDC_MASK;
+ return err;
+ }
+ } else if (!tdc_mask) {
+ /* Neither of TDC parameters nor TDC flags are
+ * provided: do calculation
+ */
+ can_calc_tdco(&priv->tdc, priv->tdc_const, &priv->data_bittiming,
+ &priv->ctrlmode, priv->ctrlmode_supported);
+ } /* else: both CAN_CTRLMODE_TDC_{AUTO,MANUAL} are explicitly
+ * turned off. TDC is disabled: do nothing
+ */
- can_calc_tdco(dev);
+ memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
if (priv->do_set_data_bittiming) {
/* Finally, set the bit-timing registers */
@@ -226,6 +351,38 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[],
return 0;
}
+static size_t can_tdc_get_size(const struct net_device *dev)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ size_t size;
+
+ if (!priv->tdc_const)
+ return 0;
+
+ size = nla_total_size(0); /* nest IFLA_CAN_TDC */
+ if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV_MAX */
+ }
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO_MAX */
+ if (priv->tdc_const->tdcf_max) {
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MIN */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF_MAX */
+ }
+
+ if (can_tdc_is_enabled(priv)) {
+ if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL ||
+ priv->do_get_auto_tdcv)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCV */
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCO */
+ if (priv->tdc_const->tdcf_max)
+ size += nla_total_size(sizeof(u32)); /* IFLA_CAN_TDCF */
+ }
+
+ return size;
+}
+
static size_t can_get_size(const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -257,10 +414,64 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(*priv->data_bitrate_const) *
priv->data_bitrate_const_cnt);
size += sizeof(priv->bitrate_max); /* IFLA_CAN_BITRATE_MAX */
+ size += can_tdc_get_size(dev); /* IFLA_CAN_TDC */
return size;
}
+static int can_tdc_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct nlattr *nest;
+ struct can_priv *priv = netdev_priv(dev);
+ struct can_tdc *tdc = &priv->tdc;
+ const struct can_tdc_const *tdc_const = priv->tdc_const;
+
+ if (!tdc_const)
+ return 0;
+
+ nest = nla_nest_start(skb, IFLA_CAN_TDC);
+ if (!nest)
+ return -EMSGSIZE;
+
+ if (priv->ctrlmode_supported & CAN_CTRLMODE_TDC_MANUAL &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MIN, tdc_const->tdcv_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCV_MAX, tdc_const->tdcv_max)))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MIN, tdc_const->tdco_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCO_MAX, tdc_const->tdco_max))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ (nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MIN, tdc_const->tdcf_min) ||
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF_MAX, tdc_const->tdcf_max)))
+ goto err_cancel;
+
+ if (can_tdc_is_enabled(priv)) {
+ u32 tdcv;
+ int err = -EINVAL;
+
+ if (priv->ctrlmode & CAN_CTRLMODE_TDC_MANUAL) {
+ tdcv = tdc->tdcv;
+ err = 0;
+ } else if (priv->do_get_auto_tdcv) {
+ err = priv->do_get_auto_tdcv(dev, &tdcv);
+ }
+ if (!err && nla_put_u32(skb, IFLA_CAN_TDC_TDCV, tdcv))
+ goto err_cancel;
+ if (nla_put_u32(skb, IFLA_CAN_TDC_TDCO, tdc->tdco))
+ goto err_cancel;
+ if (tdc_const->tdcf_max &&
+ nla_put_u32(skb, IFLA_CAN_TDC_TDCF, tdc->tdcf))
+ goto err_cancel;
+ }
+
+ nla_nest_end(skb, nest);
+ return 0;
+
+err_cancel:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
@@ -318,7 +529,9 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(nla_put(skb, IFLA_CAN_BITRATE_MAX,
sizeof(priv->bitrate_max),
- &priv->bitrate_max))
+ &priv->bitrate_max)) ||
+
+ (can_tdc_fill_info(skb, dev))
)
return -EMSGSIZE;
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 7734229aa078..12b60ad95b02 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -290,31 +290,33 @@ struct flexcan_regs {
u32 dbg1; /* 0x58 */
u32 dbg2; /* 0x5c */
u32 _reserved3[8]; /* 0x60 */
- u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
- /* FIFO-mode:
- * MB
- * 0x080...0x08f 0 RX message buffer
- * 0x090...0x0df 1-5 reserved
- * 0x0e0...0x0ff 6-7 8 entry ID table
- * (mx25, mx28, mx35, mx53)
- * 0x0e0...0x2df 6-7..37 8..128 entry ID table
- * size conf'ed via ctrl2::RFFN
- * (mx6, vf610)
- */
- u32 _reserved4[256]; /* 0x480 */
- u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
- u32 _reserved5[24]; /* 0x980 */
- u32 gfwr_mx6; /* 0x9e0 - MX6 */
- u32 _reserved6[39]; /* 0x9e4 */
- u32 _rxfir[6]; /* 0xa80 */
- u32 _reserved8[2]; /* 0xa98 */
- u32 _rxmgmask; /* 0xaa0 */
- u32 _rxfgmask; /* 0xaa4 */
- u32 _rx14mask; /* 0xaa8 */
- u32 _rx15mask; /* 0xaac */
- u32 tx_smb[4]; /* 0xab0 */
- u32 rx_smb0[4]; /* 0xac0 */
- u32 rx_smb1[4]; /* 0xad0 */
+ struct_group(init,
+ u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */
+ /* FIFO-mode:
+ * MB
+ * 0x080...0x08f 0 RX message buffer
+ * 0x090...0x0df 1-5 reserved
+ * 0x0e0...0x0ff 6-7 8 entry ID table
+ * (mx25, mx28, mx35, mx53)
+ * 0x0e0...0x2df 6-7..37 8..128 entry ID table
+ * size conf'ed via ctrl2::RFFN
+ * (mx6, vf610)
+ */
+ u32 _reserved4[256]; /* 0x480 */
+ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */
+ u32 _reserved5[24]; /* 0x980 */
+ u32 gfwr_mx6; /* 0x9e0 - MX6 */
+ u32 _reserved6[39]; /* 0x9e4 */
+ u32 _rxfir[6]; /* 0xa80 */
+ u32 _reserved8[2]; /* 0xa98 */
+ u32 _rxmgmask; /* 0xaa0 */
+ u32 _rxfgmask; /* 0xaa4 */
+ u32 _rx14mask; /* 0xaa8 */
+ u32 _rx15mask; /* 0xaac */
+ u32 tx_smb[4]; /* 0xab0 */
+ u32 rx_smb0[4]; /* 0xac0 */
+ u32 rx_smb1[4]; /* 0xad0 */
+ );
u32 mecr; /* 0xae0 */
u32 erriar; /* 0xae4 */
u32 erridpr; /* 0xae8 */
@@ -328,9 +330,11 @@ struct flexcan_regs {
u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */
u32 fdcrc; /* 0xc08 */
u32 _reserved9[199]; /* 0xc0c */
- u32 tx_smb_fd[18]; /* 0xf28 */
- u32 rx_smb0_fd[18]; /* 0xf70 */
- u32 rx_smb1_fd[18]; /* 0xfb8 */
+ struct_group(init_fd,
+ u32 tx_smb_fd[18]; /* 0xf28 */
+ u32 rx_smb0_fd[18]; /* 0xf70 */
+ u32 rx_smb1_fd[18]; /* 0xfb8 */
+ );
};
static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8);
@@ -1400,14 +1404,10 @@ static void flexcan_ram_init(struct net_device *dev)
reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
- memset_io(&regs->mb[0][0], 0,
- offsetof(struct flexcan_regs, rx_smb1[3]) -
- offsetof(struct flexcan_regs, mb[0][0]) + 0x4);
+ memset_io(&regs->init, 0, sizeof(regs->init));
if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
- memset_io(&regs->tx_smb_fd[0], 0,
- offsetof(struct flexcan_regs, rx_smb1_fd[17]) -
- offsetof(struct flexcan_regs, tx_smb_fd[0]) + 0x4);
+ memset_io(&regs->init_fd, 0, sizeof(regs->init_fd));
reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ;
priv->write(reg_ctrl2, &regs->ctrl2);
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index c68ad56628bd..32006dbf5abd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1831,7 +1831,7 @@ static ssize_t termination_show(struct device *dev,
return -ETIMEDOUT;
}
- return snprintf(buf, PAGE_SIZE, "%u\n", mod->termination_enabled);
+ return sysfs_emit(buf, "%u\n", mod->termination_enabled);
}
static ssize_t termination_store(struct device *dev,
diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c
index 308d4f2fff00..eee47bad0592 100644
--- a/drivers/net/can/m_can/m_can_platform.c
+++ b/drivers/net/can/m_can/m_can_platform.c
@@ -32,8 +32,13 @@ static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
static int iomap_read_fifo(struct m_can_classdev *cdev, int offset, void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *src = priv->mram_base + offset;
- ioread32_rep(priv->mram_base + offset, val, val_count);
+ while (val_count--) {
+ *(unsigned int *)val = ioread32(src);
+ val += 4;
+ src += 4;
+ }
return 0;
}
@@ -51,8 +56,13 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
const void *val, size_t val_count)
{
struct m_can_plat_priv *priv = cdev_to_priv(cdev);
+ void __iomem *dst = priv->mram_base + offset;
- iowrite32_rep(priv->base + offset, val, val_count);
+ while (val_count--) {
+ iowrite32(*(unsigned int *)val, dst);
+ val += 4;
+ dst += 4;
+ }
return 0;
}
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 35892c1efef0..de4ddf79ba9b 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -293,10 +293,8 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev)
return -EINVAL;
base = of_iomap(np, 0);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap\n");
- return err;
- }
+ if (!base)
+ return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n");
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig
index 56320a7f828b..c66762ef631b 100644
--- a/drivers/net/can/rcar/Kconfig
+++ b/drivers/net/can/rcar/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
config CAN_RCAR
tristate "Renesas R-Car and RZ/G CAN controller"
- depends on ARCH_RENESAS || ARM || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN controller found on Renesas R-Car
or RZ/G SoCs.
@@ -11,7 +11,7 @@ config CAN_RCAR
config CAN_RCAR_CANFD
tristate "Renesas R-Car CAN FD controller"
- depends on ARCH_RENESAS || ARM || COMPILE_TEST
+ depends on ARCH_RENESAS || COMPILE_TEST
help
Say Y here if you want to use CAN FD controller found on
Renesas R-Car SoCs. The driver puts the controller in CAN FD only
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 00e4533c8bdd..8999ec9455ec 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -846,10 +846,12 @@ static int __maybe_unused rcar_can_suspend(struct device *dev)
struct rcar_can_priv *priv = netdev_priv(ndev);
u16 ctlr;
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
+ if (!netif_running(ndev))
+ return 0;
+
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+
ctlr = readw(&priv->regs->ctlr);
ctlr |= RCAR_CAN_CTLR_CANM_HALT;
writew(ctlr, &priv->regs->ctlr);
@@ -868,6 +870,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
u16 ctlr;
int err;
+ if (!netif_running(ndev))
+ return 0;
+
err = clk_enable(priv->clk);
if (err) {
netdev_err(ndev, "clk_enable() failed, error %d\n", err);
@@ -881,10 +886,9 @@ static int __maybe_unused rcar_can_resume(struct device *dev)
writew(ctlr, &priv->regs->ctlr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- if (netif_running(ndev)) {
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+
return 0;
}
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 6db90dc4bc9d..84f34020aafb 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -752,16 +752,15 @@ static void peak_pci_remove(struct pci_dev *pdev)
struct net_device *prev_dev = chan->prev_dev;
dev_info(&pdev->dev, "removing device %s\n", dev->name);
+ /* do that only for first channel */
+ if (!prev_dev && chan->pciec_card)
+ peak_pciec_remove(chan->pciec_card);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
dev = prev_dev;
- if (!dev) {
- /* do that only for first channel */
- if (chan->pciec_card)
- peak_pciec_remove(chan->pciec_card);
+ if (!dev)
break;
- }
priv = netdev_priv(dev);
chan = priv->priv;
}
diff --git a/drivers/net/can/usb/etas_es58x/es581_4.h b/drivers/net/can/usb/etas_es58x/es581_4.h
index 4bc60a6df697..667ecb77168c 100644
--- a/drivers/net/can/usb/etas_es58x/es581_4.h
+++ b/drivers/net/can/usb/etas_es58x/es581_4.h
@@ -192,7 +192,7 @@ struct es581_4_urb_cmd {
struct es581_4_rx_cmd_ret rx_cmd_ret;
__le64 timestamp;
u8 rx_cmd_ret_u8;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.c b/drivers/net/can/usb/etas_es58x/es58x_fd.c
index af042aa55f59..4f0cae29f4d8 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.c
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.c
@@ -428,7 +428,7 @@ static int es58x_fd_enable_channel(struct es58x_priv *priv)
es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming,
&priv->can.data_bittiming);
- if (priv->can.tdc.tdco) {
+ if (can_tdc_is_enabled(&priv->can)) {
tx_conf_msg.tdc_enabled = 1;
tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco);
tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf);
@@ -505,8 +505,11 @@ static const struct can_bittiming_const es58x_fd_data_bittiming_const = {
* Register" from Microchip.
*/
static const struct can_tdc_const es58x_tdc_const = {
+ .tdcv_min = 0,
.tdcv_max = 0, /* Manual mode not supported. */
+ .tdco_min = 0,
.tdco_max = 127,
+ .tdcf_min = 0,
.tdcf_max = 127
};
@@ -523,7 +526,7 @@ const struct es58x_parameters es58x_fd_param = {
.clock = {.freq = 80 * CAN_MHZ},
.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO |
- CAN_CTRLMODE_CC_LEN8_DLC,
+ CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO,
.tx_start_of_frame = 0xCEFA, /* FACE in little endian */
.rx_start_of_frame = 0xFECA, /* CAFE in little endian */
.tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN,
diff --git a/drivers/net/can/usb/etas_es58x/es58x_fd.h b/drivers/net/can/usb/etas_es58x/es58x_fd.h
index a191891b8777..c4b19a6a33ae 100644
--- a/drivers/net/can/usb/etas_es58x/es58x_fd.h
+++ b/drivers/net/can/usb/etas_es58x/es58x_fd.h
@@ -219,7 +219,7 @@ struct es58x_fd_urb_cmd {
struct es58x_fd_tx_ack_msg tx_ack_msg;
__le64 timestamp;
__le32 rx_cmd_ret_le32;
- u8 raw_msg[0];
+ DECLARE_FLEX_ARRAY(u8, raw_msg);
} __packed;
__le16 reserved_for_crc16_do_not_use;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 5e892bef46b0..1b400de00f51 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -352,7 +352,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
} else { /* echo_id == hf->echo_id */
if (hf->echo_id >= GS_MAX_TX_URBS) {
netdev_err(netdev,
- "Unexpected out of range echo id %d\n",
+ "Unexpected out of range echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}
@@ -365,7 +365,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb)
/* bad devices send bad echo_ids. */
if (!txc) {
netdev_err(netdev,
- "Unexpected unused echo id %d\n",
+ "Unexpected unused echo id %u\n",
hf->echo_id);
goto resubmit_urb;
}
@@ -458,7 +458,7 @@ static void gs_usb_xmit_callback(struct urb *urb)
struct net_device *netdev = dev->netdev;
if (urb->status)
- netdev_info(netdev, "usb xmit fail %d\n", txc->echo_id);
+ netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
usb_free_coherent(urb->dev,
urb->transfer_buffer_length,
@@ -501,7 +501,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
idx = txc->echo_id;
if (idx >= GS_MAX_TX_URBS) {
- netdev_err(netdev, "Invalid tx context %d\n", idx);
+ netdev_err(netdev, "Invalid tx context %u\n", idx);
goto badidx;
}
@@ -964,11 +964,11 @@ static int gs_usb_probe(struct usb_interface *intf,
}
icount = dconf->icount + 1;
- dev_info(&intf->dev, "Configuring for %d interfaces\n", icount);
+ dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);
if (icount > GS_MAX_INTF) {
dev_err(&intf->dev,
- "Driver cannot handle more that %d CAN interfaces\n",
+			"Driver cannot handle more than %u CAN interfaces\n",
GS_MAX_INTF);
kfree(dconf);
return -EINVAL;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
index e8f43ed90b72..6107fef9f4a0 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c
@@ -205,6 +205,19 @@ int peak_usb_netif_rx(struct sk_buff *skb,
return netif_rx(skb);
}
+/* post received skb with native 64-bit hw timestamp */
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high)
+{
+ struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb);
+ u64 ns_ts;
+
+ ns_ts = (u64)ts_high << 32 | ts_low;
+ ns_ts *= NSEC_PER_USEC;
+ hwts->hwtstamp = ns_to_ktime(ns_ts);
+
+ return netif_rx(skb);
+}
+
/*
* callback for bulk Rx urb
*/
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
index b00a4811bf61..daa19f57e742 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h
@@ -143,6 +143,7 @@ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now);
void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv);
int peak_usb_netif_rx(struct sk_buff *skb,
struct peak_time_ref *time_ref, u32 ts_low);
+int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high);
void peak_usb_async_complete(struct urb *urb);
void peak_usb_restart_complete(struct peak_usb_device *dev);
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
index b11eabad575b..6bd12549f101 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c
@@ -515,7 +515,8 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if,
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cfd->len;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low),
+ le32_to_cpu(rm->ts_high));
return 0;
}
@@ -551,11 +552,10 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
} else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) {
new_state = CAN_STATE_ERROR_WARNING;
} else {
- /* no error bit (so, no error skb, back to active state) */
- dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ /* back to (or still in) ERROR_ACTIVE state */
+ new_state = CAN_STATE_ERROR_ACTIVE;
pdev->bec.txerr = 0;
pdev->bec.rxerr = 0;
- return 0;
}
/* state hasn't changed */
@@ -568,8 +568,7 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
/* allocate an skb to store the error frame */
skb = alloc_can_err_skb(netdev, &cf);
- if (skb)
- can_change_state(netdev, cf, tx_state, rx_state);
+ can_change_state(netdev, cf, tx_state, rx_state);
/* things must be done even in case of OOM */
if (new_state == CAN_STATE_BUS_OFF)
@@ -581,7 +580,8 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if,
netdev->stats.rx_packets++;
netdev->stats.rx_bytes += cf->len;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low),
+ le32_to_cpu(sm->ts_high));
return 0;
}
@@ -631,7 +631,8 @@ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if,
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
- peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(ov->ts_low));
+ peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low),
+ le32_to_cpu(ov->ts_high));
netdev->stats.rx_over_errors++;
netdev->stats.rx_errors++;
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 3b883e607d8b..e2b15d29d15e 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -516,8 +516,7 @@ static int xcan_chip_start(struct net_device *ndev)
* @ndev: Pointer to net_device structure
* @mode: Tells the mode of the driver
*
- * This check the drivers state and calls the
- * the corresponding modes to set.
+ * This checks the driver's state and sets the corresponding mode.
*
* Return: 0 on success and failure value on error
*/
@@ -982,7 +981,7 @@ static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
* @isr: interrupt status register value
*
* This is the CAN error interrupt and it will
- * check the the type of error and forward the error
+ * check the type of error and forward the error
* frame to upper layers.
*/
static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
@@ -1844,11 +1843,9 @@ err:
static int xcan_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
- struct xcan_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
pm_runtime_disable(&pdev->dev);
- netif_napi_del(&priv->napi);
free_candev(ndev);
return 0;
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
index a5f1aa911fe2..7b1457a6e327 100644
--- a/drivers/net/dsa/Kconfig
+++ b/drivers/net/dsa/Kconfig
@@ -70,6 +70,7 @@ config NET_DSA_QCA8K
config NET_DSA_REALTEK_SMI
tristate "Realtek SMI Ethernet switch family support"
select NET_DSA_TAG_RTL4_A
+ select NET_DSA_TAG_RTL8_4
select FIXED_PHY
select IRQ_DOMAIN
select REALTEK_PHY
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index f3598c040994..8da1569a34e6 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_NET_DSA_MT7530) += mt7530.o
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-$(CONFIG_NET_DSA_REALTEK_SMI) += realtek-smi.o
-realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o
+realtek-smi-objs := realtek-smi-core.o rtl8366.o rtl8366rb.o rtl8365mb.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303) += lan9303-core.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_I2C) += lan9303_i2c.o
obj-$(CONFIG_NET_DSA_SMSC_LAN9303_MDIO) += lan9303_mdio.o
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 604f54112665..af4761968733 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1222,7 +1222,7 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
/* Enable flow control on BCM5301x's CPU port */
- if (is5301x(dev) && port == dev->cpu_port)
+ if (is5301x(dev) && dsa_is_cpu_port(ds, port))
tx_pause = rx_pause = true;
if (phydev->pause) {
@@ -1291,12 +1291,6 @@ static void b53_adjust_link(struct dsa_switch *ds, int port,
return;
}
}
- } else if (is5301x(dev)) {
- if (port != dev->cpu_port) {
- b53_force_port_config(dev, dev->cpu_port, 2000,
- DUPLEX_FULL, true, true);
- b53_force_link(dev, dev->cpu_port, 1);
- }
}
/* Re-negotiate EEE if it was enabled already */
@@ -1349,10 +1343,8 @@ void b53_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
phylink_helper_basex_speed(state);
}
@@ -1550,7 +1542,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_vlan_del);
-/* Address Resolution Logic routines */
+/* Address Resolution Logic routines. Caller must hold &dev->arl_mutex. */
static int b53_arl_op_wait(struct b53_device *dev)
{
unsigned int timeout = 10;
@@ -1715,6 +1707,7 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct b53_device *priv = ds->priv;
+ int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
@@ -1722,7 +1715,11 @@ int b53_fdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return b53_arl_op(priv, 0, port, addr, vid, true);
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
}
EXPORT_SYMBOL(b53_fdb_add);
@@ -1730,8 +1727,13 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
const unsigned char *addr, u16 vid)
{
struct b53_device *priv = ds->priv;
+ int ret;
+
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, addr, vid, false);
+ mutex_unlock(&priv->arl_mutex);
- return b53_arl_op(priv, 0, port, addr, vid, false);
+ return ret;
}
EXPORT_SYMBOL(b53_fdb_del);
@@ -1788,6 +1790,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
int ret;
u8 reg;
+ mutex_lock(&priv->arl_mutex);
+
/* Start search operation */
reg = ARL_SRCH_STDN;
b53_write8(priv, B53_ARLIO_PAGE, B53_ARL_SRCH_CTL, reg);
@@ -1795,18 +1799,18 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
do {
ret = b53_arl_search_wait(priv);
if (ret)
- return ret;
+ break;
b53_arl_search_rd(priv, 0, &results[0]);
ret = b53_fdb_copy(port, &results[0], cb, data);
if (ret)
- return ret;
+ break;
if (priv->num_arl_bins > 2) {
b53_arl_search_rd(priv, 1, &results[1]);
ret = b53_fdb_copy(port, &results[1], cb, data);
if (ret)
- return ret;
+ break;
if (!results[0].is_valid && !results[1].is_valid)
break;
@@ -1814,6 +1818,8 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
} while (count++ < b53_max_arl_entries(priv) / 2);
+ mutex_unlock(&priv->arl_mutex);
+
return 0;
}
EXPORT_SYMBOL(b53_fdb_dump);
@@ -1822,6 +1828,7 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_mdb *mdb)
{
struct b53_device *priv = ds->priv;
+ int ret;
/* 5325 and 5365 require some more massaging, but could
* be supported eventually
@@ -1829,7 +1836,11 @@ int b53_mdb_add(struct dsa_switch *ds, int port,
if (is5325(priv) || is5365(priv))
return -EOPNOTSUPP;
- return b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ mutex_lock(&priv->arl_mutex);
+ ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, true);
+ mutex_unlock(&priv->arl_mutex);
+
+ return ret;
}
EXPORT_SYMBOL(b53_mdb_add);
@@ -1839,7 +1850,9 @@ int b53_mdb_del(struct dsa_switch *ds, int port,
struct b53_device *priv = ds->priv;
int ret;
+ mutex_lock(&priv->arl_mutex);
ret = b53_arl_op(priv, 0, port, mdb->addr, mdb->vid, false);
+ mutex_unlock(&priv->arl_mutex);
if (ret)
dev_err(ds->dev, "failed to delete MDB entry\n");
@@ -2302,33 +2315,30 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5325_DEVICE_ID,
.dev_name = "BCM5325",
.vlans = 16,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5365_DEVICE_ID,
.dev_name = "BCM5365",
.vlans = 256,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x3f,
.arl_bins = 2,
.arl_buckets = 1024,
.imp_port = 5,
- .cpu_port = B53_CPU_PORT_25,
.duplex_reg = B53_DUPLEX_STAT_FE,
},
{
.chip_id = BCM5389_DEVICE_ID,
.dev_name = "BCM5389",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2338,11 +2348,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5395_DEVICE_ID,
.dev_name = "BCM5395",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2352,11 +2361,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5397_DEVICE_ID,
.dev_name = "BCM5397",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2366,11 +2374,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM5398_DEVICE_ID,
.dev_name = "BCM5398",
.vlans = 4096,
- .enabled_ports = 0x7f,
+ .enabled_ports = 0x17f,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_9798,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2380,12 +2387,11 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53115_DEVICE_ID,
.dev_name = "BCM53115",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x11f,
.arl_bins = 4,
.arl_buckets = 1024,
.vta_regs = B53_VTA_REGS,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
@@ -2394,11 +2400,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53125_DEVICE_ID,
.dev_name = "BCM53125",
.vlans = 4096,
- .enabled_ports = 0xff,
+ .enabled_ports = 0x1ff,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2412,7 +2417,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2426,7 +2430,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS_63XX,
.duplex_reg = B53_DUPLEX_STAT_63XX,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK_63XX,
@@ -2436,11 +2439,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53010_DEVICE_ID,
.dev_name = "BCM53010",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2454,7 +2456,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2468,7 +2469,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2478,11 +2478,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53018_DEVICE_ID,
.dev_name = "BCM53018",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2492,11 +2491,10 @@ static const struct b53_chip_data b53_switch_chips[] = {
.chip_id = BCM53019_DEVICE_ID,
.dev_name = "BCM53019",
.vlans = 4096,
- .enabled_ports = 0x1f,
+ .enabled_ports = 0x1bf,
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT_25, /* TODO: auto detect */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2510,7 +2508,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2524,7 +2521,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2539,7 +2535,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = 8, /* TODO: ports 4, 5, 8 */
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2553,7 +2548,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 1024,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2567,7 +2561,6 @@ static const struct b53_chip_data b53_switch_chips[] = {
.arl_bins = 4,
.arl_buckets = 256,
.imp_port = 8,
- .cpu_port = B53_CPU_PORT,
.vta_regs = B53_VTA_REGS,
.duplex_reg = B53_DUPLEX_STAT_GE,
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
@@ -2593,7 +2586,6 @@ static int b53_switch_init(struct b53_device *dev)
dev->vta_regs[2] = chip->vta_regs[2];
dev->jumbo_pm_reg = chip->jumbo_pm_reg;
dev->imp_port = chip->imp_port;
- dev->cpu_port = chip->cpu_port;
dev->num_vlans = chip->vlans;
dev->num_arl_bins = chip->arl_bins;
dev->num_arl_buckets = chip->arl_buckets;
@@ -2625,16 +2617,8 @@ static int b53_switch_init(struct b53_device *dev)
break;
#endif
}
- } else if (dev->chip_id == BCM53115_DEVICE_ID) {
- u64 strap_value;
-
- b53_read48(dev, B53_STAT_PAGE, B53_STRAP_VALUE, &strap_value);
- /* use second IMP port if GMII is enabled */
- if (strap_value & SV_GMII_CTRL_115)
- dev->cpu_port = 5;
}
- dev->enabled_ports |= BIT(dev->cpu_port);
dev->num_ports = fls(dev->enabled_ports);
dev->ds->num_ports = min_t(unsigned int, dev->num_ports, DSA_MAX_PORTS);
@@ -2705,6 +2689,7 @@ struct b53_device *b53_switch_alloc(struct device *base,
mutex_init(&dev->reg_mutex);
mutex_init(&dev->stats_mutex);
+ mutex_init(&dev->arl_mutex);
return dev;
}
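
The b53 changes above introduce a dedicated arl_mutex so that each multi-step ARL transaction (FDB/MDB add, delete and dump) runs as one critical section instead of racing on the shared search/read/write registers. A minimal, generic sketch of the guard pattern (all names here are illustrative, not the driver's):

#include <linux/mutex.h>

struct arl_ctx {
	struct mutex arl_mutex;		/* serializes ARL register sequences */
};

/* Run a complete ARL transaction under the lock and propagate its result;
 * the callback stands in for the wait/write/read register sequence. */
static int arl_transaction(struct arl_ctx *ctx, int (*op)(struct arl_ctx *))
{
	int ret;

	mutex_lock(&ctx->arl_mutex);
	ret = op(ctx);
	mutex_unlock(&ctx->arl_mutex);

	return ret;
}
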
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 959a52d41f0a..579da74ada64 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -107,6 +107,7 @@ struct b53_device {
struct mutex reg_mutex;
struct mutex stats_mutex;
+ struct mutex arl_mutex;
const struct b53_io_ops *ops;
/* chip specific data */
@@ -124,7 +125,6 @@ struct b53_device {
/* used ports mask */
u16 enabled_ports;
unsigned int imp_port;
- unsigned int cpu_port;
/* connect specific data */
u8 current_page;
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 7578a5c38df5..13aa43b5cffd 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -667,7 +667,9 @@ static u32 bcm_sf2_sw_get_phy_flags(struct dsa_switch *ds, int port)
if (priv->int_phy_mask & BIT(port))
return priv->hw_params.gphy_rev;
else
- return 0;
+ return PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
}
static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
@@ -683,7 +685,7 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
state->interface != PHY_INTERFACE_MODE_GMII &&
state->interface != PHY_INTERFACE_MODE_INTERNAL &&
state->interface != PHY_INTERFACE_MODE_MOCA) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
if (port != core_readl(priv, CORE_IMP0_PRT_ID))
dev_err(ds->dev,
"Unsupported interface: %d for port %d\n",
@@ -711,10 +713,8 @@ static void bcm_sf2_sw_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void bcm_sf2_sw_mac_config(struct dsa_switch *ds, int port,
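
The repeated conversions in these phylink validate() callbacks rely on the linkmode helpers, which are simply the bitmap operations with the ethtool link-mode width (__ETHTOOL_LINK_MODE_MASK_NBITS) baked in. A sketch of the resulting validate() tail (function name illustrative):

#include <linux/linkmode.h>
#include <linux/phylink.h>

/* Intersect the supported and advertised link modes with the mask built by
 * the driver; equivalent to the open-coded bitmap_and() calls removed in
 * the hunks above. */
static void validate_tail(unsigned long *supported,
			  struct phylink_link_state *state,
			  const unsigned long *mask)
{
	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}
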
diff --git a/drivers/net/dsa/hirschmann/hellcreek.c b/drivers/net/dsa/hirschmann/hellcreek.c
index 354655f9ed00..4e0b53d94b52 100644
--- a/drivers/net/dsa/hirschmann/hellcreek.c
+++ b/drivers/net/dsa/hirschmann/hellcreek.c
@@ -1403,10 +1403,8 @@ static void hellcreek_phylink_validate(struct dsa_switch *ds, int port,
else
phylink_set(mask, 1000baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int
diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c
index 3ff4b7e177f3..7056d98d8177 100644
--- a/drivers/net/dsa/lantiq_gswip.c
+++ b/drivers/net/dsa/lantiq_gswip.c
@@ -230,7 +230,7 @@
#define GSWIP_SDMA_PCTRLp(p) (0xBC0 + ((p) * 0x6))
#define GSWIP_SDMA_PCTRL_EN BIT(0) /* SDMA Port Enable */
#define GSWIP_SDMA_PCTRL_FCEN BIT(1) /* Flow Control Enable */
-#define GSWIP_SDMA_PCTRL_PAUFWD BIT(1) /* Pause Frame Forwarding */
+#define GSWIP_SDMA_PCTRL_PAUFWD BIT(3) /* Pause Frame Forwarding */
#define GSWIP_TABLE_ACTIVE_VLAN 0x01
#define GSWIP_TABLE_VLAN_MAPPING 0x02
@@ -276,6 +276,7 @@ struct gswip_priv {
int num_gphy_fw;
struct gswip_gphy_fw *gphy_fw;
u32 port_vlan_filter;
+ struct mutex pce_table_lock;
};
struct gswip_pce_table_entry {
@@ -523,10 +524,14 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSRD :
GSWIP_PCE_TBL_CTRL_OPMOD_ADRD;
+ mutex_lock(&priv->pce_table_lock);
+
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -536,8 +541,10 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
for (i = 0; i < ARRAY_SIZE(tbl->key); i++)
tbl->key[i] = gswip_switch_r(priv, GSWIP_PCE_TBL_KEY(i));
@@ -553,6 +560,8 @@ static int gswip_pce_table_entry_read(struct gswip_priv *priv,
tbl->valid = !!(crtl & GSWIP_PCE_TBL_CTRL_VLD);
tbl->gmap = (crtl & GSWIP_PCE_TBL_CTRL_GMAP_MASK) >> 7;
+ mutex_unlock(&priv->pce_table_lock);
+
return 0;
}
@@ -565,10 +574,14 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
u16 addr_mode = tbl->key_mode ? GSWIP_PCE_TBL_CTRL_OPMOD_KSWR :
GSWIP_PCE_TBL_CTRL_OPMOD_ADWR;
+ mutex_lock(&priv->pce_table_lock);
+
err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
GSWIP_PCE_TBL_CTRL_BAS);
- if (err)
+ if (err) {
+ mutex_unlock(&priv->pce_table_lock);
return err;
+ }
gswip_switch_w(priv, tbl->index, GSWIP_PCE_TBL_ADDR);
gswip_switch_mask(priv, GSWIP_PCE_TBL_CTRL_ADDR_MASK |
@@ -600,8 +613,12 @@ static int gswip_pce_table_entry_write(struct gswip_priv *priv,
crtl |= GSWIP_PCE_TBL_CTRL_BAS;
gswip_switch_w(priv, crtl, GSWIP_PCE_TBL_CTRL);
- return gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
- GSWIP_PCE_TBL_CTRL_BAS);
+ err = gswip_switch_r_timeout(priv, GSWIP_PCE_TBL_CTRL,
+ GSWIP_PCE_TBL_CTRL_BAS);
+
+ mutex_unlock(&priv->pce_table_lock);
+
+ return err;
}
/* Add the LAN port into a bridge with the CPU port by
@@ -1447,10 +1464,8 @@ static void gswip_phylink_set_capab(unsigned long *supported,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
@@ -1478,7 +1493,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -1488,7 +1503,7 @@ static void gswip_xrx200_phylink_validate(struct dsa_switch *ds, int port,
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(state->interface), port);
}
@@ -1518,7 +1533,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -1528,7 +1543,7 @@ static void gswip_xrx300_phylink_validate(struct dsa_switch *ds, int port,
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface '%s' for port %d\n",
phy_modes(state->interface), port);
}
@@ -2106,6 +2121,7 @@ static int gswip_probe(struct platform_device *pdev)
priv->ds->priv = priv;
priv->ds->ops = priv->hw_info->ops;
priv->dev = dev;
+ mutex_init(&priv->pce_table_lock);
version = gswip_switch_r(priv, GSWIP_VERSION);
np = dev->of_node;
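
The pce_table_lock added above has to be dropped on every early return, which the hunks do by unlocking in each error branch. An equivalent shape with a single exit label is sketched below (illustrative helpers, not the driver's code); the in-tree version keeps each return explicit, while the sketch keeps the unlock in one place:

#include <linux/mutex.h>

/* Sketch: one lock/unlock pair around a busy-wait + access + busy-wait
 * sequence, with all error paths funnelled through a single label so the
 * unlock cannot be missed. The callbacks stand in for register accesses. */
static int pce_table_access(struct mutex *lock,
			    int (*wait_idle)(void),
			    void (*issue_access)(void))
{
	int ret;

	mutex_lock(lock);

	ret = wait_idle();		/* wait for any previous access */
	if (ret)
		goto out;

	issue_access();			/* program address/opcode, set BAS */
	ret = wait_idle();		/* wait for this access to complete */
out:
	mutex_unlock(lock);
	return ret;
}
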
diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c
index c5142f86a3c7..43fc3087aeb3 100644
--- a/drivers/net/dsa/microchip/ksz8795.c
+++ b/drivers/net/dsa/microchip/ksz8795.c
@@ -1542,15 +1542,13 @@ static void ksz8_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface: %s, port: %d\n",
phy_modes(state->interface), port);
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 094737e5084a..9890672a206d 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -1035,9 +1035,6 @@ mt7530_port_enable(struct dsa_switch *ds, int port,
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return 0;
-
mutex_lock(&priv->reg_mutex);
/* Allow the user port gets connected to the cpu port and also
@@ -1060,9 +1057,6 @@ mt7530_port_disable(struct dsa_switch *ds, int port)
{
struct mt7530_priv *priv = ds->priv;
- if (!dsa_is_user_port(ds, port))
- return;
-
mutex_lock(&priv->reg_mutex);
/* Clear up all port matrix which could be restored in the next
@@ -3211,7 +3205,7 @@ mt7530_probe(struct mdio_device *mdiodev)
return -ENOMEM;
priv->ds->dev = &mdiodev->dev;
- priv->ds->num_ports = DSA_MAX_PORTS;
+ priv->ds->num_ports = MT7530_NUM_PORTS;
/* Use medatek,mcm property to distinguish hardware type that would
* casues a little bit differences on power-on sequence.
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 8dadcae93c9b..14c678a9e41b 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -674,9 +674,8 @@ static void mv88e6xxx_validate(struct dsa_switch *ds, int port,
if (chip->info->ops->phylink_validate)
chip->info->ops->phylink_validate(chip, port, mask, state);
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
/* We can only operate at 2500BaseX or 1000BaseX. If requested
* to advertise both, only report advertising at 2500BaseX.
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 341236dcbdb4..83808e7dbdda 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -958,8 +958,10 @@ static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes)
switch_node = dev->of_node;
ports_node = of_get_child_by_name(switch_node, "ports");
+ if (!ports_node)
+ ports_node = of_get_child_by_name(switch_node, "ethernet-ports");
if (!ports_node) {
- dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
+ dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n");
return -ENODEV;
}
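
The felix change above simply accepts either of the two child-node names used by DSA switch bindings. A self-contained sketch of the lookup (helper name illustrative; the caller still owns the node reference):

#include <linux/of.h>

/* Return the "ports" child node, falling back to "ethernet-ports"; the
 * caller must of_node_put() the result when done, and gets NULL if neither
 * node exists. */
static struct device_node *get_ports_node(struct device_node *switch_node)
{
	struct device_node *ports;

	ports = of_get_child_by_name(switch_node, "ports");
	if (!ports)
		ports = of_get_child_by_name(switch_node, "ethernet-ports");

	return ports;
}
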
diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c
index 11b42fd812e4..45c5ec7a83ea 100644
--- a/drivers/net/dsa/ocelot/felix_vsc9959.c
+++ b/drivers/net/dsa/ocelot/felix_vsc9959.c
@@ -943,7 +943,7 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -965,10 +965,8 @@ static void vsc9959_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int vsc9959_prevalidate_phy_mode(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c
index de1d34a1f1e4..92eae63150ea 100644
--- a/drivers/net/dsa/ocelot/seville_vsc9953.c
+++ b/drivers/net/dsa/ocelot/seville_vsc9953.c
@@ -999,7 +999,7 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1018,10 +1018,8 @@ static void vsc9953_phylink_validate(struct ocelot *ocelot, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int vsc9953_prevalidate_phy_mode(struct ocelot *ocelot, int port,
diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c
index a6bfb6abc51a..da0d7e68643a 100644
--- a/drivers/net/dsa/qca/ar9331.c
+++ b/drivers/net/dsa/qca/ar9331.c
@@ -522,7 +522,7 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
goto unsupported;
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -536,15 +536,13 @@ static void ar9331_sw_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 100baseT_Half);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported interface: %d, port: %d\n",
state->interface, port);
}
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index a984f06f6f04..ea7f12778922 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -889,62 +889,183 @@ qca8k_setup_mdio_bus(struct qca8k_priv *priv)
}
static int
-qca8k_setup_of_rgmii_delay(struct qca8k_priv *priv)
+qca8k_setup_mac_pwr_sel(struct qca8k_priv *priv)
{
- struct device_node *port_dn;
- phy_interface_t mode;
- struct dsa_port *dp;
- u32 val;
+ u32 mask = 0;
+ int ret = 0;
- /* CPU port is already checked */
- dp = dsa_to_port(priv->ds, 0);
+ /* SoC specific settings for ipq8064.
+ * If more devices require this, consider adding
+ * a dedicated binding.
+ */
+ if (of_machine_is_compatible("qcom,ipq8064"))
+ mask |= QCA8K_MAC_PWR_RGMII0_1_8V;
+
+ /* SoC specific settings for ipq8065 */
+ if (of_machine_is_compatible("qcom,ipq8065"))
+ mask |= QCA8K_MAC_PWR_RGMII1_1_8V;
+
+ if (mask) {
+ ret = qca8k_rmw(priv, QCA8K_REG_MAC_PWR_SEL,
+ QCA8K_MAC_PWR_RGMII0_1_8V |
+ QCA8K_MAC_PWR_RGMII1_1_8V,
+ mask);
+ }
+
+ return ret;
+}
- port_dn = dp->dn;
+static int qca8k_find_cpu_port(struct dsa_switch *ds)
+{
+ struct qca8k_priv *priv = ds->priv;
- /* Check if port 0 is set to the correct type */
- of_get_phy_mode(port_dn, &mode);
- if (mode != PHY_INTERFACE_MODE_RGMII_ID &&
- mode != PHY_INTERFACE_MODE_RGMII_RXID &&
- mode != PHY_INTERFACE_MODE_RGMII_TXID) {
+ /* Find the connected CPU port. Valid ports are 0 or 6 */
+ if (dsa_is_cpu_port(ds, 0))
return 0;
+
+ dev_dbg(priv->dev, "port 0 is not the CPU port. Checking port 6");
+
+ if (dsa_is_cpu_port(ds, 6))
+ return 6;
+
+ return -EINVAL;
+}
+
+static int
+qca8k_setup_of_pws_reg(struct qca8k_priv *priv)
+{
+ struct device_node *node = priv->dev->of_node;
+ const struct qca8k_match_data *data;
+ u32 val = 0;
+ int ret;
+
+ /* The QCA8327 requires the correct package mode to be set.
+ * Its bigger brother, the QCA8328, has the 172 pin layout.
+ * This should be applied by default, but set it here just to make sure.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ data = of_device_get_match_data(priv->dev);
+
+ /* Set the correct package of 148 pin for QCA8327 */
+ if (data->reduced_package)
+ val |= QCA8327_PWS_PACKAGE148_EN;
+
+ ret = qca8k_rmw(priv, QCA8K_REG_PWS, QCA8327_PWS_PACKAGE148_EN,
+ val);
+ if (ret)
+ return ret;
}
- switch (mode) {
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- if (of_property_read_u32(port_dn, "rx-internal-delay-ps", &val))
- val = 2;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ if (of_property_read_bool(node, "qca,ignore-power-on-sel"))
+ val |= QCA8K_PWS_POWER_ON_SEL;
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
+ if (of_property_read_bool(node, "qca,led-open-drain")) {
+ if (!(val & QCA8K_PWS_POWER_ON_SEL)) {
+ dev_err(priv->dev, "qca,led-open-drain require qca,ignore-power-on-sel to be set.");
+ return -EINVAL;
}
- priv->rgmii_rx_delay = val;
- /* Stop here if we need to check only for rx delay */
- if (mode != PHY_INTERFACE_MODE_RGMII_ID)
- break;
+ val |= QCA8K_PWS_LED_OPEN_EN_CSR;
+ }
- fallthrough;
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (of_property_read_u32(port_dn, "tx-internal-delay-ps", &val))
- val = 1;
- else
- /* Switch regs accept value in ns, convert ps to ns */
- val = val / 1000;
+ return qca8k_rmw(priv, QCA8K_REG_PWS,
+ QCA8K_PWS_LED_OPEN_EN_CSR | QCA8K_PWS_POWER_ON_SEL,
+ val);
+}
- if (val > QCA8K_MAX_DELAY) {
- dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
- val = 3;
- }
+static int
+qca8k_parse_port_config(struct qca8k_priv *priv)
+{
+ int port, cpu_port_index = -1, ret;
+ struct device_node *port_dn;
+ phy_interface_t mode;
+ struct dsa_port *dp;
+ u32 delay;
- priv->rgmii_tx_delay = val;
- break;
- default:
- return 0;
+ /* We have 2 CPU ports. Check them */
+ for (port = 0; port < QCA8K_NUM_PORTS && cpu_port_index < QCA8K_NUM_CPU_PORTS; port++) {
+ /* Skip all ports other than the two CPU port candidates (0 and 6) */
+ if (port != 0 && port != 6)
+ continue;
+
+ dp = dsa_to_port(priv->ds, port);
+ port_dn = dp->dn;
+ cpu_port_index++;
+
+ if (!of_device_is_available(port_dn))
+ continue;
+
+ ret = of_get_phy_mode(port_dn, &mode);
+ if (ret)
+ continue;
+
+ switch (mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_SGMII:
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "tx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID)
+ delay = 1;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii tx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_tx_delay[cpu_port_index] = delay;
+
+ delay = 0;
+
+ if (!of_property_read_u32(port_dn, "rx-internal-delay-ps", &delay))
+ /* Switch regs accept value in ns, convert ps to ns */
+ delay = delay / 1000;
+ else if (mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ delay = 2;
+
+ if (delay > QCA8K_MAX_DELAY) {
+ dev_err(priv->dev, "rgmii rx delay is limited to a max value of 3ns, setting to the max value");
+ delay = 3;
+ }
+
+ priv->ports_config.rgmii_rx_delay[cpu_port_index] = delay;
+
+ /* Skip sgmii parsing for rgmii* mode */
+ if (mode == PHY_INTERFACE_MODE_RGMII ||
+ mode == PHY_INTERFACE_MODE_RGMII_ID ||
+ mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ mode == PHY_INTERFACE_MODE_RGMII_RXID)
+ break;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-txclk-falling-edge"))
+ priv->ports_config.sgmii_tx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-rxclk-falling-edge"))
+ priv->ports_config.sgmii_rx_clk_falling_edge = true;
+
+ if (of_property_read_bool(port_dn, "qca,sgmii-enable-pll")) {
+ priv->ports_config.sgmii_enable_pll = true;
+
+ if (priv->switch_id == QCA8K_ID_QCA8327) {
+ dev_err(priv->dev, "SGMII PLL should NOT be enabled for qca8327. Aborting enabling");
+ priv->ports_config.sgmii_enable_pll = false;
+ }
+
+ if (priv->switch_revision < 2)
+ dev_warn(priv->dev, "SGMII PLL should NOT be enabled for qca8337 with revision 2 or more.");
+ }
+
+ break;
+ default:
+ continue;
+ }
}
return 0;
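
The parsing loop above resolves each CPU port's RGMII delays from up to three sources, as the comment further down explains: an explicit *-internal-delay-ps property (converted from picoseconds to the nanoseconds the switch registers expect), a default implied by the rgmii-id/txid/rxid phy-mode, or no delay at all, clamped to the 3 ns hardware limit. A condensed sketch of that selection (names are illustrative, and the silent clamp stands in for the driver's dev_err-plus-clamp):

#include <linux/minmax.h>
#include <linux/of.h>

#define MAX_RGMII_DELAY_NS	3

/* Pick the delay for one direction: a DT property in ps wins, otherwise the
 * recommended default applies only when the phy-mode implies an internal
 * delay, and the result is clamped to what the hardware can encode. */
static u32 pick_rgmii_delay_ns(struct device_node *port_dn, const char *prop,
			       bool mode_implies_delay, u32 default_ns)
{
	u32 delay = 0;

	if (!of_property_read_u32(port_dn, prop, &delay))
		delay /= 1000;			/* ps -> ns, regs take ns */
	else if (mode_implies_delay)
		delay = default_ns;		/* 1 ns tx / 2 ns rx by default */

	return min_t(u32, delay, MAX_RGMII_DELAY_NS);
}

Called once per direction per CPU port, this mirrors the tx/rx branches of the hunk above.
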
@@ -954,15 +1075,20 @@ static int
qca8k_setup(struct dsa_switch *ds)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int ret, i;
+ int cpu_port, ret, i;
u32 mask;
- /* Make sure that port 0 is the cpu port */
- if (!dsa_is_cpu_port(ds, 0)) {
- dev_err(priv->dev, "port 0 is not the CPU port");
- return -EINVAL;
+ cpu_port = qca8k_find_cpu_port(ds);
+ if (cpu_port < 0) {
+ dev_err(priv->dev, "No cpu port configured in both cpu port0 and port6");
+ return cpu_port;
}
+ /* Parse CPU port config to be later used in phy_link mac_config */
+ ret = qca8k_parse_port_config(priv);
+ if (ret)
+ return ret;
+
mutex_init(&priv->reg_mutex);
/* Start by setting up the register mapping */
@@ -975,7 +1101,11 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
- ret = qca8k_setup_of_rgmii_delay(priv);
+ ret = qca8k_setup_of_pws_reg(priv);
+ if (ret)
+ return ret;
+
+ ret = qca8k_setup_mac_pwr_sel(priv);
if (ret)
return ret;
@@ -992,41 +1122,49 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
dev_warn(priv->dev, "mib init failed");
- /* Enable QCA header mode on the cpu port */
- ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(QCA8K_CPU_PORT),
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
- QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
- if (ret) {
- dev_err(priv->dev, "failed enabling QCA header mode");
- return ret;
- }
-
- /* Disable forwarding by default on all ports */
+ /* Initial setup of all ports */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Disable forwarding by default on all ports */
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, 0);
if (ret)
return ret;
- }
- /* Disable MAC by default on all ports */
- for (i = 1; i < QCA8K_NUM_PORTS; i++)
- qca8k_port_set_status(priv, i, 0);
+ /* Enable QCA header mode on all cpu ports */
+ if (dsa_is_cpu_port(ds, i)) {
+ ret = qca8k_write(priv, QCA8K_REG_PORT_HDR_CTRL(i),
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_TX_S |
+ QCA8K_PORT_HDR_CTRL_ALL << QCA8K_PORT_HDR_CTRL_RX_S);
+ if (ret) {
+ dev_err(priv->dev, "failed enabling QCA header mode");
+ return ret;
+ }
+ }
+
+ /* Disable MAC by default on all user ports */
+ if (dsa_is_user_port(ds, i))
+ qca8k_port_set_status(priv, i, 0);
+ }
- /* Forward all unknown frames to CPU port for Linux processing */
+ /* Forward all unknown frames to the CPU port for Linux processing.
+ * Notice that in a multi-CPU config only one port should be set
+ * for igmp, unknown, multicast and broadcast packets.
+ */
ret = qca8k_write(priv, QCA8K_REG_GLOBAL_FW_CTRL1,
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
- BIT(0) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_IGMP_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_BC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_MC_DP_S |
+ BIT(cpu_port) << QCA8K_GLOBAL_FW_CTRL1_UC_DP_S);
if (ret)
return ret;
- /* Setup connection between CPU port & user ports */
+ /* Setup connection between CPU port & user ports
+ * and apply port-specific switch configuration
+ */
for (i = 0; i < QCA8K_NUM_PORTS; i++) {
/* CPU port gets connected to all user ports of the switch */
if (dsa_is_cpu_port(ds, i)) {
- ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(QCA8K_CPU_PORT),
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER, dsa_user_ports(ds));
if (ret)
return ret;
@@ -1038,7 +1176,7 @@ qca8k_setup(struct dsa_switch *ds)
ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(i),
QCA8K_PORT_LOOKUP_MEMBER,
- BIT(QCA8K_CPU_PORT));
+ BIT(cpu_port));
if (ret)
return ret;
@@ -1063,16 +1201,14 @@ qca8k_setup(struct dsa_switch *ds)
if (ret)
return ret;
}
- }
- /* The port 5 of the qca8337 have some problem in flood condition. The
- * original legacy driver had some specific buffer and priority settings
- * for the different port suggested by the QCA switch team. Add this
- * missing settings to improve switch stability under load condition.
- * This problem is limited to qca8337 and other qca8k switch are not affected.
- */
- if (priv->switch_id == QCA8K_ID_QCA8337) {
- for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ /* Port 5 of the qca8337 has some problems in flood conditions. The
+ * original legacy driver had some specific buffer and priority settings
+ * for the different ports, suggested by the QCA switch team. Add these
+ * missing settings to improve switch stability under load conditions.
+ * This problem is limited to the qca8337; other qca8k switches are not affected.
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8337) {
switch (i) {
/* The 2 CPU port and port 5 requires some different
* priority than any other ports.
@@ -1108,6 +1244,12 @@ qca8k_setup(struct dsa_switch *ds)
QCA8K_PORT_HOL_CTRL1_WRED_EN,
mask);
}
+
+ /* Set the initial MTU for every port.
+ * We only have a general MTU setting, so track
+ * every port and set the max across all ports.
+ */
+ priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
}
/* Special GLOBAL_FC_THRESH value are needed for ar8327 switch */
@@ -1121,8 +1263,6 @@ qca8k_setup(struct dsa_switch *ds)
}
/* Setup our port MTUs to match power on defaults */
- for (i = 0; i < QCA8K_NUM_PORTS; i++)
- priv->port_mtu[i] = ETH_FRAME_LEN + ETH_FCS_LEN;
ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, ETH_FRAME_LEN + ETH_FCS_LEN);
if (ret)
dev_warn(priv->dev, "failed setting MTU settings");
@@ -1137,12 +1277,53 @@ qca8k_setup(struct dsa_switch *ds)
}
static void
+qca8k_mac_config_setup_internal_delay(struct qca8k_priv *priv, int cpu_port_index,
+ u32 reg)
+{
+ u32 delay, val = 0;
+ int ret;
+
+ /* The delay can be declared in 3 different ways:
+ * rgmii mode combined with the standard internal-delay binding,
+ * or the rgmii-id or rgmii-tx/rxid phy modes.
+ * The parse logic sets a delay different from 0 only when one of
+ * the 3 ways is used. In all other cases the delay is not enabled.
+ * With ID or TX/RXID the delay is enabled and set to the default
+ * and recommended value.
+ */
+ if (priv->ports_config.rgmii_tx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_tx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_TX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN;
+ }
+
+ if (priv->ports_config.rgmii_rx_delay[cpu_port_index]) {
+ delay = priv->ports_config.rgmii_rx_delay[cpu_port_index];
+
+ val |= QCA8K_PORT_PAD_RGMII_RX_DELAY(delay) |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN;
+ }
+
+ /* Set RGMII delay based on the selected values */
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK |
+ QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
+ QCA8K_PORT_PAD_RGMII_RX_DELAY_EN,
+ val);
+ if (ret)
+ dev_err(priv->dev, "Failed to set internal delay for CPU port%d",
+ cpu_port_index == QCA8K_CPU_PORT0 ? 0 : 6);
+}
+
+static void
qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
const struct phylink_link_state *state)
{
struct qca8k_priv *priv = ds->priv;
+ int cpu_port_index, ret;
u32 reg, val;
- int ret;
switch (port) {
case 0: /* 1st CPU port */
@@ -1154,6 +1335,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT0_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT0;
break;
case 1:
case 2:
@@ -1172,6 +1354,7 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
return;
reg = QCA8K_REG_PORT6_PAD_CTRL;
+ cpu_port_index = QCA8K_CPU_PORT6;
break;
default:
dev_err(ds->dev, "%s: unsupported port: %i\n", __func__, port);
@@ -1186,23 +1369,18 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
switch (state->interface) {
case PHY_INTERFACE_MODE_RGMII:
- /* RGMII mode means no delay so don't enable the delay */
- qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
- break;
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_TXID:
case PHY_INTERFACE_MODE_RGMII_RXID:
- /* RGMII_ID needs internal delay. This is enabled through
- * PORT5_PAD_CTRL for all ports, rather than individual port
- * registers
+ qca8k_write(priv, reg, QCA8K_PORT_PAD_RGMII_EN);
+
+ /* Configure rgmii delay */
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
+ /* QCA8337 requires the rgmii rx delay to be set for all ports.
+ * This is enabled through PORT5_PAD_CTRL for all ports,
+ * rather than individual port registers.
*/
- qca8k_write(priv, reg,
- QCA8K_PORT_PAD_RGMII_EN |
- QCA8K_PORT_PAD_RGMII_TX_DELAY(priv->rgmii_tx_delay) |
- QCA8K_PORT_PAD_RGMII_RX_DELAY(priv->rgmii_rx_delay) |
- QCA8K_PORT_PAD_RGMII_TX_DELAY_EN |
- QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
- /* QCA8337 requires to set rgmii rx delay */
if (priv->switch_id == QCA8K_ID_QCA8337)
qca8k_write(priv, QCA8K_REG_PORT5_PAD_CTRL,
QCA8K_PORT_PAD_RGMII_RX_DELAY_EN);
@@ -1227,8 +1405,11 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
if (ret)
return;
- val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
- QCA8K_SGMII_EN_TX | QCA8K_SGMII_EN_SD;
+ val |= QCA8K_SGMII_EN_SD;
+
+ if (priv->ports_config.sgmii_enable_pll)
+ val |= QCA8K_SGMII_EN_PLL | QCA8K_SGMII_EN_RX |
+ QCA8K_SGMII_EN_TX;
if (dsa_is_cpu_port(ds, port)) {
/* CPU port, we're talking to the CPU MAC, be a PHY */
@@ -1243,6 +1424,35 @@ qca8k_phylink_mac_config(struct dsa_switch *ds, int port, unsigned int mode,
}
qca8k_write(priv, QCA8K_REG_SGMII_CTRL, val);
+
+ /* For qca8327/qca8328/qca8334/qca8338 the SGMII interface is unique
+ * and the falling edge is configured by writing to the PORT0 PAD reg
+ */
+ if (priv->switch_id == QCA8K_ID_QCA8327 ||
+ priv->switch_id == QCA8K_ID_QCA8337)
+ reg = QCA8K_REG_PORT0_PAD_CTRL;
+
+ val = 0;
+
+ /* SGMII Clock phase configuration */
+ if (priv->ports_config.sgmii_rx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE;
+
+ if (priv->ports_config.sgmii_tx_clk_falling_edge)
+ val |= QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE;
+
+ if (val)
+ ret = qca8k_rmw(priv, reg,
+ QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE |
+ QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE,
+ val);
+
+ /* The original code reports port instability, as SGMII also
+ * requires the delay to be set. Apply the advised values here or take them from DT.
+ */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ qca8k_mac_config_setup_internal_delay(priv, cpu_port_index, reg);
+
break;
default:
dev_err(ds->dev, "xMII mode %s not supported for port %d\n",
@@ -1522,10 +1732,15 @@ static int
qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int port_mask = BIT(QCA8K_CPU_PORT);
+ int port_mask, cpu_port;
int i, ret;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+ port_mask = BIT(cpu_port);
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
@@ -1551,9 +1766,13 @@ static void
qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
- int i;
+ int cpu_port, i;
- for (i = 1; i < QCA8K_NUM_PORTS; i++) {
+ cpu_port = dsa_to_port(ds, port)->cpu_dp->index;
+
+ for (i = 0; i < QCA8K_NUM_PORTS; i++) {
+ if (dsa_is_cpu_port(ds, i))
+ continue;
if (dsa_to_port(ds, i)->bridge_dev != br)
continue;
/* Remove this port to the portvlan mask of the other ports
@@ -1568,7 +1787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
* this port
*/
qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port),
- QCA8K_PORT_LOOKUP_MEMBER, BIT(QCA8K_CPU_PORT));
+ QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port));
}
static int
@@ -1939,7 +2158,12 @@ static int qca8k_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(qca8k_pm_ops,
qca8k_suspend, qca8k_resume);
-static const struct qca8k_match_data qca832x = {
+static const struct qca8k_match_data qca8327 = {
+ .id = QCA8K_ID_QCA8327,
+ .reduced_package = true,
+};
+
+static const struct qca8k_match_data qca8328 = {
.id = QCA8K_ID_QCA8327,
};
@@ -1948,7 +2172,8 @@ static const struct qca8k_match_data qca833x = {
};
static const struct of_device_id qca8k_of_match[] = {
- { .compatible = "qca,qca8327", .data = &qca832x },
+ { .compatible = "qca,qca8327", .data = &qca8327 },
+ { .compatible = "qca,qca8328", .data = &qca8328 },
{ .compatible = "qca,qca8334", .data = &qca833x },
{ .compatible = "qca,qca8337", .data = &qca833x },
{ /* sentinel */ },
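
With two possible CPU ports, the setup path above first discovers which one is actually configured and then points the IGMP/broadcast/multicast/unknown-unicast destination fields at it, instead of hard-coding port 0. A sketch of that logic (the shift parameters stand in for the QCA8K_GLOBAL_FW_CTRL1_*_DP_S defines; names are illustrative):

#include <linux/bits.h>
#include <linux/errno.h>
#include <net/dsa.h>

/* Only ports 0 and 6 can act as CPU ports on these switches. */
static int find_cpu_port(struct dsa_switch *ds)
{
	if (dsa_is_cpu_port(ds, 0))
		return 0;
	if (dsa_is_cpu_port(ds, 6))
		return 6;

	return -EINVAL;
}

/* Build the forwarding-control value with every destination field pointing
 * at the discovered CPU port. */
static u32 fw_ctrl1_value(int cpu_port, u32 igmp_s, u32 bc_s, u32 mc_s, u32 uc_s)
{
	return BIT(cpu_port) << igmp_s | BIT(cpu_port) << bc_s |
	       BIT(cpu_port) << mc_s | BIT(cpu_port) << uc_s;
}
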
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index ed3b05ad6745..e10571a398c9 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -13,6 +13,7 @@
#include <linux/gpio.h>
#define QCA8K_NUM_PORTS 7
+#define QCA8K_NUM_CPU_PORTS 2
#define QCA8K_MAX_MTU 9000
#define PHY_ID_QCA8327 0x004dd034
@@ -24,8 +25,6 @@
#define QCA8K_NUM_FDB_RECORDS 2048
-#define QCA8K_CPU_PORT 0
-
#define QCA8K_PORT_VID_DEF 1
/* Global control registers */
@@ -35,16 +34,26 @@
#define QCA8K_MASK_CTRL_DEVICE_ID_MASK GENMASK(15, 8)
#define QCA8K_MASK_CTRL_DEVICE_ID(x) ((x) >> 8)
#define QCA8K_REG_PORT0_PAD_CTRL 0x004
+#define QCA8K_PORT0_PAD_SGMII_RXCLK_FALLING_EDGE BIT(19)
+#define QCA8K_PORT0_PAD_SGMII_TXCLK_FALLING_EDGE BIT(18)
#define QCA8K_REG_PORT5_PAD_CTRL 0x008
#define QCA8K_REG_PORT6_PAD_CTRL 0x00c
#define QCA8K_PORT_PAD_RGMII_EN BIT(26)
+#define QCA8K_PORT_PAD_RGMII_TX_DELAY_MASK GENMASK(23, 22)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY(x) ((x) << 22)
+#define QCA8K_PORT_PAD_RGMII_RX_DELAY_MASK GENMASK(21, 20)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY(x) ((x) << 20)
#define QCA8K_PORT_PAD_RGMII_TX_DELAY_EN BIT(25)
#define QCA8K_PORT_PAD_RGMII_RX_DELAY_EN BIT(24)
#define QCA8K_MAX_DELAY 3
#define QCA8K_PORT_PAD_SGMII_EN BIT(7)
#define QCA8K_REG_PWS 0x010
+#define QCA8K_PWS_POWER_ON_SEL BIT(31)
+/* This reg is only valid for QCA832x and toggles the package
+ * type from 176 pin (by default) to 148 pin used on QCA8327
+ */
+#define QCA8327_PWS_PACKAGE148_EN BIT(30)
+#define QCA8K_PWS_LED_OPEN_EN_CSR BIT(24)
#define QCA8K_PWS_SERDES_AEN_DIS BIT(7)
#define QCA8K_REG_MODULE_EN 0x030
#define QCA8K_MODULE_EN_MIB BIT(0)
@@ -100,6 +109,11 @@
#define QCA8K_SGMII_MODE_CTRL_PHY (1 << 22)
#define QCA8K_SGMII_MODE_CTRL_MAC (2 << 22)
+/* MAC_PWR_SEL registers */
+#define QCA8K_REG_MAC_PWR_SEL 0x0e4
+#define QCA8K_MAC_PWR_RGMII1_1_8V BIT(18)
+#define QCA8K_MAC_PWR_RGMII0_1_8V BIT(19)
+
/* EEE control registers */
#define QCA8K_REG_EEE_CTRL 0x100
#define QCA8K_REG_EEE_CTRL_LPI_EN(_i) ((_i + 1) * 2)
@@ -248,14 +262,27 @@ struct ar8xxx_port_status {
struct qca8k_match_data {
u8 id;
+ bool reduced_package;
+};
+
+enum {
+ QCA8K_CPU_PORT0,
+ QCA8K_CPU_PORT6,
+};
+
+struct qca8k_ports_config {
+ bool sgmii_rx_clk_falling_edge;
+ bool sgmii_tx_clk_falling_edge;
+ bool sgmii_enable_pll;
+ u8 rgmii_rx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
+ u8 rgmii_tx_delay[QCA8K_NUM_CPU_PORTS]; /* 0: CPU port0, 1: CPU port6 */
};
struct qca8k_priv {
u8 switch_id;
u8 switch_revision;
- u8 rgmii_tx_delay;
- u8 rgmii_rx_delay;
bool legacy_phy_port_mapping;
+ struct qca8k_ports_config ports_config;
struct regmap *regmap;
struct mii_bus *bus;
struct ar8xxx_port_status port_sts[QCA8K_NUM_PORTS];
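
The MAC_PWR_SEL bits defined above are selected from the machine compatible rather than from a per-switch DT property, as the setup hunk in qca8k.c notes. A sketch of that selection using the register bits from this header (the wrapper name is illustrative):

#include <linux/of.h>

#include "qca8k.h"

/* Request 1.8 V RGMII pads only on the SoCs known to need them. */
static u32 mac_pwr_sel_mask(void)
{
	u32 mask = 0;

	if (of_machine_is_compatible("qcom,ipq8064"))
		mask |= QCA8K_MAC_PWR_RGMII0_1_8V;	/* BIT(19) */
	if (of_machine_is_compatible("qcom,ipq8065"))
		mask |= QCA8K_MAC_PWR_RGMII1_1_8V;	/* BIT(18) */

	return mask;
}
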
diff --git a/drivers/net/dsa/realtek-smi-core.c b/drivers/net/dsa/realtek-smi-core.c
index 2fcfd917b876..c66ebd0ee217 100644
--- a/drivers/net/dsa/realtek-smi-core.c
+++ b/drivers/net/dsa/realtek-smi-core.c
@@ -501,6 +501,10 @@ static const struct of_device_id realtek_smi_of_match[] = {
.compatible = "realtek,rtl8366s",
.data = NULL,
},
+ {
+ .compatible = "realtek,rtl8365mb",
+ .data = &rtl8365mb_variant,
+ },
{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, realtek_smi_of_match);
diff --git a/drivers/net/dsa/realtek-smi-core.h b/drivers/net/dsa/realtek-smi-core.h
index fcf465f7f922..5bfa53e2480a 100644
--- a/drivers/net/dsa/realtek-smi-core.h
+++ b/drivers/net/dsa/realtek-smi-core.h
@@ -129,9 +129,6 @@ int rtl8366_set_pvid(struct realtek_smi *smi, unsigned int port,
int rtl8366_enable_vlan4k(struct realtek_smi *smi, bool enable);
int rtl8366_enable_vlan(struct realtek_smi *smi, bool enable);
int rtl8366_reset_vlan(struct realtek_smi *smi);
-int rtl8366_init_vlan(struct realtek_smi *smi);
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack);
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack);
@@ -143,5 +140,6 @@ int rtl8366_get_sset_count(struct dsa_switch *ds, int port, int sset);
void rtl8366_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
extern const struct realtek_smi_variant rtl8366rb_variant;
+extern const struct realtek_smi_variant rtl8365mb_variant;
#endif /* _REALTEK_SMI_H */
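
Wiring the new subdriver into the SMI core takes only the two pieces shown in these hunks: the subdriver exports a realtek_smi_variant, and the core maps a new compatible string to it in its of_match table. A condensed sketch (table name illustrative; rtl8365mb_variant is declared in the core header above):

#include <linux/mod_devicetable.h>

#include "realtek-smi-core.h"

static const struct of_device_id example_rtl8365mb_of_match[] = {
	{ .compatible = "realtek,rtl8365mb", .data = &rtl8365mb_variant },
	{ /* sentinel */ },
};
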
diff --git a/drivers/net/dsa/rtl8365mb.c b/drivers/net/dsa/rtl8365mb.c
new file mode 100644
index 000000000000..baaae97283c5
--- /dev/null
+++ b/drivers/net/dsa/rtl8365mb.c
@@ -0,0 +1,1982 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Realtek SMI subdriver for the Realtek RTL8365MB-VC ethernet switch.
+ *
+ * Copyright (C) 2021 Alvin Šipraga <alsi@bang-olufsen.dk>
+ * Copyright (C) 2021 Michael Rasmussen <mir@bang-olufsen.dk>
+ *
+ * The RTL8365MB-VC is a 4+1 port 10/100/1000M switch controller. It includes 4
+ * integrated PHYs for the user facing ports, and an extension interface which
+ * can be connected to the CPU - or another PHY - via either MII, RMII, or
+ * RGMII. The switch is configured via the Realtek Simple Management Interface
+ * (SMI), which uses the MDIO/MDC lines.
+ *
+ * Below is a simplified block diagram of the chip and its relevant interfaces.
+ *
+ * .-----------------------------------.
+ * | |
+ * UTP <---------------> Giga PHY <-> PCS <-> P0 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P1 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P2 GMAC |
+ * UTP <---------------> Giga PHY <-> PCS <-> P3 GMAC |
+ * | |
+ * CPU/PHY <-MII/RMII/RGMII---> Extension <---> Extension |
+ * | interface 1 GMAC 1 |
+ * | |
+ * SMI driver/ <-MDC/SCL---> Management ~~~~~~~~~~~~~~ |
+ * EEPROM <-MDIO/SDA--> interface ~REALTEK ~~~~~ |
+ * | ~RTL8365MB ~~~ |
+ * | ~GXXXC TAIWAN~ |
+ * GPIO <--------------> Reset ~~~~~~~~~~~~~~ |
+ * | |
+ * Interrupt <----------> Link UP/DOWN events |
+ * controller | |
+ * '-----------------------------------'
+ *
+ * The driver uses DSA to integrate the 4 user and 1 extension ports into the
+ * kernel. Netdevices are created for the user ports, as are PHY devices for
+ * their integrated PHYs. The device tree firmware should also specify the link
+ * partner of the extension port - either via a fixed-link or other phy-handle.
+ * See the device tree bindings for more detailed information. Note that the
+ * driver has only been tested with a fixed-link, but in principle it should not
+ * matter.
+ *
+ * NOTE: Currently, only the RGMII interface is implemented in this driver.
+ *
+ * The interrupt line is asserted on link UP/DOWN events. The driver creates a
+ * custom irqchip to handle this interrupt and demultiplex the events by reading
+ * the status registers via SMI. Interrupts are then propagated to the relevant
+ * PHY device.
+ *
+ * The EEPROM contains initial register values which the chip will read over I2C
+ * upon hardware reset. It is also possible to omit the EEPROM. In both cases,
+ * the driver will manually reprogram some registers using jam tables to reach
+ * an initial state defined by the vendor driver.
+ *
+ * This Linux driver is written based on an OS-agnostic vendor driver from
+ * Realtek. The reference GPL-licensed sources can be found in the OpenWrt
+ * source tree under the name rtl8367c. The vendor driver claims to support a
+ * number of similar switch controllers from Realtek, but the only hardware we
+ * have is the RTL8365MB-VC. Moreover, there does not seem to be any chip under
+ * the name RTL8367C. Although one wishes that the 'C' stood for some kind of
+ * common hardware revision, there exist examples of chips with the suffix -VC
+ * which are explicitly not supported by the rtl8367c driver and which instead
+ * require the rtl8367d vendor driver. With all this uncertainty, the driver has
+ * been modestly named rtl8365mb. Future implementors may wish to rename things
+ * accordingly.
+ *
+ * In the same family of chips, some carry up to 8 user ports and up to 2
+ * extension ports. Where possible this driver tries to make things generic, but
+ * more work must be done to support these configurations. According to
+ * documentation from Realtek, the family should include the following chips:
+ *
+ * - RTL8363NB
+ * - RTL8363NB-VB
+ * - RTL8363SC
+ * - RTL8363SC-VB
+ * - RTL8364NB
+ * - RTL8364NB-VB
+ * - RTL8365MB-VC
+ * - RTL8366SC
+ * - RTL8367RB-VB
+ * - RTL8367SB
+ * - RTL8367S
+ * - RTL8370MB
+ * - RTL8310SR
+ *
+ * Some of the register logic for these additional chips has been skipped over
+ * while implementing this driver. It is therefore not possible to assume that
+ * things will work out-of-the-box for other chips, and a careful review of the
+ * vendor driver may be needed to expand support. The RTL8365MB-VC seems to be
+ * one of the simpler chips.
+ */
+
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/mutex.h>
+#include <linux/of_irq.h>
+#include <linux/regmap.h>
+#include <linux/if_bridge.h>
+
+#include "realtek-smi-core.h"
+
+/* Chip-specific data and limits */
+#define RTL8365MB_CHIP_ID_8365MB_VC 0x6367
+#define RTL8365MB_CPU_PORT_NUM_8365MB_VC 6
+#define RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC 2112
+
+/* Family-specific data and limits */
+#define RTL8365MB_NUM_PHYREGS 32
+#define RTL8365MB_PHYREGMAX (RTL8365MB_NUM_PHYREGS - 1)
+#define RTL8365MB_MAX_NUM_PORTS (RTL8365MB_CPU_PORT_NUM_8365MB_VC + 1)
+
+/* Chip identification registers */
+#define RTL8365MB_CHIP_ID_REG 0x1300
+
+#define RTL8365MB_CHIP_VER_REG 0x1301
+
+#define RTL8365MB_MAGIC_REG 0x13C2
+#define RTL8365MB_MAGIC_VALUE 0x0249
+
+/* Chip reset register */
+#define RTL8365MB_CHIP_RESET_REG 0x1322
+#define RTL8365MB_CHIP_RESET_SW_MASK 0x0002
+#define RTL8365MB_CHIP_RESET_HW_MASK 0x0001
+
+/* Interrupt polarity register */
+#define RTL8365MB_INTR_POLARITY_REG 0x1100
+#define RTL8365MB_INTR_POLARITY_MASK 0x0001
+#define RTL8365MB_INTR_POLARITY_HIGH 0
+#define RTL8365MB_INTR_POLARITY_LOW 1
+
+/* Interrupt control/status register - enable/check specific interrupt types */
+#define RTL8365MB_INTR_CTRL_REG 0x1101
+#define RTL8365MB_INTR_STATUS_REG 0x1102
+#define RTL8365MB_INTR_SLIENT_START_2_MASK 0x1000
+#define RTL8365MB_INTR_SLIENT_START_MASK 0x0800
+#define RTL8365MB_INTR_ACL_ACTION_MASK 0x0200
+#define RTL8365MB_INTR_CABLE_DIAG_FIN_MASK 0x0100
+#define RTL8365MB_INTR_INTERRUPT_8051_MASK 0x0080
+#define RTL8365MB_INTR_LOOP_DETECTION_MASK 0x0040
+#define RTL8365MB_INTR_GREEN_TIMER_MASK 0x0020
+#define RTL8365MB_INTR_SPECIAL_CONGEST_MASK 0x0010
+#define RTL8365MB_INTR_SPEED_CHANGE_MASK 0x0008
+#define RTL8365MB_INTR_LEARN_OVER_MASK 0x0004
+#define RTL8365MB_INTR_METER_EXCEEDED_MASK 0x0002
+#define RTL8365MB_INTR_LINK_CHANGE_MASK 0x0001
+#define RTL8365MB_INTR_ALL_MASK \
+ (RTL8365MB_INTR_SLIENT_START_2_MASK | \
+ RTL8365MB_INTR_SLIENT_START_MASK | \
+ RTL8365MB_INTR_ACL_ACTION_MASK | \
+ RTL8365MB_INTR_CABLE_DIAG_FIN_MASK | \
+ RTL8365MB_INTR_INTERRUPT_8051_MASK | \
+ RTL8365MB_INTR_LOOP_DETECTION_MASK | \
+ RTL8365MB_INTR_GREEN_TIMER_MASK | \
+ RTL8365MB_INTR_SPECIAL_CONGEST_MASK | \
+ RTL8365MB_INTR_SPEED_CHANGE_MASK | \
+ RTL8365MB_INTR_LEARN_OVER_MASK | \
+ RTL8365MB_INTR_METER_EXCEEDED_MASK | \
+ RTL8365MB_INTR_LINK_CHANGE_MASK)
+
+/* Per-port interrupt type status registers */
+#define RTL8365MB_PORT_LINKDOWN_IND_REG 0x1106
+#define RTL8365MB_PORT_LINKDOWN_IND_MASK 0x07FF
+
+#define RTL8365MB_PORT_LINKUP_IND_REG 0x1107
+#define RTL8365MB_PORT_LINKUP_IND_MASK 0x07FF
+
+/* PHY indirect access registers */
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_REG 0x1F00
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK 0x0002
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ 0
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE 1
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK 0x0001
+#define RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE 1
+#define RTL8365MB_INDIRECT_ACCESS_STATUS_REG 0x1F01
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG 0x1F02
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK GENMASK(4, 0)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK GENMASK(6, 5)
+#define RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK GENMASK(11, 8)
+#define RTL8365MB_PHY_BASE 0x2000
+#define RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG 0x1F03
+#define RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG 0x1F04
+
+/* PHY OCP address prefix register */
+#define RTL8365MB_GPHY_OCP_MSB_0_REG 0x1D15
+#define RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK 0x0FC0
+#define RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK 0xFC00
+
+/* The PHY OCP addresses of PHY registers 0~31 start here */
+#define RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE 0xA400
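+
+/* As a worked example of the mapping used by rtl8365mb_phy_read/write
+ * below: standard PHY register 2 corresponds to OCP address
+ * 0xA400 + 2 * 2 = 0xA404, and register 31 to 0xA43E.
+ */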
+
+/* EXT port interface mode values - used in DIGITAL_INTERFACE_SELECT */
+#define RTL8365MB_EXT_PORT_MODE_DISABLE 0
+#define RTL8365MB_EXT_PORT_MODE_RGMII 1
+#define RTL8365MB_EXT_PORT_MODE_MII_MAC 2
+#define RTL8365MB_EXT_PORT_MODE_MII_PHY 3
+#define RTL8365MB_EXT_PORT_MODE_TMII_MAC 4
+#define RTL8365MB_EXT_PORT_MODE_TMII_PHY 5
+#define RTL8365MB_EXT_PORT_MODE_GMII 6
+#define RTL8365MB_EXT_PORT_MODE_RMII_MAC 7
+#define RTL8365MB_EXT_PORT_MODE_RMII_PHY 8
+#define RTL8365MB_EXT_PORT_MODE_SGMII 9
+#define RTL8365MB_EXT_PORT_MODE_HSGMII 10
+#define RTL8365MB_EXT_PORT_MODE_1000X_100FX 11
+#define RTL8365MB_EXT_PORT_MODE_1000X 12
+#define RTL8365MB_EXT_PORT_MODE_100FX 13
+
+/* EXT port interface mode configuration registers 0~1 */
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 0x1305
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG1 0x13C3
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_SELECT_REG0 + \
+ ((_extport) >> 1) * (0x13C3 - 0x1305))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(_extport) \
+ (0xF << (((_extport) % 2) * 4))
+#define RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(_extport) \
+ (((_extport) % 2) * 4)
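+
+/* For illustration: EXT port 1 has its mode field in register 0x1305 at bit
+ * offset 4, while EXT port 2 would use register 0x13C3 at bit offset 0. Only
+ * EXT port 1 is exercised by this driver so far.
+ */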
+
+/* EXT port RGMII TX/RX delay configuration registers 1~2 */
+#define RTL8365MB_EXT_RGMXF_REG1 0x1307
+#define RTL8365MB_EXT_RGMXF_REG2 0x13C5
+#define RTL8365MB_EXT_RGMXF_REG(_extport) \
+ (RTL8365MB_EXT_RGMXF_REG1 + \
+ (((_extport) >> 1) * (0x13C5 - 0x1307)))
+#define RTL8365MB_EXT_RGMXF_RXDELAY_MASK 0x0007
+#define RTL8365MB_EXT_RGMXF_TXDELAY_MASK 0x0008
+
+/* External port speed values - used in DIGITAL_INTERFACE_FORCE */
+#define RTL8365MB_PORT_SPEED_10M 0
+#define RTL8365MB_PORT_SPEED_100M 1
+#define RTL8365MB_PORT_SPEED_1000M 2
+
+/* EXT port force configuration registers 0~2 */
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 0x1310
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG1 0x1311
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG2 0x13C4
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(_extport) \
+ (RTL8365MB_DIGITAL_INTERFACE_FORCE_REG0 + \
+ ((_extport) & 0x1) + \
+ ((((_extport) >> 1) & 0x1) * (0x13C4 - 0x1310)))
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK 0x1000
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_NWAY_MASK 0x0080
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK 0x0040
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK 0x0020
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK 0x0010
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK 0x0004
+#define RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK 0x0003
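+
+/* For illustration: the force register resolves to 0x1310 for EXT port 0,
+ * 0x1311 for EXT port 1 and 0x13C4 for EXT port 2.
+ */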
+
+/* CPU port mask register - controls which ports are treated as CPU ports */
+#define RTL8365MB_CPU_PORT_MASK_REG 0x1219
+#define RTL8365MB_CPU_PORT_MASK_MASK 0x07FF
+
+/* CPU control register */
+#define RTL8365MB_CPU_CTRL_REG 0x121A
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK 0x0400
+#define RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK 0x0200
+#define RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK 0x0080
+#define RTL8365MB_CPU_CTRL_TAG_POSITION_MASK 0x0040
+#define RTL8365MB_CPU_CTRL_TRAP_PORT_MASK 0x0038
+#define RTL8365MB_CPU_CTRL_INSERTMODE_MASK 0x0006
+#define RTL8365MB_CPU_CTRL_EN_MASK 0x0001
+
+/* Maximum packet length register */
+#define RTL8365MB_CFG0_MAX_LEN_REG 0x088C
+#define RTL8365MB_CFG0_MAX_LEN_MASK 0x3FFF
+
+/* Port learning limit registers */
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE 0x0A20
+#define RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(_physport) \
+ (RTL8365MB_LUT_PORT_LEARN_LIMIT_BASE + (_physport))
+
+/* Port isolation (forwarding mask) registers */
+#define RTL8365MB_PORT_ISOLATION_REG_BASE 0x08A2
+#define RTL8365MB_PORT_ISOLATION_REG(_physport) \
+ (RTL8365MB_PORT_ISOLATION_REG_BASE + (_physport))
+#define RTL8365MB_PORT_ISOLATION_MASK 0x07FF
+
+/* MSTP port state registers - indexed by MSTI (tree instance) */
+#define RTL8365MB_MSTI_CTRL_BASE 0x0A00
+#define RTL8365MB_MSTI_CTRL_REG(_msti, _physport) \
+ (RTL8365MB_MSTI_CTRL_BASE + ((_msti) << 1) + ((_physport) >> 3))
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(_physport) ((_physport) << 1)
+#define RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(_physport) \
+ (0x3 << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET((_physport)))
+
+/* MIB counter value registers */
+#define RTL8365MB_MIB_COUNTER_BASE 0x1000
+#define RTL8365MB_MIB_COUNTER_REG(_x) (RTL8365MB_MIB_COUNTER_BASE + (_x))
+
+/* MIB counter address register */
+#define RTL8365MB_MIB_ADDRESS_REG 0x1004
+#define RTL8365MB_MIB_ADDRESS_PORT_OFFSET 0x007C
+#define RTL8365MB_MIB_ADDRESS(_p, _x) \
+ (((RTL8365MB_MIB_ADDRESS_PORT_OFFSET) * (_p) + (_x)) >> 2)
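+
+/* Worked example of the SRAM addressing above: the dot3StatsFCSErrors counter
+ * of port 1 sits at byte offset 4 within the port's 0x7C-byte counter block,
+ * so its MIB address is (0x7C * 1 + 4) >> 2 = 0x20.
+ */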
+
+#define RTL8365MB_MIB_CTRL0_REG 0x1005
+#define RTL8365MB_MIB_CTRL0_RESET_MASK 0x0002
+#define RTL8365MB_MIB_CTRL0_BUSY_MASK 0x0001
+
+/* The DSA callback .get_stats64 runs in atomic context, so we are not allowed
+ * to block. On the other hand, accessing MIB counters absolutely requires us to
+ * block. The solution is thus to schedule work which polls the MIB counters
+ * asynchronously and updates some private data, which the callback can then
+ * fetch atomically. Three seconds should be a good enough polling interval.
+ */
+#define RTL8365MB_STATS_INTERVAL_JIFFIES (3 * HZ)
+
+enum rtl8365mb_mib_counter_index {
+ RTL8365MB_MIB_ifInOctets,
+ RTL8365MB_MIB_dot3StatsFCSErrors,
+ RTL8365MB_MIB_dot3StatsSymbolErrors,
+ RTL8365MB_MIB_dot3InPauseFrames,
+ RTL8365MB_MIB_dot3ControlInUnknownOpcodes,
+ RTL8365MB_MIB_etherStatsFragments,
+ RTL8365MB_MIB_etherStatsJabbers,
+ RTL8365MB_MIB_ifInUcastPkts,
+ RTL8365MB_MIB_etherStatsDropEvents,
+ RTL8365MB_MIB_ifInMulticastPkts,
+ RTL8365MB_MIB_ifInBroadcastPkts,
+ RTL8365MB_MIB_inMldChecksumError,
+ RTL8365MB_MIB_inIgmpChecksumError,
+ RTL8365MB_MIB_inMldSpecificQuery,
+ RTL8365MB_MIB_inMldGeneralQuery,
+ RTL8365MB_MIB_inIgmpSpecificQuery,
+ RTL8365MB_MIB_inIgmpGeneralQuery,
+ RTL8365MB_MIB_inMldLeaves,
+ RTL8365MB_MIB_inIgmpLeaves,
+ RTL8365MB_MIB_etherStatsOctets,
+ RTL8365MB_MIB_etherStatsUnderSizePkts,
+ RTL8365MB_MIB_etherOversizeStats,
+ RTL8365MB_MIB_etherStatsPkts64Octets,
+ RTL8365MB_MIB_etherStatsPkts65to127Octets,
+ RTL8365MB_MIB_etherStatsPkts128to255Octets,
+ RTL8365MB_MIB_etherStatsPkts256to511Octets,
+ RTL8365MB_MIB_etherStatsPkts512to1023Octets,
+ RTL8365MB_MIB_etherStatsPkts1024to1518Octets,
+ RTL8365MB_MIB_ifOutOctets,
+ RTL8365MB_MIB_dot3StatsSingleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsMultipleCollisionFrames,
+ RTL8365MB_MIB_dot3StatsDeferredTransmissions,
+ RTL8365MB_MIB_dot3StatsLateCollisions,
+ RTL8365MB_MIB_etherStatsCollisions,
+ RTL8365MB_MIB_dot3StatsExcessiveCollisions,
+ RTL8365MB_MIB_dot3OutPauseFrames,
+ RTL8365MB_MIB_ifOutDiscards,
+ RTL8365MB_MIB_dot1dTpPortInDiscards,
+ RTL8365MB_MIB_ifOutUcastPkts,
+ RTL8365MB_MIB_ifOutMulticastPkts,
+ RTL8365MB_MIB_ifOutBroadcastPkts,
+ RTL8365MB_MIB_outOampduPkts,
+ RTL8365MB_MIB_inOampduPkts,
+ RTL8365MB_MIB_inIgmpJoinsSuccess,
+ RTL8365MB_MIB_inIgmpJoinsFail,
+ RTL8365MB_MIB_inMldJoinsSuccess,
+ RTL8365MB_MIB_inMldJoinsFail,
+ RTL8365MB_MIB_inReportSuppressionDrop,
+ RTL8365MB_MIB_inLeaveSuppressionDrop,
+ RTL8365MB_MIB_outIgmpReports,
+ RTL8365MB_MIB_outIgmpLeaves,
+ RTL8365MB_MIB_outIgmpGeneralQuery,
+ RTL8365MB_MIB_outIgmpSpecificQuery,
+ RTL8365MB_MIB_outMldReports,
+ RTL8365MB_MIB_outMldLeaves,
+ RTL8365MB_MIB_outMldGeneralQuery,
+ RTL8365MB_MIB_outMldSpecificQuery,
+ RTL8365MB_MIB_inKnownMulticastPkts,
+ RTL8365MB_MIB_END,
+};
+
+struct rtl8365mb_mib_counter {
+ u32 offset;
+ u32 length;
+ const char *name;
+};
+
+#define RTL8365MB_MAKE_MIB_COUNTER(_offset, _length, _name) \
+ [RTL8365MB_MIB_ ## _name] = { _offset, _length, #_name }
+
+static struct rtl8365mb_mib_counter rtl8365mb_mib_counters[] = {
+ RTL8365MB_MAKE_MIB_COUNTER(0, 4, ifInOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(4, 2, dot3StatsFCSErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(6, 2, dot3StatsSymbolErrors),
+ RTL8365MB_MAKE_MIB_COUNTER(8, 2, dot3InPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(10, 2, dot3ControlInUnknownOpcodes),
+ RTL8365MB_MAKE_MIB_COUNTER(12, 2, etherStatsFragments),
+ RTL8365MB_MAKE_MIB_COUNTER(14, 2, etherStatsJabbers),
+ RTL8365MB_MAKE_MIB_COUNTER(16, 2, ifInUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(18, 2, etherStatsDropEvents),
+ RTL8365MB_MAKE_MIB_COUNTER(20, 2, ifInMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(22, 2, ifInBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(24, 2, inMldChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(26, 2, inIgmpChecksumError),
+ RTL8365MB_MAKE_MIB_COUNTER(28, 2, inMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(30, 2, inMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(32, 2, inIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(34, 2, inIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(36, 2, inMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(38, 2, inIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(40, 4, etherStatsOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(44, 2, etherStatsUnderSizePkts),
+ RTL8365MB_MAKE_MIB_COUNTER(46, 2, etherOversizeStats),
+ RTL8365MB_MAKE_MIB_COUNTER(48, 2, etherStatsPkts64Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(50, 2, etherStatsPkts65to127Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(52, 2, etherStatsPkts128to255Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(54, 2, etherStatsPkts256to511Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(56, 2, etherStatsPkts512to1023Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(58, 2, etherStatsPkts1024to1518Octets),
+ RTL8365MB_MAKE_MIB_COUNTER(60, 4, ifOutOctets),
+ RTL8365MB_MAKE_MIB_COUNTER(64, 2, dot3StatsSingleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(66, 2, dot3StatsMultipleCollisionFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(68, 2, dot3StatsDeferredTransmissions),
+ RTL8365MB_MAKE_MIB_COUNTER(70, 2, dot3StatsLateCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(72, 2, etherStatsCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(74, 2, dot3StatsExcessiveCollisions),
+ RTL8365MB_MAKE_MIB_COUNTER(76, 2, dot3OutPauseFrames),
+ RTL8365MB_MAKE_MIB_COUNTER(78, 2, ifOutDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(80, 2, dot1dTpPortInDiscards),
+ RTL8365MB_MAKE_MIB_COUNTER(82, 2, ifOutUcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(84, 2, ifOutMulticastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(86, 2, ifOutBroadcastPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(88, 2, outOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(90, 2, inOampduPkts),
+ RTL8365MB_MAKE_MIB_COUNTER(92, 4, inIgmpJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(96, 2, inIgmpJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(98, 2, inMldJoinsSuccess),
+ RTL8365MB_MAKE_MIB_COUNTER(100, 2, inMldJoinsFail),
+ RTL8365MB_MAKE_MIB_COUNTER(102, 2, inReportSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(104, 2, inLeaveSuppressionDrop),
+ RTL8365MB_MAKE_MIB_COUNTER(106, 2, outIgmpReports),
+ RTL8365MB_MAKE_MIB_COUNTER(108, 2, outIgmpLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(110, 2, outIgmpGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(112, 2, outIgmpSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(114, 2, outMldReports),
+ RTL8365MB_MAKE_MIB_COUNTER(116, 2, outMldLeaves),
+ RTL8365MB_MAKE_MIB_COUNTER(118, 2, outMldGeneralQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(120, 2, outMldSpecificQuery),
+ RTL8365MB_MAKE_MIB_COUNTER(122, 2, inKnownMulticastPkts),
+};
+
+static_assert(ARRAY_SIZE(rtl8365mb_mib_counters) == RTL8365MB_MIB_END);
+
+struct rtl8365mb_jam_tbl_entry {
+ u16 reg;
+ u16 val;
+};
+
+/* Lifted from the vendor driver sources */
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_8365mb_vc[] = {
+ { 0x13EB, 0x15BB }, { 0x1303, 0x06D6 }, { 0x1304, 0x0700 },
+ { 0x13E2, 0x003F }, { 0x13F9, 0x0090 }, { 0x121E, 0x03CA },
+ { 0x1233, 0x0352 }, { 0x1237, 0x00A0 }, { 0x123A, 0x0030 },
+ { 0x1239, 0x0084 }, { 0x0301, 0x1000 }, { 0x1349, 0x001F },
+ { 0x18E0, 0x4004 }, { 0x122B, 0x241C }, { 0x1305, 0xC000 },
+ { 0x13F0, 0x0000 },
+};
+
+static const struct rtl8365mb_jam_tbl_entry rtl8365mb_init_jam_common[] = {
+ { 0x1200, 0x7FCB }, { 0x0884, 0x0003 }, { 0x06EB, 0x0001 },
+ { 0x03Fa, 0x0007 }, { 0x08C8, 0x00C0 }, { 0x0A30, 0x020E },
+ { 0x0800, 0x0000 }, { 0x0802, 0x0000 }, { 0x09DA, 0x0013 },
+ { 0x1D32, 0x0002 },
+};
+
+enum rtl8365mb_stp_state {
+ RTL8365MB_STP_STATE_DISABLED = 0,
+ RTL8365MB_STP_STATE_BLOCKING = 1,
+ RTL8365MB_STP_STATE_LEARNING = 2,
+ RTL8365MB_STP_STATE_FORWARDING = 3,
+};
+
+enum rtl8365mb_cpu_insert {
+ RTL8365MB_CPU_INSERT_TO_ALL = 0,
+ RTL8365MB_CPU_INSERT_TO_TRAPPING = 1,
+ RTL8365MB_CPU_INSERT_TO_NONE = 2,
+};
+
+enum rtl8365mb_cpu_position {
+ RTL8365MB_CPU_POS_AFTER_SA = 0,
+ RTL8365MB_CPU_POS_BEFORE_CRC = 1,
+};
+
+enum rtl8365mb_cpu_format {
+ RTL8365MB_CPU_FORMAT_8BYTES = 0,
+ RTL8365MB_CPU_FORMAT_4BYTES = 1,
+};
+
+enum rtl8365mb_cpu_rxlen {
+ RTL8365MB_CPU_RXLEN_72BYTES = 0,
+ RTL8365MB_CPU_RXLEN_64BYTES = 1,
+};
+
+/**
+ * struct rtl8365mb_cpu - CPU port configuration
+ * @enable: enable/disable hardware insertion of CPU tag in switch->CPU frames
+ * @mask: port mask of ports that should parse CPU tags
+ * @trap_port: forward trapped frames to this port
+ * @insert: CPU tag insertion mode in switch->CPU frames
+ * @position: position of CPU tag in frame
+ * @rx_length: minimum CPU RX length
+ * @format: CPU tag format
+ *
+ * Represents the CPU tagging and CPU port configuration of the switch. These
+ * settings are configurable at runtime.
+ */
+struct rtl8365mb_cpu {
+ bool enable;
+ u32 mask;
+ u32 trap_port;
+ enum rtl8365mb_cpu_insert insert;
+ enum rtl8365mb_cpu_position position;
+ enum rtl8365mb_cpu_rxlen rx_length;
+ enum rtl8365mb_cpu_format format;
+};
+
+/**
+ * struct rtl8365mb_port - private per-port data
+ * @smi: pointer to parent realtek_smi data
+ * @index: DSA port index, same as dsa_port::index
+ * @stats: link statistics populated by rtl8365mb_stats_poll, ready for atomic
+ * access via rtl8365mb_get_stats64
+ * @stats_lock: protect the stats structure during read/update
+ * @mib_work: delayed work for polling MIB counters
+ */
+struct rtl8365mb_port {
+ struct realtek_smi *smi;
+ unsigned int index;
+ struct rtnl_link_stats64 stats;
+ spinlock_t stats_lock;
+ struct delayed_work mib_work;
+};
+
+/**
+ * struct rtl8365mb - private chip-specific driver data
+ * @smi: pointer to parent realtek_smi data
+ * @irq: registered IRQ or zero
+ * @chip_id: chip identifier
+ * @chip_ver: chip silicon revision
+ * @port_mask: mask of all ports
+ * @learn_limit_max: maximum number of L2 addresses the chip can learn
+ * @cpu: CPU tagging and CPU port configuration for this chip
+ * @mib_lock: prevent concurrent reads of MIB counters
+ * @ports: per-port data
+ * @jam_table: chip-specific initialization jam table
+ * @jam_size: size of the chip's jam table
+ *
+ * Private data for this driver.
+ */
+struct rtl8365mb {
+ struct realtek_smi *smi;
+ int irq;
+ u32 chip_id;
+ u32 chip_ver;
+ u32 port_mask;
+ u32 learn_limit_max;
+ struct rtl8365mb_cpu cpu;
+ struct mutex mib_lock;
+ struct rtl8365mb_port ports[RTL8365MB_MAX_NUM_PORTS];
+ const struct rtl8365mb_jam_tbl_entry *jam_table;
+ size_t jam_size;
+};
+
+static int rtl8365mb_phy_poll_busy(struct realtek_smi *smi)
+{
+ u32 val;
+
+ return regmap_read_poll_timeout(smi->map,
+ RTL8365MB_INDIRECT_ACCESS_STATUS_REG,
+ val, !val, 10, 100);
+}
+
+static int rtl8365mb_phy_ocp_prepare(struct realtek_smi *smi, int phy,
+ u32 ocp_addr)
+{
+ u32 val;
+ int ret;
+
+ /* Set OCP prefix */
+ val = FIELD_GET(RTL8365MB_PHY_OCP_ADDR_PREFIX_MASK, ocp_addr);
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_GPHY_OCP_MSB_0_REG,
+ RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK,
+ FIELD_PREP(RTL8365MB_GPHY_OCP_MSB_0_CFG_CPU_OCPADR_MASK, val));
+ if (ret)
+ return ret;
+
+ /* Set PHY register address */
+ val = RTL8365MB_PHY_BASE;
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_PHYNUM_MASK, phy);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_5_1_MASK,
+ ocp_addr >> 1);
+ val |= FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_ADDRESS_OCPADR_9_6_MASK,
+ ocp_addr >> 6);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_ADDRESS_REG,
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_read(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 *data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Execute read operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_READ);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ /* Get PHY register data */
+ ret = regmap_read(smi->map, RTL8365MB_INDIRECT_ACCESS_READ_DATA_REG,
+ &val);
+ if (ret)
+ return ret;
+
+ *data = val & 0xFFFF;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_ocp_write(struct realtek_smi *smi, int phy,
+ u32 ocp_addr, u16 data)
+{
+ u32 val;
+ int ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_ocp_prepare(smi, phy, ocp_addr);
+ if (ret)
+ return ret;
+
+ /* Set PHY register data */
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_WRITE_DATA_REG,
+ data);
+ if (ret)
+ return ret;
+
+ /* Execute write operation */
+ val = FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_CMD_VALUE) |
+ FIELD_PREP(RTL8365MB_INDIRECT_ACCESS_CTRL_RW_MASK,
+ RTL8365MB_INDIRECT_ACCESS_CTRL_RW_WRITE);
+ ret = regmap_write(smi->map, RTL8365MB_INDIRECT_ACCESS_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ ret = rtl8365mb_phy_poll_busy(smi);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_phy_read(struct realtek_smi *smi, int phy, int regnum)
+{
+ u32 ocp_addr;
+ u16 val;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_read(smi, phy, ocp_addr, &val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "read PHY%d register 0x%02x @ %04x, val <- %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return val;
+}
+
+static int rtl8365mb_phy_write(struct realtek_smi *smi, int phy, int regnum,
+ u16 val)
+{
+ u32 ocp_addr;
+ int ret;
+
+ if (regnum > RTL8365MB_PHYREGMAX)
+ return -EINVAL;
+
+ ocp_addr = RTL8365MB_PHY_OCP_ADDR_PHYREG_BASE + regnum * 2;
+
+ ret = rtl8365mb_phy_ocp_write(smi, phy, ocp_addr, val);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to write PHY%d reg %02x @ %04x, ret %d\n", phy,
+ regnum, ocp_addr, ret);
+ return ret;
+ }
+
+ dev_dbg(smi->dev, "write PHY%d register 0x%02x @ %04x, val -> %04x\n",
+ phy, regnum, ocp_addr, val);
+
+ return 0;
+}
+
+static enum dsa_tag_protocol
+rtl8365mb_get_tag_protocol(struct dsa_switch *ds, int port,
+ enum dsa_tag_protocol mp)
+{
+ return DSA_TAG_PROTO_RTL8_4;
+}
+
+static int rtl8365mb_ext_config_rgmii(struct realtek_smi *smi, int port,
+ phy_interface_t interface)
+{
+ struct device_node *dn;
+ struct dsa_port *dp;
+ int tx_delay = 0;
+ int rx_delay = 0;
+ int ext_port;
+ u32 val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ dp = dsa_to_port(smi->ds, port);
+ dn = dp->dn;
+
+ /* Set the RGMII TX/RX delay
+ *
+ * The Realtek vendor driver indicates the following possible
+ * configuration settings:
+ *
+ * TX delay:
+ * 0 = no delay, 1 = 2 ns delay
+ * RX delay:
+ * 0 = no delay, 7 = maximum delay
+ * No units are specified, but there are a total of 8 steps.
+ *
+ * The vendor driver also states that this must be configured *before*
+ * forcing the external interface into a particular mode, which is done
+ * in the rtl8365mb_phylink_mac_link_{up,down} functions.
+ *
+ * Only configure an RGMII TX (resp. RX) delay if the
+ * tx-internal-delay-ps (resp. rx-internal-delay-ps) OF property is
+ * specified. We ignore the detail of the RGMII interface mode
+ * (RGMII_{RXID, TXID, etc.}), as this is considered to be a PHY-only
+ * property.
+ *
+ * For the RX delay, we assume that a register value of 4 corresponds to
+ * 2 ns. But this is just an educated guess, so ignore all other values
+ * to avoid too much confusion.
+ */
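+ /* For example, with the device tree properties tx-internal-delay-ps =
+ * <2000> and rx-internal-delay-ps = <2000>, the logic below yields
+ * tx_delay = 1 and rx_delay = 4.
+ */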
+ if (!of_property_read_u32(dn, "tx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ tx_delay = val / 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port TX delay must be 0 or 2 ns\n");
+ }
+
+ if (!of_property_read_u32(dn, "rx-internal-delay-ps", &val)) {
+ val = val / 1000; /* convert to ns */
+
+ if (val == 0 || val == 2)
+ rx_delay = val * 2;
+ else
+ dev_warn(smi->dev,
+ "EXT port RX delay must be 0 to 2 ns\n");
+ }
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_EXT_RGMXF_REG(ext_port),
+ RTL8365MB_EXT_RGMXF_TXDELAY_MASK |
+ RTL8365MB_EXT_RGMXF_RXDELAY_MASK,
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_TXDELAY_MASK, tx_delay) |
+ FIELD_PREP(RTL8365MB_EXT_RGMXF_RXDELAY_MASK, rx_delay));
+ if (ret)
+ return ret;
+
+ ret = regmap_update_bits(
+ smi->map, RTL8365MB_DIGITAL_INTERFACE_SELECT_REG(ext_port),
+ RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_MASK(ext_port),
+ RTL8365MB_EXT_PORT_MODE_RGMII
+ << RTL8365MB_DIGITAL_INTERFACE_SELECT_MODE_OFFSET(
+ ext_port));
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_ext_config_forcemode(struct realtek_smi *smi, int port,
+ bool link, int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ u32 r_tx_pause;
+ u32 r_rx_pause;
+ u32 r_duplex;
+ u32 r_speed;
+ u32 r_link;
+ int ext_port;
+ int val;
+ int ret;
+
+ if (port == smi->cpu_port) {
+ ext_port = 1;
+ } else {
+ dev_err(smi->dev, "only one EXT port is currently supported\n");
+ return -EINVAL;
+ }
+
+ if (link) {
+ /* Force the link up with the desired configuration */
+ r_link = 1;
+ r_rx_pause = rx_pause ? 1 : 0;
+ r_tx_pause = tx_pause ? 1 : 0;
+
+ if (speed == SPEED_1000) {
+ r_speed = RTL8365MB_PORT_SPEED_1000M;
+ } else if (speed == SPEED_100) {
+ r_speed = RTL8365MB_PORT_SPEED_100M;
+ } else if (speed == SPEED_10) {
+ r_speed = RTL8365MB_PORT_SPEED_10M;
+ } else {
+ dev_err(smi->dev, "unsupported port speed %s\n",
+ phy_speed_to_str(speed));
+ return -EINVAL;
+ }
+
+ if (duplex == DUPLEX_FULL) {
+ r_duplex = 1;
+ } else if (duplex == DUPLEX_HALF) {
+ r_duplex = 0;
+ } else {
+ dev_err(smi->dev, "unsupported duplex %s\n",
+ phy_duplex_to_str(duplex));
+ return -EINVAL;
+ }
+ } else {
+ /* Force the link down and reset any programmed configuration */
+ r_link = 0;
+ r_tx_pause = 0;
+ r_rx_pause = 0;
+ r_speed = 0;
+ r_duplex = 0;
+ }
+
+ val = FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_EN_MASK, 1) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_TXPAUSE_MASK,
+ r_tx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_RXPAUSE_MASK,
+ r_rx_pause) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_LINK_MASK, r_link) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_DUPLEX_MASK,
+ r_duplex) |
+ FIELD_PREP(RTL8365MB_DIGITAL_INTERFACE_FORCE_SPEED_MASK, r_speed);
+ ret = regmap_write(smi->map,
+ RTL8365MB_DIGITAL_INTERFACE_FORCE_REG(ext_port),
+ val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port,
+ phy_interface_t interface)
+{
+ if (dsa_is_user_port(ds, port) &&
+ (interface == PHY_INTERFACE_MODE_NA ||
+ interface == PHY_INTERFACE_MODE_INTERNAL))
+ /* Internal PHY */
+ return true;
+ else if (dsa_is_cpu_port(ds, port) &&
+ phy_interface_mode_is_rgmii(interface))
+ /* Extension MAC */
+ return true;
+
+ return false;
+}
+
+static void rtl8365mb_phylink_validate(struct dsa_switch *ds, int port,
+ unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0 };
+
+ /* include/linux/phylink.h says:
+ * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
+ * expects the MAC driver to return all supported link modes.
+ */
+ if (state->interface != PHY_INTERFACE_MODE_NA &&
+ !rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ linkmode_zero(supported);
+ return;
+ }
+
+ phylink_set_port_modes(mask);
+
+ phylink_set(mask, Autoneg);
+ phylink_set(mask, Pause);
+ phylink_set(mask, Asym_Pause);
+
+ phylink_set(mask, 10baseT_Half);
+ phylink_set(mask, 10baseT_Full);
+ phylink_set(mask, 100baseT_Half);
+ phylink_set(mask, 100baseT_Full);
+ phylink_set(mask, 1000baseT_Full);
+
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
+}
+
+static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) {
+ dev_err(smi->dev, "phy mode %s is unsupported on port %d\n",
+ phy_modes(state->interface), port);
+ return;
+ }
+
+ if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) {
+ dev_err(smi->dev,
+ "port %d supports only conventional PHY or fixed-link\n",
+ port);
+ return;
+ }
+
+ if (phy_interface_mode_is_rgmii(state->interface)) {
+ ret = rtl8365mb_ext_config_rgmii(smi, port, state->interface);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to configure RGMII mode on port %d: %d\n",
+ port, ret);
+ return;
+ }
+
+ /* TODO: Implement MII and RMII modes, which the RTL8365MB-VC also
+ * supports
+ */
+}
+
+static void rtl8365mb_phylink_mac_link_down(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ cancel_delayed_work_sync(&p->mib_work);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, false, 0, 0,
+ false, false);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to reset forced mode on port %d: %d\n",
+ port, ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_phylink_mac_link_up(struct dsa_switch *ds, int port,
+ unsigned int mode,
+ phy_interface_t interface,
+ struct phy_device *phydev, int speed,
+ int duplex, bool tx_pause,
+ bool rx_pause)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+ int ret;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+ schedule_delayed_work(&p->mib_work, 0);
+
+ if (phy_interface_mode_is_rgmii(interface)) {
+ ret = rtl8365mb_ext_config_forcemode(smi, port, true, speed,
+ duplex, tx_pause,
+ rx_pause);
+ if (ret)
+ dev_err(smi->dev,
+ "failed to force mode on port %d: %d\n", port,
+ ret);
+
+ return;
+ }
+}
+
+static void rtl8365mb_port_stp_state_set(struct dsa_switch *ds, int port,
+ u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ enum rtl8365mb_stp_state val;
+ int msti = 0;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8365MB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8365MB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8365MB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8365MB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "invalid STP state: %u\n", state);
+ return;
+ }
+
+ regmap_update_bits(smi->map, RTL8365MB_MSTI_CTRL_REG(msti, port),
+ RTL8365MB_MSTI_CTRL_PORT_STATE_MASK(port),
+ val << RTL8365MB_MSTI_CTRL_PORT_STATE_OFFSET(port));
+}
+
+static int rtl8365mb_port_set_learning(struct realtek_smi *smi, int port,
+ bool enable)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+
+ /* Enable/disable learning by limiting the number of L2 addresses the
+ * port can learn. Realtek documentation states that a limit of zero
+ * disables learning. When enabling learning, set it to the chip's
+ * maximum.
+ */
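+ /* On the RTL8365MB-VC, for instance, this means writing the chip maximum
+ * of 2112 addresses to enable learning, or 0 to disable it.
+ */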
+ return regmap_write(smi->map, RTL8365MB_LUT_PORT_LEARN_LIMIT_REG(port),
+ enable ? mb->learn_limit_max : 0);
+}
+
+static int rtl8365mb_port_set_isolation(struct realtek_smi *smi, int port,
+ u32 mask)
+{
+ return regmap_write(smi->map, RTL8365MB_PORT_ISOLATION_REG(port), mask);
+}
+
+static int rtl8365mb_mib_counter_read(struct realtek_smi *smi, int port,
+ u32 offset, u32 length, u64 *mibvalue)
+{
+ u64 tmpvalue = 0;
+ u32 val;
+ int ret;
+ int i;
+
+ /* The MIB address is an SRAM address. We request a particular address
+ * and then poll the control register before reading the value from some
+ * counter registers.
+ */
+ ret = regmap_write(smi->map, RTL8365MB_MIB_ADDRESS_REG,
+ RTL8365MB_MIB_ADDRESS(port, offset));
+ if (ret)
+ return ret;
+
+ /* Poll for completion */
+ ret = regmap_read_poll_timeout(smi->map, RTL8365MB_MIB_CTRL0_REG, val,
+ !(val & RTL8365MB_MIB_CTRL0_BUSY_MASK),
+ 10, 100);
+ if (ret)
+ return ret;
+
+ /* Presumably this indicates a MIB counter read failure */
+ if (val & RTL8365MB_MIB_CTRL0_RESET_MASK)
+ return -EIO;
+
+ /* There are four MIB counter registers each holding a 16 bit word of a
+ * MIB counter. Depending on the offset, we should read from the upper
+ * two or lower two registers. In case the MIB counter is 4 words, we
+ * read from all four registers.
+ */
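+ /* For instance, a two-word counter at byte offset 4 is read from counter
+ * registers 1 and 0, one at offset 6 from registers 3 and 2, and a
+ * four-word counter always from registers 3 down to 0.
+ */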
+ if (length == 4)
+ offset = 3;
+ else
+ offset = (offset + 1) % 4;
+
+ /* Read the MIB counter 16 bits at a time */
+ for (i = 0; i < length; i++) {
+ ret = regmap_read(smi->map,
+ RTL8365MB_MIB_COUNTER_REG(offset - i), &val);
+ if (ret)
+ return ret;
+
+ tmpvalue = ((tmpvalue) << 16) | (val & 0xFFFF);
+ }
+
+ /* Only commit the result if no error occurred */
+ *mibvalue = tmpvalue;
+
+ return 0;
+}
+
+static void rtl8365mb_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &data[i]);
+ if (ret) {
+ dev_err(smi->dev,
+ "failed to read port %d counters: %d\n", port,
+ ret);
+ break;
+ }
+ }
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data)
+{
+ int i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ strncpy(data + i * ETH_GSTRING_LEN, mib->name, ETH_GSTRING_LEN);
+ }
+}
+
+static int rtl8365mb_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EOPNOTSUPP;
+
+ return RTL8365MB_MIB_END;
+}
+
+static void rtl8365mb_get_phy_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_phy_stats *phy_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3StatsSymbolErrors];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &phy_stats->SymbolErrorDuringCarrier);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_get_mac_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_mac_stats *mac_stats)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3OutPauseFrames] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_dot3InPauseFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsSingleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsMultipleCollisionFrames] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsDeferredTransmissions] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ [RTL8365MB_MIB_dot3StatsExcessiveCollisions] = 1,
+
+ };
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *mib = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, mib->offset,
+ mib->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* The RTL8365MB-VC exposes MIB objects, which we have to translate into
+ * IEEE 802.3 Managed Objects. This is not always completely faithful,
+ * but we try our best. See RFC 3635 for a detailed treatment of the
+ * subject.
+ */
+
+ mac_stats->FramesTransmittedOK = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3OutPauseFrames] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+ mac_stats->SingleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsSingleCollisionFrames];
+ mac_stats->MultipleCollisionFrames =
+ cnt[RTL8365MB_MIB_dot3StatsMultipleCollisionFrames];
+ mac_stats->FramesReceivedOK = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] +
+ cnt[RTL8365MB_MIB_dot3InPauseFrames];
+ mac_stats->FrameCheckSequenceErrors =
+ cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ mac_stats->OctetsTransmittedOK = cnt[RTL8365MB_MIB_ifOutOctets] -
+ 18 * mac_stats->FramesTransmittedOK;
+ mac_stats->FramesWithDeferredXmissions =
+ cnt[RTL8365MB_MIB_dot3StatsDeferredTransmissions];
+ mac_stats->LateCollisions = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ mac_stats->FramesAbortedDueToXSColls =
+ cnt[RTL8365MB_MIB_dot3StatsExcessiveCollisions];
+ mac_stats->OctetsReceivedOK = cnt[RTL8365MB_MIB_ifInOctets] -
+ 18 * mac_stats->FramesReceivedOK;
+ mac_stats->MulticastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts];
+ mac_stats->BroadcastFramesXmittedOK =
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+ mac_stats->MulticastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ mac_stats->BroadcastFramesReceivedOK =
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts];
+}
+
+static void rtl8365mb_get_ctrl_stats(struct dsa_switch *ds, int port,
+ struct ethtool_eth_ctrl_stats *ctrl_stats)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_mib_counter *mib;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ mib = &rtl8365mb_mib_counters[RTL8365MB_MIB_dot3ControlInUnknownOpcodes];
+
+ mutex_lock(&mb->mib_lock);
+ rtl8365mb_mib_counter_read(smi, port, mib->offset, mib->length,
+ &ctrl_stats->UnsupportedOpcodesReceived);
+ mutex_unlock(&mb->mib_lock);
+}
+
+static void rtl8365mb_stats_update(struct realtek_smi *smi, int port)
+{
+ u64 cnt[RTL8365MB_MIB_END] = {
+ [RTL8365MB_MIB_ifOutOctets] = 1,
+ [RTL8365MB_MIB_ifOutUcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifOutBroadcastPkts] = 1,
+ [RTL8365MB_MIB_ifOutDiscards] = 1,
+ [RTL8365MB_MIB_ifInOctets] = 1,
+ [RTL8365MB_MIB_ifInUcastPkts] = 1,
+ [RTL8365MB_MIB_ifInMulticastPkts] = 1,
+ [RTL8365MB_MIB_ifInBroadcastPkts] = 1,
+ [RTL8365MB_MIB_etherStatsDropEvents] = 1,
+ [RTL8365MB_MIB_etherStatsCollisions] = 1,
+ [RTL8365MB_MIB_etherStatsFragments] = 1,
+ [RTL8365MB_MIB_etherStatsJabbers] = 1,
+ [RTL8365MB_MIB_dot3StatsFCSErrors] = 1,
+ [RTL8365MB_MIB_dot3StatsLateCollisions] = 1,
+ };
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtnl_link_stats64 *stats;
+ int ret;
+ int i;
+
+ stats = &mb->ports[port].stats;
+
+ mutex_lock(&mb->mib_lock);
+ for (i = 0; i < RTL8365MB_MIB_END; i++) {
+ struct rtl8365mb_mib_counter *c = &rtl8365mb_mib_counters[i];
+
+ /* Only fetch required MIB counters (marked = 1 above) */
+ if (!cnt[i])
+ continue;
+
+ ret = rtl8365mb_mib_counter_read(smi, port, c->offset,
+ c->length, &cnt[i]);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&mb->mib_lock);
+
+ /* Don't update statistics if there was an error reading the counters */
+ if (ret)
+ return;
+
+ spin_lock(&mb->ports[port].stats_lock);
+
+ stats->rx_packets = cnt[RTL8365MB_MIB_ifInUcastPkts] +
+ cnt[RTL8365MB_MIB_ifInMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifInBroadcastPkts] -
+ cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->tx_packets = cnt[RTL8365MB_MIB_ifOutUcastPkts] +
+ cnt[RTL8365MB_MIB_ifOutMulticastPkts] +
+ cnt[RTL8365MB_MIB_ifOutBroadcastPkts];
+
+ /* if{In,Out}Octets includes FCS - remove it */
+ stats->rx_bytes = cnt[RTL8365MB_MIB_ifInOctets] - 4 * stats->rx_packets;
+ stats->tx_bytes =
+ cnt[RTL8365MB_MIB_ifOutOctets] - 4 * stats->tx_packets;
+
+ stats->rx_dropped = cnt[RTL8365MB_MIB_etherStatsDropEvents];
+ stats->tx_dropped = cnt[RTL8365MB_MIB_ifOutDiscards];
+
+ stats->multicast = cnt[RTL8365MB_MIB_ifInMulticastPkts];
+ stats->collisions = cnt[RTL8365MB_MIB_etherStatsCollisions];
+
+ stats->rx_length_errors = cnt[RTL8365MB_MIB_etherStatsFragments] +
+ cnt[RTL8365MB_MIB_etherStatsJabbers];
+ stats->rx_crc_errors = cnt[RTL8365MB_MIB_dot3StatsFCSErrors];
+ stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors;
+
+ stats->tx_aborted_errors = cnt[RTL8365MB_MIB_ifOutDiscards];
+ stats->tx_window_errors = cnt[RTL8365MB_MIB_dot3StatsLateCollisions];
+ stats->tx_errors = stats->tx_aborted_errors + stats->tx_window_errors;
+
+ spin_unlock(&mb->ports[port].stats_lock);
+}
+
+static void rtl8365mb_stats_poll(struct work_struct *work)
+{
+ struct rtl8365mb_port *p = container_of(to_delayed_work(work),
+ struct rtl8365mb_port,
+ mib_work);
+ struct realtek_smi *smi = p->smi;
+
+ rtl8365mb_stats_update(smi, p->index);
+
+ schedule_delayed_work(&p->mib_work, RTL8365MB_STATS_INTERVAL_JIFFIES);
+}
+
+static void rtl8365mb_get_stats64(struct dsa_switch *ds, int port,
+ struct rtnl_link_stats64 *s)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb_port *p;
+ struct rtl8365mb *mb;
+
+ mb = smi->chip_data;
+ p = &mb->ports[port];
+
+ spin_lock(&p->stats_lock);
+ memcpy(s, &p->stats, sizeof(*s));
+ spin_unlock(&p->stats_lock);
+}
+
+static void rtl8365mb_stats_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ /* Per-chip global mutex to protect MIB counter access, since doing
+ * so requires accessing a series of registers in a particular order.
+ */
+ mutex_init(&mb->mib_lock);
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Per-port spinlock to protect the stats64 data */
+ spin_lock_init(&p->stats_lock);
+
+ /* This work polls the MIB counters and keeps the stats64 data
+ * up-to-date.
+ */
+ INIT_DELAYED_WORK(&p->mib_work, rtl8365mb_stats_poll);
+ }
+}
+
+static void rtl8365mb_stats_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int i;
+
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ cancel_delayed_work_sync(&p->mib_work);
+ }
+}
+
+static int rtl8365mb_get_and_clear_status_reg(struct realtek_smi *smi, u32 reg,
+ u32 *val)
+{
+ int ret;
+
+ ret = regmap_read(smi->map, reg, val);
+ if (ret)
+ return ret;
+
+ return regmap_write(smi->map, reg, *val);
+}
+
+static irqreturn_t rtl8365mb_irq(int irq, void *data)
+{
+ struct realtek_smi *smi = data;
+ unsigned long line_changes = 0;
+ struct rtl8365mb *mb;
+ u32 stat;
+ int line;
+ int ret;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_get_and_clear_status_reg(smi, RTL8365MB_INTR_STATUS_REG,
+ &stat);
+ if (ret)
+ goto out_error;
+
+ if (stat & RTL8365MB_INTR_LINK_CHANGE_MASK) {
+ u32 linkdown_ind;
+ u32 linkup_ind;
+ u32 val;
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKUP_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkup_ind = FIELD_GET(RTL8365MB_PORT_LINKUP_IND_MASK, val);
+
+ ret = rtl8365mb_get_and_clear_status_reg(
+ smi, RTL8365MB_PORT_LINKDOWN_IND_REG, &val);
+ if (ret)
+ goto out_error;
+
+ linkdown_ind = FIELD_GET(RTL8365MB_PORT_LINKDOWN_IND_MASK, val);
+
+ line_changes = (linkup_ind | linkdown_ind) & mb->port_mask;
+ }
+
+ if (!line_changes)
+ goto out_none;
+
+ for_each_set_bit(line, &line_changes, smi->num_ports) {
+ int child_irq = irq_find_mapping(smi->irqdomain, line);
+
+ handle_nested_irq(child_irq);
+ }
+
+ return IRQ_HANDLED;
+
+out_error:
+ dev_err(smi->dev, "failed to read interrupt status: %d\n", ret);
+
+out_none:
+ return IRQ_NONE;
+}
+
+static struct irq_chip rtl8365mb_irq_chip = {
+ .name = "rtl8365mb",
+ /* The hardware doesn't support masking IRQs on a per-port basis */
+};
+
+static int rtl8365mb_irq_map(struct irq_domain *domain, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ irq_set_chip_data(irq, domain->host_data);
+ irq_set_chip_and_handler(irq, &rtl8365mb_irq_chip, handle_simple_irq);
+ irq_set_nested_thread(irq, 1);
+ irq_set_noprobe(irq);
+
+ return 0;
+}
+
+static void rtl8365mb_irq_unmap(struct irq_domain *d, unsigned int irq)
+{
+ irq_set_nested_thread(irq, 0);
+ irq_set_chip_and_handler(irq, NULL, NULL);
+ irq_set_chip_data(irq, NULL);
+}
+
+static const struct irq_domain_ops rtl8365mb_irqdomain_ops = {
+ .map = rtl8365mb_irq_map,
+ .unmap = rtl8365mb_irq_unmap,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int rtl8365mb_set_irq_enable(struct realtek_smi *smi, bool enable)
+{
+ return regmap_update_bits(smi->map, RTL8365MB_INTR_CTRL_REG,
+ RTL8365MB_INTR_LINK_CHANGE_MASK,
+ FIELD_PREP(RTL8365MB_INTR_LINK_CHANGE_MASK,
+ enable ? 1 : 0));
+}
+
+static int rtl8365mb_irq_enable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, true);
+}
+
+static int rtl8365mb_irq_disable(struct realtek_smi *smi)
+{
+ return rtl8365mb_set_irq_enable(smi, false);
+}
+
+static int rtl8365mb_irq_setup(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct device_node *intc;
+ u32 irq_trig;
+ int virq;
+ int irq;
+ u32 val;
+ int ret;
+ int i;
+
+ intc = of_get_child_by_name(smi->dev->of_node, "interrupt-controller");
+ if (!intc) {
+ dev_err(smi->dev, "missing child interrupt-controller node\n");
+ return -EINVAL;
+ }
+
+ /* rtl8365mb IRQs cascade off this one */
+ irq = of_irq_get(intc, 0);
+ if (irq <= 0) {
+ if (irq != -EPROBE_DEFER)
+ dev_err(smi->dev, "failed to get parent irq: %d\n",
+ irq);
+ ret = irq ? irq : -EINVAL;
+ goto out_put_node;
+ }
+
+ smi->irqdomain = irq_domain_add_linear(intc, smi->num_ports,
+ &rtl8365mb_irqdomain_ops, smi);
+ if (!smi->irqdomain) {
+ dev_err(smi->dev, "failed to add irq domain\n");
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_create_mapping(smi->irqdomain, i);
+ if (!virq) {
+ dev_err(smi->dev,
+ "failed to create irq domain mapping\n");
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ irq_set_parent(virq, irq);
+ }
+
+ /* Configure chip interrupt signal polarity */
+ irq_trig = irqd_get_trigger_type(irq_get_irq_data(irq));
+ switch (irq_trig) {
+ case IRQF_TRIGGER_RISING:
+ case IRQF_TRIGGER_HIGH:
+ val = RTL8365MB_INTR_POLARITY_HIGH;
+ break;
+ case IRQF_TRIGGER_FALLING:
+ case IRQF_TRIGGER_LOW:
+ val = RTL8365MB_INTR_POLARITY_LOW;
+ break;
+ default:
+ dev_err(smi->dev, "unsupported irq trigger type %u\n",
+ irq_trig);
+ ret = -EINVAL;
+ goto out_remove_irqdomain;
+ }
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_INTR_POLARITY_REG,
+ RTL8365MB_INTR_POLARITY_MASK,
+ FIELD_PREP(RTL8365MB_INTR_POLARITY_MASK, val));
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Disable the interrupt in case the chip has it enabled on reset */
+ ret = rtl8365mb_irq_disable(smi);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ /* Clear the interrupt status register */
+ ret = regmap_write(smi->map, RTL8365MB_INTR_STATUS_REG,
+ RTL8365MB_INTR_ALL_MASK);
+ if (ret)
+ goto out_remove_irqdomain;
+
+ ret = request_threaded_irq(irq, NULL, rtl8365mb_irq, IRQF_ONESHOT,
+ "rtl8365mb", smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to request irq: %d\n", ret);
+ goto out_remove_irqdomain;
+ }
+
+ /* Store the irq so that we know to free it during teardown */
+ mb->irq = irq;
+
+ ret = rtl8365mb_irq_enable(smi);
+ if (ret)
+ goto out_free_irq;
+
+ of_node_put(intc);
+
+ return 0;
+
+out_free_irq:
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+
+out_remove_irqdomain:
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+
+out_put_node:
+ of_node_put(intc);
+
+ return ret;
+}
+
+static void rtl8365mb_irq_teardown(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int virq;
+ int i;
+
+ if (mb->irq) {
+ free_irq(mb->irq, smi);
+ mb->irq = 0;
+ }
+
+ if (smi->irqdomain) {
+ for (i = 0; i < smi->num_ports; i++) {
+ virq = irq_find_mapping(smi->irqdomain, i);
+ irq_dispose_mapping(virq);
+ }
+
+ irq_domain_remove(smi->irqdomain);
+ smi->irqdomain = NULL;
+ }
+}
+
+static int rtl8365mb_cpu_config(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ struct rtl8365mb_cpu *cpu = &mb->cpu;
+ u32 val;
+ int ret;
+
+ ret = regmap_update_bits(smi->map, RTL8365MB_CPU_PORT_MASK_REG,
+ RTL8365MB_CPU_PORT_MASK_MASK,
+ FIELD_PREP(RTL8365MB_CPU_PORT_MASK_MASK,
+ cpu->mask));
+ if (ret)
+ return ret;
+
+ val = FIELD_PREP(RTL8365MB_CPU_CTRL_EN_MASK, cpu->enable ? 1 : 0) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_INSERTMODE_MASK, cpu->insert) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_POSITION_MASK, cpu->position) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_RXBYTECOUNT_MASK, cpu->rx_length) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TAG_FORMAT_MASK, cpu->format) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_MASK, cpu->trap_port) |
+ FIELD_PREP(RTL8365MB_CPU_CTRL_TRAP_PORT_EXT_MASK,
+ cpu->trap_port >> 3);
+ ret = regmap_write(smi->map, RTL8365MB_CPU_CTRL_REG, val);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_switch_init(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ int ret;
+ int i;
+
+ /* Do any chip-specific init jam before getting to the common stuff */
+ if (mb->jam_table) {
+ for (i = 0; i < mb->jam_size; i++) {
+ ret = regmap_write(smi->map, mb->jam_table[i].reg,
+ mb->jam_table[i].val);
+ if (ret)
+ return ret;
+ }
+ }
+
+ /* Common init jam */
+ for (i = 0; i < ARRAY_SIZE(rtl8365mb_init_jam_common); i++) {
+ ret = regmap_write(smi->map, rtl8365mb_init_jam_common[i].reg,
+ rtl8365mb_init_jam_common[i].val);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtl8365mb_reset_chip(struct realtek_smi *smi)
+{
+ u32 val;
+
+ realtek_smi_write_reg_noack(smi, RTL8365MB_CHIP_RESET_REG,
+ FIELD_PREP(RTL8365MB_CHIP_RESET_HW_MASK,
+ 1));
+
+ /* Realtek documentation says the chip needs 1 second to reset. Sleep
+ * for 100 ms before accessing any registers to prevent ACK timeouts.
+ */
+ msleep(100);
+ return regmap_read_poll_timeout(smi->map, RTL8365MB_CHIP_RESET_REG, val,
+ !(val & RTL8365MB_CHIP_RESET_HW_MASK),
+ 20000, 1e6);
+}
+
+static int rtl8365mb_setup(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8365mb *mb;
+ int ret;
+ int i;
+
+ mb = smi->chip_data;
+
+ ret = rtl8365mb_reset_chip(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to reset chip: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Configure switch to vendor-defined initial state */
+ ret = rtl8365mb_switch_init(smi);
+ if (ret) {
+ dev_err(smi->dev, "failed to initialize switch: %d\n", ret);
+ goto out_error;
+ }
+
+ /* Set up cascading IRQs */
+ ret = rtl8365mb_irq_setup(smi);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ else if (ret)
+ dev_info(smi->dev, "no interrupt support\n");
+
+ /* Configure CPU tagging */
+ ret = rtl8365mb_cpu_config(smi);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Configure ports */
+ for (i = 0; i < smi->num_ports; i++) {
+ struct rtl8365mb_port *p = &mb->ports[i];
+
+ if (dsa_is_unused_port(smi->ds, i))
+ continue;
+
+ /* Set up per-port private data */
+ p->smi = smi;
+ p->index = i;
+
+ /* Forward only to the CPU */
+ ret = rtl8365mb_port_set_isolation(smi, i, BIT(smi->cpu_port));
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Disable learning */
+ ret = rtl8365mb_port_set_learning(smi, i, false);
+ if (ret)
+ goto out_teardown_irq;
+
+ /* Set the initial STP state of all ports to DISABLED, otherwise
+ * ports will still forward frames to the CPU despite being
+ * administratively down by default.
+ */
+ rtl8365mb_port_stp_state_set(smi->ds, i, BR_STATE_DISABLED);
+ }
+
+ /* Set maximum packet length to 1536 bytes */
+ ret = regmap_update_bits(smi->map, RTL8365MB_CFG0_MAX_LEN_REG,
+ RTL8365MB_CFG0_MAX_LEN_MASK,
+ FIELD_PREP(RTL8365MB_CFG0_MAX_LEN_MASK, 1536));
+ if (ret)
+ goto out_teardown_irq;
+
+ ret = realtek_smi_setup_mdio(smi);
+ if (ret) {
+ dev_err(smi->dev, "could not set up MDIO bus\n");
+ goto out_teardown_irq;
+ }
+
+ /* Start statistics counter polling */
+ rtl8365mb_stats_setup(smi);
+
+ return 0;
+
+out_teardown_irq:
+ rtl8365mb_irq_teardown(smi);
+
+out_error:
+ return ret;
+}
+
+static void rtl8365mb_teardown(struct dsa_switch *ds)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ rtl8365mb_stats_teardown(smi);
+ rtl8365mb_irq_teardown(smi);
+}
+
+static int rtl8365mb_get_chip_id_and_ver(struct regmap *map, u32 *id, u32 *ver)
+{
+ int ret;
+
+ /* For some reason we have to write a magic value to an arbitrary
+ * register whenever accessing the chip ID/version registers.
+ */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, RTL8365MB_MAGIC_VALUE);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_ID_REG, id);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(map, RTL8365MB_CHIP_VER_REG, ver);
+ if (ret)
+ return ret;
+
+ /* Reset magic register */
+ ret = regmap_write(map, RTL8365MB_MAGIC_REG, 0);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtl8365mb_detect(struct realtek_smi *smi)
+{
+ struct rtl8365mb *mb = smi->chip_data;
+ u32 chip_id;
+ u32 chip_ver;
+ int ret;
+
+ ret = rtl8365mb_get_chip_id_and_ver(smi->map, &chip_id, &chip_ver);
+ if (ret) {
+ dev_err(smi->dev, "failed to read chip id and version: %d\n",
+ ret);
+ return ret;
+ }
+
+ switch (chip_id) {
+ case RTL8365MB_CHIP_ID_8365MB_VC:
+ dev_info(smi->dev,
+ "found an RTL8365MB-VC switch (ver=0x%04x)\n",
+ chip_ver);
+
+ smi->cpu_port = RTL8365MB_CPU_PORT_NUM_8365MB_VC;
+ smi->num_ports = smi->cpu_port + 1;
+
+ mb->smi = smi;
+ mb->chip_id = chip_id;
+ mb->chip_ver = chip_ver;
+ mb->port_mask = BIT(smi->num_ports) - 1;
+ mb->learn_limit_max = RTL8365MB_LEARN_LIMIT_MAX_8365MB_VC;
+ mb->jam_table = rtl8365mb_init_jam_8365mb_vc;
+ mb->jam_size = ARRAY_SIZE(rtl8365mb_init_jam_8365mb_vc);
+
+ mb->cpu.enable = 1;
+ mb->cpu.mask = BIT(smi->cpu_port);
+ mb->cpu.trap_port = smi->cpu_port;
+ mb->cpu.insert = RTL8365MB_CPU_INSERT_TO_ALL;
+ mb->cpu.position = RTL8365MB_CPU_POS_AFTER_SA;
+ mb->cpu.rx_length = RTL8365MB_CPU_RXLEN_64BYTES;
+ mb->cpu.format = RTL8365MB_CPU_FORMAT_8BYTES;
+
+ break;
+ default:
+ dev_err(smi->dev,
+ "found an unknown Realtek switch (id=0x%04x, ver=0x%04x)\n",
+ chip_id, chip_ver);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static const struct dsa_switch_ops rtl8365mb_switch_ops = {
+ .get_tag_protocol = rtl8365mb_get_tag_protocol,
+ .setup = rtl8365mb_setup,
+ .teardown = rtl8365mb_teardown,
+ .phylink_validate = rtl8365mb_phylink_validate,
+ .phylink_mac_config = rtl8365mb_phylink_mac_config,
+ .phylink_mac_link_down = rtl8365mb_phylink_mac_link_down,
+ .phylink_mac_link_up = rtl8365mb_phylink_mac_link_up,
+ .port_stp_state_set = rtl8365mb_port_stp_state_set,
+ .get_strings = rtl8365mb_get_strings,
+ .get_ethtool_stats = rtl8365mb_get_ethtool_stats,
+ .get_sset_count = rtl8365mb_get_sset_count,
+ .get_eth_phy_stats = rtl8365mb_get_phy_stats,
+ .get_eth_mac_stats = rtl8365mb_get_mac_stats,
+ .get_eth_ctrl_stats = rtl8365mb_get_ctrl_stats,
+ .get_stats64 = rtl8365mb_get_stats64,
+};
+
+static const struct realtek_smi_ops rtl8365mb_smi_ops = {
+ .detect = rtl8365mb_detect,
+ .phy_read = rtl8365mb_phy_read,
+ .phy_write = rtl8365mb_phy_write,
+};
+
+const struct realtek_smi_variant rtl8365mb_variant = {
+ .ds_ops = &rtl8365mb_switch_ops,
+ .ops = &rtl8365mb_smi_ops,
+ .clk_delay = 10,
+ .cmd_read = 0xb9,
+ .cmd_write = 0xb8,
+ .chip_data_sz = sizeof(struct rtl8365mb),
+};
+EXPORT_SYMBOL_GPL(rtl8365mb_variant);
diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c
index 75897a369096..bdb8d8d34880 100644
--- a/drivers/net/dsa/rtl8366.c
+++ b/drivers/net/dsa/rtl8366.c
@@ -292,89 +292,6 @@ int rtl8366_reset_vlan(struct realtek_smi *smi)
}
EXPORT_SYMBOL_GPL(rtl8366_reset_vlan);
-int rtl8366_init_vlan(struct realtek_smi *smi)
-{
- int port;
- int ret;
-
- ret = rtl8366_reset_vlan(smi);
- if (ret)
- return ret;
-
- /* Loop over the available ports, for each port, associate
- * it with the VLAN (port+1)
- */
- for (port = 0; port < smi->num_ports; port++) {
- u32 mask;
-
- if (port == smi->cpu_port)
- /* For the CPU port, make all ports members of this
- * VLAN.
- */
- mask = GENMASK((int)smi->num_ports - 1, 0);
- else
- /* For all other ports, enable itself plus the
- * CPU port.
- */
- mask = BIT(port) | BIT(smi->cpu_port);
-
- /* For each port, set the port as member of VLAN (port+1)
- * and untagged, except for the CPU port: the CPU port (5) is
- * member of VLAN 6 and so are ALL the other ports as well.
- * Use filter 0 (no filter).
- */
- dev_info(smi->dev, "VLAN%d port mask for port %d, %08x\n",
- (port + 1), port, mask);
- ret = rtl8366_set_vlan(smi, (port + 1), mask, mask, 0);
- if (ret)
- return ret;
-
- dev_info(smi->dev, "VLAN%d port %d, PVID set to %d\n",
- (port + 1), port, (port + 1));
- ret = rtl8366_set_pvid(smi, port, (port + 1));
- if (ret)
- return ret;
- }
-
- return rtl8366_enable_vlan(smi, true);
-}
-EXPORT_SYMBOL_GPL(rtl8366_init_vlan);
-
-int rtl8366_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering,
- struct netlink_ext_ack *extack)
-{
- struct realtek_smi *smi = ds->priv;
- struct rtl8366_vlan_4k vlan4k;
- int ret;
-
- /* Use VLAN nr port + 1 since VLAN0 is not valid */
- if (!smi->ops->is_vlan_valid(smi, port + 1))
- return -EINVAL;
-
- dev_info(smi->dev, "%s filtering on port %d\n",
- vlan_filtering ? "enable" : "disable",
- port);
-
- /* TODO:
- * The hardware support filter ID (FID) 0..7, I have no clue how to
- * support this in the driver when the callback only says on/off.
- */
- ret = smi->ops->get_vlan_4k(smi, port + 1, &vlan4k);
- if (ret)
- return ret;
-
- /* Just set the filter to FID 1 for now then */
- ret = rtl8366_set_vlan(smi, port + 1,
- vlan4k.member,
- vlan4k.untag,
- 1);
- if (ret)
- return ret;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rtl8366_vlan_filtering);
-
int rtl8366_vlan_add(struct dsa_switch *ds, int port,
const struct switchdev_obj_port_vlan *vlan,
struct netlink_ext_ack *extack)
@@ -401,12 +318,9 @@ int rtl8366_vlan_add(struct dsa_switch *ds, int port,
return ret;
}
- dev_info(smi->dev, "add VLAN %d on port %d, %s, %s\n",
- vlan->vid, port, untagged ? "untagged" : "tagged",
- pvid ? " PVID" : "no PVID");
-
- if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
- dev_err(smi->dev, "port is DSA or CPU port\n");
+ dev_dbg(smi->dev, "add VLAN %d on port %d, %s, %s\n",
+ vlan->vid, port, untagged ? "untagged" : "tagged",
+ pvid ? "PVID" : "no PVID");
member |= BIT(port);
@@ -439,7 +353,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
struct realtek_smi *smi = ds->priv;
int ret, i;
- dev_info(smi->dev, "del VLAN %04x on port %d\n", vlan->vid, port);
+ dev_dbg(smi->dev, "del VLAN %d on port %d\n", vlan->vid, port);
for (i = 0; i < smi->num_vlan_mc; i++) {
struct rtl8366_vlan_mc vlanmc;
@@ -457,7 +371,7 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port,
* anymore then clear the whole member
* config so it can be reused.
*/
- if (!vlanmc.member && vlanmc.untag) {
+ if (!vlanmc.member) {
vlanmc.vid = 0;
vlanmc.priority = 0;
vlanmc.fid = 0;
diff --git a/drivers/net/dsa/rtl8366rb.c b/drivers/net/dsa/rtl8366rb.c
index a89093bc6c6a..03deacd83e61 100644
--- a/drivers/net/dsa/rtl8366rb.c
+++ b/drivers/net/dsa/rtl8366rb.c
@@ -14,6 +14,7 @@
#include <linux/bitops.h>
#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
@@ -42,9 +43,12 @@
/* Port Enable Control register */
#define RTL8366RB_PECR 0x0001
-/* Switch Security Control registers */
-#define RTL8366RB_SSCR0 0x0002
-#define RTL8366RB_SSCR1 0x0003
+/* Switch per-port learning disablement register */
+#define RTL8366RB_PORT_LEARNDIS_CTRL 0x0002
+
+/* Security control, actually aging register */
+#define RTL8366RB_SECURITY_CTRL 0x0003
+
#define RTL8366RB_SSCR2 0x0004
#define RTL8366RB_SSCR2_DROP_UNKNOWN_DA BIT(0)
@@ -106,6 +110,18 @@
#define RTL8366RB_POWER_SAVING_REG 0x0021
+/* Spanning tree status (STP) control, two bits per port per FID */
+#define RTL8366RB_STP_STATE_BASE 0x0050 /* 0x0050..0x0057 */
+#define RTL8366RB_STP_STATE_DISABLED 0x0
+#define RTL8366RB_STP_STATE_BLOCKING 0x1
+#define RTL8366RB_STP_STATE_LEARNING 0x2
+#define RTL8366RB_STP_STATE_FORWARDING 0x3
+#define RTL8366RB_STP_MASK GENMASK(1, 0)
+#define RTL8366RB_STP_STATE(port, state) \
+ ((state) << ((port) * 2))
+#define RTL8366RB_STP_STATE_MASK(port) \
+ RTL8366RB_STP_STATE((port), RTL8366RB_STP_MASK)
+
/* CPU port control reg */
#define RTL8368RB_CPU_CTRL_REG 0x0061
#define RTL8368RB_CPU_PORTS_MSK 0x00FF
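For reference, the RTL8366RB_STP_STATE() macros added earlier in this hunk pack a 2-bit STP state per port into each of the per-FID registers at 0x0050..0x0057. The short userspace sketch below only mirrors that bit arithmetic; GENMASK() and the shortened names are re-implemented locally for illustration and are not the kernel definitions.

#include <stdio.h>

#define GENMASK(h, l)		(((1U << ((h) - (l) + 1)) - 1) << (l))
#define STP_STATE_FORWARDING	0x3
#define STP_MASK		GENMASK(1, 0)
#define STP_STATE(port, state)	((state) << ((port) * 2))
#define STP_STATE_MASK(port)	STP_STATE((port), STP_MASK)

int main(void)
{
	unsigned int reg = 0;
	int port = 3;

	/* What a regmap_update_bits() call on one per-FID register boils
	 * down to: clear the port's 2-bit field, then OR in the new state.
	 */
	reg = (reg & ~STP_STATE_MASK(port)) |
	      STP_STATE(port, STP_STATE_FORWARDING);
	printf("port %d: mask 0x%04x, value 0x%04x, register 0x%04x\n",
	       port, STP_STATE_MASK(port),
	       STP_STATE(port, STP_STATE_FORWARDING), reg);
	return 0;
}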
@@ -143,6 +159,21 @@
#define RTL8366RB_PHY_NO_OFFSET 9
#define RTL8366RB_PHY_NO_MASK (0x1f << 9)
+/* VLAN Ingress Control Register 1, two bits per port.
+ * bit 0 .. 5 will make the switch drop ingress frames without
+ * a VID, such as untagged or priority-tagged frames, for the
+ * respective port.
+ * bit 6 .. 11 will make the switch drop ingress frames carrying
+ * a C-tag with VID != 0 for the respective port.
+ */
+#define RTL8366RB_VLAN_INGRESS_CTRL1_REG 0x037E
+#define RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) (BIT((port)) | BIT((port) + 6))
+
+/* VLAN Ingress Control Register 2, one bit per port.
+ * bit0 .. bit5 will make the switch drop all ingress frames with
+ * a VLAN classification that does not include the port in its
+ * member set.
+ */
#define RTL8366RB_VLAN_INGRESS_CTRL2_REG 0x037f
/* LED control registers */
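RTL8366RB_VLAN_INGRESS_CTRL1_DROP() above combines the two per-port bits described in the comment (bit N for untagged/priority-tagged frames, bit N + 6 for C-tagged frames with VID != 0). Below is a minimal sketch of that mask computation; BIT() and the six-port loop are local assumptions mirroring this driver, not kernel code.

#include <stdio.h>

#define BIT(n)		(1U << (n))
/* Mirrors RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) */
#define DROP(port)	(BIT(port) | BIT((port) + 6))

int main(void)
{
	/* Port 0 gives 0x041, port 5 gives 0x820, and so on */
	for (int port = 0; port < 6; port++)
		printf("port %d drop mask: 0x%03x\n", port, DROP(port));
	return 0;
}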
@@ -215,6 +246,7 @@
#define RTL8366RB_NUM_LEDGROUPS 4
#define RTL8366RB_NUM_VIDS 4096
#define RTL8366RB_PRIORITYMAX 7
+#define RTL8366RB_NUM_FIDS 8
#define RTL8366RB_FIDMAX 7
#define RTL8366RB_PORT_1 BIT(0) /* In userspace port 0 */
@@ -300,6 +332,13 @@
#define RTL8366RB_INTERRUPT_STATUS_REG 0x0442
#define RTL8366RB_NUM_INTERRUPT 14 /* 0..13 */
+/* Port isolation registers */
+#define RTL8366RB_PORT_ISO_BASE 0x0F08
+#define RTL8366RB_PORT_ISO(pnum) (RTL8366RB_PORT_ISO_BASE + (pnum))
+#define RTL8366RB_PORT_ISO_EN BIT(0)
+#define RTL8366RB_PORT_ISO_PORTS_MASK GENMASK(7, 1)
+#define RTL8366RB_PORT_ISO_PORTS(pmask) ((pmask) << 1)
+
/* bits 0..5 enable force when cleared */
#define RTL8366RB_MAC_FORCE_CTRL_REG 0x0F11
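The port isolation registers above hold an enable bit plus a port mask shifted up by one. As a rough check of what rtl8366rb_setup() later writes, the sketch below computes the value for a user port that may only reach the CPU port and for the CPU port that may reach all user ports; BIT(), the CPU port number 5 and the 0x1f user-port mask are assumptions taken from this driver rather than shared definitions.

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define PORT_ISO_EN		BIT(0)
#define PORT_ISO_PORTS(pmask)	((pmask) << 1)
#define CPU_PORT		5
#define USER_PORTS_MASK		0x1f	/* user ports 0..4 */

int main(void)
{
	unsigned int user_val = PORT_ISO_PORTS(BIT(CPU_PORT)) | PORT_ISO_EN;
	unsigned int cpu_val = PORT_ISO_PORTS(USER_PORTS_MASK) | PORT_ISO_EN;

	printf("user port isolation value: 0x%02x\n", user_val);	/* 0x41 */
	printf("CPU port isolation value:  0x%02x\n", cpu_val);		/* 0x3f */
	return 0;
}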
@@ -314,9 +353,11 @@
/**
* struct rtl8366rb - RTL8366RB-specific data
* @max_mtu: per-port max MTU setting
+ * @pvid_enabled: if PVID is set for respective port
*/
struct rtl8366rb {
unsigned int max_mtu[RTL8366RB_NUM_PORTS];
+ bool pvid_enabled[RTL8366RB_NUM_PORTS];
};
static struct rtl8366_mib_counter rtl8366rb_mib_counters[] = {
@@ -835,6 +876,21 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
+	/* Isolate all user ports so each can only send packets to itself and the CPU port */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(RTL8366RB_PORT_NUM_CPU)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+ }
+ /* CPU port can send packets to all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_ISO(RTL8366RB_PORT_NUM_CPU),
+ RTL8366RB_PORT_ISO_PORTS(dsa_user_ports(ds)) |
+ RTL8366RB_PORT_ISO_EN);
+ if (ret)
+ return ret;
+
/* Set up the "green ethernet" feature */
ret = rtl8366rb_jam_table(rtl8366rb_green_jam,
ARRAY_SIZE(rtl8366rb_green_jam), smi, false);
@@ -888,13 +944,14 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
/* layer 2 size, see rtl8366rb_change_mtu() */
rb->max_mtu[i] = 1532;
- /* Enable learning for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR0, 0);
+ /* Disable learning for all ports */
+ ret = regmap_write(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ RTL8366RB_PORT_ALL);
if (ret)
return ret;
/* Enable auto ageing for all ports */
- ret = regmap_write(smi->map, RTL8366RB_SSCR1, 0);
+ ret = regmap_write(smi->map, RTL8366RB_SECURITY_CTRL, 0);
if (ret)
return ret;
@@ -911,11 +968,13 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
if (ret)
return ret;
- /* Discard VLAN tagged packets if the port is not a member of
- * the VLAN with which the packets is associated.
- */
+ /* Accept all packets by default, we enable filtering on-demand */
+ ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ 0);
+ if (ret)
+ return ret;
ret = regmap_write(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
- RTL8366RB_PORT_ALL);
+ 0);
if (ret)
return ret;
@@ -963,7 +1022,7 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return ret;
}
- ret = rtl8366_init_vlan(smi);
+ ret = rtl8366_reset_vlan(smi);
if (ret)
return ret;
@@ -977,8 +1036,6 @@ static int rtl8366rb_setup(struct dsa_switch *ds)
return -ENODEV;
}
- ds->configure_vlan_while_not_filtering = false;
-
return 0;
}
@@ -1127,6 +1184,190 @@ rtl8366rb_port_disable(struct dsa_switch *ds, int port)
rb8366rb_set_port_led(smi, port, false);
}
+static int
+rtl8366rb_port_bridge_join(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+	/* Loop over all ports other than the current one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Join this port to each other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)));
+ if (ret)
+ dev_err(smi->dev, "failed to join port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+ /* Set the bits for the ports we can access */
+ return regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap));
+}
+
+static void
+rtl8366rb_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *bridge)
+{
+ struct realtek_smi *smi = ds->priv;
+ unsigned int port_bitmap = 0;
+ int ret, i;
+
+	/* Loop over all ports other than this one */
+ for (i = 0; i < RTL8366RB_PORT_NUM_CPU; i++) {
+ /* Current port handled last */
+ if (i == port)
+ continue;
+ /* Not on this bridge */
+ if (dsa_to_port(ds, i)->bridge_dev != bridge)
+ continue;
+ /* Remove this port from any other port on the bridge */
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(i),
+ RTL8366RB_PORT_ISO_PORTS(BIT(port)), 0);
+ if (ret)
+ dev_err(smi->dev, "failed to leave port %d\n", port);
+
+ port_bitmap |= BIT(i);
+ }
+
+	/* Clear the bits for the ports we cannot access, leave ourselves */
+ regmap_update_bits(smi->map, RTL8366RB_PORT_ISO(port),
+ RTL8366RB_PORT_ISO_PORTS(port_bitmap), 0);
+}
+
+/**
+ * rtl8366rb_drop_untagged() - make the switch drop untagged and C-tagged frames
+ * @smi: SMI state container
+ * @port: the port to drop untagged and C-tagged frames on
+ * @drop: whether to drop or pass untagged and C-tagged frames
+ */
+static int rtl8366rb_drop_untagged(struct realtek_smi *smi, int port, bool drop)
+{
+ return regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL1_REG,
+ RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port),
+ drop ? RTL8366RB_VLAN_INGRESS_CTRL1_DROP(port) : 0);
+}
+
+static int rtl8366rb_vlan_filtering(struct dsa_switch *ds, int port,
+ bool vlan_filtering,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ struct rtl8366rb *rb;
+ int ret;
+
+ rb = smi->chip_data;
+
+ dev_dbg(smi->dev, "port %d: %s VLAN filtering\n", port,
+ vlan_filtering ? "enable" : "disable");
+
+ /* If the port is not in the member set, the frame will be dropped */
+ ret = regmap_update_bits(smi->map, RTL8366RB_VLAN_INGRESS_CTRL2_REG,
+ BIT(port), vlan_filtering ? BIT(port) : 0);
+ if (ret)
+ return ret;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. If we turn off VLAN
+ * filtering on a port, we need to accept any frames.
+ */
+ if (vlan_filtering)
+ ret = rtl8366rb_drop_untagged(smi, port, !rb->pvid_enabled[port]);
+ else
+ ret = rtl8366rb_drop_untagged(smi, port, false);
+
+ return ret;
+}
+
+static int
+rtl8366rb_port_pre_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ /* We support enabling/disabling learning */
+ if (flags.mask & ~(BR_LEARNING))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+rtl8366rb_port_bridge_flags(struct dsa_switch *ds, int port,
+ struct switchdev_brport_flags flags,
+ struct netlink_ext_ack *extack)
+{
+ struct realtek_smi *smi = ds->priv;
+ int ret;
+
+ if (flags.mask & BR_LEARNING) {
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_LEARNDIS_CTRL,
+ BIT(port),
+ (flags.val & BR_LEARNING) ? 0 : BIT(port));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+rtl8366rb_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
+{
+ struct realtek_smi *smi = ds->priv;
+ u32 val;
+ int i;
+
+ switch (state) {
+ case BR_STATE_DISABLED:
+ val = RTL8366RB_STP_STATE_DISABLED;
+ break;
+ case BR_STATE_BLOCKING:
+ case BR_STATE_LISTENING:
+ val = RTL8366RB_STP_STATE_BLOCKING;
+ break;
+ case BR_STATE_LEARNING:
+ val = RTL8366RB_STP_STATE_LEARNING;
+ break;
+ case BR_STATE_FORWARDING:
+ val = RTL8366RB_STP_STATE_FORWARDING;
+ break;
+ default:
+ dev_err(smi->dev, "unknown bridge state requested\n");
+ return;
+ }
+
+ /* Set the same status for the port on all the FIDs */
+ for (i = 0; i < RTL8366RB_NUM_FIDS; i++) {
+ regmap_update_bits(smi->map, RTL8366RB_STP_STATE_BASE + i,
+ RTL8366RB_STP_STATE_MASK(port),
+ RTL8366RB_STP_STATE(port, val));
+ }
+}
+
+static void
+rtl8366rb_port_fast_age(struct dsa_switch *ds, int port)
+{
+ struct realtek_smi *smi = ds->priv;
+
+ /* This will age out any learned L2 entries */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), BIT(port));
+ /* Restore the normal state of things */
+ regmap_update_bits(smi->map, RTL8366RB_SECURITY_CTRL,
+ BIT(port), 0);
+}
+
static int rtl8366rb_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
{
struct realtek_smi *smi = ds->priv;
@@ -1338,24 +1579,44 @@ static int rtl8366rb_get_mc_index(struct realtek_smi *smi, int port, int *val)
static int rtl8366rb_set_mc_index(struct realtek_smi *smi, int port, int index)
{
+ struct rtl8366rb *rb;
+ bool pvid_enabled;
+ int ret;
+
+ rb = smi->chip_data;
+ pvid_enabled = !!index;
+
if (port >= smi->num_ports || index >= RTL8366RB_NUM_VLANS)
return -EINVAL;
- return regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
+ ret = regmap_update_bits(smi->map, RTL8366RB_PORT_VLAN_CTRL_REG(port),
RTL8366RB_PORT_VLAN_CTRL_MASK <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port),
(index & RTL8366RB_PORT_VLAN_CTRL_MASK) <<
RTL8366RB_PORT_VLAN_CTRL_SHIFT(port));
+ if (ret)
+ return ret;
+
+ rb->pvid_enabled[port] = pvid_enabled;
+
+ /* If VLAN filtering is enabled and PVID is also enabled, we must
+ * not drop any untagged or C-tagged frames. Make sure to update the
+ * filtering setting.
+ */
+ if (dsa_port_is_vlan_filtering(dsa_to_port(smi->ds, port)))
+ ret = rtl8366rb_drop_untagged(smi, port, !pvid_enabled);
+
+ return ret;
}
static bool rtl8366rb_is_vlan_valid(struct realtek_smi *smi, unsigned int vlan)
{
- unsigned int max = RTL8366RB_NUM_VLANS;
+ unsigned int max = RTL8366RB_NUM_VLANS - 1;
if (smi->vlan4k_enabled)
max = RTL8366RB_NUM_VIDS - 1;
- if (vlan == 0 || vlan > max)
+ if (vlan > max)
return false;
return true;
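rtl8366rb_vlan_filtering() and rtl8366rb_set_mc_index() above apply one shared rule to the ingress filter: untagged and C-tagged frames are dropped only when the port is VLAN-filtering and has no PVID. Here is a small sketch of just that decision; the function name and the truth-table loop are local stand-ins, not the driver's API.

#include <stdio.h>
#include <stdbool.h>

/* The decision rtl8366rb_drop_untagged() is asked to apply */
static bool should_drop_untagged(bool vlan_filtering, bool pvid_enabled)
{
	return vlan_filtering && !pvid_enabled;
}

int main(void)
{
	for (int filtering = 0; filtering <= 1; filtering++)
		for (int pvid = 0; pvid <= 1; pvid++)
			printf("filtering=%d pvid=%d -> drop untagged: %s\n",
			       filtering, pvid,
			       should_drop_untagged(filtering, pvid) ? "yes" : "no");
	return 0;
}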
@@ -1510,11 +1771,17 @@ static const struct dsa_switch_ops rtl8366rb_switch_ops = {
.get_strings = rtl8366_get_strings,
.get_ethtool_stats = rtl8366_get_ethtool_stats,
.get_sset_count = rtl8366_get_sset_count,
- .port_vlan_filtering = rtl8366_vlan_filtering,
+ .port_bridge_join = rtl8366rb_port_bridge_join,
+ .port_bridge_leave = rtl8366rb_port_bridge_leave,
+ .port_vlan_filtering = rtl8366rb_vlan_filtering,
.port_vlan_add = rtl8366_vlan_add,
.port_vlan_del = rtl8366_vlan_del,
.port_enable = rtl8366rb_port_enable,
.port_disable = rtl8366rb_port_disable,
+ .port_pre_bridge_flags = rtl8366rb_port_pre_bridge_flags,
+ .port_bridge_flags = rtl8366rb_port_bridge_flags,
+ .port_stp_state_set = rtl8366rb_port_stp_state_set,
+ .port_fast_age = rtl8366rb_port_fast_age,
.port_change_mtu = rtl8366rb_change_mtu,
.port_max_mtu = rtl8366rb_max_mtu,
};
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index 5e5d24e7c02b..21dba16af097 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -20,6 +20,27 @@
#define SJA1105_AGEING_TIME_MS(ms) ((ms) / 10)
#define SJA1105_NUM_L2_POLICERS SJA1110_MAX_L2_POLICING_COUNT
+/* Calculated assuming 1 Gbps, where the clock runs at 125 MHz (8 ns period).
+ * To avoid floating point operations, we'll multiply the degrees by 10
+ * to get a "phase" and get 1 decimal point precision.
+ */
+#define SJA1105_RGMII_DELAY_PS_TO_PHASE(ps) \
+ (((ps) * 360) / 800)
+#define SJA1105_RGMII_DELAY_PHASE_TO_PS(phase) \
+ ((800 * (phase)) / 360)
+#define SJA1105_RGMII_DELAY_PHASE_TO_HW(phase) \
+ (((phase) - 738) / 9)
+#define SJA1105_RGMII_DELAY_PS_TO_HW(ps) \
+ SJA1105_RGMII_DELAY_PHASE_TO_HW(SJA1105_RGMII_DELAY_PS_TO_PHASE(ps))
+
+/* The valid delay range is 73.8 to 101.7 degrees,
+ * in 0.9 degree increments.
+ */
+#define SJA1105_RGMII_DELAY_MIN_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(738)
+#define SJA1105_RGMII_DELAY_MAX_PS \
+ SJA1105_RGMII_DELAY_PHASE_TO_PS(1017)
+
typedef enum {
SPI_READ = 0,
SPI_WRITE = 1,
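The SJA1105_RGMII_DELAY_* macros above replace the old hard-coded "90 degrees" with a picosecond value taken from the device tree. Below is a quick userspace check of the arithmetic; the macro names are shortened locally, and 2000 ps is the value the legacy fallback in sja1105_main.c assumes for the former behaviour.

#include <stdio.h>

/* Same integer arithmetic as the SJA1105_RGMII_DELAY_* macros above */
#define PS_TO_PHASE(ps)		(((ps) * 360) / 800)	/* tenths of a degree */
#define PHASE_TO_PS(phase)	((800 * (phase)) / 360)
#define PHASE_TO_HW(phase)	(((phase) - 738) / 9)

int main(void)
{
	int ps = 2000;

	printf("%d ps -> %d.%d degrees -> hw code %d\n",
	       ps, PS_TO_PHASE(ps) / 10, PS_TO_PHASE(ps) % 10,
	       PHASE_TO_HW(PS_TO_PHASE(ps)));
	printf("valid range: %d..%d ps\n",
	       PHASE_TO_PS(738), PHASE_TO_PS(1017));
	return 0;
}

This prints 2000 ps -> 90.0 degrees -> hw code 18, and a valid range of 1640..2260 ps, matching SJA1105_RGMII_DELAY_MIN_PS and SJA1105_RGMII_DELAY_MAX_PS.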
@@ -222,16 +243,14 @@ struct sja1105_flow_block {
struct sja1105_private {
struct sja1105_static_config static_config;
- bool rgmii_rx_delay[SJA1105_MAX_NUM_PORTS];
- bool rgmii_tx_delay[SJA1105_MAX_NUM_PORTS];
+ int rgmii_rx_delay_ps[SJA1105_MAX_NUM_PORTS];
+ int rgmii_tx_delay_ps[SJA1105_MAX_NUM_PORTS];
phy_interface_t phy_mode[SJA1105_MAX_NUM_PORTS];
bool fixed_link[SJA1105_MAX_NUM_PORTS];
- bool vlan_aware;
unsigned long ucast_egress_floods;
unsigned long bcast_egress_floods;
const struct sja1105_info *info;
size_t max_xfer_len;
- struct gpio_desc *reset_gpio;
struct spi_device *spidev;
struct dsa_switch *ds;
u16 bridge_pvid[SJA1105_MAX_NUM_PORTS];
@@ -242,6 +261,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes access to the dynamic config interface */
+ struct mutex dynamic_config_lock;
struct devlink_region **regions;
struct sja1105_cbs_entry *cbs;
struct mii_bus *mdio_base_t1;
diff --git a/drivers/net/dsa/sja1105/sja1105_clocking.c b/drivers/net/dsa/sja1105/sja1105_clocking.c
index 5bbf1707f2af..e3699f76f6d7 100644
--- a/drivers/net/dsa/sja1105/sja1105_clocking.c
+++ b/drivers/net/dsa/sja1105/sja1105_clocking.c
@@ -498,17 +498,6 @@ sja1110_cfg_pad_mii_id_packing(void *buf, struct sja1105_cfg_pad_mii_id *cmd,
sja1105_packing(buf, &cmd->txc_pd, 0, 0, size, op);
}
-/* Valid range in degrees is an integer between 73.8 and 101.7 */
-static u64 sja1105_rgmii_delay(u64 phase)
-{
- /* UM11040.pdf: The delay in degree phase is 73.8 + delay_tune * 0.9.
- * To avoid floating point operations we'll multiply by 10
- * and get 1 decimal point precision.
- */
- phase *= 10;
- return (phase - 738) / 9;
-}
-
/* The RGMII delay setup procedure is 2-step and gets called upon each
* .phylink_mac_config. Both are strategic.
* The reason is that the RX Tunable Delay Line of the SJA1105 MAC has issues
@@ -521,13 +510,15 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
int rc;
- if (priv->rgmii_rx_delay[port])
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
- if (priv->rgmii_tx_delay[port])
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay)
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
+ if (tx_delay)
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
/* Stage 1: Turn the RGMII delay lines off. */
pad_mii_id.rxc_bypass = 1;
@@ -542,11 +533,11 @@ int sja1105pqrs_setup_rgmii_delay(const void *ctx, int port)
return rc;
/* Stage 2: Turn the RGMII delay lines on. */
- if (priv->rgmii_rx_delay[port]) {
+ if (rx_delay) {
pad_mii_id.rxc_bypass = 0;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
+ if (tx_delay) {
pad_mii_id.txc_bypass = 0;
pad_mii_id.txc_pd = 0;
}
@@ -561,20 +552,22 @@ int sja1110_setup_rgmii_delay(const void *ctx, int port)
const struct sja1105_private *priv = ctx;
const struct sja1105_regs *regs = priv->info->regs;
struct sja1105_cfg_pad_mii_id pad_mii_id = {0};
+ int rx_delay = priv->rgmii_rx_delay_ps[port];
+ int tx_delay = priv->rgmii_tx_delay_ps[port];
u8 packed_buf[SJA1105_SIZE_CGU_CMD] = {0};
pad_mii_id.rxc_pd = 1;
pad_mii_id.txc_pd = 1;
- if (priv->rgmii_rx_delay[port]) {
- pad_mii_id.rxc_delay = sja1105_rgmii_delay(90);
+ if (rx_delay) {
+ pad_mii_id.rxc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(rx_delay);
/* The "BYPASS" bit in SJA1110 is actually a "don't bypass" */
pad_mii_id.rxc_bypass = 1;
pad_mii_id.rxc_pd = 0;
}
- if (priv->rgmii_tx_delay[port]) {
- pad_mii_id.txc_delay = sja1105_rgmii_delay(90);
+ if (tx_delay) {
+ pad_mii_id.txc_delay = SJA1105_RGMII_DELAY_PS_TO_HW(tx_delay);
pad_mii_id.txc_bypass = 1;
pad_mii_id.txc_pd = 0;
}
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index f2049f52833c..7729d3f8b7f5 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1170,6 +1170,56 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
},
};
+#define SJA1105_DYNAMIC_CONFIG_SLEEP_US 10
+#define SJA1105_DYNAMIC_CONFIG_TIMEOUT_US 100000
+
+static int
+sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
+ struct sja1105_dyn_cmd *cmd,
+ const struct sja1105_dynamic_table_ops *ops)
+{
+ u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ int rc;
+
+ /* We don't _need_ to read the full entry, just the command area which
+ * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
+ * buffer that contains the full entry too. Additionally, our API
+ * doesn't really know how many bytes into the buffer the command
+ * area really begins. So just read back the whole entry.
+ */
+ rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
+ ops->packed_size);
+ if (rc)
+ return rc;
+
+ /* Unpack the command structure, and return it to the caller in case it
+ * needs to perform further checks on it (VALIDENT).
+ */
+ memset(cmd, 0, sizeof(*cmd));
+ ops->cmd_packing(packed_buf, cmd, UNPACK);
+
+ /* Hardware hasn't cleared VALID => still working on it */
+ return cmd->valid ? -EAGAIN : 0;
+}
+
+/* Poll the dynamic config entry's control area until the hardware has
+ * cleared the VALID bit, which means we have confirmation that it has
+ * finished processing the command.
+ */
+static int
+sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
+ struct sja1105_dyn_cmd *cmd,
+ const struct sja1105_dynamic_table_ops *ops)
+{
+ int rc;
+
+ return read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, cmd, ops);
+}
+
/* Provides read access to the settings through the dynamic interface
* of the switch.
* @blk_idx is used as key to select from the sja1105_dynamic_table_ops.
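The two helpers added above split the busy-wait out of the dynamic config read/write paths: the poll callback returns -EAGAIN while the VALID bit is still set, and read_poll_timeout() keeps calling it until it returns something else or the timeout expires. The sketch below is only a simplified userspace analogue of that pattern, not the kernel helper; the function names and the fake hardware callback are assumptions for illustration.

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* Call op() every sleep_us microseconds until it stops returning -EAGAIN
 * or timeout_us microseconds have elapsed. */
static int poll_until_done(int (*op)(void *), void *arg,
			   unsigned int sleep_us, unsigned int timeout_us)
{
	unsigned int waited = 0;

	for (;;) {
		int rc = op(arg);

		if (rc != -EAGAIN)
			return rc;	/* finished (0) or a real error */
		if (waited >= timeout_us)
			return -ETIMEDOUT;
		usleep(sleep_us);
		waited += sleep_us;
	}
}

/* Pretend the hardware needs three polls before it clears VALID */
static int fake_hw_poll(void *arg)
{
	int *remaining = arg;

	return (*remaining)-- > 0 ? -EAGAIN : 0;
}

int main(void)
{
	int busy_polls = 3;

	printf("rc = %d\n",
	       poll_until_done(fake_hw_poll, &busy_polls, 10, 100000));
	return 0;
}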
@@ -1196,7 +1246,6 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
struct sja1105_dyn_cmd cmd = {0};
/* SPI payload buffer */
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {0};
- int retries = 3;
int rc;
if (blk_idx >= BLK_IDX_MAX_DYN)
@@ -1234,33 +1283,21 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0)
+ if (rc < 0) {
+ mutex_unlock(&priv->dynamic_config_lock);
return rc;
+ }
- /* Loop until we have confirmation that hardware has finished
- * processing the command and has cleared the VALID field
- */
- do {
- memset(packed_buf, 0, ops->packed_size);
-
- /* Retrieve the read operation's result */
- rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
- ops->packed_size);
- if (rc < 0)
- return rc;
-
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
-
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
- cpu_relax();
- } while (cmd.valid && --retries);
+ rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+ mutex_unlock(&priv->dynamic_config_lock);
+ if (rc < 0)
+ return rc;
- if (cmd.valid)
- return -ETIMEDOUT;
+ if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
/* Don't dereference possibly NULL pointer - maybe caller
* only wanted to see whether the entry existed or not.
@@ -1316,8 +1353,16 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
ops->entry_packing(packed_buf, entry, PACK);
/* Send SPI write operation: read config table entry */
+ mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
+ if (rc < 0) {
+ mutex_unlock(&priv->dynamic_config_lock);
+ return rc;
+ }
+
+ rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
+ mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
return rc;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 924c3f129992..c343effe2e96 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -27,15 +27,29 @@
#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
-static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
- unsigned int startup_delay)
+/* Configure the optional reset pin and bring up the switch */
+static int sja1105_hw_reset(struct device *dev, unsigned int pulse_len,
+ unsigned int startup_delay)
{
+ struct gpio_desc *gpio;
+
+ gpio = gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (!gpio)
+ return 0;
+
gpiod_set_value_cansleep(gpio, 1);
/* Wait for minimum reset pulse length */
msleep(pulse_len);
gpiod_set_value_cansleep(gpio, 0);
/* Wait until chip is ready after reset */
msleep(startup_delay);
+
+ gpiod_put(gpio);
+
+ return 0;
}
static void
@@ -1095,27 +1109,78 @@ static int sja1105_static_config_load(struct sja1105_private *priv)
return sja1105_static_config_upload(priv);
}
-static int sja1105_parse_rgmii_delays(struct sja1105_private *priv)
+/* This is the "new way" for a MAC driver to configure its RGMII delay lines,
+ * based on the explicit "rx-internal-delay-ps" and "tx-internal-delay-ps"
+ * properties. It has the advantage of working with fixed links and with PHYs
+ * that apply RGMII delays too, and the MAC driver need not perform any
+ * special checks.
+ *
+ * Previously we were acting upon the "phy-mode" property when we were
+ * operating in fixed-link, basically acting as a PHY, but with a reversed
+ * interpretation: PHY_INTERFACE_MODE_RGMII_TXID means that the MAC should
+ * behave as if it is connected to a PHY which has applied RGMII delays in the
+ * TX direction. So if anything, RX delays should have been added by the MAC,
+ * but we were adding TX delays.
+ *
+ * If the "{rx,tx}-internal-delay-ps" properties are not specified, we fall
+ * back to the legacy behavior and apply delays on fixed-link ports based on
+ * the reverse interpretation of the phy-mode. This is a deviation from the
+ * expected default behavior which is to simply apply no delays. To achieve
+ * that behavior with the new bindings, it is mandatory to specify
+ * "{rx,tx}-internal-delay-ps" with a value of 0.
+ */
+static int sja1105_parse_rgmii_delays(struct sja1105_private *priv, int port,
+ struct device_node *port_dn)
{
- struct dsa_switch *ds = priv->ds;
- int port;
+ phy_interface_t phy_mode = priv->phy_mode[port];
+ struct device *dev = &priv->spidev->dev;
+ int rx_delay = -1, tx_delay = -1;
- for (port = 0; port < ds->num_ports; port++) {
- if (!priv->fixed_link[port])
- continue;
+ if (!phy_interface_mode_is_rgmii(phy_mode))
+ return 0;
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_RXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_rx_delay[port] = true;
+ of_property_read_u32(port_dn, "rx-internal-delay-ps", &rx_delay);
+ of_property_read_u32(port_dn, "tx-internal-delay-ps", &tx_delay);
- if (priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_TXID ||
- priv->phy_mode[port] == PHY_INTERFACE_MODE_RGMII_ID)
- priv->rgmii_tx_delay[port] = true;
+ if (rx_delay == -1 && tx_delay == -1 && priv->fixed_link[port]) {
+ dev_warn(dev,
+ "Port %d interpreting RGMII delay settings based on \"phy-mode\" property, "
+ "please update device tree to specify \"rx-internal-delay-ps\" and "
+ "\"tx-internal-delay-ps\"",
+ port);
- if ((priv->rgmii_rx_delay[port] || priv->rgmii_tx_delay[port]) &&
- !priv->info->setup_rgmii_delay)
- return -EINVAL;
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ rx_delay = 2000;
+
+ if (phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
+ phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
+ tx_delay = 2000;
+ }
+
+ if (rx_delay < 0)
+ rx_delay = 0;
+ if (tx_delay < 0)
+ tx_delay = 0;
+
+ if ((rx_delay || tx_delay) && !priv->info->setup_rgmii_delay) {
+ dev_err(dev, "Chip cannot apply RGMII delays\n");
+ return -EINVAL;
}
+
+ if ((rx_delay && rx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (tx_delay && tx_delay < SJA1105_RGMII_DELAY_MIN_PS) ||
+ (rx_delay > SJA1105_RGMII_DELAY_MAX_PS) ||
+ (tx_delay > SJA1105_RGMII_DELAY_MAX_PS)) {
+ dev_err(dev,
+ "port %d RGMII delay values out of range, must be between %d and %d ps\n",
+ port, SJA1105_RGMII_DELAY_MIN_PS, SJA1105_RGMII_DELAY_MAX_PS);
+ return -ERANGE;
+ }
+
+ priv->rgmii_rx_delay_ps[port] = rx_delay;
+ priv->rgmii_tx_delay_ps[port] = tx_delay;
+
return 0;
}
@@ -1166,6 +1231,12 @@ static int sja1105_parse_ports_node(struct sja1105_private *priv,
}
priv->phy_mode[index] = phy_mode;
+
+ err = sja1105_parse_rgmii_delays(priv, index, child);
+ if (err) {
+ of_node_put(child);
+ return err;
+ }
}
return 0;
@@ -1360,7 +1431,7 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
*/
if (state->interface != PHY_INTERFACE_MODE_NA &&
sja1105_phy_mode_mismatch(priv, port, state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1380,9 +1451,8 @@ static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static int
@@ -1766,6 +1836,7 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ struct dsa_port *dp = dsa_to_port(ds, port);
struct sja1105_private *priv = ds->priv;
struct device *dev = ds->dev;
int i;
@@ -1802,7 +1873,7 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
/* We need to hide the dsa_8021q VLANs from the user. */
- if (!priv->vlan_aware)
+ if (!dsa_port_is_vlan_filtering(dp))
l2_lookup.vlanid = 0;
rc = cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
if (rc)
@@ -2295,11 +2366,6 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
tpid2 = ETH_P_SJA1105;
}
- if (priv->vlan_aware == enabled)
- return 0;
-
- priv->vlan_aware = enabled;
-
table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
general_params = table->entries;
/* EtherType used to identify inner tagged (C-tag) VLAN traffic */
@@ -2332,7 +2398,7 @@ int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
*/
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
l2_lookup_params = table->entries;
- l2_lookup_params->shared_learn = !priv->vlan_aware;
+ l2_lookup_params->shared_learn = !enabled;
for (port = 0; port < ds->num_ports; port++) {
if (dsa_is_unused_port(ds, port))
@@ -2965,7 +3031,6 @@ static int sja1105_setup_ports(struct sja1105_private *priv)
continue;
dp->priv = sp;
- sp->dp = dp;
sp->data = tagger_data;
slave = dp->slave;
kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
@@ -3229,17 +3294,14 @@ static int sja1105_probe(struct spi_device *spi)
return -EINVAL;
}
+ rc = sja1105_hw_reset(dev, 1, 1);
+ if (rc)
+ return rc;
+
priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
if (!priv)
return -ENOMEM;
- /* Configure the optional reset pin and bring up switch */
- priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(priv->reset_gpio))
- dev_dbg(dev, "reset-gpios not defined, ignoring\n");
- else
- sja1105_hw_reset(priv->reset_gpio, 1, 1);
-
/* Populate our driver private structure (priv) based on
* the device tree node that was probed (spi)
*/
@@ -3303,6 +3365,7 @@ static int sja1105_probe(struct spi_device *spi)
priv->ds = ds;
mutex_init(&priv->ptp_data.lock);
+ mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
rc = sja1105_parse_dt(priv);
@@ -3311,15 +3374,6 @@ static int sja1105_probe(struct spi_device *spi)
return rc;
}
- /* Error out early if internal delays are required through DT
- * and we can't apply them.
- */
- rc = sja1105_parse_rgmii_delays(priv);
- if (rc < 0) {
- dev_err(ds->dev, "RGMII delay not supported\n");
- return rc;
- }
-
if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
sizeof(struct sja1105_cbs_entry),
diff --git a/drivers/net/dsa/sja1105/sja1105_vl.c b/drivers/net/dsa/sja1105/sja1105_vl.c
index 6802f4057cc0..f5dca6a9b0f9 100644
--- a/drivers/net/dsa/sja1105/sja1105_vl.c
+++ b/drivers/net/dsa/sja1105/sja1105_vl.c
@@ -394,7 +394,8 @@ static int sja1105_init_virtual_links(struct sja1105_private *priv,
vl_lookup[k].vlanid = rule->key.vl.vid;
vl_lookup[k].vlanprior = rule->key.vl.pcp;
} else {
- u16 vid = dsa_8021q_rx_vid(priv->ds, port);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ u16 vid = dsa_tag_8021q_rx_vid(dp);
vl_lookup[k].vlanid = vid;
vl_lookup[k].vlanprior = 0;
@@ -494,13 +495,15 @@ int sja1105_vl_redirect(struct sja1105_private *priv, int port,
bool append)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int rc;
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only redirect based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
@@ -568,6 +571,8 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
u32 num_entries, struct action_gate_entry *entries)
{
struct sja1105_rule *rule = sja1105_rule_find(priv, cookie);
+ struct dsa_port *dp = dsa_to_port(priv->ds, port);
+ bool vlan_aware = dsa_port_is_vlan_filtering(dp);
int ipv = -1;
int i, rc;
s32 rem;
@@ -592,11 +597,11 @@ int sja1105_vl_gate(struct sja1105_private *priv, int port,
return -ERANGE;
}
- if (!priv->vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
+ if (!vlan_aware && key->type != SJA1105_KEY_VLAN_UNAWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on DMAC");
return -EOPNOTSUPP;
- } else if (priv->vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
+ } else if (vlan_aware && key->type != SJA1105_KEY_VLAN_AWARE_VL) {
NL_SET_ERR_MSG_MOD(extack,
"Can only gate based on {DMAC, VID, PCP}");
return -EOPNOTSUPP;
diff --git a/drivers/net/dsa/xrs700x/xrs700x.c b/drivers/net/dsa/xrs700x/xrs700x.c
index 469420941054..910fcb3b252b 100644
--- a/drivers/net/dsa/xrs700x/xrs700x.c
+++ b/drivers/net/dsa/xrs700x/xrs700x.c
@@ -456,7 +456,7 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 1000baseT_Full);
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
dev_err(ds->dev, "Unsupported port: %i\n", port);
return;
}
@@ -467,10 +467,8 @@ static void xrs700x_phylink_validate(struct dsa_switch *ds, int port,
phylink_set(mask, 10baseT_Full);
phylink_set(mask, 100baseT_Full);
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void xrs700x_mac_link_up(struct dsa_switch *ds, int port,
diff --git a/drivers/net/dsa/xrs700x/xrs700x_mdio.c b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
index d01cf1073d49..127a677d1f39 100644
--- a/drivers/net/dsa/xrs700x/xrs700x_mdio.c
+++ b/drivers/net/dsa/xrs700x/xrs700x_mdio.c
@@ -31,7 +31,7 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -39,13 +39,13 @@ static int xrs700x_mdio_reg_read(void *context, unsigned int reg,
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_READ);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
}
- ret = mdiobus_read(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD);
+ ret = mdiodev_read(mdiodev, XRS_MDIO_IBD);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_read returned %d\n", ret);
return ret;
@@ -64,7 +64,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
u16 uval;
int ret;
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBD, (u16)val);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBD, (u16)val);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -72,7 +72,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
uval = (u16)FIELD_GET(GENMASK(31, 16), reg);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA1, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA1, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
@@ -80,7 +80,7 @@ static int xrs700x_mdio_reg_write(void *context, unsigned int reg,
uval = (u16)((reg & GENMASK(15, 1)) | XRS_IB_WRITE);
- ret = mdiobus_write(mdiodev->bus, mdiodev->addr, XRS_MDIO_IBA0, uval);
+ ret = mdiodev_write(mdiodev, XRS_MDIO_IBA0, uval);
if (ret < 0) {
dev_err(dev, "xrs mdiobus_write returned %d\n", ret);
return ret;
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 87c906e744fb..846fa3af4504 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -270,7 +270,7 @@ static void el3_dev_fill(struct net_device *dev, __be16 *phys_addr, int ioaddr,
{
struct el3_private *lp = netdev_priv(dev);
- memcpy(dev->dev_addr, phys_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)phys_addr);
dev->base_addr = ioaddr;
dev->irq = irq;
dev->if_port = if_port;
diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c
index 6f0ea2facea9..1d124b0f65e7 100644
--- a/drivers/net/ethernet/3com/3c515.c
+++ b/drivers/net/ethernet/3com/3c515.c
@@ -567,6 +567,7 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
{
struct corkscrew_private *vp = netdev_priv(dev);
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i;
int irq;
@@ -619,7 +620,6 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
/* Read the station address from the EEPROM. */
EL3WINDOW(0);
for (i = 0; i < 0x18; i++) {
- __be16 *phys_addr = (__be16 *) dev->dev_addr;
int timer;
outw(EEPROM_Read + i, ioaddr + Wn0EepromCmd);
/* Pause for at least 162 us. for the read to take place. */
@@ -631,8 +631,9 @@ static int corkscrew_setup(struct net_device *dev, int ioaddr,
eeprom[i] = inw(ioaddr + Wn0EepromData);
checksum ^= eeprom[i];
if (i < 3)
- phys_addr[i] = htons(eeprom[i]);
+ addr[i] = htons(eeprom[i]);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
checksum = (checksum ^ (checksum >> 8)) & 0xff;
if (checksum != 0x00)
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c
index dd4d3c48b98d..dc3b7c960611 100644
--- a/drivers/net/ethernet/3com/3c574_cs.c
+++ b/drivers/net/ethernet/3com/3c574_cs.c
@@ -305,15 +305,13 @@ static int tc574_config(struct pcmcia_device *link)
struct net_device *dev = link->priv;
struct el3_private *lp = netdev_priv(dev);
int ret, i, j;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
- __be16 *phys_addr;
char *cardname;
__u32 config;
u8 *buf;
size_t len;
- phys_addr = (__be16 *)dev->dev_addr;
-
dev_dbg(&link->dev, "3c574_config()\n");
link->io_lines = 16;
@@ -347,19 +345,20 @@ static int tc574_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i * 2]));
+ addr[i] = htons(le16_to_cpu(buf[i * 2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
EL3WINDOW(0);
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i + 10));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i + 10));
+ if (addr[0] == htons(0x6060)) {
pr_notice("IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
if (link->prod_id[1])
cardname = link->prod_id[1];
else
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 09816e84314d..4673bc1604e7 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -237,8 +237,8 @@ static void tc589_detach(struct pcmcia_device *link)
static int tc589_config(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
- __be16 *phys_addr;
int ret, i, j, multi = 0, fifo;
+ __be16 addr[ETH_ALEN / 2];
unsigned int ioaddr;
static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
u8 *buf;
@@ -246,7 +246,6 @@ static int tc589_config(struct pcmcia_device *link)
dev_dbg(&link->dev, "3c589_config\n");
- phys_addr = (__be16 *)dev->dev_addr;
/* Is this a 3c562? */
if (link->manf_id != MANFID_3COM)
dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
@@ -285,18 +284,19 @@ static int tc589_config(struct pcmcia_device *link)
len = pcmcia_get_tuple(link, 0x88, &buf);
if (buf && len >= 6) {
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ addr[i] = htons(le16_to_cpu(buf[i*2]));
kfree(buf);
} else {
kfree(buf); /* 0 < len < 6 */
for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
+ addr[i] = htons(read_eeprom(ioaddr, i));
+ if (addr[0] == htons(0x6060)) {
dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
dev->base_addr, dev->base_addr+15);
goto failed;
}
}
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The address and resource configuration register aren't loaded from
* the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 7b0ae9efc004..ccf07667aa5e 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1091,6 +1091,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
struct vortex_private *vp;
int option;
unsigned int eeprom[0x40], checksum = 0; /* EEPROM contents */
+ __be16 addr[ETH_ALEN / 2];
int i, step;
struct net_device *dev;
static int printed_version;
@@ -1284,7 +1285,8 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
+ addr[i] = htons(eeprom[i + 10]);
+ eth_hw_addr_set(dev, (u8 *)addr);
if (print_info)
pr_cont(" %pM", dev->dev_addr);
/* Unfortunately an all zero eeprom passes the checksum and this
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index da1ae37a9d73..991ad953aa79 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -320,8 +320,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_cont(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 6c6bdd5913ec..1f8acbba5b6b 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -716,7 +716,7 @@ static int ax_init_dev(struct net_device *dev)
for (i = 0; i < 16; i++)
SA_prom[i] = SA_prom[i+i];
- memcpy(dev->dev_addr, SA_prom, ETH_ALEN);
+ eth_hw_addr_set(dev, SA_prom);
}
#ifdef CONFIG_AX88796_93CX6
@@ -733,7 +733,7 @@ static int ax_init_dev(struct net_device *dev)
(__le16 __force *)mac_addr,
sizeof(mac_addr) >> 1);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
}
#endif
if (ax->plat->wordlength == 2) {
@@ -748,16 +748,18 @@ static int ax_init_dev(struct net_device *dev)
/* load the mac-address from the device */
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
+ u8 addr[ETH_ALEN];
+
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] =
- ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
+ eth_hw_addr_set(dev, addr);
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, ax->plat->mac_addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 3c370e686ec3..3aef959fc25b 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -187,6 +187,7 @@ static int get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* This is based on drivers/net/ethernet/8390/ne.c */
@@ -220,9 +221,11 @@ static int get_prom(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + AXNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
+
return 1;
} /* get_prom */
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4ad8031ab669..e320cccba61a 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -374,8 +374,7 @@ static int mcf8390_init(struct net_device *dev)
if (ret)
return ret;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
netdev_dbg(dev, "Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 9afc712f5948..0a9118b8be0c 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -500,9 +500,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
dev->base_addr = ioaddr;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = SA_prom[i];
- }
+ eth_hw_addr_set(dev, SA_prom);
pr_cont("%pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index d6715008e04d..6a0a2039600a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -390,7 +390,7 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
dev->ethtool_ops = &ne2k_pci_ethtool_ops;
NS8390_init(dev, 0);
- memcpy(dev->dev_addr, SA_prom, dev->addr_len);
+ eth_hw_addr_set(dev, SA_prom);
i = register_netdev(dev);
if (i)
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 96ad72abd373..0f07fe03da98 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -278,6 +278,7 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
u_char __iomem *base, *virt;
+ u8 addr[ETH_ALEN];
int i, j;
/* Allocate a small memory window */
@@ -302,7 +303,8 @@ static struct hw_info *get_hwinfo(struct pcmcia_device *link)
(readb(base+2) == hw_info[i].a1) &&
(readb(base+4) == hw_info[i].a2)) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = readb(base + (j<<1));
+ addr[j] = readb(base + (j<<1));
+ eth_hw_addr_set(dev, addr);
break;
}
}
@@ -324,6 +326,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
u_char prom[32];
int i, j;
@@ -362,7 +365,8 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
}
if ((i < NR_INFO) || ((prom[28] == 0x57) && (prom[30] == 0x57))) {
for (j = 0; j < 6; j++)
- dev->dev_addr[j] = prom[j<<1];
+ addr[j] = prom[j<<1];
+ eth_hw_addr_set(dev, addr);
return (i < NR_INFO) ? hw_info+i : &default_info;
}
return NULL;
@@ -377,6 +381,7 @@ static struct hw_info *get_prom(struct pcmcia_device *link)
static struct hw_info *get_dl10019(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
u_char sum;
@@ -385,7 +390,8 @@ static struct hw_info *get_dl10019(struct pcmcia_device *link)
if (sum != 0xff)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ addr[i] = inb_p(dev->base_addr + 0x14 + i);
+ eth_hw_addr_set(dev, addr);
i = inb(dev->base_addr + 0x1f);
return ((i == 0x91)||(i == 0x99)) ? &dl10022_info : &dl10019_info;
}
@@ -400,6 +406,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
+ u8 addr[ETH_ALEN];
int i, j;
/* Not much of a test, but the alternatives are messy */
@@ -413,9 +420,10 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
for (i = 0; i < 6; i += 2) {
j = inw(ioaddr + PCNET_DATAPORT);
- dev->dev_addr[i] = j & 0xff;
- dev->dev_addr[i+1] = j >> 8;
+ addr[i] = j & 0xff;
+ addr[i+1] = j >> 8;
}
+ eth_hw_addr_set(dev, addr);
return NULL;
}
@@ -430,6 +438,7 @@ static struct hw_info *get_ax88190(struct pcmcia_device *link)
static struct hw_info *get_hwired(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
+ u8 addr[ETH_ALEN];
int i;
for (i = 0; i < 6; i++)
@@ -438,7 +447,8 @@ static struct hw_info *get_hwired(struct pcmcia_device *link)
return NULL;
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = hw_addr[i];
+ addr[i] = hw_addr[i];
+ eth_hw_addr_set(dev, addr);
return &default_info;
} /* get_hwired */
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index fbbd7f22c142..bd89ca8a92df 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -104,8 +104,8 @@ STNIC_WRITE (int reg, byte val)
static int __init stnic_probe(void)
{
struct net_device *dev;
- int i, err;
struct ei_device *ei_local;
+ int err;
/* If we are not running on a SolutionEngine, give up now */
if (! MACH_SE)
@@ -119,8 +119,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = stnic_eadr[i];
+ eth_hw_addr_set(dev, stnic_eadr);
/* Set the base address to point to the NIC, not the "real" base! */
dev->base_addr = 0x1000;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 35a500a21521..e8b4fe813a08 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -364,8 +364,7 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
if (i)
return i;
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = SA_prom[i];
+ eth_hw_addr_set(dev, SA_prom);
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 412ae3e43ffb..4601b38f532a 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -33,6 +33,7 @@ source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
+source "drivers/net/ethernet/asix/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index aaa5078cd7d1..fdd8c6c17451 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
+obj-$(CONFIG_NET_VENDOR_ASIX) += asix/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_CADENCE) += cadence/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c
index c4ecf4fcadf8..1cfdd01b4c2e 100644
--- a/drivers/net/ethernet/actions/owl-emac.c
+++ b/drivers/net/ethernet/actions/owl-emac.c
@@ -342,7 +342,7 @@ static u32 owl_emac_dma_cmd_stop(struct owl_emac_priv *priv)
static void owl_emac_set_hw_mac_addr(struct net_device *netdev)
{
struct owl_emac_priv *priv = netdev_priv(netdev);
- u8 *mac_addr = netdev->dev_addr;
+ const u8 *mac_addr = netdev->dev_addr;
u32 addr_high, addr_low;
addr_high = mac_addr[0] << 8 | mac_addr[1];
@@ -1173,7 +1173,7 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
owl_emac_set_hw_mac_addr(netdev);
return owl_emac_setup_frame_xmit(netdev_priv(netdev));
@@ -1385,7 +1385,7 @@ static void owl_emac_get_mac_addr(struct net_device *netdev)
struct device *dev = netdev->dev.parent;
int ret;
- ret = eth_platform_get_mac_address(dev, netdev->dev_addr);
+ ret = platform_get_ethdev_address(dev, netdev);
if (!ret && is_valid_ether_addr(netdev->dev_addr))
return;
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index e0f6cc910bd2..c6982f7caf9b 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -641,6 +641,7 @@ static int starfire_init_one(struct pci_dev *pdev,
struct netdev_private *np;
int i, irq, chip_idx = ent->driver_data;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
long ioaddr;
void __iomem *base;
int drv_flags, io_size;
@@ -696,7 +697,8 @@ static int starfire_init_one(struct pci_dev *pdev,
/* Serial EEPROM reads are hidden by the hardware. */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ addr[i] = readb(base + EEPROMCtrl + 20 - i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version) /* Dump the EEPROM contents during development. */
if (debug > 4)
@@ -955,7 +957,7 @@ static int netdev_open(struct net_device *dev)
writew(0, ioaddr + PerfFilterTable + 4);
writew(0, ioaddr + PerfFilterTable + 8);
for (i = 1; i < 16; i++) {
- __be16 *eaddrs = (__be16 *)dev->dev_addr;
+ const __be16 *eaddrs = (const __be16 *)dev->dev_addr;
void __iomem *setup_frm = ioaddr + PerfFilterTable + i * 16;
writew(be16_to_cpu(eaddrs[2]), setup_frm); setup_frm += 4;
writew(be16_to_cpu(eaddrs[1]), setup_frm); setup_frm += 4;
@@ -1787,14 +1789,14 @@ static void set_rx_mode(struct net_device *dev)
} else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
netdev_for_each_mc_addr(ha, dev) {
eaddrs = (__be16 *) ha->addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
@@ -1805,7 +1807,7 @@ static void set_rx_mode(struct net_device *dev)
} else {
/* Must use a multicast hash table. */
void __iomem *filter_addr;
- __be16 *eaddrs;
+ const __be16 *eaddrs;
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
@@ -1819,7 +1821,7 @@ static void set_rx_mode(struct net_device *dev)
}
/* Clear the perfect filter list, skip first two entries. */
filter_addr = ioaddr + PerfFilterTable + 2 * 16;
- eaddrs = (__be16 *)dev->dev_addr;
+ eaddrs = (const __be16 *)dev->dev_addr;
for (i = 2; i < 16; i++) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index c560ad06f0be..447dc64a17e5 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1025,7 +1025,7 @@ static int greth_set_mac_add(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
dev->dev_addr[4] << 8 | dev->dev_addr[5]);
@@ -1346,6 +1346,7 @@ static int greth_of_probe(struct platform_device *ofdev)
int i;
int err;
int tmp;
+ u8 addr[ETH_ALEN];
unsigned long timeout;
dev = alloc_etherdev(sizeof(struct greth_private));
@@ -1449,8 +1450,6 @@ static int greth_of_probe(struct platform_device *ofdev)
break;
}
if (i == 6) {
- u8 addr[ETH_ALEN];
-
err = of_get_mac_address(ofdev->dev.of_node, addr);
if (!err) {
for (i = 0; i < 6; i++)
@@ -1464,7 +1463,8 @@ static int greth_of_probe(struct platform_device *ofdev)
}
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 920633161174..f4edc616388c 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3863,7 +3863,7 @@ static int et131x_change_mtu(struct net_device *netdev, int new_mtu)
et131x_init_send(adapter);
et131x_hwaddr_init(adapter);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
/* Init the device with the new settings */
et131x_adapter_setup(adapter);
@@ -3966,7 +3966,7 @@ static int et131x_pci_setup(struct pci_dev *pdev,
netif_napi_add(netdev, &adapter->napi, et131x_poll, 64);
- ether_addr_copy(netdev->dev_addr, adapter->addr);
+ eth_hw_addr_set(netdev, adapter->addr);
rc = -ENOMEM;
diff --git a/drivers/net/ethernet/alacritech/slicoss.c b/drivers/net/ethernet/alacritech/slicoss.c
index 696517eae77f..1fc9a1cd3ef8 100644
--- a/drivers/net/ethernet/alacritech/slicoss.c
+++ b/drivers/net/ethernet/alacritech/slicoss.c
@@ -1008,7 +1008,7 @@ static void slic_set_link_autoneg(struct slic_device *sdev)
static void slic_set_mac_address(struct slic_device *sdev)
{
- u8 *addr = sdev->netdev->dev_addr;
+ const u8 *addr = sdev->netdev->dev_addr;
u32 val;
val = addr[5] | addr[4] << 8 | addr[3] << 16 | addr[2] << 24;
@@ -1660,7 +1660,7 @@ static int slic_read_eeprom(struct slic_device *sdev)
goto free_eeprom;
}
/* set mac address */
- ether_addr_copy(sdev->netdev->dev_addr, mac[devfn]);
+ eth_hw_addr_set(sdev->netdev, mac[devfn]);
free_eeprom:
dma_free_coherent(&sdev->pdev->dev, SLIC_EEPROM_SIZE, eeprom, paddr);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 037baea1c738..800ee022388f 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -356,7 +356,7 @@ static int emac_set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
writel(dev->dev_addr[0] << 16 | dev->dev_addr[1] << 8 | dev->
dev_addr[2], db->membase + EMAC_MAC_A1_REG);
@@ -852,7 +852,7 @@ static int emac_probe(struct platform_device *pdev)
}
/* Read MAC-address from DT */
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the MAC address is invalid get a random one */
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 9dc12b13061f..732da15a3827 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -869,6 +869,7 @@ static int ace_init(struct net_device *dev)
int board_idx, ecode = 0;
short i;
unsigned char cache_size;
+ u8 addr[ETH_ALEN];
ap = netdev_priv(dev);
regs = ap->regs;
@@ -988,12 +989,13 @@ static int ace_init(struct net_device *dev)
writel(mac1, &regs->MacAddrHi);
writel(mac2, &regs->MacAddrLo);
- dev->dev_addr[0] = (mac1 >> 8) & 0xff;
- dev->dev_addr[1] = mac1 & 0xff;
- dev->dev_addr[2] = (mac2 >> 24) & 0xff;
- dev->dev_addr[3] = (mac2 >> 16) & 0xff;
- dev->dev_addr[4] = (mac2 >> 8) & 0xff;
- dev->dev_addr[5] = mac2 & 0xff;
+ addr[0] = (mac1 >> 8) & 0xff;
+ addr[1] = mac1 & 0xff;
+ addr[2] = (mac2 >> 24) & 0xff;
+ addr[3] = (mac2 >> 16) & 0xff;
+ addr[4] = (mac2 >> 8) & 0xff;
+ addr[5] = mac2 & 0xff;
+ eth_hw_addr_set(dev, addr);
printk("MAC: %pM\n", dev->dev_addr);
@@ -2712,15 +2714,15 @@ static int ace_set_mac_addr(struct net_device *dev, void *p)
struct ace_private *ap = netdev_priv(dev);
struct ace_regs __iomem *regs = ap->regs;
struct sockaddr *addr=p;
- u8 *da;
+ const u8 *da;
struct cmd cmd;
if(netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data,dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
- da = (u8 *)dev->dev_addr;
+ da = (const u8 *)dev->dev_addr;
writel(da[0] << 8 | da[1], &regs->MacAddrHi);
writel((da[2] << 24) | (da[3] << 16) | (da[4] << 8) | da[5],
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 1c00d719e5d7..d75d95a97dd9 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -849,7 +849,7 @@ static int init_phy(struct net_device *dev)
return 0;
}
-static void tse_update_mac_addr(struct altera_tse_private *priv, u8 *addr)
+static void tse_update_mac_addr(struct altera_tse_private *priv, const u8 *addr)
{
u32 msb;
u32 lsb;
@@ -1524,7 +1524,7 @@ static int altera_tse_probe(struct platform_device *pdev)
priv->rx_dma_buf_sz = ALTERA_RXDMABUFFER_SIZE;
/* get default MAC address from device tree */
- ret = of_get_mac_address(pdev->dev.of_node, ndev->dev_addr);
+ ret = of_get_ethdev_address(pdev->dev.of_node, ndev);
if (ret)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0e43000614ab..7d5d885d85d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -4073,7 +4073,7 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter,
ether_addr_copy(adapter->mac_addr, netdev->dev_addr);
} else {
ether_addr_copy(adapter->mac_addr, feat->dev_attr.mac_addr);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
}
/* Set offload features */
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 4786f0504691..899c8a2a34b6 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -168,7 +168,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
+ depends on (OF_ADDRESS || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
depends on PTP_1588_CLOCK_OPTIONAL
select BITREVERSE
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 92e4246dc359..9421afb950f7 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1500,7 +1500,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
int i;
struct sockaddr *addr = p;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
for (i = 0; i < ETH_ALEN; i++)
@@ -1743,6 +1743,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
unsigned long reg_addr, reg_len;
struct amd8111e_priv *lp;
struct net_device *dev;
+ u8 addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
@@ -1809,7 +1810,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initializing MAC address */
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = readb(lp->mmio + PADR + i);
+ addr[i] = readb(lp->mmio + PADR + i);
+ eth_hw_addr_set(dev, addr);
/* Setting user defined parameters */
lp->ext_phy_option = speed_duplex[card_idx];
diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c
index 9d2f49fd945e..9c7d9690d00c 100644
--- a/drivers/net/ethernet/amd/atarilance.c
+++ b/drivers/net/ethernet/amd/atarilance.c
@@ -582,7 +582,7 @@ static unsigned long __init lance_probe1( struct net_device *dev,
switch( lp->cardtype ) {
case OLD_RIEBL:
/* No ethernet address! (Set some default address) */
- memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, OldRieblDefHwaddr);
break;
case NEW_RIEBL:
lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
@@ -1123,7 +1123,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
return -EIO;
}
- memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
+ eth_hw_addr_set(dev, saddr->sa_data);
for( i = 0; i < 6; i++ )
MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 9c1636222b99..c6f003975621 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1178,7 +1178,7 @@ static int au1000_probe(struct platform_device *pdev)
aup->phy1_search_mac0 = 1;
} else {
if (is_valid_ether_addr(pd->mac)) {
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
} else {
/* Set a random MAC since platform_data provided no valid one. */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 4019cab87505..30ee5329bd7c 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -529,7 +529,8 @@ static void mace_write(mace_private *lp, unsigned int ioaddr, int reg,
mace_init
Resets the MACE chip.
---------------------------------------------------------------------------- */
-static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
+static int mace_init(mace_private *lp, unsigned int ioaddr,
+ const char *enet_addr)
{
int i;
int ct = 0;
@@ -635,7 +636,7 @@ static int nmclan_config(struct pcmcia_device *link)
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev, buf);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 70d76fdb9f56..f5c50ff377ff 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -1595,6 +1595,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
struct net_device *dev;
const struct pcnet32_access *a = NULL;
u8 promaddr[ETH_ALEN];
+ u8 addr[ETH_ALEN];
int ret = -ENODEV;
/* reset the chip */
@@ -1760,9 +1761,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
unsigned int val;
val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
/* There may be endianness issues here. */
- dev->dev_addr[2 * i] = val & 0x0ff;
- dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+ addr[2 * i] = val & 0x0ff;
+ addr[2 * i + 1] = (val >> 8) & 0x0ff;
}
+ eth_hw_addr_set(dev, addr);
/* read PROM address and compare with CSR address */
for (i = 0; i < ETH_ALEN; i++)
@@ -1775,13 +1777,16 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
pr_cont(" warning: CSR address invalid,\n");
pr_info(" using instead PROM address of");
}
- memcpy(dev->dev_addr, promaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, promaddr);
}
}
/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
- if (!is_valid_ether_addr(dev->dev_addr))
- eth_zero_addr(dev->dev_addr);
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(dev, zero_addr);
+ }
if (pcnet32_debug & NETIF_MSG_PROBE) {
pr_cont(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c
index 4a845bc071b2..007bd7787291 100644
--- a/drivers/net/ethernet/amd/sun3lance.c
+++ b/drivers/net/ethernet/amd/sun3lance.c
@@ -305,7 +305,6 @@ static int __init lance_probe( struct net_device *dev)
unsigned long ioaddr;
struct lance_private *lp;
- int i;
static int did_version;
volatile unsigned short *ioaddr_probe;
unsigned short tmp1, tmp2;
@@ -373,8 +372,7 @@ static int __init lance_probe( struct net_device *dev)
dev->irq);
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* tell the card it's ether address, bytes swapped */
MEM->init.hwaddr[0] = dev->dev_addr[1];
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index ddece276ae23..22d609563af8 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1301,7 +1301,6 @@ static int sparc_lance_probe_one(struct platform_device *op,
struct device_node *dp = op->dev.of_node;
struct lance_private *lp;
struct net_device *dev;
- int i;
dev = alloc_etherdev(sizeof(struct lance_private) + 8);
if (!dev)
@@ -1315,8 +1314,7 @@ static int sparc_lance_probe_one(struct platform_device *op,
* will copy the address in the device structure to the lance
* initialization block.
*/
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Get the IO region */
lp->lregs = of_ioremap(&op->resource[0], 0,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b2cd3bdba9f8..533b8519ec35 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -1331,6 +1331,10 @@
#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
#endif
+#ifndef MDIO_VEND2_PMA_MISC_CTRL0
+#define MDIO_VEND2_PMA_MISC_CTRL0 0x8090
+#endif
+
#ifndef MDIO_CTRL1_SPEED1G
#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
#endif
@@ -1389,6 +1393,10 @@
#define XGBE_PMA_RX_RST_0_RESET_ON 0x10
#define XGBE_PMA_RX_RST_0_RESET_OFF 0x00
+#define XGBE_PMA_PLL_CTRL_MASK BIT(15)
+#define XGBE_PMA_PLL_CTRL_ENABLE BIT(15)
+#define XGBE_PMA_PLL_CTRL_DISABLE 0x0000
+
/* Bit setting and getting macros
* The get macro will extract the current bit field value from within
* the variable
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index d5fd49dd25f3..3936543a74d8 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1080,7 +1080,7 @@ static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
return 0;
}
-static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 17a585adfb49..30d24d19f40d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1912,10 +1912,8 @@ static int xgbe_close(struct net_device *netdev)
clk_disable_unprepare(pdata->ptpclk);
clk_disable_unprepare(pdata->sysclk);
- flush_workqueue(pdata->an_workqueue);
destroy_workqueue(pdata->an_workqueue);
- flush_workqueue(pdata->dev_workqueue);
destroy_workqueue(pdata->dev_workqueue);
set_bit(XGBE_DOWN, &pdata->dev_state);
@@ -2016,7 +2014,7 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_if->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
index bafc51c34e0b..94879cf8b420 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -369,9 +369,8 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
__ETHTOOL_LINK_MODE_MASK_NBITS, cmd->link_modes.advertising,
__ETHTOOL_LINK_MODE_MASK_NBITS, lks->link_modes.supported);
- bitmap_and(advertising,
- cmd->link_modes.advertising, lks->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(advertising, cmd->link_modes.advertising,
+ lks->link_modes.supported);
if ((cmd->base.autoneg == AUTONEG_ENABLE) &&
bitmap_empty(advertising, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
@@ -384,8 +383,7 @@ static int xgbe_set_link_ksettings(struct net_device *netdev,
pdata->phy.autoneg = cmd->base.autoneg;
pdata->phy.speed = speed;
pdata->phy.duplex = cmd->base.duplex;
- bitmap_copy(lks->link_modes.advertising, advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(lks->link_modes.advertising, advertising);
if (cmd->base.autoneg == AUTONEG_ENABLE)
XGBE_SET_ADV(lks, Autoneg);
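The two ethtool hunks above are behaviour-preserving: the linkmode helpers are thin wrappers around the same __ETHTOOL_LINK_MODE_MASK_NBITS-sized bitmaps the open-coded calls operated on. An illustrative sketch (not taken from the driver):

	#include <linux/linkmode.h>

	static void example_linkmode_ops(unsigned long *dst,
					 const unsigned long *a,
					 const unsigned long *b)
	{
		/* equivalent to bitmap_and(dst, a, b, __ETHTOOL_LINK_MODE_MASK_NBITS) */
		linkmode_and(dst, a, b);

		/* equivalent to bitmap_copy(dst, a, __ETHTOOL_LINK_MODE_MASK_NBITS) */
		linkmode_copy(dst, a);
	}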
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
index a218dc6f2edd..0e8698928e4d 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -267,7 +267,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->xgmac_regs;
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Initialize ECC timestamps */
pdata->tx_sec_period = jiffies;
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
index 18e48b3bc402..213769054391 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
@@ -1977,12 +1977,26 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata)
}
}
+static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable)
+{
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0,
+ XGBE_PMA_PLL_CTRL_MASK,
+ enable ? XGBE_PMA_PLL_CTRL_ENABLE
+ : XGBE_PMA_PLL_CTRL_DISABLE);
+
+ /* Wait for command to complete */
+ usleep_range(100, 200);
+}
+
static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
unsigned int cmd, unsigned int sub_cmd)
{
unsigned int s0 = 0;
unsigned int wait;
+ /* Disable PLL re-initialization during FW command processing */
+ xgbe_phy_pll_ctrl(pdata, false);
+
/* Log if a previous command did not complete */
if (XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS)) {
netif_dbg(pdata, link, pdata->netdev,
@@ -2003,7 +2017,7 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
wait = XGBE_RATECHANGE_COUNT;
while (wait--) {
if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
- return;
+ goto reenable_pll;
usleep_range(1000, 2000);
}
@@ -2013,6 +2027,10 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata,
/* Reset on error */
xgbe_phy_rx_reset(pdata);
+
+reenable_pll:
+ /* Enable PLL re-initialization */
+ xgbe_phy_pll_ctrl(pdata, true);
}
static void xgbe_phy_rrc(struct xgbe_prv_data *pdata)
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index 3305979a9f7c..607a2c90513b 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -729,7 +729,7 @@ struct xgbe_ext_stats {
struct xgbe_hw_if {
int (*tx_complete)(struct xgbe_ring_desc *);
- int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+ int (*set_mac_address)(struct xgbe_prv_data *, const u8 *addr);
int (*config_rx_mode)(struct xgbe_prv_data *);
int (*enable_rx_csum)(struct xgbe_prv_data *);
diff --git a/drivers/net/ethernet/apm/xgene-v2/mac.c b/drivers/net/ethernet/apm/xgene-v2/mac.c
index 2da979e4fad1..6423e22e05b2 100644
--- a/drivers/net/ethernet/apm/xgene-v2/mac.c
+++ b/drivers/net/ethernet/apm/xgene-v2/mac.c
@@ -65,7 +65,7 @@ void xge_mac_set_speed(struct xge_pdata *pdata)
void xge_mac_set_station_addr(struct xge_pdata *pdata)
{
- u8 *dev_addr = pdata->ndev->dev_addr;
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 80399c8980bd..d022b6db9e06 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -36,7 +36,7 @@ static int xge_get_resources(struct xge_pdata *pdata)
return -ENOMEM;
}
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 5f657879134e..e641dbbea1e2 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -378,8 +378,8 @@ u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 5f1fc6582d74..220dc42af31a 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -1731,7 +1731,7 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata)
xgene_get_port_id_acpi(dev, pdata);
#endif
- if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
+ if (device_get_ethdev_address(dev, ndev))
eth_hw_addr_random(ndev);
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index f482ced2cadd..72b5e8eb0ec7 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -165,8 +165,8 @@ static void xgene_sgmac_reset(struct xgene_enet_pdata *p)
static void xgene_sgmac_set_mac_addr(struct xgene_enet_pdata *p)
{
+ const u8 *dev_addr = p->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = p->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 304b5d43f236..86607b79c09f 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -207,8 +207,8 @@ static void xgene_pcs_reset(struct xgene_enet_pdata *pdata)
static void xgene_xgmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
+ const u8 *dev_addr = pdata->ndev->dev_addr;
u32 addr0, addr1;
- u8 *dev_addr = pdata->ndev->dev_addr;
addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
diff --git a/drivers/net/ethernet/apple/bmac.c b/drivers/net/ethernet/apple/bmac.c
index a989d2df59ad..9a650d1c1bdd 100644
--- a/drivers/net/ethernet/apple/bmac.c
+++ b/drivers/net/ethernet/apple/bmac.c
@@ -308,7 +308,7 @@ bmac_init_registers(struct net_device *dev)
{
struct bmac_data *bp = netdev_priv(dev);
volatile unsigned short regValue;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
int i;
/* XXDEBUG(("bmac: enter init_registers\n")); */
@@ -371,7 +371,7 @@ bmac_init_registers(struct net_device *dev)
bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */
bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
@@ -521,19 +521,16 @@ static int bmac_resume(struct macio_dev *mdev)
static int bmac_set_address(struct net_device *dev, void *addr)
{
struct bmac_data *bp = netdev_priv(dev);
- unsigned char *p = addr;
- unsigned short *pWord16;
+ const unsigned short *pWord16;
unsigned long flags;
- int i;
XXDEBUG(("bmac: enter set_address\n"));
spin_lock_irqsave(&bp->lock, flags);
- for (i = 0; i < 6; ++i) {
- dev->dev_addr[i] = p[i];
- }
+ eth_hw_addr_set(dev, addr);
+
/* load up the hardware address */
- pWord16 = (unsigned short *)dev->dev_addr;
+ pWord16 = (const unsigned short *)dev->dev_addr;
bmwrite(dev, MADD0, *pWord16++);
bmwrite(dev, MADD1, *pWord16++);
bmwrite(dev, MADD2, *pWord16);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index bed481816ea3..062a300a566a 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -217,7 +217,7 @@ struct aq_hw_ops {
int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
struct aq_ring_s *aq_ring);
- int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_set_mac_address)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_soft_reset)(struct aq_hw_s *self);
@@ -226,7 +226,7 @@ struct aq_hw_ops {
int (*hw_reset)(struct aq_hw_s *self);
- int (*hw_init)(struct aq_hw_s *self, u8 *mac_addr);
+ int (*hw_init)(struct aq_hw_s *self, const u8 *mac_addr);
int (*hw_start)(struct aq_hw_s *self);
@@ -373,7 +373,7 @@ struct aq_fw_ops {
int (*set_phyloopback)(struct aq_hw_s *self, u32 mode, bool enable);
int (*set_power)(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac);
+ const u8 *mac);
int (*send_fw_request)(struct aq_hw_s *self,
const struct hw_fw_request_iface *fw_req,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
index 4a6dfac857ca..02058fe79f52 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c
@@ -35,7 +35,7 @@ static int aq_apply_macsec_cfg(struct aq_nic_s *nic);
static int aq_apply_secy_cfg(struct aq_nic_s *nic,
const struct macsec_secy *secy);
-static void aq_ether_addr_to_mac(u32 mac[2], unsigned char *emac)
+static void aq_ether_addr_to_mac(u32 mac[2], const unsigned char *emac)
{
u32 tmp[2] = { 0 };
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 6c049864dac0..1acf544afeb4 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -300,6 +300,7 @@ static bool aq_nic_is_valid_ether_addr(const u8 *addr)
int aq_nic_ndev_register(struct aq_nic_s *self)
{
+ u8 addr[ETH_ALEN];
int err = 0;
if (!self->ndev) {
@@ -316,12 +317,13 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
#endif
mutex_lock(&self->fwreq_mutex);
- err = self->aq_fw_ops->get_mac_permanent(self->aq_hw,
- self->ndev->dev_addr);
+ err = self->aq_fw_ops->get_mac_permanent(self->aq_hw, addr);
mutex_unlock(&self->fwreq_mutex);
if (err)
goto err_exit;
+ eth_hw_addr_set(self->ndev, addr);
+
if (!is_valid_ether_addr(self->ndev->dev_addr) ||
!aq_nic_is_valid_ether_addr(self->ndev->dev_addr)) {
netdev_warn(self->ndev, "MAC is invalid, will use random.");
@@ -332,7 +334,7 @@ int aq_nic_ndev_register(struct aq_nic_s *self)
{
static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
- ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ eth_hw_addr_set(self->ndev, mac_addr_permanent);
}
#endif
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 611875ef2cd1..4625ccb79499 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -322,7 +322,7 @@ static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -348,7 +348,7 @@ err_exit:
return err;
}
-static int hw_atl_a0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_a0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 9f1b15077e7d..d875ce3ec759 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -533,7 +533,7 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr)
{
unsigned int h = 0U;
unsigned int l = 0U;
@@ -558,7 +558,7 @@ err_exit:
return err;
}
-static int hw_atl_b0_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl_b0_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
index d8db972113ec..5298846dd9f7 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -58,7 +58,7 @@ int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, struct aq_ring_s *ring);
void hw_atl_b0_hw_init_rx_rss_ctrl1(struct aq_hw_s *self);
-int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr);
+int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, const u8 *mac_addr);
int hw_atl_b0_set_fc(struct aq_hw_s *self, u32 fc, u32 tc);
int hw_atl_b0_set_loopback(struct aq_hw_s *self, u32 mode, bool enable);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index 404cbf60d3f2..fc0e66006644 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -944,7 +944,7 @@ u32 hw_atl_utils_get_fw_version(struct aq_hw_s *self)
}
static int aq_fw1x_set_wake_magic(struct aq_hw_s *self, bool wol_enabled,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
@@ -987,7 +987,7 @@ err_exit:
}
static int aq_fw1x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
struct hw_atl_utils_fw_rpc *prpc = NULL;
unsigned int rpc_size = 0U;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index ee0c22d04935..eac631c45c56 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -358,7 +358,7 @@ static int aq_fw2x_get_phy_temp(struct aq_hw_s *self, int *temp)
return 0;
}
-static int aq_fw2x_set_wol(struct aq_hw_s *self, u8 *mac)
+static int aq_fw2x_set_wol(struct aq_hw_s *self, const u8 *mac)
{
struct hw_atl_utils_fw_rpc *rpc = NULL;
struct offload_info *info = NULL;
@@ -404,7 +404,7 @@ err_exit:
}
static int aq_fw2x_set_power(struct aq_hw_s *self, unsigned int power_state,
- u8 *mac)
+ const u8 *mac)
{
int err = 0;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
index 92f64048bf69..c98708bb044c 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c
@@ -516,7 +516,7 @@ static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self)
return aq_hw_err_from_flags(self);
}
-static int hw_atl2_hw_init(struct aq_hw_s *self, u8 *mac_addr)
+static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr)
{
static u32 aq_hw_atl2_igcr_table_[4][2] = {
[AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U },
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 92a79c4ffa2c..0a67612af228 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -26,7 +26,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on ARC || COMPILE_TEST
help
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
@@ -36,7 +36,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && REGULATOR
depends on ARCH_ROCKCHIP || COMPILE_TEST
help
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 38c288ec9059..c642c3d3e600 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -773,7 +773,7 @@ static int arc_emac_set_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
arc_emac_set_address_internal(ndev);
@@ -941,7 +941,7 @@ int arc_emac_probe(struct net_device *ndev, int interface)
}
/* Get MAC address from device tree */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/arc/emac_mdio.c b/drivers/net/ethernet/arc/emac_mdio.c
index 54cdafdd067d..9acf589b1178 100644
--- a/drivers/net/ethernet/arc/emac_mdio.c
+++ b/drivers/net/ethernet/arc/emac_mdio.c
@@ -151,10 +151,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
data->reset_gpio = devm_gpiod_get_optional(priv->dev, "phy-reset",
GPIOD_OUT_LOW);
if (IS_ERR(data->reset_gpio)) {
- error = PTR_ERR(data->reset_gpio);
- dev_err(priv->dev, "Failed to request gpio: %d\n", error);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, PTR_ERR(data->reset_gpio),
+ "Failed to request gpio\n");
}
of_property_read_u32(np, "phy-reset-duration", &data->msec);
@@ -166,9 +165,9 @@ int arc_mdio_probe(struct arc_emac_priv *priv)
error = of_mdiobus_register(bus, priv->dev->of_node);
if (error) {
- dev_err(priv->dev, "cannot register MDIO bus %s\n", bus->name);
mdiobus_free(bus);
- return error;
+ return dev_err_probe(priv->dev, error,
+ "cannot register MDIO bus %s\n", bus->name);
}
return 0;
diff --git a/drivers/net/ethernet/asix/Kconfig b/drivers/net/ethernet/asix/Kconfig
new file mode 100644
index 000000000000..eed02453314c
--- /dev/null
+++ b/drivers/net/ethernet/asix/Kconfig
@@ -0,0 +1,35 @@
+#
+# Asix network device configuration
+#
+
+config NET_VENDOR_ASIX
+ bool "Asix devices"
+ default y
+ help
+ If you have a network (Ethernet, non-USB, not NE2000 compatible)
+ interface based on a chip from ASIX, say Y.
+
+if NET_VENDOR_ASIX
+
+config SPI_AX88796C
+ tristate "Asix AX88796C-SPI support"
+ select PHYLIB
+ depends on SPI
+ depends on GPIOLIB
+ help
+ Say Y here if you intend to use ASIX AX88796C attached in SPI mode.
+
+config SPI_AX88796C_COMPRESSION
+ bool "SPI transfer compression"
+ default n
+ depends on SPI_AX88796C
+ help
+ Say Y here to enable SPI transfer compression. It saves up
+ to 24 dummy cycles during each transfer, which may noticeably
+ speed up short transfers. This sets the default value that is
+ inherited by network interfaces during probe. It can be
+ changed at run time via the spi-compression ethtool tunable.
+
+ If unsure say N.
+
+endif # NET_VENDOR_ASIX
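The compression default selected here is exposed per interface as the "SPICompression" private flag (see ax88796c_ioctl.c below), so at run time it can presumably be toggled through ethtool's private-flag interface, e.g. "ethtool --set-priv-flags <iface> SPICompression on"; the command and interface name are shown for illustration only.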
diff --git a/drivers/net/ethernet/asix/Makefile b/drivers/net/ethernet/asix/Makefile
new file mode 100644
index 000000000000..0bfbbb042634
--- /dev/null
+++ b/drivers/net/ethernet/asix/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for the Asix network device drivers.
+#
+
+obj-$(CONFIG_SPI_AX88796C) += ax88796c.o
+ax88796c-y := ax88796c_main.o ax88796c_ioctl.o ax88796c_spi.o
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.c b/drivers/net/ethernet/asix/ax88796c_ioctl.c
new file mode 100644
index 000000000000..916ae380a004
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/bitmap.h>
+#include <linux/iopoll.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+static const char ax88796c_priv_flag_names[][ETH_GSTRING_LEN] = {
+ "SPICompression",
+};
+
+static void
+ax88796c_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
+{
+ /* Inherit standard device info */
+ strncpy(info->driver, DRV_NAME, sizeof(info->driver));
+}
+
+static u32 ax88796c_get_msglevel(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->msg_enable;
+}
+
+static void ax88796c_set_msglevel(struct net_device *ndev, u32 level)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ ax_local->msg_enable = level;
+}
+
+static void
+ax88796c_get_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ pause->tx_pause = !!(ax_local->flowctrl & AX_FC_TX);
+ pause->rx_pause = !!(ax_local->flowctrl & AX_FC_RX);
+ pause->autoneg = (ax_local->flowctrl & AX_FC_ANEG) ?
+ AUTONEG_ENABLE :
+ AUTONEG_DISABLE;
+}
+
+static int
+ax88796c_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int fc;
+
+ /* The following logic comes from phylink_ethtool_set_pauseparam() */
+ fc = pause->tx_pause ? AX_FC_TX : 0;
+ fc |= pause->rx_pause ? AX_FC_RX : 0;
+ fc |= pause->autoneg ? AX_FC_ANEG : 0;
+
+ ax_local->flowctrl = fc;
+
+ if (pause->autoneg) {
+ phy_set_asym_pause(ax_local->phydev, pause->tx_pause,
+ pause->rx_pause);
+ } else {
+ int maccr = 0;
+
+ phy_set_asym_pause(ax_local->phydev, 0, 0);
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+ }
+
+ return 0;
+}
+
+static int ax88796c_get_regs_len(struct net_device *ndev)
+{
+ return AX88796C_REGDUMP_LEN + AX88796C_PHY_REGDUMP_LEN;
+}
+
+static void
+ax88796c_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *_p)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int offset, i;
+ u16 *p = _p;
+
+ memset(p, 0, ax88796c_get_regs_len(ndev));
+
+ mutex_lock(&ax_local->spi_lock);
+
+ for (offset = 0; offset < AX88796C_REGDUMP_LEN; offset += 2) {
+ if (!test_bit(offset / 2, ax88796c_no_regs_mask))
+ *p = AX_READ(&ax_local->ax_spi, offset);
+ p++;
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ for (i = 0; i < AX88796C_PHY_REGDUMP_LEN / 2; i++) {
+ *p = phy_read(ax_local->phydev, i);
+ p++;
+ }
+}
+
+static void
+ax88796c_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ax88796c_priv_flag_names,
+ sizeof(ax88796c_priv_flag_names));
+ break;
+ }
+}
+
+static int
+ax88796c_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ ret = ARRAY_SIZE(ax88796c_priv_flag_names);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int ax88796c_set_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ if (flags & ~AX_PRIV_FLAGS_MASK)
+ return -EOPNOTSUPP;
+
+ if ((ax_local->priv_flags ^ flags) & AX_CAP_COMP)
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ax_local->priv_flags = flags;
+
+ return 0;
+}
+
+static u32 ax88796c_get_priv_flags(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ return ax_local->priv_flags;
+}
+
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, MDIOCR_RADDR(loc)
+ | MDIOCR_FADDR(phy_id) | MDIOCR_READ, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret != 0),
+ 0, jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ if (!ret)
+ ret = AX_READ(&ax_local->ax_spi, P2_MDIODR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+int
+ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val)
+{
+ struct ax88796c_device *ax_local = mdiobus->priv;
+ int ret;
+
+ mutex_lock(&ax_local->spi_lock);
+ AX_WRITE(&ax_local->ax_spi, val, P2_MDIODR);
+
+ AX_WRITE(&ax_local->ax_spi,
+ MDIOCR_RADDR(loc) | MDIOCR_FADDR(phy_id)
+ | MDIOCR_WRITE, P2_MDIOCR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ ((ret & MDIOCR_VALID) != 0), 0,
+ jiffies_to_usecs(HZ / 100), false,
+ &ax_local->ax_spi, P2_MDIOCR);
+ mutex_unlock(&ax_local->spi_lock);
+
+ return ret;
+}
+
+const struct ethtool_ops ax88796c_ethtool_ops = {
+ .get_drvinfo = ax88796c_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = ax88796c_get_msglevel,
+ .set_msglevel = ax88796c_set_msglevel,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+ .nway_reset = phy_ethtool_nway_reset,
+ .get_pauseparam = ax88796c_get_pauseparam,
+ .set_pauseparam = ax88796c_set_pauseparam,
+ .get_regs_len = ax88796c_get_regs_len,
+ .get_regs = ax88796c_get_regs,
+ .get_strings = ax88796c_get_strings,
+ .get_sset_count = ax88796c_get_sset_count,
+ .get_priv_flags = ax88796c_get_priv_flags,
+ .set_priv_flags = ax88796c_set_priv_flags,
+};
+
+int ax88796c_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd)
+{
+ int ret;
+
+ ret = phy_mii_ioctl(ndev->phydev, ifr, cmd);
+
+ return ret;
+}
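ax88796c_mdio_read()/ax88796c_mdio_write() above are ordinary mii_bus accessors that poll the MDIO command register with read_poll_timeout() under the SPI lock. The bus registration itself lives in ax88796c_main.c and is not reproduced in full here; a minimal sketch of how accessors like these are typically wired up (bus name and id format are illustrative, not the driver's):

	#include <linux/phy.h>
	#include <linux/spi/spi.h>

	#include "ax88796c_main.h"
	#include "ax88796c_ioctl.h"

	static int example_mdiobus_setup(struct ax88796c_device *ax_local,
					 struct spi_device *spi)
	{
		struct mii_bus *mdiobus;

		mdiobus = devm_mdiobus_alloc(&spi->dev);
		if (!mdiobus)
			return -ENOMEM;

		mdiobus->name = "ax88796c-mdiobus";	/* illustrative */
		mdiobus->read = ax88796c_mdio_read;
		mdiobus->write = ax88796c_mdio_write;
		mdiobus->parent = &spi->dev;
		mdiobus->priv = ax_local;	/* what the accessors pull back out via mdiobus->priv */
		snprintf(mdiobus->id, MII_BUS_ID_SIZE, "%s", dev_name(&spi->dev));

		return devm_mdiobus_register(&spi->dev, mdiobus);
	}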
diff --git a/drivers/net/ethernet/asix/ax88796c_ioctl.h b/drivers/net/ethernet/asix/ax88796c_ioctl.h
new file mode 100644
index 000000000000..34d2a7dcc5ef
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_ioctl.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_IOCTL_H
+#define _AX88796C_IOCTL_H
+
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#include "ax88796c_main.h"
+
+extern const struct ethtool_ops ax88796c_ethtool_ops;
+
+bool ax88796c_check_power(const struct ax88796c_device *ax_local);
+bool ax88796c_check_power_and_wake(struct ax88796c_device *ax_local);
+void ax88796c_set_power_saving(struct ax88796c_device *ax_local, u8 ps_level);
+int ax88796c_mdio_read(struct mii_bus *mdiobus, int phy_id, int loc);
+int ax88796c_mdio_write(struct mii_bus *mdiobus, int phy_id, int loc, u16 val);
+int ax88796c_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+
+#endif
diff --git a/drivers/net/ethernet/asix/ax88796c_main.c b/drivers/net/ethernet/asix/ax88796c_main.c
new file mode 100644
index 000000000000..4b0c5a09fd57
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.c
@@ -0,0 +1,1164 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include "ax88796c_main.h"
+#include "ax88796c_ioctl.h"
+
+#include <linux/bitmap.h>
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <linux/lockdep.h>
+#include <linux/mdio.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/skbuff.h>
+#include <linux/spi/spi.h>
+
+static int comp = IS_ENABLED(CONFIG_SPI_AX88796C_COMPRESSION);
+static int msg_enable = NETIF_MSG_PROBE |
+ NETIF_MSG_LINK |
+ NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static const char *no_regs_list = "80018001,e1918001,8001a001,fc0d0000";
+unsigned long ax88796c_no_regs_mask[AX88796C_REGDUMP_LEN / (sizeof(unsigned long) * 8)];
+
+module_param(msg_enable, int, 0444);
+MODULE_PARM_DESC(msg_enable, "Message mask (see linux/netdevice.h for bitmap)");
+
+static int ax88796c_soft_reset(struct ax88796c_device *ax_local)
+{
+ u16 temp;
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET, P0_PSR);
+ AX_WRITE(&ax_local->ax_spi, PSR_RESET_CLR, P0_PSR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(160 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret)
+ return ret;
+
+ temp = AX_READ(&ax_local->ax_spi, P4_SPICR);
+ if (ax_local->priv_flags & AX_CAP_COMP) {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp | SPICR_RCEN | SPICR_QCEN), P4_SPICR);
+ ax_local->ax_spi.comp = 1;
+ } else {
+ AX_WRITE(&ax_local->ax_spi,
+ (temp & ~(SPICR_RCEN | SPICR_QCEN)), P4_SPICR);
+ ax_local->ax_spi.comp = 0;
+ }
+
+ return 0;
+}
+
+static int ax88796c_reload_eeprom(struct ax88796c_device *ax_local)
+{
+ int ret;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, EECR_RELOAD, P3_EECR);
+
+ ret = read_poll_timeout(AX_READ, ret,
+ (ret & PSR_DEV_READY),
+ 0, jiffies_to_usecs(2 * HZ / 1000), false,
+ &ax_local->ax_spi, P0_PSR);
+ if (ret) {
+ dev_err(&ax_local->spi->dev,
+ "timeout waiting for reload eeprom\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ax88796c_set_hw_multicast(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ int mc_count = netdev_mc_count(ndev);
+ u16 rx_ctl = RXCR_AB;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ memset(ax_local->multi_filter, 0, AX_MCAST_FILTER_SIZE);
+
+ if (ndev->flags & IFF_PROMISC) {
+ rx_ctl |= RXCR_PRO;
+
+ } else if (ndev->flags & IFF_ALLMULTI || mc_count > AX_MAX_MCAST) {
+ rx_ctl |= RXCR_AMALL;
+
+ } else if (mc_count == 0) {
+ /* just broadcast and directed */
+ } else {
+ u32 crc_bits;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr);
+ ax_local->multi_filter[crc_bits >> 29] |=
+ (1 << ((crc_bits >> 26) & 7));
+ }
+
+ for (i = 0; i < 4; i++) {
+ AX_WRITE(&ax_local->ax_spi,
+ ((ax_local->multi_filter[i * 2 + 1] << 8) |
+ ax_local->multi_filter[i * 2]), P3_MFAR(i));
+ }
+ }
+
+ AX_WRITE(&ax_local->ax_spi, rx_ctl, P2_RXCR);
+}
+
+static void ax88796c_set_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[4] << 8) |
+ (u16)ndev->dev_addr[5]), P3_MACASR0);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[2] << 8) |
+ (u16)ndev->dev_addr[3]), P3_MACASR1);
+ AX_WRITE(&ax_local->ax_spi, ((u16)(ndev->dev_addr[0] << 8) |
+ (u16)ndev->dev_addr[1]), P3_MACASR2);
+}
+
+static void ax88796c_load_mac_addr(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u16 temp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* Try the device tree first */
+ if (!eth_platform_get_mac_address(&ax_local->spi->dev, ndev->dev_addr) &&
+ is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from device tree\n");
+ return;
+ }
+
+ /* Read the MAC address from AX88796C */
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR0);
+ ndev->dev_addr[5] = (u8)temp;
+ ndev->dev_addr[4] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR1);
+ ndev->dev_addr[3] = (u8)temp;
+ ndev->dev_addr[2] = (u8)(temp >> 8);
+
+ temp = AX_READ(&ax_local->ax_spi, P3_MACASR2);
+ ndev->dev_addr[1] = (u8)temp;
+ ndev->dev_addr[0] = (u8)(temp >> 8);
+
+ if (is_valid_ether_addr(ndev->dev_addr)) {
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev,
+ "MAC address read from ASIX chip\n");
+ return;
+ }
+
+ /* Use random address if none found */
+ if (netif_msg_probe(ax_local))
+ dev_info(&ax_local->spi->dev, "Use random MAC address\n");
+ eth_hw_addr_random(ndev);
+}
+
+static void ax88796c_proc_tx_hdr(struct tx_pkt_info *info, u8 ip_summed)
+{
+ u16 pkt_len_bar = (~info->pkt_len & TX_HDR_SOP_PKTLENBAR);
+
+ /* Prepare SOP header */
+ info->sop.flags_len = info->pkt_len |
+ ((ip_summed == CHECKSUM_NONE) ||
+ (ip_summed == CHECKSUM_UNNECESSARY) ? TX_HDR_SOP_DICF : 0);
+
+ info->sop.seq_lenbar = ((info->seq_num << 11) & TX_HDR_SOP_SEQNUM)
+ | pkt_len_bar;
+ cpu_to_be16s(&info->sop.flags_len);
+ cpu_to_be16s(&info->sop.seq_lenbar);
+
+ /* Prepare Segment header */
+ info->seg.flags_seqnum_seglen = TX_HDR_SEG_FS | TX_HDR_SEG_LS
+ | info->pkt_len;
+
+ info->seg.eo_so_seglenbar = pkt_len_bar;
+
+ cpu_to_be16s(&info->seg.flags_seqnum_seglen);
+ cpu_to_be16s(&info->seg.eo_so_seglenbar);
+
+ /* Prepare EOP header */
+ info->eop.seq_len = ((info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUM) | info->pkt_len;
+ info->eop.seqbar_lenbar = ((~info->seq_num << 11) &
+ TX_HDR_EOP_SEQNUMBAR) | pkt_len_bar;
+
+ cpu_to_be16s(&info->eop.seq_len);
+ cpu_to_be16s(&info->eop.seqbar_lenbar);
+}
+
+static int
+ax88796c_check_free_pages(struct ax88796c_device *ax_local, u8 need_pages)
+{
+ u8 free_pages;
+ u16 tmp;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ free_pages = AX_READ(&ax_local->ax_spi, P0_TFBFCR) & TX_FREEBUF_MASK;
+ if (free_pages < need_pages) {
+ /* schedule free page interrupt */
+ tmp = AX_READ(&ax_local->ax_spi, P0_TFBFCR)
+ & TFBFCR_SCHE_FREE_PAGE;
+ AX_WRITE(&ax_local->ax_spi, tmp | TFBFCR_TX_PAGE_SET |
+ TFBFCR_SET_FREE_PAGE(need_pages),
+ P0_TFBFCR);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static struct sk_buff *
+ax88796c_tx_fixup(struct net_device *ndev, struct sk_buff_head *q)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u8 spi_len = ax_local->ax_spi.comp ? 1 : 4;
+ struct sk_buff *skb;
+ struct tx_pkt_info info;
+ struct skb_data *entry;
+ u16 pkt_len;
+ u8 padlen, seq_num;
+ u8 need_pages;
+ int headroom;
+ int tailroom;
+
+ if (skb_queue_empty(q))
+ return NULL;
+
+ skb = skb_peek(q);
+ pkt_len = skb->len;
+ need_pages = (pkt_len + TX_OVERHEAD + 127) >> 7;
+ if (ax88796c_check_free_pages(ax_local, need_pages) != 0)
+ return NULL;
+
+ headroom = skb_headroom(skb);
+ tailroom = skb_tailroom(skb);
+ padlen = round_up(pkt_len, 4) - pkt_len;
+ seq_num = ++ax_local->seq_num & 0x1F;
+
+ info.pkt_len = pkt_len;
+
+ if (skb_cloned(skb) ||
+ (headroom < (TX_OVERHEAD + spi_len)) ||
+ (tailroom < (padlen + TX_EOP_SIZE))) {
+ size_t h = max((TX_OVERHEAD + spi_len) - headroom, 0);
+ size_t t = max((padlen + TX_EOP_SIZE) - tailroom, 0);
+
+ if (pskb_expand_head(skb, h, t, GFP_KERNEL))
+ return NULL;
+ }
+
+ info.seq_num = seq_num;
+ ax88796c_proc_tx_hdr(&info, skb->ip_summed);
+
+ /* SOP and SEG header */
+ memcpy(skb_push(skb, TX_OVERHEAD), &info.sop, TX_OVERHEAD);
+
+ /* Write SPI TXQ header */
+ memcpy(skb_push(skb, spi_len), ax88796c_tx_cmd_buf, spi_len);
+
+ /* Make 32-bit alignment */
+ skb_put(skb, padlen);
+
+ /* EOP header */
+ memcpy(skb_put(skb, TX_EOP_SIZE), &info.eop, TX_EOP_SIZE);
+
+ skb_unlink(skb, q);
+
+ entry = (struct skb_data *)skb->cb;
+ memset(entry, 0, sizeof(*entry));
+ entry->len = pkt_len;
+
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+
+ netdev_info(ndev, "TX packet len %d, total len %d, seq %d\n",
+ pkt_len, skb->len, seq_num);
+
+ netdev_info(ndev, " SPI Header:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 4, 0);
+
+ netdev_info(ndev, " TX SOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4, TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX packet:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + 4 + TX_OVERHEAD,
+ skb->len - TX_EOP_SIZE - 4 - TX_OVERHEAD, 0);
+
+ netdev_info(ndev, " TX EOP:\n");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data + skb->len - 4, 4, 0);
+ }
+
+ return skb;
+}
+
+static int ax88796c_hard_xmit(struct ax88796c_device *ax_local)
+{
+ struct ax88796c_pcpu_stats *stats;
+ struct sk_buff *tx_skb;
+ struct skb_data *entry;
+ unsigned long flags;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ stats = this_cpu_ptr(ax_local->stats);
+ tx_skb = ax88796c_tx_fixup(ax_local->ndev, &ax_local->tx_wait_q);
+
+ if (!tx_skb) {
+ this_cpu_inc(ax_local->stats->tx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)tx_skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi,
+ (TSNR_TXB_START | TSNR_PKT_CNT(1)), P0_TSNR);
+
+ axspi_write_txq(&ax_local->ax_spi, tx_skb->data, tx_skb->len);
+
+ if (((AX_READ(&ax_local->ax_spi, P0_TSNR) & TXNR_TXB_IDLE) == 0) ||
+ ((ISR_TXERR & AX_READ(&ax_local->ax_spi, P0_ISR)) != 0)) {
+ /* Ack tx error int */
+ AX_WRITE(&ax_local->ax_spi, ISR_TXERR, P0_ISR);
+
+ this_cpu_inc(ax_local->stats->tx_dropped);
+
+ if (net_ratelimit())
+ netif_err(ax_local, tx_err, ax_local->ndev,
+ "TX FIFO error, re-initialize the TX bridge\n");
+
+ /* Reinitialize tx bridge */
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT |
+ AX_READ(&ax_local->ax_spi, P0_TSNR), P0_TSNR);
+ ax_local->seq_num = 0;
+ } else {
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->tx_packets);
+ u64_stats_add(&stats->tx_bytes, entry->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+ }
+
+ entry->state = tx_done;
+ dev_kfree_skb(tx_skb);
+
+ return 1;
+}
+
+static int
+ax88796c_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ skb_queue_tail(&ax_local->tx_wait_q, skb);
+ if (skb_queue_len(&ax_local->tx_wait_q) > TX_QUEUE_HIGH_WATER)
+ netif_stop_queue(ndev);
+
+ set_bit(EVENT_TX, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return NETDEV_TX_OK;
+}
+
+static void
+ax88796c_skb_return(struct ax88796c_device *ax_local,
+ struct sk_buff *skb, struct rx_header *rxhdr)
+{
+ struct net_device *ndev = ax_local->ndev;
+ struct ax88796c_pcpu_stats *stats;
+ unsigned long flags;
+ int status;
+
+ stats = this_cpu_ptr(ax_local->stats);
+
+ do {
+ if (!(ndev->features & NETIF_F_RXCSUM))
+ break;
+
+ /* checksum error bit is set */
+ if ((rxhdr->flags & RX_HDR3_L3_ERR) ||
+ (rxhdr->flags & RX_HDR3_L4_ERR))
+ break;
+
+ /* Other types may be indicated by more than one bit. */
+ if ((rxhdr->flags & RX_HDR3_L4_TYPE_TCP) ||
+ (rxhdr->flags & RX_HDR3_L4_TYPE_UDP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } while (0);
+
+ flags = u64_stats_update_begin_irqsave(&stats->syncp);
+ u64_stats_inc(&stats->rx_packets);
+ u64_stats_add(&stats->rx_bytes, skb->len);
+ u64_stats_update_end_irqrestore(&stats->syncp, flags);
+
+ skb->dev = ndev;
+ skb->protocol = eth_type_trans(skb, ax_local->ndev);
+
+ netif_info(ax_local, rx_status, ndev, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof(struct ethhdr), skb->protocol);
+
+ status = netif_rx_ni(skb);
+ if (status != NET_RX_SUCCESS && net_ratelimit())
+ netif_info(ax_local, rx_err, ndev,
+ "netif_rx status %d\n", status);
+}
+
+static void
+ax88796c_rx_fixup(struct ax88796c_device *ax_local, struct sk_buff *rx_skb)
+{
+ struct rx_header *rxhdr = (struct rx_header *)rx_skb->data;
+ struct net_device *ndev = ax_local->ndev;
+ u16 len;
+
+ be16_to_cpus(&rxhdr->flags_len);
+ be16_to_cpus(&rxhdr->seq_lenbar);
+ be16_to_cpus(&rxhdr->flags);
+
+ if ((rxhdr->flags_len & RX_HDR1_PKT_LEN) !=
+ (~rxhdr->seq_lenbar & 0x7FF)) {
+ netif_err(ax_local, rx_err, ndev, "Header error\n");
+
+ this_cpu_inc(ax_local->stats->rx_frame_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ if ((rxhdr->flags_len & RX_HDR1_MII_ERR) ||
+ (rxhdr->flags_len & RX_HDR1_CRC_ERR)) {
+ netif_err(ax_local, rx_err, ndev, "CRC or MII error\n");
+
+ this_cpu_inc(ax_local->stats->rx_crc_errors);
+ kfree_skb(rx_skb);
+ return;
+ }
+
+ len = rxhdr->flags_len & RX_HDR1_PKT_LEN;
+ if (netif_msg_pktdata(ax_local)) {
+ char pfx[IFNAMSIZ + 7];
+
+ snprintf(pfx, sizeof(pfx), "%s: ", ndev->name);
+ netdev_info(ndev, "RX data, total len %d, packet len %d\n",
+ rx_skb->len, len);
+
+ netdev_info(ndev, " Dump RX packet header:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data, sizeof(*rxhdr), 0);
+
+ netdev_info(ndev, " Dump RX packet:");
+ print_hex_dump(KERN_INFO, pfx, DUMP_PREFIX_OFFSET, 16, 1,
+ rx_skb->data + sizeof(*rxhdr), len, 0);
+ }
+
+ skb_pull(rx_skb, sizeof(*rxhdr));
+ pskb_trim(rx_skb, len);
+
+ ax88796c_skb_return(ax_local, rx_skb, rxhdr);
+}
+
+static int ax88796c_receive(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct skb_data *entry;
+ u16 w_count, pkt_len;
+ struct sk_buff *skb;
+ u8 pkt_cnt;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ /* check rx packet and total word count */
+ AX_WRITE(&ax_local->ax_spi, AX_READ(&ax_local->ax_spi, P0_RTWCR)
+ | RTWCR_RX_LATCH, P0_RTWCR);
+
+ pkt_cnt = AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_PKT_MASK;
+ if (!pkt_cnt)
+ return 0;
+
+ pkt_len = AX_READ(&ax_local->ax_spi, P0_RCPHR) & 0x7FF;
+
+ w_count = round_up(pkt_len + 6, 4) >> 1;
+
+ skb = netdev_alloc_skb(ndev, w_count * 2);
+ if (!skb) {
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_DISCARD, P0_RXBCR1);
+ this_cpu_inc(ax_local->stats->rx_dropped);
+ return 0;
+ }
+ entry = (struct skb_data *)skb->cb;
+
+ AX_WRITE(&ax_local->ax_spi, RXBCR1_RXB_START | w_count, P0_RXBCR1);
+
+ axspi_read_rxq(&ax_local->ax_spi,
+ skb_put(skb, w_count * 2), skb->len);
+
+ /* Check if rx bridge is idle */
+ if ((AX_READ(&ax_local->ax_spi, P0_RXBCR2) & RXBCR2_RXB_IDLE) == 0) {
+ if (net_ratelimit())
+ netif_err(ax_local, rx_err, ndev,
+ "Rx Bridge is not idle\n");
+ AX_WRITE(&ax_local->ax_spi, RXBCR2_RXB_REINIT, P0_RXBCR2);
+
+ entry->state = rx_err;
+ } else {
+ entry->state = rx_done;
+ }
+
+ AX_WRITE(&ax_local->ax_spi, ISR_RXPKT, P0_ISR);
+
+ ax88796c_rx_fixup(ax_local, skb);
+
+ return 1;
+}
+
+static int ax88796c_process_isr(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+ int todo = 0;
+ u16 isr;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
+ isr = AX_READ(&ax_local->ax_spi, P0_ISR);
+ AX_WRITE(&ax_local->ax_spi, isr, P0_ISR);
+
+ netif_dbg(ax_local, intr, ndev, " ISR 0x%04x\n", isr);
+
+ if (isr & ISR_TXERR) {
+ netif_dbg(ax_local, intr, ndev, " TXERR interrupt\n");
+ AX_WRITE(&ax_local->ax_spi, TXNR_TXB_REINIT, P0_TSNR);
+ ax_local->seq_num = 0x1f;
+ }
+
+ if (isr & ISR_TXPAGES) {
+ netif_dbg(ax_local, intr, ndev, " TXPAGES interrupt\n");
+ set_bit(EVENT_TX, &ax_local->flags);
+ }
+
+ if (isr & ISR_LINK) {
+ netif_dbg(ax_local, intr, ndev, " Link change interrupt\n");
+ phy_mac_interrupt(ax_local->ndev->phydev);
+ }
+
+ if (isr & ISR_RXPKT) {
+ netif_dbg(ax_local, intr, ndev, " RX interrupt\n");
+ todo = ax88796c_receive(ax_local->ndev);
+ }
+
+ return todo;
+}
+
+static irqreturn_t ax88796c_interrupt(int irq, void *dev_instance)
+{
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+
+ ndev = dev_instance;
+ if (!ndev) {
+ pr_err("irq %d for unknown device.\n", irq);
+ return IRQ_RETVAL(0);
+ }
+ ax_local = to_ax88796c_device(ndev);
+
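+ /* Keep the IRQ line disabled until ax88796c_work() has drained
+ * the interrupt status over SPI and re-enables it.
+ */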
+ disable_irq_nosync(irq);
+
+ netif_dbg(ax_local, intr, ndev, "Interrupt occurred\n");
+
+ set_bit(EVENT_INTR, &ax_local->flags);
+ schedule_work(&ax_local->ax_work);
+
+ return IRQ_HANDLED;
+}
+
+static void ax88796c_work(struct work_struct *work)
+{
+ struct ax88796c_device *ax_local =
+ container_of(work, struct ax88796c_device, ax_work);
+
+ mutex_lock(&ax_local->spi_lock);
+
+ if (test_bit(EVENT_SET_MULTI, &ax_local->flags)) {
+ ax88796c_set_hw_multicast(ax_local->ndev);
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ }
+
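+ /* Mask all MAC interrupts while the ISR is drained, then restore
+ * the default mask and re-enable the IRQ line.
+ */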
+ if (test_bit(EVENT_INTR, &ax_local->flags)) {
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+
+ while (ax88796c_process_isr(ax_local))
+ /* nothing */;
+
+ clear_bit(EVENT_INTR, &ax_local->flags);
+
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ enable_irq(ax_local->ndev->irq);
+ }
+
+ if (test_bit(EVENT_TX, &ax_local->flags)) {
+ while (skb_queue_len(&ax_local->tx_wait_q)) {
+ if (!ax88796c_hard_xmit(ax_local))
+ break;
+ }
+
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ if (netif_queue_stopped(ax_local->ndev) &&
+ (skb_queue_len(&ax_local->tx_wait_q) < TX_QUEUE_LOW_WATER))
+ netif_wake_queue(ax_local->ndev);
+ }
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ u32 rx_frame_errors = 0, rx_crc_errors = 0;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int cpu;
+
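+ /* Sum the per-CPU counters; the 64-bit counters are read under
+ * the u64_stats retry loop, the u32 error counters are read as-is.
+ */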
+ for_each_possible_cpu(cpu) {
+ struct ax88796c_pcpu_stats *s;
+ u64 rx_packets, rx_bytes;
+ u64 tx_packets, tx_bytes;
+
+ s = per_cpu_ptr(ax_local->stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin_irq(&s->syncp);
+ rx_packets = u64_stats_read(&s->rx_packets);
+ rx_bytes = u64_stats_read(&s->rx_bytes);
+ tx_packets = u64_stats_read(&s->tx_packets);
+ tx_bytes = u64_stats_read(&s->tx_bytes);
+ } while (u64_stats_fetch_retry_irq(&s->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+
+ rx_dropped += s->rx_dropped;
+ tx_dropped += s->tx_dropped;
+ rx_frame_errors += s->rx_frame_errors;
+ rx_crc_errors += s->rx_crc_errors;
+ }
+
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ stats->rx_frame_errors = rx_frame_errors;
+ stats->rx_crc_errors = rx_crc_errors;
+}
+
+static void ax88796c_set_mac(struct ax88796c_device *ax_local)
+{
+ u16 maccr;
+
+ maccr = (ax_local->link) ? MACCR_RXEN : 0;
+
+ switch (ax_local->speed) {
+ case SPEED_100:
+ maccr |= MACCR_SPEED_100;
+ break;
+ case SPEED_10:
+ case SPEED_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
+ switch (ax_local->duplex) {
+ case DUPLEX_FULL:
+ maccr |= MACCR_DUPLEX_FULL;
+ break;
+ case DUPLEX_HALF:
+ case DUPLEX_UNKNOWN:
+ break;
+ default:
+ return;
+ }
+
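+ /* With pause autonegotiation, derive RX/TX flow control from the
+ * resolved pause/asym_pause bits; otherwise apply the forced
+ * AX_FC_RX/AX_FC_TX settings.
+ */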
+ if (ax_local->flowctrl & AX_FC_ANEG &&
+ ax_local->phydev->autoneg) {
+ maccr |= ax_local->pause ? MACCR_RXFC_ENABLE : 0;
+ maccr |= !ax_local->pause != !ax_local->asym_pause ?
+ MACCR_TXFC_ENABLE : 0;
+ } else {
+ maccr |= (ax_local->flowctrl & AX_FC_RX) ? MACCR_RXFC_ENABLE : 0;
+ maccr |= (ax_local->flowctrl & AX_FC_TX) ? MACCR_TXFC_ENABLE : 0;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ maccr |= AX_READ(&ax_local->ax_spi, P0_MACCR) &
+ ~(MACCR_DUPLEX_FULL | MACCR_SPEED_100 |
+ MACCR_TXFC_ENABLE | MACCR_RXFC_ENABLE);
+ AX_WRITE(&ax_local->ax_spi, maccr, P0_MACCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+}
+
+static void ax88796c_handle_link_change(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ bool update = false;
+
+ if (phydev->link && (ax_local->speed != phydev->speed ||
+ ax_local->duplex != phydev->duplex ||
+ ax_local->pause != phydev->pause ||
+ ax_local->asym_pause != phydev->asym_pause)) {
+ ax_local->speed = phydev->speed;
+ ax_local->duplex = phydev->duplex;
+ ax_local->pause = phydev->pause;
+ ax_local->asym_pause = phydev->asym_pause;
+ update = true;
+ }
+
+ if (phydev->link != ax_local->link) {
+ if (!phydev->link) {
+ ax_local->speed = SPEED_UNKNOWN;
+ ax_local->duplex = DUPLEX_UNKNOWN;
+ }
+
+ ax_local->link = phydev->link;
+ update = true;
+ }
+
+ if (update)
+ ax88796c_set_mac(ax_local);
+
+ if (net_ratelimit())
+ phy_print_status(ndev->phydev);
+}
+
+static void ax88796c_set_csums(struct ax88796c_device *ax_local)
+{
+ struct net_device *ndev = ax_local->ndev;
+
+ lockdep_assert_held(&ax_local->spi_lock);
+
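+ /* COERCR0/1 control RX checksum validation and COETCR0/1 control
+ * TX checksum insertion; the defaults cover IPv4/IPv6 TCP and UDP.
+ */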
+ if (ndev->features & NETIF_F_RXCSUM) {
+ AX_WRITE(&ax_local->ax_spi, COERCR0_DEFAULT, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, COERCR1_DEFAULT, P4_COERCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COERCR1);
+ }
+
+ if (ndev->features & NETIF_F_HW_CSUM) {
+ AX_WRITE(&ax_local->ax_spi, COETCR0_DEFAULT, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, COETCR1_TXPPPE, P4_COETCR1);
+ } else {
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR0);
+ AX_WRITE(&ax_local->ax_spi, 0, P4_COETCR1);
+ }
+}
+
+static int
+ax88796c_open(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ unsigned long irq_flag = 0;
+ int fc = AX_FC_NONE;
+ int ret;
+ u16 t;
+
+ ret = request_irq(ndev->irq, ax88796c_interrupt,
+ irq_flag, ndev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "unable to get IRQ %d (errno=%d).\n",
+ ndev->irq, ret);
+ return ret;
+ }
+
+ mutex_lock(&ax_local->spi_lock);
+
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ free_irq(ndev->irq, ndev);
+ mutex_unlock(&ax_local->spi_lock);
+ return ret;
+ }
+ ax_local->seq_num = 0x1f;
+
+ ax88796c_set_mac_addr(ndev);
+ ax88796c_set_csums(ax_local);
+
+ /* Disable packet stuffing */
+ t = AX_READ(&ax_local->ax_spi, P1_RXBSPCR);
+ t &= ~RXBSPCR_STUF_ENABLE;
+ AX_WRITE(&ax_local->ax_spi, t, P1_RXBSPCR);
+
+ /* Enable RX packet process */
+ AX_WRITE(&ax_local->ax_spi, RPPER_RXEN, P1_RPPER);
+
+ t = AX_READ(&ax_local->ax_spi, P0_FER);
+ t |= FER_RXEN | FER_TXEN | FER_BSWAP | FER_IRQ_PULL;
+ AX_WRITE(&ax_local->ax_spi, t, P0_FER);
+
+ /* Setup LED mode */
+ AX_WRITE(&ax_local->ax_spi,
+ (LCR_LED0_EN | LCR_LED0_DUPLEX | LCR_LED1_EN |
+ LCR_LED1_100MODE), P2_LCR0);
+ AX_WRITE(&ax_local->ax_spi,
+ (AX_READ(&ax_local->ax_spi, P2_LCR1) & LCR_LED2_MASK) |
+ LCR_LED2_EN | LCR_LED2_LINK, P2_LCR1);
+
+ /* Disable PHY auto-polling */
+ AX_WRITE(&ax_local->ax_spi, PCR_PHYID(AX88796C_PHY_ID), P2_PCR);
+
+ /* Enable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_DEFAULT, P0_IMR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ /* Setup flow-control configuration */
+ phy_support_asym_pause(ax_local->phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ||
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising))
+ fc |= AX_FC_ANEG;
+
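+ /* Advertising Pause enables receive flow control; advertising
+ * exactly one of Pause/Asym_Pause enables transmit flow control.
+ */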
+ fc |= linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) ? AX_FC_RX : 0;
+ fc |= (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ ax_local->phydev->advertising) !=
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ ax_local->phydev->advertising)) ? AX_FC_TX : 0;
+ ax_local->flowctrl = fc;
+
+ phy_start(ax_local->ndev->phydev);
+
+ netif_start_queue(ndev);
+
+ spi_message_init(&ax_local->ax_spi.rx_msg);
+
+ return 0;
+}
+
+static int
+ax88796c_close(struct net_device *ndev)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+
+ phy_stop(ndev->phydev);
+
+ /* We take the mutex early not only to protect the device
+ * against concurrent access, but also to avoid waking up the
+ * queue in ax88796c_work(). phy_stop() must be called before
+ * this point because it takes the mutex itself to access SPI.
+ */
+ mutex_lock(&ax_local->spi_lock);
+
+ netif_stop_queue(ndev);
+
+ /* No more work can be scheduled now. Turn any pending work,
+ * including work already waiting for the mutex to be released,
+ * into a no-op.
+ */
+ netif_dbg(ax_local, ifdown, ndev, "clearing bits\n");
+ clear_bit(EVENT_SET_MULTI, &ax_local->flags);
+ clear_bit(EVENT_INTR, &ax_local->flags);
+ clear_bit(EVENT_TX, &ax_local->flags);
+
+ /* Disable MAC interrupts */
+ AX_WRITE(&ax_local->ax_spi, IMR_MASKALL, P0_IMR);
+ __skb_queue_purge(&ax_local->tx_wait_q);
+ ax88796c_soft_reset(ax_local);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ cancel_work_sync(&ax_local->ax_work);
+
+ free_irq(ndev->irq, ndev);
+
+ return 0;
+}
+
+static int
+ax88796c_set_features(struct net_device *ndev, netdev_features_t features)
+{
+ struct ax88796c_device *ax_local = to_ax88796c_device(ndev);
+ netdev_features_t changed = features ^ ndev->features;
+
+ if (!(changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM)))
+ return 0;
+
+ ndev->features = features;
+
+ if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_CSUM))
+ ax88796c_set_csums(ax_local);
+
+ return 0;
+}
+
+static const struct net_device_ops ax88796c_netdev_ops = {
+ .ndo_open = ax88796c_open,
+ .ndo_stop = ax88796c_close,
+ .ndo_start_xmit = ax88796c_start_xmit,
+ .ndo_get_stats64 = ax88796c_get_stats64,
+ .ndo_do_ioctl = ax88796c_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_features = ax88796c_set_features,
+};
+
+static int ax88796c_hard_reset(struct ax88796c_device *ax_local)
+{
+ struct device *dev = &ax_local->spi->dev;
+ struct gpio_desc *reset_gpio;
+
+ /* Get the chip reset GPIO */
+ reset_gpio = gpiod_get(dev, "reset", 0);
+ if (IS_ERR(reset_gpio)) {
+ dev_err(dev, "Could not get 'reset' GPIO: %ld", PTR_ERR(reset_gpio));
+ return PTR_ERR(reset_gpio);
+ }
+
+ /* Assert reset for 100 ms, then release it and allow 20 ms for the chip to come up */
+ gpiod_direction_output(reset_gpio, 1);
+ msleep(100);
+ gpiod_direction_output(reset_gpio, 0);
+ gpiod_put(reset_gpio);
+ msleep(20);
+
+ return 0;
+}
+
+static int ax88796c_probe(struct spi_device *spi)
+{
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ struct ax88796c_device *ax_local;
+ struct net_device *ndev;
+ u16 temp;
+ int ret;
+
+ ndev = devm_alloc_etherdev(&spi->dev, sizeof(*ax_local));
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, &spi->dev);
+
+ ax_local = to_ax88796c_device(ndev);
+
+ dev_set_drvdata(&spi->dev, ax_local);
+ ax_local->spi = spi;
+ ax_local->ax_spi.spi = spi;
+
+ ax_local->stats =
+ devm_netdev_alloc_pcpu_stats(&spi->dev,
+ struct ax88796c_pcpu_stats);
+ if (!ax_local->stats)
+ return -ENOMEM;
+
+ ax_local->ndev = ndev;
+ ax_local->priv_flags |= comp ? AX_CAP_COMP : 0;
+ ax_local->msg_enable = msg_enable;
+ mutex_init(&ax_local->spi_lock);
+
+ ax_local->mdiobus = devm_mdiobus_alloc(&spi->dev);
+ if (!ax_local->mdiobus)
+ return -ENOMEM;
+
+ ax_local->mdiobus->priv = ax_local;
+ ax_local->mdiobus->read = ax88796c_mdio_read;
+ ax_local->mdiobus->write = ax88796c_mdio_write;
+ ax_local->mdiobus->name = "ax88796c-mdiobus";
+ ax_local->mdiobus->phy_mask = (u32)~BIT(AX88796C_PHY_ID);
+ ax_local->mdiobus->parent = &spi->dev;
+
+ snprintf(ax_local->mdiobus->id, MII_BUS_ID_SIZE,
+ "ax88796c-%s.%u", dev_name(&spi->dev), spi->chip_select);
+
+ ret = devm_mdiobus_register(&spi->dev, ax_local->mdiobus);
+ if (ret < 0) {
+ dev_err(&spi->dev, "Could not register MDIO bus\n");
+ return ret;
+ }
+
+ if (netif_msg_probe(ax_local)) {
+ dev_info(&spi->dev, "AX88796C-SPI Configuration:\n");
+ dev_info(&spi->dev, " Compression : %s\n",
+ ax_local->priv_flags & AX_CAP_COMP ? "ON" : "OFF");
+ }
+
+ ndev->irq = spi->irq;
+ ndev->netdev_ops = &ax88796c_netdev_ops;
+ ndev->ethtool_ops = &ax88796c_ethtool_ops;
+ ndev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ ndev->features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
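+ /* Reserve headroom for the TX SOP and segment headers and
+ * tailroom for the EOP trailer (see TX_OVERHEAD and TX_EOP_SIZE).
+ */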
+ ndev->needed_headroom = TX_OVERHEAD;
+ ndev->needed_tailroom = TX_EOP_SIZE;
+
+ mutex_lock(&ax_local->spi_lock);
+
+ /* ax88796c gpio reset */
+ ax88796c_hard_reset(ax_local);
+
+ /* Reset AX88796C */
+ ret = ax88796c_soft_reset(ax_local);
+ if (ret < 0) {
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+ /* Check board revision */
+ temp = AX_READ(&ax_local->ax_spi, P2_CRIR);
+ if ((temp & 0xF) != 0x0) {
+ dev_err(&spi->dev, "spi read failed: %d\n", temp);
+ ret = -ENODEV;
+ mutex_unlock(&ax_local->spi_lock);
+ goto err;
+ }
+
+ /* Reload EEPROM */
+ ax88796c_reload_eeprom(ax_local);
+
+ ax88796c_load_mac_addr(ndev);
+
+ if (netif_msg_probe(ax_local))
+ dev_info(&spi->dev,
+ "irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
+ ndev->irq,
+ ndev->dev_addr[0], ndev->dev_addr[1],
+ ndev->dev_addr[2], ndev->dev_addr[3],
+ ndev->dev_addr[4], ndev->dev_addr[5]);
+
+ /* Disable power saving */
+ AX_WRITE(&ax_local->ax_spi, (AX_READ(&ax_local->ax_spi, P0_PSCR)
+ & PSCR_PS_MASK) | PSCR_PS_D0, P0_PSCR);
+
+ mutex_unlock(&ax_local->spi_lock);
+
+ INIT_WORK(&ax_local->ax_work, ax88796c_work);
+
+ skb_queue_head_init(&ax_local->tx_wait_q);
+
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ ax_local->mdiobus->id, AX88796C_PHY_ID);
+ ax_local->phydev = phy_connect(ax_local->ndev, phy_id,
+ ax88796c_handle_link_change,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(ax_local->phydev)) {
+ ret = PTR_ERR(ax_local->phydev);
+ goto err;
+ }
+ ax_local->phydev->irq = PHY_POLL;
+
+ ret = devm_register_netdev(&spi->dev, ndev);
+ if (ret) {
+ dev_err(&spi->dev, "failed to register a network device\n");
+ goto err_phy_dis;
+ }
+
+ netif_info(ax_local, probe, ndev, "%s %s registered\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+ phy_attached_info(ax_local->phydev);
+
+ return 0;
+
+err_phy_dis:
+ phy_disconnect(ax_local->phydev);
+err:
+ return ret;
+}
+
+static int ax88796c_remove(struct spi_device *spi)
+{
+ struct ax88796c_device *ax_local = dev_get_drvdata(&spi->dev);
+ struct net_device *ndev = ax_local->ndev;
+
+ phy_disconnect(ndev->phydev);
+
+ netif_info(ax_local, probe, ndev, "removing network device %s %s\n",
+ dev_driver_string(&spi->dev),
+ dev_name(&spi->dev));
+
+ return 0;
+}
+
+static const struct of_device_id ax88796c_dt_ids[] = {
+ { .compatible = "asix,ax88796c" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ax88796c_dt_ids);
+
+static const struct spi_device_id asix_id[] = {
+ { "ax88796c", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, asix_id);
+
+static struct spi_driver ax88796c_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = of_match_ptr(ax88796c_dt_ids),
+ },
+ .probe = ax88796c_probe,
+ .remove = ax88796c_remove,
+ .id_table = asix_id,
+};
+
+static __init int ax88796c_spi_init(void)
+{
+ int ret;
+
+ bitmap_zero(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ ret = bitmap_parse(no_regs_list, 35,
+ ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ if (ret) {
+ bitmap_fill(ax88796c_no_regs_mask, AX88796C_REGDUMP_LEN);
+ pr_err("Invalid bitmap description, masking all registers\n");
+ }
+
+ return spi_register_driver(&ax88796c_spi_driver);
+}
+
+static __exit void ax88796c_spi_exit(void)
+{
+ spi_unregister_driver(&ax88796c_spi_driver);
+}
+
+module_init(ax88796c_spi_init);
+module_exit(ax88796c_spi_exit);
+
+MODULE_AUTHOR("Łukasz Stelmach <l.stelmach@samsung.com>");
+MODULE_DESCRIPTION("ASIX AX88796C SPI Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/asix/ax88796c_main.h b/drivers/net/ethernet/asix/ax88796c_main.h
new file mode 100644
index 000000000000..80263c3cef75
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_main.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_MAIN_H
+#define _AX88796C_MAIN_H
+
+#include <linux/netdevice.h>
+#include <linux/mii.h>
+
+#include "ax88796c_spi.h"
+
+/* These identify the driver base version and may not be removed. */
+#define DRV_NAME "ax88796c"
+#define ADP_NAME "ASIX AX88796C SPI Ethernet Adapter"
+
+#define TX_QUEUE_HIGH_WATER 45 /* Tx queue high water mark */
+#define TX_QUEUE_LOW_WATER 20 /* Tx queue low water mark */
+
+#define AX88796C_REGDUMP_LEN 256
+#define AX88796C_PHY_REGDUMP_LEN 14
+#define AX88796C_PHY_ID 0x10
+
+#define TX_OVERHEAD 8
+#define TX_EOP_SIZE 4
+
+#define AX_MCAST_FILTER_SIZE 8
+#define AX_MAX_MCAST 64
+#define AX_MAX_CLK 80000000
+#define TX_HDR_SOP_DICF 0x8000
+#define TX_HDR_SOP_CPHI 0x4000
+#define TX_HDR_SOP_INT 0x2000
+#define TX_HDR_SOP_MDEQ 0x1000
+#define TX_HDR_SOP_PKTLEN 0x07FF
+#define TX_HDR_SOP_SEQNUM 0xF800
+#define TX_HDR_SOP_PKTLENBAR 0x07FF
+
+#define TX_HDR_SEG_FS 0x8000
+#define TX_HDR_SEG_LS 0x4000
+#define TX_HDR_SEG_SEGNUM 0x3800
+#define TX_HDR_SEG_SEGLEN 0x0700
+#define TX_HDR_SEG_EOFST 0xC000
+#define TX_HDR_SEG_SOFST 0x3800
+#define TX_HDR_SEG_SEGLENBAR 0x07FF
+
+#define TX_HDR_EOP_SEQNUM 0xF800
+#define TX_HDR_EOP_PKTLEN 0x07FF
+#define TX_HDR_EOP_SEQNUMBAR 0xF800
+#define TX_HDR_EOP_PKTLENBAR 0x07FF
+
+/* Rx header fields mask */
+#define RX_HDR1_MCBC 0x8000
+#define RX_HDR1_STUFF_PKT 0x4000
+#define RX_HDR1_MII_ERR 0x2000
+#define RX_HDR1_CRC_ERR 0x1000
+#define RX_HDR1_PKT_LEN 0x07FF
+
+#define RX_HDR2_SEQ_NUM 0xF800
+#define RX_HDR2_PKT_LEN_BAR 0x7FFF
+
+#define RX_HDR3_PE 0x8000
+#define RX_HDR3_L3_TYPE_IPV4V6 0x6000
+#define RX_HDR3_L3_TYPE_IP 0x4000
+#define RX_HDR3_L3_TYPE_IPV6 0x2000
+#define RX_HDR3_L4_TYPE_ICMPV6 0x1400
+#define RX_HDR3_L4_TYPE_TCP 0x1000
+#define RX_HDR3_L4_TYPE_IGMP 0x0c00
+#define RX_HDR3_L4_TYPE_ICMP 0x0800
+#define RX_HDR3_L4_TYPE_UDP 0x0400
+#define RX_HDR3_L3_ERR 0x0200
+#define RX_HDR3_L4_ERR 0x0100
+#define RX_HDR3_PRIORITY(x) ((x) << 4)
+#define RX_HDR3_STRIP 0x0008
+#define RX_HDR3_VLAN_ID 0x0007
+
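+/* Per-CPU statistics: the u64 counters are protected by @syncp, while
+ * the u32 error counters are updated with this_cpu ops only.
+ */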
+struct ax88796c_pcpu_stats {
+ u64_stats_t rx_packets;
+ u64_stats_t rx_bytes;
+ u64_stats_t tx_packets;
+ u64_stats_t tx_bytes;
+ struct u64_stats_sync syncp;
+ u32 rx_dropped;
+ u32 tx_dropped;
+ u32 rx_frame_errors;
+ u32 rx_crc_errors;
+};
+
+struct ax88796c_device {
+ struct spi_device *spi;
+ struct net_device *ndev;
+ struct ax88796c_pcpu_stats __percpu *stats;
+
+ struct work_struct ax_work;
+
+ struct mutex spi_lock; /* device access */
+
+ struct sk_buff_head tx_wait_q;
+
+ struct axspi_data ax_spi;
+
+ struct mii_bus *mdiobus;
+ struct phy_device *phydev;
+
+ int msg_enable;
+
+ u16 seq_num;
+
+ u8 multi_filter[AX_MCAST_FILTER_SIZE];
+
+ int link;
+ int speed;
+ int duplex;
+ int pause;
+ int asym_pause;
+ int flowctrl;
+ #define AX_FC_NONE 0
+ #define AX_FC_RX BIT(0)
+ #define AX_FC_TX BIT(1)
+ #define AX_FC_ANEG BIT(2)
+
+ u32 priv_flags;
+ #define AX_CAP_COMP BIT(0)
+ #define AX_PRIV_FLAGS_MASK (AX_CAP_COMP)
+
+ unsigned long flags;
+ #define EVENT_INTR BIT(0)
+ #define EVENT_TX BIT(1)
+ #define EVENT_SET_MULTI BIT(2)
+
+};
+
+#define to_ax88796c_device(ndev) ((struct ax88796c_device *)netdev_priv(ndev))
+
+enum skb_state {
+ illegal = 0,
+ tx_done,
+ rx_done,
+ rx_err,
+};
+
+struct skb_data {
+ enum skb_state state;
+ size_t len;
+};
+
+/* AX88796C register definitions */
+ /* Definition of PAGE0 */
+#define P0_PSR (0x00)
+ #define PSR_DEV_READY BIT(7)
+ #define PSR_RESET (0 << 15)
+ #define PSR_RESET_CLR BIT(15)
+#define P0_BOR (0x02)
+#define P0_FER (0x04)
+ #define FER_IPALM BIT(0)
+ #define FER_DCRC BIT(1)
+ #define FER_RH3M BIT(2)
+ #define FER_HEADERSWAP BIT(7)
+ #define FER_WSWAP BIT(8)
+ #define FER_BSWAP BIT(9)
+ #define FER_INTHI BIT(10)
+ #define FER_INTLO (0 << 10)
+ #define FER_IRQ_PULL BIT(11)
+ #define FER_RXEN BIT(14)
+ #define FER_TXEN BIT(15)
+#define P0_ISR (0x06)
+ #define ISR_RXPKT BIT(0)
+ #define ISR_MDQ BIT(4)
+ #define ISR_TXT BIT(5)
+ #define ISR_TXPAGES BIT(6)
+ #define ISR_TXERR BIT(8)
+ #define ISR_LINK BIT(9)
+#define P0_IMR (0x08)
+ #define IMR_RXPKT BIT(0)
+ #define IMR_MDQ BIT(4)
+ #define IMR_TXT BIT(5)
+ #define IMR_TXPAGES BIT(6)
+ #define IMR_TXERR BIT(8)
+ #define IMR_LINK BIT(9)
+ #define IMR_MASKALL (0xFFFF)
+ #define IMR_DEFAULT (IMR_TXERR)
+#define P0_WFCR (0x0A)
+ #define WFCR_PMEIND BIT(0) /* PME indication */
+ #define WFCR_PMETYPE BIT(1) /* PME I/O type */
+ #define WFCR_PMEPOL BIT(2) /* PME polarity */
+ #define WFCR_PMERST BIT(3) /* Reset PME */
+ #define WFCR_SLEEP BIT(4) /* Enable sleep mode */
+ #define WFCR_WAKEUP BIT(5) /* Enable wakeup mode */
+ #define WFCR_WAITEVENT BIT(6) /* Reserved */
+ #define WFCR_CLRWAKE BIT(7) /* Clear wakeup */
+ #define WFCR_LINKCH BIT(8) /* Enable link change */
+ #define WFCR_MAGICP BIT(9) /* Enable magic packet */
+ #define WFCR_WAKEF BIT(10) /* Enable wakeup frame */
+ #define WFCR_PMEEN BIT(11) /* Enable PME pin */
+ #define WFCR_LINKCHS BIT(12) /* Link change status */
+ #define WFCR_MAGICPS BIT(13) /* Magic packet status */
+ #define WFCR_WAKEFS BIT(14) /* Wakeup frame status */
+ #define WFCR_PMES BIT(15) /* PME pin status */
+#define P0_PSCR (0x0C)
+ #define PSCR_PS_MASK (0xFFF0)
+ #define PSCR_PS_D0 (0)
+ #define PSCR_PS_D1 BIT(0)
+ #define PSCR_PS_D2 BIT(1)
+ #define PSCR_FPS BIT(3) /* Enable fiber mode PS */
+ #define PSCR_SWPS BIT(4) /* Enable software */
+ /* PS control */
+ #define PSCR_WOLPS BIT(5) /* Enable WOL PS */
+ #define PSCR_SWWOL BIT(6) /* Enable software select */
+ /* WOL PS */
+ #define PSCR_PHYOSC BIT(7) /* Internal PHY OSC control */
+ #define PSCR_FOFEF BIT(8) /* Force PHY generate FEF */
+ #define PSCR_FOF BIT(9) /* Force PHY in fiber mode */
+ #define PSCR_PHYPD BIT(10) /* PHY power down. */
+ /* Active high */
+ #define PSCR_PHYRST BIT(11) /* PHY reset signal. */
+ /* Active low */
+ #define PSCR_PHYCSIL BIT(12) /* PHY cable energy detect */
+ #define PSCR_PHYCOFF BIT(13) /* PHY cable off */
+ #define PSCR_PHYLINK BIT(14) /* PHY link status */
+ #define PSCR_EEPOK BIT(15) /* EEPROM load complete */
+#define P0_MACCR (0x0E)
+ #define MACCR_RXEN BIT(0) /* Enable RX */
+ #define MACCR_DUPLEX_FULL BIT(1) /* 1: Full, 0: Half */
+ #define MACCR_SPEED_100 BIT(2) /* 1: 100Mbps, 0: 10Mbps */
+ #define MACCR_RXFC_ENABLE BIT(3)
+ #define MACCR_RXFC_MASK 0xFFF7
+ #define MACCR_TXFC_ENABLE BIT(4)
+ #define MACCR_TXFC_MASK 0xFFEF
+ #define MACCR_PSI BIT(6) /* Software Cable-Off */
+ /* Power Saving Interrupt */
+ #define MACCR_PF BIT(7)
+ #define MACCR_PMM_BITS 8
+ #define MACCR_PMM_MASK (0x1F00)
+ #define MACCR_PMM_RESET BIT(8)
+ #define MACCR_PMM_WAIT (2 << 8)
+ #define MACCR_PMM_READY (3 << 8)
+ #define MACCR_PMM_D1 (4 << 8)
+ #define MACCR_PMM_D2 (5 << 8)
+ #define MACCR_PMM_WAKE (7 << 8)
+ #define MACCR_PMM_D1_WAKE (8 << 8)
+ #define MACCR_PMM_D2_WAKE (9 << 8)
+ #define MACCR_PMM_SLEEP (10 << 8)
+ #define MACCR_PMM_PHY_RESET (11 << 8)
+ #define MACCR_PMM_SOFT_D1 (16 << 8)
+ #define MACCR_PMM_SOFT_D2 (17 << 8)
+#define P0_TFBFCR (0x10)
+ #define TFBFCR_SCHE_FREE_PAGE 0xE07F
+ #define TFBFCR_FREE_PAGE_BITS 0x07
+ #define TFBFCR_FREE_PAGE_LATCH BIT(6)
+ #define TFBFCR_SET_FREE_PAGE(x) (((x) & 0x3F) << TFBFCR_FREE_PAGE_BITS)
+ #define TFBFCR_TX_PAGE_SET BIT(13)
+ #define TFBFCR_MANU_ENTX BIT(15)
+ #define TX_FREEBUF_MASK 0x003F
+ #define TX_DPTSTART 0x4000
+
+#define P0_TSNR (0x12)
+ #define TXNR_TXB_ERR BIT(5)
+ #define TXNR_TXB_IDLE BIT(6)
+ #define TSNR_PKT_CNT(x) (((x) & 0x3F) << 8)
+ #define TXNR_TXB_REINIT BIT(14)
+ #define TSNR_TXB_START BIT(15)
+#define P0_RTDPR (0x14)
+#define P0_RXBCR1 (0x16)
+ #define RXBCR1_RXB_DISCARD BIT(14)
+ #define RXBCR1_RXB_START BIT(15)
+#define P0_RXBCR2 (0x18)
+ #define RXBCR2_PKT_MASK (0xFF)
+ #define RXBCR2_RXPC_MASK (0x7F)
+ #define RXBCR2_RXB_READY BIT(13)
+ #define RXBCR2_RXB_IDLE BIT(14)
+ #define RXBCR2_RXB_REINIT BIT(15)
+#define P0_RTWCR (0x1A)
+ #define RTWCR_RXWC_MASK (0x3FFF)
+ #define RTWCR_RX_LATCH BIT(15)
+#define P0_RCPHR (0x1C)
+
+ /* Definition of PAGE1 */
+#define P1_RPPER (0x22)
+ #define RPPER_RXEN BIT(0)
+#define P1_MRCR (0x28)
+#define P1_MDR (0x2A)
+#define P1_RMPR (0x2C)
+#define P1_TMPR (0x2E)
+#define P1_RXBSPCR (0x30)
+ #define RXBSPCR_STUF_WORD_CNT(x) (((x) & 0x7000) >> 12)
+ #define RXBSPCR_STUF_ENABLE BIT(15)
+#define P1_MCR (0x32)
+ #define MCR_SBP BIT(8)
+ #define MCR_SM BIT(9)
+ #define MCR_CRCENLAN BIT(11)
+ #define MCR_STP BIT(12)
+ /* Definition of PAGE2 */
+#define P2_CIR (0x42)
+#define P2_PCR (0x44)
+ #define PCR_POLL_EN BIT(0)
+ #define PCR_POLL_FLOWCTRL BIT(1)
+ #define PCR_POLL_BMCR BIT(2)
+ #define PCR_PHYID(x) ((x) << 8)
+#define P2_PHYSR (0x46)
+#define P2_MDIODR (0x48)
+#define P2_MDIOCR (0x4A)
+ #define MDIOCR_RADDR(x) ((x) & 0x1F)
+ #define MDIOCR_FADDR(x) (((x) & 0x1F) << 8)
+ #define MDIOCR_VALID BIT(13)
+ #define MDIOCR_READ BIT(14)
+ #define MDIOCR_WRITE BIT(15)
+#define P2_LCR0 (0x4C)
+ #define LCR_LED0_EN BIT(0)
+ #define LCR_LED0_100MODE BIT(1)
+ #define LCR_LED0_DUPLEX BIT(2)
+ #define LCR_LED0_LINK BIT(3)
+ #define LCR_LED0_ACT BIT(4)
+ #define LCR_LED0_COL BIT(5)
+ #define LCR_LED0_10MODE BIT(6)
+ #define LCR_LED0_DUPCOL BIT(7)
+ #define LCR_LED1_EN BIT(8)
+ #define LCR_LED1_100MODE BIT(9)
+ #define LCR_LED1_DUPLEX BIT(10)
+ #define LCR_LED1_LINK BIT(11)
+ #define LCR_LED1_ACT BIT(12)
+ #define LCR_LED1_COL BIT(13)
+ #define LCR_LED1_10MODE BIT(14)
+ #define LCR_LED1_DUPCOL BIT(15)
+#define P2_LCR1 (0x4E)
+ #define LCR_LED2_MASK (0xFF00)
+ #define LCR_LED2_EN BIT(0)
+ #define LCR_LED2_100MODE BIT(1)
+ #define LCR_LED2_DUPLEX BIT(2)
+ #define LCR_LED2_LINK BIT(3)
+ #define LCR_LED2_ACT BIT(4)
+ #define LCR_LED2_COL BIT(5)
+ #define LCR_LED2_10MODE BIT(6)
+ #define LCR_LED2_DUPCOL BIT(7)
+#define P2_IPGCR (0x50)
+#define P2_CRIR (0x52)
+#define P2_FLHWCR (0x54)
+#define P2_RXCR (0x56)
+ #define RXCR_PRO BIT(0)
+ #define RXCR_AMALL BIT(1)
+ #define RXCR_SEP BIT(2)
+ #define RXCR_AB BIT(3)
+ #define RXCR_AM BIT(4)
+ #define RXCR_AP BIT(5)
+ #define RXCR_ARP BIT(6)
+#define P2_JLCR (0x58)
+#define P2_MPLR (0x5C)
+
+ /* Definition of PAGE3 */
+#define P3_MACASR0 (0x62)
+ #define P3_MACASR(x) (P3_MACASR0 + 2 * (x))
+ #define MACASR_LOWBYTE_MASK 0x00FF
+ #define MACASR_HIGH_BITS 0x08
+#define P3_MACASR1 (0x64)
+#define P3_MACASR2 (0x66)
+#define P3_MFAR01 (0x68)
+#define P3_MFAR_BASE (0x68)
+ #define P3_MFAR(x) (P3_MFAR_BASE + 2 * (x))
+
+#define P3_MFAR23 (0x6A)
+#define P3_MFAR45 (0x6C)
+#define P3_MFAR67 (0x6E)
+#define P3_VID0FR (0x70)
+#define P3_VID1FR (0x72)
+#define P3_EECSR (0x74)
+#define P3_EEDR (0x76)
+#define P3_EECR (0x78)
+ #define EECR_ADDR_MASK (0x00FF)
+ #define EECR_READ_ACT BIT(8)
+ #define EECR_WRITE_ACT BIT(9)
+ #define EECR_WRITE_DISABLE BIT(10)
+ #define EECR_WRITE_ENABLE BIT(11)
+ #define EECR_EE_READY BIT(13)
+ #define EECR_RELOAD BIT(14)
+ #define EECR_RESET BIT(15)
+#define P3_TPCR (0x7A)
+ #define TPCR_PATT_MASK (0xFF)
+ #define TPCR_RAND_PKT_EN BIT(14)
+ #define TPCR_FIXED_PKT_EN BIT(15)
+#define P3_TPLR (0x7C)
+ /* Definition of PAGE4 */
+#define P4_SPICR (0x8A)
+ #define SPICR_RCEN BIT(0)
+ #define SPICR_QCEN BIT(1)
+ #define SPICR_RBRE BIT(3)
+ #define SPICR_PMM BIT(4)
+ #define SPICR_LOOPBACK BIT(8)
+ #define SPICR_CORE_RES_CLR BIT(10)
+ #define SPICR_SPI_RES_CLR BIT(11)
+#define P4_SPIISMR (0x8C)
+
+#define P4_COERCR0 (0x92)
+ #define COERCR0_RXIPCE BIT(0)
+ #define COERCR0_RXIPVE BIT(1)
+ #define COERCR0_RXV6PE BIT(2)
+ #define COERCR0_RXTCPE BIT(3)
+ #define COERCR0_RXUDPE BIT(4)
+ #define COERCR0_RXICMP BIT(5)
+ #define COERCR0_RXIGMP BIT(6)
+ #define COERCR0_RXICV6 BIT(7)
+
+ #define COERCR0_RXTCPV6 BIT(8)
+ #define COERCR0_RXUDPV6 BIT(9)
+ #define COERCR0_RXICMV6 BIT(10)
+ #define COERCR0_RXIGMV6 BIT(11)
+ #define COERCR0_RXICV6V6 BIT(12)
+
+ #define COERCR0_DEFAULT (COERCR0_RXIPCE | COERCR0_RXV6PE | \
+ COERCR0_RXTCPE | COERCR0_RXUDPE | \
+ COERCR0_RXTCPV6 | COERCR0_RXUDPV6)
+#define P4_COERCR1 (0x94)
+ #define COERCR1_IPCEDP BIT(0)
+ #define COERCR1_IPVEDP BIT(1)
+ #define COERCR1_V6VEDP BIT(2)
+ #define COERCR1_TCPEDP BIT(3)
+ #define COERCR1_UDPEDP BIT(4)
+ #define COERCR1_ICMPDP BIT(5)
+ #define COERCR1_IGMPDP BIT(6)
+ #define COERCR1_ICV6DP BIT(7)
+ #define COERCR1_RX64TE BIT(8)
+ #define COERCR1_RXPPPE BIT(9)
+ #define COERCR1_TCP6DP BIT(10)
+ #define COERCR1_UDP6DP BIT(11)
+ #define COERCR1_IC6DP BIT(12)
+ #define COERCR1_IG6DP BIT(13)
+ #define COERCR1_ICV66DP BIT(14)
+ #define COERCR1_RPCE BIT(15)
+
+ #define COERCR1_DEFAULT (COERCR1_RXPPPE)
+
+#define P4_COETCR0 (0x96)
+ #define COETCR0_TXIP BIT(0)
+ #define COETCR0_TXTCP BIT(1)
+ #define COETCR0_TXUDP BIT(2)
+ #define COETCR0_TXICMP BIT(3)
+ #define COETCR0_TXIGMP BIT(4)
+ #define COETCR0_TXICV6 BIT(5)
+ #define COETCR0_TXTCPV6 BIT(8)
+ #define COETCR0_TXUDPV6 BIT(9)
+ #define COETCR0_TXICMV6 BIT(10)
+ #define COETCR0_TXIGMV6 BIT(11)
+ #define COETCR0_TXICV6V6 BIT(12)
+
+ #define COETCR0_DEFAULT (COETCR0_TXIP | COETCR0_TXTCP | \
+ COETCR0_TXUDP | COETCR0_TXTCPV6 | \
+ COETCR0_TXUDPV6)
+#define P4_COETCR1 (0x98)
+ #define COETCR1_TX64TE BIT(0)
+ #define COETCR1_TXPPPE BIT(1)
+
+#define P4_COECEDR (0x9A)
+#define P4_L2CECR (0x9C)
+
+ /* Definition of PAGE5 */
+#define P5_WFTR (0xA2)
+ #define WFTR_2MS (0x01)
+ #define WFTR_4MS (0x02)
+ #define WFTR_8MS (0x03)
+ #define WFTR_16MS (0x04)
+ #define WFTR_32MS (0x05)
+ #define WFTR_64MS (0x06)
+ #define WFTR_128MS (0x07)
+ #define WFTR_256MS (0x08)
+ #define WFTR_512MS (0x09)
+ #define WFTR_1024MS (0x0A)
+ #define WFTR_2048MS (0x0B)
+ #define WFTR_4096MS (0x0C)
+ #define WFTR_8192MS (0x0D)
+ #define WFTR_16384MS (0x0E)
+ #define WFTR_32768MS (0x0F)
+#define P5_WFCCR (0xA4)
+#define P5_WFCR03 (0xA6)
+ #define WFCR03_F0_EN BIT(0)
+ #define WFCR03_F1_EN BIT(4)
+ #define WFCR03_F2_EN BIT(8)
+ #define WFCR03_F3_EN BIT(12)
+#define P5_WFCR47 (0xA8)
+ #define WFCR47_F4_EN BIT(0)
+ #define WFCR47_F5_EN BIT(4)
+ #define WFCR47_F6_EN BIT(8)
+ #define WFCR47_F7_EN BIT(12)
+#define P5_WF0BMR0 (0xAA)
+#define P5_WF0BMR1 (0xAC)
+#define P5_WF0CR (0xAE)
+#define P5_WF0OBR (0xB0)
+#define P5_WF1BMR0 (0xB2)
+#define P5_WF1BMR1 (0xB4)
+#define P5_WF1CR (0xB6)
+#define P5_WF1OBR (0xB8)
+#define P5_WF2BMR0 (0xBA)
+#define P5_WF2BMR1 (0xBC)
+
+ /* Definition of PAGE6 */
+#define P6_WF2CR (0xC2)
+#define P6_WF2OBR (0xC4)
+#define P6_WF3BMR0 (0xC6)
+#define P6_WF3BMR1 (0xC8)
+#define P6_WF3CR (0xCA)
+#define P6_WF3OBR (0xCC)
+#define P6_WF4BMR0 (0xCE)
+#define P6_WF4BMR1 (0xD0)
+#define P6_WF4CR (0xD2)
+#define P6_WF4OBR (0xD4)
+#define P6_WF5BMR0 (0xD6)
+#define P6_WF5BMR1 (0xD8)
+#define P6_WF5CR (0xDA)
+#define P6_WF5OBR (0xDC)
+
+/* Definition of PAGE7 */
+#define P7_WF6BMR0 (0xE2)
+#define P7_WF6BMR1 (0xE4)
+#define P7_WF6CR (0xE6)
+#define P7_WF6OBR (0xE8)
+#define P7_WF7BMR0 (0xEA)
+#define P7_WF7BMR1 (0xEC)
+#define P7_WF7CR (0xEE)
+#define P7_WF7OBR (0xF0)
+#define P7_WFR01 (0xF2)
+#define P7_WFR23 (0xF4)
+#define P7_WFR45 (0xF6)
+#define P7_WFR67 (0xF8)
+#define P7_WFPC0 (0xFA)
+#define P7_WFPC1 (0xFC)
+
+/* Tx headers structure */
+struct tx_sop_header {
+ /* bit 15-11: flags, bit 10-0: packet length */
+ u16 flags_len;
+ /* bit 15-11: sequence number, bit 10-0: packet length bar */
+ u16 seq_lenbar;
+};
+
+struct tx_segment_header {
+ /* bit 15-14: flags, bit 13-11: segment number */
+ /* bit 10-0: segment length */
+ u16 flags_seqnum_seglen;
+ /* bit 15-14: end offset, bit 13-11: start offset */
+ /* bit 10-0: segment length bar */
+ u16 eo_so_seglenbar;
+};
+
+struct tx_eop_header {
+ /* bit 15-11: sequence number, bit 10-0: packet length */
+ u16 seq_len;
+ /* bit 15-11: sequence number bar, bit 10-0: packet length bar */
+ u16 seqbar_lenbar;
+};
+
+struct tx_pkt_info {
+ struct tx_sop_header sop;
+ struct tx_segment_header seg;
+ struct tx_eop_header eop;
+ u16 pkt_len;
+ u16 seq_num;
+};
+
+/* Rx headers structure */
+struct rx_header {
+ u16 flags_len;
+ u16 seq_lenbar;
+ u16 flags;
+};
+
+extern unsigned long ax88796c_no_regs_mask[];
+
+#endif /* #ifndef _AX88796C_MAIN_H */
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.c b/drivers/net/ethernet/asix/ax88796c_spi.c
new file mode 100644
index 000000000000..94df4f96d2be
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#define pr_fmt(fmt) "ax88796c: " fmt
+
+#include <linux/string.h>
+#include <linux/spi/spi.h>
+
+#include "ax88796c_spi.h"
+
+const u8 ax88796c_rx_cmd_buf[5] = {AX_SPICMD_READ_RXQ, 0xFF, 0xFF, 0xFF, 0xFF};
+const u8 ax88796c_tx_cmd_buf[4] = {AX_SPICMD_WRITE_TXQ, 0xFF, 0xFF, 0xFF};
+
+/* driver bus management functions */
+int axspi_wakeup(struct axspi_data *ax_spi)
+{
+ int ret;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_EXIT_PWD; /* OP */
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 1);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status)
+{
+ int ret;
+
+ /* OP */
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_STATUS;
+ ret = spi_write_then_read(ax_spi->spi, ax_spi->cmd_buf, 1, (u8 *)status, 3);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ else
+ le16_to_cpus(&status->isr);
+
+ return ret;
+}
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len)
+{
+ struct spi_transfer *xfer = ax_spi->spi_rx_xfer;
+ int ret;
+
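+ /* Two-transfer message: the RXQ read command (shortened to two
+ * bytes when compression is enabled) followed by the bulk data
+ * read into the caller's buffer.
+ */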
+ memcpy(ax_spi->cmd_buf, ax88796c_rx_cmd_buf, 5);
+
+ xfer->tx_buf = ax_spi->cmd_buf;
+ xfer->rx_buf = NULL;
+ xfer->len = ax_spi->comp ? 2 : 5;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+
+ xfer++;
+ xfer->rx_buf = data;
+ xfer->tx_buf = NULL;
+ xfer->len = len;
+ xfer->bits_per_word = 8;
+ spi_message_add_tail(xfer, &ax_spi->rx_msg);
+ ret = spi_sync(ax_spi->spi, &ax_spi->rx_msg);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+
+ return ret;
+}
+
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len)
+{
+ return spi_write(ax_spi->spi, data, len);
+}
+
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg)
+{
+ int ret;
+ int len = ax_spi->comp ? 3 : 4;
+
+ ax_spi->cmd_buf[0] = AX_SPICMD_READ_REG; /* OP code: read register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = 0xFF; /* dummy cycle */
+ ax_spi->cmd_buf[3] = 0xFF; /* dummy cycle */
+ ret = spi_write_then_read(ax_spi->spi,
+ ax_spi->cmd_buf, len,
+ ax_spi->rx_buf, 2);
+ if (ret) {
+ dev_err(&ax_spi->spi->dev,
+ "%s() failed: ret = %d\n", __func__, ret);
+ return 0xFFFF;
+ }
+
+ le16_to_cpus((u16 *)ax_spi->rx_buf);
+
+ return *(u16 *)ax_spi->rx_buf;
+}
+
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value)
+{
+ int ret;
+
+ memset(ax_spi->cmd_buf, 0, sizeof(ax_spi->cmd_buf));
+ ax_spi->cmd_buf[0] = AX_SPICMD_WRITE_REG; /* OP code: write register */
+ ax_spi->cmd_buf[1] = reg; /* register address */
+ ax_spi->cmd_buf[2] = value;
+ ax_spi->cmd_buf[3] = value >> 8;
+
+ ret = spi_write(ax_spi->spi, ax_spi->cmd_buf, 4);
+ if (ret)
+ dev_err(&ax_spi->spi->dev, "%s() failed: ret = %d\n", __func__, ret);
+ return ret;
+}
+
diff --git a/drivers/net/ethernet/asix/ax88796c_spi.h b/drivers/net/ethernet/asix/ax88796c_spi.h
new file mode 100644
index 000000000000..5bcf91f603fb
--- /dev/null
+++ b/drivers/net/ethernet/asix/ax88796c_spi.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2010 ASIX Electronics Corporation
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd.
+ *
+ * ASIX AX88796C SPI Fast Ethernet Linux driver
+ */
+
+#ifndef _AX88796C_SPI_H
+#define _AX88796C_SPI_H
+
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* Definition of SPI command */
+#define AX_SPICMD_WRITE_TXQ 0x02
+#define AX_SPICMD_READ_REG 0x03
+#define AX_SPICMD_READ_STATUS 0x05
+#define AX_SPICMD_READ_RXQ 0x0B
+#define AX_SPICMD_BIDIR_WRQ 0xB2
+#define AX_SPICMD_WRITE_REG 0xD8
+#define AX_SPICMD_EXIT_PWD 0xAB
+
+extern const u8 ax88796c_rx_cmd_buf[];
+extern const u8 ax88796c_tx_cmd_buf[];
+
+struct axspi_data {
+ struct spi_device *spi;
+ struct spi_message rx_msg;
+ struct spi_transfer spi_rx_xfer[2];
+ u8 cmd_buf[6];
+ u8 rx_buf[6];
+ u8 comp;
+};
+
+struct spi_status {
+ u16 isr;
+ u8 status;
+# define AX_STATUS_READY 0x80
+};
+
+int axspi_read_rxq(struct axspi_data *ax_spi, void *data, int len);
+int axspi_write_txq(const struct axspi_data *ax_spi, void *data, int len);
+u16 axspi_read_reg(struct axspi_data *ax_spi, u8 reg);
+int axspi_write_reg(struct axspi_data *ax_spi, u8 reg, u16 value);
+int axspi_read_status(struct axspi_data *ax_spi, struct spi_status *status);
+int axspi_wakeup(struct axspi_data *ax_spi);
+
+static inline u16 AX_READ(struct axspi_data *ax_spi, u8 offset)
+{
+ return axspi_read_reg(ax_spi, offset);
+}
+
+static inline int AX_WRITE(struct axspi_data *ax_spi, u16 value, u8 offset)
+{
+ return axspi_write_reg(ax_spi, offset, value);
+}
+
+static inline int AX_READ_STATUS(struct axspi_data *ax_spi,
+ struct spi_status *status)
+{
+ return axspi_read_status(ax_spi, status);
+}
+
+static inline int AX_WAKEUP(struct axspi_data *ax_spi)
+{
+ return axspi_wakeup(ax_spi);
+}
+#endif
diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c
index 02ae98aabf91..88d2ab748399 100644
--- a/drivers/net/ethernet/atheros/ag71xx.c
+++ b/drivers/net/ethernet/atheros/ag71xx.c
@@ -1082,14 +1082,12 @@ static void ag71xx_mac_validate(struct phylink_config *config,
phylink_set(mask, 1000baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
unsupported:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
}
static void ag71xx_mac_pcs_get_state(struct phylink_config *config,
@@ -1968,10 +1966,10 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->stop_desc->ctrl = 0;
ag->stop_desc->next = (u32)ag->stop_desc_dma;
- err = of_get_mac_address(np, ndev->dev_addr);
+ err = of_get_ethdev_address(np, ndev);
if (err) {
netif_err(ag, probe, ndev, "invalid MAC address, using random address\n");
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
err = of_get_phy_mode(np, &ag->phy_if_mode);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4ea157efca86..4ad3fc72e74e 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -607,7 +607,7 @@ static int alx_set_mac_address(struct net_device *netdev, void *data)
if (netdev->addr_assign_type & NET_ADDR_RANDOM)
netdev->addr_assign_type ^= NET_ADDR_RANDOM;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
alx_set_macaddr(hw, hw->mac_addr);
@@ -1832,7 +1832,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
- memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, hw->mac_addr);
memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
hw->mdio.prtad = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 3b51b172b317..da595242bc13 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -482,7 +482,7 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1c_hw_set_mac_addr(&adapter->hw, adapter->hw.mac_addr);
@@ -1847,7 +1847,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue,
buffer_info->skb = NULL;
buffer_info->length = 0;
ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
- netif_warn(adapter, rx_err, adapter->netdev, "RX pci_map_single failed");
+ netif_warn(adapter, rx_err, adapter->netdev, "RX dma_map_single failed");
break;
}
buffer_info->dma = mapping;
@@ -2662,10 +2662,8 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1c chip can DMA to 64-bit addresses, but it uses a single
@@ -2769,7 +2767,7 @@ static int atl1c_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* got a random MAC address, set NET_ADDR_RANDOM to netdev */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (netif_msg_probe(adapter))
dev_dbg(&pdev->dev, "mac address : %pM\n",
adapter->hw.mac_addr);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 753973ac922e..56e5f440e666 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -374,7 +374,7 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl1e_hw_set_mac_addr(&adapter->hw);
@@ -2297,10 +2297,8 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err = 0;
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* The atl1e chip can DMA to 64-bit addresses, but it uses a single
@@ -2392,7 +2390,7 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 68f6c0bbd945..b4c9e805e981 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3027,7 +3027,7 @@ static int atl1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* mark random mac */
netdev->addr_assign_type = NET_ADDR_RANDOM;
}
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index b69298ddb647..bbc4d7b08a49 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -931,7 +931,7 @@ static int atl2_set_mac(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atl2_set_mac_addr(&adapter->hw);
@@ -1405,7 +1405,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* copy the MAC address out of the EEPROM */
atl2_read_mac_addr(&adapter->hw);
- memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index 0941d07d0833..e8cfbf4ff1b5 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -69,7 +69,7 @@ static int atlx_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atlx_set_mac_addr(&adapter->hw);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index fa784953c601..969591bbc066 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -218,7 +218,8 @@ static inline void __b44_cam_read(struct b44 *bp, unsigned char *data, int index
data[1] = (val >> 0) & 0xFF;
}
-static inline void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
+static inline void __b44_cam_write(struct b44 *bp,
+ const unsigned char *data, int index)
{
u32 val;
@@ -1200,7 +1201,7 @@ static int b44_alloc_consistent(struct b44 *bp, gfp_t gfp)
bp->rx_ring = dma_alloc_coherent(bp->sdev->dma_dev, size,
&bp->rx_ring_dma, gfp);
if (!bp->rx_ring) {
- /* Allocation may have failed due to pci_alloc_consistent
+ /* Allocation may have failed due to dma_alloc_coherent
insisting on use of GFP_DMA, which is more restrictive
than necessary... */
struct dma_desc *rx_ring;
@@ -1383,7 +1384,7 @@ static int b44_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&bp->lock);
@@ -1507,7 +1508,8 @@ static void bwfilter_table(struct b44 *bp, u8 *pp, u32 bytes, u32 table_offset)
}
}
-static int b44_magic_pattern(u8 *macaddr, u8 *ppattern, u8 *pmask, int offset)
+static int b44_magic_pattern(const u8 *macaddr, u8 *ppattern, u8 *pmask,
+ int offset)
{
int magicsync = 6;
int k, j, len = offset;
@@ -2171,7 +2173,7 @@ static int b44_get_invariants(struct b44 *bp)
* valid PHY address. */
bp->phy_addr &= 0x1F;
- memcpy(bp->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, addr);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
pr_err("Invalid MAC address found in EEPROM\n");
diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c
index 02a569500234..7cc5213c575a 100644
--- a/drivers/net/ethernet/broadcom/bcm4908_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c
@@ -170,7 +170,7 @@ static int bcm4908_dma_alloc_buf_descs(struct bcm4908_enet *enet,
goto err_free_buf_descs;
}
- ring->slots = kzalloc(ring->length * sizeof(*ring->slots), GFP_KERNEL);
+ ring->slots = kcalloc(ring->length, sizeof(*ring->slots), GFP_KERNEL);
if (!ring->slots)
goto err_free_buf_descs;
@@ -715,7 +715,7 @@ static int bcm4908_enet_probe(struct platform_device *pdev)
return err;
SET_NETDEV_DEV(netdev, &pdev->dev);
- err = of_get_mac_address(dev->of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
netdev->netdev_ops = &bcm4908_enet_netdev_ops;
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index d56886300ecf..a568994a03a6 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -670,7 +670,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
u32 val;
priv = netdev_priv(dev);
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
/* use perfect match register 0 to store my mac address */
val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
@@ -1762,7 +1762,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
priv->has_phy = pd->has_phy;
priv->phy_id = pd->phy_id;
priv->has_phy_interrupt = pd->has_phy_interrupt;
@@ -2665,7 +2665,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
pd = dev_get_platdata(&pdev->dev);
if (pd) {
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac_addr);
memcpy(priv->used_ports, pd->used_ports,
sizeof(pd->used_ports));
priv->num_ports = pd->num_ports;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7fa1b695400d..40933bf5a710 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1818,7 +1818,7 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
}
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
addr[3];
@@ -1850,7 +1850,7 @@ static int bcm_sysport_change_mac(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EINVAL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* interface is disabled, changes to MAC will be reflected on next
* open call
@@ -2555,7 +2555,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
}
/* Initialize netdevice members */
- ret = of_get_mac_address(dn, dev->dev_addr);
+ ret = of_get_ethdev_address(dn, dev);
if (ret) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 6ce80cbcb48e..086739e4f40a 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -10,6 +10,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
+#include <linux/of_mdio.h>
#include "bgmac.h"
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
@@ -211,6 +212,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
+ struct device_node *np;
int err;
mii_bus = mdiobus_alloc();
@@ -229,7 +231,9 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
mii_bus->parent = &core->dev;
mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
- err = mdiobus_register(mii_bus);
+ np = of_get_child_by_name(core->dev.of_node, "mdio");
+
+ err = of_mdiobus_register(mii_bus, np);
if (err) {
dev_err(&core->dev, "Registration of mii bus failed\n");
goto err_free_bus;
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9513cfb5ba58..e6f48786949c 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -11,6 +11,7 @@
#include <linux/bcma/bcma.h>
#include <linux/brcmphy.h>
#include <linux/etherdevice.h>
+#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "bgmac.h"
@@ -86,17 +87,28 @@ static int bcma_phy_connect(struct bgmac *bgmac)
struct phy_device *phy_dev;
char bus_id[MII_BUS_ID_SIZE + 3];
+ /* DT info should be the most accurate */
+ phy_dev = of_phy_get_and_connect(bgmac->net_dev, bgmac->dev->of_node,
+ bgmac_adjust_link);
+ if (phy_dev)
+ return 0;
+
/* Connect to the PHY */
- snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
- bgmac->phyaddr);
- phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
- PHY_INTERFACE_MODE_MII);
- if (IS_ERR(phy_dev)) {
- dev_err(bgmac->dev, "PHY connection failed\n");
- return PTR_ERR(phy_dev);
+ if (bgmac->mii_bus && bgmac->phyaddr != BGMAC_PHY_NOREGS) {
+ snprintf(bus_id, sizeof(bus_id), PHY_ID_FMT, bgmac->mii_bus->id,
+ bgmac->phyaddr);
+ phy_dev = phy_connect(bgmac->net_dev, bus_id, bgmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ if (IS_ERR(phy_dev)) {
+ dev_err(bgmac->dev, "PHY connection failed\n");
+ return PTR_ERR(phy_dev);
+ }
+
+ return 0;
}
- return 0;
+ /* Assume a fixed link to the switch port */
+ return bgmac_phy_connect_direct(bgmac);
}
static const struct bcma_device_id bgmac_bcma_tbl[] = {
@@ -128,7 +140,7 @@ static int bgmac_probe(struct bcma_device *core)
bcma_set_drvdata(core, bgmac);
- err = of_get_mac_address(bgmac->dev->of_node, bgmac->net_dev->dev_addr);
+ err = of_get_ethdev_address(bgmac->dev->of_node, bgmac->net_dev);
if (err == -EPROBE_DEFER)
return err;
@@ -150,7 +162,7 @@ static int bgmac_probe(struct bcma_device *core)
err = -ENOTSUPP;
goto err;
}
- ether_addr_copy(bgmac->net_dev->dev_addr, mac);
+ eth_hw_addr_set(bgmac->net_dev, mac);
}
/* On BCM4706 we need common core to access PHY */
@@ -297,10 +309,7 @@ static int bgmac_probe(struct bcma_device *core)
bgmac->cco_ctl_maskset = bcma_bgmac_cco_ctl_maskset;
bgmac->get_bus_clock = bcma_bgmac_get_bus_clock;
bgmac->cmn_maskset32 = bcma_bgmac_cmn_maskset32;
- if (bgmac->mii_bus)
- bgmac->phy_connect = bcma_phy_connect;
- else
- bgmac->phy_connect = bgmac_phy_connect_direct;
+ bgmac->phy_connect = bcma_phy_connect;
err = bgmac_enet_probe(bgmac);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index df8ff839cc62..c6412c523637 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -191,7 +191,7 @@ static int bgmac_probe(struct platform_device *pdev)
bgmac->dev = &pdev->dev;
bgmac->dma_dev = &pdev->dev;
- ret = of_get_mac_address(np, bgmac->net_dev->dev_addr);
+ ret = of_get_ethdev_address(np, bgmac->net_dev);
if (ret == -EPROBE_DEFER)
return ret;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index fe4d99abd548..7b525c65bacb 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -768,7 +768,7 @@ static void bgmac_umac_cmd_maskset(struct bgmac *bgmac, u32 mask, u32 set,
udelay(2);
}
-static void bgmac_write_mac_address(struct bgmac *bgmac, u8 *addr)
+static void bgmac_write_mac_address(struct bgmac *bgmac, const u8 *addr)
{
u32 tmp;
@@ -1241,7 +1241,7 @@ static int bgmac_set_mac_address(struct net_device *net_dev, void *addr)
if (ret < 0)
return ret;
- ether_addr_copy(net_dev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(net_dev, sa->sa_data);
bgmac_write_mac_address(bgmac, net_dev->dev_addr);
eth_commit_mac_addr_change(net_dev, addr);
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8c83973adca5..babc955ba64e 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -2704,7 +2704,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
}
static void
-bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
+bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
{
u32 val;
@@ -7910,7 +7910,7 @@ bnx2_change_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
@@ -8574,7 +8574,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (is_kdump_kernel())
bnx2_wait_dma_complete(bp);
- memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, bp->mac_addr);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_TSO_ECN |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e789430f407c..2b06d78baa08 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1994,7 +1994,7 @@ int bnx2x_idle_chk(struct bnx2x *bp);
* operation has been successfully scheduled and a negative - if a requested
* operations has failed.
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b5d954cb409a..e8e8c2d593c5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -4336,7 +4336,7 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
return rc;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev))
rc = bnx2x_set_eth_mac(bp, true);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index ae87296ae1ff..aec666e97683 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -8417,7 +8417,7 @@ alloc_mem_err:
* Init service functions
*/
-int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
+int bnx2x_set_mac_one(struct bnx2x *bp, const u8 *mac,
struct bnx2x_vlan_mac_obj *obj, bool set,
int mac_type, unsigned long *ramrod_flags)
{
@@ -9146,7 +9146,7 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
else if (bp->wol) {
u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
- u8 *mac_addr = bp->dev->dev_addr;
+ const u8 *mac_addr = bp->dev->dev_addr;
struct pci_dev *pdev = bp->pdev;
u32 val;
u16 pmc;
@@ -11790,7 +11790,7 @@ static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
* as the SAN mac was copied from the primary MAC.
*/
if (IS_MF_FCOE_AFEX(bp))
- memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, fip_mac);
} else {
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
iscsi_mac_upper);
@@ -11823,9 +11823,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
u32 val, val2;
int func = BP_ABS_FUNC(bp);
int port = BP_PORT(bp);
+ u8 addr[ETH_ALEN] = {};
/* Zero primary MAC configuration */
- eth_zero_addr(bp->dev->dev_addr);
+ eth_hw_addr_set(bp->dev, addr);
if (BP_NOMCP(bp)) {
BNX2X_ERROR("warning: random MAC workaround active\n");
@@ -11834,8 +11835,10 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
- (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
+ }
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -11843,7 +11846,8 @@ static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
/* in SF read MACs from port configuration */
val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
- bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+ bnx2x_set_mac_buf(addr, val, val2);
+ eth_hw_addr_set(bp->dev, addr);
if (CNIC_SUPPORT(bp))
bnx2x_get_cnic_mac_hwinfo(bp);
@@ -12291,7 +12295,9 @@ static int bnx2x_init_bp(struct bnx2x *bp)
if (rc)
return rc;
} else {
- eth_zero_addr(bp->dev->dev_addr);
+ static const u8 zero_addr[ETH_ALEN] = {};
+
+ eth_hw_addr_set(bp->dev, zero_addr);
}
bnx2x_set_modes_bitmap(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 6fbf735fca31..74a8931ce1d1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -3058,7 +3058,7 @@ enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
!ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
/* update new mac to net device */
- memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin->mac);
}
if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 966d5722c5e2..8c2cf5519787 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -508,7 +508,8 @@ int bnx2x_vfpf_init(struct bnx2x *bp);
void bnx2x_vfpf_close_vf(struct bnx2x *bp);
int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bool is_leading);
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set);
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid,
+ bool set);
int bnx2x_vfpf_config_rss(struct bnx2x *bp,
struct bnx2x_config_rss_params *params);
int bnx2x_vfpf_set_mcast(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index ea0e9394f898..c9129b9ba446 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -384,9 +384,8 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
sizeof(bp->fw_ver));
if (is_valid_ether_addr(bp->acquire_resp.resc.current_mac_addr))
- memcpy(bp->dev->dev_addr,
- bp->acquire_resp.resc.current_mac_addr,
- ETH_ALEN);
+ eth_hw_addr_set(bp->dev,
+ bp->acquire_resp.resc.current_mac_addr);
out:
bnx2x_vfpf_finalize(bp, &req->first_tlv);
@@ -722,7 +721,7 @@ out:
}
/* request pf to add a mac for the vf */
-int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
+int bnx2x_vfpf_config_mac(struct bnx2x *bp, const u8 *addr, u8 vf_qid, bool set)
{
struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
@@ -767,7 +766,7 @@ int bnx2x_vfpf_config_mac(struct bnx2x *bp, u8 *addr, u8 vf_qid, bool set)
"vfpf SET MAC failed. Check bulletin board for new posts\n");
/* copy mac from bulletin to device */
- memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bulletin.mac);
/* check if bulletin board was updated */
if (bnx2x_sample_bulletin(bp) == PFVF_BULLETIN_UPDATED) {
diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile
index c6ef7ec2c115..2bc2b707d6ee 100644
--- a/drivers/net/ethernet/broadcom/bnxt/Makefile
+++ b/drivers/net/ethernet/broadcom/bnxt/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_BNXT) += bnxt_en.o
-bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o
+bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_coredump.o
bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o
bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 62f84cc91e4d..c04ea83188e2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -49,8 +49,6 @@
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
-#include <linux/ptp_clock_kernel.h>
-#include <linux/timecounter.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
@@ -85,55 +83,7 @@ MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
#define BNXT_TX_PUSH_THRESH 164
-enum board_idx {
- BCM57301,
- BCM57302,
- BCM57304,
- BCM57417_NPAR,
- BCM58700,
- BCM57311,
- BCM57312,
- BCM57402,
- BCM57404,
- BCM57406,
- BCM57402_NPAR,
- BCM57407,
- BCM57412,
- BCM57414,
- BCM57416,
- BCM57417,
- BCM57412_NPAR,
- BCM57314,
- BCM57417_SFP,
- BCM57416_SFP,
- BCM57404_NPAR,
- BCM57406_NPAR,
- BCM57407_SFP,
- BCM57407_NPAR,
- BCM57414_NPAR,
- BCM57416_NPAR,
- BCM57452,
- BCM57454,
- BCM5745x_NPAR,
- BCM57508,
- BCM57504,
- BCM57502,
- BCM57508_NPAR,
- BCM57504_NPAR,
- BCM57502_NPAR,
- BCM58802,
- BCM58804,
- BCM58808,
- NETXTREME_E_VF,
- NETXTREME_C_VF,
- NETXTREME_S_VF,
- NETXTREME_C_VF_HV,
- NETXTREME_E_VF_HV,
- NETXTREME_E_P5_VF,
- NETXTREME_E_P5_VF_HV,
-};
-
-/* indexed by enum above */
+/* indexed by enum board_idx */
static const struct {
char *name;
} board_info[] = {
@@ -2172,7 +2122,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
break;
case ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY: {
- char *fatal_str = "non-fatal";
+ char *type_str = "Solicited";
if (!bp->fw_health)
goto async_event_process_exit;
@@ -2184,13 +2134,21 @@ static int bnxt_async_event_process(struct bnxt *bp,
bp->fw_reset_max_dsecs = le16_to_cpu(cmpl->timestamp_hi);
if (!bp->fw_reset_max_dsecs)
bp->fw_reset_max_dsecs = BNXT_DFLT_FW_RST_MAX_DSECS;
- if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
- fatal_str = "fatal";
+ if (EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1)) {
+ set_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state);
+ } else if (EVENT_DATA1_RESET_NOTIFY_FATAL(data1)) {
+ type_str = "Fatal";
+ bp->fw_health->fatalities++;
set_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ } else if (data2 && BNXT_FW_STATUS_HEALTHY !=
+ EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2)) {
+ type_str = "Non-fatal";
+ bp->fw_health->survivals++;
+ set_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
}
netif_warn(bp, hw, bp->dev,
- "Firmware %s reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
- fatal_str, data1, data2,
+ "%s firmware reset event, data1: 0x%x, data2: 0x%x, min wait %u ms, max wait %u ms\n",
+ type_str, data1, data2,
bp->fw_reset_min_dsecs * 100,
bp->fw_reset_max_dsecs * 100);
set_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event);
@@ -2198,17 +2156,18 @@ static int bnxt_async_event_process(struct bnxt *bp,
}
case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY: {
struct bnxt_fw_health *fw_health = bp->fw_health;
+ char *status_desc = "healthy";
+ u32 status;
if (!fw_health)
goto async_event_process_exit;
if (!EVENT_DATA1_RECOVERY_ENABLED(data1)) {
fw_health->enabled = false;
- netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[0]\n");
+ netif_info(bp, drv, bp->dev, "Driver recovery watchdog is disabled\n");
break;
}
- fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
+ fw_health->primary = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
fw_health->tmr_multiplier =
DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
bp->current_interval * 10);
@@ -2218,10 +2177,13 @@ static int bnxt_async_event_process(struct bnxt *bp,
bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
fw_health->last_fw_reset_cnt =
bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (status != BNXT_FW_STATUS_HEALTHY)
+ status_desc = "unhealthy";
netif_info(bp, drv, bp->dev,
- "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
- fw_health->master, fw_health->last_fw_reset_cnt,
- bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+ "Driver recovery watchdog, role: %s, firmware status: 0x%x (%s), resets: %u\n",
+ fw_health->primary ? "primary" : "backup", status,
+ status_desc, fw_health->last_fw_reset_cnt);
if (!fw_health->enabled) {
/* Make sure tmr_counter is set and visible to
* bnxt_health_check() before setting enabled to true.
@@ -4651,7 +4613,7 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
return rc;
}
-static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
{
struct hwrm_func_drv_unrgtr_input *req;
int rc;
@@ -4869,7 +4831,7 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
#endif
static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
- u8 *mac_addr)
+ const u8 *mac_addr)
{
struct hwrm_cfa_l2_filter_alloc_output *resp;
struct hwrm_cfa_l2_filter_alloc_input *req;
@@ -6366,7 +6328,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
if (rx_rings != bp->rx_nr_rings) {
netdev_warn(bp->dev, "Able to reserve only %d out of %d requested RX rings\n",
rx_rings, bp->rx_nr_rings);
- if ((bp->dev->priv_flags & IFF_RXFH_CONFIGURED) &&
+ if (netif_is_rxfh_configured(bp->dev) &&
(bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) !=
bnxt_get_nr_rss_ctxs(bp, rx_rings) ||
bnxt_get_max_rss_ring(bp) >= rx_rings)) {
@@ -7192,7 +7154,7 @@ static void bnxt_free_ctx_pg_tbls(struct bnxt *bp,
ctx_pg->nr_pages = 0;
}
-static void bnxt_free_ctx_mem(struct bnxt *bp)
+void bnxt_free_ctx_mem(struct bnxt *bp)
{
struct bnxt_ctx_mem_info *ctx = bp->ctx;
int i;
@@ -7518,12 +7480,18 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
bp->fw_cap |= BNXT_FW_CAP_ERR_RECOVER_RELOAD;
if (!(flags & FUNC_QCAPS_RESP_FLAGS_VLAN_ACCELERATION_TX_DISABLED))
bp->fw_cap |= BNXT_FW_CAP_VLAN_TX_INSERT;
+ if (flags & FUNC_QCAPS_RESP_FLAGS_DBG_QCAPS_CMD_SUPPORTED)
+ bp->fw_cap |= BNXT_FW_CAP_DBG_QCAPS;
flags_ext = le32_to_cpu(resp->flags_ext);
if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED)
bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED;
if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED))
bp->fw_cap |= BNXT_FW_CAP_PTP_PPS;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_HOT_RESET_IF_SUPPORT))
+ bp->fw_cap |= BNXT_FW_CAP_HOT_RESET_IF;
+ if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED))
+ bp->fw_cap |= BNXT_FW_CAP_LIVEPATCH;
bp->tx_push_thresh = 0;
if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) &&
@@ -7579,6 +7547,32 @@ hwrm_func_qcaps_exit:
return rc;
}
+static void bnxt_hwrm_dbg_qcaps(struct bnxt *bp)
+{
+ struct hwrm_dbg_qcaps_output *resp;
+ struct hwrm_dbg_qcaps_input *req;
+ int rc;
+
+ bp->fw_dbg_cap = 0;
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCAPS);
+ if (rc)
+ return;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto hwrm_dbg_qcaps_exit;
+
+ bp->fw_dbg_cap = le32_to_cpu(resp->flags);
+
+hwrm_dbg_qcaps_exit:
+ hwrm_req_drop(bp, req);
+}
+
static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
@@ -7588,6 +7582,9 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
rc = __bnxt_hwrm_func_qcaps(bp);
if (rc)
return rc;
+
+ bnxt_hwrm_dbg_qcaps(bp);
+
rc = bnxt_hwrm_queue_qportcfg(bp);
if (rc) {
netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
@@ -7642,6 +7639,7 @@ static int __bnxt_alloc_fw_health(struct bnxt *bp)
if (!bp->fw_health)
return -ENOMEM;
+ mutex_init(&bp->fw_health->lock);
return 0;
}
@@ -7688,12 +7686,16 @@ static void bnxt_inv_fw_health_reg(struct bnxt *bp)
struct bnxt_fw_health *fw_health = bp->fw_health;
u32 reg_type;
- if (!fw_health || !fw_health->status_reliable)
+ if (!fw_health)
return;
reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_HEALTH_REG]);
if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
fw_health->status_reliable = false;
+
+ reg_type = BNXT_FW_HEALTH_REG_TYPE(fw_health->regs[BNXT_FW_RESET_CNT_REG]);
+ if (reg_type == BNXT_FW_HEALTH_REG_TYPE_GRC)
+ fw_health->resets_reliable = false;
}
static void bnxt_try_map_fw_health_reg(struct bnxt *bp)
@@ -7750,6 +7752,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
int i;
bp->fw_health->status_reliable = false;
+ bp->fw_health->resets_reliable = false;
/* Only pre-map the monitoring GRC registers using window 3 */
for (i = 0; i < 4; i++) {
u32 reg = fw_health->regs[i];
@@ -7763,6 +7766,7 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp)
fw_health->mapped_regs[i] = BNXT_FW_HEALTH_WIN_OFF(reg);
}
bp->fw_health->status_reliable = true;
+ bp->fw_health->resets_reliable = true;
if (reg_base == 0xffffffff)
return 0;
@@ -8208,6 +8212,10 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags)
if (!rc) {
bp->fw_rx_stats_ext_size =
le16_to_cpu(resp_qs->rx_stat_size) / 8;
+ if (BNXT_FW_MAJ(bp) < 220 &&
+ bp->fw_rx_stats_ext_size > BNXT_RX_STATS_EXT_NUM_LEGACY)
+ bp->fw_rx_stats_ext_size = BNXT_RX_STATS_EXT_NUM_LEGACY;
+
bp->fw_tx_stats_ext_size = tx_stat_size ?
le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0;
} else {
@@ -9246,7 +9254,7 @@ static char *bnxt_report_fec(struct bnxt_link_info *link_info)
}
}
-static void bnxt_report_link(struct bnxt *bp)
+void bnxt_report_link(struct bnxt *bp)
{
if (bp->link_info.link_up) {
const char *signal = "";
@@ -9691,8 +9699,6 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_req_send(bp, req);
}
-static int bnxt_fw_init_one(struct bnxt *bp);
-
static int bnxt_fw_reset_via_optee(struct bnxt *bp)
{
#ifdef CONFIG_TEE_BNXT_FW
@@ -9739,6 +9745,33 @@ static int bnxt_try_recover_fw(struct bnxt *bp)
return -ENODEV;
}
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset)
+{
+ struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+ int rc;
+
+ if (!BNXT_NEW_RM(bp))
+ return 0; /* no resource reservations required */
+
+ rc = bnxt_hwrm_func_resc_qcaps(bp, true);
+ if (rc)
+ netdev_err(bp->dev, "resc_qcaps failed\n");
+
+ hw_resc->resv_cp_rings = 0;
+ hw_resc->resv_stat_ctxs = 0;
+ hw_resc->resv_irqs = 0;
+ hw_resc->resv_tx_rings = 0;
+ hw_resc->resv_rx_rings = 0;
+ hw_resc->resv_hw_ring_grps = 0;
+ hw_resc->resv_vnics = 0;
+ if (!fw_reset) {
+ bp->tx_nr_rings = 0;
+ bp->rx_nr_rings = 0;
+ }
+
+ return rc;
+}
+
static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
{
struct hwrm_func_drv_if_change_output *resp;
@@ -9822,25 +9855,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
return rc;
}
}
- if (BNXT_NEW_RM(bp)) {
- struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-
- rc = bnxt_hwrm_func_resc_qcaps(bp, true);
- if (rc)
- netdev_err(bp->dev, "resc_qcaps failed\n");
-
- hw_resc->resv_cp_rings = 0;
- hw_resc->resv_stat_ctxs = 0;
- hw_resc->resv_irqs = 0;
- hw_resc->resv_tx_rings = 0;
- hw_resc->resv_rx_rings = 0;
- hw_resc->resv_hw_ring_grps = 0;
- hw_resc->resv_vnics = 0;
- if (!fw_reset) {
- bp->tx_nr_rings = 0;
- bp->rx_nr_rings = 0;
- }
- }
+ rc = bnxt_cancel_reservations(bp, fw_reset);
}
return rc;
}
@@ -10318,7 +10333,7 @@ void bnxt_half_close_nic(struct bnxt *bp)
bnxt_free_mem(bp, false);
}
-static void bnxt_reenable_sriov(struct bnxt *bp)
+void bnxt_reenable_sriov(struct bnxt *bp)
{
if (BNXT_PF(bp)) {
struct bnxt_pf_info *pf = &bp->pf;
@@ -11295,14 +11310,18 @@ static void bnxt_fw_health_check(struct bnxt *bp)
}
val = bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
- if (val == fw_health->last_fw_heartbeat)
+ if (val == fw_health->last_fw_heartbeat) {
+ fw_health->arrests++;
goto fw_reset;
+ }
fw_health->last_fw_heartbeat = val;
val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- if (val != fw_health->last_fw_reset_cnt)
+ if (val != fw_health->last_fw_reset_cnt) {
+ fw_health->discoveries++;
goto fw_reset;
+ }
fw_health->tmr_counter = fw_health->tmr_multiplier;
return;
@@ -11508,7 +11527,7 @@ static void bnxt_force_fw_reset(struct bnxt *bp)
}
bnxt_fw_reset_close(bp);
wait_dsecs = fw_health->master_func_wait_dsecs;
- if (fw_health->master) {
+ if (fw_health->primary) {
if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU)
wait_dsecs = 0;
bp->fw_reset_state = BNXT_FW_RESET_STATE_RESET_FW;
@@ -11772,13 +11791,17 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RST_RING_SP_EVENT, &bp->sp_event))
bnxt_rx_ring_reset(bp);
- if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event))
- bnxt_devlink_health_report(bp, BNXT_FW_RESET_NOTIFY_SP_EVENT);
+ if (test_and_clear_bit(BNXT_FW_RESET_NOTIFY_SP_EVENT, &bp->sp_event)) {
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_devlink_health_fw_report(bp);
+ else
+ bnxt_fw_reset(bp);
+ }
if (test_and_clear_bit(BNXT_FW_EXCEPTION_SP_EVENT, &bp->sp_event)) {
if (!is_bnxt_fw_ok(bp))
- bnxt_devlink_health_report(bp,
- BNXT_FW_EXCEPTION_SP_EVENT);
+ bnxt_devlink_health_fw_report(bp);
}
smp_mb__before_atomic();
@@ -11989,7 +12012,7 @@ static void bnxt_fw_init_one_p3(struct bnxt *bp)
static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt);
-static int bnxt_fw_init_one(struct bnxt *bp)
+int bnxt_fw_init_one(struct bnxt *bp)
{
int rc;
@@ -12051,6 +12074,27 @@ static void bnxt_fw_reset_writel(struct bnxt *bp, int reg_idx)
}
}
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp)
+{
+ struct hwrm_func_qcfg_output *resp;
+ struct hwrm_func_qcfg_input *req;
+ bool result = true; /* firmware will enforce if unknown */
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return result;
+
+ if (hwrm_req_init(bp, req, HWRM_FUNC_QCFG))
+ return result;
+
+ req->fid = cpu_to_le16(0xffff);
+ resp = hwrm_req_hold(bp, req);
+ if (!hwrm_req_send(bp, req))
+ result = !!(le16_to_cpu(resp->flags) &
+ FUNC_QCFG_RESP_FLAGS_HOT_RESET_ALLOWED);
+ hwrm_req_drop(bp, req);
+ return result;
+}
+
static void bnxt_reset_all(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
@@ -12093,7 +12137,7 @@ static void bnxt_fw_reset_abort(struct bnxt *bp, int rc)
clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state);
if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) {
bnxt_ulp_start(bp, rc);
- bnxt_dl_health_status_update(bp, false);
+ bnxt_dl_health_fw_status_update(bp, false);
}
bp->fw_reset_state = 0;
dev_close(bp->dev);
@@ -12159,7 +12203,7 @@ static void bnxt_fw_reset_task(struct work_struct *work)
return;
}
- if (!bp->fw_health->master) {
+ if (!bp->fw_health->primary) {
u32 wait_dsecs = bp->fw_health->normal_func_wait_dsecs;
bp->fw_reset_state = BNXT_FW_RESET_STATE_ENABLE_DEV;
@@ -12192,6 +12236,10 @@ static void bnxt_fw_reset_task(struct work_struct *work)
}
}
clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state);
+ clear_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_FW_ACTIVATE_RESET, &bp->state) &&
+ !test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state))
+ bnxt_dl_remote_reload(bp);
if (pci_enable_device(bp->pdev)) {
netdev_err(bp->dev, "Cannot re-enable PCI device\n");
rc = -ENODEV;
@@ -12241,8 +12289,11 @@ static void bnxt_fw_reset_task(struct work_struct *work)
bnxt_vf_reps_alloc(bp);
bnxt_vf_reps_open(bp);
bnxt_ptp_reapply_pps(bp);
- bnxt_dl_health_recovery_done(bp);
- bnxt_dl_health_status_update(bp, true);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ if (test_and_clear_bit(BNXT_STATE_RECOVER, &bp->state)) {
+ bnxt_dl_health_fw_recovery_done(bp);
+ bnxt_dl_health_fw_status_update(bp, true);
+ }
rtnl_unlock();
break;
}
@@ -12369,7 +12420,7 @@ static int bnxt_change_mac_addr(struct net_device *dev, void *p)
if (rc)
return rc;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (netif_running(dev)) {
bnxt_close_nic(bp, false, false);
rc = bnxt_open_nic(bp, false, false);
@@ -13103,7 +13154,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
int rc = 0;
if (BNXT_PF(bp)) {
- memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->pf.mac_addr);
} else {
#ifdef CONFIG_BNXT_SRIOV
struct bnxt_vf_info *vf = &bp->vf;
@@ -13111,7 +13162,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp)
if (is_valid_ether_addr(vf->mac_addr)) {
/* overwrite netdev dev_addr with admin VF MAC */
- memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, vf->mac_addr);
/* Older PF driver or firmware may not approve this
* correctly.
*/
@@ -13186,6 +13237,15 @@ static int bnxt_map_db_bar(struct bnxt *bp)
return 0;
}
+void bnxt_print_device_info(struct bnxt *bp)
+{
+ netdev_info(bp->dev, "%s found at mem %lx, node addr %pM\n",
+ board_info[bp->board_idx].name,
+ (long)pci_resource_start(bp->pdev, 0), bp->dev->dev_addr);
+
+ pcie_print_link_status(bp->pdev);
+}
+
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev;
@@ -13209,10 +13269,11 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;
bp = netdev_priv(dev);
+ bp->board_idx = ent->driver_data;
bp->msg_enable = BNXT_DEF_MSG_ENABLE;
bnxt_set_max_func_irqs(bp, max_irqs);
- if (bnxt_vf_pciid(ent->driver_data))
+ if (bnxt_vf_pciid(bp->board_idx))
bp->flags |= BNXT_FLAG_VF;
if (pdev->msix_cap)
@@ -13370,7 +13431,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_inv_fw_health_reg(bp);
- bnxt_dl_register(bp);
+ rc = bnxt_dl_register(bp);
+ if (rc)
+ goto init_err_dl;
rc = register_netdev(dev);
if (rc)
@@ -13380,16 +13443,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
devlink_port_type_eth_set(&bp->dl_port, bp->dev);
bnxt_dl_fw_reporters_create(bp);
- netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
- board_info[ent->driver_data].name,
- (long)pci_resource_start(pdev, 0), dev->dev_addr);
- pcie_print_link_status(pdev);
+ bnxt_print_device_info(bp);
pci_save_state(pdev);
return 0;
init_err_cleanup:
bnxt_dl_unregister(bp);
+init_err_dl:
bnxt_shutdown_tc(bp);
bnxt_clear_int_mode(bp);
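
The sp_task and fw_reset_task changes above route both fatal and non-fatal firmware events into a single devlink health reporter; the reporter itself is reworked in bnxt_devlink.c further down. As a reference point, here is a minimal sketch of the generic devlink health API being used, with hypothetical example_* names standing in for the bnxt callbacks:

#include <linux/errno.h>
#include <net/devlink.h>

/* hypothetical private state handed to the reporter at creation time */
struct example_fw_ctx {
        bool reset_pending;
};

static int example_fw_recover(struct devlink_health_reporter *reporter,
                              void *priv_ctx, struct netlink_ext_ack *extack)
{
        struct example_fw_ctx *ctx = devlink_health_reporter_priv(reporter);

        ctx->reset_pending = true;      /* kick the real reset machinery here */
        /* recovery completes asynchronously; the driver later calls
         * devlink_health_reporter_recovery_done() when it is finished
         */
        return -EINPROGRESS;
}

static const struct devlink_health_reporter_ops example_fw_ops = {
        .name    = "fw",
        .recover = example_fw_recover,
};

static struct devlink_health_reporter *
example_fw_reporter_create(struct devlink *dl, struct example_fw_ctx *ctx)
{
        /* graceful_period 0: no hold-off between automatic recoveries */
        return devlink_health_reporter_create(dl, &example_fw_ops, 0, ctx);
}

On an error the driver then calls devlink_health_report(reporter, "FW error reported", ctx), which logs the event and, with the default reporter settings, invokes .recover.
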
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 19fe6478e9b4..d0d5da9b78f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -489,6 +489,15 @@ struct rx_tpa_end_cmp_ext {
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL)
+#define EVENT_DATA1_RESET_NOTIFY_FW_ACTIVATION(data1) \
+ (((data1) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK) ==\
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_ACTIVATION)
+
+#define EVENT_DATA2_RESET_NOTIFY_FW_STATUS_CODE(data2) \
+ ((data2) & \
+ ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA2_FW_STATUS_CODE_MASK)
+
#define EVENT_DATA1_RECOVERY_MASTER_FUNC(data1) \
!!((data1) & \
ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_MASTER_FUNC)
@@ -1514,6 +1523,21 @@ struct bnxt_ctx_mem_info {
struct bnxt_mem_init mem_init[BNXT_CTX_MEM_INIT_MAX];
};
+enum bnxt_health_severity {
+ SEVERITY_NORMAL = 0,
+ SEVERITY_WARNING,
+ SEVERITY_RECOVERABLE,
+ SEVERITY_FATAL,
+};
+
+enum bnxt_health_remedy {
+ REMEDY_DEVLINK_RECOVER,
+ REMEDY_POWER_CYCLE_DEVICE,
+ REMEDY_POWER_CYCLE_HOST,
+ REMEDY_FW_UPDATE,
+ REMEDY_HW_REPLACE,
+};
+
struct bnxt_fw_health {
u32 flags;
u32 polling_dsecs;
@@ -1531,9 +1555,9 @@ struct bnxt_fw_health {
u32 last_fw_heartbeat;
u32 last_fw_reset_cnt;
u8 enabled:1;
- u8 master:1;
- u8 fatal:1;
+ u8 primary:1;
u8 status_reliable:1;
+ u8 resets_reliable:1;
u8 tmr_multiplier;
u8 tmr_counter;
u8 fw_reset_seq_cnt;
@@ -1543,12 +1567,15 @@ struct bnxt_fw_health {
u32 echo_req_data1;
u32 echo_req_data2;
struct devlink_health_reporter *fw_reporter;
- struct devlink_health_reporter *fw_reset_reporter;
- struct devlink_health_reporter *fw_fatal_reporter;
-};
-
-struct bnxt_fw_reporter_ctx {
- unsigned long sp_event;
+ /* Protects severity and remedy */
+ struct mutex lock;
+ enum bnxt_health_severity severity;
+ enum bnxt_health_remedy remedy;
+ u32 arrests;
+ u32 discoveries;
+ u32 survivals;
+ u32 fatalities;
+ u32 diagnoses;
};
#define BNXT_FW_HEALTH_REG_TYPE_MASK 3
@@ -1586,6 +1613,54 @@ struct bnxt_fw_reporter_ctx {
#define BNXT_FW_RETRY 5
#define BNXT_FW_IF_RETRY 10
+enum board_idx {
+ BCM57301,
+ BCM57302,
+ BCM57304,
+ BCM57417_NPAR,
+ BCM58700,
+ BCM57311,
+ BCM57312,
+ BCM57402,
+ BCM57404,
+ BCM57406,
+ BCM57402_NPAR,
+ BCM57407,
+ BCM57412,
+ BCM57414,
+ BCM57416,
+ BCM57417,
+ BCM57412_NPAR,
+ BCM57314,
+ BCM57417_SFP,
+ BCM57416_SFP,
+ BCM57404_NPAR,
+ BCM57406_NPAR,
+ BCM57407_SFP,
+ BCM57407_NPAR,
+ BCM57414_NPAR,
+ BCM57416_NPAR,
+ BCM57452,
+ BCM57454,
+ BCM5745x_NPAR,
+ BCM57508,
+ BCM57504,
+ BCM57502,
+ BCM57508_NPAR,
+ BCM57504_NPAR,
+ BCM57502_NPAR,
+ BCM58802,
+ BCM58804,
+ BCM58808,
+ NETXTREME_E_VF,
+ NETXTREME_C_VF,
+ NETXTREME_S_VF,
+ NETXTREME_C_VF_HV,
+ NETXTREME_E_VF_HV,
+ NETXTREME_E_P5_VF,
+ NETXTREME_E_P5_VF_HV,
+};
+
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
@@ -1840,6 +1915,10 @@ struct bnxt {
#define BNXT_STATE_DRV_REGISTERED 7
#define BNXT_STATE_PCI_CHANNEL_IO_FROZEN 8
#define BNXT_STATE_NAPI_DISABLED 9
+#define BNXT_STATE_FW_ACTIVATE 11
+#define BNXT_STATE_RECOVER 12
+#define BNXT_STATE_FW_NON_FATAL_COND 13
+#define BNXT_STATE_FW_ACTIVATE_RESET 14
#define BNXT_NO_FW_ACCESS(bp) \
(test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \
@@ -1879,8 +1958,13 @@ struct bnxt {
#define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000
#define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000
#define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000
+ #define BNXT_FW_CAP_LIVEPATCH 0x08000000
#define BNXT_FW_CAP_PTP_PPS 0x10000000
+ #define BNXT_FW_CAP_HOT_RESET_IF 0x20000000
#define BNXT_FW_CAP_RING_MONITOR 0x40000000
+ #define BNXT_FW_CAP_DBG_QCAPS 0x80000000
+
+ u32 fw_dbg_cap;
#define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM)
u32 hwrm_spec_code;
@@ -2049,6 +2133,7 @@ struct bnxt {
struct list_head tc_indr_block_list;
struct dentry *debugfs_pdev;
struct device *hwmon_dev;
+ enum board_idx board_idx;
};
#define BNXT_NUM_RX_RING_STATS 8
@@ -2090,6 +2175,9 @@ struct bnxt {
#define BNXT_RX_STATS_EXT_OFFSET(counter) \
(offsetof(struct rx_port_stats_ext, counter) / 8)
+#define BNXT_RX_STATS_EXT_NUM_LEGACY \
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks)
+
#define BNXT_TX_STATS_EXT_OFFSET(counter) \
(offsetof(struct tx_port_stats_ext, counter) / 8)
@@ -2181,11 +2269,13 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
int bmap_size, bool async_only);
+int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp);
int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_nq_rings_in_use(struct bnxt *bp);
int bnxt_hwrm_set_coal(struct bnxt *);
+void bnxt_free_ctx_mem(struct bnxt *bp);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
@@ -2194,9 +2284,11 @@ int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init);
void bnxt_tx_disable(struct bnxt *bp);
void bnxt_tx_enable(struct bnxt *bp);
+void bnxt_report_link(struct bnxt *bp);
int bnxt_update_link(struct bnxt *bp, bool chng_link_state);
int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool, bool);
+int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset);
int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_free_wol_fltr(struct bnxt *bp);
int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all);
@@ -2205,6 +2297,7 @@ int bnxt_hwrm_fw_set_time(struct bnxt *);
int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_half_open_nic(struct bnxt *bp);
void bnxt_half_close_nic(struct bnxt *bp);
+void bnxt_reenable_sriov(struct bnxt *bp);
int bnxt_close_nic(struct bnxt *, bool, bool);
int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words,
u32 *reg_buf);
@@ -2212,6 +2305,8 @@ void bnxt_fw_exception(struct bnxt *bp);
void bnxt_fw_reset(struct bnxt *bp);
int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int tx_xdp);
+int bnxt_fw_init_one(struct bnxt *bp);
+bool bnxt_hwrm_reset_permitted(struct bnxt *bp);
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
int bnxt_restore_pf_fw_resources(struct bnxt *bp);
@@ -2219,5 +2314,5 @@ int bnxt_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid);
void bnxt_dim_work(struct work_struct *work);
int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi);
-
+void bnxt_print_device_info(struct bnxt *bp);
#endif
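
One detail worth noting in the bnxt.h changes: severity and remedy in struct bnxt_fw_health can be written from several paths (async events, diagnose, status updates), so the structure gains a mutex dedicated to them. A rough sketch of the intended locking discipline, mirroring the bnxt_devlink.c changes below; example_report_fw_error() is hypothetical and assumes the definitions added above:

static void example_report_fw_error(struct bnxt *bp, bool recoverable)
{
        struct bnxt_fw_health *h = bp->fw_health;

        mutex_lock(&h->lock);           /* protects severity and remedy */
        h->severity = recoverable ? SEVERITY_RECOVERABLE : SEVERITY_FATAL;
        h->remedy = recoverable ? REMEDY_DEVLINK_RECOVER :
                                  REMEDY_POWER_CYCLE_DEVICE;
        mutex_unlock(&h->lock);

        /* report to devlink without holding the lock; the reporter
         * callbacks (e.g. diagnose) take it themselves
         */
        devlink_health_report(h->fw_reporter, "FW error reported", h);
}
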
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
new file mode 100644
index 000000000000..d3cb2f21946d
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.c
@@ -0,0 +1,444 @@
+/* Broadcom NetXtreme-C/E network driver.
+ *
+ * Copyright (c) 2021 Broadcom Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include "bnxt_hsi.h"
+#include "bnxt.h"
+#include "bnxt_hwrm.h"
+#include "bnxt_coredump.h"
+
+static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
+ struct bnxt_hwrm_dbg_dma_info *info)
+{
+ struct hwrm_dbg_cmn_input *cmn_req = msg;
+ __le16 *seq_ptr = msg + info->seq_off;
+ struct hwrm_dbg_cmn_output *cmn_resp;
+ u16 seq = 0, len, segs_off;
+ dma_addr_t dma_handle;
+ void *dma_buf, *resp;
+ int rc, off = 0;
+
+ dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
+ if (!dma_buf) {
+ hwrm_req_drop(bp, msg);
+ return -ENOMEM;
+ }
+
+ hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
+ cmn_resp = hwrm_req_hold(bp, msg);
+ resp = cmn_resp;
+
+ segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ total_segments);
+ cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
+ cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
+ while (1) {
+ *seq_ptr = cpu_to_le16(seq);
+ rc = hwrm_req_send(bp, msg);
+ if (rc)
+ break;
+
+ len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
+ if (!seq &&
+ cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
+ info->segs = le16_to_cpu(*((__le16 *)(resp +
+ segs_off)));
+ if (!info->segs) {
+ rc = -EIO;
+ break;
+ }
+
+ info->dest_buf_size = info->segs *
+ sizeof(struct coredump_segment_record);
+ info->dest_buf = kmalloc(info->dest_buf_size,
+ GFP_KERNEL);
+ if (!info->dest_buf) {
+ rc = -ENOMEM;
+ break;
+ }
+ }
+
+ if (info->dest_buf) {
+ if ((info->seg_start + off + len) <=
+ BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
+ memcpy(info->dest_buf + off, dma_buf, len);
+ } else {
+ rc = -ENOBUFS;
+ break;
+ }
+ }
+
+ if (cmn_req->req_type ==
+ cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
+ info->dest_buf_size += len;
+
+ if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
+ break;
+
+ seq++;
+ off += len;
+ }
+ hwrm_req_drop(bp, msg);
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
+ struct bnxt_coredump *coredump)
+{
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ struct hwrm_dbg_coredump_list_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
+ if (rc)
+ return rc;
+
+ info.dma_len = COREDUMP_LIST_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
+ data_len);
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc) {
+ coredump->data = info.dest_buf;
+ coredump->data_size = info.dest_buf_size;
+ coredump->total_segs = info.segs;
+ }
+ return rc;
+}
+
+static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
+ u16 segment_id)
+{
+ struct hwrm_dbg_coredump_initiate_input *req;
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
+ if (rc)
+ return rc;
+
+ hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ return hwrm_req_send(bp, req);
+}
+
+static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
+ u16 segment_id, u32 *seg_len,
+ void *buf, u32 buf_len, u32 offset)
+{
+ struct hwrm_dbg_coredump_retrieve_input *req;
+ struct bnxt_hwrm_dbg_dma_info info = {NULL};
+ int rc;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
+ if (rc)
+ return rc;
+
+ req->component_id = cpu_to_le16(component_id);
+ req->segment_id = cpu_to_le16(segment_id);
+
+ info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
+ info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
+ seq_no);
+ info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
+ data_len);
+ if (buf) {
+ info.dest_buf = buf + offset;
+ info.buf_len = buf_len;
+ info.seg_start = offset;
+ }
+
+ rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
+ if (!rc)
+ *seg_len = info.dest_buf_size;
+
+ return rc;
+}
+
+static void
+bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
+ struct bnxt_coredump_segment_hdr *seg_hdr,
+ struct coredump_segment_record *seg_rec, u32 seg_len,
+ int status, u32 duration, u32 instance)
+{
+ memset(seg_hdr, 0, sizeof(*seg_hdr));
+ memcpy(seg_hdr->signature, "sEgM", 4);
+ if (seg_rec) {
+ seg_hdr->component_id = (__force __le32)seg_rec->component_id;
+ seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
+ seg_hdr->low_version = seg_rec->version_low;
+ seg_hdr->high_version = seg_rec->version_hi;
+ seg_hdr->flags = cpu_to_le32(seg_rec->compress_flags);
+ } else {
+ /* For hwrm_ver_get response Component id = 2
+ * and Segment id = 0
+ */
+ seg_hdr->component_id = cpu_to_le32(2);
+ seg_hdr->segment_id = 0;
+ }
+ seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
+ seg_hdr->length = cpu_to_le32(seg_len);
+ seg_hdr->status = cpu_to_le32(status);
+ seg_hdr->duration = cpu_to_le32(duration);
+ seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
+ seg_hdr->instance = cpu_to_le32(instance);
+}
+
+static void bnxt_fill_cmdline(struct bnxt_coredump_record *record)
+{
+ struct mm_struct *mm = current->mm;
+ int i, len, last = 0;
+
+ if (mm) {
+ len = min_t(int, mm->arg_end - mm->arg_start,
+ sizeof(record->commandline) - 1);
+ if (len && !copy_from_user(record->commandline,
+ (char __user *)mm->arg_start, len)) {
+ for (i = 0; i < len; i++) {
+ if (record->commandline[i])
+ last = i;
+ else
+ record->commandline[i] = ' ';
+ }
+ record->commandline[last + 1] = 0;
+ return;
+ }
+ }
+
+ strscpy(record->commandline, current->comm, TASK_COMM_LEN);
+}
+
+static void
+bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
+ time64_t start, s16 start_utc, u16 total_segs,
+ int status)
+{
+ time64_t end = ktime_get_real_seconds();
+ u32 os_ver_major = 0, os_ver_minor = 0;
+ struct tm tm;
+
+ time64_to_tm(start, 0, &tm);
+ memset(record, 0, sizeof(*record));
+ memcpy(record->signature, "cOrE", 4);
+ record->flags = 0;
+ record->low_version = 0;
+ record->high_version = 1;
+ record->asic_state = 0;
+ strscpy(record->system_name, utsname()->nodename,
+ sizeof(record->system_name));
+ record->year = cpu_to_le16(tm.tm_year + 1900);
+ record->month = cpu_to_le16(tm.tm_mon + 1);
+ record->day = cpu_to_le16(tm.tm_mday);
+ record->hour = cpu_to_le16(tm.tm_hour);
+ record->minute = cpu_to_le16(tm.tm_min);
+ record->second = cpu_to_le16(tm.tm_sec);
+ record->utc_bias = cpu_to_le16(start_utc);
+ bnxt_fill_cmdline(record);
+ record->total_segments = cpu_to_le32(total_segs);
+
+ if (sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor) != 2)
+ netdev_warn(bp->dev, "Unknown OS release in coredump\n");
+ record->os_ver_major = cpu_to_le32(os_ver_major);
+ record->os_ver_minor = cpu_to_le32(os_ver_minor);
+
+ strscpy(record->os_name, utsname()->sysname, sizeof(record->os_name));
+ time64_to_tm(end, 0, &tm);
+ record->end_year = cpu_to_le16(tm.tm_year + 1900);
+ record->end_month = cpu_to_le16(tm.tm_mon + 1);
+ record->end_day = cpu_to_le16(tm.tm_mday);
+ record->end_hour = cpu_to_le16(tm.tm_hour);
+ record->end_minute = cpu_to_le16(tm.tm_min);
+ record->end_second = cpu_to_le16(tm.tm_sec);
+ record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
+ record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
+ bp->ver_resp.chip_rev << 8 |
+ bp->ver_resp.chip_metal);
+ record->asic_id2 = 0;
+ record->coredump_status = cpu_to_le32(status);
+ record->ioctl_low_version = 0;
+ record->ioctl_high_version = 0;
+}
+
+static int __bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
+{
+ u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
+ u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
+ struct coredump_segment_record *seg_record = NULL;
+ struct bnxt_coredump_segment_hdr seg_hdr;
+ struct bnxt_coredump coredump = {NULL};
+ time64_t start_time;
+ u16 start_utc;
+ int rc = 0, i;
+
+ if (buf)
+ buf_len = *dump_len;
+
+ start_time = ktime_get_real_seconds();
+ start_utc = sys_tz.tz_minuteswest * 60;
+ seg_hdr_len = sizeof(seg_hdr);
+
+ /* First segment should be hwrm_ver_get response */
+ *dump_len = seg_hdr_len + ver_get_resp_len;
+ if (buf) {
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
+ 0, 0, 0);
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len;
+ memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
+ offset += ver_get_resp_len;
+ }
+
+ rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
+ if (rc) {
+ netdev_err(bp->dev, "Failed to get coredump segment list\n");
+ goto err;
+ }
+
+ *dump_len += seg_hdr_len * coredump.total_segs;
+
+ seg_record = (struct coredump_segment_record *)coredump.data;
+ seg_record_len = sizeof(*seg_record);
+
+ for (i = 0; i < coredump.total_segs; i++) {
+ u16 comp_id = le16_to_cpu(seg_record->component_id);
+ u16 seg_id = le16_to_cpu(seg_record->segment_id);
+ u32 duration = 0, seg_len = 0;
+ unsigned long start, end;
+
+ if (buf && ((offset + seg_hdr_len) >
+ BNXT_COREDUMP_BUF_LEN(buf_len))) {
+ rc = -ENOBUFS;
+ goto err;
+ }
+
+ start = jiffies;
+
+ rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
+ if (rc) {
+ netdev_err(bp->dev,
+ "Failed to initiate coredump for seg = %d\n",
+ seg_record->segment_id);
+ goto next_seg;
+ }
+
+ /* Write segment data into the buffer */
+ rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
+ &seg_len, buf, buf_len,
+ offset + seg_hdr_len);
+ if (rc && rc == -ENOBUFS)
+ goto err;
+ else if (rc)
+ netdev_err(bp->dev,
+ "Failed to retrieve coredump for seg = %d\n",
+ seg_record->segment_id);
+
+next_seg:
+ end = jiffies;
+ duration = jiffies_to_msecs(end - start);
+ bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
+ rc, duration, 0);
+
+ if (buf) {
+ /* Write segment header into the buffer */
+ memcpy(buf + offset, &seg_hdr, seg_hdr_len);
+ offset += seg_hdr_len + seg_len;
+ }
+
+ *dump_len += seg_len;
+ seg_record =
+ (struct coredump_segment_record *)((u8 *)seg_record +
+ seg_record_len);
+ }
+
+err:
+ if (buf)
+ bnxt_fill_coredump_record(bp, buf + offset, start_time,
+ start_utc, coredump.total_segs + 1,
+ rc);
+ kfree(coredump.data);
+ *dump_len += sizeof(struct bnxt_coredump_record);
+ if (rc == -ENOBUFS)
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
+ return rc;
+}
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len)
+{
+ if (dump_type == BNXT_DUMP_CRASH) {
+#ifdef CONFIG_TEE_BNXT_FW
+ return tee_bnxt_copy_coredump(buf, 0, *dump_len);
+#else
+ return -EOPNOTSUPP;
+#endif
+ } else {
+ return __bnxt_get_coredump(bp, buf, dump_len);
+ }
+}
+
+static int bnxt_hwrm_get_dump_len(struct bnxt *bp, u16 dump_type, u32 *dump_len)
+{
+ struct hwrm_dbg_qcfg_output *resp;
+ struct hwrm_dbg_qcfg_input *req;
+ int rc, hdr_len = 0;
+
+ if (!(bp->fw_cap & BNXT_FW_CAP_DBG_QCAPS))
+ return -EOPNOTSUPP;
+
+ if (dump_type == BNXT_DUMP_CRASH &&
+ !(bp->fw_dbg_cap & DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR))
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_DBG_QCFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ if (dump_type == BNXT_DUMP_CRASH)
+ req->flags = cpu_to_le16(DBG_QCFG_REQ_FLAGS_CRASHDUMP_SIZE_FOR_DEST_DEST_SOC_DDR);
+
+ resp = hwrm_req_hold(bp, req);
+ rc = hwrm_req_send(bp, req);
+ if (rc)
+ goto get_dump_len_exit;
+
+ if (dump_type == BNXT_DUMP_CRASH) {
+ *dump_len = le32_to_cpu(resp->crashdump_size);
+ } else {
+ /* The driver adds its own coredump header, a "HWRM_VER_GET response"
+ * segment and a trailing record on top of the firmware coredump.
+ */
+ hdr_len = sizeof(struct bnxt_coredump_segment_hdr) +
+ sizeof(struct hwrm_ver_get_output) +
+ sizeof(struct bnxt_coredump_record);
+ *dump_len = le32_to_cpu(resp->coredump_size) + hdr_len;
+ }
+ if (*dump_len <= hdr_len)
+ rc = -EINVAL;
+
+get_dump_len_exit:
+ hwrm_req_drop(bp, req);
+ return rc;
+}
+
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type)
+{
+ u32 len = 0;
+
+ if (bnxt_hwrm_get_dump_len(bp, dump_type, &len)) {
+ if (dump_type == BNXT_DUMP_CRASH)
+ len = BNXT_CRASH_DUMP_LEN;
+ else
+ __bnxt_get_coredump(bp, NULL, &len);
+ }
+ return len;
+}
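
Every HWRM call in this new file follows the same request lifecycle provided by bnxt_hwrm.h: hwrm_req_init() allocates and types the request, hwrm_req_hold() keeps the response buffer valid across the send, and hwrm_req_drop() releases both. A condensed sketch of the pattern using HWRM_FUNC_QCFG (the helper example_query_func_flags() itself is hypothetical):

static int example_query_func_flags(struct bnxt *bp, u16 *flags)
{
        struct hwrm_func_qcfg_output *resp;
        struct hwrm_func_qcfg_input *req;
        int rc;

        rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);  /* allocate + type the request */
        if (rc)
                return rc;

        req->fid = cpu_to_le16(0xffff);               /* 0xffff: the caller's own function */
        resp = hwrm_req_hold(bp, req);                /* response stays valid after send */
        rc = hwrm_req_send(bp, req);
        if (!rc)
                *flags = le16_to_cpu(resp->flags);
        hwrm_req_drop(bp, req);                       /* releases request and response */
        return rc;
}
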
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
index 09c22f8fe399..b1a1b2fffb19 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_coredump.h
@@ -10,6 +10,10 @@
#ifndef BNXT_COREDUMP_H
#define BNXT_COREDUMP_H
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/rtc.h>
+
struct bnxt_coredump_segment_hdr {
__u8 signature[4];
__le32 component_id;
@@ -63,4 +67,51 @@ struct bnxt_coredump_record {
__u8 ioctl_high_version;
__le16 rsvd3[313];
};
+
+#define BNXT_CRASH_DUMP_LEN (8 << 20)
+
+#define COREDUMP_LIST_BUF_LEN 2048
+#define COREDUMP_RETRIEVE_BUF_LEN 4096
+
+struct bnxt_coredump {
+ void *data;
+ int data_size;
+ u16 total_segs;
+};
+
+#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
+
+struct bnxt_hwrm_dbg_dma_info {
+ void *dest_buf;
+ int dest_buf_size;
+ u16 dma_len;
+ u16 seq_off;
+ u16 data_len_off;
+ u16 segs;
+ u32 seg_start;
+ u32 buf_len;
+};
+
+struct hwrm_dbg_cmn_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le64 host_dest_addr;
+ __le32 host_buf_len;
+};
+
+struct hwrm_dbg_cmn_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+ #define HWRM_DBG_CMN_FLAGS_MORE 1
+};
+
+int bnxt_get_coredump(struct bnxt *bp, u16 dump_type, void *buf, u32 *dump_len);
+u32 bnxt_get_coredump_length(struct bnxt *bp, u16 dump_type);
+
#endif
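
The length accounting in __bnxt_get_coredump() and bnxt_hwrm_get_dump_len() follows from these definitions: the driver wraps the firmware data in its own segment headers, prepends a "HWRM_VER_GET response" pseudo-segment and always appends a bnxt_coredump_record, which is why BNXT_COREDUMP_BUF_LEN() reserves room for that record. A sketch with a hypothetical helper, for n_segs firmware segments carrying data_len bytes in total:

static u32 example_live_dump_len(u32 n_segs, u32 data_len)
{
        u32 len;

        /* pseudo-segment carrying the HWRM_VER_GET response */
        len = sizeof(struct bnxt_coredump_segment_hdr) +
              sizeof(struct hwrm_ver_get_output);
        /* one header per firmware segment plus the segment payloads */
        len += n_segs * sizeof(struct bnxt_coredump_segment_hdr) + data_len;
        /* trailing record; BNXT_COREDUMP_BUF_LEN() reserves space for it */
        len += sizeof(struct bnxt_coredump_record);
        return len;
}
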
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
index 228a5db7e143..217ff597cdf2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
@@ -159,10 +159,10 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
- for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
+ for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw.cfg)) {
int tc;
- memcpy(&cos2bw.queue_id, data, sizeof(cos2bw) - 4);
+ memcpy(&cos2bw.cfg, data, sizeof(cos2bw.cfg));
if (i == 0)
cos2bw.queue_id = resp->queue_id0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
index 6eed231de565..716742522161 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.h
@@ -23,13 +23,15 @@ struct bnxt_dcb {
struct bnxt_cos2bw_cfg {
u8 pad[3];
- u8 queue_id;
- __le32 min_bw;
- __le32 max_bw;
+ struct_group_attr(cfg, __packed,
+ u8 queue_id;
+ __le32 min_bw;
+ __le32 max_bw;
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
- u8 tsa;
- u8 pri_lvl;
- u8 bw_weight;
+ u8 tsa;
+ u8 pri_lvl;
+ u8 bw_weight;
+ );
u8 unused;
};
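
struct_group_attr(), from <linux/stddef.h>, wraps a run of members in an anonymous union so they remain addressable individually while also forming one named, bounded object. That is what lets bnxt_hwrm_queue_cos2bw_qcfg() above copy sizeof(cos2bw.cfg) bytes instead of the old "sizeof(cos2bw) - 4" arithmetic, which FORTIFY_SOURCE flags as a memcpy() spanning multiple struct members. A small self-contained sketch with hypothetical names:

#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_regs {
        u8 pad;
        struct_group_attr(win, __packed,
                u8 id;
                __le32 lo;
                __le32 hi;
        );
        u8 tail;
};

static void example_copy_window(struct example_regs *dst, const void *src)
{
        /* copies exactly the grouped members; the bounds are checkable */
        memcpy(&dst->win, src, sizeof(dst->win));
}
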
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index 9576547df4ab..ce790e9b45c3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -16,6 +16,18 @@
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
+#include "bnxt_ulp.h"
+#include "bnxt_ptp.h"
+#include "bnxt_coredump.h"
+
+static void __bnxt_fw_recover(struct bnxt *bp)
+{
+ if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
+ test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
+ bnxt_fw_reset(bp);
+ else
+ bnxt_fw_exception(bp);
+}
static int
bnxt_dl_flash_update(struct devlink *dl,
@@ -40,146 +52,208 @@ bnxt_dl_flash_update(struct devlink *dl,
return rc;
}
-static int bnxt_fw_reporter_diagnose(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg,
- struct netlink_ext_ack *extack)
+static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
+{
+ struct hwrm_func_cfg_input *req;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG);
+ if (rc)
+ return rc;
+
+ req->fid = cpu_to_le16(0xffff);
+ req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
+ if (remote_reset)
+ req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);
+
+ return hwrm_req_send(bp, req);
+}
+
+static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
+{
+ switch (severity) {
+ case SEVERITY_NORMAL: return "normal";
+ case SEVERITY_WARNING: return "warning";
+ case SEVERITY_RECOVERABLE: return "recoverable";
+ case SEVERITY_FATAL: return "fatal";
+ default: return "unknown";
+ }
+}
+
+static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
+{
+ switch (remedy) {
+ case REMEDY_DEVLINK_RECOVER: return "devlink recover";
+ case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
+ case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
+ case REMEDY_FW_UPDATE: return "update firmware";
+ case REMEDY_HW_REPLACE: return "replace hardware";
+ default: return "unknown";
+ }
+}
+
+static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- u32 val;
+ struct bnxt_fw_health *h = bp->fw_health;
+ u32 fw_status, fw_resets;
int rc;
if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state))
- return 0;
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
- val = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (!h->status_reliable)
+ return devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
- if (BNXT_FW_IS_BOOTING(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Not yet completed initialization");
+ mutex_lock(&h->lock);
+ fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
+ if (BNXT_FW_IS_BOOTING(fw_status)) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
if (rc)
- return rc;
- } else if (BNXT_FW_IS_ERR(val)) {
- rc = devlink_fmsg_string_pair_put(fmsg, "Description",
- "Encountered fatal error and cannot recover");
+ goto unlock;
+ } else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
+ if (!h->severity) {
+ h->severity = SEVERITY_FATAL;
+ h->remedy = REMEDY_POWER_CYCLE_DEVICE;
+ h->diagnoses++;
+ devlink_health_report(h->fw_reporter,
+ "FW error diagnosed", h);
+ }
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "error");
if (rc)
- return rc;
+ goto unlock;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
+ if (rc)
+ goto unlock;
+ } else {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
+ if (rc)
+ goto unlock;
}
- if (val >> 16) {
- rc = devlink_fmsg_u32_pair_put(fmsg, "Error code", val >> 16);
+ rc = devlink_fmsg_string_pair_put(fmsg, "Severity",
+ bnxt_health_severity_str(h->severity));
+ if (rc)
+ goto unlock;
+
+ if (h->severity) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Remedy",
+ bnxt_health_remedy_str(h->remedy));
if (rc)
- return rc;
+ goto unlock;
+ if (h->remedy == REMEDY_DEVLINK_RECOVER) {
+ rc = devlink_fmsg_string_pair_put(fmsg, "Impact",
+ "traffic+ntuple_cfg");
+ if (rc)
+ goto unlock;
+ }
}
- val = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
- rc = devlink_fmsg_u32_pair_put(fmsg, "Reset count", val);
- if (rc)
+unlock:
+ mutex_unlock(&h->lock);
+ if (rc || !h->resets_reliable)
return rc;
- return 0;
+ fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
+ if (rc)
+ return rc;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
+ if (rc)
+ return rc;
+ return devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
}
-static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
- .name = "fw",
- .diagnose = bnxt_fw_reporter_diagnose,
-};
-
-static int bnxt_fw_reset_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
+ u32 dump_len;
+ void *data;
+ int rc;
- if (!priv_ctx)
+ /* TODO: no firmware dump support in devlink_health_report() context */
+ if (priv_ctx)
return -EOPNOTSUPP;
- bnxt_fw_reset(bp);
- return -EINPROGRESS;
-}
+ dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
+ if (!dump_len)
+ return -EIO;
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_reset_reporter_ops = {
- .name = "fw_reset",
- .recover = bnxt_fw_reset_recover,
-};
+ data = vmalloc(dump_len);
+ if (!data)
+ return -ENOMEM;
-static int bnxt_fw_fatal_recover(struct devlink_health_reporter *reporter,
- void *priv_ctx,
- struct netlink_ext_ack *extack)
+ rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
+ if (!rc) {
+ rc = devlink_fmsg_pair_nest_start(fmsg, "core");
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
+ if (rc)
+ goto exit;
+ rc = devlink_fmsg_pair_nest_end(fmsg);
+ }
+
+exit:
+ vfree(data);
+ return rc;
+}
+
+static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
{
struct bnxt *bp = devlink_health_reporter_priv(reporter);
- struct bnxt_fw_reporter_ctx *fw_reporter_ctx = priv_ctx;
- unsigned long event;
- if (!priv_ctx)
- return -EOPNOTSUPP;
+ if (bp->fw_health->severity == SEVERITY_FATAL)
+ return -ENODEV;
- bp->fw_health->fatal = true;
- event = fw_reporter_ctx->sp_event;
- if (event == BNXT_FW_RESET_NOTIFY_SP_EVENT)
- bnxt_fw_reset(bp);
- else if (event == BNXT_FW_EXCEPTION_SP_EVENT)
- bnxt_fw_exception(bp);
+ set_bit(BNXT_STATE_RECOVER, &bp->state);
+ __bnxt_fw_recover(bp);
return -EINPROGRESS;
}
-static const
-struct devlink_health_reporter_ops bnxt_dl_fw_fatal_reporter_ops = {
- .name = "fw_fatal",
- .recover = bnxt_fw_fatal_recover,
+static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
+ .name = "fw",
+ .diagnose = bnxt_fw_diagnose,
+ .dump = bnxt_fw_dump,
+ .recover = bnxt_fw_recover,
};
void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health || health->fw_reporter)
return;
- if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET) || health->fw_reset_reporter)
- goto err_recovery;
-
- health->fw_reset_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reset_reporter_ops,
+ health->fw_reporter =
+ devlink_health_reporter_create(bp->dl, &bnxt_dl_fw_reporter_ops,
0, bp);
- if (IS_ERR(health->fw_reset_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reset_reporter));
- health->fw_reset_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_HOT_RESET;
- }
-
-err_recovery:
- if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
- return;
-
- if (!health->fw_reporter) {
- health->fw_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
- PTR_ERR(health->fw_reporter));
- health->fw_reporter = NULL;
- bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
- return;
- }
- }
-
- if (health->fw_fatal_reporter)
- return;
-
- health->fw_fatal_reporter =
- devlink_health_reporter_create(bp->dl,
- &bnxt_dl_fw_fatal_reporter_ops,
- 0, bp);
- if (IS_ERR(health->fw_fatal_reporter)) {
- netdev_warn(bp->dev, "Failed to create FW fatal health reporter, rc = %ld\n",
- PTR_ERR(health->fw_fatal_reporter));
- health->fw_fatal_reporter = NULL;
+ if (IS_ERR(health->fw_reporter)) {
+ netdev_warn(bp->dev, "Failed to create FW health reporter, rc = %ld\n",
+ PTR_ERR(health->fw_reporter));
+ health->fw_reporter = NULL;
bp->fw_cap &= ~BNXT_FW_CAP_ERROR_RECOVERY;
}
}
@@ -188,15 +262,9 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
{
struct bnxt_fw_health *health = bp->fw_health;
- if (!bp->dl || !health)
+ if (!health)
return;
- if ((all || !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) &&
- health->fw_reset_reporter) {
- devlink_health_reporter_destroy(health->fw_reset_reporter);
- health->fw_reset_reporter = NULL;
- }
-
if ((bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) && !all)
return;
@@ -204,82 +272,319 @@ void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all)
devlink_health_reporter_destroy(health->fw_reporter);
health->fw_reporter = NULL;
}
-
- if (health->fw_fatal_reporter) {
- devlink_health_reporter_destroy(health->fw_fatal_reporter);
- health->fw_fatal_reporter = NULL;
- }
}
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event)
+void bnxt_devlink_health_fw_report(struct bnxt *bp)
{
struct bnxt_fw_health *fw_health = bp->fw_health;
- struct bnxt_fw_reporter_ctx fw_reporter_ctx;
-
- fw_reporter_ctx.sp_event = event;
- switch (event) {
- case BNXT_FW_RESET_NOTIFY_SP_EVENT:
- if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal async event received",
- &fw_reporter_ctx);
- return;
- }
- if (!fw_health->fw_reset_reporter)
- return;
+ int rc;
- devlink_health_report(fw_health->fw_reset_reporter,
- "FW non-fatal reset event received",
- &fw_reporter_ctx);
+ if (!fw_health)
return;
- case BNXT_FW_EXCEPTION_SP_EVENT:
- if (!fw_health->fw_fatal_reporter)
- return;
-
- devlink_health_report(fw_health->fw_fatal_reporter,
- "FW fatal error reported",
- &fw_reporter_ctx);
+ if (!fw_health->fw_reporter) {
+ __bnxt_fw_recover(bp);
return;
}
+
+ mutex_lock(&fw_health->lock);
+ fw_health->severity = SEVERITY_RECOVERABLE;
+ fw_health->remedy = REMEDY_DEVLINK_RECOVER;
+ mutex_unlock(&fw_health->lock);
+ rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
+ fw_health);
+ if (rc == -ECANCELED)
+ __bnxt_fw_recover(bp);
}
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy)
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
{
- struct bnxt_fw_health *health = bp->fw_health;
+ struct bnxt_fw_health *fw_health = bp->fw_health;
u8 state;
- if (healthy)
+ mutex_lock(&fw_health->lock);
+ if (healthy) {
+ fw_health->severity = SEVERITY_NORMAL;
state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
- else
+ } else {
+ fw_health->severity = SEVERITY_FATAL;
+ fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
-
- if (health->fatal)
- devlink_health_reporter_state_update(health->fw_fatal_reporter,
- state);
- else
- devlink_health_reporter_state_update(health->fw_reset_reporter,
- state);
-
- health->fatal = false;
+ }
+ mutex_unlock(&fw_health->lock);
+ devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}
-void bnxt_dl_health_recovery_done(struct bnxt *bp)
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
- struct bnxt_fw_health *hlth = bp->fw_health;
+ struct bnxt_dl *dl = devlink_priv(bp->dl);
- if (hlth->fatal)
- devlink_health_reporter_recovery_done(hlth->fw_fatal_reporter);
- else
- devlink_health_reporter_recovery_done(hlth->fw_reset_reporter);
+ devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
+ bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
}
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct netlink_ext_ack *extack);
+static void
+bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
+ struct hwrm_fw_livepatch_output *resp)
+{
+ int err = ((struct hwrm_err_output *)resp)->cmd_err;
+
+ switch (err) {
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
+ netdev_err(bp->dev, "Illegal live patch opcode");
+ NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Live patch deactivation failed. Firmware not patched.");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
+ NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
+ break;
+ case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
+ NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
+ break;
+ default:
+ netdev_err(bp->dev, "Unexpected live patch error: %hhd\n", err);
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
+ break;
+ }
+}
+
+static int
+bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
+{
+ struct hwrm_fw_livepatch_query_output *query_resp;
+ struct hwrm_fw_livepatch_query_input *query_req;
+ struct hwrm_fw_livepatch_output *patch_resp;
+ struct hwrm_fw_livepatch_input *patch_req;
+ u32 installed = 0;
+ u16 flags;
+ u8 target;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
+ NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
+ return -EOPNOTSUPP;
+ }
+
+ rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+ query_resp = hwrm_req_hold(bp, query_req);
+
+ rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
+ if (rc) {
+ hwrm_req_drop(bp, query_req);
+ return rc;
+ }
+ patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
+ patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
+ patch_resp = hwrm_req_hold(bp, patch_req);
+
+ for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
+ query_req->fw_target = target;
+ rc = hwrm_req_send(bp, query_req);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
+ break;
+ }
+
+ flags = le16_to_cpu(query_resp->status_flags);
+ if (~flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL)
+ continue;
+ if ((flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) &&
+ !strncmp(query_resp->active_ver, query_resp->install_ver,
+ sizeof(query_resp->active_ver)))
+ continue;
+
+ patch_req->fw_target = target;
+ rc = hwrm_req_send(bp, patch_req);
+ if (rc) {
+ bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
+ break;
+ }
+ installed++;
+ }
+
+ if (!rc && !installed) {
+ NL_SET_ERR_MSG_MOD(extack, "No live patches found");
+ rc = -ENOENT;
+ }
+ hwrm_req_drop(bp, query_req);
+ hwrm_req_drop(bp, patch_req);
+ return rc;
+}
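
The activate path above is also a compact illustration of the driver's hwrm_req_* message API: init a typed request, hold the response across the send, drop when done. A stripped-down sketch of that lifecycle, separated from the per-target loop; foo_query_livepatch() is hypothetical, while the request/response types and the INSTALL flag are the ones used above.

/* Hypothetical caller showing the hwrm_req_* lifecycle used above. */
static int foo_query_livepatch(struct bnxt *bp, u8 target)
{
	struct hwrm_fw_livepatch_query_output *resp;
	struct hwrm_fw_livepatch_query_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FW_LIVEPATCH_QUERY);
	if (rc)
		return rc;		/* nothing allocated on failure */

	req->fw_target = target;
	resp = hwrm_req_hold(bp, req);	/* keep resp valid after send */
	rc = hwrm_req_send(bp, req);
	if (!rc && !(le16_to_cpu(resp->status_flags) &
		     FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL))
		rc = -ENOENT;		/* nothing installed for this target */
	hwrm_req_drop(bp, req);		/* releases both req and resp */
	return rc;
}
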
+
+static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
+ enum devlink_reload_action action,
+ enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ if (BNXT_PF(bp) && bp->pf.active_vfs) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "reload is unsupported when VFs are allocated\n");
+ return -EOPNOTSUPP;
+ }
+ rtnl_lock();
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ bnxt_ulp_stop(bp);
+ if (netif_running(bp->dev)) {
+ rc = bnxt_close_nic(bp, true, true);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to close");
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ }
+ bnxt_vf_reps_free(bp);
+ rc = bnxt_hwrm_func_drv_unrgtr(bp);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
+ if (netif_running(bp->dev))
+ dev_close(bp->dev);
+ rtnl_unlock();
+ break;
+ }
+ bnxt_cancel_reservations(bp, false);
+ bnxt_free_ctx_mem(bp);
+ kfree(bp->ctx);
+ bp->ctx = NULL;
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ return bnxt_dl_livepatch_activate(bp, extack);
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
+ NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
+ return -EOPNOTSUPP;
+ }
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+ rtnl_lock();
+ if (bp->dev->reg_state == NETREG_UNREGISTERED) {
+ rtnl_unlock();
+ return -ENODEV;
+ }
+ if (netif_running(bp->dev))
+ set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rc = bnxt_hwrm_firmware_reset(bp->dev,
+ FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
+ FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
+ FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
+ FW_RESET_REQ_FLAGS_FW_ACTIVATION);
+ if (rc) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ rtnl_unlock();
+ }
+ break;
+ }
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ return rc;
+}
+
+static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc = 0;
+
+ *actions_performed = 0;
+ switch (action) {
+ case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
+ bnxt_fw_init_one(bp);
+ bnxt_vf_reps_alloc(bp);
+ if (netif_running(bp->dev))
+ rc = bnxt_open_nic(bp, true, true);
+ bnxt_ulp_start(bp, rc);
+ if (!rc) {
+ bnxt_reenable_sriov(bp);
+ bnxt_ptp_reapply_pps(bp);
+ }
+ break;
+ }
+ case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
+ unsigned long start = jiffies;
+ unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;
+
+ if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
+ break;
+ if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
+ timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
+ if (!netif_running(bp->dev))
+ NL_SET_ERR_MSG_MOD(extack,
+ "Device is closed, not waiting for reset notice that will never come");
+ rtnl_unlock();
+ while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
+ if (time_after(jiffies, timeout)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
+ rc = -ETIMEDOUT;
+ break;
+ }
+ if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
+ NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
+ rc = -ENODEV;
+ break;
+ }
+ msleep(50);
+ }
+ rtnl_lock();
+ if (!rc)
+ *actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
+ break;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!rc) {
+ bnxt_print_device_info(bp);
+ if (netif_running(bp->dev)) {
+ mutex_lock(&bp->link_lock);
+ bnxt_report_link(bp);
+ mutex_unlock(&bp->link_lock);
+ }
+ *actions_performed |= BIT(action);
+ } else if (netif_running(bp->dev)) {
+ dev_close(bp->dev);
+ }
+ rtnl_unlock();
+ return rc;
+}
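
For context, and assuming standard iproute2 devlink syntax, these two handlers back `devlink dev reload <dev> action driver_reinit` and `devlink dev reload <dev> action fw_activate`; adding `limit no_reset` to the latter takes the live patch path in bnxt_dl_reload_down() instead of a firmware reset. Note that the FW_ACTIVATE leg drops the RTNL lock while it polls BNXT_STATE_FW_ACTIVATE, presumably so the reset/recovery work, which also needs RTNL, can make progress during the wait.
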
+
static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
@@ -287,6 +592,11 @@ static const struct devlink_ops bnxt_dl_ops = {
#endif /* CONFIG_BNXT_SRIOV */
.info_get = bnxt_dl_info_get,
.flash_update = bnxt_dl_flash_update,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
+ .reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
+ .reload_down = bnxt_dl_reload_down,
+ .reload_up = bnxt_dl_reload_up,
};
static const struct devlink_ops bnxt_vf_dl_ops;
@@ -430,6 +740,57 @@ static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
return 0;
}
+#define BNXT_FW_SRT_PATCH "fw.srt.patch"
+#define BNXT_FW_CRT_PATCH "fw.crt.patch"
+
+static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
+ struct devlink_info_req *req,
+ const char *key)
+{
+ struct hwrm_fw_livepatch_query_input *query;
+ struct hwrm_fw_livepatch_query_output *resp;
+ u16 flags;
+ int rc;
+
+ if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
+ return 0;
+
+ rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
+ if (rc)
+ return rc;
+
+ if (!strcmp(key, BNXT_FW_SRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
+ else if (!strcmp(key, BNXT_FW_CRT_PATCH))
+ query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
+ else
+ goto exit;
+
+ resp = hwrm_req_hold(bp, query);
+ rc = hwrm_req_send(bp, query);
+ if (rc)
+ goto exit;
+
+ flags = le16_to_cpu(resp->status_flags);
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
+ resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
+ rc = devlink_info_version_running_put(req, key, resp->active_ver);
+ if (rc)
+ goto exit;
+ }
+
+ if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
+ resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
+ rc = devlink_info_version_stored_put(req, key, resp->install_ver);
+ if (rc)
+ goto exit;
+ }
+
+exit:
+ hwrm_req_drop(bp, query);
+ return rc;
+}
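
Assuming standard iproute2 output formatting, these two entries surface in `devlink dev info <dev>` as "fw.srt.patch" and "fw.crt.patch": the active patch version in the running section and the installed-but-inactive version in the stored section, mirroring the STATUS_FLAGS_ACTIVE/INSTALL checks above.
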
+
#define HWRM_FW_VER_STR_LEN 16
static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
@@ -554,8 +915,13 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
if (rc ||
- !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID))
+ !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
+ if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
+ return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW,
+ buf);
return 0;
+ }
buf[0] = 0;
strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
@@ -583,8 +949,16 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
- return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
- DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
+ DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
+ if (rc)
+ return rc;
+
+ rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
+ if (rc)
+ return rc;
+ return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}
static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
@@ -712,6 +1086,32 @@ static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
return 0;
}
+static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
+ return 0;
+}
+
+static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct bnxt *bp = bnxt_get_bp_from_dl(dl);
+ int rc;
+
+ rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
+ if (rc)
+ return rc;
+
+ bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
+ return rc;
+}
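
This pair backs the generic ENABLE_REMOTE_DEV_RESET parameter registered below; with standard iproute2 syntax it is toggled at runtime via, e.g., `devlink dev param set <dev> name enable_remote_dev_reset value false cmode runtime`, which lands in bnxt_remote_dev_reset_set() and is pushed to firmware through bnxt_hwrm_remote_dev_reset_set().
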
+
static const struct devlink_param bnxt_dl_params[] = {
DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
@@ -734,53 +1134,49 @@ static const struct devlink_param bnxt_dl_params[] = {
BIT(DEVLINK_PARAM_CMODE_PERMANENT),
bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
NULL),
-};
-
-static const struct devlink_param bnxt_dl_port_params[] = {
+ /* keep REMOTE_DEV_RESET last, it is excluded based on caps */
+ DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ bnxt_remote_dev_reset_get,
+ bnxt_remote_dev_reset_set, NULL),
};
static int bnxt_dl_params_register(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
- rc = devlink_params_register(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- if (rc) {
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
+ if (rc)
netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
- return rc;
- }
- rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
- if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed\n");
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- return rc;
- }
- devlink_params_publish(bp->dl);
-
- return 0;
+ return rc;
}
static void bnxt_dl_params_unregister(struct bnxt *bp)
{
+ int num_params = ARRAY_SIZE(bnxt_dl_params);
+
if (bp->hwrm_spec_code < 0x10600)
return;
- devlink_params_unregister(bp->dl, bnxt_dl_params,
- ARRAY_SIZE(bnxt_dl_params));
- devlink_port_params_unregister(&bp->dl_port, bnxt_dl_port_params,
- ARRAY_SIZE(bnxt_dl_port_params));
+ if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
+ num_params--;
+
+ devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}
int bnxt_dl_register(struct bnxt *bp)
{
const struct devlink_ops *devlink_ops;
struct devlink_port_attrs attrs = {};
+ struct bnxt_dl *bp_dl;
struct devlink *dl;
int rc;
@@ -795,21 +1191,18 @@ int bnxt_dl_register(struct bnxt *bp)
return -ENOMEM;
}
- bnxt_link_bp_to_dl(bp, dl);
+ bp->dl = dl;
+ bp_dl = devlink_priv(dl);
+ bp_dl->bp = bp;
+ bnxt_dl_set_remote_reset(dl, true);
/* Add switchdev eswitch mode setting, if SRIOV supported */
if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
bp->hwrm_spec_code > 0x10803)
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
- rc = devlink_register(dl);
- if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
- goto err_dl_free;
- }
-
if (!BNXT_PF(bp))
- return 0;
+ goto out;
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = bp->pf.port_id;
@@ -819,21 +1212,20 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
netdev_err(bp->dev, "devlink_port_register failed\n");
- goto err_dl_unreg;
+ goto err_dl_free;
}
rc = bnxt_dl_params_register(bp);
if (rc)
goto err_dl_port_unreg;
+out:
+ devlink_register(dl);
return 0;
err_dl_port_unreg:
devlink_port_unregister(&bp->dl_port);
-err_dl_unreg:
- devlink_unregister(dl);
err_dl_free:
- bnxt_link_bp_to_dl(bp, NULL);
devlink_free(dl);
return rc;
}
@@ -842,13 +1234,10 @@ void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
if (BNXT_PF(bp)) {
bnxt_dl_params_unregister(bp);
devlink_port_unregister(&bp->dl_port);
}
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
index d889f240da2b..a715458abc30 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.h
@@ -13,6 +13,7 @@
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
+ bool remote_reset;
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
@@ -20,17 +21,21 @@ static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
-/* To clear devlink pointer from bp, pass NULL dl */
-static inline void bnxt_link_bp_to_dl(struct bnxt *bp, struct devlink *dl)
+static inline void bnxt_dl_remote_reload(struct bnxt *bp)
{
- bp->dl = dl;
+ devlink_remote_reload_actions_performed(bp->dl, 0,
+ BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
+ BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE));
+}
- /* add a back pointer in dl to bp */
- if (dl) {
- struct bnxt_dl *bp_dl = devlink_priv(dl);
+static inline bool bnxt_dl_get_remote_reset(struct devlink *dl)
+{
+ return ((struct bnxt_dl *)devlink_priv(dl))->remote_reset;
+}
- bp_dl->bp = bp;
- }
+static inline void bnxt_dl_set_remote_reset(struct devlink *dl, bool value)
+{
+ ((struct bnxt_dl *)devlink_priv(dl))->remote_reset = value;
}
#define NVM_OFF_MSIX_VEC_PER_PF_MAX 108
@@ -66,9 +71,9 @@ enum bnxt_dl_version_type {
BNXT_VERSION_STORED,
};
-void bnxt_devlink_health_report(struct bnxt *bp, unsigned long event);
-void bnxt_dl_health_status_update(struct bnxt *bp, bool healthy);
-void bnxt_dl_health_recovery_done(struct bnxt *bp);
+void bnxt_devlink_health_fw_report(struct bnxt *bp);
+void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy);
+void bnxt_dl_health_fw_recovery_done(struct bnxt *bp);
void bnxt_dl_fw_reporters_create(struct bnxt *bp);
void bnxt_dl_fw_reporters_destroy(struct bnxt *bp, bool all);
int bnxt_dl_register(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 7260910e75fb..8188d55722e4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -427,6 +427,8 @@ static const struct {
BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks),
+ BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks),
};
static const struct {
@@ -909,7 +911,7 @@ static int bnxt_set_channels(struct net_device *dev,
if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) !=
bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) &&
- (dev->priv_flags & IFF_RXFH_CONFIGURED)) {
+ netif_is_rxfh_configured(dev)) {
netdev_warn(dev, "RSS table size change required, RSS table entries must be default to proceed\n");
return -EINVAL;
}
@@ -2180,13 +2182,18 @@ static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type,
return rc;
}
-static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
- u8 self_reset, u8 flags)
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct hwrm_fw_reset_input *req;
int rc;
+ if (!bnxt_hwrm_reset_permitted(bp)) {
+ netdev_warn(bp->dev, "Reset denied by firmware, it may be inhibited by remote driver");
+ return -EPERM;
+ }
+
rc = hwrm_req_init(bp, req, HWRM_FW_RESET);
if (rc)
return rc;
@@ -2825,39 +2832,56 @@ static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
return retval;
}
-static void bnxt_get_pkgver(struct net_device *dev)
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size)
{
struct bnxt *bp = netdev_priv(dev);
u16 index = 0;
char *pkgver;
u32 pkglen;
u8 *pkgbuf;
- int len;
+ int rc;
- if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
- BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
- &index, NULL, &pkglen) != 0)
- return;
+ rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
+ BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
+ &index, NULL, &pkglen);
+ if (rc)
+ return rc;
pkgbuf = kzalloc(pkglen, GFP_KERNEL);
if (!pkgbuf) {
dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
pkglen);
- return;
+ return -ENOMEM;
}
- if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
+ rc = bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf);
+ if (rc)
goto err;
pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
pkglen);
- if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
+ if (pkgver && *pkgver != 0 && isdigit(*pkgver))
+ strscpy(ver, pkgver, size);
+ else
+ rc = -ENOENT;
+
+err:
+ kfree(pkgbuf);
+
+ return rc;
+}
+
+static void bnxt_get_pkgver(struct net_device *dev)
+{
+ struct bnxt *bp = netdev_priv(dev);
+ char buf[FW_VER_STR_LEN];
+ int len;
+
+ if (!bnxt_get_pkginfo(dev, buf, sizeof(buf))) {
len = strlen(bp->fw_ver_str);
snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
- "/pkg %s", pkgver);
+ "/pkg %s", buf);
}
-err:
- kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
@@ -3609,337 +3633,6 @@ static int bnxt_reset(struct net_device *dev, u32 *flags)
return 0;
}
-static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg,
- struct bnxt_hwrm_dbg_dma_info *info)
-{
- struct hwrm_dbg_cmn_input *cmn_req = msg;
- __le16 *seq_ptr = msg + info->seq_off;
- struct hwrm_dbg_cmn_output *cmn_resp;
- u16 seq = 0, len, segs_off;
- dma_addr_t dma_handle;
- void *dma_buf, *resp;
- int rc, off = 0;
-
- dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle);
- if (!dma_buf) {
- hwrm_req_drop(bp, msg);
- return -ENOMEM;
- }
-
- hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT);
- cmn_resp = hwrm_req_hold(bp, msg);
- resp = cmn_resp;
-
- segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
- total_segments);
- cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
- cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
- while (1) {
- *seq_ptr = cpu_to_le16(seq);
- rc = hwrm_req_send(bp, msg);
- if (rc)
- break;
-
- len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
- if (!seq &&
- cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
- info->segs = le16_to_cpu(*((__le16 *)(resp +
- segs_off)));
- if (!info->segs) {
- rc = -EIO;
- break;
- }
-
- info->dest_buf_size = info->segs *
- sizeof(struct coredump_segment_record);
- info->dest_buf = kmalloc(info->dest_buf_size,
- GFP_KERNEL);
- if (!info->dest_buf) {
- rc = -ENOMEM;
- break;
- }
- }
-
- if (info->dest_buf) {
- if ((info->seg_start + off + len) <=
- BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
- memcpy(info->dest_buf + off, dma_buf, len);
- } else {
- rc = -ENOBUFS;
- break;
- }
- }
-
- if (cmn_req->req_type ==
- cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
- info->dest_buf_size += len;
-
- if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
- break;
-
- seq++;
- off += len;
- }
- hwrm_req_drop(bp, msg);
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
- struct bnxt_coredump *coredump)
-{
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- struct hwrm_dbg_coredump_list_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
- if (rc)
- return rc;
-
- info.dma_len = COREDUMP_LIST_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
- data_len);
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc) {
- coredump->data = info.dest_buf;
- coredump->data_size = info.dest_buf_size;
- coredump->total_segs = info.segs;
- }
- return rc;
-}
-
-static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
- u16 segment_id)
-{
- struct hwrm_dbg_coredump_initiate_input *req;
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE);
- if (rc)
- return rc;
-
- hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT);
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- return hwrm_req_send(bp, req);
-}
-
-static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
- u16 segment_id, u32 *seg_len,
- void *buf, u32 buf_len, u32 offset)
-{
- struct hwrm_dbg_coredump_retrieve_input *req;
- struct bnxt_hwrm_dbg_dma_info info = {NULL};
- int rc;
-
- rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE);
- if (rc)
- return rc;
-
- req->component_id = cpu_to_le16(component_id);
- req->segment_id = cpu_to_le16(segment_id);
-
- info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
- info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
- seq_no);
- info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
- data_len);
- if (buf) {
- info.dest_buf = buf + offset;
- info.buf_len = buf_len;
- info.seg_start = offset;
- }
-
- rc = bnxt_hwrm_dbg_dma_data(bp, req, &info);
- if (!rc)
- *seg_len = info.dest_buf_size;
-
- return rc;
-}
-
-static void
-bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
- struct bnxt_coredump_segment_hdr *seg_hdr,
- struct coredump_segment_record *seg_rec, u32 seg_len,
- int status, u32 duration, u32 instance)
-{
- memset(seg_hdr, 0, sizeof(*seg_hdr));
- memcpy(seg_hdr->signature, "sEgM", 4);
- if (seg_rec) {
- seg_hdr->component_id = (__force __le32)seg_rec->component_id;
- seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
- seg_hdr->low_version = seg_rec->version_low;
- seg_hdr->high_version = seg_rec->version_hi;
- } else {
- /* For hwrm_ver_get response Component id = 2
- * and Segment id = 0
- */
- seg_hdr->component_id = cpu_to_le32(2);
- seg_hdr->segment_id = 0;
- }
- seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
- seg_hdr->length = cpu_to_le32(seg_len);
- seg_hdr->status = cpu_to_le32(status);
- seg_hdr->duration = cpu_to_le32(duration);
- seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
- seg_hdr->instance = cpu_to_le32(instance);
-}
-
-static void
-bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
- time64_t start, s16 start_utc, u16 total_segs,
- int status)
-{
- time64_t end = ktime_get_real_seconds();
- u32 os_ver_major = 0, os_ver_minor = 0;
- struct tm tm;
-
- time64_to_tm(start, 0, &tm);
- memset(record, 0, sizeof(*record));
- memcpy(record->signature, "cOrE", 4);
- record->flags = 0;
- record->low_version = 0;
- record->high_version = 1;
- record->asic_state = 0;
- strlcpy(record->system_name, utsname()->nodename,
- sizeof(record->system_name));
- record->year = cpu_to_le16(tm.tm_year + 1900);
- record->month = cpu_to_le16(tm.tm_mon + 1);
- record->day = cpu_to_le16(tm.tm_mday);
- record->hour = cpu_to_le16(tm.tm_hour);
- record->minute = cpu_to_le16(tm.tm_min);
- record->second = cpu_to_le16(tm.tm_sec);
- record->utc_bias = cpu_to_le16(start_utc);
- strcpy(record->commandline, "ethtool -w");
- record->total_segments = cpu_to_le32(total_segs);
-
- sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
- record->os_ver_major = cpu_to_le32(os_ver_major);
- record->os_ver_minor = cpu_to_le32(os_ver_minor);
-
- strlcpy(record->os_name, utsname()->sysname, 32);
- time64_to_tm(end, 0, &tm);
- record->end_year = cpu_to_le16(tm.tm_year + 1900);
- record->end_month = cpu_to_le16(tm.tm_mon + 1);
- record->end_day = cpu_to_le16(tm.tm_mday);
- record->end_hour = cpu_to_le16(tm.tm_hour);
- record->end_minute = cpu_to_le16(tm.tm_min);
- record->end_second = cpu_to_le16(tm.tm_sec);
- record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
- record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
- bp->ver_resp.chip_rev << 8 |
- bp->ver_resp.chip_metal);
- record->asic_id2 = 0;
- record->coredump_status = cpu_to_le32(status);
- record->ioctl_low_version = 0;
- record->ioctl_high_version = 0;
-}
-
-static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
-{
- u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
- u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
- struct coredump_segment_record *seg_record = NULL;
- struct bnxt_coredump_segment_hdr seg_hdr;
- struct bnxt_coredump coredump = {NULL};
- time64_t start_time;
- u16 start_utc;
- int rc = 0, i;
-
- if (buf)
- buf_len = *dump_len;
-
- start_time = ktime_get_real_seconds();
- start_utc = sys_tz.tz_minuteswest * 60;
- seg_hdr_len = sizeof(seg_hdr);
-
- /* First segment should be hwrm_ver_get response */
- *dump_len = seg_hdr_len + ver_get_resp_len;
- if (buf) {
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
- 0, 0, 0);
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len;
- memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
- offset += ver_get_resp_len;
- }
-
- rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
- if (rc) {
- netdev_err(bp->dev, "Failed to get coredump segment list\n");
- goto err;
- }
-
- *dump_len += seg_hdr_len * coredump.total_segs;
-
- seg_record = (struct coredump_segment_record *)coredump.data;
- seg_record_len = sizeof(*seg_record);
-
- for (i = 0; i < coredump.total_segs; i++) {
- u16 comp_id = le16_to_cpu(seg_record->component_id);
- u16 seg_id = le16_to_cpu(seg_record->segment_id);
- u32 duration = 0, seg_len = 0;
- unsigned long start, end;
-
- if (buf && ((offset + seg_hdr_len) >
- BNXT_COREDUMP_BUF_LEN(buf_len))) {
- rc = -ENOBUFS;
- goto err;
- }
-
- start = jiffies;
-
- rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
- if (rc) {
- netdev_err(bp->dev,
- "Failed to initiate coredump for seg = %d\n",
- seg_record->segment_id);
- goto next_seg;
- }
-
- /* Write segment data into the buffer */
- rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
- &seg_len, buf, buf_len,
- offset + seg_hdr_len);
- if (rc && rc == -ENOBUFS)
- goto err;
- else if (rc)
- netdev_err(bp->dev,
- "Failed to retrieve coredump for seg = %d\n",
- seg_record->segment_id);
-
-next_seg:
- end = jiffies;
- duration = jiffies_to_msecs(end - start);
- bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
- rc, duration, 0);
-
- if (buf) {
- /* Write segment header into the buffer */
- memcpy(buf + offset, &seg_hdr, seg_hdr_len);
- offset += seg_hdr_len + seg_len;
- }
-
- *dump_len += seg_len;
- seg_record =
- (struct coredump_segment_record *)((u8 *)seg_record +
- seg_record_len);
- }
-
-err:
- if (buf)
- bnxt_fill_coredump_record(bp, buf + offset, start_time,
- start_utc, coredump.total_segs + 1,
- rc);
- kfree(coredump.data);
- *dump_len += sizeof(struct bnxt_coredump_record);
- if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
- return rc;
-}
-
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
struct bnxt *bp = netdev_priv(dev);
@@ -3971,10 +3664,7 @@ static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
bp->ver_resp.hwrm_fw_rsvd_8b;
dump->flag = bp->dump_flag;
- if (bp->dump_flag == BNXT_DUMP_CRASH)
- dump->len = BNXT_CRASH_DUMP_LEN;
- else
- bnxt_get_coredump(bp, NULL, &dump->len);
+ dump->len = bnxt_get_coredump_length(bp, bp->dump_flag);
return 0;
}
@@ -3989,15 +3679,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
memset(buf, 0, dump->len);
dump->flag = bp->dump_flag;
- if (dump->flag == BNXT_DUMP_CRASH) {
-#ifdef CONFIG_TEE_BNXT_FW
- return tee_bnxt_copy_coredump(buf, 0, dump->len);
-#endif
- } else {
- return bnxt_get_coredump(bp, buf, &dump->len);
- }
-
- return 0;
+ return bnxt_get_coredump(bp, dump->flag, buf, &dump->len);
}
static int bnxt_get_ts_info(struct net_device *dev,
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 0a57cb6a4a4b..6aa44840f13a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -22,49 +22,6 @@ struct bnxt_led_cfg {
u8 rsvd;
};
-#define COREDUMP_LIST_BUF_LEN 2048
-#define COREDUMP_RETRIEVE_BUF_LEN 4096
-
-struct bnxt_coredump {
- void *data;
- int data_size;
- u16 total_segs;
-};
-
-#define BNXT_COREDUMP_BUF_LEN(len) ((len) - sizeof(struct bnxt_coredump_record))
-
-struct bnxt_hwrm_dbg_dma_info {
- void *dest_buf;
- int dest_buf_size;
- u16 dma_len;
- u16 seq_off;
- u16 data_len_off;
- u16 segs;
- u32 seg_start;
- u32 buf_len;
-};
-
-struct hwrm_dbg_cmn_input {
- __le16 req_type;
- __le16 cmpl_ring;
- __le16 seq_id;
- __le16 target_id;
- __le64 resp_addr;
- __le64 host_dest_addr;
- __le32 host_buf_len;
-};
-
-struct hwrm_dbg_cmn_output {
- __le16 error_code;
- __le16 req_type;
- __le16 seq_id;
- __le16 resp_len;
- u8 flags;
- #define HWRM_DBG_CMN_FLAGS_MORE 1
-};
-
-#define BNXT_CRASH_DUMP_LEN (8 << 20)
-
#define BNXT_LED_DFLT_ENA \
(PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
@@ -94,8 +51,11 @@ u32 bnxt_fw_to_ethtool_speed(u16);
u16 bnxt_get_fw_auto_link_speeds(u32);
int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp,
struct hwrm_nvm_get_dev_info_output *nvm_dev_info);
+int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type,
+ u8 self_reset, u8 flags);
int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw,
u32 install_type);
+int bnxt_get_pkginfo(struct net_device *dev, char *ver, int size);
void bnxt_ethtool_init(struct bnxt *bp);
void bnxt_ethtool_free(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index 94d07a9f7034..ea86c54247c7 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -532,8 +532,8 @@ struct hwrm_err_output {
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 10
#define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 52
-#define HWRM_VERSION_STR "1.10.2.52"
+#define HWRM_VERSION_RSVD 63
+#define HWRM_VERSION_STR "1.10.2.63"
/* hwrm_ver_get_input (size:192b/24B) */
struct hwrm_ver_get_input {
@@ -1587,6 +1587,8 @@ struct hwrm_func_qcaps_output {
#define FUNC_QCAPS_RESP_FLAGS_EXT_DFLT_VLAN_TPID_PCP_SUPPORTED 0x200000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_KTLS_SUPPORTED 0x400000UL
#define FUNC_QCAPS_RESP_FLAGS_EXT_EP_RATE_CONTROL 0x800000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_MIN_BW_SUPPORTED 0x1000000UL
+ #define FUNC_QCAPS_RESP_FLAGS_EXT_TX_COAL_CMPL_CAP 0x2000000UL
u8 max_schqs;
u8 mpc_chnls_cap;
#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE 0x1UL
@@ -1956,6 +1958,18 @@ struct hwrm_func_cfg_output {
u8 valid;
};
+/* hwrm_func_cfg_cmd_err (size:64b/8B) */
+struct hwrm_func_cfg_cmd_err {
+ u8 code;
+ #define FUNC_CFG_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_RANGE 0x1UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_MORE_THAN_MAX 0x2UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_MIN_BW_UNSUPPORTED 0x3UL
+ #define FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT 0x4UL
+ #define FUNC_CFG_CMD_ERR_CODE_LAST FUNC_CFG_CMD_ERR_CODE_PARTITION_BW_PERCENT
+ u8 unused_0[7];
+};
+
/* hwrm_func_qstats_input (size:192b/24B) */
struct hwrm_func_qstats_input {
__le16 req_type;
@@ -3601,7 +3615,15 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4 0x1dUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4 0x1eUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4 0x1fUL
- #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR 0x20UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR 0x21UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR 0x22UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER 0x23UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2 0x24UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2 0x25UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2 0x26UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2 0x27UL
+ #define PORT_PHY_QCFG_RESP_PHY_TYPE_LAST PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
@@ -4040,7 +4062,7 @@ struct tx_port_stats_ext {
__le64 pfc_pri7_tx_transitions;
};
-/* rx_port_stats_ext (size:3648b/456B) */
+/* rx_port_stats_ext (size:3776b/472B) */
struct rx_port_stats_ext {
__le64 link_down_events;
__le64 continuous_pause_events;
@@ -4099,6 +4121,8 @@ struct rx_port_stats_ext {
__le64 rx_discard_packets_cos5;
__le64 rx_discard_packets_cos6;
__le64 rx_discard_packets_cos7;
+ __le64 rx_fec_corrected_blocks;
+ __le64 rx_fec_uncorrectable_blocks;
};
/* hwrm_port_qstats_ext_input (size:320b/40B) */
@@ -4372,7 +4396,10 @@ struct hwrm_port_phy_qcaps_output {
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_50G 0x1UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_100G 0x2UL
#define PORT_PHY_QCAPS_RESP_SUPPORTED_PAM4_SPEEDS_FORCE_MODE_200G 0x4UL
- u8 unused_0[3];
+ __le16 flags2;
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PAUSE_UNSUPPORTED 0x1UL
+ #define PORT_PHY_QCAPS_RESP_FLAGS2_PFC_UNSUPPORTED 0x2UL
+ u8 unused_0[1];
u8 valid;
};
@@ -6076,6 +6103,11 @@ struct hwrm_vnic_qcaps_output {
#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP 0x800UL
#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP 0x1000UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_STRICT_HASH_TYPE_CAP 0x2000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_TYPE_DELTA_CAP 0x4000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_TOEPLITZ_CAP 0x8000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_XOR_CAP 0x10000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_HASH_FUNCTION_CHKSM_CAP 0x20000UL
+ #define VNIC_QCAPS_RESP_FLAGS_RSS_IPV6_FLOW_LABEL_CAP 0x40000UL
__le16 max_aggs_supported;
u8 unused_1[5];
u8 valid;
@@ -6206,7 +6238,15 @@ struct hwrm_vnic_rss_cfg_input {
__le64 ring_grp_tbl_addr;
__le64 hash_key_tbl_addr;
__le16 rss_ctx_idx;
- u8 unused_1[6];
+ u8 flags;
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_INCLUDE 0x1UL
+ #define VNIC_RSS_CFG_REQ_FLAGS_HASH_TYPE_EXCLUDE 0x2UL
+ u8 rss_hash_function;
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_TOEPLITZ 0x0UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_XOR 0x1UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM 0x2UL
+ #define VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_LAST VNIC_RSS_CFG_REQ_RSS_HASH_FUNCTION_CHECKSUM
+ u8 unused_1[4];
};
/* hwrm_vnic_rss_cfg_output (size:128b/16B) */
@@ -6331,7 +6371,24 @@ struct hwrm_ring_alloc_input {
#define RING_ALLOC_REQ_RING_TYPE_RX_AGG 0x4UL
#define RING_ALLOC_REQ_RING_TYPE_NQ 0x5UL
#define RING_ALLOC_REQ_RING_TYPE_LAST RING_ALLOC_REQ_RING_TYPE_NQ
- u8 unused_0;
+ u8 cmpl_coal_cnt;
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_OFF 0x0UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_4 0x1UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_8 0x2UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_12 0x3UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_16 0x4UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_24 0x5UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_32 0x6UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_48 0x7UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64 0x8UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_96 0x9UL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_128 0xaUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_192 0xbUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_256 0xcUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_320 0xdUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_384 0xeUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX 0xfUL
+ #define RING_ALLOC_REQ_CMPL_COAL_CNT_LAST RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_MAX
__le16 flags;
#define RING_ALLOC_REQ_FLAGS_RX_SOP_PAD 0x1UL
__le64 page_tbl_addr;
@@ -7099,6 +7156,7 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_FID 0x8UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_ARP_REPLY 0x10UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX 0x20UL
+ #define CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_NO_L2_CONTEXT 0x40UL
__le32 enables;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID 0x1UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE 0x2UL
@@ -7234,6 +7292,7 @@ struct hwrm_cfa_ntuple_filter_cfg_input {
__le32 flags;
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_FID 0x1UL
#define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_DEST_RFS_RING_IDX 0x2UL
+ #define CFA_NTUPLE_FILTER_CFG_REQ_FLAGS_NO_L2_CONTEXT 0x4UL
__le64 ntuple_filter_id;
__le32 new_dst_id;
__le32 new_mirror_vnic_id;
@@ -7834,11 +7893,11 @@ struct hwrm_cfa_adv_flow_mgnt_qcaps_output {
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_TRUFLOW_CAPABLE 0x8000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_L2_FILTER_TRAFFIC_TYPE_L2_ROCE_SUPPORTED 0x10000UL
#define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_LAG_SUPPORTED 0x20000UL
+ #define CFA_ADV_FLOW_MGNT_QCAPS_RESP_FLAGS_NTUPLE_FLOW_NO_L2CTX_SUPPORTED 0x40000UL
u8 unused_0[3];
u8 valid;
};
-/* hwrm_tunnel_dst_port_query_input (size:192b/24B) */
struct hwrm_tunnel_dst_port_query_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -8414,6 +8473,86 @@ struct hwrm_fw_get_structured_data_cmd_err {
u8 unused_0[7];
};
+/* hwrm_fw_livepatch_query_input (size:192b/24B) */
+struct hwrm_fw_livepatch_query_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 fw_target;
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_QUERY_REQ_FW_TARGET_LAST FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW
+ u8 unused_0[7];
+};
+
+/* hwrm_fw_livepatch_query_output (size:640b/80B) */
+struct hwrm_fw_livepatch_query_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ char install_ver[32];
+ char active_ver[32];
+ __le16 status_flags;
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL 0x1UL
+ #define FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE 0x2UL
+ u8 unused_0[5];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_input (size:256b/32B) */
+struct hwrm_fw_livepatch_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ u8 opcode;
+ #define FW_LIVEPATCH_REQ_OPCODE_ACTIVATE 0x1UL
+ #define FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE 0x2UL
+ #define FW_LIVEPATCH_REQ_OPCODE_LAST FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE
+ u8 fw_target;
+ #define FW_LIVEPATCH_REQ_FW_TARGET_COMMON_FW 0x1UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW 0x2UL
+ #define FW_LIVEPATCH_REQ_FW_TARGET_LAST FW_LIVEPATCH_REQ_FW_TARGET_SECURE_FW
+ u8 loadtype;
+ #define FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL 0x1UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT 0x2UL
+ #define FW_LIVEPATCH_REQ_LOADTYPE_LAST FW_LIVEPATCH_REQ_LOADTYPE_MEMORY_DIRECT
+ u8 flags;
+ __le32 patch_len;
+ __le64 host_addr;
+};
+
+/* hwrm_fw_livepatch_output (size:128b/16B) */
+struct hwrm_fw_livepatch_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 unused_0[7];
+ u8 valid;
+};
+
+/* hwrm_fw_livepatch_cmd_err (size:64b/8B) */
+struct hwrm_fw_livepatch_cmd_err {
+ u8 code;
+ #define FW_LIVEPATCH_CMD_ERR_CODE_UNKNOWN 0x0UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE 0x1UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_TARGET 0x2UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED 0x3UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED 0x4UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED 0x5UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL 0x6UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER 0x7UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE 0x8UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED 0x9UL
+ #define FW_LIVEPATCH_CMD_ERR_CODE_LAST FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED
+ u8 unused_0[7];
+};
+
/* hwrm_exec_fwd_resp_input (size:1024b/128B) */
struct hwrm_exec_fwd_resp_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
index f0aa480799ca..8388be119f9a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c
@@ -11,9 +11,7 @@
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
-#include <linux/ptp_clock_kernel.h>
#include <linux/net_tstamp.h>
-#include <linux/timecounter.h>
#include <linux/timekeeping.h>
#include <linux/ptp_classify.h>
#include "bnxt_hsi.h"
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
index fa5f05708e6d..7c528e1f8713 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h
@@ -10,6 +10,9 @@
#ifndef BNXT_PTP_H
#define BNXT_PTP_H
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+
#define BNXT_PTP_GRC_WIN 6
#define BNXT_PTP_GRC_WIN_BASE 0x6000
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 70d8ca3039dc..1d177fed44a6 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -1151,7 +1151,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp)
}
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
struct hwrm_func_vf_cfg_input *req;
int rc = 0;
@@ -1217,7 +1217,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
/* overwrite netdev dev_addr with admin VF MAC */
if (is_valid_ether_addr(bp->vf.mac_addr))
- memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(bp->dev, bp->vf.mac_addr);
update_vf_mac_exit:
hwrm_req_drop(bp, req);
if (inform_pf)
@@ -1246,7 +1246,7 @@ void bnxt_update_vf_mac(struct bnxt *bp)
{
}
-int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict)
+int bnxt_approve_mac(struct bnxt *bp, const u8 *mac, bool strict)
{
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
index 995535e4c11b..9a4bacba477b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.h
@@ -41,5 +41,5 @@ int bnxt_cfg_hw_sriov(struct bnxt *bp, int *num_vfs, bool reset);
void bnxt_sriov_disable(struct bnxt *);
void bnxt_hwrm_exec_fwd_req(struct bnxt *);
void bnxt_update_vf_mac(struct bnxt *);
-int bnxt_approve_mac(struct bnxt *, u8 *, bool);
+int bnxt_approve_mac(struct bnxt *, const u8 *, bool);
#endif
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 6b4d2556a6df..54d59f681b86 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -11,8 +11,7 @@
#define BNXT_ULP_H
#define BNXT_ROCE_ULP 0
-#define BNXT_OTHER_ULP 1
-#define BNXT_MAX_ULP 2
+#define BNXT_MAX_ULP 1
#define BNXT_MIN_ROCE_CP_RINGS 2
#define BNXT_MIN_ROCE_STAT_CTXS 1
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index 9401936b74fa..8eb28e088582 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -475,7 +475,7 @@ static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
- ether_addr_copy(dev->dev_addr, dev->perm_addr);
+ eth_hw_addr_set(dev, dev->perm_addr);
/* Set VF-Rep's max-mtu to the corresponding VF's max-mtu */
if (!bnxt_hwrm_vfr_qcfg(bp, vf_rep, &max_mtu))
dev->max_mtu = max_mtu;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 23c7595d2a1d..226f4403cfed 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -935,6 +935,48 @@ static int bcmgenet_set_coalesce(struct net_device *dev,
return 0;
}
+static void bcmgenet_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv;
+ u32 umac_cmd;
+
+ priv = netdev_priv(dev);
+
+ epause->autoneg = priv->autoneg_pause;
+
+ if (netif_carrier_ok(dev)) {
+ /* report active state when link is up */
+ umac_cmd = bcmgenet_umac_readl(priv, UMAC_CMD);
+ epause->tx_pause = !(umac_cmd & CMD_TX_PAUSE_IGNORE);
+ epause->rx_pause = !(umac_cmd & CMD_RX_PAUSE_IGNORE);
+ } else {
+ /* otherwise report stored settings */
+ epause->tx_pause = priv->tx_pause;
+ epause->rx_pause = priv->rx_pause;
+ }
+}
+
+static int bcmgenet_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *epause)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!dev->phydev)
+ return -ENODEV;
+
+ if (!phy_validate_pause(dev->phydev, epause))
+ return -EINVAL;
+
+ priv->autoneg_pause = !!epause->autoneg;
+ priv->tx_pause = !!epause->tx_pause;
+ priv->rx_pause = !!epause->rx_pause;
+
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
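
These handlers back the usual `ethtool -A <iface> autoneg on|off rx on|off tx on|off` and `ethtool -a <iface>` interface (standard ethtool flags; <iface> is a placeholder). The get path reads the resolved pause state from UMAC_CMD only while the carrier is up; otherwise it reports the stored request, which is what bcmgenet_mac_config() applies on the next link-up.
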
+
/* standard ethtool support functions. */
enum bcmgenet_stat_type {
BCMGENET_STAT_NETDEV = -1,
@@ -1587,6 +1629,8 @@ static const struct ethtool_ops bcmgenet_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_rxnfc = bcmgenet_get_rxnfc,
.set_rxnfc = bcmgenet_set_rxnfc,
+ .get_pauseparam = bcmgenet_get_pauseparam,
+ .set_pauseparam = bcmgenet_set_pauseparam,
};
/* Power down the unimac, based on mode. */
@@ -1609,7 +1653,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv,
/* Power down LED */
if (priv->hw_params->flags & GENET_HAS_EXT) {
reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- if (GENET_IS_V5(priv))
+ if (GENET_IS_V5(priv) && !priv->ephy_16nm)
reg |= EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -1646,7 +1690,7 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
case GENET_POWER_PASSIVE:
reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
EXT_ENERGY_DET_MASK);
- if (GENET_IS_V5(priv)) {
+ if (GENET_IS_V5(priv) && !priv->ephy_16nm) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
EXT_PWR_DOWN_PHY_SD |
@@ -3222,7 +3266,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
}
static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
- unsigned char *addr)
+ const unsigned char *addr)
{
bcmgenet_umac_writel(priv, get_unaligned_be32(&addr[0]), UMAC_MAC0);
bcmgenet_umac_writel(priv, get_unaligned_be16(&addr[4]), UMAC_MAC1);
@@ -3364,6 +3408,8 @@ static int bcmgenet_open(struct net_device *dev)
goto err_irq1;
}
+ bcmgenet_phy_pause_set(dev, priv->rx_pause, priv->tx_pause);
+
bcmgenet_netif_start(dev);
netif_tx_start_all_queues(dev);
@@ -3408,11 +3454,6 @@ static void bcmgenet_netif_stop(struct net_device *dev)
*/
cancel_work_sync(&priv->bcmgenet_irq_work);
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
-
/* tx reclaim */
bcmgenet_tx_reclaim_all(dev);
bcmgenet_fini_dma(priv);
@@ -3519,7 +3560,7 @@ static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
#define MAX_MDF_FILTER 17
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
- unsigned char *addr,
+ const unsigned char *addr,
int *i)
{
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
@@ -3592,7 +3633,7 @@ static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -3869,6 +3910,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
struct bcmgenet_plat_data {
enum bcmgenet_version version;
u32 dma_max_burst_length;
+ bool ephy_16nm;
};
static const struct bcmgenet_plat_data v1_plat_data = {
@@ -3901,6 +3943,12 @@ static const struct bcmgenet_plat_data bcm2711_plat_data = {
.dma_max_burst_length = 0x08,
};
+static const struct bcmgenet_plat_data bcm7712_plat_data = {
+ .version = GENET_V5,
+ .dma_max_burst_length = DMA_MAX_BURST_LENGTH,
+ .ephy_16nm = true,
+};
+
static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
@@ -3908,6 +3956,7 @@ static const struct of_device_id bcmgenet_match[] = {
{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
+ { .compatible = "brcm,bcm7712-genet-v5", .data = &bcm7712_plat_data },
{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);
@@ -3950,6 +3999,11 @@ static int bcmgenet_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
+ /* Set default pause parameters */
+ priv->autoneg_pause = 1;
+ priv->tx_pause = 1;
+ priv->rx_pause = 1;
+
SET_NETDEV_DEV(dev, &pdev->dev);
dev_set_drvdata(&pdev->dev, dev);
dev->watchdog_timeo = 2 * HZ;
@@ -3983,6 +4037,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
if (pdata) {
priv->version = pdata->version;
priv->dma_max_burst_length = pdata->dma_max_burst_length;
+ priv->ephy_16nm = pdata->ephy_16nm;
} else {
priv->version = pd->genet_version;
priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
@@ -4036,11 +4091,15 @@ static int bcmgenet_probe(struct platform_device *pdev)
bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
if (pd && !IS_ERR_OR_NULL(pd->mac_address))
- ether_addr_copy(dev->dev_addr, pd->mac_address);
+ eth_hw_addr_set(dev, pd->mac_address);
else
- if (!device_get_mac_address(&pdev->dev, dev->dev_addr, ETH_ALEN))
- if (has_acpi_companion(&pdev->dev))
- bcmgenet_get_hw_addr(priv, dev->dev_addr);
+ if (device_get_ethdev_address(&pdev->dev, dev))
+ if (has_acpi_companion(&pdev->dev)) {
+ u8 addr[ETH_ALEN];
+
+ bcmgenet_get_hw_addr(priv, addr);
+ eth_hw_addr_set(dev, addr);
+ }
if (!is_valid_ether_addr(dev->dev_addr)) {
dev_warn(&pdev->dev, "using random Ethernet MAC\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0a6d91b0f0aa..946f6e283c4e 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -329,6 +329,7 @@ struct bcmgenet_mib_counters {
#define EXT_CFG_IDDQ_BIAS (1 << 0)
#define EXT_CFG_PWR_DOWN (1 << 1)
#define EXT_CK25_DIS (1 << 4)
+#define EXT_CFG_IDDQ_GLOBAL_PWR (1 << 3)
#define EXT_GPHY_RESET (1 << 5)
/* DMA rings size */
@@ -594,6 +595,9 @@ struct bcmgenet_priv {
/* other misc variables */
struct bcmgenet_hw_params *hw_params;
+ unsigned autoneg_pause:1;
+ unsigned tx_pause:1;
+ unsigned rx_pause:1;
/* MDIO bus variables */
wait_queue_head_t wq;
@@ -606,13 +610,10 @@ struct bcmgenet_priv {
bool clk_eee_enabled;
/* PHY device variables */
- int old_link;
- int old_speed;
- int old_duplex;
- int old_pause;
phy_interface_t phy_interface;
int phy_addr;
int ext_phy;
+ bool ephy_16nm;
/* Interrupt variables */
struct work_struct bcmgenet_irq_work;
@@ -690,6 +691,7 @@ int bcmgenet_mii_init(struct net_device *dev);
int bcmgenet_mii_config(struct net_device *dev, bool init);
int bcmgenet_mii_probe(struct net_device *dev);
void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx);
void bcmgenet_phy_power_set(struct net_device *dev, bool enable);
void bcmgenet_mii_setup(struct net_device *dev);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 89d16c587bb7..5f259641437a 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -25,92 +25,80 @@
#include "bcmgenet.h"
-/* setup netdev link state when PHY link status change and
- * update UMAC and RGMII block when link up
- */
-void bcmgenet_mii_setup(struct net_device *dev)
+static void bcmgenet_mac_config(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct phy_device *phydev = dev->phydev;
u32 reg, cmd_bits = 0;
- bool status_changed = false;
- if (priv->old_link != phydev->link) {
- status_changed = true;
- priv->old_link = phydev->link;
- }
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = CMD_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = CMD_SPEED_100;
+ else
+ cmd_bits = CMD_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
- if (phydev->link) {
- /* check speed/duplex/pause changes */
- if (priv->old_speed != phydev->speed) {
- status_changed = true;
- priv->old_speed = phydev->speed;
- }
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL) {
+ cmd_bits |= CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+ } else {
+ /* pause capability defaults to Symmetric */
+ if (priv->autoneg_pause) {
+ bool tx_pause = 0, rx_pause = 0;
- if (priv->old_duplex != phydev->duplex) {
- status_changed = true;
- priv->old_duplex = phydev->duplex;
- }
+ if (phydev->autoneg)
+ phy_get_pause(phydev, &tx_pause, &rx_pause);
- if (priv->old_pause != phydev->pause) {
- status_changed = true;
- priv->old_pause = phydev->pause;
+ if (!tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ if (!rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
}
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* speed */
- if (phydev->speed == SPEED_1000)
- cmd_bits = CMD_SPEED_1000;
- else if (phydev->speed == SPEED_100)
- cmd_bits = CMD_SPEED_100;
- else
- cmd_bits = CMD_SPEED_10;
- cmd_bits <<= CMD_SPEED_SHIFT;
-
- /* duplex */
- if (phydev->duplex != DUPLEX_FULL)
- cmd_bits |= CMD_HD_EN;
-
- /* pause capability */
- if (!phydev->pause)
- cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
-
- /*
- * Program UMAC and RGMII block based on established
- * link speed, duplex, and pause. The speed set in
- * umac->cmd tell RGMII block which clock to use for
- * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
- * Receive clock is provided by the PHY.
- */
- reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
- reg &= ~OOB_DISABLE;
- reg |= RGMII_LINK;
- bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ /* Manual override */
+ if (!priv->rx_pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE;
+ if (!priv->tx_pause)
+ cmd_bits |= CMD_TX_PAUSE_IGNORE;
+ }
- reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
- CMD_HD_EN |
- CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
- reg |= cmd_bits;
- if (reg & CMD_SW_RESET) {
- reg &= ~CMD_SW_RESET;
- bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- udelay(2);
- reg |= CMD_TX_EN | CMD_RX_EN;
- }
+ /* Program UMAC and RGMII block based on established
+ * link speed, duplex, and pause. The speed set in
+ * umac->cmd tells the RGMII block which clock to use for
+ * transmit -- 25MHz(100Mbps) or 125MHz(1Gbps).
+ * Receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ if (reg & CMD_SW_RESET) {
+ reg &= ~CMD_SW_RESET;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- } else {
- /* done if nothing has changed */
- if (!status_changed)
- return;
-
- /* needed for MoCA fixed PHY to reflect correct link status */
- netif_carrier_off(dev);
+ udelay(2);
+ reg |= CMD_TX_EN | CMD_RX_EN;
}
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+}
+/* Set up the netdev link state when the PHY link status changes and
+ * update the UMAC and RGMII blocks when the link is up
+ */
+void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
phy_print_status(phydev);
}
@@ -130,20 +118,36 @@ static int bcmgenet_fixed_phy_link_update(struct net_device *dev,
return 0;
}
+void bcmgenet_phy_pause_set(struct net_device *dev, bool rx, bool tx)
+{
+ struct phy_device *phydev = dev->phydev;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising, rx);
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising,
+ rx | tx);
+ phy_start_aneg(phydev);
+
+ mutex_lock(&phydev->lock);
+ if (phydev->link)
+ bcmgenet_mac_config(dev);
+ mutex_unlock(&phydev->lock);
+}
+
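bcmgenet_phy_pause_set() above rewrites the Pause/Asym_Pause advertisement and restarts autonegotiation, and bcmgenet_mac_config() then asks phylib for the resolved result with phy_get_pause(). A generic sketch of the same phylib pattern, using the stock linkmode_set_pause() helper instead of open-coded bits; the example_* names are illustrative:

#include <linux/linkmode.h>
#include <linux/phy.h>

/* Advertise the requested pause modes and let the PHY renegotiate. */
static void example_set_pause(struct phy_device *phydev, bool rx, bool tx)
{
        linkmode_set_pause(phydev->advertising, tx, rx);
        phy_start_aneg(phydev);
}

/* Once the link is up, ask phylib what was actually negotiated. */
static void example_resolved_pause(struct phy_device *phydev,
                                   bool *tx_pause, bool *rx_pause)
{
        *tx_pause = false;
        *rx_pause = false;
        if (phydev->autoneg)
                phy_get_pause(phydev, tx_pause, rx_pause);
        /* program the MAC's RX/TX pause-frame enables from the results */
}

Note that the driver above sets Asym_Pause to rx | tx by hand, while linkmode_set_pause() encodes the ethtool convention (Pause = rx, Asym_Pause = rx ^ tx).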
void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
u32 reg = 0;
/* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
- if (GENET_IS_V4(priv)) {
+ if (GENET_IS_V4(priv) || priv->ephy_16nm) {
reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
if (enable) {
reg &= ~EXT_CK25_DIS;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
- reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
+ EXT_CFG_IDDQ_GLOBAL_PWR);
reg |= EXT_GPHY_RESET;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
@@ -151,7 +155,7 @@ void bcmgenet_phy_power_set(struct net_device *dev, bool enable)
reg &= ~EXT_GPHY_RESET;
} else {
reg |= EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN |
- EXT_GPHY_RESET;
+ EXT_GPHY_RESET | EXT_CFG_IDDQ_GLOBAL_PWR;
bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
mdelay(1);
reg |= EXT_CK25_DIS;
@@ -286,23 +290,53 @@ int bcmgenet_mii_probe(struct net_device *dev)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct device *kdev = &priv->pdev->dev;
struct device_node *dn = kdev->of_node;
+ phy_interface_t phy_iface = priv->phy_interface;
struct phy_device *phydev;
- u32 phy_flags = 0;
+ u32 phy_flags = PHY_BRCM_AUTO_PWRDWN_ENABLE |
+ PHY_BRCM_DIS_TXCRXC_NOENRGY |
+ PHY_BRCM_IDDQ_SUSPEND;
int ret;
/* Communicate the integrated PHY revision */
if (priv->internal_phy)
phy_flags = priv->gphy_rev;
- /* Initialize link state variables that bcmgenet_mii_setup() uses */
- priv->old_link = -1;
- priv->old_speed = -1;
- priv->old_duplex = -1;
- priv->old_pause = -1;
+ /* This is an ugly quirk but we have not been correctly interpreting
+ * the phy_interface values and we have done that across different
+ * drivers, so at least we are consistent in our mistakes.
+ *
+ * When the Generic PHY driver is in use either the PHY has been
+ * strapped or programmed correctly by the boot loader so we should
+ * stick to our incorrect interpretation since we have validated it.
+ *
+ * Now when a dedicated PHY driver is in use, we need to reverse the
+ * meaning of the phy_interface_mode values to something that the PHY
+ * driver will interpret and act on such that we have two mistakes
+ * canceling themselves so to speak. We only do this for the two
+ * modes that GENET driver officially supports on Broadcom STB chips:
+ * PHY_INTERFACE_MODE_RGMII and PHY_INTERFACE_MODE_RGMII_TXID. Other
+ * modes are not *officially* supported with the boot loader and the
+ * scripted environment generating Device Tree blobs for those
+ * platforms.
+ *
+ * Note that internal PHY, MoCA and fixed-link configurations are not
+ * affected because they use different phy_interface_t values or the
+ * Generic PHY driver.
+ */
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_ID;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ phy_iface = PHY_INTERFACE_MODE_RGMII_RXID;
+ break;
+ default:
+ break;
+ }
if (dn) {
phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
- phy_flags, priv->phy_interface);
+ phy_flags, phy_iface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -332,7 +366,7 @@ int bcmgenet_mii_probe(struct net_device *dev)
phydev->dev_flags = phy_flags;
ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
- priv->phy_interface);
+ phy_iface);
if (ret) {
pr_err("could not attach to PHY\n");
return -ENODEV;
@@ -350,8 +384,6 @@ int bcmgenet_mii_probe(struct net_device *dev)
return ret;
}
- linkmode_copy(phydev->advertising, phydev->supported);
-
/* The internal PHY has its link interrupts routed to the
* Ethernet MAC ISRs. On GENETv5 there is a hardware issue
* that prevents the signaling of link UP interrupts when
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 5e0e0e70d801..b1328c5524b5 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3942,7 +3942,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
}
/* tp->lock is held. */
-static void __tg3_set_one_mac_addr(struct tg3 *tp, u8 *mac_addr, int index)
+static void __tg3_set_one_mac_addr(struct tg3 *tp, const u8 *mac_addr,
+ int index)
{
u32 addr_high, addr_low;
@@ -5746,7 +5747,6 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
udelay(40);
- current_link_up = false;
tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
@@ -9366,7 +9366,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -10273,8 +10273,7 @@ static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
- if (tg3_flag(tp, TSO_CAPABLE) &&
- tg3_asic_rev(tp) == ASIC_REV_5705) {
+ if (tg3_flag(tp, TSO_CAPABLE)) {
rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
} else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
!tg3_flag(tp, IS_5788)) {
@@ -11213,12 +11212,8 @@ static void tg3_reset_task(struct work_struct *work)
}
tg3_netif_start(tp);
-
tg3_full_unlock(tp);
-
- if (!err)
- tg3_phy_start(tp);
-
+ tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
out:
rtnl_unlock();
@@ -16915,19 +16910,18 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
return err;
}
-static int tg3_get_device_address(struct tg3 *tp)
+static int tg3_get_device_address(struct tg3 *tp, u8 *addr)
{
- struct net_device *dev = tp->dev;
u32 hi, lo, mac_offset;
int addr_ok = 0;
int err;
- if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr))
+ if (!eth_platform_get_mac_address(&tp->pdev->dev, addr))
return 0;
if (tg3_flag(tp, IS_SSB_CORE)) {
- err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
- if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
+ err = ssb_gige_get_macaddr(tp->pdev, addr);
+ if (!err && is_valid_ether_addr(addr))
return 0;
}
@@ -16951,41 +16945,41 @@ static int tg3_get_device_address(struct tg3 *tp)
/* First try to get it from MAC address mailbox. */
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
if ((hi >> 16) == 0x484b) {
- dev->dev_addr[0] = (hi >> 8) & 0xff;
- dev->dev_addr[1] = (hi >> 0) & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
+ addr[1] = (hi >> 0) & 0xff;
tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[5] = (lo >> 0) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[5] = (lo >> 0) & 0xff;
/* Some old bootcode may report a 0 MAC address in SRAM */
- addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
+ addr_ok = is_valid_ether_addr(addr);
}
if (!addr_ok) {
/* Next, try NVRAM. */
if (!tg3_flag(tp, NO_NVRAM) &&
!tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
- memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
- memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
+ memcpy(&addr[0], ((char *)&hi) + 2, 2);
+ memcpy(&addr[2], (char *)&lo, sizeof(lo));
}
/* Finally just fetch it out of the MAC control regs. */
else {
hi = tr32(MAC_ADDR_0_HIGH);
lo = tr32(MAC_ADDR_0_LOW);
- dev->dev_addr[5] = lo & 0xff;
- dev->dev_addr[4] = (lo >> 8) & 0xff;
- dev->dev_addr[3] = (lo >> 16) & 0xff;
- dev->dev_addr[2] = (lo >> 24) & 0xff;
- dev->dev_addr[1] = hi & 0xff;
- dev->dev_addr[0] = (hi >> 8) & 0xff;
+ addr[5] = lo & 0xff;
+ addr[4] = (lo >> 8) & 0xff;
+ addr[3] = (lo >> 16) & 0xff;
+ addr[2] = (lo >> 24) & 0xff;
+ addr[1] = hi & 0xff;
+ addr[0] = (hi >> 8) & 0xff;
}
}
- if (!is_valid_ether_addr(&dev->dev_addr[0]))
+ if (!is_valid_ether_addr(addr))
return -EINVAL;
return 0;
}
@@ -17561,6 +17555,7 @@ static int tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
netdev_features_t features = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
err = pci_enable_device(pdev);
if (err) {
@@ -17783,12 +17778,13 @@ static int tg3_init_one(struct pci_dev *pdev,
tp->rx_pending = 63;
}
- err = tg3_get_device_address(tp);
+ err = tg3_get_device_address(tp, addr);
if (err) {
dev_err(&pdev->dev,
"Could not obtain valid ethernet address, aborting\n");
goto err_out_apeunmap;
}
+ eth_hw_addr_set(dev, addr);
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
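In the tg3 hunks, tg3_get_device_address() now fills a caller-supplied buffer and tg3_init_one() commits it with eth_hw_addr_set(); the temporary is declared `u8 addr[ETH_ALEN] __aligned(2)` because several etherdevice helpers access addresses as 16-bit words. A small sketch of the same unpack-validate-commit pattern (the register layout mirrors the mailbox case above; the function name is illustrative):

#include <linux/errno.h>
#include <linux/etherdevice.h>

/* Unpack a MAC stored as two 32-bit words (hi carries bytes 0-1, lo bytes 2-5). */
static int example_addr_from_regs(u32 hi, u32 lo, struct net_device *dev)
{
        u8 addr[ETH_ALEN] __aligned(2); /* keep it u16-aligned for the ether helpers */

        addr[0] = (hi >> 8) & 0xff;
        addr[1] = hi & 0xff;
        addr[2] = (lo >> 24) & 0xff;
        addr[3] = (lo >> 16) & 0xff;
        addr[4] = (lo >> 8) & 0xff;
        addr[5] = lo & 0xff;

        if (!is_valid_ether_addr(addr))
                return -EINVAL;

        eth_hw_addr_set(dev, addr);     /* commit instead of writing dev_addr directly */
        return 0;
}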
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ba47777d9cff..bbdc829c3524 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -875,7 +875,7 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
if (is_zero_ether_addr(netdev->dev_addr))
- ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
+ eth_hw_addr_set(netdev, bnad->perm_addr);
}
/* Control Path Handlers */
@@ -3249,7 +3249,7 @@ bnad_set_mac_address(struct net_device *netdev, void *addr)
err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
if (!err)
- ether_addr_copy(netdev->dev_addr, sa->sa_data);
+ eth_hw_addr_set(netdev, sa->sa_data);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
@@ -3515,7 +3515,6 @@ static void
bnad_uninit(struct bnad *bnad)
{
if (bnad->work_q) {
- flush_workqueue(bnad->work_q);
destroy_workqueue(bnad->work_q);
bnad->work_q = NULL;
}
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d8d87213697c..5620b97b3482 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -243,9 +243,11 @@
#define MACB_NCR_TPF_SIZE 1
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
-#define MACB_SRTSM_OFFSET 15
-#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
+#define MACB_SRTSM_OFFSET 15 /* Store Receive Timestamp to Memory */
+#define MACB_OSSMODE_OFFSET 24 /* Enable One Step Synchro Mode */
#define MACB_OSSMODE_SIZE 1
+#define MACB_MIIONRGMII_OFFSET 28 /* MII Usage on RGMII Interface */
+#define MACB_MIIONRGMII_SIZE 1
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -713,6 +715,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_MIIONRGMII 0x00000200
#define MACB_CAPS_CLK_HW_CHG 0x04000000
#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index d13fb1d31821..ffce528aa00e 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -313,7 +313,7 @@ static void macb_get_hwaddr(struct macb *bp)
addr[5] = (top >> 8) & 0xff;
if (is_valid_ether_addr(addr)) {
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
return;
}
}
@@ -522,21 +522,21 @@ static void macb_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_SGMII &&
state->interface != PHY_INTERFACE_MODE_10GBASER &&
!phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
if (!macb_is_gem(bp) &&
(state->interface == PHY_INTERFACE_MODE_GMII ||
phy_interface_mode_is_rgmii(state->interface))) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
if (state->interface == PHY_INTERFACE_MODE_10GBASER &&
!(bp->caps & MACB_CAPS_HIGH_SPEED &&
bp->caps & MACB_CAPS_PCS)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -547,13 +547,8 @@ static void macb_validate(struct phylink_config *config,
if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
(state->interface == PHY_INTERFACE_MODE_NA ||
state->interface == PHY_INTERFACE_MODE_10GBASER)) {
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseT_Full);
if (state->interface != PHY_INTERFACE_MODE_NA)
goto out;
}
@@ -575,9 +570,8 @@ static void macb_validate(struct phylink_config *config,
phylink_set(mask, 1000baseT_Half);
}
out:
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
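The macb_validate() hunks above replace the open-coded bitmap_zero()/bitmap_and() calls on __ETHTOOL_LINK_MODE_MASK_NBITS with the linkmode helpers. A tiny sketch of building a capability mask and intersecting it with the supported/advertising masks (the mode selection here is illustrative):

#include <linux/linkmode.h>

/* Restrict the masks to the modes a hypothetical MAC supports. */
static void example_restrict_modes(unsigned long *supported,
                                   unsigned long *advertising)
{
        __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

        linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask);
        linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask);
        linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask);

        linkmode_and(supported, supported, mask);
        linkmode_and(advertising, advertising, mask);
}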
static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode,
@@ -684,6 +678,9 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
ctrl |= GEM_BIT(PCSSEL);
ncr |= GEM_BIT(ENABLE_HS_MAC);
+ } else if (bp->caps & MACB_CAPS_MIIONRGMII &&
+ bp->phy_interface == PHY_INTERFACE_MODE_MII) {
+ ncr |= MACB_BIT(MIIONRGMII);
}
}
@@ -900,6 +897,17 @@ static int macb_mdiobus_register(struct macb *bp)
{
struct device_node *child, *np = bp->pdev->dev.of_node;
+ /* If we have a child named mdio, probe it instead of looking for PHYs
+ * directly under the MAC node
+ */
+ child = of_get_child_by_name(np, "mdio");
+ if (child) {
+ int ret = of_mdiobus_register(bp->mii_bus, child);
+
+ of_node_put(child);
+ return ret;
+ }
+
if (of_phy_is_fixed_link(np))
return mdiobus_register(bp->mii_bus);
@@ -4594,7 +4602,8 @@ static const struct macb_config zynq_config = {
};
static const struct macb_config sama7g5_gem_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
+ MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4602,7 +4611,8 @@ static const struct macb_config sama7g5_gem_config = {
};
static const struct macb_config sama7g5_emac_config = {
- .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_USRIO_HAS_CLKEN,
+ .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
+ MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_MIIONRGMII,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4774,7 +4784,7 @@ static int macb_probe(struct platform_device *pdev)
if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
bp->rx_intr_mask |= MACB_BIT(RXUBR);
- err = of_get_mac_address(np, bp->dev->dev_addr);
+ err = of_get_ethdev_address(np, bp->dev);
if (err == -EPROBE_DEFER)
goto err_out_free_netdev;
else if (err)
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index c2e1f163bb14..095c5a2144a7 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -38,7 +38,8 @@ static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp,
return NULL;
}
-static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
{
struct macb *bp = container_of(ptp, struct macb, ptp_clock_info);
unsigned long flags;
@@ -46,7 +47,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
u32 secl, sech;
spin_lock_irqsave(&bp->tsu_clk_lock, flags);
+ ptp_read_system_prets(sts);
first = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
second = gem_readl(bp, TN);
@@ -56,7 +59,9 @@ static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
/* if so, use later read & re-read seconds
* (assume all done within 1s)
*/
+ ptp_read_system_prets(sts);
ts->tv_nsec = gem_readl(bp, TN);
+ ptp_read_system_postts(sts);
secl = gem_readl(bp, TSL);
sech = gem_readl(bp, TSH);
} else {
@@ -161,7 +166,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
}
if (delta > TSU_NSEC_MAX_VAL) {
- gem_tsu_get_time(&bp->ptp_clock_info, &now);
+ gem_tsu_get_time(&bp->ptp_clock_info, &now, NULL);
now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
@@ -192,7 +197,7 @@ static const struct ptp_clock_info gem_ptp_caps_template = {
.pps = 1,
.adjfine = gem_ptp_adjfine,
.adjtime = gem_ptp_adjtime,
- .gettime64 = gem_tsu_get_time,
+ .gettimex64 = gem_tsu_get_time,
.settime64 = gem_tsu_set_time,
.enable = gem_ptp_enable,
};
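The macb_ptp.c hunks convert the clock from .gettime64 to .gettimex64 so PTP_SYS_OFFSET_EXTENDED can pair PHC readings with system timestamps: ptp_read_system_prets()/ptp_read_system_postts() bracket the register read that latches the hardware time, and both are no-ops when sts is NULL, as in the internal callers above. A generic sketch of such a callback, with the device accesses left as placeholders:

#include <linux/ptp_clock_kernel.h>

static int example_gettimex64(struct ptp_clock_info *ptp,
                              struct timespec64 *ts,
                              struct ptp_system_timestamp *sts)
{
        ptp_read_system_prets(sts);     /* system time just before the latching read */
        ts->tv_nsec = 0;                /* placeholder: register read that latches the time */
        ptp_read_system_postts(sts);    /* system time just after it */
        ts->tv_sec = 0;                 /* placeholder: seconds registers, read after the latch */

        return 0;
}

/* Wired up in struct ptp_clock_info as .gettimex64 instead of .gettime64. */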
@@ -251,7 +256,7 @@ static int gem_hw_timestamp(struct macb *bp, u32 dma_desc_ts_1,
* The timestamp only contains lower few bits of seconds,
* so add value from 1588 timer
*/
- gem_tsu_get_time(&bp->ptp_clock_info, &tsu);
+ gem_tsu_get_time(&bp->ptp_clock_info, &tsu, NULL);
/* If the top bit is set in the timestamp,
* but not in 1588 timer, it has rolled over,
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b6a066404f4b..457cb7121000 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -607,7 +607,7 @@ static inline void xgmac_mac_disable(void __iomem *ioaddr)
writel(value, ioaddr + XGMAC_CONTROL);
}
-static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void xgmac_set_mac_addr(void __iomem *ioaddr, const unsigned char *addr,
int num)
{
u32 data;
@@ -1479,7 +1479,7 @@ static int xgmac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
@@ -1693,6 +1693,7 @@ static int xgmac_probe(struct platform_device *pdev)
struct resource *res;
struct net_device *ndev = NULL;
struct xgmac_priv *priv = NULL;
+ u8 addr[ETH_ALEN];
u32 uid;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1785,7 +1786,8 @@ static int xgmac_probe(struct platform_device *pdev)
ndev->max_mtu = XGMAC_MAX_MTU;
/* Get the MAC address */
- xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ xgmac_get_mac_addr(priv->base, addr, 0);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr))
netdev_warn(ndev, "MAC address %pM not valid",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 2a0d64e5797c..73cb03266549 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -411,7 +411,7 @@ void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
if (!ether_addr_equal(netdev->dev_addr, mac)) {
macaddr_changed = true;
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
}
@@ -490,7 +490,6 @@ void cleanup_rx_oom_poll_fn(struct net_device *netdev)
wq = &lio->rxq_status_wq[q_no];
if (wq->wq) {
cancel_delayed_work_sync(&wq->wk.work);
- flush_workqueue(wq->wq);
destroy_workqueue(wq->wq);
wq->wq = NULL;
}
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 2907e13b9df6..12eee2bc7f5c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -892,12 +892,11 @@ liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *
bus = pdev->bus->number;
device = PCI_SLOT(pdev->devfn);
function = PCI_FUNC(pdev->devfn);
- oct_dev->watchdog_task = kthread_create(
- liquidio_watchdog, oct_dev,
- "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
- if (!IS_ERR(oct_dev->watchdog_task)) {
- wake_up_process(oct_dev->watchdog_task);
- } else {
+ oct_dev->watchdog_task = kthread_run(liquidio_watchdog,
+ oct_dev,
+ "liowd/%02hhx:%02hhx.%hhx",
+ bus, device, function);
+ if (IS_ERR(oct_dev->watchdog_task)) {
oct_dev->watchdog_task = NULL;
dev_err(&oct_dev->pci_dev->dev,
"failed to create kernel_thread\n");
@@ -1279,6 +1278,14 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
struct lio *lio;
dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
+ device_lock(&oct->pci_dev->dev);
+ if (oct->devlink) {
+ devlink_unregister(oct->devlink);
+ devlink_free(oct->devlink);
+ oct->devlink = NULL;
+ }
+ device_unlock(&oct->pci_dev->dev);
+
if (!oct->ifcount) {
dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
return 1;
@@ -1300,12 +1307,6 @@ static int liquidio_stop_nic_module(struct octeon_device *oct)
for (i = 0; i < oct->ifcount; i++)
liquidio_destroy_nic_device(oct, i);
- if (oct->devlink) {
- devlink_unregister(oct->devlink);
- devlink_free(oct->devlink);
- oct->devlink = NULL;
- }
-
dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
return 0;
}
@@ -2022,7 +2023,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EIO;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
return 0;
@@ -3632,7 +3633,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
/* By default all interfaces on a single Octeon uses the same
* tx and rx queues
@@ -3749,10 +3750,12 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
}
}
+ device_lock(&octeon_dev->pci_dev->dev);
devlink = devlink_alloc(&liquidio_devlink_ops,
sizeof(struct lio_devlink_priv),
&octeon_dev->pci_dev->dev);
if (!devlink) {
+ device_unlock(&octeon_dev->pci_dev->dev);
dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
goto setup_nic_dev_free;
}
@@ -3760,15 +3763,10 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
lio_devlink = devlink_priv(devlink);
lio_devlink->oct = octeon_dev;
- if (devlink_register(devlink)) {
- devlink_free(devlink);
- dev_err(&octeon_dev->pci_dev->dev,
- "devlink registration failed\n");
- goto setup_nic_dev_free;
- }
-
octeon_dev->devlink = devlink;
octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_register(devlink);
+ device_unlock(&octeon_dev->pci_dev->dev);
return 0;
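The same file also adopts the newer devlink life-cycle: allocate early, populate private state and ports, and call devlink_register() only as the last step, with devlink_unregister()/devlink_free() done first on teardown. A hedged sketch of that ordering, assuming the one-argument devlink_register() of the kernel series this diff targets (where registration has no failure path); the example_* names are illustrative:

#include <net/devlink.h>

struct example_nic;

struct example_priv {
        struct example_nic *nic;
};

static const struct devlink_ops example_devlink_ops;   /* callbacks elided */

static struct devlink *example_devlink_setup(struct device *dev,
                                             struct example_nic *nic)
{
        struct devlink *dl;
        struct example_priv *priv;

        dl = devlink_alloc(&example_devlink_ops, sizeof(*priv), dev);
        if (!dl)
                return NULL;

        priv = devlink_priv(dl);
        priv->nic = nic;

        /* ports, params, etc. would be added here, before registration */
        devlink_register(dl);   /* last step: exposes the instance to user space */
        return dl;
}

static void example_devlink_teardown(struct devlink *dl)
{
        devlink_unregister(dl); /* first step of teardown */
        devlink_free(dl);
}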
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index f6396ac64006..c607756b731f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1168,7 +1168,7 @@ static int liquidio_set_mac(struct net_device *netdev, void *p)
return -EPERM;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data);
return 0;
@@ -2148,7 +2148,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev)
mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
/* Copy MAC Address to OS network device structure */
- ether_addr_copy(netdev->dev_addr, mac);
+ eth_hw_addr_set(netdev, mac);
if (liquidio_setup_io_queues(octeon_dev, i,
lio->linfo.num_txpciq,
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 30463a6d1f8c..4e39d712e121 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -1501,7 +1501,7 @@ static int octeon_mgmt_probe(struct platform_device *pdev)
netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
- result = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ result = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (result)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 691e1475d55e..f2f1ce81fd9c 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -1193,7 +1193,7 @@ static int nic_register_interrupts(struct nicpf *nic)
dev_err(&nic->pdev->dev,
"Request for #%d msix vectors failed, returned %d\n",
nic->num_vec, ret);
- return 1;
+ return ret;
}
/* Register mailbox interrupt handler */
@@ -1311,9 +1311,8 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
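Several probe paths in this series (thunder nic/nicvf/bgx, cxgb4vf) switch their pci_enable_device() error handling to dev_err_probe(), which logs the message, records -EPROBE_DEFER quietly instead of flooding the log, and returns the error code in one expression. A small sketch:

#include <linux/device.h>
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev)
{
        int err;

        err = pci_enable_device(pdev);
        if (err)
                /* logs (or silently notes -EPROBE_DEFER) and returns err */
                return dev_err_probe(&pdev->dev, err,
                                     "Failed to enable PCI device\n");

        return 0;
}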
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index d1667b759522..bb45d5df2856 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -221,8 +221,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
nic->node = mbx.nic_cfg.node_id;
if (!nic->set_mac_pending)
- ether_addr_copy(nic->netdev->dev_addr,
- mbx.nic_cfg.mac_addr);
+ eth_hw_addr_set(nic->netdev, mbx.nic_cfg.mac_addr);
nic->sqs_mode = mbx.nic_cfg.sqs_mode;
nic->loopback_supported = mbx.nic_cfg.loopback_supported;
nic->link_up = false;
@@ -1224,7 +1223,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (ret < 0) {
netdev_err(nic->netdev,
"Req for #%d msix vectors failed\n", nic->num_vec);
- return 1;
+ return ret;
}
sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
@@ -1243,7 +1242,7 @@ static int nicvf_register_misc_interrupt(struct nicvf *nic)
if (!nicvf_check_pf_ready(nic)) {
nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
nicvf_unregister_interrupts(nic);
- return 1;
+ return -EIO;
}
return 0;
@@ -1612,7 +1611,7 @@ static int nicvf_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
if (nic->pdev->msix_enabled) {
if (nicvf_hw_set_mac_addr(nic, netdev))
@@ -2119,10 +2118,8 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
err = pci_enable_device(pdev);
- if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index c36fed9c3d73..574a32f23f96 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1387,10 +1387,10 @@ static int acpi_get_mac_address(struct device *dev, struct acpi_device *adev,
u8 *dst)
{
u8 mac[ETH_ALEN];
- u8 *addr;
+ int ret;
- addr = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac, ETH_ALEN);
- if (!addr) {
+ ret = fwnode_get_mac_address(acpi_fwnode_handle(adev), mac);
+ if (ret) {
dev_err(dev, "MAC address invalid: %pM\n", mac);
return -EINVAL;
}
@@ -1597,9 +1597,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pcim_enable_device(pdev);
if (err) {
- dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
- return err;
+ return dev_err_probe(dev, err, "Failed to enable PCI device\n");
}
err = pci_request_regions(pdev, DRV_NAME);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index d246eee4b6d5..609820e214a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -853,7 +853,7 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
if (!mac->ops->macaddress_set)
return -EOPNOTSUPP;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
mac->ops->macaddress_set(mac, dev->dev_addr);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/gmac.h b/drivers/net/ethernet/chelsio/cxgb/gmac.h
index dfa77491a910..5913eaf442b5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/gmac.h
+++ b/drivers/net/ethernet/chelsio/cxgb/gmac.h
@@ -117,7 +117,7 @@ struct cmac_ops {
const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
- int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+ int (*macaddress_set)(struct cmac *, const u8 mac_addr[6]);
};
typedef struct _cmac_instance cmac_instance;
diff --git a/drivers/net/ethernet/chelsio/cxgb/pm3393.c b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
index c27908e66f5e..0bb37e4680c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb/pm3393.c
+++ b/drivers/net/ethernet/chelsio/cxgb/pm3393.c
@@ -496,7 +496,7 @@ static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
return 0;
}
-static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+static int pm3393_macaddress_set(struct cmac *cmac, const u8 ma[6])
{
u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
diff --git a/drivers/net/ethernet/chelsio/cxgb/subr.c b/drivers/net/ethernet/chelsio/cxgb/subr.c
index 310add28fcf5..007c591b8bf5 100644
--- a/drivers/net/ethernet/chelsio/cxgb/subr.c
+++ b/drivers/net/ethernet/chelsio/cxgb/subr.c
@@ -1140,7 +1140,7 @@ int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi)
adapter->port[i].dev->name);
goto error;
}
- memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i].dev, hw_addr);
init_link_config(&adapter->port[i].link_config, bi);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
index 873c1c7b4ca0..2ad3efb550c2 100644
--- a/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
+++ b/drivers/net/ethernet/chelsio/cxgb/vsc7326.c
@@ -379,7 +379,7 @@ static int mac_intr_clear(struct cmac *mac)
}
/* Expect MAC address to be in network byte order. */
-static int mac_set_address(struct cmac* mac, u8 addr[6])
+static int mac_set_address(struct cmac* mac, const u8 addr[6])
{
u32 val;
int port = mac->instance->index;
@@ -591,7 +591,7 @@ static void port_stats_update(struct cmac *mac)
} hw_stats[] = {
#define HW_STAT(reg, stat_name) \
- { reg, (&((struct cmac_statistics *)NULL)->stat_name) - (u64 *)NULL }
+ { reg, offsetof(struct cmac_statistics, stat_name) / sizeof(u64) }
/* Rx stats */
HW_STAT(RxUnicast, RxUnicastFramesOK),
diff --git a/drivers/net/ethernet/chelsio/cxgb3/common.h b/drivers/net/ethernet/chelsio/cxgb3/common.h
index b706f2fbe4f4..a309016f7f8c 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/common.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/common.h
@@ -710,7 +710,7 @@ int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
int t3_mac_set_speed_duplex_fc(struct cmac *mac, int speed, int duplex, int fc);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 38e47703f9ab..9cf9e33664e4 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -2586,7 +2586,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
if (offload_running(adapter))
write_smt_entry(adapter, pi->port_id);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index 7ff31d1026fb..53feac8da503 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -29,6 +29,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
@@ -3758,8 +3759,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
hw_addr[5] = adapter->params.vpd.eth_base[5] + i;
- memcpy(adapter->port[i]->dev_addr, hw_addr,
- ETH_ALEN);
+ eth_hw_addr_set(adapter->port[i], hw_addr);
init_link_config(&p->link_config, p->phy.caps);
p->phy.ops->power_down(&p->phy, 1);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
index 3af19a550372..1bdc6cad1e49 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/xgmac.c
@@ -240,7 +240,7 @@ static void set_addr_filter(struct cmac *mac, int idx, const u8 * addr)
}
/* Set one of the station's unicast MAC addresses. */
-int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6])
+int t3_mac_set_address(struct cmac *mac, unsigned int idx, const u8 addr[6])
{
if (idx >= mac->nucast)
return -EINVAL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecea3cdd30b3..5657ac8cfca0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1545,7 +1545,7 @@ static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
static inline void t4_set_hw_addr(struct adapter *adapter, int port_idx,
u8 hw_addr[])
{
- ether_addr_copy(adapter->port[port_idx]->dev_addr, hw_addr);
+ eth_hw_addr_set(adapter->port[port_idx], hw_addr);
ether_addr_copy(adapter->port[port_idx]->perm_addr, hw_addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d9cda4ab303..dde1cf51d0ab 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -3468,7 +3468,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 64144b6171d7..e7b4e3ed056c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -9706,7 +9706,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
if (ret)
return ret;
- memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(adap->port[i], addr);
j++;
}
return 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index f55105a4112f..03cb1410d6fc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -40,6 +40,7 @@
#ifndef __CXGB4VF_ADAPTER_H__
#define __CXGB4VF_ADAPTER_H__
+#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
@@ -507,7 +508,7 @@ static inline const char *port_name(struct adapter *adapter, int pidx)
static inline void t4_os_set_hw_addr(struct adapter *adapter, int pidx,
u8 hw_addr[])
{
- memcpy(adapter->port[pidx]->dev_addr, hw_addr, ETH_ALEN);
+ eth_hw_addr_set(adapter->port[pidx], hw_addr);
}
/**
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 49b76fd47daa..64479c464b4e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1218,7 +1218,7 @@ static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
if (ret < 0)
return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2902,10 +2902,8 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
* Initialize generic PCI device state.
*/
err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "cannot enable PCI device\n");
/*
* Reserve PCI resources for the device. If we can't get them some
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
index bcad69c48074..4af5561cbfc5 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c
@@ -870,7 +870,7 @@ static void do_abort_syn_rcv(struct sock *child, struct sock *parent)
* created only after 3 way handshake is done.
*/
sock_orphan(child);
- percpu_counter_inc((child)->sk_prot->orphan_count);
+ INC_ORPHAN_COUNT(child);
chtls_release_resources(child);
chtls_conn_done(child);
} else {
diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
index b1161bdeda4d..f61ca657601c 100644
--- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
+++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.h
@@ -95,7 +95,7 @@ struct deferred_skb_cb {
#define WSCALE_OK(tp) ((tp)->rx_opt.wscale_ok)
#define TSTAMP_OK(tp) ((tp)->rx_opt.tstamp_ok)
#define SACK_OK(tp) ((tp)->rx_opt.sack_ok)
-#define INC_ORPHAN_COUNT(sk) percpu_counter_inc((sk)->sk_prot->orphan_count)
+#define INC_ORPHAN_COUNT(sk) this_cpu_inc(*(sk)->sk_prot->orphan_count)
/* TLS SKB */
#define skb_ulp_tls_inline(skb) (ULP_SKB_CB(skb)->ulp.tls.ofld)
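The chtls hunks follow the core TCP change that turns sk_prot->orphan_count from a struct percpu_counter into a bare per-cpu integer, so incrementing becomes this_cpu_inc() and reading means summing the per-CPU values. A minimal sketch of that counter style (illustrative names, not the TCP code itself):

#include <linux/percpu.h>
#include <linux/cpumask.h>

static DEFINE_PER_CPU(int, example_orphan_count);

static inline void example_orphan_inc(void)
{
        this_cpu_inc(example_orphan_count);     /* cheap, no shared cacheline */
}

static int example_orphan_total(void)
{
        int total = 0, cpu;

        /* Reads are approximate: sum the per-CPU values when needed. */
        for_each_possible_cpu(cpu)
                total += per_cpu(example_orphan_count, cpu);
        return total;
}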
diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c
index d0c4c8b7a15a..4a97aa8e1387 100644
--- a/drivers/net/ethernet/cirrus/cs89x0.c
+++ b/drivers/net/ethernet/cirrus/cs89x0.c
@@ -1227,7 +1227,7 @@ static int set_mac_address(struct net_device *dev, void *p)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
cs89_dbg(0, debug, "%s: Setting MAC address to %pM\n",
dev->name, dev->dev_addr);
@@ -1314,6 +1314,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
int tmp;
unsigned rev_type = 0;
int eeprom_buff[CHKSUM_LEN];
+ u8 addr[ETH_ALEN];
int retval;
/* Initialize the device structure. */
@@ -1387,9 +1388,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
for (i = 0; i < ETH_ALEN / 2; i++) {
unsigned int Addr;
Addr = readreg(dev, PP_IA + i * 2);
- dev->dev_addr[i * 2] = Addr & 0xFF;
- dev->dev_addr[i * 2 + 1] = Addr >> 8;
+ addr[i * 2] = Addr & 0xFF;
+ addr[i * 2 + 1] = Addr >> 8;
}
+ eth_hw_addr_set(dev, addr);
/* Load the Adapter Configuration.
* Note: Barring any more specific information from some
@@ -1464,9 +1466,10 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular)
/* eeprom_buff has 32-bit ints, so we can't just memcpy it */
/* store the initial memory base address */
for (i = 0; i < ETH_ALEN / 2; i++) {
- dev->dev_addr[i * 2] = eeprom_buff[i];
- dev->dev_addr[i * 2 + 1] = eeprom_buff[i] >> 8;
+ addr[i * 2] = eeprom_buff[i];
+ addr[i * 2 + 1] = eeprom_buff[i] >> 8;
}
+ eth_hw_addr_set(dev, addr);
cs89_dbg(1, debug, "%s: new adapter_cnf: 0x%x\n",
dev->name, lp->adapter_cnf);
}
diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c
index 072fac5f5d24..21ba6e893072 100644
--- a/drivers/net/ethernet/cirrus/ep93xx_eth.c
+++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c
@@ -746,7 +746,7 @@ static struct net_device *ep93xx_dev_alloc(struct ep93xx_eth_data *data)
if (dev == NULL)
return NULL;
- memcpy(dev->dev_addr, data->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, data->dev_addr);
dev->ethtool_ops = &ep93xx_ethtool_ops;
dev->netdev_ops = &ep93xx_netdev_ops;
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 6324e80960c3..84251b85fc93 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -541,7 +541,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr);
/* set the Ethernet address */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 12ffc14fbecd..6ded4d9fa32a 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -139,7 +139,7 @@ static void enic_get_drvinfo(struct net_device *netdev,
int err;
err = enic_dev_fw_info(enic, &fw_info);
- /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
+ /* return only when dma_alloc_coherent fails in vnic_dev_fw_info
* For other failures, like devcmd failure, we return previously
* recorded info.
*/
@@ -270,7 +270,7 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &vstats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index d0a8f7106958..aacf141986d5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -882,7 +882,7 @@ static void enic_get_stats(struct net_device *netdev,
int err;
err = enic_dev_stats_dump(enic, &stats);
- /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump
+ /* return only when dma_alloc_coherent fails in vnic_dev_stats_dump
* For other failures, like devcmd failure, we return previously
* recorded stats.
*/
@@ -985,7 +985,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
return -EADDRNOTAVAIL;
}
- memcpy(netdev->dev_addr, addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr);
return 0;
}
@@ -1098,6 +1098,7 @@ static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
static int enic_set_vf_port(struct net_device *netdev, int vf,
struct nlattr *port[])
{
+ static const u8 zero_addr[ETH_ALEN] = {};
struct enic *enic = netdev_priv(netdev);
struct enic_port_profile prev_pp;
struct enic_port_profile *pp;
@@ -1162,7 +1163,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
} else {
memset(pp, 0, sizeof(*pp));
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
} else {
/* Set flag to indicate that the port assoc/disassoc
@@ -1174,7 +1175,7 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
if (pp->request == PORT_REQUEST_DISASSOCIATE) {
eth_zero_addr(pp->mac_addr);
if (vf == PORT_SELF_VF)
- eth_zero_addr(netdev->dev_addr);
+ eth_hw_addr_set(netdev, zero_addr);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index e6a83198c3dd..80f46dbd5117 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -73,9 +73,9 @@ static int enic_set_port_profile(struct enic *enic, int vf)
struct vic_provinfo *vp;
const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ const u8 *client_mac;
char uuid_str[38];
char client_mac_str[18];
- u8 *client_mac;
int err;
ENIC_PP_BY_INDEX(enic, vf, pp, &err);
diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
index 6e745ca4c433..941f175fb911 100644
--- a/drivers/net/ethernet/cortina/gemini.c
+++ b/drivers/net/ethernet/cortina/gemini.c
@@ -1889,7 +1889,7 @@ static int gmac_set_mac_address(struct net_device *netdev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(netdev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, sa->sa_data);
gmac_write_mac_address(netdev);
return 0;
@@ -2467,13 +2467,13 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
DEFAULT_NAPI_WEIGHT);
if (is_valid_ether_addr((void *)port->mac_addr)) {
- memcpy(netdev->dev_addr, port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)port->mac_addr);
} else {
dev_dbg(dev, "ethernet address 0x%08x%08x%08x invalid\n",
port->mac_addr[0], port->mac_addr[1],
port->mac_addr[2]);
dev_info(dev, "using a random ethernet address\n");
- eth_random_addr(netdev->dev_addr);
+ eth_hw_addr_random(netdev);
}
gmac_write_mac_address(netdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index e842de6f6635..0985ab216566 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1425,6 +1425,7 @@ dm9000_probe(struct platform_device *pdev)
enum of_gpio_flags flags;
struct regulator *power;
bool inv_mac_addr = false;
+ u8 addr[ETH_ALEN];
power = devm_regulator_get(dev, "vcc");
if (IS_ERR(power)) {
@@ -1666,11 +1667,12 @@ dm9000_probe(struct platform_device *pdev)
/* try reading the node address from the attached EEPROM */
for (i = 0; i < 6; i += 2)
- dm9000_read_eeprom(db, i / 2, ndev->dev_addr+i);
+ dm9000_read_eeprom(db, i / 2, addr + i);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr) && pdata != NULL) {
mac_src = "platform data";
- memcpy(ndev->dev_addr, pdata->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, pdata->dev_addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -1678,7 +1680,8 @@ dm9000_probe(struct platform_device *pdev)
mac_src = "chip";
for (i = 0; i < 6; i++)
- ndev->dev_addr[i] = ior(db, i+DM9000_PAR);
+ addr[i] = ior(db, i + DM9000_PAR);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 117c26fa5909..d51b3d24a0c8 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -666,8 +666,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct de_private *de = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -685,7 +685,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -695,7 +695,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -710,7 +710,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1713,6 +1713,7 @@ static const struct ethtool_ops de_ethtool_ops = {
static void de21040_get_mac_address(struct de_private *de)
{
+ u8 addr[ETH_ALEN];
unsigned i;
dw32 (ROMCmd, 0); /* Reset the pointer with a dummy write. */
@@ -1724,12 +1725,13 @@ static void de21040_get_mac_address(struct de_private *de)
value = dr32(ROMCmd);
rmb();
} while (value < 0 && --boguscnt > 0);
- de->dev->dev_addr[i] = value;
+ addr[i] = value;
udelay(1);
if (boguscnt <= 0)
pr_warn("timeout reading 21040 MAC address byte %u\n",
i);
}
+ eth_hw_addr_set(de->dev, addr);
}
static void de21040_get_media_info(struct de_private *de)
@@ -1821,8 +1823,7 @@ static void de21041_get_srom_info(struct de_private *de)
#endif
/* store MAC address */
- for (i = 0; i < 6; i ++)
- de->dev->dev_addr[i] = ee_data[i + sa_offset];
+ eth_hw_addr_set(de->dev, &ee_data[sa_offset]);
/* get offset of controller 0 info leaf. ignore 2nd byte. */
ofs = ee_data[SROMC0InfoLeaf];
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 36ab4cbf2ad0..13121c4dcfe6 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -4031,6 +4031,7 @@ get_hw_addr(struct net_device *dev)
int broken, i, k, tmp, status = 0;
u_short j,chksum;
struct de4x5_private *lp = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
broken = de4x5_bad_srom(lp);
@@ -4042,28 +4043,30 @@ get_hw_addr(struct net_device *dev)
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_char) tmp;
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
while ((tmp = inl(DE4X5_APROM)) < 0);
k += (u_short) (tmp << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
} else if (!broken) {
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
- dev->dev_addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
+ addr[i] = (u_char) lp->srom.ieee_addr[i]; i++;
} else if ((broken == SMC) || (broken == ACCTON)) {
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
- dev->dev_addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
+ addr[i] = *((u_char *)&lp->srom + i); i++;
}
} else {
k += (u_char) (tmp = inb(EISA_APROM));
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
- dev->dev_addr[i++] = (u_char) tmp;
+ addr[i++] = (u_char) tmp;
}
if (k > 0xffff) k-=0xffff;
}
if (k == 0xffff) k=0;
+ eth_hw_addr_set(dev, addr);
+
if (lp->bus == PCI) {
if (lp->chipset == DC21040) {
while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4095,8 +4098,9 @@ get_hw_addr(struct net_device *dev)
int x = dev->dev_addr[i];
x = ((x & 0xf) << 4) + ((x & 0xf0) >> 4);
x = ((x & 0x33) << 2) + ((x & 0xcc) >> 2);
- dev->dev_addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
+ addr[i] = ((x & 0x55) << 1) + ((x & 0xaa) >> 1);
}
+ eth_hw_addr_set(dev, addr);
}
#endif /* CONFIG_PPC_PMAC */
@@ -4158,12 +4162,9 @@ test_bad_enet(struct net_device *dev, int status)
if ((tmp == 0) || (tmp == 0x5fa)) {
if ((lp->chipset == last.chipset) &&
(lp->bus_num == last.bus) && (lp->bus_num > 0)) {
- for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
- for (i=ETH_ALEN-1; i>2; --i) {
- dev->dev_addr[i] += 1;
- if (dev->dev_addr[i] != 0) break;
- }
- for (i=0; i<ETH_ALEN; i++) last.addr[i] = dev->dev_addr[i];
+ eth_addr_inc(last.addr);
+ eth_hw_addr_set(dev, last.addr);
+
if (!an_exception(lp)) {
dev->irq = last.irq;
}
@@ -5391,9 +5392,7 @@ de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data
if (netif_queue_stopped(dev))
return -EBUSY;
netif_stop_queue(dev);
- for (i=0; i<ETH_ALEN; i++) {
- dev->dev_addr[i] = tmp.addr[i];
- }
+ eth_hw_addr_set(dev, tmp.addr);
build_setup_frame(dev, PHYS_ADDR_ONLY);
/* Set up the descriptor and give ownership to the card */
load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index c763b692e164..83f1727d1423 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -476,8 +476,7 @@ static int dmfe_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* Set Node address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ eth_hw_addr_set(dev, &db->srom[20]);
err = register_netdev (dev);
if (err)
@@ -1436,9 +1435,9 @@ static void update_cr6(u32 cr6_data, void __iomem *ioaddr)
static void dm9132_id_table(struct net_device *dev)
{
+ const u16 *addrptr = (const u16 *)dev->dev_addr;
struct dmfe_board_info *db = netdev_priv(dev);
void __iomem *ioaddr = db->ioaddr + 0xc0;
- u16 *addrptr = (u16 *)dev->dev_addr;
struct netdev_hw_addr *ha;
u16 i, hash_table[4];
@@ -1477,7 +1476,7 @@ static void send_filter_frame(struct net_device *dev)
struct dmfe_board_info *db = netdev_priv(dev);
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1487,7 +1486,7 @@ static void send_filter_frame(struct net_device *dev)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index fcedd733bacb..79df5a72877b 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -339,7 +339,7 @@ static void tulip_up(struct net_device *dev)
}
} else {
/* This is set_rx_mode(), but without starting the transmitter. */
- u16 *eaddrs = (u16 *)dev->dev_addr;
+ const u16 *eaddrs = (const u16 *)dev->dev_addr;
u16 *setup_frm = &tp->setup_frame[15*6];
dma_addr_t mapping;
@@ -1001,8 +1001,8 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
struct tulip_private *tp = netdev_priv(dev);
u16 hash_table[32];
struct netdev_hw_addr *ha;
+ const u16 *eaddrs;
int i;
- u16 *eaddrs;
memset(hash_table, 0, sizeof(hash_table));
__set_bit_le(255, hash_table); /* Broadcast entry */
@@ -1019,7 +1019,7 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[13*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1029,7 +1029,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct netdev_hw_addr *ha;
- u16 *eaddrs;
+ const u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
@@ -1044,7 +1044,7 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
- eaddrs = (u16 *)dev->dev_addr;
+ eaddrs = (const u16 *)dev->dev_addr;
*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
@@ -1305,6 +1305,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
const char *chip_name = tulip_tbl[chip_idx].chip_name;
unsigned int eeprom_missing = 0;
+ u8 addr[ETH_ALEN] __aligned(2);
unsigned int force_csr0 = 0;
board_idx++;
@@ -1506,13 +1507,15 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
do {
value = ioread32(ioaddr + CSR9);
} while (value < 0 && --boguscnt > 0);
- put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i);
+ put_unaligned_le16(value, ((__le16 *)addr) + i);
sum += value & 0xffff;
}
+ eth_hw_addr_set(dev, addr);
} else if (chip_idx == COMET) {
/* No need to read the EEPROM. */
- put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr);
- put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4);
+ put_unaligned_le32(ioread32(ioaddr + 0xA4), addr);
+ put_unaligned_le16(ioread32(ioaddr + 0xA8), addr + 4);
+ eth_hw_addr_set(dev, addr);
for (i = 0; i < 6; i ++)
sum += dev->dev_addr[i];
} else {
@@ -1575,20 +1578,23 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#endif
for (i = 0; i < 6; i ++) {
- dev->dev_addr[i] = ee_data[i + sa_offset];
+ addr[i] = ee_data[i + sa_offset];
sum += ee_data[i + sa_offset];
}
+ eth_hw_addr_set(dev, addr);
}
/* Lite-On boards have the address byte-swapped. */
if ((dev->dev_addr[0] == 0xA0 ||
dev->dev_addr[0] == 0xC0 ||
dev->dev_addr[0] == 0x02) &&
- dev->dev_addr[1] == 0x00)
+ dev->dev_addr[1] == 0x00) {
for (i = 0; i < 6; i+=2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i+1];
- dev->dev_addr[i+1] = tmp;
+ addr[i] = dev->dev_addr[i+1];
+ addr[i+1] = dev->dev_addr[i];
}
+ eth_hw_addr_set(dev, addr);
+ }
+
/* On the Zynx 315 Etherarray and other multiport boards only the
first Tulip has an EEPROM.
On Sparc systems the mac address is held in the OBP property
@@ -1599,17 +1605,18 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (sum == 0 || sum == 6*0xff) {
#if defined(CONFIG_SPARC)
struct device_node *dp = pci_device_to_OF_node(pdev);
- const unsigned char *addr;
+ const unsigned char *addr2;
int len;
#endif
eeprom_missing = 1;
for (i = 0; i < 5; i++)
- dev->dev_addr[i] = last_phys_addr[i];
- dev->dev_addr[i] = last_phys_addr[i] + 1;
+ addr[i] = last_phys_addr[i];
+ addr[i] = last_phys_addr[i] + 1;
+ eth_hw_addr_set(dev, addr);
#if defined(CONFIG_SPARC)
- addr = of_get_property(dp, "local-mac-address", &len);
- if (addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ addr2 = of_get_property(dp, "local-mac-address", &len);
+ if (addr2 && len == ETH_ALEN)
+ eth_hw_addr_set(dev, addr2);
#endif
#if defined(__i386__) || defined(__x86_64__) /* Patch up x86 BIOS bug. */
if (last_irq)
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index d67ef7d02d6b..77d9058431e3 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -272,6 +272,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
struct uli526x_board_info *db; /* board information structure */
struct net_device *dev;
void __iomem *ioaddr;
+ u8 addr[ETH_ALEN];
int i, err;
ULI526X_DBUG(0, "uli526x_init_one()", 0);
@@ -379,7 +380,7 @@ static int uli526x_init_one(struct pci_dev *pdev,
uw32(DCR13, 0x1b0); //Select ID Table access port
//Read MAC address from CR14
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ur32(DCR14);
+ addr[i] = ur32(DCR14);
//Read end
uw32(DCR13, 0); //Clear CR13
uw32(DCR0, 0); //Clear CR0
@@ -388,8 +389,10 @@ static int uli526x_init_one(struct pci_dev *pdev,
else /*Exist SROM*/
{
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = db->srom[20 + i];
+ addr[i] = db->srom[20 + i];
}
+ eth_hw_addr_set(dev, addr);
+
err = register_netdev (dev);
if (err)
goto err_out_unmap;
@@ -1343,7 +1346,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
void __iomem *ioaddr = db->ioaddr;
struct netdev_hw_addr *ha;
struct tx_desc *txptr;
- u16 * addrptr;
+ const u16 * addrptr;
u32 * suptr;
int i;
@@ -1353,7 +1356,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
suptr = (u32 *) txptr->tx_buf_ptr;
/* Node address */
- addrptr = (u16 *) dev->dev_addr;
+ addrptr = (const u16 *) dev->dev_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 85b99099c6b9..86b1d23eba83 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -355,6 +355,7 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
int chip_idx = ent->driver_data;
int irq;
int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+ __le16 addr[ETH_ALEN / 2];
void __iomem *ioaddr;
i = pcim_enable_device(pdev);
@@ -382,7 +383,8 @@ static int w840_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_netdev;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ addr[i] = cpu_to_le16(eeprom_read(ioaddr, i));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Reset the chip to erase previous misconfiguration.
No hold time required! */
@@ -877,7 +879,7 @@ static void init_registers(struct net_device *dev)
8000 16 longwords 0200 2 longwords 2000 32 longwords
C000 32 longwords 0400 4 longwords */
-#if defined (__i386__) && !defined(MODULE)
+#if defined (__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
diff --git a/drivers/net/ethernet/dec/tulip/xircom_cb.c b/drivers/net/ethernet/dec/tulip/xircom_cb.c
index a8de79355578..8759f9f76b62 100644
--- a/drivers/net/ethernet/dec/tulip/xircom_cb.c
+++ b/drivers/net/ethernet/dec/tulip/xircom_cb.c
@@ -1015,12 +1015,14 @@ static void read_mac_address(struct xircom_private *card)
xw32(CSR10, i + 3);
data_count = xr32(CSR9);
if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
+ u8 addr[ETH_ALEN];
int j;
for (j = 0; j < 6; j++) {
xw32(CSR10, i + j + 4);
- card->dev->dev_addr[j] = xr32(CSR9) & 0xff;
+ addr[j] = xr32(CSR9) & 0xff;
}
+ eth_hw_addr_set(card->dev, addr);
break;
} else if (link == 0) {
break;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 202ecb132053..a301f7e6a440 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -349,8 +349,7 @@ parse_eeprom (struct net_device *dev)
}
/* Set MAC address */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = psrom->mac_addr[i];
+ eth_hw_addr_set(dev, psrom->mac_addr);
if (np->chip_id == CHIP_IP1000A) {
np->led_mode = psrom->led_mode;
@@ -567,7 +566,7 @@ static void rio_hw_init(struct net_device *dev)
*/
for (i = 0; i < 3; i++)
dw16(StationAddr0 + 2 * i,
- cpu_to_le16(((u16 *)dev->dev_addr)[i]));
+ cpu_to_le16(((const u16 *)dev->dev_addr)[i]));
set_multicast (dev);
if (np->coalesce) {
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index c36d186dffed..c710dc17be90 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -508,6 +508,7 @@ static int sundance_probe1(struct pci_dev *pdev,
int bar = 1;
#endif
int phy, phy_end, phy_idx = 0;
+ __le16 addr[ETH_ALEN / 2];
if (pci_enable_device(pdev))
return -EIO;
@@ -528,8 +529,9 @@ static int sundance_probe1(struct pci_dev *pdev,
goto err_out_res;
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
+ eth_hw_addr_set(dev, (u8 *)addr);
np = netdev_priv(dev);
np->ndev = dev;
@@ -1611,7 +1613,7 @@ static int sundance_set_mac_addr(struct net_device *dev, void *data)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
__set_mac_addr(dev);
return 0;
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 6c51cf991dad..92462ed87bc4 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -60,11 +60,11 @@ static void __dnet_set_hwaddr(struct dnet *bp)
{
u16 tmp;
- tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr);
+ tmp = be16_to_cpup((const __be16 *)bp->dev->dev_addr);
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 2));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp);
- tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4));
+ tmp = be16_to_cpup((const __be16 *)(bp->dev->dev_addr + 4));
dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp);
}
@@ -93,7 +93,7 @@ static void dnet_get_hwaddr(struct dnet *bp)
*((__be16 *)(addr + 4)) = cpu_to_be16(tmp);
if (is_valid_ether_addr(addr))
- memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+ eth_hw_addr_set(bp->dev, addr);
}
static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index b2d4fb3feb74..46e3a05e9582 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -479,6 +479,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
struct net_device *net_dev;
struct ec_bhf_priv *priv;
void __iomem *dma_io;
+ u8 addr[ETH_ALEN];
void __iomem *io;
int err = 0;
@@ -539,7 +540,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
if (err < 0)
goto err_free_net_dev;
- memcpy_fromio(net_dev->dev_addr, priv->mii_io + MII_MAC_ADDR, 6);
+ memcpy_fromio(addr, priv->mii_io + MII_MAC_ADDR, ETH_ALEN);
+ eth_hw_addr_set(net_dev, addr);
err = register_netdev(net_dev);
if (err < 0)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 649c5c429bd7..528eb0f223b1 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1080,7 +1080,7 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index c30d6d6f0f3a..db1f3b908582 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -2385,7 +2385,7 @@ int be_pci_fnum_get(struct be_adapter *adapter);
int be_fw_wait_ready(struct be_adapter *adapter);
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
bool permanent, u32 if_handle, u32 pmac_id);
-int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id,
+int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, u32 if_id,
u32 *pmac_id, u32 domain);
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id,
u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 361c1c87c183..d51f24c9e1b8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -272,7 +272,7 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
iowrite32(val, adapter->db + DB_CQ_OFFSET);
}
-static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
+static int be_dev_mac_add(struct be_adapter *adapter, const u8 *mac)
{
int i;
@@ -369,7 +369,7 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
/* Remember currently programmed MAC */
ether_addr_copy(adapter->dev_mac, addr->sa_data);
done:
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
return 0;
err:
@@ -4599,7 +4599,7 @@ static int be_mac_setup(struct be_adapter *adapter)
if (status)
return status;
- memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(adapter->netdev, mac);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
/* Initial MAC for BE3 VFs is already programmed by PF */
@@ -4621,7 +4621,6 @@ static void be_destroy_err_recovery_workq(void)
if (!be_err_recovery_workq)
return;
- flush_workqueue(be_err_recovery_workq);
destroy_workqueue(be_err_recovery_workq);
be_err_recovery_workq = NULL;
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ed1ed48e7483..b1c8ffea6ad2 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -707,20 +707,16 @@ static int ethoc_mdio_probe(struct net_device *dev)
else
phy = phy_find_first(priv->mdio);
- if (!phy) {
- dev_err(&dev->dev, "no PHY found\n");
- return -ENXIO;
- }
+ if (!phy)
+ return dev_err_probe(&dev->dev, -ENXIO, "no PHY found\n");
priv->old_duplex = -1;
priv->old_link = -1;
err = phy_connect_direct(dev, phy, ethoc_mdio_poll,
PHY_INTERFACE_MODE_GMII);
- if (err) {
- dev_err(&dev->dev, "could not attach to PHY\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&dev->dev, err, "could not attach to PHY\n");
phy_set_max_speed(phy, SPEED_100);
@@ -806,8 +802,8 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
static void ethoc_do_set_mac_address(struct net_device *dev)
{
+ const unsigned char *mac = dev->dev_addr;
struct ethoc *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
ethoc_write(priv, MAC_ADDR0, (mac[2] << 24) | (mac[3] << 16) |
(mac[4] << 8) | (mac[5] << 0));
@@ -820,7 +816,7 @@ static int ethoc_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
ethoc_do_set_mac_address(dev);
return 0;
}
@@ -1148,18 +1144,22 @@ static int ethoc_probe(struct platform_device *pdev)
/* Allow the platform setup code to pass in a MAC address. */
if (pdata) {
- ether_addr_copy(netdev->dev_addr, pdata->hwaddr);
+ eth_hw_addr_set(netdev, pdata->hwaddr);
priv->phy_id = pdata->phy_id;
} else {
- of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ of_get_ethdev_address(pdev->dev.of_node, netdev);
priv->phy_id = -1;
}
/* Check that the given MAC address is valid. If it isn't, read the
* current MAC from the controller.
*/
- if (!is_valid_ether_addr(netdev->dev_addr))
- ethoc_get_mac_address(netdev, netdev->dev_addr);
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
+ ethoc_get_mac_address(netdev, addr);
+ eth_hw_addr_set(netdev, addr);
+ }
/* Check the MAC again for validity, if it still isn't choose and
* program a random one.
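/*
 * Several hunks in this series (ethoc above, enetc later) collapse the
 * "dev_err(); return err;" idiom into dev_err_probe(), which stays quiet for
 * -EPROBE_DEFER, records the deferral reason, and returns the error it was
 * given.  A minimal sketch, assuming a hypothetical example_attach_phy()
 * probe step:
 */
#include <linux/device.h>

static int example_probe_step(struct device *dev)
{
	int err = example_attach_phy(dev);	/* hypothetical */

	if (err)
		return dev_err_probe(dev, err, "could not attach to PHY\n");

	return 0;
}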
diff --git a/drivers/net/ethernet/ezchip/Kconfig b/drivers/net/ethernet/ezchip/Kconfig
index 38aa824efb25..9241b9b1c7a3 100644
--- a/drivers/net/ethernet/ezchip/Kconfig
+++ b/drivers/net/ethernet/ezchip/Kconfig
@@ -18,7 +18,7 @@ if NET_VENDOR_EZCHIP
config EZCHIP_NPS_MANAGEMENT_ENET
tristate "EZchip NPS management enet support"
- depends on OF_IRQ && OF_NET
+ depends on OF_IRQ
depends on HAS_IOMEM
help
Simple LAN device for debug or management purposes.
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index f9a288a6ec8c..323340826dab 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -421,7 +421,7 @@ static s32 nps_enet_set_mac_address(struct net_device *ndev, void *p)
res = eth_mac_addr(ndev, p);
if (!res) {
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
nps_enet_set_hw_mac_address(ndev);
}
@@ -601,7 +601,7 @@ static s32 nps_enet_probe(struct platform_device *pdev)
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs_base);
/* set kernel MAC address to dev */
- err = of_get_mac_address(dev->of_node, ndev->dev_addr);
+ err = of_get_ethdev_address(dev->of_node, ndev);
if (err)
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index ff76e401a014..97c5d70de76e 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -182,13 +182,10 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
u8 mac[ETH_ALEN];
unsigned int m;
unsigned int l;
- void *addr;
- addr = device_get_mac_address(priv->dev, mac, ETH_ALEN);
- if (addr) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ if (!device_get_ethdev_address(priv->dev, priv->netdev)) {
dev_info(priv->dev, "Read MAC address %pM from device tree\n",
- mac);
+ priv->netdev->dev_addr);
return;
}
@@ -203,7 +200,7 @@ static void ftgmac100_initial_mac(struct ftgmac100 *priv)
mac[5] = l & 0xff;
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
} else {
eth_hw_addr_random(priv->netdev);
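/*
 * ftgmac100 above moves from device_get_mac_address() plus ether_addr_copy()
 * to device_get_ethdev_address(), which writes the firmware-provided address
 * straight into the net_device on success.  The usual fallback chain
 * (firmware, then chip registers, then a random address) then looks roughly
 * like the sketch below; example_read_chip_mac() stands in for the
 * driver-specific register read and is hypothetical.
 */
#include <linux/etherdevice.h>
#include <linux/property.h>

static void example_initial_mac(struct device *dev, struct net_device *ndev)
{
	u8 mac[ETH_ALEN];

	if (!device_get_ethdev_address(dev, ndev))
		return;				/* DT/ACPI provided it */

	example_read_chip_mac(dev, mac);	/* hypothetical */
	if (is_valid_ether_addr(mac))
		eth_hw_addr_set(ndev, mac);
	else
		eth_hw_addr_random(ndev);	/* last resort */
}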
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 25c91b3c5fd3..b3939a5f7b03 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -482,6 +482,7 @@ static int fealnx_init_one(struct pci_dev *pdev,
struct net_device *dev;
void *ring_space;
dma_addr_t ring_dma;
+ u8 addr[ETH_ALEN];
#ifdef USE_IO_OPS
int bar = 0;
#else
@@ -525,7 +526,8 @@ static int fealnx_init_one(struct pci_dev *pdev,
/* read ethernet id */
for (i = 0; i < 6; ++i)
- dev->dev_addr[i] = ioread8(ioaddr + PAR0 + i);
+ addr[i] = ioread8(ioaddr + PAR0 + i);
+ eth_hw_addr_set(dev, addr);
/* Reset the chip to erase previous misconfiguration. */
iowrite32(0x00000001, ioaddr + BCR);
@@ -827,7 +829,7 @@ static int netdev_open(struct net_device *dev)
return -EAGAIN;
for (i = 0; i < 3; i++)
- iowrite16(((unsigned short*)dev->dev_addr)[i],
+ iowrite16(((const unsigned short *)dev->dev_addr)[i],
ioaddr + PAR0 + i*2);
init_ring(dev);
@@ -857,7 +859,7 @@ static int netdev_open(struct net_device *dev)
np->bcrvalue |= 0x04; /* big-endian */
#endif
-#if defined(__i386__) && !defined(MODULE)
+#if defined(__i386__) && !defined(MODULE) && !defined(CONFIG_UML)
if (boot_cpu_data.x86 <= 4)
np->crvalue = 0xa00;
else
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 685d2d8a3b36..6b2927d863e2 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -268,11 +268,11 @@ static int dpaa_netdev_init(struct net_device *net_dev,
if (is_valid_ether_addr(mac_addr)) {
memcpy(net_dev->perm_addr, mac_addr, net_dev->addr_len);
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
eth_hw_addr_random(net_dev);
err = priv->mac_dev->change_addr(priv->mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err) {
dev_err(dev, "Failed to set random MAC address\n");
return -EINVAL;
@@ -452,7 +452,7 @@ static int dpaa_set_mac_address(struct net_device *net_dev, void *addr)
mac_dev = priv->mac_dev;
err = mac_dev->change_addr(mac_dev->fman_mac,
- (enet_addr_t *)net_dev->dev_addr);
+ (const enet_addr_t *)net_dev->dev_addr);
if (err < 0) {
netif_err(priv, drv, net_dev, "mac_dev->change_addr() = %d\n",
err);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
index 605a39f892b9..7fefe1574b6a 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c
@@ -189,12 +189,11 @@ static const struct devlink_ops dpaa2_eth_devlink_ops = {
.trap_group_action_set = dpaa2_eth_dl_trap_group_action_set,
};
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv)
{
struct net_device *net_dev = priv->net_dev;
struct device *dev = net_dev->dev.parent;
struct dpaa2_eth_devlink_priv *dl_priv;
- int err;
priv->devlink =
devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev);
@@ -204,25 +203,23 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
}
dl_priv = devlink_priv(priv->devlink);
dl_priv->dpaa2_priv = priv;
-
- err = devlink_register(priv->devlink);
- if (err) {
- dev_err(dev, "devlink_register() = %d\n", err);
- goto devlink_free;
- }
-
return 0;
+}
-devlink_free:
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv)
+{
devlink_free(priv->devlink);
+}
- return err;
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv)
+{
+ devlink_register(priv->devlink);
}
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv)
{
devlink_unregister(priv->devlink);
- devlink_free(priv->devlink);
}
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv)
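/*
 * The dpaa2-eth devlink rework above splits allocation from registration so
 * that devlink_register() can run last in probe, after the netdev and traps
 * are set up, and devlink_unregister() first in remove.  In this kernel
 * devlink_register() returns void, hence the dropped error handling.  A rough
 * sketch of the resulting probe ordering; all example_*() steps are
 * placeholders, not the driver's functions:
 */
static int example_probe(struct example_priv *priv)
{
	int err;

	err = example_dl_alloc(priv);		/* devlink_alloc() + priv setup */
	if (err)
		return err;

	err = example_setup_netdev(priv);	/* hypothetical */
	if (err) {
		example_dl_free(priv);		/* devlink_free() */
		return err;
	}

	example_dl_register(priv);		/* devlink_register(), void */
	return 0;
}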
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
index 7065c71ed7b8..714e961e7a77 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
@@ -533,6 +533,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv,
percpu_stats->rx_packets++;
percpu_stats->rx_bytes += dpaa2_fd_get_len(fd);
+ ch->stats.bytes_per_cdan += dpaa2_fd_get_len(fd);
list_add_tail(&skb->list, ch->rx_list);
@@ -641,6 +642,7 @@ static int dpaa2_eth_consume_frames(struct dpaa2_eth_channel *ch,
fq->stats.frames += cleaned;
ch->stats.frames += cleaned;
+ ch->stats.frames_per_cdan += cleaned;
/* A dequeue operation only pulls frames from a single queue
* into the store. Return the frame queue as an out param.
@@ -1264,7 +1266,7 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev)
/* Tx confirmation frame processing routine */
static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
- struct dpaa2_eth_channel *ch __always_unused,
+ struct dpaa2_eth_channel *ch,
const struct dpaa2_fd *fd,
struct dpaa2_eth_fq *fq)
{
@@ -1279,6 +1281,7 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv,
percpu_extras = this_cpu_ptr(priv->percpu_extras);
percpu_extras->tx_conf_frames++;
percpu_extras->tx_conf_bytes += fd_len;
+ ch->stats.bytes_per_cdan += fd_len;
/* Check frame errors in the FD field */
fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK;
@@ -1601,6 +1604,12 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget)
}
} while (store_cleaned);
+ /* Update NET DIM with the values for this CDAN */
+ dpaa2_io_update_net_dim(ch->dpio, ch->stats.frames_per_cdan,
+ ch->stats.bytes_per_cdan);
+ ch->stats.frames_per_cdan = 0;
+ ch->stats.bytes_per_cdan = 0;
+
/* We didn't consume the entire budget, so finish napi and
* re-enable data availability notifications
*/
@@ -4013,7 +4022,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
return err;
}
}
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else if (is_zero_ether_addr(dpni_mac_addr)) {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
@@ -4038,7 +4047,7 @@ static int dpaa2_eth_set_mac_addr(struct dpaa2_eth_priv *priv)
/* NET_ADDR_PERM is default, all we have to do is
* fill in the device addr.
*/
- memcpy(net_dev->dev_addr, dpni_mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, dpni_mac_addr);
}
return 0;
@@ -4431,7 +4440,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_connect_mac;
- err = dpaa2_eth_dl_register(priv);
+ err = dpaa2_eth_dl_alloc(priv);
if (err)
goto err_dl_register;
@@ -4453,6 +4462,7 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
dpaa2_dbg_add(priv);
#endif
+ dpaa2_eth_dl_register(priv);
dev_info(dev, "Probed interface %s\n", net_dev->name);
return 0;
@@ -4461,7 +4471,7 @@ err_netdev_reg:
err_dl_port_add:
dpaa2_eth_dl_traps_unregister(priv);
err_dl_trap_register:
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
err_dl_register:
dpaa2_eth_disconnect_mac(priv);
err_connect_mac:
@@ -4508,6 +4518,8 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
net_dev = dev_get_drvdata(dev);
priv = netdev_priv(net_dev);
+ dpaa2_eth_dl_unregister(priv);
+
#ifdef CONFIG_DEBUG_FS
dpaa2_dbg_remove(priv);
#endif
@@ -4519,7 +4531,7 @@ static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev)
dpaa2_eth_dl_port_del(priv);
dpaa2_eth_dl_traps_unregister(priv);
- dpaa2_eth_dl_unregister(priv);
+ dpaa2_eth_dl_free(priv);
if (priv->do_link_poll)
kthread_stop(priv->poll_thread);
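/*
 * The dpaa2-eth NAPI changes above accumulate frames and bytes per CDAN and
 * hand them to the dynamic interrupt moderation (DIM) engine once per poll,
 * via dpaa2_io_update_net_dim().  The generic shape of that pattern, using
 * the helpers from <linux/dim.h> as I understand them, is roughly:
 */
#include <linux/dim.h>

static void example_feed_dim(struct dim *dim, u16 event_ctr,
			     u64 frames, u64 bytes)
{
	struct dim_sample sample = {};

	dim_update_sample(event_ctr, frames, bytes, &sample);
	net_dim(dim, sample);	/* may schedule work to retune coalescing */
}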
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
index cdb623d5f2c1..2085844227fe 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
@@ -384,6 +384,8 @@ struct dpaa2_eth_ch_stats {
__u64 xdp_redirect;
/* Must be last, does not show up in ethtool stats */
__u64 frames;
+ __u64 frames_per_cdan;
+ __u64 bytes_per_cdan;
};
/* Maximum number of queues associated with a DPNI */
@@ -725,7 +727,10 @@ void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
-int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
+int dpaa2_eth_dl_alloc(struct dpaa2_eth_priv *priv);
+void dpaa2_eth_dl_free(struct dpaa2_eth_priv *priv);
+
+void dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv);
void dpaa2_eth_dl_unregister(struct dpaa2_eth_priv *priv);
int dpaa2_eth_dl_port_add(struct dpaa2_eth_priv *priv);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
index 2da5f881f630..adb8ce5306ee 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c
@@ -820,7 +820,63 @@ static int dpaa2_eth_set_tunable(struct net_device *net_dev,
return err;
}
+static int dpaa2_eth_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio = priv->channel[0]->dpio;
+
+ dpaa2_io_get_irq_coalescing(dpio, &ic->rx_coalesce_usecs);
+ ic->use_adaptive_rx_coalesce = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ return 0;
+}
+
+static int dpaa2_eth_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ic,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct dpaa2_eth_priv *priv = netdev_priv(dev);
+ struct dpaa2_io *dpio;
+ int prev_adaptive;
+ u32 prev_rx_usecs;
+ int i, j, err;
+
+ /* Keep track of the previous value, just in case we fail */
+ dpio = priv->channel[0]->dpio;
+ dpaa2_io_get_irq_coalescing(dpio, &prev_rx_usecs);
+ prev_adaptive = dpaa2_io_get_adaptive_coalescing(dpio);
+
+ /* Setup new value for rx coalescing */
+ for (i = 0; i < priv->num_channels; i++) {
+ dpio = priv->channel[i]->dpio;
+
+ dpaa2_io_set_adaptive_coalescing(dpio,
+ ic->use_adaptive_rx_coalesce);
+ err = dpaa2_io_set_irq_coalescing(dpio, ic->rx_coalesce_usecs);
+ if (err)
+ goto restore_rx_usecs;
+ }
+
+ return 0;
+
+restore_rx_usecs:
+ for (j = 0; j < i; j++) {
+ dpio = priv->channel[j]->dpio;
+
+ dpaa2_io_set_irq_coalescing(dpio, prev_rx_usecs);
+ dpaa2_io_set_adaptive_coalescing(dpio, prev_adaptive);
+ }
+
+ return err;
+}
+
const struct ethtool_ops dpaa2_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
+ ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
.get_drvinfo = dpaa2_eth_get_drvinfo,
.nway_reset = dpaa2_eth_nway_reset,
.get_link = ethtool_op_get_link,
@@ -836,4 +892,6 @@ const struct ethtool_ops dpaa2_ethtool_ops = {
.get_ts_info = dpaa2_eth_get_ts_info,
.get_tunable = dpaa2_eth_get_tunable,
.set_tunable = dpaa2_eth_set_tunable,
+ .get_coalesce = dpaa2_eth_get_coalesce,
+ .set_coalesce = dpaa2_eth_set_coalesce,
};
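/*
 * dpaa2_eth_set_coalesce() above applies the new RX coalescing values to
 * every channel and rolls the already-updated channels back if any dpio call
 * fails.  The ethtool core only forwards parameters listed in
 * supported_coalesce_params (here RX_USECS and USE_ADAPTIVE_RX), so the
 * handlers never see fields they cannot honour.  The rollback idiom in
 * isolation, with hypothetical per-channel getter/setter helpers:
 */
static int example_apply_all(struct example_priv *priv, u32 usecs)
{
	u32 prev;
	int i, j, err;

	example_get_usecs(priv->ch[0], &prev);		/* hypothetical */

	for (i = 0; i < priv->num_ch; i++) {
		err = example_set_usecs(priv->ch[i], usecs);	/* hypothetical */
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	for (j = 0; j < i; j++)
		example_set_usecs(priv->ch[j], prev);
	return err;
}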
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
index ae6d382d8735..ef8f0a055024 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.c
@@ -139,7 +139,7 @@ static void dpaa2_mac_validate(struct phylink_config *config,
case PHY_INTERFACE_MODE_NA:
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_USXGMII:
- phylink_set(mask, 10000baseT_Full);
+ phylink_set_10g_modes(mask);
if (state->interface == PHY_INTERFACE_MODE_10GBASER)
break;
phylink_set(mask, 5000baseT_Full);
diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
index 175f15c46842..d039457928b0 100644
--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
@@ -980,7 +980,7 @@ static int dpaa2_switch_port_set_mac_addr(struct ethsw_port_priv *port_priv)
/* First check if firmware has any address configured by bootloader */
if (!is_zero_ether_addr(mac_addr)) {
- memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len);
+ eth_hw_addr_set(net_dev, mac_addr);
} else {
/* No MAC address configured, fill in net_dev->dev_addr
* with a random one
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
index 042327b9981f..504e12554079 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc.c
@@ -7,7 +7,9 @@
#include <linux/udp.h>
#include <linux/vmalloc.h>
#include <linux/ptp_classify.h>
+#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
+#include <net/tso.h>
static int enetc_num_stack_tx_queues(struct enetc_ndev_priv *priv)
{
@@ -314,12 +316,261 @@ dma_err:
return 0;
}
+static void enetc_map_tx_tso_hdr(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, int *i, int hdr_len,
+ int data_len)
+{
+ union enetc_tx_bd txbd_tmp;
+ u8 flags = 0, e_flags = 0;
+ dma_addr_t addr;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+ addr = tx_ring->tso_headers_dma + *i * TSO_HEADER_SIZE;
+
+ if (skb_vlan_tag_present(skb))
+ flags |= ENETC_TXBD_FLAGS_EX;
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(hdr_len);
+
+ /* first BD needs frm_len and offload flags set */
+ txbd_tmp.frm_len = cpu_to_le16(hdr_len + data_len);
+ txbd_tmp.flags = flags;
+
+ /* For the TSO header we do not set the dma address since we do not
+ * want it unmapped when we do cleanup. We still set len so that we
+ * count the bytes sent.
+ */
+ tx_swbd->len = hdr_len;
+ tx_swbd->do_twostep_tstamp = false;
+ tx_swbd->check_wb = false;
+
+ /* Actually write the header in the BD */
+ *txbd = txbd_tmp;
+
+ /* Add extension BD for VLAN */
+ if (flags & ENETC_TXBD_FLAGS_EX) {
+ /* Get the next BD */
+ enetc_bdr_idx_inc(tx_ring, i);
+ txbd = ENETC_TXBD(*tx_ring, *i);
+ tx_swbd = &tx_ring->tx_swbd[*i];
+ prefetchw(txbd);
+
+ /* Setup the VLAN fields */
+ enetc_clear_tx_bd(&txbd_tmp);
+ txbd_tmp.ext.vid = cpu_to_le16(skb_vlan_tag_get(skb));
+ txbd_tmp.ext.tpid = 0; /* < C-TAG */
+ e_flags |= ENETC_TXBD_E_FLAGS_VLAN_INS;
+
+ /* Write the BD */
+ txbd_tmp.ext.e_flags = e_flags;
+ *txbd = txbd_tmp;
+ }
+}
+
+static int enetc_map_tx_tso_data(struct enetc_bdr *tx_ring, struct sk_buff *skb,
+ struct enetc_tx_swbd *tx_swbd,
+ union enetc_tx_bd *txbd, char *data,
+ int size, bool last_bd)
+{
+ union enetc_tx_bd txbd_tmp;
+ dma_addr_t addr;
+ u8 flags = 0;
+
+ enetc_clear_tx_bd(&txbd_tmp);
+
+ addr = dma_map_single(tx_ring->dev, data, size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_ring->dev, addr))) {
+ netdev_err(tx_ring->ndev, "DMA map error\n");
+ return -ENOMEM;
+ }
+
+ if (last_bd) {
+ flags |= ENETC_TXBD_FLAGS_F;
+ tx_swbd->is_eof = 1;
+ }
+
+ txbd_tmp.addr = cpu_to_le64(addr);
+ txbd_tmp.buf_len = cpu_to_le16(size);
+ txbd_tmp.flags = flags;
+
+ tx_swbd->dma = addr;
+ tx_swbd->len = size;
+ tx_swbd->dir = DMA_TO_DEVICE;
+
+ *txbd = txbd_tmp;
+
+ return 0;
+}
+
+static __wsum enetc_tso_hdr_csum(struct tso_t *tso, struct sk_buff *skb,
+ char *hdr, int hdr_len, int *l4_hdr_len)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ int mac_hdr_len = skb_network_offset(skb);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = 0;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = 0;
+ }
+
+ /* Compute the IP checksum. This is necessary since tso_build_hdr()
+ * already incremented the IP ID field.
+ */
+ if (!tso->ipv6) {
+ struct iphdr *iph = (void *)(hdr + mac_hdr_len);
+
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ }
+
+ /* Compute the checksum over the L4 header. */
+ *l4_hdr_len = hdr_len - skb_transport_offset(skb);
+ return csum_partial(l4_hdr, *l4_hdr_len, 0);
+}
+
+static void enetc_tso_complete_csum(struct enetc_bdr *tx_ring, struct tso_t *tso,
+ struct sk_buff *skb, char *hdr, int len,
+ __wsum sum)
+{
+ char *l4_hdr = hdr + skb_transport_offset(skb);
+ __sum16 csum_final;
+
+ /* Complete the L4 checksum by appending the pseudo-header to the
+ * already computed checksum.
+ */
+ if (!tso->ipv6)
+ csum_final = csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ len, ip_hdr(skb)->protocol, sum);
+ else
+ csum_final = csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ len, ipv6_hdr(skb)->nexthdr, sum);
+
+ if (tso->tlen != sizeof(struct udphdr)) {
+ struct tcphdr *tcph = (struct tcphdr *)(l4_hdr);
+
+ tcph->check = csum_final;
+ } else {
+ struct udphdr *udph = (struct udphdr *)(l4_hdr);
+
+ udph->check = csum_final;
+ }
+}
+
+static int enetc_map_tx_tso_buffs(struct enetc_bdr *tx_ring, struct sk_buff *skb)
+{
+ int hdr_len, total_len, data_len;
+ struct enetc_tx_swbd *tx_swbd;
+ union enetc_tx_bd *txbd;
+ struct tso_t tso;
+ __wsum csum, csum2;
+ int count = 0, pos;
+ int err, i, bd_data_num;
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ hdr_len = tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ i = tx_ring->next_to_use;
+
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Get the BD */
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Determine the length of this packet */
+ data_len = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_len;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = tx_ring->tso_headers + i * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_len, total_len == 0);
+
+ /* compute the csum over the L4 header */
+ csum = enetc_tso_hdr_csum(&tso, skb, hdr, hdr_len, &pos);
+ enetc_map_tx_tso_hdr(tx_ring, skb, tx_swbd, txbd, &i, hdr_len, data_len);
+ bd_data_num = 0;
+ count++;
+
+ while (data_len > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_len);
+
+ /* Advance the index in the BDR */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ txbd = ENETC_TXBD(*tx_ring, i);
+ tx_swbd = &tx_ring->tx_swbd[i];
+ prefetchw(txbd);
+
+ /* Compute the checksum over this segment of data and
+ * add it to the csum already computed (over the L4
+ * header and possible other data segments).
+ */
+ csum2 = csum_partial(tso.data, size, 0);
+ csum = csum_block_add(csum, csum2, pos);
+ pos += size;
+
+ err = enetc_map_tx_tso_data(tx_ring, skb, tx_swbd, txbd,
+ tso.data, size,
+ size == data_len);
+ if (err)
+ goto err_map_data;
+
+ data_len -= size;
+ count++;
+ bd_data_num++;
+ tso_build_data(skb, &tso, size);
+
+ if (unlikely(bd_data_num >= ENETC_MAX_SKB_FRAGS && data_len))
+ goto err_chained_bd;
+ }
+
+ enetc_tso_complete_csum(tx_ring, &tso, skb, hdr, pos, csum);
+
+ if (total_len == 0)
+ tx_swbd->skb = skb;
+
+ /* Go to the next BD */
+ enetc_bdr_idx_inc(tx_ring, &i);
+ }
+
+ tx_ring->next_to_use = i;
+ enetc_update_tx_ring_tail(tx_ring);
+
+ return count;
+
+err_map_data:
+ dev_err(tx_ring->dev, "DMA map error");
+
+err_chained_bd:
+ do {
+ tx_swbd = &tx_ring->tx_swbd[i];
+ enetc_free_tx_frame(tx_ring, tx_swbd);
+ if (i == 0)
+ i = tx_ring->bd_count;
+ i--;
+ } while (count--);
+
+ return 0;
+}
+
static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
struct net_device *ndev)
{
struct enetc_ndev_priv *priv = netdev_priv(ndev);
struct enetc_bdr *tx_ring;
- int count;
+ int count, err;
/* Queue one-step Sync packet if already locked */
if (skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP) {
@@ -332,19 +583,35 @@ static netdev_tx_t enetc_start_xmit(struct sk_buff *skb,
tx_ring = priv->tx_ring[skb->queue_mapping];
- if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
- if (unlikely(skb_linearize(skb)))
- goto drop_packet_err;
+ if (skb_is_gso(skb)) {
+ if (enetc_bd_unused(tx_ring) < tso_count_descs(skb)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
- if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
- netif_stop_subqueue(ndev, tx_ring->index);
- return NETDEV_TX_BUSY;
- }
+ enetc_lock_mdio();
+ count = enetc_map_tx_tso_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ } else {
+ if (unlikely(skb_shinfo(skb)->nr_frags > ENETC_MAX_SKB_FRAGS))
+ if (unlikely(skb_linearize(skb)))
+ goto drop_packet_err;
+
+ count = skb_shinfo(skb)->nr_frags + 1; /* fragments + head */
+ if (enetc_bd_unused(tx_ring) < ENETC_TXBDS_NEEDED(count)) {
+ netif_stop_subqueue(ndev, tx_ring->index);
+ return NETDEV_TX_BUSY;
+ }
- enetc_lock_mdio();
- count = enetc_map_tx_buffs(tx_ring, skb);
- enetc_unlock_mdio();
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ err = skb_checksum_help(skb);
+ if (err)
+ goto drop_packet_err;
+ }
+ enetc_lock_mdio();
+ count = enetc_map_tx_buffs(tx_ring, skb);
+ enetc_unlock_mdio();
+ }
if (unlikely(!count))
goto drop_packet_err;
@@ -546,10 +813,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
bool is_eof = tx_swbd->is_eof;
if (unlikely(tx_swbd->check_wb)) {
- struct enetc_ndev_priv *priv = netdev_priv(ndev);
- union enetc_tx_bd *txbd;
-
- txbd = ENETC_TXBD(*tx_ring, i);
+ union enetc_tx_bd *txbd = ENETC_TXBD(*tx_ring, i);
if (txbd->flags & ENETC_TXBD_FLAGS_W &&
tx_swbd->do_twostep_tstamp) {
@@ -567,8 +831,7 @@ static bool enetc_clean_tx_ring(struct enetc_bdr *tx_ring, int napi_budget)
if (xdp_frame) {
xdp_return_frame(xdp_frame);
} else if (skb) {
- if (unlikely(tx_swbd->skb->cb[0] &
- ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
+ if (unlikely(skb->cb[0] & ENETC_F_TX_ONESTEP_SYNC_TSTAMP)) {
/* Start work to release lock for next one-step
* timestamping packet. And send one skb in
* tx_skbs queue if has.
@@ -1493,15 +1756,32 @@ static int enetc_alloc_txbdr(struct enetc_bdr *txr)
return -ENOMEM;
err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd));
- if (err) {
- vfree(txr->tx_swbd);
- return err;
+ if (err)
+ goto err_alloc_bdr;
+
+ txr->tso_headers = dma_alloc_coherent(txr->dev,
+ txr->bd_count * TSO_HEADER_SIZE,
+ &txr->tso_headers_dma,
+ GFP_KERNEL);
+ if (!txr->tso_headers) {
+ err = -ENOMEM;
+ goto err_alloc_tso;
}
txr->next_to_clean = 0;
txr->next_to_use = 0;
return 0;
+
+err_alloc_tso:
+ dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd),
+ txr->bd_base, txr->bd_dma_base);
+ txr->bd_base = NULL;
+err_alloc_bdr:
+ vfree(txr->tx_swbd);
+ txr->tx_swbd = NULL;
+
+ return err;
}
static void enetc_free_txbdr(struct enetc_bdr *txr)
@@ -1513,6 +1793,10 @@ static void enetc_free_txbdr(struct enetc_bdr *txr)
size = txr->bd_count * sizeof(union enetc_tx_bd);
+ dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE,
+ txr->tso_headers, txr->tso_headers_dma);
+ txr->tso_headers = NULL;
+
dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base);
txr->bd_base = NULL;
@@ -2607,10 +2891,8 @@ int enetc_pci_probe(struct pci_dev *pdev, const char *name, int sizeof_priv)
pcie_flr(pdev);
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
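/*
 * The enetc TSO support above builds each segment's headers with
 * tso_build_hdr() and finishes the L4 checksum in software: a partial sum is
 * taken over the L4 header, extended with csum_block_add() for every payload
 * chunk, and finally folded with the pseudo-header via csum_tcpudp_magic()
 * or csum_ipv6_magic().  A condensed sketch of that accumulation for the
 * IPv4/TCP case (BD bookkeeping and error handling omitted):
 */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void example_finish_tcp_csum(struct sk_buff *skb, struct tcphdr *tcph,
				    int l4_hdr_len, char **chunks, int *sizes,
				    int nr_chunks)
{
	__wsum sum;
	int pos, i;

	tcph->check = 0;
	sum = csum_partial(tcph, l4_hdr_len, 0);	/* L4 header */
	pos = l4_hdr_len;

	for (i = 0; i < nr_chunks; i++) {		/* payload pieces */
		sum = csum_block_add(sum, csum_partial(chunks[i], sizes[i], 0), pos);
		pos += sizes[i];
	}

	/* fold in the IPv4 pseudo-header to get the final TCP checksum */
	tcph->check = csum_tcpudp_magic(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
					pos, IPPROTO_TCP, sum);
}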
diff --git a/drivers/net/ethernet/freescale/enetc/enetc.h b/drivers/net/ethernet/freescale/enetc/enetc.h
index 08b283347d9c..fb39e406b7fc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc.h
@@ -112,6 +112,10 @@ struct enetc_bdr {
dma_addr_t bd_dma_base;
u8 tsd_enable; /* Time specific departure */
bool ext_en; /* enable h/w descriptor extensions */
+
+ /* DMA buffer for TSO headers */
+ char *tso_headers;
+ dma_addr_t tso_headers_dma;
} ____cacheline_aligned_in_smp;
static inline void enetc_bdr_idx_inc(struct enetc_bdr *bdr, int *i)
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
index 9690e36e9e85..910b9f722504 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
@@ -157,7 +157,7 @@ static const struct {
{ ENETC_PM0_TFRM, "MAC tx frames" },
{ ENETC_PM0_TFCS, "MAC tx fcs errors" },
{ ENETC_PM0_TVLAN, "MAC tx VLAN frames" },
- { ENETC_PM0_TERR, "MAC tx frames" },
+ { ENETC_PM0_TERR, "MAC tx frame errors" },
{ ENETC_PM0_TUCA, "MAC tx unicast frames" },
{ ENETC_PM0_TMCA, "MAC tx multicast frames" },
{ ENETC_PM0_TBCA, "MAC tx broadcast frames" },
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
index 0f5f081a5baf..1514e6a4a3ff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
@@ -635,10 +635,14 @@ struct enetc_cmd_rfse {
#define ENETC_RFSE_EN BIT(15)
#define ENETC_RFSE_MODE_BD 2
-static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr)
+static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw,
+ struct net_device *ndev)
{
+ u8 addr[ETH_ALEN] __aligned(4);
+
*(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0);
*(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1);
+ eth_hw_addr_set(ndev, addr);
}
#define ENETC_SI_INT_IDX 0
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 4c977dfc44f0..0e87c7043b77 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -40,7 +40,7 @@ static int enetc_pf_set_mac_addr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, saddr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, saddr->sa_data);
enetc_pf_set_primary_mac_addr(&priv->si->hw, 0, saddr->sa_data);
return 0;
@@ -517,10 +517,13 @@ static void enetc_port_si_configure(struct enetc_si *si)
static void enetc_configure_port_mac(struct enetc_hw *hw)
{
+ int tc;
+
enetc_port_wr(hw, ENETC_PM0_MAXFRM,
ENETC_SET_MAXFRM(ENETC_RX_MAXFRM_SIZE));
- enetc_port_wr(hw, ENETC_PTCMSDUR(0), ENETC_MAC_MAXFRM_SIZE);
+ for (tc = 0; tc < 8; tc++)
+ enetc_port_wr(hw, ENETC_PTCMSDUR(tc), ENETC_MAC_MAXFRM_SIZE);
enetc_port_wr(hw, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_PHY_TX_EN |
ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC);
@@ -759,10 +762,14 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK;
+ NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_LOOPBACK |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
@@ -779,7 +786,7 @@ static void enetc_pf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
}
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
@@ -803,10 +810,8 @@ static int enetc_mdio_probe(struct enetc_pf *pf, struct device_node *np)
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
err = of_mdiobus_register(bus, np);
- if (err) {
- dev_err(dev, "cannot register MDIO bus\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(dev, err, "cannot register MDIO bus\n");
pf->mdio = bus;
@@ -937,7 +942,7 @@ static void enetc_pl_mac_validate(struct phylink_config *config,
state->interface != PHY_INTERFACE_MODE_2500BASEX &&
state->interface != PHY_INTERFACE_MODE_USXGMII &&
!phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -960,10 +965,8 @@ static void enetc_pl_mac_validate(struct phylink_config *config,
phylink_set(mask, 2500baseX_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void enetc_pl_mac_config(struct phylink_config *config,
@@ -1215,10 +1218,8 @@ static int enetc_pf_probe(struct pci_dev *pdev,
ERR_PTR(err));
err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
index bc594892507a..36b4f51dd297 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_ptp.c
@@ -39,10 +39,8 @@ static int enetc_ptp_probe(struct pci_dev *pdev,
}
err = pci_enable_device_mem(pdev);
- if (err) {
- dev_err(&pdev->dev, "device enable failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "device enable failed\n");
/* set up for high or low dma */
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_qos.c b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
index 4577226d3c6a..0536d2c76fbc 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_qos.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_qos.c
@@ -486,14 +486,16 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
data_size = sizeof(struct streamid_data);
si_data = kzalloc(data_size, __GFP_DMA | GFP_KERNEL);
+ if (!si_data)
+ return -ENOMEM;
cbd.length = cpu_to_le16(data_size);
dma = dma_map_single(&priv->si->pdev->dev, si_data,
data_size, DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->si->pdev->dev, dma)) {
netdev_err(priv->si->ndev, "DMA mapping failed!\n");
- kfree(si_data);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto out;
}
cbd.addr[0] = cpu_to_le32(lower_32_bits(dma));
@@ -512,12 +514,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
err = enetc_send_cmd(priv->si, &cbd);
if (err)
- return -EINVAL;
+ goto out;
- if (!enable) {
- kfree(si_data);
- return 0;
- }
+ if (!enable)
+ goto out;
/* Enable the entry overwrite again incase space flushed by hardware */
memset(&cbd, 0, sizeof(cbd));
@@ -560,6 +560,10 @@ static int enetc_streamid_hw_set(struct enetc_ndev_priv *priv,
}
err = enetc_send_cmd(priv->si, &cbd);
+out:
+ if (!dma_mapping_error(&priv->si->pdev->dev, dma))
+ dma_unmap_single(&priv->si->pdev->dev, dma, data_size, DMA_FROM_DEVICE);
+
kfree(si_data);
return err;
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
index 1a9d1e8b772c..17924305afa2 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c
@@ -122,16 +122,20 @@ static void enetc_vf_netdev_setup(struct enetc_si *si, struct net_device *ndev,
ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features = NETIF_F_SG | NETIF_F_HW_CSUM |
+ NETIF_F_TSO | NETIF_F_TSO6;
if (si->num_rss)
ndev->hw_features |= NETIF_F_RXHASH;
/* pick up primary MAC address from SI */
- enetc_get_primary_mac_addr(&si->hw, ndev->dev_addr);
+ enetc_load_primary_mac_addr(&si->hw, ndev);
}
static int enetc_vf_probe(struct pci_dev *pdev,
@@ -143,10 +147,8 @@ static int enetc_vf_probe(struct pci_dev *pdev,
int err;
err = enetc_pci_probe(pdev, KBUILD_MODNAME, 0);
- if (err) {
- dev_err(&pdev->dev, "PCI probing failed\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
si = pci_get_drvdata(pdev);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index ec87b370bba1..bc418b910999 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1768,11 +1768,8 @@ static int fec_get_mac(struct net_device *ndev)
return 0;
}
- memcpy(ndev->dev_addr, iap, ETH_ALEN);
-
/* Adjust MAC if using macaddr */
- if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
+ eth_hw_addr_gen(ndev, iap, iap == macaddr ? fep->dev_id : 0);
return 0;
}
@@ -3326,7 +3323,7 @@ fec_set_mac_address(struct net_device *ndev, void *p)
if (addr) {
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
}
/* Add netif status check here to avoid system hang in below case:
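/*
 * fec_get_mac() above replaces the memcpy plus manual "add dev_id to the last
 * byte" adjustment with eth_hw_addr_gen(), which derives a per-instance
 * address from a base MAC by adding an integer offset (0 when the address
 * came from the device itself).  Sketch of the same call for a two-port
 * device sharing one base address:
 */
#include <linux/etherdevice.h>

static void example_assign_port_macs(struct net_device *ndev0,
				     struct net_device *ndev1,
				     const u8 *base_mac)
{
	eth_hw_addr_gen(ndev0, base_mac, 0);	/* base address as-is */
	eth_hw_addr_gen(ndev1, base_mac, 1);	/* base address + 1 */
}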
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 73ff359a15f1..bbbde9f701c2 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -112,7 +112,7 @@ static int mpc52xx_fec_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sock = addr;
- memcpy(dev->dev_addr, sock->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sock->sa_data);
mpc52xx_fec_set_paddr(dev, sock->sa_data);
return 0;
@@ -890,7 +890,7 @@ static int mpc52xx_fec_probe(struct platform_device *op)
*
* First try to read MAC address from DT
*/
- rv = of_get_mac_address(np, ndev->dev_addr);
+ rv = of_get_ethdev_address(np, ndev);
if (rv) {
struct mpc52xx_fec __iomem *fec = priv->fec;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index bce3c9398887..1950a8936bc0 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -366,7 +366,7 @@ static void set_dflts(struct dtsec_cfg *cfg)
cfg->maximum_frame = DEFAULT_MAXIMUM_FRAME;
}
-static void set_mac_address(struct dtsec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct dtsec_regs __iomem *regs, const u8 *adr)
{
u32 tmp;
@@ -516,7 +516,7 @@ static int init(struct dtsec_regs __iomem *regs, struct dtsec_cfg *cfg,
if (addr) {
MAKE_ENET_ADDR_FROM_UINT64(addr, eth_addr);
- set_mac_address(regs, (u8 *)eth_addr);
+ set_mac_address(regs, (const u8 *)eth_addr);
}
/* HASH */
@@ -1022,7 +1022,7 @@ int dtsec_accept_rx_pause_frames(struct fman_mac *dtsec, bool en)
return 0;
}
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
enum comm_mode mode = COMM_MODE_NONE;
@@ -1041,7 +1041,7 @@ int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr)
* Station address have to be swapped (big endian to little endian
*/
dtsec->addr = ENET_ADDR_TO_UINT64(*enet_addr);
- set_mac_address(dtsec->regs, (u8 *)(*enet_addr));
+ set_mac_address(dtsec->regs, (const u8 *)(*enet_addr));
graceful_start(dtsec, mode);
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index 5149d96ec2c1..68512c3bd6e5 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -37,7 +37,7 @@
struct fman_mac *dtsec_config(struct fman_mac_params *params);
int dtsec_set_promiscuous(struct fman_mac *dtsec, bool new_val);
-int dtsec_modify_mac_address(struct fman_mac *dtsec, enet_addr_t *enet_addr);
+int dtsec_modify_mac_address(struct fman_mac *dtsec, const enet_addr_t *enet_addr);
int dtsec_adjust_link(struct fman_mac *dtsec,
u16 speed);
int dtsec_restart_autoneg(struct fman_mac *dtsec);
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index 62f42921933d..2216b7f51d26 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -354,7 +354,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
+static void add_addr_in_paddr(struct memac_regs __iomem *regs, const u8 *adr,
u8 paddr_num)
{
u32 tmp0, tmp1;
@@ -897,12 +897,12 @@ int memac_accept_rx_pause_frames(struct fman_mac *memac, bool en)
return 0;
}
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr)
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr)
{
if (!is_init_done(memac->memac_drv_param))
return -EINVAL;
- add_addr_in_paddr(memac->regs, (u8 *)(*enet_addr), 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)(*enet_addr), 0);
return 0;
}
@@ -1058,7 +1058,7 @@ int memac_init(struct fman_mac *memac)
/* MAC Address */
if (memac->addr != 0) {
MAKE_ENET_ADDR_FROM_UINT64(memac->addr, eth_addr);
- add_addr_in_paddr(memac->regs, (u8 *)eth_addr, 0);
+ add_addr_in_paddr(memac->regs, (const u8 *)eth_addr, 0);
}
fixed_link = memac_drv_param->fixed_link;
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index b2c671ec0ce7..3820f7a22983 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -40,7 +40,7 @@
struct fman_mac *memac_config(struct fman_mac_params *params);
int memac_set_promiscuous(struct fman_mac *memac, bool new_val);
-int memac_modify_mac_address(struct fman_mac *memac, enet_addr_t *enet_addr);
+int memac_modify_mac_address(struct fman_mac *memac, const enet_addr_t *enet_addr);
int memac_adjust_link(struct fman_mac *memac, u16 speed);
int memac_cfg_max_frame_len(struct fman_mac *memac, u16 new_val);
int memac_cfg_reset_on_init(struct fman_mac *memac, bool enable);
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 41946b16f6c7..311c1906e044 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -221,7 +221,7 @@ struct fman_mac {
bool allmulti_enabled;
};
-static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
+static void set_mac_address(struct tgec_regs __iomem *regs, const u8 *adr)
{
u32 tmp0, tmp1;
@@ -514,13 +514,13 @@ int tgec_accept_rx_pause_frames(struct fman_mac *tgec, bool en)
return 0;
}
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *p_enet_addr)
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *p_enet_addr)
{
if (!is_init_done(tgec->cfg))
return -EINVAL;
tgec->addr = ENET_ADDR_TO_UINT64(*p_enet_addr);
- set_mac_address(tgec->regs, (u8 *)(*p_enet_addr));
+ set_mac_address(tgec->regs, (const u8 *)(*p_enet_addr));
return 0;
}
@@ -704,7 +704,7 @@ int tgec_init(struct fman_mac *tgec)
if (tgec->addr) {
MAKE_ENET_ADDR_FROM_UINT64(tgec->addr, eth_addr);
- set_mac_address(tgec->regs, (u8 *)eth_addr);
+ set_mac_address(tgec->regs, (const u8 *)eth_addr);
}
/* interrupts */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index 3bfd1062b386..b28b20b26148 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -37,7 +37,7 @@
struct fman_mac *tgec_config(struct fman_mac_params *params);
int tgec_set_promiscuous(struct fman_mac *tgec, bool new_val);
-int tgec_modify_mac_address(struct fman_mac *tgec, enet_addr_t *enet_addr);
+int tgec_modify_mac_address(struct fman_mac *tgec, const enet_addr_t *enet_addr);
int tgec_cfg_max_frame_len(struct fman_mac *tgec, u16 new_val);
int tgec_enable(struct fman_mac *tgec, enum comm_mode mode);
int tgec_disable(struct fman_mac *tgec, enum comm_mode mode);
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index 824a81a9f350..daa285a9b8b2 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -66,7 +66,7 @@ struct mac_device {
int (*stop)(struct mac_device *mac_dev);
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
- int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*change_addr)(struct fman_mac *mac_dev, const enet_addr_t *enet_addr);
int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_tstamp)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
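Taken together, the fman hunks above are a single const-propagation pass: the lowest-level register writers only read the address bytes, so they and every caller up to the mac_device ops table can accept const pointers, which is what later lets the (const) netdev address be passed straight through. A hedged illustration with hypothetical names ("example_regs", "example_set_hw_addr"):

#include <linux/io.h>
#include <linux/types.h>

struct example_regs {
	u32 mac_addr_lo;
	u32 mac_addr_hi;
};

static void example_set_hw_addr(struct example_regs __iomem *regs,
				const u8 *addr)
{
	u32 lo = addr[0] | addr[1] << 8 | addr[2] << 16 | (u32)addr[3] << 24;
	u32 hi = addr[4] | addr[5] << 8;

	/* The helper never writes through 'addr', hence the const qualifier */
	iowrite32(lo, &regs->mac_addr_lo);
	iowrite32(hi, &regs->mac_addr_hi);
}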
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 2db6e38a772e..bacf25318f87 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1005,7 +1005,7 @@ static int fs_enet_probe(struct platform_device *ofdev)
spin_lock_init(&fep->lock);
spin_lock_init(&fep->tx_lock);
- of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ of_get_ethdev_address(ofdev->dev.of_node, ndev);
ret = fep->ops->allocate_bd(ndev);
if (ret)
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index af6ad94bf24a..acab58fd3db3 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -753,7 +753,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
if (stash_len || stash_idx)
priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;
- err = of_get_mac_address(np, dev->dev_addr);
+ err = of_get_ethdev_address(np, dev);
if (err) {
eth_hw_addr_random(dev);
dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 3eb288d10b0c..823221c912ab 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3205,7 +3205,7 @@ static int ucc_geth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/*
* If device is not running, we will set mac addr register
@@ -3731,7 +3731,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
goto err_free_netdev;
}
- of_get_mac_address(np, dev->dev_addr);
+ of_get_ethdev_address(np, dev);
ugeth->ug_info = ug_info;
ugeth->dev = device;
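The ucc_geth hunk shows the ndo_set_mac_address side of the conversion: validate the requested address, then let eth_hw_addr_set() copy it instead of memcpy()ing into dev->dev_addr. A minimal sketch with the hardware programming omitted; "example_set_mac_addr" is a hypothetical name:

#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static int example_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(dev, addr->sa_data);	/* core helper does the copy */
	return 0;
}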
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 62c0bed82ced..b0d733e9a7c6 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -334,6 +334,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
u8 *buf;
size_t len;
u_char buggybuf[32];
+ u8 addr[ETH_ALEN];
dev_dbg(&link->dev, "fmvj18x_config\n");
@@ -468,8 +469,7 @@ static int fmvj18x_config(struct pcmcia_device *link)
goto failed;
}
/* Read MACID from CIS */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = buf[i + 5];
+ eth_hw_addr_set(dev, &buf[5]);
kfree(buf);
} else {
if (pcmcia_get_mac_from_cis(link, dev))
@@ -490,7 +490,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
case UNGERMANN:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ addr[i] = inb(ioaddr + UNGERMANN_MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "Access/CARD";
break;
case XXX10304:
@@ -499,16 +500,15 @@ static int fmvj18x_config(struct pcmcia_device *link)
pr_notice("unable to read hardware net address\n");
goto failed;
}
- for (i = 0 ; i < 6; i++) {
- dev->dev_addr[i] = buggybuf[i];
- }
+ eth_hw_addr_set(dev, buggybuf);
card_name = "FMV-J182";
break;
case MBH10302:
default:
/* Read MACID from register */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + MAC_ID + i);
+ addr[i] = inb(ioaddr + MAC_ID + i);
+ eth_hw_addr_set(dev, addr);
card_name = "FMV-J181";
break;
}
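Where the MAC is read byte-by-byte from hardware, the fmvj18x hunks collect the bytes in a stack buffer first and commit them with a single eth_hw_addr_set() call, since dev->dev_addr is no longer meant to be written directly. A hedged sketch; "EXAMPLE_MAC_ID" is a hypothetical register offset:

#include <linux/etherdevice.h>
#include <linux/io.h>

#define EXAMPLE_MAC_ID	0x10

static void example_read_mac(struct net_device *dev, unsigned int ioaddr)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = inb(ioaddr + EXAMPLE_MAC_ID + i);
	eth_hw_addr_set(dev, addr);
}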
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 92dc18a4bcc4..b719f72281c4 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -30,7 +30,7 @@
#define GVE_MIN_MSIX 3
/* Numbers of gve tx/rx stats in stats report. */
-#define GVE_TX_STATS_REPORT_NUM 5
+#define GVE_TX_STATS_REPORT_NUM 6
#define GVE_RX_STATS_REPORT_NUM 2
/* Interval to schedule a stats report update, 20000ms. */
@@ -142,6 +142,19 @@ struct gve_index_list {
s16 tail;
};
+/* A single received packet split across multiple buffers may be
+ * reconstructed using the information in this structure.
+ */
+struct gve_rx_ctx {
+ /* head and tail of skb chain for the current packet or NULL if none */
+ struct sk_buff *skb_head;
+ struct sk_buff *skb_tail;
+ u16 total_expected_size;
+ u8 expected_frag_cnt;
+ u8 curr_frag_cnt;
+ u8 reuse_frags;
+};
+
/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
struct gve_priv *gve;
@@ -153,6 +166,7 @@ struct gve_rx_ring {
/* threshold for posting new buffs and descs */
u32 db_threshold;
+ u16 packet_buffer_size;
};
/* DQO fields. */
@@ -200,15 +214,16 @@ struct gve_rx_ring {
u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
+ u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
+ u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
+ u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
u32 q_num; /* queue index */
u32 ntfy_id; /* notification block index */
struct gve_queue_resources *q_resources; /* head and tail pointer idx */
dma_addr_t q_resources_bus; /* dma address for the queue resources */
struct u64_stats_sync statss; /* sync stats for 32bit archs */
- /* head and tail of skb chain for the current packet or NULL if none */
- struct sk_buff *skb_head;
- struct sk_buff *skb_tail;
+ struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
};
/* A TX desc ring entry */
@@ -224,11 +239,6 @@ struct gve_tx_iovec {
u32 iov_padding; /* padding associated with this segment */
};
-struct gve_tx_dma_buf {
- DEFINE_DMA_UNMAP_ADDR(dma);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
* ring entry but only used for a pkt_desc not a seg_desc
*/
@@ -236,7 +246,10 @@ struct gve_tx_buffer_state {
struct sk_buff *skb; /* skb for this pkt */
union {
struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
- struct gve_tx_dma_buf buf;
+ struct {
+ DEFINE_DMA_UNMAP_ADDR(dma);
+ DEFINE_DMA_UNMAP_LEN(len);
+ };
};
};
@@ -280,7 +293,8 @@ struct gve_tx_pending_packet_dqo {
* All others correspond to `skb`'s frags and should be unmapped with
* `dma_unmap_page`.
*/
- struct gve_tx_dma_buf bufs[MAX_SKB_FRAGS + 1];
+ DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
+ DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
u16 num_bufs;
/* Linked list index to next element in the list, or -1 if none */
@@ -342,8 +356,8 @@ struct gve_tx_ring {
union {
/* GQI fields */
struct {
- /* NIC tail pointer */
- __be32 last_nic_done;
+ /* Spinlock for when cleanup in progress */
+ spinlock_t clean_lock;
};
/* DQO fields. */
@@ -414,7 +428,9 @@ struct gve_tx_ring {
u32 q_num ____cacheline_aligned; /* queue idx */
u32 stop_queue; /* count of queue stops */
u32 wake_queue; /* count of queue wakes */
+ u32 queue_timeout; /* count of queue timeouts */
u32 ntfy_id; /* notification block index */
+ u32 last_kick_msec; /* Last time the queue was kicked */
dma_addr_t bus; /* dma address of the descr ring */
dma_addr_t q_resources_bus; /* dma address of the queue resources */
dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
@@ -822,15 +838,15 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx);
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx);
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
-bool gve_rx_poll(struct gve_notify_block *block, int budget);
+int gve_rx_poll(struct gve_notify_block *block, int budget);
+bool gve_rx_work_pending(struct gve_rx_ring *rx);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
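The gve.h hunk folds the old gve_tx_dma_buf wrapper into anonymous DEFINE_DMA_UNMAP_ADDR/LEN members. These macros only occupy space on architectures that actually need the saved values for unmapping, and the matching accessors hide that difference. A hedged sketch with a hypothetical "example_tx_buf":

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct example_tx_buf {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma);	/* may compile away to nothing */
	DEFINE_DMA_UNMAP_LEN(len);
};

static void example_unmap(struct device *dev, struct example_tx_buf *buf)
{
	dma_unmap_single(dev, dma_unmap_addr(buf, dma),
			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(buf, len, 0);	/* mark the slot as unmapped */
}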
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index f089d33dd48e..83ae56c310d3 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -38,7 +38,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option *option,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
- struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -111,6 +112,24 @@ void gve_parse_device_option(struct gve_priv *priv,
}
*dev_op_dqo_rda = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_JUMBO_FRAMES:
+ if (option_length < sizeof(**dev_op_jumbo_frames) ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Jumbo Frames",
+ (int)sizeof(**dev_op_jumbo_frames),
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_jumbo_frames)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT,
+ "Jumbo Frames");
+ }
+ *dev_op_jumbo_frames = (void *)(option + 1);
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -126,7 +145,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_descriptor *descriptor,
struct gve_device_option_gqi_rda **dev_op_gqi_rda,
struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
- struct gve_device_option_dqo_rda **dev_op_dqo_rda)
+ struct gve_device_option_dqo_rda **dev_op_dqo_rda,
+ struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -146,7 +166,7 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
- dev_op_dqo_rda);
+ dev_op_dqo_rda, dev_op_jumbo_frames);
dev_opt = next_opt;
}
@@ -530,6 +550,7 @@ static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(rx->data.data_bus),
cmd.create_rx_queue.index = cpu_to_be32(queue_index);
cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
cmd.create_rx_queue.rx_ring_size =
cpu_to_be16(priv->rx_desc_cnt);
@@ -660,12 +681,31 @@ gve_set_desc_cnt_dqo(struct gve_priv *priv,
return 0;
}
+static void gve_enable_supported_features(struct gve_priv *priv,
+ u32 supported_features_mask,
+ const struct gve_device_option_jumbo_frames
+ *dev_op_jumbo_frames)
+{
+ /* Before control reaches this point, the page-size-capped max MTU from
+ * the gve_device_descriptor field has already been stored in
+ * priv->dev->max_mtu. We overwrite it with the true max MTU below.
+ */
+ if (dev_op_jumbo_frames &&
+ (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
+ dev_info(&priv->pdev->dev,
+ "JUMBO FRAMES device option enabled.\n");
+ priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
+ }
+}
+
int gve_adminq_describe_device(struct gve_priv *priv)
{
+ struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
struct gve_device_descriptor *descriptor;
+ u32 supported_features_mask = 0;
union gve_adminq_command cmd;
dma_addr_t descriptor_bus;
int err = 0;
@@ -689,7 +729,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
goto free_device_descriptor;
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
- &dev_op_gqi_qpl, &dev_op_dqo_rda);
+ &dev_op_gqi_qpl, &dev_op_dqo_rda,
+ &dev_op_jumbo_frames);
if (err)
goto free_device_descriptor;
@@ -704,12 +745,19 @@ int gve_adminq_describe_device(struct gve_priv *priv)
priv->queue_format = GVE_DQO_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with DQO RDA queue format.\n");
+ supported_features_mask =
+ be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
} else if (dev_op_gqi_rda) {
priv->queue_format = GVE_GQI_RDA_FORMAT;
dev_info(&priv->pdev->dev,
"Driver is running with GQI RDA queue format.\n");
+ supported_features_mask =
+ be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
} else {
priv->queue_format = GVE_GQI_QPL_FORMAT;
+ if (dev_op_gqi_qpl)
+ supported_features_mask =
+ be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
@@ -733,7 +781,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->dev->max_mtu = mtu;
priv->num_event_counters = be16_to_cpu(descriptor->counters);
- ether_addr_copy(priv->dev->dev_addr, descriptor->mac);
+ eth_hw_addr_set(priv->dev, descriptor->mac);
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
@@ -746,6 +794,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
}
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
+ gve_enable_supported_features(priv, supported_features_mask,
+ dev_op_jumbo_frames);
+
free_device_descriptor:
dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
descriptor_bus);
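gve_parse_device_option() accepts the new Jumbo Frames TLV only after checking its length against the expected struct and matching the required-feature mask. A hedged sketch of that validation pattern; every "example_" name is hypothetical:

#include <asm/byteorder.h>
#include <linux/types.h>

struct example_option {
	__be16 option_id;
	__be16 option_length;
	__be32 required_features_mask;
};

struct example_jumbo_frames {
	__be32 supported_features_mask;
	__be16 max_mtu;
	u8 padding[2];
};

static struct example_jumbo_frames *
example_accept_jumbo(struct example_option *opt, u32 required_mask)
{
	if (be16_to_cpu(opt->option_length) < sizeof(struct example_jumbo_frames))
		return NULL;	/* too short for the payload we expect */
	if (be32_to_cpu(opt->required_features_mask) != required_mask)
		return NULL;	/* device requires a feature we lack */
	return (struct example_jumbo_frames *)(opt + 1);	/* payload follows header */
}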
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 47c3d8f313fc..83c0b40cd2d9 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -108,6 +108,14 @@ struct gve_device_option_dqo_rda {
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
+struct gve_device_option_jumbo_frames {
+ __be32 supported_features_mask;
+ __be16 max_mtu;
+ u8 padding[2];
+};
+
+static_assert(sizeof(struct gve_device_option_jumbo_frames) == 8);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -121,6 +129,7 @@ enum gve_dev_opt_id {
GVE_DEV_OPT_ID_GQI_RDA = 0x2,
GVE_DEV_OPT_ID_GQI_QPL = 0x3,
GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
};
enum gve_dev_opt_req_feat_mask {
@@ -128,6 +137,11 @@ enum gve_dev_opt_req_feat_mask {
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+};
+
+enum gve_sup_feature_mask {
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -270,6 +284,7 @@ enum gve_stat_names {
TX_LAST_COMPLETION_PROCESSED = 5,
RX_NEXT_EXPECTED_SEQUENCE = 6,
RX_BUFFERS_POSTED = 7,
+ TX_TIMEOUT_CNT = 8,
// stats from NIC
RX_QUEUE_DROP_CNT = 65,
RX_NO_BUFFERS_POSTED = 66,
diff --git a/drivers/net/ethernet/google/gve/gve_desc.h b/drivers/net/ethernet/google/gve/gve_desc.h
index 05ae6300e984..4d225a18d8ce 100644
--- a/drivers/net/ethernet/google/gve/gve_desc.h
+++ b/drivers/net/ethernet/google/gve/gve_desc.h
@@ -90,12 +90,13 @@ union gve_rx_data_slot {
/* GVE Receive Packet Descriptor Flags */
#define GVE_RXFLG(x) cpu_to_be16(1 << (3 + (x)))
-#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
-#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
-#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
-#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
-#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
-#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
+#define GVE_RXF_FRAG GVE_RXFLG(3) /* IP Fragment */
+#define GVE_RXF_IPV4 GVE_RXFLG(4) /* IPv4 */
+#define GVE_RXF_IPV6 GVE_RXFLG(5) /* IPv6 */
+#define GVE_RXF_TCP GVE_RXFLG(6) /* TCP Packet */
+#define GVE_RXF_UDP GVE_RXFLG(7) /* UDP Packet */
+#define GVE_RXF_ERR GVE_RXFLG(8) /* Packet Error Detected */
+#define GVE_RXF_PKT_CONT GVE_RXFLG(10) /* Multi Fragment RX packet */
/* GVE IRQ */
#define GVE_IRQ_ACK BIT(31)
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 716e6240305d..c8df47a97fa4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -43,6 +43,7 @@ static const char gve_gstrings_main_stats[][ETH_GSTRING_LEN] = {
static const char gve_gstrings_rx_stats[][ETH_GSTRING_LEN] = {
"rx_posted_desc[%u]", "rx_completed_desc[%u]", "rx_bytes[%u]",
+ "rx_cont_packet_cnt[%u]", "rx_frag_flip_cnt[%u]", "rx_frag_copy_cnt[%u]",
"rx_dropped_pkt[%u]", "rx_copybreak_pkt[%u]", "rx_copied_pkt[%u]",
"rx_queue_drop_cnt[%u]", "rx_no_buffers_posted[%u]",
"rx_drops_packet_over_mru[%u]", "rx_drops_invalid_checksum[%u]",
@@ -265,6 +266,9 @@ gve_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry(&priv->rx[ring].statss,
start));
data[i++] = tmp_rx_bytes;
+ data[i++] = rx->rx_cont_packet_cnt;
+ data[i++] = rx->rx_frag_flip_cnt;
+ data[i++] = rx->rx_frag_copy_cnt;
/* rx dropped packets */
data[i++] = tmp_rx_skb_alloc_fail +
tmp_rx_buf_alloc_fail +
@@ -330,8 +334,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = tmp_tx_bytes;
data[i++] = tx->wake_queue;
data[i++] = tx->stop_queue;
- data[i++] = be32_to_cpu(gve_tx_load_event_counter(priv,
- tx));
+ data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
if (skip_nic_stats) {
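The new per-ring counters are read with the usual u64_stats retry loop so 32-bit builds see consistent values. A minimal sketch with a hypothetical "example_ring":

#include <linux/u64_stats_sync.h>

struct example_ring {
	u64 rbytes;
	u64 rpackets;
	struct u64_stats_sync statss;
};

static void example_read_stats(struct example_ring *ring,
			       u64 *bytes, u64 *pkts)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&ring->statss);
		*bytes = ring->rbytes;
		*pkts = ring->rpackets;
	} while (u64_stats_fetch_retry(&ring->statss, start));
}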
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index bf8a4a7c43f7..6b02ef432eda 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -24,6 +24,9 @@
#define GVE_VERSION "1.0.0"
#define GVE_VERSION_PREFIX "GVE-"
+// Minimum amount of time between queue kicks in msec (10 seconds)
+#define MIN_TX_TIMEOUT_GAP (1000 * 10)
+
const char gve_version_str[] = GVE_VERSION;
static const char gve_version_prefix[] = GVE_VERSION_PREFIX;
@@ -192,34 +195,40 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
__be32 __iomem *irq_doorbell;
bool reschedule = false;
struct gve_priv *priv;
+ int work_done = 0;
block = container_of(napi, struct gve_notify_block, napi);
priv = block->priv;
if (block->tx)
reschedule |= gve_tx_poll(block, budget);
- if (block->rx)
- reschedule |= gve_rx_poll(block, budget);
+ if (block->rx) {
+ work_done = gve_rx_poll(block, budget);
+ reschedule |= work_done == budget;
+ }
if (reschedule)
return budget;
- napi_complete(napi);
- irq_doorbell = gve_irq_doorbell(priv, block);
- iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
+ /* Complete processing - don't unmask irq if busy polling is enabled */
+ if (likely(napi_complete_done(napi, work_done))) {
+ irq_doorbell = gve_irq_doorbell(priv, block);
+ iowrite32be(GVE_IRQ_ACK | GVE_IRQ_EVENT, irq_doorbell);
- /* Double check we have no extra work.
- * Ensure unmask synchronizes with checking for work.
- */
- mb();
- if (block->tx)
- reschedule |= gve_tx_poll(block, -1);
- if (block->rx)
- reschedule |= gve_rx_poll(block, -1);
- if (reschedule && napi_reschedule(napi))
- iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ /* Ensure IRQ ACK is visible before we check pending work.
+ * If queue had issued updates, it would be truly visible.
+ */
+ mb();
- return 0;
+ if (block->tx)
+ reschedule |= gve_tx_clean_pending(priv, block->tx);
+ if (block->rx)
+ reschedule |= gve_rx_work_pending(block->rx);
+
+ if (reschedule && napi_reschedule(napi))
+ iowrite32be(GVE_IRQ_MASK, irq_doorbell);
+ }
+ return work_done;
}
static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
@@ -279,7 +288,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
int i, j;
int err;
- priv->msix_vectors = kvzalloc(num_vecs_requested *
+ priv->msix_vectors = kvcalloc(num_vecs_requested,
sizeof(*priv->msix_vectors), GFP_KERNEL);
if (!priv->msix_vectors)
return -ENOMEM;
@@ -640,7 +649,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
int err;
/* Setup tx rings */
- priv->tx = kvzalloc(priv->tx_cfg.num_queues * sizeof(*priv->tx),
+ priv->tx = kvcalloc(priv->tx_cfg.num_queues, sizeof(*priv->tx),
GFP_KERNEL);
if (!priv->tx)
return -ENOMEM;
@@ -653,7 +662,7 @@ static int gve_alloc_rings(struct gve_priv *priv)
goto free_tx;
/* Setup rx rings */
- priv->rx = kvzalloc(priv->rx_cfg.num_queues * sizeof(*priv->rx),
+ priv->rx = kvcalloc(priv->rx_cfg.num_queues, sizeof(*priv->rx),
GFP_KERNEL);
if (!priv->rx) {
err = -ENOMEM;
@@ -776,12 +785,11 @@ static int gve_alloc_queue_page_list(struct gve_priv *priv, u32 id,
qpl->id = id;
qpl->num_entries = 0;
- qpl->pages = kvzalloc(pages * sizeof(*qpl->pages), GFP_KERNEL);
+ qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->pages)
return -ENOMEM;
- qpl->page_buses = kvzalloc(pages * sizeof(*qpl->page_buses),
- GFP_KERNEL);
+ qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
/* caller handles clean up */
if (!qpl->page_buses)
return -ENOMEM;
@@ -840,7 +848,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
if (priv->queue_format == GVE_GQI_RDA_FORMAT)
return 0;
- priv->qpls = kvzalloc(num_qpls * sizeof(*priv->qpls), GFP_KERNEL);
+ priv->qpls = kvcalloc(num_qpls, sizeof(*priv->qpls), GFP_KERNEL);
if (!priv->qpls)
return -ENOMEM;
@@ -859,7 +867,7 @@ static int gve_alloc_qpls(struct gve_priv *priv)
priv->qpl_cfg.qpl_map_size = BITS_TO_LONGS(num_qpls) *
sizeof(unsigned long) * BITS_PER_BYTE;
- priv->qpl_cfg.qpl_id_map = kvzalloc(BITS_TO_LONGS(num_qpls) *
+ priv->qpl_cfg.qpl_id_map = kvcalloc(BITS_TO_LONGS(num_qpls),
sizeof(unsigned long), GFP_KERNEL);
if (!priv->qpl_cfg.qpl_id_map) {
err = -ENOMEM;
@@ -1116,9 +1124,47 @@ static void gve_turnup(struct gve_priv *priv)
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
- struct gve_priv *priv = netdev_priv(dev);
+ struct gve_notify_block *block;
+ struct gve_tx_ring *tx = NULL;
+ struct gve_priv *priv;
+ u32 last_nic_done;
+ u32 current_time;
+ u32 ntfy_idx;
+ netdev_info(dev, "Timeout on tx queue, %d", txqueue);
+ priv = netdev_priv(dev);
+ if (txqueue > priv->tx_cfg.num_queues)
+ goto reset;
+
+ ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue);
+ if (ntfy_idx > priv->num_ntfy_blks)
+ goto reset;
+
+ block = &priv->ntfy_blocks[ntfy_idx];
+ tx = block->tx;
+
+ current_time = jiffies_to_msecs(jiffies);
+ if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time)
+ goto reset;
+
+ /* Check to see if there are missed completions, which will allow us to
+ * kick the queue.
+ */
+ last_nic_done = gve_tx_load_event_counter(priv, tx);
+ if (last_nic_done - tx->done) {
+ netdev_info(dev, "Kicking queue %d", txqueue);
+ iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block));
+ napi_schedule(&block->napi);
+ tx->last_kick_msec = current_time;
+ goto out;
+ } // Else reset.
+
+reset:
gve_schedule_reset(priv);
+
+out:
+ if (tx)
+ tx->queue_timeout++;
priv->tx_timeo_cnt++;
}
@@ -1247,6 +1293,11 @@ void gve_handle_report_stats(struct gve_priv *priv)
.value = cpu_to_be64(last_completion),
.queue_id = cpu_to_be32(idx),
};
+ stats[stats_idx++] = (struct stats) {
+ .stat_name = cpu_to_be32(TX_TIMEOUT_CNT),
+ .value = cpu_to_be64(priv->tx[idx].queue_timeout),
+ .queue_id = cpu_to_be32(idx),
+ };
}
}
/* rx stats */
@@ -1320,14 +1371,6 @@ static int gve_init_priv(struct gve_priv *priv, bool skip_describe_device)
"Could not get device information: err=%d\n", err);
goto err;
}
- if (gve_is_gqi(priv) && priv->dev->max_mtu > PAGE_SIZE) {
- priv->dev->max_mtu = PAGE_SIZE;
- err = gve_adminq_set_mtu(priv, priv->dev->mtu);
- if (err) {
- dev_err(&priv->pdev->dev, "Could not set mtu");
- goto err;
- }
- }
priv->dev->mtu = priv->dev->max_mtu;
num_ntfy = pci_msix_vec_count(priv->pdev);
if (num_ntfy <= 0) {
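The reworked gve_napi_poll() reports the real RX work_done so napi_complete_done() can cooperate with busy polling, and only re-arms the interrupt when NAPI actually completed. A hedged sketch of that flow; the example_* hooks stand in for the driver's doorbell and cleanup helpers and are assumptions, not gve API:

#include <asm/barrier.h>
#include <linux/netdevice.h>

int example_clean_rx(struct napi_struct *napi, int budget);
bool example_clean_tx(struct napi_struct *napi, int budget);
bool example_work_pending(struct napi_struct *napi);
void example_irq_ack(struct napi_struct *napi);

int example_napi_poll(struct napi_struct *napi, int budget)
{
	bool reschedule = example_clean_tx(napi, budget);
	int work_done = example_clean_rx(napi, budget);

	reschedule |= work_done == budget;
	if (reschedule)
		return budget;	/* stay in polling mode */

	if (likely(napi_complete_done(napi, work_done))) {
		example_irq_ack(napi);	/* unmask the device interrupt */
		mb();	/* unmask must be visible before re-checking for work */
		if (example_work_pending(napi))
			napi_reschedule(napi);
	}
	return work_done;
}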
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 94941d4e4744..c8500babbd1d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -16,19 +16,23 @@ static void gve_rx_free_buffer(struct device *dev,
dma_addr_t dma = (dma_addr_t)(be64_to_cpu(data_slot->addr) &
GVE_DATA_SLOT_ADDR_PAGE_MASK);
+ page_ref_sub(page_info->page, page_info->pagecnt_bias - 1);
gve_free_page(dev, page_info->page, dma, DMA_FROM_DEVICE);
}
static void gve_rx_unfill_pages(struct gve_priv *priv, struct gve_rx_ring *rx)
{
- if (rx->data.raw_addressing) {
- u32 slots = rx->mask + 1;
- int i;
+ u32 slots = rx->mask + 1;
+ int i;
+ if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
&rx->data.data_ring[i]);
} else {
+ for (i = 0; i < slots; i++)
+ page_ref_sub(rx->data.page_info[i].page,
+ rx->data.page_info[i].pagecnt_bias - 1);
gve_unassign_qpl(priv, rx->data.qpl->id);
rx->data.qpl = NULL;
}
@@ -69,6 +73,9 @@ static void gve_setup_rx_buffer(struct gve_rx_slot_page_info *page_info,
page_info->page_offset = 0;
page_info->page_address = page_address(page);
*slot_addr = cpu_to_be64(addr);
+ /* The page already has 1 ref */
+ page_ref_add(page, INT_MAX - 1);
+ page_info->pagecnt_bias = INT_MAX;
}
static int gve_rx_alloc_buffer(struct gve_priv *priv, struct device *dev,
@@ -136,6 +143,16 @@ alloc_err:
return err;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->curr_frag_cnt = 0;
+ ctx->total_expected_size = 0;
+ ctx->expected_frag_cnt = 0;
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->reuse_frags = false;
+}
+
static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
@@ -202,6 +219,12 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
rx->cnt = 0;
rx->db_threshold = priv->rx_desc_cnt / 2;
rx->desc.seqno = 1;
+
+ /* Allocating half-page buffers allows page-flipping which is faster
+ * than copying or allocating new pages.
+ */
+ rx->packet_buffer_size = PAGE_SIZE / 2;
+ gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
return 0;
@@ -268,18 +291,28 @@ static enum pkt_hash_types gve_rss_type(__be16 pkt_flags)
return PKT_HASH_TYPE_L2;
}
+static u16 gve_rx_ctx_padding(struct gve_rx_ctx *ctx)
+{
+ return (ctx->curr_frag_cnt == 0) ? GVE_RX_PAD : 0;
+}
+
static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info,
- u16 len)
+ u16 packet_buffer_size, u16 len,
+ struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb = napi_get_frags(napi);
+ u32 offset = page_info->page_offset + gve_rx_ctx_padding(ctx);
+ struct sk_buff *skb;
- if (unlikely(!skb))
+ if (!ctx->skb_head)
+ ctx->skb_head = napi_get_frags(napi);
+
+ if (unlikely(!ctx->skb_head))
return NULL;
- skb_add_rx_frag(skb, 0, page_info->page,
- page_info->page_offset +
- GVE_RX_PAD, len, PAGE_SIZE / 2);
+ skb = ctx->skb_head;
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
+ offset, len, packet_buffer_size);
return skb;
}
@@ -293,23 +326,18 @@ static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *sl
*(slot_addr) ^= offset;
}
-static bool gve_rx_can_flip_buffers(struct net_device *netdev)
-{
- return PAGE_SIZE == 4096
- ? netdev->mtu + GVE_RX_PAD + ETH_HLEN <= PAGE_SIZE / 2 : false;
-}
-
-static int gve_rx_can_recycle_buffer(struct page *page)
+static int gve_rx_can_recycle_buffer(struct gve_rx_slot_page_info *page_info)
{
- int pagecount = page_count(page);
+ int pagecount = page_count(page_info->page);
/* This page is not being used by any SKBs - reuse */
- if (pagecount == 1)
+ if (pagecount == page_info->pagecnt_bias)
return 1;
/* This page is still being used by an SKB - we can't reuse */
- else if (pagecount >= 2)
+ else if (pagecount > page_info->pagecnt_bias)
return 0;
- WARN(pagecount < 1, "Pagecount should never be < 1");
+ WARN(pagecount < page_info->pagecnt_bias,
+ "Pagecount should never be less than the bias.");
return -1;
}
@@ -317,19 +345,19 @@ static struct sk_buff *
gve_rx_raw_addressing(struct device *dev, struct net_device *netdev,
struct gve_rx_slot_page_info *page_info, u16 len,
struct napi_struct *napi,
- union gve_rx_data_slot *data_slot)
+ union gve_rx_data_slot *data_slot,
+ u16 packet_buffer_size, struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb;
+ struct sk_buff *skb = gve_rx_add_frags(napi, page_info, packet_buffer_size, len, ctx);
- skb = gve_rx_add_frags(napi, page_info, len);
if (!skb)
return NULL;
- /* Optimistically stop the kernel from freeing the page by increasing
- * the page bias. We will check the refcount in refill to determine if
- * we need to alloc a new page.
+ /* Optimistically stop the kernel from freeing the page.
+ * We will check again in refill to determine if we need to alloc a
+ * new page.
*/
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
return skb;
}
@@ -340,6 +368,7 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
u16 len, struct napi_struct *napi,
union gve_rx_data_slot *data_slot)
{
+ struct gve_rx_ctx *ctx = &rx->ctx;
struct sk_buff *skb;
/* if raw_addressing mode is not enabled gvnic can only receive into
@@ -347,116 +376,259 @@ gve_rx_qpl(struct device *dev, struct net_device *netdev,
* choice is to copy the data out of it so that we can return it to the
* device.
*/
- if (page_info->can_flip) {
- skb = gve_rx_add_frags(napi, page_info, len);
+ if (ctx->reuse_frags) {
+ skb = gve_rx_add_frags(napi, page_info, rx->packet_buffer_size, len, ctx);
/* No point in recycling if we didn't get the skb */
if (skb) {
/* Make sure that the page isn't freed. */
- get_page(page_info->page);
+ gve_dec_pagecnt_bias(page_info);
gve_rx_flip_buff(page_info, &data_slot->qpl_offset);
}
} else {
- skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD);
+ const u16 padding = gve_rx_ctx_padding(ctx);
+
+ skb = gve_rx_copy(netdev, napi, page_info, len, padding, ctx);
if (skb) {
u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
+ rx->rx_frag_copy_cnt++;
u64_stats_update_end(&rx->statss);
}
}
return skb;
}
-static bool gve_rx(struct gve_rx_ring *rx, struct gve_rx_desc *rx_desc,
- netdev_features_t feat, u32 idx)
+#define GVE_PKTCONT_BIT_IS_SET(x) (GVE_RXF_PKT_CONT & (x))
+static u16 gve_rx_get_fragment_size(struct gve_rx_ctx *ctx, struct gve_rx_desc *desc)
{
+ return be16_to_cpu(desc->len) - gve_rx_ctx_padding(ctx);
+}
+
+static bool gve_rx_ctx_init(struct gve_rx_ctx *ctx, struct gve_rx_ring *rx)
+{
+ bool qpl_mode = !rx->data.raw_addressing, packet_size_error = false;
+ bool buffer_error = false, desc_error = false, seqno_error = false;
struct gve_rx_slot_page_info *page_info;
struct gve_priv *priv = rx->gve;
- struct napi_struct *napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
- struct net_device *dev = priv->dev;
- union gve_rx_data_slot *data_slot;
- struct sk_buff *skb = NULL;
- dma_addr_t page_bus;
- u16 len;
+ u32 idx = rx->cnt & rx->mask;
+ bool reuse_frags, can_flip;
+ struct gve_rx_desc *desc;
+ u16 packet_size = 0;
+ u16 n_frags = 0;
+ int recycle;
+
+ /** In QPL mode, we only flip buffers when all buffers containing the packet
+ * can be flipped. RDA can_flip decisions will be made later, per frag.
+ */
+ can_flip = qpl_mode;
+ reuse_frags = can_flip;
+ do {
+ u16 frag_size;
+
+ n_frags++;
+ desc = &rx->desc.desc_ring[idx];
+ desc_error = unlikely(desc->flags_seq & GVE_RXF_ERR) || desc_error;
+ if (GVE_SEQNO(desc->flags_seq) != rx->desc.seqno) {
+ seqno_error = true;
+ netdev_warn(priv->dev,
+ "RX seqno error: want=%d, got=%d, dropping packet and scheduling reset.",
+ rx->desc.seqno, GVE_SEQNO(desc->flags_seq));
+ }
+ frag_size = be16_to_cpu(desc->len);
+ packet_size += frag_size;
+ if (frag_size > rx->packet_buffer_size) {
+ packet_size_error = true;
+ netdev_warn(priv->dev,
+ "RX fragment error: packet_buffer_size=%d, frag_size=%d, droping packet.",
+ rx->packet_buffer_size, be16_to_cpu(desc->len));
+ }
+ page_info = &rx->data.page_info[idx];
+ if (can_flip) {
+ recycle = gve_rx_can_recycle_buffer(page_info);
+ reuse_frags = reuse_frags && recycle > 0;
+ buffer_error = buffer_error || unlikely(recycle < 0);
+ }
+ idx = (idx + 1) & rx->mask;
+ rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
+ } while (GVE_PKTCONT_BIT_IS_SET(desc->flags_seq));
+
+ prefetch(rx->desc.desc_ring + idx);
+
+ ctx->curr_frag_cnt = 0;
+ ctx->total_expected_size = packet_size - GVE_RX_PAD;
+ ctx->expected_frag_cnt = n_frags;
+ ctx->skb_head = NULL;
+ ctx->reuse_frags = reuse_frags;
- /* drop this packet */
- if (unlikely(rx_desc->flags_seq & GVE_RXF_ERR)) {
+ if (ctx->expected_frag_cnt > 1) {
u64_stats_update_begin(&rx->statss);
- rx->rx_desc_err_dropped_pkt++;
+ rx->rx_cont_packet_cnt++;
u64_stats_update_end(&rx->statss);
+ }
+ if (ctx->total_expected_size > priv->rx_copybreak && !ctx->reuse_frags && qpl_mode) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
+
+ if (unlikely(buffer_error || seqno_error || packet_size_error)) {
+ gve_schedule_reset(priv);
return false;
}
- len = be16_to_cpu(rx_desc->len) - GVE_RX_PAD;
- page_info = &rx->data.page_info[idx];
+ if (unlikely(desc_error)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_desc_err_dropped_pkt++;
+ u64_stats_update_end(&rx->statss);
+ return false;
+ }
+ return true;
+}
- data_slot = &rx->data.data_ring[idx];
- page_bus = (rx->data.raw_addressing) ?
- be64_to_cpu(data_slot->addr) & GVE_DATA_SLOT_ADDR_PAGE_MASK :
- rx->data.qpl->page_buses[idx];
- dma_sync_single_for_cpu(&priv->pdev->dev, page_bus,
- PAGE_SIZE, DMA_FROM_DEVICE);
+static struct sk_buff *gve_rx_skb(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_slot_page_info *page_info, struct napi_struct *napi,
+ u16 len, union gve_rx_data_slot *data_slot)
+{
+ struct net_device *netdev = priv->dev;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ struct sk_buff *skb = NULL;
- if (len <= priv->rx_copybreak) {
+ if (len <= priv->rx_copybreak && ctx->expected_frag_cnt == 1) {
/* Just copy small packets */
- skb = gve_rx_copy(dev, napi, page_info, len, GVE_RX_PAD);
- u64_stats_update_begin(&rx->statss);
- rx->rx_copied_pkt++;
- rx->rx_copybreak_pkt++;
- u64_stats_update_end(&rx->statss);
+ skb = gve_rx_copy(netdev, napi, page_info, len, GVE_RX_PAD, ctx);
+ if (skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_copied_pkt++;
+ rx->rx_frag_copy_cnt++;
+ rx->rx_copybreak_pkt++;
+ u64_stats_update_end(&rx->statss);
+ }
} else {
- u8 can_flip = gve_rx_can_flip_buffers(dev);
- int recycle = 0;
+ if (rx->data.raw_addressing) {
+ int recycle = gve_rx_can_recycle_buffer(page_info);
- if (can_flip) {
- recycle = gve_rx_can_recycle_buffer(page_info->page);
- if (recycle < 0) {
- if (!rx->data.raw_addressing)
- gve_schedule_reset(priv);
- return false;
+ if (unlikely(recycle < 0)) {
+ gve_schedule_reset(priv);
+ return NULL;
}
- }
-
- page_info->can_flip = can_flip && recycle;
- if (rx->data.raw_addressing) {
- skb = gve_rx_raw_addressing(&priv->pdev->dev, dev,
+ page_info->can_flip = recycle;
+ if (page_info->can_flip) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ skb = gve_rx_raw_addressing(&priv->pdev->dev, netdev,
page_info, len, napi,
- data_slot);
+ data_slot,
+ rx->packet_buffer_size, ctx);
} else {
- skb = gve_rx_qpl(&priv->pdev->dev, dev, rx,
+ if (ctx->reuse_frags) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_frag_flip_cnt++;
+ u64_stats_update_end(&rx->statss);
+ }
+ skb = gve_rx_qpl(&priv->pdev->dev, netdev, rx,
page_info, len, napi, data_slot);
}
}
+ return skb;
+}
- if (!skb) {
- u64_stats_update_begin(&rx->statss);
- rx->rx_skb_alloc_fail++;
- u64_stats_update_end(&rx->statss);
- return false;
+static bool gve_rx(struct gve_rx_ring *rx, netdev_features_t feat,
+ u64 *packet_size_bytes, u32 *work_done)
+{
+ struct gve_rx_slot_page_info *page_info;
+ struct gve_rx_ctx *ctx = &rx->ctx;
+ union gve_rx_data_slot *data_slot;
+ struct gve_priv *priv = rx->gve;
+ struct gve_rx_desc *first_desc;
+ struct sk_buff *skb = NULL;
+ struct gve_rx_desc *desc;
+ struct napi_struct *napi;
+ dma_addr_t page_bus;
+ u32 work_cnt = 0;
+ void *va;
+ u32 idx;
+ u16 len;
+
+ idx = rx->cnt & rx->mask;
+ first_desc = &rx->desc.desc_ring[idx];
+ desc = first_desc;
+ napi = &priv->ntfy_blocks[rx->ntfy_id].napi;
+
+ if (unlikely(!gve_rx_ctx_init(ctx, rx)))
+ goto skb_alloc_fail;
+
+ while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
+ /* Prefetch two packet buffers ahead, we will need it soon. */
+ page_info = &rx->data.page_info[(idx + 2) & rx->mask];
+ va = page_info->page_address + page_info->page_offset;
+
+ prefetch(page_info->page); /* Kernel page struct. */
+ prefetch(va); /* Packet header. */
+ prefetch(va + 64); /* Next cacheline too. */
+
+ len = gve_rx_get_fragment_size(ctx, desc);
+
+ page_info = &rx->data.page_info[idx];
+ data_slot = &rx->data.data_ring[idx];
+ page_bus = rx->data.raw_addressing ?
+ be64_to_cpu(data_slot->addr) - page_info->page_offset :
+ rx->data.qpl->page_buses[idx];
+ dma_sync_single_for_cpu(&priv->pdev->dev, page_bus, PAGE_SIZE, DMA_FROM_DEVICE);
+
+ skb = gve_rx_skb(priv, rx, page_info, napi, len, data_slot);
+ if (!skb) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_skb_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
+ goto skb_alloc_fail;
+ }
+
+ ctx->curr_frag_cnt++;
+ rx->cnt++;
+ idx = rx->cnt & rx->mask;
+ work_cnt++;
+ desc = &rx->desc.desc_ring[idx];
}
if (likely(feat & NETIF_F_RXCSUM)) {
/* NIC passes up the partial sum */
- if (rx_desc->csum)
+ if (first_desc->csum)
skb->ip_summed = CHECKSUM_COMPLETE;
else
skb->ip_summed = CHECKSUM_NONE;
- skb->csum = csum_unfold(rx_desc->csum);
+ skb->csum = csum_unfold(first_desc->csum);
}
/* parse flags & pass relevant info up */
if (likely(feat & NETIF_F_RXHASH) &&
- gve_needs_rss(rx_desc->flags_seq))
- skb_set_hash(skb, be32_to_cpu(rx_desc->rss_hash),
- gve_rss_type(rx_desc->flags_seq));
+ gve_needs_rss(first_desc->flags_seq))
+ skb_set_hash(skb, be32_to_cpu(first_desc->rss_hash),
+ gve_rss_type(first_desc->flags_seq));
+ *packet_size_bytes = skb->len + (skb->protocol ? ETH_HLEN : 0);
+ *work_done = work_cnt;
if (skb_is_nonlinear(skb))
napi_gro_frags(napi);
else
napi_gro_receive(napi, skb);
+
+ gve_rx_ctx_clear(ctx);
return true;
+
+skb_alloc_fail:
+ if (napi->skb)
+ napi_free_frags(napi);
+ *packet_size_bytes = 0;
+ *work_done = ctx->expected_frag_cnt;
+ while (ctx->curr_frag_cnt < ctx->expected_frag_cnt) {
+ rx->cnt++;
+ ctx->curr_frag_cnt++;
+ }
+ gve_rx_ctx_clear(ctx);
+ return false;
}
-static bool gve_rx_work_pending(struct gve_rx_ring *rx)
+bool gve_rx_work_pending(struct gve_rx_ring *rx)
{
struct gve_rx_desc *desc;
__be16 flags_seq;
@@ -499,7 +671,7 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
* owns half the page it is impossible to tell which half. Either
* the whole page is free or it needs to be replaced.
*/
- int recycle = gve_rx_can_recycle_buffer(page_info->page);
+ int recycle = gve_rx_can_recycle_buffer(page_info);
if (recycle < 0) {
if (!rx->data.raw_addressing)
@@ -511,11 +683,15 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
union gve_rx_data_slot *data_slot =
&rx->data.data_ring[idx];
struct device *dev = &priv->pdev->dev;
-
gve_rx_free_buffer(dev, page_info, data_slot);
page_info->page = NULL;
- if (gve_rx_alloc_buffer(priv, dev, page_info, data_slot))
+ if (gve_rx_alloc_buffer(priv, dev, page_info,
+ data_slot)) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rx_buf_alloc_fail++;
+ u64_stats_update_end(&rx->statss);
break;
+ }
}
}
fill_cnt++;
@@ -524,19 +700,20 @@ static bool gve_rx_refill_buffers(struct gve_priv *priv, struct gve_rx_ring *rx)
return true;
}
-bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
- netdev_features_t feat)
+static int gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
+ netdev_features_t feat)
{
+ u32 work_done = 0, total_packet_cnt = 0, ok_packet_cnt = 0;
struct gve_priv *priv = rx->gve;
- u32 work_done = 0, packets = 0;
+ u32 idx = rx->cnt & rx->mask;
struct gve_rx_desc *desc;
- u32 cnt = rx->cnt;
- u32 idx = cnt & rx->mask;
u64 bytes = 0;
- desc = rx->desc.desc_ring + idx;
+ desc = &rx->desc.desc_ring[idx];
while ((GVE_SEQNO(desc->flags_seq) == rx->desc.seqno) &&
work_done < budget) {
+ u64 packet_size_bytes = 0;
+ u32 work_cnt = 0;
bool dropped;
netif_info(priv, rx_status, priv->dev,
@@ -546,56 +723,57 @@ bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
"[%d] seqno=%d rx->desc.seqno=%d\n",
rx->q_num, GVE_SEQNO(desc->flags_seq),
rx->desc.seqno);
- dropped = !gve_rx(rx, desc, feat, idx);
+
+ dropped = !gve_rx(rx, feat, &packet_size_bytes, &work_cnt);
if (!dropped) {
- bytes += be16_to_cpu(desc->len) - GVE_RX_PAD;
- packets++;
+ bytes += packet_size_bytes;
+ ok_packet_cnt++;
}
- cnt++;
- idx = cnt & rx->mask;
- desc = rx->desc.desc_ring + idx;
- rx->desc.seqno = gve_next_seqno(rx->desc.seqno);
- work_done++;
+ total_packet_cnt++;
+ idx = rx->cnt & rx->mask;
+ desc = &rx->desc.desc_ring[idx];
+ work_done += work_cnt;
}
- if (!work_done && rx->fill_cnt - cnt > rx->db_threshold)
- return false;
+ if (!work_done && rx->fill_cnt - rx->cnt > rx->db_threshold)
+ return 0;
- u64_stats_update_begin(&rx->statss);
- rx->rpackets += packets;
- rx->rbytes += bytes;
- u64_stats_update_end(&rx->statss);
- rx->cnt = cnt;
+ if (work_done) {
+ u64_stats_update_begin(&rx->statss);
+ rx->rpackets += ok_packet_cnt;
+ rx->rbytes += bytes;
+ u64_stats_update_end(&rx->statss);
+ }
/* restock ring slots */
if (!rx->data.raw_addressing) {
/* In QPL mode buffs are refilled as the desc are processed */
rx->fill_cnt += work_done;
- } else if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ } else if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
/* In raw addressing mode buffs are only refilled if the avail
* falls below a threshold.
*/
if (!gve_rx_refill_buffers(priv, rx))
- return false;
+ return 0;
/* If we were not able to completely refill buffers, we'll want
* to schedule this queue for work again to refill buffers.
*/
- if (rx->fill_cnt - cnt <= rx->db_threshold) {
+ if (rx->fill_cnt - rx->cnt <= rx->db_threshold) {
gve_rx_write_doorbell(priv, rx);
- return true;
+ return budget;
}
}
gve_rx_write_doorbell(priv, rx);
- return gve_rx_work_pending(rx);
+ return total_packet_cnt;
}
-bool gve_rx_poll(struct gve_notify_block *block, int budget)
+int gve_rx_poll(struct gve_notify_block *block, int budget)
{
struct gve_rx_ring *rx = block->rx;
netdev_features_t feat;
- bool repoll = false;
+ int work_done = 0;
feat = block->napi.dev->features;
@@ -604,8 +782,7 @@ bool gve_rx_poll(struct gve_notify_block *block, int budget)
budget = INT_MAX;
if (budget > 0)
- repoll |= gve_clean_rx_done(rx, budget, feat);
- else
- repoll |= gve_rx_work_pending(rx);
- return repoll;
+ work_done = gve_clean_rx_done(rx, budget, feat);
+
+ return work_done;
}
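The RX rework replaces a get_page() per fragment with a page-reference "bias": a large block of references is taken once, a local counter is decremented when a fragment is handed to the stack, and a buffer may be recycled when page_count() has fallen back to the bias. A hedged sketch with a hypothetical "example_page_info"; the real driver also replenishes the bias when it is exhausted, which is omitted here:

#include <linux/limits.h>
#include <linux/mm.h>

struct example_page_info {
	struct page *page;
	int pagecnt_bias;
};

static void example_setup(struct example_page_info *pi, struct page *page)
{
	pi->page = page;
	page_ref_add(page, INT_MAX - 1);	/* page already holds one ref */
	pi->pagecnt_bias = INT_MAX;
}

static void example_give_to_stack(struct example_page_info *pi)
{
	pi->pagecnt_bias--;	/* cheaper than an atomic get_page() per frag */
}

static bool example_can_recycle(struct example_page_info *pi)
{
	/* Equal counts mean the network stack holds no extra references */
	return page_count(pi->page) == pi->pagecnt_bias;
}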
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8500621b2cd4..beb8bb079023 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -240,8 +240,8 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv, int idx)
rx->dqo.bufq.mask = buffer_queue_slots - 1;
rx->dqo.complq.num_free_slots = completion_queue_slots;
rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = min_t(s16, S16_MAX, buffer_queue_slots * 4);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
@@ -467,12 +467,12 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
static void gve_rx_free_skb(struct gve_rx_ring *rx)
{
- if (!rx->skb_head)
+ if (!rx->ctx.skb_head)
return;
- dev_kfree_skb_any(rx->skb_head);
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ dev_kfree_skb_any(rx->ctx.skb_head);
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
}
/* Chains multi skbs for single rx packet.
@@ -483,7 +483,7 @@ static int gve_rx_append_frags(struct napi_struct *napi,
u16 buf_len, struct gve_rx_ring *rx,
struct gve_priv *priv)
{
- int num_frags = skb_shinfo(rx->skb_tail)->nr_frags;
+ int num_frags = skb_shinfo(rx->ctx.skb_tail)->nr_frags;
if (unlikely(num_frags == MAX_SKB_FRAGS)) {
struct sk_buff *skb;
@@ -492,17 +492,17 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
- skb_shinfo(rx->skb_tail)->frag_list = skb;
- rx->skb_tail = skb;
+ skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ rx->ctx.skb_tail = skb;
num_frags = 0;
}
- if (rx->skb_tail != rx->skb_head) {
- rx->skb_head->len += buf_len;
- rx->skb_head->data_len += buf_len;
- rx->skb_head->truesize += priv->data_buffer_size_dqo;
+ if (rx->ctx.skb_tail != rx->ctx.skb_head) {
+ rx->ctx.skb_head->len += buf_len;
+ rx->ctx.skb_head->data_len += buf_len;
+ rx->ctx.skb_head->truesize += priv->data_buffer_size_dqo;
}
- skb_add_rx_frag(rx->skb_tail, num_frags,
+ skb_add_rx_frag(rx->ctx.skb_tail, num_frags,
buf_state->page_info.page,
buf_state->page_info.page_offset,
buf_len, priv->data_buffer_size_dqo);
@@ -556,7 +556,7 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
buf_len, DMA_FROM_DEVICE);
/* Append to current skb if one exists. */
- if (rx->skb_head) {
+ if (rx->ctx.skb_head) {
if (unlikely(gve_rx_append_frags(napi, buf_state, buf_len, rx,
priv)) != 0) {
goto error;
@@ -567,11 +567,11 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
}
if (eop && buf_len <= priv->rx_copybreak) {
- rx->skb_head = gve_rx_copy(priv->dev, napi,
- &buf_state->page_info, buf_len, 0);
- if (unlikely(!rx->skb_head))
+ rx->ctx.skb_head = gve_rx_copy(priv->dev, napi,
+ &buf_state->page_info, buf_len, 0, NULL);
+ if (unlikely(!rx->ctx.skb_head))
goto error;
- rx->skb_tail = rx->skb_head;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
u64_stats_update_begin(&rx->statss);
rx->rx_copied_pkt++;
@@ -583,12 +583,12 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- rx->skb_head = napi_get_frags(napi);
- if (unlikely(!rx->skb_head))
+ rx->ctx.skb_head = napi_get_frags(napi);
+ if (unlikely(!rx->ctx.skb_head))
goto error;
- rx->skb_tail = rx->skb_head;
+ rx->ctx.skb_tail = rx->ctx.skb_head;
- skb_add_rx_frag(rx->skb_head, 0, buf_state->page_info.page,
+ skb_add_rx_frag(rx->ctx.skb_head, 0, buf_state->page_info.page,
buf_state->page_info.page_offset, buf_len,
priv->data_buffer_size_dqo);
gve_dec_pagecnt_bias(&buf_state->page_info);
@@ -635,27 +635,27 @@ static int gve_rx_complete_skb(struct gve_rx_ring *rx, struct napi_struct *napi,
rx->gve->ptype_lut_dqo->ptypes[desc->packet_type];
int err;
- skb_record_rx_queue(rx->skb_head, rx->q_num);
+ skb_record_rx_queue(rx->ctx.skb_head, rx->q_num);
if (feat & NETIF_F_RXHASH)
- gve_rx_skb_hash(rx->skb_head, desc, ptype);
+ gve_rx_skb_hash(rx->ctx.skb_head, desc, ptype);
if (feat & NETIF_F_RXCSUM)
- gve_rx_skb_csum(rx->skb_head, desc, ptype);
+ gve_rx_skb_csum(rx->ctx.skb_head, desc, ptype);
/* RSC packets must set gso_size otherwise the TCP stack will complain
* that packets are larger than MTU.
*/
if (desc->rsc) {
- err = gve_rx_complete_rsc(rx->skb_head, desc, ptype);
+ err = gve_rx_complete_rsc(rx->ctx.skb_head, desc, ptype);
if (err < 0)
return err;
}
- if (skb_headlen(rx->skb_head) == 0)
+ if (skb_headlen(rx->ctx.skb_head) == 0)
napi_gro_frags(napi);
else
- napi_gro_receive(napi, rx->skb_head);
+ napi_gro_receive(napi, rx->ctx.skb_head);
return 0;
}
@@ -717,18 +717,18 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* Free running counter of completed descriptors */
rx->cnt++;
- if (!rx->skb_head)
+ if (!rx->ctx.skb_head)
continue;
if (!compl_desc->end_of_packet)
continue;
work_done++;
- pkt_bytes = rx->skb_head->len;
+ pkt_bytes = rx->ctx.skb_head->len;
/* The ethernet header (first ETH_HLEN bytes) is snipped off
* by eth_type_trans.
*/
- if (skb_headlen(rx->skb_head))
+ if (skb_headlen(rx->ctx.skb_head))
pkt_bytes += ETH_HLEN;
/* gve_rx_complete_skb() will consume skb if successful */
@@ -741,8 +741,8 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
}
bytes += pkt_bytes;
- rx->skb_head = NULL;
- rx->skb_tail = NULL;
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
}
gve_rx_post_buffers_dqo(rx);
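For DQO, a packet spanning more than MAX_SKB_FRAGS buffers keeps growing by hanging a fresh skb off the tail's frag_list while the head accounts the total length; the hunks above only move that state into rx->ctx. A hedged sketch of the append step; "example_append_frag" and its parameters are illustrative, not the driver's exact helper:

#include <linux/errno.h>
#include <linux/skbuff.h>

static int example_append_frag(struct napi_struct *napi,
			       struct sk_buff *head, struct sk_buff **tail,
			       struct page *page, unsigned int off,
			       unsigned int len, unsigned int buf_size)
{
	int nr = skb_shinfo(*tail)->nr_frags;

	if (nr == MAX_SKB_FRAGS) {
		struct sk_buff *skb = napi_alloc_skb(napi, 0);

		if (!skb)
			return -ENOMEM;
		skb_shinfo(*tail)->frag_list = skb;	/* chain a new container */
		*tail = skb;
		nr = 0;
	}
	if (*tail != head) {
		/* With chained skbs the head carries the packet totals */
		head->len += len;
		head->data_len += len;
		head->truesize += buf_size;
	}
	skb_add_rx_frag(*tail, nr, page, off, len, buf_size);
	return 0;
}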
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 665ac795a1ad..a9cb241fedf4 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -144,7 +144,7 @@ static void gve_tx_free_ring(struct gve_priv *priv, int idx)
gve_tx_remove_from_block(priv, idx);
slots = tx->mask + 1;
- gve_clean_tx_done(priv, tx, tx->req, false);
+ gve_clean_tx_done(priv, tx, priv->tx_desc_cnt, false);
netdev_tx_reset_queue(tx->netdev_txq);
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -176,6 +176,7 @@ static int gve_tx_alloc_ring(struct gve_priv *priv, int idx)
/* Make sure everything is zeroed to start */
memset(tx, 0, sizeof(*tx));
+ spin_lock_init(&tx->clean_lock);
tx->q_num = idx;
tx->mask = slots - 1;
@@ -303,15 +304,15 @@ static inline int gve_skb_fifo_bytes_required(struct gve_tx_ring *tx,
static void gve_tx_unmap_buf(struct device *dev, struct gve_tx_buffer_state *info)
{
if (info->skb) {
- dma_unmap_single(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_single(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
} else {
- dma_unmap_page(dev, dma_unmap_addr(&info->buf, dma),
- dma_unmap_len(&info->buf, len),
+ dma_unmap_page(dev, dma_unmap_addr(info, dma),
+ dma_unmap_len(info, len),
DMA_TO_DEVICE);
- dma_unmap_len_set(&info->buf, len, 0);
+ dma_unmap_len_set(info, len, 0);
}
}
@@ -328,10 +329,16 @@ static inline bool gve_can_tx(struct gve_tx_ring *tx, int bytes_required)
return (gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED && can_alloc);
}
+static_assert(NAPI_POLL_WEIGHT >= MAX_TX_DESC_NEEDED);
+
/* Stops the queue if the skb cannot be transmitted. */
-static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
+static int gve_maybe_stop_tx(struct gve_priv *priv, struct gve_tx_ring *tx,
+ struct sk_buff *skb)
{
int bytes_required = 0;
+ u32 nic_done;
+ u32 to_do;
+ int ret;
if (!tx->raw_addressing)
bytes_required = gve_skb_fifo_bytes_required(tx, skb);
@@ -339,29 +346,28 @@ static int gve_maybe_stop_tx(struct gve_tx_ring *tx, struct sk_buff *skb)
if (likely(gve_can_tx(tx, bytes_required)))
return 0;
- /* No space, so stop the queue */
- tx->stop_queue++;
- netif_tx_stop_queue(tx->netdev_txq);
- smp_mb(); /* sync with restarting queue in gve_clean_tx_done() */
-
- /* Now check for resources again, in case gve_clean_tx_done() freed
- * resources after we checked and we stopped the queue after
- * gve_clean_tx_done() checked.
- *
- * gve_maybe_stop_tx() gve_clean_tx_done()
- * nsegs/can_alloc test failed
- * gve_tx_free_fifo()
- * if (tx queue stopped)
- * netif_tx_queue_wake()
- * netif_tx_stop_queue()
- * Need to check again for space here!
- */
- if (likely(!gve_can_tx(tx, bytes_required)))
- return -EBUSY;
+ ret = -EBUSY;
+ spin_lock(&tx->clean_lock);
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = nic_done - tx->done;
- netif_tx_start_queue(tx->netdev_txq);
- tx->wake_queue++;
- return 0;
+ /* Only try to clean if there is hope for TX */
+ if (to_do + gve_tx_avail(tx) >= MAX_TX_DESC_NEEDED) {
+ if (to_do > 0) {
+ to_do = min_t(u32, to_do, NAPI_POLL_WEIGHT);
+ gve_clean_tx_done(priv, tx, to_do, false);
+ }
+ if (likely(gve_can_tx(tx, bytes_required)))
+ ret = 0;
+ }
+ if (ret) {
+ /* No space, so stop the queue */
+ tx->stop_queue++;
+ netif_tx_stop_queue(tx->netdev_txq);
+ }
+ spin_unlock(&tx->clean_lock);
+
+ return ret;
}
static void gve_tx_fill_pkt_desc(union gve_tx_desc *pkt_desc,
@@ -491,7 +497,6 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
struct gve_tx_buffer_state *info;
bool is_gso = skb_is_gso(skb);
u32 idx = tx->req & tx->mask;
- struct gve_tx_dma_buf *buf;
u64 addr;
u32 len;
int i;
@@ -515,9 +520,8 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto drop;
}
- buf = &info->buf;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(info, len, len);
+ dma_unmap_addr_set(info, dma, addr);
payload_nfrags = shinfo->nr_frags;
if (hlen < len) {
@@ -549,10 +553,9 @@ static int gve_tx_add_skb_no_copy(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dma_mapping_error++;
goto unmap_drop;
}
- buf = &tx->info[idx].buf;
tx->info[idx].skb = NULL;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
+ dma_unmap_len_set(&tx->info[idx], len, len);
+ dma_unmap_addr_set(&tx->info[idx], dma, addr);
gve_tx_fill_seg_desc(seg_desc, skb, is_gso, len, addr);
}
@@ -579,7 +582,7 @@ netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev)
WARN(skb_get_queue_mapping(skb) >= priv->tx_cfg.num_queues,
"skb queue index out of range");
tx = &priv->tx[skb_get_queue_mapping(skb)];
- if (unlikely(gve_maybe_stop_tx(tx, skb))) {
+ if (unlikely(gve_maybe_stop_tx(priv, tx, skb))) {
/* We need to ring the txq doorbell -- we have stopped the Tx
* queue for want of resources, but prior calls to gve_tx()
* may have added descriptors without ringing the doorbell.
@@ -675,19 +678,19 @@ static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
return pkts;
}
-__be32 gve_tx_load_event_counter(struct gve_priv *priv,
- struct gve_tx_ring *tx)
+u32 gve_tx_load_event_counter(struct gve_priv *priv,
+ struct gve_tx_ring *tx)
{
- u32 counter_index = be32_to_cpu((tx->q_resources->counter_index));
+ u32 counter_index = be32_to_cpu(tx->q_resources->counter_index);
+ __be32 counter = READ_ONCE(priv->counter_array[counter_index]);
- return READ_ONCE(priv->counter_array[counter_index]);
+ return be32_to_cpu(counter);
}
bool gve_tx_poll(struct gve_notify_block *block, int budget)
{
struct gve_priv *priv = block->priv;
struct gve_tx_ring *tx = block->tx;
- bool repoll = false;
u32 nic_done;
u32 to_do;
@@ -695,17 +698,23 @@ bool gve_tx_poll(struct gve_notify_block *block, int budget)
if (budget == 0)
budget = INT_MAX;
+ /* The xmit path may also clean completed packets to make room to
+ * transmit. Use spin_lock() to avoid a cleaning conflict; it yields
+ * better concurrency between xmit and clean than netif's lock.
+ */
+ spin_lock(&tx->clean_lock);
/* Find out how much work there is to be done */
- tx->last_nic_done = gve_tx_load_event_counter(priv, tx);
- nic_done = be32_to_cpu(tx->last_nic_done);
- if (budget > 0) {
- /* Do as much work as we have that the budget will
- * allow
- */
- to_do = min_t(u32, (nic_done - tx->done), budget);
- gve_clean_tx_done(priv, tx, to_do, true);
- }
+ nic_done = gve_tx_load_event_counter(priv, tx);
+ to_do = min_t(u32, (nic_done - tx->done), budget);
+ gve_clean_tx_done(priv, tx, to_do, true);
+ spin_unlock(&tx->clean_lock);
/* If we still have work we want to repoll */
- repoll |= (nic_done != tx->done);
- return repoll;
+ return nic_done != tx->done;
+}
+
+bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx)
+{
+ u32 nic_done = gve_tx_load_event_counter(priv, tx);
+
+ return nic_done != tx->done;
}
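
Editor's note: gve_tx_load_event_counter() now hides the endianness handling: the NIC writes the completion count big-endian, the driver reads it once and returns host order, so callers compare it directly against tx->done. A standalone illustration of the read-once-and-convert idiom (plain C, hypothetical counter variable):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* ntohl()/htonl(): big-endian <-> host order */

/* Hypothetical stand-in for the NIC-written, big-endian event counter. */
static volatile uint32_t counter_be;

static uint32_t load_event_counter(void)
{
	uint32_t raw = counter_be;	/* single read, like READ_ONCE() */
	return ntohl(raw);		/* convert once, return host order */
}

int main(void)
{
	uint32_t done = 40;

	counter_be = htonl(42);		/* device "wrote" 42 completions */
	printf("pending completions: %u\n", load_event_counter() - done);
	return 0;
}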
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 05ddb6a75c38..ec394d991668 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -85,18 +85,16 @@ static void gve_tx_clean_pending_packets(struct gve_tx_ring *tx)
int j;
for (j = 0; j < cur_state->num_bufs; j++) {
- struct gve_tx_dma_buf *buf = &cur_state->bufs[j];
-
if (j == 0) {
dma_unmap_single(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
} else {
dma_unmap_page(tx->dev,
- dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
- DMA_TO_DEVICE);
+ dma_unmap_addr(cur_state, dma[j]),
+ dma_unmap_len(cur_state, len[j]),
+ DMA_TO_DEVICE);
}
}
if (cur_state->skb) {
@@ -457,15 +455,15 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
const bool is_gso = skb_is_gso(skb);
u32 desc_idx = tx->dqo_tx.tail;
- struct gve_tx_pending_packet_dqo *pending_packet;
+ struct gve_tx_pending_packet_dqo *pkt;
struct gve_tx_metadata_dqo metadata;
s16 completion_tag;
int i;
- pending_packet = gve_alloc_pending_packet(tx);
- pending_packet->skb = skb;
- pending_packet->num_bufs = 0;
- completion_tag = pending_packet - tx->dqo.pending_packets;
+ pkt = gve_alloc_pending_packet(tx);
+ pkt->skb = skb;
+ pkt->num_bufs = 0;
+ completion_tag = pkt - tx->dqo.pending_packets;
gve_extract_tx_metadata_dqo(skb, &metadata);
if (is_gso) {
@@ -493,8 +491,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
/* Map the linear portion of skb */
{
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
u32 len = skb_headlen(skb);
dma_addr_t addr;
@@ -502,9 +498,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag,
@@ -512,8 +508,6 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
}
for (i = 0; i < shinfo->nr_frags; i++) {
- struct gve_tx_dma_buf *buf =
- &pending_packet->bufs[pending_packet->num_bufs];
const skb_frag_t *frag = &shinfo->frags[i];
bool is_eop = i == (shinfo->nr_frags - 1);
u32 len = skb_frag_size(frag);
@@ -523,9 +517,9 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
if (unlikely(dma_mapping_error(tx->dev, addr)))
goto err;
- dma_unmap_len_set(buf, len, len);
- dma_unmap_addr_set(buf, dma, addr);
- ++pending_packet->num_bufs;
+ dma_unmap_len_set(pkt, len[pkt->num_bufs], len);
+ dma_unmap_addr_set(pkt, dma[pkt->num_bufs], addr);
+ ++pkt->num_bufs;
gve_tx_fill_pkt_desc_dqo(tx, &desc_idx, skb, len, addr,
completion_tag, is_eop, is_gso);
@@ -552,22 +546,23 @@ static int gve_tx_add_skb_no_copy_dqo(struct gve_tx_ring *tx,
return 0;
err:
- for (i = 0; i < pending_packet->num_bufs; i++) {
- struct gve_tx_dma_buf *buf = &pending_packet->bufs[i];
-
+ for (i = 0; i < pkt->num_bufs; i++) {
if (i == 0) {
- dma_unmap_single(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len),
+ dma_unmap_single(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
DMA_TO_DEVICE);
} else {
- dma_unmap_page(tx->dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_page(tx->dev,
+ dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]),
+ DMA_TO_DEVICE);
}
}
- pending_packet->skb = NULL;
- pending_packet->num_bufs = 0;
- gve_free_pending_packet(tx, pending_packet);
+ pkt->skb = NULL;
+ pkt->num_bufs = 0;
+ gve_free_pending_packet(tx, pkt);
return -1;
}
@@ -725,12 +720,12 @@ static void add_to_list(struct gve_tx_ring *tx, struct gve_index_list *list,
static void remove_from_list(struct gve_tx_ring *tx,
struct gve_index_list *list,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
s16 prev_index, next_index;
- prev_index = pending_packet->prev;
- next_index = pending_packet->next;
+ prev_index = pkt->prev;
+ next_index = pkt->next;
if (prev_index == -1) {
/* Node is head */
@@ -747,21 +742,18 @@ static void remove_from_list(struct gve_tx_ring *tx,
}
static void gve_unmap_packet(struct device *dev,
- struct gve_tx_pending_packet_dqo *pending_packet)
+ struct gve_tx_pending_packet_dqo *pkt)
{
- struct gve_tx_dma_buf *buf;
int i;
/* SKB linear portion is guaranteed to be mapped */
- buf = &pending_packet->bufs[0];
- dma_unmap_single(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
- for (i = 1; i < pending_packet->num_bufs; i++) {
- buf = &pending_packet->bufs[i];
- dma_unmap_page(dev, dma_unmap_addr(buf, dma),
- dma_unmap_len(buf, len), DMA_TO_DEVICE);
+ dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
+ dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
+ for (i = 1; i < pkt->num_bufs; i++) {
+ dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
+ dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
}
- pending_packet->num_bufs = 0;
+ pkt->num_bufs = 0;
}
/* Completion types and expected behavior:
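
Editor's note: in the DQO path the per-buffer DMA state moves from an array of struct gve_tx_dma_buf into indexed dma[]/len[] members of the pending packet, so dma_unmap_addr()/dma_unmap_len() now take an array subscript. A hedged sketch of how such a struct can be declared and unmapped; the layout and MAX_BUFS_PER_PKT bound are assumptions, the real definition lives in the driver's DQO header.

#define MAX_BUFS_PER_PKT 10	/* assumed bound for the sketch */

/* DEFINE_DMA_UNMAP_ADDR()/LEN() expand to plain members when
 * CONFIG_NEED_DMA_MAP_STATE is set, so array members work with the
 * dma_unmap_addr()/dma_unmap_len() accessors used above.
 */
struct pending_packet_sketch {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_BUFS_PER_PKT]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_BUFS_PER_PKT]);
	u16 num_bufs;
};

static void unmap_packet_sketch(struct device *dev,
				struct pending_packet_sketch *pkt)
{
	int i;

	/* First buffer is the skb linear part, the rest are page frags. */
	dma_unmap_single(dev, dma_unmap_addr(pkt, dma[0]),
			 dma_unmap_len(pkt, len[0]), DMA_TO_DEVICE);
	for (i = 1; i < pkt->num_bufs; i++)
		dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
			       dma_unmap_len(pkt, len[i]), DMA_TO_DEVICE);
	pkt->num_bufs = 0;
}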
diff --git a/drivers/net/ethernet/google/gve/gve_utils.c b/drivers/net/ethernet/google/gve/gve_utils.c
index 93f3dcbeeea9..88ca49cbc1e2 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.c
+++ b/drivers/net/ethernet/google/gve/gve_utils.c
@@ -18,12 +18,16 @@ void gve_tx_remove_from_block(struct gve_priv *priv, int queue_idx)
void gve_tx_add_to_block(struct gve_priv *priv, int queue_idx)
{
+ unsigned int active_cpus = min_t(int, priv->num_ntfy_blks / 2,
+ num_online_cpus());
int ntfy_idx = gve_tx_idx_to_ntfy(priv, queue_idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
struct gve_tx_ring *tx = &priv->tx[queue_idx];
block->tx = tx;
tx->ntfy_id = ntfy_idx;
+ netif_set_xps_queue(priv->dev, get_cpu_mask(ntfy_idx % active_cpus),
+ queue_idx);
}
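
Editor's note: gve_tx_add_to_block() now also programs XPS so each TX queue is steered from the CPU whose notification block services it, wrapping around when there are more notification blocks than usable CPUs (get_cpu_mask() supplies the actual cpumask in the driver). The mapping itself is plain modular arithmetic; a small standalone model with hypothetical numbers:

#include <stdio.h>

/* Model of the XPS mapping above: queue -> notify block -> CPU. */
static unsigned int xps_cpu(unsigned int ntfy_idx, unsigned int num_ntfy_blks,
			    unsigned int online_cpus)
{
	unsigned int active = num_ntfy_blks / 2;	/* half TX, half RX */

	if (online_cpus < active)
		active = online_cpus;
	return ntfy_idx % active;
}

int main(void)
{
	for (unsigned int q = 0; q < 8; q++)
		printf("tx queue %u -> cpu %u\n", q, xps_cpu(q, 16, 4));
	return 0;
}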
void gve_rx_remove_from_block(struct gve_priv *priv, int queue_idx)
@@ -46,20 +50,31 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx)
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad)
+ u16 padding, struct gve_rx_ctx *ctx)
{
- struct sk_buff *skb = napi_alloc_skb(napi, len);
- void *va = page_info->page_address + pad +
- page_info->page_offset;
-
- if (unlikely(!skb))
- return NULL;
-
+ void *va = page_info->page_address + padding + page_info->page_offset;
+ int skb_linear_offset = 0;
+ bool set_protocol = false;
+ struct sk_buff *skb;
+
+ if (ctx) {
+ if (!ctx->skb_head)
+ ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
+
+ if (unlikely(!ctx->skb_head))
+ return NULL;
+ skb = ctx->skb_head;
+ skb_linear_offset = skb->len;
+ set_protocol = ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1;
+ } else {
+ skb = napi_alloc_skb(napi, len);
+ set_protocol = true;
+ }
__skb_put(skb, len);
+ skb_copy_to_linear_data_offset(skb, skb_linear_offset, va, len);
- skb_copy_to_linear_data(skb, va, len);
-
- skb->protocol = eth_type_trans(skb, dev);
+ if (set_protocol)
+ skb->protocol = eth_type_trans(skb, dev);
return skb;
}
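
Editor's note: with the new gve_rx_ctx argument, gve_rx_copy() either builds a standalone skb (ctx == NULL, the old behaviour) or appends each fragment of a multi-descriptor packet to one pre-sized ctx->skb_head, setting the protocol only on the last fragment. A condensed sketch of the append path only (assumes the ctx fields shown in the hunk above; error handling trimmed):

static struct sk_buff *rx_copy_frag(struct napi_struct *napi,
				    struct net_device *dev,
				    struct gve_rx_ctx *ctx,
				    void *va, u16 len)
{
	struct sk_buff *skb;
	int offset;

	if (!ctx->skb_head)	/* first fragment: size skb for the whole packet */
		ctx->skb_head = napi_alloc_skb(napi, ctx->total_expected_size);
	if (unlikely(!ctx->skb_head))
		return NULL;

	skb = ctx->skb_head;
	offset = skb->len;			/* append after what is already there */
	__skb_put(skb, len);
	skb_copy_to_linear_data_offset(skb, offset, va, len);

	/* Only the last fragment finalizes the packet. */
	if (ctx->curr_frag_cnt == ctx->expected_frag_cnt - 1)
		skb->protocol = eth_type_trans(skb, dev);
	return skb;
}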
diff --git a/drivers/net/ethernet/google/gve/gve_utils.h b/drivers/net/ethernet/google/gve/gve_utils.h
index 79595940b351..6d98e69fd3b8 100644
--- a/drivers/net/ethernet/google/gve/gve_utils.h
+++ b/drivers/net/ethernet/google/gve/gve_utils.h
@@ -19,7 +19,7 @@ void gve_rx_add_to_block(struct gve_priv *priv, int queue_idx);
struct sk_buff *gve_rx_copy(struct net_device *dev, struct napi_struct *napi,
struct gve_rx_slot_page_info *page_info, u16 len,
- u16 pad);
+ u16 pad, struct gve_rx_ctx *ctx);
/* Decrement pagecnt_bias. Set it back to INT_MAX if it reached zero. */
void gve_dec_pagecnt_bias(struct gve_rx_slot_page_info *page_info);
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 37b605fed32c..c84ef494bd60 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -998,7 +998,7 @@ static int hip04_mac_probe(struct platform_device *pdev)
hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
hip04_config_fifo(priv);
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
hip04_update_mac_address(ndev);
ret = hip04_alloc_ring(ndev, d);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 22bf914f2dbd..a6c18b6527f9 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -427,7 +427,7 @@ static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
}
static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
- unsigned char *mac)
+ const unsigned char *mac)
{
u32 reg;
@@ -555,7 +555,7 @@ static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(skaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, skaddr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
@@ -841,7 +841,7 @@ static int hisi_femac_drv_probe(struct platform_device *pdev)
(unsigned long)phy->phy_id,
phy_modes(phy->interface));
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
dev_warn(dev, "using random MAC address %pM\n",
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index c1aae0fca5e9..d7e62eca050f 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -429,7 +429,7 @@ static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
{
struct hix5hd2_priv *priv = netdev_priv(dev);
- unsigned char *mac = dev->dev_addr;
+ const unsigned char *mac = dev->dev_addr;
u32 val;
val = mac[1] | (mac[0] << 8);
@@ -1219,7 +1219,7 @@ static int hix5hd2_dev_probe(struct platform_device *pdev)
goto out_phy_node;
}
- ret = of_get_mac_address(node, ndev->dev_addr);
+ ret = of_get_ethdev_address(node, ndev);
if (ret) {
eth_hw_addr_random(ndev);
netdev_warn(ndev, "using random MAC address %pM\n",
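
Editor's note: the hisilicon conversions above follow the tree-wide move to treat netdev->dev_addr as const: addresses are installed through eth_hw_addr_set()/of_get_ethdev_address() instead of memcpy()/of_get_mac_address(), with eth_hw_addr_random() as the fallback. A sketch of the typical probe-time and .ndo_set_mac_address pattern under those assumptions:

/* Sketch of the common pattern used by the drivers above. */
static void init_mac_address(struct device_node *node, struct net_device *ndev)
{
	/* Prefer the address from the device tree ... */
	if (of_get_ethdev_address(node, ndev)) {
		/* ... otherwise fall back to a random, locally administered one. */
		eth_hw_addr_random(ndev);
		netdev_warn(ndev, "using random MAC address %pM\n",
			    ndev->dev_addr);
	}
}

static int set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;
	eth_hw_addr_set(ndev, addr->sa_data);	/* replaces memcpy() into dev_addr */
	return 0;
}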
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 2b7db1c22321..d72657444ef3 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -499,7 +499,7 @@ struct hnae_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
int (*get_mac_addr)(struct hnae_handle *handle, void **p);
- int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*set_mac_addr)(struct hnae_handle *handle, const void *p);
int (*add_uc_addr)(struct hnae_handle *handle,
const unsigned char *addr);
int (*rm_uc_addr)(struct hnae_handle *handle,
@@ -558,7 +558,7 @@ struct hnae_handle {
enum hnae_media_type media_type;
struct list_head node; /* list to hnae_ae_dev->handle_list */
struct hnae_buf_ops *bops; /* operation for the buffer */
- struct hnae_queue **qs; /* array base of all queues */
+ struct hnae_queue *qs[]; /* flexible array of all queues */
};
#define ring_to_dev(ring) ((ring)->q->dev->dev)
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
index 75e4ec569da8..bc3e406f0139 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -81,8 +81,8 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
- vf_cb = kzalloc(sizeof(*vf_cb) +
- qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ vf_cb = kzalloc(struct_size(vf_cb, ae_handle.qs, qnum_per_vf),
+ GFP_KERNEL);
if (unlikely(!vf_cb)) {
dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
ae_handle = ERR_PTR(-ENOMEM);
@@ -108,7 +108,6 @@ static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
goto vf_id_err;
}
- ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
for (i = 0; i < qnum_per_vf; i++) {
ae_handle->qs[i] = &ring_pair_cb->q;
ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
@@ -207,7 +206,7 @@ static void hns_ae_fini_queue(struct hnae_queue *q)
hns_rcb_reset_ring_hw(q);
}
-static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+static int hns_ae_set_mac_address(struct hnae_handle *handle, const void *p)
{
int ret;
struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
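
Editor's note: replacing the `struct hnae_queue **qs` pointer with a flexible array member lets the handle and its queue pointers come from one allocation sized with struct_size() (which also saturates on multiplication overflow), and removes the old pointer fix-up that aimed qs just past the struct. The size arithmetic can be shown with a small standalone program using a hypothetical struct:

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

/* Hypothetical container mirroring the vf_cb/ae_handle shape above. */
struct handle {
	int vf_id;
	void *qs[];		/* flexible array member, must be last */
};

/* What struct_size(h, qs, n) computes in the kernel (minus overflow checks). */
static size_t handle_size(size_t n)
{
	return offsetof(struct handle, qs) + n * sizeof(void *);
}

int main(void)
{
	size_t n = 8;
	struct handle *h = calloc(1, handle_size(n));

	if (!h)
		return 1;
	printf("one allocation of %zu bytes holds the handle and %zu queue slots\n",
	       handle_size(n), n);
	free(h);
	return 0;
}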
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index f387a859a201..8f391e2adcc0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -450,7 +450,7 @@ static void hns_gmac_update_stats(void *mac_drv)
+= dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
}
-static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_gmac_set_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index f41379de2186..7edf8569514c 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -240,7 +240,7 @@ int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
*@addr:mac address
*/
int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
- u32 vmid, char *addr)
+ u32 vmid, const char *addr)
{
int ret;
struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index 8943ffab4418..e3bb05959ba9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -348,7 +348,7 @@ struct mac_driver {
/*disable mac when disable nic or dsaf*/
void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
/* config mac address*/
- void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ void (*set_mac_addr)(void *mac_drv, const char *mac_addr);
/*adjust mac mode of port,include speed and duplex*/
int (*adjust_link)(void *mac_drv, enum mac_speed speed,
u32 full_duplex);
@@ -425,7 +425,8 @@ int hns_mac_init(struct dsaf_device *dsaf_dev);
void mac_adjust_link(struct net_device *net_dev);
bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
-int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid,
+ const char *addr);
int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
u32 port_num, char *addr, bool enable);
int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index cba04bfa0b3f..5526a10caac5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -210,7 +210,7 @@ struct hnae_vf_cb {
u8 port_index;
struct hns_mac_cb *mac_cb;
struct dsaf_device *dsaf_dev;
- struct hnae_handle ae_handle; /* must be the last number */
+ struct hnae_handle ae_handle; /* must be the last member */
};
struct dsaf_int_xge_src {
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index 401fef5f1d07..fc26ffaae620 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -255,7 +255,7 @@ static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
}
-static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, const char *mac_addr)
{
struct mac_driver *drv = (struct mac_driver *)mac_drv;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 343c605c4be8..22a463e15678 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -1194,7 +1194,7 @@ static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
return ret;
}
- memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, mac_addr->sa_data);
return 0;
}
@@ -1212,7 +1212,7 @@ static void hns_init_mac_addr(struct net_device *ndev)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
+ if (device_get_ethdev_address(priv->dev, ndev)) {
eth_hw_addr_random(ndev);
dev_warn(priv->dev, "No valid mac, use random mac %pM",
ndev->dev_addr);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
index eef1b2764d34..67b0bf310daa 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -10,6 +10,27 @@ static LIST_HEAD(hnae3_ae_algo_list);
static LIST_HEAD(hnae3_client_list);
static LIST_HEAD(hnae3_ae_dev_list);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
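+ /* Editor's note: a plausible call site for the new helper, assumed for
+  * illustration and not taken from this series -- a PF driver's module
+  * exit would disable VFs on matching devices first, then unregister.
+  */
+ /*
+ * static void __exit sketch_pf_module_exit(void)
+ * {
+ *	hnae3_unregister_ae_algo_prepare(&ae_algo);  // pci_disable_sriov()
+ *	hnae3_unregister_ae_algo(&ae_algo);          // on every matching ae_dev
+ * }
+ */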
/* we are keeping things simple and using a single lock for all the
* lists. This is non-critical code, so other updates, if they happen
* in parallel, can wait.
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index 8ba21d6dc220..3f7a9a4c59d5 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -95,6 +95,7 @@ enum HNAE3_DEV_CAP_BITS {
HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B,
HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B,
HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B,
+ HNAE3_DEV_SUPPORT_MC_MAC_MNG_B,
};
#define hnae3_dev_fd_supported(hdev) \
@@ -151,6 +152,9 @@ enum HNAE3_DEV_CAP_BITS {
#define hnae3_ae_dev_rxd_adv_layout_supported(ae_dev) \
test_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, (ae_dev)->caps)
+#define hnae3_ae_dev_mc_mac_mng_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, (ae_dev)->caps)
+
enum HNAE3_PF_CAP_BITS {
HNAE3_PF_SUPPORT_VLAN_FLTR_MDF_B = 0,
};
@@ -294,6 +298,8 @@ enum hnae3_dbg_cmd {
HNAE3_DBG_CMD_MAC_TNL_STATUS,
HNAE3_DBG_CMD_SERV_INFO,
HNAE3_DBG_CMD_UMV_INFO,
+ HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ HNAE3_DBG_CMD_COAL_INFO,
HNAE3_DBG_CMD_UNKNOWN,
};
@@ -341,6 +347,9 @@ struct hnae3_dev_specs {
u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
u16 max_frm_size;
u16 max_qset_num;
+ u16 umv_size;
+ u16 mc_mac_size;
+ u32 mac_stats_num;
};
struct hnae3_client_ops {
@@ -588,7 +597,7 @@ struct hnae3_ae_ops {
u32 *tx_usecs_high, u32 *rx_usecs_high);
void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
- int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ int (*set_mac_addr)(struct hnae3_handle *handle, const void *p,
bool is_first);
int (*do_ioctl)(struct hnae3_handle *handle,
struct ifreq *ifr, int cmd);
@@ -853,6 +862,7 @@ struct hnae3_handle {
int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 2b66c59f5eaf..67364ab63a1f 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -137,7 +137,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "uc",
.cmd = HNAE3_DBG_CMD_MAC_UC,
.dentry = HNS3_DBG_DENTRY_MAC,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -256,7 +256,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "tqp",
.cmd = HNAE3_DBG_CMD_REG_TQP,
.dentry = HNS3_DBG_DENTRY_REG,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_128KB,
.init = hns3_dbg_common_file_init,
},
{
@@ -298,7 +298,7 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.name = "fd_tcam",
.cmd = HNAE3_DBG_CMD_FD_TCAM,
.dentry = HNS3_DBG_DENTRY_FD,
- .buf_len = HNS3_DBG_READ_LEN,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
.init = hns3_dbg_common_file_init,
},
{
@@ -336,6 +336,20 @@ static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = {
.buf_len = HNS3_DBG_READ_LEN,
.init = hns3_dbg_common_file_init,
},
+ {
+ .name = "page_pool_info",
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN,
+ .init = hns3_dbg_common_file_init,
+ },
+ {
+ .name = "coalesce_info",
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dentry = HNS3_DBG_DENTRY_COMMON,
+ .buf_len = HNS3_DBG_READ_LEN_1MB,
+ .init = hns3_dbg_common_file_init,
+ },
};
static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
@@ -384,6 +398,26 @@ static struct hns3_dbg_cap_info hns3_dbg_cap[] = {
}
};
+static const struct hns3_dbg_item coal_info_items[] = {
+ { "VEC_ID", 2 },
+ { "ALGO_STATE", 2 },
+ { "PROFILE_ID", 2 },
+ { "CQE_MODE", 2 },
+ { "TUNE_STATE", 2 },
+ { "STEPS_LEFT", 2 },
+ { "STEPS_RIGHT", 2 },
+ { "TIRED", 2 },
+ { "SW_GL", 2 },
+ { "SW_QL", 2 },
+ { "HW_GL", 2 },
+ { "HW_QL", 2 },
+};
+
+static const char * const dim_cqe_mode_str[] = { "EQE", "CQE" };
+static const char * const dim_state_str[] = { "START", "IN_PROG", "APPLY" };
+static const char * const
+dim_tune_stat_str[] = { "ON_TOP", "TIRED", "RIGHT", "LEFT" };
+
static void hns3_dbg_fill_content(char *content, u16 len,
const struct hns3_dbg_item *items,
const char **result, u16 size)
@@ -405,6 +439,94 @@ static void hns3_dbg_fill_content(char *content, u16 len,
*pos++ = '\0';
}
+static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
+ char **result, int i, bool is_tx)
+{
+ unsigned int gl_offset, ql_offset;
+ struct hns3_enet_coalesce *coal;
+ unsigned int reg_val;
+ unsigned int j = 0;
+ struct dim *dim;
+ bool ql_enable;
+
+ if (is_tx) {
+ coal = &tqp_vector->tx_group.coal;
+ dim = &tqp_vector->tx_group.dim;
+ gl_offset = HNS3_VECTOR_GL1_OFFSET;
+ ql_offset = HNS3_VECTOR_TX_QL_OFFSET;
+ ql_enable = tqp_vector->tx_group.coal.ql_enable;
+ } else {
+ coal = &tqp_vector->rx_group.coal;
+ dim = &tqp_vector->rx_group.dim;
+ gl_offset = HNS3_VECTOR_GL0_OFFSET;
+ ql_offset = HNS3_VECTOR_RX_QL_OFFSET;
+ ql_enable = tqp_vector->rx_group.coal.ql_enable;
+ }
+
+ sprintf(result[j++], "%d", i);
+ sprintf(result[j++], "%s", dim_state_str[dim->state]);
+ sprintf(result[j++], "%u", dim->profile_ix);
+ sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
+ sprintf(result[j++], "%s",
+ dim_tune_stat_str[dim->tune_state]);
+ sprintf(result[j++], "%u", dim->steps_left);
+ sprintf(result[j++], "%u", dim->steps_right);
+ sprintf(result[j++], "%u", dim->tired);
+ sprintf(result[j++], "%u", coal->int_gl);
+ sprintf(result[j++], "%u", coal->int_ql);
+ reg_val = readl(tqp_vector->mask_addr + gl_offset) &
+ HNS3_VECTOR_GL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ if (ql_enable) {
+ reg_val = readl(tqp_vector->mask_addr + ql_offset) &
+ HNS3_VECTOR_QL_MASK;
+ sprintf(result[j++], "%u", reg_val);
+ } else {
+ sprintf(result[j++], "NA");
+ }
+}
+
+static void hns3_dump_coal_info(struct hnae3_handle *h, char *buf, int len,
+ int *pos, bool is_tx)
+{
+ char data_str[ARRAY_SIZE(coal_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(coal_info_items)];
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(coal_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "%s interrupt coalesce info:\n",
+ is_tx ? "tx" : "rx");
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ NULL, ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_get_coal_info(tqp_vector, result, i, is_tx);
+ hns3_dbg_fill_content(content, sizeof(content), coal_info_items,
+ (const char **)result,
+ ARRAY_SIZE(coal_info_items));
+ *pos += scnprintf(buf + *pos, len - *pos, "%s", content);
+ }
+}
+
+static int hns3_dbg_coal_info(struct hnae3_handle *h, char *buf, int len)
+{
+ int pos = 0;
+
+ hns3_dump_coal_info(h, buf, len, &pos, true);
+ pos += scnprintf(buf + pos, len - pos, "\n");
+ hns3_dump_coal_info(h, buf, len, &pos, false);
+
+ return 0;
+}
+
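
Editor's note: the new coalesce_info dump (and the page_pool_info dump added further below) reuses the driver's item-table pattern: a static array of { name, extra spacing } items defines the header, each row is rendered into a per-column string array, and hns3_dbg_fill_content() joins the columns into one line. A userspace approximation of that formatting step, with column names taken from coal_info_items above:

#include <stdio.h>
#include <string.h>

struct dbg_item {
	const char *name;
	unsigned int interval;	/* extra blanks appended after the column */
};

/* Approximation of hns3_dbg_fill_content(): each column is padded to the
 * header width plus its interval; result == NULL prints the header itself.
 */
static void fill_content(char *buf, size_t len, const struct dbg_item *items,
			 const char **result, size_t n)
{
	size_t pos = 0;

	for (size_t i = 0; i < n; i++) {
		const char *val = result ? result[i] : items[i].name;
		int width = (int)(strlen(items[i].name) + items[i].interval);

		if (pos >= len)
			break;
		pos += snprintf(buf + pos, len - pos, "%-*s", width, val);
	}
	if (pos < len)
		snprintf(buf + pos, len - pos, "\n");
}

int main(void)
{
	static const struct dbg_item items[] = {
		{ "VEC_ID", 2 }, { "ALGO_STATE", 2 }, { "SW_GL", 2 },
	};
	const char *row[] = { "0", "START", "20" };
	char line[128];

	fill_content(line, sizeof(line), items, NULL, 3);	/* header line */
	printf("%s", line);
	fill_content(line, sizeof(line), items, row, 3);	/* one data row */
	printf("%s", line);
	return 0;
}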
static const struct hns3_dbg_item tx_spare_info_items[] = {
{ "QUEUE_ID", 2 },
{ "COPYBREAK", 2 },
@@ -462,7 +584,7 @@ static const struct hns3_dbg_item rx_queue_info_items[] = {
{ "TAIL", 2 },
{ "HEAD", 2 },
{ "FBDNUM", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "COPYBREAK", 2 },
{ "RING_EN", 2 },
{ "RX_RING_EN", 2 },
@@ -565,7 +687,7 @@ static const struct hns3_dbg_item tx_queue_info_items[] = {
{ "HEAD", 2 },
{ "FBDNUM", 2 },
{ "OFFSET", 2 },
- { "PKTNUM", 2 },
+ { "PKTNUM", 5 },
{ "RING_EN", 2 },
{ "TX_RING_EN", 2 },
{ "BASE_ADDR", 10 },
@@ -790,13 +912,13 @@ static int hns3_dbg_rx_bd_info(struct hns3_dbg_data *d, char *buf, int len)
}
static const struct hns3_dbg_item tx_bd_info_items[] = {
- { "BD_IDX", 5 },
- { "ADDRESS", 2 },
+ { "BD_IDX", 2 },
+ { "ADDRESS", 13 },
{ "VLAN_TAG", 2 },
{ "SIZE", 2 },
{ "T_CS_VLAN_TSO", 2 },
{ "OT_VLAN_TAG", 3 },
- { "TV", 2 },
+ { "TV", 5 },
{ "OLT_VLAN_LEN", 2 },
{ "PAYLEN_OL4CS", 2 },
{ "BD_FE_SC_VLD", 2 },
@@ -924,6 +1046,12 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->max_tm_rate);
*pos += scnprintf(buf + *pos, len - *pos, "MAX QSET number: %u\n",
dev_specs->max_qset_num);
+ *pos += scnprintf(buf + *pos, len - *pos, "umv size: %u\n",
+ dev_specs->umv_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "mc mac size: %u\n",
+ dev_specs->mc_mac_size);
+ *pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
+ dev_specs->mac_stats_num);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -937,6 +1065,69 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
return 0;
}
+static const struct hns3_dbg_item page_pool_info_items[] = {
+ { "QUEUE_ID", 2 },
+ { "ALLOCATE_CNT", 2 },
+ { "FREE_CNT", 6 },
+ { "POOL_SIZE(PAGE_NUM)", 2 },
+ { "ORDER", 2 },
+ { "NUMA_ID", 2 },
+ { "MAX_LEN", 2 },
+};
+
+static void hns3_dump_page_pool_info(struct hns3_enet_ring *ring,
+ char **result, u32 index)
+{
+ u32 j = 0;
+
+ sprintf(result[j++], "%u", index);
+ sprintf(result[j++], "%u", ring->page_pool->pages_state_hold_cnt);
+ sprintf(result[j++], "%u",
+ atomic_read(&ring->page_pool->pages_state_release_cnt));
+ sprintf(result[j++], "%u", ring->page_pool->p.pool_size);
+ sprintf(result[j++], "%u", ring->page_pool->p.order);
+ sprintf(result[j++], "%d", ring->page_pool->p.nid);
+ sprintf(result[j++], "%uK", ring->page_pool->p.max_len / 1024);
+}
+
+static int
+hns3_dbg_page_pool_info(struct hnae3_handle *h, char *buf, int len)
+{
+ char data_str[ARRAY_SIZE(page_pool_info_items)][HNS3_DBG_DATA_STR_LEN];
+ char *result[ARRAY_SIZE(page_pool_info_items)];
+ struct hns3_nic_priv *priv = h->priv;
+ char content[HNS3_DBG_INFO_LEN];
+ struct hns3_enet_ring *ring;
+ int pos = 0;
+ u32 i;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(page_pool_info_items); i++)
+ result[i] = &data_str[i][0];
+
+ hns3_dbg_fill_content(content, sizeof(content), page_pool_info_items,
+ NULL, ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ hns3_dump_page_pool_info(ring, result, i);
+ hns3_dbg_fill_content(content, sizeof(content),
+ page_pool_info_items,
+ (const char **)result,
+ ARRAY_SIZE(page_pool_info_items));
+ pos += scnprintf(buf + pos, len - pos, "%s", content);
+ }
+
+ return 0;
+}
+
static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index)
{
u32 i;
@@ -978,6 +1169,14 @@ static const struct hns3_dbg_func hns3_dbg_cmd_func[] = {
.cmd = HNAE3_DBG_CMD_TX_QUEUE_INFO,
.dbg_dump = hns3_dbg_tx_queue_info,
},
+ {
+ .cmd = HNAE3_DBG_CMD_PAGE_POOL_INFO,
+ .dbg_dump = hns3_dbg_page_pool_info,
+ },
+ {
+ .cmd = HNAE3_DBG_CMD_COAL_INFO,
+ .dbg_dump = hns3_dbg_coal_info,
+ },
};
static int hns3_dbg_read_cmd(struct hns3_dbg_data *dbg_data,
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 468b8f07bf47..a2b993d62822 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1847,7 +1847,6 @@ void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
static int hns3_skb_linearize(struct hns3_enet_ring *ring,
struct sk_buff *skb,
- u8 max_non_tso_bd_num,
unsigned int bd_num)
{
/* 'bd_num == UINT_MAX' means the skb's fraglist has a
@@ -1864,8 +1863,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
* will not help.
*/
if (skb->len > HNS3_MAX_TSO_SIZE ||
- (!skb_is_gso(skb) && skb->len >
- HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num))) {
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
u64_stats_update_begin(&ring->syncp);
ring->stats.hw_limitation++;
u64_stats_update_end(&ring->syncp);
@@ -1900,8 +1898,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
goto out;
}
- if (hns3_skb_linearize(ring, skb, max_non_tso_bd_num,
- bd_num))
+ if (hns3_skb_linearize(ring, skb, bd_num))
return -ENOMEM;
bd_num = hns3_tx_bd_count(skb->len);
@@ -2287,7 +2284,7 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
return ret;
}
- ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+ eth_hw_addr_set(netdev, mac_addr->sa_data);
return 0;
}
@@ -3258,6 +3255,7 @@ static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
}
static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
@@ -3336,6 +3334,7 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
+ ring->desc_cb[i].refill = 1;
return 0;
}
@@ -3365,6 +3364,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
{
hns3_unmap_buffer(ring, &ring->desc_cb[i]);
ring->desc_cb[i] = *res_cb;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3373,6 +3373,7 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
ring->desc_cb[i].page_offset);
ring->desc[i].rx.bd_base_info = 0;
@@ -3479,10 +3480,14 @@ static int hns3_desc_unused(struct hns3_enet_ring *ring)
int ntc = ring->next_to_clean;
int ntu = ring->next_to_use;
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
-static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
int cleand_count)
{
struct hns3_desc_cb *desc_cb;
@@ -3507,7 +3512,10 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
hns3_rl_err(ring_to_netdev(ring),
"alloc rx buffer failed: %d\n",
ret);
- break;
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
}
hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
@@ -3520,6 +3528,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
}
writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
}
static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
@@ -3824,6 +3833,7 @@ static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
{
ring->desc[ring->next_to_clean].rx.bd_base_info &=
cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
ring->next_to_clean += 1;
if (unlikely(ring->next_to_clean == ring->desc_num))
@@ -4170,6 +4180,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
int recv_pkts = 0;
int err;
@@ -4178,9 +4189,9 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
while (recv_pkts < budget) {
/* Reuse or realloc buffers */
if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
- hns3_nic_alloc_rx_buffers(ring, unused_count);
- unused_count = hns3_desc_unused(ring) -
- ring->pending_buf;
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
}
/* Poll one pkt */
@@ -4199,11 +4210,7 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
}
out:
- /* Make all data has been write before submit */
- if (unused_count > 0)
- hns3_nic_alloc_rx_buffers(ring, unused_count);
-
- return recv_pkts;
+ return failure ? budget : recv_pkts;
}
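
Editor's note: the new refill flag disambiguates the ntc == ntu case (ring completely empty vs. completely full), and hns3_clean_rx_ring() above returns the full budget when buffer allocation failed so NAPI keeps polling instead of stalling on an unreplenished ring. The occupancy computation can be checked with a small standalone model (hypothetical ring size):

#include <stdio.h>
#include <stdbool.h>

#define DESC_NUM 8	/* hypothetical ring size for the model */

/* Model of hns3_desc_unused(): when head == tail, the refill flag of the
 * next-to-clean descriptor tells "full" (refill set) from "empty" (cleared).
 */
static int desc_unused(int ntc, int ntu, bool ntc_refill)
{
	if (ntc == ntu && !ntc_refill)
		return DESC_NUM;		/* nothing refilled: all free */
	return ((ntc >= ntu) ? 0 : DESC_NUM) + ntc - ntu;
}

int main(void)
{
	printf("empty ring : %d unused\n", desc_unused(3, 3, false));
	printf("full ring  : %d unused\n", desc_unused(3, 3, true));
	printf("partial    : %d unused\n", desc_unused(5, 2, true));
	return 0;
}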
static void hns3_update_rx_int_coalesce(struct hns3_enet_tqp_vector *tqp_vector)
@@ -4933,7 +4940,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
dev_warn(priv->dev, "using random MAC address %pM\n",
netdev->dev_addr);
} else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
- ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ eth_hw_addr_set(netdev, mac_addr_temp);
ether_addr_copy(netdev->perm_addr, mac_addr_temp);
} else {
return 0;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index 6162d9f88e37..1715c98d906d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -186,17 +186,16 @@ enum hns3_nic_state {
#define HNS3_MAX_BD_SIZE 65535
#define HNS3_MAX_TSO_BD_NUM 63U
-#define HNS3_MAX_TSO_SIZE \
- (HNS3_MAX_BD_SIZE * HNS3_MAX_TSO_BD_NUM)
-
-#define HNS3_MAX_NON_TSO_SIZE(max_non_tso_bd_num) \
- (HNS3_MAX_BD_SIZE * (max_non_tso_bd_num))
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
+#define HNS3_VECTOR_GL_MASK GENMASK(11, 0)
#define HNS3_VECTOR_GL0_OFFSET 0x100
#define HNS3_VECTOR_GL1_OFFSET 0x200
#define HNS3_VECTOR_GL2_OFFSET 0x300
#define HNS3_VECTOR_RL_OFFSET 0x900
#define HNS3_VECTOR_RL_EN_B 6
+#define HNS3_VECTOR_QL_MASK GENMASK(9, 0)
#define HNS3_VECTOR_TX_QL_OFFSET 0xe00
#define HNS3_VECTOR_RX_QL_OFFSET 0xf00
@@ -332,6 +331,7 @@ struct hns3_desc_cb {
u32 length; /* length of the buffer */
u16 reuse_flag;
+ u16 refill;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
index 9c2eeaa82294..c327df9dbac4 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -482,6 +482,7 @@ static int hclge_firmware_compat_config(struct hclge_dev *hdev, bool en)
hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
if (hnae3_dev_phy_imp_supported(hdev))
hnae3_set_bit(compat, HCLGE_PHY_IMP_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_MAC_STATS_EXT_EN_B, 1);
req->compat = cpu_to_le32(compat);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
index 33244472e0d0..c38b57fc6c6a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -1150,6 +1150,7 @@ struct hclge_query_ppu_pf_other_int_dfx_cmd {
#define HCLGE_LINK_EVENT_REPORT_EN_B 0
#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
#define HCLGE_PHY_IMP_EN_B 2
+#define HCLGE_MAC_STATS_EXT_EN_B 3
struct hclge_firmware_compat_cmd {
__le32 compat;
u8 rsv[20];
@@ -1188,7 +1189,10 @@ struct hclge_dev_specs_1_cmd {
__le16 max_frm_size;
__le16 max_qset_num;
__le16 max_int_gl;
- u8 rsv1[18];
+ u8 rsv0[2];
+ __le16 umv_size;
+ __le16 mc_mac_size;
+ u8 rsv1[12];
};
/* mac speed type defined in firmware command */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index 307c9e830510..91cb578f56b8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -137,6 +137,15 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
*changed = true;
break;
case IEEE_8021QAZ_TSA_ETS:
+ /* The hardware switches the TC to SP mode if its bandwidth is
+ * 0, so the ETS bandwidth limit must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
if (hdev->tm_info.tc_info[i].tc_sch_mode !=
HCLGE_SCH_MODE_DWRR)
*changed = true;
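
Editor's note: the new check rejects any ETS TC whose configured bandwidth is zero, because the hardware silently switches such a TC to strict-priority scheduling. A minimal standalone validator over a bandwidth table shows the rule; the TC count and the companion "weights sum to 100" check are assumptions for the sketch, not taken from this hunk.

#include <stdio.h>

#define TC_NUM 4	/* hypothetical number of ETS TCs */

/* 0 on success, -1 if any ETS TC has zero bandwidth or the sum is not 100. */
static int validate_ets_bw(const unsigned int bw[TC_NUM])
{
	unsigned int total = 0;

	for (int i = 0; i < TC_NUM; i++) {
		if (!bw[i]) {
			fprintf(stderr, "tc%d ets bw cannot be 0\n", i);
			return -1;
		}
		total += bw[i];
	}
	return total == 100 ? 0 : -1;	/* assumed companion check */
}

int main(void)
{
	unsigned int good[TC_NUM] = { 25, 25, 25, 25 };
	unsigned int bad[TC_NUM]  = { 50, 50, 0, 0 };

	printf("good table -> %d\n", validate_ets_bw(good));
	printf("bad table  -> %d\n", validate_ets_bw(bad));
	return 0;
}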
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 32f62cd2dd99..4e0a8c2f7c05 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -391,7 +391,7 @@ static int hclge_dbg_dump_mac(struct hclge_dev *hdev, char *buf, int len)
static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u16 qset_id, qset_num;
int ret;
@@ -408,12 +408,12 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%04u %#x %#x %#x %#x\n",
- qset_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2, bitmap->bit3);
+ qset_id, req.bit0, req.bit1, req.bit2,
+ req.bit3);
}
return 0;
@@ -422,7 +422,7 @@ static int hclge_dbg_dump_dcb_qset(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pri_id, pri_num;
int ret;
@@ -439,12 +439,11 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pri_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pri_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -453,7 +452,7 @@ static int hclge_dbg_dump_dcb_pri(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 pg_id;
int ret;
@@ -466,12 +465,11 @@ static int hclge_dbg_dump_dcb_pg(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos,
"%03u %#x %#x %#x\n",
- pg_id, bitmap->bit0, bitmap->bit1,
- bitmap->bit2);
+ pg_id, req.bit0, req.bit1, req.bit2);
}
return 0;
@@ -511,7 +509,7 @@ static int hclge_dbg_dump_dcb_queue(struct hclge_dev *hdev, char *buf, int len,
static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
int *pos)
{
- struct hclge_dbg_bitmap_cmd *bitmap;
+ struct hclge_dbg_bitmap_cmd req;
struct hclge_desc desc;
u8 port_id = 0;
int ret;
@@ -521,12 +519,12 @@ static int hclge_dbg_dump_dcb_port(struct hclge_dev *hdev, char *buf, int len,
if (ret)
return ret;
- bitmap = (struct hclge_dbg_bitmap_cmd *)&desc.data[1];
+ req.bitmap = (u8)le32_to_cpu(desc.data[1]);
*pos += scnprintf(buf + *pos, len - *pos, "port_mask: %#x\n",
- bitmap->bit0);
+ req.bit0);
*pos += scnprintf(buf + *pos, len - *pos, "port_shaping_pass: %#x\n",
- bitmap->bit1);
+ req.bit1);
return 0;
}
@@ -1992,6 +1990,9 @@ static int hclge_dbg_dump_umv_info(struct hclge_dev *hdev, char *buf, int len)
}
mutex_unlock(&hdev->vport_lock);
+ pos += scnprintf(buf + pos, len - pos, "used_mc_mac_num : %u\n",
+ hdev->used_mc_mac_num);
+
return 0;
}
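
Editor's note: the dcb dumps above stop casting &desc.data[1] to a bitmap struct and instead copy the low byte of the little-endian descriptor word into a local struct, avoiding sparse endianness warnings and any dependence on in-place descriptor layout. A standalone model of reading flag bits out of a little-endian word (bit meanings hypothetical):

#include <stdio.h>
#include <stdint.h>

/* Model of hclge_dbg_bitmap_cmd: one byte viewed as individual flags. */
union bitmap_cmd {
	uint8_t bitmap;
	struct {
		uint8_t bit0 : 1;
		uint8_t bit1 : 1;
		uint8_t bit2 : 1;
		uint8_t bit3 : 1;
		uint8_t rsv  : 4;
	};
};

int main(void)
{
	/* Pretend this is the little-endian descriptor word from firmware. */
	uint32_t desc_data = 0x0000000d;	/* bits 0, 2, 3 set */
	union bitmap_cmd req;

	/* Copy the low byte instead of casting the descriptor in place
	 * (the kernel code uses le32_to_cpu() before the truncation).
	 */
	req.bitmap = (uint8_t)desc_data;

	printf("bit0=%u bit1=%u bit2=%u bit3=%u\n",
	       req.bit0, req.bit1, req.bit2, req.bit3);
	return 0;
}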
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
index e4aad695abcc..4c441e6a5082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c
@@ -109,7 +109,6 @@ int hclge_devlink_init(struct hclge_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclge_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink = devlink_alloc(&hclge_devlink_ops,
sizeof(struct hclge_devlink_priv), &pdev->dev);
@@ -120,28 +119,15 @@ int hclge_devlink_init(struct hclge_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclge_devlink_uninit(struct hclge_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
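
Editor's note: with devlink_register() no longer returning an error and reload arming moved to devlink_set_features(), the init path shrinks to allocate, set features, register. A hedged sketch of the resulting shape (the priv layout and the ops pointer are assumptions; priv->hdev wiring is omitted):

static int sketch_devlink_init(struct hclge_dev *hdev,
			       const struct devlink_ops *ops)
{
	struct devlink *devlink;

	devlink = devlink_alloc(ops, sizeof(struct hclge_devlink_priv),
				&hdev->pdev->dev);
	if (!devlink)
		return -ENOMEM;

	hdev->devlink = devlink;
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);		/* cannot fail any more */
	return 0;
}

static void sketch_devlink_uninit(struct hclge_dev *hdev)
{
	devlink_unregister(hdev->devlink);
	devlink_free(hdev->devlink);
}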
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
index bb9b026ae88e..20e628c2bd44 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -1243,6 +1243,9 @@ static const struct hclge_hw_module_id hclge_hw_module_id_st[] = {
.module_id = MODULE_MASTER,
.msg = "MODULE_MASTER"
}, {
+ .module_id = MODULE_HIMAC,
+ .msg = "MODULE_HIMAC"
+ }, {
.module_id = MODULE_ROCEE_TOP,
.msg = "MODULE_ROCEE_TOP"
}, {
@@ -1316,12 +1319,21 @@ static const struct hclge_hw_type_id hclge_hw_type_id_st[] = {
.type_id = GLB_ERROR,
.msg = "glb_error"
}, {
+ .type_id = LINK_ERROR,
+ .msg = "link_error"
+ }, {
+ .type_id = PTP_ERROR,
+ .msg = "ptp_error"
+ }, {
.type_id = ROCEE_NORMAL_ERR,
.msg = "rocee_normal_error"
}, {
.type_id = ROCEE_OVF_ERR,
.msg = "rocee_ovf_error"
- }
+ }, {
+ .type_id = ROCEE_BUS_ERR,
+ .msg = "rocee_bus_error"
+ },
};
static void hclge_log_error(struct device *dev, char *reg,
@@ -1560,8 +1572,11 @@ static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
/* configure TM QCN hw errors */
hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_QCN_MEM_INT_CFG, false);
- if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_QCN_ERR_INT_TYPE);
+ if (en) {
+ desc.data[0] |= cpu_to_le32(HCLGE_TM_QCN_FIFO_INT_EN);
desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+ }
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
index 07987fb8332e..86be6fb32990 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -50,6 +50,8 @@
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_ERR_INT_TYPE 0x29
+#define HCLGE_TM_QCN_FIFO_INT_EN 0xFFFF00
#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
#define HCLGE_NCSI_ERR_INT_EN 0x3
#define HCLGE_NCSI_ERR_INT_TYPE 0x9
@@ -136,6 +138,7 @@ enum hclge_mod_name_list {
MODULE_RCB_TX = 12,
MODULE_TXDMA = 13,
MODULE_MASTER = 14,
+ MODULE_HIMAC = 15,
/* add new MODULE NAME for NIC here in order */
MODULE_ROCEE_TOP = 40,
MODULE_ROCEE_TIMER = 41,
@@ -164,9 +167,12 @@ enum hclge_err_type_list {
ETS_ERROR = 10,
NCSI_ERROR = 11,
GLB_ERROR = 12,
+ LINK_ERROR = 13,
+ PTP_ERROR = 14,
/* add new ERROR TYPE for NIC here in order */
ROCEE_NORMAL_ERR = 40,
ROCEE_OVF_ERR = 41,
+ ROCEE_BUS_ERR = 42,
/* add new ERROR TYPE for ROCEE here in order */
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index f5b8d1fee0f1..2e41aa2d1df8 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -156,174 +156,210 @@ static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
};
static const struct hclge_comm_stats_str g_mac_stats_string[] = {
- {"mac_tx_mac_pause_num",
+ {"mac_tx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
- {"mac_rx_mac_pause_num",
+ {"mac_rx_mac_pause_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
- {"mac_tx_control_pkt_num",
+ {"mac_tx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pause_xoff_time)},
+ {"mac_rx_pause_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pause_xoff_time)},
+ {"mac_tx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
- {"mac_rx_control_pkt_num",
+ {"mac_rx_control_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
- {"mac_tx_pfc_pkt_num",
+ {"mac_tx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
- {"mac_tx_pfc_pri0_pkt_num",
+ {"mac_tx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
- {"mac_tx_pfc_pri1_pkt_num",
+ {"mac_tx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
- {"mac_tx_pfc_pri2_pkt_num",
+ {"mac_tx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
- {"mac_tx_pfc_pri3_pkt_num",
+ {"mac_tx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
- {"mac_tx_pfc_pri4_pkt_num",
+ {"mac_tx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
- {"mac_tx_pfc_pri5_pkt_num",
+ {"mac_tx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
- {"mac_tx_pfc_pri6_pkt_num",
+ {"mac_tx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
- {"mac_tx_pfc_pri7_pkt_num",
+ {"mac_tx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
- {"mac_rx_pfc_pkt_num",
+ {"mac_tx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_xoff_time)},
+ {"mac_tx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_xoff_time)},
+ {"mac_tx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_xoff_time)},
+ {"mac_tx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_xoff_time)},
+ {"mac_tx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_xoff_time)},
+ {"mac_tx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_xoff_time)},
+ {"mac_tx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_xoff_time)},
+ {"mac_tx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_xoff_time)},
+ {"mac_rx_pfc_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
- {"mac_rx_pfc_pri0_pkt_num",
+ {"mac_rx_pfc_pri0_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
- {"mac_rx_pfc_pri1_pkt_num",
+ {"mac_rx_pfc_pri1_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
- {"mac_rx_pfc_pri2_pkt_num",
+ {"mac_rx_pfc_pri2_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
- {"mac_rx_pfc_pri3_pkt_num",
+ {"mac_rx_pfc_pri3_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
- {"mac_rx_pfc_pri4_pkt_num",
+ {"mac_rx_pfc_pri4_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
- {"mac_rx_pfc_pri5_pkt_num",
+ {"mac_rx_pfc_pri5_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
- {"mac_rx_pfc_pri6_pkt_num",
+ {"mac_rx_pfc_pri6_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
- {"mac_rx_pfc_pri7_pkt_num",
+ {"mac_rx_pfc_pri7_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
- {"mac_tx_total_pkt_num",
+ {"mac_rx_pfc_pri0_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_xoff_time)},
+ {"mac_rx_pfc_pri1_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_xoff_time)},
+ {"mac_rx_pfc_pri2_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_xoff_time)},
+ {"mac_rx_pfc_pri3_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_xoff_time)},
+ {"mac_rx_pfc_pri4_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_xoff_time)},
+ {"mac_rx_pfc_pri5_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_xoff_time)},
+ {"mac_rx_pfc_pri6_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_xoff_time)},
+ {"mac_rx_pfc_pri7_xoff_time", HCLGE_MAC_STATS_MAX_NUM_V2,
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_xoff_time)},
+ {"mac_tx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
- {"mac_tx_total_oct_num",
+ {"mac_tx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
- {"mac_tx_good_pkt_num",
+ {"mac_tx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
- {"mac_tx_bad_pkt_num",
+ {"mac_tx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
- {"mac_tx_good_oct_num",
+ {"mac_tx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
- {"mac_tx_bad_oct_num",
+ {"mac_tx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
- {"mac_tx_uni_pkt_num",
+ {"mac_tx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
- {"mac_tx_multi_pkt_num",
+ {"mac_tx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
- {"mac_tx_broad_pkt_num",
+ {"mac_tx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
- {"mac_tx_undersize_pkt_num",
+ {"mac_tx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
- {"mac_tx_oversize_pkt_num",
+ {"mac_tx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
- {"mac_tx_64_oct_pkt_num",
+ {"mac_tx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
- {"mac_tx_65_127_oct_pkt_num",
+ {"mac_tx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
- {"mac_tx_128_255_oct_pkt_num",
+ {"mac_tx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
- {"mac_tx_256_511_oct_pkt_num",
+ {"mac_tx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
- {"mac_tx_512_1023_oct_pkt_num",
+ {"mac_tx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
- {"mac_tx_1024_1518_oct_pkt_num",
+ {"mac_tx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
- {"mac_tx_1519_2047_oct_pkt_num",
+ {"mac_tx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
- {"mac_tx_2048_4095_oct_pkt_num",
+ {"mac_tx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
- {"mac_tx_4096_8191_oct_pkt_num",
+ {"mac_tx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
- {"mac_tx_8192_9216_oct_pkt_num",
+ {"mac_tx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
- {"mac_tx_9217_12287_oct_pkt_num",
+ {"mac_tx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
- {"mac_tx_12288_16383_oct_pkt_num",
+ {"mac_tx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
- {"mac_tx_1519_max_good_pkt_num",
+ {"mac_tx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
- {"mac_tx_1519_max_bad_pkt_num",
+ {"mac_tx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
- {"mac_rx_total_pkt_num",
+ {"mac_rx_total_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
- {"mac_rx_total_oct_num",
+ {"mac_rx_total_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
- {"mac_rx_good_pkt_num",
+ {"mac_rx_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
- {"mac_rx_bad_pkt_num",
+ {"mac_rx_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
- {"mac_rx_good_oct_num",
+ {"mac_rx_good_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
- {"mac_rx_bad_oct_num",
+ {"mac_rx_bad_oct_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
- {"mac_rx_uni_pkt_num",
+ {"mac_rx_uni_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
- {"mac_rx_multi_pkt_num",
+ {"mac_rx_multi_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
- {"mac_rx_broad_pkt_num",
+ {"mac_rx_broad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
- {"mac_rx_undersize_pkt_num",
+ {"mac_rx_undersize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
- {"mac_rx_oversize_pkt_num",
+ {"mac_rx_oversize_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
- {"mac_rx_64_oct_pkt_num",
+ {"mac_rx_64_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
- {"mac_rx_65_127_oct_pkt_num",
+ {"mac_rx_65_127_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
- {"mac_rx_128_255_oct_pkt_num",
+ {"mac_rx_128_255_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
- {"mac_rx_256_511_oct_pkt_num",
+ {"mac_rx_256_511_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
- {"mac_rx_512_1023_oct_pkt_num",
+ {"mac_rx_512_1023_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
- {"mac_rx_1024_1518_oct_pkt_num",
+ {"mac_rx_1024_1518_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
- {"mac_rx_1519_2047_oct_pkt_num",
+ {"mac_rx_1519_2047_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
- {"mac_rx_2048_4095_oct_pkt_num",
+ {"mac_rx_2048_4095_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
- {"mac_rx_4096_8191_oct_pkt_num",
+ {"mac_rx_4096_8191_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
- {"mac_rx_8192_9216_oct_pkt_num",
+ {"mac_rx_8192_9216_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
- {"mac_rx_9217_12287_oct_pkt_num",
+ {"mac_rx_9217_12287_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
- {"mac_rx_12288_16383_oct_pkt_num",
+ {"mac_rx_12288_16383_oct_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
- {"mac_rx_1519_max_good_pkt_num",
+ {"mac_rx_1519_max_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
- {"mac_rx_1519_max_bad_pkt_num",
+ {"mac_rx_1519_max_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
- {"mac_tx_fragment_pkt_num",
+ {"mac_tx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
- {"mac_tx_undermin_pkt_num",
+ {"mac_tx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
- {"mac_tx_jabber_pkt_num",
+ {"mac_tx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
- {"mac_tx_err_all_pkt_num",
+ {"mac_tx_err_all_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
- {"mac_tx_from_app_good_pkt_num",
+ {"mac_tx_from_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
- {"mac_tx_from_app_bad_pkt_num",
+ {"mac_tx_from_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
- {"mac_rx_fragment_pkt_num",
+ {"mac_rx_fragment_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
- {"mac_rx_undermin_pkt_num",
+ {"mac_rx_undermin_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
- {"mac_rx_jabber_pkt_num",
+ {"mac_rx_jabber_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
- {"mac_rx_fcs_err_pkt_num",
+ {"mac_rx_fcs_err_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
- {"mac_rx_send_app_good_pkt_num",
+ {"mac_rx_send_app_good_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
- {"mac_rx_send_app_bad_pkt_num",
+ {"mac_rx_send_app_bad_pkt_num", HCLGE_MAC_STATS_MAX_NUM_V1,
HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
};
@@ -451,8 +487,9 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
__le64 *desc_data;
- int i, k, n;
+ u32 data_size;
int ret;
+ u32 i;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
@@ -463,33 +500,37 @@ static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
return ret;
}
- for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
- /* for special opcode 0032, only the first desc has the head */
- if (unlikely(i == 0)) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ /* The first desc has a 64-bit header, so subtract one from the data size */
+ data_size = sizeof(desc) / (sizeof(u64)) - 1;
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
return 0;
}
-static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev)
{
+#define HCLGE_REG_NUM_PER_DESC 4
+
+ u32 reg_num = hdev->ae_dev->dev_specs.mac_stats_num;
u64 *data = (u64 *)(&hdev->mac_stats);
struct hclge_desc *desc;
__le64 *desc_data;
- u16 i, k, n;
+ u32 data_size;
+ u32 desc_num;
int ret;
+ u32 i;
+
+ /* The first desc has a 64-bit header, so account for it here */
+ desc_num = reg_num / HCLGE_REG_NUM_PER_DESC + 1;
/* This may be called inside atomic sections,
* so GFP_ATOMIC is more suitable here
@@ -505,21 +546,16 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return ret;
}
- for (i = 0; i < desc_num; i++) {
- /* for special opcode 0034, only the first desc has the head */
- if (i == 0) {
- desc_data = (__le64 *)(&desc[i].data[0]);
- n = HCLGE_RD_FIRST_STATS_NUM;
- } else {
- desc_data = (__le64 *)(&desc[i]);
- n = HCLGE_RD_OTHER_STATS_NUM;
- }
+ data_size = min_t(u32, sizeof(hdev->mac_stats) / sizeof(u64), reg_num);
- for (k = 0; k < n; k++) {
- *data += le64_to_cpu(*desc_data);
- data++;
- desc_data++;
- }
+ desc_data = (__le64 *)(&desc[0].data[0]);
+ for (i = 0; i < data_size; i++) {
+ /* data memory is contiguous because only the first desc has a
+ * header in this command
+ */
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
}
kfree(desc);
@@ -527,42 +563,37 @@ static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
return 0;
}
-static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *reg_num)
{
struct hclge_desc desc;
- __le32 *desc_data;
- u32 reg_num;
int ret;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query mac statistic reg number, ret = %d\n",
+ ret);
return ret;
+ }
- desc_data = (__le32 *)(&desc.data[0]);
- reg_num = le32_to_cpu(*desc_data);
-
- *desc_num = 1 + ((reg_num - 3) >> 2) +
- (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+ *reg_num = le32_to_cpu(desc.data[0]);
+ if (*reg_num == 0) {
+ dev_err(&hdev->pdev->dev,
+ "mac statistic reg number is invalid!\n");
+ return -ENODATA;
+ }
return 0;
}
static int hclge_mac_update_stats(struct hclge_dev *hdev)
{
- u32 desc_num;
- int ret;
-
- ret = hclge_mac_query_reg_num(hdev, &desc_num);
/* The firmware supports the new statistics acquisition method */
- if (!ret)
- ret = hclge_mac_update_stats_complete(hdev, desc_num);
- else if (ret == -EOPNOTSUPP)
- ret = hclge_mac_update_stats_defective(hdev);
+ if (hdev->ae_dev->dev_specs.mac_stats_num)
+ return hclge_mac_update_stats_complete(hdev);
else
- dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
-
- return ret;
+ return hclge_mac_update_stats_defective(hdev);
}
static int hclge_tqps_update_stats(struct hnae3_handle *handle)
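A rough standalone illustration of the descriptor sizing used above, assuming only what the patch itself shows: each command descriptor carries HCLGE_REG_NUM_PER_DESC (4) 64-bit words, the first descriptor spends one of them on the header, and the firmware-reported register count drives how many descriptors are requested. The reg_num value below is just an example taken from HCLGE_MAC_STATS_MAX_NUM_V2; this is a sketch, not driver code.

#include <stdio.h>
#include <stdint.h>

#define REG_NUM_PER_DESC   4   /* four 64-bit words per command descriptor */

int main(void)
{
	/* example: HCLGE_MAC_STATS_MAX_NUM_V2 from the header change below */
	uint32_t reg_num = 105;

	/* The first desc has a 64-bit header, so one extra desc is needed. */
	uint32_t desc_num = reg_num / REG_NUM_PER_DESC + 1;

	/* Usable 64-bit slots across all descs, minus the single header word. */
	uint32_t capacity = desc_num * REG_NUM_PER_DESC - 1;

	printf("reg_num=%u -> desc_num=%u, capacity=%u\n",
	       reg_num, desc_num, capacity);   /* prints: 105 -> 27, 107 */
	return 0;
}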
@@ -670,20 +701,39 @@ static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
return buff;
}
-static u64 *hclge_comm_get_stats(const void *comm_stats,
+static int hclge_comm_get_count(struct hclge_dev *hdev,
+ const struct hclge_comm_stats_str strs[],
+ u32 size)
+{
+ int count = 0;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ if (strs[i].stats_num <= hdev->ae_dev->dev_specs.mac_stats_num)
+ count++;
+
+ return count;
+}
+
+static u64 *hclge_comm_get_stats(struct hclge_dev *hdev,
const struct hclge_comm_stats_str strs[],
int size, u64 *data)
{
u64 *buf = data;
u32 i;
- for (i = 0; i < size; i++)
- buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+ for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
+ *buf = HCLGE_STATS_READ(&hdev->mac_stats, strs[i].offset);
+ buf++;
+ }
- return buf + size;
+ return buf;
}
-static u8 *hclge_comm_get_strings(u32 stringset,
+static u8 *hclge_comm_get_strings(struct hclge_dev *hdev, u32 stringset,
const struct hclge_comm_stats_str strs[],
int size, u8 *data)
{
@@ -694,6 +744,9 @@ static u8 *hclge_comm_get_strings(u32 stringset,
return buff;
for (i = 0; i < size; i++) {
+ if (strs[i].stats_num > hdev->ae_dev->dev_specs.mac_stats_num)
+ continue;
+
snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
buff = buff + ETH_GSTRING_LEN;
}
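The helpers above (hclge_comm_get_count(), hclge_comm_get_stats(), hclge_comm_get_strings()) all apply the same filter: a table entry is exported only when its stats_num does not exceed the mac_stats_num reported by firmware. A minimal userspace sketch of that pattern follows; the structure layout and entries are made up for illustration and are not the driver's real tables.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Illustrative stand-ins for the driver structures; not the real layout. */
struct mac_stats {
	uint64_t tx_total;
	uint64_t rx_total;
	uint64_t tx_pfc0_xoff_time;
};

struct stats_desc {
	const char *name;
	uint32_t    stats_num;  /* register count of the oldest HW that has it */
	size_t      offset;
};

static const struct stats_desc table[] = {
	{ "tx_total",           84, offsetof(struct mac_stats, tx_total) },
	{ "rx_total",           84, offsetof(struct mac_stats, rx_total) },
	{ "tx_pfc0_xoff_time", 105, offsetof(struct mac_stats, tx_pfc0_xoff_time) },
};

int main(void)
{
	struct mac_stats stats = { 100, 200, 300 };
	uint32_t fw_reg_num = 84;   /* what an older firmware would report */
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].stats_num > fw_reg_num)
			continue;   /* counter does not exist on this HW */
		printf("%s = %llu\n", table[i].name, (unsigned long long)
		       *(uint64_t *)((char *)&stats + table[i].offset));
	}
	return 0;
}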
@@ -785,7 +838,8 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
}
} else if (stringset == ETH_SS_STATS) {
- count = ARRAY_SIZE(g_mac_stats_string) +
+ count = hclge_comm_get_count(hdev, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string)) +
hclge_tqps_get_sset_count(handle, stringset);
}
@@ -795,12 +849,14 @@ static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
u8 *data)
{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
u8 *p = (char *)data;
int size;
if (stringset == ETH_SS_STATS) {
size = ARRAY_SIZE(g_mac_stats_string);
- p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ p = hclge_comm_get_strings(hdev, stringset, g_mac_stats_string,
size, p);
p = hclge_tqps_get_strings(handle, p);
} else if (stringset == ETH_SS_TEST) {
@@ -834,7 +890,7 @@ static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
struct hclge_dev *hdev = vport->back;
u64 *p;
- p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ p = hclge_comm_get_stats(hdev, g_mac_stats_string,
ARRAY_SIZE(g_mac_stats_string), data);
p = hclge_tqps_get_stats(handle, p);
}
@@ -1037,96 +1093,100 @@ static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
return -EINVAL;
}
-static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_sr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_lr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(
ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_cr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
- mac->supported);
+ link_mode);
}
-static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+static void hclge_convert_setting_kr(u16 speed_ability,
+ unsigned long *link_mode)
{
if (speed_ability & HCLGE_SUPPORT_1G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_10G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_25G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_40G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_50G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_100G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
if (speed_ability & HCLGE_SUPPORT_200G_BIT)
linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
- mac->supported);
+ link_mode);
}
static void hclge_convert_setting_fec(struct hclge_mac *mac)
@@ -1170,9 +1230,9 @@ static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
mac->supported);
- hclge_convert_setting_sr(mac, speed_ability);
- hclge_convert_setting_lr(mac, speed_ability);
- hclge_convert_setting_cr(mac, speed_ability);
+ hclge_convert_setting_sr(speed_ability, mac->supported);
+ hclge_convert_setting_lr(speed_ability, mac->supported);
+ hclge_convert_setting_cr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@ -1188,7 +1248,7 @@ static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
{
struct hclge_mac *mac = &hdev->hw.mac;
- hclge_convert_setting_kr(mac, speed_ability);
+ hclge_convert_setting_kr(speed_ability, mac->supported);
if (hnae3_dev_fec_supported(hdev))
hclge_convert_setting_fec(mac);
@@ -1342,8 +1402,6 @@ static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
HCLGE_CFG_UMV_TBL_SPACE_M,
HCLGE_CFG_UMV_TBL_SPACE_S);
- if (!cfg->umv_space)
- cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
cfg->pf_rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[2]),
HCLGE_CFG_PF_RSS_SIZE_M,
@@ -1419,6 +1477,7 @@ static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
ae_dev->dev_specs.max_int_gl = HCLGE_DEF_MAX_INT_GL;
ae_dev->dev_specs.max_frm_size = HCLGE_MAC_MAX_FRAME;
ae_dev->dev_specs.max_qset_num = HCLGE_MAX_QSET_NUM;
+ ae_dev->dev_specs.umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
}
static void hclge_parse_dev_specs(struct hclge_dev *hdev,
@@ -1440,6 +1499,8 @@ static void hclge_parse_dev_specs(struct hclge_dev *hdev,
ae_dev->dev_specs.max_qset_num = le16_to_cpu(req1->max_qset_num);
ae_dev->dev_specs.max_int_gl = le16_to_cpu(req1->max_int_gl);
ae_dev->dev_specs.max_frm_size = le16_to_cpu(req1->max_frm_size);
+ ae_dev->dev_specs.umv_size = le16_to_cpu(req1->umv_size);
+ ae_dev->dev_specs.mc_mac_size = le16_to_cpu(req1->mc_mac_size);
}
static void hclge_check_dev_specs(struct hclge_dev *hdev)
@@ -1460,6 +1521,21 @@ static void hclge_check_dev_specs(struct hclge_dev *hdev)
dev_specs->max_int_gl = HCLGE_DEF_MAX_INT_GL;
if (!dev_specs->max_frm_size)
dev_specs->max_frm_size = HCLGE_MAC_MAX_FRAME;
+ if (!dev_specs->umv_size)
+ dev_specs->umv_size = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+static int hclge_query_mac_stats_num(struct hclge_dev *hdev)
+{
+ u32 reg_num = 0;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &reg_num);
+ if (ret && ret != -EOPNOTSUPP)
+ return ret;
+
+ hdev->ae_dev->dev_specs.mac_stats_num = reg_num;
+ return 0;
}
static int hclge_query_dev_specs(struct hclge_dev *hdev)
@@ -1468,6 +1544,10 @@ static int hclge_query_dev_specs(struct hclge_dev *hdev)
int ret;
int i;
+ ret = hclge_query_mac_stats_num(hdev);
+ if (ret)
+ return ret;
+
/* set default specifications as devices lower than version V3 do not
* support querying specifications from firmware.
*/
@@ -1549,7 +1629,10 @@ static int hclge_configure(struct hclge_dev *hdev)
hdev->tm_info.num_pg = 1;
hdev->tc_max = cfg.tc_num;
hdev->tm_info.hw_pfc_map = 0;
- hdev->wanted_umv_size = cfg.umv_space;
+ if (cfg.umv_space)
+ hdev->wanted_umv_size = cfg.umv_space;
+ else
+ hdev->wanted_umv_size = hdev->ae_dev->dev_specs.umv_size;
hdev->tx_spare_buf_size = cfg.tx_spare_buf_size;
hdev->gro_en = true;
if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF)
@@ -2847,33 +2930,29 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task, 0);
+ mod_delayed_work(hclge_wq, &hdev->service_task, 0);
}
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
- mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
- hclge_wq, &hdev->service_task,
- delay_time);
+ mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
}
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
@@ -2968,6 +3047,82 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
}
+static void hclge_update_speed_advertising(struct hclge_mac *mac)
+{
+ u32 speed_ability;
+
+ if (hclge_get_speed_bit(mac->speed, &speed_ability))
+ return;
+
+ switch (mac->module_type) {
+ case HNAE3_MODULE_TYPE_FIBRE_LR:
+ hclge_convert_setting_lr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_FIBRE_SR:
+ case HNAE3_MODULE_TYPE_AOC:
+ hclge_convert_setting_sr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_CR:
+ hclge_convert_setting_cr(speed_ability, mac->advertising);
+ break;
+ case HNAE3_MODULE_TYPE_KR:
+ hclge_convert_setting_kr(speed_ability, mac->advertising);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_update_fec_advertising(struct hclge_mac *mac)
+{
+ if (mac->fec_mode & BIT(HNAE3_FEC_RS))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->advertising);
+ else if (mac->fec_mode & BIT(HNAE3_FEC_BASER))
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->advertising);
+ else
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT,
+ mac->advertising);
+}
+
+static void hclge_update_pause_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ bool rx_en, tx_en;
+
+ switch (hdev->fc_mode_last_time) {
+ case HCLGE_FC_RX_PAUSE:
+ rx_en = true;
+ tx_en = false;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ rx_en = false;
+ tx_en = true;
+ break;
+ case HCLGE_FC_FULL:
+ rx_en = true;
+ tx_en = true;
+ break;
+ default:
+ rx_en = false;
+ tx_en = false;
+ break;
+ }
+
+ linkmode_set_pause(mac->advertising, tx_en, rx_en);
+}
+
+static void hclge_update_advertising(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ linkmode_zero(mac->advertising);
+ hclge_update_speed_advertising(mac);
+ hclge_update_fec_advertising(mac);
+ hclge_update_pause_advertising(hdev);
+}
+
static void hclge_update_port_capability(struct hclge_dev *hdev,
struct hclge_mac *mac)
{
@@ -2990,7 +3145,7 @@ static void hclge_update_port_capability(struct hclge_dev *hdev,
} else {
linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
mac->supported);
- linkmode_zero(mac->advertising);
+ hclge_update_advertising(hdev);
}
}
@@ -3491,33 +3646,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1;
}
-static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
- affinity_notify);
-
- cpumask_copy(&hdev->affinity_mask, mask);
-}
-
-static void hclge_irq_affinity_release(struct kref *ref)
-{
-}
-
static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{
irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask);
-
- hdev->affinity_notify.notify = hclge_irq_affinity_notify;
- hdev->affinity_notify.release = hclge_irq_affinity_release;
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
- &hdev->affinity_notify);
}
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{
- irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
}
@@ -8498,6 +8634,9 @@ static int hclge_init_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ if (hdev->ae_dev->dev_specs.mc_mac_size)
+ set_bit(HNAE3_DEV_SUPPORT_MC_MAC_MNG_B, hdev->ae_dev->caps);
+
return 0;
}
@@ -8515,6 +8654,8 @@ static void hclge_reset_umv_space(struct hclge_dev *hdev)
hdev->share_umv_size = hdev->priv_umv_size +
hdev->max_umv_size % (hdev->num_alloc_vport + 1);
mutex_unlock(&hdev->vport_lock);
+
+ hdev->used_mc_mac_num = 0;
}
static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
@@ -8769,6 +8910,7 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
struct hclge_dev *hdev = vport->back;
struct hclge_mac_vlan_tbl_entry_cmd req;
struct hclge_desc desc[3];
+ bool is_new_addr = false;
int status;
/* mac addr check */
@@ -8782,6 +8924,13 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
hclge_prepare_mac_addr(&req, addr, true);
status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
if (status) {
+ if (hnae3_ae_dev_mc_mac_mng_supported(hdev->ae_dev) &&
+ hdev->used_mc_mac_num >=
+ hdev->ae_dev->dev_specs.mc_mac_size)
+ goto err_no_space;
+
+ is_new_addr = true;
+
/* This mac addr does not exist, add new entry for it */
memset(desc[0].data, 0, sizeof(desc[0].data));
memset(desc[1].data, 0, sizeof(desc[0].data));
@@ -8791,12 +8940,18 @@ int hclge_add_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
- /* if already overflow, not to print each time */
- if (status == -ENOSPC &&
- !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
- dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ if (status == -ENOSPC)
+ goto err_no_space;
+ else if (!status && is_new_addr)
+ hdev->used_mc_mac_num++;
return status;
+
+err_no_space:
+ /* if the table has already overflowed, do not print the message each time */
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+ return -ENOSPC;
}
static int hclge_rm_mc_addr(struct hnae3_handle *handle,
@@ -8833,12 +8988,15 @@ int hclge_rm_mc_addr_common(struct hclge_vport *vport,
if (status)
return status;
- if (hclge_is_all_function_id_zero(desc))
+ if (hclge_is_all_function_id_zero(desc)) {
/* All the vfid is zero, so need to delete this entry */
status = hclge_remove_mac_vlan_tbl(vport, &req);
- else
+ if (!status)
+ hdev->used_mc_mac_num--;
+ } else {
/* Not all the vfid is zero, update the vfid */
status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+ }
} else if (status == -ENOENT) {
status = 0;
}
@@ -9414,7 +9572,7 @@ int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
return 0;
}
-static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclge_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
const unsigned char *new_addr = (const unsigned char *)p;
@@ -13052,7 +13210,7 @@ static int hclge_init(void)
{
pr_info("%s is initializing\n", HCLGE_NAME);
- hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM;
@@ -13065,6 +13223,7 @@ static int hclge_init(void)
static void hclge_exit(void)
{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
hnae3_unregister_ae_algo(&ae_algo);
destroy_workqueue(hclge_wq);
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index de6afbcbfbac..9e1eede599ec 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -403,8 +403,13 @@ struct hclge_tm_info {
u8 pfc_en; /* PFC enabled or not for user priority */
};
+/* max number of mac statistics on each version */
+#define HCLGE_MAC_STATS_MAX_NUM_V1 84
+#define HCLGE_MAC_STATS_MAX_NUM_V2 105
+
struct hclge_comm_stats_str {
char desc[ETH_GSTRING_LEN];
+ u32 stats_num;
unsigned long offset;
};
@@ -412,6 +417,7 @@ struct hclge_comm_stats_str {
struct hclge_mac_stats {
u64 mac_tx_mac_pause_num;
u64 mac_rx_mac_pause_num;
+ u64 rsv0;
u64 mac_tx_pfc_pri0_pkt_num;
u64 mac_tx_pfc_pri1_pkt_num;
u64 mac_tx_pfc_pri2_pkt_num;
@@ -448,7 +454,7 @@ struct hclge_mac_stats {
u64 mac_tx_1519_2047_oct_pkt_num;
u64 mac_tx_2048_4095_oct_pkt_num;
u64 mac_tx_4096_8191_oct_pkt_num;
- u64 rsv0;
+ u64 rsv1;
u64 mac_tx_8192_9216_oct_pkt_num;
u64 mac_tx_9217_12287_oct_pkt_num;
u64 mac_tx_12288_16383_oct_pkt_num;
@@ -475,7 +481,7 @@ struct hclge_mac_stats {
u64 mac_rx_1519_2047_oct_pkt_num;
u64 mac_rx_2048_4095_oct_pkt_num;
u64 mac_rx_4096_8191_oct_pkt_num;
- u64 rsv1;
+ u64 rsv2;
u64 mac_rx_8192_9216_oct_pkt_num;
u64 mac_rx_9217_12287_oct_pkt_num;
u64 mac_rx_12288_16383_oct_pkt_num;
@@ -498,6 +504,28 @@ struct hclge_mac_stats {
u64 mac_rx_pfc_pause_pkt_num;
u64 mac_tx_ctrl_pkt_num;
u64 mac_rx_ctrl_pkt_num;
+
+ /* duration of pfc */
+ u64 mac_tx_pfc_pri0_xoff_time;
+ u64 mac_tx_pfc_pri1_xoff_time;
+ u64 mac_tx_pfc_pri2_xoff_time;
+ u64 mac_tx_pfc_pri3_xoff_time;
+ u64 mac_tx_pfc_pri4_xoff_time;
+ u64 mac_tx_pfc_pri5_xoff_time;
+ u64 mac_tx_pfc_pri6_xoff_time;
+ u64 mac_tx_pfc_pri7_xoff_time;
+ u64 mac_rx_pfc_pri0_xoff_time;
+ u64 mac_rx_pfc_pri1_xoff_time;
+ u64 mac_rx_pfc_pri2_xoff_time;
+ u64 mac_rx_pfc_pri3_xoff_time;
+ u64 mac_rx_pfc_pri4_xoff_time;
+ u64 mac_rx_pfc_pri5_xoff_time;
+ u64 mac_rx_pfc_pri6_xoff_time;
+ u64 mac_rx_pfc_pri7_xoff_time;
+
+ /* duration of pause */
+ u64 mac_tx_pause_xoff_time;
+ u64 mac_rx_pause_xoff_time;
};
#define HCLGE_STATS_TIMER_INTERVAL 300UL
@@ -938,13 +966,14 @@ struct hclge_dev {
u16 priv_umv_size;
/* unicast mac vlan space shared by PF and its VFs */
u16 share_umv_size;
+ /* multicast mac address number used by PF and its VFs */
+ u16 used_mc_mac_num;
DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
HCLGE_MAC_TNL_LOG_SIZE);
/* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask;
- struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp;
struct devlink *devlink;
};
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index f314dbd3ce11..95074e91a846 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -752,6 +752,8 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
for (k = 0; k < hdev->tm_info.num_tc; k++)
hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = 0;
}
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
index f478770299c6..fdc19868b818 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c
@@ -110,7 +110,6 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
struct pci_dev *pdev = hdev->pdev;
struct hclgevf_devlink_priv *priv;
struct devlink *devlink;
- int ret;
devlink =
devlink_alloc(&hclgevf_devlink_ops,
@@ -122,28 +121,15 @@ int hclgevf_devlink_init(struct hclgevf_dev *hdev)
priv->hdev = hdev;
hdev->devlink = devlink;
- ret = devlink_register(devlink);
- if (ret) {
- dev_err(&pdev->dev, "failed to register devlink, ret = %d\n",
- ret);
- goto out_reg_fail;
- }
-
- devlink_reload_enable(devlink);
-
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
-
-out_reg_fail:
- devlink_free(devlink);
- return ret;
}
void hclgevf_devlink_uninit(struct hclgevf_dev *hdev)
{
struct devlink *devlink = hdev->devlink;
- devlink_reload_disable(devlink);
-
devlink_unregister(devlink);
devlink_free(devlink);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 5fdac8685f95..645b2c0011e6 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1349,7 +1349,7 @@ static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
ether_addr_copy(p, hdev->hw.mac.mac_addr);
}
-static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, const void *p,
bool is_first)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
@@ -2232,6 +2232,7 @@ static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
{
if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ test_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state) &&
!test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
&hdev->state))
mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
@@ -2273,9 +2274,9 @@ static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
hdev->reset_attempts = 0;
hdev->last_reset_time = jiffies;
- while ((hdev->reset_type =
- hclgevf_get_reset_level(hdev, &hdev->reset_pending))
- != HNAE3_NONE_RESET)
+ hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
hclgevf_reset(hdev);
} else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
&hdev->reset_state)) {
@@ -3449,6 +3450,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
hclgevf_init_rxd_adv_layout(hdev);
+ set_bit(HCLGEVF_STATE_SERVICE_INITED, &hdev->state);
+
hdev->last_reset_time = jiffies;
dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
HCLGEVF_DRIVER_NAME);
@@ -3899,7 +3902,7 @@ static int hclgevf_init(void)
{
pr_info("%s is initializing\n", HCLGEVF_NAME);
- hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
index 883130a9b48f..28288d7e3303 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -146,6 +146,7 @@ enum hclgevf_states {
HCLGEVF_STATE_REMOVING,
HCLGEVF_STATE_NIC_REGISTERED,
HCLGEVF_STATE_ROCE_REGISTERED,
+ HCLGEVF_STATE_SERVICE_INITED,
/* task states */
HCLGEVF_STATE_RST_SERVICE_SCHED,
HCLGEVF_STATE_RST_HANDLING,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
index 6e11ee339f12..60ae8bfc5f69 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c
@@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink)
devlink_free(devlink);
}
-int hinic_devlink_register(struct hinic_devlink_priv *priv)
+void hinic_devlink_register(struct hinic_devlink_priv *priv)
{
struct devlink *devlink = priv_to_devlink(priv);
- return devlink_register(devlink);
+ devlink_register(devlink);
}
void hinic_devlink_unregister(struct hinic_devlink_priv *priv)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
index 9e315011015c..46760d607b9b 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h
@@ -110,7 +110,7 @@ struct host_image_st {
struct devlink *hinic_devlink_alloc(struct device *dev);
void hinic_devlink_free(struct devlink *devlink);
-int hinic_devlink_register(struct hinic_devlink_priv *priv);
+void hinic_devlink_register(struct hinic_devlink_priv *priv);
void hinic_devlink_unregister(struct hinic_devlink_priv *priv);
int hinic_health_reporters_create(struct hinic_devlink_priv *priv);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
index b431c300ef1b..a85667078b72 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c
@@ -322,12 +322,10 @@ static int hinic_get_link_ksettings(struct net_device *netdev,
}
}
- bitmap_copy(link_ksettings->link_modes.supported,
- (unsigned long *)&settings.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_copy(link_ksettings->link_modes.advertising,
- (unsigned long *)&settings.advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(link_ksettings->link_modes.supported,
+ (unsigned long *)&settings.supported);
+ linkmode_copy(link_ksettings->link_modes.advertising,
+ (unsigned long *)&settings.advertising);
return 0;
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 56b6b04e209b..657a15447bd0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -754,17 +754,9 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
return err;
}
- err = hinic_devlink_register(hwdev->devlink_dev);
- if (err) {
- dev_err(&hwif->pdev->dev, "Failed to register devlink\n");
- hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
- return err;
- }
-
err = hinic_func_to_func_init(hwdev);
if (err) {
dev_err(&hwif->pdev->dev, "Failed to init mailbox\n");
- hinic_devlink_unregister(hwdev->devlink_dev);
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
return err;
}
@@ -787,7 +779,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev)
}
hinic_set_pf_action(hwif, HINIC_PF_MGMT_ACTIVE);
-
+ hinic_devlink_register(hwdev->devlink_dev);
return 0;
}
@@ -799,6 +791,7 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
{
struct hinic_hwdev *hwdev = &pfhwdev->hwdev;
+ hinic_devlink_unregister(hwdev->devlink_dev);
hinic_set_pf_action(hwdev->hwif, HINIC_PF_MGMT_INIT);
if (!HINIC_IS_VF(hwdev->hwif)) {
@@ -816,8 +809,6 @@ static void free_pfhwdev(struct hinic_pfhwdev *pfhwdev)
hinic_func_to_func_free(hwdev);
- hinic_devlink_unregister(hwdev->devlink_dev);
-
hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index ae707e305684..f9a766b8ac43 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -656,7 +656,7 @@ static int hinic_set_mac_addr(struct net_device *netdev, void *addr)
err = change_mac_addr(netdev, new_mac);
if (!err)
- memcpy(netdev->dev_addr, new_mac, ETH_ALEN);
+ eth_hw_addr_set(netdev, new_mac);
return err;
}
@@ -1181,6 +1181,7 @@ static int nic_dev_init(struct pci_dev *pdev)
struct net_device *netdev;
struct hinic_hwdev *hwdev;
struct devlink *devlink;
+ u8 addr[ETH_ALEN];
int err, num_qps;
devlink = hinic_devlink_alloc(&pdev->dev);
@@ -1259,11 +1260,12 @@ static int nic_dev_init(struct pci_dev *pdev)
pci_set_drvdata(pdev, netdev);
- err = hinic_port_get_mac(nic_dev, netdev->dev_addr);
+ err = hinic_port_get_mac(nic_dev, addr);
if (err) {
dev_err(&pdev->dev, "Failed to get mac address\n");
goto err_get_mac;
}
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!HINIC_IS_VF(nic_dev->hwdev->hwif)) {
@@ -1379,10 +1381,8 @@ static int hinic_probe(struct pci_dev *pdev,
{
int err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to enable PCI device\n");
- return err;
- }
+ if (err)
+ return dev_err_probe(&pdev->dev, err, "Failed to enable PCI device\n");
err = pci_request_regions(pdev, HINIC_DRV_NAME);
if (err) {
diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c
index 0696f723228a..3909c6a0af89 100644
--- a/drivers/net/ethernet/i825xx/sun3_82586.c
+++ b/drivers/net/ethernet/i825xx/sun3_82586.c
@@ -339,14 +339,13 @@ static const struct net_device_ops sun3_82586_netdev_ops = {
static int __init sun3_82586_probe1(struct net_device *dev,int ioaddr)
{
- int i, size, retval;
+ int size, retval;
if (!request_region(ioaddr, SUN3_82586_TOTAL_SIZE, DRV_NAME))
return -EBUSY;
/* copy in the ethernet address from the prom */
- for(i = 0; i < 6 ; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
printk("%s: SUN3 Intel 82586 found at %lx, ",dev->name,dev->base_addr);
@@ -461,7 +460,7 @@ static int init586(struct net_device *dev)
ias_cmd->cmd_cmd = swab16(CMD_IASETUP | CMD_LAST);
ias_cmd->cmd_link = 0xffff;
- memcpy((char *)&ias_cmd->iaddr,(char *) dev->dev_addr,ETH_ALEN);
+ memcpy((char *)&ias_cmd->iaddr,(const char *) dev->dev_addr,ETH_ALEN);
p->scb->cbl_offset = make16(ias_cmd);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index d5df131b183c..bad94e4d50f4 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -1741,7 +1741,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
goto out_free;
}
- memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr->sa_data);
/* Deregister old MAC in pHYP */
if (port->state == EHEA_PORT_UP) {
@@ -2986,7 +2986,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
SET_NETDEV_DEV(dev, port_dev);
/* initialize net_device structure */
- memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, (u8 *)&port->mac_addr);
dev->netdev_ops = &ehea_netdev_ops;
ehea_set_ethtool_ops(dev);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 664a91af662d..6b3fc8823c54 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -1013,7 +1013,7 @@ static int emac_set_mac_address(struct net_device *ndev, void *sa)
mutex_lock(&dev->link_lock);
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
emac_rx_disable(dev);
emac_tx_disable(dev);
@@ -2848,7 +2848,6 @@ static int emac_init_phy(struct emac_instance *dev)
static int emac_init_config(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
- const void *p;
int err;
/* Read config from device-tree */
@@ -2976,13 +2975,12 @@ static int emac_init_config(struct emac_instance *dev)
}
/* Read MAC-address */
- p = of_get_property(np, "local-mac-address", NULL);
- if (p == NULL) {
- printk(KERN_ERR "%pOF: Can't find local-mac-address property\n",
- np);
- return -ENXIO;
+ err = of_get_ethdev_address(np, dev->ndev);
+ if (err) {
+ if (err != -EPROBE_DEFER)
+ dev_err(&dev->ofdev->dev, "Can't get valid [local-]mac-address from OF !\n");
+ return err;
}
- memcpy(dev->ndev->dev_addr, p, ETH_ALEN);
/* IAHT and GAHT filter parameterization */
if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 3d9b4f99d357..45ba40cf4d07 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -483,17 +483,6 @@ retry:
return rc;
}
-static u64 ibmveth_encode_mac_addr(u8 *mac)
-{
- int i;
- u64 encoded = 0;
-
- for (i = 0; i < ETH_ALEN; i++)
- encoded = (encoded << 8) | mac[i];
-
- return encoded;
-}
-
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
@@ -553,7 +542,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
+ mac_address = ether_addr_to_u64(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -605,17 +594,13 @@ static int ibmveth_open(struct net_device *netdev)
}
rc = -ENOMEM;
- adapter->bounce_buffer =
- kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
- if (!adapter->bounce_buffer)
- goto out_free_irq;
- adapter->bounce_buffer_dma =
- dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
- netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
- netdev_err(netdev, "unable to map bounce buffer\n");
- goto out_free_bounce_buffer;
+ adapter->bounce_buffer = dma_alloc_coherent(&adapter->vdev->dev,
+ netdev->mtu + IBMVETH_BUFF_OH,
+ &adapter->bounce_buffer_dma, GFP_KERNEL);
+ if (!adapter->bounce_buffer) {
+ netdev_err(netdev, "unable to alloc bounce buffer\n");
+ goto out_free_irq;
}
netdev_dbg(netdev, "initial replenish cycle\n");
@@ -627,8 +612,6 @@ static int ibmveth_open(struct net_device *netdev)
return 0;
-out_free_bounce_buffer:
- kfree(adapter->bounce_buffer);
out_free_irq:
free_irq(netdev->irq, netdev);
out_free_buffer_pools:
@@ -702,10 +685,9 @@ static int ibmveth_close(struct net_device *netdev)
ibmveth_free_buffer_pool(adapter,
&adapter->rx_buff_pool[i]);
- dma_unmap_single(&adapter->vdev->dev, adapter->bounce_buffer_dma,
- adapter->netdev->mtu + IBMVETH_BUFF_OH,
- DMA_BIDIRECTIONAL);
- kfree(adapter->bounce_buffer);
+ dma_free_coherent(&adapter->vdev->dev,
+ adapter->netdev->mtu + IBMVETH_BUFF_OH,
+ adapter->bounce_buffer, adapter->bounce_buffer_dma);
netdev_dbg(netdev, "close complete\n");
@@ -1483,7 +1465,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
u64 mcast_addr;
- mcast_addr = ibmveth_encode_mac_addr(ha->addr);
+ mcast_addr = ether_addr_to_u64(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1613,14 +1595,14 @@ static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- mac_address = ibmveth_encode_mac_addr(addr->sa_data);
+ mac_address = ether_addr_to_u64(addr->sa_data);
rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
if (rc) {
netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
return rc;
}
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1727,7 +1709,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->min_mtu = IBMVETH_MIN_MTU;
netdev->max_mtu = ETH_MAX_MTU - IBMVETH_BUFF_OH;
- memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr_p);
if (firmware_has_feature(FW_FEATURE_CMO))
memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 6aa6ff89a765..3cca51735421 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -108,6 +108,8 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter);
static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
struct ibmvnic_sub_crq_queue *tx_scrq);
+static void free_long_term_buff(struct ibmvnic_adapter *adapter,
+ struct ibmvnic_long_term_buff *ltb);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -214,22 +216,77 @@ static int ibmvnic_wait_for_completion(struct ibmvnic_adapter *adapter,
return -ETIMEDOUT;
}
+/**
+ * reuse_ltb() - Check if a long term buffer can be reused
+ * @ltb: The long term buffer to be checked
+ * @size: The size of the long term buffer.
+ *
+ * An LTB can be reused unless its size has changed.
+ *
+ * Return: true if the LTB can be reused, false otherwise.
+ */
+static bool reuse_ltb(struct ibmvnic_long_term_buff *ltb, int size)
+{
+ return (ltb->buff && ltb->size == size);
+}
+
+/**
+ * alloc_long_term_buff() - Allocate a long term buffer (LTB)
+ *
+ * @adapter: ibmvnic adapter associated to the LTB
+ * @ltb: container object for the LTB
+ * @size: size of the LTB
+ *
+ * Allocate an LTB of the specified size and notify VIOS.
+ *
+ * If the given @ltb already has the correct size, reuse it. Otherwise, if
+ * it is non-NULL, free it. Then allocate a new one of the correct size.
+ * Notify the VIOS either way since we may now be working with a new VIOS.
+ *
+ * Allocating larger chunks of memory during resets, especially during LPM
+ * or in low-memory situations, can cause resets to fail or time out and the
+ * LPAR to lose connectivity. So hold onto the LTB even if we fail to
+ * communicate with the VIOS and reuse it on the next open. Free the LTB
+ * when the adapter is closed.
+ *
+ * Return: 0 if we were able to allocate the LTB and notify the VIOS and
+ * a negative value otherwise.
+ */
static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb, int size)
{
struct device *dev = &adapter->vdev->dev;
int rc;
- ltb->size = size;
- ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
- GFP_KERNEL);
+ if (!reuse_ltb(ltb, size)) {
+ dev_dbg(dev,
+ "LTB size changed from 0x%llx to 0x%x, reallocating\n",
+ ltb->size, size);
+ free_long_term_buff(adapter, ltb);
+ }
- if (!ltb->buff) {
- dev_err(dev, "Couldn't alloc long term buffer\n");
- return -ENOMEM;
+ if (ltb->buff) {
+ dev_dbg(dev, "Reusing LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
+ } else {
+ ltb->buff = dma_alloc_coherent(dev, size, &ltb->addr,
+ GFP_KERNEL);
+ if (!ltb->buff) {
+ dev_err(dev, "Couldn't alloc long term buffer\n");
+ return -ENOMEM;
+ }
+ ltb->size = size;
+
+ ltb->map_id = find_first_zero_bit(adapter->map_ids,
+ MAX_MAP_ID);
+ bitmap_set(adapter->map_ids, ltb->map_id, 1);
+
+ dev_dbg(dev,
+ "Allocated new LTB [map %d, size 0x%llx]\n",
+ ltb->map_id, ltb->size);
}
- ltb->map_id = adapter->map_id;
- adapter->map_id++;
+
+ /* Ensure ltb is zeroed - especially when reusing it. */
+ memset(ltb->buff, 0, ltb->size);
mutex_lock(&adapter->fw_lock);
adapter->fw_done_rc = 0;
@@ -243,24 +300,20 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
if (rc) {
- dev_err(dev,
- "Long term map request aborted or timed out,rc = %d\n",
+ dev_err(dev, "LTB map request aborted or timed out, rc = %d\n",
rc);
goto out;
}
if (adapter->fw_done_rc) {
- dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
+ dev_err(dev, "Couldn't map LTB, rc = %d\n",
adapter->fw_done_rc);
rc = -1;
goto out;
}
rc = 0;
out:
- if (rc) {
- dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
- ltb->buff = NULL;
- }
+ /* don't free LTB on communication error - see function header */
mutex_unlock(&adapter->fw_lock);
return rc;
}
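alloc_long_term_buff() and free_long_term_buff() above switch map_id handling from an ever-increasing counter to a bitmap (find_first_zero_bit()/bitmap_set() on allocation, bitmap_clear() on free), so IDs are recycled when an LTB is reused or released. A rough userspace sketch of that allocate/release pattern is shown below, using a plain word-array bitmap instead of the kernel helpers; the MAX_MAP_ID value here is illustrative, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define MAX_MAP_ID 256   /* illustrative limit, not the driver's value */

static uint64_t map_ids[MAX_MAP_ID / 64];

/* Find the first clear bit, set it and return its index (the new map_id). */
static int map_id_alloc(void)
{
	for (int i = 0; i < MAX_MAP_ID; i++) {
		if (!(map_ids[i / 64] & (1ULL << (i % 64)))) {
			map_ids[i / 64] |= 1ULL << (i % 64);
			return i;
		}
	}
	return -1;   /* no free id */
}

/* Release a previously allocated map_id so it can be reused. */
static void map_id_free(int id)
{
	map_ids[id / 64] &= ~(1ULL << (id % 64));
}

int main(void)
{
	int a = map_id_alloc();   /* 0 */
	int b = map_id_alloc();   /* 1 */

	map_id_free(a);           /* id 0 becomes available again */
	printf("a=%d b=%d next=%d\n", a, b, map_id_alloc());   /* next=0 */
	return 0;
}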
@@ -281,48 +334,15 @@ static void free_long_term_buff(struct ibmvnic_adapter *adapter,
adapter->reset_reason != VNIC_RESET_MOBILITY &&
adapter->reset_reason != VNIC_RESET_TIMEOUT)
send_request_unmap(adapter, ltb->map_id);
+
dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+
ltb->buff = NULL;
+ /* mark this map_id free */
+ bitmap_clear(adapter->map_ids, ltb->map_id, 1);
ltb->map_id = 0;
}
-static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
- struct ibmvnic_long_term_buff *ltb)
-{
- struct device *dev = &adapter->vdev->dev;
- int rc;
-
- memset(ltb->buff, 0, ltb->size);
-
- mutex_lock(&adapter->fw_lock);
- adapter->fw_done_rc = 0;
-
- reinit_completion(&adapter->fw_done);
- rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
- if (rc) {
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
- if (rc) {
- dev_info(dev,
- "Reset failed, long term map request timed out or aborted\n");
- mutex_unlock(&adapter->fw_lock);
- return rc;
- }
-
- if (adapter->fw_done_rc) {
- dev_info(dev,
- "Reset failed, attempting to free and reallocate buffer\n");
- free_long_term_buff(adapter, ltb);
- mutex_unlock(&adapter->fw_lock);
- return alloc_long_term_buff(adapter, ltb, ltb->size);
- }
- mutex_unlock(&adapter->fw_lock);
- return 0;
-}
-
static void deactivate_rx_pools(struct ibmvnic_adapter *adapter)
{
int i;
@@ -363,31 +383,41 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
* be 0.
*/
for (i = ind_bufp->index; i < count; ++i) {
- skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
+ index = pool->free_map[pool->next_free];
+
+ /* We may be reusing the skb from earlier resets. Allocate
+ * only if necessary. But since the LTB may have changed
+ * during reset (see init_rx_pools()), update the LTB below
+ * even if reusing the skb.
+ */
+ skb = pool->rx_buff[index].skb;
if (!skb) {
- dev_err(dev, "Couldn't replenish rx buff\n");
- adapter->replenish_no_mem++;
- break;
+ skb = netdev_alloc_skb(adapter->netdev,
+ pool->buff_size);
+ if (!skb) {
+ dev_err(dev, "Couldn't replenish rx buff\n");
+ adapter->replenish_no_mem++;
+ break;
+ }
}
- index = pool->free_map[pool->next_free];
-
- if (pool->rx_buff[index].skb)
- dev_err(dev, "Inconsistent free_map!\n");
+ pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ pool->next_free = (pool->next_free + 1) % pool->size;
/* Copy the skb to the long term mapped DMA buffer */
offset = index * pool->buff_size;
dst = pool->long_term_buff.buff + offset;
memset(dst, 0, pool->buff_size);
dma_addr = pool->long_term_buff.addr + offset;
- pool->rx_buff[index].data = dst;
- pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
+ /* add the skb to an rx_buff in the pool */
+ pool->rx_buff[index].data = dst;
pool->rx_buff[index].dma = dma_addr;
pool->rx_buff[index].skb = skb;
pool->rx_buff[index].pool_index = pool->index;
pool->rx_buff[index].size = pool->buff_size;
+ /* queue the rx_buff for the next send_subcrq_indirect */
sub_crq = &ind_bufp->indir_arr[ind_bufp->index++];
memset(sub_crq, 0, sizeof(*sub_crq));
sub_crq->rx_add.first = IBMVNIC_CRQ_CMD;
@@ -405,7 +435,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
shift = 8;
#endif
sub_crq->rx_add.len = cpu_to_be32(pool->buff_size << shift);
- pool->next_free = (pool->next_free + 1) % pool->size;
+
+ /* if send_subcrq_indirect queue is full, flush to VIOS */
if (ind_bufp->index == IBMVNIC_MAX_IND_DESCS ||
i == count - 1) {
lpar_rc =
@@ -523,53 +554,12 @@ static int init_stats_token(struct ibmvnic_adapter *adapter)
return 0;
}
-static int reset_rx_pools(struct ibmvnic_adapter *adapter)
-{
- struct ibmvnic_rx_pool *rx_pool;
- u64 buff_size;
- int rx_scrqs;
- int i, j, rc;
-
- if (!adapter->rx_pool)
- return -1;
-
- buff_size = adapter->cur_rx_buf_sz;
- rx_scrqs = adapter->num_active_rx_pools;
- for (i = 0; i < rx_scrqs; i++) {
- rx_pool = &adapter->rx_pool[i];
-
- netdev_dbg(adapter->netdev, "Re-setting rx_pool[%d]\n", i);
-
- if (rx_pool->buff_size != buff_size) {
- free_long_term_buff(adapter, &rx_pool->long_term_buff);
- rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rc = alloc_long_term_buff(adapter,
- &rx_pool->long_term_buff,
- rx_pool->size *
- rx_pool->buff_size);
- } else {
- rc = reset_long_term_buff(adapter,
- &rx_pool->long_term_buff);
- }
-
- if (rc)
- return rc;
-
- for (j = 0; j < rx_pool->size; j++)
- rx_pool->free_map[j] = j;
-
- memset(rx_pool->rx_buff, 0,
- rx_pool->size * sizeof(struct ibmvnic_rx_buff));
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
- rx_pool->active = 1;
- }
-
- return 0;
-}
-
+/**
+ * release_rx_pools() - Release any rx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
@@ -584,6 +574,7 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
netdev_dbg(adapter->netdev, "Releasing rx_pool[%d]\n", i);
kfree(rx_pool->free_map);
+
free_long_term_buff(adapter, &rx_pool->long_term_buff);
if (!rx_pool->rx_buff)
@@ -602,21 +593,91 @@ static void release_rx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->rx_pool);
adapter->rx_pool = NULL;
adapter->num_active_rx_pools = 0;
+ adapter->prev_rx_pool_size = 0;
}
+/**
+ * reuse_rx_pools() - Check if the existing rx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing rx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and size of each buffer) have not
+ * changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the rx pools can be reused, false otherwise.
+ */
+static bool reuse_rx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_buff_size, new_buff_size;
+
+ if (!adapter->rx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_rx_pools;
+ new_num_pools = adapter->req_rx_queues;
+
+ old_pool_size = adapter->prev_rx_pool_size;
+ new_pool_size = adapter->req_rx_add_entries_per_subcrq;
+
+ old_buff_size = adapter->prev_rx_buf_sz;
+ new_buff_size = adapter->cur_rx_buf_sz;
+
+ /* Require buff size to be exactly same for now */
+ if (old_buff_size != new_buff_size)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_rx_queues ||
+ old_num_pools > adapter->max_rx_queues ||
+ old_pool_size < adapter->min_rx_add_entries_per_subcrq ||
+ old_pool_size > adapter->max_rx_add_entries_per_subcrq)
+ return false;
+
+ return true;
+}
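As a hedged illustration of the reuse criteria above (parameter values hypothetical, not from this patch):

/* Hypothetical reuse_rx_pools() outcomes:
 *
 *   old: 8 pools x 512 buffers x 2048 B, new: 8 x 512 x 2048 B -> true
 *   old: 8 x 512 x 2048 B,               new: 8 x 512 x 9216 B -> false
 *        (buffer size must match exactly)
 *   old: 8 x 512 x 2048 B,               new: 4 x 256 x 2048 B -> true,
 *        as long as 8 pools and 512 buffers still fall within the
 *        adapter's min/max rx queue and subcrq-entry limits
 */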
+
+/**
+ * init_rx_pools(): Initialize the set of receiver pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of receiver pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing rx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_rx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_rx_pool *rx_pool;
- int rxadd_subcrqs;
+ u64 num_pools;
+ u64 pool_size; /* # of buffers in one pool */
u64 buff_size;
int i, j;
- rxadd_subcrqs = adapter->num_active_rx_scrqs;
+ pool_size = adapter->req_rx_add_entries_per_subcrq;
+ num_pools = adapter->req_rx_queues;
buff_size = adapter->cur_rx_buf_sz;
- adapter->rx_pool = kcalloc(rxadd_subcrqs,
+ if (reuse_rx_pools(adapter)) {
+ dev_dbg(dev, "Reusing rx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_rx_pools(adapter);
+
+ adapter->rx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_rx_pool),
GFP_KERNEL);
if (!adapter->rx_pool) {
@@ -624,26 +685,27 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
- adapter->num_active_rx_pools = rxadd_subcrqs;
+ /* Set num_active_rx_pools early. If we fail below after partial
+ * allocation, release_rx_pools() will know how many to look for.
+ */
+ adapter->num_active_rx_pools = num_pools;
- for (i = 0; i < rxadd_subcrqs; i++) {
+ for (i = 0; i < num_pools; i++) {
rx_pool = &adapter->rx_pool[i];
netdev_dbg(adapter->netdev,
"Initializing rx_pool[%d], %lld buffs, %lld bytes each\n",
- i, adapter->req_rx_add_entries_per_subcrq,
- buff_size);
+ i, pool_size, buff_size);
- rx_pool->size = adapter->req_rx_add_entries_per_subcrq;
+ rx_pool->size = pool_size;
rx_pool->index = i;
rx_pool->buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
- rx_pool->active = 1;
rx_pool->free_map = kcalloc(rx_pool->size, sizeof(int),
GFP_KERNEL);
if (!rx_pool->free_map) {
- release_rx_pools(adapter);
- return -1;
+ dev_err(dev, "Couldn't alloc free_map %d\n", i);
+ goto out_release;
}
rx_pool->rx_buff = kcalloc(rx_pool->size,
@@ -651,69 +713,58 @@ static int init_rx_pools(struct net_device *netdev)
GFP_KERNEL);
if (!rx_pool->rx_buff) {
dev_err(dev, "Couldn't alloc rx buffers\n");
- release_rx_pools(adapter);
- return -1;
+ goto out_release;
}
-
- if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
- rx_pool->size * rx_pool->buff_size)) {
- release_rx_pools(adapter);
- return -1;
- }
-
- for (j = 0; j < rx_pool->size; ++j)
- rx_pool->free_map[j] = j;
-
- atomic_set(&rx_pool->available, 0);
- rx_pool->next_alloc = 0;
- rx_pool->next_free = 0;
}
- return 0;
-}
+ adapter->prev_rx_pool_size = pool_size;
+ adapter->prev_rx_buf_sz = adapter->cur_rx_buf_sz;
-static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
- struct ibmvnic_tx_pool *tx_pool)
-{
- int rc, i;
-
- rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
- if (rc)
- return rc;
-
- memset(tx_pool->tx_buff, 0,
- tx_pool->num_buffers *
- sizeof(struct ibmvnic_tx_buff));
+update_ltb:
+ for (i = 0; i < num_pools; i++) {
+ rx_pool = &adapter->rx_pool[i];
+ dev_dbg(dev, "Updating LTB for rx pool %d [%d, %d]\n",
+ i, rx_pool->size, rx_pool->buff_size);
- for (i = 0; i < tx_pool->num_buffers; i++)
- tx_pool->free_map[i] = i;
+ if (alloc_long_term_buff(adapter, &rx_pool->long_term_buff,
+ rx_pool->size * rx_pool->buff_size))
+ goto out;
- tx_pool->consumer_index = 0;
- tx_pool->producer_index = 0;
+ for (j = 0; j < rx_pool->size; ++j) {
+ struct ibmvnic_rx_buff *rx_buff;
- return 0;
-}
-
-static int reset_tx_pools(struct ibmvnic_adapter *adapter)
-{
- int tx_scrqs;
- int i, rc;
+ rx_pool->free_map[j] = j;
- if (!adapter->tx_pool)
- return -1;
+ /* NOTE: Don't clear rx_buff->skb here - will leak
+ * memory! replenish_rx_pool() will reuse skbs or
+ * allocate as necessary.
+ */
+ rx_buff = &rx_pool->rx_buff[j];
+ rx_buff->dma = 0;
+ rx_buff->data = 0;
+ rx_buff->size = 0;
+ rx_buff->pool_index = 0;
+ }
- tx_scrqs = adapter->num_active_tx_pools;
- for (i = 0; i < tx_scrqs; i++) {
- ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
- rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
- if (rc)
- return rc;
- rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
- if (rc)
- return rc;
+ /* Mark pool "empty" so replenish_rx_pool() will
+ * update the LTB info for each buffer
+ */
+ atomic_set(&rx_pool->available, 0);
+ rx_pool->next_alloc = 0;
+ rx_pool->next_free = 0;
+ /* replenish_rx_pool() may have called deactivate_rx_pools()
+ * on failover. Ensure the pool is active now.
+ */
+ rx_pool->active = 1;
}
-
return 0;
+out_release:
+ release_rx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return -1;
}
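The two error labels above separate failure classes: out_release frees the freshly allocated bookkeeping, while out deliberately keeps the pools and any LTBs that were already mapped so a later retry can reuse them. A condensed sketch of that control flow, with hypothetical helper names standing in for the allocation loops:

/* Condensed error-handling shape of init_rx_pools() (sketch only;
 * alloc_pool_bookkeeping() and map_pool_ltbs() are hypothetical stand-ins
 * for the kcalloc loop and the alloc_long_term_buff() loop above).
 */
if (!reuse_rx_pools(adapter)) {
	release_rx_pools(adapter);
	if (alloc_pool_bookkeeping(adapter))
		goto out_release;	/* nothing usable yet - free everything */
}
if (map_pool_ltbs(adapter))
	goto out;			/* keep pools and already-mapped LTBs */
return 0;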
static void release_vpd_data(struct ibmvnic_adapter *adapter)
@@ -735,10 +786,19 @@ static void release_one_tx_pool(struct ibmvnic_adapter *adapter,
free_long_term_buff(adapter, &tx_pool->long_term_buff);
}
+/**
+ * release_tx_pools() - Release any tx pools attached to @adapter.
+ * @adapter: ibmvnic adapter
+ *
+ * Safe to call this multiple times - even if no pools are attached.
+ */
static void release_tx_pools(struct ibmvnic_adapter *adapter)
{
int i;
+ /* init_tx_pools() ensures that ->tx_pool and ->tso_pool are
+ * both NULL or both non-NULL. So we only need to check one.
+ */
if (!adapter->tx_pool)
return;
@@ -752,84 +812,218 @@ static void release_tx_pools(struct ibmvnic_adapter *adapter)
kfree(adapter->tso_pool);
adapter->tso_pool = NULL;
adapter->num_active_tx_pools = 0;
+ adapter->prev_tx_pool_size = 0;
}
static int init_one_tx_pool(struct net_device *netdev,
struct ibmvnic_tx_pool *tx_pool,
- int num_entries, int buf_size)
+ int pool_size, int buf_size)
{
- struct ibmvnic_adapter *adapter = netdev_priv(netdev);
int i;
- tx_pool->tx_buff = kcalloc(num_entries,
+ tx_pool->tx_buff = kcalloc(pool_size,
sizeof(struct ibmvnic_tx_buff),
GFP_KERNEL);
if (!tx_pool->tx_buff)
return -1;
- if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
- num_entries * buf_size))
- return -1;
-
- tx_pool->free_map = kcalloc(num_entries, sizeof(int), GFP_KERNEL);
- if (!tx_pool->free_map)
+ tx_pool->free_map = kcalloc(pool_size, sizeof(int), GFP_KERNEL);
+ if (!tx_pool->free_map) {
+ kfree(tx_pool->tx_buff);
+ tx_pool->tx_buff = NULL;
return -1;
+ }
- for (i = 0; i < num_entries; i++)
+ for (i = 0; i < pool_size; i++)
tx_pool->free_map[i] = i;
tx_pool->consumer_index = 0;
tx_pool->producer_index = 0;
- tx_pool->num_buffers = num_entries;
+ tx_pool->num_buffers = pool_size;
tx_pool->buf_size = buf_size;
return 0;
}
+/**
+ * reuse_tx_pools() - Check if the existing tx pools can be reused.
+ * @adapter: ibmvnic adapter
+ *
+ * Check if the existing tx pools in the adapter can be reused. The
+ * pools can be reused if the pool parameters (number of pools,
+ * number of buffers in the pool and mtu) have not changed.
+ *
+ * NOTE: This assumes that all pools have the same number of buffers
+ * which is the case currently. If that changes, we must fix this.
+ *
+ * Return: true if the tx pools can be reused, false otherwise.
+ */
+static bool reuse_tx_pools(struct ibmvnic_adapter *adapter)
+{
+ u64 old_num_pools, new_num_pools;
+ u64 old_pool_size, new_pool_size;
+ u64 old_mtu, new_mtu;
+
+ if (!adapter->tx_pool)
+ return false;
+
+ old_num_pools = adapter->num_active_tx_pools;
+ new_num_pools = adapter->num_active_tx_scrqs;
+ old_pool_size = adapter->prev_tx_pool_size;
+ new_pool_size = adapter->req_tx_entries_per_subcrq;
+ old_mtu = adapter->prev_mtu;
+ new_mtu = adapter->req_mtu;
+
+ /* Require MTU to be exactly same to reuse pools for now */
+ if (old_mtu != new_mtu)
+ return false;
+
+ if (old_num_pools == new_num_pools && old_pool_size == new_pool_size)
+ return true;
+
+ if (old_num_pools < adapter->min_tx_queues ||
+ old_num_pools > adapter->max_tx_queues ||
+ old_pool_size < adapter->min_tx_entries_per_subcrq ||
+ old_pool_size > adapter->max_tx_entries_per_subcrq)
+ return false;
+
+ return true;
+}
+
+/**
+ * init_tx_pools(): Initialize the set of transmit pools in the adapter.
+ * @netdev: net device associated with the vnic interface
+ *
+ * Initialize the set of transmit pools in the ibmvnic adapter associated
+ * with the net_device @netdev. If possible, reuse the existing tx pools.
+ * Otherwise free any existing pools and allocate a new set of pools
+ * before initializing them.
+ *
+ * Return: 0 on success and negative value on error.
+ */
static int init_tx_pools(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- int tx_subcrqs;
+ struct device *dev = &adapter->vdev->dev;
+ int num_pools;
+ u64 pool_size; /* # of buffers in pool */
u64 buff_size;
- int i, rc;
+ int i, j, rc;
+
+ num_pools = adapter->req_tx_queues;
+
+ /* We must notify the VIOS about the LTB on all resets - but we only
+ * need to alloc/populate pools if either the number of buffers or
+ * size of each buffer in the pool has changed.
+ */
+ if (reuse_tx_pools(adapter)) {
+ netdev_dbg(netdev, "Reusing tx pools\n");
+ goto update_ltb;
+ }
+
+ /* Allocate/populate the pools. */
+ release_tx_pools(adapter);
+
+ pool_size = adapter->req_tx_entries_per_subcrq;
+ num_pools = adapter->num_active_tx_scrqs;
- tx_subcrqs = adapter->num_active_tx_scrqs;
- adapter->tx_pool = kcalloc(tx_subcrqs,
+ adapter->tx_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
if (!adapter->tx_pool)
return -1;
- adapter->tso_pool = kcalloc(tx_subcrqs,
+ adapter->tso_pool = kcalloc(num_pools,
sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
+ /* To simplify release_tx_pools(), ensure that ->tx_pool and
+ * ->tso_pool are either both NULL or both non-NULL.
+ */
if (!adapter->tso_pool) {
kfree(adapter->tx_pool);
adapter->tx_pool = NULL;
return -1;
}
- adapter->num_active_tx_pools = tx_subcrqs;
+ /* Set num_active_tx_pools early. If we fail below after partial
+ * allocation, release_tx_pools() will know how many to look for.
+ */
+ adapter->num_active_tx_pools = num_pools;
+
+ buff_size = adapter->req_mtu + VLAN_HLEN;
+ buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
+
+ for (i = 0; i < num_pools; i++) {
+ dev_dbg(dev, "Init tx pool %d [%llu, %llu]\n",
+ i, adapter->req_tx_entries_per_subcrq, buff_size);
- for (i = 0; i < tx_subcrqs; i++) {
- buff_size = adapter->req_mtu + VLAN_HLEN;
- buff_size = ALIGN(buff_size, L1_CACHE_BYTES);
rc = init_one_tx_pool(netdev, &adapter->tx_pool[i],
- adapter->req_tx_entries_per_subcrq,
- buff_size);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ pool_size, buff_size);
+ if (rc)
+ goto out_release;
rc = init_one_tx_pool(netdev, &adapter->tso_pool[i],
IBMVNIC_TSO_BUFS,
IBMVNIC_TSO_BUF_SZ);
- if (rc) {
- release_tx_pools(adapter);
- return rc;
- }
+ if (rc)
+ goto out_release;
+ }
+
+ adapter->prev_tx_pool_size = pool_size;
+ adapter->prev_mtu = adapter->req_mtu;
+
+update_ltb:
+ /* NOTE: All tx_pools have the same number of buffers (which is
+ * the same as pool_size). All tso_pools have IBMVNIC_TSO_BUFS
+ * buffers (see the calls to init_one_tx_pool() for these).
+ * For consistency, we use tx_pool->num_buffers and
+ * tso_pool->num_buffers below.
+ */
+ rc = -1;
+ for (i = 0; i < num_pools; i++) {
+ struct ibmvnic_tx_pool *tso_pool;
+ struct ibmvnic_tx_pool *tx_pool;
+ u32 ltb_size;
+
+ tx_pool = &adapter->tx_pool[i];
+ ltb_size = tx_pool->num_buffers * tx_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tx pool %d [%p, %d, %d]\n",
+ i, tx_pool->long_term_buff.buff,
+ tx_pool->num_buffers, tx_pool->buf_size);
+
+ tx_pool->consumer_index = 0;
+ tx_pool->producer_index = 0;
+
+ for (j = 0; j < tx_pool->num_buffers; j++)
+ tx_pool->free_map[j] = j;
+
+ tso_pool = &adapter->tso_pool[i];
+ ltb_size = tso_pool->num_buffers * tso_pool->buf_size;
+ if (alloc_long_term_buff(adapter, &tso_pool->long_term_buff,
+ ltb_size))
+ goto out;
+
+ dev_dbg(dev, "Updated LTB for tso pool %d [%p, %d, %d]\n",
+ i, tso_pool->long_term_buff.buff,
+ tso_pool->num_buffers, tso_pool->buf_size);
+
+ tso_pool->consumer_index = 0;
+ tso_pool->producer_index = 0;
+
+ for (j = 0; j < tso_pool->num_buffers; j++)
+ tso_pool->free_map[j] = j;
}
return 0;
+out_release:
+ release_tx_pools(adapter);
+out:
+ /* We failed to allocate one or more LTBs or map them on the VIOS.
+ * Hold onto the pools and any LTBs that we did allocate/map.
+ */
+ return rc;
}
static void ibmvnic_napi_enable(struct ibmvnic_adapter *adapter)
@@ -1020,9 +1214,6 @@ static void release_resources(struct ibmvnic_adapter *adapter)
{
release_vpd_data(adapter);
- release_tx_pools(adapter);
- release_rx_pools(adapter);
-
release_napi(adapter);
release_login_buffer(adapter);
release_login_rsp_buffer(adapter);
@@ -1198,8 +1389,6 @@ static int init_resources(struct ibmvnic_adapter *adapter)
return rc;
}
- adapter->map_id = 1;
-
rc = init_napi(adapter);
if (rc)
return rc;
@@ -1296,6 +1485,8 @@ static int ibmvnic_open(struct net_device *netdev)
if (rc) {
netdev_err(netdev, "failed to initialize resources\n");
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
goto out;
}
}
@@ -1424,9 +1615,6 @@ static void ibmvnic_cleanup(struct net_device *netdev)
ibmvnic_napi_disable(adapter);
ibmvnic_disable_irqs(adapter);
-
- clean_rx_pools(adapter);
- clean_tx_pools(adapter);
}
static int __ibmvnic_close(struct net_device *netdev)
@@ -1460,6 +1648,8 @@ static int ibmvnic_close(struct net_device *netdev)
rc = __ibmvnic_close(netdev);
ibmvnic_cleanup(netdev);
+ clean_rx_pools(adapter);
+ clean_tx_pools(adapter);
return rc;
}
@@ -1724,8 +1914,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ind_bufp = &tx_scrq->ind_buf;
if (test_bit(0, &adapter->resetting)) {
- if (!netif_subqueue_stopped(netdev, skb))
- netif_stop_subqueue(netdev, queue_num);
dev_kfree_skb_any(skb);
tx_send_failed++;
@@ -2036,9 +2224,9 @@ static const char *reset_reason_to_string(enum ibmvnic_reset_reason reason)
static int do_reset(struct ibmvnic_adapter *adapter,
struct ibmvnic_rwi *rwi, u32 reset_state)
{
+ struct net_device *netdev = adapter->netdev;
u64 old_num_rx_queues, old_num_tx_queues;
u64 old_num_rx_slots, old_num_tx_slots;
- struct net_device *netdev = adapter->netdev;
int rc;
netdev_dbg(adapter->netdev,
@@ -2188,8 +2376,6 @@ static int do_reset(struct ibmvnic_adapter *adapter,
!adapter->rx_pool ||
!adapter->tso_pool ||
!adapter->tx_pool) {
- release_rx_pools(adapter);
- release_tx_pools(adapter);
release_napi(adapter);
release_vpd_data(adapter);
@@ -2198,16 +2384,18 @@ static int do_reset(struct ibmvnic_adapter *adapter,
goto out;
} else {
- rc = reset_tx_pools(adapter);
+ rc = init_tx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init tx pools failed (%d)\n",
rc);
goto out;
}
- rc = reset_rx_pools(adapter);
+ rc = init_rx_pools(netdev);
if (rc) {
- netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n",
+ netdev_dbg(netdev,
+ "init rx pools failed (%d)\n",
rc);
goto out;
}
@@ -2567,7 +2755,7 @@ static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
if (adapter->state == VNIC_PROBING) {
netdev_warn(netdev, "Adapter reset during probe\n");
- adapter->init_done_rc = EAGAIN;
+ adapter->init_done_rc = -EAGAIN;
ret = EAGAIN;
goto err;
}
@@ -4576,8 +4764,7 @@ static int handle_change_mac_rsp(union ibmvnic_crq *crq,
/* crq->change_mac_addr.mac_addr is the requested one
* crq->change_mac_addr_rsp.mac_addr is the returned valid one.
*/
- ether_addr_copy(netdev->dev_addr,
- &crq->change_mac_addr_rsp.mac_addr[0]);
+ eth_hw_addr_set(netdev, &crq->change_mac_addr_rsp.mac_addr[0]);
ether_addr_copy(adapter->mac_addr,
&crq->change_mac_addr_rsp.mac_addr[0]);
out:
@@ -4778,9 +4965,10 @@ static void handle_query_map_rsp(union ibmvnic_crq *crq,
dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
return;
}
- netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
- crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
- crq->query_map_rsp.free_pages);
+ netdev_dbg(netdev, "page_size = %d\ntot_pages = %u\nfree_pages = %u\n",
+ crq->query_map_rsp.page_size,
+ __be32_to_cpu(crq->query_map_rsp.tot_pages),
+ __be32_to_cpu(crq->query_map_rsp.free_pages));
}
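The tot_pages and free_pages fields arrive big-endian from the hypervisor, so they must be converted before being printed with %u; a standalone illustration (value hypothetical):

/* Illustration: a __be32 CRQ field must be byte-swapped on little-endian hosts */
__be32 wire_tot_pages = cpu_to_be32(4096);		/* as received on the wire */
u32 tot_pages = __be32_to_cpu(wire_tot_pages);		/* 4096 on any host */
netdev_dbg(netdev, "tot_pages = %u\n", tot_pages);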
static void handle_query_cap_rsp(union ibmvnic_crq *crq,
@@ -5069,11 +5257,6 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
*/
adapter->login_pending = false;
- if (!completion_done(&adapter->init_done)) {
- complete(&adapter->init_done);
- adapter->init_done_rc = -EIO;
- }
-
if (adapter->state == VNIC_DOWN)
rc = ibmvnic_reset(adapter, VNIC_RESET_PASSIVE_INIT);
else
@@ -5094,6 +5277,13 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
rc);
adapter->failover_pending = false;
}
+
+ if (!completion_done(&adapter->init_done)) {
+ complete(&adapter->init_done);
+ if (!adapter->init_done_rc)
+ adapter->init_done_rc = -EAGAIN;
+ }
+
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
@@ -5414,6 +5604,9 @@ static int init_crq_queue(struct ibmvnic_adapter *adapter)
crq->cur = 0;
spin_lock_init(&crq->lock);
+ /* process any CRQs that were queued before we enabled interrupts */
+ tasklet_schedule(&adapter->tasklet);
+
return retrc;
req_irq_failed:
@@ -5527,9 +5720,12 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->vdev = dev;
adapter->netdev = netdev;
adapter->login_pending = false;
+ memset(&adapter->map_ids, 0, sizeof(adapter->map_ids));
+ /* map_ids start at 1, so ensure map_id 0 is always "in-use" */
+ bitmap_set(adapter->map_ids, 0, 1);
ether_addr_copy(adapter->mac_addr, mac_addr_p);
- ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
+ eth_hw_addr_set(netdev, adapter->mac_addr);
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmvnic_netdev_ops;
netdev->ethtool_ops = &ibmvnic_ethtool_ops;
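The single u8 map_id counter becomes a bitmap so LTB map ids can be returned and recycled instead of only incrementing. The allocation/free helpers are outside this hunk; a hedged sketch of what such an allocator looks like on top of map_ids (function names hypothetical):

/* Hypothetical map-id allocator built on the map_ids bitmap */
static u8 example_get_map_id(struct ibmvnic_adapter *adapter)
{
	u8 id = find_first_zero_bit(adapter->map_ids, MAX_MAP_ID);

	bitmap_set(adapter->map_ids, id, 1);
	return id;		/* id 0 is reserved in probe, so ids start at 1 */
}

static void example_put_map_id(struct ibmvnic_adapter *adapter, u8 id)
{
	bitmap_clear(adapter->map_ids, id, 1);
}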
@@ -5547,6 +5743,8 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
init_completion(&adapter->reset_done);
init_completion(&adapter->stats_done);
clear_bit(0, &adapter->resetting);
+ adapter->prev_rx_buf_sz = 0;
+ adapter->prev_mtu = 0;
init_success = false;
do {
@@ -5558,7 +5756,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
rc = ibmvnic_reset_init(adapter, false);
- } while (rc == EAGAIN);
+ } while (rc == -EAGAIN);
/* We are ignoring the error from ibmvnic_reset_init() assuming that the
* partner is not ready. CRQ is not active. When the partner becomes
@@ -5647,6 +5845,8 @@ static void ibmvnic_remove(struct vio_dev *dev)
unregister_netdevice(netdev);
release_resources(adapter);
+ release_rx_pools(adapter);
+ release_tx_pools(adapter);
release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index 22df602323bc..b8e42f67d897 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -827,7 +827,7 @@ struct ibmvnic_rx_buff {
struct ibmvnic_rx_pool {
struct ibmvnic_rx_buff *rx_buff;
- int size;
+ int size; /* # of buffers in the pool */
int index;
int buff_size;
atomic_t available;
@@ -967,6 +967,7 @@ struct ibmvnic_adapter {
u64 min_mtu;
u64 max_mtu;
u64 req_mtu;
+ u64 prev_mtu;
u64 max_multicast_filters;
u64 vlan_header_insertion;
u64 rx_vlan_header_insertion;
@@ -979,13 +980,18 @@ struct ibmvnic_adapter {
u64 opt_tx_entries_per_subcrq;
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
- u8 map_id;
+#define MAX_MAP_ID 255
+ DECLARE_BITMAP(map_ids, MAX_MAP_ID);
u32 num_active_rx_scrqs;
u32 num_active_rx_pools;
u32 num_active_rx_napi;
u32 num_active_tx_scrqs;
u32 num_active_tx_pools;
+
+ u32 prev_rx_pool_size;
+ u32 prev_tx_pool_size;
u32 cur_rx_buf_sz;
+ u32 prev_rx_buf_sz;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index ed8ea63bb172..0b274d8fa45b 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -313,6 +313,20 @@ config ICE
To compile this driver as a module, choose M here. The module
will be called ice.
+config ICE_SWITCHDEV
+ bool "Switchdev Support"
+ default y
+ depends on ICE && NET_SWITCHDEV
+ help
+ Switchdev support provides internal SRIOV packet steering and switching.
+
+ To enable it on a running kernel, use the devlink tool:
+ # devlink dev eswitch set pci/0000:XX:XX.X mode switchdev
+
+ Say Y here if you want to use Switchdev in the driver.
+
+ If unsure, say N.
+
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 09ae1939e6db..5039a2536951 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2259,7 +2259,7 @@ static int e100_set_mac_address(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
e100_exec_cb(nic, NULL, e100_setup_iaaddr);
return 0;
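This is the first of many mechanical conversions in this series: netdev->dev_addr is moving toward being const, so drivers set it through eth_hw_addr_set() rather than memcpy()/ether_addr_copy(). A minimal sketch of the pattern in a .ndo_set_mac_address callback (driver-specific parts omitted, names hypothetical):

/* Hypothetical .ndo_set_mac_address using the eth_hw_addr_set() helper */
static int example_set_mac(struct net_device *netdev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	eth_hw_addr_set(netdev, addr->sa_data);	/* instead of writing dev_addr directly */
	/* ...program the address into the hardware here... */
	return 0;
}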
@@ -2921,7 +2921,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e100_phy_init(nic);
- memcpy(netdev->dev_addr, nic->eeprom, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)nic->eeprom);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if (!eeprom_bad_csum_allow) {
netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index bed4f040face..669060a2e6aa 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1103,7 +1103,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
e_err(probe, "EEPROM Read Error\n");
}
/* don't block initialization here due to bad MAC address */
- memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac_addr);
if (!is_valid_ether_addr(netdev->dev_addr))
e_err(probe, "Invalid MAC Address\n");
@@ -2209,7 +2209,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (hw->mac_type == e1000_82542_rev2_0)
e1000_enter_82542_rst(adapter);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
e1000_rar_set(hw, hw->mac_addr, 0);
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 5b2143f4b1f8..c3def0ee7788 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -21,6 +21,7 @@
#include <linux/ptp_classify.h>
#include <linux/mii.h>
#include <linux/mdio.h>
+#include <linux/mutex.h>
#include <linux/pm_qos.h>
#include "hw.h"
@@ -113,7 +114,8 @@ enum e1000_boards {
board_pch2lan,
board_pch_lpt,
board_pch_spt,
- board_pch_cnp
+ board_pch_cnp,
+ board_pch_tgp
};
struct e1000_ps_page {
@@ -499,6 +501,7 @@ extern const struct e1000_info e1000_pch2_info;
extern const struct e1000_info e1000_pch_lpt_info;
extern const struct e1000_info e1000_pch_spt_info;
extern const struct e1000_info e1000_pch_cnp_info;
+extern const struct e1000_info e1000_pch_tgp_info;
extern const struct e1000_info e1000_es2_info;
void e1000e_ptp_init(struct e1000_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 60c582a16821..5e4fc9b4e2ad 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -4813,7 +4813,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- u32 ctrl_ext, txdctl, snoop;
+ u32 ctrl_ext, txdctl, snoop, fflt_dbg;
s32 ret_val;
u16 i;
@@ -4872,6 +4872,15 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
snoop = (u32)~(PCIE_NO_SNOOP_ALL);
e1000e_set_pcie_no_snoop(hw, snoop);
+ /* Enable the workaround for a packet loss issue on the TGP PCH:
+ * do not gate the DMA clock from the modPHY block
+ */
+ if (mac->type >= e1000_pch_tgp) {
+ fflt_dbg = er32(FFLT_DBG);
+ fflt_dbg |= E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK;
+ ew32(FFLT_DBG, fflt_dbg);
+ }
+
ctrl_ext = er32(CTRL_EXT);
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
@@ -5992,3 +6001,23 @@ const struct e1000_info e1000_pch_cnp_info = {
.phy_ops = &ich8_phy_ops,
.nvm_ops = &spt_nvm_ops,
};
+
+const struct e1000_info e1000_pch_tgp_info = {
+ .mac = e1000_pch_tgp,
+ .flags = FLAG_IS_ICH
+ | FLAG_HAS_WOL
+ | FLAG_HAS_HW_TIMESTAMP
+ | FLAG_HAS_CTRLEXT_ON_LOAD
+ | FLAG_HAS_AMT
+ | FLAG_HAS_FLASH
+ | FLAG_HAS_JUMBO_FRAMES
+ | FLAG_APME_IN_WUC,
+ .flags2 = FLAG2_HAS_PHY_STATS
+ | FLAG2_HAS_EEE,
+ .pba = 26,
+ .max_hw_frame_size = 9022,
+ .get_variants = e1000_get_variants_ich8lan,
+ .mac_ops = &ich8_mac_ops,
+ .phy_ops = &ich8_phy_ops,
+ .nvm_ops = &spt_nvm_ops,
+};
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index d6a092e5ee74..2504b11c3169 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -289,6 +289,9 @@
/* Proprietary Latency Tolerance Reporting PCI Capability */
#define E1000_PCI_LTR_CAP_LPT 0xA8
+/* Don't gate wake DMA clock */
+#define E1000_FFLT_DBG_DONT_GATE_WAKE_DMA_CLK 0x1000
+
void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
bool state);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 900b3ab998bd..44e2dc8328a2 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -51,6 +51,7 @@ static const struct e1000_info *e1000_info_tbl[] = {
[board_pch_lpt] = &e1000_pch_lpt_info,
[board_pch_spt] = &e1000_pch_spt_info,
[board_pch_cnp] = &e1000_pch_cnp_info,
+ [board_pch_tgp] = &e1000_pch_tgp_info,
};
struct e1000_reg_info {
@@ -2549,7 +2550,6 @@ static void e1000_set_itr(struct e1000_adapter *adapter)
/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
if (adapter->link_speed != SPEED_1000) {
- current_itr = 0;
new_itr = 4000;
goto set_itr_now;
}
@@ -4786,7 +4786,7 @@ static int e1000_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
@@ -7589,7 +7589,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev,
"NVM Read Error while reading MAC address\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
@@ -7896,28 +7896,28 @@ static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp },
- { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp },
+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp },
{ 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
};
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index 2fb52bd6fc0e..2cca9e84e31e 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -990,7 +990,7 @@ static int fm10k_set_mac(struct net_device *dev, void *p)
}
if (!err) {
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
ether_addr_copy(hw->mac.addr, addr->sa_data);
dev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index adfa2768f024..b473cb7d7c57 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -300,7 +300,7 @@ static int fm10k_handle_reset(struct fm10k_intfc *interface)
if (is_valid_ether_addr(hw->mac.perm_addr)) {
ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
ether_addr_copy(netdev->perm_addr, hw->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, hw->mac.perm_addr);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
netdev->addr_assign_type &= ~NET_ADDR_RANDOM;
}
@@ -2045,7 +2045,7 @@ static int fm10k_sw_init(struct fm10k_intfc *interface,
netdev->addr_assign_type |= NET_ADDR_RANDOM;
}
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
ether_addr_copy(netdev->perm_addr, hw->mac.addr);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 39fb3d57c057..3d528fba754b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -435,7 +435,7 @@ static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch)
return !!ch->fwd;
}
-static inline u8 *i40e_channel_mac(struct i40e_channel *ch)
+static inline const u8 *i40e_channel_mac(struct i40e_channel *ch)
{
if (i40e_is_channel_macvlan(ch))
return ch->fwd->netdev->dev_addr;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e04b540cedc8..ba862131b9bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -1587,7 +1587,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
*/
spin_lock_bh(&vsi->mac_filter_hash_lock);
i40e_del_mac_filter(vsi, netdev->dev_addr);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
i40e_add_mac_filter(vsi, netdev->dev_addr);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
@@ -13425,7 +13425,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
i40e_add_mac_filter(vsi, broadcast);
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
/* i40iw_net_event() reads 16 bytes from neigh->primary_key */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index e7e778ca074c..ea06e957393e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -193,42 +193,40 @@ bool i40e_alloc_rx_buffers_zc(struct i40e_ring *rx_ring, u16 count)
{
u16 ntu = rx_ring->next_to_use;
union i40e_rx_desc *rx_desc;
- struct xdp_buff **bi, *xdp;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- bool ok = true;
rx_desc = I40E_RX_DESC(rx_ring, ntu);
- bi = i40e_rx_bi(rx_ring, ntu);
- do {
- xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!xdp) {
- ok = false;
- goto no_buffers;
- }
- *bi = xdp;
- dma = xsk_buff_xdp_get_dma(xdp);
+ xdp = i40e_rx_bi(rx_ring, ntu);
+
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
+
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
rx_desc->read.hdr_addr = 0;
rx_desc++;
- bi++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = I40E_RX_DESC(rx_ring, 0);
- bi = i40e_rx_bi(rx_ring, 0);
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
-no_buffers:
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.qword1.status_error_len = 0;
- i40e_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = I40E_RX_DESC(rx_ring, 0);
+ xdp = i40e_rx_bi(rx_ring, 0);
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.qword1.status_error_len = 0;
+ i40e_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
/**
@@ -365,7 +363,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
break;
bi = *i40e_rx_bi(rx_ring, next_to_clean);
- bi->data_end = bi->data + size;
+ xsk_buff_set_size(bi, size);
xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool);
xdp_res = i40e_run_xdp_zc(rx_ring, bi);
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 68c80f04113c..e6e7c1da47fb 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -177,6 +177,7 @@ enum iavf_state_t {
__IAVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__IAVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__IAVF_INIT_SW, /* got resources, setting up structs */
+ __IAVF_INIT_FAILED, /* init failed, restarting procedure */
__IAVF_RESETTING, /* in reset */
__IAVF_COMM_FAILED, /* communication with PF failed */
/* Below here, watchdog is running */
@@ -225,7 +226,6 @@ struct iavf_adapter {
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work client_task;
- struct delayed_work init_task;
wait_queue_head_t down_waitqueue;
struct iavf_q_vector *q_vectors;
struct list_head vlan_filter_list;
@@ -312,6 +312,7 @@ struct iavf_adapter {
struct iavf_hw hw; /* defined in iavf_type.h */
enum iavf_state_t state;
+ enum iavf_state_t last_state;
unsigned long crit_section;
struct delayed_work watchdog_task;
@@ -393,6 +394,51 @@ struct iavf_device {
extern char iavf_driver_name[];
extern struct workqueue_struct *iavf_wq;
+static inline const char *iavf_state_str(enum iavf_state_t state)
+{
+ switch (state) {
+ case __IAVF_STARTUP:
+ return "__IAVF_STARTUP";
+ case __IAVF_REMOVE:
+ return "__IAVF_REMOVE";
+ case __IAVF_INIT_VERSION_CHECK:
+ return "__IAVF_INIT_VERSION_CHECK";
+ case __IAVF_INIT_GET_RESOURCES:
+ return "__IAVF_INIT_GET_RESOURCES";
+ case __IAVF_INIT_SW:
+ return "__IAVF_INIT_SW";
+ case __IAVF_INIT_FAILED:
+ return "__IAVF_INIT_FAILED";
+ case __IAVF_RESETTING:
+ return "__IAVF_RESETTING";
+ case __IAVF_COMM_FAILED:
+ return "__IAVF_COMM_FAILED";
+ case __IAVF_DOWN:
+ return "__IAVF_DOWN";
+ case __IAVF_DOWN_PENDING:
+ return "__IAVF_DOWN_PENDING";
+ case __IAVF_TESTING:
+ return "__IAVF_TESTING";
+ case __IAVF_RUNNING:
+ return "__IAVF_RUNNING";
+ default:
+ return "__IAVF_UNKNOWN_STATE";
+ }
+}
+
+static inline void iavf_change_state(struct iavf_adapter *adapter,
+ enum iavf_state_t state)
+{
+ if (adapter->state != state) {
+ adapter->last_state = adapter->state;
+ adapter->state = state;
+ }
+ dev_dbg(&adapter->pdev->dev,
+ "state transition from:%s to:%s\n",
+ iavf_state_str(adapter->last_state),
+ iavf_state_str(adapter->state));
+}
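A brief, hypothetical illustration of the helper in use; the dev_dbg() above emits one line per call:

/* Hypothetical call site */
iavf_change_state(adapter, __IAVF_INIT_FAILED);
/* With debug messages enabled this logs, e.g.:
 *   state transition from:__IAVF_STARTUP to:__IAVF_INIT_FAILED
 */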
+
int iavf_up(struct iavf_adapter *adapter);
void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index cada4e0e40b4..847d67e32a54 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -14,7 +14,7 @@
static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
-static int iavf_init_get_resources(struct iavf_adapter *adapter);
+static void iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);
char iavf_driver_name[] = "iavf";
@@ -52,6 +52,15 @@ static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;
/**
+ * iavf_pdev_to_adapter - go from pci_dev to adapter
+ * @pdev: pci_dev pointer
+ */
+static struct iavf_adapter *iavf_pdev_to_adapter(struct pci_dev *pdev)
+{
+ return netdev_priv(pci_get_drvdata(pdev));
+}
+
+/**
* iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
* @hw: pointer to the HW structure
* @mem: ptr to mem struct to fill out
@@ -960,7 +969,7 @@ static void iavf_configure(struct iavf_adapter *adapter)
**/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
- adapter->state = __IAVF_RUNNING;
+ iavf_change_state(adapter, __IAVF_RUNNING);
clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
iavf_napi_enable_all(adapter);
@@ -1688,9 +1697,9 @@ static int iavf_process_aq_command(struct iavf_adapter *adapter)
*
* Function process __IAVF_STARTUP driver state.
* When success the state is changed to __IAVF_INIT_VERSION_CHECK
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_startup(struct iavf_adapter *adapter)
+static void iavf_startup(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1729,9 +1738,10 @@ static int iavf_startup(struct iavf_adapter *adapter)
iavf_shutdown_adminq(hw);
goto err;
}
- adapter->state = __IAVF_INIT_VERSION_CHECK;
+ iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1740,9 +1750,9 @@ err:
*
* Function process __IAVF_INIT_VERSION_CHECK driver state.
* When success the state is changed to __IAVF_INIT_GET_RESOURCES
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_version_check(struct iavf_adapter *adapter)
+static void iavf_init_version_check(struct iavf_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct iavf_hw *hw = &adapter->hw;
@@ -1753,7 +1763,7 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
if (!iavf_asq_done(hw)) {
dev_err(&pdev->dev, "Admin queue command never completed\n");
iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
goto err;
}
@@ -1776,10 +1786,10 @@ static int iavf_init_version_check(struct iavf_adapter *adapter)
err);
goto err;
}
- adapter->state = __IAVF_INIT_GET_RESOURCES;
-
+ iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
+ return;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1789,9 +1799,9 @@ err:
* Function process __IAVF_INIT_GET_RESOURCES driver state and
* finishes driver initialization procedure.
* When success the state is changed to __IAVF_DOWN
- * when fails it returns -EAGAIN
+ * when it fails the state is changed to __IAVF_INIT_FAILED
**/
-static int iavf_init_get_resources(struct iavf_adapter *adapter)
+static void iavf_init_get_resources(struct iavf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1819,7 +1829,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
*/
iavf_shutdown_adminq(hw);
dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
- return 0;
+ return;
}
if (err) {
dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
@@ -1847,7 +1857,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
eth_hw_addr_random(netdev);
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -1893,7 +1903,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
rtnl_unlock();
@@ -1911,7 +1921,7 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
else
iavf_init_rss(adapter);
- return err;
+ return;
err_mem:
iavf_free_rss(adapter);
err_register:
@@ -1922,7 +1932,7 @@ err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
- return err;
+ iavf_change_state(adapter, __IAVF_INIT_FAILED);
}
/**
@@ -1941,9 +1951,50 @@ static void iavf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
- adapter->state = __IAVF_COMM_FAILED;
+ iavf_change_state(adapter, __IAVF_COMM_FAILED);
+
+ if (adapter->flags & IAVF_FLAG_RESET_NEEDED &&
+ adapter->state != __IAVF_RESETTING) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
+ adapter->aq_required = 0;
+ adapter->current_op = VIRTCHNL_OP_UNKNOWN;
+ }
switch (adapter->state) {
+ case __IAVF_STARTUP:
+ iavf_startup(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_VERSION_CHECK:
+ iavf_init_version_check(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(30));
+ return;
+ case __IAVF_INIT_GET_RESOURCES:
+ iavf_init_get_resources(adapter);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
+ msecs_to_jiffies(1));
+ return;
+ case __IAVF_INIT_FAILED:
+ if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to communicate with PF; waiting before retry\n");
+ adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
+ iavf_shutdown_adminq(hw);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, (5 * HZ));
+ return;
+ }
+ /* Try again from the failed step */
+ iavf_change_state(adapter, adapter->last_state);
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ);
+ return;
case __IAVF_COMM_FAILED:
reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1952,23 +2003,19 @@ static void iavf_watchdog_task(struct work_struct *work)
/* A chance for redemption! */
dev_err(&adapter->pdev->dev,
"Hardware came out of reset. Attempting reinit.\n");
- adapter->state = __IAVF_STARTUP;
- adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
- queue_delayed_work(iavf_wq, &adapter->init_task, 10);
- mutex_unlock(&adapter->crit_lock);
- /* Don't reschedule the watchdog, since we've restarted
- * the init task. When init_task contacts the PF and
+ /* When init task contacts the PF and
* gets everything set up again, it'll restart the
* watchdog for us. Down, boy. Sit. Stay. Woof.
*/
- return;
+ iavf_change_state(adapter, __IAVF_STARTUP);
+ adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
}
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
queue_delayed_work(iavf_wq,
&adapter->watchdog_task,
msecs_to_jiffies(10));
- goto watchdog_done;
+ return;
case __IAVF_RESETTING:
mutex_unlock(&adapter->crit_lock);
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
@@ -1991,38 +2038,40 @@ static void iavf_watchdog_task(struct work_struct *work)
adapter->state == __IAVF_RUNNING)
iavf_request_stats(adapter);
}
+ if (adapter->state == __IAVF_RUNNING)
+ iavf_detect_recover_hung(&adapter->vsi);
break;
case __IAVF_REMOVE:
mutex_unlock(&adapter->crit_lock);
return;
default:
- goto restart_watchdog;
+ return;
}
- /* check for hw reset */
+ /* check for hw reset */
reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val) {
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags |= IAVF_FLAG_RESET_PENDING;
adapter->aq_required = 0;
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
queue_work(iavf_wq, &adapter->reset_task);
- goto watchdog_done;
+ mutex_unlock(&adapter->crit_lock);
+ queue_delayed_work(iavf_wq,
+ &adapter->watchdog_task, HZ * 2);
+ return;
}
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
-watchdog_done:
- if (adapter->state == __IAVF_RUNNING ||
- adapter->state == __IAVF_COMM_FAILED)
- iavf_detect_recover_hung(&adapter->vsi);
mutex_unlock(&adapter->crit_lock);
restart_watchdog:
+ queue_work(iavf_wq, &adapter->adminq_task);
if (adapter->aq_required)
queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(20));
else
queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
- queue_work(iavf_wq, &adapter->adminq_task);
}
static void iavf_disable_vf(struct iavf_adapter *adapter)
@@ -2081,7 +2130,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter)
adapter->netdev->flags &= ~IFF_UP;
mutex_unlock(&adapter->crit_lock);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
}
@@ -2191,7 +2240,7 @@ continue_reset:
}
iavf_irq_disable(adapter);
- adapter->state = __IAVF_RESETTING;
+ iavf_change_state(adapter, __IAVF_RESETTING);
adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
/* free the Tx/Rx rings and descriptors, might be better to just
@@ -2291,11 +2340,14 @@ continue_reset:
iavf_configure(adapter);
+ /* iavf_up_complete() will switch device back
+ * to __IAVF_RUNNING
+ */
iavf_up_complete(adapter);
iavf_irq_enable(adapter, true);
} else {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
mutex_unlock(&adapter->client_lock);
@@ -2305,6 +2357,8 @@ continue_reset:
reset_err:
mutex_unlock(&adapter->client_lock);
mutex_unlock(&adapter->crit_lock);
+ if (running)
+ iavf_change_state(adapter, __IAVF_RUNNING);
dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
iavf_close(netdev);
}
@@ -3226,6 +3280,13 @@ static int iavf_open(struct net_device *netdev)
goto err_unlock;
}
+ if (adapter->state == __IAVF_RUNNING &&
+ !test_bit(__IAVF_VSI_DOWN, adapter->vsi.state)) {
+ dev_dbg(&adapter->pdev->dev, "VF is already open.\n");
+ err = 0;
+ goto err_unlock;
+ }
+
/* allocate transmit descriptors */
err = iavf_setup_all_tx_resources(adapter);
if (err)
@@ -3297,7 +3358,7 @@ static int iavf_close(struct net_device *netdev)
adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
iavf_down(adapter);
- adapter->state = __IAVF_DOWN_PENDING;
+ iavf_change_state(adapter, __IAVF_DOWN_PENDING);
iavf_free_traffic_irqs(adapter);
mutex_unlock(&adapter->crit_lock);
@@ -3631,71 +3692,13 @@ int iavf_process_config(struct iavf_adapter *adapter)
}
/**
- * iavf_init_task - worker thread to perform delayed initialization
- * @work: pointer to work_struct containing our data
- *
- * This task completes the work that was begun in probe. Due to the nature
- * of VF-PF communications, we may need to wait tens of milliseconds to get
- * responses back from the PF. Rather than busy-wait in probe and bog down the
- * whole system, we'll do it in a task so we can sleep.
- * This task only runs during driver init. Once we've established
- * communications with the PF driver and set up our netdev, the watchdog
- * takes over.
- **/
-static void iavf_init_task(struct work_struct *work)
-{
- struct iavf_adapter *adapter = container_of(work,
- struct iavf_adapter,
- init_task.work);
- struct iavf_hw *hw = &adapter->hw;
-
- if (iavf_lock_timeout(&adapter->crit_lock, 5000)) {
- dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
- return;
- }
- switch (adapter->state) {
- case __IAVF_STARTUP:
- if (iavf_startup(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_VERSION_CHECK:
- if (iavf_init_version_check(adapter) < 0)
- goto init_failed;
- break;
- case __IAVF_INIT_GET_RESOURCES:
- if (iavf_init_get_resources(adapter) < 0)
- goto init_failed;
- goto out;
- default:
- goto init_failed;
- }
-
- queue_delayed_work(iavf_wq, &adapter->init_task,
- msecs_to_jiffies(30));
- goto out;
-init_failed:
- if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
- dev_err(&adapter->pdev->dev,
- "Failed to communicate with PF; waiting before retry\n");
- adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
- iavf_shutdown_adminq(hw);
- adapter->state = __IAVF_STARTUP;
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
- goto out;
- }
- queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
-out:
- mutex_unlock(&adapter->crit_lock);
-}
-
-/**
* iavf_shutdown - Shutdown the device in preparation for a reboot
* @pdev: pci device structure
**/
static void iavf_shutdown(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+ struct net_device *netdev = adapter->netdev;
netif_device_detach(netdev);
@@ -3705,7 +3708,7 @@ static void iavf_shutdown(struct pci_dev *pdev)
if (iavf_lock_timeout(&adapter->crit_lock, 5000))
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Prevent the watchdog from running. */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
mutex_unlock(&adapter->crit_lock);
@@ -3778,7 +3781,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter;
adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
- adapter->state = __IAVF_STARTUP;
+ iavf_change_state(adapter, __IAVF_STARTUP);
/* Call save state here because it relies on the adapter struct. */
pci_save_state(pdev);
@@ -3822,8 +3825,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
- INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
- queue_delayed_work(iavf_wq, &adapter->init_task,
+ queue_delayed_work(iavf_wq, &adapter->watchdog_task,
msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
/* Setup the wait queue for indicating transition to down status */
@@ -3880,10 +3882,11 @@ static int __maybe_unused iavf_suspend(struct device *dev_d)
static int __maybe_unused iavf_resume(struct device *dev_d)
{
struct pci_dev *pdev = to_pci_dev(dev_d);
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter;
u32 err;
+ adapter = iavf_pdev_to_adapter(pdev);
+
pci_set_master(pdev);
rtnl_lock();
@@ -3902,7 +3905,7 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
queue_work(iavf_wq, &adapter->reset_task);
- netif_device_attach(netdev);
+ netif_device_attach(adapter->netdev);
return err;
}
@@ -3918,8 +3921,9 @@ static int __maybe_unused iavf_resume(struct device *dev_d)
**/
static void iavf_remove(struct pci_dev *pdev)
{
- struct net_device *netdev = pci_get_drvdata(pdev);
- struct iavf_adapter *adapter = netdev_priv(netdev);
+ struct iavf_adapter *adapter = iavf_pdev_to_adapter(pdev);
+ enum iavf_state_t prev_state = adapter->last_state;
+ struct net_device *netdev = adapter->netdev;
struct iavf_fdir_fltr *fdir, *fdirtmp;
struct iavf_vlan_filter *vlf, *vlftmp;
struct iavf_adv_rss *rss, *rsstmp;
@@ -3929,8 +3933,8 @@ static void iavf_remove(struct pci_dev *pdev)
int err;
/* Indicate we are in remove and not to run reset_task */
mutex_lock(&adapter->remove_lock);
- cancel_delayed_work_sync(&adapter->init_task);
cancel_work_sync(&adapter->reset_task);
+ cancel_delayed_work_sync(&adapter->watchdog_task);
cancel_delayed_work_sync(&adapter->client_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -3954,13 +3958,25 @@ static void iavf_remove(struct pci_dev *pdev)
dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__);
/* Shut down all the garbage mashers on the detention level */
- adapter->state = __IAVF_REMOVE;
+ iavf_change_state(adapter, __IAVF_REMOVE);
adapter->aq_required = 0;
adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
+
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
iavf_misc_irq_disable(adapter);
iavf_free_misc_irq(adapter);
+
+ /* In case we enter iavf_remove from an erroneous state, free the traffic
+ * irqs here so as not to cause a kernel crash when calling
+ * iavf_reset_interrupt_capability.
+ */
+ if ((adapter->last_state == __IAVF_RESETTING &&
+ prev_state != __IAVF_DOWN) ||
+ (adapter->last_state == __IAVF_RUNNING &&
+ !(netdev->flags & IFF_UP)))
+ iavf_free_traffic_irqs(adapter);
+
iavf_reset_interrupt_capability(adapter);
iavf_free_q_vectors(adapter);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index 3c735968e1b8..8c3f0f77cb57 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1685,7 +1685,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
if (!v_retval)
iavf_mac_add_ok(adapter);
if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
@@ -1716,7 +1716,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
} else {
/* refresh current mac address if changed */
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr,
adapter->hw.mac.addr);
}
@@ -1735,7 +1735,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
iavf_free_all_tx_resources(adapter);
iavf_free_all_rx_resources(adapter);
if (adapter->state == __IAVF_DOWN_PENDING) {
- adapter->state = __IAVF_DOWN;
+ iavf_change_state(adapter, __IAVF_DOWN);
wake_up(&adapter->down_waitqueue);
}
break;
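The iavf_virtchnl.c changes replace direct writes to netdev->dev_addr with eth_hw_addr_set(). A hedged illustration of the pattern (example_set_mac is a made-up name, not part of the driver):

	#include <linux/etherdevice.h>

	static void example_set_mac(struct net_device *netdev, const u8 *mac)
	{
		/* eth_hw_addr_set() copies ETH_ALEN bytes into dev_addr through
		 * the core, which is required now that direct stores to
		 * netdev->dev_addr are being phased out.
		 */
		eth_hw_addr_set(netdev, mac);
	}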
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 4f538cdf42c1..c36faa7d1471 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -26,10 +26,13 @@ ice-y := ice_main.o \
ice_devlink.o \
ice_fw_update.o \
ice_lag.o \
- ice_ethtool.o
+ ice_ethtool.o \
+ ice_repr.o \
+ ice_tc_lib.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_allowlist.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o ice_virtchnl_fdir.o
ice-$(CONFIG_PTP_1588_CLOCK) += ice_ptp.o ice_ptp_hw.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_RFS_ACCEL) += ice_arfs.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
+ice-$(CONFIG_ICE_SWITCHDEV) += ice_eswitch.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 3c4f08d20414..bf4ecd9a517c 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -34,10 +34,15 @@
#include <linux/if_bridge.h>
#include <linux/ctype.h>
#include <linux/bpf.h>
+#include <linux/btf.h>
#include <linux/auxiliary_bus.h>
#include <linux/avf/virtchnl.h>
#include <linux/cpu_rmap.h>
#include <linux/dim.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/ip.h>
#include <net/devlink.h>
#include <net/ipv6.h>
#include <net/xdp_sock.h>
@@ -55,6 +60,7 @@
#include "ice_dcb.h"
#include "ice_switch.h"
#include "ice_common.h"
+#include "ice_flow.h"
#include "ice_sched.h"
#include "ice_idc_int.h"
#include "ice_virtchnl_pf.h"
@@ -63,6 +69,8 @@
#include "ice_fdir.h"
#include "ice_xsk.h"
#include "ice_arfs.h"
+#include "ice_repr.h"
+#include "ice_eswitch.h"
#include "ice_lag.h"
#define ICE_BAR0 0
@@ -84,6 +92,7 @@
#define ICE_FDIR_MSIX 2
#define ICE_RDMA_NUM_AEQ_MSIX 4
#define ICE_MIN_RDMA_MSIX 2
+#define ICE_ESWITCH_MSIX 1
#define ICE_NO_VSI 0xffff
#define ICE_VSI_MAP_CONTIG 0
#define ICE_VSI_MAP_SCATTER 1
@@ -101,6 +110,10 @@
#define ICE_INVAL_VFID 256
#define ICE_MAX_RXQS_PER_TC 256 /* Used when setting VSI context per TC Rx queues */
+
+#define ICE_CHNL_START_TC 1
+#define ICE_CHNL_MAX_TC 16
+
#define ICE_MAX_RESET_WAIT 20
#define ICE_VSIQF_HKEY_ARRAY_SIZE ((VSIQF_HKEY_MAX_INDEX + 1) * 4)
@@ -118,14 +131,24 @@
#define ICE_TX_CTX_DESC(R, i) (&(((struct ice_tx_ctx_desc *)((R)->desc))[i]))
#define ICE_TX_FDIRDESC(R, i) (&(((struct ice_fltr_desc *)((R)->desc))[i]))
+/* Minimum BW limit is 500 Kbps for any scheduler node */
+#define ICE_MIN_BW_LIMIT 500
+/* User can specify BW in either Kbit/Mbit/Gbit and OS converts it in bytes.
+ * use it to convert user specified BW limit into Kbps
+ */
+#define ICE_BW_KBPS_DIVISOR 125
+
/* Macro for each VSI in a PF */
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
-/* Macros for each Tx/Rx ring in a VSI */
+/* Macros for each Tx/Xdp/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
+#define ice_for_each_xdp_txq(vsi, i) \
+ for ((i) = 0; (i) < (vsi)->num_xdp_txq; (i)++)
+
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
@@ -139,6 +162,9 @@
#define ice_for_each_q_vector(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_q_vectors; (i)++)
+#define ice_for_each_chnl_tc(i) \
+ for ((i) = ICE_CHNL_START_TC; (i) < ICE_CHNL_MAX_TC; (i)++)
+
#define ICE_UCAST_PROMISC_BITS (ICE_PROMISC_UCAST_TX | ICE_PROMISC_MCAST_TX | \
ICE_PROMISC_UCAST_RX | ICE_PROMISC_MCAST_RX)
@@ -158,6 +184,29 @@
#define ice_pf_to_dev(pf) (&((pf)->pdev->dev))
+enum ice_feature {
+ ICE_F_DSCP,
+ ICE_F_SMA_CTRL,
+ ICE_F_MAX
+};
+
+DECLARE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+
+struct ice_channel {
+ struct list_head list;
+ u8 type;
+ u16 sw_id;
+ u16 base_q;
+ u16 num_rxq;
+ u16 num_txq;
+ u16 vsi_num;
+ u8 ena_tc;
+ struct ice_aqc_vsi_props info;
+ u64 max_tx_rate;
+ u64 min_tx_rate;
+ struct ice_vsi *ch_vsi;
+};
+
struct ice_txq_meta {
u32 q_teid; /* Tx-scheduler element identifier */
u16 q_id; /* Entry in VSI's txq_map bitmap */
@@ -175,7 +224,7 @@ struct ice_tc_info {
struct ice_tc_cfg {
u8 numtc; /* Total number of enabled TCs */
- u8 ena_tc; /* Tx map */
+ u16 ena_tc; /* Tx map */
struct ice_tc_info tc_info[ICE_MAX_TRAFFIC_CLASS];
};
@@ -266,8 +315,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
- struct ice_ring **rx_rings; /* Rx ring array */
- struct ice_ring **tx_rings; /* Tx ring array */
+ struct ice_rx_ring **rx_rings; /* Rx ring array */
+ struct ice_tx_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
@@ -306,10 +355,6 @@ struct ice_vsi {
spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
atomic_t *arfs_last_fltr_id;
- /* devlink port data */
- struct devlink_port devlink_port;
- bool devlink_port_registered;
-
u16 max_frame;
u16 rx_buf_len;
@@ -344,11 +389,42 @@ struct ice_vsi {
u16 qset_handle[ICE_MAX_TRAFFIC_CLASS];
struct ice_tc_cfg tc_cfg;
struct bpf_prog *xdp_prog;
- struct ice_ring **xdp_rings; /* XDP ring array */
+ struct ice_tx_ring **xdp_rings; /* XDP ring array */
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
u16 num_xdp_txq; /* Used XDP queues */
u8 xdp_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
+ struct net_device **target_netdevs;
+
+ struct tc_mqprio_qopt_offload mqprio_qopt; /* queue parameters */
+
+ /* Channel Specific Fields */
+ struct ice_vsi *tc_map_vsi[ICE_CHNL_MAX_TC];
+ u16 cnt_q_avail;
+ u16 next_base_q; /* next queue to be used for channel setup */
+ struct list_head ch_list;
+ u16 num_chnl_rxq;
+ u16 num_chnl_txq;
+ u16 ch_rss_size;
+ u16 num_chnl_fltr;
+ /* store away RSS size info before configuring ADQ channels so that
+ * it can be used after tc-qdisc delete to restore the RSS settings
+ * as they were before
+ */
+ u16 orig_rss_size;
+ /* this keeps track of all enabled TCs with and without DCB
+ * and inclusive of ADQ; vsi->mqprio_qopt keeps track of queue
+ * information
+ */
+ u8 all_numtc;
+ u16 all_enatc;
+
+ /* store away TC info, to be used for rebuild logic */
+ u8 old_numtc;
+ u16 old_ena_tc;
+
+ struct ice_channel *ch;
+
/* setup back reference, to which aggregator node this VSI
* corresponds to
*/
@@ -377,6 +453,8 @@ struct ice_q_vector {
cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
+ struct ice_channel *ch;
+
char name[ICE_INT_NAME_STR_LEN];
u16 total_events; /* net_dim(): number of interrupts processed */
@@ -395,11 +473,14 @@ enum ice_pf_flags {
ICE_FLAG_PTP, /* PTP is enabled by software */
ICE_FLAG_AUX_ENA,
ICE_FLAG_ADV_FEATURES,
+ ICE_FLAG_TC_MQPRIO, /* support for Multi queue TC */
+ ICE_FLAG_CLS_FLOWER,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
ICE_FLAG_MOD_POWER_UNSUPPORTED,
+ ICE_FLAG_PHY_FW_LOAD_FAILED,
ICE_FLAG_ETHTOOL_CTXT, /* set when ethtool holds RTNL lock */
ICE_FLAG_LEGACY_RX,
ICE_FLAG_VF_TRUE_PROMISC_ENA,
@@ -408,6 +489,12 @@ enum ice_pf_flags {
ICE_PF_FLAGS_NBITS /* must be last */
};
+struct ice_switchdev_info {
+ struct ice_vsi *control_vsi;
+ struct ice_vsi *uplink_vsi;
+ bool is_running;
+};
+
struct ice_agg_node {
u32 agg_id;
#define ICE_MAX_VSIS_IN_AGG_NODE 64
@@ -421,6 +508,9 @@ struct ice_pf {
struct devlink_region *nvm_region;
struct devlink_region *devcaps_region;
+ /* devlink port data */
+ struct devlink_port devlink_port;
+
/* OS reserved IRQ details */
struct msix_entry *msix_entries;
struct ice_res_tracker *irq_tracker;
@@ -434,6 +524,7 @@ struct ice_pf {
struct ice_vsi **vsi; /* VSIs created by the driver */
struct ice_sw *first_sw; /* first switch created by firmware */
+ u16 eswitch_mode; /* current mode of eswitch */
/* Virtchnl/SR-IOV config info */
struct ice_vf *vf;
u16 num_alloc_vfs; /* actual number of VFs allocated */
@@ -443,6 +534,7 @@ struct ice_pf {
/* used to ratelimit the MDD event logging */
unsigned long last_printed_mdd_jiffies;
DECLARE_BITMAP(malvfs, ICE_MAX_VF_COUNT);
+ DECLARE_BITMAP(features, ICE_F_MAX);
DECLARE_BITMAP(state, ICE_STATE_NBITS);
DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
unsigned long *avail_txqs; /* bitmap to track PF Tx queue usage */
@@ -495,12 +587,19 @@ struct ice_pf {
struct auxiliary_device *adev;
int aux_idx;
u32 sw_int_count;
+ /* count of tc_flower filters specific to channel (aka where filter
+ * action is "hw_tc <tc_num>")
+ */
+ u16 num_dmac_chnl_fltrs;
+ struct hlist_head tc_flower_fltr_list;
__le64 nvm_phy_type_lo; /* NVM PHY type low */
__le64 nvm_phy_type_hi; /* NVM PHY type high */
struct ice_link_default_override_tlv link_dflt_override;
struct ice_lag *lag; /* Link Aggregation information */
+ struct ice_switchdev_info switchdev;
+
#define ICE_INVALID_AGG_NODE_ID 0
#define ICE_PF_AGG_NODE_ID_START 1
#define ICE_MAX_PF_AGG_NODES 32
@@ -512,9 +611,28 @@ struct ice_pf {
struct ice_netdev_priv {
struct ice_vsi *vsi;
+ struct ice_repr *repr;
+ /* indirect block callbacks on registered higher level devices
+ * (e.g. tunnel devices)
+ *
+ * tc_indr_block_cb_priv_list is used to look up indirect callback
+ * private data
+ */
+ struct list_head tc_indr_block_priv_list;
};
/**
+ * ice_vector_ch_enabled
+ * @qv: pointer to q_vector, can be NULL
+ *
+ * This function returns true if the vector is channel enabled, otherwise false
+ */
+static inline bool ice_vector_ch_enabled(struct ice_q_vector *qv)
+{
+ return !!qv->ch; /* Enable it to run with TC */
+}
+
+/**
* ice_irq_dynamic_ena - Enable default interrupt generation settings
* @hw: pointer to HW struct
* @vsi: pointer to VSI struct, can be NULL
@@ -556,25 +674,42 @@ static inline bool ice_is_xdp_ena_vsi(struct ice_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline void ice_set_ring_xdp(struct ice_ring *ring)
+static inline void ice_set_ring_xdp(struct ice_tx_ring *ring)
{
ring->flags |= ICE_TX_FLAGS_RING_XDP;
}
/**
* ice_xsk_pool - get XSK buffer pool bound to a ring
- * @ring: ring to use
+ * @ring: Rx ring to use
*
* Returns a pointer to xdp_umem structure if there is a buffer pool present,
* NULL otherwise.
*/
-static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_ring *ring)
+static inline struct xsk_buff_pool *ice_xsk_pool(struct ice_rx_ring *ring)
{
struct ice_vsi *vsi = ring->vsi;
u16 qid = ring->q_index;
- if (ice_ring_is_xdp(ring))
- qid -= vsi->num_xdp_txq;
+ if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(vsi->netdev, qid);
+}
+
+/**
+ * ice_tx_xsk_pool - get XSK buffer pool bound to a ring
+ * @ring: Tx ring to use
+ *
+ * Returns a pointer to xdp_umem structure if there is a buffer pool present,
+ * NULL otherwise. Tx equivalent of ice_xsk_pool.
+ */
+static inline struct xsk_buff_pool *ice_tx_xsk_pool(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ u16 qid;
+
+ qid = ring->q_index - vsi->num_xdp_txq;
if (!ice_is_xdp_ena_vsi(vsi) || !test_bit(qid, vsi->af_xdp_zc_qps))
return NULL;
@@ -597,6 +732,19 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
}
/**
+ * ice_get_netdev_priv_vsi - return VSI associated with netdev priv.
+ * @np: private netdev structure
+ */
+static inline struct ice_vsi *ice_get_netdev_priv_vsi(struct ice_netdev_priv *np)
+{
+ /* In case of port representor return source port VSI. */
+ if (np->repr)
+ return np->repr->src_vsi;
+ else
+ return np->vsi;
+}
+
+/**
* ice_get_ctrl_vsi - Get the control VSI
* @pf: PF instance
*/
@@ -610,6 +758,18 @@ static inline struct ice_vsi *ice_get_ctrl_vsi(struct ice_pf *pf)
}
/**
+ * ice_is_switchdev_running - check if switchdev is configured
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV
+ * and switchdev is configured, false otherwise.
+ */
+static inline bool ice_is_switchdev_running(struct ice_pf *pf)
+{
+ return pf->switchdev.is_running;
+}
+
+/**
* ice_set_sriov_cap - enable SRIOV in PF flags
* @pf: PF struct
*/
@@ -633,11 +793,37 @@ static inline void ice_clear_sriov_cap(struct ice_pf *pf)
((base_idx) * ICE_FD_STAT_CTR_BLOCK_COUNT)
#define ICE_FD_SB_STAT_IDX(base_idx) ICE_FD_STAT_PF_IDX(base_idx)
+/**
+ * ice_is_adq_active - any active ADQs
+ * @pf: pointer to PF
+ *
+ * This function returns true if any ADQs are configured (determined by
+ * looking at the VSI type, which should be VSI_PF, the numtc value, and
+ * the TC_MQPRIO flag); otherwise it returns false
+ */
+static inline bool ice_is_adq_active(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return false;
+
+ /* is ADQ configured */
+ if (vsi->tc_cfg.numtc > ICE_CHNL_START_TC &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return true;
+
+ return false;
+}
+
bool netif_is_ice(struct net_device *dev);
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
int ice_vsi_open_ctrl(struct ice_vsi *vsi);
+int ice_vsi_open(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_set_ethtool_repr_ops(struct net_device *netdev);
void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
@@ -648,6 +834,7 @@ int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
int ice_vsi_cfg(struct ice_vsi *vsi);
struct ice_vsi *ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi);
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi);
int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog);
int ice_destroy_xdp_rings(struct ice_vsi *vsi);
int
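The new ICE_BW_KBPS_DIVISOR define converts the byte-per-second rates handed down by the stack back into Kbps (1000 bit/s = 125 byte/s). A small worked sketch, using a hypothetical helper name:

	#include <linux/math64.h>

	/* 100 Mbit/s arrives from mqprio as 12500000 bytes/s;
	 * 12500000 / ICE_BW_KBPS_DIVISOR == 100000 Kbps.
	 */
	static u64 example_bytes_per_sec_to_kbps(u64 bytes_per_sec)
	{
		return div_u64(bytes_per_sec, ICE_BW_KBPS_DIVISOR);
	}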
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 21b4c7cd6f05..4eef3488d86f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -233,6 +233,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
*/
#define ICE_AQC_RES_TYPE_VSI_LIST_REP 0x03
#define ICE_AQC_RES_TYPE_VSI_LIST_PRUNE 0x04
+#define ICE_AQC_RES_TYPE_RECIPE 0x05
#define ICE_AQC_RES_TYPE_FDIR_COUNTER_BLOCK 0x21
#define ICE_AQC_RES_TYPE_FDIR_GUARANTEED_ENTRIES 0x22
#define ICE_AQC_RES_TYPE_FDIR_SHARED_ENTRIES 0x23
@@ -241,6 +242,7 @@ struct ice_aqc_get_sw_cfg_resp_elem {
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_PROFID 0x60
#define ICE_AQC_RES_TYPE_HASH_PROF_BLDR_TCAM 0x61
+#define ICE_AQC_RES_TYPE_FLAG_SHARED BIT(7)
#define ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM BIT(12)
#define ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX BIT(13)
@@ -474,6 +476,53 @@ struct ice_aqc_vsi_props {
#define ICE_MAX_NUM_RECIPES 64
+/* Add/Get Recipe (indirect 0x0290/0x0292) */
+struct ice_aqc_add_get_recipe {
+ __le16 num_sub_recipes; /* Input in Add cmd, Output in Get cmd */
+ __le16 return_index; /* Input, used for Get cmd only */
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_recipe_content {
+ u8 rid;
+#define ICE_AQ_RECIPE_ID_IS_ROOT BIT(7)
+#define ICE_AQ_SW_ID_LKUP_IDX 0
+ u8 lkup_indx[5];
+#define ICE_AQ_RECIPE_LKUP_IGNORE BIT(7)
+#define ICE_AQ_SW_ID_LKUP_MASK 0x00FF
+ __le16 mask[5];
+ u8 result_indx;
+#define ICE_AQ_RECIPE_RESULT_DATA_S 0
+#define ICE_AQ_RECIPE_RESULT_DATA_M (0x3F << ICE_AQ_RECIPE_RESULT_DATA_S)
+#define ICE_AQ_RECIPE_RESULT_EN BIT(7)
+ u8 rsvd0[3];
+ u8 act_ctrl_join_priority;
+ u8 act_ctrl_fwd_priority;
+ u8 act_ctrl;
+#define ICE_AQ_RECIPE_ACT_INV_ACT BIT(2)
+ u8 rsvd1;
+ __le32 dflt_act;
+};
+
+struct ice_aqc_recipe_data_elem {
+ u8 recipe_indx;
+ u8 resp_bits;
+ u8 rsvd0[2];
+ u8 recipe_bitmap[8];
+ u8 rsvd1[4];
+ struct ice_aqc_recipe_content content;
+ u8 rsvd2[20];
+};
+
+/* Set/Get Recipes to Profile Association (direct 0x0291/0x0293) */
+struct ice_aqc_recipe_to_profile {
+ __le16 profile_id;
+ u8 rsvd[6];
+ DECLARE_BITMAP(recipe_assoc, ICE_MAX_NUM_RECIPES);
+};
+
/* Add/Update/Remove/Get switch rules (indirect 0x02A0, 0x02A1, 0x02A2, 0x02A3)
*/
struct ice_aqc_sw_rules {
@@ -671,6 +720,16 @@ struct ice_aqc_sw_rules_elem {
} __packed pdata;
};
+/* Query PFC Mode (direct 0x0302)
+ * Set PFC Mode (direct 0x0303)
+ */
+struct ice_aqc_set_query_pfc_mode {
+ u8 pfc_mode;
+/* For Query Command response, reserved in all other cases */
+#define ICE_AQC_PFC_VLAN_BASED_PFC 1
+#define ICE_AQC_PFC_DSCP_BASED_PFC 2
+ u8 rsvd[15];
+};
/* Get Default Topology (indirect 0x0400) */
struct ice_aqc_get_topo {
u8 port_num;
@@ -1126,6 +1185,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_TOPO_UNSUPP_MEDIA BIT(7)
u8 link_cfg_err;
#define ICE_AQ_LINK_MODULE_POWER_UNSUPPORTED BIT(5)
+#define ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE BIT(6)
#define ICE_AQ_LINK_INVAL_MAX_POWER_LIMIT BIT(7)
u8 link_info;
#define ICE_AQ_LINK_UP BIT(0) /* Link Status */
@@ -1209,6 +1269,7 @@ struct ice_aqc_set_event_mask {
#define ICE_AQ_LINK_EVENT_AN_COMPLETED BIT(7)
#define ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL BIT(8)
#define ICE_AQ_LINK_EVENT_PORT_TX_SUSPENDED BIT(9)
+#define ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL BIT(12)
u8 reserved1[6];
};
@@ -1220,7 +1281,7 @@ struct ice_aqc_set_mac_lb {
u8 reserved[15];
};
-struct ice_aqc_link_topo_addr {
+struct ice_aqc_link_topo_params {
u8 lport_num;
u8 lport_num_valid;
#define ICE_AQC_LINK_TOPO_PORT_NUM_VALID BIT(0)
@@ -1246,6 +1307,10 @@ struct ice_aqc_link_topo_addr {
#define ICE_AQC_LINK_TOPO_NODE_CTX_PROVIDED 4
#define ICE_AQC_LINK_TOPO_NODE_CTX_OVERRIDE 5
u8 index;
+};
+
+struct ice_aqc_link_topo_addr {
+ struct ice_aqc_link_topo_params topo_params;
__le16 handle;
#define ICE_AQC_LINK_TOPO_HANDLE_S 0
#define ICE_AQC_LINK_TOPO_HANDLE_M (0x3FF << ICE_AQC_LINK_TOPO_HANDLE_S)
@@ -1268,6 +1333,7 @@ struct ice_aqc_link_topo_addr {
struct ice_aqc_get_link_topo {
struct ice_aqc_link_topo_addr addr;
u8 node_part_num;
+#define ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575 0x21
u8 rsvd[9];
};
@@ -1281,6 +1347,16 @@ struct ice_aqc_set_port_id_led {
u8 rsvd[13];
};
+/* Set/Get GPIO (direct, 0x06EC/0x06ED) */
+struct ice_aqc_gpio {
+ __le16 gpio_ctrl_handle;
+#define ICE_AQC_GPIO_HANDLE_S 0
+#define ICE_AQC_GPIO_HANDLE_M (0x3FF << ICE_AQC_GPIO_HANDLE_S)
+ u8 gpio_num;
+ u8 gpio_val;
+ u8 rsvd[12];
+};
+
/* Read/Write SFF EEPROM command (indirect 0x06EE) */
struct ice_aqc_sff_eeprom {
u8 lport_num;
@@ -1922,10 +1998,13 @@ struct ice_aq_desc {
struct ice_aqc_get_phy_caps get_phy;
struct ice_aqc_set_phy_cfg set_phy;
struct ice_aqc_restart_an restart_an;
+ struct ice_aqc_gpio read_write_gpio;
struct ice_aqc_sff_eeprom read_write_sff_param;
struct ice_aqc_set_port_id_led set_port_id_led;
struct ice_aqc_get_sw_cfg get_sw_conf;
struct ice_aqc_sw_rules sw_rules;
+ struct ice_aqc_add_get_recipe add_get_recipe;
+ struct ice_aqc_recipe_to_profile recipe_to_profile;
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
@@ -1936,6 +2015,7 @@ struct ice_aq_desc {
struct ice_aqc_nvm_pkg_data pkg_data;
struct ice_aqc_nvm_pass_comp_tbl pass_comp_tbl;
struct ice_aqc_pf_vf_msg virt;
+ struct ice_aqc_set_query_pfc_mode set_query_pfc_mode;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_stop lldp_stop;
@@ -2033,6 +2113,12 @@ enum ice_adminq_opc {
ice_aqc_opc_update_vsi = 0x0211,
ice_aqc_opc_free_vsi = 0x0213,
+ /* recipe commands */
+ ice_aqc_opc_add_recipe = 0x0290,
+ ice_aqc_opc_recipe_to_profile = 0x0291,
+ ice_aqc_opc_get_recipe = 0x0292,
+ ice_aqc_opc_get_recipe_to_profile = 0x0293,
+
/* switch rules population commands */
ice_aqc_opc_add_sw_rules = 0x02A0,
ice_aqc_opc_update_sw_rules = 0x02A1,
@@ -2040,6 +2126,10 @@ enum ice_adminq_opc {
ice_aqc_opc_clear_pf_cfg = 0x02A4,
+ /* DCB commands */
+ ice_aqc_opc_query_pfc_mode = 0x0302,
+ ice_aqc_opc_set_pfc_mode = 0x0303,
+
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
@@ -2064,6 +2154,8 @@ enum ice_adminq_opc {
ice_aqc_opc_set_mac_lb = 0x0620,
ice_aqc_opc_get_link_topo = 0x06E0,
ice_aqc_opc_set_port_id_led = 0x06E9,
+ ice_aqc_opc_set_gpio = 0x06EC,
+ ice_aqc_opc_get_gpio = 0x06ED,
ice_aqc_opc_sff_eeprom = 0x06EE,
/* NVM commands */
diff --git a/drivers/net/ethernet/intel/ice/ice_arfs.c b/drivers/net/ethernet/intel/ice/ice_arfs.c
index 88d98c9e5f91..5daade32ea62 100644
--- a/drivers/net/ethernet/intel/ice/ice_arfs.c
+++ b/drivers/net/ethernet/intel/ice/ice_arfs.c
@@ -513,7 +513,7 @@ void ice_init_arfs(struct ice_vsi *vsi)
if (!vsi || vsi->type != ICE_VSI_PF)
return;
- arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST,
+ arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list),
GFP_KERNEL);
if (!arfs_fltr_list)
return;
@@ -614,7 +614,7 @@ int ice_set_cpu_rx_rmap(struct ice_vsi *vsi)
return -EINVAL;
base_idx = vsi->base_vector;
- for (i = 0; i < vsi->num_q_vectors; i++)
+ ice_for_each_q_vector(vsi, i)
if (irq_cpu_rmap_add(netdev->rx_cpu_rmap,
pf->msix_entries[base_idx + i].vector)) {
ice_free_cpu_rx_rmap(vsi);
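The ice_init_arfs() change swaps an open-coded kzalloc(n * size) for kcalloc(). Both zero the allocation, but kcalloc() also returns NULL if the multiplication would overflow; with a fixed small count like ICE_MAX_ARFS_LIST that cannot really happen, so this is primarily a style cleanup toward the checked allocator. A side-by-side sketch:

	/* old: open-coded multiply, no overflow check */
	arfs_fltr_list = kzalloc(sizeof(*arfs_fltr_list) * ICE_MAX_ARFS_LIST, GFP_KERNEL);

	/* new: kcalloc() fails cleanly on n * size overflow */
	arfs_fltr_list = kcalloc(ICE_MAX_ARFS_LIST, sizeof(*arfs_fltr_list), GFP_KERNEL);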
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index c36057efc7ae..fa6cd63cbf1f 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -115,6 +115,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
q_vector->tx.itr_mode = ITR_DYNAMIC;
q_vector->rx.itr_mode = ITR_DYNAMIC;
+ q_vector->tx.type = ICE_TX_CONTAINER;
+ q_vector->rx.type = ICE_RX_CONTAINER;
if (vsi->type == ICE_VSI_VF)
goto out;
@@ -146,7 +148,8 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
{
struct ice_q_vector *q_vector;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
dev = ice_pf_to_dev(pf);
@@ -156,10 +159,10 @@ static void ice_free_q_vector(struct ice_vsi *vsi, int v_idx)
}
q_vector = vsi->q_vectors[v_idx];
- ice_for_each_ring(ring, q_vector->tx)
- ring->q_vector = NULL;
- ice_for_each_ring(ring, q_vector->rx)
- ring->q_vector = NULL;
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ tx_ring->q_vector = NULL;
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ rx_ring->q_vector = NULL;
/* only VSI with an associated netdev is set up with NAPI */
if (vsi->netdev)
@@ -201,15 +204,18 @@ static void ice_cfg_itr_gran(struct ice_hw *hw)
}
/**
- * ice_calc_q_handle - calculate the queue handle
+ * ice_calc_txq_handle - calculate the queue handle
* @vsi: VSI that ring belongs to
* @ring: ring to get the absolute queue index
* @tc: traffic class number
*/
-static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
+static u16 ice_calc_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
{
WARN_ONCE(ice_ring_is_xdp(ring) && tc, "XDP ring can't belong to TC other than 0\n");
+ if (ring->ch)
+ return ring->q_index - ring->ch->base_q;
+
/* Idea here for the calculation is that we subtract the queue count of
* the TC that the ring belongs to from its absolute queue index, and as
* a result we get the queue's index within the TC.
@@ -218,13 +224,37 @@ static u16 ice_calc_q_handle(struct ice_vsi *vsi, struct ice_ring *ring, u8 tc)
}
/**
+ * ice_eswitch_calc_txq_handle
+ * @ring: pointer to ring which unique index is needed
+ *
+ * To work correctly with many netdevs, ring->q_index of Tx rings on a switchdev
+ * VSI can repeat. Hardware ring setup requires a unique q_index. Calculate it
+ * here by finding the index of this ring in vsi->tx_rings.
+ *
+ * Returns ICE_INVAL_Q_INDEX when the index isn't found. This should never happen,
+ * because the VSI is taken from ring->vsi, so the ring has to be present in this VSI.
+ */
+static u16 ice_eswitch_calc_txq_handle(struct ice_tx_ring *ring)
+{
+ struct ice_vsi *vsi = ring->vsi;
+ int i;
+
+ ice_for_each_txq(vsi, i) {
+ if (vsi->tx_rings[i] == ring)
+ return i;
+ }
+
+ return ICE_INVAL_Q_INDEX;
+}
+
+/**
* ice_cfg_xps_tx_ring - Configure XPS for a Tx ring
* @ring: The Tx ring to configure
*
* This enables/disables XPS for a given Tx descriptor ring
* based on the TCs enabled for the VSI that ring belongs to.
*/
-static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
+static void ice_cfg_xps_tx_ring(struct ice_tx_ring *ring)
{
if (!ring->q_vector || !ring->netdev)
return;
@@ -246,7 +276,7 @@ static void ice_cfg_xps_tx_ring(struct ice_ring *ring)
* Configure the Tx descriptor ring in TLAN context.
*/
static void
-ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
+ice_setup_tx_ctx(struct ice_tx_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
{
struct ice_vsi *vsi = ring->vsi;
struct ice_hw *hw = &vsi->back->hw;
@@ -258,7 +288,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
/* Transmit Queue Length */
tlan_ctx->qlen = ring->count;
- ice_set_cgd_num(tlan_ctx, ring);
+ ice_set_cgd_num(tlan_ctx, ring->dcb_tc);
/* PF number */
tlan_ctx->pf_num = hw->pf_id;
@@ -273,19 +303,28 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
case ICE_VSI_LB:
case ICE_VSI_CTRL:
case ICE_VSI_PF:
- tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
+ if (ring->ch)
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ else
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_PF;
break;
case ICE_VSI_VF:
/* Firmware expects vmvf_num to be absolute VF ID */
tlan_ctx->vmvf_num = hw->func_caps.vf_base_id + vsi->vf_id;
tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ tlan_ctx->vmvf_type = ICE_TLAN_CTX_VMVF_TYPE_VMQ;
+ break;
default:
return;
}
/* make sure the context is associated with the right VSI */
- tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
+ if (ring->ch)
+ tlan_ctx->src_vsi = ring->ch->vsi_num;
+ else
+ tlan_ctx->src_vsi = ice_get_hw_vsi_num(hw, vsi->idx);
/* Restrict Tx timestamps to the PF VSI */
switch (vsi->type) {
@@ -312,7 +351,7 @@ ice_setup_tx_ctx(struct ice_ring *ring, struct ice_tlan_ctx *tlan_ctx, u16 pf_q)
*
* Returns the offset value for ring into the data buffer.
*/
-static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
+static unsigned int ice_rx_offset(struct ice_rx_ring *rx_ring)
{
if (ice_ring_uses_build_skb(rx_ring))
return ICE_SKB_PAD;
@@ -328,7 +367,7 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
*
* Configure the Rx descriptor ring in RLAN context.
*/
-static int ice_setup_rx_ctx(struct ice_ring *ring)
+static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
{
int chain_len = ICE_MAX_CHAINED_RX_BUFS;
struct ice_vsi *vsi = ring->vsi;
@@ -439,7 +478,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
*
* Return 0 on success and a negative value on error.
*/
-int ice_vsi_cfg_rxq(struct ice_ring *ring)
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
{
struct device *dev = ice_pf_to_dev(ring->vsi->back);
u16 num_bufs = ICE_DESC_UNUSED(ring);
@@ -660,16 +699,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
tx_rings_per_v = (u8)DIV_ROUND_UP(tx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_tx = tx_rings_per_v;
- q_vector->tx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
q_vector->tx.itr_idx = ICE_TX_ITR;
q_base = vsi->num_txq - tx_rings_rem;
for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {
- struct ice_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
tx_ring->q_vector = q_vector;
- tx_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = tx_ring;
+ tx_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = tx_ring;
}
tx_rings_rem -= tx_rings_per_v;
@@ -677,16 +716,16 @@ void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
rx_rings_per_v = (u8)DIV_ROUND_UP(rx_rings_rem,
q_vectors - v_id);
q_vector->num_ring_rx = rx_rings_per_v;
- q_vector->rx.ring = NULL;
+ q_vector->rx.rx_ring = NULL;
q_vector->rx.itr_idx = ICE_RX_ITR;
q_base = vsi->num_rxq - rx_rings_rem;
for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {
- struct ice_ring *rx_ring = vsi->rx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
rx_ring->q_vector = q_vector;
- rx_ring->next = q_vector->rx.ring;
- q_vector->rx.ring = rx_ring;
+ rx_ring->next = q_vector->rx.rx_ring;
+ q_vector->rx.rx_ring = rx_ring;
}
rx_rings_rem -= rx_rings_per_v;
}
@@ -711,12 +750,13 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @qg_buf: queue group buffer
*/
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf)
{
u8 buf_len = struct_size(qg_buf, txqs, 1);
struct ice_tlan_ctx tlan_ctx = { 0 };
struct ice_aqc_add_txqs_perq *txq;
+ struct ice_channel *ch = ring->ch;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -746,10 +786,23 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
/* Add unique software queue handle of the Tx queue per
* TC into the VSI Tx ring
*/
- ring->q_handle = ice_calc_q_handle(vsi, ring, tc);
+ if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ring->q_handle = ice_eswitch_calc_txq_handle(ring);
- status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
- 1, qg_buf, buf_len, NULL);
+ if (ring->q_handle == ICE_INVAL_Q_INDEX)
+ return -ENODEV;
+ } else {
+ ring->q_handle = ice_calc_txq_handle(vsi, ring, tc);
+ }
+
+ if (ch)
+ status = ice_ena_vsi_txq(vsi->port_info, ch->ch_vsi->idx, 0,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
+ else
+ status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
+ ring->q_handle, 1, qg_buf, buf_len,
+ NULL);
if (status) {
dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %s\n",
ice_stat_str(status));
@@ -870,7 +923,7 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
*/
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
struct ice_pf *pf = vsi->back;
@@ -927,9 +980,10 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* are needed for stopping Tx queue
*/
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta)
{
+ struct ice_channel *ch = ring->ch;
u8 tc;
if (IS_ENABLED(CONFIG_DCB))
@@ -940,6 +994,11 @@ ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
txq_meta->q_id = ring->reg_idx;
txq_meta->q_teid = ring->txq_teid;
txq_meta->q_handle = ring->q_handle;
- txq_meta->vsi_idx = vsi->idx;
- txq_meta->tc = tc;
+ if (ch) {
+ txq_meta->vsi_idx = ch->ch_vsi->idx;
+ txq_meta->tc = 0;
+ } else {
+ txq_meta->vsi_idx = vsi->idx;
+ txq_meta->tc = tc;
+ }
}
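The ice_vsi_cfg_txq() hunk above chooses the software queue handle differently for switchdev and ADQ-channel rings. A condensed restatement of that selection (sketch only; both called helpers are static to ice_base.c and the error handling is omitted):

	static u16 example_txq_handle(struct ice_vsi *vsi, struct ice_tx_ring *ring, u8 tc)
	{
		if (vsi->type == ICE_VSI_SWITCHDEV_CTRL)
			/* unique position of the ring within vsi->tx_rings */
			return ice_eswitch_calc_txq_handle(ring);

		if (ring->ch)
			/* index relative to the channel's first queue */
			return ring->q_index - ring->ch->base_q;

		/* index relative to the ring's traffic class */
		return ice_calc_txq_handle(vsi, ring, tc);
	}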
diff --git a/drivers/net/ethernet/intel/ice/ice_base.h b/drivers/net/ethernet/intel/ice/ice_base.h
index 20e1c29aa68a..b67dca417acb 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.h
+++ b/drivers/net/ethernet/intel/ice/ice_base.h
@@ -6,7 +6,7 @@
#include "ice.h"
-int ice_vsi_cfg_rxq(struct ice_ring *ring);
+int ice_vsi_cfg_rxq(struct ice_rx_ring *ring);
int __ice_vsi_get_qs(struct ice_qs_cfg *qs_cfg);
int
ice_vsi_ctrl_one_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx, bool wait);
@@ -15,7 +15,7 @@ int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi);
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
int
-ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_aqc_add_tx_qgrp *qg_buf);
void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector);
void
@@ -25,9 +25,9 @@ ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector);
int
ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring *ring,
+ u16 rel_vmvf_num, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
void
-ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_tx_ring *ring,
struct ice_txq_meta *txq_meta);
#endif /* _ICE_BASE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 2fb81e359cdf..b3066d0fea8b 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,6 +25,8 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
case ICE_DEV_ID_E810C_BACKPLANE:
case ICE_DEV_ID_E810C_QSFP:
case ICE_DEV_ID_E810C_SFP:
+ case ICE_DEV_ID_E810_XXV_BACKPLANE:
+ case ICE_DEV_ID_E810_XXV_QSFP:
case ICE_DEV_ID_E810_XXV_SFP:
hw->mac_type = ICE_MAC_E810;
break;
@@ -70,6 +72,27 @@ bool ice_is_e810(struct ice_hw *hw)
}
/**
+ * ice_is_e810t
+ * @hw: pointer to the hardware structure
+ *
+ * returns true if the device is E810T based, false if not.
+ */
+bool ice_is_e810t(struct ice_hw *hw)
+{
+ switch (hw->device_id) {
+ case ICE_DEV_ID_E810C_SFP:
+ if (hw->subsystem_device_id == ICE_SUBDEV_ID_E810T ||
+ hw->subsystem_device_id == ICE_SUBDEV_ID_E810T2)
+ return true;
+ break;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -240,11 +263,13 @@ ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
- cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
- ICE_AQC_LINK_TOPO_NODE_CTX_S);
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
+ ICE_AQC_LINK_TOPO_NODE_CTX_S);
/* set node type */
- cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
+ cmd->addr.topo_params.node_type_ctx |=
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}
@@ -568,6 +593,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
return ICE_ERR_NO_MEMORY;
INIT_LIST_HEAD(&sw->vsi_list_map_head);
+ sw->prof_res_bm_init = 0;
status = ice_init_def_sw_recp(hw);
if (status) {
@@ -594,17 +620,42 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
list_del(&v_pos_map->list_entry);
devm_kfree(ice_hw_to_dev(hw), v_pos_map);
}
- recps = hw->switch_info->recp_list;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
- struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+ recps = sw->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
recps[i].root_rid = i;
- mutex_destroy(&recps[i].filt_rule_lock);
- list_for_each_entry_safe(lst_itr, tmp_entry,
- &recps[i].filt_rules, list_entry) {
- list_del(&lst_itr->list_entry);
- devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ list_for_each_entry_safe(rg_entry, tmprg_entry,
+ &recps[i].rg_list, l_entry) {
+ list_del(&rg_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), rg_entry);
+ }
+
+ if (recps[i].adv_rule) {
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+ } else {
+ struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
+
+ mutex_destroy(&recps[i].filt_rule_lock);
+ list_for_each_entry_safe(lst_itr, tmp_entry,
+ &recps[i].filt_rules,
+ list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
}
+ if (recps[i].root_buf)
+ devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf);
}
ice_rm_all_sw_replay_rule_info(hw);
devm_kfree(ice_hw_to_dev(hw), sw->recp_list);
@@ -4767,6 +4818,64 @@ ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
}
/**
+ * ice_aq_set_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO that needs to be set
+ * @value: SW-provided IO value to set in the LSB
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06EC AQ command to set the GPIO pin state that's part of the topology
+ */
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+ cmd->gpio_val = value ? 1 : 0;
+
+ return ice_status_to_errno(ice_aq_send_cmd(hw, &desc, NULL, 0, cd));
+}
+
+/**
+ * ice_aq_get_gpio
+ * @hw: pointer to the hw struct
+ * @gpio_ctrl_handle: GPIO controller node handle
+ * @pin_idx: IO number of the GPIO whose value is to be read
+ * @value: IO value read
+ * @cd: pointer to command details structure or NULL
+ *
+ * Sends 0x06ED AQ command to get the value of a GPIO signal which is part of
+ * the topology
+ */
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_gpio *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_gpio);
+ cmd = &desc.params.read_write_gpio;
+ cmd->gpio_ctrl_handle = cpu_to_le16(gpio_ctrl_handle);
+ cmd->gpio_num = pin_idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ *value = !!cmd->gpio_val;
+ return 0;
+}
+
+/**
* ice_fw_supports_link_override
* @hw: pointer to the hardware structure
*
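ice_aq_set_gpio() and ice_aq_get_gpio(), added above, wrap the 0x06EC/0x06ED admin queue commands. A hedged usage sketch (the handle and pin values are placeholders, e.g. for the PCA9575 IO expander the node-part-number define hints at):

	static int example_toggle_gpio(struct ice_hw *hw, u16 gpio_handle, u8 pin)
	{
		bool level;
		int err;

		err = ice_aq_get_gpio(hw, gpio_handle, pin, &level, NULL);
		if (err)
			return err;

		/* drive the pin to the opposite level */
		return ice_aq_set_gpio(hw, gpio_handle, pin, !level, NULL);
	}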
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index fb16070f02e2..65c1b3244264 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -183,6 +183,7 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
+bool ice_is_e810t(struct ice_hw *hw);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
@@ -192,6 +193,12 @@ ice_aq_set_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
int
ice_aq_get_driver_param(struct ice_hw *hw, enum ice_aqc_driver_params idx,
u32 *value, struct ice_sq_cd *cd);
+int
+ice_aq_set_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool value,
+ struct ice_sq_cd *cd);
+int
+ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
+ bool *value, struct ice_sq_cd *cd);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 849fcf605479..241427cd9bc0 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_common.h"
+#include "ice_lib.h"
#include "ice_sched.h"
#include "ice_dcb.h"
@@ -736,6 +737,45 @@ ice_aq_get_cee_dcb_cfg(struct ice_hw *hw,
}
/**
+ * ice_aq_set_pfc_mode - Set PFC mode
+ * @hw: pointer to the HW struct
+ * @pfc_mode: value of PFC mode to set
+ * @cd: pointer to command details structure or NULL
+ *
+ * This AQ call configures the PFC mode to DSCP-based PFC mode or
+ * VLAN-based PFC (0x0303)
+ */
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_set_query_pfc_mode *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (pfc_mode > ICE_AQC_PFC_DSCP_BASED_PFC)
+ return -EINVAL;
+
+ cmd = &desc.params.set_query_pfc_mode;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_pfc_mode);
+
+ cmd->pfc_mode = pfc_mode;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (status)
+ return ice_status_to_errno(status);
+
+ /* FW will write the PFC mode set back into cmd->pfc_mode, but if DCB is
+ * disabled, FW will write back 0 to cmd->pfc_mode. After the AQ has
+ * been executed, check if cmd->pfc_mode is what was requested. If not,
+ * return an error.
+ */
+ if (cmd->pfc_mode != pfc_mode)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+/**
* ice_cee_to_dcb_cfg
* @cee_cfg: pointer to CEE configuration struct
* @pi: port information structure
@@ -1207,7 +1247,140 @@ ice_add_ieee_app_pri_tlv(struct ice_lldp_org_tlv *tlv,
}
/**
- * ice_add_dcb_tlv - Add all IEEE TLVs
+ * ice_add_dscp_up_tlv - Prepare DSCP to UP TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of data to convert to TLV
+ */
+static void
+ice_add_dscp_up_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_UP_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_DSCP2UP);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* bytes 0 - 63 - IPv4 DSCP2UP LUT */
+ for (i = 0; i < ICE_DSCP_NUM_VAL; i++) {
+ /* IPv4 mapping */
+ buf[i] = dcbcfg->dscp_map[i];
+ /* IPv6 mapping */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = dcbcfg->dscp_map[i];
+ }
+
+ /* byte 64 - IPv4 untagged traffic */
+ buf[i] = 0;
+
+ /* byte 144 - IPv6 untagged traffic */
+ buf[i + ICE_DSCP_IPV6_OFFSET] = 0;
+}
+
+#define ICE_BYTES_PER_TC 8
+/**
+ * ice_add_dscp_enf_tlv - Prepare DSCP Enforcement TLV
+ * @tlv: location to build the TLV data
+ */
+static void
+ice_add_dscp_enf_tlv(struct ice_lldp_org_tlv *tlv)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_ENF_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_ENFORCE);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* Allow all DSCP values to be valid for all TC's (IPv4 and IPv6) */
+ memset(buf, 0, 2 * (ICE_MAX_TRAFFIC_CLASS * ICE_BYTES_PER_TC));
+}
+
+/**
+ * ice_add_dscp_tc_bw_tlv - Prepare DSCP BW for TC TLV
+ * @tlv: location to build the TLV data
+ * @dcbcfg: location of the data to convert to TLV
+ */
+static void
+ice_add_dscp_tc_bw_tlv(struct ice_lldp_org_tlv *tlv,
+ struct ice_dcbx_cfg *dcbcfg)
+{
+ struct ice_dcb_ets_cfg *etscfg;
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u8 offset = 0;
+ u16 typelen;
+ int i;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_TC_BW_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_TCBW);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ /* First octet after subtype
+ * ----------------------------
+ * | RSV | CBS | RSV | Max TCs |
+ * | 1b | 1b | 3b | 3b |
+ * ----------------------------
+ */
+ etscfg = &dcbcfg->etscfg;
+ buf[0] = etscfg->maxtcs & ICE_IEEE_ETS_MAXTC_M;
+
+ /* bytes 1 - 4 reserved */
+ offset = 5;
+
+ /* TC BW table
+ * bytes 0 - 7 for TC 0 - 7
+ *
+ * TSA Assignment table
+ * bytes 8 - 15 for TC 0 - 7
+ */
+ for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
+ buf[offset] = etscfg->tcbwtable[i];
+ buf[offset + ICE_MAX_TRAFFIC_CLASS] = etscfg->tsatable[i];
+ offset++;
+ }
+}
+
+/**
+ * ice_add_dscp_pfc_tlv - Prepare DSCP PFC TLV
+ * @tlv: Fill PFC TLV in IEEE format
+ * @dcbcfg: Local store which holds the PFC CFG data
+ */
+static void
+ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
+{
+ u8 *buf = tlv->tlvinfo;
+ u32 ouisubtype;
+ u16 typelen;
+
+ typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
+ ICE_DSCP_PFC_TLV_LEN);
+ tlv->typelen = htons(typelen);
+
+ ouisubtype = (u32)((ICE_DSCP_OUI << ICE_LLDP_TLV_OUI_S) |
+ ICE_DSCP_SUBTYPE_PFC);
+ tlv->ouisubtype = htonl(ouisubtype);
+
+ buf[0] = dcbcfg->pfc.pfccap & 0xF;
+ buf[1] = dcbcfg->pfc.pfcena & 0xF;
+}
+
+/**
+ * ice_add_dcb_tlv - Add all IEEE or DSCP TLVs
* @tlv: Fill TLV data in IEEE format
* @dcbcfg: Local store which holds the DCB Config
* @tlvid: Type of IEEE TLV
@@ -1218,21 +1391,41 @@ static void
ice_add_dcb_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg,
u16 tlvid)
{
- switch (tlvid) {
- case ICE_IEEE_TLV_ID_ETS_CFG:
- ice_add_ieee_ets_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_ETS_REC:
- ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_PFC_CFG:
- ice_add_ieee_pfc_tlv(tlv, dcbcfg);
- break;
- case ICE_IEEE_TLV_ID_APP_PRI:
- ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
- break;
- default:
- break;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ switch (tlvid) {
+ case ICE_IEEE_TLV_ID_ETS_CFG:
+ ice_add_ieee_ets_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_ETS_REC:
+ ice_add_ieee_etsrec_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_PFC_CFG:
+ ice_add_ieee_pfc_tlv(tlv, dcbcfg);
+ break;
+ case ICE_IEEE_TLV_ID_APP_PRI:
+ ice_add_ieee_app_pri_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* pfc_mode == ICE_QOS_MODE_DSCP */
+ switch (tlvid) {
+ case ICE_TLV_ID_DSCP_UP:
+ ice_add_dscp_up_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_ENF:
+ ice_add_dscp_enf_tlv(tlv);
+ break;
+ case ICE_TLV_ID_DSCP_TC_BW:
+ ice_add_dscp_tc_bw_tlv(tlv, dcbcfg);
+ break;
+ case ICE_TLV_ID_DSCP_TO_PFC:
+ ice_add_dscp_pfc_tlv(tlv, dcbcfg);
+ break;
+ default:
+ break;
+ }
}
}
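ice_aq_set_pfc_mode(), added in this file, both issues the 0x0303 command and verifies that firmware latched the requested mode. A hedged caller sketch (example_enable_dscp_pfc is an illustrative name only):

	static int example_enable_dscp_pfc(struct ice_hw *hw)
	{
		/* Returns -EOPNOTSUPP if firmware writes back a different mode
		 * (e.g. when DCB is disabled), -EINVAL for an out-of-range mode,
		 * or the translated AQ error otherwise.
		 */
		return ice_aq_set_pfc_mode(hw, ICE_AQC_PFC_DSCP_BASED_PFC, NULL);
	}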
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.h b/drivers/net/ethernet/intel/ice/ice_dcb.h
index d7e5e6178a21..9b6f87a889a6 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.h
@@ -22,6 +22,14 @@
#define ICE_CEE_DCBX_OUI 0x001B21
#define ICE_CEE_DCBX_TYPE 2
+
+#define ICE_DSCP_OUI 0xFFFFFF
+#define ICE_DSCP_SUBTYPE_DSCP2UP 0x41
+#define ICE_DSCP_SUBTYPE_ENFORCE 0x42
+#define ICE_DSCP_SUBTYPE_TCBW 0x43
+#define ICE_DSCP_SUBTYPE_PFC 0x44
+#define ICE_DSCP_IPV6_OFFSET 80
+
#define ICE_CEE_SUBTYPE_PG_CFG 2
#define ICE_CEE_SUBTYPE_PFC_CFG 3
#define ICE_CEE_SUBTYPE_APP_PRI 4
@@ -78,11 +86,20 @@
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
+#define ICE_TLV_ID_DSCP_UP 3
+#define ICE_TLV_ID_DSCP_ENF 4
+#define ICE_TLV_ID_DSCP_TC_BW 5
+#define ICE_TLV_ID_DSCP_TO_PFC 6
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
+#define ICE_DSCP_UP_TLV_LEN 148
+#define ICE_DSCP_ENF_TLV_LEN 132
+#define ICE_DSCP_TC_BW_TLV_LEN 25
+#define ICE_DSCP_PFC_TLV_LEN 6
+
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
@@ -120,6 +137,7 @@ struct ice_cee_app_prio {
u8 prio_map;
} __packed;
+int ice_aq_set_pfc_mode(struct ice_hw *hw, u8 pfc_mode, struct ice_sq_cd *cd);
enum ice_status
ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 926cf748c5ec..a72e18320a22 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -5,52 +5,10 @@
#include "ice_dcb_nl.h"
/**
- * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
- * @vsi: the VSI being configured
- * @ena_tc: TC map to be enabled
- */
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
-{
- struct net_device *netdev = vsi->netdev;
- struct ice_pf *pf = vsi->back;
- struct ice_dcbx_cfg *dcbcfg;
- u8 netdev_tc;
- int i;
-
- if (!netdev)
- return;
-
- if (!ena_tc) {
- netdev_reset_tc(netdev);
- return;
- }
-
- if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
- return;
-
- dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
-
- ice_for_each_traffic_class(i)
- if (vsi->tc_cfg.ena_tc & BIT(i))
- netdev_set_tc_queue(netdev,
- vsi->tc_cfg.tc_info[i].netdev_tc,
- vsi->tc_cfg.tc_info[i].qcount_tx,
- vsi->tc_cfg.tc_info[i].qoffset);
-
- for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- u8 ets_tc = dcbcfg->etscfg.prio_table[i];
-
- /* Get the mapped netdev TC# for the UP */
- netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
- netdev_set_prio_tc_map(netdev, i, netdev_tc);
- }
-}
-
-/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
+static u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
@@ -179,6 +137,67 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
}
/**
+ * ice_get_first_droptc - returns the index of the first drop TC
+ * @vsi: used to find the first droptc
+ *
+ * This function returns the index of the first drop TC.
+ * When DCB is enabled, the first drop TC is derived from the enabled TC
+ * and PFC enabled bits; otherwise this function returns 0, as there is only
+ * one TC without DCB (TC0).
+ */
+static u8 ice_get_first_droptc(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ u8 num_tc, ena_tc_map, pfc_ena_map;
+ u8 i;
+
+ num_tc = ice_dcb_get_num_tc(cfg);
+
+ /* get bitmap of enabled TCs */
+ ena_tc_map = ice_dcb_get_ena_tc(cfg);
+
+ /* get bitmap of PFC enabled TCs */
+ pfc_ena_map = cfg->pfc.pfcena;
+
+ /* get first TC that is not PFC enabled */
+ for (i = 0; i < num_tc; i++) {
+ if ((ena_tc_map & BIT(i)) && (!(pfc_ena_map & BIT(i)))) {
+ dev_dbg(dev, "first drop tc = %d\n", i);
+ return i;
+ }
+ }
+
+ dev_dbg(dev, "first drop tc = 0\n");
+ return 0;
+}
+
+/**
+ * ice_vsi_set_dcb_tc_cfg - Set VSI's TC based on DCB configuration
+ * @vsi: pointer to the VSI instance
+ */
+void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+
+ switch (vsi->type) {
+ case ICE_VSI_PF:
+ vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
+ vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ break;
+ case ICE_VSI_CHNL:
+ vsi->tc_cfg.ena_tc = BIT(ice_get_first_droptc(vsi));
+ vsi->tc_cfg.numtc = 1;
+ break;
+ case ICE_VSI_CTRL:
+ case ICE_VSI_LB:
+ default:
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ }
+}
+
+/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
* @queue_index: queue number associated with VSI
@@ -194,17 +213,18 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
*/
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
{
- struct ice_ring *tx_ring, *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 qoffset, qcount;
int i, n;
if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
/* Reset the TC information */
- for (i = 0; i < vsi->num_txq; i++) {
+ ice_for_each_txq(vsi, i) {
tx_ring = vsi->tx_rings[i];
tx_ring->dcb_tc = 0;
}
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
rx_ring = vsi->rx_rings[i];
rx_ring->dcb_tc = 0;
}
@@ -217,11 +237,68 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
qoffset = vsi->tc_cfg.tc_info[n].qoffset;
qcount = vsi->tc_cfg.tc_info[n].qcount_tx;
- for (i = qoffset; i < (qoffset + qcount); i++) {
- tx_ring = vsi->tx_rings[i];
- rx_ring = vsi->rx_rings[i];
- tx_ring->dcb_tc = n;
- rx_ring->dcb_tc = n;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->tx_rings[i]->dcb_tc = n;
+
+ qcount = vsi->tc_cfg.tc_info[n].qcount_rx;
+ for (i = qoffset; i < (qoffset + qcount); i++)
+ vsi->rx_rings[i]->dcb_tc = n;
+ }
+ /* applicable only if "all_enatc" is set, which will be set from
+ * setup_tc method as part of configuring channels
+ */
+ if (vsi->all_enatc) {
+ u8 first_droptc = ice_get_first_droptc(vsi);
+
+ /* When DCB is configured, TC for ADQ queues (which are really
+ * PF queues) should be the first drop TC of the main VSI
+ */
+ ice_for_each_chnl_tc(n) {
+ if (!(vsi->all_enatc & BIT(n)))
+ break;
+
+ qoffset = vsi->mqprio_qopt.qopt.offset[n];
+ qcount = vsi->mqprio_qopt.qopt.count[n];
+ for (i = qoffset; i < (qoffset + qcount); i++) {
+ vsi->tx_rings[i]->dcb_tc = first_droptc;
+ vsi->rx_rings[i]->dcb_tc = first_droptc;
+ }
+ }
+ }
+}
+
+/**
+ * ice_dcb_ena_dis_vsi - disable certain VSIs for DCB config/reconfig
+ * @pf: pointer to the PF instance
+ * @ena: true to enable VSIs, false to disable
+ * @locked: true if caller holds RTNL lock, false otherwise
+ *
+ * Before a new DCB configuration can be applied, VSIs of type PF, SWITCHDEV
+ * and CHNL need to be brought down. Following completion of DCB configuration
+ * the VSIs that were downed need to be brought up again. This helper function
+ * does both.
+ */
+static void ice_dcb_ena_dis_vsi(struct ice_pf *pf, bool ena, bool locked)
+{
+ int i;
+
+ ice_for_each_vsi(pf, i) {
+ struct ice_vsi *vsi = pf->vsi[i];
+
+ if (!vsi)
+ continue;
+
+ switch (vsi->type) {
+ case ICE_VSI_CHNL:
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_PF:
+ if (ena)
+ ice_ena_vsi(vsi, locked);
+ else
+ ice_dis_vsi(vsi, locked);
+ break;
+ default:
+ continue;
}
}
}
@@ -330,7 +407,9 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
*/
if (!locked)
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
@@ -358,7 +437,8 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
if (!locked)
rtnl_unlock();
free_cfg:
@@ -544,7 +624,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
* @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
-static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
{
struct ice_aqc_port_ets_elem buf = { 0 };
struct ice_dcbx_cfg *dcbcfg;
@@ -673,6 +753,8 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ice_dcb_noncontig_cfg(pf);
}
+ } else if (vsi->type == ICE_VSI_CHNL) {
+ tc_map = BIT(ice_get_first_droptc(vsi));
} else {
tc_map = ICE_DFLT_TRAFFIC_CLASS;
}
@@ -683,6 +765,12 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
vsi->idx);
continue;
}
+ /* no need to proceed with remaining cfg if it is CHNL
+ * or switchdev VSI
+ */
+ if (vsi->type == ICE_VSI_CHNL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL)
+ continue;
ice_vsi_map_rings_to_vectors(vsi);
if (vsi->type == ICE_VSI_PF)
@@ -726,6 +814,11 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(dev, "FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ err = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (err)
+ dev_info(dev, "Failed to set VLAN PFC mode\n");
+
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
dev_err(dev, "Failed to set local DCB config %d\n",
@@ -814,7 +907,7 @@ void ice_update_dcb_stats(struct ice_pf *pf)
* tag will already be configured with the correct ID and priority bits
*/
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -851,7 +944,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
- struct ice_vsi *pf_vsi;
u8 mib_type;
int ret;
@@ -927,14 +1019,9 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
- pf_vsi = ice_get_main_vsi(pf);
- if (!pf_vsi) {
- dev_dbg(dev, "PF VSI doesn't exist\n");
- goto out;
- }
-
rtnl_lock();
- ice_dis_vsi(pf_vsi, true);
+ /* disable VSIs affected by DCB changes */
+ ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -945,7 +1032,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
- ice_ena_vsi(pf_vsi, true);
+ /* enable previously downed VSIs */
+ ice_dcb_ena_dis_vsi(pf, true, true);
unlock_rtnl:
rtnl_unlock();
out:
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 261b6e2ed7bc..4c421c842a13 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -15,7 +15,7 @@
#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
-u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
+int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi);
bool ice_is_pfc_causing_hung_q(struct ice_pf *pf, unsigned int txqueue);
@@ -28,13 +28,11 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
void
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
struct ice_tx_buf *first);
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
-void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
-
/**
* ice_find_q_in_range
* @low: start of queue range for a TC i.e. offset of TC
@@ -49,9 +47,9 @@ static inline bool ice_find_q_in_range(u16 low, u16 high, unsigned int tx_q)
}
static inline void
-ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
+ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc)
{
- tlan_ctx->cgd_num = ring->dcb_tc;
+ tlan_ctx->cgd_num = dcb_tc;
}
static inline bool ice_is_dcb_active(struct ice_pf *pf)
@@ -59,9 +57,21 @@ static inline bool ice_is_dcb_active(struct ice_pf *pf)
return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
+
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return pf->hw.port_info->qos_cfg.local_dcbx_cfg.pfc_mode;
+}
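Callers can branch on the active QoS mode through this accessor; a minimal sketch mirroring (in simplified form) the check added to ice_set_priv_flags() further down in this series:

	/* illustrative only — refuse to start the FW LLDP agent while in DSCP mode */
	if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
		dev_err(ice_pf_to_dev(pf), "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
		return -EOPNOTSUPP;
	}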
+
#else
static inline void ice_dcb_rebuild(struct ice_pf *pf) { }
+static inline void ice_vsi_set_dcb_tc_cfg(struct ice_vsi *vsi)
+{
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+}
+
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
@@ -95,7 +105,7 @@ ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
}
static inline int
-ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
+ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
{
return 0;
@@ -113,12 +123,16 @@ ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
return false;
}
+static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
+{
+ return 0;
+}
+
static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
static inline void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, struct ice_rq_event_info *event) { }
-static inline void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) { }
-static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring) { }
+static inline void ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, u8 dcb_tc) { }
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index 4180f1f35fb8..7fdeb411b6df 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -64,7 +64,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcbx_cfg *new_cfg;
int bwcfg = 0, bwrec = 0;
- int err, i, max_tc = 0;
+ int err, i;
if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
@@ -80,13 +80,14 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
new_cfg->etscfg.tcbwtable[i] = ets->tc_tx_bw[i];
bwcfg += ets->tc_tx_bw[i];
new_cfg->etscfg.tsatable[i] = ets->tc_tsa[i];
- new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
- if (ets->prio_tc[i] > max_tc)
- max_tc = ets->prio_tc[i];
+ if (new_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ /* in DSCP mode up->tc mapping cannot change */
+ new_cfg->etscfg.prio_table[i] = ets->prio_tc[i];
+ new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
+ }
new_cfg->etsrec.tcbwtable[i] = ets->tc_reco_bw[i];
bwrec += ets->tc_reco_bw[i];
new_cfg->etsrec.tsatable[i] = ets->tc_reco_tsa[i];
- new_cfg->etsrec.prio_table[i] = ets->reco_prio_tc[i];
}
if (ice_dcb_bwchk(pf, new_cfg)) {
@@ -94,12 +95,7 @@ static int ice_dcbnl_setets(struct net_device *netdev, struct ieee_ets *ets)
goto ets_out;
}
- max_tc = pf->hw.func_caps.common_cap.maxtc;
-
- new_cfg->etscfg.maxtcs = max_tc;
-
- if (!bwcfg)
- new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.maxtcs = pf->hw.func_caps.common_cap.maxtc;
if (!bwrec)
new_cfg->etsrec.tcbwtable[0] = 100;
@@ -173,10 +169,13 @@ static u8 ice_dcbnl_setdcbx(struct net_device *netdev, u8 mode)
pf->dcbx_cap = mode;
qos_cfg = &pf->hw.port_info->qos_cfg;
- if (mode & DCB_CAP_DCBX_VER_CEE)
+ if (mode & DCB_CAP_DCBX_VER_CEE) {
+ if (qos_cfg->local_dcbx_cfg.pfc_mode == ICE_QOS_MODE_DSCP)
+ return ICE_DCB_NO_HW_CHG;
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_CEE;
- else
+ } else {
qos_cfg->local_dcbx_cfg.dcbx_mode = ICE_DCBX_MODE_IEEE;
+ }
dev_info(ice_pf_to_dev(pf), "DCBx mode = 0x%x\n", mode);
return ICE_DCB_HW_CHG_RST;
@@ -683,6 +682,8 @@ ice_dcbnl_find_app(struct ice_dcbx_cfg *cfg,
return false;
}
+#define ICE_BYTES_PER_DSCP_VAL 8
+
/**
* ice_dcbnl_setapp - set local IEEE App config
* @netdev: relevant netdev struct
@@ -693,42 +694,117 @@ static int ice_dcbnl_setapp(struct net_device *netdev, struct dcb_app *app)
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_dcb_app_priority_table new_app;
struct ice_dcbx_cfg *old_cfg, *new_cfg;
+ u8 max_tc;
int ret;
- if ((pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) ||
- !(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ /* ONLY DSCP APP TLVs have operational significance */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
return -EINVAL;
- mutex_lock(&pf->tc_mutex);
+ /* only allow APP TLVs in SW Mode */
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't do DSCP QoS when FW DCB agent active\n");
+ return -EINVAL;
+ }
- new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
- old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (!ice_is_feature_supported(pf, ICE_F_DSCP))
+ return -EOPNOTSUPP;
- if (old_cfg->numapps == ICE_DCBX_MAX_APPS) {
- ret = -EINVAL;
- goto setapp_out;
+ if (app->protocol >= ICE_DSCP_NUM_VAL) {
+ netdev_err(netdev, "DSCP value 0x%04X out of range\n",
+ app->protocol);
+ return -EINVAL;
+ }
+
+ max_tc = pf->hw.func_caps.common_cap.maxtc;
+ if (app->priority >= max_tc) {
+ netdev_err(netdev, "TC %d out of range, max TC %d\n",
+ app->priority, max_tc);
+ return -EINVAL;
}
+ /* grab TC mutex */
+ mutex_lock(&pf->tc_mutex);
+
+ new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
+ old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
ret = dcb_ieee_setapp(netdev, app);
if (ret)
goto setapp_out;
+ if (test_and_set_bit(app->protocol, new_cfg->dscp_mapped)) {
+ netdev_err(netdev, "DSCP value 0x%04X already user mapped\n",
+ app->protocol);
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
+ netdev_err(netdev, "Failed to delete re-mapping TLV\n");
+ ret = -EINVAL;
+ goto setapp_out;
+ }
+
new_app.selector = app->selector;
new_app.prot_id = app->protocol;
new_app.priority = app->priority;
- if (ice_dcbnl_find_app(old_cfg, &new_app)) {
- ret = 0;
- goto setapp_out;
- }
+ /* If the port is not yet in DSCP mode, switch it now */
+ if (old_cfg->pfc_mode == ICE_QOS_MODE_VLAN) {
+ int i, j;
+
+ /* set DSCP mode */
+ ret = ice_aq_set_pfc_mode(&pf->hw, ICE_AQC_PFC_DSCP_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_err(netdev, "Failed to set DSCP PFC mode %d\n",
+ ret);
+ goto setapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L3 DSCP mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_DSCP;
+
+ /* set default DSCP QoS values */
+ new_cfg->etscfg.willing = 0;
+ new_cfg->pfc.pfccap = max_tc;
+ new_cfg->pfc.willing = 0;
+
+ for (i = 0; i < max_tc; i++)
+ for (j = 0; j < ICE_BYTES_PER_DSCP_VAL; j++) {
+ int dscp, offset;
+
+ dscp = (i * max_tc) + j;
+ offset = max_tc * ICE_BYTES_PER_DSCP_VAL;
+
+ new_cfg->dscp_map[dscp] = i;
+ /* if fewer than 8 TCs are supported */
+ if (max_tc < ICE_MAX_TRAFFIC_CLASS)
+ new_cfg->dscp_map[dscp + offset] = i;
+ }
+
+ new_cfg->etscfg.tcbwtable[0] = 100;
+ new_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[0] = 0;
+
+ for (i = 1; i < max_tc; i++) {
+ new_cfg->etscfg.tcbwtable[i] = 0;
+ new_cfg->etscfg.tsatable[i] = ICE_IEEE_TSA_ETS;
+ new_cfg->etscfg.prio_table[i] = i;
+ }
+ } /* end of switching to DSCP mode */
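For the common case of max_tc == 8, the default map built above reduces to dscp_map[d] = d / ICE_BYTES_PER_DSCP_VAL and the offset branch is skipped. A standalone illustration of that case (assumes 8 supported TCs and ICE_DSCP_NUM_VAL == 64; illustrative only):

	/* illustrative only: default DSCP->TC map when all 8 TCs are supported */
	for (d = 0; d < 64; d++)
		dscp_map[d] = d / 8;	/* DSCP 0-7 -> TC0, 8-15 -> TC1, ..., 56-63 -> TC7 */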
+
+ /* apply new mapping for this DSCP value */
+ new_cfg->dscp_map[app->protocol] = app->priority;
new_cfg->app[new_cfg->numapps++] = new_app;
+
ret = ice_pf_dcb_cfg(pf, new_cfg, true);
/* return of zero indicates new cfg applied */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
- if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ else
+ ret = ICE_DCB_NO_HW_CHG;
setapp_out:
mutex_unlock(&pf->tc_mutex);
@@ -749,22 +825,21 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
unsigned int i, j;
int ret = 0;
- if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
+ if (pf->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED) {
+ netdev_err(netdev, "can't delete DSCP netlink app when FW DCB agent is active\n");
return -EINVAL;
+ }
mutex_lock(&pf->tc_mutex);
old_cfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
- if (old_cfg->numapps <= 1)
- goto delapp_out;
-
ret = dcb_ieee_delapp(netdev, app);
if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->qos_cfg.desired_dcbx_cfg;
- for (i = 1; i < new_cfg->numapps; i++) {
+ for (i = 0; i < new_cfg->numapps; i++) {
if (app->selector == new_cfg->app[i].selector &&
app->protocol == new_cfg->app[i].prot_id &&
app->priority == new_cfg->app[i].priority) {
@@ -784,17 +859,58 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
new_cfg->numapps--;
for (j = i; j < new_cfg->numapps; j++) {
- new_cfg->app[i].selector = old_cfg->app[j + 1].selector;
- new_cfg->app[i].prot_id = old_cfg->app[j + 1].prot_id;
- new_cfg->app[i].priority = old_cfg->app[j + 1].priority;
+ new_cfg->app[j].selector = old_cfg->app[j + 1].selector;
+ new_cfg->app[j].prot_id = old_cfg->app[j + 1].prot_id;
+ new_cfg->app[j].priority = old_cfg->app[j + 1].priority;
}
- ret = ice_pf_dcb_cfg(pf, new_cfg, true);
- /* return of zero indicates new cfg applied */
+ /* if not a DSCP APP TLV or DSCP is not supported, we are done */
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP ||
+ !ice_is_feature_supported(pf, ICE_F_DSCP)) {
+ ret = ICE_DCB_HW_CHG;
+ goto delapp_out;
+ }
+
+ /* if it is a DSCP TLV, we need to address the change in mapping */
+ clear_bit(app->protocol, new_cfg->dscp_mapped);
+ /* remap this DSCP value to default value */
+ new_cfg->dscp_map[app->protocol] = app->protocol %
+ ICE_BYTES_PER_DSCP_VAL;
+
+ /* if the last DSCP mapping just got deleted, need to switch
+ * to L2 VLAN QoS mode
+ */
+ if (bitmap_empty(new_cfg->dscp_mapped, ICE_DSCP_NUM_VAL) &&
+ new_cfg->pfc_mode == ICE_QOS_MODE_DSCP) {
+ ret = ice_aq_set_pfc_mode(&pf->hw,
+ ICE_AQC_PFC_VLAN_BASED_PFC,
+ NULL);
+ if (ret) {
+ netdev_info(netdev, "Failed to set VLAN PFC mode %d\n",
+ ret);
+ goto delapp_out;
+ }
+ netdev_info(netdev, "Switched QoS to L2 VLAN mode\n");
+
+ new_cfg->pfc_mode = ICE_QOS_MODE_VLAN;
+
+ ret = ice_dcb_sw_dflt_cfg(pf, true, true);
+ } else {
+ ret = ice_pf_dcb_cfg(pf, new_cfg, true);
+ }
+
+ /* return of ICE_DCB_HW_CHG_RST indicates new cfg applied
+ * and reset needs to be performed
+ */
if (ret == ICE_DCB_HW_CHG_RST)
ice_dcbnl_devreset(netdev);
+
+ /* if the change was not significant enough to actually call
+ * the reconfiguration flow, we still need to tell the caller that
+ * their request was successfully handled
+ */
if (ret == ICE_DCB_NO_HW_CHG)
- ret = ICE_DCB_HW_CHG_RST;
+ ret = ICE_DCB_HW_CHG;
delapp_out:
mutex_unlock(&pf->tc_mutex);
diff --git a/drivers/net/ethernet/intel/ice/ice_devids.h b/drivers/net/ethernet/intel/ice/ice_devids.h
index 9d8194671f6a..61dd2f18dee8 100644
--- a/drivers/net/ethernet/intel/ice/ice_devids.h
+++ b/drivers/net/ethernet/intel/ice/ice_devids.h
@@ -21,6 +21,12 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
+#define ICE_SUBDEV_ID_E810T 0x000E
+#define ICE_SUBDEV_ID_E810T2 0x000F
+/* Intel(R) Ethernet Controller E810-XXV for backplane */
+#define ICE_DEV_ID_E810_XXV_BACKPLANE 0x1599
+/* Intel(R) Ethernet Controller E810-XXV for QSFP */
+#define ICE_DEV_ID_E810_XXV_QSFP 0x159A
/* Intel(R) Ethernet Controller E810-XXV for SFP */
#define ICE_DEV_ID_E810_XXV_SFP 0x159B
/* Intel(R) Ethernet Connection E823-C for backplane */
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c
index 14afce82ef63..b9bd9f9472f6 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.c
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.c
@@ -4,6 +4,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_devlink.h"
+#include "ice_eswitch.h"
#include "ice_fw_update.h"
/* context for devlink info version reporting */
@@ -22,7 +23,7 @@ struct ice_info_ctx {
*
* If a version does not exist, for example when attempting to get the
* inactive version of flash when there is no pending update, the function
- * should leave the buffer in the ctx structure empty and return 0.
+ * should leave the buffer in the ctx structure empty.
*/
static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
@@ -35,7 +36,7 @@ static void ice_info_get_dsn(struct ice_pf *pf, struct ice_info_ctx *ctx)
snprintf(ctx->buf, sizeof(ctx->buf), "%8phD", dsn);
}
-static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
enum ice_status status;
@@ -45,148 +46,127 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx)
/* We failed to locate the PBA, so just skip this entry */
dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n",
ice_stat_str(status));
-
- return 0;
}
-static int ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_mgmt(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->fw_maj_ver, hw->fw_min_ver,
- hw->fw_patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch);
}
-static int ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_api(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u", hw->api_maj_ver, hw->api_min_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", hw->api_maj_ver,
+ hw->api_min_ver, hw->api_patch);
}
-static int ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_fw_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", hw->fw_build);
-
- return 0;
}
-static int ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_orom_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &pf->hw.flash.orom;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u", orom->major, orom->build, orom->patch);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
+ orom->major, orom->build, orom->patch);
}
-static int
-ice_info_pending_orom_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_orom_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_orom_info *orom = &ctx->pending_orom;
if (ctx->dev_caps.common_cap.nvm_update_pending_orom)
snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u",
orom->major, orom->build, orom->patch);
-
- return 0;
}
-static int ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_nvm_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
}
-static int
-ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_nvm_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x", nvm->major, nvm->minor);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%02x",
+ nvm->major, nvm->minor);
}
-static int ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &pf->hw.flash.nvm;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int
-ice_info_pending_eetrack(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_eetrack(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_nvm_info *nvm = &ctx->pending_nvm;
if (ctx->dev_caps.common_cap.nvm_update_pending_nvm)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", nvm->eetrack);
-
- return 0;
}
-static int ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_ddp_pkg_name(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_hw *hw = &pf->hw;
snprintf(ctx->buf, sizeof(ctx->buf), "%s", hw->active_pkg_name);
-
- return 0;
}
-static int ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_version(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_pkg_ver *pkg = &pf->hw.active_pkg_ver;
- snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u", pkg->major, pkg->minor, pkg->update,
- pkg->draft);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%u.%u.%u.%u",
+ pkg->major, pkg->minor, pkg->update, pkg->draft);
}
-static int ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_ddp_pkg_bundle_id(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", pf->hw.active_track_id);
-
- return 0;
}
-static int ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_ver(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
/* The netlist version fields are BCD formatted */
- snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x", netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
+ netlist->major, netlist->minor,
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
+static void ice_info_netlist_build(struct ice_pf *pf, struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &pf->hw.flash.netlist;
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
-static int
-ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
@@ -194,21 +174,18 @@ ice_info_pending_netlist_ver(struct ice_pf __always_unused *pf, struct ice_info_
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "%x.%x.%x-%x.%x.%x",
netlist->major, netlist->minor,
- netlist->type >> 16, netlist->type & 0xFFFF, netlist->rev,
- netlist->cust_ver);
-
- return 0;
+ netlist->type >> 16, netlist->type & 0xFFFF,
+ netlist->rev, netlist->cust_ver);
}
-static int
-ice_info_pending_netlist_build(struct ice_pf __always_unused *pf, struct ice_info_ctx *ctx)
+static void
+ice_info_pending_netlist_build(struct ice_pf __always_unused *pf,
+ struct ice_info_ctx *ctx)
{
struct ice_netlist_info *netlist = &ctx->pending_netlist;
if (ctx->dev_caps.common_cap.nvm_update_pending_netlist)
snprintf(ctx->buf, sizeof(ctx->buf), "0x%08x", netlist->hash);
-
- return 0;
}
#define fixed(key, getter) { ICE_VERSION_FIXED, key, getter, NULL }
@@ -238,8 +215,8 @@ enum ice_version_type {
static const struct ice_devlink_version {
enum ice_version_type type;
const char *key;
- int (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
- int (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*getter)(struct ice_pf *pf, struct ice_info_ctx *ctx);
+ void (*fallback)(struct ice_pf *pf, struct ice_info_ctx *ctx);
} ice_devlink_versions[] = {
fixed(DEVLINK_INFO_VERSION_GENERIC_BOARD_ID, ice_info_pba),
running(DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, ice_info_fw_mgmt),
@@ -351,24 +328,15 @@ static int ice_devlink_info_get(struct devlink *devlink,
memset(ctx->buf, 0, sizeof(ctx->buf));
- err = ice_devlink_versions[i].getter(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
+ ice_devlink_versions[i].getter(pf, ctx);
/* If the default getter doesn't report a version, use the
* fallback function. This is primarily useful in the case of
* "stored" versions that want to report the same value as the
* running version in the normal case of no pending update.
*/
- if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback) {
- err = ice_devlink_versions[i].fallback(pf, ctx);
- if (err) {
- NL_SET_ERR_MSG_MOD(extack, "Unable to obtain version info");
- goto out_free_ctx;
- }
- }
+ if (ctx->buf[0] == '\0' && ice_devlink_versions[i].fallback)
+ ice_devlink_versions[i].fallback(pf, ctx);
/* Do not report missing versions */
if (ctx->buf[0] == '\0')
@@ -456,6 +424,8 @@ ice_devlink_flash_update(struct devlink *devlink,
static const struct devlink_ops ice_devlink_ops = {
.supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .eswitch_mode_get = ice_eswitch_mode_get,
+ .eswitch_mode_set = ice_eswitch_mode_set,
.info_get = ice_devlink_info_get,
.flash_update = ice_devlink_flash_update,
};
@@ -482,10 +452,8 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
return NULL;
/* Add an action to teardown the devlink when unwinding the driver */
- if (devm_add_action(dev, ice_devlink_free, devlink)) {
- devlink_free(devlink);
+ if (devm_add_action_or_reset(dev, ice_devlink_free, devlink))
return NULL;
- }
return devlink_priv(devlink);
}
@@ -498,15 +466,58 @@ struct ice_pf *ice_allocate_pf(struct device *dev)
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_register(struct ice_pf *pf)
+void ice_devlink_register(struct ice_pf *pf)
{
struct devlink *devlink = priv_to_devlink(pf);
- struct device *dev = ice_pf_to_dev(pf);
+
+ devlink_register(devlink);
+}
+
+/**
+ * ice_devlink_unregister - Unregister devlink resources for this PF.
+ * @pf: the PF structure to cleanup
+ *
+ * Releases resources used by devlink and cleans up associated memory.
+ */
+void ice_devlink_unregister(struct ice_pf *pf)
+{
+ devlink_unregister(priv_to_devlink(pf));
+}
+
+/**
+ * ice_devlink_create_pf_port - Create a devlink port for this PF
+ * @pf: the PF to create a devlink port for
+ *
+ * Create and register a devlink_port for this PF.
+ *
+ * Return: zero on success or an error code on failure.
+ */
+int ice_devlink_create_pf_port(struct ice_pf *pf)
+{
+ struct devlink_port_attrs attrs = {};
+ struct devlink_port *devlink_port;
+ struct devlink *devlink;
+ struct ice_vsi *vsi;
+ struct device *dev;
int err;
- err = devlink_register(devlink);
+ dev = ice_pf_to_dev(pf);
+
+ devlink_port = &pf->devlink_port;
+
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ return -EIO;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = pf->hw.bus.func;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ devlink = priv_to_devlink(pf);
+
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink registration failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for PF %d, error %d\n",
+ pf->hw.pf_id, err);
return err;
}
@@ -514,71 +525,75 @@ int ice_devlink_register(struct ice_pf *pf)
}
/**
- * ice_devlink_unregister - Unregister devlink resources for this PF.
- * @pf: the PF structure to cleanup
+ * ice_devlink_destroy_pf_port - Destroy the devlink_port for this PF
+ * @pf: the PF to cleanup
*
- * Releases resources used by devlink and cleans up associated memory.
+ * Unregisters the devlink_port structure associated with this PF.
*/
-void ice_devlink_unregister(struct ice_pf *pf)
+void ice_devlink_destroy_pf_port(struct ice_pf *pf)
{
- devlink_unregister(priv_to_devlink(pf));
+ struct devlink_port *devlink_port;
+
+ devlink_port = &pf->devlink_port;
+
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
/**
- * ice_devlink_create_port - Create a devlink port for this VSI
- * @vsi: the VSI to create a port for
+ * ice_devlink_create_vf_port - Create a devlink port for this VF
+ * @vf: the VF to create a port for
*
- * Create and register a devlink_port for this VSI.
+ * Create and register a devlink_port for this VF.
*
* Return: zero on success or an error code on failure.
*/
-int ice_devlink_create_port(struct ice_vsi *vsi)
+int ice_devlink_create_vf_port(struct ice_vf *vf)
{
struct devlink_port_attrs attrs = {};
- struct ice_port_info *pi;
+ struct devlink_port *devlink_port;
struct devlink *devlink;
+ struct ice_vsi *vsi;
struct device *dev;
struct ice_pf *pf;
int err;
- /* Currently we only create devlink_port instances for PF VSIs */
- if (vsi->type != ICE_VSI_PF)
- return -EINVAL;
+ pf = vf->pf;
+ dev = ice_pf_to_dev(pf);
+ vsi = ice_get_vf_vsi(vf);
+ devlink_port = &vf->devlink_port;
+
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
+ attrs.pci_vf.pf = pf->hw.bus.func;
+ attrs.pci_vf.vf = vf->vf_id;
- pf = vsi->back;
+ devlink_port_attrs_set(devlink_port, &attrs);
devlink = priv_to_devlink(pf);
- dev = ice_pf_to_dev(pf);
- pi = pf->hw.port_info;
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pi->lport;
- devlink_port_attrs_set(&vsi->devlink_port, &attrs);
- err = devlink_port_register(devlink, &vsi->devlink_port, vsi->idx);
+ err = devlink_port_register(devlink, devlink_port, vsi->idx);
if (err) {
- dev_err(dev, "devlink_port_register failed: %d\n", err);
+ dev_err(dev, "Failed to create devlink port for VF %d, error %d\n",
+ vf->vf_id, err);
return err;
}
- vsi->devlink_port_registered = true;
-
return 0;
}
/**
- * ice_devlink_destroy_port - Destroy the devlink_port for this VSI
- * @vsi: the VSI to cleanup
+ * ice_devlink_destroy_vf_port - Destroy the devlink_port for this VF
+ * @vf: the VF to cleanup
*
- * Unregisters the devlink_port structure associated with this VSI.
+ * Unregisters the devlink_port structure associated with this VF.
*/
-void ice_devlink_destroy_port(struct ice_vsi *vsi)
+void ice_devlink_destroy_vf_port(struct ice_vf *vf)
{
- if (!vsi->devlink_port_registered)
- return;
+ struct devlink_port *devlink_port;
- devlink_port_type_clear(&vsi->devlink_port);
- devlink_port_unregister(&vsi->devlink_port);
+ devlink_port = &vf->devlink_port;
- vsi->devlink_port_registered = false;
+ devlink_port_type_clear(devlink_port);
+ devlink_port_unregister(devlink_port);
}
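The VF port helpers are presumably paired across the SR-IOV enable and teardown paths; a hypothetical caller-side sketch (the loop shape, error label, and declarations of i/err are assumptions, not part of the patch):

	/* hypothetical per-VF devlink port registration during SR-IOV enable */
	ice_for_each_vf(pf, i) {
		struct ice_vf *vf = &pf->vf[i];

		err = ice_devlink_create_vf_port(vf);
		if (err)
			goto err_unwind;	/* unwind ports registered so far */
	}

	/* ... and on teardown, the matching cleanup */
	ice_for_each_vf(pf, i)
		ice_devlink_destroy_vf_port(&pf->vf[i]);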
/**
diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.h b/drivers/net/ethernet/intel/ice/ice_devlink.h
index e07e74426bde..b7f9551e4fc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_devlink.h
+++ b/drivers/net/ethernet/intel/ice/ice_devlink.h
@@ -6,10 +6,12 @@
struct ice_pf *ice_allocate_pf(struct device *dev);
-int ice_devlink_register(struct ice_pf *pf);
+void ice_devlink_register(struct ice_pf *pf);
void ice_devlink_unregister(struct ice_pf *pf);
-int ice_devlink_create_port(struct ice_vsi *vsi);
-void ice_devlink_destroy_port(struct ice_vsi *vsi);
+int ice_devlink_create_pf_port(struct ice_pf *pf);
+void ice_devlink_destroy_pf_port(struct ice_pf *pf);
+int ice_devlink_create_vf_port(struct ice_vf *vf);
+void ice_devlink_destroy_vf_port(struct ice_vf *vf);
void ice_devlink_init_regions(struct ice_pf *pf);
void ice_devlink_destroy_regions(struct ice_pf *pf);
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c
new file mode 100644
index 000000000000..d1d7389b0bff
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c
@@ -0,0 +1,655 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_lib.h"
+#include "ice_eswitch.h"
+#include "ice_fltr.h"
+#include "ice_repr.h"
+#include "ice_devlink.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_eswitch_setup_env - configure switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function adds the HW filter configuration specific to switchdev
+ * mode.
+ */
+static int ice_eswitch_setup_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct net_device *uplink_netdev = uplink_vsi->netdev;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ struct ice_port_info *pi = pf->hw.port_info;
+ bool rule_added = false;
+
+ ice_vsi_manage_vlan_stripping(ctrl_vsi, false);
+
+ ice_remove_vsi_fltr(&pf->hw, uplink_vsi->idx);
+
+ netif_addr_lock_bh(uplink_netdev);
+ __dev_uc_unsync(uplink_netdev, NULL);
+ __dev_mc_unsync(uplink_netdev, NULL);
+ netif_addr_unlock_bh(uplink_netdev);
+
+ if (ice_vsi_add_vlan(uplink_vsi, 0, ICE_FWD_TO_VSI))
+ goto err_def_rx;
+
+ if (!ice_is_dflt_vsi_in_use(uplink_vsi->vsw)) {
+ if (ice_set_dflt_vsi(uplink_vsi->vsw, uplink_vsi))
+ goto err_def_rx;
+ rule_added = true;
+ }
+
+ if (ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, true, ICE_FLTR_TX))
+ goto err_def_tx;
+
+ if (ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_uplink;
+
+ if (ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_set_allow_override))
+ goto err_override_control;
+
+ if (ice_fltr_update_flags_dflt_rule(ctrl_vsi, pi->dflt_tx_vsi_rule_id,
+ ICE_FLTR_TX,
+ ICE_SINGLE_ACT_LB_ENABLE))
+ goto err_update_action;
+
+ return 0;
+
+err_update_action:
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_control:
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+err_override_uplink:
+ ice_cfg_dflt_vsi(pi->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+err_def_tx:
+ if (rule_added)
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+err_def_rx:
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_remap_rings_to_vectors - reconfigure rings of switchdev ctrl VSI
+ * @pf: pointer to PF struct
+ *
+ * In switchdev mode the number of allocated Tx and Rx rings is equal.
+ *
+ * This function fills the q_vector structures associated with each
+ * representor and moves each ring pair to the port representor netdevs.
+ * Each port representor gets one dedicated Tx/Rx ring pair, so the number
+ * of ring pairs equals the number of VFs.
+ */
+static void ice_eswitch_remap_rings_to_vectors(struct ice_pf *pf)
+{
+ struct ice_vsi *vsi = pf->switchdev.control_vsi;
+ int q_id;
+
+ ice_for_each_txq(vsi, q_id) {
+ struct ice_repr *repr = pf->vf[q_id].repr;
+ struct ice_q_vector *q_vector = repr->q_vector;
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];
+
+ q_vector->vsi = vsi;
+ q_vector->reg_idx = vsi->q_vectors[0]->reg_idx;
+
+ q_vector->num_ring_tx = 1;
+ q_vector->tx.tx_ring = tx_ring;
+ tx_ring->q_vector = q_vector;
+ tx_ring->next = NULL;
+ tx_ring->netdev = repr->netdev;
+ /* In switchdev mode, from the OS stack's perspective, there is
+ * only one queue for a given netdev, so it needs to be indexed as 0.
+ */
+ tx_ring->q_index = 0;
+
+ q_vector->num_ring_rx = 1;
+ q_vector->rx.rx_ring = rx_ring;
+ rx_ring->q_vector = q_vector;
+ rx_ring->next = NULL;
+ rx_ring->netdev = repr->netdev;
+ }
+}
+
+/**
+ * ice_eswitch_setup_reprs - configure port reprs to run in switchdev mode
+ * @pf: pointer to PF struct
+ */
+static int ice_eswitch_setup_reprs(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int max_vsi_num = 0;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_remove_vsi_fltr(&pf->hw, vsi->idx);
+ vf->repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!vf->repr->dst) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ goto err;
+ }
+
+ if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ goto err;
+ }
+
+ if (ice_vsi_add_vlan(vsi, 0, ICE_FWD_TO_VSI)) {
+ ice_fltr_add_mac_and_broadcast(vsi,
+ vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ metadata_dst_free(vf->repr->dst);
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ goto err;
+ }
+
+ if (max_vsi_num < vsi->vsi_num)
+ max_vsi_num = vsi->vsi_num;
+
+ netif_napi_add(vf->repr->netdev, &vf->repr->q_vector->napi, ice_napi_poll,
+ NAPI_POLL_WEIGHT);
+
+ netif_keep_dst(vf->repr->netdev);
+ }
+
+ kfree(ctrl_vsi->target_netdevs);
+
+ ctrl_vsi->target_netdevs = kcalloc(max_vsi_num + 1,
+ sizeof(*ctrl_vsi->target_netdevs),
+ GFP_KERNEL);
+ if (!ctrl_vsi->target_netdevs)
+ goto err;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_repr *repr = pf->vf[i].repr;
+ struct ice_vsi *vsi = repr->src_vsi;
+ struct metadata_dst *dst;
+
+ ctrl_vsi->target_netdevs[vsi->vsi_num] = repr->netdev;
+
+ dst = repr->dst;
+ dst->u.port_info.port_id = vsi->vsi_num;
+ dst->u.port_info.lower_dev = repr->netdev;
+ ice_repr_set_traffic_vsi(repr, ctrl_vsi);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+ }
+
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_release_reprs - clear PR VSIs configuration
+ * @pf: pointer to PF struct
+ * @ctrl_vsi: pointer to switchdev control VSI
+ */
+static void
+ice_eswitch_release_reprs(struct ice_pf *pf, struct ice_vsi *ctrl_vsi)
+{
+ int i;
+
+ kfree(ctrl_vsi->target_netdevs);
+ ice_for_each_vf(pf, i) {
+ struct ice_vsi *vsi = pf->vf[i].repr->src_vsi;
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof);
+ metadata_dst_free(vf->repr->dst);
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr,
+ ICE_FWD_TO_VSI);
+
+ netif_napi_del(&vf->repr->q_vector->napi);
+ }
+}
+
+/**
+ * ice_eswitch_update_repr - reconfigure VF port representor
+ * @vsi: VF VSI for which port representor is configured
+ */
+void ice_eswitch_update_repr(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_repr *repr;
+ struct ice_vf *vf;
+ int ret;
+
+ if (!ice_is_switchdev_running(pf))
+ return;
+
+ vf = &pf->vf[vsi->vf_id];
+ repr = vf->repr;
+ repr->src_vsi = vsi;
+ repr->dst->u.port_info.port_id = vsi->vsi_num;
+
+ ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof);
+ if (ret) {
+ ice_fltr_add_mac_and_broadcast(vsi, vf->hw_lan_addr.addr, ICE_FWD_TO_VSI);
+ dev_err(ice_pf_to_dev(pf), "Failed to update VF %d port representor", vsi->vf_id);
+ }
+}
+
+/**
+ * ice_eswitch_port_start_xmit - callback for packet transmit
+ * @skb: send buffer
+ * @netdev: network interface device structure
+ *
+ * Returns NETDEV_TX_OK if sent, else an error code
+ */
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ struct ice_vsi *vsi;
+
+ np = netdev_priv(netdev);
+ vsi = np->vsi;
+
+ if (ice_is_reset_in_progress(vsi->back->state))
+ return NETDEV_TX_BUSY;
+
+ repr = ice_netdev_to_repr(netdev);
+ skb_dst_drop(skb);
+ dst_hold((struct dst_entry *)repr->dst);
+ skb_dst_set(skb, (struct dst_entry *)repr->dst);
+ skb->queue_mapping = repr->vf->vf_id;
+
+ return ice_start_xmit(skb, netdev);
+}
+
+/**
+ * ice_eswitch_set_target_vsi - set switchdev context in Tx context descriptor
+ * @skb: pointer to send buffer
+ * @off: pointer to offload struct
+ */
+void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off)
+{
+ struct metadata_dst *dst = skb_metadata_dst(skb);
+ u64 cd_cmd, dst_vsi;
+
+ if (!dst) {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_UPLINK << ICE_TXD_CTX_QW1_CMD_S;
+ off->cd_qw1 |= (cd_cmd | ICE_TX_DESC_DTYPE_CTX);
+ } else {
+ cd_cmd = ICE_TX_CTX_DESC_SWTCH_VSI << ICE_TXD_CTX_QW1_CMD_S;
+ dst_vsi = ((u64)dst->u.port_info.port_id <<
+ ICE_TXD_CTX_QW1_VSI_S) & ICE_TXD_CTX_QW1_VSI_M;
+ off->cd_qw1 = cd_cmd | dst_vsi | ICE_TX_DESC_DTYPE_CTX;
+ }
+}
+
+/**
+ * ice_eswitch_release_env - clear switchdev HW filters
+ * @pf: pointer to PF struct
+ *
+ * This function removes the HW filter configuration specific to switchdev
+ * mode and restores the default legacy mode settings.
+ */
+static void ice_eswitch_release_env(struct ice_pf *pf)
+{
+ struct ice_vsi *uplink_vsi = pf->switchdev.uplink_vsi;
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_vsi_update_security(ctrl_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_vsi_update_security(uplink_vsi, ice_vsi_ctx_clear_allow_override);
+ ice_cfg_dflt_vsi(&pf->hw, ctrl_vsi->idx, false, ICE_FLTR_TX);
+ ice_clear_dflt_vsi(uplink_vsi->vsw);
+ ice_fltr_add_mac_and_broadcast(uplink_vsi,
+ uplink_vsi->port_info->mac.perm_addr,
+ ICE_FWD_TO_VSI);
+}
+
+/**
+ * ice_eswitch_vsi_setup - configure switchdev control VSI
+ * @pf: pointer to PF structure
+ * @pi: pointer to port_info structure
+ */
+static struct ice_vsi *
+ice_eswitch_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_SWITCHDEV_CTRL, ICE_INVAL_VFID, NULL);
+}
+
+/**
+ * ice_eswitch_napi_del - remove NAPI handle for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_del(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ netif_napi_del(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_enable - enable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_enable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_enable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_napi_disable - disable NAPI for all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_napi_disable(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i)
+ napi_disable(&pf->vf[i].repr->q_vector->napi);
+}
+
+/**
+ * ice_eswitch_set_rxdid - configure rxdid on all Rx queues from VSI
+ * @vsi: VSI to setup rxdid on
+ * @rxdid: flex descriptor id
+ */
+static void ice_eswitch_set_rxdid(struct ice_vsi *vsi, u32 rxdid)
+{
+ struct ice_hw *hw = &vsi->back->hw;
+ int i;
+
+ ice_for_each_rxq(vsi, i) {
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
+ u16 pf_q = vsi->rxq_map[ring->q_index];
+
+ ice_write_qrxflxp_cntxt(hw, pf_q, rxdid, 0x3, true);
+ }
+}
+
+/**
+ * ice_eswitch_enable_switchdev - configure eswitch in switchdev mode
+ * @pf: pointer to PF structure
+ */
+static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi;
+
+ pf->switchdev.control_vsi = ice_eswitch_vsi_setup(pf, pf->hw.port_info);
+ if (!pf->switchdev.control_vsi)
+ return -ENODEV;
+
+ ctrl_vsi = pf->switchdev.control_vsi;
+ pf->switchdev.uplink_vsi = ice_get_main_vsi(pf);
+ if (!pf->switchdev.uplink_vsi)
+ goto err_vsi;
+
+ if (ice_eswitch_setup_env(pf))
+ goto err_vsi;
+
+ if (ice_repr_add_for_all_vfs(pf))
+ goto err_repr_add;
+
+ if (ice_eswitch_setup_reprs(pf))
+ goto err_setup_reprs;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ if (ice_vsi_open(ctrl_vsi))
+ goto err_setup_reprs;
+
+ ice_eswitch_napi_enable(pf);
+
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+
+ return 0;
+
+err_setup_reprs:
+ ice_repr_rem_from_all_vfs(pf);
+err_repr_add:
+ ice_eswitch_release_env(pf);
+err_vsi:
+ ice_vsi_release(ctrl_vsi);
+ return -ENODEV;
+}
+
+/**
+ * ice_eswitch_disable_switchdev - disable switchdev resources
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_release_env(pf);
+ ice_eswitch_release_reprs(pf, ctrl_vsi);
+ ice_vsi_release(ctrl_vsi);
+ ice_repr_rem_from_all_vfs(pf);
+}
+
+/**
+ * ice_eswitch_mode_set - set new eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: eswitch mode to switch to
+ * @extack: pointer to extack structure
+ */
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ if (pf->eswitch_mode == mode)
+ return 0;
+
+ if (pf->num_alloc_vfs) {
+ dev_info(ice_pf_to_dev(pf), "Changing eswitch mode is only allowed when no VFs are created");
+ NL_SET_ERR_MSG_MOD(extack, "Changing eswitch mode is only allowed when no VFs are created");
+ return -EOPNOTSUPP;
+ }
+
+ switch (mode) {
+ case DEVLINK_ESWITCH_MODE_LEGACY:
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to legacy",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to legacy");
+ break;
+ case DEVLINK_ESWITCH_MODE_SWITCHDEV:
+ {
+ dev_info(ice_pf_to_dev(pf), "PF %d changed eswitch mode to switchdev",
+ pf->hw.pf_id);
+ NL_SET_ERR_MSG_MOD(extack, "Changed eswitch mode to switchdev");
+ break;
+ }
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Unknown eswitch mode");
+ return -EINVAL;
+ }
+
+ pf->eswitch_mode = mode;
+ return 0;
+}
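Note that ice_eswitch_mode_set() only records the requested mode while no VFs exist; the switchdev datapath itself is brought up separately via ice_eswitch_configure() below once VFs are created. A hypothetical caller-side sketch of that split (the surrounding SR-IOV enable path and the error label are assumptions):

	/* hypothetical: after VFs are allocated, honor the recorded eswitch mode */
	err = ice_eswitch_configure(pf);	/* no-op in legacy mode, sets up switchdev otherwise */
	if (err)
		goto err_unroll_sriov;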
+
+/**
+ * ice_eswitch_get_target_netdev - return port representor netdev
+ * @rx_ring: pointer to Rx ring
+ * @rx_desc: pointer to Rx descriptor
+ *
+ * When working in switchdev context (i.e. when the control VSI is used),
+ * this function returns the netdev of the appropriate port representor.
+ * In the non-switchdev case, the regular netdev associated with the Rx
+ * ring is returned.
+ */
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ struct ice_32b_rx_flex_desc_nic_2 *desc;
+ struct ice_vsi *vsi = rx_ring->vsi;
+ struct ice_vsi *control_vsi;
+ u16 target_vsi_id;
+
+ control_vsi = vsi->back->switchdev.control_vsi;
+ if (vsi != control_vsi)
+ return rx_ring->netdev;
+
+ desc = (struct ice_32b_rx_flex_desc_nic_2 *)rx_desc;
+ target_vsi_id = le16_to_cpu(desc->src_vsi);
+
+ return vsi->target_netdevs[target_vsi_id];
+}
+
+/**
+ * ice_eswitch_mode_get - get current eswitch mode
+ * @devlink: pointer to devlink structure
+ * @mode: output parameter for current eswitch mode
+ */
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ struct ice_pf *pf = devlink_priv(devlink);
+
+ *mode = pf->eswitch_mode;
+ return 0;
+}
+
+/**
+ * ice_is_eswitch_mode_switchdev - check if eswitch mode is set to switchdev
+ * @pf: pointer to PF structure
+ *
+ * Returns true if eswitch mode is set to DEVLINK_ESWITCH_MODE_SWITCHDEV,
+ * false otherwise.
+ */
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV;
+}
+
+/**
+ * ice_eswitch_release - cleanup eswitch
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_release(struct ice_pf *pf)
+{
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY)
+ return;
+
+ ice_eswitch_disable_switchdev(pf);
+ pf->switchdev.is_running = false;
+}
+
+/**
+ * ice_eswitch_configure - configure eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_configure(struct ice_pf *pf)
+{
+ int status;
+
+ if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_LEGACY || pf->switchdev.is_running)
+ return 0;
+
+ status = ice_eswitch_enable_switchdev(pf);
+ if (status)
+ return status;
+
+ pf->switchdev.is_running = true;
+ return 0;
+}
+
+/**
+ * ice_eswitch_start_all_tx_queues - start Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+static void ice_eswitch_start_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_start_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_stop_all_tx_queues - stop Tx queues of all port representors
+ * @pf: pointer to PF structure
+ */
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf)
+{
+ struct ice_repr *repr;
+ int i;
+
+ if (test_bit(ICE_DOWN, pf->state))
+ return;
+
+ ice_for_each_vf(pf, i) {
+ repr = pf->vf[i].repr;
+ if (repr)
+ ice_repr_stop_tx_queues(repr);
+ }
+}
+
+/**
+ * ice_eswitch_rebuild - rebuild eswitch
+ * @pf: pointer to PF structure
+ */
+int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ struct ice_vsi *ctrl_vsi = pf->switchdev.control_vsi;
+ int status;
+
+ ice_eswitch_napi_disable(pf);
+ ice_eswitch_napi_del(pf);
+
+ status = ice_eswitch_setup_env(pf);
+ if (status)
+ return status;
+
+ status = ice_eswitch_setup_reprs(pf);
+ if (status)
+ return status;
+
+ ice_eswitch_remap_rings_to_vectors(pf);
+
+ ice_replay_tc_fltrs(pf);
+
+ status = ice_vsi_open(ctrl_vsi);
+ if (status)
+ return status;
+
+ ice_eswitch_napi_enable(pf);
+ ice_eswitch_set_rxdid(ctrl_vsi, ICE_RXDID_FLEX_NIC_2);
+ ice_eswitch_start_all_tx_queues(pf);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h
new file mode 100644
index 000000000000..364cd2a79c37
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_ESWITCH_H_
+#define _ICE_ESWITCH_H_
+
+#include <net/devlink.h>
+
+#ifdef CONFIG_ICE_SWITCHDEV
+void ice_eswitch_release(struct ice_pf *pf);
+int ice_eswitch_configure(struct ice_pf *pf);
+int ice_eswitch_rebuild(struct ice_pf *pf);
+
+int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode);
+int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack);
+bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf);
+
+void ice_eswitch_update_repr(struct ice_vsi *vsi);
+
+void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf);
+
+struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc);
+
+void ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off);
+netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
+#else /* CONFIG_ICE_SWITCHDEV */
+static inline void ice_eswitch_release(struct ice_pf *pf) { }
+
+static inline void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf) { }
+
+static inline void
+ice_eswitch_set_target_vsi(struct sk_buff *skb,
+ struct ice_tx_offload_params *off) { }
+
+static inline void ice_eswitch_update_repr(struct ice_vsi *vsi) { }
+
+static inline int ice_eswitch_configure(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_rebuild(struct ice_pf *pf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode)
+{
+ return DEVLINK_ESWITCH_MODE_LEGACY;
+}
+
+static inline int
+ice_eswitch_mode_set(struct devlink *devlink, u16 mode,
+ struct netlink_ext_ack *extack)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf)
+{
+ return false;
+}
+
+static inline struct net_device *
+ice_eswitch_get_target_netdev(struct ice_rx_ring *rx_ring,
+ union ice_32b_rx_flex_desc *rx_desc)
+{
+ return rx_ring->netdev;
+}
+
+static inline netdev_tx_t
+ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ return NETDEV_TX_BUSY;
+}
+#endif /* CONFIG_ICE_SWITCHDEV */
+#endif /* _ICE_ESWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index c451cf401e63..572519e402f4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -170,10 +170,9 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
static void
-ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+__ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
struct ice_orom_info *orom;
@@ -190,9 +189,19 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%x.%02x 0x%x %d.%d.%d", nvm->major, nvm->minor,
nvm->eetrack, orom->major, orom->build, orom->patch);
+}
+
+static void
+ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+
+ __ice_get_drvinfo(netdev, drvinfo, np->vsi);
strscpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
+
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
}
@@ -584,7 +593,7 @@ static bool ice_lbtest_check_frame(u8 *frame)
*
* Function sends loopback packets on a test Tx ring.
*/
-static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
+static int ice_diag_send(struct ice_tx_ring *tx_ring, u8 *data, u16 size)
{
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
@@ -637,7 +646,7 @@ static int ice_diag_send(struct ice_ring *tx_ring, u8 *data, u16 size)
* Function receives loopback packets and verify their correctness.
* Returns number of received valid frames.
*/
-static int ice_lbtest_receive_frames(struct ice_ring *rx_ring)
+static int ice_lbtest_receive_frames(struct ice_rx_ring *rx_ring)
{
struct ice_rx_buf *rx_buf;
int valid_frames, i;
@@ -676,9 +685,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *orig_vsi = np->vsi, *test_vsi;
struct ice_pf *pf = orig_vsi->back;
- struct ice_ring *tx_ring, *rx_ring;
u8 broadcast[ETH_ALEN], ret = 0;
int num_frames, valid_frames;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
struct device *dev;
u8 *tx_frame;
int i;
@@ -866,10 +876,10 @@ skip_ol_tests:
netdev_info(netdev, "testing finished\n");
}
-static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+static void
+__ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
unsigned int i;
u8 *p = data;
@@ -879,6 +889,9 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
ethtool_sprintf(&p,
ice_gstrings_vsi_stats[i].stat_string);
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
ice_for_each_alloc_txq(vsi, i) {
ethtool_sprintf(&p, "tx_queue_%u_packets", i);
ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
@@ -917,6 +930,13 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_strings(netdev, stringset, data, np->vsi);
+}
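The __ice_get_strings()/ice_get_strings() split suggests a second caller that passes a different VSI — presumably the port representor's source VSI. A hypothetical representor-side wrapper (the function name and its registration as an ethtool op are assumptions; ice_netdev_to_repr() and repr->src_vsi appear elsewhere in this series):

	/* hypothetical representor-side ethtool callback reusing the inner helper */
	static void
	ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
	{
		struct ice_repr *repr = ice_netdev_to_repr(netdev);

		__ice_get_strings(netdev, stringset, data, repr->src_vsi);
	}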
+
static int
ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
{
@@ -1215,6 +1235,13 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
enum ice_status status;
bool dcbx_agent_status;
+ if (ice_get_pfc_mode(pf) == ICE_QOS_MODE_DSCP) {
+ clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
+ dev_err(dev, "QoS in L3 DSCP mode, FW Agent not allowed to start\n");
+ ret = -EOPNOTSUPP;
+ goto ethtool_exit;
+ }
+
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
@@ -1312,13 +1339,13 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
}
static void
-ice_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats __always_unused *stats, u64 *data)
+__ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data,
+ struct ice_vsi *vsi)
{
- struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
unsigned int j;
int i = 0;
char *p;
@@ -1332,14 +1359,17 @@ ice_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ if (ice_is_port_repr_netdev(netdev))
+ return;
+
/* populate per queue stats */
rcu_read_lock();
ice_for_each_alloc_txq(vsi, j) {
- ring = READ_ONCE(vsi->tx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ tx_ring = READ_ONCE(vsi->tx_rings[j]);
+ if (tx_ring) {
+ data[i++] = tx_ring->stats.pkts;
+ data[i++] = tx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1347,10 +1377,10 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
ice_for_each_alloc_rxq(vsi, j) {
- ring = READ_ONCE(vsi->rx_rings[j]);
- if (ring) {
- data[i++] = ring->stats.pkts;
- data[i++] = ring->stats.bytes;
+ rx_ring = READ_ONCE(vsi->rx_rings[j]);
+ if (rx_ring) {
+ data[i++] = rx_ring->stats.pkts;
+ data[i++] = rx_ring->stats.bytes;
} else {
data[i++] = 0;
data[i++] = 0;
@@ -1379,6 +1409,15 @@ ice_get_ethtool_stats(struct net_device *netdev,
}
}
+static void
+ice_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats, u64 *data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ __ice_get_ethtool_stats(netdev, stats, data, np->vsi);
+}
+
#define ICE_PHY_TYPE_LOW_MASK_MIN_1G (ICE_PHY_TYPE_LOW_100BASE_TX | \
ICE_PHY_TYPE_LOW_100M_SGMII)
@@ -2667,9 +2706,10 @@ ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
- struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *xdp_rings = NULL;
+ struct ice_tx_ring *xdp_rings = NULL;
+ struct ice_tx_ring *tx_rings = NULL;
+ struct ice_rx_ring *rx_rings = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int i, timeout = 50, err = 0;
@@ -2718,12 +2758,12 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
/* set for the next time the netdev is started */
if (!netif_running(vsi->netdev)) {
- for (i = 0; i < vsi->alloc_txq; i++)
+ ice_for_each_alloc_txq(vsi, i)
vsi->tx_rings[i]->count = new_tx_cnt;
- for (i = 0; i < vsi->alloc_rxq; i++)
+ ice_for_each_alloc_rxq(vsi, i)
vsi->rx_rings[i]->count = new_rx_cnt;
if (ice_is_xdp_ena_vsi(vsi))
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
vsi->xdp_rings[i]->count = new_tx_cnt;
vsi->num_tx_desc = (u16)new_tx_cnt;
vsi->num_rx_desc = (u16)new_rx_cnt;
@@ -2772,7 +2812,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
goto free_tx;
}
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
/* clone ring and setup updated count */
xdp_rings[i] = *vsi->xdp_rings[i];
xdp_rings[i].count = new_tx_cnt;
@@ -2866,7 +2906,7 @@ process_link:
}
if (xdp_rings) {
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
ice_free_tx_ring(vsi->xdp_rings[i]);
*vsi->xdp_rings[i] = xdp_rings[i];
}
@@ -3155,6 +3195,11 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key,
return -EIO;
}
+ if (ice_is_adq_active(pf)) {
+ netdev_err(netdev, "Cannot change RSS params with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (key) {
if (!vsi->rss_hkey_user) {
vsi->rss_hkey_user =
@@ -3255,7 +3300,7 @@ static u32 ice_get_combined_cnt(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring && q_vector->tx.ring)
+ if (q_vector->rx.rx_ring && q_vector->tx.tx_ring)
combined++;
}
@@ -3365,6 +3410,11 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
if (ch->other_count != (test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1U : 0U))
return -EINVAL;
+ if (ice_is_adq_active(pf)) {
+ netdev_err(dev, "Cannot set channels with ADQ configured.\n");
+ return -EOPNOTSUPP;
+ }
+
if (test_bit(ICE_FLAG_FD_ENA, pf->flags) && pf->hw.fdir_active_fltr) {
netdev_err(dev, "Cannot set channels when Flow Director filters are active\n");
return -EOPNOTSUPP;
@@ -3466,15 +3516,9 @@ static int ice_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-enum ice_container_type {
- ICE_RX_CONTAINER,
- ICE_TX_CONTAINER,
-};
-
/**
* ice_get_rc_coalesce - get ITR values for specific ring container
* @ec: ethtool structure to fill with driver's coalesce settings
- * @c_type: container type, Rx or Tx
* @rc: ring container that the ITR values will come from
*
* Query the device for ice_ring_container specific ITR values. This is
@@ -3484,24 +3528,23 @@ enum ice_container_type {
* Returns 0 on success, negative otherwise.
*/
static int
-ice_get_rc_coalesce(struct ethtool_coalesce *ec, enum ice_container_type c_type,
- struct ice_ring_container *rc)
+ice_get_rc_coalesce(struct ethtool_coalesce *ec, struct ice_ring_container *rc)
{
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
ec->use_adaptive_rx_coalesce = ITR_IS_DYNAMIC(rc);
ec->rx_coalesce_usecs = rc->itr_setting;
- ec->rx_coalesce_usecs_high = rc->ring->q_vector->intrl;
+ ec->rx_coalesce_usecs_high = rc->rx_ring->q_vector->intrl;
break;
case ICE_TX_CONTAINER:
ec->use_adaptive_tx_coalesce = ITR_IS_DYNAMIC(rc);
ec->tx_coalesce_usecs = rc->itr_setting;
break;
default:
- dev_dbg(ice_pf_to_dev(rc->ring->vsi->back), "Invalid c_type %d\n", c_type);
+ dev_dbg(ice_pf_to_dev(rc->rx_ring->vsi->back), "Invalid c_type %d\n", rc->type);
return -EINVAL;
}
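
Two things happen in this hunk: the local enum ice_container_type definition is dropped (its ICE_RX_CONTAINER/ICE_TX_CONTAINER values are still used below, so they are presumably now provided by a shared header), and the container type becomes a field of the ring container itself, rc->type. The fact that a Tx container is still validated by checking rc->rx_ring suggests the new tx_ring/rx_ring pointers overlay each other in a union. A reduced sketch of the shape this implies for struct ice_ring_container; the real definition (in ice_txrx.h) carries additional ITR state and is not reproduced here:

    /* Reduced sketch implied by the code above, not the full definition. */
    struct example_ring_container {
            union {                          /* either pointer validates both */
                    struct ice_tx_ring *tx_ring;
                    struct ice_rx_ring *rx_ring;
            };
            u16 itr_setting;
            enum ice_container_type type;    /* ICE_RX_CONTAINER or ICE_TX_CONTAINER */
    };
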
@@ -3522,18 +3565,18 @@ static int
ice_get_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_get_rc_coalesce(ec, ICE_RX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_get_rc_coalesce(ec, ICE_TX_CONTAINER,
+ if (ice_get_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx))
return -EINVAL;
} else {
@@ -3585,7 +3628,6 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
/**
* ice_set_rc_coalesce - set ITR values for specific ring container
- * @c_type: container type, Rx or Tx
* @ec: ethtool structure from user to update ITR settings
* @rc: ring container that the ITR values will come from
* @vsi: VSI associated to the ring container
@@ -3597,19 +3639,22 @@ ice_get_per_q_coalesce(struct net_device *netdev, u32 q_num,
* Returns 0 on success, negative otherwise.
*/
static int
-ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
+ice_set_rc_coalesce(struct ethtool_coalesce *ec,
struct ice_ring_container *rc, struct ice_vsi *vsi)
{
- const char *c_type_str = (c_type == ICE_RX_CONTAINER) ? "rx" : "tx";
+ const char *c_type_str = (rc->type == ICE_RX_CONTAINER) ? "rx" : "tx";
u32 use_adaptive_coalesce, coalesce_usecs;
struct ice_pf *pf = vsi->back;
u16 itr_setting;
- if (!rc->ring)
+ if (!rc->rx_ring)
return -EINVAL;
- switch (c_type) {
+ switch (rc->type) {
case ICE_RX_CONTAINER:
+ {
+ struct ice_q_vector *q_vector = rc->rx_ring->q_vector;
+
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
@@ -3618,22 +3663,20 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
ICE_MAX_INTRL);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl &&
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl &&
(ec->use_adaptive_rx_coalesce || ec->use_adaptive_tx_coalesce)) {
netdev_info(vsi->netdev, "Invalid value, %s-usecs-high cannot be changed if adaptive-tx or adaptive-rx is enabled\n",
c_type_str);
return -EINVAL;
}
- if (ec->rx_coalesce_usecs_high != rc->ring->q_vector->intrl) {
- rc->ring->q_vector->intrl = ec->rx_coalesce_usecs_high;
- ice_write_intrl(rc->ring->q_vector,
- ec->rx_coalesce_usecs_high);
- }
+ if (ec->rx_coalesce_usecs_high != q_vector->intrl)
+ q_vector->intrl = ec->rx_coalesce_usecs_high;
use_adaptive_coalesce = ec->use_adaptive_rx_coalesce;
coalesce_usecs = ec->rx_coalesce_usecs;
break;
+ }
case ICE_TX_CONTAINER:
use_adaptive_coalesce = ec->use_adaptive_tx_coalesce;
coalesce_usecs = ec->tx_coalesce_usecs;
@@ -3641,7 +3684,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
default:
dev_dbg(ice_pf_to_dev(pf), "Invalid container type %d\n",
- c_type);
+ rc->type);
return -EINVAL;
}
@@ -3690,22 +3733,22 @@ static int
ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
{
if (q_num < vsi->num_rxq && q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_rxq) {
- if (ice_set_rc_coalesce(ICE_RX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->rx_rings[q_num]->q_vector->rx,
vsi))
return -EINVAL;
} else if (q_num < vsi->num_txq) {
- if (ice_set_rc_coalesce(ICE_TX_CONTAINER, ec,
+ if (ice_set_rc_coalesce(ec,
&vsi->tx_rings[q_num]->q_vector->tx,
vsi))
return -EINVAL;
@@ -3778,6 +3821,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, v_idx))
return -EINVAL;
+
+ ice_set_q_vector_intrl(vsi->q_vectors[v_idx]);
}
goto set_complete;
}
@@ -3785,6 +3830,8 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
if (ice_set_q_coalesce(vsi, ec, q_num))
return -EINVAL;
+ ice_set_q_vector_intrl(vsi->q_vectors[q_num]);
+
set_complete:
return 0;
}
@@ -3804,6 +3851,54 @@ ice_set_per_q_coalesce(struct net_device *netdev, u32 q_num,
return __ice_set_coalesce(netdev, ec, q_num);
}
+static void
+ice_repr_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_drvinfo(netdev, drvinfo, repr->src_vsi);
+}
+
+static void
+ice_repr_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ /* for port representors only ETH_SS_STATS is supported */
+ if (ice_check_vf_ready_for_cfg(repr->vf) ||
+ stringset != ETH_SS_STATS)
+ return;
+
+ __ice_get_strings(netdev, stringset, data, repr->src_vsi);
+}
+
+static void
+ice_repr_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats __always_unused *stats,
+ u64 *data)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ if (ice_check_vf_ready_for_cfg(repr->vf))
+ return;
+
+ __ice_get_ethtool_stats(netdev, stats, data, repr->src_vsi);
+}
+
+static int ice_repr_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ICE_VSI_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
#define ICE_I2C_EEPROM_DEV_ADDR 0xA0
#define ICE_I2C_EEPROM_DEV_ADDR2 0xA2
#define ICE_MODULE_TYPE_SFP 0x03
@@ -4055,6 +4150,23 @@ void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
}
+static const struct ethtool_ops ice_ethtool_repr_ops = {
+ .get_drvinfo = ice_repr_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = ice_repr_get_strings,
+ .get_ethtool_stats = ice_repr_get_ethtool_stats,
+ .get_sset_count = ice_repr_get_sset_count,
+};
+
+/**
+ * ice_set_ethtool_repr_ops - setup VF's port representor ethtool ops
+ * @netdev: network interface device structure
+ */
+void ice_set_ethtool_repr_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_repr_ops;
+}
+
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
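
ice_set_ethtool_repr_ops() follows the same pattern as the safe-mode variant above: it is meant to be called once per port-representor netdev, before registration, by the eswitch/representor setup code added elsewhere in this series. A hedged sketch of such a call site; everything except ice_set_ethtool_repr_ops() itself is illustrative:

    /* Illustrative call site only; the real representor netdev setup is
     * outside this diff.
     */
    static int example_repr_netdev_register(struct net_device *netdev)
    {
            /* representors expose only drvinfo, link, strings, stats and
             * sset count, per ice_ethtool_repr_ops above
             */
            ice_set_ethtool_repr_ops(netdev);

            return register_netdev(netdev);
    }
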
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
index 16de603b280c..38960bcc384c 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c
@@ -706,7 +706,7 @@ ice_create_init_fdir_rule(struct ice_pf *pf, enum ice_fltr_ptype flow)
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
@@ -1068,7 +1068,7 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp,
if (!seg)
return -ENOMEM;
- tun_seg = devm_kzalloc(dev, sizeof(*seg) * ICE_FD_HW_SEG_MAX,
+ tun_seg = devm_kcalloc(dev, sizeof(*seg), ICE_FD_HW_SEG_MAX,
GFP_KERNEL);
if (!tun_seg) {
devm_kfree(dev, seg);
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.c b/drivers/net/ethernet/intel/ice/ice_fdir.c
index 59ef68f072c0..cbd8424631e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.c
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.c
@@ -952,7 +952,7 @@ ice_fdir_get_gen_prgm_pkt(struct ice_hw *hw, struct ice_fdir_fltr *input,
ice_pkt_insert_u8(loc, ICE_IPV4_TTL_OFFSET, input->ip.v4.ttl);
ice_pkt_insert_mac_addr(loc, input->ext_data.dst_mac);
if (frag)
- loc[20] = ICE_FDIR_IPV4_PKT_FLAG_DF;
+ loc[20] = ICE_FDIR_IPV4_PKT_FLAG_MF;
break;
case ICE_FLTR_PTYPE_NONF_IPV4_UDP:
ice_pkt_insert_u32(loc, ICE_IPV4_DST_ADDR_OFFSET,
diff --git a/drivers/net/ethernet/intel/ice/ice_fdir.h b/drivers/net/ethernet/intel/ice/ice_fdir.h
index d2d40e18ae8a..da4163856f4c 100644
--- a/drivers/net/ethernet/intel/ice/ice_fdir.h
+++ b/drivers/net/ethernet/intel/ice/ice_fdir.h
@@ -48,7 +48,7 @@
* requests that the packet not be fragmented. MF indicates that a packet has
* been fragmented.
*/
-#define ICE_FDIR_IPV4_PKT_FLAG_DF 0x20
+#define ICE_FDIR_IPV4_PKT_FLAG_MF 0x20
enum ice_fltr_prgm_desc_dest {
ICE_FLTR_PRGM_DESC_DEST_DROP_PKT,
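
The rename from _DF to _MF matches the bit that is actually written: in the IPv4 header the flag bits sit at the top of the fragment-offset field, where 0x40 in the high byte is DF (don't fragment) and 0x20 is MF (more fragments). Assuming loc points at the start of an untagged Ethernet-framed training packet, the IPv4 header begins at offset 14 and this flags byte is loc[20], which is the byte the fdir code patches above. A short reference of the standard values (RFC 791, not driver-specific):

    /* Standard IPv4 flag bits in the high byte of the fragment-offset
     * field; shown for reference only.
     */
    #define EXAMPLE_IPV4_FLAG_RESERVED 0x80  /* must be zero */
    #define EXAMPLE_IPV4_FLAG_DF       0x40  /* don't fragment */
    #define EXAMPLE_IPV4_FLAG_MF       0x20  /* more fragments */
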
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
index 06ac9badee77..23cfcceb1536 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -735,7 +735,7 @@ static void ice_release_global_cfg_lock(struct ice_hw *hw)
*
* This function will request ownership of the change lock.
*/
-static enum ice_status
+enum ice_status
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
{
return ice_acquire_res(hw, ICE_CHANGE_LOCK_RES_ID, access,
@@ -748,7 +748,7 @@ ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access)
*
* This function will release the change lock using the proper Admin Command.
*/
-static void ice_release_change_lock(struct ice_hw *hw)
+void ice_release_change_lock(struct ice_hw *hw)
{
ice_release_res(hw, ICE_CHANGE_LOCK_RES_ID);
}
@@ -1330,6 +1330,86 @@ fw_ddp_compat_free_alloc:
}
/**
+ * ice_sw_fv_handler
+ * @sect_type: section type
+ * @section: pointer to section
+ * @index: index of the field vector entry to be returned
+ * @offset: ptr to variable that receives the offset in the field vector table
+ *
+ * This is a callback function that can be passed to ice_pkg_enum_entry.
+ * This function treats the given section as of type ice_sw_fv_section and
+ * enumerates offset field. "offset" is an index into the field vector table.
+ */
+static void *
+ice_sw_fv_handler(u32 sect_type, void *section, u32 index, u32 *offset)
+{
+ struct ice_sw_fv_section *fv_section = section;
+
+ if (!section || sect_type != ICE_SID_FLD_VEC_SW)
+ return NULL;
+ if (index >= le16_to_cpu(fv_section->count))
+ return NULL;
+ if (offset)
+ /* "index" passed in to this function is relative to a given
+ * 4k block. To get to the true index into the field vector
+ * table need to add the relative index to the base_offset
+ * field of this section
+ */
+ *offset = le16_to_cpu(fv_section->base_offset) + index;
+ return fv_section->fv + index;
+}
+
+/**
+ * ice_get_prof_index_max - get the max profile index for the used profiles
+ * @hw: pointer to the HW struct
+ *
+ * Calling this function will get the max profile index for the used
+ * profiles and store the index number in struct ice_switch_info
+ * *switch_info in HW for later use.
+ */
+static enum ice_status ice_get_prof_index_max(struct ice_hw *hw)
+{
+ u16 prof_index = 0, j, max_prof_index = 0;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ bool flag = false;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+
+ do {
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* in profiles that are not used, the prot_id is set to 0xff
+ * and the off is set to 0x1ff for all field vector words.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id != ICE_PROT_INVALID ||
+ fv->ew[j].off != ICE_FV_OFFSET_INVAL)
+ flag = true;
+ if (flag && prof_index > max_prof_index)
+ max_prof_index = prof_index;
+
+ prof_index++;
+ flag = false;
+ } while (fv);
+
+ hw->switch_info->max_used_prof_index = max_prof_index;
+
+ return 0;
+}
+
+/**
* ice_init_pkg - initialize/download package
* @hw: pointer to the hardware structure
* @buf: pointer to the package buffer
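
ice_sw_fv_handler() is only ever consumed through ice_pkg_enum_entry(), and ice_get_prof_index_max() above shows the canonical walk: pass hw->seg on the first call, NULL on every following call, and stop when the enumerator returns NULL. A stripped-down sketch of that loop, here merely counting switch-block field vectors; only the counting is invented, the enumerator usage is the one from this hunk:

    /* Minimal walk over the switch-block field vectors, mirroring the loop
     * in ice_get_prof_index_max(); the counting itself is illustrative.
     */
    static u16 example_count_sw_fvs(struct ice_hw *hw)
    {
            struct ice_pkg_enum state;
            struct ice_seg *ice_seg = hw->seg;
            struct ice_fv *fv;
            u16 count = 0;
            u32 offset;

            memset(&state, 0, sizeof(state));

            do {
                    fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
                                            &offset, ice_sw_fv_handler);
                    ice_seg = NULL;   /* keep walking the same enumeration */
                    if (fv)
                            count++;
            } while (fv);

            return count;
    }
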
@@ -1408,6 +1488,7 @@ enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
*/
ice_init_pkg_regs(hw);
ice_fill_blk_tbls(hw);
+ ice_get_prof_index_max(hw);
} else {
ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
status);
@@ -1485,6 +1566,195 @@ static struct ice_buf_build *ice_pkg_buf_alloc(struct ice_hw *hw)
}
/**
+ * ice_get_sw_prof_type - determine switch profile type
+ * @hw: pointer to the HW structure
+ * @fv: pointer to the switch field vector
+ */
+static enum ice_prof_type
+ice_get_sw_prof_type(struct ice_hw *hw, struct ice_fv *fv)
+{
+ u16 i;
+
+ for (i = 0; i < hw->blk[ICE_BLK_SW].es.fvw; i++) {
+ /* UDP tunnel will have UDP_OF protocol ID and VNI offset */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_UDP_OF &&
+ fv->ew[i].off == ICE_VNI_OFFSET)
+ return ICE_PROF_TUN_UDP;
+
+ /* GRE tunnel will have GRE protocol */
+ if (fv->ew[i].prot_id == (u8)ICE_PROT_GRE_OF)
+ return ICE_PROF_TUN_GRE;
+ }
+
+ return ICE_PROF_NON_TUN;
+}
+
+/**
+ * ice_get_sw_fv_bitmap - Get switch field vector bitmap based on profile type
+ * @hw: pointer to hardware structure
+ * @req_profs: type of profiles requested
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type req_profs,
+ unsigned long *bm)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ if (req_profs == ICE_PROF_ALL) {
+ bitmap_set(bm, 0, ICE_MAX_NUM_PROFILES);
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+ ice_seg = hw->seg;
+ do {
+ enum ice_prof_type prof_type;
+ u32 offset;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ ice_seg = NULL;
+
+ if (fv) {
+ /* Determine field vector type */
+ prof_type = ice_get_sw_prof_type(hw, fv);
+
+ if (req_profs & prof_type)
+ set_bit((u16)offset, bm);
+ }
+ } while (fv);
+}
+
+/**
+ * ice_get_sw_fv_list
+ * @hw: pointer to the HW structure
+ * @prot_ids: field vector to search for with a given protocol ID
+ * @ids_cnt: lookup/protocol count
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: Head of a list
+ *
+ * Finds all the field vector entries from switch block that contain
+ * a given protocol ID and returns a list of structures of type
+ * "ice_sw_fv_list_entry". Every structure in the list has a field vector
+ * definition and profile ID information.
+ * NOTE: The caller of the function is responsible for freeing the memory
+ * allocated for every list entry.
+ */
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ struct ice_sw_fv_list_entry *fvl;
+ struct ice_sw_fv_list_entry *tmp;
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+ u32 offset;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!ids_cnt || !hw->seg)
+ return ICE_ERR_PARAM;
+
+ ice_seg = hw->seg;
+ do {
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &offset, ice_sw_fv_handler);
+ if (!fv)
+ break;
+ ice_seg = NULL;
+
+ /* If field vector is not in the bitmap list, then skip this
+ * profile.
+ */
+ if (!test_bit((u16)offset, bm))
+ continue;
+
+ for (i = 0; i < ids_cnt; i++) {
+ int j;
+
+ /* This code assumes that if a switch field vector line
+ * has a matching protocol, then this line will contain
+ * the entries necessary to represent every field in
+ * that protocol header.
+ */
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv->ew[j].prot_id == prot_ids[i])
+ break;
+ if (j >= hw->blk[ICE_BLK_SW].es.fvw)
+ break;
+ if (i + 1 == ids_cnt) {
+ fvl = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*fvl), GFP_KERNEL);
+ if (!fvl)
+ goto err;
+ fvl->fv_ptr = fv;
+ fvl->profile_id = offset;
+ list_add(&fvl->list_entry, fv_list);
+ break;
+ }
+ }
+ } while (fv);
+ if (list_empty(fv_list))
+ return ICE_ERR_CFG;
+ return 0;
+
+err:
+ list_for_each_entry_safe(fvl, tmp, fv_list, list_entry) {
+ list_del(&fvl->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvl);
+ }
+
+ return ICE_ERR_NO_MEMORY;
+}
+
+/**
+ * ice_init_prof_result_bm - Initialize the profile result index bitmap
+ * @hw: pointer to hardware structure
+ */
+void ice_init_prof_result_bm(struct ice_hw *hw)
+{
+ struct ice_pkg_enum state;
+ struct ice_seg *ice_seg;
+ struct ice_fv *fv;
+
+ memset(&state, 0, sizeof(state));
+
+ if (!hw->seg)
+ return;
+
+ ice_seg = hw->seg;
+ do {
+ u32 off;
+ u16 i;
+
+ fv = ice_pkg_enum_entry(ice_seg, &state, ICE_SID_FLD_VEC_SW,
+ &off, ice_sw_fv_handler);
+ ice_seg = NULL;
+ if (!fv)
+ break;
+
+ bitmap_zero(hw->switch_info->prof_res_bm[off],
+ ICE_MAX_FV_WORDS);
+
+ /* Determine empty field vector indices; these can be
+ * used for recipe results. Skip index 0, since it is
+ * always used for Switch ID.
+ */
+ for (i = 1; i < ICE_MAX_FV_WORDS; i++)
+ if (fv->ew[i].prot_id == ICE_PROT_INVALID &&
+ fv->ew[i].off == ICE_FV_OFFSET_INVAL)
+ set_bit(i, hw->switch_info->prof_res_bm[off]);
+ } while (fv);
+}
+
+/**
* ice_pkg_buf_free
* @hw: pointer to the HW structure
* @bld: pointer to pkg build (allocated by ice_pkg_buf_alloc())
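
ice_get_sw_fv_bitmap() and ice_get_sw_fv_list() are designed to be used back to back: first narrow the candidate profiles by type, then collect the field-vector entries that match a set of protocol IDs, and finally free every list entry yourself, as the NOTE in the kernel-doc warns. A hedged sketch of that sequence; the protocol-ID array and the per-entry processing are placeholders:

    /* Hedged usage sketch for the helpers added above; prot_ids content and
     * the per-entry work are placeholders.
     */
    static enum ice_status
    example_collect_sw_fvs(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt)
    {
            DECLARE_BITMAP(bm, ICE_MAX_NUM_PROFILES);
            struct ice_sw_fv_list_entry *entry, *tmp;
            struct list_head fv_list;
            enum ice_status status;

            INIT_LIST_HEAD(&fv_list);

            /* consider non-tunnel profiles only in this example */
            ice_get_sw_fv_bitmap(hw, ICE_PROF_NON_TUN, bm);

            status = ice_get_sw_fv_list(hw, prot_ids, ids_cnt, bm, &fv_list);
            if (status)
                    return status;

            list_for_each_entry(entry, &fv_list, list_entry) {
                    /* entry->profile_id and entry->fv_ptr would be used here */
            }

            /* the caller owns the entries (see the NOTE above) */
            list_for_each_entry_safe(entry, tmp, &fv_list, list_entry) {
                    list_del(&entry->list_entry);
                    devm_kfree(ice_hw_to_dev(hw), entry);
            }

            return 0;
    }
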
@@ -1668,7 +1938,7 @@ static u16 ice_tunnel_idx_to_entry(struct ice_hw *hw, enum ice_tunnel_type type,
for (i = 0; i < hw->tnl.count && i < ICE_TUNNEL_MAX_ENTRIES; i++)
if (hw->tnl.tbl[i].valid &&
hw->tnl.tbl[i].type == type &&
- idx--)
+ idx-- == 0)
return i;
WARN_ON_ONCE(1);
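
The one-character change above fixes which entry ice_tunnel_idx_to_entry() returns: used as a plain truth value, idx-- accepts the very first match for any nonzero idx and, when idx is 0, skips the first match and then underflows. With idx-- == 0 the helper skips exactly idx matching entries and returns the idx-th one, counting from zero. The next hunk completes the fix by un-swapping the tnl_type and idx arguments at the call site. A standalone illustration of the two predicates, outside the driver:

    /* Standalone illustration of the predicate change; not driver code. */
    #include <stdio.h>

    static int find_nth_even(unsigned int idx)
    {
            int i;

            for (i = 0; i < 16; i++)
                    if (i % 2 == 0 && idx-- == 0) /* buggy form was "&& idx--" */
                            return i;
            return -1;
    }

    int main(void)
    {
            unsigned int n;

            /* prints "0 2 4": the 0th, 1st and 2nd even numbers */
            for (n = 0; n < 3; n++)
                    printf("%d ", find_nth_even(n));
            printf("\n");
            return 0;
    }
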
@@ -1828,7 +2098,7 @@ int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
u16 index;
tnl_type = ti->type == UDP_TUNNEL_TYPE_VXLAN ? TNL_VXLAN : TNL_GENEVE;
- index = ice_tunnel_idx_to_entry(&pf->hw, idx, tnl_type);
+ index = ice_tunnel_idx_to_entry(&pf->hw, tnl_type, idx);
status = ice_create_tunnel(&pf->hw, index, tnl_type, ntohs(ti->port));
if (status) {
@@ -1863,6 +2133,35 @@ int ice_udp_tunnel_unset_port(struct net_device *netdev, unsigned int table,
return 0;
}
+/**
+ * ice_find_prot_off - find prot ID and offset pair, based on prof and FV index
+ * @hw: pointer to the hardware structure
+ * @blk: hardware block
+ * @prof: profile ID
+ * @fv_idx: field vector word index
+ * @prot: variable to receive the protocol ID
+ * @off: variable to receive the protocol offset
+ */
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off)
+{
+ struct ice_fv_word *fv_ext;
+
+ if (prof >= hw->blk[blk].es.count)
+ return ICE_ERR_PARAM;
+
+ if (fv_idx >= hw->blk[blk].es.fvw)
+ return ICE_ERR_PARAM;
+
+ fv_ext = hw->blk[blk].es.t + (prof * hw->blk[blk].es.fvw);
+
+ *prot = fv_ext[fv_idx].prot_id;
+ *off = fv_ext[fv_idx].off;
+
+ return 0;
+}
+
/* PTG Management */
/**
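
ice_find_prot_off() is exported through the ice_flex_pipe.h hunk below so that the switch/recipe code in this series can translate a (profile, field-vector word) pair into a protocol ID and offset. A hedged sketch of a caller that walks every word of one switch-block profile; the debug print is illustrative and reuses a debug mask already seen in this file:

    /* Hedged sketch: dump each word of one switch-block profile.  Only
     * ice_find_prot_off() and its arguments come from this diff.
     */
    static void example_dump_sw_profile(struct ice_hw *hw, u8 prof)
    {
            u16 fv_idx;

            for (fv_idx = 0; fv_idx < hw->blk[ICE_BLK_SW].es.fvw; fv_idx++) {
                    u16 off;
                    u8 prot;

                    if (ice_find_prot_off(hw, ICE_BLK_SW, prof, fv_idx,
                                          &prot, &off))
                            break;

                    ice_debug(hw, ICE_DBG_INIT, "word %u: prot %u off %u\n",
                              fv_idx, prot, off);
            }
    }
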
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
index 8a58e79729b9..344c2637facd 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -18,6 +18,20 @@
#define ICE_PKG_CNT 4
+enum ice_status
+ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
+void ice_release_change_lock(struct ice_hw *hw);
+enum ice_status
+ice_find_prot_off(struct ice_hw *hw, enum ice_block blk, u8 prof, u16 fv_idx,
+ u8 *prot, u16 *off);
+void
+ice_get_sw_fv_bitmap(struct ice_hw *hw, enum ice_prof_type type,
+ unsigned long *bm);
+void
+ice_init_prof_result_bm(struct ice_hw *hw);
+enum ice_status
+ice_get_sw_fv_list(struct ice_hw *hw, u8 *prot_ids, u16 ids_cnt,
+ unsigned long *bm, struct list_head *fv_list);
bool
ice_get_open_tunnel_port(struct ice_hw *hw, u16 *port);
int ice_udp_tunnel_set_port(struct net_device *netdev, unsigned int table,
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
index 7d8b517a63c9..0f572a36d021 100644
--- a/drivers/net/ethernet/intel/ice/ice_flex_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -13,6 +13,8 @@ struct ice_fv_word {
u8 resvrd;
} __packed;
+#define ICE_MAX_NUM_PROFILES 256
+
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
@@ -279,6 +281,12 @@ struct ice_sw_fv_section {
struct ice_fv fv[];
};
+struct ice_sw_fv_list_entry {
+ struct list_head list_entry;
+ u32 profile_id;
+ struct ice_fv *fv_ptr;
+};
+
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
@@ -365,6 +373,7 @@ struct ice_pkg_enum {
enum ice_tunnel_type {
TNL_VXLAN = 0,
TNL_GENEVE,
+ TNL_GRETAP,
__TNL_TYPE_CNT,
TNL_LAST = 0xFF,
TNL_ALL = 0xFF,
@@ -603,4 +612,12 @@ struct ice_chs_chg {
};
#define ICE_FLOW_PTYPE_MAX ICE_XLT1_CNT
+
+enum ice_prof_type {
+ ICE_PROF_NON_TUN = 0x1,
+ ICE_PROF_TUN_UDP = 0x2,
+ ICE_PROF_TUN_GRE = 0x4,
+ ICE_PROF_TUN_ALL = 0x6,
+ ICE_PROF_ALL = 0xFF,
+};
#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.c b/drivers/net/ethernet/intel/ice/ice_fltr.c
index 2418d4fff037..c2e78eaf4ccb 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.c
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.c
@@ -395,3 +395,83 @@ enum ice_status ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype,
return ice_fltr_prepare_eth(vsi, ethertype, flag, action,
ice_fltr_remove_eth_list);
}
+
+/**
+ * ice_fltr_update_rule_flags - update lan_en/lb_en flags
+ * @hw: pointer to hw
+ * @rule_id: id of rule being updated
+ * @recipe_id: recipe id of rule
+ * @act: current action field
+ * @type: Rx or Tx
+ * @src: source VSI
+ * @new_flags: combinations of lb_en and lan_en
+ */
+static enum ice_status
+ice_fltr_update_rule_flags(struct ice_hw *hw, u16 rule_id, u16 recipe_id,
+ u32 act, u16 type, u16 src, u32 new_flags)
+{
+ struct ice_aqc_sw_rules_elem *s_rule;
+ enum ice_status err;
+ u32 flags_mask;
+
+ s_rule = kzalloc(ICE_SW_RULE_RX_TX_NO_HDR_SIZE, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+
+ flags_mask = ICE_SINGLE_ACT_LB_ENABLE | ICE_SINGLE_ACT_LAN_ENABLE;
+ act &= ~flags_mask;
+ act |= (flags_mask & new_flags);
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(recipe_id);
+ s_rule->pdata.lkup_tx_rx.index = cpu_to_le16(rule_id);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ if (type & ICE_FLTR_RX) {
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+
+ } else {
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(src);
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ }
+
+ err = ice_aq_sw_rules(hw, s_rule, ICE_SW_RULE_RX_TX_NO_HDR_SIZE, 1,
+ ice_aqc_opc_update_sw_rules, NULL);
+
+ kfree(s_rule);
+ return err;
+}
+
+/**
+ * ice_fltr_build_action - build action for rule
+ * @vsi_id: ID of the VSI used to build the action
+ */
+static u32 ice_fltr_build_action(u16 vsi_id)
+{
+ return ((vsi_id << ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M) |
+ ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+}
+
+/**
+ * ice_fltr_update_flags_dflt_rule - update flags on default rule
+ * @vsi: pointer to VSI
+ * @rule_id: id of rule
+ * @direction: Tx or Rx
+ * @new_flags: flags to update
+ *
+ * Function updates flags on default rule with ICE_SW_LKUP_DFLT.
+ *
+ * Flags should be a combination of ICE_SINGLE_ACT_LB_ENABLE and
+ * ICE_SINGLE_ACT_LAN_ENABLE.
+ */
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags)
+{
+ u32 action = ice_fltr_build_action(vsi->vsi_num);
+ struct ice_hw *hw = &vsi->back->hw;
+
+ return ice_fltr_update_rule_flags(hw, rule_id, ICE_SW_LKUP_DFLT, action,
+ direction, vsi->vsi_num, new_flags);
+}
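
ice_fltr_update_flags_dflt_rule() is the exported entry point (see the ice_fltr.h hunk below). A caller that wants a VSI's default Rx rule to both loop traffic back and deliver it to the LAN would pass the two action bits named in the kernel-doc; where the rule ID comes from is switch code outside this diff, so it is just a parameter in this hedged sketch:

    /* Hedged usage sketch; rx_rule_id is obtained elsewhere. */
    static enum ice_status
    example_enable_lb_and_lan(struct ice_vsi *vsi, u16 rx_rule_id)
    {
            return ice_fltr_update_flags_dflt_rule(vsi, rx_rule_id, ICE_FLTR_RX,
                                                   ICE_SINGLE_ACT_LB_ENABLE |
                                                   ICE_SINGLE_ACT_LAN_ENABLE);
    }
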
diff --git a/drivers/net/ethernet/intel/ice/ice_fltr.h b/drivers/net/ethernet/intel/ice/ice_fltr.h
index 361cb4da9b43..8eec4febead1 100644
--- a/drivers/net/ethernet/intel/ice/ice_fltr.h
+++ b/drivers/net/ethernet/intel/ice/ice_fltr.h
@@ -36,4 +36,7 @@ enum ice_status
ice_fltr_remove_eth(struct ice_vsi *vsi, u16 ethertype, u16 flag,
enum ice_sw_fwd_act_type action);
void ice_fltr_remove_all(struct ice_vsi *vsi);
+enum ice_status
+ice_fltr_update_flags_dflt_rule(struct ice_vsi *vsi, u16 rule_id, u8 direction,
+ u32 new_flags);
#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 76021d977b60..a49082485642 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -182,6 +182,7 @@
#define GLINT_DYN_CTL_INTERVAL_S 5
#define GLINT_DYN_CTL_INTERVAL_M ICE_M(0xFFF, 5)
#define GLINT_DYN_CTL_SW_ITR_INDX_ENA_M BIT(24)
+#define GLINT_DYN_CTL_SW_ITR_INDX_S 25
#define GLINT_DYN_CTL_SW_ITR_INDX_M ICE_M(0x3, 25)
#define GLINT_DYN_CTL_WB_ON_ITR_M BIT(30)
#define GLINT_DYN_CTL_INTENA_MSK_M BIT(31)
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 37c18c66b5c7..e375ac849aec 100644
--- a/drivers/net/ethernet/intel/ice/ice_lag.c
+++ b/drivers/net/ethernet/intel/ice/ice_lag.c
@@ -100,9 +100,9 @@ static void ice_display_lag_info(struct ice_lag *lag)
*/
static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
{
- struct net_device *event_netdev, *netdev_tmp;
struct netdev_notifier_bonding_info *info;
struct netdev_bonding_info *bonding_info;
+ struct net_device *event_netdev;
const char *lag_netdev_name;
event_netdev = netdev_notifier_info_to_dev(ptr);
@@ -123,19 +123,6 @@ static void ice_lag_info_event(struct ice_lag *lag, void *ptr)
goto lag_out;
}
- rcu_read_lock();
- for_each_netdev_in_bond_rcu(lag->upper_netdev, netdev_tmp) {
- if (!netif_is_ice(netdev_tmp))
- continue;
-
- if (netdev_tmp && netdev_tmp != lag->netdev &&
- lag->peer_netdev != netdev_tmp) {
- dev_hold(netdev_tmp);
- lag->peer_netdev = netdev_tmp;
- }
- }
- rcu_read_unlock();
-
if (bonding_info->slave.state)
ice_lag_set_backup(lag);
else
@@ -319,6 +306,9 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
case NETDEV_BONDING_INFO:
ice_lag_info_event(lag, ptr);
break;
+ case NETDEV_UNREGISTER:
+ ice_lag_unlink(lag, ptr);
+ break;
default:
break;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
index 80736e0ec0dc..d981dc6f2323 100644
--- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
+++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h
@@ -301,6 +301,46 @@ struct ice_32b_rx_flex_desc_nic {
} flex_ts;
};
+/* Rx Flex Descriptor NIC Profile
+ * RxDID Profile ID 6
+ * Flex-field 0: RSS hash lower 16-bits
+ * Flex-field 1: RSS hash upper 16-bits
+ * Flex-field 2: Flow ID lower 16-bits
+ * Flex-field 3: Source VSI
+ * Flex-field 4: reserved, VLAN ID taken from L2Tag
+ */
+struct ice_32b_rx_flex_desc_nic_2 {
+ /* Qword 0 */
+ u8 rxdid;
+ u8 mir_id_umb_cast;
+ __le16 ptype_flexi_flags0;
+ __le16 pkt_len;
+ __le16 hdr_len_sph_flex_flags1;
+
+ /* Qword 1 */
+ __le16 status_error0;
+ __le16 l2tag1;
+ __le32 rss_hash;
+
+ /* Qword 2 */
+ __le16 status_error1;
+ u8 flexi_flags2;
+ u8 ts_low;
+ __le16 l2tag2_1st;
+ __le16 l2tag2_2nd;
+
+ /* Qword 3 */
+ __le16 flow_id;
+ __le16 src_vsi;
+ union {
+ struct {
+ __le16 rsvd;
+ __le16 flow_id_ipv6;
+ } flex;
+ __le32 ts_high;
+ } flex_ts;
+};
+
/* Receive Flex Descriptor profile IDs: There are a total
* of 64 profiles where profile IDs 0/1 are for legacy; and
* profiles 2-63 are flex profiles that can be programmed
@@ -529,6 +569,9 @@ struct ice_tx_ctx_desc {
#define ICE_TXD_CTX_QW1_MSS_S 50
+#define ICE_TXD_CTX_QW1_VSI_S 50
+#define ICE_TXD_CTX_QW1_VSI_M (0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
+
enum ice_tx_ctx_desc_cmd_bits {
ICE_TX_CTX_DESC_TSO = 0x01,
ICE_TX_CTX_DESC_TSYN = 0x02,
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index dde9802c6c72..40562600a8cf 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -22,8 +22,12 @@ const char *ice_vsi_type_str(enum ice_vsi_type vsi_type)
return "ICE_VSI_VF";
case ICE_VSI_CTRL:
return "ICE_VSI_CTRL";
+ case ICE_VSI_CHNL:
+ return "ICE_VSI_CHNL";
case ICE_VSI_LB:
return "ICE_VSI_LB";
+ case ICE_VSI_SWITCHDEV_CTRL:
+ return "ICE_VSI_SWITCHDEV_CTRL";
default:
return "unknown";
}
@@ -44,12 +48,12 @@ static int ice_vsi_ctrl_all_rx_rings(struct ice_vsi *vsi, bool ena)
int ret = 0;
u16 i;
- for (i = 0; i < vsi->num_rxq; i++)
+ ice_for_each_rxq(vsi, i)
ice_vsi_ctrl_one_rx_ring(vsi, ena, i, false);
ice_flush(&vsi->back->hw);
- for (i = 0; i < vsi->num_rxq; i++) {
+ ice_for_each_rxq(vsi, i) {
ret = ice_vsi_wait_one_rx_ring(vsi, ena, i);
if (ret)
break;
@@ -71,6 +75,8 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
struct device *dev;
dev = ice_pf_to_dev(pf);
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
/* allocate memory for both Tx and Rx ring pointers */
vsi->tx_rings = devm_kcalloc(dev, vsi->alloc_txq,
@@ -132,6 +138,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
{
switch (vsi->type) {
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
@@ -200,6 +207,14 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
max_t(int, vsi->alloc_rxq,
vsi->alloc_txq));
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ /* The number of queues for the ctrl VSI is equal to the number
+ * of VFs. Each ring is associated with the corresponding VF_PR
+ * netdev.
+ */
+ vsi->alloc_txq = pf->num_alloc_vfs;
+ vsi->alloc_rxq = pf->num_alloc_vfs;
+ vsi->num_q_vectors = 1;
+ break;
case ICE_VSI_VF:
vf = &pf->vf[vsi->vf_id];
if (vf->num_req_qs)
@@ -218,6 +233,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = 1;
vsi->num_q_vectors = 1;
break;
+ case ICE_VSI_CHNL:
+ vsi->alloc_txq = 0;
+ vsi->alloc_rxq = 0;
+ break;
case ICE_VSI_LB:
vsi->alloc_txq = 1;
vsi->alloc_rxq = 1;
@@ -263,7 +282,7 @@ static int ice_get_free_slot(void *array, int size, int curr)
* ice_vsi_delete - delete a VSI from the switch
* @vsi: pointer to VSI being removed
*/
-static void ice_vsi_delete(struct ice_vsi *vsi)
+void ice_vsi_delete(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct ice_vsi_ctx *ctxt;
@@ -334,7 +353,7 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
*
* Returns 0 on success, negative on failure
*/
-static int ice_vsi_clear(struct ice_vsi *vsi)
+int ice_vsi_clear(struct ice_vsi *vsi)
{
struct ice_pf *pf = NULL;
struct device *dev;
@@ -379,12 +398,12 @@ static irqreturn_t ice_msix_clean_ctrl_vsi(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring)
+ if (!q_vector->tx.tx_ring)
return IRQ_HANDLED;
#define FDIR_RX_DESC_CLEAN_BUDGET 64
- ice_clean_rx_irq(q_vector->rx.ring, FDIR_RX_DESC_CLEAN_BUDGET);
- ice_clean_ctrl_tx_irq(q_vector->tx.ring);
+ ice_clean_rx_irq(q_vector->rx.rx_ring, FDIR_RX_DESC_CLEAN_BUDGET);
+ ice_clean_ctrl_tx_irq(q_vector->tx.tx_ring);
return IRQ_HANDLED;
}
@@ -398,7 +417,7 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
{
struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
- if (!q_vector->tx.ring && !q_vector->rx.ring)
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
return IRQ_HANDLED;
q_vector->total_events++;
@@ -408,16 +427,33 @@ static irqreturn_t ice_msix_clean_rings(int __always_unused irq, void *data)
return IRQ_HANDLED;
}
+static irqreturn_t ice_eswitch_msix_clean_rings(int __always_unused irq, void *data)
+{
+ struct ice_q_vector *q_vector = (struct ice_q_vector *)data;
+ struct ice_pf *pf = q_vector->vsi->back;
+ int i;
+
+ if (!q_vector->tx.tx_ring && !q_vector->rx.rx_ring)
+ return IRQ_HANDLED;
+
+ ice_for_each_vf(pf, i)
+ napi_schedule(&pf->vf[i].repr->q_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
/**
* ice_vsi_alloc - Allocates the next available struct VSI in the PF
* @pf: board private structure
* @vsi_type: type of VSI
+ * @ch: ptr to channel
* @vf_id: ID of the VF being configured
*
* returns a pointer to a VSI on success, NULL on failure.
*/
static struct ice_vsi *
-ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
+ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type,
+ struct ice_channel *ch, u16 vf_id)
{
struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi = NULL;
@@ -444,10 +480,17 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (vsi_type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf_id);
- else
+ else if (vsi_type != ICE_VSI_CHNL)
ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
switch (vsi->type) {
+ case ICE_VSI_SWITCHDEV_CTRL:
+ if (ice_vsi_alloc_arrays(vsi))
+ goto err_rings;
+
+ /* Setup eswitch MSIX irq handler for VSI */
+ vsi->irq_handler = ice_eswitch_msix_clean_rings;
+ break;
case ICE_VSI_PF:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -466,6 +509,13 @@ ice_vsi_alloc(struct ice_pf *pf, enum ice_vsi_type vsi_type, u16 vf_id)
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
break;
+ case ICE_VSI_CHNL:
+ if (!ch)
+ goto err_rings;
+ vsi->num_rxq = ch->num_rxq;
+ vsi->num_txq = ch->num_txq;
+ vsi->next_base_q = ch->base_q;
+ break;
case ICE_VSI_LB:
if (ice_vsi_alloc_arrays(vsi))
goto err_rings;
@@ -582,6 +632,9 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
};
int ret;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
+
ret = __ice_vsi_get_qs(&tx_qs_cfg);
if (ret)
return ret;
@@ -606,12 +659,12 @@ static void ice_vsi_put_qs(struct ice_vsi *vsi)
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
clear_bit(vsi->txq_map[i], pf->avail_txqs);
vsi->txq_map[i] = ICE_INVAL_Q_INDEX;
}
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
clear_bit(vsi->rxq_map[i], pf->avail_rxqs);
vsi->rxq_map[i] = ICE_INVAL_Q_INDEX;
}
@@ -700,12 +753,23 @@ static void ice_vsi_set_rss_params(struct ice_vsi *vsi)
cap = &pf->hw.func_caps.common_cap;
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
vsi->rss_table_size = (u16)cap->rss_table_size;
+ if (vsi->type == ICE_VSI_CHNL)
+ vsi->rss_size = min_t(u16, vsi->num_rxq,
+ BIT(cap->rss_table_entry_width));
+ else
+ vsi->rss_size = min_t(u16, num_online_cpus(),
+ BIT(cap->rss_table_entry_width));
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ vsi->rss_table_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
vsi->rss_size = min_t(u16, num_online_cpus(),
BIT(cap->rss_table_entry_width));
- vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
+ vsi->rss_lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI;
break;
case ICE_VSI_VF:
/* VF VSI will get a small RSS table.
@@ -775,21 +839,13 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
u16 num_txq_per_tc, num_rxq_per_tc;
u16 qcount_tx = vsi->alloc_txq;
u16 qcount_rx = vsi->alloc_rxq;
- bool ena_tc0 = false;
u8 netdev_tc = 0;
int i;
- /* at least TC0 should be enabled by default */
- if (vsi->tc_cfg.numtc) {
- if (!(vsi->tc_cfg.ena_tc & BIT(0)))
- ena_tc0 = true;
- } else {
- ena_tc0 = true;
- }
-
- if (ena_tc0) {
- vsi->tc_cfg.numtc++;
- vsi->tc_cfg.ena_tc |= 1;
+ if (!vsi->tc_cfg.numtc) {
+ /* at least TC0 should be enabled by default */
+ vsi->tc_cfg.numtc = 1;
+ vsi->tc_cfg.ena_tc = 1;
}
num_rxq_per_tc = min_t(u16, qcount_rx / vsi->tc_cfg.numtc, ICE_MAX_RXQS_PER_TC);
@@ -931,6 +987,7 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
switch (vsi->type) {
+ case ICE_VSI_CHNL:
case ICE_VSI_PF:
/* PF VSI will inherit RSS instance of PF */
lut_type = ICE_AQ_VSI_Q_OPT_RSS_LUT_PF;
@@ -953,6 +1010,28 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
ICE_AQ_VSI_Q_OPT_RSS_HASH_M);
}
+static void
+ice_chnl_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
+{
+ struct ice_pf *pf = vsi->back;
+ u16 qcount, qmap;
+ u8 offset = 0;
+ int pow;
+
+ qcount = min_t(int, vsi->num_rxq, pf->num_lan_msix);
+
+ pow = order_base_2(qcount);
+ qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) &
+ ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.mapping_flags |= cpu_to_le16(ICE_AQ_VSI_Q_MAP_CONTIG);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->next_base_q);
+ ctxt->info.q_mapping[1] = cpu_to_le16(qcount);
+}
+
/**
* ice_vsi_init - Create and initialize a VSI
* @vsi: the VSI being configured
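
ice_chnl_vsi_setup_q_map() encodes the channel's single TC the same way ice_vsi_setup_q_map() does for ordinary VSIs, but maps it contiguously onto parent PF queues starting at vsi->next_base_q. For a channel with num_rxq = 6 and enough MSI-X vectors, qcount is 6 and pow = order_base_2(6) = 3, so the TC mapping advertises a power-of-two region of 8 queues at offset 0, while q_mapping[0]/q_mapping[1] point the VSI at 6 contiguous PF queues beginning at ch->base_q. A small standalone sketch of just the qmap arithmetic, using the same shift/mask names:

    /* Standalone sketch of the qmap encoding used above. */
    static u16 example_chnl_qmap(u16 qcount)
    {
            u8 offset = 0;                    /* TC0 starts at VSI queue 0 */
            int pow = order_base_2(qcount);   /* e.g. qcount = 6 -> pow = 3 */

            return ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
                    ICE_AQ_VSI_TC_Q_OFFSET_M) |
                   ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
    }
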
@@ -980,6 +1059,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
case ICE_VSI_PF:
ctxt->flags = ICE_AQ_VSI_TYPE_PF;
break;
+ case ICE_VSI_SWITCHDEV_CTRL:
+ case ICE_VSI_CHNL:
+ ctxt->flags = ICE_AQ_VSI_TYPE_VMDQ2;
+ break;
case ICE_VSI_VF:
ctxt->flags = ICE_AQ_VSI_TYPE_VF;
/* VF number here is the absolute VF number (0-255) */
@@ -990,6 +1073,21 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
goto out;
}
+ /* Handle VLAN pruning for channel VSI if main VSI has VLAN
+ * prune enabled
+ */
+ if (vsi->type == ICE_VSI_CHNL) {
+ struct ice_vsi *main_vsi;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (main_vsi && ice_vsi_is_vlan_pruning_ena(main_vsi))
+ ctxt->info.sw_flags2 |=
+ ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ else
+ ctxt->info.sw_flags2 &=
+ ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+ }
+
ice_set_dflt_vsi_ctx(ctxt);
if (test_bit(ICE_FLAG_FD_ENA, pf->flags))
ice_set_fd_vsi_ctx(ctxt, vsi);
@@ -1010,13 +1108,17 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
}
ctxt->info.sw_id = vsi->port_info->sw_id;
- ice_vsi_setup_q_map(vsi, ctxt);
- if (!init_vsi) /* means VSI being updated */
- /* must to indicate which section of VSI context are
- * being modified
- */
- ctxt->info.valid_sections |=
- cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ if (vsi->type == ICE_VSI_CHNL) {
+ ice_chnl_vsi_setup_q_map(vsi, ctxt);
+ } else {
+ ice_vsi_setup_q_map(vsi, ctxt);
+ if (!init_vsi) /* means VSI being updated */
+ /* must to indicate which section of VSI context are
+ * being modified
+ */
+ ctxt->info.valid_sections |=
+ cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
+ }
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
* respectively
@@ -1195,6 +1297,8 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
/* SRIOV doesn't grab irq_tracker entries for each VSI */
if (vsi->type == ICE_VSI_VF)
return 0;
+ if (vsi->type == ICE_VSI_CHNL)
+ return 0;
if (vsi->base_vector) {
dev_dbg(dev, "VSI %d has non-zero base vector %d\n",
@@ -1249,14 +1353,14 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (q_vector) {
- q_vector->tx.ring = NULL;
- q_vector->rx.ring = NULL;
+ q_vector->tx.tx_ring = NULL;
+ q_vector->rx.rx_ring = NULL;
}
}
}
if (vsi->tx_rings) {
- for (i = 0; i < vsi->alloc_txq; i++) {
+ ice_for_each_alloc_txq(vsi, i) {
if (vsi->tx_rings[i]) {
kfree_rcu(vsi->tx_rings[i], rcu);
WRITE_ONCE(vsi->tx_rings[i], NULL);
@@ -1264,7 +1368,7 @@ static void ice_vsi_clear_rings(struct ice_vsi *vsi)
}
}
if (vsi->rx_rings) {
- for (i = 0; i < vsi->alloc_rxq; i++) {
+ ice_for_each_alloc_rxq(vsi, i) {
if (vsi->rx_rings[i]) {
kfree_rcu(vsi->rx_rings[i], rcu);
WRITE_ONCE(vsi->rx_rings[i], NULL);
@@ -1285,8 +1389,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
dev = ice_pf_to_dev(pf);
/* Allocate Tx rings */
- for (i = 0; i < vsi->alloc_txq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_txq(vsi, i) {
+ struct ice_tx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1296,7 +1400,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->txq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->tx_tstamps = &pf->ptp.port.tx;
ring->dev = dev;
@@ -1305,8 +1408,8 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
}
/* Allocate Rx rings */
- for (i = 0; i < vsi->alloc_rxq; i++) {
- struct ice_ring *ring;
+ ice_for_each_alloc_rxq(vsi, i) {
+ struct ice_rx_ring *ring;
/* allocate with kzalloc(), free with kfree_rcu() */
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -1315,7 +1418,6 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
ring->q_index = i;
ring->reg_idx = vsi->rxq_map[i];
- ring->ring_active = false;
ring->vsi = vsi;
ring->netdev = vsi->netdev;
ring->dev = dev;
@@ -1363,7 +1465,7 @@ void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena)
* ice_vsi_cfg_rss_lut_key - Configure RSS params for a VSI
* @vsi: VSI to be configured
*/
-static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
{
struct ice_pf *pf = vsi->back;
struct device *dev;
@@ -1371,7 +1473,25 @@ static int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi)
int err;
dev = ice_pf_to_dev(pf);
- vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+ if (vsi->type == ICE_VSI_PF && vsi->ch_rss_size &&
+ (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))) {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->ch_rss_size);
+ } else {
+ vsi->rss_size = min_t(u16, vsi->rss_size, vsi->num_rxq);
+
+ /* If orig_rss_size is valid and less than the determined main
+ * VSI rss_size, update the main VSI rss_size to orig_rss_size so
+ * that when the tc-qdisc is deleted, the main VSI RSS table gets
+ * programmed back to what it was to begin with (prior to setup-tc
+ * for the ADQ config)
+ */
+ if (vsi->orig_rss_size && vsi->rss_size < vsi->orig_rss_size &&
+ vsi->orig_rss_size <= vsi->num_rxq) {
+ vsi->rss_size = vsi->orig_rss_size;
+ /* now orig_rss_size is used, reset it to zero */
+ vsi->orig_rss_size = 0;
+ }
+ }
lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
if (!lut)
@@ -1710,7 +1830,7 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
return ice_vsi_cfg_rxq(vsi->rx_rings[q_idx]);
}
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx)
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
int err;
@@ -1766,7 +1886,7 @@ setup_rings:
* Configure the Tx VSI for operation.
*/
static int
-ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, u16 count)
+ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_tx_ring **rings, u16 count)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
u16 q_idx = 0;
@@ -1817,8 +1937,8 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
if (ret)
return ret;
- for (i = 0; i < vsi->num_xdp_txq; i++)
- vsi->xdp_rings[i]->xsk_pool = ice_xsk_pool(vsi->xdp_rings[i]);
+ ice_for_each_xdp_txq(vsi, i)
+ vsi->xdp_rings[i]->xsk_pool = ice_tx_xsk_pool(vsi->xdp_rings[i]);
return ret;
}
@@ -1853,6 +1973,24 @@ void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl)
ice_intrl_usec_to_reg(intrl, ICE_INTRL_GRAN_ABOVE_25));
}
+static struct ice_q_vector *ice_pull_qvec_from_rc(struct ice_ring_container *rc)
+{
+ switch (rc->type) {
+ case ICE_RX_CONTAINER:
+ if (rc->rx_ring)
+ return rc->rx_ring->q_vector;
+ break;
+ case ICE_TX_CONTAINER:
+ if (rc->tx_ring)
+ return rc->tx_ring->q_vector;
+ break;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
/**
* __ice_write_itr - write throttle rate to register
* @q_vector: pointer to interrupt data structure
@@ -1877,15 +2015,39 @@ void ice_write_itr(struct ice_ring_container *rc, u16 itr)
{
struct ice_q_vector *q_vector;
- if (!rc->ring)
+ q_vector = ice_pull_qvec_from_rc(rc);
+ if (!q_vector)
return;
- q_vector = rc->ring->q_vector;
-
__ice_write_itr(q_vector, rc, itr);
}
/**
+ * ice_set_q_vector_intrl - set up interrupt rate limiting
+ * @q_vector: the vector to be configured
+ *
+ * Interrupt rate limiting is local to the vector, not per-queue, so we must
+ * detect if either ring container has dynamic moderation enabled to decide
+ * what to set the interrupt rate limit to via INTRL settings. In the case that
+ * dynamic moderation is disabled on both, write the value with the cached
+ * setting to make sure the INTRL register matches the user-visible value.
+ */
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector)
+{
+ if (ITR_IS_DYNAMIC(&q_vector->tx) || ITR_IS_DYNAMIC(&q_vector->rx)) {
+ /* when dynamic moderation is enabled, cap each vector to no
+ * more than 4 us (250,000 ints/sec): this allows low latency
+ * while still staying below 500,000 interrupts per second,
+ * which reduces CPU load a bit at the lowest latency setting.
+ */
+ ice_write_intrl(q_vector, 4);
+ } else {
+ ice_write_intrl(q_vector, q_vector->intrl);
+ }
+}
+
+/**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured
*
@@ -1899,7 +2061,7 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
u16 txq = 0, rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx;
@@ -2057,7 +2219,7 @@ int ice_vsi_stop_all_rx_rings(struct ice_vsi *vsi)
*/
static int
ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
- u16 rel_vmvf_num, struct ice_ring **rings, u16 count)
+ u16 rel_vmvf_num, struct ice_tx_ring **rings, u16 count)
{
u16 q_idx;
@@ -2122,11 +2284,10 @@ bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi)
* ice_cfg_vlan_pruning - enable or disable VLAN pruning on the VSI
* @vsi: VSI to enable or disable VLAN pruning on
* @ena: set to true to enable VLAN pruning and false to disable it
- * @vlan_promisc: enable valid security flags if not in VLAN promiscuous mode
*
* returns 0 if VSI is updated, negative otherwise
*/
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena)
{
struct ice_vsi_ctx *ctxt;
struct ice_pf *pf;
@@ -2154,9 +2315,7 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
- if (!vlan_promisc)
- ctxt->info.valid_sections =
- cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
+ ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
@@ -2179,10 +2338,14 @@ err_out:
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
- struct ice_dcbx_cfg *cfg = &vsi->port_info->qos_cfg.local_dcbx_cfg;
+ if (!test_bit(ICE_FLAG_DCB_ENA, vsi->back->flags)) {
+ vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
+ vsi->tc_cfg.numtc = 1;
+ return;
+ }
- vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
- vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
+ /* set VSI TC information based on DCB config */
+ ice_vsi_set_dcb_tc_cfg(vsi);
}
/**
@@ -2295,8 +2458,10 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_CHNL:
case ICE_VSI_LB:
case ICE_VSI_PF:
+ case ICE_VSI_SWITCHDEV_CTRL:
max_agg_nodes = ICE_MAX_PF_AGG_NODES;
agg_node_id_start = ICE_PF_AGG_NODE_ID_START;
agg_node_iter = &pf->pf_agg_node[0];
@@ -2393,6 +2558,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
* @vf_id: defines VF ID to which this VSI connects. This field is meant to be
* used only for ICE_VSI_VF VSI type. For other VSI types, should
* fill-in ICE_INVAL_VFID as input.
+ * @ch: ptr to channel
*
* This allocates the sw VSI structure and its queue resources.
*
@@ -2401,7 +2567,7 @@ static void ice_set_agg_vsi(struct ice_vsi *vsi)
*/
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id)
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = ice_pf_to_dev(pf);
@@ -2409,10 +2575,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
struct ice_vsi *vsi;
int ret, i;
- if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
- vsi = ice_vsi_alloc(pf, vsi_type, vf_id);
+ if (vsi_type == ICE_VSI_CHNL)
+ vsi = ice_vsi_alloc(pf, vsi_type, ch, ICE_INVAL_VFID);
+ else if (vsi_type == ICE_VSI_VF || vsi_type == ICE_VSI_CTRL)
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, vf_id);
else
- vsi = ice_vsi_alloc(pf, vsi_type, ICE_INVAL_VFID);
+ vsi = ice_vsi_alloc(pf, vsi_type, NULL, ICE_INVAL_VFID);
if (!vsi) {
dev_err(dev, "could not allocate VSI\n");
@@ -2429,10 +2597,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
ice_alloc_fd_res(vsi);
- if (ice_vsi_get_qs(vsi)) {
- dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
- vsi->idx);
- goto unroll_vsi_alloc;
+ if (vsi_type != ICE_VSI_CHNL) {
+ if (ice_vsi_get_qs(vsi)) {
+ dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
+ vsi->idx);
+ goto unroll_vsi_alloc;
+ }
}
/* set RSS capabilities */
@@ -2448,6 +2618,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
switch (vsi->type) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -2490,6 +2661,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
ice_init_arfs(vsi);
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
case ICE_VSI_VF:
/* VF driver will take care of creating netdev for this type and
* map queues to vectors through Virtchnl, PF driver only
@@ -2528,9 +2705,21 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
}
/* configure VSI nodes based on number of queues and TC's */
- for (i = 0; i < vsi->tc_cfg.numtc; i++)
- max_txqs[i] = vsi->alloc_txq;
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i)))
+ continue;
+
+ if (vsi->type == ICE_VSI_CHNL) {
+ if (!vsi->alloc_txq && vsi->num_txq)
+ max_txqs[i] = vsi->num_txq;
+ else
+ max_txqs[i] = pf->num_lan_tx;
+ } else {
+ max_txqs[i] = vsi->alloc_txq;
+ }
+ }
+ dev_dbg(dev, "vsi->tc_cfg.ena_tc = %d\n", vsi->tc_cfg.ena_tc);
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
@@ -2591,7 +2780,7 @@ static void ice_vsi_release_msix(struct ice_vsi *vsi)
u32 rxq = 0;
int i, q;
- for (i = 0; i < vsi->num_q_vectors; i++) {
+ ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
ice_write_intrl(q_vector, 0);
@@ -2757,7 +2946,8 @@ void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
} else {
ice_vsi_close(vsi);
}
- } else if (vsi->type == ICE_VSI_CTRL) {
+ } else if (vsi->type == ICE_VSI_CTRL ||
+ vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
ice_vsi_close(vsi);
}
}
@@ -2841,6 +3031,7 @@ void ice_napi_del(struct ice_vsi *vsi)
*/
int ice_vsi_release(struct ice_vsi *vsi)
{
+ enum ice_status err;
struct ice_pf *pf;
if (!vsi->back)
@@ -2859,7 +3050,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
}
- ice_devlink_destroy_port(vsi);
+ if (vsi->type == ICE_VSI_PF)
+ ice_devlink_destroy_pf_port(pf);
if (test_bit(ICE_FLAG_RSS_ENA, pf->flags))
ice_rss_clean(vsi);
@@ -2912,6 +3104,10 @@ int ice_vsi_release(struct ice_vsi *vsi)
ice_fltr_remove_all(vsi);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ err = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (err)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, err);
ice_vsi_delete(vsi);
ice_vsi_free_q_vectors(vsi);
@@ -3036,7 +3232,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
}
vsi->q_vectors[i]->intrl = coalesce[i].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[i].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
/* the number of queue vectors increased so write whatever is in
@@ -3054,7 +3250,7 @@ ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
ice_write_itr(rc, rc->itr_setting);
vsi->q_vectors[i]->intrl = coalesce[0].intrl;
- ice_write_intrl(vsi->q_vectors[i], coalesce[0].intrl);
+ ice_set_q_vector_intrl(vsi->q_vectors[i]);
}
}
@@ -3092,6 +3288,10 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi, coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
+ ret = ice_rm_vsi_rdma_cfg(vsi->port_info, vsi->idx);
+ if (ret)
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to remove RDMA scheduler config for VSI %u, err %d\n",
+ vsi->vsi_num, ret);
ice_vsi_free_q_vectors(vsi);
/* SR-IOV determines needed MSIX resources all at once instead of per
@@ -3135,6 +3335,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
switch (vtype) {
case ICE_VSI_CTRL:
+ case ICE_VSI_SWITCHDEV_CTRL:
case ICE_VSI_PF:
ret = ice_vsi_alloc_q_vectors(vsi);
if (ret)
@@ -3154,7 +3355,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_map_rings_to_vectors(vsi);
if (ice_is_xdp_ena_vsi(vsi)) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
+ ret = ice_vsi_determine_xdp_res(vsi);
+ if (ret)
+ goto err_vectors;
ret = ice_prepare_xdp_rings(vsi, vsi->xdp_prog);
if (ret)
goto err_vectors;
@@ -3182,20 +3385,42 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
goto err_vectors;
break;
+ case ICE_VSI_CHNL:
+ if (test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
+ ice_vsi_cfg_rss_lut_key(vsi);
+ ice_vsi_set_rss_flow_fld(vsi);
+ }
+ break;
default:
break;
}
/* configure VSI nodes based on number of queues and TC's */
for (i = 0; i < vsi->tc_cfg.numtc; i++) {
- max_txqs[i] = vsi->alloc_txq;
+ /* configure VSI nodes based on the number of queues and TCs.
+ * ADQ creates VSIs for each TC/channel but doesn't allocate
+ * queues; instead it reconfigures the PF queues as per the TC
+ * command, so max_txqs should point to the PF Tx queues.
+ */
+ if (vtype == ICE_VSI_CHNL)
+ max_txqs[i] = pf->num_lan_tx;
+ else
+ max_txqs[i] = vsi->alloc_txq;
if (ice_is_xdp_ena_vsi(vsi))
max_txqs[i] += vsi->num_xdp_txq;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ /* If MQPRIO is set, we are on the channel code path, hence for
+ * the main VSI use a TC count of 1
+ */
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1, max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
+
if (status) {
dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %s\n",
vsi->vsi_num, ice_stat_str(status));
@@ -3267,7 +3492,6 @@ int ice_wait_for_reset(struct ice_pf *pf, unsigned long timeout)
return 0;
}
-#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
@@ -3283,6 +3507,146 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ int numtc = vsi->tc_cfg.numtc;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ /* CHNL VSI doesn't have its own netdev, hence no netdev_tc */
+ if (vsi->type == ICE_VSI_CHNL)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (vsi->type == ICE_VSI_PF && ice_is_adq_active(pf))
+ numtc = vsi->all_numtc;
+
+ if (netdev_set_num_tc(netdev, numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+ /* setup TC queue map for CHNL TCs */
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ break;
+ if (!vsi->mqprio_qopt.qopt.count[i])
+ break;
+ netdev_set_tc_queue(netdev, i,
+ vsi->mqprio_qopt.qopt.count[i],
+ vsi->mqprio_qopt.qopt.offset[i]);
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ return;
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
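As a side note on the UP-to-TC mapping at the end of the function above: each user priority is looked up in the DCBX ETS priority table to find its TC, and that TC's netdev_tc index is what gets reported to the stack. The standalone sketch below is illustrative only (not part of the patch) and uses made-up ETS values.

#include <stdio.h>

#define MAX_USER_PRIORITY 8

int main(void)
{
        /* assumed ETS config: UPs 0-3 map to TC0, UPs 4-7 map to TC1 */
        unsigned char prio_table[MAX_USER_PRIORITY] = { 0, 0, 0, 0, 1, 1, 1, 1 };
        /* netdev_tc index assigned to each enabled TC (identity here) */
        unsigned char netdev_tc[2] = { 0, 1 };
        int up;

        for (up = 0; up < MAX_USER_PRIORITY; up++)
                printf("UP %d -> netdev TC %d\n", up, netdev_tc[prio_table[up]]);
        return 0;
}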
+
+/**
+ * ice_vsi_setup_q_map_mqprio - Prepares mqprio based tc_config
+ * @vsi: the VSI being configured
+ * @ctxt: VSI context structure
+ * @ena_tc: number of traffic classes to enable
+ *
+ * Prepares VSI tc_config to have queue configurations based on MQPRIO options.
+ */
+static void
+ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt,
+ u8 ena_tc)
+{
+ u16 pow, offset = 0, qcount_tx = 0, qcount_rx = 0, qmap;
+ u16 tc0_offset = vsi->mqprio_qopt.qopt.offset[0];
+ int tc0_qcount = vsi->mqprio_qopt.qopt.count[0];
+ u8 netdev_tc = 0;
+ int i;
+
+ vsi->tc_cfg.ena_tc = ena_tc ? ena_tc : 1;
+
+ pow = order_base_2(tc0_qcount);
+ qmap = ((tc0_offset << ICE_AQ_VSI_TC_Q_OFFSET_S) &
+ ICE_AQ_VSI_TC_Q_OFFSET_M) |
+ ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M);
+
+ ice_for_each_traffic_class(i) {
+ if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
+ /* TC is not enabled */
+ vsi->tc_cfg.tc_info[i].qoffset = 0;
+ vsi->tc_cfg.tc_info[i].qcount_rx = 1;
+ vsi->tc_cfg.tc_info[i].qcount_tx = 1;
+ vsi->tc_cfg.tc_info[i].netdev_tc = 0;
+ ctxt->info.tc_mapping[i] = 0;
+ continue;
+ }
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ vsi->tc_cfg.tc_info[i].qoffset = offset;
+ vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx;
+ vsi->tc_cfg.tc_info[i].qcount_tx = qcount_tx;
+ vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++;
+ }
+
+ if (vsi->all_numtc && vsi->all_numtc != vsi->tc_cfg.numtc) {
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ }
+
+ /* Set actual Tx/Rx queue pairs */
+ vsi->num_txq = offset + qcount_tx;
+ vsi->num_rxq = offset + qcount_rx;
+
+ /* Setup queue TC[0].qmap for given VSI context */
+ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap);
+ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]);
+ ctxt->info.q_mapping[1] = cpu_to_le16(tc0_qcount);
+
+ /* Find the queue count and starting offset available for
+ * channel VSIs
+ */
+ if (tc0_qcount && tc0_qcount < vsi->num_rxq) {
+ vsi->cnt_q_avail = vsi->num_rxq - tc0_qcount;
+ vsi->next_base_q = tc0_qcount;
+ }
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_txq = %d\n", vsi->num_txq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq);
+ dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n",
+ vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc);
+}
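The TC0 queue map built above packs the queue offset into the low bits of a 16-bit word and order_base_2() of the queue count above it. The standalone sketch below is illustrative only and not part of the patch; the field shifts and masks are placeholders, not the real ICE_AQ_VSI_TC_Q_OFFSET_S/ICE_AQ_VSI_TC_Q_NUM_S definitions.

#include <stdio.h>

#define Q_OFFSET_SHIFT 0            /* assumed layout, for illustration only */
#define Q_OFFSET_MASK  0x7ffu       /* assumed 11-bit offset field */
#define Q_NUM_SHIFT    11           /* assumed position of the count field */
#define Q_NUM_MASK     (0xfu << Q_NUM_SHIFT)

/* smallest order such that 2^order >= n, like the kernel's order_base_2() */
static unsigned int order_base_2_ex(unsigned int n)
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        unsigned int tc0_offset = 0, tc0_qcount = 16;
        unsigned int pow = order_base_2_ex(tc0_qcount);          /* 4 */
        unsigned int qmap = ((tc0_offset << Q_OFFSET_SHIFT) & Q_OFFSET_MASK) |
                            ((pow << Q_NUM_SHIFT) & Q_NUM_MASK);

        /* 2^4 = 16 queues starting at offset 0 for TC0 */
        printf("qmap = 0x%04x\n", qmap);
        return 0;
}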
+
+/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
@@ -3300,6 +3664,9 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
u8 num_tc = 0;
dev = ice_pf_to_dev(pf);
+ if (vsi->tc_cfg.ena_tc == ena_tc &&
+ vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
+ return ret;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
@@ -3307,6 +3674,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = vsi->alloc_txq;
+ /* Update max_txqs if this is a CHNL VSI: alloc_txq/alloc_rxq
+ * are zero for CHNL VSIs, so use num_txq as max_txqs instead
+ */
+ if (vsi->type == ICE_VSI_CHNL &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ max_txqs[i] = vsi->num_txq;
}
vsi->tc_cfg.ena_tc = ena_tc;
@@ -3319,7 +3692,11 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
ctx->vf_num = 0;
ctx->info = vsi->info;
- ice_vsi_setup_q_map(vsi, ctx);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc);
+ else
+ ice_vsi_setup_q_map(vsi, ctx);
/* must indicate which section of the VSI context is being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
@@ -3330,8 +3707,13 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
goto out;
}
- status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
- max_txqs);
+ if (vsi->type == ICE_VSI_PF &&
+ test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, 1,
+ max_txqs);
+ else
+ status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx,
+ vsi->tc_cfg.ena_tc, max_txqs);
if (status) {
dev_err(dev, "VSI %d failed TC config, error %s\n",
@@ -3347,20 +3729,19 @@ out:
kfree(ctx);
return ret;
}
-#endif /* CONFIG_DCB */
/**
* ice_update_ring_stats - Update ring statistics
- * @ring: ring to update
+ * @stats: stats to be updated
* @pkts: number of processed packets
* @bytes: number of processed bytes
*
* This function assumes that caller has acquired a u64_stats_sync lock.
*/
-static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
+static void ice_update_ring_stats(struct ice_q_stats *stats, u64 pkts, u64 bytes)
{
- ring->stats.bytes += bytes;
- ring->stats.pkts += pkts;
+ stats->bytes += bytes;
+ stats->pkts += pkts;
}
/**
@@ -3369,10 +3750,10 @@ static void ice_update_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
+void ice_update_tx_ring_stats(struct ice_tx_ring *tx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&tx_ring->syncp);
- ice_update_ring_stats(tx_ring, pkts, bytes);
+ ice_update_ring_stats(&tx_ring->stats, pkts, bytes);
u64_stats_update_end(&tx_ring->syncp);
}
@@ -3382,10 +3763,10 @@ void ice_update_tx_ring_stats(struct ice_ring *tx_ring, u64 pkts, u64 bytes)
* @pkts: number of processed packets
* @bytes: number of processed bytes
*/
-void ice_update_rx_ring_stats(struct ice_ring *rx_ring, u64 pkts, u64 bytes)
+void ice_update_rx_ring_stats(struct ice_rx_ring *rx_ring, u64 pkts, u64 bytes)
{
u64_stats_update_begin(&rx_ring->syncp);
- ice_update_ring_stats(rx_ring, pkts, bytes);
+ ice_update_ring_stats(&rx_ring->stats, pkts, bytes);
u64_stats_update_end(&rx_ring->syncp);
}
@@ -3538,6 +3919,180 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
}
/**
+ * ice_get_link_speed_mbps - get link speed in Mbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_mbps(struct ice_vsi *vsi)
+{
+ switch (vsi->port_info->phy.link_info.link_speed) {
+ case ICE_AQ_LINK_SPEED_100GB:
+ return SPEED_100000;
+ case ICE_AQ_LINK_SPEED_50GB:
+ return SPEED_50000;
+ case ICE_AQ_LINK_SPEED_40GB:
+ return SPEED_40000;
+ case ICE_AQ_LINK_SPEED_25GB:
+ return SPEED_25000;
+ case ICE_AQ_LINK_SPEED_20GB:
+ return SPEED_20000;
+ case ICE_AQ_LINK_SPEED_10GB:
+ return SPEED_10000;
+ case ICE_AQ_LINK_SPEED_5GB:
+ return SPEED_5000;
+ case ICE_AQ_LINK_SPEED_2500MB:
+ return SPEED_2500;
+ case ICE_AQ_LINK_SPEED_1000MB:
+ return SPEED_1000;
+ case ICE_AQ_LINK_SPEED_100MB:
+ return SPEED_100;
+ case ICE_AQ_LINK_SPEED_10MB:
+ return SPEED_10;
+ case ICE_AQ_LINK_SPEED_UNKNOWN:
+ default:
+ return 0;
+ }
+}
+
+/**
+ * ice_get_link_speed_kbps - get link speed in Kbps
+ * @vsi: the VSI whose link speed is being queried
+ *
+ * Return the current VSI link speed, or 0 if the speed is unknown.
+ */
+int ice_get_link_speed_kbps(struct ice_vsi *vsi)
+{
+ int speed_mbps;
+
+ speed_mbps = ice_get_link_speed_mbps(vsi);
+
+ return speed_mbps * 1000;
+}
+
+/**
+ * ice_set_min_bw_limit - setup minimum BW limit for Tx based on min_tx_rate
+ * @vsi: VSI to be configured
+ * @min_tx_rate: min Tx rate in Kbps to be configured as BW limit
+ *
+ * If the min_tx_rate is specified as 0 that means to clear the minimum BW limit
+ * profile, otherwise a non-zero value will force a minimum BW limit for the VSI
+ * on TC 0.
+ */
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (min_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid min Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure min BW for VSI limit */
+ if (min_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MIN_BW, min_tx_rate);
+ if (status) {
+ dev_err(dev, "failed to set min Tx rate(%llu Kbps) for %s %d\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set min Tx rate(%llu Kbps) for %s\n",
+ min_tx_rate, ice_vsi_type_str(vsi->type));
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MIN_BW);
+ if (status) {
+ dev_err(dev, "failed to clear min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared min Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
+
+/**
+ * ice_set_max_bw_limit - setup maximum BW limit for Tx based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as BW limit
+ *
+ * If the max_tx_rate is specified as 0 that means to clear the maximum BW limit
+ * profile, otherwise a non-zero value will force a maximum BW limit for the VSI
+ * on TC 0.
+ */
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate)
+{
+ struct ice_pf *pf = vsi->back;
+ enum ice_status status;
+ struct device *dev;
+ int speed;
+
+ dev = ice_pf_to_dev(pf);
+ if (!vsi->port_info) {
+ dev_dbg(dev, "VSI %d, type %u specified doesn't have valid port_info\n",
+ vsi->idx, vsi->type);
+ return -EINVAL;
+ }
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (max_tx_rate > (u64)speed) {
+ dev_err(dev, "invalid max Tx rate %llu Kbps specified for %s %d is greater than current link speed %u Kbps\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx,
+ speed);
+ return -EINVAL;
+ }
+
+ /* Configure max BW for VSI limit */
+ if (max_tx_rate) {
+ status = ice_cfg_vsi_bw_lmt_per_tc(vsi->port_info, vsi->idx, 0,
+ ICE_MAX_BW, max_tx_rate);
+ if (status) {
+ dev_err(dev, "failed setting max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type),
+ vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "set max Tx rate(%llu Kbps) for %s %d\n",
+ max_tx_rate, ice_vsi_type_str(vsi->type), vsi->idx);
+ } else {
+ status = ice_cfg_vsi_bw_dflt_lmt_per_tc(vsi->port_info,
+ vsi->idx, 0,
+ ICE_MAX_BW);
+ if (status) {
+ dev_err(dev, "failed clearing max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ return -EIO;
+ }
+
+ dev_dbg(dev, "cleared max Tx rate configuration for %s %d\n",
+ ice_vsi_type_str(vsi->type), vsi->idx);
+ }
+
+ return 0;
+}
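The two helpers above share one validation rule: a non-zero rate in Kbps is rejected if it exceeds the current link speed, while a rate of 0 means "clear the limit". The standalone sketch below is illustrative only (no driver calls) and uses the same Mbps-to-Kbps conversion as ice_get_link_speed_kbps().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* returns true if the requested BW limit can be programmed */
static bool bw_limit_request_ok(uint64_t rate_kbps, int link_speed_mbps)
{
        uint64_t link_kbps = (uint64_t)link_speed_mbps * 1000;

        if (!rate_kbps)         /* 0 clears the existing limit */
                return true;
        return rate_kbps <= link_kbps;
}

int main(void)
{
        /* on a 25G link: 30 Gbps is rejected, 10 Gbps and "clear" are fine */
        printf("%d\n", bw_limit_request_ok(30000000, 25000)); /* 0 */
        printf("%d\n", bw_limit_request_ok(10000000, 25000)); /* 1 */
        printf("%d\n", bw_limit_request_ok(0, 25000));         /* 1 */
        return 0;
}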
+
+/**
* ice_set_link - turn on/off physical link
* @vsi: VSI to modify physical link on
* @ena: turn on/off physical link
@@ -3573,3 +4128,126 @@ int ice_set_link(struct ice_vsi *vsi, bool ena)
return 0;
}
+
+/**
+ * ice_is_feature_supported - check if a feature is supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to be checked
+ *
+ * Returns true if the feature is supported, false otherwise
+ */
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return false;
+
+ return test_bit(f, pf->features);
+}
+
+/**
+ * ice_set_feature_support - mark a feature as supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to set
+ */
+static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ set_bit(f, pf->features);
+}
+
+/**
+ * ice_clear_feature_support - mark a feature as not supported
+ * @pf: pointer to the struct ice_pf instance
+ * @f: feature enum to clear
+ */
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f)
+{
+ if (f < 0 || f >= ICE_F_MAX)
+ return;
+
+ clear_bit(f, pf->features);
+}
+
+/**
+ * ice_init_feature_support - initialize the supported-feature bitmap
+ * @pf: pointer to the struct ice_pf instance
+ *
+ * Called during init to set up the supported features
+ */
+void ice_init_feature_support(struct ice_pf *pf)
+{
+ switch (pf->hw.device_id) {
+ case ICE_DEV_ID_E810C_BACKPLANE:
+ case ICE_DEV_ID_E810C_QSFP:
+ case ICE_DEV_ID_E810C_SFP:
+ ice_set_feature_support(pf, ICE_F_DSCP);
+ if (ice_is_e810t(&pf->hw))
+ ice_set_feature_support(pf, ICE_F_SMA_CTRL);
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * ice_vsi_update_security - update security block in VSI
+ * @vsi: pointer to VSI structure
+ * @fill: function pointer to fill ctx
+ */
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *))
+{
+ struct ice_vsi_ctx ctx = { 0 };
+
+ ctx.info = vsi->info;
+ ctx.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
+ fill(&ctx);
+
+ if (ice_update_vsi(&vsi->back->hw, vsi->idx, &ctx, NULL))
+ return -ENODEV;
+
+ vsi->info = ctx.info;
+ return 0;
+}
+
+/**
+ * ice_vsi_ctx_set_antispoof - set antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
+ (ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_clear_antispoof - clear antispoof function in VSI ctx
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF &
+ ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
+ ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
+}
+
+/**
+ * ice_vsi_ctx_set_allow_override - allow destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
+
+/**
+ * ice_vsi_ctx_clear_allow_override - turn off destination override on VSI
+ * @ctx: pointer to VSI ctx structure
+ */
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx)
+{
+ ctx->info.sec_flags &= ~ICE_AQ_VSI_SEC_FLAG_ALLOW_DEST_OVRD;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index d5a28bf0fc2c..6c803407b67d 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -14,7 +14,7 @@ void ice_update_eth_stats(struct ice_vsi *vsi);
int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx);
-int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_ring **tx_rings, u16 q_idx);
+int ice_vsi_cfg_single_txq(struct ice_vsi *vsi, struct ice_tx_ring **tx_rings, u16 q_idx);
int ice_vsi_cfg_rxqs(struct ice_vsi *vsi);
@@ -45,19 +45,24 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi);
bool ice_vsi_is_vlan_pruning_ena(struct ice_vsi *vsi);
-int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
+int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
-#ifdef CONFIG_DCB
+void ice_vsi_delete(struct ice_vsi *vsi);
+int ice_vsi_clear(struct ice_vsi *vsi);
+
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
-#endif /* CONFIG_DCB */
+
+int ice_vsi_cfg_rss_lut_key(struct ice_vsi *vsi);
+
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
- enum ice_vsi_type vsi_type, u16 vf_id);
+ enum ice_vsi_type vsi_type, u16 vf_id, struct ice_channel *ch);
void ice_napi_del(struct ice_vsi *vsi);
@@ -93,9 +98,9 @@ void ice_vsi_free_tx_rings(struct ice_vsi *vsi);
void ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
-void ice_update_tx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_tx_ring_stats(struct ice_tx_ring *ring, u64 pkts, u64 bytes);
-void ice_update_rx_ring_stats(struct ice_ring *ring, u64 pkts, u64 bytes);
+void ice_update_rx_ring_stats(struct ice_rx_ring *ring, u64 pkts, u64 bytes);
void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
@@ -103,6 +108,7 @@ int ice_status_to_errno(enum ice_status err);
void ice_write_intrl(struct ice_q_vector *q_vector, u8 intrl);
void ice_write_itr(struct ice_ring_container *rc, u16 itr);
+void ice_set_q_vector_intrl(struct ice_q_vector *q_vector);
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
@@ -116,4 +122,22 @@ bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
+int ice_set_min_bw_limit(struct ice_vsi *vsi, u64 min_tx_rate);
+int ice_set_max_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate);
+int ice_get_link_speed_kbps(struct ice_vsi *vsi);
+int ice_get_link_speed_mbps(struct ice_vsi *vsi);
+int
+ice_vsi_update_security(struct ice_vsi *vsi, void (*fill)(struct ice_vsi_ctx *));
+
+void ice_vsi_ctx_set_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_antispoof(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_set_allow_override(struct ice_vsi_ctx *ctx);
+
+void ice_vsi_ctx_clear_allow_override(struct ice_vsi_ctx *ctx);
+
+bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
+void ice_init_feature_support(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 0d6c143f6653..f099797f35e3 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -19,6 +19,8 @@
*/
#define CREATE_TRACE_POINTS
#include "ice_trace.h"
+#include "ice_eswitch.h"
+#include "ice_tc_lib.h"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
static const char ice_driver_string[] = DRV_SUMMARY;
@@ -42,16 +44,26 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static DEFINE_IDA(ice_aux_ida);
+DEFINE_STATIC_KEY_FALSE(ice_xdp_locking_key);
+EXPORT_SYMBOL(ice_xdp_locking_key);
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
-static int ice_vsi_open(struct ice_vsi *vsi);
static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
+static int ice_rebuild_channels(struct ice_pf *pf);
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_adv_fltr);
+
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb));
+
bool netif_is_ice(struct net_device *dev)
{
return dev && (dev->netdev_ops == &ice_netdev_ops);
@@ -61,7 +73,7 @@ bool netif_is_ice(struct net_device *dev)
* ice_get_tx_pending - returns number of Tx descriptors not processed
* @ring: the ring of descriptors
*/
-static u16 ice_get_tx_pending(struct ice_ring *ring)
+static u16 ice_get_tx_pending(struct ice_tx_ring *ring)
{
u16 head, tail;
@@ -100,10 +112,15 @@ static void ice_check_for_hang_subtask(struct ice_pf *pf)
hw = &vsi->back->hw;
- for (i = 0; i < vsi->num_txq; i++) {
- struct ice_ring *tx_ring = vsi->tx_rings[i];
+ ice_for_each_txq(vsi, i) {
+ struct ice_tx_ring *tx_ring = vsi->tx_rings[i];
+
+ if (!tx_ring)
+ continue;
+ if (ice_ring_ch_enabled(tx_ring))
+ continue;
- if (tx_ring && tx_ring->desc) {
+ if (tx_ring->desc) {
/* If packet counter has not changed the queue is
* likely stalled, so force an interrupt for this
* queue.
@@ -379,7 +396,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
~IFF_PROMISC;
goto out_promisc;
}
- ice_cfg_vlan_pruning(vsi, false, false);
+ ice_cfg_vlan_pruning(vsi, false);
}
} else {
/* Clear Rx filter to remove traffic from wire */
@@ -393,7 +410,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
goto out_promisc;
}
if (vsi->num_vlan > 1)
- ice_cfg_vlan_pruning(vsi, true, false);
+ ice_cfg_vlan_pruning(vsi, true);
}
}
}
@@ -455,17 +472,21 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
}
/**
- * ice_prepare_for_reset - prep for the core to reset
+ * ice_prepare_for_reset - prep for reset
* @pf: board private structure
+ * @reset_type: reset type requested
*
* Inform or close all dependent features in prep for reset.
*/
static void
-ice_prepare_for_reset(struct ice_pf *pf)
+ice_prepare_for_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct ice_hw *hw = &pf->hw;
+ struct ice_vsi *vsi;
unsigned int i;
+ dev_dbg(ice_pf_to_dev(pf), "reset_type=%d\n", reset_type);
+
/* already prepared for reset */
if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
return;
@@ -480,6 +501,38 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ /* release ADQ specific HW and SW resources */
+ vsi = ice_get_main_vsi(pf);
+ if (!vsi)
+ goto skip;
+
+ /* to be on the safe side, reset orig_rss_size so that the
+ * normal flow of deciding rss_size can take precedence
+ */
+ vsi->orig_rss_size = 0;
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ if (reset_type == ICE_RESET_PFR) {
+ vsi->old_ena_tc = vsi->all_enatc;
+ vsi->old_numtc = vsi->all_numtc;
+ } else {
+ ice_remove_q_channels(vsi, true);
+
+ /* channel rebuild is not supported for other reset types,
+ * so reset the related info
+ */
+ vsi->old_ena_tc = 0;
+ vsi->all_enatc = 0;
+ vsi->old_numtc = 0;
+ vsi->all_numtc = 0;
+ vsi->req_txq = 0;
+ vsi->req_rxq = 0;
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));
+ }
+ }
+skip:
+
/* clear SW filtering DB */
ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
@@ -499,8 +552,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
/**
* ice_do_reset - Initiate one of many types of resets
* @pf: board private structure
- * @reset_type: reset type requested
- * before this function was called.
+ * @reset_type: reset type requested before this function was called.
*/
static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
{
@@ -509,7 +561,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
dev_dbg(dev, "reset_type 0x%x requested\n", reset_type);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* trigger the reset */
if (ice_reset(hw, reset_type)) {
@@ -567,7 +619,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* return if no valid reset type requested */
if (reset_type == ICE_RESET_INVAL)
return;
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, reset_type);
/* make sure we are ready to rebuild */
if (ice_check_reset(&pf->hw)) {
@@ -624,7 +676,10 @@ static void ice_print_topo_conflict(struct ice_vsi *vsi)
netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not intended, please use the Intel (R) Ethernet Port Configuration Tool to address the issue.\n");
break;
case ICE_AQ_LINK_TOPO_UNSUPP_MEDIA:
- netdev_info(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
+ if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
+ netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules\n");
+ else
+ netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was detected. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for a list of supported modules.\n");
break;
default:
break;
@@ -882,6 +937,29 @@ static void ice_set_dflt_mib(struct ice_pf *pf)
}
/**
+ * ice_check_phy_fw_load - check if PHY FW load failed
+ * @pf: pointer to PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * check if external PHY FW load failed and print an error message if it did
+ */
+static void ice_check_phy_fw_load(struct ice_pf *pf, u8 link_cfg_err)
+{
+ if (!(link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE)) {
+ clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+ return;
+ }
+
+ if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))
+ return;
+
+ if (link_cfg_err & ICE_AQ_LINK_EXTERNAL_PHY_LOAD_FAILURE) {
+ dev_err(ice_pf_to_dev(pf), "Device failed to load the FW for the external PHY. Please download and install the latest NVM for your device and try again\n");
+ set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);
+ }
+}
+
+/**
* ice_check_module_power
* @pf: pointer to PF struct
* @link_cfg_err: bitmap from the link info structure
@@ -914,6 +992,20 @@ static void ice_check_module_power(struct ice_pf *pf, u8 link_cfg_err)
}
/**
+ * ice_check_link_cfg_err - check if link configuration failed
+ * @pf: pointer to the PF struct
+ * @link_cfg_err: bitmap from the link info structure
+ *
+ * print a message if any link configuration failure is reported in the
+ * link_cfg_err field of the link info structure
+ */
+static void ice_check_link_cfg_err(struct ice_pf *pf, u8 link_cfg_err)
+{
+ ice_check_module_power(pf, link_cfg_err);
+ ice_check_phy_fw_load(pf, link_cfg_err);
+}
+
+/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
* @pi: port_info for the port that the link event is associated with
@@ -948,7 +1040,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
pi->lport, ice_stat_str(status),
ice_aq_str(pi->hw->adminq.sq_last_status));
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
@@ -1026,7 +1118,8 @@ static int ice_init_link_events(struct ice_port_info *pi)
u16 mask;
mask = ~((u16)(ICE_AQ_LINK_EVENT_UPDOWN | ICE_AQ_LINK_EVENT_MEDIA_NA |
- ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
+ ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL |
+ ICE_AQ_LINK_EVENT_PHY_FW_LOAD_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
@@ -1965,7 +2058,8 @@ static int ice_configure_phy(struct ice_vsi *vsi)
ice_print_topo_conflict(vsi);
- if (phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
+ if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
+ phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
return -EPERM;
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))
@@ -2096,7 +2190,7 @@ static void ice_check_media_subtask(struct ice_pf *pf)
if (err)
return;
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))
@@ -2302,14 +2396,14 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
irq_num = pf->msix_entries[base + vector].vector;
- if (q_vector->tx.ring && q_vector->rx.ring) {
+ if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "TxRx", rx_int_idx++);
tx_int_idx++;
- } else if (q_vector->rx.ring) {
+ } else if (q_vector->rx.rx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "rx", rx_int_idx++);
- } else if (q_vector->tx.ring) {
+ } else if (q_vector->tx.tx_ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1,
"%s-%s-%d", basename, "tx", tx_int_idx++);
} else {
@@ -2367,11 +2461,12 @@ free_q_irqs:
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
struct device *dev = ice_pf_to_dev(vsi->back);
- int i;
+ struct ice_tx_desc *tx_desc;
+ int i, j;
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
u16 xdp_q_idx = vsi->alloc_txq + i;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
xdp_ring = kzalloc(sizeof(*xdp_ring), GFP_KERNEL);
@@ -2380,16 +2475,29 @@ static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
xdp_ring->q_index = xdp_q_idx;
xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
- xdp_ring->ring_active = false;
xdp_ring->vsi = vsi;
xdp_ring->netdev = NULL;
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
xdp_ring->dev = dev;
xdp_ring->count = vsi->num_tx_desc;
WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);
if (ice_setup_tx_ring(xdp_ring))
goto free_xdp_rings;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
+ spin_lock_init(&xdp_ring->tx_lock);
+ for (j = 0; j < xdp_ring->count; j++) {
+ tx_desc = ICE_TX_DESC(xdp_ring, j);
+ tx_desc->cmd_type_offset_bsz = cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE);
+ }
+ }
+
+ ice_for_each_rxq(vsi, i) {
+ if (static_key_enabled(&ice_xdp_locking_key))
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i % vsi->num_xdp_txq];
+ else
+ vsi->rx_rings[i]->xdp_ring = vsi->xdp_rings[i];
}
return 0;
@@ -2455,6 +2563,10 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
if (__ice_vsi_get_qs(&xdp_qs_cfg))
goto err_map_xdp;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ netdev_warn(vsi->netdev,
+ "Could not allocate one XDP Tx ring per CPU, XDP_TX/XDP_REDIRECT actions will be slower\n");
+
if (ice_xdp_alloc_setup_rings(vsi))
goto clear_xdp_rings;
@@ -2468,11 +2580,11 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
q_base = vsi->num_xdp_txq - xdp_rings_rem;
for (q_id = q_base; q_id < (q_base + xdp_rings_per_v); q_id++) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_id];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];
xdp_ring->q_vector = q_vector;
- xdp_ring->next = q_vector->tx.ring;
- q_vector->tx.ring = xdp_ring;
+ xdp_ring->next = q_vector->tx.tx_ring;
+ q_vector->tx.tx_ring = xdp_ring;
}
xdp_rings_rem -= xdp_rings_per_v;
}
@@ -2501,7 +2613,7 @@ int ice_prepare_xdp_rings(struct ice_vsi *vsi, struct bpf_prog *prog)
return 0;
clear_xdp_rings:
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
kfree_rcu(vsi->xdp_rings[i], rcu);
vsi->xdp_rings[i] = NULL;
@@ -2509,7 +2621,7 @@ clear_xdp_rings:
err_map_xdp:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
@@ -2542,25 +2654,25 @@ int ice_destroy_xdp_rings(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, v_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
- ice_for_each_ring(ring, q_vector->tx)
+ ice_for_each_tx_ring(ring, q_vector->tx)
if (!ring->tx_buf || !ice_ring_is_xdp(ring))
break;
/* restore the value of last node prior to XDP setup */
- q_vector->tx.ring = ring;
+ q_vector->tx.tx_ring = ring;
}
free_qmap:
mutex_lock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++) {
+ ice_for_each_xdp_txq(vsi, i) {
clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;
}
mutex_unlock(&pf->avail_q_mutex);
- for (i = 0; i < vsi->num_xdp_txq; i++)
+ ice_for_each_xdp_txq(vsi, i)
if (vsi->xdp_rings[i]) {
if (vsi->xdp_rings[i]->desc)
ice_free_tx_ring(vsi->xdp_rings[i]);
@@ -2571,6 +2683,9 @@ free_qmap:
devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
vsi->xdp_rings = NULL;
+ if (static_key_enabled(&ice_xdp_locking_key))
+ static_branch_dec(&ice_xdp_locking_key);
+
if (ice_is_reset_in_progress(pf->state) || !vsi->q_vectors[0])
return 0;
@@ -2598,7 +2713,7 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
int i;
ice_for_each_rxq(vsi, i) {
- struct ice_ring *rx_ring = vsi->rx_rings[i];
+ struct ice_rx_ring *rx_ring = vsi->rx_rings[i];
if (rx_ring->xsk_pool)
napi_schedule(&rx_ring->q_vector->napi);
@@ -2606,6 +2721,29 @@ static void ice_vsi_rx_napi_schedule(struct ice_vsi *vsi)
}
/**
+ * ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
+ * @vsi: VSI to determine the count of XDP Tx qs
+ *
+ * returns 0 if Tx qs count is higher than at least half of CPU count,
+ * -ENOMEM otherwise
+ */
+int ice_vsi_determine_xdp_res(struct ice_vsi *vsi)
+{
+ u16 avail = ice_get_avail_txq_count(vsi->back);
+ u16 cpus = num_possible_cpus();
+
+ if (avail < cpus / 2)
+ return -ENOMEM;
+
+ vsi->num_xdp_txq = min_t(u16, avail, cpus);
+
+ if (vsi->num_xdp_txq < cpus)
+ static_branch_inc(&ice_xdp_locking_key);
+
+ return 0;
+}
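A standalone sketch of the sizing policy implemented above (illustrative only, not part of the patch): require at least half as many free Tx queues as CPUs, size the XDP ring set to min(avail, cpus), and fall back to locked/shared rings when there are fewer rings than CPUs.

#include <stdbool.h>
#include <stdio.h>

struct xdp_res {
        int num_xdp_txq;
        bool need_tx_lock;      /* stands in for ice_xdp_locking_key */
};

static int determine_xdp_res(int avail_txq, int cpus, struct xdp_res *res)
{
        if (avail_txq < cpus / 2)
                return -1;      /* not enough resources (-ENOMEM in the driver) */

        res->num_xdp_txq = avail_txq < cpus ? avail_txq : cpus;
        res->need_tx_lock = res->num_xdp_txq < cpus;
        return 0;
}

int main(void)
{
        struct xdp_res res;

        /* 16 free Tx queues, 32 CPUs: 16 shared (locked) XDP rings */
        if (!determine_xdp_res(16, 32, &res))
                printf("rings=%d locked=%d\n", res.num_xdp_txq, res.need_tx_lock);

        /* 8 free Tx queues, 32 CPUs: rejected, below half the CPU count */
        printf("rc=%d\n", determine_xdp_res(8, 32, &res));
        return 0;
}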
+
+/**
* ice_xdp_setup_prog - Add or remove XDP eBPF program
* @vsi: VSI to setup XDP for
* @prog: XDP program
@@ -2634,10 +2772,14 @@ ice_xdp_setup_prog(struct ice_vsi *vsi, struct bpf_prog *prog,
}
if (!ice_is_xdp_ena_vsi(vsi) && prog) {
- vsi->num_xdp_txq = vsi->alloc_rxq;
- xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
- if (xdp_ring_err)
- NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ xdp_ring_err = ice_vsi_determine_xdp_res(vsi);
+ if (xdp_ring_err) {
+ NL_SET_ERR_MSG_MOD(extack, "Not enough Tx resources for XDP");
+ } else {
+ xdp_ring_err = ice_prepare_xdp_rings(vsi, prog);
+ if (xdp_ring_err)
+ NL_SET_ERR_MSG_MOD(extack, "Setting up XDP Tx resources failed");
+ }
} else if (ice_is_xdp_ena_vsi(vsi) && !prog) {
xdp_ring_err = ice_destroy_xdp_rings(vsi);
if (xdp_ring_err)
@@ -3103,6 +3245,9 @@ static void ice_set_netdev_features(struct net_device *netdev)
/* enable features */
netdev->features |= netdev->hw_features;
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
/* encap and VLAN devices inherit default, csumo and tso features */
netdev->hw_enc_features |= dflt_features | csumo_features |
tso_features;
@@ -3139,7 +3284,7 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
if (vsi->type == ICE_VSI_PF) {
SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back));
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
- ether_addr_copy(netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(netdev, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
@@ -3182,7 +3327,14 @@ void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
static struct ice_vsi *
ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_PF, ICE_INVAL_VFID, NULL);
+}
+
+static struct ice_vsi *
+ice_chnl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
+ struct ice_channel *ch)
+{
+ return ice_vsi_setup(pf, pi, ICE_VSI_CHNL, ICE_INVAL_VFID, ch);
}
/**
@@ -3196,7 +3348,7 @@ ice_pf_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
static struct ice_vsi *
ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_CTRL, ICE_INVAL_VFID, NULL);
}
/**
@@ -3210,7 +3362,7 @@ ice_ctrl_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
struct ice_vsi *
ice_lb_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi)
{
- return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID);
+ return ice_vsi_setup(pf, pi, ICE_VSI_LB, ICE_INVAL_VFID, NULL);
}
/**
@@ -3235,7 +3387,7 @@ ice_vlan_rx_add_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Enable VLAN pruning when a VLAN other than 0 is added */
if (!ice_vsi_is_vlan_pruning_ena(vsi)) {
- ret = ice_cfg_vlan_pruning(vsi, true, false);
+ ret = ice_cfg_vlan_pruning(vsi, true);
if (ret)
return ret;
}
@@ -3279,13 +3431,70 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
/* Disable pruning when VLAN 0 is the only VLAN rule */
if (vsi->num_vlan == 1 && ice_vsi_is_vlan_pruning_ena(vsi))
- ret = ice_cfg_vlan_pruning(vsi, false, false);
+ ret = ice_cfg_vlan_pruning(vsi, false);
set_bit(ICE_VSI_VLAN_FLTR_CHANGED, vsi->state);
return ret;
}
/**
+ * ice_rep_indr_tc_block_unbind - unbind indirect TC block callback
+ * @cb_priv: indirection block private data
+ */
+static void ice_rep_indr_tc_block_unbind(void *cb_priv)
+{
+ struct ice_indr_block_priv *indr_priv = cb_priv;
+
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+}
+
+/**
+ * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ */
+static void ice_tc_indir_block_unregister(struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(vsi->netdev);
+
+ flow_indr_dev_unregister(ice_indr_setup_tc_cb, np,
+ ice_rep_indr_tc_block_unbind);
+}
+
+/**
+ * ice_tc_indir_block_remove - clean indirect TC block notifications
+ * @pf: PF structure
+ */
+static void ice_tc_indir_block_remove(struct ice_pf *pf)
+{
+ struct ice_vsi *pf_vsi = ice_get_main_vsi(pf);
+
+ if (!pf_vsi)
+ return;
+
+ ice_tc_indir_block_unregister(pf_vsi);
+}
+
+/**
+ * ice_tc_indir_block_register - Register TC indirect block notifications
+ * @vsi: VSI struct which has the netdev
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_tc_indir_block_register(struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np;
+
+ if (!vsi || !vsi->netdev)
+ return -EINVAL;
+
+ np = netdev_priv(vsi->netdev);
+
+ INIT_LIST_HEAD(&np->tc_indr_block_priv_list);
+ return flow_indr_dev_register(ice_indr_setup_tc_cb, np);
+}
+
+/**
* ice_setup_pf_sw - Setup the HW switch on startup or after reset
* @pf: board private structure
*
@@ -3293,6 +3502,7 @@ ice_vlan_rx_kill_vid(struct net_device *netdev, __always_unused __be16 proto,
*/
static int ice_setup_pf_sw(struct ice_pf *pf)
{
+ struct device *dev = ice_pf_to_dev(pf);
struct ice_vsi *vsi;
int status = 0;
@@ -3303,6 +3513,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
if (!vsi)
return -ENOMEM;
+ /* init channel list */
+ INIT_LIST_HEAD(&vsi->ch_list);
+
status = ice_cfg_netdev(vsi);
if (status) {
status = -ENODEV;
@@ -3311,6 +3524,13 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
+ /* init indirect block notifications */
+ status = ice_tc_indir_block_register(vsi);
+ if (status) {
+ dev_err(dev, "Failed to register netdev notifier\n");
+ goto unroll_cfg_netdev;
+ }
+
/* Setup DCB netlink interface */
ice_dcbnl_setup(vsi);
@@ -3322,7 +3542,7 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
status = ice_set_cpu_rx_rmap(vsi);
if (status) {
- dev_err(ice_pf_to_dev(pf), "Failed to set CPU Rx map VSI %d error %d\n",
+ dev_err(dev, "Failed to set CPU Rx map VSI %d error %d\n",
vsi->vsi_num, status);
status = -EINVAL;
goto unroll_napi_add;
@@ -3335,8 +3555,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
free_cpu_rx_map:
ice_free_cpu_rx_rmap(vsi);
-
unroll_napi_add:
+ ice_tc_indir_block_unregister(vsi);
+unroll_cfg_netdev:
if (vsi) {
ice_napi_del(vsi);
if (vsi->netdev) {
@@ -3538,6 +3759,13 @@ static int ice_ena_msix_range(struct ice_pf *pf)
v_left -= needed;
}
+ /* reserve for switchdev */
+ needed = ICE_ESWITCH_MSIX;
+ if (v_left < needed)
+ goto no_hw_vecs_left_err;
+ v_budget += needed;
+ v_left -= needed;
+
/* total used for non-traffic vectors */
v_other = v_budget;
@@ -4170,11 +4398,11 @@ static int ice_register_netdev(struct ice_pf *pf)
set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state);
netif_carrier_off(vsi->netdev);
netif_tx_stop_all_queues(vsi->netdev);
- err = ice_devlink_create_port(vsi);
+ err = ice_devlink_create_pf_port(pf);
if (err)
goto err_devlink_create;
- devlink_port_type_eth_set(&vsi->devlink_port, vsi->netdev);
+ devlink_port_type_eth_set(&pf->devlink_port, vsi->netdev);
return 0;
err_devlink_create:
@@ -4224,6 +4452,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (!pf)
return -ENOMEM;
+ /* initialize Auxiliary index to invalid value */
+ pf->aux_idx = -1;
+
/* set up for high or low DMA */
err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
if (err)
@@ -4258,12 +4489,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M);
- err = ice_devlink_register(pf);
- if (err) {
- dev_err(dev, "ice_devlink_register failed: %d\n", err);
- goto err_exit_unroll;
- }
-
#ifndef CONFIG_DYNAMIC_DEBUG
if (debug < -1)
hw->debug_mask = debug;
@@ -4276,6 +4501,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
+ ice_init_feature_support(pf);
+
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -4411,7 +4638,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_init_link_dflt_override(pf->hw.port_info);
- ice_check_module_power(pf, pf->hw.port_info->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf,
+ pf->hw.port_info->phy.link_info.link_cfg_err);
/* if media available, initialize PHY settings */
if (pf->hw.port_info->phy.link_info.link_info &
@@ -4497,6 +4725,7 @@ probe_done:
dev_warn(dev, "RDMA is not supported on this device\n");
}
+ ice_devlink_register(pf);
return 0;
err_init_aux_unroll:
@@ -4520,7 +4749,6 @@ err_init_pf_unroll:
ice_devlink_destroy_regions(pf);
ice_deinit_hw(hw);
err_exit_unroll:
- ice_devlink_unregister(pf);
pci_disable_pcie_error_reporting(pdev);
pci_disable_device(pdev);
return err;
@@ -4597,15 +4825,15 @@ static void ice_remove(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
int i;
- if (!pf)
- return;
-
+ ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
break;
msleep(100);
}
+ ice_tc_indir_block_remove(pf);
+
if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
set_bit(ICE_VF_RESETS_DISABLED, pf->state);
ice_free_vfs(pf);
@@ -4615,7 +4843,8 @@ static void ice_remove(struct pci_dev *pdev)
ice_aq_cancel_waiting_tasks(pf);
ice_unplug_aux_dev(pf);
- ida_free(&ice_aux_ida, pf->aux_idx);
+ if (pf->aux_idx >= 0)
+ ida_free(&ice_aux_ida, pf->aux_idx);
set_bit(ICE_DOWN, pf->state);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
@@ -4636,7 +4865,6 @@ static void ice_remove(struct pci_dev *pdev)
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
- ice_devlink_unregister(pf);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
@@ -4898,7 +5126,7 @@ ice_pci_err_detected(struct pci_dev *pdev, pci_channel_state_t err)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
@@ -4990,7 +5218,7 @@ static void ice_pci_err_reset_prepare(struct pci_dev *pdev)
if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) {
set_bit(ICE_PFR_REQ, pf->state);
- ice_prepare_for_reset(pf);
+ ice_prepare_for_reset(pf, ICE_RESET_PFR);
}
}
}
@@ -5016,6 +5244,8 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_BACKPLANE), 0 },
+ { PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810_XXV_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E823C_QSFP), 0 },
@@ -5144,10 +5374,16 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi)
return -EBUSY;
}
+ if (ice_chnl_dmac_fltr_cnt(pf)) {
+ netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try again\n",
+ mac);
+ return -EAGAIN;
+ }
+
netif_addr_lock_bh(netdev);
ether_addr_copy(old_mac, netdev->dev_addr);
/* change the netdev's MAC address */
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
netif_addr_unlock_bh(netdev);
/* Clean up old MAC filter. Not an error if old filter doesn't exist */
@@ -5175,7 +5411,7 @@ err_update_filters:
netdev_err(netdev, "can't set MAC %pM. filter update failed\n",
mac);
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, old_mac);
+ eth_hw_addr_set(netdev, old_mac);
netif_addr_unlock_bh(netdev);
return err;
}
@@ -5380,10 +5616,10 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
!(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, true, false);
+ ret = ice_cfg_vlan_pruning(vsi, true);
else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
- ret = ice_cfg_vlan_pruning(vsi, false, false);
+ ret = ice_cfg_vlan_pruning(vsi, false);
if ((features & NETIF_F_NTUPLE) &&
!(netdev->features & NETIF_F_NTUPLE)) {
@@ -5395,6 +5631,18 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
ice_clear_arfs(vsi);
}
+ /* don't turn off hw_tc_offload when ADQ is already enabled */
+ if (!(features & NETIF_F_HW_TC) && ice_is_adq_active(pf)) {
+ dev_err(ice_pf_to_dev(pf), "ADQ is active, can't turn hw_tc_offload off\n");
+ return -EACCES;
+ }
+
+ if ((features & NETIF_F_HW_TC) &&
+ !(netdev->features & NETIF_F_HW_TC))
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ else
+ clear_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+
return ret;
}
@@ -5444,77 +5692,59 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
}
/* THEORY OF MODERATION:
- * The below code creates custom DIM profiles for use by this driver, because
- * the ice driver hardware works differently than the hardware that DIMLIB was
+ * The ice driver hardware works differently than the hardware that DIMLIB was
* originally made for. ice hardware doesn't have packet count limits that
* can trigger an interrupt, but it *does* have interrupt rate limit support,
- * and this code adds that capability to be used by the driver when it's using
- * DIMLIB. The DIMLIB code was always designed to be a suggestion to the driver
- * for how to "respond" to traffic and interrupts, so this driver uses a
- * slightly different set of moderation parameters to get best performance.
+ * which is hard-coded to a limit of 250,000 ints/second.
+ * If not using dynamic moderation, the INTRL value can be modified
+ * by ethtool rx-usecs-high.
*/
struct ice_dim {
/* the throttle rate for interrupts, basically worst case delay before
* an initial interrupt fires, value is stored in microseconds.
*/
u16 itr;
- /* the rate limit for interrupts, which can cap a delay from a small
- * ITR at a certain amount of interrupts per second. f.e. a 2us ITR
- * could yield as much as 500,000 interrupts per second, but with a
- * 10us rate limit, it limits to 100,000 interrupts per second. Value
- * is stored in microseconds.
- */
- u16 intrl;
};
/* Make a different profile for Rx that doesn't allow quite so aggressive
- * moderation at the high end (it maxes out at 128us or about 8k interrupts a
- * second. The INTRL/rate parameters here are only useful to cap small ITR
- * values, which is why for larger ITR's - like 128, which can only generate
- * 8k interrupts per second, there is no point to rate limit and the values
- * are set to zero. The rate limit values do affect latency, and so must
- * be reasonably small so to not impact latency sensitive tests.
+ * moderation at the high end (it maxes out at 126us or about 8k interrupts a
+ * second).
*/
static const struct ice_dim rx_profile[] = {
- {2, 10},
- {8, 16},
- {32, 0},
- {96, 0},
- {128, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {16}, /* 62,500 ints/s */
+ {62}, /* 16,129 ints/s */
+ {126} /* 7,936 ints/s */
};
/* The transmit profile, which has the same sorts of values
* as the previous struct
*/
static const struct ice_dim tx_profile[] = {
- {2, 10},
- {8, 16},
- {64, 0},
- {128, 0},
- {256, 0}
+ {2}, /* 500,000 ints/s, capped at 250K by INTRL */
+ {8}, /* 125,000 ints/s */
+ {40}, /* 25,000 ints/s */
+ {128}, /* 7,812 ints/s */
+ {256} /* 3,906 ints/s */
};
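The ints/s figures in the two profile tables above follow from a simple relation: with an ITR of N microseconds the hardware fires at most one interrupt every N us, i.e. 1,000,000 / N per second, further capped by the hard-coded 250,000 ints/s rate limit mentioned in the moderation comment. A quick standalone check (illustrative only, not part of the patch):

#include <stdio.h>

#define INTRL_CAP 250000        /* ints/s, per the moderation comment above */

static unsigned int itr_to_ints_per_sec(unsigned int itr_us)
{
        unsigned int rate = 1000000 / itr_us;

        return rate > INTRL_CAP ? INTRL_CAP : rate;
}

int main(void)
{
        unsigned int itrs[] = { 2, 8, 40, 62, 126, 256 };
        unsigned int i;

        for (i = 0; i < sizeof(itrs) / sizeof(itrs[0]); i++)
                /* 2us -> 250000 (capped), 8us -> 125000, ... 126us -> 7936 */
                printf("%3uus -> %u ints/s\n", itrs[i], itr_to_ints_per_sec(itrs[i]));
        return 0;
}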
static void ice_tx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, tx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(tx_profile))
- dim->profile_ix = ARRAY_SIZE(tx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile));
/* look up the values in our local table */
itr = tx_profile[dim->profile_ix].itr;
- intrl = tx_profile[dim->profile_ix].intrl;
- ice_trace(tx_dim_work, q_vector, dim);
+ ice_trace(tx_dim_work, container_of(rc, struct ice_q_vector, tx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
@@ -5522,28 +5752,65 @@ static void ice_tx_dim_work(struct work_struct *work)
static void ice_rx_dim_work(struct work_struct *work)
{
struct ice_ring_container *rc;
- struct ice_q_vector *q_vector;
struct dim *dim;
- u16 itr, intrl;
+ u16 itr;
dim = container_of(work, struct dim, work);
- rc = container_of(dim, struct ice_ring_container, dim);
- q_vector = container_of(rc, struct ice_q_vector, rx);
+ rc = (struct ice_ring_container *)dim->priv;
- if (dim->profile_ix >= ARRAY_SIZE(rx_profile))
- dim->profile_ix = ARRAY_SIZE(rx_profile) - 1;
+ WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile));
/* look up the values in our local table */
itr = rx_profile[dim->profile_ix].itr;
- intrl = rx_profile[dim->profile_ix].intrl;
- ice_trace(rx_dim_work, q_vector, dim);
+ ice_trace(rx_dim_work, container_of(rc, struct ice_q_vector, rx), dim);
ice_write_itr(rc, itr);
- ice_write_intrl(q_vector, intrl);
dim->state = DIM_START_MEASURE;
}
+#define ICE_DIM_DEFAULT_PROFILE_IX 1
+
+/**
+ * ice_init_moderation - set up interrupt moderation
+ * @q_vector: the vector containing rings to be configured
+ *
+ * Set up the interrupt moderation registers so that the right thing happens
+ * whether called from reset or from probe, and whether or not dynamic
+ * moderation is enabled. Take care to write all the registers in both cases
+ * so that the hardware is left in a known state.
+ */
+static void ice_init_moderation(struct ice_q_vector *q_vector)
+{
+ struct ice_ring_container *rc;
+ bool tx_dynamic, rx_dynamic;
+
+ rc = &q_vector->tx;
+ INIT_WORK(&rc->dim.work, ice_tx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ tx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial TX ITR to match the above */
+ ice_write_itr(rc, tx_dynamic ?
+ tx_profile[rc->dim.profile_ix].itr : rc->itr_setting);
+
+ rc = &q_vector->rx;
+ INIT_WORK(&rc->dim.work, ice_rx_dim_work);
+ rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX;
+ rc->dim.priv = rc;
+ rx_dynamic = ITR_IS_DYNAMIC(rc);
+
+ /* set the initial RX ITR to match the above */
+ ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr :
+ rc->itr_setting);
+
+ ice_set_q_vector_intrl(q_vector);
+}
+
/**
* ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
* @vsi: the VSI being configured
@@ -5558,13 +5825,9 @@ static void ice_napi_enable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- INIT_WORK(&q_vector->tx.dim.work, ice_tx_dim_work);
- q_vector->tx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ ice_init_moderation(q_vector);
- INIT_WORK(&q_vector->rx.dim.work, ice_rx_dim_work);
- q_vector->rx.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
-
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_enable(&q_vector->napi);
}
}
@@ -5624,7 +5887,8 @@ int ice_up(struct ice_vsi *vsi)
/**
* ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
- * @ring: Tx or Rx ring to read stats from
+ * @syncp: pointer to u64_stats_sync
+ * @stats: stats from which the packet and byte counts are taken
* @pkts: packets stats counter
* @bytes: bytes stats counter
*
@@ -5632,19 +5896,16 @@ int ice_up(struct ice_vsi *vsi)
* that needs to be performed to read u64 values in 32 bit machine.
*/
static void
-ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
+ice_fetch_u64_stats_per_ring(struct u64_stats_sync *syncp, struct ice_q_stats stats,
+ u64 *pkts, u64 *bytes)
{
unsigned int start;
- *pkts = 0;
- *bytes = 0;
- if (!ring)
- return;
do {
- start = u64_stats_fetch_begin_irq(&ring->syncp);
- *pkts = ring->stats.pkts;
- *bytes = ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(syncp);
+ *pkts = stats.pkts;
+ *bytes = stats.bytes;
+ } while (u64_stats_fetch_retry_irq(syncp, start));
}
/**
@@ -5654,18 +5915,19 @@ ice_fetch_u64_stats_per_ring(struct ice_ring *ring, u64 *pkts, u64 *bytes)
* @count: number of rings
*/
static void
-ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_ring **rings,
+ice_update_vsi_tx_ring_stats(struct ice_vsi *vsi, struct ice_tx_ring **rings,
u16 count)
{
struct rtnl_link_stats64 *vsi_stats = &vsi->net_stats;
u16 i;
for (i = 0; i < count; i++) {
- struct ice_ring *ring;
- u64 pkts, bytes;
+ struct ice_tx_ring *ring;
+ u64 pkts = 0, bytes = 0;
ring = READ_ONCE(rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ if (ring)
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->tx_packets += pkts;
vsi_stats->tx_bytes += bytes;
vsi->tx_restart += ring->tx_stats.restart_q;
@@ -5704,9 +5966,9 @@ static void ice_update_vsi_ring_stats(struct ice_vsi *vsi)
/* update Rx rings counters */
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = READ_ONCE(vsi->rx_rings[i]);
+ struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]);
- ice_fetch_u64_stats_per_ring(ring, &pkts, &bytes);
+ ice_fetch_u64_stats_per_ring(&ring->syncp, ring->stats, &pkts, &bytes);
vsi_stats->rx_packets += pkts;
vsi_stats->rx_bytes += bytes;
vsi->rx_buf_failed += ring->rx_stats.alloc_buf_failed;
@@ -5970,7 +6232,7 @@ static void ice_napi_disable_all(struct ice_vsi *vsi)
ice_for_each_q_vector(vsi, q_idx) {
struct ice_q_vector *q_vector = vsi->q_vectors[q_idx];
- if (q_vector->rx.ring || q_vector->tx.ring)
+ if (q_vector->rx.rx_ring || q_vector->tx.tx_ring)
napi_disable(&q_vector->napi);
cancel_work_sync(&q_vector->tx.dim.work);
@@ -5989,9 +6251,11 @@ int ice_down(struct ice_vsi *vsi)
/* Caller of this function is expected to set the
* vsi->state ICE_DOWN bit
*/
- if (vsi->netdev) {
+ if (vsi->netdev && vsi->type == ICE_VSI_PF) {
netif_carrier_off(vsi->netdev);
netif_tx_disable(vsi->netdev);
+ } else if (vsi->type == ICE_VSI_SWITCHDEV_CTRL) {
+ ice_eswitch_stop_all_tx_queues(vsi->back);
}
ice_vsi_dis_irq(vsi);
@@ -6053,12 +6317,13 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
}
ice_for_each_txq(vsi, i) {
- struct ice_ring *ring = vsi->tx_rings[i];
+ struct ice_tx_ring *ring = vsi->tx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_tx_ring(ring);
if (err)
break;
@@ -6084,12 +6349,13 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
}
ice_for_each_rxq(vsi, i) {
- struct ice_ring *ring = vsi->rx_rings[i];
+ struct ice_rx_ring *ring = vsi->rx_rings[i];
if (!ring)
return -EINVAL;
- ring->netdev = vsi->netdev;
+ if (vsi->netdev)
+ ring->netdev = vsi->netdev;
err = ice_setup_rx_ring(ring);
if (err)
break;
@@ -6162,7 +6428,7 @@ err_setup_tx:
*
* Returns 0 on success, negative value on error
*/
-static int ice_vsi_open(struct ice_vsi *vsi)
+int ice_vsi_open(struct ice_vsi *vsi)
{
char int_name[ICE_INT_NAME_STR_LEN];
struct ice_pf *pf = vsi->back;
@@ -6187,14 +6453,16 @@ static int ice_vsi_open(struct ice_vsi *vsi)
if (err)
goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
- if (err)
- goto err_set_qs;
+ if (vsi->type == ICE_VSI_PF) {
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq);
+ if (err)
+ goto err_set_qs;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
- if (err)
- goto err_set_qs;
+ err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq);
+ if (err)
+ goto err_set_qs;
+ }
err = ice_up_complete(vsi);
if (err)
@@ -6229,6 +6497,9 @@ static void ice_vsi_release_all(struct ice_pf *pf)
if (!pf->vsi[i])
continue;
+ if (pf->vsi[i]->type == ICE_VSI_CHNL)
+ continue;
+
err = ice_vsi_release(pf->vsi[i]);
if (err)
dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
@@ -6433,6 +6704,21 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_vsi_rebuild;
}
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_SWITCHDEV_CTRL);
+ if (err) {
+ dev_err(dev, "Switchdev CTRL VSI rebuild failed: %d\n", err);
+ goto err_vsi_rebuild;
+ }
+
+ if (reset_type == ICE_RESET_PFR) {
+ err = ice_rebuild_channels(pf);
+ if (err) {
+ dev_err(dev, "failed to rebuild and replay ADQ VSIs, err %d\n",
+ err);
+ goto err_vsi_rebuild;
+ }
+ }
+
/* If Flow Director is active */
if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) {
err = ice_vsi_rebuild_by_type(pf, ICE_VSI_CTRL);
@@ -6979,7 +7265,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
- struct ice_ring *tx_ring = NULL;
+ struct ice_tx_ring *tx_ring = NULL;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
u32 i;
@@ -6997,7 +7283,7 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/* now that we have an index, find the tx_ring struct */
- for (i = 0; i < vsi->num_txq; i++)
+ ice_for_each_txq(vsi, i)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
if (txqueue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
@@ -7054,6 +7340,1050 @@ static void ice_tx_timeout(struct net_device *netdev, unsigned int txqueue)
}
/**
+ * ice_setup_tc_cls_flower - flower classifier offloads
+ * @np: net device to configure
+ * @filter_dev: device on which filter is added
+ * @cls_flower: offload data
+ */
+static int
+ice_setup_tc_cls_flower(struct ice_netdev_priv *np,
+ struct net_device *filter_dev,
+ struct flow_cls_offload *cls_flower)
+{
+ struct ice_vsi *vsi = np->vsi;
+
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(filter_dev, vsi, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(vsi, cls_flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * ice_setup_tc_block_cb - callback handler registered for TC block
+ * @type: TC SETUP type
+ * @type_data: TC flower offload data that contains user input
+ * @cb_priv: netdev private data
+ */
+static int
+ice_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct ice_netdev_priv *np = cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, np->vsi->netdev,
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * ice_validate_mqprio_qopt - Validate TCF input parameters
+ * @vsi: Pointer to VSI
+ * @mqprio_qopt: input parameters for mqprio queue configuration
+ *
+ * This function validates MQPRIO params, such as qcount (power of 2 wherever
+ * needed), and makes sure the user doesn't specify qcount and BW rate limits
+ * for more TCs than "num_tc"
+ */
+static int
+ice_validate_mqprio_qopt(struct ice_vsi *vsi,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 sum_max_rate = 0, sum_min_rate = 0;
+ int non_power_of_2_qcount = 0;
+ struct ice_pf *pf = vsi->back;
+ int max_rss_q_cnt = 0;
+ struct device *dev;
+ int i, speed;
+ u8 num_tc;
+
+ if (vsi->type != ICE_VSI_PF)
+ return -EINVAL;
+
+ if (mqprio_qopt->qopt.offset[0] != 0 ||
+ mqprio_qopt->qopt.num_tc < 1 ||
+ mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ vsi->ch_rss_size = 0;
+ num_tc = mqprio_qopt->qopt.num_tc;
+
+ for (i = 0; num_tc; i++) {
+ int qcount = mqprio_qopt->qopt.count[i];
+ u64 max_rate, min_rate, rem;
+
+ if (!qcount)
+ return -EINVAL;
+
+ if (is_power_of_2(qcount)) {
+ if (non_power_of_2_qcount &&
+ qcount > non_power_of_2_qcount) {
+ dev_err(dev, "qcount[%d] cannot be greater than non power of 2 qcount[%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount > max_rss_q_cnt)
+ max_rss_q_cnt = qcount;
+ } else {
+ if (non_power_of_2_qcount &&
+ qcount != non_power_of_2_qcount) {
+ dev_err(dev, "Only one non power of 2 qcount allowed[%d,%d]\n",
+ qcount, non_power_of_2_qcount);
+ return -EINVAL;
+ }
+ if (qcount < max_rss_q_cnt) {
+ dev_err(dev, "non power of 2 qcount[%d] cannot be less than other qcount[%d]\n",
+ qcount, max_rss_q_cnt);
+ return -EINVAL;
+ }
+ max_rss_q_cnt = qcount;
+ non_power_of_2_qcount = qcount;
+ }
+
+ /* The TC command takes input in Kbps/Mbps/Gbps or Kbit/Mbit/Gbit
+ * etc., but converts the bandwidth rate limit into Bytes/s when
+ * passing it down to the driver. So convert the input bandwidth
+ * from Bytes/s to Kbps
+ */
+ max_rate = mqprio_qopt->max_rate[i];
+ max_rate = div_u64(max_rate, ICE_BW_KBPS_DIVISOR);
+ sum_max_rate += max_rate;
+
+ /* min_rate is minimum guaranteed rate and it can't be zero */
+ min_rate = mqprio_qopt->min_rate[i];
+ min_rate = div_u64(min_rate, ICE_BW_KBPS_DIVISOR);
+ sum_min_rate += min_rate;
+
+ if (min_rate && min_rate < ICE_MIN_BW_LIMIT) {
+ dev_err(dev, "TC%d: min_rate(%llu Kbps) < %u Kbps\n", i,
+ min_rate, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(min_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Min Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ iter_div_u64_rem(max_rate, ICE_MIN_BW_LIMIT, &rem);
+ if (rem) {
+ dev_err(dev, "TC%d: Max Rate not multiple of %u Kbps",
+ i, ICE_MIN_BW_LIMIT);
+ return -EINVAL;
+ }
+
+ /* min_rate can't be more than max_rate, except when max_rate
+ * is zero (implies max_rate sought is max line rate). In such
+ * a case min_rate can be more than max.
+ */
+ if (max_rate && min_rate > max_rate) {
+ dev_err(dev, "min_rate %llu Kbps can't be more than max_rate %llu Kbps\n",
+ min_rate, max_rate);
+ return -EINVAL;
+ }
+
+ if (i >= mqprio_qopt->qopt.num_tc - 1)
+ break;
+ if (mqprio_qopt->qopt.offset[i + 1] !=
+ (mqprio_qopt->qopt.offset[i] + qcount))
+ return -EINVAL;
+ }
+ if (vsi->num_rxq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+ if (vsi->num_txq <
+ (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i]))
+ return -EINVAL;
+
+ speed = ice_get_link_speed_kbps(vsi);
+ if (sum_max_rate && sum_max_rate > (u64)speed) {
+ dev_err(dev, "Invalid max Tx rate(%llu) Kbps > speed(%u) Kbps specified\n",
+ sum_max_rate, speed);
+ return -EINVAL;
+ }
+ if (sum_min_rate && sum_min_rate > (u64)speed) {
+ dev_err(dev, "Invalid min Tx rate(%llu) Kbps > speed (%u) Kbps specified\n",
+ sum_min_rate, speed);
+ return -EINVAL;
+ }
+
+ /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */
+ vsi->ch_rss_size = max_rss_q_cnt;
+
+ return 0;
+}
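As a quick, self-contained illustration of the Bytes/s-to-Kbps conversion described in the comment inside ice_validate_mqprio_qopt() above, here is a hedged user-space sketch. It assumes ICE_BW_KBPS_DIVISOR equals 125 (1 kbit/s is 125 bytes/s); the macro's actual value is defined elsewhere in the driver.

#include <stdint.h>
#include <stdio.h>

#define ICE_BW_KBPS_DIVISOR 125ULL	/* assumed: 1 kbit/s == 125 bytes/s */

int main(void)
{
	/* tc passes rate limits to the driver in bytes per second */
	uint64_t max_rate_bytes = 1250000000ULL;	/* i.e. 10 Gbit/s */
	uint64_t max_rate_kbps = max_rate_bytes / ICE_BW_KBPS_DIVISOR;

	printf("%llu bytes/s -> %llu Kbps\n",
	       (unsigned long long)max_rate_bytes,
	       (unsigned long long)max_rate_kbps);	/* 10000000 Kbps */
	return 0;
}

With this divisor a 10 Gbit/s request from tc round-trips back to 10,000,000 Kbps, which is the value the driver then checks against link speed and the minimum BW limit.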
+
+/**
+ * ice_add_channel - add a channel by adding VSI
+ * @pf: ptr to PF device
+ * @sw_id: underlying HW switching element ID
+ * @ch: ptr to channel structure
+ *
+ * Add a channel (VSI) using add_vsi and queue_map
+ */
+static int ice_add_channel(struct ice_pf *pf, u16 sw_id, struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *vsi;
+
+ if (ch->type != ICE_VSI_CHNL) {
+ dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type);
+ return -EINVAL;
+ }
+
+ vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch);
+ if (!vsi || vsi->type != ICE_VSI_CHNL) {
+ dev_err(dev, "create chnl VSI failure\n");
+ return -EINVAL;
+ }
+
+ ch->sw_id = sw_id;
+ ch->vsi_num = vsi->vsi_num;
+ ch->info.mapping_flags = vsi->info.mapping_flags;
+ ch->ch_vsi = vsi;
+ /* set the back pointer of channel for newly created VSI */
+ vsi->ch = ch;
+
+ memcpy(&ch->info.q_mapping, &vsi->info.q_mapping,
+ sizeof(vsi->info.q_mapping));
+ memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping,
+ sizeof(vsi->info.tc_mapping));
+
+ return 0;
+}
+
+/**
+ * ice_chnl_cfg_res
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Configure channel-specific resources such as rings and vectors.
+ */
+static void ice_chnl_cfg_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ int i;
+
+ for (i = 0; i < ch->num_txq; i++) {
+ struct ice_q_vector *tx_q_vector, *rx_q_vector;
+ struct ice_ring_container *rc;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (!tx_ring || !rx_ring)
+ continue;
+
+ /* attach the rings to the channel (channel enabled) */
+ tx_ring->ch = ch;
+ rx_ring->ch = ch;
+
+ /* following code block sets up vector specific attributes */
+ tx_q_vector = tx_ring->q_vector;
+ rx_q_vector = rx_ring->q_vector;
+ if (!tx_q_vector && !rx_q_vector)
+ continue;
+
+ if (tx_q_vector) {
+ tx_q_vector->ch = ch;
+ /* setup Tx ITR setting if DIM is off */
+ rc = &tx_q_vector->tx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ if (rx_q_vector) {
+ rx_q_vector->ch = ch;
+ /* setup Rx ITR setting if DIM is off */
+ rc = &rx_q_vector->rx;
+ if (!ITR_IS_DYNAMIC(rc))
+ ice_write_itr(rc, rc->itr_setting);
+ }
+ }
+
+ /* it is safe to assume that, if the channel has a non-zero num_txq or
+ * num_rxq, then the GLINT_ITR register would have been written to
+ * perform an in-context update, hence perform a flush
+ */
+ if (ch->num_txq || ch->num_rxq)
+ ice_flush(&vsi->back->hw);
+}
+
+/**
+ * ice_cfg_chnl_all_res - configure channel resources
+ * @vsi: ptr to main VSI
+ * @ch: ptr to channel structure
+ *
+ * This function configures channel specific resources such as flow-director
+ * counter index, and other resources such as queues, vectors, ITR settings
+ */
+static void
+ice_cfg_chnl_all_res(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ /* configure channel (aka ADQ) resources such as queues, vectors,
+ * ITR settings for channel specific vectors and anything else
+ */
+ ice_chnl_cfg_res(vsi, ch);
+}
+
+/**
+ * ice_setup_hw_channel - setup new channel
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ * @sw_id: underlying HW switching element ID
+ * @type: type of channel to be created (VMDq2/VF)
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and configures Tx rings accordingly
+ */
+static int
+ice_setup_hw_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch, u16 sw_id, u8 type)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int ret;
+
+ ch->base_q = vsi->next_base_q;
+ ch->type = type;
+
+ ret = ice_add_channel(pf, sw_id, ch);
+ if (ret) {
+ dev_err(dev, "failed to add_channel using sw_id %u\n", sw_id);
+ return ret;
+ }
+
+ /* configure/setup ADQ specific resources */
+ ice_cfg_chnl_all_res(vsi, ch);
+
+ /* make sure to update next_base_q so that the queue maps of subsequent
+ * channel (aka ADQ) VSIs are correct
+ */
+ vsi->next_base_q = vsi->next_base_q + ch->num_rxq;
+ dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num,
+ ch->num_rxq);
+
+ return 0;
+}
+
+/**
+ * ice_setup_channel - setup new channel using uplink element
+ * @pf: ptr to PF device
+ * @vsi: the VSI being setup
+ * @ch: ptr to channel structure
+ *
+ * Setup new channel (VSI) based on specified type (VMDq2/VF)
+ * and uplink switching element
+ */
+static bool
+ice_setup_channel(struct ice_pf *pf, struct ice_vsi *vsi,
+ struct ice_channel *ch)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ u16 sw_id;
+ int ret;
+
+ if (vsi->type != ICE_VSI_PF) {
+ dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type);
+ return false;
+ }
+
+ sw_id = pf->first_sw->sw_id;
+
+ /* create channel (VSI) */
+ ret = ice_setup_hw_channel(pf, vsi, ch, sw_id, ICE_VSI_CHNL);
+ if (ret) {
+ dev_err(dev, "failed to setup hw_channel\n");
+ return false;
+ }
+ dev_dbg(dev, "successfully created channel()\n");
+
+ return ch->ch_vsi ? true : false;
+}
+
+/**
+ * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
+ * @vsi: VSI to be configured
+ * @max_tx_rate: max Tx rate in Kbps to be configured as maximum BW limit
+ * @min_tx_rate: min Tx rate in Kbps to be configured as minimum BW limit
+ */
+static int
+ice_set_bw_limit(struct ice_vsi *vsi, u64 max_tx_rate, u64 min_tx_rate)
+{
+ int err;
+
+ err = ice_set_min_bw_limit(vsi, min_tx_rate);
+ if (err)
+ return err;
+
+ return ice_set_max_bw_limit(vsi, max_tx_rate);
+}
+
+/**
+ * ice_create_q_channel - function to create channel
+ * @vsi: VSI to be configured
+ * @ch: ptr to channel (it contains channel specific params)
+ *
+ * This function creates a channel (VSI) using the num_queues specified by
+ * the user and reconfigures RSS if needed.
+ */
+static int ice_create_q_channel(struct ice_vsi *vsi, struct ice_channel *ch)
+{
+ struct ice_pf *pf = vsi->back;
+ struct device *dev;
+
+ if (!ch)
+ return -EINVAL;
+
+ dev = ice_pf_to_dev(pf);
+ if (!ch->num_txq || !ch->num_rxq) {
+ dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq);
+ return -EINVAL;
+ }
+
+ if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) {
+ dev_err(dev, "cnt_q_avail (%u) less than num_queues %d\n",
+ vsi->cnt_q_avail, ch->num_txq);
+ return -EINVAL;
+ }
+
+ if (!ice_setup_channel(pf, vsi, ch)) {
+ dev_info(dev, "Failed to setup channel\n");
+ return -EINVAL;
+ }
+ /* configure BW rate limit */
+ if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) {
+ int ret;
+
+ ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (ret)
+ dev_err(dev, "failed to set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "set Tx rate of %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->ch_vsi->vsi_num);
+ }
+
+ vsi->cnt_q_avail -= ch->num_txq;
+
+ return 0;
+}
+
+/**
+ * ice_rem_all_chnl_fltrs - removes all channel filters
+ * @pf: ptr to PF, TC-flower based filters are tracked at PF level
+ *
+ * Remove all advanced switch filters only if they are channel specific
+ * tc-flower based filters
+ */
+static void ice_rem_all_chnl_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ /* to remove all channel filters, iterate an ordered list of filters */
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ struct ice_rule_query_data rule;
+ int status;
+
+ /* for now process only channel specific filters */
+ if (!ice_is_chnl_fltr(fltr))
+ continue;
+
+ rule.rid = fltr->rid;
+ rule.rule_id = fltr->rule_id;
+ rule.vsi_handle = fltr->dest_id;
+ status = ice_rem_adv_rule_by_id(&pf->hw, &rule);
+ if (status) {
+ if (status == -ENOENT)
+ dev_dbg(ice_pf_to_dev(pf), "TC flower filter (rule_id %u) does not exist\n",
+ rule.rule_id);
+ else
+ dev_err(ice_pf_to_dev(pf), "failed to delete TC flower filter, status %d\n",
+ status);
+ } else if (fltr->dest_vsi) {
+ /* update advanced switch filter count */
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ u32 flags = fltr->flags;
+
+ fltr->dest_vsi->num_chnl_fltr--;
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+
+ hlist_del(&fltr->tc_flower_node);
+ kfree(fltr);
+ }
+}
+
+/**
+ * ice_remove_q_channels - Remove queue channels for the TCs
+ * @vsi: VSI to be configured
+ * @rem_fltr: delete advanced switch filter or not
+ *
+ * Remove queue channels for the TCs
+ */
+static void ice_remove_q_channels(struct ice_vsi *vsi, bool rem_fltr)
+{
+ struct ice_channel *ch, *ch_tmp;
+ struct ice_pf *pf = vsi->back;
+ int i;
+
+ /* remove all tc-flower based filters if they are channel filters only */
+ if (rem_fltr)
+ ice_rem_all_chnl_fltrs(pf);
+
+ /* perform cleanup for channels if they exist */
+ list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ list_del(&ch->list);
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi) {
+ kfree(ch);
+ continue;
+ }
+
+ /* Reset queue contexts */
+ for (i = 0; i < ch->num_rxq; i++) {
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
+
+ tx_ring = vsi->tx_rings[ch->base_q + i];
+ rx_ring = vsi->rx_rings[ch->base_q + i];
+ if (tx_ring) {
+ tx_ring->ch = NULL;
+ if (tx_ring->q_vector)
+ tx_ring->q_vector->ch = NULL;
+ }
+ if (rx_ring) {
+ rx_ring->ch = NULL;
+ if (rx_ring->q_vector)
+ rx_ring->q_vector->ch = NULL;
+ }
+ }
+
+ /* clear the VSI from scheduler tree */
+ ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx);
+
+ /* Delete VSI from FW */
+ ice_vsi_delete(ch->ch_vsi);
+
+ /* Delete VSI from PF and HW VSI arrays */
+ ice_vsi_clear(ch->ch_vsi);
+
+ /* free the channel */
+ kfree(ch);
+ }
+
+ /* clear the channel VSI map which is stored in main VSI */
+ ice_for_each_chnl_tc(i)
+ vsi->tc_map_vsi[i] = NULL;
+
+ /* reset main VSI's all TC information */
+ vsi->all_enatc = 0;
+ vsi->all_numtc = 0;
+}
+
+/**
+ * ice_rebuild_channels - rebuild channels
+ * @pf: ptr to PF
+ *
+ * Recreate channel VSIs and replay filters
+ */
+static int ice_rebuild_channels(struct ice_pf *pf)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ struct ice_vsi *main_vsi;
+ bool rem_adv_fltr = true;
+ struct ice_channel *ch;
+ struct ice_vsi *vsi;
+ int tc_idx = 1;
+ int i, err;
+
+ main_vsi = ice_get_main_vsi(pf);
+ if (!main_vsi)
+ return 0;
+
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) ||
+ main_vsi->old_numtc == 1)
+ return 0; /* nothing to be done */
+
+ /* reconfigure main VSI based on old value of TC and cached values
+ * for MQPRIO opts
+ */
+ err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc);
+ if (err) {
+ dev_err(dev, "failed configuring TC(ena_tc:0x%02x) for HW VSI=%u\n",
+ main_vsi->old_ena_tc, main_vsi->vsi_num);
+ return err;
+ }
+
+ /* rebuild ADQ VSIs */
+ ice_for_each_vsi(pf, i) {
+ enum ice_vsi_type type;
+
+ vsi = pf->vsi[i];
+ if (!vsi || vsi->type != ICE_VSI_CHNL)
+ continue;
+
+ type = vsi->type;
+
+ /* rebuild ADQ VSI */
+ err = ice_vsi_rebuild(vsi, true);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) at index %d rebuild failed, err %d\n",
+ ice_vsi_type_str(type), vsi->idx, err);
+ goto cleanup;
+ }
+
+ /* Re-map HW VSI number, using VSI handle that has been
+ * previously validated in ice_replay_vsi() call above
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+
+ /* replay filters for the VSI */
+ err = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (err) {
+ dev_err(dev, "VSI (type:%s) replay failed, err %d, VSI index %d\n",
+ ice_vsi_type_str(type), err, vsi->idx);
+ rem_adv_fltr = false;
+ goto cleanup;
+ }
+ dev_info(dev, "VSI (type:%s) at index %d rebuilt successfully\n",
+ ice_vsi_type_str(type), vsi->idx);
+
+ /* store ADQ VSI at correct TC index in main VSI's
+ * map of TC to VSI
+ */
+ main_vsi->tc_map_vsi[tc_idx++] = vsi;
+ }
+
+ /* ADQ VSI(s) have been rebuilt successfully, so set up the channels
+ * for the main VSI's Tx and Rx rings
+ */
+ list_for_each_entry(ch, &main_vsi->ch_list, list) {
+ struct ice_vsi *ch_vsi;
+
+ ch_vsi = ch->ch_vsi;
+ if (!ch_vsi)
+ continue;
+
+ /* reconfig channel resources */
+ ice_cfg_chnl_all_res(main_vsi, ch);
+
+ /* replay BW rate limit if it is non-zero */
+ if (!ch->max_tx_rate && !ch->min_tx_rate)
+ continue;
+
+ err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate,
+ ch->min_tx_rate);
+ if (err)
+ dev_err(dev, "failed (err:%d) to rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ err, ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ else
+ dev_dbg(dev, "successfully rebuild BW rate limit, max_tx_rate: %llu Kbps, min_tx_rate: %llu Kbps for VSI(%u)\n",
+ ch->max_tx_rate, ch->min_tx_rate,
+ ch_vsi->vsi_num);
+ }
+
+ /* reconfig RSS for main VSI */
+ if (main_vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(main_vsi);
+
+ return 0;
+
+cleanup:
+ ice_remove_q_channels(main_vsi, rem_adv_fltr);
+ return err;
+}
+
+/**
+ * ice_create_q_channels - Add queue channel for the given TCs
+ * @vsi: VSI to be configured
+ *
+ * Configures queue channel mapping to the given TCs
+ */
+static int ice_create_q_channels(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_channel *ch;
+ int ret = 0, i;
+
+ ice_for_each_chnl_tc(i) {
+ if (!(vsi->all_enatc & BIT(i)))
+ continue;
+
+ ch = kzalloc(sizeof(*ch), GFP_KERNEL);
+ if (!ch) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ INIT_LIST_HEAD(&ch->list);
+ ch->num_rxq = vsi->mqprio_qopt.qopt.count[i];
+ ch->num_txq = vsi->mqprio_qopt.qopt.count[i];
+ ch->base_q = vsi->mqprio_qopt.qopt.offset[i];
+ ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i];
+ ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i];
+
+ /* convert to Kbits/s */
+ if (ch->max_tx_rate)
+ ch->max_tx_rate = div_u64(ch->max_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+ if (ch->min_tx_rate)
+ ch->min_tx_rate = div_u64(ch->min_tx_rate,
+ ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_create_q_channel(vsi, ch);
+ if (ret) {
+ dev_err(ice_pf_to_dev(pf),
+ "failed creating channel TC:%d\n", i);
+ kfree(ch);
+ goto err_free;
+ }
+ list_add_tail(&ch->list, &vsi->ch_list);
+ vsi->tc_map_vsi[i] = ch->ch_vsi;
+ dev_dbg(ice_pf_to_dev(pf),
+ "successfully created channel: VSI %pK\n", ch->ch_vsi);
+ }
+ return 0;
+
+err_free:
+ ice_remove_q_channels(vsi, false);
+
+ return ret;
+}
+
+/**
+ * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
+ * @netdev: net device to configure
+ * @type_data: TC offload data
+ */
+static int ice_setup_tc_mqprio_qdisc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_vsi *vsi = np->vsi;
+ struct ice_pf *pf = vsi->back;
+ u16 mode, ena_tc_qdisc = 0;
+ int cur_txq, cur_rxq;
+ u8 hw = 0, num_tcf;
+ struct device *dev;
+ int ret, i;
+
+ dev = ice_pf_to_dev(pf);
+ num_tcf = mqprio_qopt->qopt.num_tc;
+ hw = mqprio_qopt->qopt.hw;
+ mode = mqprio_qopt->mode;
+ if (!hw) {
+ clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ vsi->ch_rss_size = 0;
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ goto config_tcf;
+ }
+
+ /* Generate queue region map for number of TCF requested */
+ for (i = 0; i < num_tcf; i++)
+ ena_tc_qdisc |= BIT(i);
+
+ switch (mode) {
+ case TC_MQPRIO_MODE_CHANNEL:
+
+ ret = ice_validate_mqprio_qopt(vsi, mqprio_qopt);
+ if (ret) {
+ netdev_err(netdev, "failed to validate_mqprio_qopt(), ret %d\n",
+ ret);
+ return ret;
+ }
+ memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt));
+ set_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
+ /* don't assume the state of hw_tc_offload during driver load,
+ * and set the flag for TC flower filters if hw_tc_offload is
+ * already ON
+ */
+ if (vsi->netdev->features & NETIF_F_HW_TC)
+ set_bit(ICE_FLAG_CLS_FLOWER, pf->flags);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+config_tcf:
+
+ /* Requesting same TCF configuration as already enabled */
+ if (ena_tc_qdisc == vsi->tc_cfg.ena_tc &&
+ mode != TC_MQPRIO_MODE_CHANNEL)
+ return 0;
+
+ /* Pause VSI queues */
+ ice_dis_vsi(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags))
+ ice_remove_q_channels(vsi, true);
+
+ if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf),
+ num_online_cpus());
+ vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf),
+ num_online_cpus());
+ } else {
+ /* logic to rebuild the VSI, same as ethtool -L */
+ u16 offset = 0, qcount_tx = 0, qcount_rx = 0;
+
+ for (i = 0; i < num_tcf; i++) {
+ if (!(ena_tc_qdisc & BIT(i)))
+ continue;
+
+ offset = vsi->mqprio_qopt.qopt.offset[i];
+ qcount_rx = vsi->mqprio_qopt.qopt.count[i];
+ qcount_tx = vsi->mqprio_qopt.qopt.count[i];
+ }
+ vsi->req_txq = offset + qcount_tx;
+ vsi->req_rxq = offset + qcount_rx;
+
+ /* store away the original rss_size info, so that it gets reused
+ * from ice_vsi_rebuild during the tc-qdisc delete stage - to
+ * determine what the rss_size for the main VSI should be
+ */
+ vsi->orig_rss_size = vsi->rss_size;
+ }
+
+ /* save current values of Tx and Rx queues before calling VSI rebuild
+ * for fallback option
+ */
+ cur_txq = vsi->num_txq;
+ cur_rxq = vsi->num_rxq;
+
+ /* proceed with rebuilding the main VSI using the correct number of queues */
+ ret = ice_vsi_rebuild(vsi, false);
+ if (ret) {
+ /* fallback to current number of queues */
+ dev_info(dev, "Rebuild failed with new queues, try with current number of queues\n");
+ vsi->req_txq = cur_txq;
+ vsi->req_rxq = cur_rxq;
+ clear_bit(ICE_RESET_FAILED, pf->state);
+ if (ice_vsi_rebuild(vsi, false)) {
+ dev_err(dev, "Rebuild of main VSI failed again\n");
+ return ret;
+ }
+ }
+
+ vsi->all_numtc = num_tcf;
+ vsi->all_enatc = ena_tc_qdisc;
+ ret = ice_vsi_cfg_tc(vsi, ena_tc_qdisc);
+ if (ret) {
+ netdev_err(netdev, "failed configuring TC for VSI id=%d\n",
+ vsi->vsi_num);
+ goto exit;
+ }
+
+ if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {
+ u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0];
+ u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0];
+
+ /* set TC0 rate limit if specified */
+ if (max_tx_rate || min_tx_rate) {
+ /* convert to Kbits/s */
+ if (max_tx_rate)
+ max_tx_rate = div_u64(max_tx_rate, ICE_BW_KBPS_DIVISOR);
+ if (min_tx_rate)
+ min_tx_rate = div_u64(min_tx_rate, ICE_BW_KBPS_DIVISOR);
+
+ ret = ice_set_bw_limit(vsi, max_tx_rate, min_tx_rate);
+ if (!ret) {
+ dev_dbg(dev, "set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ } else {
+ dev_err(dev, "failed to set Tx rate max %llu min %llu for VSI(%u)\n",
+ max_tx_rate, min_tx_rate, vsi->vsi_num);
+ goto exit;
+ }
+ }
+ ret = ice_create_q_channels(vsi);
+ if (ret) {
+ netdev_err(netdev, "failed configuring queue channels\n");
+ goto exit;
+ } else {
+ netdev_dbg(netdev, "successfully configured channels\n");
+ }
+ }
+
+ if (vsi->ch_rss_size)
+ ice_vsi_cfg_rss_lut_key(vsi);
+
+exit:
+ /* if error, reset the all_numtc and all_enatc */
+ if (ret) {
+ vsi->all_numtc = 0;
+ vsi->all_enatc = 0;
+ }
+ /* resume VSI */
+ ice_ena_vsi(vsi, true);
+
+ return ret;
+}
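To make the queue sizing in the mqprio rebuild path above easier to follow, here is a small stand-alone sketch of the same loop: for an enabled channel layout, the last enabled TC's offset plus its qcount becomes the requested Tx/Rx queue count. The layout values are hypothetical.

#include <stdio.h>

int main(void)
{
	/* hypothetical mqprio channel layout with three TCs */
	unsigned int count[]  = { 4, 4, 8 };	/* queues per TC      */
	unsigned int offset[] = { 0, 4, 8 };	/* first queue per TC */
	unsigned int num_tcf = 3, ena_tc_qdisc = 0, req_q = 0, i;

	for (i = 0; i < num_tcf; i++)
		ena_tc_qdisc |= 1u << i;

	/* mirrors the "ethtool -L"-style sizing loop above: the last
	 * enabled TC's offset + count becomes the requested queue count
	 */
	for (i = 0; i < num_tcf; i++) {
		if (!(ena_tc_qdisc & (1u << i)))
			continue;
		req_q = offset[i] + count[i];
	}

	printf("req_txq = req_rxq = %u\n", req_q);	/* 16 */
	return 0;
}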
+
+static LIST_HEAD(ice_block_cb_list);
+
+static int
+ice_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_pf *pf = np->vsi->back;
+ int err;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &ice_block_cb_list,
+ ice_setup_tc_block_cb,
+ np, np, true);
+ case TC_SETUP_QDISC_MQPRIO:
+ /* setup traffic classifier for receive side */
+ mutex_lock(&pf->tc_mutex);
+ err = ice_setup_tc_mqprio_qdisc(netdev, type_data);
+ mutex_unlock(&pf->tc_mutex);
+ return err;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return -EOPNOTSUPP;
+}
+
+static struct ice_indr_block_priv *
+ice_indr_block_priv_lookup(struct ice_netdev_priv *np,
+ struct net_device *netdev)
+{
+ struct ice_indr_block_priv *cb_priv;
+
+ list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) {
+ if (!cb_priv->netdev)
+ return NULL;
+ if (cb_priv->netdev == netdev)
+ return cb_priv;
+ }
+ return NULL;
+}
+
+static int
+ice_indr_setup_block_cb(enum tc_setup_type type, void *type_data,
+ void *indr_priv)
+{
+ struct ice_indr_block_priv *priv = indr_priv;
+ struct ice_netdev_priv *np = priv->np;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_setup_tc_cls_flower(np, priv->netdev,
+ (struct flow_cls_offload *)
+ type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+ice_indr_setup_tc_block(struct net_device *netdev, struct Qdisc *sch,
+ struct ice_netdev_priv *np,
+ struct flow_block_offload *f, void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ struct ice_indr_block_priv *indr_priv;
+ struct flow_block_cb *block_cb;
+
+ if (!ice_is_tunnel_supported(netdev) &&
+ !(is_vlan_dev(netdev) &&
+ vlan_dev_real_dev(netdev) == np->vsi->netdev))
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ indr_priv = ice_indr_block_priv_lookup(np, netdev);
+ if (indr_priv)
+ return -EEXIST;
+
+ indr_priv = kzalloc(sizeof(*indr_priv), GFP_KERNEL);
+ if (!indr_priv)
+ return -ENOMEM;
+
+ indr_priv->netdev = netdev;
+ indr_priv->np = np;
+ list_add(&indr_priv->list, &np->tc_indr_block_priv_list);
+
+ block_cb =
+ flow_indr_block_cb_alloc(ice_indr_setup_block_cb,
+ indr_priv, indr_priv,
+ ice_rep_indr_tc_block_unbind,
+ f, netdev, sch, data, np,
+ cleanup);
+
+ if (IS_ERR(block_cb)) {
+ list_del(&indr_priv->list);
+ kfree(indr_priv);
+ return PTR_ERR(block_cb);
+ }
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &ice_block_cb_list);
+ break;
+ case FLOW_BLOCK_UNBIND:
+ indr_priv = ice_indr_block_priv_lookup(np, netdev);
+ if (!indr_priv)
+ return -ENOENT;
+
+ block_cb = flow_block_cb_lookup(f->block,
+ ice_indr_setup_block_cb,
+ indr_priv);
+ if (!block_cb)
+ return -ENOENT;
+
+ flow_indr_block_cb_remove(block_cb, f);
+
+ list_del(&block_cb->driver_list);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int
+ice_indr_setup_tc_cb(struct net_device *netdev, struct Qdisc *sch,
+ void *cb_priv, enum tc_setup_type type, void *type_data,
+ void *data,
+ void (*cleanup)(struct flow_block_cb *block_cb))
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return ice_indr_setup_tc_block(netdev, sch, cb_priv, type_data,
+ data, cleanup);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* ice_open - Called when a network interface becomes active
* @netdev: network interface device structure
*
@@ -7111,7 +8441,7 @@ int ice_open_internal(struct net_device *netdev)
return -EIO;
}
- ice_check_module_power(pf, pi->phy.link_info.link_cfg_err);
+ ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);
/* Set PHY if there is media, otherwise, turn off PHY */
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
@@ -7239,6 +8569,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
.ndo_start_xmit = ice_start_xmit,
+ .ndo_select_queue = ice_select_queue,
.ndo_features_check = ice_features_check,
.ndo_set_rx_mode = ice_set_rx_mode,
.ndo_set_mac_address = ice_set_mac_address,
@@ -7254,8 +8585,10 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_set_vf_vlan = ice_set_vf_port_vlan,
.ndo_set_vf_link_state = ice_set_vf_link_state,
.ndo_get_vf_stats = ice_get_vf_stats,
+ .ndo_set_vf_rate = ice_set_vf_bw,
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
+ .ndo_setup_tc = ice_setup_tc,
.ndo_set_features = ice_set_features,
.ndo_bridge_getlink = ice_bridge_getlink,
.ndo_bridge_setlink = ice_bridge_setlink,
diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
index 199aa5b71540..dc1b0e9e6df5 100644
--- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h
@@ -3,6 +3,56 @@
#ifndef _ICE_PROTOCOL_TYPE_H_
#define _ICE_PROTOCOL_TYPE_H_
+#define ICE_IPV6_ADDR_LENGTH 16
+
+/* Each recipe can match up to 5 different fields. Fields to match can be meta-
+ * data, values extracted from packet headers, or results from other recipes.
+ * One of the 5 fields is reserved for matching the switch ID. So, up to 4
+ * recipes can provide intermediate results to another one through chaining,
+ * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4.
+ */
+#define ICE_NUM_WORDS_RECIPE 4
+
+/* Max recipes that can be chained */
+#define ICE_MAX_CHAIN_RECIPE 5
+
+/* One of the allowed 5 words is reserved for the switch ID, so a recipe can
+ * have at most 4 words, and up to 5 such recipes can be chained together.
+ * The maximum number of words that can be programmed for a lookup is
+ * therefore 5 * 4.
+ */
+#define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE)
+
+/* Field vector index corresponding to chaining */
+#define ICE_CHAIN_FV_INDEX_START 47
+
+enum ice_protocol_type {
+ ICE_MAC_OFOS = 0,
+ ICE_MAC_IL,
+ ICE_ETYPE_OL,
+ ICE_VLAN_OFOS,
+ ICE_IPV4_OFOS,
+ ICE_IPV4_IL,
+ ICE_IPV6_OFOS,
+ ICE_IPV6_IL,
+ ICE_TCP_IL,
+ ICE_UDP_OF,
+ ICE_UDP_ILOS,
+ ICE_VXLAN,
+ ICE_GENEVE,
+ ICE_NVGRE,
+ ICE_VXLAN_GPE,
+ ICE_SCTP_IL,
+ ICE_PROTOCOL_LAST
+};
+
+enum ice_sw_tunnel_type {
+ ICE_NON_TUN = 0,
+ ICE_SW_TUN_VXLAN,
+ ICE_SW_TUN_GENEVE,
+ ICE_SW_TUN_NVGRE,
+ ICE_ALL_TUNNELS /* All tunnel types including NVGRE */
+};
+
/* Decoders for ice_prot_id:
* - F: First
* - I: Inner
@@ -35,4 +85,158 @@ enum ice_prot_id {
ICE_PROT_META_ID = 255, /* when offset == metadata */
ICE_PROT_INVALID = 255 /* when offset == ICE_FV_OFFSET_INVAL */
};
+
+#define ICE_VNI_OFFSET 12 /* offset of VNI from ICE_PROT_UDP_OF */
+
+#define ICE_MAC_OFOS_HW 1
+#define ICE_MAC_IL_HW 4
+#define ICE_ETYPE_OL_HW 9
+#define ICE_VLAN_OF_HW 16
+#define ICE_VLAN_OL_HW 17
+#define ICE_IPV4_OFOS_HW 32
+#define ICE_IPV4_IL_HW 33
+#define ICE_IPV6_OFOS_HW 40
+#define ICE_IPV6_IL_HW 41
+#define ICE_TCP_IL_HW 49
+#define ICE_UDP_ILOS_HW 53
+#define ICE_GRE_OF_HW 64
+
+#define ICE_UDP_OF_HW 52 /* UDP Tunnels */
+#define ICE_META_DATA_ID_HW 255 /* this is used for tunnel type */
+
+#define ICE_MDID_SIZE 2
+#define ICE_TUN_FLAG_MDID 21
+#define ICE_TUN_FLAG_MDID_OFF (ICE_MDID_SIZE * ICE_TUN_FLAG_MDID)
+#define ICE_TUN_FLAG_MASK 0xFF
+
+#define ICE_TUN_FLAG_FV_IND 2
+
+/* Mapping of software defined protocol ID to hardware defined protocol ID */
+struct ice_protocol_entry {
+ enum ice_protocol_type type;
+ u8 protocol_id;
+};
+
+struct ice_ether_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+};
+
+struct ice_ethtype_hdr {
+ __be16 ethtype_id;
+};
+
+struct ice_ether_vlan_hdr {
+ u8 dst_addr[ETH_ALEN];
+ u8 src_addr[ETH_ALEN];
+ __be32 vlan_id;
+};
+
+struct ice_vlan_hdr {
+ __be16 type;
+ __be16 vlan;
+};
+
+struct ice_ipv4_hdr {
+ u8 version;
+ u8 tos;
+ __be16 total_length;
+ __be16 id;
+ __be16 frag_off;
+ u8 time_to_live;
+ u8 protocol;
+ __be16 check;
+ __be32 src_addr;
+ __be32 dst_addr;
+};
+
+struct ice_ipv6_hdr {
+ __be32 be_ver_tc_flow;
+ __be16 payload_len;
+ u8 next_hdr;
+ u8 hop_limit;
+ u8 src_addr[ICE_IPV6_ADDR_LENGTH];
+ u8 dst_addr[ICE_IPV6_ADDR_LENGTH];
+};
+
+struct ice_sctp_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be32 verification_tag;
+ __be32 check;
+};
+
+struct ice_l4_hdr {
+ __be16 src_port;
+ __be16 dst_port;
+ __be16 len;
+ __be16 check;
+};
+
+struct ice_udp_tnl_hdr {
+ __be16 field;
+ __be16 proto_type;
+ __be32 vni; /* only use lower 24-bits */
+};
+
+struct ice_nvgre_hdr {
+ __be16 flags;
+ __be16 protocol;
+ __be32 tni_flow;
+};
+
+union ice_prot_hdr {
+ struct ice_ether_hdr eth_hdr;
+ struct ice_ethtype_hdr ethertype;
+ struct ice_vlan_hdr vlan_hdr;
+ struct ice_ipv4_hdr ipv4_hdr;
+ struct ice_ipv6_hdr ipv6_hdr;
+ struct ice_l4_hdr l4_hdr;
+ struct ice_sctp_hdr sctp_hdr;
+ struct ice_udp_tnl_hdr tnl_hdr;
+ struct ice_nvgre_hdr nvgre_hdr;
+};
+
+/* This is a mapping table entry that maps every word within a given protocol
+ * structure to the real byte offset as per the specification of that
+ * protocol header.
+ * e.g. the dst address is 3 words in the ethertype header and the
+ * corresponding bytes are 0, 2, 3 in the actual packet header, and the src
+ * address is at 4, 6, 8
+ */
+struct ice_prot_ext_tbl_entry {
+ enum ice_protocol_type prot_type;
+ /* Byte offset into header of given protocol type */
+ u8 offs[sizeof(union ice_prot_hdr)];
+};
+
+/* Extractions to be looked up for a given recipe */
+struct ice_prot_lkup_ext {
+ u16 prot_type;
+ u8 n_val_words;
+ /* create a buffer to hold max words per recipe */
+ u16 field_off[ICE_MAX_CHAIN_WORDS];
+ u16 field_mask[ICE_MAX_CHAIN_WORDS];
+
+ struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS];
+
+ /* Indicate field offsets that have field vector indices assigned */
+ DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS);
+};
+
+struct ice_pref_recipe_group {
+ u8 n_val_pairs; /* Number of valid pairs */
+ struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE];
+ u16 mask[ICE_NUM_WORDS_RECIPE];
+};
+
+struct ice_recp_grp_entry {
+ struct list_head l_entry;
+
+#define ICE_INVAL_CHAIN_IND 0xFF
+ u16 rid;
+ u8 chain_idx;
+ u16 fv_idx[ICE_NUM_WORDS_RECIPE];
+ u16 fv_mask[ICE_NUM_WORDS_RECIPE];
+ struct ice_pref_recipe_group r_group;
+};
#endif /* _ICE_PROTOCOL_TYPE_H_ */
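A minimal, self-contained sketch of how struct ice_protocol_entry declared above could pair the software protocol types with the hardware protocol IDs defined in the same header. The driver's real table lives elsewhere and may differ; the entries below are illustrative only.

#include <stdio.h>

enum ice_protocol_type { ICE_MAC_OFOS = 0, ICE_IPV4_OFOS, ICE_TCP_IL, ICE_UDP_OF };

struct ice_protocol_entry {
	enum ice_protocol_type type;
	unsigned char protocol_id;
};

/* HW IDs mirror the *_HW defines in the header above */
static const struct ice_protocol_entry prot_id_tbl[] = {
	{ ICE_MAC_OFOS,  1 },	/* ICE_MAC_OFOS_HW  */
	{ ICE_IPV4_OFOS, 32 },	/* ICE_IPV4_OFOS_HW */
	{ ICE_TCP_IL,    49 },	/* ICE_TCP_IL_HW    */
	{ ICE_UDP_OF,    52 },	/* ICE_UDP_OF_HW    */
};

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(prot_id_tbl) / sizeof(prot_id_tbl[0]); i++)
		printf("sw type %d -> hw protocol id %d\n",
		       (int)prot_id_tbl[i].type, prot_id_tbl[i].protocol_id);
	return 0;
}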
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c
index 80380aed8882..bf7247c6f58e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.c
@@ -6,6 +6,252 @@
#define E810_OUT_PROP_DELAY_NS 1
+static const struct ptp_pin_desc ice_pin_desc_e810t[] = {
+ /* name idx func chan */
+ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } },
+ { "SMA1", SMA1, PTP_PF_NONE, 1, { 0, } },
+ { "U.FL1", UFL1, PTP_PF_NONE, 1, { 0, } },
+ { "SMA2", SMA2, PTP_PF_NONE, 2, { 0, } },
+ { "U.FL2", UFL2, PTP_PF_NONE, 2, { 0, } },
+};
+
+/**
+ * ice_get_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Read the configuration of the SMA control logic and put it into the
+ * ptp_pin_desc structure
+ */
+static int
+ice_get_sma_config_e810t(struct ice_hw *hw, struct ptp_pin_desc *ptp_pins)
+{
+ u8 data, i;
+ int status;
+
+ /* Read initial pin state */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* initialize with defaults */
+ for (i = 0; i < NUM_PTP_PINS_E810T; i++) {
+ snprintf(ptp_pins[i].name, sizeof(ptp_pins[i].name),
+ "%s", ice_pin_desc_e810t[i].name);
+ ptp_pins[i].index = ice_pin_desc_e810t[i].index;
+ ptp_pins[i].func = ice_pin_desc_e810t[i].func;
+ ptp_pins[i].chan = ice_pin_desc_e810t[i].chan;
+ }
+
+ /* Parse SMA1/UFL1 */
+ switch (data & ICE_SMA1_MASK_E810T) {
+ case ICE_SMA1_MASK_E810T:
+ default:
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_DIR_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_PEROUT;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case ICE_SMA1_TX_EN_E810T:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ break;
+ case 0:
+ ptp_pins[SMA1].func = PTP_PF_EXTTS;
+ ptp_pins[UFL1].func = PTP_PF_PEROUT;
+ break;
+ }
+
+ /* Parse SMA2/UFL2 */
+ switch (data & ICE_SMA2_MASK_E810T) {
+ case ICE_SMA2_MASK_E810T:
+ default:
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_TX_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_EXTTS;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_UFL2_RX_DIS_E810T):
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ break;
+ case (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T):
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ case ICE_SMA2_DIR_EN_E810T:
+ ptp_pins[SMA2].func = PTP_PF_PEROUT;
+ ptp_pins[UFL2].func = PTP_PF_EXTTS;
+ break;
+ }
+
+ return 0;
+}
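For reference, a stand-alone sketch of the SMA1/U.FL1 decode performed by the switch above. The bit positions mirror ICE_SMA1_DIR_EN_E810T (bit 4) and ICE_SMA1_TX_EN_E810T (bit 5) defined later in ice_ptp_hw.h; the PTP_PF_* functions are represented by plain strings.

#include <stdio.h>

#define SMA1_DIR_EN (1u << 4)
#define SMA1_TX_EN  (1u << 5)
#define SMA1_MASK   (SMA1_DIR_EN | SMA1_TX_EN)

int main(void)
{
	unsigned int data;

	for (data = 0; data <= SMA1_MASK; data += SMA1_DIR_EN) {
		const char *sma1, *ufl1;

		switch (data & SMA1_MASK) {
		case SMA1_MASK:		/* both bits set: pins disabled */
			sma1 = "NONE";   ufl1 = "NONE";   break;
		case SMA1_DIR_EN:	/* output direction enabled */
			sma1 = "PEROUT"; ufl1 = "NONE";   break;
		case SMA1_TX_EN:	/* input only */
			sma1 = "EXTTS";  ufl1 = "NONE";   break;
		default:		/* 0: SMA1 RX + U.FL1 TX */
			sma1 = "EXTTS";  ufl1 = "PEROUT"; break;
		}
		printf("ctrl=0x%02x -> SMA1=%s U.FL1=%s\n", data, sma1, ufl1);
	}
	return 0;
}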
+
+/**
+ * ice_ptp_set_sma_config_e810t
+ * @hw: pointer to the hw struct
+ * @ptp_pins: pointer to the ptp_pin_desc structure
+ *
+ * Set the configuration of the SMA control logic based on the configuration in
+ * the ptp_pins parameter
+ */
+static int
+ice_ptp_set_sma_config_e810t(struct ice_hw *hw,
+ const struct ptp_pin_desc *ptp_pins)
+{
+ int status;
+ u8 data;
+
+ /* SMA1 and UFL1 cannot be set to TX at the same time */
+ if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT)
+ return -EINVAL;
+
+ /* SMA2 and UFL2 cannot be set to RX at the same time */
+ if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS)
+ return -EINVAL;
+
+ /* Read initial pin state value */
+ status = ice_read_sma_ctrl_e810t(hw, &data);
+ if (status)
+ return status;
+
+ /* Set the right state based on the desired configuration */
+ data &= ~ICE_SMA1_MASK_E810T;
+ if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 + U.FL1 disabled");
+ data |= ICE_SMA1_MASK_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX");
+ data |= ICE_SMA1_TX_EN_E810T;
+ } else if (ptp_pins[SMA1].func == PTP_PF_NONE &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ /* U.FL 1 TX will always enable SMA 1 RX */
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL1].func == PTP_PF_PEROUT) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 RX + U.FL1 TX");
+ } else if (ptp_pins[SMA1].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL1].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA1 TX");
+ data |= ICE_SMA1_DIR_EN_E810T;
+ }
+
+ data &= ~ICE_SMA2_MASK_E810T;
+ if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 + U.FL2 disabled");
+ data |= ICE_SMA2_MASK_E810T;
+ } else if (ptp_pins[SMA2].func == PTP_PF_EXTTS &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 RX");
+ data |= (ICE_SMA2_TX_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_NONE &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "UFL2 RX");
+ data |= (ICE_SMA2_DIR_EN_E810T | ICE_SMA2_TX_EN_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_NONE) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX");
+ data |= (ICE_SMA2_DIR_EN_E810T |
+ ICE_SMA2_UFL2_RX_DIS_E810T);
+ } else if (ptp_pins[SMA2].func == PTP_PF_PEROUT &&
+ ptp_pins[UFL2].func == PTP_PF_EXTTS) {
+ dev_info(ice_hw_to_dev(hw), "SMA2 TX + U.FL2 RX");
+ data |= ICE_SMA2_DIR_EN_E810T;
+ }
+
+ return ice_write_sma_ctrl_e810t(hw, data);
+}
+
+/**
+ * ice_ptp_set_sma_e810t
+ * @info: the driver's PTP info structure
+ * @pin: pin index in kernel structure
+ * @func: Pin function to be set (PTP_PF_NONE, PTP_PF_EXTTS or PTP_PF_PEROUT)
+ *
+ * Set the configuration of a single SMA pin
+ */
+static int
+ice_ptp_set_sma_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func)
+{
+ struct ptp_pin_desc ptp_pins[NUM_PTP_PINS_E810T];
+ struct ice_pf *pf = ptp_info_to_pf(info);
+ struct ice_hw *hw = &pf->hw;
+ int err;
+
+ if (pin < SMA1 || func > PTP_PF_PEROUT)
+ return -EOPNOTSUPP;
+
+ err = ice_get_sma_config_e810t(hw, ptp_pins);
+ if (err)
+ return err;
+
+ /* Disable the same function on the other pin sharing the channel */
+ if (pin == SMA1 && ptp_pins[UFL1].func == func)
+ ptp_pins[UFL1].func = PTP_PF_NONE;
+ if (pin == UFL1 && ptp_pins[SMA1].func == func)
+ ptp_pins[SMA1].func = PTP_PF_NONE;
+
+ if (pin == SMA2 && ptp_pins[UFL2].func == func)
+ ptp_pins[UFL2].func = PTP_PF_NONE;
+ if (pin == UFL2 && ptp_pins[SMA2].func == func)
+ ptp_pins[SMA2].func = PTP_PF_NONE;
+
+ /* Set up new pin function in the temp table */
+ ptp_pins[pin].func = func;
+
+ return ice_ptp_set_sma_config_e810t(hw, ptp_pins);
+}
+
+/**
+ * ice_verify_pin_e810t
+ * @info: the driver's PTP info structure
+ * @pin: Pin index
+ * @func: Assigned function
+ * @chan: Assigned channel
+ *
+ * Verify that the pin supports the requested pin function and check pin
+ * consistency. Reconfigure the SMA logic attached to the given pin to enable
+ * its desired functionality
+ */
+static int
+ice_verify_pin_e810t(struct ptp_clock_info *info, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ /* Don't allow channel reassignment */
+ if (chan != ice_pin_desc_e810t[pin].chan)
+ return -EOPNOTSUPP;
+
+ /* Check if functions are properly assigned */
+ switch (func) {
+ case PTP_PF_NONE:
+ break;
+ case PTP_PF_EXTTS:
+ if (pin == UFL1)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PEROUT:
+ if (pin == UFL2 || pin == GNSS)
+ return -EOPNOTSUPP;
+ break;
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+
+ return ice_ptp_set_sma_e810t(info, pin, func);
+}
+
/**
* ice_set_tx_tstamp - Enable or disable Tx timestamping
* @pf: The PF pointer to search in
@@ -735,17 +981,34 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
{
struct ice_pf *pf = ptp_info_to_pf(info);
struct ice_perout_channel clk_cfg = {0};
+ bool sma_pres = false;
unsigned int chan;
u32 gpio_pin;
int err;
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL))
+ sma_pres = true;
+
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
chan = rq->perout.index;
- if (chan == PPS_CLK_GEN_CHAN)
+ if (sma_pres) {
+ if (chan == ice_pin_desc_e810t[SMA1].chan)
+ clk_cfg.gpio_pin = GPIO_20;
+ else if (chan == ice_pin_desc_e810t[SMA2].chan)
+ clk_cfg.gpio_pin = GPIO_22;
+ else
+ return -1;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ clk_cfg.gpio_pin = GPIO_20;
+ else
+ clk_cfg.gpio_pin = GPIO_22;
+ } else if (chan == PPS_CLK_GEN_CHAN) {
clk_cfg.gpio_pin = PPS_PIN_INDEX;
- else
+ } else {
clk_cfg.gpio_pin = chan;
+ }
clk_cfg.period = ((rq->perout.period.sec * NSEC_PER_SEC) +
rq->perout.period.nsec);
@@ -757,7 +1020,19 @@ ice_ptp_gpio_enable_e810(struct ptp_clock_info *info,
break;
case PTP_CLK_REQ_EXTTS:
chan = rq->extts.index;
- gpio_pin = chan;
+ if (sma_pres) {
+ if (chan < ice_pin_desc_e810t[SMA2].chan)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else if (ice_is_e810t(&pf->hw)) {
+ if (chan == 0)
+ gpio_pin = GPIO_21;
+ else
+ gpio_pin = GPIO_23;
+ } else {
+ gpio_pin = chan;
+ }
err = ice_ptp_cfg_extts(pf, !!on, chan, gpio_pin,
rq->extts.flags);
@@ -1012,7 +1287,7 @@ int ice_ptp_set_ts_config(struct ice_pf *pf, struct ifreq *ifr)
* The timestamp is in ns, so we must convert the result first.
*/
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb)
{
u32 ts_high;
@@ -1038,13 +1313,93 @@ ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
}
/**
+ * ice_ptp_disable_sma_pins_e810t - Disable E810-T SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Disable the OS access to the SMA pins. Called to clear out the OS
+ * indications of pin support when we fail to setup the E810-T SMA control
+ * register.
+ */
+static void
+ice_ptp_disable_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+
+ dev_warn(dev, "Failed to configure E810-T SMA pin control\n");
+
+ info->enable = NULL;
+ info->verify = NULL;
+ info->n_pins = 0;
+ info->n_ext_ts = 0;
+ info->n_per_out = 0;
+}
+
+/**
+ * ice_ptp_setup_sma_pins_e810t - Setup the SMA pins
+ * @pf: pointer to the PF structure
+ * @info: PTP clock info structure
+ *
+ * Finish setting up the SMA pins by allocating pin_config, and setting it up
+ * according to the current status of the SMA. On failure, disable all of the
+ * extended SMA pin support.
+ */
+static void
+ice_ptp_setup_sma_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ struct device *dev = ice_pf_to_dev(pf);
+ int err;
+
+ /* Allocate memory for kernel pins interface */
+ info->pin_config = devm_kcalloc(dev, info->n_pins,
+ sizeof(*info->pin_config), GFP_KERNEL);
+ if (!info->pin_config) {
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+ return;
+ }
+
+ /* Read current SMA status */
+ err = ice_get_sma_config_e810t(&pf->hw, info->pin_config);
+ if (err)
+ ice_ptp_disable_sma_pins_e810t(pf, info);
+}
+
+/**
+ * ice_ptp_setup_pins_e810t - Setup PTP pins in sysfs
+ * @pf: pointer to the PF instance
+ * @info: PTP clock capabilities
+ */
+static void
+ice_ptp_setup_pins_e810t(struct ice_pf *pf, struct ptp_clock_info *info)
+{
+ /* Check if SMA controller is in the netlist */
+ if (ice_is_feature_supported(pf, ICE_F_SMA_CTRL) &&
+ !ice_is_pca9575_present(&pf->hw))
+ ice_clear_feature_support(pf, ICE_F_SMA_CTRL);
+
+ if (!ice_is_feature_supported(pf, ICE_F_SMA_CTRL)) {
+ info->n_ext_ts = N_EXT_TS_E810_NO_SMA;
+ info->n_per_out = N_PER_OUT_E810T_NO_SMA;
+ return;
+ }
+
+ info->n_per_out = N_PER_OUT_E810T;
+ info->n_ext_ts = N_EXT_TS_E810;
+ info->n_pins = NUM_PTP_PINS_E810T;
+ info->verify = ice_verify_pin_e810t;
+
+ /* Complete setup of the SMA pins */
+ ice_ptp_setup_sma_pins_e810t(pf, info);
+}
+
+/**
* ice_ptp_setup_pins_e810 - Setup PTP pins in sysfs
* @info: PTP clock capabilities
*/
static void ice_ptp_setup_pins_e810(struct ptp_clock_info *info)
{
- info->n_per_out = E810_N_PER_OUT;
- info->n_ext_ts = E810_N_EXT_TS;
+ info->n_per_out = N_PER_OUT_E810;
+ info->n_ext_ts = N_EXT_TS_E810;
}
/**
@@ -1062,7 +1417,10 @@ ice_ptp_set_funcs_e810(struct ice_pf *pf, struct ptp_clock_info *info)
{
info->enable = ice_ptp_gpio_enable_e810;
- ice_ptp_setup_pins_e810(info);
+ if (ice_is_e810t(&pf->hw))
+ ice_ptp_setup_pins_e810t(pf, info);
+ else
+ ice_ptp_setup_pins_e810(info);
}
/**
@@ -1571,6 +1929,9 @@ err_kworker:
*/
void ice_ptp_release(struct ice_pf *pf)
{
+ if (!test_bit(ICE_FLAG_PTP, pf->flags))
+ return;
+
/* Disable timestamping for both Tx and Rx */
ice_ptp_cfg_timestamp(pf, false);
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h
index e1c787bd5b96..f71ad317d6c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp.h
@@ -9,12 +9,21 @@
#include "ice_ptp_hw.h"
-enum ice_ptp_pin {
+enum ice_ptp_pin_e810 {
GPIO_20 = 0,
GPIO_21,
GPIO_22,
GPIO_23,
- NUM_ICE_PTP_PIN
+ NUM_PTP_PIN_E810
+};
+
+enum ice_ptp_pin_e810t {
+ GNSS = 0,
+ SMA1,
+ UFL1,
+ SMA2,
+ UFL2,
+ NUM_PTP_PINS_E810T
};
struct ice_perout_channel {
@@ -155,8 +164,11 @@ struct ice_ptp {
#define PPS_CLK_SRC_CHAN 2
#define PPS_PIN_INDEX 5
#define TIME_SYNC_PIN_INDEX 4
-#define E810_N_EXT_TS 3
-#define E810_N_PER_OUT 4
+#define N_EXT_TS_E810 3
+#define N_PER_OUT_E810 4
+#define N_PER_OUT_E810T 3
+#define N_PER_OUT_E810T_NO_SMA 2
+#define N_EXT_TS_E810_NO_SMA 2
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
struct ice_pf;
@@ -168,7 +180,7 @@ s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb);
void ice_ptp_process_ts(struct ice_pf *pf);
void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb);
void ice_ptp_init(struct ice_pf *pf);
void ice_ptp_release(struct ice_pf *pf);
@@ -196,7 +208,7 @@ ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb)
static inline void ice_ptp_process_ts(struct ice_pf *pf) { }
static inline void
-ice_ptp_rx_hwtstamp(struct ice_ring *rx_ring,
+ice_ptp_rx_hwtstamp(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc, struct sk_buff *skb) { }
static inline void ice_ptp_init(struct ice_pf *pf) { }
static inline void ice_ptp_release(struct ice_pf *pf) { }
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
index 3eca0e4eab0b..29f947c0cd2e 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c
@@ -649,3 +649,154 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx)
{
return ice_clear_phy_tstamp_e810(hw, block, idx);
}
+
+/* E810T SMA functions
+ *
+ * The following functions operate specifically on E810T hardware and are used
+ * to access the extended GPIOs available.
+ */
+
+/**
+ * ice_get_pca9575_handle
+ * @hw: pointer to the hw struct
+ * @pca9575_handle: GPIO controller's handle
+ *
+ * Find and return the GPIO controller's handle in the netlist.
+ * When found, the value is cached in the hw structure and subsequent calls
+ * return the cached value
+ */
+static int
+ice_get_pca9575_handle(struct ice_hw *hw, u16 *pca9575_handle)
+{
+ struct ice_aqc_get_link_topo *cmd;
+ struct ice_aq_desc desc;
+ int status;
+ u8 idx;
+
+ /* If handle was read previously return cached value */
+ if (hw->io_expander_handle) {
+ *pca9575_handle = hw->io_expander_handle;
+ return 0;
+ }
+
+ /* If handle was not detected read it from the netlist */
+ cmd = &desc.params.get_link_topo;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
+
+ /* Set node type to GPIO controller */
+ cmd->addr.topo_params.node_type_ctx =
+ (ICE_AQC_LINK_TOPO_NODE_TYPE_M &
+ ICE_AQC_LINK_TOPO_NODE_TYPE_GPIO_CTRL);
+
+#define SW_PCA9575_SFP_TOPO_IDX 2
+#define SW_PCA9575_QSFP_TOPO_IDX 1
+
+ /* Check if the SW IO expander controlling SMA exists in the netlist. */
+ if (hw->device_id == ICE_DEV_ID_E810C_SFP)
+ idx = SW_PCA9575_SFP_TOPO_IDX;
+ else if (hw->device_id == ICE_DEV_ID_E810C_QSFP)
+ idx = SW_PCA9575_QSFP_TOPO_IDX;
+ else
+ return -EOPNOTSUPP;
+
+ cmd->addr.topo_params.index = idx;
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
+ if (status)
+ return -EOPNOTSUPP;
+
+ /* Verify if we found the right IO expander type */
+ if (desc.params.get_link_topo.node_part_num !=
+ ICE_AQC_GET_LINK_TOPO_NODE_NR_PCA9575)
+ return -EOPNOTSUPP;
+
+ /* If present save the handle and return it */
+ hw->io_expander_handle =
+ le16_to_cpu(desc.params.get_link_topo.addr.handle);
+ *pca9575_handle = hw->io_expander_handle;
+
+ return 0;
+}
+
+/**
+ * ice_read_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: pointer to data to be read from the GPIO controller
+ *
+ * Read the SMA controller state. It is connected to pins 3-7 of Port 1 of the
+ * PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ *data = 0;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ status = ice_aq_get_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ &pin, NULL);
+ if (status)
+ break;
+ *data |= (u8)(!pin) << i;
+ }
+
+ return status;
+}
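A small stand-alone sketch of the inverted encoding used by the SMA read/write helpers: a set bit in data is driven as a cleared GPIO pin and vice versa, so a write followed by a read round-trips the value. Only bits 3-7 are touched, matching the comments above; the GPIO array is a local stand-in for the PCA9575 port.

#include <stdio.h>

static unsigned char gpio[8];	/* stand-in for the PCA9575 Port 1 pins */

static void write_sma(unsigned char data)
{
	for (int i = 3; i <= 7; i++)
		gpio[i] = !(data & (1u << i));	/* mirrors ice_write_sma_ctrl_e810t() */
}

static unsigned char read_sma(void)
{
	unsigned char data = 0;

	for (int i = 3; i <= 7; i++)
		data |= (unsigned char)(!gpio[i]) << i;	/* mirrors ice_read_sma_ctrl_e810t() */
	return data;
}

int main(void)
{
	unsigned char in = 0xA8;	/* arbitrary pattern within bits 3-7 */

	write_sma(in);
	printf("wrote 0x%02x, read back 0x%02x\n", in, read_sma());
	return 0;
}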
+
+/**
+ * ice_write_sma_ctrl_e810t
+ * @hw: pointer to the hw struct
+ * @data: data to be written to the GPIO controller
+ *
+ * Write the data to the SMA controller. It is connected to pins 3-7 of Port 1
+ * of the PCA9575 expander, so only bits 3-7 in data are valid.
+ */
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data)
+{
+ int status;
+ u16 handle;
+ u8 i;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+ if (status)
+ return status;
+
+ for (i = ICE_SMA_MIN_BIT_E810T; i <= ICE_SMA_MAX_BIT_E810T; i++) {
+ bool pin;
+
+ pin = !(data & (1 << i));
+ status = ice_aq_set_gpio(hw, handle, i + ICE_PCA9575_P1_OFFSET,
+ pin, NULL);
+ if (status)
+ break;
+ }
+
+ return status;
+}
+
+/**
+ * ice_is_pca9575_present
+ * @hw: pointer to the hw struct
+ *
+ * Check if the SW IO expander is present in the netlist
+ */
+bool ice_is_pca9575_present(struct ice_hw *hw)
+{
+ u16 handle = 0;
+ int status;
+
+ if (!ice_is_e810t(hw))
+ return false;
+
+ status = ice_get_pca9575_handle(hw, &handle);
+
+ return !status && handle;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
index 55a414e87018..b2984b5c22c1 100644
--- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
+++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h
@@ -30,6 +30,9 @@ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx);
/* E810 family functions */
int ice_ptp_init_phy_e810(struct ice_hw *hw);
+int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data);
+int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data);
+bool ice_is_pca9575_present(struct ice_hw *hw);
#define PFTSYN_SEM_BYTES 4
@@ -76,4 +79,23 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw);
#define LOW_TX_MEMORY_BANK_START 0x03090000
#define HIGH_TX_MEMORY_BANK_START 0x03090004
+/* E810T SMA controller pin control */
+#define ICE_SMA1_DIR_EN_E810T BIT(4)
+#define ICE_SMA1_TX_EN_E810T BIT(5)
+#define ICE_SMA2_UFL2_RX_DIS_E810T BIT(3)
+#define ICE_SMA2_DIR_EN_E810T BIT(6)
+#define ICE_SMA2_TX_EN_E810T BIT(7)
+
+#define ICE_SMA1_MASK_E810T (ICE_SMA1_DIR_EN_E810T | \
+ ICE_SMA1_TX_EN_E810T)
+#define ICE_SMA2_MASK_E810T (ICE_SMA2_UFL2_RX_DIS_E810T | \
+ ICE_SMA2_DIR_EN_E810T | \
+ ICE_SMA2_TX_EN_E810T)
+#define ICE_ALL_SMA_MASK_E810T (ICE_SMA1_MASK_E810T | \
+ ICE_SMA2_MASK_E810T)
+
+#define ICE_SMA_MIN_BIT_E810T 3
+#define ICE_SMA_MAX_BIT_E810T 7
+#define ICE_PCA9575_P1_OFFSET 8
+
#endif /* _ICE_PTP_HW_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c
new file mode 100644
index 000000000000..af8e6ef5f571
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.c
@@ -0,0 +1,389 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_eswitch.h"
+#include "ice_devlink.h"
+#include "ice_virtchnl_pf.h"
+#include "ice_tc_lib.h"
+
+/**
+ * ice_repr_get_sw_port_id - get port ID associated with representor
+ * @repr: pointer to port representor
+ */
+static int ice_repr_get_sw_port_id(struct ice_repr *repr)
+{
+ return repr->vf->pf->hw.port_info->lport;
+}
+
+/**
+ * ice_repr_get_phys_port_name - get phys port name
+ * @netdev: pointer to port representor netdev
+ * @buf: buffer to write the port name into
+ * @len: max length of buf
+ */
+static int
+ice_repr_get_phys_port_name(struct net_device *netdev, char *buf, size_t len)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_repr *repr = np->repr;
+ int res;
+
+ /* Devlink port is registered and devlink core is taking care of name formatting. */
+ if (repr->vf->devlink_port.devlink)
+ return -EOPNOTSUPP;
+
+ res = snprintf(buf, len, "pf%dvfr%d", ice_repr_get_sw_port_id(repr),
+ repr->vf->vf_id);
+ if (res <= 0)
+ return -EOPNOTSUPP;
+ return 0;
+}
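A throwaway userspace sketch of the resulting name format; the port and VF numbers are arbitrary and nothing here touches the driver.

#include <stdio.h>

int main(void)
{
	char buf[16];
	int lport = 0, vf_id = 3;   /* hypothetical values */

	/* same format string the representor uses: pf<port>vfr<vf> */
	snprintf(buf, sizeof(buf), "pf%dvfr%d", lport, vf_id);
	printf("%s\n", buf);        /* prints "pf0vfr3" */
	return 0;
}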
+
+/**
+ * ice_repr_get_stats64 - get VF stats for VFPR use
+ * @netdev: pointer to port representor netdev
+ * @stats: pointer to struct where stats can be stored
+ */
+static void
+ice_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+ struct ice_eth_stats *eth_stats;
+ struct ice_vsi *vsi;
+
+ if (ice_is_vf_disabled(np->repr->vf))
+ return;
+ vsi = np->repr->src_vsi;
+
+ ice_update_vsi_stats(vsi);
+ eth_stats = &vsi->eth_stats;
+
+ stats->tx_packets = eth_stats->tx_unicast + eth_stats->tx_broadcast +
+ eth_stats->tx_multicast;
+ stats->rx_packets = eth_stats->rx_unicast + eth_stats->rx_broadcast +
+ eth_stats->rx_multicast;
+ stats->tx_bytes = eth_stats->tx_bytes;
+ stats->rx_bytes = eth_stats->rx_bytes;
+ stats->multicast = eth_stats->rx_multicast;
+ stats->tx_errors = eth_stats->tx_errors;
+ stats->tx_dropped = eth_stats->tx_discards;
+ stats->rx_dropped = eth_stats->rx_discards;
+}
+
+/**
+ * ice_netdev_to_repr - Get port representor for given netdevice
+ * @netdev: pointer to port representor netdev
+ */
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->repr;
+}
+
+/**
+ * ice_repr_open - Enable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The open entry point is called when a port representor's network
+ * interface is made active by the system (IFF_UP). The corresponding
+ * VF is notified about the link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_open(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = true;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+}
+
+/**
+ * ice_repr_stop - Disable port representor's network interface
+ * @netdev: network interface device structure
+ *
+ * The stop entry point is called when a port representor's network
+ * interface is deactivated by the system. The corresponding
+ * VF is notified about the link status change.
+ *
+ * Returns 0 on success
+ */
+static int ice_repr_stop(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+ struct ice_vf *vf;
+
+ vf = repr->vf;
+ vf->link_forced = true;
+ vf->link_up = false;
+ ice_vc_notify_vf_link_state(vf);
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return 0;
+}
+
+static struct devlink_port *
+ice_repr_get_devlink_port(struct net_device *netdev)
+{
+ struct ice_repr *repr = ice_netdev_to_repr(netdev);
+
+ return &repr->vf->devlink_port;
+}
+
+static int
+ice_repr_setup_tc_cls_flower(struct ice_repr *repr,
+ struct flow_cls_offload *flower)
+{
+ switch (flower->command) {
+ case FLOW_CLS_REPLACE:
+ return ice_add_cls_flower(repr->netdev, repr->src_vsi, flower);
+ case FLOW_CLS_DESTROY:
+ return ice_del_cls_flower(repr->src_vsi, flower);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+ice_repr_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct flow_cls_offload *flower = (struct flow_cls_offload *)type_data;
+ struct ice_netdev_priv *np = (struct ice_netdev_priv *)cb_priv;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return ice_repr_setup_tc_cls_flower(np->repr, flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(ice_repr_block_cb_list);
+
+static int
+ice_repr_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple((struct flow_block_offload *)
+ type_data,
+ &ice_repr_block_cb_list,
+ ice_repr_setup_tc_block_cb,
+ np, np, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops ice_repr_netdev_ops = {
+ .ndo_get_phys_port_name = ice_repr_get_phys_port_name,
+ .ndo_get_stats64 = ice_repr_get_stats64,
+ .ndo_open = ice_repr_open,
+ .ndo_stop = ice_repr_stop,
+ .ndo_start_xmit = ice_eswitch_port_start_xmit,
+ .ndo_get_devlink_port = ice_repr_get_devlink_port,
+ .ndo_setup_tc = ice_repr_setup_tc,
+};
+
+/**
+ * ice_is_port_repr_netdev - Check if a given netdevice is a port representor netdev
+ * @netdev: pointer to netdev
+ */
+bool ice_is_port_repr_netdev(struct net_device *netdev)
+{
+ return netdev && (netdev->netdev_ops == &ice_repr_netdev_ops);
+}
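The check above relies on pointer identity: every representor shares the single static ice_repr_netdev_ops table, so comparing the ops pointer is enough. A generic, non-driver sketch of that pattern:

#include <stdbool.h>
#include <stdio.h>

struct ops { void (*open)(void); };
struct dev { const struct ops *ops; };

static void repr_open(void) { }
static const struct ops repr_ops = { .open = repr_open };

static bool is_repr(const struct dev *d)
{
	return d && d->ops == &repr_ops;   /* pointer identity, not a deep compare */
}

int main(void)
{
	struct dev a = { .ops = &repr_ops };
	struct dev b = { .ops = NULL };

	printf("%d %d\n", is_repr(&a), is_repr(&b));  /* prints "1 0" */
	return 0;
}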
+
+/**
+ * ice_repr_reg_netdev - register port representor netdev
+ * @netdev: pointer to port representor netdev
+ */
+static int
+ice_repr_reg_netdev(struct net_device *netdev)
+{
+ eth_hw_addr_random(netdev);
+ netdev->netdev_ops = &ice_repr_netdev_ops;
+ ice_set_ethtool_repr_ops(netdev);
+
+ netdev->hw_features |= NETIF_F_HW_TC;
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ return register_netdev(netdev);
+}
+
+/**
+ * ice_repr_add - add representor for VF
+ * @vf: pointer to VF structure
+ */
+static int ice_repr_add(struct ice_vf *vf)
+{
+ struct ice_q_vector *q_vector;
+ struct ice_netdev_priv *np;
+ struct ice_repr *repr;
+ int err;
+
+ repr = kzalloc(sizeof(*repr), GFP_KERNEL);
+ if (!repr)
+ return -ENOMEM;
+
+ repr->netdev = alloc_etherdev(sizeof(struct ice_netdev_priv));
+ if (!repr->netdev) {
+ err = -ENOMEM;
+ goto err_alloc;
+ }
+
+ repr->src_vsi = ice_get_vf_vsi(vf);
+ repr->vf = vf;
+ vf->repr = repr;
+ np = netdev_priv(repr->netdev);
+ np->repr = repr;
+
+ q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL);
+ if (!q_vector) {
+ err = -ENOMEM;
+ goto err_alloc_q_vector;
+ }
+ repr->q_vector = q_vector;
+
+ err = ice_devlink_create_vf_port(vf);
+ if (err)
+ goto err_devlink;
+
+ repr->netdev->min_mtu = ETH_MIN_MTU;
+ repr->netdev->max_mtu = ICE_MAX_MTU;
+
+ err = ice_repr_reg_netdev(repr->netdev);
+ if (err)
+ goto err_netdev;
+
+ devlink_port_type_eth_set(&vf->devlink_port, repr->netdev);
+
+ return 0;
+
+err_netdev:
+ ice_devlink_destroy_vf_port(vf);
+err_devlink:
+ kfree(repr->q_vector);
+ vf->repr->q_vector = NULL;
+err_alloc_q_vector:
+ free_netdev(repr->netdev);
+ repr->netdev = NULL;
+err_alloc:
+ kfree(repr);
+ vf->repr = NULL;
+ return err;
+}
+
+/**
+ * ice_repr_rem - remove representor from VF
+ * @vf: pointer to VF structure
+ */
+static void ice_repr_rem(struct ice_vf *vf)
+{
+ ice_devlink_destroy_vf_port(vf);
+ kfree(vf->repr->q_vector);
+ vf->repr->q_vector = NULL;
+ unregister_netdev(vf->repr->netdev);
+ free_netdev(vf->repr->netdev);
+ vf->repr->netdev = NULL;
+ kfree(vf->repr);
+ vf->repr = NULL;
+}
+
+/**
+ * ice_repr_add_for_all_vfs - add port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+int ice_repr_add_for_all_vfs(struct ice_pf *pf)
+{
+ int err;
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ err = ice_repr_add(vf);
+ if (err)
+ goto err;
+
+ ice_vc_change_ops_to_repr(&vf->vc_ops);
+ }
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+
+ return err;
+}
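The error path above unwinds only the representors that were already created, walking back from the index that failed. A standalone sketch of that rollback pattern, with made-up setup/teardown helpers:

#include <stdio.h>

static int setup(int i)     { return i == 3 ? -1 : 0; }  /* fail on the 4th entry */
static void teardown(int i) { printf("teardown %d\n", i); }

int main(void)
{
	int err = 0, i;

	for (i = 0; i < 5; i++) {
		err = setup(i);
		if (err)
			break;
	}

	if (err)
		for (i = i - 1; i >= 0; i--)  /* unwinds 2, 1, 0 */
			teardown(i);

	return err ? 1 : 0;
}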
+
+/**
+ * ice_repr_rem_from_all_vfs - remove port representor for all VFs
+ * @pf: pointer to PF structure
+ */
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf)
+{
+ int i;
+
+ ice_for_each_vf(pf, i) {
+ struct ice_vf *vf = &pf->vf[i];
+
+ ice_repr_rem(vf);
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
+ }
+}
+
+/**
+ * ice_repr_start_tx_queues - start Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_start_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_on(repr->netdev);
+ netif_tx_start_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_stop_tx_queues - stop Tx queues of port representor
+ * @repr: pointer to repr structure
+ */
+void ice_repr_stop_tx_queues(struct ice_repr *repr)
+{
+ netif_carrier_off(repr->netdev);
+ netif_tx_stop_all_queues(repr->netdev);
+}
+
+/**
+ * ice_repr_set_traffic_vsi - set traffic VSI for port representor
+ * @repr: representor on which the VSI will be set
+ * @vsi: pointer to VSI that will be used by port representor to pass traffic
+ */
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi)
+{
+ struct ice_netdev_priv *np = netdev_priv(repr->netdev);
+
+ np->vsi = vsi;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h
new file mode 100644
index 000000000000..806de22933c6
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_repr.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_REPR_H_
+#define _ICE_REPR_H_
+
+#include <net/dst_metadata.h>
+#include "ice.h"
+
+struct ice_repr {
+ struct ice_vsi *src_vsi;
+ struct ice_vf *vf;
+ struct ice_q_vector *q_vector;
+ struct net_device *netdev;
+ struct metadata_dst *dst;
+};
+
+int ice_repr_add_for_all_vfs(struct ice_pf *pf);
+void ice_repr_rem_from_all_vfs(struct ice_pf *pf);
+
+void ice_repr_start_tx_queues(struct ice_repr *repr);
+void ice_repr_stop_tx_queues(struct ice_repr *repr);
+
+void ice_repr_set_traffic_vsi(struct ice_repr *repr, struct ice_vsi *vsi);
+
+struct ice_repr *ice_netdev_to_repr(struct net_device *netdev);
+bool ice_is_port_repr_netdev(struct net_device *netdev);
+#endif
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c
index 9f07b6641705..ce3c7bded4cb 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.c
+++ b/drivers/net/ethernet/intel/ice/ice_sched.c
@@ -2071,6 +2071,19 @@ enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle)
}
/**
+ * ice_rm_vsi_rdma_cfg - remove VSI and its RDMA children nodes
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ *
+ * This function clears the VSI and its RDMA child nodes from the scheduler tree
+ * for all TCs.
+ */
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle)
+{
+ return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_RDMA);
+}
+
+/**
* ice_get_agg_info - get the aggregator ID
* @hw: pointer to the hardware structure
* @agg_id: aggregator ID
@@ -2999,6 +3012,43 @@ static void ice_set_clear_shared_bw(struct ice_bw_type_info *bw_t_info, u32 bw)
}
/**
+ * ice_sched_save_vsi_bw - save VSI node's BW information
+ * @pi: port information structure
+ * @vsi_handle: sw VSI handle
+ * @tc: traffic class
+ * @rl_type: rate limit type min, max, or shared
+ * @bw: bandwidth in Kbps - kilobits per second
+ *
+ * Save BW information of VSI type node for post replay use.
+ */
+static int
+ice_sched_save_vsi_bw(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ struct ice_vsi_ctx *vsi_ctx;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ return -EINVAL;
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ return -EINVAL;
+ switch (rl_type) {
+ case ICE_MIN_BW:
+ ice_set_clear_cir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_MAX_BW:
+ ice_set_clear_eir_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ case ICE_SHARED_BW:
+ ice_set_clear_shared_bw(&vsi_ctx->sched.bw_t_info[tc], bw);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
* ice_sched_calc_wakeup - calculate RL profile wakeup parameter
* @hw: pointer to the HW struct
* @bw: bandwidth in Kbps
@@ -3771,6 +3821,153 @@ ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
}
/**
+ * ice_sched_get_node_by_id_type - get node from ID type
+ * @pi: port information structure
+ * @id: identifier
+ * @agg_type: type of aggregator
+ * @tc: traffic class
+ *
+ * This function returns the node identified by ID and aggregator type for
+ * the given traffic class (TC). This function needs to be called with
+ * the scheduler lock held.
+ */
+static struct ice_sched_node *
+ice_sched_get_node_by_id_type(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc)
+{
+ struct ice_sched_node *node = NULL;
+
+ switch (agg_type) {
+ case ICE_AGG_TYPE_VSI: {
+ struct ice_vsi_ctx *vsi_ctx;
+ u16 vsi_handle = (u16)id;
+
+ if (!ice_is_vsi_valid(pi->hw, vsi_handle))
+ break;
+ /* Get sched_vsi_info */
+ vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle);
+ if (!vsi_ctx)
+ break;
+ node = vsi_ctx->sched.vsi_node[tc];
+ break;
+ }
+
+ case ICE_AGG_TYPE_AGG: {
+ struct ice_sched_node *tc_node;
+
+ tc_node = ice_sched_get_tc_node(pi, tc);
+ if (tc_node)
+ node = ice_sched_get_agg_node(pi, tc_node, id);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return node;
+}
+
+/**
+ * ice_sched_set_node_bw_lmt_per_tc - set node BW limit per TC
+ * @pi: port information structure
+ * @id: ID (software VSI handle or AGG ID)
+ * @agg_type: aggregator type (VSI or AGG type node)
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function sets the BW limit of a VSI or aggregator scheduling node
+ * for the given TC to the bandwidth passed in the bw argument.
+ */
+static enum ice_status
+ice_sched_set_node_bw_lmt_per_tc(struct ice_port_info *pi, u32 id,
+ enum ice_agg_type agg_type, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct ice_sched_node *node;
+
+ if (!pi)
+ return status;
+
+ if (rl_type == ICE_UNKNOWN_BW)
+ return status;
+
+ mutex_lock(&pi->sched_lock);
+ node = ice_sched_get_node_by_id_type(pi, id, agg_type, tc);
+ if (!node) {
+ ice_debug(pi->hw, ICE_DBG_SCHED, "Wrong id, agg type, or tc\n");
+ goto exit_set_node_bw_lmt_per_tc;
+ }
+ if (bw == ICE_SCHED_DFLT_BW)
+ status = ice_sched_set_node_bw_dflt_lmt(pi, node, rl_type);
+ else
+ status = ice_sched_set_node_bw_lmt(pi, node, rl_type, bw);
+
+exit_set_node_bw_lmt_per_tc:
+ mutex_unlock(&pi->sched_lock);
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_lmt_per_tc - configure VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ * @bw: bandwidth in Kbps
+ *
+ * This function configures the BW limit of the VSI scheduling node for the
+ * given TC.
+ */
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type, bw);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type, bw);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
+ * ice_cfg_vsi_bw_dflt_lmt_per_tc - configure default VSI BW limit per TC
+ * @pi: port information structure
+ * @vsi_handle: software VSI handle
+ * @tc: traffic class
+ * @rl_type: min or max
+ *
+ * This function configures the default BW limit of the VSI scheduling node for
+ * the given TC.
+ */
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type)
+{
+ int status;
+
+ status = ice_sched_set_node_bw_lmt_per_tc(pi, vsi_handle,
+ ICE_AGG_TYPE_VSI,
+ tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ if (!status) {
+ mutex_lock(&pi->sched_lock);
+ status = ice_sched_save_vsi_bw(pi, vsi_handle, tc, rl_type,
+ ICE_SCHED_DFLT_BW);
+ mutex_unlock(&pi->sched_lock);
+ }
+ return status;
+}
+
+/**
* ice_cfg_rl_burst_size - Set burst size value
* @hw: pointer to the HW struct
* @bytes: burst size in bytes
diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h
index 9beef8f0ec76..6bddcbecaf5e 100644
--- a/drivers/net/ethernet/intel/ice/ice_sched.h
+++ b/drivers/net/ethernet/intel/ice/ice_sched.h
@@ -58,6 +58,8 @@ struct ice_sched_agg_info {
DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
u32 agg_id;
enum ice_agg_type agg_type;
+ /* bw_t_info saves aggregator BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
/* save aggregator TC bitmap */
DECLARE_BITMAP(replay_tc_bitmap, ICE_MAX_TRAFFIC_CLASS);
};
@@ -89,6 +91,7 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
+enum ice_status ice_rm_vsi_rdma_cfg(struct ice_port_info *pi, u16 vsi_handle);
/* Tx scheduler rate limiter functions */
enum ice_status
@@ -103,6 +106,12 @@ ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
+enum ice_status
+ice_cfg_vsi_bw_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type, u32 bw);
+enum ice_status
+ice_cfg_vsi_bw_dflt_lmt_per_tc(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
+ enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
void ice_sched_replay_agg_vsi_preinit(struct ice_hw *hw);
void ice_sched_replay_agg(struct ice_hw *hw);
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
index 3b6c1420aa7b..793f4a9fc2cd 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.c
+++ b/drivers/net/ethernet/intel/ice/ice_switch.c
@@ -8,6 +8,7 @@
#define ICE_ETH_ETHTYPE_OFFSET 12
#define ICE_ETH_VLAN_TCI_OFFSET 14
#define ICE_MAX_VLAN_ID 0xFFF
+#define ICE_IPV6_ETHER_ID 0x86DD
/* Dummy ethernet header needed in the ice_aqc_sw_rules_elem
* struct to configure any switch filter rules.
@@ -29,6 +30,476 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
0x2, 0, 0, 0, 0, 0,
0x81, 0, 0, 0};
+struct ice_dummy_pkt_offsets {
+ enum ice_protocol_type type;
+ u16 offset; /* ICE_PROTOCOL_LAST indicates end of list */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_IPV4_IL, 56 },
+ { ICE_TCP_IL, 76 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 76 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets dummy_gre_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_NVGRE, 34 },
+ { ICE_MAC_IL, 42 },
+ { ICE_IPV4_IL, 56 },
+ { ICE_UDP_ILOS, 76 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_gre_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x3E, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x2F, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x80, 0x00, 0x65, 0x58, /* ICE_NVGRE 34 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 42 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x14, /* ICE_IPV4_IL 56 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 76 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_IPV4_IL, 64 },
+ { ICE_TCP_IL, 84 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x5a, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x46, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_IL 64 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x40, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 84 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x02, 0x20, 0x00,
+ 0x00, 0x00, 0x00, 0x00
+};
+
+static const struct ice_dummy_pkt_offsets dummy_udp_tun_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_OF, 34 },
+ { ICE_VXLAN, 42 },
+ { ICE_GENEVE, 42 },
+ { ICE_VXLAN_GPE, 42 },
+ { ICE_MAC_IL, 50 },
+ { ICE_IPV4_IL, 64 },
+ { ICE_UDP_ILOS, 84 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_udp_tun_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x4e, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x12, 0xb5, /* ICE_UDP_OF 34 */
+ 0x00, 0x3a, 0x00, 0x00,
+
+ 0x00, 0x00, 0x65, 0x58, /* ICE_VXLAN 42 */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_IL 50 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x00,
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_IL 64 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 84 */
+ 0x00, 0x08, 0x00, 0x00,
+};
+
+/* offset info for MAC + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_UDP_ILOS, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + UDP */
+static const u8 dummy_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 34 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN + IPv4 + UDP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_udp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_UDP_ILOS, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:UDP dummy packet */
+static const u8 dummy_vlan_udp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x1c, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x11, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 38 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV4_OFOS, 14 },
+ { ICE_TCP_IL, 34 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* Dummy packet for MAC + IPv4 + TCP */
+static const u8 dummy_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 12 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 14 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 34 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* offset info for MAC + VLAN (C-tag, 802.1Q) + IPv4 + TCP dummy packet */
+static const struct ice_dummy_pkt_offsets dummy_vlan_tcp_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV4_OFOS, 18 },
+ { ICE_TCP_IL, 38 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv4:TCP dummy packet */
+static const u8 dummy_vlan_tcp_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x08, 0x00, /* ICE_ETYPE_OL 16 */
+
+ 0x45, 0x00, 0x00, 0x28, /* ICE_IPV4_OFOS 18 */
+ 0x00, 0x01, 0x00, 0x00,
+ 0x00, 0x06, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 38 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+static const struct ice_dummy_pkt_offsets dummy_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_TCP_IL, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+static const u8 dummy_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 54 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + TCP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_tcp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_TCP_IL, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + TCP dummy packet */
+static const u8 dummy_vlan_tcp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x14, 0x06, 0x00, /* Next header is TCP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_TCP_IL 58 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x50, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets dummy_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_ETYPE_OL, 12 },
+ { ICE_IPV6_OFOS, 14 },
+ { ICE_UDP_ILOS, 54 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* IPv6 + UDP dummy packet */
+static const u8 dummy_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 12 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 14 */
+ 0x00, 0x10, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 54 */
+ 0x00, 0x10, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* needed for ESP packets */
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
+/* C-tag (802.1Q): IPv6 + UDP */
+static const struct ice_dummy_pkt_offsets
+dummy_vlan_udp_ipv6_packet_offsets[] = {
+ { ICE_MAC_OFOS, 0 },
+ { ICE_VLAN_OFOS, 12 },
+ { ICE_ETYPE_OL, 16 },
+ { ICE_IPV6_OFOS, 18 },
+ { ICE_UDP_ILOS, 58 },
+ { ICE_PROTOCOL_LAST, 0 },
+};
+
+/* C-tag (802.1Q), IPv6 + UDP dummy packet */
+static const u8 dummy_vlan_udp_ipv6_packet[] = {
+ 0x00, 0x00, 0x00, 0x00, /* ICE_MAC_OFOS 0 */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x81, 0x00, 0x00, 0x00, /* ICE_VLAN_OFOS 12 */
+
+ 0x86, 0xDD, /* ICE_ETYPE_OL 16 */
+
+ 0x60, 0x00, 0x00, 0x00, /* ICE_IPV6_OFOS 18 */
+ 0x00, 0x08, 0x11, 0x00, /* Next header UDP */
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+
+ 0x00, 0x00, 0x00, 0x00, /* ICE_UDP_ILOS 58 */
+ 0x00, 0x08, 0x00, 0x00,
+
+ 0x00, 0x00, /* 2 bytes for 4 byte alignment */
+};
+
#define ICE_SW_RULE_RX_TX_ETH_HDR_SIZE \
(offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr) + \
(DUMMY_ETH_HDR_LEN * \
@@ -42,6 +513,14 @@ static const u8 dummy_eth_header[DUMMY_ETH_HDR_LEN] = { 0x2, 0, 0, 0, 0, 0,
(offsetof(struct ice_aqc_sw_rules_elem, pdata.vsi_list.vsi) + \
((n) * sizeof(((struct ice_sw_rule_vsi_list *)0)->vsi[0])))
+/* this is a recipe to profile association bitmap */
+static DECLARE_BITMAP(recipe_to_profile[ICE_MAX_NUM_RECIPES],
+ ICE_MAX_NUM_PROFILES);
+
+/* this is a profile to recipe association bitmap */
+static DECLARE_BITMAP(profile_to_recipe[ICE_MAX_NUM_PROFILES],
+ ICE_MAX_NUM_RECIPES);
+
/**
* ice_init_def_sw_recp - initialize the recipe book keeping tables
* @hw: pointer to the HW struct
@@ -59,10 +538,11 @@ enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
if (!recps)
return ICE_ERR_NO_MEMORY;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
recps[i].root_rid = i;
INIT_LIST_HEAD(&recps[i].filt_rules);
INIT_LIST_HEAD(&recps[i].filt_replay_rules);
+ INIT_LIST_HEAD(&recps[i].rg_list);
mutex_init(&recps[i].filt_rule_lock);
}
@@ -518,7 +998,7 @@ ice_aq_alloc_free_vsi_list_exit:
*
* Add(0x02a0)/Update(0x02a1)/Remove(0x02a2) switch rules commands to firmware
*/
-static enum ice_status
+enum ice_status
ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
@@ -543,6 +1023,360 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
return status;
}
+/**
+ * ice_aq_add_recipe - add switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: number of switch recipes in the list
+ * @cd: pointer to command details structure or NULL
+ *
+ * Add(0x0290)
+ */
+static enum ice_status
+ice_aq_add_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 num_recipes, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ u16 buf_size;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_recipe);
+
+ cmd->num_sub_recipes = cpu_to_le16(num_recipes);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ buf_size = num_recipes * sizeof(*s_recipe_list);
+
+ return ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+}
+
+/**
+ * ice_aq_get_recipe - get switch recipe
+ * @hw: pointer to the HW struct
+ * @s_recipe_list: pointer to switch rule population list
+ * @num_recipes: pointer to the number of recipes (input and output)
+ * @recipe_root: root recipe number of recipe(s) to retrieve
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get(0x0292)
+ *
+ * On input, *num_recipes should equal the number of entries in s_recipe_list.
+ * On output, *num_recipes will equal the number of entries returned in
+ * s_recipe_list.
+ *
+ * The caller must supply enough space in s_recipe_list to hold all possible
+ * recipes and *num_recipes must equal ICE_MAX_NUM_RECIPES.
+ */
+static enum ice_status
+ice_aq_get_recipe(struct ice_hw *hw,
+ struct ice_aqc_recipe_data_elem *s_recipe_list,
+ u16 *num_recipes, u16 recipe_root, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_add_get_recipe *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+ u16 buf_size;
+
+ if (*num_recipes != ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ cmd = &desc.params.add_get_recipe;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe);
+
+ cmd->return_index = cpu_to_le16(recipe_root);
+ cmd->num_sub_recipes = 0;
+
+ buf_size = *num_recipes * sizeof(*s_recipe_list);
+
+ status = ice_aq_send_cmd(hw, &desc, s_recipe_list, buf_size, cd);
+ *num_recipes = le16_to_cpu(cmd->num_sub_recipes);
+
+ return status;
+}
+
+/**
+ * ice_aq_map_recipe_to_profile - Map recipe to packet profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID to associate the recipe with
+ * @r_bitmap: recipe bitmap to associate with the profile
+ * @cd: pointer to command details structure or NULL
+ * Recipe to profile association (0x0291)
+ */
+static enum ice_status
+ice_aq_map_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+ /* Set the recipe ID bits in the bitmask to let the device know which
+ * recipes we are associating with this profile
+ */
+ memcpy(cmd->recipe_assoc, r_bitmap, sizeof(cmd->recipe_assoc));
+
+ return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+}
+
+/**
+ * ice_aq_get_recipe_to_profile - Get the recipes associated with a profile
+ * @hw: pointer to the HW struct
+ * @profile_id: package profile ID whose recipe association is queried
+ * @r_bitmap: recipe bitmap filled in from the firmware response
+ * @cd: pointer to command details structure or NULL
+ * Get recipe to profile association (0x0293)
+ */
+static enum ice_status
+ice_aq_get_recipe_to_profile(struct ice_hw *hw, u32 profile_id, u8 *r_bitmap,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_recipe_to_profile *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ cmd = &desc.params.recipe_to_profile;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_recipe_to_profile);
+ cmd->profile_id = cpu_to_le16(profile_id);
+
+ status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
+ if (!status)
+ memcpy(r_bitmap, cmd->recipe_assoc, sizeof(cmd->recipe_assoc));
+
+ return status;
+}
+
+/**
+ * ice_alloc_recipe - add recipe resource
+ * @hw: pointer to the hardware structure
+ * @rid: recipe ID returned as response to AQ call
+ */
+static enum ice_status ice_alloc_recipe(struct ice_hw *hw, u16 *rid)
+{
+ struct ice_aqc_alloc_free_res_elem *sw_buf;
+ enum ice_status status;
+ u16 buf_len;
+
+ buf_len = struct_size(sw_buf, elem, 1);
+ sw_buf = kzalloc(buf_len, GFP_KERNEL);
+ if (!sw_buf)
+ return ICE_ERR_NO_MEMORY;
+
+ sw_buf->num_elems = cpu_to_le16(1);
+ sw_buf->res_type = cpu_to_le16((ICE_AQC_RES_TYPE_RECIPE <<
+ ICE_AQC_RES_TYPE_S) |
+ ICE_AQC_RES_TYPE_FLAG_SHARED);
+ status = ice_aq_alloc_free_res(hw, 1, sw_buf, buf_len,
+ ice_aqc_opc_alloc_res, NULL);
+ if (!status)
+ *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp);
+ kfree(sw_buf);
+
+ return status;
+}
+
+/**
+ * ice_get_recp_to_prof_map - updates recipe to profile mapping
+ * @hw: pointer to hardware structure
+ *
+ * This function is used to populate the recipe_to_profile matrix, where the
+ * index into the array is the recipe ID and the element is the bitmap of
+ * profiles that the recipe is mapped to.
+ */
+static void ice_get_recp_to_prof_map(struct ice_hw *hw)
+{
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 i;
+
+ for (i = 0; i < hw->switch_info->max_used_prof_index + 1; i++) {
+ u16 j;
+
+ bitmap_zero(profile_to_recipe[i], ICE_MAX_NUM_RECIPES);
+ bitmap_zero(r_bitmap, ICE_MAX_NUM_RECIPES);
+ if (ice_aq_get_recipe_to_profile(hw, i, (u8 *)r_bitmap, NULL))
+ continue;
+ bitmap_copy(profile_to_recipe[i], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ for_each_set_bit(j, r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit(i, recipe_to_profile[j]);
+ }
+}
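Conceptually this builds the inverse mapping: for every profile's recipe bitmap, the profile bit is set in each associated recipe's entry. A compact standalone sketch using plain 64-bit words instead of the kernel bitmap helpers (the counts and values are invented):

#include <stdint.h>
#include <stdio.h>

#define NUM_PROFILES 4
#define NUM_RECIPES  8

int main(void)
{
	/* hypothetical profile -> recipes association, one bit per recipe */
	uint64_t profile_to_recipe[NUM_PROFILES] = { 0x3, 0x4, 0x0, 0x18 };
	uint64_t recipe_to_profile[NUM_RECIPES] = { 0 };
	int p, r;

	for (p = 0; p < NUM_PROFILES; p++)
		for (r = 0; r < NUM_RECIPES; r++)
			if (profile_to_recipe[p] & (1ull << r))
				recipe_to_profile[r] |= 1ull << p;  /* inverse map */

	for (r = 0; r < NUM_RECIPES; r++)
		printf("recipe %d -> profiles 0x%llx\n", r,
		       (unsigned long long)recipe_to_profile[r]);
	return 0;
}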
+
+/**
+ * ice_collect_result_idx - copy result index values
+ * @buf: buffer that contains the result index
+ * @recp: the recipe struct to copy data into
+ */
+static void
+ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf,
+ struct ice_sw_recipe *recp)
+{
+ if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ recp->res_idxs);
+}
+
+/**
+ * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries
+ * @hw: pointer to hardware structure
+ * @recps: struct that we need to populate
+ * @rid: recipe ID that we are populating
+ * @refresh_required: true if we should get recipe to profile mapping from FW
+ *
+ * This function is used to populate all the necessary entries into our
+ * bookkeeping so that we have a current list of all the recipes that are
+ * programmed in the firmware.
+ */
+static enum ice_status
+ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid,
+ bool *refresh_required)
+{
+ DECLARE_BITMAP(result_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ u16 num_recps = ICE_MAX_NUM_RECIPES;
+ struct ice_prot_lkup_ext *lkup_exts;
+ enum ice_status status;
+ u8 fv_word_idx = 0;
+ u16 sub_recps;
+
+ bitmap_zero(result_bm, ICE_MAX_FV_WORDS);
+
+ /* we need a buffer big enough to accommodate all the recipes */
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ tmp[0].recipe_indx = rid;
+ status = ice_aq_get_recipe(hw, tmp, &num_recps, rid, NULL);
+ /* non-zero status meaning recipe doesn't exist */
+ if (status)
+ goto err_unroll;
+
+ /* Get recipe to profile map so that we can get the fv from lkups that
+ * we read for a recipe from FW. Since we want to minimize the number of
+ * times we make this FW call, just make one call and cache the copy
+ * until a new recipe is added. This operation is only required the
+ * first time to get the changes from FW. Then to search existing
+ * entries we don't need to update the cache again until another recipe
+ * gets added.
+ */
+ if (*refresh_required) {
+ ice_get_recp_to_prof_map(hw);
+ *refresh_required = false;
+ }
+
+ /* Start populating all the entries for recps[rid] based on lkups from
+ * firmware. Note that we are only creating the root recipe in our
+ * database.
+ */
+ lkup_exts = &recps[rid].lkup_exts;
+
+ for (sub_recps = 0; sub_recps < num_recps; sub_recps++) {
+ struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps];
+ struct ice_recp_grp_entry *rg_entry;
+ u8 i, prof, idx, prot = 0;
+ bool is_root;
+ u16 off = 0;
+
+ rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry),
+ GFP_KERNEL);
+ if (!rg_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ idx = root_bufs.recipe_indx;
+ is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT;
+
+ /* Mark all result indices in this chain */
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN)
+ set_bit(root_bufs.content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN,
+ result_bm);
+
+ /* get the first profile that is associated with rid */
+ prof = find_first_bit(recipe_to_profile[idx],
+ ICE_MAX_NUM_PROFILES);
+ for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) {
+ u8 lkup_indx = root_bufs.content.lkup_indx[i + 1];
+
+ rg_entry->fv_idx[i] = lkup_indx;
+ rg_entry->fv_mask[i] =
+ le16_to_cpu(root_bufs.content.mask[i + 1]);
+
+ /* If the recipe is a chained recipe then all its
+ * child recipes' results will have a result index.
+ * To fill fv_words we should not use those result
+ * indices; we only need the protocol IDs and offsets.
+ * Skip every fv_idx that stores a result index, and
+ * also skip any fv_idx that is ICE_AQ_RECIPE_LKUP_IGNORE
+ * or 0, since those are not valid offset values.
+ */
+ if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) ||
+ rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE ||
+ rg_entry->fv_idx[i] == 0)
+ continue;
+
+ ice_find_prot_off(hw, ICE_BLK_SW, prof,
+ rg_entry->fv_idx[i], &prot, &off);
+ lkup_exts->fv_words[fv_word_idx].prot_id = prot;
+ lkup_exts->fv_words[fv_word_idx].off = off;
+ lkup_exts->field_mask[fv_word_idx] =
+ rg_entry->fv_mask[i];
+ fv_word_idx++;
+ }
+ /* populate rg_list with the data from the child entry of this
+ * recipe
+ */
+ list_add(&rg_entry->l_entry, &recps[rid].rg_list);
+
+ /* Propagate some data to the recipe database */
+ recps[idx].is_root = !!is_root;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS);
+ if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) {
+ recps[idx].chain_idx = root_bufs.content.result_indx &
+ ~ICE_AQ_RECIPE_RESULT_EN;
+ set_bit(recps[idx].chain_idx, recps[idx].res_idxs);
+ } else {
+ recps[idx].chain_idx = ICE_INVAL_CHAIN_IND;
+ }
+
+ if (!is_root)
+ continue;
+
+ /* Only do the following for root recipes entries */
+ memcpy(recps[idx].r_bitmap, root_bufs.recipe_bitmap,
+ sizeof(recps[idx].r_bitmap));
+ recps[idx].root_rid = root_bufs.content.rid &
+ ~ICE_AQ_RECIPE_ID_IS_ROOT;
+ recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority;
+ }
+
+ /* Complete initialization of the root recipe entry */
+ lkup_exts->n_val_words = fv_word_idx;
+ recps[rid].big_recp = (num_recps > 1);
+ recps[rid].n_grp_count = (u8)num_recps;
+ recps[rid].root_buf = devm_kmemdup(ice_hw_to_dev(hw), tmp,
+ recps[rid].n_grp_count * sizeof(*recps[rid].root_buf),
+ GFP_KERNEL);
+ if (!recps[rid].root_buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+
+ /* Copy result indexes */
+ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS);
+ recps[rid].recp_created = true;
+
+err_unroll:
+ kfree(tmp);
+ return status;
+}
+
/* ice_init_port_info - Initialize port_info with switch configuration data
* @pi: pointer to port_info
* @vsi_port_num: VSI number or port number
@@ -1627,6 +2461,125 @@ exit:
}
/**
+ * ice_mac_fltr_exist - does this MAC filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @mac: MAC address to be checked (for MAC filter)
+ * @vsi_handle: check MAC filter for this VSI
+ */
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_MAC].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u8 *mac_addr = &f_info->l_data.mac.mac_addr[0];
+
+ if (is_zero_ether_addr(mac_addr))
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_MAC ||
+ f_info->fltr_act != ICE_FWD_TO_VSI ||
+ hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+
+ if (ether_addr_equal(mac, mac_addr)) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+ return false;
+}
+
+/**
+ * ice_vlan_fltr_exist - does this VLAN filter exist for given VSI
+ * @hw: pointer to the hardware structure
+ * @vlan_id: VLAN ID
+ * @vsi_handle: check VLAN filter for this VSI
+ */
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle)
+{
+ struct ice_fltr_mgmt_list_entry *entry;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 hw_vsi_id;
+
+ if (vlan_id > ICE_MAX_VLAN_ID)
+ return false;
+
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return false;
+
+ hw_vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
+ sw = hw->switch_info;
+ rule_head = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rules;
+ if (!rule_head)
+ return false;
+
+ rule_lock = &sw->recp_list[ICE_SW_LKUP_VLAN].filt_rule_lock;
+ mutex_lock(rule_lock);
+ list_for_each_entry(entry, rule_head, list_entry) {
+ struct ice_fltr_info *f_info = &entry->fltr_info;
+ u16 entry_vlan_id = f_info->l_data.vlan.vlan_id;
+ struct ice_vsi_list_map_info *map_info;
+
+ if (entry_vlan_id > ICE_MAX_VLAN_ID)
+ continue;
+
+ if (f_info->flag != ICE_FLTR_TX ||
+ f_info->src_id != ICE_SRC_ID_VSI ||
+ f_info->lkup_type != ICE_SW_LKUP_VLAN)
+ continue;
+
+ /* Only allowed filter actions are FWD_TO_VSI and FWD_TO_VSI_LIST */
+ if (f_info->fltr_act != ICE_FWD_TO_VSI &&
+ f_info->fltr_act != ICE_FWD_TO_VSI_LIST)
+ continue;
+
+ if (f_info->fltr_act == ICE_FWD_TO_VSI) {
+ if (hw_vsi_id != f_info->fwd_id.hw_vsi_id)
+ continue;
+ } else if (f_info->fltr_act == ICE_FWD_TO_VSI_LIST) {
+ /* If filter_action is FWD_TO_VSI_LIST, make sure
+ * that the VSI being checked is part of the VSI list
+ */
+ if (entry->vsi_count == 1 &&
+ entry->vsi_list_info) {
+ map_info = entry->vsi_list_info;
+ if (!test_bit(vsi_handle, map_info->vsi_map))
+ continue;
+ }
+ }
+
+ if (vlan_id == entry_vlan_id) {
+ mutex_unlock(rule_lock);
+ return true;
+ }
+ }
+ mutex_unlock(rule_lock);
+
+ return false;
+}
+
+/**
* ice_add_mac - Add a MAC address based filter rule
* @hw: pointer to the hardware structure
* @m_list: list of MAC addresses and forwarding information
@@ -2037,6 +2990,27 @@ ice_rem_sw_rule_info(struct ice_hw *hw, struct list_head *rule_head)
}
/**
+ * ice_rem_adv_rule_info
+ * @hw: pointer to the hardware structure
+ * @rule_head: pointer to the switch list structure that we want to delete
+ */
+static void
+ice_rem_adv_rule_info(struct ice_hw *hw, struct list_head *rule_head)
+{
+ struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
+ struct ice_adv_fltr_mgmt_list_entry *lst_itr;
+
+ if (list_empty(rule_head))
+ return;
+
+ list_for_each_entry_safe(lst_itr, tmp_entry, rule_head, list_entry) {
+ list_del(&lst_itr->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), lst_itr);
+ }
+}
+
+/**
* ice_cfg_dflt_vsi - change state of VSI to set/clear default
* @hw: pointer to the hardware structure
* @vsi_handle: VSI handle to set as default
@@ -2773,6 +3747,1621 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
return status;
}
+/* This mapping table maps every word within a given protocol structure to
+ * the real byte offset as per the specification of that protocol header.
+ * For example, the destination address in the Ethernet header is 3 words
+ * whose byte offsets are 0, 2 and 4 in the actual packet header, and the
+ * source address is at 6, 8 and 10.
+ * IMPORTANT: Every structure that is part of the "ice_prot_hdr" union should
+ * have a matching entry describing its fields. This table needs to be updated
+ * whenever a new structure is added to that union.
+ */
+static const struct ice_prot_ext_tbl_entry ice_prot_ext[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_MAC_IL, { 0, 2, 4, 6, 8, 10, 12 } },
+ { ICE_ETYPE_OL, { 0 } },
+ { ICE_VLAN_OFOS, { 2, 0 } },
+ { ICE_IPV4_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV4_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18 } },
+ { ICE_IPV6_OFOS, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_IPV6_IL, { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24,
+ 26, 28, 30, 32, 34, 36, 38 } },
+ { ICE_TCP_IL, { 0, 2 } },
+ { ICE_UDP_OF, { 0, 2 } },
+ { ICE_UDP_ILOS, { 0, 2 } },
+ { ICE_VXLAN, { 8, 10, 12, 14 } },
+ { ICE_GENEVE, { 8, 10, 12, 14 } },
+ { ICE_NVGRE, { 0, 2, 4, 6 } },
+};
+
+static struct ice_protocol_entry ice_prot_id_tbl[ICE_PROTOCOL_LAST] = {
+ { ICE_MAC_OFOS, ICE_MAC_OFOS_HW },
+ { ICE_MAC_IL, ICE_MAC_IL_HW },
+ { ICE_ETYPE_OL, ICE_ETYPE_OL_HW },
+ { ICE_VLAN_OFOS, ICE_VLAN_OL_HW },
+ { ICE_IPV4_OFOS, ICE_IPV4_OFOS_HW },
+ { ICE_IPV4_IL, ICE_IPV4_IL_HW },
+ { ICE_IPV6_OFOS, ICE_IPV6_OFOS_HW },
+ { ICE_IPV6_IL, ICE_IPV6_IL_HW },
+ { ICE_TCP_IL, ICE_TCP_IL_HW },
+ { ICE_UDP_OF, ICE_UDP_OF_HW },
+ { ICE_UDP_ILOS, ICE_UDP_ILOS_HW },
+ { ICE_VXLAN, ICE_UDP_OF_HW },
+ { ICE_GENEVE, ICE_UDP_OF_HW },
+ { ICE_NVGRE, ICE_GRE_OF_HW },
+};
+
+/**
+ * ice_find_recp - find a recipe
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: extension sequence to match
+ *
+ * Returns index of matching recipe, or ICE_MAX_NUM_RECIPES if not found.
+ */
+static u16 ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts)
+{
+ bool refresh_required = true;
+ struct ice_sw_recipe *recp;
+ u8 i;
+
+ /* Walk through existing recipes to find a match */
+ recp = hw->switch_info->recp_list;
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
+ /* If recipe was not created for this ID, in SW bookkeeping,
+ * check if FW has an entry for this recipe. If the FW has an
+ * entry update it in our SW bookkeeping and continue with the
+ * matching.
+ */
+ if (!recp[i].recp_created)
+ if (ice_get_recp_frm_fw(hw,
+ hw->switch_info->recp_list, i,
+ &refresh_required))
+ continue;
+
+ /* Skip inverse action recipes */
+ if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl &
+ ICE_AQ_RECIPE_ACT_INV_ACT)
+ continue;
+
+ /* if number of words we are looking for match */
+ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) {
+ struct ice_fv_word *ar = recp[i].lkup_exts.fv_words;
+ struct ice_fv_word *be = lkup_exts->fv_words;
+ u16 *cr = recp[i].lkup_exts.field_mask;
+ u16 *de = lkup_exts->field_mask;
+ bool found = true;
+ u8 pe, qr;
+
+ /* ar, cr, and qr are related to the recipe words, while
+ * be, de, and pe are related to the lookup words
+ */
+ for (pe = 0; pe < lkup_exts->n_val_words; pe++) {
+ for (qr = 0; qr < recp[i].lkup_exts.n_val_words;
+ qr++) {
+ if (ar[qr].off == be[pe].off &&
+ ar[qr].prot_id == be[pe].prot_id &&
+ cr[qr] == de[pe])
+ /* Found the "pe"th word in the
+ * given recipe
+ */
+ break;
+ }
+ /* After walking through all the words in the
+ * "i"th recipe, if the "pe"th word was not found
+ * then this recipe is not what we are looking for.
+ * So break out of this loop and try the next
+ * recipe.
+ */
+ if (qr >= recp[i].lkup_exts.n_val_words) {
+ found = false;
+ break;
+ }
+ }
+ /* If for "i"th recipe the found was never set to false
+ * then it means we found our match
+ */
+ if (found)
+ return i; /* Return the recipe ID */
+ }
+ }
+ return ICE_MAX_NUM_RECIPES;
+}
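The inner comparison amounts to checking that every (protocol ID, offset, mask) triple requested by the lookup appears among the recipe's extraction words, and that the word counts match. A standalone sketch of that check with invented values (not the driver's data structures):

#include <stdbool.h>
#include <stdio.h>

struct word { int prot; int off; unsigned mask; };

static bool words_match(const struct word *recipe, int n_recipe,
			const struct word *lkup, int n_lkup)
{
	int pe, qr;

	if (n_recipe != n_lkup)
		return false;

	for (pe = 0; pe < n_lkup; pe++) {
		for (qr = 0; qr < n_recipe; qr++)
			if (recipe[qr].prot == lkup[pe].prot &&
			    recipe[qr].off == lkup[pe].off &&
			    recipe[qr].mask == lkup[pe].mask)
				break;
		if (qr >= n_recipe)
			return false;   /* lookup word not found in recipe */
	}
	return true;
}

int main(void)
{
	struct word recipe[] = { { 4, 12, 0xffff }, { 6, 2, 0xffff } };
	struct word lkup[]   = { { 6, 2, 0xffff }, { 4, 12, 0xffff } };

	printf("%s\n", words_match(recipe, 2, lkup, 2) ? "match" : "no match");
	return 0;
}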
+
+/**
+ * ice_prot_type_to_id - get protocol ID from protocol type
+ * @type: protocol type
+ * @id: pointer to variable that will receive the ID
+ *
+ * Returns true if found, false otherwise
+ */
+static bool ice_prot_type_to_id(enum ice_protocol_type type, u8 *id)
+{
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(ice_prot_id_tbl); i++)
+ if (ice_prot_id_tbl[i].type == type) {
+ *id = ice_prot_id_tbl[i].protocol_id;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * ice_fill_valid_words - count valid words
+ * @rule: advanced rule with lookup information
+ * @lkup_exts: byte offset extractions of the words that are valid
+ *
+ * Calculate the number of valid words in a lookup rule using the mask values.
+ */
+static u8
+ice_fill_valid_words(struct ice_adv_lkup_elem *rule,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u8 j, word, prot_id, ret_val;
+
+ if (!ice_prot_type_to_id(rule->type, &prot_id))
+ return 0;
+
+ word = lkup_exts->n_val_words;
+
+ for (j = 0; j < sizeof(rule->m_u) / sizeof(u16); j++)
+ if (((u16 *)&rule->m_u)[j] &&
+ rule->type < ARRAY_SIZE(ice_prot_ext)) {
+ /* No more space to accommodate */
+ if (word >= ICE_MAX_CHAIN_WORDS)
+ return 0;
+ lkup_exts->fv_words[word].off =
+ ice_prot_ext[rule->type].offs[j];
+ lkup_exts->fv_words[word].prot_id =
+ ice_prot_id_tbl[rule->type].protocol_id;
+ lkup_exts->field_mask[word] =
+ be16_to_cpu(((__force __be16 *)&rule->m_u)[j]);
+ word++;
+ }
+
+ ret_val = word - lkup_exts->n_val_words;
+ lkup_exts->n_val_words = word;
+
+ return ret_val;
+}
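The word counting walks the lookup element's mask 16 bits at a time and records an extraction word for every non-zero mask word. A small standalone sketch with invented offsets and masks:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* hypothetical per-protocol byte offsets of each 16-bit word */
	const int offs[4] = { 0, 2, 4, 6 };
	/* hypothetical match mask: only the 2nd and 4th words matter */
	const uint16_t mask[4] = { 0x0000, 0xffff, 0x0000, 0x00ff };
	int j, n_valid = 0;

	for (j = 0; j < 4; j++)
		if (mask[j]) {
			printf("word %d: off %d mask 0x%04x\n",
			       n_valid, offs[j], mask[j]);
			n_valid++;
		}

	printf("%d valid words\n", n_valid);
	return 0;
}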
+
+/**
+ * ice_create_first_fit_recp_def - Create a recipe grouping
+ * @hw: pointer to the hardware structure
+ * @lkup_exts: an array of protocol header extractions
+ * @rg_list: pointer to a list that stores new recipe groups
+ * @recp_cnt: pointer to a variable that stores returned number of recipe groups
+ *
+ * Using a first-fit algorithm, take all the words that are still not done
+ * and group them into 4-word groups. Each group makes up one
+ * recipe.
+ */
+static enum ice_status
+ice_create_first_fit_recp_def(struct ice_hw *hw,
+ struct ice_prot_lkup_ext *lkup_exts,
+ struct list_head *rg_list,
+ u8 *recp_cnt)
+{
+ struct ice_pref_recipe_group *grp = NULL;
+ u8 j;
+
+ *recp_cnt = 0;
+
+ /* Walk through every word in the rule to check if it is not done. If so
+ * then this word needs to be part of a new recipe.
+ */
+ for (j = 0; j < lkup_exts->n_val_words; j++)
+ if (!test_bit(j, lkup_exts->done)) {
+ if (!grp ||
+ grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) {
+ struct ice_recp_grp_entry *entry;
+
+ entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*entry),
+ GFP_KERNEL);
+ if (!entry)
+ return ICE_ERR_NO_MEMORY;
+ list_add(&entry->l_entry, rg_list);
+ grp = &entry->r_group;
+ (*recp_cnt)++;
+ }
+
+ grp->pairs[grp->n_val_pairs].prot_id =
+ lkup_exts->fv_words[j].prot_id;
+ grp->pairs[grp->n_val_pairs].off =
+ lkup_exts->fv_words[j].off;
+ grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j];
+ grp->n_val_pairs++;
+ }
+
+ return 0;
+}
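The grouping itself is plain first fit: words are appended to the current group until it holds four pairs, then a new group is opened. A standalone sketch of the arithmetic (the word count is made up):

#include <stdio.h>

#define WORDS_PER_GROUP 4

int main(void)
{
	int n_words = 9;   /* hypothetical number of lookup words */
	int i, n_groups = 0, fill = WORDS_PER_GROUP;

	for (i = 0; i < n_words; i++) {
		if (fill == WORDS_PER_GROUP) {   /* current group full: open a new one */
			n_groups++;
			fill = 0;
		}
		printf("word %d -> group %d slot %d\n", i, n_groups - 1, fill);
		fill++;
	}

	printf("%d words need %d recipe group(s)\n", n_words, n_groups);
	return 0;
}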
+
+/**
+ * ice_fill_fv_word_index - fill in the field vector indices for a recipe group
+ * @hw: pointer to the hardware structure
+ * @fv_list: field vector with the extraction sequence information
+ * @rg_list: recipe groupings with protocol-offset pairs
+ *
+ * Helper function to fill in the field vector indices for protocol-offset
+ * pairs. These indexes are then ultimately programmed into a recipe.
+ */
+static enum ice_status
+ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list,
+ struct list_head *rg_list)
+{
+ struct ice_sw_fv_list_entry *fv;
+ struct ice_recp_grp_entry *rg;
+ struct ice_fv_word *fv_ext;
+
+ if (list_empty(fv_list))
+ return 0;
+
+ fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry,
+ list_entry);
+ fv_ext = fv->fv_ptr->ew;
+
+ list_for_each_entry(rg, rg_list, l_entry) {
+ u8 i;
+
+ for (i = 0; i < rg->r_group.n_val_pairs; i++) {
+ struct ice_fv_word *pr;
+ bool found = false;
+ u16 mask;
+ u8 j;
+
+ pr = &rg->r_group.pairs[i];
+ mask = rg->r_group.mask[i];
+
+ for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++)
+ if (fv_ext[j].prot_id == pr->prot_id &&
+ fv_ext[j].off == pr->off) {
+ found = true;
+
+ /* Store index of field vector */
+ rg->fv_idx[i] = j;
+ rg->fv_mask[i] = mask;
+ break;
+ }
+
+ /* Protocol/offset could not be found, caller gave an
+ * invalid pair
+ */
+ if (!found)
+ return ICE_ERR_PARAM;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ice_find_free_recp_res_idx - find free result indexes for recipe
+ * @hw: pointer to hardware structure
+ * @profiles: bitmap of profiles that will be associated with the new recipe
+ * @free_idx: pointer to variable to receive the free index bitmap
+ *
+ * The algorithm used here is:
+ * 1. When creating a new recipe, create a set P which contains all
+ * Profiles that will be associated with our new recipe
+ *
+ * 2. For each Profile p in set P:
+ * a. Add all recipes associated with Profile p into set R
+ * b. Optional : PossibleIndexes &= profile[p].possibleIndexes
+ * [initially PossibleIndexes should be 0xFFFFFFFFFFFFFFFF]
+ * i. Or just assume they all have the same possible indexes:
+ * 44, 45, 46, 47
+ * i.e., PossibleIndexes = 0x0000F00000000000
+ *
+ * 3. For each Recipe r in set R:
+ * a. UsedIndexes |= (bitwise or ) recipe[r].res_indexes
+ * b. FreeIndexes = UsedIndexes ^ PossibleIndexes
+ *
+ * FreeIndexes will contain the bits indicating the indexes free for use,
+ * then the code needs to update the recipe[r].used_result_idx_bits to
+ * indicate which indexes were selected for use by this recipe.
+ */
+static u16
+ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles,
+ unsigned long *free_idx)
+{
+ DECLARE_BITMAP(possible_idx, ICE_MAX_FV_WORDS);
+ DECLARE_BITMAP(recipes, ICE_MAX_NUM_RECIPES);
+ DECLARE_BITMAP(used_idx, ICE_MAX_FV_WORDS);
+ u16 bit;
+
+ bitmap_zero(possible_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(recipes, ICE_MAX_NUM_RECIPES);
+ bitmap_zero(used_idx, ICE_MAX_FV_WORDS);
+ bitmap_zero(free_idx, ICE_MAX_FV_WORDS);
+
+ bitmap_set(possible_idx, 0, ICE_MAX_FV_WORDS);
+
+ /* For each profile we are going to associate the recipe with, add the
+ * recipes that are associated with that profile. This will give us
+ * the set of recipes that our recipe may collide with. Also, determine
+ * what possible result indexes are usable given this set of profiles.
+ */
+ for_each_set_bit(bit, profiles, ICE_MAX_NUM_PROFILES) {
+ bitmap_or(recipes, recipes, profile_to_recipe[bit],
+ ICE_MAX_NUM_RECIPES);
+ bitmap_and(possible_idx, possible_idx,
+ hw->switch_info->prof_res_bm[bit],
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* For each recipe that our new recipe may collide with, determine
+ * which indexes have been used.
+ */
+ for_each_set_bit(bit, recipes, ICE_MAX_NUM_RECIPES)
+ bitmap_or(used_idx, used_idx,
+ hw->switch_info->recp_list[bit].res_idxs,
+ ICE_MAX_FV_WORDS);
+
+ bitmap_xor(free_idx, used_idx, possible_idx, ICE_MAX_FV_WORDS);
+
+ /* return number of free indexes */
+ return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS);
+}
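The set arithmetic in the comment above reduces to an AND over the per-profile possible-index bitmaps, an OR over the per-recipe used-index bitmaps, and a final XOR. A stand-alone sketch with a single 64-bit mask (the driver uses DECLARE_BITMAP over ICE_MAX_FV_WORDS; every value below is made up for the example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t prof_possible[2] = { 0x0000F00000000000ull,	/* profile 0 */
				      0x0000F00000000000ull };	/* profile 1 */
	uint64_t recp_used[2]	  = { 0x0000100000000000ull,	/* recipe a */
				      0x0000200000000000ull };	/* recipe b */
	uint64_t possible = UINT64_MAX;	/* start with every index possible */
	uint64_t used = 0, free_idx;
	int i;

	for (i = 0; i < 2; i++)		/* PossibleIndexes &= profile[p].possibleIndexes */
		possible &= prof_possible[i];

	for (i = 0; i < 2; i++)		/* UsedIndexes |= recipe[r].res_indexes */
		used |= recp_used[i];

	free_idx = used ^ possible;	/* FreeIndexes = UsedIndexes ^ PossibleIndexes */

	printf("free result-index mask: 0x%016llx\n",
	       (unsigned long long)free_idx);
	return 0;
}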
+
+/**
+ * ice_add_sw_recipe - function to call AQ calls to create switch recipe
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @profiles: bitmap of profiles that will be associated.
+ */
+static enum ice_status
+ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ unsigned long *profiles)
+{
+ DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS);
+ struct ice_aqc_recipe_data_elem *tmp;
+ struct ice_aqc_recipe_data_elem *buf;
+ struct ice_recp_grp_entry *entry;
+ enum ice_status status;
+ u16 free_res_idx;
+ u16 recipe_count;
+ u8 chain_idx;
+ u8 recps = 0;
+
+ /* When more than one recipe is required, another recipe is needed to
+ * chain them together. Matching a tunnel metadata ID takes up one of
+ * the match fields in the chaining recipe reducing the number of
+ * chained recipes by one.
+ */
+ /* check number of free result indices */
+ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS);
+ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm);
+
+ ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n",
+ free_res_idx, rm->n_grp_count);
+
+ if (rm->n_grp_count > 1) {
+ if (rm->n_grp_count > free_res_idx)
+ return ICE_ERR_MAX_LIMIT;
+
+ rm->n_grp_count++;
+ }
+
+ if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE)
+ return ICE_ERR_MAX_LIMIT;
+
+ tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL);
+ if (!tmp)
+ return ICE_ERR_NO_MEMORY;
+
+ buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf),
+ GFP_KERNEL);
+ if (!buf) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_mem;
+ }
+
+ bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES);
+ recipe_count = ICE_MAX_NUM_RECIPES;
+ status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC,
+ NULL);
+ if (status || recipe_count == 0)
+ goto err_unroll;
+
+ /* Allocate the recipe resources, and configure them according to the
+ * match fields from protocol headers and extracted field vectors.
+ */
+ chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS);
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ u8 i;
+
+ status = ice_alloc_recipe(hw, &entry->rid);
+ if (status)
+ goto err_unroll;
+
+ /* Clear the result index of the located recipe, as this will be
+ * updated, if needed, later in the recipe creation process.
+ */
+ tmp[0].content.result_indx = 0;
+
+ buf[recps] = tmp[0];
+ buf[recps].recipe_indx = (u8)entry->rid;
+ /* if the recipe is a non-root recipe, RID should be programmed
+ * as 0 for the rules to be applied correctly.
+ */
+ buf[recps].content.rid = 0;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask
+ * to be 0
+ */
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] = 0x80;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ for (i = 0; i < entry->r_group.n_val_pairs; i++) {
+ buf[recps].content.lkup_indx[i + 1] = entry->fv_idx[i];
+ buf[recps].content.mask[i + 1] =
+ cpu_to_le16(entry->fv_mask[i]);
+ }
+
+ if (rm->n_grp_count > 1) {
+ /* Checks to see if there really is a valid result index
+ * that can be used.
+ */
+ if (chain_idx >= ICE_MAX_FV_WORDS) {
+ ice_debug(hw, ICE_DBG_SW, "No chain index available\n");
+ status = ICE_ERR_MAX_LIMIT;
+ goto err_unroll;
+ }
+
+ entry->chain_idx = chain_idx;
+ buf[recps].content.result_indx =
+ ICE_AQ_RECIPE_RESULT_EN |
+ ((chain_idx << ICE_AQ_RECIPE_RESULT_DATA_S) &
+ ICE_AQ_RECIPE_RESULT_DATA_M);
+ clear_bit(chain_idx, result_idx_bm);
+ chain_idx = find_first_bit(result_idx_bm,
+ ICE_MAX_FV_WORDS);
+ }
+
+ /* fill recipe dependencies */
+ bitmap_zero((unsigned long *)buf[recps].recipe_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ set_bit(buf[recps].recipe_indx,
+ (unsigned long *)buf[recps].recipe_bitmap);
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+ recps++;
+ }
+
+ if (rm->n_grp_count == 1) {
+ rm->root_rid = buf[0].recipe_indx;
+ set_bit(buf[0].recipe_indx, rm->r_bitmap);
+ buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT;
+ if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) {
+ memcpy(buf[0].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[0].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ /* Applicable only for ROOT_RECIPE: set the fwd_priority for
+ * the recipe being created if the user specified one. Any
+ * advanced switch filter that results in a new extraction
+ * sequence usually ends up creating a new recipe of type ROOT,
+ * and recipes are usually associated with profiles. A switch
+ * rule referring to the newly created recipe needs either a
+ * 'fwd' or a 'join' priority, otherwise switch rule evaluation
+ * will not happen correctly. In other words, if a switch rule
+ * is to be evaluated on a priority basis, the recipe needs a
+ * priority; otherwise it will be evaluated last.
+ */
+ buf[0].content.act_ctrl_fwd_priority = rm->priority;
+ } else {
+ struct ice_recp_grp_entry *last_chain_entry;
+ u16 rid, i;
+
+ /* Allocate the last recipe that will chain the outcomes of the
+ * other recipes together
+ */
+ status = ice_alloc_recipe(hw, &rid);
+ if (status)
+ goto err_unroll;
+
+ buf[recps].recipe_indx = (u8)rid;
+ buf[recps].content.rid = (u8)rid;
+ buf[recps].content.rid |= ICE_AQ_RECIPE_ID_IS_ROOT;
+ /* the newly created entry should also be part of rg_list to
+ * make sure we have a complete recipe
+ */
+ last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(*last_chain_entry),
+ GFP_KERNEL);
+ if (!last_chain_entry) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_unroll;
+ }
+ last_chain_entry->rid = rid;
+ memset(&buf[recps].content.lkup_indx, 0,
+ sizeof(buf[recps].content.lkup_indx));
+ /* All recipes use look-up index 0 to match switch ID. */
+ buf[recps].content.lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX;
+ buf[recps].content.mask[0] =
+ cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK);
+ for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) {
+ buf[recps].content.lkup_indx[i] =
+ ICE_AQ_RECIPE_LKUP_IGNORE;
+ buf[recps].content.mask[i] = 0;
+ }
+
+ i = 1;
+ /* update r_bitmap with the recp that is used for chaining */
+ set_bit(rid, rm->r_bitmap);
+ /* this is the recipe that chains all the other recipes, so it
+ * does not need a chaining index of its own
+ */
+ last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND;
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ last_chain_entry->fv_idx[i] = entry->chain_idx;
+ buf[recps].content.lkup_indx[i] = entry->chain_idx;
+ buf[recps].content.mask[i++] = cpu_to_le16(0xFFFF);
+ set_bit(entry->rid, rm->r_bitmap);
+ }
+ list_add(&last_chain_entry->l_entry, &rm->rg_list);
+ if (sizeof(buf[recps].recipe_bitmap) >=
+ sizeof(rm->r_bitmap)) {
+ memcpy(buf[recps].recipe_bitmap, rm->r_bitmap,
+ sizeof(buf[recps].recipe_bitmap));
+ } else {
+ status = ICE_ERR_BAD_PTR;
+ goto err_unroll;
+ }
+ buf[recps].content.act_ctrl_fwd_priority = rm->priority;
+
+ recps++;
+ rm->root_rid = (u8)rid;
+ }
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL);
+ ice_release_change_lock(hw);
+ if (status)
+ goto err_unroll;
+
+ /* Add every recipe that was just created to the recipe
+ * bookkeeping list
+ */
+ list_for_each_entry(entry, &rm->rg_list, l_entry) {
+ struct ice_switch_info *sw = hw->switch_info;
+ bool is_root, idx_found = false;
+ struct ice_sw_recipe *recp;
+ u16 idx, buf_idx = 0;
+
+ /* find buffer index for copying some data */
+ for (idx = 0; idx < rm->n_grp_count; idx++)
+ if (buf[idx].recipe_indx == entry->rid) {
+ buf_idx = idx;
+ idx_found = true;
+ }
+
+ if (!idx_found) {
+ status = ICE_ERR_OUT_OF_RANGE;
+ goto err_unroll;
+ }
+
+ recp = &sw->recp_list[entry->rid];
+ is_root = (rm->root_rid == entry->rid);
+ recp->is_root = is_root;
+
+ recp->root_rid = entry->rid;
+ recp->big_recp = (is_root && rm->n_grp_count > 1);
+
+ memcpy(&recp->ext_words, entry->r_group.pairs,
+ entry->r_group.n_val_pairs * sizeof(struct ice_fv_word));
+
+ memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap,
+ sizeof(recp->r_bitmap));
+
+ /* Copy non-result fv index values and masks to recipe. This
+ * call will also update the result recipe bitmask.
+ */
+ ice_collect_result_idx(&buf[buf_idx], recp);
+
+ /* for non-root recipes, also copy to the root, this allows
+ * easier matching of a complete chained recipe
+ */
+ if (!is_root)
+ ice_collect_result_idx(&buf[buf_idx],
+ &sw->recp_list[rm->root_rid]);
+
+ recp->n_ext_words = entry->r_group.n_val_pairs;
+ recp->chain_idx = entry->chain_idx;
+ recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority;
+ recp->n_grp_count = rm->n_grp_count;
+ recp->tun_type = rm->tun_type;
+ recp->recp_created = true;
+ }
+ rm->root_buf = buf;
+ kfree(tmp);
+ return status;
+
+err_unroll:
+err_mem:
+ kfree(tmp);
+ devm_kfree(ice_hw_to_dev(hw), buf);
+ return status;
+}
+
+/**
+ * ice_create_recipe_group - creates recipe group
+ * @hw: pointer to hardware structure
+ * @rm: recipe management list entry
+ * @lkup_exts: lookup elements
+ */
+static enum ice_status
+ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ enum ice_status status;
+ u8 recp_count = 0;
+
+ rm->n_grp_count = 0;
+
+ /* Create recipes for words that are marked not done by packing them
+ * as best fit.
+ */
+ status = ice_create_first_fit_recp_def(hw, lkup_exts,
+ &rm->rg_list, &recp_count);
+ if (!status) {
+ rm->n_grp_count += recp_count;
+ rm->n_ext_words = lkup_exts->n_val_words;
+ memcpy(&rm->ext_words, lkup_exts->fv_words,
+ sizeof(rm->ext_words));
+ memcpy(rm->word_masks, lkup_exts->field_mask,
+ sizeof(rm->word_masks));
+ }
+
+ return status;
+}
+
+/**
+ * ice_get_fv - get field vectors/extraction sequences for spec. lookup types
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @bm: bitmap of field vectors to consider
+ * @fv_list: pointer to a list that holds the returned field vectors
+ */
+static enum ice_status
+ice_get_fv(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ unsigned long *bm, struct list_head *fv_list)
+{
+ enum ice_status status;
+ u8 *prot_ids;
+ u16 i;
+
+ prot_ids = kcalloc(lkups_cnt, sizeof(*prot_ids), GFP_KERNEL);
+ if (!prot_ids)
+ return ICE_ERR_NO_MEMORY;
+
+ for (i = 0; i < lkups_cnt; i++)
+ if (!ice_prot_type_to_id(lkups[i].type, &prot_ids[i])) {
+ status = ICE_ERR_CFG;
+ goto free_mem;
+ }
+
+ /* Find field vectors that include all specified protocol types */
+ status = ice_get_sw_fv_list(hw, prot_ids, lkups_cnt, bm, fv_list);
+
+free_mem:
+ kfree(prot_ids);
+ return status;
+}
+
+/**
+ * ice_tun_type_match_word - determine if tun type needs a match mask
+ * @tun_type: tunnel type
+ * @mask: mask to be used for the tunnel
+ */
+static bool ice_tun_type_match_word(enum ice_sw_tunnel_type tun_type, u16 *mask)
+{
+ switch (tun_type) {
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ case ICE_SW_TUN_NVGRE:
+ *mask = ICE_TUN_FLAG_MASK;
+ return true;
+
+ default:
+ *mask = 0;
+ return false;
+ }
+}
+
+/**
+ * ice_add_special_words - Add words that are not protocols, such as metadata
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @lkup_exts: lookup word structure
+ */
+static enum ice_status
+ice_add_special_words(struct ice_adv_rule_info *rinfo,
+ struct ice_prot_lkup_ext *lkup_exts)
+{
+ u16 mask;
+
+ /* If this is a tunneled packet, then add recipe index to match the
+ * tunnel bit in the packet metadata flags.
+ */
+ if (ice_tun_type_match_word(rinfo->tun_type, &mask)) {
+ if (lkup_exts->n_val_words < ICE_MAX_CHAIN_WORDS) {
+ u8 word = lkup_exts->n_val_words++;
+
+ lkup_exts->fv_words[word].prot_id = ICE_META_DATA_ID_HW;
+ lkup_exts->fv_words[word].off = ICE_TUN_FLAG_MDID_OFF;
+ lkup_exts->field_mask[word] = mask;
+ } else {
+ return ICE_ERR_MAX_LIMIT;
+ }
+ }
+
+ return 0;
+}
+
+/* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule
+ * @hw: pointer to hardware structure
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @bm: pointer to memory for returning the bitmap of field vectors
+ */
+static void
+ice_get_compat_fv_bitmap(struct ice_hw *hw, struct ice_adv_rule_info *rinfo,
+ unsigned long *bm)
+{
+ enum ice_prof_type prof_type;
+
+ bitmap_zero(bm, ICE_MAX_NUM_PROFILES);
+
+ switch (rinfo->tun_type) {
+ case ICE_NON_TUN:
+ prof_type = ICE_PROF_NON_TUN;
+ break;
+ case ICE_ALL_TUNNELS:
+ prof_type = ICE_PROF_TUN_ALL;
+ break;
+ case ICE_SW_TUN_GENEVE:
+ case ICE_SW_TUN_VXLAN:
+ prof_type = ICE_PROF_TUN_UDP;
+ break;
+ case ICE_SW_TUN_NVGRE:
+ prof_type = ICE_PROF_TUN_GRE;
+ break;
+ default:
+ prof_type = ICE_PROF_ALL;
+ break;
+ }
+
+ ice_get_sw_fv_bitmap(hw, prof_type, bm);
+}
+
+/**
+ * ice_add_adv_recipe - Add an advanced recipe that is not part of the default
+ * @hw: pointer to hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ * @rid: return the recipe ID of the recipe created
+ */
+static enum ice_status
+ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo, u16 *rid)
+{
+ DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES);
+ DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES);
+ struct ice_prot_lkup_ext *lkup_exts;
+ struct ice_recp_grp_entry *r_entry;
+ struct ice_sw_fv_list_entry *fvit;
+ struct ice_recp_grp_entry *r_tmp;
+ struct ice_sw_fv_list_entry *tmp;
+ enum ice_status status = 0;
+ struct ice_sw_recipe *rm;
+ u8 i;
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ lkup_exts = kzalloc(sizeof(*lkup_exts), GFP_KERNEL);
+ if (!lkup_exts)
+ return ICE_ERR_NO_MEMORY;
+
+ /* Determine the number of words to be matched and if it exceeds a
+ * recipe's restrictions
+ */
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+
+ count = ice_fill_valid_words(&lkups[i], lkup_exts);
+ if (!count) {
+ status = ICE_ERR_CFG;
+ goto err_free_lkup_exts;
+ }
+ }
+
+ rm = kzalloc(sizeof(*rm), GFP_KERNEL);
+ if (!rm) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_free_lkup_exts;
+ }
+
+ /* Get field vectors that contain fields extracted from all the protocol
+ * headers being programmed.
+ */
+ INIT_LIST_HEAD(&rm->fv_list);
+ INIT_LIST_HEAD(&rm->rg_list);
+
+ /* Get bitmap of field vectors (profiles) that are compatible with the
+ * rule request; only these will be searched in the subsequent call to
+ * ice_get_fv.
+ */
+ ice_get_compat_fv_bitmap(hw, rinfo, fv_bitmap);
+
+ status = ice_get_fv(hw, lkups, lkups_cnt, fv_bitmap, &rm->fv_list);
+ if (status)
+ goto err_unroll;
+
+ /* Create any special protocol/offset pairs, such as looking at tunnel
+ * bits by extracting metadata
+ */
+ status = ice_add_special_words(rinfo, lkup_exts);
+ if (status)
+ goto err_free_lkup_exts;
+
+ /* Group match words into recipes using preferred recipe grouping
+ * criteria.
+ */
+ status = ice_create_recipe_group(hw, rm, lkup_exts);
+ if (status)
+ goto err_unroll;
+
+ /* set the recipe priority if specified */
+ rm->priority = (u8)rinfo->priority;
+
+ /* Find offsets from the field vector. Pick the first one for all the
+ * recipes.
+ */
+ status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list);
+ if (status)
+ goto err_unroll;
+
+ /* get bitmap of all profiles the recipe will be associated with */
+ bitmap_zero(profiles, ICE_MAX_NUM_PROFILES);
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ ice_debug(hw, ICE_DBG_SW, "profile: %d\n", fvit->profile_id);
+ set_bit((u16)fvit->profile_id, profiles);
+ }
+
+ /* Look for a recipe which matches our requested fv / mask list */
+ *rid = ice_find_recp(hw, lkup_exts);
+ if (*rid < ICE_MAX_NUM_RECIPES)
+ /* Success: a recipe matching the existing criteria was found */
+ goto err_unroll;
+
+ /* Recipe we need does not exist, add a recipe */
+ status = ice_add_sw_recipe(hw, rm, profiles);
+ if (status)
+ goto err_unroll;
+
+ /* Associate all the recipes created with all the profiles in the
+ * common field vector.
+ */
+ list_for_each_entry(fvit, &rm->fv_list, list_entry) {
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ u16 j;
+
+ status = ice_aq_get_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap, NULL);
+ if (status)
+ goto err_unroll;
+
+ bitmap_or(r_bitmap, r_bitmap, rm->r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+ status = ice_acquire_change_lock(hw, ICE_RES_WRITE);
+ if (status)
+ goto err_unroll;
+
+ status = ice_aq_map_recipe_to_profile(hw, fvit->profile_id,
+ (u8 *)r_bitmap,
+ NULL);
+ ice_release_change_lock(hw);
+
+ if (status)
+ goto err_unroll;
+
+ /* Update profile to recipe bitmap array */
+ bitmap_copy(profile_to_recipe[fvit->profile_id], r_bitmap,
+ ICE_MAX_NUM_RECIPES);
+
+ /* Update recipe to profile bitmap array */
+ for_each_set_bit(j, rm->r_bitmap, ICE_MAX_NUM_RECIPES)
+ set_bit((u16)fvit->profile_id, recipe_to_profile[j]);
+ }
+
+ *rid = rm->root_rid;
+ memcpy(&hw->switch_info->recp_list[*rid].lkup_exts, lkup_exts,
+ sizeof(*lkup_exts));
+err_unroll:
+ list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) {
+ list_del(&r_entry->l_entry);
+ devm_kfree(ice_hw_to_dev(hw), r_entry);
+ }
+
+ list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) {
+ list_del(&fvit->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), fvit);
+ }
+
+ if (rm->root_buf)
+ devm_kfree(ice_hw_to_dev(hw), rm->root_buf);
+
+ kfree(rm);
+
+err_free_lkup_exts:
+ kfree(lkup_exts);
+
+ return status;
+}
+
+/**
+ * ice_find_dummy_packet - find dummy packet
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @tun_type: tunnel type
+ * @pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: pointer to receive the pointer to the offsets for the packet
+ */
+static void
+ice_find_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ enum ice_sw_tunnel_type tun_type,
+ const u8 **pkt, u16 *pkt_len,
+ const struct ice_dummy_pkt_offsets **offsets)
+{
+ bool tcp = false, udp = false, ipv6 = false, vlan = false;
+ u16 i;
+
+ for (i = 0; i < lkups_cnt; i++) {
+ if (lkups[i].type == ICE_UDP_ILOS)
+ udp = true;
+ else if (lkups[i].type == ICE_TCP_IL)
+ tcp = true;
+ else if (lkups[i].type == ICE_IPV6_OFOS)
+ ipv6 = true;
+ else if (lkups[i].type == ICE_VLAN_OFOS)
+ vlan = true;
+ else if (lkups[i].type == ICE_ETYPE_OL &&
+ lkups[i].h_u.ethertype.ethtype_id ==
+ cpu_to_be16(ICE_IPV6_ETHER_ID) &&
+ lkups[i].m_u.ethertype.ethtype_id ==
+ cpu_to_be16(0xFFFF))
+ ipv6 = true;
+ }
+
+ if (tun_type == ICE_SW_TUN_NVGRE) {
+ if (tcp) {
+ *pkt = dummy_gre_tcp_packet;
+ *pkt_len = sizeof(dummy_gre_tcp_packet);
+ *offsets = dummy_gre_tcp_packet_offsets;
+ return;
+ }
+
+ *pkt = dummy_gre_udp_packet;
+ *pkt_len = sizeof(dummy_gre_udp_packet);
+ *offsets = dummy_gre_udp_packet_offsets;
+ return;
+ }
+
+ if (tun_type == ICE_SW_TUN_VXLAN ||
+ tun_type == ICE_SW_TUN_GENEVE) {
+ if (tcp) {
+ *pkt = dummy_udp_tun_tcp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_tcp_packet);
+ *offsets = dummy_udp_tun_tcp_packet_offsets;
+ return;
+ }
+
+ *pkt = dummy_udp_tun_udp_packet;
+ *pkt_len = sizeof(dummy_udp_tun_udp_packet);
+ *offsets = dummy_udp_tun_udp_packet_offsets;
+ return;
+ }
+
+ if (udp && !ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_packet);
+ *offsets = dummy_vlan_udp_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_packet;
+ *pkt_len = sizeof(dummy_udp_packet);
+ *offsets = dummy_udp_packet_offsets;
+ return;
+ } else if (udp && ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_udp_ipv6_packet);
+ *offsets = dummy_vlan_udp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_udp_ipv6_packet;
+ *pkt_len = sizeof(dummy_udp_ipv6_packet);
+ *offsets = dummy_udp_ipv6_packet_offsets;
+ return;
+ } else if ((tcp && ipv6) || ipv6) {
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_ipv6_packet);
+ *offsets = dummy_vlan_tcp_ipv6_packet_offsets;
+ return;
+ }
+ *pkt = dummy_tcp_ipv6_packet;
+ *pkt_len = sizeof(dummy_tcp_ipv6_packet);
+ *offsets = dummy_tcp_ipv6_packet_offsets;
+ return;
+ }
+
+ if (vlan) {
+ *pkt = dummy_vlan_tcp_packet;
+ *pkt_len = sizeof(dummy_vlan_tcp_packet);
+ *offsets = dummy_vlan_tcp_packet_offsets;
+ } else {
+ *pkt = dummy_tcp_packet;
+ *pkt_len = sizeof(dummy_tcp_packet);
+ *offsets = dummy_tcp_packet_offsets;
+ }
+}
+
+/**
+ * ice_fill_adv_dummy_packet - fill a dummy packet with given match criteria
+ *
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @s_rule: stores rule information from the match criteria
+ * @dummy_pkt: dummy packet to fill according to filter match criteria
+ * @pkt_len: packet length of dummy packet
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_dummy_packet(struct ice_adv_lkup_elem *lkups, u16 lkups_cnt,
+ struct ice_aqc_sw_rules_elem *s_rule,
+ const u8 *dummy_pkt, u16 pkt_len,
+ const struct ice_dummy_pkt_offsets *offsets)
+{
+ u8 *pkt;
+ u16 i;
+
+ /* Start with a packet with a pre-defined/dummy content. Then, fill
+ * in the header values to be looked up or matched.
+ */
+ pkt = s_rule->pdata.lkup_tx_rx.hdr;
+
+ memcpy(pkt, dummy_pkt, pkt_len);
+
+ for (i = 0; i < lkups_cnt; i++) {
+ enum ice_protocol_type type;
+ u16 offset = 0, len = 0, j;
+ bool found = false;
+
+ /* find the start of this layer; it should be found since this
+ * was already checked when searching for the dummy packet
+ */
+ type = lkups[i].type;
+ for (j = 0; offsets[j].type != ICE_PROTOCOL_LAST; j++) {
+ if (type == offsets[j].type) {
+ offset = offsets[j].offset;
+ found = true;
+ break;
+ }
+ }
+ /* this should never happen in a correct calling sequence */
+ if (!found)
+ return ICE_ERR_PARAM;
+
+ switch (lkups[i].type) {
+ case ICE_MAC_OFOS:
+ case ICE_MAC_IL:
+ len = sizeof(struct ice_ether_hdr);
+ break;
+ case ICE_ETYPE_OL:
+ len = sizeof(struct ice_ethtype_hdr);
+ break;
+ case ICE_VLAN_OFOS:
+ len = sizeof(struct ice_vlan_hdr);
+ break;
+ case ICE_IPV4_OFOS:
+ case ICE_IPV4_IL:
+ len = sizeof(struct ice_ipv4_hdr);
+ break;
+ case ICE_IPV6_OFOS:
+ case ICE_IPV6_IL:
+ len = sizeof(struct ice_ipv6_hdr);
+ break;
+ case ICE_TCP_IL:
+ case ICE_UDP_OF:
+ case ICE_UDP_ILOS:
+ len = sizeof(struct ice_l4_hdr);
+ break;
+ case ICE_SCTP_IL:
+ len = sizeof(struct ice_sctp_hdr);
+ break;
+ case ICE_NVGRE:
+ len = sizeof(struct ice_nvgre_hdr);
+ break;
+ case ICE_VXLAN:
+ case ICE_GENEVE:
+ len = sizeof(struct ice_udp_tnl_hdr);
+ break;
+ default:
+ return ICE_ERR_PARAM;
+ }
+
+ /* the length should be a word multiple */
+ if (len % ICE_BYTES_PER_WORD)
+ return ICE_ERR_CFG;
+
+ /* We have the offset to the header start, the length, the
+ * caller's header values and mask. Use this information to
+ * copy the data into the dummy packet appropriately based on
+ * the mask. Note that we need to only write the bits as
+ * indicated by the mask to make sure we don't improperly write
+ * over any significant packet data.
+ */
+ for (j = 0; j < len / sizeof(u16); j++)
+ if (((u16 *)&lkups[i].m_u)[j])
+ ((u16 *)(pkt + offset))[j] =
+ (((u16 *)(pkt + offset))[j] &
+ ~((u16 *)&lkups[i].m_u)[j]) |
+ (((u16 *)&lkups[i].h_u)[j] &
+ ((u16 *)&lkups[i].m_u)[j]);
+ }
+
+ s_rule->pdata.lkup_tx_rx.hdr_len = cpu_to_le16(pkt_len);
+
+ return 0;
+}
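The per-word copy at the end of this function is a masked merge, (dst & ~mask) | (val & mask), applied 16 bits at a time so that only the bits the caller wants to match are written into the dummy packet. A stand-alone sketch of that operation (the buffer contents are made up):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Merge 'len' bytes of 'val' into 'pkt' at offset 'off', 16 bits at a time,
 * touching only the bits set in 'mask' (len must be a multiple of 2, matching
 * the driver's word-multiple check).
 */
static void masked_merge(uint8_t *pkt, size_t off, const uint16_t *val,
			 const uint16_t *mask, size_t len)
{
	size_t j;

	for (j = 0; j < len / sizeof(uint16_t); j++) {
		uint16_t cur;

		if (!mask[j])
			continue;	/* nothing to match in this word */
		memcpy(&cur, pkt + off + j * sizeof(uint16_t), sizeof(cur));
		cur = (cur & ~mask[j]) | (val[j] & mask[j]);
		memcpy(pkt + off + j * sizeof(uint16_t), &cur, sizeof(cur));
	}
}

int main(void)
{
	uint8_t pkt[4] = { 0x11, 0x22, 0x33, 0x44 };	/* pretend dummy packet */
	uint16_t val[2]  = { 0xAAAA, 0xBBBB };		/* caller's header values */
	uint16_t mask[2] = { 0xFF00, 0x0000 };		/* match only part of word 0 */

	masked_merge(pkt, 0, val, mask, sizeof(pkt));
	printf("%02x %02x %02x %02x\n", pkt[0], pkt[1], pkt[2], pkt[3]);
	return 0;
}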
+
+/**
+ * ice_fill_adv_packet_tun - fill dummy packet with udp tunnel port
+ * @hw: pointer to the hardware structure
+ * @tun_type: tunnel type
+ * @pkt: dummy packet to fill in
+ * @offsets: offset info for the dummy packet
+ */
+static enum ice_status
+ice_fill_adv_packet_tun(struct ice_hw *hw, enum ice_sw_tunnel_type tun_type,
+ u8 *pkt, const struct ice_dummy_pkt_offsets *offsets)
+{
+ u16 open_port, i;
+
+ switch (tun_type) {
+ case ICE_SW_TUN_VXLAN:
+ case ICE_SW_TUN_GENEVE:
+ if (!ice_get_open_tunnel_port(hw, &open_port))
+ return ICE_ERR_CFG;
+ break;
+
+ default:
+ /* Nothing needs to be done for this tunnel type */
+ return 0;
+ }
+
+ /* Find the outer UDP protocol header and insert the port number */
+ for (i = 0; offsets[i].type != ICE_PROTOCOL_LAST; i++) {
+ if (offsets[i].type == ICE_UDP_OF) {
+ struct ice_l4_hdr *hdr;
+ u16 offset;
+
+ offset = offsets[i].offset;
+ hdr = (struct ice_l4_hdr *)&pkt[offset];
+ hdr->dst_port = cpu_to_be16(open_port);
+
+ return 0;
+ }
+ }
+
+ return ICE_ERR_CFG;
+}
+
+/**
+ * ice_find_adv_rule_entry - Search a rule entry
+ * @hw: pointer to the hardware structure
+ * @lkups: lookup elements or match criteria for the advanced recipe, one
+ * structure per protocol header
+ * @lkups_cnt: number of protocols
+ * @recp_id: recipe ID for which we are finding the rule
+ * @rinfo: other information regarding the rule e.g. priority and action info
+ *
+ * Helper function to search for a given advanced rule entry.
+ * Returns a pointer to the entry storing the rule if found.
+ */
+static struct ice_adv_fltr_mgmt_list_entry *
+ice_find_adv_rule_entry(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, u16 recp_id,
+ struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct ice_switch_info *sw = hw->switch_info;
+ int i;
+
+ list_for_each_entry(list_itr, &sw->recp_list[recp_id].filt_rules,
+ list_entry) {
+ bool lkups_matched = true;
+
+ if (lkups_cnt != list_itr->lkups_cnt)
+ continue;
+ for (i = 0; i < list_itr->lkups_cnt; i++)
+ if (memcmp(&list_itr->lkups[i], &lkups[i],
+ sizeof(*lkups))) {
+ lkups_matched = false;
+ break;
+ }
+ if (rinfo->sw_act.flag == list_itr->rule_info.sw_act.flag &&
+ rinfo->tun_type == list_itr->rule_info.tun_type &&
+ lkups_matched)
+ return list_itr;
+ }
+ return NULL;
+}
+
+/**
+ * ice_adv_add_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @m_entry: pointer to current adv filter management list entry
+ * @cur_fltr: filter information from the book keeping entry
+ * @new_fltr: filter information with the new VSI to be added
+ *
+ * Call AQ command to add or update previously created VSI list with new VSI.
+ *
+ * Helper function to do the bookkeeping associated with adding filter
+ * information. The bookkeeping algorithm is described below:
+ * When a VSI needs to subscribe to a given advanced filter
+ * if only one VSI has been added so far
+ * allocate a new VSI list and add both VSIs
+ * to this list using the switch rule command
+ * update the previously created switch rule with the
+ * newly created VSI list ID
+ * if a VSI list was previously created
+ * add the new VSI to that VSI list
+ * using the update switch rule command
+ */
+static enum ice_status
+ice_adv_add_update_vsi_list(struct ice_hw *hw,
+ struct ice_adv_fltr_mgmt_list_entry *m_entry,
+ struct ice_adv_rule_info *cur_fltr,
+ struct ice_adv_rule_info *new_fltr)
+{
+ enum ice_status status;
+ u16 vsi_list_id = 0;
+
+ if (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ cur_fltr->sw_act.fltr_act == ICE_DROP_PACKET)
+ return ICE_ERR_NOT_IMPL;
+
+ if ((new_fltr->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ new_fltr->sw_act.fltr_act == ICE_FWD_TO_QGRP) &&
+ (cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ cur_fltr->sw_act.fltr_act == ICE_FWD_TO_VSI_LIST))
+ return ICE_ERR_NOT_IMPL;
+
+ if (m_entry->vsi_count < 2 && !m_entry->vsi_list_info) {
+ /* Only one entry existed in the mapping and it was not already
+ * a part of a VSI list. So, create a VSI list with the old and
+ * new VSIs.
+ */
+ struct ice_fltr_info tmp_fltr;
+ u16 vsi_handle_arr[2];
+
+ /* A rule already exists with the new VSI being added */
+ if (cur_fltr->sw_act.fwd_id.hw_vsi_id ==
+ new_fltr->sw_act.fwd_id.hw_vsi_id)
+ return ICE_ERR_ALREADY_EXISTS;
+
+ vsi_handle_arr[0] = cur_fltr->sw_act.vsi_handle;
+ vsi_handle_arr[1] = new_fltr->sw_act.vsi_handle;
+ status = ice_create_vsi_list_rule(hw, &vsi_handle_arr[0], 2,
+ &vsi_list_id,
+ ICE_SW_LKUP_LAST);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = m_entry->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = cur_fltr->fltr_rule_id;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
+ tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
+ tmp_fltr.lkup_type = ICE_SW_LKUP_LAST;
+
+ /* Update the previous switch rule of "forward to VSI" to
+ * "fwd to VSI list"
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status)
+ return status;
+
+ cur_fltr->sw_act.fwd_id.vsi_list_id = vsi_list_id;
+ cur_fltr->sw_act.fltr_act = ICE_FWD_TO_VSI_LIST;
+ m_entry->vsi_list_info =
+ ice_create_vsi_list_map(hw, &vsi_handle_arr[0], 2,
+ vsi_list_id);
+ } else {
+ u16 vsi_handle = new_fltr->sw_act.vsi_handle;
+
+ if (!m_entry->vsi_list_info)
+ return ICE_ERR_CFG;
+
+ /* A rule already exists with the new VSI being added */
+ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
+ return 0;
+
+ /* Update the previously created VSI list set with
+ * the new VSI ID passed in
+ */
+ vsi_list_id = cur_fltr->sw_act.fwd_id.vsi_list_id;
+
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1,
+ vsi_list_id, false,
+ ice_aqc_opc_update_sw_rules,
+ ICE_SW_LKUP_LAST);
+ /* update VSI list mapping info with new VSI ID */
+ if (!status)
+ set_bit(vsi_handle, m_entry->vsi_list_info->vsi_map);
+ }
+ if (!status)
+ m_entry->vsi_count++;
+ return status;
+}
+
+/**
+ * ice_add_adv_rule - helper function to create an advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: other information related to the rule that needs to be programmed
+ * @added_entry: this will return recipe_id, rule_id and vsi_handle. Should be
+ * ignored in case of error.
+ *
+ * This function can program only 1 rule at a time. The lkups array is used to
+ * describe all the words that form the "lookup" portion of the recipe.
+ * These words can span multiple protocols. Callers to this function need to
+ * pass in a list of protocol headers with lookup information along with a mask
+ * that determines which words of the given protocol header are valid.
+ * rinfo describes other information related to this rule such as forwarding
+ * IDs, priority of this rule, etc.
+ */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *m_entry, *adv_fltr = NULL;
+ u16 rid = 0, i, pkt_len, rule_buf_sz, vsi_handle;
+ const struct ice_dummy_pkt_offsets *pkt_offsets;
+ struct ice_aqc_sw_rules_elem *s_rule = NULL;
+ struct list_head *rule_head;
+ struct ice_switch_info *sw;
+ enum ice_status status;
+ const u8 *pkt = NULL;
+ u16 word_cnt;
+ u32 act = 0;
+ u8 q_rgn;
+
+ /* Initialize profile to result index bitmap */
+ if (!hw->switch_info->prof_res_bm_init) {
+ hw->switch_info->prof_res_bm_init = 1;
+ ice_init_prof_result_bm(hw);
+ }
+
+ if (!lkups_cnt)
+ return ICE_ERR_PARAM;
+
+ /* get # of words we need to match */
+ word_cnt = 0;
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 j, *ptr;
+
+ ptr = (u16 *)&lkups[i].m_u;
+ for (j = 0; j < sizeof(lkups->m_u) / sizeof(u16); j++)
+ if (ptr[j] != 0)
+ word_cnt++;
+ }
+
+ if (!word_cnt || word_cnt > ICE_MAX_CHAIN_WORDS)
+ return ICE_ERR_PARAM;
+
+ /* make sure that we can locate a dummy packet */
+ ice_find_dummy_packet(lkups, lkups_cnt, rinfo->tun_type, &pkt, &pkt_len,
+ &pkt_offsets);
+ if (!pkt) {
+ status = ICE_ERR_PARAM;
+ goto err_ice_add_adv_rule;
+ }
+
+ if (!(rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_Q ||
+ rinfo->sw_act.fltr_act == ICE_FWD_TO_QGRP ||
+ rinfo->sw_act.fltr_act == ICE_DROP_PACKET))
+ return ICE_ERR_CFG;
+
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ if (!ice_is_vsi_valid(hw, vsi_handle))
+ return ICE_ERR_PARAM;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ rinfo->sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, vsi_handle);
+ if (rinfo->sw_act.flag & ICE_FLTR_TX)
+ rinfo->sw_act.src = ice_get_hw_vsi_num(hw, vsi_handle);
+
+ status = ice_add_adv_recipe(hw, lkups, lkups_cnt, rinfo, &rid);
+ if (status)
+ return status;
+ m_entry = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ if (m_entry) {
+ /* The rule already exists; add the VSI to the VSI list and
+ * increment vsi_count. If the rule currently forwards to a
+ * single VSI, create a VSI list containing the existing VSI ID
+ * and the new VSI ID and update the rule to forward to that
+ * list. If the rule already forwards to a VSI list, simply add
+ * the new VSI to that list.
+ */
+ status = ice_adv_add_update_vsi_list(hw, m_entry,
+ &m_entry->rule_info,
+ rinfo);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = m_entry->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+ return status;
+ }
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE + pkt_len;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ if (!rinfo->flags_info.act_valid) {
+ act |= ICE_SINGLE_ACT_LAN_ENABLE;
+ act |= ICE_SINGLE_ACT_LB_ENABLE;
+ } else {
+ act |= rinfo->flags_info.act & (ICE_SINGLE_ACT_LAN_ENABLE |
+ ICE_SINGLE_ACT_LB_ENABLE);
+ }
+
+ switch (rinfo->sw_act.fltr_act) {
+ case ICE_FWD_TO_VSI:
+ act |= (rinfo->sw_act.fwd_id.hw_vsi_id <<
+ ICE_SINGLE_ACT_VSI_ID_S) & ICE_SINGLE_ACT_VSI_ID_M;
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ case ICE_FWD_TO_Q:
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ break;
+ case ICE_FWD_TO_QGRP:
+ q_rgn = rinfo->sw_act.qgrp_size > 0 ?
+ (u8)ilog2(rinfo->sw_act.qgrp_size) : 0;
+ act |= ICE_SINGLE_ACT_TO_Q;
+ act |= (rinfo->sw_act.fwd_id.q_id << ICE_SINGLE_ACT_Q_INDEX_S) &
+ ICE_SINGLE_ACT_Q_INDEX_M;
+ act |= (q_rgn << ICE_SINGLE_ACT_Q_REGION_S) &
+ ICE_SINGLE_ACT_Q_REGION_M;
+ break;
+ case ICE_DROP_PACKET:
+ act |= ICE_SINGLE_ACT_VSI_FORWARDING | ICE_SINGLE_ACT_DROP |
+ ICE_SINGLE_ACT_VALID_BIT;
+ break;
+ default:
+ status = ICE_ERR_CFG;
+ goto err_ice_add_adv_rule;
+ }
+
+ /* set the rule LOOKUP type based on caller specified 'Rx'
+ * instead of hardcoding it to be either LOOKUP_TX/RX
+ *
+ * for 'Rx' set the source to be the port number
+ * for 'Tx' set the source to be the source HW VSI number (determined
+ * by caller)
+ */
+ if (rinfo->rx) {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_RX);
+ s_rule->pdata.lkup_tx_rx.src =
+ cpu_to_le16(hw->port_info->lport);
+ } else {
+ s_rule->type = cpu_to_le16(ICE_AQC_SW_RULES_T_LKUP_TX);
+ s_rule->pdata.lkup_tx_rx.src = cpu_to_le16(rinfo->sw_act.src);
+ }
+
+ s_rule->pdata.lkup_tx_rx.recipe_id = cpu_to_le16(rid);
+ s_rule->pdata.lkup_tx_rx.act = cpu_to_le32(act);
+
+ status = ice_fill_adv_dummy_packet(lkups, lkups_cnt, s_rule, pkt,
+ pkt_len, pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+
+ if (rinfo->tun_type != ICE_NON_TUN) {
+ status = ice_fill_adv_packet_tun(hw, rinfo->tun_type,
+ s_rule->pdata.lkup_tx_rx.hdr,
+ pkt_offsets);
+ if (status)
+ goto err_ice_add_adv_rule;
+ }
+
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1, ice_aqc_opc_add_sw_rules,
+ NULL);
+ if (status)
+ goto err_ice_add_adv_rule;
+ adv_fltr = devm_kzalloc(ice_hw_to_dev(hw),
+ sizeof(struct ice_adv_fltr_mgmt_list_entry),
+ GFP_KERNEL);
+ if (!adv_fltr) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups = devm_kmemdup(ice_hw_to_dev(hw), lkups,
+ lkups_cnt * sizeof(*lkups), GFP_KERNEL);
+ if (!adv_fltr->lkups) {
+ status = ICE_ERR_NO_MEMORY;
+ goto err_ice_add_adv_rule;
+ }
+
+ adv_fltr->lkups_cnt = lkups_cnt;
+ adv_fltr->rule_info = *rinfo;
+ adv_fltr->rule_info.fltr_rule_id =
+ le16_to_cpu(s_rule->pdata.lkup_tx_rx.index);
+ sw = hw->switch_info;
+ sw->recp_list[rid].adv_rule = true;
+ rule_head = &sw->recp_list[rid].filt_rules;
+
+ if (rinfo->sw_act.fltr_act == ICE_FWD_TO_VSI)
+ adv_fltr->vsi_count = 1;
+
+ /* Add rule entry to book keeping list */
+ list_add(&adv_fltr->list_entry, rule_head);
+ if (added_entry) {
+ added_entry->rid = rid;
+ added_entry->rule_id = adv_fltr->rule_info.fltr_rule_id;
+ added_entry->vsi_handle = rinfo->sw_act.vsi_handle;
+ }
+err_ice_add_adv_rule:
+ if (status && adv_fltr) {
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr->lkups);
+ devm_kfree(ice_hw_to_dev(hw), adv_fltr);
+ }
+
+ kfree(s_rule);
+
+ return status;
+}
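A hypothetical caller of ice_add_adv_rule(), shown only to illustrate how the structures added to ice_switch.h in this patch fit together. It is not part of the patch, it compiles only in the driver context, and the VSI handle and matched address are arbitrary.

/* Hypothetical example, not driver code: forward IPv4 packets with a given
 * destination address to a VSI, and hand back the IDs needed to remove the
 * rule later via ice_rem_adv_rule_by_id().
 */
static enum ice_status
example_add_ipv4_dst_rule(struct ice_hw *hw, u16 vsi_handle,
			  struct ice_rule_query_data *added)
{
	struct ice_adv_lkup_elem lkup = {};
	struct ice_adv_rule_info rinfo = {};

	lkup.type = ICE_IPV4_OFOS;
	lkup.h_u.ipv4_hdr.dst_addr = cpu_to_be32(0xC0A80001);	/* 192.168.0.1 */
	lkup.m_u.ipv4_hdr.dst_addr = cpu_to_be32(0xFFFFFFFF);	/* match all 32 bits */

	rinfo.tun_type = ICE_NON_TUN;
	rinfo.rx = true;				/* program a LOOKUP_RX rule */
	rinfo.sw_act.fltr_act = ICE_FWD_TO_VSI;
	rinfo.sw_act.vsi_handle = vsi_handle;

	/* On success 'added' carries rid/rule_id/vsi_handle, which can later
	 * be handed to ice_rem_adv_rule_by_id() to delete the rule.
	 */
	return ice_add_adv_rule(hw, &lkup, 1, &rinfo, added);
}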
+
/**
* ice_replay_vsi_fltr - Replay filters for requested VSI
* @hw: pointer to the hardware structure
@@ -2831,6 +5420,236 @@ end:
}
/**
+ * ice_adv_rem_update_vsi_list
+ * @hw: pointer to the hardware structure
+ * @vsi_handle: VSI handle of the VSI to remove
+ * @fm_list: filter management entry for which the VSI list management needs to
+ * be done
+ */
+static enum ice_status
+ice_adv_rem_update_vsi_list(struct ice_hw *hw, u16 vsi_handle,
+ struct ice_adv_fltr_mgmt_list_entry *fm_list)
+{
+ struct ice_vsi_list_map_info *vsi_list_info;
+ enum ice_sw_lkup_type lkup_type;
+ enum ice_status status;
+ u16 vsi_list_id;
+
+ if (fm_list->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST ||
+ fm_list->vsi_count == 0)
+ return ICE_ERR_PARAM;
+
+ /* A rule with the VSI being removed does not exist */
+ if (!test_bit(vsi_handle, fm_list->vsi_list_info->vsi_map))
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ lkup_type = ICE_SW_LKUP_LAST;
+ vsi_list_id = fm_list->rule_info.sw_act.fwd_id.vsi_list_id;
+ status = ice_update_vsi_list_rule(hw, &vsi_handle, 1, vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ fm_list->vsi_count--;
+ clear_bit(vsi_handle, fm_list->vsi_list_info->vsi_map);
+ vsi_list_info = fm_list->vsi_list_info;
+ if (fm_list->vsi_count == 1) {
+ struct ice_fltr_info tmp_fltr;
+ u16 rem_vsi_handle;
+
+ rem_vsi_handle = find_first_bit(vsi_list_info->vsi_map,
+ ICE_MAX_VSI);
+ if (!ice_is_vsi_valid(hw, rem_vsi_handle))
+ return ICE_ERR_OUT_OF_RANGE;
+
+ /* Make sure VSI list is empty before removing it below */
+ status = ice_update_vsi_list_rule(hw, &rem_vsi_handle, 1,
+ vsi_list_id, true,
+ ice_aqc_opc_update_sw_rules,
+ lkup_type);
+ if (status)
+ return status;
+
+ memset(&tmp_fltr, 0, sizeof(tmp_fltr));
+ tmp_fltr.flag = fm_list->rule_info.sw_act.flag;
+ tmp_fltr.fltr_rule_id = fm_list->rule_info.fltr_rule_id;
+ fm_list->rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fltr_act = ICE_FWD_TO_VSI;
+ tmp_fltr.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.fwd_id.hw_vsi_id =
+ ice_get_hw_vsi_num(hw, rem_vsi_handle);
+ fm_list->rule_info.sw_act.vsi_handle = rem_vsi_handle;
+
+ /* Convert the previous switch rule from "forward to VSI list"
+ * back to "forward to VSI", since only one VSI remains
+ */
+ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to update pkt fwd rule to FWD_TO_VSI on HW VSI %d, error %d\n",
+ tmp_fltr.fwd_id.hw_vsi_id, status);
+ return status;
+ }
+ fm_list->vsi_list_info->ref_cnt--;
+
+ /* Remove the VSI list since it is no longer used */
+ status = ice_remove_vsi_list_rule(hw, vsi_list_id, lkup_type);
+ if (status) {
+ ice_debug(hw, ICE_DBG_SW, "Failed to remove VSI list %d, error %d\n",
+ vsi_list_id, status);
+ return status;
+ }
+
+ list_del(&vsi_list_info->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), vsi_list_info);
+ fm_list->vsi_list_info = NULL;
+ }
+
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule - removes existing advanced switch rule
+ * @hw: pointer to the hardware structure
+ * @lkups: information on the words that need to be looked up. All words
+ * together make one recipe
+ * @lkups_cnt: num of entries in the lkups array
+ * @rinfo: pointer to the rule information for this rule
+ *
+ * This function can be used to remove 1 rule at a time. The lkups array is
+ * used to describe all the words that form the "lookup" portion of the
+ * rule. These words can span multiple protocols. Callers to this function
+ * need to pass in a list of protocol headers with lookup information along
+ * with a mask that determines which words of the given protocol header are
+ * valid. rinfo describes other information related to this rule such as
+ * forwarding IDs, priority of this rule, etc.
+ */
+static enum ice_status
+ice_rem_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_elem;
+ struct ice_prot_lkup_ext lkup_exts;
+ enum ice_status status = 0;
+ bool remove_rule = false;
+ struct mutex *rule_lock; /* Lock to protect filter rule list */
+ u16 i, rid, vsi_handle;
+
+ memset(&lkup_exts, 0, sizeof(lkup_exts));
+ for (i = 0; i < lkups_cnt; i++) {
+ u16 count;
+
+ if (lkups[i].type >= ICE_PROTOCOL_LAST)
+ return ICE_ERR_CFG;
+
+ count = ice_fill_valid_words(&lkups[i], &lkup_exts);
+ if (!count)
+ return ICE_ERR_CFG;
+ }
+
+ /* Create any special protocol/offset pairs, such as looking at tunnel
+ * bits by extracting metadata
+ */
+ status = ice_add_special_words(rinfo, &lkup_exts);
+ if (status)
+ return status;
+
+ rid = ice_find_recp(hw, &lkup_exts);
+ /* No recipe matching the existing criteria was found */
+ if (rid == ICE_MAX_NUM_RECIPES)
+ return ICE_ERR_PARAM;
+
+ rule_lock = &hw->switch_info->recp_list[rid].filt_rule_lock;
+ list_elem = ice_find_adv_rule_entry(hw, lkups, lkups_cnt, rid, rinfo);
+ /* the rule is already removed */
+ if (!list_elem)
+ return 0;
+ mutex_lock(rule_lock);
+ if (list_elem->rule_info.sw_act.fltr_act != ICE_FWD_TO_VSI_LIST) {
+ remove_rule = true;
+ } else if (list_elem->vsi_count > 1) {
+ remove_rule = false;
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ } else {
+ vsi_handle = rinfo->sw_act.vsi_handle;
+ status = ice_adv_rem_update_vsi_list(hw, vsi_handle, list_elem);
+ if (status) {
+ mutex_unlock(rule_lock);
+ return status;
+ }
+ if (list_elem->vsi_count == 0)
+ remove_rule = true;
+ }
+ mutex_unlock(rule_lock);
+ if (remove_rule) {
+ struct ice_aqc_sw_rules_elem *s_rule;
+ u16 rule_buf_sz;
+
+ rule_buf_sz = ICE_SW_RULE_RX_TX_NO_HDR_SIZE;
+ s_rule = kzalloc(rule_buf_sz, GFP_KERNEL);
+ if (!s_rule)
+ return ICE_ERR_NO_MEMORY;
+ s_rule->pdata.lkup_tx_rx.act = 0;
+ s_rule->pdata.lkup_tx_rx.index =
+ cpu_to_le16(list_elem->rule_info.fltr_rule_id);
+ s_rule->pdata.lkup_tx_rx.hdr_len = 0;
+ status = ice_aq_sw_rules(hw, (struct ice_aqc_sw_rules *)s_rule,
+ rule_buf_sz, 1,
+ ice_aqc_opc_remove_sw_rules, NULL);
+ if (!status || status == ICE_ERR_DOES_NOT_EXIST) {
+ struct ice_switch_info *sw = hw->switch_info;
+
+ mutex_lock(rule_lock);
+ list_del(&list_elem->list_entry);
+ devm_kfree(ice_hw_to_dev(hw), list_elem->lkups);
+ devm_kfree(ice_hw_to_dev(hw), list_elem);
+ mutex_unlock(rule_lock);
+ if (list_empty(&sw->recp_list[rid].filt_rules))
+ sw->recp_list[rid].adv_rule = false;
+ }
+ kfree(s_rule);
+ }
+ return status;
+}
+
+/**
+ * ice_rem_adv_rule_by_id - removes existing advanced switch rule by ID
+ * @hw: pointer to the hardware structure
+ * @remove_entry: data struct which holds rule_id, VSI handle and recipe ID
+ *
+ * This function is used to remove 1 rule at a time. The removal is based on
+ * the remove_entry parameter: the rule is removed for the vsi_handle and
+ * rule_id passed in remove_entry.
+ */
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry)
+{
+ struct ice_adv_fltr_mgmt_list_entry *list_itr;
+ struct list_head *list_head;
+ struct ice_adv_rule_info rinfo;
+ struct ice_switch_info *sw;
+
+ sw = hw->switch_info;
+ if (!sw->recp_list[remove_entry->rid].recp_created)
+ return ICE_ERR_PARAM;
+ list_head = &sw->recp_list[remove_entry->rid].filt_rules;
+ list_for_each_entry(list_itr, list_head, list_entry) {
+ if (list_itr->rule_info.fltr_rule_id ==
+ remove_entry->rule_id) {
+ rinfo = list_itr->rule_info;
+ rinfo.sw_act.vsi_handle = remove_entry->vsi_handle;
+ return ice_rem_adv_rule(hw, list_itr->lkups,
+ list_itr->lkups_cnt, &rinfo);
+ }
+ }
+ /* either list is empty or unable to find rule */
+ return ICE_ERR_DOES_NOT_EXIST;
+}
+
+/**
* ice_replay_vsi_all_fltr - replay all filters stored in bookkeeping lists
* @hw: pointer to the hardware structure
* @vsi_handle: driver VSI handle
@@ -2868,12 +5687,15 @@ void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw)
if (!sw)
return;
- for (i = 0; i < ICE_SW_LKUP_LAST; i++) {
+ for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
if (!list_empty(&sw->recp_list[i].filt_replay_rules)) {
struct list_head *l_head;
l_head = &sw->recp_list[i].filt_replay_rules;
- ice_rem_sw_rule_info(hw, l_head);
+ if (!sw->recp_list[i].adv_rule)
+ ice_rem_sw_rule_info(hw, l_head);
+ else
+ ice_rem_adv_rule_info(hw, l_head);
}
}
}
diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h
index c5db8d56133f..d8a38906f16f 100644
--- a/drivers/net/ethernet/intel/ice/ice_switch.h
+++ b/drivers/net/ethernet/intel/ice/ice_switch.h
@@ -14,6 +14,9 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
+#define ICE_SW_RULE_RX_TX_NO_HDR_SIZE \
+ (offsetof(struct ice_aqc_sw_rules_elem, pdata.lkup_tx_rx.hdr))
+
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
@@ -122,30 +125,124 @@ struct ice_fltr_info {
u8 lan_en; /* Indicate if packet can be forwarded to the uplink */
};
+struct ice_adv_lkup_elem {
+ enum ice_protocol_type type;
+ union ice_prot_hdr h_u; /* Header values */
+ union ice_prot_hdr m_u; /* Mask of header values to match */
+};
+
+struct ice_sw_act_ctrl {
+ /* Source VSI for LOOKUP_TX or source port for LOOKUP_RX */
+ u16 src;
+ u16 flag;
+ enum ice_sw_fwd_act_type fltr_act;
+ /* Depending on filter action */
+ union {
+ /* This is a queue ID in case of ICE_FWD_TO_Q and starting
+ * queue ID in case of ICE_FWD_TO_QGRP.
+ */
+ u16 q_id:11;
+ u16 vsi_id:10;
+ u16 hw_vsi_id:10;
+ u16 vsi_list_id:10;
+ } fwd_id;
+ /* software VSI handle */
+ u16 vsi_handle;
+ u8 qgrp_size;
+};
+
+struct ice_rule_query_data {
+ /* Recipe ID for which the requested rule was added */
+ u16 rid;
+ /* Rule ID that was added or is supposed to be removed */
+ u16 rule_id;
+ /* vsi_handle for which Rule was added or is supposed to be removed */
+ u16 vsi_handle;
+};
+
+/* This structure allows passing info about the lb_en and lan_en
+ * flags to ice_add_adv_rule. Values in act are used
+ * only if act_valid is set to true; otherwise default
+ * values are used.
+ */
+struct ice_adv_rule_flags_info {
+ u32 act;
+ u8 act_valid; /* indicate if flags in act are valid */
+};
+
+struct ice_adv_rule_info {
+ enum ice_sw_tunnel_type tun_type;
+ struct ice_sw_act_ctrl sw_act;
+ u32 priority;
+ u8 rx; /* true means LOOKUP_RX otherwise LOOKUP_TX */
+ u16 fltr_rule_id;
+ struct ice_adv_rule_flags_info flags_info;
+};
+
+/* A collection of one or more four-word recipes */
struct ice_sw_recipe {
- struct list_head l_entry;
+ /* For a chained recipe the root recipe is what should be used for
+ * programming rules
+ */
+ u8 is_root;
+ u8 root_rid;
+ u8 recp_created;
- /* To protect modification of filt_rule list
- * defined below
+ /* Number of extraction words */
+ u8 n_ext_words;
+ /* Protocol ID and Offset pair (extraction word) to describe the
+ * recipe
*/
- struct mutex filt_rule_lock;
+ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS];
+ u16 word_masks[ICE_MAX_CHAIN_WORDS];
- /* List of type ice_fltr_mgmt_list_entry */
+ /* if this recipe is a collection of other recipes */
+ u8 big_recp;
+
+ /* if this recipe is part of another bigger recipe, the chain index
+ * corresponding to this recipe
+ */
+ u8 chain_idx;
+
+ /* if this recipe is a collection of other recipes, the count of those
+ * recipes and their recipe IDs
+ */
+ u8 n_grp_count;
+
+ /* Bit map specifying the IDs associated with this group of recipes */
+ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+
+ enum ice_sw_tunnel_type tun_type;
+
+ /* List of type ice_fltr_mgmt_list_entry or adv_rule */
+ u8 adv_rule;
struct list_head filt_rules;
struct list_head filt_replay_rules;
- /* linked list of type recipe_list_entry */
- struct list_head rg_list;
- /* linked list of type ice_sw_fv_list_entry*/
+ struct mutex filt_rule_lock; /* protect filter rule structure */
+
+ /* Profiles this recipe should be associated with */
struct list_head fv_list;
- struct ice_aqc_recipe_data_elem *r_buf;
- u8 recp_count;
- u8 root_rid;
- u8 num_profs;
- u8 *prof_ids;
- /* recipe bitmap: what all recipes makes this recipe */
- DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES);
+ /* Profiles this recipe is associated with */
+ u8 num_profs, *prof_ids;
+
+ /* Bit map for possible result indexes */
+ DECLARE_BITMAP(res_idxs, ICE_MAX_FV_WORDS);
+
+ /* This allows the user to specify the recipe priority.
+ * For now, this becomes 'fwd_priority' when the recipe
+ * is created; recipes can usually have 'fwd' and 'join'
+ * priority.
+ */
+ u8 priority;
+
+ struct list_head rg_list;
+
+ /* AQ buffer associated with this recipe */
+ struct ice_aqc_recipe_data_elem *root_buf;
+ /* This struct saves the fv_words for a given lookup */
+ struct ice_prot_lkup_ext lkup_exts;
};
/* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list ID */
@@ -183,6 +280,16 @@ struct ice_fltr_mgmt_list_entry {
u8 counter_index;
};
+struct ice_adv_fltr_mgmt_list_entry {
+ struct list_head list_entry;
+
+ struct ice_adv_lkup_elem *lkups;
+ struct ice_adv_rule_info rule_info;
+ u16 lkups_cnt;
+ struct ice_vsi_list_map_info *vsi_list_info;
+ u16 vsi_count;
+};
+
enum ice_promisc_flags {
ICE_PROMISC_UCAST_RX = 0x1,
ICE_PROMISC_UCAST_TX = 0x2,
@@ -218,6 +325,10 @@ ice_free_res_cntr(struct ice_hw *hw, u8 type, u8 alloc_shared, u16 num_items,
u16 counter_id);
/* Switch/bridge related commands */
+enum ice_status
+ice_add_adv_rule(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups,
+ u16 lkups_cnt, struct ice_adv_rule_info *rinfo,
+ struct ice_rule_query_data *added_entry);
enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw);
enum ice_status ice_add_mac(struct ice_hw *hw, struct list_head *m_lst);
enum ice_status ice_remove_mac(struct ice_hw *hw, struct list_head *m_lst);
@@ -227,6 +338,8 @@ enum ice_status
ice_remove_eth_mac(struct ice_hw *hw, struct list_head *em_list);
int
ice_cfg_rdma_fltr(struct ice_hw *hw, u16 vsi_handle, bool enable);
+bool ice_mac_fltr_exist(struct ice_hw *hw, u8 *mac, u16 vsi_handle);
+bool ice_vlan_fltr_exist(struct ice_hw *hw, u16 vlan_id, u16 vsi_handle);
void ice_remove_vsi_fltr(struct ice_hw *hw, u16 vsi_handle);
enum ice_status
ice_add_vlan(struct ice_hw *hw, struct list_head *m_list);
@@ -245,10 +358,19 @@ enum ice_status
ice_set_vlan_vsi_promisc(struct ice_hw *hw, u16 vsi_handle, u8 promisc_mask,
bool rm_vlan_promisc);
+enum ice_status
+ice_rem_adv_rule_for_vsi(struct ice_hw *hw, u16 vsi_handle);
+enum ice_status
+ice_rem_adv_rule_by_id(struct ice_hw *hw,
+ struct ice_rule_query_data *remove_entry);
+
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw);
u16 ice_get_hw_vsi_num(struct ice_hw *hw, u16 vsi_handle);
enum ice_status ice_replay_vsi_all_fltr(struct ice_hw *hw, u16 vsi_handle);
void ice_rm_all_sw_replay_rule_info(struct ice_hw *hw);
+enum ice_status
+ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
+ u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd);
#endif /* _ICE_SWITCH_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
new file mode 100644
index 000000000000..e5d23feb6701
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -0,0 +1,1369 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#include "ice.h"
+#include "ice_tc_lib.h"
+#include "ice_fltr.h"
+#include "ice_lib.h"
+#include "ice_protocol_type.h"
+
+/**
+ * ice_tc_count_lkups - determine lookup count for switch filter
+ * @flags: TC-flower flags
+ * @headers: Pointer to TC flower filter header structure
+ * @fltr: Pointer to outer TC filter structure
+ *
+ * Determine lookup count based on TC flower input for switch filter.
+ */
+static int
+ice_tc_count_lkups(u32 flags, struct ice_tc_flower_lyr_2_4_hdrs *headers,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int lkups_cnt = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID)
+ lkups_cnt++;
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6))
+ lkups_cnt++;
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT)
+ lkups_cnt++;
+
+ /* currently inner etype filter isn't supported */
+ if ((flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) &&
+ fltr->tunnel_type == TNL_LAST)
+ lkups_cnt++;
+
+ /* are MAC fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC | ICE_TC_FLWR_FIELD_SRC_MAC))
+ lkups_cnt++;
+
+ /* is VLAN specified? */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN)
+ lkups_cnt++;
+
+ /* are IPv[4|6] fields specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_DEST_IPV6 | ICE_TC_FLWR_FIELD_SRC_IPV6))
+ lkups_cnt++;
+
+ /* is L4 (TCP/UDP/any other L4 protocol fields) specified? */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT))
+ lkups_cnt++;
+
+ return lkups_cnt;
+}
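A hedged sketch of how this count would typically be paired with the allocation of the lookup array that the fill helpers below populate; the wrapper name is hypothetical and error handling beyond the allocation check is omitted.

/* Hypothetical helper, not part of this patch: size and allocate the
 * ice_adv_lkup_elem array from the flag-driven lookup count.
 */
static struct ice_adv_lkup_elem *
example_alloc_lkups(u32 flags, struct ice_tc_flower_fltr *fltr, int *lkups_cnt)
{
	struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;

	/* the count determines how many lookup elements the fill code needs */
	*lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);

	return kcalloc(*lkups_cnt, sizeof(struct ice_adv_lkup_elem), GFP_KERNEL);
}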
+
+static enum ice_protocol_type ice_proto_type_from_mac(bool inner)
+{
+ return inner ? ICE_MAC_IL : ICE_MAC_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv4(bool inner)
+{
+ return inner ? ICE_IPV4_IL : ICE_IPV4_OFOS;
+}
+
+static enum ice_protocol_type ice_proto_type_from_ipv6(bool inner)
+{
+ return inner ? ICE_IPV6_IL : ICE_IPV6_OFOS;
+}
+
+static enum ice_protocol_type
+ice_proto_type_from_l4_port(bool inner, u16 ip_proto)
+{
+ if (inner) {
+ switch (ip_proto) {
+ case IPPROTO_UDP:
+ return ICE_UDP_ILOS;
+ }
+ } else {
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ return ICE_TCP_IL;
+ case IPPROTO_UDP:
+ return ICE_UDP_OF;
+ }
+ }
+
+ return 0;
+}
+
+static enum ice_protocol_type
+ice_proto_type_from_tunnel(enum ice_tunnel_type type)
+{
+ switch (type) {
+ case TNL_VXLAN:
+ return ICE_VXLAN;
+ case TNL_GENEVE:
+ return ICE_GENEVE;
+ case TNL_GRETAP:
+ return ICE_NVGRE;
+ default:
+ return 0;
+ }
+}
+
+static enum ice_sw_tunnel_type
+ice_sw_type_from_tunnel(enum ice_tunnel_type type)
+{
+ switch (type) {
+ case TNL_VXLAN:
+ return ICE_SW_TUN_VXLAN;
+ case TNL_GENEVE:
+ return ICE_SW_TUN_GENEVE;
+ case TNL_GRETAP:
+ return ICE_SW_TUN_NVGRE;
+ default:
+ return ICE_NON_TUN;
+ }
+}
+
+static int
+ice_tc_fill_tunnel_outer(u32 flags, struct ice_tc_flower_fltr *fltr,
+ struct ice_adv_lkup_elem *list)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *hdr = &fltr->outer_headers;
+ int i = 0;
+
+ if (flags & ICE_TC_FLWR_FIELD_TENANT_ID) {
+ u32 tenant_id;
+
+ list[i].type = ice_proto_type_from_tunnel(fltr->tunnel_type);
+ switch (fltr->tunnel_type) {
+ case TNL_VXLAN:
+ case TNL_GENEVE:
+ tenant_id = be32_to_cpu(fltr->tenant_id) << 8;
+ list[i].h_u.tnl_hdr.vni = cpu_to_be32(tenant_id);
+ memcpy(&list[i].m_u.tnl_hdr.vni, "\xff\xff\xff\x00", 4);
+ i++;
+ break;
+ case TNL_GRETAP:
+ list[i].h_u.nvgre_hdr.tni_flow = fltr->tenant_id;
+ memcpy(&list[i].m_u.nvgre_hdr.tni_flow, "\xff\xff\xff\xff", 4);
+ i++;
+ break;
+ default:
+ break;
+ }
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV4)) {
+ list[i].type = ice_proto_type_from_ipv4(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = hdr->l3_key.src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = hdr->l3_mask.src_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = hdr->l3_key.dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = hdr->l3_mask.dst_ipv4;
+ }
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6)) {
+ list[i].type = ice_proto_type_from_ipv6(false);
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_SRC_IPV6) {
+ memcpy(&list[i].h_u.ipv6_hdr.src_addr,
+ &hdr->l3_key.src_ipv6_addr,
+ sizeof(hdr->l3_key.src_ipv6_addr));
+ memcpy(&list[i].m_u.ipv6_hdr.src_addr,
+ &hdr->l3_mask.src_ipv6_addr,
+ sizeof(hdr->l3_mask.src_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_IPV6) {
+ memcpy(&list[i].h_u.ipv6_hdr.dst_addr,
+ &hdr->l3_key.dst_ipv6_addr,
+ sizeof(hdr->l3_key.dst_ipv6_addr));
+ memcpy(&list[i].m_u.ipv6_hdr.dst_addr,
+ &hdr->l3_mask.dst_ipv6_addr,
+ sizeof(hdr->l3_mask.dst_ipv6_addr));
+ }
+ i++;
+ }
+
+ if (flags & ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT) {
+ list[i].type = ice_proto_type_from_l4_port(false, hdr->l3_key.ip_proto);
+ list[i].h_u.l4_hdr.dst_port = hdr->l4_key.dst_port;
+ list[i].m_u.l4_hdr.dst_port = hdr->l4_mask.dst_port;
+ i++;
+ }
+
+ return i;
+}
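
The VNI handling above is easier to follow with concrete numbers (a hypothetical snippet, not part of this patch): the 24-bit VNI sits in the upper three bytes of the 32-bit field, hence the shift by 8 and the "\xff\xff\xff\x00" mask.

static __be32 ice_tc_example_vni(void)
{
	__be32 keyid = cpu_to_be32(100);	  /* e.g. fltr->tenant_id for VNI 100 */
	u32 tenant_id = be32_to_cpu(keyid) << 8;  /* 0x00006400 */

	/* stored back as big-endian the wire bytes are 00 00 64 00, so the
	 * 0xffffff00 mask matches the 24-bit VNI and ignores the low byte
	 */
	return cpu_to_be32(tenant_id);
}
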
+
+/**
+ * ice_tc_fill_rules - fill filter rules based on TC fltr
+ * @hw: pointer to HW structure
+ * @flags: tc flower field flags
+ * @tc_fltr: pointer to TC flower filter
+ * @list: list of advance rule elements
+ * @rule_info: pointer to information about rule
+ * @l4_proto: pointer to information such as L4 proto type
+ *
+ * Fill the ice_adv_lkup_elem list based on the TC flower flags and
+ * headers. This list should be used to add an advanced filter in
+ * hardware.
+ */
+static int
+ice_tc_fill_rules(struct ice_hw *hw, u32 flags,
+ struct ice_tc_flower_fltr *tc_fltr,
+ struct ice_adv_lkup_elem *list,
+ struct ice_adv_rule_info *rule_info,
+ u16 *l4_proto)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ bool inner = false;
+ int i = 0;
+
+ rule_info->tun_type = ice_sw_type_from_tunnel(tc_fltr->tunnel_type);
+ if (tc_fltr->tunnel_type != TNL_LAST) {
+ i = ice_tc_fill_tunnel_outer(flags, tc_fltr, list);
+
+ headers = &tc_fltr->inner_headers;
+ inner = true;
+ } else if (flags & ICE_TC_FLWR_FIELD_ETH_TYPE_ID) {
+ list[i].type = ICE_ETYPE_OL;
+ list[i].h_u.ethertype.ethtype_id = headers->l2_key.n_proto;
+ list[i].m_u.ethertype.ethtype_id = headers->l2_mask.n_proto;
+ i++;
+ }
+
+ if (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC)) {
+ struct ice_tc_l2_hdr *l2_key, *l2_mask;
+
+ l2_key = &headers->l2_key;
+ l2_mask = &headers->l2_mask;
+
+ list[i].type = ice_proto_type_from_mac(inner);
+ if (flags & ICE_TC_FLWR_FIELD_DST_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.dst_addr,
+ l2_key->dst_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.dst_addr,
+ l2_mask->dst_mac);
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_MAC) {
+ ether_addr_copy(list[i].h_u.eth_hdr.src_addr,
+ l2_key->src_mac);
+ ether_addr_copy(list[i].m_u.eth_hdr.src_addr,
+ l2_mask->src_mac);
+ }
+ i++;
+ }
+
+ /* copy VLAN info */
+ if (flags & ICE_TC_FLWR_FIELD_VLAN) {
+ list[i].type = ICE_VLAN_OFOS;
+ list[i].h_u.vlan_hdr.vlan = headers->vlan_hdr.vlan_id;
+ list[i].m_u.vlan_hdr.vlan = cpu_to_be16(0xFFFF);
+ i++;
+ }
+
+ /* copy L3 (IPv[4|6]: src, dest) address */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_SRC_IPV4)) {
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ice_proto_type_from_ipv4(inner);
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV4) {
+ list[i].h_u.ipv4_hdr.dst_addr = l3_key->dst_ipv4;
+ list[i].m_u.ipv4_hdr.dst_addr = l3_mask->dst_ipv4;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV4) {
+ list[i].h_u.ipv4_hdr.src_addr = l3_key->src_ipv4;
+ list[i].m_u.ipv4_hdr.src_addr = l3_mask->src_ipv4;
+ }
+ i++;
+ } else if (flags & (ICE_TC_FLWR_FIELD_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ struct ice_ipv6_hdr *ipv6_hdr, *ipv6_mask;
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ list[i].type = ice_proto_type_from_ipv6(inner);
+ ipv6_hdr = &list[i].h_u.ipv6_hdr;
+ ipv6_mask = &list[i].m_u.ipv6_hdr;
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_IPV6) {
+ memcpy(&ipv6_hdr->dst_addr, &l3_key->dst_ipv6_addr,
+ sizeof(l3_key->dst_ipv6_addr));
+ memcpy(&ipv6_mask->dst_addr, &l3_mask->dst_ipv6_addr,
+ sizeof(l3_mask->dst_ipv6_addr));
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_IPV6) {
+ memcpy(&ipv6_hdr->src_addr, &l3_key->src_ipv6_addr,
+ sizeof(l3_key->src_ipv6_addr));
+ memcpy(&ipv6_mask->src_addr, &l3_mask->src_ipv6_addr,
+ sizeof(l3_mask->src_ipv6_addr));
+ }
+ i++;
+ }
+
+ /* copy L4 (src, dest) port */
+ if (flags & (ICE_TC_FLWR_FIELD_DEST_L4_PORT |
+ ICE_TC_FLWR_FIELD_SRC_L4_PORT)) {
+ struct ice_tc_l4_hdr *l4_key, *l4_mask;
+
+ list[i].type = ice_proto_type_from_l4_port(inner, headers->l3_key.ip_proto);
+ l4_key = &headers->l4_key;
+ l4_mask = &headers->l4_mask;
+
+ if (flags & ICE_TC_FLWR_FIELD_DEST_L4_PORT) {
+ list[i].h_u.l4_hdr.dst_port = l4_key->dst_port;
+ list[i].m_u.l4_hdr.dst_port = l4_mask->dst_port;
+ }
+ if (flags & ICE_TC_FLWR_FIELD_SRC_L4_PORT) {
+ list[i].h_u.l4_hdr.src_port = l4_key->src_port;
+ list[i].m_u.l4_hdr.src_port = l4_mask->src_port;
+ }
+ i++;
+ }
+
+ return i;
+}
+
+/**
+ * ice_tc_tun_get_type - get the tunnel type
+ * @tunnel_dev: ptr to tunnel device
+ *
+ * This function detects the appropriate tunnel_type if the specified device
+ * is a tunnel device such as VXLAN, GENEVE, or GRETAP.
+ */
+static int ice_tc_tun_get_type(struct net_device *tunnel_dev)
+{
+ if (netif_is_vxlan(tunnel_dev))
+ return TNL_VXLAN;
+ if (netif_is_geneve(tunnel_dev))
+ return TNL_GENEVE;
+ if (netif_is_gretap(tunnel_dev) ||
+ netif_is_ip6gretap(tunnel_dev))
+ return TNL_GRETAP;
+ return TNL_LAST;
+}
+
+bool ice_is_tunnel_supported(struct net_device *dev)
+{
+ return ice_tc_tun_get_type(dev) != TNL_LAST;
+}
+
+static int
+ice_eswitch_tc_parse_action(struct ice_tc_flower_fltr *fltr,
+ struct flow_action_entry *act)
+{
+ struct ice_repr *repr;
+
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ fltr->action.fltr_act = ICE_DROP_PACKET;
+ break;
+
+ case FLOW_ACTION_REDIRECT:
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+
+ if (ice_is_port_repr_netdev(act->dev)) {
+ repr = ice_netdev_to_repr(act->dev);
+
+ fltr->dest_vsi = repr->src_vsi;
+ fltr->direction = ICE_ESWITCH_FLTR_INGRESS;
+ } else if (netif_is_ice(act->dev) ||
+ ice_is_tunnel_supported(act->dev)) {
+ fltr->direction = ICE_ESWITCH_FLTR_EGRESS;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported netdevice in switchdev mode");
+ return -EINVAL;
+ }
+
+ break;
+
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action in switchdev mode");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = { 0 };
+ struct ice_rule_query_data rule_added;
+ struct ice_hw *hw = &vsi->back->hw;
+ struct ice_adv_lkup_elem *list;
+ u32 flags = fltr->flags;
+ enum ice_status status;
+ int lkups_cnt;
+ int ret = 0;
+ int i;
+
+ if (!flags || (flags & ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, fltr, list, &rule_info, NULL);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+	/* egress traffic is always redirected to the uplink */
+ if (fltr->direction == ICE_ESWITCH_FLTR_EGRESS)
+ fltr->dest_vsi = vsi->back->switchdev.uplink_vsi;
+
+ rule_info.sw_act.fltr_act = fltr->action.fltr_act;
+ if (fltr->action.fltr_act != ICE_DROP_PACKET)
+ rule_info.sw_act.vsi_handle = fltr->dest_vsi->idx;
+	/* For now, use the highest priority; it also becomes the priority
+	 * of the recipe that gets created as a result of the new extraction
+	 * sequence based on the input set.
+	 * Priority '7' is the max value for a switch recipe; the number
+	 * determines the order of switch rule evaluation.
+	 */
+ rule_info.priority = 7;
+
+ if (fltr->direction == ICE_ESWITCH_FLTR_INGRESS) {
+ rule_info.sw_act.flag |= ICE_FLTR_RX;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+ rule_info.flags_info.act_valid = true;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = fltr->cookie;
+
+ status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+ if (status == ICE_ERR_ALREADY_EXISTS) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because it already exists");
+ ret = -EINVAL;
+ goto exit;
+ } else if (status) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ fltr->rid = rule_added.rid;
+ fltr->rule_id = rule_added.rule_id;
+
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_add_tc_flower_adv_fltr - add appropriate filter rules
+ * @vsi: Pointer to VSI
+ * @tc_fltr: Pointer to TC flower filter structure
+ *
+ * Add the appropriate filter rules based on the filter parameters, using
+ * advanced recipes supported by the OS package.
+ */
+static int
+ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
+ struct ice_tc_flower_fltr *tc_fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &tc_fltr->outer_headers;
+ struct ice_adv_rule_info rule_info = {0};
+ struct ice_rule_query_data rule_added;
+ struct ice_adv_lkup_elem *list;
+ struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u32 flags = tc_fltr->flags;
+ struct ice_vsi *ch_vsi;
+ struct device *dev;
+ u16 lkups_cnt = 0;
+ u16 l4_proto = 0;
+	enum ice_status status;
+	int ret = 0;
+ u16 i = 0;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_is_safe_mode(pf)) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because driver is in safe mode");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flags || (flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 |
+ ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT))) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unsupported encap field(s)");
+ return -EOPNOTSUPP;
+ }
+
+ /* get the channel (aka ADQ VSI) */
+ if (tc_fltr->dest_vsi)
+ ch_vsi = tc_fltr->dest_vsi;
+ else
+ ch_vsi = vsi->tc_map_vsi[tc_fltr->action.tc_class];
+
+ lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
+ list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
+ if (!list)
+ return -ENOMEM;
+
+ i = ice_tc_fill_rules(hw, flags, tc_fltr, list, &rule_info, &l4_proto);
+ if (i != lkups_cnt) {
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = tc_fltr->action.fltr_act;
+ if (tc_fltr->action.tc_class >= ICE_CHNL_START_TC) {
+ if (!ch_vsi) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack, "Unable to add filter because specified destination doesn't exist");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ rule_info.sw_act.fltr_act = ICE_FWD_TO_VSI;
+ rule_info.sw_act.vsi_handle = ch_vsi->idx;
+ rule_info.priority = 7;
+ rule_info.sw_act.src = hw->pf_id;
+ rule_info.rx = true;
+ dev_dbg(dev, "add switch rule for TC:%u vsi_idx:%u, lkups_cnt:%u\n",
+ tc_fltr->action.tc_class,
+ rule_info.sw_act.vsi_handle, lkups_cnt);
+ } else {
+ rule_info.sw_act.flag |= ICE_FLTR_TX;
+ rule_info.sw_act.src = vsi->idx;
+ rule_info.rx = false;
+ }
+
+ /* specify the cookie as filter_rule_id */
+ rule_info.fltr_rule_id = tc_fltr->cookie;
+
+	status = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
+	if (status == ICE_ERR_ALREADY_EXISTS) {
+		NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+				   "Unable to add filter because it already exists");
+		ret = -EINVAL;
+		goto exit;
+	} else if (status) {
+ NL_SET_ERR_MSG_MOD(tc_fltr->extack,
+ "Unable to add filter due to error");
+ ret = -EIO;
+ goto exit;
+ }
+
+ /* store the output params, which are needed later for removing
+ * advanced switch filter
+ */
+ tc_fltr->rid = rule_added.rid;
+ tc_fltr->rule_id = rule_added.rule_id;
+ if (tc_fltr->action.tc_class > 0 && ch_vsi) {
+		/* For PF ADQ, the VSI type is set as ICE_VSI_CHNL, but the
+		 * dest_vsi is not yet set in tc_fltr for a PF ADQ filter,
+		 * hence store the dest_vsi pointer in tc_fltr
+		 */
+ if (ch_vsi->type == ICE_VSI_CHNL)
+ tc_fltr->dest_vsi = ch_vsi;
+ /* keep track of advanced switch filter for
+ * destination VSI (channel VSI)
+ */
+ ch_vsi->num_chnl_fltr++;
+ /* in this case, dest_id is VSI handle (sw handle) */
+ tc_fltr->dest_id = rule_added.vsi_handle;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs++;
+ }
+ dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x) for TC %u, rid %u, rule_id %u, vsi_idx %u\n",
+ lkups_cnt, flags,
+ tc_fltr->action.tc_class, rule_added.rid,
+ rule_added.rule_id, rule_added.vsi_handle);
+exit:
+ kfree(list);
+ return ret;
+}
+
+/**
+ * ice_tc_set_ipv4 - Parse IPv4 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv4 address
+ */
+static int
+ice_tc_set_ipv4(struct flow_match_ipv4_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ if (match->key->dst) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV4;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV4;
+ headers->l3_key.dst_ipv4 = match->key->dst;
+ headers->l3_mask.dst_ipv4 = match->mask->dst;
+ }
+ if (match->key->src) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV4;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV4;
+ headers->l3_key.src_ipv4 = match->key->src;
+ headers->l3_mask.src_ipv4 = match->mask->src;
+ }
+ return 0;
+}
+
+/**
+ * ice_tc_set_ipv6 - Parse IPv6 addresses from TC flower filter
+ * @match: Pointer to flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel IPv6 address
+ */
+static int
+ice_tc_set_ipv6(struct flow_match_ipv6_addrs *match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ struct ice_tc_l3_hdr *l3_key, *l3_mask;
+
+ /* src and dest IPV6 address should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1), which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&match->key->dst) ||
+ ipv6_addr_loopback(&match->key->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad IPv6, addr is LOOPBACK");
+ return -EINVAL;
+ }
+	/* error if neither src nor dest IPv6 address is specified (both masks are any) */
+ if (ipv6_addr_any(&match->mask->dst) &&
+ ipv6_addr_any(&match->mask->src)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad src/dest IPv6, addr is any");
+ return -EINVAL;
+ }
+ if (!ipv6_addr_any(&match->mask->dst)) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_IPV6;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_IPV6;
+ }
+ if (!ipv6_addr_any(&match->mask->src)) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_IPV6;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_IPV6;
+ }
+
+ l3_key = &headers->l3_key;
+ l3_mask = &headers->l3_mask;
+
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 |
+ ICE_TC_FLWR_FIELD_SRC_IPV6)) {
+ memcpy(&l3_key->src_ipv6_addr, &match->key->src.s6_addr,
+ sizeof(match->key->src.s6_addr));
+ memcpy(&l3_mask->src_ipv6_addr, &match->mask->src.s6_addr,
+ sizeof(match->mask->src.s6_addr));
+ }
+ if (fltr->flags & (ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 |
+ ICE_TC_FLWR_FIELD_DEST_IPV6)) {
+ memcpy(&l3_key->dst_ipv6_addr, &match->key->dst.s6_addr,
+ sizeof(match->key->dst.s6_addr));
+ memcpy(&l3_mask->dst_ipv6_addr, &match->mask->dst.s6_addr,
+ sizeof(match->mask->dst.s6_addr));
+ }
+
+ return 0;
+}
+
+/**
+ * ice_tc_set_port - Parse ports from TC flower filter
+ * @match: Flow match structure
+ * @fltr: Pointer to filter structure
+ * @headers: inner or outer header fields
+ * @is_encap: set true for tunnel port
+ */
+static int
+ice_tc_set_port(struct flow_match_ports match,
+ struct ice_tc_flower_fltr *fltr,
+ struct ice_tc_flower_lyr_2_4_hdrs *headers, bool is_encap)
+{
+ if (match.key->dst) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_DEST_L4_PORT;
+ headers->l4_key.dst_port = match.key->dst;
+ headers->l4_mask.dst_port = match.mask->dst;
+ }
+ if (match.key->src) {
+ if (is_encap)
+ fltr->flags |= ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT;
+ else
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_L4_PORT;
+ headers->l4_key.src_port = match.key->src;
+ headers->l4_mask.src_port = match.mask->src;
+ }
+ return 0;
+}
+
+static struct net_device *
+ice_get_tunnel_device(struct net_device *dev, struct flow_rule *rule)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ if (ice_is_tunnel_supported(dev))
+ return dev;
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id == FLOW_ACTION_REDIRECT &&
+ ice_is_tunnel_supported(act->dev))
+ return act->dev;
+ }
+
+ return NULL;
+}
+
+static int
+ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_match_control enc_control;
+
+ fltr->tunnel_type = ice_tc_tun_get_type(dev);
+ headers->l3_key.ip_proto = IPPROTO_UDP;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_match_enc_keyid enc_keyid;
+
+ flow_rule_match_enc_keyid(rule, &enc_keyid);
+
+ if (!enc_keyid.mask->keyid ||
+ enc_keyid.mask->keyid != cpu_to_be32(ICE_TC_FLOWER_MASK_32))
+ return -EINVAL;
+
+ fltr->flags |= ICE_TC_FLWR_FIELD_TENANT_ID;
+ fltr->tenant_id = enc_keyid.key->keyid;
+ }
+
+ flow_rule_match_enc_control(rule, &enc_control);
+
+ if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_enc_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers, true))
+ return -EINVAL;
+ } else if (enc_control.key->addr_type ==
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_enc_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers, true))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
+ struct flow_match_ip match;
+
+ flow_rule_match_enc_ip(rule, &match);
+ headers->l3_key.tos = match.key->tos;
+ headers->l3_key.ttl = match.key->ttl;
+ headers->l3_mask.tos = match.mask->tos;
+ headers->l3_mask.ttl = match.mask->ttl;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_enc_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers, true))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_parse_cls_flower - Parse TC flower filters provided by kernel
+ * @vsi: Pointer to the VSI
+ * @filter_dev: Pointer to device on which filter is being added
+ * @f: Pointer to struct flow_cls_offload
+ * @fltr: Pointer to filter structure
+ */
+static int
+ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers;
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ u16 n_proto_mask = 0, n_proto_key = 0, addr_type = 0;
+ struct flow_dissector *dissector;
+ struct net_device *tunnel_dev;
+
+ dissector = rule->match.dissector;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported key used");
+ return -EOPNOTSUPP;
+ }
+
+ tunnel_dev = ice_get_tunnel_device(filter_dev, rule);
+ if (tunnel_dev) {
+ int err;
+
+ filter_dev = tunnel_dev;
+
+ err = ice_parse_tunnel_attr(filter_dev, rule, fltr);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to parse TC flower tunnel attributes");
+ return err;
+ }
+
+		/* header pointers should point to the inner headers; the
+		 * outer headers were already set by ice_parse_tunnel_attr
+		 */
+ headers = &fltr->inner_headers;
+ } else if (dissector->used_keys &
+ (BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Tunnel key used, but device isn't a tunnel");
+ return -EOPNOTSUPP;
+ } else {
+ fltr->tunnel_type = TNL_LAST;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+
+ n_proto_key = ntohs(match.key->n_proto);
+ n_proto_mask = ntohs(match.mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL || n_proto_key == 0) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ } else {
+ fltr->flags |= ICE_TC_FLWR_FIELD_ETH_TYPE_ID;
+ }
+
+ headers->l2_key.n_proto = cpu_to_be16(n_proto_key);
+ headers->l2_mask.n_proto = cpu_to_be16(n_proto_mask);
+ headers->l3_key.ip_proto = match.key->ip_proto;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.key->dst)) {
+ ether_addr_copy(headers->l2_key.dst_mac,
+ match.key->dst);
+ ether_addr_copy(headers->l2_mask.dst_mac,
+ match.mask->dst);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+ if (!is_zero_ether_addr(match.key->src)) {
+ ether_addr_copy(headers->l2_key.src_mac,
+ match.key->src);
+ ether_addr_copy(headers->l2_mask.src_mac,
+ match.mask->src);
+ fltr->flags |= ICE_TC_FLWR_FIELD_SRC_MAC;
+ }
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
+ is_vlan_dev(filter_dev)) {
+ struct flow_dissector_key_vlan mask;
+ struct flow_dissector_key_vlan key;
+ struct flow_match_vlan match;
+
+ if (is_vlan_dev(filter_dev)) {
+ match.key = &key;
+ match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
+ match.key->vlan_priority = 0;
+ match.mask = &mask;
+ memset(match.mask, 0xff, sizeof(*match.mask));
+ match.mask->vlan_priority = 0;
+ } else {
+ flow_rule_match_vlan(rule, &match);
+ }
+
+ if (match.mask->vlan_id) {
+ if (match.mask->vlan_id == VLAN_VID_MASK) {
+ fltr->flags |= ICE_TC_FLWR_FIELD_VLAN;
+ } else {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Bad VLAN mask");
+ return -EINVAL;
+ }
+ }
+
+ headers->vlan_hdr.vlan_id =
+ cpu_to_be16(match.key->vlan_id & VLAN_VID_MASK);
+ if (match.mask->vlan_priority)
+ headers->vlan_hdr.vlan_prio = match.key->vlan_priority;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+
+ addr_type = match.key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs match;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ if (ice_tc_set_ipv4(&match, fltr, headers, false))
+ return -EINVAL;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs match;
+
+ flow_rule_match_ipv6_addrs(rule, &match);
+ if (ice_tc_set_ipv6(&match, fltr, headers, false))
+ return -EINVAL;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports match;
+
+ flow_rule_match_ports(rule, &match);
+ if (ice_tc_set_port(match, fltr, headers, false))
+ return -EINVAL;
+ switch (headers->l3_key.ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Only UDP and TCP transport are supported");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_switch_fltr - Add TC flower filters
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * Add filter in HW switch block
+ */
+static int
+ice_add_switch_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ if (fltr->action.fltr_act == ICE_FWD_TO_QGRP)
+ return -EOPNOTSUPP;
+
+ if (ice_is_eswitch_mode_switchdev(vsi->back))
+ return ice_eswitch_add_tc_fltr(vsi, fltr);
+
+ return ice_add_tc_flower_adv_fltr(vsi, fltr);
+}
+
+/**
+ * ice_handle_tclass_action - Support directing to a traffic class
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Support directing traffic to a traffic class
+ */
+static int
+ice_handle_tclass_action(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ int tc = tc_classid_to_hwtc(vsi->netdev, cls_flower->classid);
+ struct ice_vsi *main_vsi;
+
+ if (tc < 0) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because specified destination is invalid");
+ return -EINVAL;
+ }
+ if (!tc) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because of invalid destination");
+ return -EINVAL;
+ }
+
+ if (!(vsi->all_enatc & BIT(tc))) {
+		NL_SET_ERR_MSG_MOD(fltr->extack, "Unable to add filter because the specified destination doesn't exist");
+ return -EINVAL;
+ }
+
+ /* Redirect to a TC class or Queue Group */
+ main_vsi = ice_get_main_vsi(vsi->back);
+ if (!main_vsi || !main_vsi->netdev) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because of invalid netdevice");
+ return -EINVAL;
+ }
+
+ if ((fltr->flags & ICE_TC_FLWR_FIELD_TENANT_ID) &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_SRC_MAC))) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because filter using tunnel key and inner MAC is unsupported combination");
+ return -EOPNOTSUPP;
+ }
+
+	/* For ADQ, the filter must include a dest MAC address; otherwise
+	 * unwanted packets with unrelated MAC addresses get delivered to ADQ
+	 * VSIs as long as the remaining filter criteria are satisfied, such
+	 * as dest IP address and dest/src L4 port. The following code handles:
+	 * 1. For non-tunnel, if the user specified MAC addresses, use them
+	 *    (meaning this code does nothing).
+	 * 2. For non-tunnel, if the user didn't specify a MAC address, add an
+	 *    implicit dest MAC equal to the lower netdev's active unicast MAC
+	 *    address.
+	 */
+ if (!(fltr->flags & ICE_TC_FLWR_FIELD_DST_MAC)) {
+ ether_addr_copy(fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->netdev->dev_addr);
+ eth_broadcast_addr(fltr->outer_headers.l2_mask.dst_mac);
+ fltr->flags |= ICE_TC_FLWR_FIELD_DST_MAC;
+ }
+
+	/* Validate the specified dest MAC address; make sure it belongs either
+	 * to the lower netdev or to one of its MACVLANs. MACVLAN MAC addresses
+	 * are added as unicast MAC filters destined to the main VSI.
+	 */
+ if (!ice_mac_fltr_exist(&main_vsi->back->hw,
+ fltr->outer_headers.l2_key.dst_mac,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy MAC filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+
+ /* Make sure VLAN is already added to main VSI, before allowing ADQ to
+ * add a VLAN based filter such as MAC + VLAN + L4 port.
+ */
+ if (fltr->flags & ICE_TC_FLWR_FIELD_VLAN) {
+ u16 vlan_id = be16_to_cpu(fltr->outer_headers.vlan_hdr.vlan_id);
+
+ if (!ice_vlan_fltr_exist(&main_vsi->back->hw, vlan_id,
+ main_vsi->idx)) {
+ NL_SET_ERR_MSG_MOD(fltr->extack,
+ "Unable to add filter because legacy VLAN filter for specified destination doesn't exist");
+ return -EINVAL;
+ }
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ fltr->action.tc_class = tc;
+
+ return 0;
+}
+
+/**
+ * ice_parse_tc_flower_actions - Parse the actions for a TC filter
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to TC flower offload structure
+ * @fltr: Pointer to TC flower filter structure
+ *
+ * Parse the actions for a TC filter
+ */
+static int
+ice_parse_tc_flower_actions(struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower,
+ struct ice_tc_flower_fltr *fltr)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls_flower);
+ struct flow_action *flow_action = &rule->action;
+ struct flow_action_entry *act;
+ int i;
+
+ if (cls_flower->classid)
+ return ice_handle_tclass_action(vsi, cls_flower, fltr);
+
+ if (!flow_action_has_entries(flow_action))
+ return -EINVAL;
+
+ flow_action_for_each(i, act, flow_action) {
+ if (ice_is_eswitch_mode_switchdev(vsi->back)) {
+ int err = ice_eswitch_tc_parse_action(fltr, act);
+
+ if (err)
+ return err;
+ continue;
+ }
+ /* Allow only one rule per filter */
+
+ /* Drop action */
+ if (act->id == FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported action DROP");
+ return -EINVAL;
+ }
+ fltr->action.fltr_act = ICE_FWD_TO_VSI;
+ }
+ return 0;
+}
+
+/**
+ * ice_del_tc_fltr - deletes a filter from HW table
+ * @vsi: Pointer to VSI
+ * @fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function deletes a filter from HW table and manages book-keeping
+ */
+static int ice_del_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
+{
+ struct ice_rule_query_data rule_rem;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ rule_rem.rid = fltr->rid;
+ rule_rem.rule_id = fltr->rule_id;
+ rule_rem.vsi_handle = fltr->dest_id;
+ err = ice_rem_adv_rule_by_id(&pf->hw, &rule_rem);
+ if (err) {
+ if (err == ICE_ERR_DOES_NOT_EXIST) {
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Filter does not exist");
+ return -ENOENT;
+ }
+ NL_SET_ERR_MSG_MOD(fltr->extack, "Failed to delete TC flower filter");
+ return -EIO;
+ }
+
+ /* update advanced switch filter count for destination
+ * VSI if filter destination was VSI
+ */
+ if (fltr->dest_vsi) {
+ if (fltr->dest_vsi->type == ICE_VSI_CHNL) {
+ fltr->dest_vsi->num_chnl_fltr--;
+
+ /* keeps track of channel filters for PF VSI */
+ if (vsi->type == ICE_VSI_PF &&
+ (fltr->flags & (ICE_TC_FLWR_FIELD_DST_MAC |
+ ICE_TC_FLWR_FIELD_ENC_DST_MAC)))
+ pf->num_dmac_chnl_fltrs--;
+ }
+ }
+ return 0;
+}
+
+/**
+ * ice_add_tc_fltr - adds a TC flower filter
+ * @netdev: Pointer to netdev
+ * @vsi: Pointer to VSI
+ * @f: Pointer to flower offload structure
+ * @__fltr: Pointer to struct ice_tc_flower_fltr
+ *
+ * This function parses TC-flower input fields, parses action,
+ * and adds a filter.
+ */
+static int
+ice_add_tc_fltr(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *f,
+ struct ice_tc_flower_fltr **__fltr)
+{
+ struct ice_tc_flower_fltr *fltr;
+ int err;
+
+ /* by default, set output to be INVALID */
+ *__fltr = NULL;
+
+ fltr = kzalloc(sizeof(*fltr), GFP_KERNEL);
+ if (!fltr)
+ return -ENOMEM;
+
+ fltr->cookie = f->cookie;
+ fltr->extack = f->common.extack;
+ fltr->src_vsi = vsi;
+ INIT_HLIST_NODE(&fltr->tc_flower_node);
+
+ err = ice_parse_cls_flower(netdev, vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_parse_tc_flower_actions(vsi, f, fltr);
+ if (err < 0)
+ goto err;
+
+ err = ice_add_switch_fltr(vsi, fltr);
+ if (err < 0)
+ goto err;
+
+ /* return the newly created filter */
+ *__fltr = fltr;
+
+ return 0;
+err:
+ kfree(fltr);
+ return err;
+}
+
+/**
+ * ice_find_tc_flower_fltr - Find the TC flower filter in the list
+ * @pf: Pointer to PF
+ * @cookie: filter specific cookie
+ */
+static struct ice_tc_flower_fltr *
+ice_find_tc_flower_fltr(struct ice_pf *pf, unsigned long cookie)
+{
+ struct ice_tc_flower_fltr *fltr;
+
+ hlist_for_each_entry(fltr, &pf->tc_flower_fltr_list, tc_flower_node)
+ if (cookie == fltr->cookie)
+ return fltr;
+
+ return NULL;
+}
+
+/**
+ * ice_add_cls_flower - add TC flower filters
+ * @netdev: Pointer to filter device
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to flower offload structure
+ */
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower)
+{
+ struct netlink_ext_ack *extack = cls_flower->common.extack;
+ struct net_device *vsi_netdev = vsi->netdev;
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ if (ice_is_reset_in_progress(pf->state))
+ return -EBUSY;
+ if (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ return -EINVAL;
+
+ if (ice_is_port_repr_netdev(netdev))
+ vsi_netdev = netdev;
+
+ if (!(vsi_netdev->features & NETIF_F_HW_TC) &&
+ !test_bit(ICE_FLAG_CLS_FLOWER, pf->flags)) {
+		/* Based on TC indirect notifications from the kernel, all ice
+		 * devices get an instance of the rule from the higher-level
+		 * device. Avoid triggering an explicit error in this case.
+		 */
+ if (netdev == vsi_netdev)
+ NL_SET_ERR_MSG_MOD(extack, "can't apply TC flower filters, turn ON hw-tc-offload and try again");
+ return -EINVAL;
+ }
+
+ /* avoid duplicate entries, if exists - return error */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (fltr) {
+ NL_SET_ERR_MSG_MOD(extack, "filter cookie already exists, ignoring");
+ return -EEXIST;
+ }
+
+ /* prep and add TC-flower filter in HW */
+ err = ice_add_tc_fltr(netdev, vsi, cls_flower, &fltr);
+ if (err)
+ return err;
+
+ /* add filter into an ordered list */
+ hlist_add_head(&fltr->tc_flower_node, &pf->tc_flower_fltr_list);
+ return 0;
+}
+
+/**
+ * ice_del_cls_flower - delete TC flower filters
+ * @vsi: Pointer to VSI
+ * @cls_flower: Pointer to struct flow_cls_offload
+ */
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct ice_pf *pf = vsi->back;
+ int err;
+
+ /* find filter */
+ fltr = ice_find_tc_flower_fltr(pf, cls_flower->cookie);
+ if (!fltr) {
+ if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) &&
+ hlist_empty(&pf->tc_flower_fltr_list))
+ return 0;
+
+		NL_SET_ERR_MSG_MOD(cls_flower->common.extack, "failed to delete TC flower filter: unable to find it");
+ return -EINVAL;
+ }
+
+ fltr->extack = cls_flower->common.extack;
+ /* delete filter from HW */
+ err = ice_del_tc_fltr(vsi, fltr);
+ if (err)
+ return err;
+
+ /* delete filter from an ordered list */
+ hlist_del(&fltr->tc_flower_node);
+
+ /* free the filter node */
+ kfree(fltr);
+
+ return 0;
+}
+
+/**
+ * ice_replay_tc_fltrs - replay TC filters
+ * @pf: pointer to PF struct
+ */
+void ice_replay_tc_fltrs(struct ice_pf *pf)
+{
+ struct ice_tc_flower_fltr *fltr;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(fltr, node,
+ &pf->tc_flower_fltr_list,
+ tc_flower_node) {
+ fltr->extack = NULL;
+ ice_add_switch_fltr(fltr->src_vsi, fltr);
+ }
+}
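
A sketch of how these entry points are expected to be driven (hypothetical callback; the actual dispatch lives outside this file and may differ in detail):

static int
ice_example_setup_tc_cls_flower(struct ice_netdev_priv *np,
				struct net_device *filter_dev,
				struct flow_cls_offload *cls_flower)
{
	struct ice_vsi *vsi = np->vsi;

	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return ice_add_cls_flower(filter_dev, vsi, cls_flower);
	case FLOW_CLS_DESTROY:
		return ice_del_cls_flower(vsi, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
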
diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.h b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
new file mode 100644
index 000000000000..319049477959
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019-2021, Intel Corporation. */
+
+#ifndef _ICE_TC_LIB_H_
+#define _ICE_TC_LIB_H_
+
+#define ICE_TC_FLWR_FIELD_DST_MAC BIT(0)
+#define ICE_TC_FLWR_FIELD_SRC_MAC BIT(1)
+#define ICE_TC_FLWR_FIELD_VLAN BIT(2)
+#define ICE_TC_FLWR_FIELD_DEST_IPV4 BIT(3)
+#define ICE_TC_FLWR_FIELD_SRC_IPV4 BIT(4)
+#define ICE_TC_FLWR_FIELD_DEST_IPV6 BIT(5)
+#define ICE_TC_FLWR_FIELD_SRC_IPV6 BIT(6)
+#define ICE_TC_FLWR_FIELD_DEST_L4_PORT BIT(7)
+#define ICE_TC_FLWR_FIELD_SRC_L4_PORT BIT(8)
+#define ICE_TC_FLWR_FIELD_TENANT_ID BIT(9)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV4 BIT(10)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV4 BIT(11)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_IPV6 BIT(12)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_IPV6 BIT(13)
+#define ICE_TC_FLWR_FIELD_ENC_DEST_L4_PORT BIT(14)
+#define ICE_TC_FLWR_FIELD_ENC_SRC_L4_PORT BIT(15)
+#define ICE_TC_FLWR_FIELD_ENC_DST_MAC BIT(16)
+#define ICE_TC_FLWR_FIELD_ETH_TYPE_ID BIT(17)
+
+#define ICE_TC_FLOWER_MASK_32 0xFFFFFFFF
+
+struct ice_indr_block_priv {
+ struct net_device *netdev;
+ struct ice_netdev_priv *np;
+ struct list_head list;
+};
+
+struct ice_tc_flower_action {
+ u32 tc_class;
+ enum ice_sw_fwd_act_type fltr_act;
+};
+
+struct ice_tc_vlan_hdr {
+ __be16 vlan_id; /* Only last 12 bits valid */
+ u16 vlan_prio; /* Only last 3 bits valid (valid values: 0..7) */
+};
+
+struct ice_tc_l2_hdr {
+ u8 dst_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ __be16 n_proto; /* Ethernet Protocol */
+};
+
+struct ice_tc_l3_hdr {
+ u8 ip_proto; /* IPPROTO value */
+ union {
+ struct {
+ struct in_addr dst_ip;
+ struct in_addr src_ip;
+ } v4;
+ struct {
+ struct in6_addr dst_ip6;
+ struct in6_addr src_ip6;
+ } v6;
+ } ip;
+#define dst_ipv6 ip.v6.dst_ip6.s6_addr32
+#define dst_ipv6_addr ip.v6.dst_ip6.s6_addr
+#define src_ipv6 ip.v6.src_ip6.s6_addr32
+#define src_ipv6_addr ip.v6.src_ip6.s6_addr
+#define dst_ipv4 ip.v4.dst_ip.s_addr
+#define src_ipv4 ip.v4.src_ip.s_addr
+
+ u8 tos;
+ u8 ttl;
+};
+
+struct ice_tc_l4_hdr {
+ __be16 dst_port;
+ __be16 src_port;
+};
+
+struct ice_tc_flower_lyr_2_4_hdrs {
+ /* L2 layer fields with their mask */
+ struct ice_tc_l2_hdr l2_key;
+ struct ice_tc_l2_hdr l2_mask;
+ struct ice_tc_vlan_hdr vlan_hdr;
+ /* L3 (IPv4[6]) layer fields with their mask */
+ struct ice_tc_l3_hdr l3_key;
+ struct ice_tc_l3_hdr l3_mask;
+
+ /* L4 layer fields with their mask */
+ struct ice_tc_l4_hdr l4_key;
+ struct ice_tc_l4_hdr l4_mask;
+};
+
+enum ice_eswitch_fltr_direction {
+ ICE_ESWITCH_FLTR_INGRESS,
+ ICE_ESWITCH_FLTR_EGRESS,
+};
+
+struct ice_tc_flower_fltr {
+ struct hlist_node tc_flower_node;
+
+ /* cookie becomes filter_rule_id if rule is added successfully */
+ unsigned long cookie;
+
+ /* add_adv_rule returns information like recipe ID, rule_id. Store
+ * those values since they are needed to remove advanced rule
+ */
+ u16 rid;
+ u16 rule_id;
+ /* this could be queue/vsi_idx (sw handle)/queue_group, depending upon
+ * destination type
+ */
+ u16 dest_id;
+ /* if dest_id is vsi_idx, then need to store destination VSI ptr */
+ struct ice_vsi *dest_vsi;
+ /* direction of fltr for eswitch use case */
+ enum ice_eswitch_fltr_direction direction;
+
+ /* Parsed TC flower configuration params */
+ struct ice_tc_flower_lyr_2_4_hdrs outer_headers;
+ struct ice_tc_flower_lyr_2_4_hdrs inner_headers;
+ struct ice_vsi *src_vsi;
+ __be32 tenant_id;
+ u32 flags;
+ u8 tunnel_type;
+ struct ice_tc_flower_action action;
+
+ /* cache ptr which is used wherever needed to communicate netlink
+ * messages
+ */
+ struct netlink_ext_ack *extack;
+};
+
+/**
+ * ice_is_chnl_fltr - is this a valid channel filter
+ * @f: Pointer to tc-flower filter
+ *
+ * Whether a given filter is a valid channel filter is determined by its
+ * "destination": if the destination is hw_tc (aka tc_class) and it is
+ * non-zero, then it is a valid channel (aka ADQ) filter.
+ */
+static inline bool ice_is_chnl_fltr(struct ice_tc_flower_fltr *f)
+{
+ return !!f->action.tc_class;
+}
+
+/**
+ * ice_chnl_dmac_fltr_cnt - DMAC based CHNL filter count
+ * @pf: Pointer to PF
+ */
+static inline int ice_chnl_dmac_fltr_cnt(struct ice_pf *pf)
+{
+ return pf->num_dmac_chnl_fltrs;
+}
+
+int
+ice_add_cls_flower(struct net_device *netdev, struct ice_vsi *vsi,
+ struct flow_cls_offload *cls_flower);
+int
+ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
+void ice_replay_tc_fltrs(struct ice_pf *pf);
+bool ice_is_tunnel_supported(struct net_device *dev);
+
+#endif /* _ICE_TC_LIB_H_ */
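
For orientation (hypothetical values, not part of this patch), this is roughly what a parsed filter for "forward TCP dport 80 on dst IP 192.168.1.1 to hw_tc 1" carries in the structures above:

static void ice_example_fltr_fields(struct ice_tc_flower_fltr *f)
{
	f->flags = ICE_TC_FLWR_FIELD_DEST_IPV4 | ICE_TC_FLWR_FIELD_DEST_L4_PORT;
	f->outer_headers.l3_key.ip_proto = IPPROTO_TCP;
	f->outer_headers.l3_key.dst_ipv4 = cpu_to_be32(0xc0a80101);	/* 192.168.1.1 */
	f->outer_headers.l3_mask.dst_ipv4 = cpu_to_be32(0xffffffff);
	f->outer_headers.l4_key.dst_port = cpu_to_be16(80);
	f->outer_headers.l4_mask.dst_port = cpu_to_be16(0xffff);
	f->action.fltr_act = ICE_FWD_TO_VSI;
	f->action.tc_class = 1;		/* non-zero => ice_is_chnl_fltr() is true */
}
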
diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h
index 9bc0b8fdfc77..cf685247c07a 100644
--- a/drivers/net/ethernet/intel/ice/ice_trace.h
+++ b/drivers/net/ethernet/intel/ice/ice_trace.h
@@ -64,15 +64,15 @@ DECLARE_EVENT_CLASS(ice_rx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->rx.ring->netdev->name)),
+ __string(devname, q_vector->rx.rx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->rx.ring->netdev->name);),
+ __assign_str(devname, q_vector->rx.rx_ring->netdev->name);),
TP_printk("netdev: %s Rx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->rx.ring->q_index,
+ __entry->q_vector->rx.rx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -91,15 +91,15 @@ DECLARE_EVENT_CLASS(ice_tx_dim_template,
TP_ARGS(q_vector, dim),
TP_STRUCT__entry(__field(struct ice_q_vector *, q_vector)
__field(struct dim *, dim)
- __string(devname, q_vector->tx.ring->netdev->name)),
+ __string(devname, q_vector->tx.tx_ring->netdev->name)),
TP_fast_assign(__entry->q_vector = q_vector;
__entry->dim = dim;
- __assign_str(devname, q_vector->tx.ring->netdev->name);),
+ __assign_str(devname, q_vector->tx.tx_ring->netdev->name);),
TP_printk("netdev: %s Tx-Q: %d dim-state: %d dim-profile: %d dim-tune: %d dim-st-right: %d dim-st-left: %d dim-tired: %d",
__get_str(devname),
- __entry->q_vector->tx.ring->q_index,
+ __entry->q_vector->tx.tx_ring->q_index,
__entry->dim->state,
__entry->dim->profile_ix,
__entry->dim->tune_state,
@@ -115,7 +115,7 @@ DEFINE_EVENT(ice_tx_dim_template, ice_tx_dim_work,
/* Events related to a vsi & ring */
DECLARE_EVENT_CLASS(ice_tx_template,
- TP_PROTO(struct ice_ring *ring, struct ice_tx_desc *desc,
+ TP_PROTO(struct ice_tx_ring *ring, struct ice_tx_desc *desc,
struct ice_tx_buf *buf),
TP_ARGS(ring, desc, buf),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(ice_tx_template,
#define DEFINE_TX_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_tx_template, name, \
- TP_PROTO(struct ice_ring *ring, \
+ TP_PROTO(struct ice_tx_ring *ring, \
struct ice_tx_desc *desc, \
struct ice_tx_buf *buf), \
TP_ARGS(ring, desc, buf))
@@ -145,7 +145,7 @@ DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap);
DEFINE_TX_TEMPLATE_OP_EVENT(ice_clean_tx_irq_unmap_eop);
DECLARE_EVENT_CLASS(ice_rx_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc),
@@ -161,12 +161,12 @@ DECLARE_EVENT_CLASS(ice_rx_template,
__entry->ring, __entry->desc)
);
DEFINE_EVENT(ice_rx_template, ice_clean_rx_irq,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc),
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc),
TP_ARGS(ring, desc)
);
DECLARE_EVENT_CLASS(ice_rx_indicate_template,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb),
@@ -186,13 +186,13 @@ DECLARE_EVENT_CLASS(ice_rx_indicate_template,
);
DEFINE_EVENT(ice_rx_indicate_template, ice_clean_rx_irq_indicate,
- TP_PROTO(struct ice_ring *ring, union ice_32b_rx_flex_desc *desc,
+ TP_PROTO(struct ice_rx_ring *ring, union ice_32b_rx_flex_desc *desc,
struct sk_buff *skb),
TP_ARGS(ring, desc, skb)
);
DECLARE_EVENT_CLASS(ice_xmit_template,
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb),
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb),
TP_ARGS(ring, skb),
@@ -210,7 +210,7 @@ DECLARE_EVENT_CLASS(ice_xmit_template,
#define DEFINE_XMIT_TEMPLATE_OP_EVENT(name) \
DEFINE_EVENT(ice_xmit_template, name, \
- TP_PROTO(struct ice_ring *ring, struct sk_buff *skb), \
+ TP_PROTO(struct ice_tx_ring *ring, struct sk_buff *skb), \
TP_ARGS(ring, skb))
DEFINE_XMIT_TEMPLATE_OP_EVENT(ice_xmit_frame_ring);
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index 6ee8e0032d52..bc3ba19dc88f 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -6,6 +6,7 @@
#include <linux/prefetch.h>
#include <linux/mm.h>
#include <linux/bpf_trace.h>
+#include <net/dsfield.h>
#include <net/xdp.h>
#include "ice_txrx_lib.h"
#include "ice_lib.h"
@@ -13,6 +14,7 @@
#include "ice_trace.h"
#include "ice_dcb_lib.h"
#include "ice_xsk.h"
+#include "ice_eswitch.h"
#define ICE_RX_HDR_SIZE 256
@@ -32,7 +34,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
struct ice_tx_buf *tx_buf, *first;
struct ice_fltr_desc *f_desc;
struct ice_tx_desc *tx_desc;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
struct device *dev;
dma_addr_t dma;
u32 td_cmd;
@@ -106,7 +108,7 @@ ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
* @tx_buf: the buffer to free
*/
static void
-ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
+ice_unmap_and_free_tx_buf(struct ice_tx_ring *ring, struct ice_tx_buf *tx_buf)
{
if (tx_buf->skb) {
if (tx_buf->tx_flags & ICE_TX_FLAGS_DUMMY_PKT)
@@ -133,7 +135,7 @@ ice_unmap_and_free_tx_buf(struct ice_ring *ring, struct ice_tx_buf *tx_buf)
/* tx_buf must be completely set up in the transmit path */
}
-static struct netdev_queue *txring_txq(const struct ice_ring *ring)
+static struct netdev_queue *txring_txq(const struct ice_tx_ring *ring)
{
return netdev_get_tx_queue(ring->netdev, ring->q_index);
}
@@ -142,8 +144,9 @@ static struct netdev_queue *txring_txq(const struct ice_ring *ring)
* ice_clean_tx_ring - Free any empty Tx buffers
* @tx_ring: ring to be cleaned
*/
-void ice_clean_tx_ring(struct ice_ring *tx_ring)
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
u16 i;
if (ice_ring_is_xdp(tx_ring) && tx_ring->xsk_pool) {
@@ -162,8 +165,10 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
tx_skip_free:
memset(tx_ring->tx_buf, 0, sizeof(*tx_ring->tx_buf) * tx_ring->count);
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
/* Zero out the descriptor ring */
- memset(tx_ring->desc, 0, tx_ring->size);
+ memset(tx_ring->desc, 0, size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
@@ -181,14 +186,18 @@ tx_skip_free:
*
* Free all transmit software resources
*/
-void ice_free_tx_ring(struct ice_ring *tx_ring)
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring)
{
+ u32 size;
+
ice_clean_tx_ring(tx_ring);
devm_kfree(tx_ring->dev, tx_ring->tx_buf);
tx_ring->tx_buf = NULL;
if (tx_ring->desc) {
- dmam_free_coherent(tx_ring->dev, tx_ring->size,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(tx_ring->dev, size,
tx_ring->desc, tx_ring->dma);
tx_ring->desc = NULL;
}
@@ -201,7 +210,7 @@ void ice_free_tx_ring(struct ice_ring *tx_ring)
*
* Returns true if there's any budget left (e.g. the clean is finished)
*/
-static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
+static bool ice_clean_tx_irq(struct ice_tx_ring *tx_ring, int napi_budget)
{
unsigned int total_bytes = 0, total_pkts = 0;
unsigned int budget = ICE_DFLT_IRQ_WORK;
@@ -238,11 +247,8 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
total_bytes += tx_buf->bytecount;
total_pkts += tx_buf->gso_segs;
- if (ice_ring_is_xdp(tx_ring))
- page_frag_free(tx_buf->raw_buf);
- else
- /* free the skb */
- napi_consume_skb(tx_buf->skb, napi_budget);
+ /* free the skb */
+ napi_consume_skb(tx_buf->skb, napi_budget);
/* unmap skb header data */
dma_unmap_single(tx_ring->dev,
@@ -298,9 +304,6 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
ice_update_tx_ring_stats(tx_ring, total_pkts, total_bytes);
- if (ice_ring_is_xdp(tx_ring))
- return !!budget;
-
netdev_tx_completed_queue(txring_txq(tx_ring), total_pkts,
total_bytes);
@@ -329,9 +332,10 @@ static bool ice_clean_tx_irq(struct ice_ring *tx_ring, int napi_budget)
*
* Return 0 on success, negative on error
*/
-int ice_setup_tx_ring(struct ice_ring *tx_ring)
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring)
{
struct device *dev = tx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -339,19 +343,19 @@ int ice_setup_tx_ring(struct ice_ring *tx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(tx_ring->tx_buf);
tx_ring->tx_buf =
- devm_kzalloc(dev, sizeof(*tx_ring->tx_buf) * tx_ring->count,
+ devm_kcalloc(dev, sizeof(*tx_ring->tx_buf), tx_ring->count,
GFP_KERNEL);
if (!tx_ring->tx_buf)
return -ENOMEM;
/* round up to nearest page */
- tx_ring->size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
- PAGE_SIZE);
- tx_ring->desc = dmam_alloc_coherent(dev, tx_ring->size, &tx_ring->dma,
+ size = ALIGN(tx_ring->count * sizeof(struct ice_tx_desc),
+ PAGE_SIZE);
+ tx_ring->desc = dmam_alloc_coherent(dev, size, &tx_ring->dma,
GFP_KERNEL);
if (!tx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Tx descriptor ring, size=%d\n",
- tx_ring->size);
+ size);
goto err;
}
@@ -370,9 +374,10 @@ err:
* ice_clean_rx_ring - Free Rx buffers
* @rx_ring: ring to be cleaned
*/
-void ice_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
u16 i;
/* ring already cleared, nothing to do */
@@ -417,7 +422,9 @@ rx_skip_free:
memset(rx_ring->rx_buf, 0, sizeof(*rx_ring->rx_buf) * rx_ring->count);
/* Zero out the descriptor ring */
- memset(rx_ring->desc, 0, rx_ring->size);
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ memset(rx_ring->desc, 0, size);
rx_ring->next_to_alloc = 0;
rx_ring->next_to_clean = 0;
@@ -430,8 +437,10 @@ rx_skip_free:
*
* Free all receive software resources
*/
-void ice_free_rx_ring(struct ice_ring *rx_ring)
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring)
{
+ u32 size;
+
ice_clean_rx_ring(rx_ring);
if (rx_ring->vsi->type == ICE_VSI_PF)
if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
@@ -441,7 +450,9 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
rx_ring->rx_buf = NULL;
if (rx_ring->desc) {
- dmam_free_coherent(rx_ring->dev, rx_ring->size,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ dmam_free_coherent(rx_ring->dev, size,
rx_ring->desc, rx_ring->dma);
rx_ring->desc = NULL;
}
@@ -453,9 +464,10 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
*
* Return 0 on success, negative on error
*/
-int ice_setup_rx_ring(struct ice_ring *rx_ring)
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
{
struct device *dev = rx_ring->dev;
+ u32 size;
if (!dev)
return -ENOMEM;
@@ -463,19 +475,19 @@ int ice_setup_rx_ring(struct ice_ring *rx_ring)
/* warn if we are about to overwrite the pointer */
WARN_ON(rx_ring->rx_buf);
rx_ring->rx_buf =
- devm_kzalloc(dev, sizeof(*rx_ring->rx_buf) * rx_ring->count,
+ devm_kcalloc(dev, sizeof(*rx_ring->rx_buf), rx_ring->count,
GFP_KERNEL);
if (!rx_ring->rx_buf)
return -ENOMEM;
/* round up to nearest page */
- rx_ring->size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
- PAGE_SIZE);
- rx_ring->desc = dmam_alloc_coherent(dev, rx_ring->size, &rx_ring->dma,
+ size = ALIGN(rx_ring->count * sizeof(union ice_32byte_rx_desc),
+ PAGE_SIZE);
+ rx_ring->desc = dmam_alloc_coherent(dev, size, &rx_ring->dma,
GFP_KERNEL);
if (!rx_ring->desc) {
dev_err(dev, "Unable to allocate memory for the Rx descriptor ring, size=%d\n",
- rx_ring->size);
+ size);
goto err;
}
@@ -499,7 +511,7 @@ err:
}
static unsigned int
-ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
+ice_rx_frame_truesize(struct ice_rx_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -519,15 +531,15 @@ ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
* @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
- struct bpf_prog *xdp_prog)
+ice_run_xdp(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
- struct ice_ring *xdp_ring;
- int err, result;
+ int err;
u32 act;
act = bpf_prog_run_xdp(xdp_prog, xdp);
@@ -535,11 +547,14 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
case XDP_PASS:
return ICE_XDP_PASS;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[smp_processor_id()];
- result = ice_xmit_xdp_buff(xdp, xdp_ring);
- if (result == ICE_XDP_CONSUMED)
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
+ err = ice_xmit_xdp_ring(xdp->data, xdp->data_end - xdp->data, xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+ if (err == ICE_XDP_CONSUMED)
goto out_failure;
- return result;
+ return err;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
if (err)
@@ -576,7 +591,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
struct ice_netdev_priv *np = netdev_priv(dev);
unsigned int queue_index = smp_processor_id();
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *xdp_ring;
+ struct ice_tx_ring *xdp_ring;
int nxmit = 0, i;
if (test_bit(ICE_VSI_DOWN, vsi->state))
@@ -588,7 +603,14 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
return -EINVAL;
- xdp_ring = vsi->xdp_rings[queue_index];
+ if (static_branch_unlikely(&ice_xdp_locking_key)) {
+ queue_index %= vsi->num_xdp_txq;
+ xdp_ring = vsi->xdp_rings[queue_index];
+ spin_lock(&xdp_ring->tx_lock);
+ } else {
+ xdp_ring = vsi->xdp_rings[queue_index];
+ }
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
@@ -602,6 +624,9 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
if (unlikely(flags & XDP_XMIT_FLUSH))
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
+
return nxmit;
}
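
A condensed view of the ring selection introduced above (hypothetical helper, not part of this patch): the static key is expected to be enabled when there are fewer XDP Tx rings than CPUs, so rings may be shared and accesses must be serialized with the per-ring tx_lock.

static struct ice_tx_ring *ice_example_pick_xdp_ring(struct ice_vsi *vsi)
{
	u32 q = smp_processor_id();
	struct ice_tx_ring *xdp_ring;

	if (static_branch_unlikely(&ice_xdp_locking_key)) {
		q %= vsi->num_xdp_txq;		/* wrap onto a shared ring */
		xdp_ring = vsi->xdp_rings[q];
		spin_lock(&xdp_ring->tx_lock);	/* caller unlocks when done */
	} else {
		xdp_ring = vsi->xdp_rings[q];	/* dedicated ring, no lock */
	}
	return xdp_ring;
}
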
@@ -614,7 +639,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
* reused.
*/
static bool
-ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
+ice_alloc_mapped_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *bi)
{
struct page *page = bi->page;
dma_addr_t dma;
@@ -665,7 +690,7 @@ ice_alloc_mapped_page(struct ice_ring *rx_ring, struct ice_rx_buf *bi)
* buffers. Then bump tail at most one time. Grouping like this lets us avoid
* multiple tail writes per call.
*/
-bool ice_alloc_rx_bufs(struct ice_ring *rx_ring, u16 cleaned_count)
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rx_ring, u16 cleaned_count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
@@ -794,7 +819,7 @@ ice_can_reuse_rx_page(struct ice_rx_buf *rx_buf, int rx_buf_pgcnt)
* The function will then update the page offset.
*/
static void
-ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_add_rx_frag(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct sk_buff *skb, unsigned int size)
{
#if (PAGE_SIZE >= 8192)
@@ -820,7 +845,7 @@ ice_add_rx_frag(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* Synchronizes page for reuse by the adapter
*/
static void
-ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
+ice_reuse_rx_page(struct ice_rx_ring *rx_ring, struct ice_rx_buf *old_buf)
{
u16 nta = rx_ring->next_to_alloc;
struct ice_rx_buf *new_buf;
@@ -851,7 +876,7 @@ ice_reuse_rx_page(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
* for use by the CPU.
*/
static struct ice_rx_buf *
-ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
+ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
int *rx_buf_pgcnt)
{
struct ice_rx_buf *rx_buf;
@@ -888,7 +913,7 @@ ice_get_rx_buf(struct ice_ring *rx_ring, const unsigned int size,
* to set up the skb correctly and avoid any memcpy overhead.
*/
static struct sk_buff *
-ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_build_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
u8 metasize = xdp->data - xdp->data_meta;
@@ -940,7 +965,7 @@ ice_build_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* skb correctly.
*/
static struct sk_buff *
-ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_construct_skb(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
struct xdp_buff *xdp)
{
unsigned int size = xdp->data_end - xdp->data;
@@ -1000,7 +1025,7 @@ ice_construct_skb(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* the associated resources.
*/
static void
-ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
+ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf,
int rx_buf_pgcnt)
{
u16 ntc = rx_ring->next_to_clean + 1;
@@ -1036,7 +1061,7 @@ ice_put_rx_buf(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
* otherwise return true indicating that this is in fact a non-EOP buffer.
*/
static bool
-ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
+ice_is_non_eop(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
{
/* if we are the last buffer then there is nothing else to do */
#define ICE_RXD_EOF BIT(ICE_RX_FLEX_DESC_STATUS0_EOF_S)
@@ -1060,11 +1085,12 @@ ice_is_non_eop(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc)
*
* Returns amount of work completed
*/
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_pkts = 0, frame_sz = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
unsigned int offset = rx_ring->rx_offset;
+ struct ice_tx_ring *xdp_ring = NULL;
unsigned int xdp_res, xdp_xmit = 0;
struct sk_buff *skb = rx_ring->skb;
struct bpf_prog *xdp_prog = NULL;
@@ -1077,6 +1103,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
#endif
xdp_init_buff(&xdp, frame_sz, &rx_ring->xdp_rxq);
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ if (xdp_prog)
+ xdp_ring = rx_ring->xdp_ring;
+
/* start the loop to process Rx packets bounded by 'budget' */
while (likely(total_rx_pkts < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
@@ -1140,11 +1170,10 @@ int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
xdp.frame_sz = ice_rx_frame_truesize(rx_ring, size);
#endif
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
if (!xdp_prog)
goto construct_skb;
- xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog);
+ xdp_res = ice_run_xdp(rx_ring, &xdp, xdp_prog, xdp_ring);
if (!xdp_res)
goto construct_skb;
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
@@ -1221,7 +1250,7 @@ construct_skb:
failure = ice_alloc_rx_bufs(rx_ring, cleaned_count);
if (xdp_prog)
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
rx_ring->skb = skb;
ice_update_rx_ring_stats(rx_ring, total_rx_pkts, total_rx_bytes);
@@ -1230,6 +1259,41 @@ construct_skb:
return failure ? budget : (int)total_rx_pkts;
}
+static void __ice_update_sample(struct ice_q_vector *q_vector,
+ struct ice_ring_container *rc,
+ struct dim_sample *sample,
+ bool is_tx)
+{
+ u64 packets = 0, bytes = 0;
+
+ if (is_tx) {
+ struct ice_tx_ring *tx_ring;
+
+ ice_for_each_tx_ring(tx_ring, *rc) {
+ packets += tx_ring->stats.pkts;
+ bytes += tx_ring->stats.bytes;
+ }
+ } else {
+ struct ice_rx_ring *rx_ring;
+
+ ice_for_each_rx_ring(rx_ring, *rc) {
+ packets += rx_ring->stats.pkts;
+ bytes += rx_ring->stats.bytes;
+ }
+ }
+
+ dim_update_sample(q_vector->total_events, packets, bytes, sample);
+ sample->comp_ctr = 0;
+
+ /* if dim settings get stale, like when not updated for 1
+ * second or longer, force it to start again. This addresses the
+ * frequent case of an idle queue being switched to by the
+ * scheduler. The 1,000 here means 1,000 milliseconds.
+ */
+ if (ktime_ms_delta(sample->time, rc->dim.start_sample.time) >= 1000)
+ rc->dim.state = DIM_START_MEASURE;
+}
+
/**
* ice_net_dim - Update net DIM algorithm
* @q_vector: the vector associated with the interrupt
@@ -1245,34 +1309,16 @@ static void ice_net_dim(struct ice_q_vector *q_vector)
struct ice_ring_container *rx = &q_vector->rx;
if (ITR_IS_DYNAMIC(tx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->tx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, tx, &dim_sample, true);
net_dim(&tx->dim, dim_sample);
}
if (ITR_IS_DYNAMIC(rx)) {
- struct dim_sample dim_sample = {};
- u64 packets = 0, bytes = 0;
- struct ice_ring *ring;
-
- ice_for_each_ring(ring, q_vector->rx) {
- packets += ring->stats.pkts;
- bytes += ring->stats.bytes;
- }
-
- dim_update_sample(q_vector->total_events, packets, bytes,
- &dim_sample);
+ struct dim_sample dim_sample;
+ __ice_update_sample(q_vector, rx, &dim_sample, false);
net_dim(&rx->dim, dim_sample);
}
}
@@ -1299,15 +1345,14 @@ static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
}
/**
- * ice_update_ena_itr - Update ITR moderation and re-enable MSI-X interrupt
+ * ice_enable_interrupt - re-enable MSI-X interrupt
* @q_vector: the vector associated with the interrupt to enable
*
- * Update the net_dim() algorithm and re-enable the interrupt associated with
- * this vector.
- *
- * If the VSI is down, the interrupt will not be re-enabled.
+ * If the VSI is down, the interrupt will not be re-enabled. Also,
+ * when enabling the interrupt always reset the wb_on_itr to false
+ * and trigger a software interrupt to clean out internal state.
*/
-static void ice_update_ena_itr(struct ice_q_vector *q_vector)
+static void ice_enable_interrupt(struct ice_q_vector *q_vector)
{
struct ice_vsi *vsi = q_vector->vsi;
bool wb_en = q_vector->wb_on_itr;
@@ -1316,25 +1361,25 @@ static void ice_update_ena_itr(struct ice_q_vector *q_vector)
if (test_bit(ICE_DOWN, vsi->state))
return;
- /* When exiting WB_ON_ITR, let ITR resume its normal
- * interrupts-enabled path.
+ /* trigger an ITR delayed software interrupt when exiting busy poll, to
+ * make sure to catch any pending cleanups that might have been missed
+ * due to interrupt state transition. If busy poll or poll isn't
+ * enabled, then don't update ITR, and just enable the interrupt.
*/
- if (wb_en)
+ if (!wb_en) {
+ itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
+ } else {
q_vector->wb_on_itr = false;
- /* This will do nothing if dynamic updates are not enabled. */
- ice_net_dim(q_vector);
-
- /* net_dim() updates ITR out-of-band using a work item */
- itr_val = ice_buildreg_itr(ICE_ITR_NONE, 0);
- /* trigger an immediate software interrupt when exiting
- * busy poll, to make sure to catch any pending cleanups
- * that might have been missed due to interrupt state
- * transition.
- */
- if (wb_en) {
+ /* do two things here with a single write. Set up the third ITR
+ * index to be used for software interrupt moderation, and then
+ * trigger a software interrupt with a rate limit of 20K on
+ * software interrupts, this will help avoid high interrupt
+ * loads due to frequently polling and exiting polling.
+ */
+ itr_val = ice_buildreg_itr(ICE_IDX_ITR2, ICE_ITR_20K);
itr_val |= GLINT_DYN_CTL_SWINT_TRIG_M |
- GLINT_DYN_CTL_SW_ITR_INDX_M |
+ ICE_IDX_ITR2 << GLINT_DYN_CTL_SW_ITR_INDX_S |
GLINT_DYN_CTL_SW_ITR_INDX_ENA_M;
}
wr32(&vsi->back->hw, GLINT_DYN_CTL(q_vector->reg_idx), itr_val);
@@ -1387,18 +1432,24 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
{
struct ice_q_vector *q_vector =
container_of(napi, struct ice_q_vector, napi);
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
bool clean_complete = true;
- struct ice_ring *ring;
int budget_per_ring;
int work_done = 0;
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- ice_for_each_ring(ring, q_vector->tx) {
- bool wd = ring->xsk_pool ?
- ice_clean_tx_irq_zc(ring, budget) :
- ice_clean_tx_irq(ring, budget);
+ ice_for_each_tx_ring(tx_ring, q_vector->tx) {
+ bool wd;
+
+ if (tx_ring->xsk_pool)
+ wd = ice_clean_tx_irq_zc(tx_ring, budget);
+ else if (ice_ring_is_xdp(tx_ring))
+ wd = true;
+ else
+ wd = ice_clean_tx_irq(tx_ring, budget);
if (!wd)
clean_complete = false;
@@ -1419,16 +1470,16 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Max of 1 Rx ring in this q_vector so give it the budget */
budget_per_ring = budget;
- ice_for_each_ring(ring, q_vector->rx) {
+ ice_for_each_rx_ring(rx_ring, q_vector->rx) {
int cleaned;
/* A dedicated path for zero-copy allows making a single
* comparison in the irq context instead of many inside the
* ice_clean_rx_irq function and makes the codebase cleaner.
*/
- cleaned = ring->xsk_pool ?
- ice_clean_rx_irq_zc(ring, budget_per_ring) :
- ice_clean_rx_irq(ring, budget_per_ring);
+ cleaned = rx_ring->xsk_pool ?
+ ice_clean_rx_irq_zc(rx_ring, budget_per_ring) :
+ ice_clean_rx_irq(rx_ring, budget_per_ring);
work_done += cleaned;
/* if we clean as many as budgeted, we must not be done */
if (cleaned >= budget_per_ring)
@@ -1447,10 +1498,12 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
/* Exit the polling mode, but don't re-enable interrupts if stack might
* poll us due to busy-polling
*/
- if (likely(napi_complete_done(napi, work_done)))
- ice_update_ena_itr(q_vector);
- else
+ if (likely(napi_complete_done(napi, work_done))) {
+ ice_net_dim(q_vector);
+ ice_enable_interrupt(q_vector);
+ } else {
ice_set_wb_on_itr(q_vector);
+ }
return min_t(int, work_done, budget - 1);
}
@@ -1462,7 +1515,7 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
*
* Returns -EBUSY if a stop is needed, else 0
*/
-static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int __ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
netif_stop_subqueue(tx_ring->netdev, tx_ring->q_index);
/* Memory barrier before checking head and tail */
@@ -1485,7 +1538,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
*
* Returns 0 if stop is not needed
*/
-static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
+static int ice_maybe_stop_tx(struct ice_tx_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
@@ -1504,7 +1557,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
* it and the length into the transmit descriptor.
*/
static void
-ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
+ice_tx_map(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first,
struct ice_tx_offload_params *off)
{
u64 td_offset, td_tag, td_cmd;
@@ -1840,7 +1893,7 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* related to VLAN tagging for the HW, such as VLAN, DCB, etc.
*/
static void
-ice_tx_prepare_vlan_flags(struct ice_ring *tx_ring, struct ice_tx_buf *first)
+ice_tx_prepare_vlan_flags(struct ice_tx_ring *tx_ring, struct ice_tx_buf *first)
{
struct sk_buff *skb = first->skb;
@@ -2146,7 +2199,7 @@ static bool ice_chk_linearize(struct sk_buff *skb, unsigned int count)
* @off: Tx offload parameters
*/
static void
-ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
+ice_tstamp(struct ice_tx_ring *tx_ring, struct sk_buff *skb,
struct ice_tx_buf *first, struct ice_tx_offload_params *off)
{
s8 idx;
@@ -2181,7 +2234,7 @@ ice_tstamp(struct ice_ring *tx_ring, struct sk_buff *skb,
* Returns NETDEV_TX_OK if sent, else an error code
*/
static netdev_tx_t
-ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
+ice_xmit_frame_ring(struct sk_buff *skb, struct ice_tx_ring *tx_ring)
{
struct ice_tx_offload_params offload = { 0 };
struct ice_vsi *vsi = tx_ring->vsi;
@@ -2245,6 +2298,8 @@ ice_xmit_frame_ring(struct sk_buff *skb, struct ice_ring *tx_ring)
ICE_TXD_CTX_QW1_CMD_S);
ice_tstamp(tx_ring, skb, first, &offload);
+ if (ice_is_switchdev_running(vsi->back))
+ ice_eswitch_set_target_vsi(skb, &offload);
if (offload.cd_qw1 & ICE_TX_DESC_DTYPE_CTX) {
struct ice_tx_ctx_desc *cdesc;
@@ -2282,7 +2337,7 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
tx_ring = vsi->tx_rings[skb->queue_mapping];
@@ -2296,10 +2351,43 @@ netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev)
}
/**
+ * ice_get_dscp_up - return the UP/TC value for a SKB
+ * @dcbcfg: DCB config that contains DSCP to UP/TC mapping
+ * @skb: SKB to query for info to determine UP/TC
+ *
+ * This function is to only be called when the PF is in L3 DSCP PFC mode
+ */
+static u8 ice_get_dscp_up(struct ice_dcbx_cfg *dcbcfg, struct sk_buff *skb)
+{
+ u8 dscp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return dcbcfg->dscp_map[dscp];
+}
+
+u16
+ice_select_queue(struct net_device *netdev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_dcbx_cfg *dcbcfg;
+
+ dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
+ if (dcbcfg->pfc_mode == ICE_QOS_MODE_DSCP)
+ skb->priority = ice_get_dscp_up(dcbcfg, skb);
+
+ return netdev_pick_tx(netdev, skb, sb_dev);
+}
+
+/**
* ice_clean_ctrl_tx_irq - interrupt handler for flow director Tx queue
* @tx_ring: tx_ring to clean
*/
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring)
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring)
{
struct ice_vsi *vsi = tx_ring->vsi;
s16 i = tx_ring->next_to_clean;
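A minimal userspace sketch of the conditional-locking pattern used in the XDP_TX and ndo_xdp_xmit paths above. The ice_xdp_locking_key static branch is modelled as a plain boolean, and the ring type is a simplified stand-in, not the driver's structure: the lock is only taken when XDP Tx rings are shared, i.e. when there are more CPUs than XDP rings.

/* Standalone model of "lock only when XDP Tx rings are shared";
 * rings_shared plays the role of the static branch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct xdp_tx_ring {
        pthread_mutex_t tx_lock;
        unsigned int queued;
};

static bool rings_shared;       /* true when num_cpus > num_xdp_rings */

static void xdp_xmit_one(struct xdp_tx_ring *ring)
{
        if (rings_shared)
                pthread_mutex_lock(&ring->tx_lock);

        ring->queued++;         /* stand-in for posting a Tx descriptor */

        if (rings_shared)
                pthread_mutex_unlock(&ring->tx_lock);
}

int main(void)
{
        struct xdp_tx_ring ring = { .tx_lock = PTHREAD_MUTEX_INITIALIZER };
        unsigned int num_cpus = 8, num_xdp_rings = 4;

        rings_shared = num_cpus > num_xdp_rings;
        /* a CPU picks its ring by index modulo the ring count when shared */
        unsigned int cpu = 6, ring_idx = rings_shared ? cpu % num_xdp_rings : cpu;

        xdp_xmit_one(&ring);
        printf("cpu %u -> ring %u, queued %u, shared=%d\n",
               cpu, ring_idx, ring.queued, rings_shared);
        return 0;
}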
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index 1e46e80f3d6f..c56dd1749903 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -13,6 +13,7 @@
#define ICE_MAX_CHAINED_RX_BUFS 5
#define ICE_MAX_BUF_TXD 8
#define ICE_MIN_TX_LEN 17
+#define ICE_TX_THRESH 32
/* The size limit for a transmit buffer in a descriptor is (16K - 1).
* In order to align with the read requests we will align the value to
@@ -154,7 +155,7 @@ struct ice_tx_buf {
struct ice_tx_offload_params {
u64 cd_qw1;
- struct ice_ring *tx_ring;
+ struct ice_tx_ring *tx_ring;
u32 td_cmd;
u32 td_offset;
u32 td_l2tag1;
@@ -164,17 +165,10 @@ struct ice_tx_offload_params {
};
struct ice_rx_buf {
- union {
- struct {
- dma_addr_t dma;
- struct page *page;
- unsigned int page_offset;
- u16 pagecnt_bias;
- };
- struct {
- struct xdp_buff *xdp;
- };
- };
+ dma_addr_t dma;
+ struct page *page;
+ unsigned int page_offset;
+ u16 pagecnt_bias;
};
struct ice_q_stats {
@@ -258,9 +252,9 @@ enum ice_dynamic_itr {
#define ICE_TX_LEGACY 1
/* descriptor ring, associated with a VSI */
-struct ice_ring {
+struct ice_rx_ring {
/* CL1 - 1st cacheline starts here */
- struct ice_ring *next; /* pointer to next ring in q_vector */
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
void *desc; /* Descriptor ring memory */
struct device *dev; /* Used for DMA mapping */
struct net_device *netdev; /* netdev ring maps to */
@@ -268,14 +262,13 @@ struct ice_ring {
struct ice_q_vector *q_vector; /* Backreference to associated vector */
u8 __iomem *tail;
union {
- struct ice_tx_buf *tx_buf;
struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp_buf;
};
/* CL2 - 2nd cacheline starts here */
+ struct xdp_rxq_info xdp_rxq;
+ /* CL3 - 3rd cacheline starts here */
u16 q_index; /* Queue number of ring */
- u16 q_handle; /* Queue handle per TC */
-
- u8 ring_active:1; /* is ring online or not */
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -284,63 +277,104 @@ struct ice_ring {
u16 next_to_use;
u16 next_to_clean;
u16 next_to_alloc;
+ u16 rx_offset;
+ u16 rx_buf_len;
/* stats structs */
+ struct ice_rxq_stats rx_stats;
struct ice_q_stats stats;
struct u64_stats_sync syncp;
- union {
- struct ice_txq_stats tx_stats;
- struct ice_rxq_stats rx_stats;
- };
struct rcu_head rcu; /* to avoid race on free */
- DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ /* CL4 - 3rd cacheline starts here */
+ struct ice_channel *ch;
struct bpf_prog *xdp_prog;
+ struct ice_tx_ring *xdp_ring;
struct xsk_buff_pool *xsk_pool;
- u16 rx_offset;
- /* CL3 - 3rd cacheline starts here */
- struct xdp_rxq_info xdp_rxq;
struct sk_buff *skb;
- /* CLX - the below items are only accessed infrequently and should be
- * in their own cache line if possible
- */
-#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ dma_addr_t dma; /* physical address of ring */
#define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+ u64 cached_phctime;
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
u8 flags;
+} ____cacheline_internodealigned_in_smp;
+
+struct ice_tx_ring {
+ /* CL1 - 1st cacheline starts here */
+ struct ice_tx_ring *next; /* pointer to next ring in q_vector */
+ void *desc; /* Descriptor ring memory */
+ struct device *dev; /* Used for DMA mapping */
+ u8 __iomem *tail;
+ struct ice_tx_buf *tx_buf;
+ struct ice_q_vector *q_vector; /* Backreference to associated vector */
+ struct net_device *netdev; /* netdev ring maps to */
+ struct ice_vsi *vsi; /* Backreference to associated VSI */
+ /* CL2 - 2nd cacheline starts here */
dma_addr_t dma; /* physical address of ring */
- unsigned int size; /* length of descriptor ring in bytes */
+ struct xsk_buff_pool *xsk_pool;
+ u16 next_to_use;
+ u16 next_to_clean;
+ u16 next_rs;
+ u16 next_dd;
+ u16 q_handle; /* Queue handle per TC */
+ u16 reg_idx; /* HW register index of the ring */
+ u16 count; /* Number of descriptors */
+ u16 q_index; /* Queue number of ring */
+ /* stats structs */
+ struct ice_q_stats stats;
+ struct u64_stats_sync syncp;
+ struct ice_txq_stats tx_stats;
+
+ /* CL3 - 3rd cacheline starts here */
+ struct rcu_head rcu; /* to avoid race on free */
+ DECLARE_BITMAP(xps_state, ICE_TX_NBITS); /* XPS Config State */
+ struct ice_channel *ch;
+ struct ice_ptp_tx *tx_tstamps;
+ spinlock_t tx_lock;
u32 txq_teid; /* Added Tx queue TEID */
- u16 rx_buf_len;
+#define ICE_TX_FLAGS_RING_XDP BIT(0)
+ u8 flags;
u8 dcb_tc; /* Traffic class of ring */
- struct ice_ptp_tx *tx_tstamps;
- u64 cached_phctime;
- u8 ptp_rx:1;
- u8 ptp_tx:1;
+ u8 ptp_tx;
} ____cacheline_internodealigned_in_smp;
-static inline bool ice_ring_uses_build_skb(struct ice_ring *ring)
+static inline bool ice_ring_uses_build_skb(struct ice_rx_ring *ring)
{
return !!(ring->flags & ICE_RX_FLAGS_RING_BUILD_SKB);
}
-static inline void ice_set_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_set_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags |= ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline void ice_clear_ring_build_skb_ena(struct ice_ring *ring)
+static inline void ice_clear_ring_build_skb_ena(struct ice_rx_ring *ring)
{
ring->flags &= ~ICE_RX_FLAGS_RING_BUILD_SKB;
}
-static inline bool ice_ring_is_xdp(struct ice_ring *ring)
+static inline bool ice_ring_ch_enabled(struct ice_tx_ring *ring)
+{
+ return !!ring->ch;
+}
+
+static inline bool ice_ring_is_xdp(struct ice_tx_ring *ring)
{
return !!(ring->flags & ICE_TX_FLAGS_RING_XDP);
}
+enum ice_container_type {
+ ICE_RX_CONTAINER,
+ ICE_TX_CONTAINER,
+};
+
struct ice_ring_container {
/* head of linked-list of rings */
- struct ice_ring *ring;
+ union {
+ struct ice_rx_ring *rx_ring;
+ struct ice_tx_ring *tx_ring;
+ };
struct dim dim; /* data for net_dim algorithm */
u16 itr_idx; /* index in the interrupt vector */
/* this matches the maximum number of ITR bits, but in usec
@@ -349,6 +383,7 @@ struct ice_ring_container {
u16 itr_setting:13;
u16 itr_reserved:2;
u16 itr_mode:1;
+ enum ice_container_type type;
};
struct ice_coalesce_stored {
@@ -360,10 +395,13 @@ struct ice_coalesce_stored {
};
/* iterator for handling rings in ring container */
-#define ice_for_each_ring(pos, head) \
- for (pos = (head).ring; pos; pos = pos->next)
+#define ice_for_each_rx_ring(pos, head) \
+ for (pos = (head).rx_ring; pos; pos = pos->next)
+
+#define ice_for_each_tx_ring(pos, head) \
+ for (pos = (head).tx_ring; pos; pos = pos->next)
-static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
+static inline unsigned int ice_rx_pg_order(struct ice_rx_ring *ring)
{
#if (PAGE_SIZE < 8192)
if (ring->rx_buf_len > (PAGE_SIZE / 2))
@@ -376,18 +414,21 @@ static inline unsigned int ice_rx_pg_order(struct ice_ring *ring)
union ice_32b_rx_flex_desc;
-bool ice_alloc_rx_bufs(struct ice_ring *rxr, u16 cleaned_count);
+bool ice_alloc_rx_bufs(struct ice_rx_ring *rxr, u16 cleaned_count);
netdev_tx_t ice_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_clean_tx_ring(struct ice_ring *tx_ring);
-void ice_clean_rx_ring(struct ice_ring *rx_ring);
-int ice_setup_tx_ring(struct ice_ring *tx_ring);
-int ice_setup_rx_ring(struct ice_ring *rx_ring);
-void ice_free_tx_ring(struct ice_ring *tx_ring);
-void ice_free_rx_ring(struct ice_ring *rx_ring);
+u16
+ice_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev);
+void ice_clean_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_clean_rx_ring(struct ice_rx_ring *rx_ring);
+int ice_setup_tx_ring(struct ice_tx_ring *tx_ring);
+int ice_setup_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_free_tx_ring(struct ice_tx_ring *tx_ring);
+void ice_free_rx_ring(struct ice_rx_ring *rx_ring);
int ice_napi_poll(struct napi_struct *napi, int budget);
int
ice_prgm_fdir_fltr(struct ice_vsi *vsi, struct ice_fltr_desc *fdir_desc,
u8 *raw_packet);
-int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget);
-void ice_clean_ctrl_tx_irq(struct ice_ring *tx_ring);
+int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget);
+void ice_clean_ctrl_tx_irq(struct ice_tx_ring *tx_ring);
#endif /* _ICE_TXRX_H_ */
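A small sketch of how the split ring container is meant to be used after the ice_ring split: one container per direction, a union holding the head of either a Tx or an Rx ring list, and direction-specific iterator macros. The names mirror the header above, but the structures here are simplified stand-ins.

#include <stdio.h>

struct tx_ring { struct tx_ring *next; int q_index; };
struct rx_ring { struct rx_ring *next; int q_index; };

struct ring_container {
        union {
                struct rx_ring *rx_ring;
                struct tx_ring *tx_ring;
        };
};

#define for_each_tx_ring(pos, head) \
        for (pos = (head).tx_ring; pos; pos = pos->next)
#define for_each_rx_ring(pos, head) \
        for (pos = (head).rx_ring; pos; pos = pos->next)

int main(void)
{
        struct tx_ring t1 = { .q_index = 1 }, t0 = { .next = &t1, .q_index = 0 };
        struct rx_ring r0 = { .q_index = 0 };
        struct ring_container tx = { .tx_ring = &t0 };
        struct ring_container rx = { .rx_ring = &r0 };
        struct tx_ring *tpos;
        struct rx_ring *rpos;

        for_each_tx_ring(tpos, tx)
                printf("tx ring %d\n", tpos->q_index);
        for_each_rx_ring(rpos, rx)
                printf("rx ring %d\n", rpos->q_index);
        return 0;
}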
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 171397dcf00a..1dd7e84f41f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -2,13 +2,15 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_txrx_lib.h"
+#include "ice_eswitch.h"
+#include "ice_lib.h"
/**
* ice_release_rx_desc - Store the new tail and head values
* @rx_ring: ring to bump
* @val: new head index
*/
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val)
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val)
{
u16 prev_ntu = rx_ring->next_to_use & ~0x7;
@@ -66,7 +68,7 @@ static enum pkt_hash_types ice_ptype_to_htype(u16 ptype)
* @rx_ptype: the ptype value from the descriptor
*/
static void
-ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
+ice_rx_hash(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 rx_ptype)
{
struct ice_32b_rx_flex_desc_nic *nic_mdid;
@@ -93,7 +95,7 @@ ice_rx_hash(struct ice_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc,
* skb->protocol must be set before this function is called
*/
static void
-ice_rx_csum(struct ice_ring *ring, struct sk_buff *skb,
+ice_rx_csum(struct ice_rx_ring *ring, struct sk_buff *skb,
union ice_32b_rx_flex_desc *rx_desc, u16 ptype)
{
struct ice_rx_ptype_decoded decoded;
@@ -178,14 +180,15 @@ checksum_fail:
* other fields within the skb.
*/
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype)
{
ice_rx_hash(rx_ring, rx_desc, skb, ptype);
/* modifies the skb - consumes the enet header */
- skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ skb->protocol = eth_type_trans(skb, ice_eswitch_get_target_netdev
+ (rx_ring, rx_desc));
ice_rx_csum(rx_ring, skb, rx_desc, ptype);
@@ -203,7 +206,7 @@ ice_process_skb_fields(struct ice_ring *rx_ring,
* gro receive functions (with/without VLAN tag)
*/
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
{
if ((rx_ring->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
(vlan_tag & VLAN_VID_MASK))
@@ -212,18 +215,67 @@ ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag)
}
/**
+ * ice_clean_xdp_irq - Reclaim resources after transmit completes on XDP ring
+ * @xdp_ring: XDP ring to clean
+ */
+static void ice_clean_xdp_irq(struct ice_tx_ring *xdp_ring)
+{
+ unsigned int total_bytes = 0, total_pkts = 0;
+ u16 ntc = xdp_ring->next_to_clean;
+ struct ice_tx_desc *next_dd_desc;
+ u16 next_dd = xdp_ring->next_dd;
+ struct ice_tx_buf *tx_buf;
+ int i;
+
+ next_dd_desc = ICE_TX_DESC(xdp_ring, next_dd);
+ if (!(next_dd_desc->cmd_type_offset_bsz &
+ cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
+ return;
+
+ for (i = 0; i < ICE_TX_THRESH; i++) {
+ tx_buf = &xdp_ring->tx_buf[ntc];
+
+ total_bytes += tx_buf->bytecount;
+ /* normally tx_buf->gso_segs was taken but at this point
+ * it's always 1 for us
+ */
+ total_pkts++;
+
+ page_frag_free(tx_buf->raw_buf);
+ dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
+ dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
+ dma_unmap_len_set(tx_buf, len, 0);
+ tx_buf->raw_buf = NULL;
+
+ ntc++;
+ if (ntc >= xdp_ring->count)
+ ntc = 0;
+ }
+
+ next_dd_desc->cmd_type_offset_bsz = 0;
+ xdp_ring->next_dd = xdp_ring->next_dd + ICE_TX_THRESH;
+ if (xdp_ring->next_dd > xdp_ring->count)
+ xdp_ring->next_dd = ICE_TX_THRESH - 1;
+ xdp_ring->next_to_clean = ntc;
+ ice_update_tx_ring_stats(xdp_ring, total_pkts, total_bytes);
+}
+
+/**
* ice_xmit_xdp_ring - submit single packet to XDP ring for transmission
* @data: packet data pointer
* @size: packet data size
* @xdp_ring: XDP ring for transmission
*/
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring)
{
u16 i = xdp_ring->next_to_use;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
dma_addr_t dma;
+ if (ICE_DESC_UNUSED(xdp_ring) < ICE_TX_THRESH)
+ ice_clean_xdp_irq(xdp_ring);
+
if (!unlikely(ICE_DESC_UNUSED(xdp_ring))) {
xdp_ring->tx_stats.tx_busy++;
return ICE_XDP_CONSUMED;
@@ -244,21 +296,26 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
tx_desc = ICE_TX_DESC(xdp_ring, i);
tx_desc->buf_addr = cpu_to_le64(dma);
- tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TXD_LAST_DESC_CMD, 0,
+ tx_desc->cmd_type_offset_bsz = ice_build_ctob(ICE_TX_DESC_CMD_EOP, 0,
size, 0);
- /* Make certain all of the status bits have been updated
- * before next_to_watch is written.
- */
- smp_wmb();
-
i++;
- if (i == xdp_ring->count)
+ if (i == xdp_ring->count) {
i = 0;
-
- tx_buf->next_to_watch = tx_desc;
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs = ICE_TX_THRESH - 1;
+ }
xdp_ring->next_to_use = i;
+ if (i > xdp_ring->next_rs) {
+ tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_rs);
+ tx_desc->cmd_type_offset_bsz |=
+ cpu_to_le64(ICE_TX_DESC_CMD_RS << ICE_TXD_QW1_CMD_S);
+ xdp_ring->next_rs += ICE_TX_THRESH;
+ }
+
return ICE_XDP_TX;
}
@@ -269,7 +326,7 @@ int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring)
*
* Returns negative on failure, 0 on success.
*/
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring)
{
struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
@@ -281,22 +338,23 @@ int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring)
/**
* ice_finalize_xdp_rx - Bump XDP Tx tail and/or flush redirect map
- * @rx_ring: Rx ring
+ * @xdp_ring: XDP ring
* @xdp_res: Result of the receive batch
*
* This function bumps XDP Tx tail and/or flush redirect map, and
* should be called when a batch of packets has been processed in the
* napi loop.
*/
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res)
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res)
{
if (xdp_res & ICE_XDP_REDIR)
xdp_do_flush_map();
if (xdp_res & ICE_XDP_TX) {
- struct ice_ring *xdp_ring =
- rx_ring->vsi->xdp_rings[rx_ring->q_index];
-
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_lock(&xdp_ring->tx_lock);
ice_xdp_ring_update_tail(xdp_ring);
+ if (static_branch_unlikely(&ice_xdp_locking_key))
+ spin_unlock(&xdp_ring->tx_lock);
}
}
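A simplified model of the RS/DD batching used by the XDP Tx path above: the producer requests a completion (RS) only once per THRESH descriptors, and the cleaner frees a whole THRESH-sized batch once the descriptor at next_dd reports "done" (DD). Indices and flags are plain integers here, not hardware descriptors, and the wrap handling is deliberately simplified.

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE 256
#define THRESH    32

static bool desc_done[RING_SIZE];       /* stand-in for the DD bit */
static unsigned int next_to_use, next_to_clean;
static unsigned int next_rs = THRESH - 1;
static unsigned int next_dd = THRESH - 1;

static void produce_one(void)
{
        unsigned int i = next_to_use;

        /* post descriptor i ... */
        if (i == next_rs) {
                /* request a completion for this batch */
                next_rs = (next_rs + THRESH) % RING_SIZE;
        }
        next_to_use = (i + 1) % RING_SIZE;
}

static void clean_batch(void)
{
        if (!desc_done[next_dd])
                return;         /* batch not finished yet */

        for (unsigned int i = 0; i < THRESH; i++) {
                desc_done[next_to_clean] = false;
                next_to_clean = (next_to_clean + 1) % RING_SIZE;
        }
        next_dd = (next_dd + THRESH) % RING_SIZE;
}

int main(void)
{
        for (int i = 0; i < 40; i++)
                produce_one();
        desc_done[THRESH - 1] = true;   /* hardware completed first batch */
        clean_batch();
        printf("use=%u clean=%u rs=%u dd=%u\n",
               next_to_use, next_to_clean, next_rs, next_dd);
        return 0;
}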
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
index 05ac30752902..11b6c1601986 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.h
@@ -37,7 +37,7 @@ ice_build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
*
* This function updates the XDP Tx ring tail register.
*/
-static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
+static inline void ice_xdp_ring_update_tail(struct ice_tx_ring *xdp_ring)
{
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
@@ -46,14 +46,14 @@ static inline void ice_xdp_ring_update_tail(struct ice_ring *xdp_ring)
writel_relaxed(xdp_ring->next_to_use, xdp_ring->tail);
}
-void ice_finalize_xdp_rx(struct ice_ring *rx_ring, unsigned int xdp_res);
-int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_ring *xdp_ring);
-int ice_xmit_xdp_ring(void *data, u16 size, struct ice_ring *xdp_ring);
-void ice_release_rx_desc(struct ice_ring *rx_ring, u16 val);
+void ice_finalize_xdp_rx(struct ice_tx_ring *xdp_ring, unsigned int xdp_res);
+int ice_xmit_xdp_buff(struct xdp_buff *xdp, struct ice_tx_ring *xdp_ring);
+int ice_xmit_xdp_ring(void *data, u16 size, struct ice_tx_ring *xdp_ring);
+void ice_release_rx_desc(struct ice_rx_ring *rx_ring, u16 val);
void
-ice_process_skb_fields(struct ice_ring *rx_ring,
+ice_process_skb_fields(struct ice_rx_ring *rx_ring,
union ice_32b_rx_flex_desc *rx_desc,
struct sk_buff *skb, u16 ptype);
void
-ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
+ice_receive_skb(struct ice_rx_ring *rx_ring, struct sk_buff *skb, u16 vlan_tag);
#endif /* !_ICE_TXRX_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index d33d1906103c..9e0c2923c62e 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -138,7 +138,9 @@ enum ice_vsi_type {
ICE_VSI_PF = 0,
ICE_VSI_VF = 1,
ICE_VSI_CTRL = 3, /* equates to ICE_VSI_PF with 1 queue pair */
+ ICE_VSI_CHNL = 4,
ICE_VSI_LB = 6,
+ ICE_VSI_SWITCHDEV_CTRL = 7,
};
struct ice_link_status {
@@ -569,6 +571,8 @@ struct ice_sched_vsi_info {
struct list_head list_entry;
u16 max_lanq[ICE_MAX_TRAFFIC_CLASS];
u16 max_rdmaq[ICE_MAX_TRAFFIC_CLASS];
+ /* bw_t_info saves VSI BW information */
+ struct ice_bw_type_info bw_t_info[ICE_MAX_TRAFFIC_CLASS];
};
/* driver defines the policy */
@@ -604,7 +608,8 @@ struct ice_dcb_app_priority_table {
};
#define ICE_MAX_USER_PRIORITY 8
-#define ICE_DCBX_MAX_APPS 32
+#define ICE_DCBX_MAX_APPS 64
+#define ICE_DSCP_NUM_VAL 64
#define ICE_LLDPDU_SIZE 1500
#define ICE_TLV_STATUS_OPER 0x1
#define ICE_TLV_STATUS_SYNC 0x2
@@ -622,7 +627,14 @@ struct ice_dcbx_cfg {
struct ice_dcb_ets_cfg etscfg;
struct ice_dcb_ets_cfg etsrec;
struct ice_dcb_pfc_cfg pfc;
+#define ICE_QOS_MODE_VLAN 0x0
+#define ICE_QOS_MODE_DSCP 0x1
+ u8 pfc_mode;
struct ice_dcb_app_priority_table app[ICE_DCBX_MAX_APPS];
+ /* when DSCP mapping defined by user set its bit to 1 */
+ DECLARE_BITMAP(dscp_mapped, ICE_DSCP_NUM_VAL);
+ /* array holding DSCP -> UP/TC values for DSCP L3 QoS mode */
+ u8 dscp_map[ICE_DSCP_NUM_VAL];
u8 dcbx_mode;
#define ICE_DCBX_MODE_CEE 0x1
#define ICE_DCBX_MODE_IEEE 0x2
@@ -668,6 +680,10 @@ struct ice_port_info {
struct ice_switch_info {
struct list_head vsi_list_map_head;
struct ice_sw_recipe *recp_list;
+ u16 prof_res_bm_init;
+ u16 max_used_prof_index;
+
+ DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS);
};
/* FW logging configuration */
@@ -903,6 +919,7 @@ struct ice_hw {
struct mutex rss_locks; /* protect RSS configuration */
struct list_head rss_list_head;
struct ice_mbx_snapshot mbx_snapshot;
+ u16 io_expander_handle;
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
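A sketch of the DSCP L3 QoS lookup enabled by the new dscp_map[] field: DSCP is the top six bits of the IPv4 TOS / IPv6 Traffic Class byte, and it indexes a 64-entry DSCP-to-UP/TC table. The mapping values below are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define DSCP_NUM_VAL 64

static uint8_t dscp_map[DSCP_NUM_VAL];  /* DSCP -> user priority / TC */

static uint8_t dscp_to_up(uint8_t dsfield)
{
        uint8_t dscp = dsfield >> 2;    /* drop the two ECN bits */

        return dscp_map[dscp];
}

int main(void)
{
        dscp_map[46] = 5;       /* e.g. map EF (DSCP 46) to priority 5 */

        /* TOS byte 0xb8 = DSCP 46, ECN 0 */
        printf("TOS 0xb8 -> UP %u\n", dscp_to_up(0xb8));
        return 0;
}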
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index e93430ab37f1..2ac21484b876 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -5,7 +5,9 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_fltr.h"
+#include "ice_dcb_lib.h"
#include "ice_flow.h"
+#include "ice_eswitch.h"
#include "ice_virtchnl_allowlist.h"
#define FIELD_SELECTOR(proto_hdr_field) \
@@ -251,7 +253,7 @@ ice_vc_hash_field_match_type ice_vc_hash_field_list_comms[] = {
* ice_get_vf_vsi - get VF's VSI based on the stored index
* @vf: VF used to get VSI
*/
-static struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
{
return vf->pf->vsi[vf->lan_vsi_idx];
}
@@ -412,7 +414,7 @@ static bool ice_is_vf_link_up(struct ice_vf *vf)
*
* send a link status message to a single VF
*/
-static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
+void ice_vc_notify_vf_link_state(struct ice_vf *vf)
{
struct virtchnl_pf_event pfe = { 0 };
struct ice_hw *hw = &vf->pf->hw;
@@ -620,6 +622,8 @@ void ice_free_vfs(struct ice_pf *pf)
if (!pf->vf)
return;
+ ice_eswitch_release(pf);
+
while (test_and_set_bit(ICE_VF_DIS, pf->state))
usleep_range(1000, 2000);
@@ -828,7 +832,7 @@ static struct ice_vsi *ice_vf_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_VF, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF VSI\n");
@@ -855,7 +859,7 @@ struct ice_vsi *ice_vf_ctrl_vsi_setup(struct ice_vf *vf)
struct ice_pf *pf = vf->pf;
struct ice_vsi *vsi;
- vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id);
+ vsi = ice_vsi_setup(pf, pi, ICE_VSI_CTRL, vf->vf_id, NULL);
if (!vsi) {
dev_err(ice_pf_to_dev(pf), "Failed to create VF control VSI\n");
ice_vf_ctrl_invalidate_vsi(vf);
@@ -882,6 +886,40 @@ static int ice_calc_vf_first_vector_idx(struct ice_pf *pf, struct ice_vf *vf)
}
/**
+ * ice_vf_rebuild_host_tx_rate_cfg - re-apply the Tx rate limiting configuration
+ * @vf: VF to re-apply the configuration for
+ *
+ * Called after a VF VSI has been re-added/rebuilt during reset. The PF driver
+ * needs to re-apply the host configured Tx rate limiting configuration.
+ */
+static int ice_vf_rebuild_host_tx_rate_cfg(struct ice_vf *vf)
+{
+ struct device *dev = ice_pf_to_dev(vf->pf);
+ struct ice_vsi *vsi = ice_get_vf_vsi(vf);
+ int err;
+
+ if (vf->min_tx_rate) {
+ err = ice_set_min_bw_limit(vsi, (u64)vf->min_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set min Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->min_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ if (vf->max_tx_rate) {
+ err = ice_set_max_bw_limit(vsi, (u64)vf->max_tx_rate * 1000);
+ if (err) {
+ dev_err(dev, "failed to set max Tx rate to %d Mbps for VF %u, error %d\n",
+ vf->max_tx_rate, vf->vf_id, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+/**
* ice_vf_rebuild_host_vlan_cfg - add VLAN 0 filter or rebuild the Port VLAN
* @vf: VF to add MAC filters for
*
@@ -932,6 +970,9 @@ static int ice_vf_rebuild_host_mac_cfg(struct ice_vf *vf)
enum ice_status status;
u8 broadcast[ETH_ALEN];
+ if (ice_is_eswitch_mode_switchdev(vf->pf))
+ return 0;
+
eth_broadcast_addr(broadcast);
status = ice_fltr_add_mac(vsi, broadcast, ICE_FWD_TO_VSI);
if (status) {
@@ -1414,6 +1455,11 @@ static void ice_vf_rebuild_host_cfg(struct ice_vf *vf)
if (ice_vf_rebuild_host_vlan_cfg(vf))
dev_err(dev, "failed to rebuild VLAN configuration for VF %u\n",
vf->vf_id);
+
+ if (ice_vf_rebuild_host_tx_rate_cfg(vf))
+ dev_err(dev, "failed to rebuild Tx rate limiting configuration for VF %u\n",
+ vf->vf_id);
+
/* rebuild aggregator node config for main VF VSI */
ice_vf_rebuild_aggregator_node_cfg(vsi);
}
@@ -1581,6 +1627,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
ice_vf_post_vsi_rebuild(vf);
}
+ if (ice_is_eswitch_mode_switchdev(pf))
+ if (ice_eswitch_rebuild(pf))
+ dev_warn(dev, "eswitch rebuild failed\n");
+
ice_flush(hw);
clear_bit(ICE_VF_DIS, pf->state);
@@ -1593,7 +1643,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
*
* Returns true if the PF or VF is disabled, false otherwise.
*/
-static bool ice_is_vf_disabled(struct ice_vf *vf)
+bool ice_is_vf_disabled(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
@@ -1711,6 +1761,8 @@ bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
}
ice_vf_post_vsi_rebuild(vf);
+ vsi = ice_get_vf_vsi(vf);
+ ice_eswitch_update_repr(vsi);
/* if the VF has been reset allow it to come up again */
if (ice_mbx_clear_malvf(&hw->mbx_snapshot, pf->malvfs, ICE_MAX_VF_COUNT, vf->vf_id))
@@ -1894,6 +1946,8 @@ static void ice_set_dflt_settings_vfs(struct ice_pf *pf)
*/
ice_vf_ctrl_invalidate_vsi(vf);
ice_vf_fdir_init(vf);
+
+ ice_vc_set_dflt_vf_ops(&vf->vc_ops);
}
}
@@ -1960,6 +2014,11 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
}
clear_bit(ICE_VF_DIS, pf->state);
+
+ ret = ice_eswitch_configure(pf);
+ if (ret)
+ goto err_unroll_sriov;
+
return 0;
err_unroll_sriov:
@@ -2823,7 +2882,7 @@ static void ice_wait_on_vf_reset(struct ice_vf *vf)
* disabled, and initialized so it can be configured and/or queried by a host
* administrator.
*/
-static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
{
struct ice_pf *pf;
@@ -3013,7 +3072,10 @@ static int ice_vc_cfg_promiscuous_mode_msg(struct ice_vf *vf, u8 *msg)
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
}
- ret = ice_cfg_vlan_pruning(vsi, true, !rm_promisc);
+ if (rm_promisc)
+ ret = ice_cfg_vlan_pruning(vsi, true);
+ else
+ ret = ice_cfg_vlan_pruning(vsi, false);
if (ret) {
dev_err(dev, "Failed to configure VLAN pruning in promiscuous mode\n");
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -3329,7 +3391,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
q_map = vqs->tx_queues;
for_each_set_bit(vf_q_id, &q_map, ICE_MAX_RSS_QS_PER_VF) {
- struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+ struct ice_tx_ring *ring = vsi->tx_rings[vf_q_id];
struct ice_txq_meta txq_meta = { 0 };
if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
@@ -3802,6 +3864,26 @@ static bool ice_is_legacy_umac_expired(struct ice_time_mac *last_added_umac)
}
/**
+ * ice_update_legacy_cached_mac - update cached hardware MAC for legacy VF
+ * @vf: VF to update
+ * @vc_ether_addr: structure from VIRTCHNL with MAC to check
+ *
+ * only update cached hardware MAC for legacy VF drivers on delete
+ * because we cannot guarantee order/type of MAC from the VF driver
+ */
+static void
+ice_update_legacy_cached_mac(struct ice_vf *vf,
+ struct virtchnl_ether_addr *vc_ether_addr)
+{
+ if (!ice_is_vc_addr_legacy(vc_ether_addr) ||
+ ice_is_legacy_umac_expired(&vf->legacy_last_added_umac))
+ return;
+
+ ether_addr_copy(vf->dev_lan_addr.addr, vf->legacy_last_added_umac.addr);
+ ether_addr_copy(vf->hw_lan_addr.addr, vf->legacy_last_added_umac.addr);
+}
+
+/**
* ice_vfhw_mac_del - update the VF's cached hardware MAC if allowed
* @vf: VF to update
* @vc_ether_addr: structure from VIRTCHNL with MAC to delete
@@ -3822,16 +3904,7 @@ ice_vfhw_mac_del(struct ice_vf *vf, struct virtchnl_ether_addr *vc_ether_addr)
*/
eth_zero_addr(vf->dev_lan_addr.addr);
- /* only update cached hardware MAC for legacy VF drivers on delete
- * because we cannot guarantee order/type of MAC from the VF driver
- */
- if (ice_is_vc_addr_legacy(vc_ether_addr) &&
- !ice_is_legacy_umac_expired(&vf->legacy_last_added_umac)) {
- ether_addr_copy(vf->dev_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- ether_addr_copy(vf->hw_lan_addr.addr,
- vf->legacy_last_added_umac.addr);
- }
+ ice_update_legacy_cached_mac(vf, vc_ether_addr);
}
/**
@@ -4207,7 +4280,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Enable VLAN pruning when non-zero VLAN is added */
if (!vlan_promisc && vid &&
!ice_vsi_is_vlan_pruning_ena(vsi)) {
- status = ice_cfg_vlan_pruning(vsi, true, false);
+ status = ice_cfg_vlan_pruning(vsi, true);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
@@ -4261,7 +4334,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
/* Disable VLAN pruning when only VLAN 0 is left */
if (vsi->num_vlan == 1 &&
ice_vsi_is_vlan_pruning_ena(vsi))
- ice_cfg_vlan_pruning(vsi, false, false);
+ ice_cfg_vlan_pruning(vsi, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
if (vlan_promisc) {
@@ -4400,6 +4473,168 @@ static int ice_vf_init_vlan_stripping(struct ice_vf *vf)
return ice_vsi_manage_vlan_stripping(vsi, false);
}
+static struct ice_vc_vf_ops ice_vc_vf_dflt_ops = {
+ .get_ver_msg = ice_vc_get_ver_msg,
+ .get_vf_res_msg = ice_vc_get_vf_res_msg,
+ .reset_vf = ice_vc_reset_vf_msg,
+ .add_mac_addr_msg = ice_vc_add_mac_addr_msg,
+ .del_mac_addr_msg = ice_vc_del_mac_addr_msg,
+ .cfg_qs_msg = ice_vc_cfg_qs_msg,
+ .ena_qs_msg = ice_vc_ena_qs_msg,
+ .dis_qs_msg = ice_vc_dis_qs_msg,
+ .request_qs_msg = ice_vc_request_qs_msg,
+ .cfg_irq_map_msg = ice_vc_cfg_irq_map_msg,
+ .config_rss_key = ice_vc_config_rss_key,
+ .config_rss_lut = ice_vc_config_rss_lut,
+ .get_stats_msg = ice_vc_get_stats_msg,
+ .cfg_promiscuous_mode_msg = ice_vc_cfg_promiscuous_mode_msg,
+ .add_vlan_msg = ice_vc_add_vlan_msg,
+ .remove_vlan_msg = ice_vc_remove_vlan_msg,
+ .ena_vlan_stripping = ice_vc_ena_vlan_stripping,
+ .dis_vlan_stripping = ice_vc_dis_vlan_stripping,
+ .handle_rss_cfg_msg = ice_vc_handle_rss_cfg,
+ .add_fdir_fltr_msg = ice_vc_add_fdir_fltr,
+ .del_fdir_fltr_msg = ice_vc_del_fdir_fltr,
+};
+
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops)
+{
+ *ops = ice_vc_vf_dflt_ops;
+}
+
+/**
+ * ice_vc_repr_add_mac
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * When port representors are created, we do not add MAC rule
+ * to firmware, we store it so that PF could report same
+ * MAC as VF.
+ */
+static int ice_vc_repr_add_mac(struct ice_vf *vf, u8 *msg)
+{
+ enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+ struct ice_vsi *vsi;
+ struct ice_pf *pf;
+ int i;
+
+ if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
+ !ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ pf = vf->pf;
+
+ vsi = ice_get_vf_vsi(vf);
+ if (!vsi) {
+ v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+ goto handle_mac_exit;
+ }
+
+ for (i = 0; i < al->num_elements; i++) {
+ u8 *mac_addr = al->list[i].addr;
+
+ if (!is_unicast_ether_addr(mac_addr) ||
+ ether_addr_equal(mac_addr, vf->hw_lan_addr.addr))
+ continue;
+
+ if (vf->pf_set_mac) {
+ dev_err(ice_pf_to_dev(pf), "VF attempting to override administratively set MAC address\n");
+ v_ret = VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
+ goto handle_mac_exit;
+ }
+
+ ice_vfhw_mac_add(vf, &al->list[i]);
+ vf->num_mac++;
+ break;
+ }
+
+handle_mac_exit:
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_ret, NULL, 0);
+}
+
+/**
+ * ice_vc_repr_del_mac - response with success for deleting MAC
+ * @vf: pointer to VF
+ * @msg: virtchannel message
+ *
+ * Respond with success to not break normal VF flow.
+ * For legacy VF driver try to update cached MAC address.
+ */
+static int
+ice_vc_repr_del_mac(struct ice_vf __always_unused *vf, u8 __always_unused *msg)
+{
+ struct virtchnl_ether_addr_list *al =
+ (struct virtchnl_ether_addr_list *)msg;
+
+ ice_update_legacy_cached_mac(vf, &al->list[0]);
+
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_ETH_ADDR,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_add_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't add VLAN in switchdev mode for VF %d\n", vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ADD_VLAN,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_del_vlan(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't delete VLAN in switchdev mode for VF %d\n", vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DEL_VLAN,
+ VIRTCHNL_STATUS_SUCCESS, NULL, 0);
+}
+
+static int ice_vc_repr_ena_vlan_stripping(struct ice_vf *vf)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't enable VLAN stripping in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_ENABLE_VLAN_STRIPPING,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static int ice_vc_repr_dis_vlan_stripping(struct ice_vf *vf)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't disable VLAN stripping in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+static int
+ice_vc_repr_cfg_promiscuous_mode(struct ice_vf *vf, u8 __always_unused *msg)
+{
+ dev_dbg(ice_pf_to_dev(vf->pf),
+ "Can't config promiscuous mode in switchdev mode for VF %d\n",
+ vf->vf_id);
+ return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE,
+ VIRTCHNL_STATUS_ERR_NOT_SUPPORTED,
+ NULL, 0);
+}
+
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops)
+{
+ ops->add_mac_addr_msg = ice_vc_repr_add_mac;
+ ops->del_mac_addr_msg = ice_vc_repr_del_mac;
+ ops->add_vlan_msg = ice_vc_repr_add_vlan;
+ ops->remove_vlan_msg = ice_vc_repr_del_vlan;
+ ops->ena_vlan_stripping = ice_vc_repr_ena_vlan_stripping;
+ ops->dis_vlan_stripping = ice_vc_repr_dis_vlan_stripping;
+ ops->cfg_promiscuous_mode_msg = ice_vc_repr_cfg_promiscuous_mode;
+}
+
/**
* ice_vc_process_vf_msg - Process request from VF
* @pf: pointer to the PF structure
@@ -4413,6 +4648,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
u32 v_opcode = le32_to_cpu(event->desc.cookie_high);
s16 vf_id = le16_to_cpu(event->desc.retval);
u16 msglen = event->msg_len;
+ struct ice_vc_vf_ops *ops;
u8 *msg = event->msg_buf;
struct ice_vf *vf = NULL;
struct device *dev;
@@ -4436,6 +4672,8 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
goto error_handler;
}
+ ops = &vf->vc_ops;
+
/* Perform basic checks on the msg */
err = virtchnl_vc_validate_vf_msg(&vf->vf_ver, v_opcode, msg, msglen);
if (err) {
@@ -4463,75 +4701,75 @@ error_handler:
switch (v_opcode) {
case VIRTCHNL_OP_VERSION:
- err = ice_vc_get_ver_msg(vf, msg);
+ err = ops->get_ver_msg(vf, msg);
break;
case VIRTCHNL_OP_GET_VF_RESOURCES:
- err = ice_vc_get_vf_res_msg(vf, msg);
+ err = ops->get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_RESET_VF:
- ice_vc_reset_vf_msg(vf);
+ ops->reset_vf(vf);
break;
case VIRTCHNL_OP_ADD_ETH_ADDR:
- err = ice_vc_add_mac_addr_msg(vf, msg);
+ err = ops->add_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_ETH_ADDR:
- err = ice_vc_del_mac_addr_msg(vf, msg);
+ err = ops->del_mac_addr_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- err = ice_vc_cfg_qs_msg(vf, msg);
+ err = ops->cfg_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_QUEUES:
- err = ice_vc_ena_qs_msg(vf, msg);
+ err = ops->ena_qs_msg(vf, msg);
ice_vc_notify_vf_link_state(vf);
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
- err = ice_vc_dis_qs_msg(vf, msg);
+ err = ops->dis_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_REQUEST_QUEUES:
- err = ice_vc_request_qs_msg(vf, msg);
+ err = ops->request_qs_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_IRQ_MAP:
- err = ice_vc_cfg_irq_map_msg(vf, msg);
+ err = ops->cfg_irq_map_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_KEY:
- err = ice_vc_config_rss_key(vf, msg);
+ err = ops->config_rss_key(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_RSS_LUT:
- err = ice_vc_config_rss_lut(vf, msg);
+ err = ops->config_rss_lut(vf, msg);
break;
case VIRTCHNL_OP_GET_STATS:
- err = ice_vc_get_stats_msg(vf, msg);
+ err = ops->get_stats_msg(vf, msg);
break;
case VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- err = ice_vc_cfg_promiscuous_mode_msg(vf, msg);
+ err = ops->cfg_promiscuous_mode_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_VLAN:
- err = ice_vc_add_vlan_msg(vf, msg);
+ err = ops->add_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_VLAN:
- err = ice_vc_remove_vlan_msg(vf, msg);
+ err = ops->remove_vlan_msg(vf, msg);
break;
case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING:
- err = ice_vc_ena_vlan_stripping(vf);
+ err = ops->ena_vlan_stripping(vf);
break;
case VIRTCHNL_OP_DISABLE_VLAN_STRIPPING:
- err = ice_vc_dis_vlan_stripping(vf);
+ err = ops->dis_vlan_stripping(vf);
break;
case VIRTCHNL_OP_ADD_FDIR_FILTER:
- err = ice_vc_add_fdir_fltr(vf, msg);
+ err = ops->add_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_DEL_FDIR_FILTER:
- err = ice_vc_del_fdir_fltr(vf, msg);
+ err = ops->del_fdir_fltr_msg(vf, msg);
break;
case VIRTCHNL_OP_ADD_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, true);
+ err = ops->handle_rss_cfg_msg(vf, msg, true);
break;
case VIRTCHNL_OP_DEL_RSS_CFG:
- err = ice_vc_handle_rss_cfg(vf, msg, false);
+ err = ops->handle_rss_cfg_msg(vf, msg, false);
break;
case VIRTCHNL_OP_UNKNOWN:
default:
@@ -4588,8 +4826,8 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
else
ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
- ivi->max_tx_rate = vf->tx_rate;
- ivi->min_tx_rate = 0;
+ ivi->max_tx_rate = vf->max_tx_rate;
+ ivi->min_tx_rate = vf->min_tx_rate;
return 0;
}
@@ -4699,6 +4937,11 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
struct ice_vf *vf;
int ret;
+ if (ice_is_eswitch_mode_switchdev(pf)) {
+ dev_info(ice_pf_to_dev(pf), "Trusted VF is forbidden in switchdev mode\n");
+ return -EOPNOTSUPP;
+ }
+
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
@@ -4763,6 +5006,122 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
}
/**
+ * ice_calc_all_vfs_min_tx_rate - calculate cumulative min Tx rate on all VFs
+ * @pf: PF associated with VFs
+ */
+static int ice_calc_all_vfs_min_tx_rate(struct ice_pf *pf)
+{
+ int rate = 0, i;
+
+ ice_for_each_vf(pf, i)
+ rate += pf->vf[i].min_tx_rate;
+
+ return rate;
+}
+
+/**
+ * ice_min_tx_rate_oversubscribed - check if min Tx rate causes oversubscription
+ * @vf: VF trying to configure min_tx_rate
+ * @min_tx_rate: min Tx rate in Mbps
+ *
+ * Check if the min_tx_rate being passed in will cause oversubscription of total
+ * min_tx_rate based on the current link speed and all other VFs configured
+ * min_tx_rate
+ *
+ * Return true if the passed min_tx_rate would cause oversubscription, else
+ * return false
+ */
+static bool
+ice_min_tx_rate_oversubscribed(struct ice_vf *vf, int min_tx_rate)
+{
+ int link_speed_mbps = ice_get_link_speed_mbps(ice_get_vf_vsi(vf));
+ int all_vfs_min_tx_rate = ice_calc_all_vfs_min_tx_rate(vf->pf);
+
+ /* this VF's previous rate is being overwritten */
+ all_vfs_min_tx_rate -= vf->min_tx_rate;
+
+ if (all_vfs_min_tx_rate + min_tx_rate > link_speed_mbps) {
+ dev_err(ice_pf_to_dev(vf->pf), "min_tx_rate of %d Mbps on VF %u would cause oversubscription of %d Mbps based on the current link speed %d Mbps\n",
+ min_tx_rate, vf->vf_id,
+ all_vfs_min_tx_rate + min_tx_rate - link_speed_mbps,
+ link_speed_mbps);
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * ice_set_vf_bw - set min/max VF bandwidth
+ * @netdev: network interface device structure
+ * @vf_id: VF identifier
+ * @min_tx_rate: Minimum Tx rate in Mbps
+ * @max_tx_rate: Maximum Tx rate in Mbps
+ */
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+ struct ice_vsi *vsi;
+ struct device *dev;
+ struct ice_vf *vf;
+ int ret;
+
+ dev = ice_pf_to_dev(pf);
+ if (ice_validate_vf_id(pf, vf_id))
+ return -EINVAL;
+
+ vf = &pf->vf[vf_id];
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
+
+ vsi = ice_get_vf_vsi(vf);
+
+ /* when max_tx_rate is zero that means no max Tx rate limiting, so only
+ * check if max_tx_rate is non-zero
+ */
+ if (max_tx_rate && min_tx_rate > max_tx_rate) {
+ dev_err(dev, "Cannot set min Tx rate %d Mbps greater than max Tx rate %d Mbps\n",
+ min_tx_rate, max_tx_rate);
+ return -EINVAL;
+ }
+
+ if (min_tx_rate && ice_is_dcb_active(pf)) {
+ dev_err(dev, "DCB on PF is currently enabled. VF min Tx rate limiting not allowed on this PF.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (ice_min_tx_rate_oversubscribed(vf, min_tx_rate))
+ return -EINVAL;
+
+ if (vf->min_tx_rate != (unsigned int)min_tx_rate) {
+ ret = ice_set_min_bw_limit(vsi, (u64)min_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set min-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->min_tx_rate = min_tx_rate;
+ }
+
+ if (vf->max_tx_rate != (unsigned int)max_tx_rate) {
+ ret = ice_set_max_bw_limit(vsi, (u64)max_tx_rate * 1000);
+ if (ret) {
+ dev_err(dev, "Unable to set max-tx-rate for VF %d\n",
+ vf->vf_id);
+ return ret;
+ }
+
+ vf->max_tx_rate = max_tx_rate;
+ }
+
+ return 0;
+}
+
+/**
* ice_get_vf_stats - populate some stats for the VF
* @netdev: the netdev of the PF
* @vf_id: the host OS identifier (0-255)
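The arithmetic behind the min Tx rate oversubscription check above, as a standalone sketch: the sum of every VF's min Tx rate, with the VF being reconfigured counted at its new value, must not exceed the current link speed. Plain arrays stand in for the PF's VF table.

#include <stdbool.h>
#include <stdio.h>

static bool min_rate_oversubscribed(const int *min_rates, int num_vfs,
                                    int vf_id, int new_rate,
                                    int link_speed_mbps)
{
        int total = 0;

        for (int i = 0; i < num_vfs; i++)
                total += min_rates[i];

        /* this VF's previous rate is being overwritten */
        total -= min_rates[vf_id];

        return total + new_rate > link_speed_mbps;
}

int main(void)
{
        int min_rates[4] = { 2000, 3000, 0, 1000 };     /* Mbps per VF */
        int link = 10000;                               /* 10G link */

        printf("VF2 -> 5000 Mbps: %s\n",
               min_rate_oversubscribed(min_rates, 4, 2, 5000, link) ?
               "oversubscribed" : "ok");
        printf("VF2 -> 3000 Mbps: %s\n",
               min_rate_oversubscribed(min_rates, 4, 2, 3000, link) ?
               "oversubscribed" : "ok");
        return 0;
}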
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 842cb077df86..5ff93a08f54c 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -70,6 +70,32 @@ struct ice_mdd_vf_events {
u16 last_printed;
};
+struct ice_vf;
+
+struct ice_vc_vf_ops {
+ int (*get_ver_msg)(struct ice_vf *vf, u8 *msg);
+ int (*get_vf_res_msg)(struct ice_vf *vf, u8 *msg);
+ void (*reset_vf)(struct ice_vf *vf);
+ int (*add_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_mac_addr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*dis_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*request_qs_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_irq_map_msg)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_key)(struct ice_vf *vf, u8 *msg);
+ int (*config_rss_lut)(struct ice_vf *vf, u8 *msg);
+ int (*get_stats_msg)(struct ice_vf *vf, u8 *msg);
+ int (*cfg_promiscuous_mode_msg)(struct ice_vf *vf, u8 *msg);
+ int (*add_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*remove_vlan_msg)(struct ice_vf *vf, u8 *msg);
+ int (*ena_vlan_stripping)(struct ice_vf *vf);
+ int (*dis_vlan_stripping)(struct ice_vf *vf);
+ int (*handle_rss_cfg_msg)(struct ice_vf *vf, u8 *msg, bool add);
+ int (*add_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+ int (*del_fdir_fltr_msg)(struct ice_vf *vf, u8 *msg);
+};
+
/* VF information structure */
struct ice_vf {
struct ice_pf *pf;
@@ -99,7 +125,8 @@ struct ice_vf {
* the main LAN VSI for the PF.
*/
u16 lan_vsi_num; /* ID as used by firmware */
- unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
+ unsigned int min_tx_rate; /* Minimum Tx bandwidth limit in Mbps */
+ unsigned int max_tx_rate; /* Maximum Tx bandwidth limit in Mbps */
DECLARE_BITMAP(vf_states, ICE_VF_STATES_NBITS); /* VF runtime states */
u64 num_inval_msgs; /* number of continuous invalid msgs */
@@ -111,9 +138,17 @@ struct ice_vf {
struct ice_mdd_vf_events mdd_rx_events;
struct ice_mdd_vf_events mdd_tx_events;
DECLARE_BITMAP(opcodes_allowlist, VIRTCHNL_OP_MAX);
+
+ struct ice_repr *repr;
+
+ struct ice_vc_vf_ops vc_ops;
+
+ /* devlink port data */
+ struct devlink_port devlink_port;
};
#ifdef CONFIG_PCI_IOV
+struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf);
void ice_process_vflr_event(struct ice_pf *pf);
int ice_sriov_configure(struct pci_dev *pdev, int num_vfs);
int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
@@ -124,6 +159,9 @@ void ice_free_vfs(struct ice_pf *pf);
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event);
void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
+void ice_vc_notify_vf_link_state(struct ice_vf *vf);
+void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops);
+void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
@@ -135,10 +173,18 @@ int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
__be16 vlan_proto);
+int
+ice_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
+
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted);
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
+int ice_check_vf_ready_for_cfg(struct ice_vf *vf);
+
+bool ice_is_vf_disabled(struct ice_vf *vf);
+
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
@@ -164,6 +210,9 @@ static inline
void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event) { }
static inline void ice_vc_notify_link_state(struct ice_pf *pf) { }
static inline void ice_vc_notify_reset(struct ice_pf *pf) { }
+static inline void ice_vc_notify_vf_link_state(struct ice_vf *vf) { }
+static inline void ice_vc_change_ops_to_repr(struct ice_vc_vf_ops *ops) { }
+static inline void ice_vc_set_dflt_vf_ops(struct ice_vc_vf_ops *ops) { }
static inline void ice_set_vf_state_qs_dis(struct ice_vf *vf) { }
static inline
void ice_vf_lan_overflow_event(struct ice_pf *pf, struct ice_rq_event_info *event) { }
@@ -171,6 +220,21 @@ static inline void ice_print_vfs_mdd_events(struct ice_pf *pf) { }
static inline void ice_print_vf_rx_mdd_event(struct ice_vf *vf) { }
static inline void ice_restore_all_vfs_msi_state(struct pci_dev *pdev) { }
+static inline int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool ice_is_vf_disabled(struct ice_vf *vf)
+{
+ return true;
+}
+
+static inline struct ice_vsi *ice_get_vf_vsi(struct ice_vf *vf)
+{
+ return NULL;
+}
+
static inline bool
ice_is_malicious_vf(struct ice_pf __always_unused *pf,
struct ice_rq_event_info __always_unused *event,
@@ -245,6 +309,14 @@ ice_set_vf_link_state(struct net_device __always_unused *netdev,
}
static inline int
+ice_set_vf_bw(struct net_device __always_unused *netdev,
+ int __always_unused vf_id, int __always_unused min_tx_rate,
+ int __always_unused max_tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
ice_calc_vf_reg_idx(struct ice_vf __always_unused *vf,
struct ice_q_vector __always_unused *q_vector)
{
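The header hunk above adds a per-VF virtchnl dispatch table, struct ice_vc_vf_ops, together with ice_vc_set_dflt_vf_ops() and ice_vc_change_ops_to_repr() for switching between the default handlers and the port-representor variants. A minimal sketch of how such a table is consumed, assuming the default ops are installed into vf->vc_ops at VF creation; the wrapper name is hypothetical and not part of the driver.

static int example_dispatch_add_vlan(struct ice_vf *vf, u8 *msg)
{
	/* vc_ops is presumably filled by ice_vc_set_dflt_vf_ops(&vf->vc_ops)
	 * and may later be swapped by ice_vc_change_ops_to_repr()
	 */
	return vf->vc_ops.add_vlan_msg(vf, msg);
}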
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 5a9f61deeb38..ff55cb415b11 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -67,7 +67,7 @@ ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
* @q_vector: queue vector
*/
static void
-ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
+ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_rx_ring *rx_ring,
struct ice_q_vector *q_vector)
{
struct ice_pf *pf = vsi->back;
@@ -104,16 +104,17 @@ ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
u16 reg_idx = q_vector->reg_idx;
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
- struct ice_ring *ring;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
ice_cfg_itr(hw, q_vector);
- ice_for_each_ring(ring, q_vector->tx)
- ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_tx_ring(tx_ring, q_vector->tx)
+ ice_cfg_txq_interrupt(vsi, tx_ring->reg_idx, reg_idx,
q_vector->tx.itr_idx);
- ice_for_each_ring(ring, q_vector->rx)
- ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
+ ice_for_each_rx_ring(rx_ring, q_vector->rx)
+ ice_cfg_rxq_interrupt(vsi, rx_ring->reg_idx, reg_idx,
q_vector->rx.itr_idx);
ice_flush(hw);
@@ -144,8 +145,9 @@ static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_txq_meta txq_meta = { };
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
int timeout = 50;
int err;
@@ -171,7 +173,7 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
if (err)
return err;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(&txq_meta, 0, sizeof(txq_meta));
ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
@@ -201,8 +203,9 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
struct ice_aqc_add_tx_qgrp *qg_buf;
- struct ice_ring *tx_ring, *rx_ring;
struct ice_q_vector *q_vector;
+ struct ice_tx_ring *tx_ring;
+ struct ice_rx_ring *rx_ring;
u16 size;
int err;
@@ -225,7 +228,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
goto free_buf;
if (ice_is_xdp_ena_vsi(vsi)) {
- struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];
+ struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_idx];
memset(qg_buf, 0, size);
qg_buf->num_txqs = 1;
@@ -233,7 +236,7 @@ static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
if (err)
goto free_buf;
ice_set_ring_xdp(xdp_ring);
- xdp_ring->xsk_pool = ice_xsk_pool(xdp_ring);
+ xdp_ring->xsk_pool = ice_tx_xsk_pool(xdp_ring);
}
err = ice_vsi_cfg_rxq(rx_ring);
@@ -360,56 +363,50 @@ xsk_pool_if_up:
*
* Returns true if all allocations were successful, false if any fail.
*/
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count)
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count)
{
union ice_32b_rx_flex_desc *rx_desc;
u16 ntu = rx_ring->next_to_use;
- struct ice_rx_buf *rx_buf;
- bool ok = true;
+ struct xdp_buff **xdp;
+ u32 nb_buffs, i;
dma_addr_t dma;
- if (!count)
- return true;
-
rx_desc = ICE_RX_DESC(rx_ring, ntu);
- rx_buf = &rx_ring->rx_buf[ntu];
+ xdp = &rx_ring->xdp_buf[ntu];
- do {
- rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
- if (!rx_buf->xdp) {
- ok = false;
- break;
- }
+ nb_buffs = min_t(u16, count, rx_ring->count - ntu);
+ nb_buffs = xsk_buff_alloc_batch(rx_ring->xsk_pool, xdp, nb_buffs);
+ if (!nb_buffs)
+ return false;
- dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
+ i = nb_buffs;
+ while (i--) {
+ dma = xsk_buff_xdp_get_dma(*xdp);
rx_desc->read.pkt_addr = cpu_to_le64(dma);
- rx_desc->wb.status_error0 = 0;
rx_desc++;
- rx_buf++;
- ntu++;
-
- if (unlikely(ntu == rx_ring->count)) {
- rx_desc = ICE_RX_DESC(rx_ring, 0);
- rx_buf = rx_ring->rx_buf;
- ntu = 0;
- }
- } while (--count);
+ xdp++;
+ }
- if (rx_ring->next_to_use != ntu) {
- /* clear the status bits for the next_to_use descriptor */
- rx_desc->wb.status_error0 = 0;
- ice_release_rx_desc(rx_ring, ntu);
+ ntu += nb_buffs;
+ if (ntu == rx_ring->count) {
+ rx_desc = ICE_RX_DESC(rx_ring, 0);
+ xdp = rx_ring->xdp_buf;
+ ntu = 0;
}
- return ok;
+ /* clear the status bits for the next_to_use descriptor */
+ rx_desc->wb.status_error0 = 0;
+ ice_release_rx_desc(rx_ring, ntu);
+
+ return count == nb_buffs;
}
/**
* ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
* @rx_ring: Rx ring
*/
-static void ice_bump_ntc(struct ice_ring *rx_ring)
+static void ice_bump_ntc(struct ice_rx_ring *rx_ring)
{
int ntc = rx_ring->next_to_clean + 1;
@@ -421,19 +418,19 @@ static void ice_bump_ntc(struct ice_ring *rx_ring)
/**
* ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
* @rx_ring: Rx ring
- * @rx_buf: zero-copy Rx buffer
+ * @xdp_arr: Pointer to the SW ring of xdp_buff pointers
*
* This function allocates a new skb from a zero-copy Rx buffer.
*
* Returns the skb on success, NULL on failure.
*/
static struct sk_buff *
-ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ice_construct_skb_zc(struct ice_rx_ring *rx_ring, struct xdp_buff **xdp_arr)
{
- unsigned int metasize = rx_buf->xdp->data - rx_buf->xdp->data_meta;
- unsigned int datasize = rx_buf->xdp->data_end - rx_buf->xdp->data;
- unsigned int datasize_hard = rx_buf->xdp->data_end -
- rx_buf->xdp->data_hard_start;
+ struct xdp_buff *xdp = *xdp_arr;
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ unsigned int datasize_hard = xdp->data_end - xdp->data_hard_start;
struct sk_buff *skb;
skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
@@ -441,13 +438,13 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
if (unlikely(!skb))
return NULL;
- skb_reserve(skb, rx_buf->xdp->data - rx_buf->xdp->data_hard_start);
- memcpy(__skb_put(skb, datasize), rx_buf->xdp->data, datasize);
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
if (metasize)
skb_metadata_set(skb, metasize);
- xsk_buff_free(rx_buf->xdp);
- rx_buf->xdp = NULL;
+ xsk_buff_free(xdp);
+ *xdp_arr = NULL;
return skb;
}
@@ -455,22 +452,18 @@ ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
* ice_run_xdp_zc - Executes an XDP program in zero-copy path
* @rx_ring: Rx ring
* @xdp: xdp_buff used as input to the XDP program
+ * @xdp_prog: XDP program to run
+ * @xdp_ring: ring to be used for XDP_TX action
*
* Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
*/
static int
-ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
+ice_run_xdp_zc(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ struct bpf_prog *xdp_prog, struct ice_tx_ring *xdp_ring)
{
int err, result = ICE_XDP_PASS;
- struct bpf_prog *xdp_prog;
- struct ice_ring *xdp_ring;
u32 act;
- /* ZC patch is enabled only when XDP program is set,
- * so here it can not be NULL
- */
- xdp_prog = READ_ONCE(rx_ring->xdp_prog);
-
act = bpf_prog_run_xdp(xdp_prog, xdp);
if (likely(act == XDP_REDIRECT)) {
@@ -484,7 +477,6 @@ ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
case XDP_PASS:
break;
case XDP_TX:
- xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
result = ice_xmit_xdp_buff(xdp, xdp_ring);
if (result == ICE_XDP_CONSUMED)
goto out_failure;
@@ -511,17 +503,25 @@ out_failure:
*
* Returns number of processed packets on success, remaining budget on failure.
*/
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
{
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
+ struct ice_tx_ring *xdp_ring;
unsigned int xdp_xmit = 0;
+ struct bpf_prog *xdp_prog;
bool failure = false;
+	/* The ZC path is enabled only when an XDP program is set,
+	 * so it cannot be NULL here
+ */
+ xdp_prog = READ_ONCE(rx_ring->xdp_prog);
+ xdp_ring = rx_ring->xdp_ring;
+
while (likely(total_rx_packets < (unsigned int)budget)) {
union ice_32b_rx_flex_desc *rx_desc;
unsigned int size, xdp_res = 0;
- struct ice_rx_buf *rx_buf;
+ struct xdp_buff **xdp;
struct sk_buff *skb;
u16 stat_err_bits;
u16 vlan_tag = 0;
@@ -544,18 +544,18 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (!size)
break;
- rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];
- rx_buf->xdp->data_end = rx_buf->xdp->data + size;
- xsk_buff_dma_sync_for_cpu(rx_buf->xdp, rx_ring->xsk_pool);
+ xdp = &rx_ring->xdp_buf[rx_ring->next_to_clean];
+ xsk_buff_set_size(*xdp, size);
+ xsk_buff_dma_sync_for_cpu(*xdp, rx_ring->xsk_pool);
- xdp_res = ice_run_xdp_zc(rx_ring, rx_buf->xdp);
+ xdp_res = ice_run_xdp_zc(rx_ring, *xdp, xdp_prog, xdp_ring);
if (xdp_res) {
if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR))
xdp_xmit |= xdp_res;
else
- xsk_buff_free(rx_buf->xdp);
+ xsk_buff_free(*xdp);
- rx_buf->xdp = NULL;
+ *xdp = NULL;
total_rx_bytes += size;
total_rx_packets++;
cleaned_count++;
@@ -565,7 +565,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
}
/* XDP_PASS path */
- skb = ice_construct_skb_zc(rx_ring, rx_buf);
+ skb = ice_construct_skb_zc(rx_ring, xdp);
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
break;
@@ -596,7 +596,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
if (cleaned_count >= ICE_RX_BUF_WRITE)
failure = !ice_alloc_rx_bufs_zc(rx_ring, cleaned_count);
- ice_finalize_xdp_rx(rx_ring, xdp_xmit);
+ ice_finalize_xdp_rx(xdp_ring, xdp_xmit);
ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);
if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
@@ -618,7 +618,7 @@ int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
*
* Returns true if cleanup/transmission is done.
*/
-static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
+static bool ice_xmit_zc(struct ice_tx_ring *xdp_ring, int budget)
{
struct ice_tx_desc *tx_desc = NULL;
bool work_done = true;
@@ -669,7 +669,7 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
* @tx_buf: Tx buffer to clean
*/
static void
-ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
+ice_clean_xdp_tx_buf(struct ice_tx_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
@@ -684,7 +684,7 @@ ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
*
 * Returns true if cleanup/transmission is done.
*/
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget)
{
int total_packets = 0, total_bytes = 0;
s16 ntc = xdp_ring->next_to_clean;
@@ -757,7 +757,7 @@ ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_q_vector *q_vector;
struct ice_vsi *vsi = np->vsi;
- struct ice_ring *ring;
+ struct ice_tx_ring *ring;
if (test_bit(ICE_DOWN, vsi->state))
return -ENETDOWN;
@@ -808,17 +808,17 @@ bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
* ice_xsk_clean_rx_ring - clean buffer pool queues connected to a given Rx ring
* @rx_ring: ring to be cleaned
*/
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
{
u16 i;
for (i = 0; i < rx_ring->count; i++) {
- struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];
+ struct xdp_buff **xdp = &rx_ring->xdp_buf[i];
- if (!rx_buf->xdp)
+		if (!*xdp)
continue;
- rx_buf->xdp = NULL;
+ *xdp = NULL;
}
}
@@ -826,7 +826,7 @@ void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
* ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its buffer pool queues
* @xdp_ring: XDP_Tx ring
*/
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring)
{
u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
u32 xsk_frames = 0;
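The ice_xsk.c hunks above replace the one-buffer-at-a-time xsk_buff_alloc() fill loop with xsk_buff_alloc_batch(), which hands out up to n xdp_buff pointers from the pool in a single call and returns how many it actually delivered, leaving the caller only the DMA-address programming. A stripped-down sketch of that pattern, with the ring and descriptor details replaced by placeholder parameters:

#include <net/xdp_sock_drv.h>

/* Fill up to 'n' packet-address slots from an AF_XDP pool in one batch;
 * returns the number of buffers actually obtained (may be less than 'n').
 */
static u32 fill_from_pool(struct xsk_buff_pool *pool, struct xdp_buff **bufs,
			  __le64 *pkt_addrs, u32 n)
{
	u32 got, i;

	got = xsk_buff_alloc_batch(pool, bufs, n);
	for (i = 0; i < got; i++)
		pkt_addrs[i] = cpu_to_le64(xsk_buff_xdp_get_dma(bufs[i]));

	return got;
}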
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.h b/drivers/net/ethernet/intel/ice/ice_xsk.h
index ea208808623a..4c7bd8e9dfc4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.h
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.h
@@ -11,13 +11,13 @@ struct ice_vsi;
#ifdef CONFIG_XDP_SOCKETS
int ice_xsk_pool_setup(struct ice_vsi *vsi, struct xsk_buff_pool *pool,
u16 qid);
-int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
-bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
+int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget);
+bool ice_clean_tx_irq_zc(struct ice_tx_ring *xdp_ring, int budget);
int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_rx_ring *rx_ring, u16 count);
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
-void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
-void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
+void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring);
+void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring);
#else
static inline int
ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
@@ -28,21 +28,21 @@ ice_xsk_pool_setup(struct ice_vsi __always_unused *vsi,
}
static inline int
-ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
+ice_clean_rx_irq_zc(struct ice_rx_ring __always_unused *rx_ring,
int __always_unused budget)
{
return 0;
}
static inline bool
-ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
+ice_clean_tx_irq_zc(struct ice_tx_ring __always_unused *xdp_ring,
int __always_unused budget)
{
return false;
}
static inline bool
-ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+ice_alloc_rx_bufs_zc(struct ice_rx_ring __always_unused *rx_ring,
u16 __always_unused count)
{
return false;
@@ -60,7 +60,7 @@ ice_xsk_wakeup(struct net_device __always_unused *netdev,
return -EOPNOTSUPP;
}
-static inline void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring) { }
-static inline void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring) { }
+static inline void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring) { }
+static inline void ice_xsk_clean_xdp_ring(struct ice_tx_ring *xdp_ring) { }
#endif /* CONFIG_XDP_SOCKETS */
#endif /* !_ICE_XSK_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 751de06019a0..836be0d3b291 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -577,16 +577,15 @@ static void igb_set_i2c_data(void *data, int state)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- if (state)
- i2cctl |= E1000_I2C_DATA_OUT;
- else
+ if (state) {
+ i2cctl |= E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
+ } else {
+ i2cctl &= ~E1000_I2C_DATA_OE_N;
i2cctl &= ~E1000_I2C_DATA_OUT;
+ }
- i2cctl &= ~E1000_I2C_DATA_OE_N;
- i2cctl |= E1000_I2C_CLK_OE_N;
wr32(E1000_I2CPARAMS, i2cctl);
wrfl();
-
}
/**
@@ -603,8 +602,7 @@ static void igb_set_i2c_clk(void *data, int state)
s32 i2cctl = rd32(E1000_I2CPARAMS);
if (state) {
- i2cctl |= E1000_I2C_CLK_OUT;
- i2cctl &= ~E1000_I2C_CLK_OE_N;
+ i2cctl |= E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N;
} else {
i2cctl &= ~E1000_I2C_CLK_OUT;
i2cctl &= ~E1000_I2C_CLK_OE_N;
@@ -3116,12 +3114,21 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
+ struct e1000_hw *hw = &adapter->hw;
s32 status = 0;
+ s32 i2cctl;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
return 0;
+ i2cctl = rd32(E1000_I2CPARAMS);
+ i2cctl |= E1000_I2CBB_EN
+ | E1000_I2C_CLK_OUT | E1000_I2C_CLK_OE_N
+ | E1000_I2C_DATA_OUT | E1000_I2C_DATA_OE_N;
+ wr32(E1000_I2CPARAMS, i2cctl);
+ wrfl();
+
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
* the protocol through toggling of the 4 bits in the register.
@@ -3356,7 +3363,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
@@ -4988,7 +4995,7 @@ static int igb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
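The MAC-address hunks in this series (igb above, and igbvf, igc, ixgb, ixgbe, ixgbevf, jme, korina, and the Marvell and Lantiq drivers below) all follow one pattern: direct writes into netdev->dev_addr via memcpy(), ether_addr_copy() or of_get_mac_address() become eth_hw_addr_set() or of_get_ethdev_address(), in preparation for dev_addr becoming const for drivers. A minimal before/after sketch; the function name is illustrative.

#include <linux/etherdevice.h>

static void example_assign_mac(struct net_device *netdev, const u8 *mac)
{
	/* old style, disallowed once dev_addr is const:
	 *	memcpy(netdev->dev_addr, mac, ETH_ALEN);
	 */
	eth_hw_addr_set(netdev, mac);
}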
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index d32e72d953c8..74ccd622251a 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1527,8 +1527,7 @@ static void igbvf_reset(struct igbvf_adapter *adapter)
spin_unlock_bh(&hw->mbx_lock);
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
memcpy(netdev->perm_addr, adapter->hw.mac.addr,
netdev->addr_len);
}
@@ -1813,7 +1812,7 @@ static int igbvf_set_mac(struct net_device *netdev, void *p)
if (!ether_addr_equal(addr->sa_data, hw->mac.addr))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -2816,8 +2815,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
}
spin_unlock_bh(&hw->mbx_lock);
diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c
index 84f142f5e472..f068b66b8025 100644
--- a/drivers/net/ethernet/intel/igc/igc_base.c
+++ b/drivers/net/ethernet/intel/igc/igc_base.c
@@ -40,7 +40,7 @@ static s32 igc_reset_hw_base(struct igc_hw *hw)
ctrl = rd32(IGC_CTRL);
hw_dbg("Issuing a global reset to MAC\n");
- wr32(IGC_CTRL, ctrl | IGC_CTRL_DEV_RST);
+ wr32(IGC_CTRL, ctrl | IGC_CTRL_RST);
ret_val = igc_get_auto_rd_done(hw);
if (ret_val) {
@@ -158,11 +158,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw)
struct igc_phy_info *phy = &hw->phy;
s32 ret_val = 0;
- if (hw->phy.media_type != igc_media_type_copper) {
- phy->type = igc_phy_none;
- goto out;
- }
-
phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500;
phy->reset_delay_us = 100;
@@ -207,6 +202,7 @@ static s32 igc_get_invariants_base(struct igc_hw *hw)
case IGC_DEV_ID_I225_K2:
case IGC_DEV_ID_I226_K:
case IGC_DEV_ID_I225_LMVP:
+ case IGC_DEV_ID_I226_LMVP:
case IGC_DEV_ID_I225_IT:
case IGC_DEV_ID_I226_LM:
case IGC_DEV_ID_I226_V:
diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h
index a4bbee748798..c7fe61509d5b 100644
--- a/drivers/net/ethernet/intel/igc/igc_defines.h
+++ b/drivers/net/ethernet/intel/igc/igc_defines.h
@@ -130,7 +130,7 @@
#define IGC_ERR_SWFW_SYNC 13
/* Device Control */
-#define IGC_CTRL_DEV_RST 0x20000000 /* Device reset */
+#define IGC_CTRL_RST 0x04000000 /* Global reset */
#define IGC_CTRL_PHY_RST 0x80000000 /* PHY Reset */
#define IGC_CTRL_SLU 0x00000040 /* Set link up (Force Link) */
diff --git a/drivers/net/ethernet/intel/igc/igc_hw.h b/drivers/net/ethernet/intel/igc/igc_hw.h
index 4461f8b9a864..587db7483f25 100644
--- a/drivers/net/ethernet/intel/igc/igc_hw.h
+++ b/drivers/net/ethernet/intel/igc/igc_hw.h
@@ -22,8 +22,9 @@
#define IGC_DEV_ID_I220_V 0x15F7
#define IGC_DEV_ID_I225_K 0x3100
#define IGC_DEV_ID_I225_K2 0x3101
+#define IGC_DEV_ID_I226_K 0x3102
#define IGC_DEV_ID_I225_LMVP 0x5502
-#define IGC_DEV_ID_I226_K 0x5504
+#define IGC_DEV_ID_I226_LMVP 0x5503
#define IGC_DEV_ID_I225_IT 0x0D9F
#define IGC_DEV_ID_I226_LM 0x125B
#define IGC_DEV_ID_I226_V 0x125C
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 0e19b4d02e62..8e448288ee26 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -56,6 +56,7 @@ static const struct pci_device_id igc_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_K2), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_K), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_LMVP), board_base },
+ { PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LMVP), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I225_IT), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_LM), board_base },
{ PCI_VDEVICE(INTEL, IGC_DEV_ID_I226_V), board_base },
@@ -949,7 +950,7 @@ static int igc_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
/* set the correct pool for the new PF MAC address in entry 0 */
@@ -6377,7 +6378,7 @@ static int igc_probe(struct pci_dev *pdev,
dev_err(&pdev->dev, "NVM Read Error\n");
}
- memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
dev_err(&pdev->dev, "Invalid MAC Address\n");
diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
index 0f021909b430..30568e3544cd 100644
--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
+++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
@@ -773,7 +773,7 @@ static bool igc_is_crosststamp_supported(struct igc_adapter *adapter)
static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp)
{
-#if IS_ENABLED(CONFIG_X86_TSC)
+#if IS_ENABLED(CONFIG_X86_TSC) && !defined(CONFIG_UML)
return convert_art_ns_to_tsc(tstamp);
#else
return (struct system_counterval_t) { };
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
index a430871d1c27..c8d1e815ec6b 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.c
@@ -549,7 +549,7 @@ ixgb_mta_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
- u8 *addr,
+ const u8 *addr,
u32 index)
{
u32 rar_low, rar_high;
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
index 6064583095da..70bcff5fb3db 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_hw.h
@@ -740,7 +740,7 @@ bool ixgb_adapter_start(struct ixgb_hw *hw);
void ixgb_check_for_link(struct ixgb_hw *hw);
bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
-void ixgb_rar_set(struct ixgb_hw *hw, u8 *addr, u32 index);
+void ixgb_rar_set(struct ixgb_hw *hw, const u8 *addr, u32 index);
/* Filters (multicast, vlan, receive) */
void ixgb_mc_addr_list_update(struct ixgb_hw *hw, u8 *mc_addr_list,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 1588376d4c67..99d481904ce6 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -362,6 +362,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ixgb_adapter *adapter;
static int cards_found = 0;
int pci_using_dac;
+ u8 addr[ETH_ALEN];
int i;
int err;
@@ -461,7 +462,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_eeprom;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
@@ -1030,7 +1032,7 @@ ixgb_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ixgb_rar_set(&adapter->hw, addr->sa_data, 0);
@@ -2227,6 +2229,7 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
if (pci_enable_device(pdev)) {
netif_err(adapter, probe, adapter->netdev,
@@ -2250,7 +2253,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
"After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
- ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
+ ixgb_get_ee_mac_addr(&adapter->hw, addr);
+ eth_hw_addr_set(netdev, addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a604552fa634..4a69823e6abd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -351,6 +351,7 @@ struct ixgbe_ring {
};
u16 rx_offset;
struct xdp_rxq_info xdp_rxq;
+ spinlock_t tx_lock; /* used in XDP mode */
struct xsk_buff_pool *xsk_pool;
u16 ring_idx; /* {rx,tx,xdp}_ring back reference idx */
u16 rx_buf_len;
@@ -375,11 +376,13 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
-#define MAX_XDP_QUEUES (IXGBE_MAX_FDIR_INDICES + 1)
+#define IXGBE_MAX_XDP_QS (IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES 4
#define IXGBE_BAD_L2A_QUEUE 3
#define IXGBE_MAX_MACVLANS 63
+DECLARE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+
struct ixgbe_ring_feature {
u16 limit; /* upper limit on feature indices */
u16 indices; /* current value of indices */
@@ -629,7 +632,7 @@ struct ixgbe_adapter {
/* XDP */
int num_xdp_queues;
- struct ixgbe_ring *xdp_ring[MAX_XDP_QUEUES];
+ struct ixgbe_ring *xdp_ring[IXGBE_MAX_XDP_QS];
unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled rings */
/* TX */
@@ -772,6 +775,22 @@ struct ixgbe_adapter {
#endif /* CONFIG_IXGBE_IPSEC */
};
+static inline int ixgbe_determine_xdp_q_idx(int cpu)
+{
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ return cpu % IXGBE_MAX_XDP_QS;
+ else
+ return cpu;
+}
+
+static inline
+struct ixgbe_ring *ixgbe_determine_xdp_ring(struct ixgbe_adapter *adapter)
+{
+ int index = ixgbe_determine_xdp_q_idx(smp_processor_id());
+
+ return adapter->xdp_ring[index];
+}
+
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
switch (adapter->hw.mac.type) {
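The ixgbe.h hunk above introduces the ixgbe_xdp_locking_key static key and the ixgbe_determine_xdp_q_idx()/ixgbe_determine_xdp_ring() helpers; the ixgbe_main.c and ixgbe_xsk.c hunks further down wrap every XDP transmit in the ring's tx_lock whenever the key is enabled, i.e. when there are more CPUs than XDP Tx queues and several CPUs may share a ring. A condensed sketch of that pattern; apart from the wrapper name, only identifiers that appear in these hunks are used.

static int example_xdp_xmit_one(struct ixgbe_adapter *adapter,
				struct xdp_frame *xdpf)
{
	struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
	int ret;

	/* lock only when CPUs outnumber XDP queues and rings are shared */
	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
		spin_lock(&ring->tx_lock);

	ret = ixgbe_xmit_xdp_ring(ring, xdpf);

	if (static_branch_unlikely(&ixgbe_xdp_locking_key))
		spin_unlock(&ring->tx_lock);

	return ret;
}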
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index beda8e0ef7d4..8362822316a9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -467,9 +467,8 @@ static int ixgbe_set_link_ksettings(struct net_device *netdev,
* this function does not support duplex forcing, but can
* limit the advertising of the adapter to the specified speed
*/
- if (!bitmap_subset(cmd->link_modes.advertising,
- cmd->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ if (!linkmode_subset(cmd->link_modes.advertising,
+ cmd->link_modes.supported))
return -EINVAL;
/* only allow one speed at a time if no autoneg */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 0218f6c9b925..86b11164655e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -299,7 +299,10 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_xdp_queues(struct ixgbe_adapter *adapter)
{
- return adapter->xdp_prog ? nr_cpu_ids : 0;
+ int queues;
+
+ queues = min_t(int, IXGBE_MAX_XDP_QS, nr_cpu_ids);
+ return adapter->xdp_prog ? queues : 0;
}
#define IXGBE_RSS_64Q_MASK 0x3F
@@ -947,6 +950,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
ring->count = adapter->tx_ring_count;
ring->queue_index = xdp_idx;
set_ring_xdp(ring);
+ spin_lock_init(&ring->tx_lock);
/* assign ring to adapter */
WRITE_ONCE(adapter->xdp_ring[xdp_idx], ring);
@@ -1032,6 +1036,9 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
adapter->q_vector[v_idx] = NULL;
__netif_napi_del(&q_vector->napi);
+ if (static_key_enabled(&ixgbe_xdp_locking_key))
+ static_branch_dec(&ixgbe_xdp_locking_key);
+
/*
* after a call to __netif_napi_del() napi may still be used and
* ixgbe_get_stats64() might access the rings on this vector,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 13c4782b920a..0f9f022260d7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -165,6 +165,9 @@ MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL v2");
+DEFINE_STATIC_KEY_FALSE(ixgbe_xdp_locking_key);
+EXPORT_SYMBOL(ixgbe_xdp_locking_key);
+
static struct workqueue_struct *ixgbe_wq;
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
@@ -2197,6 +2200,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -2215,7 +2219,12 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -2422,13 +2431,9 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
@@ -6320,7 +6325,7 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
if (ixgbe_init_rss_key(adapter))
return -ENOMEM;
- adapter->af_xdp_zc_qps = bitmap_zalloc(MAX_XDP_QUEUES, GFP_KERNEL);
+ adapter->af_xdp_zc_qps = bitmap_zalloc(IXGBE_MAX_XDP_QS, GFP_KERNEL);
if (!adapter->af_xdp_zc_qps)
return -ENOMEM;
@@ -8536,10 +8541,9 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
}
#endif
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf)
{
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
struct ixgbe_tx_buffer *tx_buffer;
union ixgbe_adv_tx_desc *tx_desc;
u32 len, cmd_type;
@@ -8788,7 +8792,7 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
ixgbe_mac_set_default_filter(adapter);
@@ -10131,8 +10135,13 @@ static int ixgbe_xdp_setup(struct net_device *dev, struct bpf_prog *prog)
return -EINVAL;
}
- if (nr_cpu_ids > MAX_XDP_QUEUES)
+	/* If the number of CPUs is much larger than the maximum number of
+	 * XDP Tx queues, reject the program and return -ENOMEM, as before.
+ */
+ if (nr_cpu_ids > IXGBE_MAX_XDP_QS * 2)
return -ENOMEM;
+ else if (nr_cpu_ids > IXGBE_MAX_XDP_QS)
+ static_branch_inc(&ixgbe_xdp_locking_key);
old_prog = xchg(&adapter->xdp_prog, prog);
need_reset = (!!prog != !!old_prog);
@@ -10199,6 +10208,15 @@ void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
writel(ring->next_to_use, ring->tail);
}
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring)
+{
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+}
+
static int ixgbe_xdp_xmit(struct net_device *dev, int n,
struct xdp_frame **frames, u32 flags)
{
@@ -10216,18 +10234,21 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 	/* During program transitions it's possible adapter->xdp_prog is assigned
* but ring has not been configured yet. In this case simply abort xmit.
*/
- ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
+ ring = adapter->xdp_prog ? ixgbe_determine_xdp_ring(adapter) : NULL;
if (unlikely(!ring))
return -ENXIO;
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &ring->state)))
return -ENXIO;
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+
for (i = 0; i < n; i++) {
struct xdp_frame *xdpf = frames[i];
int err;
- err = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ err = ixgbe_xmit_xdp_ring(ring, xdpf);
if (err != IXGBE_XDP_TX)
break;
nxmit++;
@@ -10236,6 +10257,9 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
if (unlikely(flags & XDP_XMIT_FLUSH))
ixgbe_xdp_ring_update_tail(ring);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
+
return nxmit;
}
@@ -10903,7 +10927,7 @@ skip_sriov:
eth_platform_get_mac_address(&adapter->pdev->dev,
adapter->hw.mac.perm_addr);
- memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, hw->mac.perm_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
e_dev_err("invalid MAC address\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
index 2aeec78029bc..a82533f21d36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_txrx_common.h
@@ -12,7 +12,7 @@
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \
IXGBE_TXD_CMD_RS)
-int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
+int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf);
bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc,
@@ -23,6 +23,7 @@ void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb);
void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring);
+void ixgbe_xdp_ring_update_tail_locked(struct ixgbe_ring *ring);
void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, u64 qmask);
void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
index b1d22e4d5ec9..db2bc58dfcfd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -100,6 +100,7 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
{
int err, result = IXGBE_XDP_PASS;
struct bpf_prog *xdp_prog;
+ struct ixgbe_ring *ring;
struct xdp_frame *xdpf;
u32 act;
@@ -120,7 +121,12 @@ static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
xdpf = xdp_convert_buff_to_frame(xdp);
if (unlikely(!xdpf))
goto out_failure;
- result = ixgbe_xmit_xdp_ring(adapter, xdpf);
+ ring = ixgbe_determine_xdp_ring(adapter);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_lock(&ring->tx_lock);
+ result = ixgbe_xmit_xdp_ring(ring, xdpf);
+ if (static_branch_unlikely(&ixgbe_xdp_locking_key))
+ spin_unlock(&ring->tx_lock);
if (result == IXGBE_XDP_CONSUMED)
goto out_failure;
break;
@@ -334,13 +340,9 @@ int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
xdp_do_flush_map();
if (xdp_xmit & IXGBE_XDP_TX) {
- struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
+ struct ixgbe_ring *ring = ixgbe_determine_xdp_ring(adapter);
- /* Force memory writes to complete before letting h/w
- * know there are new descriptors to fetch.
- */
- wmb();
- writel(ring->next_to_use, ring->tail);
+ ixgbe_xdp_ring_update_tail_locked(ring);
}
u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index c714e1ecd308..d81811ab4ec4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2540,7 +2540,7 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
}
if (is_valid_ether_addr(adapter->hw.mac.addr)) {
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
}
@@ -3054,7 +3054,7 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
else if (is_zero_ether_addr(adapter->hw.mac.addr))
dev_info(&pdev->dev,
"MAC address not assigned by administrator.\n");
- ether_addr_copy(netdev->dev_addr, hw->mac.addr);
+ eth_hw_addr_set(netdev, hw->mac.addr);
}
if (!is_valid_ether_addr(netdev->dev_addr)) {
@@ -4231,7 +4231,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(hw->mac.perm_addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index 5fc347abab3c..d459f5c8e98f 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -66,9 +66,9 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 timeout = IXGBE_VF_INIT_TIMEOUT;
- s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
u8 *addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
/* Call adapter stop to disable tx/rx and clear interrupts */
hw->mac.ops.stop_adapter(hw);
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 1bdc4f23e1e5..439674fc9765 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -313,7 +313,7 @@ jme_load_macaddr(struct net_device *netdev)
val = jread32(jme, JME_RXUMA_HI);
macaddr[4] = (val >> 0) & 0xFF;
macaddr[5] = (val >> 8) & 0xFF;
- memcpy(netdev->dev_addr, macaddr, ETH_ALEN);
+ eth_hw_addr_set(netdev, macaddr);
spin_unlock_bh(&jme->macaddr_lock);
}
@@ -2254,7 +2254,7 @@ jme_set_macaddr(struct net_device *netdev, void *p)
return -EBUSY;
spin_lock_bh(&jme->macaddr_lock);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
jme_set_unicastaddr(netdev);
spin_unlock_bh(&jme->macaddr_lock);
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 3e9f324f1061..df9a8eefa007 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1297,8 +1297,8 @@ static int korina_probe(struct platform_device *pdev)
lp = netdev_priv(dev);
if (mac_addr)
- ether_addr_copy(dev->dev_addr, mac_addr);
- else if (of_get_mac_address(pdev->dev.of_node, dev->dev_addr) < 0)
+ eth_hw_addr_set(dev, mac_addr);
+ else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0)
eth_hw_addr_random(dev);
clk = devm_clk_get_optional(&pdev->dev, "mdioclk");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 62f8c5212182..2258e3f19161 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -96,6 +96,9 @@ struct ltq_etop_priv {
struct ltq_etop_chan ch[MAX_DMA_CHAN];
int tx_free[MAX_DMA_CHAN >> 1];
+ int tx_burst_len;
+ int rx_burst_len;
+
spinlock_t lock;
};
@@ -259,7 +262,7 @@ ltq_etop_hw_init(struct net_device *dev)
/* enable crc generation */
ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
- ltq_dma_init_port(DMA_PORT_ETOP);
+	ltq_dma_init_port(DMA_PORT_ETOP, priv->tx_burst_len, priv->rx_burst_len);
for (i = 0; i < MAX_DMA_CHAN; i++) {
int irq = LTQ_DMA_CH0_INT + i;
@@ -472,8 +475,8 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = CPHYSADDR(skb->data) % 16;
+	/* dma needs to start on an address aligned to the burst length */
+ byte_offset = CPHYSADDR(skb->data) % (priv->tx_burst_len * 4);
ch->skb[ch->dma.desc] = skb;
netif_trans_update(dev);
@@ -667,6 +670,18 @@ ltq_etop_probe(struct platform_device *pdev)
spin_lock_init(&priv->lock);
SET_NETDEV_DEV(dev, &pdev->dev);
+ err = device_property_read_u32(&pdev->dev, "lantiq,tx-burst-length", &priv->tx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read tx-burst-length property\n");
+ return err;
+ }
+
+ err = device_property_read_u32(&pdev->dev, "lantiq,rx-burst-length", &priv->rx_burst_len);
+ if (err < 0) {
+ dev_err(&pdev->dev, "unable to read rx-burst-length property\n");
+ return err;
+ }
+
for (i = 0; i < MAX_DMA_CHAN; i++) {
if (IS_TX(i))
netif_napi_add(dev, &priv->ch[i].napi,
diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c
index fb78f17d734f..0da09ea81980 100644
--- a/drivers/net/ethernet/lantiq_xrx200.c
+++ b/drivers/net/ethernet/lantiq_xrx200.c
@@ -14,15 +14,18 @@
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/if_vlan.h>
+
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <xway_dma.h>
/* DMA */
-#define XRX200_DMA_DATA_LEN 0x600
+#define XRX200_DMA_DATA_LEN (SZ_64K - 1)
#define XRX200_DMA_RX 0
#define XRX200_DMA_TX 1
+#define XRX200_DMA_BURST_LEN 8
/* cpu port mac */
#define PMAC_RX_IPG 0x0024
@@ -106,7 +109,8 @@ static void xrx200_flush_dma(struct xrx200_chan *ch)
break;
desc->ctl = LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ (ch->priv->net_dev->mtu + VLAN_ETH_HLEN +
+ ETH_FCS_LEN);
ch->dma.desc++;
ch->dma.desc %= LTQ_DESC_NUM;
}
@@ -154,19 +158,20 @@ static int xrx200_close(struct net_device *net_dev)
static int xrx200_alloc_skb(struct xrx200_chan *ch)
{
+ int len = ch->priv->net_dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
struct sk_buff *skb = ch->skb[ch->dma.desc];
dma_addr_t mapping;
int ret = 0;
ch->skb[ch->dma.desc] = netdev_alloc_skb_ip_align(ch->priv->net_dev,
- XRX200_DMA_DATA_LEN);
+ len);
if (!ch->skb[ch->dma.desc]) {
ret = -ENOMEM;
goto skip;
}
mapping = dma_map_single(ch->priv->dev, ch->skb[ch->dma.desc]->data,
- XRX200_DMA_DATA_LEN, DMA_FROM_DEVICE);
+ len, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ch->priv->dev, mapping))) {
dev_kfree_skb_any(ch->skb[ch->dma.desc]);
ch->skb[ch->dma.desc] = skb;
@@ -179,8 +184,7 @@ static int xrx200_alloc_skb(struct xrx200_chan *ch)
wmb();
skip:
ch->dma.desc_base[ch->dma.desc].ctl =
- LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
- XRX200_DMA_DATA_LEN;
+ LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) | len;
return ret;
}
@@ -316,8 +320,8 @@ static netdev_tx_t xrx200_start_xmit(struct sk_buff *skb,
if (unlikely(dma_mapping_error(priv->dev, mapping)))
goto err_drop;
- /* dma needs to start on a 16 byte aligned address */
- byte_offset = mapping % 16;
+	/* dma needs to start on an address aligned to the burst length */
+ byte_offset = mapping % (XRX200_DMA_BURST_LEN * 4);
desc->addr = mapping - byte_offset;
/* Make sure the address is written before we give it to HW */
@@ -340,10 +344,57 @@ err_drop:
return NETDEV_TX_OK;
}
+static int
+xrx200_change_mtu(struct net_device *net_dev, int new_mtu)
+{
+ struct xrx200_priv *priv = netdev_priv(net_dev);
+ struct xrx200_chan *ch_rx = &priv->chan_rx;
+ int old_mtu = net_dev->mtu;
+ bool running = false;
+ struct sk_buff *skb;
+ int curr_desc;
+ int ret = 0;
+
+ net_dev->mtu = new_mtu;
+
+ if (new_mtu <= old_mtu)
+ return ret;
+
+ running = netif_running(net_dev);
+ if (running) {
+ napi_disable(&ch_rx->napi);
+ ltq_dma_close(&ch_rx->dma);
+ }
+
+ xrx200_poll_rx(&ch_rx->napi, LTQ_DESC_NUM);
+ curr_desc = ch_rx->dma.desc;
+
+ for (ch_rx->dma.desc = 0; ch_rx->dma.desc < LTQ_DESC_NUM;
+ ch_rx->dma.desc++) {
+ skb = ch_rx->skb[ch_rx->dma.desc];
+ ret = xrx200_alloc_skb(ch_rx);
+ if (ret) {
+ net_dev->mtu = old_mtu;
+ break;
+ }
+ dev_kfree_skb_any(skb);
+ }
+
+ ch_rx->dma.desc = curr_desc;
+ if (running) {
+ napi_enable(&ch_rx->napi);
+ ltq_dma_open(&ch_rx->dma);
+ ltq_dma_enable_irq(&ch_rx->dma);
+ }
+
+ return ret;
+}
+
static const struct net_device_ops xrx200_netdev_ops = {
.ndo_open = xrx200_open,
.ndo_stop = xrx200_close,
.ndo_start_xmit = xrx200_start_xmit,
+ .ndo_change_mtu = xrx200_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
@@ -369,7 +420,8 @@ static int xrx200_dma_init(struct xrx200_priv *priv)
int ret = 0;
int i;
- ltq_dma_init_port(DMA_PORT_ETOP);
+ ltq_dma_init_port(DMA_PORT_ETOP, XRX200_DMA_BURST_LEN,
+ XRX200_DMA_BURST_LEN);
ch_rx->dma.nr = XRX200_DMA_RX;
ch_rx->dma.dev = priv->dev;
@@ -453,7 +505,7 @@ static int xrx200_probe(struct platform_device *pdev)
net_dev->netdev_ops = &xrx200_netdev_ops;
SET_NETDEV_DEV(net_dev, dev);
net_dev->min_mtu = ETH_ZLEN;
- net_dev->max_mtu = XRX200_DMA_DATA_LEN;
+ net_dev->max_mtu = XRX200_DMA_DATA_LEN - VLAN_ETH_HLEN - ETH_FCS_LEN;
/* load the memory ranges */
priv->pmac_reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
@@ -474,7 +526,7 @@ static int xrx200_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- err = of_get_mac_address(np, net_dev->dev_addr);
+ err = of_get_ethdev_address(np, net_dev);
if (err)
eth_hw_addr_random(net_dev);
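The xrx200 hunks above size each Rx buffer from the current MTU instead of the fixed 0x600 bytes and cap max_mtu by what the 16-bit descriptor length field can carry. Worked through with the standard header sizes (VLAN_ETH_HLEN = 18, ETH_FCS_LEN = 4): the default MTU of 1500 needs 1500 + 18 + 4 = 1522-byte buffers, and the new ceiling is (SZ_64K - 1) - 18 - 4 = 65513 bytes of MTU. The helper below restates the per-buffer arithmetic; its name is illustrative.

#include <linux/if_vlan.h>

/* Rx buffer length for a given MTU, as computed in xrx200_alloc_skb() */
static int example_rx_buf_len(int mtu)
{
	return mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
}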
diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig
index 63bf01d28f0c..f99adbf26ab4 100644
--- a/drivers/net/ethernet/litex/Kconfig
+++ b/drivers/net/ethernet/litex/Kconfig
@@ -17,7 +17,7 @@ if NET_VENDOR_LITEX
config LITEX_LITEETH
tristate "LiteX Ethernet support"
- depends on OF_NET
+ depends on OF
help
If you wish to compile a kernel for hardware with a LiteX LiteEth
device then you should answer Y to this.
diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c
index a9bdbf0dcfe1..3d9385a4989b 100644
--- a/drivers/net/ethernet/litex/litex_liteeth.c
+++ b/drivers/net/ethernet/litex/litex_liteeth.c
@@ -266,7 +266,7 @@ static int liteeth_probe(struct platform_device *pdev)
priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size;
priv->tx_slot = 0;
- err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, netdev);
if (err)
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 28d5ad296646..bb14fa2241a3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1770,7 +1770,7 @@ static void uc_addr_get(struct mv643xx_eth_private *mp, unsigned char *addr)
addr[5] = mac_l & 0xff;
}
-static void uc_addr_set(struct mv643xx_eth_private *mp, unsigned char *addr)
+static void uc_addr_set(struct mv643xx_eth_private *mp, const u8 *addr)
{
wrlp(mp, MAC_ADDR_HIGH,
(addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]);
@@ -1919,7 +1919,7 @@ static int mv643xx_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
netif_addr_lock_bh(dev);
mv643xx_eth_program_unicast_filter(dev);
@@ -2925,10 +2925,14 @@ static void set_params(struct mv643xx_eth_private *mp,
struct net_device *dev = mp->dev;
unsigned int tx_ring_size;
- if (is_valid_ether_addr(pd->mac_addr))
- memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
- else
- uc_addr_get(mp, dev->dev_addr);
+ if (is_valid_ether_addr(pd->mac_addr)) {
+ eth_hw_addr_set(dev, pd->mac_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ uc_addr_get(mp, addr);
+ eth_hw_addr_set(dev, addr);
+ }
mp->rx_ring_size = DEFAULT_RX_QUEUE_SIZE;
if (pd->rx_queue_size)
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 9d460a270601..5a7bdca22a63 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1623,8 +1623,8 @@ static void mvneta_set_ucast_addr(struct mvneta_port *pp, u8 last_nibble,
}
/* Set mac address */
-static void mvneta_mac_addr_set(struct mvneta_port *pp, unsigned char *addr,
- int queue)
+static void mvneta_mac_addr_set(struct mvneta_port *pp,
+ const unsigned char *addr, int queue)
{
unsigned int mac_h;
unsigned int mac_l;
@@ -1914,7 +1914,7 @@ static int mvneta_rx_refill(struct mvneta_port *pp,
}
/* Handle tx checksum */
-static u32 mvneta_skb_tx_csum(struct mvneta_port *pp, struct sk_buff *skb)
+static u32 mvneta_skb_tx_csum(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
@@ -2595,8 +2595,7 @@ err_drop_frame:
}
static inline void
-mvneta_tso_put_hdr(struct sk_buff *skb,
- struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+mvneta_tso_put_hdr(struct sk_buff *skb, struct mvneta_tx_queue *txq)
{
int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
struct mvneta_tx_buf *buf = &txq->buf[txq->txq_put_index];
@@ -2604,7 +2603,7 @@ mvneta_tso_put_hdr(struct sk_buff *skb,
tx_desc = mvneta_txq_next_desc_get(txq);
tx_desc->data_size = hdr_len;
- tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command = mvneta_skb_tx_csum(skb);
tx_desc->command |= MVNETA_TXD_F_DESC;
tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
txq->txq_put_index * TSO_HEADER_SIZE;
@@ -2681,7 +2680,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
- mvneta_tso_put_hdr(skb, pp, txq);
+ mvneta_tso_put_hdr(skb, txq);
while (data_left > 0) {
int size;
@@ -2799,7 +2798,7 @@ static netdev_tx_t mvneta_tx(struct sk_buff *skb, struct net_device *dev)
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
- tx_cmd = mvneta_skb_tx_csum(pp, skb);
+ tx_cmd = mvneta_skb_tx_csum(skb);
tx_desc->data_size = skb_headlen(skb);
@@ -3824,8 +3823,6 @@ static void mvneta_validate(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state)
{
- struct net_device *ndev = to_net_dev(config->dev);
- struct mvneta_port *pp = netdev_priv(ndev);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
/* We only support QSGMII, SGMII, 802.3z and RGMII modes.
@@ -3833,16 +3830,9 @@ static void mvneta_validate(struct phylink_config *config,
* "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
* When <PortType> = 1 (1000BASE-X) this field must be set to 1."
*/
- if (phy_interface_mode_is_8023z(state->interface)) {
- if (!phylink_test(state->advertising, Autoneg)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
- return;
- }
- } else if (state->interface != PHY_INTERFACE_MODE_NA &&
- state->interface != PHY_INTERFACE_MODE_QSGMII &&
- state->interface != PHY_INTERFACE_MODE_SGMII &&
- !phy_interface_mode_is_rgmii(state->interface)) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg)) {
+ linkmode_zero(supported);
return;
}
@@ -3854,11 +3844,12 @@ static void mvneta_validate(struct phylink_config *config,
phylink_set(mask, Pause);
/* Half-duplex at speeds higher than 100Mbit is unsupported */
- if (pp->comphy || state->interface != PHY_INTERFACE_MODE_2500BASEX) {
+ if (state->interface != PHY_INTERFACE_MODE_2500BASEX) {
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
}
- if (pp->comphy || state->interface == PHY_INTERFACE_MODE_2500BASEX) {
+
+ if (state->interface == PHY_INTERFACE_MODE_2500BASEX) {
phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
}
@@ -3871,15 +3862,8 @@ static void mvneta_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- /* We can only operate at 2500BaseX or 1000BaseX. If requested
- * to advertise both, only report advertising at 2500BaseX.
- */
- phylink_helper_basex_speed(state);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void mvneta_mac_pcs_get_state(struct phylink_config *config,
@@ -5182,6 +5166,31 @@ static int mvneta_probe(struct platform_device *pdev)
pp->phylink_config.dev = &dev->dev;
pp->phylink_config.type = PHYLINK_NETDEV;
+ phy_interface_set_rgmii(pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ pp->phylink_config.supported_interfaces);
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the serdes
+ * modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ pp->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ pp->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ pp->phylink_config.supported_interfaces);
+ }
phylink = phylink_create(&pp->phylink_config, pdev->dev.fwnode,
phy_mode, &mvneta_phylink_ops);
@@ -5242,14 +5251,14 @@ static int mvneta_probe(struct platform_device *pdev)
goto err_free_ports;
}
- err = of_get_mac_address(dn, dev->dev_addr);
+ err = of_get_ethdev_address(dn, dev);
if (!err) {
mac_from = "device tree";
} else {
mvneta_get_mac_addr(pp, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
mac_from = "hardware";
- memcpy(dev->dev_addr, hw_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, hw_mac_addr);
} else {
mac_from = "random";
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index d5c92e43f89e..587def69a6f7 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -6081,9 +6081,9 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
char hw_mac_addr[ETH_ALEN] = {0};
char fw_mac_addr[ETH_ALEN];
- if (fwnode_get_mac_address(fwnode, fw_mac_addr, ETH_ALEN)) {
+ if (!fwnode_get_mac_address(fwnode, fw_mac_addr)) {
*mac_from = "firmware node";
- ether_addr_copy(dev->dev_addr, fw_mac_addr);
+ eth_hw_addr_set(dev, fw_mac_addr);
return;
}
@@ -6091,7 +6091,7 @@ static void mvpp2_port_copy_mac_addr(struct net_device *dev, struct mvpp2 *priv,
mvpp21_get_mac_address(port, hw_mac_addr);
if (is_valid_ether_addr(hw_mac_addr)) {
*mac_from = "hardware";
- ether_addr_copy(dev->dev_addr, hw_mac_addr);
+ eth_hw_addr_set(dev, hw_mac_addr);
return;
}
}
@@ -6261,32 +6261,13 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
struct mvpp2_port *port = mvpp2_phylink_to_port(config);
__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
- /* Invalid combinations */
- switch (state->interface) {
- case PHY_INTERFACE_MODE_10GBASER:
- case PHY_INTERFACE_MODE_XAUI:
- if (!mvpp2_port_supports_xlg(port))
- goto empty_set;
- break;
- case PHY_INTERFACE_MODE_RGMII:
- case PHY_INTERFACE_MODE_RGMII_ID:
- case PHY_INTERFACE_MODE_RGMII_RXID:
- case PHY_INTERFACE_MODE_RGMII_TXID:
- if (!mvpp2_port_supports_rgmii(port))
- goto empty_set;
- break;
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- /* When in 802.3z mode, we must have AN enabled:
- * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
- * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
- */
- if (!phylink_test(state->advertising, Autoneg))
- goto empty_set;
- break;
- default:
- break;
- }
+ /* When in 802.3z mode, we must have AN enabled:
+ * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ...
+ * When <PortType> = 1 (1000BASE-X) this field must be set to 1.
+ */
+ if (phy_interface_mode_is_8023z(state->interface) &&
+ !phylink_test(state->advertising, Autoneg))
+ goto empty_set;
phylink_set(mask, Autoneg);
phylink_set_port_modes(mask);
@@ -6299,19 +6280,12 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
switch (state->interface) {
case PHY_INTERFACE_MODE_10GBASER:
case PHY_INTERFACE_MODE_XAUI:
- case PHY_INTERFACE_MODE_NA:
if (mvpp2_port_supports_xlg(port)) {
- phylink_set(mask, 10000baseT_Full);
- phylink_set(mask, 10000baseCR_Full);
- phylink_set(mask, 10000baseSR_Full);
- phylink_set(mask, 10000baseLR_Full);
- phylink_set(mask, 10000baseLRM_Full);
- phylink_set(mask, 10000baseER_Full);
+ phylink_set_10g_modes(mask);
phylink_set(mask, 10000baseKR_Full);
}
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
+ break;
+
case PHY_INTERFACE_MODE_RGMII:
case PHY_INTERFACE_MODE_RGMII_ID:
case PHY_INTERFACE_MODE_RGMII_RXID:
@@ -6323,35 +6297,28 @@ static void mvpp2_phylink_validate(struct phylink_config *config,
phylink_set(mask, 100baseT_Full);
phylink_set(mask, 1000baseT_Full);
phylink_set(mask, 1000baseX_Full);
- if (state->interface != PHY_INTERFACE_MODE_NA)
- break;
- fallthrough;
+ break;
+
case PHY_INTERFACE_MODE_1000BASEX:
+ phylink_set(mask, 1000baseT_Full);
+ phylink_set(mask, 1000baseX_Full);
+ break;
+
case PHY_INTERFACE_MODE_2500BASEX:
- if (port->comphy ||
- state->interface != PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 1000baseT_Full);
- phylink_set(mask, 1000baseX_Full);
- }
- if (port->comphy ||
- state->interface == PHY_INTERFACE_MODE_2500BASEX) {
- phylink_set(mask, 2500baseT_Full);
- phylink_set(mask, 2500baseX_Full);
- }
+ phylink_set(mask, 2500baseT_Full);
+ phylink_set(mask, 2500baseX_Full);
break;
+
default:
goto empty_set;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
-
- phylink_helper_basex_speed(state);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
return;
empty_set:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
}
static void mvpp2_xlg_config(struct mvpp2_port *port, unsigned int mode,
@@ -6943,6 +6910,40 @@ static int mvpp2_port_probe(struct platform_device *pdev,
port->phylink_config.dev = &dev->dev;
port->phylink_config.type = PHYLINK_NETDEV;
+ if (mvpp2_port_supports_xlg(port)) {
+ __set_bit(PHY_INTERFACE_MODE_10GBASER,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_XAUI,
+ port->phylink_config.supported_interfaces);
+ }
+
+ if (mvpp2_port_supports_rgmii(port))
+ phy_interface_set_rgmii(port->phylink_config.supported_interfaces);
+
+ if (comphy) {
+ /* If a COMPHY is present, we can support any of the
+ * serdes modes and switch between them.
+ */
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_2500BASEX) {
+ /* No COMPHY, with only 2500BASE-X mode supported */
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ port->phylink_config.supported_interfaces);
+ } else if (phy_mode == PHY_INTERFACE_MODE_1000BASEX ||
+ phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ /* No COMPHY, we can switch between 1000BASE-X and SGMII
+ */
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ port->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ port->phylink_config.supported_interfaces);
+ }
+
phylink = phylink_create(&port->phylink_config, port_fwnode,
phy_mode, &mvpp2_phylink_ops);
if (IS_ERR(phylink)) {
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
index 93575800ca92..75ba57bd1d46 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
@@ -2347,7 +2347,7 @@ int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
return err;
/* Set addr in the device */
- ether_addr_copy(dev->dev_addr, da);
+ eth_hw_addr_set(dev, da);
return 0;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index 34a089b71e55..186d00a9ab35 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -838,9 +838,6 @@ void cgx_lmac_ptp_config(void *cgxd, int lmac_id, bool enable)
if (!cgx)
return;
- if (is_dev_rpm(cgx))
- return;
-
if (enable) {
/* Enable inbound PTP timestamping */
cfg = cgx_read(cgx, lmac_id, CGXX_GMP_GMI_RXX_FRM_CTL);
@@ -1522,7 +1519,6 @@ static int cgx_lmac_exit(struct cgx *cgx)
int i;
if (cgx->cgx_cmd_workq) {
- flush_workqueue(cgx->cgx_cmd_workq);
destroy_workqueue(cgx->cgx_cmd_workq);
cgx->cgx_cmd_workq = NULL;
}
@@ -1545,9 +1541,11 @@ static int cgx_lmac_exit(struct cgx *cgx)
static void cgx_populate_features(struct cgx *cgx)
{
if (is_dev_rpm(cgx))
- cgx->hw_features = (RVU_MAC_RPM | RVU_LMAC_FEAT_FC);
+ cgx->hw_features = (RVU_LMAC_FEAT_DMACF | RVU_MAC_RPM |
+ RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
else
- cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_PTP);
+ cgx->hw_features = (RVU_LMAC_FEAT_FC | RVU_LMAC_FEAT_HIGIG2 |
+ RVU_LMAC_FEAT_PTP | RVU_LMAC_FEAT_DMACF);
}
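cgx_populate_features() above advertises the per-silicon capabilities as a bitmask, so callers are expected to test the relevant RVU_LMAC_FEAT_* bit before issuing the matching configuration call. A hedged illustration; the lmac_id variable and the call site are assumptions, not from this patch:

if (cgx->hw_features & RVU_LMAC_FEAT_PTP)
	cgx_lmac_ptp_config(cgx, lmac_id, true);	/* enable inbound timestamping */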
static struct mac_ops cgx_mac_ops = {
@@ -1571,6 +1569,7 @@ static struct mac_ops cgx_mac_ops = {
.mac_get_pause_frm_status = cgx_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = cgx_lmac_enadis_pause_frm,
.mac_pause_frm_config = cgx_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = cgx_lmac_ptp_config,
};
static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h
index d9bea13f15b8..8931864ee110 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h
@@ -191,6 +191,7 @@ enum nix_scheduler {
#define NIX_CHAN_SDP_CH_START (0x700ull)
#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a))
#define NIX_CHAN_SDP_NUM_CHANS 256
+#define NIX_CHAN_CPT_CH_START (0x800ull)
/* The mask is used to extract the lower 10 bits of the channel number
* which CPT will pass to X2P.
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index c38306b3384a..fc6e7423cbd8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -102,6 +102,11 @@ struct mac_ops {
void (*mac_pause_frm_config)(void *cgxd,
int lmac_id,
bool enable);
+
+ /* Enable/Disable Inbound PTP */
+ void (*mac_enadis_ptp_config)(void *cgxd,
+ int lmac_id,
+ bool enable);
};
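The new .mac_enadis_ptp_config callback lets shared AF code toggle inbound PTP timestamping without caring whether the MAC block is CGX or RPM silicon. A sketch of a call through the ops table; get_mac_ops() is only a stand-in for however the caller obtains the per-device struct mac_ops:

struct mac_ops *ops = get_mac_ops(cgxd);	/* assumed accessor */

if (ops && ops->mac_enadis_ptp_config)
	ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);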
struct cgx {
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 154877706a0e..4e79e918a161 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -84,7 +84,7 @@ struct mbox_msghdr {
#define OTX2_MBOX_REQ_SIG (0xdead)
#define OTX2_MBOX_RSP_SIG (0xbeef)
u16 sig; /* Signature, for validating corrupted msgs */
-#define OTX2_MBOX_VERSION (0x0009)
+#define OTX2_MBOX_VERSION (0x000a)
u16 ver; /* Version of msg's structure for this ID */
u16 next_msgoff; /* Offset of next msg within mailbox region */
int rc; /* Msg process'ed response code */
@@ -154,23 +154,23 @@ M(CGX_PTP_RX_ENABLE, 0x20C, cgx_ptp_rx_enable, msg_req, msg_rsp) \
M(CGX_PTP_RX_DISABLE, 0x20D, cgx_ptp_rx_disable, msg_req, msg_rsp) \
M(CGX_CFG_PAUSE_FRM, 0x20E, cgx_cfg_pause_frm, cgx_pause_frm_cfg, \
cgx_pause_frm_cfg) \
-M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
-M(CGX_FEC_STATS, 0x211, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
-M(CGX_GET_PHY_FEC_STATS, 0x212, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
-M(CGX_FW_DATA_GET, 0x213, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
-M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
- cgx_set_link_mode_rsp) \
-M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
- cgx_features_info_msg) \
-M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
-M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
- cgx_mac_addr_add_rsp) \
-M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+M(CGX_FW_DATA_GET, 0x20F, cgx_get_aux_link_info, msg_req, cgx_fw_data) \
+M(CGX_FEC_SET, 0x210, cgx_set_fec_param, fec_mode, fec_mode) \
+M(CGX_MAC_ADDR_ADD, 0x211, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x212, cgx_mac_addr_del, cgx_mac_addr_del_req, \
msg_rsp) \
-M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x213, cgx_mac_max_entries_get, msg_req, \
cgx_max_dmac_entries_get_rsp) \
-M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
-M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+M(CGX_FEC_STATS, 0x217, cgx_fec_stats, msg_req, cgx_fec_stats_rsp) \
+M(CGX_SET_LINK_MODE, 0x218, cgx_set_link_mode, cgx_set_link_mode_req,\
+ cgx_set_link_mode_rsp) \
+M(CGX_GET_PHY_FEC_STATS, 0x219, cgx_get_phy_fec_stats, msg_req, msg_rsp) \
+M(CGX_FEATURES_GET, 0x21B, cgx_features_get, msg_req, \
+ cgx_features_info_msg) \
+M(RPM_STATS, 0x21C, rpm_stats, msg_req, rpm_stats_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21D, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21E, cgx_mac_addr_update, cgx_mac_addr_update_req, \
msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
@@ -186,9 +186,12 @@ M(CPT_LF_ALLOC, 0xA00, cpt_lf_alloc, cpt_lf_alloc_req_msg, \
M(CPT_LF_FREE, 0xA01, cpt_lf_free, msg_req, msg_rsp) \
M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \
cpt_rd_wr_reg_msg) \
+M(CPT_INLINE_IPSEC_CFG, 0xA04, cpt_inline_ipsec_cfg, \
+ cpt_inline_ipsec_cfg_msg, msg_rsp) \
M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \
M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \
msg_rsp) \
+M(CPT_CTX_CACHE_SYNC, 0xA07, cpt_ctx_cache_sync, msg_req, msg_rsp) \
/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \
M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \
M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \
@@ -229,6 +232,8 @@ M(NPC_DELETE_FLOW, 0x600e, npc_delete_flow, \
M(NPC_MCAM_READ_ENTRY, 0x600f, npc_mcam_read_entry, \
npc_mcam_read_entry_req, \
npc_mcam_read_entry_rsp) \
+M(NPC_SET_PKIND, 0x6010, npc_set_pkind, \
+ npc_set_pkind, msg_rsp) \
M(NPC_MCAM_READ_BASE_RULE, 0x6011, npc_read_base_steer_rule, \
msg_req, npc_mcam_read_base_rule_rsp) \
M(NPC_MCAM_GET_STATS, 0x6012, npc_mcam_entry_stats, \
@@ -270,6 +275,10 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \
nix_bp_cfg_rsp) \
M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \
M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \
+M(NIX_INLINE_IPSEC_CFG, 0x8019, nix_inline_ipsec_cfg, \
+ nix_inline_ipsec_cfg, msg_rsp) \
+M(NIX_INLINE_IPSEC_LF_CFG, 0x801a, nix_inline_ipsec_lf_cfg, \
+ nix_inline_ipsec_lf_cfg, msg_rsp) \
M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \
nix_cn10k_aq_enq_rsp) \
M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \
@@ -284,10 +293,14 @@ M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \
#define MBOX_UP_CGX_MESSAGES \
M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp)
+#define MBOX_UP_CPT_MESSAGES \
+M(CPT_INST_LMTST, 0xD00, cpt_inst_lmtst, cpt_inst_lmtst_req, msg_rsp)
+
enum {
#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id,
MBOX_MESSAGES
MBOX_UP_CGX_MESSAGES
+MBOX_UP_CPT_MESSAGES
#undef M
};
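Each M() entry expands to MBOX_MSG_<name> = <id> inside the enum above, so the messages added or renumbered by this patch end up as ordinary enumerators, for example:

/* Illustrative excerpt of the generated enumerators: */
MBOX_MSG_NPC_SET_PKIND		= 0x6010,
MBOX_MSG_NIX_INLINE_IPSEC_CFG	= 0x8019,
MBOX_MSG_CPT_INLINE_IPSEC_CFG	= 0xA04,
MBOX_MSG_CPT_INST_LMTST		= 0xD00,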
@@ -575,10 +588,13 @@ struct cgx_mac_addr_update_req {
};
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
-#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
-#define RVU_MAC_VERSION BIT_ULL(2)
-#define RVU_MAC_CGX BIT_ULL(3)
-#define RVU_MAC_RPM BIT_ULL(4)
+#define RVU_LMAC_FEAT_HIGIG2 BIT_ULL(1)
+ /* flow control via HiGig2 messages on the physical link */
+#define RVU_LMAC_FEAT_PTP BIT_ULL(2) /* precision time protocol */
+#define RVU_LMAC_FEAT_DMACF BIT_ULL(3) /* DMAC FILTER */
+#define RVU_MAC_VERSION BIT_ULL(4)
+#define RVU_MAC_CGX BIT_ULL(5)
+#define RVU_MAC_RPM BIT_ULL(6)
struct cgx_features_info_msg {
struct mbox_msghdr hdr;
@@ -593,6 +609,22 @@ struct rpm_stats_rsp {
u64 tx_stats[RPM_TX_STATS_COUNT];
};
+struct npc_set_pkind {
+ struct mbox_msghdr hdr;
+#define OTX2_PRIV_FLAGS_DEFAULT BIT_ULL(0)
+#define OTX2_PRIV_FLAGS_CUSTOM BIT_ULL(63)
+ u64 mode;
+#define PKIND_TX BIT_ULL(0)
+#define PKIND_RX BIT_ULL(1)
+ u8 dir;
+ u8 pkind; /* valid only if the custom flag is set */
+ u8 var_len_off; /* Offset of custom header length field.
+ * Valid only for pkind NPC_RX_CUSTOM_PRE_L2_PKIND
+ */
+ u8 var_len_off_mask; /* Mask for the length field within the offset */
+ u8 shift_dir; /* shift direction to get length of the header at var_len_off */
+};
+
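/* Illustration only (not part of the patch): how a requester might fill
 * struct npc_set_pkind to switch an RX interface to the custom pre-L2
 * parser pkind.  The offset and mask values below are placeholders.
 */
struct npc_set_pkind req = {
	.mode		  = OTX2_PRIV_FLAGS_CUSTOM,
	.dir		  = PKIND_RX,
	.pkind		  = NPC_RX_CUSTOM_PRE_L2_PKIND,
	.var_len_off	  = 8,		/* placeholder: length field offset */
	.var_len_off_mask = 0xff,	/* placeholder mask */
	.shift_dir	  = 0,
};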
/* NPA mbox message formats */
/* NPA mailbox error codes
@@ -698,6 +730,8 @@ enum nix_af_status {
NIX_AF_ERR_INVALID_BANDPROF = -426,
NIX_AF_ERR_IPOLICER_NOTSUPP = -427,
NIX_AF_ERR_BANDPROF_INVAL_REQ = -428,
+ NIX_AF_ERR_CQ_CTX_WRITE_ERR = -429,
+ NIX_AF_ERR_AQ_CTX_RETRY_WRITE = -430,
};
/* For NIX RX vtag action */
@@ -1065,6 +1099,40 @@ struct nix_bp_cfg_rsp {
u8 chan_cnt; /* Number of channels for which bpids are assigned */
};
+/* Global NIX inline IPSec configuration */
+struct nix_inline_ipsec_cfg {
+ struct mbox_msghdr hdr;
+ u32 cpt_credit;
+ struct {
+ u8 egrp;
+ u8 opcode;
+ u16 param1;
+ u16 param2;
+ } gen_cfg;
+ struct {
+ u16 cpt_pf_func;
+ u8 cpt_slot;
+ } inst_qsel;
+ u8 enable;
+};
+
+/* Per NIX LF inline IPSec configuration */
+struct nix_inline_ipsec_lf_cfg {
+ struct mbox_msghdr hdr;
+ u64 sa_base_addr;
+ struct {
+ u32 tag_const;
+ u16 lenm1_max;
+ u8 sa_pow2_size;
+ u8 tt;
+ } ipsec_cfg0;
+ struct {
+ u32 sa_idx_max;
+ u8 sa_idx_w;
+ } ipsec_cfg1;
+ u8 enable;
+};
+
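/* Illustration only (not part of the patch): a global inline-IPsec
 * configuration request as the AF would receive it.  Every numeric value
 * below is a placeholder, not real hardware programming.
 */
struct nix_inline_ipsec_cfg gcfg = {
	.cpt_credit	       = 8000,	/* placeholder credit count */
	.gen_cfg.egrp	       = 0,	/* CPT engine group, placeholder */
	.gen_cfg.opcode	       = 0,	/* IPsec opcode, device specific */
	.inst_qsel.cpt_pf_func = 0,	/* PF/func owning the CPT queue */
	.inst_qsel.cpt_slot    = 0,
	.enable		       = 1,
};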
struct nix_hw_info {
struct mbox_msghdr hdr;
u16 rsvs16;
@@ -1357,12 +1425,15 @@ struct npc_mcam_get_stats_rsp {
enum ptp_op {
PTP_OP_ADJFINE = 0,
PTP_OP_GET_CLOCK = 1,
+ PTP_OP_GET_TSTMP = 2,
+ PTP_OP_SET_THRESH = 3,
};
struct ptp_req {
struct mbox_msghdr hdr;
u8 op;
s64 scaled_ppm;
+ u64 thresh;
};
struct ptp_rsp {
@@ -1399,7 +1470,9 @@ enum cpt_af_status {
CPT_AF_ERR_LF_INVALID = -903,
CPT_AF_ERR_ACCESS_DENIED = -904,
CPT_AF_ERR_SSO_PF_FUNC_INVALID = -905,
- CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906
+ CPT_AF_ERR_NIX_PF_FUNC_INVALID = -906,
+ CPT_AF_ERR_INLINE_IPSEC_INB_ENA = -907,
+ CPT_AF_ERR_INLINE_IPSEC_OUT_ENA = -908
};
/* CPT mbox message formats */
@@ -1420,6 +1493,22 @@ struct cpt_lf_alloc_req_msg {
int blkaddr;
};
+#define CPT_INLINE_INBOUND 0
+#define CPT_INLINE_OUTBOUND 1
+
+/* Mailbox message request format for CPT IPsec
+ * inline inbound and outbound configuration.
+ */
+struct cpt_inline_ipsec_cfg_msg {
+ struct mbox_msghdr hdr;
+ u8 enable;
+ u8 slot;
+ u8 dir;
+ u8 sso_pf_func_ovrd;
+ u16 sso_pf_func; /* inbound path SSO_PF_FUNC */
+ u16 nix_pf_func; /* outbound path NIX_PF_FUNC */
+};
+
/* Mailbox message request and response format for CPT stats. */
struct cpt_sts_req {
struct mbox_msghdr hdr;
@@ -1478,6 +1567,13 @@ struct cpt_rxc_time_cfg_req {
u16 active_limit;
};
+/* Mailbox message request format to request for CPT_INST_S lmtst. */
+struct cpt_inst_lmtst_req {
+ struct mbox_msghdr hdr;
+ u64 inst[8];
+ u64 rsvd;
+};
+
struct sdp_node_info {
/* Node to which this PF belongs */
u8 node_id;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
index 3a819b24accc..77fd39e2c8db 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h
@@ -8,6 +8,8 @@
#ifndef NPC_H
#define NPC_H
+#define NPC_KEX_CHAN_MASK 0xFFFULL
+
enum NPC_LID_E {
NPC_LID_LA = 0,
NPC_LID_LB,
@@ -25,15 +27,12 @@ enum npc_kpu_la_ltype {
NPC_LT_LA_8023 = 1,
NPC_LT_LA_ETHER,
NPC_LT_LA_IH_NIX_ETHER,
- NPC_LT_LA_IH_8_ETHER,
- NPC_LT_LA_IH_4_ETHER,
- NPC_LT_LA_IH_2_ETHER,
- NPC_LT_LA_HIGIG2_ETHER,
+ NPC_LT_LA_HIGIG2_ETHER = 7,
NPC_LT_LA_IH_NIX_HIGIG2_ETHER,
NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_LT_LA_CH_LEN_90B_ETHER,
NPC_LT_LA_CPT_HDR,
NPC_LT_LA_CUSTOM_L2_24B_ETHER,
+ NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
NPC_LT_LA_CUSTOM0 = 0xE,
NPC_LT_LA_CUSTOM1 = 0xF,
};
@@ -148,10 +147,11 @@ enum npc_kpu_lh_ltype {
* Software assigns pkind for each incoming port such as CGX
* Ethernet interfaces, LBK interfaces, etc.
*/
-#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_VLAN_EXDSA_PKIND
+#define NPC_UNRESERVED_PKIND_COUNT NPC_RX_CUSTOM_PRE_L2_PKIND
enum npc_pkind_type {
NPC_RX_LBK_PKIND = 0ULL,
+ NPC_RX_CUSTOM_PRE_L2_PKIND = 55ULL,
NPC_RX_VLAN_EXDSA_PKIND = 56ULL,
NPC_RX_CHLEN24B_PKIND = 57ULL,
NPC_RX_CPT_HDR_PKIND,
@@ -162,6 +162,10 @@ enum npc_pkind_type {
NPC_TX_DEF_PKIND, /* NIX-TX PKIND */
};
+enum npc_interface_type {
+ NPC_INTF_MODE_DEF,
+};
+
/* list of known and supported fields in packet header and
* fields present in key structure.
*/
@@ -549,7 +553,7 @@ struct npc_kpu_profile_fwdata {
#define KPU_SIGN 0x00666f727075706b
#define KPU_NAME_LEN 32
/** Maximum number of custom KPU entries supported by the built-in profile. */
-#define KPU_MAX_CST_ENT 2
+#define KPU_MAX_CST_ENT 6
/* KPU Profile Header */
__le64 signature; /* "kpuprof\0" (8 bytes/ASCII characters) */
u8 name[KPU_NAME_LEN]; /* KPU Profile name */
@@ -589,6 +593,8 @@ struct rvu_npc_mcam_rule {
u8 default_rule;
bool enable;
bool vfvlan_cfg;
+ u16 chan;
+ u16 chan_mask;
};
#endif /* NPC_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
index 588822a0cf21..0fe7ad35e36f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h
@@ -8,7 +8,7 @@
#ifndef NPC_PROFILE_H
#define NPC_PROFILE_H
-#define NPC_KPU_PROFILE_VER 0x0000000100060000
+#define NPC_KPU_PROFILE_VER 0x0000000100070000
#define NPC_KPU_VER_MAJ(ver) ((u16)(((ver) >> 32) & 0xFFFF))
#define NPC_KPU_VER_MIN(ver) ((u16)(((ver) >> 16) & 0xFFFF))
#define NPC_KPU_VER_PATCH(ver) ((u16)((ver) & 0xFFFF))
@@ -176,18 +176,18 @@ enum npc_kpu_parser_state {
NPC_S_KPU1_EXDSA,
NPC_S_KPU1_HIGIG2,
NPC_S_KPU1_IH_NIX_HIGIG2,
- NPC_S_KPU1_CUSTOM_L2_90B,
+ NPC_S_KPU1_CUSTOM_PRE_L2,
NPC_S_KPU1_CPT_HDR,
- NPC_S_KPU1_CUSTOM_L2_24B,
NPC_S_KPU1_VLAN_EXDSA,
NPC_S_KPU2_CTAG,
NPC_S_KPU2_CTAG2,
NPC_S_KPU2_SBTAG,
NPC_S_KPU2_QINQ,
NPC_S_KPU2_ETAG,
- NPC_S_KPU2_PREHEADER,
NPC_S_KPU2_EXDSA,
NPC_S_KPU2_NGIO,
+ NPC_S_KPU2_CPT_CTAG,
+ NPC_S_KPU2_CPT_QINQ,
NPC_S_KPU3_CTAG,
NPC_S_KPU3_STAG,
NPC_S_KPU3_QINQ,
@@ -979,8 +979,8 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 16, 20, 0, 0,
- NPC_S_KPU1_ETHER, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_PRE_L2_ETHER,
0,
0, 0, 0, 0,
@@ -996,27 +996,27 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 36, 40, 44, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_24B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 24, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 40, 54, 58, 0, 0,
- NPC_S_KPU1_CPT_HDR, 0, 0,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CPT_HDR, 40, 0,
NPC_LID_LA, NPC_LT_NA,
0,
- 0, 0, 0, 0,
+ 0, 7, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 102, 106, 110, 0, 0,
- NPC_S_KPU1_CUSTOM_L2_90B, 0, 0,
- NPC_LID_LA, NPC_LT_NA,
+ 12, 16, 20, 0, 0,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 90, 1,
+ NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
0,
0, 0, 0, 0,
@@ -1062,6 +1062,10 @@ static struct npc_kpu_profile_action ikpu_action_entries[] = {
static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU1_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -1379,33 +1383,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_IH, 0xff,
- NPC_IH_W | NPC_IH_UTAG,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- NPC_IH_W,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
- 0x0000,
- NPC_IH_W | NPC_IH_UTAG,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_IH, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1711,7 +1688,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP,
0xffff,
0x0000,
@@ -1720,7 +1697,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_IP6,
0xffff,
0x0000,
@@ -1729,7 +1706,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ARP,
0xffff,
0x0000,
@@ -1738,7 +1715,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_RARP,
0xffff,
0x0000,
@@ -1747,7 +1724,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_PTP,
0xffff,
0x0000,
@@ -1756,7 +1733,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_FCOE,
0xffff,
0x0000,
@@ -1765,7 +1742,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
NPC_ETYPE_CTAG,
@@ -1774,7 +1751,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1783,7 +1760,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_SBTAG,
0xffff,
0x0000,
@@ -1792,7 +1769,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -1801,7 +1778,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_ETAG,
0xffff,
0x0000,
@@ -1810,7 +1787,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSU,
0xffff,
0x0000,
@@ -1819,7 +1796,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_MPLSM,
0xffff,
0x0000,
@@ -1828,7 +1805,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
NPC_ETYPE_NSH,
0xffff,
0x0000,
@@ -1837,7 +1814,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_90B, 0xff,
+ NPC_S_KPU1_CUSTOM_PRE_L2, 0xff,
0x0000,
0x0000,
0x0000,
@@ -1847,150 +1824,24 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
NPC_ETYPE_IP,
0xffff,
0x0000,
0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_IP6,
- 0xffff,
0x0000,
0x0000,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
- NPC_ETYPE_IP,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
- 0x0000,
- 0x0000,
NPC_ETYPE_IP6,
0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
0x0000,
- 0xffff,
0x0000,
0x0000,
- NPC_ETYPE_CTAG,
- 0xffff,
- },
- {
- NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0xffff,
0x0000,
- 0x0000,
- NPC_ETYPE_QINQ,
- 0xffff,
},
{
NPC_S_KPU1_CPT_HDR, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
NPC_ETYPE_CTAG,
0xffff,
0x0000,
@@ -1999,16 +1850,7 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
+ NPC_S_KPU1_CPT_HDR, 0xff,
NPC_ETYPE_QINQ,
0xffff,
0x0000,
@@ -2017,51 +1859,6 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_ETAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU1_CUSTOM_L2_24B, 0xff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU1_VLAN_EXDSA, 0xff,
NPC_ETYPE_CTAG,
0xffff,
@@ -2084,6 +1881,10 @@ static struct npc_kpu_profile_cam kpu1_cam_entries[] = {
static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU2_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -2805,114 +2606,6 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_IP6,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_ARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_RARP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_PTP,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_FCOE,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_CTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_SBTAG,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_QINQ,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSU,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_MPLSM,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU2_PREHEADER, 0xff,
- NPC_ETYPE_NSH,
- 0xffff,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- },
- {
NPC_S_KPU2_EXDSA, 0xff,
NPC_DSA_EDSA,
NPC_DSA_EDSA,
@@ -3066,6 +2759,42 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
0x0000,
},
{
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_CTAG, 0xff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
+ NPC_S_KPU2_CPT_QINQ, 0xff,
+ NPC_ETYPE_CTAG,
+ 0xffff,
+ NPC_ETYPE_IP6,
+ 0xffff,
+ 0x0000,
+ 0x0000,
+ },
+ {
NPC_S_NA, 0X00,
0x0000,
0x0000,
@@ -3079,6 +2808,10 @@ static struct npc_kpu_profile_cam kpu2_cam_entries[] = {
static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU3_CTAG, 0xff,
NPC_ETYPE_IP,
@@ -4056,6 +3789,10 @@ static struct npc_kpu_profile_cam kpu3_cam_entries[] = {
static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU4_MPLS, 0xff,
NPC_MPLS_S,
@@ -4367,6 +4104,10 @@ static struct npc_kpu_profile_cam kpu4_cam_entries[] = {
static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU5_IP, 0xff,
0x0000,
@@ -5362,6 +5103,10 @@ static struct npc_kpu_profile_cam kpu5_cam_entries[] = {
static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU6_IP6_EXT, 0xff,
0x0000,
@@ -6033,6 +5778,10 @@ static struct npc_kpu_profile_cam kpu6_cam_entries[] = {
static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU7_IP6_EXT, 0xff,
0x0000,
@@ -6353,6 +6102,10 @@ static struct npc_kpu_profile_cam kpu7_cam_entries[] = {
static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU8_TCP, 0xff,
0x0000,
@@ -7096,6 +6849,10 @@ static struct npc_kpu_profile_cam kpu8_cam_entries[] = {
static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU9_TU_MPLS_IN_GRE, 0xff,
NPC_MPLS_S,
@@ -7496,15 +7253,6 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
NPC_S_KPU9_GTPU, 0xff,
0x0000,
0x0000,
- NPC_GTP_PT_GTP | NPC_GTP_VER1 | NPC_GTP_MT_G_PDU,
- NPC_GTP_PT_MASK | NPC_GTP_VER_MASK | NPC_GTP_MT_MASK,
- 0x0000,
- 0x0000,
- },
- {
- NPC_S_KPU9_GTPU, 0xff,
- 0x0000,
- 0x0000,
NPC_GTP_PT_GTP | NPC_GTP_VER1,
NPC_GTP_PT_MASK | NPC_GTP_VER_MASK,
0x0000,
@@ -7569,6 +7317,10 @@ static struct npc_kpu_profile_cam kpu9_cam_entries[] = {
static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU10_TU_MPLS, 0xff,
NPC_MPLS_S,
@@ -7736,6 +7488,10 @@ static struct npc_kpu_profile_cam kpu10_cam_entries[] = {
static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU11_TU_ETHER, 0xff,
NPC_ETYPE_IP,
@@ -8047,6 +7803,10 @@ static struct npc_kpu_profile_cam kpu11_cam_entries[] = {
static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU12_TU_IP, 0xff,
NPC_IPNH_TCP,
@@ -8304,6 +8064,10 @@ static struct npc_kpu_profile_cam kpu12_cam_entries[] = {
static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU13_TU_IP6_EXT, 0xff,
0x0000,
@@ -8318,6 +8082,10 @@ static struct npc_kpu_profile_cam kpu13_cam_entries[] = {
static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU14_TU_IP6_EXT, 0xff,
0x0000,
@@ -8332,6 +8100,10 @@ static struct npc_kpu_profile_cam kpu14_cam_entries[] = {
static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU15_TU_TCP, 0xff,
0x0000,
@@ -8535,6 +8307,10 @@ static struct npc_kpu_profile_cam kpu15_cam_entries[] = {
static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
NPC_KPU_NOP_CAM,
NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
+ NPC_KPU_NOP_CAM,
{
NPC_S_KPU16_TCP_DATA, 0xff,
0x0000,
@@ -8594,6 +8370,10 @@ static struct npc_kpu_profile_cam kpu16_cam_entries[] = {
static struct npc_kpu_profile_action kpu1_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
@@ -8880,30 +8660,6 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
0, 0, 0, 0,
},
{
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 8, 1,
- NPC_LID_LA, NPC_LT_LA_IH_8_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 4, 1,
- NPC_LID_LA, NPC_LT_LA_IH_4_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 12, 14, 16, 0, 0,
- NPC_S_KPU2_PREHEADER, 2, 1,
- NPC_LID_LA, NPC_LT_LA_IH_2_ETHER,
- 0,
- 1, 0xff, 0, 0,
- },
- {
NPC_ERRLEV_LA, NPC_EC_IH_LENGTH,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 1,
@@ -9192,159 +8948,127 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_IP6, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_ARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_RARP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_PTP, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
+ NPC_S_KPU5_FCOE, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG2, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_CTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_SBTAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
+ NPC_S_KPU2_QINQ, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 102, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
+ NPC_S_KPU2_ETAG, 12, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
+ NPC_S_KPU4_MPLS, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
+ 0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 104, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_90B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ NPC_S_KPU4_NSH, 14, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 56, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
+ 0, 0, 0, 0, 1,
+ NPC_S_NA, 0, 0,
+ NPC_LID_LA, NPC_LT_NA,
0,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 54, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 3, 0,
- NPC_S_KPU5_CPT_IP, 60, 1,
+ NPC_S_KPU5_CPT_IP, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9352,7 +9076,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
6, 0, 0, 3, 0,
- NPC_S_KPU5_CPT_IP6, 60, 1,
+ NPC_S_KPU5_CPT_IP6, 14, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
0,
0, 0, 0, 0,
@@ -9360,7 +9084,7 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 58, 1,
+ NPC_S_KPU2_CPT_CTAG, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
@@ -9368,141 +9092,13 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 58, 1,
+ NPC_S_KPU2_CPT_QINQ, 12, 1,
NPC_LID_LA, NPC_LT_LA_CPT_HDR,
NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
0, 0, 0, 0,
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CPT_HDR,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 3, 0,
- NPC_S_KPU5_IP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 3, 0,
- NPC_S_KPU5_IP6, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_ARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_RARP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_PTP, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 3, 0,
- NPC_S_KPU5_FCOE, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 0, 0, 0,
- NPC_S_KPU2_CTAG2, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_CTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 22, 0, 0,
- NPC_S_KPU2_SBTAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 4, 8, 0, 0, 0,
- NPC_S_KPU2_QINQ, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_VLAN,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 12, 26, 0, 0,
- NPC_S_KPU2_ETAG, 36, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_U_HAS_TAG | NPC_F_LA_L_WITH_ETAG,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 2, 0,
- NPC_S_KPU4_MPLS, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_MPLS,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 2, 0,
- NPC_S_KPU4_NSH, 38, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_WITH_NSH,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 0, 1,
- NPC_S_NA, 0, 1,
- NPC_LID_LA, NPC_LT_LA_CUSTOM_L2_24B_ETHER,
- NPC_F_LA_L_UNK_ETYPE,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
12, 0, 0, 1, 0,
NPC_S_KPU3_VLAN_EXDSA, 12, 1,
NPC_LID_LA, NPC_LT_LA_ETHER,
@@ -9522,6 +9118,10 @@ static struct npc_kpu_profile_action kpu1_action_entries[] = {
static struct npc_kpu_profile_action kpu2_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
@@ -10165,102 +9765,6 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 2, 0,
- NPC_S_KPU5_IP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 6, 0, 0, 2, 0,
- NPC_S_KPU5_IP6, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_ARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_RARP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_PTP, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 0, 0, 0, 2, 0,
- NPC_S_KPU5_FCOE, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_CTAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 20, 0, 0,
- NPC_S_KPU3_STAG_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 0, 0, 0,
- NPC_S_KPU3_QINQ_C, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 6, 10, 1, 0,
- NPC_S_KPU4_MPLS, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 2, 0, 0, 1, 0,
- NPC_S_KPU4_NSH, 14, 0,
- NPC_LID_LB, NPC_LT_NA,
- 0,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
NPC_S_KPU5_IP, 18, 1,
NPC_LID_LB, NPC_LT_LB_EDSA,
NPC_F_LB_L_EDSA,
@@ -10395,6 +9899,38 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
0, 0, 0, 0,
},
{
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 6, 1,
+ NPC_LID_LB, NPC_LT_LB_CTAG,
+ 0,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 8, 0, 6, 2, 0,
+ NPC_S_KPU5_CPT_IP, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
+ NPC_ERRLEV_RE, NPC_EC_NOERR,
+ 6, 0, 0, 2, 0,
+ NPC_S_KPU5_CPT_IP6, 10, 1,
+ NPC_LID_LB, NPC_LT_LB_STAG_QINQ,
+ NPC_F_LB_U_MORE_TAG | NPC_F_LB_L_WITH_CTAG,
+ 0, 0, 0, 0,
+ },
+ {
NPC_ERRLEV_LB, NPC_EC_L2_K3,
0, 0, 0, 0, 1,
NPC_S_NA, 0, 0,
@@ -10407,6 +9943,10 @@ static struct npc_kpu_profile_action kpu2_action_entries[] = {
static struct npc_kpu_profile_action kpu3_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -11276,6 +10816,10 @@ static struct npc_kpu_profile_action kpu3_action_entries[] = {
static struct npc_kpu_profile_action kpu4_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -11553,6 +11097,10 @@ static struct npc_kpu_profile_action kpu4_action_entries[] = {
static struct npc_kpu_profile_action kpu5_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LC, NPC_EC_IP_TTL_0,
0, 0, 0, 0, 1,
@@ -12438,6 +11986,10 @@ static struct npc_kpu_profile_action kpu5_action_entries[] = {
static struct npc_kpu_profile_action kpu6_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13035,6 +12587,10 @@ static struct npc_kpu_profile_action kpu6_action_entries[] = {
static struct npc_kpu_profile_action kpu7_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -13320,6 +12876,10 @@ static struct npc_kpu_profile_action kpu7_action_entries[] = {
static struct npc_kpu_profile_action kpu8_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LD, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -13981,6 +13541,10 @@ static struct npc_kpu_profile_action kpu8_action_entries[] = {
static struct npc_kpu_profile_action kpu9_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 0,
@@ -14335,16 +13899,8 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
},
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
- NPC_LID_LE, NPC_LT_LE_GTPU,
- NPC_F_LE_L_GTPU_G_PDU,
- 0, 0, 0, 0,
- },
- {
- NPC_ERRLEV_RE, NPC_EC_NOERR,
- 8, 0, 6, 2, 0,
- NPC_S_KPU12_TU_IP, 8, 1,
+ 8, 0, 6, 2, 1,
+ NPC_S_NA, 0, 1,
NPC_LID_LE, NPC_LT_LE_GTPU,
0,
0, 0, 0, 0,
@@ -14402,6 +13958,10 @@ static struct npc_kpu_profile_action kpu9_action_entries[] = {
static struct npc_kpu_profile_action kpu10_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 1, 0,
@@ -14551,6 +14111,10 @@ static struct npc_kpu_profile_action kpu10_action_entries[] = {
static struct npc_kpu_profile_action kpu11_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
8, 0, 6, 0, 0,
@@ -14828,6 +14392,10 @@ static struct npc_kpu_profile_action kpu11_action_entries[] = {
static struct npc_kpu_profile_action kpu12_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
2, 12, 0, 2, 0,
@@ -15057,6 +14625,10 @@ static struct npc_kpu_profile_action kpu12_action_entries[] = {
static struct npc_kpu_profile_action kpu13_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -15070,6 +14642,10 @@ static struct npc_kpu_profile_action kpu13_action_entries[] = {
static struct npc_kpu_profile_action kpu14_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
@@ -15083,6 +14659,10 @@ static struct npc_kpu_profile_action kpu14_action_entries[] = {
static struct npc_kpu_profile_action kpu15_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_LG, NPC_EC_TCP_FLAGS_FIN_ONLY,
0, 0, 0, 0, 1,
@@ -15264,6 +14844,10 @@ static struct npc_kpu_profile_action kpu15_action_entries[] = {
static struct npc_kpu_profile_action kpu16_action_entries[] = {
NPC_KPU_NOP_ACTION,
NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
+ NPC_KPU_NOP_ACTION,
{
NPC_ERRLEV_RE, NPC_EC_NOERR,
0, 0, 0, 0, 1,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
index 9b8e59f4c206..d6321de3cc17 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c
@@ -27,54 +27,29 @@
#define PCI_DEVID_CN10K_PTP 0xA09E
#define PCI_PTP_BAR_NO 0
-#define PCI_RST_BAR_NO 0
#define PTP_CLOCK_CFG 0xF00ULL
#define PTP_CLOCK_CFG_PTP_EN BIT_ULL(0)
+#define PTP_CLOCK_CFG_EXT_CLK_EN BIT_ULL(1)
+#define PTP_CLOCK_CFG_EXT_CLK_IN_MASK GENMASK_ULL(7, 2)
+#define PTP_CLOCK_CFG_TSTMP_EDGE BIT_ULL(9)
+#define PTP_CLOCK_CFG_TSTMP_EN BIT_ULL(8)
+#define PTP_CLOCK_CFG_TSTMP_IN_MASK GENMASK_ULL(15, 10)
+#define PTP_CLOCK_CFG_PPS_EN BIT_ULL(30)
+#define PTP_CLOCK_CFG_PPS_INV BIT_ULL(31)
+
+#define PTP_PPS_HI_INCR 0xF60ULL
+#define PTP_PPS_LO_INCR 0xF68ULL
+#define PTP_PPS_THRESH_HI 0xF58ULL
+
#define PTP_CLOCK_LO 0xF08ULL
#define PTP_CLOCK_HI 0xF10ULL
#define PTP_CLOCK_COMP 0xF18ULL
-
-#define RST_BOOT 0x1600ULL
-#define RST_MUL_BITS GENMASK_ULL(38, 33)
-#define CLOCK_BASE_RATE 50000000ULL
+#define PTP_TIMESTAMP 0xF20ULL
static struct ptp *first_ptp_block;
static const struct pci_device_id ptp_id_table[];
-static u64 get_clock_rate(void)
-{
- u64 cfg, ret = CLOCK_BASE_RATE * 16;
- struct pci_dev *pdev;
- void __iomem *base;
-
- /* To get the input clock frequency with which PTP co-processor
- * block is running the base frequency(50 MHz) needs to be multiplied
- * with multiplier bits present in RST_BOOT register of RESET block.
- * Hence below code gets the multiplier bits from the RESET PCI
- * device present in the system.
- */
- pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
- PCI_DEVID_OCTEONTX2_RST, NULL);
- if (!pdev)
- goto error;
-
- base = pci_ioremap_bar(pdev, PCI_RST_BAR_NO);
- if (!base)
- goto error_put_pdev;
-
- cfg = readq(base + RST_BOOT);
- ret = CLOCK_BASE_RATE * FIELD_GET(RST_MUL_BITS, cfg);
-
- iounmap(base);
-
-error_put_pdev:
- pci_dev_put(pdev);
-
-error:
- return ret;
-}
-
struct ptp *ptp_get(void)
{
struct ptp *ptp = first_ptp_block;
@@ -145,13 +120,74 @@ static int ptp_get_clock(struct ptp *ptp, u64 *clk)
return 0;
}
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts)
+{
+ struct pci_dev *pdev;
+ u64 clock_comp;
+ u64 clock_cfg;
+
+ if (!ptp)
+ return;
+
+ pdev = ptp->pdev;
+
+ if (!sclk) {
+ dev_err(&pdev->dev, "PTP input clock cannot be zero\n");
+ return;
+ }
+
+ /* sclk is in MHz */
+ ptp->clock_rate = sclk * 1000000;
+
+ /* Enable PTP clock */
+ clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
+
+ if (ext_clk_freq) {
+ ptp->clock_rate = ext_clk_freq;
+ /* Set GPIO as PTP clock source */
+ clock_cfg &= ~PTP_CLOCK_CFG_EXT_CLK_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_EXT_CLK_EN;
+ }
+
+ if (extts) {
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EDGE;
+ /* Set GPIO as timestamping source */
+ clock_cfg &= ~PTP_CLOCK_CFG_TSTMP_IN_MASK;
+ clock_cfg |= PTP_CLOCK_CFG_TSTMP_EN;
+ }
+
+ clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
+ clock_cfg |= PTP_CLOCK_CFG_PPS_EN | PTP_CLOCK_CFG_PPS_INV;
+ writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
+
+ /* Set 50% duty cycle for 1Hz output */
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_HI_INCR);
+ writeq(0x1dcd650000000000, ptp->reg_base + PTP_PPS_LO_INCR);
+
+ clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
+ /* Initial compensation value to start the nanosecs counter */
+ writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
+}
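/* Worked example (illustration, not part of the patch): with sclk = 1000 MHz
 * the clock rate is 1,000,000,000 Hz, so
 *	clock_comp = (1000000000ull << 32) / 1000000000 = 1ull << 32,
 * i.e. exactly 1.0 ns is accumulated per PTP tick in 32.32 fixed point.
 * The PPS increment 0x1dcd650000000000 written above is 500,000,000 << 32
 * in what appears to be the same format, matching the 0.5 s high/low
 * halves of the 1 Hz, 50% duty-cycle output mentioned in the comment.
 */
u64 example_comp = (1000000000ull << 32) / 1000000000ull;	/* 0x100000000 */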
+
+static int ptp_get_tstmp(struct ptp *ptp, u64 *clk)
+{
+ *clk = readq(ptp->reg_base + PTP_TIMESTAMP);
+
+ return 0;
+}
+
+static int ptp_set_thresh(struct ptp *ptp, u64 thresh)
+{
+ writeq(thresh, ptp->reg_base + PTP_PPS_THRESH_HI);
+
+ return 0;
+}
+
static int ptp_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct device *dev = &pdev->dev;
struct ptp *ptp;
- u64 clock_comp;
- u64 clock_cfg;
int err;
ptp = devm_kzalloc(dev, sizeof(*ptp), GFP_KERNEL);
@@ -172,17 +208,6 @@ static int ptp_probe(struct pci_dev *pdev,
ptp->reg_base = pcim_iomap_table(pdev)[PCI_PTP_BAR_NO];
- ptp->clock_rate = get_clock_rate();
-
- /* Enable PTP clock */
- clock_cfg = readq(ptp->reg_base + PTP_CLOCK_CFG);
- clock_cfg |= PTP_CLOCK_CFG_PTP_EN;
- writeq(clock_cfg, ptp->reg_base + PTP_CLOCK_CFG);
-
- clock_comp = ((u64)1000000000ull << 32) / ptp->clock_rate;
- /* Initial compensation value to start the nanosecs counter */
- writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP);
-
pci_set_drvdata(pdev, ptp);
if (!first_ptp_block)
first_ptp_block = ptp;
@@ -272,6 +297,12 @@ int rvu_mbox_handler_ptp_op(struct rvu *rvu, struct ptp_req *req,
case PTP_OP_GET_CLOCK:
err = ptp_get_clock(rvu->ptp, &rsp->clk);
break;
+ case PTP_OP_GET_TSTMP:
+ err = ptp_get_tstmp(rvu->ptp, &rsp->clk);
+ break;
+ case PTP_OP_SET_THRESH:
+ err = ptp_set_thresh(rvu->ptp, req->thresh);
+ break;
default:
err = -EINVAL;
break;
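Both new ops are driven by the extended struct ptp_req shown earlier in mbox.h. A hedged sketch of the request a PF might build to program the PPS threshold; the threshold value and its unit are placeholders rather than documented values:

struct ptp_req req = {
	.op	= PTP_OP_SET_THRESH,
	.thresh	= 500000000ULL,		/* placeholder threshold value */
};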
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
index 76d404b24552..1b81a0493cd3 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h
@@ -20,6 +20,7 @@ struct ptp {
struct ptp *ptp_get(void);
void ptp_put(struct ptp *ptp);
+void ptp_start(struct ptp *ptp, u64 sclk, u32 ext_clk_freq, u32 extts);
extern struct pci_driver ptp_driver;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
index 07b0eafccad8..e695fa0e82a9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c
@@ -29,6 +29,7 @@ static struct mac_ops rpm_mac_ops = {
.mac_get_pause_frm_status = rpm_lmac_get_pause_frm_status,
.mac_enadis_pause_frm = rpm_lmac_enadis_pause_frm,
.mac_pause_frm_config = rpm_lmac_pause_frm_config,
+ .mac_enadis_ptp_config = rpm_lmac_ptp_config,
};
struct mac_ops *rpm_get_mac_ops(void)
@@ -270,3 +271,19 @@ int rpm_lmac_internal_loopback(void *rpmd, int lmac_id, bool enable)
return 0;
}
+
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable)
+{
+ rpm_t *rpm = rpmd;
+ u64 cfg;
+
+ if (!is_lmac_valid(rpm, lmac_id))
+ return;
+
+ cfg = rpm_read(rpm, lmac_id, RPMX_CMRX_CFG);
+ if (enable)
+ cfg |= RPMX_RX_TS_PREPEND;
+ else
+ cfg &= ~RPMX_RX_TS_PREPEND;
+ rpm_write(rpm, lmac_id, RPMX_CMRX_CFG, cfg);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
index f0b069442dcc..57c8a687b488 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h
@@ -14,6 +14,8 @@
#define PCI_DEVID_CN10K_RPM 0xA060
/* Registers */
+#define RPMX_CMRX_CFG 0x00
+#define RPMX_RX_TS_PREPEND BIT_ULL(22)
#define RPMX_CMRX_SW_INT 0x180
#define RPMX_CMRX_SW_INT_W1S 0x188
#define RPMX_CMRX_SW_INT_ENA_W1S 0x198
@@ -54,4 +56,5 @@ int rpm_lmac_enadis_pause_frm(void *rpmd, int lmac_id, u8 tx_pause,
u8 rx_pause);
int rpm_get_tx_stats(void *rpmd, int lmac_id, int idx, u64 *tx_stat);
int rpm_get_rx_stats(void *rpmd, int lmac_id, int idx, u64 *rx_stat);
+void rpm_lmac_ptp_config(void *rpmd, int lmac_id, bool enable);
#endif /* RPM_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 35836903b7fb..cb56e171ddd4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -854,6 +854,7 @@ static int rvu_setup_nix_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = NIX_PRIV_LFX_CFG;
block->msixcfg_reg = NIX_PRIV_LFX_INT_CFG;
block->lfreset_reg = NIX_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NIX%d", blkid);
rvu->nix_blkaddr[blkid] = blkaddr;
return rvu_alloc_bitmap(&block->lf);
@@ -883,6 +884,7 @@ static int rvu_setup_cpt_hw_resource(struct rvu *rvu, int blkaddr)
block->lfcfg_reg = CPT_PRIV_LFX_CFG;
block->msixcfg_reg = CPT_PRIV_LFX_INT_CFG;
block->lfreset_reg = CPT_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "CPT%d", blkid);
return rvu_alloc_bitmap(&block->lf);
}
@@ -940,6 +942,7 @@ static int rvu_setup_hw_resources(struct rvu *rvu)
block->lfcfg_reg = NPA_PRIV_LFX_CFG;
block->msixcfg_reg = NPA_PRIV_LFX_INT_CFG;
block->lfreset_reg = NPA_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "NPA");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -979,6 +982,7 @@ nix:
block->lfcfg_reg = SSO_PRIV_LFX_HWGRP_CFG;
block->msixcfg_reg = SSO_PRIV_LFX_HWGRP_INT_CFG;
block->lfreset_reg = SSO_AF_LF_HWGRP_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSO GROUP");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1003,6 +1007,7 @@ ssow:
block->lfcfg_reg = SSOW_PRIV_LFX_HWS_CFG;
block->msixcfg_reg = SSOW_PRIV_LFX_HWS_INT_CFG;
block->lfreset_reg = SSOW_AF_LF_HWS_RST;
+ block->rvu = rvu;
sprintf(block->name, "SSOWS");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1028,6 +1033,7 @@ tim:
block->lfcfg_reg = TIM_PRIV_LFX_CFG;
block->msixcfg_reg = TIM_PRIV_LFX_INT_CFG;
block->lfreset_reg = TIM_AF_LF_RST;
+ block->rvu = rvu;
sprintf(block->name, "TIM");
err = rvu_alloc_bitmap(&block->lf);
if (err) {
@@ -1287,6 +1293,60 @@ static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block,
return (val & 0xFFF);
}
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int numlfs, total_lfs = 0, nr_blocks = 0;
+ int i, num_blkaddr[BLK_COUNT] = { 0 };
+ struct rvu_block *block;
+ int blkaddr;
+ u16 start_slot;
+
+ if (!is_blktype_attached(pfvf, blktype))
+ return -ENODEV;
+
+ /* Get all the block addresses from which LFs are attached to
+ * the given pcifunc in num_blkaddr[].
+ */
+ for (blkaddr = BLKADDR_RVUM; blkaddr < BLK_COUNT; blkaddr++) {
+ block = &rvu->hw->block[blkaddr];
+ if (block->type != blktype)
+ continue;
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ continue;
+
+ numlfs = rvu_get_rsrc_mapcount(pfvf, blkaddr);
+ if (numlfs) {
+ total_lfs += numlfs;
+ num_blkaddr[nr_blocks] = blkaddr;
+ nr_blocks++;
+ }
+ }
+
+ if (global_slot >= total_lfs)
+ return -ENODEV;
+
+ /* Based on the given global slot number retrieve the
+ * correct block address out of all attached block
+ * addresses and slot number in that block.
+ */
+ total_lfs = 0;
+ blkaddr = -ENODEV;
+ for (i = 0; i < nr_blocks; i++) {
+ numlfs = rvu_get_rsrc_mapcount(pfvf, num_blkaddr[i]);
+ total_lfs += numlfs;
+ if (global_slot < total_lfs) {
+ blkaddr = num_blkaddr[i];
+ start_slot = total_lfs - numlfs;
+ *slot_in_block = global_slot - start_slot;
+ break;
+ }
+ }
+
+ return blkaddr;
+}
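To make the two passes above concrete: the first loop collects the per-block LF counts attached to the pcifunc, the second peels off whole blocks until the global slot falls inside one and converts it to a block-local slot. A stand-alone sketch of the same translation, with rvu_get_rsrc_mapcount() replaced by a plain array of made-up LF counts:

/* Illustrative only: map a global slot to (block index, local slot). */
#include <stdio.h>

static int slot_to_block(const int *lfs_per_block, int nr_blocks,
			 int global_slot, int *slot_in_block)
{
	int i, total = 0;

	for (i = 0; i < nr_blocks; i++) {
		total += lfs_per_block[i];
		if (global_slot < total) {
			*slot_in_block = global_slot -
					 (total - lfs_per_block[i]);
			return i;	/* index of the owning block */
		}
	}
	return -1;			/* slot out of range */
}

int main(void)
{
	int lfs[] = { 2, 3 };	/* e.g. two LFs on one block, three on the next */
	int local = 0;
	int blk = slot_to_block(lfs, 2, 3, &local);

	/* prints: global slot 3 -> block 1, local slot 1 */
	printf("global slot 3 -> block %d, local slot %d\n", blk, local);
	return 0;
}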
+
static void rvu_detach_block(struct rvu *rvu, int pcifunc, int blktype)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
@@ -2345,7 +2405,6 @@ static void rvu_mbox_destroy(struct mbox_wq_info *mw)
int devid;
if (mw->mbox_wq) {
- flush_workqueue(mw->mbox_wq);
destroy_workqueue(mw->mbox_wq);
mw->mbox_wq = NULL;
}
@@ -2473,7 +2532,8 @@ static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr)
rvu_npa_lf_teardown(rvu, pcifunc, lf);
else if ((block->addr == BLKADDR_CPT0) ||
(block->addr == BLKADDR_CPT1))
- rvu_cpt_lf_teardown(rvu, pcifunc, lf, slot);
+ rvu_cpt_lf_teardown(rvu, pcifunc, block->addr, lf,
+ slot);
err = rvu_lf_reset(rvu, block, lf);
if (err) {
@@ -2671,6 +2731,8 @@ static void rvu_unregister_interrupts(struct rvu *rvu)
{
int irq;
+ rvu_cpt_unregister_interrupts(rvu);
+
/* Disable the Mbox interrupt */
rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C,
INTR_MASK(rvu->hw->total_pfs) & ~1ULL);
@@ -2880,6 +2942,11 @@ static int rvu_register_interrupts(struct rvu *rvu)
goto fail;
}
rvu->irq_allocated[offset] = true;
+
+ ret = rvu_cpt_register_interrupts(rvu);
+ if (ret)
+ goto fail;
+
return 0;
fail:
@@ -2890,7 +2957,6 @@ fail:
static void rvu_flr_wq_destroy(struct rvu *rvu)
{
if (rvu->flr_wq) {
- flush_workqueue(rvu->flr_wq);
destroy_workqueue(rvu->flr_wq);
rvu->flr_wq = NULL;
}
@@ -3186,6 +3252,10 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&rvu->rswitch.switch_lock);
+ if (rvu->fwdata)
+ ptp_start(rvu->ptp, rvu->fwdata->sclk, rvu->fwdata->ptp_ext_clk_rate,
+ rvu->fwdata->ptp_ext_tstamp);
+
return 0;
err_dl:
rvu_unregister_dl(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 1d9411232f1d..66e45d733824 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -101,6 +101,7 @@ struct rvu_block {
u64 msixcfg_reg;
u64 lfreset_reg;
unsigned char name[NAME_SIZE];
+ struct rvu *rvu;
};
struct nix_mcast {
@@ -220,6 +221,7 @@ struct rvu_pfvf {
u16 maxlen;
u16 minlen;
+ bool hw_rx_tstamp_en; /* Is rx_tstamp enabled */
u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */
u8 default_mac[ETH_ALEN]; /* MAC address from FWdata */
@@ -237,6 +239,7 @@ struct rvu_pfvf {
bool cgx_in_use; /* this PF/VF using CGX? */
int cgx_users; /* number of cgx users - used only by PFs */
+ int intf_mode;
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
@@ -394,7 +397,9 @@ struct rvu_fwdata {
u64 mcam_addr;
u64 mcam_sz;
u64 msixtr_base;
-#define FWDATA_RESERVED_MEM 1023
+ u32 ptp_ext_clk_rate;
+ u32 ptp_ext_tstamp;
+#define FWDATA_RESERVED_MEM 1022
u64 reserved[FWDATA_RESERVED_MEM];
#define CGX_MAX 5
#define CGX_LMACS_MAX 4
@@ -656,6 +661,8 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf);
int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc);
int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero);
int rvu_get_num_lbk_chans(void);
+int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc,
+ u16 global_slot, u16 *slot_in_block);
/* RVU HW reg validation */
enum regmap_block {
@@ -794,6 +801,7 @@ void npc_enable_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
int blkaddr, u16 src, struct mcam_entry *entry,
u8 *intf, u8 *ena);
+bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc);
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
@@ -805,7 +813,11 @@ bool is_mcam_entry_enabled(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr,
int index);
/* CPT APIs */
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
+int rvu_cpt_register_interrupts(struct rvu *rvu);
+void rvu_cpt_unregister_interrupts(struct rvu *rvu);
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf,
+ int slot);
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc);
/* CN10K RVU */
int rvu_set_channels_base(struct rvu *rvu);
@@ -827,4 +839,7 @@ void rvu_switch_enable(struct rvu *rvu);
void rvu_switch_disable(struct rvu *rvu);
void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc);
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir);
#endif /* RVU_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 81e8ea9ee30e..2ca182a4ce82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -324,7 +324,6 @@ static int cgx_lmac_event_handler_init(struct rvu *rvu)
static void rvu_cgx_wq_destroy(struct rvu *rvu)
{
if (rvu->cgx_evh_wq) {
- flush_workqueue(rvu->cgx_evh_wq);
destroy_workqueue(rvu->cgx_evh_wq);
rvu->cgx_evh_wq = NULL;
}
@@ -411,7 +410,7 @@ int rvu_cgx_exit(struct rvu *rvu)
* VF's of mapped PF and other PFs are not allowed. This fn() checks
* whether a PFFUNC is permitted to do the config or not.
*/
-static bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
+inline bool is_cgx_config_permitted(struct rvu *rvu, u16 pcifunc)
{
if ((pcifunc & RVU_PFVF_FUNC_MASK) ||
!is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc)))
@@ -694,7 +693,9 @@ int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req,
static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
u8 cgx_id, lmac_id;
void *cgxd;
@@ -711,13 +712,16 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
cgxd = rvu_cgx_pdata(cgx_id, rvu);
- cgx_lmac_ptp_config(cgxd, lmac_id, enable);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, enable);
/* If PTP is enabled then inform NPC that packets to be
* parsed by this PF will have their data shifted by 8 bytes
* and if PTP is disabled then no shift is required
*/
if (npc_config_ts_kpuaction(rvu, pf, pcifunc, enable))
return -EINVAL;
+ /* This flag is required to clean up CGX conf if app gets killed */
+ pfvf->hw_rx_tstamp_en = enable;
return 0;
}
@@ -725,6 +729,9 @@ static int rvu_cgx_ptp_rx_cfg(struct rvu *rvu, u16 pcifunc, bool enable)
int rvu_mbox_handler_cgx_ptp_rx_enable(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
+ if (!is_pf_cgxmapped(rvu, rvu_get_pf(req->hdr.pcifunc)))
+ return -EPERM;
+
return rvu_cgx_ptp_rx_cfg(rvu, req->hdr.pcifunc, true);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 46a41cfff575..7dbbc115cde4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -334,8 +334,8 @@ int rvu_set_channels_base(struct rvu *rvu)
/* Out of 4096 channels start CPT from 2048 so
* that MSB for CPT channels is always set
*/
- if (cpt_chan_base <= 0x800) {
- hw->cpt_chan_base = 0x800;
+ if (cpt_chan_base <= NIX_CHAN_CPT_CH_START) {
+ hw->cpt_chan_base = NIX_CHAN_CPT_CH_START;
} else {
dev_err(rvu->dev,
"CPT channels could not fit in the range 2048-4095\n");
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
index 1f90a7403392..45357deecabb 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c
@@ -37,6 +37,236 @@
(_rsp)->free_sts_##etype = free_sts; \
})
+static irqreturn_t rvu_cpt_af_flt_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg0, reg1, reg2;
+
+ reg0 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(0));
+ reg1 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(1));
+ if (!is_rvu_otx2(rvu)) {
+ reg2 = rvu_read64(rvu, blkaddr, CPT_AF_FLTX_INT(2));
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx, 0x%llx",
+ reg0, reg1, reg2);
+ } else {
+ dev_err_ratelimited(rvu->dev,
+ "Received CPTAF FLT irq : 0x%llx, 0x%llx",
+ reg0, reg1);
+ }
+
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(0), reg0);
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(1), reg1);
+ if (!is_rvu_otx2(rvu))
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT(2), reg2);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_rvu_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RVU_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RVU irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_cpt_af_ras_intr_handler(int irq, void *ptr)
+{
+ struct rvu_block *block = ptr;
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ u64 reg;
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RAS_INT);
+ dev_err_ratelimited(rvu->dev, "Received CPTAF RAS irq : 0x%llx", reg);
+
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT, reg);
+ return IRQ_HANDLED;
+}
+
+static int rvu_cpt_do_register_interrupt(struct rvu_block *block, int irq_offs,
+ irq_handler_t handler,
+ const char *name)
+{
+ struct rvu *rvu = block->rvu;
+ int ret;
+
+ ret = request_irq(pci_irq_vector(rvu->pdev, irq_offs), handler, 0,
+ name, block);
+ if (ret) {
+ dev_err(rvu->dev, "RVUAF: %s irq registration failed", name);
+ return ret;
+ }
+
+ WARN_ON(rvu->irq_allocated[irq_offs]);
+ rvu->irq_allocated[irq_offs] = true;
+ return 0;
+}
+
+static void cpt_10k_unregister_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ int i;
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_10K_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_10K_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[off + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, off + i), block);
+ rvu->irq_allocated[off + i] = false;
+ }
+}
+
+static void cpt_unregister_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs;
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return;
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return;
+ }
+ block = &hw->block[blkaddr];
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_unregister_interrupts(block, offs);
+
+ /* Disable all CPT AF interrupts */
+ for (i = 0; i < CPT_AF_INT_VEC_RVU; i++)
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1C(i), 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1C, 0x1);
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1C, 0x1);
+
+ for (i = 0; i < CPT_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), block);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_cpt_unregister_interrupts(struct rvu *rvu)
+{
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT0);
+ cpt_unregister_interrupts(rvu, BLKADDR_CPT1);
+}
+
+static int cpt_10k_register_interrupts(struct rvu_block *block, int off)
+{
+ struct rvu *rvu = block->rvu;
+ int blkaddr = block->addr;
+ char irq_name[16];
+ int i, ret;
+
+ for (i = CPT_10K_AF_INT_VEC_FLT0; i < CPT_10K_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, off + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, off + CPT_10K_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+static int cpt_register_interrupts(struct rvu *rvu, int blkaddr)
+{
+ struct rvu_hwinfo *hw = rvu->hw;
+ struct rvu_block *block;
+ int i, offs, ret = 0;
+ char irq_name[16];
+
+ if (!is_block_implemented(rvu->hw, blkaddr))
+ return 0;
+
+ block = &hw->block[blkaddr];
+ offs = rvu_read64(rvu, blkaddr, CPT_PRIV_AF_INT_CFG) & 0x7FF;
+ if (!offs) {
+ dev_warn(rvu->dev,
+ "Failed to get CPT_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ if (!is_rvu_otx2(rvu))
+ return cpt_10k_register_interrupts(block, offs);
+
+ for (i = CPT_AF_INT_VEC_FLT0; i < CPT_AF_INT_VEC_RVU; i++) {
+ snprintf(irq_name, sizeof(irq_name), "CPTAF FLT%d", i);
+ ret = rvu_cpt_do_register_interrupt(block, offs + i,
+ rvu_cpt_af_flt_intr_handler,
+ irq_name);
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_FLTX_INT_ENA_W1S(i), 0x1);
+ }
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RVU,
+ rvu_cpt_af_rvu_intr_handler,
+ "CPTAF RVU");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RVU_INT_ENA_W1S, 0x1);
+
+ ret = rvu_cpt_do_register_interrupt(block, offs + CPT_AF_INT_VEC_RAS,
+ rvu_cpt_af_ras_intr_handler,
+ "CPTAF RAS");
+ if (ret)
+ goto err;
+ rvu_write64(rvu, blkaddr, CPT_AF_RAS_INT_ENA_W1S, 0x1);
+
+ return 0;
+err:
+ rvu_cpt_unregister_interrupts(rvu);
+ return ret;
+}
+
+int rvu_cpt_register_interrupts(struct rvu *rvu)
+{
+ int ret;
+
+ ret = cpt_register_interrupts(rvu, BLKADDR_CPT0);
+ if (ret)
+ return ret;
+
+ return cpt_register_interrupts(rvu, BLKADDR_CPT1);
+}
+
static int get_cpt_pf_num(struct rvu *rvu)
{
int i, domain_nr, cpt_pf_num = -1;
@@ -147,9 +377,13 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
- /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- val = (u64)req->nix_pf_func << 48 |
- (u64)req->sso_pf_func << 32;
+ /* Set CPT LF NIX_PF_FUNC and SSO_PF_FUNC. EXE_LDWB is set
+ * on reset.
+ */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val &= ~(GENMASK_ULL(63, 48) | GENMASK_ULL(47, 32));
+ val |= ((u64)req->nix_pf_func << 48 |
+ (u64)req->sso_pf_func << 32);
rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
}
@@ -159,7 +393,7 @@ int rvu_mbox_handler_cpt_lf_alloc(struct rvu *rvu,
static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
{
u16 pcifunc = req->hdr.pcifunc;
- int num_lfs, cptlf, slot;
+ int num_lfs, cptlf, slot, err;
struct rvu_block *block;
block = &rvu->hw->block[blkaddr];
@@ -173,10 +407,15 @@ static int cpt_lf_free(struct rvu *rvu, struct msg_req *req, int blkaddr)
if (cptlf < 0)
return CPT_AF_ERR_LF_INVALID;
- /* Reset CPT LF group and priority */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), 0x0);
- /* Reset CPT LF NIX_PF_FUNC and SSO_PF_FUNC */
- rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), 0x0);
+ /* Perform teardown */
+ rvu_cpt_lf_teardown(rvu, pcifunc, blkaddr, cptlf, slot);
+
+ /* Reset LF */
+ err = rvu_lf_reset(rvu, block, cptlf);
+ if (err) {
+ dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n",
+ block->addr, cptlf);
+ }
}
return 0;
@@ -197,6 +436,141 @@ int rvu_mbox_handler_cpt_lf_free(struct rvu *rvu, struct msg_req *req,
return ret;
}
+static int cpt_inline_ipsec_cfg_inbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 sso_pf_func = req->sso_pf_func;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(16))) {
+ /* IPSec inline outbound path is already enabled for a given
+ * CPT LF. The HRM states that inline inbound & outbound paths
+ * must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_INB_ENA;
+ }
+ /* Check if requested 'CPTLF <=> SSOLF' mapping is valid */
+ if (sso_pf_func && !is_pffunc_map_valid(rvu, sso_pf_func, BLKTYPE_SSO))
+ return CPT_AF_ERR_SSO_PF_FUNC_INVALID;
+
+ nix_sel = (blkaddr == BLKADDR_CPT1) ? 1 : 0;
+ /* Enable CPT LF for IPsec inline inbound operations */
+ if (req->enable)
+ val |= BIT_ULL(9);
+ else
+ val &= ~BIT_ULL(9);
+
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (sso_pf_func) {
+ /* Set SSO_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)sso_pf_func << 32;
+ val |= (u64)req->nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+ }
+ if (req->sso_pf_func_ovrd)
+ /* Set SSO_PF_FUNC_OVRD for inline IPSec */
+ rvu_write64(rvu, blkaddr, CPT_AF_ECO, 0x1);
+
+ /* Configure the X2P Link register with the cpt base channel number and
+ * range of channels it should propagate to X2P
+ */
+ if (!is_rvu_otx2(rvu)) {
+ val = (ilog2(NIX_CHAN_CPT_X2P_MASK + 1) << 16);
+ val |= rvu->hw->cpt_chan_base;
+
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(0), val);
+ rvu_write64(rvu, blkaddr, CPT_AF_X2PX_LINK_CFG(1), val);
+ }
+
+ return 0;
+}
+
+static int cpt_inline_ipsec_cfg_outbound(struct rvu *rvu, int blkaddr, u8 cptlf,
+ struct cpt_inline_ipsec_cfg_msg *req)
+{
+ u16 nix_pf_func = req->nix_pf_func;
+ int nix_blkaddr;
+ u8 nix_sel;
+ u64 val;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ if (req->enable && (val & BIT_ULL(9))) {
+ /* IPSec inline inbound path is already enabled for a given
+ * CPT LF. The HRM states that inline inbound & outbound paths
+ * must not be enabled at the same time for a given CPT LF.
+ */
+ return CPT_AF_ERR_INLINE_IPSEC_OUT_ENA;
+ }
+
+ /* Check if requested 'CPTLF <=> NIXLF' mapping is valid */
+ if (nix_pf_func && !is_pffunc_map_valid(rvu, nix_pf_func, BLKTYPE_NIX))
+ return CPT_AF_ERR_NIX_PF_FUNC_INVALID;
+
+ /* Enable CPT LF for IPsec inline outbound operations */
+ if (req->enable)
+ val |= BIT_ULL(16);
+ else
+ val &= ~BIT_ULL(16);
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+
+ if (nix_pf_func) {
+ /* Set NIX_PF_FUNC */
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf));
+ val |= (u64)nix_pf_func << 48;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL2(cptlf), val);
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, nix_pf_func);
+ nix_sel = (nix_blkaddr == BLKADDR_NIX0) ? 0 : 1;
+
+ val = rvu_read64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf));
+ val |= (u64)nix_sel << 8;
+ rvu_write64(rvu, blkaddr, CPT_AF_LFX_CTL(cptlf), val);
+ }
+
+ return 0;
+}
+
+int rvu_mbox_handler_cpt_inline_ipsec_cfg(struct rvu *rvu,
+ struct cpt_inline_ipsec_cfg_msg *req,
+ struct msg_rsp *rsp)
+{
+ u16 pcifunc = req->hdr.pcifunc;
+ struct rvu_block *block;
+ int cptlf, blkaddr, ret;
+ u16 actual_slot;
+
+ blkaddr = rvu_get_blkaddr_from_slot(rvu, BLKTYPE_CPT, pcifunc,
+ req->slot, &actual_slot);
+ if (blkaddr < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ block = &rvu->hw->block[blkaddr];
+
+ cptlf = rvu_get_lf(rvu, block, pcifunc, actual_slot);
+ if (cptlf < 0)
+ return CPT_AF_ERR_LF_INVALID;
+
+ switch (req->dir) {
+ case CPT_INLINE_INBOUND:
+ ret = cpt_inline_ipsec_cfg_inbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ case CPT_INLINE_OUTBOUND:
+ ret = cpt_inline_ipsec_cfg_outbound(rvu, blkaddr, cptlf, req);
+ break;
+
+ default:
+ return CPT_AF_ERR_PARAM;
+ }
+
+ return ret;
+}
+
static bool is_valid_offset(struct rvu *rvu, struct cpt_rd_wr_reg_msg *req)
{
u64 offset = req->reg_offset;
@@ -421,6 +795,58 @@ int rvu_mbox_handler_cpt_rxc_time_cfg(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cpt_ctx_cache_sync(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_cpt_ctx_flush(rvu, req->hdr.pcifunc);
+}
+
+static void cpt_rxc_teardown(struct rvu *rvu, int blkaddr)
+{
+ struct cpt_rxc_time_cfg_req req;
+ int timeout = 2000;
+ u64 reg;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ /* Set time limit to minimum values, so that rxc entries will be
+ * flushed out quickly.
+ */
+ req.step = 1;
+ req.zombie_thres = 1;
+ req.zombie_limit = 1;
+ req.active_thres = 1;
+ req.active_limit = 1;
+
+ cpt_rxc_time_cfg(rvu, &req, blkaddr);
+
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ACTIVE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ACTIVE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC active count hits hard loop counter\n");
+
+ timeout = 2000;
+ do {
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_RXC_ZOMBIE_STS);
+ udelay(1);
+ if (FIELD_GET(RXC_ZOMBIE_COUNT, reg))
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for RXC zombie count hits hard loop counter\n");
+}
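Both polls above follow the same bounded-wait shape: re-read a hardware count, back off briefly, and give up after a fixed number of iterations so a stuck counter cannot hang the teardown path. A minimal user-space sketch of that shape, where busy() and relax() stand in for the register read and udelay():

#include <stdbool.h>
#include <stdio.h>

static int still_busy = 5;	/* pretend the count drains after 5 reads */

static bool busy(void)  { return still_busy-- > 0; }
static void relax(void) { /* udelay(1) in the driver */ }

int main(void)
{
	int timeout = 2000;

	while (timeout && busy()) {
		relax();
		timeout--;
	}

	if (!timeout)
		printf("poll hit hard loop counter\n");
	else
		printf("drained after %d iterations\n", 2000 - timeout);
	return 0;
}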
+
#define INPROG_INFLIGHT(reg) ((reg) & 0x1FF)
#define INPROG_GRB_PARTIAL(reg) ((reg) & BIT_ULL(31))
#define INPROG_GRB(reg) (((reg) >> 32) & 0xFF)
@@ -485,14 +911,12 @@ static void cpt_lf_disable_iqueue(struct rvu *rvu, int blkaddr, int slot)
dev_warn(rvu->dev, "CPT FLR hits hard loop counter\n");
}
-int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
+int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int lf, int slot)
{
- int blkaddr;
u64 reg;
- blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_CPT, pcifunc);
- if (blkaddr != BLKADDR_CPT0 && blkaddr != BLKADDR_CPT1)
- return -EINVAL;
+ if (is_cpt_pf(rvu, pcifunc) || is_cpt_vf(rvu, pcifunc))
+ cpt_rxc_teardown(rvu, blkaddr);
/* Enable BAR2 ALIAS for this pcifunc. */
reg = BIT_ULL(16) | pcifunc;
@@ -509,3 +933,154 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot)
return 0;
}
+
+#define CPT_RES_LEN 16
+#define CPT_SE_IE_EGRP 1ULL
+
+static int cpt_inline_inb_lf_cmd_send(struct rvu *rvu, int blkaddr,
+ int nix_blkaddr)
+{
+ int cpt_pf_num = get_cpt_pf_num(rvu);
+ struct cpt_inst_lmtst_req *req;
+ dma_addr_t res_daddr;
+ int timeout = 3000;
+ u8 cpt_idx;
+ u64 *inst;
+ u16 *res;
+ int rc;
+
+ res = kzalloc(CPT_RES_LEN, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ res_daddr = dma_map_single(rvu->dev, res, CPT_RES_LEN,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(rvu->dev, res_daddr)) {
+ dev_err(rvu->dev, "DMA mapping failed for CPT result\n");
+ rc = -EFAULT;
+ goto res_free;
+ }
+ *res = 0xFFFF;
+
+ /* Send mbox message to CPT PF */
+ req = (struct cpt_inst_lmtst_req *)
+ otx2_mbox_alloc_msg_rsp(&rvu->afpf_wq_info.mbox_up,
+ cpt_pf_num, sizeof(*req),
+ sizeof(struct msg_rsp));
+ if (!req) {
+ rc = -ENOMEM;
+ goto res_daddr_unmap;
+ }
+ req->hdr.sig = OTX2_MBOX_REQ_SIG;
+ req->hdr.id = MBOX_MSG_CPT_INST_LMTST;
+
+ inst = req->inst;
+ /* Prepare CPT_INST_S */
+ inst[0] = 0;
+ inst[1] = res_daddr;
+ /* AF PF FUNC */
+ inst[2] = 0;
+ /* Set QORD */
+ inst[3] = 1;
+ inst[4] = 0;
+ inst[5] = 0;
+ inst[6] = 0;
+ /* Set EGRP */
+ inst[7] = CPT_SE_IE_EGRP << 61;
+
+ /* Subtract 1 from the NIX-CPT credit count to preserve
+ * credit counts.
+ */
+ cpt_idx = (blkaddr == BLKADDR_CPT0) ? 0 : 1;
+ rvu_write64(rvu, nix_blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ BIT_ULL(22) - 1);
+
+ otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ rc = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, cpt_pf_num);
+ if (rc)
+ dev_warn(rvu->dev, "notification to pf %d failed\n",
+ cpt_pf_num);
+ /* Wait for CPT instruction to be completed */
+ do {
+ mdelay(1);
+ if (*res == 0xFFFF)
+ timeout--;
+ else
+ break;
+ } while (timeout);
+
+ if (timeout == 0)
+ dev_warn(rvu->dev, "Poll for result hits hard loop counter\n");
+
+res_daddr_unmap:
+ dma_unmap_single(rvu->dev, res_daddr, CPT_RES_LEN, DMA_BIDIRECTIONAL);
+res_free:
+ kfree(res);
+
+ return 0;
+}
+
+#define CTX_CAM_PF_FUNC GENMASK_ULL(61, 46)
+#define CTX_CAM_CPTR GENMASK_ULL(45, 0)
+
+int rvu_cpt_ctx_flush(struct rvu *rvu, u16 pcifunc)
+{
+ int nix_blkaddr, blkaddr;
+ u16 max_ctx_entries, i;
+ int slot = 0, num_lfs;
+ u64 reg, cam_data;
+ int rc;
+
+ nix_blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc);
+ if (nix_blkaddr < 0)
+ return -EINVAL;
+
+ if (is_rvu_otx2(rvu))
+ return 0;
+
+ blkaddr = (nix_blkaddr == BLKADDR_NIX1) ? BLKADDR_CPT1 : BLKADDR_CPT0;
+
+ /* Submit CPT_INST_S to track when all packets have been
+ * flushed through for the NIX PF FUNC in the inline inbound case.
+ */
+ rc = cpt_inline_inb_lf_cmd_send(rvu, blkaddr, nix_blkaddr);
+ if (rc)
+ return rc;
+
+ /* Wait for rxc entries to be flushed out */
+ cpt_rxc_teardown(rvu, blkaddr);
+
+ reg = rvu_read64(rvu, blkaddr, CPT_AF_CONSTANTS0);
+ max_ctx_entries = (reg >> 48) & 0xFFF;
+
+ mutex_lock(&rvu->rsrc_lock);
+
+ num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc),
+ blkaddr);
+ if (num_lfs == 0) {
+ dev_warn(rvu->dev, "CPT LF is not configured\n");
+ goto unlock;
+ }
+
+ /* Enable BAR2 ALIAS for this pcifunc. */
+ reg = BIT_ULL(16) | pcifunc;
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, reg);
+
+ for (i = 0; i < max_ctx_entries; i++) {
+ cam_data = rvu_read64(rvu, blkaddr, CPT_AF_CTX_CAM_DATA(i));
+
+ if ((FIELD_GET(CTX_CAM_PF_FUNC, cam_data) == pcifunc) &&
+ FIELD_GET(CTX_CAM_CPTR, cam_data)) {
+ reg = BIT_ULL(46) | FIELD_GET(CTX_CAM_CPTR, cam_data);
+ rvu_write64(rvu, blkaddr,
+ CPT_AF_BAR2_ALIASX(slot, CPT_LF_CTX_FLUSH),
+ reg);
+ }
+ }
+ rvu_write64(rvu, blkaddr, CPT_AF_BAR2_SEL, 0);
+
+unlock:
+ mutex_unlock(&rvu->rsrc_lock);
+
+ return 0;
+}
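The CAM decode above is plain GENMASK/FIELD_GET bit slicing: bits 61:46 of each CAM word carry the owning pcifunc and bits 45:0 the context pointer, and only entries whose pcifunc matches get a CTX_FLUSH write. The sketch below re-implements just enough of those helpers to show the extraction outside the kernel; the CAM word value is invented for the example:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's GENMASK_ULL()/FIELD_GET() */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))
#define FIELD_GET_ULL(mask, val) \
	(((val) & (mask)) / ((mask) & ~((mask) << 1)))

#define CTX_CAM_PF_FUNC	GENMASK_ULL(61, 46)
#define CTX_CAM_CPTR	GENMASK_ULL(45, 0)

int main(void)
{
	/* Hypothetical CAM word: pcifunc 0x400 owning a context at 0x1234 */
	uint64_t cam_data = ((uint64_t)0x400 << 46) | 0x1234;

	/* prints: pf_func = 0x400, cptr = 0x1234 */
	printf("pf_func = 0x%llx, cptr = 0x%llx\n",
	       (unsigned long long)FIELD_GET_ULL(CTX_CAM_PF_FUNC, cam_data),
	       (unsigned long long)FIELD_GET_ULL(CTX_CAM_CPTR, cam_data));
	return 0;
}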
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 9338765da048..c7fd466a0efd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -95,7 +95,7 @@ static char *cgx_tx_stats_fields[] = {
[CGX_STAT5] = "Total frames sent on the interface",
[CGX_STAT6] = "Packets sent with an octet count < 64",
[CGX_STAT7] = "Packets sent with an octet count == 64",
- [CGX_STAT8] = "Packets sent with an octet count of 65–127",
+ [CGX_STAT8] = "Packets sent with an octet count of 65-127",
[CGX_STAT9] = "Packets sent with an octet count of 128-255",
[CGX_STAT10] = "Packets sent with an octet count of 256-511",
[CGX_STAT11] = "Packets sent with an octet count of 512-1023",
@@ -125,7 +125,7 @@ static char *rpm_rx_stats_fields[] = {
"Total frames received on interface",
"Packets received with an octet count < 64",
"Packets received with an octet count == 64",
- "Packets received with an octet count of 65–127",
+ "Packets received with an octet count of 65-127",
"Packets received with an octet count of 128-255",
"Packets received with an octet count of 256-511",
"Packets received with an octet count of 512-1023",
@@ -164,7 +164,7 @@ static char *rpm_tx_stats_fields[] = {
"Packets sent to the multicast DMAC",
"Packets sent to a broadcast DMAC",
"Packets sent with an octet count == 64",
- "Packets sent with an octet count of 65–127",
+ "Packets sent with an octet count of 65-127",
"Packets sent with an octet count of 128-255",
"Packets sent with an octet count of 256-511",
"Packets sent with an octet count of 512-1023",
@@ -226,18 +226,175 @@ static const struct file_operations rvu_dbg_##name##_fops = { \
static void print_nix_qsize(struct seq_file *filp, struct rvu_pfvf *pfvf);
+#define LMT_MAPTBL_ENTRY_SIZE 16
+/* Dump LMTST map table */
+static ssize_t rvu_dbg_lmtst_map_table_display(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct rvu *rvu = filp->private_data;
+ u64 lmt_addr, val, tbl_base;
+ int pf, vf, num_vfs, hw_vfs;
+ void __iomem *lmt_map_base;
+ int index = 0, off = 0;
+ int bytes_not_copied;
+ int buf_size = 10240;
+ char *buf;
+
+ /* don't allow partial reads */
+ if (*ppos != 0)
+ return 0;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOSPC;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, 128 * 1024);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to set up LMT map table mapping\n");
+ kfree(buf);
+ return false;
+ }
+
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\tLmtst Map Table Entries");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "\n\t\t\t\t\t=======================");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\nPcifunc\t\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "Table Index\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmtline Base (word 0)\t\t");
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "Lmt Map Entry (word 1)");
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ off += scnprintf(&buf[off], buf_size - 1 - off, "PF%d \t\t\t",
+ pf);
+
+ index = pf * rvu->hw->total_vfs * LMT_MAPTBL_ENTRY_SIZE;
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%llx\t\t",
+ (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off, " 0x%016llx\n",
+ val);
+ /* Reading num of VFs per PF */
+ rvu_get_pf_numvfs(rvu, pf, &num_vfs, &hw_vfs);
+ for (vf = 0; vf < num_vfs; vf++) {
+ index = (pf * rvu->hw->total_vfs * 16) +
+ ((vf + 1) * LMT_MAPTBL_ENTRY_SIZE);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ "PF%d:VF%d \t\t", pf, vf);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%llx\t\t", (tbl_base + index));
+ lmt_addr = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\t\t", lmt_addr);
+ index += 8;
+ val = readq(lmt_map_base + index);
+ off += scnprintf(&buf[off], buf_size - 1 - off,
+ " 0x%016llx\n", val);
+ }
+ }
+ off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+
+ bytes_not_copied = copy_to_user(buffer, buf, off);
+ kfree(buf);
+
+ iounmap(lmt_map_base);
+ if (bytes_not_copied)
+ return -EFAULT;
+
+ *ppos = off;
+ return off;
+}
+
+RVU_DEBUG_FOPS(lmtst_map_table, lmtst_map_table_display, NULL);
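The dump above walks the map table in 16-byte entries: it indexes the PF entry at pf * total_vfs * 16, the PF's VFs at the entries that follow, and reads the LMT line base from word 0 and the map entry from word 1 (offset +8). A quick sketch of that offset arithmetic only (total_vfs and the PF/VF numbers are made-up values):

#include <stdio.h>

int main(void)
{
	const long entry_size = 16, total_vfs = 128;	/* hypothetical */
	int pf = 2, vf = 3;

	long pf_index = pf * total_vfs * entry_size;
	long vf_index = pf_index + (vf + 1) * entry_size;

	printf("PF%d entry:     word0 @ +%ld, word1 @ +%ld\n",
	       pf, pf_index, pf_index + 8);
	printf("PF%d:VF%d entry: word0 @ +%ld, word1 @ +%ld\n",
	       pf, vf, vf_index, vf_index + 8);
	return 0;
}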
+
+static void get_lf_str_list(struct rvu_block block, int pcifunc,
+ char *lfs)
+{
+ int lf = 0, seq = 0, len = 0, prev_lf = block.lf.max;
+
+ for_each_set_bit(lf, block.lf.bmap, block.lf.max) {
+ if (lf >= block.lf.max)
+ break;
+
+ if (block.fn_map[lf] != pcifunc)
+ continue;
+
+ if (lf == prev_lf + 1) {
+ prev_lf = lf;
+ seq = 1;
+ continue;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d,%d", prev_lf, lf);
+ else
+ len += (len ? sprintf(lfs + len, ",%d", lf) :
+ sprintf(lfs + len, "%d", lf));
+
+ prev_lf = lf;
+ seq = 0;
+ }
+
+ if (seq)
+ len += sprintf(lfs + len, "-%d", prev_lf);
+
+ lfs[len] = '\0';
+}
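get_lf_str_list() collapses each block's LF set into comma-separated ranges, so LFs 0,1,2,5,7,8 print as "0-2,5,7-8". A stand-alone version of the same range formatting over a plain sorted array, for reference:

#include <stdio.h>

/* Collapse a sorted list of LF numbers into "a-b,c,..." runs. */
static void format_runs(const int *lf, int n, char *out)
{
	int i, len = 0;

	for (i = 0; i < n; i++) {
		int start = lf[i];

		while (i + 1 < n && lf[i + 1] == lf[i] + 1)
			i++;
		if (lf[i] == start)
			len += sprintf(out + len, "%s%d",
				       len ? "," : "", start);
		else
			len += sprintf(out + len, "%s%d-%d",
				       len ? "," : "", start, lf[i]);
	}
	out[len] = '\0';
}

int main(void)
{
	int lfs[] = { 0, 1, 2, 5, 7, 8 };
	char buf[64];

	format_runs(lfs, 6, buf);
	printf("%s\n", buf);	/* prints: 0-2,5,7-8 */
	return 0;
}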
+
+static int get_max_column_width(struct rvu *rvu)
+{
+ int index, pf, vf, lf_str_size = 12, buf_size = 256;
+ struct rvu_block block;
+ u16 pcifunc;
+ char *buf;
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
+ for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ pcifunc = pf << 10 | vf;
+ if (!pcifunc)
+ continue;
+
+ for (index = 0; index < BLK_COUNT; index++) {
+ block = rvu->hw->block[index];
+ if (!strlen(block.name))
+ continue;
+
+ get_lf_str_list(block, pcifunc, buf);
+ if (lf_str_size <= strlen(buf))
+ lf_str_size = strlen(buf) + 1;
+ }
+ }
+ }
+
+ kfree(buf);
+ return lf_str_size;
+}
+
/* Dumps current provisioning status of all RVU block LFs */
static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
char __user *buffer,
size_t count, loff_t *ppos)
{
- int index, off = 0, flag = 0, go_back = 0, len = 0;
+ int index, off = 0, flag = 0, len = 0, i = 0;
struct rvu *rvu = filp->private_data;
- int lf, pf, vf, pcifunc;
+ int bytes_not_copied = 0;
struct rvu_block block;
- int bytes_not_copied;
- int lf_str_size = 12;
+ int pf, vf, pcifunc;
int buf_size = 2048;
+ int lf_str_size;
char *lfs;
char *buf;
@@ -249,6 +406,9 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
if (!buf)
return -ENOSPC;
+ /* Get the maximum width of a column */
+ lf_str_size = get_max_column_width(rvu);
+
lfs = kzalloc(lf_str_size, GFP_KERNEL);
if (!lfs) {
kfree(buf);
@@ -262,65 +422,69 @@ static ssize_t rvu_dbg_rsrc_attach_status(struct file *filp,
"%-*s", lf_str_size,
rvu->hw->block[index].name);
}
+
off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer + (i * off), buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
for (pf = 0; pf < rvu->hw->total_pfs; pf++) {
for (vf = 0; vf <= rvu->hw->total_vfs; vf++) {
+ off = 0;
+ flag = 0;
pcifunc = pf << 10 | vf;
if (!pcifunc)
continue;
if (vf) {
sprintf(lfs, "PF%d:VF%d", pf, vf - 1);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
} else {
sprintf(lfs, "PF%d", pf);
- go_back = scnprintf(&buf[off],
- buf_size - 1 - off,
- "%-*s", lf_str_size, lfs);
+ off = scnprintf(&buf[off],
+ buf_size - 1 - off,
+ "%-*s", lf_str_size, lfs);
}
- off += go_back;
- for (index = 0; index < BLKTYPE_MAX; index++) {
+ for (index = 0; index < BLK_COUNT; index++) {
block = rvu->hw->block[index];
if (!strlen(block.name))
continue;
len = 0;
lfs[len] = '\0';
- for (lf = 0; lf < block.lf.max; lf++) {
- if (block.fn_map[lf] != pcifunc)
- continue;
+ get_lf_str_list(block, pcifunc, lfs);
+ if (strlen(lfs))
flag = 1;
- len += sprintf(&lfs[len], "%d,", lf);
- }
- if (flag)
- len--;
- lfs[len] = '\0';
off += scnprintf(&buf[off], buf_size - 1 - off,
"%-*s", lf_str_size, lfs);
- if (!strlen(lfs))
- go_back += lf_str_size;
}
- if (!flag)
- off -= go_back;
- else
- flag = 0;
- off--;
- off += scnprintf(&buf[off], buf_size - 1 - off, "\n");
+ if (flag) {
+ off += scnprintf(&buf[off],
+ buf_size - 1 - off, "\n");
+ bytes_not_copied = copy_to_user(buffer +
+ (i * off),
+ buf, off);
+ if (bytes_not_copied)
+ goto out;
+
+ i++;
+ *ppos += off;
+ }
}
}
- bytes_not_copied = copy_to_user(buffer, buf, off);
+out:
kfree(lfs);
kfree(buf);
-
if (bytes_not_copied)
return -EFAULT;
- *ppos = off;
- return off;
+ return *ppos;
}
RVU_DEBUG_FOPS(rsrc_status, rsrc_attach_status, NULL);
@@ -504,7 +668,7 @@ static ssize_t rvu_dbg_qsize_write(struct file *filp,
if (cmd_buf)
ret = -EINVAL;
- if (!strncmp(subtoken, "help", 4) || ret < 0) {
+ if (ret < 0 || !strncmp(subtoken, "help", 4)) {
dev_info(rvu->dev, "Use echo <%s-lf > qsize\n", blk_string);
goto qsize_write_done;
}
@@ -1719,6 +1883,10 @@ static int rvu_dbg_nix_band_prof_ctx_display(struct seq_file *m, void *unused)
u16 pcifunc;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
if (layer == BAND_PROF_INVAL_LAYER)
continue;
@@ -1768,6 +1936,10 @@ static int rvu_dbg_nix_band_prof_rsrc_display(struct seq_file *m, void *unused)
int layer;
char *str;
+ /* Ingress policers do not exist on all platforms */
+ if (!nix_hw->ipolicer)
+ return 0;
+
seq_puts(m, "\nBandwidth profile resource free count\n");
seq_puts(m, "=====================================\n");
for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) {
@@ -1878,7 +2050,7 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return -ENODEV;
mac_ops = get_mac_ops(cgxd);
-
+ /* There may be no CGX devices at all */
if (!mac_ops)
return 0;
@@ -1956,13 +2128,13 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
if (err)
return err;
- if (is_rvu_otx2(rvu))
- seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
- tx_stat);
- else
- seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
- tx_stat);
- stat++;
+ if (is_rvu_otx2(rvu))
+ seq_printf(s, "%s: %llu\n", cgx_tx_stats_fields[stat],
+ tx_stat);
+ else
+ seq_printf(s, "%s: %llu\n", rpm_tx_stats_fields[stat],
+ tx_stat);
+ stat++;
}
return err;
@@ -2400,6 +2572,8 @@ static int rvu_dbg_npc_mcam_show_rules(struct seq_file *s, void *unused)
seq_printf(s, "VF%d", vf);
}
seq_puts(s, "\n");
+ seq_printf(s, "\tchannel: 0x%x\n", iter->chan);
+ seq_printf(s, "\tchannel_mask: 0x%x\n", iter->chan_mask);
}
rvu_dbg_npc_mcam_show_action(s, iter);
@@ -2672,6 +2846,10 @@ void rvu_dbg_init(struct rvu *rvu)
debugfs_create_file("rsrc_alloc", 0444, rvu->rvu_dbg.root, rvu,
&rvu_dbg_rsrc_status_fops);
+ if (!is_rvu_otx2(rvu))
+ debugfs_create_file("lmtst_map_table", 0444, rvu->rvu_dbg.root,
+ rvu, &rvu_dbg_lmtst_map_table_fops);
+
if (!cgx_get_cgxcnt_max())
goto create;
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 274d3abe30eb..70bacd38a6d9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -1510,13 +1510,6 @@ int rvu_register_dl(struct rvu *rvu)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(rvu->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
rvu_dl = devlink_priv(dl);
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
@@ -1537,13 +1530,11 @@ int rvu_register_dl(struct rvu *rvu)
goto err_dl_health;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl_health:
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -1553,12 +1544,9 @@ void rvu_unregister_dl(struct rvu *rvu)
struct rvu_devlink *rvu_dl = rvu->rvu_dl;
struct devlink *dl = rvu_dl->dl;
- if (!dl)
- return;
-
+ devlink_unregister(dl);
devlink_params_unregister(dl, rvu_af_dl_params,
ARRAY_SIZE(rvu_af_dl_params));
rvu_health_reporters_destroy(rvu);
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index 9ef4e942e31e..d8b1948aaa0a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -28,6 +28,7 @@ static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req,
static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc);
static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw,
u32 leaf_prof);
+static const char *nix_get_ctx_name(int ctype);
enum mc_tbl_sz {
MC_TBL_SZ_256,
@@ -1061,10 +1062,68 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
return 0;
}
+static int rvu_nix_verify_aq_ctx(struct rvu *rvu, struct nix_hw *nix_hw,
+ struct nix_aq_enq_req *req, u8 ctype)
+{
+ struct nix_cn10k_aq_enq_req aq_req;
+ struct nix_cn10k_aq_enq_rsp aq_rsp;
+ int rc, word;
+
+ if (req->ctype != NIX_AQ_CTYPE_CQ)
+ return 0;
+
+ rc = nix_aq_context_read(rvu, nix_hw, &aq_req, &aq_rsp,
+ req->hdr.pcifunc, ctype, req->qidx);
+ if (rc) {
+ dev_err(rvu->dev,
+ "%s: Failed to fetch %s%d context of PFFUNC 0x%x\n",
+ __func__, nix_get_ctx_name(ctype), req->qidx,
+ req->hdr.pcifunc);
+ return rc;
+ }
+
+ /* Make copy of original context & mask which are required
+ * for resubmission
+ */
+ memcpy(&aq_req.cq_mask, &req->cq_mask, sizeof(struct nix_cq_ctx_s));
+ memcpy(&aq_req.cq, &req->cq, sizeof(struct nix_cq_ctx_s));
+
+ /* exclude fields which HW can update */
+ aq_req.cq_mask.cq_err = 0;
+ aq_req.cq_mask.wrptr = 0;
+ aq_req.cq_mask.tail = 0;
+ aq_req.cq_mask.head = 0;
+ aq_req.cq_mask.avg_level = 0;
+ aq_req.cq_mask.update_time = 0;
+ aq_req.cq_mask.substream = 0;
+
+ /* The context mask (cq_mask) holds the mask value of the fields
+ * changed by the AQ WRITE operation, for example:
+ * cq.drop = 0xa;
+ * cq_mask.drop = 0xff;
+ * The logic below ANDs cq with cq_mask so that non-updated fields
+ * are masked out of both the request and the response before
+ * comparison.
+ */
+ for (word = 0; word < sizeof(struct nix_cq_ctx_s) / sizeof(u64);
+ word++) {
+ *(u64 *)((u8 *)&aq_rsp.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ *(u64 *)((u8 *)&aq_req.cq + word * 8) &=
+ (*(u64 *)((u8 *)&aq_req.cq_mask + word * 8));
+ }
+
+ if (memcmp(&aq_req.cq, &aq_rsp.cq, sizeof(struct nix_cq_ctx_s)))
+ return NIX_AF_ERR_AQ_CTX_RETRY_WRITE;
+
+ return 0;
+}
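Put differently, only the fields the WRITE actually set (per cq_mask) take part in the request/response comparison; everything else is zeroed on both sides first. A toy two-word illustration of that mask-and-compare step, with invented values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* word 0: only the low byte (e.g. cq.drop) was written */
	uint64_t req[2]  = { 0x000000000000000aULL, 0x1111222233334444ULL };
	uint64_t mask[2] = { 0x00000000000000ffULL, 0x0000000000000000ULL };
	uint64_t rsp[2]  = { 0x000000000000000aULL, 0xdeadbeefcafef00dULL };
	int w, mismatch = 0;

	for (w = 0; w < 2; w++) {
		req[w] &= mask[w];	/* keep only the written fields */
		rsp[w] &= mask[w];
		if (req[w] != rsp[w])
			mismatch = 1;
	}

	/* prints: context matches */
	printf("%s\n", mismatch ? "retry write" : "context matches");
	return 0;
}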
+
static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
{
struct nix_hw *nix_hw;
+ int err, retries = 5;
int blkaddr;
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, req->hdr.pcifunc);
@@ -1075,7 +1134,24 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req,
if (!nix_hw)
return NIX_AF_ERR_INVALID_NIXBLK;
- return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+retry:
+ err = rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp);
+
+ /* HW errata: 'AQ Modification to CQ could be discarded on heavy traffic'.
+ * As a workaround, perform a CQ context read after each AQ write. If the
+ * read shows the AQ write was not applied, perform the AQ write again.
+ */
+ if (!err && req->op == NIX_AQ_INSTOP_WRITE) {
+ err = rvu_nix_verify_aq_ctx(rvu, nix_hw, req, NIX_AQ_CTYPE_CQ);
+ if (err == NIX_AF_ERR_AQ_CTX_RETRY_WRITE) {
+ if (retries--)
+ goto retry;
+ else
+ return NIX_AF_ERR_CQ_CTX_WRITE_ERR;
+ }
+ }
+
+ return err;
}
static const char *nix_get_ctx_name(int ctype)
@@ -2507,6 +2583,9 @@ static void nix_free_tx_vtag_entries(struct rvu *rvu, u16 pcifunc)
return;
nix_hw = get_nix_hw(rvu->hw, blkaddr);
+ if (!nix_hw)
+ return;
+
vlan = &nix_hw->txvlan;
mutex_lock(&vlan->rsrc_lock);
@@ -4436,10 +4515,17 @@ int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req,
return rvu_cgx_start_stop_io(rvu, pcifunc, false);
}
+#define RX_SA_BASE GENMASK_ULL(52, 7)
+
void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
{
struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
struct hwctx_disable_req ctx_req;
+ int pf = rvu_get_pf(pcifunc);
+ struct mac_ops *mac_ops;
+ u8 cgx_id, lmac_id;
+ u64 sa_base;
+ void *cgxd;
int err;
ctx_req.hdr.pcifunc = pcifunc;
@@ -4476,9 +4562,33 @@ void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf)
dev_err(rvu->dev, "CQ ctx disable failed\n");
}
+ /* reset HW config done for Switch headers */
+ rvu_npc_set_parse_mode(rvu, pcifunc, OTX2_PRIV_FLAGS_DEFAULT,
+ (PKIND_TX | PKIND_RX), 0, 0, 0, 0);
+
+ /* Undo the CGX and NPC config done for PTP */
+ if (pfvf->hw_rx_tstamp_en) {
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgxd = rvu_cgx_pdata(cgx_id, rvu);
+ mac_ops = get_mac_ops(cgxd);
+ mac_ops->mac_enadis_ptp_config(cgxd, lmac_id, false);
+ /* Undo NPC config done for PTP */
+ if (npc_config_ts_kpuaction(rvu, pf, pcifunc, false))
+ dev_err(rvu->dev, "NPC config for PTP failed\n");
+ pfvf->hw_rx_tstamp_en = false;
+ }
+
nix_ctx_free(rvu, pfvf);
nix_free_all_bandprof(rvu, pcifunc);
+
+ sa_base = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(nixlf));
+ if (FIELD_GET(RX_SA_BASE, sa_base)) {
+ err = rvu_cpt_ctx_flush(rvu, pcifunc);
+ if (err)
+ dev_err(rvu->dev,
+ "CPT ctx flush failed with error: %d\n", err);
+ }
}
#define NIX_AF_LFX_TX_CFG_PTP_EN BIT_ULL(32)
@@ -4579,6 +4689,119 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu,
return 0;
}
+#define IPSEC_GEN_CFG_EGRP GENMASK_ULL(50, 48)
+#define IPSEC_GEN_CFG_OPCODE GENMASK_ULL(47, 32)
+#define IPSEC_GEN_CFG_PARAM1 GENMASK_ULL(31, 16)
+#define IPSEC_GEN_CFG_PARAM2 GENMASK_ULL(15, 0)
+
+#define CPT_INST_QSEL_BLOCK GENMASK_ULL(28, 24)
+#define CPT_INST_QSEL_PF_FUNC GENMASK_ULL(23, 8)
+#define CPT_INST_QSEL_SLOT GENMASK_ULL(7, 0)
+
+static void nix_inline_ipsec_cfg(struct rvu *rvu, struct nix_inline_ipsec_cfg *req,
+ int blkaddr)
+{
+ u8 cpt_idx, cpt_blkaddr;
+ u64 val;
+
+ cpt_idx = (blkaddr == BLKADDR_NIX0) ? 0 : 1;
+ if (req->enable) {
+ val = 0;
+ /* Enable context prefetching */
+ if (!is_rvu_otx2(rvu))
+ val |= BIT_ULL(51);
+
+ /* Set OPCODE and EGRP */
+ val |= FIELD_PREP(IPSEC_GEN_CFG_EGRP, req->gen_cfg.egrp);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_OPCODE, req->gen_cfg.opcode);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM1, req->gen_cfg.param1);
+ val |= FIELD_PREP(IPSEC_GEN_CFG_PARAM2, req->gen_cfg.param2);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, val);
+
+ /* Set CPT queue for inline IPSec */
+ val = FIELD_PREP(CPT_INST_QSEL_SLOT, req->inst_qsel.cpt_slot);
+ val |= FIELD_PREP(CPT_INST_QSEL_PF_FUNC,
+ req->inst_qsel.cpt_pf_func);
+
+ if (!is_rvu_otx2(rvu)) {
+ cpt_blkaddr = (cpt_idx == 0) ? BLKADDR_CPT0 :
+ BLKADDR_CPT1;
+ val |= FIELD_PREP(CPT_INST_QSEL_BLOCK, cpt_blkaddr);
+ }
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ val);
+
+ /* Set CPT credit */
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ req->cpt_credit);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_IPSEC_GEN_CFG, 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_INST_QSEL(cpt_idx),
+ 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_RX_CPTX_CREDIT(cpt_idx),
+ 0x3FFFFF);
+ }
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_cfg *req,
+ struct msg_rsp *rsp)
+{
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX0);
+ if (is_block_implemented(rvu->hw, BLKADDR_CPT1))
+ nix_inline_ipsec_cfg(rvu, req, BLKADDR_NIX1);
+
+ return 0;
+}
+
+int rvu_mbox_handler_nix_inline_ipsec_lf_cfg(struct rvu *rvu,
+ struct nix_inline_ipsec_lf_cfg *req,
+ struct msg_rsp *rsp)
+{
+ int lf, blkaddr, err;
+ u64 val;
+
+ if (!is_block_implemented(rvu->hw, BLKADDR_CPT0))
+ return 0;
+
+ err = nix_get_nixlf(rvu, req->hdr.pcifunc, &lf, &blkaddr);
+ if (err)
+ return err;
+
+ if (req->enable) {
+ /* Set TT, TAG_CONST, SA_POW2_SIZE and LENM1_MAX */
+ val = (u64)req->ipsec_cfg0.tt << 44 |
+ (u64)req->ipsec_cfg0.tag_const << 20 |
+ (u64)req->ipsec_cfg0.sa_pow2_size << 16 |
+ req->ipsec_cfg0.lenm1_max;
+
+ if (blkaddr == BLKADDR_NIX1)
+ val |= BIT_ULL(46);
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), val);
+
+ /* Set SA_IDX_W and SA_IDX_MAX */
+ val = (u64)req->ipsec_cfg1.sa_idx_w << 32 |
+ req->ipsec_cfg1.sa_idx_max;
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), val);
+
+ /* Set SA base address */
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ req->sa_base_addr);
+ } else {
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG0(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_CFG1(lf), 0x0);
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_IPSEC_SA_BASE(lf),
+ 0x0);
+ }
+
+ return 0;
+}
void rvu_nix_reset_mac(struct rvu_pfvf *pfvf, int pcifunc)
{
bool from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
index 5efb4174e82d..bb6b42bbefa4 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c
@@ -3167,6 +3167,102 @@ int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req,
return 0;
}
+static int
+npc_set_var_len_offset_pkind(struct rvu *rvu, u16 pcifunc, u64 pkind,
+ u8 var_len_off, u8 var_len_off_mask, u8 shift_dir)
+{
+ struct npc_kpu_action0 *act0;
+ u8 shift_count = 0;
+ int blkaddr;
+ u64 val;
+
+ if (!var_len_off_mask)
+ return -EINVAL;
+
+ if (var_len_off_mask != 0xff) {
+ if (shift_dir)
+ shift_count = __ffs(var_len_off_mask);
+ else
+ shift_count = (8 - __fls(var_len_off_mask));
+ }
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, pcifunc);
+ if (blkaddr < 0) {
+ dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__);
+ return -EINVAL;
+ }
+ val = rvu_read64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind));
+ act0 = (struct npc_kpu_action0 *)&val;
+ act0->var_len_shift = shift_count;
+ act0->var_len_right = shift_dir;
+ act0->var_len_mask = var_len_off_mask;
+ act0->var_len_offset = var_len_off;
+ rvu_write64(rvu, blkaddr, NPC_AF_PKINDX_ACTION0(pkind), val);
+ return 0;
+}
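The shift count above lines the variable-length field up with a sub-byte mask: a right shift uses the lowest set bit of the mask (__ffs), a left shift uses 8 minus the highest set bit (__fls). A user-space sketch using compiler builtins in place of the kernel helpers, with 0x3c as an example mask:

#include <stdio.h>

static int shift_count(unsigned int mask, int shift_right)
{
	if (mask == 0 || mask == 0xff)	/* a zero mask is rejected earlier */
		return 0;
	if (shift_right)
		return __builtin_ctz(mask);		/* kernel __ffs() */
	return 8 - (31 - __builtin_clz(mask));		/* 8 - __fls() */
}

int main(void)
{
	/* prints: mask 0x3c: right -> 2, left -> 3 */
	printf("mask 0x3c: right -> %d, left -> %d\n",
	       shift_count(0x3c, 1), shift_count(0x3c, 0));
	return 0;
}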
+
+int rvu_npc_set_parse_mode(struct rvu *rvu, u16 pcifunc, u64 mode, u8 dir,
+ u64 pkind, u8 var_len_off, u8 var_len_off_mask,
+ u8 shift_dir)
+
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ int blkaddr, nixlf, rc, intf_mode;
+ int pf = rvu_get_pf(pcifunc);
+ u64 rxpkind, txpkind;
+ u8 cgx_id, lmac_id;
+
+ /* use default pkind to disable edsa/higig */
+ rxpkind = rvu_npc_get_pkind(rvu, pf);
+ txpkind = NPC_TX_DEF_PKIND;
+ intf_mode = NPC_INTF_MODE_DEF;
+
+ if (mode & OTX2_PRIV_FLAGS_CUSTOM) {
+ if (pkind == NPC_RX_CUSTOM_PRE_L2_PKIND) {
+ rc = npc_set_var_len_offset_pkind(rvu, pcifunc, pkind,
+ var_len_off,
+ var_len_off_mask,
+ shift_dir);
+ if (rc)
+ return rc;
+ }
+ rxpkind = pkind;
+ txpkind = pkind;
+ }
+
+ if (dir & PKIND_RX) {
+ /* rx pkind set req valid only for cgx mapped PFs */
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return 0;
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+
+ rc = cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id,
+ rxpkind);
+ if (rc)
+ return rc;
+ }
+
+ if (dir & PKIND_TX) {
+ /* Tx pkind set request valid if PCIFUNC has NIXLF attached */
+ rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr);
+ if (rc)
+ return rc;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf),
+ txpkind);
+ }
+
+ pfvf->intf_mode = intf_mode;
+ return 0;
+}
+
+int rvu_mbox_handler_npc_set_pkind(struct rvu *rvu, struct npc_set_pkind *req,
+ struct msg_rsp *rsp)
+{
+ return rvu_npc_set_parse_mode(rvu, req->hdr.pcifunc, req->mode,
+ req->dir, req->pkind, req->var_len_off,
+ req->var_len_off_mask, req->shift_dir);
+}
+
int rvu_mbox_handler_npc_read_base_steer_rule(struct rvu *rvu,
struct msg_req *req,
struct npc_mcam_read_base_rule_rsp *rsp)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
index 51ddc7b81d0b..ff2b21999f36 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c
@@ -1119,6 +1119,9 @@ find_rule:
rule->default_rule = req->default_rule;
rule->owner = owner;
rule->enable = enable;
+ rule->chan_mask = write_req.entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK;
+ rule->chan = write_req.entry_data.kw[0] & NPC_KEX_CHAN_MASK;
+ rule->chan &= rule->chan_mask;
if (is_npc_intf_tx(req->intf))
rule->intf = pfvf->nix_tx_intf;
else
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 21f1ed4e222f..22cd751613cd 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -236,6 +236,8 @@
#define NIX_AF_RX_DEF_OIP6_DSCP (0x02F8)
#define NIX_AF_RX_IPSEC_GEN_CFG (0x0300)
#define NIX_AF_RX_CPTX_INST_ADDR (0x0310)
+#define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3)
+#define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3)
#define NIX_AF_NDC_TX_SYNC (0x03F0)
#define NIX_AF_AQ_CFG (0x0400)
#define NIX_AF_AQ_BASE (0x0410)
@@ -525,6 +527,7 @@
#define CPT_AF_CTX_WBACK_LATENCY_PC (0x49448ull)
#define CPT_AF_CTX_PSH_PC (0x49450ull)
#define CPT_AF_CTX_PSH_LATENCY_PC (0x49458ull)
+#define CPT_AF_CTX_CAM_DATA(a) (0x49800ull | (u64)(a) << 3)
#define CPT_AF_RXC_TIME (0x50010ull)
#define CPT_AF_RXC_TIME_CFG (0x50018ull)
#define CPT_AF_RXC_DFRG (0x50020ull)
@@ -542,6 +545,7 @@
#define CPT_LF_CTL 0x10
#define CPT_LF_INPROG 0x40
#define CPT_LF_Q_GRP_PTR 0x120
+#define CPT_LF_CTX_FLUSH 0x510
#define NPC_AF_BLK_RST (0x00040)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 77ac96693f04..edc9367b1b95 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -62,6 +62,24 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* CPT Admin function Interrupt Vector Enumeration */
+enum cpt_af_int_vec_e {
+ CPT_AF_INT_VEC_FLT0 = 0x0,
+ CPT_AF_INT_VEC_FLT1 = 0x1,
+ CPT_AF_INT_VEC_RVU = 0x2,
+ CPT_AF_INT_VEC_RAS = 0x3,
+ CPT_AF_INT_VEC_CNT = 0x4,
+};
+
+enum cpt_10k_af_int_vec_e {
+ CPT_10K_AF_INT_VEC_FLT0 = 0x0,
+ CPT_10K_AF_INT_VEC_FLT1 = 0x1,
+ CPT_10K_AF_INT_VEC_FLT2 = 0x2,
+ CPT_10K_AF_INT_VEC_RVU = 0x3,
+ CPT_10K_AF_INT_VEC_RAS = 0x4,
+ CPT_10K_AF_INT_VEC_CNT = 0x5,
+};
+
/* NPA Admin function Interrupt Vector Enumeration */
enum npa_af_int_vec_e {
NPA_AF_INT_VEC_RVU = 0x0,
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index b92c267628b8..0048b5946712 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -3,11 +3,11 @@
# Makefile for Marvell's RVU Ethernet device drivers
#
-obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
-obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
+obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o otx2_ptp.o
+obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o otx2_ptp.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
+ otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \
otx2_devlink.o
rvu_nicvf-y := otx2_vf.o otx2_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 95f21dfdba48..fd4f083c699e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -88,7 +88,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 78df173e6df2..66da31f30d3e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -188,7 +188,7 @@ static int otx2_hw_get_mac_addr(struct otx2_nic *pfvf,
return PTR_ERR(msghdr);
}
rsp = (struct nix_get_mac_addr_rsp *)msghdr;
- ether_addr_copy(netdev->dev_addr, rsp->mac_addr);
+ eth_hw_addr_set(netdev, rsp->mac_addr);
mutex_unlock(&pfvf->mbox.lock);
return 0;
@@ -203,7 +203,7 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) {
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* update dmac field in vlan offload rule */
if (netif_running(netdev) &&
pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
@@ -231,7 +231,7 @@ int otx2_hw_set_mtu(struct otx2_nic *pfvf, int mtu)
return -ENOMEM;
}
- req->maxlen = pfvf->max_frs;
+ req->maxlen = pfvf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
err = otx2_sync_mbox_msg(&pfvf->mbox);
mutex_unlock(&pfvf->mbox.lock);
@@ -590,7 +590,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
u64 schq, parent;
u64 dwrr_val;
- dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox);
if (!req)
@@ -603,9 +603,7 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl)
/* Set topology e.t.c configuration */
if (lvl == NIX_TXSCH_LVL_SMQ) {
req->reg[0] = NIX_AF_SMQX_CFG(schq);
- req->regval[0] = ((pfvf->netdev->max_mtu + OTX2_ETH_HLEN) << 8)
- | OTX2_MIN_MTU;
-
+ req->regval[0] = ((u64)pfvf->tx_max_pktlen << 8) | OTX2_MIN_MTU;
req->regval[0] |= (0x20ULL << 51) | (0x80ULL << 39) |
(0x2ULL << 36);
req->num_regs++;
@@ -718,7 +716,7 @@ void otx2_sqb_flush(struct otx2_nic *pfvf)
int timeout = 1000;
ptr = (u64 *)otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
incr = (u64)qidx << 32;
while (timeout) {
val = otx2_atomic64_add(incr, ptr);
@@ -800,7 +798,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
aq->sq.ena = 1;
/* Only one SMQ is allocated, map all SQ's to that SMQ */
aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
- aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs);
+ aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
aq->sq.default_chan = pfvf->hw.tx_chan_base;
aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
aq->sq.sqb_aura = sqb_aura;
@@ -835,17 +833,19 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
if (err)
return err;
- err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
- TSO_HEADER_SIZE);
- if (err)
- return err;
+ if (qidx < pfvf->hw.tx_queues) {
+ err = qmem_alloc(pfvf->dev, &sq->tso_hdrs, qset->sqe_cnt,
+ TSO_HEADER_SIZE);
+ if (err)
+ return err;
+ }
sq->sqe_base = sq->sqe->base;
sq->sg = kcalloc(qset->sqe_cnt, sizeof(struct sg_list), GFP_KERNEL);
if (!sq->sg)
return -ENOMEM;
- if (pfvf->ptp) {
+ if (pfvf->ptp && qidx < pfvf->hw.tx_queues) {
err = qmem_alloc(pfvf->dev, &sq->timestamps, qset->sqe_cnt,
sizeof(*sq->timestamps));
if (err)
@@ -871,20 +871,27 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura)
static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
{
struct otx2_qset *qset = &pfvf->qset;
+ int err, pool_id, non_xdp_queues;
struct nix_aq_enq_req *aq;
struct otx2_cq_queue *cq;
- int err, pool_id;
cq = &qset->cq[qidx];
cq->cq_idx = qidx;
+ non_xdp_queues = pfvf->hw.rx_queues + pfvf->hw.tx_queues;
if (qidx < pfvf->hw.rx_queues) {
cq->cq_type = CQ_RX;
cq->cint_idx = qidx;
cq->cqe_cnt = qset->rqe_cnt;
- } else {
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_reg(&cq->xdp_rxq, pfvf->netdev, qidx, 0);
+ } else if (qidx < non_xdp_queues) {
cq->cq_type = CQ_TX;
cq->cint_idx = qidx - pfvf->hw.rx_queues;
cq->cqe_cnt = qset->sqe_cnt;
+ } else {
+ cq->cq_type = CQ_XDP;
+ cq->cint_idx = qidx - non_xdp_queues;
+ cq->cqe_cnt = qset->sqe_cnt;
}
cq->cqe_size = pfvf->qset.xqe_size;
@@ -991,7 +998,7 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
}
/* Initialize TX queues */
- for (qidx = 0; qidx < pfvf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pfvf->hw.tot_tx_queues; qidx++) {
u16 sqb_aura = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
err = otx2_sq_init(pfvf, qidx, sqb_aura);
@@ -1006,6 +1013,9 @@ int otx2_config_nix_queues(struct otx2_nic *pfvf)
return err;
}
+ pfvf->cq_op_addr = (__force u64 *)otx2_get_regaddr(pfvf,
+ NIX_LF_CQ_OP_STATUS);
+
/* Initialize work queue for receive buffer refill */
pfvf->refill_wrk = devm_kcalloc(pfvf->dev, pfvf->qset.cq_cnt,
sizeof(struct refill_work), GFP_KERNEL);
@@ -1035,7 +1045,7 @@ int otx2_config_nix(struct otx2_nic *pfvf)
/* Set RQ/SQ/CQ counts */
nixlf->rq_cnt = pfvf->hw.rx_queues;
- nixlf->sq_cnt = pfvf->hw.tx_queues;
+ nixlf->sq_cnt = pfvf->hw.tot_tx_queues;
nixlf->cq_cnt = pfvf->qset.cq_cnt;
nixlf->rss_sz = MAX_RSS_INDIR_TBL_SIZE;
nixlf->rss_grps = MAX_RSS_GROUPS;
@@ -1073,7 +1083,7 @@ void otx2_sq_free_sqbs(struct otx2_nic *pfvf)
int sqb, qidx;
u64 iova, pa;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
if (!sq->sqb_ptrs)
continue;
@@ -1285,7 +1295,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
stack_pages =
(num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
/* Initialize aura context */
err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
@@ -1305,7 +1315,7 @@ int otx2_sq_aura_pool_init(struct otx2_nic *pfvf)
goto fail;
/* Allocate pointers and free them to aura/pool */
- for (qidx = 0; qidx < hw->tx_queues; qidx++) {
+ for (qidx = 0; qidx < hw->tot_tx_queues; qidx++) {
pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
pool = &pfvf->qset.pool[pool_id];
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index a51ecd771d07..61e52812983f 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -171,6 +171,8 @@ struct otx2_hw {
struct otx2_rss_info rss_info;
u16 rx_queues;
u16 tx_queues;
+ u16 xdp_queues;
+ u16 tot_tx_queues;
u16 max_queues;
u16 pool_cnt;
u16 rqpool_cnt;
@@ -223,6 +225,7 @@ struct otx2_hw {
#define HW_TSO 0
#define CN10K_MBOX 1
#define CN10K_LMTST 2
+#define CN10K_RPM 3
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
@@ -263,6 +266,12 @@ struct otx2_ptp {
struct cyclecounter cycle_counter;
struct timecounter time_counter;
+
+ struct delayed_work extts_work;
+ u64 last_extts;
+ u64 thresh;
+
+ struct ptp_pin_desc extts_config;
};
#define OTX2_HW_TIMESTAMP_LEN 8
@@ -317,7 +326,7 @@ struct otx2_nic {
struct net_device *netdev;
struct dev_hw_ops *hw_ops;
void *iommu_domain;
- u16 max_frs;
+ u16 tx_max_pktlen;
u16 rbsize; /* Receive buffer size */
#define OTX2_FLAG_RX_TSTAMP_ENABLED BIT_ULL(0)
@@ -336,7 +345,9 @@ struct otx2_nic {
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
+ u64 *cq_op_addr;
+ struct bpf_prog *xdp_prog;
struct otx2_qset qset;
struct otx2_hw hw;
struct pci_dev *pdev;
@@ -452,6 +463,7 @@ static inline void otx2_setup_dev_hw_settings(struct otx2_nic *pfvf)
if (!is_dev_otx2(pfvf->pdev)) {
__set_bit(CN10K_MBOX, &hw->cap_flag);
__set_bit(CN10K_LMTST, &hw->cap_flag);
+ __set_bit(CN10K_RPM, &hw->cap_flag);
}
}
@@ -825,6 +837,9 @@ int otx2_open(struct net_device *netdev);
int otx2_stop(struct net_device *netdev);
int otx2_set_real_num_queues(struct net_device *netdev,
int tx_queues, int rx_queues);
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd);
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr);
+
/* MCAM filter related APIs */
int otx2_mcam_flow_init(struct otx2_nic *pf);
int otx2vf_mcam_flow_init(struct otx2_nic *pfvf);
@@ -845,6 +860,7 @@ int otx2_del_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_add_macfilter(struct net_device *netdev, const u8 *mac);
int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable);
int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf);
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx);
u16 otx2_get_max_mtu(struct otx2_nic *pfvf);
/* tc support */
int otx2_init_tc(struct otx2_nic *nic);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
index 7ac3ef2fa06a..777a27047c8e 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c
@@ -108,13 +108,6 @@ int otx2_register_dl(struct otx2_nic *pfvf)
return -ENOMEM;
}
- err = devlink_register(dl);
- if (err) {
- dev_err(pfvf->dev, "devlink register failed with error %d\n", err);
- devlink_free(dl);
- return err;
- }
-
otx2_dl = devlink_priv(dl);
otx2_dl->dl = dl;
otx2_dl->pfvf = pfvf;
@@ -128,12 +121,10 @@ int otx2_register_dl(struct otx2_nic *pfvf)
goto err_dl;
}
- devlink_params_publish(dl);
-
+ devlink_register(dl);
return 0;
err_dl:
- devlink_unregister(dl);
devlink_free(dl);
return err;
}
@@ -141,16 +132,10 @@ err_dl:
void otx2_unregister_dl(struct otx2_nic *pfvf)
{
struct otx2_devlink *otx2_dl = pfvf->dl;
- struct devlink *dl;
-
- if (!otx2_dl || !otx2_dl->dl)
- return;
-
- dl = otx2_dl->dl;
+ struct devlink *dl = otx2_dl->dl;
+ devlink_unregister(dl);
devlink_params_unregister(dl, otx2_dl_params,
ARRAY_SIZE(otx2_dl_params));
-
- devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
index dbfa3bc39e34..80d4ce61f442 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c
@@ -121,14 +121,16 @@ static void otx2_get_strings(struct net_device *netdev, u32 sset, u8 *data)
otx2_get_qset_strings(pfvf, &data, 0);
- for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_rxstat%d: ", stats);
- data += ETH_GSTRING_LEN;
- }
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ for (stats = 0; stats < CGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
- for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
- sprintf(data, "cgx_txstat%d: ", stats);
- data += ETH_GSTRING_LEN;
+ for (stats = 0; stats < CGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "cgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
}
strcpy(data, "reset_count");
@@ -205,11 +207,15 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
[otx2_drv_stats[stat].index]);
otx2_get_qset_stats(pfvf, stats, &data);
- otx2_update_lmac_stats(pfvf);
- for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_rx_stats[stat];
- for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
- *(data++) = pfvf->hw.cgx_tx_stats[stat];
+
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag)) {
+ otx2_update_lmac_stats(pfvf);
+ for (stat = 0; stat < CGX_RX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_rx_stats[stat];
+ for (stat = 0; stat < CGX_TX_STATS_COUNT; stat++)
+ *(data++) = pfvf->hw.cgx_tx_stats[stat];
+ }
+
*(data++) = pfvf->reset_count;
fec_corr_blks = pfvf->hw.cgx_fec_corr_blks;
@@ -242,18 +248,19 @@ static void otx2_get_ethtool_stats(struct net_device *netdev,
static int otx2_get_sset_count(struct net_device *netdev, int sset)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
- int qstats_count;
+ int qstats_count, mac_stats = 0;
if (sset != ETH_SS_STATS)
return -EINVAL;
qstats_count = otx2_n_queue_stats *
(pfvf->hw.rx_queues + pfvf->hw.tx_queues);
+ if (!test_bit(CN10K_RPM, &pfvf->hw.cap_flag))
+ mac_stats = CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT;
otx2_update_lmac_fec_stats(pfvf);
return otx2_n_dev_stats + otx2_n_drv_stats + qstats_count +
- CGX_RX_STATS_COUNT + CGX_TX_STATS_COUNT + OTX2_FEC_STATS_CNT
- + 1;
+ mac_stats + OTX2_FEC_STATS_CNT + 1;
}
/* Get no of queues device supports and current queue count */
@@ -1168,9 +1175,8 @@ static int otx2_set_link_ksettings(struct net_device *netdev,
otx2_get_link_ksettings(netdev, &cur_ks);
/* Check requested modes against supported modes by hardware */
- if (!bitmap_subset(cmd->link_modes.advertising,
- cur_ks.link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ if (!linkmode_subset(cmd->link_modes.advertising,
+ cur_ks.link_modes.supported))
return -EINVAL;
mutex_lock(&mbox->lock);
@@ -1340,6 +1346,7 @@ static const struct ethtool_ops otx2vf_ethtool_ops = {
.get_pauseparam = otx2_get_pauseparam,
.set_pauseparam = otx2_set_pauseparam,
.get_link_ksettings = otx2vf_get_link_ksettings,
+ .get_ts_info = otx2_get_ts_info,
};
void otx2vf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 53df7fff92c4..1e0d0c9c1dac 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -13,6 +13,8 @@
#include <linux/if_vlan.h>
#include <linux/iommu.h>
#include <net/ip.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -48,9 +50,15 @@ static int otx2_config_hw_rx_tstamp(struct otx2_nic *pfvf, bool enable);
static int otx2_change_mtu(struct net_device *netdev, int new_mtu)
{
+ struct otx2_nic *pf = netdev_priv(netdev);
bool if_up = netif_running(netdev);
int err = 0;
+ if (pf->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
if (if_up)
otx2_stop(netdev);
@@ -1180,7 +1188,7 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data)
}
/* SQ */
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT);
val = otx2_atomic64_add((qidx << 44), ptr);
otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) |
@@ -1283,7 +1291,7 @@ static void otx2_free_sq_res(struct otx2_nic *pf)
otx2_ctx_disable(&pf->mbox, NIX_AQ_CTYPE_SQ, false);
/* Free SQB pointers */
otx2_sq_free_sqbs(pf);
- for (qidx = 0; qidx < pf->hw.tx_queues; qidx++) {
+ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) {
sq = &qset->sq[qidx];
qmem_free(pf->dev, sq->sqe);
qmem_free(pf->dev, sq->tso_hdrs);
@@ -1304,16 +1312,14 @@ static int otx2_get_rbuf_size(struct otx2_nic *pf, int mtu)
* NIX transfers entire data using 6 segments/buffers and writes
* a CQE_RX descriptor with those segment addresses. First segment
* has additional data prepended to packet. Also software omits a
- * headroom of 128 bytes and sizeof(struct skb_shared_info) in
- * each segment. Hence the total size of memory needed
- * to receive a packet with 'mtu' is:
+ * headroom of 128 bytes in each segment. Hence the total size of
+ * memory needed to receive a packet with 'mtu' is:
* frame size = mtu + additional data;
- * memory = frame_size + (headroom + struct skb_shared_info size) * 6;
+ * memory = frame_size + headroom * 6;
* each receive buffer size = memory / 6;
*/
frame_size = mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
- total_size = frame_size + (OTX2_HEAD_ROOM +
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info))) * 6;
+ total_size = frame_size + OTX2_HEAD_ROOM * 6;
rbuf_size = total_size / 6;
return ALIGN(rbuf_size, 2048);
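For reference, a minimal standalone sketch of the receive-buffer sizing arithmetic described in the comment above; the 22-byte Ethernet+VLAN header, 8-byte timestamp and 128-byte headroom constants here are illustrative assumptions, not the driver's definitions:

static unsigned int example_rbuf_size(unsigned int mtu)
{
	/* frame = mtu + Ethernet/VLAN header + hardware timestamp */
	unsigned int frame_size = mtu + 22 + 8;
	/* one 128-byte headroom per each of the six segments */
	unsigned int total_size = frame_size + 128 * 6;
	unsigned int rbuf_size = total_size / 6;

	/* round up to a 2048-byte multiple, e.g. mtu 1500 -> 2048 */
	return (rbuf_size + 2047) & ~2047u;
}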
@@ -1332,10 +1338,11 @@ static int otx2_init_hw_resources(struct otx2_nic *pf)
* so, aura count = pool count.
*/
hw->rqpool_cnt = hw->rx_queues;
- hw->sqpool_cnt = hw->tx_queues;
+ hw->sqpool_cnt = hw->tot_tx_queues;
hw->pool_cnt = hw->rqpool_cnt + hw->sqpool_cnt;
- pf->max_frs = pf->netdev->mtu + OTX2_ETH_HLEN + OTX2_HW_TIMESTAMP_LEN;
+ /* Maximum hardware supported transmit length */
+ pf->tx_max_pktlen = pf->netdev->max_mtu + OTX2_ETH_HLEN;
pf->rbsize = otx2_get_rbuf_size(pf, pf->netdev->mtu);
@@ -1493,6 +1500,44 @@ static void otx2_free_hw_resources(struct otx2_nic *pf)
mutex_unlock(&mbox->lock);
}
+static void otx2_do_set_rx_mode(struct otx2_nic *pf)
+{
+ struct net_device *netdev = pf->netdev;
+ struct nix_rx_mode *req;
+ bool promisc = false;
+
+ if (!(netdev->flags & IFF_UP))
+ return;
+
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
+ promisc = true;
+ }
+
+ /* Write unicast address to mcam entries or del from mcam */
+ if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
+ __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return;
+ }
+
+ req->mode = NIX_RX_MODE_UCAST;
+
+ if (promisc)
+ req->mode |= NIX_RX_MODE_PROMISC;
+ if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
+ req->mode |= NIX_RX_MODE_ALLMULTI;
+
+ req->mode |= NIX_RX_MODE_USE_MCE;
+
+ otx2_sync_mbox_msg(&pf->mbox);
+ mutex_unlock(&pf->mbox.lock);
+}
+
int otx2_open(struct net_device *netdev)
{
struct otx2_nic *pf = netdev_priv(netdev);
@@ -1503,7 +1548,7 @@ int otx2_open(struct net_device *netdev)
netif_carrier_off(netdev);
- pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tx_queues;
+ pf->qset.cq_cnt = pf->hw.rx_queues + pf->hw.tot_tx_queues;
/* RQ and SQs are mapped to different CQs,
* so find out max CQ IRQs (i.e CINTs) needed.
*/
@@ -1523,7 +1568,7 @@ int otx2_open(struct net_device *netdev)
if (!qset->cq)
goto err_free_mem;
- qset->sq = kcalloc(pf->hw.tx_queues,
+ qset->sq = kcalloc(pf->hw.tot_tx_queues,
sizeof(struct otx2_snd_queue), GFP_KERNEL);
if (!qset->sq)
goto err_free_mem;
@@ -1544,11 +1589,20 @@ int otx2_open(struct net_device *netdev)
/* RQ0 & SQ0 are mapped to CINT0 and so on..
* 'cq_ids[0]' points to RQ's CQ and
* 'cq_ids[1]' points to SQ's CQ and
+ * 'cq_ids[2]' points to XDP's CQ.
*/
cq_poll->cq_ids[CQ_RX] =
(qidx < pf->hw.rx_queues) ? qidx : CINT_INVALID_CQ;
cq_poll->cq_ids[CQ_TX] = (qidx < pf->hw.tx_queues) ?
qidx + pf->hw.rx_queues : CINT_INVALID_CQ;
+ if (pf->xdp_prog)
+ cq_poll->cq_ids[CQ_XDP] = (qidx < pf->hw.xdp_queues) ?
+ (qidx + pf->hw.rx_queues +
+ pf->hw.tx_queues) :
+ CINT_INVALID_CQ;
+ else
+ cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
+
cq_poll->dev = (void *)pf;
netif_napi_add(netdev, &cq_poll->napi,
otx2_napi_handler, NAPI_POLL_WEIGHT);
@@ -1646,6 +1700,8 @@ int otx2_open(struct net_device *netdev)
if (err)
goto err_tx_stop_queues;
+ otx2_do_set_rx_mode(pf);
+
return 0;
err_tx_stop_queues:
@@ -1750,7 +1806,7 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev)
/* Check for minimum and maximum packet length */
if (skb->len <= ETH_HLEN ||
- (!skb_shinfo(skb)->gso_size && skb->len > pf->max_frs)) {
+ (!skb_shinfo(skb)->gso_size && skb->len > pf->tx_max_pktlen)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -1791,43 +1847,11 @@ static void otx2_set_rx_mode(struct net_device *netdev)
queue_work(pf->otx2_wq, &pf->rx_mode_work);
}
-static void otx2_do_set_rx_mode(struct work_struct *work)
+static void otx2_rx_mode_wrk_handler(struct work_struct *work)
{
struct otx2_nic *pf = container_of(work, struct otx2_nic, rx_mode_work);
- struct net_device *netdev = pf->netdev;
- struct nix_rx_mode *req;
- bool promisc = false;
-
- if (!(netdev->flags & IFF_UP))
- return;
-
- if ((netdev->flags & IFF_PROMISC) ||
- (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) {
- promisc = true;
- }
-
- /* Write unicast address to mcam entries or del from mcam */
- if (!promisc && netdev->priv_flags & IFF_UNICAST_FLT)
- __dev_uc_sync(netdev, otx2_add_macfilter, otx2_del_macfilter);
-
- mutex_lock(&pf->mbox.lock);
- req = otx2_mbox_alloc_msg_nix_set_rx_mode(&pf->mbox);
- if (!req) {
- mutex_unlock(&pf->mbox.lock);
- return;
- }
-
- req->mode = NIX_RX_MODE_UCAST;
-
- if (promisc)
- req->mode |= NIX_RX_MODE_PROMISC;
- if (netdev->flags & (IFF_ALLMULTI | IFF_MULTICAST))
- req->mode |= NIX_RX_MODE_ALLMULTI;
- req->mode |= NIX_RX_MODE_USE_MCE;
-
- otx2_sync_mbox_msg(&pf->mbox);
- mutex_unlock(&pf->mbox.lock);
+ otx2_do_set_rx_mode(pf);
}
static int otx2_set_features(struct net_device *netdev,
@@ -1967,7 +1991,7 @@ static int otx2_config_hw_tx_tstamp(struct otx2_nic *pfvf, bool enable)
return 0;
}
-static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
+int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config config;
@@ -2023,8 +2047,9 @@ static int otx2_config_hwtstamp(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
}
+EXPORT_SYMBOL(otx2_config_hwtstamp);
-static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
+int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
{
struct otx2_nic *pfvf = netdev_priv(netdev);
struct hwtstamp_config *cfg = &pfvf->tstamp;
@@ -2039,6 +2064,7 @@ static int otx2_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
return -EOPNOTSUPP;
}
}
+EXPORT_SYMBOL(otx2_ioctl);
static int otx2_do_set_vf_mac(struct otx2_nic *pf, int vf, const u8 *mac)
{
@@ -2281,6 +2307,111 @@ static int otx2_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
+static int otx2_xdp_xmit_tx(struct otx2_nic *pf, struct xdp_frame *xdpf,
+ int qidx)
+{
+ struct page *page;
+ u64 dma_addr;
+ int err = 0;
+
+ dma_addr = otx2_dma_map_page(pf, virt_to_page(xdpf->data),
+ offset_in_page(xdpf->data), xdpf->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pf->dev, dma_addr))
+ return -ENOMEM;
+
+ err = otx2_xdp_sq_append_pkt(pf, dma_addr, xdpf->len, qidx);
+ if (!err) {
+ otx2_dma_unmap_page(pf, dma_addr, xdpf->len, DMA_TO_DEVICE);
+ page = virt_to_page(xdpf->data);
+ put_page(page);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int otx2_xdp_xmit(struct net_device *netdev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+ int qidx = smp_processor_id();
+ struct otx2_snd_queue *sq;
+ int drops = 0, i;
+
+ if (!netif_running(netdev))
+ return -ENETDOWN;
+
+ qidx += pf->hw.tx_queues;
+ sq = pf->xdp_prog ? &pf->qset.sq[qidx] : NULL;
+
+ /* Abort xmit if the XDP queue is not available */
+ if (unlikely(!sq))
+ return -ENXIO;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+ int err;
+
+ err = otx2_xdp_xmit_tx(pf, xdpf, qidx);
+ if (err)
+ drops++;
+ }
+ return n - drops;
+}
+
+static int otx2_xdp_setup(struct otx2_nic *pf, struct bpf_prog *prog)
+{
+ struct net_device *dev = pf->netdev;
+ bool if_up = netif_running(pf->netdev);
+ struct bpf_prog *old_prog;
+
+ if (prog && dev->mtu > MAX_XDP_MTU) {
+ netdev_warn(dev, "Jumbo frames not yet supported with XDP\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (if_up)
+ otx2_stop(pf->netdev);
+
+ old_prog = xchg(&pf->xdp_prog, prog);
+
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (pf->xdp_prog)
+ bpf_prog_add(pf->xdp_prog, pf->hw.rx_queues - 1);
+
+ /* Network stack and XDP share the same Rx queues.
+ * Use separate Tx queues for XDP and the network stack.
+ */
+ if (pf->xdp_prog)
+ pf->hw.xdp_queues = pf->hw.rx_queues;
+ else
+ pf->hw.xdp_queues = 0;
+
+ pf->hw.tot_tx_queues = pf->hw.tx_queues + pf->hw.xdp_queues;
+
+ if (if_up)
+ otx2_open(pf->netdev);
+
+ return 0;
+}
+
+static int otx2_xdp(struct net_device *netdev, struct netdev_bpf *xdp)
+{
+ struct otx2_nic *pf = netdev_priv(netdev);
+
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return otx2_xdp_setup(pf, xdp->prog);
+ default:
+ return -EINVAL;
+ }
+}
+
static int otx2_set_vf_permissions(struct otx2_nic *pf, int vf,
int req_perm)
{
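The otx2_xdp_xmit() hunk above picks one XDP send queue per CPU, placed after the regular network-stack TX queues; a hypothetical helper showing just that mapping:

static unsigned int example_xdp_txq(unsigned int cpu, unsigned int tx_queues)
{
	/* XDP SQs are appended after the stack's SQs, one per CPU/RX queue */
	return tx_queues + cpu;
}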
@@ -2348,6 +2479,8 @@ static const struct net_device_ops otx2_netdev_ops = {
.ndo_set_vf_mac = otx2_set_vf_mac,
.ndo_set_vf_vlan = otx2_set_vf_vlan,
.ndo_get_vf_config = otx2_get_vf_config,
+ .ndo_bpf = otx2_xdp,
+ .ndo_xdp_xmit = otx2_xdp_xmit,
.ndo_setup_tc = otx2_setup_tc,
.ndo_set_vf_trust = otx2_ndo_set_vf_trust,
};
@@ -2358,7 +2491,7 @@ static int otx2_wq_init(struct otx2_nic *pf)
if (!pf->otx2_wq)
return -ENOMEM;
- INIT_WORK(&pf->rx_mode_work, otx2_do_set_rx_mode);
+ INIT_WORK(&pf->rx_mode_work, otx2_rx_mode_wrk_handler);
INIT_WORK(&pf->reset_task, otx2_reset_task);
return 0;
}
@@ -2489,6 +2622,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->pdev = pdev;
hw->rx_queues = qcount;
hw->tx_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->max_queues = qcount;
num_vec = pci_msix_vec_count(pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
index ec9e49985c2c..0ef68fdd1f26 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
@@ -27,6 +27,23 @@ static int otx2_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
return otx2_sync_mbox_msg(&ptp->nic->mbox);
}
+static int ptp_set_thresh(struct otx2_ptp *ptp, u64 thresh)
+{
+ struct ptp_req *req;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return -ENOMEM;
+
+ req->op = PTP_OP_SET_THRESH;
+ req->thresh = thresh;
+
+ return otx2_sync_mbox_msg(&ptp->nic->mbox);
+}
+
static u64 ptp_cc_read(const struct cyclecounter *cc)
{
struct otx2_ptp *ptp = container_of(cc, struct otx2_ptp, cycle_counter);
@@ -55,6 +72,33 @@ static u64 ptp_cc_read(const struct cyclecounter *cc)
return rsp->clk;
}
+static u64 ptp_tstmp_read(struct otx2_ptp *ptp)
+{
+ struct ptp_req *req;
+ struct ptp_rsp *rsp;
+ int err;
+
+ if (!ptp->nic)
+ return 0;
+
+ req = otx2_mbox_alloc_msg_ptp_op(&ptp->nic->mbox);
+ if (!req)
+ return 0;
+
+ req->op = PTP_OP_GET_TSTMP;
+
+ err = otx2_sync_mbox_msg(&ptp->nic->mbox);
+ if (err)
+ return 0;
+
+ rsp = (struct ptp_rsp *)otx2_mbox_get_rsp(&ptp->nic->mbox.mbox, 0,
+ &req->hdr);
+ if (IS_ERR(rsp))
+ return 0;
+
+ return rsp->clk;
+}
+
static int otx2_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta)
{
struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
@@ -102,9 +146,73 @@ static int otx2_ptp_settime(struct ptp_clock_info *ptp_info,
return 0;
}
+static int otx2_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -1;
+ }
+ return 0;
+}
+
+static void otx2_ptp_extts_check(struct work_struct *work)
+{
+ struct otx2_ptp *ptp = container_of(work, struct otx2_ptp,
+ extts_work.work);
+ struct ptp_clock_event event;
+ u64 tstmp, new_thresh;
+
+ mutex_lock(&ptp->nic->mbox.lock);
+ tstmp = ptp_tstmp_read(ptp);
+ mutex_unlock(&ptp->nic->mbox.lock);
+
+ if (tstmp != ptp->last_extts) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = timecounter_cyc2time(&ptp->time_counter, tstmp);
+ ptp_clock_event(ptp->ptp_clock, &event);
+ ptp->last_extts = tstmp;
+
+ new_thresh = tstmp % 500000000;
+ if (ptp->thresh != new_thresh) {
+ mutex_lock(&ptp->nic->mbox.lock);
+ ptp_set_thresh(ptp, new_thresh);
+ mutex_unlock(&ptp->nic->mbox.lock);
+ ptp->thresh = new_thresh;
+ }
+ }
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+}
+
static int otx2_ptp_enable(struct ptp_clock_info *ptp_info,
struct ptp_clock_request *rq, int on)
{
+ struct otx2_ptp *ptp = container_of(ptp_info, struct otx2_ptp,
+ ptp_info);
+ int pin;
+
+ if (!ptp->nic)
+ return -ENODEV;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ pin = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS,
+ rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ if (on)
+ schedule_delayed_work(&ptp->extts_work, msecs_to_jiffies(200));
+ else
+ cancel_delayed_work_sync(&ptp->extts_work);
+ return 0;
+ default:
+ break;
+ }
return -EOPNOTSUPP;
}
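A minimal model of the EXTTS polling in otx2_ptp_extts_check() above, assuming only that a changed latched timestamp means a new event and that the threshold is re-armed to the offset within a 500 ms window, as in the patch:

static int example_extts_poll(unsigned long long tstmp,
			      unsigned long long *last_extts,
			      unsigned long long *thresh)
{
	if (tstmp == *last_extts)
		return 0;			/* nothing new latched */

	*last_extts = tstmp;
	*thresh = tstmp % 500000000ULL;		/* offset in current 0.5 s window */
	return 1;				/* caller raises PTP_CLOCK_EXTTS */
}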
@@ -115,6 +223,11 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
struct ptp_req *req;
int err;
+ if (is_otx2_lbkvf(pfvf->pdev)) {
+ pfvf->ptp = NULL;
+ return 0;
+ }
+
mutex_lock(&pfvf->mbox.lock);
/* check if PTP block is available */
req = otx2_mbox_alloc_msg_ptp_op(&pfvf->mbox);
@@ -149,20 +262,28 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
timecounter_init(&ptp_ptr->time_counter, &ptp_ptr->cycle_counter,
ktime_to_ns(ktime_get_real()));
+ snprintf(ptp_ptr->extts_config.name, sizeof(ptp_ptr->extts_config.name), "TSTAMP");
+ ptp_ptr->extts_config.index = 0;
+ ptp_ptr->extts_config.func = PTP_PF_NONE;
+
ptp_ptr->ptp_info = (struct ptp_clock_info) {
.owner = THIS_MODULE,
.name = "OcteonTX2 PTP",
.max_adj = 1000000000ull,
- .n_ext_ts = 0,
- .n_pins = 0,
+ .n_ext_ts = 1,
+ .n_pins = 1,
.pps = 0,
+ .pin_config = &ptp_ptr->extts_config,
.adjfine = otx2_ptp_adjfine,
.adjtime = otx2_ptp_adjtime,
.gettime64 = otx2_ptp_gettime,
.settime64 = otx2_ptp_settime,
.enable = otx2_ptp_enable,
+ .verify = otx2_ptp_verify_pin,
};
+ INIT_DELAYED_WORK(&ptp_ptr->extts_work, otx2_ptp_extts_check);
+
ptp_ptr->ptp_clock = ptp_clock_register(&ptp_ptr->ptp_info, pfvf->dev);
if (IS_ERR_OR_NULL(ptp_ptr->ptp_clock)) {
err = ptp_ptr->ptp_clock ?
@@ -176,6 +297,7 @@ int otx2_ptp_init(struct otx2_nic *pfvf)
error:
return err;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_init);
void otx2_ptp_destroy(struct otx2_nic *pfvf)
{
@@ -188,6 +310,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
kfree(ptp);
pfvf->ptp = NULL;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_destroy);
int otx2_ptp_clock_index(struct otx2_nic *pfvf)
{
@@ -196,6 +319,7 @@ int otx2_ptp_clock_index(struct otx2_nic *pfvf)
return ptp_clock_index(pfvf->ptp->ptp_clock);
}
+EXPORT_SYMBOL_GPL(otx2_ptp_clock_index);
int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
{
@@ -206,3 +330,8 @@ int otx2_ptp_tstamp2time(struct otx2_nic *pfvf, u64 tstamp, u64 *tsns)
return 0;
}
+EXPORT_SYMBOL_GPL(otx2_ptp_tstamp2time);
+
+MODULE_AUTHOR("Sunil Goutham <sgoutham@marvell.com>");
+MODULE_DESCRIPTION("Marvell RVU NIC PTP Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index f42b1d4e0c67..0cc6353254bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -8,6 +8,8 @@
#include <linux/etherdevice.h>
#include <net/ip.h>
#include <net/tso.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include "otx2_reg.h"
#include "otx2_common.h"
@@ -17,6 +19,35 @@
#include "cn10k.h"
#define CQE_ADDR(CQ, idx) ((CQ)->cqe_base + ((CQ)->cqe_size * (idx)))
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq);
+
+static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
+ struct otx2_cq_queue *cq)
+{
+ u64 incr = (u64)(cq->cq_idx) << 32;
+ u64 status;
+
+ status = otx2_atomic64_fetch_add(incr, pfvf->cq_op_addr);
+
+ if (unlikely(status & BIT_ULL(CQ_OP_STAT_OP_ERR) ||
+ status & BIT_ULL(CQ_OP_STAT_CQ_ERR))) {
+ dev_err(pfvf->dev, "CQ stopped due to error");
+ return -EINVAL;
+ }
+
+ cq->cq_tail = status & 0xFFFFF;
+ cq->cq_head = (status >> 20) & 0xFFFFF;
+ if (cq->cq_tail < cq->cq_head)
+ cq->pend_cqe = (cq->cqe_cnt - cq->cq_head) +
+ cq->cq_tail;
+ else
+ cq->pend_cqe = cq->cq_tail - cq->cq_head;
+
+ return 0;
+}
static struct nix_cqe_hdr_s *otx2_get_next_cqe(struct otx2_cq_queue *cq)
{
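A standalone sketch of how otx2_nix_cq_op_status() above derives the pending CQE count from the status word; the bit positions mirror the code, everything else is illustrative:

static unsigned int example_pending_cqes(unsigned long long status,
					 unsigned int cqe_cnt)
{
	unsigned int tail = status & 0xFFFFF;		/* bits [19:0]  */
	unsigned int head = (status >> 20) & 0xFFFFF;	/* bits [39:20] */

	if (tail < head)				/* ring wrapped around */
		return (cqe_cnt - head) + tail;
	return tail - head;
}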
@@ -73,6 +104,24 @@ static void otx2_dma_unmap_skb_frags(struct otx2_nic *pfvf, struct sg_list *sg)
sg->num_segs = 0;
}
+static void otx2_xdp_snd_pkt_handler(struct otx2_nic *pfvf,
+ struct otx2_snd_queue *sq,
+ struct nix_cqe_tx_s *cqe)
+{
+ struct nix_send_comp_s *snd_comp = &cqe->comp;
+ struct sg_list *sg;
+ struct page *page;
+ u64 pa;
+
+ sg = &sq->sg[snd_comp->sqe_id];
+
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, sg->dma_addr[0]);
+ otx2_dma_unmap_page(pfvf, sg->dma_addr[0],
+ sg->size[0], DMA_TO_DEVICE);
+ page = virt_to_page(phys_to_virt(pa));
+ put_page(page);
+}
+
static void otx2_snd_pkt_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq,
struct otx2_snd_queue *sq,
@@ -132,8 +181,9 @@ static void otx2_set_rxtstamp(struct otx2_nic *pfvf,
skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(tsns);
}
-static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
- u64 iova, int len, struct nix_rx_parse_s *parse)
+static bool otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
+ u64 iova, int len, struct nix_rx_parse_s *parse,
+ int qidx)
{
struct page *page;
int off = 0;
@@ -154,11 +204,22 @@ static void otx2_skb_add_frag(struct otx2_nic *pfvf, struct sk_buff *skb,
}
page = virt_to_page(va);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- va - page_address(page) + off, len - off, pfvf->rbsize);
+ if (likely(skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)) {
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ va - page_address(page) + off,
+ len - off, pfvf->rbsize);
+
+ otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
+ pfvf->rbsize, DMA_FROM_DEVICE);
+ return true;
+ }
- otx2_dma_unmap_page(pfvf, iova - OTX2_HEAD_ROOM,
- pfvf->rbsize, DMA_FROM_DEVICE);
+ /* If more than MAX_SKB_FRAGS fragments are received then
+ * give back those buffer pointers to hardware for reuse.
+ */
+ pfvf->hw_ops->aura_freeptr(pfvf, qidx, iova & ~0x07ULL);
+
+ return false;
}
static void otx2_set_rxhash(struct otx2_nic *pfvf,
@@ -285,6 +346,10 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
return;
}
+ if (pfvf->xdp_prog)
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ return;
+
skb = napi_get_frags(napi);
if (unlikely(!skb))
return;
@@ -296,9 +361,9 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
seg_addr = &sg->seg_addr;
seg_size = (void *)sg;
for (seg = 0; seg < sg->segs; seg++, seg_addr++) {
- otx2_skb_add_frag(pfvf, skb, *seg_addr, seg_size[seg],
- parse);
- cq->pool_ptrs++;
+ if (otx2_skb_add_frag(pfvf, skb, *seg_addr,
+ seg_size[seg], parse, cq->cq_idx))
+ cq->pool_ptrs++;
}
start += sizeof(*sg);
}
@@ -318,7 +383,14 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_rx_s *)CQE_ADDR(cq, cq->cq_head);
if (cqe->hdr.cqe_type == NIX_XQE_TYPE_INVALID ||
!cqe->sg.seg_addr) {
@@ -334,17 +406,13 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
((u64)cq->cq_idx << 32) | processed_cqe);
- if (unlikely(!cq->pool_ptrs))
- return 0;
- /* Refill pool with new buffers */
- pfvf->hw_ops->refill_pool_ptrs(pfvf, cq);
-
return processed_cqe;
}
@@ -364,22 +432,36 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq, int budget)
{
- int tx_pkts = 0, tx_bytes = 0;
+ int tx_pkts = 0, tx_bytes = 0, qidx;
struct nix_cqe_tx_s *cqe;
int processed_cqe = 0;
- while (likely(processed_cqe < budget)) {
+ if (cq->pend_cqe >= budget)
+ goto process_cqe;
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return 0;
+
+process_cqe:
+ while (likely(processed_cqe < budget) && cq->pend_cqe) {
cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
if (unlikely(!cqe)) {
if (!processed_cqe)
return 0;
break;
}
- otx2_snd_pkt_handler(pfvf, cq, &pfvf->qset.sq[cq->cint_idx],
- cqe, budget, &tx_pkts, &tx_bytes);
-
+ if (cq->cq_type == CQ_XDP) {
+ qidx = cq->cq_idx - pfvf->hw.rx_queues;
+ otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx],
+ cqe);
+ } else {
+ otx2_snd_pkt_handler(pfvf, cq,
+ &pfvf->qset.sq[cq->cint_idx],
+ cqe, budget, &tx_pkts, &tx_bytes);
+ }
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
processed_cqe++;
+ cq->pend_cqe--;
}
/* Free CQEs to HW */
@@ -402,6 +484,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
int otx2_napi_handler(struct napi_struct *napi, int budget)
{
+ struct otx2_cq_queue *rx_cq = NULL;
struct otx2_cq_poll *cq_poll;
int workdone = 0, cq_idx, i;
struct otx2_cq_queue *cq;
@@ -412,17 +495,13 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
pfvf = (struct otx2_nic *)cq_poll->dev;
qset = &pfvf->qset;
- for (i = CQS_PER_CINT - 1; i >= 0; i--) {
+ for (i = 0; i < CQS_PER_CINT; i++) {
cq_idx = cq_poll->cq_ids[i];
if (unlikely(cq_idx == CINT_INVALID_CQ))
continue;
cq = &qset->cq[cq_idx];
if (cq->cq_type == CQ_RX) {
- /* If the RQ refill WQ task is running, skip napi
- * scheduler for this queue.
- */
- if (cq->refill_task_sched)
- continue;
+ rx_cq = cq;
workdone += otx2_rx_napi_handler(pfvf, napi,
cq, budget);
} else {
@@ -430,6 +509,8 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
}
+ if (rx_cq && rx_cq->pool_ptrs)
+ pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -936,10 +1017,19 @@ void otx2_cleanup_rx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
int processed_cqe = 0;
u64 iova, pa;
- while ((cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq))) {
- if (!cqe->sg.subdc)
- continue;
+ if (pfvf->xdp_prog)
+ xdp_rxq_info_unreg(&cq->xdp_rxq);
+
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_rx_s *)otx2_get_next_cqe(cq);
processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
if (cqe->sg.segs > 1) {
otx2_free_rcv_seg(pfvf, cqe, cq->cq_idx);
continue;
@@ -965,7 +1055,16 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
sq = &pfvf->qset.sq[cq->cint_idx];
- while ((cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq))) {
+ if (otx2_nix_cq_op_status(pfvf, cq) || !cq->pend_cqe)
+ return;
+
+ while (cq->pend_cqe) {
+ cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq);
+ processed_cqe++;
+ cq->pend_cqe--;
+
+ if (!cqe)
+ continue;
sg = &sq->sg[cqe->comp.sqe_id];
skb = (struct sk_buff *)sg->skb;
if (skb) {
@@ -973,7 +1072,6 @@ void otx2_cleanup_tx_cqes(struct otx2_nic *pfvf, struct otx2_cq_queue *cq)
dev_kfree_skb_any(skb);
sg->skb = (u64)NULL;
}
- processed_cqe++;
}
/* Free CQEs to HW */
@@ -1001,3 +1099,116 @@ int otx2_rxtx_enable(struct otx2_nic *pfvf, bool enable)
mutex_unlock(&pfvf->mbox.lock);
return err;
}
+
+static void otx2_xdp_sqe_add_sg(struct otx2_snd_queue *sq, u64 dma_addr,
+ int len, int *offset)
+{
+ struct nix_sqe_sg_s *sg = NULL;
+ u64 *iova = NULL;
+
+ sg = (struct nix_sqe_sg_s *)(sq->sqe_base + *offset);
+ sg->ld_type = NIX_SEND_LDTYPE_LDD;
+ sg->subdc = NIX_SUBDC_SG;
+ sg->segs = 1;
+ sg->seg1_size = len;
+ iova = (void *)sg + sizeof(*sg);
+ *iova = dma_addr;
+ *offset += sizeof(*sg) + sizeof(u64);
+
+ sq->sg[sq->head].dma_addr[0] = dma_addr;
+ sq->sg[sq->head].size[0] = len;
+ sq->sg[sq->head].num_segs = 1;
+}
+
+bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
+{
+ struct nix_sqe_hdr_s *sqe_hdr;
+ struct otx2_snd_queue *sq;
+ int offset, free_sqe;
+
+ sq = &pfvf->qset.sq[qidx];
+ free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb;
+ if (free_sqe < sq->sqe_thresh)
+ return false;
+
+ memset(sq->sqe_base + 8, 0, sq->sqe_size - 8);
+
+ sqe_hdr = (struct nix_sqe_hdr_s *)(sq->sqe_base);
+
+ if (!sqe_hdr->total) {
+ sqe_hdr->aura = sq->aura_id;
+ sqe_hdr->df = 1;
+ sqe_hdr->sq = qidx;
+ sqe_hdr->pnc = 1;
+ }
+ sqe_hdr->total = len;
+ sqe_hdr->sqe_id = sq->head;
+
+ offset = sizeof(*sqe_hdr);
+
+ otx2_xdp_sqe_add_sg(sq, iova, len, &offset);
+ sqe_hdr->sizem1 = (offset / 16) - 1;
+ pfvf->hw_ops->sqe_flush(pfvf, sq, offset, qidx);
+
+ return true;
+}
+
+static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
+ struct bpf_prog *prog,
+ struct nix_cqe_rx_s *cqe,
+ struct otx2_cq_queue *cq)
+{
+ unsigned char *hard_start, *data;
+ int qidx = cq->cq_idx;
+ struct xdp_buff xdp;
+ struct page *page;
+ u64 iova, pa;
+ u32 act;
+ int err;
+
+ iova = cqe->sg.seg_addr - OTX2_HEAD_ROOM;
+ pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
+ page = virt_to_page(phys_to_virt(pa));
+
+ xdp_init_buff(&xdp, pfvf->rbsize, &cq->xdp_rxq);
+
+ data = (unsigned char *)phys_to_virt(pa);
+ hard_start = page_address(page);
+ xdp_prepare_buff(&xdp, hard_start, data - hard_start,
+ cqe->sg.seg_size, false);
+
+ act = bpf_prog_run_xdp(prog, &xdp);
+
+ switch (act) {
+ case XDP_PASS:
+ break;
+ case XDP_TX:
+ qidx += pfvf->hw.tx_queues;
+ cq->pool_ptrs++;
+ return otx2_xdp_sq_append_pkt(pfvf, iova,
+ cqe->sg.seg_size, qidx);
+ case XDP_REDIRECT:
+ cq->pool_ptrs++;
+ err = xdp_do_redirect(pfvf->netdev, &xdp, prog);
+
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ if (!err)
+ return true;
+ put_page(page);
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(act);
+ break;
+ case XDP_ABORTED:
+ trace_xdp_exception(pfvf->netdev, prog, act);
+ break;
+ case XDP_DROP:
+ otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
+ DMA_FROM_DEVICE);
+ put_page(page);
+ cq->pool_ptrs++;
+ return true;
+ }
+ return false;
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 3ff1ad79c001..f1a04cf9210c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -11,6 +11,7 @@
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <linux/if_vlan.h>
+#include <net/xdp.h>
#define LBK_CHAN_BASE 0x000
#define SDP_CHAN_BASE 0x700
@@ -25,6 +26,8 @@
#define OTX2_MAX_GSO_SEGS 255
#define OTX2_MAX_FRAGS_IN_SQE 9
+#define MAX_XDP_MTU (1530 - OTX2_ETH_HLEN)
+
/* Rx buffer size should be in multiples of 128bytes */
#define RCV_FRAG_LEN1(x) \
((OTX2_HEAD_ROOM + OTX2_DATA_ALIGN(x)) + \
@@ -36,9 +39,7 @@
#define RCV_FRAG_LEN(x) \
((RCV_FRAG_LEN1(x) < 2048) ? 2048 : RCV_FRAG_LEN1(x))
-#define DMA_BUFFER_LEN(x) \
- ((x) - OTX2_HEAD_ROOM - \
- OTX2_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define DMA_BUFFER_LEN(x) ((x) - OTX2_HEAD_ROOM)
/* IRQ triggered when NIX_LF_CINTX_CNT[ECOUNT]
* is equal to this value.
@@ -56,6 +57,9 @@
*/
#define CQ_QCOUNT_DEFAULT 1
+#define CQ_OP_STAT_OP_ERR 63
+#define CQ_OP_STAT_CQ_ERR 46
+
struct queue_stats {
u64 bytes;
u64 pkts;
@@ -96,7 +100,8 @@ struct otx2_snd_queue {
enum cq_type {
CQ_RX,
CQ_TX,
- CQS_PER_CINT = 2, /* RQ + SQ */
+ CQ_XDP,
+ CQS_PER_CINT = 3, /* RQ + SQ + XDP */
};
struct otx2_cq_poll {
@@ -122,9 +127,12 @@ struct otx2_cq_queue {
u16 pool_ptrs;
u32 cqe_cnt;
u32 cq_head;
+ u32 cq_tail;
+ u32 pend_cqe;
void *cqe_base;
struct qmem *cqe;
struct otx2_pool *rbpool;
+ struct xdp_rxq_info xdp_rxq;
} ____cacheline_aligned_in_smp;
struct otx2_qset {
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 03b4ec630432..e6cb8cd0787d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -8,9 +8,11 @@
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/net_tstamp.h>
#include "otx2_common.h"
#include "otx2_reg.h"
+#include "otx2_ptp.h"
#include "cn10k.h"
#define DRV_NAME "rvu_nicvf"
@@ -277,7 +279,6 @@ static void otx2vf_vfaf_mbox_destroy(struct otx2_nic *vf)
struct mbox *mbox = &vf->mbox;
if (vf->mbox_wq) {
- flush_workqueue(vf->mbox_wq);
destroy_workqueue(vf->mbox_wq);
vf->mbox_wq = NULL;
}
@@ -500,6 +501,7 @@ static const struct net_device_ops otx2vf_netdev_ops = {
.ndo_set_features = otx2vf_set_features,
.ndo_get_stats64 = otx2_get_stats64,
.ndo_tx_timeout = otx2_tx_timeout,
+ .ndo_do_ioctl = otx2_ioctl,
};
static int otx2_wq_init(struct otx2_nic *vf)
@@ -583,6 +585,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hw->rx_queues = qcount;
hw->tx_queues = qcount;
hw->max_queues = qcount;
+ hw->tot_tx_queues = qcount;
hw->irq_name = devm_kmalloc_array(&hw->pdev->dev, num_vec, NAME_SIZE,
GFP_KERNEL);
@@ -640,6 +643,9 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
+ /* Don't check for error. Proceed without ptp */
+ otx2_ptp_init(vf);
+
/* Assign default mac address */
otx2_get_mac_from_af(netdev);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera.h b/drivers/net/ethernet/marvell/prestera/prestera.h
index f18fe664b373..2a4c14c704c0 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera.h
@@ -53,6 +53,8 @@ struct prestera_port_stats {
u64 good_octets_sent;
};
+#define PRESTERA_AP_PORT_MAX (10)
+
struct prestera_port_caps {
u64 supp_link_modes;
u8 supp_fec;
@@ -69,6 +71,39 @@ struct prestera_lag {
struct prestera_flow_block;
+struct prestera_port_mac_state {
+ u32 mode;
+ u32 speed;
+ bool oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+};
+
+struct prestera_port_phy_state {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+};
+
+struct prestera_port_mac_config {
+ u32 mode;
+ u32 speed;
+ bool admin;
+ u8 inband;
+ u8 duplex;
+ u8 fec;
+};
+
+struct prestera_port_phy_config {
+ u32 mode;
+ bool admin;
+ u8 mdix;
+};
+
struct prestera_port {
struct net_device *dev;
struct prestera_switch *sw;
@@ -91,6 +126,10 @@ struct prestera_port {
struct prestera_port_stats stats;
struct delayed_work caching_dw;
} cached_hw_stats;
+ struct prestera_port_mac_config cfg_mac;
+ struct prestera_port_phy_config cfg_phy;
+ struct prestera_port_mac_state state_mac;
+ struct prestera_port_phy_state state_phy;
};
struct prestera_device {
@@ -107,7 +146,7 @@ struct prestera_device {
int (*recv_msg)(struct prestera_device *dev, void *msg, size_t size);
/* called by higher layer to send request to the firmware */
- int (*send_req)(struct prestera_device *dev, void *in_msg,
+ int (*send_req)(struct prestera_device *dev, int qid, void *in_msg,
size_t in_size, void *out_msg, size_t out_size,
unsigned int wait);
};
@@ -129,13 +168,28 @@ enum prestera_rxtx_event_id {
enum prestera_port_event_id {
PRESTERA_PORT_EVENT_UNSPEC,
- PRESTERA_PORT_EVENT_STATE_CHANGED,
+ PRESTERA_PORT_EVENT_MAC_STATE_CHANGED,
};
struct prestera_port_event {
u32 port_id;
union {
- u32 oper_state;
+ struct {
+ u32 mode;
+ u32 speed;
+ u8 oper;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } mac;
+ struct {
+ u64 lmode_bmap;
+ struct {
+ bool pause;
+ bool asym_pause;
+ } remote_fc;
+ u8 mdix;
+ } phy;
} data;
};
@@ -223,11 +277,16 @@ void prestera_device_unregister(struct prestera_device *dev);
struct prestera_port *prestera_port_find_by_hwid(struct prestera_switch *sw,
u32 dev_id, u32 hw_id);
-int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
- u64 adver_link_modes, u8 adver_fec);
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes);
struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id);
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg);
+
struct prestera_port *prestera_port_dev_lower_find(struct net_device *dev);
int prestera_port_pvid_set(struct prestera_port *port, u16 vid);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
index 68b442eb6d69..06279cd6da67 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c
@@ -345,8 +345,6 @@ static struct prestera_trap prestera_trap_items_arr[] = {
},
};
-static void prestera_devlink_traps_fini(struct prestera_switch *sw);
-
static int prestera_drop_counter_get(struct devlink *devlink,
const struct devlink_trap *trap,
u64 *p_drops);
@@ -381,8 +379,6 @@ static int prestera_trap_action_set(struct devlink *devlink,
enum devlink_trap_action action,
struct netlink_ext_ack *extack);
-static int prestera_devlink_traps_register(struct prestera_switch *sw);
-
static const struct devlink_ops prestera_dl_ops = {
.info_get = prestera_dl_info_get,
.trap_init = prestera_trap_init,
@@ -407,38 +403,18 @@ void prestera_devlink_free(struct prestera_switch *sw)
devlink_free(dl);
}
-int prestera_devlink_register(struct prestera_switch *sw)
+void prestera_devlink_register(struct prestera_switch *sw)
{
struct devlink *dl = priv_to_devlink(sw);
- int err;
-
- err = devlink_register(dl);
- if (err) {
- dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err);
- return err;
- }
- err = prestera_devlink_traps_register(sw);
- if (err) {
- devlink_unregister(dl);
- dev_err(sw->dev->dev, "devlink_traps_register failed: %d\n",
- err);
- return err;
- }
-
- return 0;
+ devlink_register(dl);
}
void prestera_devlink_unregister(struct prestera_switch *sw)
{
- struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
- prestera_devlink_traps_fini(sw);
devlink_unregister(dl);
-
- kfree(trap_data->trap_items_arr);
- kfree(trap_data);
}
int prestera_devlink_port_register(struct prestera_port *port)
@@ -486,7 +462,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev)
return &port->dl_port;
}
-static int prestera_devlink_traps_register(struct prestera_switch *sw)
+int prestera_devlink_traps_register(struct prestera_switch *sw)
{
const u32 groups_count = ARRAY_SIZE(prestera_trap_groups_arr);
const u32 traps_count = ARRAY_SIZE(prestera_trap_items_arr);
@@ -625,8 +601,9 @@ static int prestera_drop_counter_get(struct devlink *devlink,
cpu_code_type, p_drops);
}
-static void prestera_devlink_traps_fini(struct prestera_switch *sw)
+void prestera_devlink_traps_unregister(struct prestera_switch *sw)
{
+ struct prestera_trap_data *trap_data = sw->trap_data;
struct devlink *dl = priv_to_devlink(sw);
const struct devlink_trap *trap;
int i;
@@ -638,4 +615,6 @@ static void prestera_devlink_traps_fini(struct prestera_switch *sw)
devlink_trap_groups_unregister(dl, prestera_trap_groups_arr,
ARRAY_SIZE(prestera_trap_groups_arr));
+ kfree(trap_data->trap_items_arr);
+ kfree(trap_data);
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
index cc34c3db13a2..b322295bad3a 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h
@@ -9,7 +9,7 @@
struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev);
void prestera_devlink_free(struct prestera_switch *sw);
-int prestera_devlink_register(struct prestera_switch *sw);
+void prestera_devlink_register(struct prestera_switch *sw);
void prestera_devlink_unregister(struct prestera_switch *sw);
int prestera_devlink_port_register(struct prestera_port *port);
@@ -22,5 +22,7 @@ struct devlink_port *prestera_devlink_get_port(struct net_device *dev);
void prestera_devlink_trap_report(struct prestera_port *port,
struct sk_buff *skb, u8 cpu_code);
+int prestera_devlink_traps_register(struct prestera_switch *sw);
+void prestera_devlink_traps_unregister(struct prestera_switch *sw);
#endif /* _PRESTERA_DEVLINK_H_ */
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
index 93a5e2baf808..6011454dba71 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.c
@@ -323,7 +323,6 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
{
u32 new_mode = PRESTERA_LINK_MODE_MAX;
u32 type, mode;
- int err;
for (type = 0; type < PRESTERA_PORT_TYPE_MAX; type++) {
if (port_types[type].eth_type == ecmd->base.port &&
@@ -348,13 +347,8 @@ static int prestera_port_type_set(const struct ethtool_link_ksettings *ecmd,
}
}
- if (new_mode < PRESTERA_LINK_MODE_MAX)
- err = prestera_hw_port_link_mode_set(port, new_mode);
- else
- err = -EINVAL;
-
- if (err)
- return err;
+ if (new_mode >= PRESTERA_LINK_MODE_MAX)
+ return -EINVAL;
port->caps.type = type;
port->autoneg = false;
@@ -434,27 +428,33 @@ static void prestera_port_supp_types_get(struct ethtool_link_ksettings *ecmd,
static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
+ struct prestera_port_phy_state *state = &port->state_phy;
bool asym_pause;
bool pause;
u64 bitmap;
int err;
- err = prestera_hw_port_remote_cap_get(port, &bitmap);
- if (!err) {
- prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
- bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+ err = prestera_hw_port_phy_mode_get(port, NULL, &state->lmode_bmap,
+ &state->remote_fc.pause,
+ &state->remote_fc.asym_pause);
+ if (err)
+ netdev_warn(port->dev, "Remote link caps get failed %d",
+ port->caps.transceiver);
- if (!bitmap_empty(ecmd->link_modes.lp_advertising,
- __ETHTOOL_LINK_MODE_MASK_NBITS)) {
- ethtool_link_ksettings_add_link_mode(ecmd,
- lp_advertising,
- Autoneg);
- }
+ bitmap = state->lmode_bmap;
+
+ prestera_modes_to_eth(ecmd->link_modes.lp_advertising,
+ bitmap, 0, PRESTERA_PORT_TYPE_NONE);
+
+ if (!bitmap_empty(ecmd->link_modes.lp_advertising,
+ __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ ethtool_link_ksettings_add_link_mode(ecmd,
+ lp_advertising,
+ Autoneg);
}
- err = prestera_hw_port_remote_fc_get(port, &pause, &asym_pause);
- if (err)
- return;
+ pause = state->remote_fc.pause;
+ asym_pause = state->remote_fc.asym_pause;
if (pause)
ethtool_link_ksettings_add_link_mode(ecmd,
@@ -466,30 +466,46 @@ static void prestera_port_remote_cap_get(struct ethtool_link_ksettings *ecmd,
Asym_Pause);
}
-static void prestera_port_speed_get(struct ethtool_link_ksettings *ecmd,
- struct prestera_port *port)
+static void prestera_port_link_mode_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
{
+ struct prestera_port_mac_state *state = &port->state_mac;
u32 speed;
+ u8 duplex;
int err;
- err = prestera_hw_port_speed_get(port, &speed);
- ecmd->base.speed = err ? SPEED_UNKNOWN : speed;
+ if (!port->state_mac.oper)
+ return;
+
+ if (state->speed == SPEED_UNKNOWN || state->duplex == DUPLEX_UNKNOWN) {
+ err = prestera_hw_port_mac_mode_get(port, NULL, &speed,
+ &duplex, NULL);
+ if (err) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ } else {
+ state->speed = speed;
+ state->duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ }
+ }
+
+ ecmd->base.speed = port->state_mac.speed;
+ ecmd->base.duplex = port->state_mac.duplex;
}
-static void prestera_port_duplex_get(struct ethtool_link_ksettings *ecmd,
- struct prestera_port *port)
+static void prestera_port_mdix_get(struct ethtool_link_ksettings *ecmd,
+ struct prestera_port *port)
{
- u8 duplex;
- int err;
+ struct prestera_port_phy_state *state = &port->state_phy;
- err = prestera_hw_port_duplex_get(port, &duplex);
- if (err) {
- ecmd->base.duplex = DUPLEX_UNKNOWN;
- return;
+ if (prestera_hw_port_phy_mode_get(port, &state->mdix, NULL, NULL, NULL)) {
+ netdev_warn(port->dev, "MDIX params get failed");
+ state->mdix = ETH_TP_MDI_INVALID;
}
- ecmd->base.duplex = duplex == PRESTERA_PORT_DUPLEX_FULL ?
- DUPLEX_FULL : DUPLEX_HALF;
+ ecmd->base.eth_tp_mdix = port->state_phy.mdix;
+ ecmd->base.eth_tp_mdix_ctrl = port->cfg_phy.mdix;
}
static int
@@ -501,6 +517,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
ethtool_link_ksettings_zero_link_mode(ecmd, supported);
ethtool_link_ksettings_zero_link_mode(ecmd, advertising);
ethtool_link_ksettings_zero_link_mode(ecmd, lp_advertising);
+ ecmd->base.speed = SPEED_UNKNOWN;
+ ecmd->base.duplex = DUPLEX_UNKNOWN;
ecmd->base.autoneg = port->autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
@@ -521,13 +539,8 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
prestera_port_supp_types_get(ecmd, port);
- if (netif_carrier_ok(dev)) {
- prestera_port_speed_get(ecmd, port);
- prestera_port_duplex_get(ecmd, port);
- } else {
- ecmd->base.speed = SPEED_UNKNOWN;
- ecmd->base.duplex = DUPLEX_UNKNOWN;
- }
+ if (netif_carrier_ok(dev))
+ prestera_port_link_mode_get(ecmd, port);
ecmd->base.port = prestera_port_type_get(port);
@@ -545,8 +558,7 @@ prestera_ethtool_get_link_ksettings(struct net_device *dev,
if (port->caps.type == PRESTERA_PORT_TYPE_TP &&
port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER)
- prestera_hw_port_mdix_get(port, &ecmd->base.eth_tp_mdix,
- &ecmd->base.eth_tp_mdix_ctrl);
+ prestera_port_mdix_get(ecmd, port);
return 0;
}
@@ -555,12 +567,17 @@ static int prestera_port_mdix_set(const struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
if (ecmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_INVALID &&
- port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
- port->caps.type == PRESTERA_PORT_TYPE_TP)
- return prestera_hw_port_mdix_set(port,
- ecmd->base.eth_tp_mdix_ctrl);
-
+ port->caps.transceiver == PRESTERA_PORT_TCVR_COPPER &&
+ port->caps.type == PRESTERA_PORT_TYPE_TP) {
+ port->cfg_phy.mdix = ecmd->base.eth_tp_mdix_ctrl;
+ return prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
return 0;
+
}
static int prestera_port_link_mode_set(struct prestera_port *port,
@@ -568,12 +585,15 @@ static int prestera_port_link_mode_set(struct prestera_port *port,
{
u32 new_mode = PRESTERA_LINK_MODE_MAX;
u32 mode;
+ int err;
for (mode = 0; mode < PRESTERA_LINK_MODE_MAX; mode++) {
- if (speed != port_link_modes[mode].speed)
+ if (speed != SPEED_UNKNOWN &&
+ speed != port_link_modes[mode].speed)
continue;
- if (duplex != port_link_modes[mode].duplex)
+ if (duplex != DUPLEX_UNKNOWN &&
+ duplex != port_link_modes[mode].duplex)
continue;
if (!(port_link_modes[mode].pr_mask &
@@ -590,36 +610,31 @@ static int prestera_port_link_mode_set(struct prestera_port *port,
if (new_mode == PRESTERA_LINK_MODE_MAX)
return -EOPNOTSUPP;
- return prestera_hw_port_link_mode_set(port, new_mode);
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ false, new_mode, 0,
+ port->cfg_phy.mdix);
+ if (err)
+ return err;
+
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = 0;
+ port->cfg_phy.mode = new_mode;
+ port->autoneg = false;
+
+ return 0;
}
static int
prestera_port_speed_duplex_set(const struct ethtool_link_ksettings *ecmd,
struct prestera_port *port)
{
- u32 curr_mode;
- u8 duplex;
- u32 speed;
- int err;
-
- err = prestera_hw_port_link_mode_get(port, &curr_mode);
- if (err)
- return err;
- if (curr_mode >= PRESTERA_LINK_MODE_MAX)
- return -EINVAL;
+ u8 duplex = DUPLEX_UNKNOWN;
if (ecmd->base.duplex != DUPLEX_UNKNOWN)
duplex = ecmd->base.duplex == DUPLEX_FULL ?
PRESTERA_PORT_DUPLEX_FULL : PRESTERA_PORT_DUPLEX_HALF;
- else
- duplex = port_link_modes[curr_mode].duplex;
- if (ecmd->base.speed != SPEED_UNKNOWN)
- speed = ecmd->base.speed;
- else
- speed = port_link_modes[curr_mode].speed;
-
- return prestera_port_link_mode_set(port, speed, duplex,
+ return prestera_port_link_mode_set(port, ecmd->base.speed, duplex,
port->caps.type);
}
@@ -645,19 +660,12 @@ prestera_ethtool_set_link_ksettings(struct net_device *dev,
prestera_modes_from_eth(ecmd->link_modes.advertising, &adver_modes,
&adver_fec, port->caps.type);
- err = prestera_port_autoneg_set(port,
- ecmd->base.autoneg == AUTONEG_ENABLE,
- adver_modes, adver_fec);
- if (err)
- return err;
-
- if (ecmd->base.autoneg == AUTONEG_DISABLE) {
+ if (ecmd->base.autoneg == AUTONEG_ENABLE)
+ err = prestera_port_autoneg_set(port, adver_modes);
+ else
err = prestera_port_speed_duplex_set(ecmd, port);
- if (err)
- return err;
- }
- return 0;
+ return err;
}
static int prestera_ethtool_get_fecparam(struct net_device *dev,
@@ -668,7 +676,7 @@ static int prestera_ethtool_get_fecparam(struct net_device *dev,
u32 mode;
int err;
- err = prestera_hw_port_fec_get(port, &active);
+ err = prestera_hw_port_mac_mode_get(port, NULL, NULL, NULL, &active);
if (err)
return err;
@@ -693,18 +701,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
struct ethtool_fecparam *fecparam)
{
struct prestera_port *port = netdev_priv(dev);
- u8 fec, active;
+ struct prestera_port_mac_config cfg_mac;
u32 mode;
- int err;
+ u8 fec;
if (port->autoneg) {
netdev_err(dev, "FEC set is not allowed while autoneg is on\n");
return -EINVAL;
}
- err = prestera_hw_port_fec_get(port, &active);
- if (err)
- return err;
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ netdev_err(dev, "FEC set is not allowed on non-SFP ports\n");
+ return -EINVAL;
+ }
fec = PRESTERA_PORT_FEC_MAX;
for (mode = 0; mode < PRESTERA_PORT_FEC_MAX; mode++) {
@@ -715,13 +724,19 @@ static int prestera_ethtool_set_fecparam(struct net_device *dev,
}
}
- if (fec == active)
+ prestera_port_cfg_mac_read(port, &cfg_mac);
+
+ if (fec == cfg_mac.fec)
return 0;
- if (fec == PRESTERA_PORT_FEC_MAX)
- return -EOPNOTSUPP;
+ if (fec == PRESTERA_PORT_FEC_MAX) {
+ netdev_err(dev, "Unsupported FEC requested");
+ return -EINVAL;
+ }
+
+ cfg_mac.fec = fec;
- return prestera_hw_port_fec_set(port, fec);
+ return prestera_port_cfg_mac_write(port, &cfg_mac);
}
static int prestera_ethtool_get_sset_count(struct net_device *dev, int sset)
@@ -766,6 +781,28 @@ static int prestera_ethtool_nway_reset(struct net_device *dev)
return -EINVAL;
}
+void prestera_ethtool_port_state_changed(struct prestera_port *port,
+ struct prestera_port_event *evt)
+{
+ struct prestera_port_mac_state *smac = &port->state_mac;
+
+ smac->oper = evt->data.mac.oper;
+
+ if (smac->oper) {
+ smac->mode = evt->data.mac.mode;
+ smac->speed = evt->data.mac.speed;
+ smac->duplex = evt->data.mac.duplex;
+ smac->fc = evt->data.mac.fc;
+ smac->fec = evt->data.mac.fec;
+ } else {
+ smac->mode = PRESTERA_MAC_MODE_MAX;
+ smac->speed = SPEED_UNKNOWN;
+ smac->duplex = DUPLEX_UNKNOWN;
+ smac->fc = 0;
+ smac->fec = 0;
+ }
+}
+
const struct ethtool_ops prestera_ethtool_ops = {
.get_drvinfo = prestera_ethtool_get_drvinfo,
.get_link_ksettings = prestera_ethtool_get_link_ksettings,
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
index 523ef1f592ce..9eb18e99dea6 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_ethtool.h
@@ -6,6 +6,12 @@
#include <linux/ethtool.h>
+struct prestera_port_event;
+struct prestera_port;
+
extern const struct ethtool_ops prestera_ethtool_ops;
+void prestera_ethtool_port_state_changed(struct prestera_port *port,
+ struct prestera_port_event *evt);
+
#endif /* _PRESTERA_ETHTOOL_H_ */
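
The header now exports prestera_ethtool_port_state_changed() so the firmware event path can refresh the cached MAC link state that the reworked get_link_ksettings path reads. A minimal sketch of that consumer side follows; the handler shape mirrors the eh.func(sw, &evt, arg) dispatch visible in the prestera_hw.c hunks below, but the handler itself and prestera_find_port() are assumptions for illustration — the real call site lives in prestera_main.c, outside this diff.

/* Illustration only: dispatch a MAC state event into the ethtool cache.
 * example_port_event_handler() and prestera_find_port() are assumed here;
 * they are not part of this patch hunk.
 */
static void example_port_event_handler(struct prestera_switch *sw,
				       struct prestera_event *evt, void *arg)
{
	struct prestera_port *port;

	if (evt->id != PRESTERA_PORT_EVENT_MAC_STATE_CHANGED)
		return;

	port = prestera_find_port(sw, evt->port_evt.port_id);
	if (!port)
		return;

	/* Copies oper/mode/speed/duplex/fc/fec into port->state_mac */
	prestera_ethtool_port_state_changed(port, &evt->port_evt);

	/* Carrier tracks the cached operational state */
	if (port->state_mac.oper)
		netif_carrier_on(port->dev);
	else
		netif_carrier_off(port->dev);
}
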
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.c b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
index c1297859e471..41ba17cb2965 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.c
@@ -47,7 +47,6 @@ enum prestera_cmd_type_t {
PRESTERA_CMD_TYPE_ACL_PORT_UNBIND = 0x531,
PRESTERA_CMD_TYPE_RXTX_INIT = 0x800,
- PRESTERA_CMD_TYPE_RXTX_PORT_INIT = 0x801,
PRESTERA_CMD_TYPE_LAG_MEMBER_ADD = 0x900,
PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE = 0x901,
@@ -76,16 +75,12 @@ enum {
PRESTERA_CMD_PORT_ATTR_LEARNING = 7,
PRESTERA_CMD_PORT_ATTR_FLOOD = 8,
PRESTERA_CMD_PORT_ATTR_CAPABILITY = 9,
- PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY = 10,
- PRESTERA_CMD_PORT_ATTR_REMOTE_FC = 11,
- PRESTERA_CMD_PORT_ATTR_LINK_MODE = 12,
+ PRESTERA_CMD_PORT_ATTR_PHY_MODE = 12,
PRESTERA_CMD_PORT_ATTR_TYPE = 13,
- PRESTERA_CMD_PORT_ATTR_FEC = 14,
- PRESTERA_CMD_PORT_ATTR_AUTONEG = 15,
- PRESTERA_CMD_PORT_ATTR_DUPLEX = 16,
PRESTERA_CMD_PORT_ATTR_STATS = 17,
- PRESTERA_CMD_PORT_ATTR_MDIX = 18,
- PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART = 19,
+ PRESTERA_CMD_PORT_ATTR_MAC_AUTONEG_RESTART = 18,
+ PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART = 19,
+ PRESTERA_CMD_PORT_ATTR_MAC_MODE = 22,
};
enum {
@@ -169,12 +164,12 @@ struct prestera_fw_event_handler {
};
struct prestera_msg_cmd {
- u32 type;
+ __le32 type;
};
struct prestera_msg_ret {
struct prestera_msg_cmd cmd;
- u32 status;
+ __le32 status;
};
struct prestera_msg_common_req {
@@ -187,102 +182,144 @@ struct prestera_msg_common_resp {
union prestera_msg_switch_param {
u8 mac[ETH_ALEN];
- u32 ageing_timeout_ms;
-};
+ __le32 ageing_timeout_ms;
+} __packed;
struct prestera_msg_switch_attr_req {
struct prestera_msg_cmd cmd;
- u32 attr;
+ __le32 attr;
union prestera_msg_switch_param param;
};
struct prestera_msg_switch_init_resp {
struct prestera_msg_ret ret;
- u32 port_count;
- u32 mtu_max;
+ __le32 port_count;
+ __le32 mtu_max;
u8 switch_id;
u8 lag_max;
u8 lag_member_max;
-};
+ __le32 size_tbl_router_nexthop;
+} __packed __aligned(4);
-struct prestera_msg_port_autoneg_param {
- u64 link_mode;
- u8 enable;
- u8 fec;
-};
+struct prestera_msg_event_port_param {
+ union {
+ struct {
+ u8 oper;
+ __le32 mode;
+ __le32 speed;
+ u8 duplex;
+ u8 fc;
+ u8 fec;
+ } __packed mac;
+ struct {
+ u8 mdix;
+ __le64 lmode_bmap;
+ u8 fc;
+ } __packed phy;
+ } __packed;
+} __packed __aligned(4);
struct prestera_msg_port_cap_param {
- u64 link_mode;
+ __le64 link_mode;
u8 type;
u8 fec;
+ u8 fc;
u8 transceiver;
};
-struct prestera_msg_port_mdix_param {
- u8 status;
- u8 admin_mode;
-};
-
struct prestera_msg_port_flood_param {
u8 type;
u8 enable;
};
union prestera_msg_port_param {
- u8 admin_state;
- u8 oper_state;
- u32 mtu;
- u8 mac[ETH_ALEN];
- u8 accept_frm_type;
- u32 speed;
+ u8 admin_state;
+ u8 oper_state;
+ __le32 mtu;
+ u8 mac[ETH_ALEN];
+ u8 accept_frm_type;
+ __le32 speed;
u8 learning;
u8 flood;
- u32 link_mode;
- u8 type;
- u8 duplex;
- u8 fec;
- u8 fc;
- struct prestera_msg_port_mdix_param mdix;
- struct prestera_msg_port_autoneg_param autoneg;
+ __le32 link_mode;
+ u8 type;
+ u8 duplex;
+ u8 fec;
+ u8 fc;
+
+ union {
+ struct {
+ u8 admin:1;
+ u8 fc;
+ u8 ap_enable;
+ union {
+ struct {
+ __le32 mode;
+ u8 inband:1;
+ __le32 speed;
+ u8 duplex;
+ u8 fec;
+ u8 fec_supp;
+ } __packed reg_mode;
+ struct {
+ __le32 mode;
+ __le32 speed;
+ u8 fec;
+ u8 fec_supp;
+ } __packed ap_modes[PRESTERA_AP_PORT_MAX];
+ } __packed;
+ } __packed mac;
+ struct {
+ u8 admin:1;
+ u8 adv_enable;
+ __le64 modes;
+ __le32 mode;
+ u8 mdix;
+ } __packed phy;
+ } __packed link;
+
struct prestera_msg_port_cap_param cap;
struct prestera_msg_port_flood_param flood_ext;
-};
+ struct prestera_msg_event_port_param link_evt;
+} __packed;
struct prestera_msg_port_attr_req {
struct prestera_msg_cmd cmd;
- u32 attr;
- u32 port;
- u32 dev;
+ __le32 attr;
+ __le32 port;
+ __le32 dev;
union prestera_msg_port_param param;
-};
+} __packed __aligned(4);
+
struct prestera_msg_port_attr_resp {
struct prestera_msg_ret ret;
union prestera_msg_port_param param;
-};
+} __packed __aligned(4);
+
struct prestera_msg_port_stats_resp {
struct prestera_msg_ret ret;
- u64 stats[PRESTERA_PORT_CNT_MAX];
+ __le64 stats[PRESTERA_PORT_CNT_MAX];
};
struct prestera_msg_port_info_req {
struct prestera_msg_cmd cmd;
- u32 port;
+ __le32 port;
};
struct prestera_msg_port_info_resp {
struct prestera_msg_ret ret;
- u32 hw_id;
- u32 dev_id;
- u16 fp_id;
+ __le32 hw_id;
+ __le32 dev_id;
+ __le16 fp_id;
};
struct prestera_msg_vlan_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 vid;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
u8 is_member;
u8 is_tagged;
};
@@ -292,113 +329,114 @@ struct prestera_msg_fdb_req {
u8 dest_type;
union {
struct {
- u32 port;
- u32 dev;
+ __le32 port;
+ __le32 dev;
};
- u16 lag_id;
+ __le16 lag_id;
} dest;
u8 mac[ETH_ALEN];
- u16 vid;
+ __le16 vid;
u8 dynamic;
- u32 flush_mode;
-};
+ __le32 flush_mode;
+} __packed __aligned(4);
struct prestera_msg_bridge_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 bridge;
+ __le32 port;
+ __le32 dev;
+ __le16 bridge;
};
struct prestera_msg_bridge_resp {
struct prestera_msg_ret ret;
- u16 bridge;
+ __le16 bridge;
};
struct prestera_msg_acl_action {
- u32 id;
+ __le32 id;
+ __le32 reserved[5];
};
struct prestera_msg_acl_match {
- u32 type;
+ __le32 type;
union {
struct {
u8 key;
u8 mask;
- } u8;
+ } __packed u8;
struct {
- u16 key;
- u16 mask;
+ __le16 key;
+ __le16 mask;
} u16;
struct {
- u32 key;
- u32 mask;
+ __le32 key;
+ __le32 mask;
} u32;
struct {
- u64 key;
- u64 mask;
+ __le64 key;
+ __le64 mask;
} u64;
struct {
u8 key[ETH_ALEN];
u8 mask[ETH_ALEN];
- } mac;
- } __packed keymask;
+ } __packed mac;
+ } keymask;
};
struct prestera_msg_acl_rule_req {
struct prestera_msg_cmd cmd;
- u32 id;
- u32 priority;
- u16 ruleset_id;
+ __le32 id;
+ __le32 priority;
+ __le16 ruleset_id;
u8 n_actions;
u8 n_matches;
};
struct prestera_msg_acl_rule_resp {
struct prestera_msg_ret ret;
- u32 id;
+ __le32 id;
};
struct prestera_msg_acl_rule_stats_resp {
struct prestera_msg_ret ret;
- u64 packets;
- u64 bytes;
+ __le64 packets;
+ __le64 bytes;
};
struct prestera_msg_acl_ruleset_bind_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 ruleset_id;
+ __le32 port;
+ __le32 dev;
+ __le16 ruleset_id;
};
struct prestera_msg_acl_ruleset_req {
struct prestera_msg_cmd cmd;
- u16 id;
+ __le16 id;
};
struct prestera_msg_acl_ruleset_resp {
struct prestera_msg_ret ret;
- u16 id;
+ __le16 id;
};
struct prestera_msg_span_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
+ __le32 port;
+ __le32 dev;
u8 id;
-} __packed __aligned(4);
+};
struct prestera_msg_span_resp {
struct prestera_msg_ret ret;
u8 id;
-} __packed __aligned(4);
+};
struct prestera_msg_stp_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 vid;
+ __le32 port;
+ __le32 dev;
+ __le16 vid;
u8 state;
};
@@ -409,20 +447,14 @@ struct prestera_msg_rxtx_req {
struct prestera_msg_rxtx_resp {
struct prestera_msg_ret ret;
- u32 map_addr;
-};
-
-struct prestera_msg_rxtx_port_req {
- struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
+ __le32 map_addr;
};
struct prestera_msg_lag_req {
struct prestera_msg_cmd cmd;
- u32 port;
- u32 dev;
- u16 lag_id;
+ __le32 port;
+ __le32 dev;
+ __le16 lag_id;
};
struct prestera_msg_cpu_code_counter_req {
@@ -433,22 +465,18 @@ struct prestera_msg_cpu_code_counter_req {
struct mvsw_msg_cpu_code_counter_ret {
struct prestera_msg_ret ret;
- u64 packet_count;
+ __le64 packet_count;
};
struct prestera_msg_event {
- u16 type;
- u16 id;
-};
-
-union prestera_msg_event_port_param {
- u32 oper_state;
+ __le16 type;
+ __le16 id;
};
struct prestera_msg_event_port {
struct prestera_msg_event id;
- u32 port_id;
- union prestera_msg_event_port_param param;
+ __le32 port_id;
+ struct prestera_msg_event_port_param param;
};
union prestera_msg_event_fdb_param {
@@ -459,12 +487,52 @@ struct prestera_msg_event_fdb {
struct prestera_msg_event id;
u8 dest_type;
union {
- u32 port_id;
- u16 lag_id;
+ __le32 port_id;
+ __le16 lag_id;
} dest;
- u32 vid;
+ __le32 vid;
union prestera_msg_event_fdb_param param;
-};
+} __packed __aligned(4);
+
+static inline void prestera_hw_build_tests(void)
+{
+ /* check requests */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_req) != 4);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_attr_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_req) != 120);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_vlan_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_fdb_req) != 28);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_bind_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_stp_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_req) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_lag_req) != 16);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_cpu_code_counter_req) != 8);
+
+ /* check responses */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_common_resp) != 8);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_switch_init_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_attr_resp) != 112);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_stats_resp) != 248);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_port_info_resp) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_bridge_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_rule_stats_resp) != 24);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_acl_ruleset_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_span_resp) != 12);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_rxtx_resp) != 12);
+
+ /* check events */
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_port) != 20);
+ BUILD_BUG_ON(sizeof(struct prestera_msg_event_fdb) != 20);
+}
+
+static u8 prestera_hw_mdix_to_eth(u8 mode);
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause);
static int __prestera_cmd_ret(struct prestera_switch *sw,
enum prestera_cmd_type_t type,
@@ -475,15 +543,15 @@ static int __prestera_cmd_ret(struct prestera_switch *sw,
struct prestera_device *dev = sw->dev;
int err;
- cmd->type = type;
+ cmd->type = __cpu_to_le32(type);
- err = dev->send_req(dev, cmd, clen, ret, rlen, waitms);
+ err = dev->send_req(dev, 0, cmd, clen, ret, rlen, waitms);
if (err)
return err;
- if (ret->cmd.type != PRESTERA_CMD_TYPE_ACK)
+ if (__le32_to_cpu(ret->cmd.type) != PRESTERA_CMD_TYPE_ACK)
return -EBADE;
- if (ret->status != PRESTERA_CMD_ACK_OK)
+ if (__le32_to_cpu(ret->status) != PRESTERA_CMD_ACK_OK)
return -EINVAL;
return 0;
@@ -517,13 +585,24 @@ static int prestera_cmd(struct prestera_switch *sw,
static int prestera_fw_parse_port_evt(void *msg, struct prestera_event *evt)
{
- struct prestera_msg_event_port *hw_evt = msg;
+ struct prestera_msg_event_port *hw_evt;
- if (evt->id != PRESTERA_PORT_EVENT_STATE_CHANGED)
- return -EINVAL;
+ hw_evt = (struct prestera_msg_event_port *)msg;
- evt->port_evt.data.oper_state = hw_evt->param.oper_state;
- evt->port_evt.port_id = hw_evt->port_id;
+ evt->port_evt.port_id = __le32_to_cpu(hw_evt->port_id);
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ evt->port_evt.data.mac.oper = hw_evt->param.mac.oper;
+ evt->port_evt.data.mac.mode =
+ __le32_to_cpu(hw_evt->param.mac.mode);
+ evt->port_evt.data.mac.speed =
+ __le32_to_cpu(hw_evt->param.mac.speed);
+ evt->port_evt.data.mac.duplex = hw_evt->param.mac.duplex;
+ evt->port_evt.data.mac.fc = hw_evt->param.mac.fc;
+ evt->port_evt.data.mac.fec = hw_evt->param.mac.fec;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
@@ -535,17 +614,17 @@ static int prestera_fw_parse_fdb_evt(void *msg, struct prestera_event *evt)
switch (hw_evt->dest_type) {
case PRESTERA_HW_FDB_ENTRY_TYPE_REG_PORT:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_REG_PORT;
- evt->fdb_evt.dest.port_id = hw_evt->dest.port_id;
+ evt->fdb_evt.dest.port_id = __le32_to_cpu(hw_evt->dest.port_id);
break;
case PRESTERA_HW_FDB_ENTRY_TYPE_LAG:
evt->fdb_evt.type = PRESTERA_FDB_ENTRY_TYPE_LAG;
- evt->fdb_evt.dest.lag_id = hw_evt->dest.lag_id;
+ evt->fdb_evt.dest.lag_id = __le16_to_cpu(hw_evt->dest.lag_id);
break;
default:
return -EINVAL;
}
- evt->fdb_evt.vid = hw_evt->vid;
+ evt->fdb_evt.vid = __le32_to_cpu(hw_evt->vid);
ether_addr_copy(evt->fdb_evt.data.mac, hw_evt->param.mac);
@@ -597,20 +676,22 @@ static int prestera_evt_recv(struct prestera_device *dev, void *buf, size_t size
struct prestera_msg_event *msg = buf;
struct prestera_fw_event_handler eh;
struct prestera_event evt;
+ u16 msg_type;
int err;
- if (msg->type >= PRESTERA_EVENT_TYPE_MAX)
+ msg_type = __le16_to_cpu(msg->type);
+ if (msg_type >= PRESTERA_EVENT_TYPE_MAX)
return -EINVAL;
- if (!fw_event_parsers[msg->type].func)
+ if (!fw_event_parsers[msg_type].func)
return -ENOENT;
- err = prestera_find_event_handler(sw, msg->type, &eh);
+ err = prestera_find_event_handler(sw, msg_type, &eh);
if (err)
return err;
- evt.id = msg->id;
+ evt.id = __le16_to_cpu(msg->id);
- err = fw_event_parsers[msg->type].func(buf, &evt);
+ err = fw_event_parsers[msg_type].func(buf, &evt);
if (err)
return err;
@@ -635,11 +716,39 @@ static void prestera_pkt_recv(struct prestera_device *dev)
eh.func(sw, &ev, eh.arg);
}
+static u8 prestera_hw_mdix_to_eth(u8 mode)
+{
+ switch (mode) {
+ case PRESTERA_PORT_TP_MDI:
+ return ETH_TP_MDI;
+ case PRESTERA_PORT_TP_MDIX:
+ return ETH_TP_MDI_X;
+ case PRESTERA_PORT_TP_AUTO:
+ return ETH_TP_MDI_AUTO;
+ default:
+ return ETH_TP_MDI_INVALID;
+ }
+}
+
+static u8 prestera_hw_mdix_from_eth(u8 mode)
+{
+ switch (mode) {
+ case ETH_TP_MDI:
+ return PRESTERA_PORT_TP_MDI;
+ case ETH_TP_MDI_X:
+ return PRESTERA_PORT_TP_MDIX;
+ case ETH_TP_MDI_AUTO:
+ return PRESTERA_PORT_TP_AUTO;
+ default:
+ return PRESTERA_PORT_TP_NA;
+ }
+}
+
int prestera_hw_port_info_get(const struct prestera_port *port,
u32 *dev_id, u32 *hw_id, u16 *fp_id)
{
struct prestera_msg_port_info_req req = {
- .port = port->id,
+ .port = __cpu_to_le32(port->id),
};
struct prestera_msg_port_info_resp resp;
int err;
@@ -649,9 +758,9 @@ int prestera_hw_port_info_get(const struct prestera_port *port,
if (err)
return err;
- *dev_id = resp.dev_id;
- *hw_id = resp.hw_id;
- *fp_id = resp.fp_id;
+ *dev_id = __le32_to_cpu(resp.dev_id);
+ *hw_id = __le32_to_cpu(resp.hw_id);
+ *fp_id = __le16_to_cpu(resp.fp_id);
return 0;
}
@@ -659,7 +768,7 @@ int prestera_hw_port_info_get(const struct prestera_port *port,
int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac)
{
struct prestera_msg_switch_attr_req req = {
- .attr = PRESTERA_CMD_SWITCH_ATTR_MAC,
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_MAC),
};
ether_addr_copy(req.param.mac, mac);
@@ -676,6 +785,8 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
INIT_LIST_HEAD(&sw->event_handlers);
+ prestera_hw_build_tests();
+
err = prestera_cmd_ret_wait(sw, PRESTERA_CMD_TYPE_SWITCH_INIT,
&req.cmd, sizeof(req),
&resp.ret, sizeof(resp),
@@ -685,9 +796,9 @@ int prestera_hw_switch_init(struct prestera_switch *sw)
sw->dev->recv_msg = prestera_evt_recv;
sw->dev->recv_pkt = prestera_pkt_recv;
- sw->port_count = resp.port_count;
+ sw->port_count = __le32_to_cpu(resp.port_count);
sw->mtu_min = PRESTERA_MIN_MTU;
- sw->mtu_max = resp.mtu_max;
+ sw->mtu_max = __le32_to_cpu(resp.mtu_max);
sw->id = resp.switch_id;
sw->lag_member_max = resp.lag_member_max;
sw->lag_max = resp.lag_max;
@@ -703,9 +814,9 @@ void prestera_hw_switch_fini(struct prestera_switch *sw)
int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
{
struct prestera_msg_switch_attr_req req = {
- .attr = PRESTERA_CMD_SWITCH_ATTR_AGEING,
+ .attr = __cpu_to_le32(PRESTERA_CMD_SWITCH_ATTR_AGEING),
.param = {
- .ageing_timeout_ms = ageing_ms,
+ .ageing_timeout_ms = __cpu_to_le32(ageing_ms),
},
};
@@ -713,15 +824,56 @@ int prestera_hw_switch_ageing_set(struct prestera_switch *sw, u32 ageing_ms)
&req.cmd, sizeof(req));
}
-int prestera_hw_port_state_set(const struct prestera_port *port,
- bool admin_state)
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mode)
+ *mode = __le32_to_cpu(resp.param.link_evt.mac.mode);
+
+ if (speed)
+ *speed = __le32_to_cpu(resp.param.link_evt.mac.speed);
+
+ if (duplex)
+ *duplex = resp.param.link_evt.mac.duplex;
+
+ if (fec)
+ *fec = resp.param.link_evt.mac.fec;
+
+ return err;
+}
+
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_ADMIN_STATE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
- .admin_state = admin_state,
+ .link = {
+ .mac = {
+ .admin = admin,
+ .reg_mode.mode = __cpu_to_le32(mode),
+ .reg_mode.inband = inband,
+ .reg_mode.speed = __cpu_to_le32(speed),
+ .reg_mode.duplex = duplex,
+ .reg_mode.fec = fec
+ }
+ }
}
};
@@ -729,14 +881,70 @@ int prestera_hw_port_state_set(const struct prestera_port *port,
&req.cmd, sizeof(req));
}
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym)
+{
+ struct prestera_msg_port_attr_resp resp;
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id)
+ };
+ int err;
+
+ err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
+ &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
+ if (err)
+ return err;
+
+ if (mdix)
+ *mdix = prestera_hw_mdix_to_eth(resp.param.link_evt.phy.mdix);
+
+ if (lmode_bmap)
+ *lmode_bmap = __le64_to_cpu(resp.param.link_evt.phy.lmode_bmap);
+
+ if (fc_pause && fc_asym)
+ prestera_hw_remote_fc_to_eth(resp.param.link_evt.phy.fc,
+ fc_pause, fc_asym);
+
+ return err;
+}
+
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix)
+{
+ struct prestera_msg_port_attr_req req = {
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_MODE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .param = {
+ .link = {
+ .phy = {
+ .admin = admin,
+ .adv_enable = adv ? 1 : 0,
+ .mode = __cpu_to_le32(mode),
+ .modes = __cpu_to_le64(modes),
+ }
+ }
+ }
+ };
+
+ req.param.link.phy.mdix = prestera_hw_mdix_from_eth(mdix);
+
+ return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
+ &req.cmd, sizeof(req));
+}
+
int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MTU,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MTU),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
- .mtu = mtu,
+ .mtu = __cpu_to_le32(mtu),
}
};
@@ -747,9 +955,9 @@ int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu)
int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MAC,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_MAC),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
ether_addr_copy(req.param.mac, mac);
@@ -762,9 +970,9 @@ int prestera_hw_port_accept_frm_type(struct prestera_port *port,
enum prestera_accept_frm_type type)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_ACCEPT_FRAME_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.accept_frm_type = type,
}
@@ -778,9 +986,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
struct prestera_port_caps *caps)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_CAPABILITY,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_CAPABILITY),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -790,7 +998,7 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
if (err)
return err;
- caps->supp_link_modes = resp.param.cap.link_mode;
+ caps->supp_link_modes = __le64_to_cpu(resp.param.cap.link_mode);
caps->transceiver = resp.param.cap.transceiver;
caps->supp_fec = resp.param.cap.fec;
caps->type = resp.param.cap.type;
@@ -798,44 +1006,9 @@ int prestera_hw_port_cap_get(const struct prestera_port *port,
return err;
}
-int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
- u64 *link_mode_bitmap)
+static void prestera_hw_remote_fc_to_eth(u8 fc, bool *pause, bool *asym_pause)
{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_CAPABILITY,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *link_mode_bitmap = resp.param.cap.link_mode;
-
- return 0;
-}
-
-int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
- bool *pause, bool *asym_pause)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_REMOTE_FC,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- switch (resp.param.fc) {
+ switch (fc) {
case PRESTERA_FC_SYMMETRIC:
*pause = true;
*asym_pause = false;
@@ -852,8 +1025,6 @@ int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
*pause = false;
*asym_pause = false;
}
-
- return 0;
}
int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
@@ -867,7 +1038,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
if (err)
return err;
- *ruleset_id = resp.id;
+ *ruleset_id = __le16_to_cpu(resp.id);
return 0;
}
@@ -875,7 +1046,7 @@ int prestera_hw_acl_ruleset_create(struct prestera_switch *sw, u16 *ruleset_id)
int prestera_hw_acl_ruleset_del(struct prestera_switch *sw, u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_req req = {
- .id = ruleset_id,
+ .id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULESET_DELETE,
@@ -890,7 +1061,7 @@ static int prestera_hw_acl_actions_put(struct prestera_msg_acl_action *action,
int i = 0;
list_for_each_entry(a_entry, a_list, list) {
- action[i].id = a_entry->id;
+ action[i].id = __cpu_to_le32(a_entry->id);
switch (a_entry->id) {
case PRESTERA_ACL_RULE_ACTION_ACCEPT:
@@ -916,7 +1087,7 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
int i = 0;
list_for_each_entry(m_entry, m_list, list) {
- match[i].type = m_entry->type;
+ match[i].type = __cpu_to_le32(m_entry->type);
switch (m_entry->type) {
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ETH_TYPE:
@@ -924,8 +1095,10 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_DST:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_ID:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_VLAN_TPID:
- match[i].keymask.u16.key = m_entry->keymask.u16.key;
- match[i].keymask.u16.mask = m_entry->keymask.u16.mask;
+ match[i].keymask.u16.key =
+ __cpu_to_le16(m_entry->keymask.u16.key);
+ match[i].keymask.u16.mask =
+ __cpu_to_le16(m_entry->keymask.u16.mask);
break;
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_TYPE:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_ICMP_CODE:
@@ -946,12 +1119,16 @@ static int prestera_hw_acl_matches_put(struct prestera_msg_acl_match *match,
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_IP_DST:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_SRC:
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_L4_PORT_RANGE_DST:
- match[i].keymask.u32.key = m_entry->keymask.u32.key;
- match[i].keymask.u32.mask = m_entry->keymask.u32.mask;
+ match[i].keymask.u32.key =
+ __cpu_to_le32(m_entry->keymask.u32.key);
+ match[i].keymask.u32.mask =
+ __cpu_to_le32(m_entry->keymask.u32.mask);
break;
case PRESTERA_ACL_RULE_MATCH_ENTRY_TYPE_PORT:
- match[i].keymask.u64.key = m_entry->keymask.u64.key;
- match[i].keymask.u64.mask = m_entry->keymask.u64.mask;
+ match[i].keymask.u64.key =
+ __cpu_to_le64(m_entry->keymask.u64.key);
+ match[i].keymask.u64.mask =
+ __cpu_to_le64(m_entry->keymask.u64.mask);
break;
default:
return -EINVAL;
@@ -1001,8 +1178,8 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw,
if (err)
goto free_buff;
- req->ruleset_id = prestera_acl_rule_ruleset_id_get(rule);
- req->priority = prestera_acl_rule_priority_get(rule);
+ req->ruleset_id = __cpu_to_le16(prestera_acl_rule_ruleset_id_get(rule));
+ req->priority = __cpu_to_le32(prestera_acl_rule_priority_get(rule));
req->n_actions = prestera_acl_rule_action_len(rule);
req->n_matches = prestera_acl_rule_match_len(rule);
@@ -1011,7 +1188,7 @@ int prestera_hw_acl_rule_add(struct prestera_switch *sw,
if (err)
goto free_buff;
- *rule_id = resp.id;
+ *rule_id = __le32_to_cpu(resp.id);
free_buff:
kfree(buff);
return err;
@@ -1020,7 +1197,7 @@ free_buff:
int prestera_hw_acl_rule_del(struct prestera_switch *sw, u32 rule_id)
{
struct prestera_msg_acl_rule_req req = {
- .id = rule_id
+ .id = __cpu_to_le32(rule_id)
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_ACL_RULE_DELETE,
@@ -1032,7 +1209,7 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
{
struct prestera_msg_acl_rule_stats_resp resp;
struct prestera_msg_acl_rule_req req = {
- .id = rule_id
+ .id = __cpu_to_le32(rule_id)
};
int err;
@@ -1041,8 +1218,8 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
if (err)
return err;
- *packets = resp.packets;
- *bytes = resp.bytes;
+ *packets = __le64_to_cpu(resp.packets);
+ *bytes = __le64_to_cpu(resp.bytes);
return 0;
}
@@ -1050,9 +1227,9 @@ int prestera_hw_acl_rule_stats_get(struct prestera_switch *sw, u32 rule_id,
int prestera_hw_acl_port_bind(const struct prestera_port *port, u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_bind_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .ruleset_id = ruleset_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .ruleset_id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_BIND,
@@ -1063,9 +1240,9 @@ int prestera_hw_acl_port_unbind(const struct prestera_port *port,
u16 ruleset_id)
{
struct prestera_msg_acl_ruleset_bind_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .ruleset_id = ruleset_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .ruleset_id = __cpu_to_le16(ruleset_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_ACL_PORT_UNBIND,
@@ -1076,8 +1253,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
{
struct prestera_msg_span_resp resp;
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
int err;
@@ -1094,8 +1271,8 @@ int prestera_hw_span_get(const struct prestera_port *port, u8 *span_id)
int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
{
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.id = span_id,
};
@@ -1106,8 +1283,8 @@ int prestera_hw_span_bind(const struct prestera_port *port, u8 span_id)
int prestera_hw_span_unbind(const struct prestera_port *port)
{
struct prestera_msg_span_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_SPAN_UNBIND,
@@ -1127,9 +1304,9 @@ int prestera_hw_span_release(struct prestera_switch *sw, u8 span_id)
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_TYPE,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_TYPE),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -1144,146 +1321,12 @@ int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type)
return 0;
}
-int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FEC,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *fec = resp.param.fec;
-
- return 0;
-}
-
-int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FEC,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .fec = fec,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-static u8 prestera_hw_mdix_to_eth(u8 mode)
-{
- switch (mode) {
- case PRESTERA_PORT_TP_MDI:
- return ETH_TP_MDI;
- case PRESTERA_PORT_TP_MDIX:
- return ETH_TP_MDI_X;
- case PRESTERA_PORT_TP_AUTO:
- return ETH_TP_MDI_AUTO;
- default:
- return ETH_TP_MDI_INVALID;
- }
-}
-
-static u8 prestera_hw_mdix_from_eth(u8 mode)
-{
- switch (mode) {
- case ETH_TP_MDI:
- return PRESTERA_PORT_TP_MDI;
- case ETH_TP_MDI_X:
- return PRESTERA_PORT_TP_MDIX;
- case ETH_TP_MDI_AUTO:
- return PRESTERA_PORT_TP_AUTO;
- default:
- return PRESTERA_PORT_TP_NA;
- }
-}
-
-int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
- u8 *admin_mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *status = prestera_hw_mdix_to_eth(resp.param.mdix.status);
- *admin_mode = prestera_hw_mdix_to_eth(resp.param.mdix.admin_mode);
-
- return 0;
-}
-
-int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_MDIX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
-
- req.param.mdix.admin_mode = prestera_hw_mdix_from_eth(mode);
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .link_mode = mode,
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
-int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LINK_MODE,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *mode = resp.param.link_mode;
-
- return 0;
-}
-
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_SPEED,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_SPEED),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_attr_resp resp;
int err;
@@ -1293,73 +1336,33 @@ int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed)
if (err)
return err;
- *speed = resp.param.speed;
+ *speed = __le32_to_cpu(resp.param.speed);
return 0;
}
-int prestera_hw_port_autoneg_set(const struct prestera_port *port,
- bool autoneg, u64 link_modes, u8 fec)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG,
- .port = port->hw_id,
- .dev = port->dev_id,
- .param = {
- .autoneg = {
- .link_mode = link_modes,
- .enable = autoneg,
- .fec = fec,
- }
- }
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
- &req.cmd, sizeof(req));
-}
-
int prestera_hw_port_autoneg_restart(struct prestera_port *port)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_AUTONEG_RESTART,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_PHY_AUTONEG_RESTART),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_SET,
&req.cmd, sizeof(req));
}
-int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex)
-{
- struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_DUPLEX,
- .port = port->hw_id,
- .dev = port->dev_id,
- };
- struct prestera_msg_port_attr_resp resp;
- int err;
-
- err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
- &req.cmd, sizeof(req), &resp.ret, sizeof(resp));
- if (err)
- return err;
-
- *duplex = resp.param.duplex;
-
- return 0;
-}
-
int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *st)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_STATS,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_STATS),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
struct prestera_msg_port_stats_resp resp;
- u64 *hw = resp.stats;
+ __le64 *hw = resp.stats;
int err;
err = prestera_cmd_ret(port->sw, PRESTERA_CMD_TYPE_PORT_ATTR_GET,
@@ -1367,36 +1370,56 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
if (err)
return err;
- st->good_octets_received = hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT];
- st->bad_octets_received = hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT];
- st->mac_trans_error = hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT];
- st->broadcast_frames_received = hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT];
- st->multicast_frames_received = hw[PRESTERA_PORT_MC_PKTS_RCV_CNT];
- st->frames_64_octets = hw[PRESTERA_PORT_PKTS_64L_CNT];
- st->frames_65_to_127_octets = hw[PRESTERA_PORT_PKTS_65TO127L_CNT];
- st->frames_128_to_255_octets = hw[PRESTERA_PORT_PKTS_128TO255L_CNT];
- st->frames_256_to_511_octets = hw[PRESTERA_PORT_PKTS_256TO511L_CNT];
- st->frames_512_to_1023_octets = hw[PRESTERA_PORT_PKTS_512TO1023L_CNT];
- st->frames_1024_to_max_octets = hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT];
- st->excessive_collision = hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT];
- st->multicast_frames_sent = hw[PRESTERA_PORT_MC_PKTS_SENT_CNT];
- st->broadcast_frames_sent = hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT];
- st->fc_sent = hw[PRESTERA_PORT_FC_SENT_CNT];
- st->fc_received = hw[PRESTERA_PORT_GOOD_FC_RCV_CNT];
- st->buffer_overrun = hw[PRESTERA_PORT_DROP_EVENTS_CNT];
- st->undersize = hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT];
- st->fragments = hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT];
- st->oversize = hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT];
- st->jabber = hw[PRESTERA_PORT_JABBER_PKTS_CNT];
- st->rx_error_frame_received = hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT];
- st->bad_crc = hw[PRESTERA_PORT_BAD_CRC_CNT];
- st->collisions = hw[PRESTERA_PORT_COLLISIONS_CNT];
- st->late_collision = hw[PRESTERA_PORT_LATE_COLLISIONS_CNT];
- st->unicast_frames_received = hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT];
- st->unicast_frames_sent = hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT];
- st->sent_multiple = hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT];
- st->sent_deferred = hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT];
- st->good_octets_sent = hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT];
+ st->good_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_RCV_CNT]);
+ st->bad_octets_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BAD_OCTETS_RCV_CNT]);
+ st->mac_trans_error =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_TRANSMIT_ERR_CNT]);
+ st->broadcast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_RCV_CNT]);
+ st->multicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_RCV_CNT]);
+ st->frames_64_octets = __le64_to_cpu(hw[PRESTERA_PORT_PKTS_64L_CNT]);
+ st->frames_65_to_127_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_65TO127L_CNT]);
+ st->frames_128_to_255_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_128TO255L_CNT]);
+ st->frames_256_to_511_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_256TO511L_CNT]);
+ st->frames_512_to_1023_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_512TO1023L_CNT]);
+ st->frames_1024_to_max_octets =
+ __le64_to_cpu(hw[PRESTERA_PORT_PKTS_1024TOMAXL_CNT]);
+ st->excessive_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_EXCESSIVE_COLLISIONS_CNT]);
+ st->multicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_MC_PKTS_SENT_CNT]);
+ st->broadcast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_BRDC_PKTS_SENT_CNT]);
+ st->fc_sent = __le64_to_cpu(hw[PRESTERA_PORT_FC_SENT_CNT]);
+ st->fc_received = __le64_to_cpu(hw[PRESTERA_PORT_GOOD_FC_RCV_CNT]);
+ st->buffer_overrun = __le64_to_cpu(hw[PRESTERA_PORT_DROP_EVENTS_CNT]);
+ st->undersize = __le64_to_cpu(hw[PRESTERA_PORT_UNDERSIZE_PKTS_CNT]);
+ st->fragments = __le64_to_cpu(hw[PRESTERA_PORT_FRAGMENTS_PKTS_CNT]);
+ st->oversize = __le64_to_cpu(hw[PRESTERA_PORT_OVERSIZE_PKTS_CNT]);
+ st->jabber = __le64_to_cpu(hw[PRESTERA_PORT_JABBER_PKTS_CNT]);
+ st->rx_error_frame_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_MAC_RCV_ERROR_CNT]);
+ st->bad_crc = __le64_to_cpu(hw[PRESTERA_PORT_BAD_CRC_CNT]);
+ st->collisions = __le64_to_cpu(hw[PRESTERA_PORT_COLLISIONS_CNT]);
+ st->late_collision =
+ __le64_to_cpu(hw[PRESTERA_PORT_LATE_COLLISIONS_CNT]);
+ st->unicast_frames_received =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_RCV_CNT]);
+ st->unicast_frames_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_UC_PKTS_SENT_CNT]);
+ st->sent_multiple =
+ __le64_to_cpu(hw[PRESTERA_PORT_MULTIPLE_PKTS_SENT_CNT]);
+ st->sent_deferred =
+ __le64_to_cpu(hw[PRESTERA_PORT_DEFERRED_PKTS_SENT_CNT]);
+ st->good_octets_sent =
+ __le64_to_cpu(hw[PRESTERA_PORT_GOOD_OCTETS_SENT_CNT]);
return 0;
}
@@ -1404,9 +1427,9 @@ int prestera_hw_port_stats_get(const struct prestera_port *port,
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_LEARNING,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_LEARNING),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.learning = enable,
}
@@ -1419,9 +1442,9 @@ int prestera_hw_port_learning_set(struct prestera_port *port, bool enable)
static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_UC,
@@ -1437,9 +1460,9 @@ static int prestera_hw_port_uc_flood_set(struct prestera_port *port, bool flood)
static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood_ext = {
.type = PRESTERA_PORT_FLOOD_TYPE_MC,
@@ -1455,9 +1478,9 @@ static int prestera_hw_port_mc_flood_set(struct prestera_port *port, bool flood)
static int prestera_hw_port_flood_set_v2(struct prestera_port *port, bool flood)
{
struct prestera_msg_port_attr_req req = {
- .attr = PRESTERA_CMD_PORT_ATTR_FLOOD,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .attr = __cpu_to_le32(PRESTERA_CMD_PORT_ATTR_FLOOD),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
.param = {
.flood = flood,
}
@@ -1505,7 +1528,7 @@ err_uc_flood:
int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_CREATE,
@@ -1515,7 +1538,7 @@ int prestera_hw_vlan_create(struct prestera_switch *sw, u16 vid)
int prestera_hw_vlan_delete(struct prestera_switch *sw, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_VLAN_DELETE,
@@ -1526,9 +1549,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
bool is_member, bool untagged)
{
struct prestera_msg_vlan_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
.is_member = is_member,
.is_tagged = !untagged,
};
@@ -1540,9 +1563,9 @@ int prestera_hw_vlan_port_set(struct prestera_port *port, u16 vid,
int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
{
struct prestera_msg_vlan_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_VLAN_PVID_SET,
@@ -1552,9 +1575,9 @@ int prestera_hw_vlan_port_vid_set(struct prestera_port *port, u16 vid)
int prestera_hw_vlan_port_stp_set(struct prestera_port *port, u16 vid, u8 state)
{
struct prestera_msg_stp_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .vid = vid,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .vid = __cpu_to_le16(vid),
.state = state,
};
@@ -1567,10 +1590,10 @@ int prestera_hw_fdb_add(struct prestera_port *port, const unsigned char *mac,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
@@ -1585,10 +1608,10 @@ int prestera_hw_fdb_del(struct prestera_port *port, const unsigned char *mac,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
@@ -1603,9 +1626,9 @@ int prestera_hw_lag_fdb_add(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
.dynamic = dynamic,
};
@@ -1621,9 +1644,9 @@ int prestera_hw_lag_fdb_del(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
+ .vid = __cpu_to_le16(vid),
};
ether_addr_copy(req.mac, mac);
@@ -1636,10 +1659,10 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .flush_mode = mode,
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
@@ -1649,8 +1672,8 @@ int prestera_hw_fdb_flush_port(struct prestera_port *port, u32 mode)
int prestera_hw_fdb_flush_vlan(struct prestera_switch *sw, u16 vid, u32 mode)
{
struct prestera_msg_fdb_req req = {
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_VLAN,
@@ -1662,11 +1685,11 @@ int prestera_hw_fdb_flush_port_vlan(struct prestera_port *port, u16 vid,
{
struct prestera_msg_fdb_req req = {
.dest = {
- .dev = port->dev_id,
- .port = port->hw_id,
+ .dev = __cpu_to_le32(port->dev_id),
+ .port = __cpu_to_le32(port->hw_id),
},
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
@@ -1679,9 +1702,9 @@ int prestera_hw_fdb_flush_lag(struct prestera_switch *sw, u16 lag_id,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .flush_mode = mode,
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT,
@@ -1694,10 +1717,10 @@ int prestera_hw_fdb_flush_lag_vlan(struct prestera_switch *sw,
struct prestera_msg_fdb_req req = {
.dest_type = PRESTERA_HW_FDB_ENTRY_TYPE_LAG,
.dest = {
- .lag_id = lag_id,
+ .lag_id = __cpu_to_le16(lag_id),
},
- .vid = vid,
- .flush_mode = mode,
+ .vid = __cpu_to_le16(vid),
+ .flush_mode = __cpu_to_le32(mode),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_FDB_FLUSH_PORT_VLAN,
@@ -1716,7 +1739,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
if (err)
return err;
- *bridge_id = resp.bridge;
+ *bridge_id = __le16_to_cpu(resp.bridge);
return 0;
}
@@ -1724,7 +1747,7 @@ int prestera_hw_bridge_create(struct prestera_switch *sw, u16 *bridge_id)
int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
+ .bridge = __cpu_to_le16(bridge_id),
};
return prestera_cmd(sw, PRESTERA_CMD_TYPE_BRIDGE_DELETE,
@@ -1734,9 +1757,9 @@ int prestera_hw_bridge_delete(struct prestera_switch *sw, u16 bridge_id)
int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_ADD,
@@ -1746,9 +1769,9 @@ int prestera_hw_bridge_port_add(struct prestera_port *port, u16 bridge_id)
int prestera_hw_bridge_port_delete(struct prestera_port *port, u16 bridge_id)
{
struct prestera_msg_bridge_req req = {
- .bridge = bridge_id,
- .port = port->hw_id,
- .dev = port->dev_id,
+ .bridge = __cpu_to_le16(bridge_id),
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_BRIDGE_PORT_DELETE,
@@ -1769,28 +1792,17 @@ int prestera_hw_rxtx_init(struct prestera_switch *sw,
if (err)
return err;
- params->map_addr = resp.map_addr;
+ params->map_addr = __le32_to_cpu(resp.map_addr);
return 0;
}
-int prestera_hw_rxtx_port_init(struct prestera_port *port)
-{
- struct prestera_msg_rxtx_port_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- };
-
- return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_RXTX_PORT_INIT,
- &req.cmd, sizeof(req));
-}
-
int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_ADD,
@@ -1800,9 +1812,9 @@ int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id)
int prestera_hw_lag_member_del(struct prestera_port *port, u16 lag_id)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
return prestera_cmd(port->sw, PRESTERA_CMD_TYPE_LAG_MEMBER_DELETE,
@@ -1813,9 +1825,9 @@ int prestera_hw_lag_member_enable(struct prestera_port *port, u16 lag_id,
bool enable)
{
struct prestera_msg_lag_req req = {
- .port = port->hw_id,
- .dev = port->dev_id,
- .lag_id = lag_id,
+ .port = __cpu_to_le32(port->hw_id),
+ .dev = __cpu_to_le32(port->dev_id),
+ .lag_id = __cpu_to_le16(lag_id),
};
u32 cmd;
@@ -1842,7 +1854,7 @@ prestera_hw_cpu_code_counters_get(struct prestera_switch *sw, u8 code,
if (err)
return err;
- *packet_count = resp.packet_count;
+ *packet_count = __le64_to_cpu(resp.packet_count);
return 0;
}
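
The hunks above convert every firmware request/response field to an explicit little-endian wire type: values are packed with __cpu_to_le16()/__cpu_to_le32() before the message is queued and unpacked with __le16_to_cpu()/__le64_to_cpu() on the reply path. A minimal sketch of that pattern, using hypothetical message and function names rather than the driver's own:

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical wire layout; the real driver marks its messages packed */
struct example_fdb_req {
	__le16 lag_id;
	__le16 vid;
	__le32 flush_mode;
};

static void example_fill_req(struct example_fdb_req *req,
			     u16 lag_id, u16 vid, u32 mode)
{
	req->lag_id = __cpu_to_le16(lag_id);	/* host order -> wire order */
	req->vid = __cpu_to_le16(vid);
	req->flush_mode = __cpu_to_le32(mode);
}

static u16 example_read_resp_id(const struct example_fdb_req *resp)
{
	return __le16_to_cpu(resp->lag_id);	/* wire order -> host order */
}
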
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_hw.h b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
index 546d5fd8240d..57a3c2e5b112 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_hw.h
+++ b/drivers/net/ethernet/marvell/prestera/prestera_hw.h
@@ -20,6 +20,23 @@ enum prestera_fdb_flush_mode {
};
enum {
+ PRESTERA_MAC_MODE_INTERNAL,
+ PRESTERA_MAC_MODE_SGMII,
+ PRESTERA_MAC_MODE_1000BASE_X,
+ PRESTERA_MAC_MODE_KR,
+ PRESTERA_MAC_MODE_KR2,
+ PRESTERA_MAC_MODE_KR4,
+ PRESTERA_MAC_MODE_CR,
+ PRESTERA_MAC_MODE_CR2,
+ PRESTERA_MAC_MODE_CR4,
+ PRESTERA_MAC_MODE_SR_LR,
+ PRESTERA_MAC_MODE_SR_LR2,
+ PRESTERA_MAC_MODE_SR_LR4,
+
+ PRESTERA_MAC_MODE_MAX
+};
+
+enum {
PRESTERA_LINK_MODE_10baseT_Half,
PRESTERA_LINK_MODE_10baseT_Full,
PRESTERA_LINK_MODE_100baseT_Half,
@@ -116,32 +133,29 @@ int prestera_hw_switch_mac_set(struct prestera_switch *sw, const char *mac);
/* Port API */
int prestera_hw_port_info_get(const struct prestera_port *port,
u32 *dev_id, u32 *hw_id, u16 *fp_id);
-int prestera_hw_port_state_set(const struct prestera_port *port,
- bool admin_state);
+
+int prestera_hw_port_mac_mode_get(const struct prestera_port *port,
+ u32 *mode, u32 *speed, u8 *duplex, u8 *fec);
+int prestera_hw_port_mac_mode_set(const struct prestera_port *port,
+ bool admin, u32 mode, u8 inband,
+ u32 speed, u8 duplex, u8 fec);
+int prestera_hw_port_phy_mode_get(const struct prestera_port *port,
+ u8 *mdix, u64 *lmode_bmap,
+ bool *fc_pause, bool *fc_asym);
+int prestera_hw_port_phy_mode_set(const struct prestera_port *port,
+ bool admin, bool adv, u32 mode, u64 modes,
+ u8 mdix);
+
int prestera_hw_port_mtu_set(const struct prestera_port *port, u32 mtu);
int prestera_hw_port_mtu_get(const struct prestera_port *port, u32 *mtu);
int prestera_hw_port_mac_set(const struct prestera_port *port, const char *mac);
int prestera_hw_port_mac_get(const struct prestera_port *port, char *mac);
int prestera_hw_port_cap_get(const struct prestera_port *port,
struct prestera_port_caps *caps);
-int prestera_hw_port_remote_cap_get(const struct prestera_port *port,
- u64 *link_mode_bitmap);
-int prestera_hw_port_remote_fc_get(const struct prestera_port *port,
- bool *pause, bool *asym_pause);
int prestera_hw_port_type_get(const struct prestera_port *port, u8 *type);
-int prestera_hw_port_fec_get(const struct prestera_port *port, u8 *fec);
-int prestera_hw_port_fec_set(const struct prestera_port *port, u8 fec);
-int prestera_hw_port_autoneg_set(const struct prestera_port *port,
- bool autoneg, u64 link_modes, u8 fec);
int prestera_hw_port_autoneg_restart(struct prestera_port *port);
-int prestera_hw_port_duplex_get(const struct prestera_port *port, u8 *duplex);
int prestera_hw_port_stats_get(const struct prestera_port *port,
struct prestera_port_stats *stats);
-int prestera_hw_port_link_mode_set(const struct prestera_port *port, u32 mode);
-int prestera_hw_port_link_mode_get(const struct prestera_port *port, u32 *mode);
-int prestera_hw_port_mdix_get(const struct prestera_port *port, u8 *status,
- u8 *admin_mode);
-int prestera_hw_port_mdix_set(const struct prestera_port *port, u8 mode);
int prestera_hw_port_speed_get(const struct prestera_port *port, u32 *speed);
int prestera_hw_port_learning_set(struct prestera_port *port, bool enable);
int prestera_hw_port_flood_set(struct prestera_port *port, unsigned long mask,
@@ -206,7 +220,6 @@ void prestera_hw_event_handler_unregister(struct prestera_switch *sw,
/* RX/TX */
int prestera_hw_rxtx_init(struct prestera_switch *sw,
struct prestera_rxtx_params *params);
-int prestera_hw_rxtx_port_init(struct prestera_port *port);
/* LAG API */
int prestera_hw_lag_member_add(struct prestera_port *port, u16 lag_id);
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c
index 44c670807fb3..625b40149fac 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_main.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c
@@ -80,27 +80,76 @@ struct prestera_port *prestera_find_port(struct prestera_switch *sw, u32 id)
return port;
}
-static int prestera_port_open(struct net_device *dev)
+int prestera_port_cfg_mac_read(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
+{
+ *cfg = port->cfg_mac;
+ return 0;
+}
+
+int prestera_port_cfg_mac_write(struct prestera_port *port,
+ struct prestera_port_mac_config *cfg)
{
- struct prestera_port *port = netdev_priv(dev);
int err;
- err = prestera_hw_port_state_set(port, true);
+ err = prestera_hw_port_mac_mode_set(port, cfg->admin,
+ cfg->mode, cfg->inband, cfg->speed,
+ cfg->duplex, cfg->fec);
if (err)
return err;
+ port->cfg_mac = *cfg;
+ return 0;
+}
+
+static int prestera_port_open(struct net_device *dev)
+{
+ struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
+
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = true;
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = true;
+ err = prestera_hw_port_phy_mode_set(port, true, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+
netif_start_queue(dev);
- return 0;
+ return err;
}
static int prestera_port_close(struct net_device *dev)
{
struct prestera_port *port = netdev_priv(dev);
+ struct prestera_port_mac_config cfg_mac;
+ int err = 0;
netif_stop_queue(dev);
- return prestera_hw_port_state_set(port, false);
+ if (port->caps.transceiver == PRESTERA_PORT_TCVR_SFP) {
+ err = prestera_port_cfg_mac_read(port, &cfg_mac);
+ if (!err) {
+ cfg_mac.admin = false;
+ prestera_port_cfg_mac_write(port, &cfg_mac);
+ }
+ } else {
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port, false, port->autoneg,
+ port->cfg_phy.mode,
+ port->adver_link_modes,
+ port->cfg_phy.mdix);
+ }
+
+ return err;
}
static netdev_tx_t prestera_port_xmit(struct sk_buff *skb,
@@ -137,7 +186,7 @@ static int prestera_port_set_mac_address(struct net_device *dev, void *p)
if (err)
return err;
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -228,46 +277,23 @@ static const struct net_device_ops prestera_netdev_ops = {
.ndo_get_devlink_port = prestera_devlink_get_port,
};
-int prestera_port_autoneg_set(struct prestera_port *port, bool enable,
- u64 adver_link_modes, u8 adver_fec)
+int prestera_port_autoneg_set(struct prestera_port *port, u64 link_modes)
{
- bool refresh = false;
- u64 link_modes;
int err;
- u8 fec;
-
- if (port->caps.type != PRESTERA_PORT_TYPE_TP)
- return enable ? -EINVAL : 0;
-
- if (!enable)
- goto set_autoneg;
-
- link_modes = port->caps.supp_link_modes & adver_link_modes;
- fec = port->caps.supp_fec & adver_fec;
-
- if (!link_modes && !fec)
- return -EOPNOTSUPP;
-
- if (link_modes && port->adver_link_modes != link_modes) {
- port->adver_link_modes = link_modes;
- refresh = true;
- }
-
- if (fec && port->adver_fec != fec) {
- port->adver_fec = fec;
- refresh = true;
- }
-set_autoneg:
- if (port->autoneg == enable && !refresh)
+ if (port->autoneg && port->adver_link_modes == link_modes)
return 0;
- err = prestera_hw_port_autoneg_set(port, enable, port->adver_link_modes,
- port->adver_fec);
+ err = prestera_hw_port_phy_mode_set(port, port->cfg_phy.admin,
+ true, 0, link_modes,
+ port->cfg_phy.mdix);
if (err)
return err;
- port->autoneg = enable;
+ port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
+ port->adver_link_modes = link_modes;
+ port->cfg_phy.mode = 0;
+ port->autoneg = true;
return 0;
}
@@ -288,6 +314,7 @@ static void prestera_port_list_del(struct prestera_port *port)
static int prestera_port_create(struct prestera_switch *sw, u32 id)
{
+ struct prestera_port_mac_config cfg_mac;
struct prestera_port *port;
struct net_device *dev;
int err;
@@ -338,11 +365,14 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
+ eth_hw_addr_gen(dev, sw->base_mac, port->fp_id);
/* firmware requires that port's MAC address consist of the first
* 5 bytes of the base MAC address
*/
- memcpy(dev->dev_addr, sw->base_mac, dev->addr_len - 1);
- dev->dev_addr[dev->addr_len - 1] = port->fp_id;
+ if (memcmp(dev->dev_addr, sw->base_mac, ETH_ALEN - 1)) {
+ dev_warn(prestera_dev(sw), "Port MAC address wraps for port(%u)\n", id);
+ dev_addr_mod(dev, 0, sw->base_mac, ETH_ALEN - 1);
+ }
err = prestera_hw_port_mac_set(port, dev->dev_addr);
if (err) {
@@ -356,16 +386,43 @@ static int prestera_port_create(struct prestera_switch *sw, u32 id)
goto err_port_init;
}
- port->adver_fec = BIT(PRESTERA_PORT_FEC_OFF);
- prestera_port_autoneg_set(port, true, port->caps.supp_link_modes,
- port->caps.supp_fec);
+ port->adver_link_modes = port->caps.supp_link_modes;
+ port->adver_fec = 0;
+ port->autoneg = true;
+
+ /* initialize config mac */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ cfg_mac.admin = true;
+ cfg_mac.mode = PRESTERA_MAC_MODE_INTERNAL;
+ } else {
+ cfg_mac.admin = false;
+ cfg_mac.mode = PRESTERA_MAC_MODE_MAX;
+ }
+ cfg_mac.inband = false;
+ cfg_mac.speed = 0;
+ cfg_mac.duplex = DUPLEX_UNKNOWN;
+ cfg_mac.fec = PRESTERA_PORT_FEC_OFF;
- err = prestera_hw_port_state_set(port, false);
+ err = prestera_port_cfg_mac_write(port, &cfg_mac);
if (err) {
- dev_err(prestera_dev(sw), "Failed to set port(%u) down\n", id);
+ dev_err(prestera_dev(sw), "Failed to set port(%u) mac mode\n", id);
goto err_port_init;
}
+ /* initialize config phy (if this is integral) */
+ if (port->caps.transceiver != PRESTERA_PORT_TCVR_SFP) {
+ port->cfg_phy.mdix = ETH_TP_MDI_AUTO;
+ port->cfg_phy.admin = false;
+ err = prestera_hw_port_phy_mode_set(port,
+ port->cfg_phy.admin,
+ false, 0, 0,
+ port->cfg_phy.mdix);
+ if (err) {
+ dev_err(prestera_dev(sw), "Failed to set port(%u) phy mode\n", id);
+ goto err_port_init;
+ }
+ }
+
err = prestera_rxtx_port_init(port);
if (err)
goto err_port_init;
@@ -446,8 +503,10 @@ static void prestera_port_handle_event(struct prestera_switch *sw,
caching_dw = &port->cached_hw_stats.caching_dw;
- if (evt->id == PRESTERA_PORT_EVENT_STATE_CHANGED) {
- if (evt->port_evt.data.oper_state) {
+ prestera_ethtool_port_state_changed(port, &evt->port_evt);
+
+ if (evt->id == PRESTERA_PORT_EVENT_MAC_STATE_CHANGED) {
+ if (port->state_mac.oper) {
netif_carrier_on(port->dev);
if (!delayed_work_pending(caching_dw))
queue_delayed_work(prestera_wq, caching_dw, 0);
@@ -851,7 +910,7 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_span_init;
- err = prestera_devlink_register(sw);
+ err = prestera_devlink_traps_register(sw);
if (err)
goto err_dl_register;
@@ -863,12 +922,13 @@ static int prestera_switch_init(struct prestera_switch *sw)
if (err)
goto err_ports_create;
+ prestera_devlink_register(sw);
return 0;
err_ports_create:
prestera_lag_fini(sw);
err_lag_init:
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
err_dl_register:
prestera_span_fini(sw);
err_span_init:
@@ -888,9 +948,10 @@ err_swdev_register:
static void prestera_switch_fini(struct prestera_switch *sw)
{
+ prestera_devlink_unregister(sw);
prestera_destroy_ports(sw);
prestera_lag_fini(sw);
- prestera_devlink_unregister(sw);
+ prestera_devlink_traps_unregister(sw);
prestera_span_fini(sw);
prestera_acl_fini(sw);
prestera_event_handlers_unregister(sw);
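
The port-create path above derives each port MAC from the switch base MAC with eth_hw_addr_gen() and, if the generated address carries beyond the last octet, clamps it back to the base range with dev_addr_mod(). A rough sketch of that pattern under the same assumptions; the helper name is illustrative, not the driver's:

#include <linux/etherdevice.h>

static void example_port_mac_assign(struct net_device *dev,
				    const u8 *base_mac, unsigned int port_id)
{
	/* base MAC + port_id, carrying across all six octets */
	eth_hw_addr_gen(dev, base_mac, port_id);

	/* carry reached the first five octets: restore the base prefix */
	if (memcmp(dev->dev_addr, base_mac, ETH_ALEN - 1))
		dev_addr_mod(dev, 0, base_mac, ETH_ALEN - 1);
}
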
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
index a250d394da38..5d4d410b07c8 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c
@@ -14,10 +14,10 @@
#define PRESTERA_MSG_MAX_SIZE 1500
-#define PRESTERA_SUPP_FW_MAJ_VER 3
+#define PRESTERA_SUPP_FW_MAJ_VER 4
#define PRESTERA_SUPP_FW_MIN_VER 0
-#define PRESTERA_PREV_FW_MAJ_VER 2
+#define PRESTERA_PREV_FW_MAJ_VER 4
#define PRESTERA_PREV_FW_MIN_VER 0
#define PRESTERA_FW_PATH_FMT "mrvl/prestera/mvsw_prestera_fw-v%u.%u.img"
@@ -102,23 +102,30 @@ struct prestera_fw_evtq_regs {
u32 len;
};
+#define PRESTERA_CMD_QNUM_MAX 4
+
+struct prestera_fw_cmdq_regs {
+ u32 req_ctl;
+ u32 req_len;
+ u32 rcv_ctl;
+ u32 rcv_len;
+ u32 offs;
+ u32 len;
+};
+
struct prestera_fw_regs {
u32 fw_ready;
- u32 pad;
u32 cmd_offs;
u32 cmd_len;
+ u32 cmd_qnum;
u32 evt_offs;
u32 evt_qnum;
- u32 cmd_req_ctl;
- u32 cmd_req_len;
- u32 cmd_rcv_ctl;
- u32 cmd_rcv_len;
-
u32 fw_status;
u32 rx_status;
- struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
+ struct prestera_fw_cmdq_regs cmdq_list[PRESTERA_CMD_QNUM_MAX];
+ struct prestera_fw_evtq_regs evtq_list[PRESTERA_EVT_QNUM_MAX];
};
#define PRESTERA_FW_REG_OFFSET(f) offsetof(struct prestera_fw_regs, f)
@@ -130,14 +137,22 @@ struct prestera_fw_regs {
#define PRESTERA_CMD_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(cmd_offs)
#define PRESTERA_CMD_BUF_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_len)
+#define PRESTERA_CMD_QNUM_REG PRESTERA_FW_REG_OFFSET(cmd_qnum)
#define PRESTERA_EVT_BUF_OFFS_REG PRESTERA_FW_REG_OFFSET(evt_offs)
#define PRESTERA_EVT_QNUM_REG PRESTERA_FW_REG_OFFSET(evt_qnum)
-#define PRESTERA_CMD_REQ_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_req_ctl)
-#define PRESTERA_CMD_REQ_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_req_len)
+#define PRESTERA_CMDQ_REG_OFFSET(q, f) \
+ (PRESTERA_FW_REG_OFFSET(cmdq_list) + \
+ (q) * sizeof(struct prestera_fw_cmdq_regs) + \
+ offsetof(struct prestera_fw_cmdq_regs, f))
+
+#define PRESTERA_CMDQ_REQ_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_ctl)
+#define PRESTERA_CMDQ_REQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, req_len)
+#define PRESTERA_CMDQ_RCV_CTL_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_ctl)
+#define PRESTERA_CMDQ_RCV_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, rcv_len)
+#define PRESTERA_CMDQ_OFFS_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, offs)
+#define PRESTERA_CMDQ_LEN_REG(q) PRESTERA_CMDQ_REG_OFFSET(q, len)
-#define PRESTERA_CMD_RCV_CTL_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_ctl)
-#define PRESTERA_CMD_RCV_LEN_REG PRESTERA_FW_REG_OFFSET(cmd_rcv_len)
#define PRESTERA_FW_STATUS_REG PRESTERA_FW_REG_OFFSET(fw_status)
#define PRESTERA_RX_STATUS_REG PRESTERA_FW_REG_OFFSET(rx_status)
@@ -174,6 +189,13 @@ struct prestera_fw_evtq {
size_t len;
};
+struct prestera_fw_cmdq {
+ /* serialize access to dev->send_req */
+ struct mutex cmd_mtx;
+ u8 __iomem *addr;
+ size_t len;
+};
+
struct prestera_fw {
struct prestera_fw_rev rev_supp;
const struct firmware *bin;
@@ -183,9 +205,10 @@ struct prestera_fw {
u8 __iomem *ldr_ring_buf;
u32 ldr_buf_len;
u32 ldr_wr_idx;
- struct mutex cmd_mtx; /* serialize access to dev->send_req */
size_t cmd_mbox_len;
u8 __iomem *cmd_mbox;
+ struct prestera_fw_cmdq cmd_queue[PRESTERA_CMD_QNUM_MAX];
+ u8 cmd_qnum;
struct prestera_fw_evtq evt_queue[PRESTERA_EVT_QNUM_MAX];
u8 evt_qnum;
struct work_struct evt_work;
@@ -324,7 +347,27 @@ static int prestera_fw_wait_reg32(struct prestera_fw *fw, u32 reg, u32 cmp,
1 * USEC_PER_MSEC, waitms * USEC_PER_MSEC);
}
-static int prestera_fw_cmd_send(struct prestera_fw *fw,
+static void prestera_fw_cmdq_lock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_lock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static void prestera_fw_cmdq_unlock(struct prestera_fw *fw, u8 qid)
+{
+ mutex_unlock(&fw->cmd_queue[qid].cmd_mtx);
+}
+
+static u32 prestera_fw_cmdq_len(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].len;
+}
+
+static u8 __iomem *prestera_fw_cmdq_buf(struct prestera_fw *fw, u8 qid)
+{
+ return fw->cmd_queue[qid].addr;
+}
+
+static int prestera_fw_cmd_send(struct prestera_fw *fw, int qid,
void *in_msg, size_t in_size,
void *out_msg, size_t out_size,
unsigned int waitms)
@@ -335,30 +378,32 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw,
if (!waitms)
waitms = PRESTERA_FW_CMD_DEFAULT_WAIT_MS;
- if (ALIGN(in_size, 4) > fw->cmd_mbox_len)
+ if (ALIGN(in_size, 4) > prestera_fw_cmdq_len(fw, qid))
return -EMSGSIZE;
/* wait for finish previous reply from FW */
- err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG, 0, 30);
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid), 0, 30);
if (err) {
dev_err(fw->dev.dev, "finish reply from FW is timed out\n");
return err;
}
- prestera_fw_write(fw, PRESTERA_CMD_REQ_LEN_REG, in_size);
- memcpy_toio(fw->cmd_mbox, in_msg, in_size);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_LEN_REG(qid), in_size);
+
+ memcpy_toio(prestera_fw_cmdq_buf(fw, qid), in_msg, in_size);
- prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REQ_SENT);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REQ_SENT);
/* wait for reply from FW */
- err = prestera_fw_wait_reg32(fw, PRESTERA_CMD_RCV_CTL_REG,
+ err = prestera_fw_wait_reg32(fw, PRESTERA_CMDQ_RCV_CTL_REG(qid),
PRESTERA_CMD_F_REPL_SENT, waitms);
if (err) {
dev_err(fw->dev.dev, "reply from FW is timed out\n");
goto cmd_exit;
}
- ret_size = prestera_fw_read(fw, PRESTERA_CMD_RCV_LEN_REG);
+ ret_size = prestera_fw_read(fw, PRESTERA_CMDQ_RCV_LEN_REG(qid));
if (ret_size > out_size) {
dev_err(fw->dev.dev, "ret_size (%u) > out_len(%zu)\n",
ret_size, out_size);
@@ -366,14 +411,15 @@ static int prestera_fw_cmd_send(struct prestera_fw *fw,
goto cmd_exit;
}
- memcpy_fromio(out_msg, fw->cmd_mbox + in_size, ret_size);
+ memcpy_fromio(out_msg, prestera_fw_cmdq_buf(fw, qid) + in_size, ret_size);
cmd_exit:
- prestera_fw_write(fw, PRESTERA_CMD_REQ_CTL_REG, PRESTERA_CMD_F_REPL_RCVD);
+ prestera_fw_write(fw, PRESTERA_CMDQ_REQ_CTL_REG(qid),
+ PRESTERA_CMD_F_REPL_RCVD);
return err;
}
-static int prestera_fw_send_req(struct prestera_device *dev,
+static int prestera_fw_send_req(struct prestera_device *dev, int qid,
void *in_msg, size_t in_size, void *out_msg,
size_t out_size, unsigned int waitms)
{
@@ -382,9 +428,10 @@ static int prestera_fw_send_req(struct prestera_device *dev,
fw = container_of(dev, struct prestera_fw, dev);
- mutex_lock(&fw->cmd_mtx);
- ret = prestera_fw_cmd_send(fw, in_msg, in_size, out_msg, out_size, waitms);
- mutex_unlock(&fw->cmd_mtx);
+ prestera_fw_cmdq_lock(fw, qid);
+ ret = prestera_fw_cmd_send(fw, qid, in_msg, in_size, out_msg, out_size,
+ waitms);
+ prestera_fw_cmdq_unlock(fw, qid);
return ret;
}
@@ -414,7 +461,16 @@ static int prestera_fw_init(struct prestera_fw *fw)
fw->cmd_mbox = base + prestera_fw_read(fw, PRESTERA_CMD_BUF_OFFS_REG);
fw->cmd_mbox_len = prestera_fw_read(fw, PRESTERA_CMD_BUF_LEN_REG);
- mutex_init(&fw->cmd_mtx);
+ fw->cmd_qnum = prestera_fw_read(fw, PRESTERA_CMD_QNUM_REG);
+
+ for (qid = 0; qid < fw->cmd_qnum; qid++) {
+ u32 offs = prestera_fw_read(fw, PRESTERA_CMDQ_OFFS_REG(qid));
+ struct prestera_fw_cmdq *cmdq = &fw->cmd_queue[qid];
+
+ cmdq->len = prestera_fw_read(fw, PRESTERA_CMDQ_LEN_REG(qid));
+ cmdq->addr = fw->cmd_mbox + offs;
+ mutex_init(&cmdq->cmd_mtx);
+ }
fw->evt_buf = base + prestera_fw_read(fw, PRESTERA_EVT_BUF_OFFS_REG);
fw->evt_qnum = prestera_fw_read(fw, PRESTERA_EVT_QNUM_REG);
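
The PRESTERA_CMDQ_REG_OFFSET() macro above computes a per-queue register address from the packed register block: base offset of the array, plus queue index times the per-queue stride, plus the field offset. A standalone sketch of the same arithmetic with illustrative names:

#include <linux/stddef.h>	/* offsetof */
#include <linux/types.h>

struct example_cmdq_regs {
	u32 req_ctl;
	u32 req_len;
	u32 rcv_ctl;
	u32 rcv_len;
	u32 offs;
	u32 len;
};

struct example_fw_regs {
	u32 fw_ready;
	struct example_cmdq_regs cmdq_list[4];
};

#define EXAMPLE_CMDQ_REG(q, f) \
	(offsetof(struct example_fw_regs, cmdq_list) + \
	 (q) * sizeof(struct example_cmdq_regs) + \
	 offsetof(struct example_cmdq_regs, f))

/* e.g. EXAMPLE_CMDQ_REG(1, rcv_len) == 4 + 1 * 24 + 12 == 40 */
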
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
index 73d2eba5262f..e452cdeaf703 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c
@@ -794,14 +794,7 @@ void prestera_rxtx_switch_fini(struct prestera_switch *sw)
int prestera_rxtx_port_init(struct prestera_port *port)
{
- int err;
-
- err = prestera_hw_rxtx_port_init(port);
- if (err)
- return err;
-
port->dev->needed_headroom = PRESTERA_DSA_HLEN;
-
return 0;
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index fab53c9b8380..1d607bc6b59e 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -389,7 +389,7 @@ static void inverse_every_nibble(unsigned char *mac_addr)
* Outputs
* return the calculated entry.
*/
-static u32 hash_function(unsigned char *mac_addr_orig)
+static u32 hash_function(const unsigned char *mac_addr_orig)
{
u32 hash_result;
u32 addr0;
@@ -434,7 +434,7 @@ static u32 hash_function(unsigned char *mac_addr_orig)
* -ENOSPC if table full
*/
static int add_del_hash_entry(struct pxa168_eth_private *pep,
- unsigned char *mac_addr,
+ const unsigned char *mac_addr,
u32 rd, u32 skip, int del)
{
struct addr_table_entry *entry, *start;
@@ -521,7 +521,7 @@ static int add_del_hash_entry(struct pxa168_eth_private *pep,
*/
static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
unsigned char *oaddr,
- unsigned char *addr)
+ const unsigned char *addr)
{
/* Delete old entry */
if (oaddr)
@@ -607,7 +607,7 @@ static int pxa168_eth_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
memcpy(oldMac, dev->dev_addr, ETH_ALEN);
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
mac_h = dev->dev_addr[0] << 24;
mac_h |= dev->dev_addr[1] << 16;
@@ -977,8 +977,7 @@ static int pxa168_init_phy(struct net_device *dev)
cmd.base.phy_address = pep->phy_addr;
cmd.base.speed = pep->phy_speed;
cmd.base.duplex = pep->phy_duplex;
- bitmap_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(cmd.link_modes.advertising, PHY_BASIC_FEATURES);
cmd.base.autoneg = AUTONEG_ENABLE;
if (cmd.base.speed != 0)
@@ -1434,11 +1433,15 @@ static int pxa168_eth_probe(struct platform_device *pdev)
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
- err = of_get_mac_address(pdev->dev.of_node, dev->dev_addr);
+ err = of_get_ethdev_address(pdev->dev.of_node, dev);
if (err) {
+ u8 addr[ETH_ALEN];
+
/* try reading the mac address, if set by the bootloader */
- pxa168_eth_get_mac_address(dev, dev->dev_addr);
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ pxa168_eth_get_mac_address(dev, addr);
+ if (is_valid_ether_addr(addr)) {
+ eth_hw_addr_set(dev, addr);
+ } else {
dev_info(&pdev->dev, "Using random mac address\n");
eth_hw_addr_random(dev);
}
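
With netdev->dev_addr no longer written directly, the probe path above tries the device tree first and otherwise fills a local buffer before committing it through eth_hw_addr_set(). A hedged sketch of that fallback order; the read_hw_mac callback stands in for the driver-specific register read:

#include <linux/etherdevice.h>
#include <linux/of.h>
#include <linux/of_net.h>

static void example_assign_mac(struct net_device *ndev,
			       struct device_node *np,
			       void (*read_hw_mac)(u8 *addr))
{
	u8 addr[ETH_ALEN];

	if (!of_get_ethdev_address(np, ndev))
		return;				/* DT provided the address */

	read_hw_mac(addr);			/* e.g. bootloader-set registers */
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(ndev, addr);
	else
		eth_hw_addr_random(ndev);
}
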
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 051dd3fb5b03..0c864e5bf0a6 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -3459,7 +3459,7 @@ static int skge_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev)) {
memcpy_toio(hw->regs + B2_MAC_1 + port*8, dev->dev_addr, ETH_ALEN);
@@ -3810,6 +3810,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
{
struct skge_port *skge;
struct net_device *dev = alloc_etherdev(sizeof(*skge));
+ u8 addr[ETH_ALEN];
if (!dev)
return NULL;
@@ -3862,7 +3863,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
}
/* read the mac address */
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
return dev;
}
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index e9fc74e54b22..28b5b9341145 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -3817,7 +3817,7 @@ static int sky2_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
memcpy_toio(hw->regs + B2_MAC_1 + port * 8,
dev->dev_addr, ETH_ALEN);
memcpy_toio(hw->regs + B2_MAC_2 + port * 8,
@@ -4440,86 +4440,6 @@ static const struct ethtool_ops sky2_ethtool_ops = {
static struct dentry *sky2_debug;
-
-/*
- * Read and parse the first part of Vital Product Data
- */
-#define VPD_SIZE 128
-#define VPD_MAGIC 0x82
-
-static const struct vpd_tag {
- char tag[2];
- char *label;
-} vpd_tags[] = {
- { "PN", "Part Number" },
- { "EC", "Engineering Level" },
- { "MN", "Manufacturer" },
- { "SN", "Serial Number" },
- { "YA", "Asset Tag" },
- { "VL", "First Error Log Message" },
- { "VF", "Second Error Log Message" },
- { "VB", "Boot Agent ROM Configuration" },
- { "VE", "EFI UNDI Configuration" },
-};
-
-static void sky2_show_vpd(struct seq_file *seq, struct sky2_hw *hw)
-{
- size_t vpd_size;
- loff_t offs;
- u8 len;
- unsigned char *buf;
- u16 reg2;
-
- reg2 = sky2_pci_read16(hw, PCI_DEV_REG2);
- vpd_size = 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
-
- seq_printf(seq, "%s Product Data\n", pci_name(hw->pdev));
- buf = kmalloc(vpd_size, GFP_KERNEL);
- if (!buf) {
- seq_puts(seq, "no memory!\n");
- return;
- }
-
- if (pci_read_vpd(hw->pdev, 0, vpd_size, buf) < 0) {
- seq_puts(seq, "VPD read failed\n");
- goto out;
- }
-
- if (buf[0] != VPD_MAGIC) {
- seq_printf(seq, "VPD tag mismatch: %#x\n", buf[0]);
- goto out;
- }
- len = buf[1];
- if (len == 0 || len > vpd_size - 4) {
- seq_printf(seq, "Invalid id length: %d\n", len);
- goto out;
- }
-
- seq_printf(seq, "%.*s\n", len, buf + 3);
- offs = len + 3;
-
- while (offs < vpd_size - 4) {
- int i;
-
- if (!memcmp("RW", buf + offs, 2)) /* end marker */
- break;
- len = buf[offs + 2];
- if (offs + len + 3 >= vpd_size)
- break;
-
- for (i = 0; i < ARRAY_SIZE(vpd_tags); i++) {
- if (!memcmp(vpd_tags[i].tag, buf + offs, 2)) {
- seq_printf(seq, " %s: %.*s\n",
- vpd_tags[i].label, len, buf + offs + 3);
- break;
- }
- }
- offs += len + 3;
- }
-out:
- kfree(buf);
-}
-
static int sky2_debug_show(struct seq_file *seq, void *v)
{
struct net_device *dev = seq->private;
@@ -4529,9 +4449,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v)
unsigned idx, last;
int sop;
- sky2_show_vpd(seq, hw);
-
- seq_printf(seq, "\nIRQ src=%x mask=%x control=%x\n",
+ seq_printf(seq, "IRQ src=%x mask=%x control=%x\n",
sky2_read32(hw, B0_ISRC),
sky2_read32(hw, B0_IMSK),
sky2_read32(hw, B0_Y2_SP_ICR));
@@ -4802,10 +4720,13 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
* 1) from device tree data
* 2) from internal registers set by bootloader
*/
- ret = of_get_mac_address(hw->pdev->dev.of_node, dev->dev_addr);
- if (ret)
- memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8,
- ETH_ALEN);
+ ret = of_get_ethdev_address(hw->pdev->dev.of_node, dev);
+ if (ret) {
+ u8 addr[ETH_ALEN];
+
+ memcpy_fromio(addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ }
/* if the address is invalid, use a random value */
if (!is_valid_ether_addr(dev->dev_addr)) {
@@ -4989,7 +4910,7 @@ static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
if (sizeof(dma_addr_t) > sizeof(u32) &&
- !(err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
+ !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
using_dac = 1;
err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
if (err < 0) {
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 398c23cec815..75d67d1b5f6b 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2588,7 +2588,7 @@ static int __init mtk_init(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
int ret;
- ret = of_get_mac_address(mac->of_node, dev->dev_addr);
+ ret = of_get_ethdev_address(mac->of_node, dev);
if (ret) {
/* If the mac address is invalid, use random mac address */
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
index 1d5dd2015453..89ca7960b225 100644
--- a/drivers/net/ethernet/mediatek/mtk_star_emac.c
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -523,7 +523,7 @@ static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
static void mtk_star_set_mac_addr(struct net_device *ndev)
{
struct mtk_star_priv *priv = netdev_priv(ndev);
- u8 *mac_addr = ndev->dev_addr;
+ const u8 *mac_addr = ndev->dev_addr;
unsigned int high, low;
high = mac_addr[0] << 8 | mac_addr[1] << 0;
@@ -1544,7 +1544,7 @@ static int mtk_star_probe(struct platform_device *pdev)
if (ret)
return ret;
- ret = eth_platform_get_mac_address(dev, ndev->dev_addr);
+ ret = platform_get_ethdev_address(dev, ndev);
if (ret || !is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 8d751383530b..e10b7b04b894 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2480,7 +2480,6 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
return 0;
err_thread:
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
while (i--) {
@@ -2587,7 +2586,6 @@ void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
int i, port;
if (mlx4_is_master(dev)) {
- flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
for (i = 0; i < dev->num_slaves; i++) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
@@ -3009,7 +3007,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u8 *mac)
return -EPERM;
}
- s_info->mac = mlx4_mac_to_u64(mac);
+ s_info->mac = ether_addr_to_u64(mac);
mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
@@ -3195,7 +3193,7 @@ int mlx4_set_vf_spoofchk(struct mlx4_dev *dev, int port, int vf, bool setting)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
- mlx4_u64_to_mac(mac, s_info->mac);
+ u64_to_ether_addr(s_info->mac, mac);
if (setting && !is_valid_ether_addr(mac)) {
mlx4_info(dev, "Illegal MAC with spoofchk\n");
return -EPERM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index f7053a74e6a8..4d4f9cf9facb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -314,7 +314,8 @@ static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size)
buf += PAGE_SIZE;
}
} else {
- err = copy_to_user((void __user *)buf, init_ents, entries * cqe_size) ?
+ err = copy_to_user((void __user *)buf, init_ents,
+ array_size(entries, cqe_size)) ?
-EFAULT : 0;
}
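
The copy_to_user() change above swaps a plain entries * cqe_size multiplication for array_size(), which saturates to SIZE_MAX on overflow instead of wrapping to a short length. Sketch of the same guard with illustrative names:

#include <linux/overflow.h>
#include <linux/uaccess.h>

static int example_copy_entries(void __user *dst, const void *src,
				int entries, int elem_size)
{
	/* array_size() never wraps, so a huge product fails the copy
	 * instead of silently copying too little
	 */
	return copy_to_user(dst, src, array_size(entries, elem_size)) ?
		-EFAULT : 0;
}
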
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ef518b1040f7..066d79e4ecfc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -39,6 +39,7 @@
#include <linux/in.h>
#include <net/ip.h>
#include <linux/bitmap.h>
+#include <linux/mii.h>
#include "mlx4_en.h"
#include "en_port.h"
@@ -197,6 +198,8 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
/* xdp statistics */
"rx_xdp_drop",
+ "rx_xdp_redirect",
+ "rx_xdp_redirect_fail",
"rx_xdp_tx",
"rx_xdp_tx_full",
@@ -428,6 +431,8 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i]->bytes;
data[index++] = priv->rx_ring[i]->dropped;
data[index++] = priv->rx_ring[i]->xdp_drop;
+ data[index++] = priv->rx_ring[i]->xdp_redirect;
+ data[index++] = priv->rx_ring[i]->xdp_redirect_fail;
data[index++] = priv->rx_ring[i]->xdp_tx;
data[index++] = priv->rx_ring[i]->xdp_tx_full;
}
@@ -520,6 +525,10 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_drop", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
+ "rx%d_xdp_redirect_fail", i);
+ sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx", i);
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_xdp_tx_full", i);
@@ -643,10 +652,8 @@ static unsigned long *ptys2ethtool_link_mode(struct ptys2ethtool_config *cfg,
unsigned int i; \
cfg = &ptys2ethtool_map[reg_]; \
cfg->speed = speed_; \
- bitmap_zero(cfg->supported, \
- __ETHTOOL_LINK_MODE_MASK_NBITS); \
- bitmap_zero(cfg->advertised, \
- __ETHTOOL_LINK_MODE_MASK_NBITS); \
+ linkmode_zero(cfg->supported); \
+ linkmode_zero(cfg->advertised); \
for (i = 0 ; i < ARRAY_SIZE(modes) ; ++i) { \
__set_bit(modes[i], cfg->supported); \
__set_bit(modes[i], cfg->advertised); \
@@ -702,10 +709,8 @@ static void ptys2ethtool_update_link_modes(unsigned long *link_modes,
int i;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
if (eth_proto & MLX4_PROT_MASK(i))
- bitmap_or(link_modes, link_modes,
- ptys2ethtool_link_mode(&ptys2ethtool_map[i],
- report),
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_or(link_modes, link_modes,
+ ptys2ethtool_link_mode(&ptys2ethtool_map[i], report));
}
}
@@ -716,11 +721,9 @@ static u32 ethtool2ptys_link_modes(const unsigned long *link_modes,
u32 ptys_modes = 0;
for (i = 0; i < MLX4_LINK_MODES_SZ; i++) {
- if (bitmap_intersects(
- ptys2ethtool_link_mode(&ptys2ethtool_map[i],
- report),
- link_modes,
- __ETHTOOL_LINK_MODE_MASK_NBITS))
+ ulong *map_mode = ptys2ethtool_link_mode(&ptys2ethtool_map[i],
+ report);
+ if (linkmode_intersects(map_mode, link_modes))
ptys_modes |= 1 << i;
}
return ptys_modes;
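
The ethtool conversion above replaces open-coded bitmap_*() calls sized by __ETHTOOL_LINK_MODE_MASK_NBITS with the linkmode_*() wrappers, which carry the mask width implicitly. A small sketch assuming only the generic helpers:

#include <linux/linkmode.h>

static bool example_modes_overlap(const unsigned long *supported,
				  const unsigned long *requested)
{
	return linkmode_intersects(supported, requested);
}

static void example_modes_union(unsigned long *dst,
				const unsigned long *a,
				const unsigned long *b)
{
	linkmode_zero(dst);
	linkmode_or(dst, dst, a);
	linkmode_or(dst, dst, b);
}
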
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 109472d6b61f..f1259bdb1a29 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -237,7 +237,6 @@ static void mlx4_en_remove(struct mlx4_dev *dev, void *endev_ptr)
if (mdev->pndev[i])
mlx4_en_destroy_netdev(mdev->pndev[i]);
- flush_workqueue(mdev->workqueue);
destroy_workqueue(mdev->workqueue);
(void) mlx4_mr_free(dev, &mdev->mr);
iounmap(mdev->uar_map);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8af7f2827322..3f6d5c384637 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -527,18 +527,17 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev,
return err;
}
-static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
+static void mlx4_en_u64_to_mac(struct net_device *dev, u64 src_mac)
{
- int i;
- for (i = ETH_ALEN - 1; i >= 0; --i) {
- dst_mac[i] = src_mac & 0xff;
- src_mac >>= 8;
- }
- memset(&dst_mac[ETH_ALEN], 0, 2);
+ u8 addr[ETH_ALEN];
+
+ u64_to_ether_addr(src_mac, addr);
+ eth_hw_addr_set(dev, addr);
}
-static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv,
+ const unsigned char *addr,
int qpn, u64 *reg_id)
{
int err;
@@ -559,7 +558,7 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
- unsigned char *mac, int *qpn, u64 *reg_id)
+ const unsigned char *mac, int *qpn, u64 *reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -611,7 +610,8 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
- unsigned char *mac, int qpn, u64 reg_id)
+ const unsigned char *mac,
+ int qpn, u64 reg_id)
{
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
@@ -644,7 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int index = 0;
int err = 0;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -683,7 +683,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
int qpn = priv->base_qpn;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = ether_addr_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -701,14 +701,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
+ u64 new_mac_u64 = ether_addr_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = ether_addr_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -797,7 +797,7 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (err)
goto out;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, saddr->sa_data);
mlx4_en_update_user_mac(priv, new_mac);
out:
mutex_unlock(&mdev->state_lock);
@@ -1076,7 +1076,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_mac_to_u64(mclist->addr);
+ mcast_addr = ether_addr_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1169,7 +1169,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1212,7 +1212,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_mac_to_u64(ha->addr);
+ mac = ether_addr_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -1348,7 +1348,7 @@ static void mlx4_en_delete_rss_steer_rules(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_mac_to_u64(entry->mac);
+ mac = ether_addr_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC:%pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -3267,7 +3267,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
/* Set default MAC */
dev->addr_len = ETH_ALEN;
- mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
+ mlx4_en_u64_to_mac(dev, mdev->dev->caps.def_mac[priv->port]);
if (!is_valid_ether_addr(dev->dev_addr)) {
en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
priv->port, dev->dev_addr);
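
The mlx4 netdev changes above drop the driver-private MAC<->u64 conversions in favour of the etherdevice.h helpers and commit the result with eth_hw_addr_set(). A round-trip sketch of those helpers (the function itself is illustrative):

#include <linux/etherdevice.h>

static void example_mac_u64_roundtrip(struct net_device *dev)
{
	u64 as_u64 = ether_addr_to_u64(dev->dev_addr);
	u8 addr[ETH_ALEN];

	u64_to_ether_addr(as_u64, addr);
	eth_hw_addr_set(dev, addr);	/* writes back the same address */
}
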
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 0158b88bea5b..532997eba698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -244,6 +244,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete = 0;
priv->port_stats.rx_alloc_pages = 0;
priv->xdp_stats.rx_xdp_drop = 0;
+ priv->xdp_stats.rx_xdp_redirect = 0;
+ priv->xdp_stats.rx_xdp_redirect_fail = 0;
priv->xdp_stats.rx_xdp_tx = 0;
priv->xdp_stats.rx_xdp_tx_full = 0;
for (i = 0; i < priv->rx_ring_num; i++) {
@@ -255,6 +257,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.rx_chksum_complete += READ_ONCE(ring->csum_complete);
priv->port_stats.rx_alloc_pages += READ_ONCE(ring->rx_alloc_pages);
priv->xdp_stats.rx_xdp_drop += READ_ONCE(ring->xdp_drop);
+ priv->xdp_stats.rx_xdp_redirect += READ_ONCE(ring->xdp_redirect);
+ priv->xdp_stats.rx_xdp_redirect_fail += READ_ONCE(ring->xdp_redirect_fail);
priv->xdp_stats.rx_xdp_tx += READ_ONCE(ring->xdp_tx);
priv->xdp_stats.rx_xdp_tx_full += READ_ONCE(ring->xdp_tx_full);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7f6d3b82c29b..650e6a1844ae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -669,6 +669,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
struct bpf_prog *xdp_prog;
int cq_ring = cq->ring;
bool doorbell_pending;
+ bool xdp_redir_flush;
struct mlx4_cqe *cqe;
struct xdp_buff xdp;
int polled = 0;
@@ -682,6 +683,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
xdp_prog = rcu_dereference_bh(ring->xdp_prog);
xdp_init_buff(&xdp, priv->frag_info[0].frag_stride, &ring->xdp_rxq);
doorbell_pending = false;
+ xdp_redir_flush = false;
/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
* descriptor offset can be deduced from the CQE index instead of
@@ -790,6 +792,16 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
switch (act) {
case XDP_PASS:
break;
+ case XDP_REDIRECT:
+ if (likely(!xdp_do_redirect(dev, &xdp, xdp_prog))) {
+ ring->xdp_redirect++;
+ xdp_redir_flush = true;
+ frags[0].page = NULL;
+ goto next;
+ }
+ ring->xdp_redirect_fail++;
+ trace_xdp_exception(dev, xdp_prog, act);
+ goto xdp_drop_no_cnt;
case XDP_TX:
if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
length, cq_ring,
@@ -897,6 +909,9 @@ next:
break;
}
+ if (xdp_redir_flush)
+ xdp_do_flush();
+
if (likely(polled)) {
if (doorbell_pending) {
priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
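
The new XDP_REDIRECT arm above redirects each frame with xdp_do_redirect(), bumps a success or failure counter, and defers xdp_do_flush() until after the polling loop so redirected frames are batched. A reduced sketch of the per-frame step; the counter plumbing is illustrative and the caller is expected to flush once after its loop:

#include <linux/filter.h>
#include <linux/bpf_trace.h>

static bool example_xdp_redirect(struct net_device *dev,
				 struct xdp_buff *xdp,
				 struct bpf_prog *prog,
				 unsigned long *ok, unsigned long *fail)
{
	if (likely(!xdp_do_redirect(dev, xdp, prog))) {
		(*ok)++;
		return true;	/* frame consumed; flush after the loop */
	}

	(*fail)++;
	trace_xdp_exception(dev, prog, XDP_REDIRECT);
	return false;		/* caller drops the frame */
}
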
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c56b9dba4c71..817f4154b86d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -130,6 +130,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->bf_enabled = !!(priv->pflags &
MLX4_EN_PRIV_FLAGS_BLUEFLAME);
}
+ ring->doorbell_address = ring->bf.uar->map + MLX4_SEND_DOORBELL;
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
@@ -753,8 +754,7 @@ void mlx4_en_xmit_doorbell(struct mlx4_en_tx_ring *ring)
#else
iowrite32be(
#endif
- (__force u32)ring->doorbell_qpn,
- ring->bf.uar->map + MLX4_SEND_DOORBELL);
+ (__force u32)ring->doorbell_qpn, ring->doorbell_address);
}
static void mlx4_en_tx_write_desc(struct mlx4_en_tx_ring *ring,
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index dc4ac1a2b6b6..42c96c9d7fb1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -3105,7 +3105,7 @@ void mlx4_replace_zero_macs(struct mlx4_dev *dev)
dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH) {
eth_random_addr(mac_addr);
dev->port_random_macs |= 1 << i;
- dev->caps.def_mac[i] = mlx4_mac_to_u64(mac_addr);
+ dev->caps.def_mac[i] = ether_addr_to_u64(mac_addr);
}
}
EXPORT_SYMBOL_GPL(mlx4_replace_zero_macs);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5a6b0fcaf7f8..b187c210d4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -4015,9 +4015,6 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&dev->persist->interface_state_mutex);
mutex_init(&dev->persist->pci_status_mutex);
- ret = devlink_register(devlink);
- if (ret)
- goto err_persist_free;
ret = devlink_params_register(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
if (ret)
@@ -4027,17 +4024,15 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_params_unregister;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
pci_save_state(pdev);
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_params_unregister:
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
err_devlink_unregister:
- devlink_unregister(devlink);
-err_persist_free:
kfree(dev->persist);
err_devlink_free:
devlink_free(devlink);
@@ -4140,7 +4135,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
struct devlink *devlink = priv_to_devlink(priv);
int active_vfs = 0;
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
if (mlx4_is_slave(dev))
persist->interface_state |= MLX4_INTERFACE_STATE_NOWAIT;
@@ -4176,7 +4171,6 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_pci_disable_device(dev);
devlink_params_unregister(devlink, mlx4_devlink_params,
ARRAY_SIZE(mlx4_devlink_params));
- devlink_unregister(devlink);
kfree(dev->persist);
devlink_free(devlink);
}
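
The mlx4 probe/remove hunks above follow the reworked devlink life cycle: devlink_register() (now returning void) becomes the last step of probe, after parameters and ports are in place and DEVLINK_F_RELOAD has been announced, while devlink_unregister() becomes the first step of remove. A sketch of that ordering, with illustrative function names:

#include <net/devlink.h>

static int example_probe_tail(struct devlink *devlink)
{
	/* ... params, ports and traps are already registered here ... */

	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);	/* instance becomes user-visible last */
	return 0;
}

static void example_remove_head(struct devlink *devlink)
{
	devlink_unregister(devlink);	/* hide from userspace first */
	/* ... then tear down ports and params ... */
}
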
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index f1b4ad9c66d2..f1716a83a4d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -1046,7 +1046,7 @@ int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
-int mlx4_tunnel_steer_add(struct mlx4_dev *dev, unsigned char *addr,
+int mlx4_tunnel_steer_add(struct mlx4_dev *dev, const unsigned char *addr,
int port, int qpn, u16 prio, u64 *reg_id)
{
int err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 6bf558c5ec10..e132ff4c82f2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -283,6 +283,7 @@ struct mlx4_en_tx_ring {
struct mlx4_bf bf;
/* Following part should be mostly read */
+ void __iomem *doorbell_address;
__be32 doorbell_qpn;
__be32 mr_key;
u32 size; /* number of TXBBs */
@@ -340,6 +341,8 @@ struct mlx4_en_rx_ring {
unsigned long csum_complete;
unsigned long rx_alloc_pages;
unsigned long xdp_drop;
+ unsigned long xdp_redirect;
+ unsigned long xdp_redirect_fail;
unsigned long xdp_tx;
unsigned long xdp_tx_full;
unsigned long dropped;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index 7b51ae8cf759..e9cd4bb6f83d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -42,9 +42,11 @@ struct mlx4_en_port_stats {
struct mlx4_en_xdp_stats {
unsigned long rx_xdp_drop;
+ unsigned long rx_xdp_redirect;
+ unsigned long rx_xdp_redirect_fail;
unsigned long rx_xdp_tx;
unsigned long rx_xdp_tx_full;
-#define NUM_XDP_STATS 3
+#define NUM_XDP_STATS 5
};
struct mlx4_en_phy_stats {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 63032cd6efb1..e63bb9ceb9c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -14,10 +14,10 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
- fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
+ fs_counters.o fs_ft_pool.o rl.o lag/lag.o dev.o events.o wq.o lib/gid.o \
lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \
diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \
- fw_reset.o qos.o
+ fw_reset.o qos.o lib/tout.o
#
# Netdev basic
@@ -37,7 +37,7 @@ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_PCI_HYPERV_INTERFACE) += en/hv_vhca_stats.o
-mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag_mp.o lib/geneve.o lib/port_tun.o \
+mlx5_core-$(CONFIG_MLX5_ESWITCH) += lag/mp.o lag/port_sel.o lib/geneve.o lib/port_tun.o \
en_rep.o en/rep/bond.o en/mod_hdr.o \
en/mapping.o
mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
@@ -45,7 +45,7 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \
esw/indir_table.o en/tc_tun_encap.o \
en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \
en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \
- en/tc/post_act.o
+ en/tc/post_act.o en/tc/int_port.o
mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o
mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index db5dfff585c9..f71ec4d9d68e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -45,6 +45,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
CMD_IF_REV = 5,
@@ -225,9 +226,13 @@ static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
- unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
+ struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev, cmd);
+ u64 cmd_to_ms = mlx5_tout_ms(dev, CMD);
+ unsigned long poll_end;
u8 own;
+ poll_end = jiffies + msecs_to_jiffies(cmd_to_ms + 1000);
+
do {
own = READ_ONCE(ent->lay->status_own);
if (!(own & CMD_OWNER_HW)) {
@@ -925,15 +930,18 @@ static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
struct mlx5_cmd *cmd = ent->cmd;
- struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
- unsigned long cb_timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ bool poll_cmd = ent->polling;
struct mlx5_cmd_layout *lay;
+ struct mlx5_core_dev *dev;
+ unsigned long cb_timeout;
struct semaphore *sem;
unsigned long flags;
- bool poll_cmd = ent->polling;
int alloc_ret;
int cmd_mode;
+ dev = container_of(cmd, struct mlx5_core_dev, cmd);
+ cb_timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
+
complete(&ent->handling);
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -1073,7 +1081,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
+ unsigned long timeout = msecs_to_jiffies(mlx5_tout_ms(dev, CMD));
struct mlx5_cmd *cmd = &dev->cmd;
int err;
@@ -2058,7 +2066,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
return -EINVAL;
}
- cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
+ cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL);
if (!cmd->stats)
return -ENOMEM;
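
The cmd.c hunks above stop relying on the compile-time MLX5_CMD_TIMEOUT_MSEC and instead fetch the command timeout at run time (mlx5_tout_ms()) before turning it into a jiffies deadline. The conversion itself is plain arithmetic; a sketch with illustrative names:

#include <linux/jiffies.h>
#include <linux/types.h>

/* deadline for polling a command: run-time timeout plus a fixed slack
 * (the driver's poll_timeout() adds 1000 ms on top of the FW value)
 */
static unsigned long example_cmd_deadline(u64 timeout_ms, unsigned int slack_ms)
{
	return jiffies + msecs_to_jiffies(timeout_ms + slack_ms);
}
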
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index e8093c4e09d4..a8b84d53dfb0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -33,6 +33,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/mlx5_ifc_vdpa.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_core.h"
/* intf dev list mutex */
@@ -537,6 +538,16 @@ int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev)
return add_drivers(dev);
}
+static bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev)
+{
+ u64 fsystem_guid, psystem_guid;
+
+ fsystem_guid = mlx5_query_nic_system_image_guid(dev);
+ psystem_guid = mlx5_query_nic_system_image_guid(peer_dev);
+
+ return (fsystem_guid && psystem_guid && fsystem_guid == psystem_guid);
+}
+
static u32 mlx5_gen_pci_id(const struct mlx5_core_dev *dev)
{
return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
@@ -556,7 +567,8 @@ static int next_phys_dev(struct device *dev, const void *data)
if (mdev == curr)
return 0;
- if (mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
+ if (!mlx5_same_hw_devs(mdev, (struct mlx5_core_dev *)curr) &&
+ mlx5_gen_pci_id(mdev) != mlx5_gen_pci_id(curr))
return 0;
return 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index dcf9f27ba2ef..1c98652b244a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -136,6 +136,7 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
+ struct pci_dev *pdev = dev->pdev;
bool sf_dev_allocated;
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
@@ -153,6 +154,10 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
+ if (pci_num_vf(pdev)) {
+ NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
+ }
+
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
mlx5_unload_one(dev);
@@ -449,7 +454,8 @@ static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
struct mlx5_core_dev *dev = devlink_priv(devlink);
bool new_state = val.vbool;
- if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+ if (new_state && !MLX5_CAP_GEN(dev, roce) &&
+ !MLX5_CAP_GEN(dev, roce_rw_supported)) {
NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
return -EOPNOTSUPP;
}
@@ -625,7 +631,6 @@ static int mlx5_devlink_eth_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
value);
- devlink_param_publish(devlink, &enable_eth_param);
return 0;
}
@@ -636,7 +641,6 @@ static void mlx5_devlink_eth_param_unregister(struct devlink *devlink)
if (!mlx5_eth_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_eth_param);
devlink_param_unregister(devlink, &enable_eth_param);
}
@@ -672,7 +676,6 @@ static int mlx5_devlink_rdma_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -681,7 +684,6 @@ static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink)
if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND))
return;
- devlink_param_unpublish(devlink, &enable_rdma_param);
devlink_param_unregister(devlink, &enable_rdma_param);
}
@@ -706,7 +708,6 @@ static int mlx5_devlink_vnet_param_register(struct devlink *devlink)
devlink_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
value);
- devlink_param_publish(devlink, &enable_rdma_param);
return 0;
}
@@ -717,7 +718,6 @@ static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink)
if (!mlx5_vnet_supported(dev))
return;
- devlink_param_unpublish(devlink, &enable_vnet_param);
devlink_param_unregister(devlink, &enable_vnet_param);
}
@@ -797,18 +797,15 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink)
int mlx5_devlink_register(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
int err;
- err = devlink_register(devlink);
- if (err)
- return err;
-
err = devlink_params_register(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
if (err)
- goto params_reg_err;
+ return err;
+
mlx5_devlink_set_params_init_values(devlink);
- devlink_params_publish(devlink);
err = mlx5_devlink_auxdev_params_register(devlink);
if (err)
@@ -818,6 +815,9 @@ int mlx5_devlink_register(struct devlink *devlink)
if (err)
goto traps_reg_err;
+ if (!mlx5_core_is_mp_slave(dev))
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+
return 0;
traps_reg_err:
@@ -825,8 +825,6 @@ traps_reg_err:
auxdev_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
-params_reg_err:
- devlink_unregister(devlink);
return err;
}
@@ -834,8 +832,6 @@ void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
mlx5_devlink_auxdev_params_unregister(devlink);
- devlink_params_unpublish(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
- devlink_unregister(devlink);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
index 87d65f6b5310..7841ef6c193c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c
@@ -235,6 +235,9 @@ const char *parse_fs_dst(struct trace_seq *p,
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
+ trace_seq_printf(p, "uplink\n");
+ break;
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport.num);
break;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
index f9cf9fb31547..eae9aa9c0811 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
@@ -745,7 +745,7 @@ static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer)
MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY);
MLX5_SET(mtrc_conf, in, log_trace_buffer_size,
ilog2(TRACER_BUFFER_PAGE_NUM));
- MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key);
+ MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey);
err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
MLX5_REG_MTRC_CONF, 0, 1);
@@ -1028,7 +1028,7 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer)
err_notifier_unregister:
mlx5_eq_notifier_unregister(dev, &tracer->nb);
- mlx5_core_destroy_mkey(dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(dev, tracer->buff.mkey);
err_dealloc_pd:
mlx5_core_dealloc_pd(dev, tracer->buff.pdn);
err_cancel_work:
@@ -1051,7 +1051,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer)
if (tracer->owner)
mlx5_fw_tracer_ownership_release(tracer);
- mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey);
+ mlx5_core_destroy_mkey(tracer->dev, tracer->buff.mkey);
mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn);
}
@@ -1069,7 +1069,6 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer)
mlx5_fw_tracer_clean_saved_traces_array(tracer);
mlx5_fw_tracer_free_strings_db(tracer);
mlx5_fw_tracer_destroy_log_buf(tracer);
- flush_workqueue(tracer->work_queue);
destroy_workqueue(tracer->work_queue);
kvfree(tracer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
index 97252a85d65e..4762b55b0b0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
@@ -89,7 +89,7 @@ struct mlx5_fw_tracer {
void *log_buf;
dma_addr_t dma;
u32 size;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
u32 consumer_index;
} buff;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
index ed4fb79b4db7..538adab6878b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/rsc_dump.c
@@ -30,7 +30,7 @@ static const char *const mlx5_rsc_sgmt_name[] = {
struct mlx5_rsc_dump {
u32 pdn;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
u16 fw_segment_type[MLX5_SGMT_TYPE_NUM];
};
@@ -89,7 +89,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
return -ENOMEM;
in_seq_num = MLX5_GET(resource_dump, cmd->cmd, seq_num);
- MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey.key);
+ MLX5_SET(resource_dump, cmd->cmd, mkey, rsc_dump->mkey);
MLX5_SET64(resource_dump, cmd->cmd, address, dma);
err = mlx5_core_access_reg(dev, cmd->cmd, sizeof(cmd->cmd), cmd->cmd,
@@ -202,7 +202,7 @@ free_page:
}
static int mlx5_rsc_dump_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -276,7 +276,7 @@ int mlx5_rsc_dump_init(struct mlx5_core_dev *dev)
return err;
destroy_mkey:
- mlx5_core_destroy_mkey(dev, &rsc_dump->mkey);
+ mlx5_core_destroy_mkey(dev, rsc_dump->mkey);
free_pd:
mlx5_core_dealloc_pd(dev, rsc_dump->pdn);
return err;
@@ -287,6 +287,6 @@ void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev)
if (IS_ERR_OR_NULL(dev->rsc_dump))
return;
- mlx5_core_destroy_mkey(dev, &dev->rsc_dump->mkey);
+ mlx5_core_destroy_mkey(dev, dev->rsc_dump->mkey);
mlx5_core_dealloc_pd(dev, dev->rsc_dump->pdn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 03a7a4ce5cd5..f0ac6b0d9653 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -79,6 +79,11 @@ struct page_pool;
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define MLX5E_RX_MAX_HEAD (256)
+#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
+#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
+#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
+#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
#define MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev) \
(6 + MLX5_CAP_GEN(mdev, cache_line_128byte)) /* HW restriction */
@@ -152,6 +157,25 @@ struct page_pool;
#define MLX5E_UMR_WQEBBS \
(DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB))
+#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\
+ (sizeof(struct mlx5e_umr_wqe) +\
+ (sizeof(struct mlx5_klm) * (sgl_len)))
+
+#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB))
+
+#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\
+ (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS))
+
+#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\
+ (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm))
+
+#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
+ ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
+
+#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+ MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
+
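/* Standalone arithmetic sketch (not part of the patch) showing how the KLM
 * UMR sizing macros above round a gather list up to whole send WQE basic
 * blocks. All sizes below are assumptions for illustration (64-byte WQEBB,
 * 16-byte data segment, 16-byte KLM, 128-byte UMR WQE header); the real
 * values come from the mlx5 headers.
 */
#include <stdio.h>

#define WQE_BB		64	/* assumed MLX5_SEND_WQE_BB */
#define WQE_DS		16	/* assumed MLX5_SEND_WQE_DS */
#define KLM_SZ		16	/* assumed sizeof(struct mlx5_klm) */
#define UMR_WQE_HDR	128	/* assumed sizeof(struct mlx5e_umr_wqe) */

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int klm_entries = 24;
	unsigned int wqe_sz = UMR_WQE_HDR + KLM_SZ * klm_entries;

	printf("%u KLM entries: %u bytes, %u WQEBBs, %u data segments\n",
	       klm_entries, wqe_sz,
	       DIV_ROUND_UP(wqe_sz, WQE_BB),	/* as in MLX5E_KLM_UMR_WQEBBS() */
	       DIV_ROUND_UP(wqe_sz, WQE_DS));	/* as in MLX5E_KLM_UMR_DS_CNT() */
	return 0;
}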
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
#define mlx5e_dbg(mlevel, priv, format, ...) \
@@ -217,11 +241,12 @@ struct mlx5e_umr_wqe {
struct mlx5_wqe_ctrl_seg ctrl;
struct mlx5_wqe_umr_ctrl_seg uctrl;
struct mlx5_mkey_seg mkc;
- struct mlx5_mtt inline_mtts[0];
+ union {
+ struct mlx5_mtt inline_mtts[0];
+ struct mlx5_klm inline_klms[0];
+ };
};
-extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
-
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER,
MLX5E_PFLAG_TX_CQE_BASED_MODER,
@@ -244,6 +269,21 @@ enum mlx5e_priv_flag {
#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag))))
+enum packet_merge {
+ MLX5E_PACKET_MERGE_NONE,
+ MLX5E_PACKET_MERGE_LRO,
+ MLX5E_PACKET_MERGE_SHAMPO,
+};
+
+struct mlx5e_packet_merge_param {
+ enum packet_merge type;
+ u32 timeout;
+ struct {
+ u8 match_criteria_type;
+ u8 alignment_granularity;
+ } shampo;
+};
+
struct mlx5e_params {
u8 log_sq_size;
u8 rq_wq_type;
@@ -253,18 +293,20 @@ struct mlx5e_params {
u16 mode;
u8 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+ struct {
+ struct mlx5e_mqprio_rl *rl;
+ } channel;
} mqprio;
bool rx_cqe_compress_def;
bool tunneled_offload_en;
struct dim_cq_moder rx_cq_moderation;
struct dim_cq_moder tx_cq_moderation;
- bool lro_en;
+ struct mlx5e_packet_merge_param packet_merge;
u8 tx_min_inline_mode;
bool vlan_strip_disable;
bool scatter_fcs_en;
bool rx_dim_enabled;
bool tx_dim_enabled;
- u32 lro_timeout;
u32 pflags;
struct bpf_prog *xdp_prog;
struct mlx5e_xsk *xsk;
@@ -286,7 +328,8 @@ enum {
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
MLX5E_RQ_STATE_CSUM_FULL, /* cqe_csum_full hw bit is set */
MLX5E_RQ_STATE_FPGA_TLS, /* FPGA TLS enabled */
- MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, /* set when mini_cqe_resp_stride_index cap is used */
+ MLX5E_RQ_STATE_SHAMPO, /* set when SHAMPO cap is used */
};
struct mlx5e_cq {
@@ -577,6 +620,7 @@ typedef struct sk_buff *
struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt);
typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
+typedef void (*mlx5e_fp_shampo_dealloc_hd)(struct mlx5e_rq*, u16, u16, bool);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
@@ -598,6 +642,25 @@ struct mlx5e_rq_frags_info {
u8 wqe_bulk;
};
+struct mlx5e_shampo_hd {
+ u32 mkey;
+ struct mlx5e_dma_info *info;
+ struct page *last_page;
+ u16 hd_per_wq;
+ u16 hd_per_wqe;
+ unsigned long *bitmap;
+ u16 pi;
+ u16 ci;
+ __be32 key;
+ u64 last_addr;
+};
+
+struct mlx5e_hw_gro_data {
+ struct sk_buff *skb;
+ struct flow_keys fk;
+ int second_ip_id;
+};
+
struct mlx5e_rq {
/* data path */
union {
@@ -619,6 +682,7 @@ struct mlx5e_rq {
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
+ struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
struct {
@@ -638,6 +702,8 @@ struct mlx5e_rq {
struct mlx5e_icosq *icosq;
struct mlx5e_priv *priv;
+ struct mlx5e_hw_gro_data *hw_gro_data;
+
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
mlx5e_fp_dealloc_wqe dealloc_wqe;
@@ -665,7 +731,7 @@ struct mlx5e_rq {
u8 wq_type;
u32 rqn;
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey umr_mkey;
+ u32 umr_mkey;
struct mlx5e_dma_info wqe_overflow;
/* XDP read-mostly */
@@ -879,11 +945,13 @@ struct mlx5e_priv {
#endif
struct mlx5e_scratchpad scratchpad;
struct mlx5e_htb htb;
+ struct mlx5e_mqprio_rl *mqprio_rl;
};
struct mlx5e_rx_handlers {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
+ mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe_shampo;
};
extern const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic;
@@ -913,11 +981,13 @@ void mlx5e_build_ptys2ethtool_map(void);
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev);
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close);
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf);
void mlx5e_set_rx_mode_work(struct work_struct *work);
@@ -1003,7 +1073,8 @@ int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
struct mlx5e_modify_sq_param *p);
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid);
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats);
void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq);
void mlx5e_free_txqsq(struct mlx5e_txqsq *sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
index 86e079310ac3..ae52e7f38306 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c
@@ -24,7 +24,7 @@ int mlx5e_devlink_port_register(struct mlx5e_priv *priv)
if (mlx5_core_is_pf(priv->mdev)) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = PCI_FUNC(priv->mdev->pdev->devfn);
+ attrs.phys.port_number = mlx5_get_dev_index(priv->mdev);
if (MLX5_ESWITCH_MANAGER(priv->mdev)) {
mlx5e_devlink_get_port_parent_id(priv->mdev, &ppid);
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
index 41684a6c44e9..678ffbb48a25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
@@ -125,15 +125,15 @@ struct mlx5e_ethtool_steering {
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd);
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
-static inline int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+static inline int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{ return -EOPNOTSUPP; }
-static inline int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+static inline int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ return -EOPNOTSUPP; }
#endif /* CONFIG_MLX5_EN_RXNFC */
@@ -199,6 +199,9 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
+int mlx5e_fs_init(struct mlx5e_priv *priv);
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
+
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
index 018262d0164b..d5b7110a4265 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.h
@@ -32,7 +32,6 @@ void mlx5e_reporter_rq_cqe_err(struct mlx5e_rq *rq);
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq);
#define MLX5E_REPORTER_PER_Q_MAX_LEN 256
-#define MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC 2000
struct mlx5e_err_ctx {
int (*recover)(void *ctx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
index 3cbb596821e8..f8c29022dbb2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c
@@ -87,7 +87,8 @@ bool mlx5e_rx_is_linear_skb(struct mlx5e_params *params,
u32 linear_frag_sz = max(mlx5e_rx_get_linear_frag_sz(params, xsk),
mlx5e_rx_get_linear_frag_sz(params, NULL));
- return !params->lro_en && linear_frag_sz <= PAGE_SIZE;
+ return params->packet_merge.type == MLX5E_PACKET_MERGE_NONE &&
+ linear_frag_sz <= PAGE_SIZE;
}
bool mlx5e_verify_rx_mpwqe_strides(struct mlx5_core_dev *mdev,
@@ -138,6 +139,27 @@ u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
return params->log_rq_mtu_frames - log_pkts_per_wqe;
}
+u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return order_base_2(DIV_ROUND_UP(MLX5E_RX_MAX_HEAD, MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE));
+}
+
+u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ return order_base_2(MLX5E_SHAMPO_WQ_RESRV_SIZE / MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE);
+}
+
+u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params)
+{
+ u32 resrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) *
+ PAGE_SIZE;
+
+ return order_base_2(DIV_ROUND_UP(resrv_size, params->sw_mtu));
+}
+
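/* Worked example (illustration only, not part of the patch) of the SHAMPO
 * log-size helpers above, assuming 4K pages and a 1500-byte sw_mtu:
 *
 *   log_hd_entry_size = order_base_2(DIV_ROUND_UP(256, 64))        = 2
 *   log_rsrv_size     = order_base_2(64K / 4K)                     = 4
 *   reservation size  = BIT(4) * PAGE_SIZE                         = 64KB
 *   log_pkt_per_rsrv  = order_base_2(DIV_ROUND_UP(64KB, 1500))     = 6
 *
 * i.e. each 64KB reservation is sized for up to 64 MTU-sized packets, with
 * one 256-byte header entry per packet (4 of the 64-byte base entries).
 */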
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
@@ -164,19 +186,8 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
mlx5e_rx_is_linear_skb(params, xsk) :
mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk);
- return is_linear_skb ? mlx5e_get_linear_rq_headroom(params, xsk) : 0;
-}
-
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params)
-{
- struct mlx5e_lro_param lro_param;
-
- lro_param = (struct mlx5e_lro_param) {
- .enabled = params->lro_en,
- .timeout = params->lro_timeout,
- };
-
- return lro_param;
+ return is_linear_skb || params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO ?
+ mlx5e_get_linear_rq_headroom(params, xsk) : 0;
}
u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
@@ -453,6 +464,23 @@ static void mlx5e_build_common_cq_param(struct mlx5_core_dev *mdev,
MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD);
}
+static u32 mlx5e_shampo_get_log_cq_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_xsk_param *xsk)
+{
+ int rsrv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
+ int pkt_per_rsrv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+ int wq_size = BIT(mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ int wqe_size = BIT(log_stride_sz) * num_strides;
+
+	/* +1 is for the case where the pkt_per_rsrv packets don't consume the
+	 * whole reservation, so we get a filler cqe for the rest of the
+	 * reservation.
+	 */
+ return order_base_2((wqe_size / rsrv_size) * wq_size * (pkt_per_rsrv + 1));
+}
+
static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
@@ -464,9 +492,12 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
switch (params->rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
- mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
hw_stridx = MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index);
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ log_cq_size = mlx5e_shampo_get_log_cq_size(mdev, params, xsk);
+ else
+ log_cq_size = mlx5e_mpwqe_get_log_rq_size(params, xsk) +
+ mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
log_cq_size = params->log_rq_mtu_frames;
@@ -485,10 +516,11 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
+ bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
MLX5_CAP_GEN(mdev, relaxed_ordering_write);
- return ro && params->lro_en ?
+ return ro && lro_en ?
MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
}
@@ -520,6 +552,22 @@ int mlx5e_build_rq_param(struct mlx5_core_dev *mdev,
MLX5_SET(wq, wq, log_wqe_stride_size,
log_wqe_stride_size - MLX5_MPWQE_LOG_STRIDE_SZ_BASE);
MLX5_SET(wq, wq, log_wq_sz, mlx5e_mpwqe_get_log_rq_size(params, xsk));
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ MLX5_SET(wq, wq, shampo_enable, true);
+ MLX5_SET(wq, wq, log_reservation_size,
+ mlx5e_shampo_get_log_rsrv_size(mdev, params));
+ MLX5_SET(wq, wq,
+ log_max_num_of_packets_per_reservation,
+ mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ MLX5_SET(wq, wq, log_headers_entry_size,
+ mlx5e_shampo_get_log_hd_entry_size(mdev, params));
+ MLX5_SET(rqc, rqc, reservation_timeout,
+ params->packet_merge.timeout);
+ MLX5_SET(rqc, rqc, shampo_match_criteria_type,
+ params->packet_merge.shampo.match_criteria_type);
+ MLX5_SET(rqc, rqc, shampo_no_match_alignment_granularity,
+ params->packet_merge.shampo.alignment_granularity);
+ }
break;
}
default: /* MLX5_WQ_TYPE_CYCLIC */
@@ -620,17 +668,80 @@ static u8 mlx5e_get_rq_log_wq_sz(void *rqc)
return MLX5_GET(wq, wq, log_wq_sz);
}
-static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5e_params *params,
+/* This function calculates the maximum number of header entries that are needed
+ * per WQE. The formula is based on the size of the reservations and on the
+ * restriction that the max number of packets per reservation is equal to the
+ * max number of headers per reservation.
+ */
+u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int resv_size = BIT(mlx5e_shampo_get_log_rsrv_size(mdev, params)) * PAGE_SIZE;
+ u16 num_strides = BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, NULL));
+ int pkt_per_resv = BIT(mlx5e_shampo_get_log_pkt_per_rsrv(mdev, params));
+ u8 log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, NULL);
+ int wqe_size = BIT(log_stride_sz) * num_strides;
+ u32 hd_per_wqe;
+
+ /* Assumption: hd_per_wqe % 8 == 0. */
+ hd_per_wqe = (wqe_size / resv_size) * pkt_per_resv;
+ mlx5_core_dbg(mdev, "%s hd_per_wqe = %d rsrv_size = %d wqe_size = %d pkt_per_resv = %d\n",
+ __func__, hd_per_wqe, resv_size, wqe_size, pkt_per_resv);
+ return hd_per_wqe;
+}
+
+/* This function calculates the maximum number of header entries that are needed
+ * for the WQ. This value is used to allocate the header buffer in HW, so it
+ * must be a power of 2.
+ */
+u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
+ int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ u32 hd_per_wqe, hd_per_wq;
+
+ hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
+ hd_per_wq = roundup_pow_of_two(hd_per_wqe * wq_size);
+ return hd_per_wq;
+}
+
+static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param)
+{
+ int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest;
+ void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq);
+ int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ u32 wqebbs;
+
+ max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+ max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
+ max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
+ rest = max_hd_per_wqe % max_klm_per_umr;
+ wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe;
+ if (rest)
+ wqebbs += MLX5E_KLM_UMR_WQEBBS(rest);
+ wqebbs *= wq_size;
+ return wqebbs;
+}
+
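/* Worked example (illustration only, not part of the patch) for the two
 * helpers above, using assumed values: a 256KB multi-packet WQE, the 64KB
 * SHAMPO reservation and 64 packets per reservation from the defaults, and
 * an RQ of 8 WQEs:
 *
 *   hd_per_wqe = (256KB / 64KB) * 64          = 256 header entries
 *   hd_per_wq  = roundup_pow_of_two(256 * 8)  = 2048 header entries
 *
 * With an assumed KLM budget of 64 entries per UMR WQE, each WQE's 256
 * headers would need 4 UMR posts, which is what mlx5e_shampo_icosq_sz()
 * below accumulates (times wq_size) when sizing the ICOSQ.
 */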
+static u8 mlx5e_build_icosq_log_wq_sz(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
struct mlx5e_rq_param *rqp)
{
- switch (params->rq_wq_type) {
- case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE,
- order_base_2(MLX5E_UMR_WQEBBS) +
- mlx5e_get_rq_log_wq_sz(rqp->rqc));
- default: /* MLX5_WQ_TYPE_CYCLIC */
+ u32 wqebbs;
+
+ /* MLX5_WQ_TYPE_CYCLIC */
+ if (params->rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
return MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
- }
+
+ wqebbs = MLX5E_UMR_WQEBBS * BIT(mlx5e_get_rq_log_wq_sz(rqp->rqc));
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ wqebbs += mlx5e_shampo_icosq_sz(mdev, params, rqp);
+ return max_t(u8, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE, order_base_2(wqebbs));
}
static u8 mlx5e_build_async_icosq_log_wq_sz(struct mlx5_core_dev *mdev)
@@ -697,7 +808,7 @@ int mlx5e_build_channel_param(struct mlx5_core_dev *mdev,
if (err)
return err;
- icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(params, &cparam->rq);
+ icosq_log_wq_sz = mlx5e_build_icosq_log_wq_sz(mdev, params, &cparam->rq);
async_icosq_log_wq_sz = mlx5e_build_async_icosq_log_wq_sz(mdev);
mlx5e_build_sq_param(mdev, params, &cparam->txq_sq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
index 879ad46d754e..433e6967692d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h
@@ -11,11 +11,6 @@ struct mlx5e_xsk_param {
u16 chunk_size;
};
-struct mlx5e_lro_param {
- bool enabled;
- u32 timeout;
-};
-
struct mlx5e_cq_param {
u32 cqc[MLX5_ST_SZ_DW(cqc)];
struct mlx5_wq_param wq;
@@ -116,6 +111,18 @@ bool mlx5e_rx_mpwqe_is_linear_skb(struct mlx5_core_dev *mdev,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_log_rq_size(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
+u8 mlx5e_shampo_get_log_hd_entry_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_shampo_get_log_rsrv_size(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u8 mlx5e_shampo_get_log_pkt_per_rsrv(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params);
+u32 mlx5e_shampo_hd_per_wqe(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param);
+u32 mlx5e_shampo_hd_per_wq(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rq_param);
u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
@@ -125,7 +132,6 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
-struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params);
/* Build queue parameters */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
index 3a86f66d1295..18d542b1c5cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
@@ -682,7 +682,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
c->tstamp = &priv->tstamp;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->stats = &priv->ptp_stats.ch;
c->lag_port = lag_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index e8a8d78e3e4d..50977f01a050 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -7,6 +7,21 @@
#define BYTES_IN_MBIT 125000
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ if (nbytes < BYTES_IN_MBIT) {
+ qos_warn(mdev, "Input rate (%llu Bytes/sec) below minimum supported (%u Bytes/sec)\n",
+ nbytes, BYTES_IN_MBIT);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 mlx5e_qos_bytes2mbits(struct mlx5_core_dev *mdev, u64 nbytes)
+{
+ return div_u64(nbytes, BYTES_IN_MBIT);
+}
+
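/* Worked example (illustration only): BYTES_IN_MBIT is 125000 because
 * 1 Mbit/s = 1,000,000 bits/s = 125,000 bytes/s. A request of
 * 12,500,000 bytes/s therefore passes the minimum-rate check and maps to
 * div_u64(12500000, 125000) = 100 Mbit/s for the firmware, while anything
 * below 125,000 bytes/s is rejected with -EINVAL.
 */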
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev)
{
return min(MLX5E_QOS_MAX_LEAF_NODES, mlx5_qos_max_leaf_nodes(mdev));
@@ -238,7 +253,8 @@ static int mlx5e_open_qos_sq(struct mlx5e_priv *priv, struct mlx5e_channels *chs
if (err)
goto err_free_sq;
err = mlx5e_open_txqsq(c, priv->tisn[c->lag_port][0], txq_ix, params,
- &param_sq, sq, 0, node->hw_id, node->qid);
+ &param_sq, sq, 0, node->hw_id,
+ priv->htb.qos_sq_stats[node->qid]);
if (err)
goto err_close_cq;
@@ -979,3 +995,87 @@ int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ce
return err;
}
+
+struct mlx5e_mqprio_rl {
+ struct mlx5_core_dev *mdev;
+ u32 root_id;
+ u32 *leaves_id;
+ u8 num_tc;
+};
+
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void)
+{
+ return kvzalloc(sizeof(struct mlx5e_mqprio_rl), GFP_KERNEL);
+}
+
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl)
+{
+ kvfree(rl);
+}
+
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[])
+{
+ int err;
+ int tc;
+
+ if (!mlx5_qos_is_supported(mdev)) {
+ qos_warn(mdev, "Missing QoS capabilities. Try disabling SRIOV or use a supported device.");
+ return -EOPNOTSUPP;
+ }
+ if (num_tc > mlx5e_qos_max_leaf_nodes(mdev))
+ return -EINVAL;
+
+ rl->mdev = mdev;
+ rl->num_tc = num_tc;
+ rl->leaves_id = kvcalloc(num_tc, sizeof(*rl->leaves_id), GFP_KERNEL);
+ if (!rl->leaves_id)
+ return -ENOMEM;
+
+ err = mlx5_qos_create_root_node(mdev, &rl->root_id);
+ if (err)
+ goto err_free_leaves;
+
+ qos_dbg(mdev, "Root created, id %#x\n", rl->root_id);
+
+ for (tc = 0; tc < num_tc; tc++) {
+ u32 max_average_bw;
+
+ max_average_bw = mlx5e_qos_bytes2mbits(mdev, max_rate[tc]);
+ err = mlx5_qos_create_leaf_node(mdev, rl->root_id, 0, max_average_bw,
+ &rl->leaves_id[tc]);
+ if (err)
+ goto err_destroy_leaves;
+
+ qos_dbg(mdev, "Leaf[%d] created, id %#x, max average bw %u Mbits/sec\n",
+ tc, rl->leaves_id[tc], max_average_bw);
+ }
+ return 0;
+
+err_destroy_leaves:
+ while (--tc >= 0)
+ mlx5_qos_destroy_node(mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(mdev, rl->root_id);
+err_free_leaves:
+ kvfree(rl->leaves_id);
+ return err;
+}
+
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl)
+{
+ int tc;
+
+ for (tc = 0; tc < rl->num_tc; tc++)
+ mlx5_qos_destroy_node(rl->mdev, rl->leaves_id[tc]);
+ mlx5_qos_destroy_node(rl->mdev, rl->root_id);
+ kvfree(rl->leaves_id);
+}
+
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id)
+{
+ if (tc >= rl->num_tc)
+ return -EINVAL;
+
+ *hw_id = rl->leaves_id[tc];
+ return 0;
+}
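/* Illustrative sketch (not part of the patch) of the intended lifecycle of
 * the mqprio rate-limit object added above. The caller, error handling and
 * the source of max_rate are hypothetical; only the call ordering reflects
 * the API introduced here.
 */
static int example_mqprio_rl_setup(struct mlx5_core_dev *mdev, u8 num_tc,
				   u64 *max_rate, struct mlx5e_mqprio_rl **out)
{
	struct mlx5e_mqprio_rl *rl;
	u32 hw_id;
	int err, tc;

	rl = mlx5e_mqprio_rl_alloc();
	if (!rl)
		return -ENOMEM;

	err = mlx5e_mqprio_rl_init(rl, mdev, num_tc, max_rate);
	if (err) {
		mlx5e_mqprio_rl_free(rl);
		return err;
	}

	for (tc = 0; tc < num_tc; tc++) {
		if (mlx5e_mqprio_rl_get_node_hw_id(rl, tc, &hw_id))
			continue;
		/* hand hw_id to the TC's SQs, e.g. via mlx5e_open_txqsq() */
	}

	*out = rl;	/* later: mlx5e_mqprio_rl_cleanup(rl); mlx5e_mqprio_rl_free(rl); */
	return 0;
}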
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
index 757682b7c0e0..b7558907ba20 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h
@@ -12,6 +12,7 @@ struct mlx5e_priv;
struct mlx5e_channels;
struct mlx5e_channel;
+int mlx5e_qos_bytes_rate_check(struct mlx5_core_dev *mdev, u64 nbytes);
int mlx5e_qos_max_leaf_nodes(struct mlx5_core_dev *mdev);
int mlx5e_qos_cur_leaf_nodes(struct mlx5e_priv *priv);
@@ -41,4 +42,12 @@ int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force,
int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil,
struct netlink_ext_ack *extack);
+/* MQPRIO TX rate limit */
+struct mlx5e_mqprio_rl;
+struct mlx5e_mqprio_rl *mlx5e_mqprio_rl_alloc(void);
+void mlx5e_mqprio_rl_free(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_init(struct mlx5e_mqprio_rl *rl, struct mlx5_core_dev *mdev, u8 num_tc,
+ u64 max_rate[]);
+void mlx5e_mqprio_rl_cleanup(struct mlx5e_mqprio_rl *rl);
+int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_id);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index de03684528bb..fcb0892c08a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -3,6 +3,7 @@
#include <net/dst_metadata.h>
#include <linux/netdevice.h>
+#include <linux/if_macvlan.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rtnetlink.h>
@@ -18,10 +19,13 @@
#include "en/tc_tun.h"
#include "lib/port_tun.h"
#include "en/tc/sample.h"
+#include "en_accel/ipsec_rxtx.h"
+#include "en/tc/int_port.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
struct mlx5e_rep_priv *rpriv;
+ enum flow_block_binder_type binder_type;
struct list_head list;
};
@@ -296,14 +300,16 @@ int mlx5e_rep_tc_event_port_affinity(struct mlx5e_priv *priv)
static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
- struct net_device *netdev)
+ struct net_device *netdev,
+ enum flow_block_binder_type binder_type)
{
struct mlx5e_rep_indr_block_priv *cb_priv;
list_for_each_entry(cb_priv,
&rpriv->uplink_priv.tc_indr_block_priv_list,
list)
- if (cb_priv->netdev == netdev)
+ if (cb_priv->netdev == netdev &&
+ cb_priv->binder_type == binder_type)
return cb_priv;
return NULL;
@@ -341,9 +347,13 @@ mlx5e_rep_indr_offload(struct net_device *netdev,
static int mlx5e_rep_indr_setup_tc_cb(enum tc_setup_type type,
void *type_data, void *indr_priv)
{
- unsigned long flags = MLX5_TC_FLAG(EGRESS) | MLX5_TC_FLAG(ESW_OFFLOAD);
+ unsigned long flags = MLX5_TC_FLAG(ESW_OFFLOAD);
struct mlx5e_rep_indr_block_priv *priv = indr_priv;
+ flags |= (priv->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) ?
+ MLX5_TC_FLAG(EGRESS) :
+ MLX5_TC_FLAG(INGRESS);
+
switch (type) {
case TC_SETUP_CLSFLOWER:
return mlx5e_rep_indr_offload(priv->netdev, type_data, priv,
@@ -409,6 +419,13 @@ static void mlx5e_rep_indr_block_unbind(void *cb_priv)
static LIST_HEAD(mlx5e_block_cb_list);
+static bool mlx5e_rep_macvlan_mode_supported(const struct net_device *dev)
+{
+ struct macvlan_dev *macvlan = netdev_priv(dev);
+
+ return macvlan->mode == MACVLAN_MODE_PASSTHRU;
+}
+
static int
mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
struct mlx5e_rep_priv *rpriv,
@@ -418,14 +435,30 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
void (*cleanup)(struct flow_block_cb *block_cb))
{
struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ bool is_ovs_int_port = netif_is_ovs_master(netdev);
struct mlx5e_rep_indr_block_priv *indr_priv;
struct flow_block_cb *block_cb;
if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
- !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
+ !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev) &&
+ !is_ovs_int_port) {
+ if (!(netif_is_macvlan(netdev) && macvlan_dev_real_dev(netdev) == rpriv->netdev))
+ return -EOPNOTSUPP;
+ if (!mlx5e_rep_macvlan_mode_supported(netdev)) {
+ netdev_warn(netdev, "Offloading ingress filter is supported only with macvlan passthru mode");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+ f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
return -EOPNOTSUPP;
- if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && !is_ovs_int_port)
+ return -EOPNOTSUPP;
+
+ if (is_ovs_int_port && !mlx5e_tc_int_port_supported(esw))
return -EOPNOTSUPP;
f->unlocked_driver_cb = true;
@@ -433,7 +466,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
switch (f->command) {
case FLOW_BLOCK_BIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
if (indr_priv)
return -EEXIST;
@@ -443,6 +476,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
indr_priv->netdev = netdev;
indr_priv->rpriv = rpriv;
+ indr_priv->binder_type = f->binder_type;
list_add(&indr_priv->list,
&rpriv->uplink_priv.tc_indr_block_priv_list);
@@ -460,7 +494,7 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
case FLOW_BLOCK_UNBIND:
- indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
+ indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev, f->binder_type);
if (!indr_priv)
return -ENOENT;
@@ -597,8 +631,8 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
return false;
}
- /* Set tun_dev so we do dev_put() after datapath */
- tc_priv->tun_dev = dev;
+ /* Set fwd_dev so we do dev_put() after datapath */
+ tc_priv->fwd_dev = dev;
skb->dev = dev;
@@ -638,6 +672,12 @@ static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1,
return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
}
+static void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
+{
+ if (tc_priv->fwd_dev)
+ dev_put(tc_priv->fwd_dev);
+}
+
static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb,
struct mlx5_mapped_obj *mapped_obj,
struct mlx5e_tc_update_priv *tc_priv)
@@ -647,25 +687,54 @@ static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *sk
"Failed to restore tunnel info for sampled packet\n");
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_skb(skb, mapped_obj);
-#endif /* CONFIG_MLX5_TC_SAMPLE */
mlx5_rep_tc_post_napi_receive(tc_priv);
}
-bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv)
+static bool mlx5e_restore_skb_int_port(struct mlx5e_priv *priv, struct sk_buff *skb,
+ struct mlx5_mapped_obj *mapped_obj,
+ struct mlx5e_tc_update_priv *tc_priv,
+ bool *forward_tx,
+ u32 reg_c1)
+{
+ u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ /* Tunnel restore takes precedence over int port restore */
+ if (tunnel_id)
+ return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id);
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ if (mlx5e_tc_int_port_dev_fwd(uplink_priv->int_port_priv, skb,
+ mapped_obj->int_port_metadata, forward_tx)) {
+ /* Set fwd_dev for future dev_put */
+ tc_priv->fwd_dev = skb->dev;
+
+ return true;
+ }
+
+ return false;
+}
+
+void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb)
{
+ u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
+ struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_mapped_obj mapped_obj;
struct mlx5_eswitch *esw;
+ bool forward_tx = false;
struct mlx5e_priv *priv;
u32 reg_c0;
int err;
reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK);
if (!reg_c0 || reg_c0 == MLX5_FS_DEFAULT_FLOW_TAG)
- return true;
+ goto forward;
/* If reg_c0 is not equal to the default flow tag then skb->mark
* is not supported and must be reset back to 0.
@@ -679,26 +748,35 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
netdev_dbg(priv->netdev,
"Couldn't find mapped object for reg_c0: %d, err: %d\n",
reg_c0, err);
- return false;
+ goto free_skb;
}
if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
- u32 reg_c1 = be32_to_cpu(cqe->ft_metadata);
-
- return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv);
+ if (!mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, &tc_priv) &&
+ !mlx5_ipsec_is_rx_flow(cqe))
+ goto free_skb;
} else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) {
- mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv);
- return false;
+ mlx5e_restore_skb_sample(priv, skb, &mapped_obj, &tc_priv);
+ goto free_skb;
+ } else if (mapped_obj.type == MLX5_MAPPED_OBJ_INT_PORT_METADATA) {
+ if (!mlx5e_restore_skb_int_port(priv, skb, &mapped_obj, &tc_priv,
+ &forward_tx, reg_c1))
+ goto free_skb;
} else {
netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
- return false;
+ goto free_skb;
}
- return true;
-}
+forward:
+ if (forward_tx)
+ dev_queue_xmit(skb);
+ else
+ napi_gro_receive(rq->cq.napi, skb);
-void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv)
-{
- if (tc_priv->tun_dev)
- dev_put(tc_priv->tun_dev);
+ mlx5_rep_tc_post_napi_receive(&tc_priv);
+
+ return;
+
+free_skb:
+ dev_kfree_skb_any(skb);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
index d0661578467b..d6c7c81690eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.h
@@ -36,10 +36,8 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data);
-bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv);
-void mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv);
+void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
@@ -66,13 +64,9 @@ static inline int
mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data) { return -EOPNOTSUPP; }
-struct mlx5e_tc_update_priv;
-static inline bool
-mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
- struct sk_buff *skb,
- struct mlx5e_tc_update_priv *tc_priv) { return true; }
static inline void
-mlx5_rep_tc_post_napi_receive(struct mlx5e_tc_update_priv *tc_priv) {}
+mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
+ struct sk_buff *skb) {}
#endif /* CONFIG_MLX5_CLS_ACT */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
index 0eb125316fe2..74086eb556ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_rx.c
@@ -6,6 +6,7 @@
#include "txrx.h"
#include "devlink.h"
#include "ptp.h"
+#include "lib/tout.h"
static int mlx5e_query_rq_state(struct mlx5_core_dev *dev, u32 rqn, u8 *state)
{
@@ -32,8 +33,10 @@ out:
static int mlx5e_wait_for_icosq_flush(struct mlx5e_icosq *icosq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = icosq->channel->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (icosq->cc == icosq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index bb682fd751c9..4f4bc8726ec4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -4,11 +4,14 @@
#include "health.h"
#include "en/ptp.h"
#include "en/devlink.h"
+#include "lib/tout.h"
static int mlx5e_wait_for_sq_flush(struct mlx5e_txqsq *sq)
{
- unsigned long exp_time = jiffies +
- msecs_to_jiffies(MLX5E_REPORTER_FLUSH_TIMEOUT_MSEC);
+ struct mlx5_core_dev *dev = sq->mdev;
+ unsigned long exp_time;
+
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FLUSH_ON_ERROR));
while (time_before(jiffies, exp_time)) {
if (sq->cc == sq->pc)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
index 625cd49ef96c..c1cdd8c2e37a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c
@@ -127,7 +127,7 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
struct mlx5e_rss_params_traffic_type rss_tt;
@@ -161,7 +161,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
rqtn, rss->inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
@@ -198,14 +198,14 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
}
static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
- err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -297,7 +297,7 @@ int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_lro_param *init_lro_param)
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
int err;
@@ -305,12 +305,12 @@ int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
if (err)
goto err_out;
- err = mlx5e_rss_create_tirs(rss, init_lro_param, false);
+ err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
if (err)
goto err_destroy_rqt;
if (inner_ft_support) {
- err = mlx5e_rss_create_tirs(rss, init_lro_param, true);
+ err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
if (err)
goto err_destroy_tirs;
}
@@ -372,7 +372,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
*/
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -381,7 +381,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
- err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner);
+ err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -391,7 +391,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
return 0;
}
-static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
+static int mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
{
int err;
@@ -399,6 +399,7 @@ static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_r
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), err);
+ return err;
}
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns)
@@ -418,7 +419,8 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
}
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param)
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+ struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
enum mlx5_traffic_types tt;
@@ -428,7 +430,7 @@ int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_p
if (!builder)
return -ENOMEM;
- mlx5e_tir_builder_build_lro(builder, lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -490,6 +492,14 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
{
bool changed_indir = false;
bool changed_hash = false;
+ struct mlx5e_rss *old_rss;
+ int err = 0;
+
+ old_rss = mlx5e_rss_alloc();
+ if (!old_rss)
+ return -ENOMEM;
+
+ *old_rss = *rss;
if (hfunc && *hfunc != rss->hash.hfunc) {
switch (*hfunc) {
@@ -497,7 +507,8 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
case ETH_RSS_HASH_TOP:
break;
default:
- return -EINVAL;
+ err = -EINVAL;
+ goto out;
}
changed_hash = true;
changed_indir = true;
@@ -520,13 +531,20 @@ int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
rss->indir.table[i] = indir[i];
}
- if (changed_indir && rss->enabled)
- mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (changed_indir && rss->enabled) {
+ err = mlx5e_rss_apply(rss, rqns, num_rqns);
+ if (err) {
+ *rss = *old_rss;
+ goto out;
+ }
+ }
if (changed_hash)
mlx5e_rss_update_tirs(rss);
- return 0;
+out:
+ mlx5e_rss_free(old_rss);
+ return err;
}
struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss)
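/* Generic sketch (illustration only) of the snapshot-and-rollback pattern the
 * hunk above adds to mlx5e_rss_set_rxfh(): copy the whole state object before
 * mutating it, and copy it back if applying the new state to hardware fails,
 * so software and hardware never disagree. Types and names are hypothetical.
 */
struct example_state { int hash; int indir[8]; };

static int example_set_state(struct example_state *cur,
			     const struct example_state *next,
			     int (*apply)(const struct example_state *))
{
	struct example_state old = *cur;	/* snapshot */
	int err;

	*cur = *next;
	err = apply(cur);
	if (err)
		*cur = old;			/* roll back on failure */
	return err;
}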
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
index d522a10dadf3..c6b216416344 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h
@@ -17,7 +17,7 @@ struct mlx5e_rss *mlx5e_rss_alloc(void);
void mlx5e_rss_free(struct mlx5e_rss *rss);
int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn,
- const struct mlx5e_lro_param *init_lro_param);
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param);
int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev,
bool inner_ft_support, u32 drop_rqn);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
@@ -30,13 +30,14 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);
-int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param);
+int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
+ struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc);
int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir,
const u8 *key, const u8 *hfunc,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
index 13056cb9757d..142953847996 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c
@@ -34,7 +34,7 @@ struct mlx5e_rx_res {
/* API for rx_res_rss_* */
static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
- const struct mlx5e_lro_param *init_lro_param,
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
@@ -49,7 +49,7 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
return -ENOMEM;
err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn,
- init_lro_param);
+ init_pkt_merge_param);
if (err)
goto err_rss_free;
@@ -275,7 +275,7 @@ struct mlx5e_rx_res *mlx5e_rx_res_alloc(void)
}
static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
- const struct mlx5e_lro_param *init_lro_param)
+ const struct mlx5e_packet_merge_param *init_pkt_merge_param)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_tir_builder *builder;
@@ -306,7 +306,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true);
@@ -336,7 +336,7 @@ static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res,
mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn,
mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt),
inner_ft_support);
- mlx5e_tir_builder_build_lro(builder, init_lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
mlx5e_tir_builder_build_direct(builder);
err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true);
@@ -437,7 +437,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch)
{
int err;
@@ -447,11 +447,11 @@ int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
- err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch);
+ err = mlx5e_rx_res_rss_init_def(res, init_pkt_merge_param, init_nch);
if (err)
goto err_out;
- err = mlx5e_rx_res_channels_init(res, init_lro_param);
+ err = mlx5e_rx_res_channels_init(res, init_pkt_merge_param);
if (err)
goto err_rss_destroy;
@@ -645,7 +645,8 @@ int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix)
return err;
}
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param)
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+ struct mlx5e_packet_merge_param *pkt_merge_param)
{
struct mlx5e_tir_builder *builder;
int err, final_err;
@@ -655,7 +656,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!builder)
return -ENOMEM;
- mlx5e_tir_builder_build_lro(builder, lro_param);
+ mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
final_err = 0;
@@ -665,7 +666,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
if (!rss)
continue;
- err = mlx5e_rss_lro_set_param(rss, lro_param);
+ err = mlx5e_rss_packet_merge_set_param(rss, pkt_merge_param);
if (err)
final_err = final_err ? : err;
}
@@ -673,7 +674,7 @@ int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param
for (ix = 0; ix < res->max_nch; ix++) {
err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder);
if (err) {
- mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n",
+ mlx5_core_warn(res->mdev, "Failed to update packet merge state of direct TIR %#x for channel %u: err = %d\n",
mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err);
if (!final_err)
final_err = err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
index 4a15942d79f7..d09f7d174a51 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h
@@ -25,7 +25,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *mlx5e_rx_res_alloc(void);
int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev,
enum mlx5e_rx_res_features features, unsigned int max_nch,
- u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param,
+ u32 drop_rqn, const struct mlx5e_packet_merge_param *init_pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);
void mlx5e_rx_res_free(struct mlx5e_rx_res *res);
@@ -57,7 +57,8 @@ int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx,
u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt);
int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt,
u8 rx_hash_fields);
-int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param);
+int mlx5e_rx_res_packet_merge_set_param(struct mlx5e_rx_res *res,
+ struct mlx5e_packet_merge_param *pkt_merge_param);
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch);
int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
new file mode 100644
index 000000000000..ca834bbcb44f
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.c
@@ -0,0 +1,457 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/fs.h>
+#include "en/mapping.h"
+#include "en/tc/int_port.h"
+#include "en.h"
+#include "en_rep.h"
+#include "en_tc.h"
+
+struct mlx5e_tc_int_port {
+ enum mlx5e_tc_int_port_type type;
+ int ifindex;
+ u32 match_metadata;
+ u32 mapping;
+ struct list_head list;
+ struct mlx5_flow_handle *rx_rule;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+struct mlx5e_tc_int_port_priv {
+ struct mlx5_core_dev *dev;
+ struct mutex int_ports_lock; /* Protects int ports list */
+ struct list_head int_ports; /* Uses int_ports_lock */
+ u16 num_ports;
+ bool ul_rep_rx_ready; /* Set when uplink is performing teardown */
+ struct mapping_ctx *metadata_mapping; /* Metadata for source port rewrite and matching */
+};
+
+bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw)
+{
+ return mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_GEN(esw->dev, reg_c_preserve);
+}
+
+u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port)
+{
+ return int_port->match_metadata;
+}
+
+int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port)
+{
+	/* For egress forwarding the packet may have come from a vport and been
+	 * redirected to the internal port, or it may have come from the uplink,
+	 * gone via the internal port and been hairpinned back to the uplink,
+	 * so we set the source to any port in this case.
+	 */
+ return int_port->type == MLX5E_TC_INT_PORT_EGRESS ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_ANY_VPORT :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+}
+
+u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port)
+{
+ return int_port->match_metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
+}
+
+static struct mlx5_flow_handle *
+mlx5e_int_port_create_rx_rule(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_int_port *int_port,
+ struct mlx5_flow_destination *dest)
+
+{
+ struct mlx5_flow_context *flow_context;
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+ void *misc;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return ERR_PTR(-ENOMEM);
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5e_tc_int_port_get_metadata_for_match(int_port));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ /* Overwrite flow tag with the int port metadata mapping
+ * instead of the chain mapping.
+ */
+ flow_context = &spec->flow_context;
+ flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
+ flow_context->flow_tag = int_port->mapping;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
+ &flow_act, dest, 1);
+ if (IS_ERR(flow_rule))
+ mlx5_core_warn(esw->dev, "ft offloads: Failed to add internal vport rx rule err %ld\n",
+ PTR_ERR(flow_rule));
+
+ kvfree(spec);
+
+ return flow_rule;
+}
+
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_lookup(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ if (!priv->ul_rep_rx_ready)
+ goto not_found;
+
+ list_for_each_entry(int_port, &priv->int_ports, list)
+ if (int_port->ifindex == ifindex && int_port->type == type) {
+ refcount_inc(&int_port->refcnt);
+ return int_port;
+ }
+
+not_found:
+ return NULL;
+}
+
+static int mlx5e_int_port_metadata_alloc(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex, enum mlx5e_tc_int_port_type type,
+ u32 *id)
+{
+ u32 mapped_key[2] = {type, ifindex};
+ int err;
+
+ err = mapping_add(priv->metadata_mapping, mapped_key, id);
+ if (err)
+ return err;
+
+ /* Fill upper 4 bits of PFNUM with reserved value */
+ *id |= 0xf << ESW_VPORT_BITS;
+
+ return 0;
+}
+
+static void mlx5e_int_port_metadata_free(struct mlx5e_tc_int_port_priv *priv,
+ u32 id)
+{
+ id &= (1 << ESW_VPORT_BITS) - 1;
+ mapping_remove(priv->metadata_mapping, id);
+}
+
+/* Must be called with priv->int_ports_lock held */
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_add(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5_eswitch *esw = priv->dev->priv.eswitch;
+ struct mlx5_mapped_obj mapped_obj = {};
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_int_port *int_port;
+ struct mlx5_flow_destination dest;
+ struct mapping_ctx *ctx;
+ u32 match_metadata;
+ u32 mapping;
+ int err;
+
+ if (priv->num_ports == MLX5E_TC_MAX_INT_PORT_NUM) {
+ mlx5_core_dbg(priv->dev, "Cannot add a new int port, max supported %d",
+ MLX5E_TC_MAX_INT_PORT_NUM);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ int_port = kzalloc(sizeof(*int_port), GFP_KERNEL);
+ if (!int_port)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlx5e_int_port_metadata_alloc(priv, ifindex, type, &match_metadata);
+ if (err) {
+ mlx5_core_warn(esw->dev, "Cannot add a new internal port, metadata allocation failed for ifindex %d",
+ ifindex);
+ goto err_metadata;
+ }
+
+ /* map metadata to reg_c0 object for miss handling */
+ ctx = esw->offloads.reg_c0_obj_pool;
+ mapped_obj.type = MLX5_MAPPED_OBJ_INT_PORT_METADATA;
+ mapped_obj.int_port_metadata = match_metadata;
+ err = mapping_add(ctx, &mapped_obj, &mapping);
+ if (err)
+ goto err_map;
+
+ int_port->type = type;
+ int_port->ifindex = ifindex;
+ int_port->match_metadata = match_metadata;
+ int_port->mapping = mapping;
+
+ /* Create a match on internal vport metadata in vport table */
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest.ft = uplink_rpriv->root_ft;
+
+ int_port->rx_rule = mlx5e_int_port_create_rx_rule(esw, int_port, &dest);
+ if (IS_ERR(int_port->rx_rule)) {
+ err = PTR_ERR(int_port->rx_rule);
+ mlx5_core_warn(esw->dev, "Can't add internal port rx rule, err %d", err);
+ goto err_rx_rule;
+ }
+
+ refcount_set(&int_port->refcnt, 1);
+ list_add_rcu(&int_port->list, &priv->int_ports);
+ priv->num_ports++;
+
+ return int_port;
+
+err_rx_rule:
+ mapping_remove(ctx, int_port->mapping);
+
+err_map:
+ mlx5e_int_port_metadata_free(priv, match_metadata);
+
+err_metadata:
+ kfree(int_port);
+
+ return ERR_PTR(err);
+}
+
+/* Must be called with priv->int_ports_lock held */
+static void
+mlx5e_int_port_remove(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port)
+{
+ struct mlx5_eswitch *esw = priv->dev->priv.eswitch;
+ struct mapping_ctx *ctx;
+
+ ctx = esw->offloads.reg_c0_obj_pool;
+
+ list_del_rcu(&int_port->list);
+
+ /* The following parameters are not used by the
+ * rcu readers of this int_port object so it is
+ * safe to release them.
+ */
+ if (int_port->rx_rule)
+ mlx5_del_flow_rules(int_port->rx_rule);
+ mapping_remove(ctx, int_port->mapping);
+ mlx5e_int_port_metadata_free(priv, int_port->match_metadata);
+ kfree_rcu(int_port);
+ priv->num_ports--;
+}
+
+/* Must be called with rcu_read_lock held */
+static struct mlx5e_tc_int_port *
+mlx5e_int_port_get_from_metadata(struct mlx5e_tc_int_port_priv *priv,
+ u32 metadata)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ list_for_each_entry_rcu(int_port, &priv->int_ports, list)
+ if (int_port->match_metadata == metadata)
+ return int_port;
+
+ return NULL;
+}
+
+struct mlx5e_tc_int_port *
+mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type)
+{
+ struct mlx5e_tc_int_port *int_port;
+
+ if (!priv)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ mutex_lock(&priv->int_ports_lock);
+
+ /* Reject request if ul rep not ready */
+ if (!priv->ul_rep_rx_ready) {
+ int_port = ERR_PTR(-EOPNOTSUPP);
+ goto done;
+ }
+
+ int_port = mlx5e_int_port_lookup(priv, ifindex, type);
+ if (int_port)
+ goto done;
+
+ /* Alloc and add new int port to list */
+ int_port = mlx5e_int_port_add(priv, ifindex, type);
+
+done:
+ mutex_unlock(&priv->int_ports_lock);
+
+ return int_port;
+}
+
+void
+mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port)
+{
+ if (!refcount_dec_and_mutex_lock(&int_port->refcnt, &priv->int_ports_lock))
+ return;
+
+ mlx5e_int_port_remove(priv, int_port);
+ mutex_unlock(&priv->int_ports_lock);
+}
+
+struct mlx5e_tc_int_port_priv *
+mlx5e_tc_int_port_init(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5e_tc_int_port_priv *int_port_priv;
+ u64 mapping_id;
+
+ if (!mlx5e_tc_int_port_supported(esw))
+ return NULL;
+
+ int_port_priv = kzalloc(sizeof(*int_port_priv), GFP_KERNEL);
+ if (!int_port_priv)
+ return NULL;
+
+ mapping_id = mlx5_query_nic_system_image_guid(priv->mdev);
+
+ int_port_priv->metadata_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_INT_PORT,
+ sizeof(u32) * 2,
+ (1 << ESW_VPORT_BITS) - 1, true);
+ if (IS_ERR(int_port_priv->metadata_mapping)) {
+ mlx5_core_warn(priv->mdev, "Can't allocate metadata mapping for int port offload, err=%ld\n",
+ PTR_ERR(int_port_priv->metadata_mapping));
+ goto err_mapping;
+ }
+
+ int_port_priv->dev = priv->mdev;
+ mutex_init(&int_port_priv->int_ports_lock);
+ INIT_LIST_HEAD(&int_port_priv->int_ports);
+
+ return int_port_priv;
+
+err_mapping:
+ kfree(int_port_priv);
+
+ return NULL;
+}
+
+void
+mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv)
+{
+ if (!priv)
+ return;
+
+ mutex_destroy(&priv->int_ports_lock);
+ mapping_destroy(priv->metadata_mapping);
+ kfree(priv);
+}
+
+/* Int port rx rules reside in ul rep rx tables.
+ * It is possible the ul rep will go down while there are
+ * still int port rules in its rx table so proper cleanup
+ * is required to free resources.
+ */
+void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_tc_int_port_priv *ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ ppriv = uplink_priv->int_port_priv;
+
+ if (!ppriv)
+ return;
+
+ mutex_lock(&ppriv->int_ports_lock);
+ ppriv->ul_rep_rx_ready = true;
+ mutex_unlock(&ppriv->int_ports_lock);
+}
+
+void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_tc_int_port_priv *ppriv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+ struct mlx5e_tc_int_port *int_port;
+
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ ppriv = uplink_priv->int_port_priv;
+
+ if (!ppriv)
+ return;
+
+ mutex_lock(&ppriv->int_ports_lock);
+
+ ppriv->ul_rep_rx_ready = false;
+
+ list_for_each_entry(int_port, &ppriv->int_ports, list) {
+ if (!IS_ERR_OR_NULL(int_port->rx_rule))
+ mlx5_del_flow_rules(int_port->rx_rule);
+
+ int_port->rx_rule = NULL;
+ }
+
+ mutex_unlock(&ppriv->int_ports_lock);
+}
+
+bool
+mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv,
+ struct sk_buff *skb, u32 int_vport_metadata,
+ bool *forward_tx)
+{
+ enum mlx5e_tc_int_port_type fwd_type;
+ struct mlx5e_tc_int_port *int_port;
+ struct net_device *dev;
+ int ifindex;
+
+ if (!priv)
+ return false;
+
+ rcu_read_lock();
+ int_port = mlx5e_int_port_get_from_metadata(priv, int_vport_metadata);
+ if (!int_port) {
+ rcu_read_unlock();
+ mlx5_core_dbg(priv->dev, "Unable to find int port with metadata 0x%.8x\n",
+ int_vport_metadata);
+ return false;
+ }
+
+ ifindex = int_port->ifindex;
+ fwd_type = int_port->type;
+ rcu_read_unlock();
+
+ dev = dev_get_by_index(&init_net, ifindex);
+ if (!dev) {
+ mlx5_core_dbg(priv->dev,
+ "Couldn't find internal port device with ifindex: %d\n",
+ ifindex);
+ return false;
+ }
+
+ skb->skb_iif = dev->ifindex;
+ skb->dev = dev;
+
+ if (fwd_type == MLX5E_TC_INT_PORT_INGRESS) {
+ skb->pkt_type = PACKET_HOST;
+ skb_set_redirected(skb, true);
+ *forward_tx = false;
+ } else {
+ skb_reset_network_header(skb);
+ skb_push_rcsum(skb, skb->mac_len);
+ skb_set_redirected(skb, false);
+ *forward_tx = true;
+ }
+
+ return true;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h
new file mode 100644
index 000000000000..e72c79d308d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/int_port.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_EN_TC_INT_PORT_H__
+#define __MLX5_EN_TC_INT_PORT_H__
+
+#include "en.h"
+
+struct mlx5e_tc_int_port;
+struct mlx5e_tc_int_port_priv;
+
+enum mlx5e_tc_int_port_type {
+ MLX5E_TC_INT_PORT_INGRESS,
+ MLX5E_TC_INT_PORT_EGRESS,
+};
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw);
+
+struct mlx5e_tc_int_port_priv *
+mlx5e_tc_int_port_init(struct mlx5e_priv *priv);
+void
+mlx5e_tc_int_port_cleanup(struct mlx5e_tc_int_port_priv *priv);
+
+void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv);
+void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv);
+
+bool
+mlx5e_tc_int_port_dev_fwd(struct mlx5e_tc_int_port_priv *priv,
+ struct sk_buff *skb, u32 int_vport_metadata,
+ bool *forward_tx);
+struct mlx5e_tc_int_port *
+mlx5e_tc_int_port_get(struct mlx5e_tc_int_port_priv *priv,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type);
+void
+mlx5e_tc_int_port_put(struct mlx5e_tc_int_port_priv *priv,
+ struct mlx5e_tc_int_port *int_port);
+
+u32 mlx5e_tc_int_port_get_metadata(struct mlx5e_tc_int_port *int_port);
+u32 mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port);
+int mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port);
+#else /* CONFIG_MLX5_CLS_ACT */
+static inline u32
+mlx5e_tc_int_port_get_metadata_for_match(struct mlx5e_tc_int_port *int_port)
+{
+ return 0;
+}
+
+static inline int
+mlx5e_tc_int_port_get_flow_source(struct mlx5e_tc_int_port *int_port)
+{
+ return 0;
+}
+
+static inline bool mlx5e_tc_int_port_supported(const struct mlx5_eswitch *esw)
+{
+ return false;
+}
+
+static inline void mlx5e_tc_int_port_init_rep_rx(struct mlx5e_priv *priv) {}
+static inline void mlx5e_tc_int_port_cleanup_rep_rx(struct mlx5e_priv *priv) {}
+
+#endif /* CONFIG_MLX5_CLS_ACT */
+#endif /* __MLX5_EN_TC_INT_PORT_H__ */
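
The metadata layout used by int_port.c can be illustrated with a small standalone program. The bit widths below are assumptions chosen for illustration (ESW_VPORT_BITS = 12 and ESW_SOURCE_PORT_METADATA_BITS = 16 would explain both the "0xf << ESW_VPORT_BITS" PFNUM fill and the "32 - ESW_SOURCE_PORT_METADATA_BITS" shift used for reg_c_0 matching); the real values come from the mlx5 eswitch headers.

/* Standalone illustration of the int-port metadata packing, using assumed
 * bit widths (real values live in the mlx5 eswitch headers).
 */
#include <stdint.h>
#include <stdio.h>

#define ESW_VPORT_BITS			12	/* assumed */
#define ESW_SOURCE_PORT_METADATA_BITS	16	/* assumed: 12 vport + 4 pfnum */

int main(void)
{
	uint32_t mapped_id = 0x123;		/* id returned by mapping_add() */
	uint32_t match_metadata, reg_c_0_match;

	/* mlx5e_int_port_metadata_alloc(): reserved PFNUM in the upper 4 bits */
	match_metadata = mapped_id | (0xf << ESW_VPORT_BITS);

	/* mlx5e_tc_int_port_get_metadata_for_match(): the source-port metadata
	 * occupies the high bits of the 32-bit reg_c_0 register.
	 */
	reg_c_0_match = match_metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);

	printf("match_metadata = 0x%04x, reg_c_0 match = 0x%08x\n",
	       match_metadata, reg_c_0_match);

	/* mlx5e_int_port_metadata_free(): strip the PFNUM bits to recover the
	 * mapping id before mapping_remove().
	 */
	printf("recovered id   = 0x%04x\n",
	       match_metadata & ((1u << ESW_VPORT_BITS) - 1));
	return 0;
}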
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index a3e43e898a56..31b4e39be2d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -4,6 +4,7 @@
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
+#include "fs_core.h"
struct mlx5e_post_act {
enum mlx5_flow_namespace_type ns_type;
@@ -28,16 +29,14 @@ struct mlx5e_post_act *
mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
enum mlx5_flow_namespace_type ns_type)
{
+ enum fs_flow_table_type table_type = ns_type == MLX5_FLOW_NAMESPACE_FDB ?
+ FS_FT_FDB : FS_FT_NIC_RX;
struct mlx5e_post_act *post_act;
int err;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB &&
- !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) {
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
- err = -EOPNOTSUPP;
- goto err_check;
- } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
- mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
+ if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ignore_flow_level, table_type)) {
+ if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ mlx5_core_warn(priv->mdev, "firmware level support is missing\n");
err = -EOPNOTSUPP;
goto err_check;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
index 6552ecee3f9b..df6888c4793c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c
@@ -509,13 +509,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (IS_ERR_OR_NULL(tc_psample))
return ERR_PTR(-EOPNOTSUPP);
- /* If slow path flag is set, eg. when the neigh is invalid for encap,
- * don't offload sample action.
- */
- esw = tc_psample->esw;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
- return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
-
sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL);
if (!sample_flow)
return ERR_PTR(-ENOMEM);
@@ -527,6 +520,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* Only match the fte id instead of the same match in the
* original flow table.
*/
+ esw = tc_psample->esw;
if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
struct mlx5_flow_table *ft;
@@ -602,7 +596,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
}
sample_flow->pre_attr = pre_attr;
- return sample_flow->post_rule;
+ return sample_flow->pre_rule;
err_pre_offload_rule:
kfree(pre_attr);
@@ -613,7 +607,7 @@ err_sample_restore:
err_obj_id:
sampler_put(tc_psample, sample_flow->sampler);
err_sampler:
- if (!post_act_handle)
+ if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
if (post_act_handle)
@@ -628,45 +622,26 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_handle *rule,
struct mlx5_flow_attr *attr)
{
- struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5e_sample_flow *sample_flow;
- struct mlx5_vport_tbl_attr tbl_attr;
struct mlx5_eswitch *esw;
if (IS_ERR_OR_NULL(tc_psample))
return;
- /* If slow path flag is set, sample action is not offloaded.
- * No need to delete sample rule.
- */
- esw = tc_psample->esw;
- if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
- mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
- return;
- }
-
/* The following delete order can't be changed, otherwise,
* will hit fw syndromes.
*/
+ esw = tc_psample->esw;
sample_flow = attr->sample_attr->sample_flow;
mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr);
- if (!sample_flow->post_act_handle)
- mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule,
- sample_flow->post_attr);
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
- if (sample_flow->post_act_handle) {
+ if (sample_flow->post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
- } else {
- tbl_attr.chain = attr->chain;
- tbl_attr.prio = attr->prio;
- tbl_attr.vport = esw_attr->in_rep->vport;
- tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns;
- mlx5_esw_vporttbl_put(esw, &tbl_attr);
- kfree(sample_flow->post_attr);
- }
+ else
+ del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
kfree(sample_flow);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
index db0146df9b30..9ef8a49d7801 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h
@@ -19,6 +19,8 @@ struct mlx5e_sample_attr {
struct mlx5e_sample_flow *sample_flow;
};
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+
void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj);
struct mlx5_flow_handle *
@@ -38,4 +40,29 @@ mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act);
void
mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample);
+#else /* CONFIG_MLX5_TC_SAMPLE */
+
+static inline struct mlx5_flow_handle *
+mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_attr *attr,
+ u32 tunnel_id)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
+ struct mlx5_flow_handle *rule,
+ struct mlx5_flow_attr *attr) {}
+
+static inline struct mlx5e_tc_psample *
+mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act)
+{ return ERR_PTR(-EOPNOTSUPP); }
+
+static inline void
+mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) {}
+
+static inline void
+mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) {}
+
+#endif /* CONFIG_MLX5_TC_SAMPLE */
#endif /* __MLX5_EN_TC_SAMPLE_H__ */
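
The sample.h change follows the same compile-time gating idiom as int_port.h above: real declarations under the Kconfig symbol, static inline no-op stubs otherwise, so call sites never need #ifdefs. A generic, self-contained sketch of the idiom (CONFIG_FOO, foo_init and foo_do are placeholders, and the kernel uses IS_ENABLED() rather than a bare #ifdef):

/* Generic sketch of the Kconfig/stub idiom used by sample.h and int_port.h;
 * names are placeholders, not mlx5 APIs.
 */
#include <stdio.h>

/* #define CONFIG_FOO 1 */		/* normally provided by Kconfig */

#ifdef CONFIG_FOO
int foo_init(void);			/* real implementation built elsewhere */
void foo_do(int x);
#else
static inline int foo_init(void) { return -95 /* -EOPNOTSUPP */; }
static inline void foo_do(int x) { (void)x; }
#endif

int main(void)
{
	/* Callers are written once, with no #ifdefs at the call sites */
	if (foo_init())
		printf("feature disabled, running without it\n");
	foo_do(42);
	return 0;
}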
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
index 6c949abcd2e1..c1c6e74c79c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
@@ -889,7 +889,7 @@ mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
return ERR_PTR(-ENOMEM);
counter->is_shared = false;
- counter->counter = mlx5_fc_create(ct_priv->dev, true);
+ counter->counter = mlx5_fc_create_ex(ct_priv->dev, true);
if (IS_ERR(counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(counter->counter);
@@ -2039,25 +2039,36 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw,
static int
mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
- struct mlx5e_post_act *post_act,
- const char **err_msg)
+ struct mlx5e_post_act *post_act)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ const char *err_msg = NULL;
+ int err = 0;
#if !IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
/* cannot restore chain ID on HW miss */
- *err_msg = "tc skb extension missing";
- return -EOPNOTSUPP;
+ err_msg = "tc skb extension missing";
+ err = -EOPNOTSUPP;
+ goto out_err;
#endif
if (IS_ERR_OR_NULL(post_act)) {
- *err_msg = "tc ct offload not supported, post action is missing";
- return -EOPNOTSUPP;
+ /* ignore_flow_level isn't supported by default for VFs, so post_act
+ * won't be available either; skip the error message in that case.
+ */
+ if (priv->mdev->coredev_type != MLX5_COREDEV_VF)
+ err_msg = "post action is missing";
+ err = -EOPNOTSUPP;
+ goto out_err;
}
if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
- return mlx5_tc_ct_init_check_esw_support(esw, err_msg);
- return 0;
+ err = mlx5_tc_ct_init_check_esw_support(esw, &err_msg);
+
+out_err:
+ if (err && err_msg)
+ netdev_dbg(priv->netdev, "tc ct offload not supported, %s\n", err_msg);
+ return err;
}
#define INIT_ERR_PREFIX "tc ct offload init failed"
@@ -2070,16 +2081,13 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
{
struct mlx5_tc_ct_priv *ct_priv;
struct mlx5_core_dev *dev;
- const char *msg;
u64 mapping_id;
int err;
dev = priv->mdev;
- err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg);
- if (err) {
- mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg);
+ err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act);
+ if (err)
goto err_support;
- }
ct_priv = kzalloc(sizeof(*ct_priv), GFP_KERNEL);
if (!ct_priv)
@@ -2127,12 +2135,21 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains,
ct_priv->post_act = post_act;
mutex_init(&ct_priv->control_lock);
- rhashtable_init(&ct_priv->zone_ht, &zone_params);
- rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params);
- rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params);
+ if (rhashtable_init(&ct_priv->zone_ht, &zone_params))
+ goto err_ct_zone_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params))
+ goto err_ct_tuples_ht;
+ if (rhashtable_init(&ct_priv->ct_tuples_nat_ht, &tuples_nat_ht_params))
+ goto err_ct_tuples_nat_ht;
return ct_priv;
+err_ct_tuples_nat_ht:
+ rhashtable_destroy(&ct_priv->ct_tuples_ht);
+err_ct_tuples_ht:
+ rhashtable_destroy(&ct_priv->zone_ht);
+err_ct_zone_ht:
+ mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat);
err_ct_nat_tbl:
mlx5_chains_destroy_global_table(chains, ct_priv->ct);
err_ct_tbl:
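
The switch to checked rhashtable_init() calls above follows the usual goto-unwind idiom: each table that was initialized successfully gets a matching rhashtable_destroy() on the error path, in reverse order. A minimal, hypothetical sketch of the pattern (the my_ctx type and setup() caller are illustrative):

/* Illustrative unwind pattern only; my_ctx and setup() are hypothetical. */
#include <linux/rhashtable.h>

struct my_ctx {
	struct rhashtable a_ht;
	struct rhashtable b_ht;
};

static int setup(struct my_ctx *ctx,
		 const struct rhashtable_params *a_params,
		 const struct rhashtable_params *b_params)
{
	int err;

	err = rhashtable_init(&ctx->a_ht, a_params);
	if (err)
		return err;

	err = rhashtable_init(&ctx->b_ht, b_params);
	if (err)
		goto err_b_ht;

	return 0;

err_b_ht:
	/* undo only what already succeeded, in reverse order */
	rhashtable_destroy(&ctx->a_ht);
	return err;
}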
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index d1599b7b944b..8f64f2c8895a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -173,4 +173,6 @@ void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow);
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow);
+struct mlx5e_tc_int_port_priv *
+mlx5e_get_int_port_priv(struct mlx5e_priv *priv);
#endif /* __MLX5_EN_TC_PRIV_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index b4e986818794..a5e450973225 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -10,6 +10,8 @@
#include "en_tc.h"
#include "rep/tc.h"
#include "rep/neigh.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
struct mlx5e_tc_tun_route_attr {
struct net_device *out_dev;
@@ -81,7 +83,8 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
*/
*route_dev = dev;
if (!netdev_port_same_parent_id(priv->netdev, real_dev) ||
- dst_is_lag_dev || is_vlan_dev(*route_dev))
+ dst_is_lag_dev || is_vlan_dev(*route_dev) ||
+ netif_is_ovs_master(*route_dev))
*out_dev = uplink_dev;
else if (mlx5e_eswitch_rep(dev) &&
mlx5e_is_valid_eswitch_fwd_dev(priv, dev))
@@ -118,6 +121,11 @@ static int mlx5e_route_lookup_ipv4_get(struct mlx5e_priv *priv,
uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
attr->fl.fl4.flowi4_oif = uplink_dev->ifindex;
+ } else {
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
+
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl4.flowi4_oif = tunnel->get_remote_ifindex(mirred_dev);
}
rt = ip_route_output_key(dev_net(mirred_dev), &attr->fl.fl4);
@@ -435,12 +443,15 @@ static int mlx5e_route_lookup_ipv6_get(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5e_tc_tun_route_attr *attr)
{
+ struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(mirred_dev);
struct net_device *route_dev;
struct net_device *out_dev;
struct dst_entry *dst;
struct neighbour *n;
int ret;
+ if (tunnel && tunnel->get_remote_ifindex)
+ attr->fl.fl6.flowi6_oif = tunnel->get_remote_ifindex(mirred_dev);
dst = ipv6_stub->ipv6_dst_lookup_flow(dev_net(mirred_dev), NULL, &attr->fl.fl6,
NULL);
if (IS_ERR(dst))
@@ -700,6 +711,7 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
struct mlx5_flow_attr *flow_attr)
{
struct mlx5_esw_flow_attr *esw_attr = flow_attr->esw_attr;
+ struct mlx5e_tc_int_port *int_port;
TC_TUN_ROUTE_ATTR_INIT(attr);
u16 vport_num;
int err = 0;
@@ -724,17 +736,25 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
if (err)
return err;
- if (attr.route_dev->netdev_ops != &mlx5e_netdev_ops ||
- !mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev))
- goto out;
-
- err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
- if (err)
- goto out;
+ if (attr.route_dev->netdev_ops == &mlx5e_netdev_ops &&
+ mlx5e_tc_is_vf_tunnel(attr.out_dev, attr.route_dev)) {
+ err = mlx5e_tc_query_route_vport(attr.out_dev, attr.route_dev, &vport_num);
+ if (err)
+ goto out;
- esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
- misc_parameters.vxlan_vni);
- esw_attr->rx_tun_attr->decap_vport = vport_num;
+ esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
+ misc_parameters.vxlan_vni);
+ esw_attr->rx_tun_attr->decap_vport = vport_num;
+ } else if (netif_is_ovs_master(attr.route_dev)) {
+ int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+ attr.route_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS);
+ if (IS_ERR(int_port)) {
+ err = PTR_ERR(int_port);
+ goto out;
+ }
+ esw_attr->int_port = int_port;
+ }
out:
if (flow_attr->tun_ip_version == 4)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
index 9350ca05ce65..aa092eaeaec3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h
@@ -51,6 +51,7 @@ struct mlx5e_tc_tunnel {
void *headers_v);
bool (*encap_info_equal)(struct mlx5e_encap_key *a,
struct mlx5e_encap_key *b);
+ int (*get_remote_ifindex)(struct net_device *mirred_dev);
};
extern struct mlx5e_tc_tunnel vxlan_tunnel;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 1c44c6c345f5..660cca73c36c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -13,6 +13,30 @@ enum {
MLX5E_ROUTE_ENTRY_VALID = BIT(0),
};
+static int mlx5e_set_int_port_tunnel(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ struct mlx5e_encap_entry *e,
+ int out_index)
+{
+ struct net_device *route_dev;
+ int err = 0;
+
+ route_dev = dev_get_by_index(dev_net(e->out_dev), e->route_dev_ifindex);
+
+ if (!route_dev || !netif_is_ovs_master(route_dev))
+ goto out;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, e->route_dev_ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &attr->action, out_index);
+
+out:
+ if (route_dev)
+ dev_put(route_dev);
+
+ return err;
+}
+
struct mlx5e_route_key {
int ip_version;
union {
@@ -809,6 +833,17 @@ attach_flow:
if (err)
goto out_err;
+ err = mlx5e_set_int_port_tunnel(priv, attr, e, out_index);
+ if (err == -EOPNOTSUPP) {
+ /* If device doesn't support int port offload,
+ * redirect to uplink vport.
+ */
+ mlx5_core_dbg(priv->mdev, "attaching int port as encap dev not supported, using uplink\n");
+ err = 0;
+ } else if (err) {
+ goto out_err;
+ }
+
flow->encaps[out_index].e = e;
list_add(&flow->encaps[out_index].list, &e->flows);
flow->encaps[out_index].index = out_index;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
index 4267f3a1059e..fd07c4cbfd1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
@@ -141,6 +141,14 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
return 0;
}
+static int mlx5e_tc_tun_get_remote_ifindex(struct net_device *mirred_dev)
+{
+ const struct vxlan_dev *vxlan = netdev_priv(mirred_dev);
+ const struct vxlan_rdst *dst = &vxlan->default_dst;
+
+ return dst->remote_ifindex;
+}
+
struct mlx5e_tc_tunnel vxlan_tunnel = {
.tunnel_type = MLX5E_TC_TUNNEL_TYPE_VXLAN,
.match_level = MLX5_MATCH_L4,
@@ -151,4 +159,5 @@ struct mlx5e_tc_tunnel vxlan_tunnel = {
.parse_udp_ports = mlx5e_tc_tun_parse_udp_ports_vxlan,
.parse_tunnel = mlx5e_tc_tun_parse_vxlan,
.encap_info_equal = mlx5e_tc_tun_encap_info_equal_generic,
+ .get_remote_ifindex = mlx5e_tc_tun_get_remote_ifindex,
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
index de936dc4bc48..da169b816665 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c
@@ -70,24 +70,30 @@ void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support);
}
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
- const struct mlx5e_lro_param *lro_param)
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_packet_merge_param *pkt_merge_param)
{
void *tirc = mlx5e_tir_builder_get_tirc(builder);
const unsigned int rough_max_l2_l3_hdr_sz = 256;
if (builder->modify)
- MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1);
-
- if (!lro_param->enabled)
- return;
-
- MLX5_SET(tirc, tirc, lro_enable_mask,
- MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
- MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
- MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
- (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
- MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout);
+ MLX5_SET(modify_tir_in, builder->in, bitmask.packet_merge, 1);
+
+ switch (pkt_merge_param->type) {
+ case MLX5E_PACKET_MERGE_LRO:
+ MLX5_SET(tirc, tirc, packet_merge_mask,
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV4_LRO |
+ MLX5_TIRC_PACKET_MERGE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs, pkt_merge_param->timeout);
+ break;
+ case MLX5E_PACKET_MERGE_SHAMPO:
+ MLX5_SET(tirc, tirc, packet_merge_mask, MLX5_TIRC_PACKET_MERGE_MASK_SHAMPO);
+ break;
+ default:
+ break;
+ }
}
static int mlx5e_hfunc_to_hw(u8 hfunc)
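
The LRO-specific builder hook is generalized into a packet-merge hook so one entry point can program either LRO or SHAMPO (HW-GRO). A hypothetical caller-side sketch, assuming the parameter struct exposes at least the type and timeout fields used above:

/* Sketch of a caller programming a TIR for LRO via the new packet-merge
 * hook; build_lro_tir is illustrative, not a function from the patch.
 */
static void build_lro_tir(struct mlx5e_tir_builder *builder, u32 lro_timeout_usecs)
{
	struct mlx5e_packet_merge_param pkt_merge_param = {
		.type = MLX5E_PACKET_MERGE_LRO,	/* or MLX5E_PACKET_MERGE_SHAMPO */
		.timeout = lro_timeout_usecs,	/* only consulted for LRO */
	};

	mlx5e_tir_builder_build_packet_merge(builder, &pkt_merge_param);
}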
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
index e45149a78ed9..857a84bcd53a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h
@@ -18,7 +18,7 @@ struct mlx5e_rss_params_traffic_type {
};
struct mlx5e_tir_builder;
-struct mlx5e_lro_param;
+struct mlx5e_packet_merge_param;
struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify);
void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder);
@@ -27,8 +27,8 @@ void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder);
void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn);
void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn,
u32 rqtn, bool inner_ft_support);
-void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder,
- const struct mlx5e_lro_param *lro_param);
+void mlx5e_tir_builder_build_packet_merge(struct mlx5e_tir_builder *builder,
+ const struct mlx5e_packet_merge_param *pkt_merge_param);
void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder,
const struct mlx5e_rss_params_hash *rss_hash,
const struct mlx5e_rss_params_traffic_type *rss_tt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
index d54607a42740..a55b066746cb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c
@@ -137,7 +137,7 @@ static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
- t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
t->stats = &priv->trap_stats.ch;
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 055c3bc23733..4cdf8e5b24c2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -36,6 +36,7 @@ ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_
enum mlx5e_icosq_wqe_type {
MLX5E_ICOSQ_WQE_NOP,
MLX5E_ICOSQ_WQE_UMR_RX,
+ MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
#ifdef CONFIG_MLX5_EN_TLS
MLX5E_ICOSQ_WQE_UMR_TLS,
MLX5E_ICOSQ_WQE_SET_PSV_TLS,
@@ -166,6 +167,10 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
return pi;
}
+struct mlx5e_shampo_umr {
+ u16 len;
+};
+
struct mlx5e_icosq_wqe_info {
u8 wqe_type;
u8 num_wqebbs;
@@ -175,6 +180,7 @@ struct mlx5e_icosq_wqe_info {
struct {
struct mlx5e_rq *rq;
} umr;
+ struct mlx5e_shampo_umr shampo;
#ifdef CONFIG_MLX5_EN_TLS
struct {
struct mlx5e_ktls_offload_context_rx *priv_rx;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 33de8f0092a6..fb5397324aa4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -141,8 +141,7 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
* Pkt: MAC IP ESP IP L4
*
* Transport Mode:
- * SWP: OutL3 InL4
- * InL3
+ * SWP: OutL3 OutL4
* Pkt: MAC IP ESP L4
*
* Tunnel(VXLAN TCP/UDP) over Transport Mode
@@ -171,31 +170,35 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
return;
if (!xo->inner_ipproto) {
- eseg->swp_inner_l3_offset = skb_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- if (xo->proto == IPPROTO_UDP)
+ switch (xo->proto) {
+ case IPPROTO_UDP:
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_OUTER_L4_UDP;
+ fallthrough;
+ case IPPROTO_TCP:
+ /* IP | ESP | TCP */
+ eseg->swp_outer_l4_offset = skb_inner_transport_offset(skb) / 2;
+ break;
+ default:
+ break;
+ }
+ } else {
+ /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
+ switch (xo->inner_ipproto) {
+ case IPPROTO_UDP:
eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- return;
- }
-
- /* Tunnel(VXLAN TCP/UDP) over Transport Mode */
- switch (xo->inner_ipproto) {
- case IPPROTO_UDP:
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
- fallthrough;
- case IPPROTO_TCP:
- eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
- eseg->swp_inner_l4_offset = (skb->csum_start + skb->head - skb->data) / 2;
- if (skb->protocol == htons(ETH_P_IPV6))
- eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
- break;
- default:
- break;
+ fallthrough;
+ case IPPROTO_TCP:
+ eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
+ eseg->swp_inner_l4_offset =
+ (skb->csum_start + skb->head - skb->data) / 2;
+ if (skb->protocol == htons(ETH_P_IPV6))
+ eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
+ break;
+ default:
+ break;
+ }
}
- return;
}
void mlx5e_ipsec_set_iv_esn(struct sk_buff *skb, struct xfrm_state *x,
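
The software-parser (SWP) offsets written above are expressed in units of 2 bytes, which is why every skb byte offset is divided by 2. A standalone illustration of the conversion, using typical header sizes (the numbers are examples, not values taken from the patch):

/* Standalone illustration: SWP offsets are expressed in 2-byte units. */
#include <stdio.h>

int main(void)
{
	/* Example byte offsets for: MAC | IP | ESP | TCP (transport mode) */
	unsigned int mac_len = 14, ip_hdr_len = 20, esp_hdr_len = 8;
	unsigned int l4_byte_off = mac_len + ip_hdr_len + esp_hdr_len;

	printf("outer L3 offset: %u bytes -> %u (2-byte units)\n",
	       mac_len, mac_len / 2);
	printf("outer L4 offset: %u bytes -> %u (2-byte units)\n",
	       l4_byte_off, l4_byte_off / 2);
	return 0;
}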
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
index 5120a59361e6..b98db50c3418 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
@@ -127,6 +127,25 @@ out_disable:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+static inline bool
+mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ struct xfrm_offload *xo = xfrm_offload(skb);
+
+ if (!mlx5e_ipsec_eseg_meta(eseg))
+ return false;
+
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
+ if (xo->inner_ipproto) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+ } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
+ sq->stats->csum_partial_inner++;
+ }
+
+ return true;
+}
#else
static inline
void mlx5e_ipsec_offload_handle_rx_skb(struct net_device *netdev,
@@ -143,6 +162,13 @@ static inline bool mlx5_ipsec_is_rx_flow(struct mlx5_cqe64 *cqe) { return false;
static inline netdev_features_t
mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
{ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); }
+
+static inline bool
+mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
+ struct mlx5_wqe_eth_seg *eseg)
+{
+ return false;
+}
#endif /* CONFIG_MLX5_EN_IPSEC */
#endif /* __MLX5E_IPSEC_RXTX_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 84eb7201c142..c0f409c195bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -47,7 +47,7 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -108,7 +108,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
return 0;
err_destroy_mkey:
- mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_destroy_mkey(mdev, res->mkey);
err_dealloc_transport_domain:
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
err_dealloc_pd:
@@ -121,7 +121,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
mlx5_free_bfreg(mdev, &res->bfreg);
- mlx5_core_destroy_mkey(mdev, &res->mkey);
+ mlx5_core_destroy_mkey(mdev, res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
memset(res, 0, sizeof(*res));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 9d451b8ee467..c2ea5fad48dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -267,9 +267,7 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
break;
case ETH_SS_TEST:
- for (i = 0; i < mlx5e_self_test_num(priv); i++)
- strcpy(data + i * ETH_GSTRING_LEN,
- mlx5e_self_tests[i]);
+ mlx5e_self_test_fill_strings(priv, data);
break;
case ETH_SS_STATS:
@@ -1902,6 +1900,11 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
return -EINVAL;
}
+ if (priv->channels.params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ netdev_warn(priv->netdev, "Can't set CQE compression with HW-GRO, disable it first.\n");
+ return -EINVAL;
+ }
+
new_params = priv->channels.params;
MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS, new_val);
if (rx_filter)
@@ -1954,8 +1957,8 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return -EOPNOTSUPP;
if (!mlx5e_striding_rq_possible(mdev, &priv->channels.params))
return -EINVAL;
- } else if (priv->channels.params.lro_en) {
- netdev_warn(netdev, "Can't set legacy RQ with LRO, disable LRO first\n");
+ } else if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ netdev_warn(netdev, "Can't set legacy RQ with HW-GRO/LRO, disable them first\n");
return -EINVAL;
}
@@ -2139,12 +2142,14 @@ int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return 0;
}
- return mlx5e_ethtool_get_rxnfc(dev, info, rule_locs);
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
}
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
- return mlx5e_ethtool_set_rxnfc(dev, cmd);
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
}
static int query_port_status_opcode(struct mlx5_core_dev *mdev, u32 *status_opcode)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index c06b4b938ae7..aeff1d972a46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -71,12 +71,12 @@ struct mlx5e_l2_hash_node {
bool mpfs;
};
-static inline int mlx5e_hash_l2(u8 *addr)
+static inline int mlx5e_hash_l2(const u8 *addr)
{
return addr[5];
}
-static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
+static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr)
{
struct mlx5e_l2_hash_node *hn;
int ix = mlx5e_hash_l2(addr);
@@ -1186,10 +1186,6 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
struct mlx5e_flow_table *ft;
int err;
- priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
- if (!priv->fs.vlan)
- return -ENOMEM;
-
ft = &priv->fs.vlan->ft;
ft->num_groups = 0;
@@ -1198,10 +1194,8 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
- if (IS_ERR(ft->t)) {
- err = PTR_ERR(ft->t);
- goto err_free_t;
- }
+ if (IS_ERR(ft->t))
+ return PTR_ERR(ft->t);
ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
if (!ft->g) {
@@ -1221,9 +1215,6 @@ err_free_g:
kfree(ft->g);
err_destroy_vlan_table:
mlx5_destroy_flow_table(ft->t);
-err_free_t:
- kvfree(priv->fs.vlan);
- priv->fs.vlan = NULL;
return err;
}
@@ -1232,7 +1223,6 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
mlx5e_del_vlan_rules(priv);
mlx5e_destroy_flow_table(&priv->fs.vlan->ft);
- kvfree(priv->fs.vlan);
}
static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
@@ -1351,3 +1341,17 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
mlx5e_arfs_destroy_tables(priv);
mlx5e_ethtool_cleanup_steering(priv);
}
+
+int mlx5e_fs_init(struct mlx5e_priv *priv)
+{
+ priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL);
+ if (!priv->fs.vlan)
+ return -ENOMEM;
+ return 0;
+}
+
+void mlx5e_fs_cleanup(struct mlx5e_priv *priv)
+{
+ kvfree(priv->fs.vlan);
+ priv->fs.vlan = NULL;
+}
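
The vlan-table allocation moves out of mlx5e_create_vlan_table() into a dedicated init/cleanup pair, so the memory lives for the whole profile lifetime instead of being tied to flow-table creation. A hypothetical ordering sketch of how a profile path is expected to call it, assuming the usual create/destroy flow-steering entry points (the nic_init/nic_cleanup wrappers are illustrative):

/* Illustrative call-order sketch only; nic_init/nic_cleanup stand in for
 * the profile hooks that end up calling these helpers.
 */
static int nic_init(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_fs_init(priv);		/* allocates priv->fs.vlan */
	if (err)
		return err;

	err = mlx5e_create_flow_steering(priv);	/* uses priv->fs.vlan */
	if (err)
		mlx5e_fs_cleanup(priv);
	return err;
}

static void nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_steering(priv);
	mlx5e_fs_cleanup(priv);			/* frees priv->fs.vlan */
}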
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 03693fa74a70..ad0d234632a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -411,7 +411,7 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
u32 rss_context, u32 *tirn)
{
if (fs->flow_type & FLOW_RSS) {
- struct mlx5e_lro_param lro_param;
+ struct mlx5e_packet_merge_param pkt_merge_param;
struct mlx5e_rss *rss;
u32 flow_type;
int err;
@@ -426,8 +426,8 @@ static int flow_get_tirn(struct mlx5e_priv *priv,
if (tt < 0)
return -EINVAL;
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
- err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn);
+ pkt_merge_param = priv->channels.params.packet_merge;
+ err = mlx5e_rss_obtain_tirn(rss, tt, &pkt_merge_param, false, tirn);
if (err)
return err;
eth_rule->rss = rss;
@@ -937,9 +937,8 @@ static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv,
return 0;
}
-int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+int mlx5e_ethtool_set_rxnfc(struct mlx5e_priv *priv, struct ethtool_rxnfc *cmd)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (cmd->cmd) {
@@ -960,10 +959,9 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
return err;
}
-int mlx5e_ethtool_get_rxnfc(struct net_device *dev,
+int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
int err = 0;
switch (info->cmd) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 09c8b71b186c..65571593ec5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -218,6 +218,45 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+static int mlx5e_rq_shampo_hd_alloc(struct mlx5e_rq *rq, int node)
+{
+ rq->mpwqe.shampo = kvzalloc_node(sizeof(*rq->mpwqe.shampo),
+ GFP_KERNEL, node);
+ if (!rq->mpwqe.shampo)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mlx5e_rq_shampo_hd_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo);
+}
+
+static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+
+ shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
+ node);
+ if (!shampo->bitmap)
+ return -ENOMEM;
+
+ shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
+ sizeof(*shampo->info)),
+ GFP_KERNEL, node);
+ if (!shampo->info) {
+ kvfree(shampo->bitmap);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
+{
+ kvfree(rq->mpwqe.shampo->bitmap);
+ kvfree(rq->mpwqe.shampo->info);
+}
+
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
{
int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
@@ -233,10 +272,9 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
return 0;
}
-static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
- u64 npages, u8 page_shift,
- struct mlx5_core_mkey *umr_mkey,
- dma_addr_t filler_addr)
+static int mlx5e_create_umr_mtt_mkey(struct mlx5_core_dev *mdev,
+ u64 npages, u8 page_shift, u32 *umr_mkey,
+ dma_addr_t filler_addr)
{
struct mlx5_mtt *mtt;
int inlen;
@@ -284,12 +322,59 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
return err;
}
+static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev,
+ u64 nentries,
+ u32 *umr_mkey)
+{
+ int inlen;
+ void *mkc;
+ u32 *in;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
+
+ mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+
+ MLX5_SET(mkc, mkc, free, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
+ MLX5_SET(mkc, mkc, lw, 1);
+ MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS);
+ mlx5e_mkey_set_relaxed_ordering(mdev, mkc);
+ MLX5_SET(mkc, mkc, qpn, 0xffffff);
+ MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn);
+ MLX5_SET(mkc, mkc, translations_octword_size, nentries);
+ MLX5_SET(mkc, mkc, length64, 1);
+ err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->mpwqe.wq));
- return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey,
- rq->wqe_overflow.addr);
+ return mlx5e_create_umr_mtt_mkey(mdev, num_mtts, PAGE_SHIFT,
+ &rq->umr_mkey, rq->wqe_overflow.addr);
+}
+
+static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev,
+ struct mlx5e_rq *rq)
+{
+ u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size));
+
+ if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) {
+ mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n",
+ max_klm_size, rq->mpwqe.shampo->hd_per_wq);
+ return -EINVAL;
+ }
+ return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq,
+ &rq->mpwqe.shampo->mkey);
}
static u64 mlx5e_get_mpwqe_offset(u16 wqe_ix)
@@ -403,6 +488,65 @@ static int mlx5e_init_rxq_rq(struct mlx5e_channel *c, struct mlx5e_params *param
return xdp_rxq_info_reg(&rq->xdp_rxq, rq->netdev, rq->ix, 0);
}
+static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
+ struct mlx5e_params *params,
+ struct mlx5e_rq_param *rqp,
+ struct mlx5e_rq *rq,
+ u32 *pool_size,
+ int node)
+{
+ void *wqc = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
+ int wq_size;
+ int err;
+
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return 0;
+ err = mlx5e_rq_shampo_hd_alloc(rq, node);
+ if (err)
+ goto out;
+ rq->mpwqe.shampo->hd_per_wq =
+ mlx5e_shampo_hd_per_wq(mdev, params, rqp);
+ err = mlx5e_create_rq_hd_umr_mkey(mdev, rq);
+ if (err)
+ goto err_shampo_hd;
+ err = mlx5e_rq_shampo_hd_info_alloc(rq, node);
+ if (err)
+ goto err_shampo_info;
+ rq->hw_gro_data = kvzalloc_node(sizeof(*rq->hw_gro_data), GFP_KERNEL, node);
+ if (!rq->hw_gro_data) {
+ err = -ENOMEM;
+ goto err_hw_gro_data;
+ }
+ rq->mpwqe.shampo->key =
+ cpu_to_be32(rq->mpwqe.shampo->mkey);
+ rq->mpwqe.shampo->hd_per_wqe =
+ mlx5e_shampo_hd_per_wqe(mdev, params, rqp);
+ wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
+ *pool_size += (rq->mpwqe.shampo->hd_per_wqe * wq_size) /
+ MLX5E_SHAMPO_WQ_HEADER_PER_PAGE;
+ return 0;
+
+err_hw_gro_data:
+ mlx5e_rq_shampo_hd_info_free(rq);
+err_shampo_info:
+ mlx5_core_destroy_mkey(mdev, rq->mpwqe.shampo->mkey);
+err_shampo_hd:
+ mlx5e_rq_shampo_hd_free(rq);
+out:
+ return err;
+}
+
+static void mlx5e_rq_free_shampo(struct mlx5e_rq *rq)
+{
+ if (!test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ return;
+
+ kvfree(rq->hw_gro_data);
+ mlx5e_rq_shampo_hd_info_free(rq);
+ mlx5_core_destroy_mkey(rq->mdev, rq->mpwqe.shampo->mkey);
+ mlx5e_rq_shampo_hd_free(rq);
+}
+
static int mlx5e_alloc_rq(struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk,
struct mlx5e_rq_param *rqp,
@@ -455,11 +599,16 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
err = mlx5e_create_rq_umr_mkey(mdev, rq);
if (err)
goto err_rq_drop_page;
- rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);
+ rq->mkey_be = cpu_to_be32(rq->umr_mkey);
err = mlx5e_rq_alloc_mpwqe_info(rq, node);
if (err)
goto err_rq_mkey;
+
+ err = mlx5_rq_shampo_alloc(mdev, params, rqp, rq, &pool_size, node);
+ if (err)
+ goto err_free_by_rq_type;
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
@@ -487,7 +636,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (err)
goto err_rq_frags;
- rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey.key);
+ rq->mkey_be = cpu_to_be32(mdev->mlx5e_res.hw_objs.mkey);
}
if (xsk) {
@@ -512,14 +661,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
- goto err_free_by_rq_type;
+ goto err_free_shampo;
}
if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
err = xdp_rxq_info_reg_mem_model(&rq->xdp_rxq,
MEM_TYPE_PAGE_POOL, rq->page_pool);
}
if (err)
- goto err_free_by_rq_type;
+ goto err_free_shampo;
for (i = 0; i < wq_sz; i++) {
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -528,8 +677,10 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
u32 byte_count =
rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
u64 dma_offset = mlx5e_get_mpwqe_offset(i);
+ u16 headroom = test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) ?
+ 0 : rq->buff.headroom;
- wqe->data[0].addr = cpu_to_be64(dma_offset + rq->buff.headroom);
+ wqe->data[0].addr = cpu_to_be64(dma_offset + headroom);
wqe->data[0].byte_count = cpu_to_be32(byte_count);
wqe->data[0].lkey = rq->mkey_be;
} else {
@@ -569,12 +720,14 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
return 0;
+err_free_shampo:
+ mlx5e_rq_free_shampo(rq);
err_free_by_rq_type:
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
err_rq_mkey:
- mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(mdev, rq->umr_mkey);
err_rq_drop_page:
mlx5e_free_mpwqe_rq_drop_page(rq);
break;
@@ -607,8 +760,9 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
switch (rq->wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
kvfree(rq->mpwqe.info);
- mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
+ mlx5_core_destroy_mkey(rq->mdev, rq->umr_mkey);
mlx5e_free_mpwqe_rq_drop_page(rq);
+ mlx5e_rq_free_shampo(rq);
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
kvfree(rq->wqe.frags);
@@ -662,6 +816,12 @@ int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ MLX5_SET(wq, wq, log_headers_buffer_entry_num,
+ order_base_2(rq->mpwqe.shampo->hd_per_wq));
+ MLX5_SET(wq, wq, headers_mkey, rq->mpwqe.shampo->mkey);
+ }
+
mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
(__be64 *)MLX5_ADDR_OF(wq, wq, pas));
@@ -801,6 +961,15 @@ void mlx5e_free_rx_in_progress_descs(struct mlx5e_rq *rq)
head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ u16 len;
+
+ len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) &
+ (rq->mpwqe.shampo->hd_per_wq - 1);
+ mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false);
+ rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci;
+ }
+
rq->mpwqe.actual_wq_head = wq->head;
rq->mpwqe.umr_in_progress = 0;
rq->mpwqe.umr_completed = 0;
@@ -826,6 +995,10 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
mlx5_wq_ll_pop(wq, wqe_ix_be,
&wqe->next.next_wqe_index);
}
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq,
+ 0, true);
} else {
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
@@ -845,6 +1018,9 @@ int mlx5e_open_rq(struct mlx5e_params *params, struct mlx5e_rq_param *param,
struct mlx5_core_dev *mdev = rq->mdev;
int err;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO)
+ __set_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state);
+
err = mlx5e_alloc_rq(params, xsk, param, node, rq);
if (err)
return err;
@@ -930,9 +1106,10 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+ size_t size;
- xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*xdpi_fifo->xi), dsegs_per_wq);
+ xdpi_fifo->xi = kvzalloc_node(size, GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
@@ -946,10 +1123,11 @@ static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ size_t size;
int err;
- sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz,
- GFP_KERNEL, numa);
+ size = array_size(sizeof(*sq->db.wqe_info), wq_sz);
+ sq->db.wqe_info = kvzalloc_node(size, GFP_KERNEL, numa);
if (!sq->db.wqe_info)
return -ENOMEM;
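
The open-coded "sizeof(*p) * n" multiplications are replaced with array_size(), which saturates on overflow so the subsequent allocation fails instead of being silently undersized. A standalone illustration of the idea (array_size_sat is a simplified stand-in for the kernel's overflow.h helper, not the real implementation):

/* Standalone illustration of overflow-safe sizing; a simplified stand-in
 * for the kernel's array_size()/overflow.h helpers.
 */
#include <stdint.h>
#include <stdio.h>

static size_t array_size_sat(size_t elem, size_t n)
{
	size_t bytes;

	/* saturate to SIZE_MAX on overflow so the allocator rejects it */
	if (__builtin_mul_overflow(elem, n, &bytes))
		return SIZE_MAX;
	return bytes;
}

int main(void)
{
	printf("%zu\n", array_size_sat(sizeof(uint64_t), 4));	/* 32 */
	printf("%zu\n", array_size_sat(SIZE_MAX / 2, 3));	/* SIZE_MAX */
	return 0;
}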
@@ -1298,7 +1476,8 @@ static int mlx5e_set_sq_maxrate(struct net_device *dev,
int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
struct mlx5e_params *params, struct mlx5e_sq_param *param,
- struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id, u16 qos_qid)
+ struct mlx5e_txqsq *sq, int tc, u16 qos_queue_group_id,
+ struct mlx5e_sq_stats *sq_stats)
{
struct mlx5e_create_sq_param csp = {};
u32 tx_rate;
@@ -1308,10 +1487,7 @@ int mlx5e_open_txqsq(struct mlx5e_channel *c, u32 tisn, int txq_ix,
if (err)
return err;
- if (qos_queue_group_id)
- sq->stats = c->priv->htb.qos_sq_stats[qos_qid];
- else
- sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
+ sq->stats = sq_stats;
csp.tisn = tisn;
csp.tis_lst_sz = 1;
@@ -1705,6 +1881,36 @@ static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
mlx5e_close_cq(&c->sq[tc].cq);
}
+static int mlx5e_mqprio_txq_to_tc(struct netdev_tc_txq *tc_to_txq, unsigned int txq)
+{
+ int tc;
+
+ for (tc = 0; tc < TC_MAX_QUEUE; tc++)
+ if (txq - tc_to_txq[tc].offset < tc_to_txq[tc].count)
+ return tc;
+
+ WARN(1, "Unexpected TCs configuration. No match found for txq %u", txq);
+ return -ENOENT;
+}
+
+static int mlx5e_txq_get_qos_node_hw_id(struct mlx5e_params *params, int txq_ix,
+ u32 *hw_id)
+{
+ int tc;
+
+ if (params->mqprio.mode != TC_MQPRIO_MODE_CHANNEL ||
+ !params->mqprio.channel.rl) {
+ *hw_id = 0;
+ return 0;
+ }
+
+ tc = mlx5e_mqprio_txq_to_tc(params->mqprio.tc_to_txq, txq_ix);
+ if (tc < 0)
+ return tc;
+
+ return mlx5e_mqprio_rl_get_node_hw_id(params->mqprio.channel.rl, tc, hw_id);
+}
+
static int mlx5e_open_sqs(struct mlx5e_channel *c,
struct mlx5e_params *params,
struct mlx5e_channel_param *cparam)
@@ -1713,9 +1919,16 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) {
int txq_ix = c->ix + tc * params->num_channels;
+ u32 qos_queue_group_id;
+
+ err = mlx5e_txq_get_qos_node_hw_id(params, txq_ix, &qos_queue_group_id);
+ if (err)
+ goto err_close_sqs;
err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix,
- params, &cparam->txq_sq, &c->sq[tc], tc, 0, 0);
+ params, &cparam->txq_sq, &c->sq[tc], tc,
+ qos_queue_group_id,
+ &c->priv->channel_stats[c->ix].sq[tc]);
if (err)
goto err_close_sqs;
}
@@ -1991,7 +2204,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->cpu = cpu;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
- c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key);
+ c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey);
c->num_tc = mlx5e_get_dcb_num_tc(params);
c->xdp = !!params->xdp_prog;
c->stats = &priv->channel_stats[ix].ch;
@@ -2185,17 +2398,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs)
chs->num = 0;
}
-static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
+static int mlx5e_modify_tirs_packet_merge(struct mlx5e_priv *priv)
{
struct mlx5e_rx_res *res = priv->rx_res;
- struct mlx5e_lro_param lro_param;
-
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
- return mlx5e_rx_res_lro_set_param(res, &lro_param);
+ return mlx5e_rx_res_packet_merge_set_param(res, &priv->channels.params.packet_merge);
}
-static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro);
+static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_packet_merge);
static int mlx5e_set_mtu(struct mlx5_core_dev *mdev,
struct mlx5e_params *params, u16 mtu)
@@ -2340,6 +2550,13 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv)
netdev_warn(netdev, "netif_set_real_num_rx_queues failed, %d\n", err);
goto err_txqs;
}
+ if (priv->mqprio_rl != priv->channels.params.mqprio.channel.rl) {
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+ priv->mqprio_rl = priv->channels.params.mqprio.channel.rl;
+ }
return 0;
@@ -2901,15 +3118,18 @@ static void mlx5e_params_mqprio_dcb_set(struct mlx5e_params *params, u8 num_tc)
{
params->mqprio.mode = TC_MQPRIO_MODE_DCB;
params->mqprio.num_tc = num_tc;
+ params->mqprio.channel.rl = NULL;
mlx5e_mqprio_build_default_tc_to_txq(params->mqprio.tc_to_txq, num_tc,
params->num_channels);
}
static void mlx5e_params_mqprio_channel_set(struct mlx5e_params *params,
- struct tc_mqprio_qopt *qopt)
+ struct tc_mqprio_qopt *qopt,
+ struct mlx5e_mqprio_rl *rl)
{
params->mqprio.mode = TC_MQPRIO_MODE_CHANNEL;
params->mqprio.num_tc = qopt->num_tc;
+ params->mqprio.channel.rl = rl;
mlx5e_mqprio_build_tc_to_txq(params->mqprio.tc_to_txq, qopt);
}
@@ -2969,9 +3189,13 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
netdev_err(netdev, "Min tx rate is not supported\n");
return -EINVAL;
}
+
if (mqprio->max_rate[i]) {
- netdev_err(netdev, "Max tx rate is not supported\n");
- return -EINVAL;
+ int err;
+
+ err = mlx5e_qos_bytes_rate_check(priv->mdev, mqprio->max_rate[i]);
+ if (err)
+ return err;
}
if (mqprio->qopt.offset[i] != agg_count) {
@@ -2990,11 +3214,22 @@ static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv,
return 0;
}
+static bool mlx5e_mqprio_rate_limit(struct tc_mqprio_qopt_offload *mqprio)
+{
+ int tc;
+
+ for (tc = 0; tc < mqprio->qopt.num_tc; tc++)
+ if (mqprio->max_rate[tc])
+ return true;
+ return false;
+}
+
static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
struct tc_mqprio_qopt_offload *mqprio)
{
mlx5e_fp_preactivate preactivate;
struct mlx5e_params new_params;
+ struct mlx5e_mqprio_rl *rl;
bool nch_changed;
int err;
@@ -3002,13 +3237,32 @@ static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv,
if (err)
return err;
+ rl = NULL;
+ if (mlx5e_mqprio_rate_limit(mqprio)) {
+ rl = mlx5e_mqprio_rl_alloc();
+ if (!rl)
+ return -ENOMEM;
+ err = mlx5e_mqprio_rl_init(rl, priv->mdev, mqprio->qopt.num_tc,
+ mqprio->max_rate);
+ if (err) {
+ mlx5e_mqprio_rl_free(rl);
+ return err;
+ }
+ }
+
new_params = priv->channels.params;
- mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt);
+ mlx5e_params_mqprio_channel_set(&new_params, &mqprio->qopt, rl);
nch_changed = mlx5e_get_dcb_num_tc(&priv->channels.params) > 1;
preactivate = nch_changed ? mlx5e_num_channels_changed_ctx :
mlx5e_update_netdev_queues_ctx;
- return mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, true);
+ if (err && rl) {
+ mlx5e_mqprio_rl_cleanup(rl);
+ mlx5e_mqprio_rl_free(rl);
+ }
+
+ return err;
}
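
A small, self-contained sketch of the ownership pattern used here for the rate-limit object: it is created before the params switch, handed over to the new params on success (and freed later by the usual teardown paths), and torn down locally only if the switch fails. The names below are invented stand-ins, not driver API:

#include <stdio.h>
#include <stdlib.h>

struct rl { int dummy; };

static struct rl *rl_create(void) { return calloc(1, sizeof(struct rl)); }
static void rl_destroy(struct rl *rl) { free(rl); }

/* stand-in for the params switch; flip to nonzero to simulate failure */
static int switch_params(struct rl *rl) { (void)rl; return 0; }

static int setup_rate_limit(int want_rl, struct rl **owner)
{
	struct rl *rl = NULL;
	int err;

	if (want_rl) {
		rl = rl_create();
		if (!rl)
			return -1;
	}

	err = switch_params(rl);
	if (err && rl) {
		rl_destroy(rl);	/* switch failed: nothing took ownership */
		return err;
	}

	*owner = rl;		/* success: ownership moved to the active params */
	return err;
}

int main(void)
{
	struct rl *owner = NULL;

	printf("setup: %d, owner %s\n", setup_rate_limit(1, &owner),
	       owner ? "set" : "unset");
	rl_destroy(owner);
	return 0;
}
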
static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
@@ -3226,7 +3480,7 @@ static int mlx5e_set_mac(struct net_device *netdev, void *addr)
return -EADDRNOTAVAIL;
netif_addr_lock_bh(netdev);
- ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ eth_hw_addr_set(netdev, saddr->sa_data);
netif_addr_unlock_bh(netdev);
mlx5e_nic_set_rx_mode(priv);
@@ -3270,16 +3524,59 @@ static int set_feature_lro(struct net_device *netdev, bool enable)
}
new_params = *cur_params;
- new_params.lro_en = enable;
- if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
- if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
- mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
- reset = false;
+ if (enable)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_LRO;
+ else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ else
+ goto out;
+
+ if (!(cur_params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO &&
+ new_params.packet_merge.type == MLX5E_PACKET_MERGE_LRO)) {
+ if (cur_params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+ if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) ==
+ mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_params, NULL))
+ reset = false;
+ }
+ }
+
+ err = mlx5e_safe_switch_params(priv, &new_params,
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
+out:
+ mutex_unlock(&priv->state_lock);
+ return err;
+}
+
+static int set_feature_hw_gro(struct net_device *netdev, bool enable)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5e_params new_params;
+ bool reset = true;
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->channels.params;
+
+ if (enable) {
+ if (MLX5E_GET_PFLAG(&new_params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
+ netdev_warn(netdev, "Can't set HW-GRO when CQE compress is active\n");
+ err = -EINVAL;
+ goto out;
+ }
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_SHAMPO;
+ new_params.packet_merge.shampo.match_criteria_type =
+ MLX5_RQC_SHAMPO_MATCH_CRITERIA_TYPE_EXTENDED;
+ new_params.packet_merge.shampo.alignment_granularity =
+ MLX5_RQC_SHAMPO_NO_MATCH_ALIGNMENT_GRANULARITY_STRIDE;
+ } else if (new_params.packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ new_params.packet_merge.type = MLX5E_PACKET_MERGE_NONE;
+ } else {
+ goto out;
}
err = mlx5e_safe_switch_params(priv, &new_params,
- mlx5e_modify_tirs_lro_ctx, NULL, reset);
+ mlx5e_modify_tirs_packet_merge_ctx, NULL, reset);
out:
mutex_unlock(&priv->state_lock);
return err;
@@ -3458,6 +3755,7 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features)
mlx5e_handle_feature(netdev, &oper_features, features, feature, handler)
err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro);
+ err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER,
set_feature_cvlan_filter);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc);
@@ -3518,6 +3816,10 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
netdev_warn(netdev, "Disabling LRO, not supported in legacy RQ\n");
features &= ~NETIF_F_LRO;
}
+ if (features & NETIF_F_GRO_HW) {
+ netdev_warn(netdev, "Disabling HW-GRO, not supported in legacy RQ\n");
+ features &= ~NETIF_F_GRO_HW;
+ }
}
if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
@@ -3606,7 +3908,7 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
goto out;
}
- if (params->lro_en)
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_LRO)
reset = false;
if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
@@ -4063,8 +4365,8 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog)
struct net_device *netdev = priv->netdev;
struct mlx5e_params new_params;
- if (priv->channels.params.lro_en) {
- netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
+ if (priv->channels.params.packet_merge.type != MLX5E_PACKET_MERGE_NONE) {
+ netdev_warn(netdev, "can't set XDP while HW-GRO/LRO is on, disable them first\n");
return -EINVAL;
}
@@ -4321,9 +4623,10 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
/* No XSK params: checking the availability of striding RQ in general. */
if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL))
- params->lro_en = !slow_pci_heuristic(mdev);
+ params->packet_merge.type = slow_pci_heuristic(mdev) ?
+ MLX5E_PACKET_MERGE_NONE : MLX5E_PACKET_MERGE_LRO;
}
- params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
+ params->packet_merge.timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
/* CQ moderation params */
rx_cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
@@ -4351,13 +4654,17 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
+ u8 addr[ETH_ALEN];
- mlx5_query_mac_address(priv->mdev, netdev->dev_addr);
- if (is_zero_ether_addr(netdev->dev_addr) &&
+ mlx5_query_mac_address(priv->mdev, addr);
+ if (is_zero_ether_addr(addr) &&
!MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
eth_hw_addr_random(netdev);
mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
+ return;
}
+
+ eth_hw_addr_set(netdev, addr);
}
static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
@@ -4454,6 +4761,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
+ if (!!MLX5_CAP_GEN(mdev, shampo) &&
+ mlx5e_check_fragmented_striding_rq_cap(mdev))
+ netdev->hw_features |= NETIF_F_GRO_HW;
+
if (mlx5e_tunnel_any_tx_proto_supported(mdev)) {
netdev->hw_enc_features |= NETIF_F_HW_CSUM;
netdev->hw_enc_features |= NETIF_F_TSO;
@@ -4504,6 +4815,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
if (fcs_enabled)
netdev->features &= ~NETIF_F_RXALL;
netdev->features &= ~NETIF_F_LRO;
+ netdev->features &= ~NETIF_F_GRO_HW;
netdev->features &= ~NETIF_F_RXFCS;
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
@@ -4578,6 +4890,12 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
+ err = mlx5e_fs_init(priv);
+ if (err) {
+ mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
+ return err;
+ }
+
err = mlx5e_ipsec_init(priv);
if (err)
mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
@@ -4595,13 +4913,13 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_tls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
+ mlx5e_fs_cleanup(priv);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
enum mlx5e_rx_res_features features;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -4619,9 +4937,9 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP;
if (priv->channels.params.tunneled_offload_en)
features |= MLX5E_RX_RES_FEATURE_INNER_FT;
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -4855,6 +5173,11 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv)
kfree(priv->htb.qos_sq_stats[i]);
kvfree(priv->htb.qos_sq_stats);
+ if (priv->mqprio_rl) {
+ mlx5e_mqprio_rl_cleanup(priv->mqprio_rl);
+ mlx5e_mqprio_rl_free(priv->mqprio_rl);
+ }
+
memset(priv, 0, sizeof(*priv));
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 0684ac6699b2..e58a9ec42553 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -53,6 +53,7 @@
#define CREATE_TRACE_POINTS
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
+#include "en/tc/int_port.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
@@ -793,7 +794,6 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup)
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -808,9 +808,9 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -858,12 +858,22 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv)
{
+ int err;
+
mlx5e_create_q_counters(priv);
- return mlx5e_init_rep_rx(priv);
+ err = mlx5e_init_rep_rx(priv);
+ if (err)
+ goto out;
+
+ mlx5e_tc_int_port_init_rep_rx(priv);
+
+out:
+ return err;
}
static void mlx5e_cleanup_ul_rep_rx(struct mlx5e_priv *priv)
{
+ mlx5e_tc_int_port_cleanup_rep_rx(priv);
mlx5e_cleanup_rep_rx(priv);
mlx5e_destroy_q_counters(priv);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 48a203a9e7d9..b01dacb6f527 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -58,6 +58,7 @@ struct mlx5e_neigh_update_table {
};
struct mlx5_tc_ct_priv;
+struct mlx5_tc_int_port_priv;
struct mlx5e_rep_bond;
struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
@@ -98,6 +99,9 @@ struct mlx5_rep_uplink_priv {
/* tc tunneling encapsulation private data */
struct mlx5e_tc_tun_encap *encap;
+
+ /* OVS internal port support */
+ struct mlx5e_tc_int_port_priv *int_port_priv;
};
struct mlx5e_rep_priv {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 29a6586ef28d..96967b0a2441 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -33,9 +33,12 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/bitmap.h>
#include <net/ip6_checksum.h>
#include <net/page_pool.h>
#include <net/inet_ecn.h>
+#include <net/udp.h>
+#include <net/tcp.h>
#include "en.h"
#include "en/txrx.h"
#include "en_tc.h"
@@ -62,10 +65,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
u16 cqe_bcnt, u32 head_offset, u32 page_idx);
static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
+static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
.handle_rx_cqe = mlx5e_handle_rx_cqe,
.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
+ .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
};
static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
@@ -185,8 +190,9 @@ static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
}
mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
wq->cc = cqcc;
@@ -206,8 +212,9 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
mlx5e_read_title_slot(rq, wq, cc);
mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
mlx5e_decompress_cqe(rq, wq, cc);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, &cqd->title);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
+ rq, &cqd->title);
cqd->mini_arr_idx++;
return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem) - 1;
@@ -448,13 +455,13 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
static inline void
mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb,
struct mlx5e_dma_info *dma_info,
- int offset_from, u32 headlen)
+ int offset_from, int dma_offset, u32 headlen)
{
const void *from = page_address(dma_info->page) + offset_from;
/* Aligning len to sizeof(long) optimizes memcpy performance */
unsigned int len = ALIGN(headlen, sizeof(long));
- dma_sync_single_for_cpu(pdev, dma_info->addr + offset_from, len,
+ dma_sync_single_for_cpu(pdev, dma_info->addr + dma_offset, len,
DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, from, len);
}
@@ -494,6 +501,157 @@ static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
mlx5_wq_ll_update_db_record(wq);
}
+/* This function returns the size of the contiguous free space in the bitmap
+ * that starts at 'first', capped at 'len', taking circular wraparound into
+ * account.
+ */
+static int bitmap_find_window(unsigned long *bitmap, int len,
+ int bitmap_size, int first)
+{
+ int next_one, count;
+
+ next_one = find_next_bit(bitmap, bitmap_size, first);
+ if (next_one == bitmap_size) {
+ if (bitmap_size - first >= len)
+ return len;
+ next_one = find_next_bit(bitmap, bitmap_size, 0);
+ count = next_one + bitmap_size - first;
+ } else {
+ count = next_one - first;
+ }
+
+ return min(len, count);
+}
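
A user-space sketch of the same circular free-window search, with a trivial find_next_set() standing in for the kernel's find_next_bit(); the ring size and occupancy below are arbitrary examples:

#include <stdio.h>

#define RING_SIZE 64

/* stand-in for find_next_bit(): first used slot at or after 'start' */
static int find_next_set(const unsigned char *used, int size, int start)
{
	for (int i = start; i < size; i++)
		if (used[i])
			return i;
	return size;
}

static int free_window(const unsigned char *used, int len, int size, int first)
{
	int next_one = find_next_set(used, size, first);
	int count;

	if (next_one == size) {
		if (size - first >= len)
			return len;
		next_one = find_next_set(used, size, 0);
		count = next_one + size - first;
	} else {
		count = next_one - first;
	}

	return count < len ? count : len;
}

int main(void)
{
	unsigned char used[RING_SIZE] = { 0 };

	used[5] = 1;					    /* one slot still in use */
	printf("%d\n", free_window(used, 16, RING_SIZE, 60)); /* wraps: 60..63 + 0..4 = 9 */
	printf("%d\n", free_window(used, 16, RING_SIZE, 10)); /* plenty ahead -> 16 */
	return 0;
}
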
+
+static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
+ __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs)
+{
+ memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms));
+ umr_wqe->ctrl.opmod_idx_opcode =
+ cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
+ MLX5_OPCODE_UMR);
+ umr_wqe->ctrl.umr_mkey = key;
+ umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
+ | MLX5E_KLM_UMR_DS_CNT(klm_len));
+ umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
+ umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset);
+ umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len);
+ umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
+}
+
+static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
+ struct mlx5e_icosq *sq,
+ u16 klm_entries, u16 index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 entries, pi, i, header_offset, err, wqe_bbs, new_entries;
+ u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
+ struct page *page = shampo->last_page;
+ u64 addr = shampo->last_addr;
+ struct mlx5e_dma_info *dma_info;
+ struct mlx5e_umr_wqe *umr_wqe;
+ int headroom;
+
+ headroom = rq->buff.headroom;
+ new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ entries = ALIGN(klm_entries, MLX5_UMR_KLM_ALIGNMENT);
+ wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries);
+ pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
+ umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
+ build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs);
+
+ for (i = 0; i < entries; i++, index++) {
+ dma_info = &shampo->info[index];
+ if (i >= klm_entries || (index < shampo->pi && shampo->pi - index <
+ MLX5_UMR_KLM_ALIGNMENT))
+ goto update_klm;
+ header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+ if (!(header_offset & (PAGE_SIZE - 1))) {
+ err = mlx5e_page_alloc(rq, dma_info);
+ if (unlikely(err))
+ goto err_unmap;
+ addr = dma_info->addr;
+ page = dma_info->page;
+ } else {
+ dma_info->addr = addr + header_offset;
+ dma_info->page = page;
+ }
+
+update_klm:
+ umr_wqe->inline_klms[i].bcount =
+ cpu_to_be32(MLX5E_RX_MAX_HEAD);
+ umr_wqe->inline_klms[i].key = cpu_to_be32(lkey);
+ umr_wqe->inline_klms[i].va =
+ cpu_to_be64(dma_info->addr + headroom);
+ }
+
+ sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
+ .wqe_type = MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
+ .num_wqebbs = wqe_bbs,
+ .shampo.len = new_entries,
+ };
+
+ shampo->pi = (shampo->pi + new_entries) & (shampo->hd_per_wq - 1);
+ shampo->last_page = page;
+ shampo->last_addr = addr;
+ sq->pc += wqe_bbs;
+ sq->doorbell_cseg = &umr_wqe->ctrl;
+
+ return 0;
+
+err_unmap:
+ while (--i >= 0) {
+ if (--index < 0)
+ index = shampo->hd_per_wq - 1;
+ dma_info = &shampo->info[index];
+ if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
+ dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
+ mlx5e_page_release(rq, dma_info, true);
+ }
+ }
+ rq->stats->buff_alloc_err++;
+ return err;
+}
+
+static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u16 klm_entries, num_wqe, index, entries_before;
+ struct mlx5e_icosq *sq = rq->icosq;
+ int i, err, max_klm_entries, len;
+
+ max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+ klm_entries = bitmap_find_window(shampo->bitmap,
+ shampo->hd_per_wqe,
+ shampo->hd_per_wq, shampo->pi);
+ if (!klm_entries)
+ return 0;
+
+ klm_entries += (shampo->pi & (MLX5_UMR_KLM_ALIGNMENT - 1));
+ index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_ALIGNMENT);
+ entries_before = shampo->hd_per_wq - index;
+
+ if (unlikely(entries_before < klm_entries))
+ num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) +
+ DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries);
+ else
+ num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries);
+
+ for (i = 0; i < num_wqe; i++) {
+ len = (klm_entries > max_klm_entries) ? max_klm_entries :
+ klm_entries;
+ if (unlikely(index + len > shampo->hd_per_wq))
+ len = shampo->hd_per_wq - index;
+ err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
+ if (unlikely(err))
+ return err;
+ index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
+ klm_entries -= len;
+ }
+
+ return 0;
+}
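
The WQE-count arithmetic above can be checked in isolation: when the post wraps the header ring, the two sides of the wrap are rounded up independently, since one UMR WQE never spans the wrap point. A stand-alone sketch with made-up sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int num_umr_wqes(int entries, int entries_before_wrap, int max_per_wqe)
{
	if (entries_before_wrap < entries)
		return DIV_ROUND_UP(entries_before_wrap, max_per_wqe) +
		       DIV_ROUND_UP(entries - entries_before_wrap, max_per_wqe);
	return DIV_ROUND_UP(entries, max_per_wqe);
}

int main(void)
{
	/* 100 entries to post, ring wraps after 30, at most 64 entries per WQE */
	printf("%d\n", num_umr_wqes(100, 30, 64));	/* 1 + 2 = 3 */
	/* no wrap: 100 entries fit in ceil(100/64) = 2 WQEs */
	printf("%d\n", num_umr_wqes(100, 200, 64));	/* 2 */
	return 0;
}
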
+
static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -514,6 +672,12 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
goto err;
}
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
+ err = mlx5e_alloc_rx_hd_mpwqe(rq);
+ if (unlikely(err))
+ goto err;
+ }
+
pi = mlx5e_icosq_get_next_pi(sq, MLX5E_UMR_WQEBBS);
umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
memcpy(umr_wqe, &rq->mpwqe.umr_wqe, offsetof(struct mlx5e_umr_wqe, inline_mtts));
@@ -558,6 +722,44 @@ err:
return err;
}
+/* This function deallocates the SHAMPO header buffer.
+ * close == true specifies that we are in the middle of closing the RQ, so we
+ * go over all the entries and release the ones still marked as in use in the
+ * bitmap; otherwise we only go over the given range of entries inside the
+ * header buffer.
+ */
+void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ int hd_per_wq = shampo->hd_per_wq;
+ struct page *deleted_page = NULL;
+ struct mlx5e_dma_info *hd_info;
+ int i, index = start;
+
+ for (i = 0; i < len; i++, index++) {
+ if (index == hd_per_wq)
+ index = 0;
+
+ if (close && !test_bit(index, shampo->bitmap))
+ continue;
+
+ hd_info = &shampo->info[index];
+ hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE);
+ if (hd_info->page != deleted_page) {
+ deleted_page = hd_info->page;
+ mlx5e_page_release(rq, hd_info, false);
+ }
+ }
+
+ if (start + len > hd_per_wq) {
+ len -= hd_per_wq - start;
+ bitmap_clear(shampo->bitmap, start, hd_per_wq - start);
+ start = 0;
+ }
+
+ bitmap_clear(shampo->bitmap, start, len);
+}
+
static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
{
struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
@@ -629,6 +831,28 @@ void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
sq->cc = sqcc;
}
+static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
+ struct mlx5e_icosq *sq)
+{
+ struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
+ struct mlx5e_shampo_hd *shampo;
+ /* assume 1:1 relationship between RQ and icosq */
+ struct mlx5e_rq *rq = &c->rq;
+ int end, from, len = umr.len;
+
+ shampo = rq->mpwqe.shampo;
+ end = shampo->hd_per_wq;
+ from = shampo->ci;
+ if (from + len > shampo->hd_per_wq) {
+ len -= end - from;
+ bitmap_set(shampo->bitmap, from, end - from);
+ from = 0;
+ }
+
+ bitmap_set(shampo->bitmap, from, len);
+ shampo->ci = (shampo->ci + umr.len) & (shampo->hd_per_wq - 1);
+}
+
int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
@@ -685,6 +909,9 @@ int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
break;
case MLX5E_ICOSQ_WQE_NOP:
break;
+ case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
+ mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
+ break;
#ifdef CONFIG_MLX5_EN_TLS
case MLX5E_ICOSQ_WQE_UMR_TLS:
break;
@@ -782,8 +1009,8 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
if (tcp_ack) {
tcp->ack = 1;
- tcp->ack_seq = cqe->lro_ack_seq_num;
- tcp->window = cqe->lro_tcp_win;
+ tcp->ack_seq = cqe->lro.ack_seq_num;
+ tcp->window = cqe->lro.tcp_win;
}
}
@@ -809,7 +1036,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp = ip_p + sizeof(struct iphdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
- ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->ttl = cqe->lro.min_ttl;
ipv4->tot_len = cpu_to_be16(tot_len);
ipv4->check = 0;
ipv4->check = ip_fast_csum((unsigned char *)ipv4,
@@ -829,7 +1056,7 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
tcp = ip_p + sizeof(struct ipv6hdr);
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
- ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->hop_limit = cqe->lro.min_ttl;
ipv6->payload_len = cpu_to_be16(payload_len);
mlx5e_lro_update_tcp_hdr(cqe, tcp);
@@ -841,6 +1068,142 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
}
}
+static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
+{
+ struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+
+ return page_address(last_head->page) + head_offset;
+}
+
+static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
+{
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+}
+
+static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
+{
+ int udp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct udphdr *uh;
+
+ uh = (struct udphdr *)(skb->data + udp_off);
+ uh->len = htons(skb->len - udp_off);
+
+ if (uh->check)
+ uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+
+ skb->csum_start = (unsigned char *)uh - skb->head;
+ skb->csum_offset = offsetof(struct udphdr, check);
+
+ skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
+}
+
+static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
+ struct tcphdr *skb_tcp_hd)
+{
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ struct tcphdr *last_tcp_hd;
+ void *last_hd_addr;
+
+ last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ last_tcp_hd = last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
+ tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
+}
+
+static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
+ struct mlx5_cqe64 *cqe, bool match)
+{
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
+ ipv4->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
+ if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+}
+
+static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
+ struct mlx5_cqe64 *cqe, bool match)
+{
+ int tcp_off = rq->hw_gro_data->fk.control.thoff;
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct tcphdr *tcp;
+
+ tcp = (struct tcphdr *)(skb->data + tcp_off);
+ if (match)
+ mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
+
+ tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
+ &ipv6->daddr, 0);
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
+ skb->csum_start = (unsigned char *)tcp - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+
+ if (tcp->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+}
+
+static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+{
+ bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ if (is_ipv4) {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
+ __be16 newlen = htons(skb->len - nhoff);
+
+ csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
+ ipv4->tot_len = newlen;
+
+ if (ipv4->protocol == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
+ else
+ mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
+ } else {
+ int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
+
+ ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
+
+ if (ipv6->nexthdr == IPPROTO_TCP)
+ mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
+ else
+ mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
+ }
+}
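
The IPv4 branch above patches tot_len on the merged SKB and repairs the header checksum incrementally via csum_replace2(). A user-space sketch of that RFC 1624 update, verified against a full recompute over a made-up 20-byte header (no options assumed):

#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* full one's-complement checksum over a 20-byte IPv4 header */
static uint16_t ip_checksum(const uint8_t *hdr)
{
	uint32_t sum = 0;

	for (int i = 0; i < 20; i += 2)
		sum += (uint32_t)((hdr[i] << 8) | hdr[i + 1]);
	return (uint16_t)~csum_fold(sum);
}

/* incremental update when one 16-bit field changes from old_val to new_val */
static uint16_t csum_replace2(uint16_t check, uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	return (uint16_t)~csum_fold(sum);
}

int main(void)
{
	uint8_t hdr[20] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
			    0x40, 0x06, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
			    0xc0, 0xa8, 0x00, 0xc7 };
	uint16_t old_len = 0x003c, new_len = 0x4000; /* merged SKB got much longer */
	uint16_t check = ip_checksum(hdr);
	uint16_t fixed = csum_replace2(check, old_len, new_len);

	hdr[2] = new_len >> 8;
	hdr[3] = new_len & 0xff;
	hdr[10] = fixed >> 8;
	hdr[11] = fixed & 0xff;
	/* a valid header sums to zero once the checksum field is included */
	printf("incremental update valid: %s\n", ip_checksum(hdr) == 0 ? "yes" : "no");
	return 0;
}
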
+
static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
@@ -1090,6 +1453,27 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
stats->mcast_packets++;
}
+static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
+ struct mlx5_cqe64 *cqe,
+ u32 cqe_bcnt,
+ struct sk_buff *skb)
+{
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->packets++;
+ stats->gro_packets++;
+ stats->bytes += cqe_bcnt;
+ stats->gro_bytes += cqe_bcnt;
+ if (NAPI_GRO_CB(skb)->count != 1)
+ return;
+ mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
+ skb_reset_network_header(skb);
+ if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+ }
+}
+
static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
struct mlx5_cqe64 *cqe,
u32 cqe_bcnt,
@@ -1199,7 +1583,8 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_wi->di, head_wi->offset, head_wi->offset,
+ headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1275,7 +1660,6 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
- struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
@@ -1311,15 +1695,7 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
if (rep->vlan && skb_vlan_tag_present(skb))
skb_vlan_pop(skb);
- if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
- dev_kfree_skb_any(skb);
- goto free_wqe;
- }
-
- napi_gro_receive(rq->cq.napi, skb);
-
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ mlx5e_rep_tc_receive(cqe, rq, skb);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, true);
@@ -1336,7 +1712,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
u32 head_offset = wqe_offset & (PAGE_SIZE - 1);
u32 page_idx = wqe_offset >> PAGE_SHIFT;
- struct mlx5e_tc_update_priv tc_priv = {};
struct mlx5e_rx_wqe_ll *wqe;
struct mlx5_wq_ll *wq;
struct sk_buff *skb;
@@ -1369,15 +1744,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
- if (unlikely(!mlx5_ipsec_is_rx_flow(cqe) &&
- !mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))) {
- dev_kfree_skb_any(skb);
- goto mpwrq_cqe_out;
- }
-
- napi_gro_receive(rq->cq.napi, skb);
-
- mlx5_rep_tc_post_napi_receive(&tc_priv);
+ mlx5e_rep_tc_receive(cqe, rq, skb);
mpwrq_cqe_out:
if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
@@ -1395,6 +1762,30 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
};
#endif
+static void
+mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, struct mlx5e_dma_info *di,
+ u32 data_bcnt, u32 data_offset)
+{
+ net_prefetchw(skb->data);
+
+ while (data_bcnt) {
+ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
+ unsigned int truesize;
+
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+ truesize = pg_consumed_bytes;
+ else
+ truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
+
+ mlx5e_add_skb_frag(rq, skb, di, data_offset,
+ pg_consumed_bytes, truesize);
+
+ data_bcnt -= pg_consumed_bytes;
+ data_offset = 0;
+ di++;
+ }
+}
+
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
u16 cqe_bcnt, u32 head_offset, u32 page_idx)
@@ -1420,20 +1811,9 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
frag_offset -= PAGE_SIZE;
}
- while (byte_cnt) {
- u32 pg_consumed_bytes =
- min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
- unsigned int truesize =
- ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
-
- mlx5e_add_skb_frag(rq, skb, di, frag_offset,
- pg_consumed_bytes, truesize);
- byte_cnt -= pg_consumed_bytes;
- frag_offset = 0;
- di++;
- }
+ mlx5e_fill_skb_data(skb, rq, di, byte_cnt, frag_offset);
/* copy header */
- mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, headlen);
+ mlx5e_copy_skb_header(rq->pdev, skb, head_di, head_offset, head_offset, headlen);
/* skb linear part was allocated with headlen and aligned to long */
skb->tail += headlen;
skb->len += headlen;
@@ -1487,6 +1867,181 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
return skb;
}
+static void
+mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
+ struct mlx5_cqe64 *cqe, u16 header_index)
+{
+ struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
+ u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ u16 head_size = cqe->shampo.header_size;
+ u16 rx_headroom = rq->buff.headroom;
+ struct sk_buff *skb = NULL;
+ void *hdr, *data;
+ u32 frag_size;
+
+ hdr = page_address(head->page) + head_offset;
+ data = hdr + rx_headroom;
+ frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
+
+ if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
+ /* build SKB around header */
+ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
+ prefetchw(hdr);
+ prefetch(data);
+ skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
+
+ if (unlikely(!skb))
+ return;
+
+ /* queue up for recycling/reuse */
+ page_ref_inc(head->page);
+
+ } else {
+ /* allocate SKB and copy header for large header */
+ rq->stats->gro_large_hds++;
+ skb = napi_alloc_skb(rq->cq.napi,
+ ALIGN(head_size, sizeof(long)));
+ if (unlikely(!skb)) {
+ rq->stats->buff_alloc_err++;
+ return;
+ }
+
+ prefetchw(skb->data);
+ mlx5e_copy_skb_header(rq->pdev, skb, head,
+ head_offset + rx_headroom,
+ rx_headroom, head_size);
+ /* skb linear part was allocated with headlen and aligned to long */
+ skb->tail += head_size;
+ skb->len += head_size;
+ }
+ rq->hw_gro_data->skb = skb;
+ NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+}
+
+static void
+mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
+{
+ skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
+ unsigned int frag_size = skb_frag_size(last_frag);
+ unsigned int frag_truesize;
+
+ frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
+ skb->truesize += frag_truesize - frag_size;
+}
+
+static void
+mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
+{
+ struct sk_buff *skb = rq->hw_gro_data->skb;
+ struct mlx5e_rq_stats *stats = rq->stats;
+
+ stats->gro_skbs++;
+ if (likely(skb_shinfo(skb)->nr_frags))
+ mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
+ if (NAPI_GRO_CB(skb)->count > 1)
+ mlx5e_shampo_update_hdr(rq, cqe, match);
+ napi_gro_receive(rq->cq.napi, skb);
+ rq->hw_gro_data->skb = NULL;
+}
+
+static bool
+mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
+{
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+
+ return PAGE_SIZE * nr_frags + data_bcnt <= GSO_MAX_SIZE;
+}
+
+static void
+mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
+{
+ struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
+ u64 addr = shampo->info[header_index].addr;
+
+ if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
+ shampo->info[header_index].addr = ALIGN_DOWN(addr, PAGE_SIZE);
+ mlx5e_page_release(rq, &shampo->info[header_index], true);
+ }
+ bitmap_clear(shampo->bitmap, header_index, 1);
+}
+
+static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+ u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
+ u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+ u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
+ u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
+ u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
+ u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
+ u16 wqe_id = be16_to_cpu(cqe->wqe_id);
+ u32 page_idx = wqe_offset >> PAGE_SHIFT;
+ struct sk_buff **skb = &rq->hw_gro_data->skb;
+ bool flush = cqe->shampo.flush;
+ bool match = cqe->shampo.match;
+ struct mlx5e_rq_stats *stats = rq->stats;
+ struct mlx5e_rx_wqe_ll *wqe;
+ struct mlx5e_dma_info *di;
+ struct mlx5e_mpw_info *wi;
+ struct mlx5_wq_ll *wq;
+
+ wi = &rq->mpwqe.info[wqe_id];
+ wi->consumed_strides += cstrides;
+
+ if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
+ trigger_report(rq, cqe);
+ stats->wqe_err++;
+ goto mpwrq_cqe_out;
+ }
+
+ if (unlikely(mpwrq_is_filler_cqe(cqe))) {
+ stats->mpwqe_filler_cqes++;
+ stats->mpwqe_filler_strides += cstrides;
+ goto mpwrq_cqe_out;
+ }
+
+ stats->gro_match_packets += match;
+
+ if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
+ match = false;
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+ }
+
+ if (!*skb) {
+ mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+ if (unlikely(!*skb))
+ goto free_hd_entry;
+ } else {
+ NAPI_GRO_CB(*skb)->count++;
+ if (NAPI_GRO_CB(*skb)->count == 2 &&
+ rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
+ void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
+ int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
+ sizeof(struct iphdr);
+ struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
+
+ rq->hw_gro_data->second_ip_id = ntohs(iph->id);
+ }
+ }
+
+ di = &wi->umr.dma_info[page_idx];
+ mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+
+ mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
+ if (flush)
+ mlx5e_shampo_flush_skb(rq, cqe, match);
+free_hd_entry:
+ mlx5e_free_rx_shampo_hd_entry(rq, header_index);
+mpwrq_cqe_out:
+ if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
+ return;
+
+ wq = &rq->mpwqe.wq;
+ wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
+ mlx5e_free_rx_mpwqe(rq, wi, true);
+ mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
+}
+
static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
@@ -1579,11 +2134,15 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
mlx5_cqwq_pop(cqwq);
- INDIRECT_CALL_2(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
- mlx5e_handle_rx_cqe, rq, cqe);
+ INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
+ mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
+ rq, cqe);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
out:
+ if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
+ mlx5e_shampo_flush_skb(rq, NULL, false);
+
if (rcu_access_pointer(rq->xdp_prog))
mlx5e_xdp_rx_poll_complete(rq);
@@ -1784,15 +2343,24 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
rq->post_wqes = mlx5e_post_rx_mpwqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
- rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
if (mlx5_fpga_is_ipsec_device(mdev)) {
netdev_err(netdev, "MPWQE RQ with Innova IPSec offload not supported\n");
return -EINVAL;
}
- if (!rq->handle_rx_cqe) {
- netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
- return -EINVAL;
+ if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
+ } else {
+ rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
+ if (!rq->handle_rx_cqe) {
+ netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
+ return -EINVAL;
+ }
}
+
break;
default: /* MLX5_WQ_TYPE_CYCLIC */
rq->wqe.skb_from_cqe = xsk ?
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
index ce8ab1f01876..8c9163d2c646 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_selftest.c
@@ -35,30 +35,7 @@
#include <net/udp.h>
#include "en.h"
#include "en/port.h"
-
-enum {
- MLX5E_ST_LINK_STATE,
- MLX5E_ST_LINK_SPEED,
- MLX5E_ST_HEALTH_INFO,
-#ifdef CONFIG_INET
- MLX5E_ST_LOOPBACK,
-#endif
- MLX5E_ST_NUM,
-};
-
-const char mlx5e_self_tests[MLX5E_ST_NUM][ETH_GSTRING_LEN] = {
- "Link Test",
- "Speed Test",
- "Health Test",
-#ifdef CONFIG_INET
- "Loopback Test",
-#endif
-};
-
-int mlx5e_self_test_num(struct mlx5e_priv *priv)
-{
- return ARRAY_SIZE(mlx5e_self_tests);
-}
+#include "eswitch.h"
static int mlx5e_test_health_info(struct mlx5e_priv *priv)
{
@@ -265,6 +242,14 @@ static void mlx5e_test_loopback_cleanup(struct mlx5e_priv *priv,
mlx5e_refresh_tirs(priv, false, false);
}
+static int mlx5e_cond_loopback(struct mlx5e_priv *priv)
+{
+ if (is_mdev_switchdev_mode(priv->mdev))
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
#define MLX5E_LB_VERIFY_TIMEOUT (msecs_to_jiffies(200))
static int mlx5e_test_loopback(struct mlx5e_priv *priv)
{
@@ -313,37 +298,47 @@ out:
}
#endif
-static int (*mlx5e_st_func[MLX5E_ST_NUM])(struct mlx5e_priv *) = {
- mlx5e_test_link_state,
- mlx5e_test_link_speed,
- mlx5e_test_health_info,
+typedef int (*mlx5e_st_func)(struct mlx5e_priv *);
+
+struct mlx5e_st {
+ char name[ETH_GSTRING_LEN];
+ mlx5e_st_func st_func;
+ mlx5e_st_func cond_func;
+};
+
+static struct mlx5e_st mlx5e_sts[] = {
+ { "Link Test", mlx5e_test_link_state },
+ { "Speed Test", mlx5e_test_link_speed },
+ { "Health Test", mlx5e_test_health_info },
#ifdef CONFIG_INET
- mlx5e_test_loopback,
+ { "Loopback Test", mlx5e_test_loopback, mlx5e_cond_loopback },
#endif
};
+#define MLX5E_ST_NUM ARRAY_SIZE(mlx5e_sts)
+
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf)
{
struct mlx5e_priv *priv = netdev_priv(ndev);
- int i;
-
- memset(buf, 0, sizeof(u64) * MLX5E_ST_NUM);
+ int i, count = 0;
mutex_lock(&priv->state_lock);
netdev_info(ndev, "Self test begin..\n");
for (i = 0; i < MLX5E_ST_NUM; i++) {
- netdev_info(ndev, "\t[%d] %s start..\n",
- i, mlx5e_self_tests[i]);
- buf[i] = mlx5e_st_func[i](priv);
- netdev_info(ndev, "\t[%d] %s end: result(%lld)\n",
- i, mlx5e_self_tests[i], buf[i]);
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ netdev_info(ndev, "\t[%d] %s start..\n", i, st.name);
+ buf[count] = st.st_func(priv);
+		netdev_info(ndev, "\t[%d] %s end: result(%lld)\n", i, st.name, buf[count]);
+		count++;
	}
mutex_unlock(&priv->state_lock);
- for (i = 0; i < MLX5E_ST_NUM; i++) {
+ for (i = 0; i < count; i++) {
if (buf[i]) {
etest->flags |= ETH_TEST_FL_FAILED;
break;
@@ -352,3 +347,24 @@ void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
netdev_info(ndev, "Self test out: status flags(0x%x)\n",
etest->flags);
}
+
+int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data)
+{
+ int i, count = 0;
+
+ for (i = 0; i < MLX5E_ST_NUM; i++) {
+ struct mlx5e_st st = mlx5e_sts[i];
+
+ if (st.cond_func && st.cond_func(priv))
+ continue;
+ if (data)
+ strcpy(data + count * ETH_GSTRING_LEN, st.name);
+ count++;
+ }
+ return count;
+}
+
+int mlx5e_self_test_num(struct mlx5e_priv *priv)
+{
+ return mlx5e_self_test_fill_strings(priv, NULL);
+}
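
A compact user-space sketch of the table-driven pattern the selftest code moves to: each entry may carry a condition callback, skipped entries are not counted, and the result buffer therefore stays dense. Test names and callbacks below are invented:

#include <stdio.h>

struct selftest {
	const char *name;
	int (*run)(void);
	int (*cond)(void);	/* non-zero return means "skip this test" */
};

static int test_ok(void) { return 0; }
static int test_fail(void) { return 1; }
static int skip_always(void) { return 1; }

static const struct selftest tests[] = {
	{ "link",     test_ok },
	{ "loopback", test_fail, skip_always },	/* e.g. unsupported in this mode */
	{ "health",   test_ok },
};

int main(void)
{
	long long results[3];
	int count = 0;

	for (size_t i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
		if (tests[i].cond && tests[i].cond())
			continue;
		results[count] = tests[i].run();
		printf("[%zu] %s -> %lld\n", i, tests[i].name, results[count]);
		count++;
	}
	printf("%d of %zu tests ran\n", count, sizeof(tests) / sizeof(tests[0]));
	return 0;
}
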
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index e1dd17019030..2a9bfc3ffa2e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -128,6 +128,11 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
@@ -313,6 +318,11 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s,
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_gro_packets += rq_stats->gro_packets;
+ s->rx_gro_bytes += rq_stats->gro_bytes;
+ s->rx_gro_skbs += rq_stats->gro_skbs;
+ s->rx_gro_match_packets += rq_stats->gro_match_packets;
+ s->rx_gro_large_hds += rq_stats->gro_large_hds;
s->rx_ecn_mark += rq_stats->ecn_mark;
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
@@ -1760,6 +1770,11 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 139e59f30db0..2c1ed5b81be6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -144,6 +144,11 @@ struct mlx5e_sw_stats {
u64 tx_mpwqe_pkts;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_gro_packets;
+ u64 rx_gro_bytes;
+ u64 rx_gro_skbs;
+ u64 rx_gro_match_packets;
+ u64 rx_gro_large_hds;
u64 rx_mcast_packets;
u64 rx_ecn_mark;
u64 rx_removed_vlan_packets;
@@ -322,6 +327,11 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 gro_packets;
+ u64 gro_bytes;
+ u64 gro_skbs;
+ u64 gro_match_packets;
+ u64 gro_large_hds;
u64 mcast_packets;
u64 ecn_mark;
u64 removed_vlan_packets;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index ba8164792016..835caa1c7b74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -39,6 +39,7 @@
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
+#include <linux/if_macvlan.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/psample.h>
@@ -59,7 +60,6 @@
#include "en/mapping.h"
#include "en/tc_ct.h"
#include "en/mod_hdr.h"
-#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "lib/devcom.h"
@@ -67,6 +67,8 @@
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
+#include "lag/lag.h"
+#include "lag/mp.h"
#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)
@@ -229,6 +231,23 @@ mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
return err;
}
+struct mlx5e_tc_int_port_priv *
+mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
+{
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ struct mlx5_rep_uplink_priv *uplink_priv;
+ struct mlx5e_rep_priv *uplink_rpriv;
+
+ if (is_mdev_switchdev_mode(priv->mdev)) {
+ uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+ uplink_priv = &uplink_rpriv->uplink_priv;
+
+ return uplink_priv->int_port_priv;
+ }
+
+ return NULL;
+}
+
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
@@ -246,7 +265,6 @@ get_ct_priv(struct mlx5e_priv *priv)
return priv->fs.tc.ct;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
@@ -263,7 +281,6 @@ get_sample_priv(struct mlx5e_priv *priv)
return NULL;
}
-#endif
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
@@ -1146,11 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
flow, spec, attr,
mod_hdr_acts);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
} else if (flow_flag_test(flow, SAMPLE)) {
rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr,
mlx5e_tc_get_flow_tun_id(flow));
-#endif
} else {
rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
@@ -1186,12 +1201,10 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
return;
}
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
if (flow_flag_test(flow, SAMPLE)) {
mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
return;
}
-#endif
if (attr->esw_attr->split_count)
mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
@@ -1388,6 +1401,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
int err = 0;
int out_index;
+ parse_attr = attr->parse_attr;
+ esw_attr = attr->esw_attr;
+
/* We check chain range only for tc flows.
* For ft flows, we checked attr->chain was originally 0 and set it to
* FDB_FT_CHAIN which is outside tc range.
@@ -1413,6 +1429,24 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
err = mlx5e_attach_decap_route(priv, flow);
if (err)
goto err_out;
+
+ if (!attr->chain && esw_attr->int_port) {
+			/* If the decap route device is an internal port, change the
+			 * source vport value in reg_c0 back to the uplink, in case
+			 * the rule performs goto chain > 0. On a miss in chain > 0
+			 * we want the metadata registers to hold the chain id, so
+			 * SW will resume handling this packet from the proper chain.
+			 */
+ u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
+ esw_attr->in_rep->vport);
+
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
+ metadata);
+ if (err)
+ return err;
+ }
}
if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
@@ -1421,8 +1455,31 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_out;
}
- parse_attr = attr->parse_attr;
- esw_attr = attr->esw_attr;
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ struct mlx5e_tc_int_port *int_port;
+
+ if (attr->chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Internal port rule is only supported on chain 0");
+ return -EOPNOTSUPP;
+ }
+
+ if (attr->dest_chain) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Internal port rule offload doesn't support goto action");
+ return -EOPNOTSUPP;
+ }
+
+ int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
+ parse_attr->filter_dev->ifindex,
+ flow_flag_test(flow, EGRESS) ?
+ MLX5E_TC_INT_PORT_EGRESS :
+ MLX5E_TC_INT_PORT_INGRESS);
+ if (IS_ERR(int_port))
+ return PTR_ERR(int_port);
+
+ esw_attr->int_port = int_port;
+ }
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
struct net_device *out_dev;
@@ -1445,7 +1502,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
goto err_out;
if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
vf_tun = true;
out_priv = netdev_priv(encap_dev);
rpriv = out_priv->ppriv;
@@ -1553,7 +1611,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
if (esw_attr->dests[out_index].flags &
- MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
+ MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
+ !esw_attr->dest_int_port)
vf_tun = true;
if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
mlx5e_detach_encap(priv, flow, out_index);
@@ -1577,6 +1636,12 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);
+ if (esw_attr->int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
+
+ if (esw_attr->dest_int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
+
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
@@ -1688,8 +1753,8 @@ enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
if (opt->opt_class != htons(U16_MAX) ||
opt->type != U8_MAX) {
- NL_SET_ERR_MSG(extack,
- "Partial match of tunnel options in chain > 0 isn't supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Partial match of tunnel options in chain > 0 isn't supported");
netdev_warn(priv->netdev,
"Partial match of tunnel options in chain > 0 isn't supported");
return -EOPNOTSUPP;
@@ -1896,8 +1961,10 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
bool needs_mapping, sets_mapping;
int err;
- if (!mlx5e_is_eswitch_flow(flow))
+ if (!mlx5e_is_eswitch_flow(flow)) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
return -EOPNOTSUPP;
+ }
needs_mapping = !!flow->attr->chain;
sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
@@ -1905,8 +1972,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
if ((needs_mapping || sets_mapping) &&
!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
- NL_SET_ERR_MSG(extack,
- "Chains on tunnel devices isn't supported without register loopback support");
+ NL_SET_ERR_MSG_MOD(extack,
+ "Chains on tunnel devices isn't supported without register loopback support");
netdev_warn(priv->netdev,
"Chains on tunnel devices isn't supported without register loopback support");
return -EOPNOTSUPP;
@@ -2269,8 +2336,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
addr_type = match.key->addr_type;
/* the HW doesn't support frag first/later */
- if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
+ if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
+ NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
return -EOPNOTSUPP;
+ }
if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
@@ -2437,8 +2506,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
switch (ip_proto) {
case IPPROTO_ICMP:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMP))
+ MLX5_FLEX_PROTO_ICMP)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMP is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
@@ -2450,8 +2522,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
break;
case IPPROTO_ICMPV6:
if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
- MLX5_FLEX_PROTO_ICMPV6))
+ MLX5_FLEX_PROTO_ICMPV6)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Match on Flex protocols for ICMPV6 is not supported");
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
match.mask->type);
MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
@@ -2557,15 +2632,19 @@ static int pedit_header_offsets[] = {
#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
- struct pedit_headers_action *hdrs)
+ struct pedit_headers_action *hdrs,
+ struct netlink_ext_ack *extack)
{
u32 *curr_pmask, *curr_pval;
curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
- if (*curr_pmask & mask) /* disallow acting twice on the same location */
+ if (*curr_pmask & mask) { /* disallow acting twice on the same location */
+ NL_SET_ERR_MSG_MOD(extack,
+ "curr_pmask and new mask same. Acting twice on same location");
goto out_err;
+ }
*curr_pmask |= mask;
*curr_pval |= (val & mask);
@@ -2898,7 +2977,7 @@ parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
val = act->mangle.val;
offset = act->mangle.offset;
- err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
+ err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd], extack);
if (err)
goto out_err;
@@ -2910,16 +2989,17 @@ out_err:
}
static int
-parse_pedit_to_reformat(struct mlx5e_priv *priv,
- const struct flow_action_entry *act,
+parse_pedit_to_reformat(const struct flow_action_entry *act,
struct mlx5e_tc_flow_parse_attr *parse_attr,
struct netlink_ext_ack *extack)
{
u32 mask, val, offset;
u32 *p;
- if (act->id != FLOW_ACTION_MANGLE)
+ if (act->id != FLOW_ACTION_MANGLE) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported action id");
return -EOPNOTSUPP;
+ }
if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
@@ -2943,7 +3023,7 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
struct netlink_ext_ack *extack)
{
if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
- return parse_pedit_to_reformat(priv, act, parse_attr, extack);
+ return parse_pedit_to_reformat(act, parse_attr, extack);
return parse_pedit_to_modify_hdr(priv, act, namespace,
parse_attr, hdrs, extack);
@@ -3025,10 +3105,10 @@ struct ipv6_hoplimit_word {
__u8 hop_limit;
};
-static int is_action_keys_supported(const struct flow_action_entry *act,
- bool ct_flow, bool *modify_ip_header,
- bool *modify_tuple,
- struct netlink_ext_ack *extack)
+static bool
+is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
+ bool *modify_ip_header, bool *modify_tuple,
+ struct netlink_ext_ack *extack)
{
u32 mask, offset;
u8 htype;
@@ -3056,7 +3136,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv4 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
struct ipv6_hoplimit_word *hoplimit_word =
@@ -3074,7 +3154,7 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow && *modify_tuple) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of ipv6 address with action ct");
- return -EOPNOTSUPP;
+ return false;
}
} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
@@ -3082,11 +3162,11 @@ static int is_action_keys_supported(const struct flow_action_entry *act,
if (ct_flow) {
NL_SET_ERR_MSG_MOD(extack,
"can't offload re-write of transport header ports with action ct");
- return -EOPNOTSUPP;
+ return false;
}
}
- return 0;
+ return true;
}
static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
@@ -3133,7 +3213,7 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
void *headers_v;
u16 ethertype;
u8 ip_proto;
- int i, err;
+ int i;
headers_c = get_match_headers_criteria(actions, spec);
headers_v = get_match_headers_value(actions, spec);
@@ -3151,11 +3231,10 @@ static bool modify_header_match_supported(struct mlx5e_priv *priv,
act->id != FLOW_ACTION_ADD)
continue;
- err = is_action_keys_supported(act, ct_flow,
- &modify_ip_header,
- &modify_tuple, extack);
- if (err)
- return err;
+ if (!is_action_keys_supported(act, ct_flow,
+ &modify_ip_header,
+ &modify_tuple, extack))
+ return false;
}
if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
@@ -3176,37 +3255,65 @@ out_ok:
return true;
}
-static bool actions_match_supported(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
- struct mlx5e_tc_flow_parse_attr *parse_attr,
- struct mlx5e_tc_flow *flow,
- struct netlink_ext_ack *extack)
+static bool
+actions_match_supported_fdb(struct mlx5e_priv *priv,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
{
- bool ct_flow = false, ct_clear = false;
- u32 actions;
+ struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
+ bool ct_flow, ct_clear;
- ct_clear = flow->attr->ct_attr.ct_action &
- TCA_CT_ACT_CLEAR;
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
ct_flow = flow_flag_test(flow, CT) && !ct_clear;
- actions = flow->attr->action;
- if (mlx5e_is_eswitch_flow(flow)) {
- if (flow->attr->esw_attr->split_count && ct_flow &&
- !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
- /* All registers used by ct are cleared when using
- * split rules.
- */
- NL_SET_ERR_MSG_MOD(extack,
- "Can't offload mirroring with action ct");
- return false;
- }
+ if (esw_attr->split_count && ct_flow &&
+ !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
+ /* All registers used by ct are cleared when using
+ * split rules.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
+ return false;
}
- if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- return modify_header_match_supported(priv, &parse_attr->spec,
- flow_action, actions,
- ct_flow, ct_clear,
- extack);
+ if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "current firmware doesn't support split rule for port mirroring");
+ netdev_warn_once(priv->netdev,
+ "current firmware doesn't support split rule for port mirroring\n");
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+actions_match_supported(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow_parse_attr *parse_attr,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
+ u32 actions = flow->attr->action;
+ bool ct_flow, ct_clear;
+
+ ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
+ ct_flow = flow_flag_test(flow, CT) && !ct_clear;
+
+ if (!(actions &
+ (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+ NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
+ return false;
+ }
+
+ if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+ !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
+ actions, ct_flow, ct_clear, extack))
+ return false;
+
+ if (mlx5e_is_eswitch_flow(flow) &&
+ !actions_match_supported_fdb(priv, parse_attr, flow, extack))
+ return false;
return true;
}
@@ -3355,11 +3462,51 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
return 0;
}
-static int parse_tc_nic_actions(struct mlx5e_priv *priv,
- struct flow_action *flow_action,
+static int
+actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
+ struct mlx5_flow_attr *attr,
+ struct pedit_headers_action *hdrs,
struct netlink_ext_ack *extack)
{
+ struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
+ enum mlx5_flow_namespace_type ns_type;
+ int err;
+
+ if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
+ !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
+ return 0;
+
+ ns_type = get_flow_name_space(flow);
+
+ err = alloc_tc_pedit_action(priv, ns_type, parse_attr, hdrs,
+ &attr->action, extack);
+ if (err)
+ return err;
+
+ /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
+ if (parse_attr->mod_hdr_acts.num_actions > 0)
+ return 0;
+
+ attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+ dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
+
+ if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
+ return 0;
+
+ if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
+ (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
+ attr->esw_attr->split_count = 0;
+
+ return 0;
+}
+
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+ struct flow_action *flow_action,
+ struct mlx5e_tc_flow *flow,
+ struct netlink_ext_ack *extack)
+{
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct pedit_headers_action hdrs[2] = {};
@@ -3368,12 +3515,16 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
u32 action = 0;
int err, i;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
@@ -3451,7 +3602,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -3462,38 +3614,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, CT);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in NIC action");
return -EOPNOTSUPP;
}
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- }
- }
-
attr->action = action;
- if (attr->dest_chain) {
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
- NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
- return -EOPNOTSUPP;
- }
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
+ NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
+ return -EOPNOTSUPP;
}
- if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
@@ -3517,19 +3653,25 @@ static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_esw_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
u8 vlan_idx = attr->total_vlan;
- if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
+ if (vlan_idx >= MLX5_FS_VLAN_DEPTH) {
+ NL_SET_ERR_MSG_MOD(extack, "Total vlans used is greater than supported");
return -EOPNOTSUPP;
+ }
switch (act->id) {
case FLOW_ACTION_VLAN_POP:
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan pop action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
} else {
@@ -3545,20 +3687,27 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
if (vlan_idx) {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
- MLX5_FS_VLAN_DEPTH))
+ MLX5_FS_VLAN_DEPTH)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported for vlan depth > 1");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
} else {
if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
(act->vlan.proto != htons(ETH_P_8021Q) ||
- act->vlan.prio))
+ act->vlan.prio)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "vlan push action is not supported");
return -EOPNOTSUPP;
+ }
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
}
break;
default:
+ NL_SET_ERR_MSG_MOD(extack, "Unexpected action id for VLAN");
return -EINVAL;
}
@@ -3592,7 +3741,8 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
struct flow_action_entry vlan_act = {
@@ -3603,7 +3753,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
@@ -3614,14 +3764,15 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(*out_dev))
- err = add_vlan_push_action(priv, attr, out_dev, action);
+ err = add_vlan_push_action(priv, attr, out_dev, action, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
- u32 *action)
+ u32 *action,
+ struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
.id = FLOW_ACTION_VLAN_POP,
@@ -3631,7 +3782,7 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
- err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
+ err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
if (err)
return err;
}
@@ -3732,6 +3883,45 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type,
+ u32 *action,
+ int out_index)
+{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ struct mlx5e_tc_int_port_priv *int_port_priv;
+ struct mlx5e_tc_flow_parse_attr *parse_attr;
+ struct mlx5e_tc_int_port *dest_int_port;
+ int err;
+
+ parse_attr = attr->parse_attr;
+ int_port_priv = mlx5e_get_int_port_priv(priv);
+
+ dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
+ if (IS_ERR(dest_int_port))
+ return PTR_ERR(dest_int_port);
+
+ err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
+ MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
+ mlx5e_tc_int_port_get_metadata(dest_int_port));
+ if (err) {
+ mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
+ return err;
+ }
+
+ *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ esw_attr->dest_int_port = dest_int_port;
+ esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
+
+ /* Forward to root fdb for matching against the new source vport */
+ attr->dest_chain = 0;
+
+ return 0;
+}
+
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
struct flow_action *flow_action,
struct mlx5e_tc_flow *flow,
@@ -3751,20 +3941,39 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
bool encap = false, decap = false;
u32 action = attr->action;
int err, i, if_count = 0;
+ bool ptype_host = false;
bool mpls_push = false;
- if (!flow_action_has_entries(flow_action))
+ if (!flow_action_has_entries(flow_action)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
+ }
if (!flow_action_hw_stats_check(flow_action, extack,
- FLOW_ACTION_HW_STATS_DELAYED_BIT))
+ FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
+ case FLOW_ACTION_ACCEPT:
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
+ break;
+ case FLOW_ACTION_PTYPE:
+ if (act->ptype != PACKET_HOST) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "skbedit ptype is only supported with type host");
+ return -EOPNOTSUPP;
+ }
+
+ ptype_host = true;
+ break;
case FLOW_ACTION_DROP:
action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
@@ -3828,6 +4037,50 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
return -EOPNOTSUPP;
+ case FLOW_ACTION_REDIRECT_INGRESS: {
+ struct net_device *out_dev;
+
+ out_dev = act->dev;
+ if (!out_dev)
+ return -EOPNOTSUPP;
+
+ if (!netif_is_ovs_master(out_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is supported only for OVS internal ports");
+ return -EOPNOTSUPP;
+ }
+
+ if (netif_is_ovs_master(parse_attr->filter_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to ingress is not supported from internal port");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ptype_host) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress requires ptype=host action");
+ return -EOPNOTSUPP;
+ }
+
+ if (esw_attr->out_count) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "redirect to int port ingress is supported only as single destination");
+ return -EOPNOTSUPP;
+ }
+
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
+ MLX5E_TC_INT_PORT_INGRESS,
+ &action, esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
+
+ break;
+ }
case FLOW_ACTION_REDIRECT:
case FLOW_ACTION_MIRRED: {
struct mlx5e_priv *out_priv;
@@ -3902,18 +4155,21 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (is_vlan_dev(out_dev)) {
err = add_vlan_push_action(priv, attr,
&out_dev,
- &action);
+ &action, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
err = add_vlan_pop_action(priv, attr,
- &action);
+ &action, extack);
if (err)
return err;
}
+ if (netif_is_macvlan(out_dev))
+ out_dev = macvlan_dev_real_dev(out_dev);
+
err = verify_uplink_forwarding(priv, flow, out_dev, extack);
if (err)
return err;
@@ -3935,6 +4191,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
esw_attr->out_count++;
+ } else if (netif_is_ovs_master(out_dev)) {
+ err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
+ out_dev->ifindex,
+ MLX5E_TC_INT_PORT_EGRESS,
+ &action,
+ esw_attr->out_count);
+ if (err)
+ return err;
+
+ esw_attr->out_count++;
} else if (parse_attr->filter_dev != priv->netdev) {
/* All mlx5 devices are called to configure
* high level device filters. Therefore, the
@@ -3955,10 +4221,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
break;
case FLOW_ACTION_TUNNEL_ENCAP:
info = act->tunnel;
- if (info)
+ if (info) {
encap = true;
- else
+ } else {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Zero tunnel attributes is not supported");
return -EOPNOTSUPP;
+ }
break;
case FLOW_ACTION_VLAN_PUSH:
@@ -3972,7 +4241,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
act, parse_attr, hdrs,
&action, extack);
} else {
- err = parse_tc_vlan_action(priv, act, esw_attr, &action);
+ err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
}
if (err)
return err;
@@ -3998,7 +4267,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (err)
return err;
- action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
case FLOW_ACTION_CT:
@@ -4025,11 +4295,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
flow_flag_set(flow, SAMPLE);
break;
default:
- NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
+ NL_SET_ERR_MSG_MOD(extack,
+ "The offload action is not supported in FDB action");
return -EOPNOTSUPP;
}
}
+ /* Forward to/from internal port can only have 1 dest */
+ if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) &&
+ esw_attr->out_count > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Rules with internal port can have only one destination");
+ return -EOPNOTSUPP;
+ }
+
/* always set IP version for indirect table handling */
attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
@@ -4045,60 +4324,26 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
}
- if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
- hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
- parse_attr, hdrs, &action, extack);
- if (err)
- return err;
- /* in case all pedit actions are skipped, remove the MOD_HDR
- * flag. we might have set split_count either by pedit or
- * pop/push. if there is no pop/push either, reset it too.
- */
- if (parse_attr->mod_hdr_acts.num_actions == 0) {
- action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
- if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
- (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
- esw_attr->split_count = 0;
- }
- }
-
attr->action = action;
- if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
- return -EOPNOTSUPP;
- if (attr->dest_chain) {
- if (decap) {
- /* It can be supported if we'll create a mapping for
- * the tunnel device only (without tunnel), and set
- * this tunnel id with this decap flow.
- *
- * On restore (miss), we'll just set this saved tunnel
- * device.
- */
-
- NL_SET_ERR_MSG(extack,
- "Decap with goto isn't supported");
- netdev_warn(priv->netdev,
- "Decap with goto isn't supported");
- return -EOPNOTSUPP;
- }
-
- attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
- }
+ err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
+ if (err)
+ return err;
- if (!(attr->action &
- (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
- NL_SET_ERR_MSG_MOD(extack,
- "Rule must have at least one forward/drop action");
+ if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
return -EOPNOTSUPP;
- }
- if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
- NL_SET_ERR_MSG_MOD(extack,
- "current firmware doesn't support split rule for port mirroring");
- netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
+ if (attr->dest_chain && decap) {
+ /* It can be supported if we'll create a mapping for
+ * the tunnel device only (without tunnel), and set
+ * this tunnel id with this decap flow.
+ *
+ * On restore (miss), we'll just set this saved tunnel
+ * device.
+ */
+
+ NL_SET_ERR_MSG(extack, "Decap with goto isn't supported");
+ netdev_warn(priv->netdev, "Decap with goto isn't supported");
return -EOPNOTSUPP;
}
@@ -4733,8 +4978,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
- if (!flow_action_basic_hw_stats_check(flow_action, extack))
+ if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
+ NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
return -EOPNOTSUPP;
+ }
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
@@ -5006,9 +5253,9 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
MLX5_FLOW_NAMESPACE_FDB,
uplink_priv->post_act);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+ uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));
+
uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);
-#endif
mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
@@ -5022,9 +5269,11 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
}
uplink_priv->tunnel_mapping = mapping;
- /* 0xFFF is reserved for stack devices slow path table mark */
+ /* Two last values are reserved for stack devices slow path table mark
+ * and bridge ingress push mark.
+ */
mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
- sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
+ sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
if (IS_ERR(mapping)) {
err = PTR_ERR(mapping);
goto err_enc_opts_mapping;
@@ -5052,9 +5301,8 @@ err_ht_init:
err_enc_opts_mapping:
mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
+ mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
netdev_warn(priv->netdev,
"Failed to initialize tc (eswitch), err: %d", err);
@@ -5074,9 +5322,8 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
mapping_destroy(uplink_priv->tunnel_mapping);
-#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
-#endif
+ mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
mlx5_tc_ct_clean(uplink_priv->ct_priv);
mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
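The en_tc.c hunks above share one theme: error paths that used to fail with a bare -EOPNOTSUPP/-EINVAL now also fill in the netlink extended ack, extack is threaded through set_pedit_val(), parse_tc_vlan_action(), add_vlan_push_action() and friends, and actions_match_supported() is split into a common check plus an FDB-only actions_match_supported_fdb(). A minimal sketch of the extack pattern the patch standardizes on; the function and message below are illustrative, not taken from the patch:

	/* Illustrative only: the NL_SET_ERR_MSG_MOD() pattern used throughout
	 * the hunks above. NL_SET_ERR_MSG_MOD() prefixes the string with
	 * KBUILD_MODNAME, so user space sees "mlx5_core: ..." in the ack.
	 */
	#include <linux/netlink.h>
	#include <linux/errno.h>

	static int example_parse_action(bool supported, struct netlink_ext_ack *extack)
	{
		if (!supported) {
			NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
			return -EOPNOTSUPP;
		}
		return 0;
	}

With the ack filled in, a failing `tc filter add ... skip_sw` prints the reason instead of only "Operation not supported".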
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
index 1a4cd882f0fb..fdb222793027 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h
@@ -38,6 +38,7 @@
#include "eswitch.h"
#include "en/tc_ct.h"
#include "en/tc_tun.h"
+#include "en/tc/int_port.h"
#include "en_rep.h"
#define MLX5E_TC_FLOW_ID_MASK 0x0000ffff
@@ -56,7 +57,7 @@
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
- struct net_device *tun_dev;
+ struct net_device *fwd_dev;
};
struct mlx5_nic_flow_attr {
@@ -104,6 +105,8 @@ struct mlx5_rx_tun_attr {
#define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16
#define MLX5E_TC_TABLE_CHAIN_TAG_MASK GENMASK(MLX5E_TC_TABLE_CHAIN_TAG_BITS - 1, 0)
+#define MLX5E_TC_MAX_INT_PORT_NUM (8)
+
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct tunnel_match_key {
@@ -283,6 +286,12 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_
int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev,
u16 *vport);
+int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
+ struct mlx5_flow_attr *attr,
+ int ifindex,
+ enum mlx5e_tc_int_port_type type,
+ u32 *action,
+ int out_index);
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
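The new mlx5e_set_fwd_to_int_port_actions() prototype sits only inside the IS_ENABLED(CONFIG_MLX5_CLS_ACT) branch; its callers in en_tc.c are compiled under the same option, so no stub is added to the #else branch. For symbols that do need to stay visible with the option disabled, the header's existing convention is a one-line static inline stub, roughly as below (hypothetical name, shown only to illustrate the convention, not part of the patch):

	#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
	int mlx5e_example_helper(struct mlx5e_priv *priv);
	#else
	/* Hypothetical stub, mirroring the header's existing #else entries. */
	static inline int mlx5e_example_helper(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
	#endif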
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index c63d78eda606..7fd33b356cc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -38,6 +38,7 @@
#include "en/txrx.h"
#include "ipoib/ipoib.h"
#include "en_accel/en_accel.h"
+#include "en_accel/ipsec_rxtx.h"
#include "en/ptp.h"
static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
@@ -213,27 +214,14 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
}
-/* If packet is not IP's CHECKSUM_PARTIAL (e.g. icmd packet),
- * need to set L3 checksum flag for IPsec
- */
-static void
-ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
- struct mlx5_wqe_eth_seg *eseg)
-{
- eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
- if (skb->encapsulation) {
- eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
- sq->stats->csum_partial_inner++;
- } else {
- sq->stats->csum_partial++;
- }
-}
-
static inline void
mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
struct mlx5e_accel_tx_state *accel,
struct mlx5_wqe_eth_seg *eseg)
{
+ if (unlikely(mlx5e_ipsec_txwqe_build_eseg_csum(sq, skb, eseg)))
+ return;
+
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
if (skb->encapsulation) {
@@ -249,8 +237,6 @@ mlx5e_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
sq->stats->csum_partial++;
#endif
- } else if (unlikely(mlx5e_ipsec_eseg_meta(eseg))) {
- ipsec_txwqe_build_eseg_csum(sq, skb, eseg);
} else
sq->stats->csum_none++;
}
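In en_tx.c the open-coded ipsec_txwqe_build_eseg_csum() is removed and mlx5e_txwqe_build_eseg_csum() now asks mlx5e_ipsec_txwqe_build_eseg_csum() (from en_accel/ipsec_rxtx.h) first, returning early when the IPsec path claims the packet. The helper body below is an assumption reconstructed from the deleted inline code, not the actual ipsec_rxtx.h implementation; it is shown only to make the boolean contract at the new call site explicit:

	/* Assumed shape of the delegated helper; reconstructed from the removed
	 * inline code above, not copied from en_accel/ipsec_rxtx.h.
	 */
	static inline bool
	example_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
					    struct mlx5_wqe_eth_seg *eseg)
	{
		if (skb->ip_summed == CHECKSUM_PARTIAL || !mlx5e_ipsec_eseg_meta(eseg))
			return false;	/* not ours: fall through to the generic path */

		eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
		if (skb->encapsulation) {
			eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
			sq->stats->csum_partial_inner++;
		} else {
			sq->stats->csum_partial++;
		}
		return true;		/* handled: caller returns immediately */
	}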
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 605c8ecc3610..792e0d6aa861 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -632,6 +632,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_eq_notifier_register(dev, &table->cq_err_nb);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_CMD_EQE,
.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
};
@@ -644,6 +645,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = MLX5_NUM_ASYNC_EQE,
};
@@ -653,6 +655,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
goto err2;
param = (struct mlx5_eq_param) {
+ .irq_index = MLX5_IRQ_EQ_CTRL,
.nent = /* TODO: sriov max_vf + */ 1,
.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST,
};
@@ -806,8 +809,8 @@ static int create_comp_eqs(struct mlx5_core_dev *dev)
ncomp_eqs = table->num_comp_eqs;
nent = MLX5_COMP_EQ_SIZE;
for (i = 0; i < ncomp_eqs; i++) {
- int vecidx = i + MLX5_IRQ_VEC_COMP_BASE;
struct mlx5_eq_param param = {};
+ int vecidx = i;
eq = kzalloc(sizeof(*eq), GFP_KERNEL);
if (!eq) {
@@ -953,9 +956,7 @@ static int set_rmap(struct mlx5_core_dev *mdev)
goto err_out;
}
- vecidx = MLX5_IRQ_VEC_COMP_BASE;
- for (; vecidx < eq_table->num_comp_eqs + MLX5_IRQ_VEC_COMP_BASE;
- vecidx++) {
+ for (vecidx = 0; vecidx < eq_table->num_comp_eqs; vecidx++) {
err = irq_cpu_rmap_add(eq_table->rmap,
pci_irq_vector(mdev->pdev, vecidx));
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 7e221038df8d..f690f430f40f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -28,7 +28,10 @@
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE / 2 - 1)
#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_FROM \
(MLX5_ESW_BRIDGE_EGRESS_TABLE_VLAN_GRP_IDX_TO + 1)
-#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 2)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM \
+ (MLX5_ESW_BRIDGE_EGRESS_TABLE_MAC_GRP_IDX_TO + 1)
+#define MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO (MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE - 1)
#define MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE 0
@@ -61,6 +64,9 @@ struct mlx5_esw_bridge {
struct mlx5_flow_table *egress_ft;
struct mlx5_flow_group *egress_vlan_fg;
struct mlx5_flow_group *egress_mac_fg;
+ struct mlx5_flow_group *egress_miss_fg;
+ struct mlx5_pkt_reformat *egress_miss_pkt_reformat;
+ struct mlx5_flow_handle *egress_miss_handle;
unsigned long ageing_time;
u32 flags;
};
@@ -86,6 +92,26 @@ mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry)
SWITCHDEV_FDB_DEL_TO_BRIDGE);
}
+static bool mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(struct mlx5_eswitch *esw)
+{
+ return BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) >= sizeof(struct vlan_hdr) &&
+ MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) >=
+ offsetof(struct vlan_ethhdr, h_vlan_proto);
+}
+
+static struct mlx5_pkt_reformat *
+mlx5_esw_bridge_pkt_reformat_vlan_pop_create(struct mlx5_eswitch *esw)
+{
+ struct mlx5_pkt_reformat_params reformat_params = {};
+
+ reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
+ reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
+ reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
+ reformat_params.size = sizeof(struct vlan_hdr);
+ return mlx5_packet_reformat_alloc(esw->dev, &reformat_params, MLX5_FLOW_NAMESPACE_FDB);
+}
+
static struct mlx5_flow_table *
mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw)
{
@@ -287,43 +313,74 @@ mlx5_esw_bridge_egress_mac_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_
return fg;
}
+static struct mlx5_flow_group *
+mlx5_esw_bridge_egress_miss_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *egress_ft)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in, *match;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
+ match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
+
+ MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+
+ MLX5_SET(create_flow_group_in, in, start_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_FROM);
+ MLX5_SET(create_flow_group_in, in, end_flow_index,
+ MLX5_ESW_BRIDGE_EGRESS_TABLE_MISS_GRP_IDX_TO);
+
+ fg = mlx5_create_flow_group(egress_ft, in);
+ if (IS_ERR(fg))
+ esw_warn(esw->dev,
+ "Failed to create bridge egress table miss flow group (err=%ld)\n",
+ PTR_ERR(fg));
+ kvfree(in);
+ return fg;
+}
+
static int
mlx5_esw_bridge_ingress_table_init(struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_flow_group *mac_fg, *filter_fg, *vlan_fg;
struct mlx5_flow_table *ingress_ft, *skip_ft;
+ struct mlx5_eswitch *esw = br_offloads->esw;
int err;
- if (!mlx5_eswitch_vport_match_metadata_enabled(br_offloads->esw))
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return -EOPNOTSUPP;
ingress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_INGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_INGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(ingress_ft))
return PTR_ERR(ingress_ft);
skip_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_SKIP_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_SKIP_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(skip_ft)) {
err = PTR_ERR(skip_ft);
goto err_skip_tbl;
}
- vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(br_offloads->esw, ingress_ft);
+ vlan_fg = mlx5_esw_bridge_ingress_vlan_fg_create(esw, ingress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(br_offloads->esw, ingress_ft);
+ filter_fg = mlx5_esw_bridge_ingress_filter_fg_create(esw, ingress_ft);
if (IS_ERR(filter_fg)) {
err = PTR_ERR(filter_fg);
goto err_filter_fg;
}
- mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(br_offloads->esw, ingress_ft);
+ mac_fg = mlx5_esw_bridge_ingress_mac_fg_create(esw, ingress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
@@ -362,35 +419,82 @@ mlx5_esw_bridge_ingress_table_cleanup(struct mlx5_esw_bridge_offloads *br_offloa
br_offloads->ingress_ft = NULL;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat);
+
static int
mlx5_esw_bridge_egress_table_init(struct mlx5_esw_bridge_offloads *br_offloads,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_flow_group *mac_fg, *vlan_fg;
+ struct mlx5_flow_group *miss_fg = NULL, *mac_fg, *vlan_fg;
+ struct mlx5_pkt_reformat *miss_pkt_reformat = NULL;
+ struct mlx5_flow_handle *miss_handle = NULL;
+ struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_flow_table *egress_ft;
int err;
egress_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_EGRESS_TABLE_SIZE,
MLX5_ESW_BRIDGE_LEVEL_EGRESS_TABLE,
- br_offloads->esw);
+ esw);
if (IS_ERR(egress_ft))
return PTR_ERR(egress_ft);
- vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(br_offloads->esw, egress_ft);
+ vlan_fg = mlx5_esw_bridge_egress_vlan_fg_create(esw, egress_ft);
if (IS_ERR(vlan_fg)) {
err = PTR_ERR(vlan_fg);
goto err_vlan_fg;
}
- mac_fg = mlx5_esw_bridge_egress_mac_fg_create(br_offloads->esw, egress_ft);
+ mac_fg = mlx5_esw_bridge_egress_mac_fg_create(esw, egress_ft);
if (IS_ERR(mac_fg)) {
err = PTR_ERR(mac_fg);
goto err_mac_fg;
}
+ if (mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
+ miss_fg = mlx5_esw_bridge_egress_miss_fg_create(esw, egress_ft);
+ if (IS_ERR(miss_fg)) {
+ esw_warn(esw->dev, "Failed to create miss flow group (err=%ld)\n",
+ PTR_ERR(miss_fg));
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
+ if (IS_ERR(miss_pkt_reformat)) {
+ esw_warn(esw->dev,
+ "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
+ PTR_ERR(miss_pkt_reformat));
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+
+ miss_handle = mlx5_esw_bridge_egress_miss_flow_create(egress_ft,
+ br_offloads->skip_ft,
+ miss_pkt_reformat);
+ if (IS_ERR(miss_handle)) {
+ esw_warn(esw->dev, "Failed to create miss flow (err=%ld)\n",
+ PTR_ERR(miss_handle));
+ miss_handle = NULL;
+ mlx5_packet_reformat_dealloc(esw->dev, miss_pkt_reformat);
+ miss_pkt_reformat = NULL;
+ mlx5_destroy_flow_group(miss_fg);
+ miss_fg = NULL;
+ goto skip_miss_flow;
+ }
+ }
+skip_miss_flow:
+
bridge->egress_ft = egress_ft;
bridge->egress_vlan_fg = vlan_fg;
bridge->egress_mac_fg = mac_fg;
+ bridge->egress_miss_fg = miss_fg;
+ bridge->egress_miss_pkt_reformat = miss_pkt_reformat;
+ bridge->egress_miss_handle = miss_handle;
return 0;
err_mac_fg:
@@ -403,6 +507,13 @@ err_vlan_fg:
static void
mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge)
{
+ if (bridge->egress_miss_handle)
+ mlx5_del_flow_rules(bridge->egress_miss_handle);
+ if (bridge->egress_miss_pkt_reformat)
+ mlx5_packet_reformat_dealloc(bridge->br_offloads->esw->dev,
+ bridge->egress_miss_pkt_reformat);
+ if (bridge->egress_miss_fg)
+ mlx5_destroy_flow_group(bridge->egress_miss_fg);
mlx5_destroy_flow_group(bridge->egress_mac_fg);
mlx5_destroy_flow_group(bridge->egress_vlan_fg);
mlx5_destroy_flow_table(bridge->egress_ft);
@@ -443,8 +554,10 @@ mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char
mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
if (vlan && vlan->pkt_reformat_push) {
- flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
flow_act.pkt_reformat = vlan->pkt_reformat_push;
+ flow_act.modify_hdr = vlan->pkt_mod_hdr_push_mark;
} else if (vlan) {
MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
outer_headers.cvlan_tag);
@@ -564,6 +677,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
if (!rule_spec)
return ERR_PTR(-ENOMEM);
+ if (MLX5_CAP_ESW_FLOWTABLE(bridge->br_offloads->esw->dev, flow_source) &&
+ vport_num == MLX5_VPORT_UPLINK)
+ rule_spec->flow_context.flow_source =
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
@@ -599,6 +716,41 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const u
return handle;
}
+static struct mlx5_flow_handle *
+mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
+ struct mlx5_flow_table *skip_ft,
+ struct mlx5_pkt_reformat *pkt_reformat)
+{
+ struct mlx5_flow_destination dest = {
+ .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
+ .ft = skip_ft,
+ };
+ struct mlx5_flow_act flow_act = {
+ .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT,
+ .flags = FLOW_ACT_NO_APPEND,
+ .pkt_reformat = pkt_reformat,
+ };
+ struct mlx5_flow_spec *rule_spec;
+ struct mlx5_flow_handle *handle;
+
+ rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
+ if (!rule_spec)
+ return ERR_PTR(-ENOMEM);
+
+ rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+
+ MLX5_SET(fte_match_param, rule_spec->match_criteria,
+ misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
+ MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_1,
+ ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK);
+
+ handle = mlx5_add_flow_rules(egress_ft, rule_spec, &flow_act, &dest, 1);
+
+ kvfree(rule_spec);
+ return handle;
+}
+
static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
struct mlx5_esw_bridge_offloads *br_offloads)
{
@@ -736,14 +888,20 @@ mlx5_esw_bridge_fdb_entry_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
kvfree(entry);
}
+static void
+mlx5_esw_bridge_fdb_entry_notify_and_cleanup(struct mlx5_esw_bridge_fdb_entry *entry,
+ struct mlx5_esw_bridge *bridge)
+{
+ mlx5_esw_bridge_fdb_del_notify(entry);
+ mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+}
+
static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge)
{
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
static struct mlx5_esw_bridge_vlan *
@@ -798,24 +956,14 @@ mlx5_esw_bridge_vlan_push_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5
static int
mlx5_esw_bridge_vlan_pop_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
{
- struct mlx5_pkt_reformat_params reformat_params = {};
struct mlx5_pkt_reformat *pkt_reformat;
- if (!BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat_remove)) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_size) < sizeof(struct vlan_hdr) ||
- MLX5_CAP_GEN_2(esw->dev, max_reformat_remove_offset) <
- offsetof(struct vlan_ethhdr, h_vlan_proto)) {
+ if (!mlx5_esw_bridge_pkt_reformat_vlan_pop_supported(esw)) {
esw_warn(esw->dev, "Packet reformat REMOVE_HEADER is not supported\n");
return -EOPNOTSUPP;
}
- reformat_params.type = MLX5_REFORMAT_TYPE_REMOVE_HDR;
- reformat_params.param_0 = MLX5_REFORMAT_CONTEXT_ANCHOR_MAC_START;
- reformat_params.param_1 = offsetof(struct vlan_ethhdr, h_vlan_proto);
- reformat_params.size = sizeof(struct vlan_hdr);
- pkt_reformat = mlx5_packet_reformat_alloc(esw->dev,
- &reformat_params,
- MLX5_FLOW_NAMESPACE_FDB);
+ pkt_reformat = mlx5_esw_bridge_pkt_reformat_vlan_pop_create(esw);
if (IS_ERR(pkt_reformat)) {
esw_warn(esw->dev, "Failed to alloc packet reformat REMOVE_HEADER (err=%ld)\n",
PTR_ERR(pkt_reformat));
@@ -833,6 +981,33 @@ mlx5_esw_bridge_vlan_pop_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_
vlan->pkt_reformat_pop = NULL;
}
+static int
+mlx5_esw_bridge_vlan_push_mark_create(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5_modify_hdr *pkt_mod_hdr;
+
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, 8);
+ MLX5_SET(set_action_in, action, length, ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS);
+ MLX5_SET(set_action_in, action, data, ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN);
+
+ pkt_mod_hdr = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_FDB, 1, action);
+ if (IS_ERR(pkt_mod_hdr))
+ return PTR_ERR(pkt_mod_hdr);
+
+ vlan->pkt_mod_hdr_push_mark = pkt_mod_hdr;
+ return 0;
+}
+
+static void
+mlx5_esw_bridge_vlan_push_mark_cleanup(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_eswitch *esw)
+{
+ mlx5_modify_header_dealloc(esw->dev, vlan->pkt_mod_hdr_push_mark);
+ vlan->pkt_mod_hdr_push_mark = NULL;
+}
+
static struct mlx5_esw_bridge_vlan *
mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *port,
struct mlx5_eswitch *esw)
@@ -852,6 +1027,10 @@ mlx5_esw_bridge_vlan_create(u16 vid, u16 flags, struct mlx5_esw_bridge_port *por
err = mlx5_esw_bridge_vlan_push_create(vlan, esw);
if (err)
goto err_vlan_push;
+
+ err = mlx5_esw_bridge_vlan_push_mark_create(vlan, esw);
+ if (err)
+ goto err_vlan_push_mark;
}
if (flags & BRIDGE_VLAN_INFO_UNTAGGED) {
err = mlx5_esw_bridge_vlan_pop_create(vlan, esw);
@@ -870,6 +1049,9 @@ err_xa_insert:
if (vlan->pkt_reformat_pop)
mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
err_vlan_pop:
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
+err_vlan_push_mark:
if (vlan->pkt_reformat_push)
mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
err_vlan_push:
@@ -886,17 +1068,18 @@ static void mlx5_esw_bridge_vlan_erase(struct mlx5_esw_bridge_port *port,
static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan,
struct mlx5_esw_bridge *bridge)
{
+ struct mlx5_eswitch *esw = bridge->br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry, *tmp;
- list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
if (vlan->pkt_reformat_pop)
- mlx5_esw_bridge_vlan_pop_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_pop_cleanup(vlan, esw);
+ if (vlan->pkt_mod_hdr_push_mark)
+ mlx5_esw_bridge_vlan_push_mark_cleanup(vlan, esw);
if (vlan->pkt_reformat_push)
- mlx5_esw_bridge_vlan_push_cleanup(vlan, bridge->br_offloads->esw);
+ mlx5_esw_bridge_vlan_push_cleanup(vlan, esw);
}
static void mlx5_esw_bridge_vlan_cleanup(struct mlx5_esw_bridge_port *port,
@@ -949,6 +1132,17 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id,
}
static struct mlx5_esw_bridge_fdb_entry *
+mlx5_esw_bridge_fdb_lookup(struct mlx5_esw_bridge *bridge,
+ const unsigned char *addr, u16 vid)
+{
+ struct mlx5_esw_bridge_fdb_key key = {};
+
+ ether_addr_copy(key.addr, addr);
+ key.vid = vid;
+ return rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+}
+
+static struct mlx5_esw_bridge_fdb_entry *
mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
const unsigned char *addr, u16 vid, bool added_by_user, bool peer,
struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge)
@@ -966,6 +1160,10 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
return ERR_CAST(vlan);
}
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
+ if (entry)
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
+
entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return ERR_PTR(-ENOMEM);
@@ -1265,7 +1463,6 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
struct switchdev_notifier_fdb_info *fdb_info)
{
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_fdb_key key;
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
@@ -1274,13 +1471,11 @@ void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16
return;
bridge = port->bridge;
- ether_addr_copy(key.addr, fdb_info->addr);
- key.vid = fdb_info->vid;
- entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_debug(br_offloads->esw->dev,
"FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- key.addr, key.vid, vport_num);
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
@@ -1322,7 +1517,6 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
{
struct mlx5_eswitch *esw = br_offloads->esw;
struct mlx5_esw_bridge_fdb_entry *entry;
- struct mlx5_esw_bridge_fdb_key key;
struct mlx5_esw_bridge_port *port;
struct mlx5_esw_bridge *bridge;
@@ -1331,18 +1525,15 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_o
return;
bridge = port->bridge;
- ether_addr_copy(key.addr, fdb_info->addr);
- key.vid = fdb_info->vid;
- entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params);
+ entry = mlx5_esw_bridge_fdb_lookup(bridge, fdb_info->addr, fdb_info->vid);
if (!entry) {
esw_warn(esw->dev,
"FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n",
- key.addr, key.vid, vport_num);
+ fdb_info->addr, fdb_info->vid, vport_num);
return;
}
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
@@ -1358,13 +1549,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads)
if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)
continue;
- if (time_after(lastuse, entry->lastuse)) {
+ if (time_after(lastuse, entry->lastuse))
mlx5_esw_bridge_fdb_entry_refresh(entry);
- } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
- time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) {
- mlx5_esw_bridge_fdb_del_notify(entry);
- mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge);
- }
+ else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) &&
+ time_is_before_jiffies(entry->lastuse + bridge->ageing_time))
+ mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
}
}
}
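Two threads run through the bridge.c changes: (1) an egress miss group and rule that match ESW_TUN_BRIDGE_INGRESS_PUSH_VLAN_MARK in reg_c_1 and use a REMOVE_HEADER packet reformat to strip the VLAN re-pushed at ingress before forwarding to the skip table, created only when mlx5_esw_bridge_pkt_reformat_vlan_pop_supported() holds; and (2) de-duplication of the FDB teardown path via mlx5_esw_bridge_fdb_lookup() and mlx5_esw_bridge_fdb_entry_notify_and_cleanup(). A condensed caller-side sketch of the second helper pair follows; the function name and context are illustrative, mirroring the stale-entry handling added to mlx5_esw_bridge_fdb_entry_init():

	/* Illustrative caller only; not part of the patch. */
	static void example_replace_fdb_entry(struct mlx5_esw_bridge *bridge,
					      const unsigned char *addr, u16 vid)
	{
		struct mlx5_esw_bridge_fdb_entry *entry;

		entry = mlx5_esw_bridge_fdb_lookup(bridge, addr, vid);
		if (entry)
			/* Notify switchdev (SWITCHDEV_FDB_DEL_TO_BRIDGE) and free
			 * the offloaded entry before a fresh one is installed.
			 */
			mlx5_esw_bridge_fdb_entry_notify_and_cleanup(entry, bridge);
	}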
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index 52964a82d6a6..878311fe950a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -49,6 +49,7 @@ struct mlx5_esw_bridge_vlan {
struct list_head fdb_list;
struct mlx5_pkt_reformat *pkt_reformat_push;
struct mlx5_pkt_reformat *pkt_reformat_pop;
+ struct mlx5_modify_hdr *pkt_mod_hdr_push_mark;
};
struct mlx5_esw_bridge_port {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 20af557ae30c..7f9b96d9537e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -36,7 +36,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
return NULL;
mlx5_esw_get_port_parent_id(dev, &ppid);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
@@ -149,7 +149,7 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
if (IS_ERR(vport))
return PTR_ERR(vport);
- pfnum = PCI_FUNC(dev->pdev->devfn);
+ pfnum = mlx5_get_dev_index(dev);
mlx5_esw_get_port_parent_id(dev, &ppid);
memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 985e305179d1..c6cc67cb4f6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -473,10 +473,9 @@ esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
err_min_rate:
list_del(&group->list);
- err = mlx5_destroy_scheduling_element_cmd(esw->dev,
- SCHEDULING_HIERARCHY_E_SWITCH,
- group->tsar_ix);
- if (err)
+ if (mlx5_destroy_scheduling_element_cmd(esw->dev,
+ SCHEDULING_HIERARCHY_E_SWITCH,
+ group->tsar_ix))
NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed");
err_sched_elem:
kfree(group);
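The qos.c fix is small but worth noting: in esw_qos_create_rate_group()'s unwind path, the return value of mlx5_destroy_scheduling_element_cmd() previously overwrote err, so the caller could see the cleanup status instead of the original failure. The rewritten form only uses the destroy result to decide whether to emit the extack message. The same shape in a condensed, self-contained sketch; all names below are illustrative:

	#include <linux/errno.h>
	#include <linux/netlink.h>

	struct example_dev;					/* illustrative */

	static int example_setup_a(struct example_dev *dev) { return 0; }
	static int example_setup_b(struct example_dev *dev) { return -EINVAL; }
	static int example_teardown_a(struct example_dev *dev) { return 0; }

	static int example_create(struct example_dev *dev, struct netlink_ext_ack *extack)
	{
		int err;

		err = example_setup_a(dev);
		if (err)
			return err;

		err = example_setup_b(dev);
		if (err)
			goto err_b;
		return 0;

	err_b:
		/* Report a cleanup failure, but keep returning the error from
		 * example_setup_b(); don't let the teardown result clobber it.
		 */
		if (example_teardown_a(dev))
			NL_SET_ERR_MSG_MOD(extack, "cleanup of step A failed");
		return err;
	}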
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2c7444101bb9..42f8ee2e5d9f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -51,6 +51,7 @@
enum mlx5_mapped_obj_type {
MLX5_MAPPED_OBJ_CHAIN,
MLX5_MAPPED_OBJ_SAMPLE,
+ MLX5_MAPPED_OBJ_INT_PORT_METADATA,
};
struct mlx5_mapped_obj {
@@ -63,6 +64,7 @@ struct mlx5_mapped_obj {
u32 trunc_size;
u32 tunnel_id;
} sample;
+ u32 int_port_metadata;
};
};
@@ -88,6 +90,7 @@ enum {
MAPPING_TYPE_TUNNEL_ENC_OPTS,
MAPPING_TYPE_LABELS,
MAPPING_TYPE_ZONE,
+ MAPPING_TYPE_INT_PORT,
};
struct vport_ingress {
@@ -336,6 +339,9 @@ void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata);
int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps);
+bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw);
+int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable);
+
/* E-Switch API */
int mlx5_eswitch_init(struct mlx5_core_dev *dev);
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw);
@@ -433,7 +439,7 @@ enum mlx5_flow_match_level {
};
/* current maximum for flow based vport multicasting */
-#define MLX5_MAX_FLOW_FWD_VPORTS 2
+#define MLX5_MAX_FLOW_FWD_VPORTS 32
enum {
MLX5_ESW_DEST_ENCAP = BIT(0),
@@ -447,12 +453,22 @@ enum {
MLX5_ESW_ATTR_FLAG_NO_IN_PORT = BIT(2),
MLX5_ESW_ATTR_FLAG_SRC_REWRITE = BIT(3),
MLX5_ESW_ATTR_FLAG_SAMPLE = BIT(4),
+ MLX5_ESW_ATTR_FLAG_ACCEPT = BIT(5),
};
+/* Returns true if any of the flags that require skipping further TC/NF processing are set. */
+static inline bool
+mlx5_esw_attr_flags_skip(u32 attr_flags)
+{
+ return attr_flags & (MLX5_ESW_ATTR_FLAG_SLOW_PATH | MLX5_ESW_ATTR_FLAG_ACCEPT);
+}
+
struct mlx5_esw_flow_attr {
struct mlx5_eswitch_rep *in_rep;
struct mlx5_core_dev *in_mdev;
struct mlx5_core_dev *counter_dev;
+ struct mlx5e_tc_int_port *dest_int_port;
+ struct mlx5e_tc_int_port *int_port;
int split_count;
int out_count;
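MLX5_MAX_FLOW_FWD_VPORTS jumping from 2 to 32 is what forces the eswitch_offloads.c hunks below to move the destination array off the stack: 33 struct mlx5_flow_destination entries would bloat the frames of mlx5_eswitch_add_offloaded_rule() and mlx5_eswitch_add_fwd_rule(). The new mlx5_esw_attr_flags_skip() inline also centralizes the "slow path or accept" test that previously checked MLX5_ESW_ATTR_FLAG_SLOW_PATH alone. A condensed sketch of the allocation shape; the function below is illustrative, while the real ones populate dest[] via esw_setup_dests() and carry more error labels:

	/* Condensed, illustrative version of the heap-allocated dest[] pattern. */
	static struct mlx5_flow_handle *
	example_add_rule(struct mlx5_flow_table *fdb, struct mlx5_flow_spec *spec,
			 struct mlx5_flow_act *flow_act)
	{
		struct mlx5_flow_destination *dest;
		struct mlx5_flow_handle *rule;
		int i = 0;

		dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
		if (!dest)
			return ERR_PTR(-ENOMEM);

		/* ... fill dest[0..i-1] and set i ... */

		rule = mlx5_add_flow_rules(fdb, spec, flow_act, dest, i);

		kfree(dest);	/* freed on success and on error return alike */
		return rule;
	}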
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 0d461e38add3..f4eaa5893886 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -86,12 +86,18 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
- if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
- attr && attr->in_rep)
- spec->flow_context.flow_source =
- attr->in_rep->vport == MLX5_VPORT_UPLINK ?
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
- MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
+ if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
+ return;
+
+ if (attr->int_port) {
+ spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
+
+ return;
+ }
+
+ spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
+ MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower 16 bits
@@ -121,6 +127,8 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
struct mlx5_eswitch *src_esw,
u16 vport)
{
+ struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+ u32 metadata;
void *misc2;
void *misc;
@@ -130,10 +138,16 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
if (mlx5_esw_indir_table_decap_vport(attr))
vport = mlx5_esw_indir_table_decap_vport(attr);
+
+ if (esw_attr->int_port)
+ metadata =
+ mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
+ else
+ metadata =
+ mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);
+
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
- MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
- mlx5_eswitch_get_vport_metadata_for_match(src_esw,
- vport));
+ MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);
misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
@@ -290,8 +304,11 @@ esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
if (err)
goto err_setup_chain;
- flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
- flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+
+ if (esw_attr->dests[j].pkt_reformat) {
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+ flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
+ }
}
return 0;
@@ -315,7 +332,8 @@ esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
int i;
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
+ if (esw_attr->dests[i].rep &&
+ mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
esw_attr->dests[i].mdev))
return true;
return false;
@@ -440,7 +458,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
} else if (attr->dest_ft) {
esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
(*i)++;
- } else if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) {
+ } else if (mlx5_esw_attr_flags_skip(attr->flags)) {
esw_setup_slow_path_dest(dest, flow_act, chains, *i);
(*i)++;
} else if (attr->dest_chain) {
@@ -467,7 +485,7 @@ esw_cleanup_dests(struct mlx5_eswitch *esw,
if (attr->dest_ft) {
esw_cleanup_decap_indir(esw, attr);
- } else if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ } else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
if (attr->dest_chain)
esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
else if (esw_is_indir_table(esw, attr))
@@ -482,12 +500,12 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
bool split = !!(esw_attr->split_count);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_handle *rule;
struct mlx5_flow_table *fdb;
int i = 0;
@@ -495,6 +513,10 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return ERR_PTR(-EOPNOTSUPP);
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
flow_act.action = attr->action;
/* if per flow vlan pop/push is emulated, don't set that into the firmware */
if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
@@ -574,6 +596,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
else
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_add_rule:
@@ -584,6 +607,7 @@ err_add_rule:
err_esw_get:
esw_cleanup_dests(esw, attr);
err_create_goto_table:
+ kfree(dest);
return rule;
}
@@ -592,16 +616,20 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
- struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {};
struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_fs_chains *chains = esw_chains(esw);
struct mlx5_vport_tbl_attr fwd_attr;
+ struct mlx5_flow_destination *dest;
struct mlx5_flow_table *fast_fdb;
struct mlx5_flow_table *fwd_fdb;
struct mlx5_flow_handle *rule;
int i, err = 0;
+ dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
+ if (!dest)
+ return ERR_PTR(-ENOMEM);
+
fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
if (IS_ERR(fast_fdb)) {
rule = ERR_CAST(fast_fdb);
@@ -654,6 +682,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
atomic64_inc(&esw->offloads.num_flows);
+ kfree(dest);
return rule;
err_chain_src_rewrite:
esw_put_dest_tables_loop(esw, attr, 0, i);
@@ -661,6 +690,7 @@ err_chain_src_rewrite:
err_get_fwd:
mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
+ kfree(dest);
return rule;
}
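
Both rule-add paths above move the MLX5_MAX_FLOW_FWD_VPORTS + 1 destination array off the kernel stack and release it on every return path, success or error. A minimal sketch of that pattern, assuming nothing from the driver; struct rule and create_rule_from() are hypothetical stand-ins for mlx5_flow_handle and the real rule construction:

    #include <linux/slab.h>
    #include <linux/err.h>

    struct rule;                                     /* stands in for mlx5_flow_handle */
    struct rule *create_rule_from(int *dest, int n); /* hypothetical helper */

    static struct rule *add_rule(int n)
    {
            struct rule *rule;
            int *dest;

            /* zeroed, overflow-checked heap buffer instead of a big on-stack array */
            dest = kcalloc(n, sizeof(*dest), GFP_KERNEL);
            if (!dest)
                    return ERR_PTR(-ENOMEM);

            rule = create_rule_from(dest, n);   /* may itself return ERR_PTR() */

            kfree(dest);    /* scratch buffer is released on success and failure alike */
            return rule;
    }
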
@@ -678,7 +708,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
mlx5_del_flow_rules(rule);
- if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)) {
+ if (!mlx5_esw_attr_flags_skip(attr->flags)) {
/* unref the term table */
for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
if (esw_attr->dests[i].termtbl)
@@ -1009,7 +1039,7 @@ mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
u16 vport_num;
num_vfs = esw->esw_funcs.num_vfs;
- flows = kvzalloc(num_vfs * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
if (!flows)
return -ENOMEM;
@@ -1188,7 +1218,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
peer_miss_rules_setup(esw, peer_dev, spec, &dest);
- flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL);
+ flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
if (!flows) {
err = -ENOMEM;
goto alloc_flows_err;
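
The kvzalloc(n * size) call sites above become kvcalloc(n, size), which checks the multiplication for overflow before allocating. A small illustrative helper under that assumption only; struct entry stands in for the per-vport flow handle slots:

    #include <linux/mm.h>   /* kvcalloc() on this kernel generation */

    struct entry { void *handle; };

    static struct entry *alloc_entries(unsigned int count)
    {
            /*
             * kvcalloc() multiplies count by the element size with overflow
             * checking and zeroes the result, so an absurd count fails cleanly
             * instead of yielding an undersized buffer.
             */
            return kvcalloc(count, sizeof(struct entry), GFP_KERNEL);
    }
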
@@ -1845,6 +1875,17 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
atomic64_set(&esw->user_count, 0);
}
+static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
+{
+ int nvports;
+
+ nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ if (mlx5e_tc_int_port_supported(esw))
+ nvports += MLX5E_TC_MAX_INT_PORT_NUM;
+
+ return nvports;
+}
+
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
struct mlx5_flow_table_attr ft_attr = {};
@@ -1859,7 +1900,7 @@ static int esw_create_offloads_table(struct mlx5_eswitch *esw)
return -EOPNOTSUPP;
}
- ft_attr.max_fte = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ ft_attr.max_fte = esw_get_offloads_ft_size(esw);
ft_attr.prio = 1;
ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
@@ -1888,7 +1929,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
int nvports;
int err = 0;
- nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
+ nvports = esw_get_offloads_ft_size(esw);
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
@@ -2793,12 +2834,13 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
- u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
+ /* Reserve 0xf for internal port offload */
+ u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
u32 pf_num;
int id;
/* Only 4 bits of pf_num */
- pf_num = PCI_FUNC(esw->dev->pdev->devfn);
+ pf_num = mlx5_get_dev_index(esw->dev);
if (pf_num > max_pf_num)
return 0;
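
The hunk above shrinks the usable pf_num range by one so that the all-ones value (0xf) can identify internal ports in the match metadata. Toy arithmetic only, with a hypothetical TOY_ESW_PFNUM_BITS mirroring the 4-bit field mentioned in the existing comment:

    #include <linux/types.h>

    #define TOY_ESW_PFNUM_BITS 4    /* hypothetical mirror of ESW_PFNUM_BITS */

    static inline bool toy_pf_num_usable(u32 pf_num)
    {
            /* all-ones (0xf) is now reserved for internal port offload */
            u32 max_pf_num = (1 << TOY_ESW_PFNUM_BITS) - 2; /* 0xe */

            return pf_num <= max_pf_num;
    }
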
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
index b45954905845..182306bbefaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c
@@ -219,8 +219,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, termination_table) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level) ||
- attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH ||
- !mlx5_eswitch_offload_is_uplink_port(esw, spec))
+ mlx5_esw_attr_flags_skip(attr->flags) ||
+ (!mlx5_eswitch_offload_is_uplink_port(esw, spec) && !esw_attr->int_port))
return false;
/* push vlan on RX */
@@ -229,7 +229,8 @@ mlx5_eswitch_termtbl_required(struct mlx5_eswitch *esw,
/* hairpin */
for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
- if (esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
+ if (!esw_attr->dest_int_port && esw_attr->dests[i].rep &&
+ esw_attr->dests[i].rep->vport == MLX5_VPORT_UPLINK)
return true;
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
index 306279b7f9e7..12abe991583a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c
@@ -115,7 +115,7 @@ static int mlx5_fpga_conn_post_recv(struct mlx5_fpga_conn *conn,
ix = conn->qp.rq.pc & (conn->qp.rq.size - 1);
data = mlx5_wq_cyc_get_wqe(&conn->qp.wq.rq, ix);
data->byte_count = cpu_to_be32(buf->sg[0].size);
- data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
data->addr = cpu_to_be64(buf->sg[0].dma_addr);
conn->qp.rq.pc++;
@@ -155,7 +155,7 @@ static void mlx5_fpga_conn_post_send(struct mlx5_fpga_conn *conn,
if (!buf->sg[sgi].data)
break;
data->byte_count = cpu_to_be32(buf->sg[sgi].size);
- data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey.key);
+ data->lkey = cpu_to_be32(conn->fdev->conn_res.mkey);
data->addr = cpu_to_be64(buf->sg[sgi].dma_addr);
data++;
size++;
@@ -221,7 +221,7 @@ static int mlx5_fpga_conn_post_recv_buf(struct mlx5_fpga_conn *conn)
}
static int mlx5_fpga_conn_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
void *mkc;
@@ -978,7 +978,7 @@ int mlx5_fpga_conn_device_init(struct mlx5_fpga_device *fdev)
mlx5_fpga_err(fdev, "create mkey failed, %d\n", err);
goto err_dealloc_pd;
}
- mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey.key);
+ mlx5_fpga_dbg(fdev, "Created mkey 0x%x\n", fdev->conn_res.mkey);
return 0;
@@ -994,7 +994,7 @@ out:
void mlx5_fpga_conn_device_cleanup(struct mlx5_fpga_device *fdev)
{
- mlx5_core_destroy_mkey(fdev->mdev, &fdev->conn_res.mkey);
+ mlx5_core_destroy_mkey(fdev->mdev, fdev->conn_res.mkey);
mlx5_core_dealloc_pd(fdev->mdev, fdev->conn_res.pdn);
mlx5_put_uars_page(fdev->mdev, fdev->conn_res.uar);
mlx5_nic_vport_disable_roce(fdev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
index 52c9dee91ea4..2a984e82ae16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h
@@ -54,7 +54,7 @@ struct mlx5_fpga_device {
/* QP Connection resources */
struct {
u32 pdn;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5_uars_page *uar;
} conn_res;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 7db8df64a60e..750b21124a1a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -185,6 +185,20 @@ static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master,
return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
}
+static int
+mlx5_cmd_stub_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return 0;
+}
+
+static int
+mlx5_cmd_stub_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return 0;
+}
+
static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft, u32 underlay_qpn,
bool disconnect)
@@ -563,8 +577,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
id = dst->dest_attr.ft->id;
break;
+ case MLX5_FLOW_DESTINATION_TYPE_UPLINK:
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
- id = dst->dest_attr.vport.num;
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id_valid,
!!(dst->dest_attr.vport.flags &
@@ -572,6 +586,12 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(dest_format_struct, in_dests,
destination_eswitch_owner_vhca_id,
dst->dest_attr.vport.vhca_id);
+ if (type == MLX5_FLOW_DESTINATION_TYPE_UPLINK) {
+ /* destination_id is reserved */
+ id = 0;
+ break;
+ }
+ id = dst->dest_attr.vport.num;
if (extended_dest &&
dst->dest_attr.vport.pkt_reformat) {
MLX5_SET(dest_format_struct, in_dests,
@@ -909,6 +929,45 @@ static void mlx5_cmd_modify_header_dealloc(struct mlx5_flow_root_namespace *ns,
mlx5_cmd_exec_in(dev, dealloc_modify_header_context, in);
}
+static int mlx5_cmd_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ u32 in[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)] = {};
+ u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
+
+ MLX5_SET(general_obj_in_cmd_hdr, in, opcode,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+ MLX5_SET(general_obj_in_cmd_hdr, in, obj_id, definer_id);
+
+ return mlx5_cmd_exec(ns->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_cmd_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(create_match_definer_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(create_match_definer_in)] = {};
+ struct mlx5_core_dev *dev = ns->dev;
+ void *ptr;
+ int err;
+
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.opcode,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT);
+ MLX5_SET(create_match_definer_in, in, general_obj_in_cmd_hdr.obj_type,
+ MLX5_OBJ_TYPE_MATCH_DEFINER);
+
+ ptr = MLX5_ADDR_OF(create_match_definer_in, in, obj_context);
+ MLX5_SET(match_definer, ptr, format_id, format_id);
+
+ ptr = MLX5_ADDR_OF(match_definer, ptr, match_mask);
+ memcpy(ptr, match_mask, MLX5_FLD_SZ_BYTES(match_definer, match_mask));
+
+ err = mlx5_cmd_exec_inout(dev, create_match_definer, in, out);
+ return err ? err : MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
+}
+
static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.create_flow_table = mlx5_cmd_create_flow_table,
.destroy_flow_table = mlx5_cmd_destroy_flow_table,
@@ -923,6 +982,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds = {
.packet_reformat_dealloc = mlx5_cmd_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -942,6 +1003,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmd_stubs = {
.packet_reformat_dealloc = mlx5_cmd_stub_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_stub_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_stub_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_stub_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_stub_destroy_match_definer,
.set_peer = mlx5_cmd_stub_set_peer,
.create_ns = mlx5_cmd_stub_create_ns,
.destroy_ns = mlx5_cmd_stub_destroy_ns,
@@ -969,6 +1032,7 @@ const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type typ
case FS_FT_NIC_TX:
case FS_FT_RDMA_RX:
case FS_FT_RDMA_TX:
+ case FS_FT_PORT_SEL:
return mlx5_fs_cmd_get_fw_cmds();
default:
return mlx5_fs_cmd_get_stub_cmds();
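
The new create_match_definer op follows the usual general-object convention: a negative errno on failure, otherwise the firmware object id. A hypothetical in-driver caller, assuming the fs_core.h definitions of the root namespace and its cmds table are in scope:

    /* assumes the driver's fs_core.h (struct mlx5_flow_root_namespace, ->cmds) */
    static int toy_setup_definer(struct mlx5_flow_root_namespace *ns,
                                 u16 format_id, u32 *match_mask, int *definer_id)
    {
            int id;

            id = ns->cmds->create_match_definer(ns, format_id, match_mask);
            if (id < 0)
                    return id;      /* negative errno from mlx5_cmd_exec_inout() */

            *definer_id = id;       /* kept for destroy_match_definer() at teardown */
            return 0;
    }
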
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 5ecd33cdc087..220ec632d35a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -97,6 +97,10 @@ struct mlx5_flow_cmds {
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
+ int (*create_match_definer)(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask);
+ int (*destroy_match_definer)(struct mlx5_flow_root_namespace *ns,
+ int definer_id);
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index fe501ba88bea..386ab9a2d490 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -99,6 +99,9 @@
#define LEFTOVERS_NUM_LEVELS 1
#define LEFTOVERS_NUM_PRIOS 1
+#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
+#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
+
#define BY_PASS_PRIO_NUM_LEVELS 1
#define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
LEFTOVERS_NUM_PRIOS)
@@ -206,34 +209,63 @@ static struct init_tree_node egress_root_fs = {
}
};
-#define RDMA_RX_BYPASS_PRIO 0
-#define RDMA_RX_KERNEL_PRIO 1
+enum {
+ RDMA_RX_COUNTERS_PRIO,
+ RDMA_RX_BYPASS_PRIO,
+ RDMA_RX_KERNEL_PRIO,
+};
+
+#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
+#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
+#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
+
static struct init_tree_node rdma_rx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 2,
+ .ar_size = 3,
.children = (struct init_tree_node[]) {
+ [RDMA_RX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
+ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
[RDMA_RX_BYPASS_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
+ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
BY_PASS_PRIO_NUM_LEVELS))),
[RDMA_RX_KERNEL_PRIO] =
- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
+ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
FS_CHAINING_CAPS,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
ADD_MULTIPLE_PRIO(1, 1))),
}
};
+enum {
+ RDMA_TX_COUNTERS_PRIO,
+ RDMA_TX_BYPASS_PRIO,
+};
+
+#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
+#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
+
static struct init_tree_node rdma_tx_root_fs = {
.type = FS_TYPE_NAMESPACE,
- .ar_size = 1,
+ .ar_size = 2,
.children = (struct init_tree_node[]) {
- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
+ [RDMA_TX_COUNTERS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
+ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_TX_BYPASS_PRIO] =
+ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
FS_CHAINING_CAPS_RDMA_TX,
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
+ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
BY_PASS_PRIO_NUM_LEVELS))),
}
};
@@ -2191,6 +2223,10 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
if (steering->fdb_root_ns)
return &steering->fdb_root_ns->ns;
return NULL;
+ case MLX5_FLOW_NAMESPACE_PORT_SEL:
+ if (steering->port_sel_root_ns)
+ return &steering->port_sel_root_ns->ns;
+ return NULL;
case MLX5_FLOW_NAMESPACE_SNIFFER_RX:
if (steering->sniffer_rx_root_ns)
return &steering->sniffer_rx_root_ns->ns;
@@ -2215,6 +2251,12 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
prio = RDMA_RX_KERNEL_PRIO;
} else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
root_ns = steering->rdma_tx_root_ns;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
} else { /* Must be NIC RX */
root_ns = steering->root_ns;
prio = type;
@@ -2596,6 +2638,7 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
steering->fdb_root_ns = NULL;
kfree(steering->fdb_sub_ns);
steering->fdb_sub_ns = NULL;
+ cleanup_root_ns(steering->port_sel_root_ns);
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
cleanup_root_ns(steering->rdma_rx_root_ns);
@@ -2634,6 +2677,21 @@ static int init_sniffer_rx_root_ns(struct mlx5_flow_steering *steering)
return PTR_ERR_OR_ZERO(prio);
}
+#define PORT_SEL_NUM_LEVELS 3
+static int init_port_sel_root_ns(struct mlx5_flow_steering *steering)
+{
+ struct fs_prio *prio;
+
+ steering->port_sel_root_ns = create_root_ns(steering, FS_FT_PORT_SEL);
+ if (!steering->port_sel_root_ns)
+ return -ENOMEM;
+
+ /* Create single prio */
+ prio = fs_create_prio(&steering->port_sel_root_ns->ns, 0,
+ PORT_SEL_NUM_LEVELS);
+ return PTR_ERR_OR_ZERO(prio);
+}
+
static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
{
int err;
@@ -3020,6 +3078,12 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
+ if (MLX5_CAP_FLOWTABLE_PORT_SELECTION(dev, ft_support)) {
+ err = init_port_sel_root_ns(steering);
+ if (err)
+ goto err;
+ }
+
if (MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
MLX5_CAP_FLOWTABLE_RDMA_RX(dev, table_miss_action_domain)) {
err = init_rdma_rx_root_ns(steering);
@@ -3224,6 +3288,52 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL(mlx5_packet_reformat_dealloc);
+int mlx5_get_match_definer_id(struct mlx5_flow_definer *definer)
+{
+ return definer->id;
+}
+
+struct mlx5_flow_definer *
+mlx5_create_match_definer(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type ns_type, u16 format_id,
+ u32 *match_mask)
+{
+ struct mlx5_flow_root_namespace *root;
+ struct mlx5_flow_definer *definer;
+ int id;
+
+ root = get_root_namespace(dev, ns_type);
+ if (!root)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ definer = kzalloc(sizeof(*definer), GFP_KERNEL);
+ if (!definer)
+ return ERR_PTR(-ENOMEM);
+
+ definer->ns_type = ns_type;
+ id = root->cmds->create_match_definer(root, format_id, match_mask);
+ if (id < 0) {
+ mlx5_core_warn(root->dev, "Failed to create match definer (%d)\n", id);
+ kfree(definer);
+ return ERR_PTR(id);
+ }
+ definer->id = id;
+ return definer;
+}
+
+void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
+ struct mlx5_flow_definer *definer)
+{
+ struct mlx5_flow_root_namespace *root;
+
+ root = get_root_namespace(dev, definer->ns_type);
+ if (WARN_ON(!root))
+ return;
+
+ root->cmds->destroy_match_definer(root, definer->id);
+ kfree(definer);
+}
+
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_root_namespace *peer_ns)
{
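
fs_core.c now wraps the command pair in mlx5_create_match_definer(), mlx5_get_match_definer_id() and mlx5_destroy_match_definer(). A hedged usage sketch; the signatures come from the hunk above, while the header location and the pr_debug call are assumptions:

    #include <linux/err.h>
    #include <linux/printk.h>
    #include <linux/mlx5/fs.h>      /* assumed home of the new declarations */

    static int toy_probe_definer(struct mlx5_core_dev *dev, u16 format_id,
                                 u32 *match_mask)
    {
            struct mlx5_flow_definer *definer;
            int id;

            definer = mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
                                                format_id, match_mask);
            if (IS_ERR(definer))
                    return PTR_ERR(definer);

            id = mlx5_get_match_definer_id(definer);
            pr_debug("match definer %d created\n", id);

            /* ... reference 'id' from a HASH_SPLIT flow group, as lag/port_sel.c does ... */

            mlx5_destroy_match_definer(dev, definer);
            return 0;
    }
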
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 98240badc342..7711db245c63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -49,6 +49,11 @@
#define FDB_TC_MAX_PRIO 16
#define FDB_TC_LEVELS_PER_PRIO 2
+struct mlx5_flow_definer {
+ enum mlx5_flow_namespace_type ns_type;
+ u32 id;
+};
+
struct mlx5_modify_hdr {
enum mlx5_flow_namespace_type ns_type;
union {
@@ -97,7 +102,8 @@ enum fs_flow_table_type {
FS_FT_SNIFFER_TX = 0X6,
FS_FT_RDMA_RX = 0X7,
FS_FT_RDMA_TX = 0X8,
- FS_FT_MAX_TYPE = FS_FT_RDMA_TX,
+ FS_FT_PORT_SEL = 0X9,
+ FS_FT_MAX_TYPE = FS_FT_PORT_SEL,
};
enum fs_flow_table_op_mod {
@@ -129,6 +135,7 @@ struct mlx5_flow_steering {
struct mlx5_flow_root_namespace *rdma_rx_root_ns;
struct mlx5_flow_root_namespace *rdma_tx_root_ns;
struct mlx5_flow_root_namespace *egress_root_ns;
+ struct mlx5_flow_root_namespace *port_sel_root_ns;
int esw_egress_acl_vports;
int esw_ingress_acl_vports;
};
@@ -341,7 +348,8 @@ struct mlx5_flow_root_namespace *find_root(struct fs_node *node);
(type == FS_FT_SNIFFER_TX) ? MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) : \
(type == FS_FT_RDMA_RX) ? MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) : \
(type == FS_FT_RDMA_TX) ? MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) : \
- (BUILD_BUG_ON_ZERO(FS_FT_RDMA_TX != FS_FT_MAX_TYPE))\
+ (type == FS_FT_PORT_SEL) ? MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) : \
+ (BUILD_BUG_ON_ZERO(FS_FT_PORT_SEL != FS_FT_MAX_TYPE))\
)
#endif
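
The capability macro keeps its BUILD_BUG_ON_ZERO tail, now pinned to FS_FT_PORT_SEL, so adding a table type that becomes FS_FT_MAX_TYPE without extending the dispatch breaks the build. A toy reduction of the same guard, not driver code:

    #include <linux/build_bug.h>

    enum toy_ft_type { TOY_FT_A, TOY_FT_B, TOY_FT_MAX_TYPE = TOY_FT_B };

    /* the last arm is a compile-time check: adding TOY_FT_C and bumping
     * TOY_FT_MAX_TYPE without extending the dispatch fails to compile
     */
    #define TOY_FT_CAP(type)                                        \
            ((type) == TOY_FT_A ? 1 :                               \
             (type) == TOY_FT_B ? 2 :                               \
             (BUILD_BUG_ON_ZERO(TOY_FT_B != TOY_FT_MAX_TYPE)))
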
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index 18e5aec14641..31c99d53faf7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -40,6 +40,7 @@
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
+#define MLX5_SF_NUM_COUNTERS_BULK 6
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
@@ -146,8 +147,12 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
- return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
- (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+ int num_counters_bulk = mlx5_core_is_sf(dev) ?
+ MLX5_SF_NUM_COUNTERS_BULK :
+ MLX5_SW_MAX_COUNTERS_BULK;
+
+ return min_t(int, num_counters_bulk,
+ (1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
static void update_counter_cache(int index, u32 *bulk_raw_data,
@@ -296,7 +301,7 @@ static struct mlx5_fc *mlx5_fc_acquire(struct mlx5_core_dev *dev, bool aging)
return mlx5_fc_single_alloc(dev);
}
-struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+struct mlx5_fc *mlx5_fc_create_ex(struct mlx5_core_dev *dev, bool aging)
{
struct mlx5_fc *counter = mlx5_fc_acquire(dev, aging);
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
@@ -327,8 +332,6 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
goto err_out_alloc;
llist_add(&counter->addlist, &fc_stats->addlist);
-
- mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
}
return counter;
@@ -337,6 +340,16 @@ err_out_alloc:
mlx5_fc_release(dev, counter);
return ERR_PTR(err);
}
+
+struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging)
+{
+ struct mlx5_fc *counter = mlx5_fc_create_ex(dev, aging);
+ struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+
+ if (aging)
+ mod_delayed_work(fc_stats->wq, &fc_stats->work, 0);
+ return counter;
+}
EXPORT_SYMBOL(mlx5_fc_create);
u32 mlx5_fc_id(struct mlx5_fc *counter)
@@ -497,8 +510,7 @@ static struct mlx5_fc_bulk *mlx5_fc_bulk_create(struct mlx5_core_dev *dev)
alloc_bitmask = MLX5_CAP_GEN(dev, flow_counter_bulk_alloc);
bulk_len = alloc_bitmask > 0 ? MLX5_FC_BULK_NUM_FCS(alloc_bitmask) : 1;
- bulk = kvzalloc(sizeof(*bulk) + bulk_len * sizeof(struct mlx5_fc),
- GFP_KERNEL);
+ bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
if (!bulk)
goto err_alloc_bulk;
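
The bulk allocation above switches to struct_size(), which folds the header plus flexible-array sizing into one overflow-checked expression. A self-contained sketch with a stand-in struct:

    #include <linux/overflow.h>     /* struct_size() */
    #include <linux/mm.h>           /* kvzalloc() */

    struct toy_bulk {
            int bulk_len;
            u64 fcs[];              /* flexible array, like mlx5_fc_bulk::fcs */
    };

    static struct toy_bulk *toy_bulk_create(int bulk_len)
    {
            struct toy_bulk *bulk;

            /*
             * struct_size() computes sizeof(*bulk) + bulk_len * sizeof(bulk->fcs[0])
             * and saturates on overflow, so a huge bulk_len fails the allocation
             * instead of producing an undersized buffer.
             */
            bulk = kvzalloc(struct_size(bulk, fcs, bulk_len), GFP_KERNEL);
            if (bulk)
                    bulk->bulk_len = bulk_len;
            return bulk;
    }
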
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 016d26f809a5..2d8406fab844 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -35,6 +35,7 @@
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
+#include "lib/tout.h"
#include "accel/tls.h"
enum {
@@ -148,6 +149,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (err)
return err;
+ if (MLX5_CAP_GEN(dev, port_selection_cap)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
+ if (err)
+ return err;
+ }
+
if (MLX5_CAP_GEN(dev, hca_cap_2)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
if (err)
@@ -262,6 +269,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, shampo)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_SHAMPO);
+ if (err)
+ return err;
+ }
+
return 0;
}
@@ -317,10 +330,9 @@ int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev)
return 0;
}
-#define MLX5_FAST_TEARDOWN_WAIT_MS 3000
int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FAST_TEARDOWN_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, TEARDOWN);
u32 out[MLX5_ST_SZ_DW(teardown_hca_out)] = {};
u32 in[MLX5_ST_SZ_DW(teardown_hca_in)] = {};
int state;
@@ -618,17 +630,18 @@ static void mlx5_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
fwhandle, 0);
}
-#define MLX5_FSM_REACTIVATE_TOUT 5000 /* msecs */
static int mlx5_fsm_reactivate(struct mlxfw_dev *mlxfw_dev, u8 *status)
{
- unsigned long exp_time = jiffies + msecs_to_jiffies(MLX5_FSM_REACTIVATE_TOUT);
struct mlx5_mlxfw_dev *mlx5_mlxfw_dev =
container_of(mlxfw_dev, struct mlx5_mlxfw_dev, mlxfw_dev);
struct mlx5_core_dev *dev = mlx5_mlxfw_dev->mlx5_core_dev;
u32 out[MLX5_ST_SZ_DW(mirc_reg)];
u32 in[MLX5_ST_SZ_DW(mirc_reg)];
+ unsigned long exp_time;
int err;
+ exp_time = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FSM_REACTIVATE));
+
if (!MLX5_CAP_MCAM_REG2(dev, mirc))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 106b50e42b46..0b0234f9d694 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -3,6 +3,7 @@
#include "fw_reset.h"
#include "diag/fw_tracer.h"
+#include "lib/tout.h"
enum {
MLX5_FW_RESET_FLAGS_RESET_REQUESTED,
@@ -228,8 +229,6 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
mlx5_core_warn(dev, "PCI Sync FW Update Reset Ack. Device reset is expected.\n");
}
-#define MLX5_PCI_LINK_UP_TIMEOUT 2000
-
static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
@@ -286,7 +285,7 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
goto restore;
}
- timeout = jiffies + msecs_to_jiffies(MLX5_PCI_LINK_UP_TIMEOUT);
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, PCI_TOGGLE));
do {
err = pci_read_config_word(bridge, cap + PCI_EXP_LNKSTA, &reg16);
if (err)
@@ -299,8 +298,8 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
if (reg16 & PCI_EXP_LNKSTA_DLLLA) {
mlx5_core_info(dev, "PCI Link up\n");
} else {
- mlx5_core_err(dev, "PCI link not ready (0x%04x) after %d ms\n",
- reg16, MLX5_PCI_LINK_UP_TIMEOUT);
+ mlx5_core_err(dev, "PCI link not ready (0x%04x) after %llu ms\n",
+ reg16, mlx5_tout_ms(dev, PCI_TOGGLE));
err = -ETIMEDOUT;
}
@@ -395,16 +394,16 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
return NOTIFY_OK;
}
-#define MLX5_FW_RESET_TIMEOUT_MSEC 5000
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
- unsigned long timeout = msecs_to_jiffies(MLX5_FW_RESET_TIMEOUT_MSEC);
+ unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
+ unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
int err;
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
- mlx5_core_warn(dev, "FW sync reset timeout after %d seconds\n",
- MLX5_FW_RESET_TIMEOUT_MSEC / 1000);
+ mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
+ pci_sync_update_timeout / 1000);
err = -ETIMEDOUT;
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 037e18dd4be0..64f1abc4dc36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -36,14 +36,15 @@
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/mlx5/driver.h>
+#include <linux/kern_levels.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
#include "lib/pci_vsc.h"
+#include "lib/tout.h"
#include "diag/fw_tracer.h"
enum {
- MLX5_HEALTH_POLL_INTERVAL = 2 * HZ,
MAX_MISSES = 3,
};
@@ -74,6 +75,11 @@ enum {
MLX5_SENSOR_FW_SYND_RFR = 5,
};
+enum {
+ MLX5_SEVERITY_MASK = 0x7,
+ MLX5_SEVERITY_VALID_MASK = 0x8,
+};
+
u8 mlx5_get_nic_state(struct mlx5_core_dev *dev)
{
return (ioread32be(&dev->iseg->cmdq_addr_l_sz) >> 8) & 7;
@@ -98,12 +104,19 @@ static bool sensor_pci_not_working(struct mlx5_core_dev *dev)
return (ioread32be(&h->fw_ver) == 0xffffffff);
}
+static int mlx5_health_get_rfr(u8 rfr_severity)
+{
+ return rfr_severity >> MLX5_RFR_BIT_OFFSET;
+}
+
static bool sensor_fw_synd_rfr(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- u32 rfr = ioread32be(&h->rfr) >> MLX5_RFR_OFFSET;
u8 synd = ioread8(&h->synd);
+ u8 rfr;
+
+ rfr = mlx5_health_get_rfr(ioread8(&h->rfr_severity));
if (rfr && synd)
mlx5_core_dbg(dev, "FW requests reset, synd: %d\n", synd);
@@ -219,11 +232,9 @@ unlock:
mutex_unlock(&dev->intf_state_mutex);
}
-#define MLX5_CRDUMP_WAIT_MS 60000
-#define MLX5_FW_RESET_WAIT_MS 1000
void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
{
- unsigned long end, delay_ms = MLX5_FW_RESET_WAIT_MS;
+ unsigned long end, delay_ms = mlx5_tout_ms(dev, PCI_TOGGLE);
int lock = -EBUSY;
mutex_lock(&dev->intf_state_mutex);
@@ -237,7 +248,7 @@ void mlx5_error_sw_reset(struct mlx5_core_dev *dev)
lock = lock_sem_sw_reset(dev, true);
if (lock == -EBUSY) {
- delay_ms = MLX5_CRDUMP_WAIT_MS;
+ delay_ms = mlx5_tout_ms(dev, FULL_CRDUMP);
goto recover_from_sw_reset;
}
/* Execute SW reset */
@@ -307,13 +318,11 @@ static void mlx5_handle_bad_state(struct mlx5_core_dev *dev)
mlx5_disable_device(dev);
}
-/* How much time to wait until health resetting the driver (in msecs) */
-#define MLX5_RECOVERY_WAIT_MSECS 60000
int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev)
{
unsigned long end;
- end = jiffies + msecs_to_jiffies(MLX5_RECOVERY_WAIT_MSECS);
+ end = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, FW_RESET));
while (sensor_pci_not_working(dev)) {
if (time_after(jiffies, end))
return -ETIMEDOUT;
@@ -370,35 +379,69 @@ static const char *hsynd_str(u8 synd)
}
}
+static const char *mlx5_loglevel_str(int level)
+{
+ switch (level) {
+ case LOGLEVEL_EMERG:
+ return "EMERGENCY";
+ case LOGLEVEL_ALERT:
+ return "ALERT";
+ case LOGLEVEL_CRIT:
+ return "CRITICAL";
+ case LOGLEVEL_ERR:
+ return "ERROR";
+ case LOGLEVEL_WARNING:
+ return "WARNING";
+ case LOGLEVEL_NOTICE:
+ return "NOTICE";
+ case LOGLEVEL_INFO:
+ return "INFO";
+ case LOGLEVEL_DEBUG:
+ return "DEBUG";
+ }
+ return "Unknown log level";
+}
+
+static int mlx5_health_get_severity(u8 rfr_severity)
+{
+ return rfr_severity & MLX5_SEVERITY_VALID_MASK ?
+ rfr_severity & MLX5_SEVERITY_MASK : LOGLEVEL_ERR;
+}
+
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
- char fw_str[18];
- u32 fw;
+ u8 rfr_severity;
+ int severity;
int i;
/* If the syndrome is 0, the device is OK and no need to print buffer */
if (!ioread8(&h->synd))
return;
+ rfr_severity = ioread8(&h->rfr_severity);
+ severity = mlx5_health_get_severity(rfr_severity);
+ mlx5_log(dev, severity, "Health issue observed, %s, severity(%d) %s:\n",
+ hsynd_str(ioread8(&h->synd)), severity, mlx5_loglevel_str(severity));
+
for (i = 0; i < ARRAY_SIZE(h->assert_var); i++)
- mlx5_core_err(dev, "assert_var[%d] 0x%08x\n", i,
- ioread32be(h->assert_var + i));
-
- mlx5_core_err(dev, "assert_exit_ptr 0x%08x\n",
- ioread32be(&h->assert_exit_ptr));
- mlx5_core_err(dev, "assert_callra 0x%08x\n",
- ioread32be(&h->assert_callra));
- sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
- mlx5_core_err(dev, "fw_ver %s\n", fw_str);
- mlx5_core_err(dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
- mlx5_core_err(dev, "irisc_index %d\n", ioread8(&h->irisc_index));
- mlx5_core_err(dev, "synd 0x%x: %s\n", ioread8(&h->synd),
- hsynd_str(ioread8(&h->synd)));
- mlx5_core_err(dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
- fw = ioread32be(&h->fw_ver);
- mlx5_core_err(dev, "raw fw_ver 0x%08x\n", fw);
+ mlx5_log(dev, severity, "assert_var[%d] 0x%08x\n", i,
+ ioread32be(h->assert_var + i));
+
+ mlx5_log(dev, severity, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
+ mlx5_log(dev, severity, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
+ mlx5_log(dev, severity, "fw_ver %d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev),
+ fw_rev_sub(dev));
+ mlx5_log(dev, severity, "time %u\n", ioread32be(&h->time));
+ mlx5_log(dev, severity, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
+ mlx5_log(dev, severity, "rfr %d\n", mlx5_health_get_rfr(rfr_severity));
+ mlx5_log(dev, severity, "severity %d (%s)\n", severity, mlx5_loglevel_str(severity));
+ mlx5_log(dev, severity, "irisc_index %d\n", ioread8(&h->irisc_index));
+ mlx5_log(dev, severity, "synd 0x%x: %s\n", ioread8(&h->synd),
+ hsynd_str(ioread8(&h->synd)));
+ mlx5_log(dev, severity, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ mlx5_log(dev, severity, "raw fw_ver 0x%08x\n", ioread32be(&h->fw_ver));
}
static int
@@ -447,6 +490,7 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
{
struct mlx5_core_health *health = &dev->priv.health;
struct health_buffer __iomem *h = health->health;
+ u8 rfr_severity;
int err;
int i;
@@ -479,9 +523,19 @@ mlx5_fw_reporter_heath_buffer_data_put(struct mlx5_core_dev *dev,
ioread32be(&h->assert_callra));
if (err)
return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "time", ioread32be(&h->time));
+ if (err)
+ return err;
err = devlink_fmsg_u32_pair_put(fmsg, "hw_id", ioread32be(&h->hw_id));
if (err)
return err;
+ rfr_severity = ioread8(&h->rfr_severity);
+ err = devlink_fmsg_u8_pair_put(fmsg, "rfr", mlx5_health_get_rfr(rfr_severity));
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "severity", mlx5_health_get_severity(rfr_severity));
+ if (err)
+ return err;
err = devlink_fmsg_u8_pair_put(fmsg, "irisc_index",
ioread8(&h->irisc_index));
if (err)
@@ -674,13 +728,13 @@ static void mlx5_fw_reporters_destroy(struct mlx5_core_dev *dev)
devlink_health_reporter_destroy(health->fw_fatal_reporter);
}
-static unsigned long get_next_poll_jiffies(void)
+static unsigned long get_next_poll_jiffies(struct mlx5_core_dev *dev)
{
unsigned long next;
get_random_bytes(&next, sizeof(next));
next %= HZ;
- next += jiffies + MLX5_HEALTH_POLL_INTERVAL;
+ next += jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL));
return next;
}
@@ -698,6 +752,31 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
+#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
+static void mlx5_health_log_ts_update(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ u32 out[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(mrtc_reg)] = {};
+ struct mlx5_core_health *health;
+ struct mlx5_core_dev *dev;
+ struct mlx5_priv *priv;
+ u64 now_us;
+
+ health = container_of(dwork, struct mlx5_core_health, update_fw_log_ts_work);
+ priv = container_of(health, struct mlx5_priv, health);
+ dev = container_of(priv, struct mlx5_core_dev, priv);
+
+ now_us = ktime_to_us(ktime_get_real());
+
+ MLX5_SET(mrtc_reg, in, time_h, now_us >> 32);
+ MLX5_SET(mrtc_reg, in, time_l, now_us & 0xFFFFFFFF);
+ mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_MRTC, 0, 1);
+
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work,
+ msecs_to_jiffies(MLX5_MSEC_PER_HOUR));
+}
+
static void poll_health(struct timer_list *t)
{
struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
@@ -740,11 +819,12 @@ static void poll_health(struct timer_list *t)
queue_work(health->wq, &health->report_work);
out:
- mod_timer(&health->timer, get_next_poll_jiffies());
+ mod_timer(&health->timer, get_next_poll_jiffies(dev));
}
void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
+ u64 poll_interval_ms = mlx5_tout_ms(dev, HEALTH_POLL_INTERVAL);
struct mlx5_core_health *health = &dev->priv.health;
timer_setup(&health->timer, poll_health, 0);
@@ -753,7 +833,7 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
+ health->timer.expires = jiffies + msecs_to_jiffies(poll_interval_ms);
add_timer(&health->timer);
}
@@ -779,6 +859,7 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev)
spin_lock_irqsave(&health->wq_lock, flags);
set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
spin_unlock_irqrestore(&health->wq_lock, flags);
+ cancel_delayed_work_sync(&health->update_fw_log_ts_work);
cancel_work_sync(&health->report_work);
cancel_work_sync(&health->fatal_report_work);
}
@@ -794,6 +875,7 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
+ cancel_delayed_work_sync(&health->update_fw_log_ts_work);
destroy_workqueue(health->wq);
mlx5_fw_reporters_destroy(dev);
}
@@ -819,6 +901,9 @@ int mlx5_health_init(struct mlx5_core_dev *dev)
spin_lock_init(&health->wq_lock);
INIT_WORK(&health->fatal_report_work, mlx5_fw_fatal_reporter_err_work);
INIT_WORK(&health->report_work, mlx5_fw_reporter_err_work);
+ INIT_DELAYED_WORK(&health->update_fw_log_ts_work, mlx5_health_log_ts_update);
+ if (mlx5_core_is_pf(dev))
+ queue_delayed_work(health->wq, &health->update_fw_log_ts_work, 0);
return 0;
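
health.c now keeps the firmware log timestamp fresh with a delayed work item that re-queues itself hourly and is cancelled in the drain and cleanup paths. The same shape in miniature, using only the generic workqueue API; all names here are illustrative:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static struct workqueue_struct *toy_wq;
    static struct delayed_work toy_work;

    static void toy_work_fn(struct work_struct *work)
    {
            struct delayed_work *dwork = to_delayed_work(work);

            /* ... push the current wall-clock time to the device here ... */

            /* re-arm; runs hourly until cancel_delayed_work_sync() in cleanup */
            queue_delayed_work(toy_wq, dwork, msecs_to_jiffies(60 * 60 * 1000));
    }

    static int toy_start(void)
    {
            toy_wq = create_singlethread_workqueue("toy_health");
            if (!toy_wq)
                    return -ENOMEM;

            INIT_DELAYED_WORK(&toy_work, toy_work_fn);
            queue_delayed_work(toy_wq, &toy_work, 0);       /* first run right away */
            return 0;
    }
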
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 0c8594c7df21..962d41418ce7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -217,6 +217,32 @@ static int mlx5i_get_link_ksettings(struct net_device *netdev,
return 0;
}
+#ifdef CONFIG_MLX5_EN_RXNFC
+static u32 mlx5i_flow_type_mask(u32 flow_type)
+{
+ return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+}
+
+static int mlx5i_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct ethtool_rx_flow_spec *fs = &cmd->fs;
+
+ if (mlx5i_flow_type_mask(fs->flow_type) == ETHER_FLOW)
+ return -EINVAL;
+
+ return mlx5e_ethtool_set_rxnfc(priv, cmd);
+}
+
+static int mlx5i_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rule_locs)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+
+ return mlx5e_ethtool_get_rxnfc(priv, info, rule_locs);
+}
+#endif
+
const struct ethtool_ops mlx5i_ethtool_ops = {
.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
ETHTOOL_COALESCE_MAX_FRAMES |
@@ -233,6 +259,10 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_coalesce = mlx5i_get_coalesce,
.set_coalesce = mlx5i_set_coalesce,
.get_ts_info = mlx5i_get_ts_info,
+#ifdef CONFIG_MLX5_EN_RXNFC
+ .get_rxnfc = mlx5i_get_rxnfc,
+ .set_rxnfc = mlx5i_set_rxnfc,
+#endif
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
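
mlx5i_set_rxnfc() above rejects ETHER_FLOW only after masking the ethtool flag bits out of flow_type; those flags ride in the same field as the base flow type. A small sketch of that check using only uapi ethtool definitions:

    #include <linux/ethtool.h>

    static bool toy_is_ether_flow(const struct ethtool_rx_flow_spec *fs)
    {
            /* FLOW_EXT, FLOW_MAC_EXT and FLOW_RSS share the flow_type field with
             * the base type, so strip them before comparing against ETHER_FLOW
             */
            u32 base = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);

            return base == ETHER_FLOW;
    }
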
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 269ebb53eda6..ea1efdecc88c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -67,7 +67,7 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
MLX5I_PARAMS_DEFAULT_LOG_RQ_SIZE;
- params->lro_en = false;
+ params->packet_merge.type = MLX5E_PACKET_MERGE_NONE;
params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN;
params->tunneled_offload_en = false;
}
@@ -219,7 +219,7 @@ void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
int mlx5i_create_underlay_qp(struct mlx5e_priv *priv)
{
- unsigned char *dev_addr = priv->netdev->dev_addr;
+ const unsigned char *dev_addr = priv->netdev->dev_addr;
u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
u32 in[MLX5_ST_SZ_DW(create_qp_in)] = {};
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -336,6 +336,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
goto err_destroy_arfs_tables;
}
+ mlx5e_ethtool_init_steering(priv);
+
return 0;
err_destroy_arfs_tables:
@@ -348,12 +350,12 @@ static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
+ mlx5e_ethtool_cleanup_steering(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5e_lro_param lro_param;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
@@ -368,9 +370,9 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
goto err_destroy_q_counters;
}
- lro_param = mlx5e_get_lro_param(&priv->channels.params);
err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0,
- priv->max_nch, priv->drop_rq.rqn, &lro_param,
+ priv->max_nch, priv->drop_rq.rqn,
+ &priv->channels.params.packet_merge,
priv->channels.params.num_channels);
if (err)
goto err_close_drop_rq;
@@ -472,11 +474,13 @@ int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
+ u8 addr_mod[3];
/* Set dev address using underlay QP */
- dev->dev_addr[1] = (ipriv->qpn >> 16) & 0xff;
- dev->dev_addr[2] = (ipriv->qpn >> 8) & 0xff;
- dev->dev_addr[3] = (ipriv->qpn) & 0xff;
+ addr_mod[0] = (ipriv->qpn >> 16) & 0xff;
+ addr_mod[1] = (ipriv->qpn >> 8) & 0xff;
+ addr_mod[2] = (ipriv->qpn) & 0xff;
+ dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
/* Add QPN to net-device mapping to HT */
mlx5i_pkey_add_qpn(dev, ipriv->qpn);
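
IPoIB no longer pokes bytes into dev->dev_addr directly; it stages them in a local buffer and applies them with dev_addr_mod() so the networking core keeps its own view of the address consistent. A minimal sketch of the same helper call; the wrapper name is hypothetical, dev_addr_mod() is the real helper:

    #include <linux/netdevice.h>

    static void toy_set_qpn_in_dev_addr(struct net_device *dev, u32 qpn)
    {
            u8 addr_mod[3] = {
                    (qpn >> 16) & 0xff,
                    (qpn >> 8) & 0xff,
                    qpn & 0xff,
            };

            /* update bytes 1..3 of dev->dev_addr through the core helper */
            dev_addr_mod(dev, 1, addr_mod, sizeof(addr_mod));
    }
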
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index ca5690b0a7ab..48d2ea690d7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -38,7 +38,7 @@
#include "mlx5_core.h"
#include "eswitch.h"
#include "lag.h"
-#include "lag_mp.h"
+#include "mp.h"
/* General purpose, use for short periods of time.
* Beware of lock dependencies (preferably, no locks should be acquired
@@ -47,16 +47,21 @@
static DEFINE_SPINLOCK(lag_lock);
static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1,
- u8 remap_port2, bool shared_fdb)
+ u8 remap_port2, bool shared_fdb, u8 flags)
{
u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {};
void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx);
MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
- MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb);
+ if (!(flags & MLX5_LAG_FLAG_HASH_BASED)) {
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1);
+ MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2);
+ } else {
+ MLX5_SET(lagc, lag_ctx, port_select_mode,
+ MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT);
+ }
return mlx5_cmd_exec_in(dev, create_lag, in);
}
@@ -199,6 +204,15 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker,
*port1 = 2;
}
+static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 v2p_port1, u8 v2p_port2)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+
+ if (ldev->flags & MLX5_LAG_FLAG_HASH_BASED)
+ return mlx5_lag_port_sel_modify(ldev, v2p_port1, v2p_port2);
+ return mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
+}
+
void mlx5_modify_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker)
{
@@ -211,39 +225,56 @@ void mlx5_modify_lag(struct mlx5_lag *ldev,
if (v2p_port1 != ldev->v2p_map[MLX5_LAG_P1] ||
v2p_port2 != ldev->v2p_map[MLX5_LAG_P2]) {
+ err = _mlx5_modify_lag(ldev, v2p_port1, v2p_port2);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to modify LAG (%d)\n",
+ err);
+ return;
+ }
ldev->v2p_map[MLX5_LAG_P1] = v2p_port1;
ldev->v2p_map[MLX5_LAG_P2] = v2p_port2;
-
mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d",
ldev->v2p_map[MLX5_LAG_P1],
ldev->v2p_map[MLX5_LAG_P2]);
-
- err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2);
- if (err)
- mlx5_core_err(dev0,
- "Failed to modify LAG (%d)\n",
- err);
}
}
+static void mlx5_lag_set_port_sel_mode(struct mlx5_lag *ldev,
+ struct lag_tracker *tracker, u8 *flags)
+{
+ bool roce_lag = !!(*flags & MLX5_LAG_FLAG_ROCE);
+ struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1];
+
+ if (roce_lag ||
+ !MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) ||
+ tracker->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+ return;
+ *flags |= MLX5_LAG_FLAG_HASH_BASED;
+}
+
+static char *get_str_port_sel_mode(u8 flags)
+{
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ return "hash";
+ return "queue_affinity";
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
- bool shared_fdb)
+ bool shared_fdb, u8 flags)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
- mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
- &ldev->v2p_map[MLX5_LAG_P2]);
-
- mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d",
+ mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d mode:%s",
ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2],
- shared_fdb);
+ shared_fdb, get_str_port_sel_mode(flags));
err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1],
- ldev->v2p_map[MLX5_LAG_P2], shared_fdb);
+ ldev->v2p_map[MLX5_LAG_P2], shared_fdb, flags);
if (err) {
mlx5_core_err(dev0,
"Failed to create LAG (%d)\n",
@@ -279,16 +310,32 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
int err;
- err = mlx5_create_lag(ldev, tracker, shared_fdb);
+ mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1],
+ &ldev->v2p_map[MLX5_LAG_P2]);
+ mlx5_lag_set_port_sel_mode(ldev, tracker, &flags);
+ if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ err = mlx5_lag_port_sel_create(ldev, tracker->hash_type,
+ ldev->v2p_map[MLX5_LAG_P1],
+ ldev->v2p_map[MLX5_LAG_P2]);
+ if (err) {
+ mlx5_core_err(dev0,
+ "Failed to create LAG port selection(%d)\n",
+ err);
+ return err;
+ }
+ }
+
+ err = mlx5_create_lag(ldev, tracker, shared_fdb, flags);
if (err) {
- if (roce_lag) {
+ if (flags & MLX5_LAG_FLAG_HASH_BASED)
+ mlx5_lag_port_sel_destroy(ldev);
+ if (roce_lag)
mlx5_core_err(dev0,
"Failed to activate RoCE LAG\n");
- } else {
+ else
mlx5_core_err(dev0,
"Failed to activate VF LAG\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
- }
return err;
}
@@ -302,6 +349,7 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
+ u8 flags = ldev->flags;
int err;
ldev->flags &= ~MLX5_LAG_MODE_FLAGS;
@@ -324,6 +372,8 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev)
"Failed to deactivate VF LAG; driver restart required\n"
"Make sure all VFs are unbound prior to VF LAG activation or deactivation\n");
}
+ } else if (flags & MLX5_LAG_FLAG_HASH_BASED) {
+ mlx5_lag_port_sel_destroy(ldev);
}
return err;
@@ -442,6 +492,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
if (!mlx5_lag_is_ready(ldev)) {
do_bond = false;
} else {
+ /* VF LAG is in multipath mode, ignore bond change requests */
+ if (mlx5_lag_is_multipath(dev0))
+ return;
+
tracker = ldev->tracker;
do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev);
@@ -588,8 +642,10 @@ static int mlx5_handle_changeupper_event(struct mlx5_lag *ldev,
if (!(bond_status & 0x3))
return 0;
- if (lag_upper_info)
+ if (lag_upper_info) {
tracker->tx_type = lag_upper_info->tx_type;
+ tracker->hash_type = lag_upper_info->hash_type;
+ }
/* Determine bonding status:
* A device is considered bonded if both its physical ports are slaves
@@ -688,7 +744,7 @@ static void mlx5_ldev_add_netdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev,
struct net_device *netdev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
@@ -718,7 +774,7 @@ static void mlx5_ldev_remove_netdev(struct mlx5_lag *ldev,
static void mlx5_ldev_add_mdev(struct mlx5_lag *ldev,
struct mlx5_core_dev *dev)
{
- unsigned int fn = PCI_FUNC(dev->pdev->devfn);
+ unsigned int fn = mlx5_get_dev_index(dev);
if (fn >= MLX5_MAX_PORTS)
return;
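
Per the lag.c changes above, hash-based (port selection flow table) steering is chosen only when the bond is not RoCE LAG, the device reports port_select_flow_table, and the bond's TX policy is a hash. Condensed into a standalone predicate for illustration; the parameter names are mine, not the driver's:

    #include <linux/netdevice.h>    /* enum netdev_lag_tx_type */

    static bool toy_use_hash_mode(bool roce_lag, bool has_port_select_ft,
                                  enum netdev_lag_tx_type tx_type)
    {
            return !roce_lag && has_port_select_ft &&
                   tx_type == NETDEV_LAG_TX_TYPE_HASH;
    }
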
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index d4bae528954e..e5d231c31b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -5,7 +5,8 @@
#define __MLX5_LAG_H__
#include "mlx5_core.h"
-#include "lag_mp.h"
+#include "mp.h"
+#include "port_sel.h"
enum {
MLX5_LAG_P1,
@@ -17,10 +18,12 @@ enum {
MLX5_LAG_FLAG_SRIOV = 1 << 1,
MLX5_LAG_FLAG_MULTIPATH = 1 << 2,
MLX5_LAG_FLAG_READY = 1 << 3,
+ MLX5_LAG_FLAG_HASH_BASED = 1 << 4,
};
#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV |\
- MLX5_LAG_FLAG_MULTIPATH)
+ MLX5_LAG_FLAG_MULTIPATH | \
+ MLX5_LAG_FLAG_HASH_BASED)
struct lag_func {
struct mlx5_core_dev *dev;
@@ -32,6 +35,7 @@ struct lag_tracker {
enum netdev_lag_tx_type tx_type;
struct netdev_lag_lower_state_info netdev_state[MLX5_MAX_PORTS];
unsigned int is_bonded:1;
+ enum netdev_lag_hash hash_type;
};
/* LAG data of a ConnectX card.
@@ -49,6 +53,7 @@ struct mlx5_lag {
struct delayed_work bond_work;
struct notifier_block nb;
struct lag_mp lag_mp;
+ struct mlx5_lag_port_sel port_sel;
};
static inline struct mlx5_lag *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index f239b352a58a..bf4d3cbefa63 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -3,26 +3,29 @@
#include <linux/netdevice.h>
#include <net/nexthop.h>
-#include "lag.h"
-#include "lag_mp.h"
+#include "lag/lag.h"
+#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
#include "lib/mlx5.h"
+static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
+{
+ return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
+}
+
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
if (!mlx5_lag_is_ready(ldev))
return false;
+ if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
+ return false;
+
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
ldev->pf[MLX5_LAG_P2].dev);
}
-static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
-{
- return !!(ldev->flags & MLX5_LAG_FLAG_MULTIPATH);
-}
-
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
index 729c839397a8..57af962cad29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.h
@@ -24,12 +24,14 @@ struct lag_mp {
void mlx5_lag_mp_reset(struct mlx5_lag *ldev);
int mlx5_lag_mp_init(struct mlx5_lag *ldev);
void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev);
+bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
#else /* CONFIG_MLX5_ESWITCH */
static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {};
static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; }
static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {}
+static inline bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev) { return false; }
#endif /* CONFIG_MLX5_ESWITCH */
#endif /* __MLX5_LAG_MP_H__ */
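
mp.h keeps the usual config-stub pattern for the newly exported mlx5_lag_is_multipath(): a real prototype when CONFIG_MLX5_ESWITCH is set, a static inline returning false otherwise, so callers such as mlx5_do_bond() need no #ifdef. The same shape with hypothetical names:

    #include <linux/types.h>

    struct toy_dev;

    #ifdef CONFIG_TOY_FEATURE
    bool toy_feature_active(struct toy_dev *dev);
    #else
    static inline bool toy_feature_active(struct toy_dev *dev) { return false; }
    #endif
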
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
new file mode 100644
index 000000000000..adc836b3d857
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -0,0 +1,611 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#include <linux/netdevice.h>
+#include "lag.h"
+
+enum {
+ MLX5_LAG_FT_LEVEL_TTC,
+ MLX5_LAG_FT_LEVEL_INNER_TTC,
+ MLX5_LAG_FT_LEVEL_DEFINER,
+};
+
+static struct mlx5_flow_group *
+mlx5_create_hash_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_definer *definer)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_group *fg;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return ERR_PTR(-ENOMEM);
+
+ MLX5_SET(create_flow_group_in, in, match_definer_id,
+ mlx5_get_match_definer_id(definer));
+ MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_MAX_PORTS - 1);
+ MLX5_SET(create_flow_group_in, in, group_type,
+ MLX5_CREATE_FLOW_GROUP_IN_GROUP_TYPE_HASH_SPLIT);
+
+ fg = mlx5_create_flow_group(ft, in);
+ kvfree(in);
+ return fg;
+}
+
+static int mlx5_lag_create_port_sel_table(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer,
+ u8 port1, u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_flow_table_attr ft_attr = {};
+ struct mlx5_flow_destination dest = {};
+ MLX5_DECLARE_FLOW_ACT(flow_act);
+ struct mlx5_flow_namespace *ns;
+ int err, i;
+
+ ft_attr.max_fte = MLX5_MAX_PORTS;
+ ft_attr.level = MLX5_LAG_FT_LEVEL_DEFINER;
+
+ ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_PORT_SEL);
+ if (!ns) {
+ mlx5_core_warn(dev, "Failed to get port selection namespace\n");
+ return -EOPNOTSUPP;
+ }
+
+ lag_definer->ft = mlx5_create_flow_table(ns, &ft_attr);
+ if (IS_ERR(lag_definer->ft)) {
+ mlx5_core_warn(dev, "Failed to create port selection table\n");
+ return PTR_ERR(lag_definer->ft);
+ }
+
+ lag_definer->fg = mlx5_create_hash_flow_group(lag_definer->ft,
+ lag_definer->definer);
+ if (IS_ERR(lag_definer->fg)) {
+ err = PTR_ERR(lag_definer->fg);
+ goto destroy_ft;
+ }
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+ flow_act.flags |= FLOW_ACT_NO_APPEND;
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ u8 affinity = i == 0 ? port1 : port2;
+
+ dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[affinity - 1].dev,
+ vhca_id);
+ lag_definer->rules[i] = mlx5_add_flow_rules(lag_definer->ft,
+ NULL, &flow_act,
+ &dest, 1);
+ if (IS_ERR(lag_definer->rules[i])) {
+ err = PTR_ERR(lag_definer->rules[i]);
+ while (i--)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ goto destroy_fg;
+ }
+ }
+
+ return 0;
+
+destroy_fg:
+ mlx5_destroy_flow_group(lag_definer->fg);
+destroy_ft:
+ mlx5_destroy_flow_table(lag_definer->ft);
+ return err;
+}
+
+static int mlx5_lag_set_definer_inner(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt)
+{
+ int format_id;
+ u8 *ipv6;
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 31;
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_31, match_definer_mask,
+ inner_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_31, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 32;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_32, match_definer_mask,
+ inner_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_32, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ default:
+ format_id = 23;
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_23, match_definer_mask,
+ inner_smac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static int mlx5_lag_set_definer(u32 *match_definer_mask,
+ enum mlx5_traffic_types tt, bool tunnel,
+ enum netdev_lag_hash hash)
+{
+ int format_id;
+ u8 *ipv6;
+
+ if (tunnel)
+ return mlx5_lag_set_definer_inner(match_definer_mask, tt);
+
+ switch (tt) {
+ case MLX5_TT_IPV4_UDP:
+ case MLX5_TT_IPV4_TCP:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l4_dport);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV4:
+ format_id = 22;
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_l3_type);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_smac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_src_addr);
+ MLX5_SET_TO_ONES(match_definer_format_22, match_definer_mask,
+ outer_ip_dest_addr);
+ break;
+ case MLX5_TT_IPV6_TCP:
+ case MLX5_TT_IPV6_UDP:
+ format_id = 29;
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_sport);
+ MLX5_SET_TO_ONES(match_definer_format_29, match_definer_mask,
+ outer_l4_dport);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_29, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ break;
+ case MLX5_TT_IPV6:
+ format_id = 30;
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_dest_addr);
+ memset(ipv6, 0xff, 16);
+ ipv6 = MLX5_ADDR_OF(match_definer_format_30, match_definer_mask,
+ outer_ip_src_addr);
+ memset(ipv6, 0xff, 16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_dmac_15_0);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_30, match_definer_mask,
+ outer_smac_15_0);
+ break;
+ default:
+ format_id = 0;
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_smac_15_0);
+
+ if (hash == NETDEV_LAG_HASH_VLAN_SRCMAC) {
+ MLX5_SET_TO_ONES(match_definer_format_0,
+ match_definer_mask,
+ outer_first_vlan_vid);
+ break;
+ }
+
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_ethertype);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_47_16);
+ MLX5_SET_TO_ONES(match_definer_format_0, match_definer_mask,
+ outer_dmac_15_0);
+ break;
+ }
+
+ return format_id;
+}
+
+static struct mlx5_lag_definer *
+mlx5_lag_create_definer(struct mlx5_lag *ldev, enum netdev_lag_hash hash,
+ enum mlx5_traffic_types tt, bool tunnel, u8 port1,
+ u8 port2)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_definer *lag_definer;
+ u32 *match_definer_mask;
+ int format_id, err;
+
+ lag_definer = kzalloc(sizeof(*lag_definer), GFP_KERNEL);
+ if (!lag_definer)
+ return ERR_PTR(-ENOMEM);
+
+ match_definer_mask = kvzalloc(MLX5_FLD_SZ_BYTES(match_definer,
+ match_mask),
+ GFP_KERNEL);
+ if (!match_definer_mask) {
+ err = -ENOMEM;
+ goto free_lag_definer;
+ }
+
+ format_id = mlx5_lag_set_definer(match_definer_mask, tt, tunnel, hash);
+ lag_definer->definer =
+ mlx5_create_match_definer(dev, MLX5_FLOW_NAMESPACE_PORT_SEL,
+ format_id, match_definer_mask);
+ if (IS_ERR(lag_definer->definer)) {
+ err = PTR_ERR(lag_definer->definer);
+ goto free_mask;
+ }
+
+ err = mlx5_lag_create_port_sel_table(ldev, lag_definer, port1, port2);
+ if (err)
+ goto destroy_match_definer;
+
+ kvfree(match_definer_mask);
+
+ return lag_definer;
+
+destroy_match_definer:
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+free_mask:
+ kvfree(match_definer_mask);
+free_lag_definer:
+ kfree(lag_definer);
+ return ERR_PTR(err);
+}
+
+static void mlx5_lag_destroy_definer(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer *lag_definer)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ int i;
+
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ mlx5_del_flow_rules(lag_definer->rules[i]);
+ mlx5_destroy_flow_group(lag_definer->fg);
+ mlx5_destroy_flow_table(lag_definer->ft);
+ mlx5_destroy_match_definer(dev, lag_definer->definer);
+ kfree(lag_definer);
+}
+
+static void mlx5_lag_destroy_definers(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int tt;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ if (port_sel->outer.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->outer.definers[tt]);
+ if (port_sel->inner.definers[tt])
+ mlx5_lag_destroy_definer(ldev,
+ port_sel->inner.definers[tt]);
+ }
+}
+
+static int mlx5_lag_create_definers(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_lag_definer *lag_definer;
+ int tt, err;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ lag_definer = mlx5_lag_create_definer(ldev, hash_type, tt,
+ false, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->outer.definers[tt] = lag_definer;
+
+ if (!port_sel->tunnel)
+ continue;
+
+ lag_definer =
+ mlx5_lag_create_definer(ldev, hash_type, tt,
+ true, port1, port2);
+ if (IS_ERR(lag_definer)) {
+ err = PTR_ERR(lag_definer);
+ goto destroy_definers;
+ }
+ port_sel->inner.definers[tt] = lag_definer;
+ }
+
+ return 0;
+
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
+
+static void set_tt_map(struct mlx5_lag_port_sel *port_sel,
+ enum netdev_lag_hash hash)
+{
+ port_sel->tunnel = false;
+
+ switch (hash) {
+ case NETDEV_LAG_HASH_E34:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L34:
+ set_bit(MLX5_TT_IPV4_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_TCP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6_UDP, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ case NETDEV_LAG_HASH_E23:
+ port_sel->tunnel = true;
+ fallthrough;
+ case NETDEV_LAG_HASH_L23:
+ set_bit(MLX5_TT_IPV4, port_sel->tt_map);
+ set_bit(MLX5_TT_IPV6, port_sel->tt_map);
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ default:
+ set_bit(MLX5_TT_ANY, port_sel->tt_map);
+ break;
+ }
+}
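/* Worked example, not part of the patch (the helper name is made up): after
 * the call below, port_sel->tunnel is false and only the bits listed in the
 * comment are set, which is exactly what drives the loops in
 * mlx5_lag_create_definers() and the TTC destination setup further down.
 */
static void example_l34_tt_map(struct mlx5_lag_port_sel *port_sel)
{
	set_tt_map(port_sel, NETDEV_LAG_HASH_L34);
	/* tt_map now holds MLX5_TT_IPV4_TCP, MLX5_TT_IPV4_UDP,
	 * MLX5_TT_IPV6_TCP, MLX5_TT_IPV6_UDP, MLX5_TT_IPV4, MLX5_TT_IPV6
	 * and MLX5_TT_ANY; NETDEV_LAG_HASH_E34 would set the same bits but
	 * also flip port_sel->tunnel to true.
	 */
}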
+
+#define SET_IGNORE_DESTS_BITS(tt_map, dests) \
+ do { \
+ int idx; \
+ \
+ for_each_clear_bit(idx, tt_map, MLX5_NUM_TT) \
+ set_bit(idx, dests); \
+ } while (0)
+
+static void mlx5_lag_set_inner_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_INNER_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->inner.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+}
+
+static void mlx5_lag_set_outer_ttc_params(struct mlx5_lag *ldev,
+ struct ttc_params *ttc_params)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_table_attr *ft_attr;
+ int tt;
+
+ ttc_params->ns = mlx5_get_flow_namespace(dev,
+ MLX5_FLOW_NAMESPACE_PORT_SEL);
+ ft_attr = &ttc_params->ft_attr;
+ ft_attr->level = MLX5_LAG_FT_LEVEL_TTC;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ ttc_params->dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->dests[tt].ft = port_sel->outer.definers[tt]->ft;
+ }
+ SET_IGNORE_DESTS_BITS(port_sel->tt_map, ttc_params->ignore_dests);
+
+ ttc_params->inner_ttc = port_sel->tunnel;
+ if (!port_sel->tunnel)
+ return;
+
+ for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) {
+ ttc_params->tunnel_dests[tt].type =
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ ttc_params->tunnel_dests[tt].ft =
+ mlx5_get_ttc_flow_table(port_sel->inner.ttc);
+ }
+}
+
+static int mlx5_lag_create_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_outer_ttc_params(ldev, &ttc_params);
+ port_sel->outer.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->outer.ttc))
+ return PTR_ERR(port_sel->outer.ttc);
+
+ return 0;
+}
+
+static int mlx5_lag_create_inner_ttc_table(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct ttc_params ttc_params = {};
+
+ mlx5_lag_set_inner_ttc_params(ldev, &ttc_params);
+ port_sel->inner.ttc = mlx5_create_ttc_table(dev, &ttc_params);
+ if (IS_ERR(port_sel->inner.ttc))
+ return PTR_ERR(port_sel->inner.ttc);
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ set_tt_map(port_sel, hash_type);
+ err = mlx5_lag_create_definers(ldev, hash_type, port1, port2);
+ if (err)
+ return err;
+
+ if (port_sel->tunnel) {
+ err = mlx5_lag_create_inner_ttc_table(ldev);
+ if (err)
+ goto destroy_definers;
+ }
+
+ err = mlx5_lag_create_ttc_table(ldev);
+ if (err)
+ goto destroy_inner;
+
+ return 0;
+
+destroy_inner:
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+destroy_definers:
+ mlx5_lag_destroy_definers(ldev);
+ return err;
+}
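/* Usage sketch, not part of the patch (function name and port values are made
 * up): the LAG code is expected to create the whole port-selection pipeline
 * once, remap ports on affinity changes and destroy it on deactivation.
 */
static int example_port_sel_lifecycle(struct mlx5_lag *ldev)
{
	int err;

	err = mlx5_lag_port_sel_create(ldev, NETDEV_LAG_HASH_L34, 1, 2);
	if (err)
		return err;

	/* e.g. port 1 went down, steer both slots to port 2 */
	err = mlx5_lag_port_sel_modify(ldev, 2, 2);

	mlx5_lag_port_sel_destroy(ldev);
	return err;
}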
+
+static int
+mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
+ struct mlx5_lag_definer **definers,
+ u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ struct mlx5_flow_destination dest = {};
+ int err;
+ int tt;
+
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_UPLINK;
+ dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
+
+ for_each_set_bit(tt, port_sel->tt_map, MLX5_NUM_TT) {
+ struct mlx5_flow_handle **rules = definers[tt]->rules;
+
+ if (ldev->v2p_map[MLX5_LAG_P1] != port1) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port1 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P1],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+
+ if (ldev->v2p_map[MLX5_LAG_P2] != port2) {
+ dest.vport.vhca_id =
+ MLX5_CAP_GEN(ldev->pf[port2 - 1].dev, vhca_id);
+ err = mlx5_modify_rule_destination(rules[MLX5_LAG_P2],
+ &dest, NULL);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+ int err;
+
+ err = mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->outer.definers,
+ port1, port2);
+ if (err)
+ return err;
+
+ if (!port_sel->tunnel)
+ return 0;
+
+ return mlx5_lag_modify_definers_destinations(ldev,
+ port_sel->inner.definers,
+ port1, port2);
+}
+
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev)
+{
+ struct mlx5_lag_port_sel *port_sel = &ldev->port_sel;
+
+ mlx5_destroy_ttc_table(port_sel->outer.ttc);
+ if (port_sel->tunnel)
+ mlx5_destroy_ttc_table(port_sel->inner.ttc);
+ mlx5_lag_destroy_definers(ldev);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
new file mode 100644
index 000000000000..6d15b28a42fc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. */
+
+#ifndef __MLX5_LAG_FS_H__
+#define __MLX5_LAG_FS_H__
+
+#include "lib/fs_ttc.h"
+
+struct mlx5_lag_definer {
+ struct mlx5_flow_definer *definer;
+ struct mlx5_flow_table *ft;
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_handle *rules[MLX5_MAX_PORTS];
+};
+
+struct mlx5_lag_ttc {
+ struct mlx5_ttc_table *ttc;
+ struct mlx5_lag_definer *definers[MLX5_NUM_TT];
+};
+
+struct mlx5_lag_port_sel {
+ DECLARE_BITMAP(tt_map, MLX5_NUM_TT);
+ bool tunnel;
+ struct mlx5_lag_ttc outer;
+ struct mlx5_lag_ttc inner;
+};
+
+#ifdef CONFIG_MLX5_ESWITCH
+
+int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1, u8 port2);
+void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev);
+int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type, u8 port1,
+ u8 port2);
+
+#else /* CONFIG_MLX5_ESWITCH */
+static inline int mlx5_lag_port_sel_create(struct mlx5_lag *ldev,
+ enum netdev_lag_hash hash_type,
+ u8 port1, u8 port2)
+{
+ return 0;
+}
+
+static inline int mlx5_lag_port_sel_modify(struct mlx5_lag *ldev, u8 port1,
+ u8 port2)
+{
+ return 0;
+}
+
+static inline void mlx5_lag_port_sel_destroy(struct mlx5_lag *ldev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_LAG_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
index 749d17c0057d..b63dec24747a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c
@@ -247,6 +247,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
for (tt = 0; tt < MLX5_NUM_TT; tt++) {
struct mlx5_ttc_rule *rule = &rules[tt];
+ if (test_bit(tt, params->ignore_dests))
+ continue;
rule->rule = mlx5_generate_ttc_rule(dev, ft, &params->dests[tt],
ttc_rules[tt].etype,
ttc_rules[tt].proto);
@@ -266,6 +268,8 @@ static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev,
if (!mlx5_tunnel_proto_supported_rx(dev,
ttc_tunnel_rules[tt].proto))
continue;
+ if (test_bit(tt, params->ignore_tunnel_dests))
+ continue;
trules[tt] = mlx5_generate_ttc_rule(dev, ft,
&params->tunnel_dests[tt],
ttc_tunnel_rules[tt].etype,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
index ce95be8f8382..85fef0cd1c07 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h
@@ -43,7 +43,9 @@ struct ttc_params {
struct mlx5_flow_namespace *ns;
struct mlx5_flow_table_attr ft_attr;
struct mlx5_flow_destination dests[MLX5_NUM_TT];
+ DECLARE_BITMAP(ignore_dests, MLX5_NUM_TT);
bool inner_ttc;
+ DECLARE_BITMAP(ignore_tunnel_dests, MLX5_NUM_TUNNEL_TT);
struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT];
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
new file mode 100644
index 000000000000..0dd96a6b140d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -0,0 +1,162 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/mlx5/driver.h>
+#include "lib/tout.h"
+
+struct mlx5_timeouts {
+ u64 to[MAX_TIMEOUT_TYPES];
+};
+
+static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
+ [MLX5_TO_FW_PRE_INIT_TIMEOUT_MS] = 120000,
+ [MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS] = 20000,
+ [MLX5_TO_FW_PRE_INIT_WAIT_MS] = 2,
+ [MLX5_TO_FW_INIT_MS] = 2000,
+ [MLX5_TO_CMD_MS] = 60000,
+ [MLX5_TO_PCI_TOGGLE_MS] = 2000,
+ [MLX5_TO_HEALTH_POLL_INTERVAL_MS] = 2000,
+ [MLX5_TO_FULL_CRDUMP_MS] = 60000,
+ [MLX5_TO_FW_RESET_MS] = 60000,
+ [MLX5_TO_FLUSH_ON_ERROR_MS] = 2000,
+ [MLX5_TO_PCI_SYNC_UPDATE_MS] = 5000,
+ [MLX5_TO_TEARDOWN_MS] = 3000,
+ [MLX5_TO_FSM_REACTIVATE_MS] = 5000,
+ [MLX5_TO_RECLAIM_PAGES_MS] = 5000,
+ [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
+};
+
+static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
+{
+ dev->timeouts->to[type] = val;
+}
+
+static void tout_set_def_val(struct mlx5_core_dev *dev)
+{
+ int i;
+
+ for (i = MLX5_TO_FW_PRE_INIT_TIMEOUT_MS; i < MAX_TIMEOUT_TYPES; i++)
+ tout_set(dev, tout_def_sw_val[i], i);
+}
+
+int mlx5_tout_init(struct mlx5_core_dev *dev)
+{
+ dev->timeouts = kmalloc(sizeof(*dev->timeouts), GFP_KERNEL);
+ if (!dev->timeouts)
+ return -ENOMEM;
+
+ tout_set_def_val(dev);
+ return 0;
+}
+
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev)
+{
+ kfree(dev->timeouts);
+}
+
+/* The timeout register consists of two fields: to_multiplier (timeout
+ * multiplier) and to_value (timeout value). to_value is the quantity of time
+ * units and to_multiplier selects the unit, one of these four values:
+ * 0x0: milliseconds
+ * 0x1: seconds
+ * 0x2: minutes
+ * 0x3: hours
+ * This function converts the time stored in the two register fields into
+ * milliseconds.
+ */
+static u64 tout_convert_reg_field_to_ms(u32 to_mul, u32 to_val)
+{
+ u64 msec = to_val;
+
+ to_mul &= 0x3;
+ /* convert hours/minutes/seconds to milliseconds */
+ if (to_mul)
+ msec *= 1000 * int_pow(60, to_mul - 1);
+
+ return msec;
+}
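/* Worked example, not part of the patch: to_mul == 0x2 (minutes) and
 * to_val == 3 yields 3 * 1000 * 60^(2 - 1) = 180000 ms, while to_mul == 0x0
 * returns to_val unchanged, so a raw 2000 stays 2000 ms.
 */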
+
+static u64 tout_convert_iseg_to_ms(u32 iseg_to)
+{
+ return tout_convert_reg_field_to_ms(iseg_to >> 29, iseg_to & 0xfffff);
+}
+
+static bool tout_is_supported(struct mlx5_core_dev *dev)
+{
+ return !!ioread32be(&dev->iseg->cmd_q_init_to);
+}
+
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev)
+{
+ u32 to;
+
+ if (!tout_is_supported(dev))
+ return;
+
+ to = ioread32be(&dev->iseg->cmd_q_init_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_FW_INIT_MS);
+
+ to = ioread32be(&dev->iseg->cmd_exec_to);
+ tout_set(dev, tout_convert_iseg_to_ms(to), MLX5_TO_CMD_MS);
+}
+
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
+{
+ return dev->timeouts->to[type];
+}
+
+#define MLX5_TIMEOUT_QUERY(fld, reg_out) \
+ ({ \
+ struct mlx5_ifc_default_timeout_bits *time_field; \
+ u32 to_multi, to_value; \
+ u64 to_val_ms; \
+ \
+ time_field = MLX5_ADDR_OF(dtor_reg, reg_out, fld); \
+ to_multi = MLX5_GET(default_timeout, time_field, to_multiplier); \
+ to_value = MLX5_GET(default_timeout, time_field, to_value); \
+ to_val_ms = tout_convert_reg_field_to_ms(to_multi, to_value); \
+ to_val_ms; \
+ })
+
+#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
+ ({ \
+ u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
+ tout_set(dev, fw_to + (to_extra), to_type); \
+ fw_to; \
+ })
+
+static int tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ u64 pcie_toggle_to_val, tear_down_to_val;
+ u32 out[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(dtor_reg)] = {};
+ int err;
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), MLX5_REG_DTOR, 0, 0);
+ if (err)
+ return err;
+
+ pcie_toggle_to_val = MLX5_TIMEOUT_FILL(pcie_toggle_to, out, dev, MLX5_TO_PCI_TOGGLE_MS, 0);
+ MLX5_TIMEOUT_FILL(fw_reset_to, out, dev, MLX5_TO_FW_RESET_MS, pcie_toggle_to_val);
+
+ tear_down_to_val = MLX5_TIMEOUT_FILL(tear_down_to, out, dev, MLX5_TO_TEARDOWN_MS, 0);
+ MLX5_TIMEOUT_FILL(pci_sync_update_to, out, dev, MLX5_TO_PCI_SYNC_UPDATE_MS,
+ tear_down_to_val);
+
+ MLX5_TIMEOUT_FILL(health_poll_to, out, dev, MLX5_TO_HEALTH_POLL_INTERVAL_MS, 0);
+ MLX5_TIMEOUT_FILL(full_crdump_to, out, dev, MLX5_TO_FULL_CRDUMP_MS, 0);
+ MLX5_TIMEOUT_FILL(flush_on_err_to, out, dev, MLX5_TO_FLUSH_ON_ERROR_MS, 0);
+ MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
+ MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
+
+ return 0;
+}
+
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev)
+{
+ if (tout_is_supported(dev))
+ return tout_query_dtor(dev);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
new file mode 100644
index 000000000000..31faa5c17aa9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef MLX5_TIMEOUTS_H
+#define MLX5_TIMEOUTS_H
+
+enum mlx5_timeouts_types {
+ /* pre init timeouts (not read from FW) */
+ MLX5_TO_FW_PRE_INIT_TIMEOUT_MS,
+ MLX5_TO_FW_PRE_INIT_WARN_MESSAGE_INTERVAL_MS,
+ MLX5_TO_FW_PRE_INIT_WAIT_MS,
+
+ /* init segment timeouts */
+ MLX5_TO_FW_INIT_MS,
+ MLX5_TO_CMD_MS,
+
+ /* DTOR timeouts */
+ MLX5_TO_PCI_TOGGLE_MS,
+ MLX5_TO_HEALTH_POLL_INTERVAL_MS,
+ MLX5_TO_FULL_CRDUMP_MS,
+ MLX5_TO_FW_RESET_MS,
+ MLX5_TO_FLUSH_ON_ERROR_MS,
+ MLX5_TO_PCI_SYNC_UPDATE_MS,
+ MLX5_TO_TEARDOWN_MS,
+ MLX5_TO_FSM_REACTIVATE_MS,
+ MLX5_TO_RECLAIM_PAGES_MS,
+ MLX5_TO_RECLAIM_VFS_PAGES_MS,
+
+ MAX_TIMEOUT_TYPES
+};
+
+struct mlx5_core_dev;
+int mlx5_tout_init(struct mlx5_core_dev *dev);
+void mlx5_tout_cleanup(struct mlx5_core_dev *dev);
+void mlx5_tout_query_iseg(struct mlx5_core_dev *dev);
+int mlx5_tout_query_dtor(struct mlx5_core_dev *dev);
+u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type);
+
+#define mlx5_tout_ms(dev, type) _mlx5_tout_ms(dev, MLX5_TO_##type##_MS)
+
#endif /* MLX5_TIMEOUTS_H */
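/* Usage note, not part of the patch (the helper name is made up): the macro
 * pastes the short timeout name into the enum, so callers never spell out
 * the MLX5_TO_*_MS constants directly.
 */
static inline u64 example_fw_init_timeout(struct mlx5_core_dev *dev)
{
	/* expands to _mlx5_tout_ms(dev, MLX5_TO_FW_INIT_MS) */
	return mlx5_tout_ms(dev, FW_INIT);
}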
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 79482824c64f..a92a92a52346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -60,6 +60,7 @@
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
+#include "lib/tout.h"
#include "fpga/core.h"
#include "fpga/ipsec.h"
#include "accel/ipsec.h"
@@ -176,11 +177,6 @@ static struct mlx5_profile profile[] = {
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
-#define FW_PRE_INIT_TIMEOUT_MILI 120000
-#define FW_INIT_WARN_MESSAGE_INTERVAL 20000
-
static int fw_initializing(struct mlx5_core_dev *dev)
{
return ioread32be(&dev->iseg->initializing) >> 31;
@@ -193,8 +189,6 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
int err = 0;
- BUILD_BUG_ON(FW_PRE_INIT_TIMEOUT_MILI < FW_INIT_WARN_MESSAGE_INTERVAL);
-
while (fw_initializing(dev)) {
if (time_after(jiffies, end)) {
err = -EBUSY;
@@ -205,7 +199,7 @@ static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
jiffies_to_msecs(end - warn) / 1000);
warn = jiffies + msecs_to_jiffies(warn_time_mili);
}
- msleep(FW_INIT_WAIT_MS);
+ msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
}
return err;
@@ -564,15 +558,38 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
+ if (MLX5_CAP_GEN(dev, roce_rw_supported))
+ MLX5_SET(cmd_hca_cap, set_hca_cap, roce, mlx5_is_roce_init_enabled(dev));
+
return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
}
+/* The cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the
+ * boot process: if the RoCE cap is writable in FW and user/devlink requested
+ * a change, the final state of the cap has not been queried yet. Hence the
+ * need for this helper.
+ *
+ * Returns true if either:
+ * 1) the RoCE cap is read-only in FW and already disabled, or
+ * 2) the RoCE cap is writable in FW and user/devlink requested it off.
+ *
+ * In any other case, returns false.
+ */
+static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
+{
+ return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_init_enabled(dev)) ||
+ (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
+}
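/* Decision table sketch, not part of the patch (rw == roce_rw_supported cap,
 * init == mlx5_is_roce_init_enabled(), cap == cached MLX5_CAP_GEN(dev, roce)):
 *
 *   rw   init   cap   -> RoCE considered disabled?
 *   1    0      -        yes (user/devlink switched it off)
 *   1    1      -        no
 *   0    -      0        yes (read-only and already off)
 *   0    -      1        no
 */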
+
static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
void *set_hca_cap;
int err;
- if (!MLX5_CAP_GEN(dev, roce))
+ if (is_roce_fw_disabled(dev))
return 0;
err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
@@ -975,25 +992,34 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
if (mlx5_core_is_pf(dev))
pcie_print_link_status(dev->pdev);
+ err = mlx5_tout_init(dev);
+ if (err) {
+ mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
+ return err;
+ }
+
/* wait for firmware to accept initialization segments configurations
*/
- err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI, FW_INIT_WARN_MESSAGE_INTERVAL);
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT),
+ mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL));
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in pre-initializing state, aborting\n",
- FW_PRE_INIT_TIMEOUT_MILI);
- return err;
+ mlx5_core_err(dev, "Firmware over %llu MS in pre-initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ goto err_tout_cleanup;
}
err = mlx5_cmd_init(dev);
if (err) {
mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
- return err;
+ goto err_tout_cleanup;
}
- err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI, 0);
+ mlx5_tout_query_iseg(dev);
+
+ err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0);
if (err) {
- mlx5_core_err(dev, "Firmware over %d MS in initializing state, aborting\n",
- FW_INIT_TIMEOUT_MILI);
+ mlx5_core_err(dev, "Firmware over %llu MS in initializing state, aborting\n",
+ mlx5_tout_ms(dev, FW_INIT));
goto err_cmd_cleanup;
}
@@ -1017,6 +1043,12 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
goto err_disable_hca;
}
+ err = mlx5_tout_query_dtor(dev);
+ if (err) {
+ mlx5_core_err(dev, "failed to read dtor\n");
+ goto reclaim_boot_pages;
+ }
+
err = set_hca_ctrl(dev);
if (err) {
mlx5_core_err(dev, "set_hca_ctrl failed\n");
@@ -1062,6 +1094,8 @@ err_disable_hca:
err_cmd_cleanup:
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+err_tout_cleanup:
+ mlx5_tout_cleanup(dev);
return err;
}
@@ -1080,6 +1114,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_disable_hca(dev, 0);
mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
mlx5_cmd_cleanup(dev);
+ mlx5_tout_cleanup(dev);
return 0;
}
@@ -1112,8 +1147,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_fw_tracer_init(dev->tracer);
if (err) {
- mlx5_core_err(dev, "Failed to init FW tracer\n");
- goto err_fw_tracer;
+ mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
+ mlx5_fw_tracer_destroy(dev->tracer);
+ dev->tracer = NULL;
}
mlx5_fw_reset_events_start(dev);
@@ -1121,8 +1157,9 @@ static int mlx5_load(struct mlx5_core_dev *dev)
err = mlx5_rsc_dump_init(dev);
if (err) {
- mlx5_core_err(dev, "Failed to init Resource dump\n");
- goto err_rsc_dump;
+ mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
+ mlx5_rsc_dump_destroy(dev);
+ dev->rsc_dump = NULL;
}
err = mlx5_fpga_device_start(dev);
@@ -1192,11 +1229,9 @@ err_tls_start:
mlx5_fpga_device_stop(dev);
err_fpga_start:
mlx5_rsc_dump_cleanup(dev);
-err_rsc_dump:
mlx5_hv_vhca_cleanup(dev->hv_vhca);
mlx5_fw_reset_events_stop(dev);
mlx5_fw_tracer_cleanup(dev->tracer);
-err_fw_tracer:
mlx5_eq_table_destroy(dev);
err_eq_table:
mlx5_irq_table_destroy(dev);
@@ -1381,6 +1416,8 @@ static const int types[] = {
MLX5_CAP_TLS,
MLX5_CAP_VDPA_EMULATION,
MLX5_CAP_IPSEC,
+ MLX5_CAP_PORT_SELECTION,
+ MLX5_CAP_DEV_SHAMPO,
};
static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
@@ -1537,8 +1574,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
dev_err(&pdev->dev, "mlx5_crdump_enable failed with error code %d\n", err);
pci_save_state(pdev);
- if (!mlx5_core_is_mp_slave(dev))
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
err_init_one:
@@ -1558,7 +1594,7 @@ static void remove_one(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_crdump_disable(dev);
mlx5_drain_health_wq(dev);
mlx5_uninit_one(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 230eab7e3bc9..bb677329ea08 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -97,6 +97,30 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
+static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...)
+{
+ struct device *device = dev->device;
+ struct va_format vaf;
+ va_list args;
+
+ if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG,
+ "Level %d is out of range, set to default level\n", level))
+ level = LOGLEVEL_DEFAULT;
+
+ va_start(args, format);
+ vaf.fmt = format;
+ vaf.va = &args;
+
+ dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device),
+ &vaf);
+ va_end(args);
+}
+
+#define mlx5_log(__dev, level, format, ...) \
+ mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \
+ __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
+
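/* Usage sketch, not part of the patch (the helper name is made up): unlike the
 * fixed-severity mlx5_core_* wrappers, mlx5_log() takes the log level as a
 * run-time argument and falls back to LOGLEVEL_DEFAULT on out-of-range input.
 */
static inline void example_log_status(struct mlx5_core_dev *dev, int err)
{
	mlx5_log(dev, err ? LOGLEVEL_ERR : LOGLEVEL_INFO,
		 "operation finished, err %d\n", err);
}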
static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
return &dev->pdev->dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
index abd024173c42..8116815663a7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h
@@ -8,8 +8,6 @@
#define MLX5_COMP_EQS_PER_SF 8
-#define MLX5_IRQ_EQ_CTRL (0)
-
struct mlx5_irq;
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index 174f71ed5280..f099a087400e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -35,13 +35,11 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
-int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey,
- u32 *in, int inlen)
+int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
+ int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
- void *mkc;
int err;
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
@@ -50,38 +48,33 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
if (err)
return err;
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key = (u32)mlx5_mkey_variant(mkey->key) | mlx5_idx_to_mkey(mkey_index);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
- init_waitqueue_head(&mkey->wait);
+ *mkey = MLX5_GET(create_mkey_in, in, memory_key_mkey_entry.mkey_7_0) |
+ mlx5_idx_to_mkey(mkey_index);
- mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, mkey->key);
+ mlx5_core_dbg(dev, "out 0x%x, mkey 0x%x\n", mkey_index, *mkey);
return 0;
}
EXPORT_SYMBOL(mlx5_core_create_mkey);
-int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
- struct mlx5_core_mkey *mkey)
+int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey)
{
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
- MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec_in(dev, destroy_mkey, in);
}
EXPORT_SYMBOL(mlx5_core_destroy_mkey);
-int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
- u32 *out, int outlen)
+int mlx5_core_query_mkey(struct mlx5_core_dev *dev, u32 mkey, u32 *out,
+ int outlen)
{
u32 in[MLX5_ST_SZ_DW(query_mkey_in)] = {};
memset(out, 0, outlen);
MLX5_SET(query_mkey_in, in, opcode, MLX5_CMD_OP_QUERY_MKEY);
- MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ MLX5_SET(query_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
EXPORT_SYMBOL(mlx5_core_query_mkey);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 110c0837f95b..f6b5451328fc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -38,6 +38,7 @@
#include <linux/xarray.h>
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lib/tout.h"
enum {
MLX5_PAGES_CANT_GIVE = 0,
@@ -65,11 +66,6 @@ struct fw_page {
};
enum {
- MAX_RECLAIM_TIME_MSECS = 5000,
- MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
-};
-
-enum {
MLX5_MAX_RECLAIM_TIME_MILI = 5000,
MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};
@@ -641,7 +637,8 @@ static int optimal_reclaimed_pages(void)
static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
struct rb_root *root, u16 func_id)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES));
+ unsigned long end = jiffies + recl_pages_to_jiffies;
while (!RB_EMPTY_ROOT(root)) {
int nclaimed;
@@ -656,7 +653,7 @@ static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev,
}
if (nclaimed)
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
+ end = jiffies + recl_pages_to_jiffies;
if (time_after(jiffies, end)) {
mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
@@ -727,7 +724,8 @@ void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
{
- unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ u64 recl_vf_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_VFS_PAGES));
+ unsigned long end = jiffies + recl_vf_pages_to_jiffies;
int prev_pages = *pages;
/* In case of internal error we will free the pages manually later */
@@ -743,7 +741,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages)
return -ETIMEDOUT;
}
if (*pages < prev_pages) {
- end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
+ end = jiffies + recl_vf_pages_to_jiffies;
prev_pages = *pages;
}
msleep(50);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 763c83a02380..830444f927d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -194,15 +194,25 @@ static void irq_sf_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
snprintf(name, MLX5_MAX_IRQ_NAME, "%s%d", pool->name, vecidx);
}
-static void irq_set_name(char *name, int vecidx)
+static void irq_set_name(struct mlx5_irq_pool *pool, char *name, int vecidx)
{
- if (vecidx == 0) {
+ if (!pool->xa_num_irqs.max) {
+ /* in case we only have a single irq for the device */
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_combined%d", vecidx);
+ return;
+ }
+
+ if (vecidx == pool->xa_num_irqs.max) {
snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_async%d", vecidx);
return;
}
- snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d",
- vecidx - MLX5_IRQ_VEC_COMP_BASE);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", vecidx);
+}
+
+static bool irq_pool_is_sf_pool(struct mlx5_irq_pool *pool)
+{
+ return !strncmp("mlx5_sf", pool->name, strlen("mlx5_sf"));
}
static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
@@ -216,8 +226,8 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i)
if (!irq)
return ERR_PTR(-ENOMEM);
irq->irqn = pci_irq_vector(dev->pdev, i);
- if (!pool->name[0])
- irq_set_name(name, i);
+ if (!irq_pool_is_sf_pool(pool))
+ irq_set_name(pool, name, i);
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
@@ -386,6 +396,9 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx,
if (IS_ERR(irq) || !affinity)
goto unlock;
cpumask_copy(irq->mask, affinity);
+ if (!irq_pool_is_sf_pool(pool) && !pool->xa_num_irqs.max &&
+ cpumask_empty(irq->mask))
+ cpumask_set_cpu(0, irq->mask);
irq_set_affinity_hint(irq->irqn, irq->mask);
unlock:
mutex_unlock(&pool->lock);
@@ -440,6 +453,7 @@ struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx,
}
pf_irq:
pool = irq_table->pf_pool;
+ vecidx = (vecidx == MLX5_IRQ_EQ_CTRL) ? pool->xa_num_irqs.max : vecidx;
irq = irq_pool_request_vector(pool, vecidx, affinity);
out:
if (IS_ERR(irq))
@@ -577,6 +591,8 @@ void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev)
int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table)
{
+ if (!table->pf_pool->xa_num_irqs.max)
+ return 1;
return table->pf_pool->xa_num_irqs.max - table->pf_pool->xa_num_irqs.min;
}
@@ -592,19 +608,15 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev)
if (mlx5_core_is_sf(dev))
return 0;
- pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
- MLX5_IRQ_VEC_COMP_BASE;
+ pf_vec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + 1;
pf_vec = min_t(int, pf_vec, num_eqs);
- if (pf_vec <= MLX5_IRQ_VEC_COMP_BASE)
- return -ENOMEM;
total_vec = pf_vec;
if (mlx5_sf_max_functions(dev))
total_vec += MLX5_IRQ_CTRL_SF_MAX +
MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev);
- total_vec = pci_alloc_irq_vectors(dev->pdev, MLX5_IRQ_VEC_COMP_BASE + 1,
- total_vec, PCI_IRQ_MSIX);
+ total_vec = pci_alloc_irq_vectors(dev->pdev, 1, total_vec, PCI_IRQ_MSIX);
if (total_vec < 0)
return total_vec;
pf_vec = min(pf_vec, total_vec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
index 871c2fbe18d3..f37db7cc32a6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c
@@ -9,6 +9,8 @@
#include "sf/sf.h"
#include "sf/mlx5_ifc_vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/dev_tracepoint.h"
struct mlx5_sf_dev_table {
struct xarray devices;
@@ -66,13 +68,18 @@ static void mlx5_sf_dev_release(struct device *device)
kfree(sf_dev);
}
-static void mlx5_sf_dev_remove(struct mlx5_sf_dev *sf_dev)
+static void mlx5_sf_dev_remove(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_dev)
{
+ int id;
+
+ id = sf_dev->adev.id;
+ trace_mlx5_sf_dev_del(dev, sf_dev, id);
+
auxiliary_device_delete(&sf_dev->adev);
auxiliary_device_uninit(&sf_dev->adev);
}
-static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
+static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u16 fn_id, u32 sfnum)
{
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
struct mlx5_sf_dev *sf_dev;
@@ -100,6 +107,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
sf_dev->adev.dev.groups = sf_attr_groups;
sf_dev->sfnum = sfnum;
sf_dev->parent_mdev = dev;
+ sf_dev->fn_id = fn_id;
if (!table->max_sfs) {
mlx5_adev_idx_free(id);
@@ -109,6 +117,8 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
}
sf_dev->bar_base_addr = table->base_address + (sf_index * table->sf_bar_length);
+ trace_mlx5_sf_dev_add(dev, sf_dev, id);
+
err = auxiliary_device_init(&sf_dev->adev);
if (err) {
mlx5_adev_idx_free(id);
@@ -128,7 +138,7 @@ static void mlx5_sf_dev_add(struct mlx5_core_dev *dev, u16 sf_index, u32 sfnum)
return;
xa_err:
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(dev, sf_dev);
add_err:
mlx5_core_err(dev, "SF DEV: fail device add for index=%d sfnum=%d err=%d\n",
sf_index, sfnum, err);
@@ -139,7 +149,7 @@ static void mlx5_sf_dev_del(struct mlx5_core_dev *dev, struct mlx5_sf_dev *sf_de
struct mlx5_sf_dev_table *table = dev->priv.sf_dev_table;
xa_erase(&table->devices, sf_index);
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(dev, sf_dev);
}
static int
@@ -178,7 +188,8 @@ mlx5_sf_dev_state_change_handler(struct notifier_block *nb, unsigned long event_
break;
case MLX5_VHCA_STATE_ACTIVE:
if (!sf_dev)
- mlx5_sf_dev_add(table->dev, sf_index, event->sw_function_id);
+ mlx5_sf_dev_add(table->dev, sf_index, event->function_id,
+ event->sw_function_id);
break;
default:
break;
@@ -260,7 +271,7 @@ static void mlx5_sf_dev_destroy_all(struct mlx5_sf_dev_table *table)
xa_for_each(&table->devices, index, sf_dev) {
xa_erase(&table->devices, index);
- mlx5_sf_dev_remove(sf_dev);
+ mlx5_sf_dev_remove(table->dev, sf_dev);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
index 149fd9e698cf..2a66a427ef15 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.h
@@ -16,6 +16,7 @@ struct mlx5_sf_dev {
struct mlx5_core_dev *mdev;
phys_addr_t bar_base_addr;
u32 sfnum;
+ u16 fn_id;
};
void mlx5_sf_dev_table_create(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
new file mode 100644
index 000000000000..7f7c9af5deed
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/diag/dev_tracepoint.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_DEV_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_DEV_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "../../dev/dev.h"
+
+DECLARE_EVENT_CLASS(mlx5_sf_dev_template,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(const struct mlx5_sf_dev*, sfdev)
+ __field(int, aux_id)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->sfdev = sfdev;
+ __entry->aux_id = aux_id;
+ __entry->hw_fn_id = sfdev->fn_id;
+ __entry->sfnum = sfdev->sfnum;
+ ),
+ TP_printk("(%s) sfdev=%pK aux_id=%d hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->sfdev,
+ __entry->aux_id, __entry->hw_fn_id,
+ __entry->sfnum)
+);
+
+DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_add,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id)
+ );
+
+DEFINE_EVENT(mlx5_sf_dev_template, mlx5_sf_dev_del,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_sf_dev *sfdev,
+ int aux_id),
+ TP_ARGS(dev, sfdev, aux_id)
+ );
+
+#endif /* _MLX5_SF_DEV_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/dev/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dev_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 052f48068dc1..7b4783ce213e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -46,7 +46,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
}
- devlink_reload_enable(devlink);
+ devlink_register(devlink);
return 0;
init_one_err:
@@ -61,10 +61,9 @@ mdev_err:
static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
{
struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev);
- struct devlink *devlink;
+ struct devlink *devlink = priv_to_devlink(sf_dev->mdev);
- devlink = priv_to_devlink(sf_dev->mdev);
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 13891fdc607e..3be659cd91f1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -8,6 +8,8 @@
#include "mlx5_ifc_vhca_event.h"
#include "vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/sf_tracepoint.h"
struct mlx5_sf {
struct devlink_port dl_port;
@@ -112,6 +114,7 @@ static void mlx5_sf_free(struct mlx5_sf_table *table, struct mlx5_sf *sf)
{
mlx5_sf_id_erase(table, sf);
mlx5_sf_hw_table_sf_free(table->dev, sf->controller, sf->id);
+ trace_mlx5_sf_free(table->dev, sf->port_index, sf->controller, sf->hw_fn_id);
kfree(sf);
}
@@ -209,6 +212,7 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf,
return err;
sf->hw_state = MLX5_VHCA_STATE_ACTIVE;
+ trace_mlx5_sf_activate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
return 0;
}
@@ -224,6 +228,7 @@ static int mlx5_sf_deactivate(struct mlx5_core_dev *dev, struct mlx5_sf *sf)
return err;
sf->hw_state = MLX5_VHCA_STATE_TEARDOWN_REQUEST;
+ trace_mlx5_sf_deactivate(dev, sf->port_index, sf->controller, sf->hw_fn_id);
return 0;
}
@@ -293,6 +298,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
if (err)
goto esw_err;
*new_port_index = sf->port_index;
+ trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
return 0;
esw_err:
@@ -323,7 +329,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
NL_SET_ERR_MSG_MOD(extack, "External controller is unsupported");
return -EOPNOTSUPP;
}
- if (new_attr->pfnum != PCI_FUNC(dev->pdev->devfn)) {
+ if (new_attr->pfnum != mlx5_get_dev_index(dev)) {
NL_SET_ERR_MSG_MOD(extack, "Invalid pfnum supplied");
return -EOPNOTSUPP;
}
@@ -442,6 +448,8 @@ static int mlx5_sf_vhca_event(struct notifier_block *nb, unsigned long opcode, v
update = mlx5_sf_state_update_check(sf, event->new_vhca_state);
if (update)
sf->hw_state = event->new_vhca_state;
+ trace_mlx5_sf_update_state(table->dev, sf->port_index, sf->controller,
+ sf->hw_fn_id, sf->hw_state);
sf_err:
mutex_unlock(&table->sf_state_lock);
mlx5_sf_table_put(table);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
new file mode 100644
index 000000000000..8bf1cd90930d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/sf_tracepoint.h
@@ -0,0 +1,173 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "sf/vhca_event.h"
+
+TRACE_EVENT(mlx5_sf_add,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id,
+ u32 sfnum),
+ TP_ARGS(dev, port_index, controller, hw_fn_id, sfnum),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->sfnum = sfnum;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id, __entry->sfnum)
+);
+
+TRACE_EVENT(mlx5_sf_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_alloc,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 controller,
+ u16 hw_fn_id,
+ u32 sfnum),
+ TP_ARGS(dev, controller, hw_fn_id, sfnum),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->sfnum = sfnum;
+ ),
+ TP_printk("(%s) controller=%u hw_id=0x%x sfnum=%u\n",
+ __get_str(devname), __entry->controller, __entry->hw_fn_id,
+ __entry->sfnum)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u16 hw_fn_id),
+ TP_ARGS(dev, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
+);
+
+TRACE_EVENT(mlx5_sf_hwc_deferred_free,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u16 hw_fn_id),
+ TP_ARGS(dev, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) hw_id=0x%x\n", __get_str(devname), __entry->hw_fn_id)
+);
+
+DECLARE_EVENT_CLASS(mlx5_sf_state_template,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id)
+);
+
+DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_activate,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id)
+ );
+
+DEFINE_EVENT(mlx5_sf_state_template, mlx5_sf_deactivate,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ u32 port_index,
+ u32 controller,
+ u16 hw_fn_id),
+ TP_ARGS(dev, port_index, controller, hw_fn_id)
+ );
+
+TRACE_EVENT(mlx5_sf_update_state,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ unsigned int port_index,
+ u32 controller,
+ u16 hw_fn_id,
+ u8 state),
+ TP_ARGS(dev, port_index, controller, hw_fn_id, state),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(unsigned int, port_index)
+ __field(u32, controller)
+ __field(u16, hw_fn_id)
+ __field(u8, state)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->port_index = port_index;
+ __entry->controller = controller;
+ __entry->hw_fn_id = hw_fn_id;
+ __entry->state = state;
+ ),
+ TP_printk("(%s) port_index=%u controller=%u hw_id=0x%x state=%u\n",
+ __get_str(devname), __entry->port_index, __entry->controller,
+ __entry->hw_fn_id, __entry->state)
+);
+
+#endif /* _MLX5_SF_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sf_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
new file mode 100644
index 000000000000..fd814a190b8b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/diag/vhca_tracepoint.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mlx5
+
+#if !defined(_MLX5_SF_VHCA_TP_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MLX5_SF_VHCA_TP_
+
+#include <linux/tracepoint.h>
+#include <linux/mlx5/driver.h>
+#include "sf/vhca_event.h"
+
+TRACE_EVENT(mlx5_sf_vhca_event,
+ TP_PROTO(const struct mlx5_core_dev *dev,
+ const struct mlx5_vhca_state_event *event),
+ TP_ARGS(dev, event),
+ TP_STRUCT__entry(__string(devname, dev_name(dev->device))
+ __field(u16, hw_fn_id)
+ __field(u32, sfnum)
+ __field(u8, vhca_state)
+ ),
+ TP_fast_assign(__assign_str(devname, dev_name(dev->device));
+ __entry->hw_fn_id = event->function_id;
+ __entry->sfnum = event->sw_function_id;
+ __entry->vhca_state = event->new_vhca_state;
+ ),
+ TP_printk("(%s) hw_id=0x%x sfnum=%u vhca_state=%d\n",
+ __get_str(devname), __entry->hw_fn_id,
+ __entry->sfnum, __entry->vhca_state)
+);
+
+#endif /* _MLX5_SF_VHCA_TP_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH sf/diag
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vhca_tracepoint
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
index d9c69123c1ab..252d6017387d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/hw_table.c
@@ -8,6 +8,7 @@
#include "ecpf.h"
#include "mlx5_core.h"
#include "eswitch.h"
+#include "diag/sf_tracepoint.h"
struct mlx5_sf_hw {
u32 usr_sfnum;
@@ -142,6 +143,7 @@ int mlx5_sf_hw_table_sf_alloc(struct mlx5_core_dev *dev, u32 controller, u32 usr
goto vhca_err;
}
+ trace_mlx5_sf_hwc_alloc(dev, controller, hw_fn_id, usr_sfnum);
mutex_unlock(&table->table_lock);
return sw_id;
@@ -172,6 +174,7 @@ static void mlx5_sf_hw_table_hwc_sf_free(struct mlx5_core_dev *dev,
mlx5_cmd_dealloc_sf(dev, hwc->start_fn_id + idx);
hwc->sfs[idx].allocated = false;
hwc->sfs[idx].pending_delete = false;
+ trace_mlx5_sf_hwc_free(dev, hwc->start_fn_id + idx);
}
void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller, u16 id)
@@ -195,6 +198,7 @@ void mlx5_sf_hw_table_sf_deferred_free(struct mlx5_core_dev *dev, u32 controller
hwc->sfs[id].allocated = false;
} else {
hwc->sfs[id].pending_delete = true;
+ trace_mlx5_sf_hwc_deferred_free(dev, hw_fn_id);
}
err:
mutex_unlock(&table->table_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
index 28b14b05086f..d908fba968f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/vhca_event.c
@@ -6,6 +6,8 @@
#include "mlx5_core.h"
#include "vhca_event.h"
#include "ecpf.h"
+#define CREATE_TRACE_POINTS
+#include "diag/vhca_tracepoint.h"
struct mlx5_vhca_state_notifier {
struct mlx5_core_dev *dev;
@@ -82,6 +84,7 @@ mlx5_vhca_event_notify(struct mlx5_core_dev *dev, struct mlx5_vhca_state_event *
vhca_state_context.vhca_state);
mlx5_vhca_event_arm(dev, event->function_id);
+ trace_mlx5_sf_vhca_event(dev, event);
blocking_notifier_call_chain(&dev->priv.vhca_state_notifier->n_head, 0, event);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index a5b9f65db23c..07936841ce99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -39,6 +39,7 @@ static const char * const action_type_to_str[] = {
[DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT",
[DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN",
[DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN",
+ [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER",
[DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR",
[DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR",
[DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN",
@@ -513,9 +514,9 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn,
/* If destination is vport we will get the FW flow table
* that recalculates the CS and forwards to the vport.
*/
- ret = mlx5dr_domain_cache_get_recalc_cs_ft_addr(dest_action->vport->dmn,
- dest_action->vport->caps->num,
- final_icm_addr);
+ ret = mlx5dr_domain_get_recalc_cs_ft_addr(dest_action->vport->dmn,
+ dest_action->vport->caps->num,
+ final_icm_addr);
if (ret) {
mlx5dr_err(dmn, "Failed to get FW cs recalc flow table\n");
return ret;
@@ -632,7 +633,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
return -EOPNOTSUPP;
case DR_ACTION_TYP_CTR:
attr.ctr_id = action->ctr->ctr_id +
- action->ctr->offeset;
+ action->ctr->offset;
break;
case DR_ACTION_TYP_TAG:
attr.flow_tag = action->flow_tag->flow_tag;
@@ -669,7 +670,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
attr.hit_gvmi = action->vport->caps->vhca_gvmi;
dest_action = action;
if (rx_rule) {
- if (action->vport->caps->num == WIRE_PORT) {
+ if (action->vport->caps->num == MLX5_VPORT_UPLINK) {
mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n");
return -EOPNOTSUPP;
}
@@ -853,6 +854,7 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action;
bool reformat_req = false;
u32 num_of_ref = 0;
+ u32 ref_act_cnt;
int ret;
int i;
@@ -861,11 +863,14 @@ mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn,
return NULL;
}
- hw_dests = kzalloc(sizeof(*hw_dests) * num_of_dests, GFP_KERNEL);
+ hw_dests = kcalloc(num_of_dests, sizeof(*hw_dests), GFP_KERNEL);
if (!hw_dests)
return NULL;
- ref_actions = kzalloc(sizeof(*ref_actions) * num_of_dests * 2, GFP_KERNEL);
+ if (unlikely(check_mul_overflow(num_of_dests, 2u, &ref_act_cnt)))
+ goto free_hw_dests;
+
+ ref_actions = kcalloc(ref_act_cnt, sizeof(*ref_actions), GFP_KERNEL);
if (!ref_actions)
goto free_hw_dests;
@@ -1747,7 +1752,7 @@ dec_ref:
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id)
{
struct mlx5dr_cmd_vport_cap *vport_cap;
@@ -1767,9 +1772,11 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
return NULL;
}
- vport_cap = mlx5dr_get_vport_cap(&vport_dmn->info.caps, vport);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, vport);
if (!vport_cap) {
- mlx5dr_dbg(dmn, "Failed to get vport %d caps\n", vport);
+ mlx5dr_err(dmn,
+ "Failed to get vport 0x%x caps - vport is disabled or invalid\n",
+ vport);
return NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 56307283bf9b..1d8febed0d76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -195,6 +195,8 @@ int mlx5dr_cmd_query_device(struct mlx5_core_dev *mdev,
caps->roce_min_src_udp = MLX5_CAP_ROCE(mdev, r_roce_min_src_udp_port);
+ caps->is_ecpf = mlx5_core_is_ecpf_esw_manager(mdev);
+
return 0;
}
@@ -272,7 +274,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id)
+ u16 vport)
{
u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {};
void *in_flow_context;
@@ -303,7 +305,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
MLX5_SET(dest_format_struct, in_dests, destination_type,
MLX5_FLOW_DESTINATION_TYPE_VPORT);
- MLX5_SET(dest_format_struct, in_dests, destination_id, vport_id);
+ MLX5_SET(dest_format_struct, in_dests, destination_id, vport);
err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out));
kvfree(in);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 0fe159809ba1..49089cbe897c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -9,48 +9,45 @@
((dmn)->info.caps.dmn_type##_sw_owner_v2 && \
(dmn)->info.caps.sw_format_ver <= MLX5_STEERING_FORMAT_CONNECTX_6DX))
-static int dr_domain_init_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_init_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
/* Per vport cached FW FT for checksum recalculation, this
- * recalculation is needed due to a HW bug.
+ * recalculation is needed due to a HW bug in STEv0.
*/
- dmn->cache.recalc_cs_ft = kcalloc(dmn->info.caps.num_vports,
- sizeof(dmn->cache.recalc_cs_ft[0]),
- GFP_KERNEL);
- if (!dmn->cache.recalc_cs_ft)
- return -ENOMEM;
-
- return 0;
+ xa_init(&dmn->csum_fts_xa);
}
-static void dr_domain_uninit_cache(struct mlx5dr_domain *dmn)
+static void dr_domain_uninit_csum_recalc_fts(struct mlx5dr_domain *dmn)
{
- int i;
-
- for (i = 0; i < dmn->info.caps.num_vports; i++) {
- if (!dmn->cache.recalc_cs_ft[i])
- continue;
+ struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ unsigned long i;
- mlx5dr_fw_destroy_recalc_cs_ft(dmn, dmn->cache.recalc_cs_ft[i]);
+ xa_for_each(&dmn->csum_fts_xa, i, recalc_cs_ft) {
+ if (recalc_cs_ft)
+ mlx5dr_fw_destroy_recalc_cs_ft(dmn, recalc_cs_ft);
}
- kfree(dmn->cache.recalc_cs_ft);
+ xa_destroy(&dmn->csum_fts_xa);
}
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr)
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr)
{
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
+ int ret;
- recalc_cs_ft = dmn->cache.recalc_cs_ft[vport_num];
+ recalc_cs_ft = xa_load(&dmn->csum_fts_xa, vport_num);
if (!recalc_cs_ft) {
- /* Table not in cache, need to allocate a new one */
+ /* Table hasn't been created yet */
recalc_cs_ft = mlx5dr_fw_create_recalc_cs_ft(dmn, vport_num);
if (!recalc_cs_ft)
return -EINVAL;
- dmn->cache.recalc_cs_ft[vport_num] = recalc_cs_ft;
+ ret = xa_err(xa_store(&dmn->csum_fts_xa, vport_num,
+ recalc_cs_ft, GFP_KERNEL));
+ if (ret)
+ return ret;
}
*rx_icm_addr = recalc_cs_ft->rx_icm_addr;
@@ -124,18 +121,39 @@ static void dr_domain_uninit_resources(struct mlx5dr_domain *dmn)
mlx5_core_dealloc_pd(dmn->mdev, dmn->pdn);
}
+static void dr_domain_fill_uplink_caps(struct mlx5dr_domain *dmn,
+ struct mlx5dr_cmd_vport_cap *uplink_vport)
+{
+ struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
+
+ uplink_vport->num = MLX5_VPORT_UPLINK;
+ uplink_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
+ uplink_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
+ uplink_vport->vport_gvmi = 0;
+ uplink_vport->vhca_gvmi = dmn->info.caps.gvmi;
+}
+
static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
- bool other_vport,
- u16 vport_number)
+ u16 vport_number,
+ struct mlx5dr_cmd_vport_cap *vport_caps)
{
- struct mlx5dr_cmd_vport_cap *vport_caps;
+ u16 cmd_vport = vport_number;
+ bool other_vport = true;
int ret;
- vport_caps = &dmn->info.caps.vports_caps[vport_number];
+ if (vport_number == MLX5_VPORT_UPLINK) {
+ dr_domain_fill_uplink_caps(dmn, vport_caps);
+ return 0;
+ }
+
+ if (dmn->info.caps.is_ecpf && vport_number == MLX5_VPORT_ECPF) {
+ other_vport = false;
+ cmd_vport = 0;
+ }
ret = mlx5dr_cmd_query_esw_vport_context(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->icm_address_rx,
&vport_caps->icm_address_tx);
if (ret)
@@ -143,7 +161,7 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
ret = mlx5dr_cmd_query_gvmi(dmn->mdev,
other_vport,
- vport_number,
+ cmd_vport,
&vport_caps->vport_gvmi);
if (ret)
return ret;
@@ -154,27 +172,82 @@ static int dr_domain_query_vport(struct mlx5dr_domain *dmn,
return 0;
}
-static int dr_domain_query_vports(struct mlx5dr_domain *dmn)
+static int dr_domain_query_esw_mngr(struct mlx5dr_domain *dmn)
{
- struct mlx5dr_esw_caps *esw_caps = &dmn->info.caps.esw_caps;
- struct mlx5dr_cmd_vport_cap *wire_vport;
- int vport;
+ return dr_domain_query_vport(dmn,
+ dmn->info.caps.is_ecpf ? MLX5_VPORT_ECPF : 0,
+ &dmn->info.caps.vports.esw_manager_caps);
+}
+
+static struct mlx5dr_cmd_vport_cap *
+dr_domain_add_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
int ret;
- /* Query vports (except wire vport) */
- for (vport = 0; vport < dmn->info.caps.num_esw_ports - 1; vport++) {
- ret = dr_domain_query_vport(dmn, !!vport, vport);
- if (ret)
- return ret;
+ vport_caps = kvzalloc(sizeof(*vport_caps), GFP_KERNEL);
+ if (!vport_caps)
+ return NULL;
+
+ ret = dr_domain_query_vport(dmn, vport, vport_caps);
+ if (ret) {
+ kvfree(vport_caps);
+ return NULL;
}
- /* Last vport is the wire port */
- wire_vport = &dmn->info.caps.vports_caps[vport];
- wire_vport->num = WIRE_PORT;
- wire_vport->icm_address_rx = esw_caps->uplink_icm_address_rx;
- wire_vport->icm_address_tx = esw_caps->uplink_icm_address_tx;
- wire_vport->vport_gvmi = 0;
- wire_vport->vhca_gvmi = dmn->info.caps.gvmi;
+ ret = xa_insert(&caps->vports.vports_caps_xa, vport,
+ vport_caps, GFP_KERNEL);
+ if (ret) {
+ mlx5dr_dbg(dmn, "Couldn't insert new vport into xarray (%d)\n", ret);
+ kvfree(vport_caps);
+ return ERR_PTR(ret);
+ }
+
+ return vport_caps;
+}
+
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport)
+{
+ struct mlx5dr_cmd_caps *caps = &dmn->info.caps;
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ if ((caps->is_ecpf && vport == MLX5_VPORT_ECPF) ||
+ (!caps->is_ecpf && vport == 0))
+ return &caps->vports.esw_manager_caps;
+
+vport_load:
+ vport_caps = xa_load(&caps->vports.vports_caps_xa, vport);
+ if (vport_caps)
+ return vport_caps;
+
+ vport_caps = dr_domain_add_vport_cap(dmn, vport);
+ if (PTR_ERR(vport_caps) == -EBUSY)
+ /* caps were already stored by another thread */
+ goto vport_load;
+
+ return vport_caps;
+}
+
+static void dr_domain_clear_vports(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+ unsigned long i;
+
+ xa_for_each(&dmn->info.caps.vports.vports_caps_xa, i, vport_caps) {
+ vport_caps = xa_erase(&dmn->info.caps.vports.vports_caps_xa, i);
+ kvfree(vport_caps);
+ }
+}
+
+static int dr_domain_query_uplink(struct mlx5dr_domain *dmn)
+{
+ struct mlx5dr_cmd_vport_cap *vport_caps;
+
+ vport_caps = mlx5dr_domain_get_vport_cap(dmn, MLX5_VPORT_UPLINK);
+ if (!vport_caps)
+ return -EINVAL;
return 0;
}
@@ -196,25 +269,29 @@ static int dr_domain_query_fdb_caps(struct mlx5_core_dev *mdev,
dmn->info.caps.esw_rx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_rx;
dmn->info.caps.esw_tx_drop_address = dmn->info.caps.esw_caps.drop_icm_address_tx;
- dmn->info.caps.vports_caps = kcalloc(dmn->info.caps.num_esw_ports,
- sizeof(dmn->info.caps.vports_caps[0]),
- GFP_KERNEL);
- if (!dmn->info.caps.vports_caps)
- return -ENOMEM;
+ xa_init(&dmn->info.caps.vports.vports_caps_xa);
- ret = dr_domain_query_vports(dmn);
+ /* Query eswitch manager and uplink vports only. Rest of the
+ * vports (vport 0, VFs and SFs) will be queried dynamically.
+ */
+
+ ret = dr_domain_query_esw_mngr(dmn);
if (ret) {
- mlx5dr_err(dmn, "Failed to query vports caps (err: %d)", ret);
- goto free_vports_caps;
+ mlx5dr_err(dmn, "Failed to query eswitch manager vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
}
- dmn->info.caps.num_vports = dmn->info.caps.num_esw_ports - 1;
+ ret = dr_domain_query_uplink(dmn);
+ if (ret) {
+ mlx5dr_err(dmn, "Failed to query uplink vport caps (err: %d)", ret);
+ goto free_vports_caps_xa;
+ }
return 0;
-free_vports_caps:
- kfree(dmn->info.caps.vports_caps);
- dmn->info.caps.vports_caps = NULL;
+free_vports_caps_xa:
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
+
return ret;
}
@@ -229,8 +306,6 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
return -EOPNOTSUPP;
}
- dmn->info.caps.num_esw_ports = mlx5_eswitch_get_total_vports(mdev);
-
ret = mlx5dr_cmd_query_device(mdev, &dmn->info.caps);
if (ret)
return ret;
@@ -267,11 +342,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX;
dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX;
- vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0);
- if (!vport_cap) {
- mlx5dr_err(dmn, "Failed to get esw manager vport\n");
- return -ENOENT;
- }
+ vport_cap = &dmn->info.caps.vports.esw_manager_caps;
dmn->info.supp_sw_steering = true;
dmn->info.tx.default_icm_addr = vport_cap->icm_address_tx;
@@ -290,7 +361,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev,
static void dr_domain_caps_uninit(struct mlx5dr_domain *dmn)
{
- kfree(dmn->info.caps.vports_caps);
+ dr_domain_clear_vports(dmn);
+ xa_destroy(&dmn->info.caps.vports.vports_caps_xa);
}
struct mlx5dr_domain *
@@ -333,16 +405,10 @@ mlx5dr_domain_create(struct mlx5_core_dev *mdev, enum mlx5dr_domain_type type)
goto uninit_caps;
}
- ret = dr_domain_init_cache(dmn);
- if (ret) {
- mlx5dr_err(dmn, "Failed initialize domain cache\n");
- goto uninit_resourses;
- }
+ dr_domain_init_csum_recalc_fts(dmn);
return dmn;
-uninit_resourses:
- dr_domain_uninit_resources(dmn);
uninit_caps:
dr_domain_caps_uninit(dmn);
free_domain:
@@ -381,7 +447,7 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
/* make sure resources are not used by the hardware */
mlx5dr_cmd_sync_steering(dmn->mdev);
- dr_domain_uninit_cache(dmn);
+ dr_domain_uninit_csum_recalc_fts(dmn);
dr_domain_uninit_resources(dmn);
dr_domain_caps_uninit(dmn);
mutex_destroy(&dmn->info.tx.mutex);
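
The dr_domain.c rework above drops the preallocated per-vport caps array in favour of an xarray that is filled lazily: look the entry up, create and query it on a miss, and treat xa_insert()'s -EBUSY as "another thread stored it first" by retrying the lookup. A condensed, purely illustrative sketch of that lookup-or-create shape over a generic payload; the struct and function names are made up for the example:

	#include <linux/xarray.h>
	#include <linux/slab.h>

	struct example_entry { int data; };

	static struct example_entry *example_get_or_create(struct xarray *xa,
							   unsigned long index)
	{
		struct example_entry *e;
		int err;

	again:
		e = xa_load(xa, index);		/* fast path: already cached */
		if (e)
			return e;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return NULL;

		err = xa_insert(xa, index, e, GFP_KERNEL);
		if (err) {
			kfree(e);
			if (err == -EBUSY)	/* lost the race: entry exists now */
				goto again;
			return NULL;
		}
		return e;
	}
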
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
index 0d6f86eb248b..68a4c32d5f34 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c
@@ -5,7 +5,7 @@
#include "dr_types.h"
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num)
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num)
{
struct mlx5dr_cmd_create_flow_table_attr ft_attr = {};
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
index 66c24767e3b0..7f6fd9c5e371 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_icm_pool.c
@@ -24,7 +24,7 @@ struct mlx5dr_icm_dm {
};
struct mlx5dr_icm_mr {
- struct mlx5_core_mkey mkey;
+ u32 mkey;
struct mlx5dr_icm_dm dm;
struct mlx5dr_domain *dmn;
size_t length;
@@ -33,7 +33,7 @@ struct mlx5dr_icm_mr {
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
u32 pd, u64 length, u64 start_addr, int mode,
- struct mlx5_core_mkey *mkey)
+ u32 *mkey)
{
u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
@@ -116,7 +116,7 @@ dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
return icm_mr;
free_mkey:
- mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
icm_mr->dm.addr, icm_mr->dm.obj_id);
@@ -130,7 +130,7 @@ static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
struct mlx5dr_icm_dm *dm = &icm_mr->dm;
- mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
+ mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
dm->addr, dm->obj_id);
kvfree(icm_mr);
@@ -252,7 +252,7 @@ dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;
- chunk->rkey = buddy_mem_pool->icm_mr->mkey.key;
+ chunk->rkey = buddy_mem_pool->icm_mr->mkey;
chunk->mr_addr = offset;
chunk->icm_addr =
(uintptr_t)buddy_mem_pool->icm_mr->icm_start_addr + offset;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
index b5409cc021d3..75c775bee351 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c
@@ -875,9 +875,10 @@ uninit_nic_rx:
static int dr_matcher_init(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *mask)
{
+ struct mlx5dr_match_parameters consumed_mask;
struct mlx5dr_table *tbl = matcher->tbl;
struct mlx5dr_domain *dmn = tbl->dmn;
- int ret;
+ int i, ret;
if (matcher->match_criteria >= DR_MATCHER_CRITERIA_MAX) {
mlx5dr_err(dmn, "Invalid match criteria attribute\n");
@@ -889,8 +890,16 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
mlx5dr_err(dmn, "Invalid match size attribute\n");
return -EINVAL;
}
+
+ consumed_mask.match_buf = kzalloc(mask->match_sz, GFP_KERNEL);
+ if (!consumed_mask.match_buf)
+ return -ENOMEM;
+
+ consumed_mask.match_sz = mask->match_sz;
+ memcpy(consumed_mask.match_buf, mask->match_buf, mask->match_sz);
mlx5dr_ste_copy_param(matcher->match_criteria,
- &matcher->mask, mask);
+ &matcher->mask, &consumed_mask,
+ true);
}
switch (dmn->type) {
@@ -909,9 +918,22 @@ static int dr_matcher_init(struct mlx5dr_matcher *matcher,
break;
default:
WARN_ON(true);
- return -EINVAL;
+ ret = -EINVAL;
+ goto free_consumed_mask;
+ }
+
+ /* Check that all mask data was consumed */
+ for (i = 0; i < consumed_mask.match_sz; i++) {
+ if (consumed_mask.match_buf[i]) {
+ mlx5dr_dbg(dmn, "Match param mask contains unsupported parameters\n");
+ ret = -EOPNOTSUPP;
+ goto free_consumed_mask;
+ }
}
+ ret = 0;
+free_consumed_mask:
+ kfree(consumed_mask.match_buf);
return ret;
}
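
dr_matcher_init() above now copies the caller's mask into a scratch buffer, lets mlx5dr_ste_copy_param() clear every field it actually consumed, and then rejects the matcher if any byte of the scratch copy is still set, i.e. if the mask asked for something the STE builders cannot express. A small sketch of that leftover-bytes check with hypothetical names, shown only to make the idea explicit:

	#include <linux/types.h>
	#include <linux/errno.h>

	/* Return 0 if the parser cleared the whole mask, -EOPNOTSUPP otherwise. */
	static int example_check_mask_consumed(const u8 *consumed_mask, size_t sz)
	{
		size_t i;

		for (i = 0; i < sz; i++)
			if (consumed_mask[i])
				return -EOPNOTSUPP;	/* unsupported field left over */
		return 0;
	}
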
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
index aca80efc28fa..6a390e981b09 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
@@ -917,7 +917,7 @@ static bool dr_rule_verify(struct mlx5dr_matcher *matcher,
return false;
}
- mlx5dr_ste_copy_param(matcher->match_criteria, param, value);
+ mlx5dr_ste_copy_param(matcher->match_criteria, param, value, false);
if (match_criteria & DR_MATCHER_CRITERIA_OUTER) {
s_idx = offsetof(struct mlx5dr_match_param, outer);
@@ -1042,10 +1042,10 @@ static bool dr_rule_skip(enum mlx5dr_domain_type domain,
return false;
if (mask->misc.source_port) {
- if (rx && value->misc.source_port != WIRE_PORT)
+ if (rx && value->misc.source_port != MLX5_VPORT_UPLINK)
return true;
- if (!rx && value->misc.source_port == WIRE_PORT)
+ if (!rx && value->misc.source_port == MLX5_VPORT_UPLINK)
return true;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
index bfb14b4b1906..00aef47d7682 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
@@ -350,7 +350,7 @@ static void dr_fill_data_segs(struct mlx5dr_send_ring *send_ring,
send_info->read.length = send_info->write.length;
/* Read into the same write area */
send_info->read.addr = (uintptr_t)send_info->write.addr;
- send_info->read.lkey = send_ring->mr->mkey.key;
+ send_info->read.lkey = send_ring->mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->read.send_flags = IB_SEND_SIGNALED;
@@ -388,7 +388,7 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
(void *)(uintptr_t)send_info->write.addr,
send_info->write.length);
send_info->write.addr = (uintptr_t)send_ring->mr->dma_addr + buff_offset;
- send_info->write.lkey = send_ring->mr->mkey.key;
+ send_info->write.lkey = send_ring->mr->mkey;
}
send_ring->tx_head++;
@@ -848,8 +848,7 @@ static void dr_destroy_cq(struct mlx5_core_dev *mdev, struct mlx5dr_cq *cq)
kfree(cq);
}
-static int
-dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, struct mlx5_core_mkey *mkey)
+static int dr_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey)
{
u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
void *mkc;
@@ -908,7 +907,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
- mlx5_core_destroy_mkey(mdev, &mr->mkey);
+ mlx5_core_destroy_mkey(mdev, mr->mkey);
dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
@@ -1039,7 +1038,7 @@ int mlx5dr_send_ring_force_drain(struct mlx5dr_domain *dmn)
send_info.write.lkey = 0;
/* Using the sync_mr in order to write/read */
send_info.remote_addr = (uintptr_t)send_ring->sync_mr->addr;
- send_info.rkey = send_ring->sync_mr->mkey.key;
+ send_info.rkey = send_ring->sync_mr->mkey;
for (i = 0; i < num_of_sends_req; i++) {
ret = dr_postsend_icm_data(dmn, &send_info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index 1cdfe4fccc7a..219a5474a8a4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -668,101 +668,116 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
return 0;
}
-static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
-{
- spec->gre_c_present = MLX5_GET(fte_match_set_misc, mask, gre_c_present);
- spec->gre_k_present = MLX5_GET(fte_match_set_misc, mask, gre_k_present);
- spec->gre_s_present = MLX5_GET(fte_match_set_misc, mask, gre_s_present);
- spec->source_vhca_port = MLX5_GET(fte_match_set_misc, mask, source_vhca_port);
- spec->source_sqn = MLX5_GET(fte_match_set_misc, mask, source_sqn);
-
- spec->source_port = MLX5_GET(fte_match_set_misc, mask, source_port);
- spec->source_eswitch_owner_vhca_id = MLX5_GET(fte_match_set_misc, mask,
- source_eswitch_owner_vhca_id);
-
- spec->outer_second_prio = MLX5_GET(fte_match_set_misc, mask, outer_second_prio);
- spec->outer_second_cfi = MLX5_GET(fte_match_set_misc, mask, outer_second_cfi);
- spec->outer_second_vid = MLX5_GET(fte_match_set_misc, mask, outer_second_vid);
- spec->inner_second_prio = MLX5_GET(fte_match_set_misc, mask, inner_second_prio);
- spec->inner_second_cfi = MLX5_GET(fte_match_set_misc, mask, inner_second_cfi);
- spec->inner_second_vid = MLX5_GET(fte_match_set_misc, mask, inner_second_vid);
+#define IFC_GET_CLR(typ, p, fld, clear) ({ \
+ void *__p = (p); \
+ u32 __t = MLX5_GET(typ, __p, fld); \
+ if (clear) \
+ MLX5_SET(typ, __p, fld, 0); \
+ __t; \
+})
+
+#define memcpy_and_clear(to, from, len, clear) ({ \
+ void *__to = (to), *__from = (from); \
+ size_t __len = (len); \
+ memcpy(__to, __from, __len); \
+ if (clear) \
+ memset(__from, 0, __len); \
+})
+
+static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec, bool clr)
+{
+ spec->gre_c_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_c_present, clr);
+ spec->gre_k_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_k_present, clr);
+ spec->gre_s_present = IFC_GET_CLR(fte_match_set_misc, mask, gre_s_present, clr);
+ spec->source_vhca_port = IFC_GET_CLR(fte_match_set_misc, mask, source_vhca_port, clr);
+ spec->source_sqn = IFC_GET_CLR(fte_match_set_misc, mask, source_sqn, clr);
+
+ spec->source_port = IFC_GET_CLR(fte_match_set_misc, mask, source_port, clr);
+ spec->source_eswitch_owner_vhca_id =
+ IFC_GET_CLR(fte_match_set_misc, mask, source_eswitch_owner_vhca_id, clr);
+
+ spec->outer_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_prio, clr);
+ spec->outer_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cfi, clr);
+ spec->outer_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, outer_second_vid, clr);
+ spec->inner_second_prio = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_prio, clr);
+ spec->inner_second_cfi = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cfi, clr);
+ spec->inner_second_vid = IFC_GET_CLR(fte_match_set_misc, mask, inner_second_vid, clr);
spec->outer_second_cvlan_tag =
- MLX5_GET(fte_match_set_misc, mask, outer_second_cvlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_second_cvlan_tag, clr);
spec->inner_second_cvlan_tag =
- MLX5_GET(fte_match_set_misc, mask, inner_second_cvlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_second_cvlan_tag, clr);
spec->outer_second_svlan_tag =
- MLX5_GET(fte_match_set_misc, mask, outer_second_svlan_tag);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_second_svlan_tag, clr);
spec->inner_second_svlan_tag =
- MLX5_GET(fte_match_set_misc, mask, inner_second_svlan_tag);
-
- spec->gre_protocol = MLX5_GET(fte_match_set_misc, mask, gre_protocol);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_second_svlan_tag, clr);
+ spec->gre_protocol = IFC_GET_CLR(fte_match_set_misc, mask, gre_protocol, clr);
- spec->gre_key_h = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi);
- spec->gre_key_l = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo);
+ spec->gre_key_h = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.hi, clr);
+ spec->gre_key_l = IFC_GET_CLR(fte_match_set_misc, mask, gre_key.nvgre.lo, clr);
- spec->vxlan_vni = MLX5_GET(fte_match_set_misc, mask, vxlan_vni);
+ spec->vxlan_vni = IFC_GET_CLR(fte_match_set_misc, mask, vxlan_vni, clr);
- spec->geneve_vni = MLX5_GET(fte_match_set_misc, mask, geneve_vni);
- spec->geneve_oam = MLX5_GET(fte_match_set_misc, mask, geneve_oam);
+ spec->geneve_vni = IFC_GET_CLR(fte_match_set_misc, mask, geneve_vni, clr);
+ spec->geneve_oam = IFC_GET_CLR(fte_match_set_misc, mask, geneve_oam, clr);
spec->outer_ipv6_flow_label =
- MLX5_GET(fte_match_set_misc, mask, outer_ipv6_flow_label);
+ IFC_GET_CLR(fte_match_set_misc, mask, outer_ipv6_flow_label, clr);
spec->inner_ipv6_flow_label =
- MLX5_GET(fte_match_set_misc, mask, inner_ipv6_flow_label);
+ IFC_GET_CLR(fte_match_set_misc, mask, inner_ipv6_flow_label, clr);
- spec->geneve_opt_len = MLX5_GET(fte_match_set_misc, mask, geneve_opt_len);
+ spec->geneve_opt_len = IFC_GET_CLR(fte_match_set_misc, mask, geneve_opt_len, clr);
spec->geneve_protocol_type =
- MLX5_GET(fte_match_set_misc, mask, geneve_protocol_type);
+ IFC_GET_CLR(fte_match_set_misc, mask, geneve_protocol_type, clr);
- spec->bth_dst_qp = MLX5_GET(fte_match_set_misc, mask, bth_dst_qp);
+ spec->bth_dst_qp = IFC_GET_CLR(fte_match_set_misc, mask, bth_dst_qp, clr);
}
-static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
+static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec, bool clr)
{
__be32 raw_ip[4];
- spec->smac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16);
+ spec->smac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_47_16, clr);
- spec->smac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0);
- spec->ethertype = MLX5_GET(fte_match_set_lyr_2_4, mask, ethertype);
+ spec->smac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, smac_15_0, clr);
+ spec->ethertype = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ethertype, clr);
- spec->dmac_47_16 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16);
+ spec->dmac_47_16 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_47_16, clr);
- spec->dmac_15_0 = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0);
- spec->first_prio = MLX5_GET(fte_match_set_lyr_2_4, mask, first_prio);
- spec->first_cfi = MLX5_GET(fte_match_set_lyr_2_4, mask, first_cfi);
- spec->first_vid = MLX5_GET(fte_match_set_lyr_2_4, mask, first_vid);
+ spec->dmac_15_0 = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, dmac_15_0, clr);
+ spec->first_prio = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_prio, clr);
+ spec->first_cfi = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_cfi, clr);
+ spec->first_vid = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, first_vid, clr);
- spec->ip_protocol = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_protocol);
- spec->ip_dscp = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_dscp);
- spec->ip_ecn = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_ecn);
- spec->cvlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, cvlan_tag);
- spec->svlan_tag = MLX5_GET(fte_match_set_lyr_2_4, mask, svlan_tag);
- spec->frag = MLX5_GET(fte_match_set_lyr_2_4, mask, frag);
- spec->ip_version = MLX5_GET(fte_match_set_lyr_2_4, mask, ip_version);
- spec->tcp_flags = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_flags);
- spec->tcp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_sport);
- spec->tcp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, tcp_dport);
+ spec->ip_protocol = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_protocol, clr);
+ spec->ip_dscp = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_dscp, clr);
+ spec->ip_ecn = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_ecn, clr);
+ spec->cvlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, cvlan_tag, clr);
+ spec->svlan_tag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, svlan_tag, clr);
+ spec->frag = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, frag, clr);
+ spec->ip_version = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ip_version, clr);
+ spec->tcp_flags = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_flags, clr);
+ spec->tcp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_sport, clr);
+ spec->tcp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, tcp_dport, clr);
- spec->ttl_hoplimit = MLX5_GET(fte_match_set_lyr_2_4, mask, ttl_hoplimit);
+ spec->ttl_hoplimit = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, ttl_hoplimit, clr);
- spec->udp_sport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_sport);
- spec->udp_dport = MLX5_GET(fte_match_set_lyr_2_4, mask, udp_dport);
+ spec->udp_sport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_sport, clr);
+ spec->udp_dport = IFC_GET_CLR(fte_match_set_lyr_2_4, mask, udp_dport, clr);
- memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- src_ipv4_src_ipv6.ipv6_layout.ipv6),
- sizeof(raw_ip));
+ memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip), clr);
spec->src_ip_127_96 = be32_to_cpu(raw_ip[0]);
spec->src_ip_95_64 = be32_to_cpu(raw_ip[1]);
spec->src_ip_63_32 = be32_to_cpu(raw_ip[2]);
spec->src_ip_31_0 = be32_to_cpu(raw_ip[3]);
- memcpy(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
- dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
- sizeof(raw_ip));
+ memcpy_and_clear(raw_ip, MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ sizeof(raw_ip), clr);
spec->dst_ip_127_96 = be32_to_cpu(raw_ip[0]);
spec->dst_ip_95_64 = be32_to_cpu(raw_ip[1]);
@@ -770,104 +785,105 @@ static void dr_ste_copy_mask_spec(char *mask, struct mlx5dr_match_spec *spec)
spec->dst_ip_31_0 = be32_to_cpu(raw_ip[3]);
}
-static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec)
+static void dr_ste_copy_mask_misc2(char *mask, struct mlx5dr_match_misc2 *spec, bool clr)
{
spec->outer_first_mpls_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_label, clr);
spec->outer_first_mpls_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_exp, clr);
spec->outer_first_mpls_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_s_bos, clr);
spec->outer_first_mpls_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls.mpls_ttl, clr);
spec->inner_first_mpls_label =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_label, clr);
spec->inner_first_mpls_exp =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_exp, clr);
spec->inner_first_mpls_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_s_bos, clr);
spec->inner_first_mpls_ttl =
- MLX5_GET(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, inner_first_mpls.mpls_ttl, clr);
spec->outer_first_mpls_over_gre_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_label, clr);
spec->outer_first_mpls_over_gre_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_exp, clr);
spec->outer_first_mpls_over_gre_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_s_bos, clr);
spec->outer_first_mpls_over_gre_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_gre.mpls_ttl, clr);
spec->outer_first_mpls_over_udp_label =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_label, clr);
spec->outer_first_mpls_over_udp_exp =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_exp, clr);
spec->outer_first_mpls_over_udp_s_bos =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_s_bos, clr);
spec->outer_first_mpls_over_udp_ttl =
- MLX5_GET(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl);
- spec->metadata_reg_c_7 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_7);
- spec->metadata_reg_c_6 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_6);
- spec->metadata_reg_c_5 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_5);
- spec->metadata_reg_c_4 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_4);
- spec->metadata_reg_c_3 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_3);
- spec->metadata_reg_c_2 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_2);
- spec->metadata_reg_c_1 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_1);
- spec->metadata_reg_c_0 = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_c_0);
- spec->metadata_reg_a = MLX5_GET(fte_match_set_misc2, mask, metadata_reg_a);
-}
-
-static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec)
-{
- spec->inner_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_seq_num);
- spec->outer_tcp_seq_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_seq_num);
- spec->inner_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, inner_tcp_ack_num);
- spec->outer_tcp_ack_num = MLX5_GET(fte_match_set_misc3, mask, outer_tcp_ack_num);
+ IFC_GET_CLR(fte_match_set_misc2, mask, outer_first_mpls_over_udp.mpls_ttl, clr);
+ spec->metadata_reg_c_7 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_7, clr);
+ spec->metadata_reg_c_6 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_6, clr);
+ spec->metadata_reg_c_5 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_5, clr);
+ spec->metadata_reg_c_4 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_4, clr);
+ spec->metadata_reg_c_3 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_3, clr);
+ spec->metadata_reg_c_2 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_2, clr);
+ spec->metadata_reg_c_1 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_1, clr);
+ spec->metadata_reg_c_0 = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_c_0, clr);
+ spec->metadata_reg_a = IFC_GET_CLR(fte_match_set_misc2, mask, metadata_reg_a, clr);
+}
+
+static void dr_ste_copy_mask_misc3(char *mask, struct mlx5dr_match_misc3 *spec, bool clr)
+{
+ spec->inner_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_seq_num, clr);
+ spec->outer_tcp_seq_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_seq_num, clr);
+ spec->inner_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, inner_tcp_ack_num, clr);
+ spec->outer_tcp_ack_num = IFC_GET_CLR(fte_match_set_misc3, mask, outer_tcp_ack_num, clr);
spec->outer_vxlan_gpe_vni =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_vni);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_vni, clr);
spec->outer_vxlan_gpe_next_protocol =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_next_protocol, clr);
spec->outer_vxlan_gpe_flags =
- MLX5_GET(fte_match_set_misc3, mask, outer_vxlan_gpe_flags);
- spec->icmpv4_header_data = MLX5_GET(fte_match_set_misc3, mask, icmp_header_data);
+ IFC_GET_CLR(fte_match_set_misc3, mask, outer_vxlan_gpe_flags, clr);
+ spec->icmpv4_header_data = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_header_data, clr);
spec->icmpv6_header_data =
- MLX5_GET(fte_match_set_misc3, mask, icmpv6_header_data);
- spec->icmpv4_type = MLX5_GET(fte_match_set_misc3, mask, icmp_type);
- spec->icmpv4_code = MLX5_GET(fte_match_set_misc3, mask, icmp_code);
- spec->icmpv6_type = MLX5_GET(fte_match_set_misc3, mask, icmpv6_type);
- spec->icmpv6_code = MLX5_GET(fte_match_set_misc3, mask, icmpv6_code);
+ IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_header_data, clr);
+ spec->icmpv4_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_type, clr);
+ spec->icmpv4_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmp_code, clr);
+ spec->icmpv6_type = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_type, clr);
+ spec->icmpv6_code = IFC_GET_CLR(fte_match_set_misc3, mask, icmpv6_code, clr);
spec->geneve_tlv_option_0_data =
- MLX5_GET(fte_match_set_misc3, mask, geneve_tlv_option_0_data);
- spec->gtpu_msg_flags = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_flags);
- spec->gtpu_msg_type = MLX5_GET(fte_match_set_misc3, mask, gtpu_msg_type);
- spec->gtpu_teid = MLX5_GET(fte_match_set_misc3, mask, gtpu_teid);
- spec->gtpu_dw_0 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_0);
- spec->gtpu_dw_2 = MLX5_GET(fte_match_set_misc3, mask, gtpu_dw_2);
+ IFC_GET_CLR(fte_match_set_misc3, mask, geneve_tlv_option_0_data, clr);
+ spec->gtpu_teid = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_teid, clr);
+ spec->gtpu_msg_flags = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_flags, clr);
+ spec->gtpu_msg_type = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_msg_type, clr);
+ spec->gtpu_dw_0 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_0, clr);
+ spec->gtpu_dw_2 = IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_dw_2, clr);
spec->gtpu_first_ext_dw_0 =
- MLX5_GET(fte_match_set_misc3, mask, gtpu_first_ext_dw_0);
+ IFC_GET_CLR(fte_match_set_misc3, mask, gtpu_first_ext_dw_0, clr);
}
-static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec)
+static void dr_ste_copy_mask_misc4(char *mask, struct mlx5dr_match_misc4 *spec, bool clr)
{
spec->prog_sample_field_id_0 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_0);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_0, clr);
spec->prog_sample_field_value_0 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_0);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_0, clr);
spec->prog_sample_field_id_1 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_1);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_1, clr);
spec->prog_sample_field_value_1 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_1);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_1, clr);
spec->prog_sample_field_id_2 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_2);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_2, clr);
spec->prog_sample_field_value_2 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_2);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_2, clr);
spec->prog_sample_field_id_3 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_id_3);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_id_3, clr);
spec->prog_sample_field_value_3 =
- MLX5_GET(fte_match_set_misc4, mask, prog_sample_field_value_3);
+ IFC_GET_CLR(fte_match_set_misc4, mask, prog_sample_field_value_3, clr);
}
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
- struct mlx5dr_match_parameters *mask)
+ struct mlx5dr_match_parameters *mask,
+ bool clr)
{
u8 tail_param[MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4)] = {};
u8 *data = (u8 *)mask->match_buf;
@@ -881,7 +897,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = mask->match_buf;
}
- dr_ste_copy_mask_spec(buff, &set_param->outer);
+ dr_ste_copy_mask_spec(buff, &set_param->outer, clr);
}
param_location = sizeof(struct mlx5dr_match_spec);
@@ -894,7 +910,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc(buff, &set_param->misc);
+ dr_ste_copy_mask_misc(buff, &set_param->misc, clr);
}
param_location += sizeof(struct mlx5dr_match_misc);
@@ -907,7 +923,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_spec(buff, &set_param->inner);
+ dr_ste_copy_mask_spec(buff, &set_param->inner, clr);
}
param_location += sizeof(struct mlx5dr_match_spec);
@@ -920,7 +936,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc2(buff, &set_param->misc2);
+ dr_ste_copy_mask_misc2(buff, &set_param->misc2, clr);
}
param_location += sizeof(struct mlx5dr_match_misc2);
@@ -934,7 +950,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc3(buff, &set_param->misc3);
+ dr_ste_copy_mask_misc3(buff, &set_param->misc3, clr);
}
param_location += sizeof(struct mlx5dr_match_misc3);
@@ -948,7 +964,7 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
} else {
buff = data + param_location;
}
- dr_ste_copy_mask_misc4(buff, &set_param->misc4);
+ dr_ste_copy_mask_misc4(buff, &set_param->misc4, clr);
}
}
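
The IFC_GET_CLR()/memcpy_and_clear() helpers introduced above supply the "consume as you copy" half of that check: when clr is true, every field read out of the device-format mask is immediately zeroed in the source buffer, so whatever survives there is by definition unconsumed. A simplified stand-alone version of the same get-and-clear idea for a plain struct field, purely illustrative and not taken from the patch:

	#include <linux/types.h>

	/* Read a field and, if requested, clear it in the source (sketch). */
	#define EXAMPLE_GET_CLR(ptr, field, clear) ({		\
		typeof((ptr)->field) __v = (ptr)->field;	\
		if (clear)					\
			(ptr)->field = 0;			\
		__v;						\
	})

	struct example_mask { unsigned int vid; };

	static unsigned int example_consume_vid(struct example_mask *m, bool clear)
	{
		return EXAMPLE_GET_CLR(m, vid, clear);
	}
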
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 9c704bce3c12..b0649c2877dd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1645,7 +1645,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
bool source_gvmi_set;
@@ -1654,23 +1654,24 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
if (source_gvmi_set) {
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn,
+ misc->source_port);
if (!vport_cap) {
- mlx5dr_err(dmn, "Vport 0x%x is invalid\n",
+ mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index b2481c99da79..cb9cf67b0a02 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -586,9 +586,11 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
} else if (action_type_set[DR_ACTION_TYP_L2_TO_TNL_L3]) {
u8 *d_action;
- dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
- action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
- action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ if (action_sz < DR_STE_ACTION_TRIPLE_SZ) {
+ dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
+ action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
+ action_sz = DR_STE_ACTION_TRIPLE_SZ;
+ }
d_action = action + DR_STE_ACTION_SINGLE_SZ;
dr_ste_v1_set_encap_l3(last_ste,
@@ -1776,7 +1778,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
- struct mlx5dr_cmd_caps *caps;
+ struct mlx5dr_domain *vport_dmn;
u8 *bit_mask = sb->bit_mask;
DR_STE_SET_TAG(src_gvmi_qp_v1, tag, source_qp, misc, source_sqn);
@@ -1784,22 +1786,22 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
dmn->peer_dmn->info.caps.gvmi))
- caps = &dmn->peer_dmn->info.caps;
+ vport_dmn = dmn->peer_dmn;
else
return -EINVAL;
- misc->source_eswitch_owner_vhca_id = 0;
+ misc->source_eswitch_owner_vhca_id = 0;
} else {
- caps = &dmn->info.caps;
+ vport_dmn = dmn;
}
if (!MLX5_GET(ste_src_gvmi_qp_v1, bit_mask, source_gvmi))
return 0;
- vport_cap = mlx5dr_get_vport_cap(caps, misc->source_port);
+ vport_cap = mlx5dr_domain_get_vport_cap(vport_dmn, misc->source_port);
if (!vport_cap) {
mlx5dr_err(dmn, "Vport 0x%x is disabled or invalid\n",
misc->source_port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index b20e8aabb861..3028b776da00 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -4,7 +4,7 @@
#ifndef _DR_TYPES_
#define _DR_TYPES_
-#include <linux/mlx5/driver.h>
+#include <linux/mlx5/vport.h>
#include <linux/refcount.h>
#include "fs_core.h"
#include "wq.h"
@@ -14,7 +14,6 @@
#define DR_RULE_MAX_STES 18
#define DR_ACTION_MAX_STES 5
-#define WIRE_PORT 0xFFFF
#define DR_STE_SVLAN 0x1
#define DR_STE_CVLAN 0x2
#define DR_SZ_MATCH_PARAM (MLX5_ST_SZ_DW_MATCH_PARAM * 4)
@@ -752,9 +751,9 @@ struct mlx5dr_esw_caps {
struct mlx5dr_cmd_vport_cap {
u16 vport_gvmi;
u16 vhca_gvmi;
+ u16 num;
u64 icm_address_rx;
u64 icm_address_tx;
- u32 num;
};
struct mlx5dr_roce_cap {
@@ -763,6 +762,11 @@ struct mlx5dr_roce_cap {
u8 fl_rc_qp_when_roce_enabled:1;
};
+struct mlx5dr_vports {
+ struct mlx5dr_cmd_vport_cap esw_manager_caps;
+ struct xarray vports_caps_xa;
+};
+
struct mlx5dr_cmd_caps {
u16 gvmi;
u64 nic_rx_drop_address;
@@ -786,7 +790,6 @@ struct mlx5dr_cmd_caps {
u8 flex_parser_id_gtpu_first_ext_dw_0;
u8 max_ft_level;
u16 roce_min_src_udp;
- u8 num_esw_ports;
u8 sw_format_ver;
bool eswitch_manager;
bool rx_sw_owner;
@@ -795,11 +798,11 @@ struct mlx5dr_cmd_caps {
u8 rx_sw_owner_v2:1;
u8 tx_sw_owner_v2:1;
u8 fdb_sw_owner_v2:1;
- u32 num_vports;
struct mlx5dr_esw_caps esw_caps;
- struct mlx5dr_cmd_vport_cap *vports_caps;
+ struct mlx5dr_vports vports;
bool prio_tag_required;
struct mlx5dr_roce_cap roce_caps;
+ u8 is_ecpf:1;
u8 isolate_vl_tc:1;
};
@@ -826,10 +829,6 @@ struct mlx5dr_domain_info {
struct mlx5dr_cmd_caps caps;
};
-struct mlx5dr_domain_cache {
- struct mlx5dr_fw_recalc_cs_ft **recalc_cs_ft;
-};
-
struct mlx5dr_domain {
struct mlx5dr_domain *peer_dmn;
struct mlx5_core_dev *mdev;
@@ -841,7 +840,7 @@ struct mlx5dr_domain {
struct mlx5dr_icm_pool *action_icm_pool;
struct mlx5dr_send_ring *send_ring;
struct mlx5dr_domain_info info;
- struct mlx5dr_domain_cache cache;
+ struct xarray csum_fts_xa;
struct mlx5dr_ste_ctx *ste_ctx;
};
@@ -942,7 +941,7 @@ struct mlx5dr_action_dest_tbl {
struct mlx5dr_action_ctr {
u32 ctr_id;
- u32 offeset;
+ u32 offset;
};
struct mlx5dr_action_vport {
@@ -1102,18 +1101,8 @@ mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl)
return true;
}
-static inline struct mlx5dr_cmd_vport_cap *
-mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport)
-{
- if (!caps->vports_caps ||
- (vport >= caps->num_vports && vport != WIRE_PORT))
- return NULL;
-
- if (vport == WIRE_PORT)
- vport = caps->num_vports;
-
- return &caps->vports_caps[vport];
-}
+struct mlx5dr_cmd_vport_cap *
+mlx5dr_domain_get_vport_cap(struct mlx5dr_domain *dmn, u16 vport);
struct mlx5dr_cmd_query_flow_table_details {
u8 status;
@@ -1154,7 +1143,7 @@ int mlx5dr_cmd_set_fte_modify_and_vport(struct mlx5_core_dev *mdev,
u32 table_id,
u32 group_id,
u32 modify_header_id,
- u32 vport_id);
+ u16 vport_id);
int mlx5dr_cmd_del_flow_table_entry(struct mlx5_core_dev *mdev,
u32 table_type,
u32 table_id);
@@ -1241,7 +1230,8 @@ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx,
struct mlx5dr_htbl_connect_info *connect_info);
void mlx5dr_ste_copy_param(u8 match_criteria,
struct mlx5dr_match_param *set_param,
- struct mlx5dr_match_parameters *mask);
+ struct mlx5dr_match_parameters *mask,
+ bool clear);
struct mlx5dr_qp {
struct mlx5_core_dev *mdev;
@@ -1275,7 +1265,7 @@ struct mlx5dr_cq {
struct mlx5dr_mr {
struct mlx5_core_dev *mdev;
- struct mlx5_core_mkey mkey;
+ u32 mkey;
dma_addr_t dma_addr;
void *addr;
size_t size;
@@ -1372,12 +1362,12 @@ struct mlx5dr_fw_recalc_cs_ft {
};
struct mlx5dr_fw_recalc_cs_ft *
-mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u32 vport_num);
+mlx5dr_fw_create_recalc_cs_ft(struct mlx5dr_domain *dmn, u16 vport_num);
void mlx5dr_fw_destroy_recalc_cs_ft(struct mlx5dr_domain *dmn,
struct mlx5dr_fw_recalc_cs_ft *recalc_cs_ft);
-int mlx5dr_domain_cache_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
- u32 vport_num,
- u64 *rx_icm_addr);
+int mlx5dr_domain_get_recalc_cs_ft_addr(struct mlx5dr_domain *dmn,
+ u16 vport_num,
+ u64 *rx_icm_addr);
int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn,
struct mlx5dr_cmd_flow_destination_hw_info *dest,
int num_dest,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 7e58f4e594b7..2632d5ae9bc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -222,7 +222,7 @@ static bool contain_vport_reformat_action(struct mlx5_flow_rule *dst)
dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
}
-#define MLX5_FLOW_CONTEXT_ACTION_MAX 20
+#define MLX5_FLOW_CONTEXT_ACTION_MAX 32
static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct mlx5_flow_group *group,
@@ -625,6 +625,19 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n
mlx5dr_action_destroy(modify_hdr->action.dr_action);
}
+static int
+mlx5_cmd_dr_destroy_match_definer(struct mlx5_flow_root_namespace *ns,
+ int definer_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int mlx5_cmd_dr_create_match_definer(struct mlx5_flow_root_namespace *ns,
+ u16 format_id, u32 *match_mask)
+{
+ return -EOPNOTSUPP;
+}
+
static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *ft,
struct fs_fte *fte)
@@ -727,6 +740,8 @@ static const struct mlx5_flow_cmds mlx5_flow_cmds_dr = {
.packet_reformat_dealloc = mlx5_cmd_dr_packet_reformat_dealloc,
.modify_header_alloc = mlx5_cmd_dr_modify_header_alloc,
.modify_header_dealloc = mlx5_cmd_dr_modify_header_dealloc,
+ .create_match_definer = mlx5_cmd_dr_create_match_definer,
+ .destroy_match_definer = mlx5_cmd_dr_destroy_match_definer,
.set_peer = mlx5_cmd_dr_set_peer,
.create_ns = mlx5_cmd_dr_create_ns,
.destroy_ns = mlx5_cmd_dr_destroy_ns,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index c5a8b1601999..c7c93131b762 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -89,7 +89,7 @@ mlx5dr_action_create_dest_flow_fw_table(struct mlx5dr_domain *domain,
struct mlx5dr_action *
mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain,
- u32 vport, u8 vhca_id_valid,
+ u16 vport, u8 vhca_id_valid,
u16 vhca_id);
struct mlx5dr_action *
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index da481a7c12f4..01e9c412977c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -36,7 +36,7 @@
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"
-int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
+static int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
{
u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {};
u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {};
@@ -44,13 +44,14 @@ int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn)
MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR);
err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out);
- if (!err)
- *uarn = MLX5_GET(alloc_uar_out, out, uar);
- return err;
+ if (err)
+ return err;
+
+ *uarn = MLX5_GET(alloc_uar_out, out, uar);
+ return 0;
}
-EXPORT_SYMBOL(mlx5_cmd_alloc_uar);
-int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
+static int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
{
u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {};
@@ -58,7 +59,6 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn)
MLX5_SET(dealloc_uar_in, in, uar, uarn);
return mlx5_cmd_exec_in(dev, dealloc_uar, in);
}
-EXPORT_SYMBOL(mlx5_cmd_free_uar);
static int uars_per_sys_page(struct mlx5_core_dev *mdev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 4c1440a95ad7..8846d30a380a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -421,19 +421,21 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
{
u32 *out;
int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ int err;
out = kvzalloc(outlen, GFP_KERNEL);
if (!out)
return -ENOMEM;
- mlx5_query_nic_vport_context(mdev, 0, out);
+ err = mlx5_query_nic_vport_context(mdev, 0, out);
+ if (err)
+ goto out;
*system_image_guid = MLX5_GET64(query_nic_vport_context_out, out,
nic_vport_context.system_image_guid);
-
+out:
kvfree(out);
-
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
@@ -1133,19 +1135,20 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
int port_type_cap = MLX5_CAP_GEN(mdev, port_type);
- u64 tmp = 0;
+ u64 tmp;
+ int err;
if (mdev->sys_image_guid)
return mdev->sys_image_guid;
if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH)
- mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
else
- mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
+ err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
- mdev->sys_image_guid = tmp;
+ mdev->sys_image_guid = err ? 0 : tmp;
- return tmp;
+ return mdev->sys_image_guid;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
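
The vport.c changes above stop treating the system image GUID query as infallible: the value is cached in mdev->sys_image_guid only when the firmware query succeeded, and a failed query leaves the cache at zero so the next caller retries. A minimal sketch of that cache-on-success shape; the device struct and query helper here are hypothetical stand-ins:

	#include <linux/types.h>

	struct example_dev { u64 cached_guid; };

	/* Stand-in for the firmware query; fills *guid on success. */
	static int example_query_guid(struct example_dev *dev, u64 *guid)
	{
		*guid = 0x1234;		/* pretend the query worked */
		return 0;
	}

	static u64 example_cached_guid(struct example_dev *dev)
	{
		u64 tmp;
		int err;

		if (dev->cached_guid)
			return dev->cached_guid;

		err = example_query_guid(dev, &tmp);
		dev->cached_guid = err ? 0 : tmp;	/* cache only on success */

		return dev->cached_guid;
	}
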
diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
index 6704f5c1aa32..b990782c1eb1 100644
--- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
+++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c
@@ -75,7 +75,7 @@ static void mlxbf_gige_initial_mac(struct mlxbf_gige *priv)
u64_to_ether_addr(local_mac, mac);
if (is_valid_ether_addr(mac)) {
- ether_addr_copy(priv->netdev->dev_addr, mac);
+ eth_hw_addr_set(priv->netdev, mac);
} else {
/* Provide a random MAC if for some reason the device has
* not been configured with a valid MAC address already.
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
index 7654841a05c2..e6475ea77cd1 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw.h
@@ -19,7 +19,7 @@ struct mlxfw_dev {
static inline
struct device *mlxfw_dev_dev(struct mlxfw_dev *mlxfw_dev)
{
- return mlxfw_dev->devlink->dev;
+ return devlink_to_dev(mlxfw_dev->devlink);
}
#define MLXFW_PRFX "mlxfw: "
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index f080fab3de2b..3fd3812b8f31 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -90,7 +90,6 @@ struct mlxsw_core {
struct devlink_health_reporter *fw_fatal;
} health;
struct mlxsw_env *env;
- bool is_initialized; /* Denotes if core was already initialized. */
unsigned long driver_priv[];
/* driver_priv has to be always the last item */
};
@@ -1975,12 +1974,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
goto err_emad_init;
if (!reload) {
- err = devlink_register(devlink);
- if (err)
- goto err_devlink_register;
- }
-
- if (!reload) {
err = mlxsw_core_params_register(mlxsw_core);
if (err)
goto err_register_params;
@@ -1995,12 +1988,6 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_health_init;
- if (mlxsw_driver->init) {
- err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
- if (err)
- goto err_driver_init;
- }
-
err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
if (err)
goto err_hwmon_init;
@@ -2014,31 +2001,31 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
if (err)
goto err_env_init;
- mlxsw_core->is_initialized = true;
- devlink_params_publish(devlink);
-
- if (!reload)
- devlink_reload_enable(devlink);
+ if (mlxsw_driver->init) {
+ err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info, extack);
+ if (err)
+ goto err_driver_init;
+ }
+ if (!reload) {
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
+ }
return 0;
+err_driver_init:
+ mlxsw_env_fini(mlxsw_core->env);
err_env_init:
mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
-err_driver_init:
mlxsw_core_health_fini(mlxsw_core);
err_health_init:
err_fw_rev_validate:
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
err_register_params:
- if (!reload)
- devlink_unregister(devlink);
-err_devlink_register:
mlxsw_emad_fini(mlxsw_core);
err_emad_init:
kfree(mlxsw_core->lag.mapping);
@@ -2088,7 +2075,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
struct devlink *devlink = priv_to_devlink(mlxsw_core);
if (!reload)
- devlink_reload_disable(devlink);
+ devlink_unregister(devlink);
+
if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
@@ -2099,18 +2087,14 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
return;
}
- devlink_params_unpublish(devlink);
- mlxsw_core->is_initialized = false;
+ if (mlxsw_core->driver->fini)
+ mlxsw_core->driver->fini(mlxsw_core);
mlxsw_env_fini(mlxsw_core->env);
mlxsw_thermal_fini(mlxsw_core->thermal);
mlxsw_hwmon_fini(mlxsw_core->hwmon);
- if (mlxsw_core->driver->fini)
- mlxsw_core->driver->fini(mlxsw_core);
mlxsw_core_health_fini(mlxsw_core);
if (!reload)
mlxsw_core_params_unregister(mlxsw_core);
- if (!reload)
- devlink_unregister(devlink);
mlxsw_emad_fini(mlxsw_core);
kfree(mlxsw_core->lag.mapping);
mlxsw_ports_fini(mlxsw_core, reload);
@@ -2124,7 +2108,6 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
reload_fail_deinit:
mlxsw_core_params_unregister(mlxsw_core);
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
devlink_free(devlink);
}
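The reordering above follows the devlink life-cycle pattern where registration is the very last step of probe and unregistration the very first step of removal; as the hunk shows, devlink_register() is no longer checked for an error in this scheme. A minimal sketch under that assumption (the setup/teardown helpers are hypothetical):

static int example_probe(struct devlink *devlink)
{
	int err;

	err = example_setup_everything(devlink);	/* hypothetical */
	if (err)
		return err;

	/* Publish the instance only once it is fully initialized. */
	devlink_set_features(devlink, DEVLINK_F_RELOAD);
	devlink_register(devlink);
	return 0;
}

static void example_remove(struct devlink *devlink)
{
	/* Hide the instance before tearing anything down. */
	devlink_unregister(devlink);
	example_teardown_everything(devlink);		/* hypothetical */
}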
@@ -2939,49 +2922,6 @@ struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core)
return mlxsw_core->env;
}
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core)
-{
- return mlxsw_core->is_initialized;
-}
-
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module)
-{
- enum mlxsw_reg_pmtm_module_type module_type;
- char pmtm_pl[MLXSW_REG_PMTM_LEN];
- int err;
-
- mlxsw_reg_pmtm_pack(pmtm_pl, module);
- err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtm), pmtm_pl);
- if (err)
- return err;
- mlxsw_reg_pmtm_unpack(pmtm_pl, &module_type);
-
- /* Here we need to get the module width according to the module type. */
-
- switch (module_type) {
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C8X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_OSFP:
- return 8;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_4X:
- case MLXSW_REG_PMTM_MODULE_TYPE_QSFP:
- return 4;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_2X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD:
- case MLXSW_REG_PMTM_MODULE_TYPE_DSFP:
- return 2;
- case MLXSW_REG_PMTM_MODULE_TYPE_C2C1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_BP_1X:
- case MLXSW_REG_PMTM_MODULE_TYPE_SFP:
- return 1;
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL(mlxsw_core_module_max_width);
-
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
const char *buf, size_t size)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h
index 80712dc803d0..12023a550007 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.h
@@ -249,8 +249,6 @@ mlxsw_core_port_devlink_port_get(struct mlxsw_core *mlxsw_core,
u8 local_port);
bool mlxsw_core_port_is_xm(const struct mlxsw_core *mlxsw_core, u8 local_port);
struct mlxsw_env *mlxsw_core_env(const struct mlxsw_core *mlxsw_core);
-bool mlxsw_core_is_initialized(const struct mlxsw_core *mlxsw_core);
-int mlxsw_core_module_max_width(struct mlxsw_core *mlxsw_core, u8 module);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay);
bool mlxsw_core_schedule_work(struct work_struct *work);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
index 3713c45cfa1e..6dd4ae2f45f4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c
@@ -5,6 +5,7 @@
#include <linux/err.h>
#include <linux/ethtool.h>
#include <linux/sfp.h>
+#include <linux/mutex.h>
#include "core.h"
#include "core_env.h"
@@ -14,12 +15,15 @@
struct mlxsw_env_module_info {
u64 module_overheat_counter;
bool is_overheat;
+ int num_ports_mapped;
+ int num_ports_up;
+ enum ethtool_module_power_mode_policy power_mode_policy;
};
struct mlxsw_env {
struct mlxsw_core *core;
u8 module_count;
- spinlock_t module_info_lock; /* Protects 'module_info'. */
+ struct mutex module_info_lock; /* Protects 'module_info'. */
struct mlxsw_env_module_info module_info[];
};
@@ -389,6 +393,205 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
}
EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page);
+static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ mlxsw_reg_pmaos_rst_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module, u32 *flags)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ u32 req = *flags;
+ int err;
+
+ if (!(req & ETH_RESET_PHY) &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT)))
+ return 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].num_ports_up) {
+ netdev_err(netdev, "Cannot reset module when ports using it are administratively up\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (mlxsw_env->module_info[module].num_ports_mapped > 1 &&
+ !(req & (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT))) {
+ netdev_err(netdev, "Cannot reset module without \"phy-shared\" flag when shared by multiple ports\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlxsw_env_module_reset(mlxsw_core, module);
+ if (err) {
+ netdev_err(netdev, "Failed to reset module\n");
+ goto out;
+ }
+
+ *flags &= ~(ETH_RESET_PHY | (ETH_RESET_PHY << ETH_RESET_SHARED_SHIFT));
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_reset_module);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ char mcion_pl[MLXSW_REG_MCION_LEN];
+ u32 status_bits;
+ int err;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ params->policy = mlxsw_env->module_info[module].power_mode_policy;
+
+ mlxsw_reg_mcion_pack(mcion_pl, module);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mcion), mcion_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to retrieve module's power mode");
+ goto out;
+ }
+
+ status_bits = mlxsw_reg_mcion_module_status_bits_get(mcion_pl);
+ if (!(status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK))
+ goto out;
+
+ if (status_bits & MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK)
+ params->mode = ETHTOOL_MODULE_POWER_MODE_LOW;
+ else
+ params->mode = ETHTOOL_MODULE_POWER_MODE_HIGH;
+
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_get_module_power_mode);
+
+static int mlxsw_env_module_enable_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool enable)
+{
+ enum mlxsw_reg_pmaos_admin_status admin_status;
+ char pmaos_pl[MLXSW_REG_PMAOS_LEN];
+
+ mlxsw_reg_pmaos_pack(pmaos_pl, module);
+ admin_status = enable ? MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED :
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED;
+ mlxsw_reg_pmaos_admin_status_set(pmaos_pl, admin_status);
+ mlxsw_reg_pmaos_ase_set(pmaos_pl, true);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
+}
+
+static int mlxsw_env_module_low_power_set(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power)
+{
+ u16 eeprom_override_mask, eeprom_override;
+ char pmmp_pl[MLXSW_REG_PMMP_LEN];
+
+ mlxsw_reg_pmmp_pack(pmmp_pl, module);
+ mlxsw_reg_pmmp_sticky_set(pmmp_pl, true);
+ /* Mask all the bits except low power mode. */
+ eeprom_override_mask = ~MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK;
+ mlxsw_reg_pmmp_eeprom_override_mask_set(pmmp_pl, eeprom_override_mask);
+ eeprom_override = low_power ? MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK :
+ 0;
+ mlxsw_reg_pmmp_eeprom_override_set(pmmp_pl, eeprom_override);
+
+ return mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmmp), pmmp_pl);
+}
+
+static int __mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core,
+ u8 module, bool low_power,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, false);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to disable module");
+ return err;
+ }
+
+ err = mlxsw_env_module_low_power_set(mlxsw_core, module, low_power);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to set module's power mode");
+ goto err_module_low_power_set;
+ }
+
+ err = mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to enable module");
+ goto err_module_enable_set;
+ }
+
+ return 0;
+
+err_module_enable_set:
+ mlxsw_env_module_low_power_set(mlxsw_core, module, !low_power);
+err_module_low_power_set:
+ mlxsw_env_module_enable_set(mlxsw_core, module, true);
+ return err;
+}
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ bool low_power;
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ if (policy != ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH &&
+ policy != ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported power mode policy");
+ return -EOPNOTSUPP;
+ }
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy == policy)
+ goto out;
+
+ /* If any ports are up, we are already in high power mode. */
+ if (mlxsw_env->module_info[module].num_ports_up)
+ goto out_set_policy;
+
+ low_power = policy == ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO;
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, low_power,
+ extack);
+ if (err)
+ goto out;
+
+out_set_policy:
+ mlxsw_env->module_info[module].power_mode_policy = policy;
+out:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_set_module_power_mode);
+
static int mlxsw_env_module_has_temp_sensor(struct mlxsw_core *mlxsw_core,
u8 module,
bool *p_has_temp_sensor)
@@ -482,22 +685,32 @@ static int mlxsw_env_module_temp_event_enable(struct mlxsw_core *mlxsw_core,
return 0;
}
-static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
- char *mtwe_pl, void *priv)
+struct mlxsw_env_module_temp_warn_event {
+ struct mlxsw_env *mlxsw_env;
+ char mtwe_pl[MLXSW_REG_MTWE_LEN];
+ struct work_struct work;
+};
+
+static void mlxsw_env_mtwe_event_work(struct work_struct *work)
{
- struct mlxsw_env *mlxsw_env = priv;
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env;
int i, sensor_warning;
bool is_overheat;
+ event = container_of(work, struct mlxsw_env_module_temp_warn_event,
+ work);
+ mlxsw_env = event->mlxsw_env;
+
for (i = 0; i < mlxsw_env->module_count; i++) {
/* 64-127 of sensor_index are mapped to the port modules
* sequentially (module 0 is mapped to sensor_index 64,
* module 1 to sensor_index 65 and so on)
*/
sensor_warning =
- mlxsw_reg_mtwe_sensor_warning_get(mtwe_pl,
+ mlxsw_reg_mtwe_sensor_warning_get(event->mtwe_pl,
i + MLXSW_REG_MTMP_MODULE_INDEX_MIN);
- spin_lock(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
is_overheat =
mlxsw_env->module_info[i].is_overheat;
@@ -507,13 +720,13 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
* warning OR current state in "no warning" and MTWE
* does not report warning.
*/
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
continue;
} else if (is_overheat && !sensor_warning) {
/* MTWE reports "no warning", turn is_overheat off.
*/
mlxsw_env->module_info[i].is_overheat = false;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
} else {
/* Current state is "no warning" and MTWE reports
* "warning", increase the counter and turn is_overheat
@@ -521,13 +734,32 @@ static void mlxsw_env_mtwe_event_func(const struct mlxsw_reg_info *reg,
*/
mlxsw_env->module_info[i].is_overheat = true;
mlxsw_env->module_info[i].module_overheat_counter++;
- spin_unlock(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
}
}
+
+ kfree(event);
+}
+
+static void
+mlxsw_env_mtwe_listener_func(const struct mlxsw_reg_info *reg, char *mtwe_pl,
+ void *priv)
+{
+ struct mlxsw_env_module_temp_warn_event *event;
+ struct mlxsw_env *mlxsw_env = priv;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (!event)
+ return;
+
+ event->mlxsw_env = mlxsw_env;
+ memcpy(event->mtwe_pl, mtwe_pl, MLXSW_REG_MTWE_LEN);
+ INIT_WORK(&event->work, mlxsw_env_mtwe_event_work);
+ mlxsw_core_schedule_work(&event->work);
}
static const struct mlxsw_listener mlxsw_env_temp_warn_listener =
- MLXSW_EVENTL(mlxsw_env_mtwe_event_func, MTWE, MTWE);
+ MLXSW_EVENTL(mlxsw_env_mtwe_listener_func, MTWE, MTWE);
static int mlxsw_env_temp_warn_event_register(struct mlxsw_core *mlxsw_core)
{
@@ -568,9 +800,9 @@ static void mlxsw_env_pmpe_event_work(struct work_struct *work)
work);
mlxsw_env = event->mlxsw_env;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
mlxsw_env->module_info[event->module].is_overheat = false;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
err = mlxsw_env_module_has_temp_sensor(mlxsw_env->core, event->module,
&has_temp_sensor);
@@ -652,8 +884,10 @@ mlxsw_env_module_oper_state_event_enable(struct mlxsw_core *mlxsw_core,
for (i = 0; i < module_count; i++) {
char pmaos_pl[MLXSW_REG_PMAOS_LEN];
- mlxsw_reg_pmaos_pack(pmaos_pl, i,
- MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_pack(pmaos_pl, i);
+ mlxsw_reg_pmaos_e_set(pmaos_pl,
+ MLXSW_REG_PMAOS_E_GENERATE_EVENT);
+ mlxsw_reg_pmaos_ee_set(pmaos_pl, true);
err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(pmaos), pmaos_pl);
if (err)
return err;
@@ -667,29 +901,110 @@ mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
{
struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
- /* Prevent switch driver from accessing uninitialized data. */
- if (!mlxsw_core_is_initialized(mlxsw_core)) {
- *p_counter = 0;
- return 0;
- }
-
if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
return -EINVAL;
- spin_lock_bh(&mlxsw_env->module_info_lock);
+ mutex_lock(&mlxsw_env->module_info_lock);
*p_counter = mlxsw_env->module_info[module].module_overheat_counter;
- spin_unlock_bh(&mlxsw_env->module_info_lock);
+ mutex_unlock(&mlxsw_env->module_info_lock);
return 0;
}
EXPORT_SYMBOL(mlxsw_env_module_overheat_counter_get);
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped++;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_map);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+ mlxsw_env->module_info[module].num_ports_mapped--;
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_unmap);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+ int err = 0;
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return -EINVAL;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_inc;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_inc;
+
+ /* Transition to high power mode following first port using the module
+ * being put administratively up.
+ */
+ err = __mlxsw_env_set_module_power_mode(mlxsw_core, module, false,
+ NULL);
+ if (err)
+ goto out_unlock;
+
+out_inc:
+ mlxsw_env->module_info[module].num_ports_up++;
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+ return err;
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_up);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module)
+{
+ struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core);
+
+ if (WARN_ON_ONCE(module >= mlxsw_env->module_count))
+ return;
+
+ mutex_lock(&mlxsw_env->module_info_lock);
+
+ mlxsw_env->module_info[module].num_ports_up--;
+
+ if (mlxsw_env->module_info[module].power_mode_policy !=
+ ETHTOOL_MODULE_POWER_MODE_POLICY_AUTO)
+ goto out_unlock;
+
+ if (mlxsw_env->module_info[module].num_ports_up != 0)
+ goto out_unlock;
+
+ /* Transition to low power mode following last port using the module
+ * being put administratively down.
+ */
+ __mlxsw_env_set_module_power_mode(mlxsw_core, module, true, NULL);
+
+out_unlock:
+ mutex_unlock(&mlxsw_env->module_info_lock);
+}
+EXPORT_SYMBOL(mlxsw_env_module_port_down);
+
int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
{
char mgpir_pl[MLXSW_REG_MGPIR_LEN];
struct mlxsw_env *env;
u8 module_count;
- int err;
+ int i, err;
mlxsw_reg_mgpir_pack(mgpir_pl);
err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(mgpir), mgpir_pl);
@@ -702,7 +1017,14 @@ int mlxsw_env_init(struct mlxsw_core *mlxsw_core, struct mlxsw_env **p_env)
if (!env)
return -ENOMEM;
- spin_lock_init(&env->module_info_lock);
+ /* Firmware defaults to high power mode policy where modules are
+ * transitioned to high power mode following plug-in.
+ */
+ for (i = 0; i < module_count; i++)
+ env->module_info[i].power_mode_policy =
+ ETHTOOL_MODULE_POWER_MODE_POLICY_HIGH;
+
+ mutex_init(&env->module_info_lock);
env->core = mlxsw_core;
env->module_count = module_count;
*p_env = env;
@@ -732,6 +1054,7 @@ err_oper_state_event_enable:
err_module_plug_event_register:
mlxsw_env_temp_warn_event_unregister(env);
err_temp_warn_event_register:
+ mutex_destroy(&env->module_info_lock);
kfree(env);
return err;
}
@@ -742,5 +1065,6 @@ void mlxsw_env_fini(struct mlxsw_env *env)
/* Make sure there is no more event work scheduled. */
mlxsw_core_flush_owq();
mlxsw_env_temp_warn_event_unregister(env);
+ mutex_destroy(&env->module_info_lock);
kfree(env);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
index 0bf5bd0f8a7e..da121b1a84b4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h
@@ -24,9 +24,32 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, u8 module,
const struct ethtool_module_eeprom *page,
struct netlink_ext_ack *extack);
+int mlxsw_env_reset_module(struct net_device *netdev,
+ struct mlxsw_core *mlxsw_core, u8 module,
+ u32 *flags);
+
+int
+mlxsw_env_get_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack);
+
+int
+mlxsw_env_set_module_power_mode(struct mlxsw_core *mlxsw_core, u8 module,
+ enum ethtool_module_power_mode_policy policy,
+ struct netlink_ext_ack *extack);
+
int
mlxsw_env_module_overheat_counter_get(struct mlxsw_core *mlxsw_core, u8 module,
u64 *p_counter);
+
+void mlxsw_env_module_port_map(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_unmap(struct mlxsw_core *mlxsw_core, u8 module);
+
+int mlxsw_env_module_port_up(struct mlxsw_core *mlxsw_core, u8 module);
+
+void mlxsw_env_module_port_down(struct mlxsw_core *mlxsw_core, u8 module);
+
int mlxsw_env_init(struct mlxsw_core *core, struct mlxsw_env **p_env);
void mlxsw_env_fini(struct mlxsw_env *env);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h
index e92cadc98128..ab70a873a01a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/item.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/item.h
@@ -270,11 +270,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u8 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u8 val) \
{ \
__mlxsw_item_set8(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -290,13 +292,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get8(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u8 val) \
{ \
@@ -311,11 +313,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u16 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 val) \
{ \
__mlxsw_item_set16(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -331,13 +335,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u16 \
+static inline u16 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get16(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u16 val) \
{ \
@@ -352,11 +356,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u32 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u32 val) \
{ \
__mlxsw_item_set32(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
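For reference, each MLXSW_ITEM32() instantiation now generates a pair of accessors of roughly the following shape (shown schematically for the mcion module field added later in this series; the __ITEM_NAME() indirection is left unexpanded). Marking the helpers __maybe_unused lets a register describe fields whose accessors are not referenced anywhere without tripping unused-function warnings:

static inline u32 __maybe_unused mlxsw_reg_mcion_module_get(const char *buf)
{
	return __mlxsw_item_get32(buf, &__ITEM_NAME(reg, mcion, module), 0);
}

static inline void __maybe_unused mlxsw_reg_mcion_module_set(char *buf, u32 val)
{
	__mlxsw_item_set32(buf, &__ITEM_NAME(reg, mcion, module), 0, val);
}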
@@ -372,13 +378,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u32 \
+static inline u32 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get32(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u32 val) \
{ \
@@ -393,11 +399,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
+static inline u64 __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf) \
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val)\
+static inline void __maybe_unused \
+mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u64 val) \
{ \
__mlxsw_item_set64(buf, &__ITEM_NAME(_type, _cname, _iname), 0, val); \
}
@@ -413,13 +421,13 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bits = _sizebits,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u64 \
+static inline u64 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, unsigned short index)\
{ \
return __mlxsw_item_get64(buf, &__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, unsigned short index, \
u64 val) \
{ \
@@ -433,19 +441,19 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, char *dst) \
{ \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, const char *src) \
{ \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), 0); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf) \
{ \
return __mlxsw_item_data(buf, &__ITEM_NAME(_type, _cname, _iname), 0); \
@@ -460,7 +468,7 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
unsigned short index, \
char *dst) \
@@ -468,7 +476,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_from(const char *buf, \
__mlxsw_item_memcpy_from(buf, dst, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
unsigned short index, \
const char *src) \
@@ -476,7 +484,7 @@ mlxsw_##_type##_##_cname##_##_iname##_memcpy_to(char *buf, \
__mlxsw_item_memcpy_to(buf, src, \
&__ITEM_NAME(_type, _cname, _iname), index); \
} \
-static inline char * \
+static inline char * __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_data(char *buf, unsigned short index) \
{ \
return __mlxsw_item_data(buf, \
@@ -491,14 +499,14 @@ static struct mlxsw_item __ITEM_NAME(_type, _cname, _iname) = { \
.size = {.bytes = _sizebytes,}, \
.name = #_type "_" #_cname "_" #_iname, \
}; \
-static inline u8 \
+static inline u8 __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_get(const char *buf, u16 index) \
{ \
return __mlxsw_item_bit_array_get(buf, \
&__ITEM_NAME(_type, _cname, _iname), \
index); \
} \
-static inline void \
+static inline void __maybe_unused \
mlxsw_##_type##_##_cname##_##_iname##_set(char *buf, u16 index, u8 val) \
{ \
return __mlxsw_item_bit_array_set(buf, \
diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
index d9d56c44e994..5d4dfa5ddbb5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c
@@ -54,8 +54,20 @@ static int mlxsw_m_base_mac_get(struct mlxsw_m *mlxsw_m)
return 0;
}
-static int mlxsw_m_port_dummy_open_stop(struct net_device *dev)
+static int mlxsw_m_port_open(struct net_device *dev)
{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ return mlxsw_env_module_port_up(mlxsw_m->core, mlxsw_m_port->module);
+}
+
+static int mlxsw_m_port_stop(struct net_device *dev)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(dev);
+ struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
+
+ mlxsw_env_module_port_down(mlxsw_m->core, mlxsw_m_port->module);
return 0;
}
@@ -70,8 +82,8 @@ mlxsw_m_port_get_devlink_port(struct net_device *dev)
}
static const struct net_device_ops mlxsw_m_port_netdev_ops = {
- .ndo_open = mlxsw_m_port_dummy_open_stop,
- .ndo_stop = mlxsw_m_port_dummy_open_stop,
+ .ndo_open = mlxsw_m_port_open,
+ .ndo_stop = mlxsw_m_port_stop,
.ndo_get_devlink_port = mlxsw_m_port_get_devlink_port,
};
@@ -124,11 +136,47 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev,
page, extack);
}
+static int mlxsw_m_reset(struct net_device *netdev, u32 *flags)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_reset_module(netdev, core, mlxsw_m_port->module,
+ flags);
+}
+
+static int
+mlxsw_m_get_module_power_mode(struct net_device *netdev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_get_module_power_mode(core, mlxsw_m_port->module,
+ params, extack);
+}
+
+static int
+mlxsw_m_set_module_power_mode(struct net_device *netdev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev);
+ struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core;
+
+ return mlxsw_env_set_module_power_mode(core, mlxsw_m_port->module,
+ params->policy, extack);
+}
+
static const struct ethtool_ops mlxsw_m_port_ethtool_ops = {
.get_drvinfo = mlxsw_m_module_get_drvinfo,
.get_module_info = mlxsw_m_get_module_info,
.get_module_eeprom = mlxsw_m_get_module_eeprom,
.get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page,
+ .reset = mlxsw_m_reset,
+ .get_module_power_mode = mlxsw_m_get_module_power_mode,
+ .set_module_power_mode = mlxsw_m_set_module_power_mode,
};
static int
@@ -152,20 +200,16 @@ static int
mlxsw_m_port_dev_addr_get(struct mlxsw_m_port *mlxsw_m_port)
{
struct mlxsw_m *mlxsw_m = mlxsw_m_port->mlxsw_m;
- struct net_device *dev = mlxsw_m_port->dev;
char ppad_pl[MLXSW_REG_PPAD_LEN];
+ u8 addr[ETH_ALEN];
int err;
mlxsw_reg_ppad_pack(ppad_pl, false, 0);
err = mlxsw_reg_query(mlxsw_m->core, MLXSW_REG(ppad), ppad_pl);
if (err)
return err;
- mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, dev->dev_addr);
- /* The last byte value in base mac address is guaranteed
- * to be such it does not overflow when adding local_port
- * value.
- */
- dev->dev_addr[ETH_ALEN - 1] += mlxsw_m_port->module + 1;
+ mlxsw_reg_ppad_mac_memcpy_from(ppad_pl, addr);
+ eth_hw_addr_gen(mlxsw_m_port->dev, addr, mlxsw_m_port->module + 1);
return 0;
}
@@ -266,6 +310,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
if (WARN_ON_ONCE(module >= max_ports))
return -EINVAL;
+ mlxsw_env_module_port_map(mlxsw_m->core, module);
mlxsw_m->module_to_port[module] = ++mlxsw_m->max_ports;
return 0;
@@ -274,6 +319,7 @@ static int mlxsw_m_port_module_map(struct mlxsw_m *mlxsw_m, u8 local_port,
static void mlxsw_m_port_module_unmap(struct mlxsw_m *mlxsw_m, u8 module)
{
mlxsw_m->module_to_port[module] = -1;
+ mlxsw_env_module_port_unmap(mlxsw_m->core, module);
}
static int mlxsw_m_ports_create(struct mlxsw_m *mlxsw_m)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c
index 13b0259f7ea6..fcace73eae40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c
@@ -353,13 +353,10 @@ static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
struct sk_buff *skb;
int err;
- elem_info->u.rdq.skb = NULL;
skb = netdev_alloc_skb_ip_align(NULL, buf_len);
if (!skb)
return -ENOMEM;
- /* Assume that wqe was previously zeroed. */
-
err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
buf_len, DMA_FROM_DEVICE);
if (err)
@@ -597,21 +594,26 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
struct pci_dev *pdev = mlxsw_pci->pdev;
struct mlxsw_pci_queue_elem_info *elem_info;
struct mlxsw_rx_info rx_info = {};
- char *wqe;
+ char wqe[MLXSW_PCI_WQE_SIZE];
struct sk_buff *skb;
u16 byte_count;
int err;
elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
- skb = elem_info->u.sdq.skb;
- if (!skb)
- return;
- wqe = elem_info->elem;
- mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+ skb = elem_info->u.rdq.skb;
+ memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE);
if (q->consumer_counter++ != consumer_counter_limit)
dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
+ err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
+ if (err) {
+ dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+ goto out;
+ }
+
+ mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
+
if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
rx_info.is_lag = true;
rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
@@ -647,10 +649,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
skb_put(skb, byte_count);
mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
- memset(wqe, 0, q->elem_size);
- err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
- if (err)
- dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
+out:
/* Everything is set up, ring doorbell to pass elem to HW */
q->producer_counter++;
mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 6fbda6ebd590..8d420eb8ade2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4951,7 +4951,7 @@ enum mlxsw_reg_ppcnt_grp {
MLXSW_REG_PPCNT_DISCARD_CNT = 0x6,
MLXSW_REG_PPCNT_PRIO_CNT = 0x10,
MLXSW_REG_PPCNT_TC_CNT = 0x11,
- MLXSW_REG_PPCNT_TC_CONG_TC = 0x13,
+ MLXSW_REG_PPCNT_TC_CONG_CNT = 0x13,
};
/* reg_ppcnt_grp
@@ -5371,7 +5371,7 @@ MLXSW_ITEM64(reg, ppcnt, tx_pause_duration,
MLXSW_ITEM64(reg, ppcnt, tx_pause_transition,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64);
-/* Ethernet Per Traffic Group Counters */
+/* Ethernet Per Traffic Class Counters */
/* reg_ppcnt_tc_transmit_queue
* Contains the transmit queue depth in cells of traffic class
@@ -5398,6 +5398,12 @@ MLXSW_ITEM64(reg, ppcnt, tc_no_buffer_discard_uc,
MLXSW_ITEM64(reg, ppcnt, wred_discard,
MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64);
+/* reg_ppcnt_ecn_marked_tc
+ * Access: RO
+ */
+MLXSW_ITEM64(reg, ppcnt, ecn_marked_tc,
+ MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64);
+
static inline void mlxsw_reg_ppcnt_pack(char *payload, u8 local_port,
enum mlxsw_reg_ppcnt_grp grp,
u8 prio_tc)
@@ -5681,6 +5687,14 @@ static inline void mlxsw_reg_pspa_pack(char *payload, u8 swid, u8 local_port)
MLXSW_REG_DEFINE(pmaos, MLXSW_REG_PMAOS_ID, MLXSW_REG_PMAOS_LEN);
+/* reg_pmaos_rst
+ * Module reset toggle.
+ * Note: Setting reset while the module is plugged in will result in a
+ * transition to the "initializing" operational state.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmaos, rst, 0x00, 31, 1);
+
/* reg_pmaos_slot_index
* Slot index.
* Access: Index
@@ -5693,6 +5707,24 @@ MLXSW_ITEM32(reg, pmaos, slot_index, 0x00, 24, 4);
*/
MLXSW_ITEM32(reg, pmaos, module, 0x00, 16, 8);
+enum mlxsw_reg_pmaos_admin_status {
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED = 1,
+ MLXSW_REG_PMAOS_ADMIN_STATUS_DISABLED = 2,
+ /* If the module is active and then unplugged, or experienced an error
+ * event, the operational status should go to "disabled" and can only
+ * be enabled upon explicit enable command.
+ */
+ MLXSW_REG_PMAOS_ADMIN_STATUS_ENABLED_ONCE = 3,
+};
+
+/* reg_pmaos_admin_status
+ * Module administrative state (the desired state of the module).
+ * Note: To disable a module, all ports associated with the module must be
+ * administratively down first.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, pmaos, admin_status, 0x00, 8, 4);
+
/* reg_pmaos_ase
* Admin state update enable.
* If this bit is set, admin state will be updated based on admin_state field.
@@ -5721,13 +5753,10 @@ enum mlxsw_reg_pmaos_e {
*/
MLXSW_ITEM32(reg, pmaos, e, 0x04, 0, 2);
-static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module,
- enum mlxsw_reg_pmaos_e e)
+static inline void mlxsw_reg_pmaos_pack(char *payload, u8 module)
{
MLXSW_REG_ZERO(pmaos, payload);
mlxsw_reg_pmaos_module_set(payload, module);
- mlxsw_reg_pmaos_e_set(payload, e);
- mlxsw_reg_pmaos_ee_set(payload, true);
}
/* PPLR - Port Physical Loopback Register
@@ -5766,6 +5795,69 @@ static inline void mlxsw_reg_pplr_pack(char *payload, u8 local_port,
MLXSW_REG_PPLR_LB_TYPE_BIT_PHY_LOCAL : 0);
}
+/* PMTDB - Port Module To local DataBase Register
+ * ----------------------------------------------
+ * The PMTDB register allows querying the possible module<->local port
+ * mapping that can be used in PMLP. It does not represent the actual/current
+ * mapping of a local port to a module; that is defined only by PMLP.
+ */
+#define MLXSW_REG_PMTDB_ID 0x501A
+#define MLXSW_REG_PMTDB_LEN 0x40
+
+MLXSW_REG_DEFINE(pmtdb, MLXSW_REG_PMTDB_ID, MLXSW_REG_PMTDB_LEN);
+
+/* reg_pmtdb_slot_index
+ * Slot index (0: Main board).
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, slot_index, 0x00, 24, 4);
+
+/* reg_pmtdb_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, module, 0x00, 16, 8);
+
+/* reg_pmtdb_ports_width
+ * Port's width
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, ports_width, 0x00, 12, 4);
+
+/* reg_pmtdb_num_ports
+ * Number of ports in a single module (split/breakout)
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pmtdb, num_ports, 0x00, 8, 4);
+
+enum mlxsw_reg_pmtdb_status {
+ MLXSW_REG_PMTDB_STATUS_SUCCESS,
+};
+
+/* reg_pmtdb_status
+ * Status
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pmtdb, status, 0x00, 0, 4);
+
+/* reg_pmtdb_port_num
+ * The local_port value which can be assigned to the module.
+ * In case of more than one port, port<x> represents the /<x> port of
+ * the module.
+ * Access: RO
+ */
+MLXSW_ITEM16_INDEXED(reg, pmtdb, port_num, 0x04, 0, 8, 0x02, 0x00, false);
+
+static inline void mlxsw_reg_pmtdb_pack(char *payload, u8 slot_index, u8 module,
+ u8 ports_width, u8 num_ports)
+{
+ MLXSW_REG_ZERO(pmtdb, payload);
+ mlxsw_reg_pmtdb_slot_index_set(payload, slot_index);
+ mlxsw_reg_pmtdb_module_set(payload, module);
+ mlxsw_reg_pmtdb_ports_width_set(payload, ports_width);
+ mlxsw_reg_pmtdb_num_ports_set(payload, num_ports);
+}
+
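A hypothetical usage sketch (no caller is part of this hunk): query which local ports a module could be mapped to before programming PMLP, reusing the query helper already used elsewhere in the driver.

static int example_pmtdb_first_port(struct mlxsw_core *core, u8 slot_index,
				    u8 module, u8 ports_width, u8 num_ports,
				    u16 *p_first_port)
{
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int err;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, slot_index, module, ports_width,
			     num_ports);
	err = mlxsw_reg_query(core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err)
		return err;

	/* port_num<0> holds the first local port the module may map to. */
	*p_first_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, 0);
	return 0;
}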
/* PMPE - Port Module Plug/Unplug Event Register
* ---------------------------------------------
* This register reports any operational status change of a module.
@@ -5860,67 +5952,100 @@ static inline void mlxsw_reg_pddr_pack(char *payload, u8 local_port,
mlxsw_reg_pddr_page_select_set(payload, page_select);
}
-/* PMTM - Port Module Type Mapping Register
- * ----------------------------------------
- * The PMTM allows query or configuration of module types.
+/* PMMP - Port Module Memory Map Properties Register
+ * -------------------------------------------------
+ * The PMMP register allows overriding the module memory map advertisement.
+ * The register can only be set when the module is disabled by PMAOS register.
*/
-#define MLXSW_REG_PMTM_ID 0x5067
-#define MLXSW_REG_PMTM_LEN 0x10
+#define MLXSW_REG_PMMP_ID 0x5044
+#define MLXSW_REG_PMMP_LEN 0x2C
-MLXSW_REG_DEFINE(pmtm, MLXSW_REG_PMTM_ID, MLXSW_REG_PMTM_LEN);
+MLXSW_REG_DEFINE(pmmp, MLXSW_REG_PMMP_ID, MLXSW_REG_PMMP_LEN);
-/* reg_pmtm_module
+/* reg_pmmp_module
* Module number.
* Access: Index
*/
-MLXSW_ITEM32(reg, pmtm, module, 0x00, 16, 8);
+MLXSW_ITEM32(reg, pmmp, module, 0x00, 16, 8);
-enum mlxsw_reg_pmtm_module_type {
- /* Backplane with 4 lanes */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_4X,
- /* QSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP,
- /* SFP */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP,
- /* Backplane with single lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_1X = 4,
- /* Backplane with two lane */
- MLXSW_REG_PMTM_MODULE_TYPE_BP_2X = 8,
- /* Chip2Chip4x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C4X = 10,
- /* Chip2Chip2x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C2X,
- /* Chip2Chip1x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C1X,
- /* QSFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_QSFP_DD = 14,
- /* OSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_OSFP,
- /* SFP-DD */
- MLXSW_REG_PMTM_MODULE_TYPE_SFP_DD,
- /* DSFP */
- MLXSW_REG_PMTM_MODULE_TYPE_DSFP,
- /* Chip2Chip8x */
- MLXSW_REG_PMTM_MODULE_TYPE_C2C8X,
+/* reg_pmmp_sticky
+ * When set, will keep eeprom_override values after plug-out event.
+ * Access: OP
+ */
+MLXSW_ITEM32(reg, pmmp, sticky, 0x00, 0, 1);
+
+/* reg_pmmp_eeprom_override_mask
+ * Write mask bit (negative polarity).
+ * 0 - Allow write
+ * 1 - Ignore write
+ * On write, indicates which of the bits from eeprom_override field are
+ * updated.
+ * Access: WO
+ */
+MLXSW_ITEM32(reg, pmmp, eeprom_override_mask, 0x04, 16, 16);
+
+enum {
+ /* Set module to low power mode */
+ MLXSW_REG_PMMP_EEPROM_OVERRIDE_LOW_POWER_MASK = BIT(8),
};
-/* reg_pmtm_module_type
- * Module type.
+/* reg_pmmp_eeprom_override
+ * Override / ignore EEPROM advertisement properties bitmask
* Access: RW
*/
-MLXSW_ITEM32(reg, pmtm, module_type, 0x04, 0, 4);
+MLXSW_ITEM32(reg, pmmp, eeprom_override, 0x04, 0, 16);
-static inline void mlxsw_reg_pmtm_pack(char *payload, u8 module)
+static inline void mlxsw_reg_pmmp_pack(char *payload, u8 module)
{
- MLXSW_REG_ZERO(pmtm, payload);
- mlxsw_reg_pmtm_module_set(payload, module);
+ MLXSW_REG_ZERO(pmmp, payload);
+ mlxsw_reg_pmmp_module_set(payload, module);
}
-static inline void
-mlxsw_reg_pmtm_unpack(char *payload,
- enum mlxsw_reg_pmtm_module_type *module_type)
+/* PLLP - Port Local port to Label Port mapping Register
+ * -----------------------------------------------------
+ * The PLLP register returns the mapping from Local Port into Label Port.
+ */
+#define MLXSW_REG_PLLP_ID 0x504A
+#define MLXSW_REG_PLLP_LEN 0x10
+
+MLXSW_REG_DEFINE(pllp, MLXSW_REG_PLLP_ID, MLXSW_REG_PLLP_LEN);
+
+/* reg_pllp_local_port
+ * Local port number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, pllp, local_port, 0x00, 16, 8);
+
+/* reg_pllp_label_port
+ * Front panel label of the port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, label_port, 0x00, 0, 8);
+
+/* reg_pllp_split_num
+ * Label split mapping for local_port.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, split_num, 0x04, 0, 4);
+
+/* reg_pllp_slot_index
+ * Slot index (0: Main board).
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, pllp, slot_index, 0x08, 0, 4);
+
+static inline void mlxsw_reg_pllp_pack(char *payload, u8 local_port)
{
- *module_type = mlxsw_reg_pmtm_module_type_get(payload);
+ MLXSW_REG_ZERO(pllp, payload);
+ mlxsw_reg_pllp_local_port_set(payload, local_port);
+}
+
+static inline void mlxsw_reg_pllp_unpack(char *payload, u8 *label_port,
+ u8 *split_num, u8 *slot_index)
+{
+ *label_port = mlxsw_reg_pllp_label_port_get(payload);
+ *split_num = mlxsw_reg_pllp_split_num_get(payload);
+ *slot_index = mlxsw_reg_pllp_slot_index_get(payload);
}
/* HTGT - Host Trap Group Table
@@ -6401,6 +6526,12 @@ MLXSW_ITEM32(reg, ritr, mtu, 0x34, 0, 16);
*/
MLXSW_ITEM32(reg, ritr, if_swid, 0x08, 24, 8);
+/* reg_ritr_if_mac_profile_id
+ * MAC msb profile ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, if_mac_profile_id, 0x10, 16, 4);
+
/* reg_ritr_if_mac
* Router interface MAC address.
* In Spectrum, all MAC addresses must have the same 38 MSBits.
@@ -6664,6 +6795,23 @@ mlxsw_reg_ritr_loopback_ipip4_pack(char *payload,
mlxsw_reg_ritr_loopback_ipip_usip4_set(payload, usip);
}
+static inline void
+mlxsw_reg_ritr_loopback_ipip6_pack(char *payload,
+ enum mlxsw_reg_ritr_loopback_ipip_type ipip_type,
+ enum mlxsw_reg_ritr_loopback_ipip_options options,
+ u16 uvr_id, u16 underlay_rif,
+ const struct in6_addr *usip, u32 gre_key)
+{
+ enum mlxsw_reg_ritr_loopback_protocol protocol =
+ MLXSW_REG_RITR_LOOPBACK_PROTOCOL_IPIP_IPV6;
+
+ mlxsw_reg_ritr_loopback_protocol_set(payload, protocol);
+ mlxsw_reg_ritr_loopback_ipip_common_pack(payload, ipip_type, options,
+ uvr_id, underlay_rif, gre_key);
+ mlxsw_reg_ritr_loopback_ipip_usip6_memcpy_to(payload,
+ (const char *)usip);
+}
+
/* RTAR - Router TCAM Allocation Register
* --------------------------------------
* This register is used for allocation of regions in the TCAM table.
@@ -6932,6 +7080,12 @@ static inline void mlxsw_reg_ratr_ipip4_entry_pack(char *payload, u32 ipv4_udip)
mlxsw_reg_ratr_ipip_ipv4_udip_set(payload, ipv4_udip);
}
+static inline void mlxsw_reg_ratr_ipip6_entry_pack(char *payload, u32 ipv6_ptr)
+{
+ mlxsw_reg_ratr_ipip_type_set(payload, MLXSW_REG_RATR_IPIP_TYPE_IPV6);
+ mlxsw_reg_ratr_ipip_ipv6_ptr_set(payload, ipv6_ptr);
+}
+
static inline void mlxsw_reg_ratr_counter_pack(char *payload, u64 counter_index,
bool counter_enable)
{
@@ -8117,19 +8271,71 @@ static inline void mlxsw_reg_rtdp_pack(char *payload,
}
static inline void
-mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
- enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
- unsigned int type_check, bool gre_key_check,
- u32 ipv4_usip, u32 expected_gre_key)
+mlxsw_reg_rtdp_ipip_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 expected_gre_key)
{
mlxsw_reg_rtdp_ipip_irif_set(payload, irif);
mlxsw_reg_rtdp_ipip_sip_check_set(payload, sip_check);
mlxsw_reg_rtdp_ipip_type_check_set(payload, type_check);
mlxsw_reg_rtdp_ipip_gre_key_check_set(payload, gre_key_check);
- mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
mlxsw_reg_rtdp_ipip_expected_gre_key_set(payload, expected_gre_key);
}
+static inline void
+mlxsw_reg_rtdp_ipip4_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv4_usip, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv4_usip_set(payload, ipv4_usip);
+}
+
+static inline void
+mlxsw_reg_rtdp_ipip6_pack(char *payload, u16 irif,
+ enum mlxsw_reg_rtdp_ipip_sip_check sip_check,
+ unsigned int type_check, bool gre_key_check,
+ u32 ipv6_usip_ptr, u32 expected_gre_key)
+{
+ mlxsw_reg_rtdp_ipip_pack(payload, irif, sip_check, type_check,
+ gre_key_check, expected_gre_key);
+ mlxsw_reg_rtdp_ipip_ipv6_usip_ptr_set(payload, ipv6_usip_ptr);
+}
+
+/* RIPS - Router IP version Six Register
+ * -------------------------------------
+ * The RIPS register is used to store IPv6 addresses for use by the NVE and
+ * IPinIP.
+ */
+#define MLXSW_REG_RIPS_ID 0x8021
+#define MLXSW_REG_RIPS_LEN 0x14
+
+MLXSW_REG_DEFINE(rips, MLXSW_REG_RIPS_ID, MLXSW_REG_RIPS_LEN);
+
+/* reg_rips_index
+ * Index to IPv6 address.
+ * For Spectrum, the index is to the KVD linear.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, rips, index, 0x00, 0, 24);
+
+/* reg_rips_ipv6
+ * IPv6 address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, rips, ipv6, 0x04, 16);
+
+static inline void mlxsw_reg_rips_pack(char *payload, u32 index,
+ const struct in6_addr *ipv6)
+{
+ MLXSW_REG_ZERO(rips, payload);
+ mlxsw_reg_rips_index_set(payload, index);
+ mlxsw_reg_rips_ipv6_memcpy_to(payload, (const char *)ipv6);
+}
+
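A hypothetical write sketch (index allocation is out of scope here): store an IPv6 underlay address at a given index so that IP-in-IP/NVE entries can later refer to it by pointer.

static int example_rips_write(struct mlxsw_core *core, u32 index,
			      const struct in6_addr *addr6)
{
	char rips_pl[MLXSW_REG_RIPS_LEN];

	mlxsw_reg_rips_pack(rips_pl, index, addr6);
	return mlxsw_reg_write(core, MLXSW_REG(rips), rips_pl);
}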
/* RATRAD - Router Adjacency Table Activity Dump Register
* ------------------------------------------------------
* The RATRAD register is used to dump and optionally clear activity bits of
@@ -10208,6 +10414,39 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MCION - Management Cable IO and Notifications Register
+ * ------------------------------------------------------
+ * The MCION register is used to query transceiver modules' IO pins and other
+ * notifications.
+ */
+#define MLXSW_REG_MCION_ID 0x9052
+#define MLXSW_REG_MCION_LEN 0x18
+
+MLXSW_REG_DEFINE(mcion, MLXSW_REG_MCION_ID, MLXSW_REG_MCION_LEN);
+
+/* reg_mcion_module
+ * Module number.
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mcion, module, 0x00, 16, 8);
+
+enum {
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_PRESENT_MASK = BIT(0),
+ MLXSW_REG_MCION_MODULE_STATUS_BITS_LOW_POWER_MASK = BIT(8),
+};
+
+/* reg_mcion_module_status_bits
+ * Module IO status as defined by SFF.
+ * Access: RO
+ */
+MLXSW_ITEM32(reg, mcion, module_status_bits, 0x04, 0, 16);
+
+static inline void mlxsw_reg_mcion_pack(char *payload, u8 module)
+{
+ MLXSW_REG_ZERO(mcion, payload);
+ mlxsw_reg_mcion_module_set(payload, module);
+}
+
/* MTPPS - Management Pulse Per Second Register
* --------------------------------------------
* This register provides the device PPS capabilities, configure the PPS in and
@@ -12200,9 +12439,11 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(pspa),
MLXSW_REG(pmaos),
MLXSW_REG(pplr),
+ MLXSW_REG(pmtdb),
MLXSW_REG(pmpe),
MLXSW_REG(pddr),
- MLXSW_REG(pmtm),
+ MLXSW_REG(pmmp),
+ MLXSW_REG(pllp),
MLXSW_REG(htgt),
MLXSW_REG(hpkt),
MLXSW_REG(rgcr),
@@ -12210,6 +12451,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(rtar),
MLXSW_REG(ratr),
MLXSW_REG(rtdp),
+ MLXSW_REG(rips),
MLXSW_REG(ratrad),
MLXSW_REG(rdpm),
MLXSW_REG(ricnt),
@@ -12249,6 +12491,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mgir),
MLXSW_REG(mrsr),
MLXSW_REG(mlcr),
+ MLXSW_REG(mcion),
MLXSW_REG(mtpps),
MLXSW_REG(mtutc),
MLXSW_REG(mpsc),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h
index a56c9e19a390..c7fc650608eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/resources.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h
@@ -25,9 +25,6 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_SYSTEM_PORT,
MLXSW_RES_ID_MAX_LAG,
MLXSW_RES_ID_MAX_LAG_MEMBERS,
- MLXSW_RES_ID_LOCAL_PORTS_IN_1X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_2X,
- MLXSW_RES_ID_LOCAL_PORTS_IN_4X,
MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER,
MLXSW_RES_ID_CELL_SIZE,
MLXSW_RES_ID_MAX_HEADROOM_SIZE,
@@ -52,6 +49,7 @@ enum mlxsw_res_id {
MLXSW_RES_ID_MAX_VRS,
MLXSW_RES_ID_MAX_RIFS,
MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES,
+ MLXSW_RES_ID_MAX_RIF_MAC_PROFILES,
MLXSW_RES_ID_MAX_LPM_TREES,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4,
MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6,
@@ -84,9 +82,6 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502,
[MLXSW_RES_ID_MAX_LAG] = 0x2520,
[MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_1X] = 0x2610,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_2X] = 0x2611,
- [MLXSW_RES_ID_LOCAL_PORTS_IN_4X] = 0x2612,
[MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */
[MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */
[MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */
@@ -111,6 +106,7 @@ static u16 mlxsw_res_ids[] = {
[MLXSW_RES_ID_MAX_VRS] = 0x2C01,
[MLXSW_RES_ID_MAX_RIFS] = 0x2C02,
[MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10,
+ [MLXSW_RES_ID_MAX_RIF_MAC_PROFILES] = 0x2C14,
[MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02,
[MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 250c5a24264d..5925db386b1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -47,7 +47,7 @@
#define MLXSW_SP1_FWREV_MAJOR 13
#define MLXSW_SP1_FWREV_MINOR 2008
-#define MLXSW_SP1_FWREV_SUBMINOR 2406
+#define MLXSW_SP1_FWREV_SUBMINOR 3326
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
@@ -64,7 +64,7 @@ static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
#define MLXSW_SP2_FWREV_MAJOR 29
#define MLXSW_SP2_FWREV_MINOR 2008
-#define MLXSW_SP2_FWREV_SUBMINOR 2406
+#define MLXSW_SP2_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
.major = MLXSW_SP2_FWREV_MAJOR,
@@ -79,7 +79,7 @@ static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
#define MLXSW_SP3_FWREV_MAJOR 30
#define MLXSW_SP3_FWREV_MINOR 2008
-#define MLXSW_SP3_FWREV_SUBMINOR 2406
+#define MLXSW_SP3_FWREV_SUBMINOR 3326
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
.major = MLXSW_SP3_FWREV_MAJOR,
@@ -316,11 +316,11 @@ static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
- ether_addr_copy(addr, mlxsw_sp->base_mac);
- addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
- return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
+ eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
+ mlxsw_sp_port->local_port);
+ return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
+ mlxsw_sp_port->dev->dev_addr);
}
static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
@@ -351,12 +351,12 @@ static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
}
-static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
+static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 swid)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pspa_pl[MLXSW_REG_PSPA_LEN];
- mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
+ mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
}
@@ -529,55 +529,80 @@ mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u8 local_port,
port_mapping->module = module;
port_mapping->width = width;
+ port_mapping->module_width = width;
port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
return 0;
}
-static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port)
+static int
+mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ const struct mlxsw_sp_port_mapping *port_mapping)
{
- struct mlxsw_sp_port_mapping *port_mapping = &mlxsw_sp_port->mapping;
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- int i;
+ int i, err;
+
+ mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->module);
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
for (i = 0; i < port_mapping->width; i++) {
mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
}
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ if (err)
+ goto err_pmlp_write;
+ return 0;
+
+err_pmlp_write:
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->module);
+ return err;
}
-static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
+static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u8 local_port,
+ u8 module)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char pmlp_pl[MLXSW_REG_PMLP_LEN];
- mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
+ mlxsw_env_module_port_unmap(mlxsw_sp->core, module);
}
static int mlxsw_sp_port_open(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
int err;
- err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ err = mlxsw_env_module_port_up(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
if (err)
return err;
+ err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
+ if (err)
+ goto err_port_admin_status_set;
netif_start_queue(dev);
return 0;
+
+err_port_admin_status_set:
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return err;
}
static int mlxsw_sp_port_stop(struct net_device *dev)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
netif_stop_queue(dev);
- return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
+ mlxsw_env_module_port_down(mlxsw_sp->core,
+ mlxsw_sp_port->mapping.module);
+ return 0;
}
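mlxsw_sp_port_open()/_stop() now bracket the admin state change with mlxsw_env_module_port_up()/_down(), unwinding in reverse order when enabling fails. A standalone sketch of that acquire/enable, disable/release ordering (the helpers are stand-ins, not mlxsw APIs):

#include <stdio.h>

static int module_port_up(void)    { puts("module port up");   return 0; }
static void module_port_down(void) { puts("module port down"); }
static int admin_enable(void)      { puts("admin up"); return -1; /* simulate failure */ }

static int port_open(void)
{
	int err;

	err = module_port_up();		/* claim the module port first */
	if (err)
		return err;

	err = admin_enable();		/* then enable the port */
	if (err)
		goto err_admin_enable;

	return 0;

err_admin_enable:
	module_port_down();		/* unwind in reverse order */
	return err;
}

int main(void)
{
	if (port_open())
		puts("open failed, module port released");
	return 0;
}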
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
@@ -649,7 +674,7 @@ static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -799,12 +824,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
for (i = 0; i < TC_MAX_QUEUE; i++) {
err = mlxsw_sp_port_get_stats_raw(dev,
- MLXSW_REG_PPCNT_TC_CONG_TC,
+ MLXSW_REG_PPCNT_TC_CONG_CNT,
i, ppcnt_pl);
- if (!err)
- xstats->wred_drop[i] =
- mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ if (err)
+ goto tc_cnt;
+ xstats->wred_drop[i] =
+ mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
+ xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);
+
+tc_cnt:
err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
i, ppcnt_pl);
if (err)
@@ -1010,6 +1039,8 @@ static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
+ case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
+ return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
default:
return -EOPNOTSUPP;
}
@@ -1442,29 +1473,68 @@ mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
+static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
+ u8 local_port, u8 *port_number,
+ u8 *split_port_subnumber,
+ u8 *slot_index)
+{
+ char pllp_pl[MLXSW_REG_PLLP_LEN];
+ int err;
+
+ mlxsw_reg_pllp_pack(pllp_pl, local_port);
+ err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
+ if (err)
+ return err;
+ mlxsw_reg_pllp_unpack(pllp_pl, port_number,
+ split_port_subnumber, slot_index);
+ return 0;
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
- u8 split_base_local_port,
+ bool split,
struct mlxsw_sp_port_mapping *port_mapping)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- bool split = !!split_base_local_port;
struct mlxsw_sp_port *mlxsw_sp_port;
u32 lanes = port_mapping->width;
+ u8 split_port_subnumber;
struct net_device *dev;
+ u8 port_number;
+ u8 slot_index;
bool splittable;
int err;
+ err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
+ local_port);
+ return err;
+ }
+
+ err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ local_port);
+ goto err_port_swid_set;
+ }
+
+ err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
+ &split_port_subnumber, &slot_index);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
+ local_port);
+ goto err_port_label_info_get;
+ }
+
splittable = lanes > 1 && !split;
err = mlxsw_core_port_init(mlxsw_sp->core, local_port,
- port_mapping->module + 1, split,
- port_mapping->lane / lanes,
- splittable, lanes,
- mlxsw_sp->base_mac,
+ port_number, split, split_port_subnumber,
+ splittable, lanes, mlxsw_sp->base_mac,
sizeof(mlxsw_sp->base_mac));
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
local_port);
- return err;
+ goto err_core_port_init;
}
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1480,7 +1550,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
mlxsw_sp_port->local_port = local_port;
mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
mlxsw_sp_port->split = split;
- mlxsw_sp_port->split_base_local_port = split_base_local_port;
mlxsw_sp_port->mapping = *port_mapping;
mlxsw_sp_port->link.autoneg = 1;
INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
@@ -1498,20 +1567,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
- err = mlxsw_sp_port_module_map(mlxsw_sp_port);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
- mlxsw_sp_port->local_port);
- goto err_port_module_map;
- }
-
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -1712,21 +1767,24 @@ err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
-err_port_swid_set:
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
-err_port_module_map:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
free_netdev(dev);
err_alloc_etherdev:
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+err_core_port_init:
+err_port_label_info_get:
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, port_mapping->module);
return err;
}
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ u8 module = mlxsw_sp_port->mapping.module;
cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
@@ -1742,12 +1800,13 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
- mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
- mlxsw_sp_port_module_unmap(mlxsw_sp_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
free_netdev(mlxsw_sp_port->dev);
mlxsw_core_port_fini(mlxsw_sp->core, local_port);
+ mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
+ MLXSW_PORT_SWID_DISABLED_PORT);
+ mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, module);
}
static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
@@ -1789,8 +1848,15 @@ static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp_port);
}
+static bool mlxsw_sp_local_port_valid(u8 local_port)
+{
+ return local_port != MLXSW_PORT_CPU_PORT;
+}
+
static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
+ if (!mlxsw_sp_local_port_valid(local_port))
+ return false;
return mlxsw_sp->ports[local_port] != NULL;
}
@@ -1827,7 +1893,7 @@ static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
port_mapping = mlxsw_sp->port_mapping[i];
if (!port_mapping)
continue;
- err = mlxsw_sp_port_create(mlxsw_sp, i, 0, port_mapping);
+ err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
if (err)
goto err_port_create;
}
@@ -1894,17 +1960,10 @@ static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->port_mapping);
}
-static u8 mlxsw_sp_cluster_base_port_get(u8 local_port, unsigned int max_width)
-{
- u8 offset = (local_port - 1) % max_width;
-
- return local_port - offset;
-}
-
static int
-mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
+mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_port_mapping *port_mapping,
- unsigned int count, u8 offset)
+ unsigned int count, const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping split_port_mapping;
int err, i;
@@ -1912,8 +1971,13 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
split_port_mapping = *port_mapping;
split_port_mapping.width /= count;
for (i = 0; i < count; i++) {
- err = mlxsw_sp_port_create(mlxsw_sp, base_port + i * offset,
- base_port, &split_port_mapping);
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (!mlxsw_sp_local_port_valid(s_local_port))
+ continue;
+
+ err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
+ true, &split_port_mapping);
if (err)
goto err_port_create;
split_port_mapping.lane += split_port_mapping.width;
@@ -1922,49 +1986,34 @@ mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
return 0;
err_port_create:
- for (i--; i >= 0; i--)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ for (i--; i >= 0; i--) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
return err;
}
static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
- u8 base_port,
- unsigned int count, u8 offset)
+ unsigned int count,
+ const char *pmtdb_pl)
{
struct mlxsw_sp_port_mapping *port_mapping;
int i;
/* Go over original unsplit ports in the gap and recreate them. */
- for (i = 0; i < count * offset; i++) {
- port_mapping = mlxsw_sp->port_mapping[base_port + i];
- if (!port_mapping)
+ for (i = 0; i < count; i++) {
+ u8 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
+
+ port_mapping = mlxsw_sp->port_mapping[local_port];
+ if (!port_mapping || !mlxsw_sp_local_port_valid(local_port))
continue;
- mlxsw_sp_port_create(mlxsw_sp, base_port + i, 0, port_mapping);
+ mlxsw_sp_port_create(mlxsw_sp, local_port,
+ false, port_mapping);
}
}
-static int mlxsw_sp_local_ports_offset(struct mlxsw_core *mlxsw_core,
- unsigned int count,
- unsigned int max_width)
-{
- enum mlxsw_res_id local_ports_in_x_res_id;
- int split_width = max_width / count;
-
- if (split_width == 1)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_1X;
- else if (split_width == 2)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_2X;
- else if (split_width == 4)
- local_ports_in_x_res_id = MLXSW_RES_ID_LOCAL_PORTS_IN_4X;
- else
- return -EINVAL;
-
- if (!mlxsw_core_res_valid(mlxsw_core, local_ports_in_x_res_id))
- return -EINVAL;
- return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id);
-}
-
static struct mlxsw_sp_port *
mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port)
{
@@ -1980,9 +2029,8 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port_mapping port_mapping;
struct mlxsw_sp_port *mlxsw_sp_port;
- int max_width;
- u8 base_port;
- int offset;
+ enum mlxsw_reg_pmtdb_status status;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
int i;
int err;
@@ -1994,57 +2042,37 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
+ if (mlxsw_sp_port->split) {
+ NL_SET_ERR_MSG_MOD(extack, "Port is already split");
+ return -EINVAL;
}
- /* Split port with non-max cannot be split. */
- if (mlxsw_sp_port->mapping.width != max_width) {
- netdev_err(mlxsw_sp_port->dev, "Port cannot be split\n");
- NL_SET_ERR_MSG_MOD(extack, "Port cannot be split");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (offset < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
+ status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
+ if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
return -EINVAL;
}
- /* Only in case max split is being done, the local port and
- * base port may differ.
- */
- base_port = count == max_width ?
- mlxsw_sp_cluster_base_port_get(local_port, max_width) :
- local_port;
+ port_mapping = mlxsw_sp_port->mapping;
- for (i = 0; i < count * offset; i++) {
- /* Expect base port to exist and also the one in the middle in
- * case of maximal split count.
- */
- if (i == 0 || (count == max_width && i == count / 2))
- continue;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i)) {
- netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
- NL_SET_ERR_MSG_MOD(extack, "Invalid split configuration");
- return -EINVAL;
- }
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
}
- port_mapping = mlxsw_sp_port->mapping;
-
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
-
- err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, &port_mapping,
- count, offset);
+ err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
+ count, pmtdb_pl);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
goto err_port_split_create;
@@ -2053,7 +2081,7 @@ static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
return 0;
err_port_split_create:
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return err;
}
@@ -2062,11 +2090,10 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_port *mlxsw_sp_port;
+ char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
unsigned int count;
- int max_width;
- u8 base_port;
- int offset;
int i;
+ int err;
mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
if (!mlxsw_sp_port) {
@@ -2077,35 +2104,30 @@ static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port,
}
if (!mlxsw_sp_port->split) {
- netdev_err(mlxsw_sp_port->dev, "Port was not split\n");
NL_SET_ERR_MSG_MOD(extack, "Port was not split");
return -EINVAL;
}
- max_width = mlxsw_core_module_max_width(mlxsw_core,
- mlxsw_sp_port->mapping.module);
- if (max_width < 0) {
- netdev_err(mlxsw_sp_port->dev, "Cannot get max width of port module\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot get max width of port module");
- return max_width;
- }
-
- count = max_width / mlxsw_sp_port->mapping.width;
+ count = mlxsw_sp_port->mapping.module_width /
+ mlxsw_sp_port->mapping.width;
- offset = mlxsw_sp_local_ports_offset(mlxsw_core, count, max_width);
- if (WARN_ON(offset < 0)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot obtain local port offset\n");
- NL_SET_ERR_MSG_MOD(extack, "Cannot obtain local port offset");
- return -EINVAL;
+ mlxsw_reg_pmtdb_pack(pmtdb_pl, 0, mlxsw_sp_port->mapping.module,
+ mlxsw_sp_port->mapping.module_width / count,
+ count);
+ err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
+ return err;
}
- base_port = mlxsw_sp_port->split_base_local_port;
+ for (i = 0; i < count; i++) {
+ u8 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
- for (i = 0; i < count; i++)
- if (mlxsw_sp_port_created(mlxsw_sp, base_port + i * offset))
- mlxsw_sp_port_remove(mlxsw_sp, base_port + i * offset);
+ if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
+ mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
+ }
- mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count, offset);
+ mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);
return 0;
}
@@ -3260,6 +3282,30 @@ static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
&span_size_params);
}
+static int
+mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
+{
+ struct devlink *devlink = priv_to_devlink(mlxsw_core);
+ struct devlink_resource_size_params size_params;
+ u8 max_rif_mac_profiles;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
+ return -EIO;
+
+ max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
+ MAX_RIF_MAC_PROFILES);
+ devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
+ max_rif_mac_profiles, 1,
+ DEVLINK_RESOURCE_UNIT_ENTRY);
+
+ return devlink_resource_register(devlink,
+ "rif_mac_profiles",
+ max_rif_mac_profiles,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &size_params);
+}
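The function above only registers the size of the new rif_mac_profiles resource. If occupancy reporting were wanted as well, it could be wired up the same way the counter pool does further down in this patch (the devlink_resource_occ_get_register() call in spectrum_cnt.c). A hedged sketch; the callback name and the atomic counter it reads are assumptions, not code from this patch:

/* Hypothetical occupancy hook for the new devlink resource, modelled on
 * the counter-pool registration visible later in this patch.  The
 * rif_mac_profiles_count field is assumed for illustration only.
 */
static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
{
	const struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}

static void
mlxsw_sp_rif_mac_profiles_occ_register(struct mlxsw_sp *mlxsw_sp)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);

	devlink_resource_occ_get_register(devlink,
					  MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
					  mlxsw_sp_rif_mac_profiles_occ_get,
					  mlxsw_sp);
}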
+
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
int err;
@@ -3278,10 +3324,16 @@ static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
err = mlxsw_sp_policer_resources_register(mlxsw_core);
if (err)
- goto err_resources_counter_register;
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
return 0;
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
@@ -3306,10 +3358,16 @@ static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
err = mlxsw_sp_policer_resources_register(mlxsw_core);
if (err)
- goto err_resources_counter_register;
+ goto err_policer_resources_register;
+
+ err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
+ if (err)
+ goto err_resources_rif_mac_profile_register;
return 0;
+err_resources_rif_mac_profile_register:
+err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
devlink_resources_unregister(priv_to_devlink(mlxsw_core), NULL);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 3a43cba6d23c..32fdd37657dd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -67,6 +67,7 @@ enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_COUNTERS_RIF,
MLXSW_SP_RESOURCE_GLOBAL_POLICERS,
MLXSW_SP_RESOURCE_SINGLE_RATE_POLICERS,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
};
struct mlxsw_sp_port;
@@ -144,7 +145,8 @@ struct mlxsw_sp_mall_entry;
struct mlxsw_sp_port_mapping {
u8 module;
- u8 width;
+ u8 width; /* Number of lanes used by the port */
+ u8 module_width; /* Number of lanes in the module (static) */
u8 lane;
};
@@ -284,6 +286,7 @@ struct mlxsw_sp_port_vlan {
/* No need for an internal lock; at worst we miss a single periodic iteration */
struct mlxsw_sp_port_xstats {
u64 ecn;
+ u64 tc_ecn[TC_MAX_QUEUE];
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
@@ -345,7 +348,6 @@ struct mlxsw_sp_port {
u16 egr_types;
struct mlxsw_sp_ptp_port_stats stats;
} ptp;
- u8 split_base_local_port;
int max_mtu;
u32 max_speed;
struct mlxsw_sp_hdroom *hdroom;
@@ -747,6 +749,7 @@ enum mlxsw_sp_kvdl_entry_type {
MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
MLXSW_SP_KVDL_ENTRY_TYPE_PBS,
MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS,
MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT,
};
@@ -758,6 +761,7 @@ mlxsw_sp_kvdl_entry_size(enum mlxsw_sp_kvdl_entry_type type)
case MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET:
case MLXSW_SP_KVDL_ENTRY_TYPE_PBS:
case MLXSW_SP_KVDL_ENTRY_TYPE_MCRIGR:
+ case MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS:
case MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT:
default:
return 1;
@@ -1193,6 +1197,8 @@ int mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p);
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f);
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f);
/* spectrum_fid.c */
bool mlxsw_sp_fid_is_dummy(struct mlxsw_sp *mlxsw_sp, u16 fid_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
index 3a73d654017f..10ae1115de6c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_kvdl.c
@@ -35,6 +35,7 @@ static const struct mlxsw_sp2_kvdl_part_info mlxsw_sp2_kvdl_parts_info[] = {
MAX_KVD_ACTION_SETS),
MLXSW_SP2_KVDL_PART_INFO(PBS, 0x24, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(MCRIGR, 0x26, KVD_SIZE, KVD_SIZE),
+ MLXSW_SP2_KVDL_PART_INFO(IPV6_ADDRESS, 0x28, KVD_SIZE, KVD_SIZE),
MLXSW_SP2_KVDL_PART_INFO(TNUMT, 0x29, KVD_SIZE, KVD_SIZE),
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
index ded4cf658680..4b713832fdd5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c
@@ -119,7 +119,6 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
{
struct mlxsw_sp *mlxsw_sp = aregion->region->mlxsw_sp;
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb;
- size_t alloc_size;
u64 max_lkey_id;
int err;
@@ -131,8 +130,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
if (!region_12kb)
return -ENOMEM;
- alloc_size = BITS_TO_LONGS(max_lkey_id) * sizeof(unsigned long);
- region_12kb->used_lkey_id = kzalloc(alloc_size, GFP_KERNEL);
+ region_12kb->used_lkey_id = bitmap_zalloc(max_lkey_id, GFP_KERNEL);
if (!region_12kb->used_lkey_id) {
err = -ENOMEM;
goto err_used_lkey_id_alloc;
@@ -149,7 +147,7 @@ mlxsw_sp_acl_atcam_region_12kb_init(struct mlxsw_sp_acl_atcam_region *aregion)
return 0;
err_rhashtable_init:
- kfree(region_12kb->used_lkey_id);
+ bitmap_free(region_12kb->used_lkey_id);
err_used_lkey_id_alloc:
kfree(region_12kb);
return err;
@@ -161,7 +159,7 @@ mlxsw_sp_acl_atcam_region_12kb_fini(struct mlxsw_sp_acl_atcam_region *aregion)
struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv;
rhashtable_destroy(&region_12kb->lkey_ht);
- kfree(region_12kb->used_lkey_id);
+ bitmap_free(region_12kb->used_lkey_id);
kfree(region_12kb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
index 7cccc41dd69c..31f7f4c3acc3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c
@@ -36,7 +36,6 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
u64 max_tcam_regions;
u64 max_regions;
u64 max_groups;
- size_t alloc_size;
int err;
mutex_init(&tcam->lock);
@@ -52,15 +51,13 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
if (max_tcam_regions < max_regions)
max_regions = max_tcam_regions;
- alloc_size = sizeof(tcam->used_regions[0]) * BITS_TO_LONGS(max_regions);
- tcam->used_regions = kzalloc(alloc_size, GFP_KERNEL);
+ tcam->used_regions = bitmap_zalloc(max_regions, GFP_KERNEL);
if (!tcam->used_regions)
return -ENOMEM;
tcam->max_regions = max_regions;
max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
- alloc_size = sizeof(tcam->used_groups[0]) * BITS_TO_LONGS(max_groups);
- tcam->used_groups = kzalloc(alloc_size, GFP_KERNEL);
+ tcam->used_groups = bitmap_zalloc(max_groups, GFP_KERNEL);
if (!tcam->used_groups) {
err = -ENOMEM;
goto err_alloc_used_groups;
@@ -76,9 +73,9 @@ int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_tcam_init:
- kfree(tcam->used_groups);
+ bitmap_free(tcam->used_groups);
err_alloc_used_groups:
- kfree(tcam->used_regions);
+ bitmap_free(tcam->used_regions);
return err;
}
@@ -89,8 +86,8 @@ void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
mutex_destroy(&tcam->lock);
ops->fini(mlxsw_sp, tcam->priv);
- kfree(tcam->used_groups);
- kfree(tcam->used_regions);
+ bitmap_free(tcam->used_groups);
+ bitmap_free(tcam->used_regions);
}
int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9de160e740b2..d78cf5a7220a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -1583,7 +1583,7 @@ int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx;
- unsigned long cb_priv;
+ unsigned long cb_priv = 0;
LIST_HEAD(bulk_list);
char *sbsr_pl;
u8 masked_count;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
index b65b93a2b9bc..fc2257753b9b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.c
@@ -122,7 +122,6 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
unsigned int sub_pools_count = ARRAY_SIZE(mlxsw_sp_counter_sub_pools);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
struct mlxsw_sp_counter_pool *pool;
- unsigned int map_size;
int err;
pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),
@@ -143,9 +142,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
devlink_resource_occ_get_register(devlink, MLXSW_SP_RESOURCE_COUNTERS,
mlxsw_sp_counter_pool_occ_get, pool);
- map_size = BITS_TO_LONGS(pool->pool_size) * sizeof(unsigned long);
-
- pool->usage = kzalloc(map_size, GFP_KERNEL);
+ pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);
if (!pool->usage) {
err = -ENOMEM;
goto err_usage_alloc;
@@ -158,7 +155,7 @@ int mlxsw_sp_counter_pool_init(struct mlxsw_sp *mlxsw_sp)
return 0;
err_sub_pools_init:
- kfree(pool->usage);
+ bitmap_free(pool->usage);
err_usage_alloc:
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_COUNTERS);
@@ -176,7 +173,7 @@ void mlxsw_sp_counter_pool_fini(struct mlxsw_sp *mlxsw_sp)
WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=
pool->pool_size);
WARN_ON(atomic_read(&pool->active_entries_count));
- kfree(pool->usage);
+ bitmap_free(pool->usage);
devlink_resource_occ_get_unregister(devlink,
MLXSW_SP_RESOURCE_COUNTERS);
kfree(pool);
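The spectrum_acl_atcam.c, spectrum_acl_tcam.c and spectrum_cnt.c hunks above all replace open-coded BITS_TO_LONGS()-sized kzalloc()/kfree() pairs with the dedicated bitmap helpers. A minimal sketch of the idiom, assuming a kernel build context:

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* bitmap_zalloc() hides the BITS_TO_LONGS(nbits) * sizeof(unsigned long)
 * size computation and pairs with bitmap_free(); the result is still an
 * array of unsigned long, so set_bit()/find_first_zero_bit() and friends
 * keep working unchanged.
 */
static unsigned long *used_ids;

static int used_ids_init(unsigned int max_ids)
{
	used_ids = bitmap_zalloc(max_ids, GFP_KERNEL);
	if (!used_ids)
		return -ENOMEM;
	return 0;
}

static void used_ids_fini(void)
{
	bitmap_free(used_ids);
}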
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
index 267590a0eee7..84d4460f3dcd 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c
@@ -96,6 +96,9 @@ mlxsw_sp_link_ext_state_opcode_map[] = {
{1032, ETHTOOL_LINK_EXT_STATE_POWER_BUDGET_EXCEEDED, 0},
{1030, ETHTOOL_LINK_EXT_STATE_OVERHEAT, 0},
+
+ {1042, ETHTOOL_LINK_EXT_STATE_MODULE,
+ ETHTOOL_LINK_EXT_SUBSTATE_MODULE_CMIS_NOT_READY},
};
static void
@@ -124,6 +127,10 @@ mlxsw_sp_port_set_link_ext_state(struct mlxsw_sp_ethtool_link_ext_state_opcode_m
link_ext_state_info->cable_issue =
link_ext_state_mapping.link_ext_substate;
break;
+ case ETHTOOL_LINK_EXT_STATE_MODULE:
+ link_ext_state_info->module =
+ link_ext_state_mapping.link_ext_substate;
+ break;
default:
break;
}
@@ -1197,6 +1204,41 @@ mlxsw_sp_get_rmon_stats(struct net_device *dev,
*ranges = mlxsw_rmon_ranges;
}
+static int mlxsw_sp_reset(struct net_device *dev, u32 *flags)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_reset_module(dev, mlxsw_sp->core, module, flags);
+}
+
+static int
+mlxsw_sp_get_module_power_mode(struct net_device *dev,
+ struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_get_module_power_mode(mlxsw_sp->core, module, params,
+ extack);
+}
+
+static int
+mlxsw_sp_set_module_power_mode(struct net_device *dev,
+ const struct ethtool_module_power_mode_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ u8 module = mlxsw_sp_port->mapping.module;
+
+ return mlxsw_env_set_module_power_mode(mlxsw_sp->core, module,
+ params->policy, extack);
+}
+
const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.cap_link_lanes_supported = true,
.get_drvinfo = mlxsw_sp_port_get_drvinfo,
@@ -1218,6 +1260,9 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
.get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats,
.get_eth_ctrl_stats = mlxsw_sp_get_eth_ctrl_stats,
.get_rmon_stats = mlxsw_sp_get_rmon_stats,
+ .reset = mlxsw_sp_reset,
+ .get_module_power_mode = mlxsw_sp_get_module_power_mode,
+ .set_module_power_mode = mlxsw_sp_set_module_power_mode,
};
struct mlxsw_sp1_port_link_mode {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 5facabd86882..ad3926de88f2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -24,50 +24,72 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms)
{
- return !!(parms.i_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms)
{
- return !!(parms.o_flags & TUNNEL_KEY);
+ return !!(parms->i_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms)
+{
+ return !!(parms->o_flags & TUNNEL_KEY);
+}
+
+static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
- be32_to_cpu(parms.i_key) : 0;
+ be32_to_cpu(parms->i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_ikey(parms) ?
+ be32_to_cpu(parms->i_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms)
{
return mlxsw_sp_ipip_parms4_has_okey(parms) ?
- be32_to_cpu(parms.o_key) : 0;
+ be32_to_cpu(parms->o_key) : 0;
+}
+
+static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms)
+{
+ return mlxsw_sp_ipip_parms6_has_okey(parms) ?
+ be32_to_cpu(parms->o_key) : 0;
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->laddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
+ return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+mlxsw_sp_ipip_parms6_daddr(const struct __ip6_tnl_parm *parms)
{
- return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms->raddr };
}
union mlxsw_sp_l3addr
@@ -80,10 +102,10 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_saddr(parms4);
+ return mlxsw_sp_ipip_parms4_saddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_saddr(parms6);
+ return mlxsw_sp_ipip_parms6_saddr(&parms6);
}
WARN_ON(1);
@@ -95,7 +117,7 @@ static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
+ return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4;
}
static union mlxsw_sp_l3addr
@@ -108,10 +130,10 @@ mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- return mlxsw_sp_ipip_parms4_daddr(parms4);
+ return mlxsw_sp_ipip_parms4_daddr(&parms4);
case MLXSW_SP_L3_PROTO_IPV6:
parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
- return mlxsw_sp_ipip_parms6_daddr(parms6);
+ return mlxsw_sp_ipip_parms6_daddr(&parms6);
}
WARN_ON(1);
@@ -125,6 +147,21 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
return !memcmp(&addr, &naddr, sizeof(naddr));
}
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev)
+{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV4,
+ .saddr = mlxsw_sp_ipip_parms4_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms4_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms4_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
+ };
+}
+
static int
mlxsw_sp_ipip_nexthop_update_gre4(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -158,8 +195,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp,
u32 ikey;
parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
- ikey = mlxsw_sp_ipip_parms4_ikey(parms);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(&parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
@@ -218,12 +255,12 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_parms4_okey(parms),
+ .okey = mlxsw_sp_ipip_parms4_okey(&parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -231,48 +268,39 @@ mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
}
static int
-mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_ipip_entry *ipip_entry,
- struct netlink_ext_ack *extack)
+mlxsw_sp_ipip_ol_netdev_change_gre(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ const struct mlxsw_sp_ipip_parms *new_parms,
+ struct netlink_ext_ack *extack)
{
- union mlxsw_sp_l3addr old_saddr, new_saddr;
- union mlxsw_sp_l3addr old_daddr, new_daddr;
- struct ip_tunnel_parm new_parms;
+ const struct mlxsw_sp_ipip_parms *old_parms = &ipip_entry->parms;
bool update_tunnel = false;
bool update_decap = false;
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
-
- new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
- old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
- new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
- old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
-
- if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
+ if (!mlxsw_sp_l3addr_eq(&new_parms->saddr, &old_parms->saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
/* Since the local address has changed, if there is another
* tunnel with a matching saddr, both need to be demoted.
*/
if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp,
- MLXSW_SP_L3_PROTO_IPV4,
- new_saddr, ul_tb_id,
+ new_parms->proto,
+ new_parms->saddr,
+ ul_tb_id,
ipip_entry)) {
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
return 0;
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_okey(new_parms)) ||
- ipip_entry->parms4.link != new_parms.link) {
+ } else if (old_parms->okey != new_parms->okey ||
+ old_parms->link != new_parms->link) {
update_tunnel = true;
- } else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
+ } else if (!mlxsw_sp_l3addr_eq(&new_parms->daddr, &old_parms->daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
- mlxsw_sp_ipip_parms4_ikey(new_parms)) {
+ } else if (old_parms->ikey != new_parms->ikey) {
update_decap = true;
}
@@ -288,23 +316,308 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
err = __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
false, false, false,
extack);
+ if (err)
+ return err;
- ipip_entry->parms4 = new_parms;
- return err;
+ ipip_entry->parms = *new_parms;
+ return 0;
+}
+
+static int
+mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp_ipip_netdev_parms_init_gre4(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp_ipip_rem_addr_set_gre4(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_ipip_rem_addr_unset_gre4(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
}
static const struct mlxsw_sp_ipip_ops mlxsw_sp_ipip_gre4_ops = {
.dev_type = ARPHRD_IPGRE,
.ul_proto = MLXSW_SP_L3_PROTO_IPV4,
+ .inc_parsing_depth = false,
+ .parms_init = mlxsw_sp_ipip_netdev_parms_init_gre4,
.nexthop_update = mlxsw_sp_ipip_nexthop_update_gre4,
.decap_config = mlxsw_sp_ipip_decap_config_gre4,
.can_offload = mlxsw_sp_ipip_can_offload_gre4,
.ol_loopback_config = mlxsw_sp_ipip_ol_loopback_config_gre4,
.ol_netdev_change = mlxsw_sp_ipip_ol_netdev_change_gre4,
+ .rem_ip_addr_set = mlxsw_sp_ipip_rem_addr_set_gre4,
+ .rem_ip_addr_unset = mlxsw_sp_ipip_rem_addr_unset_gre4,
};
-const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[] = {
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp1_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_ipip_parms parms = {0};
+
+ WARN_ON_ONCE(1);
+ return parms;
+}
+
+static int
+mlxsw_sp1_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static bool mlxsw_sp1_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ return false;
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp1_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct mlxsw_sp_rif_ipip_lb_config config = {0};
+
+ WARN_ON_ONCE(1);
+ return config;
+}
+
+static int
+mlxsw_sp1_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static int
+mlxsw_sp1_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+}
+
+static void
+mlxsw_sp1_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ WARN_ON_ONCE(1);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp1_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp1_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp1_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp1_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp1_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp1_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp1_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp1_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp1_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[] = {
[MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp1_ipip_gre6_ops,
+};
+
+static struct mlxsw_sp_ipip_parms
+mlxsw_sp2_ipip_netdev_parms_init_gre6(const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+
+ return (struct mlxsw_sp_ipip_parms) {
+ .proto = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_parms6_saddr(&parms),
+ .daddr = mlxsw_sp_ipip_parms6_daddr(&parms),
+ .link = parms.link,
+ .ikey = mlxsw_sp_ipip_parms6_ikey(&parms),
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_nexthop_update_gre6(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ bool force, char *ratr_pl)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ enum mlxsw_reg_ratr_op op;
+
+ op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
+ MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
+ mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_IPIP,
+ adj_index, rif_index);
+ mlxsw_reg_ratr_ipip6_entry_pack(ratr_pl,
+ ipip_entry->dip_kvdl_index);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+}
+
+static int
+mlxsw_sp2_ipip_decap_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ u32 tunnel_index)
+{
+ u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
+ u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb);
+ char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct __ip6_tnl_parm parms;
+ unsigned int type_check;
+ bool has_ikey;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms6_has_ikey(&parms);
+ ikey = mlxsw_sp_ipip_parms6_ikey(&parms);
+
+ mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
+ mlxsw_reg_rtdp_egress_router_interface_set(rtdp_pl, ul_rif_id);
+
+ type_check = has_ikey ?
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE_KEY :
+ MLXSW_REG_RTDP_IPIP_TYPE_CHECK_ALLOW_GRE;
+
+ /* Linux demuxes tunnels based on packet SIP (which must match tunnel
+ * remote IP). Thus configure decap so that it filters out packets that
+ * are not IPv6 or have the wrong SIP. IPIP_DECAP_ERROR trap is
+ * generated for packets that fail this criterion. Linux then handles
+ * such packets in slow path and generates ICMP destination unreachable.
+ */
+ mlxsw_reg_rtdp_ipip6_pack(rtdp_pl, rif_index,
+ MLXSW_REG_RTDP_IPIP_SIP_CHECK_FILTER_IPV6,
+ type_check, has_ikey,
+ ipip_entry->dip_kvdl_index, ikey);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rtdp), rtdp_pl);
+}
+
+static bool mlxsw_sp2_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ bool inherit_ttl = tparm.hop_limit == 0;
+ __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */
+
+ return (tparm.i_flags & ~okflags) == 0 &&
+ (tparm.o_flags & ~okflags) == 0 &&
+ inherit_ttl && inherit_tos &&
+ mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev);
+}
+
+static struct mlxsw_sp_rif_ipip_lb_config
+mlxsw_sp2_ipip_ol_loopback_config_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *ol_dev)
+{
+ struct __ip6_tnl_parm parms = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
+
+ lb_ipipt = mlxsw_sp_ipip_parms6_has_okey(&parms) ?
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
+ MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
+ return (struct mlxsw_sp_rif_ipip_lb_config){
+ .lb_ipipt = lb_ipipt,
+ .okey = mlxsw_sp_ipip_parms6_okey(&parms),
+ .ul_protocol = MLXSW_SP_L3_PROTO_IPV6,
+ .saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV6,
+ ol_dev),
+ };
+}
+
+static int
+mlxsw_sp2_ipip_ol_netdev_change_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_ipip_parms new_parms;
+
+ new_parms = mlxsw_sp2_ipip_netdev_parms_init_gre6(ipip_entry->ol_dev);
+ return mlxsw_sp_ipip_ol_netdev_change_gre(mlxsw_sp, ipip_entry,
+ &new_parms, extack);
+}
+
+static int
+mlxsw_sp2_ipip_rem_addr_set_gre6(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ char rips_pl[MLXSW_REG_RIPS_LEN];
+ struct __ip6_tnl_parm parms6;
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
+ MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ &ipip_entry->dip_kvdl_index);
+ if (err)
+ return err;
+
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ipip_entry->ol_dev);
+ mlxsw_reg_rips_pack(rips_pl, ipip_entry->dip_kvdl_index,
+ &parms6.raddr);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
+ if (err)
+ goto err_rips_write;
+
+ return 0;
+
+err_rips_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+ return err;
+}
+
+static void
+mlxsw_sp2_ipip_rem_addr_unset_gre6(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
+ ipip_entry->dip_kvdl_index);
+}
+
+static const struct mlxsw_sp_ipip_ops mlxsw_sp2_ipip_gre6_ops = {
+ .dev_type = ARPHRD_IP6GRE,
+ .ul_proto = MLXSW_SP_L3_PROTO_IPV6,
+ .inc_parsing_depth = true,
+ .parms_init = mlxsw_sp2_ipip_netdev_parms_init_gre6,
+ .nexthop_update = mlxsw_sp2_ipip_nexthop_update_gre6,
+ .decap_config = mlxsw_sp2_ipip_decap_config_gre6,
+ .can_offload = mlxsw_sp2_ipip_can_offload_gre6,
+ .ol_loopback_config = mlxsw_sp2_ipip_ol_loopback_config_gre6,
+ .ol_netdev_change = mlxsw_sp2_ipip_ol_netdev_change_gre6,
+ .rem_ip_addr_set = mlxsw_sp2_ipip_rem_addr_set_gre6,
+ .rem_ip_addr_unset = mlxsw_sp2_ipip_rem_addr_unset_gre6,
+};
+
+const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[] = {
+ [MLXSW_SP_IPIP_TYPE_GRE4] = &mlxsw_sp_ipip_gre4_ops,
+ [MLXSW_SP_IPIP_TYPE_GRE6] = &mlxsw_sp2_ipip_gre6_ops,
};
static int mlxsw_sp_ipip_ecn_encap_init_one(struct mlxsw_sp *mlxsw_sp,
@@ -363,3 +676,22 @@ int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
return 0;
}
+
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
+{
+ struct net *net = dev_net(ol_dev);
+ struct ip_tunnel *tun4;
+ struct ip6_tnl *tun6;
+
+ switch (ol_dev->type) {
+ case ARPHRD_IPGRE:
+ tun4 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun4->parms.link);
+ case ARPHRD_IP6GRE:
+ tun6 = netdev_priv(ol_dev);
+ return dev_get_by_index_rcu(net, tun6->parms.link);
+ default:
+ return NULL;
+ }
+}
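mlxsw_sp_ipip_netdev_ul_dev_get() resolves the underlay device via dev_get_by_index_rcu(), which does not take a reference, so a caller has to hold the RCU read lock and only use the result inside that section. A minimal caller sketch (the logging is illustrative only):

/* Sketch of a caller: the returned underlay netdev is only valid while
 * rcu_read_lock() is held, since dev_get_by_index_rcu() takes no reference.
 */
static void example_log_ul_dev(struct net_device *ol_dev)
{
	struct net_device *ul_dev;

	rcu_read_lock();
	ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
	if (ul_dev)
		netdev_dbg(ol_dev, "underlay device: %s\n", ul_dev->name);
	rcu_read_unlock();
}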
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index f0837b42d1d6..8cc259dcc8d0 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -7,6 +7,7 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
#include <linux/if_tunnel.h>
+#include <net/ip6_tunnel.h>
struct ip_tunnel_parm
mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
@@ -21,23 +22,36 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
+ MLXSW_SP_IPIP_TYPE_GRE6,
MLXSW_SP_IPIP_TYPE_MAX,
};
+struct mlxsw_sp_ipip_parms {
+ enum mlxsw_sp_l3proto proto;
+ union mlxsw_sp_l3addr saddr;
+ union mlxsw_sp_l3addr daddr;
+ int link;
+ u32 ikey;
+ u32 okey;
+};
+
struct mlxsw_sp_ipip_entry {
enum mlxsw_sp_ipip_type ipipt;
struct net_device *ol_dev; /* Overlay. */
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- union {
- struct ip_tunnel_parm parms4;
- };
+ struct mlxsw_sp_ipip_parms parms;
+ u32 dip_kvdl_index;
};
struct mlxsw_sp_ipip_ops {
int dev_type;
enum mlxsw_sp_l3proto ul_proto; /* Underlay. */
+ bool inc_parsing_depth;
+
+ struct mlxsw_sp_ipip_parms
+ (*parms_init)(const struct net_device *ol_dev);
int (*nexthop_update)(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -58,8 +72,13 @@ struct mlxsw_sp_ipip_ops {
int (*ol_netdev_change)(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
struct netlink_ext_ack *extack);
+ int (*rem_ip_addr_set)(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry);
+ void (*rem_ip_addr_unset)(struct mlxsw_sp *mlxsw_sp,
+ const struct mlxsw_sp_ipip_entry *ipip_entry);
};
-extern const struct mlxsw_sp_ipip_ops *mlxsw_sp_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp1_ipip_ops_arr[];
+extern const struct mlxsw_sp_ipip_ops *mlxsw_sp2_ipip_ops_arr[];
#endif /* _MLXSW_IPIP_H_*/
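With the per-protocol union replaced by struct mlxsw_sp_ipip_parms and the new parms_init hook, a caller outside this diff (the router code is not shown here) can fill the generic parameters without caring about the tunnel protocol. A hedged sketch; only the types and the parms_init hook come from the header above:

/* Hypothetical caller -- the real call site is not part of this hunk.
 * ops->parms_init() converts the netdev's protocol-specific tunnel
 * parameters into the generic form stored in the IPIP entry.
 */
static void
example_ipip_entry_parms_init(struct mlxsw_sp_ipip_entry *ipip_entry,
			      const struct mlxsw_sp_ipip_ops *ops)
{
	ipip_entry->parms = ops->parms_init(ipip_entry->ol_dev);
}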
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 9958d503bf0e..4243d3b883ff 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -50,12 +50,24 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc *(*find_class)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent);
unsigned int num_classes;
+
+ u8 (*get_prio_bitmap)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+ int (*get_tclass_num)(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child);
+};
+
+struct mlxsw_sp_qdisc_ets_band {
+ u8 prio_bitmap;
+ int tclass_num;
+};
+
+struct mlxsw_sp_qdisc_ets_data {
+ struct mlxsw_sp_qdisc_ets_band bands[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_qdisc {
u32 handle;
- int tclass_num;
- u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -67,6 +79,10 @@ struct mlxsw_sp_qdisc {
u64 backlog;
} stats_base;
+ union {
+ struct mlxsw_sp_qdisc_ets_data *ets_data;
+ };
+
struct mlxsw_sp_qdisc_ops *ops;
struct mlxsw_sp_qdisc *parent;
struct mlxsw_sp_qdisc *qdiscs;
@@ -141,8 +157,7 @@ mlxsw_sp_qdisc_walk_cb_find(struct mlxsw_sp_qdisc *qdisc, void *data)
}
static struct mlxsw_sp_qdisc *
-mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
- bool root_only)
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent)
{
struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
@@ -150,8 +165,6 @@ mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
return NULL;
if (parent == TC_H_ROOT)
return &qdisc_state->root_qdisc;
- if (root_only)
- return NULL;
return mlxsw_sp_qdisc_walk(&qdisc_state->root_qdisc,
mlxsw_sp_qdisc_walk_cb_find, &parent);
}
@@ -187,6 +200,32 @@ mlxsw_sp_qdisc_reduce_parent_backlog(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
tmp->stats_base.backlog -= mlxsw_sp_qdisc->stats_base.backlog;
}
+static u8 mlxsw_sp_qdisc_get_prio_bitmap(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return 0xff;
+ if (!parent->ops->get_prio_bitmap)
+ return mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port, parent);
+ return parent->ops->get_prio_bitmap(parent, mlxsw_sp_qdisc);
+}
+
+#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
+
+static int mlxsw_sp_qdisc_get_tclass_num(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ struct mlxsw_sp_qdisc *parent = mlxsw_sp_qdisc->parent;
+
+ if (!parent)
+ return MLXSW_SP_PORT_DEFAULT_TCLASS;
+ if (!parent->ops->get_tclass_num)
+ return mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, parent);
+ return parent->ops->get_tclass_num(parent, mlxsw_sp_qdisc);
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -194,6 +233,7 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *root_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
int err_hdroom = 0;
int err = 0;
+ int i;
if (!mlxsw_sp_qdisc)
return 0;
@@ -211,6 +251,9 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
if (!mlxsw_sp_qdisc->ops)
return 0;
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_qdisc->qdiscs[i]);
mlxsw_sp_qdisc_reduce_parent_backlog(mlxsw_sp_qdisc);
if (mlxsw_sp_qdisc->ops->destroy)
err = mlxsw_sp_qdisc->ops->destroy(mlxsw_sp_port,
@@ -226,6 +269,87 @@ mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
return err_hdroom ?: err;
}
+struct mlxsw_sp_qdisc_tree_validate {
+ bool forbid_ets;
+ bool forbid_root_tbf;
+ bool forbid_tbf;
+ bool forbid_red;
+};
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate);
+
+static int
+mlxsw_sp_qdisc_tree_validate_children(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ unsigned int i;
+ int err;
+
+ for (i = 0; i < mlxsw_sp_qdisc->num_classes; i++) {
+ err = __mlxsw_sp_qdisc_tree_validate(&mlxsw_sp_qdisc->qdiscs[i],
+ validate);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int
+__mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc_tree_validate validate)
+{
+ if (!mlxsw_sp_qdisc->ops)
+ return 0;
+
+ switch (mlxsw_sp_qdisc->ops->type) {
+ case MLXSW_SP_QDISC_FIFO:
+ break;
+ case MLXSW_SP_QDISC_RED:
+ if (validate.forbid_red)
+ return -EINVAL;
+ validate.forbid_red = true;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ case MLXSW_SP_QDISC_TBF:
+ if (validate.forbid_root_tbf) {
+ if (validate.forbid_tbf)
+ return -EINVAL;
+ /* This is a TC TBF. */
+ validate.forbid_tbf = true;
+ validate.forbid_ets = true;
+ } else {
+ /* This is root TBF. */
+ validate.forbid_root_tbf = true;
+ }
+ break;
+ case MLXSW_SP_QDISC_PRIO:
+ case MLXSW_SP_QDISC_ETS:
+ if (validate.forbid_ets)
+ return -EINVAL;
+ validate.forbid_root_tbf = true;
+ validate.forbid_ets = true;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ return mlxsw_sp_qdisc_tree_validate_children(mlxsw_sp_qdisc, validate);
+}
+
+static int mlxsw_sp_qdisc_tree_validate(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ struct mlxsw_sp_qdisc_tree_validate validate = {};
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+
+ mlxsw_sp_qdisc = &mlxsw_sp_port->qdisc->root_qdisc;
+ return __mlxsw_sp_qdisc_tree_validate(mlxsw_sp_qdisc, validate);
+}
+
static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -268,6 +392,10 @@ static int mlxsw_sp_qdisc_create(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->num_classes = ops->num_classes;
mlxsw_sp_qdisc->ops = ops;
mlxsw_sp_qdisc->handle = handle;
+ err = mlxsw_sp_qdisc_tree_validate(mlxsw_sp_port);
+ if (err)
+ goto err_replace;
+
err = ops->replace(mlxsw_sp_port, handle, mlxsw_sp_qdisc, params);
if (err)
goto err_replace;
@@ -406,13 +534,17 @@ mlxsw_sp_qdisc_collect_tc_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u64 *p_tx_bytes, u64 *p_tx_packets,
u64 *p_drops, u64 *p_backlog)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
u64 tx_bytes, tx_packets;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&tx_packets, &tx_bytes);
*p_tx_packets += tx_packets;
@@ -506,19 +638,24 @@ static void
mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *red_base;
+ u8 prio_bitmap;
+ int tclass_num;
+ prio_bitmap = mlxsw_sp_qdisc_get_prio_bitmap(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
- mlxsw_sp_qdisc->prio_bitmap,
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats, prio_bitmap,
&stats_base->tx_packets,
&stats_base->tx_bytes);
+ red_base->prob_mark = xstats->tc_ecn[tclass_num];
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = mlxsw_sp_xstats_tail_drop(xstats, tclass_num);
@@ -532,8 +669,10 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
- mlxsw_sp_qdisc->tclass_num);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port, tclass_num);
}
static int
@@ -564,15 +703,33 @@ mlxsw_sp_qdisc_red_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc);
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle);
+
+static int
mlxsw_sp_qdisc_red_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct tc_red_qopt_offload_params *p = params;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
+ int tclass_num;
u32 min, max;
u64 prob;
+ int err;
+
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
/* calculate probability in percentage */
prob = p->probability;
@@ -615,22 +772,27 @@ mlxsw_sp_qdisc_get_red_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
void *xstats_ptr)
{
struct red_stats *xstats_base = &mlxsw_sp_qdisc->xstats_base.red;
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_port_xstats *xstats;
struct red_stats *res = xstats_ptr;
- int early_drops, pdrops;
+ int early_drops, marks, pdrops;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
early_drops = xstats->wred_drop[tclass_num] - xstats_base->prob_drop;
+ marks = xstats->tc_ecn[tclass_num] - xstats_base->prob_mark;
pdrops = mlxsw_sp_xstats_tail_drop(xstats, tclass_num) -
xstats_base->pdrop;
res->pdrop += pdrops;
res->prob_drop += early_drops;
+ res->prob_mark += marks;
xstats_base->pdrop += pdrops;
xstats_base->prob_drop += early_drops;
+ xstats_base->prob_mark += marks;
return 0;
}
@@ -639,16 +801,19 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
struct tc_qopt_offload_stats *stats_ptr)
{
- int tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
u64 overlimits;
+ int tclass_num;
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
stats_base = &mlxsw_sp_qdisc->stats_base;
mlxsw_sp_qdisc_get_tc_stats(mlxsw_sp_port, mlxsw_sp_qdisc, stats_ptr);
- overlimits = xstats->wred_drop[tclass_num] - stats_base->overlimits;
+ overlimits = xstats->wred_drop[tclass_num] +
+ xstats->tc_ecn[tclass_num] - stats_base->overlimits;
stats_ptr->qstats->overlimits += overlimits;
stats_base->overlimits += overlimits;
@@ -660,11 +825,12 @@ static struct mlxsw_sp_qdisc *
mlxsw_sp_qdisc_leaf_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
u32 parent)
{
- return NULL;
+ /* RED and TBF are formally classful qdiscs, but all class references,
+ * including X:0, just refer to the same one class.
+ */
+ return &mlxsw_sp_qdisc->qdiscs[0];
}
-#define MLXSW_SP_PORT_DEFAULT_TCLASS 0
-
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.type = MLXSW_SP_QDISC_RED,
.check_params = mlxsw_sp_qdisc_red_check_params,
@@ -675,14 +841,19 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_red = {
.get_xstats = mlxsw_sp_qdisc_get_red_xstats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_red_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle);
+
static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_red_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -704,6 +875,9 @@ static int __mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_RED_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_RED_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -740,13 +914,34 @@ mlxsw_sp_setup_tc_qdisc_leaf_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
+static enum mlxsw_reg_qeec_hr
+mlxsw_sp_qdisc_tbf_hr(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ if (mlxsw_sp_qdisc == &mlxsw_sp_port->qdisc->root_qdisc)
+ return MLXSW_REG_QEEC_HR_PORT;
+
+ /* Configure subgroup shaper, so that both UC and MC traffic is subject
+ * to shaping. That is unlike RED, where UC queue lengths are going to
+ * be different from MC ones due to different pool and quota
+ * configurations, so the configuration is not applicable. For shaper on
+ * the other hand, subjecting the overall stream to the configured
+ * shaper makes sense. Also note that that is what we do for
+ * ieee_setmaxrate().
+ */
+ return MLXSW_REG_QEEC_HR_SUBGROUP;
+}
+
static int
mlxsw_sp_qdisc_tbf_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
- return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ int tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
MLXSW_REG_QEEC_MAS_DIS, 0);
}
@@ -828,27 +1023,29 @@ mlxsw_sp_qdisc_tbf_replace(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
void *params)
{
+ enum mlxsw_reg_qeec_hr hr = mlxsw_sp_qdisc_tbf_hr(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
struct tc_tbf_qopt_offload_replace_params *p = params;
u64 rate_kbps = mlxsw_sp_qdisc_tbf_rate_kbps(p);
+ int tclass_num;
u8 burst_size;
int err;
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle, 0,
+ &mlxsw_sp_qdisc->qdiscs[0]);
+ if (err)
+ return err;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
+
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+
err = mlxsw_sp_qdisc_tbf_bs(mlxsw_sp_port, p->max_size, &burst_size);
if (WARN_ON_ONCE(err))
/* check_params above was supposed to reject this value. */
return -EINVAL;
- /* Configure subgroup shaper, so that both UC and MC traffic is subject
- * to shaping. That is unlike RED, however UC queue lengths are going to
- * be different than MC ones due to different pool and quota
- * configurations, so the configuration is not applicable. For shaper on
- * the other hand, subjecting the overall stream to the configured
- * shaper makes sense. Also note that that is what we do for
- * ieee_setmaxrate().
- */
- return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
- MLXSW_REG_QEEC_HR_SUBGROUP,
- mlxsw_sp_qdisc->tclass_num, 0,
+ return mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port, hr, tclass_num, 0,
rate_kbps, burst_size);
}
@@ -881,6 +1078,7 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_tbf = {
.get_stats = mlxsw_sp_qdisc_get_tbf_stats,
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
.find_class = mlxsw_sp_qdisc_leaf_find_class,
+ .num_classes = 1,
};
static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -888,7 +1086,7 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -907,6 +1105,9 @@ static int __mlxsw_sp_setup_tc_tbf(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_TBF_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_TBF_GRAFT:
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc, 0,
+ p->child_handle);
default:
return -EOPNOTSUPP;
}
@@ -957,6 +1158,32 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_fifo = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_leaf_clean_stats,
};
+static int
+mlxsw_sp_qdisc_future_fifo_replace(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle, unsigned int band,
+ struct mlxsw_sp_qdisc *child_qdisc)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ if (handle == qdisc_state->future_handle &&
+ qdisc_state->future_fifos[band])
+ return mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
+ child_qdisc,
+ &mlxsw_sp_qdisc_ops_fifo,
+ NULL);
+ return 0;
+}
+
+static void
+mlxsw_sp_qdisc_future_fifos_init(struct mlxsw_sp_port *mlxsw_sp_port,
+ u32 handle)
+{
+ struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+
+ qdisc_state->future_handle = handle;
+ memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+}
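
/* A minimal userspace model of the future-FIFO cache handled by the two
 * helpers above: FIFO offload requests seen before their parent is
 * offloaded are remembered per band and replayed once the parent shows
 * up. All names here are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_BANDS 8
#define TOY_H_UNSPEC 0u

struct toy_future {
	unsigned int handle;		/* parent handle the cache is for */
	bool fifos[TOY_BANDS];		/* band -> FIFO seen early? */
};

static void toy_future_init(struct toy_future *f, unsigned int handle)
{
	*f = (struct toy_future){ .handle = handle };
}

/* Called while offloading the parent: instantiate any cached FIFO. */
static void toy_future_replay(struct toy_future *f, unsigned int handle)
{
	for (int band = 0; band < TOY_BANDS; band++)
		if (handle == f->handle && f->fifos[band])
			printf("offloading cached FIFO in band %d\n", band);
	toy_future_init(f, TOY_H_UNSPEC);	/* cache consumed */
}

int main(void)
{
	struct toy_future cache;

	toy_future_init(&cache, 0x10000);	/* FIFO seen under parent 1: */
	cache.fifos[2] = true;
	toy_future_replay(&cache, 0x10000);	/* parent 1: offloaded now */
	return 0;
}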
+
static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_fifo_qopt_offload *p)
{
@@ -965,16 +1192,15 @@ static int __mlxsw_sp_setup_tc_fifo(struct mlxsw_sp_port *mlxsw_sp_port,
unsigned int band;
u32 parent_handle;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc && p->handle == TC_H_UNSPEC) {
parent_handle = TC_H_MAJ(p->parent);
if (parent_handle != qdisc_state->future_handle) {
 /* This notification is for a different Qdisc than
* previously. Wipe the future cache.
*/
- memset(qdisc_state->future_fifos, 0,
- sizeof(qdisc_state->future_fifos));
- qdisc_state->future_handle = parent_handle;
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port,
+ parent_handle);
}
band = TC_H_MIN(p->parent) - 1;
@@ -1033,11 +1259,10 @@ static int __mlxsw_sp_qdisc_ets_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
i, 0, false, 0);
- mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
- &mlxsw_sp_qdisc->qdiscs[i]);
- mlxsw_sp_qdisc->qdiscs[i].prio_bitmap = 0;
}
+ kfree(mlxsw_sp_qdisc->ets_data);
+ mlxsw_sp_qdisc->ets_data = NULL;
return 0;
}
@@ -1066,6 +1291,31 @@ mlxsw_sp_qdisc_prio_check_params(struct mlxsw_sp_port *mlxsw_sp_port,
return __mlxsw_sp_qdisc_ets_check_params(p->bands);
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_walk_cb_clean_stats(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ void *mlxsw_sp_port)
+{
+ u64 backlog;
+
+ if (mlxsw_sp_qdisc->ops) {
+ backlog = mlxsw_sp_qdisc->stats_base.backlog;
+ if (mlxsw_sp_qdisc->ops->clean_stats)
+ mlxsw_sp_qdisc->ops->clean_stats(mlxsw_sp_port,
+ mlxsw_sp_qdisc);
+ mlxsw_sp_qdisc->stats_base.backlog = backlog;
+ }
+
+ return NULL;
+}
+
+static void
+mlxsw_sp_qdisc_tree_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
+{
+ mlxsw_sp_qdisc_walk(mlxsw_sp_qdisc, mlxsw_sp_qdisc_walk_cb_clean_stats,
+ mlxsw_sp_port);
+}
+
static int
__mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
@@ -1074,69 +1324,80 @@ __mlxsw_sp_qdisc_ets_replace(struct mlxsw_sp_port *mlxsw_sp_port,
const unsigned int *weights,
const u8 *priomap)
{
- struct mlxsw_sp_qdisc_state *qdisc_state = mlxsw_sp_port->qdisc;
+ struct mlxsw_sp_qdisc_ets_data *ets_data = mlxsw_sp_qdisc->ets_data;
+ struct mlxsw_sp_qdisc_ets_band *ets_band;
struct mlxsw_sp_qdisc *child_qdisc;
- int tclass, i, band, backlog;
- u8 old_priomap;
+ u8 old_priomap, new_priomap;
+ int i, band;
int err;
+ if (!ets_data) {
+ ets_data = kzalloc(sizeof(*ets_data), GFP_KERNEL);
+ if (!ets_data)
+ return -ENOMEM;
+ mlxsw_sp_qdisc->ets_data = ets_data;
+
+ for (band = 0; band < mlxsw_sp_qdisc->num_classes; band++) {
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+
+ ets_band = &ets_data->bands[band];
+ ets_band->tclass_num = tclass_num;
+ }
+ }
+
for (band = 0; band < nbands; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ int tclass_num;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- old_priomap = child_qdisc->prio_bitmap;
- child_qdisc->prio_bitmap = 0;
+ ets_band = &ets_data->bands[band];
+
+ tclass_num = ets_band->tclass_num;
+ old_priomap = ets_band->prio_bitmap;
+ new_priomap = 0;
err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, !!quanta[band],
+ tclass_num, 0, !!quanta[band],
weights[band]);
if (err)
return err;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (priomap[i] == band) {
- child_qdisc->prio_bitmap |= BIT(i);
+ new_priomap |= BIT(i);
if (BIT(i) & old_priomap)
continue;
err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
- i, tclass);
+ i, tclass_num);
if (err)
return err;
}
}
- child_qdisc->tclass_num = tclass;
+ ets_band->prio_bitmap = new_priomap;
- if (old_priomap != child_qdisc->prio_bitmap &&
- child_qdisc->ops && child_qdisc->ops->clean_stats) {
- backlog = child_qdisc->stats_base.backlog;
- child_qdisc->ops->clean_stats(mlxsw_sp_port,
- child_qdisc);
- child_qdisc->stats_base.backlog = backlog;
- }
+ if (old_priomap != new_priomap)
+ mlxsw_sp_qdisc_tree_clean_stats(mlxsw_sp_port,
+ child_qdisc);
- if (handle == qdisc_state->future_handle &&
- qdisc_state->future_fifos[band]) {
- err = mlxsw_sp_qdisc_replace(mlxsw_sp_port, TC_H_UNSPEC,
- child_qdisc,
- &mlxsw_sp_qdisc_ops_fifo,
- NULL);
- if (err)
- return err;
- }
+ err = mlxsw_sp_qdisc_future_fifo_replace(mlxsw_sp_port, handle,
+ band, child_qdisc);
+ if (err)
+ return err;
}
for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ ets_band = &ets_data->bands[band];
+ ets_band->prio_bitmap = 0;
+
child_qdisc = &mlxsw_sp_qdisc->qdiscs[band];
- child_qdisc->prio_bitmap = 0;
mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
+
mlxsw_sp_port_ets_set(mlxsw_sp_port,
MLXSW_REG_QEEC_HR_SUBGROUP,
- tclass, 0, false, 0);
+ ets_band->tclass_num, 0, false, 0);
}
- qdisc_state->future_handle = TC_H_UNSPEC;
- memset(qdisc_state->future_fifos, 0, sizeof(qdisc_state->future_fifos));
+ mlxsw_sp_qdisc_future_fifos_init(mlxsw_sp_port, TC_H_UNSPEC);
return 0;
}
@@ -1238,6 +1499,31 @@ mlxsw_sp_qdisc_prio_find_class(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
return &mlxsw_sp_qdisc->qdiscs[band];
}
+static struct mlxsw_sp_qdisc_ets_band *
+mlxsw_sp_qdisc_ets_get_band(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ unsigned int band = child - mlxsw_sp_qdisc->qdiscs;
+
+ if (WARN_ON(band >= IEEE_8021QAZ_MAX_TCS))
+ band = 0;
+ return &mlxsw_sp_qdisc->ets_data->bands[band];
+}
+
+static u8
+mlxsw_sp_qdisc_ets_get_prio_bitmap(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->prio_bitmap;
+}
+
+static int
+mlxsw_sp_qdisc_ets_get_tclass_num(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct mlxsw_sp_qdisc *child)
+{
+ return mlxsw_sp_qdisc_ets_get_band(mlxsw_sp_qdisc, child)->tclass_num;
+}
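
/* The band is recovered from the child's position inside the parent's
 * qdiscs[] array. A tiny standalone illustration of that pointer
 * arithmetic (toy types, hypothetical names).
 */
#include <stdio.h>

struct toy_child { int dummy; };

struct toy_parent {
	struct toy_child qdiscs[8];
	int tclass_of_band[8];
};

static int toy_tclass_of(const struct toy_parent *p, const struct toy_child *c)
{
	unsigned int band = c - p->qdiscs;	/* element index, not bytes */

	return band < 8 ? p->tclass_of_band[band] : p->tclass_of_band[0];
}

int main(void)
{
	struct toy_parent p = { .tclass_of_band = { 7, 6, 5, 4, 3, 2, 1, 0 } };

	printf("band 2 maps to tclass %d\n", toy_tclass_of(&p, &p.qdiscs[2]));
	return 0;
}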
+
static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.type = MLXSW_SP_QDISC_PRIO,
.check_params = mlxsw_sp_qdisc_prio_check_params,
@@ -1248,6 +1534,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
static int
@@ -1299,6 +1587,8 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
.find_class = mlxsw_sp_qdisc_prio_find_class,
.num_classes = IEEE_8021QAZ_MAX_TCS,
+ .get_prio_bitmap = mlxsw_sp_qdisc_ets_get_prio_bitmap,
+ .get_tclass_num = mlxsw_sp_qdisc_ets_get_tclass_num,
};
/* Linux allows linking of Qdiscs to arbitrary classes (so long as the resulting
@@ -1326,10 +1616,9 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_ets = {
* grafted corresponds to the parent handle. If the two don't match, we
* unoffload the child.
*/
-static int
-__mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- u8 band, u32 child_handle)
+static int mlxsw_sp_qdisc_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ u8 band, u32 child_handle)
{
struct mlxsw_sp_qdisc *old_qdisc;
u32 parent;
@@ -1362,21 +1651,12 @@ __mlxsw_sp_qdisc_ets_graft(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
-static int
-mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
- struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
- struct tc_prio_qopt_offload_graft_params *p)
-{
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->band, p->child_handle);
-}
-
static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1396,8 +1676,9 @@ static int __mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_PRIO_GRAFT:
- return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- &p->graft_params);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1420,7 +1701,7 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent);
if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
@@ -1440,9 +1721,9 @@ static int __mlxsw_sp_setup_tc_ets(struct mlxsw_sp_port *mlxsw_sp_port,
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
case TC_ETS_GRAFT:
- return __mlxsw_sp_qdisc_ets_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
- p->graft_params.band,
- p->graft_params.child_handle);
+ return mlxsw_sp_qdisc_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ p->graft_params.band,
+ p->graft_params.child_handle);
default:
return -EOPNOTSUPP;
}
@@ -1472,6 +1753,7 @@ struct mlxsw_sp_qevent_binding {
u32 handle;
int tclass_num;
enum mlxsw_sp_span_trigger span_trigger;
+ unsigned int action_mask;
};
static LIST_HEAD(mlxsw_sp_qevent_block_cb_list);
@@ -1482,8 +1764,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_span_agent_parms *agent_parms,
int *p_span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {};
+ bool ingress;
int span_id;
int err;
@@ -1491,18 +1775,19 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
- err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, true);
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
+ err = mlxsw_sp_span_analyzed_port_get(mlxsw_sp_port, ingress);
if (err)
goto err_analyzed_port_get;
trigger_parms.span_id = span_id;
trigger_parms.probability_rate = 1;
- err = mlxsw_sp_span_agent_bind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ err = mlxsw_sp_span_agent_bind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
if (err)
goto err_agent_bind;
- err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, qevent_binding->span_trigger,
+ err = mlxsw_sp_span_trigger_enable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
if (err)
goto err_trigger_enable;
@@ -1511,10 +1796,10 @@ static int mlxsw_sp_qevent_span_configure(struct mlxsw_sp *mlxsw_sp,
return 0;
err_trigger_enable:
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
err_agent_bind:
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
err_analyzed_port_get:
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
return err;
@@ -1524,16 +1809,20 @@ static void mlxsw_sp_qevent_span_deconfigure(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_qevent_binding *qevent_binding,
int span_id)
{
+ enum mlxsw_sp_span_trigger span_trigger = qevent_binding->span_trigger;
struct mlxsw_sp_port *mlxsw_sp_port = qevent_binding->mlxsw_sp_port;
struct mlxsw_sp_span_trigger_parms trigger_parms = {
.span_id = span_id,
};
+ bool ingress;
+
+ ingress = mlxsw_sp_span_trigger_is_ingress(span_trigger);
- mlxsw_sp_span_trigger_disable(mlxsw_sp_port, qevent_binding->span_trigger,
+ mlxsw_sp_span_trigger_disable(mlxsw_sp_port, span_trigger,
qevent_binding->tclass_num);
- mlxsw_sp_span_agent_unbind(mlxsw_sp, qevent_binding->span_trigger, mlxsw_sp_port,
+ mlxsw_sp_span_agent_unbind(mlxsw_sp, span_trigger, mlxsw_sp_port,
&trigger_parms);
- mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, true);
+ mlxsw_sp_span_analyzed_port_put(mlxsw_sp_port, ingress);
mlxsw_sp_span_agent_put(mlxsw_sp, span_id);
}
@@ -1583,10 +1872,17 @@ static void mlxsw_sp_qevent_trap_deconfigure(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_qevent_span_deconfigure(mlxsw_sp, qevent_binding, mall_entry->trap.span_id);
}
-static int mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_mall_entry *mall_entry,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_mall_entry *mall_entry,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
+ if (!(BIT(mall_entry->type) & qevent_binding->action_mask)) {
+ NL_SET_ERR_MSG(extack, "Action not supported at this qevent");
+ return -EOPNOTSUPP;
+ }
+
switch (mall_entry->type) {
case MLXSW_SP_MALL_ACTION_TYPE_MIRROR:
return mlxsw_sp_qevent_mirror_configure(mlxsw_sp, mall_entry, qevent_binding);
@@ -1614,15 +1910,17 @@ static void mlxsw_sp_qevent_entry_deconfigure(struct mlxsw_sp *mlxsw_sp,
}
}
-static int mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
- struct mlxsw_sp_qevent_binding *qevent_binding)
+static int
+mlxsw_sp_qevent_binding_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct mlxsw_sp_qevent_binding *qevent_binding,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_mall_entry *mall_entry;
int err;
list_for_each_entry(mall_entry, &qevent_block->mall_entry_list, list) {
err = mlxsw_sp_qevent_entry_configure(qevent_block->mlxsw_sp, mall_entry,
- qevent_binding);
+ qevent_binding, extack);
if (err)
goto err_entry_configure;
}
@@ -1646,13 +1944,17 @@ static void mlxsw_sp_qevent_binding_deconfigure(struct mlxsw_sp_qevent_block *qe
qevent_binding);
}
-static int mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block)
+static int
+mlxsw_sp_qevent_block_configure(struct mlxsw_sp_qevent_block *qevent_block,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_qevent_binding *qevent_binding;
int err;
list_for_each_entry(qevent_binding, &qevent_block->binding_list, list) {
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block,
+ qevent_binding,
+ extack);
if (err)
goto err_binding_configure;
}
@@ -1737,7 +2039,7 @@ static int mlxsw_sp_qevent_mall_replace(struct mlxsw_sp *mlxsw_sp,
list_add_tail(&mall_entry->list, &qevent_block->mall_entry_list);
- err = mlxsw_sp_qevent_block_configure(qevent_block);
+ err = mlxsw_sp_qevent_block_configure(qevent_block, f->common.extack);
if (err)
goto err_block_configure;
@@ -1825,7 +2127,8 @@ static void mlxsw_sp_qevent_block_release(void *cb_priv)
static struct mlxsw_sp_qevent_binding *
mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle, int tclass_num,
- enum mlxsw_sp_span_trigger span_trigger)
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp_qevent_binding *binding;
@@ -1837,6 +2140,7 @@ mlxsw_sp_qevent_binding_create(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle,
binding->handle = handle;
binding->tclass_num = tclass_num;
binding->span_trigger = span_trigger;
+ binding->action_mask = action_mask;
return binding;
}
@@ -1862,9 +2166,11 @@ mlxsw_sp_qevent_binding_lookup(struct mlxsw_sp_qevent_block *block,
return NULL;
}
-static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_qevent_binding *qevent_binding;
@@ -1872,6 +2178,7 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
struct flow_block_cb *block_cb;
struct mlxsw_sp_qdisc *qdisc;
bool register_block = false;
+ int tclass_num;
int err;
block_cb = flow_block_cb_lookup(f->block, mlxsw_sp_qevent_block_cb, mlxsw_sp);
@@ -1904,14 +2211,19 @@ static int mlxsw_sp_setup_tc_block_qevent_bind(struct mlxsw_sp_port *mlxsw_sp_po
goto err_binding_exists;
}
- qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port, f->sch->handle,
- qdisc->tclass_num, span_trigger);
+ tclass_num = mlxsw_sp_qdisc_get_tclass_num(mlxsw_sp_port, qdisc);
+ qevent_binding = mlxsw_sp_qevent_binding_create(mlxsw_sp_port,
+ f->sch->handle,
+ tclass_num,
+ span_trigger,
+ action_mask);
if (IS_ERR(qevent_binding)) {
err = PTR_ERR(qevent_binding);
goto err_binding_create;
}
- err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding);
+ err = mlxsw_sp_qevent_binding_configure(qevent_block, qevent_binding,
+ f->extack);
if (err)
goto err_binding_configure;
@@ -1963,15 +2275,19 @@ static void mlxsw_sp_setup_tc_block_qevent_unbind(struct mlxsw_sp_port *mlxsw_sp
}
}
-static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
- struct flow_block_offload *f,
- enum mlxsw_sp_span_trigger span_trigger)
+static int
+mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f,
+ enum mlxsw_sp_span_trigger span_trigger,
+ unsigned int action_mask)
{
f->driver_block_list = &mlxsw_sp_qevent_block_cb_list;
switch (f->command) {
case FLOW_BLOCK_BIND:
- return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f, span_trigger);
+ return mlxsw_sp_setup_tc_block_qevent_bind(mlxsw_sp_port, f,
+ span_trigger,
+ action_mask);
case FLOW_BLOCK_UNBIND:
mlxsw_sp_setup_tc_block_qevent_unbind(mlxsw_sp_port, f, span_trigger);
return 0;
@@ -1983,7 +2299,22 @@ static int mlxsw_sp_setup_tc_block_qevent(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_setup_tc_block_qevent_early_drop(struct mlxsw_sp_port *mlxsw_sp_port,
struct flow_block_offload *f)
{
- return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f, MLXSW_SP_SPAN_TRIGGER_EARLY_DROP);
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR) |
+ BIT(MLXSW_SP_MALL_ACTION_TYPE_TRAP);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_EARLY_DROP,
+ action_mask);
+}
+
+int mlxsw_sp_setup_tc_block_qevent_mark(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct flow_block_offload *f)
+{
+ unsigned int action_mask = BIT(MLXSW_SP_MALL_ACTION_TYPE_MIRROR);
+
+ return mlxsw_sp_setup_tc_block_qevent(mlxsw_sp_port, f,
+ MLXSW_SP_SPAN_TRIGGER_ECN,
+ action_mask);
}
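
/* A hedged sketch of the per-qevent action gating introduced here: the
 * early-drop block accepts mirror and trap, the new ECN/mark block
 * accepts mirror only, and anything outside the mask is rejected. Toy
 * enum and names, not the driver's.
 */
#include <stdio.h>

enum toy_action { TOY_ACT_MIRROR, TOY_ACT_TRAP, TOY_ACT_SAMPLE };

static int toy_entry_configure(enum toy_action act, unsigned int action_mask)
{
	if (!((1u << act) & action_mask)) {
		fprintf(stderr, "Action not supported at this qevent\n");
		return -1;	/* -EOPNOTSUPP in the driver */
	}
	return 0;
}

int main(void)
{
	unsigned int early_drop_mask = (1u << TOY_ACT_MIRROR) | (1u << TOY_ACT_TRAP);
	unsigned int mark_mask = 1u << TOY_ACT_MIRROR;

	toy_entry_configure(TOY_ACT_TRAP, early_drop_mask);	/* accepted */
	toy_entry_configure(TOY_ACT_TRAP, mark_mask);		/* rejected */
	return 0;
}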
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1995,8 +2326,6 @@ int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
return -ENOMEM;
mutex_init(&qdisc_state->lock);
- qdisc_state->root_qdisc.prio_bitmap = 0xff;
- qdisc_state->root_qdisc.tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
mlxsw_sp_port->qdisc = qdisc_state;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 19bb3ca0515e..217e3b351dfe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -57,6 +57,7 @@ struct mlxsw_sp_rif {
unsigned char addr[ETH_ALEN];
int mtu;
u16 rif_index;
+ u8 mac_profile_id;
u16 vr_id;
const struct mlxsw_sp_rif_ops *ops;
struct mlxsw_sp *mlxsw_sp;
@@ -106,15 +107,23 @@ struct mlxsw_sp_rif_ops {
void (*setup)(struct mlxsw_sp_rif *rif,
const struct mlxsw_sp_rif_params *params);
- int (*configure)(struct mlxsw_sp_rif *rif);
+ int (*configure)(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack);
void (*deconfigure)(struct mlxsw_sp_rif *rif);
struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack);
void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac);
};
+struct mlxsw_sp_rif_mac_profile {
+ unsigned char mac_prefix[ETH_ALEN];
+ refcount_t ref_count;
+ u8 id;
+};
+
struct mlxsw_sp_router_ops {
int (*init)(struct mlxsw_sp *mlxsw_sp);
+ int (*ipips_init)(struct mlxsw_sp *mlxsw_sp);
};
static struct mlxsw_sp_rif *
@@ -1055,22 +1064,13 @@ static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp)
kfree(mlxsw_sp->router->vrs);
}
-static struct net_device *
-__mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev)
-{
- struct ip_tunnel *tun = netdev_priv(ol_dev);
- struct net *net = dev_net(ol_dev);
-
- return dev_get_by_index_rcu(net, tun->parms.link);
-}
-
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
{
struct net_device *d;
u32 tb_id;
rcu_read_lock();
- d = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ d = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
if (d)
tb_id = l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
else
@@ -1116,6 +1116,7 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ int err;
ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
@@ -1131,26 +1132,30 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
+ ipip_entry->parms = ipip_ops->parms_init(ol_dev);
- switch (ipip_ops->ul_proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
- break;
- case MLXSW_SP_L3_PROTO_IPV6:
- WARN_ON(1);
- break;
+ err = ipip_ops->rem_ip_addr_set(mlxsw_sp, ipip_entry);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_rem_ip_addr_set;
}
return ipip_entry;
+err_rem_ip_addr_set:
+ mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
err_ol_ipip_lb_create:
kfree(ipip_entry);
return ret;
}
-static void
-mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp_ipip_entry *ipip_entry)
+static void mlxsw_sp_ipip_entry_dealloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+
+ ipip_ops->rem_ip_addr_unset(mlxsw_sp, ipip_entry);
mlxsw_sp_rif_destroy(&ipip_entry->ol_lb->common);
kfree(ipip_entry);
}
@@ -1174,6 +1179,32 @@ mlxsw_sp_ipip_entry_saddr_matches(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_l3addr_eq(&tun_saddr, &saddr);
}
+static int mlxsw_sp_ipip_decap_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ /* Not all tunnels require increasing the default parsing depth
+ * (96 bytes).
+ */
+ if (ipip_ops->inc_parsing_depth)
+ return mlxsw_sp_parsing_depth_inc(mlxsw_sp);
+
+ return 0;
+}
+
+static void mlxsw_sp_ipip_decap_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp,
+ enum mlxsw_sp_ipip_type ipipt)
+{
+ const struct mlxsw_sp_ipip_ops *ipip_ops =
+ mlxsw_sp->router->ipip_ops_arr[ipipt];
+
+ if (ipip_ops->inc_parsing_depth)
+ mlxsw_sp_parsing_depth_dec(mlxsw_sp);
+}
+
static int
mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -1187,18 +1218,32 @@ mlxsw_sp_fib_entry_decap_init(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ err = mlxsw_sp_ipip_decap_parsing_depth_inc(mlxsw_sp,
+ ipip_entry->ipipt);
+ if (err)
+ goto err_parsing_depth_inc;
+
ipip_entry->decap_fib_entry = fib_entry;
fib_entry->decap.ipip_entry = ipip_entry;
fib_entry->decap.tunnel_index = tunnel_index;
+
return 0;
+
+err_parsing_depth_inc:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ fib_entry->decap.tunnel_index);
+ return err;
}
static void mlxsw_sp_fib_entry_decap_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
+ enum mlxsw_sp_ipip_type ipipt = fib_entry->decap.ipip_entry->ipipt;
+
/* Unlink this node from the IPIP entry that it's the decap entry of. */
fib_entry->decap.ipip_entry->decap_fib_entry = NULL;
fib_entry->decap.ipip_entry = NULL;
+ mlxsw_sp_ipip_decap_parsing_depth_dec(mlxsw_sp, ipipt);
mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ,
1, fib_entry->decap.tunnel_index);
}
@@ -1309,6 +1354,11 @@ mlxsw_sp_ipip_entry_find_decap(struct mlxsw_sp *mlxsw_sp,
saddr_len = 4;
saddr_prefix_len = 32;
break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ saddrp = &saddr.addr6;
+ saddr_len = 16;
+ saddr_prefix_len = 128;
+ break;
default:
WARN_ON(1);
return NULL;
@@ -1345,7 +1395,7 @@ mlxsw_sp_ipip_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
list_del(&ipip_entry->ipip_list_node);
- mlxsw_sp_ipip_entry_dealloc(ipip_entry);
+ mlxsw_sp_ipip_entry_dealloc(mlxsw_sp, ipip_entry);
}
static bool
@@ -1450,7 +1500,7 @@ mlxsw_sp_ipip_entry_find_by_ul_dev(const struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
@@ -1536,23 +1586,34 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
struct mlxsw_sp_rif *rif = &lb_rif->common;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
+ struct in6_addr *saddr6;
u32 saddr4;
+ ipip_options = MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET;
switch (lb_cf.ul_protocol) {
case MLXSW_SP_L3_PROTO_IPV4:
saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
- MLXSW_REG_RITR_LOOPBACK_IPIP_OPTIONS_GRE_KEY_PRESET,
- ul_vr_id, ul_rif_id, saddr4, lb_cf.okey);
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr4,
+ lb_cf.okey);
break;
case MLXSW_SP_L3_PROTO_IPV6:
- return -EAFNOSUPPORT;
+ saddr6 = &lb_cf.saddr.addr6;
+ mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
+ rif->rif_index, rif->vr_id, rif->dev->mtu);
+ mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
+ ipip_options, ul_vr_id,
+ ul_rif_id, saddr6,
+ lb_cf.okey);
+ break;
}
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -1827,7 +1888,7 @@ static void mlxsw_sp_ipip_demote_tunnel_by_ul_netdev(struct mlxsw_sp *mlxsw_sp,
struct net_device *ipip_ul_dev;
rcu_read_lock();
- ipip_ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ipip_ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
rcu_read_unlock();
if (ipip_ul_dev == ul_dev)
mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
@@ -4152,7 +4213,7 @@ static bool mlxsw_sp_ipip_netdev_ul_up(struct net_device *ol_dev)
bool is_up;
rcu_read_lock();
- ul_dev = __mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
+ ul_dev = mlxsw_sp_ipip_netdev_ul_dev_get(ol_dev);
is_up = ul_dev ? (ul_dev->flags & IFF_UP) : true;
rcu_read_unlock();
@@ -4376,6 +4437,66 @@ static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
}
}
+static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
+{
+ enum mlxsw_reg_ratr_trap_action trap_action;
+ char ratr_pl[MLXSW_REG_RATR_LEN];
+ int err;
+
+ err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ &mlxsw_sp->router->adj_trap_index);
+ if (err)
+ return err;
+
+ trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
+ mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
+ MLXSW_REG_RATR_TYPE_ETHERNET,
+ mlxsw_sp->router->adj_trap_index,
+ mlxsw_sp->router->lb_rif_index);
+ mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
+ mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
+ if (err)
+ goto err_ratr_write;
+
+ return 0;
+
+err_ratr_write:
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+ return err;
+}
+
+static void mlxsw_sp_adj_trap_entry_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
+ mlxsw_sp->router->adj_trap_index);
+}
+
+static int mlxsw_sp_nexthop_group_inc(struct mlxsw_sp *mlxsw_sp)
+{
+ int err;
+
+ if (refcount_inc_not_zero(&mlxsw_sp->router->num_groups))
+ return 0;
+
+ err = mlxsw_sp_adj_trap_entry_init(mlxsw_sp);
+ if (err)
+ return err;
+
+ refcount_set(&mlxsw_sp->router->num_groups, 1);
+
+ return 0;
+}
+
+static void mlxsw_sp_nexthop_group_dec(struct mlxsw_sp *mlxsw_sp)
+{
+ if (!refcount_dec_and_test(&mlxsw_sp->router->num_groups))
+ return;
+
+ mlxsw_sp_adj_trap_entry_fini(mlxsw_sp);
+}
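
/* The adjacency trap entry is shared by all nexthop groups: created for
 * the first user, freed by the last. A userspace sketch of that pattern,
 * with a plain counter standing in for refcount_t (hypothetical names).
 */
#include <stdio.h>

static int num_groups;

static int toy_trap_entry_init(void)  { printf("trap entry allocated\n"); return 0; }
static void toy_trap_entry_fini(void) { printf("trap entry freed\n"); }

static int toy_group_inc(void)
{
	int err;

	if (num_groups++)
		return 0;		/* someone already set it up */

	err = toy_trap_entry_init();
	if (err) {
		num_groups--;
		return err;
	}
	return 0;
}

static void toy_group_dec(void)
{
	if (--num_groups)
		return;			/* still in use */
	toy_trap_entry_fini();
}

int main(void)
{
	toy_group_inc();	/* allocates */
	toy_group_inc();	/* reuses */
	toy_group_dec();
	toy_group_dec();	/* frees */
	return 0;
}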
+
static void
mlxsw_sp_nh_grp_activity_get(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_nexthop_group *nh_grp,
@@ -4790,6 +4911,9 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_obj_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err) {
NL_SET_ERR_MSG_MOD(info->extack, "Failed to write adjacency entries to the device");
@@ -4808,6 +4932,8 @@ mlxsw_sp_nexthop_obj_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop_obj_init:
for (i--; i >= 0; i--) {
@@ -4832,6 +4958,7 @@ mlxsw_sp_nexthop_obj_group_info_fini(struct mlxsw_sp *mlxsw_sp,
cancel_delayed_work(&router->nh_grp_activity_dw);
}
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5223,6 +5350,9 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop4_init;
}
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -5230,6 +5360,8 @@ mlxsw_sp_nexthop4_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop4_init:
for (i--; i >= 0; i--) {
@@ -5247,6 +5379,7 @@ mlxsw_sp_nexthop4_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -5725,41 +5858,6 @@ static int mlxsw_sp_fib_entry_commit(struct mlxsw_sp *mlxsw_sp,
return err;
}
-static int mlxsw_sp_adj_discard_write(struct mlxsw_sp *mlxsw_sp)
-{
- enum mlxsw_reg_ratr_trap_action trap_action;
- char ratr_pl[MLXSW_REG_RATR_LEN];
- int err;
-
- if (mlxsw_sp->router->adj_discard_index_valid)
- return 0;
-
- err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- &mlxsw_sp->router->adj_discard_index);
- if (err)
- return err;
-
- trap_action = MLXSW_REG_RATR_TRAP_ACTION_TRAP;
- mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
- MLXSW_REG_RATR_TYPE_ETHERNET,
- mlxsw_sp->router->adj_discard_index,
- mlxsw_sp->router->lb_rif_index);
- mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
- mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
- if (err)
- goto err_ratr_write;
-
- mlxsw_sp->router->adj_discard_index_valid = true;
-
- return 0;
-
-err_ratr_write:
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- return err;
-}
-
static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry_op_ctx *op_ctx,
struct mlxsw_sp_fib_entry *fib_entry,
@@ -5772,7 +5870,6 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
u16 trap_id = 0;
u32 adjacency_index = 0;
u16 ecmp_size = 0;
- int err;
/* In case the nexthop group adjacency index is valid, use it
* with provided ECMP size. Otherwise, setup trap and pass
@@ -5783,11 +5880,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
adjacency_index = nhgi->adj_index;
ecmp_size = nhgi->ecmp_size;
} else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
- err = mlxsw_sp_adj_discard_write(mlxsw_sp);
- if (err)
- return err;
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
- adjacency_index = mlxsw_sp->router->adj_discard_index;
+ adjacency_index = mlxsw_sp->router->adj_trap_index;
ecmp_size = 1;
} else {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_TRAP;
@@ -6036,8 +6130,8 @@ mlxsw_sp_fib4_entry_type_set(struct mlxsw_sp *mlxsw_sp,
}
static void
-mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry)
+mlxsw_sp_fib_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
{
switch (fib_entry->type) {
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
@@ -6048,6 +6142,13 @@ mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
}
}
+static void
+mlxsw_sp_fib4_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib4_entry *fib4_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+}
+
static struct mlxsw_sp_fib4_entry *
mlxsw_sp_fib4_entry_create(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node,
@@ -6108,7 +6209,7 @@ static void mlxsw_sp_fib4_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_node *fib_node = fib4_entry->common.fib_node;
fib_info_put(fib4_entry->fi);
- mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, &fib4_entry->common);
+ mlxsw_sp_fib4_entry_type_unset(mlxsw_sp, fib4_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib4_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop4_group_put(mlxsw_sp, &fib4_entry->common);
@@ -6641,6 +6742,9 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rt6 = list_next_entry(mlxsw_sp_rt6, list);
}
nh_grp->nhgi = nhgi;
+ err = mlxsw_sp_nexthop_group_inc(mlxsw_sp);
+ if (err)
+ goto err_group_inc;
err = mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp);
if (err)
goto err_group_refresh;
@@ -6648,6 +6752,8 @@ mlxsw_sp_nexthop6_group_info_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_group_refresh:
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
+err_group_inc:
i = nhgi->count;
err_nexthop6_init:
for (i--; i >= 0; i--) {
@@ -6665,6 +6771,7 @@ mlxsw_sp_nexthop6_group_info_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group_info *nhgi = nh_grp->nhgi;
int i;
+ mlxsw_sp_nexthop_group_dec(mlxsw_sp);
for (i = nhgi->count - 1; i >= 0; i--) {
struct mlxsw_sp_nexthop *nh = &nhgi->nexthops[i];
@@ -6888,11 +6995,38 @@ mlxsw_sp_fib6_entry_nexthop_del(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop6_group_update(mlxsw_sp, op_ctx, fib6_entry);
}
-static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_fib_entry *fib_entry,
- const struct fib6_info *rt)
+static int
+mlxsw_sp_fib6_entry_type_set_local(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
{
- if (rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
+ struct mlxsw_sp_nexthop_group_info *nhgi = fib_entry->nh_group->nhgi;
+ union mlxsw_sp_l3addr dip = { .addr6 = rt->fib6_dst.addr };
+ int ifindex = nhgi->nexthops[0].ifindex;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
+
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, ifindex,
+ MLXSW_SP_L3_PROTO_IPV6,
+ dip);
+
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
+ fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
+ return mlxsw_sp_fib_entry_decap_init(mlxsw_sp, fib_entry,
+ ipip_entry);
+ }
+
+ return 0;
+}
+
+static int mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry,
+ const struct fib6_info *rt)
+{
+ if (rt->fib6_flags & RTF_LOCAL)
+ return mlxsw_sp_fib6_entry_type_set_local(mlxsw_sp, fib_entry,
+ rt);
+ if (rt->fib6_flags & RTF_ANYCAST)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_TRAP;
else if (rt->fib6_type == RTN_BLACKHOLE)
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE;
@@ -6902,6 +7036,8 @@ static void mlxsw_sp_fib6_entry_type_set(struct mlxsw_sp *mlxsw_sp,
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_REMOTE;
else
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_LOCAL;
+
+ return 0;
}
static void
@@ -6959,12 +7095,16 @@ mlxsw_sp_fib6_entry_create(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_nexthop_group_vr_link;
- mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ err = mlxsw_sp_fib6_entry_type_set(mlxsw_sp, fib_entry, rt_arr[0]);
+ if (err)
+ goto err_fib6_entry_type_set;
fib_entry->fib_node = fib_node;
return fib6_entry;
+err_fib6_entry_type_set:
+ mlxsw_sp_nexthop_group_vr_unlink(fib_entry->nh_group, fib_node->fib);
err_nexthop_group_vr_link:
mlxsw_sp_nexthop6_group_put(mlxsw_sp, fib_entry);
err_nexthop6_group_get:
@@ -6983,11 +7123,19 @@ err_fib_entry_priv_create:
return ERR_PTR(err);
}
+static void
+mlxsw_sp_fib6_entry_type_unset(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib6_entry *fib6_entry)
+{
+ mlxsw_sp_fib_entry_type_unset(mlxsw_sp, &fib6_entry->common);
+}
+
static void mlxsw_sp_fib6_entry_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib6_entry *fib6_entry)
{
struct mlxsw_sp_fib_node *fib_node = fib6_entry->common.fib_node;
+ mlxsw_sp_fib6_entry_type_unset(mlxsw_sp, fib6_entry);
mlxsw_sp_nexthop_group_vr_unlink(fib6_entry->common.nh_group,
fib_node->fib);
mlxsw_sp_nexthop6_group_put(mlxsw_sp, &fib6_entry->common);
@@ -7340,16 +7488,6 @@ static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
continue;
mlxsw_sp_vr_fib_flush(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV6);
}
-
- /* After flushing all the routes, it is not possible anyone is still
- * using the adjacency index that is discarding packets, so free it in
- * case it was allocated.
- */
- if (!mlxsw_sp->router->adj_discard_index_valid)
- return;
- mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
- mlxsw_sp->router->adj_discard_index);
- mlxsw_sp->router->adj_discard_index_valid = false;
}
struct mlxsw_sp_fib6_event {
@@ -8056,7 +8194,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
if (ops->setup)
ops->setup(rif, params);
- err = ops->configure(rif);
+ err = ops->configure(rif, extack);
if (err)
goto err_configure;
@@ -8175,6 +8313,200 @@ static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif)
mlxsw_sp_rif_destroy(rif);
}
+static int mlxsw_sp_rif_mac_profile_index_alloc(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif_mac_profile *profile,
+ struct netlink_ext_ack *extack)
+{
+ u8 max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ int id;
+
+ id = idr_alloc(&router->rif_mac_profiles_idr, profile, 0,
+ max_rif_mac_profiles, GFP_KERNEL);
+
+ if (id >= 0) {
+ profile->id = id;
+ return 0;
+ }
+
+ if (id == -ENOSPC)
+ NL_SET_ERR_MSG_MOD(extack,
+ "Exceeded number of supported router interface MAC profiles");
+
+ return id;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_index_free(struct mlxsw_sp *mlxsw_sp, u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_remove(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ WARN_ON(!profile);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_alloc(const char *mac)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ ether_addr_copy(profile->mac_prefix, mac);
+ refcount_set(&profile->ref_count, 1);
+ return profile;
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_find(const struct mlxsw_sp *mlxsw_sp, const char *mac)
+{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int id;
+
+ idr_for_each_entry(&router->rif_mac_profiles_idr, profile, id) {
+ if (!profile)
+ continue;
+
+ if (ether_addr_equal_masked(profile->mac_prefix, mac,
+ mlxsw_sp->mac_mask))
+ return profile;
+ }
+
+ return NULL;
+}
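
/* Profiles are keyed by the masked MAC prefix, so RIFs whose addresses
 * agree on the unmasked bits share a single profile. A standalone sketch
 * of that comparison (hypothetical mask and addresses, toy names).
 */
#include <stdbool.h>
#include <stdio.h>

static bool toy_mac_equal_masked(const unsigned char *a, const unsigned char *b,
				 const unsigned char *mask)
{
	for (int i = 0; i < 6; i++)
		if ((a[i] ^ b[i]) & mask[i])
			return false;
	return true;
}

int main(void)
{
	/* Hypothetical mask: compare only the first 34 bits of the MAC. */
	unsigned char mask[6]  = { 0xff, 0xff, 0xff, 0xff, 0xc0, 0x00 };
	unsigned char rif_a[6] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
	unsigned char rif_b[6] = { 0x52, 0x54, 0x00, 0x12, 0x3f, 0xff };

	printf("share a profile: %s\n",
	       toy_mac_equal_masked(rif_a, rif_b, mask) ? "yes" : "no");
	return 0;
}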
+
+static u64 mlxsw_sp_rif_mac_profiles_occ_get(void *priv)
+{
+ const struct mlxsw_sp *mlxsw_sp = priv;
+
+ return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
+}
+
+static struct mlxsw_sp_rif_mac_profile *
+mlxsw_sp_rif_mac_profile_create(struct mlxsw_sp *mlxsw_sp, const char *mac,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+ int err;
+
+ profile = mlxsw_sp_rif_mac_profile_alloc(mac);
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_rif_mac_profile_index_alloc(mlxsw_sp, profile, extack);
+ if (err)
+ goto profile_index_alloc_err;
+
+ atomic_inc(&mlxsw_sp->router->rif_mac_profiles_count);
+ return profile;
+
+profile_index_alloc_err:
+ kfree(profile);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_rif_mac_profile_destroy(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ atomic_dec(&mlxsw_sp->router->rif_mac_profiles_count);
+ profile = mlxsw_sp_rif_mac_profile_index_free(mlxsw_sp, mac_profile);
+ kfree(profile);
+}
+
+static int mlxsw_sp_rif_mac_profile_get(struct mlxsw_sp *mlxsw_sp,
+ const char *mac, u8 *p_mac_profile,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, mac);
+ if (profile) {
+ refcount_inc(&profile->ref_count);
+ goto out;
+ }
+
+ profile = mlxsw_sp_rif_mac_profile_create(mlxsw_sp, mac, extack);
+ if (IS_ERR(profile))
+ return PTR_ERR(profile);
+
+out:
+ *p_mac_profile = profile->id;
+ return 0;
+}
+
+static void mlxsw_sp_rif_mac_profile_put(struct mlxsw_sp *mlxsw_sp,
+ u8 mac_profile)
+{
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ mac_profile);
+ if (WARN_ON(!profile))
+ return;
+
+ if (!refcount_dec_and_test(&profile->ref_count))
+ return;
+
+ mlxsw_sp_rif_mac_profile_destroy(mlxsw_sp, mac_profile);
+}
+
+static bool mlxsw_sp_rif_mac_profile_is_shared(const struct mlxsw_sp_rif *rif)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return false;
+
+ return refcount_read(&profile->ref_count) > 1;
+}
+
+static int mlxsw_sp_rif_mac_profile_edit(struct mlxsw_sp_rif *rif,
+ const char *new_mac)
+{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
+
+ profile = idr_find(&mlxsw_sp->router->rif_mac_profiles_idr,
+ rif->mac_profile_id);
+ if (WARN_ON(!profile))
+ return -EINVAL;
+
+ ether_addr_copy(profile->mac_prefix, new_mac);
+ return 0;
+}
+
+static int
+mlxsw_sp_rif_mac_profile_replace(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif,
+ const char *new_mac,
+ struct netlink_ext_ack *extack)
+{
+ u8 mac_profile;
+ int err;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ return mlxsw_sp_rif_mac_profile_edit(rif, new_mac);
+
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, new_mac,
+ &mac_profile, extack);
+ if (err)
+ return err;
+
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, rif->mac_profile_id);
+ rif->mac_profile_id = mac_profile;
+ return 0;
+}
+
static int
__mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
struct net_device *l3_dev,
@@ -8523,36 +8855,6 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
- struct net_device *dev,
- const unsigned char *dev_addr,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_sp_rif *rif;
- int i;
-
- /* A RIF is not created for macvlan netdevs. Their MAC is used to
- * populate the FDB
- */
- if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
- return 0;
-
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
- rif = mlxsw_sp->router->rifs[i];
- if (rif && rif->ops &&
- rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB)
- continue;
- if (rif && rif->dev && rif->dev != dev &&
- !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr,
- mlxsw_sp->mac_mask)) {
- NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix");
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp,
struct net_device *dev,
unsigned long event,
@@ -8618,11 +8920,6 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
- ivi->extack);
- if (err)
- goto out;
-
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
@@ -8706,11 +9003,6 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
if (!mlxsw_sp_rif_should_config(rif, dev, event))
goto out;
- err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr,
- i6vi->extack);
- if (err)
- goto out;
-
err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack);
out:
mutex_unlock(&mlxsw_sp->router->lock);
@@ -8718,7 +9010,7 @@ out:
}
static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
- const char *mac, int mtu)
+ const char *mac, int mtu, u8 mac_profile)
{
char ritr_pl[MLXSW_REG_RITR_LEN];
int err;
@@ -8730,15 +9022,18 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
mlxsw_reg_ritr_mtu_set(ritr_pl, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(ritr_pl, mac);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, mac_profile);
mlxsw_reg_ritr_op_set(ritr_pl, MLXSW_REG_RITR_RIF_CREATE);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
static int
mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif)
+ struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = rif->dev;
+ u8 old_mac_profile;
u16 fid_index;
int err;
@@ -8748,8 +9043,14 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
if (err)
return err;
+ old_mac_profile = rif->mac_profile_id;
+ err = mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, dev->dev_addr,
+ extack);
+ if (err)
+ goto err_rif_mac_profile_replace;
+
err = mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, dev->dev_addr,
- dev->mtu);
+ dev->mtu, rif->mac_profile_id);
if (err)
goto err_rif_edit;
@@ -8779,8 +9080,11 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
return 0;
err_rif_fdb_op:
- mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu);
+ mlxsw_sp_rif_edit(mlxsw_sp, rif->rif_index, rif->addr, rif->mtu,
+ old_mac_profile);
err_rif_edit:
+ mlxsw_sp_rif_mac_profile_replace(mlxsw_sp, rif, rif->addr, extack);
+err_rif_mac_profile_replace:
mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, true);
return err;
}
@@ -8788,16 +9092,34 @@ err_rif_edit:
static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
struct netdev_notifier_pre_changeaddr_info *info)
{
+ struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_rif_mac_profile *profile;
struct netlink_ext_ack *extack;
+ u8 max_rif_mac_profiles;
+ u64 occ;
extack = netdev_notifier_info_to_extack(&info->info);
- return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev,
- info->dev_addr, extack);
+
+ profile = mlxsw_sp_rif_mac_profile_find(mlxsw_sp, info->dev_addr);
+ if (profile)
+ return 0;
+
+ max_rif_mac_profiles = mlxsw_sp->router->max_rif_mac_profile;
+ occ = mlxsw_sp_rif_mac_profiles_occ_get(mlxsw_sp);
+ if (occ < max_rif_mac_profiles)
+ return 0;
+
+ if (!mlxsw_sp_rif_mac_profile_is_shared(rif))
+ return 0;
+
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interface MAC profiles");
+ return -ENOBUFS;
}
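For orientation, the handler above can only veto the change because the networking core raises NETDEV_PRE_CHANGEADDR before it commits a new address; an error returned here aborts the change and the extack message reaches user space. Roughly, the core-side order is (from memory of the core code, not part of this patch):

/* dev_set_mac_address()
 *   -> dev_pre_changeaddr_notify(dev, new_addr, extack)
 *        -> NETDEV_PRE_CHANGEADDR notifier chain
 *             -> mlxsw_sp_netdevice_router_port_event()
 *                  -> mlxsw_sp_router_port_pre_changeaddr_event()
 *   -> ndo_set_mac_address() only if no notifier returned an error
 */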
int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
unsigned long event, void *ptr)
{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif *rif;
int err = 0;
@@ -8814,7 +9136,7 @@ int mlxsw_sp_netdevice_router_port_event(struct net_device *dev,
switch (event) {
case NETDEV_CHANGEMTU:
case NETDEV_CHANGEADDR:
- err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif);
+ err = mlxsw_sp_router_port_change_event(mlxsw_sp, rif, extack);
break;
case NETDEV_PRE_CHANGEADDR:
err = mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr);
@@ -8937,6 +9259,7 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
rif->rif_index, rif->vr_id, rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
rif_subport->lag ? rif_subport->lag_id :
rif_subport->system_port,
@@ -8945,13 +9268,21 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
}
-static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
+ u8 mac_profile;
int err;
- err = mlxsw_sp_rif_subport_op(rif, true);
+ err = mlxsw_sp_rif_mac_profile_get(rif->mlxsw_sp, rif->addr,
+ &mac_profile, extack);
if (err)
return err;
+ rif->mac_profile_id = mac_profile;
+
+ err = mlxsw_sp_rif_subport_op(rif, true);
+ if (err)
+ goto err_rif_subport_op;
err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
@@ -8963,6 +9294,8 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif)
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
+err_rif_subport_op:
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, mac_profile);
return err;
}
@@ -8975,6 +9308,7 @@ static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static struct mlxsw_sp_fid *
@@ -9003,6 +9337,7 @@ static int mlxsw_sp_rif_vlan_fid_op(struct mlxsw_sp_rif *rif,
mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
rif->dev->mtu);
mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
mlxsw_reg_ritr_fid_set(ritr_pl, type, vid_fid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -9013,16 +9348,24 @@ u8 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
return mlxsw_core_max_ports(mlxsw_sp->core) + 1;
}
-static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif)
+static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u16 fid_index = mlxsw_sp_fid_index(rif->fid);
+ u8 mac_profile;
int err;
+ err = mlxsw_sp_rif_mac_profile_get(mlxsw_sp, rif->addr,
+ &mac_profile, extack);
+ if (err)
+ return err;
+ rif->mac_profile_id = mac_profile;
+
err = mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index,
true);
if (err)
- return err;
+ goto err_rif_vlan_fid_op;
err = mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), true);
@@ -9050,6 +9393,8 @@ err_fid_bc_flood_set:
mlxsw_sp_router_port(mlxsw_sp), false);
err_fid_mc_flood_set:
mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+err_rif_vlan_fid_op:
+ mlxsw_sp_rif_mac_profile_put(mlxsw_sp, mac_profile);
return err;
}
@@ -9068,6 +9413,7 @@ static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_MC,
mlxsw_sp_router_port(mlxsw_sp), false);
mlxsw_sp_rif_vlan_fid_op(rif, MLXSW_REG_RITR_FID_IF, fid_index, false);
+ mlxsw_sp_rif_mac_profile_put(rif->mlxsw_sp, rif->mac_profile_id);
}
static struct mlxsw_sp_fid *
@@ -9172,7 +9518,8 @@ mlxsw_sp_rif_ipip_lb_setup(struct mlxsw_sp_rif *rif,
}
static int
-mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -9359,7 +9706,8 @@ out:
}
static int
-mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif)
+mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
@@ -9414,6 +9762,13 @@ static const struct mlxsw_sp_rif_ops *mlxsw_sp2_rif_ops_arr[] = {
static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
{
u64 max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+ struct mlxsw_core *core = mlxsw_sp->core;
+
+ if (!MLXSW_CORE_RES_VALID(core, MAX_RIF_MAC_PROFILES))
+ return -EIO;
+ mlxsw_sp->router->max_rif_mac_profile =
+ MLXSW_CORE_RES_GET(core, MAX_RIF_MAC_PROFILES);
mlxsw_sp->router->rifs = kcalloc(max_rifs,
sizeof(struct mlxsw_sp_rif *),
@@ -9421,16 +9776,28 @@ static int mlxsw_sp_rifs_init(struct mlxsw_sp *mlxsw_sp)
if (!mlxsw_sp->router->rifs)
return -ENOMEM;
+ idr_init(&mlxsw_sp->router->rif_mac_profiles_idr);
+ atomic_set(&mlxsw_sp->router->rif_mac_profiles_count, 0);
+ devlink_resource_occ_get_register(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
+ mlxsw_sp_rif_mac_profiles_occ_get,
+ mlxsw_sp);
+
return 0;
}
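The occupancy callback registered here, mlxsw_sp_rif_mac_profiles_occ_get(), is defined earlier in the patch (outside this excerpt); devlink calls it whenever user space queries the RIF_MAC_PROFILES resource, so `devlink resource show` reports current usage next to the limit. The callback type is just a u64 getter over the registered private pointer; a hedged sketch of what it presumably amounts to:

/* Sketch of a devlink occupancy getter; the real callback is expected to
 * read router->rif_mac_profiles_count in much the same way.
 */
static u64 example_rif_mac_profiles_occ_get(void *priv)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	return atomic_read(&mlxsw_sp->router->rif_mac_profiles_count);
}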
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
+ devlink_resource_occ_get_unregister(devlink,
+ MLXSW_SP_RESOURCE_RIF_MAC_PROFILES);
+ WARN_ON(!idr_is_empty(&mlxsw_sp->router->rif_mac_profiles_idr));
+ idr_destroy(&mlxsw_sp->router->rif_mac_profiles_idr);
kfree(mlxsw_sp->router->rifs);
}
@@ -9447,7 +9814,6 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
- mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr;
INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list);
err = mlxsw_sp_ipip_ecn_encap_init(mlxsw_sp);
@@ -9460,6 +9826,18 @@ static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp)
return mlxsw_sp_ipip_config_tigcr(mlxsw_sp);
}
+static int mlxsw_sp1_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp1_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
+static int mlxsw_sp2_ipips_init(struct mlxsw_sp *mlxsw_sp)
+{
+ mlxsw_sp->router->ipip_ops_arr = mlxsw_sp2_ipip_ops_arr;
+ return mlxsw_sp_ipips_init(mlxsw_sp);
+}
+
static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp)
{
WARN_ON(!list_empty(&mlxsw_sp->router->ipip_list));
@@ -9874,6 +10252,7 @@ static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp1_router_ops = {
.init = mlxsw_sp1_router_init,
+ .ipips_init = mlxsw_sp1_ipips_init,
};
static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
@@ -9889,6 +10268,7 @@ static int mlxsw_sp2_router_init(struct mlxsw_sp *mlxsw_sp)
const struct mlxsw_sp_router_ops mlxsw_sp2_router_ops = {
.init = mlxsw_sp2_router_init,
+ .ipips_init = mlxsw_sp2_ipips_init,
};
int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
@@ -9934,7 +10314,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_rifs_init;
- err = mlxsw_sp_ipips_init(mlxsw_sp);
+ err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
if (err)
goto err_ipips_init;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 25d3eae63501..99e8371a82a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -39,6 +39,9 @@ mlxsw_sp_fib_entry_op_ctx_clear(struct mlxsw_sp_fib_entry_op_ctx *op_ctx)
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
struct mlxsw_sp_rif **rifs;
+ struct idr rif_mac_profiles_idr;
+ atomic_t rif_mac_profiles_count;
+ u8 max_rif_mac_profile;
struct mlxsw_sp_vr *vrs;
struct rhashtable neigh_ht;
struct rhashtable nexthop_group_ht;
@@ -65,8 +68,6 @@ struct mlxsw_sp_router {
struct notifier_block inet6addr_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
- u32 adj_discard_index;
- bool adj_discard_index_valid;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
struct work_struct fib_event_work;
@@ -82,6 +83,8 @@ struct mlxsw_sp_router {
struct delayed_work nh_grp_activity_dw;
struct list_head nh_res_grp_list;
bool inc_parsing_depth;
+ refcount_t num_groups;
+ u32 adj_trap_index;
};
struct mlxsw_sp_fib_entry_priv {
@@ -226,6 +229,8 @@ static inline bool mlxsw_sp_l3addr_eq(const union mlxsw_sp_l3addr *addr1,
int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
+struct net_device *
+mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
extern const struct mlxsw_sp_router_ll_ops mlxsw_sp_router_ll_xm_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
index 3398cc01e5ec..f5f819aa9a65 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -1650,6 +1650,22 @@ void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
return trigger_entry->ops->disable(trigger_entry, mlxsw_sp_port, tc);
}
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger)
+{
+ switch (trigger) {
+ case MLXSW_SP_SPAN_TRIGGER_INGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_EARLY_DROP:
+ case MLXSW_SP_SPAN_TRIGGER_TAIL_DROP:
+ return true;
+ case MLXSW_SP_SPAN_TRIGGER_EGRESS:
+ case MLXSW_SP_SPAN_TRIGGER_ECN:
+ return false;
+ }
+
+ WARN_ON_ONCE(1);
+ return false;
+}
+
static int mlxsw_sp1_span_init(struct mlxsw_sp *mlxsw_sp)
{
size_t arr_size = ARRAY_SIZE(mlxsw_sp1_span_entry_ops_arr);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
index efaefd1ae863..82e711afb02b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -120,6 +120,7 @@ int mlxsw_sp_span_trigger_enable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
void mlxsw_sp_span_trigger_disable(struct mlxsw_sp_port *mlxsw_sp_port,
enum mlxsw_sp_span_trigger trigger, u8 tc);
+bool mlxsw_sp_span_trigger_is_ingress(enum mlxsw_sp_span_trigger trigger);
extern const struct mlxsw_sp_span_ops mlxsw_sp1_span_ops;
extern const struct mlxsw_sp_span_ops mlxsw_sp2_span_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 22fede5cb32c..81c7e8a7fcf5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1635,16 +1635,13 @@ mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp,
u16 fid)
{
struct mlxsw_sp_mid *mid;
- size_t alloc_size;
mid = kzalloc(sizeof(*mid), GFP_KERNEL);
if (!mid)
return NULL;
- alloc_size = sizeof(unsigned long) *
- BITS_TO_LONGS(mlxsw_core_max_ports(mlxsw_sp->core));
-
- mid->ports_in_mid = kzalloc(alloc_size, GFP_KERNEL);
+ mid->ports_in_mid = bitmap_zalloc(mlxsw_core_max_ports(mlxsw_sp->core),
+ GFP_KERNEL);
if (!mid->ports_in_mid)
goto err_ports_in_mid_alloc;
@@ -1663,7 +1660,7 @@ out:
return mid;
err_write_mdb_entry:
- kfree(mid->ports_in_mid);
+ bitmap_free(mid->ports_in_mid);
err_ports_in_mid_alloc:
kfree(mid);
return NULL;
@@ -1680,7 +1677,7 @@ static int mlxsw_sp_port_remove_from_mid(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_core_max_ports(mlxsw_sp->core))) {
err = mlxsw_sp_mc_remove_mdb_entry(mlxsw_sp, mid);
list_del(&mid->list);
- kfree(mid->ports_in_mid);
+ bitmap_free(mid->ports_in_mid);
kfree(mid);
}
return err;
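The switchdev hunk above is a straight conversion from an open-coded BITS_TO_LONGS() allocation to the dedicated bitmap helpers, which size and free the backing array for you. Illustration only (nbits would be mlxsw_core_max_ports() in the driver):

/* Before: manual sizing, released with kfree(). */
static unsigned long *alloc_port_mask_old(unsigned int nbits)
{
	return kzalloc(BITS_TO_LONGS(nbits) * sizeof(unsigned long),
		       GFP_KERNEL);
}

/* After: the bitmap API handles the sizing, released with bitmap_free(). */
static unsigned long *alloc_port_mask_new(unsigned int nbits)
{
	return bitmap_zalloc(nbits, GFP_KERNEL);
}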
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index b27713906d3a..c11b118dc415 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -348,13 +348,15 @@ static void ks8842_reset_hw(struct ks8842_adapter *adapter)
ks8842_write16(adapter, 32, 0x1, REG_SW_ID_AND_ENABLE);
}
-static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
+static void ks8842_init_mac_addr(struct ks8842_adapter *adapter)
{
+ u8 addr[ETH_ALEN];
int i;
u16 mac;
for (i = 0; i < ETH_ALEN; i++)
- dest[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ addr[ETH_ALEN - i - 1] = ks8842_read8(adapter, 2, REG_MARL + i);
+ eth_hw_addr_set(adapter->netdev, addr);
if (adapter->conf_flags & MICREL_KS884X) {
/*
@@ -380,7 +382,7 @@ static void ks8842_read_mac_addr(struct ks8842_adapter *adapter, u8 *dest)
}
}
-static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, u8 *mac)
+static void ks8842_write_mac_addr(struct ks8842_adapter *adapter, const u8 *mac)
{
unsigned long flags;
unsigned i;
@@ -1064,7 +1066,7 @@ static int ks8842_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ eth_hw_addr_set(netdev, mac);
ks8842_write_mac_addr(adapter, mac);
return 0;
@@ -1191,12 +1193,11 @@ static int ks8842_probe(struct platform_device *pdev)
if (i < netdev->addr_len)
/* an address was passed, use it */
- memcpy(netdev->dev_addr, pdata->macaddr,
- netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->macaddr);
}
if (i == netdev->addr_len) {
- ks8842_read_mac_addr(adapter, netdev->dev_addr);
+ ks8842_init_mac_addr(adapter);
if (!is_valid_ether_addr(netdev->dev_addr))
eth_hw_addr_random(netdev);
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index e2eb0caeac82..6f34a61739b6 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -427,7 +427,7 @@ struct ks8851_net {
int ks8851_probe_common(struct net_device *netdev, struct device *dev,
int msg_en);
-int ks8851_remove_common(struct device *dev);
+void ks8851_remove_common(struct device *dev);
int ks8851_suspend(struct device *dev);
int ks8851_resume(struct device *dev);
diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c
index a6db1a8156e1..691206f19ea7 100644
--- a/drivers/net/ethernet/micrel/ks8851_common.c
+++ b/drivers/net/ethernet/micrel/ks8851_common.c
@@ -165,6 +165,7 @@ static void ks8851_read_mac_addr(struct net_device *dev)
{
struct ks8851_net *ks = netdev_priv(dev);
unsigned long flags;
+ u8 addr[ETH_ALEN];
u16 reg;
int i;
@@ -172,9 +173,10 @@ static void ks8851_read_mac_addr(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i += 2) {
reg = ks8851_rdreg16(ks, KS_MAR(i));
- dev->dev_addr[i] = reg >> 8;
- dev->dev_addr[i + 1] = reg & 0xff;
+ addr[i] = reg >> 8;
+ addr[i + 1] = reg & 0xff;
}
+ eth_hw_addr_set(dev, addr);
ks8851_unlock(ks, &flags);
}
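This hunk follows the pattern repeated across the rest of the patch: with netdev->dev_addr on its way to becoming const, drivers stop writing into it directly and instead fill a local buffer and hand it to the core. A minimal sketch of the conversion, with a hypothetical read_mac_from_hw() standing in for the device-specific register reads:

	u8 addr[ETH_ALEN];

	/* Old style wrote straight into dev->dev_addr, which is no longer
	 * allowed once dev_addr is const.  New style reads into a stack
	 * buffer and lets the core store it.
	 */
	read_mac_from_hw(priv, addr);	/* hypothetical helper */
	eth_hw_addr_set(dev, addr);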
@@ -195,7 +197,7 @@ static void ks8851_init_mac(struct ks8851_net *ks, struct device_node *np)
struct net_device *dev = ks->netdev;
int ret;
- ret = of_get_mac_address(np, dev->dev_addr);
+ ret = of_get_ethdev_address(np, dev);
if (!ret) {
ks8851_write_mac_addr(dev);
return;
@@ -672,7 +674,7 @@ static int ks8851_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return ks8851_write_mac_addr(dev);
}
@@ -1247,7 +1249,7 @@ err_reg_io:
}
EXPORT_SYMBOL_GPL(ks8851_probe_common);
-int ks8851_remove_common(struct device *dev)
+void ks8851_remove_common(struct device *dev)
{
struct ks8851_net *priv = dev_get_drvdata(dev);
@@ -1261,8 +1263,6 @@ int ks8851_remove_common(struct device *dev)
gpio_set_value(priv->gpio, 0);
regulator_disable(priv->vdd_reg);
regulator_disable(priv->vdd_io);
-
- return 0;
}
EXPORT_SYMBOL_GPL(ks8851_remove_common);
diff --git a/drivers/net/ethernet/micrel/ks8851_par.c b/drivers/net/ethernet/micrel/ks8851_par.c
index 2e8fcce50f9d..2e25798c610e 100644
--- a/drivers/net/ethernet/micrel/ks8851_par.c
+++ b/drivers/net/ethernet/micrel/ks8851_par.c
@@ -327,7 +327,9 @@ static int ks8851_probe_par(struct platform_device *pdev)
static int ks8851_remove_par(struct platform_device *pdev)
{
- return ks8851_remove_common(&pdev->dev);
+ ks8851_remove_common(&pdev->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ks8851_spi.c b/drivers/net/ethernet/micrel/ks8851_spi.c
index 479406ecbaa3..0303e727e99f 100644
--- a/drivers/net/ethernet/micrel/ks8851_spi.c
+++ b/drivers/net/ethernet/micrel/ks8851_spi.c
@@ -454,7 +454,9 @@ static int ks8851_probe_spi(struct spi_device *spi)
static int ks8851_remove_spi(struct spi_device *spi)
{
- return ks8851_remove_common(&spi->dev);
+ ks8851_remove_common(&spi->dev);
+
+ return 0;
}
static const struct of_device_id ks8851_match_table[] = {
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index a0ee155f9f51..99c0c1491af2 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4033,7 +4033,7 @@ static void hw_set_add_addr(struct ksz_hw *hw)
}
}
-static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_add_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
int j = ADDITIONAL_ENTRIES;
@@ -4054,7 +4054,7 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
return -1;
}
-static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+static int hw_del_addr(struct ksz_hw *hw, const u8 *mac_addr)
{
int i;
@@ -5581,7 +5581,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
- memcpy(dev->dev_addr, mac->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, mac->sa_data);
interrupt = hw_block_intr(hw);
@@ -7005,12 +7005,14 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev->mem_end = dev->mem_start + reg_len - 1;
dev->irq = pdev->irq;
if (MAIN_PORT == i)
- memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- ETH_ALEN);
+ eth_hw_addr_set(dev, hw_priv->hw.override_addr);
else {
- memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, sw->other_addr);
if (ether_addr_equal(sw->other_addr, hw->override_addr))
- dev->dev_addr[5] += port->first_port;
+ addr[5] += port->first_port;
+ eth_hw_addr_set(dev, addr);
}
dev->netdev_ops = &netdev_ops;
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index 09cdc2f2e7ff..634ac7649c43 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -517,7 +517,7 @@ static int enc28j60_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(dev->dev_addr, address->sa_data);
+ eth_hw_addr_set(dev, address->sa_data);
return enc28j60_set_hw_macaddr(dev);
}
@@ -1539,7 +1539,6 @@ static const struct net_device_ops enc28j60_netdev_ops = {
static int enc28j60_probe(struct spi_device *spi)
{
- unsigned char macaddr[ETH_ALEN];
struct net_device *dev;
struct enc28j60_net *priv;
int ret = 0;
@@ -1572,9 +1571,7 @@ static int enc28j60_probe(struct spi_device *spi)
goto error_irq;
}
- if (device_get_mac_address(&spi->dev, macaddr, sizeof(macaddr)))
- ether_addr_copy(dev->dev_addr, macaddr);
- else
+ if (device_get_ethdev_address(&spi->dev, dev))
eth_hw_addr_random(dev);
enc28j60_set_hw_macaddr(dev);
diff --git a/drivers/net/ethernet/microchip/encx24j600.c b/drivers/net/ethernet/microchip/encx24j600.c
index 0bc6b3176fbf..b90efc80fb59 100644
--- a/drivers/net/ethernet/microchip/encx24j600.c
+++ b/drivers/net/ethernet/microchip/encx24j600.c
@@ -761,7 +761,7 @@ static int encx24j600_set_mac_address(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, address->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, address->sa_data);
return encx24j600_set_hw_macaddr(dev);
}
@@ -1001,6 +1001,7 @@ static int encx24j600_spi_probe(struct spi_device *spi)
struct net_device *ndev;
struct encx24j600_priv *priv;
u16 eidled;
+ u8 addr[ETH_ALEN];
ndev = alloc_etherdev(sizeof(struct encx24j600_priv));
@@ -1056,7 +1057,8 @@ static int encx24j600_spi_probe(struct spi_device *spi)
}
/* Get the MAC address from the chip */
- encx24j600_hw_get_macaddr(priv, ndev->dev_addr);
+ encx24j600_hw_get_macaddr(priv, addr);
+ eth_hw_addr_set(ndev, addr);
ndev->ethtool_ops = &encx24j600_ethtool_ops;
@@ -1125,4 +1127,3 @@ module_spi_driver(encx24j600_spi_net_driver);
MODULE_DESCRIPTION(DRV_NAME " ethernet driver");
MODULE_AUTHOR("Jon Ringle <jringle@gridpoint.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 9e8561cdc32a..4fc97823bc84 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -816,7 +816,7 @@ static int lan743x_mac_init(struct lan743x_adapter *adapter)
eth_random_addr(adapter->mac_address);
}
lan743x_mac_set_address(adapter, adapter->mac_address);
- ether_addr_copy(netdev->dev_addr, adapter->mac_address);
+ eth_hw_addr_set(netdev, adapter->mac_address);
return 0;
}
@@ -1743,6 +1743,16 @@ static int lan743x_tx_ring_init(struct lan743x_tx *tx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&tx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&tx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(tx->ring_size *
sizeof(struct lan743x_tx_descriptor),
PAGE_SIZE);
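The block added above (and its twin in the RX ring path below) is the usual DMA mask negotiation: prefer a 64-bit streaming and coherent mask, fall back to 32 bits, and only fail if neither is accepted. As a standalone sketch:

/* 64-then-32 bit DMA mask fallback; 'dev' is the PCI device's struct
 * device (&pdev->dev in the driver).
 */
static int example_set_dma_mask(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;		/* 64-bit DMA accepted */

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 0;		/* fall back to 32-bit DMA */

	dev_warn(dev, "No suitable DMA available\n");
	return -ENOMEM;
}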
@@ -1934,7 +1944,8 @@ static void lan743x_rx_update_tail(struct lan743x_rx *rx, int index)
index);
}
-static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ gfp_t gfp)
{
struct net_device *netdev = rx->adapter->netdev;
struct device *dev = &rx->adapter->pdev->dev;
@@ -1948,7 +1959,7 @@ static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index)
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- skb = __netdev_alloc_skb(netdev, buffer_length, GFP_ATOMIC | GFP_DMA);
+ skb = __netdev_alloc_skb(netdev, buffer_length, gfp);
if (!skb)
return -ENOMEM;
dma_ptr = dma_map_single(dev, skb->data, buffer_length, DMA_FROM_DEVICE);
@@ -2110,7 +2121,8 @@ static int lan743x_rx_process_buffer(struct lan743x_rx *rx)
/* save existing skb, allocate new skb and map to dma */
skb = buffer_info->skb;
- if (lan743x_rx_init_ring_element(rx, rx->last_head)) {
+ if (lan743x_rx_init_ring_element(rx, rx->last_head,
+ GFP_ATOMIC | GFP_DMA)) {
/* failed to allocate next skb.
* Memory is very low.
* Drop this packet and reuse buffer.
@@ -2276,6 +2288,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
ret = -EINVAL;
goto cleanup;
}
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(64))) {
+ if (dma_set_mask_and_coherent(&rx->adapter->pdev->dev,
+ DMA_BIT_MASK(32))) {
+ dev_warn(&rx->adapter->pdev->dev,
+ "lan743x_: No suitable DMA available\n");
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ }
ring_allocation_size = ALIGN(rx->ring_size *
sizeof(struct lan743x_rx_descriptor),
PAGE_SIZE);
@@ -2315,13 +2337,16 @@ static int lan743x_rx_ring_init(struct lan743x_rx *rx)
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_init_ring_element(rx, index);
+ ret = lan743x_rx_init_ring_element(rx, index, GFP_KERNEL);
if (ret)
goto cleanup;
}
return 0;
cleanup:
+ netif_warn(rx->adapter, ifup, rx->adapter->netdev,
+ "Error allocating memory for LAN743x\n");
+
lan743x_rx_ring_cleanup(rx);
return ret;
}
@@ -2645,7 +2670,7 @@ static int lan743x_netdev_set_mac_address(struct net_device *netdev,
ret = eth_prepare_mac_addr_change(netdev, sock_addr);
if (ret)
return ret;
- ether_addr_copy(netdev->dev_addr, sock_addr->sa_data);
+ eth_hw_addr_set(netdev, sock_addr->sa_data);
lan743x_mac_set_address(adapter, sock_addr->sa_data);
lan743x_rfe_update_mac_address(adapter);
return 0;
@@ -3019,6 +3044,8 @@ static int lan743x_pm_resume(struct device *dev)
if (ret) {
netif_err(adapter, probe, adapter->netdev,
"lan743x_hardware_init returned %d\n", ret);
+ lan743x_pci_cleanup(adapter);
+ return ret;
}
/* open netdev when netdev is at running state while resume.
diff --git a/drivers/net/ethernet/microchip/lan743x_main.h b/drivers/net/ethernet/microchip/lan743x_main.h
index 6080028c1df2..aaf7aaeaba0c 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.h
+++ b/drivers/net/ethernet/microchip/lan743x_main.h
@@ -279,6 +279,7 @@
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_ (3)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_ (4)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_ (5)
+#define PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_ (6)
#define PTP_GENERAL_CONFIG_CLOCK_EVENT_X_SET_(channel, value) \
(((value) & 0x7) << (1 + ((channel) << 2)))
#define PTP_GENERAL_CONFIG_RELOAD_ADD_X_(channel) (BIT((channel) << 2))
@@ -830,7 +831,7 @@ struct lan743x_rx_buffer_info {
unsigned int buffer_length;
};
-#define LAN743X_RX_RING_SIZE (65)
+#define LAN743X_RX_RING_SIZE (128)
#define RX_PROCESS_RESULT_NOTHING_TO_DO (0)
#define RX_PROCESS_RESULT_BUFFER_RECEIVED (1)
diff --git a/drivers/net/ethernet/microchip/lan743x_ptp.c b/drivers/net/ethernet/microchip/lan743x_ptp.c
index ab6d719d40f0..9380e396f648 100644
--- a/drivers/net/ethernet/microchip/lan743x_ptp.c
+++ b/drivers/net/ethernet/microchip/lan743x_ptp.c
@@ -491,9 +491,10 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
int perout_pin = 0;
unsigned int index = perout_request->index;
struct lan743x_ptp_perout *perout = &ptp->perout[index];
+ int ret = 0;
/* Reject requests with unsupported flags */
- if (perout_request->flags)
+ if (perout_request->flags & ~PTP_PEROUT_DUTY_CYCLE)
return -EOPNOTSUPP;
if (on) {
@@ -518,6 +519,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve event channel %d for PEROUT\n",
index);
+ ret = -EBUSY;
goto failed;
}
@@ -529,6 +531,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
netif_warn(adapter, drv, adapter->netdev,
"Failed to reserve gpio %d for PEROUT\n",
perout_pin);
+ ret = -EBUSY;
goto failed;
}
@@ -540,27 +543,93 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
period_sec += perout_request->period.nsec / 1000000000;
period_nsec = perout_request->period.nsec % 1000000000;
- if (period_sec == 0) {
- if (period_nsec >= 400000000) {
+ if (perout_request->flags & PTP_PEROUT_DUTY_CYCLE) {
+ struct timespec64 ts_on, ts_period;
+ s64 wf_high, period64, half;
+ s32 remainder;
+
+ ts_on.tv_sec = perout_request->on.sec;
+ ts_on.tv_nsec = perout_request->on.nsec;
+ wf_high = timespec64_to_ns(&ts_on);
+ ts_period.tv_sec = perout_request->period.sec;
+ ts_period.tv_nsec = perout_request->period.nsec;
+ period64 = timespec64_to_ns(&ts_period);
+
+ if (period64 < 200) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ if (wf_high >= period64) {
+ netif_warn(adapter, drv, adapter->netdev,
+ "pulse width must be smaller than period\n");
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ /* Check if we can do a 50% toggle on an even period value.
+ * If the period is odd, check whether the requested pulse
+ * width matches one of the predefined width values.
+ * Otherwise, return failure.
+ */
+ half = div_s64_rem(period64, 2, &remainder);
+ if (!remainder) {
+ if (half == wf_high) {
+ /* It's a 50% match. Use the toggle option */
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_TOGGLE_;
+ /* In this case, divide the period value by 2 */
+ ts_period = ns_to_timespec64(div_s64(period64, 2));
+ period_sec = ts_period.tv_sec;
+ period_nsec = ts_period.tv_nsec;
+
+ goto program;
+ }
+ }
+ /* If we can't do the toggle, the pulse width must exactly match one of the predefined values */
+ if (wf_high == 200000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
- } else if (period_nsec >= 20000000) {
+ } else if (wf_high == 10000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
- } else if (period_nsec >= 2000000) {
+ } else if (wf_high == 1000000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
- } else if (period_nsec >= 200000) {
+ } else if (wf_high == 100000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
- } else if (period_nsec >= 20000) {
+ } else if (wf_high == 10000) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
- } else if (period_nsec >= 200) {
+ } else if (wf_high == 100) {
pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
} else {
netif_warn(adapter, drv, adapter->netdev,
- "perout period too small, minimum is 200nS\n");
+ "duty cycle specified is not supported\n");
+ ret = -EOPNOTSUPP;
goto failed;
}
} else {
- pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ if (period_sec == 0) {
+ if (period_nsec >= 400000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ } else if (period_nsec >= 20000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10MS_;
+ } else if (period_nsec >= 2000000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_1MS_;
+ } else if (period_nsec >= 200000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_;
+ } else if (period_nsec >= 20000) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_10US_;
+ } else if (period_nsec >= 200) {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_100NS_;
+ } else {
+ netif_warn(adapter, drv, adapter->netdev,
+ "perout period too small, minimum is 200nS\n");
+ ret = -EOPNOTSUPP;
+ goto failed;
+ }
+ } else {
+ pulse_width = PTP_GENERAL_CONFIG_CLOCK_EVENT_200MS_;
+ }
}
+program:
/* turn off by setting target far in future */
lan743x_csr_write(adapter,
@@ -599,7 +668,7 @@ static int lan743x_ptp_perout(struct lan743x_adapter *adapter, int on,
failed:
lan743x_ptp_perout_off(adapter, index);
- return -ENODEV;
+ return ret;
}
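To make the duty-cycle branch above concrete: a request of period 1 ms with a 500 us on-time splits evenly, so the toggle width is chosen and the hardware is programmed with half the period (500 us per edge); a request of period 1 ms with a 100 us on-time is not a 50% split, so the on-time must exactly match one of the fixed widths (100 us here), and anything else is rejected with -EOPNOTSUPP. A tiny sketch of the arithmetic, for illustration only:

static void perout_duty_cycle_example(void)
{
	s64 period64 = 1000000;		/* 1 ms requested period    */
	s64 wf_high = 500000;		/* 500 us requested on-time */
	s32 remainder;
	s64 half;

	half = div_s64_rem(period64, 2, &remainder);
	/* remainder == 0 and half == wf_high, so toggle mode is used and
	 * the programmed period becomes period64 / 2 = 500000 ns per edge.
	 * An on-time of 100000 ns would instead have to match the fixed
	 * PTP_GENERAL_CONFIG_CLOCK_EVENT_100US_ width.
	 */
	(void)half;	/* silence unused warnings in this sketch */
	(void)wf_high;
}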
static int lan743x_ptpci_enable(struct ptp_clock_info *ptpci,
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
index cbece6e9bff2..4625d4fb4cde 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c
@@ -234,8 +234,7 @@ static int sparx5_create_targets(struct sparx5 *sparx5)
}
iomem[idx] = devm_ioremap(sparx5->dev,
iores[idx]->start,
- iores[idx]->end - iores[idx]->start
- + 1);
+ resource_size(iores[idx]));
if (!iomem[idx]) {
dev_err(sparx5->dev, "Unable to get switch registers: %s\n",
iores[idx]->name);
@@ -758,6 +757,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev)
err = dev_err_probe(sparx5->dev, PTR_ERR(serdes),
"port %u: missing serdes\n",
portno);
+ of_node_put(portnp);
goto cleanup_config;
}
config->portno = portno;
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
index cb68eaaac881..e042f117dc7a 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_netdev.c
@@ -162,7 +162,7 @@ static int sparx5_set_mac_address(struct net_device *dev, void *p)
sparx5_mact_learn(sparx5, PGID_CPU, addr->sa_data, port->pvid);
/* Record the address */
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -200,7 +200,6 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
{
struct sparx5_port *spx5_port;
struct net_device *ndev;
- u64 val;
ndev = devm_alloc_etherdev(sparx5->dev, sizeof(struct sparx5_port));
if (!ndev)
@@ -216,8 +215,7 @@ struct net_device *sparx5_create_netdev(struct sparx5 *sparx5, u32 portno)
ndev->netdev_ops = &sparx5_port_netdev_ops;
ndev->ethtool_ops = &sparx5_ethtool_ops;
- val = ether_addr_to_u64(sparx5->base_mac) + portno + 1;
- u64_to_ether_addr(val, ndev->dev_addr);
+ eth_hw_addr_gen(ndev, sparx5->base_mac, portno + 1);
return ndev;
}
diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
index af70e2795125..fb74752de0ca 100644
--- a/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
+++ b/drivers/net/ethernet/microchip/sparx5/sparx5_phylink.c
@@ -92,12 +92,11 @@ static void sparx5_phylink_validate(struct phylink_config *config,
}
break;
default:
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void sparx5_phylink_mac_config(struct phylink_config *config,
diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c
index cee75b561f59..c96ac81212f7 100644
--- a/drivers/net/ethernet/microsoft/mana/gdma_main.c
+++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c
@@ -3,6 +3,8 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/utsname.h>
+#include <linux/version.h>
#include "mana.h"
@@ -848,6 +850,15 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev)
req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3;
req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4;
+ req.drv_ver = 0; /* Unused */
+ req.os_type = 0x10; /* Linux */
+ req.os_ver_major = LINUX_VERSION_MAJOR;
+ req.os_ver_minor = LINUX_VERSION_PATCHLEVEL;
+ req.os_ver_build = LINUX_VERSION_SUBLEVEL;
+ strscpy(req.os_ver_str1, utsname()->sysname, sizeof(req.os_ver_str1));
+ strscpy(req.os_ver_str2, utsname()->release, sizeof(req.os_ver_str2));
+ strscpy(req.os_ver_str3, utsname()->version, sizeof(req.os_ver_str3));
+
err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
if (err || resp.hdr.status) {
dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n",
@@ -1247,6 +1258,52 @@ static void mana_gd_remove_irqs(struct pci_dev *pdev)
gc->irq_contexts = NULL;
}
+static int mana_gd_setup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ mana_gd_init_registers(pdev);
+ mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
+
+ err = mana_gd_setup_irqs(pdev);
+ if (err)
+ return err;
+
+ err = mana_hwc_create_channel(gc);
+ if (err)
+ goto remove_irq;
+
+ err = mana_gd_verify_vf_version(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_query_max_resources(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ err = mana_gd_detect_devices(pdev);
+ if (err)
+ goto destroy_hwc;
+
+ return 0;
+
+destroy_hwc:
+ mana_hwc_destroy_channel(gc);
+remove_irq:
+ mana_gd_remove_irqs(pdev);
+ return err;
+}
+
+static void mana_gd_cleanup(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_hwc_destroy_channel(gc);
+
+ mana_gd_remove_irqs(pdev);
+}
+
static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct gdma_context *gc;
@@ -1276,6 +1333,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!gc)
goto release_region;
+ mutex_init(&gc->eq_test_event_mutex);
+ pci_set_drvdata(pdev, gc);
+
bar0_va = pci_iomap(pdev, bar, 0);
if (!bar0_va)
goto free_gc;
@@ -1283,49 +1343,23 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
gc->bar0_va = bar0_va;
gc->dev = &pdev->dev;
- pci_set_drvdata(pdev, gc);
- mana_gd_init_registers(pdev);
-
- mana_smc_init(&gc->shm_channel, gc->dev, gc->shm_base);
-
- err = mana_gd_setup_irqs(pdev);
+ err = mana_gd_setup(pdev);
if (err)
goto unmap_bar;
- mutex_init(&gc->eq_test_event_mutex);
-
- err = mana_hwc_create_channel(gc);
- if (err)
- goto remove_irq;
-
- err = mana_gd_verify_vf_version(pdev);
- if (err)
- goto remove_irq;
-
- err = mana_gd_query_max_resources(pdev);
- if (err)
- goto remove_irq;
-
- err = mana_gd_detect_devices(pdev);
+ err = mana_probe(&gc->mana, false);
if (err)
- goto remove_irq;
-
- err = mana_probe(&gc->mana);
- if (err)
- goto clean_up_gdma;
+ goto cleanup_gd;
return 0;
-clean_up_gdma:
- mana_hwc_destroy_channel(gc);
- vfree(gc->cq_table);
- gc->cq_table = NULL;
-remove_irq:
- mana_gd_remove_irqs(pdev);
+cleanup_gd:
+ mana_gd_cleanup(pdev);
unmap_bar:
pci_iounmap(pdev, bar0_va);
free_gc:
+ pci_set_drvdata(pdev, NULL);
vfree(gc);
release_region:
pci_release_regions(pdev);
@@ -1340,13 +1374,9 @@ static void mana_gd_remove(struct pci_dev *pdev)
{
struct gdma_context *gc = pci_get_drvdata(pdev);
- mana_remove(&gc->mana);
+ mana_remove(&gc->mana, false);
- mana_hwc_destroy_channel(gc);
- vfree(gc->cq_table);
- gc->cq_table = NULL;
-
- mana_gd_remove_irqs(pdev);
+ mana_gd_cleanup(pdev);
pci_iounmap(pdev, gc->bar0_va);
@@ -1357,6 +1387,52 @@ static void mana_gd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
+/* The 'state' parameter is not used. */
+static int mana_gd_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ return 0;
+}
+
+/* In case the NIC hardware stops working, the suspend and resume callbacks will
+ * fail -- if this happens, it's safer to just report an error than try to undo
+ * what has been done.
+ */
+static int mana_gd_resume(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+ int err;
+
+ err = mana_gd_setup(pdev);
+ if (err)
+ return err;
+
+ err = mana_probe(&gc->mana, true);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+/* Quiesce the device for kexec. This is also called upon reboot/shutdown. */
+static void mana_gd_shutdown(struct pci_dev *pdev)
+{
+ struct gdma_context *gc = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "Shutdown was calledd\n");
+
+ mana_remove(&gc->mana, true);
+
+ mana_gd_cleanup(pdev);
+
+ pci_disable_device(pdev);
+}
+
#ifndef PCI_VENDOR_ID_MICROSOFT
#define PCI_VENDOR_ID_MICROSOFT 0x1414
#endif
@@ -1371,6 +1447,9 @@ static struct pci_driver mana_driver = {
.id_table = mana_id_table,
.probe = mana_gd_probe,
.remove = mana_gd_remove,
+ .suspend = mana_gd_suspend,
+ .resume = mana_gd_resume,
+ .shutdown = mana_gd_shutdown,
};
module_pci_driver(mana_driver);
diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c
index d5c485a6d284..34b971ff8ef8 100644
--- a/drivers/net/ethernet/microsoft/mana/hw_channel.c
+++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c
@@ -309,9 +309,6 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self)
static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq)
{
- if (!hwc_cq)
- return;
-
kfree(hwc_cq->comp_buf);
if (hwc_cq->gdma_cq)
@@ -363,7 +360,7 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth,
}
hwc_cq->gdma_cq = cq;
- comp_buf = kcalloc(q_depth, sizeof(struct gdma_comp), GFP_KERNEL);
+ comp_buf = kcalloc(q_depth, sizeof(*comp_buf), GFP_KERNEL);
if (!comp_buf) {
err = -ENOMEM;
goto out;
@@ -446,9 +443,6 @@ static void mana_hwc_dealloc_dma_buf(struct hw_channel_context *hwc,
static void mana_hwc_destroy_wq(struct hw_channel_context *hwc,
struct hwc_wq *hwc_wq)
{
- if (!hwc_wq)
- return;
-
mana_hwc_dealloc_dma_buf(hwc, hwc_wq->msg_buf);
if (hwc_wq->gdma_wq)
@@ -580,7 +574,7 @@ static int mana_hwc_test_channel(struct hw_channel_context *hwc, u16 q_depth,
return err;
}
- ctx = kzalloc(q_depth * sizeof(struct hwc_caller_ctx), GFP_KERNEL);
+ ctx = kcalloc(q_depth, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
@@ -621,6 +615,7 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
*max_req_msg_size = hwc->hwc_init_max_req_msg_size;
*max_resp_msg_size = hwc->hwc_init_max_resp_msg_size;
+ /* Both were set in mana_hwc_init_event_handler(). */
if (WARN_ON(cq->id >= gc->max_num_cqs))
return -EPROTO;
@@ -636,9 +631,6 @@ static int mana_hwc_establish_channel(struct gdma_context *gc, u16 *q_depth,
static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
u32 max_req_msg_size, u32 max_resp_msg_size)
{
- struct hwc_wq *hwc_rxq = NULL;
- struct hwc_wq *hwc_txq = NULL;
- struct hwc_cq *hwc_cq = NULL;
int err;
err = mana_hwc_init_inflight_msg(hwc, q_depth);
@@ -651,44 +643,32 @@ static int mana_hwc_init_queues(struct hw_channel_context *hwc, u16 q_depth,
err = mana_hwc_create_cq(hwc, q_depth * 2,
mana_hwc_init_event_handler, hwc,
mana_hwc_rx_event_handler, hwc,
- mana_hwc_tx_event_handler, hwc, &hwc_cq);
+ mana_hwc_tx_event_handler, hwc, &hwc->cq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC CQ: %d\n", err);
goto out;
}
- hwc->cq = hwc_cq;
err = mana_hwc_create_wq(hwc, GDMA_RQ, q_depth, max_req_msg_size,
- hwc_cq, &hwc_rxq);
+ hwc->cq, &hwc->rxq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC RQ: %d\n", err);
goto out;
}
- hwc->rxq = hwc_rxq;
err = mana_hwc_create_wq(hwc, GDMA_SQ, q_depth, max_resp_msg_size,
- hwc_cq, &hwc_txq);
+ hwc->cq, &hwc->txq);
if (err) {
dev_err(hwc->dev, "Failed to create HWC SQ: %d\n", err);
goto out;
}
- hwc->txq = hwc_txq;
hwc->num_inflight_msg = q_depth;
hwc->max_req_msg_size = max_req_msg_size;
return 0;
out:
- if (hwc_txq)
- mana_hwc_destroy_wq(hwc, hwc_txq);
-
- if (hwc_rxq)
- mana_hwc_destroy_wq(hwc, hwc_rxq);
-
- if (hwc_cq)
- mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc_cq);
-
- mana_gd_free_res_map(&hwc->inflight_msg_res);
+ /* mana_hwc_create_channel() will do the cleanup. */
return err;
}
@@ -716,6 +696,9 @@ int mana_hwc_create_channel(struct gdma_context *gc)
gd->pdid = INVALID_PDID;
gd->doorbell = INVALID_DOORBELL;
+ /* mana_hwc_init_queues() only creates the required data structures,
+ * and doesn't touch the HWC device.
+ */
err = mana_hwc_init_queues(hwc, HW_CHANNEL_VF_BOOTSTRAP_QUEUE_DEPTH,
HW_CHANNEL_MAX_REQUEST_SIZE,
HW_CHANNEL_MAX_RESPONSE_SIZE);
@@ -741,42 +724,50 @@ int mana_hwc_create_channel(struct gdma_context *gc)
return 0;
out:
- kfree(hwc);
+ mana_hwc_destroy_channel(gc);
return err;
}
void mana_hwc_destroy_channel(struct gdma_context *gc)
{
struct hw_channel_context *hwc = gc->hwc.driver_data;
- struct hwc_caller_ctx *ctx;
- mana_smc_teardown_hwc(&gc->shm_channel, false);
+ if (!hwc)
+ return;
+
+ /* gc->max_num_cqs is set in mana_hwc_init_event_handler(). If it's
+ * non-zero, the HWC worked and we should tear down the HWC here.
+ */
+ if (gc->max_num_cqs > 0) {
+ mana_smc_teardown_hwc(&gc->shm_channel, false);
+ gc->max_num_cqs = 0;
+ }
- ctx = hwc->caller_ctx;
- kfree(ctx);
+ kfree(hwc->caller_ctx);
hwc->caller_ctx = NULL;
- mana_hwc_destroy_wq(hwc, hwc->txq);
- hwc->txq = NULL;
+ if (hwc->txq)
+ mana_hwc_destroy_wq(hwc, hwc->txq);
- mana_hwc_destroy_wq(hwc, hwc->rxq);
- hwc->rxq = NULL;
+ if (hwc->rxq)
+ mana_hwc_destroy_wq(hwc, hwc->rxq);
- mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
- hwc->cq = NULL;
+ if (hwc->cq)
+ mana_hwc_destroy_cq(hwc->gdma_dev->gdma_context, hwc->cq);
mana_gd_free_res_map(&hwc->inflight_msg_res);
hwc->num_inflight_msg = 0;
- if (hwc->gdma_dev->pdid != INVALID_PDID) {
- hwc->gdma_dev->doorbell = INVALID_DOORBELL;
- hwc->gdma_dev->pdid = INVALID_PDID;
- }
+ hwc->gdma_dev->doorbell = INVALID_DOORBELL;
+ hwc->gdma_dev->pdid = INVALID_PDID;
kfree(hwc);
gc->hwc.driver_data = NULL;
gc->hwc.gdma_context = NULL;
+
+ vfree(gc->cq_table);
+ gc->cq_table = NULL;
}
int mana_hwc_send_request(struct hw_channel_context *hwc, u32 req_len,
diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h
index fc98a5ba5ed0..d047ee876f12 100644
--- a/drivers/net/ethernet/microsoft/mana/mana.h
+++ b/drivers/net/ethernet/microsoft/mana/mana.h
@@ -374,8 +374,8 @@ int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);
-int mana_probe(struct gdma_dev *gd);
-void mana_remove(struct gdma_dev *gd);
+int mana_probe(struct gdma_dev *gd, bool resuming);
+void mana_remove(struct gdma_dev *gd, bool suspending);
extern const struct ethtool_ops mana_ethtool_ops;
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 030ae89f3a33..72cbf45c42d8 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -1599,7 +1599,8 @@ static int mana_init_port(struct net_device *ndev)
err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
&num_indirect_entries);
if (err) {
- netdev_err(ndev, "Failed to query info for vPort 0\n");
+ netdev_err(ndev, "Failed to query info for vPort %d\n",
+ port_idx);
goto reset_apc;
}
@@ -1610,7 +1611,7 @@ static int mana_init_port(struct net_device *ndev)
if (apc->num_queues > apc->max_queues)
apc->num_queues = apc->max_queues;
- ether_addr_copy(ndev->dev_addr, apc->mac_addr);
+ eth_hw_addr_set(ndev, apc->mac_addr);
return 0;
@@ -1667,24 +1668,23 @@ int mana_attach(struct net_device *ndev)
if (err)
return err;
- err = mana_alloc_queues(ndev);
- if (err) {
- kfree(apc->rxqs);
- apc->rxqs = NULL;
- return err;
+ if (apc->port_st_save) {
+ err = mana_alloc_queues(ndev);
+ if (err) {
+ mana_cleanup_port_context(apc);
+ return err;
+ }
}
- netif_device_attach(ndev);
-
apc->port_is_up = apc->port_st_save;
/* Ensure port state updated before txq state */
smp_wmb();
- if (apc->port_is_up) {
+ if (apc->port_is_up)
netif_carrier_on(ndev);
- netif_tx_wake_all_queues(ndev);
- }
+
+ netif_device_attach(ndev);
return 0;
}
@@ -1828,11 +1828,12 @@ free_net:
return err;
}
-int mana_probe(struct gdma_dev *gd)
+int mana_probe(struct gdma_dev *gd, bool resuming)
{
struct gdma_context *gc = gd->gdma_context;
+ struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
- struct mana_context *ac;
+ u16 num_ports = 0;
int err;
int i;
@@ -1844,44 +1845,70 @@ int mana_probe(struct gdma_dev *gd)
if (err)
return err;
- ac = kzalloc(sizeof(*ac), GFP_KERNEL);
- if (!ac)
- return -ENOMEM;
+ if (!resuming) {
+ ac = kzalloc(sizeof(*ac), GFP_KERNEL);
+ if (!ac)
+ return -ENOMEM;
- ac->gdma_dev = gd;
- ac->num_ports = 1;
- gd->driver_data = ac;
+ ac->gdma_dev = gd;
+ gd->driver_data = ac;
+ }
err = mana_create_eq(ac);
if (err)
goto out;
err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
- MANA_MICRO_VERSION, &ac->num_ports);
+ MANA_MICRO_VERSION, &num_ports);
if (err)
goto out;
+ if (!resuming) {
+ ac->num_ports = num_ports;
+ } else {
+ if (ac->num_ports != num_ports) {
+ dev_err(dev, "The number of vPorts changed: %d->%d\n",
+ ac->num_ports, num_ports);
+ err = -EPROTO;
+ goto out;
+ }
+ }
+
+ if (ac->num_ports == 0)
+ dev_err(dev, "Failed to detect any vPort\n");
+
if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
ac->num_ports = MAX_PORTS_IN_MANA_DEV;
- for (i = 0; i < ac->num_ports; i++) {
- err = mana_probe_port(ac, i, &ac->ports[i]);
- if (err)
- break;
+ if (!resuming) {
+ for (i = 0; i < ac->num_ports; i++) {
+ err = mana_probe_port(ac, i, &ac->ports[i]);
+ if (err)
+ break;
+ }
+ } else {
+ for (i = 0; i < ac->num_ports; i++) {
+ rtnl_lock();
+ err = mana_attach(ac->ports[i]);
+ rtnl_unlock();
+ if (err)
+ break;
+ }
}
out:
if (err)
- mana_remove(gd);
+ mana_remove(gd, false);
return err;
}
-void mana_remove(struct gdma_dev *gd)
+void mana_remove(struct gdma_dev *gd, bool suspending)
{
struct gdma_context *gc = gd->gdma_context;
struct mana_context *ac = gd->driver_data;
struct device *dev = gc->dev;
struct net_device *ndev;
+ int err;
int i;
for (i = 0; i < ac->num_ports; i++) {
@@ -1897,7 +1924,16 @@ void mana_remove(struct gdma_dev *gd)
*/
rtnl_lock();
- mana_detach(ndev, false);
+ err = mana_detach(ndev, false);
+ if (err)
+ netdev_err(ndev, "Failed to detach vPort %d: %d\n",
+ i, err);
+
+ if (suspending) {
+ /* No need to unregister the ndev. */
+ rtnl_unlock();
+ continue;
+ }
unregister_netdevice(ndev);
@@ -1910,6 +1946,10 @@ void mana_remove(struct gdma_dev *gd)
out:
mana_gd_deregister_device(gd);
+
+ if (suspending)
+ return;
+
gd->driver_data = NULL;
gd->gdma_context = NULL;
kfree(ac);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
index 7e74339f39ae..c3c81ae3fafd 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c
@@ -211,9 +211,6 @@ static int mana_set_channels(struct net_device *ndev,
unsigned int old_count = apc->num_queues;
int err, err2;
- if (!apc->port_is_up)
- return -EOPNOTSUPP;
-
err = mana_detach(ndev, false);
if (err) {
netdev_err(ndev, "mana_detach failed: %d\n", err);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 49def6934cad..15179b9529e1 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -65,7 +65,7 @@ static int moxart_set_mac_address(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(address->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, address->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, address->sa_data);
moxart_update_mac_address(ndev);
return 0;
diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig
index b6a73d151dec..8dd8c7f425d2 100644
--- a/drivers/net/ethernet/mscc/Kconfig
+++ b/drivers/net/ethernet/mscc/Kconfig
@@ -28,7 +28,7 @@ config MSCC_OCELOT_SWITCH
depends on BRIDGE || BRIDGE=n
depends on NET_SWITCHDEV
depends on HAS_IOMEM
- depends on OF_NET
+ depends on OF
select MSCC_OCELOT_SWITCH_LIB
select GENERIC_PHY
help
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a08e4f530c1c..e6c18b598d5c 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -20,11 +20,13 @@ struct ocelot_mact_entry {
enum macaccess_entry_type type;
};
+/* Caller must hold &ocelot->mact_lock */
static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot)
{
return ocelot_read(ocelot, ANA_TABLES_MACACCESS);
}
+/* Caller must hold &ocelot->mact_lock */
static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
{
u32 val;
@@ -36,6 +38,7 @@ static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot)
TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US);
}
+/* Caller must hold &ocelot->mact_lock */
static void ocelot_mact_select(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN],
unsigned int vid)
@@ -67,6 +70,7 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
ANA_TABLES_MACACCESS_ENTRYTYPE(type) |
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN);
unsigned int mc_ports;
+ int err;
/* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */
if (type == ENTRYTYPE_MACv4)
@@ -79,18 +83,28 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port,
if (mc_ports & BIT(ocelot->num_phys_ports))
cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY;
+ mutex_lock(&ocelot->mact_lock);
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a write command */
ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS);
- return ocelot_mact_wait_for_completion(ocelot);
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_mact_learn);
int ocelot_mact_forget(struct ocelot *ocelot,
const unsigned char mac[ETH_ALEN], unsigned int vid)
{
+ int err;
+
+ mutex_lock(&ocelot->mact_lock);
+
ocelot_mact_select(ocelot, mac, vid);
/* Issue a forget command */
@@ -98,7 +112,11 @@ int ocelot_mact_forget(struct ocelot *ocelot,
ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_FORGET),
ANA_TABLES_MACACCESS);
- return ocelot_mact_wait_for_completion(ocelot);
+ err = ocelot_mact_wait_for_completion(ocelot);
+
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_mact_forget);
@@ -114,7 +132,9 @@ static void ocelot_mact_init(struct ocelot *ocelot)
| ANA_AGENCTRL_LEARN_IGNORE_VLAN,
ANA_AGENCTRL);
- /* Clear the MAC table */
+ /* Clear the MAC table. We are not concurrent with anyone, so
+ * holding &ocelot->mact_lock is pointless.
+ */
ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS);
}
@@ -162,48 +182,117 @@ static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask)
return ocelot_vlant_wait_for_completion(ocelot);
}
-static void ocelot_port_set_native_vlan(struct ocelot *ocelot, int port,
- struct ocelot_vlan native_vlan)
+static int ocelot_port_num_untagged_vlans(struct ocelot *ocelot, int port)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
- u32 val = 0;
+ struct ocelot_bridge_vlan *vlan;
+ int num_untagged = 0;
- ocelot_port->native_vlan = native_vlan;
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
- ocelot_rmw_gix(ocelot, REW_PORT_VLAN_CFG_PORT_VID(native_vlan.vid),
- REW_PORT_VLAN_CFG_PORT_VID_M,
- REW_PORT_VLAN_CFG, port);
+ if (vlan->untagged & BIT(port))
+ num_untagged++;
+ }
+
+ return num_untagged;
+}
+
+static int ocelot_port_num_tagged_vlans(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+ int num_tagged = 0;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list) {
+ if (!(vlan->portmask & BIT(port)))
+ continue;
+
+ if (!(vlan->untagged & BIT(port)))
+ num_tagged++;
+ }
+
+ return num_tagged;
+}
+
+/* We use native VLAN when we have to mix egress-tagged VLANs with exactly
+ * _one_ egress-untagged VLAN (_the_ native VLAN)
+ */
+static bool ocelot_port_uses_native_vlan(struct ocelot *ocelot, int port)
+{
+ return ocelot_port_num_tagged_vlans(ocelot, port) &&
+ ocelot_port_num_untagged_vlans(ocelot, port) == 1;
+}
+
+static struct ocelot_bridge_vlan *
+ocelot_port_find_native_vlan(struct ocelot *ocelot, int port)
+{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->portmask & BIT(port) && vlan->untagged & BIT(port))
+ return vlan;
+
+ return NULL;
+}
+
+/* Keep REW_TAG_CFG_TAG_CFG and, if applicable, REW_PORT_VLAN_CFG_PORT_VID
+ * in sync with the bridge VLAN table and the VLAN awareness state of the
+ * port.
+ */
+static void ocelot_port_manage_port_tag(struct ocelot *ocelot, int port)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+ enum ocelot_port_tag_config tag_cfg;
+ bool uses_native_vlan = false;
if (ocelot_port->vlan_aware) {
- if (native_vlan.valid)
- /* Tag all frames except when VID == DEFAULT_VLAN */
- val = REW_TAG_CFG_TAG_CFG(1);
+ uses_native_vlan = ocelot_port_uses_native_vlan(ocelot, port);
+
+ if (uses_native_vlan)
+ tag_cfg = OCELOT_PORT_TAG_NATIVE;
+ else if (ocelot_port_num_untagged_vlans(ocelot, port))
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
else
- /* Tag all frames */
- val = REW_TAG_CFG_TAG_CFG(3);
+ tag_cfg = OCELOT_PORT_TAG_TRUNK;
} else {
- /* Port tagging disabled. */
- val = REW_TAG_CFG_TAG_CFG(0);
+ tag_cfg = OCELOT_PORT_TAG_DISABLED;
}
- ocelot_rmw_gix(ocelot, val,
+
+ ocelot_rmw_gix(ocelot, REW_TAG_CFG_TAG_CFG(tag_cfg),
REW_TAG_CFG_TAG_CFG_M,
REW_TAG_CFG, port);
+
+ if (uses_native_vlan) {
+ struct ocelot_bridge_vlan *native_vlan;
+
+ /* Not having a native VLAN is impossible, because
+ * ocelot_port_num_untagged_vlans has returned 1.
+ * So there is no use in checking for NULL here.
+ */
+ native_vlan = ocelot_port_find_native_vlan(ocelot, port);
+
+ ocelot_rmw_gix(ocelot,
+ REW_PORT_VLAN_CFG_PORT_VID(native_vlan->vid),
+ REW_PORT_VLAN_CFG_PORT_VID_M,
+ REW_PORT_VLAN_CFG, port);
+ }
}
	/* Default VLAN to classify untagged frames to (may be zero) */
static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
- struct ocelot_vlan pvid_vlan)
+ const struct ocelot_bridge_vlan *pvid_vlan)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ u16 pvid = OCELOT_VLAN_UNAWARE_PVID;
u32 val = 0;
ocelot_port->pvid_vlan = pvid_vlan;
- if (!ocelot_port->vlan_aware)
- pvid_vlan.vid = 0;
+ if (ocelot_port->vlan_aware && pvid_vlan)
+ pvid = pvid_vlan->vid;
ocelot_rmw_gix(ocelot,
- ANA_PORT_VLAN_CFG_VLAN_VID(pvid_vlan.vid),
+ ANA_PORT_VLAN_CFG_VLAN_VID(pvid),
ANA_PORT_VLAN_CFG_VLAN_VID_M,
ANA_PORT_VLAN_CFG, port);
@@ -212,7 +301,7 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
* classified to VLAN 0, but that is always in our RX filter, so it
* would get accepted were it not for this setting.
*/
- if (!pvid_vlan.valid && ocelot_port->vlan_aware)
+ if (!pvid_vlan && ocelot_port->vlan_aware)
val = ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA |
ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA;
@@ -222,31 +311,90 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port,
ANA_PORT_DROP_CFG, port);
}
-static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid)
+static struct ocelot_bridge_vlan *ocelot_bridge_vlan_find(struct ocelot *ocelot,
+ u16 vid)
{
+ struct ocelot_bridge_vlan *vlan;
+
+ list_for_each_entry(vlan, &ocelot->vlans, list)
+ if (vlan->vid == vid)
+ return vlan;
+
+ return NULL;
+}
+
+static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid,
+ bool untagged)
+{
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
int err;
- err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask);
- if (err)
+ if (vlan) {
+ portmask = vlan->portmask | BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ /* Bridge VLANs can be overwritten with a different
+ * egress-tagging setting, so make sure to override an untagged
+ * with a tagged VID if that's going on.
+ */
+ if (untagged)
+ vlan->untagged |= BIT(port);
+ else
+ vlan->untagged &= ~BIT(port);
+
+ return 0;
+ }
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return -ENOMEM;
+
+ portmask = BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err) {
+ kfree(vlan);
return err;
+ }
- ocelot->vlan_mask[vid] = vlan_mask;
+ vlan->vid = vid;
+ vlan->portmask = portmask;
+ if (untagged)
+ vlan->untagged = BIT(port);
+ INIT_LIST_HEAD(&vlan->list);
+ list_add_tail(&vlan->list, &ocelot->vlans);
return 0;
}
-static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid)
-{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] | BIT(port),
- vid);
-}
-
static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid)
{
- return ocelot_vlan_member_set(ocelot,
- ocelot->vlan_mask[vid] & ~BIT(port),
- vid);
+ struct ocelot_bridge_vlan *vlan = ocelot_bridge_vlan_find(ocelot, vid);
+ unsigned long portmask;
+ int err;
+
+ if (!vlan)
+ return 0;
+
+ portmask = vlan->portmask & ~BIT(port);
+
+ err = ocelot_vlant_set_mask(ocelot, vid, portmask);
+ if (err)
+ return err;
+
+ vlan->portmask = portmask;
+ if (vlan->portmask)
+ return 0;
+
+ list_del(&vlan->list);
+ kfree(vlan);
+
+ return 0;
}
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
@@ -279,7 +427,7 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
ANA_PORT_VLAN_CFG, port);
ocelot_port_set_pvid(ocelot, port, ocelot_port->pvid_vlan);
- ocelot_port_set_native_vlan(ocelot, port, ocelot_port->native_vlan);
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -288,14 +436,20 @@ EXPORT_SYMBOL(ocelot_port_vlan_filtering);
int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid,
bool untagged, struct netlink_ext_ack *extack)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
-
- /* Deny changing the native VLAN, but always permit deleting it */
- if (untagged && ocelot_port->native_vlan.vid != vid &&
- ocelot_port->native_vlan.valid) {
- NL_SET_ERR_MSG_MOD(extack,
- "Port already has a native VLAN");
- return -EBUSY;
+ if (untagged) {
+		/* We are adding an egress-untagged VLAN */
+ if (ocelot_port_uses_native_vlan(ocelot, port)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with egress-tagged VLANs cannot have more than one egress-untagged (native) VLAN");
+ return -EBUSY;
+ }
+ } else {
+ /* We are adding an egress-tagged VLAN */
+ if (ocelot_port_num_untagged_vlans(ocelot, port) > 1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Port with more than one egress-untagged VLAN cannot have egress-tagged VLANs");
+ return -EBUSY;
+ }
}
return 0;
@@ -307,27 +461,17 @@ int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
{
int err;
- err = ocelot_vlan_member_add(ocelot, port, vid);
+ err = ocelot_vlan_member_add(ocelot, port, vid, untagged);
if (err)
return err;
/* Default ingress vlan classification */
- if (pvid) {
- struct ocelot_vlan pvid_vlan;
-
- pvid_vlan.vid = vid;
- pvid_vlan.valid = true;
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (pvid)
+ ocelot_port_set_pvid(ocelot, port,
+ ocelot_bridge_vlan_find(ocelot, vid));
	/* Untagged egress VLAN classification */
- if (untagged) {
- struct ocelot_vlan native_vlan;
-
- native_vlan.vid = vid;
- native_vlan.valid = true;
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -343,18 +487,11 @@ int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid)
return err;
/* Ingress */
- if (ocelot_port->pvid_vlan.vid == vid) {
- struct ocelot_vlan pvid_vlan = {0};
-
- ocelot_port_set_pvid(ocelot, port, pvid_vlan);
- }
+ if (ocelot_port->pvid_vlan && ocelot_port->pvid_vlan->vid == vid)
+ ocelot_port_set_pvid(ocelot, port, NULL);
/* Egress */
- if (ocelot_port->native_vlan.vid == vid) {
- struct ocelot_vlan native_vlan = {0};
-
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
- }
+ ocelot_port_manage_port_tag(ocelot, port);
return 0;
}
@@ -372,13 +509,13 @@ static void ocelot_vlan_init(struct ocelot *ocelot)
/* Configure the port VLAN memberships */
for (vid = 1; vid < VLAN_N_VID; vid++)
- ocelot_vlan_member_set(ocelot, 0, vid);
+ ocelot_vlant_set_mask(ocelot, vid, 0);
/* Because VLAN filtering is enabled, we need VID 0 to get untagged
* traffic. It is added automatically if 8021q module is loaded, but
* we can't rely on it since module may be not loaded.
*/
- ocelot_vlan_member_set(ocelot, all_ports, 0);
+ ocelot_vlant_set_mask(ocelot, OCELOT_VLAN_UNAWARE_PVID, all_ports);
/* Set vlan ingress filter mask to all ports but the CPU port by
* default.
@@ -951,7 +1088,7 @@ void ocelot_port_inject_frame(struct ocelot *ocelot, int port, int grp,
ocelot_ifh_set_bypass(ifh, 1);
ocelot_ifh_set_dest(ifh, BIT_ULL(port));
ocelot_ifh_set_tag_type(ifh, IFH_TAG_TYPE_C);
- ocelot_ifh_set_vid(ifh, skb_vlan_tag_get(skb));
+ ocelot_ifh_set_vlan_tci(ifh, skb_vlan_tag_get(skb));
ocelot_ifh_set_rew_op(ifh, rew_op);
for (i = 0; i < OCELOT_TAG_LEN / 4; i++)
@@ -1053,6 +1190,7 @@ nla_put_failure:
}
EXPORT_SYMBOL(ocelot_port_fdb_do_dump);
+/* Caller must hold &ocelot->mact_lock */
static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
struct ocelot_mact_entry *entry)
{
@@ -1103,33 +1241,40 @@ static int ocelot_mact_read(struct ocelot *ocelot, int port, int row, int col,
int ocelot_fdb_dump(struct ocelot *ocelot, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
+ int err = 0;
int i, j;
+ /* We could take the lock just around ocelot_mact_read, but doing so
+ * thousands of times in a row seems rather pointless and inefficient.
+ */
+ mutex_lock(&ocelot->mact_lock);
+
/* Loop through all the mac tables entries. */
for (i = 0; i < ocelot->num_mact_rows; i++) {
for (j = 0; j < 4; j++) {
struct ocelot_mact_entry entry;
bool is_static;
- int ret;
- ret = ocelot_mact_read(ocelot, port, i, j, &entry);
+ err = ocelot_mact_read(ocelot, port, i, j, &entry);
/* If the entry is invalid (wrong port, invalid...),
* skip it.
*/
- if (ret == -EINVAL)
+ if (err == -EINVAL)
continue;
- else if (ret)
- return ret;
+ else if (err)
+ break;
is_static = (entry.type == ENTRYTYPE_LOCKED);
- ret = cb(entry.mac, entry.vid, is_static, data);
- if (ret)
- return ret;
+ err = cb(entry.mac, entry.vid, is_static, data);
+ if (err)
+ break;
}
}
- return 0;
+ mutex_unlock(&ocelot->mact_lock);
+
+ return err;
}
EXPORT_SYMBOL(ocelot_fdb_dump);
@@ -1680,12 +1825,11 @@ void ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
struct net_device *bridge)
{
struct ocelot_port *ocelot_port = ocelot->ports[port];
- struct ocelot_vlan pvid = {0}, native_vlan = {0};
ocelot_port->bridge = NULL;
- ocelot_port_set_pvid(ocelot, port, pvid);
- ocelot_port_set_native_vlan(ocelot, port, native_vlan);
+ ocelot_port_set_pvid(ocelot, port, NULL);
+ ocelot_port_manage_port_tag(ocelot, port);
ocelot_apply_bridge_fwd_mask(ocelot);
}
EXPORT_SYMBOL(ocelot_port_bridge_leave);
@@ -2071,9 +2215,10 @@ static void ocelot_cpu_port_init(struct ocelot *ocelot)
OCELOT_TAG_PREFIX_NONE);
/* Configure the CPU port to be VLAN aware */
- ocelot_write_gix(ocelot, ANA_PORT_VLAN_CFG_VLAN_VID(0) |
- ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
- ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
+ ocelot_write_gix(ocelot,
+ ANA_PORT_VLAN_CFG_VLAN_VID(OCELOT_VLAN_UNAWARE_PVID) |
+ ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA |
+ ANA_PORT_VLAN_CFG_VLAN_POP_CNT(1),
ANA_PORT_VLAN_CFG, cpu);
}
@@ -2114,6 +2259,7 @@ int ocelot_init(struct ocelot *ocelot)
mutex_init(&ocelot->stats_lock);
mutex_init(&ocelot->ptp_lock);
+ mutex_init(&ocelot->mact_lock);
spin_lock_init(&ocelot->ptp_clock_lock);
spin_lock_init(&ocelot->ts_id_lock);
snprintf(queue_name, sizeof(queue_name), "%s-stats",
@@ -2130,6 +2276,7 @@ int ocelot_init(struct ocelot *ocelot)
INIT_LIST_HEAD(&ocelot->multicast);
INIT_LIST_HEAD(&ocelot->pgids);
+ INIT_LIST_HEAD(&ocelot->vlans);
ocelot_detect_features(ocelot);
ocelot_mact_init(ocelot);
ocelot_vlan_init(ocelot);
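
For reference, the egress-tag decision that the new ocelot_port_manage_port_tag() derives from the bridge VLAN list can be checked in isolation. The snippet below is a stand-alone illustrative sketch, not part of the patch; the simplified bridge_vlan struct and tag_mode enum are hypothetical stand-ins for the driver's ocelot_bridge_vlan and ocelot_port_tag_config types.

/* Sketch only; pick_tag_mode() mirrors the ocelot_port_manage_port_tag()
 * decision. bridge_vlan and tag_mode are hypothetical simplified types.
 */
#include <stdio.h>

enum tag_mode { TAG_DISABLED, TAG_NATIVE, TAG_TRUNK };

struct bridge_vlan {
	int vid;
	int member;	/* port is a member of this VLAN */
	int untagged;	/* VLAN egresses untagged on this port */
};

static enum tag_mode pick_tag_mode(const struct bridge_vlan *vlans, int n,
				   int vlan_aware)
{
	int num_tagged = 0, num_untagged = 0, i;

	if (!vlan_aware)
		return TAG_DISABLED;

	for (i = 0; i < n; i++) {
		if (!vlans[i].member)
			continue;
		if (vlans[i].untagged)
			num_untagged++;
		else
			num_tagged++;
	}

	if (num_tagged && num_untagged == 1)
		return TAG_NATIVE;	/* tag everything but the native VLAN */
	if (num_untagged)
		return TAG_DISABLED;	/* all VLANs egress untagged */
	return TAG_TRUNK;		/* tag everything */
}

int main(void)
{
	struct bridge_vlan v[] = {
		{ .vid = 1,   .member = 1, .untagged = 1 },
		{ .vid = 100, .member = 1, .untagged = 0 },
	};

	/* One untagged plus one tagged VLAN -> native VLAN mode (prints 1) */
	printf("%d\n", pick_tag_mode(v, 2, 1));
	return 0;
}
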
diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h
index 1952d6a1b98a..e43da09b8f91 100644
--- a/drivers/net/ethernet/mscc/ocelot.h
+++ b/drivers/net/ethernet/mscc/ocelot.h
@@ -25,6 +25,7 @@
#include "ocelot_rew.h"
#include "ocelot_qs.h"
+#define OCELOT_VLAN_UNAWARE_PVID 0
#define OCELOT_BUFFER_CELL_SZ 60
#define OCELOT_STATS_CHECK_DELAY (2 * HZ)
diff --git a/drivers/net/ethernet/mscc/ocelot_flower.c b/drivers/net/ethernet/mscc/ocelot_flower.c
index 8b843d3c9189..769a8159373e 100644
--- a/drivers/net/ethernet/mscc/ocelot_flower.c
+++ b/drivers/net/ethernet/mscc/ocelot_flower.c
@@ -142,17 +142,77 @@ ocelot_find_vcap_filter_that_points_at(struct ocelot *ocelot, int chain)
return NULL;
}
+static int
+ocelot_flower_parse_ingress_vlan_modify(struct ocelot *ocelot, int port,
+ struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ struct ocelot_port *ocelot_port = ocelot->ports[port];
+
+ if (filter->goto_target != -1) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Last action must be GOTO");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ocelot_port->vlan_aware) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Can only modify VLAN under VLAN aware bridge");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_replace_ena = true;
+ filter->action.pcp_dei_ena = true;
+ filter->action.vid = a->vlan.vid;
+ filter->action.pcp = a->vlan.prio;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
+static int
+ocelot_flower_parse_egress_vlan_modify(struct ocelot_vcap_filter *filter,
+ const struct flow_action_entry *a,
+ struct netlink_ext_ack *extack)
+{
+ enum ocelot_tag_tpid_sel tpid;
+
+ switch (ntohs(a->vlan.proto)) {
+ case ETH_P_8021Q:
+ tpid = OCELOT_TAG_TPID_SEL_8021Q;
+ break;
+ case ETH_P_8021AD:
+ tpid = OCELOT_TAG_TPID_SEL_8021AD;
+ break;
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Cannot modify custom TPID");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.tag_a_tpid_sel = tpid;
+ filter->action.push_outer_tag = OCELOT_ES0_TAG;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID_PLUS_CLASSIFIED_VID;
+ filter->action.vid_a_val = a->vlan.vid;
+ filter->action.pcp_a_val = a->vlan.prio;
+ filter->action.tag_a_pcp_sel = OCELOT_ES0_PCP;
+ filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+
+ return 0;
+}
+
static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
bool ingress, struct flow_cls_offload *f,
struct ocelot_vcap_filter *filter)
{
- struct ocelot_port *ocelot_port = ocelot->ports[port];
struct netlink_ext_ack *extack = f->common.extack;
bool allow_missing_goto_target = false;
const struct flow_action_entry *a;
enum ocelot_tag_tpid_sel tpid;
int i, chain, egress_port;
u64 rate;
+ int err;
if (!flow_action_basic_hw_stats_check(&f->rule->action,
f->common.extack))
@@ -273,26 +333,20 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
break;
case FLOW_ACTION_VLAN_MANGLE:
- if (filter->block_id != VCAP_IS1) {
- NL_SET_ERR_MSG_MOD(extack,
- "VLAN modify action can only be offloaded to VCAP IS1");
- return -EOPNOTSUPP;
- }
- if (filter->goto_target != -1) {
+ if (filter->block_id == VCAP_IS1) {
+ err = ocelot_flower_parse_ingress_vlan_modify(ocelot, port,
+ filter, a,
+ extack);
+ } else if (filter->block_id == VCAP_ES0) {
+ err = ocelot_flower_parse_egress_vlan_modify(filter, a,
+ extack);
+ } else {
NL_SET_ERR_MSG_MOD(extack,
- "Last action must be GOTO");
- return -EOPNOTSUPP;
+ "VLAN modify action can only be offloaded to VCAP IS1 or ES0");
+ err = -EOPNOTSUPP;
}
- if (!ocelot_port->vlan_aware) {
- NL_SET_ERR_MSG_MOD(extack,
- "Can only modify VLAN under VLAN aware bridge");
- return -EOPNOTSUPP;
- }
- filter->action.vid_replace_ena = true;
- filter->action.pcp_dei_ena = true;
- filter->action.vid = a->vlan.vid;
- filter->action.pcp = a->vlan.prio;
- filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
+ if (err)
+ return err;
break;
case FLOW_ACTION_PRIORITY:
if (filter->block_id != VCAP_IS1) {
@@ -340,7 +394,7 @@ static int ocelot_flower_parse_action(struct ocelot *ocelot, int port,
}
filter->action.tag_a_tpid_sel = tpid;
filter->action.push_outer_tag = OCELOT_ES0_TAG;
- filter->action.tag_a_vid_sel = 1;
+ filter->action.tag_a_vid_sel = OCELOT_ES0_VID;
filter->action.vid_a_val = a->vlan.vid;
filter->action.pcp_a_val = a->vlan.prio;
filter->type = OCELOT_VCAP_FILTER_OFFLOAD;
@@ -678,6 +732,31 @@ static int ocelot_vcap_dummy_filter_del(struct ocelot *ocelot,
return 0;
}
+/* If we have an egress VLAN modification rule, we need to actually write the
+ * delta between the input VLAN (from the key) and the output VLAN (from the
+ * action), but the action was parsed first. So we need to patch the delta into
+ * the action here.
+ */
+static int
+ocelot_flower_patch_es0_vlan_modify(struct ocelot_vcap_filter *filter,
+ struct netlink_ext_ack *extack)
+{
+ if (filter->block_id != VCAP_ES0 ||
+ filter->action.tag_a_vid_sel != OCELOT_ES0_VID_PLUS_CLASSIFIED_VID)
+ return 0;
+
+ if (filter->vlan.vid.mask != VLAN_VID_MASK) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "VCAP ES0 VLAN rewriting needs a full VLAN in the key");
+ return -EOPNOTSUPP;
+ }
+
+ filter->action.vid_a_val -= filter->vlan.vid.value;
+ filter->action.vid_a_val &= VLAN_VID_MASK;
+
+ return 0;
+}
+
int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
struct flow_cls_offload *f, bool ingress)
{
@@ -701,6 +780,12 @@ int ocelot_cls_flower_replace(struct ocelot *ocelot, int port,
return ret;
}
+ ret = ocelot_flower_patch_es0_vlan_modify(filter, extack);
+ if (ret) {
+ kfree(filter);
+ return ret;
+ }
+
/* The non-optional GOTOs for the TCAM skeleton don't need
* to be actually offloaded.
*/
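
The VID arithmetic performed by ocelot_flower_patch_es0_vlan_modify() is easy to verify in isolation: with OCELOT_ES0_VID_PLUS_CLASSIFIED_VID the hardware pushes the classified VID plus a stored delta, so the rule stores (action VID - key VID) modulo 4096. A stand-alone sketch, not part of the patch:

/* Sketch only, not part of the patch. */
#include <stdio.h>

#define VLAN_VID_MASK 0x0fff

static unsigned int es0_vid_delta(unsigned int key_vid, unsigned int action_vid)
{
	/* The rule stores the delta; hardware adds it to the classified VID */
	return (action_vid - key_vid) & VLAN_VID_MASK;
}

int main(void)
{
	/* Rewrite VID 100 to 200 on egress: delta 100 */
	printf("%u\n", es0_vid_delta(100, 200));
	/* Rewrite VID 200 to 100: delta wraps to 4096 - 100 = 3996 */
	printf("%u\n", es0_vid_delta(200, 100));
	return 0;
}
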
diff --git a/drivers/net/ethernet/mscc/ocelot_mrp.c b/drivers/net/ethernet/mscc/ocelot_mrp.c
index 4b0941f09f71..1fa58546abdc 100644
--- a/drivers/net/ethernet/mscc/ocelot_mrp.c
+++ b/drivers/net/ethernet/mscc/ocelot_mrp.c
@@ -116,16 +116,16 @@ static void ocelot_mrp_save_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_test_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_mact_learn(ocelot, PGID_BLACKHOLE, mrp_control_dmac,
- port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
}
static void ocelot_mrp_del_mac(struct ocelot *ocelot,
struct ocelot_port *port)
{
- ocelot_mact_forget(ocelot, mrp_test_dmac, port->pvid_vlan.vid);
- ocelot_mact_forget(ocelot, mrp_control_dmac, port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, mrp_test_dmac, OCELOT_VLAN_UNAWARE_PVID);
+ ocelot_mact_forget(ocelot, mrp_control_dmac, OCELOT_VLAN_UNAWARE_PVID);
}
int ocelot_mrp_add(struct ocelot *ocelot, int port,
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 2545727fd5b2..eaeba60b1bba 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -418,7 +418,7 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid)
* with VLAN filtering feature. We need to keep it to receive
* untagged traffic.
*/
- if (vid == 0)
+ if (vid == OCELOT_VLAN_UNAWARE_PVID)
return 0;
ret = ocelot_vlan_del(ocelot, port, vid);
@@ -553,7 +553,7 @@ static int ocelot_mc_unsync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.forget.addr, addr);
- w.forget.vid = ocelot_port->pvid_vlan.vid;
+ w.forget.vid = OCELOT_VLAN_UNAWARE_PVID;
w.type = OCELOT_MACT_FORGET;
return ocelot_enqueue_mact_action(ocelot, &w);
@@ -567,7 +567,7 @@ static int ocelot_mc_sync(struct net_device *dev, const unsigned char *addr)
struct ocelot_mact_work_ctx w;
ether_addr_copy(w.learn.addr, addr);
- w.learn.vid = ocelot_port->pvid_vlan.vid;
+ w.learn.vid = OCELOT_VLAN_UNAWARE_PVID;
w.learn.pgid = PGID_CPU;
w.learn.entry_type = ENTRYTYPE_LOCKED;
w.type = OCELOT_MACT_LEARN;
@@ -602,11 +602,11 @@ static int ocelot_port_set_mac_address(struct net_device *dev, void *p)
/* Learn the new net device MAC address in the mac table. */
ocelot_mact_learn(ocelot, PGID_CPU, addr->sa_data,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
/* Then forget the previous one. */
- ocelot_mact_forget(ocelot, dev->dev_addr, ocelot_port->pvid_vlan.vid);
+ ocelot_mact_forget(ocelot, dev->dev_addr, OCELOT_VLAN_UNAWARE_PVID);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -1509,7 +1509,7 @@ static void vsc7514_phylink_validate(struct phylink_config *config,
if (state->interface != PHY_INTERFACE_MODE_NA &&
state->interface != ocelot_port->phy_mode) {
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
@@ -1528,9 +1528,8 @@ static void vsc7514_phylink_validate(struct phylink_config *config,
phylink_set(mask, 2500baseT_Full);
phylink_set(mask, 2500baseX_Full);
- bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void vsc7514_phylink_mac_config(struct phylink_config *config,
@@ -1705,10 +1704,9 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target,
NETIF_F_HW_TC;
dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
- memcpy(dev->dev_addr, ocelot->base_mac, ETH_ALEN);
- dev->dev_addr[ETH_ALEN - 1] += port;
+ eth_hw_addr_gen(dev, ocelot->base_mac, port);
ocelot_mact_learn(ocelot, PGID_CPU, dev->dev_addr,
- ocelot_port->pvid_vlan.vid, ENTRYTYPE_LOCKED);
+ OCELOT_VLAN_UNAWARE_PVID, ENTRYTYPE_LOCKED);
ocelot_init_port(ocelot, port);
diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
index 291ae6817c26..38103b0255b0 100644
--- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c
+++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c
@@ -969,6 +969,7 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev,
target = ocelot_regmap_init(ocelot, res);
if (IS_ERR(target)) {
err = PTR_ERR(target);
+ of_node_put(portnp);
goto out_teardown;
}
@@ -1134,10 +1135,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
if (err)
goto out_put_ports;
- err = devlink_register(devlink);
- if (err)
- goto out_ocelot_deinit;
-
err = mscc_ocelot_init_ports(pdev, ports);
if (err)
goto out_ocelot_devlink_unregister;
@@ -1160,6 +1157,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev)
register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
of_node_put(ports);
+ devlink_register(devlink);
dev_info(&pdev->dev, "Ocelot switch probed\n");
@@ -1169,8 +1167,6 @@ out_ocelot_release_ports:
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
out_ocelot_devlink_unregister:
- devlink_unregister(devlink);
-out_ocelot_deinit:
ocelot_deinit(ocelot);
out_put_ports:
of_node_put(ports);
@@ -1183,11 +1179,11 @@ static int mscc_ocelot_remove(struct platform_device *pdev)
{
struct ocelot *ocelot = platform_get_drvdata(pdev);
+ devlink_unregister(ocelot->devlink);
ocelot_deinit_timestamp(ocelot);
ocelot_devlink_sb_unregister(ocelot);
mscc_ocelot_release_ports(ocelot);
mscc_ocelot_teardown_devlink_ports(ocelot);
- devlink_unregister(ocelot->devlink);
ocelot_deinit(ocelot);
unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb);
unregister_switchdev_notifier(&ocelot_switchdev_nb);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index c1a75b08ced7..5736fcdafd7a 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -796,7 +796,8 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp, int adopt)
return status;
}
-static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp,
+ const u8 * addr)
{
struct myri10ge_cmd cmd;
int status;
@@ -3022,7 +3023,7 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
}
/* change the dev structure */
- memcpy(dev->dev_addr, sa->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, sa->sa_data);
return 0;
}
@@ -3738,7 +3739,6 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct myri10ge_priv *mgp;
struct device *dev = &pdev->dev;
- int i;
int status = -ENXIO;
int dac_enabled;
unsigned hdr_offset, ss_offset;
@@ -3828,8 +3828,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (status)
goto abort_with_ioremap;
- for (i = 0; i < ETH_ALEN; i++)
- netdev->dev_addr[i] = mgp->mac_addr[i];
+ eth_hw_addr_set(netdev, mgp->mac_addr);
myri10ge_select_firmware(mgp);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 3f982033944b..82a22711ce45 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -809,6 +809,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned long iosize;
void __iomem *ioaddr;
const int pcibar = 1; /* PCI base address register */
+ u8 addr[ETH_ALEN];
int prev_eedata;
u32 tmp;
@@ -859,10 +860,11 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
prev_eedata = eeprom_read(ioaddr, 6);
for (i = 0; i < 3; i++) {
int eedata = eeprom_read(ioaddr, i + 7);
- dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
- dev->dev_addr[i*2+1] = eedata >> 7;
+ addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+ addr[i*2+1] = eedata >> 7;
prev_eedata = eedata;
}
+ eth_hw_addr_set(dev, addr);
np = netdev_priv(dev);
np->ioaddr = ioaddr;
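
Many of the conversions in this diff follow the same shape as the natsemi change above: the MAC is assembled in a local buffer and installed with eth_hw_addr_set() rather than written into dev->dev_addr[] byte by byte. A minimal sketch of that pattern, not part of the patch; example_read_mac_byte() is a made-up stand-in for a driver's EEPROM or register accessor.

/* Sketch only, not part of the patch; example_read_mac_byte() is made up. */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

u8 example_read_mac_byte(void __iomem *ioaddr, int i);

static void example_set_hw_mac(struct net_device *dev, void __iomem *ioaddr)
{
	u8 addr[ETH_ALEN];
	int i;

	/* Assemble the address in a local buffer... */
	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = example_read_mac_byte(ioaddr, i);

	/* ...then install it in one go instead of poking dev->dev_addr[] */
	eth_hw_addr_set(dev, addr);
}
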
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 72794d158871..49ea130c9067 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1649,9 +1649,11 @@ failed:
return ret;
}
-static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
+static void ns83820_getmac(struct ns83820 *dev, struct net_device *ndev)
{
+ u8 mac[ETH_ALEN];
unsigned i;
+
for (i=0; i<3; i++) {
u32 data;
@@ -1661,9 +1663,10 @@ static void ns83820_getmac(struct ns83820 *dev, u8 *mac)
writel(i*2, dev->base + RFCR);
data = readl(dev->base + RFDR);
- *mac++ = data;
- *mac++ = data >> 8;
+ mac[i * 2] = data;
+ mac[i * 2 + 1] = data >> 8;
}
+ eth_hw_addr_set(ndev, mac);
}
static void ns83820_set_multicast(struct net_device *ndev)
@@ -2136,7 +2139,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
/* Disable Wake On Lan */
writel(0, dev->base + WCSR);
- ns83820_getmac(dev, ndev->dev_addr);
+ ns83820_getmac(dev, ndev);
/* Yes, we support dumb IP checksum on transmit */
ndev->features |= NETIF_F_SG;
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 3b6b2e61139e..d1c32c65db05 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5202,7 +5202,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
/* store the MAC address in CAM */
return do_s2io_prog_unicast(dev, dev->dev_addr);
@@ -5217,7 +5217,7 @@ static int s2io_set_mac_addr(struct net_device *dev, void *p)
* as defined in errno.h file on failure.
*/
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr)
{
struct s2io_nic *sp = netdev_priv(dev);
register u64 mac_addr = 0, perm_addr = 0;
@@ -7954,7 +7954,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Set the factory defined MAC address initially */
dev->addr_len = ETH_ALEN;
- memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, sp->def_mac_addr[0].mac_addr);
/* initialize number of multicast & unicast MAC entries variables */
if (sp->device_type == XFRAME_I_DEVICE) {
diff --git a/drivers/net/ethernet/neterion/s2io.h b/drivers/net/ethernet/neterion/s2io.h
index 5a6032212c19..a4266d1544ab 100644
--- a/drivers/net/ethernet/neterion/s2io.h
+++ b/drivers/net/ethernet/neterion/s2io.h
@@ -1073,7 +1073,7 @@ static void s2io_reset(struct s2io_nic * sp);
static int s2io_poll_msix(struct napi_struct *napi, int budget);
static int s2io_poll_inta(struct napi_struct *napi, int budget);
static void s2io_init_pci(struct s2io_nic * sp);
-static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr);
+static int do_s2io_prog_unicast(struct net_device *dev, const u8 *addr);
static void s2io_alarm_handle(struct timer_list *t);
static irqreturn_t
s2io_msix_ring_handle(int irq, void *dev_id);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index df4a3f3da83a..1969009a91e7 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1328,7 +1328,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
}
if (unlikely(!is_vxge_card_up(vdev))) {
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return VXGE_HW_OK;
}
@@ -1341,7 +1341,7 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
return -EINVAL;
}
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return status;
}
@@ -4663,7 +4663,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Store the fw version for ethttool option */
strcpy(vdev->fw_version, ll_config->device_hw_info.fw_version.version);
- memcpy(vdev->ndev->dev_addr, (u8 *)vdev->vpaths[0].macaddr, ETH_ALEN);
+ eth_hw_addr_set(vdev->ndev, (u8 *)vdev->vpaths[0].macaddr);
/* Copy the station mac address to the list */
for (i = 0; i < vdev->no_of_vpath; i++) {
diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c
index 605a1617b195..5d3df28c648f 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/main.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/main.c
@@ -305,7 +305,7 @@ nfp_abm_vnic_set_mac(struct nfp_pf *pf, struct nfp_abm *abm, struct nfp_net *nn,
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
index 2473fb5f75e5..2a5cc64227e9 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c
@@ -458,7 +458,7 @@ nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle,
static void
nfp_abm_stats_calculate(struct nfp_alink_stats *new,
struct nfp_alink_stats *old,
- struct gnet_stats_basic_packed *bstats,
+ struct gnet_stats_basic_sync *bstats,
struct gnet_stats_queue *qstats)
{
_bstats_update(bstats, new->tx_bytes - old->tx_bytes,
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index 11c83a99b014..f469950c7265 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -182,15 +182,21 @@ static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
- unsigned int max_mtu;
+ struct nfp_bpf_vnic *bv;
+ struct bpf_prog *prog;
if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return 0;
- max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- if (new_mtu > max_mtu) {
- nn_info(nn, "BPF offload active, MTU over %u not supported\n",
- max_mtu);
+ if (nn->xdp_hw.prog) {
+ prog = nn->xdp_hw.prog;
+ } else {
+ bv = nn->app_priv;
+ prog = bv->tc_prog;
+ }
+
+ if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
+ nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
return -EBUSY;
}
return 0;
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h
index d0e17eebddd9..16841bb750b7 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.h
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h
@@ -560,6 +560,8 @@ bool nfp_is_subprog_start(struct nfp_insn_meta *meta);
void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog);
int nfp_bpf_jit(struct nfp_prog *prog);
bool nfp_bpf_supported_opcode(u8 code);
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu);
int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx,
int prev_insn_idx);
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
index 53851853562c..9d97cd281f18 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
@@ -481,19 +481,28 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
return 0;
}
+bool nfp_bpf_offload_check_mtu(struct nfp_net *nn, struct bpf_prog *prog,
+ unsigned int mtu)
+{
+ unsigned int fw_mtu, pkt_off;
+
+ fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
+ pkt_off = min(prog->aux->max_pkt_offset, mtu);
+
+ return fw_mtu < pkt_off;
+}
+
static int
nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog,
struct netlink_ext_ack *extack)
{
struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv;
- unsigned int fw_mtu, pkt_off, max_stack, max_prog_len;
+ unsigned int max_stack, max_prog_len;
dma_addr_t dma_addr;
void *img;
int err;
- fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
- pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu);
- if (fw_mtu < pkt_off) {
+ if (nfp_bpf_offload_check_mtu(nn, prog, nn->dp.netdev->mtu)) {
NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary");
return -EOPNOTSUPP;
}
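
The check that nfp_bpf_offload_check_mtu() now centralizes compares the firmware packet-split limit (the inline-MTU register value times 64, minus 32 bytes) against the furthest packet offset the program may access, clamped to the MTU. A stand-alone sketch, not part of the patch; the register value used in main() is made up for the example.

/* Sketch only, not part of the patch; the register value below is made up. */
#include <stdbool.h>
#include <stdio.h>

static bool bpf_offload_mtu_too_big(unsigned int inl_mtu_regval,
				    unsigned int max_pkt_offset,
				    unsigned int mtu)
{
	unsigned int fw_mtu = inl_mtu_regval * 64 - 32;
	unsigned int pkt_off = max_pkt_offset < mtu ? max_pkt_offset : mtu;

	return fw_mtu < pkt_off;
}

int main(void)
{
	/* Hypothetical register value 32 -> firmware limit of 2016 bytes */
	printf("%d\n", bpf_offload_mtu_too_big(32, 4096, 1500)); /* 0: allowed */
	printf("%d\n", bpf_offload_mtu_too_big(32, 4096, 3000)); /* 1: refused */
	return 0;
}
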
diff --git a/drivers/net/ethernet/netronome/nfp/devlink_param.c b/drivers/net/ethernet/netronome/nfp/devlink_param.c
index 36491835ac65..db297ee4d7ad 100644
--- a/drivers/net/ethernet/netronome/nfp/devlink_param.c
+++ b/drivers/net/ethernet/netronome/nfp/devlink_param.c
@@ -233,13 +233,8 @@ int nfp_devlink_params_register(struct nfp_pf *pf)
if (err <= 0)
return err;
- err = devlink_params_register(devlink, nfp_devlink_params,
- ARRAY_SIZE(nfp_devlink_params));
- if (err)
- return err;
-
- devlink_params_publish(devlink);
- return 0;
+ return devlink_params_register(devlink, nfp_devlink_params,
+ ARRAY_SIZE(nfp_devlink_params));
}
void nfp_devlink_params_unregister(struct nfp_pf *pf)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 2a432de11858..a3242b36e216 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -272,7 +272,8 @@ nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx)
for (act_idx = start_idx + 1; act_idx < num_act; act_idx++)
if (act[act_idx].id == FLOW_ACTION_REDIRECT ||
act[act_idx].id == FLOW_ACTION_MIRRED)
- return netif_is_gretap(act[act_idx].dev);
+ return netif_is_gretap(act[act_idx].dev) ||
+ netif_is_ip6gretap(act[act_idx].dev);
return false;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index a2926b1b3cff..784292b16290 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -703,7 +703,7 @@ nfp_fl_netdev_is_tunnel_type(struct net_device *netdev,
{
if (netif_is_vxlan(netdev))
return tun_type == NFP_FL_TUNNEL_VXLAN;
- if (netif_is_gretap(netdev))
+ if (netif_is_gretap(netdev) || netif_is_ip6gretap(netdev))
return tun_type == NFP_FL_TUNNEL_GRE;
if (netif_is_geneve(netdev))
return tun_type == NFP_FL_TUNNEL_GENEVE;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 64c0ef57ad42..224089d04d98 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -360,7 +360,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
/* check if GRE, which has no enc_ports */
- if (!netif_is_gretap(netdev)) {
+ if (!netif_is_gretap(netdev) && !netif_is_ip6gretap(netdev)) {
NL_SET_ERR_MSG_MOD(extack, "unsupported offload: an exact match on L4 destination port is required for non-GRE tunnels");
return -EOPNOTSUPP;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index ab70179728f6..dfb4468fe287 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -837,7 +837,7 @@ nfp_tunnel_put_ipv6_off(struct nfp_app *app, struct nfp_ipv6_addr_entry *entry)
}
static int
-__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
+__nfp_tunnel_offload_mac(struct nfp_app *app, const u8 *mac, u16 idx, bool del)
{
struct nfp_tun_mac_addr_offload payload;
@@ -886,7 +886,7 @@ static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
}
static struct nfp_tun_offloaded_mac *
-nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, const u8 *mac)
{
struct nfp_flower_priv *priv = app->priv;
@@ -1005,7 +1005,7 @@ err_free_ida:
static int
nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
- u8 *mac, bool mod)
+ const u8 *mac, bool mod)
{
struct nfp_flower_priv *priv = app->priv;
struct nfp_flower_repr_priv *repr_priv;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_asm.c b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
index 2643ea5948f4..154399c5453f 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_asm.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_asm.c
@@ -196,7 +196,7 @@ int swreg_to_unrestricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
@@ -277,7 +277,7 @@ int swreg_to_restricted(swreg dst, swreg lreg, swreg rreg,
}
reg->dst_lmextn = swreg_lmextn(dst);
- reg->src_lmextn = swreg_lmextn(lreg) | swreg_lmextn(rreg);
+ reg->src_lmextn = swreg_lmextn(lreg) || swreg_lmextn(rreg);
return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 5bfa22accf2c..850bfdf83d0a 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -2067,7 +2067,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (napi_complete_done(napi, pkts_polled))
nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry);
- if (r_vec->nfp_net->rx_coalesce_adapt_on) {
+ if (r_vec->nfp_net->rx_coalesce_adapt_on && r_vec->rx_ring) {
struct dim_sample dim_sample = {};
unsigned int start;
u64 pkts, bytes;
@@ -2082,7 +2082,7 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
net_dim(&r_vec->rx_dim, dim_sample);
}
- if (r_vec->nfp_net->tx_coalesce_adapt_on) {
+ if (r_vec->nfp_net->tx_coalesce_adapt_on && r_vec->tx_ring) {
struct dim_sample dim_sample = {};
unsigned int start;
u64 pkts, bytes;
@@ -3016,10 +3016,8 @@ static void nfp_net_rx_dim_work(struct work_struct *work)
/* copy RX interrupt coalesce parameters */
value = (moder.pkts << 16) | (factor * moder.usec);
- rtnl_lock();
nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value);
(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
- rtnl_unlock();
dim->state = DIM_START_MEASURE;
}
@@ -3047,10 +3045,8 @@ static void nfp_net_tx_dim_work(struct work_struct *work)
/* copy TX interrupt coalesce parameters */
value = (moder.pkts << 16) | (factor * moder.usec);
- rtnl_lock();
nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value);
(void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
- rtnl_unlock();
dim->state = DIM_START_MEASURE;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index d10a93801344..751f76cd4f79 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -55,7 +55,7 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct net_device *netdev,
return;
}
- ether_addr_copy(netdev->dev_addr, eth_port->mac_addr);
+ eth_hw_addr_set(netdev, eth_port->mac_addr);
ether_addr_copy(netdev->perm_addr, eth_port->mac_addr);
}
@@ -701,10 +701,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
if (err)
goto err_unmap;
- err = devlink_register(devlink);
- if (err)
- goto err_app_clean;
-
err = nfp_shared_buf_register(pf);
if (err)
goto err_devlink_unreg;
@@ -734,6 +730,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_stop_app;
mutex_unlock(&pf->lock);
+ devlink_register(devlink);
return 0;
@@ -751,8 +748,6 @@ err_shared_buf_unreg:
nfp_shared_buf_unregister(pf);
err_devlink_unreg:
cancel_work_sync(&pf->port_refresh_work);
- devlink_unregister(devlink);
-err_app_clean:
nfp_net_pf_app_clean(pf);
err_unmap:
nfp_net_pci_unmap_mem(pf);
@@ -763,6 +758,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
{
struct nfp_net *nn, *next;
+ devlink_unregister(priv_to_devlink(pf));
mutex_lock(&pf->lock);
list_for_each_entry_safe(nn, next, &pf->vnics, vnic_list) {
if (!nfp_net_is_data_vnic(nn))
@@ -779,7 +775,6 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
nfp_devlink_params_unregister(pf);
nfp_shared_buf_unregister(pf);
- devlink_unregister(priv_to_devlink(pf));
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index 3b8e675087de..369f6ae700c7 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -499,8 +499,7 @@ struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
struct nfp_reprs *reprs;
- reprs = kzalloc(sizeof(*reprs) +
- num_reprs * sizeof(struct net_device *), GFP_KERNEL);
+ reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
if (!reprs)
return NULL;
reprs->num_reprs = num_reprs;
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index c0e2f4394aef..87f2268b16d6 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -58,7 +58,7 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
return;
}
- ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
+ eth_hw_addr_set(nn->dp.netdev, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c
index 346145d3180e..cfeb7620ae20 100644
--- a/drivers/net/ethernet/ni/nixge.c
+++ b/drivers/net/ethernet/ni/nixge.c
@@ -1283,7 +1283,7 @@ static int nixge_probe(struct platform_device *pdev)
mac_addr = nixge_get_nvmem_address(&pdev->dev);
if (mac_addr && is_valid_ether_addr(mac_addr)) {
- ether_addr_copy(ndev->dev_addr, mac_addr);
+ eth_hw_addr_set(ndev, mac_addr);
kfree(mac_addr);
} else {
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index ef3fb4cc90af..9b530d7509a4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3175,7 +3175,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
/* synchronized against open : rtnl_lock() held by caller */
- memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, macaddr->sa_data);
if (netif_running(dev)) {
netif_tx_lock_bh(dev);
@@ -5711,6 +5711,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
u32 phystate_orig = 0, phystate;
int phyinitialized = 0;
static int printed_version;
+ u8 mac[ETH_ALEN];
if (!printed_version++)
pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
@@ -5884,50 +5885,52 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
txreg = readl(base + NvRegTransmitPoll);
if (id->driver_data & DEV_HAS_CORRECT_MACADDR) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
} else if (txreg & NVREG_TRANSMITPOLL_MAC_ADDR_REV) {
/* mac address is already in correct order */
- dev->dev_addr[0] = (np->orig_mac[0] >> 0) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[0] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[1] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[4] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[5] = (np->orig_mac[1] >> 8) & 0xff;
/*
* Set orig mac address back to the reversed version.
* This flag will be cleared during low power transition.
* Therefore, we should always put back the reversed address.
*/
- np->orig_mac[0] = (dev->dev_addr[5] << 0) + (dev->dev_addr[4] << 8) +
- (dev->dev_addr[3] << 16) + (dev->dev_addr[2] << 24);
- np->orig_mac[1] = (dev->dev_addr[1] << 0) + (dev->dev_addr[0] << 8);
+ np->orig_mac[0] = (mac[5] << 0) + (mac[4] << 8) +
+ (mac[3] << 16) + (mac[2] << 24);
+ np->orig_mac[1] = (mac[1] << 0) + (mac[0] << 8);
} else {
/* need to reverse mac address to correct order */
- dev->dev_addr[0] = (np->orig_mac[1] >> 8) & 0xff;
- dev->dev_addr[1] = (np->orig_mac[1] >> 0) & 0xff;
- dev->dev_addr[2] = (np->orig_mac[0] >> 24) & 0xff;
- dev->dev_addr[3] = (np->orig_mac[0] >> 16) & 0xff;
- dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
- dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
+ mac[0] = (np->orig_mac[1] >> 8) & 0xff;
+ mac[1] = (np->orig_mac[1] >> 0) & 0xff;
+ mac[2] = (np->orig_mac[0] >> 24) & 0xff;
+ mac[3] = (np->orig_mac[0] >> 16) & 0xff;
+ mac[4] = (np->orig_mac[0] >> 8) & 0xff;
+ mac[5] = (np->orig_mac[0] >> 0) & 0xff;
writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
dev_dbg(&pci_dev->dev,
"%s: set workaround bit for reversed mac addr\n",
__func__);
}
- if (!is_valid_ether_addr(dev->dev_addr)) {
+ if (is_valid_ether_addr(mac)) {
+ eth_hw_addr_set(dev, mac);
+ } else {
/*
* Bad mac address. At least one bios sets the mac address
* to 01:23:45:67:89:ab
*/
dev_err(&pci_dev->dev,
"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
- dev->dev_addr);
+ mac);
eth_hw_addr_random(dev);
dev_err(&pci_dev->dev,
"Using random MAC address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index d29fe562b3de..bc39558fe82b 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -419,7 +419,7 @@ struct netdata_local {
/*
* MAC support functions
*/
-static void __lpc_set_mac(struct netdata_local *pldat, u8 *mac)
+static void __lpc_set_mac(struct netdata_local *pldat, const u8 *mac)
{
u32 tmp;
@@ -1015,9 +1015,6 @@ static int lpc_eth_close(struct net_device *ndev)
napi_disable(&pldat->napi);
netif_stop_queue(ndev);
- if (ndev->phydev)
- phy_stop(ndev->phydev);
-
spin_lock_irqsave(&pldat->lock, flags);
__lpc_eth_reset(pldat);
netif_carrier_off(ndev);
@@ -1025,6 +1022,8 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
clk_disable_unprepare(pldat->clk);
return 0;
@@ -1093,7 +1092,7 @@ static int lpc_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&pldat->lock, flags);
@@ -1232,6 +1231,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
struct net_device *ndev;
dma_addr_t dma_handle;
struct resource *res;
+ u8 addr[ETH_ALEN];
int irq, ret;
/* Setup network interface for RMII or MII mode */
@@ -1347,10 +1347,11 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
pldat->phy_node = of_parse_phandle(np, "phy-handle", 0);
/* Get MAC address from current HW setting (POR state is all zeros) */
- __lpc_get_mac(pldat, ndev->dev_addr);
+ __lpc_get_mac(pldat, addr);
+ eth_hw_addr_set(ndev, addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
- of_get_mac_address(np, ndev->dev_addr);
+ of_get_ethdev_address(np, ndev);
}
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index ec3e558f890e..71d234291fc5 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2137,7 +2137,7 @@ static int pch_gbe_set_mac(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(skaddr->sa_data)) {
ret_val = -EADDRNOTAVAIL;
} else {
- memcpy(netdev->dev_addr, skaddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, skaddr->sa_data);
memcpy(adapter->hw.mac.addr, skaddr->sa_data, netdev->addr_len);
pch_gbe_mac_mar_set(&adapter->hw, adapter->hw.mac.addr, 0);
ret_val = 0;
@@ -2555,7 +2555,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
goto err_free_adapter;
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, adapter->hw.mac.addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
/*
* If the MAC is invalid (or just missing), display a warning
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index 1a6336a56d3d..9c408328be0d 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -592,6 +592,7 @@ static int hamachi_init_one(struct pci_dev *pdev,
void *ring_space;
dma_addr_t ring_dma;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -628,8 +629,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = 1 ? read_eeprom(ioaddr, 4 + i)
- : readb(ioaddr + StationAddr + i);
+ addr[i] = read_eeprom(ioaddr, 4 + i);
+ eth_hw_addr_set(dev, addr);
#if ! defined(final_version)
if (hamachi_debug > 4)
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index f5cd8f51be7c..12105f62cbdd 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -384,6 +384,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
#else
int bar = 1;
#endif
+ u8 addr[ETH_ALEN];
/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
@@ -416,12 +417,13 @@ static int yellowfin_init_one(struct pci_dev *pdev,
if (drv_flags & DontUseEeprom)
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i);
+ addr[i] = ioread8(ioaddr + StnAddr + i);
else {
int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i);
+ addr[i] = read_eeprom(ioaddr, ee_offset + i);
}
+ eth_hw_addr_set(dev, addr);
/* Reset the chip. */
iowrite32(0x80000000, ioaddr + DMACtrl);
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index 7e096b2888b9..f0ace3a0e85c 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -221,7 +221,7 @@ static int pasemi_mac_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
adr0 = dev->dev_addr[2] << 24 |
dev->dev_addr[3] << 16 |
@@ -1722,7 +1722,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENODEV;
goto out;
}
- memcpy(dev->dev_addr, mac->mac_addr, sizeof(mac->mac_addr));
+ eth_hw_addr_set(dev, mac->mac_addr);
ret = mac_to_intf(mac);
if (ret < 0) {
diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h
index 66204106f83e..5e25411ff02f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic.h
@@ -19,6 +19,7 @@ struct ionic_lif;
#define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003
#define DEVCMD_TIMEOUT 10
+#define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100)
#define IONIC_PHC_UPDATE_NS 10000000000 /* 10s in nanoseconds */
#define NORMAL_PPB 1000000000 /* one billion parts per billion */
@@ -69,8 +70,13 @@ struct ionic_admin_ctx {
};
int ionic_adminq_post(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err);
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg);
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx);
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err);
+
int ionic_dev_cmd_wait(struct ionic *ionic, unsigned long max_wait);
int ionic_set_dma_mask(struct ionic *ionic);
int ionic_setup(struct ionic *ionic);
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
index 39f59849720d..c58217027564 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c
@@ -143,8 +143,6 @@ void ionic_debugfs_add_qcq(struct ionic_lif *lif, struct ionic_qcq *qcq)
debugfs_create_u32("qid", 0400, q_dentry, &q->hw_index);
debugfs_create_u32("qtype", 0400, q_dentry, &q->hw_type);
debugfs_create_u64("drop", 0400, q_dentry, &q->drop);
- debugfs_create_u64("stop", 0400, q_dentry, &q->stop);
- debugfs_create_u64("wake", 0400, q_dentry, &q->wake);
debugfs_create_file("tail", 0400, q_dentry, q, &q_tail_fops);
debugfs_create_file("head", 0400, q_dentry, q, &q_head_fops);
@@ -228,6 +226,50 @@ static int netdev_show(struct seq_file *seq, void *v)
}
DEFINE_SHOW_ATTRIBUTE(netdev);
+static int lif_filters_show(struct seq_file *seq, void *v)
+{
+ struct ionic_lif *lif = seq->private;
+ struct ionic_rx_filter *f;
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+ unsigned int i;
+
+ seq_puts(seq, "id flow state type filter\n");
+ spin_lock_bh(&lif->rx_filters.lock);
+ for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) {
+ head = &lif->rx_filters.by_id[i];
+ hlist_for_each_entry_safe(f, tmp, head, by_id) {
+ switch (le16_to_cpu(f->cmd.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x vlan 0x%04x\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan));
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x mac %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC_VLAN:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x macvl 0x%04x %pM\n",
+ f->filter_id, f->flow_id, f->state,
+ le16_to_cpu(f->cmd.vlan.vlan),
+ f->cmd.mac.addr);
+ break;
+ case IONIC_RX_FILTER_STEER_PKTCLASS:
+ seq_printf(seq, "0x%04x 0x%08x 0x%02x rxstr 0x%llx\n",
+ f->filter_id, f->flow_id, f->state,
+ le64_to_cpu(f->cmd.pkt_class));
+ break;
+ }
+ }
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(lif_filters);
+
void ionic_debugfs_add_lif(struct ionic_lif *lif)
{
struct dentry *lif_dentry;
@@ -239,6 +281,8 @@ void ionic_debugfs_add_lif(struct ionic_lif *lif)
debugfs_create_file("netdev", 0400, lif->dentry,
lif->netdev, &netdev_fops);
+ debugfs_create_file("filters", 0400, lif->dentry,
+ lif, &lif_filters_fops);
}
void ionic_debugfs_del_lif(struct ionic_lif *lif)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 0d6858ab511c..d57e80d44c9d 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -581,7 +581,6 @@ unsigned int ionic_cq_service(struct ionic_cq *cq, unsigned int work_to_do,
cq->done_color = !cq->done_color;
cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
cq_info = &cq->info[cq->tail_idx];
- DEBUG_STATS_CQE_CNT(cq);
if (++work_done >= work_to_do)
break;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 8311086fb1f4..e5acf3bd62b2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -220,9 +220,6 @@ struct ionic_queue {
unsigned int num_descs;
unsigned int max_sg_elems;
u64 features;
- u64 dbell_count;
- u64 stop;
- u64 wake;
u64 drop;
struct ionic_dev *idev;
unsigned int type;
@@ -269,7 +266,6 @@ struct ionic_cq {
bool done_color;
unsigned int num_descs;
unsigned int desc_size;
- u64 compl_count;
void *base;
dma_addr_t base_pa;
} ____cacheline_aligned_in_smp;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
index c7d0e195d176..4297ed9024c0 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c
@@ -82,22 +82,16 @@ int ionic_devlink_register(struct ionic *ionic)
struct devlink_port_attrs attrs = {};
int err;
- err = devlink_register(dl);
- if (err) {
- dev_warn(ionic->dev, "devlink_register failed: %d\n", err);
- return err;
- }
-
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
devlink_port_attrs_set(&ionic->dl_port, &attrs);
err = devlink_port_register(dl, &ionic->dl_port, 0);
if (err) {
dev_err(ionic->dev, "devlink_port_register failed: %d\n", err);
- devlink_unregister(dl);
return err;
}
devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev);
+ devlink_register(dl);
return 0;
}
@@ -105,6 +99,6 @@ void ionic_devlink_unregister(struct ionic *ionic)
{
struct devlink *dl = priv_to_devlink(ionic);
- devlink_port_unregister(&ionic->dl_port);
devlink_unregister(dl);
+ devlink_port_unregister(&ionic->dl_port);
}
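The reordering above follows the general devlink rule of registering last and unregistering first: devlink_register() should only run once the port and netdev are fully wired up, and teardown reverses the order. A hedged sketch of that shape, assuming the devlink API of this kernel generation where devlink_register() returns void (names are illustrative):

#include <linux/netdevice.h>
#include <net/devlink.h>

/* Bring up the port first; expose the devlink instance to user space last. */
static int demo_devlink_register(struct devlink *dl, struct devlink_port *dl_port,
                                 struct net_device *netdev)
{
        struct devlink_port_attrs attrs = { .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL };
        int err;

        devlink_port_attrs_set(dl_port, &attrs);
        err = devlink_port_register(dl, dl_port, 0);
        if (err)
                return err;

        devlink_port_type_eth_set(dl_port, netdev);
        devlink_register(dl);           /* last step: now visible to user space */
        return 0;
}

/* Teardown mirrors setup in reverse: hide the instance, then drop the port. */
static void demo_devlink_unregister(struct devlink *dl, struct devlink_port *dl_port)
{
        devlink_unregister(dl);
        devlink_port_unregister(dl_port);
}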
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
index 3de1a03839e2..c54d735b9e2e 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c
@@ -11,13 +11,6 @@
#include "ionic_ethtool.h"
#include "ionic_stats.h"
-static const char ionic_priv_flags_strings[][ETH_GSTRING_LEN] = {
-#define IONIC_PRIV_F_SW_DBG_STATS BIT(0)
- "sw-dbg-stats",
-};
-
-#define IONIC_PRIV_FLAGS_COUNT ARRAY_SIZE(ionic_priv_flags_strings)
-
static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf)
{
u32 i;
@@ -59,9 +52,6 @@ static int ionic_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_STATS:
count = ionic_get_stats_count(lif);
break;
- case ETH_SS_PRIV_FLAGS:
- count = IONIC_PRIV_FLAGS_COUNT;
- break;
}
return count;
}
@@ -75,10 +65,6 @@ static void ionic_get_strings(struct net_device *netdev,
case ETH_SS_STATS:
ionic_get_stats_strings(lif, buf);
break;
- case ETH_SS_PRIV_FLAGS:
- memcpy(buf, ionic_priv_flags_strings,
- IONIC_PRIV_FLAGS_COUNT * ETH_GSTRING_LEN);
- break;
}
}
@@ -228,8 +214,7 @@ static int ionic_get_link_ksettings(struct net_device *netdev,
break;
}
- bitmap_copy(ks->link_modes.advertising, ks->link_modes.supported,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_copy(ks->link_modes.advertising, ks->link_modes.supported);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
@@ -691,28 +676,6 @@ static int ionic_set_channels(struct net_device *netdev,
return err;
}
-static u32 ionic_get_priv_flags(struct net_device *netdev)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
- u32 priv_flags = 0;
-
- if (test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- priv_flags |= IONIC_PRIV_F_SW_DBG_STATS;
-
- return priv_flags;
-}
-
-static int ionic_set_priv_flags(struct net_device *netdev, u32 priv_flags)
-{
- struct ionic_lif *lif = netdev_priv(netdev);
-
- clear_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
- if (priv_flags & IONIC_PRIV_F_SW_DBG_STATS)
- set_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state);
-
- return 0;
-}
-
static int ionic_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *info, u32 *rules)
{
@@ -1013,8 +976,6 @@ static const struct ethtool_ops ionic_ethtool_ops = {
.get_strings = ionic_get_strings,
.get_ethtool_stats = ionic_get_stats,
.get_sset_count = ionic_get_sset_count,
- .get_priv_flags = ionic_get_priv_flags,
- .set_priv_flags = ionic_set_priv_flags,
.get_rxnfc = ionic_get_rxnfc,
.get_rxfh_indir_size = ionic_get_rxfh_indir_size,
.get_rxfh_key_size = ionic_get_rxfh_key_size,
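The bitmap_copy() to linkmode_copy() change is a helper conversion: as far as I recall, linkmode_copy() is simply a bitmap_copy() fixed to the full __ETHTOOL_LINK_MODE_MASK_NBITS width, so behaviour is unchanged. A hedged sketch of the equivalence (the demo_ prefix marks it as illustrative, not the authoritative definition):

#include <linux/bitmap.h>
#include <linux/ethtool.h>

/* Roughly what linkmode_copy() does: copy one ethtool link-mode bitmap to
 * another, always using the full __ETHTOOL_LINK_MODE_MASK_NBITS width.
 */
static inline void demo_linkmode_copy(unsigned long *dst, const unsigned long *src)
{
        bitmap_copy(dst, src, __ETHTOOL_LINK_MODE_MASK_NBITS);
}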
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 7f3322ce044c..63f8a8163b5f 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -287,11 +287,9 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq)
return ionic_adminq_post_wait(lif, &ctx);
}
-static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
+static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int fw_err)
{
struct ionic_queue *q;
- struct ionic_lif *lif;
- int err = 0;
struct ionic_admin_ctx ctx = {
.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
@@ -301,11 +299,12 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
},
};
- if (!qcq)
+ if (!qcq) {
+ netdev_err(lif->netdev, "%s: bad qcq\n", __func__);
return -ENXIO;
+ }
q = &qcq->q;
- lif = q->lif;
if (qcq->flags & IONIC_QCQ_F_INTR) {
struct ionic_dev *idev = &lif->ionic->idev;
@@ -318,17 +317,19 @@ static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
napi_disable(&qcq->napi);
}
- if (send_to_hw) {
- ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
- ctx.cmd.q_control.type = q->type;
- ctx.cmd.q_control.index = cpu_to_le32(q->index);
- dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
- ctx.cmd.q_control.index, ctx.cmd.q_control.type);
+ /* If there was a previous fw communication error, don't bother with
+ * sending the adminq command and just return the same error value.
+ */
+ if (fw_err == -ETIMEDOUT || fw_err == -ENXIO)
+ return fw_err;
- err = ionic_adminq_post_wait(lif, &ctx);
- }
+ ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
+ ctx.cmd.q_control.type = q->type;
+ ctx.cmd.q_control.index = cpu_to_le32(q->index);
+ dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
+ ctx.cmd.q_control.index, ctx.cmd.q_control.type);
- return err;
+ return ionic_adminq_post_wait(lif, &ctx);
}
static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
@@ -1241,137 +1242,6 @@ void ionic_get_stats64(struct net_device *netdev,
ns->tx_errors = ns->tx_aborted_errors;
}
-int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
- },
- };
- int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- bool mc = is_multicast_ether_addr(addr);
- struct ionic_rx_filter *f;
- int err = 0;
-
- memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f) {
- /* don't bother if we already have it and it is sync'd */
- if (f->state == IONIC_FILTER_STATE_SYNCED) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return 0;
- }
-
- /* mark preemptively as sync'd to block any parallel attempts */
- f->state = IONIC_FILTER_STATE_SYNCED;
- } else {
- /* save as SYNCED to catch any DEL requests while processing */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
- spin_unlock_bh(&lif->rx_filters.lock);
- if (err)
- return err;
-
- netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
-
- /* Don't bother with the write to FW if we know there's no room,
- * we can try again on the next sync attempt.
- */
- if ((lif->nucast + lif->nmcast) >= nfilters)
- err = -ENOSPC;
- else
- err = ionic_adminq_post_wait(lif, &ctx);
-
- spin_lock_bh(&lif->rx_filters.lock);
- if (err && err != -EEXIST) {
- /* set the state back to NEW so we can try again later */
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
- f->state = IONIC_FILTER_STATE_NEW;
- set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (err == -ENOSPC)
- return 0;
- else
- return err;
- }
-
- if (mc)
- lif->nmcast++;
- else
- lif->nucast++;
-
- f = ionic_rx_filter_by_addr(lif, addr);
- if (f && f->state == IONIC_FILTER_STATE_OLD) {
- /* Someone requested a delete while we were adding
- * so update the filter info with the results from the add
- * and the data will be there for the delete on the next
- * sync cycle.
- */
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_OLD);
- } else {
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- }
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- return err;
-}
-
-int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
-{
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
- int state;
- int err;
-
- spin_lock_bh(&lif->rx_filters.lock);
- f = ionic_rx_filter_by_addr(lif, addr);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
-
- netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
- addr, f->filter_id);
-
- state = f->state;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
-
- if (is_multicast_ether_addr(addr) && lif->nmcast)
- lif->nmcast--;
- else if (!is_multicast_ether_addr(addr) && lif->nucast)
- lif->nucast--;
-
- spin_unlock_bh(&lif->rx_filters.lock);
-
- if (state != IONIC_FILTER_STATE_NEW) {
- err = ionic_adminq_post_wait(lif, &ctx);
- if (err && err != -EEXIST)
- return err;
- }
-
- return 0;
-}
-
static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
{
return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR);
@@ -1407,7 +1277,7 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
- /* sync the mac filters */
+ /* sync the filters */
ionic_rx_filter_sync(lif);
/* check for overflow state
@@ -1417,14 +1287,12 @@ void ionic_lif_rx_mode(struct ionic_lif *lif)
* to see if we can disable NIC PROMISC
*/
nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
- if ((lif->nucast + lif->nmcast) >= nfilters) {
+
+ if (((lif->nucast + lif->nmcast) >= nfilters) ||
+ (lif->max_vlans && lif->nvlans >= lif->max_vlans)) {
rx_mode |= IONIC_RX_MODE_F_PROMISC;
rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
- lif->uc_overflow = true;
- lif->mc_overflow = true;
- } else if (lif->uc_overflow) {
- lif->uc_overflow = false;
- lif->mc_overflow = false;
+ } else {
if (!(nd_flags & IFF_PROMISC))
rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
if (!(nd_flags & IFF_ALLMULTI))
@@ -1809,59 +1677,30 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_add = {
- .opcode = IONIC_CMD_RX_FILTER_ADD,
- .lif_index = cpu_to_le16(lif->index),
- .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
- .vlan.vlan = cpu_to_le16(vid),
- },
- };
int err;
- netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
- err = ionic_adminq_post_wait(lif, &ctx);
+ err = ionic_lif_vlan_add(lif, vid);
if (err)
return err;
- spin_lock_bh(&lif->rx_filters.lock);
- err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
- IONIC_FILTER_STATE_SYNCED);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return err;
+ return 0;
}
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
u16 vid)
{
struct ionic_lif *lif = netdev_priv(netdev);
- struct ionic_admin_ctx ctx = {
- .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
- .cmd.rx_filter_del = {
- .opcode = IONIC_CMD_RX_FILTER_DEL,
- .lif_index = cpu_to_le16(lif->index),
- },
- };
- struct ionic_rx_filter *f;
-
- spin_lock_bh(&lif->rx_filters.lock);
-
- f = ionic_rx_filter_by_vlan(lif, vid);
- if (!f) {
- spin_unlock_bh(&lif->rx_filters.lock);
- return -ENOENT;
- }
+ int err;
- netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
- vid, f->filter_id);
+ err = ionic_lif_vlan_del(lif, vid);
+ if (err)
+ return err;
- ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
- ionic_rx_filter_free(lif, f);
- spin_unlock_bh(&lif->rx_filters.lock);
+ ionic_lif_rx_mode(lif);
- return ionic_adminq_post_wait(lif, &ctx);
+ return 0;
}
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
@@ -1953,19 +1792,19 @@ static void ionic_txrx_disable(struct ionic_lif *lif)
if (lif->txqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->txqcqs[i], err);
}
if (lif->hwstamp_txq)
- err = ionic_qcq_disable(lif->hwstamp_txq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_txq, err);
if (lif->rxqcqs) {
for (i = 0; i < lif->nxqs; i++)
- err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
}
if (lif->hwstamp_rxq)
- err = ionic_qcq_disable(lif->hwstamp_rxq, (err != -ETIMEDOUT));
+ err = ionic_qcq_disable(lif, lif->hwstamp_rxq, err);
ionic_lif_quiesce(lif);
}
@@ -2165,7 +2004,7 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err = ionic_qcq_enable(lif->txqcqs[i]);
if (err) {
- derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], err);
goto err_out;
}
}
@@ -2187,13 +2026,13 @@ static int ionic_txrx_enable(struct ionic_lif *lif)
err_out_hwstamp_tx:
if (lif->hwstamp_rxq)
- derr = ionic_qcq_disable(lif->hwstamp_rxq, (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->hwstamp_rxq, derr);
err_out_hwstamp_rx:
i = lif->nxqs;
err_out:
while (i--) {
- derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
- derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
+ derr = ionic_qcq_disable(lif, lif->txqcqs[i], derr);
+ derr = ionic_qcq_disable(lif, lif->rxqcqs[i], derr);
}
return err;
@@ -2896,6 +2735,9 @@ int ionic_lif_alloc(struct ionic *ionic)
snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
+ mutex_init(&lif->queue_lock);
+ mutex_init(&lif->config_lock);
+
spin_lock_init(&lif->adminq_lock);
spin_lock_init(&lif->deferred.lock);
@@ -2909,7 +2751,7 @@ int ionic_lif_alloc(struct ionic *ionic)
if (!lif->info) {
dev_err(dev, "Failed to allocate lif info, aborting\n");
err = -ENOMEM;
- goto err_out_free_netdev;
+ goto err_out_free_mutex;
}
ionic_debugfs_add_lif(lif);
@@ -2944,6 +2786,9 @@ err_out_free_lif_info:
dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
lif->info = NULL;
lif->info_pa = 0;
+err_out_free_mutex:
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
err_out_free_netdev:
free_netdev(lif->netdev);
lif = NULL;
@@ -2974,11 +2819,10 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
netif_device_detach(lif->netdev);
+ mutex_lock(&lif->queue_lock);
if (test_bit(IONIC_LIF_F_UP, lif->state)) {
dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
- mutex_lock(&lif->queue_lock);
ionic_stop_queues(lif);
- mutex_unlock(&lif->queue_lock);
}
if (netif_running(lif->netdev)) {
@@ -2989,6 +2833,8 @@ static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
ionic_reset(ionic);
ionic_qcqs_free(lif);
+ mutex_unlock(&lif->queue_lock);
+
dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
@@ -3012,9 +2858,12 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
err = ionic_port_init(ionic);
if (err)
goto err_out;
+
+ mutex_lock(&lif->queue_lock);
+
err = ionic_qcqs_alloc(lif);
if (err)
- goto err_out;
+ goto err_unlock;
err = ionic_lif_init(lif);
if (err)
@@ -3035,6 +2884,8 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
goto err_txrx_free;
}
+ mutex_unlock(&lif->queue_lock);
+
clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
ionic_link_status_check_request(lif, CAN_SLEEP);
netif_device_attach(lif->netdev);
@@ -3051,6 +2902,8 @@ err_lifs_deinit:
ionic_lif_deinit(lif);
err_qcqs_free:
ionic_qcqs_free(lif);
+err_unlock:
+ mutex_unlock(&lif->queue_lock);
err_out:
dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
@@ -3084,6 +2937,9 @@ void ionic_lif_free(struct ionic_lif *lif)
kfree(lif->dbid_inuse);
lif->dbid_inuse = NULL;
+ mutex_destroy(&lif->config_lock);
+ mutex_destroy(&lif->queue_lock);
+
/* free netdev & lif */
ionic_debugfs_del_lif(lif);
free_netdev(lif->netdev);
@@ -3106,8 +2962,6 @@ void ionic_lif_deinit(struct ionic_lif *lif)
ionic_lif_qcq_deinit(lif, lif->notifyqcq);
ionic_lif_qcq_deinit(lif, lif->adminqcq);
- mutex_destroy(&lif->config_lock);
- mutex_destroy(&lif->queue_lock);
ionic_lif_reset(lif);
}
@@ -3273,8 +3127,6 @@ int ionic_lif_init(struct ionic_lif *lif)
return err;
lif->hw_index = le16_to_cpu(comp.hw_index);
- mutex_init(&lif->queue_lock);
- mutex_init(&lif->config_lock);
/* now that we have the hw_index we can figure out our doorbell page */
lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
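A recurring theme in the ionic_lif.c hunks above is threading the first fatal firmware error (-ETIMEDOUT or -ENXIO) through every subsequent ionic_qcq_disable() call so the teardown loop stops issuing doomed adminq commands. A generic, userspace-style sketch of that carry-the-first-fatal-error pattern (illustrative only):

#include <errno.h>
#include <stdio.h>

#define NQUEUES 4

/* Pretend per-queue hardware disable; queue 1 simulates a firmware timeout. */
static int queue_disable_hw(int idx)
{
        return idx == 1 ? -ETIMEDOUT : 0;
}

/* If an earlier queue already hit a fatal FW error, skip the hardware call
 * and just propagate that error, mirroring the ionic_qcq_disable() change.
 */
static int queue_disable(int idx, int prev_err)
{
        if (prev_err == -ETIMEDOUT || prev_err == -ENXIO)
                return prev_err;
        return queue_disable_hw(idx);
}

int main(void)
{
        int err = 0;
        int i;

        for (i = 0; i < NQUEUES; i++) {
                err = queue_disable(i, err);
                printf("queue %d: err=%d\n", i, err);
        }
        return 0;
}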
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 4915184f3efb..9f7ab2f17f93 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -14,9 +14,6 @@
#define IONIC_ADMINQ_LENGTH 16 /* must be a power of two */
#define IONIC_NOTIFYQ_LENGTH 64 /* must be a power of two */
-#define IONIC_MAX_NUM_NAPI_CNTR (NAPI_POLL_WEIGHT + 1)
-#define IONIC_MAX_NUM_SG_CNTR (IONIC_TX_MAX_SG_ELEMS + 1)
-
#define ADD_ADDR true
#define DEL_ADDR false
#define CAN_SLEEP true
@@ -37,7 +34,6 @@ struct ionic_tx_stats {
u64 clean;
u64 linearize;
u64 crc32_csum;
- u64 sg_cntr[IONIC_MAX_NUM_SG_CNTR];
u64 dma_map_err;
u64 hwstamp_valid;
u64 hwstamp_invalid;
@@ -48,7 +44,6 @@ struct ionic_rx_stats {
u64 bytes;
u64 csum_none;
u64 csum_complete;
- u64 buffers_posted;
u64 dropped;
u64 vlan_stripped;
u64 csum_error;
@@ -65,11 +60,6 @@ struct ionic_rx_stats {
#define IONIC_QCQ_F_RX_STATS BIT(4)
#define IONIC_QCQ_F_NOTIFYQ BIT(5)
-struct ionic_napi_stats {
- u64 poll_count;
- u64 work_done_cntr[IONIC_MAX_NUM_NAPI_CNTR];
-};
-
struct ionic_qcq {
void *q_base;
dma_addr_t q_base_pa;
@@ -85,7 +75,6 @@ struct ionic_qcq {
struct ionic_cq cq;
struct ionic_intr_info intr;
struct napi_struct napi;
- struct ionic_napi_stats napi_stats;
unsigned int flags;
struct dentry *dentry;
};
@@ -142,7 +131,6 @@ struct ionic_lif_sw_stats {
enum ionic_lif_state_flags {
IONIC_LIF_F_INITED,
- IONIC_LIF_F_SW_DEBUG_STATS,
IONIC_LIF_F_UP,
IONIC_LIF_F_LINK_CHECK_REQUESTED,
IONIC_LIF_F_FILTER_SYNC_NEEDED,
@@ -201,11 +189,11 @@ struct ionic_lif {
u16 rx_mode;
u64 hw_features;
bool registered;
- bool mc_overflow;
- bool uc_overflow;
u16 lif_type;
unsigned int nmcast;
unsigned int nucast;
+ unsigned int nvlans;
+ unsigned int max_vlans;
char name[IONIC_LIF_NAME_MAX_SZ];
union ionic_lif_identity *identity;
@@ -350,37 +338,4 @@ int ionic_lif_rss_config(struct ionic_lif *lif, u16 types,
void ionic_lif_rx_mode(struct ionic_lif *lif);
int ionic_reconfigure_queues(struct ionic_lif *lif,
struct ionic_queue_params *qparam);
-
-static inline void debug_stats_txq_post(struct ionic_queue *q, bool dbell)
-{
- struct ionic_txq_desc *desc = &q->txq[q->head_idx];
- u8 num_sg_elems;
-
- q->dbell_count += dbell;
-
- num_sg_elems = ((le64_to_cpu(desc->cmd) >> IONIC_TXQ_DESC_NSGE_SHIFT)
- & IONIC_TXQ_DESC_NSGE_MASK);
- if (num_sg_elems > (IONIC_MAX_NUM_SG_CNTR - 1))
- num_sg_elems = IONIC_MAX_NUM_SG_CNTR - 1;
-
- q->lif->txqstats[q->index].sg_cntr[num_sg_elems]++;
-}
-
-static inline void debug_stats_napi_poll(struct ionic_qcq *qcq,
- unsigned int work_done)
-{
- qcq->napi_stats.poll_count++;
-
- if (work_done > (IONIC_MAX_NUM_NAPI_CNTR - 1))
- work_done = IONIC_MAX_NUM_NAPI_CNTR - 1;
-
- qcq->napi_stats.work_done_cntr[work_done]++;
-}
-
-#define DEBUG_STATS_CQE_CNT(cq) ((cq)->compl_count++)
-#define DEBUG_STATS_RX_BUFF_CNT(q) ((q)->lif->rxqstats[q->index].buffers_posted++)
-#define DEBUG_STATS_TXQ_POST(q, dbell) debug_stats_txq_post(q, dbell)
-#define DEBUG_STATS_NAPI_POLL(qcq, work_done) \
- debug_stats_napi_poll(qcq, work_done)
-
#endif /* _IONIC_LIF_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c
index 6f07bf509efe..875f4ec42efe 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_main.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c
@@ -7,6 +7,7 @@
#include <linux/netdevice.h>
#include <linux/utsname.h>
#include <generated/utsrelease.h>
+#include <linux/ctype.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -211,24 +212,28 @@ static void ionic_adminq_flush(struct ionic_lif *lif)
spin_unlock_irqrestore(&lif->adminq_lock, irqflags);
}
+void ionic_adminq_netdev_err_print(struct ionic_lif *lif, u8 opcode,
+ u8 status, int err)
+{
+ netdev_err(lif->netdev, "%s (%d) failed: %s (%d)\n",
+ ionic_opcode_to_str(opcode), opcode,
+ ionic_error_to_str(status), err);
+}
+
static int ionic_adminq_check_err(struct ionic_lif *lif,
struct ionic_admin_ctx *ctx,
- bool timeout)
+ const bool timeout,
+ const bool do_msg)
{
- struct net_device *netdev = lif->netdev;
- const char *opcode_str;
- const char *status_str;
int err = 0;
if (ctx->comp.comp.status || timeout) {
- opcode_str = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
- status_str = ionic_error_to_str(ctx->comp.comp.status);
err = timeout ? -ETIMEDOUT :
ionic_error_to_errno(ctx->comp.comp.status);
- netdev_err(netdev, "%s (%d) failed: %s (%d)\n",
- opcode_str, ctx->cmd.cmd.opcode,
- timeout ? "TIMEOUT" : status_str, err);
+ if (do_msg)
+ ionic_adminq_netdev_err_print(lif, ctx->cmd.cmd.opcode,
+ ctx->comp.comp.status, err);
if (timeout)
ionic_adminq_flush(lif);
@@ -297,24 +302,52 @@ err_out:
return err;
}
-int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx, int err)
+int ionic_adminq_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx,
+ const int err, const bool do_msg)
{
struct net_device *netdev = lif->netdev;
+ unsigned long time_limit;
+ unsigned long time_start;
+ unsigned long time_done;
unsigned long remaining;
const char *name;
+ name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+
if (err) {
- if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
- name = ionic_opcode_to_str(ctx->cmd.cmd.opcode);
+ if (do_msg && !test_bit(IONIC_LIF_F_FW_RESET, lif->state))
netdev_err(netdev, "Posting of %s (%d) failed: %d\n",
name, ctx->cmd.cmd.opcode, err);
- }
return err;
}
- remaining = wait_for_completion_timeout(&ctx->work,
- HZ * (ulong)DEVCMD_TIMEOUT);
- return ionic_adminq_check_err(lif, ctx, (remaining == 0));
+ time_start = jiffies;
+ time_limit = time_start + HZ * (ulong)DEVCMD_TIMEOUT;
+ do {
+ remaining = wait_for_completion_timeout(&ctx->work,
+ IONIC_ADMINQ_TIME_SLICE);
+
+ /* check for done */
+ if (remaining)
+ break;
+
+ /* interrupt the wait if FW stopped */
+ if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
+ if (do_msg)
+ netdev_err(netdev, "%s (%d) interrupted, FW in reset\n",
+ name, ctx->cmd.cmd.opcode);
+ return -ENXIO;
+ }
+
+ } while (time_before(jiffies, time_limit));
+ time_done = jiffies;
+
+ dev_dbg(lif->ionic->dev, "%s: elapsed %d msecs\n",
+ __func__, jiffies_to_msecs(time_done - time_start));
+
+ return ionic_adminq_check_err(lif, ctx,
+ time_after_eq(time_done, time_limit),
+ do_msg);
}
int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
@@ -323,7 +356,16 @@ int ionic_adminq_post_wait(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
err = ionic_adminq_post(lif, ctx);
- return ionic_adminq_wait(lif, ctx, err);
+ return ionic_adminq_wait(lif, ctx, err, true);
+}
+
+int ionic_adminq_post_wait_nomsg(struct ionic_lif *lif, struct ionic_admin_ctx *ctx)
+{
+ int err;
+
+ err = ionic_adminq_post(lif, ctx);
+
+ return ionic_adminq_wait(lif, ctx, err, false);
}
static void ionic_dev_cmd_clean(struct ionic *ionic)
@@ -450,13 +492,23 @@ int ionic_identify(struct ionic *ionic)
}
mutex_unlock(&ionic->dev_cmd_lock);
- dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version);
-
if (err) {
- dev_err(ionic->dev, "Cannot identify ionic: %dn", err);
+ dev_err(ionic->dev, "Cannot identify ionic: %d\n", err);
goto err_out;
}
+ if (isprint(idev->dev_info.fw_version[0]) &&
+ isascii(idev->dev_info.fw_version[0]))
+ dev_info(ionic->dev, "FW: %.*s\n",
+ (int)(sizeof(idev->dev_info.fw_version) - 1),
+ idev->dev_info.fw_version);
+ else
+ dev_info(ionic->dev, "FW: (invalid string) 0x%02x 0x%02x 0x%02x 0x%02x ...\n",
+ (u8)idev->dev_info.fw_version[0],
+ (u8)idev->dev_info.fw_version[1],
+ (u8)idev->dev_info.fw_version[2],
+ (u8)idev->dev_info.fw_version[3]);
+
err = ionic_lif_identify(ionic, IONIC_LIF_TYPE_CLASSIC,
&ionic->ident.lif);
if (err) {
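The rewritten ionic_adminq_wait() above polls in short slices instead of one long wait so a firmware reset can interrupt it early; IONIC_ADMINQ_TIME_SLICE is presumably defined elsewhere in the series, as it does not appear in these hunks. A hedged, generic sketch of the same sliced-wait idea (names are illustrative):

#include <linux/compiler.h>
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/types.h>

/* Wait for @done in small slices, bailing out early if @abort becomes true. */
static int demo_wait_sliced(struct completion *done, bool *abort,
                            unsigned long total_timeout, unsigned long slice)
{
        unsigned long limit = jiffies + total_timeout;

        do {
                if (wait_for_completion_timeout(done, slice))
                        return 0;               /* completed within this slice */

                if (READ_ONCE(*abort))
                        return -ENXIO;          /* e.g. firmware went away */
        } while (time_before(jiffies, limit));

        return -ETIMEDOUT;
}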
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index eed2db69d708..887046838b3b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -348,7 +348,7 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
@@ -373,7 +373,7 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_settime64(struct ptp_clock_info *info,
@@ -402,7 +402,7 @@ static int ionic_phc_settime64(struct ptp_clock_info *info,
spin_unlock_irqrestore(&phc->lock, irqflags);
- return ionic_adminq_wait(phc->lif, &ctx, err);
+ return ionic_adminq_wait(phc->lif, &ctx, err, true);
}
static int ionic_phc_gettimex64(struct ptp_clock_info *info,
@@ -459,7 +459,7 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info)
spin_unlock_irqrestore(&phc->lock, irqflags);
- ionic_adminq_wait(phc->lif, &ctx, err);
+ ionic_adminq_wait(phc->lif, &ctx, err, true);
return phc->aux_work_delay;
}
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
index 69728f9013cb..f6e785f949f9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c
@@ -239,6 +239,21 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif)
return NULL;
}
+static struct ionic_rx_filter *ionic_rx_filter_find(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ return ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan));
+ case IONIC_RX_FILTER_MATCH_MAC:
+ return ionic_rx_filter_by_addr(lif, ac->mac.addr);
+ default:
+ netdev_err(lif->netdev, "unsupported filter match %d",
+ le16_to_cpu(ac->match));
+ return NULL;
+ }
+}
+
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
{
struct ionic_rx_filter *f;
@@ -286,6 +301,228 @@ int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode)
return 0;
}
+static int ionic_lif_filter_add(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ };
+ struct ionic_rx_filter *f;
+ int nfilters;
+ int err = 0;
+
+ ctx.cmd.rx_filter_add = *ac;
+ ctx.cmd.rx_filter_add.opcode = IONIC_CMD_RX_FILTER_ADD;
+ ctx.cmd.rx_filter_add.lif_index = cpu_to_le16(lif->index);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f) {
+ /* don't bother if we already have it and it is sync'd */
+ if (f->state == IONIC_FILTER_STATE_SYNCED) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return 0;
+ }
+
+ /* mark preemptively as sync'd to block any parallel attempts */
+ f->state = IONIC_FILTER_STATE_SYNCED;
+ } else {
+ /* save as SYNCED to catch any DEL requests while processing */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+ spin_unlock_bh(&lif->rx_filters.lock);
+ if (err)
+ return err;
+
+ /* Don't bother with the write to FW if we know there's no room,
+ * we can try again on the next sync attempt.
+ * Since the FW doesn't have a way to tell us the vlan limit,
+ * we start max_vlans at 0 until we hit the ENOSPC error.
+ */
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter add VLAN %d\n",
+ __func__, ctx.cmd.rx_filter_add.vlan.vlan);
+ if (lif->max_vlans && lif->nvlans >= lif->max_vlans)
+ err = -ENOSPC;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter add ADDR %pM\n",
+ __func__, ctx.cmd.rx_filter_add.mac.addr);
+ nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
+ if ((lif->nucast + lif->nmcast) >= nfilters)
+ err = -ENOSPC;
+ break;
+ }
+
+ if (err != -ENOSPC)
+ err = ionic_adminq_post_wait_nomsg(lif, &ctx);
+
+ spin_lock_bh(&lif->rx_filters.lock);
+
+ if (err && err != -EEXIST) {
+ /* set the state back to NEW so we can try again later */
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_SYNCED) {
+ f->state = IONIC_FILTER_STATE_NEW;
+
+ /* If -ENOSPC we won't waste time trying to sync again
+ * until there is a delete that might make room
+ */
+ if (err != -ENOSPC)
+ set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (err == -ENOSPC) {
+ if (le16_to_cpu(ctx.cmd.rx_filter_add.match) == IONIC_RX_FILTER_MATCH_VLAN)
+ lif->max_vlans = lif->nvlans;
+ return 0;
+ }
+
+ ionic_adminq_netdev_err_print(lif, ctx.cmd.cmd.opcode,
+ ctx.comp.comp.status, err);
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_info(lif->netdev, "rx_filter add failed: VLAN %d\n",
+ ctx.cmd.rx_filter_add.vlan.vlan);
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_info(lif->netdev, "rx_filter add failed: ADDR %pM\n",
+ ctx.cmd.rx_filter_add.mac.addr);
+ break;
+ }
+
+ return err;
+ }
+
+ switch (le16_to_cpu(ctx.cmd.rx_filter_add.match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ lif->nvlans++;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ if (is_multicast_ether_addr(ctx.cmd.rx_filter_add.mac.addr))
+ lif->nmcast++;
+ else
+ lif->nucast++;
+ break;
+ }
+
+ f = ionic_rx_filter_find(lif, &ctx.cmd.rx_filter_add);
+ if (f && f->state == IONIC_FILTER_STATE_OLD) {
+ /* Someone requested a delete while we were adding
+ * so update the filter info with the results from the add
+ * and the data will be there for the delete on the next
+ * sync cycle.
+ */
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_OLD);
+ } else {
+ err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx,
+ IONIC_FILTER_STATE_SYNCED);
+ }
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ return err;
+}
+
+int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_add(lif, &ac);
+}
+
+static int ionic_lif_filter_del(struct ionic_lif *lif,
+ struct ionic_rx_filter_add_cmd *ac)
+{
+ struct ionic_admin_ctx ctx = {
+ .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
+ .cmd.rx_filter_del = {
+ .opcode = IONIC_CMD_RX_FILTER_DEL,
+ .lif_index = cpu_to_le16(lif->index),
+ },
+ };
+ struct ionic_rx_filter *f;
+ int state;
+ int err;
+
+ spin_lock_bh(&lif->rx_filters.lock);
+ f = ionic_rx_filter_find(lif, ac);
+ if (!f) {
+ spin_unlock_bh(&lif->rx_filters.lock);
+ return -ENOENT;
+ }
+
+ switch (le16_to_cpu(ac->match)) {
+ case IONIC_RX_FILTER_MATCH_VLAN:
+ netdev_dbg(lif->netdev, "%s: rx_filter del VLAN %d id %d\n",
+ __func__, ac->vlan.vlan, f->filter_id);
+ lif->nvlans--;
+ break;
+ case IONIC_RX_FILTER_MATCH_MAC:
+ netdev_dbg(lif->netdev, "%s: rx_filter del ADDR %pM id %d\n",
+ __func__, ac->mac.addr, f->filter_id);
+ if (is_multicast_ether_addr(ac->mac.addr) && lif->nmcast)
+ lif->nmcast--;
+ else if (!is_multicast_ether_addr(ac->mac.addr) && lif->nucast)
+ lif->nucast--;
+ break;
+ }
+
+ state = f->state;
+ ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
+ ionic_rx_filter_free(lif, f);
+
+ spin_unlock_bh(&lif->rx_filters.lock);
+
+ if (state != IONIC_FILTER_STATE_NEW) {
+ err = ionic_adminq_post_wait(lif, &ctx);
+ if (err && err != -EEXIST)
+ return err;
+ }
+
+ return 0;
+}
+
+int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
+ };
+
+ memcpy(&ac.mac.addr, addr, ETH_ALEN);
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid)
+{
+ struct ionic_rx_filter_add_cmd ac = {
+ .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
+ .vlan.vlan = cpu_to_le16(vid),
+ };
+
+ return ionic_lif_filter_del(lif, &ac);
+}
+
struct sync_item {
struct list_head list;
struct ionic_rx_filter f;
@@ -340,14 +577,14 @@ loop_out:
* they can clear room for some new filters
*/
list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) {
- (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_del(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
}
list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) {
- (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr);
+ (void)ionic_lif_filter_add(lif, &sync_item->f.cmd);
list_del(&sync_item->list);
devm_kfree(dev, sync_item);
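A subtle point in the consolidated add path above: the firmware reports no VLAN filter limit, so max_vlans starts at 0 and is learned the first time an add fails with -ENOSPC, after which further adds are skipped and the rx_mode code falls back to promiscuous/allmulti. A simplified, hypothetical sketch of that learn-the-limit-on-ENOSPC idea (the real driver swallows the -ENOSPC and leans on promiscuous mode; names are illustrative):

#include <errno.h>
#include <stdio.h>

struct filter_ctx {
        unsigned int nvlans;    /* filters currently programmed */
        unsigned int max_vlans; /* 0 = unknown, learned on the first -ENOSPC */
};

/* Pretend device with an undocumented capacity of three VLAN filters. */
static int hw_add_vlan(unsigned int vid)
{
        static unsigned int hw_used;

        (void)vid;
        if (hw_used >= 3)
                return -ENOSPC;
        hw_used++;
        return 0;
}

static int vlan_add(struct filter_ctx *ctx, unsigned int vid)
{
        int err;

        /* Once the limit is known, don't even ask the device again. */
        if (ctx->max_vlans && ctx->nvlans >= ctx->max_vlans)
                return -ENOSPC;

        err = hw_add_vlan(vid);
        if (err == -ENOSPC) {
                ctx->max_vlans = ctx->nvlans;   /* remember the learned cap */
                return err;
        }
        if (!err)
                ctx->nvlans++;
        return err;
}

int main(void)
{
        struct filter_ctx ctx = { 0 };
        unsigned int vid;

        for (vid = 10; vid < 16; vid++)
                printf("vid %u -> %d (nvlans=%u, max_vlans=%u)\n", vid,
                       vlan_add(&ctx, vid), ctx.nvlans, ctx.max_vlans);
        return 0;
}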
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
index a66e35f0833b..87b2666f248b 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h
@@ -44,5 +44,7 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif);
void ionic_rx_filter_sync(struct ionic_lif *lif);
int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode);
int ionic_rx_filters_need_sync(struct ionic_lif *lif);
+int ionic_lif_vlan_add(struct ionic_lif *lif, const u16 vid);
+int ionic_lif_vlan_del(struct ionic_lif *lif, const u16 vid);
#endif /* _IONIC_RX_FILTER_H_ */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_stats.c b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
index c14de5fcedea..fd6806b4a1b9 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_stats.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_stats.c
@@ -151,33 +151,11 @@ static const struct ionic_stat_desc ionic_rx_stats_desc[] = {
IONIC_RX_STAT_DESC(vlan_stripped),
};
-static const struct ionic_stat_desc ionic_txq_stats_desc[] = {
- IONIC_TX_Q_STAT_DESC(stop),
- IONIC_TX_Q_STAT_DESC(wake),
- IONIC_TX_Q_STAT_DESC(drop),
- IONIC_TX_Q_STAT_DESC(dbell_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_cq_stats_desc[] = {
- IONIC_CQ_STAT_DESC(compl_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_intr_stats_desc[] = {
- IONIC_INTR_STAT_DESC(rearm_count),
-};
-
-static const struct ionic_stat_desc ionic_dbg_napi_stats_desc[] = {
- IONIC_NAPI_STAT_DESC(poll_count),
-};
#define IONIC_NUM_LIF_STATS ARRAY_SIZE(ionic_lif_stats_desc)
#define IONIC_NUM_PORT_STATS ARRAY_SIZE(ionic_port_stats_desc)
#define IONIC_NUM_TX_STATS ARRAY_SIZE(ionic_tx_stats_desc)
#define IONIC_NUM_RX_STATS ARRAY_SIZE(ionic_rx_stats_desc)
-#define IONIC_NUM_TX_Q_STATS ARRAY_SIZE(ionic_txq_stats_desc)
-#define IONIC_NUM_DBG_CQ_STATS ARRAY_SIZE(ionic_dbg_cq_stats_desc)
-#define IONIC_NUM_DBG_INTR_STATS ARRAY_SIZE(ionic_dbg_intr_stats_desc)
-#define IONIC_NUM_DBG_NAPI_STATS ARRAY_SIZE(ionic_dbg_napi_stats_desc)
#define MAX_Q(lif) ((lif)->netdev->real_num_tx_queues)
@@ -253,21 +231,6 @@ static u64 ionic_sw_stats_get_count(struct ionic_lif *lif)
total += tx_queues * IONIC_NUM_TX_STATS;
total += rx_queues * IONIC_NUM_RX_STATS;
- if (test_bit(IONIC_LIF_F_UP, lif->state) &&
- test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state)) {
- /* tx debug stats */
- total += tx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_TX_Q_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_MAX_NUM_SG_CNTR);
-
- /* rx debug stats */
- total += rx_queues * (IONIC_NUM_DBG_CQ_STATS +
- IONIC_NUM_DBG_INTR_STATS +
- IONIC_NUM_DBG_NAPI_STATS +
- IONIC_MAX_NUM_NAPI_CNTR);
- }
-
return total;
}
@@ -279,22 +242,6 @@ static void ionic_sw_stats_get_tx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_TX_STATS; i++)
ethtool_sprintf(buf, "tx_%d_%s", q_num,
ionic_tx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_%s", q_num,
- ionic_txq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "txq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++)
- ethtool_sprintf(buf, "txq_%d_sg_cntr_%d", q_num, i);
}
static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
@@ -305,22 +252,6 @@ static void ionic_sw_stats_get_rx_strings(struct ionic_lif *lif, u8 **buf,
for (i = 0; i < IONIC_NUM_RX_STATS; i++)
ethtool_sprintf(buf, "rx_%d_%s", q_num,
ionic_rx_stats_desc[i].name);
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_cq_%s", q_num,
- ionic_dbg_cq_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_intr_%s", q_num,
- ionic_dbg_intr_stats_desc[i].name);
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_%s", q_num,
- ionic_dbg_napi_stats_desc[i].name);
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++)
- ethtool_sprintf(buf, "rxq_%d_napi_work_done_%d", q_num, i);
}
static void ionic_sw_stats_get_strings(struct ionic_lif *lif, u8 **buf)
@@ -350,7 +281,6 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_tx_stats *txstats;
- struct ionic_qcq *txqcq;
int i;
txstats = &lif->txqstats[q_num];
@@ -359,38 +289,12 @@ static void ionic_sw_stats_get_txq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(txstats, &ionic_tx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- txqcq = lif->txqcqs[q_num];
- for (i = 0; i < IONIC_NUM_TX_Q_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->q,
- &ionic_txq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&txqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_SG_CNTR; i++) {
- **buf = txstats->sg_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
int q_num)
{
struct ionic_rx_stats *rxstats;
- struct ionic_qcq *rxqcq;
int i;
rxstats = &lif->rxqstats[q_num];
@@ -399,31 +303,6 @@ static void ionic_sw_stats_get_rxq_values(struct ionic_lif *lif, u64 **buf,
**buf = IONIC_READ_STAT64(rxstats, &ionic_rx_stats_desc[i]);
(*buf)++;
}
-
- if (!test_bit(IONIC_LIF_F_UP, lif->state) ||
- !test_bit(IONIC_LIF_F_SW_DEBUG_STATS, lif->state))
- return;
-
- rxqcq = lif->rxqcqs[q_num];
- for (i = 0; i < IONIC_NUM_DBG_CQ_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->cq,
- &ionic_dbg_cq_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_INTR_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->intr,
- &ionic_dbg_intr_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_NUM_DBG_NAPI_STATS; i++) {
- **buf = IONIC_READ_STAT64(&rxqcq->napi_stats,
- &ionic_dbg_napi_stats_desc[i]);
- (*buf)++;
- }
- for (i = 0; i < IONIC_MAX_NUM_NAPI_CNTR; i++) {
- **buf = rxqcq->napi_stats.work_done_cntr[i];
- (*buf)++;
- }
}
static void ionic_sw_stats_get_values(struct ionic_lif *lif, u64 **buf)
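The surviving stats-string code keeps using ethtool_sprintf(), which formats the name into the current ETH_GSTRING_LEN slot and advances the buffer pointer, so each loop iteration emits exactly one string. A hedged sketch of a .get_strings implementation built on it (demo_ names are illustrative):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define DEMO_NUM_Q 4

static void demo_get_strings(struct net_device *netdev, u32 sset, u8 *buf)
{
        int q;

        if (sset != ETH_SS_STATS)
                return;

        for (q = 0; q < DEMO_NUM_Q; q++) {
                /* Each call fills one ETH_GSTRING_LEN slot and advances buf. */
                ethtool_sprintf(&buf, "tx_%d_packets", q);
                ethtool_sprintf(&buf, "tx_%d_bytes", q);
        }
}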
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 37c39581b659..94384f5d2a22 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -14,8 +14,6 @@
static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
- DEBUG_STATS_TXQ_POST(q, ring_dbell);
-
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}
@@ -23,8 +21,6 @@ static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
ionic_desc_cb cb_func, void *cb_arg)
{
ionic_q_post(q, ring_dbell, cb_func, cb_arg);
-
- DEBUG_STATS_RX_BUFF_CNT(q);
}
static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
@@ -507,8 +503,6 @@ int ionic_tx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -546,8 +540,6 @@ int ionic_rx_napi(struct napi_struct *napi, int budget)
work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, work_done);
-
return work_done;
}
@@ -591,9 +583,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget)
tx_work_done + rx_work_done, flags);
}
- DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
- DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);
-
return rx_work_done;
}
@@ -735,7 +724,6 @@ static void ionic_tx_clean(struct ionic_queue *q,
} else if (unlikely(__netif_subqueue_stopped(q->lif->netdev, qi))) {
netif_wake_subqueue(q->lif->netdev, qi);
- q->wake++;
}
desc_info->bytes = skb->len;
@@ -1174,7 +1162,6 @@ static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
if (unlikely(!ionic_q_has_space(q, ndescs))) {
netif_stop_subqueue(q->lif->netdev, q->index);
- q->stop++;
stopped = 1;
/* Might race with ionic_tx_clean, check again */
@@ -1269,7 +1256,6 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
err_out_drop:
- q->stop++;
q->drop++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 344ea1143454..07dd3c3b1771 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -463,6 +463,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
+ u8 addr[ETH_ALEN];
if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (netxen_p3_get_mac_addr(adapter, &mac_addr) != 0)
@@ -474,7 +475,8 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
p = (unsigned char *)&mac_addr;
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = *(p + 5 - i);
+ addr[i] = *(p + 5 - i);
+ eth_hw_addr_set(netdev, addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
@@ -500,7 +502,7 @@ static int netxen_nic_set_mac(struct net_device *netdev, void *p)
}
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
adapter->macaddr_set(adapter, addr->sa_data);
if (netif_running(netdev)) {
@@ -842,7 +844,7 @@ netxen_check_options(struct netxen_adapter *adapter)
adapter->fw_version = NETXEN_VERSION_CODE(fw_major, fw_minor, fw_build);
/* Get FW Mini Coredump template and store it */
- if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
+ if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
if (adapter->mdump.md_template == NULL ||
adapter->fw_version > prev_fw_version) {
kfree(adapter->mdump.md_template);
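The netxen hunk is part of the tree-wide move away from writing netdev->dev_addr directly (the field is being made const); drivers now fill a local buffer and hand it to eth_hw_addr_set(). A hedged before/after sketch of that conversion (the demo_ helper is illustrative):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* The MAC comes out of the hardware in reverse byte order (as in netxen),
 * so build it in a local buffer and install it with eth_hw_addr_set()
 * instead of writing netdev->dev_addr byte by byte.
 */
static void demo_set_mac(struct net_device *netdev, const u8 *hw_mac_reversed)
{
        u8 addr[ETH_ALEN];
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                addr[i] = hw_mac_reversed[ETH_ALEN - 1 - i];

        eth_hw_addr_set(netdev, addr);  /* copies ETH_ALEN bytes into dev_addr */
}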
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index d58e021614cd..d613095b78e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -23,6 +23,8 @@
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
+#include "qed_mfw_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
@@ -89,14 +91,14 @@ static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \
- ((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
+ ((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
~((1 << (p_hwfn->cdev->cache_shift)) - 1))
-#define for_each_hwfn(cdev, i) for (i = 0; i < cdev->num_hwfns; i++)
+#define for_each_hwfn(cdev, i) for (i = 0; i < (cdev)->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
- (val == (cond1) ? true1 : \
- (val == (cond2) ? true2 : def))
+ ((val) == (cond1) ? true1 : \
+ ((val) == (cond2) ? true2 : def))
/* forward */
struct qed_ptt_pool;
@@ -510,7 +512,7 @@ enum qed_hsi_def_type {
struct qed_simd_fp_handler {
void *token;
- void (*func)(void *);
+ void (*func)(void *cookie);
};
enum qed_slowpath_wq_flag {
@@ -703,8 +705,6 @@ struct qed_dev {
#define QED_IS_BB_B0(dev) (QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev) ((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev) QED_IS_AH(dev)
-#define QED_IS_E4(dev) (QED_IS_BB(dev) || QED_IS_AH(dev))
-#define QED_IS_E5(dev) ((dev)->type == QED_DEV_TYPE_E5)
u16 vendor_id;
@@ -875,14 +875,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
-
/**
- * @brief qed_concrete_to_sw_fid - get the sw function id from
- * the concrete value.
+ * qed_concrete_to_sw_fid(): Get the sw function id from
+ * the concrete value.
*
- * @param concrete_fid
+ * @cdev: Qed dev pointer.
+ * @concrete_fid: Concrete fid.
*
- * @return inline u8
+ * Return: inline u8.
*/
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
@@ -902,7 +902,6 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}
#define PKT_LB_TC 9
-#define MAX_NUM_VOQS_E4 20
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -914,7 +913,7 @@ int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
__le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
-#define QED_LEADING_HWFN(dev) (&dev->hwfns[0])
+#define QED_LEADING_HWFN(dev) (&(dev)->hwfns[0])
#define QED_IS_CMT(dev) ((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev) (&(dev)->hwfns[dev->fir_affin])
@@ -935,7 +934,7 @@ void qed_set_fw_mac_addr(__le16 *fw_msb,
#define PQ_FLAGS_LLT (BIT(7))
#define PQ_FLAGS_MTC (BIT(8))
-/* physical queue index for cm context intialization */
+/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
@@ -947,12 +946,18 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
+#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx)))
+
+#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
+ ((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
+
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)
-#define REG_ADDR(cdev, offset) (void __iomem *)((u8 __iomem *)\
- (cdev->regview) + \
- (offset))
+#define REG_ADDR(cdev, offset) ((void __iomem *)((u8 __iomem *)\
+ ((cdev)->regview) + \
+ (offset)))
#define REG_RD(cdev, offset) readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val) writel((u32)val, REG_ADDR(cdev, offset))
@@ -960,7 +965,7 @@ bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
#define DOORBELL(cdev, db_addr, val) \
writel((u32)val, (void __iomem *)((u8 __iomem *)\
- (cdev->doorbells) + (db_addr)))
+ ((cdev)->doorbells) + (db_addr)))
#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
qed_device_num_ports((_p_hwfn)->cdev))
@@ -998,4 +1003,5 @@ int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
+unsigned long qed_get_epoch_time(void);
#endif /* _QED_H */
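Several qed.h changes above only add parentheses around macro parameters. Because macro arguments are substituted textually, an unparenthesized parameter can silently change operator precedence at the call site; a small, illustrative userspace demonstration of the failure mode the parentheses prevent:

#include <stdio.h>

#define SCALE_BAD(x)    (x * 10)        /* parameter not parenthesized */
#define SCALE_GOOD(x)   ((x) * 10)      /* parameter parenthesized */

int main(void)
{
        /* SCALE_BAD(1 + 2) expands to (1 + 2 * 10) == 21,
         * not the intended (1 + 2) * 10 == 30.
         */
        printf("bad:  %d\n", SCALE_BAD(1 + 2));
        printf("good: %d\n", SCALE_GOOD(1 + 2));
        return 0;
}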
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index cb0f2a3a1ac9..452494f8c298 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -54,22 +54,22 @@
/* connection context union */
union conn_context {
- struct e4_core_conn_context core_ctx;
- struct e4_eth_conn_context eth_ctx;
- struct e4_iscsi_conn_context iscsi_ctx;
- struct e4_fcoe_conn_context fcoe_ctx;
- struct e4_roce_conn_context roce_ctx;
+ struct core_conn_context core_ctx;
+ struct eth_conn_context eth_ctx;
+ struct iscsi_conn_context iscsi_ctx;
+ struct fcoe_conn_context fcoe_ctx;
+ struct roce_conn_context roce_ctx;
};
/* TYPE-0 task context - iSCSI, FCOE */
union type0_task_context {
- struct e4_iscsi_task_context iscsi_ctx;
- struct e4_fcoe_task_context fcoe_ctx;
+ struct iscsi_task_context iscsi_ctx;
+ struct fcoe_task_context fcoe_ctx;
};
/* TYPE-1 task context - ROCE */
union type1_task_context {
- struct e4_rdma_task_context roce_ctx;
+ struct rdma_task_context roce_ctx;
};
struct src_ent {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 8adb7ed0c12d..168ce2c50385 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -28,24 +28,23 @@ struct qed_tid_mem {
};
/**
- * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
+ * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
*
+ * @p_hwfn: HW device data.
+ * @p_info: In/out.
*
- * @param p_hwfn
- * @param p_info in/out
- *
- * @return int
+ * Return: Int.
*/
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
struct qed_cxt_info *p_info);
/**
- * @brief qed_cxt_get_tid_mem_info
+ * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
*
- * @param p_hwfn
- * @param p_info
+ * @p_hwfn: HW device data.
+ * @p_info: in/out.
*
- * @return int
+ * Return: int.
*/
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
struct qed_tid_mem *p_info);
@@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *vf_cid);
/**
- * @brief qed_cxt_set_pf_params - Set the PF params for cxt init
+ * qed_cxt_set_pf_params(): Set the PF params for cxt init.
+ *
+ * @p_hwfn: HW device data.
+ * @rdma_tasks: Requested maximum.
*
- * @param p_hwfn
- * @param rdma_tasks - requested maximum
- * @return int
+ * Return: int.
*/
int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
/**
- * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters
+ * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
*
- * @param p_hwfn
- * @param last_line
+ * @p_hwfn: HW device data.
+ * @last_line: Last_line.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
/**
- * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased
+ * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
+ *
+ * @p_hwfn: HW device data.
+ * @used_lines: Used lines.
*
- * @param p_hwfn
- * @param used_lines
+ * Return: Int.
*/
u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
- * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct
+ * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_free
+ * qed_cxt_mngr_free() - Context manager free.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map
+ * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_mngr_setup - Reset the acquired CIDs
+ * qed_cxt_mngr_setup(): Reset the acquired CIDs.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*/
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path.
- *
+ * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path.
+ * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief qed_qm_init_pf - Initailze the QM PF phase, per path
+ * qed_qm_init_pf(): Initailze the QM PF phase, per path.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @is_pf_loading: Is pf pending.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_pf_loading
+ * Return: Void.
*/
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool is_pf_loading);
/**
- * @brief Reconfigures QM pf on the fly
+ * qed_qm_reconf(): Reconfigures QM pf on the fly.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_CXT_PF_CID (0xff)
/**
- * @brief qed_cxt_release - Release a cid
+ * qed_cxt_release_cid(): Release a cid.
*
- * @param p_hwfn
- * @param cid
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ *
+ * Return: Void.
*/
void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
/**
- * @brief qed_cxt_release - Release a cid belonging to a vf-queue
+ * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
+ *
+ * @p_hwfn: HW device data.
+ * @cid: Cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @param p_hwfn
- * @param cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * Return: Void.
*/
void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
/**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
*
- * @return int
+ * Return: Int.
*/
int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid);
/**
- * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type
- * for a vf-queue
+ * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
+ * for a vf-queue.
*
- * @param p_hwfn
- * @param type
- * @param p_cid
- * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ * @p_hwfn: HW device data.
+ * @type: Type.
+ * @p_cid: Pointer cid.
+ * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
*
- * @return int
+ * Return: Int.
*/
int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
enum protocol_type type, u32 *p_cid, u8 vfid);
@@ -334,7 +346,10 @@ struct qed_cxt_mngr {
/* Maximal number of L2 steering filters */
u32 arfs_count;
- u8 task_type_id;
+ u16 iscsi_task_pages;
+ u16 fcoe_task_pages;
+ u16 roce_task_pages;
+ u16 eth_task_pages;
u16 task_ctx_size;
u16 conn_ctx_size;
};
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
new file mode 100644
index 000000000000..9d5a0c9e1ca0
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_dbg_hsi.h
@@ -0,0 +1,1491 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+#ifndef _QED_DBG_HSI_H
+#define _QED_DBG_HSI_H
+
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+/****************************************/
+/* Debug Tools HSI constants and macros */
+/****************************************/
+
+enum block_id {
+ BLOCK_GRC,
+ BLOCK_MISCS,
+ BLOCK_MISC,
+ BLOCK_DBU,
+ BLOCK_PGLUE_B,
+ BLOCK_CNIG,
+ BLOCK_CPMU,
+ BLOCK_NCSI,
+ BLOCK_OPTE,
+ BLOCK_BMB,
+ BLOCK_PCIE,
+ BLOCK_MCP,
+ BLOCK_MCP2,
+ BLOCK_PSWHST,
+ BLOCK_PSWHST2,
+ BLOCK_PSWRD,
+ BLOCK_PSWRD2,
+ BLOCK_PSWWR,
+ BLOCK_PSWWR2,
+ BLOCK_PSWRQ,
+ BLOCK_PSWRQ2,
+ BLOCK_PGLCS,
+ BLOCK_DMAE,
+ BLOCK_PTU,
+ BLOCK_TCM,
+ BLOCK_MCM,
+ BLOCK_UCM,
+ BLOCK_XCM,
+ BLOCK_YCM,
+ BLOCK_PCM,
+ BLOCK_QM,
+ BLOCK_TM,
+ BLOCK_DORQ,
+ BLOCK_BRB,
+ BLOCK_SRC,
+ BLOCK_PRS,
+ BLOCK_TSDM,
+ BLOCK_MSDM,
+ BLOCK_USDM,
+ BLOCK_XSDM,
+ BLOCK_YSDM,
+ BLOCK_PSDM,
+ BLOCK_TSEM,
+ BLOCK_MSEM,
+ BLOCK_USEM,
+ BLOCK_XSEM,
+ BLOCK_YSEM,
+ BLOCK_PSEM,
+ BLOCK_RSS,
+ BLOCK_TMLD,
+ BLOCK_MULD,
+ BLOCK_YULD,
+ BLOCK_XYLD,
+ BLOCK_PRM,
+ BLOCK_PBF_PB1,
+ BLOCK_PBF_PB2,
+ BLOCK_RPB,
+ BLOCK_BTB,
+ BLOCK_PBF,
+ BLOCK_RDIF,
+ BLOCK_TDIF,
+ BLOCK_CDU,
+ BLOCK_CCFC,
+ BLOCK_TCFC,
+ BLOCK_IGU,
+ BLOCK_CAU,
+ BLOCK_UMAC,
+ BLOCK_XMAC,
+ BLOCK_MSTAT,
+ BLOCK_DBG,
+ BLOCK_NIG,
+ BLOCK_WOL,
+ BLOCK_BMBN,
+ BLOCK_IPC,
+ BLOCK_NWM,
+ BLOCK_NWS,
+ BLOCK_MS,
+ BLOCK_PHY_PCIE,
+ BLOCK_LED,
+ BLOCK_AVS_WRAP,
+ BLOCK_PXPREQBUS,
+ BLOCK_BAR0_MAP,
+ BLOCK_MCP_FIO,
+ BLOCK_LAST_INIT,
+ BLOCK_PRS_FC,
+ BLOCK_PBF_FC,
+ BLOCK_NIG_LB_FC,
+ BLOCK_NIG_LB_FC_PLLH,
+ BLOCK_NIG_TX_FC_PLLH,
+ BLOCK_NIG_TX_FC,
+ BLOCK_NIG_RX_FC_PLLH,
+ BLOCK_NIG_RX_FC,
+ MAX_BLOCK_ID
+};
+
+/* binary debug buffer types */
+enum bin_dbg_buffer_type {
+ BIN_BUF_DBG_MODE_TREE,
+ BIN_BUF_DBG_DUMP_REG,
+ BIN_BUF_DBG_DUMP_MEM,
+ BIN_BUF_DBG_IDLE_CHK_REGS,
+ BIN_BUF_DBG_IDLE_CHK_IMMS,
+ BIN_BUF_DBG_IDLE_CHK_RULES,
+ BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
+ BIN_BUF_DBG_ATTN_BLOCKS,
+ BIN_BUF_DBG_ATTN_REGS,
+ BIN_BUF_DBG_ATTN_INDEXES,
+ BIN_BUF_DBG_ATTN_NAME_OFFSETS,
+ BIN_BUF_DBG_BLOCKS,
+ BIN_BUF_DBG_BLOCKS_CHIP_DATA,
+ BIN_BUF_DBG_BUS_LINES,
+ BIN_BUF_DBG_BLOCKS_USER_DATA,
+ BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
+ BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
+ BIN_BUF_DBG_RESET_REGS,
+ BIN_BUF_DBG_PARSING_STRINGS,
+ MAX_BIN_DBG_BUFFER_TYPE
+};
+
+/* Attention bit mapping */
+struct dbg_attn_bit_mapping {
+ u16 data;
+#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
+#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
+#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
+};
+
+/* Attention block per-type data */
+struct dbg_attn_block_type_data {
+ u16 names_offset;
+ u16 reserved1;
+ u8 num_regs;
+ u8 reserved2;
+ u16 regs_offset;
+
+};
+
+/* Block attentions */
+struct dbg_attn_block {
+ struct dbg_attn_block_type_data per_type_data[2];
+};
+
+/* Attention register result */
+struct dbg_attn_reg_result {
+ u32 data;
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
+ u16 block_attn_offset;
+ u16 reserved;
+ u32 sts_val;
+ u32 mask_val;
+};
+
+/* Attention block result */
+struct dbg_attn_block_result {
+ u8 block_id;
+ u8 data;
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
+#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
+#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
+ u16 names_offset;
+ struct dbg_attn_reg_result reg_results[15];
+};
+
+/* Mode header */
+struct dbg_mode_hdr {
+ u16 data;
+#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
+#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
+#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
+};
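+
+/*
+ * The MASK/SHIFT pairs above are typically consumed through the driver's
+ * GET_FIELD() helper (from qed_hsi.h). A minimal decoding sketch, assuming
+ * hdr is a struct dbg_mode_hdr taken from one of the debug arrays:
+ *
+ *	u8 eval_mode = GET_FIELD(hdr.data, DBG_MODE_HDR_EVAL_MODE);
+ *	u16 modes_buf_offset = GET_FIELD(hdr.data,
+ *					 DBG_MODE_HDR_MODES_BUF_OFFSET);
+ */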
+
+/* Attention register */
+struct dbg_attn_reg {
+ struct dbg_mode_hdr mode;
+ u16 block_attn_offset;
+ u32 data;
+#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
+#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
+#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
+#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
+ u32 sts_clr_address;
+ u32 mask_address;
+};
+
+/* Attention types */
+enum dbg_attn_type {
+ ATTN_TYPE_INTERRUPT,
+ ATTN_TYPE_PARITY,
+ MAX_DBG_ATTN_TYPE
+};
+
+/* Block debug data */
+struct dbg_block {
+ u8 name[15];
+ u8 associated_storm_letter;
+};
+
+/* Chip-specific block debug data */
+struct dbg_block_chip {
+ u8 flags;
+#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
+#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
+#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
+#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
+#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
+#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
+ u8 dbg_client_id;
+ u8 reset_reg_id;
+ u8 reset_reg_bit_offset;
+ struct dbg_mode_hdr dbg_bus_mode;
+ u16 reserved1;
+ u8 reserved2;
+ u8 num_of_dbg_bus_lines;
+ u16 dbg_bus_lines_offset;
+ u32 dbg_select_reg_addr;
+ u32 dbg_dword_enable_reg_addr;
+ u32 dbg_shift_reg_addr;
+ u32 dbg_force_valid_reg_addr;
+ u32 dbg_force_frame_reg_addr;
+};
+
+/* Chip-specific block user debug data */
+struct dbg_block_chip_user {
+ u8 num_of_dbg_bus_lines;
+ u8 has_latency_events;
+ u16 names_offset;
+};
+
+/* Block user debug data */
+struct dbg_block_user {
+ u8 name[16];
+};
+
+/* Block Debug line data */
+struct dbg_bus_line {
+ u8 data;
+#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
+#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
+#define DBG_BUS_LINE_IS_256B_MASK 0x1
+#define DBG_BUS_LINE_IS_256B_SHIFT 4
+#define DBG_BUS_LINE_RESERVED_MASK 0x7
+#define DBG_BUS_LINE_RESERVED_SHIFT 5
+ u8 group_sizes;
+};
+
+/* Condition header for registers dump */
+struct dbg_dump_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u8 block_id; /* block ID */
+ u8 data_size; /* size in dwords of the data following this header */
+};
+
+/* Memory data for registers dump */
+struct dbg_dump_mem {
+ u32 dword0;
+#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
+#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
+#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
+ u32 dword1;
+#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
+#define DBG_DUMP_MEM_LENGTH_SHIFT 0
+#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
+#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
+#define DBG_DUMP_MEM_RESERVED_SHIFT 25
+};
+
+/* Register data for registers dump */
+struct dbg_dump_reg {
+ u32 data;
+#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_DUMP_REG_ADDRESS_SHIFT 0
+#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
+#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
+#define DBG_DUMP_REG_LENGTH_MASK 0xFF
+#define DBG_DUMP_REG_LENGTH_SHIFT 24
+};
+
+/* Split header for registers dump */
+struct dbg_dump_split_hdr {
+ u32 hdr;
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
+#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
+#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
+};
+
+/* Condition header for idle check */
+struct dbg_idle_chk_cond_hdr {
+ struct dbg_mode_hdr mode; /* Mode header */
+ u16 data_size; /* size in dwords of the data following this header */
+};
+
+/* Idle Check condition register */
+struct dbg_idle_chk_cond_reg {
+ u32 data;
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
+ u16 num_entries;
+ u8 entry_size;
+ u8 start_entry;
+};
+
+/* Idle Check info register */
+struct dbg_idle_chk_info_reg {
+ u32 data;
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
+#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
+#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
+#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
+ u16 size; /* register size in dwords */
+ struct dbg_mode_hdr mode; /* Mode header */
+};
+
+/* Idle Check register */
+union dbg_idle_chk_reg {
+ struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
+ struct dbg_idle_chk_info_reg info_reg; /* info register */
+};
+
+/* Idle Check result header */
+struct dbg_idle_chk_result_hdr {
+ u16 rule_id; /* Failing rule index */
+ u16 mem_entry_id; /* Failing memory entry index */
+ u8 num_dumped_cond_regs; /* number of dumped condition registers */
+	u8 num_dumped_info_regs; /* number of dumped info registers */
+ u8 severity; /* from dbg_idle_chk_severity_types enum */
+ u8 reserved;
+};
+
+/* Idle Check result register header */
+struct dbg_idle_chk_result_reg_hdr {
+ u8 data;
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
+#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
+#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
+ u8 start_entry; /* index of the first checked entry */
+ u16 size; /* register size in dwords */
+};
+
+/* Idle Check rule */
+struct dbg_idle_chk_rule {
+ u16 rule_id; /* Idle Check rule ID */
+ u8 severity; /* value from dbg_idle_chk_severity_types enum */
+ u8 cond_id; /* Condition ID */
+ u8 num_cond_regs; /* number of condition registers */
+ u8 num_info_regs; /* number of info registers */
+ u8 num_imms; /* number of immediates in the condition */
+ u8 reserved1;
+	u16 reg_offset; /* offset of this rule's registers in the idle check
+ * register array (in dbg_idle_chk_reg units).
+ */
+	u16 imm_offset; /* offset of this rule's immediate values in the
+ * immediate values array (in dwords).
+ */
+};
+
+/* Idle Check rule parsing data */
+struct dbg_idle_chk_rule_parsing_data {
+ u32 data;
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
+#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
+};
+
+/* Idle check severity types */
+enum dbg_idle_chk_severity_types {
+ /* idle check failure should cause an error */
+ IDLE_CHK_SEVERITY_ERROR,
+	/* idle check failure should cause an error only if there's no traffic */
+ IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
+ /* idle check failure should cause a warning */
+ IDLE_CHK_SEVERITY_WARNING,
+ MAX_DBG_IDLE_CHK_SEVERITY_TYPES
+};
+
+/* Reset register */
+struct dbg_reset_reg {
+ u32 data;
+#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
+#define DBG_RESET_REG_ADDR_SHIFT 0
+#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
+#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
+#define DBG_RESET_REG_RESERVED_MASK 0x7F
+#define DBG_RESET_REG_RESERVED_SHIFT 25
+};
+
+/* Debug Bus block data */
+struct dbg_bus_block_data {
+ u8 enable_mask;
+ u8 right_shift;
+ u8 force_valid_mask;
+ u8 force_frame_mask;
+ u8 dword_mask;
+ u8 line_num;
+ u8 hw_id;
+ u8 flags;
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
+#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
+#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
+#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
+};
+
+enum dbg_bus_clients {
+ DBG_BUS_CLIENT_RBCN,
+ DBG_BUS_CLIENT_RBCP,
+ DBG_BUS_CLIENT_RBCR,
+ DBG_BUS_CLIENT_RBCT,
+ DBG_BUS_CLIENT_RBCU,
+ DBG_BUS_CLIENT_RBCF,
+ DBG_BUS_CLIENT_RBCX,
+ DBG_BUS_CLIENT_RBCS,
+ DBG_BUS_CLIENT_RBCH,
+ DBG_BUS_CLIENT_RBCZ,
+ DBG_BUS_CLIENT_OTHER_ENGINE,
+ DBG_BUS_CLIENT_TIMESTAMP,
+ DBG_BUS_CLIENT_CPU,
+ DBG_BUS_CLIENT_RBCY,
+ DBG_BUS_CLIENT_RBCQ,
+ DBG_BUS_CLIENT_RBCM,
+ DBG_BUS_CLIENT_RBCB,
+ DBG_BUS_CLIENT_RBCW,
+ DBG_BUS_CLIENT_RBCV,
+ MAX_DBG_BUS_CLIENTS
+};
+
+/* Debug Bus constraint operation types */
+enum dbg_bus_constraint_ops {
+ DBG_BUS_CONSTRAINT_OP_EQ,
+ DBG_BUS_CONSTRAINT_OP_NE,
+ DBG_BUS_CONSTRAINT_OP_LT,
+ DBG_BUS_CONSTRAINT_OP_LTC,
+ DBG_BUS_CONSTRAINT_OP_LE,
+ DBG_BUS_CONSTRAINT_OP_LEC,
+ DBG_BUS_CONSTRAINT_OP_GT,
+ DBG_BUS_CONSTRAINT_OP_GTC,
+ DBG_BUS_CONSTRAINT_OP_GE,
+ DBG_BUS_CONSTRAINT_OP_GEC,
+ MAX_DBG_BUS_CONSTRAINT_OPS
+};
+
+/* Debug Bus trigger state data */
+struct dbg_bus_trigger_state_data {
+ u8 msg_len;
+ u8 constraint_dword_mask;
+ u8 storm_id;
+ u8 reserved;
+};
+
+/* Debug Bus memory address */
+struct dbg_bus_mem_addr {
+ u32 lo;
+ u32 hi;
+};
+
+/* Debug Bus PCI buffer data */
+struct dbg_bus_pci_buf_data {
+ struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
+ struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
+ u32 size; /* PCI buffer size in bytes */
+};
+
+/* Debug Bus Storm EID range filter params */
+struct dbg_bus_storm_eid_range_params {
+ u8 min; /* Minimal event ID to filter on */
+ u8 max; /* Maximal event ID to filter on */
+};
+
+/* Debug Bus Storm EID mask filter params */
+struct dbg_bus_storm_eid_mask_params {
+ u8 val; /* Event ID value */
+	u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
+};
+
+/* Debug Bus Storm EID filter params */
+union dbg_bus_storm_eid_params {
+ struct dbg_bus_storm_eid_range_params range;
+ struct dbg_bus_storm_eid_mask_params mask;
+};
+
+/* Debug Bus Storm data */
+struct dbg_bus_storm_data {
+ u8 enabled;
+ u8 mode;
+ u8 hw_id;
+ u8 eid_filter_en;
+ u8 eid_range_not_mask;
+ u8 cid_filter_en;
+ union dbg_bus_storm_eid_params eid_filter_params;
+ u32 cid;
+};
+
+/* Debug Bus data */
+struct dbg_bus_data {
+ u32 app_version;
+ u8 state;
+ u8 mode_256b_en;
+ u8 num_enabled_blocks;
+ u8 num_enabled_storms;
+ u8 target;
+ u8 one_shot_en;
+ u8 grc_input_en;
+ u8 timestamp_input_en;
+ u8 filter_en;
+ u8 adding_filter;
+ u8 filter_pre_trigger;
+ u8 filter_post_trigger;
+ u8 trigger_en;
+ u8 filter_constraint_dword_mask;
+ u8 next_trigger_state;
+ u8 next_constraint_id;
+ struct dbg_bus_trigger_state_data trigger_states[3];
+ u8 filter_msg_len;
+ u8 rcv_from_other_engine;
+ u8 blocks_dword_mask;
+ u8 blocks_dword_overlap;
+ u32 hw_id_mask;
+ struct dbg_bus_pci_buf_data pci_buf;
+ struct dbg_bus_block_data blocks[132];
+ struct dbg_bus_storm_data storms[6];
+};
+
+/* Debug bus states */
+enum dbg_bus_states {
+ DBG_BUS_STATE_IDLE,
+ DBG_BUS_STATE_READY,
+ DBG_BUS_STATE_RECORDING,
+ DBG_BUS_STATE_STOPPED,
+ MAX_DBG_BUS_STATES
+};
+
+/* Debug Bus Storm modes */
+enum dbg_bus_storm_modes {
+ DBG_BUS_STORM_MODE_PRINTF,
+ DBG_BUS_STORM_MODE_PRAM_ADDR,
+ DBG_BUS_STORM_MODE_DRA_RW,
+ DBG_BUS_STORM_MODE_DRA_W,
+ DBG_BUS_STORM_MODE_LD_ST_ADDR,
+ DBG_BUS_STORM_MODE_DRA_FSM,
+ DBG_BUS_STORM_MODE_FAST_DBGMUX,
+ DBG_BUS_STORM_MODE_RH,
+ DBG_BUS_STORM_MODE_RH_WITH_STORE,
+ DBG_BUS_STORM_MODE_FOC,
+ DBG_BUS_STORM_MODE_EXT_STORE,
+ MAX_DBG_BUS_STORM_MODES
+};
+
+/* Debug bus target IDs */
+enum dbg_bus_targets {
+ DBG_BUS_TARGET_ID_INT_BUF,
+ DBG_BUS_TARGET_ID_NIG,
+ DBG_BUS_TARGET_ID_PCI,
+ MAX_DBG_BUS_TARGETS
+};
+
+/* GRC Dump data */
+struct dbg_grc_data {
+ u8 params_initialized;
+ u8 reserved1;
+ u16 reserved2;
+ u32 param_val[48];
+};
+
+/* Debug GRC params */
+enum dbg_grc_params {
+ DBG_GRC_PARAM_DUMP_TSTORM,
+ DBG_GRC_PARAM_DUMP_MSTORM,
+ DBG_GRC_PARAM_DUMP_USTORM,
+ DBG_GRC_PARAM_DUMP_XSTORM,
+ DBG_GRC_PARAM_DUMP_YSTORM,
+ DBG_GRC_PARAM_DUMP_PSTORM,
+ DBG_GRC_PARAM_DUMP_REGS,
+ DBG_GRC_PARAM_DUMP_RAM,
+ DBG_GRC_PARAM_DUMP_PBUF,
+ DBG_GRC_PARAM_DUMP_IOR,
+ DBG_GRC_PARAM_DUMP_VFC,
+ DBG_GRC_PARAM_DUMP_CM_CTX,
+ DBG_GRC_PARAM_DUMP_PXP,
+ DBG_GRC_PARAM_DUMP_RSS,
+ DBG_GRC_PARAM_DUMP_CAU,
+ DBG_GRC_PARAM_DUMP_QM,
+ DBG_GRC_PARAM_DUMP_MCP,
+ DBG_GRC_PARAM_DUMP_DORQ,
+ DBG_GRC_PARAM_DUMP_CFC,
+ DBG_GRC_PARAM_DUMP_IGU,
+ DBG_GRC_PARAM_DUMP_BRB,
+ DBG_GRC_PARAM_DUMP_BTB,
+ DBG_GRC_PARAM_DUMP_BMB,
+ DBG_GRC_PARAM_RESERVD1,
+ DBG_GRC_PARAM_DUMP_MULD,
+ DBG_GRC_PARAM_DUMP_PRS,
+ DBG_GRC_PARAM_DUMP_DMAE,
+ DBG_GRC_PARAM_DUMP_TM,
+ DBG_GRC_PARAM_DUMP_SDM,
+ DBG_GRC_PARAM_DUMP_DIF,
+ DBG_GRC_PARAM_DUMP_STATIC,
+ DBG_GRC_PARAM_UNSTALL,
+ DBG_GRC_PARAM_RESERVED2,
+ DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
+ DBG_GRC_PARAM_EXCLUDE_ALL,
+ DBG_GRC_PARAM_CRASH,
+ DBG_GRC_PARAM_PARITY_SAFE,
+ DBG_GRC_PARAM_DUMP_CM,
+ DBG_GRC_PARAM_DUMP_PHY,
+ DBG_GRC_PARAM_NO_MCP,
+ DBG_GRC_PARAM_NO_FW_VER,
+ DBG_GRC_PARAM_RESERVED3,
+ DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
+ DBG_GRC_PARAM_DUMP_ILT_CDUC,
+ DBG_GRC_PARAM_DUMP_ILT_CDUT,
+ DBG_GRC_PARAM_DUMP_CAU_EXT,
+ MAX_DBG_GRC_PARAMS
+};
+
+/* Debug status codes */
+enum dbg_status {
+ DBG_STATUS_OK,
+ DBG_STATUS_APP_VERSION_NOT_SET,
+ DBG_STATUS_UNSUPPORTED_APP_VERSION,
+ DBG_STATUS_DBG_BLOCK_NOT_RESET,
+ DBG_STATUS_INVALID_ARGS,
+ DBG_STATUS_OUTPUT_ALREADY_SET,
+ DBG_STATUS_INVALID_PCI_BUF_SIZE,
+ DBG_STATUS_PCI_BUF_ALLOC_FAILED,
+ DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
+ DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
+ DBG_STATUS_NO_MATCHING_FRAMING_MODE,
+ DBG_STATUS_VFC_READ_ERROR,
+ DBG_STATUS_STORM_ALREADY_ENABLED,
+ DBG_STATUS_STORM_NOT_ENABLED,
+ DBG_STATUS_BLOCK_ALREADY_ENABLED,
+ DBG_STATUS_BLOCK_NOT_ENABLED,
+ DBG_STATUS_NO_INPUT_ENABLED,
+ DBG_STATUS_NO_FILTER_TRIGGER_256B,
+ DBG_STATUS_FILTER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_ALREADY_ENABLED,
+ DBG_STATUS_TRIGGER_NOT_ENABLED,
+ DBG_STATUS_CANT_ADD_CONSTRAINT,
+ DBG_STATUS_TOO_MANY_TRIGGER_STATES,
+ DBG_STATUS_TOO_MANY_CONSTRAINTS,
+ DBG_STATUS_RECORDING_NOT_STARTED,
+ DBG_STATUS_DATA_DIDNT_TRIGGER,
+ DBG_STATUS_NO_DATA_RECORDED,
+ DBG_STATUS_DUMP_BUF_TOO_SMALL,
+ DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
+ DBG_STATUS_UNKNOWN_CHIP,
+ DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
+ DBG_STATUS_BLOCK_IN_RESET,
+ DBG_STATUS_INVALID_TRACE_SIGNATURE,
+ DBG_STATUS_INVALID_NVRAM_BUNDLE,
+ DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
+ DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
+ DBG_STATUS_NVRAM_READ_FAILED,
+ DBG_STATUS_IDLE_CHK_PARSE_FAILED,
+ DBG_STATUS_MCP_TRACE_BAD_DATA,
+ DBG_STATUS_MCP_TRACE_NO_META,
+ DBG_STATUS_MCP_COULD_NOT_HALT,
+ DBG_STATUS_MCP_COULD_NOT_RESUME,
+ DBG_STATUS_RESERVED0,
+ DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
+ DBG_STATUS_IGU_FIFO_BAD_DATA,
+ DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
+ DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
+ DBG_STATUS_REG_FIFO_BAD_DATA,
+ DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
+ DBG_STATUS_DBG_ARRAY_NOT_SET,
+ DBG_STATUS_RESERVED1,
+ DBG_STATUS_NON_MATCHING_LINES,
+ DBG_STATUS_INSUFFICIENT_HW_IDS,
+ DBG_STATUS_DBG_BUS_IN_USE,
+ DBG_STATUS_INVALID_STORM_DBG_MODE,
+ DBG_STATUS_OTHER_ENGINE_BB_ONLY,
+ DBG_STATUS_FILTER_SINGLE_HW_ID,
+ DBG_STATUS_TRIGGER_SINGLE_HW_ID,
+ DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
+ MAX_DBG_STATUS
+};
+
+/* Debug Storms IDs */
+enum dbg_storms {
+ DBG_TSTORM_ID,
+ DBG_MSTORM_ID,
+ DBG_USTORM_ID,
+ DBG_XSTORM_ID,
+ DBG_YSTORM_ID,
+ DBG_PSTORM_ID,
+ MAX_DBG_STORMS
+};
+
+/* Idle Check data */
+struct idle_chk_data {
+ u32 buf_size;
+ u8 buf_size_set;
+ u8 reserved1;
+ u16 reserved2;
+};
+
+struct pretend_params {
+ u8 split_type;
+ u8 reserved;
+ u16 split_id;
+};
+
+/* Debug Tools data (per HW function)
+ */
+struct dbg_tools_data {
+ struct dbg_grc_data grc;
+ struct dbg_bus_data bus;
+ struct idle_chk_data idle_chk;
+ u8 mode_enable[40];
+ u8 block_in_reset[132];
+ u8 chip_id;
+ u8 hw_type;
+ u8 num_ports;
+ u8 num_pfs_per_port;
+ u8 num_vfs;
+ u8 initialized;
+ u8 use_dmae;
+ u8 reserved;
+ struct pretend_params pretend;
+ u32 num_regs_read;
+};
+
+/* ILT Clients */
+enum ilt_clients {
+ ILT_CLI_CDUC,
+ ILT_CLI_CDUT,
+ ILT_CLI_QM,
+ ILT_CLI_TM,
+ ILT_CLI_SRC,
+ ILT_CLI_TSDM,
+ ILT_CLI_RGFS,
+ ILT_CLI_TGFS,
+ MAX_ILT_CLIENTS
+};
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug
+ * arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: A pointer to the binary data with debug arrays.
+ *
+ * Return: enum dbg_status.
+ */
+enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_read_regs(): Reads registers into a buffer (using GRC).
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf: Destination buffer.
+ * @addr: Source GRC address in dwords.
+ * @len: Number of registers to read.
+ *
+ * Return: Void.
+ */
+void qed_read_regs(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
+
+/**
+ * qed_read_fw_info(): Reads FW info from the chip.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_info: (Out) a pointer to write the FW info into.
+ *
+ * Return: True if the FW info was read successfully from one of the Storms,
+ * or false if all Storms are in reset.
+ *
+ * The FW info contains FW-related information, such as the FW version,
+ * FW image (main/L2B/kuku), FW timestamp, etc.
+ * The FW info is read from the internal RAM of the first Storm that is not in
+ * reset.
+ */
+bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt, struct fw_info *fw_info);
+/**
+ * qed_dbg_grc_config(): Sets the value of a GRC parameter.
+ *
+ * @p_hwfn: HW device data.
+ * @grc_param: GRC parameter.
+ * @val: Value to set.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - Grc_param is invalid.
+ * - Val is outside the allowed boundaries.
+ */
+enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
+ enum dbg_grc_params grc_param, u32 val);
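+
+/*
+ * A minimal sketch of tweaking a single GRC parameter, e.g. excluding MCP
+ * data from the next GRC dump; the value 1 is assumed here to mean "enabled":
+ *
+ *	enum dbg_status rc;
+ *
+ *	rc = qed_dbg_grc_config(p_hwfn, DBG_GRC_PARAM_NO_MCP, 1);
+ */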
+
+/**
+ * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their
+ * default value.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
+/**
+ * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for
+ * GRC Dump.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the collected GRC data into.
+ * @buf_size_in_dwords:Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified dump buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
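+
+/*
+ * A minimal GRC dump sketch, assuming the caller already holds p_hwfn and an
+ * acquired p_ptt and can use vzalloc()/vfree():
+ *
+ *	u32 size_dwords, dumped_dwords, *buf;
+ *	enum dbg_status rc;
+ *
+ *	qed_dbg_grc_set_params_default(p_hwfn);
+ *	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &size_dwords);
+ *	if (rc != DBG_STATUS_OK)
+ *		return rc;
+ *	buf = vzalloc(size_dwords * sizeof(u32));
+ *	if (!buf)
+ *		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ *	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, buf, size_dwords,
+ *			      &dumped_dwords);
+ *	(consume dumped_dwords dwords from buf, then vfree(buf))
+ */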
+
+/**
+ * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size
+ * for idle check results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) required buffer size (in dwords) for the idle check
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_idle_chk_dump(): Performs idle check and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the idle check data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size
+ * for mcp trace results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the mcp trace data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - The trace data in MCP scratchpad contain an invalid signature.
+ * - The bundle ID in NVRAM is invalid.
+ * - The trace meta data cannot be found (in NVRAM or image file).
+ * - The trace meta data cannot be read (from NVRAM or image file).
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the reg fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size
+ * for the IGU fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo
+ * data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+
+/**
+ * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into
+ * the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the IGU fifo data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * - The specified buffer is too small
+ * - DMAE transaction failed
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_protection_override_get_dump_buf_size(): Returns the required
+ * buffer size for protection override window results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for protection
+ * override data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status
+qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_protection_override_dump(): Reads protection override window
+ * entries and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the protection override data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * - DMAE transaction failed.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+/**
+ * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer
+ * size for FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *buf_size);
+/**
+ * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results
+ * into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dump_buf: Pointer to write the FW Asserts data into.
+ * @buf_size_in_dwords: Size of the specified buffer in dwords.
+ * @num_dumped_dwords: (OUT) number of dumped dwords.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * - The specified buffer is too small.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ u32 buf_size_in_dwords,
+ u32 *num_dumped_dwords);
+
+/**
+ * qed_dbg_read_attn(): Reads the attention registers of the specified
+ * block and type, and writes the results into the specified buffer.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @block: Block ID.
+ * @attn_type: Attention type.
+ * @clear_status: Indicates if the attention status should be cleared.
+ * @results: (OUT) Pointer to write the read results into.
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ enum block_id block,
+ enum dbg_attn_type attn_type,
+ bool clear_status,
+ struct dbg_attn_block_result *results);
+
+/**
+ * qed_dbg_print_attn(): Prints attention registers values in the
+ * specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
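+
+/*
+ * A minimal sketch of reading and printing the parity attentions of a single
+ * block, assuming the caller holds p_hwfn and an acquired p_ptt:
+ *
+ *	struct dbg_attn_block_result res;
+ *	enum dbg_status rc;
+ *
+ *	rc = qed_dbg_read_attn(p_hwfn, p_ptt, BLOCK_QM, ATTN_TYPE_PARITY,
+ *			       true, &res);
+ *	if (rc == DBG_STATUS_OK)
+ *		rc = qed_dbg_print_attn(p_hwfn, &res);
+ */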
+
+/******************************* Data Types **********************************/
+
+struct mcp_trace_format {
+ u32 data;
+#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
+#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
+#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
+#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
+#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
+#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
+#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
+#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
+#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
+#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
+#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
+#define MCP_TRACE_FORMAT_LEN_OFFSET 24
+
+ char *format_str;
+};
+
+/* MCP Trace Meta data structure */
+struct mcp_trace_meta {
+ u32 modules_num;
+ char **modules;
+ u32 formats_num;
+ struct mcp_trace_format *formats;
+ bool is_allocated;
+};
+
+/* Debug Tools user data */
+struct dbg_tools_user_data {
+ struct mcp_trace_meta mcp_trace_meta;
+ const u32 *mcp_trace_user_meta_buf;
+};
+
+/******************************** Constants **********************************/
+
+#define MAX_NAME_LEN 16
+
+/***************************** Public Functions *******************************/
+
+/**
+ * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with
+ * debug arrays.
+ *
+ * @p_hwfn: HW device data.
+ * @bin_ptr: a pointer to the binary data with debug arrays.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
+ const u8 * const bin_ptr);
+
+/**
+ * qed_dbg_alloc_user_data(): Allocates user debug data.
+ *
+ * @p_hwfn: HW device data.
+ * @user_data_ptr: (OUT) a pointer to the allocated memory.
+ *
+ * Return: dbg_status.
+ */
+enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
+ void **user_data_ptr);
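+
+/*
+ * A minimal user-side init sketch; bin_buf is assumed to point at the debug
+ * arrays binary that the calling application provides:
+ *
+ *	void *user_data;
+ *	enum dbg_status rc;
+ *
+ *	rc = qed_dbg_user_set_bin_ptr(p_hwfn, bin_buf);
+ *	if (rc == DBG_STATUS_OK)
+ *		rc = qed_dbg_alloc_user_data(p_hwfn, &user_data);
+ */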
+
+/**
+ * qed_dbg_get_status_str(): Returns a string for the specified status.
+ *
+ * @status: A debug status code.
+ *
+ * Return: A string for the specified status.
+ */
+const char *qed_dbg_get_status_str(enum dbg_status status);
+
+/**
+ * qed_get_idle_chk_results_buf_size(): Returns the required buffer size
+ * for idle check results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+/**
+ * qed_print_idle_chk_results(): Prints idle check results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: idle check dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the idle check results.
+ * @num_errors: (OUT) number of errors found in idle check.
+ * @num_warnings: (OUT) number of warnings found in idle check.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf,
+ u32 *num_errors,
+ u32 *num_warnings);
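+
+/*
+ * A minimal parsing sketch for an idle check dump previously collected with
+ * qed_dbg_idle_chk_dump(), assuming vzalloc()/vfree() are available:
+ *
+ *	u32 txt_size, num_errors, num_warnings;
+ *	char *txt;
+ *
+ *	if (qed_get_idle_chk_results_buf_size(p_hwfn, dump_buf,
+ *					      num_dumped_dwords,
+ *					      &txt_size) != DBG_STATUS_OK)
+ *		return;
+ *	txt = vzalloc(txt_size);
+ *	if (txt)
+ *		qed_print_idle_chk_results(p_hwfn, dump_buf, num_dumped_dwords,
+ *					   txt, &num_errors, &num_warnings);
+ *	vfree(txt);
+ */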
+
+/**
+ * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @meta_buf: Meta buffer.
+ *
+ * Return: Void.
+ *
+ * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
+ * no NVRAM access).
+ */
+void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
+ const u32 *meta_buf);
+
+/**
+ * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size
+ * for MCP Trace results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP Trace dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_mcp_trace_results(): Prints MCP Trace results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and
+ * keeps the MCP trace meta data allocated, to support continuous MCP Trace
+ * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
+ * be called to free the meta data.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ char *results_buf);
+
+/**
+ * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: MCP trace dump buffer, starting from the header.
+ * @num_dumped_bytes: Number of bytes that were dumped.
+ * @results_buf: Buffer for printing the mcp trace results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
+ u8 *dump_buf,
+ u32 num_dumped_bytes,
+ char *results_buf);
+
+/**
+ * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data.
+ * Should be called after continuous MCP Trace parsing.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
+ */
+void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
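+
+/*
+ * A minimal sketch of the continuous-parsing flow described above. The chunk
+ * iteration helper is hypothetical and shown only for illustration, and
+ * results_buf sizing is assumed to be handled by the caller:
+ *
+ *	while ((chunk = next_mcp_trace_chunk()) != NULL)
+ *		qed_print_mcp_trace_results_cont(p_hwfn, chunk, results_buf);
+ *	qed_mcp_trace_free_meta_data(p_hwfn);
+ */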
+
+/**
+ * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size
+ * for reg_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_reg_fifo_results(): Prints reg fifo results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Reg fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size
+ * for igu_fifo results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_igu_fifo_results(): Prints IGU fifo results
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: IGU fifo dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the IGU fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_protection_override_results_buf_size(): Returns the required
+ * buffer size for protection override results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status
+qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_protection_override_results(): Prints protection override
+ * results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: Protection override dump buffer, starting from the header.
+ * @num_dumped_dwords: Number of dwords that were dumped.
+ * @results_buf: Buffer for printing the reg fifo results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size
+ * for FW Asserts results (in bytes).
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed
+ * results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ u32 *results_buf_size);
+
+/**
+ * qed_print_fw_asserts_results(): Prints FW Asserts results.
+ *
+ * @p_hwfn: HW device data.
+ * @dump_buf: FW Asserts dump buffer, starting from the header.
+ * @num_dumped_dwords: number of dwords that were dumped.
+ * @results_buf: buffer for printing the FW Asserts results.
+ *
+ * Return: Error if the parsing fails, ok otherwise.
+ */
+enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ u32 num_dumped_dwords,
+ char *results_buf);
+
+/**
+ * qed_dbg_parse_attn(): Parses and prints attention registers values in
+ * the specified results struct.
+ *
+ * @p_hwfn: HW device data.
+ * @results: Pointer to the attention read results
+ *
+ * Return: Error if one of the following holds:
+ * - The version wasn't set.
+ * Otherwise, returns ok.
+ */
+enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
+ struct dbg_attn_block_result *results);
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
index e1798925b444..ea839e605577 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.h
@@ -84,16 +84,17 @@ struct qed_dcbx_mib_meta_data {
extern const struct qed_eth_dcbnl_ops qed_dcbnl_ops_pass;
#ifdef CONFIG_DCB
-int qed_dcbx_get_config_params(struct qed_hwfn *, struct qed_dcbx_set *);
+int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
+ struct qed_dcbx_set *params);
-int qed_dcbx_config_params(struct qed_hwfn *,
- struct qed_ptt *, struct qed_dcbx_set *, bool);
+int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ struct qed_dcbx_set *params, bool hw_commit);
#endif
/* QED local interface routines */
int
-qed_dcbx_mib_update_event(struct qed_hwfn *,
- struct qed_ptt *, enum qed_mib_read_type);
+qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ enum qed_mib_read_type type);
int qed_dcbx_info_alloc(struct qed_hwfn *p_hwfn);
void qed_dcbx_info_free(struct qed_hwfn *p_hwfn);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index 6ab3e60d4928..e3edca187ddf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/module.h>
@@ -10,6 +10,7 @@
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_dbg_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -121,6 +122,11 @@ static u32 cond0(const u32 *r, const u32 *imm)
return (r[0] & ~r[1]) != imm[0];
}
+static u32 cond14(const u32 *r, const u32 *imm)
+{
+ return (r[0] | imm[0]) != imm[1];
+}
+
static u32 cond1(const u32 *r, const u32 *imm)
{
return r[0] != imm[0];
@@ -172,6 +178,7 @@ static u32(*cond_arr[]) (const u32 *r, const u32 *imm) = {
cond11,
cond12,
cond13,
+ cond14,
};
#define NUM_PHYS_BLOCKS 84
@@ -208,10 +215,61 @@ enum dbg_bus_frame_modes {
DBG_BUS_NUM_FRAME_MODES
};
+/* Debug bus SEMI frame modes */
+enum dbg_bus_semi_frame_modes {
+ DBG_BUS_SEMI_FRAME_MODE_4FAST = 0, /* 4 fast dw */
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW = 1, /* 2 fast dw, 2 slow dw */
+	DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW = 2, /* 1 fast dw, 3 slow dw */
+ DBG_BUS_SEMI_FRAME_MODE_4SLOW = 3, /* 4 slow dw */
+ DBG_BUS_SEMI_NUM_FRAME_MODES
+};
+
+/* Debug bus filter types */
+enum dbg_bus_filter_types {
+ DBG_BUS_FILTER_TYPE_OFF, /* Filter always off */
+ DBG_BUS_FILTER_TYPE_PRE, /* Filter before trigger only */
+ DBG_BUS_FILTER_TYPE_POST, /* Filter after trigger only */
+ DBG_BUS_FILTER_TYPE_ON /* Filter always on */
+};
+
+/* Debug bus pre-trigger recording types */
+enum dbg_bus_pre_trigger_types {
+ DBG_BUS_PRE_TRIGGER_FROM_ZERO, /* Record from time 0 */
+ DBG_BUS_PRE_TRIGGER_NUM_CHUNKS, /* Record some chunks before trigger */
+ DBG_BUS_PRE_TRIGGER_DROP /* Drop data before trigger */
+};
+
+/* Debug bus post-trigger recording types */
+enum dbg_bus_post_trigger_types {
+ DBG_BUS_POST_TRIGGER_RECORD, /* Start recording after trigger */
+ DBG_BUS_POST_TRIGGER_DROP /* Drop data after trigger */
+};
+
+/* Debug bus other engine mode */
+enum dbg_bus_other_engine_modes {
+ DBG_BUS_OTHER_ENGINE_MODE_NONE,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX,
+ DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX
+};
+
+/* DBG block Framing mode definitions */
+struct framing_mode_defs {
+ u8 id;
+ u8 blocks_dword_mask;
+ u8 storms_dword_mask;
+ u8 semi_framing_mode_id;
+ u8 full_buf_thr;
+};
+
/* Chip constant definitions */
struct chip_defs {
const char *name;
+ u8 dwords_per_cycle;
+ u8 num_framing_modes;
u32 num_ilt_pages;
+ struct framing_mode_defs *framing_modes;
};
/* HW type constant definitions */
@@ -334,7 +392,7 @@ struct split_type_defs {
#define FIELD_BIT_OFFSET(type, field) type ## _ ## field ## _ ## OFFSET
#define FIELD_BIT_SIZE(type, field) type ## _ ## field ## _ ## SIZE
#define FIELD_DWORD_OFFSET(type, field) \
- (int)(FIELD_BIT_OFFSET(type, field) / 32)
+ ((int)(FIELD_BIT_OFFSET(type, field) / 32))
#define FIELD_DWORD_SHIFT(type, field) (FIELD_BIT_OFFSET(type, field) % 32)
#define FIELD_BIT_MASK(type, field) \
(((1 << FIELD_BIT_SIZE(type, field)) - 1) << \
@@ -431,11 +489,13 @@ struct split_type_defs {
#define STATIC_DEBUG_LINE_DWORDS 9
-#define NUM_COMMON_GLOBAL_PARAMS 9
+#define NUM_COMMON_GLOBAL_PARAMS 11
#define MAX_RECURSION_DEPTH 10
+#define FW_IMG_KUKU 0
#define FW_IMG_MAIN 1
+#define FW_IMG_L2B 2
#define REG_FIFO_ELEMENT_DWORDS 2
#define REG_FIFO_DEPTH_ELEMENTS 32
@@ -464,10 +524,25 @@ struct split_type_defs {
/***************************** Constant Arrays *******************************/
+/* DBG block framing mode definitions, in descending preference order */
+static struct framing_mode_defs s_framing_mode_defs[4] = {
+ {DBG_BUS_FRAME_MODE_4ST, 0x0, 0xf,
+ DBG_BUS_SEMI_FRAME_MODE_4FAST,
+ 10},
+ {DBG_BUS_FRAME_MODE_4HW, 0xf, 0x0, DBG_BUS_SEMI_FRAME_MODE_4SLOW,
+ 10},
+ {DBG_BUS_FRAME_MODE_2ST_2HW, 0x3, 0xc,
+ DBG_BUS_SEMI_FRAME_MODE_2FAST_2SLOW, 10},
+ {DBG_BUS_FRAME_MODE_1ST_3HW, 0x7, 0x8,
+ DBG_BUS_SEMI_FRAME_MODE_1FAST_3SLOW, 10}
+};
+
/* Chip constant definitions array */
static struct chip_defs s_chip_defs[MAX_CHIP_IDS] = {
- {"bb", PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2},
- {"ah", PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2}
+ {"bb", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_BB / 2,
+ s_framing_mode_defs},
+ {"ah", 4, DBG_BUS_NUM_FRAME_MODES, PSWRQ2_REG_ILT_MEMORY_SIZE_K2 / 2,
+ s_framing_mode_defs}
};
/* Storm constant definitions array */
@@ -477,8 +552,8 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCT},
true,
TSEM_REG_FAST_MEMORY,
- TSEM_REG_DBG_FRAME_MODE_BB_K2, TSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- TSEM_REG_SLOW_DBG_MODE_BB_K2, TSEM_REG_DBG_MODE1_CFG_BB_K2,
+ TSEM_REG_DBG_FRAME_MODE, TSEM_REG_SLOW_DBG_ACTIVE,
+ TSEM_REG_SLOW_DBG_MODE, TSEM_REG_DBG_MODE1_CFG,
TSEM_REG_SYNC_DBG_EMPTY, TSEM_REG_DBG_GPRE_VECT,
TCM_REG_CTX_RBC_ACCS,
{TCM_REG_AGG_CON_CTX, TCM_REG_SM_CON_CTX, TCM_REG_AGG_TASK_CTX,
@@ -491,10 +566,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCT, DBG_BUS_CLIENT_RBCM},
false,
MSEM_REG_FAST_MEMORY,
- MSEM_REG_DBG_FRAME_MODE_BB_K2,
- MSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- MSEM_REG_SLOW_DBG_MODE_BB_K2,
- MSEM_REG_DBG_MODE1_CFG_BB_K2,
+ MSEM_REG_DBG_FRAME_MODE,
+ MSEM_REG_SLOW_DBG_ACTIVE,
+ MSEM_REG_SLOW_DBG_MODE,
+ MSEM_REG_DBG_MODE1_CFG,
MSEM_REG_SYNC_DBG_EMPTY,
MSEM_REG_DBG_GPRE_VECT,
MCM_REG_CTX_RBC_ACCS,
@@ -508,10 +583,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCU, DBG_BUS_CLIENT_RBCU},
false,
USEM_REG_FAST_MEMORY,
- USEM_REG_DBG_FRAME_MODE_BB_K2,
- USEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- USEM_REG_SLOW_DBG_MODE_BB_K2,
- USEM_REG_DBG_MODE1_CFG_BB_K2,
+ USEM_REG_DBG_FRAME_MODE,
+ USEM_REG_SLOW_DBG_ACTIVE,
+ USEM_REG_SLOW_DBG_MODE,
+ USEM_REG_DBG_MODE1_CFG,
USEM_REG_SYNC_DBG_EMPTY,
USEM_REG_DBG_GPRE_VECT,
UCM_REG_CTX_RBC_ACCS,
@@ -525,10 +600,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCX},
false,
XSEM_REG_FAST_MEMORY,
- XSEM_REG_DBG_FRAME_MODE_BB_K2,
- XSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- XSEM_REG_SLOW_DBG_MODE_BB_K2,
- XSEM_REG_DBG_MODE1_CFG_BB_K2,
+ XSEM_REG_DBG_FRAME_MODE,
+ XSEM_REG_SLOW_DBG_ACTIVE,
+ XSEM_REG_SLOW_DBG_MODE,
+ XSEM_REG_DBG_MODE1_CFG,
XSEM_REG_SYNC_DBG_EMPTY,
XSEM_REG_DBG_GPRE_VECT,
XCM_REG_CTX_RBC_ACCS,
@@ -541,10 +616,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCX, DBG_BUS_CLIENT_RBCY},
false,
YSEM_REG_FAST_MEMORY,
- YSEM_REG_DBG_FRAME_MODE_BB_K2,
- YSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- YSEM_REG_SLOW_DBG_MODE_BB_K2,
- YSEM_REG_DBG_MODE1_CFG_BB_K2,
+ YSEM_REG_DBG_FRAME_MODE,
+ YSEM_REG_SLOW_DBG_ACTIVE,
+ YSEM_REG_SLOW_DBG_MODE,
+ YSEM_REG_DBG_MODE1_CFG,
YSEM_REG_SYNC_DBG_EMPTY,
YSEM_REG_DBG_GPRE_VECT,
YCM_REG_CTX_RBC_ACCS,
@@ -558,10 +633,10 @@ static struct storm_defs s_storm_defs[] = {
{DBG_BUS_CLIENT_RBCS, DBG_BUS_CLIENT_RBCS},
true,
PSEM_REG_FAST_MEMORY,
- PSEM_REG_DBG_FRAME_MODE_BB_K2,
- PSEM_REG_SLOW_DBG_ACTIVE_BB_K2,
- PSEM_REG_SLOW_DBG_MODE_BB_K2,
- PSEM_REG_DBG_MODE1_CFG_BB_K2,
+ PSEM_REG_DBG_FRAME_MODE,
+ PSEM_REG_SLOW_DBG_ACTIVE,
+ PSEM_REG_SLOW_DBG_MODE,
+ PSEM_REG_DBG_MODE1_CFG,
PSEM_REG_SYNC_DBG_EMPTY,
PSEM_REG_DBG_GPRE_VECT,
PCM_REG_CTX_RBC_ACCS,
@@ -575,7 +650,8 @@ static struct hw_type_defs s_hw_type_defs[] = {
{"asic", 1, 256, 32768},
{"reserved", 0, 0, 0},
{"reserved2", 0, 0, 0},
- {"reserved3", 0, 0, 0}
+ {"reserved3", 0, 0, 0},
+ {"reserved4", 0, 0, 0}
};
static struct grc_param_defs s_grc_param_defs[] = {
@@ -772,25 +848,25 @@ static struct rbc_reset_defs s_rbc_reset_defs[] = {
static struct phy_defs s_phy_defs[] = {
{"nw_phy", NWS_REG_NWS_CMU_K2,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5,
- PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5},
- {"sgmii_phy", MS_REG_MS_CMU_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy0", PHY_PCIE_REG_PHY0_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
- {"pcie_phy1", PHY_PCIE_REG_PHY1_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5,
- PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5},
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2,
+ PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2},
+ {"sgmii_phy", MS_REG_MS_CMU_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy0", PHY_PCIE_REG_PHY0_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
+ {"pcie_phy1", PHY_PCIE_REG_PHY1_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2,
+ PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2},
};
static struct split_type_defs s_split_type_defs[] = {
@@ -810,8 +886,17 @@ static struct split_type_defs s_split_type_defs[] = {
{"vf"}
};
+/******************************** Variables **********************************/
+
+/* The version of the calling app */
+static u32 s_app_ver;
+
/**************************** Private Functions ******************************/
+static void qed_static_asserts(void)
+{
+}
+
/* Reads and returns a single dword from the specified unaligned buffer */
static u32 qed_read_unaligned_dword(u8 *buf)
{
@@ -870,6 +955,9 @@ static enum dbg_status qed_dbg_dev_init(struct qed_hwfn *p_hwfn)
if (dev_data->initialized)
return DBG_STATUS_OK;
+ if (!s_app_ver)
+ return DBG_STATUS_APP_VERSION_NOT_SET;
+
/* Set chip */
if (QED_IS_K2(p_hwfn->cdev)) {
dev_data->chip_id = CHIP_K2;
@@ -990,11 +1078,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
- /* qed_rq() fetches data in CPU byteorder. Swap it back to
- * the device's to get right structure layout.
- */
- cpu_to_le32_array(dest, size);
-
/* Read FW version info from Storm RAM */
size = le32_to_cpu(fw_info_location.size);
if (!size || size > sizeof(*fw_info))
@@ -1006,8 +1089,6 @@ static void qed_read_storm_fw_info(struct qed_hwfn *p_hwfn,
for (i = 0; i < size; i++, addr += BYTES_IN_DWORD)
dest[i] = qed_rd(p_hwfn, p_ptt, addr);
-
- cpu_to_le32_array(dest, size);
}
/* Dumps the specified string to the specified buffer.
@@ -1117,9 +1198,15 @@ static u32 qed_dump_fw_ver_param(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn,
"Unexpected debug error: invalid FW version string\n");
switch (fw_info.ver.image_id) {
+ case FW_IMG_KUKU:
+ strcpy(fw_img_str, "kuku");
+ break;
case FW_IMG_MAIN:
strcpy(fw_img_str, "main");
break;
+ case FW_IMG_L2B:
+ strcpy(fw_img_str, "l2b");
+ break;
default:
strcpy(fw_img_str, "unknown");
break;
@@ -1255,6 +1342,8 @@ static u32 qed_dump_common_global_params(struct qed_hwfn *p_hwfn,
s_hw_type_defs[dev_data->hw_type].name);
offset += qed_dump_num_param(dump_buf + offset,
dump, "pci-func", p_hwfn->abs_pf_id);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "epoch", qed_get_epoch_time());
if (dev_data->chip_id == CHIP_BB)
offset += qed_dump_num_param(dump_buf + offset,
dump, "path", QED_PATH_ID(p_hwfn));
@@ -1590,7 +1679,7 @@ static void qed_grc_stall_storms(struct qed_hwfn *p_hwfn,
continue;
reg_addr = s_storm_defs[storm_id].sem_fast_mem_addr +
- SEM_FAST_REG_STALL_0_BB_K2;
+ SEM_FAST_REG_STALL_0;
qed_wr(p_hwfn, p_ptt, reg_addr, stall ? 1 : 0);
}
@@ -1703,8 +1792,8 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
const struct dbg_attn_reg *attn_reg_arr;
+ u32 block_id, sts_clr_address;
u8 reg_idx, num_attn_regs;
- u32 block_id;
for (block_id = 0; block_id < NUM_PHYS_BLOCKS; block_id++) {
if (dev_data->block_in_reset[block_id])
@@ -1728,16 +1817,103 @@ static void qed_grc_clear_all_prty(struct qed_hwfn *p_hwfn,
GET_FIELD(reg_data->mode.data,
DBG_MODE_HDR_MODES_BUF_OFFSET);
+ sts_clr_address = reg_data->sts_clr_address;
/* If Mode match: clear parity status */
if (!eval_mode ||
qed_is_mode_match(p_hwfn, &modes_buf_offset))
qed_rd(p_hwfn, p_ptt,
- DWORDS_TO_BYTES(reg_data->
- sts_clr_address));
+ DWORDS_TO_BYTES(sts_clr_address));
}
}
}
+/* Finds the meta data image in NVRAM */
+static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 image_type,
+ u32 *nvram_offset_bytes,
+ u32 *nvram_size_bytes)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
+ struct mcp_file_att file_att;
+ int nvm_result;
+
+ /* Call NVRAM get file command */
+ nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
+ p_ptt,
+ DRV_MSG_CODE_NVM_GET_FILE_ATT,
+ image_type,
+ &ret_mcp_resp,
+ &ret_mcp_param,
+ &ret_txn_size,
+ (u32 *)&file_att, false);
+
+ /* Check response */
+ if (nvm_result || (ret_mcp_resp & FW_MSG_CODE_MASK) !=
+ FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
+
+ /* Update return values */
+ *nvram_offset_bytes = file_att.nvm_start_addr;
+ *nvram_size_bytes = file_att.len;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
+ image_type, *nvram_offset_bytes, *nvram_size_bytes);
+
+ /* Check alignment */
+ if (*nvram_size_bytes & 0x3)
+ return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
+
+ return DBG_STATUS_OK;
+}
+
+/* Reads data from NVRAM */
+static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 nvram_offset_bytes,
+ u32 nvram_size_bytes, u32 *ret_buf)
+{
+ u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
+ s32 bytes_left = nvram_size_bytes;
+ u32 read_offset = 0, param = 0;
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_DEBUG,
+ "nvram_read: reading image of size %d bytes from NVRAM\n",
+ nvram_size_bytes);
+
+ do {
+ bytes_to_copy =
+ (bytes_left >
+ MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
+
+ /* Call NVRAM read command */
+ SET_MFW_FIELD(param,
+ DRV_MB_PARAM_NVM_OFFSET,
+ nvram_offset_bytes + read_offset);
+ SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
+ if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
+ DRV_MSG_CODE_NVM_READ_NVRAM, param,
+ &ret_mcp_resp,
+ &ret_mcp_param, &ret_read_size,
+ (u32 *)((u8 *)ret_buf + read_offset),
+ false))
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Check response */
+ if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
+ return DBG_STATUS_NVRAM_READ_FAILED;
+
+ /* Update read offset */
+ read_offset += ret_read_size;
+ bytes_left -= ret_read_size;
+ } while (bytes_left > 0);
+
+ return DBG_STATUS_OK;
+}
+
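The loop in qed_nvram_read() pulls the image in MCP_DRV_NVM_BUF_LEN-sized pieces, advancing read_offset by whatever the mailbox call actually returned until bytes_left reaches zero. A minimal userspace sketch of the same chunked-read pattern, with CHUNK_LEN and read_chunk() standing in for the MCP buffer size and the NVM read command (they are not driver APIs):

#include <stdio.h>
#include <string.h>

#define CHUNK_LEN 32 /* stands in for MCP_DRV_NVM_BUF_LEN */

static unsigned char nvram[128]; /* pretend NVRAM contents */

/* Fake mailbox read: copies one chunk, returns bytes read or -1 */
static int read_chunk(unsigned int offset, unsigned int len, unsigned char *dst)
{
	if (offset + len > sizeof(nvram))
		return -1;
	memcpy(dst, nvram + offset, len);
	return (int)len;
}

int main(void)
{
	unsigned char buf[100]; /* not a multiple of the chunk size */
	int bytes_left = (int)sizeof(buf);
	unsigned int read_offset = 0;

	do {
		unsigned int to_copy = bytes_left > CHUNK_LEN ?
				       CHUNK_LEN : (unsigned int)bytes_left;

		if (read_chunk(read_offset, to_copy, buf + read_offset) < 0)
			return 1; /* maps to DBG_STATUS_NVRAM_READ_FAILED */

		read_offset += to_copy;
		bytes_left -= (int)to_copy;
	} while (bytes_left > 0);

	printf("read %u bytes in up to %d-byte chunks\n", read_offset, CHUNK_LEN);
	return 0;
}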
/* Dumps GRC registers section header. Returns the dumped size in dwords.
* the following parameters are dumped:
* - count: no. of dumped entries
@@ -3189,17 +3365,6 @@ static u32 qed_grc_dump_phy(struct qed_hwfn *p_hwfn,
return offset;
}
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes);
-
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf);
-
/* Dumps the MCP HW dump from NVRAM. Returns the dumped size in dwords. */
static u32 qed_grc_dump_mcp_hw_dump(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3283,10 +3448,6 @@ static u32 qed_grc_dump_static_debug(struct qed_hwfn *p_hwfn,
has_dbg_bus = GET_FIELD(block_per_chip->flags,
DBG_BLOCK_CHIP_HAS_DBG_BUS);
- /* read+clear for NWS parity is not working, skip NWS block */
- if (block_id == BLOCK_NWS)
- continue;
-
if (!is_removed && has_dbg_bus &&
GET_FIELD(block_per_chip->dbg_bus_mode.data,
DBG_MODE_HDR_EVAL_MODE) > 0) {
@@ -3375,8 +3536,8 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
bool dump, u32 *num_dumped_dwords)
{
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- u32 dwords_read, offset = 0;
bool parities_masked = false;
+ u32 dwords_read, offset = 0;
u8 i;
*num_dumped_dwords = 0;
@@ -3545,8 +3706,7 @@ static enum dbg_status qed_grc_dump(struct qed_hwfn *p_hwfn,
*/
static u32 qed_idle_chk_dump_failure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 *
- dump_buf,
+ u32 *dump_buf,
bool dump,
u16 rule_id,
const struct dbg_idle_chk_rule *rule,
@@ -3894,91 +4054,6 @@ static u32 qed_idle_chk_dump(struct qed_hwfn *p_hwfn,
return offset;
}
-/* Finds the meta data image in NVRAM */
-static enum dbg_status qed_find_nvram_image(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 image_type,
- u32 *nvram_offset_bytes,
- u32 *nvram_size_bytes)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_txn_size;
- struct mcp_file_att file_att;
- int nvm_result;
-
- /* Call NVRAM get file command */
- nvm_result = qed_mcp_nvm_rd_cmd(p_hwfn,
- p_ptt,
- DRV_MSG_CODE_NVM_GET_FILE_ATT,
- image_type,
- &ret_mcp_resp,
- &ret_mcp_param,
- &ret_txn_size, (u32 *)&file_att);
-
- /* Check response */
- if (nvm_result ||
- (ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_GET_IMAGE_FAILED;
-
- /* Update return values */
- *nvram_offset_bytes = file_att.nvm_start_addr;
- *nvram_size_bytes = file_att.len;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "find_nvram_image: found NVRAM image of type %d in NVRAM offset %d bytes with size %d bytes\n",
- image_type, *nvram_offset_bytes, *nvram_size_bytes);
-
- /* Check alignment */
- if (*nvram_size_bytes & 0x3)
- return DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE;
-
- return DBG_STATUS_OK;
-}
-
-/* Reads data from NVRAM */
-static enum dbg_status qed_nvram_read(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 nvram_offset_bytes,
- u32 nvram_size_bytes, u32 *ret_buf)
-{
- u32 ret_mcp_resp, ret_mcp_param, ret_read_size, bytes_to_copy;
- s32 bytes_left = nvram_size_bytes;
- u32 read_offset = 0, param = 0;
-
- DP_VERBOSE(p_hwfn,
- QED_MSG_DEBUG,
- "nvram_read: reading image of size %d bytes from NVRAM\n",
- nvram_size_bytes);
-
- do {
- bytes_to_copy =
- (bytes_left >
- MCP_DRV_NVM_BUF_LEN) ? MCP_DRV_NVM_BUF_LEN : bytes_left;
-
- /* Call NVRAM read command */
- SET_MFW_FIELD(param,
- DRV_MB_PARAM_NVM_OFFSET,
- nvram_offset_bytes + read_offset);
- SET_MFW_FIELD(param, DRV_MB_PARAM_NVM_LEN, bytes_to_copy);
- if (qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
- DRV_MSG_CODE_NVM_READ_NVRAM, param,
- &ret_mcp_resp,
- &ret_mcp_param, &ret_read_size,
- (u32 *)((u8 *)ret_buf + read_offset)))
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Check response */
- if ((ret_mcp_resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_NVM_OK)
- return DBG_STATUS_NVRAM_READ_FAILED;
-
- /* Update read offset */
- read_offset += ret_read_size;
- bytes_left -= ret_read_size;
- } while (bytes_left > 0);
-
- return DBG_STATUS_OK;
-}
-
/* Get info on the MCP Trace data in the scratchpad:
* - trace_data_grc_addr (OUT): trace data GRC address in bytes
* - trace_data_size (OUT): trace data size in bytes (without the header)
@@ -4480,14 +4555,18 @@ static u32 qed_fw_asserts_dump(struct qed_hwfn *p_hwfn,
/* Dumps the specified ILT pages to the specified buffer.
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
- bool dump,
- u32 start_page_id,
+static u32 qed_ilt_dump_pages_range(u32 *dump_buf, u32 *given_offset,
+ bool *dump, u32 start_page_id,
u32 num_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids, u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
- u32 page_id, end_page_id, offset = 0;
+ u32 actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
+ u32 page_id, end_page_id, offset = *given_offset;
+ struct phys_mem_desc *mem_desc = NULL;
+ bool continue_dump = *dump;
+ u32 partial_page_size = 0;
if (num_pages == 0)
return offset;
@@ -4495,31 +4574,51 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
end_page_id = start_page_id + num_pages - 1;
for (page_id = start_page_id; page_id <= end_page_id; page_id++) {
- struct phys_mem_desc *mem_desc = &ilt_pages[page_id];
-
- /**
- *
- * if (page_id >= ->p_cxt_mngr->ilt_shadow_size)
- * break;
- */
-
+ mem_desc = &ilt_pages[page_id];
if (!ilt_pages[page_id].virt_addr)
continue;
if (dump_page_ids) {
- /* Copy page ID to dump buffer */
- if (dump)
+ /* Copy page ID to dump buffer
+ * (if dump is needed and buffer is not full)
+ */
+ if ((continue_dump) &&
+ (offset + 1 > buf_size_in_dwords)) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ if (continue_dump)
*(dump_buf + offset) = page_id;
offset++;
} else {
/* Copy page memory to dump buffer */
- if (dump)
+			if (continue_dump &&
+			    (offset + BYTES_TO_DWORDS(mem_desc->size) >
+			     buf_size_in_dwords)) {
+				partial_page_size =
+				    buf_size_in_dwords - offset;
+				memcpy(dump_buf + offset,
+				       mem_desc->virt_addr,
+				       partial_page_size);
+				continue_dump = false;
+				actual_dump_size_in_dwords =
+				    offset + partial_page_size;
+			}
+
+ if (continue_dump)
memcpy(dump_buf + offset,
mem_desc->virt_addr, mem_desc->size);
offset += BYTES_TO_DWORDS(mem_desc->size);
}
}
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
+
return offset;
}
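The net effect of the new parameters is a bounded copy: offset keeps counting the full size a complete dump would need, the data itself is written only while continue_dump holds, one final partial page is copied when the buffer runs out, and the real dumped size is recorded separately. A stripped-down sketch of that policy (page and buffer sizes are invented for illustration):

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define PAGE_DWORDS 8
#define BUF_DWORDS  20 /* holds only 2.5 pages */

int main(void)
{
	unsigned int page[PAGE_DWORDS], buf[BUF_DWORDS];
	unsigned int offset = 0, actual = 0, i;
	bool continue_dump = true;

	for (i = 0; i < PAGE_DWORDS; i++)
		page[i] = i;

	for (i = 0; i < 4; i++) { /* try to dump four pages */
		if (continue_dump && offset + PAGE_DWORDS > BUF_DWORDS) {
			unsigned int partial = BUF_DWORDS - offset;

			memcpy(buf + offset, page, partial * sizeof(*buf));
			continue_dump = false;
			actual = offset + partial; /* real dumped size */
		}
		if (continue_dump)
			memcpy(buf + offset, page, sizeof(page));
		offset += PAGE_DWORDS; /* full size keeps growing regardless */
	}

	printf("full dump needs %u dwords, actually dumped %u\n",
	       offset, continue_dump ? offset : actual);
	return 0;
}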
@@ -4528,21 +4627,30 @@ static u32 qed_ilt_dump_pages_range(u32 *dump_buf,
*/
static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
u32 *dump_buf,
- bool dump,
+ u32 *given_offset,
+ bool *dump,
u32 valid_conn_pf_pages,
u32 valid_conn_vf_pages,
struct phys_mem_desc *ilt_pages,
- bool dump_page_ids)
+ bool dump_page_ids,
+ u32 buf_size_in_dwords,
+ u32 *given_actual_dump_size_in_dwords)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 pf_start_line, start_page_id, offset = 0;
+ u32 pf_start_line, start_page_id, offset = *given_offset;
u32 cdut_pf_init_pages, cdut_vf_init_pages;
u32 cdut_pf_work_pages, cdut_vf_work_pages;
u32 base_data_offset, size_param_offset;
+ u32 src_pages;
+ u32 section_header_and_param_size;
u32 cdut_pf_pages, cdut_vf_pages;
+ u32 actual_dump_size_in_dwords;
+ bool continue_dump = *dump;
+ bool update_size = *dump;
const char *section_name;
- u8 i;
+ u32 i;
+ actual_dump_size_in_dwords = *given_actual_dump_size_in_dwords;
section_name = dump_page_ids ? "ilt_page_ids" : "ilt_page_mem";
cdut_pf_init_pages = qed_get_cdut_num_pf_init_pages(p_hwfn);
cdut_vf_init_pages = qed_get_cdut_num_vf_init_pages(p_hwfn);
@@ -4551,13 +4659,26 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
cdut_pf_pages = cdut_pf_init_pages + cdut_pf_work_pages;
cdut_vf_pages = cdut_vf_init_pages + cdut_vf_work_pages;
pf_start_line = p_hwfn->p_cxt_mngr->pf_start_line;
+ section_header_and_param_size = qed_dump_section_hdr(NULL,
+ false,
+ section_name,
+ 1) +
+ qed_dump_num_param(NULL, false, "size", 0);
+
+ if ((continue_dump) &&
+ (offset + section_header_and_param_size > buf_size_in_dwords)) {
+ continue_dump = false;
+ update_size = false;
+ actual_dump_size_in_dwords = offset;
+ }
- offset +=
- qed_dump_section_hdr(dump_buf + offset, dump, section_name, 1);
+ offset += qed_dump_section_hdr(dump_buf + offset,
+ continue_dump, section_name, 1);
/* Dump size parameter (0 for now, overwritten with real size later) */
size_param_offset = offset;
- offset += qed_dump_num_param(dump_buf + offset, dump, "size", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ continue_dump, "size", 0);
base_data_offset = offset;
/* CDUC pages are ordered as follows:
@@ -4570,22 +4691,22 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
if (qed_grc_get_param(p_hwfn, DBG_GRC_PARAM_DUMP_ILT_CDUC)) {
/* Dump connection PF pages */
start_page_id = clients[ILT_CLI_CDUC].first.val - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_pf_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, valid_conn_pf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump connection VF pages */
start_page_id += clients[ILT_CLI_CDUC].pf_total_lines;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += clients[ILT_CLI_CDUC].vf_total_lines)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- valid_conn_vf_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ valid_conn_vf_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* CDUT pages are ordered as follows:
@@ -4599,63 +4720,84 @@ static u32 qed_ilt_dump_pages_section(struct qed_hwfn *p_hwfn,
/* Dump task PF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_init_pages - pf_start_line;
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_pf_work_pages,
- ilt_pages, dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, cdut_pf_work_pages,
+ ilt_pages, dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump task VF pages */
start_page_id = clients[ILT_CLI_CDUT].first.val +
cdut_pf_pages + cdut_vf_init_pages - pf_start_line;
for (i = 0; i < p_hwfn->p_cxt_mngr->vf_count;
i++, start_page_id += cdut_vf_pages)
- offset += qed_ilt_dump_pages_range(dump_buf + offset,
- dump,
- start_page_id,
- cdut_vf_work_pages,
- ilt_pages,
- dump_page_ids);
+ qed_ilt_dump_pages_range(dump_buf, &offset,
+ &continue_dump, start_page_id,
+ cdut_vf_work_pages, ilt_pages,
+ dump_page_ids,
+ buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+ }
+
+	/* Dump Searcher pages */
+ if (clients[ILT_CLI_SRC].active) {
+ start_page_id = clients[ILT_CLI_SRC].first.val - pf_start_line;
+ src_pages = clients[ILT_CLI_SRC].last.val -
+ clients[ILT_CLI_SRC].first.val + 1;
+ qed_ilt_dump_pages_range(dump_buf, &offset, &continue_dump,
+ start_page_id, src_pages, ilt_pages,
+ dump_page_ids, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
}
/* Overwrite size param */
- if (dump)
- qed_dump_num_param(dump_buf + size_param_offset,
- dump, "size", offset - base_data_offset);
+ if (update_size) {
+ u32 section_size = (*dump == continue_dump) ?
+ offset - base_data_offset :
+ actual_dump_size_in_dwords - base_data_offset;
+ if (section_size > 0)
+ qed_dump_num_param(dump_buf + size_param_offset,
+ *dump, "size", section_size);
+ else if ((section_size == 0) && (*dump != continue_dump))
+ actual_dump_size_in_dwords -=
+ section_header_and_param_size;
+ }
+
+ *dump = continue_dump;
+ *given_offset = offset;
+ *given_actual_dump_size_in_dwords = actual_dump_size_in_dwords;
return offset;
}
-/* Performs ILT Dump to the specified buffer.
+/* Dumps a section containing the global parameters.
+ * Part of ilt dump process
* Returns the dumped size in dwords.
*/
-static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *dump_buf, bool dump)
+static u32
+qed_ilt_dump_dump_common_global_params(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf,
+ bool dump,
+ u32 cduc_page_size,
+ u32 conn_ctx_size,
+ u32 cdut_page_size,
+ u32 *full_dump_size_param_offset,
+ u32 *actual_dump_size_param_offset)
{
struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
- u32 valid_conn_vf_cids, valid_conn_vf_pages, offset = 0;
- u32 valid_conn_pf_cids, valid_conn_pf_pages, num_pages;
- u32 num_cids_per_page, conn_ctx_size;
- u32 cduc_page_size, cdut_page_size;
- struct phys_mem_desc *ilt_pages;
- u8 conn_type;
-
- cduc_page_size = 1 <<
- (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- cdut_page_size = 1 <<
- (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
- conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
- num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
- ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ u32 offset = 0;
- /* Dump global params - 22 must match number of params below */
offset += qed_dump_common_global_params(p_hwfn, p_ptt,
- dump_buf + offset, dump, 22);
+ dump_buf + offset,
+ dump, 30);
offset += qed_dump_str_param(dump_buf + offset,
- dump, "dump-type", "ilt-dump");
+ dump,
+ "dump-type", "ilt-dump");
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cduc-page-size", cduc_page_size);
+ "cduc-page-size",
+ cduc_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-first-page-id",
@@ -4667,20 +4809,19 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-pf-pages",
- clients
- [ILT_CLI_CDUC].pf_total_lines);
+ clients[ILT_CLI_CDUC].pf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cduc-num-vf-pages",
- clients
- [ILT_CLI_CDUC].vf_total_lines);
+ clients[ILT_CLI_CDUC].vf_total_lines);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"max-conn-ctx-size",
conn_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "cdut-page-size", cdut_page_size);
+ "cdut-page-size",
+ cdut_page_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
"cdut-first-page-id",
@@ -4711,19 +4852,16 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
p_hwfn->p_cxt_mngr->task_ctx_size);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "task-type-id",
- p_hwfn->p_cxt_mngr->task_type_id);
- offset += qed_dump_num_param(dump_buf + offset,
- dump,
"first-vf-id-in-pf",
p_hwfn->p_cxt_mngr->first_vf_in_pf);
- offset += /* 18 */ qed_dump_num_param(dump_buf + offset,
- dump,
- "num-vfs-in-pf",
- p_hwfn->p_cxt_mngr->vf_count);
offset += qed_dump_num_param(dump_buf + offset,
dump,
- "ptr-size-bytes", sizeof(void *));
+ "num-vfs-in-pf",
+ p_hwfn->p_cxt_mngr->vf_count);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "ptr-size-bytes",
+ sizeof(void *));
offset += qed_dump_num_param(dump_buf + offset,
dump,
"pf-start-line",
@@ -4736,58 +4874,281 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
dump,
"ilt-shadow-size",
p_hwfn->p_cxt_mngr->ilt_shadow_size);
+
+ *full_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump, "dump-size-full", 0);
+
+ *actual_dump_size_param_offset = offset;
+
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "dump-size-actual", 0);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "iscsi_task_pages",
+ p_hwfn->p_cxt_mngr->iscsi_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "fcoe_task_pages",
+ p_hwfn->p_cxt_mngr->fcoe_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "roce_task_pages",
+ p_hwfn->p_cxt_mngr->roce_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "eth_task_pages",
+ p_hwfn->p_cxt_mngr->eth_task_pages);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-first-page-id",
+ clients[ILT_CLI_SRC].first.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-last-page-id",
+ clients[ILT_CLI_SRC].last.val);
+ offset += qed_dump_num_param(dump_buf + offset,
+ dump,
+ "src-is-active",
+ clients[ILT_CLI_SRC].active);
+
/* Additional/Less parameters require matching of number in call to
* dump_common_global_params()
*/
- /* Dump section containing number of PF CIDs per connection type */
+ return offset;
+}
+
+/* Dump section containing number of PF CIDs per connection type.
+ * Part of ilt dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_pf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_pf_cids)
+{
+ u32 num_pf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
offset += qed_dump_section_hdr(dump_buf + offset,
dump, "num_pf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_pf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_pf_cids =
- p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
-
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_pf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_pf_cids = p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cid_count;
if (dump)
*(dump_buf + offset) = num_pf_cids;
- valid_conn_pf_cids += num_pf_cids;
+ *valid_conn_pf_cids += num_pf_cids;
}
- /* Dump section containing number of VF CIDs per connection type */
- offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "num_vf_cids_per_conn_type", 1);
+ return offset;
+}
+
+/* Dump section containing number of VF CIDs per connection type
+ * Part of ilt dump process.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump_dump_num_vf_cids(struct qed_hwfn *p_hwfn,
+ u32 *dump_buf,
+ bool dump, u32 *valid_conn_vf_cids)
+{
+ u32 num_vf_cids = 0;
+ u32 offset = 0;
+ u8 conn_type;
+
+ offset += qed_dump_section_hdr(dump_buf + offset, dump,
+ "num_vf_cids_per_conn_type", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump, "size", NUM_OF_CONNECTION_TYPES_E4);
- for (conn_type = 0, valid_conn_vf_cids = 0;
- conn_type < NUM_OF_CONNECTION_TYPES_E4; conn_type++, offset++) {
- u32 num_vf_cids =
+ dump, "size", NUM_OF_CONNECTION_TYPES);
+ for (conn_type = 0, *valid_conn_vf_cids = 0;
+ conn_type < NUM_OF_CONNECTION_TYPES; conn_type++, offset++) {
+ num_vf_cids =
p_hwfn->p_cxt_mngr->conn_cfg[conn_type].cids_per_vf;
-
if (dump)
*(dump_buf + offset) = num_vf_cids;
- valid_conn_vf_cids += num_vf_cids;
+ *valid_conn_vf_cids += num_vf_cids;
+ }
+
+ return offset;
+}
+
+/* Performs ILT Dump to the specified buffer.
+ * buf_size_in_dwords - The dumped buffer size.
+ * Returns the dumped size in dwords.
+ */
+static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u32 *dump_buf, u32 buf_size_in_dwords, bool dump)
+{
+#if ((!defined VMWARE) && (!defined UEFI))
+ struct qed_ilt_client_cfg *clients = p_hwfn->p_cxt_mngr->clients;
+#endif
+ u32 valid_conn_vf_cids = 0,
+ valid_conn_vf_pages, offset = 0, real_dumped_size = 0;
+ u32 valid_conn_pf_cids = 0, valid_conn_pf_pages, num_pages;
+ u32 num_cids_per_page, conn_ctx_size;
+ u32 cduc_page_size, cdut_page_size;
+ u32 actual_dump_size_in_dwords = 0;
+ struct phys_mem_desc *ilt_pages;
+ u32 actul_dump_off = 0;
+ u32 last_section_size;
+ u32 full_dump_off = 0;
+ u32 section_size = 0;
+ bool continue_dump;
+ u32 page_id;
+
+ last_section_size = qed_dump_last_section(NULL, 0, false);
+ cduc_page_size = 1 <<
+ (clients[ILT_CLI_CDUC].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ cdut_page_size = 1 <<
+ (clients[ILT_CLI_CDUT].p_size.val + PXP_ILT_PAGE_SIZE_NUM_BITS_MIN);
+ conn_ctx_size = p_hwfn->p_cxt_mngr->conn_ctx_size;
+ num_cids_per_page = (int)(cduc_page_size / conn_ctx_size);
+ ilt_pages = p_hwfn->p_cxt_mngr->ilt_shadow;
+ continue_dump = dump;
+
+	/* If a dump is needed, reserve room for the last section
+	 * (the last section holds the CRC of the dumped data).
+	 */
+ if (dump) {
+ if (buf_size_in_dwords >= last_section_size) {
+ buf_size_in_dwords -= last_section_size;
+ } else {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
}
- /* Dump section containing physical memory descs for each ILT page */
+ /* Dump global params */
+
+	/* If a dump is needed, first check that the dump buffer has enough
+	 * room for this section: calculate the section size without dumping,
+	 * and if there is not enough room, stop the dump.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ NULL,
+ false,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_common_global_params(p_hwfn,
+ p_ptt,
+ dump_buf + offset,
+ continue_dump,
+ cduc_page_size,
+ conn_ctx_size,
+ cdut_page_size,
+ &full_dump_off,
+ &actul_dump_off);
+
+	/* Dump section containing the number of PF CIDs per connection type.
+	 * If a dump is needed, first check that the dump buffer has enough
+	 * room for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_pf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_pf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_pf_cids);
+
+	/* Dump section containing the number of VF CIDs per connection type.
+	 * If a dump is needed, first check that the dump buffer has enough
+	 * room for this section.
+	 */
+ if (continue_dump) {
+ section_size =
+ qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ NULL,
+ false,
+ &valid_conn_vf_cids);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
+ offset += qed_ilt_dump_dump_num_vf_cids(p_hwfn,
+ dump_buf + offset,
+ continue_dump,
+ &valid_conn_vf_cids);
+
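Each ILT sub-section now follows the same measure-then-emit pattern: the section writer is first called with a NULL buffer and dump=false just to learn its size, and the section is emitted only if it still fits in what is left of the buffer; otherwise continue_dump is cleared and only the offsets keep advancing. A compact sketch of that pattern, with an invented write_section() in place of the driver's section writers:

#include <stdio.h>
#include <stdbool.h>

#define BUF_DWORDS 16

/* Toy section writer: writes n dwords when buf && do_dump, and always
 * returns the size it would have written.
 */
static unsigned int write_section(unsigned int *buf, bool do_dump, unsigned int n)
{
	unsigned int i;

	if (buf && do_dump)
		for (i = 0; i < n; i++)
			buf[i] = i;
	return n;
}

int main(void)
{
	unsigned int dump_buf[BUF_DWORDS];
	unsigned int sizes[] = { 6, 5, 8 }; /* the third one will not fit */
	unsigned int offset = 0, actual = 0, i;
	bool continue_dump = true;

	for (i = 0; i < 3; i++) {
		if (continue_dump) {
			/* Pass 1: size only (NULL buffer, no dump) */
			unsigned int sz = write_section(NULL, false, sizes[i]);

			if (offset + sz > BUF_DWORDS) {
				continue_dump = false;
				actual = offset;
			}
		}
		/* Pass 2: emit only while it still fits */
		offset += write_section(dump_buf + offset, continue_dump, sizes[i]);
	}

	printf("needed %u dwords, dumped %u dwords\n",
	       offset, continue_dump ? offset : actual);
	return 0;
}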
+ /* Dump section containing physical memory descriptors for each
+ * ILT page.
+ */
num_pages = p_hwfn->p_cxt_mngr->ilt_shadow_size;
+
+	/* If a dump is needed, first check that the dump buffer has enough
+	 * room for the section header.
+	 */
+ if (continue_dump) {
+ section_size = qed_dump_section_hdr(NULL,
+ false,
+ "ilt_page_desc",
+ 1) +
+ qed_dump_num_param(NULL,
+ false,
+ "size",
+ num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
+ if (offset + section_size > buf_size_in_dwords) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+
offset += qed_dump_section_hdr(dump_buf + offset,
- dump, "ilt_page_desc", 1);
+ continue_dump, "ilt_page_desc", 1);
offset += qed_dump_num_param(dump_buf + offset,
- dump,
+ continue_dump,
"size",
num_pages * PAGE_MEM_DESC_SIZE_DWORDS);
- /* Copy memory descriptors to dump buffer */
- if (dump) {
- u32 page_id;
-
+	/* Copy memory descriptors to the dump buffer. If a dump is needed,
+	 * copy only as many descriptors as fit in the dump buffer.
+	 */
+ if (continue_dump) {
for (page_id = 0; page_id < num_pages;
- page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS)
- memcpy(dump_buf + offset,
- &ilt_pages[page_id],
- DWORDS_TO_BYTES(PAGE_MEM_DESC_SIZE_DWORDS));
+ page_id++, offset += PAGE_MEM_DESC_SIZE_DWORDS) {
+ if (continue_dump &&
+ (offset + PAGE_MEM_DESC_SIZE_DWORDS <=
+ buf_size_in_dwords)) {
+ memcpy(dump_buf + offset,
+ &ilt_pages[page_id],
+ DWORDS_TO_BYTES
+ (PAGE_MEM_DESC_SIZE_DWORDS));
+ } else {
+ if (continue_dump) {
+ continue_dump = false;
+ actual_dump_size_in_dwords = offset;
+ }
+ }
+ }
} else {
offset += num_pages * PAGE_MEM_DESC_SIZE_DWORDS;
}
@@ -4798,25 +5159,31 @@ static u32 qed_ilt_dump(struct qed_hwfn *p_hwfn,
num_cids_per_page);
/* Dump ILT pages IDs */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, true);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, true, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
/* Dump ILT pages memory */
- offset += qed_ilt_dump_pages_section(p_hwfn,
- dump_buf + offset,
- dump,
- valid_conn_pf_pages,
- valid_conn_vf_pages,
- ilt_pages, false);
+ qed_ilt_dump_pages_section(p_hwfn, dump_buf, &offset, &continue_dump,
+ valid_conn_pf_pages, valid_conn_vf_pages,
+ ilt_pages, false, buf_size_in_dwords,
+ &actual_dump_size_in_dwords);
+
+ real_dumped_size =
+ (continue_dump == dump) ? offset : actual_dump_size_in_dwords;
+ qed_dump_num_param(dump_buf + full_dump_off, dump,
+ "full-dump-size", offset + last_section_size);
+ qed_dump_num_param(dump_buf + actul_dump_off,
+ dump,
+ "actual-dump-size",
+ real_dumped_size + last_section_size);
/* Dump last section */
- offset += qed_dump_last_section(dump_buf, offset, dump);
+ real_dumped_size += qed_dump_last_section(dump_buf,
+ real_dumped_size, dump);
- return offset;
+ return real_dumped_size;
}
/***************************** Public Functions *******************************/
@@ -4837,6 +5204,16 @@ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
return DBG_STATUS_OK;
}
+static enum dbg_status qed_dbg_set_app_ver(u32 ver)
+{
+ if (ver < TOOLS_VERSION)
+ return DBG_STATUS_UNSUPPORTED_APP_VERSION;
+
+ s_app_ver = ver;
+
+ return DBG_STATUS_OK;
+}
+
bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, struct fw_info *fw_info)
{
@@ -4975,6 +5352,9 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
if (buf_size_in_dwords < needed_buf_size_in_dwords)
return DBG_STATUS_DUMP_BUF_TOO_SMALL;
+ /* Doesn't do anything, needed for compile time asserts */
+ qed_static_asserts();
+
/* GRC Dump */
status = qed_grc_dump(p_hwfn, p_ptt, dump_buf, true, num_dumped_dwords);
@@ -5296,7 +5676,7 @@ static enum dbg_status qed_dbg_ilt_get_dump_buf_size(struct qed_hwfn *p_hwfn,
if (status != DBG_STATUS_OK)
return status;
- *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, false);
+ *buf_size = qed_ilt_dump(p_hwfn, p_ptt, NULL, 0, false);
return DBG_STATUS_OK;
}
@@ -5307,21 +5687,9 @@ static enum dbg_status qed_dbg_ilt_dump(struct qed_hwfn *p_hwfn,
u32 buf_size_in_dwords,
u32 *num_dumped_dwords)
{
- u32 needed_buf_size_in_dwords;
- enum dbg_status status;
-
- *num_dumped_dwords = 0;
-
- status = qed_dbg_ilt_get_dump_buf_size(p_hwfn,
- p_ptt,
- &needed_buf_size_in_dwords);
- if (status != DBG_STATUS_OK)
- return status;
-
- if (buf_size_in_dwords < needed_buf_size_in_dwords)
- return DBG_STATUS_DUMP_BUF_TOO_SMALL;
-
- *num_dumped_dwords = qed_ilt_dump(p_hwfn, p_ptt, dump_buf, true);
+ *num_dumped_dwords = qed_ilt_dump(p_hwfn,
+ p_ptt,
+ dump_buf, buf_size_in_dwords, true);
 /* Revert GRC params to their default */
qed_dbg_grc_set_params_default(p_hwfn);
@@ -5724,7 +6092,46 @@ static const char * const s_status_str[] = {
"The configured filter mode requires that all the constraints of a single trigger state will be defined on a single Storm/block input",
/* DBG_STATUS_MISSING_TRIGGER_STATE_STORM */
- "When triggering on Storm data, the Storm to trigger on must be specified"
+ "When triggering on Storm data, the Storm to trigger on must be specified",
+
+ /* DBG_STATUS_MDUMP2_FAILED_TO_REQUEST_OFFSIZE */
+ "Failed to request MDUMP2 Offsize",
+
+ /* DBG_STATUS_MDUMP2_FAILED_VALIDATION_OF_DATA_CRC */
+ "Expected CRC (part of the MDUMP2 data) is different than the calculated CRC over that data",
+
+ /* DBG_STATUS_MDUMP2_INVALID_SIGNATURE */
+ "Invalid Signature found at start of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_SIZE */
+ "Invalid Log Size of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_HDR */
+ "Invalid Log Header of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_INVALID_LOG_DATA */
+ "Invalid Log Data of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_NUM_PORTS */
+ "Could not extract number of ports from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_EXTRACTING_MFW_STATUS */
+ "Could not extract MFW (link) status from regval buf of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_DISPLAYING_LINKDUMP */
+ "Could not display linkdump of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PHY_CFG */
+ "Could not read PHY CFG of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_PLL_MODE */
+ "Could not read PLL Mode of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_READING_LANE_REGS */
+ "Could not read TSCF/TSCE Lane Regs of MDUMP2",
+
+ /* DBG_STATUS_MDUMP2_ERROR_ALLOCATING_BUF */
+ "Could not allocate MDUMP2 reg-val internal buffer"
};
/* Idle check severity names array */
@@ -5874,6 +6281,10 @@ static char s_temp_buf[MAX_MSG_LEN];
/**************************** Private Functions ******************************/
+static void qed_user_static_asserts(void)
+{
+}
+
static u32 qed_cyclic_add(u32 a, u32 b, u32 size)
{
return (a + b) % size;
@@ -6153,9 +6564,8 @@ static u32 qed_parse_idle_chk_dump_rules(struct qed_hwfn *p_hwfn,
/* Skip register names until the required reg_id is
* reached.
*/
- for (; reg_id > curr_reg_id;
- curr_reg_id++,
- parsing_str += strlen(parsing_str) + 1);
+ for (; reg_id > curr_reg_id; curr_reg_id++)
+ parsing_str += strlen(parsing_str) + 1;
results_offset +=
sprintf(qed_get_buf_ptr(results_buf,
@@ -6208,9 +6618,9 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
u32 *num_errors,
u32 *num_warnings)
{
+ u32 num_section_params = 0, num_rules, num_rules_not_dumped;
const char *section_name, *param_name, *param_str_val;
u32 *dump_buf_end = dump_buf + num_dumped_dwords;
- u32 num_section_params = 0, num_rules;
/* Offset in results_buf in bytes */
u32 results_offset = 0;
@@ -6234,15 +6644,31 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
num_section_params,
results_buf, &results_offset);
- /* Read idle_chk section */
+ /* Read idle_chk section
+ * There may be 1 or 2 idle_chk section parameters:
+ * - 1st is "num_rules"
+ * - 2nd is "num_rules_not_dumped" (optional)
+ */
+
dump_buf += qed_read_section_hdr(dump_buf,
&section_name, &num_section_params);
- if (strcmp(section_name, "idle_chk") || num_section_params != 1)
+ if (strcmp(section_name, "idle_chk") ||
+ (num_section_params != 2 && num_section_params != 1))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
dump_buf += qed_read_param(dump_buf,
&param_name, &param_str_val, &num_rules);
if (strcmp(param_name, "num_rules"))
return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ if (num_section_params > 1) {
+ dump_buf += qed_read_param(dump_buf,
+ &param_name,
+ &param_str_val,
+ &num_rules_not_dumped);
+ if (strcmp(param_name, "num_rules_not_dumped"))
+ return DBG_STATUS_IDLE_CHK_PARSE_FAILED;
+ } else {
+ num_rules_not_dumped = 0;
+ }
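The parser now accepts either one or two idle_chk section parameters, so dumps from older tools that emit only "num_rules" still parse, and the optional "num_rules_not_dumped" defaults to zero. The same backward-compatible pattern in miniature, with a made-up parameter stream instead of the real dump format:

#include <stdio.h>
#include <string.h>

struct param { const char *name; unsigned int val; };

/* Parse a section that carries 1 or 2 parameters; the 2nd is optional */
static int parse_idle_chk(const struct param *params, unsigned int num_params,
			  unsigned int *num_rules, unsigned int *num_not_dumped)
{
	if (num_params != 1 && num_params != 2)
		return -1;
	if (strcmp(params[0].name, "num_rules"))
		return -1;
	*num_rules = params[0].val;

	if (num_params > 1) {
		if (strcmp(params[1].name, "num_rules_not_dumped"))
			return -1;
		*num_not_dumped = params[1].val;
	} else {
		*num_not_dumped = 0; /* older dump without the new parameter */
	}
	return 0;
}

int main(void)
{
	struct param old_dump[] = { { "num_rules", 120 } };
	struct param new_dump[] = { { "num_rules", 120 },
				    { "num_rules_not_dumped", 7 } };
	unsigned int rules, not_dumped;

	if (!parse_idle_chk(old_dump, 1, &rules, &not_dumped))
		printf("old: %u rules, %u not dumped\n", rules, not_dumped);
	if (!parse_idle_chk(new_dump, 2, &rules, &not_dumped))
		printf("new: %u rules, %u not dumped\n", rules, not_dumped);
	return 0;
}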
if (num_rules) {
u32 rules_print_size;
@@ -6309,6 +6735,13 @@ static enum dbg_status qed_parse_idle_chk_dump(struct qed_hwfn *p_hwfn,
results_offset),
"\nIdle Check completed successfully\n");
+ if (num_rules_not_dumped)
+ results_offset +=
+ sprintf(qed_get_buf_ptr(results_buf,
+ results_offset),
+ "\nIdle Check Partially dumped : num_rules_not_dumped = %d\n",
+ num_rules_not_dumped);
+
/* Add 1 for string NULL termination */
*parsed_results_bytes = results_offset + 1;
@@ -7160,6 +7593,9 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
{
u32 parsed_buf_size;
+ /* Doesn't do anything, needed for compile time asserts */
+ qed_user_static_asserts();
+
return qed_parse_mcp_trace_dump(p_hwfn,
dump_buf,
results_buf, &parsed_buf_size, true);
@@ -7336,7 +7772,7 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
reg_result->block_attn_offset;
/* Go over attention status bits */
- for (j = 0; j < num_reg_attn; j++, bit_idx++) {
+ for (j = 0; j < num_reg_attn; j++) {
u16 attn_idx_val = GET_FIELD(bit_mapping[j].data,
DBG_ATTN_BIT_MAPPING_VAL);
const char *attn_name, *attn_type_str, *masked_str;
@@ -7353,35 +7789,36 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
}
/* Check current bit index */
- if (!(reg_result->sts_val & BIT(bit_idx)))
- continue;
+ if (reg_result->sts_val & BIT(bit_idx)) {
+ /* An attention bit with value=1 was found
+ * Find attention name
+ */
+ attn_name_offset =
+ block_attn_name_offsets[attn_idx_val];
+ attn_name = attn_name_base + attn_name_offset;
+ attn_type_str =
+ (attn_type ==
+ ATTN_TYPE_INTERRUPT ? "Interrupt" :
+ "Parity");
+ masked_str = reg_result->mask_val &
+ BIT(bit_idx) ?
+ " [masked]" : "";
+ sts_addr =
+ GET_FIELD(reg_result->data,
+ DBG_ATTN_REG_RESULT_STS_ADDRESS);
+ DP_NOTICE(p_hwfn,
+ "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
+ block_name, attn_type_str, attn_name,
+ sts_addr * 4, bit_idx, masked_str);
+ }
- /* An attention bit with value=1 was found
- * Find attention name
- */
- attn_name_offset =
- block_attn_name_offsets[attn_idx_val];
- attn_name = attn_name_base + attn_name_offset;
- attn_type_str =
- (attn_type ==
- ATTN_TYPE_INTERRUPT ? "Interrupt" :
- "Parity");
- masked_str = reg_result->mask_val & BIT(bit_idx) ?
- " [masked]" : "";
- sts_addr = GET_FIELD(reg_result->data,
- DBG_ATTN_REG_RESULT_STS_ADDRESS);
- DP_NOTICE(p_hwfn,
- "%s (%s) : %s [address 0x%08x, bit %d]%s\n",
- block_name, attn_type_str, attn_name,
- sts_addr * 4, bit_idx, masked_str);
+ bit_idx++;
}
}
return DBG_STATUS_OK;
}
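Functionally the rework in qed_dbg_parse_attn() is a straight bit scan: for every mapped attention bit it tests sts_val, reports the attention name with an " [masked]" suffix when the mask register has the same bit set, and now advances bit_idx at the end of the loop body instead of in the for clause. The scan itself, reduced to a standalone example with made-up register values:

#include <stdio.h>
#include <stdint.h>

#define BIT(n) (1u << (n))

int main(void)
{
	uint32_t sts_val = 0x00000212;  /* bits 1, 4 and 9 raised */
	uint32_t mask_val = 0x00000200; /* bit 9 is masked */
	unsigned int bit_idx;

	for (bit_idx = 0; bit_idx < 32; bit_idx++) {
		if (!(sts_val & BIT(bit_idx)))
			continue;
		printf("attention bit %u%s\n", bit_idx,
		       (mask_val & BIT(bit_idx)) ? " [masked]" : "");
	}
	return 0;
}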
-static DEFINE_MUTEX(qed_dbg_lock);
-
/* Wrapper for unifying the idle_chk and mcp_trace api */
static enum dbg_status
qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
@@ -7396,9 +7833,26 @@ qed_print_idle_chk_results_wrapper(struct qed_hwfn *p_hwfn,
&num_warnnings);
}
+static DEFINE_MUTEX(qed_dbg_lock);
+
+#define MAX_PHY_RESULT_BUFFER 9000
+
+/******************************** Feature Meta data section ******************/
+
+#define GRC_NUM_STR_FUNCS 2
+#define IDLE_CHK_NUM_STR_FUNCS 1
+#define MCP_TRACE_NUM_STR_FUNCS 1
+#define REG_FIFO_NUM_STR_FUNCS 1
+#define IGU_FIFO_NUM_STR_FUNCS 1
+#define PROTECTION_OVERRIDE_NUM_STR_FUNCS 1
+#define FW_ASSERTS_NUM_STR_FUNCS 1
+#define ILT_NUM_STR_FUNCS 1
+#define PHY_NUM_STR_FUNCS 20
+
/* Feature meta data lookup table */
static struct {
char *name;
+ u32 num_funcs;
enum dbg_status (*get_size)(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *size);
enum dbg_status (*perform_dump)(struct qed_hwfn *p_hwfn,
@@ -7411,40 +7865,46 @@ static struct {
u32 *dump_buf,
u32 num_dumped_dwords,
u32 *results_buf_size);
+ const struct qed_func_lookup *hsi_func_lookup;
} qed_features_lookup[] = {
{
- "grc", qed_dbg_grc_get_dump_buf_size,
- qed_dbg_grc_dump, NULL, NULL}, {
- "idle_chk",
+ "grc", GRC_NUM_STR_FUNCS, qed_dbg_grc_get_dump_buf_size,
+ qed_dbg_grc_dump, NULL, NULL, NULL}, {
+ "idle_chk", IDLE_CHK_NUM_STR_FUNCS,
qed_dbg_idle_chk_get_dump_buf_size,
qed_dbg_idle_chk_dump,
qed_print_idle_chk_results_wrapper,
- qed_get_idle_chk_results_buf_size}, {
- "mcp_trace",
+ qed_get_idle_chk_results_buf_size,
+ NULL}, {
+ "mcp_trace", MCP_TRACE_NUM_STR_FUNCS,
qed_dbg_mcp_trace_get_dump_buf_size,
qed_dbg_mcp_trace_dump, qed_print_mcp_trace_results,
- qed_get_mcp_trace_results_buf_size}, {
- "reg_fifo",
+ qed_get_mcp_trace_results_buf_size,
+ NULL}, {
+ "reg_fifo", REG_FIFO_NUM_STR_FUNCS,
qed_dbg_reg_fifo_get_dump_buf_size,
qed_dbg_reg_fifo_dump, qed_print_reg_fifo_results,
- qed_get_reg_fifo_results_buf_size}, {
- "igu_fifo",
+ qed_get_reg_fifo_results_buf_size,
+ NULL}, {
+ "igu_fifo", IGU_FIFO_NUM_STR_FUNCS,
qed_dbg_igu_fifo_get_dump_buf_size,
qed_dbg_igu_fifo_dump, qed_print_igu_fifo_results,
- qed_get_igu_fifo_results_buf_size}, {
- "protection_override",
+ qed_get_igu_fifo_results_buf_size,
+ NULL}, {
+ "protection_override", PROTECTION_OVERRIDE_NUM_STR_FUNCS,
qed_dbg_protection_override_get_dump_buf_size,
qed_dbg_protection_override_dump,
qed_print_protection_override_results,
- qed_get_protection_override_results_buf_size}, {
- "fw_asserts",
+ qed_get_protection_override_results_buf_size,
+ NULL}, {
+ "fw_asserts", FW_ASSERTS_NUM_STR_FUNCS,
qed_dbg_fw_asserts_get_dump_buf_size,
qed_dbg_fw_asserts_dump,
qed_print_fw_asserts_results,
- qed_get_fw_asserts_results_buf_size}, {
- "ilt",
- qed_dbg_ilt_get_dump_buf_size,
- qed_dbg_ilt_dump, NULL, NULL},};
+ qed_get_fw_asserts_results_buf_size,
+ NULL}, {
+ "ilt", ILT_NUM_STR_FUNCS, qed_dbg_ilt_get_dump_buf_size,
+ qed_dbg_ilt_dump, NULL, NULL, NULL},};
static void qed_dbg_print_feature(u8 *p_text_buf, u32 text_size)
{
@@ -7466,7 +7926,8 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 text_size_bytes, null_char_pos, i;
+ u32 txt_size_bytes, null_char_pos, i;
+ u32 *dbuf, dwords;
enum dbg_status rc;
char *text_buf;
@@ -7474,33 +7935,43 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
if (!qed_features_lookup[feature_idx].results_buf_size)
return DBG_STATUS_OK;
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = feature->dumped_dwords;
+
/* Obtain size of formatted output */
- rc = qed_features_lookup[feature_idx].
- results_buf_size(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, &text_size_bytes);
+ rc = qed_features_lookup[feature_idx].results_buf_size(p_hwfn,
+ dbuf,
+ dwords,
+ &txt_size_bytes);
if (rc != DBG_STATUS_OK)
return rc;
- /* Make sure that the allocated size is a multiple of dword (4 bytes) */
- null_char_pos = text_size_bytes - 1;
- text_size_bytes = (text_size_bytes + 3) & ~0x3;
+ /* Make sure that the allocated size is a multiple of dword
+ * (4 bytes).
+ */
+ null_char_pos = txt_size_bytes - 1;
+ txt_size_bytes = (txt_size_bytes + 3) & ~0x3;
- if (text_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
+ if (txt_size_bytes < QED_RESULTS_BUF_MIN_SIZE) {
DP_NOTICE(p_hwfn->cdev,
"formatted size of feature was too small %d. Aborting\n",
- text_size_bytes);
+ txt_size_bytes);
return DBG_STATUS_INVALID_ARGS;
}
- /* Allocate temp text buf */
- text_buf = vzalloc(text_size_bytes);
- if (!text_buf)
+ /* allocate temp text buf */
+ text_buf = vzalloc(txt_size_bytes);
+ if (!text_buf) {
+ DP_NOTICE(p_hwfn->cdev,
+ "failed to allocate text buffer. Aborting\n");
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
+ }
/* Decode feature opcodes to string on temp buf */
- rc = qed_features_lookup[feature_idx].
- print_results(p_hwfn, (u32 *)feature->dump_buf,
- feature->dumped_dwords, text_buf);
+ rc = qed_features_lookup[feature_idx].print_results(p_hwfn,
+ dbuf,
+ dwords,
+ text_buf);
if (rc != DBG_STATUS_OK) {
vfree(text_buf);
return rc;
@@ -7510,26 +7981,27 @@ static enum dbg_status format_feature(struct qed_hwfn *p_hwfn,
* The bytes that were added as a result of the dword alignment are also
* padded with '\n' characters.
*/
- for (i = null_char_pos; i < text_size_bytes; i++)
+ for (i = null_char_pos; i < txt_size_bytes; i++)
text_buf[i] = '\n';
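The formatted text buffer is rounded up to a whole number of dwords with (size + 3) & ~0x3 because the feature size is later reported in dwords (dumped_dwords = size / 4), and the bytes from the original NUL terminator up to the rounded size are overwritten with '\n' so the padding stays printable. A small sketch of that alignment trick:

#include <stdio.h>
#include <string.h>
#include <stdlib.h>

int main(void)
{
	const char *text = "idle check ok"; /* 13 chars + NUL = 14 bytes */
	unsigned int size = (unsigned int)strlen(text) + 1;
	unsigned int null_pos = size - 1;
	unsigned int aligned = (size + 3) & ~0x3u; /* round up to 4 bytes */
	char *buf = calloc(aligned, 1);
	unsigned int i;

	if (!buf)
		return 1;
	memcpy(buf, text, size);
	/* Replace the NUL and the alignment slack with newlines */
	for (i = null_pos; i < aligned; i++)
		buf[i] = '\n';

	printf("size %u -> aligned %u (%u dwords)\n", size, aligned, aligned / 4);
	free(buf);
	return 0;
}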
/* Dump printable feature to log */
if (p_hwfn->cdev->print_dbg_data)
- qed_dbg_print_feature(text_buf, text_size_bytes);
+ qed_dbg_print_feature(text_buf, txt_size_bytes);
- /* Just return the original binary buffer if requested */
+ /* Dump binary data as is to the output file */
if (p_hwfn->cdev->dbg_bin_dump) {
vfree(text_buf);
- return DBG_STATUS_OK;
+ return rc;
}
- /* Free the old dump_buf and point the dump_buf to the newly allocagted
+ /* Free the old dump_buf and point the dump_buf to the newly allocated
* and formatted text buffer.
*/
vfree(feature->dump_buf);
feature->dump_buf = text_buf;
- feature->buf_size = text_size_bytes;
- feature->dumped_dwords = text_size_bytes / 4;
+ feature->buf_size = txt_size_bytes;
+ feature->dumped_dwords = txt_size_bytes / 4;
+
return rc;
}
@@ -7542,7 +8014,7 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
{
struct qed_dbg_feature *feature =
&p_hwfn->cdev->dbg_features[feature_idx];
- u32 buf_size_dwords;
+ u32 buf_size_dwords, *dbuf, *dwords;
enum dbg_status rc;
DP_NOTICE(p_hwfn->cdev, "Collecting a debug feature [\"%s\"]\n",
@@ -7580,13 +8052,16 @@ static enum dbg_status qed_dbg_dump(struct qed_hwfn *p_hwfn,
if (!feature->dump_buf)
return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;
- rc = qed_features_lookup[feature_idx].
- perform_dump(p_hwfn, p_ptt, (u32 *)feature->dump_buf,
- feature->buf_size / sizeof(u32),
- &feature->dumped_dwords);
+ dbuf = (u32 *)feature->dump_buf;
+ dwords = &feature->dumped_dwords;
+ rc = qed_features_lookup[feature_idx].perform_dump(p_hwfn, p_ptt,
+ dbuf,
+ feature->buf_size /
+ sizeof(u32),
+ dwords);
/* If mcp is stuck we get DBG_STATUS_NVRAM_GET_IMAGE_FAILED error.
- * In this case the buffer holds valid binary data, but we wont able
+	 * In this case the buffer holds valid binary data, but we won't be able
* to parse it (since parsing relies on data in NVRAM which is only
* accessible when MFW is responsive). skip the formatting but return
* success so that binary data is provided.
@@ -7777,7 +8252,8 @@ enum debug_print_features {
static u32 qed_calc_regdump_header(struct qed_dev *cdev,
enum debug_print_features feature,
- int engine, u32 feature_size, u8 omit_engine)
+ int engine, u32 feature_size,
+ u8 omit_engine, u8 dbg_bin_dump)
{
u32 res = 0;
@@ -7788,7 +8264,7 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
feature, feature_size);
SET_FIELD(res, REGDUMP_HEADER_FEATURE, feature);
- SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, 1);
+ SET_FIELD(res, REGDUMP_HEADER_BIN_DUMP, dbg_bin_dump);
SET_FIELD(res, REGDUMP_HEADER_OMIT_ENGINE, omit_engine);
SET_FIELD(res, REGDUMP_HEADER_ENGINE, engine);
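qed_calc_regdump_header() packs the regdump header fields set via SET_FIELD() into one u32 that precedes each blob in the combined dump; the diff only threads the new dbg_bin_dump argument through instead of hard-coding the BIN_DUMP bit to 1. The shift/mask values below are illustrative (the real ones live in qed_debug.c), but they show the packing idea:

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: 24-bit size, 5-bit feature, then 3 single-bit flags */
#define HDR_SIZE_SHIFT     0
#define HDR_SIZE_MASK      0xffffffu
#define HDR_FEATURE_SHIFT  24
#define HDR_FEATURE_MASK   0x1fu
#define HDR_BIN_SHIFT      29
#define HDR_OMIT_SHIFT     30
#define HDR_ENGINE_SHIFT   31

static uint32_t calc_header(uint32_t size, uint32_t feature,
			    int bin, int omit, int engine)
{
	uint32_t res = 0;

	res |= (size & HDR_SIZE_MASK) << HDR_SIZE_SHIFT;
	res |= (feature & HDR_FEATURE_MASK) << HDR_FEATURE_SHIFT;
	res |= (uint32_t)(bin ? 1 : 0) << HDR_BIN_SHIFT;
	res |= (uint32_t)(omit ? 1 : 0) << HDR_OMIT_SHIFT;
	res |= (uint32_t)(engine ? 1 : 0) << HDR_ENGINE_SHIFT;
	return res;
}

int main(void)
{
	/* e.g. a 0x1234-dword blob for feature 2, engine 1, binary dump */
	printf("header = 0x%08x\n", calc_header(0x1234, 2, 1, 0, 1));
	return 0;
}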
@@ -7798,12 +8274,10 @@ static u32 qed_calc_regdump_header(struct qed_dev *cdev,
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
{
u8 cur_engine, omit_engine = 0, org_engine;
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct dbg_tools_data *dev_data = &p_hwfn->dbg_info;
- int grc_params[MAX_DBG_GRC_PARAMS], i;
+ int grc_params[MAX_DBG_GRC_PARAMS], rc, i;
u32 offset = 0, feature_size;
- int rc;
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
grc_params[i] = dev_data->grc.param_val[i];
@@ -7811,8 +8285,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!QED_IS_CMT(cdev))
omit_engine = 1;
+ cdev->dbg_bin_dump = 1;
mutex_lock(&qed_dbg_lock);
- cdev->dbg_bin_dump = true;
org_engine = qed_get_debug_engine(cdev);
for (cur_engine = 0; cur_engine < cdev->num_hwfns; cur_engine++) {
@@ -7826,8 +8300,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7838,8 +8315,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IDLE_CHK, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IDLE_CHK,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_idle_chk failed. rc = %d\n", rc);
@@ -7850,8 +8330,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, REG_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, REG_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_reg_fifo failed. rc = %d\n", rc);
@@ -7862,8 +8345,11 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, IGU_FIFO, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, IGU_FIFO,
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_igu_fifo failed. rc = %d", rc);
@@ -7875,9 +8361,12 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
&feature_size);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, PROTECTION_OVERRIDE,
+ qed_calc_regdump_header(cdev,
+ PROTECTION_OVERRIDE,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev,
@@ -7891,8 +8380,10 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, FW_ASSERTS,
- cur_engine, feature_size,
- omit_engine);
+ cur_engine,
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_fw_asserts failed. rc = %d\n",
@@ -7900,8 +8391,8 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
}
feature_size = qed_dbg_ilt_size(cdev);
- if (!cdev->disable_ilt_dump &&
- feature_size < ILT_DUMP_MAX_SIZE) {
+ if (!cdev->disable_ilt_dump && feature_size <
+ ILT_DUMP_MAX_SIZE) {
rc = qed_dbg_ilt(cdev, (u8 *)buffer + offset +
REGDUMP_HEADER_SIZE, &feature_size);
if (!rc) {
@@ -7909,15 +8400,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
qed_calc_regdump_header(cdev, ILT_DUMP,
cur_engine,
feature_size,
- omit_engine);
- offset += feature_size + REGDUMP_HEADER_SIZE;
+ omit_engine,
+ cdev->dbg_bin_dump);
+ offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_ilt failed. rc = %d\n",
rc);
}
}
- /* GRC dump - must be last because when mcp stuck it will
+ /* Grc dump - must be last because when mcp stuck it will
* clutter idle_chk, reg_fifo, ...
*/
for (i = 0; i < MAX_DBG_GRC_PARAMS; i++)
@@ -7929,7 +8421,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, GRC_DUMP,
cur_engine,
- feature_size, omit_engine);
+ feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_grc failed. rc = %d", rc);
@@ -7944,16 +8438,13 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, MCP_TRACE, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else {
DP_ERR(cdev, "qed_dbg_mcp_trace failed. rc = %d\n", rc);
}
- /* Re-populate nvm attribute info */
- qed_mcp_nvm_info_free(p_hwfn);
- qed_mcp_nvm_info_populate(p_hwfn);
-
/* nvm cfg1 */
rc = qed_dbg_nvm_image(cdev,
(u8 *)buffer + offset +
@@ -7962,43 +8453,51 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
qed_calc_regdump_header(cdev, NVM_CFG1, cur_engine,
- feature_size, omit_engine);
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1", rc);
+ QED_NVM_IMAGE_NVM_CFG1, "QED_NVM_IMAGE_NVM_CFG1",
+ rc);
}
- /* nvm default */
+ /* nvm default */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_DEFAULT_CFG);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_DEFAULT_CFG);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, DEFAULT_CFG, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, DEFAULT_CFG,
+ cur_engine, feature_size,
+ omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_DEFAULT_CFG, "QED_NVM_IMAGE_DEFAULT_CFG",
- rc);
+ QED_NVM_IMAGE_DEFAULT_CFG,
+ "QED_NVM_IMAGE_DEFAULT_CFG", rc);
}
/* nvm meta */
rc = qed_dbg_nvm_image(cdev,
- (u8 *)buffer + offset + REGDUMP_HEADER_SIZE,
- &feature_size, QED_NVM_IMAGE_NVM_META);
+ (u8 *)buffer + offset +
+ REGDUMP_HEADER_SIZE, &feature_size,
+ QED_NVM_IMAGE_NVM_META);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, NVM_META, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, NVM_META, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
"qed_dbg_nvm_image failed for image %d (%s), rc = %d\n",
- QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META", rc);
+ QED_NVM_IMAGE_NVM_META, "QED_NVM_IMAGE_NVM_META",
+ rc);
}
/* nvm mdump */
@@ -8007,8 +8506,9 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP);
if (!rc) {
*(u32 *)((u8 *)buffer + offset) =
- qed_calc_regdump_header(cdev, MDUMP, cur_engine,
- feature_size, omit_engine);
+ qed_calc_regdump_header(cdev, MDUMP, cur_engine,
+ feature_size, omit_engine,
+ cdev->dbg_bin_dump);
offset += (feature_size + REGDUMP_HEADER_SIZE);
} else if (rc != -ENOENT) {
DP_ERR(cdev,
@@ -8016,17 +8516,16 @@ int qed_dbg_all_data(struct qed_dev *cdev, void *buffer)
QED_NVM_IMAGE_MDUMP, "QED_NVM_IMAGE_MDUMP", rc);
}
- cdev->dbg_bin_dump = false;
mutex_unlock(&qed_dbg_lock);
+ cdev->dbg_bin_dump = 0;
return 0;
}
int qed_dbg_all_data_size(struct qed_dev *cdev)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
u32 regs_len = 0, image_len = 0, ilt_len = 0, total_ilt_len = 0;
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
u8 cur_engine, org_engine;
cdev->disable_ilt_dump = false;
@@ -8037,14 +8536,13 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
"calculating idle_chk and grcdump register length for current engine\n");
qed_set_debug_engine(cdev, cur_engine);
regs_len += REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
- REGDUMP_HEADER_SIZE +
- qed_dbg_protection_override_size(cdev) +
- REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
-
+ REGDUMP_HEADER_SIZE + qed_dbg_idle_chk_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_grc_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_reg_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_igu_fifo_size(cdev) +
+ REGDUMP_HEADER_SIZE +
+ qed_dbg_protection_override_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_fw_asserts_size(cdev);
ilt_len = REGDUMP_HEADER_SIZE + qed_dbg_ilt_size(cdev);
if (ilt_len < ILT_DUMP_MAX_SIZE) {
total_ilt_len += ilt_len;
@@ -8055,7 +8553,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
qed_set_debug_engine(cdev, org_engine);
/* Engine common */
- regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev);
+ regs_len += REGDUMP_HEADER_SIZE + qed_dbg_mcp_trace_size(cdev) +
+ REGDUMP_HEADER_SIZE + qed_dbg_phy_size(cdev);
qed_dbg_nvm_image_length(p_hwfn, QED_NVM_IMAGE_NVM_CFG1, &image_len);
if (image_len)
regs_len += REGDUMP_HEADER_SIZE + image_len;
@@ -8083,10 +8582,8 @@ int qed_dbg_all_data_size(struct qed_dev *cdev)
int qed_dbg_feature(struct qed_dev *cdev, void *buffer,
enum qed_dbg_features feature, u32 *num_dumped_bytes)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
- struct qed_dbg_feature *qed_feature =
- &cdev->dbg_features[feature];
+ struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
enum dbg_status dbg_rc;
struct qed_ptt *p_ptt;
int rc = 0;
@@ -8119,9 +8616,8 @@ out:
int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
{
- struct qed_hwfn *p_hwfn =
- &cdev->hwfns[cdev->engine_for_debug];
struct qed_dbg_feature *qed_feature = &cdev->dbg_features[feature];
+ struct qed_hwfn *p_hwfn = &cdev->hwfns[cdev->engine_for_debug];
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
u32 buf_size_dwords;
enum dbg_status rc;
@@ -8143,6 +8639,14 @@ int qed_dbg_feature_size(struct qed_dev *cdev, enum qed_dbg_features feature)
return qed_feature->buf_size;
}
+int qed_dbg_phy_size(struct qed_dev *cdev)
+{
+	/* Return the max size of the PHY info buffer plus one PHY
+	 * mac_stat buffer per port.
+	 */
+ return MAX_PHY_RESULT_BUFFER * (1 + qed_device_num_ports(cdev));
+}
+
u8 qed_get_debug_engine(struct qed_dev *cdev)
{
return cdev->engine_for_debug;
@@ -8160,6 +8664,9 @@ void qed_dbg_pf_init(struct qed_dev *cdev)
const u8 *dbg_values = NULL;
int i;
+ /* Sync ver with debugbus qed code */
+ qed_dbg_set_app_ver(TOOLS_VERSION);
+
/* Debug values are after init values.
* The offset is the first dword of the file.
*/
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.h b/drivers/net/ethernet/qlogic/qed/qed_debug.h
index e71af82d3200..b0d4b937cf4a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.h
@@ -1,11 +1,11 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
-#ifndef _QED_DEBUGFS_H
-#define _QED_DEBUGFS_H
+#ifndef _QED_DEBUG_H
+#define _QED_DEBUG_H
enum qed_dbg_features {
DBG_FEATURE_GRC,
@@ -45,6 +45,7 @@ int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
+int qed_dbg_phy_size(struct qed_dev *cdev);
int qed_dbg_all_data(struct qed_dev *cdev, void *buffer);
int qed_dbg_all_data_size(struct qed_dev *cdev);
u8 qed_get_debug_engine(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 0410c3604abd..cc4ec2bb36db 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -25,6 +25,7 @@
#include "qed_dev_api.h"
#include "qed_fcoe.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -951,7 +952,7 @@ qed_llh_remove_filter(struct qed_hwfn *p_hwfn,
}
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN])
+ u8 ppfid, const u8 mac_addr[ETH_ALEN])
{
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
@@ -1396,12 +1397,13 @@ void qed_resc_free(struct qed_dev *cdev)
qed_rdma_info_free(p_hwfn);
}
+ qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
qed_iov_free(p_hwfn);
qed_l2_free(p_hwfn);
qed_dmae_info_free(p_hwfn);
qed_dcbx_info_free(p_hwfn);
qed_dbg_user_data_free(p_hwfn);
- qed_fw_overlay_mem_free(p_hwfn, p_hwfn->fw_overlay_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
/* Destroy doorbell recovery mechanism */
qed_db_recovery_teardown(p_hwfn);
@@ -1483,8 +1485,8 @@ static u16 qed_init_qm_get_num_pf_rls(struct qed_hwfn *p_hwfn)
u16 num_pf_rls, num_vfs = qed_init_qm_get_num_vfs(p_hwfn);
/* num RLs can't exceed resource amount of rls or vports */
- num_pf_rls = (u16) min_t(u32, RESC_NUM(p_hwfn, QED_RL),
- RESC_NUM(p_hwfn, QED_VPORT));
+ num_pf_rls = (u16)min_t(u32, RESC_NUM(p_hwfn, QED_RL),
+ RESC_NUM(p_hwfn, QED_VPORT));
/* Make sure after we reserve there's something left */
if (num_pf_rls < num_vfs + NUM_DEFAULT_RLS)
@@ -1532,8 +1534,8 @@ static void qed_init_qm_params(struct qed_hwfn *p_hwfn)
bool four_port;
/* pq and vport bases for this PF */
- qm_info->start_pq = (u16) RESC_START(p_hwfn, QED_PQ);
- qm_info->start_vport = (u8) RESC_START(p_hwfn, QED_VPORT);
+ qm_info->start_pq = (u16)RESC_START(p_hwfn, QED_PQ);
+ qm_info->start_vport = (u8)RESC_START(p_hwfn, QED_VPORT);
/* rate limiting and weighted fair queueing are always enabled */
qm_info->vport_rl_en = true;
@@ -1628,9 +1630,9 @@ static void qed_init_qm_advance_vport(struct qed_hwfn *p_hwfn)
*/
/* flags for pq init */
-#define PQ_INIT_SHARE_VPORT (1 << 0)
-#define PQ_INIT_PF_RL (1 << 1)
-#define PQ_INIT_VF_RL (1 << 2)
+#define PQ_INIT_SHARE_VPORT BIT(0)
+#define PQ_INIT_PF_RL BIT(1)
+#define PQ_INIT_VF_RL BIT(2)
/* defines for pq init */
#define PQ_INIT_DEFAULT_WRR_GROUP 1
@@ -2290,7 +2292,7 @@ int qed_resc_alloc(struct qed_dev *cdev)
goto alloc_no_mem;
}
- rc = qed_eq_alloc(p_hwfn, (u16) n_eqes);
+ rc = qed_eq_alloc(p_hwfn, (u16)n_eqes);
if (rc)
goto alloc_err;
@@ -2375,6 +2377,49 @@ alloc_err:
return rc;
}
+static int qed_fw_err_handler(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ u16 echo,
+ union event_ring_data *data, u8 fw_return_code)
+{
+ if (fw_return_code != COMMON_ERR_CODE_ERROR)
+ goto eqe_unexpected;
+
+ if (data->err_data.recovery_scope == ERR_SCOPE_FUNC &&
+ le16_to_cpu(data->err_data.entity_id) >= MAX_NUM_PFS) {
+ qed_sriov_vfpf_malicious(p_hwfn, &data->err_data);
+ return 0;
+ }
+
+eqe_unexpected:
+ DP_ERR(p_hwfn,
+ "Skipping unexpected eqe 0x%02x, FW return code 0x%x, echo 0x%x\n",
+ opcode, fw_return_code, echo);
+ return -EINVAL;
+}
+
+static int qed_common_eqe_event(struct qed_hwfn *p_hwfn,
+ u8 opcode,
+ __le16 echo,
+ union event_ring_data *data,
+ u8 fw_return_code)
+{
+ switch (opcode) {
+ case COMMON_EVENT_VF_PF_CHANNEL:
+ case COMMON_EVENT_VF_FLR:
+ return qed_sriov_eqe_event(p_hwfn, opcode, echo, data,
+ fw_return_code);
+ case COMMON_EVENT_FW_ERROR:
+ return qed_fw_err_handler(p_hwfn, opcode,
+ le16_to_cpu(echo), data,
+ fw_return_code);
+ default:
+ DP_INFO(p_hwfn->cdev, "Unknown eqe event 0x%02x, echo 0x%x\n",
+ opcode, echo);
+ return -EINVAL;
+ }
+}
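/* A minimal sketch, not part of this patch: the registration pattern used for
 * async EQE callbacks with the signature above. PROTOCOLID_COMMON is wired up
 * in qed_resc_setup()/qed_resc_free() later in this patch; the callback body
 * and helper names here are placeholders.
 */
static int example_eqe_cb(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
			  union event_ring_data *data, u8 fw_return_code)
{
	/* Dispatch on opcode; return 0 on success, -EINVAL for unexpected EQEs. */
	return 0;
}

static void example_register_eqe_cb(struct qed_hwfn *p_hwfn)
{
	qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, example_eqe_cb);
}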
+
void qed_resc_setup(struct qed_dev *cdev)
{
int i;
@@ -2403,6 +2448,8 @@ void qed_resc_setup(struct qed_dev *cdev)
qed_l2_setup(p_hwfn);
qed_iov_setup(p_hwfn);
+ qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+ qed_common_eqe_event);
#ifdef CONFIG_QED_LL2
if (p_hwfn->using_ll2)
qed_ll2_setup(p_hwfn);
@@ -2430,9 +2477,8 @@ int qed_final_cleanup(struct qed_hwfn *p_hwfn,
u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
int rc = -EBUSY;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
-
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_FLR_FINAL_ACK, p_hwfn->rel_pf_id);
if (is_vf)
id += 0x10;
@@ -2592,7 +2638,7 @@ static void qed_init_cache_line_size(struct qed_hwfn *p_hwfn,
cache_line_size);
}
- if (L1_CACHE_BYTES > wr_mbs)
+ if (wr_mbs < L1_CACHE_BYTES)
DP_INFO(p_hwfn,
"The cache line size for padding is suboptimal for performance [OS cache line size 0x%x, wr mbs 0x%x]\n",
L1_CACHE_BYTES, wr_mbs);
@@ -2608,13 +2654,21 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int hw_mode)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
- struct qed_qm_common_rt_init_params params;
+ struct qed_qm_common_rt_init_params *params;
struct qed_dev *cdev = p_hwfn->cdev;
u8 vf_id, max_num_vfs;
u16 num_pfs, pf_id;
u32 concrete_fid;
int rc = 0;
+ params = kzalloc(sizeof(*params), GFP_KERNEL);
+ if (!params) {
+ DP_NOTICE(p_hwfn->cdev,
+ "Failed to allocate common init params\n");
+
+ return -ENOMEM;
+ }
+
qed_init_cau_rt_data(cdev);
/* Program GTT windows */
@@ -2627,16 +2681,15 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
qm_info->pf_wfq_en = true;
}
- memset(&params, 0, sizeof(params));
- params.max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
- params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
- params.pf_rl_en = qm_info->pf_rl_en;
- params.pf_wfq_en = qm_info->pf_wfq_en;
- params.global_rl_en = qm_info->vport_rl_en;
- params.vport_wfq_en = qm_info->vport_wfq_en;
- params.port_params = qm_info->qm_port_params;
+ params->max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ params->max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
+ params->pf_rl_en = qm_info->pf_rl_en;
+ params->pf_wfq_en = qm_info->pf_wfq_en;
+ params->global_rl_en = qm_info->vport_rl_en;
+ params->vport_wfq_en = qm_info->vport_wfq_en;
+ params->port_params = qm_info->qm_port_params;
- qed_qm_common_rt_init(p_hwfn, &params);
+ qed_qm_common_rt_init(p_hwfn, params);
qed_cxt_hw_init_common(p_hwfn);
@@ -2644,7 +2697,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
rc = qed_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
if (rc)
- return rc;
+ goto out;
qed_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
qed_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
@@ -2663,7 +2716,7 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
max_num_vfs = QED_IS_AH(cdev) ? MAX_NUM_VFS_K2 : MAX_NUM_VFS_BB;
for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
concrete_fid = qed_vfid_to_concrete(p_hwfn, vf_id);
- qed_fid_pretend(p_hwfn, p_ptt, (u16) concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
qed_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
qed_wr(p_hwfn, p_ptt, CCFC_REG_WEAK_ENABLE_VF, 0x0);
qed_wr(p_hwfn, p_ptt, TCFC_REG_STRONG_ENABLE_VF, 0x1);
@@ -2672,6 +2725,9 @@ static int qed_hw_init_common(struct qed_hwfn *p_hwfn,
/* pretend to original PF */
qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+out:
+ kfree(params);
+
return rc;
}
@@ -2784,7 +2840,7 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
qed_rdma_dpm_bar(p_hwfn, p_ptt);
}
- p_hwfn->wid_count = (u16) n_cpus;
+ p_hwfn->wid_count = (u16)n_cpus;
DP_INFO(p_hwfn,
"doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n",
@@ -3503,8 +3559,8 @@ static void qed_hw_hwfn_prepare(struct qed_hwfn *p_hwfn)
static void get_function_id(struct qed_hwfn *p_hwfn)
{
/* ME Register */
- p_hwfn->hw_info.opaque_fid = (u16) REG_RD(p_hwfn,
- PXP_PF_ME_OPAQUE_ADDR);
+ p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
+ PXP_PF_ME_OPAQUE_ADDR);
p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
@@ -3670,12 +3726,14 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type)
return qed_hsi_def_val[type][chip_id];
}
+
static int
qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
u32 resc_max_val, mcp_resp;
u8 res_id;
int rc;
+
for (res_id = 0; res_id < QED_MAX_RESC; res_id++) {
switch (res_id) {
case QED_LL2_RAM_QUEUE:
@@ -3921,7 +3979,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
* resources allocation queries should be atomic. Since several PFs can
* run in parallel - a resource lock is needed.
* If either the resource lock or resource set value commands are not
- * supported - skip the the max values setting, release the lock if
+ * supported - skip the max values setting, release the lock if
* needed, and proceed to the queries. Other failures, including a
* failure to acquire the lock, will cause this function to fail.
*/
@@ -3934,7 +3992,7 @@ static int qed_hw_get_resc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
} else if (rc == -EINVAL) {
DP_INFO(p_hwfn,
"Skip the max values setting of the soft resources since the resource lock is not supported by the MFW\n");
- } else if (!rc && !resc_lock_params.b_granted) {
+ } else if (!resc_lock_params.b_granted) {
DP_NOTICE(p_hwfn,
"Failed to acquire the resource lock for the resource allocation commands\n");
return -EBUSY;
@@ -4775,7 +4833,7 @@ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id)
if (src_id >= RESC_NUM(p_hwfn, QED_L2_QUEUE)) {
u16 min, max;
- min = (u16) RESC_START(p_hwfn, QED_L2_QUEUE);
+ min = (u16)RESC_START(p_hwfn, QED_L2_QUEUE);
max = min + RESC_NUM(p_hwfn, QED_L2_QUEUE);
DP_NOTICE(p_hwfn,
"l2_queue id [%d] is not valid, available indices [%d - %d]\n",
@@ -4909,7 +4967,7 @@ int qed_set_rxq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct ustorm_eth_queue_zone), timeset);
@@ -4948,7 +5006,7 @@ int qed_set_txq_coalesce(struct qed_hwfn *p_hwfn,
goto out;
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
rc = qed_set_coalesce(p_hwfn, p_ptt, address, &eth_qzone,
sizeof(struct xstorm_eth_queue_zone), timeset);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
index d3c1f3879be8..f8682356d0cf 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h
@@ -15,44 +15,52 @@
#include "qed_int.h"
/**
- * @brief qed_init_dp - initialize the debug level
+ * qed_init_dp(): Initialize the debug level.
*
- * @param cdev
- * @param dp_module
- * @param dp_level
+ * @cdev: Qed dev pointer.
+ * @dp_module: Module debug parameter.
+ * @dp_level: Module debug level.
+ *
+ * Return: Void.
*/
void qed_init_dp(struct qed_dev *cdev,
u32 dp_module,
u8 dp_level);
/**
- * @brief qed_init_struct - initialize the device structure to
- * its defaults
+ * qed_init_struct(): Initialize the device structure to
+ * its defaults.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_struct(struct qed_dev *cdev);
/**
- * @brief qed_resc_free -
+ * qed_resc_free(): Free device resources.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_resc_free(struct qed_dev *cdev);
/**
- * @brief qed_resc_alloc -
+ * qed_resc_alloc(): Alloc device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_resc_alloc(struct qed_dev *cdev);
/**
- * @brief qed_resc_setup -
+ * qed_resc_setup(): Setup device resources.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
+ *
+ * Return: Void.
*/
void qed_resc_setup(struct qed_dev *cdev);
@@ -105,94 +113,96 @@ struct qed_hw_init_params {
};
/**
- * @brief qed_hw_init -
+ * qed_hw_init(): Init Qed hardware.
*
- * @param cdev
- * @param p_params
+ * @cdev: Qed dev pointer.
+ * @p_params: Pointers to params.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params);
/**
- * @brief qed_hw_timers_stop_all - stop the timers HW block
+ * qed_hw_timers_stop_all(): Stop the timers HW block.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return void
+ * Return: void.
*/
void qed_hw_timers_stop_all(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop -
+ * qed_hw_stop(): Stop Qed hardware.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: int.
*/
int qed_hw_stop(struct qed_dev *cdev);
/**
- * @brief qed_hw_stop_fastpath -should be called incase
- * slowpath is still required for the device,
- * but fastpath is not.
+ * qed_hw_stop_fastpath(): Should be called in case
+ * slowpath is still required for the device,
+ * but fastpath is not.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_stop_fastpath(struct qed_dev *cdev);
/**
- * @brief qed_hw_start_fastpath -restart fastpath traffic,
- * only if hw_stop_fastpath was called
+ * qed_hw_start_fastpath(): Restart fastpath traffic,
+ * only if hw_stop_fastpath was called.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn);
-
/**
- * @brief qed_hw_prepare -
+ * qed_hw_prepare(): Prepare Qed hardware.
*
- * @param cdev
- * @param personality - personality to initialize
+ * @cdev: Qed dev pointer.
+ * @personality: Personality to initialize.
*
- * @return int
+ * Return: Int.
*/
int qed_hw_prepare(struct qed_dev *cdev,
int personality);
/**
- * @brief qed_hw_remove -
+ * qed_hw_remove(): Remove Qed hardware.
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_hw_remove(struct qed_dev *cdev);
/**
- * @brief qed_ptt_acquire - Allocate a PTT window
+ * qed_ptt_acquire(): Allocate a PTT window.
*
- * Should be called at the entry point to the driver (at the beginning of an
- * exported function)
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: struct qed_ptt.
*
- * @return struct qed_ptt
+ * Should be called at the entry point to the driver (at the beginning of an
+ * exported function).
*/
struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_release - Release PTT Window
+ * qed_ptt_release(): Release PTT Window.
*
- * Should be called at the end of a flow - at the end of the function that
- * acquired the PTT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
+ * Should be called at the end of a flow - at the end of the function that
+ * acquired the PTT.
*/
void qed_ptt_release(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
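/* A minimal sketch, not part of this patch, of the acquire/use/release flow
 * the comments above describe. The helper name and register offset are
 * illustrative only; qed_rd() is the existing PTT-based register read.
 */
static int example_read_reg(struct qed_hwfn *p_hwfn, u32 addr, u32 *val)
{
	struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);

	if (!p_ptt)		/* no free PTT window right now */
		return -EBUSY;

	*val = qed_rd(p_hwfn, p_ptt, addr);
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}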
@@ -205,15 +215,17 @@ enum qed_dmae_address_type_t {
};
/**
- * @brief qed_dmae_host2grc - copy data from source addr to
- * dmae registers using the given ptt
+ * qed_dmae_host2grc(): Copy data from source addr to
+ * dmae registers using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param grc_addr (dmae_data_offset)
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int
qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
@@ -224,29 +236,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_grc2host - Read data from dmae data offset
- * to source address using the given ptt
+ * qed_dmae_grc2host(): Read data from dmae data offset
+ * to source address using the given ptt.
+ *
+ * @p_ptt: P_ptt.
+ * @grc_addr: GRC address (dmae_data_offset).
+ * @dest_addr: Destination Address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
*
- * @param p_ptt
- * @param grc_addr (dmae_data_offset)
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * Return: Int.
*/
int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords,
struct qed_dmae_params *p_params);
/**
- * @brief qed_dmae_host2host - copy data from to source address
- * to a destination adress (for SRIOV) using the given ptt
+ * qed_dmae_host2host(): Copy data from a source address
+ * to a destination address (for SRIOV) using the given
+ * ptt.
*
- * @param p_hwfn
- * @param p_ptt
- * @param source_addr
- * @param dest_addr
- * @param size_in_dwords
- * @param p_params (default parameters will be used in case of NULL)
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @source_addr: Source address.
+ * @dest_addr: Destination address.
+ * @size_in_dwords: Size.
+ * @p_params: (default parameters will be used in case of NULL).
+ *
+ * Return: Int.
*/
int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -259,51 +276,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain,
void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain);
/**
- * @@brief qed_fw_l2_queue - Get absolute L2 queue ID
+ * qed_fw_l2_queue(): Get absolute L2 queue ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_l2_queue(struct qed_hwfn *p_hwfn,
u16 src_id,
u16 *dst_id);
/**
- * @@brief qed_fw_vport - Get absolute vport ID
+ * qed_fw_vport(): Get absolute vport ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_vport(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @@brief qed_fw_rss_eng - Get absolute RSS engine ID
+ * qed_fw_rss_eng(): Get absolute RSS engine ID.
*
- * @param p_hwfn
- * @param src_id - relative to p_hwfn
- * @param dst_id - absolute per engine
+ * @p_hwfn: HW device data.
+ * @src_id: Relative to p_hwfn.
+ * @dst_id: Absolute per engine.
*
- * @return int
+ * Return: Int.
*/
int qed_fw_rss_eng(struct qed_hwfn *p_hwfn,
u8 src_id,
u8 *dst_id);
/**
- * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter
- * banks that are allocated to the PF.
+ * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter
+ * banks that are allocated to the PF.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return u8 - Number of LLH filter banks
+ * Return: u8 Number of LLH filter banks.
*/
u8 qed_llh_get_num_ppfid(struct qed_dev *cdev);
@@ -314,45 +331,50 @@ enum qed_eng {
};
/**
- * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given
- * LLH filter bank.
+ * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given
+ * LLH filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_ppfid_affinity(struct qed_dev *cdev,
u8 ppfid, enum qed_eng eng);
/**
- * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity
+ * qed_llh_set_roce_affinity(): Set the RoCE engine affinity.
*
- * @param cdev
- * @param eng
+ * @cdev: Qed dev pointer.
+ * @eng: Engine.
*
- * @return int
+ * Return: Int.
*/
int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng);
/**
- * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter
- * bank.
+ * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter
+ * bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to add.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param mac_addr - MAC to add
+ * Return: Int.
*/
int qed_llh_add_mac_filter(struct qed_dev *cdev,
- u8 ppfid, u8 mac_addr[ETH_ALEN]);
+ u8 ppfid, const u8 mac_addr[ETH_ALEN]);
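/* A minimal sketch, not part of this patch: adding and later removing an LLH
 * MAC filter on the default ppfid. The MAC value and helper name are
 * illustrative only.
 */
static int example_llh_mac_filter(struct qed_dev *cdev)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x0e, 0x1e, 0x00, 0x00, 0x01 };
	int rc;

	rc = qed_llh_add_mac_filter(cdev, 0, mac);	/* ppfid 0 is the default */
	if (rc)
		return rc;

	/* ... filter in use ... */

	qed_llh_remove_mac_filter(cdev, 0, mac);
	return 0;
}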
/**
- * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given
- * filter bank.
+ * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given
+ * filter bank.
+ *
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @mac_addr: MAC to remove.
*
- * @param p_ptt
- * @param p_filter - MAC to remove
+ * Return: Void.
*/
void qed_llh_remove_mac_filter(struct qed_dev *cdev,
u8 ppfid, u8 mac_addr[ETH_ALEN]);
@@ -368,15 +390,16 @@ enum qed_llh_prot_filter_type_t {
};
/**
- * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the
- * given filter bank.
+ * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the
+ * given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
- * @param type - type of filters and comparing
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to add.
+ * @dest_port: Destination port to add.
+ *
+ * Return: Int.
*/
int
qed_llh_add_protocol_filter(struct qed_dev *cdev,
@@ -385,14 +408,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from
- * the given filter bank.
+ * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from
+ * the given filter bank.
*
- * @param cdev
- * @param ppfid - relative within the allocated ppfids ('0' is the default one).
- * @param type - type of filters and comparing
- * @param source_port_or_eth_type - source port or ethertype to add
- * @param dest_port - destination port to add
+ * @cdev: Qed dev pointer.
+ * @ppfid: Relative within the allocated ppfids ('0' is the default one).
+ * @type: Type of filters and comparing.
+ * @source_port_or_eth_type: Source port or ethertype to remove.
+ * @dest_port: Destination port to remove.
*/
void
qed_llh_remove_protocol_filter(struct qed_dev *cdev,
@@ -401,31 +424,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev,
u16 source_port_or_eth_type, u16 dest_port);
/**
- * *@brief Cleanup of previous driver remains prior to load
+ * qed_final_cleanup(): Cleanup of previous driver remains prior to load.
*
- * @param p_hwfn
- * @param p_ptt
- * @param id - For PF, engine-relative. For VF, PF-relative.
- * @param is_vf - true iff cleanup is made for a VF.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @id: For PF, engine-relative. For VF, PF-relative.
+ * @is_vf: True iff cleanup is made for a VF.
*
- * @return int
+ * Return: Int.
*/
int qed_final_cleanup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 id, bool is_vf);
/**
- * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue.
+ * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue.
*
- * @param p_hwfn
- * @param p_coal - store coalesce value read from the hardware.
- * @param p_handle
+ * @p_hwfn: HW device data.
+ * @coal: Store coalesce value read from the hardware.
+ * @handle: P_handle.
*
- * @return int
+ * Return: Int.
**/
int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
/**
- * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and
+ * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and
* Tx queue. The fact that we can configure coalescing to up to 511, but on
* varying accuracy [the bigger the value the less accurate] up to a mistake
* of 3usec for the highest values.
@@ -433,37 +456,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle);
* should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff]
* otherwise configuration would break.
*
+ * @rx_coal: Rx Coalesce value in micro seconds.
+ * @tx_coal: TX Coalesce value in micro seconds.
+ * @p_handle: P_handle.
*
- * @param rx_coal - Rx Coalesce value in micro seconds.
- * @param tx_coal - TX Coalesce value in micro seconds.
- * @param p_handle
- *
- * @return int
+ * Return: Int.
**/
int
qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle);
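/* A minimal sketch, not part of this patch, of the "same range" rule stated
 * above: both coalesce values must fall into the same timeset window before
 * qed_set_queue_coalesce() is called. The helper name is illustrative only.
 */
static bool example_coal_same_range(u16 rx_coal, u16 tx_coal)
{
	/* Ranges per the comment: 0-0x7f, 0x80-0xff, 0x100-0x1ff */
	unsigned int rx_range = rx_coal <= 0x7f ? 0 : (rx_coal <= 0xff ? 1 : 2);
	unsigned int tx_range = tx_coal <= 0x7f ? 0 : (tx_coal <= 0xff ? 1 : 2);

	return rx_range == tx_range;
}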
/**
- * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER
+ * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_enable - true/false
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_enable: True/False.
*
- * @return int
+ * Return: Int.
*/
int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool b_enable);
/**
- * @brief db_recovery_add - add doorbell information to the doorbell
- * recovery mechanism.
+ * qed_db_recovery_add(): Add doorbell information to the doorbell
+ * recovery mechanism.
+ *
+ * @cdev: Qed dev pointer.
+ * @db_addr: Doorbell address.
+ * @db_data: Address of where db_data is stored.
+ * @db_width: Doorbell is 32b or 64b.
+ * @db_space: Doorbell recovery addresses are user or kernel space.
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address of where db_data is stored
- * @param db_width - doorbell is 32b pr 64b
- * @param db_space - doorbell recovery addresses are user or kernel space
+ * Return: Int.
*/
int qed_db_recovery_add(struct qed_dev *cdev,
void __iomem *db_addr,
@@ -472,17 +496,18 @@ int qed_db_recovery_add(struct qed_dev *cdev,
enum qed_db_rec_space db_space);
/**
- * @brief db_recovery_del - remove doorbell information from the doorbell
+ * qed_db_recovery_del(): Remove doorbell information from the doorbell
* recovery mechanism. db_data serves as key (db_addr is not unique).
*
- * @param cdev
- * @param db_addr - doorbell address
- * @param db_data - address where db_data is stored. Serves as key for the
+ * @cdev: Qed dev pointer.
+ * @db_addr: doorbell address.
+ * @db_data: address where db_data is stored. Serves as key for the
* entry to delete.
+ *
+ * Return: Int.
*/
int qed_db_recovery_del(struct qed_dev *cdev,
void __iomem *db_addr, void *db_data);
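/* A minimal sketch, not part of this patch, showing that db_data is the key:
 * the same db_data pointer passed to qed_db_recovery_add() must be passed to
 * qed_db_recovery_del(). The struct and helper names are illustrative, and
 * the 32b/kernel-space flags assume the existing DB_REC_WIDTH_32B and
 * DB_REC_KERNEL enum values.
 */
struct example_queue {
	void __iomem *db_addr;
	u32 db_data;
};

static int example_track_doorbell(struct qed_dev *cdev, struct example_queue *q)
{
	return qed_db_recovery_add(cdev, q->db_addr, &q->db_data,
				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
}

static void example_untrack_doorbell(struct qed_dev *cdev, struct example_queue *q)
{
	qed_db_recovery_del(cdev, q->db_addr, &q->db_data);
}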
-
const char *qed_hw_get_resc_name(enum qed_resources res_id);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
index 78070682f2df..6bb4e165b592 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c
@@ -215,10 +215,6 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
qdevlink = devlink_priv(dl);
qdevlink->cdev = cdev;
- rc = devlink_register(dl);
- if (rc)
- goto err_free;
-
rc = devlink_params_register(dl, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
if (rc)
@@ -229,17 +225,13 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev)
QED_DEVLINK_PARAM_ID_IWARP_CMT,
value);
- devlink_params_publish(dl);
cdev->iwarp_cmt = false;
qed_fw_reporters_create(dl);
-
+ devlink_register(dl);
return dl;
err_unregister:
- devlink_unregister(dl);
-
-err_free:
devlink_free(dl);
return ERR_PTR(rc);
@@ -250,11 +242,11 @@ void qed_devlink_unregister(struct devlink *devlink)
if (!devlink)
return;
+ devlink_unregister(devlink);
qed_fw_reporters_destroy(devlink);
devlink_params_unregister(devlink, qed_devlink_params,
ARRAY_SIZE(qed_devlink_params));
- devlink_unregister(devlink);
devlink_free(devlink);
}
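/* A minimal sketch, not part of this patch, of the ordering qed_devlink.c now
 * follows: devlink_register() becomes the last setup step and
 * devlink_unregister() the first teardown step, with params and health
 * reporters handled on either side. Helper names are illustrative only.
 */
static void example_devlink_setup(struct devlink *dl)
{
	/* register params, set driverinit values, create reporters ... */
	devlink_register(dl);		/* make the instance visible last */
}

static void example_devlink_teardown(struct devlink *dl)
{
	devlink_unregister(dl);		/* hide the instance first */
	/* ... destroy reporters, unregister params, then devlink_free(dl) */
}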
diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
index b768f0698170..3764190b948e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c
@@ -30,6 +30,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -89,7 +90,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
struct fcoe_init_ramrod_params *p_ramrod = NULL;
struct fcoe_init_func_ramrod_data *p_data;
- struct e4_fcoe_conn_context *p_cxt = NULL;
+ struct fcoe_conn_context *p_cxt = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
struct qed_cxt_info cxt_info;
@@ -144,7 +145,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
- E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+ TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -506,10 +507,9 @@ static void __iomem *qed_fcoe_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -521,10 +521,9 @@ static void __iomem *qed_fcoe_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -549,7 +548,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
{
- struct e4_fcoe_task_context *p_task_ctx = NULL;
+ struct fcoe_task_context *p_task_ctx = NULL;
u32 i, lc;
int rc;
@@ -561,7 +560,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
if (rc)
continue;
- memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
lc = 0;
SET_FIELD(lc, TIMERS_CONTEXT_VALIDLC0, 1);
@@ -572,7 +571,7 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
p_task_ctx->timer_context.logical_client_1 = cpu_to_le32(lc);
SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
- E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+ TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
}
}
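/* A minimal sketch, not part of this patch, of how the renamed _MASK/_SHIFT
 * pairs (E4_ prefix dropped below in qed_hsi.h) are consumed through the
 * existing SET_FIELD()/GET_FIELD() helpers. The chosen flag is illustrative
 * only.
 */
static void example_set_ctx_flag(struct tstorm_core_conn_ag_ctx *ctx)
{
	SET_FIELD(ctx->flags0, TSTORM_CORE_CONN_AG_CTX_BIT0, 1);

	if (GET_FIELD(ctx->flags0, TSTORM_CORE_CONN_AG_CTX_BIT0))
		;	/* the bit is now set */
}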
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index fb1baa2da2d0..f2cedbd9489c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#ifndef _QED_HSI_H
@@ -38,7 +38,7 @@ enum common_event_opcode {
COMMON_EVENT_VF_PF_CHANNEL,
COMMON_EVENT_VF_FLR,
COMMON_EVENT_PF_UPDATE,
- COMMON_EVENT_MALICIOUS_VF,
+ COMMON_EVENT_FW_ERROR,
COMMON_EVENT_RL_UPDATE,
COMMON_EVENT_EMPTY,
MAX_COMMON_EVENT_OPCODE
@@ -84,6 +84,13 @@ enum core_l4_pseudo_checksum_mode {
MAX_CORE_L4_PSEUDO_CHECKSUM_MODE
};
+/* LL2 SP error code */
+enum core_ll2_error_code {
+ LL2_OK = 0,
+ LL2_ERROR,
+ MAX_CORE_LL2_ERROR_CODE
+};
+
/* Light-L2 RX Producers in Tstorm RAM */
struct core_ll2_port_stats {
struct regpair gsi_invalid_hdr;
@@ -123,6 +130,15 @@ struct core_ll2_ustorm_per_queue_stat {
struct regpair rcv_bcast_pkts;
};
+struct core_ll2_rx_per_queue_stat {
+ struct core_ll2_tstorm_per_queue_stat tstorm_stat;
+ struct core_ll2_ustorm_per_queue_stat ustorm_stat;
+};
+
+struct core_ll2_tx_per_queue_stat {
+ struct core_ll2_pstorm_per_queue_stat pstorm_stat;
+};
+
/* Structure for doorbell data, in PWM mode, for RX producers update. */
struct core_pwm_prod_update_data {
__le16 icid; /* internal CID */
@@ -135,6 +151,15 @@ struct core_pwm_prod_update_data {
struct core_ll2_rx_prod prod; /* Producers */
};
+/* Ramrod data for rx/tx queue statistics query ramrod */
+struct core_queue_stats_query_ramrod_data {
+ u8 rx_stat;
+ u8 tx_stat;
+ __le16 reserved[3];
+ struct regpair rx_stat_addr;
+ struct regpair tx_stat_addr;
+};
+
/* Core Ramrod Command IDs (light L2) */
enum core_ramrod_cmd_id {
CORE_RAMROD_UNUSED,
@@ -210,7 +235,8 @@ struct core_rx_fast_path_cqe {
__le16 vlan;
struct core_rx_cqe_opaque_data opaque_data;
struct parsing_err_flags err_flags;
- __le16 reserved0;
+ u8 packet_source;
+ u8 reserved0;
__le32 reserved1[3];
};
@@ -226,7 +252,8 @@ struct core_rx_gsi_offload_cqe {
__le16 qp_id;
__le32 src_qp;
struct core_rx_cqe_opaque_data opaque_data;
- __le32 reserved;
+ u8 packet_source;
+ u8 reserved[3];
};
/* Core RX CQE for Light L2 */
@@ -245,6 +272,15 @@ union core_rx_cqe_union {
struct core_rx_slow_path_cqe rx_cqe_sp;
};
+/* RX packet source. */
+enum core_rx_pkt_source {
+ CORE_RX_PKT_SOURCE_NETWORK = 0,
+ CORE_RX_PKT_SOURCE_LB,
+ CORE_RX_PKT_SOURCE_TX,
+ CORE_RX_PKT_SOURCE_LL2_TX,
+ MAX_CORE_RX_PKT_SOURCE
+};
+
/* Ramrod data for rx queue start ramrod */
struct core_rx_start_ramrod_data {
struct regpair bd_base;
@@ -362,7 +398,7 @@ struct core_tx_update_ramrod_data {
u8 update_qm_pq_id_flg;
u8 reserved0;
__le16 qm_pq_id;
- __le32 reserved1;
+ __le32 reserved1[1];
};
/* Enum flag for what type of dcb data to update */
@@ -386,224 +422,222 @@ struct pstorm_core_conn_st_ctx {
/* Core Slowpath Connection storm context of Xstorm */
struct xstorm_core_conn_st_ctx {
- __le32 spq_base_lo;
- __le32 spq_base_hi;
- struct regpair consolid_base_addr;
+ struct regpair spq_base_addr;
+ __le32 reserved0[2];
__le16 spq_cons;
- __le16 consolid_cons;
- __le32 reserved0[55];
+ __le16 reserved1[111];
};
-struct e4_xstorm_core_conn_ag_ctx {
+struct xstorm_core_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 consolid_prod;
@@ -657,89 +691,89 @@ struct e4_xstorm_core_conn_ag_ctx {
__le16 word15;
};
-struct e4_tstorm_core_conn_ag_ctx {
+struct tstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -761,63 +795,63 @@ struct e4_tstorm_core_conn_ag_ctx {
__le32 reg10;
};
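The *_MASK/*_SHIFT pairs renamed above pack several small sub-fields into each flags byte of the storm aggregation contexts. A minimal standalone sketch of how such a field is read and written; the FIELD_*_EX helpers are local to the example (in the driver the generic SET_FIELD()/GET_FIELD() helpers from the qed headers serve the same purpose):

#include <stdint.h>
#include <stdio.h>

#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK   0x3
#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT  6

/* Local helpers for the example only */
#define FIELD_GET_EX(val, name) (((val) >> name##_SHIFT) & name##_MASK)
#define FIELD_SET_EX(val, name, f)					\
	(((val) & ~(name##_MASK << name##_SHIFT)) |			\
	 (((f) & name##_MASK) << name##_SHIFT))

int main(void)
{
	uint8_t flags0 = 0;

	/* Set the 2-bit CF0 field that lives in bits 7:6 of flags0 */
	flags0 = FIELD_SET_EX(flags0, TSTORM_CORE_CONN_AG_CTX_CF0, 0x2);
	printf("flags0=0x%02x CF0=%u\n", (unsigned)flags0,
	       (unsigned)FIELD_GET_EX(flags0, TSTORM_CORE_CONN_AG_CTX_CF0));
	return 0;
}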
-struct e4_ustorm_core_conn_ag_ctx {
+struct ustorm_core_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -846,15 +880,15 @@ struct tstorm_core_conn_st_ctx {
};
/* core connection context */
-struct e4_core_conn_context {
+struct core_conn_context {
struct ystorm_core_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_core_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_core_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_core_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_core_conn_ag_ctx tstorm_ag_context;
- struct e4_ustorm_core_conn_ag_ctx ustorm_ag_context;
+ struct xstorm_core_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_core_conn_ag_ctx tstorm_ag_context;
+ struct ustorm_core_conn_ag_ctx ustorm_ag_context;
struct mstorm_core_conn_st_ctx mstorm_st_context;
struct ustorm_core_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
@@ -930,12 +964,12 @@ struct eth_rx_rate_limit {
/* Update RSS indirection table entry command */
struct eth_tstorm_rss_update_data {
- u8 valid;
u8 vport_id;
u8 ind_table_index;
- u8 reserved;
__le16 ind_table_value;
__le16 reserved1;
+ u8 reserved;
+ u8 valid;
};
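The hunk above reorders eth_tstorm_rss_update_data so that the valid flag moves from the first byte to the last. A standalone sketch that simply restates the new byte layout; the structure is re-declared locally with host integer types as an assumption (in the driver the 16-bit members are __le16 and must be written with cpu_to_le16()):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Local mirror of the reordered structure, for illustration only */
struct rss_update_layout {
	uint8_t  vport_id;
	uint8_t  ind_table_index;
	uint16_t ind_table_value;	/* __le16 in the HSI */
	uint16_t reserved1;		/* __le16 in the HSI */
	uint8_t  reserved;
	uint8_t  valid;			/* moved to the end by this change */
};

/* The entry stays 8 bytes; 'valid' now sits at offset 7 instead of 0 */
static_assert(sizeof(struct rss_update_layout) == 8, "8-byte entry");
static_assert(offsetof(struct rss_update_layout, ind_table_value) == 2,
	      "ind_table_value at byte 2");
static_assert(offsetof(struct rss_update_layout, valid) == 7, "valid last");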
struct eth_ustorm_per_pf_stat {
@@ -967,19 +1001,20 @@ struct vf_pf_channel_eqe_data {
struct regpair msg_addr;
};
-/* Event Ring malicious VF data */
-struct malicious_vf_eqe_data {
- u8 vf_id;
- u8 err_id;
- __le16 reserved[3];
-};
-
/* Event Ring initial cleanup data */
struct initial_cleanup_eqe_data {
u8 vf_id;
u8 reserved[7];
};
+/* FW error data */
+struct fw_err_data {
+ u8 recovery_scope;
+ u8 err_id;
+ __le16 entity_id;
+ u8 reserved[4];
+};
+
/* Event Data Union */
union event_ring_data {
u8 bytes[8];
@@ -987,8 +1022,8 @@ union event_ring_data {
struct iscsi_eqe_data iscsi_info;
struct iscsi_connect_done_results iscsi_conn_done_info;
union rdma_eqe_data rdma_data;
- struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
+ struct fw_err_data err_data;
};
/* Event Ring Entry */
@@ -1042,6 +1077,15 @@ struct hsi_fp_ver_struct {
u8 major_ver_arr[2];
};
+/* Integration Phase */
+enum integ_phase {
+ INTEG_PHASE_BB_A0_LATEST = 3,
+ INTEG_PHASE_BB_B0_NO_MCP = 10,
+ INTEG_PHASE_BB_B0_WITH_MCP = 11,
+ MAX_INTEG_PHASE
+};
+
+/* iWARP LL2 TX queues */
enum iwarp_ll2_tx_queues {
IWARP_LL2_IN_ORDER_TX_QUEUE = 1,
IWARP_LL2_ALIGNED_TX_QUEUE,
@@ -1050,9 +1094,9 @@ enum iwarp_ll2_tx_queues {
MAX_IWARP_LL2_TX_QUEUES
};
-/* Malicious VF error ID */
-enum malicious_vf_error_id {
- MALICIOUS_VF_NO_ERROR,
+/* Function error ID */
+enum func_err_id {
+ FUNC_NO_ERROR,
VF_PF_CHANNEL_NOT_READY,
VF_ZONE_MSG_NOT_VALID,
VF_ZONE_FUNC_NOT_ENABLED,
@@ -1087,13 +1131,33 @@ enum malicious_vf_error_id {
CORE_PACKET_SIZE_TOO_LARGE,
CORE_ILLEGAL_BD_FLAGS,
CORE_GSI_PACKET_VIOLATION,
- MAX_MALICIOUS_VF_ERROR_ID,
+ MAX_FUNC_ERR_ID
+};
+
+/* FW error handling mode */
+enum fw_err_mode {
+ FW_ERR_FATAL_ASSERT,
+ FW_ERR_DRV_REPORT,
+ MAX_FW_ERR_MODE
+};
+
+/* FW error recovery scope */
+enum fw_err_recovery_scope {
+ ERR_SCOPE_INVALID,
+ ERR_SCOPE_TX_Q,
+ ERR_SCOPE_RX_Q,
+ ERR_SCOPE_QP,
+ ERR_SCOPE_VPORT,
+ ERR_SCOPE_FUNC,
+ ERR_SCOPE_PORT,
+ ERR_SCOPE_ENGINE,
+ MAX_FW_ERR_RECOVERY_SCOPE
};
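Taken together, fw_err_data, func_err_id, fw_err_mode and fw_err_recovery_scope let the firmware report an error along with the scope a driver would need to recover. A hedged sketch of how a driver-side consumer might dispatch on that scope; the handler itself is hypothetical, only the type and enumerator names come from the header above, which is assumed to be included:

/* Hypothetical consumer of the FW error EQE payload defined above */
static void example_handle_fw_err(const struct fw_err_data *err)
{
	switch (err->recovery_scope) {
	case ERR_SCOPE_TX_Q:
	case ERR_SCOPE_RX_Q:
	case ERR_SCOPE_QP:
		/* entity_id presumably identifies the queue/QP to restart */
		break;
	case ERR_SCOPE_VPORT:
	case ERR_SCOPE_FUNC:
		/* fall back to reloading the whole vport/function */
		break;
	case ERR_SCOPE_PORT:
	case ERR_SCOPE_ENGINE:
		/* only a full recovery flow can help here */
		break;
	case ERR_SCOPE_INVALID:
	default:
		/* err->err_id still says what happened (see func_err_id) */
		break;
	}
}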
/* Mstorm non-triggering VF zone */
struct mstorm_non_trigger_vf_zone {
struct eth_mstorm_per_queue_stat eth_queue_stat;
- struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD];
+ struct eth_rx_prod_data eth_rx_queue_producers[ETH_MAX_RXQ_VF_QUAD];
};
/* Mstorm VF zone */
@@ -1148,7 +1212,7 @@ struct pf_start_tunnel_config {
/* Ramrod data for PF start ramrod */
struct pf_start_ramrod_data {
struct regpair event_ring_pbl_addr;
- struct regpair consolid_q_pbl_addr;
+ struct regpair consolid_q_pbl_base_addr;
struct pf_start_tunnel_config tunnel_config;
__le16 event_ring_sb_id;
u8 base_vf_id;
@@ -1166,6 +1230,9 @@ struct pf_start_ramrod_data {
u8 reserved0;
struct hsi_fp_ver_struct hsi_fp_ver;
struct outer_tag_config_struct outer_tag_config;
+ u8 pf_fp_err_mode;
+ u8 consolid_q_num_pages;
+ u8 reserved[6];
};
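The two new bytes at the tail of pf_start_ramrod_data carry the firmware error-handling policy and the consolidation queue page count. A minimal, hypothetical fill of just those fields; the surrounding ramrod preparation is omitted, p_ramrod stands for a pointer obtained from the usual SPQ entry setup, and the chosen values are illustrative only:

/* Hypothetical snippet: assumes p_ramrod points at the pf_start_ramrod_data
 * element inside an already-allocated SPQ entry.
 */
static void example_fill_err_mode(struct pf_start_ramrod_data *p_ramrod,
				  u8 num_consolid_pages)
{
	/* Ask firmware to report errors to the driver instead of asserting */
	p_ramrod->pf_fp_err_mode = FW_ERR_DRV_REPORT;
	p_ramrod->consolid_q_num_pages = num_consolid_pages;
}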
/* Data for port update ramrod */
@@ -1230,6 +1297,13 @@ enum ports_mode {
MAX_PORTS_MODE
};
+/* Protocol-common error code */
+enum protocol_common_error_code {
+ COMMON_ERR_CODE_OK = 0,
+ COMMON_ERR_CODE_ERROR,
+ MAX_PROTOCOL_COMMON_ERROR_CODE
+};
+
/* use to index in hsi_fp_[major|minor]_ver_arr per protocol */
enum protocol_version_array_key {
ETH_VER_KEY = 0,
@@ -1525,74 +1599,74 @@ enum dmae_cmd_src_enum {
MAX_DMAE_CMD_SRC_ENUM
};
-struct e4_mstorm_core_conn_ag_ctx {
+struct mstorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_ystorm_core_conn_ag_ctx {
+struct ystorm_core_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -1704,6 +1778,7 @@ struct igu_msix_vector {
#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF
#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24
};
+
/* per encapsulation type enabling flags */
struct prs_reg_encapsulation_type_en {
u8 flags;
@@ -1778,22 +1853,22 @@ struct qm_rf_opportunistic_mask {
};
/* QM hardware structure of QM map memory */
-struct qm_rf_pq_map_e4 {
+struct qm_rf_pq_map {
__le32 reg;
-#define QM_RF_PQ_MAP_E4_PQ_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_PQ_VALID_SHIFT 0
-#define QM_RF_PQ_MAP_E4_RL_ID_MASK 0xFF
-#define QM_RF_PQ_MAP_E4_RL_ID_SHIFT 1
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_MASK 0x1FF
-#define QM_RF_PQ_MAP_E4_VP_PQ_ID_SHIFT 9
-#define QM_RF_PQ_MAP_E4_VOQ_MASK 0x1F
-#define QM_RF_PQ_MAP_E4_VOQ_SHIFT 18
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_MASK 0x3
-#define QM_RF_PQ_MAP_E4_WRR_WEIGHT_GROUP_SHIFT 23
-#define QM_RF_PQ_MAP_E4_RL_VALID_MASK 0x1
-#define QM_RF_PQ_MAP_E4_RL_VALID_SHIFT 25
-#define QM_RF_PQ_MAP_E4_RESERVED_MASK 0x3F
-#define QM_RF_PQ_MAP_E4_RESERVED_SHIFT 26
+#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0
+#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF
+#define QM_RF_PQ_MAP_RL_ID_SHIFT 1
+#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF
+#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9
+#define QM_RF_PQ_MAP_VOQ_MASK 0x1F
+#define QM_RF_PQ_MAP_VOQ_SHIFT 18
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3
+#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23
+#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1
+#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25
+#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F
+#define QM_RF_PQ_MAP_RESERVED_SHIFT 26
};
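The renamed QM_RF_PQ_MAP_* fields still describe how one 32-bit QM map register is packed. A standalone sketch composing that register from its parts; the field values are arbitrary, the shifts are copied from the definitions above, and in the driver the result would be stored as __le32 via cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

#define QM_RF_PQ_MAP_PQ_VALID_SHIFT		0
#define QM_RF_PQ_MAP_RL_ID_SHIFT		1
#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT		9
#define QM_RF_PQ_MAP_VOQ_SHIFT			18
#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT	23
#define QM_RF_PQ_MAP_RL_VALID_SHIFT		25

int main(void)
{
	/* Example: valid PQ, RL 5, vport PQ 17, VOQ 3, WRR group 1, RL valid */
	uint32_t reg = (1u  << QM_RF_PQ_MAP_PQ_VALID_SHIFT) |
		       (5u  << QM_RF_PQ_MAP_RL_ID_SHIFT) |
		       (17u << QM_RF_PQ_MAP_VP_PQ_ID_SHIFT) |
		       (3u  << QM_RF_PQ_MAP_VOQ_SHIFT) |
		       (1u  << QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT) |
		       (1u  << QM_RF_PQ_MAP_RL_VALID_SHIFT);

	printf("qm_rf_pq_map.reg = 0x%08x\n", reg);
	return 0;
}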
/* Completion params for aggregated interrupt completion */
@@ -1831,769 +1906,6 @@ struct virt_mem_desc {
u32 size; /* In bytes */
};
-/****************************************/
-/* Debug Tools HSI constants and macros */
-/****************************************/
-
-enum block_id {
- BLOCK_GRC,
- BLOCK_MISCS,
- BLOCK_MISC,
- BLOCK_DBU,
- BLOCK_PGLUE_B,
- BLOCK_CNIG,
- BLOCK_CPMU,
- BLOCK_NCSI,
- BLOCK_OPTE,
- BLOCK_BMB,
- BLOCK_PCIE,
- BLOCK_MCP,
- BLOCK_MCP2,
- BLOCK_PSWHST,
- BLOCK_PSWHST2,
- BLOCK_PSWRD,
- BLOCK_PSWRD2,
- BLOCK_PSWWR,
- BLOCK_PSWWR2,
- BLOCK_PSWRQ,
- BLOCK_PSWRQ2,
- BLOCK_PGLCS,
- BLOCK_DMAE,
- BLOCK_PTU,
- BLOCK_TCM,
- BLOCK_MCM,
- BLOCK_UCM,
- BLOCK_XCM,
- BLOCK_YCM,
- BLOCK_PCM,
- BLOCK_QM,
- BLOCK_TM,
- BLOCK_DORQ,
- BLOCK_BRB,
- BLOCK_SRC,
- BLOCK_PRS,
- BLOCK_TSDM,
- BLOCK_MSDM,
- BLOCK_USDM,
- BLOCK_XSDM,
- BLOCK_YSDM,
- BLOCK_PSDM,
- BLOCK_TSEM,
- BLOCK_MSEM,
- BLOCK_USEM,
- BLOCK_XSEM,
- BLOCK_YSEM,
- BLOCK_PSEM,
- BLOCK_RSS,
- BLOCK_TMLD,
- BLOCK_MULD,
- BLOCK_YULD,
- BLOCK_XYLD,
- BLOCK_PRM,
- BLOCK_PBF_PB1,
- BLOCK_PBF_PB2,
- BLOCK_RPB,
- BLOCK_BTB,
- BLOCK_PBF,
- BLOCK_RDIF,
- BLOCK_TDIF,
- BLOCK_CDU,
- BLOCK_CCFC,
- BLOCK_TCFC,
- BLOCK_IGU,
- BLOCK_CAU,
- BLOCK_UMAC,
- BLOCK_XMAC,
- BLOCK_MSTAT,
- BLOCK_DBG,
- BLOCK_NIG,
- BLOCK_WOL,
- BLOCK_BMBN,
- BLOCK_IPC,
- BLOCK_NWM,
- BLOCK_NWS,
- BLOCK_MS,
- BLOCK_PHY_PCIE,
- BLOCK_LED,
- BLOCK_AVS_WRAP,
- BLOCK_PXPREQBUS,
- BLOCK_BAR0_MAP,
- BLOCK_MCP_FIO,
- BLOCK_LAST_INIT,
- BLOCK_PRS_FC,
- BLOCK_PBF_FC,
- BLOCK_NIG_LB_FC,
- BLOCK_NIG_LB_FC_PLLH,
- BLOCK_NIG_TX_FC_PLLH,
- BLOCK_NIG_TX_FC,
- BLOCK_NIG_RX_FC_PLLH,
- BLOCK_NIG_RX_FC,
- MAX_BLOCK_ID
-};
-
-/* binary debug buffer types */
-enum bin_dbg_buffer_type {
- BIN_BUF_DBG_MODE_TREE,
- BIN_BUF_DBG_DUMP_REG,
- BIN_BUF_DBG_DUMP_MEM,
- BIN_BUF_DBG_IDLE_CHK_REGS,
- BIN_BUF_DBG_IDLE_CHK_IMMS,
- BIN_BUF_DBG_IDLE_CHK_RULES,
- BIN_BUF_DBG_IDLE_CHK_PARSING_DATA,
- BIN_BUF_DBG_ATTN_BLOCKS,
- BIN_BUF_DBG_ATTN_REGS,
- BIN_BUF_DBG_ATTN_INDEXES,
- BIN_BUF_DBG_ATTN_NAME_OFFSETS,
- BIN_BUF_DBG_BLOCKS,
- BIN_BUF_DBG_BLOCKS_CHIP_DATA,
- BIN_BUF_DBG_BUS_LINES,
- BIN_BUF_DBG_BLOCKS_USER_DATA,
- BIN_BUF_DBG_BLOCKS_CHIP_USER_DATA,
- BIN_BUF_DBG_BUS_LINE_NAME_OFFSETS,
- BIN_BUF_DBG_RESET_REGS,
- BIN_BUF_DBG_PARSING_STRINGS,
- MAX_BIN_DBG_BUFFER_TYPE
-};
-
-
-/* Attention bit mapping */
-struct dbg_attn_bit_mapping {
- u16 data;
-#define DBG_ATTN_BIT_MAPPING_VAL_MASK 0x7FFF
-#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT 0
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK 0x1
-#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT 15
-};
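dbg_attn_bit_mapping packs either an attention-bit value or an unused-bit count into one 16-bit word, discriminated by the top bit. A standalone decode sketch using the masks above; the interpretation of the flag as "count of unused bits" is an assumption based on the field name:

#include <stdint.h>
#include <stdio.h>

#define DBG_ATTN_BIT_MAPPING_VAL_MASK			0x7FFF
#define DBG_ATTN_BIT_MAPPING_VAL_SHIFT			0
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK	0x1
#define DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT	15

static void decode_attn_bit_mapping(uint16_t data)
{
	uint16_t val = (data >> DBG_ATTN_BIT_MAPPING_VAL_SHIFT) &
		       DBG_ATTN_BIT_MAPPING_VAL_MASK;
	int unused_cnt = (data >> DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_SHIFT) &
			 DBG_ATTN_BIT_MAPPING_IS_UNUSED_BIT_CNT_MASK;

	if (unused_cnt)
		printf("%u unused attention bit(s)\n", (unsigned)val);
	else
		printf("attention bit value %u\n", (unsigned)val);
}

int main(void)
{
	decode_attn_bit_mapping(0x8003);	/* unused-bit count of 3 */
	decode_attn_bit_mapping(0x0007);	/* attention bit 7 */
	return 0;
}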
-
-/* Attention block per-type data */
-struct dbg_attn_block_type_data {
- u16 names_offset;
- u16 reserved1;
- u8 num_regs;
- u8 reserved2;
- u16 regs_offset;
-
-};
-
-/* Block attentions */
-struct dbg_attn_block {
- struct dbg_attn_block_type_data per_type_data[2];
-};
-
-/* Attention register result */
-struct dbg_attn_reg_result {
- u32 data;
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_RESULT_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_RESULT_NUM_REG_ATTN_SHIFT 24
- u16 block_attn_offset;
- u16 reserved;
- u32 sts_val;
- u32 mask_val;
-};
-
-/* Attention block result */
-struct dbg_attn_block_result {
- u8 block_id;
- u8 data;
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_MASK 0x3
-#define DBG_ATTN_BLOCK_RESULT_ATTN_TYPE_SHIFT 0
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_MASK 0x3F
-#define DBG_ATTN_BLOCK_RESULT_NUM_REGS_SHIFT 2
- u16 names_offset;
- struct dbg_attn_reg_result reg_results[15];
-};
-
-/* Mode header */
-struct dbg_mode_hdr {
- u16 data;
-#define DBG_MODE_HDR_EVAL_MODE_MASK 0x1
-#define DBG_MODE_HDR_EVAL_MODE_SHIFT 0
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_MASK 0x7FFF
-#define DBG_MODE_HDR_MODES_BUF_OFFSET_SHIFT 1
-};
-
-/* Attention register */
-struct dbg_attn_reg {
- struct dbg_mode_hdr mode;
- u16 block_attn_offset;
- u32 data;
-#define DBG_ATTN_REG_STS_ADDRESS_MASK 0xFFFFFF
-#define DBG_ATTN_REG_STS_ADDRESS_SHIFT 0
-#define DBG_ATTN_REG_NUM_REG_ATTN_MASK 0xFF
-#define DBG_ATTN_REG_NUM_REG_ATTN_SHIFT 24
- u32 sts_clr_address;
- u32 mask_address;
-};
-
-/* Attention types */
-enum dbg_attn_type {
- ATTN_TYPE_INTERRUPT,
- ATTN_TYPE_PARITY,
- MAX_DBG_ATTN_TYPE
-};
-
-/* Block debug data */
-struct dbg_block {
- u8 name[15];
- u8 associated_storm_letter;
-};
-
-/* Chip-specific block debug data */
-struct dbg_block_chip {
- u8 flags;
-#define DBG_BLOCK_CHIP_IS_REMOVED_MASK 0x1
-#define DBG_BLOCK_CHIP_IS_REMOVED_SHIFT 0
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_RESET_REG_SHIFT 1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_MASK 0x1
-#define DBG_BLOCK_CHIP_UNRESET_BEFORE_DUMP_SHIFT 2
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_DBG_BUS_SHIFT 3
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_MASK 0x1
-#define DBG_BLOCK_CHIP_HAS_LATENCY_EVENTS_SHIFT 4
-#define DBG_BLOCK_CHIP_RESERVED0_MASK 0x7
-#define DBG_BLOCK_CHIP_RESERVED0_SHIFT 5
- u8 dbg_client_id;
- u8 reset_reg_id;
- u8 reset_reg_bit_offset;
- struct dbg_mode_hdr dbg_bus_mode;
- u16 reserved1;
- u8 reserved2;
- u8 num_of_dbg_bus_lines;
- u16 dbg_bus_lines_offset;
- u32 dbg_select_reg_addr;
- u32 dbg_dword_enable_reg_addr;
- u32 dbg_shift_reg_addr;
- u32 dbg_force_valid_reg_addr;
- u32 dbg_force_frame_reg_addr;
-};
-
-/* Chip-specific block user debug data */
-struct dbg_block_chip_user {
- u8 num_of_dbg_bus_lines;
- u8 has_latency_events;
- u16 names_offset;
-};
-
-/* Block user debug data */
-struct dbg_block_user {
- u8 name[16];
-};
-
-/* Block Debug line data */
-struct dbg_bus_line {
- u8 data;
-#define DBG_BUS_LINE_NUM_OF_GROUPS_MASK 0xF
-#define DBG_BUS_LINE_NUM_OF_GROUPS_SHIFT 0
-#define DBG_BUS_LINE_IS_256B_MASK 0x1
-#define DBG_BUS_LINE_IS_256B_SHIFT 4
-#define DBG_BUS_LINE_RESERVED_MASK 0x7
-#define DBG_BUS_LINE_RESERVED_SHIFT 5
- u8 group_sizes;
-};
-
-/* Condition header for registers dump */
-struct dbg_dump_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u8 block_id; /* block ID */
- u8 data_size; /* size in dwords of the data following this header */
-};
-
-/* Memory data for registers dump */
-struct dbg_dump_mem {
- u32 dword0;
-#define DBG_DUMP_MEM_ADDRESS_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_ADDRESS_SHIFT 0
-#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK 0xFF
-#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT 24
- u32 dword1;
-#define DBG_DUMP_MEM_LENGTH_MASK 0xFFFFFF
-#define DBG_DUMP_MEM_LENGTH_SHIFT 0
-#define DBG_DUMP_MEM_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_MEM_WIDE_BUS_SHIFT 24
-#define DBG_DUMP_MEM_RESERVED_MASK 0x7F
-#define DBG_DUMP_MEM_RESERVED_SHIFT 25
-};
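dbg_dump_mem squeezes a GRC address (in dwords), a memory-group id, a length and a wide-bus flag into two dwords. A standalone decode sketch with the masks and shifts copied from the definitions above; the sample dwords are arbitrary:

#include <stdint.h>
#include <stdio.h>

#define DBG_DUMP_MEM_ADDRESS_MASK	0xFFFFFF
#define DBG_DUMP_MEM_ADDRESS_SHIFT	0
#define DBG_DUMP_MEM_MEM_GROUP_ID_MASK	0xFF
#define DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT	24
#define DBG_DUMP_MEM_LENGTH_MASK	0xFFFFFF
#define DBG_DUMP_MEM_LENGTH_SHIFT	0
#define DBG_DUMP_MEM_WIDE_BUS_MASK	0x1
#define DBG_DUMP_MEM_WIDE_BUS_SHIFT	24

static void decode_dump_mem(uint32_t dword0, uint32_t dword1)
{
	uint32_t addr  = (dword0 >> DBG_DUMP_MEM_ADDRESS_SHIFT) &
			 DBG_DUMP_MEM_ADDRESS_MASK;
	uint32_t group = (dword0 >> DBG_DUMP_MEM_MEM_GROUP_ID_SHIFT) &
			 DBG_DUMP_MEM_MEM_GROUP_ID_MASK;
	uint32_t len   = (dword1 >> DBG_DUMP_MEM_LENGTH_SHIFT) &
			 DBG_DUMP_MEM_LENGTH_MASK;
	uint32_t wide  = (dword1 >> DBG_DUMP_MEM_WIDE_BUS_SHIFT) &
			 DBG_DUMP_MEM_WIDE_BUS_MASK;

	printf("addr=0x%06x group=%u len=%u dwords wide_bus=%u\n",
	       addr, group, len, wide);
}

int main(void)
{
	decode_dump_mem(0x05001234, 0x01000040);
	return 0;
}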
-
-/* Register data for registers dump */
-struct dbg_dump_reg {
- u32 data;
-#define DBG_DUMP_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_DUMP_REG_ADDRESS_SHIFT 0
-#define DBG_DUMP_REG_WIDE_BUS_MASK 0x1
-#define DBG_DUMP_REG_WIDE_BUS_SHIFT 23
-#define DBG_DUMP_REG_LENGTH_MASK 0xFF
-#define DBG_DUMP_REG_LENGTH_SHIFT 24
-};
-
-/* Split header for registers dump */
-struct dbg_dump_split_hdr {
- u32 hdr;
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_MASK 0xFFFFFF
-#define DBG_DUMP_SPLIT_HDR_DATA_SIZE_SHIFT 0
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_MASK 0xFF
-#define DBG_DUMP_SPLIT_HDR_SPLIT_TYPE_ID_SHIFT 24
-};
-
-/* Condition header for idle check */
-struct dbg_idle_chk_cond_hdr {
- struct dbg_mode_hdr mode; /* Mode header */
- u16 data_size; /* size in dwords of the data following this header */
-};
-
-/* Idle Check condition register */
-struct dbg_idle_chk_cond_reg {
- u32 data;
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_COND_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_COND_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_COND_REG_BLOCK_ID_SHIFT 24
- u16 num_entries;
- u8 entry_size;
- u8 start_entry;
-};
-
-/* Idle Check info register */
-struct dbg_idle_chk_info_reg {
- u32 data;
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_MASK 0x7FFFFF
-#define DBG_IDLE_CHK_INFO_REG_ADDRESS_SHIFT 0
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_MASK 0x1
-#define DBG_IDLE_CHK_INFO_REG_WIDE_BUS_SHIFT 23
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_MASK 0xFF
-#define DBG_IDLE_CHK_INFO_REG_BLOCK_ID_SHIFT 24
- u16 size; /* register size in dwords */
- struct dbg_mode_hdr mode; /* Mode header */
-};
-
-/* Idle Check register */
-union dbg_idle_chk_reg {
- struct dbg_idle_chk_cond_reg cond_reg; /* condition register */
- struct dbg_idle_chk_info_reg info_reg; /* info register */
-};
-
-/* Idle Check result header */
-struct dbg_idle_chk_result_hdr {
- u16 rule_id; /* Failing rule index */
- u16 mem_entry_id; /* Failing memory entry index */
- u8 num_dumped_cond_regs; /* number of dumped condition registers */
- u8 num_dumped_info_regs; /* number of dumped info registers */
- u8 severity; /* from dbg_idle_chk_severity_types enum */
- u8 reserved;
-};
-
-/* Idle Check result register header */
-struct dbg_idle_chk_result_reg_hdr {
- u8 data;
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_MASK 0x1
-#define DBG_IDLE_CHK_RESULT_REG_HDR_IS_MEM_SHIFT 0
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_MASK 0x7F
-#define DBG_IDLE_CHK_RESULT_REG_HDR_REG_ID_SHIFT 1
- u8 start_entry; /* index of the first checked entry */
- u16 size; /* register size in dwords */
-};
-
-/* Idle Check rule */
-struct dbg_idle_chk_rule {
- u16 rule_id; /* Idle Check rule ID */
- u8 severity; /* value from dbg_idle_chk_severity_types enum */
- u8 cond_id; /* Condition ID */
- u8 num_cond_regs; /* number of condition registers */
- u8 num_info_regs; /* number of info registers */
- u8 num_imms; /* number of immediates in the condition */
- u8 reserved1;
- u16 reg_offset; /* offset of this rule's registers in the idle check
- * register array (in dbg_idle_chk_reg units).
- */
- u16 imm_offset; /* offset of this rule's immediate values in the
- * immediate values array (in dwords).
- */
-};
-
-/* Idle Check rule parsing data */
-struct dbg_idle_chk_rule_parsing_data {
- u32 data;
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_MASK 0x1
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_HAS_FW_MSG_SHIFT 0
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_MASK 0x7FFFFFFF
-#define DBG_IDLE_CHK_RULE_PARSING_DATA_STR_OFFSET_SHIFT 1
-};
-
-/* Idle check severity types */
-enum dbg_idle_chk_severity_types {
- /* idle check failure should cause an error */
- IDLE_CHK_SEVERITY_ERROR,
- /* idle check failure should cause an error only if there's no traffic */
- IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC,
- /* idle check failure should cause a warning */
- IDLE_CHK_SEVERITY_WARNING,
- MAX_DBG_IDLE_CHK_SEVERITY_TYPES
-};
-
-/* Reset register */
-struct dbg_reset_reg {
- u32 data;
-#define DBG_RESET_REG_ADDR_MASK 0xFFFFFF
-#define DBG_RESET_REG_ADDR_SHIFT 0
-#define DBG_RESET_REG_IS_REMOVED_MASK 0x1
-#define DBG_RESET_REG_IS_REMOVED_SHIFT 24
-#define DBG_RESET_REG_RESERVED_MASK 0x7F
-#define DBG_RESET_REG_RESERVED_SHIFT 25
-};
-
-/* Debug Bus block data */
-struct dbg_bus_block_data {
- u8 enable_mask;
- u8 right_shift;
- u8 force_valid_mask;
- u8 force_frame_mask;
- u8 dword_mask;
- u8 line_num;
- u8 hw_id;
- u8 flags;
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_MASK 0x1
-#define DBG_BUS_BLOCK_DATA_IS_256B_LINE_SHIFT 0
-#define DBG_BUS_BLOCK_DATA_RESERVED_MASK 0x7F
-#define DBG_BUS_BLOCK_DATA_RESERVED_SHIFT 1
-};
-
-enum dbg_bus_clients {
- DBG_BUS_CLIENT_RBCN,
- DBG_BUS_CLIENT_RBCP,
- DBG_BUS_CLIENT_RBCR,
- DBG_BUS_CLIENT_RBCT,
- DBG_BUS_CLIENT_RBCU,
- DBG_BUS_CLIENT_RBCF,
- DBG_BUS_CLIENT_RBCX,
- DBG_BUS_CLIENT_RBCS,
- DBG_BUS_CLIENT_RBCH,
- DBG_BUS_CLIENT_RBCZ,
- DBG_BUS_CLIENT_OTHER_ENGINE,
- DBG_BUS_CLIENT_TIMESTAMP,
- DBG_BUS_CLIENT_CPU,
- DBG_BUS_CLIENT_RBCY,
- DBG_BUS_CLIENT_RBCQ,
- DBG_BUS_CLIENT_RBCM,
- DBG_BUS_CLIENT_RBCB,
- DBG_BUS_CLIENT_RBCW,
- DBG_BUS_CLIENT_RBCV,
- MAX_DBG_BUS_CLIENTS
-};
-
-/* Debug Bus constraint operation types */
-enum dbg_bus_constraint_ops {
- DBG_BUS_CONSTRAINT_OP_EQ,
- DBG_BUS_CONSTRAINT_OP_NE,
- DBG_BUS_CONSTRAINT_OP_LT,
- DBG_BUS_CONSTRAINT_OP_LTC,
- DBG_BUS_CONSTRAINT_OP_LE,
- DBG_BUS_CONSTRAINT_OP_LEC,
- DBG_BUS_CONSTRAINT_OP_GT,
- DBG_BUS_CONSTRAINT_OP_GTC,
- DBG_BUS_CONSTRAINT_OP_GE,
- DBG_BUS_CONSTRAINT_OP_GEC,
- MAX_DBG_BUS_CONSTRAINT_OPS
-};
-
-/* Debug Bus trigger state data */
-struct dbg_bus_trigger_state_data {
- u8 msg_len;
- u8 constraint_dword_mask;
- u8 storm_id;
- u8 reserved;
-};
-
-/* Debug Bus memory address */
-struct dbg_bus_mem_addr {
- u32 lo;
- u32 hi;
-};
-
-/* Debug Bus PCI buffer data */
-struct dbg_bus_pci_buf_data {
- struct dbg_bus_mem_addr phys_addr; /* PCI buffer physical address */
- struct dbg_bus_mem_addr virt_addr; /* PCI buffer virtual address */
- u32 size; /* PCI buffer size in bytes */
-};
-
-/* Debug Bus Storm EID range filter params */
-struct dbg_bus_storm_eid_range_params {
- u8 min; /* Minimal event ID to filter on */
- u8 max; /* Maximal event ID to filter on */
-};
-
-/* Debug Bus Storm EID mask filter params */
-struct dbg_bus_storm_eid_mask_params {
- u8 val; /* Event ID value */
- u8 mask; /* Event ID mask. 1s in the mask = don't care bits. */
-};
-
-/* Debug Bus Storm EID filter params */
-union dbg_bus_storm_eid_params {
- struct dbg_bus_storm_eid_range_params range;
- struct dbg_bus_storm_eid_mask_params mask;
-};
-
-/* Debug Bus Storm data */
-struct dbg_bus_storm_data {
- u8 enabled;
- u8 mode;
- u8 hw_id;
- u8 eid_filter_en;
- u8 eid_range_not_mask;
- u8 cid_filter_en;
- union dbg_bus_storm_eid_params eid_filter_params;
- u32 cid;
-};
-
-/* Debug Bus data */
-struct dbg_bus_data {
- u32 app_version;
- u8 state;
- u8 mode_256b_en;
- u8 num_enabled_blocks;
- u8 num_enabled_storms;
- u8 target;
- u8 one_shot_en;
- u8 grc_input_en;
- u8 timestamp_input_en;
- u8 filter_en;
- u8 adding_filter;
- u8 filter_pre_trigger;
- u8 filter_post_trigger;
- u8 trigger_en;
- u8 filter_constraint_dword_mask;
- u8 next_trigger_state;
- u8 next_constraint_id;
- struct dbg_bus_trigger_state_data trigger_states[3];
- u8 filter_msg_len;
- u8 rcv_from_other_engine;
- u8 blocks_dword_mask;
- u8 blocks_dword_overlap;
- u32 hw_id_mask;
- struct dbg_bus_pci_buf_data pci_buf;
- struct dbg_bus_block_data blocks[132];
- struct dbg_bus_storm_data storms[6];
-};
-
-/* Debug bus states */
-enum dbg_bus_states {
- DBG_BUS_STATE_IDLE,
- DBG_BUS_STATE_READY,
- DBG_BUS_STATE_RECORDING,
- DBG_BUS_STATE_STOPPED,
- MAX_DBG_BUS_STATES
-};
-
-/* Debug Bus Storm modes */
-enum dbg_bus_storm_modes {
- DBG_BUS_STORM_MODE_PRINTF,
- DBG_BUS_STORM_MODE_PRAM_ADDR,
- DBG_BUS_STORM_MODE_DRA_RW,
- DBG_BUS_STORM_MODE_DRA_W,
- DBG_BUS_STORM_MODE_LD_ST_ADDR,
- DBG_BUS_STORM_MODE_DRA_FSM,
- DBG_BUS_STORM_MODE_FAST_DBGMUX,
- DBG_BUS_STORM_MODE_RH,
- DBG_BUS_STORM_MODE_RH_WITH_STORE,
- DBG_BUS_STORM_MODE_FOC,
- DBG_BUS_STORM_MODE_EXT_STORE,
- MAX_DBG_BUS_STORM_MODES
-};
-
-/* Debug bus target IDs */
-enum dbg_bus_targets {
- DBG_BUS_TARGET_ID_INT_BUF,
- DBG_BUS_TARGET_ID_NIG,
- DBG_BUS_TARGET_ID_PCI,
- MAX_DBG_BUS_TARGETS
-};
-
-/* GRC Dump data */
-struct dbg_grc_data {
- u8 params_initialized;
- u8 reserved1;
- u16 reserved2;
- u32 param_val[48];
-};
-
-/* Debug GRC params */
-enum dbg_grc_params {
- DBG_GRC_PARAM_DUMP_TSTORM,
- DBG_GRC_PARAM_DUMP_MSTORM,
- DBG_GRC_PARAM_DUMP_USTORM,
- DBG_GRC_PARAM_DUMP_XSTORM,
- DBG_GRC_PARAM_DUMP_YSTORM,
- DBG_GRC_PARAM_DUMP_PSTORM,
- DBG_GRC_PARAM_DUMP_REGS,
- DBG_GRC_PARAM_DUMP_RAM,
- DBG_GRC_PARAM_DUMP_PBUF,
- DBG_GRC_PARAM_DUMP_IOR,
- DBG_GRC_PARAM_DUMP_VFC,
- DBG_GRC_PARAM_DUMP_CM_CTX,
- DBG_GRC_PARAM_DUMP_PXP,
- DBG_GRC_PARAM_DUMP_RSS,
- DBG_GRC_PARAM_DUMP_CAU,
- DBG_GRC_PARAM_DUMP_QM,
- DBG_GRC_PARAM_DUMP_MCP,
- DBG_GRC_PARAM_DUMP_DORQ,
- DBG_GRC_PARAM_DUMP_CFC,
- DBG_GRC_PARAM_DUMP_IGU,
- DBG_GRC_PARAM_DUMP_BRB,
- DBG_GRC_PARAM_DUMP_BTB,
- DBG_GRC_PARAM_DUMP_BMB,
- DBG_GRC_PARAM_RESERVD1,
- DBG_GRC_PARAM_DUMP_MULD,
- DBG_GRC_PARAM_DUMP_PRS,
- DBG_GRC_PARAM_DUMP_DMAE,
- DBG_GRC_PARAM_DUMP_TM,
- DBG_GRC_PARAM_DUMP_SDM,
- DBG_GRC_PARAM_DUMP_DIF,
- DBG_GRC_PARAM_DUMP_STATIC,
- DBG_GRC_PARAM_UNSTALL,
- DBG_GRC_PARAM_RESERVED2,
- DBG_GRC_PARAM_MCP_TRACE_META_SIZE,
- DBG_GRC_PARAM_EXCLUDE_ALL,
- DBG_GRC_PARAM_CRASH,
- DBG_GRC_PARAM_PARITY_SAFE,
- DBG_GRC_PARAM_DUMP_CM,
- DBG_GRC_PARAM_DUMP_PHY,
- DBG_GRC_PARAM_NO_MCP,
- DBG_GRC_PARAM_NO_FW_VER,
- DBG_GRC_PARAM_RESERVED3,
- DBG_GRC_PARAM_DUMP_MCP_HW_DUMP,
- DBG_GRC_PARAM_DUMP_ILT_CDUC,
- DBG_GRC_PARAM_DUMP_ILT_CDUT,
- DBG_GRC_PARAM_DUMP_CAU_EXT,
- MAX_DBG_GRC_PARAMS
-};
-
-/* Debug status codes */
-enum dbg_status {
- DBG_STATUS_OK,
- DBG_STATUS_APP_VERSION_NOT_SET,
- DBG_STATUS_UNSUPPORTED_APP_VERSION,
- DBG_STATUS_DBG_BLOCK_NOT_RESET,
- DBG_STATUS_INVALID_ARGS,
- DBG_STATUS_OUTPUT_ALREADY_SET,
- DBG_STATUS_INVALID_PCI_BUF_SIZE,
- DBG_STATUS_PCI_BUF_ALLOC_FAILED,
- DBG_STATUS_PCI_BUF_NOT_ALLOCATED,
- DBG_STATUS_INVALID_FILTER_TRIGGER_DWORDS,
- DBG_STATUS_NO_MATCHING_FRAMING_MODE,
- DBG_STATUS_VFC_READ_ERROR,
- DBG_STATUS_STORM_ALREADY_ENABLED,
- DBG_STATUS_STORM_NOT_ENABLED,
- DBG_STATUS_BLOCK_ALREADY_ENABLED,
- DBG_STATUS_BLOCK_NOT_ENABLED,
- DBG_STATUS_NO_INPUT_ENABLED,
- DBG_STATUS_NO_FILTER_TRIGGER_256B,
- DBG_STATUS_FILTER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_ALREADY_ENABLED,
- DBG_STATUS_TRIGGER_NOT_ENABLED,
- DBG_STATUS_CANT_ADD_CONSTRAINT,
- DBG_STATUS_TOO_MANY_TRIGGER_STATES,
- DBG_STATUS_TOO_MANY_CONSTRAINTS,
- DBG_STATUS_RECORDING_NOT_STARTED,
- DBG_STATUS_DATA_DIDNT_TRIGGER,
- DBG_STATUS_NO_DATA_RECORDED,
- DBG_STATUS_DUMP_BUF_TOO_SMALL,
- DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED,
- DBG_STATUS_UNKNOWN_CHIP,
- DBG_STATUS_VIRT_MEM_ALLOC_FAILED,
- DBG_STATUS_BLOCK_IN_RESET,
- DBG_STATUS_INVALID_TRACE_SIGNATURE,
- DBG_STATUS_INVALID_NVRAM_BUNDLE,
- DBG_STATUS_NVRAM_GET_IMAGE_FAILED,
- DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE,
- DBG_STATUS_NVRAM_READ_FAILED,
- DBG_STATUS_IDLE_CHK_PARSE_FAILED,
- DBG_STATUS_MCP_TRACE_BAD_DATA,
- DBG_STATUS_MCP_TRACE_NO_META,
- DBG_STATUS_MCP_COULD_NOT_HALT,
- DBG_STATUS_MCP_COULD_NOT_RESUME,
- DBG_STATUS_RESERVED0,
- DBG_STATUS_SEMI_FIFO_NOT_EMPTY,
- DBG_STATUS_IGU_FIFO_BAD_DATA,
- DBG_STATUS_MCP_COULD_NOT_MASK_PRTY,
- DBG_STATUS_FW_ASSERTS_PARSE_FAILED,
- DBG_STATUS_REG_FIFO_BAD_DATA,
- DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA,
- DBG_STATUS_DBG_ARRAY_NOT_SET,
- DBG_STATUS_RESERVED1,
- DBG_STATUS_NON_MATCHING_LINES,
- DBG_STATUS_INSUFFICIENT_HW_IDS,
- DBG_STATUS_DBG_BUS_IN_USE,
- DBG_STATUS_INVALID_STORM_DBG_MODE,
- DBG_STATUS_OTHER_ENGINE_BB_ONLY,
- DBG_STATUS_FILTER_SINGLE_HW_ID,
- DBG_STATUS_TRIGGER_SINGLE_HW_ID,
- DBG_STATUS_MISSING_TRIGGER_STATE_STORM,
- MAX_DBG_STATUS
-};
-
-/* Debug Storms IDs */
-enum dbg_storms {
- DBG_TSTORM_ID,
- DBG_MSTORM_ID,
- DBG_USTORM_ID,
- DBG_XSTORM_ID,
- DBG_YSTORM_ID,
- DBG_PSTORM_ID,
- MAX_DBG_STORMS
-};
-
-/* Idle Check data */
-struct idle_chk_data {
- u32 buf_size;
- u8 buf_size_set;
- u8 reserved1;
- u16 reserved2;
-};
-
-struct pretend_params {
- u8 split_type;
- u8 reserved;
- u16 split_id;
-};
-
-/* Debug Tools data (per HW function)
- */
-struct dbg_tools_data {
- struct dbg_grc_data grc;
- struct dbg_bus_data bus;
- struct idle_chk_data idle_chk;
- u8 mode_enable[40];
- u8 block_in_reset[132];
- u8 chip_id;
- u8 hw_type;
- u8 num_ports;
- u8 num_pfs_per_port;
- u8 num_vfs;
- u8 initialized;
- u8 use_dmae;
- u8 reserved;
- struct pretend_params pretend;
- u32 num_regs_read;
-};
-
-/* ILT Clients */
-enum ilt_clients {
- ILT_CLI_CDUC,
- ILT_CLI_CDUT,
- ILT_CLI_QM,
- ILT_CLI_TM,
- ILT_CLI_SRC,
- ILT_CLI_TSDM,
- ILT_CLI_RGFS,
- ILT_CLI_TGFS,
- MAX_ILT_CLIENTS
-};
-
/********************************/
/* HSI Init Functions constants */
/********************************/
@@ -2644,6 +1956,9 @@ struct init_nig_pri_tc_map_req {
/* QM per global RL init parameters */
struct init_qm_global_rl_params {
+ u8 type;
+ u8 reserved0;
+ u16 reserved1;
u32 rate_limit;
};
@@ -2658,18 +1973,33 @@ struct init_qm_port_params {
/* QM per-PQ init parameters */
struct init_qm_pq_params {
- u8 vport_id;
+ u16 vport_id;
+ u16 rl_id;
+ u8 rl_valid;
u8 tc_id;
u8 wrr_group;
- u8 rl_valid;
- u16 rl_id;
u8 port_id;
- u8 reserved;
+};
+
+/* QM per RL init parameters */
+struct init_qm_rl_params {
+ u32 vport_rl;
+ u8 vport_rl_type;
+ u8 reserved[3];
+};
+
+/* QM Rate Limiter types */
+enum init_qm_rl_type {
+ QM_RL_TYPE_NORMAL,
+ QM_RL_TYPE_QCN,
+ MAX_INIT_QM_RL_TYPE
};
/* QM per-vport init parameters */
struct init_qm_vport_params {
u16 wfq;
+ u16 reserved;
+ u16 tc_wfq[NUM_OF_TCS];
u16 first_tx_pq_id[NUM_OF_TCS];
};
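Because this change both widens vport_id/rl_id and reorders the members of init_qm_pq_params, positional initialization would silently break; designated initializers keep callers correct across such HSI reshuffles. A hedged sketch, assuming the header above is included and using arbitrary values:

/* Illustrative only: field names come from the updated structure above */
static void example_init_pq(struct init_qm_pq_params *pq)
{
	*pq = (struct init_qm_pq_params) {
		.vport_id  = 18,	/* now 16 bits wide */
		.rl_id     = 4,		/* now 16 bits wide */
		.rl_valid  = 1,
		.tc_id     = 0,
		.wrr_group = 1,
		.port_id   = 0,
	};
}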
@@ -2728,14 +2058,14 @@ struct fw_info_location {
};
enum init_modes {
- MODE_RESERVED,
+ MODE_BB_A0_DEPRECATED,
MODE_BB,
MODE_K2,
MODE_ASIC,
- MODE_RESERVED2,
- MODE_RESERVED3,
- MODE_RESERVED4,
- MODE_RESERVED5,
+ MODE_EMUL_REDUCED,
+ MODE_EMUL_FULL,
+ MODE_FPGA,
+ MODE_CHIPSIM,
MODE_SF,
MODE_MF_SD,
MODE_MF_SI,
@@ -2743,8 +2073,8 @@ enum init_modes {
MODE_PORTS_PER_ENG_2,
MODE_PORTS_PER_ENG_4,
MODE_100G,
- MODE_RESERVED6,
- MODE_RESERVED7,
+ MODE_SKIP_PRAM_INIT,
+ MODE_EMUL_MAC,
MAX_INIT_MODES
};
@@ -3009,706 +2339,6 @@ struct iro {
u16 size;
};
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug
- * arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_read_regs - Reads registers into a buffer (using GRC).
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf - Destination buffer.
- * @param addr - Source GRC address in dwords.
- * @param len - Number of registers to read.
- */
-void qed_read_regs(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len);
-
-/**
- * @brief qed_read_fw_info - Reads FW info from the chip.
- *
- * The FW info contains FW-related information, such as the FW version,
- * FW image (main/L2B/kuku), FW timestamp, etc.
- * The FW info is read from the internal RAM of the first Storm that is not in
- * reset.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param fw_info - Out: a pointer to write the FW info into.
- *
- * @return true if the FW info was read successfully from one of the Storms,
- * or false if all Storms are in reset.
- */
-bool qed_read_fw_info(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, struct fw_info *fw_info);
-/**
- * @brief qed_dbg_grc_config - Sets the value of a GRC parameter.
- *
- * @param p_hwfn - HW device data
- * @param grc_param - GRC parameter
- * @param val - Value to set.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - grc_param is invalid
- * - val is outside the allowed boundaries
- */
-enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn,
- enum dbg_grc_params grc_param, u32 val);
-
-/**
- * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their
- * default value.
- *
- * @param p_hwfn - HW device data
- */
-void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn);
-/**
- * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for
- * GRC Dump.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the collected GRC data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified dump buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
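The GRC interface above is used in two steps: query the required size, allocate a dword buffer, then dump into it. A hedged sketch of that call sequence in driver context, with error handling abbreviated; example_grc_dump and its callers are hypothetical, only the declarations and status codes above are assumed:

/* Sketch only: assumes a valid p_hwfn/p_ptt obtained from the driver core */
static enum dbg_status example_grc_dump(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	u32 buf_size_dwords, num_dumped_dwords;
	enum dbg_status rc;
	u32 *dump_buf;

	rc = qed_dbg_grc_get_dump_buf_size(p_hwfn, p_ptt, &buf_size_dwords);
	if (rc != DBG_STATUS_OK)
		return rc;

	dump_buf = vzalloc(buf_size_dwords * sizeof(u32));
	if (!dump_buf)
		return DBG_STATUS_VIRT_MEM_ALLOC_FAILED;

	rc = qed_dbg_grc_dump(p_hwfn, p_ptt, dump_buf, buf_size_dwords,
			      &num_dumped_dwords);
	/* ... hand dump_buf/num_dumped_dwords to the consumer ... */
	vfree(dump_buf);
	return rc;
}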
-
-/**
- * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size
- * for idle check results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the idle check
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the idle check data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size
- * for mcp trace results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the mcp trace data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - the trace data in MCP scratchpad contain an invalid signature
- * - the bundle ID in NVRAM is invalid
- * - the trace meta data cannot be found (in NVRAM or image file)
- * - the trace meta data cannot be read (from NVRAM or image file)
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size
- * for grc trace fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the reg fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size
- * for the IGU fifo results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo
- * data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-
-/**
- * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into
- * the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the IGU fifo data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required
- * buffer size for protection override window results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for protection
- * override data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status
-qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_protection_override_dump - Reads protection override window
- * entries and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the protection override data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * - DMAE transaction failed
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-/**
- * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer
- * size for FW Asserts results.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *buf_size);
-/**
- * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results
- * into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param dump_buf - Pointer to write the FW Asserts data into.
- * @param buf_size_in_dwords - Size of the specified buffer in dwords.
- * @param num_dumped_dwords - OUT: number of dumped dwords.
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * - the specified buffer is too small
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- u32 *dump_buf,
- u32 buf_size_in_dwords,
- u32 *num_dumped_dwords);
-
-/**
- * @brief qed_dbg_read_attn - Reads the attention registers of the specified
- * block and type, and writes the results into the specified buffer.
- *
- * @param p_hwfn - HW device data
- * @param p_ptt - Ptt window used for writing the registers.
- * @param block - Block ID.
- * @param attn_type - Attention type.
- * @param clear_status - Indicates if the attention status should be cleared.
- * @param results - OUT: Pointer to write the read results into
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- enum block_id block,
- enum dbg_attn_type attn_type,
- bool clear_status,
- struct dbg_attn_block_result *results);
-
-/**
- * @brief qed_dbg_print_attn - Prints attention registers values in the
- * specified results struct.
- *
- * @param p_hwfn
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
-/******************************* Data Types **********************************/
-
-struct mcp_trace_format {
- u32 data;
-#define MCP_TRACE_FORMAT_MODULE_MASK 0x0000ffff
-#define MCP_TRACE_FORMAT_MODULE_OFFSET 0
-#define MCP_TRACE_FORMAT_LEVEL_MASK 0x00030000
-#define MCP_TRACE_FORMAT_LEVEL_OFFSET 16
-#define MCP_TRACE_FORMAT_P1_SIZE_MASK 0x000c0000
-#define MCP_TRACE_FORMAT_P1_SIZE_OFFSET 18
-#define MCP_TRACE_FORMAT_P2_SIZE_MASK 0x00300000
-#define MCP_TRACE_FORMAT_P2_SIZE_OFFSET 20
-#define MCP_TRACE_FORMAT_P3_SIZE_MASK 0x00c00000
-#define MCP_TRACE_FORMAT_P3_SIZE_OFFSET 22
-#define MCP_TRACE_FORMAT_LEN_MASK 0xff000000
-#define MCP_TRACE_FORMAT_LEN_OFFSET 24
-
- char *format_str;
-};
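Each mcp_trace_format entry packs the module index, log level, parameter sizes and total length into one dword, while format_str is filled in by the parser. A standalone sketch extracting a few of those fields with the OFFSET/MASK pairs above (note these masks are applied before the shift); the encoded sample value is arbitrary:

#include <stdint.h>
#include <stdio.h>

#define MCP_TRACE_FORMAT_MODULE_MASK	0x0000ffff
#define MCP_TRACE_FORMAT_MODULE_OFFSET	0
#define MCP_TRACE_FORMAT_LEVEL_MASK	0x00030000
#define MCP_TRACE_FORMAT_LEVEL_OFFSET	16
#define MCP_TRACE_FORMAT_LEN_MASK	0xff000000
#define MCP_TRACE_FORMAT_LEN_OFFSET	24

int main(void)
{
	uint32_t data = 0x12020005;	/* arbitrary encoded example */
	uint32_t module = (data & MCP_TRACE_FORMAT_MODULE_MASK) >>
			  MCP_TRACE_FORMAT_MODULE_OFFSET;
	uint32_t level = (data & MCP_TRACE_FORMAT_LEVEL_MASK) >>
			 MCP_TRACE_FORMAT_LEVEL_OFFSET;
	uint32_t len = (data & MCP_TRACE_FORMAT_LEN_MASK) >>
		       MCP_TRACE_FORMAT_LEN_OFFSET;

	printf("module=%u level=%u len=%u\n", module, level, len);
	return 0;
}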
-
-/* MCP Trace Meta data structure */
-struct mcp_trace_meta {
- u32 modules_num;
- char **modules;
- u32 formats_num;
- struct mcp_trace_format *formats;
- bool is_allocated;
-};
-
-/* Debug Tools user data */
-struct dbg_tools_user_data {
- struct mcp_trace_meta mcp_trace_meta;
- const u32 *mcp_trace_user_meta_buf;
-};
-
-/******************************** Constants **********************************/
-
-#define MAX_NAME_LEN 16
-
-/***************************** Public Functions *******************************/
-
-/**
- * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with
- * debug arrays.
- *
- * @param p_hwfn - HW device data
- * @param bin_ptr - a pointer to the binary data with debug arrays.
- */
-enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn,
- const u8 * const bin_ptr);
-
-/**
- * @brief qed_dbg_alloc_user_data - Allocates user debug data.
- *
- * @param p_hwfn - HW device data
- * @param user_data_ptr - OUT: a pointer to the allocated memory.
- */
-enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn,
- void **user_data_ptr);
-
-/**
- * @brief qed_dbg_get_status_str - Returns a string for the specified status.
- *
- * @param status - a debug status code.
- *
- * @return a string for the specified status
- */
-const char *qed_dbg_get_status_str(enum dbg_status status);
-
-/**
- * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size
- * for idle check results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-/**
- * @brief qed_print_idle_chk_results - Prints idle check results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - idle check dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the idle check results.
- * @param num_errors - OUT: number of errors found in idle check.
- * @param num_warnings - OUT: number of warnings found in idle check.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf,
- u32 *num_errors,
- u32 *num_warnings);
-
-/**
- * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data.
- *
- * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to
- * no NVRAM access).
- *
- * @param data - pointer to MCP Trace meta data
- * @param size - size of MCP Trace meta data in dwords
- */
-void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn,
- const u32 *meta_buf);
-
-/**
- * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size
- * for MCP Trace results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - MCP Trace dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_mcp_trace_results - Prints MCP Trace results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and
- * keeps the MCP trace meta data allocated, to support continuous MCP Trace
- * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should
- * be called to free the meta data.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- char *results_buf);
-
-/**
- * @brief print_mcp_trace_line - Prints MCP Trace results for a single line
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - mcp trace dump buffer, starting from the header.
- * @param num_dumped_bytes - number of bytes that were dumped.
- * @param results_buf - buffer for printing the mcp trace results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn,
- u8 *dump_buf,
- u32 num_dumped_bytes,
- char *results_buf);
-
-/**
- * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data.
- * Should be called after continuous MCP Trace parsing.
- *
- * @param p_hwfn - HW device data
- */
-void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn);
-
-/**
- * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size
- * for reg_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_reg_fifo_results - Prints reg fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - reg fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size
- * for igu_fifo results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_igu_fifo_results - Prints IGU fifo results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - IGU fifo dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the IGU fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_protection_override_results_buf_size - Returns the required
- * buffer size for protection override results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status
-qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_protection_override_results - Prints protection override
- * results.
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - protection override dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the reg fifo results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size
- * for FW Asserts results (in bytes).
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed
- * results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- u32 *results_buf_size);
-
-/**
- * @brief qed_print_fw_asserts_results - Prints FW Asserts results
- *
- * @param p_hwfn - HW device data
- * @param dump_buf - FW Asserts dump buffer, starting from the header.
- * @param num_dumped_dwords - number of dwords that were dumped.
- * @param results_buf - buffer for printing the FW Asserts results.
- *
- * @return error if the parsing fails, ok otherwise.
- */
-enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn,
- u32 *dump_buf,
- u32 num_dumped_dwords,
- char *results_buf);
-
-/**
- * @brief qed_dbg_parse_attn - Parses and prints attention registers values in
- * the specified results struct.
- *
- * @param p_hwfn - HW device data
- * @param results - Pointer to the attention read results
- *
- * @return error if one of the following holds:
- * - the version wasn't set
- * Otherwise, returns ok.
- */
-enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
- struct dbg_attn_block_result *results);
-
/* Win 2 */
#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL
@@ -3745,19 +2375,28 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn,
/* Win 13 */
#define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL
+/* Returns the VOQ based on port and TC */
+#define VOQ(port, tc, max_phys_tcs_per_port) ((tc) == \
+ PURE_LB_TC ? NUM_OF_PHYS_TCS *\
+ MAX_NUM_PORTS_BB + \
+ (port) : (port) * \
+ (max_phys_tcs_per_port) + (tc))
+
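The VOQ() macro above maps a (port, TC) pair to a virtual output queue, folding the pure-loopback TC into a dedicated range beyond the per-port physical VOQs. The standalone sketch below reproduces the same arithmetic as a quick worked example; the PURE_LB_TC, NUM_OF_PHYS_TCS and MAX_NUM_PORTS_BB values are illustrative assumptions, not taken from this header.

/* Worked example of the VOQ() mapping; the constants are assumed values. */
#include <stdio.h>

#define PURE_LB_TC		8	/* assumed for illustration */
#define NUM_OF_PHYS_TCS		8	/* assumed */
#define MAX_NUM_PORTS_BB	2	/* assumed */

#define VOQ(port, tc, max_phys_tcs_per_port)				\
	((tc) == PURE_LB_TC ? NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB +	\
	 (port) : (port) * (max_phys_tcs_per_port) + (tc))

int main(void)
{
	/* Physical TC 2 on port 1 with 4 TCs per port: 1 * 4 + 2 = 6 */
	printf("VOQ(1, 2, 4) = %d\n", VOQ(1, 2, 4));
	/* Pure-LB TC on port 1: 8 * 2 + 1 = 17, past the physical range */
	printf("VOQ(1, PURE_LB_TC, 4) = %d\n", VOQ(1, PURE_LB_TC, 4));
	return 0;
}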
+struct init_qm_pq_params;
+
/**
- * @brief qed_qm_pf_mem_size - prepare QM ILT sizes
+ * qed_qm_pf_mem_size(): Prepare QM ILT sizes.
*
- * Returns the required host memory size in 4KB units.
- * Must be called before all QM init HSI functions.
+ * @num_pf_cids: Number of connections used by this PF.
+ * @num_vf_cids: Number of connections used by VFs of this PF.
+ * @num_tids: Number of tasks used by this PF.
+ * @num_pf_pqs: Number of PQs used by this PF.
+ * @num_vf_pqs: Number of PQs used by VFs of this PF.
*
- * @param num_pf_cids - number of connections used by this PF
- * @param num_vf_cids - number of connections used by VFs of this PF
- * @param num_tids - number of tasks used by this PF
- * @param num_pf_pqs - number of PQs used by this PF
- * @param num_vf_pqs - number of PQs used by VFs of this PF
+ * Return: The required host memory size in 4KB units.
*
- * @return The required host memory size in 4KB units.
+ * Must be called before all QM init HSI functions.
*/
u32 qed_qm_pf_mem_size(u32 num_pf_cids,
u32 num_vf_cids,
@@ -3771,8 +2410,19 @@ struct qed_qm_common_rt_init_params {
bool global_rl_en;
bool vport_wfq_en;
struct init_qm_port_params *port_params;
+ struct init_qm_global_rl_params
+ global_rl_params[COMMON_MAX_QM_GLOBAL_RLS];
};
+/**
+ * qed_qm_common_rt_init(): Prepare QM runtime init values for the
+ * engine phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
struct qed_qm_common_rt_init_params *p_params);
@@ -3789,85 +2439,116 @@ struct qed_qm_pf_rt_init_params {
u16 num_vf_pqs;
u16 start_vport;
u16 num_vports;
+ u16 start_rl;
+ u16 num_rls;
u16 pf_wfq;
u32 pf_rl;
+ u32 link_speed;
struct init_qm_pq_params *pq_params;
struct init_qm_vport_params *vport_params;
+ struct init_qm_rl_params *rl_params;
};
+/**
+ * qed_qm_pf_rt_init(): Prepare QM runtime init values for the PF phase.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @p_params: Parameters.
+ *
+ * Return: 0 on success, -1 on error.
+ */
int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params);
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params);
/**
- * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF
+ * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq);
/**
- * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF
+ * qed_init_pf_rl(): Initializes the rate limit of the specified PF.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param pf_id - PF ID
- * @param pf_rl - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF ID.
+ * @pf_rl: Rate limit in Mb/sec units.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_pf_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl);
/**
- * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT
+ * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param first_tx_pq_id- An array containing the first Tx PQ ID associated
- * with the VPORT for each TC. This array is filled by
- * qed_qm_pf_rt_init
- * @param vport_wfq - WFQ weight. Must be non-zero.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: An array containing the first Tx PQ ID associated
+ *                  with the VPORT for each TC. This array is filled by
+ *                  qed_qm_pf_rt_init.
+ * @wfq: WFQ weight. Must be non-zero.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
*/
int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq);
/**
- * @brief qed_init_global_rl - Initializes the rate limit of the specified
- * rate limiter
+ * qed_init_vport_tc_wfq(): Initializes the WFQ weight of the specified
+ * VPORT and TC.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers
- * @param rl_id - RL ID
- * @param rate_limit - rate limit in Mb/sec units
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @first_tx_pq_id: The first Tx PQ ID associated with the VPORT and TC.
+ * (filled by qed_qm_pf_rt_init).
+ * @weight: VPORT+TC WFQ weight.
*
- * @return 0 on success, -1 on error.
+ * Return: 0 on success, -1 on error.
+ */
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 weight);
+
+/**
+ * qed_init_global_rl(): Initializes the rate limit of the specified
+ * rate limiter.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @rl_id: RL ID.
+ * @rate_limit: Rate limit in Mb/sec units.
+ * @vport_rl_type: Vport RL type.
+ *
+ * Return: 0 on success, -1 on error.
*/
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u16 rl_id, u32 rate_limit);
+ u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type);
/**
- * @brief qed_send_qm_stop_cmd Sends a stop command to the QM
+ * qed_send_qm_stop_cmd(): Sends a stop command to the QM.
*
- * @param p_hwfn
- * @param p_ptt
- * @param is_release_cmd - true for release, false for stop.
- * @param is_tx_pq - true for Tx PQs, false for Other PQs.
- * @param start_pq - first PQ ID to stop
- * @param num_pqs - Number of PQs to stop, starting from start_pq.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @is_release_cmd: true for release, false for stop.
+ * @is_tx_pq: true for Tx PQs, false for Other PQs.
+ * @start_pq: First PQ ID to stop.
+ * @num_pqs: Number of PQs to stop, starting from start_pq.
*
- * @return bool, true if successful, false if timeout occurred while waiting for
- * QM command done.
+ * Return: Bool, true if successful, false if timeout occurred while waiting
+ * for QM command done.
*/
bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3875,53 +2556,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
bool is_tx_pq, u16 start_pq, u16 num_pqs);
/**
- * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port
+ * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - vxlan destination udp port.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: vxlan destination udp port.
+ *
+ * Return: Void.
*/
void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW
+ * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @vxlan_enable: vxlan enable flag.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param vxlan_enable - vxlan enable flag.
+ * Return: Void.
*/
void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool vxlan_enable);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_gre_enable(): Enable or disable GRE tunnel in HW.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_gre_enable - eth GRE enable enable flag.
- * @param ip_gre_enable - IP GRE enable enable flag.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_gre_enable: Eth GRE enable flag.
+ * @ip_gre_enable: IP GRE enable flag.
+ *
+ * Return: Void.
*/
void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool eth_gre_enable, bool ip_gre_enable);
/**
- * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port
+ * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @dest_port: Geneve destination udp port.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param dest_port - geneve destination udp port.
+ * Return: Void.
*/
void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 dest_port);
/**
- * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW
+ * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @eth_geneve_enable: Eth GENEVE enable flag.
+ * @ip_geneve_enable: IP GENEVE enable flag.
*
- * @param p_ptt - ptt window used for writing the registers.
- * @param eth_geneve_enable - eth GENEVE enable enable flag.
- * @param ip_geneve_enable - IP GENEVE enable enable flag.
+ * Return: Void.
*/
void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3931,25 +2623,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable);
/**
- * @brief qed_gft_disable - Disable GFT
+ * qed_gft_disable(): Disable GFT.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to disable GFT.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to disable GFT.
+ *
+ * Return: Void.
*/
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id);
/**
- * @brief qed_gft_config - Enable and configure HW for GFT
+ * qed_gft_config(): Enable and configure HW for GFT.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @pf_id: PF on which to enable GFT.
+ * @tcp: Set profile tcp packets.
+ * @udp: Set profile udp packet.
+ * @ipv4: Set profile ipv4 packet.
+ * @ipv6: Set profile ipv6 packet.
+ * @profile_type: Define packet same fields. Use enum gft_profile_type.
*
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers.
- * @param pf_id - pf on which to enable GFT.
- * @param tcp - set profile tcp packets.
- * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet.
- * @param ipv6 - set profile ipv6 packet.
- * @param profile_type - define packet same fields. Use enum gft_profile_type.
+ * Return: Void.
*/
void qed_gft_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -3959,438 +2655,135 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
bool ipv4, bool ipv6, enum gft_profile_type profile_type);
/**
- * @brief qed_enable_context_validation - Enable and configure context
- * validation.
+ * qed_enable_context_validation(): Enable and configure context
+ * validation.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
*
- * @param p_hwfn
- * @param p_ptt - ptt window used for writing the registers.
+ * Return: Void.
*/
void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_calc_session_ctx_validation - Calcualte validation byte for
- * session context.
+ * qed_calc_session_ctx_validation(): Calculate validation byte for
+ * session context.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param cid - context cid.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @cid: Context cid.
+ *
+ * Return: Void.
*/
void qed_calc_session_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 cid);
/**
- * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task
- * context.
+ * qed_calc_task_ctx_validation(): Calculate validation byte for task
+ * context.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Context size.
+ * @ctx_type: Context type.
+ * @tid: Context tid.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - context size.
- * @param ctx_type - context type.
- * @param tid - context tid.
+ * Return: Void.
*/
void qed_calc_task_ctx_validation(void *p_ctx_mem,
u16 ctx_size, u8 ctx_type, u32 tid);
/**
- * @brief qed_memset_session_ctx - Memset session context to 0 while
- * preserving validation bytes.
+ * qed_memset_session_ctx(): Memset session context to 0 while
+ * preserving validation bytes.
+ *
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
*
- * @param p_hwfn -
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * Return: Void.
*/
void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
/**
- * @brief qed_memset_task_ctx - Memset task context to 0 while preserving
- * validation bytes.
+ * qed_memset_task_ctx(): Memset task context to 0 while preserving
+ * validation bytes.
*
- * @param p_ctx_mem - pointer to context memory.
- * @param ctx_size - size to initialzie.
- * @param ctx_type - context type.
+ * @p_ctx_mem: Pointer to context memory.
+ * @ctx_size: Size to initialize.
+ * @ctx_type: Context type.
+ *
+ * Return: Void.
*/
void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type);
#define NUM_STORMS 6
/**
- * @brief qed_set_rdma_error_level - Sets the RDMA assert level.
- * If the severity of the error will be
- * above the level, the FW will assert.
- * @param p_hwfn - HW device data
- * @param p_ptt - ptt window used for writing the registers
- * @param assert_level - An array of assert levels for each storm.
+ * qed_set_rdma_error_level(): Sets the RDMA assert level.
+ *                             If the severity of the error exceeds
+ *                             this level, the FW will assert.
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @assert_level: An array of assert levels for each storm.
*
+ * Return: Void.
*/
void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 assert_level[NUM_STORMS]);
/**
- * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory.
+ * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory.
*
- * @param p_hwfn - HW device data
- * @param fw_overlay_in_buf - the input FW overlay buffer.
- * @param buf_size - the size of the input FW overlay buffer in bytes.
- * must be aligned to dwords.
- * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_in_buf: The input FW overlay buffer.
+ * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes.
+ *                     Must be aligned to dwords.
*
- * @return a pointer to the allocated overlays memory,
+ * Return: A pointer to the allocated overlays memory,
* or NULL in case of failures.
*/
struct phys_mem_desc *
qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
- const u32 * const fw_overlay_in_buf,
+ const u32 *const fw_overlay_in_buf,
u32 buf_size_in_bytes);
/**
- * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM.
+ * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: Ptt window used for writing the registers.
+ * @fw_overlay_mem: the allocated FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param p_ptt - ptt window used for writing the registers.
- * @param fw_overlay_mem - the allocated FW overlay memory.
+ * Return: Void.
*/
void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct phys_mem_desc *fw_overlay_mem);
/**
- * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory.
+ * qed_fw_overlay_mem_free(): Frees the FW overlay memory.
*
- * @param p_hwfn - HW device data.
- * @param fw_overlay_mem - the allocated FW overlay memory to free.
+ * @p_hwfn: HW device data.
+ * @fw_overlay_mem: The allocated FW overlay memory to free.
+ *
+ * Return: Void.
*/
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem);
+ struct phys_mem_desc **fw_overlay_mem);
-/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
-#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base)
-#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size)
-
-/* Tstorm port statistics */
-#define TSTORM_PORT_STAT_OFFSET(port_id) \
- (IRO[1].base + ((port_id) * IRO[1].m1))
-#define TSTORM_PORT_STAT_SIZE (IRO[1].size)
-
-/* Tstorm ll2 port statistics */
-#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
- (IRO[2].base + ((port_id) * IRO[2].m1))
-#define TSTORM_LL2_PORT_STAT_SIZE (IRO[2].size)
-
-/* Ustorm VF-PF Channel ready flag */
-#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \
- (IRO[3].base + ((vf_id) * IRO[3].m1))
-#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size)
-
-/* Ustorm Final flr cleanup ack */
-#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \
- (IRO[4].base + ((pf_id) * IRO[4].m1))
-#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size)
-
-/* Ustorm Event ring consumer */
-#define USTORM_EQE_CONS_OFFSET(pf_id) \
- (IRO[5].base + ((pf_id) * IRO[5].m1))
-#define USTORM_EQE_CONS_SIZE (IRO[5].size)
-
-/* Ustorm eth queue zone */
-#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_zone_id) \
- (IRO[6].base + ((queue_zone_id) * IRO[6].m1))
-#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[6].size)
-
-/* Ustorm Common Queue ring consumer */
-#define USTORM_COMMON_QUEUE_CONS_OFFSET(queue_zone_id) \
- (IRO[7].base + ((queue_zone_id) * IRO[7].m1))
-#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[7].size)
-
-/* Xstorm common PQ info */
-#define XSTORM_PQ_INFO_OFFSET(pq_id) \
- (IRO[8].base + ((pq_id) * IRO[8].m1))
-#define XSTORM_PQ_INFO_SIZE (IRO[8].size)
-
-/* Xstorm Integration Test Data */
-#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base)
-#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size)
-
-/* Ystorm Integration Test Data */
-#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base)
-#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size)
-
-/* Pstorm Integration Test Data */
-#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base)
-#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size)
-
-/* Tstorm Integration Test Data */
-#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base)
-#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[12].size)
-
-/* Mstorm Integration Test Data */
-#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[13].base)
-#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[13].size)
-
-/* Ustorm Integration Test Data */
-#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[14].base)
-#define USTORM_INTEG_TEST_DATA_SIZE (IRO[14].size)
-
-/* Xstorm overlay buffer host address */
-#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[15].base)
-#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[15].size)
-
-/* Ystorm overlay buffer host address */
-#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[16].base)
-#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[16].size)
-
-/* Pstorm overlay buffer host address */
-#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[17].base)
-#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[17].size)
-
-/* Tstorm overlay buffer host address */
-#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[18].base)
-#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[18].size)
-
-/* Mstorm overlay buffer host address */
-#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[19].base)
-#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[19].size)
-
-/* Ustorm overlay buffer host address */
-#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[20].base)
-#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[20].size)
-
-/* Tstorm producers */
-#define TSTORM_LL2_RX_PRODS_OFFSET(core_rx_queue_id) \
- (IRO[21].base + ((core_rx_queue_id) * IRO[21].m1))
-#define TSTORM_LL2_RX_PRODS_SIZE (IRO[21].size)
-
-/* Tstorm LightL2 queue statistics */
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[22].base + ((core_rx_queue_id) * IRO[22].m1))
-#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE (IRO[22].size)
-
-/* Ustorm LiteL2 queue statistics */
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
- (IRO[23].base + ((core_rx_queue_id) * IRO[23].m1))
-#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE (IRO[23].size)
-
-/* Pstorm LiteL2 queue statistics */
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
- (IRO[24].base + ((core_tx_stats_id) * IRO[24].m1))
-#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE (IRO[24].size)
-
-/* Mstorm queue statistics */
-#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[25].base + ((stat_counter_id) * IRO[25].m1))
-#define MSTORM_QUEUE_STAT_SIZE (IRO[25].size)
-
-/* TPA agregation timeout in us resolution (on ASIC) */
-#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[26].base)
-#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[26].size)
-
-/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone size
- * mode
- */
-#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
- (IRO[27].base + ((vf_id) * IRO[27].m1) + ((vf_queue_id) * IRO[27].m2))
-#define MSTORM_ETH_VF_PRODS_SIZE (IRO[27].size)
-
-/* Mstorm ETH PF queues producers */
-#define MSTORM_ETH_PF_PRODS_OFFSET(queue_id) \
- (IRO[28].base + ((queue_id) * IRO[28].m1))
-#define MSTORM_ETH_PF_PRODS_SIZE (IRO[28].size)
-
-/* Mstorm pf statistics */
-#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[29].base + ((pf_id) * IRO[29].m1))
-#define MSTORM_ETH_PF_STAT_SIZE (IRO[29].size)
-
-/* Ustorm queue statistics */
-#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[30].base + ((stat_counter_id) * IRO[30].m1))
-#define USTORM_QUEUE_STAT_SIZE (IRO[30].size)
-
-/* Ustorm pf statistics */
-#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[31].base + ((pf_id) * IRO[31].m1))
-#define USTORM_ETH_PF_STAT_SIZE (IRO[31].size)
-
-/* Pstorm queue statistics */
-#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
- (IRO[32].base + ((stat_counter_id) * IRO[32].m1))
-#define PSTORM_QUEUE_STAT_SIZE (IRO[32].size)
-
-/* Pstorm pf statistics */
-#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
- (IRO[33].base + ((pf_id) * IRO[33].m1))
-#define PSTORM_ETH_PF_STAT_SIZE (IRO[33].size)
-
-/* Control frame's EthType configuration for TX control frame security */
-#define PSTORM_CTL_FRAME_ETHTYPE_OFFSET(eth_type_id) \
- (IRO[34].base + ((eth_type_id) * IRO[34].m1))
-#define PSTORM_CTL_FRAME_ETHTYPE_SIZE (IRO[34].size)
-
-/* Tstorm last parser message */
-#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[35].base)
-#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[35].size)
-
-/* Tstorm Eth limit Rx rate */
-#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
- (IRO[36].base + ((pf_id) * IRO[36].m1))
-#define ETH_RX_RATE_LIMIT_SIZE (IRO[36].size)
-
-/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
- * Use eth_tstorm_rss_update_data for update
- */
-#define TSTORM_ETH_RSS_UPDATE_OFFSET(pf_id) \
- (IRO[37].base + ((pf_id) * IRO[37].m1))
-#define TSTORM_ETH_RSS_UPDATE_SIZE (IRO[37].size)
-
-/* Xstorm queue zone */
-#define XSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \
- (IRO[38].base + ((queue_id) * IRO[38].m1))
-#define XSTORM_ETH_QUEUE_ZONE_SIZE (IRO[38].size)
-
-/* Ystorm cqe producer */
-#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[39].base + ((rss_id) * IRO[39].m1))
-#define YSTORM_TOE_CQ_PROD_SIZE (IRO[39].size)
-
-/* Ustorm cqe producer */
-#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
- (IRO[40].base + ((rss_id) * IRO[40].m1))
-#define USTORM_TOE_CQ_PROD_SIZE (IRO[40].size)
-
-/* Ustorm grq producer */
-#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
- (IRO[41].base + ((pf_id) * IRO[41].m1))
-#define USTORM_TOE_GRQ_PROD_SIZE (IRO[41].size)
-
-/* Tstorm cmdq-cons of given command queue-id */
-#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \
- (IRO[42].base + ((cmdq_queue_id) * IRO[42].m1))
-#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[42].size)
-
-/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
- * BDqueue-id
+#define PCICFG_OFFSET 0x2000
+#define GRC_CONFIG_REG_PF_INIT_VF 0x624
+
+/* First VF_NUM for PF is encoded in this register.
+ * The number of VFs assigned to a PF is assumed to be a multiple of 8.
+ * Software should program these bits based on Total Number of VFs programmed
+ * for each PF.
+ * Since registers from 0x000-0x7ff are split across functions, each PF will
+ * have the same location for the same 4 bits.
*/
-#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[43].base + ((storage_func_id) * IRO[43].m1) + \
- ((bdq_id) * IRO[43].m2))
-#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[43].size)
-
-/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
-#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(storage_func_id, bdq_id) \
- (IRO[44].base + ((storage_func_id) * IRO[44].m1) + \
- ((bdq_id) * IRO[44].m2))
-#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[44].size)
-
-/* Tstorm iSCSI RX stats */
-#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[45].base + ((storage_func_id) * IRO[45].m1))
-#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[45].size)
-
-/* Mstorm iSCSI RX stats */
-#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[46].base + ((storage_func_id) * IRO[46].m1))
-#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[46].size)
-
-/* Ustorm iSCSI RX stats */
-#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
- (IRO[47].base + ((storage_func_id) * IRO[47].m1))
-#define USTORM_ISCSI_RX_STATS_SIZE (IRO[47].size)
-
-/* Xstorm iSCSI TX stats */
-#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[48].base + ((storage_func_id) * IRO[48].m1))
-#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[48].size)
-
-/* Ystorm iSCSI TX stats */
-#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[49].base + ((storage_func_id) * IRO[49].m1))
-#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[49].size)
-
-/* Pstorm iSCSI TX stats */
-#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
- (IRO[50].base + ((storage_func_id) * IRO[50].m1))
-#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[50].size)
-
-/* Tstorm FCoE RX stats */
-#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
- (IRO[51].base + ((pf_id) * IRO[51].m1))
-#define TSTORM_FCOE_RX_STATS_SIZE (IRO[51].size)
-
-/* Pstorm FCoE TX stats */
-#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
- (IRO[52].base + ((pf_id) * IRO[52].m1))
-#define PSTORM_FCOE_TX_STATS_SIZE (IRO[52].size)
-
-/* Pstorm RDMA queue statistics */
-#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[53].base + ((rdma_stat_counter_id) * IRO[53].m1))
-#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[53].size)
-
-/* Tstorm RDMA queue statistics */
-#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
- (IRO[54].base + ((rdma_stat_counter_id) * IRO[54].m1))
-#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[54].size)
-
-/* Xstorm error level for assert */
-#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[55].base + ((pf_id) * IRO[55].m1))
-#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[55].size)
-
-/* Ystorm error level for assert */
-#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[56].base + ((pf_id) * IRO[56].m1))
-#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[56].size)
-
-/* Pstorm error level for assert */
-#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[57].base + ((pf_id) * IRO[57].m1))
-#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[57].size)
-
-/* Tstorm error level for assert */
-#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[58].base + ((pf_id) * IRO[58].m1))
-#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[58].size)
-
-/* Mstorm error level for assert */
-#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[59].base + ((pf_id) * IRO[59].m1))
-#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[59].size)
-
-/* Ustorm error level for assert */
-#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
- (IRO[60].base + ((pf_id) * IRO[60].m1))
-#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[60].size)
-
-/* Xstorm iWARP rxmit stats */
-#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
- (IRO[61].base + ((pf_id) * IRO[61].m1))
-#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[61].size)
-
-/* Tstorm RoCE Event Statistics */
-#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
- (IRO[62].base + ((roce_pf_id) * IRO[62].m1))
-#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[62].size)
-
-/* DCQCN Received Statistics */
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id)\
- (IRO[63].base + ((roce_pf_id) * IRO[63].m1))
-#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE (IRO[63].size)
-
-/* RoCE Error Statistics */
-#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
- (IRO[64].base + ((roce_pf_id) * IRO[64].m1))
-#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[64].size)
-
-/* DCQCN Sent Statistics */
-#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
- (IRO[65].base + ((roce_pf_id) * IRO[65].m1))
-#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE (IRO[65].size)
-
-/* RoCE CQEs Statistics */
-#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
- (IRO[66].base + ((roce_pf_id) * IRO[66].m1))
-#define USTORM_ROCE_CQE_STATS_SIZE (IRO[66].size)
+#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff
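The comment above explains that the first VF number assigned to a PF is encoded in GRC_CONFIG_REG_PF_INIT_VF. A minimal sketch of the masking step follows; the register value is a made-up placeholder and the surrounding program is hypothetical, only the 0xff mask is copied from this hunk.

/* Sketch: extract the first VF number from an already-read register
 * value; 'reg' is a placeholder, not a real read from hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK 0xff

int main(void)
{
	uint32_t reg = 0x00000010;	/* hypothetical register contents */
	uint32_t first_vf = reg & GRC_CR_PF_INIT_VF_PF_FIRST_VF_NUM_MASK;

	printf("first VF number for this PF: %u\n", (unsigned int)first_vf);
	return 0;
}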
/* Runtime array offsets */
#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0
@@ -4721,116 +3114,118 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
#define QM_REG_TXPQMAP_RT_SIZE 512
#define QM_REG_WFQVPWEIGHT_RT_OFFSET 31556
#define QM_REG_WFQVPWEIGHT_RT_SIZE 512
-#define QM_REG_WFQVPCRD_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_OFFSET 32068
+#define QM_REG_WFQVPUPPERBOUND_RT_SIZE 512
+#define QM_REG_WFQVPCRD_RT_OFFSET 32580
#define QM_REG_WFQVPCRD_RT_SIZE 512
-#define QM_REG_WFQVPMAP_RT_OFFSET 32580
+#define QM_REG_WFQVPMAP_RT_OFFSET 33092
#define QM_REG_WFQVPMAP_RT_SIZE 512
-#define QM_REG_PTRTBLTX_RT_OFFSET 33092
+#define QM_REG_PTRTBLTX_RT_OFFSET 33604
#define QM_REG_PTRTBLTX_RT_SIZE 1024
-#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34116
+#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 34628
#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160
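Every runtime offset after QM_REG_WFQVPWEIGHT in the hunks above and below moves up by exactly 512, the size of the newly inserted QM_REG_WFQVPUPPERBOUND array. The compile-time sketch below only restates numbers copied from this diff to make that relationship explicit.

/* Sketch: the 512-entry upper-bound array shifts later offsets and the
 * total runtime array size by the same amount; values copied from the diff.
 */
#include <assert.h>

#define OLD_QM_REG_WFQVPCRD_RT_OFFSET		32068
#define NEW_QM_REG_WFQVPUPPERBOUND_RT_SIZE	512
#define NEW_QM_REG_WFQVPCRD_RT_OFFSET		32580
#define OLD_RUNTIME_ARRAY_SIZE			34472
#define NEW_RUNTIME_ARRAY_SIZE			34984

static_assert(OLD_QM_REG_WFQVPCRD_RT_OFFSET +
	      NEW_QM_REG_WFQVPUPPERBOUND_RT_SIZE ==
	      NEW_QM_REG_WFQVPCRD_RT_OFFSET,
	      "WFQVPCRD moved by the size of the inserted array");
static_assert(OLD_RUNTIME_ARRAY_SIZE +
	      NEW_QM_REG_WFQVPUPPERBOUND_RT_SIZE ==
	      NEW_RUNTIME_ARRAY_SIZE,
	      "the runtime array grew by the same 512 entries");

int main(void)
{
	return 0;
}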
-#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34276
-#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34277
-#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34278
-#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34279
-#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34280
-#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34281
-#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34282
-#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34283
+#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 34788
+#define NIG_REG_BRB_GATE_DNTFWD_PORT_RT_OFFSET 34789
+#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 34790
+#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 34791
+#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 34792
+#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 34793
+#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 34794
+#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 34795
#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34287
+#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 34799
#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4
-#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34291
+#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 34803
#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32
-#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34323
+#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 34835
#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34339
+#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 34851
#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34355
+#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 34867
#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16
-#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34371
+#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 34883
#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16
-#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34387
-#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34388
+#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 34899
+#define NIG_REG_PPF_TO_ENGINE_SEL_RT_OFFSET 34900
#define NIG_REG_PPF_TO_ENGINE_SEL_RT_SIZE 8
-#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34396
-#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34397
-#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34398
-#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34399
-#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34400
-#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34401
-#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34402
-#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34403
-#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34404
-#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34405
-#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34406
-#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34407
-#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34408
-#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34409
-#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34410
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34411
-#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34412
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34413
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34414
-#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34415
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34416
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34417
-#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34418
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34419
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34420
-#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34421
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34422
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34423
-#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34424
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34425
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34426
-#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34427
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34428
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34429
-#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34430
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34431
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34432
-#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34433
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34434
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34435
-#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34436
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34437
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34438
-#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34439
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34440
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34441
-#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34442
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34443
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34444
-#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34445
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34446
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34447
-#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34448
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34449
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34450
-#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34451
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34452
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34453
-#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34454
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34455
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34456
-#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34457
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34458
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34459
-#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34460
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34461
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34462
-#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34463
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34464
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34465
-#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34466
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34467
-#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34468
-#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34469
-#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34470
-#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34471
-
-#define RUNTIME_ARRAY_SIZE 34472
+#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 34908
+#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 34909
+#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 34910
+#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 34911
+#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 34912
+#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 34913
+#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 34914
+#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 34915
+#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 34916
+#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 34917
+#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 34918
+#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 34919
+#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 34920
+#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 34921
+#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 34922
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 34923
+#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 34924
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 34925
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 34926
+#define PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 34927
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 34928
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 34929
+#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 34930
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 34931
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 34932
+#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 34933
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 34934
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 34935
+#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 34936
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 34937
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 34938
+#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 34939
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 34940
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 34941
+#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 34942
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 34943
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 34944
+#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 34945
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 34946
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 34947
+#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 34948
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 34949
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 34950
+#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 34951
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 34952
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 34953
+#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 34954
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 34955
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 34956
+#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 34957
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 34958
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 34959
+#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 34960
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 34961
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 34962
+#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 34963
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 34964
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 34965
+#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 34966
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 34967
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 34968
+#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 34969
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 34970
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 34971
+#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 34972
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 34973
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 34974
+#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 34975
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 34976
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 34977
+#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 34978
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 34979
+#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 34980
+#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 34981
+#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 34982
+#define XCM_REG_CON_PHY_Q3_RT_OFFSET 34983
+
+#define RUNTIME_ARRAY_SIZE 34984
/* Init Callbacks */
#define DMAE_READY_CB 0
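The context-struct hunks that follow rename the E4_XSTORM_ETH_CONN_AG_CTX_* field macros to drop the E4_ prefix; the MASK/SHIFT pairs themselves are unchanged. As a reminder of how such pairs are consumed, here is a small standalone sketch. The GET_FIELD/SET_FIELD helpers are written locally for illustration rather than taken from the driver, and only the two mask/shift pairs are copied from the hunk below.

/* Sketch of reading/writing sub-fields packed into a flags byte via
 * MASK/SHIFT macro pairs; the helpers below are local illustrations.
 */
#include <stdio.h>

#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK		0x3
#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT	0
#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK		0x3
#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT	2

#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, field)				\
	((value) = ((value) & ~(name##_MASK << name##_SHIFT)) |	\
		   (((field) & name##_MASK) << name##_SHIFT))

int main(void)
{
	unsigned char flags2 = 0;

	SET_FIELD(flags2, XSTORM_ETH_CONN_AG_CTX_CF1, 0x2);
	printf("CF0=%d CF1=%d flags2=0x%02x\n",
	       GET_FIELD(flags2, XSTORM_ETH_CONN_AG_CTX_CF0),
	       GET_FIELD(flags2, XSTORM_ETH_CONN_AG_CTX_CF1),
	       (unsigned int)flags2);
	return 0;
}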
@@ -4850,216 +3245,216 @@ struct xstorm_eth_conn_st_ctx {
__le32 reserved[60];
};
-struct e4_xstorm_eth_conn_ag_ctx {
+struct xstorm_eth_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -5118,37 +3513,37 @@ struct ystorm_eth_conn_st_ctx {
__le32 reserved[8];
};
-struct e4_ystorm_eth_conn_ag_ctx {
+struct ystorm_eth_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 tx_q0_int_coallecing_timeset;
u8 byte3;
__le16 word0;
@@ -5162,89 +3557,89 @@ struct e4_ystorm_eth_conn_ag_ctx {
__le32 reg3;
};
-struct e4_tstorm_eth_conn_ag_ctx {
+struct tstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -5266,63 +3661,63 @@ struct e4_tstorm_eth_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_eth_conn_ag_ctx {
+struct ustorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -5346,16 +3741,16 @@ struct mstorm_eth_conn_st_ctx {
};
/* eth connection context */
-struct e4_eth_conn_context {
+struct eth_conn_context {
struct tstorm_eth_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct pstorm_eth_conn_st_ctx pstorm_st_context;
struct xstorm_eth_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_eth_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_eth_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_eth_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_eth_conn_ag_ctx tstorm_ag_context;
struct ystorm_eth_conn_st_ctx ystorm_st_context;
- struct e4_ystorm_eth_conn_ag_ctx ystorm_ag_context;
- struct e4_ustorm_eth_conn_ag_ctx ustorm_ag_context;
+ struct ystorm_eth_conn_ag_ctx ystorm_ag_context;
+ struct ustorm_eth_conn_ag_ctx ustorm_ag_context;
struct ustorm_eth_conn_st_ctx ustorm_st_context;
struct mstorm_eth_conn_st_ctx mstorm_st_context;
};
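/* Illustrative sketch only, not part of the patch: the renamed *_MASK / *_SHIFT
 * pairs above are normally consumed through the GET_FIELD()/SET_FIELD() helpers
 * that common_hsi.h is assumed to provide. The function name below is
 * hypothetical and just shows the typical access pattern on the aggregative
 * context of an L2 connection.
 */
static void example_seed_xstorm_ag_ctx(struct xstorm_eth_conn_ag_ctx *ag)
{
	/* Mark the TX rule active and enable the DQ completion flow. */
	SET_FIELD(ag->flags1, XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE, 1);
	SET_FIELD(ag->flags10, XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN, 1);

	/* Read back the 2-bit DQ completion-flag field from flags6. */
	if (GET_FIELD(ag->flags6, XSTORM_ETH_CONN_AG_CTX_DQ_CF))
		return;
}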
@@ -5512,7 +3907,7 @@ enum eth_ramrod_cmd_id {
ETH_RAMROD_RX_ADD_UDP_FILTER,
ETH_RAMROD_RX_DELETE_UDP_FILTER,
ETH_RAMROD_RX_CREATE_GFT_ACTION,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
ETH_RAMROD_TX_QUEUE_UPDATE,
ETH_RAMROD_RGFS_FILTER_ADD,
ETH_RAMROD_RGFS_FILTER_DEL,
@@ -5596,10 +3991,12 @@ struct eth_vport_rss_config {
u8 update_rss_ind_table;
u8 update_rss_capabilities;
u8 tbl_size;
- __le32 reserved2[2];
+ u8 ind_table_mask_valid;
+ u8 reserved2[3];
__le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM];
+ __le32 ind_table_mask[ETH_RSS_IND_TABLE_MASK_SIZE_REGS];
__le32 rss_key[ETH_RSS_KEY_SIZE_REGS];
- __le32 reserved3[2];
+ __le32 reserved3;
};
/* eth vport RSS mode */
@@ -5674,8 +4071,20 @@ enum gft_filter_update_action {
MAX_GFT_FILTER_UPDATE_ACTION
};
+/* Ramrod data for rx create gft action */
+struct rx_create_gft_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
+/* Ramrod data for rx create openflow action */
+struct rx_create_openflow_action_ramrod_data {
+ u8 vport_id;
+ u8 reserved[7];
+};
+
/* Ramrod data for rx add openflow filter */
-struct rx_add_openflow_filter_data {
+struct rx_openflow_filter_ramrod_data {
__le16 action_icid;
u8 priority;
u8 reserved0;
@@ -5698,18 +4107,6 @@ struct rx_add_openflow_filter_data {
__le16 l4_src_port;
};
-/* Ramrod data for rx create gft action */
-struct rx_create_gft_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
-/* Ramrod data for rx create openflow action */
-struct rx_create_openflow_action_data {
- u8 vport_id;
- u8 reserved[7];
-};
-
/* Ramrod data for rx queue start ramrod */
struct rx_queue_start_ramrod_data {
__le16 rx_queue_id;
@@ -5768,7 +4165,7 @@ struct rx_queue_update_ramrod_data {
};
/* Ramrod data for rx Add UDP Filter */
-struct rx_udp_filter_data {
+struct rx_udp_filter_ramrod_data {
__le16 action_icid;
__le16 vlan_id;
u8 ip_type;
@@ -5784,7 +4181,7 @@ struct rx_udp_filter_data {
/* Add or delete a GFT filter - the filter is the packet header of the packet
 * type that is intended to pass a certain FW flow.
 */
-struct rx_update_gft_filter_data {
+struct rx_update_gft_filter_ramrod_data {
struct regpair pkt_hdr_addr;
__le16 pkt_hdr_length;
__le16 action_icid;
@@ -5824,7 +4221,8 @@ struct tx_queue_start_ramrod_data {
u8 pxp_tph_valid_bd;
u8 pxp_tph_valid_pkt;
__le16 pxp_st_index;
- __le16 comp_agg_size;
+ u8 comp_agg_size;
+ u8 reserved3;
__le16 queue_zone_id;
__le16 reserved2;
__le16 pbl_size;
@@ -5945,7 +4343,12 @@ struct vport_update_ramrod_data_cmn {
u8 ctl_frame_ethtype_check_en;
u8 update_in_to_in_pri_map_mode;
u8 in_to_in_pri_map[8];
- u8 reserved[6];
+ u8 update_tx_dst_port_mode_flg;
+ u8 tx_dst_port_mode_config;
+ u8 dst_vport_id;
+ u8 tx_dst_port_mode;
+ u8 dst_vport_id_valid;
+ u8 reserved[1];
};
struct vport_update_ramrod_mcast {
@@ -5964,7 +4367,7 @@ struct vport_update_ramrod_data {
struct eth_vport_rss_config rss_config;
};
-struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
+struct xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
u8 reserved0;
u8 state;
u8 flags0;
@@ -6193,253 +4596,253 @@ struct e4_xstorm_eth_conn_ag_ctx_dq_ext_ldpart {
__le32 reg4;
};
-struct e4_mstorm_eth_conn_ag_ctx {
+struct mstorm_eth_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_xstorm_eth_hw_conn_ag_ctx {
+struct xstorm_eth_hw_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
-#define E4_XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3
+#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6
u8 edpm_event_id;
__le16 physical_q0;
__le16 e5_reserved1;
@@ -6479,7 +4882,6 @@ struct gft_cam_line_mapped {
#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29
};
-
/* Used in gft_profile_key: Indication for ip version */
enum gft_profile_ip_version {
GFT_PROFILE_IPV4 = 0,
@@ -6640,49 +5042,49 @@ struct ystorm_rdma_task_st_ctx {
struct regpair temp[4];
};
-struct e4_ystorm_rdma_task_ag_ctx {
+struct ystorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 msem_ctx_upd_seq;
u8 flags0;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define YSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_VALID_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_MASK 0x3
+#define YSTORM_RDMA_TASK_AG_CTX_CF2SPECIAL_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define YSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6696,49 +5098,49 @@ struct e4_ystorm_rdma_task_ag_ctx {
__le32 fbo_hi;
};
-struct e4_mstorm_rdma_task_ag_ctx {
+struct mstorm_rdma_task_ag_ctx {
u8 reserved;
u8 byte1;
__le16 icid;
u8 flags0;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define MSTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_DIF_FIRST_IO_SHIFT 7
u8 flags1;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define MSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 7
u8 flags2;
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 0
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 2
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 3
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 4
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 5
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 6
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define MSTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 7
u8 key;
__le32 mw_cnt_or_qp_id;
u8 ref_cnt_seq;
@@ -6762,56 +5164,56 @@ struct ustorm_rdma_task_st_ctx {
struct regpair temp[6];
};
-struct e4_ustorm_rdma_task_ag_ctx {
+struct ustorm_rdma_task_ag_ctx {
u8 reserved;
u8 state;
__le16 icid;
u8 flags0;
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RESULT_TOGGLE_BIT_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_TX_IO_FLG_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_BLOCK_SIZE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_RESULT_CF_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED2_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED3_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RESERVED4_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 5
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 6
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
-#define E4_USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_RXMIT_PROD_CONS_EN_SHIFT 0
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_DIF_WRITE_PROD_CONS_EN_SHIFT 2
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_TASK_AG_CTX_RULE6EN_SHIFT 3
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF
+#define USTORM_RDMA_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4
__le32 dif_err_intervals;
__le32 dif_error_1st_interval;
__le32 dif_rxmit_cons;
@@ -6828,16 +5230,853 @@ struct e4_ustorm_rdma_task_ag_ctx {
};
/* RDMA task context */
-struct e4_rdma_task_context {
+struct rdma_task_context {
struct ystorm_rdma_task_st_ctx ystorm_st_context;
- struct e4_ystorm_rdma_task_ag_ctx ystorm_ag_context;
+ struct ystorm_rdma_task_ag_ctx ystorm_ag_context;
struct tdif_task_context tdif_context;
- struct e4_mstorm_rdma_task_ag_ctx mstorm_ag_context;
+ struct mstorm_rdma_task_ag_ctx mstorm_ag_context;
struct mstorm_rdma_task_st_ctx mstorm_st_context;
struct rdif_task_context rdif_context;
struct ustorm_rdma_task_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_ustorm_rdma_task_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_task_ag_ctx ustorm_ag_context;
+};
+
+#define TOE_MAX_RAMROD_PER_PF 8
+#define TOE_TX_PAGE_SIZE_BYTES 4096
+#define TOE_GRQ_PAGE_SIZE_BYTES 4096
+#define TOE_RX_CQ_PAGE_SIZE_BYTES 4096
+
+#define TOE_RX_MAX_RSS_CHAINS 64
+#define TOE_TX_MAX_TSS_CHAINS 64
+#define TOE_RSS_INDIRECTION_TABLE_SIZE 128
+
+/* The toe storm context of Mstorm */
+struct mstorm_toe_conn_st_ctx {
+ __le32 reserved[24];
+};
+
+/* The toe storm context of Pstorm */
+struct pstorm_toe_conn_st_ctx {
+ __le32 reserved[36];
+};
+
+/* The toe storm context of Ystorm */
+struct ystorm_toe_conn_st_ctx {
+ __le32 reserved[8];
+};
+
+/* The toe storm context of Xstorm */
+struct xstorm_toe_conn_st_ctx {
+ __le32 reserved[44];
+};
+
+struct ystorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 0
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RESET_RECEIVED_CF_EN_SHIFT 1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_REL_SEQ_EN_SHIFT 3
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_MASK 0x1
+#define YSTORM_TOE_CONN_AG_CTX_CONS_PROD_EN_SHIFT 7
+ u8 completion_opcode;
+ u8 byte3;
+ __le16 word0;
+ __le32 rel_seq;
+ __le32 rel_seq_threshold;
+ __le16 app_prod;
+ __le16 app_cons;
+ __le16 word3;
+ __le16 word4;
+ __le32 reg2;
+ __le32 reg3;
+};
+
+struct xstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 state;
+ u8 flags0;
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_DEC_RULE_RES_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT7_SHIFT 7
+ u8 flags1;
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT15_SHIFT 7
+ u8 flags2;
+#define XSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+ u8 flags3;
+#define XSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 6
+ u8 flags4;
+#define XSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF11_SHIFT 6
+ u8 flags5;
+#define XSTORM_TOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF15_SHIFT 6
+ u8 flags6;
+#define XSTORM_TOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+ u8 flags7;
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 7
+ u8 flags8;
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF9EN_SHIFT 7
+ u8 flags9;
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF17EN_SHIFT 7
+ u8 flags10;
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+ u8 flags11;
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE9EN_SHIFT 7
+ u8 flags12;
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+ u8 flags13;
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+ u8 flags14;
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_TOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_TOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_TOE_CONN_AG_CTX_CF23_SHIFT 6
+ u8 byte2;
+ __le16 physical_q0;
+ __le16 physical_q1;
+ __le16 word2;
+ __le16 word3;
+ __le16 bd_prod;
+ __le16 word5;
+ __le16 word6;
+ u8 byte3;
+ u8 byte4;
+ u8 byte5;
+ u8 byte6;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 more_to_send_seq;
+ __le32 local_adv_wnd_seq;
+ __le32 reg5;
+ __le32 reg6;
+ __le16 word7;
+ __le16 word8;
+ __le16 word9;
+ __le16 word10;
+ __le32 reg7;
+ __le32 reg8;
+ __le32 reg9;
+ u8 byte7;
+ u8 byte8;
+ u8 byte9;
+ u8 byte10;
+ u8 byte11;
+ u8 byte12;
+ u8 byte13;
+ u8 byte14;
+ u8 byte15;
+ u8 e5_reserved;
+ __le16 word11;
+ __le32 reg10;
+ __le32 reg11;
+ __le32 reg12;
+ __le32 reg13;
+ __le32 reg14;
+ __le32 reg15;
+ __le32 reg16;
+ __le32 reg17;
+};
+
+struct tstorm_toe_conn_ag_ctx {
+ u8 reserved0;
+ u8 byte1;
+ u8 flags0;
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_SHIFT 6
+ u8 flags1;
+#define TSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF4_SHIFT 6
+ u8 flags2;
+#define TSTORM_TOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF8_SHIFT 6
+ u8 flags3;
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_TOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMEOUT_CF_EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+ u8 flags4;
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags5;
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le32 reg4;
+ __le32 reg5;
+ __le32 reg6;
+ __le32 reg7;
+ __le32 reg8;
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+};
+
+struct ustorm_toe_conn_ag_ctx {
+ u8 reserved;
+ u8 byte1;
+ u8 flags0;
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_SHIFT 6
+ u8 flags1;
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_TOE_CONN_AG_CTX_CF6_SHIFT 6
+ u8 flags2;
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_PUSH_TIMER_CF_EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_SLOW_PATH_CF_EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+ u8 flags3;
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_TOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+ u8 byte2;
+ u8 byte3;
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+ __le32 reg2;
+ __le32 reg3;
+ __le16 word2;
+ __le16 word3;
+};
+
+/* The toe storm context of Tstorm */
+struct tstorm_toe_conn_st_ctx {
+ __le32 reserved[16];
+};
+
+/* The toe storm context of Ustorm */
+struct ustorm_toe_conn_st_ctx {
+ __le32 reserved[52];
+};
+
+/* toe connection context */
+struct toe_conn_context {
+ struct ystorm_toe_conn_st_ctx ystorm_st_context;
+ struct pstorm_toe_conn_st_ctx pstorm_st_context;
+ struct regpair pstorm_st_padding[2];
+ struct xstorm_toe_conn_st_ctx xstorm_st_context;
+ struct regpair xstorm_st_padding[2];
+ struct ystorm_toe_conn_ag_ctx ystorm_ag_context;
+ struct xstorm_toe_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_toe_conn_ag_ctx tstorm_ag_context;
+ struct regpair tstorm_ag_padding[2];
+ struct timers_context timer_context;
+ struct ustorm_toe_conn_ag_ctx ustorm_ag_context;
+ struct tstorm_toe_conn_st_ctx tstorm_st_context;
+ struct mstorm_toe_conn_st_ctx mstorm_st_context;
+ struct ustorm_toe_conn_st_ctx ustorm_st_context;
+};
+
+/* toe init ramrod header */
+struct toe_init_ramrod_header {
+ u8 first_rss;
+ u8 num_rss;
+ u8 reserved[6];
+};
+
+/* toe pf init parameters */
+struct toe_pf_init_params {
+ __le32 push_timeout;
+ __le16 grq_buffer_size;
+ __le16 grq_sb_id;
+ u8 grq_sb_index;
+ u8 max_seg_retransmit;
+ u8 doubt_reachability;
+ u8 ll2_rx_queue_id;
+ __le16 grq_fetch_threshold;
+ u8 reserved1[2];
+ struct regpair grq_page_addr;
+};
+
+/* toe tss parameters */
+struct toe_tss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe rss parameters */
+struct toe_rss_params {
+ struct regpair curr_page_addr;
+ struct regpair next_page_addr;
+ u8 reserved0;
+ u8 status_block_index;
+ __le16 status_block_id;
+ __le16 reserved1[2];
+};
+
+/* toe init ramrod data */
+struct toe_init_ramrod_data {
+ struct toe_init_ramrod_header hdr;
+ struct tcp_init_params tcp_params;
+ struct toe_pf_init_params pf_params;
+ struct toe_tss_params tss_params[TOE_TX_MAX_TSS_CHAINS];
+ struct toe_rss_params rss_params[TOE_RX_MAX_RSS_CHAINS];
+};
+
+/* toe offload parameters */
+struct toe_offload_params {
+ struct regpair tx_bd_page_addr;
+ struct regpair tx_app_page_addr;
+ __le32 more_to_send_seq;
+ __le16 rcv_indication_size;
+ u8 rss_tss_id;
+ u8 ignore_grq_push;
+ struct regpair rx_db_data_ptr;
+};
+
+/* TOE offload ramrod data - DMAed by firmware */
+struct toe_offload_ramrod_data {
+ struct tcp_offload_params tcp_ofld_params;
+ struct toe_offload_params toe_ofld_params;
+};
+
+/* TOE ramrod command IDs */
+enum toe_ramrod_cmd_id {
+ TOE_RAMROD_UNUSED,
+ TOE_RAMROD_FUNC_INIT,
+ TOE_RAMROD_INITATE_OFFLOAD,
+ TOE_RAMROD_FUNC_CLOSE,
+ TOE_RAMROD_SEARCHER_DELETE,
+ TOE_RAMROD_TERMINATE,
+ TOE_RAMROD_QUERY,
+ TOE_RAMROD_UPDATE,
+ TOE_RAMROD_EMPTY,
+ TOE_RAMROD_RESET_SEND,
+ TOE_RAMROD_INVALIDATE,
+ MAX_TOE_RAMROD_CMD_ID
+};
+
+/* Toe RQ buffer descriptor */
+struct toe_rx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_RX_BD_START_MASK 0x1
+#define TOE_RX_BD_START_SHIFT 0
+#define TOE_RX_BD_END_MASK 0x1
+#define TOE_RX_BD_END_SHIFT 1
+#define TOE_RX_BD_NO_PUSH_MASK 0x1
+#define TOE_RX_BD_NO_PUSH_SHIFT 2
+#define TOE_RX_BD_SPLIT_MASK 0x1
+#define TOE_RX_BD_SPLIT_SHIFT 3
+#define TOE_RX_BD_RESERVED0_MASK 0xFFF
+#define TOE_RX_BD_RESERVED0_SHIFT 4
+ __le32 reserved1;
+};
+
+/* TOE RX completion queue opcodes (opcode 0 is illegal) */
+enum toe_rx_cmp_opcode {
+ TOE_RX_CMP_OPCODE_GA = 1,
+ TOE_RX_CMP_OPCODE_GR = 2,
+ TOE_RX_CMP_OPCODE_GNI = 3,
+ TOE_RX_CMP_OPCODE_GAIR = 4,
+ TOE_RX_CMP_OPCODE_GAIL = 5,
+ TOE_RX_CMP_OPCODE_GRI = 6,
+ TOE_RX_CMP_OPCODE_GJ = 7,
+ TOE_RX_CMP_OPCODE_DGI = 8,
+ TOE_RX_CMP_OPCODE_CMP = 9,
+ TOE_RX_CMP_OPCODE_REL = 10,
+ TOE_RX_CMP_OPCODE_SKP = 11,
+ TOE_RX_CMP_OPCODE_URG = 12,
+ TOE_RX_CMP_OPCODE_RT_TO = 13,
+ TOE_RX_CMP_OPCODE_KA_TO = 14,
+ TOE_RX_CMP_OPCODE_MAX_RT = 15,
+ TOE_RX_CMP_OPCODE_DBT_RE = 16,
+ TOE_RX_CMP_OPCODE_SYN = 17,
+ TOE_RX_CMP_OPCODE_OPT_ERR = 18,
+ TOE_RX_CMP_OPCODE_FW2_TO = 19,
+ TOE_RX_CMP_OPCODE_2WY_CLS = 20,
+ TOE_RX_CMP_OPCODE_RST_RCV = 21,
+ TOE_RX_CMP_OPCODE_FIN_RCV = 22,
+ TOE_RX_CMP_OPCODE_FIN_UPL = 23,
+ TOE_RX_CMP_OPCODE_INIT = 32,
+ TOE_RX_CMP_OPCODE_RSS_UPDATE = 33,
+ TOE_RX_CMP_OPCODE_CLOSE = 34,
+ TOE_RX_CMP_OPCODE_INITIATE_OFFLOAD = 80,
+ TOE_RX_CMP_OPCODE_SEARCHER_DELETE = 81,
+ TOE_RX_CMP_OPCODE_TERMINATE = 82,
+ TOE_RX_CMP_OPCODE_QUERY = 83,
+ TOE_RX_CMP_OPCODE_RESET_SEND = 84,
+ TOE_RX_CMP_OPCODE_INVALIDATE = 85,
+ TOE_RX_CMP_OPCODE_EMPTY = 86,
+ TOE_RX_CMP_OPCODE_UPDATE = 87,
+ MAX_TOE_RX_CMP_OPCODE
+};
+
+/* TOE rx ooo completion data */
+struct toe_rx_cqe_ooo_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ u8 isle_num;
+ u8 reserved0;
+};
+
+/* TOE rx in order completion data */
+struct toe_rx_cqe_in_order_params {
+ __le32 nbytes;
+ __le16 grq_buff_id;
+ __le16 reserved1;
+};
+
+/* Union for TOE rx completion data */
+union toe_rx_cqe_data_union {
+ struct toe_rx_cqe_ooo_params ooo_params;
+ struct toe_rx_cqe_in_order_params in_order_params;
+ struct regpair raw_data;
+};
+
+/* TOE rx completion element */
+struct toe_rx_cqe {
+ __le16 icid;
+ u8 completion_opcode;
+ u8 reserved0;
+ __le32 reserved1;
+ union toe_rx_cqe_data_union data;
+};
+
+/* toe RX doorbell data */
+struct toe_rx_db_data {
+ __le32 local_adv_wnd_seq;
+ __le32 reserved[3];
+};
+
+/* Toe GRQ buffer descriptor */
+struct toe_rx_grq_bd {
+ struct regpair addr;
+ __le16 buff_id;
+ __le16 reserved0;
+ __le32 reserved1;
+};
+
+/* Toe transmission application buffer descriptor */
+struct toe_tx_app_buff_desc {
+ __le32 next_buffer_start_seq;
+ __le32 reserved;
+};
+
+/* Toe transmission application buffer descriptor page pointer */
+struct toe_tx_app_buff_page_pointer {
+ struct regpair next_page_addr;
+};
+
+/* Toe transmission buffer descriptor */
+struct toe_tx_bd {
+ struct regpair addr;
+ __le16 size;
+ __le16 flags;
+#define TOE_TX_BD_PUSH_MASK 0x1
+#define TOE_TX_BD_PUSH_SHIFT 0
+#define TOE_TX_BD_NOTIFY_MASK 0x1
+#define TOE_TX_BD_NOTIFY_SHIFT 1
+#define TOE_TX_BD_LARGE_IO_MASK 0x1
+#define TOE_TX_BD_LARGE_IO_SHIFT 2
+#define TOE_TX_BD_BD_CONS_MASK 0x1FFF
+#define TOE_TX_BD_BD_CONS_SHIFT 3
+ __le32 next_bd_start_seq;
+};
+
+/* TOE completion opcodes */
+enum toe_tx_cmp_opcode {
+ TOE_TX_CMP_OPCODE_DATA,
+ TOE_TX_CMP_OPCODE_TERMINATE,
+ TOE_TX_CMP_OPCODE_EMPTY,
+ TOE_TX_CMP_OPCODE_RESET_SEND,
+ TOE_TX_CMP_OPCODE_INVALIDATE,
+ TOE_TX_CMP_OPCODE_RST_RCV,
+ MAX_TOE_TX_CMP_OPCODE
+};
+
+/* Toe transmission completion element */
+struct toe_tx_cqe {
+ __le16 icid;
+ u8 opcode;
+ u8 reserved;
+ __le32 size;
+};
+
+/* Toe transmission page pointer bd */
+struct toe_tx_page_pointer_bd {
+ struct regpair next_page_addr;
+ struct regpair prev_page_addr;
+};
+
+/* Toe transmission completion element page pointer */
+struct toe_tx_page_pointer_cqe {
+ struct regpair next_page_addr;
+};
+
+/* toe update parameters */
+struct toe_update_params {
+ __le16 flags;
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_MASK 0x1
+#define TOE_UPDATE_PARAMS_RCV_INDICATION_SIZE_CHANGED_SHIFT 0
+#define TOE_UPDATE_PARAMS_RESERVED_MASK 0x7FFF
+#define TOE_UPDATE_PARAMS_RESERVED_SHIFT 1
+ __le16 rcv_indication_size;
+ __le16 reserved1[2];
+};
+
+/* TOE update ramrod data - DMAed by firmware */
+struct toe_update_ramrod_data {
+ struct tcp_update_params tcp_upd_params;
+ struct toe_update_params toe_upd_params;
+};
+
+struct mstorm_toe_conn_ag_ctx {
+ u8 byte0;
+ u8 byte1;
+ u8 flags0;
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_TOE_CONN_AG_CTX_CF2_SHIFT 6
+ u8 flags1;
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_TOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+ __le16 word0;
+ __le16 word1;
+ __le32 reg0;
+ __le32 reg1;
+};
+
+/* TOE doorbell data */
+struct toe_db_data {
+ u8 params;
+#define TOE_DB_DATA_DEST_MASK 0x3
+#define TOE_DB_DATA_DEST_SHIFT 0
+#define TOE_DB_DATA_AGG_CMD_MASK 0x3
+#define TOE_DB_DATA_AGG_CMD_SHIFT 2
+#define TOE_DB_DATA_BYPASS_EN_MASK 0x1
+#define TOE_DB_DATA_BYPASS_EN_SHIFT 4
+#define TOE_DB_DATA_RESERVED_MASK 0x1
+#define TOE_DB_DATA_RESERVED_SHIFT 5
+#define TOE_DB_DATA_AGG_VAL_SEL_MASK 0x3
+#define TOE_DB_DATA_AGG_VAL_SEL_SHIFT 6
+ u8 agg_flags;
+ __le16 bd_prod;
};
/* rdma function init ramrod data */
@@ -6911,6 +6150,8 @@ enum rdma_event_opcode {
RDMA_EVENT_CREATE_SRQ,
RDMA_EVENT_MODIFY_SRQ,
RDMA_EVENT_DESTROY_SRQ,
+ RDMA_EVENT_START_NAMESPACE_TRACKING,
+ RDMA_EVENT_STOP_NAMESPACE_TRACKING,
MAX_RDMA_EVENT_OPCODE
};
@@ -6935,18 +6176,33 @@ struct rdma_init_func_hdr {
u8 relaxed_ordering;
__le16 first_reg_srq_id;
__le32 reg_srq_base_addr;
- u8 searcher_mode;
- u8 pvrdma_mode;
+ u8 flags;
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_SEARCHER_MODE_SHIFT 0
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_PVRDMA_MODE_SHIFT 1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_MASK 0x1
+#define RDMA_INIT_FUNC_HDR_DPT_MODE_SHIFT 2
+#define RDMA_INIT_FUNC_HDR_RESERVED0_MASK 0x1F
+#define RDMA_INIT_FUNC_HDR_RESERVED0_SHIFT 3
+ u8 dpt_byte_threshold_log;
+ u8 dpt_common_queue_id;
u8 max_num_ns_log;
- u8 reserved;
};
/* rdma function init ramrod data */
struct rdma_init_func_ramrod_data {
struct rdma_init_func_hdr params_header;
+ struct rdma_cnq_params dptq_params;
struct rdma_cnq_params cnq_params[NUM_OF_GLOBAL_QUEUES];
};
+/* rdma namespace tracking ramrod data */
+struct rdma_namespace_tracking_ramrod_data {
+ u8 name_space;
+ u8 reserved[7];
+};
+
/* RDMA ramrod command IDs */
enum rdma_ramrod_cmd_id {
RDMA_RAMROD_UNUSED,
@@ -6960,6 +6216,8 @@ enum rdma_ramrod_cmd_id {
RDMA_RAMROD_CREATE_SRQ,
RDMA_RAMROD_MODIFY_SRQ,
RDMA_RAMROD_DESTROY_SRQ,
+ RDMA_RAMROD_START_NS_TRACKING,
+ RDMA_RAMROD_STOP_NS_TRACKING,
MAX_RDMA_RAMROD_CMD_ID
};
@@ -7093,73 +6351,73 @@ struct rdma_xrc_srq_context {
struct regpair temp[9];
};
-struct e4_tstorm_rdma_task_ag_ctx {
+struct tstorm_rdma_task_ag_ctx {
u8 byte0;
u8 byte1;
__le16 word0;
u8 flags0;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
-#define E4_TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_MASK 0xF
+#define TSTORM_RDMA_TASK_AG_CTX_NIBBLE0_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT0_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT1_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT3_SHIFT 7
u8 flags1;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT4_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_BIT5_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF0_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF1_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2_SHIFT 6
u8 flags2;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF3_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF4_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF5_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF6_SHIFT 6
u8 flags3;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_MASK 0x3
+#define TSTORM_RDMA_TASK_AG_CTX_CF7_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF5EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF6EN_SHIFT 0
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_CF7EN_SHIFT 1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE0EN_SHIFT 2
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE1EN_SHIFT 3
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE2EN_SHIFT 4
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE3EN_SHIFT 5
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE4EN_SHIFT 6
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_RDMA_TASK_AG_CTX_RULE5EN_SHIFT 7
u8 byte2;
__le16 word1;
__le32 reg0;
@@ -7172,63 +6430,63 @@ struct e4_tstorm_rdma_task_ag_ctx {
__le32 reg2;
};
-struct e4_ustorm_rdma_conn_ag_ctx {
+struct ustorm_rdma_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_DIF_ERROR_REPORTED_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_RDMA_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_RDMA_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 nvmf_only;
__le16 conn_dpi;
@@ -7241,214 +6499,214 @@ struct e4_ustorm_rdma_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_conn_ag_ctx {
+struct xstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE10EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_MIGRATION_SHIFT 0
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -7470,89 +6728,89 @@ struct e4_xstorm_roce_conn_ag_ctx {
__le32 reg6;
};
-struct e4_tstorm_roce_conn_ag_ctx {
+struct tstorm_roce_conn_ag_ctx {
u8 reserved0;
u8 byte1;
u8 flags0;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 reg2;
@@ -7605,15 +6863,15 @@ struct ustorm_roce_conn_st_ctx {
};
/* roce connection context */
-struct e4_roce_conn_context {
+struct roce_conn_context {
struct ystorm_roce_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_roce_conn_st_ctx pstorm_st_context;
struct xstorm_roce_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_roce_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_roce_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_roce_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_roce_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_roce_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_roce_conn_st_ctx mstorm_st_context;
@@ -7681,8 +6939,10 @@ struct roce_create_qp_req_ramrod_data {
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_EDPM_MODE_SHIFT 0
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_REQ_RAMROD_DATA_VF_ID_VALID_SHIFT 1
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x3F
-#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 2
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_MASK 0x1F
+#define ROCE_CREATE_QP_REQ_RAMROD_DATA_RESERVED_SHIFT 3
u8 name_space;
u8 reserved3[3];
__le16 regular_latency_phy_queue;
@@ -7714,8 +6974,10 @@ struct roce_create_qp_resp_ramrod_data {
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_XRC_FLAG_SHIFT 16
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_MASK 0x1
#define ROCE_CREATE_QP_RESP_RAMROD_DATA_VF_ID_VALID_SHIFT 17
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x3FFF
-#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 18
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_MASK 0x1FFF
+#define ROCE_CREATE_QP_RESP_RAMROD_DATA_RESERVED_SHIFT 19
__le16 xrc_domain;
u8 max_ird;
u8 traffic_class;
@@ -7752,10 +7014,85 @@ struct roce_create_qp_resp_ramrod_data {
u8 reserved3[3];
};
+/* RoCE Create Suspended qp requester runtime ramrod data */
+struct roce_create_suspended_qp_req_runtime_ramrod_data {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_MASK \
+ 0x7FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_REQ_RUNTIME_RAMROD_DATA_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+};
+
+/* RoCE Create Suspended QP requester ramrod data */
+struct roce_create_suspended_qp_req_ramrod_data {
+ struct roce_create_qp_req_ramrod_data qp_params;
+ struct roce_create_suspended_qp_req_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE Create Suspended QP responder runtime params */
+struct roce_create_suspended_qp_resp_runtime_params {
+ __le32 flags;
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_CREATE_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+ __le32 resreved;
+};
+
+/* RoCE RDB array entry */
+struct roce_resp_qp_rdb_entry {
+ struct regpair atomic_data;
+ struct regpair va;
+ __le32 psn;
+ __le32 rkey;
+ __le32 byte_count;
+ u8 op_type;
+ u8 reserved[3];
+};
+
+/* RoCE Create Suspended QP responder runtime ramrod data */
+struct roce_create_suspended_qp_resp_runtime_ramrod_data {
+ struct roce_create_suspended_qp_resp_runtime_params params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Create Suspended QP responder ramrod data */
+struct roce_create_suspended_qp_resp_ramrod_data {
+ struct roce_create_qp_resp_ramrod_data
+ qp_params;
+ struct roce_create_suspended_qp_resp_runtime_ramrod_data
+ qp_runtime_params;
+};
+
+/* RoCE create ud qp ramrod data */
+struct roce_create_ud_qp_ramrod_data {
+ __le16 local_mac_addr[3];
+ __le16 vlan_id;
+ __le32 src_qp_id;
+ u8 name_space;
+ u8 reserved[3];
+};
+
/* roce DCQCN received statistics */
struct roce_dcqcn_received_stats {
struct regpair ecn_pkt_rcv;
struct regpair cnp_pkt_rcv;
+ struct regpair cnp_pkt_reject;
};
/* roce DCQCN sent statistics */
@@ -7787,6 +7124,12 @@ struct roce_destroy_qp_resp_ramrod_data {
__le32 reserved;
};
+/* RoCE destroy ud qp ramrod data */
+struct roce_destroy_ud_qp_ramrod_data {
+ __le32 src_qp_id;
+ __le32 reserved;
+};
+
/* roce error statistics */
struct roce_error_stats {
__le32 resp_remote_access_errors;
@@ -7809,13 +7152,21 @@ struct roce_events_stats {
/* roce slow path EQ cmd IDs */
enum roce_event_opcode {
- ROCE_EVENT_CREATE_QP = 11,
+ ROCE_EVENT_CREATE_QP = 13,
ROCE_EVENT_MODIFY_QP,
ROCE_EVENT_QUERY_QP,
ROCE_EVENT_DESTROY_QP,
ROCE_EVENT_CREATE_UD_QP,
ROCE_EVENT_DESTROY_UD_QP,
ROCE_EVENT_FUNC_UPDATE,
+ ROCE_EVENT_SUSPEND_QP,
+ ROCE_EVENT_QUERY_SUSPENDED_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_QP,
+ ROCE_EVENT_RESUME_QP,
+ ROCE_EVENT_SUSPEND_UD_QP,
+ ROCE_EVENT_RESUME_UD_QP,
+ ROCE_EVENT_CREATE_SUSPENDED_UD_QP,
+ ROCE_EVENT_FLUSH_DPT_QP,
MAX_ROCE_EVENT_OPCODE
};
@@ -7843,6 +7194,18 @@ struct roce_init_func_ramrod_data {
struct roce_init_func_params roce;
};
+/* roce_ll2_cqe_data */
+struct roce_ll2_cqe_data {
+ u8 name_space;
+ u8 flags;
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_MASK 0x1
+#define ROCE_LL2_CQE_DATA_QP_SUSPENDED_SHIFT 0
+#define ROCE_LL2_CQE_DATA_RESERVED0_MASK 0x7F
+#define ROCE_LL2_CQE_DATA_RESERVED0_SHIFT 1
+ u8 reserved1[2];
+ __le32 cid;
+};
+
/* roce modify qp requester ramrod data */
struct roce_modify_qp_req_ramrod_data {
__le16 flags;
@@ -7870,8 +7233,10 @@ struct roce_modify_qp_req_ramrod_data {
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PRI_SHIFT 10
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 13
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x3
-#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_FORCE_LB_SHIFT 14
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_MASK 0x1
+#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_RESERVED1_SHIFT 15
u8 fields;
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_MASK 0xF
#define ROCE_MODIFY_QP_REQ_RAMROD_DATA_ERR_RETRY_CNT_SHIFT 0
@@ -7917,8 +7282,10 @@ struct roce_modify_qp_resp_ramrod_data {
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RDMA_OPS_EN_FLG_SHIFT 9
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_MASK 0x1
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PHYSICAL_QUEUE_FLG_SHIFT 10
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0x1F
-#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_MASK 0x1
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_FORCE_LB_SHIFT 11
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_MASK 0xF
+#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_RESERVED1_SHIFT 12
u8 fields;
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_MASK 0x7
#define ROCE_MODIFY_QP_RESP_RAMROD_DATA_PRI_SHIFT 0
@@ -7969,18 +7336,84 @@ struct roce_query_qp_resp_ramrod_data {
struct regpair output_params_addr;
};
+/* RoCE Query Suspended QP requester output params */
+struct roce_query_suspended_qp_req_output_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_MASK 0x7FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_REQ_OUTPUT_PARAMS_RESERVED0_SHIFT 1
+ __le32 send_msg_psn;
+ __le32 inflight_sends;
+ __le32 ssn;
+ __le32 reserved;
+};
+
+/* RoCE Query Suspended QP requester ramrod data */
+struct roce_query_suspended_qp_req_ramrod_data {
+ struct regpair output_params_addr;
+};
+
+/* RoCE Query Suspended QP responder runtime params */
+struct roce_query_suspended_qp_resp_runtime_params {
+ __le32 psn;
+ __le32 flags;
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_ERR_FLG_SHIFT 0
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_MASK 0x1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RDMA_ACTIVE_SHIFT 1
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_MASK 0x3FFFFFFF
+#define ROCE_QUERY_SUSPENDED_QP_RESP_RUNTIME_PARAMS_RESERVED0_SHIFT 2
+ __le32 receive_msg_psn;
+ __le32 inflight_receives;
+ __le32 rmsn;
+ __le32 rdma_key;
+ struct regpair rdma_va;
+ __le32 rdma_length;
+ __le32 num_rdb_entries;
+};
+
+/* RoCE Query Suspended QP responder output params */
+struct roce_query_suspended_qp_resp_output_params {
+ struct roce_query_suspended_qp_resp_runtime_params runtime_params;
+ struct roce_resp_qp_rdb_entry
+ rdb_array_entries[RDMA_MAX_IRQ_ELEMS_IN_PAGE];
+};
+
+/* RoCE Query Suspended QP responder ramrod data */
+struct roce_query_suspended_qp_resp_ramrod_data {
+ struct regpair output_params_addr;
+};
+
/* ROCE ramrod command IDs */
enum roce_ramrod_cmd_id {
- ROCE_RAMROD_CREATE_QP = 11,
+ ROCE_RAMROD_CREATE_QP = 13,
ROCE_RAMROD_MODIFY_QP,
ROCE_RAMROD_QUERY_QP,
ROCE_RAMROD_DESTROY_QP,
ROCE_RAMROD_CREATE_UD_QP,
ROCE_RAMROD_DESTROY_UD_QP,
ROCE_RAMROD_FUNC_UPDATE,
+ ROCE_RAMROD_SUSPEND_QP,
+ ROCE_RAMROD_QUERY_SUSPENDED_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_QP,
+ ROCE_RAMROD_RESUME_QP,
+ ROCE_RAMROD_SUSPEND_UD_QP,
+ ROCE_RAMROD_RESUME_UD_QP,
+ ROCE_RAMROD_CREATE_SUSPENDED_UD_QP,
+ ROCE_RAMROD_FLUSH_DPT_QP,
MAX_ROCE_RAMROD_CMD_ID
};
+/* ROCE RDB array entry type */
+enum roce_resp_qp_rdb_entry_type {
+ ROCE_QP_RDB_ENTRY_RDMA_RESPONSE = 0,
+ ROCE_QP_RDB_ENTRY_ATOMIC_RESPONSE = 1,
+ ROCE_QP_RDB_ENTRY_INVALID = 2,
+ MAX_ROCE_RESP_QP_RDB_ENTRY_TYPE
+};
+
/* RoCE func init ramrod data */
struct roce_update_func_params {
u8 cnp_vlan_priority;
@@ -7995,7 +7428,7 @@ struct roce_update_func_params {
__le32 cnp_send_timeout;
};
-struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
+struct xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
u8 reserved0;
u8 state;
u8 flags0;
@@ -8222,200 +7655,200 @@ struct e4_xstorm_roce_conn_ag_ctx_dq_ext_ld_part {
__le32 reg4;
};
-struct e4_mstorm_roce_conn_ag_ctx {
+struct mstorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_req_conn_ag_ctx {
+struct mstorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_mstorm_roce_resp_conn_ag_ctx {
+struct mstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
__le32 reg1;
};
-struct e4_tstorm_roce_req_conn_ag_ctx {
+struct tstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_OCCURRED_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_CQE_ERROR_OCCURRED_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_MASK 0x3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_SQ_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_FORCE_COMP_CF_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SET_TIMER_CF_EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_TX_ASYNC_ERROR_CF_EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RXMIT_DONE_CF_EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_SCAN_COMPLETED_CF_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SQ_DRAIN_COMPLETED_CF_EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_DIF_CNT_EN_SHIFT 1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_SND_SQ_CONS_EN_SHIFT 5
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 dif_rxmit_cnt;
__le32 snd_nxt_psn;
__le32 snd_max_psn;
@@ -8437,89 +7870,89 @@ struct e4_tstorm_roce_req_conn_ag_ctx {
__le32 reg10;
};
-struct e4_tstorm_roce_resp_conn_ag_ctx {
+struct tstorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_NOTIFY_REQUESTER_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_MSTORM_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_TX_ERROR_CF_EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RQ_RULE_EN_SHIFT 5
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 psn_and_rxmit_id_echo;
__le32 reg1;
__le32 reg2;
@@ -8541,63 +7974,63 @@ struct e4_tstorm_roce_resp_conn_ag_ctx {
__le32 reg10;
};
-struct e4_ustorm_roce_req_conn_ag_ctx {
+struct ustorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_REQ_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8610,63 +8043,63 @@ struct e4_ustorm_roce_req_conn_ag_ctx {
__le16 word3;
};
-struct e4_ustorm_roce_resp_conn_ag_ctx {
+struct ustorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ROCE_RESP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -8679,214 +8112,214 @@ struct e4_ustorm_roce_resp_conn_ag_ctx {
__le16 word3;
};
-struct e4_xstorm_roce_req_conn_ag_ctx {
+struct xstorm_roce_req_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FMR_ENDED_CF_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SND_RXMIT_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DIF_ERROR_CF_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SCAN_SQ_FOR_COMP_CF_EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_FME_ENDED_CF_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_E2E_CREDIT_RULE_EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_SQ_PROD_EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_INV_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ORQ_FENCE_RULE_EN_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MAX_ORD_RULE_EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_MIGRATION_FLAG_SHIFT 0
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_DPM_PORT_NUM_SHIFT 2
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_RESERVED_SHIFT 4
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_REQ_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -8908,216 +8341,216 @@ struct e4_xstorm_roce_req_conn_ag_ctx {
__le32 orq_cons;
};
-struct e4_xstorm_roce_resp_conn_ag_ctx {
+struct xstorm_roce_resp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSDM_FLUSH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_MSEM_FLUSH_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_ERROR_STATE_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_YSTORM_FLUSH_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RXMIT_CF_EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RX_ERROR_CF_EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FORCE_ACK_CF_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF20EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF21EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_IRQ_PROD_RULE_EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE18EN_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_ROCE_RESP_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 irq_prod_shadow;
@@ -9139,37 +8572,37 @@ struct e4_xstorm_roce_resp_conn_ag_ctx {
__le32 msn_and_syndrome;
};
-struct e4_ystorm_roce_conn_ag_ctx {
+struct ystorm_roce_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9183,37 +8616,37 @@ struct e4_ystorm_roce_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_req_conn_ag_ctx {
+struct ystorm_roce_req_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_REQ_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9227,37 +8660,37 @@ struct e4_ystorm_roce_req_conn_ag_ctx {
__le32 reg3;
};
-struct e4_ystorm_roce_resp_conn_ag_ctx {
+struct ystorm_roce_resp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ROCE_RESP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -9294,216 +8727,216 @@ struct xstorm_iwarp_conn_st_ctx {
__le32 reserved[48];
};
-struct e4_xstorm_iwarp_conn_ag_ctx {
+struct xstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_YSTORM_FLUSH_OR_REWIND_SND_MAX_SHIFT 7
u8 flags2;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FLUSH_CF_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MPA_OR_ERROR_WAKEUP_TRIGGER_CF_EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_FLUSH_Q1_EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_MORE_TO_SEND_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_SQ_FENCE_RULE_EN_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_IRQ_NOT_EMPTY_RULE_EN_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_HQ_NOT_FULL_RULE_EN_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_RD_FENCE_RULE_EN_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_RULE21EN_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_ORQ_NOT_FULL_RULE_EN_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
-#define E4_XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
-#define E4_XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED1_SHIFT 3
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED2_SHIFT 4
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_MASK 0x1
+#define XSTORM_IWARP_CONN_AG_CTX_E5_RESERVED3_SHIFT 5
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_MASK 0x3
+#define XSTORM_IWARP_CONN_AG_CTX_SEND_TERMINATE_CF_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -9551,89 +8984,89 @@ struct e4_xstorm_iwarp_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iwarp_conn_ag_ctx {
+struct tstorm_iwarp_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MSTORM_FLUSH_OR_TERMINATE_SENT_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CACHED_ORQ_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPLETE_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_MASK 0x3
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RQ_POST_CF_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_MPA_TIMEOUT_CF_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_Q0_AND_TCP_HANDSHAKE_COMPL_EN_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_FLUSH_OR_ERROR_DETECTED_EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_SND_SQ_CONS_RULE_SHIFT 5
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 unaligned_nxt_seq;
@@ -9671,16 +9104,16 @@ struct ustorm_iwarp_conn_st_ctx {
};
/* iwarp connection context */
-struct e4_iwarp_conn_context {
+struct iwarp_conn_context {
struct ystorm_iwarp_conn_st_ctx ystorm_st_context;
struct regpair ystorm_st_padding[2];
struct pstorm_iwarp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_iwarp_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iwarp_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iwarp_conn_ag_ctx tstorm_ag_context;
struct timers_context timer_context;
- struct e4_ustorm_rdma_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_rdma_conn_ag_ctx ustorm_ag_context;
struct tstorm_iwarp_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
struct mstorm_iwarp_conn_st_ctx mstorm_st_context;
@@ -9731,8 +9164,8 @@ enum iwarp_eqe_async_opcode {
IWARP_EVENT_TYPE_ASYNC_EXCEPTION_DETECTED,
IWARP_EVENT_TYPE_ASYNC_QP_IN_ERROR_STATE,
IWARP_EVENT_TYPE_ASYNC_CQ_OVERFLOW,
- IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
IWARP_EVENT_TYPE_ASYNC_SRQ_LIMIT,
+ IWARP_EVENT_TYPE_ASYNC_SRQ_EMPTY,
MAX_IWARP_EQE_ASYNC_OPCODE
};
@@ -9750,8 +9183,7 @@ struct iwarp_eqe_data_tcp_async_completion {
/* iWARP completion queue types */
enum iwarp_eqe_sync_opcode {
- IWARP_EVENT_TYPE_TCP_OFFLOAD =
- 11,
+ IWARP_EVENT_TYPE_TCP_OFFLOAD = 13,
IWARP_EVENT_TYPE_MPA_OFFLOAD,
IWARP_EVENT_TYPE_MPA_OFFLOAD_SEND_RTR,
IWARP_EVENT_TYPE_CREATE_QP,
@@ -9783,8 +9215,6 @@ enum iwarp_fw_return_code {
IWARP_EXCEPTION_DETECTED_LLP_RESET,
IWARP_EXCEPTION_DETECTED_IRQ_FULL,
IWARP_EXCEPTION_DETECTED_RQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_EMPTY,
- IWARP_EXCEPTION_DETECTED_SRQ_LIMIT,
IWARP_EXCEPTION_DETECTED_LLP_TIMEOUT,
IWARP_EXCEPTION_DETECTED_REMOTE_PROTECTION_ERROR,
IWARP_EXCEPTION_DETECTED_CQ_OVERFLOW,
@@ -9878,9 +9308,10 @@ struct iwarp_mpa_offload_ramrod_data {
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
struct regpair shared_queue_addr;
+ __le32 additional_setup_time;
__le16 rcv_wnd;
u8 stats_counter_id;
- u8 reserved3[13];
+ u8 reserved3[9];
};
/* iWARP TCP connection offload params passed by driver to FW */
@@ -9888,11 +9319,13 @@ struct iwarp_offload_params {
struct mpa_ulp_buffer incoming_ulp_buffer;
struct regpair async_eqe_output_buf;
struct regpair handle_for_async;
+ __le32 additional_setup_time;
__le16 physical_q0;
__le16 physical_q1;
u8 stats_counter_id;
u8 mpa_mode;
- u8 reserved[10];
+ u8 src_vport_id;
+ u8 reserved[5];
};
/* iWARP query QP output params */
@@ -9912,7 +9345,7 @@ struct iwarp_query_qp_ramrod_data {
/* iWARP Ramrod Command IDs */
enum iwarp_ramrod_cmd_id {
- IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 11,
+ IWARP_RAMROD_CMD_ID_TCP_OFFLOAD = 13,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD,
IWARP_RAMROD_CMD_ID_MPA_OFFLOAD_SEND_RTR,
IWARP_RAMROD_CMD_ID_CREATE_QP,
@@ -9971,100 +9404,100 @@ struct unaligned_opaque_data {
__le32 cid;
};
-struct e4_mstorm_iwarp_conn_ag_ctx {
+struct mstorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_INV_STAG_DONE_CF_EN_SHIFT 0
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RCQ_CONS_EN_SHIFT 6
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 rcq_cons;
__le16 rcq_cons_th;
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_iwarp_conn_ag_ctx {
+struct ustorm_iwarp_conn_ag_ctx {
u8 reserved;
u8 byte1;
u8 flags0;
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_IWARP_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_SE_CF_EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_ARM_CF_EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_SE_EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_CQ_EN_SHIFT 0
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_IWARP_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10077,37 +9510,37 @@ struct e4_ustorm_iwarp_conn_ag_ctx {
__le16 word3;
};
-struct e4_ystorm_iwarp_conn_ag_ctx {
+struct ystorm_iwarp_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_IWARP_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_IWARP_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10297,216 +9730,216 @@ struct xstorm_fcoe_conn_st_ctx {
struct fcoe_wqe cached_wqes[16];
};
-struct e4_xstorm_fcoe_conn_ag_ctx {
+struct xstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED1_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED3_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED4_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED5_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED6_SHIFT 7
u8 flags1;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED7_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED8_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT15_SHIFT 7
u8 flags2;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 6
u8 flags3;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF15_SHIFT 6
u8 flags6;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_SHIFT 6
u8 flags7;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED10_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_DQ_CF_EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED11_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_CF23EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED12_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED13_SHIFT 7
u8 flags11;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED14_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED15_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESERVED16_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_XFERQ_DECISION_EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_SQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RESPQ_DECISION_EN_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_RULE19EN_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
-#define E4_XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
-#define E4_XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_MASK 0x1
+#define XSTORM_FCOE_CONN_AG_CTX_BIT21_SHIFT 5
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_MASK 0x3
+#define XSTORM_FCOE_CONN_AG_CTX_CF23_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 word1;
@@ -10544,150 +9977,150 @@ struct ustorm_fcoe_conn_st_ctx {
u8 reserved[2];
};
-struct e4_tstorm_fcoe_conn_ag_ctx {
+struct tstorm_fcoe_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_SHIFT 6
u8 flags1;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF9_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_MASK 0x3
+#define TSTORM_FCOE_CONN_AG_CTX_CF10_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_FLUSH_Q0_CF_EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_TIMER_STOP_ALL_CF_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF9EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_CF10EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
};
-struct e4_ustorm_fcoe_conn_ag_ctx {
+struct ustorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_FCOE_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_FCOE_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10728,37 +10161,37 @@ struct tstorm_fcoe_conn_st_ctx {
u8 reserved0[4];
};
-struct e4_mstorm_fcoe_conn_ag_ctx {
+struct mstorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -10804,21 +10237,21 @@ struct mstorm_fcoe_conn_st_ctx {
};
/* fcoe connection context */
-struct e4_fcoe_conn_context {
+struct fcoe_conn_context {
struct ystorm_fcoe_conn_st_ctx ystorm_st_context;
struct pstorm_fcoe_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct xstorm_fcoe_conn_st_ctx xstorm_st_context;
- struct e4_xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
+ struct xstorm_fcoe_conn_ag_ctx xstorm_ag_context;
struct regpair xstorm_ag_padding[6];
struct ustorm_fcoe_conn_st_ctx ustorm_st_context;
struct regpair ustorm_st_padding[2];
- struct e4_tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
+ struct tstorm_fcoe_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_fcoe_conn_ag_ctx ustorm_ag_context;
struct tstorm_fcoe_conn_st_ctx tstorm_st_context;
- struct e4_mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_fcoe_conn_ag_ctx mstorm_ag_context;
struct mstorm_fcoe_conn_st_ctx mstorm_st_context;
};
@@ -10869,37 +10302,37 @@ struct fcoe_stat_ramrod_params {
struct fcoe_stat_ramrod_data stat_ramrod_data;
};
-struct e4_ystorm_fcoe_conn_ag_ctx {
+struct ystorm_fcoe_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_FCOE_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_FCOE_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -10930,216 +10363,216 @@ struct xstorm_iscsi_tcp_conn_st_ctx {
__le32 reserved_iscsi[44];
};
-struct e4_xstorm_iscsi_conn_ag_ctx {
+struct xstorm_iscsi_conn_ag_ctx {
u8 cdu_validation;
u8 state;
u8 flags0;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM1_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED2_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT6_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT7_SHIFT 7
u8 flags1;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT9_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT10_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT11_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT12_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT13_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT14_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_TRUNCATE_SHIFT 7
u8 flags2;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 6
u8 flags3;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 6
u8 flags4;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11_SHIFT 6
u8 flags5;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_SHIFT 6
u8 flags6;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_SHIFT 6
u8 flags7;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 7
u8 flags8;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF9EN_SHIFT 7
u8 flags9;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF10EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF12EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF13EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UPDATE_STATE_TO_BASE_CF_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF17EN_SHIFT 7
u8 flags10;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_CF18EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DQ_FLUSH_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MST_XCM_Q0_FLUSH_CF_EN_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_UST_XCM_Q1_FLUSH_CF_EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_MORE_TO_SEND_DEC_RULE_EN_SHIFT 7
u8 flags11;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_TX_BLOCKED_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RESERVED3_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED1_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE9EN_SHIFT 7
u8 flags12;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_SQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE11EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED2_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED3_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE14EN_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE15EN_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE16EN_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_RULE17EN_SHIFT 7
u8 flags13;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_R2TQ_DEC_RULE_EN_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_HQ_DEC_RULE_EN_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED4_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED5_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED6_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED7_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED8_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_A0_RESERVED9_SHIFT 7
u8 flags14;
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
-#define E4_XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT16_SHIFT 0
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT17_SHIFT 1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT18_SHIFT 2
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT19_SHIFT 3
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_BIT20_SHIFT 4
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_MASK 0x1
+#define XSTORM_ISCSI_CONN_AG_CTX_DUMMY_READ_DONE_SHIFT 5
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_MASK 0x3
+#define XSTORM_ISCSI_CONN_AG_CTX_PROC_ONLY_CLEANUP_SHIFT 6
u8 byte2;
__le16 physical_q0;
__le16 physical_q1;
@@ -11187,89 +10620,89 @@ struct e4_xstorm_iscsi_conn_ag_ctx {
__le32 reg17;
};
-struct e4_tstorm_iscsi_conn_ag_ctx {
+struct tstorm_iscsi_conn_ag_ctx {
u8 reserved0;
u8 state;
u8 flags0;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT2_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT3_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT4_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_BIT5_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 6
u8 flags1;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 6
u8 flags2;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8_SHIFT 6
u8 flags3;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_MASK 0x3
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_P2T_FLUSH_CF_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_M2T_FLUSH_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 7
u8 flags4;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF7EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_CF8EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_FLUSH_OOO_ISLES_CF_EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags5;
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define TSTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
__le32 reg0;
__le32 reg1;
__le32 rx_tcp_checksum_err_cnt;
@@ -11284,63 +10717,63 @@ struct e4_tstorm_iscsi_conn_ag_ctx {
__le16 word0;
};
-struct e4_ustorm_iscsi_conn_ag_ctx {
+struct ustorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF3_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF5_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_MASK 0x3
+#define USTORM_ISCSI_CONN_AG_CTX_CF6_SHIFT 6
u8 flags2;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF3EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF4EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF5EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_CF6EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 7
u8 flags3;
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
-#define E4_USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 0
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 2
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 3
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE5EN_SHIFT 4
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE6EN_SHIFT 5
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE7EN_SHIFT 6
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_MASK 0x1
+#define USTORM_ISCSI_CONN_AG_CTX_RULE8EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11358,37 +10791,37 @@ struct tstorm_iscsi_conn_st_ctx {
__le32 reserved[44];
};
-struct e4_mstorm_iscsi_conn_ag_ctx {
+struct mstorm_iscsi_conn_ag_ctx {
u8 reserved;
u8 state;
u8 flags0;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define MSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
__le16 word0;
__le16 word1;
__le32 reg0;
@@ -11407,22 +10840,22 @@ struct ustorm_iscsi_conn_st_ctx {
};
/* iscsi connection context */
-struct e4_iscsi_conn_context {
+struct iscsi_conn_context {
struct ystorm_iscsi_conn_st_ctx ystorm_st_context;
struct pstorm_iscsi_tcp_conn_st_ctx pstorm_st_context;
struct regpair pstorm_st_padding[2];
struct pb_context xpb2_context;
struct xstorm_iscsi_tcp_conn_st_ctx xstorm_st_context;
struct regpair xstorm_st_padding[2];
- struct e4_xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
- struct e4_tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
+ struct xstorm_iscsi_conn_ag_ctx xstorm_ag_context;
+ struct tstorm_iscsi_conn_ag_ctx tstorm_ag_context;
struct regpair tstorm_ag_padding[2];
struct timers_context timer_context;
- struct e4_ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
+ struct ustorm_iscsi_conn_ag_ctx ustorm_ag_context;
struct pb_context upb_context;
struct tstorm_iscsi_conn_st_ctx tstorm_st_context;
struct regpair tstorm_st_padding[2];
- struct e4_mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
+ struct mstorm_iscsi_conn_ag_ctx mstorm_ag_context;
struct mstorm_iscsi_tcp_conn_st_ctx mstorm_st_context;
struct ustorm_iscsi_conn_st_ctx ustorm_st_context;
};
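The flagsN bytes in the storm aggregative-context structures above are plain packed bitfields: each logical field is described by the _MASK/_SHIFT pair defined next to the byte it lives in, and this patch only drops the E4_ prefix from those names without changing the encoding. A minimal stand-alone sketch of the mask-and-shift idiom, using two of the mstorm flags0 fields restated locally; FIELD_SET8()/FIELD_GET8() are illustrative stand-ins here, not the driver's own accessors:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* Two of the renamed flags0 field definitions, restated for the sketch. */
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK	0x1
#define MSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT	0
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_MASK	0x3
#define MSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT	2

/* Local helper macros for the usual mask-and-shift packing. */
#define FIELD_SET8(reg, name, val)					\
	((u8)(((reg) & ~((name##_MASK) << (name##_SHIFT))) |		\
	      (((val) & (name##_MASK)) << (name##_SHIFT))))
#define FIELD_GET8(reg, name)						\
	(((reg) >> (name##_SHIFT)) & (name##_MASK))

int main(void)
{
	u8 flags0 = 0;

	/* Pack the 1-bit BIT0 field and the 2-bit CF0 field into one byte. */
	flags0 = FIELD_SET8(flags0, MSTORM_ISCSI_CONN_AG_CTX_BIT0, 1);
	flags0 = FIELD_SET8(flags0, MSTORM_ISCSI_CONN_AG_CTX_CF0, 2);

	printf("flags0 = 0x%02x, CF0 = %u\n",
	       flags0, FIELD_GET8(flags0, MSTORM_ISCSI_CONN_AG_CTX_CF0));
	return 0;
}

Run as written this prints flags0 = 0x09, CF0 = 2; the driver performs equivalent packing when it builds these context images for the firmware, so the rename below is purely cosmetic.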
@@ -11433,37 +10866,37 @@ struct iscsi_init_ramrod_params {
struct tcp_init_params tcp_init;
};
-struct e4_ystorm_iscsi_conn_ag_ctx {
+struct ystorm_iscsi_conn_ag_ctx {
u8 byte0;
u8 byte1;
u8 flags0;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT0_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_BIT1_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_MASK 0x3
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2_SHIFT 6
u8 flags1;
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
-#define E4_YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF0EN_SHIFT 0
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF1EN_SHIFT 1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_CF2EN_SHIFT 2
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE0EN_SHIFT 3
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE1EN_SHIFT 4
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE2EN_SHIFT 5
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE3EN_SHIFT 6
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_MASK 0x1
+#define YSTORM_ISCSI_CONN_AG_CTX_RULE4EN_SHIFT 7
u8 byte2;
u8 byte3;
__le16 word0;
@@ -11477,1922 +10910,4 @@ struct e4_ystorm_iscsi_conn_ag_ctx {
__le32 reg3;
};
-#define MFW_TRACE_SIGNATURE 0x25071946
-
-/* The trace in the buffer */
-#define MFW_TRACE_EVENTID_MASK 0x00ffff
-#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
-#define MFW_TRACE_PRM_SIZE_OFFSET 16
-#define MFW_TRACE_ENTRY_SIZE 3
-
-struct mcp_trace {
- u32 signature; /* Help to identify that the trace is valid */
- u32 size; /* the size of the trace buffer in bytes */
- u32 curr_level; /* 2 - all will be written to the buffer
- * 1 - debug trace will not be written
- * 0 - just errors will be written to the buffer
- */
- u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
- * mask it.
- */
-
- /* Warning: the following pointers are assumed to be 32bits as they are
- * used only in the MFW.
- */
- u32 trace_prod; /* The next trace will be written to this offset */
- u32 trace_oldest; /* The oldest valid trace starts at this offset
- * (usually very close after the current producer).
- */
-};
-
-#define VF_MAX_STATIC 192
-
-#define MCP_GLOB_PATH_MAX 2
-#define MCP_PORT_MAX 2
-#define MCP_GLOB_PORT_MAX 4
-#define MCP_GLOB_FUNC_MAX 16
-
-typedef u32 offsize_t; /* In DWORDS !!! */
-/* Offset from the beginning of the MCP scratchpad */
-#define OFFSIZE_OFFSET_SHIFT 0
-#define OFFSIZE_OFFSET_MASK 0x0000ffff
-/* Size of specific element (not the whole array if any) */
-#define OFFSIZE_SIZE_SHIFT 16
-#define OFFSIZE_SIZE_MASK 0xffff0000
-
-#define SECTION_OFFSET(_offsize) ((((_offsize & \
- OFFSIZE_OFFSET_MASK) >> \
- OFFSIZE_OFFSET_SHIFT) << 2))
-
-#define QED_SECTION_SIZE(_offsize) (((_offsize & \
- OFFSIZE_SIZE_MASK) >> \
- OFFSIZE_SIZE_SHIFT) << 2)
-
-#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
- SECTION_OFFSET(_offsize) + \
- (QED_SECTION_SIZE(_offsize) * idx))
-
-#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
- (_pub_base + offsetof(struct mcp_public_data, sections[_section]))
-
-/* PHY configuration */
-struct eth_phy_cfg {
- u32 speed;
-#define ETH_SPEED_AUTONEG 0x0
-#define ETH_SPEED_SMARTLINQ 0x8
-
- u32 pause;
-#define ETH_PAUSE_NONE 0x0
-#define ETH_PAUSE_AUTONEG 0x1
-#define ETH_PAUSE_RX 0x2
-#define ETH_PAUSE_TX 0x4
-
- u32 adv_speed;
-
- u32 loopback_mode;
-#define ETH_LOOPBACK_NONE 0x0
-#define ETH_LOOPBACK_INT_PHY 0x1
-#define ETH_LOOPBACK_EXT_PHY 0x2
-#define ETH_LOOPBACK_EXT 0x3
-#define ETH_LOOPBACK_MAC 0x4
-#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
-#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
-#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
-#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
-#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
-
- u32 eee_cfg;
-#define EEE_CFG_EEE_ENABLED BIT(0)
-#define EEE_CFG_TX_LPI BIT(1)
-#define EEE_CFG_ADV_SPEED_1G BIT(2)
-#define EEE_CFG_ADV_SPEED_10G BIT(3)
-#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
-#define EEE_TX_TIMER_USEC_OFFSET 4
-#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
-#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
-#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
-
- u32 deprecated;
-
- u32 fec_mode;
-#define FEC_FORCE_MODE_MASK 0x000000ff
-#define FEC_FORCE_MODE_OFFSET 0
-#define FEC_FORCE_MODE_NONE 0x00
-#define FEC_FORCE_MODE_FIRECODE 0x01
-#define FEC_FORCE_MODE_RS 0x02
-#define FEC_FORCE_MODE_AUTO 0x07
-#define FEC_EXTENDED_MODE_MASK 0xffffff00
-#define FEC_EXTENDED_MODE_OFFSET 8
-#define ETH_EXT_FEC_NONE 0x00000100
-#define ETH_EXT_FEC_10G_NONE 0x00000200
-#define ETH_EXT_FEC_10G_BASE_R 0x00000400
-#define ETH_EXT_FEC_20G_NONE 0x00000800
-#define ETH_EXT_FEC_20G_BASE_R 0x00001000
-#define ETH_EXT_FEC_25G_NONE 0x00002000
-#define ETH_EXT_FEC_25G_BASE_R 0x00004000
-#define ETH_EXT_FEC_25G_RS528 0x00008000
-#define ETH_EXT_FEC_40G_NONE 0x00010000
-#define ETH_EXT_FEC_40G_BASE_R 0x00020000
-#define ETH_EXT_FEC_50G_NONE 0x00040000
-#define ETH_EXT_FEC_50G_BASE_R 0x00080000
-#define ETH_EXT_FEC_50G_RS528 0x00100000
-#define ETH_EXT_FEC_50G_RS544 0x00200000
-#define ETH_EXT_FEC_100G_NONE 0x00400000
-#define ETH_EXT_FEC_100G_BASE_R 0x00800000
-#define ETH_EXT_FEC_100G_RS528 0x01000000
-#define ETH_EXT_FEC_100G_RS544 0x02000000
-
- u32 extended_speed;
-#define ETH_EXT_SPEED_MASK 0x0000ffff
-#define ETH_EXT_SPEED_OFFSET 0
-#define ETH_EXT_SPEED_AN 0x00000001
-#define ETH_EXT_SPEED_1G 0x00000002
-#define ETH_EXT_SPEED_10G 0x00000004
-#define ETH_EXT_SPEED_20G 0x00000008
-#define ETH_EXT_SPEED_25G 0x00000010
-#define ETH_EXT_SPEED_40G 0x00000020
-#define ETH_EXT_SPEED_50G_BASE_R 0x00000040
-#define ETH_EXT_SPEED_50G_BASE_R2 0x00000080
-#define ETH_EXT_SPEED_100G_BASE_R2 0x00000100
-#define ETH_EXT_SPEED_100G_BASE_R4 0x00000200
-#define ETH_EXT_SPEED_100G_BASE_P4 0x00000400
-#define ETH_EXT_ADV_SPEED_MASK 0xffff0000
-#define ETH_EXT_ADV_SPEED_OFFSET 16
-#define ETH_EXT_ADV_SPEED_RESERVED 0x00010000
-#define ETH_EXT_ADV_SPEED_1G 0x00020000
-#define ETH_EXT_ADV_SPEED_10G 0x00040000
-#define ETH_EXT_ADV_SPEED_20G 0x00080000
-#define ETH_EXT_ADV_SPEED_25G 0x00100000
-#define ETH_EXT_ADV_SPEED_40G 0x00200000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00400000
-#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00800000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x01000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x02000000
-#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x04000000
-};
-
-struct port_mf_cfg {
- u32 dynamic_cfg;
-#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
-#define PORT_MF_CFG_OV_TAG_SHIFT 0
-#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
-
- u32 reserved[1];
-};
-
-struct eth_stats {
- u64 r64;
- u64 r127;
- u64 r255;
- u64 r511;
- u64 r1023;
- u64 r1518;
-
- union {
- struct {
- u64 r1522;
- u64 r2047;
- u64 r4095;
- u64 r9216;
- u64 r16383;
- } bb0;
- struct {
- u64 unused1;
- u64 r1519_to_max;
- u64 unused2;
- u64 unused3;
- u64 unused4;
- } ah0;
- } u0;
-
- u64 rfcs;
- u64 rxcf;
- u64 rxpf;
- u64 rxpp;
- u64 raln;
- u64 rfcr;
- u64 rovr;
- u64 rjbr;
- u64 rund;
- u64 rfrg;
- u64 t64;
- u64 t127;
- u64 t255;
- u64 t511;
- u64 t1023;
- u64 t1518;
-
- union {
- struct {
- u64 t2047;
- u64 t4095;
- u64 t9216;
- u64 t16383;
- } bb1;
- struct {
- u64 t1519_to_max;
- u64 unused6;
- u64 unused7;
- u64 unused8;
- } ah1;
- } u1;
-
- u64 txpf;
- u64 txpp;
-
- union {
- struct {
- u64 tlpiec;
- u64 tncl;
- } bb2;
- struct {
- u64 unused9;
- u64 unused10;
- } ah2;
- } u2;
-
- u64 rbyte;
- u64 rxuca;
- u64 rxmca;
- u64 rxbca;
- u64 rxpok;
- u64 tbyte;
- u64 txuca;
- u64 txmca;
- u64 txbca;
- u64 txcf;
-};
-
-struct brb_stats {
- u64 brb_truncate[8];
- u64 brb_discard[8];
-};
-
-struct port_stats {
- struct brb_stats brb;
- struct eth_stats eth;
-};
-
-struct couple_mode_teaming {
- u8 port_cmt[MCP_GLOB_PORT_MAX];
-#define PORT_CMT_IN_TEAM (1 << 0)
-
-#define PORT_CMT_PORT_ROLE (1 << 1)
-#define PORT_CMT_PORT_INACTIVE (0 << 1)
-#define PORT_CMT_PORT_ACTIVE (1 << 1)
-
-#define PORT_CMT_TEAM_MASK (1 << 2)
-#define PORT_CMT_TEAM0 (0 << 2)
-#define PORT_CMT_TEAM1 (1 << 2)
-};
-
-#define LLDP_CHASSIS_ID_STAT_LEN 4
-#define LLDP_PORT_ID_STAT_LEN 4
-#define DCBX_MAX_APP_PROTOCOL 32
-#define MAX_SYSTEM_LLDP_TLV_DATA 32
-
-enum _lldp_agent {
- LLDP_NEAREST_BRIDGE = 0,
- LLDP_NEAREST_NON_TPMR_BRIDGE,
- LLDP_NEAREST_CUSTOMER_BRIDGE,
- LLDP_MAX_LLDP_AGENTS
-};
-
-struct lldp_config_params_s {
- u32 config;
-#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
-#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
-#define LLDP_CONFIG_HOLD_MASK 0x00000f00
-#define LLDP_CONFIG_HOLD_SHIFT 8
-#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
-#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
-#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
-#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
-#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
-#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
- u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
-};
-
-struct lldp_status_params_s {
- u32 prefix_seq_num;
- u32 status;
- u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
- u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
- u32 suffix_seq_num;
-};
-
-struct dcbx_ets_feature {
- u32 flags;
-#define DCBX_ETS_ENABLED_MASK 0x00000001
-#define DCBX_ETS_ENABLED_SHIFT 0
-#define DCBX_ETS_WILLING_MASK 0x00000002
-#define DCBX_ETS_WILLING_SHIFT 1
-#define DCBX_ETS_ERROR_MASK 0x00000004
-#define DCBX_ETS_ERROR_SHIFT 2
-#define DCBX_ETS_CBS_MASK 0x00000008
-#define DCBX_ETS_CBS_SHIFT 3
-#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
-#define DCBX_ETS_MAX_TCS_SHIFT 4
-#define DCBX_OOO_TC_MASK 0x00000f00
-#define DCBX_OOO_TC_SHIFT 8
- u32 pri_tc_tbl[1];
-#define DCBX_TCP_OOO_TC (4)
-
-#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
-#define DCBX_CEE_STRICT_PRIORITY 0xf
- u32 tc_bw_tbl[2];
- u32 tc_tsa_tbl[2];
-#define DCBX_ETS_TSA_STRICT 0
-#define DCBX_ETS_TSA_CBS 1
-#define DCBX_ETS_TSA_ETS 2
-};
-
-#define DCBX_TCP_OOO_TC (4)
-#define DCBX_TCP_OOO_K2_4PORT_TC (3)
-
-struct dcbx_app_priority_entry {
- u32 entry;
-#define DCBX_APP_PRI_MAP_MASK 0x000000ff
-#define DCBX_APP_PRI_MAP_SHIFT 0
-#define DCBX_APP_PRI_0 0x01
-#define DCBX_APP_PRI_1 0x02
-#define DCBX_APP_PRI_2 0x04
-#define DCBX_APP_PRI_3 0x08
-#define DCBX_APP_PRI_4 0x10
-#define DCBX_APP_PRI_5 0x20
-#define DCBX_APP_PRI_6 0x40
-#define DCBX_APP_PRI_7 0x80
-#define DCBX_APP_SF_MASK 0x00000300
-#define DCBX_APP_SF_SHIFT 8
-#define DCBX_APP_SF_ETHTYPE 0
-#define DCBX_APP_SF_PORT 1
-#define DCBX_APP_SF_IEEE_MASK 0x0000f000
-#define DCBX_APP_SF_IEEE_SHIFT 12
-#define DCBX_APP_SF_IEEE_RESERVED 0
-#define DCBX_APP_SF_IEEE_ETHTYPE 1
-#define DCBX_APP_SF_IEEE_TCP_PORT 2
-#define DCBX_APP_SF_IEEE_UDP_PORT 3
-#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
-
-#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
-#define DCBX_APP_PROTOCOL_ID_SHIFT 16
-};
-
-struct dcbx_app_priority_feature {
- u32 flags;
-#define DCBX_APP_ENABLED_MASK 0x00000001
-#define DCBX_APP_ENABLED_SHIFT 0
-#define DCBX_APP_WILLING_MASK 0x00000002
-#define DCBX_APP_WILLING_SHIFT 1
-#define DCBX_APP_ERROR_MASK 0x00000004
-#define DCBX_APP_ERROR_SHIFT 2
-#define DCBX_APP_MAX_TCS_MASK 0x0000f000
-#define DCBX_APP_MAX_TCS_SHIFT 12
-#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
-#define DCBX_APP_NUM_ENTRIES_SHIFT 16
- struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
-};
-
-struct dcbx_features {
- struct dcbx_ets_feature ets;
- u32 pfc;
-#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
-#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
-#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
-
-#define DCBX_PFC_FLAGS_MASK 0x0000ff00
-#define DCBX_PFC_FLAGS_SHIFT 8
-#define DCBX_PFC_CAPS_MASK 0x00000f00
-#define DCBX_PFC_CAPS_SHIFT 8
-#define DCBX_PFC_MBC_MASK 0x00004000
-#define DCBX_PFC_MBC_SHIFT 14
-#define DCBX_PFC_WILLING_MASK 0x00008000
-#define DCBX_PFC_WILLING_SHIFT 15
-#define DCBX_PFC_ENABLED_MASK 0x00010000
-#define DCBX_PFC_ENABLED_SHIFT 16
-#define DCBX_PFC_ERROR_MASK 0x00020000
-#define DCBX_PFC_ERROR_SHIFT 17
-
- struct dcbx_app_priority_feature app;
-};
-
-struct dcbx_local_params {
- u32 config;
-#define DCBX_CONFIG_VERSION_MASK 0x00000007
-#define DCBX_CONFIG_VERSION_SHIFT 0
-#define DCBX_CONFIG_VERSION_DISABLED 0
-#define DCBX_CONFIG_VERSION_IEEE 1
-#define DCBX_CONFIG_VERSION_CEE 2
-#define DCBX_CONFIG_VERSION_STATIC 4
-
- u32 flags;
- struct dcbx_features features;
-};
-
-struct dcbx_mib {
- u32 prefix_seq_num;
- u32 flags;
- struct dcbx_features features;
- u32 suffix_seq_num;
-};
-
-struct lldp_system_tlvs_buffer_s {
- u16 valid;
- u16 length;
- u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
-};
-
-struct dcb_dscp_map {
- u32 flags;
-#define DCB_DSCP_ENABLE_MASK 0x1
-#define DCB_DSCP_ENABLE_SHIFT 0
-#define DCB_DSCP_ENABLE 1
- u32 dscp_pri_map[8];
-};
-
-struct public_global {
- u32 max_path;
- u32 max_ports;
-#define MODE_1P 1
-#define MODE_2P 2
-#define MODE_3P 3
-#define MODE_4P 4
- u32 debug_mb_offset;
- u32 phymod_dbg_mb_offset;
- struct couple_mode_teaming cmt;
- s32 internal_temperature;
- u32 mfw_ver;
- u32 running_bundle_id;
- s32 external_temperature;
- u32 mdump_reason;
- u64 reserved;
- u32 data_ptr;
- u32 data_size;
-};
-
-struct fw_flr_mb {
- u32 aggint;
- u32 opgen_addr;
- u32 accum_ack;
-};
-
-struct public_path {
- struct fw_flr_mb flr_mb;
- u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 process_kill;
-#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
-#define PROCESS_KILL_COUNTER_SHIFT 0
-#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
-#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
-#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit)
-};
-
-struct public_port {
- u32 validity_map;
-
- u32 link_status;
-#define LINK_STATUS_LINK_UP 0x00000001
-#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
-#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
-#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
-#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
-#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
-#define LINK_STATUS_PFC_ENABLED 0x00000100
-#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
-#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
-#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
-#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
-#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
-#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
-#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
-#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
-#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
-#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
-#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18)
-#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
-#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
-#define LINK_STATUS_SFP_TX_FAULT 0x00100000
-#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
-#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
-#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
-#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
-#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
-#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
-
-#define LINK_STATUS_FEC_MODE_MASK 0x38000000
-#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
-#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 (1 << 27)
-#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
-
- u32 link_status1;
- u32 ext_phy_fw_version;
- u32 drv_phy_cfg_addr;
-
- u32 port_stx;
-
- u32 stat_nig_timer;
-
- struct port_mf_cfg port_mf_config;
- struct port_stats stats;
-
- u32 media_type;
-#define MEDIA_UNSPECIFIED 0x0
-#define MEDIA_SFPP_10G_FIBER 0x1
-#define MEDIA_XFP_FIBER 0x2
-#define MEDIA_DA_TWINAX 0x3
-#define MEDIA_BASE_T 0x4
-#define MEDIA_SFP_1G_FIBER 0x5
-#define MEDIA_MODULE_FIBER 0x6
-#define MEDIA_KR 0xf0
-#define MEDIA_NOT_PRESENT 0xff
-
- u32 lfa_status;
- u32 link_change_count;
-
- struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
- struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
-
- /* DCBX related MIB */
- struct dcbx_local_params local_admin_dcbx_mib;
- struct dcbx_mib remote_dcbx_mib;
- struct dcbx_mib operational_dcbx_mib;
-
- u32 reserved[2];
-
- u32 transceiver_data;
-#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
-#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
-#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
-#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
-#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
-#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
-#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
-#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
-#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
-#define ETH_TRANSCEIVER_TYPE_NONE 0x00
-#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
-#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
-#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
-#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
-#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
-#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
-#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
-#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
-#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
-#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
-#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
-#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
-#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
-#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
-#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
-#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
-#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
-#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
-#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
-#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
-#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
-#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
-#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
-#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
-#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
-#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
-#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
-#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
-#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
-#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
-#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
-#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
-#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
-#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
-
- u32 wol_info;
- u32 wol_pkt_len;
- u32 wol_pkt_details;
- struct dcb_dscp_map dcb_dscp_map;
-
- u32 eee_status;
-#define EEE_ACTIVE_BIT BIT(0)
-#define EEE_LD_ADV_STATUS_MASK 0x000000f0
-#define EEE_LD_ADV_STATUS_OFFSET 4
-#define EEE_1G_ADV BIT(1)
-#define EEE_10G_ADV BIT(2)
-#define EEE_LP_ADV_STATUS_MASK 0x00000f00
-#define EEE_LP_ADV_STATUS_OFFSET 8
-#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
-#define EEE_SUPPORTED_SPEED_OFFSET 12
-#define EEE_1G_SUPPORTED BIT(1)
-#define EEE_10G_SUPPORTED BIT(2)
-
- u32 eee_remote;
-#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
-#define EEE_REMOTE_TW_TX_OFFSET 0
-#define EEE_REMOTE_TW_RX_MASK 0xffff0000
-#define EEE_REMOTE_TW_RX_OFFSET 16
-
- u32 reserved1;
- u32 oem_cfg_port;
-#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
-#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
-#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
-#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
-#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
-#define OEM_CFG_SCHED_TYPE_OFFSET 2
-#define OEM_CFG_SCHED_TYPE_ETS 0x1
-#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
-};
-
-struct public_func {
- u32 reserved0[2];
-
- u32 mtu_size;
-
- u32 reserved[7];
-
- u32 config;
-#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
-#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
-
-#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
-#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
-#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
-#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
-#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
-#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
-#define FUNC_MF_CFG_PROTOCOL_NVMETCP 0x00000040
-#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000040
-
-#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
-#define FUNC_MF_CFG_MIN_BW_SHIFT 8
-#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
-#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
-#define FUNC_MF_CFG_MAX_BW_SHIFT 16
-#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
-
- u32 status;
-#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
-
- u32 mac_upper;
-#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
-#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
-#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
- u32 mac_lower;
-#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
-
- u32 fcoe_wwn_port_name_upper;
- u32 fcoe_wwn_port_name_lower;
-
- u32 fcoe_wwn_node_name_upper;
- u32 fcoe_wwn_node_name_lower;
-
- u32 ovlan_stag;
-#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
-#define FUNC_MF_CFG_OV_STAG_SHIFT 0
-#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
-
- u32 pf_allocation;
-
- u32 preserve_data;
-
- u32 driver_last_activity_ts;
-
- u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
-
- u32 drv_id;
-#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
-#define DRV_ID_PDA_COMP_VER_SHIFT 0
-
-#define LOAD_REQ_HSI_VERSION 2
-#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
-#define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
- DRV_ID_MCP_HSI_VER_SHIFT)
-
-#define DRV_ID_DRV_TYPE_MASK 0x7f000000
-#define DRV_ID_DRV_TYPE_SHIFT 24
-#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
-#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT)
-
-#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
-#define DRV_ID_DRV_INIT_HW_SHIFT 31
-#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT)
-
- u32 oem_cfg_func;
-#define OEM_CFG_FUNC_TC_MASK 0x0000000F
-#define OEM_CFG_FUNC_TC_OFFSET 0
-#define OEM_CFG_FUNC_TC_0 0x0
-#define OEM_CFG_FUNC_TC_1 0x1
-#define OEM_CFG_FUNC_TC_2 0x2
-#define OEM_CFG_FUNC_TC_3 0x3
-#define OEM_CFG_FUNC_TC_4 0x4
-#define OEM_CFG_FUNC_TC_5 0x5
-#define OEM_CFG_FUNC_TC_6 0x6
-#define OEM_CFG_FUNC_TC_7 0x7
-
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
-#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
-};
-
-struct mcp_mac {
- u32 mac_upper;
- u32 mac_lower;
-};
-
-struct mcp_val64 {
- u32 lo;
- u32 hi;
-};
-
-struct mcp_file_att {
- u32 nvm_start_addr;
- u32 len;
-};
-
-struct bist_nvm_image_att {
- u32 return_code;
- u32 image_type;
- u32 nvm_start_addr;
- u32 len;
-};
-
-#define MCP_DRV_VER_STR_SIZE 16
-#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
-#define MCP_DRV_NVM_BUF_LEN 32
-struct drv_version_stc {
- u32 version;
- u8 name[MCP_DRV_VER_STR_SIZE - 4];
-};
-
-struct lan_stats_stc {
- u64 ucast_rx_pkts;
- u64 ucast_tx_pkts;
- u32 fcs_err;
- u32 rserved;
-};
-
-struct fcoe_stats_stc {
- u64 rx_pkts;
- u64 tx_pkts;
- u32 fcs_err;
- u32 login_failure;
-};
-
-struct ocbb_data_stc {
- u32 ocbb_host_addr;
- u32 ocsd_host_addr;
- u32 ocsd_req_update_interval;
-};
-
-#define MAX_NUM_OF_SENSORS 7
-struct temperature_status_stc {
- u32 num_of_sensors;
- u32 sensor[MAX_NUM_OF_SENSORS];
-};
-
-/* crash dump configuration header */
-struct mdump_config_stc {
- u32 version;
- u32 config;
- u32 epoc;
- u32 num_of_logs;
- u32 valid_logs;
-};
-
-enum resource_id_enum {
- RESOURCE_NUM_SB_E = 0,
- RESOURCE_NUM_L2_QUEUE_E = 1,
- RESOURCE_NUM_VPORT_E = 2,
- RESOURCE_NUM_VMQ_E = 3,
- RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
- RESOURCE_FACTOR_RSS_PER_VF_E = 5,
- RESOURCE_NUM_RL_E = 6,
- RESOURCE_NUM_PQ_E = 7,
- RESOURCE_NUM_VF_E = 8,
- RESOURCE_VFC_FILTER_E = 9,
- RESOURCE_ILT_E = 10,
- RESOURCE_CQS_E = 11,
- RESOURCE_GFT_PROFILES_E = 12,
- RESOURCE_NUM_TC_E = 13,
- RESOURCE_NUM_RSS_ENGINES_E = 14,
- RESOURCE_LL2_QUEUE_E = 15,
- RESOURCE_RDMA_STATS_QUEUE_E = 16,
- RESOURCE_BDQ_E = 17,
- RESOURCE_QCN_E = 18,
- RESOURCE_LLH_FILTER_E = 19,
- RESOURCE_VF_MAC_ADDR = 20,
- RESOURCE_LL2_CQS_E = 21,
- RESOURCE_VF_CNQS = 22,
- RESOURCE_MAX_NUM,
- RESOURCE_NUM_INVALID = 0xFFFFFFFF
-};
-
-/* Resource ID is to be filled by the driver in the MB request
- * Size, offset & flags to be filled by the MFW in the MB response
- */
-struct resource_info {
- enum resource_id_enum res_id;
- u32 size; /* number of allocated resources */
- u32 offset; /* Offset of the 1st resource */
- u32 vf_size;
- u32 vf_offset;
- u32 flags;
-#define RESOURCE_ELEMENT_STRICT (1 << 0)
-};
-
-#define DRV_ROLE_NONE 0
-#define DRV_ROLE_PREBOOT 1
-#define DRV_ROLE_OS 2
-#define DRV_ROLE_KDUMP 3
-
-struct load_req_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_REQ_ROLE_MASK 0x000000FF
-#define LOAD_REQ_ROLE_SHIFT 0
-#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
-#define LOAD_REQ_LOCK_TO_SHIFT 8
-#define LOAD_REQ_LOCK_TO_DEFAULT 0
-#define LOAD_REQ_LOCK_TO_NONE 255
-#define LOAD_REQ_FORCE_MASK 0x000F0000
-#define LOAD_REQ_FORCE_SHIFT 16
-#define LOAD_REQ_FORCE_NONE 0
-#define LOAD_REQ_FORCE_PF 1
-#define LOAD_REQ_FORCE_ALL 2
-#define LOAD_REQ_FLAGS0_MASK 0x00F00000
-#define LOAD_REQ_FLAGS0_SHIFT 20
-#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
-};
-
-struct load_rsp_stc {
- u32 drv_ver_0;
- u32 drv_ver_1;
- u32 fw_ver;
- u32 misc0;
-#define LOAD_RSP_ROLE_MASK 0x000000FF
-#define LOAD_RSP_ROLE_SHIFT 0
-#define LOAD_RSP_HSI_MASK 0x0000FF00
-#define LOAD_RSP_HSI_SHIFT 8
-#define LOAD_RSP_FLAGS0_MASK 0x000F0000
-#define LOAD_RSP_FLAGS0_SHIFT 16
-#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
-};
-
-struct mdump_retain_data_stc {
- u32 valid;
- u32 epoch;
- u32 pf;
- u32 status;
-};
-
-union drv_union_data {
- u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD];
- struct mcp_mac wol_mac;
-
- struct eth_phy_cfg drv_phy_cfg;
-
- struct mcp_val64 val64;
-
- u8 raw_data[MCP_DRV_NVM_BUF_LEN];
-
- struct mcp_file_att file_att;
-
- u32 ack_vf_disabled[VF_MAX_STATIC / 32];
-
- struct drv_version_stc drv_version;
-
- struct lan_stats_stc lan_stats;
- struct fcoe_stats_stc fcoe_stats;
- struct ocbb_data_stc ocbb_info;
- struct temperature_status_stc temp_info;
- struct resource_info resource;
- struct bist_nvm_image_att nvm_image_att;
- struct mdump_config_stc mdump_config;
-};
-
-struct public_drv_mb {
- u32 drv_mb_header;
-#define DRV_MSG_CODE_MASK 0xffff0000
-#define DRV_MSG_CODE_LOAD_REQ 0x10000000
-#define DRV_MSG_CODE_LOAD_DONE 0x11000000
-#define DRV_MSG_CODE_INIT_HW 0x12000000
-#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
-#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
-#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
-#define DRV_MSG_CODE_INIT_PHY 0x22000000
-#define DRV_MSG_CODE_LINK_RESET 0x23000000
-#define DRV_MSG_CODE_SET_DCBX 0x25000000
-#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000
-#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000
-#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000
-#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000
-#define DRV_MSG_GET_RESOURCE_ALLOC_MSG 0x34000000
-#define DRV_MSG_SET_RESOURCE_VALUE_MSG 0x35000000
-#define DRV_MSG_CODE_OV_UPDATE_WOL 0x38000000
-#define DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE 0x39000000
-#define DRV_MSG_CODE_GET_OEM_UPDATES 0x41000000
-
-#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000
-#define DRV_MSG_CODE_NIG_DRAIN 0x30000000
-#define DRV_MSG_CODE_S_TAG_UPDATE_ACK 0x3b000000
-#define DRV_MSG_CODE_GET_NVM_CFG_OPTION 0x003e0000
-#define DRV_MSG_CODE_SET_NVM_CFG_OPTION 0x003f0000
-#define DRV_MSG_CODE_INITIATE_PF_FLR 0x02010000
-#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
-#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000
-#define DRV_MSG_CODE_CFG_PF_VFS_MSIX 0xc0020000
-#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000
-#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000
-#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000
-#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000
-#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000
-#define DRV_MSG_CODE_MCP_RESET 0x00090000
-#define DRV_MSG_CODE_SET_VERSION 0x000f0000
-#define DRV_MSG_CODE_MCP_HALT 0x00100000
-#define DRV_MSG_CODE_SET_VMAC 0x00110000
-#define DRV_MSG_CODE_GET_VMAC 0x00120000
-#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
-#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
-#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
-#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
-#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
-
-#define DRV_MSG_CODE_GET_STATS 0x00130000
-#define DRV_MSG_CODE_STATS_TYPE_LAN 1
-#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
-#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
-#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
-
-#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000
-
-#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000
-
-#define DRV_MSG_CODE_BIST_TEST 0x001e0000
-#define DRV_MSG_CODE_SET_LED_MODE 0x00200000
-#define DRV_MSG_CODE_RESOURCE_CMD 0x00230000
-/* Send crash dump commands with param[3:0] - opcode */
-#define DRV_MSG_CODE_MDUMP_CMD 0x00250000
-#define DRV_MSG_CODE_GET_TLV_DONE 0x002f0000
-#define DRV_MSG_CODE_GET_ENGINE_CONFIG 0x00370000
-#define DRV_MSG_CODE_GET_PPFID_BITMAP 0x43000000
-
-#define DRV_MSG_CODE_DEBUG_DATA_SEND 0xc0040000
-
-#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
-#define RESOURCE_CMD_REQ_RESC_SHIFT 0
-#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
-#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
-#define RESOURCE_OPCODE_REQ 1
-#define RESOURCE_OPCODE_REQ_WO_AGING 2
-#define RESOURCE_OPCODE_REQ_W_AGING 3
-#define RESOURCE_OPCODE_RELEASE 4
-#define RESOURCE_OPCODE_FORCE_RELEASE 5
-#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
-#define RESOURCE_CMD_REQ_AGE_SHIFT 8
-
-#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
-#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
-#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
-#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
-#define RESOURCE_OPCODE_GNT 1
-#define RESOURCE_OPCODE_BUSY 2
-#define RESOURCE_OPCODE_RELEASED 3
-#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
-#define RESOURCE_OPCODE_WRONG_OWNER 5
-#define RESOURCE_OPCODE_UNKNOWN_CMD 255
-
-#define RESOURCE_DUMP 0
-
-/* DRV_MSG_CODE_MDUMP_CMD parameters */
-#define MDUMP_DRV_PARAM_OPCODE_MASK 0x0000000f
-#define DRV_MSG_CODE_MDUMP_ACK 0x01
-#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
-#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
-#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
-#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
-#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
-#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
-#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
-
-#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
-#define DRV_MSG_CODE_MDUMP_GEN_MDUMP2 0x0b
-#define DRV_MSG_CODE_MDUMP_FREE_MDUMP2 0x0c
-
-#define DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL 0x002b0000
-#define DRV_MSG_CODE_OS_WOL 0x002e0000
-
-#define DRV_MSG_CODE_FEATURE_SUPPORT 0x00300000
-#define DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT 0x00310000
-#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
- u32 drv_mb_param;
-#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
-#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
-#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
-#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
-#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF
-#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
-
-#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
-#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
-#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
-#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
-#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
-
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
-#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
-#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
-#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
-#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
-
-#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
-#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
-#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
-#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
-#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
-#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
-
-#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
-#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
-#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
-
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
-#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
-
-#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
-#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
-
-#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
- DRV_MB_PARAM_WOL_DISABLED | \
- DRV_MB_PARAM_WOL_ENABLED)
-#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
-#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
-#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
-
-#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
- DRV_MB_PARAM_ESWITCH_MODE_VEB | \
- DRV_MB_PARAM_ESWITCH_MODE_VEPA)
-#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
-#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
-#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
-
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
-#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
-
-#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
-#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
-#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
-
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
-#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
-#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
-#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
-#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
-
- /* Resource Allocation params - Driver version support */
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
-#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
-#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
-#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
-#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
-
-#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
-#define DRV_MB_PARAM_BIST_RC_PASSED 1
-#define DRV_MB_PARAM_BIST_RC_FAILED 2
-#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
-
-#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
-#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
-#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
-
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
-#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
-#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
-
-/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
-#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
-
-/* Driver attributes params */
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
-#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
-#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
-
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
-#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
-#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
-#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
-#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
-
- u32 fw_mb_header;
-#define FW_MSG_CODE_MASK 0xffff0000
-#define FW_MSG_CODE_UNSUPPORTED 0x00000000
-#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000
-#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
-#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
-#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
-#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
-#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
-#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000
-#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000
-#define FW_MSG_CODE_RESOURCE_ALLOC_OK 0x34000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN 0x35000000
-#define FW_MSG_CODE_RESOURCE_ALLOC_DEPRECATED 0x36000000
-#define FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE 0x3b000000
-#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000
-
-#define FW_MSG_CODE_NVM_OK 0x00010000
-#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000
-#define FW_MSG_CODE_PHY_OK 0x00110000
-#define FW_MSG_CODE_OK 0x00160000
-#define FW_MSG_CODE_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000
-#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000
-#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000
-
-#define FW_MSG_CODE_OS_WOL_SUPPORTED 0x00800000
-#define FW_MSG_CODE_OS_WOL_NOT_SUPPORTED 0x00810000
-#define FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE 0x00870000
-#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
-
-#define FW_MSG_CODE_DEBUG_DATA_SEND_INV_ARG 0xb0070000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_BUF_FULL 0xb0080000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_NO_BUF 0xb0090000
-#define FW_MSG_CODE_DEBUG_NOT_ENABLED 0xb00a0000
-#define FW_MSG_CODE_DEBUG_DATA_SEND_OK 0xb00b0000
-
-#define FW_MSG_CODE_MDUMP_INVALID_CMD 0x00030000
-
- u32 fw_mb_param;
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
-#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
-
- /* Get PF RDMA protocol command response */
-#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
-#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
-#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
-#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
-
- /* Get MFW feature support response */
-#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
-#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
-#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
-#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
-
-#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
-
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
-#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
-#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
-
-#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
-#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
-
- u32 drv_pulse_mb;
-#define DRV_PULSE_SEQ_MASK 0x00007fff
-#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
-#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
-
- u32 mcp_pulse_mb;
-#define MCP_PULSE_SEQ_MASK 0x00007fff
-#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
-#define MCP_EVENT_MASK 0xffff0000
-#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
-
- union drv_union_data union_data;
-};
-
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
-#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
-
-enum MFW_DRV_MSG_TYPE {
- MFW_DRV_MSG_LINK_CHANGE,
- MFW_DRV_MSG_FLR_FW_ACK_FAILED,
- MFW_DRV_MSG_VF_DISABLED,
- MFW_DRV_MSG_LLDP_DATA_UPDATED,
- MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
- MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
- MFW_DRV_MSG_ERROR_RECOVERY,
- MFW_DRV_MSG_BW_UPDATE,
- MFW_DRV_MSG_S_TAG_UPDATE,
- MFW_DRV_MSG_GET_LAN_STATS,
- MFW_DRV_MSG_GET_FCOE_STATS,
- MFW_DRV_MSG_GET_ISCSI_STATS,
- MFW_DRV_MSG_GET_RDMA_STATS,
- MFW_DRV_MSG_FAILURE_DETECTED,
- MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
- MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
- MFW_DRV_MSG_RESERVED,
- MFW_DRV_MSG_GET_TLV_REQ,
- MFW_DRV_MSG_OEM_CFG_UPDATE,
- MFW_DRV_MSG_MAX
-};
-
-#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1)
-#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2)
-#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3)
-#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
-
-struct public_mfw_mb {
- u32 sup_msgs;
- u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
- u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
-};
-
-enum public_sections {
- PUBLIC_DRV_MB,
- PUBLIC_MFW_MB,
- PUBLIC_GLOBAL,
- PUBLIC_PATH,
- PUBLIC_PORT,
- PUBLIC_FUNC,
- PUBLIC_MAX_SECTIONS
-};
-
-struct mcp_public_data {
- u32 num_sections;
- u32 sections[PUBLIC_MAX_SECTIONS];
- struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
- struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
- struct public_global global;
- struct public_path path[MCP_GLOB_PATH_MAX];
- struct public_port port[MCP_GLOB_PORT_MAX];
- struct public_func func[MCP_GLOB_FUNC_MAX];
-};
-
-#define MAX_I2C_TRANSACTION_SIZE 16
-
-/* OCBB definitions */
-enum tlvs {
- /* Category 1: Device Properties */
- DRV_TLV_CLP_STR,
- DRV_TLV_CLP_STR_CTD,
- /* Category 6: Device Configuration */
- DRV_TLV_SCSI_TO,
- DRV_TLV_R_T_TOV,
- DRV_TLV_R_A_TOV,
- DRV_TLV_E_D_TOV,
- DRV_TLV_CR_TOV,
- DRV_TLV_BOOT_TYPE,
- /* Category 8: Port Configuration */
- DRV_TLV_NPIV_ENABLED,
- /* Category 10: Function Configuration */
- DRV_TLV_FEATURE_FLAGS,
- DRV_TLV_LOCAL_ADMIN_ADDR,
- DRV_TLV_ADDITIONAL_MAC_ADDR_1,
- DRV_TLV_ADDITIONAL_MAC_ADDR_2,
- DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
- DRV_TLV_LSO_MIN_SEGMENT_COUNT,
- DRV_TLV_PROMISCUOUS_MODE,
- DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
- DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
- DRV_TLV_OS_DRIVER_STATES,
- DRV_TLV_PXE_BOOT_PROGRESS,
- /* Category 12: FC/FCoE Configuration */
- DRV_TLV_NPIV_STATE,
- DRV_TLV_NUM_OF_NPIV_IDS,
- DRV_TLV_SWITCH_NAME,
- DRV_TLV_SWITCH_PORT_NUM,
- DRV_TLV_SWITCH_PORT_ID,
- DRV_TLV_VENDOR_NAME,
- DRV_TLV_SWITCH_MODEL,
- DRV_TLV_SWITCH_FW_VER,
- DRV_TLV_QOS_PRIORITY_PER_802_1P,
- DRV_TLV_PORT_ALIAS,
- DRV_TLV_PORT_STATE,
- DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_LINK_FAILURE_COUNT,
- DRV_TLV_FCOE_BOOT_PROGRESS,
- /* Category 13: iSCSI Configuration */
- DRV_TLV_TARGET_LLMNR_ENABLED,
- DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
- DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
- DRV_TLV_AUTHENTICATION_METHOD,
- DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
- DRV_TLV_MAX_FRAME_SIZE,
- DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
- DRV_TLV_ISCSI_BOOT_PROGRESS,
- /* Category 20: Device Data */
- DRV_TLV_PCIE_BUS_RX_UTILIZATION,
- DRV_TLV_PCIE_BUS_TX_UTILIZATION,
- DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
- DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
- DRV_TLV_NCSI_RX_BYTES_RECEIVED,
- DRV_TLV_NCSI_TX_BYTES_SENT,
- /* Category 22: Base Port Data */
- DRV_TLV_RX_DISCARDS,
- DRV_TLV_RX_ERRORS,
- DRV_TLV_TX_ERRORS,
- DRV_TLV_TX_DISCARDS,
- DRV_TLV_RX_FRAMES_RECEIVED,
- DRV_TLV_TX_FRAMES_SENT,
- /* Category 23: FC/FCoE Port Data */
- DRV_TLV_RX_BROADCAST_PACKETS,
- DRV_TLV_TX_BROADCAST_PACKETS,
- /* Category 28: Base Function Data */
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
- DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
- DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_PF_RX_FRAMES_RECEIVED,
- DRV_TLV_RX_BYTES_RECEIVED,
- DRV_TLV_PF_TX_FRAMES_SENT,
- DRV_TLV_TX_BYTES_SENT,
- DRV_TLV_IOV_OFFLOAD,
- DRV_TLV_PCI_ERRORS_CAP_ID,
- DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
- DRV_TLV_UNCORRECTABLE_ERROR_MASK,
- DRV_TLV_CORRECTABLE_ERROR_STATUS,
- DRV_TLV_CORRECTABLE_ERROR_MASK,
- DRV_TLV_PCI_ERRORS_AECC_REGISTER,
- DRV_TLV_TX_QUEUES_EMPTY,
- DRV_TLV_RX_QUEUES_EMPTY,
- DRV_TLV_TX_QUEUES_FULL,
- DRV_TLV_RX_QUEUES_FULL,
- /* Category 29: FC/FCoE Function Data */
- DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
- DRV_TLV_FCOE_RX_BYTES_RECEIVED,
- DRV_TLV_FCOE_TX_FRAMES_SENT,
- DRV_TLV_FCOE_TX_BYTES_SENT,
- DRV_TLV_CRC_ERROR_COUNT,
- DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_1_TIMESTAMP,
- DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_2_TIMESTAMP,
- DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_3_TIMESTAMP,
- DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_4_TIMESTAMP,
- DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_CRC_ERROR_5_TIMESTAMP,
- DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
- DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
- DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
- DRV_TLV_DISPARITY_ERROR_COUNT,
- DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
- DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
- DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
- DRV_TLV_LAST_FLOGI_RJT,
- DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
- DRV_TLV_FDISCS_SENT_COUNT,
- DRV_TLV_FDISC_ACCS_RECEIVED,
- DRV_TLV_FDISC_RJTS_RECEIVED,
- DRV_TLV_PLOGI_SENT_COUNT,
- DRV_TLV_PLOGI_ACCS_RECEIVED,
- DRV_TLV_PLOGI_RJTS_RECEIVED,
- DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_1_TIMESTAMP,
- DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_2_TIMESTAMP,
- DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_3_TIMESTAMP,
- DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_4_TIMESTAMP,
- DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_PLOGI_5_TIMESTAMP,
- DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
- DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
- DRV_TLV_LOGOS_ISSUED,
- DRV_TLV_LOGO_ACCS_RECEIVED,
- DRV_TLV_LOGO_RJTS_RECEIVED,
- DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_1_TIMESTAMP,
- DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_2_TIMESTAMP,
- DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_3_TIMESTAMP,
- DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_4_TIMESTAMP,
- DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
- DRV_TLV_LOGO_5_TIMESTAMP,
- DRV_TLV_LOGOS_RECEIVED,
- DRV_TLV_ACCS_ISSUED,
- DRV_TLV_PRLIS_ISSUED,
- DRV_TLV_ACCS_RECEIVED,
- DRV_TLV_ABTS_SENT_COUNT,
- DRV_TLV_ABTS_ACCS_RECEIVED,
- DRV_TLV_ABTS_RJTS_RECEIVED,
- DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_1_TIMESTAMP,
- DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_2_TIMESTAMP,
- DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_3_TIMESTAMP,
- DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_4_TIMESTAMP,
- DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
- DRV_TLV_ABTS_5_TIMESTAMP,
- DRV_TLV_RSCNS_RECEIVED,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
- DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
- DRV_TLV_LUN_RESETS_ISSUED,
- DRV_TLV_ABORT_TASK_SETS_ISSUED,
- DRV_TLV_TPRLOS_SENT,
- DRV_TLV_NOS_SENT_COUNT,
- DRV_TLV_NOS_RECEIVED_COUNT,
- DRV_TLV_OLS_COUNT,
- DRV_TLV_LR_COUNT,
- DRV_TLV_LRR_COUNT,
- DRV_TLV_LIP_SENT_COUNT,
- DRV_TLV_LIP_RECEIVED_COUNT,
- DRV_TLV_EOFA_COUNT,
- DRV_TLV_EOFNI_COUNT,
- DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
- DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_BUSY_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
- DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
- DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
- DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
- DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
- DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
- DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
- DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
- /* Category 30: iSCSI Function Data */
- DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
- DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
- DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
- DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
- DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
- DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
-};
-
-struct nvm_cfg_mac_address {
- u32 mac_addr_hi;
-#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
-#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
-
- u32 mac_addr_lo;
-};
-
-struct nvm_cfg1_glob {
- u32 generic_cont0;
-#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
-#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
-#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
-#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
-#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
-#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
-#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
-#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
-#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
-
- u32 engineering_change[3];
- u32 manufacturing_id;
- u32 serial_number[4];
- u32 pcie_cfg;
- u32 mgmt_traffic;
-
- u32 core_cfg;
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
-#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
-
- u32 e_lane_cfg1;
- u32 e_lane_cfg2;
- u32 f_lane_cfg1;
- u32 f_lane_cfg2;
- u32 mps10_preemphasis;
- u32 mps10_driver_current;
- u32 mps25_preemphasis;
- u32 mps25_driver_current;
- u32 pci_id;
- u32 pci_subsys_id;
- u32 bar;
- u32 mps10_txfir_main;
- u32 mps10_txfir_post;
- u32 mps25_txfir_main;
- u32 mps25_txfir_post;
- u32 manufacture_ver;
- u32 manufacture_time;
- u32 led_global_settings;
- u32 generic_cont1;
-
- u32 mbi_version;
-#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
-#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
-#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
-#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
-#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
-#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
-
- u32 mbi_date;
- u32 misc_sig;
-
- u32 device_capabilities;
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
-#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
-
- u32 power_dissipated;
- u32 power_consumed;
- u32 efi_version;
- u32 multi_net_modes_cap;
- u32 reserved[41];
-};
-
-struct nvm_cfg1_path {
- u32 reserved[30];
-};
-
-struct nvm_cfg1_port {
- u32 rel_to_opt123;
- u32 rel_to_opt124;
-
- u32 generic_cont0;
-#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
-#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
-#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
-#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
-#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
-#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
-
- u32 pcie_cfg;
- u32 features;
-
- u32 speed_cap_mask;
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
-#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
-
- u32 link_settings;
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
-#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
-#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
-#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
-
- u32 phy_cfg;
- u32 mgmt_traffic;
-
- u32 ext_phy;
- /* EEE power saving mode */
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
-#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
-
- u32 mba_cfg1;
- u32 mba_cfg2;
- u32 vf_cfg;
- struct nvm_cfg_mac_address lldp_mac_address;
- u32 led_port_settings;
- u32 transceiver_00;
- u32 device_ids;
-
- u32 board_cfg;
-#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
-#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
-#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
-#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
-#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
-#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
-
- u32 mnm_10g_cap;
- u32 mnm_10g_ctrl;
- u32 mnm_10g_misc;
- u32 mnm_25g_cap;
- u32 mnm_25g_ctrl;
- u32 mnm_25g_misc;
- u32 mnm_40g_cap;
- u32 mnm_40g_ctrl;
- u32 mnm_40g_misc;
- u32 mnm_50g_cap;
- u32 mnm_50g_ctrl;
- u32 mnm_50g_misc;
- u32 mnm_100g_cap;
- u32 mnm_100g_ctrl;
- u32 mnm_100g_misc;
-
- u32 temperature;
- u32 ext_phy_cfg1;
-
- u32 extended_speed;
-#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
-#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
-#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
-
- u32 extended_fec_mode;
-
- u32 reserved[112];
-};
-
-struct nvm_cfg1_func {
- struct nvm_cfg_mac_address mac_address;
- u32 rsrv1;
- u32 rsrv2;
- u32 device_id;
- u32 cmn_cfg;
- u32 pci_cfg;
- struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
- struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
- u32 preboot_generic_cfg;
- u32 reserved[8];
-};
-
-struct nvm_cfg1 {
- struct nvm_cfg1_glob glob;
- struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
- struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
- struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
-};
-
-enum spad_sections {
- SPAD_SECTION_TRACE,
- SPAD_SECTION_NVM_CFG,
- SPAD_SECTION_PUBLIC,
- SPAD_SECTION_PRIVATE,
- SPAD_SECTION_MAX
-};
-
-#define MCP_TRACE_SIZE 2048 /* 2kb */
-
-/* This section is located at a fixed location in the beginning of the
- * scratchpad, to ensure that the MCP trace is not run over during MFW upgrade.
- * All the rest of data has a floating location which differs from version to
- * version, and is pointed by the mcp_meta_data below.
- * Moreover, the spad_layout section is part of the MFW firmware, and is loaded
- * with it from nvram in order to clear this portion.
- */
-struct static_init {
- u32 num_sections;
- offsize_t sections[SPAD_SECTION_MAX];
-#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
-
- struct mcp_trace trace;
-#define MCP_TRACE_P ((struct mcp_trace *)(STRUCT_OFFSET(trace)))
- u8 trace_buffer[MCP_TRACE_SIZE];
-#define MCP_TRACE_BUF ((u8 *)(STRUCT_OFFSET(trace_buffer)))
- /* running_mfw has the same definition as in nvm_map.h.
- * This bit indicate both the running dir, and the running bundle.
- * It is set once when the LIM is loaded.
- */
- u32 running_mfw;
-#define RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(running_mfw))))
- u32 build_time;
-#define MFW_BUILD_TIME (*((u32 *)(STRUCT_OFFSET(build_time))))
- u32 reset_type;
-#define RESET_TYPE (*((u32 *)(STRUCT_OFFSET(reset_type))))
- u32 mfw_secure_mode;
-#define MFW_SECURE_MODE (*((u32 *)(STRUCT_OFFSET(mfw_secure_mode))))
- u16 pme_status_pf_bitmap;
-#define PME_STATUS_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_status_pf_bitmap))))
- u16 pme_enable_pf_bitmap;
-#define PME_ENABLE_PF_BITMAP (*((u16 *)(STRUCT_OFFSET(pme_enable_pf_bitmap))))
- u32 mim_nvm_addr;
- u32 mim_start_addr;
- u32 ah_pcie_link_params;
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_MASK (0x000000ff)
-#define AH_PCIE_LINK_PARAMS_LINK_SPEED_SHIFT (0)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_MASK (0x0000ff00)
-#define AH_PCIE_LINK_PARAMS_LINK_WIDTH_SHIFT (8)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_MASK (0x00ff0000)
-#define AH_PCIE_LINK_PARAMS_ASPM_MODE_SHIFT (16)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_MASK (0xff000000)
-#define AH_PCIE_LINK_PARAMS_ASPM_CAP_SHIFT (24)
-#define AH_PCIE_LINK_PARAMS (*((u32 *)(STRUCT_OFFSET(ah_pcie_link_params))))
-
- u32 rsrv_persist[5]; /* Persist reserved for MFW upgrades */
-};
-
-#define NVM_MAGIC_VALUE 0x669955aa
-
-enum nvm_image_type {
- NVM_TYPE_TIM1 = 0x01,
- NVM_TYPE_TIM2 = 0x02,
- NVM_TYPE_MIM1 = 0x03,
- NVM_TYPE_MIM2 = 0x04,
- NVM_TYPE_MBA = 0x05,
- NVM_TYPE_MODULES_PN = 0x06,
- NVM_TYPE_VPD = 0x07,
- NVM_TYPE_MFW_TRACE1 = 0x08,
- NVM_TYPE_MFW_TRACE2 = 0x09,
- NVM_TYPE_NVM_CFG1 = 0x0a,
- NVM_TYPE_L2B = 0x0b,
- NVM_TYPE_DIR1 = 0x0c,
- NVM_TYPE_EAGLE_FW1 = 0x0d,
- NVM_TYPE_FALCON_FW1 = 0x0e,
- NVM_TYPE_PCIE_FW1 = 0x0f,
- NVM_TYPE_HW_SET = 0x10,
- NVM_TYPE_LIM = 0x11,
- NVM_TYPE_AVS_FW1 = 0x12,
- NVM_TYPE_DIR2 = 0x13,
- NVM_TYPE_CCM = 0x14,
- NVM_TYPE_EAGLE_FW2 = 0x15,
- NVM_TYPE_FALCON_FW2 = 0x16,
- NVM_TYPE_PCIE_FW2 = 0x17,
- NVM_TYPE_AVS_FW2 = 0x18,
- NVM_TYPE_INIT_HW = 0x19,
- NVM_TYPE_DEFAULT_CFG = 0x1a,
- NVM_TYPE_MDUMP = 0x1b,
- NVM_TYPE_META = 0x1c,
- NVM_TYPE_ISCSI_CFG = 0x1d,
- NVM_TYPE_FCOE_CFG = 0x1f,
- NVM_TYPE_ETH_PHY_FW1 = 0x20,
- NVM_TYPE_ETH_PHY_FW2 = 0x21,
- NVM_TYPE_BDN = 0x22,
- NVM_TYPE_8485X_PHY_FW = 0x23,
- NVM_TYPE_PUB_KEY = 0x24,
- NVM_TYPE_RECOVERY = 0x25,
- NVM_TYPE_PLDM = 0x26,
- NVM_TYPE_UPK1 = 0x27,
- NVM_TYPE_UPK2 = 0x28,
- NVM_TYPE_MASTER_KC = 0x29,
- NVM_TYPE_BACKUP_KC = 0x2a,
- NVM_TYPE_HW_DUMP = 0x2b,
- NVM_TYPE_HW_DUMP_OUT = 0x2c,
- NVM_TYPE_BIN_NVM_META = 0x30,
- NVM_TYPE_ROM_TEST = 0xf0,
- NVM_TYPE_88X33X0_PHY_FW = 0x31,
- NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
- NVM_TYPE_MAX,
-};
-
-#define DIR_ID_1 (0)
-
#endif
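For reference, a minimal sketch of how MASK/SHIFT pairs such as RESOURCE_CMD_RSP_OWNER/OPCODE in the block removed above are consumed when parsing a mailbox response; get_field() and decode_resc_rsp() are hypothetical helpers for illustration only, not part of the qed driver:

static inline u32 get_field(u32 value, u32 mask, u32 shift)
{
	return (value & mask) >> shift;
}

static void decode_resc_rsp(u32 mcp_resp)
{
	/* Owner PF of the resource lock, bits [7:0] of the response. */
	u32 owner = get_field(mcp_resp, RESOURCE_CMD_RSP_OWNER_MASK,
			      RESOURCE_CMD_RSP_OWNER_SHIFT);
	/* Response opcode, bits [10:8]; e.g. RESOURCE_OPCODE_GNT or _BUSY. */
	u32 opcode = get_field(mcp_resp, RESOURCE_CMD_RSP_OPCODE_MASK,
			       RESOURCE_CMD_RSP_OPCODE_SHIFT);

	(void)owner;
	(void)opcode;
}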
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h
index 2734f49956f7..e535983ce21b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hw.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h
@@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask {
#define DMAE_MAX_CLIENTS 32
/**
- * @brief qed_gtt_init - Initialize GTT windows
+ * qed_gtt_init(): Initialize GTT windows.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured
+ * qed_ptt_invalidate(): Forces all ptt entries to be re-configured
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_invalidate(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool
+ * qed_ptt_pool_alloc(): Allocate and initialize PTT pool.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return struct _qed_status - success (0), negative - error.
+ * Return: struct _qed_status - success (0), negative - error.
*/
int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_pool_free -
+ * qed_ptt_pool_free(): Free PTT pool.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_ptt_pool_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address
+ * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt
*
- * @return u32
+ * Return: u32.
*/
u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address
+ * qed_ptt_get_bar_addr(): Get PPT's external BAR address.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_ptt: P_ptt
*
- * @return u32
+ * Return: u32.
*/
u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt);
/**
- * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address
+ * qed_ptt_set_win(): Set PTT Window's GRC BAR address
*
- * @param p_hwfn
- * @param new_hw_addr
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @new_hw_addr: New HW address.
+ * @p_ptt: P_Ptt
+ *
+ * Return: Void.
*/
void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 new_hw_addr);
/**
- * @brief qed_get_reserved_ptt - Get a specific reserved PTT
+ * qed_get_reserved_ptt(): Get a specific reserved PTT.
*
- * @param p_hwfn
- * @param ptt_idx
+ * @p_hwfn: HW device data.
+ * @ptt_idx: Ptt Index.
*
- * @return struct qed_ptt *
+ * Return: struct qed_ptt *.
*/
struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
enum reserved_ptts ptt_idx);
/**
- * @brief qed_wr - Write value to BAR using the given ptt
+ * qed_wr(): Write value to BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @val: Val.
+ * @hw_addr: HW address
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: Void.
*/
void qed_wr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn,
u32 val);
/**
- * @brief qed_rd - Read value from BAR using the given ptt
+ * qed_rd(): Read value from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address
*
- * @param p_hwfn
- * @param p_ptt
- * @param val
- * @param hw_addr
+ * Return: u32.
*/
u32 qed_rd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 hw_addr);
/**
- * @brief qed_memcpy_from - copy n bytes from BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param dest
- * @param hw_addr
- * @param n
+ * qed_memcpy_from(): Copy n bytes from BAR using the given ptt.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @dest: Destination.
+ * @hw_addr: HW address.
+ * @n: N
+ *
+ * Return: Void.
*/
void qed_memcpy_from(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn,
size_t n);
/**
- * @brief qed_memcpy_to - copy n bytes to BAR using the given
- * ptt
- *
- * @param p_hwfn
- * @param p_ptt
- * @param hw_addr
- * @param src
- * @param n
+ * qed_memcpy_to(): Copy n bytes to BAR using the given ptt
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @hw_addr: HW address.
+ * @src: Source.
+ * @n: N
+ *
+ * Return: Void.
*/
void qed_memcpy_to(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn,
void *src,
size_t n);
/**
- * @brief qed_fid_pretend - pretend to another function when
- * accessing the ptt window. There is no way to unpretend
- * a function. The only way to cancel a pretend is to
- * pretend back to the original function.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param fid - fid field of pxp_pretend structure. Can contain
- * either pf / vf, port/path fields are don't care.
+ * qed_fid_pretend(): pretend to another function when
+ * accessing the ptt window. There is no way to unpretend
+ * a function. The only way to cancel a pretend is to
+ * pretend back to the original function.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @fid: fid field of pxp_pretend structure. Can contain
+ * either pf / vf, port/path fields are don't care.
+ *
+ * Return: Void.
*/
void qed_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 fid);
/**
- * @brief qed_port_pretend - pretend to another port when
- * accessing the ptt window
+ * qed_port_pretend(): Pretend to another port when accessing the ptt window
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to
+ *
+ * Return: Void.
*/
void qed_port_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u8 port_id);
/**
- * @brief qed_port_unpretend - cancel any previously set port
- * pretend
+ * qed_port_unpretend(): Cancel any previously set port pretend
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_port_unpretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_port_fid_pretend - pretend to another port and another function
- * when accessing the ptt window
+ * qed_port_fid_pretend(): Pretend to another port and another function
+ * when accessing the ptt window
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @port_id: The port to pretend to
+ * @fid: fid field of pxp_pretend structure. Can contain either pf / vf.
*
- * @param p_hwfn
- * @param p_ptt
- * @param port_id - the port to pretend to
- * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ * Return: Void.
*/
void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 port_id, u16 fid);
/**
- * @brief qed_vfid_to_concrete - build a concrete FID for a
- * given VF ID
+ * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfid
+ * @p_hwfn: HW device data.
+ * @vfid: VFID.
+ *
+ * Return: u32.
*/
u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid);
/**
- * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd
- * this is declared here since other files will require it.
- * @param idx
+ * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd
+ * this is declared here since other files will require it.
+ *
+ * @idx: Index
+ *
+ * Return: u32.
*/
u32 qed_dmae_idx_to_go_cmd(u8 idx);
/**
- * @brief qed_dmae_info_alloc - Init the dmae_info structure
- * which is part of p_hwfn.
- * @param p_hwfn
+ * qed_dmae_info_alloc(): Init the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
+ *
+ * Return: Int.
*/
int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_dmae_info_free - Free the dmae_info structure
- * which is part of p_hwfn
+ * qed_dmae_info_free(): Free the dmae_info structure
+ * which is part of p_hwfn.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_dmae_info_free(struct qed_hwfn *p_hwfn);
@@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
#define QED_HW_ERR_MAX_STR_SIZE 256
/**
- * @brief qed_hw_err_notify - Notify upper layer driver and management FW
- * about a HW error.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param err_type
- * @param fmt - debug data buffer to send to the MFW
- * @param ... - buffer format args
+ * qed_hw_err_notify(): Notify upper layer driver and management FW
+ * about a HW error.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @err_type: Err Type.
+ * @fmt: Debug data buffer to send to the MFW
+ * @...: buffer format args
+ *
+ * Return: Void.
*/
void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
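The hunks above convert the old @brief/@param comments in qed_hw.h to kernel-doc. For reference, a complete kernel-doc block for a hypothetical helper would look like this (qed_example_rd() is illustrative only and not part of the driver):

/**
 * qed_example_rd() - Read a 32-bit value through a PTT window.
 * @p_hwfn: HW device data.
 * @p_ptt: PTT window used for the access.
 * @hw_addr: GRC address to read.
 *
 * Return: The 32-bit value read from @hw_addr.
 */
u32 qed_example_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr);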
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
index ea888a2c6ddb..321c43408153 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_fw_funcs.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
* Copyright (c) 2015-2017 QLogic Corporation
- * Copyright (c) 2019-2020 Marvell International Ltd.
+ * Copyright (c) 2019-2021 Marvell International Ltd.
*/
#include <linux/types.h>
@@ -13,17 +13,18 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
-#define CDU_VALIDATION_DEFAULT_CFG 61
+#define CDU_VALIDATION_DEFAULT_CFG CDU_CONTEXT_VALIDATION_DEFAULT_CFG
-static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES] = {
{400, 336, 352, 368, 304, 384, 416, 352}, /* region 3 offsets */
{528, 496, 416, 512, 448, 512, 544, 480}, /* region 4 offsets */
{608, 544, 496, 576, 576, 592, 624, 560} /* region 5 offsets */
};
-static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
+static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES] = {
{240, 240, 112, 0, 0, 0, 0, 96} /* region 1 offsets */
};
@@ -42,25 +43,49 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
+/* Initial VOQ byte credit */
+#define QM_INITIAL_VOQ_BYTE_CRD 98304
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
+/* VOQ constants */
+#define MAX_NUM_VOQS (MAX_NUM_PORTS_K2 * NUM_TCS_4PORT_K2)
+#define VOQS_BIT_MASK (BIT(MAX_NUM_VOQS) - 1)
+
/* WFQ constants */
-/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
-#define QM_WFQ_UPPER_BOUND 62500000
+/* PF WFQ increment value, 0x9000 = 4*9*1024 */
+#define QM_PF_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+
+/* PF WFQ Upper bound, in MB, 10 * burst size of 1ms in 50Gbps */
+#define QM_PF_WFQ_UPPER_BOUND 62500000
+
+/* PF WFQ max increment value, 0.7 * upper bound */
+#define QM_PF_WFQ_MAX_INC_VAL ((QM_PF_WFQ_UPPER_BOUND * 7) / 10)
+
+/* Number of VOQs in E5 PF WFQ credit register (QmWfqCrd) */
+#define QM_PF_WFQ_CRD_E5_NUM_VOQS 16
+
+/* VP WFQ increment value */
+#define QM_VP_WFQ_INC_VAL(weight) ((weight) * QM_VP_WFQ_MIN_INC_VAL)
-/* Bit of VOQ in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
+/* VP WFQ min increment value */
+#define QM_VP_WFQ_MIN_INC_VAL 10800
-/* Bit of PF in WFQ VP PQ map */
-#define QM_WFQ_VP_PQ_PF_E4_SHIFT 5
+/* VP WFQ max increment value, 2^30 */
+#define QM_VP_WFQ_MAX_INC_VAL 0x40000000
-/* 0x9000 = 4*9*1024 */
-#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
+/* VP WFQ bypass threshold */
+#define QM_VP_WFQ_BYPASS_THRESH (QM_VP_WFQ_MIN_INC_VAL - 100)
-/* Max WFQ increment value is 0.7 * upper bound */
-#define QM_WFQ_MAX_INC_VAL ((QM_WFQ_UPPER_BOUND * 7) / 10)
+/* VP RL credit task cost */
+#define QM_VP_RL_CRD_TASK_COST 9700
+
+/* Bit of VOQ in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_VOQ_SHIFT 0
+
+/* Bit of PF in VP WFQ PQ map */
+#define QM_VP_WFQ_PQ_PF_SHIFT 5
/* RL constants */
@@ -71,12 +96,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
/* RL increment value - rate is specified in mbps */
-#define QM_RL_INC_VAL(rate) ({ \
- typeof(rate) __rate = (rate); \
- max_t(u32, \
- (u32)(((__rate ? __rate : 1000000) * QM_RL_PERIOD * 101) / \
- (8 * 100)), \
- 1); })
+#define QM_RL_INC_VAL(rate) ({ \
+ typeof(rate) __rate = (rate); \
+ max_t(u32, \
+ (u32)(((__rate ? __rate : \
+ 100000) * \
+ QM_RL_PERIOD * \
+ 101) / (8 * 100)), 1); })
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_PF_RL_UPPER_BOUND 62500000
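The updated QM_RL_INC_VAL() above turns a rate given in Mbps into a per-QM-period credit increment: (rate * QM_RL_PERIOD * 101) / (8 * 100), falling back to 100000 Mbps when the rate is zero and never returning less than 1. A standalone sketch of the same arithmetic, assuming the QM period is passed in by the caller (rl_inc_val() is illustrative, not part of the driver):

static u32 rl_inc_val(u32 rate_mbps, u32 qm_rl_period)
{
	/* Fallback of 100000 Mbps for an unspecified rate, as in the macro. */
	u64 rate = rate_mbps ? rate_mbps : 100000;
	/* Same arithmetic as QM_RL_INC_VAL(): divide bits by 8, +1% margin. */
	u64 inc = (rate * qm_rl_period * 101) / (8 * 100);

	return inc ? (u32)inc : 1;
}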
@@ -84,16 +110,13 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_PF_RL_MAX_INC_VAL ((QM_PF_RL_UPPER_BOUND * 7) / 10)
-/* Vport RL Upper bound, link speed is in Mpbs */
-#define QM_VP_RL_UPPER_BOUND(speed) ((u32)max_t(u32, \
- QM_RL_INC_VAL(speed), \
- 9700 + 1000))
-
-/* Max Vport RL increment value is the Vport RL upper bound */
-#define QM_VP_RL_MAX_INC_VAL(speed) QM_VP_RL_UPPER_BOUND(speed)
-
-/* Vport RL credit threshold in case of QM bypass */
-#define QM_VP_RL_BYPASS_THRESH_SPEED (QM_VP_RL_UPPER_BOUND(10000) - 1)
+/* QCN RL Upper bound, speed is in Mpbs */
+#define QM_GLOBAL_RL_UPPER_BOUND(speed) ((u32)max_t( \
+ u32, \
+ (u32)(((speed) * \
+ QM_RL_PERIOD * 101) / (8 * 100)), \
+ QM_VP_RL_CRD_TASK_COST \
+ + 1000))
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
@@ -156,20 +179,20 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
cmd ## _ ## field, \
value)
-#define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, vp_pq_id, rl_valid, \
+#define QM_INIT_TX_PQ_MAP(p_hwfn, map, pq_id, vp_pq_id, rl_valid, \
rl_id, ext_voq, wrr) \
do { \
u32 __reg = 0; \
\
BUILD_BUG_ON(sizeof((map).reg) != sizeof(__reg)); \
- \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_VALID, \
+ memset(&(map), 0, sizeof(map)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_PQ_VALID, 1); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_VALID, \
!!(rl_valid)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, (vp_pq_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_RL_ID, (rl_id)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_VOQ, (ext_voq)); \
- SET_FIELD(__reg, QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VP_PQ_ID, (vp_pq_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_RL_ID, (rl_id)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_VOQ, (ext_voq)); \
+ SET_FIELD(__reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, \
(wrr)); \
\
STORE_RT_REG((p_hwfn), QM_REG_TXPQMAP_RT_OFFSET + (pq_id), \
@@ -184,8 +207,8 @@ static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
(((rl) >> 8) << 9))
#define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
- XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
- XSTORM_PQ_INFO_OFFSET(pq_id)
+ (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + \
+ XSTORM_PQ_INFO_OFFSET(pq_id))
/******************** INTERNAL IMPLEMENTATION *********************/
@@ -204,7 +227,7 @@ static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
if (pf_rl_en) {
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
/* Enable RLs for all VOQs */
@@ -236,7 +259,7 @@ static void qed_enable_pf_wfq(struct qed_hwfn *p_hwfn, bool pf_wfq_en)
if (pf_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_PF_WFQ_UPPER_BOUND);
}
/* Prepare global RL enable/disable runtime init values */
@@ -257,7 +280,7 @@ static void qed_enable_global_rl(struct qed_hwfn *p_hwfn, bool global_rl_en)
if (QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
- QM_VP_RL_BYPASS_THRESH_SPEED);
+ QM_GLOBAL_RL_UPPER_BOUND(10000) - 1);
}
}
@@ -271,7 +294,7 @@ static void qed_enable_vport_wfq(struct qed_hwfn *p_hwfn, bool vport_wfq_en)
if (vport_wfq_en && QM_BYPASS_EN)
STORE_RT_REG(p_hwfn,
QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
- QM_WFQ_UPPER_BOUND);
+ QM_VP_WFQ_BYPASS_THRESH);
}
/* Prepare runtime init values to allocate PBF command queue lines for
@@ -291,14 +314,14 @@ static void qed_cmdq_lines_voq_rt_init(struct qed_hwfn *p_hwfn,
}
/* Prepare runtime init values to allocate PBF command queue lines. */
-static void qed_cmdq_lines_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_cmdq_lines_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u8 tc, ext_voq, port_id, num_tcs_in_port;
- u8 num_ext_voqs = MAX_NUM_VOQS_E4;
+ u8 num_ext_voqs = MAX_NUM_VOQS;
/* Clear PBF lines of all VOQs */
for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
@@ -364,11 +387,11 @@ static void qed_cmdq_lines_rt_init(
* - No optimization for lossy TC (all are considered lossless). Shared space
* is not enabled and allocated for each TC.
*/
-static void qed_btb_blocks_rt_init(
- struct qed_hwfn *p_hwfn,
- u8 max_ports_per_engine,
- u8 max_phys_tcs_per_port,
- struct init_qm_port_params port_params[MAX_NUM_PORTS])
+static void
+qed_btb_blocks_rt_init(struct qed_hwfn *p_hwfn,
+ u8 max_ports_per_engine,
+ u8 max_phys_tcs_per_port,
+ struct init_qm_port_params port_params[MAX_NUM_PORTS])
{
u32 usable_blocks, pure_lb_blocks, phys_blocks;
u8 tc, ext_voq, port_id, num_tcs_in_port;
@@ -428,7 +451,7 @@ static void qed_btb_blocks_rt_init(
*/
static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
{
- u32 upper_bound = QM_VP_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
+ u32 upper_bound = QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) |
(u32)QM_RL_CRD_REG_SIGN_BIT;
u32 inc_val;
u16 rl_id;
@@ -450,11 +473,73 @@ static int qed_global_rl_rt_init(struct qed_hwfn *p_hwfn)
return 0;
}
+/* Returns the upper bound for the specified Vport RL parameters.
+ * link_speed is in Mbps.
+ * Returns 0 in case of error.
+ */
+static u32 qed_get_vport_rl_upper_bound(enum init_qm_rl_type vport_rl_type,
+ u32 link_speed)
+{
+ switch (vport_rl_type) {
+ case QM_RL_TYPE_NORMAL:
+ return QM_INITIAL_VOQ_BYTE_CRD;
+ case QM_RL_TYPE_QCN:
+ return QM_GLOBAL_RL_UPPER_BOUND(link_speed);
+ default:
+ return 0;
+ }
+}
+
+/* Prepare VPORT RL runtime init values.
+ * Return -1 on error.
+ */
+static int qed_vport_rl_rt_init(struct qed_hwfn *p_hwfn,
+ u16 start_rl,
+ u16 num_rls,
+ u32 link_speed,
+ struct init_qm_rl_params *rl_params)
+{
+ u16 i, rl_id;
+
+ if (num_rls && start_rl + num_rls >= MAX_QM_GLOBAL_RLS) {
+ DP_NOTICE(p_hwfn, "Invalid rate limiter configuration\n");
+ return -1;
+ }
+
+ /* Go over all PF VPORTs */
+ for (i = 0, rl_id = start_rl; i < num_rls; i++, rl_id++) {
+ u32 upper_bound, inc_val;
+
+ upper_bound =
+ qed_get_vport_rl_upper_bound((enum init_qm_rl_type)
+ rl_params[i].vport_rl_type,
+ link_speed);
+
+ inc_val =
+ QM_RL_INC_VAL(rl_params[i].vport_rl ?
+ rl_params[i].vport_rl : link_speed);
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn,
+ "Invalid RL rate - limit configuration\n");
+ return -1;
+ }
+
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + rl_id,
+ (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + rl_id,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + rl_id,
+ inc_val);
+ }
+
+ return 0;
+}
+
/* Prepare Tx PQ mapping runtime init values for the specified PF */
-static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt,
- struct qed_qm_pf_rt_init_params *p_params,
- u32 base_mem_addr_4kb)
+static int qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt,
+ struct qed_qm_pf_rt_init_params *p_params,
+ u32 base_mem_addr_4kb)
{
u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
struct init_qm_vport_params *vport_params = p_params->vport_params;
@@ -487,7 +572,7 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Go over all Tx PQs */
for (i = 0, pq_id = p_params->start_pq; i < num_pqs; i++, pq_id++) {
u16 *p_first_tx_pq_id, vport_id_in_pf;
- struct qm_rf_pq_map_e4 tx_pq_map;
+ struct qm_rf_pq_map tx_pq_map;
u8 tc_id = pq_params[i].tc_id;
bool is_vf_pq;
u8 ext_voq;
@@ -504,8 +589,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
&vport_params[vport_id_in_pf].first_tx_pq_id[tc_id];
if (*p_first_tx_pq_id == QM_INVALID_PQ_ID) {
u32 map_val =
- (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
- (p_params->pf_id << QM_WFQ_VP_PQ_PF_E4_SHIFT);
+ (ext_voq << QM_VP_WFQ_PQ_VOQ_SHIFT) |
+ (p_params->pf_id << QM_VP_WFQ_PQ_PF_SHIFT);
/* Create new VP PQ */
*p_first_tx_pq_id = pq_id;
@@ -520,7 +605,6 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
/* Prepare PQ map entry */
QM_INIT_TX_PQ_MAP(p_hwfn,
tx_pq_map,
- E4,
pq_id,
*p_first_tx_pq_id,
pq_params[i].rl_valid,
@@ -570,6 +654,8 @@ static void qed_tx_pq_map_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + i,
tx_pq_vf_mask[i]);
+
+ return 0;
}
/* Prepare Other PQ mapping runtime init values for the specified PF */
@@ -620,7 +706,6 @@ static void qed_other_pq_map_rt_init(struct qed_hwfn *p_hwfn,
* Return -1 on error.
*/
static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
-
struct qed_qm_pf_rt_init_params *p_params)
{
u16 num_tx_pqs = p_params->num_pf_pqs + p_params->num_vf_pqs;
@@ -629,8 +714,8 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
u8 ext_voq;
u16 i;
- inc_val = QM_WFQ_INC_VAL(p_params->pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ inc_val = QM_PF_WFQ_INC_VAL(p_params->pf_wfq);
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -652,7 +737,7 @@ static int qed_pf_wfq_rt_init(struct qed_hwfn *p_hwfn,
STORE_RT_REG(p_hwfn,
QM_REG_WFQPFUPPERBOUND_RT_OFFSET + p_params->pf_id,
- QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ QM_PF_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + p_params->pf_id,
inc_val);
@@ -689,34 +774,38 @@ static int qed_vp_wfq_rt_init(struct qed_hwfn *p_hwfn,
u16 num_vports,
struct init_qm_vport_params *vport_params)
{
- u16 vport_pq_id, i;
+ u16 vport_pq_id, wfq, i;
u32 inc_val;
u8 tc;
/* Go over all PF VPORTs */
for (i = 0; i < num_vports; i++) {
- if (!vport_params[i].wfq)
- continue;
-
- inc_val = QM_WFQ_INC_VAL(vport_params[i].wfq);
- if (inc_val > QM_WFQ_MAX_INC_VAL) {
- DP_NOTICE(p_hwfn,
- "Invalid VPORT WFQ weight configuration\n");
- return -1;
- }
-
/* Each VPORT can have several VPORT PQ IDs for various TCs */
for (tc = 0; tc < NUM_OF_TCS; tc++) {
+ /* Check if VPORT/TC is valid */
vport_pq_id = vport_params[i].first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID) {
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPCRD_RT_OFFSET +
- vport_pq_id,
- (u32)QM_WFQ_CRD_REG_SIGN_BIT);
- STORE_RT_REG(p_hwfn,
- QM_REG_WFQVPWEIGHT_RT_OFFSET +
- vport_pq_id, inc_val);
+ if (vport_pq_id == QM_INVALID_PQ_ID)
+ continue;
+
+ /* Find WFQ weight (per VPORT or per VPORT+TC) */
+ wfq = vport_params[i].wfq;
+ wfq = wfq ? wfq : vport_params[i].tc_wfq[tc];
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (inc_val > QM_VP_WFQ_MAX_INC_VAL) {
+ DP_NOTICE(p_hwfn,
+ "Invalid VPORT WFQ weight configuration\n");
+ return -1;
}
+
+ /* Config registers */
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
+ vport_pq_id,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPUPPERBOUND_RT_OFFSET +
+ vport_pq_id,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ STORE_RT_REG(p_hwfn, QM_REG_WFQVPWEIGHT_RT_OFFSET +
+ vport_pq_id, inc_val);
}
}
@@ -780,11 +869,14 @@ int qed_qm_common_rt_init(struct qed_hwfn *p_hwfn,
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_LINEVOQ,
QM_OPPOR_LINE_VOQ_DEF);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ, QM_BYTE_CRD_EN);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ, p_params->pf_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ, p_params->vport_wfq_en);
- SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL, p_params->pf_rl_en);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFWFQ,
+ p_params->pf_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPWFQ,
+ p_params->vport_wfq_en ? 1 : 0);
+ SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_PFRL,
+ p_params->pf_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_VPQCNRL,
- p_params->global_rl_en);
+ p_params->global_rl_en ? 1 : 0);
SET_FIELD(mask, QM_RF_OPPORTUNISTIC_MASK_FWPAUSE, QM_OPPOR_FW_STOP_DEF);
SET_FIELD(mask,
QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY, QM_OPPOR_PQ_EMPTY_DEF);
@@ -830,7 +922,6 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
u16 i;
u8 tc;
-
/* Clear first Tx PQ ID array for each VPORT */
for (i = 0; i < p_params->num_vports; i++)
for (tc = 0; tc < NUM_OF_TCS; tc++)
@@ -843,7 +934,8 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
p_params->num_tids, 0);
/* Map Tx PQs */
- qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb);
+ if (qed_tx_pq_map_rt_init(p_hwfn, p_ptt, p_params, other_mem_size_4kb))
+ return -1;
/* Init PF WFQ */
if (p_params->pf_wfq)
@@ -858,15 +950,21 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn,
if (qed_vp_wfq_rt_init(p_hwfn, p_params->num_vports, vport_params))
return -1;
+ /* Set VPORT RL */
+ if (qed_vport_rl_rt_init(p_hwfn, p_params->start_rl,
+ p_params->num_rls, p_params->link_speed,
+ p_params->rl_params))
+ return -1;
+
return 0;
}
int qed_init_pf_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
{
- u32 inc_val = QM_WFQ_INC_VAL(pf_wfq);
+ u32 inc_val = QM_PF_WFQ_INC_VAL(pf_wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ if (!inc_val || inc_val > QM_PF_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid PF WFQ weight configuration\n");
return -1;
}
@@ -897,41 +995,66 @@ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq)
{
+ int result = 0;
u16 vport_pq_id;
- u32 inc_val;
u8 tc;
- inc_val = QM_WFQ_INC_VAL(wfq);
- if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
+ for (tc = 0; tc < NUM_OF_TCS && !result; tc++) {
+ vport_pq_id = first_tx_pq_id[tc];
+ if (vport_pq_id != QM_INVALID_PQ_ID)
+ result = qed_init_vport_tc_wfq(p_hwfn, p_ptt,
+ vport_pq_id, wfq);
+ }
+
+ return result;
+}
+
+int qed_init_vport_tc_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
+ u16 first_tx_pq_id, u16 wfq)
+{
+ u32 inc_val;
+
+ if (first_tx_pq_id == QM_INVALID_PQ_ID)
+ return -1;
+
+ inc_val = QM_VP_WFQ_INC_VAL(wfq);
+ if (!inc_val || inc_val > QM_VP_WFQ_MAX_INC_VAL) {
DP_NOTICE(p_hwfn, "Invalid VPORT WFQ configuration.\n");
return -1;
}
- /* A VPORT can have several VPORT PQ IDs for various TCs */
- for (tc = 0; tc < NUM_OF_TCS; tc++) {
- vport_pq_id = first_tx_pq_id[tc];
- if (vport_pq_id != QM_INVALID_PQ_ID)
- qed_wr(p_hwfn,
- p_ptt,
- QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
- }
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPCRD + first_tx_pq_id * 4,
+ (u32)QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPUPPERBOUND + first_tx_pq_id * 4,
+ inc_val | QM_WFQ_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn, p_ptt, QM_REG_WFQVPWEIGHT + first_tx_pq_id * 4,
+ inc_val);
return 0;
}
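The new qed_init_vport_tc_wfq() lets a caller adjust the weight of a single traffic class instead of walking the whole vport. A hedged usage sketch follows; the first_tx_pq_id[] table stands for whatever per-TC PQ mapping the caller already holds and is not defined by this hunk:

	/* Sketch only: program a WFQ weight of 50 on the PQ backing TC 0. */
	u16 pq_id = first_tx_pq_id[0];	/* caller's per-TC PQ table (assumed) */

	if (qed_init_vport_tc_wfq(p_hwfn, p_ptt, pq_id, 50))
		DP_NOTICE(p_hwfn, "WFQ configuration for TC 0 failed\n");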
int qed_init_global_rl(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit)
+ struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit,
+ enum init_qm_rl_type vport_rl_type)
{
- u32 inc_val;
+ u32 inc_val, upper_bound;
+ upper_bound =
+ (vport_rl_type ==
+ QM_RL_TYPE_QCN) ? QM_GLOBAL_RL_UPPER_BOUND(QM_MAX_LINK_SPEED) :
+ QM_INITIAL_VOQ_BYTE_CRD;
inc_val = QM_RL_INC_VAL(rate_limit);
- if (inc_val > QM_VP_RL_MAX_INC_VAL(rate_limit)) {
- DP_NOTICE(p_hwfn, "Invalid rate limit configuration.\n");
+ if (inc_val > upper_bound) {
+ DP_NOTICE(p_hwfn, "Invalid VPORT rate limit configuration.\n");
return -1;
}
qed_wr(p_hwfn, p_ptt,
QM_REG_RLGLBLCRD + rl_id * 4, (u32)QM_RL_CRD_REG_SIGN_BIT);
+ qed_wr(p_hwfn,
+ p_ptt,
+ QM_REG_RLGLBLUPPERBOUND + rl_id * 4,
+ upper_bound | (u32)QM_RL_CRD_REG_SIGN_BIT);
qed_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + rl_id * 4, inc_val);
return 0;
@@ -1013,7 +1136,7 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn,
static int qed_dmae_to_grc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
__le32 *p_data, u32 addr, u32 len_in_dwords)
{
- struct qed_dmae_params params = {};
+ struct qed_dmae_params params = { 0 };
u32 *data_cpu;
int rc;
@@ -1066,16 +1189,16 @@ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, vxlan_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE, vxlan_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1099,18 +1222,20 @@ void qed_set_gre_enable(struct qed_hwfn *p_hwfn,
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_gre_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE,
+ eth_gre_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE,
+ ip_gre_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1148,22 +1273,23 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
bool eth_geneve_enable, bool ip_geneve_enable)
{
u32 reg_val;
- u8 shift;
/* Update PRS register */
reg_val = qed_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, eth_geneve_enable);
- shift = PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT;
- SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, shift, ip_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE,
+ eth_geneve_enable);
+ SET_FIELD(reg_val,
+ PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE,
+ ip_geneve_enable);
qed_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
if (reg_val) {
reg_val =
- qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2);
+ qed_rd(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0);
/* Update output only if tunnel blocks not included. */
if (reg_val == (u32)PRS_ETH_OUTPUT_FORMAT)
- qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ qed_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_TUNN_OUTPUT_FORMAT);
}
@@ -1179,16 +1305,16 @@ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn,
/* Update DORQ registers */
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2,
eth_geneve_enable ? 1 : 0);
qed_wr(p_hwfn,
p_ptt,
- DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
+ DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2,
ip_geneve_enable ? 1 : 0);
}
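The three tunnel-enable helpers above (VXLAN, GRE, GENEVE) drop the open-coded SET_TUNNEL_TYPE_ENABLE_BIT()/shift pairs in favour of the generic SET_FIELD() macro keyed by named _MASK/_SHIFT pairs. A rough, non-authoritative sketch of the equivalence (the real macro bodies live in the qed headers and are not reproduced by this patch):

/*	Old helper: toggle one bit at a raw shift value.
 *		reg_val = enable ? (reg_val | BIT(shift))
 *				 : (reg_val & ~BIT(shift));
 *
 *	SET_FIELD(): clear the named field, then OR in the new value, roughly:
 *		reg_val &= ~(FIELD_MASK << FIELD_SHIFT);
 *		reg_val |= ((u32)enable & FIELD_MASK) << FIELD_SHIFT;
 */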
#define PRS_ETH_VXLAN_NO_L2_ENABLE_OFFSET 3
-#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT -925189872
+#define PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT 0xC8DAB910
void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, bool enable)
@@ -1208,7 +1334,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
/* update PRS FIC register */
qed_wr(p_hwfn,
p_ptt,
- PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
+ PRS_REG_OUTPUT_FORMAT_4_0,
(u32)PRS_ETH_VXLAN_NO_L2_OUTPUT_FORMAT);
} else {
/* clear VXLAN_NO_L2_ENABLE flag */
@@ -1229,7 +1355,7 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn,
void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id)
{
- struct regpair ram_line = { };
+ struct regpair ram_line = { 0 };
/* Disable gft search for PF */
qed_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
@@ -1621,6 +1747,8 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
storm_buf_size = GET_FIELD(hdr->data,
FW_OVERLAY_BUF_HDR_BUF_SIZE);
storm_id = GET_FIELD(hdr->data, FW_OVERLAY_BUF_HDR_STORM_ID);
+ if (storm_id >= NUM_STORMS)
+ break;
storm_mem_desc = allocated_mem + storm_id;
storm_mem_desc->size = storm_buf_size * sizeof(u32);
@@ -1645,7 +1773,7 @@ struct phys_mem_desc *qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn,
/* If memory allocation has failed, free all allocated memory */
if (buf_offset < buf_size) {
- qed_fw_overlay_mem_free(p_hwfn, allocated_mem);
+ qed_fw_overlay_mem_free(p_hwfn, &allocated_mem);
return NULL;
}
@@ -1679,16 +1807,16 @@ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn,
}
void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
- struct phys_mem_desc *fw_overlay_mem)
+ struct phys_mem_desc **fw_overlay_mem)
{
u8 storm_id;
- if (!fw_overlay_mem)
+ if (!fw_overlay_mem || !(*fw_overlay_mem))
return;
for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
struct phys_mem_desc *storm_mem_desc =
- (struct phys_mem_desc *)fw_overlay_mem + storm_id;
+ (struct phys_mem_desc *)*fw_overlay_mem + storm_id;
/* Free Storm's physical memory */
if (storm_mem_desc->virt_addr)
@@ -1699,5 +1827,6 @@ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn,
}
/* Free allocated virtual memory */
- kfree(fw_overlay_mem);
+ kfree(*fw_overlay_mem);
+ *fw_overlay_mem = NULL;
}
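Because qed_fw_overlay_mem_free() now takes a double pointer, it can clear the caller's reference as well as free the descriptors, so a repeated or late call becomes harmless. A hedged caller-side sketch (the fw_overlay_mem field name follows the driver's existing per-hwfn storage and is an assumption here):

	qed_fw_overlay_mem_free(p_hwfn, &p_hwfn->fw_overlay_mem);
	/* p_hwfn->fw_overlay_mem is now NULL; calling again is a no-op. */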
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
index 7e6c6389523b..b3bf9899c1a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.c
@@ -15,6 +15,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
+#include "qed_iro_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
@@ -46,30 +47,32 @@ static u32 pxp_global_win[] = {
/* IRO Array */
static const u32 iro_arr[] = {
0x00000000, 0x00000000, 0x00080000,
+ 0x00004478, 0x00000008, 0x00080000,
0x00003288, 0x00000088, 0x00880000,
- 0x000058e8, 0x00000020, 0x00200000,
+ 0x000058a8, 0x00000020, 0x00200000,
+ 0x00003188, 0x00000008, 0x00080000,
0x00000b00, 0x00000008, 0x00040000,
0x00000a80, 0x00000008, 0x00040000,
0x00000000, 0x00000008, 0x00020000,
0x00000080, 0x00000008, 0x00040000,
0x00000084, 0x00000008, 0x00020000,
- 0x00005718, 0x00000004, 0x00040000,
- 0x00004dd0, 0x00000000, 0x00780000,
+ 0x00005798, 0x00000004, 0x00040000,
+ 0x00004e50, 0x00000000, 0x00780000,
0x00003e40, 0x00000000, 0x00780000,
- 0x00004480, 0x00000000, 0x00780000,
+ 0x00004500, 0x00000000, 0x00780000,
0x00003210, 0x00000000, 0x00780000,
0x00003b50, 0x00000000, 0x00780000,
0x00007f58, 0x00000000, 0x00780000,
- 0x00005f58, 0x00000000, 0x00080000,
+ 0x00005fd8, 0x00000000, 0x00080000,
0x00007100, 0x00000000, 0x00080000,
- 0x0000aea0, 0x00000000, 0x00080000,
+ 0x0000af20, 0x00000000, 0x00080000,
0x00004398, 0x00000000, 0x00080000,
0x0000a5a0, 0x00000000, 0x00080000,
0x0000bde8, 0x00000000, 0x00080000,
0x00000020, 0x00000004, 0x00040000,
- 0x000056c8, 0x00000010, 0x00100000,
+ 0x00005688, 0x00000010, 0x00100000,
0x0000c210, 0x00000030, 0x00300000,
- 0x0000b088, 0x00000038, 0x00380000,
+ 0x0000b108, 0x00000038, 0x00380000,
0x00003d20, 0x00000080, 0x00400000,
0x0000bf60, 0x00000000, 0x00040000,
0x00004560, 0x00040080, 0x00040000,
@@ -77,11 +80,11 @@ static const u32 iro_arr[] = {
0x00003d60, 0x00000080, 0x00200000,
0x00008960, 0x00000040, 0x00300000,
0x0000e840, 0x00000060, 0x00600000,
- 0x00004618, 0x00000080, 0x00380000,
- 0x00010738, 0x000000c0, 0x00c00000,
+ 0x00004698, 0x00000080, 0x00380000,
+ 0x000107b8, 0x000000c0, 0x00c00000,
0x000001f8, 0x00000002, 0x00020000,
- 0x0000a2a0, 0x00000000, 0x01080000,
- 0x0000a3a8, 0x00000008, 0x00080000,
+ 0x0000a260, 0x00000000, 0x01080000,
+ 0x0000a368, 0x00000008, 0x00080000,
0x000001c0, 0x00000008, 0x00080000,
0x000001f8, 0x00000008, 0x00080000,
0x00000ac0, 0x00000008, 0x00080000,
@@ -90,39 +93,46 @@ static const u32 iro_arr[] = {
0x00000280, 0x00000008, 0x00080000,
0x00000680, 0x00080018, 0x00080000,
0x00000b78, 0x00080018, 0x00020000,
- 0x0000c640, 0x00000050, 0x003c0000,
- 0x00012038, 0x00000018, 0x00100000,
- 0x00011b00, 0x00000040, 0x00180000,
- 0x000095d0, 0x00000050, 0x00200000,
+ 0x0000c600, 0x00000058, 0x003c0000,
+ 0x00012038, 0x00000020, 0x00100000,
+ 0x00011b00, 0x00000048, 0x00180000,
+ 0x00009650, 0x00000050, 0x00200000,
0x00008b10, 0x00000040, 0x00280000,
- 0x00011640, 0x00000018, 0x00100000,
- 0x0000c828, 0x00000048, 0x00380000,
- 0x00011710, 0x00000020, 0x00200000,
- 0x00004650, 0x00000080, 0x00100000,
+ 0x000116c0, 0x00000018, 0x00100000,
+ 0x0000c808, 0x00000048, 0x00380000,
+ 0x00011790, 0x00000020, 0x00200000,
+ 0x000046d0, 0x00000080, 0x00100000,
0x00003618, 0x00000010, 0x00100000,
- 0x0000a968, 0x00000008, 0x00010000,
+ 0x0000a9e8, 0x00000008, 0x00010000,
0x000097a0, 0x00000008, 0x00010000,
- 0x00011990, 0x00000008, 0x00010000,
- 0x0000f018, 0x00000008, 0x00010000,
- 0x00012628, 0x00000008, 0x00010000,
- 0x00011da8, 0x00000008, 0x00010000,
- 0x0000aa78, 0x00000030, 0x00100000,
- 0x0000d768, 0x00000028, 0x00280000,
- 0x00009a58, 0x00000018, 0x00180000,
- 0x00009bd8, 0x00000008, 0x00080000,
- 0x00013a18, 0x00000008, 0x00080000,
- 0x000126e8, 0x00000018, 0x00180000,
- 0x0000e608, 0x00500288, 0x00100000,
- 0x00012970, 0x00000138, 0x00280000,
+ 0x00011a10, 0x00000008, 0x00010000,
+ 0x0000e9f8, 0x00000008, 0x00010000,
+ 0x00012648, 0x00000008, 0x00010000,
+ 0x000121c8, 0x00000008, 0x00010000,
+ 0x0000af08, 0x00000030, 0x00100000,
+ 0x0000d748, 0x00000028, 0x00280000,
+ 0x00009e68, 0x00000018, 0x00180000,
+ 0x00009fe8, 0x00000008, 0x00080000,
+ 0x00013ea8, 0x00000008, 0x00080000,
+ 0x00012f18, 0x00000018, 0x00180000,
+ 0x0000dfe8, 0x00500288, 0x00100000,
+ 0x000131a0, 0x00000138, 0x00280000,
};
void qed_init_iro_array(struct qed_dev *cdev)
{
- cdev->iro_arr = iro_arr;
+ cdev->iro_arr = iro_arr + E4_IRO_ARR_OFFSET;
}
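qed_init_iro_array() now publishes a per-chip slice of the flat dword table rather than the table head itself. With E4_IRO_ARR_OFFSET defined as 0 (see the new qed_iro_hsi.h later in this patch) the slice is currently a no-op; the point is that a later chip generation could expose its own region of the same array, along the lines of this hypothetical sketch:

	/* Hypothetical only -- no such constant exists in this patch. */
	cdev->iro_arr = iro_arr + E5_IRO_ARR_OFFSET;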
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
+ if (rt_offset >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing %u in rt_data at index %u!\n",
+ val, rt_offset);
+ return;
+ }
+
p_hwfn->rt_data.init_val[rt_offset] = val;
p_hwfn->rt_data.b_valid[rt_offset] = true;
}
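STORE_RT_REG()/OVERWRITE_RT_REG() in qed_init_ops.h expand to this function, so the new range check means an out-of-range write is reported and dropped instead of scribbling past rt_data.init_val[]. Illustration only:

	/* With RUNTIME_ARRAY_SIZE as the table bound, this now logs a
	 * DP_ERR() and returns without touching rt_data at all.
	 */
	STORE_RT_REG(p_hwfn, RUNTIME_ARRAY_SIZE + 10, 0xdeadbeef);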
@@ -132,6 +142,14 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
{
size_t i;
+ if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
+ DP_ERR(p_hwfn,
+ "Avoid storing values in rt_data at indices %u-%u!\n",
+ rt_offset,
+ (u32)(rt_offset + size - 1));
+ return;
+ }
+
for (i = 0; i < size / sizeof(u32); i++) {
p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
p_hwfn->rt_data.b_valid[rt_offset + i] = true;
@@ -175,7 +193,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
return rc;
/* invalidate after writing */
- for (j = i; j < i + segment; j++)
+ for (j = i; j < (u32)(i + segment); j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
@@ -245,7 +263,7 @@ static int qed_init_array_dmae(struct qed_hwfn *p_hwfn,
static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
- u32 addr, u32 fill, u32 fill_count)
+ u32 addr, u32 fill_count)
{
static u32 zero_buffer[DMAE_MAX_RW_SIZE];
struct qed_dmae_params params = {};
@@ -372,7 +390,7 @@ static int qed_init_cmd_wr(struct qed_hwfn *p_hwfn,
case INIT_SRC_ZEROS:
data = le32_to_cpu(p_cmd->args.zeros_count);
if (b_must_dmae || (b_can_dmae && (data >= 64)))
- rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data);
+ rc = qed_init_fill_dmae(p_hwfn, p_ptt, addr, data);
else
qed_init_fill(p_hwfn, p_ptt, addr, 0, data);
break;
@@ -419,7 +437,6 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);
-
val = qed_rd(p_hwfn, p_ptt, addr);
if (poll == INIT_POLL_NONE)
@@ -515,8 +532,7 @@ static u32 qed_init_cmd_mode(struct qed_hwfn *p_hwfn,
INIT_IF_MODE_OP_CMD_OFFSET);
}
-static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
- struct init_if_phase_op *p_cmd,
+static u32 qed_init_cmd_phase(struct init_if_phase_op *p_cmd,
u32 phase, u32 phase_id)
{
u32 data = le32_to_cpu(p_cmd->phase_data);
@@ -563,7 +579,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
modes);
break;
case INIT_OP_IF_PHASE:
- cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
+ cmd_num += qed_init_cmd_phase(&cmd->if_phase,
phase, phase_id);
break;
case INIT_OP_DELAY:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
index a573c8921982..12e5c4e370d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h
@@ -12,23 +12,24 @@
#include "qed.h"
/**
- * @brief qed_init_iro_array - init iro_arr.
+ * qed_init_iro_array(): init iro_arr.
*
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_init_iro_array(struct qed_dev *cdev);
/**
- * @brief qed_init_run - Run the init-sequence.
+ * qed_init_run(): Run the init-sequence.
*
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @phase: Phase.
+ * @phase_id: Phase ID.
+ * @modes: Mode.
*
- * @param p_hwfn
- * @param p_ptt
- * @param phase
- * @param phase_id
- * @param modes
- * @return _qed_status_t
+ * Return: _qed_status_t
*/
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
int modes);
/**
- * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs.
+ * qed_init_alloc(): Allocate RT array, Store 'values' ptrs.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return _qed_status_t
+ * Return: _qed_status_t.
*/
int qed_init_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_hwfn_deallocate
+ * qed_init_free(): Init HW function deallocate.
*
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
+ * qed_init_store_rt_reg(): Store a configuration value in the RT array.
*
+ * @p_hwfn: HW device data.
+ * @rt_offset: RT offset.
+ * @val: Val.
*
- * @param p_hwfn
- * @param rt_offset
- * @param val
+ * Return: Void.
*/
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
@@ -72,29 +74,21 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn,
#define OVERWRITE_RT_REG(hwfn, offset, val) \
qed_init_store_rt_reg(hwfn, offset, val)
-/**
- * @brief
- *
- *
- * @param p_hwfn
- * @param rt_offset
- * @param val
- * @param size
- */
void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn,
u32 rt_offset,
u32 *val,
size_t size);
#define STORE_RT_REG_AGG(hwfn, offset, val) \
- qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val))
+ qed_init_store_rt_agg(hwfn, offset, (u32 *)&(val), sizeof(val))
/**
- * @brief
- * Initialize GTT global windows and set admin window
- * related params of GTT/PTT to default values.
+ * qed_gtt_init(): Initialize GTT global windows and set admin window
+ * related params of GTT/PTT to default values.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_gtt_init(struct qed_hwfn *p_hwfn);
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index f78e6055f654..a97f691839e0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -36,7 +36,7 @@ struct qed_sb_sp_info {
struct qed_sb_info sb_info;
/* per protocol index data */
- struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
+ struct qed_pi_info pi_info_arr[PIS_PER_SB];
};
enum qed_attention_type {
@@ -1507,7 +1507,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
else
SET_FIELD(prod, CAU_PI_ENTRY_FSM_SEL, 1);
- sb_offset = igu_sb_id * PIS_PER_SB_E4;
+ sb_offset = igu_sb_id * PIS_PER_SB;
pi_offset = sb_offset + pi_index;
if (p_hwfn->hw_init_done)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index c5550e96bbe1..84c17e97f569 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -53,51 +53,54 @@ enum qed_coalescing_fsm {
};
/**
- * @brief qed_int_igu_enable_int - enable device interrupts
+ * qed_int_igu_enable_int(): Enable device interrupts.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode - interrupt mode to use
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode to use.
+ *
+ * Return: Void.
*/
void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief qed_int_igu_disable_int - disable device interrupts
+ * qed_int_igu_disable_int(): Disable device interrupts.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc
- * register from igu.
+ * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc
+ * register from igu.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u64
+ * Return: u64.
*/
u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn);
#define QED_SP_SB_ID 0xffff
/**
- * @brief qed_int_sb_init - Initializes the sb_info structure.
+ * qed_int_sb_init(): Initializes the sb_info structure.
*
- * once the structure is initialized it can be passed to sb related functions.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Points to an uninitialized (but allocated) sb_info structure.
+ * @sb_virt_addr: SB Virtual address.
+ * @sb_phy_addr: SB Physical address.
+ * @sb_id: The sb_id to be used (zero based in driver);
+ * should use QED_SP_SB_ID for the SP Status block.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info points to an uninitialized (but
- * allocated) sb_info structure
- * @param sb_virt_addr
- * @param sb_phy_addr
- * @param sb_id the sb_id to be used (zero based in driver)
- * should use QED_SP_SB_ID for SP Status block
+ * Return: int.
*
- * @return int
+ * Once the structure is initialized it can be passed to sb related functions.
*/
int qed_int_sb_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn,
dma_addr_t sb_phy_addr,
u16 sb_id);
/**
- * @brief qed_int_sb_setup - Setup the sb.
+ * qed_int_sb_setup(): Setup the sb.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_info: Initialized sb_info structure.
*
- * @param p_hwfn
- * @param p_ptt
- * @param sb_info initialized sb_info structure
+ * Return: Void.
*/
void qed_int_sb_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_sb_info *sb_info);
/**
- * @brief qed_int_sb_release - releases the sb_info structure.
+ * qed_int_sb_release(): Releases the sb_info structure.
*
- * once the structure is released, it's memory can be freed
+ * @p_hwfn: HW device data.
+ * @sb_info: Points to an allocated sb_info structure.
+ * @sb_id: The sb_id to be used (zero based in driver)
+ * should never be equal to QED_SP_SB_ID
+ * (SP Status block).
*
- * @param p_hwfn
- * @param sb_info points to an allocated sb_info structure
- * @param sb_id the sb_id to be used (zero based in driver)
- * should never be equal to QED_SP_SB_ID
- * (SP Status block)
+ * Return: int.
*
- * @return int
+ * Once the structure is released, its memory can be freed.
*/
int qed_int_sb_release(struct qed_hwfn *p_hwfn,
struct qed_sb_info *sb_info,
u16 sb_id);
/**
- * @brief qed_int_sp_dpc - To be called when an interrupt is received on the
- * default status block.
+ * qed_int_sp_dpc(): To be called when an interrupt is received on the
+ * default status block.
*
- * @param p_hwfn - pointer to hwfn
+ * @t: Tasklet.
+ *
+ * Return: Void.
*
*/
void qed_int_sp_dpc(struct tasklet_struct *t);
/**
- * @brief qed_int_get_num_sbs - get the number of status
- * blocks configured for this funciton in the igu.
+ * qed_int_get_num_sbs(): Get the number of status blocks configured
+ * for this function in the igu.
*
- * @param p_hwfn
- * @param p_sb_cnt_info
+ * @p_hwfn: HW device data.
+ * @p_sb_cnt_info: Pointer to SB count info.
*
- * @return int - number of status blocks configured
+ * Return: Void.
*/
void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn,
struct qed_sb_cnt_info *p_sb_cnt_info);
/**
- * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR
+ * qed_int_disable_post_isr_release(): Performs the cleanup post ISR
* release. The API need to be called after releasing all slowpath IRQs
* of the device.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
+ * Return: Void.
*/
void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
- * @brief qed_int_attn_clr_enable - sets whether the general behavior is
+ * qed_int_attn_clr_enable(): Sets whether the general behavior is
* preventing attentions from being reasserted, or following the
* attributes of the specific attention.
*
- * @param cdev
- * @param clr_enable
+ * @cdev: Qed dev pointer.
+ * @clr_enable: Clear enable
+ *
+ * Return: Void.
*
*/
void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable);
/**
- * @brief - Doorbell Recovery handler.
+ * qed_db_rec_handler(): Doorbell Recovery handler.
* Run doorbell recovery in case of PF overflow (and flush DORQ if
* needed).
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Int.
*/
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -192,7 +204,7 @@ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
#define QED_SB_EVENT_MASK 0x0003
#define SB_ALIGNED_SIZE(p_hwfn) \
- ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
+ ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
#define QED_SB_INVALID_IDX 0xffff
@@ -223,30 +235,34 @@ struct qed_igu_info {
};
/**
- * @brief - Make sure the IGU CAM reflects the resources provided by MFW
+ * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources
+ * provided by MFW.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Void.
*/
int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Translate the weakly-defined client sb-id into an IGU sb-id
+ * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into
+ * an IGU sb-id
*
- * @param p_hwfn
- * @param sb_id - user provided sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: user provided sb_id.
*
- * @return an index inside IGU CAM where the SB resides
+ * Return: An index inside IGU CAM where the SB resides.
*/
u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief return a pointer to an unused valid SB
+ * qed_get_igu_free_sb(): Return a pointer to an unused valid SB
*
- * @param p_hwfn
- * @param b_is_pf - true iff we want a SB belonging to a PF
+ * @p_hwfn: HW device data.
+ * @b_is_pf: True iff we want a SB belonging to a PF.
*
- * @return point to an igu_block, NULL if none is available
+ * Return: Pointer to an igu_block, or NULL if none is available.
*/
struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn,
bool b_is_pf);
@@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn,
void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_igu_read_cam - Reads the IGU CAM.
+ * qed_int_igu_read_cam(): Reads the IGU CAM.
* This function needs to be called during hardware
* prepare. It reads the info from igu cam to know which
* status block is the default / base status block etc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn,
typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_int_register_cb - Register callback func for
- * slowhwfn statusblock.
- *
- * Every protocol that uses the slowhwfn status block
- * should register a callback function that will be called
- * once there is an update of the sp status block.
- *
- * @param p_hwfn
- * @param comp_cb - function to be called when there is an
- * interrupt on the sp sb
- *
- * @param cookie - passed to the callback function
- * @param sb_idx - OUT parameter which gives the chosen index
- * for this protocol.
- * @param p_fw_cons - pointer to the actual address of the
- * consumer for this protocol.
- *
- * @return int
+ * qed_int_register_cb(): Register callback func for slowhwfn statusblock.
+ *
+ * @p_hwfn: HW device data.
+ * @comp_cb: Function to be called when there is an
+ * interrupt on the sp sb
+ * @cookie: Passed to the callback function
+ * @sb_idx: (OUT) parameter which gives the chosen index
+ * for this protocol.
+ * @p_fw_cons: Pointer to the actual address of the
+ * consumer for this protocol.
+ *
+ * Return: Int.
+ *
+ * Every protocol that uses the slowhwfn status block
+ * should register a callback function that will be called
+ * once there is an update of the sp status block.
*/
int qed_int_register_cb(struct qed_hwfn *p_hwfn,
qed_int_comp_cb_t comp_cb,
@@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn,
__le16 **p_fw_cons);
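The reworded kernel-doc above describes the same registration contract as before; a hedged usage sketch (the callback, cookie handling and function names are invented for illustration):

	/* Sketch: register for slowpath SB updates, then tear down. */
	static int my_proto_sp_cb(struct qed_hwfn *p_hwfn, void *cookie)
	{
		/* consume the sp status block update for this protocol */
		return 0;
	}

	static int my_proto_attach(struct qed_hwfn *p_hwfn, void *my_cookie)
	{
		__le16 *p_fw_cons;
		u8 sb_idx;
		int rc;

		rc = qed_int_register_cb(p_hwfn, my_proto_sp_cb, my_cookie,
					 &sb_idx, &p_fw_cons);
		if (rc)
			return rc;

		/* ... later, when no longer required: */
		return qed_int_unregister_cb(p_hwfn, sb_idx);
	}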
/**
- * @brief qed_int_unregister_cb - Unregisters callback
- * function from sp sb.
- * Partner of qed_int_register_cb -> should be called
- * when no longer required.
+ * qed_int_unregister_cb(): Unregisters callback function from sp sb.
+ *
+ * @p_hwfn: HW device data.
+ * @pi: Producer Index.
*
- * @param p_hwfn
- * @param pi
+ * Return: Int.
*
- * @return int
+ * Partner of qed_int_register_cb -> should be called
+ * when no longer required.
*/
int qed_int_unregister_cb(struct qed_hwfn *p_hwfn,
u8 pi);
/**
- * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id.
+ * qed_int_get_sp_sb_id(): Get the slowhwfn sb id.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u16
+ * Return: u16.
*/
u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn);
/**
- * @brief Status block cleanup. Should be called for each status
- * block that will be used -> both PF / VF
- *
- * @param p_hwfn
- * @param p_ptt
- * @param igu_sb_id - igu status block id
- * @param opaque - opaque fid of the sb owner.
- * @param b_set - set(1) / clear(0)
+ * qed_int_igu_init_pure_rt_single(): Status block cleanup.
+ * Should be called for each status
+ * block that will be used -> both PF / VF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @igu_sb_id: IGU status block id.
+ * @opaque: Opaque fid of the sb owner.
+ * @b_set: Set(1) / Clear(0).
+ *
+ * Return: Void.
*/
void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn,
bool b_set);
/**
- * @brief qed_int_cau_conf - configure cau for a given status
- * block
- *
- * @param p_hwfn
- * @param ptt
- * @param sb_phys
- * @param igu_sb_id
- * @param vf_number
- * @param vf_valid
+ * qed_int_cau_conf_sb(): Configure cau for a given status block.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @sb_phys: SB Physical.
+ * @igu_sb_id: IGU status block id.
+ * @vf_number: VF number
+ * @vf_valid: VF valid or not.
+ *
+ * Return: Void.
*/
void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn,
u8 vf_valid);
/**
- * @brief qed_int_alloc
+ * qed_int_alloc(): QED interrupt alloc.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
*
- * @return int
+ * Return: Int.
*/
int qed_int_alloc(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief qed_int_free
+ * qed_int_free(): QED interrupt free.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_int_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_int_setup
+ * qed_int_setup(): QED interrupt setup.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ *
+ * Return: Void.
*/
void qed_int_setup(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Enable Interrupt & Attention for hw function
+ * qed_int_igu_enable(): Enable Interrupt & Attention for hw function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param int_mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @int_mode: Interrupt mode.
*
- * @return int
+ * Return: Int.
*/
int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
enum qed_int_mode int_mode);
/**
- * @brief - Initialize CAU status block entry
+ * qed_init_cau_sb_entry(): Initialize CAU status block entry.
+ *
+ * @p_hwfn: HW device data.
+ * @p_sb_entry: Pointer SB entry.
+ * @pf_id: PF number
+ * @vf_number: VF number
+ * @vf_valid: VF valid or not.
*
- * @param p_hwfn
- * @param p_sb_entry
- * @param pf_id
- * @param vf_number
- * @param vf_valid
+ * Return: Void.
*/
void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn,
struct cau_sb_entry *p_sb_entry,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
new file mode 100644
index 000000000000..3ccdd3b1d8cb
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_iro_hsi.h
@@ -0,0 +1,500 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_IRO_HSI_H
+#define _QED_IRO_HSI_H
+
+#include <linux/types.h>
+
+enum {
+ IRO_YSTORM_FLOW_CONTROL_MODE_GTT,
+ IRO_PSTORM_PKT_DUPLICATION_CFG,
+ IRO_TSTORM_PORT_STAT,
+ IRO_TSTORM_LL2_PORT_STAT,
+ IRO_TSTORM_PKT_DUPLICATION_CFG,
+ IRO_USTORM_VF_PF_CHANNEL_READY_GTT,
+ IRO_USTORM_FLR_FINAL_ACK_GTT,
+ IRO_USTORM_EQE_CONS_GTT,
+ IRO_USTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_USTORM_COMMON_QUEUE_CONS_GTT,
+ IRO_XSTORM_PQ_INFO,
+ IRO_XSTORM_INTEG_TEST_DATA,
+ IRO_YSTORM_INTEG_TEST_DATA,
+ IRO_PSTORM_INTEG_TEST_DATA,
+ IRO_TSTORM_INTEG_TEST_DATA,
+ IRO_MSTORM_INTEG_TEST_DATA,
+ IRO_USTORM_INTEG_TEST_DATA,
+ IRO_XSTORM_OVERLAY_BUF_ADDR,
+ IRO_YSTORM_OVERLAY_BUF_ADDR,
+ IRO_PSTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_OVERLAY_BUF_ADDR,
+ IRO_MSTORM_OVERLAY_BUF_ADDR,
+ IRO_USTORM_OVERLAY_BUF_ADDR,
+ IRO_TSTORM_LL2_RX_PRODS_GTT,
+ IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_USTORM_PER_QUEUE_STAT,
+ IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT,
+ IRO_MSTORM_QUEUE_STAT,
+ IRO_MSTORM_TPA_TIMEOUT_US,
+ IRO_MSTORM_ETH_VF_PRODS,
+ IRO_MSTORM_ETH_PF_PRODS_GTT,
+ IRO_MSTORM_ETH_PF_STAT,
+ IRO_USTORM_QUEUE_STAT,
+ IRO_USTORM_ETH_PF_STAT,
+ IRO_PSTORM_QUEUE_STAT,
+ IRO_PSTORM_ETH_PF_STAT,
+ IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT,
+ IRO_TSTORM_ETH_PRS_INPUT,
+ IRO_ETH_RX_RATE_LIMIT,
+ IRO_TSTORM_ETH_RSS_UPDATE_GTT,
+ IRO_XSTORM_ETH_QUEUE_ZONE_GTT,
+ IRO_YSTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_CQ_PROD,
+ IRO_USTORM_TOE_GRQ_PROD,
+ IRO_TSTORM_SCSI_CMDQ_CONS_GTT,
+ IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT,
+ IRO_TSTORM_ISCSI_RX_STATS,
+ IRO_MSTORM_ISCSI_RX_STATS,
+ IRO_USTORM_ISCSI_RX_STATS,
+ IRO_XSTORM_ISCSI_TX_STATS,
+ IRO_YSTORM_ISCSI_TX_STATS,
+ IRO_PSTORM_ISCSI_TX_STATS,
+ IRO_TSTORM_FCOE_RX_STATS,
+ IRO_PSTORM_FCOE_TX_STATS,
+ IRO_PSTORM_RDMA_QUEUE_STAT,
+ IRO_TSTORM_RDMA_QUEUE_STAT,
+ IRO_XSTORM_RDMA_ASSERT_LEVEL,
+ IRO_YSTORM_RDMA_ASSERT_LEVEL,
+ IRO_PSTORM_RDMA_ASSERT_LEVEL,
+ IRO_TSTORM_RDMA_ASSERT_LEVEL,
+ IRO_MSTORM_RDMA_ASSERT_LEVEL,
+ IRO_USTORM_RDMA_ASSERT_LEVEL,
+ IRO_XSTORM_IWARP_RXMIT_STATS,
+ IRO_TSTORM_ROCE_EVENTS_STAT,
+ IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS,
+ IRO_YSTORM_ROCE_ERROR_STATS,
+ IRO_PSTORM_ROCE_DCQCN_SENT_STATS,
+ IRO_USTORM_ROCE_CQE_STATS,
+};
+
+/* Pstorm LiteL2 queue statistics */
+
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_OFFSET(core_tx_stats_id) \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].base \
+ + ((core_tx_stats_id) * IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_PSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_PSTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm LightL2 queue statistics */
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_TSTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_TSTORM_PER_QUEUE_STAT].size)
+
+/* Ustorm LiteL2 queue statistics */
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].base \
+ + ((core_rx_queue_id) * IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].m1))
+#define CORE_LL2_USTORM_PER_QUEUE_STAT_SIZE \
+ (IRO[IRO_CORE_LL2_USTORM_PER_QUEUE_STAT].size)
+
+/* Tstorm Eth limit Rx rate */
+#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \
+ (IRO[IRO_ETH_RX_RATE_LIMIT].base \
+ + ((pf_id) * IRO[IRO_ETH_RX_RATE_LIMIT].m1))
+#define ETH_RX_RATE_LIMIT_SIZE (IRO[IRO_ETH_RX_RATE_LIMIT].size)
+
+/* Mstorm ETH PF queues producers */
+#define MSTORM_ETH_PF_PRODS_GTT_OFFSET(queue_id) \
+ (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].base \
+ + ((queue_id) * IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].m1))
+#define MSTORM_ETH_PF_PRODS_GTT_SIZE (IRO[IRO_MSTORM_ETH_PF_PRODS_GTT].size)
+
+/* Mstorm pf statistics */
+#define MSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_MSTORM_ETH_PF_STAT].m1))
+#define MSTORM_ETH_PF_STAT_SIZE (IRO[IRO_MSTORM_ETH_PF_STAT].size)
+
+/* Mstorm ETH VF queues producers offset in RAM. Used in default VF zone
+ * size mode.
+ */
+#define MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id) \
+ (IRO[IRO_MSTORM_ETH_VF_PRODS].base \
+ + ((vf_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m1) \
+ + ((vf_queue_id) * IRO[IRO_MSTORM_ETH_VF_PRODS].m2))
+#define MSTORM_ETH_VF_PRODS_SIZE (IRO[IRO_MSTORM_ETH_VF_PRODS].size)
+
+/* Mstorm Integration Test Data */
+#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_MSTORM_INTEG_TEST_DATA].base)
+#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_MSTORM_INTEG_TEST_DATA].size)
+
+/* Mstorm iSCSI RX stats */
+#define MSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_MSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_ISCSI_RX_STATS].m1))
+#define MSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_MSTORM_ISCSI_RX_STATS].size)
+
+/* Mstorm overlay buffer host address */
+#define MSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].base)
+#define MSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_MSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Mstorm queue statistics */
+#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_MSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_MSTORM_QUEUE_STAT].m1))
+#define MSTORM_QUEUE_STAT_SIZE (IRO[IRO_MSTORM_QUEUE_STAT].size)
+
+/* Mstorm error level for assert */
+#define MSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].m1))
+#define MSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_MSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Mstorm bdq-external-producer of given BDQ resource ID, BDqueue-id */
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define MSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_MSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* TPA aggregation timeout in us resolution (on ASIC) */
+#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[IRO_MSTORM_TPA_TIMEOUT_US].base)
+#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[IRO_MSTORM_TPA_TIMEOUT_US].size)
+
+/* Control frame's EthType configuration for TX control frame security */
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_OFFSET(ethtype_id) \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].base \
+ + ((ethtype_id) * IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].m1))
+#define PSTORM_CTL_FRAME_ETHTYPE_GTT_SIZE \
+ (IRO[IRO_PSTORM_CTL_FRAME_ETHTYPE_GTT].size)
+
+/* Pstorm pf statistics */
+#define PSTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_PSTORM_ETH_PF_STAT].m1))
+#define PSTORM_ETH_PF_STAT_SIZE (IRO[IRO_PSTORM_ETH_PF_STAT].size)
+
+/* Pstorm FCoE TX stats */
+#define PSTORM_FCOE_TX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_FCOE_TX_STATS].base \
+ + ((pf_id) * IRO[IRO_PSTORM_FCOE_TX_STATS].m1))
+#define PSTORM_FCOE_TX_STATS_SIZE (IRO[IRO_PSTORM_FCOE_TX_STATS].size)
+
+/* Pstorm Integration Test Data */
+#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_PSTORM_INTEG_TEST_DATA].base)
+#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_PSTORM_INTEG_TEST_DATA].size)
+
+/* Pstorm iSCSI TX stats */
+#define PSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_PSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_PSTORM_ISCSI_TX_STATS].m1))
+#define PSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_PSTORM_ISCSI_TX_STATS].size)
+
+/* Pstorm overlay buffer host address */
+#define PSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].base)
+#define PSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_PSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Pstorm LL2 packet duplication configuration. Use pstorm_pkt_dup_cfg
+ * data type.
+ */
+#define PSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].m1))
+#define PSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_PSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Pstorm queue statistics */
+#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_PSTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_PSTORM_QUEUE_STAT].m1))
+#define PSTORM_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_QUEUE_STAT].size)
+
+/* Pstorm error level for assert */
+#define PSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].m1))
+#define PSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_PSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Pstorm RDMA queue statistics */
+#define PSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_PSTORM_RDMA_QUEUE_STAT].m1))
+#define PSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_PSTORM_RDMA_QUEUE_STAT].size)
+
+/* DCQCN Sent Statistics */
+#define PSTORM_ROCE_DCQCN_SENT_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].m1))
+#define PSTORM_ROCE_DCQCN_SENT_STATS_SIZE \
+ (IRO[IRO_PSTORM_ROCE_DCQCN_SENT_STATS].size)
+
+/* Tstorm last parser message */
+#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[IRO_TSTORM_ETH_PRS_INPUT].base)
+#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[IRO_TSTORM_ETH_PRS_INPUT].size)
+
+/* RSS indirection table entry update command per PF offset in TSTORM PF BAR0.
+ * Use eth_tstorm_rss_update_data for update.
+ */
+#define TSTORM_ETH_RSS_UPDATE_GTT_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].base \
+ + ((pf_id) * IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].m1))
+#define TSTORM_ETH_RSS_UPDATE_GTT_SIZE \
+ (IRO[IRO_TSTORM_ETH_RSS_UPDATE_GTT].size)
+
+/* Tstorm FCoE RX stats */
+#define TSTORM_FCOE_RX_STATS_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_FCOE_RX_STATS].base \
+ + ((pf_id) * IRO[IRO_TSTORM_FCOE_RX_STATS].m1))
+#define TSTORM_FCOE_RX_STATS_SIZE (IRO[IRO_TSTORM_FCOE_RX_STATS].size)
+
+/* Tstorm Integration Test Data */
+#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_TSTORM_INTEG_TEST_DATA].base)
+#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_TSTORM_INTEG_TEST_DATA].size)
+
+/* Tstorm iSCSI RX stats */
+#define TSTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_TSTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_ISCSI_RX_STATS].m1))
+#define TSTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_TSTORM_ISCSI_RX_STATS].size)
+
+/* Tstorm ll2 port statistics */
+#define TSTORM_LL2_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_LL2_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_LL2_PORT_STAT].m1))
+#define TSTORM_LL2_PORT_STAT_SIZE (IRO[IRO_TSTORM_LL2_PORT_STAT].size)
+
+/* Tstorm producers */
+#define TSTORM_LL2_RX_PRODS_GTT_OFFSET(core_rx_queue_id) \
+ (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].base \
+ + ((core_rx_queue_id) * IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].m1))
+#define TSTORM_LL2_RX_PRODS_GTT_SIZE (IRO[IRO_TSTORM_LL2_RX_PRODS_GTT].size)
+
+/* Tstorm overlay buffer host address */
+#define TSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].base)
+
+#define TSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_TSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Tstorm LL2 packet duplication configuration.
+ * Use tstorm_pkt_dup_cfg data type.
+ */
+#define TSTORM_PKT_DUPLICATION_CFG_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].base \
+ + ((pf_id) * IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].m1))
+#define TSTORM_PKT_DUPLICATION_CFG_SIZE \
+ (IRO[IRO_TSTORM_PKT_DUPLICATION_CFG].size)
+
+/* Tstorm port statistics */
+#define TSTORM_PORT_STAT_OFFSET(port_id) \
+ (IRO[IRO_TSTORM_PORT_STAT].base \
+ + ((port_id) * IRO[IRO_TSTORM_PORT_STAT].m1))
+#define TSTORM_PORT_STAT_SIZE (IRO[IRO_TSTORM_PORT_STAT].size)
+
+/* Tstorm error level for assert */
+#define TSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].m1))
+#define TSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_TSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Tstorm RDMA queue statistics */
+#define TSTORM_RDMA_QUEUE_STAT_OFFSET(rdma_stat_counter_id) \
+ (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].base \
+ + ((rdma_stat_counter_id) * IRO[IRO_TSTORM_RDMA_QUEUE_STAT].m1))
+#define TSTORM_RDMA_QUEUE_STAT_SIZE (IRO[IRO_TSTORM_RDMA_QUEUE_STAT].size)
+
+/* Tstorm RoCE Event Statistics */
+#define TSTORM_ROCE_EVENTS_STAT_OFFSET(roce_pf_id) \
+ (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].base \
+ + ((roce_pf_id) * IRO[IRO_TSTORM_ROCE_EVENTS_STAT].m1))
+#define TSTORM_ROCE_EVENTS_STAT_SIZE (IRO[IRO_TSTORM_ROCE_EVENTS_STAT].size)
+
+/* Tstorm (reflects M-Storm) bdq-external-producer of given function ID,
+ * BDqueue-id.
+ */
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_OFFSET(storage_func_id, bdq_id) \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].base \
+ + ((storage_func_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m1) \
+ + ((bdq_id) * IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].m2))
+#define TSTORM_SCSI_BDQ_EXT_PROD_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_BDQ_EXT_PROD_GTT].size)
+
+/* Tstorm cmdq-cons of given command queue-id */
+#define TSTORM_SCSI_CMDQ_CONS_GTT_OFFSET(cmdq_queue_id) \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].base \
+ + ((cmdq_queue_id) * IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].m1))
+#define TSTORM_SCSI_CMDQ_CONS_GTT_SIZE \
+ (IRO[IRO_TSTORM_SCSI_CMDQ_CONS_GTT].size)
+
+/* Ustorm Common Queue ring consumer */
+#define USTORM_COMMON_QUEUE_CONS_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].m1))
+#define USTORM_COMMON_QUEUE_CONS_GTT_SIZE \
+ (IRO[IRO_USTORM_COMMON_QUEUE_CONS_GTT].size)
+
+/* Ustorm Event ring consumer */
+#define USTORM_EQE_CONS_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_EQE_CONS_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_EQE_CONS_GTT].m1))
+#define USTORM_EQE_CONS_GTT_SIZE (IRO[IRO_USTORM_EQE_CONS_GTT].size)
+
+/* Ustorm pf statistics */
+#define USTORM_ETH_PF_STAT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_ETH_PF_STAT].base \
+ + ((pf_id) * IRO[IRO_USTORM_ETH_PF_STAT].m1))
+#define USTORM_ETH_PF_STAT_SIZE (IRO[IRO_USTORM_ETH_PF_STAT].size)
+
+/* Ustorm eth queue zone */
+#define USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_zone_id) \
+ (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_zone_id) * IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define USTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_USTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Ustorm Final flr cleanup ack */
+#define USTORM_FLR_FINAL_ACK_GTT_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].base \
+ + ((pf_id) * IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].m1))
+#define USTORM_FLR_FINAL_ACK_GTT_SIZE (IRO[IRO_USTORM_FLR_FINAL_ACK_GTT].size)
+
+/* Ustorm Integration Test Data */
+#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_USTORM_INTEG_TEST_DATA].base)
+#define USTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_USTORM_INTEG_TEST_DATA].size)
+
+/* Ustorm iSCSI RX stats */
+#define USTORM_ISCSI_RX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_USTORM_ISCSI_RX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_USTORM_ISCSI_RX_STATS].m1))
+#define USTORM_ISCSI_RX_STATS_SIZE (IRO[IRO_USTORM_ISCSI_RX_STATS].size)
+
+/* Ustorm overlay buffer host address */
+#define USTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].base)
+#define USTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_USTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ustorm queue statistics */
+#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \
+ (IRO[IRO_USTORM_QUEUE_STAT].base \
+ + ((stat_counter_id) * IRO[IRO_USTORM_QUEUE_STAT].m1))
+#define USTORM_QUEUE_STAT_SIZE (IRO[IRO_USTORM_QUEUE_STAT].size)
+
+/* Ustorm error level for assert */
+#define USTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].m1))
+#define USTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_USTORM_RDMA_ASSERT_LEVEL].size)
+
+/* RoCE CQEs Statistics */
+#define USTORM_ROCE_CQE_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_USTORM_ROCE_CQE_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_USTORM_ROCE_CQE_STATS].m1))
+#define USTORM_ROCE_CQE_STATS_SIZE (IRO[IRO_USTORM_ROCE_CQE_STATS].size)
+
+/* Ustorm cqe producer */
+#define USTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_USTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_USTORM_TOE_CQ_PROD].m1))
+#define USTORM_TOE_CQ_PROD_SIZE (IRO[IRO_USTORM_TOE_CQ_PROD].size)
+
+/* Ustorm grq producer */
+#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \
+ (IRO[IRO_USTORM_TOE_GRQ_PROD].base \
+ + ((pf_id) * IRO[IRO_USTORM_TOE_GRQ_PROD].m1))
+#define USTORM_TOE_GRQ_PROD_SIZE (IRO[IRO_USTORM_TOE_GRQ_PROD].size)
+
+/* Ustorm VF-PF Channel ready flag */
+#define USTORM_VF_PF_CHANNEL_READY_GTT_OFFSET(vf_id) \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].base \
+ + ((vf_id) * IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].m1))
+#define USTORM_VF_PF_CHANNEL_READY_GTT_SIZE \
+ (IRO[IRO_USTORM_VF_PF_CHANNEL_READY_GTT].size)
+
+/* Xstorm queue zone */
+#define XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(queue_id) \
+ (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].base \
+ + ((queue_id) * IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].m1))
+#define XSTORM_ETH_QUEUE_ZONE_GTT_SIZE (IRO[IRO_XSTORM_ETH_QUEUE_ZONE_GTT].size)
+
+/* Xstorm Integration Test Data */
+#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_XSTORM_INTEG_TEST_DATA].base)
+#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_XSTORM_INTEG_TEST_DATA].size)
+
+/* Xstorm iSCSI TX stats */
+#define XSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_XSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_XSTORM_ISCSI_TX_STATS].m1))
+#define XSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_XSTORM_ISCSI_TX_STATS].size)
+
+/* Xstorm iWARP rxmit stats */
+#define XSTORM_IWARP_RXMIT_STATS_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].base \
+ + ((pf_id) * IRO[IRO_XSTORM_IWARP_RXMIT_STATS].m1))
+#define XSTORM_IWARP_RXMIT_STATS_SIZE (IRO[IRO_XSTORM_IWARP_RXMIT_STATS].size)
+
+/* Xstorm overlay buffer host address */
+#define XSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].base)
+#define XSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_XSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Xstorm common PQ info */
+#define XSTORM_PQ_INFO_OFFSET(pq_id) \
+ (IRO[IRO_XSTORM_PQ_INFO].base \
+ + ((pq_id) * IRO[IRO_XSTORM_PQ_INFO].m1))
+#define XSTORM_PQ_INFO_SIZE (IRO[IRO_XSTORM_PQ_INFO].size)
+
+/* Xstorm error level for assert */
+#define XSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].m1))
+#define XSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_XSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* Ystorm flow control mode. Use enum fw_flow_ctrl_mode */
+#define YSTORM_FLOW_CONTROL_MODE_GTT_OFFSET \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].base)
+#define YSTORM_FLOW_CONTROL_MODE_GTT_SIZE \
+ (IRO[IRO_YSTORM_FLOW_CONTROL_MODE_GTT].size)
+
+/* Ystorm Integration Test Data */
+#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[IRO_YSTORM_INTEG_TEST_DATA].base)
+#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[IRO_YSTORM_INTEG_TEST_DATA].size)
+
+/* Ystorm iSCSI TX stats */
+#define YSTORM_ISCSI_TX_STATS_OFFSET(storage_func_id) \
+ (IRO[IRO_YSTORM_ISCSI_TX_STATS].base \
+ + ((storage_func_id) * IRO[IRO_YSTORM_ISCSI_TX_STATS].m1))
+#define YSTORM_ISCSI_TX_STATS_SIZE (IRO[IRO_YSTORM_ISCSI_TX_STATS].size)
+
+/* Ystorm overlay buffer host address */
+#define YSTORM_OVERLAY_BUF_ADDR_OFFSET (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].base)
+#define YSTORM_OVERLAY_BUF_ADDR_SIZE (IRO[IRO_YSTORM_OVERLAY_BUF_ADDR].size)
+
+/* Ystorm error level for assert */
+#define YSTORM_RDMA_ASSERT_LEVEL_OFFSET(pf_id) \
+ (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].base \
+ + ((pf_id) * IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].m1))
+#define YSTORM_RDMA_ASSERT_LEVEL_SIZE (IRO[IRO_YSTORM_RDMA_ASSERT_LEVEL].size)
+
+/* DCQCN Received Statistics */
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].m1))
+#define YSTORM_ROCE_DCQCN_RECEIVED_STATS_SIZE \
+ (IRO[IRO_YSTORM_ROCE_DCQCN_RECEIVED_STATS].size)
+
+/* RoCE Error Statistics */
+#define YSTORM_ROCE_ERROR_STATS_OFFSET(roce_pf_id) \
+ (IRO[IRO_YSTORM_ROCE_ERROR_STATS].base \
+ + ((roce_pf_id) * IRO[IRO_YSTORM_ROCE_ERROR_STATS].m1))
+#define YSTORM_ROCE_ERROR_STATS_SIZE (IRO[IRO_YSTORM_ROCE_ERROR_STATS].size)
+
+/* Ystorm cqe producer */
+#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \
+ (IRO[IRO_YSTORM_TOE_CQ_PROD].base \
+ + ((rss_id) * IRO[IRO_YSTORM_TOE_CQ_PROD].m1))
+#define YSTORM_TOE_CQ_PROD_SIZE (IRO[IRO_YSTORM_TOE_CQ_PROD].size)
+
+/* Per-chip offsets in iro_arr in dwords */
+#define E4_IRO_ARR_OFFSET 0
+#endif
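To make the addressing scheme concrete, here is a worked expansion of one of the macros above. It only restates what the header already encodes: each IRO entry supplies a fixed base plus per-index strides, with the concrete dword values coming from iro_arr in qed_init_ops.c.

/* Example: producer address for VF 3, VF-queue 1.
 *
 *	MSTORM_ETH_VF_PRODS_OFFSET(3, 1)
 *	  == IRO[IRO_MSTORM_ETH_VF_PRODS].base
 *	     + 3 * IRO[IRO_MSTORM_ETH_VF_PRODS].m1
 *	     + 1 * IRO[IRO_MSTORM_ETH_VF_PRODS].m2
 *
 * i.e. a fixed RAM base plus a per-VF stride (m1) and a per-queue stride
 * (m2), all taken from the firmware-generated iro_arr table.
 */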
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
index db926d8b3033..511ab214eb9c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c
@@ -29,6 +29,7 @@
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
+#include "qed_iro_hsi.h"
#include "qed_iscsi.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
@@ -627,10 +628,9 @@ static void __iomem *qed_iscsi_get_primary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
@@ -642,10 +642,9 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
{
if (RESC_NUM(p_hwfn, QED_BDQ)) {
return (u8 __iomem *)p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM +
- TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(RESC_START(p_hwfn,
- QED_BDQ),
- bdq_id);
+ GET_GTT_BDQ_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_SCSI_BDQ_EXT_PROD,
+ RESC_START(p_hwfn, QED_BDQ), bdq_id);
} else {
DP_NOTICE(p_hwfn, "BDQ is not allocated!\n");
return NULL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
index dab7a5d09f87..dec2b00259d4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h
@@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn);
void qed_iscsi_free(struct qed_hwfn *p_hwfn);
/**
- * @brief - Fills provided statistics struct with statistics.
+ * qed_get_protocol_stats_iscsi(): Fills provided statistics
+ * struct with statistics.
*
- * @param cdev
- * @param stats - points to struct that will be filled with statistics.
+ * @cdev: Qed dev pointer.
+ * @stats: Points to struct that will be filled with statistics.
+ *
+ * Return: Void.
*/
void qed_get_protocol_stats_iscsi(struct qed_dev *cdev,
struct qed_mcp_iscsi_stats *stats);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index 186d0048a9d1..1d1d4caad680 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -114,6 +114,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+ p_ramrod->tcp.tx_sws_timer = cpu_to_le16(QED_TX_SWS_TIMER_DFLT);
+ p_ramrod->tcp.two_msl_timer = cpu_to_le32(QED_TWO_MSL_TIMER_DFLT);
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
return;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index dfaf10edfabf..2edd6bf64a3c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -28,6 +28,7 @@
#include "qed_dev_api.h"
#include <linux/qed/qed_eth_if.h>
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_l2.h"
@@ -37,7 +38,6 @@
#include "qed_sp.h"
#include "qed_sriov.h"
-
#define QED_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
@@ -904,9 +904,10 @@ qed_eth_pf_rx_queue_start(struct qed_hwfn *p_hwfn,
{
u32 init_prod_val = 0;
- *pp_prod = p_hwfn->regview +
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);
+ *pp_prod = (u8 __iomem *)
+ p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_MSDM_RAM,
+ MSTORM_ETH_PF_PRODS, p_cid->abs.queue_id);
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
@@ -1111,7 +1112,6 @@ qed_eth_pf_tx_queue_start(struct qed_hwfn *p_hwfn,
{
int rc;
-
rc = qed_eth_txq_start_ramrod(p_hwfn, p_cid,
pbl_addr, pbl_size,
qed_get_cm_pq_idx_mcos(p_hwfn, tc));
@@ -2010,7 +2010,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_cb,
struct qed_ntuple_filter_params *p_params)
{
- struct rx_update_gft_filter_data *p_ramrod = NULL;
+ struct rx_update_gft_filter_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u16 abs_rx_q_id = 0;
@@ -2031,7 +2031,7 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
}
rc = qed_sp_init_request(p_hwfn, &p_ent,
- ETH_RAMROD_GFT_UPDATE_FILTER,
+ ETH_RAMROD_RX_UPDATE_GFT_FILTER,
PROTOCOLID_ETH, &init_data);
if (rc)
return rc;
@@ -2100,7 +2100,7 @@ int qed_get_rxq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES0);
address = BAR0_MAP_REG_USDM_RAM +
- USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ USTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2134,7 +2134,7 @@ int qed_get_txq_coalesce(struct qed_hwfn *p_hwfn,
CAU_SB_ENTRY_TIMER_RES1);
address = BAR0_MAP_REG_XSDM_RAM +
- XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
+ XSTORM_ETH_QUEUE_ZONE_GTT_OFFSET(p_cid->abs.queue_id);
coalesce = qed_rd(p_hwfn, p_ptt, address);
is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
@@ -2763,25 +2763,6 @@ static int qed_configure_filter_mcast(struct qed_dev *cdev,
return qed_filter_mcast_cmd(cdev, &mcast, QED_SPQ_MODE_CB, NULL);
}
-static int qed_configure_filter(struct qed_dev *cdev,
- struct qed_filter_params *params)
-{
- enum qed_filter_rx_mode_type accept_flags;
-
- switch (params->type) {
- case QED_FILTER_TYPE_UCAST:
- return qed_configure_filter_ucast(cdev, &params->filter.ucast);
- case QED_FILTER_TYPE_MCAST:
- return qed_configure_filter_mcast(cdev, &params->filter.mcast);
- case QED_FILTER_TYPE_RX_MODE:
- accept_flags = params->filter.accept_flags;
- return qed_configure_filter_rx_mode(cdev, accept_flags);
- default:
- DP_NOTICE(cdev, "Unknown filter type %d\n", (int)params->type);
- return -EINVAL;
- }
-}
-
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
enum qed_filter_config_mode mode)
{
@@ -2867,7 +2848,7 @@ static int qed_fp_cqe_completion(struct qed_dev *dev,
cqe);
}
-static int qed_req_bulletin_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_req_bulletin_update_mac(struct qed_dev *cdev, const u8 *mac)
{
int i, ret;
@@ -2904,7 +2885,9 @@ static const struct qed_eth_ops qed_eth_ops_pass = {
.q_rx_stop = &qed_stop_rxq,
.q_tx_start = &qed_start_txq,
.q_tx_stop = &qed_stop_txq,
- .filter_config = &qed_configure_filter,
+ .filter_config_rx_mode = &qed_configure_filter_rx_mode,
+ .filter_config_ucast = &qed_configure_filter_ucast,
+ .filter_config_mcast = &qed_configure_filter_mcast,
.fastpath_stop = &qed_fastpath_stop,
.eth_cqe_completion = &qed_fp_cqe_completion,
.get_vport_stats = &qed_get_vport_stats,
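
The hunk above replaces the single filter_config callback with three dedicated qed_eth_ops entries. A minimal caller-side sketch of the resulting dispatch follows; the helper name, its qed_filter_params argument and the exact callback prototypes are assumptions for illustration only, not part of this patch.

/* Illustrative only: mirrors the switch that the removed
 * qed_configure_filter() used to perform inside qed, now done by the
 * upper driver through the split qed_eth_ops callbacks added above.
 * The helper name and the qed_filter_params usage are hypothetical.
 */
static int example_apply_filter(struct qed_dev *cdev,
				const struct qed_eth_ops *ops,
				struct qed_filter_params *params)
{
	switch (params->type) {
	case QED_FILTER_TYPE_UCAST:
		return ops->filter_config_ucast(cdev, &params->filter.ucast);
	case QED_FILTER_TYPE_MCAST:
		return ops->filter_config_mcast(cdev, &params->filter.mcast);
	case QED_FILTER_TYPE_RX_MODE:
		return ops->filter_config_rx_mode(cdev,
						  params->filter.accept_flags);
	default:
		return -EINVAL;
	}
}
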
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8eceeebb1a7b..a538cf478c14 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -92,18 +92,18 @@ struct qed_filter_mcast {
};
/**
- * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
+ * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue.
*
- * @param p_hwfn
- * @param p_rxq Handler of queue to close
- * @param eq_completion_only If True completion will be on
- * EQe, if False completion will be
- * on EQe if p_hwfn opaque
- * different from the RXQ opaque
- * otherwise on CQe.
- * @param cqe_completion If True completion will be
- * receive on CQe.
- * @return int
+ * @p_hwfn: HW device data.
+ * @p_rxq: Handler of queue to close.
+ * @eq_completion_only: If True, completion will be on
+ * EQe. If False, completion will be
+ * on EQe if p_hwfn opaque is
+ * different from the RXQ opaque,
+ * otherwise on CQe.
+ * @cqe_completion: If True, completion will be received on CQe.
+ *
+ * Return: Int.
*/
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
@@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
bool eq_completion_only, bool cqe_completion);
/**
- * @brief qed_eth_tx_queue_stop - closes a Tx queue
+ * qed_eth_tx_queue_stop(): Closes a Tx queue.
*
- * @param p_hwfn
- * @param p_txq - handle to Tx queue needed to be closed
+ * @p_hwfn: HW device data.
+ * @p_txq: handle to Tx queue needed to be closed.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
@@ -146,7 +146,6 @@ struct qed_sp_vport_start_params {
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
-
struct qed_filter_accept_flags {
u8 update_rx_mode_config;
u8 update_tx_mode_config;
@@ -205,16 +204,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_vport_stop -
- *
- * This ramrod closes a VPort after all its RX and TX queues are terminated.
- * An Assert is generated if any queues are left open.
+ * qed_sp_vport_stop(): This ramrod closes a VPort after all its
+ * RX and TX queues are terminated.
+ * An Assert is generated if any queues are left open.
*
- * @param p_hwfn
- * @param opaque_fid
- * @param vport_id VPort ID
+ * @p_hwfn: HW device data.
+ * @opaque_fid: Opaque FID
+ * @vport_id: VPort ID.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id);
@@ -225,22 +223,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_rx_eth_queues_update -
+ * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue.
+ * It is used for setting the active state
+ * of the queue and updating the TPA and
+ * SGE parameters.
+ * @p_hwfn: HW device data.
+ * @pp_rxq_handlers: An array of queue handlers to be updated.
+ * @num_rxqs: number of queues to update.
+ * @complete_cqe_flg: Post completion to the CQE Ring if set.
+ * @complete_event_flg: Post completion to the Event Ring if set.
+ * @comp_mode: Completion mode.
+ * @p_comp_data: Pointer to completion data.
*
- * This ramrod updates an RX queue. It is used for setting the active state
- * of the queue and updating the TPA and SGE parameters.
+ * Return: Int.
*
- * @note At the moment - only used by non-linux VFs.
- *
- * @param p_hwfn
- * @param pp_rxq_handlers An array of queue handlers to be updated.
- * @param num_rxqs number of queues to update.
- * @param complete_cqe_flg Post completion to the CQE Ring if set
- * @param complete_event_flg Post completion to the Event Ring if set
- * @param comp_mode
- * @param p_comp_data
- *
- * @return int
+ * Note: At the moment - only used by non-Linux VFs.
*/
int
@@ -257,30 +254,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
void qed_reset_vport_stats(struct qed_dev *cdev);
/**
- * *@brief qed_arfs_mode_configure -
- *
- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ * qed_arfs_mode_configure(): Enable or disable rfs mode.
+ * It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable
+ * rfs mode.
*
- **@param p_hwfn
- **@param p_ptt
- **@param p_cfg_params - arfs mode configuration parameters.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_cfg_params: arfs mode configuration parameters.
*
+ * Return: Void.
*/
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_arfs_config_params *p_cfg_params);
/**
- * @brief - qed_configure_rfs_ntuple_filter
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove an ARFS HW filter.
*
- * This ramrod should be used to add or remove arfs hw filter
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB, where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @p_params: Pointer to params.
*
- * @params p_hwfn
- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
- * it with cookie and callback function address, if not
- * using this mode then client must pass NULL.
- * @params p_params
+ * Return: Int.
*/
int
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
@@ -374,16 +373,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_start_params *p_params);
/**
- * @brief - Starts an Rx queue, when queue_cid is already prepared
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
+ * already prepared
*
- * @param p_hwfn
- * @param p_cid
- * @param bd_max_bytes
- * @param bd_chain_phys_addr
- * @param cqe_pbl_addr
- * @param cqe_pbl_size
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @bd_max_bytes: Max bytes.
+ * @bd_chain_phys_addr: Chain physical address.
+ * @cqe_pbl_addr: PBL address.
+ * @cqe_pbl_size: PBL size.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
@@ -393,15 +393,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
/**
- * @brief - Starts a Tx queue, where queue_cid is already prepared
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
+ * already prepared
*
- * @param p_hwfn
- * @param p_cid
- * @param pbl_addr
- * @param pbl_size
- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer to CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: PQ ID to use for this Tx queue.
*
- * @return int
+ * Return: Int.
*/
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c46a7f756ed5..ed274f033626 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -28,6 +28,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_ll2.h"
@@ -43,6 +44,8 @@
#define QED_LL2_TX_SIZE (256)
#define QED_LL2_RX_SIZE (4096)
+#define QED_LL2_INVALID_STATS_ID 0xff
+
struct qed_cb_ll2_info {
int rx_cnt;
u32 rx_size;
@@ -62,6 +65,29 @@ struct qed_ll2_buffer {
dma_addr_t phys_addr;
};
+static u8 qed_ll2_handle_to_stats_id(struct qed_hwfn *p_hwfn,
+ u8 ll2_queue_type, u8 qid)
+{
+ u8 stats_id;
+
+ /* For legacy (RAM based) queues, the stats_id will be set as the
+ * queue_id. Otherwise (context based queue), it will be set to
+ * the "abs_pf_id" offset from the end of the RAM based queue IDs.
+ * If the final value exceeds the total counters amount, return
+ * INVALID value to indicate that the stats for this connection should
+ * be disabled.
+ */
+ if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
+ stats_id = qid;
+ else
+ stats_id = MAX_NUM_LL2_RX_RAM_QUEUES + p_hwfn->abs_pf_id;
+
+ if (stats_id < MAX_NUM_LL2_TX_STATS_COUNTERS)
+ return stats_id;
+ else
+ return QED_LL2_INVALID_STATS_ID;
+}
+
static void qed_ll2b_complete_tx_packet(void *cxt,
u8 connection_handle,
void *cookie,
@@ -106,7 +132,7 @@ static int qed_ll2_alloc_buffer(struct qed_dev *cdev,
}
static int qed_ll2_dealloc_buffer(struct qed_dev *cdev,
- struct qed_ll2_buffer *buffer)
+ struct qed_ll2_buffer *buffer)
{
spin_lock_bh(&cdev->ll2->lock);
@@ -352,7 +378,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
num_bds_in_packet = p_pkt->bd_used;
list_del(&p_pkt->list_entry);
- if (num_bds < num_bds_in_packet) {
+ if (unlikely(num_bds < num_bds_in_packet)) {
DP_NOTICE(p_hwfn,
"Rest of BDs does not cover whole packet\n");
goto out;
@@ -462,7 +488,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
if (!list_empty(&p_rx->active_descq))
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if (!p_pkt) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"[%d] LL2 Rx completion but active_descq is empty\n",
p_ll2_conn->input.conn_type);
@@ -475,7 +501,7 @@ qed_ll2_rxq_handle_completion(struct qed_hwfn *p_hwfn,
qed_ll2_rxq_parse_reg(p_hwfn, p_cqe, &data);
else
qed_ll2_rxq_parse_gsi(p_hwfn, p_cqe, &data);
- if (qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd)
+ if (unlikely(qed_chain_consume(&p_rx->rxq_chain) != p_pkt->rxq_bd))
DP_NOTICE(p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
@@ -597,18 +623,18 @@ static bool
qed_ll2_lb_rxq_handler_slowpath(struct qed_hwfn *p_hwfn,
struct core_rx_slow_path_cqe *p_cqe)
{
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u32 cid;
if (p_cqe->ramrod_cmd_id != CORE_RAMROD_RX_QUEUE_FLUSH)
return false;
- iscsi_ooo = (struct ooo_opaque *)&p_cqe->opaque_data;
- if (iscsi_ooo->ooo_opcode != TCP_EVENT_DELETE_ISLES)
+ ooo_opq = (struct ooo_opaque *)&p_cqe->opaque_data;
+ if (ooo_opq->ooo_opcode != TCP_EVENT_DELETE_ISLES)
return false;
/* Need to make a flush */
- cid = le32_to_cpu(iscsi_ooo->cid);
+ cid = le32_to_cpu(ooo_opq->cid);
qed_ooo_release_connection_isles(p_hwfn, p_hwfn->p_ooo_info, cid);
return true;
@@ -624,7 +650,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
union core_rx_cqe_union *cqe = NULL;
u16 cq_new_idx = 0, cq_old_idx = 0;
struct qed_ooo_buffer *p_buffer;
- struct ooo_opaque *iscsi_ooo;
+ struct ooo_opaque *ooo_opq;
u8 placement_offset = 0;
u8 cqe_type;
@@ -645,7 +671,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
&cqe->rx_cqe_sp))
continue;
- if (cqe_type != CORE_RX_CQE_TYPE_REGULAR) {
+ if (unlikely(cqe_type != CORE_RX_CQE_TYPE_REGULAR)) {
DP_NOTICE(p_hwfn,
"Got a non-regular LB LL2 completion [type 0x%02x]\n",
cqe_type);
@@ -657,22 +683,21 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
parse_flags = le16_to_cpu(p_cqe_fp->parse_flags.flags);
packet_length = le16_to_cpu(p_cqe_fp->packet_length);
vlan = le16_to_cpu(p_cqe_fp->vlan);
- iscsi_ooo = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
- qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info,
- iscsi_ooo);
- cid = le32_to_cpu(iscsi_ooo->cid);
+ ooo_opq = (struct ooo_opaque *)&p_cqe_fp->opaque_data;
+ qed_ooo_save_history_entry(p_hwfn, p_hwfn->p_ooo_info, ooo_opq);
+ cid = le32_to_cpu(ooo_opq->cid);
/* Process delete isle first */
- if (iscsi_ooo->drop_size)
+ if (ooo_opq->drop_size)
qed_ooo_delete_isles(p_hwfn, p_hwfn->p_ooo_info, cid,
- iscsi_ooo->drop_isle,
- iscsi_ooo->drop_size);
+ ooo_opq->drop_isle,
+ ooo_opq->drop_size);
- if (iscsi_ooo->ooo_opcode == TCP_EVENT_NOP)
+ if (ooo_opq->ooo_opcode == TCP_EVENT_NOP)
continue;
/* Now process create/add/join isles */
- if (list_empty(&p_rx->active_descq)) {
+ if (unlikely(list_empty(&p_rx->active_descq))) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX chain has no submitted buffers\n"
);
@@ -682,12 +707,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
p_pkt = list_first_entry(&p_rx->active_descq,
struct qed_ll2_rx_packet, list_entry);
- if ((iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_ADD_PEN) ||
- (iscsi_ooo->ooo_opcode == TCP_EVENT_JOIN)) {
- if (!p_pkt) {
+ if (likely(ooo_opq->ooo_opcode == TCP_EVENT_ADD_NEW_ISLE ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_RIGHT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_ISLE_LEFT ||
+ ooo_opq->ooo_opcode == TCP_EVENT_ADD_PEN ||
+ ooo_opq->ooo_opcode == TCP_EVENT_JOIN)) {
+ if (unlikely(!p_pkt)) {
DP_NOTICE(p_hwfn,
"LL2 OOO RX packet is not valid\n");
return -EIO;
@@ -701,19 +726,19 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_chain_consume(&p_rx->rxq_chain);
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);
- switch (iscsi_ooo->ooo_opcode) {
+ switch (ooo_opq->ooo_opcode) {
case TCP_EVENT_ADD_NEW_ISLE:
qed_ooo_add_new_isle(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer);
break;
case TCP_EVENT_ADD_ISLE_RIGHT:
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_RIGHT_BUF);
break;
@@ -721,7 +746,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle,
+ ooo_opq->ooo_isle,
p_buffer,
QED_OOO_LEFT_BUF);
break;
@@ -729,13 +754,12 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
qed_ooo_add_new_buffer(p_hwfn,
p_hwfn->p_ooo_info,
cid,
- iscsi_ooo->ooo_isle +
- 1,
+ ooo_opq->ooo_isle + 1,
p_buffer,
QED_OOO_LEFT_BUF);
qed_ooo_join_isles(p_hwfn,
p_hwfn->p_ooo_info,
- cid, iscsi_ooo->ooo_isle);
+ cid, ooo_opq->ooo_isle);
break;
case TCP_EVENT_ADD_PEN:
num_ooo_add_to_peninsula++;
@@ -747,7 +771,7 @@ static int qed_ll2_lb_rxq_handler(struct qed_hwfn *p_hwfn,
} else {
DP_NOTICE(p_hwfn,
"Unexpected event (%d) TX OOO completion\n",
- iscsi_ooo->ooo_opcode);
+ ooo_opq->ooo_opcode);
}
}
@@ -859,16 +883,16 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
u16 new_idx = 0, num_bds = 0;
int rc;
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return 0;
- if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
+ if (unlikely(!QED_LL2_TX_REGISTERED(p_ll2_conn)))
return 0;
new_idx = le16_to_cpu(*p_tx->p_fw_cons);
num_bds = ((s16)new_idx - (s16)p_tx->bds_idx);
- if (!num_bds)
+ if (unlikely(!num_bds))
return 0;
while (num_bds) {
@@ -877,10 +901,10 @@ static int qed_ll2_lb_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
p_pkt = list_first_entry(&p_tx->active_descq,
struct qed_ll2_tx_packet, list_entry);
- if (!p_pkt)
+ if (unlikely(!p_pkt))
return -EINVAL;
- if (p_pkt->bd_used != 1) {
+ if (unlikely(p_pkt->bd_used != 1)) {
DP_NOTICE(p_hwfn,
"Unexpectedly many BDs(%d) in TX OOO completion\n",
p_pkt->bd_used);
@@ -1008,7 +1032,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO)
+ if (likely(p_ll2_conn->input.conn_type == QED_LL2_TYPE_OOO))
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1124,6 +1148,7 @@ static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
+
qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg);
/* Get SPQ entry */
@@ -1533,7 +1558,7 @@ static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
@@ -1544,7 +1569,7 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
int rc = -EINVAL;
u32 i, capacity;
size_t desc_size;
- u8 qid;
+ u8 qid, stats_id;
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
@@ -1610,16 +1635,32 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
+ stats_id = qed_ll2_handle_to_stats_id(p_hwfn,
+ p_ll2_conn->input.rx_conn_type,
+ qid);
p_ll2_conn->queue_id = qid;
- p_ll2_conn->tx_stats_id = qid;
+ p_ll2_conn->tx_stats_id = stats_id;
- DP_VERBOSE(p_hwfn, QED_MSG_LL2,
- "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
- p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
+ /* If there is no valid stats id for this connection, disable stats */
+ if (p_ll2_conn->tx_stats_id == QED_LL2_INVALID_STATS_ID) {
+ p_ll2_conn->tx_stats_en = 0;
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Disabling stats for queue %d - not enough counters\n",
+ qid);
+ }
+
+ DP_VERBOSE(p_hwfn,
+ QED_MSG_LL2,
+ "Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d stats_id=%d\n",
+ p_hwfn->rel_pf_id,
+ p_ll2_conn->input.rx_conn_type, qid, stats_id);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
- p_rx->set_prod_addr = p_hwfn->regview +
- GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
+ p_rx->set_prod_addr =
+ (u8 __iomem *)p_hwfn->regview +
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_TSDM_RAM,
+ TSTORM_LL2_RX_PRODS, qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
@@ -1762,7 +1803,7 @@ int qed_ll2_post_rx_buffer(void *cxt,
}
}
- /* If we're lacking entires, let's try to flush buffers to FW */
+ /* If we're lacking entries, let's try to flush buffers to FW */
if (!p_curp || !p_curb) {
rc = -EBUSY;
p_curp = NULL;
@@ -1842,8 +1883,8 @@ qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
}
start_bd = (struct core_tx_bd *)qed_chain_produce(p_tx_chain);
- if (QED_IS_IWARP_PERSONALITY(p_hwfn) &&
- p_ll2->input.conn_type == QED_LL2_TYPE_OOO) {
+ if (likely(QED_IS_IWARP_PERSONALITY(p_hwfn) &&
+ p_ll2->input.conn_type == QED_LL2_TYPE_OOO)) {
start_bd->nw_vlan_or_lb_echo =
cpu_to_le16(IWARP_LL2_IN_ORDER_TX_QUEUE);
} else {
@@ -1964,28 +2005,29 @@ int qed_ll2_prepare_tx_packet(void *cxt,
int rc = 0;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
p_tx = &p_ll2_conn->tx_queue;
p_tx_chain = &p_tx->txq_chain;
- if (pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet)
+ if (unlikely(pkt->num_of_bds > p_ll2_conn->input.tx_max_bds_per_packet))
return -EIO;
spin_lock_irqsave(&p_tx->lock, flags);
- if (p_tx->cur_send_packet) {
+ if (unlikely(p_tx->cur_send_packet)) {
rc = -EEXIST;
goto out;
}
/* Get entry, but only if we have tx elements for it */
- if (!list_empty(&p_tx->free_descq))
+ if (unlikely(!list_empty(&p_tx->free_descq)))
p_curp = list_first_entry(&p_tx->free_descq,
struct qed_ll2_tx_packet, list_entry);
- if (p_curp && qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds)
+ if (unlikely(p_curp &&
+ qed_chain_get_elem_left(p_tx_chain) < pkt->num_of_bds))
p_curp = NULL;
- if (!p_curp) {
+ if (unlikely(!p_curp)) {
rc = -EBUSY;
goto out;
}
@@ -2014,16 +2056,16 @@ int qed_ll2_set_fragment_of_tx_packet(void *cxt,
unsigned long flags;
p_ll2_conn = qed_ll2_handle_sanity(p_hwfn, connection_handle);
- if (!p_ll2_conn)
+ if (unlikely(!p_ll2_conn))
return -EINVAL;
- if (!p_ll2_conn->tx_queue.cur_send_packet)
+ if (unlikely(!p_ll2_conn->tx_queue.cur_send_packet))
return -EINVAL;
p_cur_send_packet = p_ll2_conn->tx_queue.cur_send_packet;
cur_send_frag_num = p_ll2_conn->tx_queue.cur_send_frag_num;
- if (cur_send_frag_num >= p_cur_send_packet->bd_used)
+ if (unlikely(cur_send_frag_num >= p_cur_send_packet->bd_used))
return -EINVAL;
/* Fill the BD information, and possibly notify FW */
@@ -2609,7 +2651,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
DP_NOTICE(cdev, "Failed to add an LLH filter\n");
goto err3;
}
-
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
@@ -2651,7 +2692,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ if (unlikely(1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET)) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
1 + nr_frags);
return -EINVAL;
@@ -2693,7 +2734,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
*/
rc = qed_ll2_prepare_tx_packet(p_hwfn, cdev->ll2->handle,
&pkt, 1);
- if (rc)
+ if (unlikely(rc))
goto err;
for (i = 0; i < nr_frags; i++) {
@@ -2717,7 +2758,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
/* if failed not much to do here, partial packet has been posted
* we can't free memory, will need to wait for completion
*/
- if (rc)
+ if (unlikely(rc))
goto err2;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index df88d00053a2..0bfc375161ed 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -32,7 +32,6 @@
#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
-
struct qed_ll2_rx_packet {
struct list_head list_entry;
struct core_rx_bd_with_buff_len *rxq_bd;
@@ -119,41 +118,41 @@ struct qed_ll2_info {
extern const struct qed_ll2_ops qed_ll2_ops_pass;
/**
- * @brief qed_ll2_acquire_connection - allocate resources,
- * starts rx & tx (if relevant) queues pair. Provides
- * connecion handler as output parameter.
+ * qed_ll2_acquire_connection(): Allocates resources and
+ * starts the Rx & Tx (if relevant) queue pair.
+ * Provides the connection handler as an output
+ * parameter.
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param data - describes connection parameters
- * @return int
+ * Return: Int.
*/
int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
/**
- * @brief qed_ll2_establish_connection - start previously
- * allocated LL2 queues pair
+ * qed_ll2_establish_connection(): Start a previously allocated LL2 queue pair.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param addr rx (physical address) buffers to submit
- * @param cookie
- * @param notify_fw produce corresponding Rx BD immediately
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: RX (physical address) buffers to submit.
+ * @buf_len: Buffer length.
+ * @cookie: Cookie.
+ * @notify_fw: Produce corresponding Rx BD immediately.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_post_rx_buffer(void *cxt,
u8 connection_handle,
@@ -161,15 +160,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
u16 buf_len, void *cookie, u8 notify_fw);
/**
- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
- * to prepare Tx packet submission to FW.
+ * qed_ll2_prepare_tx_packet(): Request for start Tx BD
+ * to prepare Tx packet submission to FW.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle
- * @param pkt - info regarding the tx packet
- * @param notify_fw - issue doorbell to fw for this packet
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_prepare_tx_packet(void *cxt,
u8 connection_handle,
@@ -177,81 +176,83 @@ int qed_ll2_prepare_tx_packet(void *cxt,
bool notify_fw);
/**
- * @brief qed_ll2_release_connection - releases resources
- * allocated for LL2 connection
+ * qed_ll2_release_connection(): Releases resources allocated for LL2
+ * connection.
+ *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * Return: Void.
*/
void qed_ll2_release_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill
- * Tx BD of BDs requested by
- * qed_ll2_prepare_tx_packet
+ * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill
+ * Tx BD of BDs requested by
+ * qed_ll2_prepare_tx_packet
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
- * @param addr
- * @param nbytes
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @addr: Address.
+ * @nbytes: Number of bytes.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_set_fragment_of_tx_packet(void *cxt,
u8 connection_handle,
dma_addr_t addr, u16 nbytes);
/**
- * @brief qed_ll2_terminate_connection - stops Tx/Rx queues
- *
+ * qed_ll2_terminate_connection(): Stops Tx/Rx queues
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle
- * obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
*
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_terminate_connection(void *cxt, u8 connection_handle);
/**
- * @brief qed_ll2_get_stats - get LL2 queue's statistics
+ * qed_ll2_get_stats(): Get LL2 queue's statistics
*
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_acquire_connection.
+ * @p_stats: Pointer to statistics.
*
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param p_stats
- *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/
int qed_ll2_get_stats(void *cxt,
u8 connection_handle, struct qed_ll2_stats *p_stats);
/**
- * @brief qed_ll2_alloc - Allocates LL2 connections set
+ * qed_ll2_alloc(): Allocates LL2 connections set.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_ll2_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_setup - Inits LL2 connections set
+ * qed_ll2_setup(): Inits LL2 connections set.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_ll2_free - Releases LL2 connections set
+ * qed_ll2_free(): Releases LL2 connections set
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*
*/
void qed_ll2_free(struct qed_hwfn *p_hwfn);
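
Read together, the kernel-doc blocks above outline the LL2 connection lifecycle. The sketch below is illustrative only and not part of this patch: it strings the documented calls together under the assumption that qed_ll2_acquire_data exposes a p_connection_handle field and that the Rx buffer address is a dma_addr_t; connection inputs, callbacks and most error handling are elided.

/* Illustrative LL2 lifecycle sketch based on the kernel-doc above.
 * The qed_ll2_acquire_data field name (p_connection_handle) and the
 * dma_addr_t type of the Rx buffer address are assumptions.
 */
static int example_ll2_roundtrip(struct qed_hwfn *p_hwfn,
				 dma_addr_t rx_buf_phys, u16 rx_buf_len)
{
	struct qed_ll2_acquire_data data = {};
	struct qed_ll2_stats stats;
	u8 handle;
	int rc;

	/* Connection parameters and Rx/Tx callbacks would be set here. */
	data.p_connection_handle = &handle;	/* assumed field name */

	rc = qed_ll2_acquire_connection(p_hwfn, &data);
	if (rc)
		return rc;

	rc = qed_ll2_establish_connection(p_hwfn, handle);
	if (rc)
		goto release;

	/* Hand one Rx buffer to the queue; notify_fw=1 produces the Rx BD. */
	rc = qed_ll2_post_rx_buffer(p_hwfn, handle, rx_buf_phys, rx_buf_len,
				    NULL, 1);
	if (rc)
		goto terminate;

	/* Tx would go through qed_ll2_prepare_tx_packet(), followed by
	 * qed_ll2_set_fragment_of_tx_packet() for any extra fragments.
	 */

	rc = qed_ll2_get_stats(p_hwfn, handle, &stats);

terminate:
	qed_ll2_terminate_connection(p_hwfn, handle);
release:
	qed_ll2_release_connection(p_hwfn, handle);
	return rc;
}
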
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index d10e1cd6d2ba..7673b3e07736 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -99,10 +99,6 @@ static const u32 qed_mfw_ext_10g[] __initconst = {
ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};
-static const u32 qed_mfw_ext_20g[] __initconst = {
- ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
-};
-
static const u32 qed_mfw_ext_25g[] __initconst = {
ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
@@ -148,7 +144,6 @@ static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
- QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
@@ -262,7 +257,7 @@ module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
* addressing required by the device.
-*/
+ */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
struct device *dev = &cdev->pdev->dev;
@@ -547,7 +542,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successfully\n");
+ DP_INFO(cdev, "%s completed successfully\n", __func__);
return cdev;
@@ -980,7 +975,7 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
rc = qed_set_int_mode(cdev, false);
if (rc) {
- DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
+ DP_ERR(cdev, "%s ERR\n", __func__);
return rc;
}
@@ -1161,6 +1156,7 @@ static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
/* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(wq_flag, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);
@@ -1382,7 +1378,7 @@ static int qed_slowpath_start(struct qed_dev *cdev,
(params->drv_minor << 16) |
(params->drv_rev << 8) |
(params->drv_eng);
- strlcpy(drv_version.name, params->name,
+ strscpy(drv_version.name, params->name,
MCP_DRV_VER_STR_SIZE - 4);
rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
&drv_version);
@@ -2892,7 +2888,7 @@ static int qed_update_drv_state(struct qed_dev *cdev, bool active)
return status;
}
-static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
+static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_ptt *ptt;
@@ -3079,8 +3075,10 @@ int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
"Scheduling slowpath task [Flag: %d]\n",
QED_SLOWPATH_MFW_TLV_REQ);
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
@@ -3159,3 +3157,8 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
return 0;
}
+
+unsigned long qed_get_epoch_time(void)
+{
+ return ktime_get_real_seconds();
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 24cd41567775..64678a256f3b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -17,6 +17,7 @@
#include "qed_cxt.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
+#include "qed_mfw_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
@@ -30,11 +31,11 @@
#define QED_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
- qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
+ qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
_val)
#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
- qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
+ qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
@@ -384,7 +385,7 @@ qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
/* Get the union data */
- if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
+ if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb,
union_data);
@@ -410,7 +411,7 @@ static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
offsetof(struct public_drv_mb, union_data);
memset(&union_data, 0, sizeof(union_data));
- if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
+ if (p_mb_params->p_data_src && p_mb_params->data_src_size)
memcpy(&union_data, p_mb_params->p_data_src,
p_mb_params->data_src_size);
qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
@@ -671,7 +672,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
{
struct qed_mcp_mb_params mb_params;
u8 raw_data[MCP_DRV_NVM_BUF_LEN];
@@ -684,6 +686,8 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
/* Use the maximal value since the actual one is part of the response */
mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;
+ if (b_can_sleep)
+ mb_params.flags = QED_MB_FLAG_CAN_SLEEP;
rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
if (rc)
@@ -916,7 +920,6 @@ enum qed_load_req_force {
};
static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
-
enum qed_load_req_force force_cmd,
u8 *p_mfw_force_cmd)
{
@@ -1526,15 +1529,13 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
ext_speed = 0;
if (params->ext_speed.autoneg)
- ext_speed |= ETH_EXT_SPEED_AN;
+ ext_speed |= ETH_EXT_SPEED_NONE;
val = params->ext_speed.forced_speed;
if (val & QED_EXT_SPEED_1G)
ext_speed |= ETH_EXT_SPEED_1G;
if (val & QED_EXT_SPEED_10G)
ext_speed |= ETH_EXT_SPEED_10G;
- if (val & QED_EXT_SPEED_20G)
- ext_speed |= ETH_EXT_SPEED_20G;
if (val & QED_EXT_SPEED_25G)
ext_speed |= ETH_EXT_SPEED_25G;
if (val & QED_EXT_SPEED_40G)
@@ -1560,8 +1561,6 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
ext_speed |= ETH_EXT_ADV_SPEED_1G;
if (val & QED_EXT_SPEED_MASK_10G)
ext_speed |= ETH_EXT_ADV_SPEED_10G;
- if (val & QED_EXT_SPEED_MASK_20G)
- ext_speed |= ETH_EXT_ADV_SPEED_20G;
if (val & QED_EXT_SPEED_MASK_25G)
ext_speed |= ETH_EXT_ADV_SPEED_25G;
if (val & QED_EXT_SPEED_MASK_40G)
@@ -2081,7 +2080,7 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
- u32 global_offsize;
+ u32 global_offsize, public_base;
if (IS_VF(p_hwfn->cdev)) {
if (p_hwfn->vf_iov_info) {
@@ -2098,16 +2097,16 @@ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
}
}
+ public_base = p_hwfn->mcp_info->public_base;
global_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_hwfn->
- mcp_info->public_base,
+ SECTION_OFFSIZE_ADDR(public_base,
PUBLIC_GLOBAL));
*p_mfw_ver =
qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize,
0) + offsetof(struct public_global, mfw_ver));
- if (p_running_bundle_id != NULL) {
+ if (p_running_bundle_id) {
*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
SECTION_ADDR(global_offsize, 0) +
offsetof(struct public_global,
@@ -2209,6 +2208,7 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
return 0;
}
+
static bool qed_is_transceiver_ready(u32 transceiver_state,
u32 transceiver_type)
{
@@ -2378,7 +2378,7 @@ qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
"According to Legacy capabilities, L2 personality is %08x\n",
- (u32) *p_proto);
+ (u32)*p_proto);
}
static int
@@ -2423,7 +2423,7 @@ qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn,
NETIF_MSG_IFUP,
"According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
- (u32) *p_proto, resp, param);
+ (u32)*p_proto, resp, param);
return 0;
}
@@ -2445,9 +2445,6 @@ qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
case FUNC_MF_CFG_PROTOCOL_ISCSI:
*p_proto = QED_PCI_ISCSI;
break;
- case FUNC_MF_CFG_PROTOCOL_NVMETCP:
- *p_proto = QED_PCI_NVMETCP;
- break;
case FUNC_MF_CFG_PROTOCOL_FCOE:
*p_proto = QED_PCI_FCOE;
break;
@@ -2854,7 +2851,7 @@ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac)
+ struct qed_ptt *p_ptt, const u8 *mac)
{
struct qed_mcp_mb_params mb_params;
u32 mfw_mac[2];
@@ -3026,7 +3023,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
DRV_MB_PARAM_NVM_LEN_OFFSET),
&resp, &resp_param,
&read_len,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), false);
if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
@@ -3034,7 +3031,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
}
/* This can be a lengthy process, and it's possible scheduler
- * isn't preemptable. Sleep a bit to prevent CPU hogging.
+ * isn't preemptible. Sleep a bit to prevent CPU hogging.
*/
if (bytes_left % 0x1000 <
(bytes_left - read_len) % 0x1000)
@@ -3129,10 +3126,12 @@ int qed_mcp_nvm_write(struct qed_dev *cdev,
* to be delivered to MFW.
*/
if (param && cmd == QED_PUT_FILE_DATA) {
- buf_idx = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
- buf_size = QED_MFW_GET_FIELD(param,
- FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
+ buf_idx =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
+ buf_size =
+ QED_MFW_GET_FIELD(param,
+ FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
} else {
buf_idx += buf_size;
buf_size = min_t(u32, (len - buf_idx),
@@ -3176,7 +3175,7 @@ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_TRANSCEIVER_READ,
nvm_offset, &resp, &param, &buf_size,
- (u32 *)(p_buf + offset));
+ (u32 *)(p_buf + offset), true);
if (rc) {
DP_NOTICE(p_hwfn,
"Failed to send a transceiver read command to the MFW. rc = %d.\n",
@@ -3275,7 +3274,7 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
DRV_MSG_CODE_BIST_TEST, param,
&resp, &resp_param,
&buf_size,
- (u32 *)p_image_att);
+ (u32 *)p_image_att, false);
if (rc)
return rc;
@@ -3388,7 +3387,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
type = NVM_TYPE_DEFAULT_CFG;
break;
case QED_NVM_IMAGE_NVM_META:
- type = NVM_TYPE_META;
+ type = NVM_TYPE_NVM_META;
break;
default:
DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
@@ -3905,10 +3904,6 @@ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;
- if (QED_IS_E5(p_hwfn->cdev))
- features |=
- DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;
-
return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
features, &mcp_resp, &mcp_param);
}
@@ -4002,7 +3997,8 @@ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
DRV_MSG_CODE_GET_NVM_CFG_OPTION,
- mb_param, &resp, &param, p_len, (u32 *)p_buf);
+ mb_param, &resp, &param, p_len,
+ (u32 *)p_buf, false);
return rc;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 8edb450d0abf..564723800d15 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -266,97 +266,97 @@ union qed_mfw_tlv_data {
#define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4)
/**
- * @brief - returns the link params of the hw function
+ * qed_mcp_get_link_params(): Returns the link params of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link params
+ * Return: Pointer to link params.
*/
-struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *);
+struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link state of the hw function
+ * qed_mcp_get_link_state(): Return the link state of the hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link state
+ * Return: Pointer to link state.
*/
-struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *);
+struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link capabilities of the hw function
+ * qed_mcp_get_link_capabilities(): Return the link capabilities of the
+ * hw function.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @returns pointer to link capabilities
+ * Return: Pointer to link capabilities.
*/
struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn);
/**
- * @brief Request the MFW to set the the link according to 'link_input'.
+ * qed_mcp_set_link(): Request the MFW to set the link according
+ * to 'link_input'.
*
- * @param p_hwfn
- * @param p_ptt
- * @param b_up - raise link if `true'. Reset link if `false'.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @b_up: Raise link if `true'. Reset link if `false'.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
bool b_up);
/**
- * @brief Get the management firmware version value
+ * qed_mcp_get_mfw_ver(): Get the management firmware version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mfw_ver - mfw version value
- * @param p_running_bundle_id - image id in nvram; Optional.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mfw_ver: MFW version value.
+ * @p_running_bundle_id: Image id in nvram; Optional.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_mfw_ver, u32 *p_running_bundle_id);
/**
- * @brief Get the MBI version value
+ * qed_mcp_get_mbi_ver(): Get the MBI version value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_mbi_ver: A pointer to a variable to be filled with the MBI version.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - operation was successful.
*/
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_mbi_ver);
/**
- * @brief Get media type value of the port.
+ * qed_mcp_get_media_type(): Get media type value of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param mfw_ver - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @media_type: Media type value
*
- * @return int -
- * 0 - Operation was successul.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *media_type);
/**
- * @brief Get transceiver data of the port.
+ * qed_mcp_get_transceiver_data(): Get transceiver data of the port.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_transceiver_state - transceiver state.
- * @param p_transceiver_type - media type value
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_transceiver_state: Transceiver state.
+ * @p_tranceiver_type: Media type value.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
u32 *p_tranceiver_type);
/**
- * @brief Get transceiver supported speed mask.
+ * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_speed_mask - Bit mask of all supported speeds.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_speed_mask: Bit mask of all supported speeds.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_speed_mask);
/**
- * @brief Get board configuration.
+ * qed_mcp_get_board_config(): Get board configuration.
*
- * @param cdev - qed dev pointer
- * @param p_ptt
- * @param p_board_config - Board config.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_board_config: Board config.
*
- * @return int -
- * 0 - Operation was successful.
- * -EBUSY - Operation failed
+ * Return: Int - 0 - Operation was successful.
+ * -EBUSY - Operation failed
*/
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *p_board_config);
/**
- * @brief General function for sending commands to the MCP
- * mailbox. It acquire mutex lock for the entire
- * operation, from sending the request until the MCP
- * response. Waiting for MCP response will be checked up
- * to 5 seconds every 5ms.
+ * qed_mcp_cmd(): General function for sending commands to the MCP
+ * mailbox. It acquires a mutex lock for the entire
+ * operation, from sending the request until the MCP
+ * response. The MCP response is polled every 5ms,
+ * for up to 5 seconds.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param cmd - command to be sent to the MCP.
- * @param param - Optional param
- * @param o_mcp_resp - The MCP response code (exclude sequence).
- * @param o_mcp_param- Optional parameter provided by the MCP
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: Command to be sent to the MCP.
+ * @param: Optional param.
+ * @o_mcp_resp: The MCP response code (exclude sequence).
+ * @o_mcp_param: Optional parameter provided by the MCP
* response
- * @return int - 0 - operation
- * was successul.
+ *
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
u32 *o_mcp_param);
/**
- * @brief - drains the nig, allowing completion to pass in case of pauses.
- * (Should be called only from sleepable context)
+ * qed_mcp_drain(): Drains the NIG, allowing completion to pass in
+ * case of pauses.
+ * (Should be called only from sleepable context)
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_drain(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Get the flash size value
+ * qed_mcp_get_flash_size(): Get the flash size value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_flash_size - flash size in bytes to be filled.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_flash_size: Flash size in bytes to be filled.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *p_flash_size);
/**
- * @brief Send driver version to MFW
+ * qed_mcp_send_drv_version(): Send driver version to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param version - Version value
- * @param name - Protocol driver name
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_ver: Version value.
*
- * @return int - 0 - operation was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
@@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
struct qed_mcp_drv_version *p_ver);
/**
- * @brief Read the MFW process kill counter
+ * qed_get_process_kill_counter(): Read the MFW process kill counter.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return u32
+ * Return: u32.
*/
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Trigger a recovery process
+ * qed_start_recovery_process(): Trigger a recovery process.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief A recovery handler must call this function as its first step.
- * It is assumed that the handler is not run from an interrupt context.
+ * qed_recovery_prolog(): A recovery handler must call this function
+ * as its first step.
+ * It is assumed that the handler is not run from
+ * an interrupt context.
*
- * @param cdev
- * @param p_ptt
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: int.
*/
int qed_recovery_prolog(struct qed_dev *cdev);
/**
- * @brief Notify MFW about the change in base device properties
+ * qed_mcp_ov_update_current_config(): Notify MFW about the change in base
+ * device properties
*
- * @param p_hwfn
- * @param p_ptt
- * @param client - qed client type
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @client: Qed client type.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_client client);
/**
- * @brief Notify MFW about the driver state
+ * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state.
*
- * @param p_hwfn
- * @param p_ptt
- * @param drv_state - Driver state
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @drv_state: Driver state.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_driver_state drv_state);
/**
- * @brief Send MTU size to MFW
+ * qed_mcp_ov_update_mtu(): Send MTU size to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mtu - MTU size
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mtu: MTU size.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u16 mtu);
/**
- * @brief Send MAC address to MFW
+ * qed_mcp_ov_update_mac(): Send MAC address to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mac - MAC address
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mac: MAC address.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
- struct qed_ptt *p_ptt, u8 *mac);
+ struct qed_ptt *p_ptt, const u8 *mac);
/**
- * @brief Send WOL mode to MFW
+ * qed_mcp_ov_update_wol(): Send WOL mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param wol - WOL mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @wol: WOL mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_ov_wol wol);
/**
- * @brief Set LED status
+ * qed_mcp_set_led(): Set LED status.
*
- * @param p_hwfn
- * @param p_ptt
- * @param mode - LED mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @mode: LED mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
enum qed_led_mode mode);
/**
- * @brief Read from nvm
+ * qed_mcp_nvm_read(): Read from NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param p_buf - nvm read buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @p_buf: NVM read buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Write to nvm
+ * qed_mcp_nvm_write(): Write to NVM.
*
- * @param cdev
- * @param addr - nvm offset
- * @param cmd - nvm command
- * @param p_buf - nvm write buffer
- * @param len - buffer len
+ * @cdev: Qed dev pointer.
+ * @addr: NVM offset.
+ * @cmd: NVM command.
+ * @p_buf: NVM write buffer.
+ * @len: Buffer len.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_write(struct qed_dev *cdev,
u32 cmd, u32 addr, u8 *p_buf, u32 len);
/**
- * @brief Check latest response
+ * qed_mcp_nvm_resp(): Check latest response.
*
- * @param cdev
- * @param p_buf - nvm write buffer
+ * @cdev: Qed dev pointer.
+ * @p_buf: NVM write buffer.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf);
@@ -604,13 +606,13 @@ struct qed_nvm_image_att {
};
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image to get attributes for
- * @param p_image_att - image attributes structure into which to fill data
+ * @p_hwfn: HW device data.
+ * @image_id: Image to get attributes for.
+ * @p_image_att: Image attributes structure into which to fill data.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
@@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
struct qed_nvm_image_att *p_image_att);
/**
- * @brief Allows reading a whole nvram image
+ * qed_mcp_get_nvm_image(): Allows reading a whole nvram image.
*
- * @param p_hwfn
- * @param image_id - image requested for reading
- * @param p_buffer - allocated buffer into which to fill data
- * @param buffer_len - length of the allocated buffer.
+ * @p_hwfn: HW device data.
+ * @image_id: image requested for reading.
+ * @p_buffer: allocated buffer into which to fill data.
+ * @buffer_len: length of the allocated buffer.
*
- * @return 0 iff p_buffer now contains the nvram image.
+ * Return: 0 if p_buffer now contains the nvram image.
*/
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
enum qed_nvm_images image_id,
u8 *p_buffer, u32 buffer_len);
/**
- * @brief Bist register test
+ * qed_mcp_bist_register_test(): Bist register test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist clock test
+ * qed_mcp_bist_clock_test(): Bist clock test.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Bist nvm test - get number of images
+ * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param num_images - number of images if operation was
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @num_images: number of images if operation was
* successful. 0 if not.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 *num_images);
/**
- * @brief Bist nvm test - get image attributes by index
+ * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes
+ * by index.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_image_att - Attributes of image
- * @param image_index - Index of image to get information for
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_image_att: Attributes of image.
+ * @image_index: Index of image to get information for.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
u32 image_index);
/**
- * @brief - Processes the TLV request from MFW i.e., get the required TLV info
- * from the qed client and send it to the MFW.
+ * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e.,
+ * get the required TLV info
+ * from the qed client and send it to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Send raw debug data to the MFW
+ * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_buf: raw debug data buffer.
+ * @size: Buffer size.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_buf - raw debug data buffer
- * @param size - buffer size
+ * Return: Int.
*/
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
@@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn)
}
/**
- * @brief Initialize the interface with the MCP
+ * qed_mcp_cmd_init(): Initialize the interface with the MCP.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Initialize the port interface with the MCP
+ * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Void.
*
- * @param p_hwfn
- * @param p_ptt
* Can only be called after `num_ports_in_engines' is set
*/
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Releases resources allocated during the init process.
+ * qed_mcp_free(): Releases resources allocated during the init process.
*
- * @param p_hwfn - HW func
- * @param p_ptt - PTT required for register access
+ * @p_hwfn: HW function.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_free(struct qed_hwfn *p_hwfn);
/**
- * @brief This function is called from the DPC context. After
- * pointing PTT to the mfw mb, check for events sent by the MCP
- * to the driver and ack them. In case a critical event
- * detected, it will be handled here, otherwise the work will be
- * queued to a sleepable work-queue.
+ * qed_mcp_handle_events(): This function is called from the DPC context.
+ * After pointing PTT to the mfw mb, check for events sent by
+ * the MCP to the driver and ack them. In case a critical event is
+ * detected, it will be handled here, otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @p_hwfn: HW function.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn - HW function
- * @param p_ptt - PTT required for register access
- * @return int - 0 - operation
- * was successul.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
@@ -858,169 +866,177 @@ struct qed_load_req_params {
};
/**
- * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
- * returns whether this PF is the first on the engine/port or function.
+ * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the
+ * operation succeeds, returns whether this PF is
+ * the first on the engine/port or function.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_params: Load request params.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
struct qed_load_req_params *p_params);
/**
- * @brief Sends a LOAD_DONE message to the MFW
+ * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_REQ message to the MFW
+ * qed_mcp_unload_req(): Sends an UNLOAD_REQ message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Sends a UNLOAD_DONE message to the MFW
+ * qed_mcp_unload_done(): Sends an UNLOAD_DONE message to the MFW.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - Operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read the MFW mailbox into Current buffer.
+ * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Void.
*/
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief Ack to mfw that driver finished FLR process for VFs
+ * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs
*
- * @param p_hwfn
- * @param p_ptt
- * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @vfs_to_ack: bit mask of all engine VFs for which the PF acks.
*
- * @param return int - 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 *vfs_to_ack);
/**
- * @brief - calls during init to read shmem of all function-related info.
+ * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of
+ * all function-related info.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Reset the MCP using mailbox command.
+ * qed_mcp_reset(): Reset the MCP using mailbox command.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_reset(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt);
/**
- * @brief - Sends an NVM read command request to the MFW to get
- * a buffer.
- *
- * @param p_hwfn
- * @param p_ptt
- * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or
- * DRV_MSG_CODE_NVM_READ_NVRAM commands
- * @param param - [0:23] - Offset [24:31] - Size
- * @param o_mcp_resp - MCP response
- * @param o_mcp_param - MCP response param
- * @param o_txn_size - Buffer size output
- * @param o_buf - Pointer to the buffer returned by the MFW.
+ * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get
+ * a buffer.
*
- * @param return 0 upon success.
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or
+ * DRV_MSG_CODE_NVM_READ_NVRAM commands.
+ * @param: [0:23] - Offset [24:31] - Size.
+ * @o_mcp_resp: MCP response.
+ * @o_mcp_param: MCP response param.
+ * @o_txn_size: Buffer size output.
+ * @o_buf: Pointer to the buffer returned by the MFW.
+ * @b_can_sleep: Whether the calling context is allowed to sleep while waiting.
+ *
+ * Return: 0 upon success.
*/
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
u32 cmd,
u32 param,
u32 *o_mcp_resp,
- u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf);
+ u32 *o_mcp_param,
+ u32 *o_txn_size, u32 *o_buf, bool b_can_sleep);
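As a rough illustration of the @param packing documented above (offset in bits [0:23], size in bits [24:31]), a caller could compose the dword as below before issuing DRV_MSG_CODE_NVM_READ_NVRAM. This sketch is not part of the patch; the helper name and chunk handling are hypothetical.

/* Hypothetical helper, not from this patch: packs 'param' per the
 * documented layout ([0:23] offset, [24:31] size).
 */
static int example_nvm_read_chunk(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt,
                                  u32 nvm_offset, u8 chunk_len, u32 *out)
{
        u32 resp, param, txn_size;
        u32 packed = (nvm_offset & 0x00ffffff) | ((u32)chunk_len << 24);

        return qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_READ_NVRAM,
                                  packed, &resp, &param, &txn_size, out,
                                  true /* b_can_sleep */);
}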
/**
- * @brief Read from sfp
+ * qed_mcp_phy_sfp_read(): Read from sfp.
*
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param port - transceiver port
- * @param addr - I2C address
- * @param offset - offset in sfp
- * @param len - buffer length
- * @param p_buf - buffer to read into
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @port: transceiver port.
+ * @addr: I2C address.
+ * @offset: offset in sfp.
+ * @len: buffer length.
+ * @p_buf: buffer to read into.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf);
/**
- * @brief indicates whether the MFW objects [under mcp_info] are accessible
+ * qed_mcp_is_init(): Indicates whether the MFW objects [under mcp_info]
+ * are accessible.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return true iff MFW is running and mcp_info is initialized
+ * Return: true if MFW is running and mcp_info is initialized.
*/
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn);
/**
- * @brief request MFW to configure MSI-X for a VF
+ * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF.
*
- * @param p_hwfn
- * @param p_ptt
- * @param vf_id - absolute inside engine
- * @param num_sbs - number of entries to request
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @vf_id: absolute inside engine.
+ * @num: number of entries to request.
*
- * @return int
+ * Return: Int.
*/
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u8 vf_id, u8 num);
/**
- * @brief - Halt the MCP.
+ * qed_mcp_halt(): Halt the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief - Wake up the MCP.
+ * qed_mcp_resume(): Wake up the MCP.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param return 0 upon success.
+ * Return: 0 upon success.
*/
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
@@ -1038,13 +1054,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn,
int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, u32 mask_parities);
-/* @brief - Gets the mdump retained data from the MFW.
+/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_mdump_retain
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_mdump_retain: Mdump retained data.
*
- * @param return 0 upon success.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
@@ -1052,15 +1068,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn,
struct mdump_retain_data_stc *p_mdump_retain);
/**
- * @brief - Sets the MFW's max value for the given resource
+ * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param resc_max_val
- * @param p_mcp_resp
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @res_id: Resource ID.
+ * @resc_max_val: Resource max value.
+ * @p_mcp_resp: MCP response.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
@@ -1069,16 +1085,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
u32 resc_max_val, u32 *p_mcp_resp);
/**
- * @brief - Gets the MFW allocation info for the given resource
+ * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given
+ * resource.
*
- * @param p_hwfn
- * @param p_ptt
- * @param res_id
- * @param p_mcp_resp
- * @param p_resc_num
- * @param p_resc_start
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @res_id: Resource ID.
+ * @p_mcp_resp: MCP response.
+ * @p_resc_num: Number of allocated resources.
+ * @p_resc_start: Offset of the first resource.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
@@ -1087,13 +1104,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start);
/**
- * @brief Send eswitch mode to MFW
+ * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW.
*
- * @param p_hwfn
- * @param p_ptt
- * @param eswitch - eswitch mode
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @eswitch: eswitch mode.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt,
@@ -1113,12 +1130,12 @@ enum qed_resc_lock {
};
/**
- * @brief - Initiates PF FLR
+ * qed_mcp_initiate_pf_flr(): Initiates PF FLR.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
struct qed_resc_lock_params {
@@ -1151,13 +1168,13 @@ struct qed_resc_lock_params {
};
/**
- * @brief Acquires MFW generic resource lock
+ * qed_mcp_resc_lock(): Acquires MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_params: Resource lock params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
@@ -1175,13 +1192,13 @@ struct qed_resc_unlock_params {
};
/**
- * @brief Releases MFW generic resource lock
+ * qed_mcp_resc_unlock(): Releases MFW generic resource lock.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_params
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @p_params: Resource unlock params.
*
- * @return int - 0 - operation was successful.
+ * Return: Int - 0 - Operation was successful.
*/
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
@@ -1189,12 +1206,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
struct qed_resc_unlock_params *p_params);
/**
- * @brief - default initialization for lock/unlock resource structs
+ * qed_mcp_resc_lock_default_init(): Default initialization for
+ * lock/unlock resource structs.
+ *
+ * @p_lock: lock params struct to be initialized; Can be NULL.
+ * @p_unlock: unlock params struct to be initialized; Can be NULL.
+ * @resource: the requested resource.
+ * @b_is_permanent: disable retries & aging when set.
*
- * @param p_lock - lock params struct to be initialized; Can be NULL
- * @param p_unlock - unlock params struct to be initialized; Can be NULL
- * @param resource - the requested resource
- * @paral b_is_permanent - disable retries & aging when set
+ * Return: Void.
*/
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
struct qed_resc_unlock_params *p_unlock,
@@ -1202,94 +1222,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
resource, bool b_is_permanent);
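The three helpers above are meant to be used together; a minimal sketch of the acquire/release flow follows. It assumes struct qed_resc_lock_params exposes a b_granted result flag and that QED_RESC_LOCK_PTP_PORT0 is a valid enum qed_resc_lock value, neither of which is visible in this hunk.

static int example_with_resc_lock(struct qed_hwfn *p_hwfn,
                                  struct qed_ptt *p_ptt)
{
        struct qed_resc_lock_params lock;
        struct qed_resc_unlock_params unlock;
        int rc;

        /* Default retries/aging; permanent locking disabled. */
        qed_mcp_resc_lock_default_init(&lock, &unlock,
                                       QED_RESC_LOCK_PTP_PORT0, false);

        rc = qed_mcp_resc_lock(p_hwfn, p_ptt, &lock);
        if (rc || !lock.b_granted)      /* b_granted is assumed here */
                return rc ? rc : -EBUSY;

        /* ... touch the MFW-arbitrated resource ... */

        return qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock);
}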
/**
- * @brief - Return whether management firmware support smart AN
+ * qed_mcp_is_smart_an_supported(): Return whether management firmware
+ * supports smart AN.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return bool - true if feature is supported.
+ * Return: True if the feature is supported.
*/
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn);
/**
- * @brief Learn of supported MFW features; To be done during early init
+ * qed_mcp_get_capabilities(): Learn of supported MFW features;
+ * To be done during early init.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Inform MFW of set of features supported by driver. Should be done
- * inside the content of the LOAD_REQ.
+ * qed_mcp_set_capabilities(): Inform MFW of set of features supported
+ * by driver. Should be done inside the content
+ * of the LOAD_REQ.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Read ufp config from the shared memory.
+ * qed_mcp_read_ufp_config(): Read ufp config from the shared memory.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Void.
*/
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Populate the nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*/
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn);
/**
- * @brief Delete nvm info shadow in the given hardware function
+ * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given
+ * hardware function.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn);
/**
- * @brief Get the engine affinity configuration.
+ * qed_mcp_get_engine_config(): Get the engine affinity configuration.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
*
- * @param p_hwfn
- * @param p_ptt
+ * Return: Int.
*/
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get the PPFID bitmap.
+ * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap.
*
- * @param p_hwfn
- * @param p_ptt
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ *
+ * Return: Int.
*/
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
/**
- * @brief Get NVM config attribute value.
+ * qed_mcp_nvm_get_cfg(): Get NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param p_len
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @p_len: Len.
+ *
+ * Return: Int.
*/
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
u32 *p_len);
/**
- * @brief Set NVM config attribute value.
+ * qed_mcp_nvm_set_cfg(): Set NVM config attribute value.
*
- * @param p_hwfn
- * @param p_ptt
- * @param option_id
- * @param entity_id
- * @param flags
- * @param p_buf
- * @param len
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT required for register access.
+ * @option_id: Option ID.
+ * @entity_id: Entity ID.
+ * @flags: Flags.
+ * @p_buf: Buf.
+ * @len: Len.
+ *
+ * Return: Int.
*/
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
new file mode 100644
index 000000000000..8a0e3c5d4bda
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mfw_hsi.h
@@ -0,0 +1,2474 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
+/* QLogic qed NIC Driver
+ * Copyright (c) 2019-2021 Marvell International Ltd.
+ */
+
+#ifndef _QED_MFW_HSI_H
+#define _QED_MFW_HSI_H
+
+#define MFW_TRACE_SIGNATURE 0x25071946
+
+/* The trace in the buffer */
+#define MFW_TRACE_EVENTID_MASK 0x00ffff
+#define MFW_TRACE_PRM_SIZE_MASK 0x0f0000
+#define MFW_TRACE_PRM_SIZE_OFFSET 16
+#define MFW_TRACE_ENTRY_SIZE 3
+
+struct mcp_trace {
+ u32 signature; /* Help to identify that the trace is valid */
+ u32 size; /* the size of the trace buffer in bytes */
+ u32 curr_level; /* 2 - all will be written to the buffer
+ * 1 - debug trace will not be written
+ * 0 - just errors will be written to the buffer
+ */
+ u32 modules_mask[2]; /* a bit per module, 1 means write it, 0 means
+ * mask it.
+ */
+
+ /* Warning: the following pointers are assumed to be 32bits as they are
+ * used only in the MFW.
+ */
+ u32 trace_prod; /* The next trace will be written to this offset */
+ u32 trace_oldest; /* The oldest valid trace starts at this offset
+ * (usually very close after the current producer).
+ */
+};
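For readers of this structure, the producer/oldest offsets imply a circular layout; the sketch below derives the number of valid bytes under that assumption (the wraparound behaviour itself is not spelled out by this header).

static u32 example_mcp_trace_valid_bytes(const struct mcp_trace *t)
{
        /* Assumes offsets are relative to the trace buffer and wrap at 'size'. */
        if (t->trace_prod >= t->trace_oldest)
                return t->trace_prod - t->trace_oldest;

        return t->size - (t->trace_oldest - t->trace_prod);
}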
+
+#define VF_MAX_STATIC 192
+#define VF_BITMAP_SIZE_IN_DWORDS (VF_MAX_STATIC / 32)
+#define VF_BITMAP_SIZE_IN_BYTES (VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+
+#define EXT_VF_MAX_STATIC 240
+#define EXT_VF_BITMAP_SIZE_IN_DWORDS (((EXT_VF_MAX_STATIC - 1) / 32) + 1)
+#define EXT_VF_BITMAP_SIZE_IN_BYTES (EXT_VF_BITMAP_SIZE_IN_DWORDS * sizeof(u32))
+#define ADDED_VF_BITMAP_SIZE 2
+
+#define MCP_GLOB_PATH_MAX 2
+#define MCP_PORT_MAX 2
+#define MCP_GLOB_PORT_MAX 4
+#define MCP_GLOB_FUNC_MAX 16
+
+typedef u32 offsize_t; /* In DWORDS !!! */
+/* Offset from the beginning of the MCP scratchpad */
+#define OFFSIZE_OFFSET_SHIFT 0
+#define OFFSIZE_OFFSET_MASK 0x0000ffff
+/* Size of specific element (not the whole array if any) */
+#define OFFSIZE_SIZE_SHIFT 16
+#define OFFSIZE_SIZE_MASK 0xffff0000
+
+#define SECTION_OFFSET(_offsize) (((((_offsize) & \
+ OFFSIZE_OFFSET_MASK) >> \
+ OFFSIZE_OFFSET_SHIFT) << 2))
+
+#define QED_SECTION_SIZE(_offsize) ((((_offsize) & \
+ OFFSIZE_SIZE_MASK) >> \
+ OFFSIZE_SIZE_SHIFT) << 2)
+
+#define SECTION_ADDR(_offsize, idx) (MCP_REG_SCRATCH + \
+ SECTION_OFFSET((_offsize)) + \
+ (QED_SECTION_SIZE((_offsize)) * (idx)))
+
+#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \
+ ((_pub_base) + offsetof(struct mcp_public_data, sections[_section]))
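A short worked example of the offsize encoding: the low 16 bits carry a dword offset from the start of the MCP scratchpad and the high 16 bits a per-element dword size, which the macros above convert to bytes with a '<< 2'. The decode below is illustrative only and not part of the patch.

static void example_decode_offsize(offsize_t offsize)
{
        u32 offset_bytes = SECTION_OFFSET(offsize);   /* (offsize & 0xffff) << 2 */
        u32 size_bytes = QED_SECTION_SIZE(offsize);   /* (offsize >> 16) << 2 */

        pr_debug("section at scratchpad offset 0x%x, element size 0x%x bytes\n",
                 offset_bytes, size_bytes);
}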
+
+/* PHY configuration */
+struct eth_phy_cfg {
+ u32 speed;
+#define ETH_SPEED_AUTONEG 0x0
+#define ETH_SPEED_SMARTLINQ 0x8
+
+ u32 pause;
+#define ETH_PAUSE_NONE 0x0
+#define ETH_PAUSE_AUTONEG 0x1
+#define ETH_PAUSE_RX 0x2
+#define ETH_PAUSE_TX 0x4
+
+ u32 adv_speed;
+
+ u32 loopback_mode;
+#define ETH_LOOPBACK_NONE 0x0
+#define ETH_LOOPBACK_INT_PHY 0x1
+#define ETH_LOOPBACK_EXT_PHY 0x2
+#define ETH_LOOPBACK_EXT 0x3
+#define ETH_LOOPBACK_MAC 0x4
+#define ETH_LOOPBACK_CNIG_AH_ONLY_0123 0x5
+#define ETH_LOOPBACK_CNIG_AH_ONLY_2301 0x6
+#define ETH_LOOPBACK_PCS_AH_ONLY 0x7
+#define ETH_LOOPBACK_REVERSE_MAC_AH_ONLY 0x8
+#define ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY 0x9
+
+ u32 eee_cfg;
+#define EEE_CFG_EEE_ENABLED BIT(0)
+#define EEE_CFG_TX_LPI BIT(1)
+#define EEE_CFG_ADV_SPEED_1G BIT(2)
+#define EEE_CFG_ADV_SPEED_10G BIT(3)
+#define EEE_TX_TIMER_USEC_MASK 0xfffffff0
+#define EEE_TX_TIMER_USEC_OFFSET 4
+#define EEE_TX_TIMER_USEC_BALANCED_TIME 0xa00
+#define EEE_TX_TIMER_USEC_AGGRESSIVE_TIME 0x100
+#define EEE_TX_TIMER_USEC_LATENCY_TIME 0x6000
+
+ u32 link_modes;
+
+ u32 fec_mode;
+#define FEC_FORCE_MODE_MASK 0x000000ff
+#define FEC_FORCE_MODE_OFFSET 0
+#define FEC_FORCE_MODE_NONE 0x00
+#define FEC_FORCE_MODE_FIRECODE 0x01
+#define FEC_FORCE_MODE_RS 0x02
+#define FEC_FORCE_MODE_AUTO 0x07
+#define FEC_EXTENDED_MODE_MASK 0xffffff00
+#define FEC_EXTENDED_MODE_OFFSET 8
+#define ETH_EXT_FEC_NONE 0x00000000
+#define ETH_EXT_FEC_10G_NONE 0x00000100
+#define ETH_EXT_FEC_10G_BASE_R 0x00000200
+#define ETH_EXT_FEC_25G_NONE 0x00000400
+#define ETH_EXT_FEC_25G_BASE_R 0x00000800
+#define ETH_EXT_FEC_25G_RS528 0x00001000
+#define ETH_EXT_FEC_40G_NONE 0x00002000
+#define ETH_EXT_FEC_40G_BASE_R 0x00004000
+#define ETH_EXT_FEC_50G_NONE 0x00008000
+#define ETH_EXT_FEC_50G_BASE_R 0x00010000
+#define ETH_EXT_FEC_50G_RS528 0x00020000
+#define ETH_EXT_FEC_50G_RS544 0x00040000
+#define ETH_EXT_FEC_100G_NONE 0x00080000
+#define ETH_EXT_FEC_100G_BASE_R 0x00100000
+#define ETH_EXT_FEC_100G_RS528 0x00200000
+#define ETH_EXT_FEC_100G_RS544 0x00400000
+
+ u32 extended_speed;
+#define ETH_EXT_SPEED_MASK 0x0000ffff
+#define ETH_EXT_SPEED_OFFSET 0
+#define ETH_EXT_SPEED_NONE 0x00000001
+#define ETH_EXT_SPEED_1G 0x00000002
+#define ETH_EXT_SPEED_10G 0x00000004
+#define ETH_EXT_SPEED_25G 0x00000008
+#define ETH_EXT_SPEED_40G 0x00000010
+#define ETH_EXT_SPEED_50G_BASE_R 0x00000020
+#define ETH_EXT_SPEED_50G_BASE_R2 0x00000040
+#define ETH_EXT_SPEED_100G_BASE_R2 0x00000080
+#define ETH_EXT_SPEED_100G_BASE_R4 0x00000100
+#define ETH_EXT_SPEED_100G_BASE_P4 0x00000200
+#define ETH_EXT_ADV_SPEED_MASK 0xFFFF0000
+#define ETH_EXT_ADV_SPEED_OFFSET 16
+#define ETH_EXT_ADV_SPEED_1G 0x00010000
+#define ETH_EXT_ADV_SPEED_10G 0x00020000
+#define ETH_EXT_ADV_SPEED_25G 0x00040000
+#define ETH_EXT_ADV_SPEED_40G 0x00080000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R 0x00100000
+#define ETH_EXT_ADV_SPEED_50G_BASE_R2 0x00200000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R2 0x00400000
+#define ETH_EXT_ADV_SPEED_100G_BASE_R4 0x00800000
+#define ETH_EXT_ADV_SPEED_100G_BASE_P4 0x01000000
+};
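As an example of how the fec_mode bit-fields compose, a forced-FEC value occupies the low byte while the extended-FEC capability bits sit above it; the values chosen below are arbitrary and purely illustrative.

static void example_fill_fec(struct eth_phy_cfg *cfg)
{
        /* Force RS-FEC and advertise 25G RS528 in the extended field. */
        cfg->fec_mode = (FEC_FORCE_MODE_RS << FEC_FORCE_MODE_OFFSET) |
                        ETH_EXT_FEC_25G_RS528;
}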
+
+struct port_mf_cfg {
+ u32 dynamic_cfg;
+#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff
+#define PORT_MF_CFG_OV_TAG_SHIFT 0
+#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK
+
+ u32 reserved[1];
+};
+
+struct eth_stats {
+ u64 r64;
+ u64 r127;
+ u64 r255;
+ u64 r511;
+ u64 r1023;
+ u64 r1518;
+
+ union {
+ struct {
+ u64 r1522;
+ u64 r2047;
+ u64 r4095;
+ u64 r9216;
+ u64 r16383;
+ } bb0;
+ struct {
+ u64 unused1;
+ u64 r1519_to_max;
+ u64 unused2;
+ u64 unused3;
+ u64 unused4;
+ } ah0;
+ } u0;
+
+ u64 rfcs;
+ u64 rxcf;
+ u64 rxpf;
+ u64 rxpp;
+ u64 raln;
+ u64 rfcr;
+ u64 rovr;
+ u64 rjbr;
+ u64 rund;
+ u64 rfrg;
+ u64 t64;
+ u64 t127;
+ u64 t255;
+ u64 t511;
+ u64 t1023;
+ u64 t1518;
+
+ union {
+ struct {
+ u64 t2047;
+ u64 t4095;
+ u64 t9216;
+ u64 t16383;
+ } bb1;
+ struct {
+ u64 t1519_to_max;
+ u64 unused6;
+ u64 unused7;
+ u64 unused8;
+ } ah1;
+ } u1;
+
+ u64 txpf;
+ u64 txpp;
+
+ union {
+ struct {
+ u64 tlpiec;
+ u64 tncl;
+ } bb2;
+ struct {
+ u64 unused9;
+ u64 unused10;
+ } ah2;
+ } u2;
+
+ u64 rbyte;
+ u64 rxuca;
+ u64 rxmca;
+ u64 rxbca;
+ u64 rxpok;
+ u64 tbyte;
+ u64 txuca;
+ u64 txmca;
+ u64 txbca;
+ u64 txcf;
+};
+
+struct pkt_type_cnt {
+ u64 tc_tx_pkt_cnt[8];
+ u64 tc_tx_oct_cnt[8];
+ u64 priority_rx_pkt_cnt[8];
+ u64 priority_rx_oct_cnt[8];
+};
+
+struct brb_stats {
+ u64 brb_truncate[8];
+ u64 brb_discard[8];
+};
+
+struct port_stats {
+ struct brb_stats brb;
+ struct eth_stats eth;
+};
+
+struct couple_mode_teaming {
+ u8 port_cmt[MCP_GLOB_PORT_MAX];
+#define PORT_CMT_IN_TEAM BIT(0)
+
+#define PORT_CMT_PORT_ROLE BIT(1)
+#define PORT_CMT_PORT_INACTIVE (0 << 1)
+#define PORT_CMT_PORT_ACTIVE BIT(1)
+
+#define PORT_CMT_TEAM_MASK BIT(2)
+#define PORT_CMT_TEAM0 (0 << 2)
+#define PORT_CMT_TEAM1 BIT(2)
+};
+
+#define LLDP_CHASSIS_ID_STAT_LEN 4
+#define LLDP_PORT_ID_STAT_LEN 4
+#define DCBX_MAX_APP_PROTOCOL 32
+#define MAX_SYSTEM_LLDP_TLV_DATA 32
+#define MAX_TLV_BUFFER 128
+
+enum _lldp_agent {
+ LLDP_NEAREST_BRIDGE = 0,
+ LLDP_NEAREST_NON_TPMR_BRIDGE,
+ LLDP_NEAREST_CUSTOMER_BRIDGE,
+ LLDP_MAX_LLDP_AGENTS
+};
+
+struct lldp_config_params_s {
+ u32 config;
+#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff
+#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0
+#define LLDP_CONFIG_HOLD_MASK 0x00000f00
+#define LLDP_CONFIG_HOLD_SHIFT 8
+#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000
+#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12
+#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000
+#define LLDP_CONFIG_ENABLE_RX_SHIFT 30
+#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000
+#define LLDP_CONFIG_ENABLE_TX_SHIFT 31
+ u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 local_port_id[LLDP_PORT_ID_STAT_LEN];
+};
+
+struct lldp_status_params_s {
+ u32 prefix_seq_num;
+ u32 status;
+ u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN];
+ u32 peer_port_id[LLDP_PORT_ID_STAT_LEN];
+ u32 suffix_seq_num;
+};
+
+struct dcbx_ets_feature {
+ u32 flags;
+#define DCBX_ETS_ENABLED_MASK 0x00000001
+#define DCBX_ETS_ENABLED_SHIFT 0
+#define DCBX_ETS_WILLING_MASK 0x00000002
+#define DCBX_ETS_WILLING_SHIFT 1
+#define DCBX_ETS_ERROR_MASK 0x00000004
+#define DCBX_ETS_ERROR_SHIFT 2
+#define DCBX_ETS_CBS_MASK 0x00000008
+#define DCBX_ETS_CBS_SHIFT 3
+#define DCBX_ETS_MAX_TCS_MASK 0x000000f0
+#define DCBX_ETS_MAX_TCS_SHIFT 4
+#define DCBX_OOO_TC_MASK 0x00000f00
+#define DCBX_OOO_TC_SHIFT 8
+ u32 pri_tc_tbl[1];
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+#define NIG_ETS_ISCSI_OOO_CLIENT_OFFSET (DCBX_TCP_OOO_TC + 1)
+#define DCBX_CEE_STRICT_PRIORITY 0xf
+ u32 tc_bw_tbl[2];
+ u32 tc_tsa_tbl[2];
+#define DCBX_ETS_TSA_STRICT 0
+#define DCBX_ETS_TSA_CBS 1
+#define DCBX_ETS_TSA_ETS 2
+};
+
+#define DCBX_TCP_OOO_TC (4)
+#define DCBX_TCP_OOO_K2_4PORT_TC (3)
+
+struct dcbx_app_priority_entry {
+ u32 entry;
+#define DCBX_APP_PRI_MAP_MASK 0x000000ff
+#define DCBX_APP_PRI_MAP_SHIFT 0
+#define DCBX_APP_PRI_0 0x01
+#define DCBX_APP_PRI_1 0x02
+#define DCBX_APP_PRI_2 0x04
+#define DCBX_APP_PRI_3 0x08
+#define DCBX_APP_PRI_4 0x10
+#define DCBX_APP_PRI_5 0x20
+#define DCBX_APP_PRI_6 0x40
+#define DCBX_APP_PRI_7 0x80
+#define DCBX_APP_SF_MASK 0x00000300
+#define DCBX_APP_SF_SHIFT 8
+#define DCBX_APP_SF_ETHTYPE 0
+#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
+#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
+#define DCBX_APP_PROTOCOL_ID_SHIFT 16
+};
+
+struct dcbx_app_priority_feature {
+ u32 flags;
+#define DCBX_APP_ENABLED_MASK 0x00000001
+#define DCBX_APP_ENABLED_SHIFT 0
+#define DCBX_APP_WILLING_MASK 0x00000002
+#define DCBX_APP_WILLING_SHIFT 1
+#define DCBX_APP_ERROR_MASK 0x00000004
+#define DCBX_APP_ERROR_SHIFT 2
+#define DCBX_APP_MAX_TCS_MASK 0x0000f000
+#define DCBX_APP_MAX_TCS_SHIFT 12
+#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000
+#define DCBX_APP_NUM_ENTRIES_SHIFT 16
+ struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
+};
+
+struct dcbx_features {
+ struct dcbx_ets_feature ets;
+ u32 pfc;
+#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff
+#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40
+#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80
+
+#define DCBX_PFC_FLAGS_MASK 0x0000ff00
+#define DCBX_PFC_FLAGS_SHIFT 8
+#define DCBX_PFC_CAPS_MASK 0x00000f00
+#define DCBX_PFC_CAPS_SHIFT 8
+#define DCBX_PFC_MBC_MASK 0x00004000
+#define DCBX_PFC_MBC_SHIFT 14
+#define DCBX_PFC_WILLING_MASK 0x00008000
+#define DCBX_PFC_WILLING_SHIFT 15
+#define DCBX_PFC_ENABLED_MASK 0x00010000
+#define DCBX_PFC_ENABLED_SHIFT 16
+#define DCBX_PFC_ERROR_MASK 0x00020000
+#define DCBX_PFC_ERROR_SHIFT 17
+
+ struct dcbx_app_priority_feature app;
+};
+
+struct dcbx_local_params {
+ u32 config;
+#define DCBX_CONFIG_VERSION_MASK 0x00000007
+#define DCBX_CONFIG_VERSION_SHIFT 0
+#define DCBX_CONFIG_VERSION_DISABLED 0
+#define DCBX_CONFIG_VERSION_IEEE 1
+#define DCBX_CONFIG_VERSION_CEE 2
+#define DCBX_CONFIG_VERSION_STATIC 4
+
+ u32 flags;
+ struct dcbx_features features;
+};
+
+struct dcbx_mib {
+ u32 prefix_seq_num;
+ u32 flags;
+ struct dcbx_features features;
+ u32 suffix_seq_num;
+};
+
+struct lldp_system_tlvs_buffer_s {
+ u32 flags;
+#define LLDP_SYSTEM_TLV_VALID_MASK 0x1
+#define LLDP_SYSTEM_TLV_VALID_OFFSET 0
+#define LLDP_SYSTEM_TLV_MANDATORY_MASK 0x2
+#define LLDP_SYSTEM_TLV_MANDATORY_SHIFT 1
+#define LLDP_SYSTEM_TLV_LENGTH_MASK 0xffff0000
+#define LLDP_SYSTEM_TLV_LENGTH_SHIFT 16
+ u32 data[MAX_SYSTEM_LLDP_TLV_DATA];
+};
+
+struct lldp_received_tlvs_s {
+ u32 prefix_seq_num;
+ u32 length;
+ u32 tlvs_buffer[MAX_TLV_BUFFER];
+ u32 suffix_seq_num;
+};
+
+struct dcb_dscp_map {
+ u32 flags;
+#define DCB_DSCP_ENABLE_MASK 0x1
+#define DCB_DSCP_ENABLE_SHIFT 0
+#define DCB_DSCP_ENABLE 1
+ u32 dscp_pri_map[8];
+};
+
+struct mcp_val64 {
+ u32 lo;
+ u32 hi;
+};
+
+struct generic_idc_msg_s {
+ u32 source_pf;
+ struct mcp_val64 msg;
+};
+
+struct pcie_stats_stc {
+ u32 sr_cnt_wr_byte_msb;
+ u32 sr_cnt_wr_byte_lsb;
+ u32 sr_cnt_wr_cnt;
+ u32 sr_cnt_rd_byte_msb;
+ u32 sr_cnt_rd_byte_lsb;
+ u32 sr_cnt_rd_cnt;
+};
+
+enum _attribute_commands_e {
+ ATTRIBUTE_CMD_READ = 0,
+ ATTRIBUTE_CMD_WRITE,
+ ATTRIBUTE_CMD_READ_CLEAR,
+ ATTRIBUTE_CMD_CLEAR,
+ ATTRIBUTE_NUM_OF_COMMANDS
+};
+
+struct public_global {
+ u32 max_path;
+ u32 max_ports;
+#define MODE_1P 1
+#define MODE_2P 2
+#define MODE_3P 3
+#define MODE_4P 4
+ u32 debug_mb_offset;
+ u32 phymod_dbg_mb_offset;
+ struct couple_mode_teaming cmt;
+ s32 internal_temperature;
+ u32 mfw_ver;
+ u32 running_bundle_id;
+ s32 external_temperature;
+ u32 mdump_reason;
+ u32 ext_phy_upgrade_fw;
+ u8 runtime_port_swap_map[MODE_4P];
+ u32 data_ptr;
+ u32 data_size;
+ u32 bmb_error_status_cnt;
+ u32 bmb_jumbo_frame_cnt;
+ u32 sent_to_bmc_cnt;
+ u32 handled_by_mfw;
+ u32 sent_to_nw_cnt;
+ u32 to_bmc_kb_per_second;
+ u32 bcast_dropped_to_bmc_cnt;
+ u32 mcast_dropped_to_bmc_cnt;
+ u32 ucast_dropped_to_bmc_cnt;
+ u32 ncsi_response_failure_cnt;
+ u32 device_attr;
+ u32 vpd_warning;
+};
+
+struct fw_flr_mb {
+ u32 aggint;
+ u32 opgen_addr;
+ u32 accum_ack;
+};
+
+struct public_path {
+ struct fw_flr_mb flr_mb;
+ u32 mcp_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 process_kill;
+#define PROCESS_KILL_COUNTER_MASK 0x0000ffff
+#define PROCESS_KILL_COUNTER_SHIFT 0
+#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000
+#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16
+#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) ((aeu_reg_id) * 32 + (aeu_bit))
+};
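The process_kill word packs a kill counter in the low half and a global AEU bit number in the high half; a small decode sketch under the masks above, not taken from this patch:

static void example_decode_process_kill(u32 process_kill)
{
        u32 cnt = (process_kill & PROCESS_KILL_COUNTER_MASK) >>
                  PROCESS_KILL_COUNTER_SHIFT;
        u32 aeu_bit = (process_kill & PROCESS_KILL_GLOB_AEU_BIT_MASK) >>
                      PROCESS_KILL_GLOB_AEU_BIT_SHIFT;

        pr_debug("process-kill count %u, global AEU bit %u\n", cnt, aeu_bit);
}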
+
+#define FC_NPIV_WWPN_SIZE 8
+#define FC_NPIV_WWNN_SIZE 8
+struct dci_npiv_settings {
+ u8 npiv_wwpn[FC_NPIV_WWPN_SIZE];
+ u8 npiv_wwnn[FC_NPIV_WWNN_SIZE];
+};
+
+struct dci_fc_npiv_cfg {
+ /* hdr used internally by the MFW */
+ u32 hdr;
+ u32 num_of_npiv;
+};
+
+#define MAX_NUMBER_NPIV 64
+struct dci_fc_npiv_tbl {
+ struct dci_fc_npiv_cfg fc_npiv_cfg;
+ struct dci_npiv_settings settings[MAX_NUMBER_NPIV];
+};
+
+struct pause_flood_monitor {
+ u8 period_cnt;
+ u8 any_brb_prs_packet_hist;
+ u8 any_brb_block_is_full_hist;
+ u8 flags;
+ u32 num_of_state_changes;
+};
+
+struct public_port {
+ u32 validity_map;
+
+ u32 link_status;
+#define LINK_STATUS_LINK_UP 0x00000001
+#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD BIT(1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1)
+#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1)
+#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020
+#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040
+#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080
+#define LINK_STATUS_PFC_ENABLED 0x00000100
+#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200
+#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400
+#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000
+#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000
+#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000
+#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000
+#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000
+#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000c0000
+#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18)
+#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE BIT(18)
+#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18)
+#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18)
+#define LINK_STATUS_SFP_TX_FAULT 0x00100000
+#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000
+#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000
+#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000
+#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000
+#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000
+#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000
+
+#define LINK_STATUS_FEC_MODE_MASK 0x38000000
+#define LINK_STATUS_FEC_MODE_NONE (0 << 27)
+#define LINK_STATUS_FEC_MODE_FIRECODE_CL74 BIT(27)
+#define LINK_STATUS_FEC_MODE_RS_CL91 (2 << 27)
+#define LINK_STATUS_EXT_PHY_LINK_UP BIT(30)
+
+ u32 link_status1;
+ u32 ext_phy_fw_version;
+ u32 drv_phy_cfg_addr;
+
+ u32 port_stx;
+
+ u32 stat_nig_timer;
+
+ struct port_mf_cfg port_mf_config;
+ struct port_stats stats;
+
+ u32 media_type;
+#define MEDIA_UNSPECIFIED 0x0
+#define MEDIA_SFPP_10G_FIBER 0x1
+#define MEDIA_XFP_FIBER 0x2
+#define MEDIA_DA_TWINAX 0x3
+#define MEDIA_BASE_T 0x4
+#define MEDIA_SFP_1G_FIBER 0x5
+#define MEDIA_MODULE_FIBER 0x6
+#define MEDIA_KR 0xf0
+#define MEDIA_NOT_PRESENT 0xff
+
+ u32 lfa_status;
+ u32 link_change_count;
+
+ struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS];
+ struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf;
+
+ /* DCBX related MIB */
+ struct dcbx_local_params local_admin_dcbx_mib;
+ struct dcbx_mib remote_dcbx_mib;
+ struct dcbx_mib operational_dcbx_mib;
+
+ u32 fc_npiv_nvram_tbl_addr;
+ u32 fc_npiv_nvram_tbl_size;
+
+ u32 transceiver_data;
+#define ETH_TRANSCEIVER_STATE_MASK 0x000000ff
+#define ETH_TRANSCEIVER_STATE_SHIFT 0x00000000
+#define ETH_TRANSCEIVER_STATE_OFFSET 0x00000000
+#define ETH_TRANSCEIVER_STATE_UNPLUGGED 0x00000000
+#define ETH_TRANSCEIVER_STATE_PRESENT 0x00000001
+#define ETH_TRANSCEIVER_STATE_VALID 0x00000003
+#define ETH_TRANSCEIVER_STATE_UPDATING 0x00000008
+#define ETH_TRANSCEIVER_STATE_IN_SETUP 0x10
+#define ETH_TRANSCEIVER_TYPE_MASK 0x0000ff00
+#define ETH_TRANSCEIVER_TYPE_OFFSET 0x8
+#define ETH_TRANSCEIVER_TYPE_NONE 0x00
+#define ETH_TRANSCEIVER_TYPE_UNKNOWN 0xff
+#define ETH_TRANSCEIVER_TYPE_1G_PCC 0x01
+#define ETH_TRANSCEIVER_TYPE_1G_ACC 0x02
+#define ETH_TRANSCEIVER_TYPE_1G_LX 0x03
+#define ETH_TRANSCEIVER_TYPE_1G_SX 0x04
+#define ETH_TRANSCEIVER_TYPE_10G_SR 0x05
+#define ETH_TRANSCEIVER_TYPE_10G_LR 0x06
+#define ETH_TRANSCEIVER_TYPE_10G_LRM 0x07
+#define ETH_TRANSCEIVER_TYPE_10G_ER 0x08
+#define ETH_TRANSCEIVER_TYPE_10G_PCC 0x09
+#define ETH_TRANSCEIVER_TYPE_10G_ACC 0x0a
+#define ETH_TRANSCEIVER_TYPE_XLPPI 0x0b
+#define ETH_TRANSCEIVER_TYPE_40G_LR4 0x0c
+#define ETH_TRANSCEIVER_TYPE_40G_SR4 0x0d
+#define ETH_TRANSCEIVER_TYPE_40G_CR4 0x0e
+#define ETH_TRANSCEIVER_TYPE_100G_AOC 0x0f
+#define ETH_TRANSCEIVER_TYPE_100G_SR4 0x10
+#define ETH_TRANSCEIVER_TYPE_100G_LR4 0x11
+#define ETH_TRANSCEIVER_TYPE_100G_ER4 0x12
+#define ETH_TRANSCEIVER_TYPE_100G_ACC 0x13
+#define ETH_TRANSCEIVER_TYPE_100G_CR4 0x14
+#define ETH_TRANSCEIVER_TYPE_4x10G_SR 0x15
+#define ETH_TRANSCEIVER_TYPE_25G_CA_N 0x16
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_S 0x17
+#define ETH_TRANSCEIVER_TYPE_25G_CA_S 0x18
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_M 0x19
+#define ETH_TRANSCEIVER_TYPE_25G_CA_L 0x1a
+#define ETH_TRANSCEIVER_TYPE_25G_ACC_L 0x1b
+#define ETH_TRANSCEIVER_TYPE_25G_SR 0x1c
+#define ETH_TRANSCEIVER_TYPE_25G_LR 0x1d
+#define ETH_TRANSCEIVER_TYPE_25G_AOC 0x1e
+#define ETH_TRANSCEIVER_TYPE_4x10G 0x1f
+#define ETH_TRANSCEIVER_TYPE_4x25G_CR 0x20
+#define ETH_TRANSCEIVER_TYPE_1000BASET 0x21
+#define ETH_TRANSCEIVER_TYPE_10G_BASET 0x22
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR 0x30
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR 0x37
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR 0x38
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR 0x39
+#define ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR 0x3a
+
+ u32 wol_info;
+ u32 wol_pkt_len;
+ u32 wol_pkt_details;
+ struct dcb_dscp_map dcb_dscp_map;
+
+ u32 eee_status;
+#define EEE_ACTIVE_BIT BIT(0)
+#define EEE_LD_ADV_STATUS_MASK 0x000000f0
+#define EEE_LD_ADV_STATUS_OFFSET 4
+#define EEE_1G_ADV BIT(1)
+#define EEE_10G_ADV BIT(2)
+#define EEE_LP_ADV_STATUS_MASK 0x00000f00
+#define EEE_LP_ADV_STATUS_OFFSET 8
+#define EEE_SUPPORTED_SPEED_MASK 0x0000f000
+#define EEE_SUPPORTED_SPEED_OFFSET 12
+#define EEE_1G_SUPPORTED BIT(1)
+#define EEE_10G_SUPPORTED BIT(2)
+
+ u32 eee_remote;
+#define EEE_REMOTE_TW_TX_MASK 0x0000ffff
+#define EEE_REMOTE_TW_TX_OFFSET 0
+#define EEE_REMOTE_TW_RX_MASK 0xffff0000
+#define EEE_REMOTE_TW_RX_OFFSET 16
+
+ u32 module_info;
+
+ u32 oem_cfg_port;
+#define OEM_CFG_CHANNEL_TYPE_MASK 0x00000003
+#define OEM_CFG_CHANNEL_TYPE_OFFSET 0
+#define OEM_CFG_CHANNEL_TYPE_VLAN_PARTITION 0x1
+#define OEM_CFG_CHANNEL_TYPE_STAGGED 0x2
+#define OEM_CFG_SCHED_TYPE_MASK 0x0000000C
+#define OEM_CFG_SCHED_TYPE_OFFSET 2
+#define OEM_CFG_SCHED_TYPE_ETS 0x1
+#define OEM_CFG_SCHED_TYPE_VNIC_BW 0x2
+
+ struct lldp_received_tlvs_s lldp_received_tlvs[LLDP_MAX_LLDP_AGENTS];
+ u32 system_lldp_tlvs_buf2[MAX_SYSTEM_LLDP_TLV_DATA];
+ u32 phy_module_temperature;
+ u32 nig_reg_stat_rx_bmb_packet;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask;
+ u32 nig_reg_rx_llh_ncsi_mcp_mask_2;
+ struct pause_flood_monitor pause_flood_monitor;
+ u32 nig_drain_cnt;
+ struct pkt_type_cnt pkt_tc_priority_cnt;
+};
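To illustrate the transceiver_data layout, the state lives in the low byte and the type in the next byte; the sketch below is illustrative only and not part of the patch.

static void example_parse_transceiver(const struct public_port *port)
{
        u32 state = port->transceiver_data & ETH_TRANSCEIVER_STATE_MASK;
        u32 type = (port->transceiver_data & ETH_TRANSCEIVER_TYPE_MASK) >>
                   ETH_TRANSCEIVER_TYPE_OFFSET;

        if (state == ETH_TRANSCEIVER_STATE_PRESENT)
                pr_debug("transceiver present, type 0x%02x\n", type);
}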
+
+#define MCP_DRV_VER_STR_SIZE 16
+#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32))
+#define MCP_DRV_NVM_BUF_LEN 32
+struct drv_version_stc {
+ u32 version;
+ u8 name[MCP_DRV_VER_STR_SIZE - 4];
+};
+
+struct public_func {
+ u32 iscsi_boot_signature;
+ u32 iscsi_boot_block_offset;
+
+ u32 mtu_size;
+
+ u32 c2s_pcp_map_lower;
+ u32 c2s_pcp_map_upper;
+ u32 c2s_pcp_map_default;
+
+ struct generic_idc_msg_s generic_idc_msg;
+
+ u32 num_of_msix;
+
+ u32 config;
+#define FUNC_MF_CFG_FUNC_HIDE 0x00000001
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002
+#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001
+
+#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0
+#define FUNC_MF_CFG_PROTOCOL_SHIFT 4
+#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000
+#define FUNC_MF_CFG_PROTOCOL_ISCSI 0x00000010
+#define FUNC_MF_CFG_PROTOCOL_FCOE 0x00000020
+#define FUNC_MF_CFG_PROTOCOL_ROCE 0x00000030
+#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000030
+
+#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00
+#define FUNC_MF_CFG_MIN_BW_SHIFT 8
+#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000
+#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000
+#define FUNC_MF_CFG_MAX_BW_SHIFT 16
+#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000
+
+ u32 status;
+#define FUNC_STATUS_VIRTUAL_LINK_UP 0x00000001
+
+ u32 mac_upper;
+#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff
+#define FUNC_MF_CFG_UPPERMAC_SHIFT 0
+#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK
+ u32 mac_lower;
+#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff
+
+ u32 fcoe_wwn_port_name_upper;
+ u32 fcoe_wwn_port_name_lower;
+
+ u32 fcoe_wwn_node_name_upper;
+ u32 fcoe_wwn_node_name_lower;
+
+ u32 ovlan_stag;
+#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff
+#define FUNC_MF_CFG_OV_STAG_SHIFT 0
+#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK
+
+ u32 pf_allocation;
+
+ u32 preserve_data;
+
+ u32 driver_last_activity_ts;
+
+ u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32];
+
+ u32 drv_id;
+#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
+#define DRV_ID_PDA_COMP_VER_SHIFT 0
+
+#define LOAD_REQ_HSI_VERSION 2
+#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
+#define DRV_ID_MCP_HSI_VER_SHIFT 16
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+ DRV_ID_MCP_HSI_VER_SHIFT)
+
+#define DRV_ID_DRV_TYPE_MASK 0x7f000000
+#define DRV_ID_DRV_TYPE_SHIFT 24
+#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT)
+#define DRV_ID_DRV_TYPE_LINUX BIT(DRV_ID_DRV_TYPE_SHIFT)
+
+#define DRV_ID_DRV_INIT_HW_MASK 0x80000000
+#define DRV_ID_DRV_INIT_HW_SHIFT 31
+#define DRV_ID_DRV_INIT_HW_FLAG BIT(DRV_ID_DRV_INIT_HW_SHIFT)
+
+ u32 oem_cfg_func;
+#define OEM_CFG_FUNC_TC_MASK 0x0000000F
+#define OEM_CFG_FUNC_TC_OFFSET 0
+#define OEM_CFG_FUNC_TC_0 0x0
+#define OEM_CFG_FUNC_TC_1 0x1
+#define OEM_CFG_FUNC_TC_2 0x2
+#define OEM_CFG_FUNC_TC_3 0x3
+#define OEM_CFG_FUNC_TC_4 0x4
+#define OEM_CFG_FUNC_TC_5 0x5
+#define OEM_CFG_FUNC_TC_6 0x6
+#define OEM_CFG_FUNC_TC_7 0x7
+
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_MASK 0x00000030
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET 4
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC 0x1
+#define OEM_CFG_FUNC_HOST_PRI_CTRL_OS 0x2
+
+ struct drv_version_stc drv_ver;
+};
+
+struct mcp_mac {
+ u32 mac_upper;
+ u32 mac_lower;
+};
+
+struct mcp_file_att {
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct bist_nvm_image_att {
+ u32 return_code;
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+};
+
+struct lan_stats_stc {
+ u64 ucast_rx_pkts;
+ u64 ucast_tx_pkts;
+ u32 fcs_err;
+ u32 rserved;
+};
+
+struct fcoe_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u32 fcs_err;
+ u32 login_failure;
+};
+
+struct iscsi_stats_stc {
+ u64 rx_pdus;
+ u64 tx_pdus;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct rdma_stats_stc {
+ u64 rx_pkts;
+ u64 tx_pkts;
+ u64 rx_bytes;
+ u64 tx_bytes;
+};
+
+struct ocbb_data_stc {
+ u32 ocbb_host_addr;
+ u32 ocsd_host_addr;
+ u32 ocsd_req_update_interval;
+};
+
+struct fcoe_cap_stc {
+ u32 max_ios;
+ u32 max_log;
+ u32 max_exch;
+ u32 max_npiv;
+ u32 max_tgt;
+ u32 max_outstnd;
+};
+
+#define MAX_NUM_OF_SENSORS 7
+struct temperature_status_stc {
+ u32 num_of_sensors;
+ u32 sensor[MAX_NUM_OF_SENSORS];
+};
+
+/* crash dump configuration header */
+struct mdump_config_stc {
+ u32 version;
+ u32 config;
+ u32 epoc;
+ u32 num_of_logs;
+ u32 valid_logs;
+};
+
+enum resource_id_enum {
+ RESOURCE_NUM_SB_E = 0,
+ RESOURCE_NUM_L2_QUEUE_E = 1,
+ RESOURCE_NUM_VPORT_E = 2,
+ RESOURCE_NUM_VMQ_E = 3,
+ RESOURCE_FACTOR_NUM_RSS_PF_E = 4,
+ RESOURCE_FACTOR_RSS_PER_VF_E = 5,
+ RESOURCE_NUM_RL_E = 6,
+ RESOURCE_NUM_PQ_E = 7,
+ RESOURCE_NUM_VF_E = 8,
+ RESOURCE_VFC_FILTER_E = 9,
+ RESOURCE_ILT_E = 10,
+ RESOURCE_CQS_E = 11,
+ RESOURCE_GFT_PROFILES_E = 12,
+ RESOURCE_NUM_TC_E = 13,
+ RESOURCE_NUM_RSS_ENGINES_E = 14,
+ RESOURCE_LL2_QUEUE_E = 15,
+ RESOURCE_RDMA_STATS_QUEUE_E = 16,
+ RESOURCE_BDQ_E = 17,
+ RESOURCE_QCN_E = 18,
+ RESOURCE_LLH_FILTER_E = 19,
+ RESOURCE_VF_MAC_ADDR = 20,
+ RESOURCE_LL2_CQS_E = 21,
+ RESOURCE_VF_CNQS = 22,
+ RESOURCE_MAX_NUM,
+ RESOURCE_NUM_INVALID = 0xFFFFFFFF
+};
+
+/* Resource ID is to be filled by the driver in the MB request
+ * Size, offset & flags to be filled by the MFW in the MB response
+ */
+struct resource_info {
+ enum resource_id_enum res_id;
+ u32 size; /* number of allocated resources */
+ u32 offset; /* Offset of the 1st resource */
+ u32 vf_size;
+ u32 vf_offset;
+ u32 flags;
+#define RESOURCE_ELEMENT_STRICT BIT(0)
+};
+
+struct mcp_wwn {
+ u32 wwn_upper;
+ u32 wwn_lower;
+};
+
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 8
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 16
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 20
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
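A minimal example of packing misc0 from the role/timeout/force fields defined above; the chosen values are arbitrary and the helper name is hypothetical.

static void example_fill_load_req(struct load_req_stc *req)
{
        req->misc0 = (DRV_ROLE_OS << LOAD_REQ_ROLE_SHIFT) |
                     (LOAD_REQ_LOCK_TO_DEFAULT << LOAD_REQ_LOCK_TO_SHIFT) |
                     (LOAD_REQ_FORCE_NONE << LOAD_REQ_FORCE_SHIFT);
}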
+
+struct load_rsp_stc {
+ u32 drv_ver_0;
+ u32 drv_ver_1;
+ u32 fw_ver;
+ u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
+struct mdump_retain_data_stc {
+ u32 valid;
+ u32 epoch;
+ u32 pf;
+ u32 status;
+};
+
+struct attribute_cmd_write_stc {
+ u32 val;
+ u32 mask;
+ u32 offset;
+};
+
+struct lldp_stats_stc {
+ u32 tx_frames_total;
+ u32 rx_frames_total;
+ u32 rx_frames_discarded;
+ u32 rx_age_outs;
+};
+
+struct get_att_ctrl_stc {
+ u32 disabled_attns;
+ u32 controllable_attns;
+};
+
+struct trace_filter_stc {
+ u32 level;
+ u32 modules;
+};
+
+union drv_union_data {
+ struct mcp_mac wol_mac;
+
+ struct eth_phy_cfg drv_phy_cfg;
+
+ struct mcp_val64 val64;
+
+ u8 raw_data[MCP_DRV_NVM_BUF_LEN];
+
+ struct mcp_file_att file_att;
+
+ u32 ack_vf_disabled[EXT_VF_BITMAP_SIZE_IN_DWORDS];
+
+ struct drv_version_stc drv_version;
+
+ struct lan_stats_stc lan_stats;
+ struct fcoe_stats_stc fcoe_stats;
+ struct iscsi_stats_stc iscsi_stats;
+ struct rdma_stats_stc rdma_stats;
+ struct ocbb_data_stc ocbb_info;
+ struct temperature_status_stc temp_info;
+ struct resource_info resource;
+ struct bist_nvm_image_att nvm_image_att;
+ struct mdump_config_stc mdump_config;
+ struct mcp_mac lldp_mac;
+ struct mcp_wwn fcoe_fabric_name;
+ u32 dword;
+
+ struct load_req_stc load_req;
+ struct load_rsp_stc load_rsp;
+ struct mdump_retain_data_stc mdump_retain;
+ struct attribute_cmd_write_stc attribute_cmd_write;
+ struct lldp_stats_stc lldp_stats;
+ struct pcie_stats_stc pcie_stats;
+
+ struct get_att_ctrl_stc get_att_ctrl;
+ struct fcoe_cap_stc fcoe_cap;
+ struct trace_filter_stc trace_filter;
+};
+
+struct public_drv_mb {
+ u32 drv_mb_header;
+#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define DRV_MSG_SEQ_NUMBER_OFFSET 0
+#define DRV_MSG_CODE_MASK 0xffff0000
+#define DRV_MSG_CODE_OFFSET 16
+
+ u32 drv_mb_param;
+
+ u32 fw_mb_header;
+#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff
+#define FW_MSG_SEQ_NUMBER_OFFSET 0
+#define FW_MSG_CODE_MASK 0xffff0000
+#define FW_MSG_CODE_OFFSET 16
+
+ u32 fw_mb_param;
+
+ u32 drv_pulse_mb;
+#define DRV_PULSE_SEQ_MASK 0x00007fff
+#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000
+#define DRV_PULSE_ALWAYS_ALIVE 0x00008000
+
+ u32 mcp_pulse_mb;
+#define MCP_PULSE_SEQ_MASK 0x00007fff
+#define MCP_PULSE_ALWAYS_ALIVE 0x00008000
+#define MCP_EVENT_MASK 0xffff0000
+#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000
+
+ union drv_union_data union_data;
+};
+
+#define DRV_MSG_CODE(_code_) ((_code_) << DRV_MSG_CODE_OFFSET)
+enum drv_msg_code_enum {
+ DRV_MSG_CODE_NVM_PUT_FILE_BEGIN = DRV_MSG_CODE(0x0001),
+ DRV_MSG_CODE_NVM_PUT_FILE_DATA = DRV_MSG_CODE(0x0002),
+ DRV_MSG_CODE_NVM_GET_FILE_ATT = DRV_MSG_CODE(0x0003),
+ DRV_MSG_CODE_NVM_READ_NVRAM = DRV_MSG_CODE(0x0005),
+ DRV_MSG_CODE_NVM_WRITE_NVRAM = DRV_MSG_CODE(0x0006),
+ DRV_MSG_CODE_MCP_RESET = DRV_MSG_CODE(0x0009),
+ DRV_MSG_CODE_SET_VERSION = DRV_MSG_CODE(0x000f),
+ DRV_MSG_CODE_MCP_HALT = DRV_MSG_CODE(0x0010),
+ DRV_MSG_CODE_SET_VMAC = DRV_MSG_CODE(0x0011),
+ DRV_MSG_CODE_GET_VMAC = DRV_MSG_CODE(0x0012),
+ DRV_MSG_CODE_GET_STATS = DRV_MSG_CODE(0x0013),
+ DRV_MSG_CODE_TRANSCEIVER_READ = DRV_MSG_CODE(0x0016),
+ DRV_MSG_CODE_MASK_PARITIES = DRV_MSG_CODE(0x001a),
+ DRV_MSG_CODE_BIST_TEST = DRV_MSG_CODE(0x001e),
+ DRV_MSG_CODE_SET_LED_MODE = DRV_MSG_CODE(0x0020),
+ DRV_MSG_CODE_RESOURCE_CMD = DRV_MSG_CODE(0x0023),
+ DRV_MSG_CODE_MDUMP_CMD = DRV_MSG_CODE(0x0025),
+ DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL = DRV_MSG_CODE(0x002b),
+ DRV_MSG_CODE_OS_WOL = DRV_MSG_CODE(0x002e),
+ DRV_MSG_CODE_GET_TLV_DONE = DRV_MSG_CODE(0x002f),
+ DRV_MSG_CODE_FEATURE_SUPPORT = DRV_MSG_CODE(0x0030),
+ DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT = DRV_MSG_CODE(0x0031),
+ DRV_MSG_CODE_GET_ENGINE_CONFIG = DRV_MSG_CODE(0x0037),
+ DRV_MSG_CODE_GET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003e),
+ DRV_MSG_CODE_SET_NVM_CFG_OPTION = DRV_MSG_CODE(0x003f),
+ DRV_MSG_CODE_INITIATE_PF_FLR = DRV_MSG_CODE(0x0201),
+ DRV_MSG_CODE_LOAD_REQ = DRV_MSG_CODE(0x1000),
+ DRV_MSG_CODE_LOAD_DONE = DRV_MSG_CODE(0x1100),
+ DRV_MSG_CODE_INIT_HW = DRV_MSG_CODE(0x1200),
+ DRV_MSG_CODE_CANCEL_LOAD_REQ = DRV_MSG_CODE(0x1300),
+ DRV_MSG_CODE_UNLOAD_REQ = DRV_MSG_CODE(0x2000),
+ DRV_MSG_CODE_UNLOAD_DONE = DRV_MSG_CODE(0x2100),
+ DRV_MSG_CODE_INIT_PHY = DRV_MSG_CODE(0x2200),
+ DRV_MSG_CODE_LINK_RESET = DRV_MSG_CODE(0x2300),
+ DRV_MSG_CODE_SET_DCBX = DRV_MSG_CODE(0x2500),
+ DRV_MSG_CODE_OV_UPDATE_CURR_CFG = DRV_MSG_CODE(0x2600),
+ DRV_MSG_CODE_OV_UPDATE_BUS_NUM = DRV_MSG_CODE(0x2700),
+ DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS = DRV_MSG_CODE(0x2800),
+ DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER = DRV_MSG_CODE(0x2900),
+ DRV_MSG_CODE_NIG_DRAIN = DRV_MSG_CODE(0x3000),
+ DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE = DRV_MSG_CODE(0x3100),
+ DRV_MSG_CODE_BW_UPDATE_ACK = DRV_MSG_CODE(0x3200),
+ DRV_MSG_CODE_OV_UPDATE_MTU = DRV_MSG_CODE(0x3300),
+ DRV_MSG_GET_RESOURCE_ALLOC_MSG = DRV_MSG_CODE(0x3400),
+ DRV_MSG_SET_RESOURCE_VALUE_MSG = DRV_MSG_CODE(0x3500),
+ DRV_MSG_CODE_OV_UPDATE_WOL = DRV_MSG_CODE(0x3800),
+ DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE = DRV_MSG_CODE(0x3900),
+ DRV_MSG_CODE_S_TAG_UPDATE_ACK = DRV_MSG_CODE(0x3b00),
+ DRV_MSG_CODE_GET_OEM_UPDATES = DRV_MSG_CODE(0x4100),
+ DRV_MSG_CODE_GET_PPFID_BITMAP = DRV_MSG_CODE(0x4300),
+ DRV_MSG_CODE_VF_DISABLED_DONE = DRV_MSG_CODE(0xc000),
+ DRV_MSG_CODE_CFG_VF_MSIX = DRV_MSG_CODE(0xc001),
+ DRV_MSG_CODE_CFG_PF_VFS_MSIX = DRV_MSG_CODE(0xc002),
+ DRV_MSG_CODE_DEBUG_DATA_SEND = DRV_MSG_CODE(0xc004),
+};
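For reference, a mailbox header dword combines one of the (already shifted) codes above with a rolling sequence number, per the DRV_MSG_* masks in struct public_drv_mb; sequence management in this sketch is illustrative only.

static u32 example_drv_mb_header(enum drv_msg_code_enum code, u16 seq)
{
        return ((u32)code & DRV_MSG_CODE_MASK) |
               ((seq << DRV_MSG_SEQ_NUMBER_OFFSET) & DRV_MSG_SEQ_NUMBER_MASK);
}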
+
+#define DRV_MSG_CODE_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_VMAC_TYPE_MASK 0x30
+#define DRV_MSG_CODE_VMAC_TYPE_MAC 1
+#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2
+#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3
+
+/* DRV_MSG_CODE_RETAIN_VMAC parameters */
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_SHIFT 0
+#define DRV_MSG_CODE_RETAIN_VMAC_FUNC_MASK 0xf
+
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_SHIFT 4
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_MASK 0x70
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_L2 0
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_ISCSI 1
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_FCOE 2
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWNN 3
+#define DRV_MSG_CODE_RETAIN_VMAC_TYPE_WWPN 4
+
+#define DRV_MSG_CODE_MCP_RESET_FORCE 0xf04ce
+
+#define DRV_MSG_CODE_STATS_TYPE_LAN 1
+#define DRV_MSG_CODE_STATS_TYPE_FCOE 2
+#define DRV_MSG_CODE_STATS_TYPE_ISCSI 3
+#define DRV_MSG_CODE_STATS_TYPE_RDMA 4
+
+#define BW_MAX_MASK 0x000000ff
+#define BW_MAX_OFFSET 0
+#define BW_MIN_MASK 0x0000ff00
+#define BW_MIN_OFFSET 8
+
+#define DRV_MSG_FAN_FAILURE_TYPE BIT(0)
+#define DRV_MSG_TEMPERATURE_FAILURE_TYPE BIT(1)
+
+#define RESOURCE_CMD_REQ_RESC_MASK 0x0000001F
+#define RESOURCE_CMD_REQ_RESC_SHIFT 0
+#define RESOURCE_CMD_REQ_OPCODE_MASK 0x000000E0
+#define RESOURCE_CMD_REQ_OPCODE_SHIFT 5
+#define RESOURCE_OPCODE_REQ 1
+#define RESOURCE_OPCODE_REQ_WO_AGING 2
+#define RESOURCE_OPCODE_REQ_W_AGING 3
+#define RESOURCE_OPCODE_RELEASE 4
+#define RESOURCE_OPCODE_FORCE_RELEASE 5
+#define RESOURCE_CMD_REQ_AGE_MASK 0x0000FF00
+#define RESOURCE_CMD_REQ_AGE_SHIFT 8
+
+#define RESOURCE_CMD_RSP_OWNER_MASK 0x000000FF
+#define RESOURCE_CMD_RSP_OWNER_SHIFT 0
+#define RESOURCE_CMD_RSP_OPCODE_MASK 0x00000700
+#define RESOURCE_CMD_RSP_OPCODE_SHIFT 8
+#define RESOURCE_OPCODE_GNT 1
+#define RESOURCE_OPCODE_BUSY 2
+#define RESOURCE_OPCODE_RELEASED 3
+#define RESOURCE_OPCODE_RELEASED_PREVIOUS 4
+#define RESOURCE_OPCODE_WRONG_OWNER 5
+#define RESOURCE_OPCODE_UNKNOWN_CMD 255
+
+#define RESOURCE_DUMP 0
+
+/* DRV_MSG_CODE_MDUMP_CMD parameters */
+#define MDUMP_DRV_PARAM_OPCODE_MASK 0x000000ff
+#define DRV_MSG_CODE_MDUMP_ACK 0x01
+#define DRV_MSG_CODE_MDUMP_SET_VALUES 0x02
+#define DRV_MSG_CODE_MDUMP_TRIGGER 0x03
+#define DRV_MSG_CODE_MDUMP_GET_CONFIG 0x04
+#define DRV_MSG_CODE_MDUMP_SET_ENABLE 0x05
+#define DRV_MSG_CODE_MDUMP_CLEAR_LOGS 0x06
+#define DRV_MSG_CODE_MDUMP_GET_RETAIN 0x07
+#define DRV_MSG_CODE_MDUMP_CLR_RETAIN 0x08
+
+#define DRV_MSG_CODE_HW_DUMP_TRIGGER 0x0a
+
+#define DRV_MSG_CODE_MDUMP_FREE_DRIVER_BUF 0x0b
+#define DRV_MSG_CODE_MDUMP_GEN_LINK_DUMP 0x0c
+#define DRV_MSG_CODE_MDUMP_GEN_IDLE_CHK 0x0d
+
+/* DRV_MSG_CODE_MDUMP_CMD options */
+#define MDUMP_DRV_PARAM_OPTION_MASK 0x00000f00
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_OFFSET 8
+#define DRV_MSG_CODE_MDUMP_USE_DRIVER_BUF_MASK 0x100
+
+/* DRV_MSG_CODE_EXT_PHY_READ/DRV_MSG_CODE_EXT_PHY_WRITE parameters */
+#define DRV_MB_PARAM_ADDR_SHIFT 0
+#define DRV_MB_PARAM_ADDR_MASK 0x0000FFFF
+#define DRV_MB_PARAM_DEVAD_SHIFT 16
+#define DRV_MB_PARAM_DEVAD_MASK 0x001F0000
+#define DRV_MB_PARAM_PORT_SHIFT 21
+#define DRV_MB_PARAM_PORT_MASK 0x00600000
+
+/* DRV_MSG_CODE_PMBUS_READ/DRV_MSG_CODE_PMBUS_WRITE parameters */
+#define DRV_MB_PARAM_PMBUS_CMD_SHIFT 0
+#define DRV_MB_PARAM_PMBUS_CMD_MASK 0xFF
+#define DRV_MB_PARAM_PMBUS_LEN_SHIFT 8
+#define DRV_MB_PARAM_PMBUS_LEN_MASK 0x300
+#define DRV_MB_PARAM_PMBUS_DATA_SHIFT 16
+#define DRV_MB_PARAM_PMBUS_DATA_MASK 0xFFFF0000
+
+/* UNLOAD_REQ params */
+#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000
+#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001
+#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002
+#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003
+
+/* UNLOAD_DONE params */

+#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001
+
+/* INIT_PHY params */
+#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001
+#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002
+
+/* LLDP / DCBX params */
+#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0
+#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006
+#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_MASK 0x00000001
+#define DRV_MB_PARAM_LLDP_TLV_RX_VALID_SHIFT 0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_MASK 0x000007f0
+#define DRV_MB_PARAM_LLDP_TLV_RX_TYPE_SHIFT 4
+#define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008
+#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_MASK 0x00000010
+#define DRV_MB_PARAM_DCBX_ADMIN_CFG_NOTIFY_SHIFT 4
+
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF
+#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_MASK 0x000000ff
+#define DRV_MB_PARAM_NVM_PUT_FILE_TYPE_SHIFT 0
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2
+
+#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3
+#define DRV_MB_PARAM_NVM_OFFSET_OFFSET 0
+#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF
+#define DRV_MB_PARAM_NVM_LEN_OFFSET 24
+#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000
+
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0
+#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8
+#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00
+
+#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0
+#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F
+#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0
+#define DRV_MB_PARAM_OV_CURR_CFG_OS 1
+#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2
+#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3
+
+#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000
+#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00
+#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF
+
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4
+#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5
+
+#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0
+#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF
+
+#define DRV_MB_PARAM_WOL_MASK (DRV_MB_PARAM_WOL_DEFAULT | \
+ DRV_MB_PARAM_WOL_DISABLED | \
+ DRV_MB_PARAM_WOL_ENABLED)
+#define DRV_MB_PARAM_WOL_DEFAULT DRV_MB_PARAM_UNLOAD_WOL_MCP
+#define DRV_MB_PARAM_WOL_DISABLED DRV_MB_PARAM_UNLOAD_WOL_DISABLED
+#define DRV_MB_PARAM_WOL_ENABLED DRV_MB_PARAM_UNLOAD_WOL_ENABLED
+
+#define DRV_MB_PARAM_ESWITCH_MODE_MASK (DRV_MB_PARAM_ESWITCH_MODE_NONE | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEB | \
+ DRV_MB_PARAM_ESWITCH_MODE_VEPA)
+#define DRV_MB_PARAM_ESWITCH_MODE_NONE 0x0
+#define DRV_MB_PARAM_ESWITCH_MODE_VEB 0x1
+#define DRV_MB_PARAM_ESWITCH_MODE_VEPA 0x2
+
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_MASK 0x1
+#define DRV_MB_PARAM_DUMMY_OEM_UPDATES_OFFSET 0
+
+#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0
+#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1
+#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2
+
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET 0
+#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET 2
+#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000fc
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET 8
+#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000ff00
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET 16
+#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xffff0000
+
+ /* Resource Allocation params - Driver version support */
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+#define DRV_MB_PARAM_BIST_UNKNOWN_TEST 0
+#define DRV_MB_PARAM_BIST_REGISTER_TEST 1
+#define DRV_MB_PARAM_BIST_CLOCK_TEST 2
+#define DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES 3
+#define DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX 4
+
+#define DRV_MB_PARAM_BIST_RC_UNKNOWN 0
+#define DRV_MB_PARAM_BIST_RC_PASSED 1
+#define DRV_MB_PARAM_BIST_RC_FAILED 2
+#define DRV_MB_PARAM_BIST_RC_INVALID_PARAMETER 3
+
+#define DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT 0
+#define DRV_MB_PARAM_BIST_TEST_INDEX_MASK 0x000000ff
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT 8
+#define DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_MASK 0x0000ff00
+
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_MASK 0x0000ffff
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_OFFSET 0
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_SMARTLINQ 0x00000001
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE 0x00000002
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL 0x00000004
+#define DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL 0x00000008
+#define DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK 0x00010000
+
+/* DRV_MSG_CODE_DEBUG_DATA_SEND parameters */
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_OFFSET 0
+#define DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE_MASK 0xff
+
+/* Driver attributes params */
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET 0
+#define DRV_MB_PARAM_ATTRIBUTE_KEY_MASK 0x00ffffff
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET 24
+#define DRV_MB_PARAM_ATTRIBUTE_CMD_MASK 0xff000000
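Illustrative sketch, not part of the patch: packing the driver-attribute key and command into one mailbox param with the offset/mask pairs above. The helper itself is hypothetical.

static inline u32 attr_param(u32 key, u8 cmd)
{
	return ((key << DRV_MB_PARAM_ATTRIBUTE_KEY_OFFSET) &
		DRV_MB_PARAM_ATTRIBUTE_KEY_MASK) |
	       (((u32)cmd << DRV_MB_PARAM_ATTRIBUTE_CMD_OFFSET) &
		DRV_MB_PARAM_ATTRIBUTE_CMD_MASK);
}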
+
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_OFFSET 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_MASK 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_IGNORE 0x0000ffff
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ID_SHIFT 0
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_SHIFT 16
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ALL_MASK 0x00010000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_SHIFT 17
+#define DRV_MB_PARAM_NVM_CFG_OPTION_INIT_MASK 0x00020000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_SHIFT 18
+#define DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT_MASK 0x00040000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_SHIFT 19
+#define DRV_MB_PARAM_NVM_CFG_OPTION_FREE_MASK 0x00080000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_SHIFT 20
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL_MASK 0x00100000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_SHIFT 21
+#define DRV_MB_PARAM_NVM_CFG_OPTION_DEFAULT_RESTORE_ALL_MASK 0x00200000
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_SHIFT 24
+#define DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID_MASK 0x0f000000
+
+/* DRV_MSG_CODE_GET_PERM_MAC parameters */
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_SHIFT 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MASK 0xF
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_PF 0
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_BMC 1
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_VF 2
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_LLDP 3
+#define DRV_MSG_CODE_GET_PERM_MAC_TYPE_MAX 4
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_SHIFT 8
+#define DRV_MSG_CODE_GET_PERM_MAC_INDEX_MASK 0xFFFF00
+
+#define FW_MSG_CODE(_code_) ((_code_) << FW_MSG_CODE_OFFSET)
+enum fw_msg_code_enum {
+ FW_MSG_CODE_UNSUPPORTED = FW_MSG_CODE(0x0000),
+ FW_MSG_CODE_NVM_OK = FW_MSG_CODE(0x0001),
+ FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK = FW_MSG_CODE(0x0040),
+ FW_MSG_CODE_PHY_OK = FW_MSG_CODE(0x0011),
+ FW_MSG_CODE_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_ERROR = FW_MSG_CODE(0x0017),
+ FW_MSG_CODE_TRANSCEIVER_DIAG_OK = FW_MSG_CODE(0x0016),
+ FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT = FW_MSG_CODE(0x0002),
+ FW_MSG_CODE_MDUMP_INVALID_CMD = FW_MSG_CODE(0x0003),
+ FW_MSG_CODE_OS_WOL_SUPPORTED = FW_MSG_CODE(0x0080),
+ FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE = FW_MSG_CODE(0x0087),
+ FW_MSG_CODE_DRV_LOAD_ENGINE = FW_MSG_CODE(0x1010),
+ FW_MSG_CODE_DRV_LOAD_PORT = FW_MSG_CODE(0x1011),
+ FW_MSG_CODE_DRV_LOAD_FUNCTION = FW_MSG_CODE(0x1012),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_PDA = FW_MSG_CODE(0x1020),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 = FW_MSG_CODE(0x1021),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG = FW_MSG_CODE(0x1022),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_HSI = FW_MSG_CODE(0x1023),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE = FW_MSG_CODE(0x1030),
+ FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT = FW_MSG_CODE(0x1031),
+ FW_MSG_CODE_DRV_LOAD_DONE = FW_MSG_CODE(0x1110),
+ FW_MSG_CODE_DRV_UNLOAD_ENGINE = FW_MSG_CODE(0x2011),
+ FW_MSG_CODE_DRV_UNLOAD_PORT = FW_MSG_CODE(0x2012),
+ FW_MSG_CODE_DRV_UNLOAD_FUNCTION = FW_MSG_CODE(0x2013),
+ FW_MSG_CODE_DRV_UNLOAD_DONE = FW_MSG_CODE(0x2110),
+ FW_MSG_CODE_RESOURCE_ALLOC_OK = FW_MSG_CODE(0x3400),
+ FW_MSG_CODE_RESOURCE_ALLOC_UNKNOWN = FW_MSG_CODE(0x3500),
+ FW_MSG_CODE_S_TAG_UPDATE_ACK_DONE = FW_MSG_CODE(0x3b00),
+ FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE = FW_MSG_CODE(0xb001),
+ FW_MSG_CODE_DEBUG_NOT_ENABLED = FW_MSG_CODE(0xb00a),
+ FW_MSG_CODE_DEBUG_DATA_SEND_OK = FW_MSG_CODE(0xb00b),
+};
+
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xffff0000
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000ffff
+#define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT 0
+
+/* Get PF RDMA protocol command response */
+#define FW_MB_PARAM_GET_PF_RDMA_NONE 0x0
+#define FW_MB_PARAM_GET_PF_RDMA_ROCE 0x1
+#define FW_MB_PARAM_GET_PF_RDMA_IWARP 0x2
+#define FW_MB_PARAM_GET_PF_RDMA_BOTH 0x3
+
+/* Get MFW feature support response */
+#define FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ BIT(0)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EEE BIT(1)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DRV_LOAD_TO BIT(2)
+#define FW_MB_PARAM_FEATURE_SUPPORT_LP_PRES_DET BIT(3)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RELAXED_ORD BIT(4)
+#define FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL BIT(5)
+#define FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL BIT(6)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IGU_CLEANUP BIT(7)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VF_DPM BIT(8)
+#define FW_MB_PARAM_FEATURE_SUPPORT_IDLE_CHK BIT(9)
+#define FW_MB_PARAM_FEATURE_SUPPORT_VLINK BIT(16)
+#define FW_MB_PARAM_FEATURE_SUPPORT_DISABLE_LLDP BIT(17)
+#define FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK BIT(18)
+#define FW_MB_PARAM_FEATURE_SUPPORT_RESTORE_DEFAULT_CFG BIT(19)
+
+#define FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED 0x00000001
+
+#define FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR BIT(0)
+
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_MASK 0x00000001
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID_SHIFT 0
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_MASK 0x00000002
+#define FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE_SHIFT 1
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_MASK 0x00000004
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID_SHIFT 2
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_MASK 0x00000008
+#define FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE_SHIFT 3
+
+#define FW_MB_PARAM_PPFID_BITMAP_MASK 0xff
+#define FW_MB_PARAM_PPFID_BITMAP_SHIFT 0
+
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000
+#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24
+
+enum MFW_DRV_MSG_TYPE {
+ MFW_DRV_MSG_LINK_CHANGE,
+ MFW_DRV_MSG_FLR_FW_ACK_FAILED,
+ MFW_DRV_MSG_VF_DISABLED,
+ MFW_DRV_MSG_LLDP_DATA_UPDATED,
+ MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED,
+ MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED,
+ MFW_DRV_MSG_ERROR_RECOVERY,
+ MFW_DRV_MSG_BW_UPDATE,
+ MFW_DRV_MSG_S_TAG_UPDATE,
+ MFW_DRV_MSG_GET_LAN_STATS,
+ MFW_DRV_MSG_GET_FCOE_STATS,
+ MFW_DRV_MSG_GET_ISCSI_STATS,
+ MFW_DRV_MSG_GET_RDMA_STATS,
+ MFW_DRV_MSG_FAILURE_DETECTED,
+ MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
+ MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED,
+ MFW_DRV_MSG_EEE_NEGOTIATION_COMPLETE,
+ MFW_DRV_MSG_GET_TLV_REQ,
+ MFW_DRV_MSG_OEM_CFG_UPDATE,
+ MFW_DRV_MSG_LLDP_RECEIVED_TLVS_UPDATED,
+ MFW_DRV_MSG_GENERIC_IDC,
+ MFW_DRV_MSG_XCVR_TX_FAULT,
+ MFW_DRV_MSG_XCVR_RX_LOS,
+ MFW_DRV_MSG_GET_FCOE_CAP,
+ MFW_DRV_MSG_GEN_LINK_DUMP,
+ MFW_DRV_MSG_GEN_IDLE_CHK,
+ MFW_DRV_MSG_DCBX_ADMIN_CFG_APPLIED,
+ MFW_DRV_MSG_MAX
+};
+
+#define MFW_DRV_MSG_MAX_DWORDS(msgs) ((((msgs) - 1) >> 2) + 1)
+#define MFW_DRV_MSG_DWORD(msg_id) ((msg_id) >> 2)
+#define MFW_DRV_MSG_OFFSET(msg_id) (((msg_id) & 0x3) << 3)
+#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id))
+
+struct public_mfw_mb {
+ u32 sup_msgs;
+ u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+ u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)];
+};
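A minimal sketch (not part of the patch) of how the MFW_DRV_MSG_* helpers and struct public_mfw_mb could be combined to test for a message the driver has not yet acknowledged, assuming each message occupies one byte as the OFFSET/MASK macros imply. The function is hypothetical.

static inline bool mfw_msg_pending(const struct public_mfw_mb *mb,
				   enum MFW_DRV_MSG_TYPE msg)
{
	/* msg[] holds what the MFW posted, ack[] what the driver acked */
	u32 diff = mb->msg[MFW_DRV_MSG_DWORD(msg)] ^
		   mb->ack[MFW_DRV_MSG_DWORD(msg)];

	return !!(diff & MFW_DRV_MSG_MASK(msg));
}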
+
+enum public_sections {
+ PUBLIC_DRV_MB,
+ PUBLIC_MFW_MB,
+ PUBLIC_GLOBAL,
+ PUBLIC_PATH,
+ PUBLIC_PORT,
+ PUBLIC_FUNC,
+ PUBLIC_MAX_SECTIONS
+};
+
+struct drv_ver_info_stc {
+ u32 ver;
+ u8 name[32];
+};
+
+/* Runtime data needs about 1/2K. We use 2K to be on the safe side.
+ * Please make sure data does not exceed this size.
+ */
+#define NUM_RUNTIME_DWORDS 16
+struct drv_init_hw_stc {
+ u32 init_hw_bitmask[NUM_RUNTIME_DWORDS];
+ u32 init_hw_data[NUM_RUNTIME_DWORDS * 32];
+};
+
+struct mcp_public_data {
+ u32 num_sections;
+ u32 sections[PUBLIC_MAX_SECTIONS];
+ struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX];
+ struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX];
+ struct public_global global;
+ struct public_path path[MCP_GLOB_PATH_MAX];
+ struct public_port port[MCP_GLOB_PORT_MAX];
+ struct public_func func[MCP_GLOB_FUNC_MAX];
+};
+
+#define I2C_TRANSCEIVER_ADDR 0xa0
+#define MAX_I2C_TRANSACTION_SIZE 16
+#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256
+
+/* OCBB definitions */
+enum tlvs {
+ /* Category 1: Device Properties */
+ DRV_TLV_CLP_STR,
+ DRV_TLV_CLP_STR_CTD,
+ /* Category 6: Device Configuration */
+ DRV_TLV_SCSI_TO,
+ DRV_TLV_R_T_TOV,
+ DRV_TLV_R_A_TOV,
+ DRV_TLV_E_D_TOV,
+ DRV_TLV_CR_TOV,
+ DRV_TLV_BOOT_TYPE,
+ /* Category 8: Port Configuration */
+ DRV_TLV_NPIV_ENABLED,
+ /* Category 10: Function Configuration */
+ DRV_TLV_FEATURE_FLAGS,
+ DRV_TLV_LOCAL_ADMIN_ADDR,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+ DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+ DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+ DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+ DRV_TLV_PROMISCUOUS_MODE,
+ DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+ DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+ DRV_TLV_OS_DRIVER_STATES,
+ DRV_TLV_PXE_BOOT_PROGRESS,
+ /* Category 12: FC/FCoE Configuration */
+ DRV_TLV_NPIV_STATE,
+ DRV_TLV_NUM_OF_NPIV_IDS,
+ DRV_TLV_SWITCH_NAME,
+ DRV_TLV_SWITCH_PORT_NUM,
+ DRV_TLV_SWITCH_PORT_ID,
+ DRV_TLV_VENDOR_NAME,
+ DRV_TLV_SWITCH_MODEL,
+ DRV_TLV_SWITCH_FW_VER,
+ DRV_TLV_QOS_PRIORITY_PER_802_1P,
+ DRV_TLV_PORT_ALIAS,
+ DRV_TLV_PORT_STATE,
+ DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_LINK_FAILURE_COUNT,
+ DRV_TLV_FCOE_BOOT_PROGRESS,
+ /* Category 13: iSCSI Configuration */
+ DRV_TLV_TARGET_LLMNR_ENABLED,
+ DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+ DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+ DRV_TLV_AUTHENTICATION_METHOD,
+ DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+ DRV_TLV_MAX_FRAME_SIZE,
+ DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+ DRV_TLV_ISCSI_BOOT_PROGRESS,
+ /* Category 20: Device Data */
+ DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+ DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+ DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+ DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+ DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+ DRV_TLV_NCSI_TX_BYTES_SENT,
+ /* Category 22: Base Port Data */
+ DRV_TLV_RX_DISCARDS,
+ DRV_TLV_RX_ERRORS,
+ DRV_TLV_TX_ERRORS,
+ DRV_TLV_TX_DISCARDS,
+ DRV_TLV_RX_FRAMES_RECEIVED,
+ DRV_TLV_TX_FRAMES_SENT,
+ /* Category 23: FC/FCoE Port Data */
+ DRV_TLV_RX_BROADCAST_PACKETS,
+ DRV_TLV_TX_BROADCAST_PACKETS,
+ /* Category 28: Base Function Data */
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+ DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+ DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_PF_RX_FRAMES_RECEIVED,
+ DRV_TLV_RX_BYTES_RECEIVED,
+ DRV_TLV_PF_TX_FRAMES_SENT,
+ DRV_TLV_TX_BYTES_SENT,
+ DRV_TLV_IOV_OFFLOAD,
+ DRV_TLV_PCI_ERRORS_CAP_ID,
+ DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+ DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+ DRV_TLV_CORRECTABLE_ERROR_STATUS,
+ DRV_TLV_CORRECTABLE_ERROR_MASK,
+ DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+ DRV_TLV_TX_QUEUES_EMPTY,
+ DRV_TLV_RX_QUEUES_EMPTY,
+ DRV_TLV_TX_QUEUES_FULL,
+ DRV_TLV_RX_QUEUES_FULL,
+ /* Category 29: FC/FCoE Function Data */
+ DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+ DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+ DRV_TLV_FCOE_TX_FRAMES_SENT,
+ DRV_TLV_FCOE_TX_BYTES_SENT,
+ DRV_TLV_CRC_ERROR_COUNT,
+ DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+ DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+ DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+ DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+ DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+ DRV_TLV_DISPARITY_ERROR_COUNT,
+ DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+ DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+ DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+ DRV_TLV_LAST_FLOGI_RJT,
+ DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+ DRV_TLV_FDISCS_SENT_COUNT,
+ DRV_TLV_FDISC_ACCS_RECEIVED,
+ DRV_TLV_FDISC_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_SENT_COUNT,
+ DRV_TLV_PLOGI_ACCS_RECEIVED,
+ DRV_TLV_PLOGI_RJTS_RECEIVED,
+ DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_1_TIMESTAMP,
+ DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_2_TIMESTAMP,
+ DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_3_TIMESTAMP,
+ DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_4_TIMESTAMP,
+ DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_PLOGI_5_TIMESTAMP,
+ DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+ DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+ DRV_TLV_LOGOS_ISSUED,
+ DRV_TLV_LOGO_ACCS_RECEIVED,
+ DRV_TLV_LOGO_RJTS_RECEIVED,
+ DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_1_TIMESTAMP,
+ DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_2_TIMESTAMP,
+ DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_3_TIMESTAMP,
+ DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_4_TIMESTAMP,
+ DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+ DRV_TLV_LOGO_5_TIMESTAMP,
+ DRV_TLV_LOGOS_RECEIVED,
+ DRV_TLV_ACCS_ISSUED,
+ DRV_TLV_PRLIS_ISSUED,
+ DRV_TLV_ACCS_RECEIVED,
+ DRV_TLV_ABTS_SENT_COUNT,
+ DRV_TLV_ABTS_ACCS_RECEIVED,
+ DRV_TLV_ABTS_RJTS_RECEIVED,
+ DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_1_TIMESTAMP,
+ DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_2_TIMESTAMP,
+ DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_3_TIMESTAMP,
+ DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_4_TIMESTAMP,
+ DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+ DRV_TLV_ABTS_5_TIMESTAMP,
+ DRV_TLV_RSCNS_RECEIVED,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+ DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+ DRV_TLV_LUN_RESETS_ISSUED,
+ DRV_TLV_ABORT_TASK_SETS_ISSUED,
+ DRV_TLV_TPRLOS_SENT,
+ DRV_TLV_NOS_SENT_COUNT,
+ DRV_TLV_NOS_RECEIVED_COUNT,
+ DRV_TLV_OLS_COUNT,
+ DRV_TLV_LR_COUNT,
+ DRV_TLV_LRR_COUNT,
+ DRV_TLV_LIP_SENT_COUNT,
+ DRV_TLV_LIP_RECEIVED_COUNT,
+ DRV_TLV_EOFA_COUNT,
+ DRV_TLV_EOFNI_COUNT,
+ DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+ DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+ DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+ DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+ DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+ DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+ DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+ DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+ DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+ /* Category 30: iSCSI Function Data */
+ DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+ DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+ DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+ DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+ DRV_TLV_ISCSI_PDU_TX_BYTES_SENT,
+ DRV_TLV_RDMA_DRV_VERSION
+};
+
+#define I2C_DEV_ADDR_A2 0xa2
+#define SFP_EEPROM_A2_TEMPERATURE_ADDR 0x60
+#define SFP_EEPROM_A2_TEMPERATURE_SIZE 2
+#define SFP_EEPROM_A2_VCC_ADDR 0x62
+#define SFP_EEPROM_A2_VCC_SIZE 2
+#define SFP_EEPROM_A2_TX_BIAS_ADDR 0x64
+#define SFP_EEPROM_A2_TX_BIAS_SIZE 2
+#define SFP_EEPROM_A2_TX_POWER_ADDR 0x66
+#define SFP_EEPROM_A2_TX_POWER_SIZE 2
+#define SFP_EEPROM_A2_RX_POWER_ADDR 0x68
+#define SFP_EEPROM_A2_RX_POWER_SIZE 2
+
+#define I2C_DEV_ADDR_A0 0xa0
+#define QSFP_EEPROM_A0_TEMPERATURE_ADDR 0x16
+#define QSFP_EEPROM_A0_TEMPERATURE_SIZE 2
+#define QSFP_EEPROM_A0_VCC_ADDR 0x1a
+#define QSFP_EEPROM_A0_VCC_SIZE 2
+#define QSFP_EEPROM_A0_TX1_BIAS_ADDR 0x2a
+#define QSFP_EEPROM_A0_TX1_BIAS_SIZE 2
+#define QSFP_EEPROM_A0_TX1_POWER_ADDR 0x32
+#define QSFP_EEPROM_A0_TX1_POWER_SIZE 2
+#define QSFP_EEPROM_A0_RX1_POWER_ADDR 0x22
+#define QSFP_EEPROM_A0_RX1_POWER_SIZE 2
+
+struct nvm_cfg_mac_address {
+ u32 mac_addr_hi;
+#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000ffff
+#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0
+
+ u32 mac_addr_lo;
+};
+
+struct nvm_cfg1_glob {
+ u32 generic_cont0;
+#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000ff0
+#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4
+#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0
+#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1
+#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3
+#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4
+#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5
+#define NVM_CFG1_GLOB_MF_MODE_BD 0x6
+#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7
+
+ u32 engineering_change[3];
+ u32 manufacturing_id;
+ u32 serial_number[4];
+ u32 pcie_cfg;
+ u32 mgmt_traffic;
+
+ u32 core_cfg;
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000ff
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_2X40G 0x0
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X50G 0x1
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_1X100G 0x2
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X10G_F 0x3
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X10G_E 0x4
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_BB_4X20G 0x5
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X40G 0xb
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X25G 0xc
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_1X25G 0xd
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_4X25G 0xe
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_2X10G 0xf
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X50G_R1 0x11
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_4X50G_R1 0x12
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R2 0x13
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_2X100G_R2 0x14
+#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_AHP_1X100G_R4 0x15
+
+ u32 e_lane_cfg1;
+ u32 e_lane_cfg2;
+ u32 f_lane_cfg1;
+ u32 f_lane_cfg2;
+ u32 mps10_preemphasis;
+ u32 mps10_driver_current;
+ u32 mps25_preemphasis;
+ u32 mps25_driver_current;
+ u32 pci_id;
+ u32 pci_subsys_id;
+ u32 bar;
+ u32 mps10_txfir_main;
+ u32 mps10_txfir_post;
+ u32 mps25_txfir_main;
+ u32 mps25_txfir_post;
+ u32 manufacture_ver;
+ u32 manufacture_time;
+ u32 led_global_settings;
+ u32 generic_cont1;
+
+ u32 mbi_version;
+#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000ff
+#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0
+#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000ff00
+#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8
+#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00ff0000
+#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16
+
+ u32 mbi_date;
+ u32 misc_sig;
+
+ u32 device_capabilities;
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_FCOE 0x2
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ISCSI 0x4
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ROCE 0x8
+#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_IWARP 0x10
+
+ u32 power_dissipated;
+ u32 power_consumed;
+ u32 efi_version;
+ u32 multi_network_modes_capability;
+ u32 nvm_cfg_version;
+ u32 nvm_cfg_new_option_seq;
+ u32 nvm_cfg_removed_option_seq;
+ u32 nvm_cfg_updated_value_seq;
+ u32 extended_serial_number[8];
+ u32 option_kit_pn[8];
+ u32 spare_pn[8];
+ u32 mps25_active_txfir_pre;
+ u32 mps25_active_txfir_main;
+ u32 mps25_active_txfir_post;
+ u32 features;
+ u32 tx_rx_eq_25g_hlpc;
+ u32 tx_rx_eq_25g_llpc;
+ u32 tx_rx_eq_25g_ac;
+ u32 tx_rx_eq_10g_pc;
+ u32 tx_rx_eq_10g_ac;
+ u32 tx_rx_eq_1g;
+ u32 tx_rx_eq_25g_bt;
+ u32 tx_rx_eq_10g_bt;
+ u32 generic_cont4;
+ u32 preboot_debug_mode_std;
+ u32 preboot_debug_mode_ext;
+ u32 ext_phy_cfg1;
+ u32 clocks;
+ u32 pre2_generic_cont_1;
+ u32 pre2_generic_cont_2;
+ u32 pre2_generic_cont_3;
+ u32 tx_rx_eq_50g_hlpc;
+ u32 tx_rx_eq_50g_mlpc;
+ u32 tx_rx_eq_50g_llpc;
+ u32 tx_rx_eq_50g_ac;
+ u32 trace_modules;
+ u32 pcie_class_code_fcoe;
+ u32 pcie_class_code_iscsi;
+ u32 no_provisioned_mac;
+ u32 lowest_mbi_version;
+ u32 generic_cont5;
+ u32 pre2_generic_cont_4;
+ u32 reserved[40];
+};
+
+struct nvm_cfg1_path {
+ u32 reserved[1];
+};
+
+struct nvm_cfg1_port {
+ u32 rel_to_opt123;
+ u32 rel_to_opt124;
+
+ u32 generic_cont0;
+#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000f0000
+#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16
+#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1
+#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2
+#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00f00000
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_FCOE 0x2
+#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ISCSI 0x4
+
+ u32 pcie_cfg;
+ u32 features;
+
+ u32 speed_cap_mask;
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000ffff
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G 0x4
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20
+#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G 0x40
+
+ u32 link_settings;
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000f
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_20G 0x3
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_BB_100G 0x7
+#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2
+#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000e0000
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_NONE 0x0
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_FIRECODE 0x1
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_RS 0x2
+#define NVM_CFG1_PORT_FEC_FORCE_MODE_AUTO 0x7
+
+ u32 phy_cfg;
+ u32 mgmt_traffic;
+
+ u32 ext_phy;
+ /* EEE power saving mode */
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_MASK 0x00ff0000
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_OFFSET 16
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_DISABLED 0x0
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_BALANCED 0x1
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_AGGRESSIVE 0x2
+#define NVM_CFG1_PORT_EEE_POWER_SAVING_MODE_LOW_LATENCY 0x3
+
+ u32 mba_cfg1;
+ u32 mba_cfg2;
+ u32 vf_cfg;
+ struct nvm_cfg_mac_address lldp_mac_address;
+ u32 led_port_settings;
+ u32 transceiver_00;
+ u32 device_ids;
+
+ u32 board_cfg;
+#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000ff
+#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0
+#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1
+#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2
+#define NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3
+#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4
+
+ u32 mnm_10g_cap;
+ u32 mnm_10g_ctrl;
+ u32 mnm_10g_misc;
+ u32 mnm_25g_cap;
+ u32 mnm_25g_ctrl;
+ u32 mnm_25g_misc;
+ u32 mnm_40g_cap;
+ u32 mnm_40g_ctrl;
+ u32 mnm_40g_misc;
+ u32 mnm_50g_cap;
+ u32 mnm_50g_ctrl;
+ u32 mnm_50g_misc;
+ u32 mnm_100g_cap;
+ u32 mnm_100g_ctrl;
+ u32 mnm_100g_misc;
+
+ u32 temperature;
+ u32 ext_phy_cfg1;
+
+ u32 extended_speed;
+#define NVM_CFG1_PORT_EXTENDED_SPEED_MASK 0x0000ffff
+#define NVM_CFG1_PORT_EXTENDED_SPEED_OFFSET 0
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_AN 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_EXTND_SPD_100G_P4 0x400
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_MASK 0xffff0000
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_OFFSET 16
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_RESERVED 0x1
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_1G 0x2
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_10G 0x4
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_20G 0x8
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_25G 0x10
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_40G 0x20
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R 0x40
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_50G_R2 0x80
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R2 0x100
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_R4 0x200
+#define NVM_CFG1_PORT_EXTENDED_SPEED_CAP_EXTND_SPD_100G_P4 0x400
+
+ u32 extended_fec_mode;
+ u32 port_generic_cont_01;
+ u32 port_generic_cont_02;
+ u32 phy_temp_monitor;
+ u32 reserved[109];
+};
+
+struct nvm_cfg1_func {
+ struct nvm_cfg_mac_address mac_address;
+ u32 rsrv1;
+ u32 rsrv2;
+ u32 device_id;
+ u32 cmn_cfg;
+ u32 pci_cfg;
+ struct nvm_cfg_mac_address fcoe_node_wwn_mac_addr;
+ struct nvm_cfg_mac_address fcoe_port_wwn_mac_addr;
+ u32 preboot_generic_cfg;
+ u32 features;
+ u32 mf_mode_feature;
+ u32 reserved[6];
+};
+
+struct nvm_cfg1 {
+ struct nvm_cfg1_glob glob;
+ struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX];
+ struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX];
+ struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX];
+};
+
+struct board_info {
+ u16 vendor_id;
+ u16 eth_did_suffix;
+ u16 sub_vendor_id;
+ u16 sub_device_id;
+ char *board_name;
+ char *friendly_name;
+};
+
+struct trace_module_info {
+ char *module_name;
+};
+
+#define NUM_TRACE_MODULES 25
+
+enum nvm_cfg_sections {
+ NVM_CFG_SECTION_NVM_CFG1,
+ NVM_CFG_SECTION_MAX
+};
+
+struct nvm_cfg {
+ u32 num_sections;
+ u32 sections_offset[NVM_CFG_SECTION_MAX];
+ struct nvm_cfg1 cfg1;
+};
+
+#define PORT_0 0
+#define PORT_1 1
+#define PORT_2 2
+#define PORT_3 3
+
+extern struct spad_layout g_spad;
+struct spad_layout {
+ struct nvm_cfg nvm_cfg;
+ struct mcp_public_data public_data;
+};
+
+#define MCP_SPAD_SIZE 0x00028000 /* 160 KB */
+
+#define SPAD_OFFSET(addr) (((u32)(addr) - (u32)CPU_SPAD_BASE))
+
+#define TO_OFFSIZE(_offset, _size) \
+ ((u32)((((u32)(_offset) >> 2) << OFFSIZE_OFFSET_OFFSET) | \
+ (((u32)(_size) >> 2) << OFFSIZE_SIZE_OFFSET)))
+
+enum spad_sections {
+ SPAD_SECTION_TRACE,
+ SPAD_SECTION_NVM_CFG,
+ SPAD_SECTION_PUBLIC,
+ SPAD_SECTION_PRIVATE,
+ SPAD_SECTION_MAX
+};
+
+#define STRUCT_OFFSET(f) (STATIC_INIT_BASE + \
+ __builtin_offsetof(struct static_init, f))
+
+/* This section is located at a fixed place at the beginning of the
+ * scratchpad, to ensure that the MCP trace is not overwritten during an MFW
+ * upgrade. All the rest of the data has a floating location which differs
+ * from version to version and is pointed to by the mcp_meta_data below.
+ * Moreover, the spad_layout section is part of the MFW firmware and is
+ * loaded with it from NVRAM in order to clear this portion.
+ */
+struct static_init {
+ u32 num_sections;
+ offsize_t sections[SPAD_SECTION_MAX];
+#define SECTION(_sec_) (*((offsize_t *)(STRUCT_OFFSET(sections[_sec_]))))
+
+ u32 tim_hash[8];
+#define PRESERVED_TIM_HASH ((u8 *)(STRUCT_OFFSET(tim_hash)))
+ u32 tpu_hash[8];
+#define PRESERVED_TPU_HASH ((u8 *)(STRUCT_OFFSET(tpu_hash)))
+ u32 secure_pcie_fw_ver;
+#define SECURE_PCIE_FW_VER (*((u32 *)(STRUCT_OFFSET(secure_pcie_fw_ver))))
+ u32 secure_running_mfw;
+#define SECURE_RUNNING_MFW (*((u32 *)(STRUCT_OFFSET(secure_running_mfw))))
+ struct mcp_trace trace;
+};
+
+#define CRC_MAGIC_VALUE 0xDEBB20E3
+#define CRC32_POLYNOMIAL 0xEDB88320
+#define _KB(x) ((x) * 1024)
+#define _MB(x) (_KB(x) * 1024)
+#define NVM_CRC_SIZE (sizeof(u32))
+enum nvm_sw_arbitrator {
+ NVM_SW_ARB_HOST,
+ NVM_SW_ARB_MCP,
+ NVM_SW_ARB_UART,
+ NVM_SW_ARB_RESERVED
+};
+
+struct legacy_bootstrap_region {
+ u32 magic_value;
+#define NVM_MAGIC_VALUE 0x669955aa
+ u32 sram_start_addr;
+ u32 code_len;
+ u32 code_start_addr;
+ u32 crc;
+};
+
+struct nvm_code_entry {
+ u32 image_type;
+ u32 nvm_start_addr;
+ u32 len;
+ u32 sram_start_addr;
+ u32 sram_run_addr;
+};
+
+enum nvm_image_type {
+ NVM_TYPE_TIM1 = 0x01,
+ NVM_TYPE_TIM2 = 0x02,
+ NVM_TYPE_MIM1 = 0x03,
+ NVM_TYPE_MIM2 = 0x04,
+ NVM_TYPE_MBA = 0x05,
+ NVM_TYPE_MODULES_PN = 0x06,
+ NVM_TYPE_VPD = 0x07,
+ NVM_TYPE_MFW_TRACE1 = 0x08,
+ NVM_TYPE_MFW_TRACE2 = 0x09,
+ NVM_TYPE_NVM_CFG1 = 0x0a,
+ NVM_TYPE_L2B = 0x0b,
+ NVM_TYPE_DIR1 = 0x0c,
+ NVM_TYPE_EAGLE_FW1 = 0x0d,
+ NVM_TYPE_FALCON_FW1 = 0x0e,
+ NVM_TYPE_PCIE_FW1 = 0x0f,
+ NVM_TYPE_HW_SET = 0x10,
+ NVM_TYPE_LIM = 0x11,
+ NVM_TYPE_AVS_FW1 = 0x12,
+ NVM_TYPE_DIR2 = 0x13,
+ NVM_TYPE_CCM = 0x14,
+ NVM_TYPE_EAGLE_FW2 = 0x15,
+ NVM_TYPE_FALCON_FW2 = 0x16,
+ NVM_TYPE_PCIE_FW2 = 0x17,
+ NVM_TYPE_AVS_FW2 = 0x18,
+ NVM_TYPE_INIT_HW = 0x19,
+ NVM_TYPE_DEFAULT_CFG = 0x1a,
+ NVM_TYPE_MDUMP = 0x1b,
+ NVM_TYPE_NVM_META = 0x1c,
+ NVM_TYPE_ISCSI_CFG = 0x1d,
+ NVM_TYPE_FCOE_CFG = 0x1f,
+ NVM_TYPE_ETH_PHY_FW1 = 0x20,
+ NVM_TYPE_ETH_PHY_FW2 = 0x21,
+ NVM_TYPE_BDN = 0x22,
+ NVM_TYPE_8485X_PHY_FW = 0x23,
+ NVM_TYPE_PUB_KEY = 0x24,
+ NVM_TYPE_RECOVERY = 0x25,
+ NVM_TYPE_PLDM = 0x26,
+ NVM_TYPE_UPK1 = 0x27,
+ NVM_TYPE_UPK2 = 0x28,
+ NVM_TYPE_MASTER_KC = 0x29,
+ NVM_TYPE_BACKUP_KC = 0x2a,
+ NVM_TYPE_HW_DUMP = 0x2b,
+ NVM_TYPE_HW_DUMP_OUT = 0x2c,
+ NVM_TYPE_BIN_NVM_META = 0x30,
+ NVM_TYPE_ROM_TEST = 0xf0,
+ NVM_TYPE_88X33X0_PHY_FW = 0x31,
+ NVM_TYPE_88X33X0_PHY_SLAVE_FW = 0x32,
+ NVM_TYPE_IDLE_CHK = 0x33,
+ NVM_TYPE_MAX,
+};
+
+#define MAX_NVM_DIR_ENTRIES 100
+
+struct nvm_dir_meta {
+ u32 dir_id;
+ u32 nvm_dir_addr;
+ u32 num_images;
+ u32 next_mfw_to_run;
+};
+
+struct nvm_dir {
+ s32 seq;
+#define NVM_DIR_NEXT_MFW_MASK 0x00000001
+#define NVM_DIR_SEQ_MASK 0xfffffffe
+#define NVM_DIR_NEXT_MFW(seq) ((seq) & NVM_DIR_NEXT_MFW_MASK)
+#define NVM_DIR_UPDATE_SEQ(_seq, swap_mfw)\
+ ({ \
+ _seq = (((_seq + 2) & \
+ NVM_DIR_SEQ_MASK) | \
+ (NVM_DIR_NEXT_MFW(_seq ^ (swap_mfw))));\
+ })
+
+#define IS_DIR_SEQ_VALID(seq) (((seq) & NVM_DIR_SEQ_MASK) != \
+ NVM_DIR_SEQ_MASK)
+
+ u32 num_images;
+ u32 rsrv;
+ struct nvm_code_entry code[1]; /* Up to MAX_NVM_DIR_ENTRIES */
+};
+
+#define NVM_DIR_SIZE(_num_images) (sizeof(struct nvm_dir) + \
+ ((_num_images) - 1) *\
+ sizeof(struct nvm_code_entry) +\
+ NVM_CRC_SIZE)
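Illustrative only, not part of the patch: NVM_DIR_SIZE() sizes a directory that ends in a flexible code[] array plus a trailing CRC; num_images below is a hypothetical variable.

/* sizeof(struct nvm_dir) already counts one nvm_code_entry, so only
 * (num_images - 1) further entries plus the CRC dword are added.
 */
u32 dir_bytes = NVM_DIR_SIZE(num_images);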
+
+struct nvm_vpd_image {
+ u32 format_revision;
+#define VPD_IMAGE_VERSION 1
+
+ u8 vpd_data[1];
+};
+
+#define DIR_ID_1 (0)
+#define DIR_ID_2 (1)
+#define MAX_DIR_IDS (2)
+
+#define MFW_BUNDLE_1 (0)
+#define MFW_BUNDLE_2 (1)
+#define MAX_MFW_BUNDLES (2)
+
+#define FLASH_PAGE_SIZE 0x1000
+#define NVM_DIR_MAX_SIZE (FLASH_PAGE_SIZE)
+#define LEGACY_ASIC_MIM_MAX_SIZE (_KB(1200))
+
+#define FPGA_MIM_MAX_SIZE (0x40000)
+
+#define LIM_MAX_SIZE ((2 * FLASH_PAGE_SIZE) - \
+ sizeof(struct legacy_bootstrap_region) \
+ - NVM_RSV_SIZE)
+#define LIM_OFFSET (NVM_OFFSET(lim_image))
+#define NVM_RSV_SIZE (44)
+#define GET_MIM_MAX_SIZE(is_asic, is_e4) (LEGACY_ASIC_MIM_MAX_SIZE)
+#define GET_MIM_OFFSET(idx, is_asic, is_e4) (NVM_OFFSET(dir[MAX_MFW_BUNDLES])\
+ + (((idx) == NVM_TYPE_MIM2) ? \
+ GET_MIM_MAX_SIZE(is_asic, is_e4)\
+ : 0))
+#define GET_NVM_FIXED_AREA_SIZE(is_asic, is_e4) (sizeof(struct nvm_image) + \
+ GET_MIM_MAX_SIZE(is_asic,\
+ is_e4) * 2)
+
+union nvm_dir_union {
+ struct nvm_dir dir;
+ u8 page[FLASH_PAGE_SIZE];
+};
+
+struct nvm_image {
+ struct legacy_bootstrap_region bootstrap;
+ u8 rsrv[NVM_RSV_SIZE];
+ u8 lim_image[LIM_MAX_SIZE];
+ union nvm_dir_union dir[MAX_MFW_BUNDLES];
+};
+
+#define NVM_OFFSET(f) ((u32_t)((int_ptr_t)(&(((struct nvm_image *)0)->(f)))))
+
+struct hw_set_info {
+ u32 reg_type;
+#define GRC_REG_TYPE 1
+#define PHY_REG_TYPE 2
+#define PCI_REG_TYPE 4
+
+ u32 bank_num;
+ u32 pf_num;
+ u32 operation;
+#define READ_OP 1
+#define WRITE_OP 2
+#define RMW_SET_OP 3
+#define RMW_CLR_OP 4
+
+ u32 reg_addr;
+ u32 reg_data;
+
+ u32 reset_type;
+#define POR_RESET_TYPE BIT(0)
+#define HARD_RESET_TYPE BIT(1)
+#define CORE_RESET_TYPE BIT(2)
+#define MCP_RESET_TYPE BIT(3)
+#define PERSET_ASSERT BIT(4)
+#define PERSET_DEASSERT BIT(5)
+};
+
+struct hw_set_image {
+ u32 format_version;
+#define HW_SET_IMAGE_VERSION 1
+ u32 no_hw_sets;
+ struct hw_set_info hw_sets[1];
+};
+
+#define MAX_SUPPORTED_NVM_OPTIONS 1000
+
+#define NVM_META_BIN_OPTION_OFFSET_MASK 0x0000ffff
+#define NVM_META_BIN_OPTION_OFFSET_SHIFT 0
+#define NVM_META_BIN_OPTION_LEN_MASK 0x00ff0000
+#define NVM_META_BIN_OPTION_LEN_OFFSET 16
+#define NVM_META_BIN_OPTION_ENTITY_MASK 0x03000000
+#define NVM_META_BIN_OPTION_ENTITY_SHIFT 24
+#define NVM_META_BIN_OPTION_ENTITY_GLOB 0
+#define NVM_META_BIN_OPTION_ENTITY_PORT 1
+#define NVM_META_BIN_OPTION_ENTITY_FUNC 2
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_MASK 0x0c000000
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_SHIFT 26
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_USER 0
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FIXED 1
+#define NVM_META_BIN_OPTION_CONFIG_TYPE_FORCED 2
+
+struct nvm_meta_bin_t {
+ u32 magic;
+#define NVM_META_BIN_MAGIC 0x669955bb
+ u32 version;
+#define NVM_META_BIN_VERSION 1
+ u32 num_options;
+ u32 options[0];
+};
+#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ooo.c b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
index b8c5641b29a8..5d725f59db24 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ooo.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ooo.c
@@ -26,12 +26,12 @@ static struct qed_ooo_archipelago
u32 idx = (cid & 0xffff) - p_ooo_info->cid_base;
struct qed_ooo_archipelago *p_archipelago;
- if (idx >= p_ooo_info->max_num_archipelagos)
+ if (unlikely(idx >= p_ooo_info->max_num_archipelagos))
return NULL;
p_archipelago = &p_ooo_info->p_archipelagos_mem[idx];
- if (list_empty(&p_archipelago->isles_list))
+ if (unlikely(list_empty(&p_archipelago->isles_list)))
return NULL;
return p_archipelago;
@@ -46,7 +46,7 @@ static struct qed_ooo_isle *qed_ooo_seek_isle(struct qed_hwfn *p_hwfn,
u8 the_num_of_isle = 1;
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago) {
+ if (unlikely(!p_archipelago)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return NULL;
@@ -362,7 +362,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
if (ooo_isle > 1) {
p_prev_isle = qed_ooo_seek_isle(p_hwfn,
p_ooo_info, cid, ooo_isle - 1);
- if (!p_prev_isle) {
+ if (unlikely(!p_prev_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n",
ooo_isle - 1, cid);
@@ -370,7 +370,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
}
}
p_archipelago = qed_ooo_seek_archipelago(p_hwfn, p_ooo_info, cid);
- if (!p_archipelago && (ooo_isle != 1)) {
+ if (unlikely(!p_archipelago && ooo_isle != 1)) {
DP_NOTICE(p_hwfn,
"Connection %d is not found in OOO list\n", cid);
return;
@@ -381,7 +381,7 @@ void qed_ooo_add_new_isle(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle, list_entry);
list_del(&p_isle->list_entry);
- if (!list_empty(&p_isle->buffers_list)) {
+ if (unlikely(!list_empty(&p_isle->buffers_list))) {
DP_NOTICE(p_hwfn, "Free isle is not empty\n");
INIT_LIST_HEAD(&p_isle->buffers_list);
}
@@ -418,13 +418,13 @@ void qed_ooo_add_new_buffer(struct qed_hwfn *p_hwfn,
struct qed_ooo_isle *p_isle = NULL;
p_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid, ooo_isle);
- if (!p_isle) {
+ if (unlikely(!p_isle)) {
DP_NOTICE(p_hwfn,
"Isle %d is not found(cid %d)\n", ooo_isle, cid);
return;
}
- if (buffer_side == QED_OOO_LEFT_BUF)
+ if (unlikely(buffer_side == QED_OOO_LEFT_BUF))
list_add(&p_buffer->list_entry, &p_isle->buffers_list);
else
list_add_tail(&p_buffer->list_entry, &p_isle->buffers_list);
@@ -438,7 +438,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
p_right_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle + 1);
- if (!p_right_isle) {
+ if (unlikely(!p_right_isle)) {
DP_NOTICE(p_hwfn,
"Right isle %d is not found(cid %d)\n",
left_isle + 1, cid);
@@ -450,7 +450,7 @@ void qed_ooo_join_isles(struct qed_hwfn *p_hwfn,
if (left_isle) {
p_left_isle = qed_ooo_seek_isle(p_hwfn, p_ooo_info, cid,
left_isle);
- if (!p_left_isle) {
+ if (unlikely(!p_left_isle)) {
DP_NOTICE(p_hwfn,
"Left isle %d is not found(cid %d)\n",
left_isle, cid);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ptp.c b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
index 2c62d732e5c2..295ce435a1a4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ptp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ptp.c
@@ -63,12 +63,12 @@ static int qed_ptp_res_lock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
DP_INFO(p_hwfn, "PF doesn't have lock ownership\n");
return -EBUSY;
- } else if (!rc && !params.b_granted) {
+ } else if (!params.b_granted) {
DP_INFO(p_hwfn, "Failed to acquire ptp resource lock\n");
return -EBUSY;
}
- return rc;
+ return 0;
}
static int qed_ptp_res_unlock(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.c b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
index 4f4b79250a2b..23b668de4640 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.c
@@ -19,9 +19,11 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
+#include <net/addrconf.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -33,7 +35,6 @@
#include "qed_roce.h"
#include "qed_sp.h"
-
int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
struct qed_bmap *bmap, u32 max_count, char *name)
{
@@ -410,18 +411,6 @@ static void qed_rdma_free(struct qed_hwfn *p_hwfn)
qed_rdma_resc_free(p_hwfn);
}
-static void qed_rdma_get_guid(struct qed_hwfn *p_hwfn, u8 *guid)
-{
- guid[0] = p_hwfn->hw_info.hw_mac_addr[0] ^ 2;
- guid[1] = p_hwfn->hw_info.hw_mac_addr[1];
- guid[2] = p_hwfn->hw_info.hw_mac_addr[2];
- guid[3] = 0xff;
- guid[4] = 0xfe;
- guid[5] = p_hwfn->hw_info.hw_mac_addr[3];
- guid[6] = p_hwfn->hw_info.hw_mac_addr[4];
- guid[7] = p_hwfn->hw_info.hw_mac_addr[5];
-}
-
static void qed_rdma_init_events(struct qed_hwfn *p_hwfn,
struct qed_rdma_start_in_params *params)
{
@@ -449,7 +438,9 @@ static void qed_rdma_init_devinfo(struct qed_hwfn *p_hwfn,
dev->fw_ver = (FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) |
(FW_REVISION_VERSION << 8) | (FW_ENGINEERING_VERSION);
- qed_rdma_get_guid(p_hwfn, (u8 *)&dev->sys_image_guid);
+ addrconf_addr_eui48((u8 *)&dev->sys_image_guid,
+ p_hwfn->hw_info.hw_mac_addr);
+
dev->node_guid = dev->sys_image_guid;
dev->max_sge = min_t(u32, RDMA_MAX_SGE_PER_SQ_WQE,
@@ -865,8 +856,8 @@ static void qed_rdma_cnq_prod_update(void *rdma_cxt, u8 qz_offset, u16 prod)
}
qz_num = p_hwfn->p_rdma_info->queue_zone_base + qz_offset;
- addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_COMMON_QUEUE_CONS_OFFSET(qz_num);
+ addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_COMMON_QUEUE_CONS, qz_num);
REG_WR16(p_hwfn, addr, prod);
@@ -1903,7 +1894,6 @@ void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
}
-
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
p_hwfn->db_bar_no_edpm = true;
@@ -1966,7 +1956,7 @@ static void qed_rdma_remove_user(void *rdma_cxt, u16 dpi)
static int qed_roce_ll2_set_mac_filter(struct qed_dev *cdev,
u8 *old_mac_address,
- u8 *new_mac_address)
+ const u8 *new_mac_address)
{
int rc = 0;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_rdma.h b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
index 6a1de3a25257..2753723011dd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_rdma.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_rdma.h
@@ -168,16 +168,19 @@ static inline bool qed_rdma_is_xrc_qp(struct qed_rdma_qp *qp)
return false;
}
+
#if IS_ENABLED(CONFIG_QED_RDMA)
void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn);
void qed_rdma_info_free(struct qed_hwfn *p_hwfn);
#else
-static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
+static inline void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn,
+ struct qed_ptt *p_ptt) {}
static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt) {}
-static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn) {return -EINVAL;}
+static inline int qed_rdma_info_alloc(struct qed_hwfn *p_hwfn)
+ {return -EINVAL; }
static inline void qed_rdma_info_free(struct qed_hwfn *p_hwfn) {}
#endif
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index da1b7fdcbda7..6f1a52e6beb2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -126,6 +126,8 @@
0x1009c4UL
#define QM_REG_PF_EN \
0x2f2ea4UL
+#define QM_REG_RLGLBLUPPERBOUND \
+ 0x2f3c00UL
#define TCFC_REG_WEAK_ENABLE_VF \
0x2d0704UL
#define TCFC_REG_STRONG_ENABLE_PF \
@@ -576,7 +578,7 @@
#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL
#define PRS_REG_GRE_PROTOCOL 0x1f0734UL
#define PRS_REG_VXLAN_PORT 0x1f0738UL
-#define PRS_REG_OUTPUT_FORMAT_4_0_BB_K2 0x1f099cUL
+#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL
#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL
#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE (0x1 << 0)
@@ -595,8 +597,8 @@
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL
#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL
#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5 0x10092cUL
-#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5 0x100930UL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2 0x10092cUL
+#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2 0x100930UL
#define NIG_REG_NGE_IP_ENABLE 0x508b28UL
#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL
@@ -606,7 +608,10 @@
#define QM_REG_WFQPFWEIGHT 0x2f4e80UL
#define QM_REG_WFQVPWEIGHT 0x2fa000UL
-
+#define QM_REG_WFQVPUPPERBOUND \
+ 0x2fb000UL
+#define QM_REG_WFQVPCRD \
+ 0x2fc000UL
#define PGLCS_REG_DBG_SELECT_K2_E5 \
0x001d14UL
#define PGLCS_REG_DBG_DWORD_ENABLE_K2_E5 \
@@ -1437,29 +1442,29 @@
0x1401140UL
#define XSEM_REG_SYNC_DBG_EMPTY \
0x1401160UL
-#define XSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_ACTIVE \
0x1401400UL
-#define XSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define XSEM_REG_SLOW_DBG_MODE \
0x1401404UL
-#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define XSEM_REG_DBG_FRAME_MODE \
0x1401408UL
#define XSEM_REG_DBG_GPRE_VECT \
0x1401410UL
-#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define XSEM_REG_DBG_MODE1_CFG \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
0x1440000UL
#define YSEM_REG_SYNC_DBG_EMPTY \
0x1501160UL
-#define YSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_ACTIVE \
0x1501400UL
-#define YSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define YSEM_REG_SLOW_DBG_MODE \
0x1501404UL
-#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define YSEM_REG_DBG_FRAME_MODE \
0x1501408UL
#define YSEM_REG_DBG_GPRE_VECT \
0x1501410UL
-#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define YSEM_REG_DBG_MODE1_CFG \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
0x1540000UL
@@ -1467,15 +1472,15 @@
0x1601140UL
#define PSEM_REG_SYNC_DBG_EMPTY \
0x1601160UL
-#define PSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_ACTIVE \
0x1601400UL
-#define PSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define PSEM_REG_SLOW_DBG_MODE \
0x1601404UL
-#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define PSEM_REG_DBG_FRAME_MODE \
0x1601408UL
#define PSEM_REG_DBG_GPRE_VECT \
0x1601410UL
-#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define PSEM_REG_DBG_MODE1_CFG \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
0x1640000UL
@@ -1483,15 +1488,15 @@
0x1701140UL
#define TSEM_REG_SYNC_DBG_EMPTY \
0x1701160UL
-#define TSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_ACTIVE \
0x1701400UL
-#define TSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define TSEM_REG_SLOW_DBG_MODE \
0x1701404UL
-#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define TSEM_REG_DBG_FRAME_MODE \
0x1701408UL
#define TSEM_REG_DBG_GPRE_VECT \
0x1701410UL
-#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define TSEM_REG_DBG_MODE1_CFG \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
0x1740000UL
@@ -1499,15 +1504,15 @@
0x1801140UL
#define MSEM_REG_SYNC_DBG_EMPTY \
0x1801160UL
-#define MSEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_ACTIVE \
0x1801400UL
-#define MSEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define MSEM_REG_SLOW_DBG_MODE \
0x1801404UL
-#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define MSEM_REG_DBG_FRAME_MODE \
0x1801408UL
#define MSEM_REG_DBG_GPRE_VECT \
0x1801410UL
-#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define MSEM_REG_DBG_MODE1_CFG \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
@@ -1517,21 +1522,21 @@
20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
-#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
+#define USEM_REG_SLOW_DBG_ACTIVE \
0x1901400UL
-#define USEM_REG_SLOW_DBG_MODE_BB_K2 \
+#define USEM_REG_SLOW_DBG_MODE \
0x1901404UL
-#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
+#define USEM_REG_DBG_FRAME_MODE \
0x1901408UL
#define USEM_REG_DBG_GPRE_VECT \
0x1901410UL
-#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
+#define USEM_REG_DBG_MODE1_CFG \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
0x000748UL
-#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
+#define SEM_FAST_REG_DBG_MODSRC_DISABLE \
0x00074cUL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
0x000750UL
@@ -1561,7 +1566,7 @@
0x341500UL
#define BRB_REG_BIG_RAM_DATA_SIZE \
64
-#define SEM_FAST_REG_STALL_0_BB_K2 \
+#define SEM_FAST_REG_STALL_0 \
0x000488UL
#define SEM_FAST_REG_STALLED \
0x000494UL
@@ -1619,35 +1624,35 @@
0x008c14UL
#define NWS_REG_NWS_CMU_K2 \
0x720000UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_7_0_K2 \
0x000680UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_ADDR_15_8_K2 \
0x000684UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_7_0_K2 \
0x0006c0UL
-#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2_E5 \
+#define PHY_NW_IP_REG_PHY0_TOP_TBUS_DATA_11_8_K2 \
0x0006c4UL
-#define MS_REG_MS_CMU_K2_E5 \
+#define MS_REG_MS_CMU_K2 \
0x6a4000UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_SGMII_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X130_K2 \
0x000208UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X131_K2 \
0x00020cUL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X132_K2 \
0x000210UL
-#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2_E5 \
+#define PHY_PCIE_IP_REG_AHB_CMU_CSR_0_X133_K2 \
0x000214UL
-#define PHY_PCIE_REG_PHY0_K2_E5 \
+#define PHY_PCIE_REG_PHY0_K2 \
0x620000UL
-#define PHY_PCIE_REG_PHY1_K2_E5 \
+#define PHY_PCIE_REG_PHY1_K2 \
0x624000UL
#define NIG_REG_ROCE_DUPLICATE_TO_HOST 0x5088f0UL
#define NIG_REG_PPF_TO_ENGINE_SEL 0x508900UL
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index cf5baa5e59bc..071b4aeaddf2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -792,7 +792,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
if (rc)
goto err;
-
/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
qp->orq_num_pages * RDMA_RING_PAGE_SIZE,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
index e27dd9a4547e..7a3bd749e1e4 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h
@@ -6,47 +6,47 @@
#include <linux/types.h>
/**
- * @brief qed_selftest_memory - Perform memory test
+ * qed_selftest_memory(): Perform memory test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_memory(struct qed_dev *cdev);
/**
- * @brief qed_selftest_interrupt - Perform interrupt test
+ * qed_selftest_interrupt(): Perform interrupt test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_interrupt(struct qed_dev *cdev);
/**
- * @brief qed_selftest_register - Perform register test
+ * qed_selftest_register(): Perform register test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_register(struct qed_dev *cdev);
/**
- * @brief qed_selftest_clock - Perform clock test
+ * qed_selftest_clock(): Perform clock test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_clock(struct qed_dev *cdev);
/**
- * @brief qed_selftest_nvram - Perform nvram test
+ * qed_selftest_nvram(): Perform nvram test.
*
- * @param cdev
+ * @cdev: Qed dev pointer.
*
- * @return int
+ * Return: Int.
*/
int qed_selftest_nvram(struct qed_dev *cdev);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 60ff3222bf55..4fb02a5579ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -23,31 +23,26 @@ enum spq_mode {
};
struct qed_spq_comp_cb {
- void (*function)(struct qed_hwfn *,
- void *,
- union event_ring_data *,
+ void (*function)(struct qed_hwfn *p_hwfn,
+ void *cookie,
+ union event_ring_data *data,
u8 fw_return_code);
void *cookie;
};
/**
- * @brief qed_eth_cqe_completion - handles the completion of a
- * ramrod on the cqe ring
+ * qed_eth_cqe_completion(): handles the completion of a
+ * ramrod on the cqe ring.
*
- * @param p_hwfn
- * @param cqe
+ * @p_hwfn: HW device data.
+ * @cqe: CQE.
*
- * @return int
+ * Return: Int.
*/
int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe);
-/**
- * @file
- *
- * QED Slow-hwfn queue interface
- */
-
+ /* QED Slow-hwfn queue interface */
union ramrod_data {
struct pf_start_ramrod_data pf_start;
struct pf_update_ramrod_data pf_update;
@@ -58,7 +53,7 @@ union ramrod_data {
struct tx_queue_stop_ramrod_data tx_queue_stop;
struct vport_start_ramrod_data vport_start;
struct vport_stop_ramrod_data vport_stop;
- struct rx_update_gft_filter_data rx_update_gft;
+ struct rx_update_gft_filter_ramrod_data rx_update_gft;
struct vport_update_ramrod_data vport_update;
struct core_rx_start_ramrod_data core_rx_queue_start;
struct core_rx_stop_ramrod_data core_rx_queue_stop;
@@ -207,117 +202,128 @@ struct qed_spq {
};
/**
- * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that
- * Pends it to the future list.
+ * qed_spq_post(): Posts a Slow hwfn request to FW or, lacking that,
+ * pends it to the future list.
*
- * @param p_hwfn
- * @param p_req
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry to post.
+ * @fw_return_code: Return code from firmware.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_post(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent,
u8 *fw_return_code);
/**
- * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ.
+ * qed_spq_alloc(): Allocates & initializes the SPQ and EQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_spq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_setup - Reset the SPQ to its start state.
+ * qed_spq_setup(): Reset the SPQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_spq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_deallocate - Deallocates the given SPQ struct.
+ * qed_spq_free(): Deallocates the given SPQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_spq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_spq_get_entry - Obtain an entrry from the spq
- * free pool list.
- *
- *
+ * qed_spq_get_entry(): Obtain an entry from the spq
+ * free pool list.
*
- * @param p_hwfn
- * @param pp_ent
+ * @p_hwfn: HW device data.
+ * @pp_ent: Pointer to the obtained SPQ entry.
*
- * @return int
+ * Return: Int.
*/
int
qed_spq_get_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry **pp_ent);
/**
- * @brief qed_spq_return_entry - Return an entry to spq free
- * pool list
+ * qed_spq_return_entry(): Return an entry to spq free pool list.
*
- * @param p_hwfn
- * @param p_ent
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry to return.
+ *
+ * Return: Void.
*/
void qed_spq_return_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
/**
- * @brief qed_eq_allocate - Allocates & initializes an EQ struct
+ * qed_eq_alloc(): Allocates & initializes an EQ struct.
*
- * @param p_hwfn
- * @param num_elem number of elements in the eq
+ * @p_hwfn: HW device data.
+ * @num_elem: number of elements in the eq.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem);
/**
- * @brief qed_eq_setup - Reset the EQ to its start state.
+ * qed_eq_setup(): Reset the EQ to its start state.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_eq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_free - deallocates the given EQ struct.
+ * qed_eq_free(): deallocates the given EQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_eq_free(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_eq_prod_update - update the FW with default EQ producer
+ * qed_eq_prod_update(): update the FW with default EQ producer.
+ *
+ * @p_hwfn: HW device data.
+ * @prod: EQ producer index.
*
- * @param p_hwfn
- * @param prod
+ * Return: Void.
*/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn,
u16 prod);
/**
- * @brief qed_eq_completion - Completes currently pending EQ elements
+ * qed_eq_completion(): Completes currently pending EQ elements.
*
- * @param p_hwfn
- * @param cookie
+ * @p_hwfn: HW device data.
+ * @cookie: Opaque cookie.
*
- * @return int
+ * Return: Int.
*/
int qed_eq_completion(struct qed_hwfn *p_hwfn,
void *cookie);
/**
- * @brief qed_spq_completion - Completes a single event
+ * qed_spq_completion(): Completes a single event.
*
- * @param p_hwfn
- * @param echo - echo value from cookie (used for determining completion)
- * @param p_data - data from cookie (used in callback function if applicable)
+ * @p_hwfn: HW device data.
+ * @echo: echo value from cookie (used for determining completion).
+ * @fw_return_code: FW return code.
+ * @p_data: data from cookie (used in callback function if applicable).
*
- * @return int
+ * Return: Int.
*/
int qed_spq_completion(struct qed_hwfn *p_hwfn,
__le16 echo,
@@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
union event_ring_data *p_data);
/**
- * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ
+ * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return u32 - SPQ CID
+ * Return: u32 - SPQ CID.
*/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_alloc - Allocates & initializes an ConsQ
- * struct
+ * qed_consq_alloc(): Allocates & initializes a ConsQ struct.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_consq_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_setup - Reset the ConsQ to its start state.
+ * qed_consq_setup(): Reset the ConsQ to its start state.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_consq_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_consq_free - deallocates the given ConsQ struct.
+ * qed_consq_free(): deallocates the given ConsQ struct.
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
-/**
- * @file
- *
- * @brief Slow-hwfn low-level commands (Ramrods) function definitions.
- */
+/* Slow-hwfn low-level commands (Ramrods) function definitions. */
#define QED_SP_EQ_COMPLETION 0x01
#define QED_SP_CQE_COMPLETION 0x02
@@ -377,12 +382,15 @@ struct qed_sp_init_data {
};
/**
- * @brief Returns a SPQ entry to the pool / frees the entry if allocated.
- * Should be called on in error flows after initializing the SPQ entry
- * and before posting it.
+ * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the
+ * entry if allocated. Should be called in error flows
+ * after initializing the SPQ entry and before
+ * posting it.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ent: SPQ entry to return/free.
*
- * @param p_hwfn
- * @param p_ent
+ * Return: Void.
*/
void qed_sp_destroy_request(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent);
@@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
struct qed_sp_init_data *p_data);
/**
- * @brief qed_sp_pf_start - PF Function Start Ramrod
+ * qed_sp_pf_start(): PF Function Start Ramrod.
+ *
+ * @p_hwfn: HW device data.
+ * @p_ptt: PTT window used for register access.
+ * @p_tunn: Tunnel configuration parameters.
+ * @allow_npar_tx_switch: Allow NPAR TX Switch.
+ *
+ * Return: Int.
*
* This ramrod is sent to initialize a physical function (PF). It will
* configure the function related parameters and write its completion to the
@@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn,
* allocated by the driver on host memory and its parameters are written
* to the internal RAM of the UStorm by the Function Start Ramrod.
*
- * @param p_hwfn
- * @param p_ptt
- * @param p_tunn
- * @param allow_npar_tx_switch
- *
- * @return int
*/
int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
@@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
bool allow_npar_tx_switch);
/**
- * @brief qed_sp_pf_update - PF Function Update Ramrod
+ * qed_sp_pf_update(): PF Function Update Ramrod.
*
- * This ramrod updates function-related parameters. Every parameter can be
- * updated independently, according to configuration flags.
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Int.
*
- * @return int
+ * This ramrod updates function-related parameters. Every parameter can be
+ * updated independently, according to configuration flags.
*/
int qed_sp_pf_update(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_update_stag - Update firmware of new outer tag
+ * qed_sp_pf_update_stag(): Update firmware with the new outer tag.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_sp_pf_stop - PF Function Stop Ramrod
- *
- * This ramrod is sent to close a Physical Function (PF). It is the last ramrod
- * sent and the last completion written to the PFs Event Ring. This ramrod also
- * deletes the context for the Slowhwfn connection on this PF.
- *
- * @note Not required for first packet.
- *
- * @param p_hwfn
- *
- * @return int
- */
-
-/**
- * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod
+ * qed_sp_pf_update_ufp(): PF ufp update Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn);
@@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data);
/**
- * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod
+ * qed_sp_heartbeat_ramrod(): Send empty Ramrod.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn);
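To make the qed_spq_comp_cb conversion above concrete, here is a hedged sketch of a completion handler matching that prototype; the handler, context struct, and setup helper use illustrative demo_* names and are not existing driver code:

#include <linux/completion.h>
#include "qed_sp.h"

/* Hypothetical per-request context handed to the SPQ as the cookie. */
struct demo_ramrod_ctx {
	struct completion done;	/* signalled when the ramrod completes */
	u8 fw_rc;		/* firmware return code from the EQ */
};

/* Matches the qed_spq_comp_cb function prototype shown above. */
static void demo_ramrod_done_cb(struct qed_hwfn *p_hwfn, void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct demo_ramrod_ctx *ctx = cookie;

	ctx->fw_rc = fw_return_code;
	complete(&ctx->done);
}

/* Attach the callback and cookie before the entry is posted. */
static void demo_setup_comp_cb(struct qed_spq_comp_cb *cb,
			       struct demo_ramrod_ctx *ctx)
{
	init_completion(&ctx->done);
	cb->function = demo_ramrod_done_cb;
	cb->cookie = ctx;
}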
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
index b4ed54ffef9b..648176dfb871 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp_commands.c
@@ -369,8 +369,12 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
qed_chain_get_pbl_phys(&p_hwfn->p_eq->chain));
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt;
- DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
+
+ /* Place consolidation queue address in ramrod */
+ DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_base_addr,
qed_chain_get_pbl_phys(&p_hwfn->p_consq->chain));
+ page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_consq->chain);
+ p_ramrod->consolid_q_num_pages = page_cnt;
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
@@ -401,8 +405,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
if (p_hwfn->cdev->p_iov_info) {
struct qed_hw_sriov_info *p_iov = p_hwfn->cdev->p_iov_info;
- p_ramrod->base_vf_id = (u8) p_iov->first_vf_in_pf;
- p_ramrod->num_vfs = (u8) p_iov->total_vfs;
+ p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
+ p_ramrod->num_vfs = (u8)p_iov->total_vfs;
}
p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
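The consolidation-queue hunk above follows the usual qed pattern for handing a chain to firmware: write the PBL base address as a little-endian regpair and the PBL page count next to it. A hedged sketch of that pattern with an invented ramrod layout (the demo_* names are illustrative only):

#include "qed.h"
#include "qed_sp.h"

/* Illustrative ramrod fragment describing one chain to the firmware. */
struct demo_queue_ramrod {
	struct regpair queue_pbl_base_addr;	/* PBL DMA address, LE regpair */
	u8 queue_num_pages;			/* number of PBL pages */
};

static void demo_fill_chain_fields(struct demo_queue_ramrod *p_ramrod,
				   struct qed_chain *p_chain)
{
	/* Same pattern as consolid_q_pbl_base_addr / consolid_q_num_pages. */
	DMA_REGPAIR_LE(p_ramrod->queue_pbl_base_addr,
		       qed_chain_get_pbl_phys(p_chain));
	p_ramrod->queue_num_pages = (u8)qed_chain_get_page_cnt(p_chain);
}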
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 0bc1a0aeb56e..e0473729b161 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -20,6 +20,7 @@
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_iscsi.h"
@@ -31,8 +32,8 @@
#include "qed_rdma.h"
/***************************************************************************
-* Structures & Definitions
-***************************************************************************/
+ * Structures & Definitions
+ ***************************************************************************/
#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1)
@@ -42,8 +43,8 @@
#define SPQ_BLOCK_SLEEP_MS (5)
/***************************************************************************
-* Blocking Imp. (BLOCK/EBLOCK mode)
-***************************************************************************/
+ * Blocking Imp. (BLOCK/EBLOCK mode)
+ ***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
void *cookie,
union event_ring_data *data, u8 fw_return_code)
@@ -149,8 +150,8 @@ err:
}
/***************************************************************************
-* SPQ entries inner API
-***************************************************************************/
+ * SPQ entries inner API
+ ***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
struct qed_spq_entry *p_ent)
{
@@ -184,12 +185,12 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* HSI access
-***************************************************************************/
+ * HSI access
+ ***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq)
{
- struct e4_core_conn_context *p_cxt;
+ struct core_conn_context *p_cxt;
struct qed_cxt_info cxt_info;
u16 physical_q;
int rc;
@@ -207,23 +208,20 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
p_cxt = cxt_info.p_cxt;
SET_FIELD(p_cxt->xstorm_ag_context.flags10,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags1,
- E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+ XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
SET_FIELD(p_cxt->xstorm_ag_context.flags9,
- E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+ XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
/* QM physical queue */
physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(physical_q);
- p_cxt->xstorm_st_context.spq_base_lo =
+ p_cxt->xstorm_st_context.spq_base_addr.lo =
DMA_LO_LE(p_spq->chain.p_phys_addr);
- p_cxt->xstorm_st_context.spq_base_hi =
+ p_cxt->xstorm_st_context.spq_base_addr.hi =
DMA_HI_LE(p_spq->chain.p_phys_addr);
-
- DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
- p_hwfn->p_consq->chain.p_phys_addr);
}
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
@@ -265,8 +263,8 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Asynchronous events
-***************************************************************************/
+ * Asynchronous events
+ ***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
struct event_ring_entry *p_eqe)
@@ -311,12 +309,12 @@ qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* EQ API
-***************************************************************************/
+ * EQ API
+ ***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
- u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
+ u32 addr = GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_EQE_CONS, p_hwfn->rel_pf_id);
REG_WR16(p_hwfn, addr, prod);
}
@@ -433,8 +431,8 @@ void qed_eq_free(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* CQE API - manipulate EQ functionality
-***************************************************************************/
+ * CQE API - manipulate EQ functionality
+ ***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
struct eth_slow_path_rx_cqe *cqe,
enum protocol_type protocol)
@@ -464,8 +462,8 @@ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Slow hwfn Queue (spq)
-***************************************************************************/
+ * Slow hwfn Queue (spq)
+ ***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
@@ -548,7 +546,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
int ret;
/* SPQ struct */
- p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
+ p_spq = kzalloc(sizeof(*p_spq), GFP_KERNEL);
if (!p_spq)
return -ENOMEM;
@@ -676,7 +674,6 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
struct qed_spq *p_spq = p_hwfn->p_spq;
if (p_ent->queue == &p_spq->unlimited_pending) {
-
if (list_empty(&p_spq->free_pool)) {
list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
p_spq->unlimited_pending_count++;
@@ -725,8 +722,8 @@ static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
}
/***************************************************************************
-* Accessor
-***************************************************************************/
+ * Accessor
+ ***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
if (!p_hwfn->p_spq)
@@ -735,8 +732,8 @@ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
}
/***************************************************************************
-* Posting new Ramrods
-***************************************************************************/
+ * Posting new Ramrods
+ ***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
struct list_head *head, u32 keep_reserve)
{
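Putting the qed_sp.h kernel-doc above together with this file, a hedged sketch of the basic SPQ entry life cycle; error handling is simplified, the demo_* caller is hypothetical, and real callers typically go through qed_sp_init_request() instead:

#include <linux/errno.h>
#include "qed_sp.h"

/* Sketch: take an entry from the free pool, post it, or hand it back. */
static int demo_post_one_entry(struct qed_hwfn *p_hwfn, bool setup_ok)
{
	struct qed_spq_entry *p_ent = NULL;
	u8 fw_return_code = 0;
	int rc;

	rc = qed_spq_get_entry(p_hwfn, &p_ent);	/* from the free pool */
	if (rc)
		return rc;

	/* ... fill the entry's ramrod data and completion mode here ... */
	if (!setup_ok) {
		/* Bail out before posting: return the entry to the pool. */
		qed_spq_return_entry(p_hwfn, p_ent);
		return -EINVAL;
	}

	return qed_spq_post(p_hwfn, p_ent, &fw_return_code);
}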
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ed2b6fe5a78d..8ac38828ba45 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -11,6 +11,7 @@
#include <linux/qed/qed_iov_if.h>
#include "qed_cxt.h"
#include "qed_hsi.h"
+#include "qed_iro_hsi.h"
#include "qed_hw.h"
#include "qed_init_ops.h"
#include "qed_int.h"
@@ -19,12 +20,13 @@
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_vf.h"
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
- u8 opcode,
- __le16 echo,
- union event_ring_data *data, u8 fw_return_code);
static int qed_iov_bulletin_set_mac(struct qed_hwfn *p_hwfn, u8 *mac, int vfid);
+static u16 qed_vf_from_entity_id(__le16 entity_id)
+{
+ return le16_to_cpu(entity_id) - MAX_NUM_PFS;
+}
+
static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
{
u8 legacy = 0;
@@ -169,8 +171,8 @@ static struct qed_vf_info *qed_iov_get_vf_info(struct qed_hwfn *p_hwfn,
b_enabled_only, false))
vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
else
- DP_ERR(p_hwfn, "qed_iov_get_vf_info: VF[%d] is not enabled\n",
- relative_vf_id);
+ DP_ERR(p_hwfn, "%s: VF[%d] is not enabled\n",
+ __func__, relative_vf_id);
return vf;
}
@@ -308,7 +310,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
struct qed_dmae_params params;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return -EINVAL;
@@ -420,7 +422,7 @@ static void qed_iov_setup_vfdb(struct qed_hwfn *p_hwfn)
bulletin_p = p_iov_info->bulletins_phys;
if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
DP_ERR(p_hwfn,
- "qed_iov_setup_vfdb called without allocating mem first\n");
+ "%s called without allocating mem first\n", __func__);
return;
}
@@ -464,7 +466,7 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
num_vfs = p_hwfn->cdev->p_iov_info->total_vfs;
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
- "qed_iov_allocate_vfdb for %d VFs\n", num_vfs);
+ "%s for %d VFs\n", __func__, num_vfs);
/* Allocate PF Mailbox buffer (per-VF) */
p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
@@ -500,10 +502,10 @@ static int qed_iov_allocate_vfdb(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"PF's Requests mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys] Bulletins [%p virt 0x%llx phys]\n",
p_iov_info->mbx_msg_virt_addr,
- (u64) p_iov_info->mbx_msg_phys_addr,
+ (u64)p_iov_info->mbx_msg_phys_addr,
p_iov_info->mbx_reply_virt_addr,
- (u64) p_iov_info->mbx_reply_phys_addr,
- p_iov_info->p_bulletins, (u64) p_iov_info->bulletins_phys);
+ (u64)p_iov_info->mbx_reply_phys_addr,
+ p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
return 0;
}
@@ -608,7 +610,7 @@ int qed_iov_hw_info(struct qed_hwfn *p_hwfn)
if (rc)
return rc;
- /* We want PF IOV to be synonemous with the existance of p_iov_info;
+ /* We want PF IOV to be synonymous with the existence of p_iov_info;
* In case the capability is published but there are no VFs, simply
* de-allocate the struct.
*/
@@ -714,12 +716,12 @@ static void qed_iov_vf_igu_reset(struct qed_hwfn *p_hwfn,
int i;
/* Set VF masks and configuration - pretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
qed_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
/* iterate over all queues, clear sb consumer */
for (i = 0; i < vf->num_sbs; i++)
@@ -734,7 +736,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
{
u32 igu_vf_conf;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
igu_vf_conf = qed_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
@@ -746,7 +748,7 @@ static void qed_iov_vf_igu_set_int(struct qed_hwfn *p_hwfn,
qed_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static int
@@ -807,7 +809,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
@@ -816,7 +818,7 @@ static int qed_iov_enable_vf_access(struct qed_hwfn *p_hwfn,
p_hwfn->hw_info.hw_mode);
/* unpretend */
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
vf->state = VF_FREE;
@@ -904,7 +906,7 @@ static u8 qed_iov_alloc_vf_igu_sbs(struct qed_hwfn *p_hwfn,
p_block->igu_sb_id * sizeof(u64), 2, NULL);
}
- vf->num_sbs = (u8) num_rx_queues;
+ vf->num_sbs = (u8)num_rx_queues;
return vf->num_sbs;
}
@@ -988,7 +990,7 @@ static int qed_iov_init_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_init_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1092,7 +1094,7 @@ static int qed_iov_release_hw_for_vf(struct qed_hwfn *p_hwfn,
vf = qed_iov_get_vf_info(p_hwfn, rel_vf_id, true);
if (!vf) {
- DP_ERR(p_hwfn, "qed_iov_release_hw_for_vf : vf is NULL\n");
+ DP_ERR(p_hwfn, "%s : vf is NULL\n", __func__);
return -EINVAL;
}
@@ -1220,8 +1222,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
* channel would be re-set to ready prior to that.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, eng_vf_id), 1);
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
mbx->req_virt->first_tlv.reply_address,
@@ -1545,7 +1547,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
memset(resp, 0, sizeof(*resp));
/* Write the PF version so that VF would know which version
- * is supported - might be later overriden. This guarantees that
+ * is supported - might be later overridden. This guarantees that
* VF could recognize legacy PF based on lack of versions in reply.
*/
pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
@@ -1603,7 +1605,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
/* fill in pfdev info */
pfdev_info->chip_num = p_hwfn->cdev->chip_num;
pfdev_info->db_size = 0;
- pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+ pfdev_info->indices_per_sb = PIS_PER_SB;
pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
@@ -1897,7 +1899,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
int sb_id;
int rc;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vf->relative_vf_id, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Failed to get VF info, invalid vfid [%d]\n",
@@ -1957,7 +1959,7 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
DP_ERR(p_hwfn,
- "qed_iov_vf_mbx_start_vport returned error %d\n", rc);
+ "%s returned error %d\n", __func__, rc);
status = PFVF_STATUS_FAILURE;
} else {
vf->vport_instance++;
@@ -1993,8 +1995,8 @@ static void qed_iov_vf_mbx_stop_vport(struct qed_hwfn *p_hwfn,
rc = qed_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
if (rc) {
- DP_ERR(p_hwfn, "qed_iov_vf_mbx_stop_vport returned error %d\n",
- rc);
+ DP_ERR(p_hwfn, "%s returned error %d\n",
+ __func__, rc);
status = PFVF_STATUS_FAILURE;
}
@@ -2138,10 +2140,10 @@ static void qed_iov_vf_mbx_start_rxq(struct qed_hwfn *p_hwfn,
* calculate on their own and clean the producer prior to this.
*/
if (!(vf_legacy & QED_QCID_LEGACY_VF_RX_PROD))
- REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_MSDM_RAM +
- MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
- 0);
+ qed_wr(p_hwfn, p_ptt, MSEM_REG_FAST_MEMORY +
+ SEM_FAST_REG_INT_RAM +
+ MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id,
+ req->rx_qid), 0);
rc = qed_eth_rxq_start_ramrod(p_hwfn, p_cid,
req->bd_max_bytes,
@@ -3030,7 +3032,7 @@ static void qed_iov_vf_mbx_vport_update(struct qed_hwfn *p_hwfn,
goto out;
}
p_rss_params = vzalloc(sizeof(*p_rss_params));
- if (p_rss_params == NULL) {
+ if (!p_rss_params) {
status = PFVF_STATUS_FAILURE;
goto out;
}
@@ -3550,6 +3552,7 @@ out:
qed_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
sizeof(struct pfvf_def_resp_tlv), status);
}
+
static int
qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
@@ -3557,7 +3560,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
int cnt;
u32 val;
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_vf->concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
for (cnt = 0; cnt < 50; cnt++) {
val = qed_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
@@ -3565,7 +3568,7 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
break;
msleep(20);
}
- qed_fid_pretend(p_hwfn, p_ptt, (u16) p_hwfn->hw_info.concrete_fid);
+ qed_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
if (cnt == 50) {
DP_ERR(p_hwfn,
@@ -3577,48 +3580,73 @@ qed_iov_vf_flr_poll_dorq(struct qed_hwfn *p_hwfn,
return 0;
}
+#define MAX_NUM_EXT_VOQS (MAX_NUM_PORTS * NUM_OF_TCS)
+
static int
qed_iov_vf_flr_poll_pbf(struct qed_hwfn *p_hwfn,
struct qed_vf_info *p_vf, struct qed_ptt *p_ptt)
{
- u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
- int i, cnt;
+ u32 prod, cons[MAX_NUM_EXT_VOQS], distance[MAX_NUM_EXT_VOQS], tmp;
+ u8 max_phys_tcs_per_port = p_hwfn->qm_info.max_phys_tcs_per_port;
+ u8 max_ports_per_engine = p_hwfn->cdev->num_ports_in_engine;
+ u32 prod_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0;
+ u32 cons_voq0_addr = PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0;
+ u8 port_id, tc, tc_id = 0, voq = 0;
+ int cnt;
- /* Read initial consumers & producers */
- for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
- u32 prod;
+ memset(cons, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
+ memset(distance, 0, MAX_NUM_EXT_VOQS * sizeof(u32));
- cons[i] = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- prod = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
- i * 0x40);
- distance[i] = prod - cons[i];
+ /* Read initial consumers & producers */
+ for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (tc = 0; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ? tc : PURE_LB_TC;
+ voq = VOQ(port_id, tc_id, max_phys_tcs_per_port);
+ cons[voq] = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ prod = qed_rd(p_hwfn, p_ptt,
+ prod_voq0_addr + voq * 0x40);
+ distance[voq] = prod - cons[voq];
+ }
}
/* Wait for consumers to pass the producers */
- i = 0;
+ port_id = 0;
+ tc = 0;
for (cnt = 0; cnt < 50; cnt++) {
- for (; i < MAX_NUM_VOQS_E4; i++) {
- u32 tmp;
+ for (; port_id < max_ports_per_engine; port_id++) {
+ /* "max_phys_tcs_per_port" active TCs + 1 pure LB TC */
+ for (; tc < max_phys_tcs_per_port + 1; tc++) {
+ tc_id = (tc < max_phys_tcs_per_port) ?
+ tc : PURE_LB_TC;
+ voq = VOQ(port_id,
+ tc_id, max_phys_tcs_per_port);
+ tmp = qed_rd(p_hwfn, p_ptt,
+ cons_voq0_addr + voq * 0x40);
+ if (distance[voq] > tmp - cons[voq])
+ break;
+ }
- tmp = qed_rd(p_hwfn, p_ptt,
- PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
- i * 0x40);
- if (distance[i] > tmp - cons[i])
+ if (tc == max_phys_tcs_per_port + 1)
+ tc = 0;
+ else
break;
}
- if (i == MAX_NUM_VOQS_E4)
+ if (port_id == max_ports_per_engine)
break;
msleep(20);
}
if (cnt == 50) {
- DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
- p_vf->abs_vf_id, i);
+ DP_ERR(p_hwfn, "VF[%d]: pbf poll failed on VOQ%d\n",
+ p_vf->abs_vf_id, (int)voq);
+
+ DP_ERR(p_hwfn, "VOQ %d has port_id as %d and tc_id as %d]\n",
+ (int)voq, (int)port_id, (int)tc_id);
+
return -EBUSY;
}
@@ -3680,8 +3708,8 @@ qed_iov_execute_vf_flr_cleanup(struct qed_hwfn *p_hwfn,
* doesn't do that as a part of FLR.
*/
REG_WR(p_hwfn,
- GTT_BAR0_MAP_REG_USDM_RAM +
- USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+ GET_GTT_REG_ADDR(GTT_BAR0_MAP_REG_USDM_RAM,
+ USTORM_VF_PF_CHANNEL_READY, vfid), 1);
/* VF_STOPPED has to be set only after final cleanup
* but prior to re-enabling the VF.
@@ -3842,7 +3870,7 @@ static void qed_iov_process_mbx_req(struct qed_hwfn *p_hwfn,
struct qed_iov_vf_mbx *mbx;
struct qed_vf_info *p_vf;
- p_vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf)
return;
@@ -3979,7 +4007,7 @@ static void qed_iov_pf_get_pending_events(struct qed_hwfn *p_hwfn, u64 *events)
static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
u16 abs_vfid)
{
- u8 min = (u8) p_hwfn->cdev->p_iov_info->first_vf_in_pf;
+ u8 min = (u8)p_hwfn->cdev->p_iov_info->first_vf_in_pf;
if (!_qed_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
DP_VERBOSE(p_hwfn,
@@ -3989,7 +4017,7 @@ static struct qed_vf_info *qed_sriov_get_vf_from_absid(struct qed_hwfn *p_hwfn,
return NULL;
}
- return &p_hwfn->pf_iov_info->vfs_array[(u8) abs_vfid - min];
+ return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
}
static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
@@ -4013,13 +4041,13 @@ static int qed_sriov_vfpf_msg(struct qed_hwfn *p_hwfn,
return 0;
}
-static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
- struct malicious_vf_eqe_data *p_data)
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
{
struct qed_vf_info *p_vf;
- p_vf = qed_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
-
+ p_vf = qed_sriov_get_vf_from_absid(p_hwfn,
+ qed_vf_from_entity_id(p_data->entity_id));
if (!p_vf)
return;
@@ -4036,16 +4064,13 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
}
}
-static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
- union event_ring_data *data, u8 fw_return_code)
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code)
{
switch (opcode) {
case COMMON_EVENT_VF_PF_CHANNEL:
return qed_sriov_vfpf_msg(p_hwfn, le16_to_cpu(echo),
&data->vf_pf_channel.msg_addr);
- case COMMON_EVENT_MALICIOUS_VF:
- qed_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
- return 0;
default:
DP_INFO(p_hwfn->cdev, "Unknown sriov eqe event 0x%02x\n",
opcode);
@@ -4075,7 +4100,7 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
struct qed_dmae_params params;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return -EINVAL;
@@ -4176,7 +4201,7 @@ static void qed_iov_bulletin_set_forced_vlan(struct qed_hwfn *p_hwfn,
struct qed_vf_info *vf_info;
u64 feature;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info) {
DP_NOTICE(p_hwfn->cdev,
"Can not set forced MAC, invalid vfid [%d]\n", vfid);
@@ -4226,7 +4251,7 @@ static bool qed_iov_vf_has_vport_instance(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return false;
@@ -4237,7 +4262,7 @@ static bool qed_iov_is_vf_stopped(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *p_vf_info;
- p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ p_vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!p_vf_info)
return true;
@@ -4248,7 +4273,7 @@ static bool qed_iov_spoofchk_get(struct qed_hwfn *p_hwfn, int vfid)
{
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return false;
@@ -4266,7 +4291,7 @@ static int qed_iov_spoofchk_set(struct qed_hwfn *p_hwfn, int vfid, bool val)
goto out;
}
- vf = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf)
goto out;
@@ -4345,7 +4370,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
return rc;
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
- return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
+ return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val,
+ QM_RL_TYPE_NORMAL);
}
static int
@@ -4376,7 +4402,7 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
struct qed_wfq_data *vf_vp_wfq;
struct qed_vf_info *vf_info;
- vf_info = qed_iov_get_vf_info(p_hwfn, (u16) vfid, true);
+ vf_info = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
if (!vf_info)
return 0;
@@ -4395,8 +4421,10 @@ static int qed_iov_get_vf_min_rate(struct qed_hwfn *p_hwfn, int vfid)
*/
void qed_schedule_iov(struct qed_hwfn *hwfn, enum qed_iov_wq_flag flag)
{
+ /* Memory barrier for setting atomic bit */
smp_mb__before_atomic();
set_bit(flag, &hwfn->iov_task_flags);
+ /* Memory barrier after setting atomic bit */
smp_mb__after_atomic();
DP_VERBOSE(hwfn, QED_MSG_IOV, "Scheduling iov task [Flag: %d]\n", flag);
queue_delayed_work(hwfn->iov_wq, &hwfn->iov_task, 0);
@@ -4407,8 +4435,8 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev)
int i;
for_each_hwfn(cdev, i)
- queue_delayed_work(cdev->hwfns[i].iov_wq,
- &cdev->hwfns[i].iov_task, 0);
+ queue_delayed_work(cdev->hwfns[i].iov_wq,
+ &cdev->hwfns[i].iov_task, 0);
}
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
@@ -4416,8 +4444,8 @@ int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
int i, j;
for_each_hwfn(cdev, i)
- if (cdev->hwfns[i].iov_wq)
- flush_workqueue(cdev->hwfns[i].iov_wq);
+ if (cdev->hwfns[i].iov_wq)
+ flush_workqueue(cdev->hwfns[i].iov_wq);
/* Mark VFs for disablement */
qed_iov_set_vfs_to_disable(cdev, true);
@@ -5010,7 +5038,7 @@ static void qed_handle_bulletin_post(struct qed_hwfn *hwfn)
}
qed_for_each_vf(hwfn, i)
- qed_iov_post_vf_bulletin(hwfn, i, ptt);
+ qed_iov_post_vf_bulletin(hwfn, i, ptt);
qed_ptt_release(hwfn, ptt);
}
@@ -5196,7 +5224,6 @@ void qed_iov_wq_stop(struct qed_dev *cdev, bool schedule_first)
cancel_delayed_work_sync(&cdev->hwfns[i].iov_task);
}
- flush_workqueue(cdev->hwfns[i].iov_wq);
destroy_workqueue(cdev->hwfns[i].iov_wq);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
index eacd6457f195..f448e3dd6c8b 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h
@@ -142,7 +142,7 @@ struct qed_vf_queue {
enum vf_state {
VF_FREE = 0, /* VF ready to be acquired holds no resc */
- VF_ACQUIRED, /* VF, acquired, but not initalized */
+ VF_ACQUIRED, /* VF, acquired, but not initialized */
VF_ENABLED, /* VF, Enabled */
VF_RESET, /* VF, FLR'd, pending cleanup */
VF_STOPPED /* VF, Stopped */
@@ -250,29 +250,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass;
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Check if given VF ID @vfid is valid
- * w.r.t. @b_enabled_only value
- * if b_enabled_only = true - only enabled VF id is valid
- * else any VF id less than max_vfs is valid
+ * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid
+ * w.r.t. @b_enabled_only value:
+ * if b_enabled_only is true, only an enabled
+ * VF ID is valid;
+ * otherwise any VF ID less than max_vfs is valid.
*
- * @param p_hwfn
- * @param rel_vf_id - Relative VF ID
- * @param b_enabled_only - consider only enabled VF
- * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: Relative VF ID.
+ * @b_enabled_only: consider only enabled VF.
+ * @b_non_malicious: true iff we want to validate vf isn't malicious.
*
- * @return bool - true for valid VF ID
+ * Return: bool - true for valid VF ID
*/
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
int rel_vf_id,
bool b_enabled_only, bool b_non_malicious);
/**
- * @brief - Given a VF index, return index of next [including that] active VF.
+ * qed_iov_get_next_active_vf(): Given a VF index, return index of
+ * next [including that] active VF.
*
- * @param p_hwfn
- * @param rel_vf_id
+ * @p_hwfn: HW device data.
+ * @rel_vf_id: VF ID.
*
- * @return MAX_NUM_VFS in case no further active VFs, otherwise index.
+ * Return: MAX_NUM_VFS in case no further active VFs, otherwise index.
*/
u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id);
@@ -280,83 +282,117 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn,
int vfid, u16 vxlan_port, u16 geneve_port);
/**
- * @brief Read sriov related information and allocated resources
- * reads from configuration space, shmem, etc.
+ * qed_iov_hw_info(): Read sriov related information and allocated resources
+ * reads from configuration space, shmem, etc.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_hw_info(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset
+ * qed_add_tlv(): place a given tlv on the tlv buffer at next offset
*
- * @param p_hwfn
- * @param p_iov
- * @param type
- * @param length
+ * @p_hwfn: HW device data.
+ * @offset: Offset in the TLV buffer.
+ * @type: TLV type.
+ * @length: TLV length.
*
- * @return pointer to the newly placed tlv
+ * Return: pointer to the newly placed tlv
*/
void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length);
/**
- * @brief list the types and lengths of the tlvs on the buffer
+ * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer
*
- * @param p_hwfn
- * @param tlvs_list
+ * @p_hwfn: HW device data.
+ * @tlvs_list: Tlvs_list.
+ *
+ * Return: Void.
*/
void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list);
/**
- * @brief qed_iov_alloc - allocate sriov related resources
+ * qed_sriov_vfpf_malicious(): Handle malicious VF/PF.
+ *
+ * @p_hwfn: HW device data.
+ * @p_data: Pointer to data.
+ *
+ * Return: Void.
+ */
+void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data);
+
+/**
+ * qed_sriov_eqe_event(): Callback for SRIOV events.
+ *
+ * @p_hwfn: HW device data.
+ * @opcode: Opcode.
+ * @echo: Echo.
+ * @data: Data.
+ * @fw_return_code: FW return code.
+ *
+ * Return: Int.
+ */
+int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode, __le16 echo,
+ union event_ring_data *data, u8 fw_return_code);
+
+/**
+ * qed_iov_alloc(): allocate sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_iov_alloc(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_setup - setup sriov related resources
+ * qed_iov_setup(): setup sriov related resources
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
+ *
+ * Return: Void.
*/
void qed_iov_setup(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_iov_free - free sriov related resources
+ * qed_iov_free(): free sriov related resources
+ *
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
+ * Return: Void.
*/
void qed_iov_free(struct qed_hwfn *p_hwfn);
/**
- * @brief free sriov related memory that was allocated during hw_prepare
+ * qed_iov_free_hw_info(): free sriov related memory that was
+ * allocated during hw_prepare
+ *
+ * @cdev: Qed dev pointer.
*
- * @param cdev
+ * Return: Void.
*/
void qed_iov_free_hw_info(struct qed_dev *cdev);
/**
- * @brief Mark structs of vfs that have been FLR-ed.
+ * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed.
*
- * @param p_hwfn
- * @param disabled_vfs - bitmask of all VFs on path that were FLRed
+ * @p_hwfn: HW device data.
+ * @disabled_vfs: bitmask of all VFs on path that were FLRed
*
- * @return true iff one of the PF's vfs got FLRed. false otherwise.
+ * Return: true iff one of the PF's vfs got FLRed. false otherwise.
*/
bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs);
/**
- * @brief Search extended TLVs in request/reply buffer.
+ * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer.
*
- * @param p_hwfn
- * @param p_tlvs_list - Pointer to tlvs list
- * @param req_type - Type of TLV
+ * @p_hwfn: HW device data.
+ * @p_tlvs_list: Pointer to tlvs list
+ * @req_type: Type of TLV
*
- * @return pointer to tlv type if found, otherwise returns NULL.
+ * Return: pointer to tlv type if found, otherwise returns NULL.
*/
void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn,
void *p_tlvs_list, u16 req_type);
@@ -442,6 +478,18 @@ static inline int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled)
static inline void qed_inform_vf_link_state(struct qed_hwfn *hwfn)
{
}
+
+static inline void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
+ struct fw_err_data *p_data)
+{
+}
+
+static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn, u8 opcode,
+ __le16 echo, union event_ring_data *data,
+ u8 fw_return_code)
+{
+ return 0;
+}
#endif
#define qed_for_each_vf(_p_hwfn, _i) \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 72a38d53d33f..597cd9cd57b5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -27,7 +27,7 @@ static void *qed_vf_pf_prep(struct qed_hwfn *p_hwfn, u16 type, u16 length)
"preparing to send 0x%04x tlv over vf pf channel\n",
type);
- /* Reset Requst offset */
+ /* Reset Request offset */
p_iov->offset = (u8 *)p_iov->vf2pf_request;
/* Clear mailbox - both request and reply */
@@ -444,7 +444,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
u32 reg;
int rc;
- /* Set number of hwfns - might be overriden once leading hwfn learns
+ /* Set number of hwfns - might be overridden once leading hwfn learns
* actual configuration from PF.
*/
if (IS_LEAD_HWFN(p_hwfn))
@@ -504,7 +504,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
QED_MSG_IOV,
"VF's Request mailbox [%p virt 0x%llx phys], Response mailbox [%p virt 0x%llx phys]\n",
p_iov->vf2pf_request,
- (u64) p_iov->vf2pf_request_phys,
+ (u64)p_iov->vf2pf_request_phys,
p_iov->pf2vf_reply, (u64)p_iov->pf2vf_reply_phys);
/* Allocate Bulletin board */
@@ -561,6 +561,7 @@ free_p_iov:
return -ENOMEM;
}
+
#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
@@ -1285,8 +1286,8 @@ int qed_vf_pf_filter_ucast(struct qed_hwfn *p_hwfn,
/* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req));
- req->opcode = (u8) p_ucast->opcode;
- req->type = (u8) p_ucast->type;
+ req->opcode = (u8)p_ucast->opcode;
+ req->type = (u8)p_ucast->type;
memcpy(req->mac, p_ucast->mac, ETH_ALEN);
req->vlan = p_ucast->vlan;
@@ -1372,7 +1373,7 @@ exit:
int
qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_bulletin_update_mac_tlv *p_req;
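The const-qualified p_mac in qed_vf_pf_bulletin_update_mac() above lets callers pass read-only MAC buffers without casting; a minimal sketch with a hypothetical demo_* wrapper:

#include "qed_vf.h"

/* Hypothetical wrapper: publish a read-only MAC via the PF bulletin board. */
static int demo_publish_vf_mac(struct qed_hwfn *p_hwfn, const u8 *mac)
{
	/* No cast is needed now that the parameter is const u8 *. */
	return qed_vf_pf_bulletin_update_mac(p_hwfn, mac);
}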
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 60d2bb64e65f..306b5f4bc632 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -48,7 +48,7 @@ struct channel_tlv {
u16 length;
};
-/* header of first vf->pf tlv carries the offset used to calculate reponse
+/* header of first vf->pf tlv carries the offset used to calculate response
* buffer address
*/
struct vfpf_first_tlv {
@@ -85,8 +85,8 @@ struct vfpf_acquire_tlv {
struct vfpf_first_tlv first_tlv;
struct vf_pf_vfdev_info {
-#define VFPF_ACQUIRE_CAP_PRE_FP_HSI (1 << 0) /* VF pre-FP hsi version */
-#define VFPF_ACQUIRE_CAP_100G (1 << 1) /* VF can support 100g */
+#define VFPF_ACQUIRE_CAP_PRE_FP_HSI BIT(0) /* VF pre-FP hsi version */
+#define VFPF_ACQUIRE_CAP_100G BIT(1) /* VF can support 100g */
/* A requirement for supporting multi-Tx queues on a single queue-zone,
* VF would pass qids as additional information whenever passing queue
* references.
@@ -688,13 +688,16 @@ struct qed_vf_iov {
};
/**
- * @brief VF - Set Rx/Tx coalesce per VF's relative queue.
- * Coalesce value '0' will omit the configuration.
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue.
+ * Coalesce value '0' will omit the
+ * configuration.
*
- * @param p_hwfn
- * @param rx_coal - coalesce value in micro second for rx queue
- * @param tx_coal - coalesce value in micro second for tx queue
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @rx_coal: coalesce value in micro second for rx queue.
+ * @tx_coal: coalesce value in micro second for tx queue.
+ * @p_cid: queue cid.
+ *
+ * Return: Int.
*
**/
int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
@@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn,
u16 tx_coal, struct qed_queue_cid *p_cid);
/**
- * @brief VF - Get coalesce per VF's relative queue.
+ * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue.
*
- * @param p_hwfn
- * @param p_coal - coalesce value in micro second for VF queues.
- * @param p_cid - queue cid
+ * @p_hwfn: HW device data.
+ * @p_coal: coalesce value in micro second for VF queues.
+ * @p_cid: queue cid.
*
+ * Return: Int.
**/
int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn,
u16 *p_coal, struct qed_queue_cid *p_cid);
#ifdef CONFIG_QED_SRIOV
/**
- * @brief Read the VF bulletin and act on it if needed
+ * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed.
*
- * @param p_hwfn
- * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise.
+ * @p_hwfn: HW device data.
+ * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise.
*
- * @return enum _qed_status
+ * Return: enum _qed_status.
*/
int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change);
/**
- * @brief Get link paramters for VF from qed
+ * qed_vf_get_link_params(): Get link parameters for VF from qed
+ *
+ * @p_hwfn: HW device data.
+ * @params: the link params structure to be filled for the VF.
*
- * @param p_hwfn
- * @param params - the link params structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *params);
/**
- * @brief Get link state for VF from qed
+ * qed_vf_get_link_state(): Get link state for VF from qed.
+ *
+ * @p_hwfn: HW device data.
+ * @link: the link state structure to be filled for the VF
*
- * @param p_hwfn
- * @param link - the link state structure to be filled for the VF
+ * Return: Void.
*/
void qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *link);
/**
- * @brief Get link capabilities for VF from qed
+ * qed_vf_get_link_caps(): Get link capabilities for VF from qed.
*
- * @param p_hwfn
- * @param p_link_caps - the link capabilities structure to be filled for the VF
+ * @p_hwfn: HW device data.
+ * @p_link_caps: the link capabilities structure to be filled for the VF
+ *
+ * Return: Void.
*/
void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed
+ *
+ * @p_hwfn: HW device data.
+ * @num_rxqs: allocated RX queues
*
- * @param p_hwfn
- * @param num_rxqs - allocated RX queues
+ * Return: Void.
*/
void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs);
/**
- * @brief Get number of Rx queues allocated for VF by qed
+ * qed_vf_get_num_txqs(): Get number of Tx queues allocated for VF by qed
*
- * @param p_hwfn
- * @param num_txqs - allocated RX queues
+ * @p_hwfn: HW device data.
+ * @num_txqs: allocated TX queues
+ *
+ * Return: Void.
*/
void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs);
/**
- * @brief Get number of available connections [both Rx and Tx] for VF
+ * qed_vf_get_num_cids(): Get number of available connections
+ * [both Rx and Tx] for VF
+ *
+ * @p_hwfn: HW device data.
+ * @num_cids: allocated number of connections
*
- * @param p_hwfn
- * @param num_cids - allocated number of connections
+ * Return: Void.
*/
void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids);
/**
- * @brief Get port mac address for VF
+ * qed_vf_get_port_mac(): Get port mac address for VF.
*
- * @param p_hwfn
- * @param port_mac - destination location for port mac
+ * @p_hwfn: HW device data.
+ * @port_mac: destination location for port mac
+ *
+ * Return: Void.
*/
void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac);
/**
- * @brief Get number of VLAN filters allocated for VF by qed
+ * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated
+ * for VF by qed.
+ *
+ * @p_hwfn: HW device data.
+ * @num_vlan_filters: allocated VLAN filters
*
- * @param p_hwfn
- * @param num_rxqs - allocated VLAN filters
+ * Return: Void.
*/
void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn,
u8 *num_vlan_filters);
/**
- * @brief Get number of MAC filters allocated for VF by qed
+ * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated
+ * for VF by qed
*
- * @param p_hwfn
- * @param num_rxqs - allocated MAC filters
+ * @p_hwfn: HW device data.
+ * @num_mac_filters: allocated MAC filters
+ *
+ * Return: Void.
*/
void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters);
/**
- * @brief Check if VF can set a MAC address
+ * qed_vf_check_mac(): Check if VF can set a MAC address
*
- * @param p_hwfn
- * @param mac
+ * @p_hwfn: HW device data.
+ * @mac: MAC address to check.
*
- * @return bool
+ * Return: bool.
*/
bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac);
/**
- * @brief Set firmware version information in dev_info from VFs acquire response tlv
+ * qed_vf_get_fw_version(): Set firmware version information
+ * in dev_info from VFs acquire response tlv
+ *
+ * @p_hwfn: HW device data.
+ * @fw_major: FW major.
+ * @fw_minor: FW minor.
+ * @fw_rev: FW rev.
+ * @fw_eng: FW eng.
*
- * @param p_hwfn
- * @param fw_major
- * @param fw_minor
- * @param fw_rev
- * @param fw_eng
+ * Return: Void.
*/
void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn,
u16 *fw_major, u16 *fw_minor,
u16 *fw_rev, u16 *fw_eng);
/**
- * @brief hw preparation for VF
- * sends ACQUIRE message
+ * qed_vf_hw_prepare(): hw preparation for VF; sends ACQUIRE message.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - start the RX Queue by sending a message to the PF
- * @param p_hwfn
- * @param p_cid - Only relative fields are relevant
- * @param bd_max_bytes - maximum number of bytes per bd
- * @param bd_chain_phys_addr - physical address of bd chain
- * @param cqe_pbl_addr - physical address of pbl
- * @param cqe_pbl_size - pbl size
- * @param pp_prod - pointer to the producer to be
- * used in fastpath
+ * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF
+ *
+ * @p_hwfn: HW device data.
+ * @p_cid: Only relative fields are relevant
+ * @bd_max_bytes: maximum number of bytes per bd
+ * @bd_chain_phys_addr: physical address of bd chain
+ * @cqe_pbl_addr: physical address of pbl
+ * @cqe_pbl_size: pbl size
+ * @pp_prod: pointer to the producer to be used in fastpath
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
@@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u16 cqe_pbl_size, void __iomem **pp_prod);
/**
- * @brief VF - start the TX queue by sending a message to the
- * PF.
+ * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the
+ * PF.
*
- * @param p_hwfn
- * @param tx_queue_id - zero based within the VF
- * @param sb - status block for this queue
- * @param sb_index - index within the status block
- * @param bd_chain_phys_addr - physical address of tx chain
- * @param pp_doorbell - pointer to address to which to
- * write the doorbell too..
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL Size.
+ * @pp_doorbell: pointer to address to which to write the doorbell.
*
- * @return int
+ * Return: Int.
*/
int
qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
@@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 pbl_size, void __iomem **pp_doorbell);
/**
- * @brief VF - stop the RX queue by sending a message to the PF
+ * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param p_cid
- * @param cqe_completion
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
+ * @cqe_completion: CQE Completion.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid, bool cqe_completion);
/**
- * @brief VF - stop the TX queue by sending a message to the PF
+ * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF.
*
- * @param p_hwfn
- * @param tx_qid
+ * @p_hwfn: HW device data.
+ * @p_cid: CID.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
/**
- * @brief VF - send a vport update command
+ * qed_vf_pf_vport_update(): VF - send a vport update command.
*
- * @param p_hwfn
- * @param params
+ * @p_hwfn: HW device data.
+ * @p_params: Vport update parameters.
*
- * @return int
+ * Return: Int.
*/
int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
struct qed_sp_vport_update_params *p_params);
/**
+ * qed_vf_pf_reset(): VF - send a close message to PF.
*
- * @brief VF - send a close message to PF
+ * @p_hwfn: HW device data.
*
- * @param p_hwfn
- *
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_reset(struct qed_hwfn *p_hwfn);
/**
- * @brief VF - free vf`s memories
+ * qed_vf_pf_release(): VF - free vf`s memories.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_release(struct qed_hwfn *p_hwfn);
/**
- * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given
+ * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given
* sb_id. For VFs igu sbs don't have to be contiguous
*
- * @param p_hwfn
- * @param sb_id
+ * @p_hwfn: HW device data.
+ * @sb_id: SB ID.
*
- * @return INLINE u16
+ * Return: INLINE u16
*/
u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id);
/**
- * @brief Stores [or removes] a configured sb_info.
+ * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info.
+ *
+ * @p_hwfn: HW device data.
+ * @sb_id: zero-based SB index [for fastpath]
+ * @p_sb: may be NULL [during removal].
*
- * @param p_hwfn
- * @param sb_id - zero-based SB index [for fastpath]
- * @param sb_info - may be NULL [during removal].
+ * Return: Void.
*/
void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn,
u16 sb_id, struct qed_sb_info *p_sb);
/**
- * @brief qed_vf_pf_vport_start - perform vport start for VF.
+ * qed_vf_pf_vport_start(): perform vport start for VF.
*
- * @param p_hwfn
- * @param vport_id
- * @param mtu
- * @param inner_vlan_removal
- * @param tpa_mode
- * @param max_buffers_per_cqe,
- * @param only_untagged - default behavior regarding vlan acceptance
+ * @p_hwfn: HW device data.
+ * @vport_id: Vport ID.
+ * @mtu: MTU.
+ * @inner_vlan_removal: Inner VLAN removal.
+ * @tpa_mode: TPA mode.
+ * @max_buffers_per_cqe: Max buffers per CQE.
+ * @only_untagged: Default behavior regarding VLAN acceptance.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 vport_id,
@@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn,
u8 max_buffers_per_cqe, u8 only_untagged);
/**
- * @brief qed_vf_pf_vport_stop - stop the VF's vport
+ * qed_vf_pf_vport_stop(): stop the VF's vport.
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn);
@@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
struct qed_filter_mcast *p_filter_cmd);
/**
- * @brief qed_vf_pf_int_cleanup - clean the SB of the VF
+ * qed_vf_pf_int_cleanup(): clean the SB of the VF
*
- * @param p_hwfn
+ * @p_hwfn: HW device data.
*
- * @return enum _qed_status
+ * Return: enum _qed_status
*/
int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn);
/**
- * @brief - return the link params in a given bulletin board
+ * __qed_vf_get_link_params(): return the link params in a given bulletin board
*
- * @param p_hwfn
- * @param p_params - pointer to a struct to fill with link params
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_params: pointer to a struct to fill with link params
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_params *p_params,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link state in a given bulletin board
+ * __qed_vf_get_link_state(): return the link state in a given bulletin board
+ *
+ * @p_hwfn: HW device data.
+ * @p_link: pointer to a struct to fill with link state
+ * @p_bulletin: Bulletin.
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link state
- * @param p_bulletin
+ * Return: Void.
*/
void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_state *p_link,
struct qed_bulletin_content *p_bulletin);
/**
- * @brief - return the link capabilities in a given bulletin board
+ * __qed_vf_get_link_caps(): return the link capabilities in a given
+ * bulletin board
*
- * @param p_hwfn
- * @param p_link - pointer to a struct to fill with link capabilities
- * @param p_bulletin
+ * @p_hwfn: HW device data.
+ * @p_link_caps: pointer to a struct to fill with link capabilities
+ * @p_bulletin: Bulletin.
+ *
+ * Return: Void.
*/
void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn,
struct qed_mcp_link_capabilities *p_link_caps,
@@ -1029,11 +1062,15 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id);
/**
- * @brief - Ask PF to update the MAC address in it's bulletin board
+ * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in
+ * its bulletin board.
+ *
+ * @p_hwfn: HW device data.
+ * @p_mac: mac address to be updated in bulletin board
*
- * @param p_mac - mac address to be updated in bulletin board
+ * Return: Int.
*/
-int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac);
+int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, const u8 *p_mac);
#else
static inline void qed_vf_get_link_params(struct qed_hwfn *p_hwfn,
@@ -1222,7 +1259,7 @@ static inline int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn,
}
static inline int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn,
- u8 *p_mac)
+ const u8 *p_mac)
{
return -EINVAL;
}
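
The qed_vf.h hunks above convert the old @brief/@param markup to kernel-doc. As a minimal sketch of the target layout (example_vf_op() is hypothetical, not part of the patch), a converted comment follows this shape:

/**
 * example_vf_op(): One-line summary ending with a period.
 *
 * @p_hwfn: HW device data.
 * @enable: Whether to enable the feature.
 *
 * Return: 0 on success, negative error code otherwise.
 */
int example_vf_op(struct qed_hwfn *p_hwfn, bool enable);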
diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c
index a2e4dfb5cb44..3010833ddde3 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_filter.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c
@@ -557,7 +557,7 @@ void qede_force_mac(void *dev, u8 *mac, bool forced)
return;
}
- ether_addr_copy(edev->ndev->dev_addr, mac);
+ eth_hw_addr_set(edev->ndev, mac);
__qede_unlock(edev);
}
@@ -617,32 +617,30 @@ void qede_fill_rss_params(struct qede_dev *edev,
static int qede_set_ucast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
- unsigned char mac[ETH_ALEN])
+ const unsigned char mac[ETH_ALEN])
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.mac_valid = 1;
- ether_addr_copy(filter_cmd.filter.ucast.mac, mac);
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.mac_valid = 1;
+ ether_addr_copy(ucast.mac, mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_set_ucast_rx_vlan(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
u16 vid)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_ucast_params ucast;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_UCAST;
- filter_cmd.filter.ucast.type = opcode;
- filter_cmd.filter.ucast.vlan_valid = 1;
- filter_cmd.filter.ucast.vlan = vid;
+ memset(&ucast, 0, sizeof(ucast));
+ ucast.type = opcode;
+ ucast.vlan_valid = 1;
+ ucast.vlan = vid;
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_ucast(edev->cdev, &ucast);
}
static int qede_config_accept_any_vlan(struct qede_dev *edev, bool action)
@@ -1057,18 +1055,17 @@ static int qede_set_mcast_rx_mac(struct qede_dev *edev,
enum qed_filter_xcast_params_type opcode,
unsigned char *mac, int num_macs)
{
- struct qed_filter_params filter_cmd;
+ struct qed_filter_mcast_params mcast;
int i;
- memset(&filter_cmd, 0, sizeof(filter_cmd));
- filter_cmd.type = QED_FILTER_TYPE_MCAST;
- filter_cmd.filter.mcast.type = opcode;
- filter_cmd.filter.mcast.num = num_macs;
+ memset(&mcast, 0, sizeof(mcast));
+ mcast.type = opcode;
+ mcast.num = num_macs;
for (i = 0; i < num_macs; i++, mac += ETH_ALEN)
- ether_addr_copy(filter_cmd.filter.mcast.mac[i], mac);
+ ether_addr_copy(mcast.mac[i], mac);
- return edev->ops->filter_config(edev->cdev, &filter_cmd);
+ return edev->ops->filter_config_mcast(edev->cdev, &mcast);
}
int qede_set_mac_addr(struct net_device *ndev, void *p)
@@ -1104,7 +1101,7 @@ int qede_set_mac_addr(struct net_device *ndev, void *p)
goto out;
}
- ether_addr_copy(ndev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, addr->sa_data);
DP_INFO(edev, "Setting device MAC to %pM\n", addr->sa_data);
if (edev->state != QEDE_STATE_OPEN) {
@@ -1194,7 +1191,6 @@ void qede_config_rx_mode(struct net_device *ndev)
{
enum qed_filter_rx_mode_type accept_flags;
struct qede_dev *edev = netdev_priv(ndev);
- struct qed_filter_params rx_mode;
unsigned char *uc_macs, *temp;
struct netdev_hw_addr *ha;
int rc, uc_count;
@@ -1220,10 +1216,6 @@ void qede_config_rx_mode(struct net_device *ndev)
netif_addr_unlock_bh(ndev);
- /* Configure the struct for the Rx mode */
- memset(&rx_mode, 0, sizeof(struct qed_filter_params));
- rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-
/* Remove all previous unicast secondary macs and multicast macs
* (configure / leave the primary mac)
*/
@@ -1271,8 +1263,7 @@ void qede_config_rx_mode(struct net_device *ndev)
qede_config_accept_any_vlan(edev, false);
}
- rx_mode.filter.accept_flags = accept_flags;
- edev->ops->filter_config(edev->cdev, &rx_mode);
+ edev->ops->filter_config_rx_mode(edev->cdev, accept_flags);
out:
kfree(uc_macs);
}
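
The qede_filter.c hunks replace the multiplexed filter_config() op with typed filter_config_ucast/mcast/rx_mode callbacks. A minimal sketch of a caller using the new Rx-mode op; example_set_promisc() is hypothetical and the QED_FILTER_RX_MODE_TYPE_* values are assumed from the existing qede code:

static void example_set_promisc(struct qede_dev *edev, bool enable)
{
	enum qed_filter_rx_mode_type mode;

	/* Enum values assumed from the existing driver code */
	mode = enable ? QED_FILTER_RX_MODE_TYPE_PROMISC
		      : QED_FILTER_RX_MODE_TYPE_REGULAR;

	/* The new op takes the accept flags directly, no wrapper struct */
	edev->ops->filter_config_rx_mode(edev->cdev, mode);
}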
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 9837bdb89cd4..06c6a5813606 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -836,7 +836,7 @@ static void qede_init_ndev(struct qede_dev *edev)
ndev->max_mtu = QEDE_MAX_JUMBO_PACKET_SIZE;
/* Set network device HW mac */
- ether_addr_copy(edev->ndev->dev_addr, edev->dev_info.common.hw_mac);
+ eth_hw_addr_set(edev->ndev, edev->dev_info.common.hw_mac);
ndev->mtu = edev->dev_info.common.mtu;
}
@@ -1176,19 +1176,17 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->devlink = qed_ops->common->devlink_register(cdev);
if (IS_ERR(edev->devlink)) {
DP_NOTICE(edev, "Cannot register devlink\n");
+ rc = PTR_ERR(edev->devlink);
edev->devlink = NULL;
- /* Go on, we can live without devlink */
+ goto err3;
}
} else {
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct qed_devlink *qdl;
edev = netdev_priv(ndev);
-
- if (edev->devlink) {
- struct qed_devlink *qdl = devlink_priv(edev->devlink);
-
- qdl->cdev = cdev;
- }
+ qdl = devlink_priv(edev->devlink);
+ qdl->cdev = cdev;
edev->cdev = cdev;
memset(&edev->stats, 0, sizeof(edev->stats));
memcpy(&edev->dev_info, &dev_info, sizeof(dev_info));
@@ -1397,7 +1395,7 @@ static void qede_free_mem_sb(struct qede_dev *edev, struct qed_sb_info *sb_info,
static int qede_alloc_mem_sb(struct qede_dev *edev,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int rc;
@@ -2802,10 +2800,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data)
}
/**
- * qede_io_error_detected - called when PCI error is detected
+ * qede_io_error_detected(): Called when PCI error is detected
+ *
* @pdev: Pointer to PCI device
* @state: The current pci connection state
*
+ * Return: pci_ers_result_t.
+ *
* This function is called after a PCI bus error affecting
* this device has been detected.
*/
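
qede_io_error_detected() is hooked up through the standard PCI error-recovery callbacks. As a hedged sketch of that generic pattern (the handler variable name is hypothetical, not quoted from the patch), the callback is referenced from the driver's struct pci_driver via .err_handler:

static const struct pci_error_handlers example_err_handler = {
	/* Callback documented in the kernel-doc comment above */
	.error_detected = qede_io_error_detected,
};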
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c00ad57575ea..1e6d72adfe43 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -508,10 +508,12 @@ static void eeprom_readword(struct ql3_adapter *qdev,
static void ql_set_mac_addr(struct net_device *ndev, u16 *addr)
{
- __le16 *p = (__le16 *)ndev->dev_addr;
- p[0] = cpu_to_le16(addr[0]);
- p[1] = cpu_to_le16(addr[1]);
- p[2] = cpu_to_le16(addr[2]);
+ __le16 buf[ETH_ALEN / 2];
+
+ buf[0] = cpu_to_le16(addr[0]);
+ buf[1] = cpu_to_le16(addr[1]);
+ buf[2] = cpu_to_le16(addr[2]);
+ eth_hw_addr_set(ndev, (u8 *)buf);
}
static int ql_get_nvram_params(struct ql3_adapter *qdev)
@@ -3564,7 +3566,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
spin_lock_irqsave(&qdev->hw_lock, hw_flags);
/* Program lower 32 bits of the MAC address */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 75960a29f80e..ed84f0f97623 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -304,7 +304,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
if (ret)
return ret;
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(netdev, mac_addr);
memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
/* set station address */
@@ -356,7 +356,7 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
qlcnic_delete_adapter_mac(adapter);
memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
qlcnic_set_multi(adapter->netdev);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 87b8c032195d..06104d2ff5b3 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -420,7 +420,7 @@ static void emac_mac_dma_config(struct emac_adapter *adpt)
}
/* set MAC address */
-static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr)
+static void emac_set_mac_address(struct emac_adapter *adpt, const u8 *addr)
{
u32 sta;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 9015a38eaced..a55c52696d49 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -545,13 +545,10 @@ static int emac_probe_resources(struct platform_device *pdev,
struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- char maddr[ETH_ALEN];
int ret = 0;
/* get mac address */
- if (device_get_mac_address(&pdev->dev, maddr, ETH_ALEN))
- ether_addr_copy(netdev->dev_addr, maddr);
- else
+ if (device_get_ethdev_address(&pdev->dev, netdev))
eth_hw_addr_random(netdev);
/* Core 0 interrupt */
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 8427fe1b8fd1..955cce644392 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -968,7 +968,7 @@ qca_spi_probe(struct spi_device *spi)
spi_set_drvdata(spi, qcaspi_devs);
- ret = of_get_mac_address(spi->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(spi->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&spi->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c b/drivers/net/ethernet/qualcomm/qca_uart.c
index ce3f7ce31adc..27c4f43176aa 100644
--- a/drivers/net/ethernet/qualcomm/qca_uart.c
+++ b/drivers/net/ethernet/qualcomm/qca_uart.c
@@ -347,7 +347,7 @@ static int qca_uart_probe(struct serdev_device *serdev)
of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
- ret = of_get_mac_address(serdev->dev.of_node, qca->net_dev->dev_addr);
+ ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
if (ret) {
eth_hw_addr_random(qca->net_dev);
dev_info(&serdev->dev, "Using random MAC address: %pM\n",
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 13d8eb43a485..1b2119b1d48a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -224,7 +224,7 @@ void rmnet_vnd_setup(struct net_device *rmnet_dev)
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
- eth_random_addr(rmnet_dev->dev_addr);
+ eth_hw_addr_random(rmnet_dev);
rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
/* Raw IP mode */
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 01ef5efd7bc2..a6bf7d505178 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -453,7 +453,7 @@ static void r6040_down(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Stop MAC */
iowrite16(MSK_INT, ioaddr + MIER); /* Mask Off Interrupt */
@@ -462,7 +462,7 @@ static void r6040_down(struct net_device *dev)
r6040_reset_mac(lp);
/* Restore MAC Address to MIDx */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -731,13 +731,13 @@ static void r6040_mac_address(struct net_device *dev)
{
struct r6040_private *lp = netdev_priv(dev);
void __iomem *ioaddr = lp->base;
- u16 *adrp;
+ const u16 *adrp;
/* Reset MAC */
r6040_reset_mac(lp);
/* Restore MAC Address */
- adrp = (u16 *) dev->dev_addr;
+ adrp = (const u16 *) dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -849,13 +849,13 @@ static void r6040_multicast_list(struct net_device *dev)
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
- u16 *adrp;
+ const u16 *adrp;
u16 hash_table[4] = { 0 };
spin_lock_irqsave(&lp->lock, flags);
/* Keep our MAC Address */
- adrp = (u16 *)dev->dev_addr;
+ adrp = (const u16 *)dev->dev_addr;
iowrite16(adrp[0], ioaddr + MID_0L);
iowrite16(adrp[1], ioaddr + MID_0M);
iowrite16(adrp[2], ioaddr + MID_0H);
@@ -1031,8 +1031,8 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioaddr;
int err, io_size = R6040_IO_SIZE;
static int card_idx = -1;
+ u16 addr[ETH_ALEN / 2];
int bar = 0;
- u16 *adrp;
pr_info("%s\n", version);
@@ -1102,14 +1102,14 @@ static int r6040_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Set MAC address */
card_idx++;
- adrp = (u16 *)dev->dev_addr;
- adrp[0] = ioread16(ioaddr + MID_0L);
- adrp[1] = ioread16(ioaddr + MID_0M);
- adrp[2] = ioread16(ioaddr + MID_0H);
+ addr[0] = ioread16(ioaddr + MID_0L);
+ addr[1] = ioread16(ioaddr + MID_0M);
+ addr[2] = ioread16(ioaddr + MID_0H);
+ eth_hw_addr_set(dev, (u8 *)addr);
/* Some bootloader/BIOSes do not initialize
* MAC address, warn about that */
- if (!(adrp[0] || adrp[1] || adrp[2])) {
+ if (!(addr[0] || addr[1] || addr[2])) {
netdev_warn(dev, "MAC address not initialized, "
"generating random\n");
eth_hw_addr_random(dev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 2b84b4565e64..4f39f843bb3a 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1624,7 +1624,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&cp->lock);
@@ -1889,6 +1889,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *regs;
resource_size_t pciaddr;
unsigned int addr_len, i, pci_using_dac;
+ __le16 addr[ETH_ALEN / 2];
pr_info_once("%s", version);
@@ -1979,8 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
/* read MAC address from EEPROM */
addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
dev->netdev_ops = &cp_netdev_ops;
netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
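
Several hunks in this section follow the same pattern: with netdev->dev_addr treated as read-only, the MAC is assembled in a local buffer and then published through eth_hw_addr_set(). A minimal sketch of that pattern, where read_mac_word() is a hypothetical stand-in for the device-specific EEPROM or register accessor:

static void example_init_mac(struct net_device *dev)
{
	__le16 addr[ETH_ALEN / 2];
	int i;

	for (i = 0; i < 3; i++)
		addr[i] = cpu_to_le16(read_mac_word(dev, i));

	/* Copies ETH_ALEN bytes into dev->dev_addr; no direct writes */
	eth_hw_addr_set(dev, (u8 *)addr);
}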
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 2e6923cc653e..15b40fd93cd2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -945,6 +945,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
{
struct net_device *dev = NULL;
struct rtl8139_private *tp;
+ __le16 addr[ETH_ALEN / 2];
int i, addr_len, option;
void __iomem *ioaddr;
static int board_idx = -1;
@@ -994,8 +995,8 @@ static int rtl8139_init_one(struct pci_dev *pdev,
addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((__le16 *) (dev->dev_addr))[i] =
- cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ addr[i] = cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
+ eth_hw_addr_set(dev, (u8 *)addr);
/* The Rtl8139-specific entries in the device structure. */
dev->netdev_ops = &rtl8139_netdev_ops;
@@ -2238,7 +2239,7 @@ static int rtl8139_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&tp->lock);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index b6c849b258a0..6cbcb3164367 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -368,6 +368,7 @@ static int __init atp_probe1(long ioaddr)
static void __init get_node_ID(struct net_device *dev)
{
long ioaddr = dev->base_addr;
+ __be16 addr[ETH_ALEN / 2];
int sa_offset = 0;
int i;
@@ -379,8 +380,9 @@ static void __init get_node_ID(struct net_device *dev)
sa_offset = 15;
for (i = 0; i < 3; i++)
- ((__be16 *)dev->dev_addr)[i] =
+ addr[i] =
cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
+ eth_hw_addr_set(dev, (u8 *)addr);
write_reg(ioaddr, CMR2, CMR2_NULL);
}
diff --git a/drivers/net/ethernet/realtek/r8169.h b/drivers/net/ethernet/realtek/r8169.h
index 2728df46ec41..8da4b66b71b5 100644
--- a/drivers/net/ethernet/realtek/r8169.h
+++ b/drivers/net/ethernet/realtek/r8169.h
@@ -37,7 +37,7 @@ enum mac_version {
RTL_GIGA_MAC_VER_24,
RTL_GIGA_MAC_VER_25,
RTL_GIGA_MAC_VER_26,
- RTL_GIGA_MAC_VER_27,
+ /* support for RTL_GIGA_MAC_VER_27 has been removed */
RTL_GIGA_MAC_VER_28,
RTL_GIGA_MAC_VER_29,
RTL_GIGA_MAC_VER_30,
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 46a6ff9a782d..bbe21db20417 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -118,7 +118,6 @@ static const struct {
[RTL_GIGA_MAC_VER_24] = {"RTL8168cp/8111cp" },
[RTL_GIGA_MAC_VER_25] = {"RTL8168d/8111d", FIRMWARE_8168D_1},
[RTL_GIGA_MAC_VER_26] = {"RTL8168d/8111d", FIRMWARE_8168D_2},
- [RTL_GIGA_MAC_VER_27] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_28] = {"RTL8168dp/8111dp" },
[RTL_GIGA_MAC_VER_29] = {"RTL8105e", FIRMWARE_8105E_1},
[RTL_GIGA_MAC_VER_30] = {"RTL8105e", FIRMWARE_8105E_1},
@@ -157,6 +156,7 @@ static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_VDEVICE(REALTEK, 0x8129) },
{ PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_NO_GBIT },
{ PCI_VDEVICE(REALTEK, 0x8161) },
+ { PCI_VDEVICE(REALTEK, 0x8162) },
{ PCI_VDEVICE(REALTEK, 0x8167) },
{ PCI_VDEVICE(REALTEK, 0x8168) },
{ PCI_VDEVICE(NCUBE, 0x8168) },
@@ -985,33 +985,6 @@ DECLARE_RTL_COND(rtl_ocpar_cond)
return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}
-static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
-{
- RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
- RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- rtl_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
-}
-
-static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
-{
- r8168dp_1_mdio_access(tp, reg,
- OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
-}
-
-static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
-{
- r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
-
- mdelay(1);
- RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
- RTL_W32(tp, EPHY_RXER_NUM, 0);
-
- return rtl_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
- RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : -ETIMEDOUT;
-}
-
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
@@ -1053,9 +1026,6 @@ static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- r8168dp_1_mdio_write(tp, location, val);
- break;
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
r8168dp_2_mdio_write(tp, location, val);
@@ -1072,8 +1042,6 @@ static void rtl_writephy(struct rtl8169_private *tp, int location, int val)
static int rtl_readphy(struct rtl8169_private *tp, int location)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
- return r8168dp_1_mdio_read(tp, location);
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_2_mdio_read(tp, location);
@@ -1235,7 +1203,6 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
@@ -2040,8 +2007,7 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii)
/* 8168DP family. */
/* It seems this early RTL8168dp version never made it to
- * the wild. Let's see whether somebody complains, if not
- * we'll remove support for this chip version completely.
+ * the wild. Support has been removed.
* { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 },
*/
{ 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 },
@@ -2371,7 +2337,7 @@ static void rtl_jumbo_config(struct rtl8169_private *tp)
r8168c_hw_jumbo_disable(tp);
}
break;
- case RTL_GIGA_MAC_VER_27 ... RTL_GIGA_MAC_VER_28:
+ case RTL_GIGA_MAC_VER_28:
if (jumbo)
r8168dp_hw_jumbo_enable(tp);
else
@@ -3719,7 +3685,6 @@ static void rtl_hw_config(struct rtl8169_private *tp)
[RTL_GIGA_MAC_VER_24] = rtl_hw_start_8168cp_3,
[RTL_GIGA_MAC_VER_25] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_26] = rtl_hw_start_8168d,
- [RTL_GIGA_MAC_VER_27] = rtl_hw_start_8168d,
[RTL_GIGA_MAC_VER_28] = rtl_hw_start_8168d_4,
[RTL_GIGA_MAC_VER_29] = rtl_hw_start_8105e_1,
[RTL_GIGA_MAC_VER_30] = rtl_hw_start_8105e_2,
@@ -3982,7 +3947,6 @@ static void rtl8169_cleanup(struct rtl8169_private *tp, bool going_down)
goto no_reset;
switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_27:
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
rtl_loop_wait_low(tp, &rtl_npq_cond, 20, 2000);
@@ -5254,7 +5218,7 @@ static int rtl_get_ether_clk(struct rtl8169_private *tp)
static void rtl_init_mac_address(struct rtl8169_private *tp)
{
struct net_device *dev = tp->dev;
- u8 *mac_addr = dev->dev_addr;
+ u8 mac_addr[ETH_ALEN];
int rc;
rc = eth_platform_get_mac_address(tp_to_dev(tp), mac_addr);
@@ -5272,6 +5236,7 @@ static void rtl_init_mac_address(struct rtl8169_private *tp)
eth_hw_addr_random(dev);
dev_warn(tp_to_dev(tp), "can't read MAC address, setting random one\n");
done:
+ eth_hw_addr_set(dev, mac_addr);
rtl_rar_set(tp, mac_addr);
}
diff --git a/drivers/net/ethernet/realtek/r8169_phy_config.c b/drivers/net/ethernet/realtek/r8169_phy_config.c
index 50f0f621b1aa..f7ad5487879b 100644
--- a/drivers/net/ethernet/realtek/r8169_phy_config.c
+++ b/drivers/net/ethernet/realtek/r8169_phy_config.c
@@ -548,64 +548,6 @@ static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp,
rtl8168d_apply_firmware_cond(tp, phydev, 0xb300);
}
-static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp,
- struct phy_device *phydev)
-{
- static const struct phy_reg phy_reg_init[] = {
- { 0x1f, 0x0002 },
- { 0x10, 0x0008 },
- { 0x0d, 0x006c },
-
- { 0x1f, 0x0000 },
- { 0x0d, 0xf880 },
-
- { 0x1f, 0x0001 },
- { 0x17, 0x0cc0 },
-
- { 0x1f, 0x0001 },
- { 0x0b, 0xa4d8 },
- { 0x09, 0x281c },
- { 0x07, 0x2883 },
- { 0x0a, 0x6b35 },
- { 0x1d, 0x3da4 },
- { 0x1c, 0xeffd },
- { 0x14, 0x7f52 },
- { 0x18, 0x7fc6 },
- { 0x08, 0x0601 },
- { 0x06, 0x4063 },
- { 0x10, 0xf074 },
- { 0x1f, 0x0003 },
- { 0x13, 0x0789 },
- { 0x12, 0xf4bd },
- { 0x1a, 0x04fd },
- { 0x14, 0x84b0 },
- { 0x1f, 0x0000 },
- { 0x00, 0x9200 },
-
- { 0x1f, 0x0005 },
- { 0x01, 0x0340 },
- { 0x1f, 0x0001 },
- { 0x04, 0x4000 },
- { 0x03, 0x1d21 },
- { 0x02, 0x0c32 },
- { 0x01, 0x0200 },
- { 0x00, 0x5554 },
- { 0x04, 0x4800 },
- { 0x04, 0x4000 },
- { 0x04, 0xf000 },
- { 0x03, 0xdf01 },
- { 0x02, 0xdf20 },
- { 0x01, 0x101a },
- { 0x00, 0xa0ff },
- { 0x04, 0xf800 },
- { 0x04, 0xf000 },
- { 0x1f, 0x0000 },
- };
-
- rtl_writephy_batch(phydev, phy_reg_init);
- r8168d_modify_extpage(phydev, 0x0023, 0x16, 0xffff, 0x0000);
-}
-
static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp,
struct phy_device *phydev)
{
@@ -1332,7 +1274,6 @@ void r8169_hw_phy_config(struct rtl8169_private *tp, struct phy_device *phydev,
[RTL_GIGA_MAC_VER_24] = rtl8168cp_2_hw_phy_config,
[RTL_GIGA_MAC_VER_25] = rtl8168d_1_hw_phy_config,
[RTL_GIGA_MAC_VER_26] = rtl8168d_2_hw_phy_config,
- [RTL_GIGA_MAC_VER_27] = rtl8168d_3_hw_phy_config,
[RTL_GIGA_MAC_VER_28] = rtl8168d_4_hw_phy_config,
[RTL_GIGA_MAC_VER_29] = rtl8105e_hw_phy_config,
[RTL_GIGA_MAC_VER_30] = rtl8105e_hw_phy_config,
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 47c5377e4f42..08062d73df10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -81,6 +81,7 @@ enum ravb_reg {
RQC3 = 0x00A0,
RQC4 = 0x00A4,
RPC = 0x00B0,
+ RTC = 0x00B4, /* R-Car Gen3 and RZ/G2L only */
UFCW = 0x00BC,
UFCS = 0x00C0,
UFCV0 = 0x00C4,
@@ -187,19 +188,23 @@ enum ravb_reg {
PIR = 0x0520,
PSR = 0x0528,
PIPR = 0x052c,
+ CXR31 = 0x0530, /* RZ/G2L only */
MPR = 0x0558,
PFTCR = 0x055c,
PFRCR = 0x0560,
GECMR = 0x05b0,
MAHR = 0x05c0,
MALR = 0x05c8,
- TROCR = 0x0700, /* R-Car Gen3 only */
+ TROCR = 0x0700, /* R-Car Gen3 and RZ/G2L only */
+ CXR41 = 0x0708, /* RZ/G2L only */
+ CXR42 = 0x0710, /* RZ/G2L only */
CEFCR = 0x0740,
FRECR = 0x0748,
TSFRCR = 0x0750,
TLFRCR = 0x0758,
RFCR = 0x0760,
MAFCR = 0x0778,
+ CSR0 = 0x0800, /* RZ/G2L only */
};
@@ -810,10 +815,11 @@ enum ECMR_BIT {
ECMR_TXF = 0x00010000, /* Documented for R-Car Gen3 only */
ECMR_RXF = 0x00020000,
ECMR_PFR = 0x00040000,
- ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 only */
+ ECMR_ZPF = 0x00080000, /* Documented for R-Car Gen3 and RZ/G2L */
ECMR_RZPF = 0x00100000,
ECMR_DPAD = 0x00200000,
ECMR_RCSC = 0x00800000,
+ ECMR_RCPT = 0x02000000, /* Documented for RZ/G2L only */
ECMR_TRCCM = 0x04000000,
};
@@ -823,6 +829,7 @@ enum ECSR_BIT {
ECSR_MPD = 0x00000002,
ECSR_LCHNG = 0x00000004,
ECSR_PHYI = 0x00000008,
+ ECSR_PFRI = 0x00000010, /* Documented for R-Car Gen3 and RZ/G2L */
};
/* ECSIPR */
@@ -857,9 +864,13 @@ enum MPR_BIT {
/* GECMR */
enum GECMR_BIT {
- GECMR_SPEED = 0x00000001,
- GECMR_SPEED_100 = 0x00000000,
- GECMR_SPEED_1000 = 0x00000001,
+ GECMR_SPEED = 0x00000001,
+ GECMR_SPEED_100 = 0x00000000,
+ GECMR_SPEED_1000 = 0x00000001,
+ GBETH_GECMR_SPEED = 0x00000030,
+ GBETH_GECMR_SPEED_10 = 0x00000000,
+ GBETH_GECMR_SPEED_100 = 0x00000010,
+ GBETH_GECMR_SPEED_1000 = 0x00000020,
};
/* The Ethernet AVB descriptor definitions. */
@@ -949,6 +960,16 @@ enum RAVB_QUEUE {
RAVB_NC, /* Network Control Queue */
};
+enum CXR31_BIT {
+ CXR31_SEL_LINK0 = 0x00000001,
+ CXR31_SEL_LINK1 = 0x00000008,
+};
+
+enum CSR0_BIT {
+ CSR0_TPE = 0x00000010,
+ CSR0_RPE = 0x00000020,
+};
+
#define DBAT_ENTRY_NUM 22
#define RX_QUEUE_OFFSET 4
#define NUM_RX_QUEUE 2
@@ -956,6 +977,9 @@ enum RAVB_QUEUE {
#define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16))
+#define GBETH_RX_BUFF_MAX 8192
+#define GBETH_RX_DESC_DATA_SIZE 4080
+
struct ravb_tstamp_skb {
struct list_head list;
struct sk_buff *skb;
@@ -985,8 +1009,8 @@ struct ravb_hw_info {
void *(*alloc_rx_desc)(struct net_device *ndev, int q);
bool (*receive)(struct net_device *ndev, int *quota, int q);
void (*set_rate)(struct net_device *ndev);
- int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features);
- void (*dmac_init)(struct net_device *ndev);
+ int (*set_feature)(struct net_device *ndev, netdev_features_t features);
+ int (*dmac_init)(struct net_device *ndev);
void (*emac_init)(struct net_device *ndev);
const char (*gstrings_stats)[ETH_GSTRING_LEN];
size_t gstrings_size;
@@ -994,14 +1018,20 @@ struct ravb_hw_info {
netdev_features_t net_features;
int stats_len;
size_t max_rx_len;
+ u32 tccr_mask;
+ u32 rx_max_buf_size;
unsigned aligned_tx: 1;
/* hardware features */
unsigned internal_delay:1; /* AVB-DMAC has internal delays */
unsigned tx_counters:1; /* E-MAC has TX counters */
+ unsigned carrier_counters:1; /* E-MAC has carrier counters */
unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */
- unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */
- unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned gptp:1; /* AVB-DMAC has gPTP support */
+ unsigned ccc_gac:1; /* AVB-DMAC has gPTP support active in config mode */
+ unsigned nc_queues:1; /* AVB-DMAC has RX and TX NC queues */
+ unsigned magic_pkt:1; /* E-MAC supports magic packet detection */
+ unsigned half_duplex:1; /* E-MAC supports half duplex mode */
};
struct ravb_private {
@@ -1018,9 +1048,11 @@ struct ravb_private {
struct ravb_desc *desc_bat;
dma_addr_t rx_desc_dma[NUM_RX_QUEUE];
dma_addr_t tx_desc_dma[NUM_TX_QUEUE];
+ struct ravb_rx_desc *gbeth_rx_ring;
struct ravb_ex_rx_desc *rx_ring[NUM_RX_QUEUE];
struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE];
void *tx_align[NUM_TX_QUEUE];
+ struct sk_buff *rx_1st_skb;
struct sk_buff **rx_skb[NUM_RX_QUEUE];
struct sk_buff **tx_skb[NUM_TX_QUEUE];
u32 rx_over_errors;
@@ -1056,6 +1088,8 @@ struct ravb_private {
unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */
unsigned int num_tx_desc; /* TX descriptors per packet */
+ int duplex;
+
const struct ravb_hw_info *info;
struct reset_control *rstc;
};
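
The extended struct ravb_hw_info lets the probe path select per-SoC behaviour through function pointers and feature bits rather than run-time checks. As an illustrative sketch only (field selection and values are assumptions, not copied from the patch), a GbEth-style entry could look like:

static const struct ravb_hw_info example_gbeth_hw_info = {
	.alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
	.receive = ravb_rx_gbeth,
	.set_rate = ravb_set_rate_gbeth,
	.dmac_init = ravb_dmac_init_gbeth,
	.emac_init = ravb_emac_init_gbeth,
	.tccr_mask = TCCR_TSRQ0,
	.rx_max_buf_size = GBETH_RX_BUFF_MAX,
	.aligned_tx = 1,
	.carrier_counters = 1,
	.half_duplex = 1,
};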
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 0f85f2d97b18..b4c597f4040c 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -83,7 +83,24 @@ static int ravb_config(struct net_device *ndev)
return error;
}
-static void ravb_set_rate(struct net_device *ndev)
+static void ravb_set_rate_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ switch (priv->speed) {
+ case 10: /* 10BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_10, GECMR);
+ break;
+ case 100: /* 100BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_100, GECMR);
+ break;
+ case 1000: /* 1000BASE */
+ ravb_write(ndev, GBETH_GECMR_SPEED_1000, GECMR);
+ break;
+ }
+}
+
+static void ravb_set_rate_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -115,17 +132,19 @@ static void ravb_read_mac_address(struct device_node *np,
{
int ret;
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
u32 mahr = ravb_read(ndev, MAHR);
u32 malr = ravb_read(ndev, MALR);
+ u8 addr[ETH_ALEN];
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
@@ -217,7 +236,32 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
return free_num;
}
-static void ravb_rx_ring_free(struct net_device *ndev, int q)
+static void ravb_rx_ring_free_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+ unsigned int i;
+
+ if (!priv->gbeth_rx_ring)
+ return;
+
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring,
+ priv->rx_desc_dma[q]);
+ priv->gbeth_rx_ring = NULL;
+}
+
+static void ravb_rx_ring_free_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -283,7 +327,38 @@ static void ravb_ring_free(struct net_device *ndev, int q)
priv->tx_skb[q] = NULL;
}
-static void ravb_rx_ring_format(struct net_device *ndev, int q)
+static void ravb_rx_ring_format_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct ravb_rx_desc *rx_desc;
+ unsigned int rx_ring_size;
+ dma_addr_t dma_addr;
+ unsigned int i;
+
+ rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
+ memset(priv->gbeth_rx_ring, 0, rx_ring_size);
+ /* Build RX ring buffer */
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ /* RX descriptor */
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+ dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ /* We just set the data size to 0 for a failed mapping which
+ * should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ rx_desc->ds_cc = cpu_to_le16(0);
+ rx_desc->dptr = cpu_to_le32(dma_addr);
+ rx_desc->die_dt = DT_FEMPTY;
+ }
+ rx_desc = &priv->gbeth_rx_ring[i];
+ rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
+ rx_desc->die_dt = DT_LINKFIX; /* type */
+}
+
+static void ravb_rx_ring_format_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
struct ravb_ex_rx_desc *rx_desc;
@@ -356,7 +431,20 @@ static void ravb_ring_format(struct net_device *ndev, int q)
desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]);
}
-static void *ravb_alloc_rx_desc(struct net_device *ndev, int q)
+static void *ravb_alloc_rx_desc_gbeth(struct net_device *ndev, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ unsigned int ring_size;
+
+ ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1);
+
+ priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size,
+ &priv->rx_desc_dma[q],
+ GFP_KERNEL);
+ return priv->gbeth_rx_ring;
+}
+
+static void *ravb_alloc_rx_desc_rcar(struct net_device *ndev, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
unsigned int ring_size;
@@ -426,7 +514,37 @@ error:
return -ENOMEM;
}
-static void ravb_rcar_emac_init(struct net_device *ndev)
+static void ravb_emac_init_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ /* Receive frame limit set register */
+ ravb_write(ndev, GBETH_RX_BUFF_MAX + ETH_FCS_LEN, RFLR);
+
+ /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */
+ ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) |
+ ECMR_TE | ECMR_RE | ECMR_RCPT |
+ ECMR_TXF | ECMR_RXF, ECMR);
+
+ ravb_set_rate_gbeth(ndev);
+
+ /* Set MAC address */
+ ravb_write(ndev,
+ (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+ (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
+ ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
+
+ /* E-MAC status register clear */
+ ravb_write(ndev, ECSR_ICD | ECSR_LCHNG | ECSR_PFRI, ECSR);
+ ravb_write(ndev, CSR0_TPE | CSR0_RPE, CSR0);
+
+ /* E-MAC interrupt enable register */
+ ravb_write(ndev, ECSIPR_ICDIP, ECSIPR);
+
+ ravb_modify(ndev, CXR31, CXR31_SEL_LINK0 | CXR31_SEL_LINK1, CXR31_SEL_LINK0);
+}
+
+static void ravb_emac_init_rcar(struct net_device *ndev)
{
/* Receive frame limit set register */
ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR);
@@ -436,7 +554,7 @@ static void ravb_rcar_emac_init(struct net_device *ndev)
(ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) |
ECMR_TE | ECMR_RE, ECMR);
- ravb_set_rate(ndev);
+ ravb_set_rate_rcar(ndev);
/* Set MAC address */
ravb_write(ndev,
@@ -461,10 +579,58 @@ static void ravb_emac_init(struct net_device *ndev)
info->emac_init(ndev);
}
-static void ravb_rcar_dmac_init(struct net_device *ndev)
+static int ravb_dmac_init_gbeth(struct net_device *ndev)
+{
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+
+ /* Set DMAC RX */
+ ravb_write(ndev, 0x60000000, RCR);
+
+ /* Set Max Frame Length (RTC) */
+ ravb_write(ndev, 0x7ffc0000 | GBETH_RX_BUFF_MAX, RTC);
+
+ /* Set FIFO size */
+ ravb_write(ndev, 0x00222200, TGC);
+
+ ravb_write(ndev, 0, TCCR);
+
+ /* Frame receive */
+ ravb_write(ndev, RIC0_FRE0, RIC0);
+ /* Disable FIFO full warning */
+ ravb_write(ndev, 0x0, RIC1);
+ /* Receive FIFO full error, descriptor empty */
+ ravb_write(ndev, RIC2_QFE0 | RIC2_RFFE, RIC2);
+
+ ravb_write(ndev, TIC_FTE0, TIC);
+
+ return 0;
+}
+
+static int ravb_dmac_init_rcar(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ int error;
+
+ error = ravb_ring_init(ndev, RAVB_BE);
+ if (error)
+ return error;
+ error = ravb_ring_init(ndev, RAVB_NC);
+ if (error) {
+ ravb_ring_free(ndev, RAVB_BE);
+ return error;
+ }
+
+ /* Descriptor format */
+ ravb_ring_format(ndev, RAVB_BE);
+ ravb_ring_format(ndev, RAVB_NC);
/* Set AVB RX */
ravb_write(ndev,
@@ -491,6 +657,8 @@ static void ravb_rcar_dmac_init(struct net_device *ndev)
ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
/* Frame transmitted, timestamp FIFO updated */
ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC);
+
+ return 0;
}
/* Device init function for Ethernet AVB */
@@ -505,20 +673,9 @@ static int ravb_dmac_init(struct net_device *ndev)
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_BE);
+ error = info->dmac_init(ndev);
if (error)
return error;
- error = ravb_ring_init(ndev, RAVB_NC);
- if (error) {
- ravb_ring_free(ndev, RAVB_BE);
- return error;
- }
-
- /* Descriptor format */
- ravb_ring_format(ndev, RAVB_BE);
- ravb_ring_format(ndev, RAVB_NC);
-
- info->dmac_init(ndev);
/* Setting the control will start the AVB-DMAC process. */
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION);
@@ -579,7 +736,151 @@ static void ravb_rx_csum(struct sk_buff *skb)
skb_trim(skb, skb->len - sizeof(__sum16));
}
-static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q)
+static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry,
+ struct ravb_rx_desc *desc)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+
+ skb = priv->rx_skb[RAVB_BE][entry];
+ priv->rx_skb[RAVB_BE][entry] = NULL;
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ ALIGN(GBETH_RX_BUFF_MAX, 16), DMA_FROM_DEVICE);
+
+ return skb;
+}
+
+/* Packet receive function for Gigabit Ethernet */
+static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ struct net_device_stats *stats;
+ struct ravb_rx_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u8 desc_status;
+ int boguscnt;
+ u16 pkt_len;
+ u8 die_dt;
+ int entry;
+ int limit;
+
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ boguscnt = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q];
+ stats = &priv->stats[q];
+
+ boguscnt = min(boguscnt, *quota);
+ limit = boguscnt;
+ desc = &priv->gbeth_rx_ring[entry];
+ while (desc->die_dt != DT_FEMPTY) {
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ desc_status = desc->msc;
+ pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS;
+
+ if (--boguscnt < 0)
+ break;
+
+ /* We use 0-byte descriptors to mark the DMA mapping errors */
+ if (!pkt_len)
+ continue;
+
+ if (desc_status & MSC_MC)
+ stats->multicast++;
+
+ if (desc_status & (MSC_CRC | MSC_RFE | MSC_RTSF | MSC_RTLF | MSC_CEEF)) {
+ stats->rx_errors++;
+ if (desc_status & MSC_CRC)
+ stats->rx_crc_errors++;
+ if (desc_status & MSC_RFE)
+ stats->rx_frame_errors++;
+ if (desc_status & (MSC_RTLF | MSC_RTSF))
+ stats->rx_length_errors++;
+ if (desc_status & MSC_CEEF)
+ stats->rx_missed_errors++;
+ } else {
+ die_dt = desc->die_dt & 0xF0;
+ switch (die_dt) {
+ case DT_FSINGLE:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi[q], skb);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ break;
+ case DT_FSTART:
+ priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ break;
+ case DT_FMID:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ break;
+ case DT_FEND:
+ skb = ravb_get_skb_gbeth(ndev, entry, desc);
+ skb_copy_to_linear_data_offset(priv->rx_1st_skb,
+ priv->rx_1st_skb->len,
+ skb->data,
+ pkt_len);
+ skb_put(priv->rx_1st_skb, pkt_len);
+ dev_kfree_skb(skb);
+ priv->rx_1st_skb->protocol =
+ eth_type_trans(priv->rx_1st_skb, ndev);
+ napi_gro_receive(&priv->napi[q],
+ priv->rx_1st_skb);
+ stats->rx_packets++;
+ stats->rx_bytes += priv->rx_1st_skb->len;
+ break;
+ }
+ }
+
+ entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
+
+ /* Refill the RX ring buffers. */
+ for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
+ entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE);
+
+ if (!priv->rx_skb[q][entry]) {
+ skb = netdev_alloc_skb(ndev, info->max_rx_len);
+ if (!skb)
+ break;
+ ravb_set_buffer_align(skb);
+ dma_addr = dma_map_single(ndev->dev.parent,
+ skb->data,
+ GBETH_RX_BUFF_MAX,
+ DMA_FROM_DEVICE);
+ skb_checksum_none_assert(skb);
+ /* We just set the data size to 0 for a failed mapping
+ * which should prevent DMA from happening...
+ */
+ if (dma_mapping_error(ndev->dev.parent, dma_addr))
+ desc->ds_cc = cpu_to_le16(0);
+ desc->dptr = cpu_to_le32(dma_addr);
+ priv->rx_skb[q][entry] = skb;
+ }
+ /* Descriptor type must be set after all the above writes */
+ dma_wmb();
+ desc->die_dt = DT_FEMPTY;
+ }
+
+ *quota -= limit - (++boguscnt);
+
+ return boguscnt <= 0;
+}
+
+/* Packet receive function for Ethernet AVB */
+static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q)
{
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
@@ -717,11 +1018,13 @@ static void ravb_rcv_snd_enable(struct net_device *ndev)
/* Wait until the DMA process has finished */
static int ravb_stop_dma(struct net_device *ndev)
{
+ struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int error;
/* Wait for stopping the hardware TX process */
- error = ravb_wait(ndev, TCCR,
- TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, 0);
+ error = ravb_wait(ndev, TCCR, info->tccr_mask, 0);
+
if (error)
return error;
@@ -859,6 +1162,7 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
{
struct net_device *ndev = dev_id;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
irqreturn_t result = IRQ_NONE;
u32 iss;
@@ -875,8 +1179,13 @@ static irqreturn_t ravb_interrupt(int irq, void *dev_id)
result = IRQ_HANDLED;
/* Network control and best effort queue RX/TX */
- for (q = RAVB_NC; q >= RAVB_BE; q--) {
- if (ravb_queue_interrupt(ndev, q))
+ if (info->nc_queues) {
+ for (q = RAVB_NC; q >= RAVB_BE; q--) {
+ if (ravb_queue_interrupt(ndev, q))
+ result = IRQ_HANDLED;
+ }
+ } else {
+ if (ravb_queue_interrupt(ndev, RAVB_BE))
result = IRQ_HANDLED;
}
}
@@ -966,16 +1275,25 @@ static int ravb_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
+ bool gptp = info->gptp || info->ccc_gac;
+ struct ravb_rx_desc *desc;
unsigned long flags;
int q = napi - priv->napi;
int mask = BIT(q);
int quota = budget;
+ unsigned int entry;
+ if (!gptp) {
+ entry = priv->cur_rx[q] % priv->num_rx_ring[q];
+ desc = &priv->gbeth_rx_ring[entry];
+ }
/* Processing RX Descriptor Ring */
/* Clear RX interrupt */
ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
- if (ravb_rx(ndev, &quota, q))
- goto out;
+ if (gptp || desc->die_dt != DT_FEMPTY) {
+ if (ravb_rx(ndev, &quota, q))
+ goto out;
+ }
/* Processing TX Descriptor Ring */
spin_lock_irqsave(&priv->lock, flags);
@@ -1000,7 +1318,8 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
- priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
+ if (info->nc_queues)
+ priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
@@ -1009,6 +1328,13 @@ out:
return budget - quota;
}
+static void ravb_set_duplex_gbeth(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+
+ ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0);
+}
+
/* PHY state control function */
static void ravb_adjust_link(struct net_device *ndev)
{
@@ -1025,6 +1351,12 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_rcv_snd_disable(ndev);
if (phydev->link) {
+ if (info->half_duplex && phydev->duplex != priv->duplex) {
+ new_state = true;
+ priv->duplex = phydev->duplex;
+ ravb_set_duplex_gbeth(ndev);
+ }
+
if (phydev->speed != priv->speed) {
new_state = true;
priv->speed = phydev->speed;
@@ -1039,6 +1371,8 @@ static void ravb_adjust_link(struct net_device *ndev)
new_state = true;
priv->link = 0;
priv->speed = 0;
+ if (info->half_duplex)
+ priv->duplex = -1;
}
/* Enable TX and RX right over here, if E-MAC change is ignored */
@@ -1061,6 +1395,7 @@ static int ravb_phy_init(struct net_device *ndev)
{
struct device_node *np = ndev->dev.parent->of_node;
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
struct phy_device *phydev;
struct device_node *pn;
phy_interface_t iface;
@@ -1068,6 +1403,7 @@ static int ravb_phy_init(struct net_device *ndev)
priv->link = 0;
priv->speed = 0;
+ priv->duplex = -1;
/* Try connecting to PHY */
pn = of_parse_phandle(np, "phy-handle", 0);
@@ -1106,15 +1442,17 @@ static int ravb_phy_init(struct net_device *ndev)
netdev_info(ndev, "limited PHY to 100Mbit/s\n");
}
- /* 10BASE, Pause and Asym Pause is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
+ if (!info->half_duplex) {
+ /* 10BASE, Pause and Asym Pause is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT);
- /* Half Duplex is not supported */
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
- phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ /* Half Duplex is not supported */
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+ phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
+ }
phy_attached_info(phydev);
@@ -1157,6 +1495,24 @@ static void ravb_set_msglevel(struct net_device *ndev, u32 value)
priv->msg_enable = value;
}
+static const char ravb_gstrings_stats_gbeth[][ETH_GSTRING_LEN] = {
+ "rx_queue_0_current",
+ "tx_queue_0_current",
+ "rx_queue_0_dirty",
+ "tx_queue_0_dirty",
+ "rx_queue_0_packets",
+ "tx_queue_0_packets",
+ "rx_queue_0_bytes",
+ "tx_queue_0_bytes",
+ "rx_queue_0_mcast_packets",
+ "rx_queue_0_errors",
+ "rx_queue_0_crc_errors",
+ "rx_queue_0_frame_errors",
+ "rx_queue_0_length_errors",
+ "rx_queue_0_csum_offload_errors",
+ "rx_queue_0_over_errors",
+};
+
static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = {
"rx_queue_0_current",
"tx_queue_0_current",
@@ -1208,11 +1564,14 @@ static void ravb_get_ethtool_stats(struct net_device *ndev,
struct ethtool_stats *estats, u64 *data)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
+ int num_rx_q;
int i = 0;
int q;
+ num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1;
/* Device-specific stats */
- for (q = RAVB_BE; q < NUM_RX_QUEUE; q++) {
+ for (q = RAVB_BE; q < num_rx_q; q++) {
struct net_device_stats *stats = &priv->stats[q];
data[i++] = priv->cur_rx[q];
@@ -1274,7 +1633,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
@@ -1287,7 +1646,8 @@ static int ravb_set_ringparam(struct net_device *ndev,
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
}
/* Set new parameters */
@@ -1306,7 +1666,7 @@ static int ravb_set_ringparam(struct net_device *ndev,
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
@@ -1319,6 +1679,7 @@ static int ravb_get_ts_info(struct net_device *ndev,
struct ethtool_ts_info *info)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *hw_info = priv->info;
info->so_timestamping =
SOF_TIMESTAMPING_TX_SOFTWARE |
@@ -1332,7 +1693,8 @@ static int ravb_get_ts_info(struct net_device *ndev,
(1 << HWTSTAMP_FILTER_NONE) |
(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
(1 << HWTSTAMP_FILTER_ALL);
- info->phc_index = ptp_clock_index(priv->ptp.clock);
+ if (hw_info->gptp || hw_info->ccc_gac)
+ info->phc_index = ptp_clock_index(priv->ptp.clock);
return 0;
}
@@ -1348,8 +1710,9 @@ static void ravb_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
- if (wol->wolopts & ~WAKE_MAGIC)
+ if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC))
return -EOPNOTSUPP;
priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
@@ -1403,7 +1766,8 @@ static int ravb_open(struct net_device *ndev)
int error;
napi_enable(&priv->napi[RAVB_BE]);
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
if (!info->multi_irqs) {
error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED,
@@ -1446,7 +1810,7 @@ static int ravb_open(struct net_device *ndev)
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1460,7 +1824,7 @@ static int ravb_open(struct net_device *ndev)
out_ptp_stop:
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
out_free_irq_nc_tx:
if (!info->multi_irqs)
@@ -1477,7 +1841,8 @@ out_free_irq_emac:
out_free_irq:
free_irq(ndev->irq, ndev);
out_napi_off:
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
return error;
}
@@ -1508,7 +1873,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
@@ -1526,7 +1891,8 @@ static void ravb_tx_timeout_work(struct work_struct *work)
}
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
/* Device init */
error = ravb_dmac_init(ndev);
@@ -1543,7 +1909,7 @@ static void ravb_tx_timeout_work(struct work_struct *work)
out:
/* Initialise PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
@@ -1553,6 +1919,7 @@ out:
static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
unsigned int num_tx_desc = priv->num_tx_desc;
u16 q = skb_get_queue_mapping(skb);
struct ravb_tstamp_skb *ts_skb;
@@ -1629,28 +1996,30 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
desc->dptr = cpu_to_le32(dma_addr);
/* TX timestamp required */
- if (q == RAVB_NC) {
- ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
- if (!ts_skb) {
- if (num_tx_desc > 1) {
- desc--;
- dma_unmap_single(ndev->dev.parent, dma_addr,
- len, DMA_TO_DEVICE);
+ if (info->gptp || info->ccc_gac) {
+ if (q == RAVB_NC) {
+ ts_skb = kmalloc(sizeof(*ts_skb), GFP_ATOMIC);
+ if (!ts_skb) {
+ if (num_tx_desc > 1) {
+ desc--;
+ dma_unmap_single(ndev->dev.parent, dma_addr,
+ len, DMA_TO_DEVICE);
+ }
+ goto unmap;
}
- goto unmap;
+ ts_skb->skb = skb_get(skb);
+ ts_skb->tag = priv->ts_skb_tag++;
+ priv->ts_skb_tag &= 0x3ff;
+ list_add_tail(&ts_skb->list, &priv->ts_skb_list);
+
+ /* TAG and timestamp required flag */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
+ desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
}
- ts_skb->skb = skb_get(skb);
- ts_skb->tag = priv->ts_skb_tag++;
- priv->ts_skb_tag &= 0x3ff;
- list_add_tail(&ts_skb->list, &priv->ts_skb_list);
- /* TAG and timestamp required flag */
- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR;
- desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12);
+ skb_tx_timestamp(skb);
}
-
- skb_tx_timestamp(skb);
/* Descriptor type must be set after all the above writes */
dma_wmb();
if (num_tx_desc > 1) {
@@ -1698,28 +2067,45 @@ static struct net_device_stats *ravb_get_stats(struct net_device *ndev)
nstats = &ndev->stats;
stats0 = &priv->stats[RAVB_BE];
- stats1 = &priv->stats[RAVB_NC];
if (info->tx_counters) {
nstats->tx_dropped += ravb_read(ndev, TROCR);
ravb_write(ndev, 0, TROCR); /* (write clear) */
}
- nstats->rx_packets = stats0->rx_packets + stats1->rx_packets;
- nstats->tx_packets = stats0->tx_packets + stats1->tx_packets;
- nstats->rx_bytes = stats0->rx_bytes + stats1->rx_bytes;
- nstats->tx_bytes = stats0->tx_bytes + stats1->tx_bytes;
- nstats->multicast = stats0->multicast + stats1->multicast;
- nstats->rx_errors = stats0->rx_errors + stats1->rx_errors;
- nstats->rx_crc_errors = stats0->rx_crc_errors + stats1->rx_crc_errors;
- nstats->rx_frame_errors =
- stats0->rx_frame_errors + stats1->rx_frame_errors;
- nstats->rx_length_errors =
- stats0->rx_length_errors + stats1->rx_length_errors;
- nstats->rx_missed_errors =
- stats0->rx_missed_errors + stats1->rx_missed_errors;
- nstats->rx_over_errors =
- stats0->rx_over_errors + stats1->rx_over_errors;
+ if (info->carrier_counters) {
+ nstats->collisions += ravb_read(ndev, CXR41);
+ ravb_write(ndev, 0, CXR41); /* (write clear) */
+ nstats->tx_carrier_errors += ravb_read(ndev, CXR42);
+ ravb_write(ndev, 0, CXR42); /* (write clear) */
+ }
+
+ nstats->rx_packets = stats0->rx_packets;
+ nstats->tx_packets = stats0->tx_packets;
+ nstats->rx_bytes = stats0->rx_bytes;
+ nstats->tx_bytes = stats0->tx_bytes;
+ nstats->multicast = stats0->multicast;
+ nstats->rx_errors = stats0->rx_errors;
+ nstats->rx_crc_errors = stats0->rx_crc_errors;
+ nstats->rx_frame_errors = stats0->rx_frame_errors;
+ nstats->rx_length_errors = stats0->rx_length_errors;
+ nstats->rx_missed_errors = stats0->rx_missed_errors;
+ nstats->rx_over_errors = stats0->rx_over_errors;
+ if (info->nc_queues) {
+ stats1 = &priv->stats[RAVB_NC];
+
+ nstats->rx_packets += stats1->rx_packets;
+ nstats->tx_packets += stats1->tx_packets;
+ nstats->rx_bytes += stats1->rx_bytes;
+ nstats->tx_bytes += stats1->tx_bytes;
+ nstats->multicast += stats1->multicast;
+ nstats->rx_errors += stats1->rx_errors;
+ nstats->rx_crc_errors += stats1->rx_crc_errors;
+ nstats->rx_frame_errors += stats1->rx_frame_errors;
+ nstats->rx_length_errors += stats1->rx_length_errors;
+ nstats->rx_missed_errors += stats1->rx_missed_errors;
+ nstats->rx_over_errors += stats1->rx_over_errors;
+ }
return nstats;
}
@@ -1752,7 +2138,7 @@ static int ravb_close(struct net_device *ndev)
ravb_write(ndev, 0, TIC);
/* Stop PTP Clock driver */
- if (info->no_ptp_cfg_active)
+ if (info->gptp)
ravb_ptp_stop(ndev);
/* Set the config mode to stop the AVB-DMAC's processes */
@@ -1761,10 +2147,12 @@ static int ravb_close(struct net_device *ndev)
"device will be stopped after h/w processes are done.\n");
/* Clear the timestamp list */
- list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
- list_del(&ts_skb->list);
- kfree_skb(ts_skb->skb);
- kfree(ts_skb);
+ if (info->gptp || info->ccc_gac) {
+ list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) {
+ list_del(&ts_skb->list);
+ kfree_skb(ts_skb->skb);
+ kfree(ts_skb);
+ }
}
/* PHY disconnect */
@@ -1784,12 +2172,14 @@ static int ravb_close(struct net_device *ndev)
}
free_irq(ndev->irq, ndev);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
/* Free all the skb's in the RX queue and the DMA buffers. */
ravb_ring_free(ndev, RAVB_BE);
- ravb_ring_free(ndev, RAVB_NC);
+ if (info->nc_queues)
+ ravb_ring_free(ndev, RAVB_NC);
return 0;
}
@@ -1918,8 +2308,15 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
spin_unlock_irqrestore(&priv->lock, flags);
}
-static int ravb_set_features_rx_csum(struct net_device *ndev,
- netdev_features_t features)
+static int ravb_set_features_gbeth(struct net_device *ndev,
+ netdev_features_t features)
+{
+ /* Place holder */
+ return 0;
+}
+
+static int ravb_set_features_rcar(struct net_device *ndev,
+ netdev_features_t features)
{
netdev_features_t changed = ndev->features ^ features;
@@ -1937,7 +2334,7 @@ static int ravb_set_features(struct net_device *ndev,
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- return info->set_rx_csum_feature(ndev, features);
+ return info->set_feature(ndev, features);
}
static const struct net_device_ops ravb_netdev_ops = {
@@ -2001,43 +2398,72 @@ static int ravb_mdio_release(struct ravb_private *priv)
}
static const struct ravb_hw_info ravb_gen3_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.internal_delay = 1,
.tx_counters = 1,
.multi_irqs = 1,
- .ptp_cfg_active = 1,
+ .ccc_gac = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
};
static const struct ravb_hw_info ravb_gen2_hw_info = {
- .rx_ring_free = ravb_rx_ring_free,
- .rx_ring_format = ravb_rx_ring_format,
- .alloc_rx_desc = ravb_alloc_rx_desc,
- .receive = ravb_rcar_rx,
- .set_rate = ravb_set_rate,
- .set_rx_csum_feature = ravb_set_features_rx_csum,
- .dmac_init = ravb_rcar_dmac_init,
- .emac_init = ravb_rcar_emac_init,
+ .rx_ring_free = ravb_rx_ring_free_rcar,
+ .rx_ring_format = ravb_rx_ring_format_rcar,
+ .alloc_rx_desc = ravb_alloc_rx_desc_rcar,
+ .receive = ravb_rx_rcar,
+ .set_rate = ravb_set_rate_rcar,
+ .set_feature = ravb_set_features_rcar,
+ .dmac_init = ravb_dmac_init_rcar,
+ .emac_init = ravb_emac_init_rcar,
.gstrings_stats = ravb_gstrings_stats,
.gstrings_size = sizeof(ravb_gstrings_stats),
.net_hw_features = NETIF_F_RXCSUM,
.net_features = NETIF_F_RXCSUM,
.stats_len = ARRAY_SIZE(ravb_gstrings_stats),
.max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
+ .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3,
+ .rx_max_buf_size = SZ_2K,
.aligned_tx = 1,
- .no_ptp_cfg_active = 1,
+ .gptp = 1,
+ .nc_queues = 1,
+ .magic_pkt = 1,
+};
+
+static const struct ravb_hw_info gbeth_hw_info = {
+ .rx_ring_free = ravb_rx_ring_free_gbeth,
+ .rx_ring_format = ravb_rx_ring_format_gbeth,
+ .alloc_rx_desc = ravb_alloc_rx_desc_gbeth,
+ .receive = ravb_rx_gbeth,
+ .set_rate = ravb_set_rate_gbeth,
+ .set_feature = ravb_set_features_gbeth,
+ .dmac_init = ravb_dmac_init_gbeth,
+ .emac_init = ravb_emac_init_gbeth,
+ .gstrings_stats = ravb_gstrings_stats_gbeth,
+ .gstrings_size = sizeof(ravb_gstrings_stats_gbeth),
+ .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth),
+ .max_rx_len = ALIGN(GBETH_RX_BUFF_MAX, RAVB_ALIGN),
+ .tccr_mask = TCCR_TSRQ0,
+ .rx_max_buf_size = SZ_8K,
+ .aligned_tx = 1,
+ .tx_counters = 1,
+ .carrier_counters = 1,
+ .half_duplex = 1,
};
static const struct of_device_id ravb_match_table[] = {
@@ -2046,6 +2472,7 @@ static const struct of_device_id ravb_match_table[] = {
{ .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
{ .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
{ .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
+ { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
{ }
};
MODULE_DEVICE_TABLE(of, ravb_match_table);
@@ -2080,13 +2507,15 @@ static void ravb_set_config_mode(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
const struct ravb_hw_info *info = priv->info;
- if (info->no_ptp_cfg_active) {
+ if (info->gptp) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
/* Set CSEL value */
ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB);
- } else {
+ } else if (info->ccc_gac) {
ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG |
CCC_GAC | CCC_CSEL_HPB);
+ } else {
+ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG);
}
}
@@ -2192,8 +2621,11 @@ static int ravb_probe(struct platform_device *pdev)
priv->pdev = pdev;
priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE;
priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE;
- priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
- priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ if (info->nc_queues) {
+ priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE;
+ priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE;
+ }
+
priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(priv->addr)) {
error = PTR_ERR(priv->addr);
@@ -2252,7 +2684,7 @@ static int ravb_probe(struct platform_device *pdev)
}
clk_prepare_enable(priv->refclk);
- ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
ndev->min_mtu = ETH_MIN_MTU;
/* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer
@@ -2269,13 +2701,15 @@ static int ravb_probe(struct platform_device *pdev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- error = ravb_set_gti(ndev);
- if (error)
- goto out_disable_refclk;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ error = ravb_set_gti(ndev);
+ if (error)
+ goto out_disable_refclk;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay) {
ravb_parse_delay_mode(np, ndev);
@@ -2301,7 +2735,7 @@ static int ravb_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&priv->ts_skb_list);
/* Initialise PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_init(ndev, pdev);
/* Debug message level */
@@ -2323,7 +2757,8 @@ static int ravb_probe(struct platform_device *pdev)
}
netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll, 64);
- netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
+ if (info->nc_queues)
+ netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll, 64);
/* Network device register */
error = register_netdev(ndev);
@@ -2341,7 +2776,9 @@ static int ravb_probe(struct platform_device *pdev)
return 0;
out_napi_del:
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
+
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
out_dma_free:
@@ -2349,7 +2786,7 @@ out_dma_free:
priv->desc_bat_dma);
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
out_disable_refclk:
clk_disable_unprepare(priv->refclk);
@@ -2369,7 +2806,7 @@ static int ravb_remove(struct platform_device *pdev)
const struct ravb_hw_info *info = priv->info;
/* Stop PTP Clock driver */
- if (info->ptp_cfg_active)
+ if (info->ccc_gac)
ravb_ptp_stop(ndev);
clk_disable_unprepare(priv->refclk);
@@ -2380,7 +2817,8 @@ static int ravb_remove(struct platform_device *pdev)
ravb_write(ndev, CCC_OPC_RESET, CCC);
pm_runtime_put_sync(&pdev->dev);
unregister_netdev(ndev);
- netif_napi_del(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ netif_napi_del(&priv->napi[RAVB_NC]);
netif_napi_del(&priv->napi[RAVB_BE]);
ravb_mdio_release(priv);
pm_runtime_disable(&pdev->dev);
@@ -2394,6 +2832,7 @@ static int ravb_remove(struct platform_device *pdev)
static int ravb_wol_setup(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
/* Disable interrupts by clearing the interrupt masks. */
ravb_write(ndev, 0, RIC0);
@@ -2402,7 +2841,8 @@ static int ravb_wol_setup(struct net_device *ndev)
/* Only allow ECI interrupts */
synchronize_irq(priv->emac_irq);
- napi_disable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_disable(&priv->napi[RAVB_NC]);
napi_disable(&priv->napi[RAVB_BE]);
ravb_write(ndev, ECSIPR_MPDIP, ECSIPR);
@@ -2415,9 +2855,11 @@ static int ravb_wol_setup(struct net_device *ndev)
static int ravb_wol_restore(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
+ const struct ravb_hw_info *info = priv->info;
int ret;
- napi_enable(&priv->napi[RAVB_NC]);
+ if (info->nc_queues)
+ napi_enable(&priv->napi[RAVB_NC]);
napi_enable(&priv->napi[RAVB_BE]);
/* Disable MagicPacket */
@@ -2468,13 +2910,15 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Set AVB config mode */
ravb_set_config_mode(ndev);
- /* Set GTI value */
- ret = ravb_set_gti(ndev);
- if (ret)
- return ret;
+ if (info->gptp || info->ccc_gac) {
+ /* Set GTI value */
+ ret = ravb_set_gti(ndev);
+ if (ret)
+ return ret;
- /* Request GTI loading */
- ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ /* Request GTI loading */
+ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ }
if (info->internal_delay)
ravb_set_delay_mode(ndev);
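The R-Car/GbEth split above hangs every SoC difference off a single ravb_hw_info pointer selected through the OF match table, then gates each queue- or PTP-specific path on a capability bit. A minimal sketch of that pattern follows; the demo_* names and the reduced field set are illustrative only, not the driver's actual header.

#include <linux/netdevice.h>

struct demo_hw_info {
	void (*emac_init)(struct net_device *ndev);	/* per-SoC init hook */
	unsigned int nc_queues:1;	/* second "network control" queue exists */
	unsigned int gptp:1;		/* gPTP timer driven from config mode */
	unsigned int ccc_gac:1;		/* gPTP timer driven from operation mode */
};

struct demo_priv {
	const struct demo_hw_info *info;	/* looked up once via of_device_id->data */
	struct napi_struct napi[2];
};

/* Queue-specific paths test the capability bit instead of assuming both
 * queues exist, which is what the hunks above do for NAPI, ring
 * allocation and statistics. */
static void demo_napi_enable(struct demo_priv *priv)
{
	napi_enable(&priv->napi[0]);		/* best-effort queue: always present */
	if (priv->info->nc_queues)
		napi_enable(&priv->napi[1]);	/* NC queue: only where the SoC has it */
}
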
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 1374faa229a2..a3fbb2221c9a 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1153,17 +1153,19 @@ static void update_mac_address(struct net_device *ndev)
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
- memcpy(ndev->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac);
} else {
u32 mahr = sh_eth_read(ndev, MAHR);
u32 malr = sh_eth_read(ndev, MALR);
-
- ndev->dev_addr[0] = (mahr >> 24) & 0xFF;
- ndev->dev_addr[1] = (mahr >> 16) & 0xFF;
- ndev->dev_addr[2] = (mahr >> 8) & 0xFF;
- ndev->dev_addr[3] = (mahr >> 0) & 0xFF;
- ndev->dev_addr[4] = (malr >> 8) & 0xFF;
- ndev->dev_addr[5] = (malr >> 0) & 0xFF;
+ u8 addr[ETH_ALEN];
+
+ addr[0] = (mahr >> 24) & 0xFF;
+ addr[1] = (mahr >> 16) & 0xFF;
+ addr[2] = (mahr >> 8) & 0xFF;
+ addr[3] = (mahr >> 0) & 0xFF;
+ addr[4] = (malr >> 8) & 0xFF;
+ addr[5] = (malr >> 0) & 0xFF;
+ eth_hw_addr_set(ndev, addr);
}
}
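The sh_eth hunk above is one instance of the tree-wide rule in this series: drivers stop writing dev->dev_addr directly, assemble the address in a stack buffer, and hand it to eth_hw_addr_set(). A minimal sketch under that assumption; the register arguments are only placeholders for whatever the hardware provides.

#include <linux/etherdevice.h>

static void demo_set_mac_from_regs(struct net_device *ndev, u32 mahr, u32 malr)
{
	u8 addr[ETH_ALEN];

	addr[0] = (mahr >> 24) & 0xff;
	addr[1] = (mahr >> 16) & 0xff;
	addr[2] = (mahr >> 8) & 0xff;
	addr[3] = mahr & 0xff;
	addr[4] = (malr >> 8) & 0xff;
	addr[5] = malr & 0xff;
	eth_hw_addr_set(ndev, addr);	/* the one remaining writer of dev->dev_addr */
}
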
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 3364b6a56bd1..ba4062881eed 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -1954,7 +1954,7 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
err = rocker_cmd_set_port_settings_macaddr(rocker_port, addr->sa_data);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return 0;
}
@@ -2545,11 +2545,13 @@ static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
const struct rocker *rocker = rocker_port->rocker;
const struct pci_dev *pdev = rocker->pdev;
+ u8 addr[ETH_ALEN];
int err;
- err = rocker_cmd_get_port_settings_macaddr(rocker_port,
- rocker_port->dev->dev_addr);
- if (err) {
+ err = rocker_cmd_get_port_settings_macaddr(rocker_port, addr);
+ if (!err) {
+ eth_hw_addr_set(rocker_port->dev, addr);
+ } else {
dev_warn(&pdev->dev, "failed to get mac address, using random\n");
eth_hw_addr_random(rocker_port->dev);
}
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
index 049dc6cf4611..0f45107db8dd 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_common.h
@@ -329,7 +329,7 @@ struct sxgbe_core_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(void __iomem *ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
+ void (*set_umac_addr)(void __iomem *ioaddr, const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(void __iomem *ioaddr, unsigned char *addr,
unsigned int reg_n);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
index e96e2bd295ef..7d9f257de92a 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_core.c
@@ -85,7 +85,8 @@ static void sxgbe_core_pmt(void __iomem *ioaddr, unsigned long mode)
}
/* Set/Get Unicast MAC addresses */
-static void sxgbe_core_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void sxgbe_core_set_umac_addr(void __iomem *ioaddr,
+ const unsigned char *addr,
unsigned int reg_n)
{
u32 high_word, low_word;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 6781aa636d58..32161a56726c 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -931,10 +931,13 @@ static int sxgbe_get_hw_features(struct sxgbe_priv_data * const priv)
static void sxgbe_check_ether_addr(struct sxgbe_priv_data *priv)
{
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ u8 addr[ETH_ALEN];
+
priv->hw->mac->get_umac_addr((void __iomem *)
- priv->ioaddr,
- priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ priv->ioaddr, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
}
dev_info(priv->device, "device MAC address %pM\n",
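sxgbe_check_ether_addr above (and stmmac_check_ether_addr later in this section) follow the same read-validate-fallback chain once the address can no longer be read straight into dev_addr. A sketch of that chain; demo_read_hw_mac() merely stands in for the real register or EEPROM read and is not a kernel API.

#include <linux/etherdevice.h>

static void demo_read_hw_mac(u8 *addr)
{
	eth_random_addr(addr);		/* stand-in for the hardware read */
}

static void demo_check_ether_addr(struct net_device *ndev)
{
	u8 addr[ETH_ALEN];

	if (is_valid_ether_addr(ndev->dev_addr))
		return;				/* already set, e.g. from DT or userspace */

	demo_read_hw_mac(addr);
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(ndev, addr);	/* take the hardware address */
	else
		eth_hw_addr_random(ndev);	/* last resort */
}
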
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 4639ed9438a3..926532466691 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -118,7 +118,7 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
/* Get MAC address if available (DT) */
- of_get_mac_address(node, priv->dev->dev_addr);
+ of_get_ethdev_address(node, priv->dev);
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index 37ff25a84030..96065dfc747b 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -167,7 +167,7 @@ static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
struct sgiseeq_private *sp = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&sp->tx_lock);
__sgiseeq_set_mac_address(dev);
@@ -764,7 +764,7 @@ static int sgiseeq_probe(struct platform_device *pdev)
setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);
- memcpy(dev->dev_addr, pd->mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->mac);
#ifdef DEBUG
gpriv = sp;
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index e7e2223aebbf..cf366ed2557c 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1038,7 +1038,7 @@ int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
}
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
@@ -1050,7 +1050,7 @@ int efx_ef10_vport_add_mac(struct efx_nic *efx,
}
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac)
+ unsigned int port_id, const u8 *mac)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
diff --git a/drivers/net/ethernet/sfc/ef100_nic.c b/drivers/net/ethernet/sfc/ef100_nic.c
index 518268ce2064..6aa81229b68a 100644
--- a/drivers/net/ethernet/sfc/ef100_nic.c
+++ b/drivers/net/ethernet/sfc/ef100_nic.c
@@ -1250,7 +1250,7 @@ int ef100_probe_pf(struct efx_nic *efx)
if (rc)
goto fail;
/* Assign MAC address */
- memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
+ eth_hw_addr_set(net_dev, net_dev->perm_addr);
memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);
return 0;
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 752d6406f07e..7f5aa4a8c451 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -480,7 +480,7 @@ static int efx_ef10_vport_del_vf_mac(struct efx_nic *efx, unsigned int port_id,
return rc;
}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
struct ef10_vf *vf;
@@ -523,7 +523,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
goto fail;
if (vf->efx)
- ether_addr_copy(vf->efx->net_dev->dev_addr, mac);
+ eth_hw_addr_set(vf->efx->net_dev, mac);
}
ether_addr_copy(vf->mac, mac);
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index cfe556d17313..3c703ca878b0 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -39,7 +39,7 @@ static inline void efx_ef10_sriov_reset(struct efx_nic *efx) {}
void efx_ef10_sriov_fini(struct efx_nic *efx);
static inline void efx_ef10_sriov_flr(struct efx_nic *efx, unsigned vf_i) {}
-int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_ef10_sriov_set_vf_vlan(struct efx_nic *efx, int vf_i,
u16 vlan, u8 qos);
@@ -60,9 +60,9 @@ int efx_ef10_vswitching_restore_vf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_pf(struct efx_nic *efx);
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx);
int efx_ef10_vport_add_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vport_del_mac(struct efx_nic *efx,
- unsigned int port_id, u8 *mac);
+ unsigned int port_id, const u8 *mac);
int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id);
int efx_ef10_vadaptor_query(struct efx_nic *efx, unsigned int port_id,
u32 *port_flags, u32 *vadaptor_flags,
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 43ef4f529028..6960a2fe2b53 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -136,7 +136,7 @@ static int efx_probe_port(struct efx_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c
index 896b59253197..f187631b2c5c 100644
--- a/drivers/net/ethernet/sfc/efx_common.c
+++ b/drivers/net/ethernet/sfc/efx_common.c
@@ -181,11 +181,11 @@ int efx_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c
index bf1443539a1a..bd552c7dffcb 100644
--- a/drivers/net/ethernet/sfc/ethtool_common.c
+++ b/drivers/net/ethernet/sfc/ethtool_common.c
@@ -563,20 +563,14 @@ int efx_ethtool_get_link_ksettings(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_link_state *link_state = &efx->link_state;
- u32 supported;
mutex_lock(&efx->mac_lock);
efx_mcdi_phy_get_link_ksettings(efx, cmd);
mutex_unlock(&efx->mac_lock);
/* Both MACs support pause frames (bidirectional and respond-only) */
- ethtool_convert_link_mode_to_legacy_u32(&supported,
- cmd->link_modes.supported);
-
- supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
- supported);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
+ ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
if (LOOPBACK_INTERNAL(efx)) {
cmd->base.speed = link_state->speed;
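The ethtool hunk above drops the legacy u32 round trip in favour of the link-mode helpers. A small sketch of the helper-based form, assuming an already-populated ksettings structure.

#include <linux/ethtool.h>

static void demo_add_pause_modes(struct ethtool_link_ksettings *cmd)
{
	/* sets the bits directly in the link-mode bitmap; no conversion
	 * to and from the legacy 32-bit SUPPORTED_* mask */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
	ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
}
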
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 423bdf81200f..c68837a951f4 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -1044,7 +1044,7 @@ static int ef4_probe_port(struct ef4_nic *efx)
return rc;
/* Initialise MAC address to permanent address */
- ether_addr_copy(efx->net_dev->dev_addr, efx->net_dev->perm_addr);
+ eth_hw_addr_set(efx->net_dev, efx->net_dev->perm_addr);
return 0;
}
@@ -2162,11 +2162,11 @@ static int ef4_set_mac_address(struct net_device *net_dev, void *data)
/* save old address */
ether_addr_copy(old_addr, net_dev->dev_addr);
- ether_addr_copy(net_dev->dev_addr, new_addr);
+ eth_hw_addr_set(net_dev, new_addr);
if (efx->type->set_mac_address) {
rc = efx->type->set_mac_address(efx);
if (rc) {
- ether_addr_copy(net_dev->dev_addr, old_addr);
+ eth_hw_addr_set(net_dev, old_addr);
return rc;
}
}
diff --git a/drivers/net/ethernet/sfc/mcdi_port_common.c b/drivers/net/ethernet/sfc/mcdi_port_common.c
index 4bd3ef8f3384..c4fe3c48ac46 100644
--- a/drivers/net/ethernet/sfc/mcdi_port_common.c
+++ b/drivers/net/ethernet/sfc/mcdi_port_common.c
@@ -132,16 +132,27 @@ void mcdi_to_ethtool_linkset(u32 media, u32 cap, unsigned long *linkset)
case MC_CMD_MEDIA_SFP_PLUS:
case MC_CMD_MEDIA_QSFP_PLUS:
SET_BIT(FIBRE);
- if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN))
+ if (cap & (1 << MC_CMD_PHY_CAP_1000FDX_LBN)) {
SET_BIT(1000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
- SET_BIT(10000baseT_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+ SET_BIT(1000baseX_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN)) {
+ SET_BIT(10000baseCR_Full);
+ SET_BIT(10000baseLR_Full);
+ SET_BIT(10000baseSR_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN)) {
SET_BIT(40000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN))
+ SET_BIT(40000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_100000FDX_LBN)) {
SET_BIT(100000baseCR4_Full);
- if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN))
+ SET_BIT(100000baseSR4_Full);
+ }
+ if (cap & (1 << MC_CMD_PHY_CAP_25000FDX_LBN)) {
SET_BIT(25000baseCR_Full);
+ SET_BIT(25000baseSR_Full);
+ }
if (cap & (1 << MC_CMD_PHY_CAP_50000FDX_LBN))
SET_BIT(50000baseCR2_Full);
break;
@@ -192,15 +203,19 @@ u32 ethtool_linkset_to_mcdi_cap(const unsigned long *linkset)
result |= (1 << MC_CMD_PHY_CAP_100FDX_LBN);
if (TEST_BIT(1000baseT_Half))
result |= (1 << MC_CMD_PHY_CAP_1000HDX_LBN);
- if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full))
+ if (TEST_BIT(1000baseT_Full) || TEST_BIT(1000baseKX_Full) ||
+ TEST_BIT(1000baseX_Full))
result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
- if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full))
+ if (TEST_BIT(10000baseT_Full) || TEST_BIT(10000baseKX4_Full) ||
+ TEST_BIT(10000baseCR_Full) || TEST_BIT(10000baseLR_Full) ||
+ TEST_BIT(10000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
- if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full))
+ if (TEST_BIT(40000baseCR4_Full) || TEST_BIT(40000baseKR4_Full) ||
+ TEST_BIT(40000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
- if (TEST_BIT(100000baseCR4_Full))
+ if (TEST_BIT(100000baseCR4_Full) || TEST_BIT(100000baseSR4_Full))
result |= (1 << MC_CMD_PHY_CAP_100000FDX_LBN);
- if (TEST_BIT(25000baseCR_Full))
+ if (TEST_BIT(25000baseCR_Full) || TEST_BIT(25000baseSR_Full))
result |= (1 << MC_CMD_PHY_CAP_25000FDX_LBN);
if (TEST_BIT(50000baseCR2_Full))
result |= (1 << MC_CMD_PHY_CAP_50000FDX_LBN);
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index f6981810039d..cc15ee8812d9 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1440,7 +1440,7 @@ struct efx_nic_type {
bool (*sriov_wanted)(struct efx_nic *efx);
void (*sriov_reset)(struct efx_nic *efx);
void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
- int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
+ int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, const u8 *mac);
int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
u8 qos);
int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index a39c5143b386..797e51802ccb 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -648,7 +648,7 @@ static int efx_ptp_get_attributes(struct efx_nic *efx)
} else if (rc == -EINVAL) {
fmt = MC_CMD_PTP_OUT_GET_ATTRIBUTES_SECONDS_NANOSECONDS;
} else if (rc == -EPERM) {
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
return rc;
} else {
efx_mcdi_display_error(efx, MC_CMD_PTP, sizeof(inbuf),
@@ -824,7 +824,7 @@ static int efx_ptp_disable(struct efx_nic *efx)
* should only have been called during probe.
*/
if (rc == -ENOSYS || rc == -EPERM)
- netif_info(efx, probe, efx->net_dev, "no PTP support\n");
+ pci_info(efx->pci_dev, "no PTP support\n");
else if (rc)
efx_mcdi_display_error(efx, MC_CMD_PTP,
MC_CMD_PTP_IN_DISABLE_LEN,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 83dcfcae3d4b..f12851a527d9 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1057,7 +1057,7 @@ void efx_siena_sriov_probe(struct efx_nic *efx)
return;
if (efx_siena_sriov_cmd(efx, false, &efx->vi_scale, &count)) {
- netif_info(efx, probe, efx->net_dev, "no SR-IOV VFs probed\n");
+ pci_info(efx->pci_dev, "no SR-IOV VFs probed\n");
return;
}
if (count > 0 && count > max_vfs)
@@ -1591,7 +1591,7 @@ void efx_fini_sriov(void)
destroy_workqueue(vfdi_workqueue);
}
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, const u8 *mac)
{
struct siena_nic_data *nic_data = efx->nic_data;
struct siena_vf *vf;
diff --git a/drivers/net/ethernet/sfc/siena_sriov.h b/drivers/net/ethernet/sfc/siena_sriov.h
index e441c89c25ce..e548c4daf189 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.h
+++ b/drivers/net/ethernet/sfc/siena_sriov.h
@@ -46,7 +46,7 @@ bool efx_siena_sriov_wanted(struct efx_nic *efx);
void efx_siena_sriov_reset(struct efx_nic *efx);
void efx_siena_sriov_flr(struct efx_nic *efx, unsigned flr);
-int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, u8 *mac);
+int efx_siena_sriov_set_vf_mac(struct efx_nic *efx, int vf, const u8 *mac);
int efx_siena_sriov_set_vf_vlan(struct efx_nic *efx, int vf,
u16 vlan, u8 qos);
int efx_siena_sriov_set_vf_spoofchk(struct efx_nic *efx, int vf,
diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c
index 062f7844c496..e2d009866a7b 100644
--- a/drivers/net/ethernet/sgi/ioc3-eth.c
+++ b/drivers/net/ethernet/sgi/ioc3-eth.c
@@ -243,7 +243,7 @@ static int ioc3_set_mac_address(struct net_device *dev, void *addr)
struct ioc3_private *ip = netdev_priv(dev);
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, sa->sa_data);
spin_lock_irq(&ip->ioc3_lock);
__ioc3_set_mac_address(dev);
@@ -920,7 +920,7 @@ static int ioc3eth_probe(struct platform_device *pdev)
ioc3_mii_start(ip);
ioc3_ssram_disc(ip);
- memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, mac_addr);
/* The IOC3-specific entries in the device structure. */
dev->watchdog_timeo = 5 * HZ;
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index efce834d8ee6..6d850ea2b94c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -836,7 +836,7 @@ static int meth_probe(struct platform_device *pdev)
dev->watchdog_timeo = timeout;
dev->irq = MACE_ETHERNET_IRQ;
dev->base_addr = (unsigned long)&mace->eth;
- memcpy(dev->dev_addr, o2meth_eaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, o2meth_eaddr);
priv = netdev_priv(dev);
priv->pdev = pdev;
diff --git a/drivers/net/ethernet/silan/sc92031.c b/drivers/net/ethernet/silan/sc92031.c
index 1fd08a04bd4e..ff4197f5e46d 100644
--- a/drivers/net/ethernet/silan/sc92031.c
+++ b/drivers/net/ethernet/silan/sc92031.c
@@ -1400,6 +1400,7 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
void __iomem* port_base;
struct net_device *dev;
struct sc92031_priv *priv;
+ u8 addr[ETH_ALEN];
u32 mac0, mac1;
err = pci_enable_device(pdev);
@@ -1458,12 +1459,13 @@ static int sc92031_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mac0 = ioread32(port_base + MAC0);
mac1 = ioread32(port_base + MAC0 + 4);
- dev->dev_addr[0] = mac0 >> 24;
- dev->dev_addr[1] = mac0 >> 16;
- dev->dev_addr[2] = mac0 >> 8;
- dev->dev_addr[3] = mac0;
- dev->dev_addr[4] = mac1 >> 8;
- dev->dev_addr[5] = mac1;
+ addr[0] = mac0 >> 24;
+ addr[1] = mac0 >> 16;
+ addr[2] = mac0 >> 8;
+ addr[3] = mac0;
+ addr[4] = mac1 >> 8;
+ addr[5] = mac1;
+ eth_hw_addr_set(dev, addr);
err = register_netdev(dev);
if (err < 0)
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 3d1a18a01ce5..216bb2d34d7c 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1070,7 +1070,7 @@ static int sis190_open(struct net_device *dev)
/*
* Rx and Tx descriptors need 256 bytes alignment.
- * pci_alloc_consistent() guarantees a stronger alignment.
+ * dma_alloc_coherent() guarantees a stronger alignment.
*/
tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES,
&tp->tx_dma, GFP_KERNEL);
@@ -1586,6 +1586,7 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
{
struct sis190_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
+ __le16 addr[ETH_ALEN / 2];
u16 sig;
int i;
@@ -1606,8 +1607,9 @@ static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
+ addr[i] = cpu_to_le16(w);
}
+ eth_hw_addr_set(dev, (u8 *)addr);
sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo));
@@ -1629,6 +1631,7 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
static const u16 ids[] = { 0x0965, 0x0966, 0x0968 };
struct sis190_private *tp = netdev_priv(dev);
struct pci_dev *isa_bridge;
+ u8 addr[ETH_ALEN];
u8 reg, tmp8;
unsigned int i;
@@ -1657,8 +1660,9 @@ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
- dev->dev_addr[i] = inb(0x79);
+ addr[i] = inb(0x79);
}
+ eth_hw_addr_set(dev, addr);
outb(0x12, 0x78);
reg = inb(0x79);
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index 60a0c0e9ded2..cc2d907c4c4b 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -258,6 +258,7 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u16 signature;
int i;
@@ -271,7 +272,8 @@ static int sis900_get_mac_addr(struct pci_dev *pci_dev,
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- ((u16 *)(net_dev->dev_addr))[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i+EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
return 1;
}
@@ -290,6 +292,7 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
struct net_device *net_dev)
{
struct pci_dev *isa_bridge = NULL;
+ u8 addr[ETH_ALEN];
u8 reg;
int i;
@@ -306,8 +309,9 @@ static int sis630e_get_mac_addr(struct pci_dev *pci_dev,
for (i = 0; i < 6; i++) {
outb(0x09 + i, 0x70);
- ((u8 *)(net_dev->dev_addr))[i] = inb(0x71);
+ addr[i] = inb(0x71);
}
+ eth_hw_addr_set(net_dev, addr);
pci_write_config_byte(isa_bridge, 0x48, reg & ~0x40);
pci_dev_put(isa_bridge);
@@ -331,6 +335,7 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
u32 rfcrSave;
u32 i;
@@ -345,8 +350,9 @@ static int sis635_get_mac_addr(struct pci_dev *pci_dev,
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
sw32(rfcr, (i << RFADDR_shift));
- *( ((u16 *)net_dev->dev_addr) + i) = sr16(rfdr);
+ addr[i] = sr16(rfdr);
}
+ eth_hw_addr_set(net_dev, (u8 *)addr);
/* enable packet filtering */
sw32(rfcr, rfcrSave | RFEN);
@@ -375,17 +381,18 @@ static int sis96x_get_mac_addr(struct pci_dev *pci_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
void __iomem *ioaddr = sis_priv->ioaddr;
+ u16 addr[ETH_ALEN / 2];
int wait, rc = 0;
sw32(mear, EEREQ);
for (wait = 0; wait < 2000; wait++) {
if (sr32(mear) & EEGNT) {
- u16 *mac = (u16 *)net_dev->dev_addr;
int i;
/* get MAC address from EEPROM */
for (i = 0; i < 3; i++)
- mac[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ addr[i] = read_eeprom(ioaddr, i + EEPROMMACAddr);
+ eth_hw_addr_set(net_dev, (u8 *)addr);
rc = 1;
break;
@@ -1098,7 +1105,7 @@ sis900_init_rxfilter (struct net_device * net_dev)
/* load MAC addr to filter data register */
for (i = 0 ; i < 3 ; i++) {
- u32 w = (u32) *((u16 *)(net_dev->dev_addr)+i);
+ u32 w = (u32) *((const u16 *)(net_dev->dev_addr)+i);
sw32(rfcr, i << RFADDR_shift);
sw32(rfdr, w);
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 44daf79a8f97..a0654e88444c 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -325,6 +325,7 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *dev;
struct epic_private *ep;
int i, ret, option = 0, duplex = 0;
+ __le16 addr[ETH_ALEN / 2];
void *ring_space;
dma_addr_t ring_dma;
@@ -416,7 +417,8 @@ static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Note: the '175 does not have a serial EEPROM. */
for (i = 0; i < 3; i++)
- ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));
+ addr[i] = cpu_to_le16(er16(LAN0 + i*4));
+ eth_hw_addr_set(dev, (u8 *)addr);
if (debug > 2) {
dev_dbg(&pdev->dev, "EEPROM contents:\n");
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index b008b4e8a2a5..89381f796985 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -1788,6 +1788,7 @@ static int smc911x_probe(struct net_device *dev)
struct dma_slave_config config;
dma_cap_mask_t mask;
#endif
+ u8 addr[ETH_ALEN];
DBG(SMC_DEBUG_FUNC, dev, "--> %s\n", __func__);
@@ -1892,7 +1893,8 @@ static int smc911x_probe(struct net_device *dev)
spin_lock_init(&lp->lock);
/* Get the MAC address */
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc911x_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 42fc37c7887a..37c822e27207 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -347,6 +347,7 @@ static void smc91c92_detach(struct pcmcia_device *link)
static int cvt_ascii_address(struct net_device *dev, char *s)
{
+ u8 mac[ETH_ALEN];
int i, j, da, c;
if (strlen(s) != 12)
@@ -359,8 +360,9 @@ static int cvt_ascii_address(struct net_device *dev, char *s)
da += ((c >= '0') && (c <= '9')) ?
(c - '0') : ((c & 0x0f) + 9);
}
- dev->dev_addr[i] = da;
+ mac[i] = da;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -539,6 +541,7 @@ static int mot_setup(struct pcmcia_device *link)
struct net_device *dev = link->priv;
unsigned int ioaddr = dev->base_addr;
int i, wait, loop;
+ u8 mac[ETH_ALEN];
u_int addr;
/* Read Ethernet address from Serial EEPROM */
@@ -559,9 +562,10 @@ static int mot_setup(struct pcmcia_device *link)
return -1;
addr = inw(ioaddr + GENERAL);
- dev->dev_addr[2*i] = addr & 0xff;
- dev->dev_addr[2*i+1] = (addr >> 8) & 0xff;
+ mac[2*i] = addr & 0xff;
+ mac[2*i+1] = (addr >> 8) & 0xff;
}
+ eth_hw_addr_set(dev, mac);
return 0;
}
@@ -666,14 +670,13 @@ static int pcmcia_osi_mac(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen < 8)
return -EINVAL;
if (tuple->TupleData[0] != 0x04)
return -EINVAL;
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
};
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 813ea941b91a..a31c159e96ea 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -1851,6 +1851,7 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
int retval;
unsigned int val, revision_register;
const char *version_string;
+ u8 addr[ETH_ALEN];
DBG(2, dev, "%s: %s\n", CARDNAME, __func__);
@@ -1922,7 +1923,8 @@ static int smc_probe(struct net_device *dev, void __iomem *ioaddr,
/* Get the MAC address */
SMC_SELECT_BANK(lp, 1);
- SMC_GET_MAC_ADDR(lp, dev->dev_addr);
+ SMC_GET_MAC_ADDR(lp, addr);
+ eth_hw_addr_set(dev, addr);
/* now, reset the chip, and put it into a known state */
smc_reset(dev);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 199a97339280..7a50ba00f8ae 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1503,7 +1503,7 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
/* Sets the device MAC address to dev_addr, called with mac_lock held */
static void
-smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, u8 dev_addr[6])
+smsc911x_set_hw_mac_address(struct smsc911x_data *pdata, const u8 dev_addr[6])
{
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
@@ -1939,7 +1939,7 @@ static int smsc911x_set_mac_address(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
spin_lock_irq(&pdata->mac_lock);
smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
@@ -2162,13 +2162,15 @@ static void smsc911x_read_mac_address(struct net_device *dev)
struct smsc911x_data *pdata = netdev_priv(dev);
u32 mac_high16 = smsc911x_mac_read(pdata, ADDRH);
u32 mac_low32 = smsc911x_mac_read(pdata, ADDRL);
+ u8 addr[ETH_ALEN];
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+ eth_hw_addr_set(dev, addr);
}
/* Initializing private device structures, only called from probe */
@@ -2375,7 +2377,7 @@ static int smsc911x_probe_config(struct smsc911x_platform_config *config,
phy_interface = PHY_INTERFACE_MODE_NA;
config->phy_interface = phy_interface;
- device_get_mac_address(dev, config->mac, ETH_ALEN);
+ device_get_mac_address(dev, config->mac);
err = device_property_read_u32(dev, "reg-io-width", &width);
if (err == -ENXIO)
@@ -2525,7 +2527,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
SMSC_TRACE(pdata, probe,
"MAC Address is specified by configuration");
} else if (is_valid_ether_addr(pdata->config.mac)) {
- memcpy(dev->dev_addr, pdata->config.mac, ETH_ALEN);
+ eth_hw_addr_set(dev, pdata->config.mac);
SMSC_TRACE(pdata, probe,
"MAC Address specified by platform data");
} else {
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index fdbd2a43e267..d937af18973e 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -404,7 +404,7 @@ static const struct ethtool_ops smsc9420_ethtool_ops = {
static void smsc9420_set_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
- u8 *dev_addr = dev->dev_addr;
+ const u8 *dev_addr = dev->dev_addr;
u32 mac_high16 = (dev_addr[5] << 8) | dev_addr[4];
u32 mac_low32 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
(dev_addr[1] << 8) | dev_addr[0];
@@ -416,6 +416,7 @@ static void smsc9420_set_mac_address(struct net_device *dev)
static void smsc9420_check_mac_address(struct net_device *dev)
{
struct smsc9420_pdata *pd = netdev_priv(dev);
+ u8 addr[ETH_ALEN];
/* Check if mac address has been specified when bringing interface up */
if (is_valid_ether_addr(dev->dev_addr)) {
@@ -427,15 +428,16 @@ static void smsc9420_check_mac_address(struct net_device *dev)
* it will already have been set */
u32 mac_high16 = smsc9420_reg_read(pd, ADDRH);
u32 mac_low32 = smsc9420_reg_read(pd, ADDRL);
- dev->dev_addr[0] = (u8)(mac_low32);
- dev->dev_addr[1] = (u8)(mac_low32 >> 8);
- dev->dev_addr[2] = (u8)(mac_low32 >> 16);
- dev->dev_addr[3] = (u8)(mac_low32 >> 24);
- dev->dev_addr[4] = (u8)(mac_high16);
- dev->dev_addr[5] = (u8)(mac_high16 >> 8);
-
- if (is_valid_ether_addr(dev->dev_addr)) {
+ addr[0] = (u8)(mac_low32);
+ addr[1] = (u8)(mac_low32 >> 8);
+ addr[2] = (u8)(mac_low32 >> 16);
+ addr[3] = (u8)(mac_low32 >> 24);
+ addr[4] = (u8)(mac_high16);
+ addr[5] = (u8)(mac_high16 >> 8);
+
+ if (is_valid_ether_addr(addr)) {
/* eeprom values are valid so use them */
+ eth_hw_addr_set(dev, addr);
netif_dbg(pd, probe, pd->dev,
"Mac Address is read from EEPROM\n");
} else {
@@ -788,7 +790,7 @@ static int smsc9420_alloc_rx_buffer(struct smsc9420_pdata *pd, int index)
PKT_BUF_SZ, DMA_FROM_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
dev_kfree_skb_any(skb);
- netif_warn(pd, rx_err, pd->dev, "pci_map_single failed!\n");
+ netif_warn(pd, rx_err, pd->dev, "dma_map_single failed!\n");
return -ENOMEM;
}
@@ -940,7 +942,7 @@ static netdev_tx_t smsc9420_hard_start_xmit(struct sk_buff *skb,
DMA_TO_DEVICE);
if (dma_mapping_error(&pd->pdev->dev, mapping)) {
netif_warn(pd, tx_err, pd->dev,
- "pci_map_single failed, dropping packet\n");
+ "dma_map_single failed, dropping packet\n");
return NETDEV_TX_BUSY;
}
@@ -1551,7 +1553,7 @@ smsc9420_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (!pd->rx_ring)
goto out_free_io_4;
- /* descriptors are aligned due to the nature of pci_alloc_consistent */
+ /* descriptors are aligned due to the nature of dma_alloc_coherent */
pd->tx_ring = (pd->rx_ring + RX_RING_SIZE);
pd->tx_dma_addr = pd->rx_dma_addr +
sizeof(struct smsc9420_dma_desc) * RX_RING_SIZE;
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c
index 1f46af136aa8..de7d8bf2c226 100644
--- a/drivers/net/ethernet/socionext/netsec.c
+++ b/drivers/net/ethernet/socionext/netsec.c
@@ -1860,10 +1860,9 @@ static int netsec_of_probe(struct platform_device *pdev,
*phy_addr = of_mdio_parse_addr(&pdev->dev, priv->phy_np);
priv->clk = devm_clk_get(&pdev->dev, NULL); /* get by 'phy_ref_clk' */
- if (IS_ERR(priv->clk)) {
- dev_err(&pdev->dev, "phy_ref_clk not found\n");
- return PTR_ERR(priv->clk);
- }
+ if (IS_ERR(priv->clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(priv->clk),
+ "phy_ref_clk not found\n");
priv->freq = clk_get_rate(priv->clk);
return 0;
@@ -1886,19 +1885,17 @@ static int netsec_acpi_probe(struct platform_device *pdev,
priv->phy_interface = PHY_INTERFACE_MODE_NA;
ret = device_property_read_u32(&pdev->dev, "phy-channel", phy_addr);
- if (ret) {
- dev_err(&pdev->dev,
- "missing required property 'phy-channel'\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(&pdev->dev, ret,
+ "missing required property 'phy-channel'\n");
ret = device_property_read_u32(&pdev->dev,
"socionext,phy-clock-frequency",
&priv->freq);
if (ret)
- dev_err(&pdev->dev,
- "missing required property 'socionext,phy-clock-frequency'\n");
- return ret;
+ return dev_err_probe(&pdev->dev, ret,
+ "missing required property 'socionext,phy-clock-frequency'\n");
+ return 0;
}
static void netsec_unregister_mdio(struct netsec_priv *priv)
@@ -1981,7 +1978,6 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr)
static int netsec_probe(struct platform_device *pdev)
{
struct resource *mmio_res, *eeprom_res, *irq_res;
- u8 *mac, macbuf[ETH_ALEN];
struct netsec_priv *priv;
u32 hw_ver, phy_addr = 0;
struct net_device *ndev;
@@ -2037,21 +2033,19 @@ static int netsec_probe(struct platform_device *pdev)
goto free_ndev;
}
- mac = device_get_mac_address(&pdev->dev, macbuf, sizeof(macbuf));
- if (mac)
- ether_addr_copy(ndev->dev_addr, mac);
-
- if (priv->eeprom_base &&
- (!mac || !is_valid_ether_addr(ndev->dev_addr))) {
+ ret = device_get_ethdev_address(&pdev->dev, ndev);
+ if (ret && priv->eeprom_base) {
void __iomem *macp = priv->eeprom_base +
NETSEC_EEPROM_MAC_ADDRESS;
-
- ndev->dev_addr[0] = readb(macp + 3);
- ndev->dev_addr[1] = readb(macp + 2);
- ndev->dev_addr[2] = readb(macp + 1);
- ndev->dev_addr[3] = readb(macp + 0);
- ndev->dev_addr[4] = readb(macp + 7);
- ndev->dev_addr[5] = readb(macp + 6);
+ u8 addr[ETH_ALEN];
+
+ addr[0] = readb(macp + 3);
+ addr[1] = readb(macp + 2);
+ addr[2] = readb(macp + 1);
+ addr[3] = readb(macp + 0);
+ addr[4] = readb(macp + 7);
+ addr[5] = readb(macp + 6);
+ eth_hw_addr_set(ndev, addr);
}
if (!is_valid_ether_addr(ndev->dev_addr)) {
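The netsec changes above combine two conversions: dev_err_probe() for probe-path error reporting and device_get_ethdev_address() for the fwnode-provided MAC. A sketch of both on a hypothetical probe helper; the clock lookup and the message text are only examples.

#include <linux/clk.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>

static int demo_probe_clk_and_mac(struct platform_device *pdev,
				  struct net_device *ndev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		/* logs the failure (quietly for -EPROBE_DEFER) and returns
		 * the error code in a single statement */
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "phy_ref_clk not found\n");

	/* take a fwnode-provided MAC if there is one, else fall back */
	if (device_get_ethdev_address(&pdev->dev, ndev))
		eth_hw_addr_random(ndev);

	return 0;
}
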
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index ae31ed93aaf0..2c48f8b8ab71 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1599,7 +1599,7 @@ static int ave_probe(struct platform_device *pdev)
ndev->max_mtu = AVE_MAX_ETHFRAME - (ETH_HLEN + ETH_FCS_LEN);
- ret = of_get_mac_address(np, ndev->dev_addr);
+ ret = of_get_ethdev_address(np, ndev);
if (ret) {
/* if the mac address is invalid, use random mac address */
eth_hw_addr_random(ndev);
@@ -1935,6 +1935,17 @@ static const struct ave_soc_data ave_pxs3_data = {
.get_pinmode = ave_pxs3_get_pinmode,
};
+static const struct ave_soc_data ave_nx1_data = {
+ .is_desc_64bit = true,
+ .clock_names = {
+ "ether",
+ },
+ .reset_names = {
+ "ether",
+ },
+ .get_pinmode = ave_pxs3_get_pinmode,
+};
+
static const struct of_device_id of_ave_match[] = {
{
.compatible = "socionext,uniphier-pro4-ave4",
@@ -1956,6 +1967,10 @@ static const struct of_device_id of_ave_match[] = {
.compatible = "socionext,uniphier-pxs3-ave4",
.data = &ave_pxs3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-ave4",
+ .data = &ave_nx1_data,
+ },
{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, of_ave_match);
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index b6d945ea903d..9160f9ed363a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -546,13 +546,13 @@ int dwmac4_setup(struct stmmac_priv *priv);
int dwxgmac2_setup(struct stmmac_priv *priv);
int dwxlgmac2_setup(struct stmmac_priv *priv);
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low);
void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 4422baeed3d8..617d0e4c6495 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -634,7 +634,7 @@ static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
* If addr is NULL, clear the slot
*/
static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
index d046e33b8a29..66fc8be34bb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -171,10 +171,9 @@ static int visconti_eth_clock_probe(struct platform_device *pdev,
int err;
dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(dwmac->phy_ref_clk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- return PTR_ERR(dwmac->phy_ref_clk);
- }
+ if (IS_ERR(dwmac->phy_ref_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk),
+ "phy_ref_clk clock not found.\n");
err = clk_prepare_enable(dwmac->phy_ref_clk);
if (err < 0) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index fc8759f146c7..76edb9b72675 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -104,7 +104,7 @@ static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
}
static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
index ebcad8dd99db..75071a7d551a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -68,7 +68,7 @@ static int dwmac100_irq_status(struct mac_device_info *hw,
}
static void dwmac100_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr,
+ const unsigned char *addr,
unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index b21745368983..fd41db65fe1d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -322,7 +322,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwmac4_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr, unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 9292a1fab7d3..d1c605777985 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -187,7 +187,7 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr,
return ret;
}
-void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index d1c31200bb91..caa4bfc4c1d6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -239,7 +239,7 @@ void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
unsigned int high, unsigned int low)
{
unsigned long data;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
index c4d78fa93663..c6c4d7948fe5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -335,7 +335,8 @@ static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
}
static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
- unsigned char *addr, unsigned int reg_n)
+ const unsigned char *addr,
+ unsigned int reg_n)
{
void __iomem *ioaddr = hw->pcsr;
u32 value;
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index fe2660d5694d..f7dc447f05a0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -330,7 +330,8 @@ struct stmmac_ops {
/* Set power management mode (e.g. magic frame) */
void (*pmt)(struct mac_device_info *hw, unsigned long mode);
/* Set/Get Unicast MAC addresses */
- void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ void (*set_umac_addr)(struct mac_device_info *hw,
+ const unsigned char *addr,
unsigned int reg_n);
void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
unsigned int reg_n);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index eb3b7bf771d7..d3f350c25b9b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -736,7 +736,7 @@ static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
ptp_v2 = PTP_TCR_TSVER2ENA;
snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
- if (priv->synopsys_id != DWMAC_CORE_5_10)
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
ts_event_en = PTP_TCR_TSEVNTENA;
ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
@@ -2818,9 +2818,13 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
*/
static void stmmac_check_ether_addr(struct stmmac_priv *priv)
{
+ u8 addr[ETH_ALEN];
+
if (!is_valid_ether_addr(priv->dev->dev_addr)) {
- stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
- if (!is_valid_ether_addr(priv->dev->dev_addr))
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
eth_hw_addr_random(priv->dev);
dev_info(priv->device, "device MAC address %pM\n",
priv->dev->dev_addr);
@@ -3510,6 +3514,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Rx MSI irq */
for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
if (priv->rx_irq[i] == 0)
continue;
@@ -3533,6 +3539,8 @@ static int stmmac_request_irq_multi_msi(struct net_device *dev)
/* Request Tx MSI irq */
for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
if (priv->tx_irq[i] == 0)
continue;
@@ -6815,7 +6823,7 @@ int stmmac_dvr_probe(struct device *device,
priv->tx_irq[i] = res->tx_irq[i];
if (!is_zero_ether_addr(res->mac))
- memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
+ eth_hw_addr_set(priv->dev, res->mac);
dev_set_drvdata(device, priv->dev);
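The stmmac hunks above follow the pattern used throughout this series: with netdev->dev_addr treated as read-only, the driver assembles the address in a local buffer and installs it with eth_hw_addr_set(), falling back to a random address when the hardware value is invalid. A minimal sketch of that pattern, assuming a hypothetical read_mac_from_hw() helper:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical helper: fills @addr with the ETH_ALEN-byte MAC read from hardware. */
static void read_mac_from_hw(struct net_device *ndev, u8 *addr);

static void example_init_mac(struct net_device *ndev)
{
	u8 addr[ETH_ALEN];

	read_mac_from_hw(ndev, addr);
	if (is_valid_ether_addr(addr))
		eth_hw_addr_set(ndev, addr);	/* copies ETH_ALEN bytes into dev_addr */
	else
		eth_hw_addr_random(ndev);	/* no usable hardware address */
}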
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
index 0462dcc93e53..be3cb63675a5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -36,7 +36,7 @@ struct stmmac_packet_attrs {
int vlan_id_in;
int vlan_id_out;
unsigned char *src;
- unsigned char *dst;
+ const unsigned char *dst;
u32 ip_src;
u32 ip_dst;
int tcp;
@@ -249,8 +249,8 @@ static int stmmac_test_loopback_validate(struct sk_buff *skb,
struct net_device *orig_ndev)
{
struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ const unsigned char *dst = tpriv->packet->dst;
unsigned char *src = tpriv->packet->src;
- unsigned char *dst = tpriv->packet->dst;
struct stmmachdr *shdr;
struct ethhdr *ehdr;
struct udphdr *uhdr;
@@ -1104,13 +1104,13 @@ static int stmmac_test_rxp(struct stmmac_priv *priv)
goto cleanup_sel;
}
- actions = kzalloc(nk * sizeof(*actions), GFP_KERNEL);
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
if (!actions) {
ret = -ENOMEM;
goto cleanup_exts;
}
- act = kzalloc(nk * sizeof(*act), GFP_KERNEL);
+ act = kcalloc(nk, sizeof(*act), GFP_KERNEL);
if (!act) {
ret = -ENOMEM;
goto cleanup_actions;
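The selftest allocation change above swaps open-coded kzalloc(n * size) calls for kcalloc(), whose size multiplication is overflow-checked. A minimal sketch of the same call, using a placeholder table type:

#include <linux/slab.h>

/* Equivalent to kzalloc(n * sizeof(u32), GFP_KERNEL), except that an
 * overflowing n * sizeof(u32) makes kcalloc() return NULL instead of
 * silently allocating a short buffer.
 */
static u32 *example_alloc_zeroed_table(size_t n)
{
	return kcalloc(n, sizeof(u32), GFP_KERNEL);
}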
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 287ae4c538aa..d2d4f47c7e28 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -3027,7 +3027,7 @@ static void cas_mac_reset(struct cas *cp)
/* Must be invoked under cp->lock. */
static void cas_init_mac(struct cas *cp)
{
- unsigned char *e = &cp->dev->dev_addr[0];
+ const unsigned char *e = &cp->dev->dev_addr[0];
int i;
cas_mac_reset(cp);
@@ -3379,6 +3379,7 @@ static void cas_check_pci_invariants(struct cas *cp)
static int cas_check_invariants(struct cas *cp)
{
struct pci_dev *pdev = cp->pdev;
+ u8 addr[ETH_ALEN];
u32 cfg;
int i;
@@ -3407,8 +3408,8 @@ static int cas_check_invariants(struct cas *cp)
/* finish phy determination. MDIO1 takes precedence over MDIO0 if
* they're both connected.
*/
- cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
- PCI_SLOT(pdev->devfn));
+ cp->phy_type = cas_get_vpd_info(cp, addr, PCI_SLOT(pdev->devfn));
+ eth_hw_addr_set(cp->dev, addr);
if (cp->phy_type & CAS_PHY_SERDES) {
cp->cas_flags |= CAS_FLAG_1000MB_CAP;
return 0; /* no more checking needed */
diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c
index 50bd4e3b0af9..6b59b14e74b1 100644
--- a/drivers/net/ethernet/sun/ldmvsw.c
+++ b/drivers/net/ethernet/sun/ldmvsw.c
@@ -230,7 +230,6 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
{
struct net_device *dev;
struct vnet_port *port;
- int i;
dev = alloc_etherdev_mqs(sizeof(*port), VNET_MAX_TXQS, 1);
if (!dev)
@@ -238,10 +237,8 @@ static struct net_device *vsw_alloc_netdev(u8 hwaddr[],
dev->needed_headroom = VNET_PACKET_SKIP + 8;
dev->needed_tailroom = 8;
- for (i = 0; i < ETH_ALEN; i++) {
- dev->dev_addr[i] = hwaddr[i];
- dev->perm_addr[i] = dev->dev_addr[i];
- }
+ eth_hw_addr_set(dev, hwaddr);
+ ether_addr_copy(dev->perm_addr, dev->dev_addr);
sprintf(dev->name, "vif%d.%d", (int)handle, (int)port_id);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index a68a01d1b2b1..ba8ad76313a9 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -2603,7 +2603,7 @@ static int niu_init_link(struct niu *np)
return 0;
}
-static void niu_set_primary_mac(struct niu *np, unsigned char *addr)
+static void niu_set_primary_mac(struct niu *np, const unsigned char *addr)
{
u16 reg0 = addr[4] << 8 | addr[5];
u16 reg1 = addr[2] << 8 | addr[3];
@@ -6386,7 +6386,7 @@ static int niu_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, addr->sa_data);
if (!netif_running(dev))
return 0;
@@ -8312,6 +8312,7 @@ static void niu_pci_vpd_validate(struct niu *np)
{
struct net_device *dev = np->dev;
struct niu_vpd *vpd = &np->vpd;
+ u8 addr[ETH_ALEN];
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
@@ -8344,17 +8345,20 @@ static void niu_pci_vpd_validate(struct niu *np)
return;
}
- memcpy(dev->dev_addr, vpd->local_mac, ETH_ALEN);
+ ether_addr_copy(addr, vpd->local_mac);
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
}
static int niu_pci_probe_sprom(struct niu *np)
{
struct net_device *dev = np->dev;
+ u8 addr[ETH_ALEN];
int len, i;
u64 val, sum;
u8 val8;
@@ -8446,27 +8450,29 @@ static int niu_pci_probe_sprom(struct niu *np)
val = nr64(ESPC_MAC_ADDR0);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[0] = (val >> 0) & 0xff;
- dev->dev_addr[1] = (val >> 8) & 0xff;
- dev->dev_addr[2] = (val >> 16) & 0xff;
- dev->dev_addr[3] = (val >> 24) & 0xff;
+ addr[0] = (val >> 0) & 0xff;
+ addr[1] = (val >> 8) & 0xff;
+ addr[2] = (val >> 16) & 0xff;
+ addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
netif_printk(np, probe, KERN_DEBUG, np->dev,
"SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
- dev->dev_addr[4] = (val >> 0) & 0xff;
- dev->dev_addr[5] = (val >> 8) & 0xff;
+ addr[4] = (val >> 0) & 0xff;
+ addr[5] = (val >> 8) & 0xff;
- if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (!is_valid_ether_addr(addr)) {
dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
- dev->dev_addr);
+ addr);
return -EINVAL;
}
- val8 = dev->dev_addr[5];
- dev->dev_addr[5] += np->port;
- if (dev->dev_addr[5] < val8)
- dev->dev_addr[4]++;
+ val8 = addr[5];
+ addr[5] += np->port;
+ if (addr[5] < val8)
+ addr[4]++;
+
+ eth_hw_addr_set(dev, addr);
val = nr64(ESPC_MOD_STR_LEN);
netif_printk(np, probe, KERN_DEBUG, np->dev,
@@ -9235,7 +9241,7 @@ static int niu_get_of_props(struct niu *np)
netdev_err(dev, "%pOF: OF MAC address prop len (%d) is wrong\n",
dp, prop_len);
}
- memcpy(dev->dev_addr, mac_addr, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr);
if (!is_valid_ether_addr(&dev->dev_addr[0])) {
netdev_err(dev, "%pOF: OF MAC address is invalid\n", dp);
netdev_err(dev, "%pOF: [ %pM ]\n", dp, dev->dev_addr);
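The niu hunks keep the existing per-port offset (bump the low address byte by the port index, carrying into byte 4 on wrap), but now do the arithmetic on a stack copy before handing the result to the core. A minimal sketch of that adjustment; the function name and arguments are placeholders:

#include <linux/etherdevice.h>

static void example_set_port_mac(struct net_device *dev, const u8 *base,
				 unsigned int port)
{
	u8 addr[ETH_ALEN], prev;

	ether_addr_copy(addr, base);	/* base: 6-byte base address */
	prev = addr[5];
	addr[5] += port;		/* 0-based port index */
	if (addr[5] < prev)		/* low byte wrapped past 0xff */
		addr[4]++;
	eth_hw_addr_set(dev, addr);
}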
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index c646575e79d5..531a6f449afa 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -623,7 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, bool non_blocking)
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
__u32 bblk_dvma = (__u32)bp->bblock_dvma;
- unsigned char *e = &bp->dev->dev_addr[0];
+ const unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
bigmac_get_counters(bp, bregs);
@@ -1076,7 +1076,6 @@ static int bigmac_ether_init(struct platform_device *op,
struct net_device *dev;
u8 bsizes, bsizes_more;
struct bigmac *bp;
- int i;
/* Get a new device struct for this interface. */
dev = alloc_etherdev(sizeof(struct bigmac));
@@ -1086,8 +1085,7 @@ static int bigmac_ether_init(struct platform_device *op,
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = idprom->id_ethaddr[i];
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
/* Setup softc, with backpointers to QEC and BigMAC SBUS device structs. */
bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d72018a60c0f..036856102c50 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -1810,7 +1810,7 @@ static u32 gem_setup_multicast(struct gem *gp)
static void gem_init_mac(struct gem *gp)
{
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
writel(0x1bf0, gp->regs + MAC_SNDPAUSE);
@@ -2087,7 +2087,7 @@ static void gem_stop_phy(struct gem *gp, int wol)
writel(mifcfg, gp->regs + MIF_CFG);
if (wol && gp->has_wol) {
- unsigned char *e = &gp->dev->dev_addr[0];
+ const unsigned char *e = &gp->dev->dev_addr[0];
u32 csr;
/* Setup wake-on-lan for MAGIC packet */
@@ -2431,13 +2431,13 @@ static struct net_device_stats *gem_get_stats(struct net_device *dev)
static int gem_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *macaddr = (struct sockaddr *) addr;
+ const unsigned char *e = &dev->dev_addr[0];
struct gem *gp = netdev_priv(dev);
- unsigned char *e = &dev->dev_addr[0];
if (!is_valid_ether_addr(macaddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, macaddr->sa_data);
/* We'll just catch it later when the device is up'd or resumed */
if (!netif_running(dev) || !netif_device_present(dev))
@@ -2797,9 +2797,12 @@ static int gem_get_device_address(struct gem *gp)
return -1;
#endif
}
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
#else
- get_gem_mac_nonobp(gp->pdev, gp->dev->dev_addr);
+ u8 addr[ETH_ALEN];
+
+ get_gem_mac_nonobp(gp->pdev, addr);
+ eth_hw_addr_set(gp->dev, addr);
#endif
return 0;
}
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 62f81b0d14ed..ad9029ae6848 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1395,13 +1395,13 @@ force_link:
/* hp->happy_lock must be held */
static int happy_meal_init(struct happy_meal *hp)
{
+ const unsigned char *e = &hp->dev->dev_addr[0];
void __iomem *gregs = hp->gregs;
void __iomem *etxregs = hp->etxregs;
void __iomem *erxregs = hp->erxregs;
void __iomem *bregs = hp->bigmacregs;
void __iomem *tregs = hp->tcvregs;
u32 regtmp, rxcfg;
- unsigned char *e = &hp->dev->dev_addr[0];
/* If auto-negotiation timer is running, kill it. */
del_timer(&hp->happy_timer);
@@ -2661,6 +2661,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
struct happy_meal *hp;
struct net_device *dev;
int i, qfe_slot = -1;
+ u8 addr[ETH_ALEN];
int err = -ENODEV;
sbus_dp = op->dev.parent->of_node;
@@ -2698,7 +2699,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
const unsigned char *addr;
@@ -2707,9 +2709,9 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
addr = of_get_property(dp, "local-mac-address", &len);
if (qfe_slot != -1 && addr && len == ETH_ALEN)
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
else
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
hp = netdev_priv(dev);
@@ -2969,6 +2971,7 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
unsigned long hpreg_res;
int i, qfe_slot = -1;
char prom_name[64];
+ u8 addr[ETH_ALEN];
int err;
/* Now make sure pci_dev cookie is there. */
@@ -3044,7 +3047,8 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
}
if (i < 6) { /* a mac address was given */
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = macaddr[i];
+ addr[i] = macaddr[i];
+ eth_hw_addr_set(dev, addr);
macaddr[5]++;
} else {
#ifdef CONFIG_SPARC
@@ -3055,12 +3059,15 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
(addr = of_get_property(dp, "local-mac-address", &len))
!= NULL &&
len == 6) {
- memcpy(dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
} else {
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
}
#else
- get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
+ u8 addr[ETH_ALEN];
+
+ get_hme_mac_nonsparc(pdev, addr);
+ eth_hw_addr_set(dev, addr);
#endif
}
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c
index 577cd9753d8e..efe0d33f6024 100644
--- a/drivers/net/ethernet/sun/sunqe.c
+++ b/drivers/net/ethernet/sun/sunqe.c
@@ -144,7 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq)
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
- unsigned char *e = &qep->dev->dev_addr[0];
+ const unsigned char *e = &qep->dev->dev_addr[0];
__u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
@@ -844,7 +844,7 @@ static int qec_ether_init(struct platform_device *op)
if (!dev)
return -ENOMEM;
- memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
+ eth_hw_addr_set(dev, idprom->id_ethaddr);
qe = netdev_priv(dev);
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 58ee89223951..da8119625cf3 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -285,6 +285,7 @@ static struct vnet *vnet_new(const u64 *local_mac,
struct vio_dev *vdev)
{
struct net_device *dev;
+ u8 addr[ETH_ALEN];
struct vnet *vp;
int err, i;
@@ -295,7 +296,8 @@ static struct vnet *vnet_new(const u64 *local_mac,
dev->needed_tailroom = 8;
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ addr[i] = (*local_mac >> (5 - i) * 8) & 0xff;
+ eth_hw_addr_set(dev, addr);
vp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
index df26cea45904..5c9b6c90942b 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c
@@ -78,7 +78,7 @@ static int xlgmac_init(struct xlgmac_pdata *pdata)
netdev->irq = pdata->dev_irq;
netdev->base_addr = (unsigned long)pdata->mac_regs;
xlgmac_read_mac_addr(pdata);
- memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);
+ eth_hw_addr_set(netdev, pdata->mac_addr);
/* Set all the function pointers */
xlgmac_init_all_ops(pdata);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
index bf6c1c6779ff..76eb7db80f13 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-hw.c
@@ -57,7 +57,7 @@ static int xlgmac_enable_rx_csum(struct xlgmac_pdata *pdata)
return 0;
}
-static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, u8 *addr)
+static int xlgmac_set_mac_address(struct xlgmac_pdata *pdata, const u8 *addr)
{
unsigned int mac_addr_hi, mac_addr_lo;
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
index 1db7104fef3a..d435519236e4 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
@@ -798,7 +798,7 @@ static int xlgmac_set_mac_address(struct net_device *netdev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, saddr->sa_data);
hw_ops->set_mac_address(pdata, netdev->dev_addr);
diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac.h b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
index 8598aaf3ec99..98e3a271e017 100644
--- a/drivers/net/ethernet/synopsys/dwc-xlgmac.h
+++ b/drivers/net/ethernet/synopsys/dwc-xlgmac.h
@@ -410,7 +410,7 @@ struct xlgmac_hw_ops {
void (*dev_xmit)(struct xlgmac_channel *channel);
int (*dev_read)(struct xlgmac_channel *channel);
- int (*set_mac_address)(struct xlgmac_pdata *pdata, u8 *addr);
+ int (*set_mac_address)(struct xlgmac_pdata *pdata, const u8 *addr);
int (*config_rx_mode)(struct xlgmac_pdata *pdata);
int (*enable_rx_csum)(struct xlgmac_pdata *pdata);
int (*disable_rx_csum)(struct xlgmac_pdata *pdata);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 6b409f9c5863..0775a5542f2f 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -832,7 +832,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
if (netif_running(dev))
return -EBUSY
*/
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
bdx_restore_mac(ndev, priv);
RET(0);
}
@@ -840,6 +840,7 @@ static int bdx_set_mac(struct net_device *ndev, void *p)
static int bdx_read_mac(struct bdx_priv *priv)
{
u16 macAddress[3], i;
+ u8 addr[ETH_ALEN];
ENTER;
macAddress[2] = READ_REG(priv, regUNC_MAC0_A);
@@ -849,9 +850,10 @@ static int bdx_read_mac(struct bdx_priv *priv)
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
macAddress[0] = READ_REG(priv, regUNC_MAC2_A);
for (i = 0; i < 3; i++) {
- priv->ndev->dev_addr[i * 2 + 1] = macAddress[i];
- priv->ndev->dev_addr[i * 2] = macAddress[i] >> 8;
+ addr[i * 2 + 1] = macAddress[i];
+ addr[i * 2] = macAddress[i] >> 8;
}
+ eth_hw_addr_set(priv->ndev, addr);
RET(0);
}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
index 6e4d4f9e32e0..b05de9b61ad6 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -61,7 +61,7 @@ struct am65_cpsw_regdump_item {
#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
.hdr.module_id = (mod), \
- .hdr.len = (((u32 *)(end)) - ((u32 *)(start)) + 1) * sizeof(u32) * 2 + \
+ .hdr.len = (end + 4 - start) * 2 + \
sizeof(struct am65_cpsw_regdump_hdr), \
.start_ofs = (start), \
.end_ofs = end, \
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index 130346f74ee8..c092cb61416a 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1918,7 +1918,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
port->port_id,
port->slave.mac_addr);
if (!is_valid_ether_addr(port->slave.mac_addr)) {
- random_ether_addr(port->slave.mac_addr);
+ eth_random_addr(port->slave.mac_addr);
dev_err(dev, "Use random MAC address\n");
}
}
@@ -1970,7 +1970,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
ndev_priv->msg_enable = AM65_CPSW_DEBUG;
SET_NETDEV_DEV(port->ndev, dev);
- ether_addr_copy(port->ndev->dev_addr, port->slave.mac_addr);
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE;
@@ -2429,12 +2429,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dl_priv = devlink_priv(common->devlink);
dl_priv->common = common;
- ret = devlink_register(common->devlink);
- if (ret) {
- dev_err(dev, "devlink reg fail ret:%d\n", ret);
- goto dl_free;
- }
-
/* Provide devlink hook to switch mode when multiple external ports
* are present NUSS switchdev driver is enabled.
*/
@@ -2447,7 +2441,6 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
dev_err(dev, "devlink params reg fail ret:%d\n", ret);
goto dl_unreg;
}
- devlink_params_publish(common->devlink);
}
for (i = 1; i <= common->port_num; i++) {
@@ -2468,7 +2461,7 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
}
devlink_port_type_eth_set(dl_port, port->ndev);
}
-
+ devlink_register(common->devlink);
return ret;
dl_port_unreg:
@@ -2479,10 +2472,7 @@ dl_port_unreg:
devlink_port_unregister(dl_port);
}
dl_unreg:
- devlink_unregister(common->devlink);
-dl_free:
devlink_free(common->devlink);
-
return ret;
}
@@ -2492,6 +2482,8 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
struct am65_cpsw_port *port;
int i;
+ devlink_unregister(common->devlink);
+
for (i = 1; i <= common->port_num; i++) {
port = am65_common_get_port(common, i);
dl_port = &port->devlink_port;
@@ -2500,13 +2492,11 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
}
if (!AM65_CPSW_IS_CPSW2G(common) &&
- IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
- devlink_params_unpublish(common->devlink);
- devlink_params_unregister(common->devlink, am65_cpsw_devlink_params,
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ devlink_params_unregister(common->devlink,
+ am65_cpsw_devlink_params,
ARRAY_SIZE(am65_cpsw_devlink_params));
- }
- devlink_unregister(common->devlink);
devlink_free(common->devlink);
}
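The am65-cpsw devlink rework (and the cpsw_new.c one further down) moves devlink_register() to the very end of setup, after ports and parameters exist, and drops the publish/unpublish calls; in this tree devlink_register() no longer returns an error. A rough sketch of the resulting ordering, with placeholder ops/params names:

#include <net/devlink.h>

struct example_dl_priv {
	void *back;	/* placeholder private data */
};

static int example_devlink_setup(struct device *dev,
				 const struct devlink_ops *ops,
				 const struct devlink_param *params,
				 size_t n_params, struct devlink **out)
{
	struct devlink *dl;
	int ret;

	dl = devlink_alloc(ops, sizeof(struct example_dl_priv), dev);
	if (!dl)
		return -ENOMEM;

	ret = devlink_params_register(dl, params, n_params);
	if (ret) {
		devlink_free(dl);
		return ret;
	}

	devlink_register(dl);	/* last step: instance becomes visible to user space */
	*out = dl;
	return 0;
}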
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 02d4e51f7306..7449436fc87c 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1112,7 +1112,7 @@ static int cpmac_probe(struct platform_device *pdev)
priv->dev = dev;
priv->ring_size = 64;
priv->msg_enable = netif_msg_init(debug_level, 0xff);
- memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));
+ eth_hw_addr_set(dev, pdata->dev_addr);
snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
mdio_bus_id, phy_id);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 66f7ddd9b1f9..33142d505fc8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -985,7 +985,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
for_each_slave(priv, cpsw_set_slave_mac, priv);
pm_runtime_put(cpsw->dev);
@@ -1460,7 +1460,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
priv_sl2->mac_addr);
}
- memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv_sl2->mac_addr);
priv_sl2->emac_port = 1;
cpsw->slaves[1].ndev = ndev;
@@ -1639,7 +1639,7 @@ static int cpsw_probe(struct platform_device *pdev)
dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
}
- memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw->slaves[0].ndev = ndev;
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
index 7968f24d99c8..279e261e4720 100644
--- a/drivers/net/ethernet/ti/cpsw_new.c
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -1000,7 +1000,7 @@ static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
flags, vid);
ether_addr_copy(priv->mac_addr, addr->sa_data);
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
pm_runtime_put(cpsw->dev);
@@ -1401,7 +1401,7 @@ static int cpsw_create_ports(struct cpsw_common *cpsw)
dev_info(cpsw->dev, "Random MACID = %pM\n",
priv->mac_addr);
}
- ether_addr_copy(ndev->dev_addr, slave_data->mac_addr);
+ eth_hw_addr_set(ndev, slave_data->mac_addr);
ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
cpsw->slaves[i].ndev = ndev;
@@ -1810,12 +1810,6 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
dl_priv = devlink_priv(cpsw->devlink);
dl_priv->cpsw = cpsw;
- ret = devlink_register(cpsw->devlink);
- if (ret) {
- dev_err(dev, "DL reg fail ret:%d\n", ret);
- goto dl_free;
- }
-
ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
if (ret) {
@@ -1823,22 +1817,19 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw)
goto dl_unreg;
}
- devlink_params_publish(cpsw->devlink);
+ devlink_register(cpsw->devlink);
return ret;
dl_unreg:
- devlink_unregister(cpsw->devlink);
-dl_free:
devlink_free(cpsw->devlink);
return ret;
}
static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
{
- devlink_params_unpublish(cpsw->devlink);
+ devlink_unregister(cpsw->devlink);
devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
ARRAY_SIZE(cpsw_devlink_params));
- devlink_unregister(cpsw->devlink);
devlink_free(cpsw->devlink);
}
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 43222a34cba0..dc70a6bfaa6a 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -669,10 +669,10 @@ static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
goto mux_fail;
}
- parent_names = devm_kzalloc(cpts->dev, (sizeof(char *) * num_parents),
- GFP_KERNEL);
+ parent_names = devm_kcalloc(cpts->dev, num_parents,
+ sizeof(*parent_names), GFP_KERNEL);
- mux_table = devm_kzalloc(cpts->dev, sizeof(*mux_table) * num_parents,
+ mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
GFP_KERNEL);
if (!mux_table || !parent_names) {
ret = -ENOMEM;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index e8291d848839..2d2dcf70563f 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1132,7 +1132,7 @@ static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
/* Store mac addr in priv and rx channel and set it in EMAC hw */
memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
- memcpy(ndev->dev_addr, sa->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, sa->sa_data);
/* MAC address is configured only after the interface is enabled. */
if (netif_running(ndev)) {
@@ -1402,7 +1402,6 @@ static int match_first_device(struct device *dev, const void *data)
static int emac_dev_open(struct net_device *ndev)
{
struct device *emac_dev = &ndev->dev;
- u32 cnt;
struct resource *res;
int q, m, ret;
int res_num = 0, irq_num = 0;
@@ -1420,8 +1419,7 @@ static int emac_dev_open(struct net_device *ndev)
}
netif_carrier_off(ndev);
- for (cnt = 0; cnt < ETH_ALEN; cnt++)
- ndev->dev_addr[cnt] = priv->mac_addr[cnt];
+ eth_hw_addr_set(ndev, priv->mac_addr);
/* Configuration items */
priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
@@ -1899,7 +1897,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
if (!rc)
- ether_addr_copy(ndev->dev_addr, priv->mac_addr);
+ eth_hw_addr_set(ndev, priv->mac_addr);
if (!is_valid_ether_addr(priv->mac_addr)) {
/* Use random MAC if still none obtained. */
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index eda2961c0fe2..b818e4579f6f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -2028,16 +2028,16 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
if (is_valid_ether_addr(efuse_mac_addr))
- ether_addr_copy(ndev->dev_addr, efuse_mac_addr);
+ eth_hw_addr_set(ndev, efuse_mac_addr);
else
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
devm_iounmap(dev, efuse);
devm_release_mem_region(dev, res.start, size);
} else {
- ret = of_get_mac_address(node_interface, ndev->dev_addr);
+ ret = of_get_ethdev_address(node_interface, ndev);
if (ret)
- eth_random_addr(ndev->dev_addr);
+ eth_hw_addr_random(ndev);
}
ret = of_property_read_string(node_interface, "rx-channel",
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 77c448ad67ce..741c42c6a417 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -184,7 +184,7 @@ static void tlan_print_list(struct tlan_list *, char *, int);
static void tlan_read_and_clear_stats(struct net_device *, int);
static void tlan_reset_adapter(struct net_device *);
static void tlan_finish_reset(struct net_device *);
-static void tlan_set_mac(struct net_device *, int areg, char *mac);
+static void tlan_set_mac(struct net_device *, int areg, const char *mac);
static void __tlan_phy_print(struct net_device *);
static void tlan_phy_print(struct net_device *);
@@ -817,6 +817,7 @@ static int tlan_init(struct net_device *dev)
int err;
int i;
struct tlan_priv *priv;
+ u8 addr[ETH_ALEN];
priv = netdev_priv(dev);
@@ -842,7 +843,7 @@ static int tlan_init(struct net_device *dev)
for (i = 0; i < ETH_ALEN; i++)
err |= tlan_ee_read_byte(dev,
(u8) priv->adapter->addr_ofs + i,
- (u8 *) &dev->dev_addr[i]);
+ addr + i);
if (err) {
pr_err("%s: Error reading MAC from eeprom: %d\n",
dev->name, err);
@@ -850,11 +851,12 @@ static int tlan_init(struct net_device *dev)
/* Olicom OC-2325/OC-2326 have the address byte-swapped */
if (priv->adapter->addr_ofs == 0xf8) {
for (i = 0; i < ETH_ALEN; i += 2) {
- char tmp = dev->dev_addr[i];
- dev->dev_addr[i] = dev->dev_addr[i + 1];
- dev->dev_addr[i + 1] = tmp;
+ char tmp = addr[i];
+ addr[i] = addr[i + 1];
+ addr[i + 1] = tmp;
}
}
+ eth_hw_addr_set(dev, addr);
netif_carrier_off(dev);
@@ -2346,7 +2348,7 @@ tlan_finish_reset(struct net_device *dev)
*
**************************************************************/
-static void tlan_set_mac(struct net_device *dev, int areg, char *mac)
+static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
{
int i;
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 55e652624bd7..3dbfb1b20649 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1477,7 +1477,7 @@ int gelic_net_setup_netdev(struct net_device *netdev, struct gelic_card *card)
__func__, status);
return -EINVAL;
}
- memcpy(netdev->dev_addr, &v1, ETH_ALEN);
+ eth_hw_addr_set(netdev, (u8 *)&v1);
if (card->vlan_required) {
netdev->hard_header_len += VLAN_HLEN;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index 66d4e024d11e..f50f9a43d3ea 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1296,7 +1296,7 @@ spider_net_set_mac(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(netdev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(netdev, addr->sa_data);
/* switch off GMACTPE and GMACRPE */
regvalue = spider_net_read_reg(card, SPIDER_NET_GMACOPEMD);
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 52245ac60fc7..ce38f7515225 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -708,7 +708,7 @@ static int tc35815_read_plat_dev_addr(struct net_device *dev)
lp->pci_dev, tc35815_mac_match);
if (pd) {
if (pd->platform_data)
- memcpy(dev->dev_addr, pd->platform_data, ETH_ALEN);
+ eth_hw_addr_set(dev, pd->platform_data);
put_device(pd);
return is_valid_ether_addr(dev->dev_addr) ? 0 : -ENODEV;
}
@@ -725,6 +725,7 @@ static int tc35815_init_dev_addr(struct net_device *dev)
{
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
+ u8 addr[ETH_ALEN];
int i;
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
@@ -735,9 +736,10 @@ static int tc35815_init_dev_addr(struct net_device *dev)
while (tc_readl(&tr->PROM_Ctl) & PROM_Busy)
;
data = tc_readl(&tr->PROM_Data);
- dev->dev_addr[i] = data & 0xff;
- dev->dev_addr[i+1] = data >> 8;
+ addr[i] = data & 0xff;
+ addr[i+1] = data >> 8;
}
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr))
return tc35815_read_plat_dev_addr(dev);
return 0;
@@ -1859,7 +1861,8 @@ static struct net_device_stats *tc35815_get_stats(struct net_device *dev)
return &dev->stats;
}
-static void tc35815_set_cam_entry(struct net_device *dev, int index, unsigned char *addr)
+static void tc35815_set_cam_entry(struct net_device *dev, int index,
+ const unsigned char *addr)
{
struct tc35815_local *lp = netdev_priv(dev);
struct tc35815_regs __iomem *tr =
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 3b73a9c55a5a..509c5e9b29df 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -899,6 +899,7 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
struct net_device *dev;
struct rhine_private *rp;
int i, rc, phy_id;
+ u8 addr[ETH_ALEN];
const char *name;
/* this should always be supported */
@@ -933,7 +934,8 @@ static int rhine_init_one_common(struct device *hwdev, u32 quirks,
rhine_hw_init(dev, pioaddr);
for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
+ addr[i] = ioread8(ioaddr + StationAddr + i);
+ eth_hw_addr_set(dev, addr);
if (!is_valid_ether_addr(dev->dev_addr)) {
/* Report it and use a random ethernet address instead */
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4b9c30f735b5..be2b992f24d9 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2767,6 +2767,7 @@ static int velocity_probe(struct device *dev, int irq,
struct velocity_info *vptr;
struct mac_regs __iomem *regs;
int ret = -ENOMEM;
+ u8 addr[ETH_ALEN];
/* FIXME: this driver, like almost all other ethernet drivers,
* can support more than MAX_UNITS.
@@ -2820,7 +2821,8 @@ static int velocity_probe(struct device *dev, int irq,
mac_wol_reset(regs);
for (i = 0; i < 6; i++)
- netdev->dev_addr[i] = readb(&regs->PAR[i]);
+ addr[i] = readb(&regs->PAR[i]);
+ eth_hw_addr_set(netdev, addr);
velocity_get_options(&vptr->options, velocity_nics);
diff --git a/drivers/net/ethernet/wiznet/w5100-spi.c b/drivers/net/ethernet/wiznet/w5100-spi.c
index 2b84848dc26a..7779a36da3c8 100644
--- a/drivers/net/ethernet/wiznet/w5100-spi.c
+++ b/drivers/net/ethernet/wiznet/w5100-spi.c
@@ -463,7 +463,9 @@ static int w5100_spi_probe(struct spi_device *spi)
static int w5100_spi_remove(struct spi_device *spi)
{
- return w5100_remove(&spi->dev);
+ w5100_remove(&spi->dev);
+
+ return 0;
}
static const struct spi_device_id w5100_spi_ids[] = {
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index f974e70a82e8..ae24d6b86803 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -985,7 +985,7 @@ static int w5100_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5100_write_macaddr(priv);
return 0;
}
@@ -1064,7 +1064,9 @@ static int w5100_mmio_probe(struct platform_device *pdev)
static int w5100_mmio_remove(struct platform_device *pdev)
{
- return w5100_remove(&pdev->dev);
+ w5100_remove(&pdev->dev);
+
+ return 0;
}
void *w5100_ops_priv(const struct net_device *ndev)
@@ -1155,7 +1157,7 @@ int w5100_probe(struct device *dev, const struct w5100_ops *ops,
INIT_WORK(&priv->restart_work, w5100_restart_work);
if (mac_addr)
- memcpy(ndev->dev_addr, mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, mac_addr);
else
eth_hw_addr_random(ndev);
@@ -1210,7 +1212,7 @@ err_register:
}
EXPORT_SYMBOL_GPL(w5100_probe);
-int w5100_remove(struct device *dev)
+void w5100_remove(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct w5100_priv *priv = netdev_priv(ndev);
@@ -1226,7 +1228,6 @@ int w5100_remove(struct device *dev)
unregister_netdev(ndev);
free_netdev(ndev);
- return 0;
}
EXPORT_SYMBOL_GPL(w5100_remove);
diff --git a/drivers/net/ethernet/wiznet/w5100.h b/drivers/net/ethernet/wiznet/w5100.h
index 5d3d4b541fec..481af3b6d9e8 100644
--- a/drivers/net/ethernet/wiznet/w5100.h
+++ b/drivers/net/ethernet/wiznet/w5100.h
@@ -31,6 +31,6 @@ void *w5100_ops_priv(const struct net_device *ndev);
int w5100_probe(struct device *dev, const struct w5100_ops *ops,
int sizeof_ops_priv, const void *mac_addr, int irq,
int link_gpio);
-int w5100_remove(struct device *dev);
+void w5100_remove(struct device *dev);
extern const struct dev_pm_ops w5100_pm_ops;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 46aae30c4636..402d5036f266 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -472,7 +472,7 @@ static int w5300_set_macaddr(struct net_device *ndev, void *addr)
if (!is_valid_ether_addr(sock_addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, sock_addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, sock_addr->sa_data);
w5300_write_macaddr(priv);
return 0;
}
@@ -534,7 +534,7 @@ static int w5300_hw_probe(struct platform_device *pdev)
int ret;
if (data && is_valid_ether_addr(data->mac_addr)) {
- memcpy(ndev->dev_addr, data->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, data->mac_addr);
} else {
eth_hw_addr_random(ndev);
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 463094ced104..e7065c9a8e38 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -438,7 +438,7 @@ static void temac_do_set_mac_address(struct net_device *ndev)
static int temac_init_mac_address(struct net_device *ndev, const void *address)
{
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
temac_do_set_mac_address(ndev);
@@ -451,7 +451,7 @@ static int temac_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, addr->sa_data);
temac_do_set_mac_address(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 871b5ec3183d..9b068b81ae09 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -360,7 +360,7 @@ static void axienet_set_mac_address(struct net_device *ndev,
struct axienet_local *lp = netdev_priv(ndev);
if (address)
- memcpy(ndev->dev_addr, address, ETH_ALEN);
+ eth_hw_addr_set(ndev, address);
if (!is_valid_ether_addr(ndev->dev_addr))
eth_hw_addr_random(ndev);
@@ -1525,7 +1525,7 @@ static void axienet_validate(struct phylink_config *config,
netdev_warn(ndev, "Cannot use PHY mode %s, supported: %s\n",
phy_modes(state->interface),
phy_modes(lp->phy_mode));
- bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(supported);
return;
}
}
@@ -1558,10 +1558,8 @@ static void axienet_validate(struct phylink_config *config,
break;
}
- bitmap_and(supported, supported, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
- bitmap_and(state->advertising, state->advertising, mask,
- __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_and(supported, supported, mask);
+ linkmode_and(state->advertising, state->advertising, mask);
}
static void axienet_mac_pcs_get_state(struct phylink_config *config,
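The axienet validate() hunk replaces raw bitmap_*() calls with the linkmode_*() wrappers, which carry the __ETHTOOL_LINK_MODE_MASK_NBITS length themselves. A minimal sketch that restricts supported/advertised modes to 1000BASE-X full duplex; the function name is a placeholder:

#include <linux/linkmode.h>
#include <linux/phylink.h>

static void example_restrict_to_1000basex(unsigned long *supported,
					  struct phylink_link_state *state)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, mask);
	linkmode_and(supported, supported, mask);
	linkmode_and(state->advertising, state->advertising, mask);
}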
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index b780aad3550a..0815de581c7f 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -206,12 +206,13 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
* This function writes data from a 16-bit aligned buffer to a 32-bit aligned
* address in the EmacLite device.
*/
-static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
+static void xemaclite_aligned_write(const void *src_ptr, u32 *dest_ptr,
unsigned length)
{
+ const u16 *from_u16_ptr;
u32 align_buffer;
u32 *to_u32_ptr;
- u16 *from_u16_ptr, *to_u16_ptr;
+ u16 *to_u16_ptr;
to_u32_ptr = dest_ptr;
from_u16_ptr = src_ptr;
@@ -470,7 +471,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
* buffers (if configured).
*/
static void xemaclite_update_address(struct net_local *drvdata,
- u8 *address_ptr)
+ const u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -511,7 +512,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
xemaclite_update_address(lp, dev->dev_addr);
return 0;
}
@@ -1157,7 +1158,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->tx_ping_pong = get_bool(ofdev, "xlnx,tx-ping-pong");
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
- rc = of_get_mac_address(ofdev->dev.of_node, ndev->dev_addr);
+ rc = of_get_ethdev_address(ofdev->dev.of_node, ndev);
if (rc) {
dev_warn(dev, "No MAC address found, using random\n");
eth_hw_addr_random(ndev);
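The emaclite and netcp hunks switch from of_get_mac_address(node, ndev->dev_addr) to of_get_ethdev_address(), which writes through the netdev helper instead of into the now-const buffer. A minimal sketch of the probe-time pattern:

#include <linux/etherdevice.h>
#include <linux/of_net.h>

static void example_mac_from_dt(struct device_node *np, struct net_device *ndev)
{
	/* 0 on success; any error falls back to a random, locally
	 * administered address.
	 */
	if (of_get_ethdev_address(np, ndev))
		eth_hw_addr_random(ndev);
}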
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index ae611e46da6a..f9587e55b842 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -671,7 +671,6 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleDataLen != 13)
return -EINVAL;
@@ -679,8 +678,7 @@ static int pcmcia_get_mac_ce(struct pcmcia_device *p_dev,
(tuple->TupleData[2] != 6))
return -EINVAL;
/* another try (James Lehmer's CE2 version 4.1)*/
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ dev_addr_mod(dev, 2, &tuple->TupleData[2], 4);
return 0;
};
@@ -742,11 +740,9 @@ xirc2ps_config(struct pcmcia_device * link)
len = pcmcia_get_tuple(link, 0x89, &buf);
/* data layout looks like tuple 0x22 */
if (buf && len == 8) {
- if (*buf == CISTPL_FUNCE_LAN_NODE_ID) {
- int i;
- for (i = 2; i < 6; i++)
- dev->dev_addr[i] = buf[i+2];
- } else
+ if (*buf == CISTPL_FUNCE_LAN_NODE_ID)
+ dev_addr_mod(dev, 2, &buf[2], 4);
+ else
err = -1;
}
kfree(buf);
@@ -1271,7 +1267,7 @@ struct set_address_info {
unsigned int ioaddr;
};
-static void set_address(struct set_address_info *sa_info, char *addr)
+static void set_address(struct set_address_info *sa_info, const char *addr)
{
unsigned int ioaddr = sa_info->ioaddr;
int i;
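The xirc2ps hunks use dev_addr_mod() to rewrite only part of the address (bytes 2..5 taken from the CIS tuple) through the core helper. A minimal sketch; the function and buffer names are placeholders:

#include <linux/netdevice.h>

/* Overwrite four bytes of dev->dev_addr starting at offset 2. */
static void example_patch_mac_tail(struct net_device *dev, const u8 *tail4)
{
	dev_addr_mod(dev, 2, tail4, 4);
}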
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index 931494cc1c39..65fdad1107fc 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -1103,10 +1103,9 @@ static int init_queues(struct port *port)
return -ENOMEM;
}
- if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
- &port->desc_tab_phys)))
+ port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL, &port->desc_tab_phys);
+ if (!port->desc_tab)
return -ENOMEM;
- memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
@@ -1524,7 +1523,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev)
port->plat = plat;
npe_port_tab[NPE_ID(port->id)] = port;
- memcpy(ndev->dev_addr, plat->hwaddr, ETH_ALEN);
+ eth_hw_addr_set(ndev, plat->hwaddr);
platform_set_drvdata(pdev, ndev);
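The ixp4xx hunk folds dma_pool_alloc() plus memset() into dma_pool_zalloc(), which returns pre-zeroed descriptor memory. A minimal sketch with placeholder names:

#include <linux/dmapool.h>
#include <linux/gfp.h>

static void *example_alloc_desc_tab(struct dma_pool *pool, dma_addr_t *phys)
{
	/* Memory comes back already cleared; no follow-up memset() needed. */
	return dma_pool_zalloc(pool, GFP_KERNEL, phys);
}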
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index 6d1e3f49a3d3..b584ffe38ad6 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -1028,7 +1028,7 @@ static void dfx_bus_config_check(DFX_board_t *bp)
* or read adapter MAC address
*
* Assumptions:
- * Memory allocated from pci_alloc_consistent() call is physically
+ * Memory allocated from dma_alloc_coherent() call is physically
* contiguous, locked memory.
*
* Side Effects:
@@ -1117,7 +1117,7 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
* dfx_ctl_set_mac_address.
*/
- memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, bp->factory_mac_addr);
if (dfx_bus_tc)
board_name = "DEFTA";
if (dfx_bus_eisa)
@@ -1474,7 +1474,7 @@ static int dfx_open(struct net_device *dev)
* address.
*/
- memcpy(dev->dev_addr, bp->factory_mac_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, bp->factory_mac_addr);
/* Clear local unicast/multicast address tables and counts */
@@ -2379,7 +2379,7 @@ static int dfx_ctl_set_mac_address(struct net_device *dev, void *addr)
/* Copy unicast address to driver-maintained structs and update count */
- memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN); /* update device struct */
+ dev_addr_set(dev, p_sockaddr->sa_data); /* update device struct */
memcpy(&bp->uc_table[0], p_sockaddr->sa_data, FDDI_K_ALEN); /* update driver struct */
bp->uc_count = 1;
@@ -3249,7 +3249,7 @@ static void dfx_rcv_queue_process(
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* Since the adapter architecture requires a three byte
* packet request header to prepend the start of packet,
@@ -3402,7 +3402,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
* skb->data.
* 6. The physical address of the start of packet
* can be determined from the virtual address
- * by using pci_map_single() and is only 32-bits
+ * by using dma_map_single() and is only 32-bits
* wide.
*/
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 0de2c4552f5e..f5c25acaa577 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1380,7 +1380,7 @@ static int fza_probe(struct device *bdev)
goto err_out_irq;
fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
- memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);
+ dev_addr_set(dev, (u8 *)&hw_addr);
fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
diff --git a/drivers/net/fddi/skfp/h/smc.h b/drivers/net/fddi/skfp/h/smc.h
index 3814a2ff64ae..b0e6ce0d893e 100644
--- a/drivers/net/fddi/skfp/h/smc.h
+++ b/drivers/net/fddi/skfp/h/smc.h
@@ -470,7 +470,7 @@ void card_stop(struct s_smc *smc);
void init_board(struct s_smc *smc, u_char *mac_addr);
int init_fplus(struct s_smc *smc);
void init_plc(struct s_smc *smc);
-int init_smt(struct s_smc *smc, u_char *mac_addr);
+int init_smt(struct s_smc *smc, const u_char *mac_addr);
void mac1_irq(struct s_smc *smc, u_short stu, u_short stl);
void mac2_irq(struct s_smc *smc, u_short code_s2u, u_short code_s2l);
void mac3_irq(struct s_smc *smc, u_short code_s3u, u_short code_s3l);
diff --git a/drivers/net/fddi/skfp/skfddi.c b/drivers/net/fddi/skfp/skfddi.c
index c5cb421f9890..2b6a607ac0b7 100644
--- a/drivers/net/fddi/skfp/skfddi.c
+++ b/drivers/net/fddi/skfp/skfddi.c
@@ -78,6 +78,7 @@ static const char * const boot_msg =
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <linux/fddidevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
@@ -433,7 +434,7 @@ static int skfp_driver_init(struct net_device *dev)
}
read_address(smc, NULL);
pr_debug("HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
smt_reset_defaults(smc, 0);
@@ -500,7 +501,7 @@ static int skfp_open(struct net_device *dev)
* address.
*/
read_address(smc, NULL);
- memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, ETH_ALEN);
+ eth_hw_addr_set(dev, smc->hw.fddi_canon_addr.a);
init_smt(smc, NULL);
smt_online(smc, 1);
@@ -924,7 +925,7 @@ static int skfp_ctl_set_mac_address(struct net_device *dev, void *addr)
unsigned long Flags;
- memcpy(dev->dev_addr, p_sockaddr->sa_data, FDDI_K_ALEN);
+ dev_addr_set(dev, p_sockaddr->sa_data);
spin_lock_irqsave(&bp->DriverLock, Flags);
ResetAdapter(smc);
spin_unlock_irqrestore(&bp->DriverLock, Flags);
@@ -1012,7 +1013,7 @@ static int skfp_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __
* is contained in a single physically contiguous buffer
* in which the virtual address of the start of packet
* (skb->data) can be converted to a physical address
- * by using pci_map_single().
+ * by using dma_map_single().
*
* We have an internal queue for packets we can not send
* immediately. Packets in this queue can be given to the
diff --git a/drivers/net/fddi/skfp/smtinit.c b/drivers/net/fddi/skfp/smtinit.c
index c9898c83fe30..8b172c195685 100644
--- a/drivers/net/fddi/skfp/smtinit.c
+++ b/drivers/net/fddi/skfp/smtinit.c
@@ -19,7 +19,7 @@
#include "h/fddi.h"
#include "h/smc.h"
-void init_fddi_driver(struct s_smc *smc, u_char *mac_addr);
+void init_fddi_driver(struct s_smc *smc, const u_char *mac_addr);
/* define global debug variable */
#if defined(DEBUG) && !defined(DEBUG_BRD)
@@ -57,7 +57,7 @@ static void set_oem_spec_val(struct s_smc *smc)
/*
* Init SMT
*/
-int init_smt(struct s_smc *smc, u_char *mac_addr)
+int init_smt(struct s_smc *smc, const u_char *mac_addr)
/* u_char *mac_addr; canonical address or NULL */
{
int p ;
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 065bb0a40b1d..704e949484d0 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -137,7 +137,8 @@ static void fjes_hw_free_epbuf(struct epbuf_handler *epbh)
epbh->ring = NULL;
}
-void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, u8 *mac_addr, u32 mtu)
+void fjes_hw_setup_epbuf(struct epbuf_handler *epbh, const u8 *mac_addr,
+ u32 mtu)
{
union ep_buffer_info *info = epbh->info;
u16 vlan_id[EP_BUFFER_SUPPORT_VLAN_MAX];
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index b4608ea2a2d5..997c7b37a402 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -330,7 +330,7 @@ int fjes_hw_register_buff_addr(struct fjes_hw *, int,
int fjes_hw_unregister_buff_addr(struct fjes_hw *, int);
void fjes_hw_init_command_registers(struct fjes_hw *,
struct fjes_device_command_param *);
-void fjes_hw_setup_epbuf(struct epbuf_handler *, u8 *, u32);
+void fjes_hw_setup_epbuf(struct epbuf_handler *, const u8 *, u32);
int fjes_hw_raise_interrupt(struct fjes_hw *, int, enum REG_ICTL_MASK);
void fjes_hw_set_irqmask(struct fjes_hw *, enum REG_ICTL_MASK, bool);
u32 fjes_hw_capture_interrupt_status(struct fjes_hw *);
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 185c8a398681..b06c17ac8d4e 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1203,6 +1203,7 @@ static int fjes_probe(struct platform_device *plat_dev)
struct net_device *netdev;
struct resource *res;
struct fjes_hw *hw;
+ u8 addr[ETH_ALEN];
int err;
err = -ENOMEM;
@@ -1266,12 +1267,13 @@ static int fjes_probe(struct platform_device *plat_dev)
goto err_free_control_wq;
/* setup MAC address (02:00:00:00:00:[epid])*/
- netdev->dev_addr[0] = 2;
- netdev->dev_addr[1] = 0;
- netdev->dev_addr[2] = 0;
- netdev->dev_addr[3] = 0;
- netdev->dev_addr[4] = 0;
- netdev->dev_addr[5] = hw->my_epid; /* EPID */
+ addr[0] = 2;
+ addr[1] = 0;
+ addr[2] = 0;
+ addr[3] = 0;
+ addr[4] = 0;
+ addr[5] = hw->my_epid; /* EPID */
+ eth_hw_addr_set(netdev, addr);
err = register_netdev(netdev);
if (err)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 30e0a10595a1..24e5c54d06c1 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -539,7 +539,7 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu = dst_mtu(&rt->dst);
}
- rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false);
+ skb_dst_update_pmtu_no_confirm(skb, mtu);
if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) &&
mtu < ntohs(iph->tot_len)) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 6192244b304a..f4e8793e995d 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -288,7 +288,7 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -317,7 +317,7 @@ static void sp_setup(struct net_device *dev)
/* Only activated in AX.25 mode */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = 0;
}
@@ -726,7 +726,7 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
break;
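The hamradio conversions distinguish two helpers: dev_addr_set() copies dev->addr_len bytes, while __dev_addr_set() takes an explicit length for the AX.25 call-sign paths. A minimal sketch of the ioctl-style case; the function name is a placeholder:

#include <linux/netdevice.h>
#include <net/ax25.h>

static void example_set_ax25_call(struct net_device *dev,
				  const struct sockaddr_ax25 *sa)
{
	/* dev->addr_len is AX25_ADDR_LEN for these devices, so this is
	 * equivalent to dev_addr_set(dev, (const u8 *)&sa->sax25_call).
	 */
	__dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
}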
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 775dcf4ebde5..a03d0b474641 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -623,16 +623,16 @@ static int receive(struct net_device *dev, int cnt)
/* --------------------------------------------------------------------- */
-#ifdef __i386__
+#if defined(__i386__) && !defined(CONFIG_UML)
#include <asm/msr.h>
#define GETTICK(x) \
({ \
if (boot_cpu_has(X86_FEATURE_TSC)) \
x = (unsigned int)rdtsc(); \
})
-#else /* __i386__ */
+#else /* __i386__ && !CONFIG_UML */
#define GETTICK(x)
-#endif /* __i386__ */
+#endif /* __i386__ && !CONFIG_UML */
static void epp_bh(struct work_struct *work)
{
@@ -791,7 +791,7 @@ static int baycom_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1159,7 +1159,7 @@ static void baycom_probe(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &null_ax25_address, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&null_ax25_address);
dev->tx_queue_len = 16;
/* New style flags */
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index d967b0748773..30af0081e2be 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -302,7 +302,7 @@ static int bpq_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *)addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -457,9 +457,6 @@ static void bpq_setup(struct net_device *dev)
dev->netdev_ops = &bpq_netdev_ops;
dev->needs_free_netdev = true;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->features = NETIF_F_LLTX; /* Allow recursion */
@@ -472,6 +469,8 @@ static void bpq_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/*
diff --git a/drivers/net/hamradio/dmascc.c b/drivers/net/hamradio/dmascc.c
index f4c3efc3e074..7e527499d3ad 100644
--- a/drivers/net/hamradio/dmascc.c
+++ b/drivers/net/hamradio/dmascc.c
@@ -426,7 +426,7 @@ static void __init dev_setup(struct net_device *dev)
dev->addr_len = AX25_ADDR_LEN;
dev->tx_queue_len = 64;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static const struct net_device_ops scc_netdev_ops = {
@@ -956,8 +956,7 @@ static int scc_send_packet(struct sk_buff *skb, struct net_device *dev)
static int scc_set_mac_address(struct net_device *dev, void *sa)
{
- memcpy(dev->dev_addr, ((struct sockaddr *) sa)->sa_data,
- dev->addr_len);
+ dev_addr_set(dev, ((struct sockaddr *)sa)->sa_data);
return 0;
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 5805cfc83854..b0edb91bb10a 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -415,7 +415,7 @@ static int hdlcdrv_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *)addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -675,7 +675,7 @@ static void hdlcdrv_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN; /* eth_mtu is the default */
dev->addr_len = AX25_ADDR_LEN; /* sizeof an ax.25 address */
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->tx_queue_len = 16;
}
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 8666110bec55..867252a0247b 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -344,7 +344,7 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
netif_tx_lock_bh(dev);
netif_addr_lock(dev);
- memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
+ __dev_addr_set(dev, &sa->sax25_call, AX25_ADDR_LEN);
netif_addr_unlock(dev);
netif_tx_unlock_bh(dev);
@@ -647,7 +647,7 @@ static void ax_setup(struct net_device *dev)
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
dev->flags = IFF_BROADCAST | IFF_MULTICAST;
}
@@ -850,7 +850,7 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
}
netif_tx_lock_bh(dev);
- memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
+ __dev_addr_set(dev, addr, AX25_ADDR_LEN);
netif_tx_unlock_bh(dev);
err = 0;
diff --git a/drivers/net/hamradio/scc.c b/drivers/net/hamradio/scc.c
index e0bb131a33d7..3d59dac063ac 100644
--- a/drivers/net/hamradio/scc.c
+++ b/drivers/net/hamradio/scc.c
@@ -1563,9 +1563,6 @@ static void scc_net_setup(struct net_device *dev)
dev->netdev_ops = &scc_netdev_ops;
dev->header_ops = &ax25_header_ops;
- memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
-
dev->flags = 0;
dev->type = ARPHRD_AX25;
@@ -1573,6 +1570,8 @@ static void scc_net_setup(struct net_device *dev)
dev->mtu = AX25_DEF_PACLEN;
dev->addr_len = AX25_ADDR_LEN;
+ memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
/* ----> open network device <---- */
@@ -1951,7 +1950,7 @@ static int scc_net_siocdevprivate(struct net_device *dev,
static int scc_net_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = (struct sockaddr *) addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 6ddacbdb224b..6376b8485976 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1063,7 +1063,7 @@ static int yam_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *sa = (struct sockaddr *) addr;
/* addr is an AX.25 shifted ASCII mac address */
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
@@ -1107,7 +1107,7 @@ static void yam_setup(struct net_device *dev)
dev->mtu = AX25_MTU;
dev->addr_len = AX25_ADDR_LEN;
memcpy(dev->broadcast, &ax25_bcast, AX25_ADDR_LEN);
- memcpy(dev->dev_addr, &ax25_defaddr, AX25_ADDR_LEN);
+ dev_addr_set(dev, (u8 *)&ax25_defaddr);
}
static int __init yam_init_driver(void)
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c
index 7661dbb31162..16105292b140 100644
--- a/drivers/net/hippi/rrunner.c
+++ b/drivers/net/hippi/rrunner.c
@@ -502,6 +502,7 @@ static unsigned int write_eeprom(struct rr_private *rrpriv,
static int rr_init(struct net_device *dev)
{
+ u8 addr[HIPPI_ALEN] __aligned(4);
struct rr_private *rrpriv;
struct rr_regs __iomem *regs;
u32 sram_size, rev;
@@ -537,10 +538,11 @@ static int rr_init(struct net_device *dev)
* other method I've seen. -VAL
*/
- *(__be16 *)(dev->dev_addr) =
+ *(__be16 *)(addr) =
htons(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA)));
- *(__be32 *)(dev->dev_addr+2) =
+ *(__be32 *)(addr+2) =
htonl(rr_read_eeprom_word(rrpriv, offsetof(struct eeprom, manf.BoardULA[4])));
+ dev_addr_set(dev, addr);
printk(" MAC: %pM\n", dev->dev_addr);
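When the address has to be assembled piecewise, as rr_init() does above, the series builds it in an aligned local buffer and commits it with a single dev_addr_set() call. A hedged sketch of the same pattern; eth_random_addr() merely stands in for the EEPROM reads and the function name is invented:

#include <linux/etherdevice.h>

static void example_assign_address(struct net_device *dev)
{
	u8 addr[ETH_ALEN] __aligned(2);

	eth_random_addr(addr);		/* stand-in for reading the hardware */
	dev_addr_set(dev, addr);	/* single committed write */
}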
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index bc48855dff10..315278a7cf88 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -1075,14 +1075,15 @@ struct netvsc_device {
/* Receive buffer allocated by us but manages by NetVSP */
void *recv_buf;
u32 recv_buf_size; /* allocated bytes */
- u32 recv_buf_gpadl_handle;
+ struct vmbus_gpadl recv_buf_gpadl_handle;
u32 recv_section_cnt;
u32 recv_section_size;
u32 recv_completion_cnt;
/* Send buffer allocated by us */
void *send_buf;
- u32 send_buf_gpadl_handle;
+ u32 send_buf_size;
+ struct vmbus_gpadl send_buf_gpadl_handle;
u32 send_section_cnt;
u32 send_section_size;
unsigned long *send_section_map;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 7bd935412853..396bc1c204e6 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -278,9 +278,9 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
{
int ret;
- if (net_device->recv_buf_gpadl_handle) {
+ if (net_device->recv_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
- net_device->recv_buf_gpadl_handle);
+ &net_device->recv_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -290,7 +290,6 @@ static void netvsc_teardown_recv_gpadl(struct hv_device *device,
"unable to teardown receive buffer's gpadl\n");
return;
}
- net_device->recv_buf_gpadl_handle = 0;
}
}
@@ -300,9 +299,9 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
{
int ret;
- if (net_device->send_buf_gpadl_handle) {
+ if (net_device->send_buf_gpadl_handle.gpadl_handle) {
ret = vmbus_teardown_gpadl(device->channel,
- net_device->send_buf_gpadl_handle);
+ &net_device->send_buf_gpadl_handle);
/* If we failed here, we might as well return and have a leak
* rather than continue and a bugchk
@@ -312,7 +311,6 @@ static void netvsc_teardown_send_gpadl(struct hv_device *device,
"unable to teardown send buffer's gpadl\n");
return;
}
- net_device->send_buf_gpadl_handle = 0;
}
}
@@ -380,7 +378,7 @@ static int netvsc_init_buf(struct hv_device *device,
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_RECV_BUF;
init_packet->msg.v1_msg.send_recv_buf.
- gpadl_handle = net_device->recv_buf_gpadl_handle;
+ gpadl_handle = net_device->recv_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.
send_recv_buf.id = NETVSC_RECEIVE_BUFFER_ID;
@@ -463,6 +461,7 @@ static int netvsc_init_buf(struct hv_device *device,
ret = -ENOMEM;
goto cleanup;
}
+ net_device->send_buf_size = buf_size;
/* Establish the gpadl handle for this buffer on this
* channel. Note: This call uses the vmbus connection rather
@@ -482,7 +481,7 @@ static int netvsc_init_buf(struct hv_device *device,
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
- net_device->send_buf_gpadl_handle;
+ net_device->send_buf_gpadl_handle.gpadl_handle;
init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
trace_nvsp_send(ndev, init_packet);
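The netvsc hunks above track a vmbus API change: the raw GPADL handle is now wrapped in struct vmbus_gpadl, vmbus_teardown_gpadl() takes a pointer to that struct, and the handle no longer needs to be zeroed by the caller. A condensed sketch of the resulting teardown check, assuming the post-conversion API used in these hunks (function name invented):

#include <linux/hyperv.h>
#include <linux/printk.h>

static void example_teardown(struct vmbus_channel *chan,
			     struct vmbus_gpadl *gpadl)
{
	/* a zero .gpadl_handle means the GPADL was never established */
	if (!gpadl->gpadl_handle)
		return;

	if (vmbus_teardown_gpadl(chan, gpadl))
		pr_warn("GPADL teardown failed; leaking the buffer\n");
}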
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 382bebc2420d..7e66ae1d2a59 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -803,6 +803,7 @@ void netvsc_linkstatus_callback(struct net_device *net,
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+/* This function should only be called after skb_record_rx_queue() */
static void netvsc_xdp_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int rc;
@@ -2536,7 +2537,7 @@ static int netvsc_probe(struct hv_device *dev,
goto rndis_failed;
}
- memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);
+ eth_hw_addr_set(net, device_info->mac_adr);
/* We must get rtnl lock before scheduling nvdev->subchan_work,
* otherwise netvsc_subchan_work() can get rtnl lock first and wait
@@ -2742,8 +2743,7 @@ static int netvsc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
/* Avoid Bonding master dev with same MAC registering as VF */
- if ((event_dev->priv_flags & IFF_BONDING) &&
- (event_dev->flags & IFF_MASTER))
+ if (netif_is_bond_master(event_dev))
return NOTIFY_DONE;
switch (event) {
diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c
index 3a2824f24caa..ece6ff6049f6 100644
--- a/drivers/net/ieee802154/ca8210.c
+++ b/drivers/net/ieee802154/ca8210.c
@@ -2938,9 +2938,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv)
*/
static void ca8210_dev_com_clear(struct ca8210_priv *priv)
{
- flush_workqueue(priv->mlme_workqueue);
destroy_workqueue(priv->mlme_workqueue);
- flush_workqueue(priv->irq_workqueue);
destroy_workqueue(priv->irq_workqueue);
}
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index e9258a9f3702..31f522b8e54e 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -31,6 +31,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
+#include <linux/netfilter_netdev.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -75,8 +76,12 @@ static void ifb_ri_tasklet(struct tasklet_struct *t)
}
while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
+ /* Skip tc and netfilter to prevent redirection loop. */
skb->redirected = 0;
+#ifdef CONFIG_NET_CLS_ACT
skb->tc_skip_classify = 1;
+#endif
+ nf_skip_egress(skb, true);
u64_stats_update_begin(&txp->tsync);
txp->tx_packets++;
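The ifb hunk above prevents a redirect loop by flagging the re-injected skb so that both the tc classifier and the new netfilter egress hook skip it. Restated as a standalone helper (illustrative only; nf_skip_egress() comes from the header added above):

#include <linux/netfilter_netdev.h>
#include <linux/skbuff.h>

static void example_skip_redirect_hooks(struct sk_buff *skb)
{
	skb->redirected = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_skip_classify = 1;	/* field only exists with NET_CLS_ACT */
#endif
	nf_skip_egress(skb, true);	/* no-op without netfilter egress support */
}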
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index c0b21a5580d5..1d2f4e7d7324 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -579,7 +579,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
* world but keep using the physical-dev address for the outgoing
* packets.
*/
- memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, phy_dev->dev_addr);
dev->priv_flags |= IFF_NO_RX_HANDLER;
@@ -787,7 +787,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
case NETDEV_CHANGEADDR:
list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr);
+ eth_hw_addr_set(ipvlan->dev, dev->dev_addr);
call_netdevice_notifiers(NETDEV_CHANGEADDR, ipvlan->dev);
}
break;
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c
index 1cedb634f4f7..ef02f2cf5ce1 100644
--- a/drivers/net/ipvlan/ipvtap.c
+++ b/drivers/net/ipvlan/ipvtap.c
@@ -162,7 +162,7 @@ static int ipvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(ipvtap_major), vlantap->tap.minor);
classdev = device_create(&ipvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(ipvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 93dc48b9b4f2..16aa3a478e9e 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -250,7 +250,7 @@ static bool send_sci(const struct macsec_secy *secy)
(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}
-static sci_t make_sci(u8 *addr, __be16 port)
+static sci_t make_sci(const u8 *addr, __be16 port)
{
sci_t sci;
@@ -3614,7 +3614,7 @@ static int macsec_set_mac_address(struct net_device *dev, void *p)
dev_uc_del(real_dev, dev->dev_addr);
out:
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
/* If h/w offloading is available, propagate to the device */
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 35f46ad040b0..d2f830ec2969 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -202,7 +202,7 @@ static void macvlan_hash_change_addr(struct macvlan_dev *vlan,
/* Now that we are unhashed it is safe to change the device
* address without confusing packet delivery.
*/
- memcpy(vlan->dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(vlan->dev, addr);
macvlan_hash_add(vlan);
}
@@ -698,7 +698,8 @@ hash_del:
return 0;
}
-static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
+static int macvlan_sync_address(struct net_device *dev,
+ const unsigned char *addr)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
@@ -707,7 +708,7 @@ static int macvlan_sync_address(struct net_device *dev, unsigned char *addr)
if (!(dev->flags & IFF_UP)) {
/* Just copy in the new address */
- ether_addr_copy(dev->dev_addr, addr);
+ eth_hw_addr_set(dev, addr);
} else {
/* Rehash and update the device filters */
if (macvlan_addr_busy(vlan->port, addr))
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 694e2f5dbbe5..6b12902a803f 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -169,7 +169,7 @@ static int macvtap_device_event(struct notifier_block *unused,
devt = MKDEV(MAJOR(macvtap_major), vlantap->tap.minor);
classdev = device_create(&macvtap_class, &dev->dev, devt,
- dev, tap_name);
+ dev, "%s", tap_name);
if (IS_ERR(classdev)) {
tap_free_minor(macvtap_major, &vlantap->tap);
return notifier_from_errno(PTR_ERR(classdev));
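The ipvtap and macvtap hunks are the same hardening: device_create() takes a printf-style format, so a name derived from the netdevice is passed as an argument to "%s" instead of being used as the format string itself. A sketch with invented names:

#include <linux/device.h>

static struct device *example_create_tap_dev(struct class *cls, dev_t devt,
					     void *drvdata, const char *tap_name)
{
	/* never pass tap_name as the format string itself */
	return device_create(cls, NULL, devt, drvdata, "%s", tap_name);
}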
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 2a4892402ed8..86ec5aae4289 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -748,8 +748,7 @@ struct failover *net_failover_create(struct net_device *standby_dev)
failover_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
failover_dev->features |= failover_dev->hw_features;
- memcpy(failover_dev->dev_addr, standby_dev->dev_addr,
- failover_dev->addr_len);
+ dev_addr_set(failover_dev, standby_dev->dev_addr);
failover_dev->min_mtu = standby_dev->min_mtu;
failover_dev->max_mtu = standby_dev->max_mtu;
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
index 29f5627d11e6..25cb2e600d53 100644
--- a/drivers/net/netdevsim/bus.c
+++ b/drivers/net/netdevsim/bus.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
-#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
@@ -24,39 +23,6 @@ static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
return container_of(dev, struct nsim_bus_dev, dev);
}
-static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
- unsigned int num_vfs)
-{
- struct nsim_dev *nsim_dev;
- int err = 0;
-
- if (nsim_bus_dev->max_vfs < num_vfs)
- return -ENOMEM;
-
- if (!nsim_bus_dev->vfconfigs)
- return -ENOMEM;
- nsim_bus_dev->num_vfs = num_vfs;
-
- nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
- if (nsim_esw_mode_is_switchdev(nsim_dev)) {
- err = nsim_esw_switchdev_enable(nsim_dev, NULL);
- if (err)
- nsim_bus_dev->num_vfs = 0;
- }
-
- return err;
-}
-
-void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
-{
- struct nsim_dev *nsim_dev;
-
- nsim_bus_dev->num_vfs = 0;
- nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
- if (nsim_esw_mode_is_switchdev(nsim_dev))
- nsim_esw_legacy_enable(nsim_dev, NULL);
-}
-
static ssize_t
nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -69,27 +35,13 @@ nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
if (ret)
return ret;
- mutex_lock(&nsim_bus_dev->vfs_lock);
- if (nsim_bus_dev->num_vfs == num_vfs)
- goto exit_good;
- if (nsim_bus_dev->num_vfs && num_vfs) {
- ret = -EBUSY;
- goto exit_unlock;
- }
-
- if (num_vfs) {
- ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs);
- if (ret)
- goto exit_unlock;
- } else {
- nsim_bus_dev_vfs_disable(nsim_bus_dev);
- }
-exit_good:
- ret = count;
-exit_unlock:
- mutex_unlock(&nsim_bus_dev->vfs_lock);
+ device_lock(dev);
+ ret = -ENOENT;
+ if (dev_get_drvdata(dev))
+ ret = nsim_drv_configure_vfs(nsim_bus_dev, num_vfs);
+ device_unlock(dev);
- return ret;
+ return ret ? ret : count;
}
static ssize_t
@@ -105,79 +57,6 @@ static struct device_attribute nsim_bus_dev_numvfs_attr =
__ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
nsim_bus_dev_numvfs_store);
-ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
- char __user *data,
- size_t count, loff_t *ppos)
-{
- struct nsim_bus_dev *nsim_bus_dev = file->private_data;
- char buf[11];
- ssize_t len;
-
- len = snprintf(buf, sizeof(buf), "%u\n", nsim_bus_dev->max_vfs);
- if (len < 0)
- return len;
-
- return simple_read_from_buffer(data, count, ppos, buf, len);
-}
-
-ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
- const char __user *data,
- size_t count, loff_t *ppos)
-{
- struct nsim_bus_dev *nsim_bus_dev = file->private_data;
- struct nsim_vf_config *vfconfigs;
- ssize_t ret;
- char buf[10];
- u32 val;
-
- if (*ppos != 0)
- return 0;
-
- if (count >= sizeof(buf))
- return -ENOSPC;
-
- mutex_lock(&nsim_bus_dev->vfs_lock);
- /* Reject if VFs are configured */
- if (nsim_bus_dev->num_vfs) {
- ret = -EBUSY;
- goto unlock;
- }
-
- ret = copy_from_user(buf, data, count);
- if (ret) {
- ret = -EFAULT;
- goto unlock;
- }
-
- buf[count] = '\0';
- ret = kstrtouint(buf, 10, &val);
- if (ret) {
- ret = -EIO;
- goto unlock;
- }
-
- /* max_vfs limited by the maximum number of provided port indexes */
- if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE) {
- ret = -ERANGE;
- goto unlock;
- }
-
- vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config), GFP_KERNEL | __GFP_NOWARN);
- if (!vfconfigs) {
- ret = -ENOMEM;
- goto unlock;
- }
-
- kfree(nsim_bus_dev->vfconfigs);
- nsim_bus_dev->vfconfigs = vfconfigs;
- nsim_bus_dev->max_vfs = val;
- *ppos += count;
- ret = count;
-unlock:
- mutex_unlock(&nsim_bus_dev->vfs_lock);
- return ret;
-}
-
static ssize_t
new_port_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -201,7 +80,7 @@ new_port_store(struct device *dev, struct device_attribute *attr,
return -EBUSY;
}
- ret = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
+ ret = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -231,7 +110,7 @@ del_port_store(struct device *dev, struct device_attribute *attr,
return -EBUSY;
}
- ret = nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
+ ret = nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_PF, port_index);
mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
return ret ? ret : count;
}
@@ -371,14 +250,14 @@ static int nsim_bus_probe(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
- return nsim_dev_probe(nsim_bus_dev);
+ return nsim_drv_probe(nsim_bus_dev);
}
static void nsim_bus_remove(struct device *dev)
{
struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
- nsim_dev_remove(nsim_bus_dev);
+ nsim_drv_remove(nsim_bus_dev);
}
static int nsim_num_vf(struct device *dev)
@@ -420,26 +299,15 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu
nsim_bus_dev->initial_net = current->nsproxy->net_ns;
nsim_bus_dev->max_vfs = NSIM_BUS_DEV_MAX_VFS;
mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
- mutex_init(&nsim_bus_dev->vfs_lock);
/* Disallow using nsim_bus_dev */
smp_store_release(&nsim_bus_dev->init, false);
- nsim_bus_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs,
- sizeof(struct nsim_vf_config),
- GFP_KERNEL | __GFP_NOWARN);
- if (!nsim_bus_dev->vfconfigs) {
- err = -ENOMEM;
- goto err_nsim_bus_dev_id_free;
- }
-
err = device_register(&nsim_bus_dev->dev);
if (err)
- goto err_nsim_vfs_free;
+ goto err_nsim_bus_dev_id_free;
return nsim_bus_dev;
-err_nsim_vfs_free:
- kfree(nsim_bus_dev->vfconfigs);
err_nsim_bus_dev_id_free:
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
err_nsim_bus_dev_free:
@@ -453,7 +321,6 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
smp_store_release(&nsim_bus_dev->init, false);
device_unregister(&nsim_bus_dev->dev);
ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
- kfree(nsim_bus_dev->vfconfigs);
kfree(nsim_bus_dev);
}
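The rewritten sriov_numvfs store above moves all VF bookkeeping into the driver: the bus code only takes the device lock, checks via drvdata that a driver is actually bound, and forwards the request to nsim_drv_configure_vfs(). A reduced sketch of that forwarding pattern, with the driver callback replaced by a placeholder and the function name invented:

#include <linux/device.h>

static int example_forward_to_driver(struct device *dev)
{
	int ret = -ENOENT;

	device_lock(dev);
	if (dev_get_drvdata(dev))	/* only if a driver is bound */
		ret = 0;		/* the driver callback would go here */
	device_unlock(dev);

	return ret;
}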
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 54313bd57797..54345c096a16 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -56,6 +56,22 @@ static inline unsigned int nsim_dev_port_index_to_vf_index(unsigned int port_ind
static struct dentry *nsim_dev_ddir;
+unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev)
+{
+ WARN_ON(!lockdep_rtnl_is_held() &&
+ !lockdep_is_held(&nsim_dev->vfs_lock));
+
+ return nsim_dev->nsim_bus_dev->num_vfs;
+}
+
+static void
+nsim_bus_dev_set_vfs(struct nsim_bus_dev *nsim_bus_dev, unsigned int num_vfs)
+{
+ rtnl_lock();
+ nsim_bus_dev->num_vfs = num_vfs;
+ rtnl_unlock();
+}
+
#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
static int
@@ -211,6 +227,70 @@ static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
.owner = THIS_MODULE,
};
+static ssize_t nsim_bus_dev_max_vfs_read(struct file *file, char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ char buf[11];
+ ssize_t len;
+
+ len = scnprintf(buf, sizeof(buf), "%u\n",
+ READ_ONCE(nsim_dev->nsim_bus_dev->max_vfs));
+
+ return simple_read_from_buffer(data, count, ppos, buf, len);
+}
+
+static ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_vf_config *vfconfigs;
+ struct nsim_dev *nsim_dev;
+ char buf[10];
+ ssize_t ret;
+ u32 val;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(buf))
+ return -ENOSPC;
+
+ ret = copy_from_user(buf, data, count);
+ if (ret)
+ return -EFAULT;
+ buf[count] = '\0';
+
+ ret = kstrtouint(buf, 10, &val);
+ if (ret)
+ return -EINVAL;
+
+ /* max_vfs limited by the maximum number of provided port indexes */
+ if (val > NSIM_DEV_VF_PORT_INDEX_MAX - NSIM_DEV_VF_PORT_INDEX_BASE)
+ return -ERANGE;
+
+ vfconfigs = kcalloc(val, sizeof(struct nsim_vf_config),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!vfconfigs)
+ return -ENOMEM;
+
+ nsim_dev = file->private_data;
+ mutex_lock(&nsim_dev->vfs_lock);
+ /* Reject if VFs are configured */
+ if (nsim_dev_get_vfs(nsim_dev)) {
+ ret = -EBUSY;
+ } else {
+ swap(nsim_dev->vfconfigs, vfconfigs);
+ WRITE_ONCE(nsim_dev->nsim_bus_dev->max_vfs, val);
+ *ppos += count;
+ ret = count;
+ }
+ mutex_unlock(&nsim_dev->vfs_lock);
+
+ kfree(vfconfigs);
+ return ret;
+}
+
static const struct file_operations nsim_dev_max_vfs_fops = {
.open = simple_open,
.read = nsim_bus_dev_max_vfs_read,
@@ -259,11 +339,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
debugfs_create_bool("fail_trap_policer_counter_get", 0600,
nsim_dev->ddir,
&nsim_dev->fail_trap_policer_counter_get);
- nsim_dev->max_vfs = debugfs_create_file("max_vfs",
- 0600,
- nsim_dev->ddir,
- nsim_dev->nsim_bus_dev,
- &nsim_dev_max_vfs_fops);
+ debugfs_create_file("max_vfs", 0600, nsim_dev->ddir,
+ nsim_dev, &nsim_dev_max_vfs_fops);
+
nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir);
if (IS_ERR(nsim_dev->nodes_ddir)) {
err = PTR_ERR(nsim_dev->nodes_ddir);
@@ -328,9 +406,9 @@ static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
unsigned int vf_id = nsim_dev_port_index_to_vf_index(port_index);
debugfs_create_u16("tx_share", 0400, nsim_dev_port->ddir,
- &nsim_bus_dev->vfconfigs[vf_id].min_tx_rate);
+ &nsim_dev->vfconfigs[vf_id].min_tx_rate);
debugfs_create_u16("tx_max", 0400, nsim_dev_port->ddir,
- &nsim_bus_dev->vfconfigs[vf_id].max_tx_rate);
+ &nsim_dev->vfconfigs[vf_id].max_tx_rate);
nsim_dev_port->rate_parent = debugfs_create_file("rate_parent",
0400,
nsim_dev_port->ddir,
@@ -490,7 +568,9 @@ static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
}
static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port);
-int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+
+static int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
{
struct devlink *devlink = priv_to_devlink(nsim_dev);
struct nsim_dev_port *nsim_dev_port, *tmp;
@@ -505,13 +585,14 @@ int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *ex
return 0;
}
-int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack)
+static int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
{
struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
int i, err;
- for (i = 0; i < nsim_bus_dev->num_vfs; i++) {
- err = nsim_dev_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ for (i = 0; i < nsim_dev_get_vfs(nsim_dev); i++) {
+ err = nsim_drv_port_add(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed to initialize VFs' netdevsim ports");
pr_err("Failed to initialize VF id=%d. %d.\n", i, err);
@@ -523,7 +604,7 @@ int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack
err_port_add_vfs:
for (i--; i >= 0; i--)
- nsim_dev_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
+ nsim_drv_port_del(nsim_bus_dev, NSIM_DEV_PORT_TYPE_VF, i);
return err;
}
@@ -533,7 +614,7 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct nsim_dev *nsim_dev = devlink_priv(devlink);
int err = 0;
- mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ mutex_lock(&nsim_dev->vfs_lock);
if (mode == nsim_dev->esw_mode)
goto unlock;
@@ -545,7 +626,7 @@ static int nsim_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
err = -EINVAL;
unlock:
- mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ mutex_unlock(&nsim_dev->vfs_lock);
return err;
}
@@ -1093,7 +1174,7 @@ static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_share, struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port = priv;
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
int err;
@@ -1101,7 +1182,7 @@ static int nsim_leaf_tx_share_set(struct devlink_rate *devlink_rate, void *priv,
if (err)
return err;
- nsim_bus_dev->vfconfigs[vf_id].min_tx_rate = tx_share;
+ nsim_dev->vfconfigs[vf_id].min_tx_rate = tx_share;
return 0;
}
@@ -1109,7 +1190,7 @@ static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
u64 tx_max, struct netlink_ext_ack *extack)
{
struct nsim_dev_port *nsim_dev_port = priv;
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev_port->ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
int vf_id = nsim_dev_port_index_to_vf_index(nsim_dev_port->port_index);
int err;
@@ -1117,7 +1198,7 @@ static int nsim_leaf_tx_max_set(struct devlink_rate *devlink_rate, void *priv,
if (err)
return err;
- nsim_bus_dev->vfconfigs[vf_id].max_tx_rate = tx_max;
+ nsim_dev->vfconfigs[vf_id].max_tx_rate = tx_max;
return 0;
}
@@ -1273,13 +1354,12 @@ static const struct devlink_ops nsim_dev_devlink_ops = {
static int __nsim_dev_port_add(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
- struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
struct devlink_port_attrs attrs = {};
struct nsim_dev_port *nsim_dev_port;
struct devlink_port *devlink_port;
int err;
- if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_bus_dev->num_vfs)
+ if (type == NSIM_DEV_PORT_TYPE_VF && !nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
@@ -1442,7 +1522,7 @@ err_dummy_region_exit:
return err;
}
-int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev;
struct devlink *devlink;
@@ -1457,6 +1537,7 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->vfs_lock);
mutex_init(&nsim_dev->port_list_lock);
nsim_dev->fw_update_status = true;
nsim_dev->fw_update_overwrite_mask = 0;
@@ -1466,13 +1547,17 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
- err = nsim_dev_resources_register(devlink);
- if (err)
+ nsim_dev->vfconfigs = kcalloc(nsim_bus_dev->max_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!nsim_dev->vfconfigs) {
+ err = -ENOMEM;
goto err_devlink_free;
+ }
- err = devlink_register(devlink);
+ err = nsim_dev_resources_register(devlink);
if (err)
- goto err_resources_unregister;
+ goto err_vfc_free;
err = devlink_params_register(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
@@ -1514,9 +1599,9 @@ int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
if (err)
goto err_psample_exit;
- devlink_params_publish(devlink);
- devlink_reload_enable(devlink);
nsim_dev->esw_mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ devlink_set_features(devlink, DEVLINK_F_RELOAD);
+ devlink_register(devlink);
return 0;
err_psample_exit:
@@ -1537,11 +1622,12 @@ err_params_unregister:
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
err_dl_unregister:
- devlink_unregister(devlink);
-err_resources_unregister:
devlink_resources_unregister(devlink, NULL);
+err_vfc_free:
+ kfree(nsim_dev->vfconfigs);
err_devlink_free:
devlink_free(devlink);
+ dev_set_drvdata(&nsim_bus_dev->dev, NULL);
return err;
}
@@ -1553,10 +1639,13 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
return;
debugfs_remove(nsim_dev->take_snapshot);
- mutex_lock(&nsim_dev->nsim_bus_dev->vfs_lock);
- if (nsim_dev->nsim_bus_dev->num_vfs)
- nsim_bus_dev_vfs_disable(nsim_dev->nsim_bus_dev);
- mutex_unlock(&nsim_dev->nsim_bus_dev->vfs_lock);
+ mutex_lock(&nsim_dev->vfs_lock);
+ if (nsim_dev_get_vfs(nsim_dev)) {
+ nsim_bus_dev_set_vfs(nsim_dev->nsim_bus_dev, 0);
+ if (nsim_esw_mode_is_switchdev(nsim_dev))
+ nsim_esw_legacy_enable(nsim_dev, NULL);
+ }
+ mutex_unlock(&nsim_dev->vfs_lock);
nsim_dev_port_del_all(nsim_dev);
nsim_dev_psample_exit(nsim_dev);
@@ -1567,22 +1656,22 @@ static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
mutex_destroy(&nsim_dev->port_list_lock);
}
-void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
+void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
struct devlink *devlink = priv_to_devlink(nsim_dev);
- devlink_reload_disable(devlink);
-
+ devlink_unregister(devlink);
nsim_dev_reload_destroy(nsim_dev);
nsim_bpf_dev_exit(nsim_dev);
nsim_dev_debugfs_exit(nsim_dev);
devlink_params_unregister(devlink, nsim_devlink_params,
ARRAY_SIZE(nsim_devlink_params));
- devlink_unregister(devlink);
devlink_resources_unregister(devlink, NULL);
+ kfree(nsim_dev->vfconfigs);
devlink_free(devlink);
+ dev_set_drvdata(&nsim_bus_dev->dev, NULL);
}
static struct nsim_dev_port *
@@ -1598,7 +1687,7 @@ __nsim_dev_port_lookup(struct nsim_dev *nsim_dev, enum nsim_dev_port_type type,
return NULL;
}
-int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
+int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
@@ -1613,7 +1702,7 @@ int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
return err;
}
-int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
+int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type type,
unsigned int port_index)
{
struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
@@ -1630,6 +1719,43 @@ int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev, enum nsim_dev_port_type
return err;
}
+int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int num_vfs)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ int ret = 0;
+
+ mutex_lock(&nsim_dev->vfs_lock);
+ if (nsim_bus_dev->num_vfs == num_vfs)
+ goto exit_unlock;
+ if (nsim_bus_dev->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+ if (nsim_bus_dev->max_vfs < num_vfs) {
+ ret = -ENOMEM;
+ goto exit_unlock;
+ }
+
+ nsim_bus_dev_set_vfs(nsim_bus_dev, num_vfs);
+ if (nsim_esw_mode_is_switchdev(nsim_dev)) {
+ if (num_vfs) {
+ ret = nsim_esw_switchdev_enable(nsim_dev, NULL);
+ if (ret) {
+ nsim_bus_dev_set_vfs(nsim_bus_dev, 0);
+ goto exit_unlock;
+ }
+ } else {
+ nsim_esw_legacy_enable(nsim_dev, NULL);
+ }
+ }
+
+exit_unlock:
+ mutex_unlock(&nsim_dev->vfs_lock);
+
+ return ret;
+}
+
int nsim_dev_init(void)
{
nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
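One detail of the new max_vfs write handler above is worth restating: the replacement vfconfigs array is allocated before the lock is taken, swapped in under nsim_dev->vfs_lock with swap(), and whichever array is left over is freed after unlocking, so no allocation or free happens while the mutex is held. A generic sketch of that resize idiom (names invented, unconditional swap for brevity):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>

static int example_resize_array(struct mutex *lock, void **slot, size_t bytes)
{
	void *new_buf = kzalloc(bytes, GFP_KERNEL);

	if (!new_buf)
		return -ENOMEM;

	mutex_lock(lock);
	swap(*slot, new_buf);	/* new_buf now points at the old array */
	mutex_unlock(lock);

	kfree(new_buf);		/* frees whichever array was replaced */
	return 0;
}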
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
index b03a0513eb7e..0ab6a40be611 100644
--- a/drivers/net/netdevsim/ethtool.c
+++ b/drivers/net/netdevsim/ethtool.c
@@ -81,6 +81,30 @@ static int nsim_set_ringparam(struct net_device *dev,
return 0;
}
+static void
+nsim_get_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ ch->max_combined = ns->nsim_bus_dev->num_queues;
+ ch->combined_count = ns->ethtool.channels;
+}
+
+static int
+nsim_set_channels(struct net_device *dev, struct ethtool_channels *ch)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ err = netif_set_real_num_queues(dev, ch->combined_count,
+ ch->combined_count);
+ if (err)
+ return err;
+
+ ns->ethtool.channels = ch->combined_count;
+ return 0;
+}
+
static int
nsim_get_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam)
{
@@ -118,6 +142,8 @@ static const struct ethtool_ops nsim_ethtool_ops = {
.get_coalesce = nsim_get_coalesce,
.get_ringparam = nsim_get_ringparam,
.set_ringparam = nsim_set_ringparam,
+ .get_channels = nsim_get_channels,
+ .set_channels = nsim_set_channels,
.get_fecparam = nsim_get_fecparam,
.set_fecparam = nsim_set_fecparam,
};
@@ -141,6 +167,8 @@ void nsim_ethtool_init(struct netdevsim *ns)
ns->ethtool.fec.fec = ETHTOOL_FEC_NONE;
ns->ethtool.fec.active_fec = ETHTOOL_FEC_NONE;
+ ns->ethtool.channels = ns->nsim_bus_dev->num_queues;
+
ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
debugfs_create_u32("get_err", 0600, ethtool, &ns->ethtool.get_err);
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
index 04aebdf85747..aa77af4a68df 100644
--- a/drivers/net/netdevsim/health.c
+++ b/drivers/net/netdevsim/health.c
@@ -110,26 +110,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_bool_put(fmsg, true);
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
-
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_u8_put(fmsg, i);
- if (err)
- return err;
- }
err = devlink_fmsg_arr_pair_nest_end(fmsg);
if (err)
return err;
@@ -146,18 +126,6 @@ static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
if (err)
return err;
- err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
- if (err)
- return err;
- for (i = 0; i < 10; i++) {
- err = devlink_fmsg_u64_put(fmsg, i);
- if (err)
- return err;
- }
- err = devlink_fmsg_arr_pair_nest_end(fmsg);
- if (err)
- return err;
-
err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
if (err)
return err;
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
index 50572e0f1f52..e470e3398abc 100644
--- a/drivers/net/netdevsim/netdev.c
+++ b/drivers/net/netdevsim/netdev.c
@@ -82,12 +82,12 @@ nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
/* Only refuse multicast addresses, zero address can mean unset/any. */
- if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac))
+ if (vf >= nsim_dev_get_vfs(nsim_dev) || is_multicast_ether_addr(mac))
return -EINVAL;
- memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+ memcpy(nsim_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
return 0;
}
@@ -96,14 +96,14 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
u16 vlan, u8 qos, __be16 vlan_proto)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7)
+ if (vf >= nsim_dev_get_vfs(nsim_dev) || vlan > 4095 || qos > 7)
return -EINVAL;
- nsim_bus_dev->vfconfigs[vf].vlan = vlan;
- nsim_bus_dev->vfconfigs[vf].qos = qos;
- nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto;
+ nsim_dev->vfconfigs[vf].vlan = vlan;
+ nsim_dev->vfconfigs[vf].qos = qos;
+ nsim_dev->vfconfigs[vf].vlan_proto = vlan_proto;
return 0;
}
@@ -111,18 +111,18 @@ static int nsim_set_vf_vlan(struct net_device *dev, int vf,
static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
if (nsim_esw_mode_is_switchdev(ns->nsim_dev)) {
pr_err("Not supported in switchdev mode. Please use devlink API.\n");
return -EOPNOTSUPP;
}
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
- nsim_bus_dev->vfconfigs[vf].min_tx_rate = min;
- nsim_bus_dev->vfconfigs[vf].max_tx_rate = max;
+ nsim_dev->vfconfigs[vf].min_tx_rate = min;
+ nsim_dev->vfconfigs[vf].max_tx_rate = max;
return 0;
}
@@ -130,11 +130,11 @@ static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
- nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val;
+ nsim_dev->vfconfigs[vf].spoofchk_enabled = val;
return 0;
}
@@ -142,11 +142,11 @@ static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
- nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val;
+ nsim_dev->vfconfigs[vf].rss_query_enabled = val;
return 0;
}
@@ -154,11 +154,11 @@ static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
- nsim_bus_dev->vfconfigs[vf].trusted = val;
+ nsim_dev->vfconfigs[vf].trusted = val;
return 0;
}
@@ -167,22 +167,22 @@ static int
nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
ivi->vf = vf;
- ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state;
- ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate;
- ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate;
- ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan;
- ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto;
- ivi->qos = nsim_bus_dev->vfconfigs[vf].qos;
- memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
- ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled;
- ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted;
- ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled;
+ ivi->linkstate = nsim_dev->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = nsim_dev->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = nsim_dev->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = nsim_dev->vfconfigs[vf].vlan;
+ ivi->vlan_proto = nsim_dev->vfconfigs[vf].vlan_proto;
+ ivi->qos = nsim_dev->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, nsim_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = nsim_dev->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = nsim_dev->vfconfigs[vf].trusted;
+ ivi->rss_query_en = nsim_dev->vfconfigs[vf].rss_query_enabled;
return 0;
}
@@ -190,9 +190,9 @@ nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
{
struct netdevsim *ns = netdev_priv(dev);
- struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+ struct nsim_dev *nsim_dev = ns->nsim_dev;
- if (vf >= nsim_bus_dev->num_vfs)
+ if (vf >= nsim_dev_get_vfs(nsim_dev))
return -EINVAL;
switch (state) {
@@ -204,7 +204,7 @@ static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
return -EINVAL;
}
- nsim_bus_dev->vfconfigs[vf].link_state = state;
+ nsim_dev->vfconfigs[vf].link_state = state;
return 0;
}
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
index 793c86dc5a9c..c49771f27f17 100644
--- a/drivers/net/netdevsim/netdevsim.h
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -62,6 +62,7 @@ struct nsim_ethtool_pauseparam {
struct nsim_ethtool {
u32 get_err;
u32 set_err;
+ u32 channels;
struct nsim_ethtool_pauseparam pauseparam;
struct ethtool_coalesce coalesce;
struct ethtool_ringparam ring;
@@ -216,6 +217,19 @@ struct nsim_dev_port {
struct netdevsim *ns;
};
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
struct nsim_dev {
struct nsim_bus_dev *nsim_bus_dev;
struct nsim_fib_data *fib_data;
@@ -223,8 +237,11 @@ struct nsim_dev {
struct dentry *ddir;
struct dentry *ports_ddir;
struct dentry *take_snapshot;
- struct dentry *max_vfs;
struct dentry *nodes_ddir;
+
+ struct mutex vfs_lock; /* Protects vfconfigs */
+ struct nsim_vf_config *vfconfigs;
+
struct bpf_offload_dev *bpf_dev;
bool bpf_bind_accept;
bool bpf_bind_verifier_accept;
@@ -264,9 +281,6 @@ struct nsim_dev {
u16 esw_mode;
};
-int nsim_esw_legacy_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
-int nsim_esw_switchdev_enable(struct nsim_dev *nsim_dev, struct netlink_ext_ack *extack);
-
static inline bool nsim_esw_mode_is_legacy(struct nsim_dev *nsim_dev)
{
return nsim_dev->esw_mode == DEVLINK_ESWITCH_MODE_LEGACY;
@@ -284,14 +298,18 @@ static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
int nsim_dev_init(void);
void nsim_dev_exit(void);
-int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
-void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
-int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_drv_probe(struct nsim_bus_dev *nsim_bus_dev);
+void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev);
+int nsim_drv_port_add(struct nsim_bus_dev *nsim_bus_dev,
enum nsim_dev_port_type type,
unsigned int port_index);
-int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+int nsim_drv_port_del(struct nsim_bus_dev *nsim_bus_dev,
enum nsim_dev_port_type type,
unsigned int port_index);
+int nsim_drv_configure_vfs(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int num_vfs);
+
+unsigned int nsim_dev_get_vfs(struct nsim_dev *nsim_dev);
struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
struct netlink_ext_ack *extack);
@@ -299,14 +317,6 @@ void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
enum nsim_resource_id res_id, bool max);
-ssize_t nsim_bus_dev_max_vfs_read(struct file *file,
- char __user *data,
- size_t count, loff_t *ppos);
-ssize_t nsim_bus_dev_max_vfs_write(struct file *file,
- const char __user *data,
- size_t count, loff_t *ppos);
-void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev);
-
static inline bool nsim_dev_port_is_pf(struct nsim_dev_port *nsim_dev_port)
{
return nsim_dev_port->port_type == NSIM_DEV_PORT_TYPE_PF;
@@ -335,19 +345,6 @@ static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
}
#endif
-struct nsim_vf_config {
- int link_state;
- u16 min_tx_rate;
- u16 max_tx_rate;
- u16 vlan;
- __be16 vlan_proto;
- u16 qos;
- u8 vf_mac[ETH_ALEN];
- bool spoofchk_enabled;
- bool trusted;
- bool rss_query_enabled;
-};
-
struct nsim_bus_dev {
struct device dev;
struct list_head list;
@@ -358,8 +355,6 @@ struct nsim_bus_dev {
*/
unsigned int max_vfs;
unsigned int num_vfs;
- struct mutex vfs_lock; /* Protects vfconfigs */
- struct nsim_vf_config *vfconfigs;
/* Lock for devlink->reload_enabled in netdevsim module */
struct mutex nsim_bus_reload_lock;
bool in_reload;
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index a5bab614ff84..98ca6b18415e 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -428,7 +428,7 @@ static int ntb_netdev_probe(struct device *client_dev)
ndev->watchdog_timeo = msecs_to_jiffies(NTB_TX_TIMEOUT_MS);
eth_random_addr(ndev->perm_addr);
- memcpy(ndev->dev_addr, ndev->perm_addr, ndev->addr_len);
+ dev_addr_set(ndev, ndev->perm_addr);
ndev->netdev_ops = &ntb_netdev_ops;
ndev->ethtool_ops = &ntb_ethtool_ops;
diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c
index 7de631f5356f..cd6742e6ba8b 100644
--- a/drivers/net/pcs/pcs-xpcs.c
+++ b/drivers/net/pcs/pcs-xpcs.c
@@ -646,7 +646,7 @@ void xpcs_validate(struct dw_xpcs *xpcs, unsigned long *supported,
if (state->interface == PHY_INTERFACE_MODE_NA)
return;
- bitmap_zero(xpcs_supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_zero(xpcs_supported);
compat = xpcs_find_compat(xpcs->id, state->interface);
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index bdac087058b2..dae95d9a07e8 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -33,14 +33,17 @@
#define AT803X_SFC_DISABLE_JABBER BIT(0)
#define AT803X_SPECIFIC_STATUS 0x11
-#define AT803X_SS_SPEED_MASK (3 << 14)
-#define AT803X_SS_SPEED_1000 (2 << 14)
-#define AT803X_SS_SPEED_100 (1 << 14)
-#define AT803X_SS_SPEED_10 (0 << 14)
+#define AT803X_SS_SPEED_MASK GENMASK(15, 14)
+#define AT803X_SS_SPEED_1000 2
+#define AT803X_SS_SPEED_100 1
+#define AT803X_SS_SPEED_10 0
#define AT803X_SS_DUPLEX BIT(13)
#define AT803X_SS_SPEED_DUPLEX_RESOLVED BIT(11)
#define AT803X_SS_MDIX BIT(6)
+#define QCA808X_SS_SPEED_MASK GENMASK(9, 7)
+#define QCA808X_SS_SPEED_2500 4
+
#define AT803X_INTR_ENABLE 0x12
#define AT803X_INTR_ENABLE_AUTONEG_ERR BIT(15)
#define AT803X_INTR_ENABLE_SPEED_CHANGED BIT(14)
@@ -70,7 +73,8 @@
#define AT803X_CDT_STATUS_DELTA_TIME_MASK GENMASK(7, 0)
#define AT803X_LED_CONTROL 0x18
-#define AT803X_DEVICE_ADDR 0x03
+#define AT803X_PHY_MMD3_WOL_CTRL 0x8012
+#define AT803X_WOL_EN BIT(5)
#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
#define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B
#define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A
@@ -86,15 +90,22 @@
#define AT803X_PSSR 0x11 /*PHY-Specific Status Register*/
#define AT803X_PSSR_MR_AN_COMPLETE 0x0200
-#define AT803X_DEBUG_REG_0 0x00
+#define AT803X_DEBUG_ANALOG_TEST_CTRL 0x00
+#define QCA8327_DEBUG_MANU_CTRL_EN BIT(2)
+#define QCA8337_DEBUG_MANU_CTRL_EN GENMASK(3, 2)
#define AT803X_DEBUG_RX_CLK_DLY_EN BIT(15)
-#define AT803X_DEBUG_REG_5 0x05
+#define AT803X_DEBUG_SYSTEM_CTRL_MODE 0x05
#define AT803X_DEBUG_TX_CLK_DLY_EN BIT(8)
+#define AT803X_DEBUG_REG_HIB_CTRL 0x0b
+#define AT803X_DEBUG_HIB_CTRL_SEL_RST_80U BIT(10)
+#define AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE BIT(13)
+
#define AT803X_DEBUG_REG_3C 0x3C
-#define AT803X_DEBUG_REG_3D 0x3D
+#define AT803X_DEBUG_REG_GREEN 0x3D
+#define AT803X_DEBUG_GATE_CLK_IN1000 BIT(6)
#define AT803X_DEBUG_REG_1F 0x1F
#define AT803X_DEBUG_PLL_ON BIT(2)
@@ -150,8 +161,12 @@
#define ATH8035_PHY_ID 0x004dd072
#define AT8030_PHY_ID_MASK 0xffffffef
-#define QCA8327_PHY_ID 0x004dd034
+#define QCA8081_PHY_ID 0x004dd101
+
+#define QCA8327_A_PHY_ID 0x004dd033
+#define QCA8327_B_PHY_ID 0x004dd034
#define QCA8337_PHY_ID 0x004dd036
+#define QCA9561_PHY_ID 0x004dd042
#define QCA8K_PHY_ID_MASK 0xffffffff
#define QCA8K_DEVFLAGS_REVISION_MASK GENMASK(2, 0)
@@ -163,7 +178,84 @@
#define AT803X_KEEP_PLL_ENABLED BIT(0)
#define AT803X_DISABLE_SMARTEEE BIT(1)
-MODULE_DESCRIPTION("Qualcomm Atheros AR803x PHY driver");
+/* ADC threshold */
+#define QCA808X_PHY_DEBUG_ADC_THRESHOLD 0x2c80
+#define QCA808X_ADC_THRESHOLD_MASK GENMASK(7, 0)
+#define QCA808X_ADC_THRESHOLD_80MV 0
+#define QCA808X_ADC_THRESHOLD_100MV 0xf0
+#define QCA808X_ADC_THRESHOLD_200MV 0x0f
+#define QCA808X_ADC_THRESHOLD_300MV 0xff
+
+/* CLD control */
+#define QCA808X_PHY_MMD3_ADDR_CLD_CTRL7 0x8007
+#define QCA808X_8023AZ_AFE_CTRL_MASK GENMASK(8, 4)
+#define QCA808X_8023AZ_AFE_EN 0x90
+
+/* AZ control */
+#define QCA808X_PHY_MMD3_AZ_TRAINING_CTRL 0x8008
+#define QCA808X_MMD3_AZ_TRAINING_VAL 0x1c32
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB 0x8014
+#define QCA808X_MSE_THRESHOLD_20DB_VALUE 0x529
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB 0x800E
+#define QCA808X_MSE_THRESHOLD_17DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB 0x801E
+#define QCA808X_MSE_THRESHOLD_27DB_VALUE 0x419
+
+#define QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB 0x8020
+#define QCA808X_MSE_THRESHOLD_28DB_VALUE 0x341
+
+#define QCA808X_PHY_MMD7_TOP_OPTION1 0x901c
+#define QCA808X_TOP_OPTION1_DATA 0x0
+
+#define QCA808X_PHY_MMD3_DEBUG_1 0xa100
+#define QCA808X_MMD3_DEBUG_1_VALUE 0x9203
+#define QCA808X_PHY_MMD3_DEBUG_2 0xa101
+#define QCA808X_MMD3_DEBUG_2_VALUE 0x48ad
+#define QCA808X_PHY_MMD3_DEBUG_3 0xa103
+#define QCA808X_MMD3_DEBUG_3_VALUE 0x1698
+#define QCA808X_PHY_MMD3_DEBUG_4 0xa105
+#define QCA808X_MMD3_DEBUG_4_VALUE 0x8001
+#define QCA808X_PHY_MMD3_DEBUG_5 0xa106
+#define QCA808X_MMD3_DEBUG_5_VALUE 0x1111
+#define QCA808X_PHY_MMD3_DEBUG_6 0xa011
+#define QCA808X_MMD3_DEBUG_6_VALUE 0x5f85
+
+/* master/slave seed config */
+#define QCA808X_PHY_DEBUG_LOCAL_SEED 9
+#define QCA808X_MASTER_SLAVE_SEED_ENABLE BIT(1)
+#define QCA808X_MASTER_SLAVE_SEED_CFG GENMASK(12, 2)
+#define QCA808X_MASTER_SLAVE_SEED_RANGE 0x32
+
+/* Hibernation yields lower power consumption than normal operation mode.
+ * When the copper cable is unplugged, the PHY enters hibernation mode in about 10s.
+ */
+#define QCA808X_DBG_AN_TEST 0xb
+#define QCA808X_HIBERNATION_EN BIT(15)
+
+#define QCA808X_CDT_ENABLE_TEST BIT(15)
+#define QCA808X_CDT_INTER_CHECK_DIS BIT(13)
+#define QCA808X_CDT_LENGTH_UNIT BIT(10)
+
+#define QCA808X_MMD3_CDT_STATUS 0x8064
+#define QCA808X_MMD3_CDT_DIAG_PAIR_A 0x8065
+#define QCA808X_MMD3_CDT_DIAG_PAIR_B 0x8066
+#define QCA808X_MMD3_CDT_DIAG_PAIR_C 0x8067
+#define QCA808X_MMD3_CDT_DIAG_PAIR_D 0x8068
+#define QCA808X_CDT_DIAG_LENGTH GENMASK(7, 0)
+
+#define QCA808X_CDT_CODE_PAIR_A GENMASK(15, 12)
+#define QCA808X_CDT_CODE_PAIR_B GENMASK(11, 8)
+#define QCA808X_CDT_CODE_PAIR_C GENMASK(7, 4)
+#define QCA808X_CDT_CODE_PAIR_D GENMASK(3, 0)
+#define QCA808X_CDT_STATUS_STAT_FAIL 0
+#define QCA808X_CDT_STATUS_STAT_NORMAL 1
+#define QCA808X_CDT_STATUS_STAT_OPEN 2
+#define QCA808X_CDT_STATUS_STAT_SHORT 3
+
+MODULE_DESCRIPTION("Qualcomm Atheros AR803x and QCA808X PHY driver");
MODULE_AUTHOR("Matus Ujhelyi");
MODULE_LICENSE("GPL");
@@ -276,25 +368,25 @@ static int at803x_read_page(struct phy_device *phydev)
static int at803x_enable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0,
AT803X_DEBUG_RX_CLK_DLY_EN);
}
static int at803x_enable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5, 0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0,
AT803X_DEBUG_TX_CLK_DLY_EN);
}
static int at803x_disable_rx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_0,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
AT803X_DEBUG_RX_CLK_DLY_EN, 0);
}
static int at803x_disable_tx_delay(struct phy_device *phydev)
{
- return at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_5,
+ return at803x_debug_reg_mask(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE,
AT803X_DEBUG_TX_CLK_DLY_EN, 0);
}
@@ -327,9 +419,9 @@ static int at803x_set_wol(struct phy_device *phydev,
{
struct net_device *ndev = phydev->attached_dev;
const u8 *mac;
- int ret;
- u32 value;
- unsigned int i, offsets[] = {
+ int ret, irq_enabled;
+ unsigned int i;
+ const unsigned int offsets[] = {
AT803X_LOC_MAC_ADDR_32_47_OFFSET,
AT803X_LOC_MAC_ADDR_16_31_OFFSET,
AT803X_LOC_MAC_ADDR_0_15_OFFSET,
@@ -345,37 +437,63 @@ static int at803x_set_wol(struct phy_device *phydev,
return -EINVAL;
for (i = 0; i < 3; i++)
- phy_write_mmd(phydev, AT803X_DEVICE_ADDR, offsets[i],
+ phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- value |= AT803X_INTR_ENABLE_WOL;
- ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ /* Enable WOL function */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ if (ret)
+ return ret;
+ /* Enable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
if (ret)
return ret;
- value = phy_read(phydev, AT803X_INTR_STATUS);
} else {
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- value &= (~AT803X_INTR_ENABLE_WOL);
- ret = phy_write(phydev, AT803X_INTR_ENABLE, value);
+ /* Disable WoL function */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+ if (ret)
+ return ret;
+ /* Disable WOL interrupt */
+ ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
if (ret)
return ret;
- value = phy_read(phydev, AT803X_INTR_STATUS);
}
- return ret;
+ /* Clear WOL status */
+ ret = phy_read(phydev, AT803X_INTR_STATUS);
+ if (ret < 0)
+ return ret;
+
+ /* When the PHY is in interrupt mode, check whether any interrupt other
+ * than WOL was triggered; only the interrupts enabled by AT803X_INTR_ENABLE
+ * are passed up to the interrupt pin.
+ */
+ irq_enabled = phy_read(phydev, AT803X_INTR_ENABLE);
+ if (irq_enabled < 0)
+ return irq_enabled;
+
+ irq_enabled &= ~AT803X_INTR_ENABLE_WOL;
+ if (ret & irq_enabled && !phy_polling_mode(phydev))
+ phy_trigger_machine(phydev);
+
+ return 0;
}
static void at803x_get_wol(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
- u32 value;
+ int value;
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- value = phy_read(phydev, AT803X_INTR_ENABLE);
- if (value & AT803X_INTR_ENABLE_WOL)
+ value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL);
+ if (value < 0)
+ return;
+
+ if (value & AT803X_WOL_EN)
wol->wolopts |= WAKE_MAGIC;
}
@@ -703,6 +821,15 @@ static int at803x_get_features(struct phy_device *phydev)
if (err)
return err;
+ if (phydev->drv->phy_id == QCA8081_PHY_ID) {
+ err = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_NG_EXTABLE);
+ if (err < 0)
+ return err;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported,
+ err & MDIO_PMA_NG_EXTABLE_2_5GBT);
+ }
+
if (phydev->drv->phy_id != ATH8031_PHY_ID)
return 0;
@@ -921,27 +1048,9 @@ static void at803x_link_change_notify(struct phy_device *phydev)
}
}
-static int at803x_read_status(struct phy_device *phydev)
+static int at803x_read_specific_status(struct phy_device *phydev)
{
- int ss, err, old_link = phydev->link;
-
- /* Update the link, but return if there was an error */
- err = genphy_update_link(phydev);
- if (err)
- return err;
-
- /* why bother the PHY if nothing can have changed */
- if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
- return 0;
-
- phydev->speed = SPEED_UNKNOWN;
- phydev->duplex = DUPLEX_UNKNOWN;
- phydev->pause = 0;
- phydev->asym_pause = 0;
-
- err = genphy_read_lpa(phydev);
- if (err < 0)
- return err;
+ int ss;
/* Read the AT8035 PHY-Specific Status register, which indicates the
* speed and duplex that the PHY is actually using, irrespective of
@@ -952,13 +1061,19 @@ static int at803x_read_status(struct phy_device *phydev)
return ss;
if (ss & AT803X_SS_SPEED_DUPLEX_RESOLVED) {
- int sfc;
+ int sfc, speed;
sfc = phy_read(phydev, AT803X_SPECIFIC_FUNCTION_CONTROL);
if (sfc < 0)
return sfc;
- switch (ss & AT803X_SS_SPEED_MASK) {
+ /* qca8081 uses different bits for the speed value than at803x does */
+ if (phydev->drv->phy_id == QCA8081_PHY_ID)
+ speed = FIELD_GET(QCA808X_SS_SPEED_MASK, ss);
+ else
+ speed = FIELD_GET(AT803X_SS_SPEED_MASK, ss);
+
+ switch (speed) {
case AT803X_SS_SPEED_10:
phydev->speed = SPEED_10;
break;
@@ -968,6 +1083,9 @@ static int at803x_read_status(struct phy_device *phydev)
case AT803X_SS_SPEED_1000:
phydev->speed = SPEED_1000;
break;
+ case QCA808X_SS_SPEED_2500:
+ phydev->speed = SPEED_2500;
+ break;
}
if (ss & AT803X_SS_DUPLEX)
phydev->duplex = DUPLEX_FULL;
@@ -992,6 +1110,35 @@ static int at803x_read_status(struct phy_device *phydev)
}
}
+ return 0;
+}
+
+static int at803x_read_status(struct phy_device *phydev)
+{
+ int err, old_link = phydev->link;
+
+ /* Update the link, but return if there was an error */
+ err = genphy_update_link(phydev);
+ if (err)
+ return err;
+
+ /* why bother the PHY if nothing can have changed */
+ if (phydev->autoneg == AUTONEG_ENABLE && old_link && phydev->link)
+ return 0;
+
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
+ phydev->pause = 0;
+ phydev->asym_pause = 0;
+
+ err = genphy_read_lpa(phydev);
+ if (err < 0)
+ return err;
+
+ err = at803x_read_specific_status(phydev);
+ if (err < 0)
+ return err;
+
if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete)
phy_resolve_aneg_pause(phydev);
@@ -1039,7 +1186,30 @@ static int at803x_config_aneg(struct phy_device *phydev)
return ret;
}
- return genphy_config_aneg(phydev);
+ /* Set ret to 0 by default, so that auto-negotiation is not restarted
+ * when __genphy_config_aneg is called later.
+ */
+ ret = 0;
+
+ if (phydev->drv->phy_id == QCA8081_PHY_ID) {
+ int phy_ctrl = 0;
+
+ /* In forced mode the MII_BMCR register also needs to be configured,
+ * so genphy_config_aneg is still needed.
+ */
+ if (phydev->autoneg == AUTONEG_DISABLE)
+ genphy_c45_pma_setup_forced(phydev);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->advertising))
+ phy_ctrl = MDIO_AN_10GBT_CTRL_ADV2_5G;
+
+ ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADV2_5G, phy_ctrl);
+ if (ret < 0)
+ return ret;
+ }
+
+ return __genphy_config_aneg(phydev, ret);
}
static int at803x_get_downshift(struct phy_device *phydev, u8 *d)
@@ -1175,8 +1345,14 @@ static int at803x_cdt_start(struct phy_device *phydev, int pair)
{
u16 cdt;
- cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
- AT803X_CDT_ENABLE_TEST;
+ /* qca8081 uses a different bit (bit 15) to enable the CDT test */
+ if (phydev->drv->phy_id == QCA8081_PHY_ID)
+ cdt = QCA808X_CDT_ENABLE_TEST |
+ QCA808X_CDT_LENGTH_UNIT |
+ QCA808X_CDT_INTER_CHECK_DIS;
+ else
+ cdt = FIELD_PREP(AT803X_CDT_MDI_PAIR_MASK, pair) |
+ AT803X_CDT_ENABLE_TEST;
return phy_write(phydev, AT803X_CDT, cdt);
}
@@ -1184,10 +1360,16 @@ static int at803x_cdt_start(struct phy_device *phydev, int pair)
static int at803x_cdt_wait_for_completion(struct phy_device *phydev)
{
int val, ret;
+ u16 cdt_en;
+
+ if (phydev->drv->phy_id == QCA8081_PHY_ID)
+ cdt_en = QCA808X_CDT_ENABLE_TEST;
+ else
+ cdt_en = AT803X_CDT_ENABLE_TEST;
/* One test run takes about 25ms */
ret = phy_read_poll_timeout(phydev, AT803X_CDT, val,
- !(val & AT803X_CDT_ENABLE_TEST),
+ !(val & cdt_en),
30000, 100000, true);
return ret < 0 ? ret : 0;
@@ -1236,7 +1418,8 @@ static int at803x_cable_test_get_status(struct phy_device *phydev,
int pair, ret;
if (phydev->phy_id == ATH9331_PHY_ID ||
- phydev->phy_id == ATH8032_PHY_ID)
+ phydev->phy_id == ATH8032_PHY_ID ||
+ phydev->phy_id == QCA9561_PHY_ID)
pair_mask = 0x3;
else
pair_mask = 0xf;
@@ -1276,7 +1459,8 @@ static int at803x_cable_test_start(struct phy_device *phydev)
phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
if (phydev->phy_id != ATH9331_PHY_ID &&
- phydev->phy_id != ATH8032_PHY_ID)
+ phydev->phy_id != ATH8032_PHY_ID &&
+ phydev->phy_id != QCA9561_PHY_ID)
phy_write(phydev, MII_CTRL1000, 0);
/* we do all the (time consuming) work later */
@@ -1292,9 +1476,9 @@ static int qca83xx_config_init(struct phy_device *phydev)
switch (switch_revision) {
case 1:
/* For 100M waveform */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_0, 0x02ea);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL, 0x02ea);
/* Turn on Gigabit clock */
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x68a0);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x68a0);
break;
case 2:
@@ -1302,12 +1486,387 @@ static int qca83xx_config_init(struct phy_device *phydev)
fallthrough;
case 4:
phy_write_mmd(phydev, MDIO_MMD_PCS, MDIO_AZ_DEBUG, 0x803f);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3D, 0x6860);
- at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_5, 0x2c46);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_GREEN, 0x6860);
+ at803x_debug_reg_write(phydev, AT803X_DEBUG_SYSTEM_CTRL_MODE, 0x2c46);
at803x_debug_reg_write(phydev, AT803X_DEBUG_REG_3C, 0x6000);
break;
}
+ /* QCA8327 requires the DAC amplitude adjustment for 100M to be set to +6%.
+ * Disable it on init and enable it only at 100M speed, following the
+ * original QCA source code.
+ */
+ if (phydev->drv->phy_id == QCA8327_A_PHY_ID ||
+ phydev->drv->phy_id == QCA8327_B_PHY_ID)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+
+ /* Following the original QCA source code, set the port to prefer master */
+ phy_set_bits(phydev, MII_CTRL1000, CTL1000_PREFER_MASTER);
+
+ return 0;
+}
+
+static void qca83xx_link_change_notify(struct phy_device *phydev)
+{
+ /* QCA8337 doesn't require DAC amplitude adjustment */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID)
+ return;
+
+ /* Set DAC amplitude adjustment to +6% for 100M while the link is running */
+ if (phydev->state == PHY_RUNNING) {
+ if (phydev->speed == SPEED_100)
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN,
+ QCA8327_DEBUG_MANU_CTRL_EN);
+ } else {
+ /* Reset DAC Amplitude adjustment */
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_ANALOG_TEST_CTRL,
+ QCA8327_DEBUG_MANU_CTRL_EN, 0);
+ }
+}
+
+static int qca83xx_resume(struct phy_device *phydev)
+{
+ int ret, val;
+
+ /* Skip reset if not suspended */
+ if (!phydev->suspended)
+ return 0;
+
+ /* Reinit the port, resetting the values set by suspend */
+ qca83xx_config_init(phydev);
+
+ /* Reset the port on port resume */
+ phy_set_bits(phydev, MII_BMCR, BMCR_RESET | BMCR_ANENABLE);
+
+ /* On resume from suspend the switch executes a reset and
+ * restarts auto-negotiation. Wait for the reset to complete.
+ */
+ ret = phy_read_poll_timeout(phydev, MII_BMCR, val, !(val & BMCR_RESET),
+ 50000, 600000, true);
+ if (ret)
+ return ret;
+
+ msleep(1);
+
+ return 0;
+}
+
+static int qca83xx_suspend(struct phy_device *phydev)
+{
+ u16 mask = 0;
+
+ /* Only QCA8337 supports actual suspend.
+ * QCA8327 causes port unreliability when PHY suspend
+ * is set.
+ */
+ if (phydev->drv->phy_id == QCA8337_PHY_ID) {
+ genphy_suspend(phydev);
+ } else {
+ mask |= ~(BMCR_SPEED1000 | BMCR_FULLDPLX);
+ phy_modify(phydev, MII_BMCR, mask, 0);
+ }
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_GREEN,
+ AT803X_DEBUG_GATE_CLK_IN1000, 0);
+
+ at803x_debug_reg_mask(phydev, AT803X_DEBUG_REG_HIB_CTRL,
+ AT803X_DEBUG_HIB_CTRL_EN_ANY_CHANGE |
+ AT803X_DEBUG_HIB_CTRL_SEL_RST_80U, 0);
+
+ return 0;
+}
+
+static int qca808x_phy_fast_retrain_config(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable fast retrain */
+ ret = genphy_c45_fast_retrain(phydev, true);
+ if (ret)
+ return ret;
+
+ phy_write_mmd(phydev, MDIO_MMD_AN, QCA808X_PHY_MMD7_TOP_OPTION1,
+ QCA808X_TOP_OPTION1_DATA);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_20DB,
+ QCA808X_MSE_THRESHOLD_20DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_17DB,
+ QCA808X_MSE_THRESHOLD_17DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_27DB,
+ QCA808X_MSE_THRESHOLD_27DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PMAPMD, QCA808X_PHY_MMD1_MSE_THRESHOLD_28DB,
+ QCA808X_MSE_THRESHOLD_28DB_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_1,
+ QCA808X_MMD3_DEBUG_1_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_4,
+ QCA808X_MMD3_DEBUG_4_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_5,
+ QCA808X_MMD3_DEBUG_5_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_3,
+ QCA808X_MMD3_DEBUG_3_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_6,
+ QCA808X_MMD3_DEBUG_6_VALUE);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_DEBUG_2,
+ QCA808X_MMD3_DEBUG_2_VALUE);
+
+ return 0;
+}
+
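+/* Note (inferred from the comments at the call sites): 1000/2500BASE-T
+ * auto-negotiation resolves the master/slave role from a random seed, so
+ * programming a lower-than-default seed biases this PHY towards linking up
+ * as slave. qca808x_read_status() re-rolls the seed whenever the link fails
+ * to come up, unless a master/slave configuration fault is reported.
+ */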
+static int qca808x_phy_ms_random_seed_set(struct phy_device *phydev)
+{
+ u16 seed_value = (prandom_u32() % QCA808X_MASTER_SLAVE_SEED_RANGE);
+
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_CFG,
+ FIELD_PREP(QCA808X_MASTER_SLAVE_SEED_CFG, seed_value));
+}
+
+static int qca808x_phy_ms_seed_enable(struct phy_device *phydev, bool enable)
+{
+ u16 seed_enable = 0;
+
+ if (enable)
+ seed_enable = QCA808X_MASTER_SLAVE_SEED_ENABLE;
+
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_LOCAL_SEED,
+ QCA808X_MASTER_SLAVE_SEED_ENABLE, seed_enable);
+}
+
+static int qca808x_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Activate ADC and VGA for 802.3az at the 1000M and 100M link speeds */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, QCA808X_PHY_MMD3_ADDR_CLD_CTRL7,
+ QCA808X_8023AZ_AFE_CTRL_MASK, QCA808X_8023AZ_AFE_EN);
+ if (ret)
+ return ret;
+
+ /* Adjust the 802.3az threshold for the 1000M link */
+ ret = phy_write_mmd(phydev, MDIO_MMD_PCS,
+ QCA808X_PHY_MMD3_AZ_TRAINING_CTRL, QCA808X_MMD3_AZ_TRAINING_VAL);
+ if (ret)
+ return ret;
+
+ /* Configure fast retrain for the 2500M link */
+ ret = qca808x_phy_fast_retrain_config(phydev);
+ if (ret)
+ return ret;
+
+ /* Configure a lower random seed so that the PHY links up in slave mode */
+ ret = qca808x_phy_ms_random_seed_set(phydev);
+ if (ret)
+ return ret;
+
+ /* Enable seed */
+ ret = qca808x_phy_ms_seed_enable(phydev, true);
+ if (ret)
+ return ret;
+
+ /* Configure the ADC threshold as 100mV for the 10M link */
+ return at803x_debug_reg_mask(phydev, QCA808X_PHY_DEBUG_ADC_THRESHOLD,
+ QCA808X_ADC_THRESHOLD_MASK, QCA808X_ADC_THRESHOLD_100MV);
+}
+
+static int qca808x_read_status(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
+ if (ret < 0)
+ return ret;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->lp_advertising,
+ ret & MDIO_AN_10GBT_STAT_LP2_5G);
+
+ ret = genphy_read_status(phydev);
+ if (ret)
+ return ret;
+
+ ret = at803x_read_specific_status(phydev);
+ if (ret < 0)
+ return ret;
+
+ if (phydev->link && phydev->speed == SPEED_2500)
+ phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+ else
+ phydev->interface = PHY_INTERFACE_MODE_SMII;
+
+ /* Generate a lower random seed value so that the PHY links up as SLAVE easily,
+ * unless a master/slave configuration fault has been detected.
+ * The reason for not putting this code into link_change_notify is the corner
+ * case where the link partner is also a qca8081 PHY and both ends are
+ * configured with the same seed value: the link can't come up and no link
+ * change occurs.
+ */
+ if (!phydev->link) {
+ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) {
+ qca808x_phy_ms_seed_enable(phydev, false);
+ } else {
+ qca808x_phy_ms_random_seed_set(phydev);
+ qca808x_phy_ms_seed_enable(phydev, true);
+ }
+ }
+
+ return 0;
+}
+
+static int qca808x_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+
+ return qca808x_phy_ms_seed_enable(phydev, true);
+}
+
+static bool qca808x_cdt_fault_length_valid(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_SHORT:
+ case QCA808X_CDT_STATUS_STAT_OPEN:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int qca808x_cable_test_result_trans(int cdt_code)
+{
+ switch (cdt_code) {
+ case QCA808X_CDT_STATUS_STAT_NORMAL:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case QCA808X_CDT_STATUS_STAT_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ case QCA808X_CDT_STATUS_STAT_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case QCA808X_CDT_STATUS_STAT_FAIL:
+ default:
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int qca808x_cdt_fault_length(struct phy_device *phydev, int pair)
+{
+ int val;
+ u32 cdt_length_reg = 0;
+
+ switch (pair) {
+ case ETHTOOL_A_CABLE_PAIR_A:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_A;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_B:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_B;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_C:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_C;
+ break;
+ case ETHTOOL_A_CABLE_PAIR_D:
+ cdt_length_reg = QCA808X_MMD3_CDT_DIAG_PAIR_D;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, cdt_length_reg);
+ if (val < 0)
+ return val;
+
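+ /* The raw CDT length field is scaled by 824/10 below; assuming the ethtool
+ * fault length is expected in centimetres, this is roughly 0.824 m of cable
+ * per count (the scaling factor is taken from the constant in this patch,
+ * not from a datasheet).
+ */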
+ return (FIELD_GET(QCA808X_CDT_DIAG_LENGTH, val) * 824) / 10;
+}
+
+static int qca808x_cable_test_start(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Perform CDT with the following configuration:
+ * 1. disable hibernation.
+ * 2. force the PHY to work in MDI mode.
+ * 3. force the PHY to work in 1000BaseT.
+ * 4. configure the thresholds.
+ */
+
+ ret = at803x_debug_reg_mask(phydev, QCA808X_DBG_AN_TEST, QCA808X_HIBERNATION_EN, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = at803x_config_mdix(phydev, ETH_TP_MDI);
+ if (ret < 0)
+ return ret;
+
+ /* Forcing 1000BaseT requires configuring both PMA/PMD and MII_BMCR */
+ phydev->duplex = DUPLEX_FULL;
+ phydev->speed = SPEED_1000;
+ ret = genphy_c45_pma_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ ret = genphy_setup_forced(phydev);
+ if (ret < 0)
+ return ret;
+
+ /* configure the thresholds for open, short, pair ok test */
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8074, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8076, 0xc040);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8077, 0xa060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x8078, 0xc050);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807a, 0xc060);
+ phy_write_mmd(phydev, MDIO_MMD_PCS, 0x807e, 0xb060);
+
+ return 0;
+}
+
+static int qca808x_cable_test_get_status(struct phy_device *phydev, bool *finished)
+{
+ int ret, val;
+ int pair_a, pair_b, pair_c, pair_d;
+
+ *finished = false;
+
+ ret = at803x_cdt_start(phydev, 0);
+ if (ret)
+ return ret;
+
+ ret = at803x_cdt_wait_for_completion(phydev);
+ if (ret)
+ return ret;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, QCA808X_MMD3_CDT_STATUS);
+ if (val < 0)
+ return val;
+
+ pair_a = FIELD_GET(QCA808X_CDT_CODE_PAIR_A, val);
+ pair_b = FIELD_GET(QCA808X_CDT_CODE_PAIR_B, val);
+ pair_c = FIELD_GET(QCA808X_CDT_CODE_PAIR_C, val);
+ pair_d = FIELD_GET(QCA808X_CDT_CODE_PAIR_D, val);
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ qca808x_cable_test_result_trans(pair_a));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ qca808x_cable_test_result_trans(pair_b));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ qca808x_cable_test_result_trans(pair_c));
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ qca808x_cable_test_result_trans(pair_d));
+
+ if (qca808x_cdt_fault_length_valid(pair_a))
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_A));
+ if (qca808x_cdt_fault_length_valid(pair_b))
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B,
+ qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_B));
+ if (qca808x_cdt_fault_length_valid(pair_c))
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C,
+ qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_C));
+ if (qca808x_cdt_fault_length_valid(pair_d))
+ ethnl_cable_test_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D,
+ qca808x_cdt_fault_length(phydev, ETHTOOL_A_CABLE_PAIR_D));
+
+ *finished = true;
+
return 0;
}
@@ -1408,18 +1967,88 @@ static struct phy_driver at803x_driver[] = {
.soft_reset = genphy_soft_reset,
.config_aneg = at803x_config_aneg,
}, {
+ /* Qualcomm Atheros QCA9561 */
+ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID),
+ .name = "Qualcomm Atheros QCA9561 built-in PHY",
+ .suspend = at803x_suspend,
+ .resume = at803x_resume,
+ .flags = PHY_POLL_CABLE_TEST,
+ /* PHY_BASIC_FEATURES */
+ .config_intr = &at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .cable_test_start = at803x_cable_test_start,
+ .cable_test_get_status = at803x_cable_test_get_status,
+ .read_status = at803x_read_status,
+ .soft_reset = genphy_soft_reset,
+ .config_aneg = at803x_config_aneg,
+}, {
/* QCA8337 */
- .phy_id = QCA8337_PHY_ID,
- .phy_id_mask = QCA8K_PHY_ID_MASK,
- .name = "QCA PHY 8337",
+ .phy_id = QCA8337_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8337 internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-A from switch QCA8327-AL1A */
+ .phy_id = QCA8327_A_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-A internal PHY",
/* PHY_GBIT_FEATURES */
- .probe = at803x_probe,
- .flags = PHY_IS_INTERNAL,
- .config_init = qca83xx_config_init,
- .soft_reset = genphy_soft_reset,
- .get_sset_count = at803x_get_sset_count,
- .get_strings = at803x_get_strings,
- .get_stats = at803x_get_stats,
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* QCA8327-B from switch QCA8327-BL1A */
+ .phy_id = QCA8327_B_PHY_ID,
+ .phy_id_mask = QCA8K_PHY_ID_MASK,
+ .name = "Qualcomm Atheros 8327-B internal PHY",
+ /* PHY_GBIT_FEATURES */
+ .link_change_notify = qca83xx_link_change_notify,
+ .probe = at803x_probe,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = qca83xx_config_init,
+ .soft_reset = genphy_soft_reset,
+ .get_sset_count = at803x_get_sset_count,
+ .get_strings = at803x_get_strings,
+ .get_stats = at803x_get_stats,
+ .suspend = qca83xx_suspend,
+ .resume = qca83xx_resume,
+}, {
+ /* Qualcomm QCA8081 */
+ PHY_ID_MATCH_EXACT(QCA8081_PHY_ID),
+ .name = "Qualcomm QCA8081",
+ .flags = PHY_POLL_CABLE_TEST,
+ .config_intr = at803x_config_intr,
+ .handle_interrupt = at803x_handle_interrupt,
+ .get_tunable = at803x_get_tunable,
+ .set_tunable = at803x_set_tunable,
+ .set_wol = at803x_set_wol,
+ .get_wol = at803x_get_wol,
+ .get_features = at803x_get_features,
+ .config_aneg = at803x_config_aneg,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .read_status = qca808x_read_status,
+ .config_init = qca808x_config_init,
+ .soft_reset = qca808x_soft_reset,
+ .cable_test_start = qca808x_cable_test_start,
+ .cable_test_get_status = qca808x_cable_test_get_status,
}, };
module_phy_driver(at803x_driver);
@@ -1430,6 +2059,11 @@ static struct mdio_device_id __maybe_unused atheros_tbl[] = {
{ PHY_ID_MATCH_EXACT(ATH8032_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH8035_PHY_ID) },
{ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8337_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_A_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8327_B_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA9561_PHY_ID) },
+ { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID) },
{ }
};
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 27b6a3f507ae..75593e7d1118 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -415,6 +415,190 @@ static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
return bcm7xxx_28nm_ephy_apd_enable(phydev);
}
+static int bcm7xxx_16nm_ephy_afe_config(struct phy_device *phydev)
+{
+ int tmp, rcalcode, rcalnewcodelp, rcalnewcode11, rcalnewcode11d2;
+
+ /* Reset PHY */
+ tmp = genphy_soft_reset(phydev);
+ if (tmp)
+ return tmp;
+
+ /* Reset AFE and PLL */
+ bcm_phy_write_exp_sel(phydev, 0x0003, 0x0006);
+ /* Clear reset */
+ bcm_phy_write_exp_sel(phydev, 0x0003, 0x0000);
+
+ /* Write PLL/AFE control register to select 54MHz crystal */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0000);
+ bcm_phy_write_misc(phydev, 0x0031, 0x0000, 0x044a);
+
+ /* Change Ka,Kp,Ki to pdiv=1 */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0002, 0x71a1);
+ /* Configuration override */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0001, 0x8000);
+
+ /* Change PLL_NDIV and PLL_NUDGE */
+ bcm_phy_write_misc(phydev, 0x0031, 0x0001, 0x2f68);
+ bcm_phy_write_misc(phydev, 0x0031, 0x0002, 0x0000);
+
+ /* Reference frequency is 54MHz, config_mode[15:14] = 3 (low
+ * phase)
+ */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0003, 0xc036);
+
+ /* Initialize bypass mode */
+ bcm_phy_write_misc(phydev, 0x0032, 0x0003, 0x0000);
+ /* Bypass code, default: VCOCLK enabled */
+ bcm_phy_write_misc(phydev, 0x0033, 0x0000, 0x0002);
+ /* LDOs at default setting */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0002, 0x01c0);
+ /* Release PLL reset */
+ bcm_phy_write_misc(phydev, 0x0030, 0x0001, 0x0001);
+
+ /* Bandgap curvature correction to correct default */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0000, 0x0010);
+
+ /* Run RCAL */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x0038);
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003b);
+ udelay(2);
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, 0x003f);
+ mdelay(5);
+
+ /* AFE_CAL_CONFIG_0, Vref=1000, Target=10, averaging enabled */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x1c82);
+ /* AFE_CAL_CONFIG_0, no reset and analog powerup */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e82);
+ udelay(2);
+ /* AFE_CAL_CONFIG_0, start calibration */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f82);
+ udelay(100);
+ /* AFE_CAL_CONFIG_0, clear start calibration, set HiBW */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9e86);
+ udelay(2);
+ /* AFE_CAL_CONFIG_0, start calibration with hi BW mode set */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0001, 0x9f86);
+ udelay(100);
+
+ /* Adjust 10BT amplitude by an additional +7% and 100BT by +2% */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7ea);
+ /* Adjust 1G mode amplitude and 1G testmode1 */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0002, 0xede0);
+
+ /* Read CORE_EXPA9 */
+ tmp = bcm_phy_read_exp(phydev, 0x00a9);
+ /* CORE_EXPA9[6:1] is rcalcode[5:0] */
+ rcalcode = (tmp & 0x7e) / 2;
+ /* Correct RCAL code + 1 is -1% rprogr, LP: +16 */
+ rcalnewcodelp = rcalcode + 16;
+ /* Correct RCAL code + 1 is -15 rprogr, 11: +10 */
+ rcalnewcode11 = rcalcode + 10;
+ /* Saturate if necessary */
+ if (rcalnewcodelp > 0x3f)
+ rcalnewcodelp = 0x3f;
+ if (rcalnewcode11 > 0x3f)
+ rcalnewcode11 = 0x3f;
+ /* REXT=1 BYP=1 RCAL_st1<5:0>=new rcal code */
+ tmp = 0x00f8 + rcalnewcodelp * 256;
+ /* Program into AFE_CAL_CONFIG_2 */
+ bcm_phy_write_misc(phydev, 0x0039, 0x0003, tmp);
+ /* AFE_BIAS_CONFIG_0 10BT bias code (Bias: E4) */
+ bcm_phy_write_misc(phydev, 0x0038, 0x0001, 0xe7e4);
+ /* Invert ADC clock output and ADC refp LDO current to correct the
+ * default
+ */
+ bcm_phy_write_misc(phydev, 0x003b, 0x0000, 0x8002);
+ /* 100BT stair case, high BW, 1G stair case, alternate encode */
+ bcm_phy_write_misc(phydev, 0x003c, 0x0003, 0xf882);
+ /* 1000BT DAC transition method per Erol, bits[32], DAC Shuffle
+ * sequence 1 + 10BT imp adjust bits
+ */
+ bcm_phy_write_misc(phydev, 0x003d, 0x0000, 0x3201);
+ /* Non-overlap fix */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0002, 0x0c00);
+
+ /* pwdb override (rxconfig<5>) to turn on the RX LDO independent of
+ * the pwdb controls from DSP_TAP10
+ */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0020);
+
+ /* Remove references to channel 2 and 3 */
+ bcm_phy_write_misc(phydev, 0x003b, 0x0002, 0x0000);
+ bcm_phy_write_misc(phydev, 0x003b, 0x0003, 0x0000);
+
+ /* Set cal_bypassb bit rxconfig<43> */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0003, 0x0800);
+ udelay(2);
+
+ /* Revert pwdb_override (rxconfig<5>) to 0 so that the RX pwr
+ * is controlled by DSP.
+ */
+ bcm_phy_write_misc(phydev, 0x003a, 0x0001, 0x0000);
+
+ /* Drop LSB */
+ rcalnewcode11d2 = (rcalnewcode11 & 0xfffe) / 2;
+ tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0001);
+ /* Clear bits [11:5] */
+ tmp &= ~0xfe0;
+ /* set txcfg_ch0<5>=1 (enable + set local rcal) */
+ tmp |= 0x0020 | (rcalnewcode11d2 * 64);
+ bcm_phy_write_misc(phydev, 0x003d, 0x0001, tmp);
+ bcm_phy_write_misc(phydev, 0x003d, 0x0002, tmp);
+
+ tmp = bcm_phy_read_misc(phydev, 0x003d, 0x0000);
+ /* set txcfg<45:44>=11 (enable Rextra + invert fullscaledetect)
+ */
+ tmp &= ~0x3000;
+ tmp |= 0x3000;
+ bcm_phy_write_misc(phydev, 0x003d, 0x0000, tmp);
+
+ return 0;
+}
+
+static int bcm7xxx_16nm_ephy_config_init(struct phy_device *phydev)
+{
+ int ret, val;
+
+ ret = bcm7xxx_16nm_ephy_afe_config(phydev);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_set_eee(phydev, true);
+ if (ret)
+ return ret;
+
+ ret = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
+ if (ret < 0)
+ return ret;
+
+ val = ret;
+
+ /* Enable auto power down of the DLL, with
+ * TXC/RXC disabled during auto power down.
+ */
+ val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
+ val |= BIT(8);
+
+ ret = bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
+ if (ret < 0)
+ return ret;
+
+ return bcm_phy_enable_apd(phydev, true);
+}
+
+static int bcm7xxx_16nm_ephy_resume(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Re-apply workarounds when coming out of suspend/resume */
+ ret = bcm7xxx_16nm_ephy_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return genphy_config_aneg(phydev);
+}
+
#define MII_BCM7XXX_REG_INVALID 0xff
static u8 bcm7xxx_28nm_ephy_regnum_to_shd(u16 regnum)
@@ -716,9 +900,25 @@ static void bcm7xxx_28nm_remove(struct phy_device *phydev)
.resume = bcm7xxx_config_init, \
}
+#define BCM7XXX_16NM_EPHY(_oui, _name) \
+{ \
+ .phy_id = (_oui), \
+ .phy_id_mask = 0xfffffff0, \
+ .name = _name, \
+ /* PHY_BASIC_FEATURES */ \
+ .flags = PHY_IS_INTERNAL, \
+ .probe = bcm7xxx_28nm_probe, \
+ .remove = bcm7xxx_28nm_remove, \
+ .config_init = bcm7xxx_16nm_ephy_config_init, \
+ .config_aneg = genphy_config_aneg, \
+ .read_status = genphy_read_status, \
+ .resume = bcm7xxx_16nm_ephy_resume, \
+}
+
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_EPHY(PHY_ID_BCM72113, "Broadcom BCM72113"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM72116, "Broadcom BCM72116"),
+ BCM7XXX_16NM_EPHY(PHY_ID_BCM72165, "Broadcom BCM72165"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"),
BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"),
@@ -736,11 +936,13 @@ static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7429, "Broadcom BCM7429"),
BCM7XXX_40NM_EPHY(PHY_ID_BCM7435, "Broadcom BCM7435"),
+ BCM7XXX_16NM_EPHY(PHY_ID_BCM7712, "Broadcom BCM7712"),
};
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM72113, 0xfffffff0 },
{ PHY_ID_BCM72116, 0xfffffff0, },
+ { PHY_ID_BCM72165, 0xfffffff0, },
{ PHY_ID_BCM7250, 0xfffffff0, },
{ PHY_ID_BCM7255, 0xfffffff0, },
{ PHY_ID_BCM7260, 0xfffffff0, },
@@ -757,6 +959,7 @@ static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7439, 0xfffffff0, },
{ PHY_ID_BCM7435, 0xfffffff0, },
{ PHY_ID_BCM7445, 0xfffffff0, },
+ { PHY_ID_BCM7712, 0xfffffff0, },
{ }
};
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 83aea5c5cd03..bb5104ae4610 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -392,10 +392,50 @@ static int bcm54xx_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm54xx_iddq_set(struct phy_device *phydev, bool enable)
+{
+ int ret = 0;
+
+ if (!(phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND))
+ return ret;
+
+ ret = bcm_phy_read_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL);
+ if (ret < 0)
+ goto out;
+
+ if (enable)
+ ret |= BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP;
+ else
+ ret &= ~(BCM54XX_TOP_MISC_IDDQ_SR | BCM54XX_TOP_MISC_IDDQ_LP);
+
+ ret = bcm_phy_write_exp(phydev, BCM54XX_TOP_MISC_IDDQ_CTRL, ret);
+out:
+ return ret;
+}
+
+static int bcm54xx_suspend(struct phy_device *phydev)
+{
+ int ret;
+
+ /* We cannot use a read/modify/write here, otherwise the PHY gets into
+ * a bad state where its LEDs keep flashing, thus defeating the purpose
+ * of low power mode.
+ */
+ ret = phy_write(phydev, MII_BMCR, BMCR_PDOWN);
+ if (ret < 0)
+ return ret;
+
+ return bcm54xx_iddq_set(phydev, true);
+}
+
static int bcm54xx_resume(struct phy_device *phydev)
{
int ret;
+ ret = bcm54xx_iddq_set(phydev, false);
+ if (ret < 0)
+ return ret;
+
/* Writes to register other than BMCR would be ignored
* unless we clear the PDOWN bit first
*/
@@ -408,6 +448,15 @@ static int bcm54xx_resume(struct phy_device *phydev)
*/
fsleep(40);
+ /* Issue a soft reset after clearing the power down bit
+ * and before doing any other configuration.
+ */
+ if (phydev->dev_flags & PHY_BRCM_IDDQ_SUSPEND) {
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
return bcm54xx_config_init(phydev);
}
@@ -702,6 +751,36 @@ static void bcm54xx_get_stats(struct phy_device *phydev,
bcm_phy_get_stats(phydev, priv->stats, stats, data);
}
+static void bcm54xx_link_change_notify(struct phy_device *phydev)
+{
+ u16 mask = MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE |
+ MII_BCM54XX_EXP_EXP08_FORCE_DAC_WAKE;
+ int ret;
+
+ if (phydev->state != PHY_RUNNING)
+ return;
+
+ /* Don't change the DAC wake settings if auto power down
+ * is not requested.
+ */
+ if (!(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
+ return;
+
+ ret = bcm_phy_read_exp(phydev, MII_BCM54XX_EXP_EXP08);
+ if (ret < 0)
+ return;
+
+ /* Enable/disable 10BaseT auto and forced early DAC wake depending
+ * on the negotiated speed; those settings should only be applied
+ * at 10Mbit/s.
+ */
+ if (phydev->speed == SPEED_10)
+ ret |= mask;
+ else
+ ret &= ~mask;
+ bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret);
+}
+
static struct phy_driver broadcom_drivers[] = {
{
.phy_id = PHY_ID_BCM5411,
@@ -715,6 +794,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5421,
.phy_id_mask = 0xfffffff0,
@@ -727,6 +807,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54210E,
.phy_id_mask = 0xfffffff0,
@@ -739,6 +820,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
@@ -751,6 +835,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54612E,
.phy_id_mask = 0xfffffff0,
@@ -763,6 +848,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54616S,
.phy_id_mask = 0xfffffff0,
@@ -774,6 +860,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.read_status = bcm54616s_read_status,
.probe = bcm54616s_probe,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5464,
.phy_id_mask = 0xfffffff0,
@@ -788,6 +875,7 @@ static struct phy_driver broadcom_drivers[] = {
.handle_interrupt = bcm_phy_handle_interrupt,
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5481,
.phy_id_mask = 0xfffffff0,
@@ -801,6 +889,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54810,
.phy_id_mask = 0xfffffff0,
@@ -814,8 +903,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
- .suspend = genphy_suspend,
+ .suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM54811,
.phy_id_mask = 0xfffffff0,
@@ -829,8 +919,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_aneg = bcm5481_config_aneg,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
- .suspend = genphy_suspend,
+ .suspend = bcm54xx_suspend,
.resume = bcm54xx_resume,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM5482,
.phy_id_mask = 0xfffffff0,
@@ -843,6 +934,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM50610,
.phy_id_mask = 0xfffffff0,
@@ -855,6 +947,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM50610M,
.phy_id_mask = 0xfffffff0,
@@ -867,6 +962,9 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
+ .suspend = bcm54xx_suspend,
+ .resume = bcm54xx_resume,
}, {
.phy_id = PHY_ID_BCM57780,
.phy_id_mask = 0xfffffff0,
@@ -879,6 +977,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
@@ -905,6 +1004,7 @@ static struct phy_driver broadcom_drivers[] = {
.get_strings = bcm_phy_get_strings,
.get_stats = bcm54xx_get_stats,
.probe = bcm54xx_phy_probe,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM53125,
.phy_id_mask = 0xfffffff0,
@@ -918,6 +1018,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
}, {
.phy_id = PHY_ID_BCM89610,
.phy_id_mask = 0xfffffff0,
@@ -930,6 +1031,7 @@ static struct phy_driver broadcom_drivers[] = {
.config_init = bcm54xx_config_init,
.config_intr = bcm_phy_config_intr,
.handle_interrupt = bcm_phy_handle_interrupt,
+ .link_change_notify = bcm54xx_link_change_notify,
} };
module_phy_driver(broadcom_drivers);
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 6bbc81ad295f..8561f2d4443b 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -182,7 +182,7 @@ static int dp83867_set_wol(struct phy_device *phydev,
{
struct net_device *ndev = phydev->attached_dev;
u16 val_rxcfg, val_micr;
- u8 *mac;
+ const u8 *mac;
val_rxcfg = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RXFCFG);
val_micr = phy_read(phydev, MII_DP83867_MICR);
@@ -193,7 +193,7 @@ static int dp83867_set_wol(struct phy_device *phydev,
val_micr |= MII_DP83867_MICR_WOL_INT_EN;
if (wol->wolopts & WAKE_MAGIC) {
- mac = (u8 *)ndev->dev_addr;
+ mac = (const u8 *)ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -619,6 +619,25 @@ static int dp83867_of_init(struct phy_device *phydev)
#else
static int dp83867_of_init(struct phy_device *phydev)
{
+ struct dp83867_private *dp83867 = phydev->priv;
+ u16 delay;
+
+ /* For a non-OF device, the RX and TX ID values are either strapped
+ * or taken from the default value. So initialize the RX and TX ID
+ * values here so that RGMIIDCTL is configured correctly later in
+ * dp83867_config_init().
+ */
+ delay = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_RGMIIDCTL);
+ dp83867->rx_id_delay = delay & DP83867_RGMII_RX_CLK_DELAY_MAX;
+ dp83867->tx_id_delay = (delay >> DP83867_RGMII_TX_CLK_DELAY_SHIFT) &
+ DP83867_RGMII_TX_CLK_DELAY_MAX;
+
+ /* Per the datasheet, the IO impedance defaults to 50 ohm, so set the
+ * same here; otherwise the default of '0' would mean the highest IO
+ * impedance, which is wrong.
+ */
+ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2;
+
return 0;
}
#endif /* CONFIG_OF_MDIO */
diff --git a/drivers/net/phy/dp83869.c b/drivers/net/phy/dp83869.c
index 755220c6451f..7113925606f7 100644
--- a/drivers/net/phy/dp83869.c
+++ b/drivers/net/phy/dp83869.c
@@ -246,7 +246,7 @@ static int dp83869_set_wol(struct phy_device *phydev,
{
struct net_device *ndev = phydev->attached_dev;
int val_rxcfg, val_micr;
- u8 *mac;
+ const u8 *mac;
int ret;
val_rxcfg = phy_read_mmd(phydev, DP83869_DEVADDR, DP83869_RXFCFG);
@@ -264,7 +264,7 @@ static int dp83869_set_wol(struct phy_device *phydev,
if (wol->wolopts & WAKE_MAGIC ||
wol->wolopts & WAKE_MAGICSECURE) {
- mac = (u8 *)ndev->dev_addr;
+ mac = (const u8 *)ndev->dev_addr;
if (!is_valid_ether_addr(mac))
return -EINVAL;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bd310e8d5e43..b6fea119fe13 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -22,6 +22,7 @@
* If both the fiber and copper ports are connected, the first to gain
* link takes priority and the other port is completely locked out.
*/
+#include <linux/bitfield.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/hwmon.h>
@@ -33,6 +34,8 @@
#define MV_PHY_ALASKA_NBT_QUIRK_MASK 0xfffffffe
#define MV_PHY_ALASKA_NBT_QUIRK_REV (MARVELL_PHY_ID_88X3310 | 0xa)
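+/* Packs a dotted firmware version into a single comparable value, e.g.
+ * MV_VERSION(0,3,5,0) for firmware 0.3.5.0 (see mv3310_has_downshift()).
+ */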
+#define MV_VERSION(a,b,c,d) ((a) << 24 | (b) << 16 | (c) << 8 | (d))
+
enum {
MV_PMA_FW_VER0 = 0xc011,
MV_PMA_FW_VER1 = 0xc012,
@@ -62,6 +65,15 @@ enum {
MV_PCS_CSCR1_MDIX_MDIX = 0x0020,
MV_PCS_CSCR1_MDIX_AUTO = 0x0060,
+ MV_PCS_DSC1 = 0x8003,
+ MV_PCS_DSC1_ENABLE = BIT(9),
+ MV_PCS_DSC1_10GBT = 0x01c0,
+ MV_PCS_DSC1_1GBR = 0x0038,
+ MV_PCS_DSC1_100BTX = 0x0007,
+ MV_PCS_DSC2 = 0x8004,
+ MV_PCS_DSC2_2P5G = 0xf000,
+ MV_PCS_DSC2_5G = 0x0f00,
+
MV_PCS_CSSR1 = 0x8008,
MV_PCS_CSSR1_SPD1_MASK = 0xc000,
MV_PCS_CSSR1_SPD1_SPD2 = 0xc000,
@@ -125,6 +137,7 @@ enum {
};
struct mv3310_chip {
+ bool (*has_downshift)(struct phy_device *phydev);
void (*init_supported_interfaces)(unsigned long *mask);
int (*get_mactype)(struct phy_device *phydev);
int (*init_interface)(struct phy_device *phydev, int mactype);
@@ -138,6 +151,7 @@ struct mv3310_priv {
DECLARE_BITMAP(supported_interfaces, PHY_INTERFACE_MODE_MAX);
u32 firmware_ver;
+ bool has_downshift;
bool rate_match;
phy_interface_t const_interface;
@@ -330,6 +344,71 @@ static int mv3310_reset(struct phy_device *phydev, u32 unit)
5000, 100000, true);
}
+static int mv3310_get_downshift(struct phy_device *phydev, u8 *ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ int val;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ val = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1);
+ if (val < 0)
+ return val;
+
+ if (val & MV_PCS_DSC1_ENABLE)
+ /* assume that all fields are the same */
+ *ds = 1 + FIELD_GET(MV_PCS_DSC1_10GBT, (u16)val);
+ else
+ *ds = DOWNSHIFT_DEV_DISABLE;
+
+ return 0;
+}
+
+static int mv3310_set_downshift(struct phy_device *phydev, u8 ds)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+ u16 val;
+ int err;
+
+ if (!priv->has_downshift)
+ return -EOPNOTSUPP;
+
+ if (ds == DOWNSHIFT_DEV_DISABLE)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE);
+
+ /* DOWNSHIFT_DEV_DEFAULT_COUNT is confusing. It looks like it should
+ * set the default settings for the PHY. However, it is used for
+ * "ethtool --set-phy-tunable ethN downshift on". The intention is
+ * to enable downshift at a default number of retries. The default
+ * settings for 88x3310 are for two retries with downshift disabled.
+ * So let's use two retries with downshift enabled.
+ */
+ if (ds == DOWNSHIFT_DEV_DEFAULT_COUNT)
+ ds = 2;
+
+ if (ds > 8)
+ return -E2BIG;
+
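+ /* The DSC fields below are assumed to encode (retries - 1): after the range
+ * check ds is in 1..8, so ds - 1 fits into the 3- and 4-bit register fields.
+ */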
+ ds -= 1;
+ val = FIELD_PREP(MV_PCS_DSC2_2P5G, ds);
+ val |= FIELD_PREP(MV_PCS_DSC2_5G, ds);
+ err = phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC2,
+ MV_PCS_DSC2_2P5G | MV_PCS_DSC2_5G, val);
+ if (err < 0)
+ return err;
+
+ val = MV_PCS_DSC1_ENABLE;
+ val |= FIELD_PREP(MV_PCS_DSC1_10GBT, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_1GBR, ds);
+ val |= FIELD_PREP(MV_PCS_DSC1_100BTX, ds);
+
+ return phy_modify_mmd(phydev, MDIO_MMD_PCS, MV_PCS_DSC1,
+ MV_PCS_DSC1_ENABLE | MV_PCS_DSC1_10GBT |
+ MV_PCS_DSC1_1GBR | MV_PCS_DSC1_100BTX, val);
+}
+
static int mv3310_get_edpd(struct phy_device *phydev, u16 *edpd)
{
int val;
@@ -448,6 +527,9 @@ static int mv3310_probe(struct phy_device *phydev)
priv->firmware_ver >> 24, (priv->firmware_ver >> 16) & 255,
(priv->firmware_ver >> 8) & 255, priv->firmware_ver & 255);
+ if (chip->has_downshift)
+ priv->has_downshift = chip->has_downshift(phydev);
+
/* Powering down the port when not in use saves about 600mW */
ret = mv3310_power_down(phydev);
if (ret)
@@ -616,7 +698,16 @@ static int mv3310_config_init(struct phy_device *phydev)
}
/* Enable EDPD mode - saving 600mW */
- return mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ err = mv3310_set_edpd(phydev, ETHTOOL_PHY_EDPD_DFLT_TX_MSECS);
+ if (err)
+ return err;
+
+ /* Allow downshift */
+ err = mv3310_set_downshift(phydev, DOWNSHIFT_DEV_DEFAULT_COUNT);
+ if (err && err != -EOPNOTSUPP)
+ return err;
+
+ return 0;
}
static int mv3310_get_features(struct phy_device *phydev)
@@ -886,6 +977,8 @@ static int mv3310_get_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_get_downshift(phydev, data);
case ETHTOOL_PHY_EDPD:
return mv3310_get_edpd(phydev, data);
default:
@@ -897,6 +990,8 @@ static int mv3310_set_tunable(struct phy_device *phydev,
struct ethtool_tunable *tuna, const void *data)
{
switch (tuna->id) {
+ case ETHTOOL_PHY_DOWNSHIFT:
+ return mv3310_set_downshift(phydev, *(u8 *)data);
case ETHTOOL_PHY_EDPD:
return mv3310_set_edpd(phydev, *(u16 *)data);
default:
@@ -904,6 +999,14 @@ static int mv3310_set_tunable(struct phy_device *phydev,
}
}
+static bool mv3310_has_downshift(struct phy_device *phydev)
+{
+ struct mv3310_priv *priv = dev_get_drvdata(&phydev->mdio.dev);
+
+ /* Fails to downshift with firmware older than v0.3.5.0 */
+ return priv->firmware_ver >= MV_VERSION(0,3,5,0);
+}
+
static void mv3310_init_supported_interfaces(unsigned long *mask)
{
__set_bit(PHY_INTERFACE_MODE_SGMII, mask);
@@ -943,6 +1046,7 @@ static void mv2111_init_supported_interfaces(unsigned long *mask)
}
static const struct mv3310_chip mv3310_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3310_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3310_init_interface,
@@ -953,6 +1057,7 @@ static const struct mv3310_chip mv3310_type = {
};
static const struct mv3310_chip mv3340_type = {
+ .has_downshift = mv3310_has_downshift,
.init_supported_interfaces = mv3340_init_supported_interfaces,
.get_mactype = mv3310_get_mactype,
.init_interface = mv3340_init_interface,
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6865d9319197..c204067f1890 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -937,6 +937,28 @@ int mdiobus_modify(struct mii_bus *bus, int addr, u32 regnum, u16 mask, u16 set)
EXPORT_SYMBOL_GPL(mdiobus_modify);
/**
+ * mdiobus_modify_changed - Convenience function for modifying a given mdio
+ * device register and returning if it changed
+ * @bus: the mii_bus struct
+ * @addr: the phy address
+ * @regnum: register number to write
+ * @mask: bit mask of bits to clear
+ * @set: bit mask of bits to set
+ */
+int mdiobus_modify_changed(struct mii_bus *bus, int addr, u32 regnum,
+ u16 mask, u16 set)
+{
+ int err;
+
+ mutex_lock(&bus->mdio_lock);
+ err = __mdiobus_modify_changed(bus, addr, regnum, mask, set);
+ mutex_unlock(&bus->mdio_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mdiobus_modify_changed);
+
+/**
* mdio_bus_match - determine if given MDIO driver supports the given
* MDIO device
* @dev: target MDIO device
@@ -949,8 +971,14 @@ EXPORT_SYMBOL_GPL(mdiobus_modify);
*/
static int mdio_bus_match(struct device *dev, struct device_driver *drv)
{
+ struct mdio_driver *mdiodrv = to_mdio_driver(drv);
struct mdio_device *mdio = to_mdio_device(dev);
+ /* Both the driver and device must type-match */
+ if (!(mdiodrv->mdiodrv.flags & MDIO_DEVICE_IS_PHY) !=
+ !(mdio->flags & MDIO_DEVICE_FLAG_PHY))
+ return 0;
+
if (of_driver_match_device(dev, drv))
return 1;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5c928f827173..44a24b99c894 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -863,9 +863,9 @@ static int ksz9031_config_init(struct phy_device *phydev)
MII_KSZ9031RN_TX_DATA_PAD_SKEW, 4,
tx_data_skews, 4, &update);
- if (update && phydev->interface != PHY_INTERFACE_MODE_RGMII)
+ if (update && !phy_interface_is_rgmii(phydev))
phydev_warn(phydev,
- "*-skew-ps values should be used only with phy-mode = \"rgmii\"\n");
+ "*-skew-ps values should be used only with RGMII PHY modes\n");
/* Silicon Errata Sheet (DS80000691D or DS80000692D):
* When the device links in the 1000BASE-T slave mode only,
@@ -1003,6 +1003,26 @@ static int ksz9131_config_rgmii_delay(struct phy_device *phydev)
txcdll_val);
}
+/* Silicon Errata DS80000693B
+ *
+ * When LEDs are configured in Individual Mode, LED1 is ON in a no-link
+ * condition. The workaround is to set register 0x1e, bit 9; this way LED1
+ * behaves according to the datasheet (off if there is no link).
+ */
+static int ksz9131_led_errata(struct phy_device *phydev)
+{
+ int reg;
+
+ reg = phy_read_mmd(phydev, 2, 0);
+ if (reg < 0)
+ return reg;
+
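+ /* Bit 4 of MMD 2, register 0 is assumed to reflect the "LEDs in Individual
+ * Mode" configuration described in the errata above; only then is the
+ * workaround needed.
+ */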
+ if (!(reg & BIT(4)))
+ return 0;
+
+ return phy_set_bits(phydev, 0x1e, BIT(9));
+}
+
static int ksz9131_config_init(struct phy_device *phydev)
{
struct device_node *of_node;
@@ -1058,6 +1078,10 @@ static int ksz9131_config_init(struct phy_device *phydev)
if (ret < 0)
return ret;
+ ret = ksz9131_led_errata(phydev);
+ if (ret < 0)
+ return ret;
+
return 0;
}
@@ -1537,6 +1561,65 @@ static int ksz886x_cable_test_get_status(struct phy_device *phydev,
return ret;
}
+#define LAN_EXT_PAGE_ACCESS_CONTROL 0x16
+#define LAN_EXT_PAGE_ACCESS_ADDRESS_DATA 0x17
+#define LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC 0x4000
+
+#define LAN8804_ALIGN_SWAP 0x4a
+#define LAN8804_ALIGN_TX_A_B_SWAP 0x1
+#define LAN8804_ALIGN_TX_A_B_SWAP_MASK GENMASK(2, 0)
+#define LAN8814_CLOCK_MANAGEMENT 0xd
+#define LAN8814_LINK_QUALITY 0x8e
+
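+/* Extended-page access as used by the helpers below: write the page number to
+ * the access-control register, write the target register address to the
+ * address/data register, then set the EP_FUNC bit so that subsequent accesses
+ * to the address/data register read or write the selected register.
+ */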
+static int lanphy_read_page_reg(struct phy_device *phydev, int page, u32 addr)
+{
+ u32 data;
+
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+ data = phy_read(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA);
+
+ return data;
+}
+
+static int lanphy_write_page_reg(struct phy_device *phydev, int page, u16 addr,
+ u16 val)
+{
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL, page);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, addr);
+ phy_write(phydev, LAN_EXT_PAGE_ACCESS_CONTROL,
+ (page | LAN_EXT_PAGE_ACCESS_CTRL_EP_FUNC));
+
+ val = phy_write(phydev, LAN_EXT_PAGE_ACCESS_ADDRESS_DATA, val);
+ if (val) {
+ phydev_err(phydev, "Error: phy_write has returned error %d\n",
+ val);
+ return val;
+ }
+ return 0;
+}
+
+static int lan8804_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ /* MDI-X setting to swap the A,B transmit pairs */
+ val = lanphy_read_page_reg(phydev, 2, LAN8804_ALIGN_SWAP);
+ val &= ~LAN8804_ALIGN_TX_A_B_SWAP_MASK;
+ val |= LAN8804_ALIGN_TX_A_B_SWAP;
+ lanphy_write_page_reg(phydev, 2, LAN8804_ALIGN_SWAP, val);
+
+ /* Make sure that the PHY will not stop generating the clock when the
+ * link partner goes down
+ */
+ lanphy_write_page_reg(phydev, 31, LAN8814_CLOCK_MANAGEMENT, 0x27e);
+ lanphy_read_page_reg(phydev, 1, LAN8814_LINK_QUALITY);
+
+ return 0;
+}
+
static struct phy_driver ksphy_driver[] = {
{
.phy_id = PHY_ID_KS8737,
@@ -1593,8 +1676,9 @@ static struct phy_driver ksphy_driver[] = {
.get_sset_count = kszphy_get_sset_count,
.get_strings = kszphy_get_strings,
.get_stats = kszphy_get_stats,
- .suspend = genphy_suspend,
- .resume = genphy_resume,
+ /* No suspend/resume callbacks because of errata DS80000700A,
+ * which causes a receiver error following software power down.
+ */
}, {
.phy_id = PHY_ID_KSZ8041RNLI,
.phy_id_mask = MICREL_PHY_ID_MASK,
@@ -1719,6 +1803,20 @@ static struct phy_driver ksphy_driver[] = {
.suspend = genphy_suspend,
.resume = kszphy_resume,
}, {
+ .phy_id = PHY_ID_LAN8804,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Microchip LAN966X Gigabit PHY",
+ .config_init = lan8804_config_init,
+ .driver_data = &ksz9021_type,
+ .probe = kszphy_probe,
+ .soft_reset = genphy_soft_reset,
+ .read_status = ksz9031_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = kszphy_resume,
+}, {
.phy_id = PHY_ID_KSZ9131,
.phy_id_mask = MICREL_PHY_ID_MASK,
.name = "Microchip KSZ9131 Gigabit PHY",
@@ -1794,6 +1892,7 @@ static struct mdio_device_id __maybe_unused micrel_tbl[] = {
{ PHY_ID_KSZ8873MLL, MICREL_PHY_ID_MASK },
{ PHY_ID_KSZ886X, MICREL_PHY_ID_MASK },
{ PHY_ID_LAN8814, MICREL_PHY_ID_MASK },
+ { PHY_ID_LAN8804, MICREL_PHY_ID_MASK },
{ }
};
diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c
index 4dc00bd5a8d2..a4de3d2081c5 100644
--- a/drivers/net/phy/microchip_t1.c
+++ b/drivers/net/phy/microchip_t1.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
#include <linux/mii.h>
#include <linux/phy.h>
+#include <linux/ethtool.h>
+#include <linux/ethtool_netlink.h>
/* External Register Control Register */
#define LAN87XX_EXT_REG_CTL (0x14)
@@ -35,8 +37,14 @@
#define PHYACC_ATTR_BANK_MISC 1
#define PHYACC_ATTR_BANK_PCS 2
#define PHYACC_ATTR_BANK_AFE 3
+#define PHYACC_ATTR_BANK_DSP 4
#define PHYACC_ATTR_BANK_MAX 7
+/* measurement defines */
+#define LAN87XX_CABLE_TEST_OK 0
+#define LAN87XX_CABLE_TEST_OPEN 1
+#define LAN87XX_CABLE_TEST_SAME_SHORT 2
+
#define DRIVER_AUTHOR "Nisar Sayed <nisar.sayed@microchip.com>"
#define DRIVER_DESC "Microchip LAN87XX T1 PHY driver"
@@ -226,11 +234,240 @@ static int lan87xx_config_init(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int microchip_cable_test_start_common(struct phy_device *phydev)
+{
+ int bmcr, bmsr, ret;
+
+ /* If auto-negotiation is enabled, but not complete, the cable
+ * test never completes. So disable auto-neg.
+ */
+ bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
+
+ bmsr = phy_read(phydev, MII_BMSR);
+
+ if (bmsr < 0)
+ return bmsr;
+
+ if (bmcr & BMCR_ANENABLE) {
+ ret = phy_modify(phydev, MII_BMCR, BMCR_ANENABLE, 0);
+ if (ret < 0)
+ return ret;
+ ret = genphy_soft_reset(phydev);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* If the link is up, allow it some time to go down */
+ if (bmsr & BMSR_LSTATUS)
+ msleep(1500);
+
+ return 0;
+}
+
+static int lan87xx_cable_test_start(struct phy_device *phydev)
+{
+ static const struct access_ereg_val cable_test[] = {
+ /* min wait */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 93,
+ 0, 0},
+ /* max wait */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 94,
+ 10, 0},
+ /* pulse cycle */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 95,
+ 90, 0},
+ /* cable diag thresh */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 92,
+ 60, 0},
+ /* max gain */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 79,
+ 31, 0},
+ /* clock align for each iteration */
+ {PHYACC_ATTR_MODE_MODIFY, PHYACC_ATTR_BANK_DSP, 55,
+ 0, 0x0038},
+ /* max cycle wait config */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 94,
+ 70, 0},
+ /* start cable diag */
+ {PHYACC_ATTR_MODE_WRITE, PHYACC_ATTR_BANK_DSP, 90,
+ 1, 0},
+ };
+ int rc, i;
+
+ rc = microchip_cable_test_start_common(phydev);
+ if (rc < 0)
+ return rc;
+
+ /* start cable diag */
+ /* check if part is alive - if not, return diagnostic error */
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI,
+ 0x00, 0);
+ if (rc < 0)
+ return rc;
+
+ /* master/slave specific configs */
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_SMI,
+ 0x0A, 0);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 0x4000) != 0x4000) {
+ /* DUT is Slave */
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_AFE,
+ 0x0E, 0x5, 0x7);
+ if (rc < 0)
+ return rc;
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
+ 0x1A, 0x8, 0x8);
+ if (rc < 0)
+ return rc;
+ } else {
+ /* DUT is Master */
+ rc = access_ereg_modify_changed(phydev, PHYACC_ATTR_BANK_SMI,
+ 0x10, 0x8, 0x40);
+ if (rc < 0)
+ return rc;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cable_test); i++) {
+ if (cable_test[i].mode == PHYACC_ATTR_MODE_MODIFY) {
+ rc = access_ereg_modify_changed(phydev,
+ cable_test[i].bank,
+ cable_test[i].offset,
+ cable_test[i].val,
+ cable_test[i].mask);
+ /* wait 50ms */
+ msleep(50);
+ } else {
+ rc = access_ereg(phydev, cable_test[i].mode,
+ cable_test[i].bank,
+ cable_test[i].offset,
+ cable_test[i].val);
+ }
+ if (rc < 0)
+ return rc;
+ }
+ /* cable diag started */
+
+ return 0;
+}
+
+static int lan87xx_cable_test_report_trans(u32 result)
+{
+ switch (result) {
+ case LAN87XX_CABLE_TEST_OK:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OK;
+ case LAN87XX_CABLE_TEST_OPEN:
+ return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
+ case LAN87XX_CABLE_TEST_SAME_SHORT:
+ return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
+ default:
+ /* DIAGNOSTIC_ERROR */
+ return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
+ }
+}
+
+static int lan87xx_cable_test_report(struct phy_device *phydev)
+{
+ int pos_peak_cycle = 0, pos_peak_in_phases = 0, pos_peak_phase = 0;
+ int neg_peak_cycle = 0, neg_peak_in_phases = 0, neg_peak_phase = 0;
+ int noise_margin = 20, time_margin = 89, jitter_var = 30;
+ int min_time_diff = 96, max_time_diff = 96 + time_margin;
+ bool fault = false, check_a = false, check_b = false;
+ int gain_idx = 0, pos_peak = 0, neg_peak = 0;
+ int pos_peak_time = 0, neg_peak_time = 0;
+ int pos_peak_in_phases_hybrid = 0;
+ int detect = -1;
+
+ gain_idx = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, 151, 0);
+ /* read non-hybrid results */
+ pos_peak = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, 153, 0);
+ neg_peak = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, 154, 0);
+ pos_peak_time = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, 156, 0);
+ neg_peak_time = access_ereg(phydev, PHYACC_ATTR_MODE_READ,
+ PHYACC_ATTR_BANK_DSP, 157, 0);
+
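+ /* The peak-time registers appear to pack a 7-bit cycle count and a 7-bit
+ * phase, with 96 phases per cycle (hence the "* 96" below); this reading is
+ * inferred from the bit manipulation that follows, not from a datasheet.
+ */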
+ pos_peak_cycle = (pos_peak_time >> 7) & 0x7F;
+ /* calculate non-hybrid values */
+ pos_peak_phase = pos_peak_time & 0x7F;
+ pos_peak_in_phases = (pos_peak_cycle * 96) + pos_peak_phase;
+ neg_peak_cycle = (neg_peak_time >> 7) & 0x7F;
+ neg_peak_phase = neg_peak_time & 0x7F;
+ neg_peak_in_phases = (neg_peak_cycle * 96) + neg_peak_phase;
+
+ /* process values */
+ check_a =
+ ((pos_peak_in_phases - neg_peak_in_phases) >= min_time_diff) &&
+ ((pos_peak_in_phases - neg_peak_in_phases) < max_time_diff) &&
+ pos_peak_in_phases_hybrid < pos_peak_in_phases &&
+ (pos_peak_in_phases_hybrid < (neg_peak_in_phases + jitter_var));
+ check_b =
+ ((neg_peak_in_phases - pos_peak_in_phases) >= min_time_diff) &&
+ ((neg_peak_in_phases - pos_peak_in_phases) < max_time_diff) &&
+ pos_peak_in_phases_hybrid < neg_peak_in_phases &&
+ (pos_peak_in_phases_hybrid < (pos_peak_in_phases + jitter_var));
+
+ if (pos_peak_in_phases > neg_peak_in_phases && check_a)
+ detect = 2;
+ else if ((neg_peak_in_phases > pos_peak_in_phases) && check_b)
+ detect = 1;
+
+ if (pos_peak > noise_margin && neg_peak > noise_margin &&
+ gain_idx >= 0) {
+ if (detect == 1 || detect == 2)
+ fault = true;
+ }
+
+ if (!fault)
+ detect = 0;
+
+ ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
+ lan87xx_cable_test_report_trans(detect));
+
+ return 0;
+}
+
+static int lan87xx_cable_test_get_status(struct phy_device *phydev,
+ bool *finished)
+{
+ int rc = 0;
+
+ *finished = false;
+
+ /* check if cable diag was finished */
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_READ, PHYACC_ATTR_BANK_DSP,
+ 90, 0);
+ if (rc < 0)
+ return rc;
+
+ if ((rc & 2) == 2) {
+ /* stop cable diag */
+ rc = access_ereg(phydev, PHYACC_ATTR_MODE_WRITE,
+ PHYACC_ATTR_BANK_DSP,
+ 90, 0);
+ if (rc < 0)
+ return rc;
+
+ *finished = true;
+
+ return lan87xx_cable_test_report(phydev);
+ }
+
+ return 0;
+}
+
static struct phy_driver microchip_t1_phy_driver[] = {
{
.phy_id = 0x0007c150,
.phy_id_mask = 0xfffffff0,
.name = "Microchip LAN87xx T1",
+ .flags = PHY_POLL_CABLE_TEST,
.features = PHY_BASIC_T1_FEATURES,
@@ -241,6 +478,8 @@ static struct phy_driver microchip_t1_phy_driver[] = {
.suspend = genphy_suspend,
.resume = genphy_resume,
+ .cable_test_start = lan87xx_cable_test_start,
+ .cable_test_get_status = lan87xx_cable_test_get_status,
}
};
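The two callbacks above plug into phylib's ethtool cable-test support: with PHY_POLL_CABLE_TEST set in the driver flags, phylib calls .cable_test_start once and then keeps polling .cable_test_get_status until *finished is set, typically in response to a cable-test request from userspace ethtool. A minimal sketch of that contract, with hypothetical foo_* names and not taken from this patch:

#include <linux/phy.h>
#include <linux/ethtool_netlink.h>

static int foo_cable_test_start(struct phy_device *phydev)
{
        /* kick off the diagnostic in hardware; return 0 or a -errno */
        return 0;
}

static int foo_cable_test_get_status(struct phy_device *phydev, bool *finished)
{
        *finished = false;

        /* poll a hardware "done" flag here; once it is set, report the
         * per-pair results and mark the test finished
         */
        ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
                                ETHTOOL_A_CABLE_RESULT_CODE_OK);
        *finished = true;

        return 0;
}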
diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c
index 6e32da28e138..ebfeeb3c67c1 100644
--- a/drivers/net/phy/mscc/mscc_main.c
+++ b/drivers/net/phy/mscc/mscc_main.c
@@ -273,12 +273,12 @@ static int vsc85xx_downshift_set(struct phy_device *phydev, u8 count)
static int vsc85xx_wol_set(struct phy_device *phydev,
struct ethtool_wolinfo *wol)
{
+ const u8 *mac_addr = phydev->attached_dev->dev_addr;
int rc;
u16 reg_val;
u8 i;
u16 pwd[3] = {0, 0, 0};
struct ethtool_wolinfo *wol_conf = wol;
- u8 *mac_addr = phydev->attached_dev->dev_addr;
mutex_lock(&phydev->lock);
rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2);
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index c617dbcad6ea..db709d30bf84 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -611,6 +611,41 @@ int genphy_c45_loopback(struct phy_device *phydev, bool enable)
}
EXPORT_SYMBOL_GPL(genphy_c45_loopback);
+/**
+ * genphy_c45_fast_retrain - configure fast retrain registers
+ * @phydev: target phy_device struct
+ * @enable: enable fast retrain or not
+ *
+ * Description: If fast retrain is enabled, we configure the PHY to
+ * advertise fast retrain capability and THP Bypass Request, then
+ * enable fast retrain. If it is not enabled, fast retrain is
+ * disabled.
+ */
+int genphy_c45_fast_retrain(struct phy_device *phydev, bool enable)
+{
+ int ret;
+
+ if (!enable)
+ return phy_clear_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FSRT_CSR,
+ MDIO_PMA_10GBR_FSRT_ENABLE);
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, phydev->supported)) {
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL,
+ MDIO_AN_10GBT_CTRL_ADVFSRT2_5G);
+ if (ret)
+ return ret;
+
+ ret = phy_set_bits_mmd(phydev, MDIO_MMD_AN, MDIO_AN_CTRL2,
+ MDIO_AN_THP_BP2_5GT);
+ if (ret)
+ return ret;
+ }
+
+ return phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FSRT_CSR,
+ MDIO_PMA_10GBR_FSRT_ENABLE);
+}
+EXPORT_SYMBOL_GPL(genphy_c45_fast_retrain);
+
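As a hedged usage sketch (not part of this patch), a Clause 45 PHY driver might call the new helper from its config_aneg path, enabling fast retrain before autonegotiation is restarted; foo_config_aneg() below is hypothetical:

#include <linux/phy.h>

static int foo_config_aneg(struct phy_device *phydev)
{
        int ret;

        /* enable fast retrain before (re)starting autonegotiation */
        ret = genphy_c45_fast_retrain(phydev, true);
        if (ret)
                return ret;

        return genphy_c45_config_aneg(phydev);
}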
struct phy_driver genphy_c45_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index f124a8a58bd4..a3bfb156c83d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -243,62 +243,10 @@ static void phy_sanitize_settings(struct phy_device *phydev)
}
}
-int phy_ethtool_ksettings_set(struct phy_device *phydev,
- const struct ethtool_link_ksettings *cmd)
-{
- __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
- u8 autoneg = cmd->base.autoneg;
- u8 duplex = cmd->base.duplex;
- u32 speed = cmd->base.speed;
-
- if (cmd->base.phy_address != phydev->mdio.addr)
- return -EINVAL;
-
- linkmode_copy(advertising, cmd->link_modes.advertising);
-
- /* We make sure that we don't pass unsupported values in to the PHY */
- linkmode_and(advertising, advertising, phydev->supported);
-
- /* Verify the settings we care about. */
- if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
- return -EINVAL;
-
- if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
- return -EINVAL;
-
- if (autoneg == AUTONEG_DISABLE &&
- ((speed != SPEED_1000 &&
- speed != SPEED_100 &&
- speed != SPEED_10) ||
- (duplex != DUPLEX_HALF &&
- duplex != DUPLEX_FULL)))
- return -EINVAL;
-
- phydev->autoneg = autoneg;
-
- if (autoneg == AUTONEG_DISABLE) {
- phydev->speed = speed;
- phydev->duplex = duplex;
- }
-
- linkmode_copy(phydev->advertising, advertising);
-
- linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
- phydev->advertising, autoneg == AUTONEG_ENABLE);
-
- phydev->master_slave_set = cmd->base.master_slave_cfg;
- phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
-
- /* Restart the PHY */
- phy_start_aneg(phydev);
-
- return 0;
-}
-EXPORT_SYMBOL(phy_ethtool_ksettings_set);
-
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd)
{
+ mutex_lock(&phydev->lock);
linkmode_copy(cmd->link_modes.supported, phydev->supported);
linkmode_copy(cmd->link_modes.advertising, phydev->advertising);
linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising);
@@ -317,6 +265,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev,
cmd->base.autoneg = phydev->autoneg;
cmd->base.eth_tp_mdix_ctrl = phydev->mdix_ctrl;
cmd->base.eth_tp_mdix = phydev->mdix;
+ mutex_unlock(&phydev->lock);
}
EXPORT_SYMBOL(phy_ethtool_ksettings_get);
@@ -751,7 +700,7 @@ static int phy_check_link_status(struct phy_device *phydev)
}
/**
- * phy_start_aneg - start auto-negotiation for this PHY device
+ * _phy_start_aneg - start auto-negotiation for this PHY device
* @phydev: the phy_device struct
*
* Description: Sanitizes the settings (if we're not autonegotiating
@@ -759,25 +708,43 @@ static int phy_check_link_status(struct phy_device *phydev)
* If the PHYCONTROL Layer is operating, we change the state to
* reflect the beginning of Auto-negotiation or forcing.
*/
-int phy_start_aneg(struct phy_device *phydev)
+static int _phy_start_aneg(struct phy_device *phydev)
{
int err;
+ lockdep_assert_held(&phydev->lock);
+
if (!phydev->drv)
return -EIO;
- mutex_lock(&phydev->lock);
-
if (AUTONEG_DISABLE == phydev->autoneg)
phy_sanitize_settings(phydev);
err = phy_config_aneg(phydev);
if (err < 0)
- goto out_unlock;
+ return err;
if (phy_is_started(phydev))
err = phy_check_link_status(phydev);
-out_unlock:
+
+ return err;
+}
+
+/**
+ * phy_start_aneg - start auto-negotiation for this PHY device
+ * @phydev: the phy_device struct
+ *
+ * Description: Sanitizes the settings (if we're not autonegotiating
+ * them), and then calls the driver's config_aneg function.
+ * If the PHYCONTROL Layer is operating, we change the state to
+ * reflect the beginning of Auto-negotiation or forcing.
+ */
+int phy_start_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ mutex_lock(&phydev->lock);
+ err = _phy_start_aneg(phydev);
mutex_unlock(&phydev->lock);
return err;
@@ -800,6 +767,61 @@ static int phy_poll_aneg_done(struct phy_device *phydev)
return ret < 0 ? ret : 0;
}
+int phy_ethtool_ksettings_set(struct phy_device *phydev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+ u8 autoneg = cmd->base.autoneg;
+ u8 duplex = cmd->base.duplex;
+ u32 speed = cmd->base.speed;
+
+ if (cmd->base.phy_address != phydev->mdio.addr)
+ return -EINVAL;
+
+ linkmode_copy(advertising, cmd->link_modes.advertising);
+
+ /* We make sure that we don't pass unsupported values in to the PHY */
+ linkmode_and(advertising, advertising, phydev->supported);
+
+ /* Verify the settings we care about. */
+ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE)
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising))
+ return -EINVAL;
+
+ if (autoneg == AUTONEG_DISABLE &&
+ ((speed != SPEED_1000 &&
+ speed != SPEED_100 &&
+ speed != SPEED_10) ||
+ (duplex != DUPLEX_HALF &&
+ duplex != DUPLEX_FULL)))
+ return -EINVAL;
+
+ mutex_lock(&phydev->lock);
+ phydev->autoneg = autoneg;
+
+ if (autoneg == AUTONEG_DISABLE) {
+ phydev->speed = speed;
+ phydev->duplex = duplex;
+ }
+
+ linkmode_copy(phydev->advertising, advertising);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ phydev->advertising, autoneg == AUTONEG_ENABLE);
+
+ phydev->master_slave_set = cmd->base.master_slave_cfg;
+ phydev->mdix_ctrl = cmd->base.eth_tp_mdix_ctrl;
+
+ /* Restart the PHY */
+ _phy_start_aneg(phydev);
+
+ mutex_unlock(&phydev->lock);
+ return 0;
+}
+EXPORT_SYMBOL(phy_ethtool_ksettings_set);
+
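The refactor above follows the common locked-wrapper/unlocked-helper pattern: phy_ethtool_ksettings_set() takes phydev->lock once and then calls _phy_start_aneg(), which asserts the lock instead of taking it, so the same logic is reused without recursive locking. A generic sketch of the pattern, with hypothetical struct foo and foo_* names:

#include <linux/lockdep.h>
#include <linux/mutex.h>

struct foo {
        struct mutex lock;
        int state;
};

static int __foo_update(struct foo *f, int state)
{
        lockdep_assert_held(&f->lock);

        f->state = state;       /* state protected by f->lock */

        return 0;
}

int foo_update(struct foo *f, int state)
{
        int err;

        mutex_lock(&f->lock);
        err = __foo_update(f, state);
        mutex_unlock(&f->lock);

        return err;
}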
/**
* phy_speed_down - set speed to lowest speed supported by both link partners
* @phydev: the phy_device struct
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4f9990b47a37..74d8e1dc125f 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -3149,6 +3149,16 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner)
return -EINVAL;
}
+ /* PHYLIB device drivers must not match using a DT compatible table
+ * as this bypasses our checks that the mdiodev that is being matched
+ * is backed by a struct phy_device. If that were to happen, we would
+ * make out-of-bounds accesses and lock up on phydev->lock.
+ */
+ if (WARN(new_driver->mdiodrv.driver.of_match_table,
+ "%s: driver must not provide a DT match table\n",
+ new_driver->name))
+ return -EINVAL;
+
new_driver->mdiodrv.flags |= MDIO_DEVICE_IS_PHY;
new_driver->mdiodrv.driver.name = new_driver->name;
new_driver->mdiodrv.driver.bus = &mdio_bus_type;
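Illustrative sketch of what the new check enforces (made-up values, not from this patch): PHY drivers are matched by their PHY ID registers and must not carry an OF compatible table in the embedded mdio driver:

#include <linux/phy.h>

static struct phy_driver foo_phy_driver = {
        .phy_id         = 0x12345678,   /* made-up ID */
        .phy_id_mask    = 0xfffffff0,
        .name           = "Foo example PHY",
        /* Setting .mdiodrv.driver.of_match_table here would now trip the
         * WARN in phy_driver_register() and fail with -EINVAL.
         */
};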
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 0a0abe8e4be0..3ad7397b8119 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -132,6 +132,17 @@ void phylink_set_port_modes(unsigned long *mask)
}
EXPORT_SYMBOL_GPL(phylink_set_port_modes);
+void phylink_set_10g_modes(unsigned long *mask)
+{
+ phylink_set(mask, 10000baseT_Full);
+ phylink_set(mask, 10000baseCR_Full);
+ phylink_set(mask, 10000baseSR_Full);
+ phylink_set(mask, 10000baseLR_Full);
+ phylink_set(mask, 10000baseLRM_Full);
+ phylink_set(mask, 10000baseER_Full);
+}
+EXPORT_SYMBOL_GPL(phylink_set_10g_modes);
+
static int phylink_is_empty_linkmode(const unsigned long *linkmode)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = { 0, };
@@ -155,9 +166,45 @@ static const char *phylink_an_mode_str(unsigned int mode)
return mode < ARRAY_SIZE(modestr) ? modestr[mode] : "unknown";
}
+static int phylink_validate_any(struct phylink *pl, unsigned long *supported,
+ struct phylink_link_state *state)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_adv) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(all_s) = { 0, };
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(s);
+ struct phylink_link_state t;
+ int intf;
+
+ for (intf = 0; intf < PHY_INTERFACE_MODE_MAX; intf++) {
+ if (test_bit(intf, pl->config->supported_interfaces)) {
+ linkmode_copy(s, supported);
+
+ t = *state;
+ t.interface = intf;
+ pl->mac_ops->validate(pl->config, s, &t);
+ linkmode_or(all_s, all_s, s);
+ linkmode_or(all_adv, all_adv, t.advertising);
+ }
+ }
+
+ linkmode_copy(supported, all_s);
+ linkmode_copy(state->advertising, all_adv);
+
+ return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
+}
+
static int phylink_validate(struct phylink *pl, unsigned long *supported,
struct phylink_link_state *state)
{
+ if (!phy_interface_empty(pl->config->supported_interfaces)) {
+ if (state->interface == PHY_INTERFACE_MODE_NA)
+ return phylink_validate_any(pl, supported, state);
+
+ if (!test_bit(state->interface,
+ pl->config->supported_interfaces))
+ return -EINVAL;
+ }
+
pl->mac_ops->validate(pl->config, supported, state);
return phylink_is_empty_linkmode(supported) ? -EINVAL : 0;
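For the PHY_INTERFACE_MODE_NA case above, phylink_validate_any() only iterates the interface modes set in config->supported_interfaces, so a MAC driver that opts in fills that bitmap when it sets up its phylink_config. A hedged sketch with a hypothetical driver:

#include <linux/bitops.h>
#include <linux/phylink.h>

static void foo_fill_supported_interfaces(struct phylink_config *config)
{
        __set_bit(PHY_INTERFACE_MODE_SGMII, config->supported_interfaces);
        __set_bit(PHY_INTERFACE_MODE_RGMII, config->supported_interfaces);
        __set_bit(PHY_INTERFACE_MODE_2500BASEX, config->supported_interfaces);
}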
@@ -540,9 +587,15 @@ static void phylink_mac_pcs_get_state(struct phylink *pl,
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
- state->speed = SPEED_UNKNOWN;
- state->duplex = DUPLEX_UNKNOWN;
- state->pause = MLO_PAUSE_NONE;
+ if (state->an_enabled) {
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ } else {
+ state->speed = pl->link_config.speed;
+ state->duplex = pl->link_config.duplex;
+ state->pause = pl->link_config.pause;
+ }
state->an_complete = 0;
state->link = 1;
@@ -1333,7 +1386,10 @@ void phylink_suspend(struct phylink *pl, bool mac_wol)
* but one would hope all packets have been sent. This
* also means phylink_resolve() will do nothing.
*/
- netif_carrier_off(pl->netdev);
+ if (pl->netdev)
+ netif_carrier_off(pl->netdev);
+ else
+ pl->old_link_state = false;
/* We do not call mac_link_down() here as we want the
* link to remain up to receive the WoL packets.
@@ -1598,20 +1654,11 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, config.advertising,
config.an_enabled);
- /* Validate without changing the current supported mask. */
- linkmode_copy(support, pl->supported);
- if (phylink_validate(pl, support, &config))
- return -EINVAL;
-
- /* If autonegotiation is enabled, we must have an advertisement */
- if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
- return -EINVAL;
-
/* If this link is with an SFP, ensure that changes to advertised modes
* also cause the associated interface to be selected such that the
* link can be configured correctly.
*/
- if (pl->sfp_port && pl->sfp_bus) {
+ if (pl->sfp_bus) {
config.interface = sfp_select_interface(pl->sfp_bus,
config.advertising);
if (config.interface == PHY_INTERFACE_MODE_NA) {
@@ -1631,8 +1678,17 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
__ETHTOOL_LINK_MODE_MASK_NBITS, support);
return -EINVAL;
}
+ } else {
+ /* Validate without changing the current supported mask. */
+ linkmode_copy(support, pl->supported);
+ if (phylink_validate(pl, support, &config))
+ return -EINVAL;
}
+ /* If autonegotiation is enabled, we must have an advertisement */
+ if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
+ return -EINVAL;
+
mutex_lock(&pl->state_mutex);
pl->link_config.speed = config.speed;
pl->link_config.duplex = config.duplex;
@@ -1724,7 +1780,7 @@ int phylink_ethtool_set_pauseparam(struct phylink *pl,
return -EOPNOTSUPP;
if (!phylink_test(pl->supported, Asym_Pause) &&
- !pause->autoneg && pause->rx_pause != pause->tx_pause)
+ pause->rx_pause != pause->tx_pause)
return -EINVAL;
pause_state = 0;
@@ -2522,12 +2578,10 @@ EXPORT_SYMBOL_GPL(phylink_decode_usxgmii_word);
void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
struct phylink_link_state *state)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
int bmsr, lpa;
- bmsr = mdiobus_read(bus, addr, MII_BMSR);
- lpa = mdiobus_read(bus, addr, MII_LPA);
+ bmsr = mdiodev_read(pcs, MII_BMSR);
+ lpa = mdiodev_read(pcs, MII_LPA);
if (bmsr < 0 || lpa < 0) {
state->link = false;
return;
@@ -2535,7 +2589,10 @@ void phylink_mii_c22_pcs_get_state(struct mdio_device *pcs,
state->link = !!(bmsr & BMSR_LSTATUS);
state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
- if (!state->link)
+ /* If there is no link or autonegotiation is disabled, the LP advertisement
+ * data is not meaningful, so don't go any further.
+ */
+ if (!state->link || !state->an_enabled)
return;
switch (state->interface) {
@@ -2580,9 +2637,6 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
phy_interface_t interface,
const unsigned long *advertising)
{
- struct mii_bus *bus = pcs->bus;
- int addr = pcs->addr;
- int val, ret;
u16 adv;
switch (interface) {
@@ -2596,32 +2650,10 @@ int phylink_mii_c22_pcs_set_advertisement(struct mdio_device *pcs,
advertising))
adv |= ADVERTISE_1000XPSE_ASYM;
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == adv)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, adv);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, adv);
case PHY_INTERFACE_MODE_SGMII:
- val = mdiobus_read(bus, addr, MII_ADVERTISE);
- if (val < 0)
- return val;
-
- if (val == 0x0001)
- return 0;
-
- ret = mdiobus_write(bus, addr, MII_ADVERTISE, 0x0001);
- if (ret < 0)
- return ret;
-
- return 1;
+ return mdiodev_modify_changed(pcs, MII_ADVERTISE, 0xffff, 0x0001);
default:
/* Nothing to do for other modes */
@@ -2658,9 +2690,13 @@ int phylink_mii_c22_pcs_config(struct mdio_device *pcs, unsigned int mode,
changed = ret > 0;
/* Ensure ISOLATE bit is disabled */
- bmcr = mode == MLO_AN_INBAND ? BMCR_ANENABLE : 0;
- ret = mdiobus_modify(pcs->bus, pcs->addr, MII_BMCR,
- BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
+ if (mode == MLO_AN_INBAND &&
+ linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, advertising))
+ bmcr = BMCR_ANENABLE;
+ else
+ bmcr = 0;
+
+ ret = mdiodev_modify(pcs, MII_BMCR, BMCR_ANENABLE | BMCR_ISOLATE, bmcr);
if (ret < 0)
return ret;
@@ -2681,14 +2717,12 @@ EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_config);
*/
void phylink_mii_c22_pcs_an_restart(struct mdio_device *pcs)
{
- struct mii_bus *bus = pcs->bus;
- int val, addr = pcs->addr;
+ int val = mdiodev_read(pcs, MII_BMCR);
- val = mdiobus_read(bus, addr, MII_BMCR);
if (val >= 0) {
val |= BMCR_ANRESTART;
- mdiobus_write(bus, addr, MII_BMCR, val);
+ mdiodev_write(pcs, MII_BMCR, val);
}
}
EXPORT_SYMBOL_GPL(phylink_mii_c22_pcs_an_restart);
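The conversions above replace open-coded mdiobus_read()/mdiobus_write() calls with the mdiodev_*() accessors, which take the bus and address from the mdio_device itself. A simplified sketch of that equivalence (the real helpers live in include/linux/mdio.h):

#include <linux/mdio.h>

static inline int example_mdiodev_read(struct mdio_device *mdiodev, u32 regnum)
{
        return mdiobus_read(mdiodev->bus, mdiodev->addr, regnum);
}

static inline int example_mdiodev_write(struct mdio_device *mdiodev,
                                        u32 regnum, u16 val)
{
        return mdiobus_write(mdiodev->bus, mdiodev->addr, regnum, val);
}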
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index 11be60333fa8..a5671ab896b3 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -1023,6 +1023,14 @@ static struct phy_driver realtek_drvs[] = {
.resume = genphy_resume,
.read_page = rtl821x_read_page,
.write_page = rtl821x_write_page,
+ }, {
+ PHY_ID_MATCH_EXACT(0x001cc942),
+ .name = "RTL8365MB-VC Gigabit Ethernet",
+ /* Interrupt handling analogous to RTL8366RB */
+ .config_intr = genphy_no_config_intr,
+ .handle_interrupt = genphy_handle_interrupt_no_ack,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
},
};
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 7362f8c3271c..0c6c0d1843bc 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -373,7 +373,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if (bus->sfp_quirk)
bus->sfp_quirk->modes(id, modes);
- bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
+ linkmode_or(support, support, modes);
phylink_set(support, Autoneg);
phylink_set(support, Pause);
diff --git a/drivers/net/plip/plip.c b/drivers/net/plip/plip.c
index 82d609401711..0d491b4d6667 100644
--- a/drivers/net/plip/plip.c
+++ b/drivers/net/plip/plip.c
@@ -284,12 +284,16 @@ static const struct net_device_ops plip_netdev_ops = {
static void
plip_init_netdev(struct net_device *dev)
{
+ static const u8 addr_init[ETH_ALEN] = {
+ 0xfc, 0xfc, 0xfc,
+ 0xfc, 0xfc, 0xfc,
+ };
struct net_local *nl = netdev_priv(dev);
/* Then, override parts of it */
dev->tx_queue_len = 10;
dev->flags = IFF_POINTOPOINT|IFF_NOARP;
- memset(dev->dev_addr, 0xfc, ETH_ALEN);
+ eth_hw_addr_set(dev, addr_init);
dev->netdev_ops = &plip_netdev_ops;
dev->header_ops = &plip_header_ops;
@@ -1109,7 +1113,7 @@ plip_open(struct net_device *dev)
plip_init_dev(). */
const struct in_ifaddr *ifa = rcu_dereference(in_dev->ifa_list);
if (ifa != NULL) {
- memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
+ dev_addr_mod(dev, 2, &ifa->ifa_local, 4);
}
}
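The plip change above is one of many hunks in this series that stop writing netdev->dev_addr directly and instead stage the address in a local buffer before committing it with eth_hw_addr_set(), dev_addr_set() or dev_addr_mod(). A hedged sketch of the pattern, with a hypothetical foo_set_hw_mac():

#include <linux/etherdevice.h>
#include <linux/string.h>

static void foo_set_hw_mac(struct net_device *dev, const u8 *hw_mac)
{
        u8 addr[ETH_ALEN];

        memcpy(addr, hw_mac, ETH_ALEN);         /* build the address locally */
        eth_hw_addr_set(dev, addr);             /* then commit it in one call */
}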
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fb52cd175b45..1180a0e2445f 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -1161,7 +1161,7 @@ static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
if (!ifname_is_set) {
while (1) {
snprintf(ppp->dev->name, IFNAMSIZ, "ppp%i", ret);
- if (!__dev_get_by_name(ppp->ppp_net, ppp->dev->name))
+ if (!netdev_name_in_use(ppp->ppp_net, ppp->dev->name))
break;
unit_put(&pn->units_idr, ret);
ret = unit_get(&pn->units_idr, ppp, ret + 1);
diff --git a/drivers/net/rionet.c b/drivers/net/rionet.c
index 2056d6ad04b5..1a95f3beb784 100644
--- a/drivers/net/rionet.c
+++ b/drivers/net/rionet.c
@@ -482,6 +482,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
{
int rc = 0;
struct rionet_private *rnet;
+ u8 addr[ETH_ALEN];
u16 device_id;
const size_t rionet_active_bytes = sizeof(void *) *
RIO_MAX_ROUTE_ENTRIES(mport->sys_size);
@@ -501,12 +502,13 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
/* Set the default MAC address */
device_id = rio_local_get_device_id(mport);
- ndev->dev_addr[0] = 0x00;
- ndev->dev_addr[1] = 0x01;
- ndev->dev_addr[2] = 0x00;
- ndev->dev_addr[3] = 0x01;
- ndev->dev_addr[4] = device_id >> 8;
- ndev->dev_addr[5] = device_id & 0xff;
+ addr[0] = 0x00;
+ addr[1] = 0x01;
+ addr[2] = 0x00;
+ addr[3] = 0x01;
+ addr[4] = device_id >> 8;
+ addr[5] = device_id & 0xff;
+ eth_hw_addr_set(ndev, addr);
ndev->netdev_ops = &rionet_netdev_ops;
ndev->mtu = RIONET_MAX_MTU;
diff --git a/drivers/net/sb1000.c b/drivers/net/sb1000.c
index f01c9db01b16..57a6d598467b 100644
--- a/drivers/net/sb1000.c
+++ b/drivers/net/sb1000.c
@@ -149,6 +149,7 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
unsigned short ioaddr[2], irq;
unsigned int serial_number;
int error = -ENODEV;
+ u8 addr[ETH_ALEN];
if (pnp_device_attach(pdev) < 0)
return -ENODEV;
@@ -203,10 +204,13 @@ sb1000_probe_one(struct pnp_dev *pdev, const struct pnp_device_id *id)
dev->netdev_ops = &sb1000_netdev_ops;
/* hardware address is 0:0:serial_number */
- dev->dev_addr[2] = serial_number >> 24 & 0xff;
- dev->dev_addr[3] = serial_number >> 16 & 0xff;
- dev->dev_addr[4] = serial_number >> 8 & 0xff;
- dev->dev_addr[5] = serial_number >> 0 & 0xff;
+ addr[0] = 0;
+ addr[1] = 0;
+ addr[2] = serial_number >> 24 & 0xff;
+ addr[3] = serial_number >> 16 & 0xff;
+ addr[4] = serial_number >> 8 & 0xff;
+ addr[5] = serial_number >> 0 & 0xff;
+ eth_hw_addr_set(dev, addr);
pnp_set_drvdata(pdev, dev);
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index dd7917cab2b1..8b2adc56b92a 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1790,7 +1790,7 @@ static int team_set_mac_address(struct net_device *dev, void *p)
if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(dev, addr->sa_data);
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list)
if (team->ops.port_change_dev_addr)
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index 9a6a8353e192..ff5d0e98a088 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -1202,17 +1202,19 @@ static void tbnet_generate_mac(struct net_device *dev)
{
const struct tbnet *net = netdev_priv(dev);
const struct tb_xdomain *xd = net->xd;
+ u8 addr[ETH_ALEN];
u8 phy_port;
u32 hash;
phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));
/* Unicast and locally administered MAC */
- dev->dev_addr[0] = phy_port << 4 | 0x02;
+ addr[0] = phy_port << 4 | 0x02;
hash = jhash2((u32 *)xd->local_uuid, 4, 0);
- memcpy(dev->dev_addr + 1, &hash, sizeof(hash));
+ memcpy(addr + 1, &hash, sizeof(hash));
hash = jhash2((u32 *)xd->local_uuid, 4, hash);
- dev->dev_addr[5] = hash & 0xff;
+ addr[5] = hash & 0xff;
+ eth_hw_addr_set(dev, addr);
}
static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index f87f17503373..b554054a7560 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -117,6 +117,7 @@ config USB_LAN78XX
select PHYLIB
select MICROCHIP_PHY
select FIXED_PHY
+ select CRC32
help
This option adds support for Microchip LAN78XX based USB 2
& USB 3 10/100/1000 Ethernet adapters.
diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c
index 73b97f4cc1ec..ea06d10e1c21 100644
--- a/drivers/net/usb/aqc111.c
+++ b/drivers/net/usb/aqc111.c
@@ -119,7 +119,7 @@ static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -714,7 +714,7 @@ static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf)
if (ret)
goto out;
- ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr);
+ eth_hw_addr_set(dev->net, dev->net->perm_addr);
/* Set Rx urb size */
dev->rx_urb_size = URB_SIZE;
diff --git a/drivers/net/usb/asix_common.c b/drivers/net/usb/asix_common.c
index 38cda590895c..42ba4af68090 100644
--- a/drivers/net/usb/asix_common.c
+++ b/drivers/net/usb/asix_common.c
@@ -791,7 +791,7 @@ int asix_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 30821f6a6d7a..4514d35ef4c4 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -59,7 +59,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
static void asix_set_netdev_dev_addr(struct usbnet *dev, u8 *addr)
{
if (is_valid_ether_addr(addr)) {
- memcpy(dev->net->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, addr);
} else {
netdev_info(dev->net, "invalid hw address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/ax88172a.c b/drivers/net/usb/ax88172a.c
index d9777d9a7c5d..3777c7e2e6fc 100644
--- a/drivers/net/usb/ax88172a.c
+++ b/drivers/net/usb/ax88172a.c
@@ -176,7 +176,7 @@ static int ax88172a_bind(struct usbnet *dev, struct usb_interface *intf)
ret = -EIO;
goto free;
}
- memcpy(dev->net->dev_addr, buf, ETH_ALEN);
+ eth_hw_addr_set(dev->net, buf);
dev->net->netdev_ops = &ax88172a_netdev_ops;
dev->net->ethtool_ops = &ax88172a_ethtool_ops;
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index f25448a08870..ea8aa8c33241 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -209,7 +209,7 @@ static int __ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int __ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data, int in_pm)
+ u16 size, const void *data, int in_pm)
{
int ret;
int (*fn)(struct usbnet *, u8, u8, u16, u16, const void *, u16);
@@ -272,7 +272,7 @@ static int ax88179_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
}
static int ax88179_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value,
- u16 index, u16 size, void *data)
+ u16 index, u16 size, const void *data)
{
int ret;
@@ -313,7 +313,7 @@ static int ax88179_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
}
static int ax88179_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
- u16 size, void *data)
+ u16 size, const void *data)
{
int ret;
@@ -463,7 +463,7 @@ static int ax88179_auto_detach(struct usbnet *dev, int in_pm)
u16 tmp16;
u8 tmp8;
int (*fnr)(struct usbnet *, u8, u16, u16, u16, void *);
- int (*fnw)(struct usbnet *, u8, u16, u16, u16, void *);
+ int (*fnw)(struct usbnet *, u8, u16, u16, u16, const void *);
if (!in_pm) {
fnr = ax88179_read_cmd;
@@ -1015,7 +1015,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* Set the MAC address */
ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
@@ -1310,7 +1310,7 @@ static void ax88179_get_mac_addr(struct usbnet *dev)
}
if (is_valid_ether_addr(mac)) {
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
} else {
netdev_info(dev->net, "invalid MAC address, using random\n");
eth_hw_addr_random(dev->net);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 97ba67042d12..e7fe9c0f63a9 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -615,7 +615,7 @@ static void catc_stats_timer(struct timer_list *t)
* Receive modes. Broadcast, Multicast, Promisc.
*/
-static void catc_multicast(unsigned char *addr, u8 *multicast)
+static void catc_multicast(const unsigned char *addr, u8 *multicast)
{
u32 crc;
@@ -770,17 +770,23 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int pktsz, ret;
+ u8 *macbuf;
+ int pktsz, ret = -ENOMEM;
+
+ macbuf = kmalloc(ETH_ALEN, GFP_KERNEL);
+ if (!macbuf)
+ goto error;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
dev_err(dev, "Can't set altsetting 1.\n");
- return -EIO;
+ ret = -EIO;
+ goto fail_mem;
}
netdev = alloc_etherdev(sizeof(struct catc));
if (!netdev)
- return -ENOMEM;
+ goto fail_mem;
catc = netdev_priv(netdev);
@@ -870,7 +876,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
dev_dbg(dev, "Getting MAC from SEEROM.\n");
- catc_get_mac(catc, netdev->dev_addr);
+ catc_get_mac(catc, macbuf);
+ eth_hw_addr_set(netdev, macbuf);
dev_dbg(dev, "Setting MAC into registers.\n");
@@ -899,7 +906,8 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
} else {
dev_dbg(dev, "Performing reset\n");
catc_reset(catc);
- catc_get_mac(catc, netdev->dev_addr);
+ catc_get_mac(catc, macbuf);
+ eth_hw_addr_set(netdev, macbuf);
dev_dbg(dev, "Setting RX Mode\n");
catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast;
@@ -917,6 +925,7 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
if (ret)
goto fail_clear_intfdata;
+ kfree(macbuf);
return 0;
fail_clear_intfdata:
@@ -927,6 +936,9 @@ fail_free:
usb_free_urb(catc->rx_urb);
usb_free_urb(catc->irq_urb);
free_netdev(netdev);
+fail_mem:
+ kfree(macbuf);
+error:
return ret;
}
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index e1da9102a540..ad5121e9cf5d 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -275,6 +275,8 @@ static const struct net_device_ops usbpn_ops = {
static void usbpn_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->netdev_ops = &usbpn_ops;
dev->header_ops = &phonet_header_ops;
@@ -284,8 +286,8 @@ static void usbpn_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
dev->tx_queue_len = 3;
dev->needs_free_netdev = true;
diff --git a/drivers/net/usb/ch9200.c b/drivers/net/usb/ch9200.c
index d7f3b70d5477..f69d9b902da0 100644
--- a/drivers/net/usb/ch9200.c
+++ b/drivers/net/usb/ch9200.c
@@ -336,6 +336,7 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
{
int retval = 0;
unsigned char data[2];
+ u8 addr[ETH_ALEN];
retval = usbnet_get_endpoints(dev, intf);
if (retval)
@@ -383,7 +384,8 @@ static int ch9200_bind(struct usbnet *dev, struct usb_interface *intf)
retval = control_write(dev, REQUEST_WRITE, 0, MAC_REG_CTRL, data, 0x02,
CONTROL_TIMEOUT_MS);
- retval = get_mac_address(dev, dev->net->dev_addr);
+ retval = get_mac_address(dev, addr);
+ eth_hw_addr_set(dev->net, addr);
return retval;
}
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index c4568a491dc4..79a47e2fd437 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -146,6 +146,7 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
u8 link[3];
int timeout = 50;
struct cx82310_priv *priv;
+ u8 addr[ETH_ALEN];
/* avoid ADSL modems - continue only if iProduct is "USB NET CARD" */
if (usb_string(udev, udev->descriptor.iProduct, buf, sizeof(buf)) > 0
@@ -202,12 +203,12 @@ static int cx82310_bind(struct usbnet *dev, struct usb_interface *intf)
goto err;
/* get the MAC address */
- ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0,
- dev->net->dev_addr, ETH_ALEN);
+ ret = cx82310_cmd(dev, CMD_GET_MAC_ADDR, true, NULL, 0, addr, ETH_ALEN);
if (ret) {
netdev_err(dev->net, "unable to read MAC address: %d\n", ret);
goto err;
}
+ eth_hw_addr_set(dev->net, addr);
/* start (does not seem to have any effect?) */
ret = cx82310_cmd(dev, CMD_START, false, NULL, 0, NULL, 0);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 907f98b1eefe..48d7d278631e 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -93,7 +93,8 @@ static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void dm_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, DM_WRITE_REGS,
USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
@@ -331,7 +332,7 @@ static int dm9601_set_mac_address(struct net_device *net, void *p)
return -EINVAL;
}
- memcpy(net->dev_addr, addr->sa_data, net->addr_len);
+ eth_hw_addr_set(net, addr->sa_data);
__dm9601_set_mac_address(dev);
return 0;
@@ -391,7 +392,7 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
* Overwrite the auto-generated address only with good ones.
*/
if (is_valid_ether_addr(mac))
- memcpy(dev->net->dev_addr, mac, ETH_ALEN);
+ eth_hw_addr_set(dev->net, mac);
else {
printk(KERN_WARNING
"dm9601: No valid MAC address in EEPROM, using %pM\n",
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 06e2181e5810..cd33955df0b6 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -303,7 +303,7 @@ static int ipheth_get_macaddr(struct ipheth_device *dev)
__func__, retval);
retval = -EINVAL;
} else {
- memcpy(net->dev_addr, dev->ctrl_buf, ETH_ALEN);
+ eth_hw_addr_set(net, dev->ctrl_buf);
retval = 0;
}
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index fc5895f85cee..9f2b70ef39aa 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -149,7 +149,7 @@ kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
if (status)
return status;
- memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
+ eth_hw_addr_set(dev->net, ethernet_addr);
return status;
}
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 144c686b4333..9b2bc1993ece 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1044,8 +1044,7 @@ err_fw:
goto err_all_but_rxbuf;
memcpy(netdev->broadcast, &bcast_addr, sizeof(bcast_addr));
- memcpy(netdev->dev_addr, &kaweth->configuration.hw_addr,
- sizeof(kaweth->configuration.hw_addr));
+ eth_hw_addr_set(netdev, (u8 *)&kaweth->configuration.hw_addr);
netdev->netdev_ops = &kaweth_netdev_ops;
netdev->watchdog_timeo = KAWETH_TX_TIMEOUT;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 793f8fbe0069..f20376c1ef3f 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -1817,7 +1817,7 @@ static void lan78xx_init_mac_address(struct lan78xx_net *dev)
lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
- ether_addr_copy(dev->net->dev_addr, addr);
+ eth_hw_addr_set(dev->net, addr);
}
/* MDIO read and write wrappers for phylib */
@@ -2416,7 +2416,7 @@ static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ether_addr_copy(netdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(netdev, addr->sa_data);
addr_lo = netdev->dev_addr[0] |
netdev->dev_addr[1] << 8 |
@@ -4122,6 +4122,12 @@ static int lan78xx_probe(struct usb_interface *intf,
dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out, 1);
+ /* Reject broken descriptors. */
+ if (dev->maxpacket == 0) {
+ ret = -ENODEV;
+ goto out4;
+ }
+
/* driver requires remote-wakeup capability during autosuspend. */
intf->needs_remote_wakeup = 1;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 66866bef25df..326cc4e749d8 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -132,7 +132,8 @@ static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
return 0;
}
-static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+static int mcs7830_hif_set_mac_address(struct usbnet *dev,
+ const unsigned char *addr)
{
int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
@@ -159,7 +160,7 @@ static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
return ret;
/* it worked --> adopt it on netdev side */
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
return 0;
}
@@ -472,17 +473,19 @@ static const struct net_device_ops mcs7830_netdev_ops = {
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
+ u8 addr[ETH_ALEN];
int ret;
int retry;
/* Initial startup: Gather MAC address setting from EEPROM */
ret = -EINVAL;
for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ ret = mcs7830_hif_get_mac_address(dev, addr);
if (ret) {
dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
goto out;
}
+ eth_hw_addr_set(net, addr);
mcs7830_data_set_multicast(net);
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6a92a3fef75e..c4cd40b090fd 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -357,7 +357,7 @@ static void set_ethernet_addr(pegasus_t *pegasus)
goto err;
}
- memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id));
+ eth_hw_addr_set(pegasus->net, node_id);
return;
err:
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 33ada2c59952..86b814e99224 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -835,8 +835,11 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
/* make MAC addr easily distinguishable from an IP header */
if (possibly_iphdr(dev->net->dev_addr)) {
- dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
- dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
+ u8 addr = dev->net->dev_addr[0];
+
+ addr |= 0x02; /* set local assignment bit */
+ addr &= 0xbf; /* clear "IP" bit */
+ dev_addr_mod(dev->net, 0, &addr, 1);
}
dev->net->netdev_ops = &qmi_wwan_netdev_ops;
dev->net->sysfs_groups[0] = &qmi_wwan_sysfs_attr_group;
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f329e39100a7..4a02f33f0643 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1571,7 +1571,7 @@ static int __rtl8152_set_mac_address(struct net_device *netdev, void *p,
mutex_lock(&tp->control);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES, 8, addr->sa_data);
@@ -1719,7 +1719,7 @@ static int set_ethernet_addr(struct r8152 *tp, bool in_resume)
return ret;
if (tp->version == RTL_VER_01)
- ether_addr_copy(dev->dev_addr, sa.sa_data);
+ eth_hw_addr_set(dev, sa.sa_data);
else
ret = __rtl8152_set_mac_address(dev, &sa, in_resume);
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 85a8b96e39a6..4a84f90e377c 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -421,7 +421,7 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
if (bp[0] & 0x02)
eth_hw_addr_random(net);
else
- ether_addr_copy(net->dev_addr, bp);
+ eth_hw_addr_set(net, bp);
/* set a nonzero filter to enable data transfers */
memset(u.set, 0, sizeof *u.set);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 4a1b0e0fc3a3..3d2bf2acca94 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -262,7 +262,7 @@ static void set_ethernet_addr(rtl8150_t *dev)
ret = get_registers(dev, IDR, sizeof(node_id), node_id);
if (!ret) {
- ether_addr_copy(dev->netdev->dev_addr, node_id);
+ eth_hw_addr_set(dev->netdev, node_id);
} else {
eth_hw_addr_random(dev->netdev);
netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n",
@@ -278,7 +278,7 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
if (netif_running(netdev))
return -EBUSY;
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
netdev_dbg(netdev, "Setting MAC address to %pM\n", netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 55025202dc4f..bb4cbe8fc846 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -669,6 +669,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
static const u8 shdwn_tmplate[sizeof(priv->shdwn_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_SHUTD_ID, 0x00};
+ u8 mod[2];
dev_dbg(&dev->udev->dev, "%s", __func__);
@@ -698,8 +699,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->netdev_ops = &sierra_net_device_ops;
/* change MAC addr to include ifacenum, and to be unique */
- dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
- dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
+ mod[0] = atomic_inc_return(&iface_counter);
+ mod[1] = ifacenum;
+ dev_addr_mod(dev->net, ETH_ALEN - 2, mod, 2);
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 76f7af161313..95de452ff4da 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -757,9 +757,10 @@ static int smsc75xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc75xx_init_mac_address(struct usbnet *dev)
{
+ u8 addr[ETH_ALEN];
+
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
@@ -768,8 +769,8 @@ static void smsc75xx_init_mac_address(struct usbnet *dev)
}
/* try reading mac address from EEPROM */
- if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- dev->net->dev_addr) == 0) {
+ if (smsc75xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) {
+ eth_hw_addr_set(dev->net, addr);
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
netif_dbg(dev, ifup, dev->net,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 26b1bd8e845b..20fe4cd8f784 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -755,9 +755,10 @@ static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
static void smsc95xx_init_mac_address(struct usbnet *dev)
{
+ u8 addr[ETH_ALEN];
+
/* maybe the boot loader passed the MAC address in devicetree */
- if (!eth_platform_get_mac_address(&dev->udev->dev,
- dev->net->dev_addr)) {
+ if (!platform_get_ethdev_address(&dev->udev->dev, dev->net)) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* device tree values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from the device tree\n");
@@ -766,8 +767,8 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
}
/* try reading mac address from EEPROM */
- if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN,
- dev->net->dev_addr) == 0) {
+ if (smsc95xx_read_eeprom(dev, EEPROM_MAC_OFFSET, ETH_ALEN, addr) == 0) {
+ eth_hw_addr_set(dev->net, addr);
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n");
diff --git a/drivers/net/usb/sr9700.c b/drivers/net/usb/sr9700.c
index 6516a37893e2..b658510cc9a4 100644
--- a/drivers/net/usb/sr9700.c
+++ b/drivers/net/usb/sr9700.c
@@ -56,7 +56,8 @@ static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
value, reg, NULL, 0);
}
-static void sr_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
+static void sr_write_async(struct usbnet *dev, u8 reg, u16 length,
+ const void *data)
{
usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
0, reg, data, length);
@@ -296,7 +297,7 @@ static int sr9700_set_mac_address(struct net_device *netdev, void *p)
return -EINVAL;
}
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ eth_hw_addr_set(netdev, addr->sa_data);
sr_write_async(dev, SR_PAR, 6, netdev->dev_addr);
return 0;
@@ -319,6 +320,7 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
{
struct net_device *netdev;
struct mii_if_info *mii;
+ u8 addr[ETH_ALEN];
int ret;
ret = usbnet_get_endpoints(dev, intf);
@@ -349,11 +351,12 @@ static int sr9700_bind(struct usbnet *dev, struct usb_interface *intf)
* EEPROM automatically to PAR. In case there is no EEPROM externally,
* a default MAC address is stored in PAR so the chip works properly.
*/
- if (sr_read(dev, SR_PAR, ETH_ALEN, netdev->dev_addr) < 0) {
+ if (sr_read(dev, SR_PAR, ETH_ALEN, addr) < 0) {
netdev_err(netdev, "Error reading MAC address\n");
ret = -ENODEV;
goto out;
}
+ eth_hw_addr_set(netdev, addr);
/* power up and reset phy */
sr_write_reg(dev, SR_PRR, PRR_PHY_RST);
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
index 576401c8b1be..f5e19f3ef6cd 100644
--- a/drivers/net/usb/sr9800.c
+++ b/drivers/net/usb/sr9800.c
@@ -503,7 +503,7 @@ static int sr_set_mac_address(struct net_device *net, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(net, addr->sa_data);
/* We use the 20 byte dev->data
* for our 6 byte mac buffer
@@ -731,6 +731,7 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
struct sr_data *data = (struct sr_data *)&dev->data;
u16 led01_mux, led23_mux;
int ret, embd_phy;
+ u8 addr[ETH_ALEN];
u32 phyid;
u16 rx_ctl;
@@ -754,12 +755,12 @@ static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
}
/* Get the MAC address */
- ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
- dev->net->dev_addr);
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN, addr);
if (ret < 0) {
netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
/* Initialize MII structure */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 840c1c2ab16a..9a6450f796dc 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -165,12 +165,13 @@ EXPORT_SYMBOL_GPL(usbnet_get_endpoints);
int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
{
+ u8 addr[ETH_ALEN];
int tmp = -1, ret;
unsigned char buf [13];
ret = usb_string(dev->udev, iMACAddress, buf, sizeof buf);
if (ret == 12)
- tmp = hex2bin(dev->net->dev_addr, buf, 6);
+ tmp = hex2bin(addr, buf, 6);
if (tmp < 0) {
dev_dbg(&dev->udev->dev,
"bad MAC string %d fetch, %d\n", iMACAddress, tmp);
@@ -178,6 +179,7 @@ int usbnet_get_ethernet_addr(struct usbnet *dev, int iMACAddress)
ret = -EINVAL;
return ret;
}
+ eth_hw_addr_set(dev->net, addr);
return 0;
}
EXPORT_SYMBOL_GPL(usbnet_get_ethernet_addr);
@@ -1726,7 +1728,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->net = net;
strscpy(net->name, "usb%d", sizeof(net->name));
- memcpy (net->dev_addr, node_id, sizeof node_id);
+ eth_hw_addr_set(net, node_id);
/* rx and tx sides can use different message sizes;
* bind() should set rx_urb_size in that case.
@@ -1788,6 +1790,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
if (!dev->rx_urb_size)
dev->rx_urb_size = dev->hard_mtu;
dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
+ if (dev->maxpacket == 0) {
+ /* that is a broken device */
+ status = -ENODEV;
+ goto out4;
+ }
/* let userspace know we have a random address */
if (ether_addr_equal(net->dev_addr, node_id))
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 4ad25a8b0870..1771d6e5224f 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -80,6 +80,7 @@ struct virtnet_sq_stats {
u64 xdp_tx;
u64 xdp_tx_drops;
u64 kicks;
+ u64 tx_timeouts;
};
struct virtnet_rq_stats {
@@ -103,6 +104,7 @@ static const struct virtnet_stat_desc virtnet_sq_stats_desc[] = {
{ "xdp_tx", VIRTNET_SQ_STAT(xdp_tx) },
{ "xdp_tx_drops", VIRTNET_SQ_STAT(xdp_tx_drops) },
{ "kicks", VIRTNET_SQ_STAT(kicks) },
+ { "tx_timeouts", VIRTNET_SQ_STAT(tx_timeouts) },
};
static const struct virtnet_stat_desc virtnet_rq_stats_desc[] = {
@@ -406,12 +408,13 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
* add_recvbuf_mergeable() + get_mergeable_buf_len()
*/
truesize = headroom ? PAGE_SIZE : truesize;
- tailroom = truesize - len - headroom - (hdr_padded_len - hdr_len);
+ tailroom = truesize - headroom;
buf = p - headroom;
len -= hdr_len;
offset += hdr_padded_len;
p += hdr_padded_len;
+ tailroom -= hdr_padded_len + len;
shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -732,6 +735,12 @@ static struct sk_buff *receive_small(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_len;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -814,6 +823,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
skb = build_skb(buf, buflen);
if (!skb) {
put_page(page);
@@ -895,6 +905,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
dev->stats.rx_length_errors++;
goto err_skb;
}
+
+ if (likely(!vi->xdp_enabled)) {
+ xdp_prog = NULL;
+ goto skip_xdp;
+ }
+
rcu_read_lock();
xdp_prog = rcu_dereference(rq->xdp_prog);
if (xdp_prog) {
@@ -1022,6 +1038,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
rcu_read_unlock();
+skip_xdp:
head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
metasize, headroom);
curr_skb = head_skb;
@@ -1860,7 +1877,7 @@ static void virtnet_stats(struct net_device *dev,
int i;
for (i = 0; i < vi->max_queue_pairs; i++) {
- u64 tpackets, tbytes, rpackets, rbytes, rdrops;
+ u64 tpackets, tbytes, terrors, rpackets, rbytes, rdrops;
struct receive_queue *rq = &vi->rq[i];
struct send_queue *sq = &vi->sq[i];
@@ -1868,6 +1885,7 @@ static void virtnet_stats(struct net_device *dev,
start = u64_stats_fetch_begin_irq(&sq->stats.syncp);
tpackets = sq->stats.packets;
tbytes = sq->stats.bytes;
+ terrors = sq->stats.tx_timeouts;
} while (u64_stats_fetch_retry_irq(&sq->stats.syncp, start));
do {
@@ -1882,6 +1900,7 @@ static void virtnet_stats(struct net_device *dev,
tot->rx_bytes += rbytes;
tot->tx_bytes += tbytes;
tot->rx_dropped += rdrops;
+ tot->tx_errors += terrors;
}
tot->tx_dropped = dev->stats.tx_dropped;
@@ -2534,8 +2553,8 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
/* XDP requires extra queues for XDP_TX */
if (curr_qp + xdp_qp > vi->max_queue_pairs) {
- netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
- curr_qp + xdp_qp, vi->max_queue_pairs);
+ netdev_warn_once(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
+ curr_qp + xdp_qp, vi->max_queue_pairs);
xdp_qp = 0;
}
@@ -2663,6 +2682,21 @@ static int virtnet_set_features(struct net_device *dev,
return 0;
}
+static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct virtnet_info *priv = netdev_priv(dev);
+ struct send_queue *sq = &priv->sq[txqueue];
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
+
+ u64_stats_update_begin(&sq->stats.syncp);
+ sq->stats.tx_timeouts++;
+ u64_stats_update_end(&sq->stats.syncp);
+
+ netdev_err(dev, "TX timeout on queue: %u, sq: %s, vq: 0x%x, name: %s, %u usecs ago\n",
+ txqueue, sq->name, sq->vq->index, sq->vq->name,
+ jiffies_to_usecs(jiffies - txq->trans_start));
+}
+
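The handler above is invoked by the core netdev watchdog once a TX queue has been stopped for longer than dev->watchdog_timeo; besides the log message it feeds the new per-queue tx_timeouts ethtool stat and, via virtnet_stats(), tot->tx_errors. A hedged sketch of how such a hook is wired up in a hypothetical driver:

#include <linux/netdevice.h>

static void foo_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
        netdev_err(dev, "TX timeout on queue %u\n", txqueue);
        /* typically bump a stat and schedule a reset of the stalled queue */
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_tx_timeout = foo_tx_timeout,
};

static void foo_setup(struct net_device *dev)
{
        dev->netdev_ops = &foo_netdev_ops;
        dev->watchdog_timeo = 5 * HZ;   /* watchdog fires after 5 seconds */
}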
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -2678,6 +2712,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_features_check = passthru_features_check,
.ndo_get_phys_port_name = virtnet_get_phys_port_name,
.ndo_set_features = virtnet_set_features,
+ .ndo_tx_timeout = virtnet_tx_timeout,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -3143,12 +3178,16 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->max_mtu = MAX_MTU;
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
+ u8 addr[ETH_ALEN];
+
virtio_cread_bytes(vdev,
offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
- else
+ addr, ETH_ALEN);
+ eth_hw_addr_set(dev, addr);
+ } else {
eth_hw_addr_random(dev);
+ }
/* Set up our device-specific information */
vi = netdev_priv(dev);
@@ -3384,6 +3423,7 @@ static struct virtio_driver virtio_net_driver = {
.feature_table_size = ARRAY_SIZE(features),
.feature_table_legacy = features_legacy,
.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
+ .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 142f70670f5c..14fae317bc70 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
static int enable_mq = 1;
static void
-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
/*
* Enable/Disable the given intr
@@ -2806,7 +2806,7 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
static void
-vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
+vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
{
u32 tmp;
@@ -2824,7 +2824,7 @@ vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
struct sockaddr *addr = p;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ dev_addr_set(netdev, addr->sa_data);
vmxnet3_write_mac_addr(adapter, addr->sa_data);
return 0;
@@ -3638,7 +3638,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
#endif
vmxnet3_read_mac_addr(adapter, mac);
- memcpy(netdev->dev_addr, mac, netdev->addr_len);
+ dev_addr_set(netdev, mac);
netdev->netdev_ops = &vmxnet3_netdev_ops;
vmxnet3_set_ethtool_ops(netdev);
@@ -3833,7 +3833,6 @@ vmxnet3_suspend(struct device *device)
vmxnet3_free_intr_resources(adapter);
netif_device_detach(netdev);
- netif_tx_stop_all_queues(netdev);
/* Create wake-up filters. */
pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 5dd8360b21a0..16f3a2057b90 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1134,9 +1134,8 @@ static int vmxnet3_set_coalesce(struct net_device *netdev,
}
if (ec->use_adaptive_rx_coalesce != 0) {
- if ((ec->rx_coalesce_usecs != 0) ||
- (ec->tx_max_coalesced_frames != 0) ||
- (ec->rx_max_coalesced_frames != 0)) {
+ if (ec->tx_max_coalesced_frames != 0 ||
+ ec->rx_max_coalesced_frames != 0) {
return -EINVAL;
}
memset(adapter->coal_conf, 0, sizeof(*adapter->coal_conf));
@@ -1146,11 +1145,6 @@ static int vmxnet3_set_coalesce(struct net_device *netdev,
if ((ec->tx_max_coalesced_frames != 0) ||
(ec->rx_max_coalesced_frames != 0)) {
- if ((ec->rx_coalesce_usecs != 0) ||
- (ec->use_adaptive_rx_coalesce != 0)) {
- return -EINVAL;
- }
-
if ((ec->tx_max_coalesced_frames >
VMXNET3_COAL_STATIC_MAX_DEPTH) ||
(ec->rx_max_coalesced_frames >
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index bf2fac913942..ccf677015d5b 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -35,6 +35,7 @@
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>
+#include <net/netfilter/nf_conntrack.h>
#define DRV_NAME "vrf"
#define DRV_VERSION "1.1"
@@ -424,12 +425,26 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
return NETDEV_TX_OK;
}
+static void vrf_nf_set_untracked(struct sk_buff *skb)
+{
+ if (skb_get_nfct(skb) == 0)
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+}
+
+static void vrf_nf_reset_ct(struct sk_buff *skb)
+{
+ if (skb_get_nfct(skb) == IP_CT_UNTRACKED)
+ nf_reset_ct(skb);
+}
+
#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
int err;
+ vrf_nf_reset_ct(skb);
+
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
sk, skb, NULL, skb_dst(skb)->dev, dst_output);
@@ -508,6 +523,8 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
{
int err;
+ vrf_nf_reset_ct(skb);
+
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, skb_dst(skb)->dev, dst_output);
if (likely(err == 1))
@@ -626,8 +643,7 @@ static void vrf_finish_direct(struct sk_buff *skb)
skb_pull(skb, ETH_HLEN);
}
- /* reset skb device */
- nf_reset_ct(skb);
+ vrf_nf_reset_ct(skb);
}
#if IS_ENABLED(CONFIG_IPV6)
@@ -641,7 +657,7 @@ static int vrf_finish_output6(struct net *net, struct sock *sk,
struct neighbour *neigh;
int ret;
- nf_reset_ct(skb);
+ vrf_nf_reset_ct(skb);
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
@@ -752,6 +768,8 @@ static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
+ vrf_nf_set_untracked(skb);
+
err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip6_out_direct_finish);
@@ -858,7 +876,7 @@ static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
struct neighbour *neigh;
bool is_v6gw = false;
- nf_reset_ct(skb);
+ vrf_nf_reset_ct(skb);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
@@ -980,6 +998,8 @@ static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
skb->dev = vrf_dev;
+ vrf_nf_set_untracked(skb);
+
err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
skb, NULL, vrf_dev, vrf_ip_out_direct_finish);
@@ -1360,8 +1380,6 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
bool is_ndisc = ipv6_ndisc_frame(skb);
- nf_reset_ct(skb);
-
/* loopback, multicast & non-ND link-local traffic; do not push through
* packet taps again. Reset pkt_type for upper layers to process skb.
* For strict packets with a source LLA, determine the dst using the
@@ -1424,8 +1442,6 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
skb->skb_iif = vrf_dev->ifindex;
IPCB(skb)->flags |= IPSKB_L3SLAVE;
- nf_reset_ct(skb);
-
if (ipv4_is_multicast(ip_hdr(skb)->daddr))
goto out;
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 7637edce443e..81e72bc1891f 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1093,7 +1093,9 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
} else {
- *(__be16 *)dev->dev_addr = htons(dlci);
+ __be16 addr = htons(dlci);
+
+ dev_addr_set(dev, (u8 *)&addr);
dlci_to_q922(dev->broadcast, dlci);
}
dev->netdev_ops = &pvc_ops;
diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c
index 89d31adc3809..282192b82404 100644
--- a/drivers/net/wan/lapbether.c
+++ b/drivers/net/wan/lapbether.c
@@ -301,7 +301,7 @@ static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
- memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);
+ dev_addr_set(dev, sa->sa_data);
return 0;
}
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 49cc4b7ed516..0e9bad33fac8 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1772,9 +1772,8 @@ static const struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
SMCWUSBT-G2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1, TEW444UBEU */
AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
- AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
AR5523_DEVICE_UG(0x1435, 0x0826), /* Wistronneweb / AR5523_1 */
AR5523_DEVICE_UX(0x1435, 0x0828), /* Wistronneweb / AR5523_2 */
AR5523_DEVICE_UG(0x0cde, 0x0012), /* Zcom / AR5523 */
diff --git a/drivers/net/wireless/ath/ath10k/bmi.h b/drivers/net/wireless/ath/ath10k/bmi.h
index f6fadcbdd86e..0685c0d2d4ea 100644
--- a/drivers/net/wireless/ath/ath10k/bmi.h
+++ b/drivers/net/wireless/ath/ath10k/bmi.h
@@ -109,7 +109,7 @@ struct bmi_cmd {
struct {
__le32 addr;
__le32 len;
- u8 payload[0];
+ u8 payload[];
} write_mem;
struct {
__le32 addr;
@@ -138,18 +138,18 @@ struct bmi_cmd {
} rompatch_uninstall;
struct {
__le32 count;
- __le32 patch_ids[0]; /* length of @count */
+ __le32 patch_ids[]; /* length of @count */
} rompatch_activate;
struct {
__le32 count;
- __le32 patch_ids[0]; /* length of @count */
+ __le32 patch_ids[]; /* length of @count */
} rompatch_deactivate;
struct {
__le32 addr;
} lz_start;
struct {
__le32 len; /* max BMI_MAX_DATA_SIZE */
- u8 payload[0]; /* length of @len */
+ u8 payload[]; /* length of @len */
} lz_data;
struct {
u8 name[BMI_NVRAM_SEG_NAME_SZ];
@@ -160,7 +160,7 @@ struct bmi_cmd {
union bmi_resp {
struct {
- u8 payload[0];
+ DECLARE_FLEX_ARRAY(u8, payload);
} read_mem;
struct {
__le32 result;
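
The bmi.h change above (and the htt.h change further down) converts zero-length arrays to C99 flexible array members; the read_mem response needs DECLARE_FLEX_ARRAY() because a flexible array may not be the only member of a struct, nor sit directly in a union. A standalone sketch of the rule with made-up struct names; DECLARE_FLEX_ARRAY() itself is the real macro from <linux/stddef.h>:

    #include <linux/stddef.h>        /* DECLARE_FLEX_ARRAY() */
    #include <linux/types.h>

    struct example_cmd {
            __le32 len;
            u8 payload[];            /* fine: preceded by another member */
    } __packed;

    struct example_resp {
            union {
                    /* A bare 'u8 payload[];' is not valid here; the macro
                     * wraps it in an anonymous struct (with an empty leading
                     * member) so the flexible array stays legal.
                     */
                    DECLARE_FLEX_ARRAY(u8, payload);
                    __le32 result;
            };
    } __packed;
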
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 2f9be182fbfb..5935e0973d14 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2690,9 +2690,16 @@ static int ath10k_core_copy_target_iram(struct ath10k *ar)
int i, ret;
u32 len, remaining_len;
- hw_mem = ath10k_coredump_get_mem_layout(ar);
+ /* The copy target iram feature must also work when
+ * ATH10K_FW_CRASH_DUMP_RAM_DATA is disabled, so use
+ * _ath10k_coredump_get_mem_layout() to accomplish that.
+ */
+ hw_mem = _ath10k_coredump_get_mem_layout(ar);
if (!hw_mem)
- return -ENOMEM;
+ /* if CONFIG_DEV_COREDUMP is disabled we get NULL; in that
+ * case just silently disable the feature by doing nothing
+ */
+ return 0;
for (i = 0; i < hw_mem->region_table.size; i++) {
tmp = &hw_mem->region_table.regions[i];
@@ -3224,7 +3231,7 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
ath10k_debug_print_board_info(ar);
}
- device_get_mac_address(ar->dev, ar->mac_addr, sizeof(ar->mac_addr));
+ device_get_mac_address(ar->dev, ar->mac_addr);
ret = ath10k_core_init_firmware_features(ar);
if (ret) {
@@ -3520,13 +3527,10 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
- flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
- flush_workqueue(ar->workqueue_aux);
destroy_workqueue(ar->workqueue_aux);
- flush_workqueue(ar->workqueue_tx_complete);
destroy_workqueue(ar->workqueue_tx_complete);
ath10k_debug_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c
index 7eb72290a925..55e7e11d06d9 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.c
+++ b/drivers/net/wireless/ath/ath10k/coredump.c
@@ -1447,11 +1447,17 @@ static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar)
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar)
{
- int i;
-
if (!test_bit(ATH10K_FW_CRASH_DUMP_RAM_DATA, &ath10k_coredump_mask))
return NULL;
+ return _ath10k_coredump_get_mem_layout(ar);
+}
+EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
+
+const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ int i;
+
if (WARN_ON(ar->target_version == 0))
return NULL;
@@ -1464,7 +1470,6 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k
return NULL;
}
-EXPORT_SYMBOL(ath10k_coredump_get_mem_layout);
struct ath10k_fw_crash_data *ath10k_coredump_new(struct ath10k *ar)
{
diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h
index 42404e246e0e..240d70515088 100644
--- a/drivers/net/wireless/ath/ath10k/coredump.h
+++ b/drivers/net/wireless/ath/ath10k/coredump.h
@@ -176,6 +176,7 @@ int ath10k_coredump_register(struct ath10k *ar);
void ath10k_coredump_unregister(struct ath10k *ar);
void ath10k_coredump_destroy(struct ath10k *ar);
+const struct ath10k_hw_mem_layout *_ath10k_coredump_get_mem_layout(struct ath10k *ar);
const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k *ar);
#else /* CONFIG_DEV_COREDUMP */
@@ -214,6 +215,12 @@ ath10k_coredump_get_mem_layout(struct ath10k *ar)
return NULL;
}
+static inline const struct ath10k_hw_mem_layout *
+_ath10k_coredump_get_mem_layout(struct ath10k *ar)
+{
+ return NULL;
+}
+
#endif /* CONFIG_DEV_COREDUMP */
#endif /* _COREDUMP_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/htt.h b/drivers/net/wireless/ath/ath10k/htt.h
index ec689e3ce48a..a6de08d3bf4a 100644
--- a/drivers/net/wireless/ath/ath10k/htt.h
+++ b/drivers/net/wireless/ath/ath10k/htt.h
@@ -1674,8 +1674,11 @@ struct htt_tx_fetch_ind {
__le32 token;
__le16 num_resp_ids;
__le16 num_records;
- __le32 resp_ids[0]; /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
- struct htt_tx_fetch_record records[];
+ union {
+ /* ath10k_htt_get_tx_fetch_ind_resp_ids() */
+ DECLARE_FLEX_ARRAY(__le32, resp_ids);
+ DECLARE_FLEX_ARRAY(struct htt_tx_fetch_record, records);
+ };
} __packed;
static inline void *
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index c272b290fa73..1f73fbfee0c0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -993,8 +993,12 @@ static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
ath10k_mac_vif_beacon_free(arvif);
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
}
@@ -1048,7 +1052,7 @@ static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
arg.channel.min_power = 0;
arg.channel.max_power = channel->max_power * 2;
arg.channel.max_reg_power = channel->max_reg_power * 2;
- arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain;
reinit_completion(&ar->vdev_setup_done);
reinit_completion(&ar->vdev_delete_done);
@@ -1494,7 +1498,7 @@ static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
arg.channel.min_power = 0;
arg.channel.max_power = chandef->chan->max_power * 2;
arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
- arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
+ arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
arg.ssid = arvif->u.ap.ssid;
@@ -3422,7 +3426,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->min_power = 0;
ch->max_power = channel->max_power * 2;
ch->max_reg_power = channel->max_reg_power * 2;
- ch->max_antenna_gain = channel->max_antenna_gain * 2;
+ ch->max_antenna_gain = channel->max_antenna_gain;
ch->reg_class_id = 0; /* FIXME */
/* FIXME: why use only legacy modes, why not any
@@ -5576,10 +5580,25 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT ||
vif->type == NL80211_IFTYPE_AP) {
- arvif->beacon_buf = dma_alloc_coherent(ar->dev,
- IEEE80211_MAX_FRAME_LEN,
- &arvif->beacon_paddr,
- GFP_ATOMIC);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
+ arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
+ GFP_KERNEL);
+
+ /* Using a kernel pointer in place of a dma_addr_t
+ * token can lead to undefined behavior if that
+ * makes it into cache management functions. Use a
+ * known-invalid address token instead, which
+ * avoids the warning and makes it easier to catch
+ * bugs if it does end up getting used.
+ */
+ arvif->beacon_paddr = DMA_MAPPING_ERROR;
+ } else {
+ arvif->beacon_buf =
+ dma_alloc_coherent(ar->dev,
+ IEEE80211_MAX_FRAME_LEN,
+ &arvif->beacon_paddr,
+ GFP_ATOMIC);
+ }
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
@@ -5794,8 +5813,12 @@ err_vdev_delete:
err:
if (arvif->beacon_buf) {
- dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
- arvif->beacon_buf, arvif->beacon_paddr);
+ if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
+ kfree(arvif->beacon_buf);
+ else
+ dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
+ arvif->beacon_buf,
+ arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c
index 07e478f9a808..80fcb917fe4e 100644
--- a/drivers/net/wireless/ath/ath10k/qmi.c
+++ b/drivers/net/wireless/ath/ath10k/qmi.c
@@ -864,7 +864,8 @@ static void ath10k_qmi_event_server_exit(struct ath10k_qmi *qmi)
ath10k_qmi_remove_msa_permission(qmi);
ath10k_core_free_board_files(ar);
- if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags))
+ if (!test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags) &&
+ !test_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags))
ath10k_snoc_fw_crashed_dump(ar);
ath10k_snoc_fw_indication(ar, ATH10K_QMI_EVENT_FW_DOWN_IND);
diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
index b746052737e0..63e1c2d783c5 100644
--- a/drivers/net/wireless/ath/ath10k/sdio.c
+++ b/drivers/net/wireless/ath/ath10k/sdio.c
@@ -1363,8 +1363,11 @@ static void ath10k_rx_indication_async_work(struct work_struct *work)
ep->ep_ops.ep_rx_complete(ar, skb);
}
- if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags))
+ if (test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) {
+ local_bh_disable();
napi_schedule(&ar->napi);
+ local_bh_enable();
+ }
}
static int ath10k_sdio_read_rtc_state(struct ath10k_sdio *ar_sdio, unsigned char *state)
@@ -2647,7 +2650,6 @@ static void ath10k_sdio_remove(struct sdio_func *func)
ath10k_core_destroy(ar);
- flush_workqueue(ar_sdio->workqueue);
destroy_workqueue(ar_sdio->workqueue);
}
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index ea00fbb15601..9513ab696fff 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
+#include <linux/remoteproc/qcom_rproc.h>
#include <linux/of_address.h>
#include <linux/iommu.h>
@@ -1477,6 +1478,74 @@ void ath10k_snoc_fw_crashed_dump(struct ath10k *ar)
mutex_unlock(&ar->dump_mutex);
}
+static int ath10k_snoc_modem_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct ath10k_snoc *ar_snoc = container_of(nb, struct ath10k_snoc, nb);
+ struct ath10k *ar = ar_snoc->ar;
+ struct qcom_ssr_notify_data *notify_data = data;
+
+ switch (action) {
+ case QCOM_SSR_BEFORE_POWERUP:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem starting event\n");
+ clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ break;
+
+ case QCOM_SSR_AFTER_POWERUP:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem running event\n");
+ break;
+
+ case QCOM_SSR_BEFORE_SHUTDOWN:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem %s event\n",
+ notify_data->crashed ? "crashed" : "stopping");
+ if (!notify_data->crashed)
+ set_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ else
+ clear_bit(ATH10K_SNOC_FLAG_MODEM_STOPPED, &ar_snoc->flags);
+ break;
+
+ case QCOM_SSR_AFTER_SHUTDOWN:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "received modem offline event\n");
+ break;
+
+ default:
+ ath10k_err(ar, "received unrecognized event %lu\n", action);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int ath10k_modem_init(struct ath10k *ar)
+{
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+ void *notifier;
+ int ret;
+
+ ar_snoc->nb.notifier_call = ath10k_snoc_modem_notify;
+
+ notifier = qcom_register_ssr_notifier("mpss", &ar_snoc->nb);
+ if (IS_ERR(notifier)) {
+ ret = PTR_ERR(notifier);
+ ath10k_err(ar, "failed to initialize modem notifier: %d\n", ret);
+ return ret;
+ }
+
+ ar_snoc->notifier = notifier;
+
+ return 0;
+}
+
+static void ath10k_modem_deinit(struct ath10k *ar)
+{
+ int ret;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ ret = qcom_unregister_ssr_notifier(ar_snoc->notifier, &ar_snoc->nb);
+ if (ret)
+ ath10k_err(ar, "error %d unregistering notifier\n", ret);
+}
+
static int ath10k_setup_msa_resources(struct ath10k *ar, u32 msa_size)
{
struct device *dev = ar->dev;
@@ -1740,10 +1809,17 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
goto err_fw_deinit;
}
+ ret = ath10k_modem_init(ar);
+ if (ret)
+ goto err_qmi_deinit;
+
ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n");
return 0;
+err_qmi_deinit:
+ ath10k_qmi_deinit(ar);
+
err_fw_deinit:
ath10k_fw_deinit(ar);
@@ -1771,6 +1847,7 @@ static int ath10k_snoc_free_resources(struct ath10k *ar)
ath10k_fw_deinit(ar);
ath10k_snoc_free_irq(ar);
ath10k_snoc_release_resource(ar);
+ ath10k_modem_deinit(ar);
ath10k_qmi_deinit(ar);
ath10k_core_destroy(ar);
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index 5095d1893681..d4bce1707696 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -6,6 +6,8 @@
#ifndef _SNOC_H_
#define _SNOC_H_
+#include <linux/notifier.h>
+
#include "hw.h"
#include "ce.h"
#include "qmi.h"
@@ -45,6 +47,7 @@ struct ath10k_snoc_ce_irq {
enum ath10k_snoc_flags {
ATH10K_SNOC_FLAG_REGISTERED,
ATH10K_SNOC_FLAG_UNREGISTERING,
+ ATH10K_SNOC_FLAG_MODEM_STOPPED,
ATH10K_SNOC_FLAG_RECOVERY,
ATH10K_SNOC_FLAG_8BIT_HOST_CAP_QUIRK,
};
@@ -75,6 +78,8 @@ struct ath10k_snoc {
struct clk_bulk_data *clks;
size_t num_clks;
struct ath10k_qmi *qmi;
+ struct notifier_block nb;
+ void *notifier;
unsigned long flags;
bool xo_cal_supported;
u32 xo_cal_data;
diff --git a/drivers/net/wireless/ath/ath10k/usb.c b/drivers/net/wireless/ath/ath10k/usb.c
index 19b9c27e30e2..3d98f19c6ec8 100644
--- a/drivers/net/wireless/ath/ath10k/usb.c
+++ b/drivers/net/wireless/ath/ath10k/usb.c
@@ -525,7 +525,7 @@ static int ath10k_usb_submit_ctrl_in(struct ath10k *ar,
req,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, buf,
- size, 2 * HZ);
+ size, 2000);
if (ret < 0) {
ath10k_warn(ar, "Failed to read usb control message: %d\n",
@@ -853,6 +853,11 @@ static int ath10k_usb_setup_pipe_resources(struct ath10k *ar,
le16_to_cpu(endpoint->wMaxPacketSize),
endpoint->bInterval);
}
+
+ /* Ignore broken descriptors. */
+ if (usb_endpoint_maxp(endpoint) == 0)
+ continue;
+
urbcount = 0;
pipe_num =
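
The ath10k/usb.c hunk above replaces "2 * HZ" with 2000 because usb_control_msg() takes its timeout in milliseconds, not jiffies; with HZ=100 the old expression meant a 200 ms timeout rather than the intended two seconds. For reference (the constant name below is hypothetical):

    /* int usb_control_msg(struct usb_device *dev, unsigned int pipe,
     *                     __u8 request, __u8 requesttype, __u16 value,
     *                     __u16 index, void *data, __u16 size, int timeout);
     *
     * 'timeout' is in milliseconds, so use a fixed value rather than anything
     * derived from HZ (which counts jiffies per second).
     */
    #define EXAMPLE_USB_CTRL_TIMEOUT_MS  2000
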
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index b8a4bbfe10b8..7c1c2658cb5f 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -2610,6 +2610,10 @@ int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (ieee80211_is_beacon(hdr->frame_control))
ath10k_mac_handle_beacon(ar, skb);
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ status->boottime_ns = ktime_get_boottime_ns();
+
ath10k_dbg(ar, ATH10K_DBG_MGMT,
"event mgmt rx skb %pK len %d ftype %02x stype %02x\n",
skb, skb->len,
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index 41c1a3d339c2..01bfd09a9d88 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -2066,7 +2066,9 @@ struct wmi_channel {
union {
__le32 reginfo1;
struct {
+ /* note: power unit is 1 dBm */
u8 antenna_max;
+ /* note: power unit is 0.5 dBm */
u8 max_tx_power;
} __packed;
} __packed;
@@ -2086,6 +2088,7 @@ struct wmi_channel_arg {
u32 min_power;
u32 max_power;
u32 max_reg_power;
+ /* note: power unit is 1 dBm */
u32 max_antenna_gain;
u32 reg_class_id;
enum wmi_phy_mode mode;
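
The ath10k/mac.c hunks above drop the "* 2" on max_antenna_gain, and the wmi.h comments spell out why: the tx power fields are carried in 0.5 dBm units while the antenna gain is in whole dBm, so only the power values should be doubled. Illustrative helpers with hypothetical names that make the two units explicit:

    /* Illustrative only: the two units used by the WMI channel arguments. */
    static inline int example_dbm_to_half_dbm(int power_dbm)
    {
            return power_dbm * 2;    /* e.g. 20 dBm -> 40 in 0.5 dBm units */
    }

    static inline int example_dbm_passthrough(int gain_dbm)
    {
            return gain_dbm;         /* antenna gain stays in 1 dBm units */
    }

With the old scaling, the gain value handed to firmware was twice the real antenna gain.
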
diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c
index 969bf1a590d9..b5a2af3ffc3e 100644
--- a/drivers/net/wireless/ath/ath11k/core.c
+++ b/drivers/net/wireless/ath/ath11k/core.c
@@ -37,7 +37,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ8074/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -58,8 +58,17 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
- .tcl_0_only = false,
- .spectral_fft_sz = 2,
+
+ .spectral = {
+ .fft_sz = 2,
+ /* HW bug: the expected BIN size is 2 bytes, but the HW reports
+ * 4 bytes, so add a pad size of 2 bytes to compensate for it.
+ */
+ .fft_pad_sz = 2,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -71,6 +80,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -78,7 +89,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "IPQ6018/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 2,
.bdf_addr = 0x4ABC0000,
@@ -99,8 +110,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
- .tcl_0_only = false,
- .spectral_fft_sz = 4,
+
+ .spectral = {
+ .fft_sz = 4,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 16,
+ .max_fft_bins = 512,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -112,6 +129,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.name = "qca6390 hw2.0",
@@ -119,7 +138,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCA6390/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -140,8 +159,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
- .tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -152,6 +177,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
.fix_l1ss = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
},
{
.name = "qcn9074 hw1.0",
@@ -159,7 +186,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "QCN9074/hw1.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 1,
.single_pdev_only = false,
@@ -179,7 +206,15 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.rx_mac_buf_ring = false,
.vdev_start_delay = false,
.htt_peer_map_v2 = true,
- .tcl_0_only = false,
+
+ .spectral = {
+ .fft_sz = 2,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 16,
+ .fft_hdr_len = 24,
+ .max_fft_bins = 1024,
+ },
+
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT),
@@ -190,6 +225,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_suspend = false,
.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
.fix_l1ss = true,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX,
+ .hal_params = &ath11k_hw_hal_params_ipq8074,
},
{
.name = "wcn6855 hw2.0",
@@ -197,7 +234,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.fw = {
.dir = "WCN6855/hw2.0",
.board_size = 256 * 1024,
- .cal_size = 256 * 1024,
+ .cal_offset = 128 * 1024,
},
.max_radios = 3,
.bdf_addr = 0x4B0C0000,
@@ -218,8 +255,14 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.rx_mac_buf_ring = true,
.vdev_start_delay = true,
.htt_peer_map_v2 = false,
- .tcl_0_only = true,
- .spectral_fft_sz = 0,
+
+ .spectral = {
+ .fft_sz = 0,
+ .fft_pad_sz = 0,
+ .summary_pad_sz = 0,
+ .fft_hdr_len = 0,
+ .max_fft_bins = 0,
+ },
.interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
@@ -230,6 +273,8 @@ static const struct ath11k_hw_params ath11k_hw_params[] = {
.supports_suspend = true,
.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
.fix_l1ss = false,
+ .max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+ .hal_params = &ath11k_hw_hal_params_qca6390,
},
};
diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h
index 018fb2385f2a..31d234a51c79 100644
--- a/drivers/net/wireless/ath/ath11k/core.h
+++ b/drivers/net/wireless/ath/ath11k/core.h
@@ -93,6 +93,8 @@ struct ath11k_skb_rxcb {
bool is_first_msdu;
bool is_last_msdu;
bool is_continuation;
+ bool is_mcbc;
+ bool is_eapol;
struct hal_rx_desc *rx_desc;
u8 err_rel_src;
u8 err_code;
@@ -100,6 +102,8 @@ struct ath11k_skb_rxcb {
u8 unmapped;
u8 is_frag;
u8 tid;
+ u16 peer_id;
+ u16 seq_no;
};
enum ath11k_hw_rev {
@@ -193,7 +197,9 @@ enum ath11k_dev_flags {
};
enum ath11k_monitor_flags {
- ATH11K_FLAG_MONITOR_ENABLED,
+ ATH11K_FLAG_MONITOR_CONF_ENABLED,
+ ATH11K_FLAG_MONITOR_STARTED,
+ ATH11K_FLAG_MONITOR_VDEV_CREATED,
};
struct ath11k_vif {
@@ -362,6 +368,7 @@ struct ath11k_sta {
enum hal_pn_type pn_type;
struct work_struct update_wk;
+ struct work_struct set_4addr_wk;
struct rate_info txrate;
struct rate_info last_txrate;
u64 rx_duration;
@@ -374,12 +381,15 @@ struct ath11k_sta {
/* protected by conf_mutex */
bool aggr_mode;
#endif
+
+ bool use_4addr_set;
+ u16 tcl_metadata;
};
#define ATH11K_MIN_5G_FREQ 4150
-#define ATH11K_MIN_6G_FREQ 5945
+#define ATH11K_MIN_6G_FREQ 5925
#define ATH11K_MAX_6G_FREQ 7115
-#define ATH11K_NUM_CHANS 100
+#define ATH11K_NUM_CHANS 101
#define ATH11K_MAX_5G_CHAN 173
enum ath11k_state {
@@ -484,7 +494,6 @@ struct ath11k {
u32 chan_tx_pwr;
u32 num_stations;
u32 max_num_stations;
- bool monitor_present;
/* To synchronize concurrent synchronous mac80211 callback operations,
* concurrent debugfs configuration and concurrent FW statistics events.
*/
@@ -559,6 +568,7 @@ struct ath11k {
struct ath11k_per_peer_tx_stats cached_stats;
u32 last_ppdu_id;
u32 cached_ppdu_id;
+ int monitor_vdev_id;
#ifdef CONFIG_ATH11K_DEBUGFS
struct ath11k_debug debug;
#endif
@@ -591,6 +601,8 @@ struct ath11k_pdev_cap {
u32 tx_chain_mask_shift;
u32 rx_chain_mask_shift;
struct ath11k_band_cap band[NUM_NL80211_BANDS];
+ bool nss_ratio_enabled;
+ u8 nss_ratio_info;
};
struct ath11k_pdev {
@@ -794,12 +806,15 @@ struct ath11k_fw_stats_pdev {
s32 hw_reaped;
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
/* Num MPDUs requeued by SW */
s32 mpdus_requeued;
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
/* data hw rate code */
u32 data_rc;
/* Scheduler self triggers */
@@ -820,6 +835,30 @@ struct ath11k_fw_stats_pdev {
u32 phy_underrun;
/* MPDU is more than txop limit */
u32 txop_ovf;
+ /* Num sequences posted */
+ u32 seq_posted;
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+ /* Num sequences completed */
+ u32 seq_completed;
+ /* Num sequences restarted */
+ u32 seq_restarted;
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+ /* Num MPDUs that were dropped due to expiry. */
+ s32 mpdus_expired;
/* PDEV RX stats */
/* Cnts any change in ring routing mid-ppdu */
@@ -845,6 +884,8 @@ struct ath11k_fw_stats_pdev {
s32 phy_err_drop;
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
};
struct ath11k_fw_stats_vdev {
diff --git a/drivers/net/wireless/ath/ath11k/dbring.c b/drivers/net/wireless/ath/ath11k/dbring.c
index 5e1f5437b418..fd98ba5b1130 100644
--- a/drivers/net/wireless/ath/ath11k/dbring.c
+++ b/drivers/net/wireless/ath/ath11k/dbring.c
@@ -8,8 +8,7 @@
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
struct ath11k_dbring *ring,
- struct ath11k_dbring_element *buff,
- gfp_t gfp)
+ struct ath11k_dbring_element *buff)
{
struct ath11k_base *ab = ar->ab;
struct hal_srng *srng;
@@ -35,7 +34,7 @@ static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
goto err;
spin_lock_bh(&ring->idr_lock);
- buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
+ buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
spin_unlock_bh(&ring->idr_lock);
if (buf_id < 0) {
ret = -ENOBUFS;
@@ -72,8 +71,7 @@ err:
}
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
- struct ath11k_dbring *ring,
- gfp_t gfp)
+ struct ath11k_dbring *ring)
{
struct ath11k_dbring_element *buff;
struct hal_srng *srng;
@@ -92,11 +90,11 @@ static int ath11k_dbring_fill_bufs(struct ath11k *ar,
size = sizeof(*buff) + ring->buf_sz + align - 1;
while (num_remain > 0) {
- buff = kzalloc(size, gfp);
+ buff = kzalloc(size, GFP_ATOMIC);
if (!buff)
break;
- ret = ath11k_dbring_bufs_replenish(ar, ring, buff, gfp);
+ ret = ath11k_dbring_bufs_replenish(ar, ring, buff);
if (ret) {
ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
num_remain, req_entries);
@@ -176,7 +174,7 @@ int ath11k_dbring_buf_setup(struct ath11k *ar,
ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);
- ret = ath11k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
+ ret = ath11k_dbring_fill_bufs(ar, ring);
return ret;
}
@@ -322,7 +320,7 @@ int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
}
memset(buff, 0, size);
- ath11k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
+ ath11k_dbring_bufs_replenish(ar, ring, buff);
}
spin_unlock_bh(&srng->lock);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c
index 554feaf1ed5c..80afd35337a1 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs.c
@@ -806,7 +806,7 @@ static ssize_t ath11k_debugfs_dump_soc_dp_stats(struct file *file,
len += scnprintf(buf + len, size - len, "\nSOC TX STATS:\n");
len += scnprintf(buf + len, size - len, "\nTCL Ring Full Failures:\n");
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
len += scnprintf(buf + len, size - len, "ring%d: %u\n",
i, soc_stats->tx_err.desc_na[i]);
@@ -902,7 +902,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
struct htt_rx_ring_tlv_filter tlv_filter = {0};
u32 rx_filter = 0, ring_id, filter, mode;
u8 buf[128] = {0};
- int i, ret;
+ int i, ret, rx_buf_sz = 0;
ssize_t rc;
mutex_lock(&ar->conf_mutex);
@@ -940,6 +940,17 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
+ /* Clear rx filter set for monitor mode and rx status */
+ for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
+ ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
+ ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id,
+ HAL_RXDMA_MONITOR_STATUS,
+ rx_buf_sz, &tlv_filter);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+ goto out;
+ }
+ }
#define HTT_RX_FILTER_TLV_LITE_MODE \
(HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
@@ -955,6 +966,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
HTT_RX_FILTER_TLV_FLAGS_MPDU_END |
HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER |
HTT_RX_FILTER_TLV_FLAGS_ATTENTION;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
} else if (mode == ATH11K_PKTLOG_MODE_LITE) {
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_PKTLOG);
@@ -964,7 +976,12 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
rx_filter = HTT_RX_FILTER_TLV_LITE_MODE;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
} else {
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ tlv_filter = ath11k_mac_mon_status_filter_default;
+ rx_filter = tlv_filter.rx_filter;
+
ret = ath11k_dp_tx_htt_h2t_ppdu_stats_req(ar,
HTT_PPDU_STATS_TAG_DEFAULT);
if (ret) {
@@ -988,7 +1005,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
ar->dp.mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
- DP_RX_BUFFER_SIZE, &tlv_filter);
+ rx_buf_sz, &tlv_filter);
if (ret) {
ath11k_warn(ab, "failed to set rx filter for monitor status ring\n");
@@ -996,8 +1013,8 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file,
}
}
- ath11k_dbg(ab, ATH11K_DBG_WMI, "pktlog filter %d mode %s\n",
- filter, ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
+ ath11k_info(ab, "pktlog mode %s\n",
+ ((mode == ATH11K_PKTLOG_MODE_FULL) ? "full" : "lite"));
ar->debug.pktlog_filter = filter;
ar->debug.pktlog_mode = mode;
diff --git a/drivers/net/wireless/ath/ath11k/debugfs.h b/drivers/net/wireless/ath/ath11k/debugfs.h
index e5346af71f24..ec743a015dc7 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs.h
@@ -38,6 +38,10 @@ enum ath11k_dbg_htt_ext_stats_type {
ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO = 22,
ATH11K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS = 23,
ATH11K_DBG_HTT_EXT_STATS_RING_BACKPRESSURE_STATS = 24,
+ ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS = 29,
+ ATH11K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF_STATS = 31,
+ ATH11K_DBG_HTT_EXT_STATS_TXBF_OFDMA = 32,
+ ATH11K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS = 37,
/* keep this last */
ATH11K_DBG_HTT_NUM_EXT_STATS,
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
index 9e0c90da99d3..4484235bcda4 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.c
@@ -10,23 +10,28 @@
#include "debug.h"
#include "debugfs_htt_stats.h"
-#define HTT_DBG_OUT(buf, len, fmt, ...) \
- scnprintf(buf, len, fmt "\n", ##__VA_ARGS__)
-
-#define HTT_MAX_STRING_LEN 256
#define HTT_MAX_PRINT_CHAR_PER_ELEM 15
#define HTT_TLV_HDR_LEN 4
-#define ARRAY_TO_STRING(out, arr, len) \
+#define PRINT_ARRAY_TO_BUF(out, buflen, arr, str, len, newline) \
do { \
- int index = 0; u8 i; \
+ int index = 0; u8 i; const char *str_val = str; \
+ const char *new_line = newline; \
+ if (str_val) { \
+ index += scnprintf((out + buflen), \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen), \
+ "%s = ", str_val); \
+ } \
for (i = 0; i < len; i++) { \
- index += scnprintf(out + index, HTT_MAX_STRING_LEN - index, \
- " %u:%u,", i, arr[i]); \
- if (index < 0 || index >= HTT_MAX_STRING_LEN) \
- break; \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ " %u:%u,", i, arr[i]); \
} \
+ index += scnprintf((out + buflen) + index, \
+ (ATH11K_HTT_STATS_BUF_SIZE - buflen) - index, \
+ "%s", new_line); \
+ buflen += index; \
} while (0)
static inline void htt_print_stats_string_tlv(const void *tag_buf,
@@ -38,22 +43,20 @@ static inline void htt_print_stats_string_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- u16 index = 0;
- char data[HTT_MAX_STRING_LEN] = {0};
tag_len = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_STRING_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "data = ");
for (i = 0; i < tag_len; i++) {
- index += scnprintf(&data[index],
- HTT_MAX_STRING_LEN - index,
- "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
- if (index >= HTT_MAX_STRING_LEN)
- break;
+ len += scnprintf(buf + len,
+ buf_len - len,
+ "%.*s", 4, (char *)&(htt_stats_buf->data[i]));
}
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data = %s\n", data);
+ /* New lines are added for better display */
+ len += scnprintf(buf + len, buf_len - len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -71,107 +74,107 @@ static inline void htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun = %u",
- htt_stats_buf->underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_paused = %u",
- htt_stats_buf->hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_flush = %u",
- htt_stats_buf->hw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_filt = %u",
- htt_stats_buf->hw_filt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_requeued = %u",
- htt_stats_buf->mpdu_requeued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_xretry = %u",
- htt_stats_buf->tx_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "data_rc = %u",
- htt_stats_buf->data_rc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_dropped_xretry = %u",
- htt_stats_buf->mpdu_dropped_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "illegal_rate_phy_err = %u",
- htt_stats_buf->illgl_rate_phy_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cont_xretry = %u",
- htt_stats_buf->cont_xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_timeout = %u",
- htt_stats_buf->tx_timeout);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_resets = %u",
- htt_stats_buf->pdev_resets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_underrun = %u",
- htt_stats_buf->phy_underrun);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_ovf = %u",
- htt_stats_buf->txop_ovf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted = %u",
- htt_stats_buf->seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_failed_queueing = %u",
- htt_stats_buf->seq_failed_queueing);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_completed = %u",
- htt_stats_buf->seq_completed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_restarted = %u",
- htt_stats_buf->seq_restarted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_seq_posted = %u",
- htt_stats_buf->mu_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_switch_hw_paused = %u",
- htt_stats_buf->seq_switch_hw_paused);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "next_seq_posted_dsr = %u",
- htt_stats_buf->next_seq_posted_dsr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_posted_isr = %u",
- htt_stats_buf->seq_posted_isr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "seq_ctrl_cached = %u",
- htt_stats_buf->seq_ctrl_cached);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count_tqm = %u",
- htt_stats_buf->mpdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count_tqm = %u",
- htt_stats_buf->msdu_count_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_removed_tqm = %u",
- htt_stats_buf->mpdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_removed_tqm = %u",
- htt_stats_buf->msdu_removed_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_sw_flush = %u",
- htt_stats_buf->mpdus_sw_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_truncated = %u",
- htt_stats_buf->mpdus_truncated);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_ack_failed = %u",
- htt_stats_buf->mpdus_ack_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_expired = %u",
- htt_stats_buf->mpdus_expired);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u",
- htt_stats_buf->mpdus_seq_hw_retry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u",
- htt_stats_buf->coex_abort_mpdu_cnt_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u",
- htt_stats_buf->coex_abort_mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u",
- htt_stats_buf->num_total_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u",
- htt_stats_buf->num_data_ppdus_tried_ota);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u",
- htt_stats_buf->local_ctrl_mgmt_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u",
- htt_stats_buf->local_ctrl_mgmt_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_enqued = %u",
- htt_stats_buf->local_data_enqued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_data_freed = %u",
- htt_stats_buf->local_data_freed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried = %u",
- htt_stats_buf->mpdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "isr_wait_seq_posted = %u",
- htt_stats_buf->isr_wait_seq_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_low = %u",
- htt_stats_buf->tx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n",
- htt_stats_buf->tx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "underrun = %u\n",
+ htt_stats_buf->underrun);
+ len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n",
+ htt_stats_buf->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n",
+ htt_stats_buf->hw_flush);
+ len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n",
+ htt_stats_buf->hw_filt);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n",
+ htt_stats_buf->mpdu_requeued);
+ len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n",
+ htt_stats_buf->tx_xretry);
+ len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n",
+ htt_stats_buf->data_rc);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n",
+ htt_stats_buf->mpdu_dropped_xretry);
+ len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n",
+ htt_stats_buf->illgl_rate_phy_err);
+ len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n",
+ htt_stats_buf->cont_xretry);
+ len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n",
+ htt_stats_buf->tx_timeout);
+ len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n",
+ htt_stats_buf->pdev_resets);
+ len += scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n",
+ htt_stats_buf->phy_underrun);
+ len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n",
+ htt_stats_buf->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n",
+ htt_stats_buf->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n",
+ htt_stats_buf->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n",
+ htt_stats_buf->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n",
+ htt_stats_buf->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "mu_seq_posted = %u\n",
+ htt_stats_buf->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n",
+ htt_stats_buf->seq_switch_hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n",
+ htt_stats_buf->next_seq_posted_dsr);
+ len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n",
+ htt_stats_buf->seq_posted_isr);
+ len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n",
+ htt_stats_buf->seq_ctrl_cached);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n",
+ htt_stats_buf->mpdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n",
+ htt_stats_buf->msdu_count_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n",
+ htt_stats_buf->mpdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n",
+ htt_stats_buf->msdu_removed_tqm);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n",
+ htt_stats_buf->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n",
+ htt_stats_buf->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n",
+ htt_stats_buf->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n",
+ htt_stats_buf->mpdus_expired);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n",
+ htt_stats_buf->mpdus_seq_hw_retry);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt_valid);
+ len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n",
+ htt_stats_buf->coex_abort_mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_total_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n",
+ htt_stats_buf->num_data_ppdus_tried_ota);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n",
+ htt_stats_buf->local_ctrl_mgmt_freed);
+ len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n",
+ htt_stats_buf->local_data_enqued);
+ len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n",
+ htt_stats_buf->local_data_freed);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n",
+ htt_stats_buf->mpdu_tried);
+ len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n",
+ htt_stats_buf->isr_wait_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n",
+ htt_stats_buf->tx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n\n",
+ htt_stats_buf->tx_active_dur_us_high);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -190,13 +193,12 @@ htt_print_tx_pdev_stats_urrn_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char urrn_stats[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_URRN_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_URRN_TLV_V:\n");
- ARRAY_TO_STRING(urrn_stats, htt_stats_buf->urrn_stats, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "urrn_stats = %s\n", urrn_stats);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->urrn_stats, "urrn_stats",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -215,13 +217,12 @@ htt_print_tx_pdev_stats_flush_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char flush_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_FLUSH_REASON_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_FLUSH_TLV_V:\n");
- ARRAY_TO_STRING(flush_errs, htt_stats_buf->flush_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_errs = %s\n", flush_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->flush_errs, "flush_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -240,14 +241,12 @@ htt_print_tx_pdev_stats_sifs_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_SIFS_TLV_V:\n");
- ARRAY_TO_STRING(sifs_status, htt_stats_buf->sifs_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_status = %s\n",
- sifs_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_status, "sifs_status",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -266,13 +265,12 @@ htt_print_tx_pdev_stats_phy_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_PHY_ERR_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_PHY_ERR_TLV_V:\n");
- ARRAY_TO_STRING(phy_errs, htt_stats_buf->phy_errs, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_errs, "phy_errs",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -291,15 +289,13 @@ htt_print_tx_pdev_stats_sifs_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sifs_hist_status[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SIFS_HIST_TLV_V:\n");
- ARRAY_TO_STRING(sifs_hist_status, htt_stats_buf->sifs_hist_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sifs_hist_status = %s\n",
- sifs_hist_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sifs_hist_status,
+ "sifs_hist_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -318,23 +314,23 @@ htt_print_tx_pdev_stats_tx_ppdu_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TX_PPDU_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u",
- htt_stats_buf->num_data_ppdus_legacy_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_legacy_su = %u\n",
+ htt_stats_buf->num_data_ppdus_legacy_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u",
- htt_stats_buf->num_data_ppdus_ac_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u",
- htt_stats_buf->num_data_ppdus_ax_su);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su = %u\n",
+ htt_stats_buf->num_data_ppdus_ax_su);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u",
- htt_stats_buf->num_data_ppdus_ac_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ac_su_txbf = %u\n",
+ htt_stats_buf->num_data_ppdus_ac_su_txbf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n",
- htt_stats_buf->num_data_ppdus_ax_su_txbf);
+ len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_ax_su_txbf = %u\n\n",
+ htt_stats_buf->num_data_ppdus_ax_su_txbf);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -353,25 +349,15 @@ htt_print_tx_pdev_stats_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len - sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER\n");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -390,14 +376,14 @@ static inline void htt_print_hw_stats_intr_misc_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_intr_name[HTT_STATS_MAX_HW_INTR_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n");
memcpy(hw_intr_name, &(htt_stats_buf->hw_intr_name[0]),
HTT_STATS_MAX_HW_INTR_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_intr_name = %s ", hw_intr_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mask = %u",
- htt_stats_buf->mask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_intr_name = %s\n", hw_intr_name);
+ len += scnprintf(buf + len, buf_len - len, "mask = %u\n",
+ htt_stats_buf->mask);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -417,13 +403,13 @@ htt_print_hw_stats_wd_timeout_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char hw_module_name[HTT_STATS_MAX_HW_MODULE_NAME_LEN + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WD_TIMEOUT_TLV:\n");
memcpy(hw_module_name, &(htt_stats_buf->hw_module_name[0]),
HTT_STATS_MAX_HW_MODULE_NAME_LEN);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_module_name = %s ",
- hw_module_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u",
- htt_stats_buf->count);
+ len += scnprintf(buf + len, buf_len - len, "hw_module_name = %s\n",
+ hw_module_name);
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -441,29 +427,29 @@ static inline void htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort = %u",
- htt_stats_buf->tx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_abort_fail_count = %u",
- htt_stats_buf->tx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort = %u",
- htt_stats_buf->rx_abort);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_abort_fail_count = %u",
- htt_stats_buf->rx_abort_fail_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "warm_reset = %u",
- htt_stats_buf->warm_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cold_reset = %u",
- htt_stats_buf->cold_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flush = %u",
- htt_stats_buf->tx_flush);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_glb_reset = %u",
- htt_stats_buf->tx_glb_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_txq_reset = %u",
- htt_stats_buf->tx_txq_reset);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_timeout_reset = %u\n",
- htt_stats_buf->rx_timeout_reset);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n",
+ htt_stats_buf->tx_abort);
+ len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n",
+ htt_stats_buf->tx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n",
+ htt_stats_buf->rx_abort);
+ len += scnprintf(buf + len, buf_len - len, "rx_abort_fail_count = %u\n",
+ htt_stats_buf->rx_abort_fail_count);
+ len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n",
+ htt_stats_buf->warm_reset);
+ len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n",
+ htt_stats_buf->cold_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n",
+ htt_stats_buf->tx_flush);
+ len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n",
+ htt_stats_buf->tx_glb_reset);
+ len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n",
+ htt_stats_buf->tx_txq_reset);
+ len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n",
+ htt_stats_buf->rx_timeout_reset);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -481,35 +467,36 @@ static inline void htt_print_msdu_flow_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_update_timestamp = %u",
- htt_stats_buf->last_update_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_add_timestamp = %u",
- htt_stats_buf->last_add_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_remove_timestamp = %u",
- htt_stats_buf->last_remove_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_processed_msdu_count = %u",
- htt_stats_buf->total_processed_msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u",
- htt_stats_buf->cur_msdu_count_in_flowq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_flow_no = %u",
- htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0xF0000) >>
- 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_rule = %u",
- (htt_stats_buf->tx_flow_no__tid_num__drop_rule & 0x100000) >>
- 20);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_enqueue_count = %u",
- htt_stats_buf->last_cycle_enqueue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_dequeue_count = %u",
- htt_stats_buf->last_cycle_dequeue_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_cycle_drop_count = %u",
- htt_stats_buf->last_cycle_drop_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_drop_th = %u\n",
- htt_stats_buf->current_drop_th);
+ len += scnprintf(buf + len, buf_len - len, "HTT_MSDU_FLOW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "last_update_timestamp = %u\n",
+ htt_stats_buf->last_update_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_add_timestamp = %u\n",
+ htt_stats_buf->last_add_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "last_remove_timestamp = %u\n",
+ htt_stats_buf->last_remove_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "total_processed_msdu_count = %u\n",
+ htt_stats_buf->total_processed_msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "cur_msdu_count_in_flowq = %u\n",
+ htt_stats_buf->cur_msdu_count_in_flowq);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "tx_flow_no = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TX_FLOW_NO,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_TID_NUM,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "drop_rule = %lu\n",
+ FIELD_GET(HTT_MSDU_FLOW_STATS_DROP_RULE,
+ htt_stats_buf->tx_flow_no__tid_num__drop_rule));
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_enqueue_count = %u\n",
+ htt_stats_buf->last_cycle_enqueue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_dequeue_count = %u\n",
+ htt_stats_buf->last_cycle_dequeue_count);
+ len += scnprintf(buf + len, buf_len - len, "last_cycle_drop_count = %u\n",
+ htt_stats_buf->last_cycle_drop_count);
+ len += scnprintf(buf + len, buf_len - len, "current_drop_th = %u\n\n",
+ htt_stats_buf->current_drop_th);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -528,38 +515,41 @@ static inline void htt_print_tx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_queued = %u",
- htt_stats_buf->hw_queued);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hw_reaped = %u",
- htt_stats_buf->hw_reaped);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdus_hw_filter = %u",
- htt_stats_buf->mpdus_hw_filter);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u\n",
- htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n",
+ htt_stats_buf->hw_queued);
+ len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n",
+ htt_stats_buf->hw_reaped);
+ len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n",
+ htt_stats_buf->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n\n",
+ htt_stats_buf->block_module_id);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -578,42 +568,45 @@ static inline void htt_print_tx_tid_stats_v1_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TID_STATS_V1_TLV:\n");
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sched_pending = %u",
- htt_stats_buf->num_sched_pending__num_ppdu_in_hwq & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ppdu_in_hwq = %u",
- (htt_stats_buf->num_sched_pending__num_ppdu_in_hwq &
- 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_flags = 0x%x",
- htt_stats_buf->tid_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_bytes = %u",
- htt_stats_buf->max_qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_qdepth_n_msdus = %u",
- htt_stats_buf->max_qdepth_n_msdus);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rsvd = %u",
- htt_stats_buf->rsvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_bytes = %u",
- htt_stats_buf->qdepth_bytes);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_msdu = %u",
- htt_stats_buf->qdepth_num_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qdepth_num_mpdu = %u",
- htt_stats_buf->qdepth_num_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_scheduled_tsmp = %u",
- htt_stats_buf->last_scheduled_tsmp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_module_id = %u",
- htt_stats_buf->pause_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_module_id = %u",
- htt_stats_buf->block_module_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "allow_n_flags = 0x%x",
- htt_stats_buf->allow_n_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sendn_frms_allowed = %u\n",
- htt_stats_buf->sendn_frms_allowed);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "num_sched_pending = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "num_ppdu_in_hwq = %lu\n",
+ FIELD_GET(HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ,
+ htt_stats_buf->num_sched_pending__num_ppdu_in_hwq));
+ len += scnprintf(buf + len, buf_len - len, "tid_flags = 0x%x\n",
+ htt_stats_buf->tid_flags);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_bytes = %u\n",
+ htt_stats_buf->max_qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "max_qdepth_n_msdus = %u\n",
+ htt_stats_buf->max_qdepth_n_msdus);
+ len += scnprintf(buf + len, buf_len - len, "rsvd = %u\n",
+ htt_stats_buf->rsvd);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_bytes = %u\n",
+ htt_stats_buf->qdepth_bytes);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_msdu = %u\n",
+ htt_stats_buf->qdepth_num_msdu);
+ len += scnprintf(buf + len, buf_len - len, "qdepth_num_mpdu = %u\n",
+ htt_stats_buf->qdepth_num_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "last_scheduled_tsmp = %u\n",
+ htt_stats_buf->last_scheduled_tsmp);
+ len += scnprintf(buf + len, buf_len - len, "pause_module_id = %u\n",
+ htt_stats_buf->pause_module_id);
+ len += scnprintf(buf + len, buf_len - len, "block_module_id = %u\n",
+ htt_stats_buf->block_module_id);
+ len += scnprintf(buf + len, buf_len - len, "allow_n_flags = 0x%x\n",
+ htt_stats_buf->allow_n_flags);
+ len += scnprintf(buf + len, buf_len - len, "sendn_frms_allowed = %u\n\n",
+ htt_stats_buf->sendn_frms_allowed);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -632,21 +625,23 @@ static inline void htt_print_rx_tid_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
char tid_name[MAX_HTT_TID_NAME + 1] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id__tid_num & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_num = %u",
- (htt_stats_buf->sw_peer_id__tid_num & 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_TID_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_SW_PEER_ID,
+ htt_stats_buf->sw_peer_id__tid_num));
+ len += scnprintf(buf + len, buf_len - len, "tid_num = %lu\n",
+ FIELD_GET(HTT_RX_TID_STATS_TID_NUM,
+ htt_stats_buf->sw_peer_id__tid_num));
memcpy(tid_name, &(htt_stats_buf->tid_name[0]), MAX_HTT_TID_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tid_name = %s ", tid_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_in_reorder = %u",
- htt_stats_buf->dup_in_reorder);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_outside_window = %u",
- htt_stats_buf->dup_past_outside_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dup_past_within_window = %u",
- htt_stats_buf->dup_past_within_window);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n",
- htt_stats_buf->rxdesc_err_decrypt);
+ len += scnprintf(buf + len, buf_len - len, "tid_name = %s\n", tid_name);
+ len += scnprintf(buf + len, buf_len - len, "dup_in_reorder = %u\n",
+ htt_stats_buf->dup_in_reorder);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_outside_window = %u\n",
+ htt_stats_buf->dup_past_outside_window);
+ len += scnprintf(buf + len, buf_len - len, "dup_past_within_window = %u\n",
+ htt_stats_buf->dup_past_within_window);
+ len += scnprintf(buf + len, buf_len - len, "rxdesc_err_decrypt = %u\n\n",
+ htt_stats_buf->rxdesc_err_decrypt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -663,16 +658,14 @@ static inline void htt_print_counter_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char counter_name[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_COUNTER_TLV:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_COUNTER_TLV:\n");
- ARRAY_TO_STRING(counter_name,
- htt_stats_buf->counter_name,
- HTT_MAX_COUNTER_NAME);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "counter_name = %s ", counter_name);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "count = %u\n",
- htt_stats_buf->count);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->counter_name,
+ "counter_name",
+ HTT_MAX_COUNTER_NAME, "\n");
+ len += scnprintf(buf + len, buf_len - len, "count = %u\n\n",
+ htt_stats_buf->count);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -690,35 +683,35 @@ static inline void htt_print_peer_stats_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_cnt = %u",
- htt_stats_buf->ppdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt = %u",
- htt_stats_buf->mpdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_cnt = %u",
- htt_stats_buf->msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pause_bitmap = %u",
- htt_stats_buf->pause_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "block_bitmap = %u",
- htt_stats_buf->block_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_rssi = %d",
- htt_stats_buf->rssi);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_count = %llu",
- htt_stats_buf->peer_enqueued_count_low |
- ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dequeued_count = %llu",
- htt_stats_buf->peer_dequeued_count_low |
- ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dropped_count = %llu",
- htt_stats_buf->peer_dropped_count_low |
- ((u64)htt_stats_buf->peer_dropped_count_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu",
- htt_stats_buf->ppdu_transmitted_bytes_low |
- ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ttl_removed_count = %u",
- htt_stats_buf->peer_ttl_removed_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "inactive_time = %u\n",
- htt_stats_buf->inactive_time);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ppdu_cnt = %u\n",
+ htt_stats_buf->ppdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt = %u\n",
+ htt_stats_buf->mpdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "msdu_cnt = %u\n",
+ htt_stats_buf->msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "pause_bitmap = %u\n",
+ htt_stats_buf->pause_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "block_bitmap = %u\n",
+ htt_stats_buf->block_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "last_rssi = %d\n",
+ htt_stats_buf->rssi);
+ len += scnprintf(buf + len, buf_len - len, "enqueued_count = %llu\n",
+ htt_stats_buf->peer_enqueued_count_low |
+ ((u64)htt_stats_buf->peer_enqueued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dequeued_count = %llu\n",
+ htt_stats_buf->peer_dequeued_count_low |
+ ((u64)htt_stats_buf->peer_dequeued_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "dropped_count = %llu\n",
+ htt_stats_buf->peer_dropped_count_low |
+ ((u64)htt_stats_buf->peer_dropped_count_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "transmitted_ppdu_bytes = %llu\n",
+ htt_stats_buf->ppdu_transmitted_bytes_low |
+ ((u64)htt_stats_buf->ppdu_transmitted_bytes_high << 32));
+ len += scnprintf(buf + len, buf_len - len, "ttl_removed_count = %u\n",
+ htt_stats_buf->peer_ttl_removed_count);
+ len += scnprintf(buf + len, buf_len - len, "inactive_time = %u\n\n",
+ htt_stats_buf->inactive_time);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -736,29 +729,38 @@ static inline void htt_print_peer_details_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_type = %u",
- htt_stats_buf->peer_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sw_peer_id = %u",
- htt_stats_buf->sw_peer_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_pdev_ast_idx & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ast_idx = %u",
- (htt_stats_buf->vdev_pdev_ast_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->mac_addr.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF),
- (htt_stats_buf->mac_addr.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_flags = 0x%x",
- htt_stats_buf->peer_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_flags = 0x%x\n",
- htt_stats_buf->qpeer_flags);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PEER_DETAILS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "peer_type = %u\n",
+ htt_stats_buf->peer_type);
+ len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+ htt_stats_buf->sw_peer_id);
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_VDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_PDEV_ID,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len, "ast_idx = %lu\n",
+ FIELD_GET(HTT_PEER_DETAILS_AST_IDX,
+ htt_stats_buf->vdev_pdev_ast_idx));
+ len += scnprintf(buf + len, buf_len - len,
+ "mac_addr = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->mac_addr.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->mac_addr.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->mac_addr.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "peer_flags = 0x%x\n",
+ htt_stats_buf->peer_flags);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_flags = 0x%x\n\n",
+ htt_stats_buf->qpeer_flags);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -775,74 +777,40 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
u8 j;
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_su_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_su_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mu_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mu_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_su_mcs, "tx_su_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mu_mcs, "tx_mu_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j],
- htt_stats_buf->tx_gi[j],
- HTT_TX_PEER_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PEER_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf,
- htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -850,10 +818,6 @@ static inline void htt_print_tx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
@@ -864,79 +828,48 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char *rssi_chain[HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PEER_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PEER_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PEER_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -944,13 +877,6 @@ static inline void htt_print_rx_peer_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(rx_gi[j]);
}
static inline void
@@ -962,13 +888,13 @@ htt_print_tx_hwq_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -987,22 +913,22 @@ htt_print_tx_hwq_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u",
- htt_stats_buf->mu_mimo_mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u",
- htt_stats_buf->mu_mimo_err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u",
- htt_stats_buf->mu_mimo_mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n",
- htt_stats_buf->mu_mimo_ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_MU_MIMO_MPDU_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_queued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_tried_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_failed_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdus_requeued_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_err_no_ba_usr = %u\n",
+ htt_stats_buf->mu_mimo_err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_mpdu_underrun_usr = %u\n",
+ htt_stats_buf->mu_mimo_mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ampdu_underrun_usr = %u\n\n",
+ htt_stats_buf->mu_mimo_ampdu_underrun_usr);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1021,11 +947,13 @@ htt_print_tx_hwq_mu_mimo_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u\n",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_MU_MIMO_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1044,51 +972,53 @@ htt_print_tx_hwq_stats_cmn_tlv(const void *tag_buf, struct debug_htt_stats_req *
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
/* TODO: HKDBG */
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__hwq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwq_id = %u",
- (htt_stats_buf->mac_id__hwq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "xretry = %u",
- htt_stats_buf->xretry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_cnt = %u",
- htt_stats_buf->underrun_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cnt = %u",
- htt_stats_buf->flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "filt_cnt = %u",
- htt_stats_buf->filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_bmap = %u",
- htt_stats_buf->null_mpdu_bmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "user_ack_failure = %u",
- htt_stats_buf->user_ack_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_id_proc = %u",
- htt_stats_buf->sched_id_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "null_mpdu_tx_count = %u",
- htt_stats_buf->null_mpdu_tx_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u",
- htt_stats_buf->mpdu_bmap_not_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_bar = %u",
- htt_stats_buf->num_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_tried_cnt = %u",
- htt_stats_buf->mpdu_tried_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queued_cnt = %u",
- htt_stats_buf->mpdu_queued_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u",
- htt_stats_buf->mpdu_ack_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_filt_cnt = %u",
- htt_stats_buf->mpdu_filt_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "false_mpdu_ack_count = %u",
- htt_stats_buf->false_mpdu_ack_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_timeout = %u\n",
- htt_stats_buf->txq_timeout);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_STATS_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_MAC_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "hwq_id = %lu\n",
+ FIELD_GET(HTT_TX_HWQ_STATS_HWQ_ID,
+ htt_stats_buf->mac_id__hwq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "xretry = %u\n",
+ htt_stats_buf->xretry);
+ len += scnprintf(buf + len, buf_len - len, "underrun_cnt = %u\n",
+ htt_stats_buf->underrun_cnt);
+ len += scnprintf(buf + len, buf_len - len, "flush_cnt = %u\n",
+ htt_stats_buf->flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "filt_cnt = %u\n",
+ htt_stats_buf->filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_bmap = %u\n",
+ htt_stats_buf->null_mpdu_bmap);
+ len += scnprintf(buf + len, buf_len - len, "user_ack_failure = %u\n",
+ htt_stats_buf->user_ack_failure);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "sched_id_proc = %u\n",
+ htt_stats_buf->sched_id_proc);
+ len += scnprintf(buf + len, buf_len - len, "null_mpdu_tx_count = %u\n",
+ htt_stats_buf->null_mpdu_tx_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_bmap_not_recvd = %u\n",
+ htt_stats_buf->mpdu_bmap_not_recvd);
+ len += scnprintf(buf + len, buf_len - len, "num_bar = %u\n",
+ htt_stats_buf->num_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_tried_cnt = %u\n",
+ htt_stats_buf->mpdu_tried_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queued_cnt = %u\n",
+ htt_stats_buf->mpdu_queued_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_ack_fail_cnt = %u\n",
+ htt_stats_buf->mpdu_ack_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_filt_cnt = %u\n",
+ htt_stats_buf->mpdu_filt_cnt);
+ len += scnprintf(buf + len, buf_len - len, "false_mpdu_ack_count = %u\n",
+ htt_stats_buf->false_mpdu_ack_count);
+ len += scnprintf(buf + len, buf_len - len, "txq_timeout = %u\n\n",
+ htt_stats_buf->txq_timeout);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1108,17 +1038,14 @@ htt_print_tx_hwq_difs_latency_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_DIFS_LATENCY_BINS);
- char difs_latency_hist[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hist_intvl = %u",
- htt_stats_buf->hist_intvl);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_DIFS_LATENCY_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "hist_intvl = %u\n",
+ htt_stats_buf->hist_intvl);
- ARRAY_TO_STRING(difs_latency_hist, htt_stats_buf->difs_latency_hist,
- data_len);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "difs_latency_hist = %s\n",
- difs_latency_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->difs_latency_hist,
+ "difs_latency_hist", data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1138,16 +1065,14 @@ htt_print_tx_hwq_cmd_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 data_len;
- char cmd_result[HTT_MAX_STRING_LEN] = {0};
data_len = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:");
-
- ARRAY_TO_STRING(cmd_result, htt_stats_buf->cmd_result, data_len);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_CMD_RESULT_STATS_TLV_V:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_result = %s\n", cmd_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_result, "cmd_result",
+ data_len, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1167,15 +1092,13 @@ htt_print_tx_hwq_cmd_stall_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char cmd_stall_status[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_CMD_STALL_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_HWQ_CMD_STALL_STATS_TLV_V:\n");
- ARRAY_TO_STRING(cmd_stall_status, htt_stats_buf->cmd_stall_status, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmd_stall_status = %s\n",
- cmd_stall_status);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->cmd_stall_status,
+ "cmd_stall_status", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1195,15 +1118,14 @@ htt_print_tx_hwq_fes_result_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u16 num_elems;
- char fes_result[HTT_MAX_STRING_LEN] = {0};
num_elems = min_t(u16, (tag_len >> 2), HTT_TX_HWQ_MAX_FES_RESULT_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_FES_RESULT_STATS_TLV_V:\n");
- ARRAY_TO_STRING(fes_result, htt_stats_buf->fes_result, num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fes_result = %s\n", fes_result);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fes_result, "fes_result",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1222,27 +1144,16 @@ htt_print_tx_hwq_tried_mpdu_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char tried_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = ((tag_len -
sizeof(htt_stats_buf->hist_bin_size)) >> 2);
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u",
- htt_stats_buf->hist_bin_size);
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(tried_mpdu_cnt_hist,
- htt_stats_buf->tried_mpdu_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "tried_mpdu_cnt_hist = %s\n",
- tried_mpdu_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TRIED_MPDU_CNT_HIST_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "TRIED_MPDU_CNT_HIST_BIN_SIZE : %u\n",
+ htt_stats_buf->hist_bin_size);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tried_mpdu_cnt_hist,
+ "tried_mpdu_cnt_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1261,23 +1172,14 @@ htt_print_tx_hwq_txop_used_cnt_hist_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char txop_used_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u32 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_HWQ_TXOP_USED_CNT_HIST_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->txop_used_cnt_hist,
+ "txop_used_cnt_hist", num_elements, "\n\n");
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(txop_used_cnt_hist,
- htt_stats_buf->txop_used_cnt_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txop_used_cnt_hist = %s\n",
- txop_used_cnt_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -1300,86 +1202,86 @@ static inline void htt_print_tx_sounding_stats_tlv(const void *tag_buf,
const u32 *cbf_160 = htt_stats_buf->cbf_160;
if (htt_stats_buf->tx_sounding_mode == HTT_TX_AC_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AC_SOUNDING_STATS_TLV:\n\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
} else if (htt_stats_buf->tx_sounding_mode == HTT_TX_AX_SOUNDING_MODE) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u ",
- cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u",
- cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
- cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_20 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_20[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_20[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_40 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_40[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_40[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_80 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_80[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_80[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_cbf_160 = IBF : %u, SU_SIFS : %u, SU_RBO : %u, MU_SIFS : %u, MU_RBO : %u\n",
+ cbf_160[HTT_IMPLICIT_TXBF_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_SU_RBO_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_SIFS_STEER_STATS],
+ cbf_160[HTT_EXPLICIT_TXBF_MU_RBO_STEER_STATS]);
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u ",
- i,
- htt_stats_buf->sounding[0],
- htt_stats_buf->sounding[1],
- htt_stats_buf->sounding[2],
- htt_stats_buf->sounding[3]);
+ len += scnprintf(buf + len, buf_len - len,
+ "Sounding User %u = 20MHz: %u, 40MHz : %u, 80MHz: %u, 160MHz: %u\n",
+ i,
+ htt_stats_buf->sounding[0],
+ htt_stats_buf->sounding[1],
+ htt_stats_buf->sounding[2],
+ htt_stats_buf->sounding[3]);
}
}
@@ -1400,31 +1302,31 @@ htt_print_tx_selfgen_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "su_bar = %u",
- htt_stats_buf->su_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts = %u",
- htt_stats_buf->rts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cts2self = %u",
- htt_stats_buf->cts2self);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_null = %u",
- htt_stats_buf->qos_null);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_1 = %u",
- htt_stats_buf->delayed_bar_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_2 = %u",
- htt_stats_buf->delayed_bar_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_3 = %u",
- htt_stats_buf->delayed_bar_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_4 = %u",
- htt_stats_buf->delayed_bar_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_5 = %u",
- htt_stats_buf->delayed_bar_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_6 = %u",
- htt_stats_buf->delayed_bar_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "delayed_bar_7 = %u\n",
- htt_stats_buf->delayed_bar_7);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "su_bar = %u\n",
+ htt_stats_buf->su_bar);
+ len += scnprintf(buf + len, buf_len - len, "rts = %u\n",
+ htt_stats_buf->rts);
+ len += scnprintf(buf + len, buf_len - len, "cts2self = %u\n",
+ htt_stats_buf->cts2self);
+ len += scnprintf(buf + len, buf_len - len, "qos_null = %u\n",
+ htt_stats_buf->qos_null);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_1 = %u\n",
+ htt_stats_buf->delayed_bar_1);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_2 = %u\n",
+ htt_stats_buf->delayed_bar_2);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_3 = %u\n",
+ htt_stats_buf->delayed_bar_3);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_4 = %u\n",
+ htt_stats_buf->delayed_bar_4);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_5 = %u\n",
+ htt_stats_buf->delayed_bar_5);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_6 = %u\n",
+ htt_stats_buf->delayed_bar_6);
+ len += scnprintf(buf + len, buf_len - len, "delayed_bar_7 = %u\n\n",
+ htt_stats_buf->delayed_bar_7);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1443,21 +1345,21 @@ htt_print_tx_selfgen_ac_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa = %u",
- htt_stats_buf->ac_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp = %u",
- htt_stats_buf->ac_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u",
- htt_stats_buf->ac_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u",
- htt_stats_buf->ac_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ac_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n",
- htt_stats_buf->ac_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa = %u\n",
+ htt_stats_buf->ac_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp = %u\n",
+ htt_stats_buf->ac_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brpoll_3 = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brpoll_3);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1476,37 +1378,37 @@ htt_print_tx_selfgen_ax_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa = %u",
- htt_stats_buf->ax_su_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp = %u",
- htt_stats_buf->ax_su_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u",
- htt_stats_buf->ax_mu_mimo_ndpa);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u",
- htt_stats_buf->ax_mu_mimo_ndp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_3);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_4);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_5);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_6);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u",
- htt_stats_buf->ax_mu_mimo_brpoll_7);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger = %u",
- htt_stats_buf->ax_basic_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger = %u",
- htt_stats_buf->ax_bsr_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger = %u",
- htt_stats_buf->ax_mu_bar_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n",
- htt_stats_buf->ax_mu_rts_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa = %u\n",
+ htt_stats_buf->ax_su_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp = %u\n",
+ htt_stats_buf->ax_su_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_1 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_1);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_2 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_2);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_3 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_3);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_4 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_4);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_5 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_5);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_6 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_6);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brpoll_7 = %u\n",
+ htt_stats_buf->ax_mu_mimo_brpoll_7);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger = %u\n",
+ htt_stats_buf->ax_basic_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger = %u\n",
+ htt_stats_buf->ax_bsr_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1525,21 +1427,21 @@ htt_print_tx_selfgen_ac_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndp_err = %u",
- htt_stats_buf->ac_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_su_ndpa_err = %u",
- htt_stats_buf->ac_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ac_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u",
- htt_stats_buf->ac_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u",
- htt_stats_buf->ac_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u",
- htt_stats_buf->ac_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n",
- htt_stats_buf->ac_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AC_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndp_err = %u\n",
+ htt_stats_buf->ac_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_su_ndpa_err = %u\n",
+ htt_stats_buf->ac_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ac_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_brp3_err = %u\n\n",
+ htt_stats_buf->ac_mu_mimo_brp3_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1558,37 +1460,37 @@ htt_print_tx_selfgen_ax_err_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndp_err = %u",
- htt_stats_buf->ax_su_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_su_ndpa_err = %u",
- htt_stats_buf->ax_su_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u",
- htt_stats_buf->ax_mu_mimo_ndpa_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u",
- htt_stats_buf->ax_mu_mimo_ndp_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u",
- htt_stats_buf->ax_mu_mimo_brp1_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u",
- htt_stats_buf->ax_mu_mimo_brp2_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u",
- htt_stats_buf->ax_mu_mimo_brp3_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u",
- htt_stats_buf->ax_mu_mimo_brp4_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u",
- htt_stats_buf->ax_mu_mimo_brp5_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u",
- htt_stats_buf->ax_mu_mimo_brp6_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u",
- htt_stats_buf->ax_mu_mimo_brp7_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_basic_trigger_err = %u",
- htt_stats_buf->ax_basic_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_bsr_trigger_err = %u",
- htt_stats_buf->ax_bsr_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u",
- htt_stats_buf->ax_mu_bar_trigger_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n",
- htt_stats_buf->ax_mu_rts_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_SELFGEN_AX_ERR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndp_err = %u\n",
+ htt_stats_buf->ax_su_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_su_ndpa_err = %u\n",
+ htt_stats_buf->ax_su_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndpa_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndpa_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_ndp_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_ndp_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp1_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp1_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp2_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp2_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp3_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp3_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp4_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp4_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp5_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp5_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp6_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp6_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_brp7_err = %u\n",
+ htt_stats_buf->ax_mu_mimo_brp7_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_basic_trigger_err = %u\n",
+ htt_stats_buf->ax_basic_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_bsr_trigger_err = %u\n",
+ htt_stats_buf->ax_bsr_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_bar_trigger_err = %u\n",
+ htt_stats_buf->ax_mu_bar_trigger_err);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_rts_trigger_err = %u\n\n",
+ htt_stats_buf->ax_mu_rts_trigger_err);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1608,35 +1510,35 @@ htt_print_tx_pdev_mu_mimo_sch_stats_tlv(const void *tag_buf,
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_posted = %u",
- htt_stats_buf->mu_mimo_sch_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_sch_failed = %u",
- htt_stats_buf->mu_mimo_sch_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n",
- htt_stats_buf->mu_mimo_ppdu_posted);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_SCH_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_posted = %u\n",
+ htt_stats_buf->mu_mimo_sch_posted);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_sch_failed = %u\n",
+ htt_stats_buf->mu_mimo_sch_failed);
+ len += scnprintf(buf + len, buf_len - len, "mu_mimo_ppdu_posted = %u\n\n",
+ htt_stats_buf->mu_mimo_ppdu_posted);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "11ac MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ac_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax MU_MIMO SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_mu_mimo_sch_nusers[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:");
+ len += scnprintf(buf + len, buf_len - len, "\n11ax OFDMA SCH STATS:\n");
for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_ofdma_sch_nusers_%u = %u",
- i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_sch_nusers_%u = %u\n",
+ i, htt_stats_buf->ax_ofdma_sch_nusers[i]);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1657,114 +1559,114 @@ htt_print_tx_pdev_mu_mimo_mpdu_stats_tlv(const void *tag_buf,
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AC_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ac_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_MU_MIMO_AX_MPDU_STATS:\n");
if (htt_stats_buf->user_index <
HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_mimo_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
if (htt_stats_buf->tx_sched_mode == HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX) {
if (!htt_stats_buf->user_index)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_AX_MU_OFDMA_MPDU_STATS:\n");
if (htt_stats_buf->user_index < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS) {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_queued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_queued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_tried_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_tried_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_failed_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_failed_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdus_requeued_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdus_requeued_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_err_no_ba_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->err_no_ba_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_mpdu_underrun_usr_%u = %u",
- htt_stats_buf->user_index,
- htt_stats_buf->mpdu_underrun_usr);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n",
- htt_stats_buf->user_index,
- htt_stats_buf->ampdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_queued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_queued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_tried_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_tried_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_failed_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_failed_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdus_requeued_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdus_requeued_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_err_no_ba_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->err_no_ba_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_mpdu_underrun_usr_%u = %u\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->mpdu_underrun_usr);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_mu_ofdma_ampdu_underrun_usr_%u = %u\n\n",
+ htt_stats_buf->user_index,
+ htt_stats_buf->ampdu_underrun_usr);
}
}
@@ -1785,15 +1687,12 @@ htt_print_sched_txq_cmd_posted_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_posted[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_posted, htt_stats_buf->sched_cmd_posted,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_posted = %s\n",
- sched_cmd_posted);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_posted,
+ "sched_cmd_posted", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1812,15 +1711,12 @@ htt_print_sched_txq_cmd_reaped_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_cmd_reaped[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2), HTT_TX_PDEV_SCHED_TX_MODE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV_V:\n");
- ARRAY_TO_STRING(sched_cmd_reaped, htt_stats_buf->sched_cmd_reaped,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_reaped = %s\n",
- sched_cmd_reaped);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_cmd_reaped,
+ "sched_cmd_reaped", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1839,18 +1735,15 @@ htt_print_sched_txq_sched_order_su_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_order_su[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_order_su_num_entries =
min_t(u32, (tag_len >> 2), HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV_V:\n");
- ARRAY_TO_STRING(sched_order_su, htt_stats_buf->sched_order_su,
- sched_order_su_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_order_su = %s\n",
- sched_order_su);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_order_su, "sched_order_su",
+ sched_order_su_num_entries, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1869,17 +1762,15 @@ htt_print_sched_txq_sched_ineligibility_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char sched_ineligibility[HTT_MAX_STRING_LEN] = {0};
/* each entry is u32, i.e. 4 bytes */
u32 sched_ineligibility_num_entries = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_SCHED_TXQ_SCHED_INELIGIBILITY_V:\n");
- ARRAY_TO_STRING(sched_ineligibility, htt_stats_buf->sched_ineligibility,
- sched_ineligibility_num_entries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_ineligibility = %s\n",
- sched_ineligibility);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->sched_ineligibility,
+ "sched_ineligibility", sched_ineligibility_num_entries,
+ "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1898,54 +1789,56 @@ htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__txq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txq_id = %u",
- (htt_stats_buf->mac_id__txq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_policy = %u",
- htt_stats_buf->sched_policy);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_posted_timestamp = %u",
- htt_stats_buf->last_sched_cmd_posted_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "last_sched_cmd_compl_timestamp = %u",
- htt_stats_buf->last_sched_cmd_compl_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u",
- htt_stats_buf->sched_2_tac_lwm_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_2_tac_ring_full = %u",
- htt_stats_buf->sched_2_tac_ring_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmd_post_failure = %u",
- htt_stats_buf->sched_cmd_post_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_active_tids = %u",
- htt_stats_buf->num_active_tids);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_ps_schedules = %u",
- htt_stats_buf->num_ps_schedules);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_cmds_pending = %u",
- htt_stats_buf->sched_cmds_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_register = %u",
- htt_stats_buf->num_tid_register);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tid_unregister = %u",
- htt_stats_buf->num_tid_unregister);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_qstats_queried = %u",
- htt_stats_buf->num_qstats_queried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qstats_update_pending = %u",
- htt_stats_buf->qstats_update_pending);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_qstats_query_timestamp = %u",
- htt_stats_buf->last_qstats_query_timestamp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_cmdq_full = %u",
- htt_stats_buf->num_tqm_cmdq_full);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u",
- htt_stats_buf->num_de_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u",
- htt_stats_buf->num_rt_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u",
- htt_stats_buf->num_tqm_sched_algo_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_sched = %u\n",
- htt_stats_buf->notify_sched);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
- htt_stats_buf->dur_based_sendn_term);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "txq_id = %lu\n",
+ FIELD_GET(HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID,
+ htt_stats_buf->mac_id__txq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
+ htt_stats_buf->sched_policy);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_posted_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_posted_timestamp);
+ len += scnprintf(buf + len, buf_len - len,
+ "last_sched_cmd_compl_timestamp = %u\n",
+ htt_stats_buf->last_sched_cmd_compl_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
+ htt_stats_buf->sched_2_tac_lwm_count);
+ len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
+ htt_stats_buf->sched_2_tac_ring_full);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
+ htt_stats_buf->sched_cmd_post_failure);
+ len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
+ htt_stats_buf->num_active_tids);
+ len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
+ htt_stats_buf->num_ps_schedules);
+ len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
+ htt_stats_buf->sched_cmds_pending);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
+ htt_stats_buf->num_tid_register);
+ len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
+ htt_stats_buf->num_tid_unregister);
+ len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
+ htt_stats_buf->num_qstats_queried);
+ len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
+ htt_stats_buf->qstats_update_pending);
+ len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
+ htt_stats_buf->last_qstats_query_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
+ htt_stats_buf->num_tqm_cmdq_full);
+ len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_de_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_rt_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
+ htt_stats_buf->num_tqm_sched_algo_trigger);
+ len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n\n",
+ htt_stats_buf->notify_sched);
+ len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n\n",
+ htt_stats_buf->dur_based_sendn_term);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1963,11 +1856,11 @@ static inline void htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_timestamp = %u\n",
- htt_stats_buf->current_timestamp);
+ len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
+ htt_stats_buf->current_timestamp);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -1986,16 +1879,13 @@ htt_print_tx_tqm_gen_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char gen_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV_V:\n");
- ARRAY_TO_STRING(gen_mpdu_end_reason, htt_stats_buf->gen_mpdu_end_reason,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_end_reason = %s\n",
- gen_mpdu_end_reason);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->gen_mpdu_end_reason,
+ "gen_mpdu_end_reason", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2014,16 +1904,14 @@ htt_print_tx_tqm_list_mpdu_stats_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_end_reason[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_TX_TQM_MAX_LIST_MPDU_END_REASON);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_TQM_LIST_MPDU_STATS_TLV_V:\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_end_reason,
+ "list_mpdu_end_reason", num_elems, "\n\n");
- ARRAY_TO_STRING(list_mpdu_end_reason, htt_stats_buf->list_mpdu_end_reason,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_end_reason = %s\n",
- list_mpdu_end_reason);
if (len >= buf_len)
buf[buf_len - 1] = 0;
else
@@ -2041,16 +1929,13 @@ htt_print_tx_tqm_list_mpdu_cnt_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char list_mpdu_cnt_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2),
HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n");
- ARRAY_TO_STRING(list_mpdu_cnt_hist, htt_stats_buf->list_mpdu_cnt_hist,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist = %s\n",
- list_mpdu_cnt_hist);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->list_mpdu_cnt_hist,
+ "list_mpdu_cnt_hist", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2069,69 +1954,69 @@ htt_print_tx_tqm_pdev_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_count = %u",
- htt_stats_buf->msdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_count = %u",
- htt_stats_buf->mpdu_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu = %u",
- htt_stats_buf->remove_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu = %u",
- htt_stats_buf->remove_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl = %u",
- htt_stats_buf->remove_msdu_ttl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_bar = %u",
- htt_stats_buf->send_bar);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bar_sync = %u",
- htt_stats_buf->bar_sync);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu = %u",
- htt_stats_buf->notify_mpdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_trigger = %u",
- htt_stats_buf->hwsch_trigger);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_tlv_proc = %u",
- htt_stats_buf->ack_tlv_proc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_list_cmd = %u",
- htt_stats_buf->gen_list_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u",
- htt_stats_buf->remove_mpdu_tried_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u",
- htt_stats_buf->remove_msdu_ttl_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue = %u",
- htt_stats_buf->enqueue);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueue_notify = %u",
- htt_stats_buf->enqueue_notify);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_at_head = %u",
- htt_stats_buf->notify_mpdu_at_head);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "notify_mpdu_state_valid = %u",
- htt_stats_buf->notify_mpdu_state_valid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify1 = %u",
- htt_stats_buf->sched_udp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_udp_notify2 = %u",
- htt_stats_buf->sched_udp_notify2);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify1 = %u",
- htt_stats_buf->sched_nonudp_notify1);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n",
- htt_stats_buf->sched_nonudp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n");
+ len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n",
+ htt_stats_buf->msdu_count);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n",
+ htt_stats_buf->mpdu_count);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n",
+ htt_stats_buf->remove_msdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n",
+ htt_stats_buf->remove_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n",
+ htt_stats_buf->remove_msdu_ttl);
+ len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n",
+ htt_stats_buf->send_bar);
+ len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n",
+ htt_stats_buf->bar_sync);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n",
+ htt_stats_buf->notify_mpdu);
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n",
+ htt_stats_buf->hwsch_trigger);
+ len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n",
+ htt_stats_buf->ack_tlv_proc);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n",
+ htt_stats_buf->gen_list_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_tried_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n",
+ htt_stats_buf->remove_msdu_ttl_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n",
+ htt_stats_buf->enqueue);
+ len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n",
+ htt_stats_buf->enqueue_notify);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n",
+ htt_stats_buf->notify_mpdu_at_head);
+ len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n",
+ htt_stats_buf->notify_mpdu_state_valid);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n",
+ htt_stats_buf->sched_udp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n",
+ htt_stats_buf->sched_udp_notify2);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n",
+ htt_stats_buf->sched_nonudp_notify1);
+ len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n",
+ htt_stats_buf->sched_nonudp_notify2);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2149,23 +2034,23 @@ static inline void htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "max_cmdq_id = %u",
- htt_stats_buf->max_cmdq_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u",
- htt_stats_buf->list_mpdu_cnt_hist_intvl);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu = %u",
- htt_stats_buf->add_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty = %u",
- htt_stats_buf->q_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty = %u",
- htt_stats_buf->q_not_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drop_notification = %u",
- htt_stats_buf->drop_notification);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "desc_threshold = %u\n",
- htt_stats_buf->desc_threshold);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n",
+ htt_stats_buf->max_cmdq_id);
+ len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n",
+ htt_stats_buf->list_mpdu_cnt_hist_intvl);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n",
+ htt_stats_buf->add_msdu);
+ len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n",
+ htt_stats_buf->q_empty);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n",
+ htt_stats_buf->q_not_empty);
+ len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n",
+ htt_stats_buf->drop_notification);
+ len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n\n",
+ htt_stats_buf->desc_threshold);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2183,13 +2068,13 @@ static inline void htt_print_tx_tqm_error_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_empty_failure = %u",
- htt_stats_buf->q_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "q_not_empty_failure = %u",
- htt_stats_buf->q_not_empty_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "add_msdu_failure = %u\n",
- htt_stats_buf->add_msdu_failure);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n",
+ htt_stats_buf->q_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n",
+ htt_stats_buf->q_not_empty_failure);
+ len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n",
+ htt_stats_buf->add_msdu_failure);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2207,33 +2092,35 @@ static inline void htt_print_tx_tqm_cmdq_status_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__cmdq_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cmdq_id = %u\n",
- (htt_stats_buf->mac_id__cmdq_id__word & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sync_cmd = %u",
- htt_stats_buf->sync_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "write_cmd = %u",
- htt_stats_buf->write_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "gen_mpdu_cmd = %u",
- htt_stats_buf->gen_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u",
- htt_stats_buf->mpdu_queue_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_head_info_cmd = %u",
- htt_stats_buf->mpdu_head_info_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u",
- htt_stats_buf->msdu_flow_stats_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_mpdu_cmd = %u",
- htt_stats_buf->remove_mpdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "remove_msdu_cmd = %u",
- htt_stats_buf->remove_msdu_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flush_cache_cmd = %u",
- htt_stats_buf->flush_cache_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_mpduq_cmd = %u",
- htt_stats_buf->update_mpduq_cmd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "update_msduq_cmd = %u\n",
- htt_stats_buf->update_msduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMDQ_STATUS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_MAC_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "cmdq_id = %lu\n\n",
+ FIELD_GET(HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID,
+ htt_stats_buf->mac_id__cmdq_id__word));
+ len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n",
+ htt_stats_buf->sync_cmd);
+ len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n",
+ htt_stats_buf->write_cmd);
+ len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n",
+ htt_stats_buf->gen_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n",
+ htt_stats_buf->mpdu_queue_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n",
+ htt_stats_buf->mpdu_head_info_cmd);
+ len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n",
+ htt_stats_buf->msdu_flow_stats_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n",
+ htt_stats_buf->remove_mpdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n",
+ htt_stats_buf->remove_msdu_cmd);
+ len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n",
+ htt_stats_buf->flush_cache_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n",
+ htt_stats_buf->update_mpduq_cmd);
+ len += scnprintf(buf + len, buf_len - len, "update_msduq_cmd = %u\n\n",
+ htt_stats_buf->update_msduq_cmd);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2252,20 +2139,20 @@ htt_print_tx_de_eapol_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m1_packets = %u",
- htt_stats_buf->m1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m2_packets = %u",
- htt_stats_buf->m2_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m3_packets = %u",
- htt_stats_buf->m3_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "m4_packets = %u",
- htt_stats_buf->m4_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g1_packets = %u",
- htt_stats_buf->g1_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "g2_packets = %u\n",
- htt_stats_buf->g2_packets);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_EAPOL_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "m1_packets = %u\n",
+ htt_stats_buf->m1_packets);
+ len += scnprintf(buf + len, buf_len - len, "m2_packets = %u\n",
+ htt_stats_buf->m2_packets);
+ len += scnprintf(buf + len, buf_len - len, "m3_packets = %u\n",
+ htt_stats_buf->m3_packets);
+ len += scnprintf(buf + len, buf_len - len, "m4_packets = %u\n",
+ htt_stats_buf->m4_packets);
+ len += scnprintf(buf + len, buf_len - len, "g1_packets = %u\n",
+ htt_stats_buf->g1_packets);
+ len += scnprintf(buf + len, buf_len - len, "g2_packets = %u\n\n",
+ htt_stats_buf->g2_packets);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2284,34 +2171,34 @@ htt_print_tx_de_classify_failed_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bss_peer_not_found = %u",
- htt_stats_buf->ap_bss_peer_not_found);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u",
- htt_stats_buf->ap_bcast_mcast_no_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sta_delete_in_progress = %u",
- htt_stats_buf->sta_delete_in_progress);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ibss_no_bss_peer = %u",
- htt_stats_buf->ibss_no_bss_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_vdev_type = %u",
- htt_stats_buf->invalid_vdev_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_ast_peer_entry = %u",
- htt_stats_buf->invalid_ast_peer_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "peer_entry_invalid = %u",
- htt_stats_buf->peer_entry_invalid);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ethertype_not_ip = %u",
- htt_stats_buf->ethertype_not_ip);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eapol_lookup_failed = %u",
- htt_stats_buf->eapol_lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qpeer_not_allow_data = %u",
- htt_stats_buf->qpeer_not_allow_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_tid_override = %u",
- htt_stats_buf->fse_tid_override);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u",
- htt_stats_buf->ipv6_jumbogram_zero_length);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n",
- htt_stats_buf->qos_to_non_qos_in_prog);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_FAILED_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "ap_bss_peer_not_found = %u\n",
+ htt_stats_buf->ap_bss_peer_not_found);
+ len += scnprintf(buf + len, buf_len - len, "ap_bcast_mcast_no_peer = %u\n",
+ htt_stats_buf->ap_bcast_mcast_no_peer);
+ len += scnprintf(buf + len, buf_len - len, "sta_delete_in_progress = %u\n",
+ htt_stats_buf->sta_delete_in_progress);
+ len += scnprintf(buf + len, buf_len - len, "ibss_no_bss_peer = %u\n",
+ htt_stats_buf->ibss_no_bss_peer);
+ len += scnprintf(buf + len, buf_len - len, "invalid_vdev_type = %u\n",
+ htt_stats_buf->invalid_vdev_type);
+ len += scnprintf(buf + len, buf_len - len, "invalid_ast_peer_entry = %u\n",
+ htt_stats_buf->invalid_ast_peer_entry);
+ len += scnprintf(buf + len, buf_len - len, "peer_entry_invalid = %u\n",
+ htt_stats_buf->peer_entry_invalid);
+ len += scnprintf(buf + len, buf_len - len, "ethertype_not_ip = %u\n",
+ htt_stats_buf->ethertype_not_ip);
+ len += scnprintf(buf + len, buf_len - len, "eapol_lookup_failed = %u\n",
+ htt_stats_buf->eapol_lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "qpeer_not_allow_data = %u\n",
+ htt_stats_buf->qpeer_not_allow_data);
+ len += scnprintf(buf + len, buf_len - len, "fse_tid_override = %u\n",
+ htt_stats_buf->fse_tid_override);
+ len += scnprintf(buf + len, buf_len - len, "ipv6_jumbogram_zero_length = %u\n",
+ htt_stats_buf->ipv6_jumbogram_zero_length);
+ len += scnprintf(buf + len, buf_len - len, "qos_to_non_qos_in_prog = %u\n\n",
+ htt_stats_buf->qos_to_non_qos_in_prog);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2330,73 +2217,73 @@ htt_print_tx_de_classify_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arp_packets = %u",
- htt_stats_buf->arp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "igmp_packets = %u",
- htt_stats_buf->igmp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dhcp_packets = %u",
- htt_stats_buf->dhcp_packets);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_inspected = %u",
- htt_stats_buf->host_inspected);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_included = %u",
- htt_stats_buf->htt_included);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_mcs = %u",
- htt_stats_buf->htt_valid_mcs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_nss = %u",
- htt_stats_buf->htt_valid_nss);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_preamble_type = %u",
- htt_stats_buf->htt_valid_preamble_type);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_chainmask = %u",
- htt_stats_buf->htt_valid_chainmask);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_guard_interval = %u",
- htt_stats_buf->htt_valid_guard_interval);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_retries = %u",
- htt_stats_buf->htt_valid_retries);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_bw_info = %u",
- htt_stats_buf->htt_valid_bw_info);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_power = %u",
- htt_stats_buf->htt_valid_power);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x",
- htt_stats_buf->htt_valid_key_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_valid_no_encryption = %u",
- htt_stats_buf->htt_valid_no_encryption);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_entry_count = %u",
- htt_stats_buf->fse_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_be = %u",
- htt_stats_buf->fse_priority_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_high = %u",
- htt_stats_buf->fse_priority_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_priority_low = %u",
- htt_stats_buf->fse_priority_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u",
- htt_stats_buf->fse_traffic_ptrn_be);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u",
- htt_stats_buf->fse_traffic_ptrn_over_sub);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u",
- htt_stats_buf->fse_traffic_ptrn_bursty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u",
- htt_stats_buf->fse_traffic_ptrn_interactive);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u",
- htt_stats_buf->fse_traffic_ptrn_periodic);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_alloc = %u",
- htt_stats_buf->fse_hwqueue_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_created = %u",
- htt_stats_buf->fse_hwqueue_created);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u",
- htt_stats_buf->fse_hwqueue_send_to_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mcast_entry = %u",
- htt_stats_buf->mcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "bcast_entry = %u",
- htt_stats_buf->bcast_entry);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_update_peer_cache = %u",
- htt_stats_buf->htt_update_peer_cache);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "htt_learning_frame = %u",
- htt_stats_buf->htt_learning_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fse_invalid_peer = %u",
- htt_stats_buf->fse_invalid_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mec_notify = %u\n",
- htt_stats_buf->mec_notify);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CLASSIFY_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "arp_packets = %u\n",
+ htt_stats_buf->arp_packets);
+ len += scnprintf(buf + len, buf_len - len, "igmp_packets = %u\n",
+ htt_stats_buf->igmp_packets);
+ len += scnprintf(buf + len, buf_len - len, "dhcp_packets = %u\n",
+ htt_stats_buf->dhcp_packets);
+ len += scnprintf(buf + len, buf_len - len, "host_inspected = %u\n",
+ htt_stats_buf->host_inspected);
+ len += scnprintf(buf + len, buf_len - len, "htt_included = %u\n",
+ htt_stats_buf->htt_included);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_mcs = %u\n",
+ htt_stats_buf->htt_valid_mcs);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_nss = %u\n",
+ htt_stats_buf->htt_valid_nss);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_preamble_type = %u\n",
+ htt_stats_buf->htt_valid_preamble_type);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_chainmask = %u\n",
+ htt_stats_buf->htt_valid_chainmask);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_guard_interval = %u\n",
+ htt_stats_buf->htt_valid_guard_interval);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_retries = %u\n",
+ htt_stats_buf->htt_valid_retries);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_bw_info = %u\n",
+ htt_stats_buf->htt_valid_bw_info);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_power = %u\n",
+ htt_stats_buf->htt_valid_power);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_key_flags = 0x%x\n",
+ htt_stats_buf->htt_valid_key_flags);
+ len += scnprintf(buf + len, buf_len - len, "htt_valid_no_encryption = %u\n",
+ htt_stats_buf->htt_valid_no_encryption);
+ len += scnprintf(buf + len, buf_len - len, "fse_entry_count = %u\n",
+ htt_stats_buf->fse_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_be = %u\n",
+ htt_stats_buf->fse_priority_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_high = %u\n",
+ htt_stats_buf->fse_priority_high);
+ len += scnprintf(buf + len, buf_len - len, "fse_priority_low = %u\n",
+ htt_stats_buf->fse_priority_low);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_be = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_be);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_over_sub = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_over_sub);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_bursty = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_bursty);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_interactive = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_interactive);
+ len += scnprintf(buf + len, buf_len - len, "fse_traffic_ptrn_periodic = %u\n",
+ htt_stats_buf->fse_traffic_ptrn_periodic);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_alloc = %u\n",
+ htt_stats_buf->fse_hwqueue_alloc);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_created = %u\n",
+ htt_stats_buf->fse_hwqueue_created);
+ len += scnprintf(buf + len, buf_len - len, "fse_hwqueue_send_to_host = %u\n",
+ htt_stats_buf->fse_hwqueue_send_to_host);
+ len += scnprintf(buf + len, buf_len - len, "mcast_entry = %u\n",
+ htt_stats_buf->mcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "bcast_entry = %u\n",
+ htt_stats_buf->bcast_entry);
+ len += scnprintf(buf + len, buf_len - len, "htt_update_peer_cache = %u\n",
+ htt_stats_buf->htt_update_peer_cache);
+ len += scnprintf(buf + len, buf_len - len, "htt_learning_frame = %u\n",
+ htt_stats_buf->htt_learning_frame);
+ len += scnprintf(buf + len, buf_len - len, "fse_invalid_peer = %u\n",
+ htt_stats_buf->fse_invalid_peer);
+ len += scnprintf(buf + len, buf_len - len, "mec_notify = %u\n\n",
+ htt_stats_buf->mec_notify);
if (len >= buf_len)
buf[buf_len - 1] = 0;
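
The conversion pattern throughout these hunks is the same: the driver-local HTT_DBG_OUT() wrapper is replaced with a plain scnprintf() call and the line terminator moves into the format string. Accumulating into `len` is safe with scnprintf() because, unlike snprintf(), it returns the number of characters actually stored (never more than size - 1), so `buf_len - len` cannot underflow once `len` starts below `buf_len`. A minimal user-space sketch of that behaviour, with scnprintf() emulated on top of vsnprintf() purely for illustration (the kernel provides the real helper in lib/vsprintf.c):

    #include <stdarg.h>
    #include <stdio.h>

    /* illustration only: user-space stand-in for the kernel's scnprintf() */
    static int scnprintf(char *buf, size_t size, const char *fmt, ...)
    {
    	va_list args;
    	int i;

    	va_start(args, fmt);
    	i = vsnprintf(buf, size, fmt, args);
    	va_end(args);

    	if (size == 0 || i < 0)
    		return 0;
    	/* report only what was actually stored, excluding the NUL */
    	return (i >= (int)size) ? (int)size - 1 : i;
    }

    int main(void)
    {
    	char buf[64];
    	unsigned int len = 0, buf_len = sizeof(buf);

    	/* each call writes at most buf_len - len - 1 bytes, so len can
    	 * never step past buf_len and the next size argument stays sane */
    	len += scnprintf(buf + len, buf_len - len, "eok = %u\n", 3u);
    	len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n", 7u);

    	printf("%s(len = %u)\n", buf, len);
    	return 0;
    }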
@@ -2415,24 +2302,24 @@ htt_print_tx_de_classify_status_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "eok = %u",
- htt_stats_buf->eok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "classify_done = %u",
- htt_stats_buf->classify_done);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lookup_failed = %u",
- htt_stats_buf->lookup_failed);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_dhcp = %u",
- htt_stats_buf->send_host_dhcp);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_mcast = %u",
- htt_stats_buf->send_host_mcast);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host_unknown_dest = %u",
- htt_stats_buf->send_host_unknown_dest);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "send_host = %u",
- htt_stats_buf->send_host);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "status_invalid = %u\n",
- htt_stats_buf->status_invalid);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_CLASSIFY_STATUS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "eok = %u\n",
+ htt_stats_buf->eok);
+ len += scnprintf(buf + len, buf_len - len, "classify_done = %u\n",
+ htt_stats_buf->classify_done);
+ len += scnprintf(buf + len, buf_len - len, "lookup_failed = %u\n",
+ htt_stats_buf->lookup_failed);
+ len += scnprintf(buf + len, buf_len - len, "send_host_dhcp = %u\n",
+ htt_stats_buf->send_host_dhcp);
+ len += scnprintf(buf + len, buf_len - len, "send_host_mcast = %u\n",
+ htt_stats_buf->send_host_mcast);
+ len += scnprintf(buf + len, buf_len - len, "send_host_unknown_dest = %u\n",
+ htt_stats_buf->send_host_unknown_dest);
+ len += scnprintf(buf + len, buf_len - len, "send_host = %u\n",
+ htt_stats_buf->send_host);
+ len += scnprintf(buf + len, buf_len - len, "status_invalid = %u\n\n",
+ htt_stats_buf->status_invalid);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2451,14 +2338,14 @@ htt_print_tx_de_enqueue_packets_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "enqueued_pkts = %u",
- htt_stats_buf->enqueued_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm = %u",
- htt_stats_buf->to_tqm);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "to_tqm_bypass = %u\n",
- htt_stats_buf->to_tqm_bypass);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_PACKETS_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "enqueued_pkts = %u\n",
+ htt_stats_buf->enqueued_pkts);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm = %u\n",
+ htt_stats_buf->to_tqm);
+ len += scnprintf(buf + len, buf_len - len, "to_tqm_bypass = %u\n\n",
+ htt_stats_buf->to_tqm_bypass);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2477,14 +2364,14 @@ htt_print_tx_de_enqueue_discard_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "discarded_pkts = %u",
- htt_stats_buf->discarded_pkts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_frames = %u",
- htt_stats_buf->local_frames);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "is_ext_msdu = %u\n",
- htt_stats_buf->is_ext_msdu);
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_ENQUEUE_DISCARD_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "discarded_pkts = %u\n",
+ htt_stats_buf->discarded_pkts);
+ len += scnprintf(buf + len, buf_len - len, "local_frames = %u\n",
+ htt_stats_buf->local_frames);
+ len += scnprintf(buf + len, buf_len - len, "is_ext_msdu = %u\n\n",
+ htt_stats_buf->is_ext_msdu);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2502,17 +2389,17 @@ static inline void htt_print_tx_de_compl_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_dummy_frame = %u",
- htt_stats_buf->tcl_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_dummy_frame = %u",
- htt_stats_buf->tqm_dummy_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_notify_frame = %u",
- htt_stats_buf->tqm_notify_frame);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw2wbm_enq = %u",
- htt_stats_buf->fw2wbm_enq);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tqm_bypass_frame = %u\n",
- htt_stats_buf->tqm_bypass_frame);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_COMPL_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "tcl_dummy_frame = %u\n",
+ htt_stats_buf->tcl_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_dummy_frame = %u\n",
+ htt_stats_buf->tqm_dummy_frame);
+ len += scnprintf(buf + len, buf_len - len, "tqm_notify_frame = %u\n",
+ htt_stats_buf->tqm_notify_frame);
+ len += scnprintf(buf + len, buf_len - len, "fw2wbm_enq = %u\n",
+ htt_stats_buf->fw2wbm_enq);
+ len += scnprintf(buf + len, buf_len - len, "tqm_bypass_frame = %u\n\n",
+ htt_stats_buf->tqm_bypass_frame);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2531,24 +2418,13 @@ htt_print_tx_de_fw2wbm_ring_full_hist_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw2wbm_ring_full_hist[HTT_MAX_STRING_LEN] = {0};
u16 num_elements = tag_len >> 2;
- u32 required_buffer_size = HTT_MAX_PRINT_CHAR_PER_ELEM * num_elements;
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
-
- if (required_buffer_size < HTT_MAX_STRING_LEN) {
- ARRAY_TO_STRING(fw2wbm_ring_full_hist,
- htt_stats_buf->fw2wbm_ring_full_hist,
- num_elements);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "fw2wbm_ring_full_hist = %s\n",
- fw2wbm_ring_full_hist);
- } else {
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "INSUFFICIENT PRINT BUFFER ");
- }
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TX_DE_FW2WBM_RING_FULL_HIST_TLV");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw2wbm_ring_full_hist,
+ "fw2wbm_ring_full_hist", num_elements, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
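
PRINT_ARRAY_TO_BUF() itself is defined elsewhere in the patch (in the driver's debugfs header, not in these hunks). Judging only from the call sites here, it prints an optional label, then " index:value," pairs, then a caller-supplied footer, advancing `len` as it goes. A function-style sketch of that behaviour, written as an assumption rather than the actual macro body, and reusing the scnprintf() emulation from the earlier sketch:

    /* sketch only: approximates what the PRINT_ARRAY_TO_BUF() call sites imply */
    static void print_array_to_buf(char *buf, unsigned int *len, unsigned int buf_len,
    			       const unsigned int *arr, const char *label,
    			       unsigned int n, const char *footer)
    {
    	unsigned int i;

    	if (label)
    		*len += scnprintf(buf + *len, buf_len - *len, "%s = ", label);
    	for (i = 0; i < n; i++)
    		*len += scnprintf(buf + *len, buf_len - *len, " %u:%u,", i, arr[i]);
    	*len += scnprintf(buf + *len, buf_len - *len, "%s", footer);
    }

This is also why the old "INSUFFICIENT PRINT BUFFER" fallback and the HTT_MAX_STRING_LEN scratch string disappear: writing directly into the stats buffer means there is no fixed-size intermediate that could overflow.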
@@ -2566,21 +2442,21 @@ htt_print_tx_de_cmn_stats_tlv(const void *tag_buf, struct debug_htt_stats_req *s
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl2fw_entry_count = %u",
- htt_stats_buf->tcl2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "not_to_fw = %u",
- htt_stats_buf->not_to_fw);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u",
- htt_stats_buf->invalid_pdev_vdev_peer);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u",
- htt_stats_buf->tcl_res_invalid_addrx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm2fw_entry_count = %u",
- htt_stats_buf->wbm2fw_entry_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "invalid_pdev = %u\n",
- htt_stats_buf->invalid_pdev);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_DE_CMN_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tcl2fw_entry_count = %u\n",
+ htt_stats_buf->tcl2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "not_to_fw = %u\n",
+ htt_stats_buf->not_to_fw);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev_vdev_peer = %u\n",
+ htt_stats_buf->invalid_pdev_vdev_peer);
+ len += scnprintf(buf + len, buf_len - len, "tcl_res_invalid_addrx = %u\n",
+ htt_stats_buf->tcl_res_invalid_addrx);
+ len += scnprintf(buf + len, buf_len - len, "wbm2fw_entry_count = %u\n",
+ htt_stats_buf->wbm2fw_entry_count);
+ len += scnprintf(buf + len, buf_len - len, "invalid_pdev = %u\n\n",
+ htt_stats_buf->invalid_pdev);
if (len >= buf_len)
buf[buf_len - 1] = 0;
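
Another recurring change is that open-coded mask-and-shift expressions such as `mac_id__word & 0xFF` become FIELD_GET() against named masks like HTT_STATS_MAC_ID. FIELD_GET() masks and right-shifts in one step, and because GENMASK() expands to an unsigned long constant the extracted value is unsigned long, which is why the format specifiers switch from %u to %lu. A small kernel-style sketch; the mask value shown is an assumption for illustration, the real definition lives in the driver header:

    #include <linux/bitfield.h>	/* FIELD_GET() */
    #include <linux/bits.h>		/* GENMASK() */
    #include <linux/types.h>	/* u32 */

    /* assumption: an 8-bit mac_id field at bits 7:0 would be declared like this */
    #define HTT_STATS_MAC_ID_SKETCH		GENMASK(7, 0)

    static unsigned long sketch_mac_id(u32 mac_id__word)
    {
    	/* mask and shift in one step; result type follows the mask,
    	 * i.e. unsigned long, hence the %lu in the format strings above */
    	return FIELD_GET(HTT_STATS_MAC_ID_SKETCH, mac_id__word);
    }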
@@ -2597,52 +2473,51 @@ static inline void htt_print_ring_if_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char low_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
- char high_wm_hit_count[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr = %u",
- htt_stats_buf->base_addr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_elems = %u",
- htt_stats_buf->num_elems__prefetch_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_tail_idx = %u",
- (htt_stats_buf->num_elems__prefetch_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_idx = %u",
- htt_stats_buf->head_idx__tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_idx = %u",
- (htt_stats_buf->head_idx__tail_idx & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_head_idx = %u",
- htt_stats_buf->shadow_head_idx__shadow_tail_idx & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "shadow_tail_idx = %u",
- (htt_stats_buf->shadow_head_idx__shadow_tail_idx &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_tail_incr = %u",
- htt_stats_buf->num_tail_incr);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "lwm_thresh = %u",
- htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwm_thresh = %u",
- (htt_stats_buf->lwm_thresh__hwm_thresh & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "overrun_hit_count = %u",
- htt_stats_buf->overrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "underrun_hit_count = %u",
- htt_stats_buf->underrun_hit_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prod_blockwait_count = %u",
- htt_stats_buf->prod_blockwait_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "cons_blockwait_count = %u",
- htt_stats_buf->cons_blockwait_count);
-
- ARRAY_TO_STRING(low_wm_hit_count, htt_stats_buf->low_wm_hit_count,
- HTT_STATS_LOW_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "low_wm_hit_count = %s ",
- low_wm_hit_count);
-
- ARRAY_TO_STRING(high_wm_hit_count, htt_stats_buf->high_wm_hit_count,
- HTT_STATS_HIGH_WM_BINS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "high_wm_hit_count = %s\n",
- high_wm_hit_count);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "base_addr = %u\n",
+ htt_stats_buf->base_addr);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_elems = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_NUM_ELEMS,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX,
+ htt_stats_buf->num_elems__prefetch_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HEAD_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_TAIL_IDX,
+ htt_stats_buf->head_idx__tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_head_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_HEAD_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "shadow_tail_idx = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_SHADOW_TAIL_IDX,
+ htt_stats_buf->shadow_head_idx__shadow_tail_idx));
+ len += scnprintf(buf + len, buf_len - len, "num_tail_incr = %u\n",
+ htt_stats_buf->num_tail_incr);
+ len += scnprintf(buf + len, buf_len - len, "lwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_LWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "hwm_thresh = %lu\n",
+ FIELD_GET(HTT_RING_IF_STATS_HWM_THRESH,
+ htt_stats_buf->lwm_thresh__hwm_thresh));
+ len += scnprintf(buf + len, buf_len - len, "overrun_hit_count = %u\n",
+ htt_stats_buf->overrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "underrun_hit_count = %u\n",
+ htt_stats_buf->underrun_hit_count);
+ len += scnprintf(buf + len, buf_len - len, "prod_blockwait_count = %u\n",
+ htt_stats_buf->prod_blockwait_count);
+ len += scnprintf(buf + len, buf_len - len, "cons_blockwait_count = %u\n",
+ htt_stats_buf->cons_blockwait_count);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->low_wm_hit_count,
+ "low_wm_hit_count", HTT_STATS_LOW_WM_BINS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->high_wm_hit_count,
+ "high_wm_hit_count", HTT_STATS_HIGH_WM_BINS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
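
Every printer in this file ends with the same guard visible in the context lines: if the accumulated length reached the buffer size, terminate the last byte, otherwise terminate right after the output, then record the length for the reader. A sketch of that epilogue, assembled from the surrounding context; the struct name and the `else` branch are inferred from the visible lines rather than shown in these hunks:

    #include <linux/types.h>

    /* stand-in for the driver's debug_htt_stats_req, fields inferred from usage */
    struct debug_htt_stats_req_sketch {
    	u8 *buf;
    	u32 buf_len;
    };

    static void finish_stats_buf_sketch(u8 *buf, u32 buf_len, u32 len,
    				    struct debug_htt_stats_req_sketch *req)
    {
    	if (len >= buf_len)
    		buf[buf_len - 1] = 0;	/* truncated: terminate the final byte */
    	else
    		buf[len] = 0;		/* normal: terminate right after the output */

    	req->buf_len = len;
    }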
@@ -2660,11 +2535,11 @@ static inline void htt_print_ring_if_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RING_IF_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2682,16 +2557,12 @@ static inline void htt_print_sfm_client_user_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char dwords_used_by_user_n[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = tag_len >> 2;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_USER_TLV_V:\n");
- ARRAY_TO_STRING(dwords_used_by_user_n,
- htt_stats_buf->dwords_used_by_user_n,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dwords_used_by_user_n = %s\n",
- dwords_used_by_user_n);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->dwords_used_by_user_n,
+ "dwords_used_by_user_n", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2709,21 +2580,21 @@ static inline void htt_print_sfm_client_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "client_id = %u",
- htt_stats_buf->client_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_min = %u",
- htt_stats_buf->buf_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_max = %u",
- htt_stats_buf->buf_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_busy = %u",
- htt_stats_buf->buf_busy);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_alloc = %u",
- htt_stats_buf->buf_alloc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_avail = %u",
- htt_stats_buf->buf_avail);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_users = %u\n",
- htt_stats_buf->num_users);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CLIENT_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "client_id = %u\n",
+ htt_stats_buf->client_id);
+ len += scnprintf(buf + len, buf_len - len, "buf_min = %u\n",
+ htt_stats_buf->buf_min);
+ len += scnprintf(buf + len, buf_len - len, "buf_max = %u\n",
+ htt_stats_buf->buf_max);
+ len += scnprintf(buf + len, buf_len - len, "buf_busy = %u\n",
+ htt_stats_buf->buf_busy);
+ len += scnprintf(buf + len, buf_len - len, "buf_alloc = %u\n",
+ htt_stats_buf->buf_alloc);
+ len += scnprintf(buf + len, buf_len - len, "buf_avail = %u\n",
+ htt_stats_buf->buf_avail);
+ len += scnprintf(buf + len, buf_len - len, "num_users = %u\n\n",
+ htt_stats_buf->num_users);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2741,17 +2612,17 @@ static inline void htt_print_sfm_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "buf_total = %u",
- htt_stats_buf->buf_total);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mem_empty = %u",
- htt_stats_buf->mem_empty);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "deallocate_bufs = %u",
- htt_stats_buf->deallocate_bufs);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SFM_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "buf_total = %u\n",
+ htt_stats_buf->buf_total);
+ len += scnprintf(buf + len, buf_len - len, "mem_empty = %u\n",
+ htt_stats_buf->mem_empty);
+ len += scnprintf(buf + len, buf_len - len, "deallocate_bufs = %u\n",
+ htt_stats_buf->deallocate_bufs);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2769,42 +2640,51 @@ static inline void htt_print_sring_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_id = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "arena = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0xFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ep = %u",
- (htt_stats_buf->mac_id__ring_id__arena__ep & 0x1000000) >> 24);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_lsb = 0x%x",
- htt_stats_buf->base_addr_lsb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "base_addr_msb = 0x%x",
- htt_stats_buf->base_addr_msb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ring_size = %u",
- htt_stats_buf->ring_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "elem_size = %u",
- htt_stats_buf->elem_size);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_avail_words = %u",
- htt_stats_buf->num_avail_words__num_valid_words & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_valid_words = %u",
- (htt_stats_buf->num_avail_words__num_valid_words &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "head_ptr = %u",
- htt_stats_buf->head_ptr__tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tail_ptr = %u",
- (htt_stats_buf->head_ptr__tail_ptr & 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "consumer_empty = %u",
- htt_stats_buf->consumer_empty__producer_full & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "producer_full = %u",
- (htt_stats_buf->consumer_empty__producer_full &
- 0xFFFF0000) >> 16);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "prefetch_count = %u",
- htt_stats_buf->prefetch_count__internal_tail_ptr & 0xFFFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "internal_tail_ptr = %u\n",
- (htt_stats_buf->prefetch_count__internal_tail_ptr &
- 0xFFFF0000) >> 16);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_MAC_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ring_id = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_RING_ID,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "arena = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_ARENA,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "ep = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_EP,
+ htt_stats_buf->mac_id__ring_id__arena__ep));
+ len += scnprintf(buf + len, buf_len - len, "base_addr_lsb = 0x%x\n",
+ htt_stats_buf->base_addr_lsb);
+ len += scnprintf(buf + len, buf_len - len, "base_addr_msb = 0x%x\n",
+ htt_stats_buf->base_addr_msb);
+ len += scnprintf(buf + len, buf_len - len, "ring_size = %u\n",
+ htt_stats_buf->ring_size);
+ len += scnprintf(buf + len, buf_len - len, "elem_size = %u\n",
+ htt_stats_buf->elem_size);
+ len += scnprintf(buf + len, buf_len - len, "num_avail_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_AVAIL_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "num_valid_words = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_NUM_VALID_WORDS,
+ htt_stats_buf->num_avail_words__num_valid_words));
+ len += scnprintf(buf + len, buf_len - len, "head_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_HEAD_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "tail_ptr = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_TAIL_PTR,
+ htt_stats_buf->head_ptr__tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "consumer_empty = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_CONSUMER_EMPTY,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "producer_full = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PRODUCER_FULL,
+ htt_stats_buf->consumer_empty__producer_full));
+ len += scnprintf(buf + len, buf_len - len, "prefetch_count = %lu\n",
+ FIELD_GET(HTT_SRING_STATS_PREFETCH_COUNT,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
+ len += scnprintf(buf + len, buf_len - len, "internal_tail_ptr = %lu\n\n",
+ FIELD_GET(HTT_SRING_STATS_INTERNAL_TAIL_PTR,
+ htt_stats_buf->prefetch_count__internal_tail_ptr));
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2822,9 +2702,9 @@ static inline void htt_print_sring_cmn_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u\n",
- htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "HTT_SRING_CMN_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n\n",
+ htt_stats_buf->num_records);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -2842,165 +2722,115 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 j;
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *tx_gi[HTT_TX_PEER_STATS_NUM_GI_COUNTERS] = {NULL};
-
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++) {
- tx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!tx_gi[j])
- goto fail;
- }
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_ldpc = %u",
- htt_stats_buf->tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ac_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u",
- htt_stats_buf->ax_mu_mimo_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_ldpc = %u",
- htt_stats_buf->ofdma_tx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_success = %u",
- htt_stats_buf->rts_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ack_rssi = %u",
- htt_stats_buf->ack_rssi);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u",
- htt_stats_buf->tx_legacy_cck_rate[0],
- htt_stats_buf->tx_legacy_cck_rate[1],
- htt_stats_buf->tx_legacy_cck_rate[2],
- htt_stats_buf->tx_legacy_cck_rate[3]);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
- " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u",
- htt_stats_buf->tx_legacy_ofdm_rate[0],
- htt_stats_buf->tx_legacy_ofdm_rate[1],
- htt_stats_buf->tx_legacy_ofdm_rate[2],
- htt_stats_buf->tx_legacy_ofdm_rate[3],
- htt_stats_buf->tx_legacy_ofdm_rate[4],
- htt_stats_buf->tx_legacy_ofdm_rate[5],
- htt_stats_buf->tx_legacy_ofdm_rate[6],
- htt_stats_buf->tx_legacy_ofdm_rate[7]);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_mcs,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_nss,
- HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ac_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ac_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ax_mu_mimo_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ax_mu_mimo_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ofdma_tx_bw,
- HTT_TX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_bw = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_stbc,
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_pream,
- HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_pream = %s ", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u",
- htt_stats_buf->tx_he_ltf[1],
- htt_stats_buf->tx_he_ltf[2],
- htt_stats_buf->tx_he_ltf[3]);
+ len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+ htt_stats_buf->tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ac_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+ htt_stats_buf->ax_mu_mimo_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+ htt_stats_buf->ofdma_tx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+ htt_stats_buf->rts_success);
+ len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+ htt_stats_buf->ack_rssi);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 11 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_cck_rate[0],
+ htt_stats_buf->tx_legacy_cck_rate[1],
+ htt_stats_buf->tx_legacy_cck_rate[2],
+ htt_stats_buf->tx_legacy_cck_rate[3]);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+ " 24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+ htt_stats_buf->tx_legacy_ofdm_rate[0],
+ htt_stats_buf->tx_legacy_ofdm_rate[1],
+ htt_stats_buf->tx_legacy_ofdm_rate[2],
+ htt_stats_buf->tx_legacy_ofdm_rate[3],
+ htt_stats_buf->tx_legacy_ofdm_rate[4],
+ htt_stats_buf->tx_legacy_ofdm_rate[5],
+ htt_stats_buf->tx_legacy_ofdm_rate[6],
+ htt_stats_buf->tx_legacy_ofdm_rate[7]);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_mcs, "tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_mcs,
+ "ac_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_mcs,
+ "ax_mu_mimo_tx_mcs", HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_mcs, "ofdma_tx_mcs",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_nss, "tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_nss,
+ "ac_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_nss,
+ "ax_mu_mimo_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_nss, "ofdma_tx_nss",
+ HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_bw, "tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_bw,
+ "ac_mu_mimo_tx_bw", HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_bw,
+ "ax_mu_mimo_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_bw, "ofdma_tx_bw",
+ HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_stbc, "tx_stbc",
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_pream, "tx_pream",
+ HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+ htt_stats_buf->tx_he_ltf[1],
+ htt_stats_buf->tx_he_ltf[2],
+ htt_stats_buf->tx_he_ltf[3]);
/* SU GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AC MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ac_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ac_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ac_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ac_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* AX MU-MIMO GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ax_mu_mimo_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ax_mu_mimo_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ax_mu_mimo_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ax_mu_mimo_tx_gi[j],
+ NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
/* DL OFDMA GI Stats */
for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(tx_gi[j], htt_stats_buf->ofdma_tx_gi[j],
- HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofdma_tx_gi[%u] = %s ",
- j, tx_gi[j]);
+ len += scnprintf(buf + len, (buf_len - len),
+ "ofdma_tx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ofdma_tx_gi[j], NULL,
+ HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->tx_dcm,
- HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tx_dcm = %s\n", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->tx_dcm, "tx_dcm",
+ HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3008,9 +2838,6 @@ static inline void htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-fail:
- for (j = 0; j < HTT_TX_PEER_STATS_NUM_GI_COUNTERS; j++)
- kfree(tx_gi[j]);
}
static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
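
Beyond the formatting change, this hunk also deletes the per-GI scratch strings that were allocated with kmalloc(GFP_ATOMIC) and freed under the fail: label, so an allocation failure can no longer silently drop the whole TLV. It also sidesteps a quirk in the old code: the allocation loop was bounded by HTT_TX_PEER_STATS_NUM_GI_COUNTERS while the print loops iterated HTT_TX_PDEV_STATS_NUM_GI_COUNTERS. The converted per-GI loop, written here against the hypothetical print_array_to_buf() from the earlier sketch (the driver uses its PRINT_ARRAY_TO_BUF() macro instead), needs no scratch memory at all:

    /* sketch of the converted loop: label, then the array, straight into buf */
    for (j = 0; j < HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
    	len += scnprintf(buf + len, buf_len - len, "tx_gi[%u] = ", j);
    	print_array_to_buf(buf, &len, buf_len, htt_stats_buf->tx_gi[j],
    			   NULL, HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
    }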
@@ -3021,226 +2848,168 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
u8 i, j;
- u16 index = 0;
- char *rssi_chain[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- char *rx_gi[HTT_RX_PDEV_STATS_NUM_GI_COUNTERS] = {NULL};
- char str_buf[HTT_MAX_STRING_LEN] = {0};
- char *rx_pilot_evm_db[HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS] = {NULL};
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rssi_chain[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rssi_chain[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- rx_gi[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_gi[j])
- goto fail;
- }
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- rx_pilot_evm_db[j] = kmalloc(HTT_MAX_STRING_LEN, GFP_ATOMIC);
- if (!rx_pilot_evm_db[j])
- goto fail;
- }
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "nsts = %u",
- htt_stats_buf->nsts);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ldpc = %u",
- htt_stats_buf->rx_ldpc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rts_cnt = %u",
- htt_stats_buf->rts_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_mgmt = %u",
- htt_stats_buf->rssi_mgmt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_data = %u",
- htt_stats_buf->rssi_data);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_comb = %u",
- htt_stats_buf->rssi_comb);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_in_dbm = %d",
- htt_stats_buf->rssi_in_dbm);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_mcs = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_nss = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_dcm,
- HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_dcm = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_stbc,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_stbc = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_bw = %s ", str_buf);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_nss_count = %u",
- htt_stats_buf->nss_count);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_evm_pilot_count = %u",
- htt_stats_buf->pilot_count);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+ htt_stats_buf->nsts);
+ len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+ htt_stats_buf->rx_ldpc);
+ len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+ htt_stats_buf->rts_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+ htt_stats_buf->rssi_mgmt);
+ len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+ htt_stats_buf->rssi_data);
+ len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+ htt_stats_buf->rssi_comb);
+ len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+ htt_stats_buf->rssi_in_dbm);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_mcs, "rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_nss, "rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_dcm, "rx_dcm",
+ HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_stbc, "rx_stbc",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_bw, "rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+ htt_stats_buf->nss_count);
+
+ len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+ htt_stats_buf->pilot_count);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
-
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_PER_NSS; i++)
- index += scnprintf(&rx_pilot_evm_db[j][index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_pilot_evm_db[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB[%u] = %s ",
- j, rx_pilot_evm_db[j]);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_pilot_evm_db[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "pilot_evm_db_mean = ");
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,", i, htt_stats_buf->rx_pilot_evm_db_mean[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pilot_evm_dB_mean = %s ", str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,", i,
+ htt_stats_buf->rx_pilot_evm_db_mean[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- ARRAY_TO_STRING(rssi_chain[j], htt_stats_buf->rssi_chain[j],
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rssi_chain[%u] = %s ",
- j, rssi_chain[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rssi_chain[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rssi_chain[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
}
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_pream,
- HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_pream = %s", str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_su_ext = %u",
- htt_stats_buf->rx_11ax_su_ext);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ac_mumimo = %u",
- htt_stats_buf->rx_11ac_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_mumimo = %u",
- htt_stats_buf->rx_11ax_mumimo);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ofdma = %u",
- htt_stats_buf->rx_11ax_ofdma);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "txbf = %u",
- htt_stats_buf->txbf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_cck_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_cck_rate = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_legacy_ofdm_rate,
- HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_legacy_ofdm_rate = %s ",
- str_buf);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_low = %u",
- htt_stats_buf->rx_active_dur_us_low);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_active_dur_us_high = %u",
- htt_stats_buf->rx_active_dur_us_high);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u",
- htt_stats_buf->rx_11ax_ul_ofdma);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_mcs,
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_mcs = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_pream, "rx_pream",
+ HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+ htt_stats_buf->rx_11ax_su_ext);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+ htt_stats_buf->rx_11ac_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+ htt_stats_buf->rx_11ax_mumimo);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ofdma);
+ len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+ htt_stats_buf->txbf);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_cck_rate,
+ "rx_legacy_cck_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_legacy_ofdm_rate,
+ "rx_legacy_ofdm_rate",
+ HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+ htt_stats_buf->rx_active_dur_us_low);
+ len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+ htt_stats_buf->rx_active_dur_us_high);
+ len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+ htt_stats_buf->rx_11ax_ul_ofdma);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_mcs,
+ "ul_ofdma_rx_mcs",
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
- ARRAY_TO_STRING(rx_gi[j], htt_stats_buf->ul_ofdma_rx_gi[j],
- HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u] = %s ",
- j, rx_gi[j]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ul_ofdma_rx_gi[%u] = ", j);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_gi[j], NULL,
+ HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
}
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_nss,
- HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_nss = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_nss,
+ "ul_ofdma_rx_nss",
+ HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->ul_ofdma_rx_bw,
- HTT_RX_PDEV_STATS_NUM_BW_COUNTERS);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_bw = %s ", str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->ul_ofdma_rx_bw, "ul_ofdma_rx_bw",
+ HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
htt_stats_buf->ul_ofdma_rx_stbc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u",
+ len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
htt_stats_buf->ul_ofdma_rx_ldpc);
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_non_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_data_ppdu,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_data_ppdu = %s ",
- str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_ok,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok = %s ", str_buf);
-
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
- ARRAY_TO_STRING(str_buf, htt_stats_buf->rx_ulofdma_mpdu_fail,
- HTT_RX_PDEV_MAX_OFDMA_NUM_USER);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail = %s",
- str_buf);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_non_data_ppdu,
+ "rx_ulofdma_non_data_ppdu",
+ HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_data_ppdu,
+ "rx_ulofdma_data_ppdu", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_ok,
+ "rx_ulofdma_mpdu_ok", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rx_ulofdma_mpdu_fail,
+ "rx_ulofdma_mpdu_fail", HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ul_fd_rssi: nss[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_ul_fd_rssi: nss[%u] = %s", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x",
- htt_stats_buf->per_chain_rssi_pkt_type);
+ len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+ htt_stats_buf->per_chain_rssi_pkt_type);
for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
- index = 0;
- memset(str_buf, 0x0, HTT_MAX_STRING_LEN);
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_per_chain_rssi_in_dbm[%u] = ", j);
for (i = 0; i < HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
- index += scnprintf(&str_buf[index],
- HTT_MAX_STRING_LEN - index,
- " %u:%d,",
- i,
- htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "rx_per_chain_rssi_in_dbm[%u] = %s ", j, str_buf);
+ len += scnprintf(buf + len,
+ buf_len - len,
+ " %u:%d,",
+ i,
+ htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
}
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\n");
+ len += scnprintf(buf + len, buf_len - len, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3248,16 +3017,6 @@ static inline void htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf,
buf[len] = 0;
stats_req->buf_len = len;
-
-fail:
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rssi_chain[j]);
-
- for (j = 0; j < HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
- kfree(rx_pilot_evm_db[j]);
-
- for (i = 0; i < HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; i++)
- kfree(rx_gi[i]);
}
static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
@@ -3268,34 +3027,34 @@ static inline void htt_print_rx_soc_fw_stats_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u",
- htt_stats_buf->fw_reo_ring_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u",
- htt_stats_buf->fw_to_host_data_msdu_bcmc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u",
- htt_stats_buf->fw_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_remote_free_buf_indication_cnt = %u",
- htt_stats_buf->ofld_remote_free_buf_indication_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_buf_to_host_data_msdu_uc = %u",
- htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "reo_fw_ring_to_host_data_msdu_uc = %u",
- htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_sw_ring_reap = %u",
- htt_stats_buf->wbm_sw_ring_reap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u",
- htt_stats_buf->wbm_forward_to_host_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u",
- htt_stats_buf->wbm_target_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "target_refill_ring_recycle_cnt = %u",
- htt_stats_buf->target_refill_ring_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_SOC_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "fw_reo_ring_data_msdu = %u\n",
+ htt_stats_buf->fw_reo_ring_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_bcmc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_bcmc);
+ len += scnprintf(buf + len, buf_len - len, "fw_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->fw_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_remote_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_remote_free_buf_indication_cnt = %u\n",
+ htt_stats_buf->ofld_remote_free_buf_indication_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_buf_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->ofld_buf_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len,
+ "reo_fw_ring_to_host_data_msdu_uc = %u\n",
+ htt_stats_buf->reo_fw_ring_to_host_data_msdu_uc);
+ len += scnprintf(buf + len, buf_len - len, "wbm_sw_ring_reap = %u\n",
+ htt_stats_buf->wbm_sw_ring_reap);
+ len += scnprintf(buf + len, buf_len - len, "wbm_forward_to_host_cnt = %u\n",
+ htt_stats_buf->wbm_forward_to_host_cnt);
+ len += scnprintf(buf + len, buf_len - len, "wbm_target_recycle_cnt = %u\n",
+ htt_stats_buf->wbm_target_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "target_refill_ring_recycle_cnt = %u\n",
+ htt_stats_buf->target_refill_ring_recycle_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3314,17 +3073,13 @@ htt_print_rx_soc_fw_refill_ring_empty_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_empty_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_empty_cnt,
- htt_stats_buf->refill_ring_empty_cnt,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_empty_cnt = %s\n",
- refill_ring_empty_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_empty_cnt,
+ "refill_ring_empty_cnt", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
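
The variable-length TLV printers keep the existing clamp on the element count before handing the array to PRINT_ARRAY_TO_BUF(). The TLV length is a byte count and each counter is a 32-bit word, so the shift converts bytes to elements and min_t() bounds the result by the destination array size. A short sketch of that guard, assuming the same constants as the driver header:

    #include <linux/minmax.h>	/* min_t() */
    #include <linux/types.h>	/* u16 */

    static u16 clamp_refill_ring_elems(u16 tag_len)
    {
    	/* bytes -> 32-bit words, then clamp so the print loop cannot walk
    	 * past refill_ring_empty_cnt[HTT_RX_STATS_REFILL_MAX_RING] */
    	return min_t(u16, tag_len >> 2, HTT_RX_STATS_REFILL_MAX_RING);
    }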
@@ -3344,17 +3099,13 @@ htt_print_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char rxdma_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_RXDMA_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_RXDMA_ERR_TLV_V:\n");
- ARRAY_TO_STRING(rxdma_err_cnt,
- htt_stats_buf->rxdma_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rxdma_err = %s\n",
- rxdma_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->rxdma_err, "rxdma_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3373,17 +3124,13 @@ htt_print_rx_soc_fw_refill_ring_num_reo_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char reo_err_cnt[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_REO_MAX_ERR_CODE);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REO_ERR_TLV_V:\n");
- ARRAY_TO_STRING(reo_err_cnt,
- htt_stats_buf->reo_err,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "reo_err = %s\n",
- reo_err_cnt);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->reo_err, "reo_err",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3402,27 +3149,27 @@ htt_print_rx_reo_debug_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sample_id = %u",
- htt_stats_buf->sample_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_max = %u",
- htt_stats_buf->total_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_avg = %u",
- htt_stats_buf->total_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_sample = %u",
- htt_stats_buf->total_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_avg = %u",
- htt_stats_buf->non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "non_zeros_sample = %u",
- htt_stats_buf->non_zeros_sample);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_max = %u",
- htt_stats_buf->last_non_zeros_max);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_min %u",
- htt_stats_buf->last_non_zeros_min);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_avg %u",
- htt_stats_buf->last_non_zeros_avg);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_non_zeros_sample %u\n",
- htt_stats_buf->last_non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_REO_RESOURCE_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "sample_id = %u\n",
+ htt_stats_buf->sample_id);
+ len += scnprintf(buf + len, buf_len - len, "total_max = %u\n",
+ htt_stats_buf->total_max);
+ len += scnprintf(buf + len, buf_len - len, "total_avg = %u\n",
+ htt_stats_buf->total_avg);
+ len += scnprintf(buf + len, buf_len - len, "total_sample = %u\n",
+ htt_stats_buf->total_sample);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_avg = %u\n",
+ htt_stats_buf->non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "non_zeros_sample = %u\n",
+ htt_stats_buf->non_zeros_sample);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_max = %u\n",
+ htt_stats_buf->last_non_zeros_max);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_min = %u\n",
+ htt_stats_buf->last_non_zeros_min);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_avg = %u\n",
+ htt_stats_buf->last_non_zeros_avg);
+ len += scnprintf(buf + len, buf_len - len, "last_non_zeros_sample = %u\n\n",
+ htt_stats_buf->last_non_zeros_sample);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3441,17 +3188,13 @@ htt_print_rx_soc_fw_refill_ring_num_refill_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char refill_ring_num_refill[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_REFILL_MAX_RING);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_SOC_FW_REFILL_RING_NUM_REFILL_TLV_V:\n");
- ARRAY_TO_STRING(refill_ring_num_refill,
- htt_stats_buf->refill_ring_num_refill,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "refill_ring_num_refill = %s\n",
- refill_ring_num_refill);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->refill_ring_num_refill,
+ "refill_ring_num_refill", num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3468,113 +3211,106 @@ static inline void htt_print_rx_pdev_fw_stats_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mgmt_subtype[HTT_MAX_STRING_LEN] = {0};
- char fw_ring_ctrl_subtype[HTT_MAX_STRING_LEN] = {0};
-
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ppdu_recvd = %u",
- htt_stats_buf->ppdu_recvd);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u",
- htt_stats_buf->mpdu_cnt_fcs_ok);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u",
- htt_stats_buf->mpdu_cnt_fcs_err);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_msdu_cnt = %u",
- htt_stats_buf->tcp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u",
- htt_stats_buf->tcp_ack_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "udp_msdu_cnt = %u",
- htt_stats_buf->udp_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "other_msdu_cnt = %u",
- htt_stats_buf->other_msdu_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u",
- htt_stats_buf->fw_ring_mpdu_ind);
-
- ARRAY_TO_STRING(fw_ring_mgmt_subtype,
- htt_stats_buf->fw_ring_mgmt_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mgmt_subtype = %s ",
- fw_ring_mgmt_subtype);
-
- ARRAY_TO_STRING(fw_ring_ctrl_subtype,
- htt_stats_buf->fw_ring_ctrl_subtype,
- HTT_STATS_SUBTYPE_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ctrl_subtype = %s ",
- fw_ring_ctrl_subtype);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u",
- htt_stats_buf->fw_ring_mcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u",
- htt_stats_buf->fw_ring_bcast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u",
- htt_stats_buf->fw_ring_ucast_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u",
- htt_stats_buf->fw_ring_null_data_msdu);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u",
- htt_stats_buf->fw_ring_mpdu_drop);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u",
- htt_stats_buf->ofld_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "ofld_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->ofld_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u",
- htt_stats_buf->drx_local_data_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "drx_local_data_buf_recycle_cnt = %u",
- htt_stats_buf->drx_local_data_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_ind_cnt = %u",
- htt_stats_buf->local_nondata_ind_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u",
- htt_stats_buf->local_nondata_buf_recycle_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u",
- htt_stats_buf->fw_link_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->host_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "mon_status_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_status_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_refill_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u",
- htt_stats_buf->mon_desc_buf_ring_empty_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u",
- htt_stats_buf->mon_dest_ring_update_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u",
- htt_stats_buf->mon_dest_ring_full_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_cnt = %u",
- htt_stats_buf->rx_suspend_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u",
- htt_stats_buf->rx_suspend_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_cnt = %u",
- htt_stats_buf->rx_resume_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_resume_fail_cnt = %u",
- htt_stats_buf->rx_resume_fail_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_switch_cnt = %u",
- htt_stats_buf->rx_ring_switch_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_ring_restore_cnt = %u",
- htt_stats_buf->rx_ring_restore_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_flush_cnt = %u",
- htt_stats_buf->rx_flush_cnt);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n",
- htt_stats_buf->rx_recovery_reset_cnt);
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "ppdu_recvd = %u\n",
+ htt_stats_buf->ppdu_recvd);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_ok = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_ok);
+ len += scnprintf(buf + len, buf_len - len, "mpdu_cnt_fcs_err = %u\n",
+ htt_stats_buf->mpdu_cnt_fcs_err);
+ len += scnprintf(buf + len, buf_len - len, "tcp_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "tcp_ack_msdu_cnt = %u\n",
+ htt_stats_buf->tcp_ack_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "udp_msdu_cnt = %u\n",
+ htt_stats_buf->udp_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "other_msdu_cnt = %u\n",
+ htt_stats_buf->other_msdu_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_ind = %u\n",
+ htt_stats_buf->fw_ring_mpdu_ind);
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mgmt_subtype,
+ "fw_ring_mgmt_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_ctrl_subtype,
+ "fw_ring_ctrl_subtype", HTT_STATS_SUBTYPE_MAX, "\n");
+
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_mcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_bcast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_bcast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_ucast_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_ucast_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_null_data_msdu = %u\n",
+ htt_stats_buf->fw_ring_null_data_msdu);
+ len += scnprintf(buf + len, buf_len - len, "fw_ring_mpdu_drop = %u\n",
+ htt_stats_buf->fw_ring_mpdu_drop);
+ len += scnprintf(buf + len, buf_len - len, "ofld_local_data_ind_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "ofld_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->ofld_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "drx_local_data_ind_cnt = %u\n",
+ htt_stats_buf->drx_local_data_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "drx_local_data_buf_recycle_cnt = %u\n",
+ htt_stats_buf->drx_local_data_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_ind_cnt = %u\n",
+ htt_stats_buf->local_nondata_ind_cnt);
+ len += scnprintf(buf + len, buf_len - len, "local_nondata_buf_recycle_cnt = %u\n",
+ htt_stats_buf->local_nondata_buf_recycle_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "fw_link_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->fw_link_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "host_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->host_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_pkt_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_pkt_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len,
+ "mon_status_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_status_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_status_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_refill_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_refill_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_desc_buf_ring_empty_cnt = %u\n",
+ htt_stats_buf->mon_desc_buf_ring_empty_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_update_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_update_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mon_dest_ring_full_cnt = %u\n",
+ htt_stats_buf->mon_dest_ring_full_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_cnt = %u\n",
+ htt_stats_buf->rx_suspend_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_suspend_fail_cnt = %u\n",
+ htt_stats_buf->rx_suspend_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_cnt = %u\n",
+ htt_stats_buf->rx_resume_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_resume_fail_cnt = %u\n",
+ htt_stats_buf->rx_resume_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_switch_cnt = %u\n",
+ htt_stats_buf->rx_ring_switch_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_ring_restore_cnt = %u\n",
+ htt_stats_buf->rx_ring_restore_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n",
+ htt_stats_buf->rx_flush_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_recovery_reset_cnt = %u\n\n",
+ htt_stats_buf->rx_recovery_reset_cnt);
if (len >= buf_len)
buf[buf_len - 1] = 0;
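
Where the old code masked mac_id__word with 0xFF by hand, the new code uses FIELD_GET() with the HTT_STATS_MAC_ID mask (GENMASK(7, 0), added in the header hunk further down). A rough userspace equivalent of that bitfield extraction, with hand-rolled 32-bit stand-ins for the kernel macros (the real GENMASK()/FIELD_GET() in linux/bits.h and linux/bitfield.h also add compile-time checks):

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_U32(h, l)       ((~0u >> (31 - (h))) & (~0u << (l)))
    #define FIELD_GET_U32(mask, v)  (((v) & (mask)) / ((mask) & -(mask)))

    #define HTT_STATS_MAC_ID        GENMASK_U32(7, 0)  /* bits 7..0 of mac_id__word */

    int main(void)
    {
            uint32_t mac_id__word = 0x00000a01;        /* example TLV word */

            printf("mac_id = %u\n",
                   (unsigned int)FIELD_GET_U32(HTT_STATS_MAC_ID, mac_id__word));
            return 0;
    }
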
@@ -3592,16 +3328,12 @@ htt_print_rx_pdev_fw_ring_mpdu_err_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_ring_mpdu_err[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_V:\n");
- ARRAY_TO_STRING(fw_ring_mpdu_err,
- htt_stats_buf->fw_ring_mpdu_err,
- HTT_RX_STATS_RXDMA_MAX_ERR);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_ring_mpdu_err = %s\n",
- fw_ring_mpdu_err);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_ring_mpdu_err,
+ "fw_ring_mpdu_err", HTT_RX_STATS_RXDMA_MAX_ERR, "\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3620,15 +3352,12 @@ htt_print_rx_pdev_fw_mpdu_drop_tlv_v(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char fw_mpdu_drop[HTT_MAX_STRING_LEN] = {0};
u16 num_elems = min_t(u16, (tag_len >> 2), HTT_RX_STATS_FW_DROP_REASON_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:");
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_MPDU_DROP_TLV_V:\n");
- ARRAY_TO_STRING(fw_mpdu_drop,
- htt_stats_buf->fw_mpdu_drop,
- num_elems);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "fw_mpdu_drop = %s\n", fw_mpdu_drop);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->fw_mpdu_drop, "fw_mpdu_drop",
+ num_elems, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3646,18 +3375,15 @@ htt_print_rx_pdev_fw_stats_phy_err_tlv(const void *tag_buf,
u8 *buf = stats_req->buf;
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- char phy_errs[HTT_MAX_STRING_LEN] = {0};
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id__word = %u",
- htt_stats_buf->mac_id__word);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "total_phy_err_nct = %u",
- htt_stats_buf->total_phy_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_FW_STATS_PHY_ERR_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id__word = %u\n",
+ htt_stats_buf->mac_id__word);
+ "total_phy_err_cnt = %u\n",
+ htt_stats_buf->total_phy_err_cnt);
- ARRAY_TO_STRING(phy_errs,
- htt_stats_buf->phy_err,
- HTT_STATS_PHY_ERR_MAX);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "phy_errs = %s\n", phy_errs);
+ PRINT_ARRAY_TO_BUF(buf, len, htt_stats_buf->phy_err, "phy_errs",
+ HTT_STATS_PHY_ERR_MAX, "\n\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3676,20 +3402,20 @@ htt_print_pdev_cca_stats_hist_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "chan_num = %u",
- htt_stats_buf->chan_num);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_records = %u",
- htt_stats_buf->num_records);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x",
- htt_stats_buf->valid_cca_counters_bitmap);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "collection_interval = %u\n",
- htt_stats_buf->collection_interval);
-
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)");
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|");
+ len += scnprintf(buf + len, buf_len - len, "\nHTT_PDEV_CCA_STATS_HIST_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+ htt_stats_buf->chan_num);
+ len += scnprintf(buf + len, buf_len - len, "num_records = %u\n",
+ htt_stats_buf->num_records);
+ len += scnprintf(buf + len, buf_len - len, "valid_cca_counters_bitmap = 0x%x\n",
+ htt_stats_buf->valid_cca_counters_bitmap);
+ len += scnprintf(buf + len, buf_len - len, "collection_interval = %u\n\n",
+ htt_stats_buf->collection_interval);
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_PDEV_STATS_CCA_COUNTERS_TLV:(in usec)\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "| tx_frame| rx_frame| rx_clear| my_rx_frame| cnt| med_rx_idle| med_tx_idle_global| cca_obss|\n");
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3708,16 +3434,16 @@ htt_print_pdev_stats_cca_counters_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|",
- htt_stats_buf->tx_frame_usec,
- htt_stats_buf->rx_frame_usec,
- htt_stats_buf->rx_clear_usec,
- htt_stats_buf->my_rx_frame_usec,
- htt_stats_buf->usec_cnt,
- htt_stats_buf->med_rx_idle_usec,
- htt_stats_buf->med_tx_idle_global_usec,
- htt_stats_buf->cca_obss_usec);
+ len += scnprintf(buf + len, buf_len - len,
+ "|%10u| %10u| %10u| %11u| %10u| %11u| %18u| %10u|\n",
+ htt_stats_buf->tx_frame_usec,
+ htt_stats_buf->rx_frame_usec,
+ htt_stats_buf->rx_clear_usec,
+ htt_stats_buf->my_rx_frame_usec,
+ htt_stats_buf->usec_cnt,
+ htt_stats_buf->med_rx_idle_usec,
+ htt_stats_buf->med_tx_idle_global_usec,
+ htt_stats_buf->cca_obss_usec);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3735,32 +3461,32 @@ static inline void htt_print_hw_stats_whal_tx_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "mac_id = %u",
- htt_stats_buf->mac_id__word & 0xFF);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "last_unpause_ppdu_id = %u",
- htt_stats_buf->last_unpause_ppdu_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u",
- htt_stats_buf->hwsch_unpause_wait_tqm_write);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u",
- htt_stats_buf->hwsch_dummy_tlv_skipped);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "hwsch_misaligned_offset_received = %u",
- htt_stats_buf->hwsch_misaligned_offset_received);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_reset_count = %u",
- htt_stats_buf->hwsch_reset_count);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_dev_reset_war = %u",
- htt_stats_buf->hwsch_dev_reset_war);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_delayed_pause = %u",
- htt_stats_buf->hwsch_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u",
- htt_stats_buf->hwsch_long_delayed_pause);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u",
- htt_stats_buf->sch_rx_ppdu_no_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_selfgen_response = %u",
- htt_stats_buf->sch_selfgen_response);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n",
- htt_stats_buf->sch_rx_sifs_resp_trigger);
+ len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "mac_id = %lu\n",
+ FIELD_GET(HTT_STATS_MAC_ID, htt_stats_buf->mac_id__word));
+ len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n",
+ htt_stats_buf->last_unpause_ppdu_id);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n",
+ htt_stats_buf->hwsch_unpause_wait_tqm_write);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n",
+ htt_stats_buf->hwsch_dummy_tlv_skipped);
+ len += scnprintf(buf + len, buf_len - len,
+ "hwsch_misaligned_offset_received = %u\n",
+ htt_stats_buf->hwsch_misaligned_offset_received);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n",
+ htt_stats_buf->hwsch_reset_count);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n",
+ htt_stats_buf->hwsch_dev_reset_war);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n",
+ htt_stats_buf->hwsch_long_delayed_pause);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n",
+ htt_stats_buf->sch_rx_ppdu_no_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n",
+ htt_stats_buf->sch_selfgen_response);
+ len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger = %u\n\n",
+ htt_stats_buf->sch_rx_sifs_resp_trigger);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3779,11 +3505,11 @@ htt_print_pdev_stats_twt_sessions_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_sessions = %u\n",
- htt_stats_buf->num_sessions);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSIONS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "num_sessions = %u\n\n",
+ htt_stats_buf->num_sessions);
if (len >= buf_len)
buf[buf_len - 1] = 0;
@@ -3802,27 +3528,33 @@ htt_print_pdev_stats_twt_session_tlv(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:");
- len += HTT_DBG_OUT(buf + len, buf_len - len, "vdev_id = %u",
- htt_stats_buf->vdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "peer_mac = %02x:%02x:%02x:%02x:%02x:%02x",
- htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF00) >> 8,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF0000) >> 16,
- (htt_stats_buf->peer_mac.mac_addr_l32 & 0xFF000000) >> 24,
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF),
- (htt_stats_buf->peer_mac.mac_addr_h16 & 0xFF00) >> 8);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "flow_id_flags = %u",
- htt_stats_buf->flow_id_flags);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "dialog_id = %u",
- htt_stats_buf->dialog_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_dura_us = %u",
- htt_stats_buf->wake_dura_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "wake_intvl_us = %u",
- htt_stats_buf->wake_intvl_us);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "sp_offset_us = %u\n",
- htt_stats_buf->sp_offset_us);
+ len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_STATS_TWT_SESSION_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+ htt_stats_buf->vdev_id);
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac = %02lx:%02lx:%02lx:%02lx:%02lx:%02lx\n",
+ FIELD_GET(HTT_MAC_ADDR_L32_0,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_1,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_2,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_L32_3,
+ htt_stats_buf->peer_mac.mac_addr_l32),
+ FIELD_GET(HTT_MAC_ADDR_H16_0,
+ htt_stats_buf->peer_mac.mac_addr_h16),
+ FIELD_GET(HTT_MAC_ADDR_H16_1,
+ htt_stats_buf->peer_mac.mac_addr_h16));
+ len += scnprintf(buf + len, buf_len - len, "flow_id_flags = %u\n",
+ htt_stats_buf->flow_id_flags);
+ len += scnprintf(buf + len, buf_len - len, "dialog_id = %u\n",
+ htt_stats_buf->dialog_id);
+ len += scnprintf(buf + len, buf_len - len, "wake_dura_us = %u\n",
+ htt_stats_buf->wake_dura_us);
+ len += scnprintf(buf + len, buf_len - len, "wake_intvl_us = %u\n",
+ htt_stats_buf->wake_intvl_us);
+ len += scnprintf(buf + len, buf_len - len, "sp_offset_us = %u\n\n",
+ htt_stats_buf->sp_offset_us);
if (len >= buf_len)
buf[buf_len - 1] = 0;
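
The TWT session TLV carries the peer MAC as two packed words (mac_addr_l32 holds octets 0-3, mac_addr_h16 holds octets 4-5); the FIELD_GET()s above peel one octet out of each byte lane instead of the old open-coded masks and shifts. A small userspace sketch of the same reconstruction (example values only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Example values only; octet 0 sits in the lowest byte of l32. */
            uint32_t mac_addr_l32 = 0x7f030200;     /* octets 3..0 */
            uint32_t mac_addr_h16 = 0x00005634;     /* octets 5..4 */

            printf("peer_mac = %02x:%02x:%02x:%02x:%02x:%02x\n",
                   (unsigned int)(mac_addr_l32 & 0xff),
                   (unsigned int)((mac_addr_l32 >> 8) & 0xff),
                   (unsigned int)((mac_addr_l32 >> 16) & 0xff),
                   (unsigned int)((mac_addr_l32 >> 24) & 0xff),
                   (unsigned int)(mac_addr_h16 & 0xff),
                   (unsigned int)((mac_addr_h16 >> 8) & 0xff));
            return 0;
    }
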
@@ -3841,21 +3573,21 @@ htt_print_pdev_obss_pd_stats_tlv_v(const void *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx success PPDU = %u",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx success PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "OBSS Tx failures PPDU = %u\n",
htt_stats_buf->num_obss_tx_ppdu_failure);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG Opportunities = %u\n",
htt_stats_buf->num_non_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG tried PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "Non-SRG success PPDU = %u\n",
htt_stats_buf->num_non_srg_ppdu_success);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG Opportunities = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG Opportunities = %u\n",
htt_stats_buf->num_srg_opportunities);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG tried PPDU = %u\n",
htt_stats_buf->num_srg_ppdu_tried);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "SRG success PPDU = %u\n",
+ len += scnprintf(buf + len, buf_len - len, "SRG success PPDU = %u\n\n",
htt_stats_buf->num_srg_ppdu_success);
if (len >= buf_len)
@@ -3878,25 +3610,25 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
u32 len = stats_req->buf_len;
u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
- len += HTT_DBG_OUT(buf + len, buf_len - len, "pdev_id = %u",
- htt_stats_buf->pdev_id);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_head_idx = %u",
- htt_stats_buf->current_head_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "current_tail_idx = %u",
- htt_stats_buf->current_tail_idx);
- len += HTT_DBG_OUT(buf + len, buf_len - len, "num_htt_msgs_sent = %u",
- htt_stats_buf->num_htt_msgs_sent);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_time_ms = %u",
- htt_stats_buf->backpressure_time_ms);
+ len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+ htt_stats_buf->pdev_id);
+ len += scnprintf(buf + len, buf_len - len, "current_head_idx = %u\n",
+ htt_stats_buf->current_head_idx);
+ len += scnprintf(buf + len, buf_len - len, "current_tail_idx = %u\n",
+ htt_stats_buf->current_tail_idx);
+ len += scnprintf(buf + len, buf_len - len, "num_htt_msgs_sent = %u\n",
+ htt_stats_buf->num_htt_msgs_sent);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_time_ms = %u\n",
+ htt_stats_buf->backpressure_time_ms);
for (i = 0; i < 5; i++)
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "backpressure_hist_%u = %u",
- i + 1, htt_stats_buf->backpressure_hist[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "backpressure_hist_%u = %u\n",
+ i + 1, htt_stats_buf->backpressure_hist[i]);
- len += HTT_DBG_OUT(buf + len, buf_len - len,
- "============================");
+ len += scnprintf(buf + len, buf_len - len,
+ "============================\n");
if (len >= buf_len) {
buf[buf_len - 1] = 0;
@@ -3907,6 +3639,334 @@ static inline void htt_print_backpressure_stats_tlv_v(const u32 *tag_buf,
}
}
+static inline
+void htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+
+ len += scnprintf(buf + len, buf_len - len, "tx_ol_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_mcs = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_mcs[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_nss = ");
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_nss[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ol_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ol_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_ibf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_ibf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\ntx_txbf_bw = ");
+ for (i = 0; i < HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "%d:%u,", i, htt_stats_buf->tx_su_txbf_bw[i]);
+ len--;
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
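
Each of the loops above prints "index:value," pairs and then backs len up by one, so the next "\n..." header (or the final "\n") overwrites the trailing comma. A compact userspace illustration of that trick, with made-up values:

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            unsigned int len = 0, i;
            unsigned int mcs[3] = { 11, 7, 2 };

            len += snprintf(buf + len, sizeof(buf) - len, "tx_ol_mcs = ");
            for (i = 0; i < 3; i++)
                    len += snprintf(buf + len, sizeof(buf) - len, "%u:%u,", i, mcs[i]);
            len--;          /* step back onto the trailing comma ... */
            len += snprintf(buf + len, sizeof(buf) - len, "\n");  /* ... and overwrite it */

            fputs(buf, stdout);     /* prints "tx_ol_mcs = 0:11,1:7,2:2" */
            return 0;
    }
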
+static inline
+void htt_print_txbf_ofdma_ndpa_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndpa_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDPA_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndpa_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndpa_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_ndp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_ndp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_NDP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_ndp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_ndp_err[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_brp_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_brp_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_BRP_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_queued_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_queued[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_tried_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_tried[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brpoll_flushed_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brpoll_flushed[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_brp_err_num_cbf_rcvd_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_brp_err_num_cbf_rcvd[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_txbf_ofdma_steer_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_txbf_ofdma_steer_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_TXBF_OFDMA_STEER_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS; i++) {
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_steer_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_steer[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_ppdu_ol_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_ppdu_ol[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_prefetch_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_prefetch[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_sound[i]);
+ len += scnprintf(buf + len, buf_len - len,
+ "ax_ofdma_num_usrs_force_sound_user%d = %u\n",
+ i, htt_stats_buf->ax_ofdma_num_usrs_force_sound[i]);
+ len += scnprintf(buf + len, buf_len - len, "\n");
+ }
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_counters_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+ len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+ htt_stats_buf->rx_ofdma_timing_err_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+ htt_stats_buf->rx_cck_fail_cnt);
+ len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+ htt_stats_buf->mactx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+ htt_stats_buf->macrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+ htt_stats_buf->phytx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+ htt_stats_buf->phyrx_defer_abort_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_lstf_event_cnt);
+ len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+ htt_stats_buf->rx_gain_adj_non_legacy_cnt);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len, "rx_pkt_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_PKT_CRC_PASS_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_pkt_crc_pass_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_pkt_crc_pass_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_PER_BLK_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "per_blk_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->per_blk_err_cnt[i]);
+
+ for (i = 0; i < HTT_MAX_RX_OTA_ERR_CNT; i++)
+ len += scnprintf(buf + len, buf_len - len,
+ "rx_ota_err_cnt[%d] = %u\n",
+ i, htt_stats_buf->rx_ota_err_cnt[i]);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_phy_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+
+ len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+
+ for (i = 0; i < HTT_STATS_MAX_CHAINS; i++)
+ len += scnprintf(buf + len, buf_len - len, "nf_chain[%d] = %d\n",
+ i, htt_stats_buf->nf_chain[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u\n",
+ htt_stats_buf->false_radar_cnt);
+ len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+ htt_stats_buf->radar_cs_cnt);
+ len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n",
+ htt_stats_buf->ani_level);
+ len += scnprintf(buf + len, buf_len - len, "fw_run_time = %u\n",
+ htt_stats_buf->fw_run_time);
+
+ stats_req->buf_len = len;
+}
+
+static inline
+void htt_print_peer_ctrl_path_txrx_stats_tlv(const void *tag_buf,
+ struct debug_htt_stats_req *stats_req)
+{
+ const struct htt_peer_ctrl_path_txrx_stats_tlv *htt_stat_buf = tag_buf;
+ u8 *buf = stats_req->buf;
+ u32 len = stats_req->buf_len;
+ u32 buf_len = ATH11K_HTT_STATS_BUF_SIZE;
+ int i;
+ const char *mgmt_frm_type[ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1] = {
+ "assoc_req", "assoc_resp",
+ "reassoc_req", "reassoc_resp",
+ "probe_req", "probe_resp",
+ "timing_advertisement", "reserved",
+ "beacon", "atim", "disassoc",
+ "auth", "deauth", "action", "action_no_ack"};
+
+ len += scnprintf(buf + len, buf_len - len,
+ "HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:\n");
+ len += scnprintf(buf + len, buf_len - len,
+ "peer_mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+ htt_stat_buf->peer_mac_addr[0], htt_stat_buf->peer_mac_addr[1],
+ htt_stat_buf->peer_mac_addr[2], htt_stat_buf->peer_mac_addr[3],
+ htt_stat_buf->peer_mac_addr[4], htt_stat_buf->peer_mac_addr[5]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_tx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_tx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "peer_rx_mgmt_subtype:\n");
+ for (i = 0; i < ATH11K_STATS_MGMT_FRM_TYPE_MAX - 1; i++)
+ len += scnprintf(buf + len, buf_len - len, "%s:%u\n",
+ mgmt_frm_type[i],
+ htt_stat_buf->peer_rx_mgmt_subtype[i]);
+
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ stats_req->buf_len = len;
+}
+
static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
u16 tag, u16 len, const void *tag_buf,
void *user_data)
@@ -4258,6 +4318,30 @@ static int ath11k_dbg_htt_ext_stats_parse(struct ath11k_base *ab,
case HTT_STATS_RING_BACKPRESSURE_STATS_TAG:
htt_print_backpressure_stats_tlv_v(tag_buf, user_data);
break;
+ case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+ htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG:
+ htt_print_txbf_ofdma_ndpa_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG:
+ htt_print_txbf_ofdma_ndp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG:
+ htt_print_txbf_ofdma_brp_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG:
+ htt_print_txbf_ofdma_steer_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_COUNTERS_TAG:
+ htt_print_phy_counters_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PHY_STATS_TAG:
+ htt_print_phy_stats_tlv(tag_buf, stats_req);
+ break;
+ case HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG:
+ htt_print_peer_ctrl_path_txrx_stats_tlv(tag_buf, stats_req);
+ break;
default:
break;
}
@@ -4345,8 +4429,7 @@ static ssize_t ath11k_write_htt_stats_type(struct file *file,
if (type >= ATH11K_DBG_HTT_NUM_EXT_STATS)
return -E2BIG;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
- type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
return -EPERM;
ar->debug.htt_stats.type = type;
@@ -4407,6 +4490,15 @@ static int ath11k_prep_htt_stats_cfg_params(struct ath11k *ar, u8 type,
case ATH11K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO:
cfg_params->cfg0 = HTT_STAT_DEFAULT_CFG0_ACTIVE_VDEVS;
break;
+ case ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS:
+ cfg_params->cfg0 = HTT_STAT_PEER_INFO_MAC_ADDR;
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(7, 0), mac_addr[0]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(15, 8), mac_addr[1]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(23, 16), mac_addr[2]);
+ cfg_params->cfg1 |= FIELD_PREP(GENMASK(31, 24), mac_addr[3]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(7, 0), mac_addr[4]);
+ cfg_params->cfg2 |= FIELD_PREP(GENMASK(15, 8), mac_addr[5]);
+ break;
default:
break;
}
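
For the new PEER_CTRL_PATH_TXRX_STATS request type, the peer MAC is packed byte-by-byte into the cfg1/cfg2 words with FIELD_PREP() over byte-wide masks. A userspace sketch of the same packing, with a shift-based stand-in for FIELD_PREP() and an illustrative MAC value:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift-based stand-in for FIELD_PREP() over the byte-wide GENMASK()s
     * used above; the MAC address below is an example only.
     */
    #define FIELD_PREP_BYTE(shift, val)     ((uint32_t)(val) << (shift))

    int main(void)
    {
            const uint8_t mac_addr[6] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
            uint32_t cfg1 = 0, cfg2 = 0;

            cfg1 |= FIELD_PREP_BYTE(0,  mac_addr[0]);
            cfg1 |= FIELD_PREP_BYTE(8,  mac_addr[1]);
            cfg1 |= FIELD_PREP_BYTE(16, mac_addr[2]);
            cfg1 |= FIELD_PREP_BYTE(24, mac_addr[3]);
            cfg2 |= FIELD_PREP_BYTE(0,  mac_addr[4]);
            cfg2 |= FIELD_PREP_BYTE(8,  mac_addr[5]);

            printf("cfg1 = 0x%08x cfg2 = 0x%08x\n",
                   (unsigned int)cfg1, (unsigned int)cfg2);
            return 0;
    }
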
@@ -4464,7 +4556,9 @@ static int ath11k_open_htt_stats(struct inode *inode, struct file *file)
u8 type = ar->debug.htt_stats.type;
int ret;
- if (type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ if (type == ATH11K_DBG_HTT_EXT_STATS_RESET ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_INFO ||
+ type == ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS)
return -EPERM;
mutex_lock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
index d428f52003a4..dc210c54d131 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
+++ b/drivers/net/wireless/ath/ath11k/debugfs_htt_stats.h
@@ -102,6 +102,14 @@ enum htt_tlv_tag_t {
HTT_STATS_PDEV_OBSS_PD_TAG = 88,
HTT_STATS_HW_WAR_TAG = 89,
HTT_STATS_RING_BACKPRESSURE_STATS_TAG = 90,
+ HTT_STATS_PEER_CTRL_PATH_TXRX_STATS_TAG = 101,
+ HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG = 108,
+ HTT_STATS_TXBF_OFDMA_NDPA_STATS_TAG = 113,
+ HTT_STATS_TXBF_OFDMA_NDP_STATS_TAG = 114,
+ HTT_STATS_TXBF_OFDMA_BRP_STATS_TAG = 115,
+ HTT_STATS_TXBF_OFDMA_STEER_STATS_TAG = 116,
+ HTT_STATS_PHY_COUNTERS_TAG = 121,
+ HTT_STATS_PHY_STATS_TAG = 122,
HTT_STATS_MAX_TAG,
};
@@ -137,6 +145,8 @@ struct htt_stats_string_tlv {
u32 data[0]; /* Can be variable length */
} __packed;
+#define HTT_STATS_MAC_ID GENMASK(7, 0)
+
/* == TX PDEV STATS == */
struct htt_tx_pdev_stats_cmn_tlv {
u32 mac_id__word;
@@ -290,6 +300,10 @@ struct htt_hw_stats_whal_tx_tlv {
};
/* ============ PEER STATS ============ */
+#define HTT_MSDU_FLOW_STATS_TX_FLOW_NO GENMASK(15, 0)
+#define HTT_MSDU_FLOW_STATS_TID_NUM GENMASK(19, 16)
+#define HTT_MSDU_FLOW_STATS_DROP_RULE BIT(20)
+
struct htt_msdu_flow_stats_tlv {
u32 last_update_timestamp;
u32 last_add_timestamp;
@@ -306,6 +320,11 @@ struct htt_msdu_flow_stats_tlv {
#define MAX_HTT_TID_NAME 8
+#define HTT_TX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_tlv {
/* Stored as little endian */
@@ -326,6 +345,11 @@ struct htt_tx_tid_stats_tlv {
u32 tid_tx_airtime;
};
+#define HTT_TX_TID_STATS_V1_SW_PEER_ID GENMASK(15, 0)
+#define HTT_TX_TID_STATS_V1_TID_NUM GENMASK(31, 16)
+#define HTT_TX_TID_STATS_V1_NUM_SCHED_PENDING GENMASK(7, 0)
+#define HTT_TX_TID_STATS_V1_NUM_PPDU_IN_HWQ GENMASK(15, 8)
+
/* Tidq stats */
struct htt_tx_tid_stats_v1_tlv {
/* Stored as little endian */
@@ -348,6 +372,9 @@ struct htt_tx_tid_stats_v1_tlv {
u32 sendn_frms_allowed;
};
+#define HTT_RX_TID_STATS_SW_PEER_ID GENMASK(15, 0)
+#define HTT_RX_TID_STATS_TID_NUM GENMASK(31, 16)
+
struct htt_rx_tid_stats_tlv {
u32 sw_peer_id__tid_num;
u8 tid_name[MAX_HTT_TID_NAME];
@@ -386,6 +413,10 @@ struct htt_peer_stats_cmn_tlv {
u32 inactive_time;
};
+#define HTT_PEER_DETAILS_VDEV_ID GENMASK(7, 0)
+#define HTT_PEER_DETAILS_PDEV_ID GENMASK(15, 8)
+#define HTT_PEER_DETAILS_AST_IDX GENMASK(31, 16)
+
struct htt_peer_details_tlv {
u32 peer_type;
u32 sw_peer_id;
@@ -510,6 +541,9 @@ struct htt_tx_hwq_mu_mimo_mpdu_stats_tlv {
u32 mu_mimo_ampdu_underrun_usr;
};
+#define HTT_TX_HWQ_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_HWQ_STATS_HWQ_ID GENMASK(15, 8)
+
struct htt_tx_hwq_mu_mimo_cmn_stats_tlv {
u32 mac_id__hwq_id__word;
};
@@ -789,6 +823,9 @@ struct htt_sched_txq_sched_ineligibility_tlv_v {
u32 sched_ineligibility[0];
};
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0)
+#define HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8)
+
struct htt_tx_pdev_stats_sched_per_txq_tlv {
u32 mac_id__txq_id__word;
u32 sched_policy;
@@ -910,6 +947,9 @@ struct htt_tx_tqm_error_stats_tlv {
};
/* == TQM CMDQ stats == */
+#define HTT_TX_TQM_CMDQ_STATUS_MAC_ID GENMASK(7, 0)
+#define HTT_TX_TQM_CMDQ_STATUS_CMDQ_ID GENMASK(15, 8)
+
struct htt_tx_tqm_cmdq_status_tlv {
u32 mac_id__cmdq_id__word;
u32 sync_cmd;
@@ -1055,6 +1095,15 @@ struct htt_tx_de_cmn_stats_tlv {
#define HTT_STATS_LOW_WM_BINS 5
#define HTT_STATS_HIGH_WM_BINS 5
+#define HTT_RING_IF_STATS_NUM_ELEMS GENMASK(15, 0)
+#define HTT_RING_IF_STATS_PREFETCH_TAIL_INDEX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_SHADOW_HEAD_IDX GENMASK(15, 0)
+#define HTT_RING_IF_STATS_SHADOW_TAIL_IDX GENMASK(31, 16)
+#define HTT_RING_IF_STATS_LWM_THRESH GENMASK(15, 0)
+#define HTT_RING_IF_STATS_HWM_THRESH GENMASK(31, 16)
+
struct htt_ring_if_stats_tlv {
u32 base_addr; /* DWORD aligned base memory address of the ring */
u32 elem_size;
@@ -1117,6 +1166,19 @@ struct htt_sfm_cmn_tlv {
};
/* == SRNG STATS == */
+#define HTT_SRING_STATS_MAC_ID GENMASK(7, 0)
+#define HTT_SRING_STATS_RING_ID GENMASK(15, 8)
+#define HTT_SRING_STATS_ARENA GENMASK(23, 16)
+#define HTT_SRING_STATS_EP BIT(24)
+#define HTT_SRING_STATS_NUM_AVAIL_WORDS GENMASK(15, 0)
+#define HTT_SRING_STATS_NUM_VALID_WORDS GENMASK(31, 16)
+#define HTT_SRING_STATS_HEAD_PTR GENMASK(15, 0)
+#define HTT_SRING_STATS_TAIL_PTR GENMASK(31, 16)
+#define HTT_SRING_STATS_CONSUMER_EMPTY GENMASK(15, 0)
+#define HTT_SRING_STATS_PRODUCER_FULL GENMASK(31, 16)
+#define HTT_SRING_STATS_PREFETCH_COUNT GENMASK(15, 0)
+#define HTT_SRING_STATS_INTERNAL_TAIL_PTR GENMASK(31, 16)
+
struct htt_sring_stats_tlv {
u32 mac_id__ring_id__arena__ep;
u32 base_addr_lsb; /* DWORD aligned base memory address of the ring */
@@ -1696,6 +1758,170 @@ struct htt_ring_backpressure_stats_tlv {
u32 backpressure_hist[5];
};
+#define HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS 14
+#define HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS 5
+#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
+
+struct htt_pdev_txrate_txbf_stats_tlv {
+ /* SU TxBF TX MCS stats */
+ u32 tx_su_txbf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Implicit BF TX MCS stats */
+ u32 tx_su_ibf_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* Open loop TX MCS stats */
+ u32 tx_su_ol_mcs[HTT_TX_TXBF_RATE_STATS_NUM_MCS_COUNTERS];
+ /* SU TxBF TX NSS stats */
+ u32 tx_su_txbf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Implicit BF TX NSS stats */
+ u32 tx_su_ibf_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* Open loop TX NSS stats */
+ u32 tx_su_ol_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ /* SU TxBF TX BW stats */
+ u32 tx_su_txbf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Implicit BF TX BW stats */
+ u32 tx_su_ibf_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+ /* Open loop TX BW stats */
+ u32 tx_su_ol_bw[HTT_TX_TXBF_RATE_STATS_NUM_BW_COUNTERS];
+};
+
+struct htt_txbf_ofdma_ndpa_stats_tlv {
+ /* 11AX HE OFDMA NDPA frame queued to the HW */
+ u32 ax_ofdma_ndpa_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame sent over the air */
+ u32 ax_ofdma_ndpa_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame flushed by HW */
+ u32 ax_ofdma_ndpa_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDPA frame completed with error(s) */
+ u32 ax_ofdma_ndpa_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_ndp_stats_tlv {
+ /* 11AX HE OFDMA NDP frame queued to the HW */
+ u32 ax_ofdma_ndp_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame sent over the air */
+ u32 ax_ofdma_ndp_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame flushed by HW */
+ u32 ax_ofdma_ndp_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA NDP frame completed with error(s) */
+ u32 ax_ofdma_ndp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+struct htt_txbf_ofdma_brp_stats_tlv {
+ /* 11AX HE OFDMA MU BRPOLL frame queued to the HW */
+ u32 ax_ofdma_brpoll_queued[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame sent over the air */
+ u32 ax_ofdma_brpoll_tried[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame flushed by HW */
+ u32 ax_ofdma_brpoll_flushed[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA MU BRPOLL frame completed with error(s) */
+ u32 ax_ofdma_brp_err[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* Number of CBF(s) received when 11AX HE OFDMA MU BRPOLL frame
+ * completed with error(s).
+ */
+ u32 ax_ofdma_brp_err_num_cbf_rcvd[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS + 1];
+};
+
+struct htt_txbf_ofdma_steer_stats_tlv {
+ /* 11AX HE OFDMA PPDUs that were sent over the air with steering (TXBF + OFDMA) */
+ u32 ax_ofdma_num_ppdu_steer[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA PPDUs that were sent over the air in open loop */
+ u32 ax_ofdma_num_ppdu_ol[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which CBF prefetch was
+ * initiated to PHY HW during TX.
+ */
+ u32 ax_ofdma_num_usrs_prefetch[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was initiated during TX */
+ u32 ax_ofdma_num_usrs_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+ /* 11AX HE OFDMA number of users for which sounding was forced during TX */
+ u32 ax_ofdma_num_usrs_force_sound[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
+};
+
+#define HTT_MAX_RX_PKT_CNT 8
+#define HTT_MAX_RX_PKT_CRC_PASS_CNT 8
+#define HTT_MAX_PER_BLK_ERR_CNT 20
+#define HTT_MAX_RX_OTA_ERR_CNT 14
+#define HTT_STATS_MAX_CHAINS 8
+#define ATH11K_STATS_MGMT_FRM_TYPE_MAX 16
+
+struct htt_phy_counters_tlv {
+ /* number of RXTD OFDMA OTA error counts except power surge and drop */
+ u32 rx_ofdma_timing_err_cnt;
+ /* rx_cck_fail_cnt:
+ * number of CCK errors counted when rx reception fails because of a
+ * timing error in CCK
+ */
+ u32 rx_cck_fail_cnt;
+ /* number of times tx abort initiated by mac */
+ u32 mactx_abort_cnt;
+ /* number of times rx abort initiated by mac */
+ u32 macrx_abort_cnt;
+ /* number of times tx abort initiated by phy */
+ u32 phytx_abort_cnt;
+ /* number of times rx abort initiated by phy */
+ u32 phyrx_abort_cnt;
+ /* number of rx deferred aborts initiated by phy */
+ u32 phyrx_defer_abort_cnt;
+ /* number of sizing events generated at LSTF */
+ u32 rx_gain_adj_lstf_event_cnt;
+ /* number of sizing events generated at non-legacy LTF */
+ u32 rx_gain_adj_non_legacy_cnt;
+ /* rx_pkt_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_cnt[HTT_MAX_RX_PKT_CNT];
+ /* rx_pkt_crc_pass_cnt -
+ * Received EOP (end-of-packet) count per packet type;
+ * [0] = 11a; [1] = 11b; [2] = 11n; [3] = 11ac; [4] = 11ax; [5] = GF
+ * [6-7]=RSVD
+ */
+ u32 rx_pkt_crc_pass_cnt[HTT_MAX_RX_PKT_CRC_PASS_CNT];
+ /* per_blk_err_cnt -
+ * Error count per error source;
+ * [0] = unknown; [1] = LSIG; [2] = HTSIG; [3] = VHTSIG; [4] = HESIG;
+ * [5] = RXTD_OTA; [6] = RXTD_FATAL; [7] = DEMF; [8] = ROBE;
+ * [9] = PMI; [10] = TXFD; [11] = TXTD; [12] = PHYRF
+ * [13-19]=RSVD
+ */
+ u32 per_blk_err_cnt[HTT_MAX_PER_BLK_ERR_CNT];
+ /* rx_ota_err_cnt -
+ * RXTD OTA (over-the-air) error count per error reason;
+ * [0] = voting fail; [1] = weak det fail; [2] = strong sig fail;
+ * [3] = cck fail; [4] = power surge; [5] = power drop;
+ * [6] = btcf timing timeout error; [7] = btcf packet detect error;
+ * [8] = coarse timing timeout error
+ * [9-13]=RSVD
+ */
+ u32 rx_ota_err_cnt[HTT_MAX_RX_OTA_ERR_CNT];
+};
+
+struct htt_phy_stats_tlv {
+ /* per chain hw noise floor values in dBm */
+ s32 nf_chain[HTT_STATS_MAX_CHAINS];
+ /* number of false radars detected */
+ u32 false_radar_cnt;
+ /* number of channel switches happened due to radar detection */
+ u32 radar_cs_cnt;
+ /* ani_level -
+ * ANI level (noise interference) for the channel;
+ * desense levels range from -5 to 15 in dB units,
+ * with higher values indicating more noise interference.
+ */
+ s32 ani_level;
+ /* running time in minutes since FW boot */
+ u32 fw_run_time;
+};
+
+struct htt_peer_ctrl_path_txrx_stats_tlv {
+ /* peer mac address */
+ u8 peer_mac_addr[ETH_ALEN];
+ u8 rsvd[2];
+ /* Num of tx mgmt frames with subtype on peer level */
+ u32 peer_tx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+ /* Num of rx mgmt frames with subtype on peer level */
+ u32 peer_rx_mgmt_subtype[ATH11K_STATS_MGMT_FRM_TYPE_MAX];
+};
+
#ifdef CONFIG_ATH11K_DEBUGFS
void ath11k_debugfs_htt_stats_init(struct ath11k *ar);
diff --git a/drivers/net/wireless/ath/ath11k/debugfs_sta.c b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
index 270c0edbb10f..fecd9718f5ce 100644
--- a/drivers/net/wireless/ath/ath11k/debugfs_sta.c
+++ b/drivers/net/wireless/ath/ath11k/debugfs_sta.c
@@ -419,15 +419,21 @@ ath11k_dbg_sta_open_htt_peer_stats(struct inode *inode, struct file *file)
struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
struct ath11k *ar = arsta->arvif->ar;
struct debug_htt_stats_req *stats_req;
+ int type = ar->debug.htt_stats.type;
int ret;
+ if ((type != ATH11K_DBG_HTT_EXT_STATS_PEER_INFO &&
+ type != ATH11K_DBG_HTT_EXT_STATS_PEER_CTRL_PATH_TXRX_STATS) ||
+ type == ATH11K_DBG_HTT_EXT_STATS_RESET)
+ return -EPERM;
+
stats_req = vzalloc(sizeof(*stats_req) + ATH11K_HTT_STATS_BUF_SIZE);
if (!stats_req)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
ar->debug.htt_stats.stats_req = stats_req;
- stats_req->type = ATH11K_DBG_HTT_EXT_STATS_PEER_INFO;
+ stats_req->type = type;
memcpy(stats_req->peer_addr, sta->addr, ETH_ALEN);
ret = ath11k_debugfs_htt_stats_req(ar);
mutex_unlock(&ar->conf_mutex);
diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c
index b0c8f6290099..8baaeeb8cf82 100644
--- a/drivers/net/wireless/ath/ath11k/dp.c
+++ b/drivers/net/wireless/ath/ath11k/dp.c
@@ -311,7 +311,7 @@ void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
if (!ab->hw_params.supports_shadow_regs)
return;
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++)
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++)
ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
@@ -326,7 +326,7 @@ static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
}
@@ -366,7 +366,7 @@ static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
goto err;
}
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
HAL_TCL_DATA, i, 0,
DP_TCL_DATA_RING_SIZE);
@@ -739,6 +739,7 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
int budget)
{
struct napi_struct *napi = &irq_grp->napi;
+ const struct ath11k_hw_hal_params *hal_params;
int grp_id = irq_grp->grp_id;
int work_done = 0;
int i = 0, j;
@@ -821,8 +822,9 @@ int ath11k_dp_service_srng(struct ath11k_base *ab,
struct ath11k_pdev_dp *dp = &ar->dp;
struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
+ hal_params = ab->hw_params.hal_params;
ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
- HAL_RX_BUF_RBM_SW3_BM);
+ hal_params->rx_buf_rbm);
}
}
}
@@ -996,7 +998,7 @@ void ath11k_dp_free(struct ath11k_base *ab)
ath11k_dp_reo_cmd_list_cleanup(ab);
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
idr_for_each(&dp->tx_ring[i].txbuf_idr,
ath11k_dp_tx_pending_cleanup, ab);
@@ -1046,7 +1048,7 @@ int ath11k_dp_alloc(struct ath11k_base *ab)
size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
idr_init(&dp->tx_ring[i].txbuf_idr);
spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
dp->tx_ring[i].tcl_data_ring_id = i;
diff --git a/drivers/net/wireless/ath/ath11k/dp.h b/drivers/net/wireless/ath/ath11k/dp.h
index ee768ccce46e..4794ca04f213 100644
--- a/drivers/net/wireless/ath/ath11k/dp.h
+++ b/drivers/net/wireless/ath/ath11k/dp.h
@@ -170,6 +170,7 @@ struct ath11k_pdev_dp {
#define DP_BA_WIN_SZ_MAX 256
#define DP_TCL_NUM_RING_MAX 3
+#define DP_TCL_NUM_RING_MAX_QCA6390 1
#define DP_IDLE_SCATTER_BUFS_MAX 16
@@ -195,6 +196,7 @@ struct ath11k_pdev_dp {
#define DP_RXDMA_MONITOR_DESC_RING_SIZE 4096
#define DP_RX_BUFFER_SIZE 2048
+#define DP_RX_BUFFER_SIZE_LITE 1024
#define DP_RX_BUFFER_ALIGN_SIZE 128
#define DP_RXDMA_BUF_COOKIE_BUF_ID GENMASK(17, 0)
@@ -1592,6 +1594,13 @@ struct ath11k_htt_extd_stats_msg {
u8 data[0];
} __packed;
+#define HTT_MAC_ADDR_L32_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_L32_1 GENMASK(15, 8)
+#define HTT_MAC_ADDR_L32_2 GENMASK(23, 16)
+#define HTT_MAC_ADDR_L32_3 GENMASK(31, 24)
+#define HTT_MAC_ADDR_H16_0 GENMASK(7, 0)
+#define HTT_MAC_ADDR_H16_1 GENMASK(15, 8)
+
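As an illustration of how these byte-lane masks relate to a MAC address, here is a small standalone sketch that packs the six address bytes into a mac_addr_l32/mac_addr_h16 pair and unpacks them again, assuming byte 0 of the address sits in the lowest lane; the shifts are written out in place of GENMASK/FIELD_GET.

#include <stdint.h>
#include <stdio.h>

/* Pack addr[0..3] into the low 32-bit word and addr[4..5] into the high
 * 16-bit word, following the HTT_MAC_ADDR_L32_0..3 / H16_0..1 lanes above. */
static void mac_pack(const uint8_t addr[6], uint32_t *l32, uint32_t *h16)
{
	*l32 = addr[0] | (addr[1] << 8) | (addr[2] << 16) |
	       ((uint32_t)addr[3] << 24);
	*h16 = addr[4] | (addr[5] << 8);
}

static void mac_unpack(uint32_t l32, uint32_t h16, uint8_t addr[6])
{
	addr[0] = l32 & 0xff;
	addr[1] = (l32 >> 8) & 0xff;
	addr[2] = (l32 >> 16) & 0xff;
	addr[3] = (l32 >> 24) & 0xff;
	addr[4] = h16 & 0xff;
	addr[5] = (h16 >> 8) & 0xff;
}

int main(void)
{
	const uint8_t in[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t out[6];
	uint32_t l32, h16;

	mac_pack(in, &l32, &h16);
	mac_unpack(l32, h16, out);
	printf("l32=0x%08x h16=0x%04x -> %02x:%02x:%02x:%02x:%02x:%02x\n",
	       l32, h16, out[0], out[1], out[2], out[3], out[4], out[5]);
	return 0;
}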
struct htt_mac_addr {
u32 mac_addr_l32;
u32 mac_addr_h16;
diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c
index 9a224817630a..c5320847b80a 100644
--- a/drivers/net/wireless/ath/ath11k/dp_rx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_rx.c
@@ -142,6 +142,18 @@ static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
return errmap;
}
+static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ struct rx_attention *rx_attention;
+ u32 errmap;
+
+ rx_attention = ath11k_dp_rx_get_attention(ab, desc);
+ errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
+
+ return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
+}
+
static u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
struct hal_rx_desc *desc)
{
@@ -270,6 +282,18 @@ static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
__le32_to_cpu(attn->info1)));
}
+static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
+}
+
+static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
+ struct hal_rx_desc *desc)
+{
+ return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
+}
+
static void ath11k_dp_service_mon_ring(struct timer_list *t)
{
struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
@@ -475,7 +499,7 @@ static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
rx_ring->bufs_max = num_entries;
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
- HAL_RX_BUF_RBM_SW3_BM);
+ ar->ab->hw_params.hal_params->rx_buf_rbm);
return 0;
}
@@ -2156,6 +2180,7 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
{
u8 *first_hdr;
u8 decap;
+ struct ethhdr *ehdr;
first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
@@ -2170,9 +2195,22 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
decrypted);
break;
case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
- /* TODO undecap support for middle/last msdu's of amsdu */
- ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
- enctype, status);
+ ehdr = (struct ethhdr *)msdu->data;
+
+ /* mac80211 allows fast path only for authorized STA */
+ if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
+ ATH11K_SKB_RXCB(msdu)->is_eapol = true;
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
+ break;
+ }
+
+ /* PN for mcast packets will be validated in mac80211;
+ * remove eth header and add 802.11 header.
+ */
+ if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
+ ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
+ enctype, status);
break;
case DP_RX_DECAP_TYPE_8023:
/* TODO: Handle undecap for these formats */
@@ -2180,35 +2218,62 @@ static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
}
}
+static struct ath11k_peer *
+ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
+{
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ struct hal_rx_desc *rx_desc = rxcb->rx_desc;
+ struct ath11k_peer *peer = NULL;
+
+ lockdep_assert_held(&ab->base_lock);
+
+ if (rxcb->peer_id)
+ peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
+
+ if (peer)
+ return peer;
+
+ if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
+ return NULL;
+
+ peer = ath11k_peer_find_by_addr(ab,
+ ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
+ return peer;
+}
+
static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
struct sk_buff *msdu,
struct hal_rx_desc *rx_desc,
struct ieee80211_rx_status *rx_status)
{
- bool fill_crypto_hdr, mcast;
+ bool fill_crypto_hdr;
enum hal_encrypt_type enctype;
bool is_decrypted = false;
+ struct ath11k_skb_rxcb *rxcb;
struct ieee80211_hdr *hdr;
struct ath11k_peer *peer;
struct rx_attention *rx_attention;
u32 err_bitmap;
- hdr = (struct ieee80211_hdr *)msdu->data;
-
/* PN for multicast packets will be checked in mac80211 */
+ rxcb = ATH11K_SKB_RXCB(msdu);
+ fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
+ rxcb->is_mcbc = fill_crypto_hdr;
- mcast = is_multicast_ether_addr(hdr->addr1);
- fill_crypto_hdr = mcast;
+ if (rxcb->is_mcbc) {
+ rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
+ rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
+ }
spin_lock_bh(&ar->ab->base_lock);
- peer = ath11k_peer_find_by_addr(ar->ab, hdr->addr2);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
if (peer) {
- if (mcast)
+ if (rxcb->is_mcbc)
enctype = peer->sec_type_grp;
else
enctype = peer->sec_type;
} else {
- enctype = HAL_ENCRYPT_TYPE_OPEN;
+ enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
}
spin_unlock_bh(&ar->ab->base_lock);
@@ -2247,8 +2312,11 @@ static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
if (!is_decrypted || fill_crypto_hdr)
return;
- hdr = (void *)msdu->data;
- hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
+ DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+ hdr = (void *)msdu->data;
+ hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+ }
}
static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
@@ -2337,8 +2405,10 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
channel_num = meta_data;
center_freq = meta_data >> 16;
- if (center_freq >= 5935 && center_freq <= 7105) {
+ if (center_freq >= ATH11K_MIN_6G_FREQ &&
+ center_freq <= ATH11K_MAX_6G_FREQ) {
rx_status->band = NL80211_BAND_6GHZ;
+ rx_status->freq = center_freq;
} else if (channel_num >= 1 && channel_num <= 14) {
rx_status->band = NL80211_BAND_2GHZ;
} else if (channel_num >= 36 && channel_num <= 173) {
@@ -2356,57 +2426,56 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
rx_desc, sizeof(struct hal_rx_desc));
}
- rx_status->freq = ieee80211_channel_to_frequency(channel_num,
- rx_status->band);
+ if (rx_status->band != NL80211_BAND_6GHZ)
+ rx_status->freq = ieee80211_channel_to_frequency(channel_num,
+ rx_status->band);
ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
}
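A standalone sketch of the band and frequency decision above: for 6 GHz the center frequency from the rx descriptor is used directly, while for 2.4/5 GHz the frequency is derived from the channel number with the usual IEEE 802.11 mapping (channel 14 being the 2484 MHz special case). The 6 GHz bounds below only approximate ATH11K_MIN_6G_FREQ/ATH11K_MAX_6G_FREQ and are an assumption of the sketch.

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ, BAND_UNKNOWN };

#define MIN_6G_FREQ 5935 /* illustrative bounds only */
#define MAX_6G_FREQ 7115

/* Classify a frame and compute its frequency in MHz; center_freq comes from
 * the rx descriptor metadata, channel_num is the 2.4/5 GHz channel number. */
static enum band classify(unsigned int channel_num, unsigned int center_freq,
			  unsigned int *freq)
{
	if (center_freq >= MIN_6G_FREQ && center_freq <= MAX_6G_FREQ) {
		*freq = center_freq;		/* 6 GHz: trust the descriptor */
		return BAND_6GHZ;
	}
	if (channel_num >= 1 && channel_num <= 14) {
		*freq = (channel_num == 14) ? 2484 : 2407 + channel_num * 5;
		return BAND_2GHZ;
	}
	if (channel_num >= 36 && channel_num <= 173) {
		*freq = 5000 + channel_num * 5;	/* standard 5 GHz mapping */
		return BAND_5GHZ;
	}
	*freq = 0;
	return BAND_UNKNOWN;
}

int main(void)
{
	unsigned int freq;
	enum band b = classify(36, 0, &freq);

	printf("band=%d freq=%u MHz\n", b, freq);	/* band=1 freq=5180 MHz */
	return 0;
}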
-static char *ath11k_print_get_tid(struct ieee80211_hdr *hdr, char *out,
- size_t size)
-{
- u8 *qc;
- int tid;
-
- if (!ieee80211_is_data_qos(hdr->frame_control))
- return "";
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
- snprintf(out, size, "tid %d", tid);
-
- return out;
-}
-
static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
- struct sk_buff *msdu)
+ struct sk_buff *msdu,
+ struct ieee80211_rx_status *status)
{
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
};
- struct ieee80211_rx_status *status;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
+ struct ieee80211_rx_status *rx_status;
struct ieee80211_radiotap_he *he = NULL;
- char tid[32];
+ struct ieee80211_sta *pubsta = NULL;
+ struct ath11k_peer *peer;
+ struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
+ u8 decap = DP_RX_DECAP_TYPE_RAW;
+ bool is_mcbc = rxcb->is_mcbc;
+ bool is_eapol = rxcb->is_eapol;
- status = IEEE80211_SKB_RXCB(msdu);
- if (status->encoding == RX_ENC_HE) {
+ if (status->encoding == RX_ENC_HE &&
+ !(status->flag & RX_FLAG_RADIOTAP_HE) &&
+ !(status->flag & RX_FLAG_SKIP_MONITOR)) {
he = skb_push(msdu, sizeof(known));
memcpy(he, &known, sizeof(known));
status->flag |= RX_FLAG_RADIOTAP_HE;
}
+ if (!(status->flag & RX_FLAG_ONLY_MONITOR))
+ decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
+
+ spin_lock_bh(&ar->ab->base_lock);
+ peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
+ if (peer && peer->sta)
+ pubsta = peer->sta;
+ spin_unlock_bh(&ar->ab->base_lock);
+
ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
- "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+ "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
msdu,
msdu->len,
- ieee80211_get_SA(hdr),
- ath11k_print_get_tid(hdr, tid, sizeof(tid)),
- is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
- "mcast" : "ucast",
- (__le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4,
+ peer ? peer->addr : NULL,
+ rxcb->tid,
+ is_mcbc ? "mcast" : "ucast",
+ rxcb->seq_no,
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
@@ -2426,22 +2495,32 @@ static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *nap
ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
msdu->data, msdu->len);
+ rx_status = IEEE80211_SKB_RXCB(msdu);
+ *rx_status = *status;
+
/* TODO: trace rx packet */
- ieee80211_rx_napi(ar->hw, NULL, msdu, napi);
+ /* The PN for multicast packets is not validated in HW,
+ * so skip the 802.3 rx path for them.
+ * Also, fast_rx expects the STA to be authorized, hence
+ * EAPOL packets are sent via the slow path.
+ */
+ if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
+ !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
+ rx_status->flag |= RX_FLAG_8023;
+
+ ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
}
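To make the delivery decision above easier to follow, here is a small sketch of the eligibility test applied before marking a frame RX_FLAG_8023: the MSDU must be ethernet-decapped, must not be EAPOL, and must not be a decrypted multicast frame (whose PN is validated by mac80211 instead). The enum and helper names are local to the sketch.

#include <stdbool.h>
#include <stdio.h>

enum decap_type { DECAP_RAW, DECAP_NATIVE_WIFI, DECAP_ETHERNET2_DIX, DECAP_8023 };

/* Mirrors the condition used before setting RX_FLAG_8023 in the delivery path. */
static bool can_use_8023_fast_path(enum decap_type decap, bool is_eapol,
				   bool is_mcbc, bool decrypted)
{
	if (decap != DECAP_ETHERNET2_DIX)
		return false;	/* only ethernet-decapped frames qualify */
	if (is_eapol)
		return false;	/* fast_rx requires an authorized STA */
	if (is_mcbc && decrypted)
		return false;	/* mcast PN is validated in mac80211 */
	return true;
}

int main(void)
{
	printf("%d\n", can_use_8023_fast_path(DECAP_ETHERNET2_DIX, false, false, true)); /* 1 */
	printf("%d\n", can_use_8023_fast_path(DECAP_ETHERNET2_DIX, true, false, true));  /* 0 */
	return 0;
}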
static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
struct sk_buff *msdu,
- struct sk_buff_head *msdu_list)
+ struct sk_buff_head *msdu_list,
+ struct ieee80211_rx_status *rx_status)
{
struct ath11k_base *ab = ar->ab;
struct hal_rx_desc *rx_desc, *lrx_desc;
struct rx_attention *rx_attention;
- struct ieee80211_rx_status rx_status = {0};
- struct ieee80211_rx_status *status;
struct ath11k_skb_rxcb *rxcb;
- struct ieee80211_hdr *hdr;
struct sk_buff *last_buf;
u8 l3_pad_bytes;
u8 *hdr_status;
@@ -2458,6 +2537,12 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
rx_desc = (struct hal_rx_desc *)msdu->data;
+ if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
+ ath11k_warn(ar->ab, "msdu len not valid\n");
+ ret = -EIO;
+ goto free_out;
+ }
+
lrx_desc = (struct hal_rx_desc *)last_buf->data;
rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
@@ -2497,19 +2582,11 @@ static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
}
}
- hdr = (struct ieee80211_hdr *)msdu->data;
-
- /* Process only data frames */
- if (!ieee80211_is_data(hdr->frame_control))
- return -EINVAL;
-
- ath11k_dp_rx_h_ppdu(ar, rx_desc, &rx_status);
- ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, &rx_status);
+ ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
+ ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
- rx_status.flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+ rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rx_status;
return 0;
free_out:
@@ -2524,6 +2601,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
struct ath11k_skb_rxcb *rxcb;
struct sk_buff *msdu;
struct ath11k *ar;
+ struct ieee80211_rx_status rx_status = {0};
u8 mac_id;
int ret;
@@ -2546,7 +2624,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list);
+ ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
if (ret) {
ath11k_dbg(ab, ATH11K_DBG_DATA,
"Unable to process msdu %d", ret);
@@ -2554,7 +2632,7 @@ static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
continue;
}
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
(*quota)--;
}
@@ -2636,10 +2714,14 @@ try_again:
RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
rxcb->is_continuation = !!(desc.rx_msdu_info.info0 &
RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
- rxcb->mac_id = mac_id;
+ rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
+ desc.rx_mpdu_info.meta_data);
+ rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
+ desc.rx_mpdu_info.info0);
rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
desc.info0);
+ rxcb->mac_id = mac_id;
__skb_queue_tail(&msdu_list, msdu);
if (total_msdu_reaped >= quota && !rxcb->is_continuation) {
@@ -2674,7 +2756,7 @@ try_again:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM);
+ ab->hw_params.hal_params->rx_buf_rbm);
}
ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list,
@@ -2867,6 +2949,7 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
int *budget, struct sk_buff_head *skb_list)
{
struct ath11k *ar;
+ const struct ath11k_hw_hal_params *hal_params;
struct ath11k_pdev_dp *dp;
struct dp_rxdma_ring *rx_ring;
struct hal_srng *srng;
@@ -2937,8 +3020,9 @@ move_next:
&buf_id);
if (!skb) {
+ hal_params = ab->hw_params.hal_params;
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
- HAL_RX_BUF_RBM_SW3_BM);
+ hal_params->rx_buf_rbm);
num_buffs_reaped++;
break;
}
@@ -2948,7 +3032,8 @@ move_next:
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
- cookie, HAL_RX_BUF_RBM_SW3_BM);
+ cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
ath11k_hal_srng_src_get_next_entry(ab, srng);
num_buffs_reaped++;
}
@@ -2969,6 +3054,8 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
struct ath11k_peer *peer;
struct ath11k_sta *arsta;
int num_buffs_reaped = 0;
+ u32 rx_buf_sz;
+ u16 log_type = 0;
__skb_queue_head_init(&skb_list);
@@ -2981,8 +3068,16 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
memset(&ppdu_info, 0, sizeof(ppdu_info));
ppdu_info.peer_id = HAL_INVALID_PEERID;
- if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+ if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
+ rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
+ } else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
+ log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
+ rx_buf_sz = DP_RX_BUFFER_SIZE;
+ }
+
+ if (log_type)
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
hal_status = ath11k_hal_rx_parse_mon_status(ab, &ppdu_info, skb);
@@ -3010,7 +3105,7 @@ int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
ath11k_dp_rx_update_peer_stats(arsta, &ppdu_info);
if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
- trace_ath11k_htt_rxdesc(ar, skb->data, DP_RX_BUFFER_SIZE);
+ trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
spin_unlock_bh(&ab->base_lock);
rcu_read_unlock();
@@ -3310,7 +3405,7 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
paddr = dma_map_single(ab->dev, defrag_skb->data,
defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
if (dma_mapping_error(ab->dev, paddr))
return -ENOMEM;
@@ -3327,7 +3422,8 @@ static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_ti
cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
- ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie, HAL_RX_BUF_RBM_SW3_BM);
+ ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
+ ab->hw_params.hal_params->rx_buf_rbm);
 /* Fill mpdu details into reo entrance ring */
srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
@@ -3375,7 +3471,7 @@ err_free_idr:
spin_unlock_bh(&rx_refill_ring->idr_lock);
err_unmap_dma:
dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
- DMA_FROM_DEVICE);
+ DMA_TO_DEVICE);
return ret;
}
@@ -3704,7 +3800,7 @@ int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
&rbm);
if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
- rbm != HAL_RX_BUF_RBM_SW3_BM) {
+ rbm != ab->hw_params.hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
ath11k_dp_rx_link_desc_return(ab, desc,
@@ -3760,7 +3856,7 @@ exit:
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM);
+ ab->hw_params.hal_params->rx_buf_rbm);
}
return tot_n_bufs_reaped;
@@ -3941,7 +4037,6 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
{
struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
struct ieee80211_rx_status rxs = {0};
- struct ieee80211_rx_status *status;
bool drop = true;
switch (rxcb->err_rel_src) {
@@ -3961,10 +4056,7 @@ static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
return;
}
- status = IEEE80211_SKB_RXCB(msdu);
- *status = rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, msdu);
+ ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
}
int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
@@ -4060,7 +4152,7 @@ int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
rx_ring = &ar->dp.rx_refill_buf_ring;
ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
- HAL_RX_BUF_RBM_SW3_BM);
+ ab->hw_params.hal_params->rx_buf_rbm);
}
rcu_read_lock();
@@ -4169,7 +4261,7 @@ int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
if (num_buf_freed)
ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
- HAL_RX_BUF_RBM_SW3_BM);
+ ab->hw_params.hal_params->rx_buf_rbm);
return budget - quota;
}
@@ -4740,7 +4832,7 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
struct ieee80211_rx_status *rxs)
{
struct ath11k_base *ab = ar->ab;
- struct sk_buff *msdu, *mpdu_buf, *prev_buf;
+ struct sk_buff *msdu, *prev_buf;
u32 wifi_hdr_len;
struct hal_rx_desc *rx_desc;
char *hdr_desc;
@@ -4748,8 +4840,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
struct ieee80211_hdr_3addr *wh;
struct rx_attention *rx_attention;
- mpdu_buf = NULL;
-
if (!head_msdu)
goto err_merge_fail;
@@ -4832,12 +4922,6 @@ ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
return head_msdu;
err_merge_fail:
- if (mpdu_buf && decap_format != DP_RX_DECAP_TYPE_RAW) {
- ath11k_dbg(ab, ATH11K_DBG_DATA,
- "err_merge_fail mpdu_buf %pK", mpdu_buf);
- /* Free the head buffer */
- dev_kfree_skb_any(mpdu_buf);
- }
return NULL;
}
@@ -4848,7 +4932,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct sk_buff *mon_skb, *skb_next, *header;
- struct ieee80211_rx_status *rxs = &dp->rx_status, *status;
+ struct ieee80211_rx_status *rxs = &dp->rx_status;
mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
tail_msdu, rxs);
@@ -4874,10 +4958,7 @@ static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
}
rxs->flag |= RX_FLAG_ONLY_MONITOR;
- status = IEEE80211_SKB_RXCB(mon_skb);
- *status = *rxs;
-
- ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb);
+ ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
mon_skb = skb_next;
} while (mon_skb);
rxs->flag = 0;
@@ -4899,6 +4980,7 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
{
struct ath11k_pdev_dp *dp = &ar->dp;
struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
+ const struct ath11k_hw_hal_params *hal_params;
void *ring_entry;
void *mon_dst_srng;
u32 ppdu_id;
@@ -4962,16 +5044,18 @@ static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
if (rx_bufs_used) {
rx_mon_stats->dest_ppdu_done++;
+ hal_params = ar->ab->hw_params.hal_params;
+
if (ar->ab->hw_params.rxdma1_enable)
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rxdma_mon_buf_ring,
rx_bufs_used,
- HAL_RX_BUF_RBM_SW3_BM);
+ hal_params->rx_buf_rbm);
else
ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
&dp->rx_refill_buf_ring,
rx_bufs_used,
- HAL_RX_BUF_RBM_SW3_BM);
+ hal_params->rx_buf_rbm);
}
}
@@ -5029,7 +5113,7 @@ int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
int ret = 0;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags))
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
ret = ath11k_dp_mon_process_rx(ab, mac_id, napi, budget);
else
ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c
index 8bba5234f81f..879fb2a9dc0c 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.c
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.c
@@ -78,7 +78,7 @@ enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
}
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb)
+ struct ath11k_sta *arsta, struct sk_buff *skb)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_dp *dp = &ab->dp;
@@ -115,11 +115,8 @@ int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
tcl_ring_sel:
tcl_ring_retry = false;
- /* For some chip, it can only use tcl0 to tx */
- if (ar->ab->hw_params.tcl_0_only)
- ti.ring_id = 0;
- else
- ti.ring_id = ring_selector % DP_TCL_NUM_RING_MAX;
+
+ ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
ring_map |= BIT(ti.ring_id);
@@ -131,7 +128,7 @@ tcl_ring_sel:
spin_unlock_bh(&tx_ring->tx_idr_lock);
if (ret < 0) {
- if (ring_map == (BIT(DP_TCL_NUM_RING_MAX) - 1)) {
+ if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1)) {
atomic_inc(&ab->soc_stats.tx_err.misc_fail);
return -ENOSPC;
}
@@ -145,7 +142,15 @@ tcl_ring_sel:
FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);
- ti.meta_data_flags = arvif->tcl_metadata;
+
+ if (ieee80211_has_a4(hdr->frame_control) &&
+ is_multicast_ether_addr(hdr->addr3) && arsta &&
+ arsta->use_4addr_set) {
+ ti.meta_data_flags = arsta->tcl_metadata;
+ ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+ } else {
+ ti.meta_data_flags = arvif->tcl_metadata;
+ }
if (ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW) {
if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
@@ -240,8 +245,8 @@ tcl_ring_sel:
* checking this ring earlier for each pkt tx.
* Restart ring selection if some rings are not checked yet.
*/
- if (ring_map != (BIT(DP_TCL_NUM_RING_MAX) - 1) &&
- !ar->ab->hw_params.tcl_0_only) {
+ if (ring_map != (BIT(ab->hw_params.max_tx_ring) - 1) &&
+ ab->hw_params.max_tx_ring > 1) {
tcl_ring_retry = true;
ring_selector++;
}
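A standalone sketch of the ring selection implemented by the hunks above: pick a TCL ring round-robin modulo the hw-specific max_tx_ring, track visited rings in a bitmap, and retry on a full ring until every ring has been tried. The queue-full check is simulated and the retry bookkeeping is simplified relative to the driver.

#include <stdio.h>
#include <stdbool.h>

/* Pretend ring 0 is full and the others can accept a descriptor. */
static bool ring_has_space(unsigned int ring_id)
{
	return ring_id != 0;
}

/* Returns the ring used, or -1 if every ring was full. */
static int select_tx_ring(unsigned int max_tx_ring, unsigned int *ring_selector)
{
	unsigned int ring_map = 0;
	unsigned int ring_id;

	while (ring_map != (1u << max_tx_ring) - 1) {
		ring_id = (*ring_selector)++ % max_tx_ring;
		ring_map |= 1u << ring_id;

		if (ring_has_space(ring_id))
			return (int)ring_id;
		/* ring full: loop again only while some rings are unchecked */
	}
	return -1;
}

int main(void)
{
	unsigned int selector = 0;

	printf("selected ring %d\n", select_tx_ring(3, &selector)); /* ring 1 */
	return 0;
}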
@@ -614,6 +619,9 @@ int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
struct hal_srng *cmd_ring;
int cmd_num;
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
+ return -ESHUTDOWN;
+
cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
@@ -1068,12 +1076,16 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
- if (!reset)
+ if (!reset) {
tlv_filter.rx_filter =
HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
- else
+ } else {
tlv_filter = ath11k_mac_mon_status_filter_default;
+ if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
+ tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
+ }
+
ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
dp->mac_id + i,
HAL_RXDMA_MONITOR_STATUS,
diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h
index f8a9f9c8e444..698b907b878d 100644
--- a/drivers/net/wireless/ath/ath11k/dp_tx.h
+++ b/drivers/net/wireless/ath/ath11k/dp_tx.h
@@ -17,7 +17,7 @@ struct ath11k_dp_htt_wbm_tx_status {
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
- struct sk_buff *skb);
+ struct ath11k_sta *arsta, struct sk_buff *skb);
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id);
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
enum hal_reo_cmd_type type,
diff --git a/drivers/net/wireless/ath/ath11k/hal_desc.h b/drivers/net/wireless/ath/ath11k/hal_desc.h
index d54ec6aa6281..00b595b84939 100644
--- a/drivers/net/wireless/ath/ath11k/hal_desc.h
+++ b/drivers/net/wireless/ath/ath11k/hal_desc.h
@@ -496,6 +496,8 @@ struct hal_tlv_hdr {
#define RX_MPDU_DESC_INFO0_DA_IDX_TIMEOUT BIT(29)
#define RX_MPDU_DESC_INFO0_RAW_MPDU BIT(30)
+#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0)
+
struct rx_mpdu_desc {
u32 info0; /* %RX_MPDU_DESC_INFO */
u32 meta_data;
diff --git a/drivers/net/wireless/ath/ath11k/hal_rx.c b/drivers/net/wireless/ath/ath11k/hal_rx.c
index 325055ca41ab..329c404cfa80 100644
--- a/drivers/net/wireless/ath/ath11k/hal_rx.c
+++ b/drivers/net/wireless/ath/ath11k/hal_rx.c
@@ -356,6 +356,7 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
struct hal_wbm_release_ring *wbm_desc = desc;
enum hal_wbm_rel_desc_type type;
enum hal_wbm_rel_src_module rel_src;
+ enum hal_rx_buf_return_buf_manager ret_buf_mgr;
type = FIELD_GET(HAL_WBM_RELEASE_INFO0_DESC_TYPE,
wbm_desc->info0);
@@ -371,8 +372,9 @@ int ath11k_hal_wbm_desc_parse_err(struct ath11k_base *ab, void *desc,
rel_src != HAL_WBM_REL_SRC_MODULE_REO)
return -EINVAL;
- if (FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
- wbm_desc->buf_addr_info.info1) != HAL_RX_BUF_RBM_SW3_BM) {
+ ret_buf_mgr = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
+ wbm_desc->buf_addr_info.info1);
+ if (ret_buf_mgr != ab->hw_params.hal_params->rx_buf_rbm) {
ab->soc_stats.invalid_rbm++;
return -EINVAL;
}
diff --git a/drivers/net/wireless/ath/ath11k/hw.c b/drivers/net/wireless/ath/ath11k/hw.c
index d9596903b0a5..da35fcf5bc56 100644
--- a/drivers/net/wireless/ath/ath11k/hw.c
+++ b/drivers/net/wireless/ath/ath11k/hw.c
@@ -7,10 +7,11 @@
#include <linux/bitops.h>
#include <linux/bitfield.h>
-#include "hw.h"
#include "core.h"
#include "ce.h"
#include "hif.h"
+#include "hal.h"
+#include "hw.h"
/* Map from pdev index to hw mac index */
static u8 ath11k_hw_ipq8074_mac_from_pdev_id(int pdev_idx)
@@ -97,6 +98,7 @@ static void ath11k_init_wmi_config_qca6390(struct ath11k_base *ab,
config->num_multicast_filter_entries = 0x20;
config->num_wow_filters = 0x16;
config->num_keep_alive_pattern = 0;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static void ath11k_hw_ipq8074_reo_setup(struct ath11k_base *ab)
@@ -197,6 +199,7 @@ static void ath11k_init_wmi_config_ipq8074(struct ath11k_base *ab,
config->peer_map_unmap_v2_support = 1;
config->twt_ap_pdev_count = ab->num_radios;
config->twt_ap_sta_count = 1000;
+ config->flag1 |= WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64;
}
static int ath11k_hw_mac_id_to_pdev_id_ipq8074(struct ath11k_hw_params *hw,
@@ -372,6 +375,17 @@ static void ath11k_hw_ipq8074_rx_desc_set_msdu_len(struct hal_rx_desc *desc, u16
desc->u.ipq8074.msdu_start.info1 = __cpu_to_le32(info);
}
+static bool ath11k_hw_ipq8074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.ipq8074.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.ipq8074.mpdu_start.addr2;
+}
+
static
struct rx_attention *ath11k_hw_ipq8074_rx_desc_get_attention(struct hal_rx_desc *desc)
{
@@ -543,6 +557,17 @@ static u8 *ath11k_hw_qcn9074_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.qcn9074.msdu_payload[0];
}
+static bool ath11k_hw_ipq9074_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.qcn9074.mpdu_start.info11) &
+ RX_MPDU_START_INFO11_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.qcn9074.mpdu_start.addr2;
+}
+
static bool ath11k_hw_wcn6855_rx_desc_get_first_msdu(struct hal_rx_desc *desc)
{
return !!FIELD_GET(RX_MSDU_END_INFO2_FIRST_MSDU_WCN6855,
@@ -703,6 +728,17 @@ static u8 *ath11k_hw_wcn6855_rx_desc_get_msdu_payload(struct hal_rx_desc *desc)
return &desc->u.wcn6855.msdu_payload[0];
}
+static bool ath11k_hw_wcn6855_rx_desc_mac_addr2_valid(struct hal_rx_desc *desc)
+{
+ return __le32_to_cpu(desc->u.wcn6855.mpdu_start.info1) &
+ RX_MPDU_START_INFO1_MAC_ADDR2_VALID;
+}
+
+static u8 *ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2(struct hal_rx_desc *desc)
+{
+ return desc->u.wcn6855.mpdu_start.addr2;
+}
+
static void ath11k_hw_wcn6855_reo_setup(struct ath11k_base *ab)
{
u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
@@ -799,6 +835,8 @@ const struct ath11k_hw_ops ipq8074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops ipq6018_ops = {
@@ -835,6 +873,8 @@ const struct ath11k_hw_ops ipq6018_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qca6390_ops = {
@@ -871,6 +911,8 @@ const struct ath11k_hw_ops qca6390_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_ipq8074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq8074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq8074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops qcn9074_ops = {
@@ -907,6 +949,8 @@ const struct ath11k_hw_ops qcn9074_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_qcn9074_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_ipq8074_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_ipq8074_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_ipq9074_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_ipq9074_rx_desc_mpdu_start_addr2,
};
const struct ath11k_hw_ops wcn6855_ops = {
@@ -943,6 +987,8 @@ const struct ath11k_hw_ops wcn6855_ops = {
.rx_desc_get_msdu_payload = ath11k_hw_wcn6855_rx_desc_get_msdu_payload,
.reo_setup = ath11k_hw_wcn6855_reo_setup,
.mpdu_info_get_peerid = ath11k_hw_wcn6855_mpdu_info_get_peerid,
+ .rx_desc_mac_addr2_valid = ath11k_hw_wcn6855_rx_desc_mac_addr2_valid,
+ .rx_desc_mpdu_start_addr2 = ath11k_hw_wcn6855_rx_desc_mpdu_start_addr2,
};
#define ATH11K_TX_RING_MASK_0 0x1
@@ -2079,3 +2125,11 @@ const struct ath11k_hw_regs wcn6855_regs = {
.pcie_qserdes_sysclk_en_sel = 0x01e0c0ac,
.pcie_pcs_osc_dtct_config_base = 0x01e0c628,
};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW3_BM,
+};
+
+const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390 = {
+ .rx_buf_rbm = HAL_RX_BUF_RBM_SW1_BM,
+};
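The switch from the fixed HAL_RX_BUF_RBM_SW3_BM to hw_params.hal_params->rx_buf_rbm amounts to a per-chip parameter table; a minimal standalone sketch of that pattern follows, with the enum values reduced to stand-ins.

#include <stdio.h>

enum rx_buf_rbm { RBM_SW1_BM = 1, RBM_SW3_BM = 3 }; /* stand-in values */

struct hal_params {
	enum rx_buf_rbm rx_buf_rbm;
};

struct hw_params {
	const char *name;
	unsigned int max_tx_ring;
	const struct hal_params *hal_params;
};

static const struct hal_params hal_params_ipq8074 = { .rx_buf_rbm = RBM_SW3_BM };
static const struct hal_params hal_params_qca6390 = { .rx_buf_rbm = RBM_SW1_BM };

static const struct hw_params hw_params_list[] = {
	{ .name = "ipq8074", .max_tx_ring = 3, .hal_params = &hal_params_ipq8074 },
	{ .name = "qca6390", .max_tx_ring = 1, .hal_params = &hal_params_qca6390 },
};

int main(void)
{
	/* Callers read the per-chip value instead of hard-coding one RBM. */
	for (unsigned int i = 0; i < sizeof(hw_params_list) / sizeof(hw_params_list[0]); i++)
		printf("%s: rx_buf_rbm=%d max_tx_ring=%u\n",
		       hw_params_list[i].name,
		       hw_params_list[i].hal_params->rx_buf_rbm,
		       hw_params_list[i].max_tx_ring);
	return 0;
}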
diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h
index 62f5978b3005..19223d36846e 100644
--- a/drivers/net/wireless/ath/ath11k/hw.h
+++ b/drivers/net/wireless/ath/ath11k/hw.h
@@ -6,6 +6,7 @@
#ifndef ATH11K_HW_H
#define ATH11K_HW_H
+#include "hal.h"
#include "wmi.h"
/* Target configuration defines */
@@ -119,6 +120,10 @@ struct ath11k_hw_ring_mask {
u8 host2rxdma[ATH11K_EXT_IRQ_GRP_NUM_MAX];
};
+struct ath11k_hw_hal_params {
+ enum hal_rx_buf_return_buf_manager rx_buf_rbm;
+};
+
struct ath11k_hw_params {
const char *name;
u16 hw_rev;
@@ -128,7 +133,7 @@ struct ath11k_hw_params {
struct {
const char *dir;
size_t board_size;
- size_t cal_size;
+ size_t cal_offset;
} fw;
const struct ath11k_hw_ops *hw_ops;
@@ -152,8 +157,14 @@ struct ath11k_hw_params {
bool rx_mac_buf_ring;
bool vdev_start_delay;
bool htt_peer_map_v2;
- bool tcl_0_only;
- u8 spectral_fft_sz;
+
+ struct {
+ u8 fft_sz;
+ u8 fft_pad_sz;
+ u8 summary_pad_sz;
+ u8 fft_hdr_len;
+ u16 max_fft_bins;
+ } spectral;
u16 interface_modes;
bool supports_monitor;
@@ -163,6 +174,8 @@ struct ath11k_hw_params {
bool supports_suspend;
u32 hal_desc_sz;
bool fix_l1ss;
+ u8 max_tx_ring;
+ const struct ath11k_hw_hal_params *hal_params;
};
struct ath11k_hw_ops {
@@ -202,6 +215,8 @@ struct ath11k_hw_ops {
u8 *(*rx_desc_get_msdu_payload)(struct hal_rx_desc *desc);
void (*reo_setup)(struct ath11k_base *ab);
u16 (*mpdu_info_get_peerid)(u8 *tlv_data);
+ bool (*rx_desc_mac_addr2_valid)(struct hal_rx_desc *desc);
+ u8* (*rx_desc_mpdu_start_addr2)(struct hal_rx_desc *desc);
};
extern const struct ath11k_hw_ops ipq8074_ops;
@@ -214,6 +229,9 @@ extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_ipq8074;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qca6390;
extern const struct ath11k_hw_ring_mask ath11k_hw_ring_mask_qcn9074;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_ipq8074;
+extern const struct ath11k_hw_hal_params ath11k_hw_hal_params_qca6390;
+
static inline
int ath11k_hw_get_mac_from_pdev_id(struct ath11k_hw_params *hw,
int pdev_idx)
diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c
index e9b3689331ec..1cc55602787b 100644
--- a/drivers/net/wireless/ath/ath11k/mac.c
+++ b/drivers/net/wireless/ath/ath11k/mac.c
@@ -150,6 +150,9 @@ static const struct ieee80211_channel ath11k_6ghz_channels[] = {
CHAN6G(225, 7075, 0),
CHAN6G(229, 7095, 0),
CHAN6G(233, 7115, 0),
+
+ /* new addition in IEEE Std 802.11ax-2021 */
+ CHAN6G(2, 5935, 0),
};
static struct ieee80211_rate ath11k_legacy_rates[] = {
@@ -354,6 +357,18 @@ ath11k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
return 1;
}
+static u32
+ath11k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+ if (he_mcs_mask[nss])
+ return nss + 1;
+
+ return 1;
+}
+
static u8 ath11k_parse_mpdudensity(u8 mpdudensity)
{
/* 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
@@ -488,7 +503,8 @@ struct ath11k_vif *ath11k_mac_get_arvif_by_vdev_id(struct ath11k_base *ab,
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
- if (pdev && pdev->ar) {
+ if (pdev && pdev->ar &&
+ (pdev->ar->allocated_vdev_map & (1LL << vdev_id))) {
arvif = ath11k_mac_get_arvif(pdev->ar, vdev_id);
if (arvif)
return arvif;
@@ -715,32 +731,386 @@ void ath11k_mac_peer_cleanup_all(struct ath11k *ar)
ar->num_stations = 0;
}
-static int ath11k_monitor_vdev_up(struct ath11k *ar, int vdev_id)
+static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
{
- int ret = 0;
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
+ return -ESHUTDOWN;
+
+ if (!wait_for_completion_timeout(&ar->vdev_setup_done,
+ ATH11K_VDEV_SETUP_TIMEOUT_HZ))
+ return -ETIMEDOUT;
+
+ return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
+}
+
+static void
+ath11k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
+ struct ieee80211_chanctx_conf *conf,
+ void *data)
+{
+ struct cfg80211_chan_def **def = data;
+
+ *def = &conf->def;
+}
+
+static int ath11k_mac_monitor_vdev_start(struct ath11k *ar, int vdev_id,
+ struct cfg80211_chan_def *chandef)
+{
+ struct ieee80211_channel *channel;
+ struct wmi_vdev_start_req_arg arg = {};
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ channel = chandef->chan;
+
+ arg.vdev_id = vdev_id;
+ arg.channel.freq = channel->center_freq;
+ arg.channel.band_center_freq1 = chandef->center_freq1;
+ arg.channel.band_center_freq2 = chandef->center_freq2;
+
+ arg.channel.mode = ath11k_phymodes[chandef->chan->band][chandef->width];
+ arg.channel.chan_radar = !!(channel->flags & IEEE80211_CHAN_RADAR);
+
+ arg.channel.min_power = 0;
+ arg.channel.max_power = channel->max_power * 2;
+ arg.channel.max_reg_power = channel->max_reg_power * 2;
+ arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
+
+ arg.pref_tx_streams = ar->num_tx_chains;
+ arg.pref_rx_streams = ar->num_rx_chains;
+
+ arg.channel.passive = !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
+
+ reinit_completion(&ar->vdev_setup_done);
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_start(ar, &arg, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for monitor vdev %i start: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
ret = ath11k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n",
vdev_id, ret);
- return ret;
+ goto vdev_stop;
}
ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i started\n",
vdev_id);
+
return 0;
+
+vdev_stop:
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev %i after start failure: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize setup for vdev %i stop: %d\n",
+ vdev_id, ret);
+ return ret;
+ }
+
+ return -EIO;
}
-static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+static int ath11k_mac_monitor_vdev_stop(struct ath11k *ar)
{
- /* mac80211 requires this op to be present and that's why
- * there's an empty function, this can be extended when
- * required.
- */
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ reinit_completion(&ar->vdev_setup_done);
+
+ ret = ath11k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_mac_vdev_setup_sync(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to synchronize monitor vdev %i stop: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ret = ath11k_wmi_vdev_down(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to put down monitor vdev %i: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %i stopped\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+}
+
+static int ath11k_mac_monitor_vdev_create(struct ath11k *ar)
+{
+ struct ath11k_pdev *pdev = ar->pdev;
+ struct vdev_create_params param = {};
+ int bit, ret;
+ u8 tmp_addr[6] = {0};
+ u16 nss;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ if (ar->ab->free_vdev_map == 0) {
+ ath11k_warn(ar->ab, "failed to find free vdev id for monitor vdev\n");
+ return -ENOMEM;
+ }
+
+ bit = __ffs64(ar->ab->free_vdev_map);
+
+ ar->monitor_vdev_id = bit;
+
+ param.if_id = ar->monitor_vdev_id;
+ param.type = WMI_VDEV_TYPE_MONITOR;
+ param.subtype = WMI_VDEV_SUBTYPE_NONE;
+ param.pdev_id = pdev->pdev_id;
+
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+ param.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
+ }
+ if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+ param.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
+ param.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
+ }
+
+ ret = ath11k_wmi_vdev_create(ar, tmp_addr, &param);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request monitor vdev %i creation: %d\n",
+ ar->monitor_vdev_id, ret);
+ ar->monitor_vdev_id = -1;
+ return ret;
+ }
+
+ nss = get_num_chains(ar->cfg_tx_chainmask) ? : 1;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, ar->monitor_vdev_id,
+ WMI_VDEV_PARAM_NSS, nss);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set vdev %d chainmask 0x%x, nss %d :%d\n",
+ ar->monitor_vdev_id, ar->cfg_tx_chainmask, nss, ret);
+ goto err_vdev_del;
+ }
+
+ ret = ath11k_mac_txpower_recalc(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to recalc txpower for monitor vdev %d: %d\n",
+ ar->monitor_vdev_id, ret);
+ goto err_vdev_del;
+ }
+
+ ar->allocated_vdev_map |= 1LL << ar->monitor_vdev_id;
+ ar->ab->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->num_created_vdevs++;
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d created\n",
+ ar->monitor_vdev_id);
+
+ return 0;
+
+err_vdev_del:
+ ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ ar->monitor_vdev_id = -1;
+ return ret;
+}
+
+static int ath11k_mac_monitor_vdev_delete(struct ath11k *ar)
+{
+ int ret;
+ unsigned long time_left;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags))
+ return 0;
+
+ reinit_completion(&ar->vdev_delete_done);
+
+ ret = ath11k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to request wmi monitor vdev %i removal: %d\n",
+ ar->monitor_vdev_id, ret);
+ return ret;
+ }
+
+ time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
+ ATH11K_VDEV_DELETE_TIMEOUT_HZ);
+ if (time_left == 0) {
+ ath11k_warn(ar->ab, "Timeout in receiving vdev delete response\n");
+ } else {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor vdev %d deleted\n",
+ ar->monitor_vdev_id);
+
+ ar->allocated_vdev_map &= ~(1LL << ar->monitor_vdev_id);
+ ar->ab->free_vdev_map |= 1LL << (ar->monitor_vdev_id);
+ ar->num_created_vdevs--;
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ }
+
+ return ret;
+}
+
+static int ath11k_mac_monitor_start(struct ath11k *ar)
+{
+ struct cfg80211_chan_def *chandef = NULL;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ieee80211_iter_chan_contexts_atomic(ar->hw,
+ ath11k_mac_get_any_chandef_iter,
+ &chandef);
+ if (!chandef)
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_start(ar, ar->monitor_vdev_id, chandef);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor vdev: %d\n", ret);
+ ath11k_mac_monitor_vdev_delete(ar);
+ return ret;
+ }
+
+ set_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->num_started_vdevs++;
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during start: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor started\n");
return 0;
}
+static int ath11k_mac_monitor_stop(struct ath11k *ar)
+{
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags))
+ return 0;
+
+ ret = ath11k_mac_monitor_vdev_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor vdev: %d\n", ret);
+ return ret;
+ }
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+ ar->num_started_vdevs--;
+
+ ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, true);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to configure htt monitor mode ring during stop: %d",
+ ret);
+ return ret;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac monitor stopped ret %d\n", ret);
+
+ return 0;
+}
+
+static int ath11k_mac_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct ath11k *ar = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ int ret = 0;
+
+ mutex_lock(&ar->conf_mutex);
+
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ set_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor: %d",
+ ret);
+ goto err_mon_del;
+ }
+ } else {
+ clear_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags);
+
+ if (!test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED,
+ &ar->monitor_flags))
+ goto out;
+
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor: %d",
+ ret);
+ goto out;
+ }
+
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to delete monitor vdev: %d",
+ ret);
+ goto out;
+ }
+ }
+ }
+
+out:
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+
+err_mon_del:
+ ath11k_mac_monitor_vdev_delete(ar);
+ mutex_unlock(&ar->conf_mutex);
+ return ret;
+}
+
static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
{
struct ath11k *ar = arvif->ar;
@@ -1035,7 +1405,7 @@ ath11k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
}
static bool
-ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
+ath11k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[])
{
int nss;
@@ -1093,6 +1463,14 @@ static void ath11k_peer_assoc_h_ht(struct ath11k *ar,
arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
}
+ /* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+ * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, reset both
+ * flags if the guard interval is the default GI.
+ */
+ if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_DEFAULT_GI)
+ arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40);
+
if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_SGI_40))
@@ -1207,6 +1585,34 @@ ath11k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
return tx_mcs_set;
}
+static u8 ath11k_get_nss_160mhz(struct ath11k *ar,
+ u8 max_nss)
+{
+ u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+ u8 max_sup_nss = 0;
+
+ switch (nss_ratio_info) {
+ case WMI_NSS_RATIO_1BY2_NSS:
+ max_sup_nss = max_nss >> 1;
+ break;
+ case WMI_NSS_RATIO_3BY4_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+ break;
+ case WMI_NSS_RATIO_1_NSS:
+ max_sup_nss = max_nss;
+ break;
+ case WMI_NSS_RATIO_2_NSS:
+ ath11k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid nss ratio received from firmware: %d\n",
+ nss_ratio_info);
+ break;
+ }
+
+ return max_sup_nss;
+}
+
static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -1216,10 +1622,12 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
struct ath11k_vif *arvif = (void *)vif->drv_priv;
struct cfg80211_chan_def def;
enum nl80211_band band;
- const u16 *vht_mcs_mask;
+ u16 *vht_mcs_mask;
u8 ampdu_factor;
u8 max_nss, vht_mcs;
- int i;
+ int i, vht_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
return;
@@ -1262,6 +1670,24 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
arg->bw_160 = true;
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+
+ if (vht_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (vht_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting vht range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ vht_mcs_mask[sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+ }
+
/* Calculate peer NSS capability from VHT capabilities if STA
* supports VHT.
*/
@@ -1294,10 +1720,95 @@ static void ath11k_peer_assoc_h_vht(struct ath11k *ar,
/* TODO: Check */
arg->tx_max_mcs_nss = 0xFF;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
- sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+ if (arg->peer_phymode == MODE_11AC_VHT160 ||
+ arg->peer_phymode == MODE_11AC_VHT80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AC_VHT160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+ sta->addr, arg->peer_max_mpdu, arg->peer_flags,
+ arg->peer_bw_rxnss_override);
+}
+
+static int ath11k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+ switch ((mcs_map >> (2 * nss)) & 0x3) {
+ case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+ case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+ }
+ return 0;
+}
+
+static u16 ath11k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+ const u16 he_mcs_limit[NL80211_HE_NSS_MAX])
+{
+ int idx_limit;
+ int nss;
+ u16 mcs_map;
+ u16 mcs;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+ mcs_map = ath11k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+ he_mcs_limit[nss];
+
+ if (mcs_map)
+ idx_limit = fls(mcs_map) - 1;
+ else
+ idx_limit = -1;
+
+ switch (idx_limit) {
+ case 0 ... 7:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+ break;
+ case 8:
+ case 9:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+ break;
+ case 10:
+ case 11:
+ mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+ break;
+ default:
+ WARN_ON(1);
+ fallthrough;
+ case -1:
+ mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+ break;
+ }
- /* TODO: rxnss_override */
+ tx_mcs_set &= ~(0x3 << (nss * 2));
+ tx_mcs_set |= mcs << (nss * 2);
+ }
+
+ return tx_mcs_set;
+}
+
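For clarity, a standalone sketch of the 2-bits-per-stream HE MCS map encoding that ath11k_peer_assoc_h_he_limit() manipulates: per stream, the values 0/1/2 mean MCS 0-7/0-9/0-11 and 3 means not supported. The sketch clamps each stream against a per-stream maximum MCS rather than using the driver's mask form, so the constants and helper are local assumptions.

#include <stdio.h>
#include <stdint.h>

#define HE_NSS_MAX 8
enum { MCS_0_7 = 0, MCS_0_9 = 1, MCS_0_11 = 2, MCS_NOT_SUPPORTED = 3 };

/* Highest MCS index allowed for a 2-bit map value (e.g. 0-9 -> 9), or -1. */
static int map_val_to_max_mcs(unsigned int v)
{
	switch (v) {
	case MCS_0_7:  return 7;
	case MCS_0_9:  return 9;
	case MCS_0_11: return 11;
	}
	return -1;
}

/* Clamp each stream of a 16-bit HE MCS map to a per-stream MCS limit. */
static uint16_t limit_he_mcs_map(uint16_t mcs_map, const int max_mcs_limit[HE_NSS_MAX])
{
	for (unsigned int nss = 0; nss < HE_NSS_MAX; nss++) {
		unsigned int v = (mcs_map >> (2 * nss)) & 0x3;
		int max_mcs = map_val_to_max_mcs(v);
		unsigned int out = MCS_NOT_SUPPORTED;

		if (max_mcs >= 0 && max_mcs_limit[nss] >= 0) {
			int limit = max_mcs < max_mcs_limit[nss] ?
				    max_mcs : max_mcs_limit[nss];

			out = (limit <= 7) ? MCS_0_7 :
			      (limit <= 9) ? MCS_0_9 : MCS_0_11;
		}
		mcs_map &= ~(0x3 << (2 * nss));
		mcs_map |= out << (2 * nss);
	}
	return mcs_map;
}

int main(void)
{
	/* Two streams supporting MCS 0-11, streams 3-8 not supported. */
	uint16_t map = 0xfffa;
	int limit[HE_NSS_MAX] = { 9, 7, -1, -1, -1, -1, -1, -1 };

	printf("0x%04x -> 0x%04x\n", map, limit_he_mcs_map(map, limit)); /* 0xfff1 */
	return 0;
}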
+static bool
+ath11k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+ int nss;
+
+ for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+ if (he_mcs_mask[nss])
+ return false;
+
+ return true;
}
static void ath11k_peer_assoc_h_he(struct ath11k *ar,
@@ -1305,13 +1816,30 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
struct ieee80211_sta *sta,
struct peer_assoc_params *arg)
{
+ struct ath11k_vif *arvif = (void *)vif->drv_priv;
+ struct cfg80211_chan_def def;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
u8 ampdu_factor;
- u16 v;
+ enum nl80211_band band;
+ u16 *he_mcs_mask;
+ u8 max_nss, he_mcs;
+ u16 he_tx_mcs = 0, v = 0;
+ int i, he_nss, nss_idx;
+ bool user_rate_valid = true;
+ u32 rx_nss, tx_nss, nss_160;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
if (!he_cap->has_he)
return;
+ band = def.chan->band;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+ if (ath11k_peer_assoc_h_he_masked(he_mcs_mask))
+ return;
+
arg->he_flag = true;
memcpy_and_pad(&arg->peer_he_cap_macinfo,
@@ -1388,25 +1916,48 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
arg->twt_requester = true;
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ if (he_nss > sta->rx_nss) {
+ user_rate_valid = false;
+ for (nss_idx = sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+ if (he_mcs_mask[nss_idx]) {
+ user_rate_valid = true;
+ break;
+ }
+ }
+ }
+
+ if (!user_rate_valid) {
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac setting he range mcs value to peer supported nss %d for peer %pM\n",
+ sta->rx_nss, sta->addr);
+ he_mcs_mask[sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
+ }
+
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
if (he_cap->he_cap_elem.phy_cap_info[0] &
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
arg->peer_he_mcs_count++;
+ he_tx_mcs = v;
}
v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
fallthrough;
default:
@@ -1414,11 +1965,102 @@ static void ath11k_peer_assoc_h_he(struct ath11k *ar,
arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+ v = ath11k_peer_assoc_h_he_limit(v, he_mcs_mask);
arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
arg->peer_he_mcs_count++;
+ if (!he_tx_mcs)
+ he_tx_mcs = v;
break;
}
+
+ /* Calculate peer NSS capability from HE capabilities if STA
+ * supports HE.
+ */
+ for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+ /* In case of fixed rates, the MCS range in he_tx_mcs may be marked
+ * unsupported even though he_mcs_mask is set, so check either of them
+ * to find the nss.
+ */
+ if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+ he_mcs_mask[i])
+ max_nss = i + 1;
+ }
+ arg->peer_nss = min(sta->rx_nss, max_nss);
+
+ if (arg->peer_phymode == MODE_11AX_HE160 ||
+ arg->peer_phymode == MODE_11AX_HE80_80) {
+ tx_nss = ath11k_get_nss_160mhz(ar, max_nss);
+ rx_nss = min(arg->peer_nss, tx_nss);
+ arg->peer_bw_rxnss_override = ATH11K_BW_NSS_MAP_ENABLE;
+
+ if (!rx_nss) {
+ ath11k_warn(ar->ab, "invalid max_nss\n");
+ return;
+ }
+
+ if (arg->peer_phymode == MODE_11AX_HE160)
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_160MHZ, rx_nss - 1);
+ else
+ nss_160 = FIELD_PREP(ATH11K_PEER_RX_NSS_80_80MHZ, rx_nss - 1);
+
+ arg->peer_bw_rxnss_override |= nss_160;
+ }
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+ sta->addr, arg->peer_nss,
+ arg->peer_he_mcs_count,
+ arg->peer_bw_rxnss_override);
+}
+
+static void ath11k_peer_assoc_h_he_6ghz(struct ath11k *ar,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct peer_assoc_params *arg)
+{
+ const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
+ struct cfg80211_chan_def def;
+ enum nl80211_band band;
+ u8 ampdu_factor;
+
+ if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
+ return;
+
+ band = def.chan->band;
+
+ if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->he_6ghz_capa.capa)
+ return;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
+ arg->bw_80 = true;
+
+ if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
+ arg->bw_160 = true;
+
+ arg->peer_he_caps_6ghz = le16_to_cpu(sta->he_6ghz_capa.capa);
+ arg->peer_mpdu_density =
+ ath11k_parse_mpdudensity(FIELD_GET(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START,
+ arg->peer_he_caps_6ghz));
+
+ /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of
+ * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value
+ * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE
+ * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz
+ * Band Capabilities element in the 6 GHz band.
+ *
+ * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and
+ * factor is the Maximum A-MPDU Length Exponent from the HE 6 GHz Band capability.
+ */
+ ampdu_factor = FIELD_GET(IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK,
+ he_cap->he_cap_elem.mac_cap_info[3]) +
+ FIELD_GET(IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP,
+ arg->peer_he_caps_6ghz);
+
+ arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR +
+ ampdu_factor)) - 1;
}
static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
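The A-MPDU limit computed above works out to 2^(13 + ampdu_factor) - 1 octets, assuming IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR carries its usual mac80211 value of 13. A small sketch of the arithmetic:

/* Sketch of the 6 GHz peer_max_mpdu computation; the constant 13 is an
 * assumption standing in for IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR.
 */
static unsigned int he_6ghz_max_ampdu_len(unsigned int he_ext, unsigned int band_exp)
{
	unsigned int ampdu_factor = he_ext + band_exp;

	return (1u << (13 + ampdu_factor)) - 1;
}

/* he_ext = 1, band_exp = 2 -> ampdu_factor = 3 -> 2^16 - 1 = 65535 octets */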
@@ -1427,11 +2069,16 @@ static void ath11k_peer_assoc_h_smps(struct ieee80211_sta *sta,
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !sta->he_6ghz_capa.capa)
return;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = le16_get_bits(sta->he_6ghz_capa.capa,
+ IEEE80211_HE_6GHZ_CAP_SM_PS);
+ }
switch (smps) {
case WLAN_HT_CAP_SM_PS_STATIC:
@@ -1621,6 +2268,7 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
enum wmi_phy_mode phymode = MODE_UNKNOWN;
if (WARN_ON(ath11k_mac_vif_chan(vif, &def)))
@@ -1629,10 +2277,12 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
switch (band) {
case NL80211_BAND_2GHZ:
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
phymode = MODE_11AX_HE80_2G;
else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
@@ -1660,7 +2310,8 @@ static void ath11k_peer_assoc_h_phymode(struct ath11k *ar,
case NL80211_BAND_5GHZ:
case NL80211_BAND_6GHZ:
/* Check HE first */
- if (sta->he_cap.has_he) {
+ if (sta->he_cap.has_he &&
+ !ath11k_peer_assoc_h_he_masked(he_mcs_mask)) {
phymode = ath11k_mac_get_phymode_he(ar, sta);
} else if (sta->vht_cap.vht_supported &&
!ath11k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
@@ -1702,11 +2353,12 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
ath11k_peer_assoc_h_basic(ar, vif, sta, arg);
ath11k_peer_assoc_h_crypto(ar, vif, sta, arg);
ath11k_peer_assoc_h_rates(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_ht(ar, vif, sta, arg);
ath11k_peer_assoc_h_vht(ar, vif, sta, arg);
ath11k_peer_assoc_h_he(ar, vif, sta, arg);
+ ath11k_peer_assoc_h_he_6ghz(ar, vif, sta, arg);
ath11k_peer_assoc_h_qos(ar, vif, sta, arg);
- ath11k_peer_assoc_h_phymode(ar, vif, sta, arg);
ath11k_peer_assoc_h_smps(sta, arg);
/* TODO: amsdu_disable req? */
@@ -1714,15 +2366,20 @@ static void ath11k_peer_assoc_prepare(struct ath11k *ar,
static int ath11k_setup_peer_smps(struct ath11k *ar, struct ath11k_vif *arvif,
const u8 *addr,
- const struct ieee80211_sta_ht_cap *ht_cap)
+ const struct ieee80211_sta_ht_cap *ht_cap,
+ u16 he_6ghz_capa)
{
int smps;
- if (!ht_cap->ht_supported)
+ if (!ht_cap->ht_supported && !he_6ghz_capa)
return 0;
- smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
- smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ if (ht_cap->ht_supported) {
+ smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
+ smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
+ } else {
+ smps = FIELD_GET(IEEE80211_HE_6GHZ_CAP_SM_PS, he_6ghz_capa);
+ }
if (smps >= ARRAY_SIZE(ath11k_smps_map))
return -EINVAL;
@@ -1775,7 +2432,8 @@ static void ath11k_bss_assoc(struct ieee80211_hw *hw,
}
ret = ath11k_setup_peer_smps(ar, arvif, bss_conf->bssid,
- &ap_sta->ht_cap);
+ &ap_sta->ht_cap,
+ le16_to_cpu(ap_sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -1956,7 +2614,7 @@ static int ath11k_mac_config_obss_pd(struct ath11k *ar,
/* Set and enable SRG/non-SRG OBSS PD Threshold */
param_id = WMI_PDEV_PARAM_SET_CMD_OBSS_PD_THRESHOLD;
- if (test_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags)) {
+ if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
ret = ath11k_wmi_pdev_set_param(ar, param_id, 0, pdev_id);
if (ret)
ath11k_warn(ar->ab,
@@ -2383,18 +3041,21 @@ void __ath11k_mac_scan_finish(struct ath11k *ar)
break;
case ATH11K_SCAN_RUNNING:
case ATH11K_SCAN_ABORTING:
+ if (ar->scan.is_roc && ar->scan.roc_notify)
+ ieee80211_remain_on_channel_expired(ar->hw);
+ fallthrough;
+ case ATH11K_SCAN_STARTING:
if (!ar->scan.is_roc) {
struct cfg80211_scan_info info = {
- .aborted = (ar->scan.state ==
- ATH11K_SCAN_ABORTING),
+ .aborted = ((ar->scan.state ==
+ ATH11K_SCAN_ABORTING) ||
+ (ar->scan.state ==
+ ATH11K_SCAN_STARTING)),
};
ieee80211_scan_completed(ar->hw, &info);
- } else if (ar->scan.roc_notify) {
- ieee80211_remain_on_channel_expired(ar->hw);
}
- fallthrough;
- case ATH11K_SCAN_STARTING:
+
ar->scan.state = ATH11K_SCAN_IDLE;
ar->scan_channel = NULL;
ar->scan.roc_freq = 0;
@@ -2887,6 +3548,20 @@ ath11k_mac_bitrate_mask_num_vht_rates(struct ath11k *ar,
}
static int
+ath11k_mac_bitrate_mask_num_he_rates(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int num_rates = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+ num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+ return num_rates;
+}
+
+static int
ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
struct ieee80211_sta *sta,
const struct cfg80211_bitrate_mask *mask,
@@ -2914,6 +3589,10 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return -EINVAL;
}
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
"Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
sta->addr);
@@ -2932,6 +3611,57 @@ ath11k_mac_set_peer_vht_fixed_rate(struct ath11k_vif *arvif,
return ret;
}
+static int
+ath11k_mac_set_peer_he_fixed_rate(struct ath11k_vif *arvif,
+ struct ieee80211_sta *sta,
+ const struct cfg80211_bitrate_mask *mask,
+ enum nl80211_band band)
+{
+ struct ath11k *ar = arvif->ar;
+ u8 he_rate, nss;
+ u32 rate_code;
+ int ret, i;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ nss = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+ nss = i + 1;
+ he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+ }
+ }
+
+ if (!nss) {
+ ath11k_warn(ar->ab, "No single he fixed rate found to set for %pM",
+ sta->addr);
+ return -EINVAL;
+ }
+
+ /* Avoid updating invalid nss as fixed rate */
+ if (nss > sta->rx_nss)
+ return -EINVAL;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac setting fixed he rate for peer %pM, device will not switch to any other selected rates",
+ sta->addr);
+
+ rate_code = ATH11K_HW_RATE_CODE(he_rate, nss - 1,
+ WMI_RATE_PREAMBLE_HE);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ rate_code);
+ if (ret)
+ ath11k_warn(ar->ab,
+ "failed to update sta %pM fixed rate %d: %d\n",
+ sta->addr, rate_code, ret);
+
+ return ret;
+}
+
static int ath11k_station_assoc(struct ath11k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -2943,7 +3673,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
struct cfg80211_chan_def def;
enum nl80211_band band;
struct cfg80211_bitrate_mask *mask;
- u8 num_vht_rates;
+ u8 num_vht_rates, num_he_rates;
lockdep_assert_held(&ar->conf_mutex);
@@ -2969,9 +3699,10 @@ static int ath11k_station_assoc(struct ath11k *ar,
}
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask);
- /* If single VHT rate is configured (by set_bitrate_mask()),
- * peer_assoc will disable VHT. This is now enabled by a peer specific
+ /* If single VHT/HE rate is configured (by set_bitrate_mask()),
+ * peer_assoc will disable VHT/HE. This is now enabled by a peer specific
* fixed param.
* Note that all other rates and NSS will be disabled for this peer.
*/
@@ -2980,6 +3711,11 @@ static int ath11k_station_assoc(struct ath11k *ar,
band);
if (ret)
return ret;
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ret = ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
+ if (ret)
+ return ret;
}
/* Re-assoc is run only to update supported rates for given station. It
@@ -2989,7 +3725,7 @@ static int ath11k_station_assoc(struct ath11k *ar,
return 0;
ret = ath11k_setup_peer_smps(ar, arvif, sta->addr,
- &sta->ht_cap);
+ &sta->ht_cap, le16_to_cpu(sta->he_6ghz_capa.capa));
if (ret) {
ath11k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
@@ -3050,8 +3786,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
u32 changed, bw, nss, smps;
- int err, num_vht_rates;
+ int err, num_vht_rates, num_he_rates;
const struct cfg80211_bitrate_mask *mask;
struct peer_assoc_params peer_arg;
@@ -3066,6 +3803,7 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
band = def.chan->band;
ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+ he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
spin_lock_bh(&ar->data_lock);
@@ -3081,8 +3819,9 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_lock(&ar->conf_mutex);
nss = max_t(u32, 1, nss);
- nss = min(nss, max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ nss = min(nss, max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
err = ath11k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
@@ -3118,6 +3857,8 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mask = &arvif->bitrate_mask;
num_vht_rates = ath11k_mac_bitrate_mask_num_vht_rates(ar, band,
mask);
+ num_he_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
/* Peer_assoc_prepare will reject vht rates in
* bitrate_mask if its not available in range format and
@@ -3133,11 +3874,25 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
if (sta->vht_cap.vht_supported && num_vht_rates == 1) {
ath11k_mac_set_peer_vht_fixed_rate(arvif, sta, mask,
band);
+ } else if (sta->he_cap.has_he && num_he_rates == 1) {
+ ath11k_mac_set_peer_he_fixed_rate(arvif, sta, mask,
+ band);
} else {
- /* If the peer is non-VHT or no fixed VHT rate
+ /* If the peer is non-VHT/HE or no fixed VHT/HE rate
* is provided in the new bitrate mask we set the
- * other rates using peer_assoc command.
+ * other rates using peer_assoc command. Also clear
+ * the peer fixed rate settings as it has higher priority
+ * than the peer assoc command.
*/
+ err = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_PARAM_FIXED_RATE,
+ WMI_FIXED_RATE_NONE);
+ if (err)
+ ath11k_warn(ar->ab,
+ "failed to disable peer fixed rate for sta %pM: %d\n",
+ sta->addr, err);
+
ath11k_peer_assoc_prepare(ar, arvif->vif, sta,
&peer_arg, true);
@@ -3155,6 +3910,31 @@ static void ath11k_sta_rc_update_wk(struct work_struct *wk)
mutex_unlock(&ar->conf_mutex);
}
+static void ath11k_sta_set_4addr_wk(struct work_struct *wk)
+{
+ struct ath11k *ar;
+ struct ath11k_vif *arvif;
+ struct ath11k_sta *arsta;
+ struct ieee80211_sta *sta;
+ int ret = 0;
+
+ arsta = container_of(wk, struct ath11k_sta, set_4addr_wk);
+ sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
+ arvif = arsta->arvif;
+ ar = arvif->ar;
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for peer %pM\n", sta->addr);
+
+ ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+ arvif->vdev_id,
+ WMI_PEER_USE_4ADDR, 1);
+
+ if (ret)
+ ath11k_warn(ar->ab, "failed to set peer %pM 4addr capability: %d\n",
+ sta->addr, ret);
+}
+
static int ath11k_mac_inc_num_stations(struct ath11k_vif *arvif,
struct ieee80211_sta *sta)
{
@@ -3234,11 +4014,13 @@ static int ath11k_mac_station_add(struct ath11k *ar,
}
if (ieee80211_vif_is_mesh(vif)) {
+ ath11k_dbg(ab, ATH11K_DBG_MAC,
+ "setting USE_4ADDR for mesh STA %pM\n", sta->addr);
ret = ath11k_wmi_set_peer_param(ar, sta->addr,
arvif->vdev_id,
WMI_PEER_USE_4ADDR, 1);
if (ret) {
- ath11k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
+ ath11k_warn(ab, "failed to set mesh STA %pM 4addr capability: %d\n",
sta->addr, ret);
goto free_tx_stats;
}
@@ -3291,8 +4073,10 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
/* cancel must be done outside the mutex to avoid deadlock */
if ((old_state == IEEE80211_STA_NONE &&
- new_state == IEEE80211_STA_NOTEXIST))
+ new_state == IEEE80211_STA_NOTEXIST)) {
cancel_work_sync(&arsta->update_wk);
+ cancel_work_sync(&arsta->set_4addr_wk);
+ }
mutex_lock(&ar->conf_mutex);
@@ -3301,6 +4085,7 @@ static int ath11k_mac_op_sta_state(struct ieee80211_hw *hw,
memset(arsta, 0, sizeof(*arsta));
arsta->arvif = arvif;
INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
+ INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
ret = ath11k_mac_station_add(ar, vif, sta);
if (ret)
@@ -3395,6 +4180,19 @@ out:
return ret;
}
+static void ath11k_mac_op_sta_set_4addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool enabled)
+{
+ struct ath11k *ar = hw->priv;
+ struct ath11k_sta *arsta = (struct ath11k_sta *)sta->drv_priv;
+
+ if (enabled && !arsta->use_4addr_set) {
+ ieee80211_queue_work(ar->hw, &arsta->set_4addr_wk);
+ arsta->use_4addr_set = true;
+ }
+}
+
static void ath11k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
@@ -3765,11 +4563,6 @@ ath11k_create_vht_cap(struct ath11k *ar, u32 rate_cap_tx_chainmask,
ath11k_set_vht_txbf_cap(ar, &vht_cap.cap);
- /* TODO: Enable back VHT160 mode once association issues are fixed */
- /* Disabling VHT160 and VHT80+80 modes */
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
- vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
-
rxmcs_map = 0;
txmcs_map = 0;
for (i = 0; i < 8; i++) {
@@ -3814,7 +4607,9 @@ static void ath11k_mac_setup_ht_vht_cap(struct ath11k *ar,
rate_cap_rx_chainmask);
}
- if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP && !ar->supports_6ghz) {
+ if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+ (ar->ab->hw_params.single_pdev_only ||
+ !ar->supports_6ghz)) {
band = &ar->mac.sbands[NL80211_BAND_5GHZ];
ht_cap = cap->band[NL80211_BAND_5GHZ].ht_cap_info;
if (ht_cap_info)
@@ -4313,6 +5108,7 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
+ struct ath11k_sta *arsta = NULL;
u32 info_flags = info->flags;
bool is_prb_rsp;
int ret;
@@ -4338,7 +5134,10 @@ static void ath11k_mac_op_tx(struct ieee80211_hw *hw,
return;
}
- ret = ath11k_dp_tx(ar, arvif, skb);
+ if (control->sta)
+ arsta = (struct ath11k_sta *)control->sta->drv_priv;
+
+ ret = ath11k_dp_tx(ar, arvif, arsta, skb);
if (ret) {
ath11k_warn(ar->ab, "failed to transmit frame %d\n", ret);
ieee80211_free_txskb(ar->hw, skb);
@@ -4639,7 +5438,8 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
if (ath11k_frame_mode != ATH11K_HW_TXRX_ETHERNET ||
(vif->type != NL80211_IFTYPE_STATION &&
vif->type != NL80211_IFTYPE_AP))
- vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
+ vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
+ IEEE80211_OFFLOAD_DECAP_ENABLED);
if (vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
param_value = ATH11K_HW_TXRX_ETHERNET;
@@ -4655,6 +5455,22 @@ static void ath11k_mac_op_update_vif_offload(struct ieee80211_hw *hw,
arvif->vdev_id, ret);
vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
}
+
+ param_id = WMI_VDEV_PARAM_RX_DECAP_TYPE;
+ if (vif->offload_flags & IEEE80211_OFFLOAD_DECAP_ENABLED)
+ param_value = ATH11K_HW_TXRX_ETHERNET;
+ else if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
+ param_value = ATH11K_HW_TXRX_RAW;
+ else
+ param_value = ATH11K_HW_TXRX_NATIVE_WIFI;
+
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ param_id, param_value);
+ if (ret) {
+ ath11k_warn(ab, "failed to set vdev %d rx decap mode: %d\n",
+ arvif->vdev_id, ret);
+ vif->offload_flags &= ~IEEE80211_OFFLOAD_DECAP_ENABLED;
+ }
}
static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
@@ -4683,8 +5499,8 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
}
if (ar->num_created_vdevs > (TARGET_NUM_VDEVS - 1)) {
- ath11k_warn(ab, "failed to create vdev, reached max vdev limit %d\n",
- TARGET_NUM_VDEVS);
+ ath11k_warn(ab, "failed to create vdev %u, reached max vdev limit %d\n",
+ ar->num_created_vdevs, TARGET_NUM_VDEVS);
ret = -EBUSY;
goto err;
}
@@ -4700,10 +5516,13 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+ arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_FORCE_SGI;
memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].ht_mcs));
memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+ memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+ sizeof(arvif->bitrate_mask.control[i].he_mcs));
}
bit = __ffs64(ab->free_vdev_map);
@@ -4724,6 +5543,7 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
break;
case NL80211_IFTYPE_MONITOR:
arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
+ ar->monitor_vdev_id = bit;
break;
default:
WARN_ON(1);
@@ -4825,6 +5645,9 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
goto err_peer_del;
}
break;
+ case WMI_VDEV_TYPE_MONITOR:
+ set_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ break;
default:
break;
}
@@ -4845,6 +5668,16 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
ath11k_dp_vdev_tx_attach(ar, arvif);
+ if (vif->type != NL80211_IFTYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_create(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to create monitor vdev during add interface: %d",
+ ret);
+ goto err_peer_del;
+ }
+ }
+
mutex_unlock(&ar->conf_mutex);
return 0;
@@ -4942,6 +5775,18 @@ static void ath11k_mac_op_remove_interface(struct ieee80211_hw *hw,
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM deleted, vdev_id %d\n",
vif->addr, arvif->vdev_id);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
+ ar->monitor_vdev_id = -1;
+ } else if (test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags) &&
+ !test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_vdev_delete(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to delete vdev monitor during remove interface: %d",
+ ret);
+ }
+
err_vdev_del:
spin_lock_bh(&ar->data_lock);
list_del(&arvif->list);
@@ -4952,7 +5797,7 @@ err_vdev_del:
idr_for_each(&ar->txmgmt_idr,
ath11k_mac_vif_txmgmt_idr_remove, vif);
- for (i = 0; i < DP_TCL_NUM_RING_MAX; i++) {
+ for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
spin_lock_bh(&ab->dp.tx_ring[i].tx_idr_lock);
idr_for_each(&ab->dp.tx_ring[i].txbuf_idr,
ath11k_mac_vif_unref, vif);
@@ -4961,7 +5806,6 @@ err_vdev_del:
/* Recalc txpower for remaining vdev */
ath11k_mac_txpower_recalc(ar);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
/* TODO: recal traffic pause state based on the available vdevs */
@@ -4984,8 +5828,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
u64 multicast)
{
struct ath11k *ar = hw->priv;
- bool reset_flag = false;
- int ret = 0;
mutex_lock(&ar->conf_mutex);
@@ -4993,23 +5835,6 @@ static void ath11k_mac_op_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
ar->filter_flags = *total_flags;
- /* For monitor mode */
- reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
- ret = ath11k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
- if (!ret) {
- if (!reset_flag)
- set_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- else
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
- } else {
- ath11k_warn(ar->ab,
- "fail to set monitor filter: %d\n", ret);
- }
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
- "changed_flags:0x%x, total_flags:0x%x, reset_flag:%d\n",
- changed_flags, *total_flags, reset_flag);
-
mutex_unlock(&ar->conf_mutex);
}
@@ -5118,20 +5943,6 @@ static void ath11k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
mutex_unlock(&ar->conf_mutex);
}
-static inline int ath11k_mac_vdev_setup_sync(struct ath11k *ar)
-{
- lockdep_assert_held(&ar->conf_mutex);
-
- if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
- return -ESHUTDOWN;
-
- if (!wait_for_completion_timeout(&ar->vdev_setup_done,
- ATH11K_VDEV_SETUP_TIMEOUT_HZ))
- return -ETIMEDOUT;
-
- return ar->last_wmi_vdev_start_status ? -EINVAL : 0;
-}
-
static int
ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
const struct cfg80211_chan_def *chandef,
@@ -5214,7 +6025,9 @@ ath11k_mac_vdev_start_restart(struct ath11k_vif *arvif,
return ret;
}
- ar->num_started_vdevs++;
+ if (!restart)
+ ar->num_started_vdevs++;
+
ath11k_dbg(ab, ATH11K_DBG_MAC, "vdev %pM started, vdev_id %d\n",
arvif->vif->addr, arvif->vdev_id);
@@ -5342,12 +6155,16 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
struct ath11k_vif *arvif;
int ret;
int i;
+ bool monitor_vif = false;
lockdep_assert_held(&ar->conf_mutex);
for (i = 0; i < n_vifs; i++) {
arvif = (void *)vifs[i].vif->drv_priv;
+ if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR)
+ monitor_vif = true;
+
ath11k_dbg(ab, ATH11K_DBG_MAC,
"mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
arvif->vdev_id,
@@ -5368,6 +6185,8 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
arvif->vdev_id, ret);
continue;
}
+
+ ar->num_started_vdevs--;
}
/* All relevant vdevs are downed and associated channel resources
@@ -5405,6 +6224,24 @@ ath11k_mac_update_vif_chan(struct ath11k *ar,
continue;
}
}
+
+ /* Restart the internal monitor vdev on new channel */
+ if (!monitor_vif &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel update: %d",
+ ret);
+ return;
+ }
+ }
}
static void
@@ -5484,7 +6321,7 @@ static int ath11k_start_vdev_delay(struct ieee80211_hw *hw,
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
+ ret = ath11k_wmi_vdev_up(ar, arvif->vdev_id, 0, ar->mac_addr);
if (ret) {
ath11k_warn(ab, "failed put monitor up: %d\n", ret);
return ret;
@@ -5544,6 +6381,18 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
}
}
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+
+ arvif->is_started = true;
+ goto out;
+ }
+
ret = ath11k_mac_vdev_start(arvif, &ctx->def);
if (ret) {
ath11k_warn(ab, "failed to start vdev %i addr %pM on freq %d: %d\n",
@@ -5551,14 +6400,19 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
ctx->def.chan->center_freq, ret);
goto out;
}
- if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
- ret = ath11k_monitor_vdev_up(ar, arvif->vdev_id);
- if (ret)
- goto out;
- }
arvif->is_started = true;
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_start(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to start monitor during vif channel context assignment: %d",
+ ret);
+ goto out;
+ }
+ }
+
/* TODO: Setup ps and cts/rts protection */
ret = 0;
@@ -5592,6 +6446,20 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
ath11k_peer_find_by_addr(ab, ar->mac_addr))
ath11k_peer_delete(ar, arvif->vdev_id, ar->mac_addr);
+ if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
+ arvif->is_started = false;
+ mutex_unlock(&ar->conf_mutex);
+ return;
+ }
+
ret = ath11k_mac_vdev_stop(arvif);
if (ret)
ath11k_warn(ab, "failed to stop vdev %i: %d\n",
@@ -5603,6 +6471,16 @@ ath11k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ath11k_wmi_vdev_down(ar, arvif->vdev_id);
+ if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+ ar->num_started_vdevs == 1 &&
+ test_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags)) {
+ ret = ath11k_mac_monitor_stop(ar);
+ if (ret)
+ /* continue even if there's an error */
+ ath11k_warn(ar->ab, "failed to stop monitor during vif channel context unassignment: %d",
+ ret);
+ }
+
mutex_unlock(&ar->conf_mutex);
}
@@ -5720,9 +6598,26 @@ ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
return false;
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+ return false;
+
return num_rates == 1;
}
+static __le16
+ath11k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_80p80;
+
+ if (he_cap->he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+ return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
static bool
ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
enum nl80211_band band,
@@ -5731,8 +6626,10 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
{
struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+ u16 he_mcs_map = 0;
u8 ht_nss_mask = 0;
u8 vht_nss_mask = 0;
+ u8 he_nss_mask = 0;
int i;
/* No need to consider legacy here. Basic rates are always present
@@ -5759,7 +6656,20 @@ ath11k_mac_bitrate_mask_get_single_nss(struct ath11k *ar,
return false;
}
- if (ht_nss_mask != vht_nss_mask)
+ he_mcs_map = le16_to_cpu(ath11k_mac_get_tx_mcs_map(&sband->iftype_data->he_cap));
+
+ for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+ if (mask->control[band].he_mcs[i] == 0)
+ continue;
+
+ if (mask->control[band].he_mcs[i] ==
+ ath11k_mac_get_max_he_mcs_map(he_mcs_map, i))
+ he_nss_mask |= BIT(i);
+ else
+ return false;
+ }
+
+ if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
return false;
if (ht_nss_mask == 0)
@@ -5806,42 +6716,125 @@ ath11k_mac_get_single_legacy_rate(struct ath11k *ar,
return 0;
}
-static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
- u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath11k_mac_set_fixed_rate_gi_ltf(struct ath11k_vif *arvif, u8 he_gi, u8 he_ltf)
{
struct ath11k *ar = arvif->ar;
- u32 vdev_param;
int ret;
- lockdep_assert_held(&ar->conf_mutex);
+ /* WMI fixed-rate GI: 0.8 us = 0, 1.6 us = 2, 3.2 us = 3; nl80211 uses
+ * 0, 1, 2, so bump non-zero values by one. */
+ if (he_gi && he_gi != 0xFF)
+ he_gi += 1;
- ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
- arvif->vdev_id, rate, nss, sgi);
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ WMI_VDEV_PARAM_SGI, he_gi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set he gi %d: %d\n",
+ he_gi, ret);
+ return ret;
+ }
+ /* WMI LTF values start from 1, nl80211 values start from 0 */
+ if (he_ltf != 0xFF)
+ he_ltf += 1;
- vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, rate);
+ WMI_VDEV_PARAM_HE_LTF, he_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
- rate, ret);
+ ath11k_warn(ar->ab, "failed to set he ltf %d: %d\n",
+ he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_NSS;
+ return 0;
+}
+
+static int
+ath11k_mac_set_auto_rate_gi_ltf(struct ath11k_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+ struct ath11k *ar = arvif->ar;
+ int ret;
+ u32 he_ar_gi_ltf;
+
+ if (he_gi != 0xFF) {
+ switch (he_gi) {
+ case NL80211_RATE_INFO_HE_GI_0_8:
+ he_gi = WMI_AUTORATE_800NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_1_6:
+ he_gi = WMI_AUTORATE_1600NS_GI;
+ break;
+ case NL80211_RATE_INFO_HE_GI_3_2:
+ he_gi = WMI_AUTORATE_3200NS_GI;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he gi: %d\n", he_gi);
+ return -EINVAL;
+ }
+ }
+
+ if (he_ltf != 0xFF) {
+ switch (he_ltf) {
+ case NL80211_RATE_INFO_HE_1XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_1X;
+ break;
+ case NL80211_RATE_INFO_HE_2XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_2X;
+ break;
+ case NL80211_RATE_INFO_HE_4XLTF:
+ he_ltf = WMI_HE_AUTORATE_LTF_4X;
+ break;
+ default:
+ ath11k_warn(ar->ab, "invalid he ltf: %d\n", he_ltf);
+ return -EINVAL;
+ }
+ }
+
+ he_ar_gi_ltf = he_gi | he_ltf;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, nss);
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+ he_ar_gi_ltf);
if (ret) {
- ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
- nss, ret);
+ ath11k_warn(ar->ab,
+ "failed to set he autorate gi %u ltf %u: %d\n",
+ he_gi, he_ltf, ret);
return ret;
}
- vdev_param = WMI_VDEV_PARAM_SGI;
+ return 0;
+}
+
+static int ath11k_mac_set_rate_params(struct ath11k_vif *arvif,
+ u32 rate, u8 nss, u8 sgi, u8 ldpc,
+ u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+ struct ath11k *ar = arvif->ar;
+ u32 vdev_param;
+ int ret;
+
+ lockdep_assert_held(&ar->conf_mutex);
+
+ ath11k_dbg(ar->ab, ATH11K_DBG_MAC,
+ "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n",
+ arvif->vdev_id, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
+
+ if (!arvif->vif->bss_conf.he_support) {
+ vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, rate);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate param 0x%02x: %d\n",
+ rate, ret);
+ return ret;
+ }
+ }
+
+ vdev_param = WMI_VDEV_PARAM_NSS;
ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
- vdev_param, sgi);
+ vdev_param, nss);
if (ret) {
- ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
- sgi, ret);
+ ath11k_warn(ar->ab, "failed to set nss param %d: %d\n",
+ nss, ret);
return ret;
}
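Taken together, the two helpers added above translate the nl80211 GI/LTF selections into what the firmware is given: for a fixed HE rate the GI index skips the reserved value (0.8 us stays 0, 1.6 us becomes 2, 3.2 us becomes 3) and the LTF index becomes 1-based, while in autorate mode the WMI_AUTORATE_*/WMI_HE_AUTORATE_LTF_* codes are OR'd into one WMI_VDEV_PARAM_AUTORATE_MISC_CFG word. A condensed sketch of the fixed-rate mapping, assuming the standard nl80211 numbering (0.8/1.6/3.2 us GI = 0/1/2, 1x/2x/4x LTF = 0/1/2):

/* Sketch of the fixed-rate GI/LTF translation performed above; 0xFF means
 * "not set" and is passed through untouched.
 */
static void he_fixed_gi_ltf_to_wmi(unsigned char *gi, unsigned char *ltf)
{
	if (*gi != 0xFF && *gi)	/* 1.6 us -> 2, 3.2 us -> 3, 0.8 us stays 0 */
		*gi += 1;

	if (*ltf != 0xFF)	/* WMI LTF values are 1-based */
		*ltf += 1;
}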
@@ -5854,6 +6847,35 @@ static int ath11k_mac_set_fixed_rate_params(struct ath11k_vif *arvif,
return ret;
}
+ if (arvif->vif->bss_conf.he_support) {
+ if (he_fixed_rate) {
+ ret = ath11k_mac_set_fixed_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set fixed rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ } else {
+ ret = ath11k_mac_set_auto_rate_gi_ltf(arvif, he_gi,
+ he_ltf);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set auto rate gi ltf: %d\n",
+ ret);
+ return ret;
+ }
+ }
+ } else {
+ vdev_param = WMI_VDEV_PARAM_SGI;
+ ret = ath11k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+ vdev_param, sgi);
+ if (ret) {
+ ath11k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+ sgi, ret);
+ return ret;
+ }
+ }
+
return 0;
}
@@ -5882,6 +6904,31 @@ ath11k_mac_vht_mcs_range_present(struct ath11k *ar,
return true;
}
+static bool
+ath11k_mac_he_mcs_range_present(struct ath11k *ar,
+ enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ int i;
+ u16 he_mcs;
+
+ for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+ he_mcs = mask->control[band].he_mcs[i];
+
+ switch (he_mcs) {
+ case 0:
+ case BIT(8) - 1:
+ case BIT(10) - 1:
+ case BIT(12) - 1:
+ break;
+ default:
+ return false;
+ }
+ }
+
+ return true;
+}
+
static void ath11k_mac_set_bitrate_mask_iter(void *data,
struct ieee80211_sta *sta)
{
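The range check added above only accepts per-NSS HE masks that describe a contiguous MCS range starting at 0, which with the BIT() arithmetic is exactly four values per stream:

/* Accepted per-NSS he_mcs values in the bitrate mask (per the switch above):
 *   0x000  - stream not used
 *   0x0ff  - MCS 0-7   (BIT(8)  - 1)
 *   0x3ff  - MCS 0-9   (BIT(10) - 1)
 *   0xfff  - MCS 0-11  (BIT(12) - 1)
 * Anything else (a sparse or offset range) is rejected, e.g. a mask of
 * 0x3fe (MCS 1-9) makes ath11k_mac_he_mcs_range_present() return false.
 */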
@@ -5913,6 +6960,54 @@ static void ath11k_mac_disable_peer_fixed_rate(void *data,
sta->addr, ret);
}
+static bool
+ath11k_mac_validate_vht_he_fixed_rate_settings(struct ath11k *ar, enum nl80211_band band,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ bool he_fixed_rate = false, vht_fixed_rate = false;
+ struct ath11k_peer *peer, *tmp;
+ const u16 *vht_mcs_mask, *he_mcs_mask;
+ u8 vht_nss, he_nss;
+ bool ret = true;
+
+ vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
+
+ if (ath11k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+ vht_fixed_rate = true;
+
+ if (ath11k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+ he_fixed_rate = true;
+
+ if (!vht_fixed_rate && !he_fixed_rate)
+ return true;
+
+ vht_nss = ath11k_mac_max_vht_nss(vht_mcs_mask);
+ he_nss = ath11k_mac_max_he_nss(he_mcs_mask);
+
+ rcu_read_lock();
+ spin_lock_bh(&ar->ab->base_lock);
+ list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+ if (peer->sta) {
+ if (vht_fixed_rate && (!peer->sta->vht_cap.vht_supported ||
+ peer->sta->rx_nss < vht_nss)) {
+ ret = false;
+ goto out;
+ }
+ if (he_fixed_rate && (!peer->sta->he_cap.has_he ||
+ peer->sta->rx_nss < he_nss)) {
+ ret = false;
+ goto out;
+ }
+ }
+ }
+
+out:
+ spin_unlock_bh(&ar->ab->base_lock);
+ rcu_read_unlock();
+ return ret;
+}
+
static int
ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
@@ -5924,6 +7019,9 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
enum nl80211_band band;
const u8 *ht_mcs_mask;
const u16 *vht_mcs_mask;
+ const u16 *he_mcs_mask;
+ u8 he_ltf = 0;
+ u8 he_gi = 0;
u32 rate;
u8 nss;
u8 sgi;
@@ -5931,6 +7029,7 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
int single_nss;
int ret;
int num_rates;
+ bool he_fixed_rate = false;
if (ath11k_mac_vif_chan(vif, &def))
return -EPERM;
@@ -5938,12 +7037,16 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
band = def.chan->band;
ht_mcs_mask = mask->control[band].ht_mcs;
vht_mcs_mask = mask->control[band].vht_mcs;
+ he_mcs_mask = mask->control[band].he_mcs;
ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
sgi = mask->control[band].gi;
if (sgi == NL80211_TXRATE_FORCE_LGI)
return -EINVAL;
+ he_gi = mask->control[band].he_gi;
+ he_ltf = mask->control[band].he_ltf;
+
/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
* requires passing at least one of the used basic rates along with them.
* Fixed rate setting across different preambles (legacy, HT, VHT) is
@@ -5967,11 +7070,22 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
&single_nss)) {
rate = WMI_FIXED_RATE_NONE;
nss = single_nss;
+ mutex_lock(&ar->conf_mutex);
+ arvif->bitrate_mask = *mask;
+ ieee80211_iterate_stations_atomic(ar->hw,
+ ath11k_mac_set_bitrate_mask_iter,
+ arvif);
+ mutex_unlock(&ar->conf_mutex);
} else {
rate = WMI_FIXED_RATE_NONE;
+
+ if (!ath11k_mac_validate_vht_he_fixed_rate_settings(ar, band, mask))
+ ath11k_warn(ar->ab,
+ "could not update fixed rate settings to all peers due to mcs/nss incompatibility\n");
nss = min_t(u32, ar->num_tx_chains,
- max(ath11k_mac_max_ht_nss(ht_mcs_mask),
- ath11k_mac_max_vht_nss(vht_mcs_mask)));
+ max(max(ath11k_mac_max_ht_nss(ht_mcs_mask),
+ ath11k_mac_max_vht_nss(vht_mcs_mask)),
+ ath11k_mac_max_he_nss(he_mcs_mask)));
/* If multiple rates across different preambles are given
* we can reconfigure this info with all peers using PEER_ASSOC
@@ -6002,16 +7116,28 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
* RATEMASK CMD
*/
ath11k_warn(ar->ab,
- "Setting more than one MCS Value in bitrate mask not supported\n");
+ "setting %d mcs values in bitrate mask not supported\n",
+ num_rates);
return -EINVAL;
}
+ num_rates = ath11k_mac_bitrate_mask_num_he_rates(ar, band,
+ mask);
+ if (num_rates == 1)
+ he_fixed_rate = true;
+
+ if (!ath11k_mac_he_mcs_range_present(ar, band, mask) &&
+ num_rates > 1) {
+ ath11k_warn(ar->ab,
+ "Setting more than one HE MCS Value in bitrate mask not supported\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ar->conf_mutex);
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_disable_peer_fixed_rate,
arvif);
- mutex_lock(&ar->conf_mutex);
-
arvif->bitrate_mask = *mask;
ieee80211_iterate_stations_atomic(ar->hw,
ath11k_mac_set_bitrate_mask_iter,
@@ -6022,9 +7148,10 @@ ath11k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
- ret = ath11k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+ ret = ath11k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+ he_ltf, he_fixed_rate);
if (ret) {
- ath11k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+ ath11k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
arvif->vdev_id, ret);
}
@@ -6109,7 +7236,13 @@ static int ath11k_mac_op_get_survey(struct ieee80211_hw *hw, int idx,
if (!sband)
sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
+ if (sband && idx >= sband->n_channels) {
+ idx -= sband->n_channels;
+ sband = NULL;
+ }
+ if (!sband)
+ sband = hw->wiphy->bands[NL80211_BAND_6GHZ];
if (!sband || idx >= sband->n_channels) {
ret = -ENOENT;
goto exit;
@@ -6180,6 +7313,7 @@ static const struct ieee80211_ops ath11k_ops = {
.cancel_hw_scan = ath11k_mac_op_cancel_hw_scan,
.set_key = ath11k_mac_op_set_key,
.sta_state = ath11k_mac_op_sta_state,
+ .sta_set_4addr = ath11k_mac_op_sta_set_4addr,
.sta_set_txpwr = ath11k_mac_op_sta_set_txpwr,
.sta_rc_update = ath11k_mac_op_sta_rc_update,
.conf_tx = ath11k_mac_op_conf_tx,
@@ -6240,7 +7374,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
u32 supported_bands)
{
struct ieee80211_supported_band *band;
- struct ath11k_hal_reg_capabilities_ext *reg_cap;
+ struct ath11k_hal_reg_capabilities_ext *reg_cap, *temp_reg_cap;
void *channels;
u32 phy_id;
@@ -6250,6 +7384,7 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
ATH11K_NUM_CHANS);
reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
+ temp_reg_cap = reg_cap;
if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
channels = kmemdup(ath11k_2ghz_channels,
@@ -6268,11 +7403,11 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_2ghz_chan,
- reg_cap->high_2ghz_chan);
+ temp_reg_cap->low_2ghz_chan,
+ temp_reg_cap->high_2ghz_chan);
}
if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
@@ -6292,9 +7427,15 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
band->n_bitrates = ath11k_a_rates_size;
band->bitrates = ath11k_a_rates;
ar->hw->wiphy->bands[NL80211_BAND_6GHZ] = band;
+
+ if (ar->ab->hw_params.single_pdev_only) {
+ phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ }
+
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
if (reg_cap->low_5ghz_chan < ATH11K_MIN_6G_FREQ) {
@@ -6317,12 +7458,12 @@ static int ath11k_mac_setup_channels_rates(struct ath11k *ar,
if (ar->ab->hw_params.single_pdev_only) {
phy_id = ath11k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
- reg_cap = &ar->ab->hal_reg_cap[phy_id];
+ temp_reg_cap = &ar->ab->hal_reg_cap[phy_id];
}
ath11k_mac_update_ch_list(ar, band,
- reg_cap->low_5ghz_chan,
- reg_cap->high_5ghz_chan);
+ temp_reg_cap->low_5ghz_chan,
+ temp_reg_cap->high_5ghz_chan);
}
}
@@ -6367,7 +7508,9 @@ static int ath11k_mac_setup_iface_combinations(struct ath11k *ar)
combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80);
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_80P80) |
+ BIT(NL80211_CHAN_WIDTH_160);
ar->hw->wiphy->iface_combinations = combinations;
ar->hw->wiphy->n_iface_combinations = 1;
@@ -6505,8 +7648,16 @@ static int __ath11k_mac_register(struct ath11k *ar)
ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
- ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
- if (ht_cap & WMI_HT_CAP_ENABLED) {
+
+ if (ath11k_frame_mode == ATH11K_HW_TXRX_ETHERNET) {
+ ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+ ieee80211_hw_set(ar->hw, SUPPORTS_RX_DECAP_OFFLOAD);
+ }
+
+ if (cap->nss_ratio_enabled)
+ ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
+
+ if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(ar->hw, SUPPORTS_REORDERING_BUFFER);
@@ -6521,7 +7672,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
* for each band for a dual band capable radio. It will be tricky to
* handle it when the ht capability different for each band.
*/
- if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS)
+ if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || ar->supports_6ghz)
ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -6590,7 +7741,7 @@ static int __ath11k_mac_register(struct ath11k *ar)
ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
/* Apply the regd received during initialization */
- ret = ath11k_regd_update(ar, true);
+ ret = ath11k_regd_update(ar);
if (ret) {
ath11k_err(ar->ab, "ath11k regd update failed: %d\n", ret);
goto err_unregister_hw;
@@ -6631,6 +7782,10 @@ int ath11k_mac_register(struct ath11k_base *ab)
if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags))
return 0;
+ /* Initialize channel counters frequency value in hertz */
+ ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
@@ -6641,18 +7796,14 @@ int ath11k_mac_register(struct ath11k_base *ab)
ar->mac_addr[4] += i;
}
+ idr_init(&ar->txmgmt_idr);
+ spin_lock_init(&ar->txmgmt_idr_lock);
+
ret = __ath11k_mac_register(ar);
if (ret)
goto err_cleanup;
-
- idr_init(&ar->txmgmt_idr);
- spin_lock_init(&ar->txmgmt_idr_lock);
}
- /* Initialize channel counters frequency value in hertz */
- ab->cc_freq_hz = IPQ8074_CC_FREQ_HERTZ;
- ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
-
return 0;
err_cleanup:
@@ -6723,7 +7874,11 @@ int ath11k_mac_allocate(struct ath11k_base *ab)
INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
- clear_bit(ATH11K_FLAG_MONITOR_ENABLED, &ar->monitor_flags);
+
+ clear_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags);
+
+ ar->monitor_vdev_id = -1;
+ clear_bit(ATH11K_FLAG_MONITOR_VDEV_CREATED, &ar->monitor_flags);
}
return 0;
diff --git a/drivers/net/wireless/ath/ath11k/mac.h b/drivers/net/wireless/ath/ath11k/mac.h
index 4bc59bdaf244..254ca4acc8e8 100644
--- a/drivers/net/wireless/ath/ath11k/mac.h
+++ b/drivers/net/wireless/ath/ath11k/mac.h
@@ -115,6 +115,9 @@ struct ath11k_generic_iter {
#define WMI_MAX_SPATIAL_STREAM 3
#define ATH11K_CHAN_WIDTH_NUM 8
+#define ATH11K_BW_NSS_MAP_ENABLE BIT(31)
+#define ATH11K_PEER_RX_NSS_160MHZ GENMASK(2, 0)
+#define ATH11K_PEER_RX_NSS_80_80MHZ GENMASK(5, 3)
#define ATH11K_OBSS_PD_MAX_THRESHOLD -82
#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD -62
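The three new masks above are what mac.c uses to build peer_bw_rxnss_override for HE160/HE80+80 peers: bit 31 enables the override and (rx_nss - 1) goes into bits 0-2 for 160 MHz or bits 3-5 for 80+80 MHz. A sketch of the packing, with plain shifts standing in for FIELD_PREP():

/* Sketch of the peer_bw_rxnss_override layout used in mac.c. */
#define BW_NSS_MAP_ENABLE	(1u << 31)	/* ATH11K_BW_NSS_MAP_ENABLE    */
#define RX_NSS_160MHZ_SHIFT	0		/* ATH11K_PEER_RX_NSS_160MHZ   */
#define RX_NSS_80_80MHZ_SHIFT	3		/* ATH11K_PEER_RX_NSS_80_80MHZ */

static unsigned int pack_rxnss_override(unsigned int rx_nss, int is_160)
{
	unsigned int shift = is_160 ? RX_NSS_160MHZ_SHIFT : RX_NSS_80_80MHZ_SHIFT;

	return BW_NSS_MAP_ENABLE | ((rx_nss - 1) << shift);
}

/* e.g. a 2-stream HE160 peer: pack_rxnss_override(2, 1) == 0x80000001 */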
diff --git a/drivers/net/wireless/ath/ath11k/pci.c b/drivers/net/wireless/ath/ath11k/pci.c
index 5abb38cc3b55..3d353e7c9d5c 100644
--- a/drivers/net/wireless/ath/ath11k/pci.c
+++ b/drivers/net/wireless/ath/ath11k/pci.c
@@ -430,6 +430,8 @@ static void ath11k_pci_force_wake(struct ath11k_base *ab)
static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
+ mdelay(100);
+
if (power_on) {
ath11k_pci_enable_ltssm(ab);
ath11k_pci_clear_all_intrs(ab);
@@ -439,9 +441,9 @@ static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
}
ath11k_mhi_clear_vector(ab);
+ ath11k_pci_clear_dbg_registers(ab);
ath11k_pci_soc_global_reset(ab);
ath11k_mhi_set_mhictrl_reset(ab);
- ath11k_pci_clear_dbg_registers(ab);
}
int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
@@ -853,7 +855,32 @@ static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
}
}
-static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
+static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
+{
+ struct pci_dev *dev = ab_pci->pdev;
+ u16 control;
+
+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+
+ if (enable)
+ control |= PCI_MSI_FLAGS_ENABLE;
+ else
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+
+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, true);
+}
+
+static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
+{
+ ath11k_pci_msi_config(ab_pci, false);
+}
+
+static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
struct ath11k_base *ab = ab_pci->ab;
const struct ath11k_msi_config *msi_config = ab_pci->msi_config;
@@ -874,6 +901,7 @@ static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
else
return num_vectors;
}
+ ath11k_pci_msi_disable(ab_pci);
msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
if (!msi_desc) {
@@ -896,7 +924,7 @@ free_msi_vector:
return ret;
}
-static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
+static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
{
pci_free_irq_vectors(ab_pci->pdev);
}
@@ -1017,6 +1045,8 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
*/
ath11k_pci_aspm_disable(ab_pci);
+ ath11k_pci_msi_enable(ab_pci);
+
ret = ath11k_mhi_start(ab_pci);
if (ret) {
ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -1037,6 +1067,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
ath11k_pci_aspm_restore(ab_pci);
ath11k_pci_force_wake(ab_pci->ab);
+
+ ath11k_pci_msi_disable(ab_pci);
+
ath11k_mhi_stop(ab_pci);
clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
ath11k_pci_sw_reset(ab_pci->ab, false);
@@ -1261,7 +1294,7 @@ static int ath11k_pci_probe(struct pci_dev *pdev,
goto err_pci_free_region;
}
- ret = ath11k_pci_enable_msi(ab_pci);
+ ret = ath11k_pci_alloc_msi(ab_pci);
if (ret) {
ath11k_err(ab, "failed to enable msi: %d\n", ret);
goto err_pci_free_region;
@@ -1315,7 +1348,7 @@ err_mhi_unregister:
ath11k_mhi_unregister(ab_pci);
err_pci_disable_msi:
- ath11k_pci_disable_msi(ab_pci);
+ ath11k_pci_free_msi(ab_pci);
err_pci_free_region:
ath11k_pci_free_region(ab_pci);
@@ -1346,7 +1379,7 @@ qmi_fail:
ath11k_mhi_unregister(ab_pci);
ath11k_pci_free_irq(ab);
- ath11k_pci_disable_msi(ab_pci);
+ ath11k_pci_free_msi(ab_pci);
ath11k_pci_free_region(ab_pci);
ath11k_hal_srng_deinit(ab);
diff --git a/drivers/net/wireless/ath/ath11k/peer.c b/drivers/net/wireless/ath/ath11k/peer.c
index f49abefa9618..85471f8b3563 100644
--- a/drivers/net/wireless/ath/ath11k/peer.c
+++ b/drivers/net/wireless/ath/ath11k/peer.c
@@ -251,6 +251,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
struct ieee80211_sta *sta, struct peer_create_params *param)
{
struct ath11k_peer *peer;
+ struct ath11k_sta *arsta;
int ret;
lockdep_assert_held(&ar->conf_mutex);
@@ -319,6 +320,16 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
+ if (sta) {
+ arsta = (struct ath11k_sta *)sta->drv_priv;
+ arsta->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 0) |
+ FIELD_PREP(HTT_TCL_META_DATA_PEER_ID,
+ peer->peer_id);
+
+ /* set HTT extension valid bit to 0 by default */
+ arsta->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
+ }
+
ar->num_peers++;
spin_unlock_bh(&ar->ab->base_lock);
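The peer-create path above now seeds arsta->tcl_metadata so TX descriptors can carry the firmware peer ID directly; FIELD_PREP() places each value into its mask and the HTT-extension valid bit is cleared until the TX path opts in. A rough sketch of the idea with placeholder masks (the real HTT_TCL_META_DATA_* layouts live in the driver's dp headers and are not reproduced here):

/* Placeholder masks for illustration only; not the driver's actual layout. */
#define META_TYPE_MASK		0x00000003u	/* stands in for HTT_TCL_META_DATA_TYPE */
#define META_PEER_ID_MASK	0x0000fffcu	/* stands in for HTT_TCL_META_DATA_PEER_ID */
#define META_VALID_HTT		0x00010000u	/* stands in for HTT_TCL_META_DATA_VALID_HTT */

/* Minimal FIELD_PREP-style helper: shift the value to the mask's lowest bit. */
static unsigned int field_prep(unsigned int mask, unsigned int val)
{
	return (val * (mask & -mask)) & mask;
}

static unsigned int seed_tcl_metadata(unsigned int peer_id)
{
	unsigned int meta = field_prep(META_TYPE_MASK, 0) |
			    field_prep(META_PEER_ID_MASK, peer_id);

	return meta & ~META_VALID_HTT;	/* HTT extension not valid by default */
}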
diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c
index b5e34d670715..fa73118de6db 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.c
+++ b/drivers/net/wireless/ath/ath11k/qmi.c
@@ -951,6 +951,78 @@ static struct qmi_elem_info qmi_wlanfw_cap_resp_msg_v01_ei[] = {
num_macs),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ voltage_mv),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ time_freq_hz),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ otp_version),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_4_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(u32),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(struct qmi_wlanfw_cap_resp_msg_v01,
+ eeprom_read_timeout),
+ },
+ {
.data_type = QMI_EOTI,
.array_type = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
@@ -1770,7 +1842,7 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
chunk->vaddr = dma_alloc_coherent(ab->dev,
chunk->size,
&chunk->paddr,
- GFP_KERNEL);
+ GFP_KERNEL | __GFP_NOWARN);
if (!chunk->vaddr) {
if (ab->qmi.mem_seg_count <= ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT) {
ath11k_dbg(ab, ATH11K_DBG_QMI,
@@ -1846,8 +1918,8 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
memset(&req, 0, sizeof(req));
memset(&resp, 0, sizeof(resp));
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_cap_resp_msg_v01_ei, &resp);
+ ret = qmi_txn_init(&ab->qmi.handle, &txn, qmi_wlanfw_cap_resp_msg_v01_ei,
+ &resp);
if (ret < 0)
goto out;
@@ -1900,6 +1972,12 @@ static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
strlcpy(ab->qmi.target.fw_build_id, resp.fw_build_id,
sizeof(ab->qmi.target.fw_build_id));
+ if (resp.eeprom_read_timeout_valid) {
+ ab->qmi.target.eeprom_caldata =
+ resp.eeprom_read_timeout;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi cal data supported from eeprom\n");
+ }
+
ath11k_info(ab, "chip_id 0x%x chip_family 0x%x board_id 0x%x soc_id 0x%x\n",
ab->qmi.target.chip_id, ab->qmi.target.chip_family,
ab->qmi.target.board_id, ab->qmi.target.soc_id);
@@ -1917,98 +1995,73 @@ out:
return ret;
}
-static int
-ath11k_qmi_prepare_bdf_download(struct ath11k_base *ab, int type,
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req,
- void __iomem *bdf_addr)
-{
- const struct firmware *fw_entry;
- struct ath11k_board_data bd;
- u32 fw_size;
- int ret;
-
- switch (type) {
- case ATH11K_QMI_FILE_TYPE_BDF_GOLDEN:
- memset(&bd, 0, sizeof(bd));
-
- ret = ath11k_core_fetch_bdf(ab, &bd);
- if (ret) {
- ath11k_warn(ab, "failed to load board file: %d\n", ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- memcpy_toio(bdf_addr, bd.data, fw_size);
- ath11k_core_free_bdf(ab, &bd);
- break;
- case ATH11K_QMI_FILE_TYPE_CALDATA:
- fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
- if (IS_ERR(fw_entry)) {
- ret = PTR_ERR(fw_entry);
- ath11k_warn(ab, "failed to load %s: %d\n",
- ATH11K_DEFAULT_CAL_FILE, ret);
- return ret;
- }
-
- fw_size = min_t(u32, ab->hw_params.fw.board_size,
- fw_entry->size);
-
- memcpy_toio(bdf_addr + ATH11K_QMI_CALDATA_OFFSET,
- fw_entry->data, fw_size);
-
- release_firmware(fw_entry);
- break;
- default:
- return -EINVAL;
- }
-
- req->total_size = fw_size;
- return 0;
-}
-
-static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
+static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab,
+ const u8 *data, u32 len, u8 type)
{
struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
struct qmi_txn txn = {};
+ const u8 *temp = data;
void __iomem *bdf_addr = NULL;
- int type, ret;
+ int ret;
+ u32 remaining = len;
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
+
memset(&resp, 0, sizeof(resp));
- bdf_addr = ioremap(ab->hw_params.bdf_addr, ATH11K_QMI_BDF_MAX_SIZE);
- if (!bdf_addr) {
- ath11k_warn(ab, "failed ioremap for board file\n");
- ret = -EIO;
- goto out;
+ if (ab->bus_params.fixed_bdf_addr) {
+ bdf_addr = ioremap(ab->hw_params.bdf_addr, ab->hw_params.fw.board_size);
+ if (!bdf_addr) {
+ ath11k_warn(ab, "qmi ioremap error for bdf_addr\n");
+ ret = -EIO;
+ goto err_free_req;
+ }
}
- for (type = 0; type < ATH11K_QMI_MAX_FILE_TYPE; type++) {
+ while (remaining) {
req->valid = 1;
req->file_id_valid = 1;
req->file_id = ab->qmi.target.board_id;
req->total_size_valid = 1;
+ req->total_size = remaining;
req->seg_id_valid = 1;
- req->seg_id = type;
- req->data_valid = 0;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = 0;
- req->bdf_type_valid = 0;
+ req->data_valid = 1;
+ req->bdf_type = type;
+ req->bdf_type_valid = 1;
req->end_valid = 1;
- req->end = 1;
+ req->end = 0;
- ret = ath11k_qmi_prepare_bdf_download(ab, type, req, bdf_addr);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
+ req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
+ } else {
+ req->data_len = remaining;
+ req->end = 1;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ req->data_valid = 0;
+ req->end = 1;
+ req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ memcpy(req->data, temp, req->data_len);
+ }
+
+ if (ab->bus_params.fixed_bdf_addr) {
+ if (type == ATH11K_QMI_FILE_TYPE_CALDATA)
+ bdf_addr += ab->hw_params.fw.cal_offset;
+
+ memcpy_toio(bdf_addr, temp, len);
+ }
ret = qmi_txn_init(&ab->qmi.handle, &txn,
qmi_wlanfw_bdf_download_resp_msg_v01_ei,
&resp);
if (ret < 0)
- goto out_qmi_bdf;
+ goto err_iounmap;
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download req fixed addr type %d\n",
type);
@@ -2019,54 +2072,62 @@ static int ath11k_qmi_load_bdf_fixed_addr(struct ath11k_base *ab)
qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
if (ret < 0) {
qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ goto err_iounmap;
}
ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ret < 0) {
+ ath11k_warn(ab, "failed to wait board file download request: %d\n",
+ ret);
+ goto err_iounmap;
+ }
if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
ath11k_warn(ab, "board file download request failed: %d %d\n",
resp.resp.result, resp.resp.error);
ret = -EINVAL;
- goto out_qmi_bdf;
+ goto err_iounmap;
+ }
+
+ if (ab->bus_params.fixed_bdf_addr ||
+ type == ATH11K_QMI_FILE_TYPE_EEPROM) {
+ remaining = 0;
+ } else {
+ remaining -= req->data_len;
+ temp += req->data_len;
+ req->seg_id++;
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
+ remaining);
}
}
-out_qmi_bdf:
- iounmap(bdf_addr);
-out:
+err_iounmap:
+ if (ab->bus_params.fixed_bdf_addr)
+ iounmap(bdf_addr);
+
+err_free_req:
kfree(req);
+
return ret;
}
static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
{
- struct qmi_wlanfw_bdf_download_req_msg_v01 *req;
- struct qmi_wlanfw_bdf_download_resp_msg_v01 resp;
+ struct device *dev = ab->dev;
+ char filename[ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE];
+ const struct firmware *fw_entry;
struct ath11k_board_data bd;
- unsigned int remaining;
- struct qmi_txn txn = {};
- int ret;
- const u8 *temp;
- int bdf_type;
-
- req = kzalloc(sizeof(*req), GFP_KERNEL);
- if (!req)
- return -ENOMEM;
- memset(&resp, 0, sizeof(resp));
+ u32 fw_size, file_type;
+ int ret = 0, bdf_type;
+ const u8 *tmp;
memset(&bd, 0, sizeof(bd));
ret = ath11k_core_fetch_bdf(ab, &bd);
if (ret) {
- ath11k_warn(ab, "failed to fetch board file: %d\n", ret);
+ ath11k_warn(ab, "qmi failed to fetch board file: %d\n", ret);
goto out;
}
- temp = bd.data;
- remaining = bd.len;
-
if (bd.len >= SELFMAG && memcmp(bd.data, ELFMAG, SELFMAG) == 0)
bdf_type = ATH11K_QMI_BDF_TYPE_ELF;
else
@@ -2074,67 +2135,60 @@ static int ath11k_qmi_load_bdf_qmi(struct ath11k_base *ab)
ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf_type %d\n", bdf_type);
- while (remaining) {
- req->valid = 1;
- req->file_id_valid = 1;
- req->file_id = ab->qmi.target.board_id;
- req->total_size_valid = 1;
- req->total_size = bd.len;
- req->seg_id_valid = 1;
- req->data_valid = 1;
- req->data_len = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
- req->bdf_type = bdf_type;
- req->bdf_type_valid = 1;
- req->end_valid = 1;
- req->end = 0;
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, bd.len);
- if (remaining > QMI_WLANFW_MAX_DATA_SIZE_V01) {
- req->data_len = QMI_WLANFW_MAX_DATA_SIZE_V01;
- } else {
- req->data_len = remaining;
- req->end = 1;
- }
+ ret = ath11k_qmi_load_file_target_mem(ab, bd.data, fw_size, bdf_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load bdf file\n");
+ goto out;
+ }
- memcpy(req->data, temp, req->data_len);
+ /* QCA6390 does not support cal data, skip it */
+ if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF)
+ goto out;
- ret = qmi_txn_init(&ab->qmi.handle, &txn,
- qmi_wlanfw_bdf_download_resp_msg_v01_ei,
- &resp);
- if (ret < 0)
- goto out_qmi_bdf;
+ if (ab->qmi.target.eeprom_caldata) {
+ file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
+ tmp = filename;
+ fw_size = ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE;
+ } else {
+ file_type = ATH11K_QMI_FILE_TYPE_CALDATA;
- ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi bdf download request remaining %i\n",
- remaining);
+ /* cal-<bus>-<id>.bin */
+ snprintf(filename, sizeof(filename), "cal-%s-%s.bin",
+ ath11k_bus_str(ab->hif.bus), dev_name(dev));
+ fw_entry = ath11k_core_firmware_request(ab, filename);
+ if (!IS_ERR(fw_entry))
+ goto success;
- ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_V01,
- QMI_WLANFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_LEN,
- qmi_wlanfw_bdf_download_req_msg_v01_ei, req);
- if (ret < 0) {
- qmi_txn_cancel(&txn);
- goto out_qmi_bdf;
+ fw_entry = ath11k_core_firmware_request(ab, ATH11K_DEFAULT_CAL_FILE);
+ if (IS_ERR(fw_entry)) {
+ ret = PTR_ERR(fw_entry);
+ ath11k_warn(ab,
+ "qmi failed to load CAL data file:%s\n",
+ filename);
+ goto out;
}
+success:
+ fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
+ tmp = fw_entry->data;
+ }
- ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
- if (ret < 0)
- goto out_qmi_bdf;
-
- if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
- ath11k_warn(ab, "bdf download request failed: %d %d\n",
- resp.resp.result, resp.resp.error);
- ret = resp.resp.result;
- goto out_qmi_bdf;
- }
- remaining -= req->data_len;
- temp += req->data_len;
- req->seg_id++;
+ ret = ath11k_qmi_load_file_target_mem(ab, tmp, fw_size, file_type);
+ if (ret < 0) {
+ ath11k_warn(ab, "qmi failed to load caldata\n");
+ goto out_qmi_cal;
}
-out_qmi_bdf:
- ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi caldata type: %u\n", file_type);
+out_qmi_cal:
+ if (!ab->qmi.target.eeprom_caldata)
+ release_firmware(fw_entry);
out:
- kfree(req);
+ ath11k_core_free_bdf(ab, &bd);
+ ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi BDF download sequence completed\n");
+
return ret;
}
@@ -2519,10 +2573,7 @@ static int ath11k_qmi_event_load_bdf(struct ath11k_qmi *qmi)
return ret;
}
- if (ab->bus_params.fixed_bdf_addr)
- ret = ath11k_qmi_load_bdf_fixed_addr(ab);
- else
- ret = ath11k_qmi_load_bdf_qmi(ab);
+ ret = ath11k_qmi_load_bdf_qmi(ab);
if (ret < 0) {
ath11k_warn(ab, "failed to load board data file: %d\n", ret);
return ret;
@@ -2707,8 +2758,10 @@ static void ath11k_qmi_driver_event_work(struct work_struct *work)
list_del(&event->list);
spin_unlock(&qmi->event_lock);
- if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags))
+ if (test_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags)) {
+ kfree(event);
return;
+ }
switch (event->type) {
case ATH11K_QMI_EVENT_SERVER_ARRIVE:
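The qmi.c rework above folds the old fixed-address and QMI download paths into one helper, ath11k_qmi_load_file_target_mem(), which streams a blob (board file, caldata, or just a file name in the EEPROM case) to the target in bounded segments while tracking seg_id, data_len and the end flag. As a rough standalone sketch of that segmentation arithmetic only — the chunk size and helper names below are illustrative stand-ins, not the driver's QMI plumbing:

/* Illustrative segmentation sketch; MAX_SEG_LEN and send_segment() are
 * hypothetical stand-ins, not ath11k or QMI API.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SEG_LEN 4096	/* arbitrary example chunk size */

static void send_segment(uint32_t seg_id, const uint8_t *data,
			 uint32_t len, int end)
{
	/* placeholder: build and send one download request segment */
	(void)data;
	printf("seg %u: %u bytes%s\n", (unsigned)seg_id, (unsigned)len,
	       end ? " (last)" : "");
}

static void stream_blob(const uint8_t *blob, uint32_t total)
{
	uint32_t remaining = total, seg_id = 0;
	const uint8_t *pos = blob;

	while (remaining) {
		uint32_t chunk = remaining > MAX_SEG_LEN ? MAX_SEG_LEN : remaining;

		send_segment(seg_id++, pos, chunk, chunk == remaining);
		pos += chunk;
		remaining -= chunk;
	}
}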
diff --git a/drivers/net/wireless/ath/ath11k/qmi.h b/drivers/net/wireless/ath/ath11k/qmi.h
index 3d5930330703..3bb0f9ef7996 100644
--- a/drivers/net/wireless/ath/ath11k/qmi.h
+++ b/drivers/net/wireless/ath/ath11k/qmi.h
@@ -10,11 +10,9 @@
#include <linux/soc/qcom/qmi.h>
#define ATH11K_HOST_VERSION_STRING "WIN"
-#define ATH11K_QMI_WLANFW_TIMEOUT_MS 5000
+#define ATH11K_QMI_WLANFW_TIMEOUT_MS 10000
#define ATH11K_QMI_MAX_BDF_FILE_NAME_SIZE 64
#define ATH11K_QMI_CALDB_ADDRESS 0x4BA00000
-#define ATH11K_QMI_BDF_MAX_SIZE (256 * 1024)
-#define ATH11K_QMI_CALDATA_OFFSET (128 * 1024)
#define ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 128
#define ATH11K_QMI_WLFW_SERVICE_ID_V01 0x45
#define ATH11K_QMI_WLFW_SERVICE_VERS_V01 0x01
@@ -44,6 +42,7 @@ struct ath11k_base;
enum ath11k_qmi_file_type {
ATH11K_QMI_FILE_TYPE_BDF_GOLDEN,
ATH11K_QMI_FILE_TYPE_CALDATA,
+ ATH11K_QMI_FILE_TYPE_EEPROM,
ATH11K_QMI_MAX_FILE_TYPE,
};
@@ -104,6 +103,7 @@ struct target_info {
u32 board_id;
u32 soc_id;
u32 fw_version;
+ u32 eeprom_caldata;
char fw_build_timestamp[ATH11K_QMI_WLANFW_MAX_TIMESTAMP_LEN_V01 + 1];
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
char bdf_ext[ATH11K_QMI_BDF_EXT_STR_LENGTH];
@@ -135,7 +135,7 @@ struct ath11k_qmi {
wait_queue_head_t cold_boot_waitq;
};
-#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 189
+#define QMI_WLANFW_HOST_CAP_REQ_MSG_V01_MAX_LEN 261
#define QMI_WLANFW_HOST_CAP_REQ_V01 0x0034
#define QMI_WLANFW_HOST_CAP_RESP_MSG_V01_MAX_LEN 7
#define QMI_WLFW_HOST_CAP_RESP_V01 0x0034
@@ -285,7 +285,7 @@ struct qmi_wlanfw_fw_cold_cal_done_ind_msg_v01 {
};
#define QMI_WLANFW_CAP_REQ_MSG_V01_MAX_LEN 0
-#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 207
+#define QMI_WLANFW_CAP_RESP_MSG_V01_MAX_LEN 235
#define QMI_WLANFW_CAP_REQ_V01 0x0024
#define QMI_WLANFW_CAP_RESP_V01 0x0024
@@ -366,6 +366,14 @@ struct qmi_wlanfw_cap_resp_msg_v01 {
char fw_build_id[ATH11K_QMI_WLANFW_MAX_BUILD_ID_LEN_V01 + 1];
u8 num_macs_valid;
u8 num_macs;
+ u8 voltage_mv_valid;
+ u32 voltage_mv;
+ u8 time_freq_hz_valid;
+ u32 time_freq_hz;
+ u8 otp_version_valid;
+ u32 otp_version;
+ u8 eeprom_read_timeout_valid;
+ u32 eeprom_read_timeout;
};
struct qmi_wlanfw_cap_req_msg_v01 {
diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c
index e1a1df169034..a66b5bdd2167 100644
--- a/drivers/net/wireless/ath/ath11k/reg.c
+++ b/drivers/net/wireless/ath/ath11k/reg.c
@@ -97,7 +97,6 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
struct channel_param *ch;
enum nl80211_band band;
int num_channels = 0;
- int params_len;
int i, ret;
bands = hw->wiphy->bands;
@@ -117,10 +116,8 @@ int ath11k_reg_update_chan_list(struct ath11k *ar)
if (WARN_ON(!num_channels))
return -EINVAL;
- params_len = sizeof(struct scan_chan_list_params) +
- num_channels * sizeof(struct channel_param);
- params = kzalloc(params_len, GFP_KERNEL);
-
+ params = kzalloc(struct_size(params, ch_param, num_channels),
+ GFP_KERNEL);
if (!params)
return -ENOMEM;
@@ -198,7 +195,7 @@ static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
sizeof(struct ieee80211_reg_rule));
}
-int ath11k_regd_update(struct ath11k *ar, bool init)
+int ath11k_regd_update(struct ath11k *ar)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
@@ -209,7 +206,10 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
spin_lock_bh(&ab->base_lock);
- if (init) {
+ /* Prefer the latest regd update over default if it's available */
+ if (ab->new_regd[pdev_id]) {
+ regd = ab->new_regd[pdev_id];
+ } else {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
@@ -222,8 +222,6 @@ int ath11k_regd_update(struct ath11k *ar, bool init)
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
- } else {
- regd = ab->new_regd[pdev_id];
}
if (!regd) {
@@ -683,7 +681,7 @@ void ath11k_regd_update_work(struct work_struct *work)
regd_update_work);
int ret;
- ret = ath11k_regd_update(ar, false);
+ ret = ath11k_regd_update(ar);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, Host driver
diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h
index 65d56d44796f..5fb9dc03a74e 100644
--- a/drivers/net/wireless/ath/ath11k/reg.h
+++ b/drivers/net/wireless/ath/ath11k/reg.h
@@ -31,6 +31,6 @@ void ath11k_regd_update_work(struct work_struct *work);
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info, bool intersect);
-int ath11k_regd_update(struct ath11k *ar, bool init);
+int ath11k_regd_update(struct ath11k *ar);
int ath11k_reg_update_chan_list(struct ath11k *ar);
#endif
diff --git a/drivers/net/wireless/ath/ath11k/spectral.c b/drivers/net/wireless/ath/ath11k/spectral.c
index 1afe67759659..ac4da99b5577 100644
--- a/drivers/net/wireless/ath/ath11k/spectral.c
+++ b/drivers/net/wireless/ath/ath11k/spectral.c
@@ -11,22 +11,20 @@
#define ATH11K_SPECTRAL_EVENT_TIMEOUT_MS 1
#define ATH11K_SPECTRAL_DWORD_SIZE 4
-/* HW bug, expected BIN size is 2 bytes but HW report as 4 bytes */
-#define ATH11K_SPECTRAL_BIN_SIZE 4
-#define ATH11K_SPECTRAL_ATH11K_MIN_BINS 64
-#define ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS 32
-#define ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS 256
+#define ATH11K_SPECTRAL_MIN_BINS 32
+#define ATH11K_SPECTRAL_MIN_IB_BINS (ATH11K_SPECTRAL_MIN_BINS >> 1)
+#define ATH11K_SPECTRAL_MAX_IB_BINS(x) ((x)->hw_params.spectral.max_fft_bins >> 1)
#define ATH11K_SPECTRAL_SCAN_COUNT_MAX 4095
/* Max channel computed by sum of 2g and 5g band channels */
#define ATH11K_SPECTRAL_TOTAL_CHANNEL 41
#define ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL 70
-#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE (sizeof(struct fft_sample_ath11k) + \
- ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS)
+#define ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x) (sizeof(struct fft_sample_ath11k) + \
+ ATH11K_SPECTRAL_MAX_IB_BINS(x))
#define ATH11K_SPECTRAL_TOTAL_SAMPLE (ATH11K_SPECTRAL_TOTAL_CHANNEL * \
ATH11K_SPECTRAL_SAMPLES_PER_CHANNEL)
-#define ATH11K_SPECTRAL_SUB_BUFF_SIZE ATH11K_SPECTRAL_PER_SAMPLE_SIZE
+#define ATH11K_SPECTRAL_SUB_BUFF_SIZE(x) ATH11K_SPECTRAL_PER_SAMPLE_SIZE(x)
#define ATH11K_SPECTRAL_NUM_SUB_BUF ATH11K_SPECTRAL_TOTAL_SAMPLE
#define ATH11K_SPECTRAL_20MHZ 20
@@ -444,8 +442,8 @@ static ssize_t ath11k_write_file_spectral_bins(struct file *file,
if (kstrtoul(buf, 0, &val))
return -EINVAL;
- if (val < ATH11K_SPECTRAL_ATH11K_MIN_BINS ||
- val > SPECTRAL_ATH11K_MAX_NUM_BINS)
+ if (val < ATH11K_SPECTRAL_MIN_BINS ||
+ val > ar->ab->hw_params.spectral.max_fft_bins)
return -EINVAL;
if (!is_power_of_2(val))
@@ -581,12 +579,12 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
struct spectral_tlv *tlv;
int tlv_len, bin_len, num_bins;
u16 length, freq;
- u8 chan_width_mhz;
+ u8 chan_width_mhz, bin_sz;
int ret;
lockdep_assert_held(&ar->spectral.lock);
- if (!ab->hw_params.spectral_fft_sz) {
+ if (!ab->hw_params.spectral.fft_sz) {
ath11k_warn(ab, "invalid bin size type for hw rev %d\n",
ab->hw_rev);
return -EINVAL;
@@ -596,7 +594,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
tlv_len = FIELD_GET(SPECTRAL_TLV_HDR_LEN, __le32_to_cpu(tlv->header));
/* convert Dword into bytes */
tlv_len *= ATH11K_SPECTRAL_DWORD_SIZE;
- bin_len = tlv_len - (sizeof(*fft_report) - sizeof(*tlv));
+ bin_len = tlv_len - ab->hw_params.spectral.fft_hdr_len;
if (data_len < (bin_len + sizeof(*fft_report))) {
ath11k_warn(ab, "mismatch in expected bin len %d and data len %d\n",
@@ -604,12 +602,13 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
return -EINVAL;
}
- num_bins = bin_len / ATH11K_SPECTRAL_BIN_SIZE;
+ bin_sz = ab->hw_params.spectral.fft_sz + ab->hw_params.spectral.fft_pad_sz;
+ num_bins = bin_len / bin_sz;
/* Only In-band bins are useful to user for visualize */
num_bins >>= 1;
- if (num_bins < ATH11K_SPECTRAL_ATH11K_MIN_IB_BINS ||
- num_bins > ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS ||
+ if (num_bins < ATH11K_SPECTRAL_MIN_IB_BINS ||
+ num_bins > ATH11K_SPECTRAL_MAX_IB_BINS(ab) ||
!is_power_of_2(num_bins)) {
ath11k_warn(ab, "Invalid num of bins %d\n", num_bins);
return -EINVAL;
@@ -654,7 +653,7 @@ int ath11k_spectral_process_fft(struct ath11k *ar,
fft_sample->freq2 = __cpu_to_be16(freq);
ath11k_spectral_parse_fft(fft_sample->data, fft_report->bins, num_bins,
- ab->hw_params.spectral_fft_sz);
+ ab->hw_params.spectral.fft_sz);
fft_sample->max_exp = ath11k_spectral_get_max_exp(fft_sample->max_index,
search.peak_mag,
@@ -690,7 +689,7 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
goto unlock;
}
- sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_ATH11K_MAX_IB_BINS;
+ sample_sz = sizeof(*fft_sample) + ATH11K_SPECTRAL_MAX_IB_BINS(ab);
fft_sample = kmalloc(sample_sz, GFP_ATOMIC);
if (!fft_sample) {
ret = -ENOBUFS;
@@ -738,7 +737,8 @@ static int ath11k_spectral_process_data(struct ath11k *ar,
* is 4 DWORD size (16 bytes).
* Need to remove this workaround once HW bug fixed
*/
- tlv_len = sizeof(*summary) - sizeof(*tlv);
+ tlv_len = sizeof(*summary) - sizeof(*tlv) +
+ ab->hw_params.spectral.summary_pad_sz;
if (tlv_len < (sizeof(*summary) - sizeof(*tlv))) {
ath11k_warn(ab, "failed to parse spectral summary at bytes %d tlv_len:%d\n",
@@ -901,7 +901,7 @@ static inline int ath11k_spectral_debug_register(struct ath11k *ar)
ar->spectral.rfs_scan = relay_open("spectral_scan",
ar->debug.debugfs_pdev,
- ATH11K_SPECTRAL_SUB_BUFF_SIZE,
+ ATH11K_SPECTRAL_SUB_BUFF_SIZE(ar->ab),
ATH11K_SPECTRAL_NUM_SUB_BUF,
&rfs_scan_cb, NULL);
if (!ar->spectral.rfs_scan) {
@@ -962,7 +962,7 @@ int ath11k_spectral_init(struct ath11k_base *ab)
ab->wmi_ab.svc_map))
return 0;
- if (!ab->hw_params.spectral_fft_sz)
+ if (!ab->hw_params.spectral.fft_sz)
return 0;
for (i = 0; i < ab->num_radios; i++) {
diff --git a/drivers/net/wireless/ath/ath11k/trace.h b/drivers/net/wireless/ath/ath11k/trace.h
index d2d2a3cb0826..25d18e9d5b0b 100644
--- a/drivers/net/wireless/ath/ath11k/trace.h
+++ b/drivers/net/wireless/ath/ath11k/trace.h
@@ -79,14 +79,15 @@ TRACE_EVENT(ath11k_htt_ppdu_stats,
);
TRACE_EVENT(ath11k_htt_rxdesc,
- TP_PROTO(struct ath11k *ar, const void *data, size_t len),
+ TP_PROTO(struct ath11k *ar, const void *data, size_t log_type, size_t len),
- TP_ARGS(ar, data, len),
+ TP_ARGS(ar, data, log_type, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->ab->dev))
__string(driver, dev_driver_string(ar->ab->dev))
__field(u16, len)
+ __field(u16, log_type)
__dynamic_array(u8, rxdesc, len)
),
@@ -94,14 +95,16 @@ TRACE_EVENT(ath11k_htt_rxdesc,
__assign_str(device, dev_name(ar->ab->dev));
__assign_str(driver, dev_driver_string(ar->ab->dev));
__entry->len = len;
+ __entry->log_type = log_type;
memcpy(__get_dynamic_array(rxdesc), data, len);
),
TP_printk(
- "%s %s rxdesc len %d",
+ "%s %s rxdesc len %d type %d",
__get_str(driver),
__get_str(device),
- __entry->len
+ __entry->len,
+ __entry->log_type
)
);
diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c
index 6c253eae9d06..5ae2ef4680d6 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.c
+++ b/drivers/net/wireless/ath/ath11k/wmi.c
@@ -360,6 +360,10 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
pdev_cap->he_mcs = mac_phy_caps->he_supp_mcs_5g;
pdev_cap->tx_chain_mask = mac_phy_caps->tx_chain_mask_5g;
pdev_cap->rx_chain_mask = mac_phy_caps->rx_chain_mask_5g;
+ pdev_cap->nss_ratio_enabled =
+ WMI_NSS_RATIO_ENABLE_DISABLE_GET(mac_phy_caps->nss_ratio);
+ pdev_cap->nss_ratio_info =
+ WMI_NSS_RATIO_INFO_GET(mac_phy_caps->nss_ratio);
} else {
return -EINVAL;
}
@@ -403,18 +407,18 @@ ath11k_pull_mac_phy_cap_svc_ready_ext(struct ath11k_pdev_wmi *wmi_handle,
sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
sizeof(struct ath11k_ppe_threshold));
- }
- cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
- cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
- cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
- cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
- cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
- cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
- memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
- sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
- memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
- sizeof(struct ath11k_ppe_threshold));
+ cap_band = &pdev_cap->band[NL80211_BAND_6GHZ];
+ cap_band->max_bw_supported = mac_phy_caps->max_bw_supported_5g;
+ cap_band->ht_cap_info = mac_phy_caps->ht_cap_info_5g;
+ cap_band->he_cap_info[0] = mac_phy_caps->he_cap_info_5g;
+ cap_band->he_cap_info[1] = mac_phy_caps->he_cap_info_5g_ext;
+ cap_band->he_mcs = mac_phy_caps->he_supp_mcs_5g;
+ memcpy(cap_band->he_cap_phy_info, &mac_phy_caps->he_cap_phy_info_5g,
+ sizeof(u32) * PSOC_HOST_MAX_PHY_SIZE);
+ memcpy(&cap_band->he_ppet, &mac_phy_caps->he_ppet5g,
+ sizeof(struct ath11k_ppe_threshold));
+ }
return 0;
}
@@ -783,14 +787,26 @@ int ath11k_wmi_vdev_down(struct ath11k *ar, u8 vdev_id)
static void ath11k_wmi_put_wmi_channel(struct wmi_channel *chan,
struct wmi_vdev_start_req_arg *arg)
{
+ u32 center_freq1 = arg->channel.band_center_freq1;
+
memset(chan, 0, sizeof(*chan));
chan->mhz = arg->channel.freq;
chan->band_center_freq1 = arg->channel.band_center_freq1;
- if (arg->channel.mode == MODE_11AC_VHT80_80)
+
+ if (arg->channel.mode == MODE_11AX_HE160) {
+ if (arg->channel.freq > arg->channel.band_center_freq1)
+ chan->band_center_freq1 = center_freq1 + 40;
+ else
+ chan->band_center_freq1 = center_freq1 - 40;
+
+ chan->band_center_freq2 = arg->channel.band_center_freq1;
+
+ } else if (arg->channel.mode == MODE_11AC_VHT80_80) {
chan->band_center_freq2 = arg->channel.band_center_freq2;
- else
+ } else {
chan->band_center_freq2 = 0;
+ }
chan->info |= FIELD_PREP(WMI_CHAN_INFO_MODE, arg->channel.mode);
if (arg->channel.passive)
@@ -868,6 +884,8 @@ int ath11k_wmi_vdev_start(struct ath11k *ar, struct wmi_vdev_start_req_arg *arg,
}
cmd->flags |= WMI_VDEV_START_LDPC_RX_ENABLED;
+ if (test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags))
+ cmd->flags |= WMI_VDEV_START_HW_ENCRYPTION_DISABLED;
ptr = skb->data + sizeof(*cmd);
chan = ptr;
@@ -1339,6 +1357,7 @@ int ath11k_wmi_pdev_bss_chan_info_request(struct ath11k *ar,
WMI_TAG_PDEV_BSS_CHAN_INFO_REQUEST) |
FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
cmd->req_type = type;
+ cmd->pdev_id = ar->pdev->pdev_id;
ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
"WMI bss chan info req type %d\n", type);
@@ -1903,8 +1922,8 @@ int ath11k_wmi_send_peer_assoc_cmd(struct ath11k *ar,
FIELD_PREP(WMI_TLV_LEN,
sizeof(*he_mcs) - TLV_HDR_SIZE);
- he_mcs->rx_mcs_set = param->peer_he_rx_mcs_set[i];
- he_mcs->tx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->rx_mcs_set = param->peer_he_tx_mcs_set[i];
+ he_mcs->tx_mcs_set = param->peer_he_rx_mcs_set[i];
ptr += sizeof(*he_mcs);
}
@@ -2285,7 +2304,7 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
u16 num_send_chans, num_sends = 0, max_chan_limit = 0;
u32 *reg1, *reg2;
- tchan_info = &chan_list->ch_param[0];
+ tchan_info = chan_list->ch_param;
while (chan_list->nallchans) {
len = sizeof(*cmd) + TLV_HDR_SIZE;
max_chan_limit = (wmi->wmi_ab->max_msg_len[ar->pdev_idx] - len) /
@@ -2352,6 +2371,8 @@ int ath11k_wmi_send_scan_chan_list_cmd(struct ath11k *ar,
chan_info->info |= WMI_CHAN_INFO_QUARTER_RATE;
if (tchan_info->psc_channel)
chan_info->info |= WMI_CHAN_INFO_PSC;
+ if (tchan_info->dfs_set)
+ chan_info->info |= WMI_CHAN_INFO_DFS;
chan_info->info |= FIELD_PREP(WMI_CHAN_INFO_MODE,
tchan_info->phy_mode);
@@ -3495,7 +3516,7 @@ ath11k_wmi_copy_resource_config(struct wmi_resource_config *wmi_cfg,
wmi_cfg->bpf_instruction_size = tg_cfg->bpf_instruction_size;
wmi_cfg->max_bssid_rx_filters = tg_cfg->max_bssid_rx_filters;
wmi_cfg->use_pdev_id = tg_cfg->use_pdev_id;
- wmi_cfg->flag1 = tg_cfg->atf_config;
+ wmi_cfg->flag1 = tg_cfg->flag1;
wmi_cfg->peer_map_unmap_v2_support = tg_cfg->peer_map_unmap_v2_support;
wmi_cfg->sched_params = tg_cfg->sched_params;
wmi_cfg->twt_ap_pdev_count = tg_cfg->twt_ap_pdev_count;
@@ -4046,8 +4067,8 @@ static int ath11k_wmi_tlv_mac_phy_caps_parse(struct ath11k_base *soc,
len = min_t(u16, len, sizeof(struct wmi_mac_phy_capabilities));
if (!svc_rdy_ext->n_mac_phy_caps) {
- svc_rdy_ext->mac_phy_caps = kzalloc((svc_rdy_ext->tot_phy_id) * len,
- GFP_ATOMIC);
+ svc_rdy_ext->mac_phy_caps = kcalloc(svc_rdy_ext->tot_phy_id,
+ len, GFP_ATOMIC);
if (!svc_rdy_ext->mac_phy_caps)
return -ENOMEM;
}
@@ -4447,8 +4468,8 @@ static struct cur_reg_rule
struct cur_reg_rule *reg_rule_ptr;
u32 count;
- reg_rule_ptr = kzalloc((num_reg_rules * sizeof(*reg_rule_ptr)),
- GFP_ATOMIC);
+ reg_rule_ptr = kcalloc(num_reg_rules, sizeof(*reg_rule_ptr),
+ GFP_ATOMIC);
if (!reg_rule_ptr)
return NULL;
@@ -5234,9 +5255,11 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->hw_queued = src->hw_queued;
dst->hw_reaped = src->hw_reaped;
dst->underrun = src->underrun;
+ dst->hw_paused = src->hw_paused;
dst->tx_abort = src->tx_abort;
dst->mpdus_requeued = src->mpdus_requeued;
dst->tx_ko = src->tx_ko;
+ dst->tx_xretry = src->tx_xretry;
dst->data_rc = src->data_rc;
dst->self_triggers = src->self_triggers;
dst->sw_retry_failure = src->sw_retry_failure;
@@ -5247,6 +5270,16 @@ ath11k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->stateless_tid_alloc_failure = src->stateless_tid_alloc_failure;
dst->phy_underrun = src->phy_underrun;
dst->txop_ovf = src->txop_ovf;
+ dst->seq_posted = src->seq_posted;
+ dst->seq_failed_queueing = src->seq_failed_queueing;
+ dst->seq_completed = src->seq_completed;
+ dst->seq_restarted = src->seq_restarted;
+ dst->mu_seq_posted = src->mu_seq_posted;
+ dst->mpdus_sw_flush = src->mpdus_sw_flush;
+ dst->mpdus_hw_filter = src->mpdus_hw_filter;
+ dst->mpdus_truncated = src->mpdus_truncated;
+ dst->mpdus_ack_failed = src->mpdus_ack_failed;
+ dst->mpdus_expired = src->mpdus_expired;
}
static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
@@ -5266,6 +5299,7 @@ static void ath11k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
dst->phy_errs = src->phy_errs;
dst->phy_err_drop = src->phy_err_drop;
dst->mpdu_errs = src->mpdu_errs;
+ dst->rx_ovfl_errs = src->rx_ovfl_errs;
}
static void
@@ -5503,11 +5537,15 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Num HW Paused", pdev->hw_paused);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requeued", pdev->mpdus_requeued);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
- "Excessive retries", pdev->tx_ko);
+ "PPDU OK", pdev->tx_ko);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Excessive retries", pdev->tx_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
@@ -5531,6 +5569,26 @@ ath11k_wmi_fw_pdev_tx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MPDU is more than txop limit", pdev->txop_ovf);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences posted", pdev->seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num seq failed queueing ", pdev->seq_failed_queueing);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences completed ", pdev->seq_completed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num sequences restarted ", pdev->seq_restarted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MU sequences posted ", pdev->mu_seq_posted);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS SW flushed ", pdev->mpdus_sw_flush);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS HW filtered ", pdev->mpdus_hw_filter);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS truncated ", pdev->mpdus_truncated);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS ACK failed ", pdev->mpdus_ack_failed);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+ "Num of MPDUS expired ", pdev->mpdus_expired);
*length = len;
}
@@ -5575,6 +5633,8 @@ ath11k_wmi_fw_pdev_rx_stats_fill(const struct ath11k_fw_stats_pdev *pdev,
"PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+ len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+ "Overflow errors", pdev->rx_ovfl_errs);
*length = len;
}
@@ -5792,6 +5852,17 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
pdev_idx = reg_info->phy_id;
+ /* Avoid default reg rule updates sent during FW recovery if
+ * it is already available
+ */
+ spin_lock(&ab->base_lock);
+ if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags) &&
+ ab->default_regd[pdev_idx]) {
+ spin_unlock(&ab->base_lock);
+ goto mem_free;
+ }
+ spin_unlock(&ab->base_lock);
+
if (pdev_idx >= ab->num_radios) {
/* Process the event for phy0 only if single_pdev_only
* is true. If pdev_idx is valid but not 0, discard the
@@ -5829,10 +5900,10 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
}
spin_lock(&ab->base_lock);
- if (test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)) {
- /* Once mac is registered, ar is valid and all CC events from
- * fw is considered to be received due to user requests
- * currently.
+ if (ab->default_regd[pdev_idx]) {
+ /* The initial rules from FW after WMI Init are used to build
+ * the default regd. From then on, any rules updated for
+ * the pdev could be due to user reg changes.
* Free previously built regd before assigning the newly
* generated regd to ar. NULL pointer handling will be
* taken care by kfree itself.
@@ -5842,13 +5913,9 @@ static int ath11k_reg_chan_list_event(struct ath11k_base *ab, struct sk_buff *sk
ab->new_regd[pdev_idx] = regd;
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
} else {
- /* Multiple events for the same *ar is not expected. But we
- * can still clear any previously stored default_regd if we
- * are receiving this event for the same radio by mistake.
- * NULL pointer handling will be taken care by kfree itself.
+ /* This regd would be applied during mac registration and is
+ * held constant throughout for regd intersection purpose
*/
- kfree(ab->default_regd[pdev_idx]);
- /* This regd would be applied during mac registration */
ab->default_regd[pdev_idx] = regd;
}
ab->dfs_region = reg_info->dfs_region;
@@ -6119,8 +6186,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
- if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ) {
+ if (rx_ev.chan_freq >= ATH11K_MIN_6G_FREQ &&
+ rx_ev.chan_freq <= ATH11K_MAX_6G_FREQ) {
status->band = NL80211_BAND_6GHZ;
+ status->freq = rx_ev.chan_freq;
} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
status->band = NL80211_BAND_2GHZ;
} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH11K_MAX_5G_CHAN) {
@@ -6141,8 +6210,10 @@ static void ath11k_mgmt_rx_event(struct ath11k_base *ab, struct sk_buff *skb)
sband = &ar->mac.sbands[status->band];
- status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
- status->band);
+ if (status->band != NL80211_BAND_6GHZ)
+ status->freq = ieee80211_channel_to_frequency(rx_ev.channel,
+ status->band);
+
status->signal = rx_ev.snr + ATH11K_DEFAULT_NOISE_FLOOR;
status->rate_idx = ath11k_mac_bitrate_to_idx(sband, rx_ev.rate / 100);
@@ -6220,8 +6291,9 @@ exit:
rcu_read_unlock();
}
-static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
- u32 vdev_id)
+static struct ath11k *ath11k_get_ar_on_scan_state(struct ath11k_base *ab,
+ u32 vdev_id,
+ enum ath11k_scan_state state)
{
int i;
struct ath11k_pdev *pdev;
@@ -6233,7 +6305,7 @@ static struct ath11k *ath11k_get_ar_on_scan_abort(struct ath11k_base *ab,
ar = pdev->ar;
spin_lock_bh(&ar->data_lock);
- if (ar->scan.state == ATH11K_SCAN_ABORTING &&
+ if (ar->scan.state == state &&
ar->scan.vdev_id == vdev_id) {
spin_unlock_bh(&ar->data_lock);
return ar;
@@ -6263,10 +6335,15 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
* aborting scan's vdev id matches this event info.
*/
if (scan_ev.event_type == WMI_SCAN_EVENT_COMPLETED &&
- scan_ev.reason == WMI_SCAN_REASON_CANCELLED)
- ar = ath11k_get_ar_on_scan_abort(ab, scan_ev.vdev_id);
- else
+ scan_ev.reason == WMI_SCAN_REASON_CANCELLED) {
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_ABORTING);
+ if (!ar)
+ ar = ath11k_get_ar_on_scan_state(ab, scan_ev.vdev_id,
+ ATH11K_SCAN_RUNNING);
+ } else {
ar = ath11k_mac_get_ar_by_vdev_id(ab, scan_ev.vdev_id);
+ }
if (!ar) {
ath11k_warn(ab, "Received scan event for unknown vdev");
@@ -6301,6 +6378,8 @@ static void ath11k_scan_event(struct ath11k_base *ab, struct sk_buff *skb)
ath11k_wmi_event_scan_start_failed(ar);
break;
case WMI_SCAN_EVENT_DEQUEUED:
+ __ath11k_mac_scan_finish(ar);
+ break;
case WMI_SCAN_EVENT_PREEMPTED:
case WMI_SCAN_EVENT_RESTARTED:
case WMI_SCAN_EVENT_FOREIGN_CHAN_EXIT:
@@ -7065,6 +7144,7 @@ static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
case WMI_TWT_ENABLE_EVENTID:
case WMI_TWT_DISABLE_EVENTID:
case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+ case WMI_PEER_CREATE_CONF_EVENTID:
ath11k_dbg(ab, ATH11K_DBG_WMI,
"ignoring unsupported event 0x%x\n", id);
break;
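Two hunks in wmi.c above convert open-coded kzalloc(count * size, ...) calls to kcalloc(count, size, ...). The two-argument form exists so the multiplication is overflow-checked inside the allocator instead of at each call site. A hedged userspace analogue of that check, assuming the GCC/Clang __builtin_mul_overflow() builtin:

/* Illustrative only: overflow-checked, zeroed array allocation in the
 * spirit of kcalloc(); not kernel code.
 */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

static void *zalloc_array_checked(size_t count, size_t size)
{
	size_t bytes;
	void *p;

	/* refuse the request if count * size would overflow size_t */
	if (__builtin_mul_overflow(count, size, &bytes))
		return NULL;

	p = malloc(bytes);
	if (p)
		memset(p, 0, bytes);
	return p;
}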
diff --git a/drivers/net/wireless/ath/ath11k/wmi.h b/drivers/net/wireless/ath/ath11k/wmi.h
index d35c47e0b19d..0584e68e7593 100644
--- a/drivers/net/wireless/ath/ath11k/wmi.h
+++ b/drivers/net/wireless/ath/ath11k/wmi.h
@@ -119,6 +119,22 @@ enum {
WMI_HOST_WLAN_2G_5G_CAP = 0x3,
};
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+ /* HE LTF related configuration */
+ WMI_HE_AUTORATE_LTF_1X = BIT(0),
+ WMI_HE_AUTORATE_LTF_2X = BIT(1),
+ WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+ /* HE GI related configuration */
+ WMI_AUTORATE_400NS_GI = BIT(8),
+ WMI_AUTORATE_800NS_GI = BIT(9),
+ WMI_AUTORATE_1600NS_GI = BIT(10),
+ WMI_AUTORATE_3200NS_GI = BIT(11),
+};
+
/*
* wmi command groups.
*/
@@ -647,6 +663,9 @@ enum wmi_tlv_event_id {
WMI_PEER_RESERVED9_EVENTID,
WMI_PEER_RESERVED10_EVENTID,
WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ WMI_PEER_TX_PN_RESPONSE_EVENTID,
+ WMI_PEER_CFR_CAPTURE_EVENTID,
+ WMI_PEER_CREATE_CONF_EVENTID,
WMI_MGMT_RX_EVENTID = WMI_TLV_CMD(WMI_GRP_MGMT),
WMI_HOST_SWBA_EVENTID,
WMI_TBTTOFFSET_UPDATE_EVENTID,
@@ -1044,7 +1063,9 @@ enum wmi_tlv_vdev_param {
WMI_VDEV_PARAM_HE_RANGE_EXT,
WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+ WMI_VDEV_PARAM_HE_LTF = 0x74,
WMI_VDEV_PARAM_BA_MODE = 0x7e,
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
@@ -2128,6 +2149,24 @@ enum wmi_direct_buffer_module {
WMI_DIRECT_BUF_MAX
};
+/* enum wmi_nss_ratio - NSS ratio received from FW during service ready ext
+ * event
+ * WMI_NSS_RATIO_1BY2_NSS - Max nss of 160MHz is equal to half of the max nss
+ * of 80MHz
+ * WMI_NSS_RATIO_3BY4_NSS - Max nss of 160MHz is equal to 3/4 of the max nss
+ * of 80MHz
+ * WMI_NSS_RATIO_1_NSS - Max nss of 160MHz is equal to the max nss of 80MHz
+ * WMI_NSS_RATIO_2_NSS - Max nss of 160MHz is equal to two times the max
+ * nss of 80MHz
+ */
+
+enum wmi_nss_ratio {
+ WMI_NSS_RATIO_1BY2_NSS = 0x0,
+ WMI_NSS_RATIO_3BY4_NSS = 0x1,
+ WMI_NSS_RATIO_1_NSS = 0x2,
+ WMI_NSS_RATIO_2_NSS = 0x3,
+};
+
struct wmi_host_pdev_band_to_mac {
u32 pdev_id;
u32 start_freq;
@@ -2244,6 +2283,8 @@ struct wmi_init_cmd {
u32 num_host_mem_chunks;
} __packed;
+#define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5)
+
struct wmi_resource_config {
u32 tlv_header;
u32 num_vdevs;
@@ -2370,6 +2411,12 @@ struct wmi_hw_mode_capabilities {
} __packed;
#define WMI_MAX_HECAP_PHY_SIZE (3)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS BIT(0)
+#define WMI_NSS_RATIO_ENABLE_DISABLE_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_ENABLE_DISABLE_BITPOS, _val)
+#define WMI_NSS_RATIO_INFO_BITPOS GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+ FIELD_GET(WMI_NSS_RATIO_INFO_BITPOS, _val)
struct wmi_mac_phy_capabilities {
u32 hw_mode_id;
@@ -2403,6 +2450,12 @@ struct wmi_mac_phy_capabilities {
u32 he_cap_info_2g_ext;
u32 he_cap_info_5g_ext;
u32 he_cap_info_internal;
+ u32 wireless_modes;
+ u32 low_2ghz_chan_freq;
+ u32 high_2ghz_chan_freq;
+ u32 low_5ghz_chan_freq;
+ u32 high_5ghz_chan_freq;
+ u32 nss_ratio;
} __packed;
struct wmi_hal_reg_capabilities_ext {
@@ -2527,6 +2580,7 @@ struct wmi_vdev_down_cmd {
#define WMI_VDEV_START_HIDDEN_SSID BIT(0)
#define WMI_VDEV_START_PMF_ENABLED BIT(1)
#define WMI_VDEV_START_LDPC_RX_ENABLED BIT(3)
+#define WMI_VDEV_START_HW_ENCRYPTION_DISABLED BIT(4)
struct wmi_ssid {
u32 ssid_len;
@@ -2960,6 +3014,7 @@ struct wmi_pdev_bss_chan_info_req_cmd {
u32 tlv_header;
/* ref wmi_bss_chan_info_req_type */
u32 req_type;
+ u32 pdev_id;
} __packed;
struct wmi_ap_ps_peer_cmd {
@@ -3608,7 +3663,7 @@ struct wmi_stop_scan_cmd {
struct scan_chan_list_params {
u32 pdev_id;
u16 nallchans;
- struct channel_param ch_param[1];
+ struct channel_param ch_param[];
};
struct wmi_scan_chan_list_cmd {
@@ -3917,7 +3972,11 @@ struct wmi_vht_rate_set {
struct wmi_he_rate_set {
u32 tlv_header;
+
+ /* MCS at which the peer can receive */
u32 rx_mcs_set;
+
+ /* MCS at which the peer can transmit */
u32 tx_mcs_set;
} __packed;
@@ -4056,7 +4115,6 @@ struct wmi_vdev_stopped_event {
} __packed;
struct wmi_pdev_bss_chan_info_event {
- u32 pdev_id;
u32 freq; /* Units in MHz */
u32 noise_floor; /* units are dBm */
/* rx clear - how often the channel was unused */
@@ -4074,6 +4132,7 @@ struct wmi_pdev_bss_chan_info_event {
/*rx_cycle cnt for my bss in 64bits format */
u32 rx_bss_cycle_count_low;
u32 rx_bss_cycle_count_high;
+ u32 pdev_id;
} __packed;
#define WMI_VDEV_INSTALL_KEY_COMPL_STATUS_SUCCESS 0
@@ -4168,6 +4227,9 @@ struct wmi_pdev_stats_tx {
/* Num underruns */
s32 underrun;
+ /* Num hw paused */
+ u32 hw_paused;
+
/* Num PPDUs cleaned up in TX abort */
s32 tx_abort;
@@ -4177,6 +4239,8 @@ struct wmi_pdev_stats_tx {
/* excessive retries */
u32 tx_ko;
+ u32 tx_xretry;
+
/* data hw rate code */
u32 data_rc;
@@ -4206,6 +4270,40 @@ struct wmi_pdev_stats_tx {
/* MPDU is more than txop limit */
u32 txop_ovf;
+
+ /* Num sequences posted */
+ u32 seq_posted;
+
+ /* Num sequences failed in queueing */
+ u32 seq_failed_queueing;
+
+ /* Num sequences completed */
+ u32 seq_completed;
+
+ /* Num sequences restarted */
+ u32 seq_restarted;
+
+ /* Num of MU sequences posted */
+ u32 mu_seq_posted;
+
+ /* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT
+ * (Reset, channel change)
+ */
+ s32 mpdus_sw_flush;
+
+ /* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
+ s32 mpdus_hw_filter;
+
+ /* Num MPDUs truncated by PDG (TXOP, TBTT,
+ * PPDU_duration based on rate, dyn_bw)
+ */
+ s32 mpdus_truncated;
+
+ /* Num MPDUs that were tried but didn't receive an ACK or BA */
+ s32 mpdus_ack_failed;
+
+ /* Num MPDUs that were dropped due to expiry. */
+ s32 mpdus_expired;
} __packed;
struct wmi_pdev_stats_rx {
@@ -4240,6 +4338,9 @@ struct wmi_pdev_stats_rx {
/* Number of mpdu errors - FCS, MIC, ENC etc. */
s32 mpdu_errs;
+
+ /* Num overflow errors */
+ s32 rx_ovfl_errs;
} __packed;
struct wmi_pdev_stats {
@@ -5014,7 +5115,7 @@ struct target_resource_config {
u32 vo_minfree;
u32 rx_batchmode;
u32 tt_support;
- u32 atf_config;
+ u32 flag1;
u32 iphdr_pad_config;
u32 qwrap_config:16,
alloc_frag_desc_for_data_pkt:16;
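Related to the scan_chan_list change above: scan_chan_list_params now ends in a C99 flexible array member instead of a one-element array, and reg.c sizes the allocation with struct_size() rather than an open-coded sizeof sum. A plain-C sketch of the same layout (struct_size() additionally guards the count * element-size multiplication against overflow; the struct fields here are simplified stand-ins):

/* Illustrative only: allocating a structure that ends in a flexible
 * array member. Field names are simplified stand-ins.
 */
#include <stdint.h>
#include <stdlib.h>

struct chan_list {
	uint32_t pdev_id;
	uint16_t nallchans;
	struct { uint32_t freq; } ch_param[];	/* flexible array member */
};

static struct chan_list *chan_list_alloc(uint16_t nchans)
{
	struct chan_list *cl;

	/* kernel equivalent: kzalloc(struct_size(cl, ch_param, nchans), GFP_KERNEL) */
	cl = calloc(1, sizeof(*cl) + nchans * sizeof(cl->ch_param[0]));
	if (cl)
		cl->nallchans = nchans;
	return cl;
}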
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 8113baddd8fc..37bf64106473 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -14,7 +14,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
- return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+ return sysfs_emit(buf, "%d\n", get); \
} \
\
static ssize_t ath5k_attr_store_##name(struct device *dev, \
@@ -41,7 +41,7 @@ static ssize_t ath5k_attr_show_##name(struct device *dev, \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
- return snprintf(buf, PAGE_SIZE, "%d\n", get); \
+ return sysfs_emit(buf, "%d\n", get); \
} \
static DEVICE_ATTR(name, 0444, ath5k_attr_show_##name, NULL)
@@ -64,7 +64,7 @@ static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
+ return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
}
static DEVICE_ATTR(noise_immunity_level_max, 0444,
ath5k_attr_show_noise_immunity_level_max, NULL);
@@ -73,7 +73,7 @@ static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
+ return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
}
static DEVICE_ATTR(firstep_level_max, 0444,
ath5k_attr_show_firstep_level_max, NULL);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fefdc6753acd..bd1183830e91 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -3781,6 +3781,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
{
struct net_device *ndev;
struct ath6kl_vif *vif;
+ u8 addr[ETH_ALEN];
ndev = alloc_netdev(sizeof(*vif), name, name_assign_type, ether_setup);
if (!ndev)
@@ -3803,14 +3804,14 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
vif->htcap[NL80211_BAND_2GHZ].ht_enable = true;
vif->htcap[NL80211_BAND_5GHZ].ht_enable = true;
- memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+ ether_addr_copy(addr, ar->mac_addr);
if (fw_vif_idx != 0) {
- ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
- 0x2;
+ addr[0] = (addr[0] ^ (1 << fw_vif_idx)) | 0x2;
if (test_bit(ATH6KL_FW_CAPABILITY_CUSTOM_MAC_ADDR,
ar->fw_capabilities))
- ndev->dev_addr[4] ^= 0x80;
+ addr[4] ^= 0x80;
}
+ eth_hw_addr_set(ndev, addr);
init_netdev(ndev);
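The ath6kl change above stops patching ndev->dev_addr in place; it builds the per-vif address in a local buffer and commits it with eth_hw_addr_set(). The derivation itself flips a bit per interface index and sets the locally administered bit, roughly as in this standalone sketch (helper name hypothetical; the driver's extra 0x80 capability tweak is omitted):

/* Illustrative only: deriving a locally administered per-vif MAC from a
 * base address.
 */
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static void derive_vif_addr(uint8_t out[ETH_ALEN],
			    const uint8_t base[ETH_ALEN], unsigned int idx)
{
	memcpy(out, base, ETH_ALEN);
	if (idx) {
		out[0] ^= 1u << idx;	/* make each vif address distinct */
		out[0] |= 0x2;		/* locally administered bit */
	}
}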
diff --git a/drivers/net/wireless/ath/ath6kl/usb.c b/drivers/net/wireless/ath/ath6kl/usb.c
index 5372e948e761..aba70f35e574 100644
--- a/drivers/net/wireless/ath/ath6kl/usb.c
+++ b/drivers/net/wireless/ath/ath6kl/usb.c
@@ -340,6 +340,11 @@ static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
le16_to_cpu(endpoint->wMaxPacketSize),
endpoint->bInterval);
}
+
+ /* Ignore broken descriptors. */
+ if (usb_endpoint_maxp(endpoint) == 0)
+ continue;
+
urbcount = 0;
pipe_num =
@@ -907,7 +912,7 @@ static int ath6kl_usb_submit_ctrl_in(struct ath6kl_usb *ar_usb,
req,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, value, index, buf,
- size, 2 * HZ);
+ size, 2000);
if (ret < 0) {
ath6kl_warn("Failed to read usb control message: %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
index 56d1a7764b9f..708c8969b503 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
+++ b/drivers/net/wireless/ath/ath9k/ath9k_pci_owl_loader.c
@@ -19,9 +19,14 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ath9k_platform.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/workqueue.h>
struct owl_ctx {
+ struct pci_dev *pdev;
struct completion eeprom_load;
+ struct work_struct work;
+ struct nvmem_cell *cell;
};
#define EEPROM_FILENAME_LEN 100
@@ -42,6 +47,12 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
u32 bar0;
bool swap_needed = false;
+ /* also note that we are doing *u16 operations on the file */
+ if (cal_len > 4096 || cal_len < 0x200 || (cal_len & 1) == 1) {
+ dev_err(&pdev->dev, "eeprom has an invalid size.\n");
+ return -EINVAL;
+ }
+
if (*cal_data != AR5416_EEPROM_MAGIC) {
if (*cal_data != swab16(AR5416_EEPROM_MAGIC)) {
dev_err(&pdev->dev, "invalid calibration data\n");
@@ -99,38 +110,31 @@ static int ath9k_pci_fixup(struct pci_dev *pdev, const u16 *cal_data,
return 0;
}
-static void owl_fw_cb(const struct firmware *fw, void *context)
+static void owl_rescan(struct pci_dev *pdev)
{
- struct pci_dev *pdev = (struct pci_dev *)context;
- struct owl_ctx *ctx = (struct owl_ctx *)pci_get_drvdata(pdev);
- struct pci_bus *bus;
-
- complete(&ctx->eeprom_load);
-
- if (!fw) {
- dev_err(&pdev->dev, "no eeprom data received.\n");
- goto release;
- }
-
- /* also note that we are doing *u16 operations on the file */
- if (fw->size > 4096 || fw->size < 0x200 || (fw->size & 1) == 1) {
- dev_err(&pdev->dev, "eeprom file has an invalid size.\n");
- goto release;
- }
-
- if (ath9k_pci_fixup(pdev, (const u16 *)fw->data, fw->size))
- goto release;
+ struct pci_bus *bus = pdev->bus;
pci_lock_rescan_remove();
- bus = pdev->bus;
pci_stop_and_remove_bus_device(pdev);
/* the device should come back with the proper
* ProductId. But we have to initiate a rescan.
*/
pci_rescan_bus(bus);
pci_unlock_rescan_remove();
+}
+
+static void owl_fw_cb(const struct firmware *fw, void *context)
+{
+ struct owl_ctx *ctx = (struct owl_ctx *)context;
+
+ complete(&ctx->eeprom_load);
-release:
+ if (fw) {
+ ath9k_pci_fixup(ctx->pdev, (const u16 *)fw->data, fw->size);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no eeprom data received.\n");
+ }
release_firmware(fw);
}
@@ -152,6 +156,43 @@ static const char *owl_get_eeprom_name(struct pci_dev *pdev)
return eeprom_name;
}
+static void owl_nvmem_work(struct work_struct *work)
+{
+ struct owl_ctx *ctx = container_of(work, struct owl_ctx, work);
+ void *buf;
+ size_t len;
+
+ complete(&ctx->eeprom_load);
+
+ buf = nvmem_cell_read(ctx->cell, &len);
+ if (!IS_ERR(buf)) {
+ ath9k_pci_fixup(ctx->pdev, buf, len);
+ kfree(buf);
+ owl_rescan(ctx->pdev);
+ } else {
+ dev_err(&ctx->pdev->dev, "no nvmem data received.\n");
+ }
+}
+
+static int owl_nvmem_probe(struct owl_ctx *ctx)
+{
+ int err;
+
+ ctx->cell = devm_nvmem_cell_get(&ctx->pdev->dev, "calibration");
+ if (IS_ERR(ctx->cell)) {
+ err = PTR_ERR(ctx->cell);
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 1; /* not present, try firmware_request */
+
+ return err;
+ }
+
+ INIT_WORK(&ctx->work, owl_nvmem_work);
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
static int owl_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
@@ -164,21 +205,27 @@ static int owl_probe(struct pci_dev *pdev,
pcim_pin_device(pdev);
- eeprom_name = owl_get_eeprom_name(pdev);
- if (!eeprom_name) {
- dev_err(&pdev->dev, "no eeprom filename found.\n");
- return -ENODEV;
- }
-
ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
init_completion(&ctx->eeprom_load);
+ ctx->pdev = pdev;
pci_set_drvdata(pdev, ctx);
+
+ err = owl_nvmem_probe(ctx);
+ if (err <= 0)
+ return err;
+
+ eeprom_name = owl_get_eeprom_name(pdev);
+ if (!eeprom_name) {
+ dev_err(&pdev->dev, "no eeprom filename found.\n");
+ return -ENODEV;
+ }
+
err = request_firmware_nowait(THIS_MODULE, true, eeprom_name,
- &pdev->dev, GFP_KERNEL, pdev, owl_fw_cb);
+ &pdev->dev, GFP_KERNEL, ctx, owl_fw_cb);
if (err)
dev_err(&pdev->dev, "failed to request caldata (%d).\n", err);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 4c81b1d7f417..fb7a2952d0ce 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -749,9 +749,9 @@ static int read_file_misc(struct seq_file *file, void *data)
static int read_file_reset(struct seq_file *file, void *data)
{
- struct ieee80211_hw *hw = dev_get_drvdata(file->private);
- struct ath_softc *sc = hw->priv;
+ struct ath_softc *sc = file->private;
static const char * const reset_cause[__RESET_TYPE_MAX] = {
+ [RESET_TYPE_USER] = "User reset",
[RESET_TYPE_BB_HANG] = "Baseband Hang",
[RESET_TYPE_BB_WATCHDOG] = "Baseband Watchdog",
[RESET_TYPE_FATAL_INT] = "Fatal HW Error",
@@ -779,6 +779,55 @@ static int read_file_reset(struct seq_file *file, void *data)
return 0;
}
+static int open_file_reset(struct inode *inode, struct file *f)
+{
+ return single_open(f, read_file_reset, inode->i_private);
+}
+
+static ssize_t write_file_reset(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file_inode(file)->i_private;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ unsigned long val;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ if (val != 1)
+ return -EINVAL;
+
+ /* avoid rearming hw_reset_work on shutdown */
+ mutex_lock(&sc->mutex);
+ if (test_bit(ATH_OP_INVALID, &common->op_flags)) {
+ mutex_unlock(&sc->mutex);
+ return -EBUSY;
+ }
+
+ ath9k_queue_reset(sc, RESET_TYPE_USER);
+ mutex_unlock(&sc->mutex);
+
+ return count;
+}
+
+static const struct file_operations fops_reset = {
+ .read = seq_read,
+ .write = write_file_reset,
+ .open = open_file_reset,
+ .owner = THIS_MODULE,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
struct ath_tx_status *ts, struct ath_txq *txq,
unsigned int flags)
@@ -1393,8 +1442,8 @@ int ath9k_init_debug(struct ath_hw *ah)
read_file_queues);
debugfs_create_devm_seqfile(sc->dev, "misc", sc->debug.debugfs_phy,
read_file_misc);
- debugfs_create_devm_seqfile(sc->dev, "reset", sc->debug.debugfs_phy,
- read_file_reset);
+ debugfs_create_file("reset", 0600, sc->debug.debugfs_phy,
+ sc, &fops_reset);
ath9k_cmn_debug_recv(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
ath9k_cmn_debug_phy_err(sc->debug.debugfs_phy, &sc->debug.stats.rxstats);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 33826aa13687..389459c04d14 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -39,6 +39,7 @@ struct fft_sample_tlv;
#endif
enum ath_reset_type {
+ RESET_TYPE_USER,
RESET_TYPE_BB_HANG,
RESET_TYPE_BB_WATCHDOG,
RESET_TYPE_FATAL_INT,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index c22d457dbc54..e6b3cd49ea18 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -135,13 +135,23 @@ static bool ath9k_hw_nvram_read_firmware(const struct firmware *eeprom_blob,
offset, data);
}
+static bool ath9k_hw_nvram_read_nvmem(struct ath_hw *ah, off_t offset,
+ u16 *data)
+{
+ return ath9k_hw_nvram_read_array(ah->nvmem_blob,
+ ah->nvmem_blob_len / sizeof(u16),
+ offset, data);
+}
+
bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_platform_data *pdata = ah->dev->platform_data;
bool ret;
- if (ah->eeprom_blob)
+ if (ah->nvmem_blob)
+ ret = ath9k_hw_nvram_read_nvmem(ah, off, data);
+ else if (ah->eeprom_blob)
ret = ath9k_hw_nvram_read_firmware(ah->eeprom_blob, off, data);
else if (pdata && !pdata->use_eeprom)
ret = ath9k_hw_nvram_read_pdata(pdata, off, data);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b7b65b1c90e8..096a206f49ed 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -977,6 +977,8 @@ struct ath_hw {
bool disable_5ghz;
const struct firmware *eeprom_blob;
+ u16 *nvmem_blob; /* devres managed */
+ size_t nvmem_blob_len;
struct ath_dynack dynack;
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index e9a36dd7144f..4f00400c7ffb 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
+#include <linux/nvmem-consumer.h>
#include <linux/relay.h>
#include <linux/dmi.h>
#include <net/ieee80211_radiotap.h>
@@ -568,6 +569,57 @@ static void ath9k_eeprom_release(struct ath_softc *sc)
release_firmware(sc->sc_ah->eeprom_blob);
}
+static int ath9k_nvmem_request_eeprom(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct nvmem_cell *cell;
+ void *buf;
+ size_t len;
+ int err;
+
+ cell = devm_nvmem_cell_get(sc->dev, "calibration");
+ if (IS_ERR(cell)) {
+ err = PTR_ERR(cell);
+
+ /* nvmem cell might not be defined, or the nvmem
+ * subsystem isn't included. In this case, follow
+ * the established "just return 0;" convention of
+ * ath9k_init_platform to say:
+ * "All good. Nothing to see here. Please go on."
+ */
+ if (err == -ENOENT || err == -EOPNOTSUPP)
+ return 0;
+
+ return err;
+ }
+
+ buf = nvmem_cell_read(cell, &len);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ /* run basic sanity checks on the returned nvram cell length.
+ * That length has to be a multiple of a "u16" (i.e.: & 1).
+ * Furthermore, it has to be more than "let's say" 512 bytes
+ * but less than the maximum of AR9300_EEPROM_SIZE (16kb).
+ */
+ if ((len & 1) == 1 || len < 512 || len >= AR9300_EEPROM_SIZE) {
+ kfree(buf);
+ return -EINVAL;
+ }
+
+ /* devres manages the calibration values release on shutdown */
+ ah->nvmem_blob = (u16 *)devm_kmemdup(sc->dev, buf, len, GFP_KERNEL);
+ kfree(buf);
+ if (!ah->nvmem_blob)
+ return -ENOMEM;
+
+ ah->nvmem_blob_len = len;
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
+
+ return 0;
+}
+
static int ath9k_init_platform(struct ath_softc *sc)
{
struct ath9k_platform_data *pdata = sc->dev->platform_data;
@@ -704,6 +756,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc,
if (ret)
return ret;
+ ret = ath9k_nvmem_request_eeprom(sc);
+ if (ret)
+ return ret;
+
if (ath9k_led_active_high != -1)
ah->config.led_active_high = ath9k_led_active_high == 1;
@@ -1038,6 +1094,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
ARRAY_SIZE(ath9k_tpt_blink));
#endif
+ wiphy_read_of_freq_limits(hw->wiphy);
+
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
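Both the owl-loader and init.c changes above use the same NVMEM consumer pattern: look up an optional "calibration" cell, read it into a kmalloc'ed buffer, and treat -ENOENT/-EOPNOTSUPP as "no cell, fall back to firmware loading". Condensed into one hedged sketch (the wrapper function is hypothetical; error handling trimmed):

/* Illustrative only: the generic NVMEM consumer pattern; the cell name is
 * taken from the hunks above.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>

static int read_cal_cell(struct device *dev, void **buf, size_t *len)
{
	struct nvmem_cell *cell;

	cell = devm_nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);	/* -ENOENT/-EOPNOTSUPP: no cell */

	*buf = nvmem_cell_read(cell, len);	/* kmalloc'ed, caller kfree()s */
	if (IS_ERR(*buf))
		return PTR_ERR(*buf);

	return 0;
}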
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 139831539da3..98090e40e1cf 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -533,8 +533,10 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
- if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags)) {
+ ath9k_hw_kill_interrupts(sc->sc_ah);
return IRQ_HANDLED;
+ }
/*
* If there are no status bits set, then this interrupt was not
diff --git a/drivers/net/wireless/ath/dfs_pattern_detector.c b/drivers/net/wireless/ath/dfs_pattern_detector.c
index 80390495ea25..75cb53a3ec15 100644
--- a/drivers/net/wireless/ath/dfs_pattern_detector.c
+++ b/drivers/net/wireless/ath/dfs_pattern_detector.c
@@ -183,10 +183,12 @@ static void channel_detector_exit(struct dfs_pattern_detector *dpd,
if (cd == NULL)
return;
list_del(&cd->head);
- for (i = 0; i < dpd->num_radar_types; i++) {
- struct pri_detector *de = cd->detectors[i];
- if (de != NULL)
- de->exit(de);
+ if (cd->detectors) {
+ for (i = 0; i < dpd->num_radar_types; i++) {
+ struct pri_detector *de = cd->detectors[i];
+ if (de != NULL)
+ de->exit(de);
+ }
}
kfree(cd->detectors);
kfree(cd);
diff --git a/drivers/net/wireless/ath/spectral_common.h b/drivers/net/wireless/ath/spectral_common.h
index 9c2e5458e425..e14f374f97d4 100644
--- a/drivers/net/wireless/ath/spectral_common.h
+++ b/drivers/net/wireless/ath/spectral_common.h
@@ -24,7 +24,6 @@
* could be acquired so far.
*/
#define SPECTRAL_ATH10K_MAX_NUM_BINS 256
-#define SPECTRAL_ATH11K_MAX_NUM_BINS 512
/* FFT sample format given to userspace via debugfs.
*
diff --git a/drivers/net/wireless/ath/wcn36xx/debug.c b/drivers/net/wireless/ath/wcn36xx/debug.c
index 389b5e7129a6..6af306ae41ad 100644
--- a/drivers/net/wireless/ath/wcn36xx/debug.c
+++ b/drivers/net/wireless/ath/wcn36xx/debug.c
@@ -120,7 +120,7 @@ static ssize_t write_file_dump(struct file *file,
if (begin == NULL)
break;
- if (kstrtou32(begin, 0, &arg[i]) != 0)
+ if (kstrtos32(begin, 0, &arg[i]) != 0)
break;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index 8e1dbfda6538..aff04ef66266 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -403,8 +403,21 @@ static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
- if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
- /* Keep frame until TX status comes */
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
+ } else {
+ /* Wait for the TX ack indication or timeout... */
+ spin_lock(&wcn->dxe_lock);
+ if (WARN_ON(wcn->tx_ack_skb))
+ ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
+ wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
+ mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
+ spin_unlock(&wcn->dxe_lock);
+ }
+ /* do not free, ownership transferred to mac80211 status cb */
+ } else {
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
@@ -426,7 +439,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
int int_src, int_reason;
- bool transmitted = false;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
@@ -466,7 +478,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
- transmitted = true;
}
}
@@ -479,7 +490,6 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
WCN36XX_DXE_0_INT_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
-
if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK ) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ERR_CLR,
@@ -507,25 +517,8 @@ static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
- transmitted = true;
- }
- }
-
- spin_lock(&wcn->dxe_lock);
- if (wcn->tx_ack_skb && transmitted) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(wcn->tx_ack_skb);
-
- /* TX complete, no need to wait for 802.11 ack indication */
- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS &&
- info->flags & IEEE80211_TX_CTL_NO_ACK) {
- info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
- del_timer(&wcn->tx_ack_timer);
- ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
- wcn->tx_ack_skb = NULL;
- ieee80211_wake_queues(wcn->hw);
}
}
- spin_unlock(&wcn->dxe_lock);
return IRQ_HANDLED;
}
@@ -613,6 +606,10 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
dxe = ctl->desc;
while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
+ /* do not read until we own DMA descriptor */
+ dma_rmb();
+
+ /* read/modify DMA descriptor */
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
@@ -623,9 +620,15 @@ static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
wcn36xx_rx_skb(wcn, skb);
- } /* else keep old skb not submitted and use it for rx DMA */
+ }
+ /* else keep the old, unsubmitted skb and reuse it for rx DMA
+ * (dropping the packet that it contained)
+ */
+ /* flush descriptor changes before re-marking as valid */
+ dma_wmb();
dxe->ctrl = ctrl;
+
ctl = ctl->next;
dxe = ctl->desc;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 455143c4164e..9bea2b01f9aa 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -359,6 +359,8 @@ enum wcn36xx_hal_host_msg_type {
WCN36XX_HAL_START_SCAN_OFFLOAD_RSP = 205,
WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ = 206,
WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP = 207,
+ WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ = 208,
+ WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP = 209,
WCN36XX_HAL_SCAN_OFFLOAD_IND = 210,
WCN36XX_HAL_AVOID_FREQ_RANGE_IND = 233,
@@ -1353,6 +1355,36 @@ struct wcn36xx_hal_stop_scan_offload_rsp_msg {
u32 status;
} __packed;
+#define WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK 0x000000ff
+#define WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK 0x0000ff00
+#define WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK 0x00ff0000
+#define WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK 0xff000000
+#define WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK 0x000000ff
+#define WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE BIT(7)
+#define WCN36XX_HAL_CHAN_INFO_FLAG_DFS BIT(10)
+#define WCN36XX_HAL_CHAN_INFO_FLAG_HT BIT(11)
+#define WCN36XX_HAL_CHAN_INFO_FLAG_VHT BIT(12)
+#define WCN36XX_HAL_CHAN_INFO_PHY_11A 0
+#define WCN36XX_HAL_CHAN_INFO_PHY_11BG 1
+#define WCN36XX_HAL_DEFAULT_ANT_GAIN 6
+#define WCN36XX_HAL_DEFAULT_MIN_POWER 6
+
+struct wcn36xx_hal_channel_param {
+ u32 mhz;
+ u32 band_center_freq1;
+ u32 band_center_freq2;
+ u32 channel_info;
+ u32 reg_info_1;
+ u32 reg_info_2;
+} __packed;
+
+struct wcn36xx_hal_update_channel_list_req_msg {
+ struct wcn36xx_hal_msg_header header;
+
+ u8 num_channel;
+ struct wcn36xx_hal_channel_param channels[80];
+} __packed;
+
enum wcn36xx_hal_rate_index {
HW_RATE_INDEX_1MBPS = 0x82,
HW_RATE_INDEX_2MBPS = 0x84,
@@ -3384,11 +3416,11 @@ struct tl_hal_flush_ac_rsp_msg {
struct wcn36xx_hal_enter_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
-struct wcn36xx_hal_exit_imps_req {
+struct wcn36xx_hal_exit_imps_req_msg {
struct wcn36xx_hal_msg_header header;
-};
+} __packed;
struct wcn36xx_hal_enter_bmps_req_msg {
struct wcn36xx_hal_msg_header header;
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index ec913ec991f3..b04533bbc3a4 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -85,7 +85,9 @@ static struct ieee80211_channel wcn_5ghz_channels[] = {
CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
+ CHAN5G(5680, 136, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
+ CHAN5G(5720, 144, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
@@ -135,7 +137,9 @@ static struct ieee80211_supported_band wcn_band_2ghz = {
.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_DSSSCCK40 |
- IEEE80211_HT_CAP_LSIG_TXOP_PROT,
+ IEEE80211_HT_CAP_LSIG_TXOP_PROT |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_SUP_WIDTH_20_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
@@ -432,6 +436,13 @@ static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_PS)
wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (hw->conf.flags & IEEE80211_CONF_IDLE)
+ wcn36xx_smd_enter_imps(wcn);
+ else
+ wcn36xx_smd_exit_imps(wcn);
+ }
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -569,12 +580,14 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
sta_priv->is_data_encrypted = true;
/* Reconfigure bss with encrypt_type */
- if (NL80211_IFTYPE_STATION == vif->type)
+ if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_smd_config_bss(wcn,
vif,
sta,
sta->addr,
true);
+ wcn36xx_smd_config_sta(wcn, vif, sta);
+ }
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
@@ -604,15 +617,6 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
}
}
}
- /* FIXME: Only enable bmps support when encryption is enabled.
- * For any reasons, when connected to open/no-security BSS,
- * the wcn36xx controller in bmps mode does not forward
- * 'wake-up' beacons despite AP sends DTIM with station AID.
- * It could be due to a firmware issue or to the way driver
- * configure the station.
- */
- if (vif->type == NL80211_IFTYPE_STATION)
- vif_priv->allow_bmps = true;
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
@@ -650,19 +654,19 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
- int i;
if (!get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
- /* For unknown reason, the hardware offloaded scan only works with
- * 2.4Ghz channels, fallback to software scan in other cases.
+ /* Firmware scan offload is limited to 48 channels, fall back to
+ * software-driven scanning otherwise.
+ */
- for (i = 0; i < hw_req->req.n_channels; i++) {
- if (hw_req->req.channels[i]->band != NL80211_BAND_2GHZ)
- return 1;
+ if (hw_req->req.n_channels > 48) {
+ wcn36xx_warn("Offload scan aborted, n_channels=%u",
+ hw_req->req.n_channels);
+ return 1;
}
mutex_lock(&wcn->scan_lock);
@@ -676,6 +680,7 @@ static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
mutex_unlock(&wcn->scan_lock);
+ wcn36xx_smd_update_channel_list(wcn, &hw_req->req);
return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
@@ -913,7 +918,6 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
vif->addr,
bss_conf->aid);
vif_priv->sta_assoc = false;
- vif_priv->allow_bmps = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
@@ -1123,6 +1127,13 @@ static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
goto out;
ret = wcn36xx_smd_wlan_host_suspend_ind(wcn);
}
+
+ /* Disable IRQs; we don't want to handle any packets before mac80211 is
+ * resumed and ready to receive them.
+ */
+ disable_irq(wcn->tx_irq);
+ disable_irq(wcn->rx_irq);
+
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
@@ -1145,6 +1156,10 @@ static int wcn36xx_resume(struct ieee80211_hw *hw)
wcn36xx_smd_ipv6_ns_offload(wcn, vif, false);
wcn36xx_smd_arp_offload(wcn, vif, false);
}
+
+ enable_irq(wcn->tx_irq);
+ enable_irq(wcn->rx_irq);
+
mutex_unlock(&wcn->conf_mutex);
return 0;
@@ -1338,7 +1353,6 @@ static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
- ieee80211_hw_set(wcn->hw, CONNECTION_MONITOR);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
@@ -1490,6 +1504,7 @@ static int wcn36xx_probe(struct platform_device *pdev)
mutex_init(&wcn->conf_mutex);
mutex_init(&wcn->hal_mutex);
mutex_init(&wcn->scan_lock);
+ __skb_queue_head_init(&wcn->amsdu);
wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
if (!wcn->hal_buf) {
@@ -1567,6 +1582,8 @@ static int wcn36xx_remove(struct platform_device *pdev)
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
+ __skb_queue_purge(&wcn->amsdu);
+
mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
diff --git a/drivers/net/wireless/ath/wcn36xx/pmc.c b/drivers/net/wireless/ath/wcn36xx/pmc.c
index 2d0780fefd47..2c660458b383 100644
--- a/drivers/net/wireless/ath/wcn36xx/pmc.c
+++ b/drivers/net/wireless/ath/wcn36xx/pmc.c
@@ -18,19 +18,19 @@
#include "wcn36xx.h"
+#define WCN36XX_BMPS_FAIL_THRESHOLD 3
+
int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
int ret = 0;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
-
- if (!vif_priv->allow_bmps)
- return -ENOTSUPP;
-
+ /* TODO: Make sure the TX chain is clean */
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
vif_priv->pw_state = WCN36XX_BMPS;
+ vif_priv->bmps_fail_ct = 0;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
} else {
/*
@@ -39,6 +39,11 @@ int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
* received just after auth complete
*/
wcn36xx_err("Can not enter BMPS!\n");
+
+ if (vif_priv->bmps_fail_ct++ == WCN36XX_BMPS_FAIL_THRESHOLD) {
+ ieee80211_connection_loss(vif);
+ vif_priv->bmps_fail_ct = 0;
+ }
}
return ret;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 57fa857b290b..ed45e2cf039b 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -16,6 +16,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
@@ -266,7 +267,8 @@ static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
sta_params->max_ampdu_size = sta->ht_cap.ampdu_factor;
sta_params->max_ampdu_density = sta->ht_cap.ampdu_density;
- sta_params->max_amsdu_size = is_cap_supported(caps,
+ /* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */
+ sta_params->max_amsdu_size = !is_cap_supported(caps,
IEEE80211_HT_CAP_MAX_AMSDU);
sta_params->sgi_20Mhz = is_cap_supported(caps,
IEEE80211_HT_CAP_SGI_20);
@@ -927,6 +929,86 @@ out:
return ret;
}
+int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req)
+{
+ struct wcn36xx_hal_update_channel_list_req_msg *msg_body;
+ int ret, i;
+
+ msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
+ if (!msg_body)
+ return -ENOMEM;
+
+ INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ);
+
+ msg_body->num_channel = min_t(u8, req->n_channels, ARRAY_SIZE(msg_body->channels));
+ for (i = 0; i < msg_body->num_channel; i++) {
+ struct wcn36xx_hal_channel_param *param = &msg_body->channels[i];
+ u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER;
+ u32 ant_gain = WCN36XX_HAL_DEFAULT_ANT_GAIN;
+
+ param->mhz = req->channels[i]->center_freq;
+ param->band_center_freq1 = req->channels[i]->center_freq;
+ param->band_center_freq2 = 0;
+
+ if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE;
+
+ if (req->channels[i]->flags & IEEE80211_CHAN_RADAR)
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_DFS;
+
+ if (req->channels[i]->band == NL80211_BAND_5GHZ) {
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_HT;
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_VHT;
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11A;
+ } else {
+ param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11BG;
+ }
+
+ if (min_power > req->channels[i]->max_power)
+ min_power = req->channels[i]->max_power;
+
+ if (req->channels[i]->max_antenna_gain)
+ ant_gain = req->channels[i]->max_antenna_gain;
+
+ u32p_replace_bits(&param->reg_info_1, min_power,
+ WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK);
+ u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_power,
+ WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK);
+ u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_reg_power,
+ WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK);
+ u32p_replace_bits(&param->reg_info_1, 0,
+ WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK);
+ u32p_replace_bits(&param->reg_info_2, ant_gain,
+ WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK);
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "%s: freq=%u, channel_info=%08x, reg_info1=%08x, reg_info2=%08x\n",
+ __func__, param->mhz, param->channel_info, param->reg_info_1,
+ param->reg_info_2);
+ }
+
+ mutex_lock(&wcn->hal_mutex);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_update_channel_list failed\n");
+ goto out;
+ }
+
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_update_channel_list response failed err=%d\n", ret);
+ goto out;
+ }
+
+out:
+ kfree(msg_body);
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
@@ -2184,6 +2266,59 @@ out:
return ret;
}
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_enter_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_enter_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_enter_imps response failed err=%d\n", ret);
+ goto out;
+ }
+
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Entered idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn)
+{
+ struct wcn36xx_hal_exit_imps_req_msg msg_body;
+ int ret;
+
+ mutex_lock(&wcn->hal_mutex);
+ INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_IMPS_REQ);
+
+ PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
+
+ ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
+ if (ret) {
+ wcn36xx_err("Sending hal_exit_imps failed\n");
+ goto out;
+ }
+ ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
+ if (ret) {
+ wcn36xx_err("hal_exit_imps response failed err=%d\n", ret);
+ goto out;
+ }
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "Exited idle mode\n");
+out:
+ mutex_unlock(&wcn->hal_mutex);
+ return ret;
+}
+
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
@@ -2341,8 +2476,11 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
- if (wcn->rf_id == RF_IRIS_WCN3680)
+ if (wcn->rf_id == RF_IRIS_WCN3680) {
set_feat_caps(msg_body.feat_caps, DOT11AC);
+ set_feat_caps(msg_body.feat_caps, WLAN_CH144);
+ set_feat_caps(msg_body.feat_caps, ANTENNA_DIVERSITY_SELECTION);
+ }
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
@@ -2623,30 +2761,52 @@ static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
size_t len)
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
- struct wcn36xx_vif *tmp;
+ struct wcn36xx_vif *vif_priv;
+ struct ieee80211_vif *vif;
+ struct ieee80211_bss_conf *bss_conf;
struct ieee80211_sta *sta;
+ bool found = false;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
- wcn36xx_dbg(WCN36XX_DBG_HAL, "delete station indication %pM index %d\n",
- rsp->addr2, rsp->sta_id);
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "delete station indication %pM index %d reason %d\n",
+ rsp->addr2, rsp->sta_id, rsp->reason_code);
- list_for_each_entry(tmp, &wcn->vif_list, list) {
+ list_for_each_entry(vif_priv, &wcn->vif_list, list) {
rcu_read_lock();
- sta = ieee80211_find_sta(wcn36xx_priv_to_vif(tmp), rsp->addr2);
- if (sta)
- ieee80211_report_low_ack(sta, 0);
+ vif = wcn36xx_priv_to_vif(vif_priv);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ /* We could call ieee80211_find_sta too, but checking
+ * bss_conf is clearer.
+ */
+ bss_conf = &vif->bss_conf;
+ if (vif_priv->sta_assoc &&
+ !memcmp(bss_conf->bssid, rsp->addr2, ETH_ALEN)) {
+ found = true;
+ wcn36xx_dbg(WCN36XX_DBG_HAL,
+ "connection loss bss_index %d\n",
+ vif_priv->bss_index);
+ ieee80211_connection_loss(vif);
+ }
+ } else {
+ sta = ieee80211_find_sta(vif, rsp->addr2);
+ if (sta) {
+ found = true;
+ ieee80211_report_low_ack(sta, 0);
+ }
+ }
+
rcu_read_unlock();
- if (sta)
+ if (found)
return 0;
}
- wcn36xx_warn("STA with addr %pM and index %d not found\n",
- rsp->addr2,
- rsp->sta_id);
+ wcn36xx_warn("BSS or STA with addr %pM not found\n", rsp->addr2);
return -ENOENT;
}
@@ -3060,6 +3220,9 @@ int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
case WCN36XX_HAL_GTK_OFFLOAD_RSP:
case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
case WCN36XX_HAL_HOST_RESUME_RSP:
+ case WCN36XX_HAL_ENTER_IMPS_RSP:
+ case WCN36XX_HAL_EXIT_IMPS_RSP:
+ case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index d8bded03945d..88e045dad8f3 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -70,6 +70,7 @@ int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn, u8 *channels, size_t cha
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req);
int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn);
+int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req);
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif);
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr);
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index);
@@ -163,4 +164,7 @@ int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn);
int wcn36xx_smd_host_resume(struct wcn36xx *wcn);
+int wcn36xx_smd_enter_imps(struct wcn36xx *wcn);
+int wcn36xx_smd_exit_imps(struct wcn36xx *wcn);
+
#endif /* _SMD_H_ */
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index cab196bb38cd..75951ccbc840 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -31,6 +31,13 @@ struct wcn36xx_rate {
enum rate_info_bw bw;
};
+/* The buffer descriptor rx_ch field is limited to 5 bits (4+1), so a mapping
+ * is used for the 11A channels.
+ */
+static const u8 ab_rx_ch_map[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104,
+ 108, 112, 116, 120, 124, 128, 132, 136, 140,
+ 149, 153, 157, 161, 165, 144 };
+
static const struct wcn36xx_rate wcn36xx_rate_table[] = {
/* 11b rates */
{ 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
@@ -224,6 +231,41 @@ static const struct wcn36xx_rate wcn36xx_rate_table[] = {
{ 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
};
+static struct sk_buff *wcn36xx_unchain_msdu(struct sk_buff_head *amsdu)
+{
+ struct sk_buff *skb, *first;
+ int total_len = 0;
+ int space;
+
+ first = __skb_dequeue(amsdu);
+
+ skb_queue_walk(amsdu, skb)
+ total_len += skb->len;
+
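+ /* Grow the first skb so the remaining chained subframes fit in its tailroom */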
+ space = total_len - skb_tailroom(first);
+ if (space > 0 && pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0) {
+ __skb_queue_head(amsdu, first);
+ return NULL;
+ }
+
+ /* Walk list again, copying contents into msdu_head */
+ while ((skb = __skb_dequeue(amsdu))) {
+ skb_copy_from_linear_data(skb, skb_put(first, skb->len),
+ skb->len);
+ dev_kfree_skb_irq(skb);
+ }
+
+ return first;
+}
+
+static void __skb_queue_purge_irq(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = __skb_dequeue(list)) != NULL)
+ dev_kfree_skb_irq(skb);
+}
+
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
@@ -245,6 +287,26 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
"BD <<< ", (char *)bd,
sizeof(struct wcn36xx_rx_bd));
+ if (bd->pdu.mpdu_data_off <= bd->pdu.mpdu_header_off ||
+ bd->pdu.mpdu_len < bd->pdu.mpdu_header_len)
+ goto drop;
+
+ if (bd->asf && !bd->esf) { /* chained A-MSDU chunks */
+ /* Sanity check */
+ if (bd->pdu.mpdu_data_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
+ goto drop;
+
+ skb_put(skb, bd->pdu.mpdu_data_off + bd->pdu.mpdu_len);
+ skb_pull(skb, bd->pdu.mpdu_data_off);
+
+ /* Only set status for first chained BD (with mac header) */
+ goto done;
+ }
+
+ if (bd->pdu.mpdu_header_off < sizeof(*bd) ||
+ bd->pdu.mpdu_header_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
+ goto drop;
+
skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_header_off);
@@ -291,6 +353,22 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();
+ if (bd->scan_learn) {
+ /* If packet originates from hardware scanning, extract the
+ * band/channel from bd descriptor.
+ */
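+ /* rx_ch holds the low 4 bits of the channel index; reserved0 supplies the 5th (high) bit */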
+ u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
+
+ if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
+ status.band = NL80211_BAND_5GHZ;
+ status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
+ status.band);
+ } else {
+ status.band = NL80211_BAND_2GHZ;
+ status.freq = ieee80211_channel_to_frequency(hwch, status.band);
+ }
+ }
+
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
@@ -305,9 +383,37 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
(char *)skb->data, skb->len);
}
+done:
+ /* Chained AMSDU ? slow path */
+ if (unlikely(bd->asf && !(bd->lsf && bd->esf))) {
+ if (bd->esf && !skb_queue_empty(&wcn->amsdu)) {
+ wcn36xx_err("Discarding non complete chain");
+ __skb_queue_purge_irq(&wcn->amsdu);
+ }
+
+ __skb_queue_tail(&wcn->amsdu, skb);
+
+ if (!bd->lsf)
+ return 0; /* Not the last AMSDU, wait for more */
+
+ skb = wcn36xx_unchain_msdu(&wcn->amsdu);
+ if (!skb)
+ goto drop;
+ }
+
ieee80211_rx_irqsafe(wcn->hw, skb);
return 0;
+
+drop: /* drop everything */
+ wcn36xx_err("Drop frame! skb:%p len:%u hoff:%u doff:%u asf=%u esf=%u lsf=%u\n",
+ skb, bd->pdu.mpdu_len, bd->pdu.mpdu_header_off,
+ bd->pdu.mpdu_data_off, bd->asf, bd->esf, bd->lsf);
+
+ dev_kfree_skb_irq(skb);
+ __skb_queue_purge_irq(&wcn->amsdu);
+
+ return -EINVAL;
}
static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
@@ -321,8 +427,6 @@ static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
- /* Use seq number generated by mac80211 */
- bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
@@ -419,6 +523,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
tid = ieee80211_get_tid(hdr);
/* TID->QID is one-to-one mapping */
bd->queue_id = tid;
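+ /* Let the DPU fill in the sequence number for QoS data frames */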
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
+ } else {
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
}
if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT ||
@@ -429,6 +536,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
/* Don't use a regular queue for null packet (no ampdu) */
bd->queue_id = WCN36XX_TX_U_WQ_ID;
+ bd->bd_rate = WCN36XX_BD_RATE_CTRL;
+ if (ieee80211_is_qos_nullfunc(hdr->frame_control))
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
if (bcast) {
@@ -488,6 +598,8 @@ static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
bd->queue_id = WCN36XX_TX_U_WQ_ID;
*vif_priv = __vif_priv;
+ bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
+
wcn36xx_set_tx_pdu(bd,
ieee80211_is_data_qos(hdr->frame_control) ?
sizeof(struct ieee80211_qos_hdr) :
@@ -502,10 +614,11 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- unsigned long flags;
bool is_low = ieee80211_is_data(hdr->frame_control);
bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
+ bool ack_ind = (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) &&
+ !(info->flags & IEEE80211_TX_CTL_NO_ACK);
struct wcn36xx_tx_bd bd;
int ret;
@@ -521,30 +634,16 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.dpu_rf = WCN36XX_BMU_WQ_TX;
- if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
+ if (unlikely(ack_ind)) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
- spin_lock_irqsave(&wcn->dxe_lock, flags);
- if (wcn->tx_ack_skb) {
- spin_unlock_irqrestore(&wcn->dxe_lock, flags);
- wcn36xx_warn("tx_ack_skb already set\n");
- return -EINVAL;
- }
-
- wcn->tx_ack_skb = skb;
- spin_unlock_irqrestore(&wcn->dxe_lock, flags);
-
/* Only one at a time is supported by fw. Stop the TX queues
* until the ack status gets back.
*/
ieee80211_stop_queues(wcn->hw);
- /* TX watchdog if no TX irq or ack indication received */
- mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
-
/* Request ack indication from the firmware */
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- bd.tx_comp = 1;
+ bd.tx_comp = 1;
}
/* Data frames served first*/
@@ -558,14 +657,8 @@ int wcn36xx_start_tx(struct wcn36xx *wcn,
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
- if (ret && (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)) {
- /* If the skb has not been transmitted,
- * don't keep a reference to it.
- */
- spin_lock_irqsave(&wcn->dxe_lock, flags);
- wcn->tx_ack_skb = NULL;
- spin_unlock_irqrestore(&wcn->dxe_lock, flags);
-
+ if (unlikely(ret && ack_ind)) {
+ /* If the skb has not been transmitted, resume TX queue */
ieee80211_wake_queues(wcn->hw);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.h b/drivers/net/wireless/ath/wcn36xx/txrx.h
index 032216e82b2b..b54311ffde9c 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.h
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.h
@@ -110,7 +110,8 @@ struct wcn36xx_rx_bd {
/* 0x44 */
u32 exp_seq_num:12;
u32 cur_seq_num:12;
- u32 fr_type_subtype:8;
+ u32 rf_band:2;
+ u32 fr_type_subtype:6;
/* 0x48 */
u32 msdu_size:16;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index add6e527e833..1c8d918137da 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -128,7 +128,6 @@ struct wcn36xx_vif {
enum wcn36xx_hal_bss_type bss_type;
/* Power management */
- bool allow_bmps;
enum wcn36xx_power_state pw_state;
u8 bss_index;
@@ -151,6 +150,8 @@ struct wcn36xx_vif {
} rekey_data;
struct list_head sta_list;
+
+ int bmps_fail_ct;
};
/**
@@ -269,6 +270,9 @@ struct wcn36xx {
struct sk_buff *tx_ack_skb;
struct timer_list tx_ack_timer;
+ /* For A-MSDU re-aggregation */
+ struct sk_buff_head amsdu;
+
/* RF module */
unsigned rf_id;
@@ -276,7 +280,6 @@ struct wcn36xx {
/* Debug file system entry */
struct wcn36xx_dfs_entry dfs;
#endif /* CONFIG_WCN36XX_DEBUGFS */
-
};
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 1ff2679963f0..764d1d14132b 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -723,11 +723,13 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
ndev = vif_to_ndev(vif);
ether_addr_copy(ndev->perm_addr, ndev_main->perm_addr);
if (is_valid_ether_addr(params->macaddr)) {
- ether_addr_copy(ndev->dev_addr, params->macaddr);
+ eth_hw_addr_set(ndev, params->macaddr);
} else {
- ether_addr_copy(ndev->dev_addr, ndev_main->perm_addr);
- ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << vif->mid)) |
- 0x2; /* locally administered */
+ u8 addr[ETH_ALEN];
+
+ ether_addr_copy(addr, ndev_main->perm_addr);
+ addr[0] = (addr[0] ^ (1 << vif->mid)) | 0x2; /* locally administered */
+ eth_hw_addr_set(ndev, addr);
}
wdev = vif_to_wdev(vif);
ether_addr_copy(wdev->address, ndev->dev_addr);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 3ba5b2550a8c..7da87c9f363f 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -1358,7 +1358,7 @@ static int wil_get_bl_info(struct wil6210_priv *wil)
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
- ether_addr_copy(ndev->dev_addr, mac);
+ eth_hw_addr_set(ndev, mac);
if (rf_status) {/* bad RF cable? */
wil_err(wil, "RF communication error 0x%04x",
@@ -1431,7 +1431,7 @@ static int wil_get_otp_info(struct wil6210_priv *wil)
ether_addr_copy(ndev->perm_addr, mac);
ether_addr_copy(wiphy->perm_addr, mac);
if (!is_valid_ether_addr(ndev->dev_addr))
- ether_addr_copy(ndev->dev_addr, mac);
+ eth_hw_addr_set(ndev, mac);
return 0;
}
@@ -1609,7 +1609,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
struct net_device *ndev = wil->main_ndev;
ether_addr_copy(ndev->perm_addr, mac);
- ether_addr_copy(ndev->dev_addr, ndev->perm_addr);
+ eth_hw_addr_set(ndev, ndev->perm_addr);
return 0;
}
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 30392eb1cbbd..11946ecd0b99 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -1341,7 +1341,7 @@ struct wil6210_priv *wil_cfg80211_init(struct device *dev);
void wil_cfg80211_deinit(struct wil6210_priv *wil);
void wil_p2p_wdev_free(struct wil6210_priv *wil);
-int wmi_set_mac_address(struct wil6210_priv *wil, void *addr);
+int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr);
int wmi_pcp_start(struct wil6210_vif *vif, int bi, u8 wmi_nettype, u8 chan,
u8 edmg_chan, u8 hidden_ssid, u8 is_go);
int wmi_pcp_stop(struct wil6210_vif *vif);
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 2dc8406736f4..dd8abbb28849 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -2097,7 +2097,7 @@ int wmi_echo(struct wil6210_priv *wil)
WIL_WMI_CALL_GENERAL_TO_MS);
}
-int wmi_set_mac_address(struct wil6210_priv *wil, void *addr)
+int wmi_set_mac_address(struct wil6210_priv *wil, const void *addr)
{
struct wil6210_vif *vif = ndev_to_vif(wil->main_ndev);
struct wmi_set_mac_address_cmd cmd;
diff --git a/drivers/net/wireless/atmel/atmel.c b/drivers/net/wireless/atmel/atmel.c
index febce4e8b3dd..35c2e798d98b 100644
--- a/drivers/net/wireless/atmel/atmel.c
+++ b/drivers/net/wireless/atmel/atmel.c
@@ -600,7 +600,7 @@ static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index,
static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
u16 data);
static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
- u8 *data, int data_len);
+ const u8 *data, int data_len);
static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
u8 *data, int data_len);
static void atmel_scan(struct atmel_private *priv, int specific_ssid);
@@ -1296,7 +1296,7 @@ static int atmel_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
- memcpy (dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return atmel_open(dev);
}
@@ -3669,6 +3669,7 @@ static int probe_atmel_card(struct net_device *dev)
{
int rc = 0;
struct atmel_private *priv = netdev_priv(dev);
+ u8 addr[ETH_ALEN] = {};
/* reset pccard */
if (priv->bus_type == BUS_TYPE_PCCARD)
@@ -3693,7 +3694,9 @@ static int probe_atmel_card(struct net_device *dev)
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to boot MAC address reader.\n", dev->name);
} else {
- atmel_copy_to_host(dev, dev->dev_addr, atmel_read16(dev, MR2), 6);
+
+ atmel_copy_to_host(dev, addr, atmel_read16(dev, MR2), 6);
+ eth_hw_addr_set(dev, addr);
/* got address, now squash it again until the network
interface is opened */
if (priv->bus_type == BUS_TYPE_PCCARD)
@@ -3705,7 +3708,8 @@ static int probe_atmel_card(struct net_device *dev)
/* Mac address easy in this case. */
priv->card_type = CARD_TYPE_PARALLEL_FLASH;
atmel_write16(dev, BSR, 1);
- atmel_copy_to_host(dev, dev->dev_addr, 0xc000, 6);
+ atmel_copy_to_host(dev, addr, 0xc000, 6);
+ eth_hw_addr_set(dev, addr);
atmel_write16(dev, BSR, 0x200);
rc = 1;
} else {
@@ -3713,7 +3717,8 @@ static int probe_atmel_card(struct net_device *dev)
for the Mac Address */
priv->card_type = CARD_TYPE_SPI_FLASH;
if (atmel_wakeup_firmware(priv) == 0) {
- atmel_get_mib(priv, Mac_Address_Mib_Type, 0, dev->dev_addr, 6);
+ atmel_get_mib(priv, Mac_Address_Mib_Type, 0, addr, 6);
+ eth_hw_addr_set(dev, addr);
/* got address, now squash it again until the network
interface is opened */
@@ -3730,7 +3735,7 @@ static int probe_atmel_card(struct net_device *dev)
0x00, 0x04, 0x25, 0x00, 0x00, 0x00
};
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
- memcpy(dev->dev_addr, default_mac, ETH_ALEN);
+ eth_hw_addr_set(dev, default_mac);
}
}
@@ -4103,7 +4108,7 @@ static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
}
static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
- u8 *data, int data_len)
+ const u8 *data, int data_len)
{
struct get_set_mib m;
m.type = type;
diff --git a/drivers/net/wireless/broadcom/b43/phy_g.c b/drivers/net/wireless/broadcom/b43/phy_g.c
index d5a1a5c58236..ac72ca39e409 100644
--- a/drivers/net/wireless/broadcom/b43/phy_g.c
+++ b/drivers/net/wireless/broadcom/b43/phy_g.c
@@ -2297,7 +2297,7 @@ static u8 b43_gphy_aci_scan(struct b43_wldev *dev)
b43_phy_mask(dev, B43_PHY_G_CRS, 0x7FFF);
b43_set_all_gains(dev, 3, 8, 1);
- start = (channel - 5 > 0) ? channel - 5 : 1;
+ start = (channel > 5) ? channel - 5 : 1;
end = (channel + 5 < 14) ? channel + 5 : 13;
for (i = start; i <= end; i++) {
diff --git a/drivers/net/wireless/broadcom/b43legacy/radio.c b/drivers/net/wireless/broadcom/b43legacy/radio.c
index 06891b4f837b..fdf78c10a05c 100644
--- a/drivers/net/wireless/broadcom/b43legacy/radio.c
+++ b/drivers/net/wireless/broadcom/b43legacy/radio.c
@@ -283,7 +283,7 @@ u8 b43legacy_radio_aci_scan(struct b43legacy_wldev *dev)
& 0x7FFF);
b43legacy_set_all_gains(dev, 3, 8, 1);
- start = (channel - 5 > 0) ? channel - 5 : 1;
+ start = (channel > 5) ? channel - 5 : 1;
end = (channel + 5 < 14) ? channel + 5 : 13;
for (i = start; i <= end; i++) {
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index 9db12ffd2ff8..fb727778312c 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1783,8 +1783,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
val = WPA_AUTH_PSK;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & (WPA2_AUTH_PSK | WPA2_AUTH_UNSPECIFIED)) {
@@ -1816,8 +1816,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
profile->is_ft = true;
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
} else if (val & WPA3_AUTH_SAE_PSK) {
@@ -1838,8 +1838,8 @@ brcmf_set_key_mgmt(struct net_device *ndev, struct cfg80211_connect_params *sme)
}
break;
default:
- bphy_err(drvr, "invalid cipher group (%d)\n",
- sme->crypto.cipher_group);
+ bphy_err(drvr, "invalid akm suite (%d)\n",
+ sme->crypto.akm_suites[0]);
return -EINVAL;
}
}
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
index db5f8535fdb5..fed9cd5f29a2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
@@ -244,7 +244,7 @@ static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
} else {
brcmf_dbg(TRACE, "updated to %pM\n", sa->sa_data);
memcpy(ifp->mac_addr, sa->sa_data, ETH_ALEN);
- memcpy(ifp->ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ifp->ndev, ifp->mac_addr);
}
return err;
}
@@ -655,7 +655,7 @@ int brcmf_net_attach(struct brcmf_if *ifp, bool locked)
ndev->ethtool_ops = &brcmf_ethtool_ops;
/* set the mac address & netns */
- memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, ifp->mac_addr);
dev_net_set(ndev, wiphy_net(cfg_to_wiphy(drvr->config)));
INIT_WORK(&ifp->multicast_work, _brcmf_set_multicast_list);
@@ -830,7 +830,7 @@ static int brcmf_net_p2p_attach(struct brcmf_if *ifp)
ndev->netdev_ops = &brcmf_netdev_ops_p2p;
/* set the mac address */
- memcpy(ndev->dev_addr, ifp->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, ifp->mac_addr);
if (register_netdev(ndev) != 0) {
bphy_err(drvr, "couldn't register the p2p net device\n");
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
index 6d5188b78f2d..0af452dca766 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c
@@ -76,6 +76,16 @@ static const struct dmi_system_id dmi_platform_data[] = {
.driver_data = (void *)&acepc_t8_data,
},
{
+ /* Cyberbook T116 rugged tablet */
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "20170531"),
+ },
+ /* The factory image nvram file is identical to the ACEPC T8 one */
+ .driver_data = (void *)&acepc_t8_data,
+ },
+ {
/* Match for the GPDwin which unfortunately uses somewhat
* generic dmi strings, which is why we test for 4 strings.
* Comparing against 23 other byt/cht boards, board_vendor
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
index 2f7bc3a70c65..513c7e6421b2 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
@@ -29,7 +29,7 @@ static int brcmf_of_get_country_codes(struct device *dev,
return (count == -EINVAL) ? 0 : count;
}
- cc = devm_kzalloc(dev, sizeof(*cc) + count * sizeof(*cce), GFP_KERNEL);
+ cc = devm_kzalloc(dev, struct_size(cc, table, count), GFP_KERNEL);
if (!cc)
return -ENOMEM;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
index 9ac0d8c73d5a..4735063e4c03 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/p2p.c
@@ -2125,7 +2125,7 @@ static int brcmf_p2p_disable_p2p_if(struct brcmf_cfg80211_vif *vif)
struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
struct net_device *pri_ndev = cfg_to_ndev(cfg);
struct brcmf_if *ifp = netdev_priv(pri_ndev);
- u8 *addr = vif->wdev.netdev->dev_addr;
+ const u8 *addr = vif->wdev.netdev->dev_addr;
return brcmf_fil_iovar_data_set(ifp, "p2p_ifdis", addr, ETH_ALEN);
}
@@ -2135,7 +2135,7 @@ static int brcmf_p2p_release_p2p_if(struct brcmf_cfg80211_vif *vif)
struct brcmf_cfg80211_info *cfg = wdev_to_cfg(&vif->wdev);
struct net_device *pri_ndev = cfg_to_ndev(cfg);
struct brcmf_if *ifp = netdev_priv(pri_ndev);
- u8 *addr = vif->wdev.netdev->dev_addr;
+ const u8 *addr = vif->wdev.netdev->dev_addr;
return brcmf_fil_iovar_data_set(ifp, "p2p_ifdel", addr, ETH_ALEN);
}
diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c
index 65dd8cff1b01..45594f003ef7 100644
--- a/drivers/net/wireless/cisco/airo.c
+++ b/drivers/net/wireless/cisco/airo.c
@@ -1109,7 +1109,7 @@ struct airo_info;
static int get_dec_u16(char *buffer, int *start, int limit);
static void OUT4500(struct airo_info *, u16 reg, u16 value);
static unsigned short IN4500(struct airo_info *, u16 reg);
-static u16 setup_card(struct airo_info*, u8 *mac, int lock);
+static u16 setup_card(struct airo_info*, struct net_device *dev, int lock);
static int enable_MAC(struct airo_info *ai, int lock);
static void disable_MAC(struct airo_info *ai, int lock);
static void enable_interrupts(struct airo_info*);
@@ -2337,9 +2337,9 @@ static int airo_set_mac_address(struct net_device *dev, void *p)
disable_MAC(ai, 1);
writeConfigRid (ai, 1);
enable_MAC(ai, 1);
- memcpy (ai->dev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(ai->dev, addr->sa_data);
if (ai->wifidev)
- memcpy (ai->wifidev->dev_addr, addr->sa_data, dev->addr_len);
+ dev_addr_set(ai->wifidev, addr->sa_data);
return 0;
}
@@ -2854,7 +2854,7 @@ static struct net_device *_init_airo_card(unsigned short irq, int port,
}
if (probe) {
- if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
+ if (setup_card(ai, dev, 1) != SUCCESS) {
airo_print_err(dev->name, "MAC could not be enabled");
rc = -EIO;
goto err_out_map;
@@ -2972,7 +2972,7 @@ int reset_airo_card(struct net_device *dev)
if (reset_card (dev, 1))
return -1;
- if (setup_card(ai, dev->dev_addr, 1) != SUCCESS) {
+ if (setup_card(ai, dev, 1) != SUCCESS) {
airo_print_err(dev->name, "MAC could not be enabled");
return -1;
}
@@ -3817,7 +3817,8 @@ static inline void set_auth_type(struct airo_info *local, int auth_type)
local->last_auth = auth_type;
}
-static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int lock)
+static int noinline_for_stack airo_readconfig(struct airo_info *ai,
+ struct net_device *dev, int lock)
{
int i, status;
/* large variables, so don't inline this function,
@@ -3861,9 +3862,7 @@ static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int
}
/* Save off the MAC */
- for (i = 0; i < ETH_ALEN; i++) {
- mac[i] = ai->config.macAddr[i];
- }
+ eth_hw_addr_set(dev, ai->config.macAddr);
/* Check to see if there are any insmod configured
rates to add */
@@ -3879,7 +3878,7 @@ static int noinline_for_stack airo_readconfig(struct airo_info *ai, u8 *mac, int
}
-static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
+static u16 setup_card(struct airo_info *ai, struct net_device *dev, int lock)
{
Cmd cmd;
Resp rsp;
@@ -3925,7 +3924,7 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
if (lock)
up(&ai->sem);
if (ai->config.len == 0) {
- status = airo_readconfig(ai, mac, lock);
+ status = airo_readconfig(ai, dev, lock);
if (status != SUCCESS)
return ERROR;
}
@@ -5654,7 +5653,7 @@ static int __maybe_unused airo_pci_resume(struct device *dev_d)
if (prev_state != PCI_D1) {
reset_card(dev, 0);
mpi_init_descriptors(ai);
- setup_card(ai, dev->dev_addr, 0);
+ setup_card(ai, dev, 0);
clear_bit(FLAG_RADIO_OFF, &ai->flags);
clear_bit(FLAG_PENDING_XMIT, &ai->flags);
} else {
@@ -7534,7 +7533,7 @@ static int airo_config_commit(struct net_device *dev,
readSsidRid(local, &SSID_rid);
if (test_bit(FLAG_MPI,&local->flags))
- setup_card(local, dev->dev_addr, 1);
+ setup_card(local, dev, 1);
else
reset_airo_card(dev);
disable_MAC(local, 1);
@@ -8208,7 +8207,7 @@ static int flashrestart(struct airo_info *ai, struct net_device *dev)
if (status != SUCCESS)
return status;
}
- status = setup_card(ai, dev->dev_addr, 1);
+ status = setup_card(ai, dev, 1);
if (!test_bit(FLAG_MPI,&ai->flags))
for (i = 0; i < MAX_FIDS; i++) {
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 47eb89b773cf..2ace2b27ecad 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -4685,7 +4685,7 @@ static int ipw2100_read_mac_address(struct ipw2100_priv *priv)
return -EIO;
}
- memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN);
+ eth_hw_addr_set(priv->net_dev, addr);
IPW_DEBUG_INFO("card MAC is %pM\n", priv->net_dev->dev_addr);
return 0;
@@ -4712,7 +4712,7 @@ static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode)
if (priv->config & CFG_CUSTOM_MAC) {
memcpy(cmd.host_command_parameters, priv->mac_addr, ETH_ALEN);
- memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(priv->net_dev, priv->mac_addr);
} else
memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr,
ETH_ALEN);
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
index ada6ce32c1f1..23037bfc9e4c 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c
@@ -199,7 +199,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
struct clx2_tx_queue *txq, int qindex);
static int ipw_queue_reset(struct ipw_priv *priv);
-static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
int len, int sync);
static void ipw_tx_queue_free(struct ipw_priv *);
@@ -2264,7 +2264,7 @@ static int ipw_send_cmd_simple(struct ipw_priv *priv, u8 command)
}
static int ipw_send_cmd_pdu(struct ipw_priv *priv, u8 command, u8 len,
- void *data)
+ const void *data)
{
struct host_cmd cmd = {
.cmd = command,
@@ -3777,7 +3777,7 @@ static int ipw_queue_tx_init(struct ipw_priv *priv,
dma_alloc_coherent(&dev->dev, sizeof(q->bd[0]) * count,
&q->q.dma_addr, GFP_KERNEL);
if (!q->bd) {
- IPW_ERROR("pci_alloc_consistent(%zd) failed\n",
+ IPW_ERROR("dma_alloc_coherent(%zd) failed\n",
sizeof(q->bd[0]) * count);
kfree(q->txb);
q->txb = NULL;
@@ -5033,7 +5033,7 @@ static int ipw_queue_tx_reclaim(struct ipw_priv *priv,
return used;
}
-static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, void *buf,
+static int ipw_queue_tx_hcmd(struct ipw_priv *priv, int hcmd, const void *buf,
int len, int sync)
{
struct clx2_tx_queue *txq = &priv->txq_cmd;
@@ -11185,7 +11185,7 @@ static int ipw_up(struct ipw_priv *priv)
ipw_init_ordinals(priv);
if (!(priv->config & CFG_CUSTOM_MAC))
eeprom_parse_mac(priv, priv->mac_addr);
- memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(priv->net_dev, priv->mac_addr);
ipw_set_geo(priv);
@@ -11542,7 +11542,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
priv->prom_priv->priv = priv;
strcpy(priv->prom_net_dev->name, "rtap%d");
- memcpy(priv->prom_net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
+ eth_hw_addr_set(priv->prom_net_dev, priv->mac_addr);
priv->prom_net_dev->type = ARPHRD_IEEE80211_RADIOTAP;
priv->prom_net_dev->netdev_ops = &ipw_prom_netdev_ops;
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
index 98fe62737888..55cac934f4ee 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h
@@ -1945,7 +1945,7 @@ struct host_cmd {
u8 cmd;
u8 len;
u16 reserved;
- u32 *param;
+ const u32 *param;
} __packed; /* XXX */
struct cmdlog_host_cmd {
diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
index 45abb25b65a9..bd4e7d752958 100644
--- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c
@@ -3819,7 +3819,6 @@ il3945_pci_remove(struct pci_dev *pdev)
il3945_unset_hw_params(il);
/*netif_stop_queue(dev); */
- flush_workqueue(il->workqueue);
/* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
* il->workqueue... so we can't take down the workqueue
diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
index 0223532fd56a..d93900e62e3d 100644
--- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c
@@ -6731,7 +6731,6 @@ il4965_pci_remove(struct pci_dev *pdev)
il_eeprom_free(il);
/*netif_stop_queue(dev); */
- flush_workqueue(il->workqueue);
/* ieee80211_unregister_hw calls il_mac_stop, which flushes
* il->workqueue... so we can't take down the workqueue
diff --git a/drivers/net/wireless/intel/iwlegacy/commands.h b/drivers/net/wireless/intel/iwlegacy/commands.h
index 89c6671b32bc..4a97310f8fee 100644
--- a/drivers/net/wireless/intel/iwlegacy/commands.h
+++ b/drivers/net/wireless/intel/iwlegacy/commands.h
@@ -1408,8 +1408,10 @@ struct il3945_tx_cmd {
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
- struct ieee80211_hdr hdr[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile
index d86918d162aa..0d4656efe908 100644
--- a/drivers/net/wireless/intel/iwlwifi/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/Makefile
@@ -15,7 +15,7 @@ iwlwifi-objs += iwl-dbg-tlv.o
iwlwifi-objs += iwl-trans.o
iwlwifi-objs += queue/tx.o
-iwlwifi-objs += fw/img.o fw/notif-wait.o
+iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o
iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o
iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o
iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
index 44c4fe975390..116defb15afb 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2020 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
index df6ac00340b2..ab2038a3fbe2 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2020 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index d8231cc821ae..1572097bccf1 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -9,7 +9,7 @@
#include "iwl-prph.h"
/* Highest firmware API version supported */
-#define IWL_22000_UCODE_API_MAX 66
+#define IWL_22000_UCODE_API_MAX 67
/* Lowest firmware API version supported */
#define IWL_22000_UCODE_API_MIN 39
@@ -53,6 +53,9 @@
#define IWL_BZ_A_GF_A_FW_PRE "iwlwifi-bz-a0-gf-a0-"
#define IWL_BZ_A_GF4_A_FW_PRE "iwlwifi-bz-a0-gf4-a0-"
#define IWL_BZ_A_MR_A_FW_PRE "iwlwifi-bz-a0-mr-a0-"
+#define IWL_BZ_A_FM_A_FW_PRE "iwlwifi-bz-a0-fm-a0-"
+#define IWL_GL_A_FM_A_FW_PRE "iwlwifi-gl-a0-fm7-a0-"
+
#define IWL_QU_B_HR_B_MODULE_FIRMWARE(api) \
IWL_QU_B_HR_B_FW_PRE __stringify(api) ".ucode"
@@ -106,6 +109,10 @@
IWL_BZ_A_GF4_A_FW_PRE __stringify(api) ".ucode"
#define IWL_BZ_A_MR_A_MODULE_FIRMWARE(api) \
IWL_BZ_A_MR_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_BZ_A_FM_A_MODULE_FIRMWARE(api) \
+ IWL_BZ_A_FM_A_FW_PRE __stringify(api) ".ucode"
+#define IWL_GL_A_FM_A_MODULE_FIRMWARE(api) \
+ IWL_GL_A_FM_A_FW_PRE __stringify(api) ".ucode"
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_32K,
@@ -355,7 +362,7 @@ const struct iwl_cfg_trans_params iwl_so_long_latency_trans_cfg = {
.base_params = &iwl_ax210_base_params,
.umac_prph_offset = 0x300000,
.integrated = true,
- /* TODO: the following values need to be checked */
+ .low_latency_xtal = true,
.xtal_latency = 12000,
.ltr_delay = IWL_CFG_TRANS_LTR_DELAY_2500US,
};
@@ -469,6 +476,14 @@ const char iwl_ax210_killer_1675w_name[] =
"Killer(R) Wi-Fi 6E AX1675w 160MHz Wireless Network Adapter (210D2W)";
const char iwl_ax210_killer_1675x_name[] =
"Killer(R) Wi-Fi 6E AX1675x 160MHz Wireless Network Adapter (210NGW)";
+const char iwl_ax211_killer_1675s_name[] =
+ "Killer(R) Wi-Fi 6E AX1675s 160MHz Wireless Network Adapter (211NGW)";
+const char iwl_ax211_killer_1675i_name[] =
+ "Killer(R) Wi-Fi 6E AX1675i 160MHz Wireless Network Adapter (211NGW)";
+const char iwl_ax411_killer_1690s_name[] =
+ "Killer(R) Wi-Fi 6E AX1690s 160MHz Wireless Network Adapter (411D2W)";
+const char iwl_ax411_killer_1690i_name[] =
+ "Killer(R) Wi-Fi 6E AX1690i 160MHz Wireless Network Adapter (411NGW)";
const struct iwl_cfg iwl_qu_b0_hr1_b0 = {
.fw_name_pre = IWL_QU_B_HR_B_FW_PRE,
@@ -850,6 +865,20 @@ const struct iwl_cfg iwl_cfg_bz_a0_mr_a0 = {
.num_rbds = IWL_NUM_RBDS_AX210_HE,
};
+const struct iwl_cfg iwl_cfg_bz_a0_fm_a0 = {
+ .fw_name_pre = IWL_BZ_A_FM_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
+const struct iwl_cfg iwl_cfg_gl_a0_fm_a0 = {
+ .fw_name_pre = IWL_GL_A_FM_A_FW_PRE,
+ .uhb_supported = true,
+ IWL_DEVICE_BZ,
+ .num_rbds = IWL_NUM_RBDS_AX210_HE,
+};
+
MODULE_FIRMWARE(IWL_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_C_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
@@ -876,3 +905,5 @@ MODULE_FIRMWARE(IWL_BZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_GF4_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_BZ_A_MR_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_BZ_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_GL_A_FM_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
index 6cdd7d983bda..e2e23d2bc1fe 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2020 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
index 541a3ec85777..20929e59c2f4 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 - 2020 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index 1276df1c7a55..abb8696ba294 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014 Intel Corporation
+ * Copyright (C) 2005-2014, 2021 Intel Corporation
*/
#ifndef __iwl_agn_h__
#define __iwl_agn_h__
@@ -398,8 +398,10 @@ do { \
if (!iwl_is_rfkill((m))) \
IWL_ERR(m, fmt, ##args); \
else \
- __iwl_err((m)->dev, true, \
- !iwl_have_debug_level(IWL_DL_RADIO), \
+ __iwl_err((m)->dev, \
+ iwl_have_debug_level(IWL_DL_RADIO) ? \
+ IWL_ERR_MODE_RFKILL : \
+ IWL_ERR_MODE_TRACE_ONLY, \
fmt, ##args); \
} while (0)
#else
@@ -408,7 +410,8 @@ do { \
if (!iwl_is_rfkill((m))) \
IWL_ERR(m, fmt, ##args); \
else \
- __iwl_err((m)->dev, true, true, fmt, ##args); \
+ __iwl_err((m)->dev, IWL_ERR_MODE_TRACE_ONLY, \
+ fmt, ##args); \
} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
index 235c7a2e3483..75a4b8e26232 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h
@@ -1251,8 +1251,10 @@ struct iwl_tx_cmd {
* MAC header goes here, followed by 2 bytes padding if MAC header
* length is 26 or 30 bytes, followed by payload data
*/
- u8 payload[0];
- struct ieee80211_hdr hdr[];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed;
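The union of DECLARE_FLEX_ARRAY() entries above works around C's rule that a flexible array member cannot be the sole member of a struct or union. A rough standalone illustration of the idea, relying on GNU C extensions as the kernel does (this is not the macro's exact expansion):

#include <stdint.h>

struct tx_like {
	uint16_t len;
	union {
		/* a bare "uint8_t payload[];" here would be rejected */
		struct { struct { } __pad_p; uint8_t payload[]; };
		struct { struct { } __pad_h; uint32_t hdr_words[]; };
	};
};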
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
index 911049201838..b246dbd371b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/debugfs.c
@@ -3,10 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2018 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include <linux/slab.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
index 4bd792c06ff6..bbd574091201 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014, 2020 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
/*
* Please use this file (dev.h) for driver implementation definitions.
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
index c3e25885d194..39e40901fa46 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/units.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.c b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
index e8a4d604b910..71f67a019cf6 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/led.h b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
index 6fe20180dc87..5038fc378a1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/led.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/led.h
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#ifndef __iwl_leds_h__
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
index 3b937a7dd403..40d790b36d85 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/lib.c
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
index 75e7665773c5..754876cd27ce 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c
@@ -6,11 +6,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
index cc7b69fd14d3..fbd57a2b2bd5 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c
@@ -6,11 +6,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -52,7 +47,6 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi Link AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
/* Please keep this array *SORTED* by hex value.
@@ -1525,7 +1519,6 @@ static void iwl_op_mode_dvm_stop(struct iwl_op_mode *op_mode)
kfree(priv->nvm_data);
/*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
/* ieee80211_unregister_hw calls iwlagn_mac_stop, which flushes
* priv->workqueue... so we can't take down the workqueue
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.c b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
index 93ef023905c9..6d16a7105656 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.c
@@ -6,10 +6,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/power.h b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
index 3f8db1fc4b59..f38201ce1e99 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/power.h
@@ -5,10 +5,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#ifndef __iwl_power_setting_h__
#define __iwl_power_setting_h__
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
index 548540dd0c0f..b7c8b209bfea 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 - 2020 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/skbuff.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
index 68a840d739e8..0b47f1993c5d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.h
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#ifndef __iwl_agn_rs_h__
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
index 3cd7b423c588..db0c41bbeb0e 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rx.c
@@ -7,11 +7,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
index 12a3d464ae64..70338bc7bb54 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/rxon.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/etherdevice.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
index c4ecf6ed2186..2d38227dfdd2 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/scan.c
@@ -3,10 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
index ddc14059b07d..8f7a0f36c276 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/sta.c
@@ -5,11 +5,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/etherdevice.h>
#include <net/mac80211.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
index 2684a924ba57..43e8d04d5a8b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.c
@@ -6,10 +6,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
index 3b0ff458a158..7ace052fc78a 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h
@@ -5,10 +5,6 @@
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*****************************************************************************/
#ifndef __iwl_tt_setting_h__
#define __iwl_tt_setting_h__
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
index 847b8e07f81c..60a7b61d59aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
index 24194c791218..4b27a53d0bb4 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/ucode.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/kernel.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
index 1efac0b2a94d..bf431fa4fe81 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c
@@ -184,9 +184,11 @@ int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
}
IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u32);
-union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
- union acpi_object *data,
- int data_size, int *tbl_rev)
+union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size,
+ int max_data_size,
+ int *tbl_rev)
{
int i;
union acpi_object *wifi_pkg;
@@ -196,7 +198,7 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
* describes the domain, and one more entry, otherwise there's
* no point in reading it.
*/
- if (WARN_ON_ONCE(data_size < 2))
+ if (WARN_ON_ONCE(min_data_size < 2 || min_data_size > max_data_size))
return ERR_PTR(-EINVAL);
/*
@@ -222,7 +224,8 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
/* skip entries that are not a package with the right size */
if (wifi_pkg->type != ACPI_TYPE_PACKAGE ||
- wifi_pkg->package.count != data_size)
+ wifi_pkg->package.count < min_data_size ||
+ wifi_pkg->package.count > max_data_size)
continue;
domain = &wifi_pkg->package.elements[0];
@@ -236,7 +239,7 @@ union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
found:
return wifi_pkg;
}
-IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg);
+IWL_EXPORT_SYMBOL(iwl_acpi_get_wifi_pkg_range);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt,
__le32 *block_list_array,
@@ -707,49 +710,103 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt)
{
union acpi_object *wifi_pkg, *data;
int i, j, k, ret, tbl_rev;
- int idx = 1; /* start from one to skip the domain */
- u8 num_bands;
+ u8 num_bands, num_profiles;
+ static const struct {
+ u8 revisions;
+ u8 bands;
+ u8 profiles;
+ u8 min_profiles;
+ } rev_data[] = {
+ {
+ .revisions = BIT(3),
+ .bands = ACPI_GEO_NUM_BANDS_REV2,
+ .profiles = ACPI_NUM_GEO_PROFILES_REV3,
+ .min_profiles = 3,
+ },
+ {
+ .revisions = BIT(2),
+ .bands = ACPI_GEO_NUM_BANDS_REV2,
+ .profiles = ACPI_NUM_GEO_PROFILES,
+ },
+ {
+ .revisions = BIT(0) | BIT(1),
+ .bands = ACPI_GEO_NUM_BANDS_REV0,
+ .profiles = ACPI_NUM_GEO_PROFILES,
+ },
+ };
+ int idx;
+ /* start from one to skip the domain */
+ int entry_idx = 1;
+
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES_REV3 != IWL_NUM_GEO_PROFILES_V3);
+ BUILD_BUG_ON(ACPI_NUM_GEO_PROFILES != IWL_NUM_GEO_PROFILES);
data = iwl_acpi_get_object(fwrt->dev, ACPI_WGDS_METHOD);
if (IS_ERR(data))
return PTR_ERR(data);
- /* start by trying to read revision 2 */
- wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE_REV2,
- &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 2) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
- }
-
- num_bands = ACPI_GEO_NUM_BANDS_REV2;
-
- goto read_table;
- }
-
- /* then try revision 0 (which is the same as 1) */
- wifi_pkg = iwl_acpi_get_wifi_pkg(fwrt->dev, data,
- ACPI_WGDS_WIFI_DATA_SIZE_REV0,
- &tbl_rev);
- if (!IS_ERR(wifi_pkg)) {
- if (tbl_rev != 0 && tbl_rev != 1) {
- ret = PTR_ERR(wifi_pkg);
- goto out_free;
+ /* read the highest revision we understand first */
+ for (idx = 0; idx < ARRAY_SIZE(rev_data); idx++) {
+ /* min_profiles != 0 requires num_profiles header */
+ u32 hdr_size = 1 + !!rev_data[idx].min_profiles;
+ u32 profile_size = ACPI_GEO_PER_CHAIN_SIZE *
+ rev_data[idx].bands;
+ u32 max_size = hdr_size + profile_size * rev_data[idx].profiles;
+ u32 min_size;
+
+ if (!rev_data[idx].min_profiles)
+ min_size = max_size;
+ else
+ min_size = hdr_size +
+ profile_size * rev_data[idx].min_profiles;
+
+ wifi_pkg = iwl_acpi_get_wifi_pkg_range(fwrt->dev, data,
+ min_size, max_size,
+ &tbl_rev);
+ if (!IS_ERR(wifi_pkg)) {
+ if (!(BIT(tbl_rev) & rev_data[idx].revisions))
+ continue;
+
+ num_bands = rev_data[idx].bands;
+ num_profiles = rev_data[idx].profiles;
+
+ if (rev_data[idx].min_profiles) {
+ /* read header that says # of profiles */
+ union acpi_object *entry;
+
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
+ if (entry->type != ACPI_TYPE_INTEGER ||
+ entry->integer.value > num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ num_profiles = entry->integer.value;
+
+ /*
+ * this also validates >= min_profiles since we
+ * otherwise wouldn't have gotten the data when
+ * looking up in ACPI
+ */
+ if (wifi_pkg->package.count !=
+ min_size + profile_size * num_profiles) {
+ ret = -EINVAL;
+ goto out_free;
+ }
+ }
+ goto read_table;
}
-
- num_bands = ACPI_GEO_NUM_BANDS_REV0;
-
- goto read_table;
}
- ret = PTR_ERR(wifi_pkg);
+ if (idx < ARRAY_SIZE(rev_data))
+ ret = PTR_ERR(wifi_pkg);
+ else
+ ret = -ENOENT;
goto out_free;
read_table:
fwrt->geo_rev = tbl_rev;
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (i = 0; i < num_profiles; i++) {
for (j = 0; j < ACPI_GEO_NUM_BANDS_REV2; j++) {
union acpi_object *entry;
@@ -762,7 +819,8 @@ read_table:
fwrt->geo_profiles[i].bands[j].max =
fwrt->geo_profiles[i].bands[1].max;
} else {
- entry = &wifi_pkg->package.elements[idx++];
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
entry->integer.value > U8_MAX) {
ret = -EINVAL;
@@ -779,7 +837,8 @@ read_table:
fwrt->geo_profiles[i].bands[j].chains[k] =
fwrt->geo_profiles[i].bands[1].chains[k];
} else {
- entry = &wifi_pkg->package.elements[idx++];
+ entry = &wifi_pkg->package.elements[entry_idx];
+ entry_idx++;
if (entry->type != ACPI_TYPE_INTEGER ||
entry->integer.value > U8_MAX) {
ret = -EINVAL;
@@ -803,10 +862,10 @@ IWL_EXPORT_SYMBOL(iwl_sar_get_wgds_table);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
{
/*
- * The GEO_TX_POWER_LIMIT command is not supported on earlier
- * firmware versions. Unfortunately, we don't have a TLV API
- * flag to rely on, so rely on the major version which is in
- * the first byte of ucode_ver. This was implemented
+ * The PER_CHAIN_LIMIT_OFFSET_CMD command is not supported on
+ * earlier firmware versions. Unfortunately, we don't have a
+ * TLV API flag to rely on, so rely on the major version which
+ * is in the first byte of ucode_ver. This was implemented
* initially on version 38 and then backported to 17. It was
* also backported to 29, but only for 7265D devices. The
* intention was to have it in 36 as well, but not all 8000
@@ -822,14 +881,15 @@ bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt)
IWL_EXPORT_SYMBOL(iwl_sar_geo_support);
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table, u32 n_bands)
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles)
{
int i, j;
if (!iwl_sar_geo_support(fwrt))
return -EOPNOTSUPP;
- for (i = 0; i < ACPI_NUM_GEO_PROFILES; i++) {
+ for (i = 0; i < n_profiles; i++) {
for (j = 0; j < n_bands; j++) {
struct iwl_per_chain_offset *chain =
&table[i * n_bands + j];
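For readers following the new WGDS parsing above: the pattern is to walk a table of known revisions from newest to oldest, derive the acceptable ACPI package size range for each, and accept the first match. A self-contained sketch of just that sizing logic, with the numbers mirroring rev_data[] (the ACPI lookup itself is the driver's and is not reproduced here):

#include <stdbool.h>
#include <stdio.h>

#define PER_CHAIN_SIZE 3	/* mirrors ACPI_GEO_PER_CHAIN_SIZE */

struct rev_info {
	unsigned char revisions;	/* bitmask of accepted table revisions */
	unsigned char bands;
	unsigned char profiles;
	unsigned char min_profiles;	/* 0: fixed-size table, no count header */
};

static const struct rev_info rev_data[] = {
	{ .revisions = 1 << 3, .bands = 3, .profiles = 8, .min_profiles = 3 },
	{ .revisions = 1 << 2, .bands = 3, .profiles = 3 },
	{ .revisions = (1 << 0) | (1 << 1), .bands = 2, .profiles = 3 },
};

/* acceptable ACPI package element count for one revision entry */
static bool size_ok(const struct rev_info *r, unsigned int count)
{
	unsigned int hdr = 1 + (r->min_profiles ? 1 : 0); /* domain (+ profile count) */
	unsigned int per_profile = PER_CHAIN_SIZE * r->bands;
	unsigned int max = hdr + per_profile * r->profiles;
	unsigned int min = r->min_profiles ?
			   hdr + per_profile * r->min_profiles : max;

	return count >= min && count <= max;
}

int main(void)
{
	/* a rev-3 style table carrying 4 of the up-to-8 profiles */
	printf("%d\n", size_ok(&rev_data[0], 2 + 4 * 9));
	return 0;
}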
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
index 16ed0995b51e..4aaa8a6b071b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h
@@ -29,6 +29,7 @@
#define ACPI_SAR_PROFILE_NUM 4
#define ACPI_NUM_GEO_PROFILES 3
+#define ACPI_NUM_GEO_PROFILES_REV3 8
#define ACPI_GEO_PER_CHAIN_SIZE 3
#define ACPI_SAR_NUM_CHAINS_REV0 2
@@ -59,13 +60,6 @@
#define ACPI_GEO_NUM_BANDS_REV2 3
#define ACPI_GEO_NUM_CHAINS 2
-#define ACPI_WGDS_WIFI_DATA_SIZE_REV0 (ACPI_NUM_GEO_PROFILES * \
- ACPI_GEO_NUM_BANDS_REV0 * \
- ACPI_GEO_PER_CHAIN_SIZE + 1)
-#define ACPI_WGDS_WIFI_DATA_SIZE_REV2 (ACPI_NUM_GEO_PROFILES * \
- ACPI_GEO_NUM_BANDS_REV2 * \
- ACPI_GEO_PER_CHAIN_SIZE + 1)
-
#define ACPI_WRDD_WIFI_DATA_SIZE 2
#define ACPI_SPLC_WIFI_DATA_SIZE 2
#define ACPI_ECKV_WIFI_DATA_SIZE 2
@@ -115,8 +109,10 @@ enum iwl_dsm_funcs_rev_0 {
DSM_FUNC_QUERY = 0,
DSM_FUNC_DISABLE_SRD = 1,
DSM_FUNC_ENABLE_INDONESIA_5G2 = 2,
+ DSM_FUNC_ENABLE_6E = 3,
DSM_FUNC_11AX_ENABLEMENT = 6,
- DSM_FUNC_ENABLE_UNII4_CHAN = 7
+ DSM_FUNC_ENABLE_UNII4_CHAN = 7,
+ DSM_FUNC_ACTIVATE_CHANNEL = 8
};
enum iwl_dsm_values_srd {
@@ -158,10 +154,11 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func,
int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
const guid_t *guid, u32 *value);
-union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
- union acpi_object *data,
- int data_size, int *tbl_rev);
-
+union acpi_object *iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size,
+ int max_data_size,
+ int *tbl_rev);
/**
* iwl_acpi_get_mcc - read MCC from ACPI, if available
*
@@ -198,7 +195,8 @@ int iwl_sar_get_wgds_table(struct iwl_fw_runtime *fwrt);
bool iwl_sar_geo_support(struct iwl_fw_runtime *fwrt);
int iwl_sar_geo_init(struct iwl_fw_runtime *fwrt,
- struct iwl_per_chain_offset *table, u32 n_bands);
+ struct iwl_per_chain_offset *table,
+ u32 n_bands, u32 n_profiles);
int iwl_acpi_get_tas(struct iwl_fw_runtime *fwrt, __le32 *block_list_array,
int *block_list_size);
@@ -230,10 +228,11 @@ static inline int iwl_acpi_get_dsm_u32(struct device *dev, int rev, int func,
return -ENOENT;
}
-static inline union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev,
- union acpi_object *data,
- int data_size,
- int *tbl_rev)
+static inline union acpi_object *
+iwl_acpi_get_wifi_pkg_range(struct device *dev,
+ union acpi_object *data,
+ int min_data_size, int max_data_size,
+ int *tbl_rev)
{
return ERR_PTR(-ENOENT);
}
@@ -293,4 +292,14 @@ static inline __le32 iwl_acpi_get_lari_config_bitmap(struct iwl_fw_runtime *fwrt
}
#endif /* CONFIG_ACPI */
+
+static inline union acpi_object *
+iwl_acpi_get_wifi_pkg(struct device *dev,
+ union acpi_object *data,
+ int data_size, int *tbl_rev)
+{
+ return iwl_acpi_get_wifi_pkg_range(dev, data, data_size, data_size,
+ tbl_rev);
+}
+
#endif /* __iwl_fw_acpi__ */
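One detail worth noting from acpi.h above: the old exact-size lookup survives as a static inline that simply forwards to the new ranged variant with min == max, so existing callers compile unchanged. The same pattern in isolation (names here are stand-ins, not the driver's API):

struct pkg;	/* opaque result type for the sketch */

struct pkg *find_pkg_range(void *data, int min_size, int max_size, int *rev);

/* old entry point kept as a thin wrapper: the range degenerates to one size */
static inline struct pkg *find_pkg(void *data, int size, int *rev)
{
	return find_pkg_range(data, size, size, rev);
}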
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
index 3ec82cae3981..1503119ea910 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h
@@ -624,7 +624,7 @@ struct iwl_wowlan_status_v6 {
} __packed; /* WOWLAN_STATUSES_API_S_VER_6 */
/**
- * struct iwl_wowlan_status - WoWLAN status
+ * struct iwl_wowlan_status_v7 - WoWLAN status
* @gtk: GTK data
* @igtk: IGTK data
* @replay_ctr: GTK rekey replay counter
@@ -693,49 +693,6 @@ struct iwl_wowlan_status_v9 {
u8 wake_packet[]; /* can be truncated from _length to _bufsize */
} __packed; /* WOWLAN_STATUSES_RSP_API_S_VER_9 */
-/**
- * struct iwl_wowlan_status - WoWLAN status
- * @gtk: GTK data
- * @igtk: IGTK data
- * @bigtk: BIGTK data
- * @replay_ctr: GTK rekey replay counter
- * @pattern_number: number of the matched pattern
- * @non_qos_seq_ctr: non-QoS sequence counter to use next
- * @qos_seq_ctr: QoS sequence counters to use next
- * @wakeup_reasons: wakeup reasons, see &enum iwl_wowlan_wakeup_reason
- * @num_of_gtk_rekeys: number of GTK rekeys
- * @tid_tear_down: bitmap of TIDs torn down
- * @reserved: reserved
- * @received_beacons: number of received beacons
- * @wake_packet_length: wakeup packet length
- * @wake_packet_bufsize: wakeup packet buffer size
- * @tid_tear_down: bit mask of tids whose BA sessions were closed
- * in suspend state
- * @wake_packet: wakeup packet
- */
-struct iwl_wowlan_status {
- struct iwl_wowlan_gtk_status gtk[1];
- struct iwl_wowlan_igtk_status igtk[1];
- struct iwl_wowlan_igtk_status bigtk[WOWLAN_IGTK_KEYS_NUM];
- __le64 replay_ctr;
- __le16 pattern_number;
- __le16 non_qos_seq_ctr;
- __le16 qos_seq_ctr[8];
- __le32 wakeup_reasons;
- __le32 num_of_gtk_rekeys;
- u8 tid_tear_down;
- u8 reserved[3];
- __le32 received_beacons;
- __le32 wake_packet_length;
- __le32 wake_packet_bufsize;
- u8 wake_packet[]; /* can be truncated from _length to _bufsize */
-} __packed; /* WOWLAN_STATUSES_API_S_VER_11 */
-
-static inline u8 iwlmvm_wowlan_gtk_idx(struct iwl_wowlan_gtk_status *gtk)
-{
- return gtk->key_flags & IWL_WOWLAN_GTK_IDX_MASK;
-}
-
/* TODO: NetDetect API */
#endif /* __iwl_fw_api_d3_h__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
index d8b5870d6e9a..3988f5fea33a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h
@@ -7,6 +7,7 @@
#include <linux/bitops.h>
+#define IWL_FW_INI_HW_SMEM_REGION_ID 15
#define IWL_FW_INI_MAX_REGION_ID 64
#define IWL_FW_INI_MAX_NAME 32
#define IWL_FW_INI_MAX_CFG_NAME 64
@@ -243,6 +244,62 @@ struct iwl_fw_ini_hcmd_tlv {
} __packed; /* FW_TLV_DEBUG_HCMD_API_S_VER_1 */
/**
+ * struct iwl_fw_ini_addr_val - address/value pair to set
+ *
+ * @address: the base address
+ * @value: value to set at address
+ *
+ */
+struct iwl_fw_ini_addr_val {
+ __le32 address;
+ __le32 value;
+} __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */
+
+/**
+ * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory.
+ *
+ * @hdr: debug header
+ * @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point
+ * @set_type: write access type preset token for time point.
+ * one of &enum iwl_fw_ini_config_set_type
+ * @addr_offset: the offset to add to any item in address[0] field
+ * @addr_val: address value pair
+ */
+struct iwl_fw_ini_conf_set_tlv {
+ struct iwl_fw_ini_header hdr;
+ __le32 time_point;
+ __le32 set_type;
+ __le32 addr_offset;
+ struct iwl_fw_ini_addr_val addr_val[0];
+} __packed; /* FW_TLV_DEBUG_CONFIG_SET_API_S_VER_1 */
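A sketch of how a consumer of this TLV might walk the trailing address/value pairs, adding addr_offset to each address before writing. The write callback is hypothetical and endianness conversion of the __le32 fields is omitted:

#include <stddef.h>
#include <stdint.h>

struct addr_val {
	uint32_t address;
	uint32_t value;
};

static void apply_config_set(const struct addr_val *pairs, size_t n,
			     uint32_t addr_offset,
			     void (*write_reg)(uint32_t addr, uint32_t val))
{
	size_t i;

	for (i = 0; i < n; i++)
		write_reg(pairs[i].address + addr_offset, pairs[i].value);
}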
+
+/**
+ * enum iwl_fw_ini_config_set_type
+ *
+ * @IWL_FW_INI_CONFIG_SET_TYPE_INVALID: invalid config set
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: for PERIPHERY MAC configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY: for PERIPHERY PHY configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX: for PERIPHERY AUX configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: for DEVICE MEMORY configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_CSR: for CSR configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: for DBGC_DRAM_ADDR configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: for PERIPH SCRATCH HWM configuration
+ * @IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM: max number of configuration set types supported
+ */
+
+enum iwl_fw_ini_config_set_type {
+ IWL_FW_INI_CONFIG_SET_TYPE_INVALID = 0,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_PHY,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_AUX,
+ IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY,
+ IWL_FW_INI_CONFIG_SET_TYPE_CSR,
+ IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR,
+ IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM,
+ IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM,
+} __packed;
+
+/**
* enum iwl_fw_ini_allocation_id
*
* @IWL_FW_INI_ALLOCATION_INVALID: invalid
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
index 8adccd5da095..029ae64bf2b2 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h
@@ -361,6 +361,41 @@ struct iwl_buf_alloc_cmd {
struct iwl_buf_alloc_frag frags[BUF_ALLOC_MAX_NUM_FRAGS];
} __packed; /* BUFFER_ALLOCATION_CMD_API_S_VER_2 */
+#define DRAM_INFO_FIRST_MAGIC_WORD 0x76543210
+#define DRAM_INFO_SECOND_MAGIC_WORD 0x89ABCDEF
+
+/**
+ * struct iwl_dram_info - DRAM fragments allocation struct
+ *
+ * Driver will fill in the first 1K(+) of the pointed-to DRAM fragment
+ *
+ * @first_word: magic word value
+ * @second_word: magic word value
+ * @dram_frags: DRAM fragmentation details
+ */
+struct iwl_dram_info {
+ __le32 first_word;
+ __le32 second_word;
+ struct iwl_buf_alloc_cmd dram_frags[IWL_FW_INI_ALLOCATION_NUM - 1];
+} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */
+
+/**
+ * struct iwl_dbgc1_info - DBGC1 address and size
+ *
+ * Driver will fill the DBGC1 address and size at the address given by the config TLV.
+ *
+ * @first_word: all 0 set as identifier
+ * @dbgc1_add_lsb: LSB bits of the DBGC1 physical address
+ * @dbgc1_add_msb: MSB bits of the DBGC1 physical address
+ * @dbgc1_size: DBGC1 size
+ */
+struct iwl_dbgc1_info {
+ __le32 first_word;
+ __le32 dbgc1_add_lsb;
+ __le32 dbgc1_add_msb;
+ __le32 dbgc1_size;
+} __packed; /* INIT_DRAM_FRAGS_ALLOCATIONS_S_VER_1 */
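Taken together, the two structs above describe a small header the driver writes into host DRAM for the firmware to find. A minimal sketch of initialising the magic words (plain uint32_t here; the real fields are little-endian __le32):

#include <stdint.h>

#define FIRST_MAGIC_WORD  0x76543210u
#define SECOND_MAGIC_WORD 0x89ABCDEFu

struct dram_info_hdr {
	uint32_t first_word;
	uint32_t second_word;
	/* fragment descriptors follow in the real iwl_dram_info layout */
};

static void dram_info_init(struct dram_info_hdr *hdr)
{
	hdr->first_word = FIRST_MAGIC_WORD;
	hdr->second_word = SECOND_MAGIC_WORD;
}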
+
/**
* struct iwl_dbg_host_event_cfg_cmd
* @enabled_severities: enabled severities
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
index 6bbb8b8c91cd..12af94e166ed 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h
@@ -206,7 +206,7 @@ enum iwl_tof_responder_cfg_flags {
IWL_TOF_RESPONDER_FLAGS_SPECIFIC_CALIB_MODE = BIT(8),
IWL_TOF_RESPONDER_FLAGS_FAST_ALGO_SUPPORT = BIT(9),
IWL_TOF_RESPONDER_FLAGS_RETRY_ON_ALGO_FAIL = BIT(10),
- IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_ABC_MSK,
+ IWL_TOF_RESPONDER_FLAGS_FTM_TX_ANT = RATE_MCS_ANT_AB_MSK,
IWL_TOF_RESPONDER_FLAGS_NDP_SUPPORT = BIT(24),
IWL_TOF_RESPONDER_FLAGS_LMR_FEEDBACK = BIT(25),
IWL_TOF_RESPONDER_FLAGS_SESSION_ID = BIT(27),
@@ -629,6 +629,7 @@ enum iwl_location_bw {
IWL_LOCATION_BW_20MHZ,
IWL_LOCATION_BW_40MHZ,
IWL_LOCATION_BW_80MHZ,
+ IWL_LOCATION_BW_160MHZ,
};
#define TK_11AZ_LEN 32
@@ -1500,7 +1501,9 @@ struct iwl_tof_range_rsp_ap_entry_ntfy_v6 {
u8 reserved[3];
u8 rx_pn[IEEE80211_CCMP_PN_LEN];
u8 tx_pn[IEEE80211_CCMP_PN_LEN];
-} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6 */
+} __packed; /* LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_6,
+ LOCATION_RANGE_RSP_AP_ETRY_NTFY_API_S_VER_7 */
+
/**
* enum iwl_tof_response_status - tof response status
@@ -1581,7 +1584,8 @@ struct iwl_tof_range_rsp_ntfy_v8 {
u8 last_report;
u8 reserved;
struct iwl_tof_range_rsp_ap_entry_ntfy_v6 ap[IWL_MVM_TOF_MAX_APS];
-} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8 */
+} __packed; /* LOCATION_RANGE_RSP_NTFY_API_S_VER_8,
+ LOCATION_RANGE_RSP_NTFY_API_S_VER_9 */
#define IWL_MVM_TOF_MCSI_BUF_SIZE (245)
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
index 6610d1234f74..d088c820b1a9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -39,9 +39,9 @@ enum iwl_mac_conf_subcmd_ids {
PROBE_RESPONSE_DATA_NOTIF = 0xFC,
/**
- * @CHANNEL_SWITCH_NOA_NOTIF: &struct iwl_channel_switch_noa_notif
+ * @CHANNEL_SWITCH_START_NOTIF: &struct iwl_channel_switch_start_notif
*/
- CHANNEL_SWITCH_NOA_NOTIF = 0xFF,
+ CHANNEL_SWITCH_START_NOTIF = 0xFF,
};
#define IWL_P2P_NOA_DESC_COUNT (2)
@@ -102,11 +102,11 @@ struct iwl_missed_vap_notif {
} __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */
/**
- * struct iwl_channel_switch_noa_notif - Channel switch NOA notification
+ * struct iwl_channel_switch_start_notif - Channel switch start notification
*
* @id_and_color: ID and color of the MAC
*/
-struct iwl_channel_switch_noa_notif {
+struct iwl_channel_switch_start_notif {
__le32 id_and_color;
} __packed; /* CHANNEL_SWITCH_START_NTFY_API_S_VER_1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
index 7be7715b431d..11f0bd283e49 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h
@@ -138,6 +138,8 @@ struct iwl_mac_data_ibss {
* @FLEXIBLE_TWT_SUPPORTED: AP supports flexible TWT schedule
* @PROTECTED_TWT_SUPPORTED: AP supports protected TWT frames (with 11w)
* @BROADCAST_TWT_SUPPORTED: AP and STA support broadcast TWT
+ * @COEX_HIGH_PRIORITY_ENABLE: high priority mode for BT coex, to be used
+ * during 802.1X negotiation (and allowed during 4-way-HS)
*/
enum iwl_mac_data_policy {
TWT_SUPPORTED = BIT(0),
@@ -145,6 +147,7 @@ enum iwl_mac_data_policy {
FLEXIBLE_TWT_SUPPORTED = BIT(2),
PROTECTED_TWT_SUPPORTED = BIT(3),
BROADCAST_TWT_SUPPORTED = BIT(4),
+ COEX_HIGH_PRIORITY_ENABLE = BIT(5),
};
/**
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
index cf48c6fa8f65..3551a3f1c1aa 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h
@@ -472,6 +472,29 @@ struct iwl_lari_config_change_cmd_v4 {
} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_4 */
/**
+ * struct iwl_lari_config_change_cmd_v5 - change LARI configuration
+ * @config_bitmap: Bitmap of the config commands. Each bit will trigger a
+ * different predefined FW config operation.
+ * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets.
+ * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate the value to use.
+ * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs. There are two bits
+ * per country, one to indicate whether to override and the other to
+ * indicate allow/disallow unii4 channels.
+ * @chan_state_active_bitmap: Bitmap for overriding channel state to active.
+ * Each bit represents a country or region to activate, according to the BIOS
+ * definitions.
+ */
+struct iwl_lari_config_change_cmd_v5 {
+ __le32 config_bitmap;
+ __le32 oem_uhb_allow_bitmap;
+ __le32 oem_11ax_allow_bitmap;
+ __le32 oem_unii4_allow_bitmap;
+ __le32 chan_state_active_bitmap;
+} __packed; /* LARI_CHANGE_CONF_CMD_S_VER_5 */
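The UNII-4 and 11ax bitmaps above use two bits per country slot: an override flag plus the value to apply. A tiny illustrative helper for that encoding; the slot-to-country mapping and the bit order within a slot come from the BIOS definitions, so both are assumptions here:

#include <stdint.h>

/*
 * Assumed encoding for illustration only:
 * bit 2*slot     -> override the regulatory default for this country slot
 * bit 2*slot + 1 -> value to use when overriding (1 = allow)
 */
static uint32_t country_override(unsigned int slot, int allow)
{
	return (1u << (2 * slot)) | ((allow ? 1u : 0u) << (2 * slot + 1));
}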
+
+/**
* struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete
* @status: PNVM image loading status
*/
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
index d07a632f1af7..c04f2521fcb3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2019-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2019-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -29,9 +29,9 @@ enum iwl_phy_ops_subcmd_ids {
TEMP_REPORTING_THRESHOLDS_CMD = 0x04,
/**
- * @GEO_TX_POWER_LIMIT: &struct iwl_geo_tx_power_profiles_cmd
+ * @PER_CHAIN_LIMIT_OFFSET_CMD: &struct iwl_geo_tx_power_profiles_cmd
*/
- GEO_TX_POWER_LIMIT = 0x05,
+ PER_CHAIN_LIMIT_OFFSET_CMD = 0x05,
/**
* @PER_PLATFORM_ANT_GAIN_CMD: &struct iwl_ppag_table_cmd
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
index 86445385f072..4d671c878bb7 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -378,9 +378,10 @@ struct iwl_dev_tx_power_cmd {
};
};
-#define IWL_NUM_GEO_PROFILES 3
-#define IWL_NUM_BANDS_PER_CHAIN_V1 2
-#define IWL_NUM_BANDS_PER_CHAIN_V2 3
+#define IWL_NUM_GEO_PROFILES 3
+#define IWL_NUM_GEO_PROFILES_V3 8
+#define IWL_NUM_BANDS_PER_CHAIN_V1 2
+#define IWL_NUM_BANDS_PER_CHAIN_V2 3
/**
* enum iwl_geo_per_chain_offset_operation - type of operation
@@ -390,10 +391,10 @@ struct iwl_dev_tx_power_cmd {
enum iwl_geo_per_chain_offset_operation {
IWL_PER_CHAIN_OFFSET_SET_TABLES,
IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE,
-}; /* GEO_TX_POWER_LIMIT FLAGS TYPE */
+}; /* PER_CHAIN_OFFSET_OPERATION_E */
/**
- * struct iwl_per_chain_offset - embedded struct for GEO_TX_POWER_LIMIT.
+ * struct iwl_per_chain_offset - embedded struct for PER_CHAIN_LIMIT_OFFSET_CMD.
* @max_tx_power: maximum allowed tx power.
* @chain_a: tx power offset for chain a.
* @chain_b: tx power offset for chain b.
@@ -405,17 +406,17 @@ struct iwl_per_chain_offset {
} __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v1 - struct for GEO_TX_POWER_LIMIT cmd.
+ * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
*/
struct iwl_geo_tx_power_profiles_cmd_v1 {
__le32 ops;
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
-} __packed; /* GEO_TX_POWER_LIMIT_VER_1 */
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v2 - struct for GEO_TX_POWER_LIMIT cmd.
+ * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
@@ -424,10 +425,10 @@ struct iwl_geo_tx_power_profiles_cmd_v2 {
__le32 ops;
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V1];
__le32 table_revision;
-} __packed; /* GEO_TX_POWER_LIMIT_VER_2 */
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */
/**
- * struct iwl_geo_tx_power_profile_cmd_v3 - struct for GEO_TX_POWER_LIMIT cmd.
+ * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
* @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
* @table: offset profile per band.
* @table_revision: BIOS table revision.
@@ -436,21 +437,47 @@ struct iwl_geo_tx_power_profiles_cmd_v3 {
__le32 ops;
struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES][IWL_NUM_BANDS_PER_CHAIN_V2];
__le32 table_revision;
-} __packed; /* GEO_TX_POWER_LIMIT_VER_3 */
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: BIOS table revision.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v4 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V1];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */
+
+/**
+ * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd.
+ * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation
+ * @table: offset profile per band.
+ * @table_revision: BIOS table revision.
+ */
+struct iwl_geo_tx_power_profiles_cmd_v5 {
+ __le32 ops;
+ struct iwl_per_chain_offset table[IWL_NUM_GEO_PROFILES_V3][IWL_NUM_BANDS_PER_CHAIN_V2];
+ __le32 table_revision;
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_5 */
union iwl_geo_tx_power_profiles_cmd {
struct iwl_geo_tx_power_profiles_cmd_v1 v1;
struct iwl_geo_tx_power_profiles_cmd_v2 v2;
struct iwl_geo_tx_power_profiles_cmd_v3 v3;
+ struct iwl_geo_tx_power_profiles_cmd_v4 v4;
+ struct iwl_geo_tx_power_profiles_cmd_v5 v5;
};
/**
- * struct iwl_geo_tx_power_profiles_resp - response to GEO_TX_POWER_LIMIT cmd
+ * struct iwl_geo_tx_power_profiles_resp - response to PER_CHAIN_LIMIT_OFFSET_CMD cmd
* @profile_idx: current geo profile in use
*/
struct iwl_geo_tx_power_profiles_resp {
__le32 profile_idx;
-} __packed; /* GEO_TX_POWER_LIMIT_RESP */
+} __packed; /* PER_CHAIN_LIMIT_OFFSET_RSP */
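The reason for the growing set of command structs above is that the payload size is the product of the per-version profile and band counts. A small sketch of that sizing, with the version-to-dimension mapping read off the structs (a reader's summary, not the driver's code):

#include <stddef.h>
#include <stdint.h>

struct per_chain_offset {	/* mirrors the 4-byte entry above */
	uint16_t max_tx_power;
	uint8_t chain_a;
	uint8_t chain_b;
};

static size_t geo_table_bytes(int cmd_ver)
{
	/* v4/v5 carry 8 profiles, older versions 3 */
	int profiles = cmd_ver >= 4 ? 8 : 3;
	/* v3 and v5 add the third (6 GHz) band per chain */
	int bands = (cmd_ver == 3 || cmd_ver == 5) ? 3 : 2;

	return sizeof(struct per_chain_offset) * profiles * bands;
}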
/**
* union iwl_ppag_table_cmd - union for all versions of PPAG command
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
index fc2fa49e9825..a09081d7ed45 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h
@@ -184,6 +184,14 @@ struct iwl_tlc_update_notif {
__le32 amsdu_enabled;
} __packed; /* TLC_MNG_UPDATE_NTFY_API_S_VER_2 */
+
+#define IWL_MAX_MCS_DISPLAY_SIZE 12
+
+struct iwl_rate_mcs_info {
+ char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
+};
+
/*
* These serve as indexes into
* struct iwl_rate_info fw_rate_idx_to_plcp[IWL_RATE_COUNT];
@@ -226,6 +234,8 @@ enum {
IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
+ IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
+ IWL_RATE_INVALID = IWL_RATE_COUNT,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
@@ -248,7 +258,7 @@ enum {
};
/*
- * rate_n_flags bit fields
+ * rate_n_flags bit fields version 1
*
 * The 32-bit value has different layouts in the low 8 bits depending on the
* format. There are three formats, HT, VHT and legacy (11abg, with subformats
@@ -266,15 +276,15 @@ enum {
/* Bit 8: (1) HT format, (0) legacy or VHT format */
#define RATE_MCS_HT_POS 8
-#define RATE_MCS_HT_MSK (1 << RATE_MCS_HT_POS)
+#define RATE_MCS_HT_MSK_V1 BIT(RATE_MCS_HT_POS)
/* Bit 9: (1) CCK, (0) OFDM. HT (bit 8) must be "0" for this bit to be valid */
-#define RATE_MCS_CCK_POS 9
-#define RATE_MCS_CCK_MSK (1 << RATE_MCS_CCK_POS)
+#define RATE_MCS_CCK_POS_V1 9
+#define RATE_MCS_CCK_MSK_V1 BIT(RATE_MCS_CCK_POS_V1)
/* Bit 26: (1) VHT format, (0) legacy format in bits 8:0 */
-#define RATE_MCS_VHT_POS 26
-#define RATE_MCS_VHT_MSK (1 << RATE_MCS_VHT_POS)
+#define RATE_MCS_VHT_POS_V1 26
+#define RATE_MCS_VHT_MSK_V1 BIT(RATE_MCS_VHT_POS_V1)
/*
@@ -300,15 +310,16 @@ enum {
* streams and 16-23 have three streams. We could also support MCS 32
* which is the duplicate 20 MHz MCS (bit 5 set, all others zero.)
*/
-#define RATE_HT_MCS_RATE_CODE_MSK 0x7
-#define RATE_HT_MCS_NSS_POS 3
-#define RATE_HT_MCS_NSS_MSK (3 << RATE_HT_MCS_NSS_POS)
+#define RATE_HT_MCS_RATE_CODE_MSK_V1 0x7
+#define RATE_HT_MCS_NSS_POS_V1 3
+#define RATE_HT_MCS_NSS_MSK_V1 (3 << RATE_HT_MCS_NSS_POS_V1)
+#define RATE_HT_MCS_MIMO2_MSK BIT(RATE_HT_MCS_NSS_POS_V1)
/* Bit 10: (1) Use Green Field preamble */
#define RATE_HT_MCS_GF_POS 10
#define RATE_HT_MCS_GF_MSK (1 << RATE_HT_MCS_GF_POS)
-#define RATE_HT_MCS_INDEX_MSK 0x3f
+#define RATE_HT_MCS_INDEX_MSK_V1 0x3f
/*
* Very High-throughput (VHT) rate format for bits 7:0
@@ -324,6 +335,7 @@ enum {
#define RATE_VHT_MCS_RATE_CODE_MSK 0xf
#define RATE_VHT_MCS_NSS_POS 4
#define RATE_VHT_MCS_NSS_MSK (3 << RATE_VHT_MCS_NSS_POS)
+#define RATE_VHT_MCS_MIMO2_MSK BIT(RATE_VHT_MCS_NSS_POS)
/*
* Legacy OFDM rate format for bits 7:0
@@ -347,37 +359,30 @@ enum {
* 110) 11 Mbps
* (bit 7 is 0)
*/
-#define RATE_LEGACY_RATE_MSK 0xff
+#define RATE_LEGACY_RATE_MSK_V1 0xff
/* Bit 10 - OFDM HE */
-#define RATE_MCS_HE_POS 10
-#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
+#define RATE_MCS_HE_POS_V1 10
+#define RATE_MCS_HE_MSK_V1 BIT(RATE_MCS_HE_POS_V1)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
* 0 and 1 are valid for HT and VHT, 2 and 3 only for VHT
*/
#define RATE_MCS_CHAN_WIDTH_POS 11
-#define RATE_MCS_CHAN_WIDTH_MSK (3 << RATE_MCS_CHAN_WIDTH_POS)
-#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
-#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
-#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
-#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_MSK_V1 (3 << RATE_MCS_CHAN_WIDTH_POS)
/* Bit 13: (1) Short guard interval (0.4 usec), (0) normal GI (0.8 usec) */
-#define RATE_MCS_SGI_POS 13
-#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
+#define RATE_MCS_SGI_POS_V1 13
+#define RATE_MCS_SGI_MSK_V1 BIT(RATE_MCS_SGI_POS_V1)
/* Bit 14-16: Antenna selection (1) Ant A, (2) Ant B, (4) Ant C */
#define RATE_MCS_ANT_POS 14
#define RATE_MCS_ANT_A_MSK (1 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_B_MSK (2 << RATE_MCS_ANT_POS)
-#define RATE_MCS_ANT_C_MSK (4 << RATE_MCS_ANT_POS)
#define RATE_MCS_ANT_AB_MSK (RATE_MCS_ANT_A_MSK | \
RATE_MCS_ANT_B_MSK)
-#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | \
- RATE_MCS_ANT_C_MSK)
-#define RATE_MCS_ANT_MSK RATE_MCS_ANT_ABC_MSK
+#define RATE_MCS_ANT_MSK RATE_MCS_ANT_AB_MSK
/* Bit 17: (0) SS, (1) SS*2 */
#define RATE_MCS_STBC_POS 17
@@ -411,27 +416,27 @@ enum {
* 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
-#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
+#define RATE_MCS_HE_GI_LTF_MSK_V1 (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
-#define RATE_MCS_HE_TYPE_POS 22
-#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
-#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
-#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
-#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
-#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_POS_V1 22
+#define RATE_MCS_HE_TYPE_SU_V1 (0 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_EXT_SU_V1 BIT(RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_MU_V1 (2 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_TRIG_V1 (3 << RATE_MCS_HE_TYPE_POS_V1)
+#define RATE_MCS_HE_TYPE_MSK_V1 (3 << RATE_MCS_HE_TYPE_POS_V1)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
-#define RATE_MCS_DUP_POS 24
-#define RATE_MCS_DUP_MSK (3 << RATE_MCS_DUP_POS)
+#define RATE_MCS_DUP_POS_V1 24
+#define RATE_MCS_DUP_MSK_V1 (3 << RATE_MCS_DUP_POS_V1)
/* Bit 27: (1) LDPC enabled, (0) LDPC disabled */
-#define RATE_MCS_LDPC_POS 27
-#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+#define RATE_MCS_LDPC_POS_V1 27
+#define RATE_MCS_LDPC_MSK_V1 BIT(RATE_MCS_LDPC_POS_V1)
/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
-#define RATE_MCS_HE_106T_POS 28
-#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
+#define RATE_MCS_HE_106T_POS_V1 28
+#define RATE_MCS_HE_106T_MSK_V1 BIT(RATE_MCS_HE_106T_POS_V1)
/* Bit 30-31: (1) RTS, (2) CTS */
#define RATE_MCS_RTS_REQUIRED_POS (30)
@@ -440,6 +445,152 @@ enum {
#define RATE_MCS_CTS_REQUIRED_POS (31)
#define RATE_MCS_CTS_REQUIRED_MSK (0x1 << RATE_MCS_CTS_REQUIRED_POS)
+/* rate_n_flags bit field version 2
+ *
+ * The 32-bit value has different layouts in the low 8 bits depending on the
+ * format. There are three formats, HT, VHT and legacy (11abg, with subformats
+ * for CCK and OFDM).
+ *
+ */
+
+/* Bits 10-8: rate format
+ * (0) Legacy CCK (1) Legacy OFDM (2) High-throughput (HT)
+ * (3) Very High-throughput (VHT) (4) High-efficiency (HE)
+ * (5) Extremely High-throughput (EHT)
+ */
+#define RATE_MCS_MOD_TYPE_POS 8
+#define RATE_MCS_MOD_TYPE_MSK (0x7 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_CCK_MSK (0 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_LEGACY_OFDM_MSK (1 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_HT_MSK (2 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_VHT_MSK (3 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_HE_MSK (4 << RATE_MCS_MOD_TYPE_POS)
+#define RATE_MCS_EHT_MSK (5 << RATE_MCS_MOD_TYPE_POS)
+
+/*
+ * Legacy CCK rate format for bits 0:3:
+ *
+ * (0) 0xa - 1 Mbps
+ * (1) 0x14 - 2 Mbps
+ * (2) 0x37 - 5.5 Mbps
+ * (3) 0x6e - 11 Mbps
+ *
+ * Legacy OFDM rate format for bits 3:0:
+ *
+ * (0) 6 Mbps
+ * (1) 9 Mbps
+ * (2) 12 Mbps
+ * (3) 18 Mbps
+ * (4) 24 Mbps
+ * (5) 36 Mbps
+ * (6) 48 Mbps
+ * (7) 54 Mbps
+ *
+ */
+#define RATE_LEGACY_RATE_MSK 0x7
+
+/*
+ * HT, VHT, HE, EHT rate format for bits 3:0
+ * 3-0: MCS
+ *
+ */
+#define RATE_HT_MCS_CODE_MSK 0x7
+#define RATE_MCS_NSS_POS 4
+#define RATE_MCS_NSS_MSK (1 << RATE_MCS_NSS_POS)
+#define RATE_MCS_CODE_MSK 0xf
+#define RATE_HT_MCS_INDEX(r) ((((r) & RATE_MCS_NSS_MSK) >> 1) | \
+ ((r) & RATE_HT_MCS_CODE_MSK))
+
+/* Bits 7-5: reserved */
+
+/*
+ * Bits 13-11: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz, (4) 320MHz
+ */
+#define RATE_MCS_CHAN_WIDTH_MSK (0x7 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_20 (0 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_40 (1 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_80 (2 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_160 (3 << RATE_MCS_CHAN_WIDTH_POS)
+#define RATE_MCS_CHAN_WIDTH_320 (4 << RATE_MCS_CHAN_WIDTH_POS)
+
+/* Bit 15-14: Antenna selection:
+ * Bit 14: Ant A active
+ * Bit 15: Ant B active
+ *
+ * All relevant definitions are same as in v1
+ */
+
+/* Bit 16: (1) LDPC enabled, (0) LDPC disabled */
+#define RATE_MCS_LDPC_POS 16
+#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
+
+/* Bit 17: (0) SS, (1) SS*2 (same as v1) */
+
+/* Bit 18: OFDM-HE dual carrier mode (same as v1) */
+
+/* Bit 19: (0) Beamforming is off, (1) Beamforming is on (same as v1) */
+
+/*
+ * Bit 22-20: HE LTF type and guard interval
+ * CCK:
+ * 0 long preamble
+ * 1 short preamble
+ * HT/VHT:
+ * 0 0.8us
+ * 1 0.4us
+ * HE (ext) SU:
+ * 0 1xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * 4 4xLTF+0.8us
+ * HE MU:
+ * 0 4xLTF+0.8us
+ * 1 2xLTF+0.8us
+ * 2 2xLTF+1.6us
+ * 3 4xLTF+3.2us
+ * HE TRIG:
+ * 0 1xLTF+1.6us
+ * 1 2xLTF+1.6us
+ * 2 4xLTF+3.2us
+ */
+#define RATE_MCS_HE_GI_LTF_MSK (0x7 << RATE_MCS_HE_GI_LTF_POS)
+#define RATE_MCS_SGI_POS RATE_MCS_HE_GI_LTF_POS
+#define RATE_MCS_SGI_MSK (1 << RATE_MCS_SGI_POS)
+#define RATE_MCS_HE_SU_4_LTF 3
+#define RATE_MCS_HE_SU_4_LTF_08_GI 4
+
+/* Bit 24-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
+#define RATE_MCS_HE_TYPE_POS 23
+#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
+#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
+
+/* Bit 25: duplicate channel enabled
+ *
+ * if this bit is set, duplicate is according to BW (bits 11-13):
+ *
+ * CCK: 2x 20MHz
+ * OFDM Legacy: N x 20MHz, (N = BW / 20; either 2, 4, 8 or 16)
+ * EHT: 2 x BW/2, (80 - 2x40, 160 - 2x80, 320 - 2x160)
+ */
+#define RATE_MCS_DUP_POS 25
+#define RATE_MCS_DUP_MSK (1 << RATE_MCS_DUP_POS)
+
+/* Bit 26: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
+#define RATE_MCS_HE_106T_POS 26
+#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
+
+/* Bit 27: EHT extra LTF:
+ * instead of 1 LTF for SISO use 2 LTFs,
+ * instead of 2 LTFs for NSTS=2 use 4 LTFs */
+#define RATE_MCS_EHT_EXTRA_LTF_POS 27
+#define RATE_MCS_EHT_EXTRA_LTF_MSK (1 << RATE_MCS_EHT_EXTRA_LTF_POS)
+
+/* Bit 31-28: reserved */
+
/* Link Quality definitions */
/* # entries in rate scale table to support Tx retries */
@@ -557,4 +708,13 @@ struct iwl_lq_cmd {
__le32 ss_params;
}; /* LINK_QUALITY_CMD_API_S_VER_1 */
+u8 iwl_fw_rate_idx_to_plcp(int idx);
+u32 iwl_new_rate_from_v1(u32 rate_v1);
+u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags);
+const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx);
+const char *iwl_rs_pretty_ant(u8 ant);
+const char *iwl_rs_pretty_bw(int bw);
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+bool iwl_he_is_sgi(u32 rate_n_flags);
+
#endif /* __iwl_fw_api_rs_h__ */
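For orientation, a standalone sketch that decodes the main fields of a version-2 rate_n_flags word using the bit positions documented above (illustration only; the driver's own helpers such as iwl_new_rate_from_v1() do the real conversions):

#include <stdint.h>
#include <stdio.h>

#define MOD_TYPE_POS   8
#define MOD_TYPE_MSK   (0x7u << MOD_TYPE_POS)	/* 0=CCK .. 5=EHT */
#define MCS_CODE_MSK   0xfu
#define NSS_POS        4
#define NSS_MSK        (1u << NSS_POS)
#define CHAN_WIDTH_POS 11
#define CHAN_WIDTH_MSK (0x7u << CHAN_WIDTH_POS)	/* 0=20MHz .. 4=320MHz */

static void decode_rate_v2(uint32_t r)
{
	unsigned int mod = (r & MOD_TYPE_MSK) >> MOD_TYPE_POS;
	unsigned int mcs = r & MCS_CODE_MSK;
	unsigned int nss = ((r & NSS_MSK) >> NSS_POS) + 1;
	unsigned int bw  = (r & CHAN_WIDTH_MSK) >> CHAN_WIDTH_POS;

	printf("mod %u mcs %u nss %u bw-code %u\n", mod, mcs, nss, bw);
}

int main(void)
{
	/* HE (4), MCS 7, 2 spatial streams, 80 MHz (width code 2) */
	decode_rate_v2((4u << MOD_TYPE_POS) | (1u << NSS_POS) | 7u |
		       (2u << CHAN_WIDTH_POS));
	return 0;
}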
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
index 3f13b572915a..1989b270862b 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -13,7 +13,6 @@
#define IWL_RX_INFO_ENERGY_ANT_ABC_IDX 1
#define IWL_RX_INFO_ENERGY_ANT_A_MSK 0x000000ff
#define IWL_RX_INFO_ENERGY_ANT_B_MSK 0x0000ff00
-#define IWL_RX_INFO_ENERGY_ANT_C_MSK 0x00ff0000
#define IWL_RX_INFO_ENERGY_ANT_A_POS 0
#define IWL_RX_INFO_ENERGY_ANT_B_POS 8
#define IWL_RX_INFO_ENERGY_ANT_C_POS 16
@@ -126,14 +125,12 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_OVERRUN_OK: there was no RXE overflow
* @RX_MPDU_RES_STATUS_SRC_STA_FOUND: station was found
* @RX_MPDU_RES_STATUS_KEY_VALID: key was valid
- * @RX_MPDU_RES_STATUS_KEY_PARAM_OK: key parameters were usable
* @RX_MPDU_RES_STATUS_ICV_OK: ICV is fine, if not, the packet is destroyed
* @RX_MPDU_RES_STATUS_MIC_OK: used for CCM alg only. TKIP MIC is checked
* in the driver.
* @RX_MPDU_RES_STATUS_TTAK_OK: TTAK is fine
* @RX_MPDU_RES_STATUS_MNG_FRAME_REPLAY_ERR: valid for alg = CCM_CMAC or
- * alg = CCM only. Checks replay attack for 11w frames. Relevant only if
- * %RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME is set.
+ * alg = CCM only. Checks replay attack for 11w frames.
* @RX_MPDU_RES_STATUS_SEC_NO_ENC: this frame is not encrypted
* @RX_MPDU_RES_STATUS_SEC_WEP_ENC: this frame is encrypted using WEP
* @RX_MPDU_RES_STATUS_SEC_CCM_ENC: this frame is encrypted using CCM
@@ -145,9 +142,6 @@ enum iwl_rx_phy_flags {
* @RX_MPDU_RES_STATUS_SEC_ENC_ERR: this frame couldn't be decrypted
* @RX_MPDU_RES_STATUS_SEC_ENC_MSK: bitmask of the encryption algorithm
* @RX_MPDU_RES_STATUS_DEC_DONE: this frame has been successfully decrypted
- * @RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP: extended IV (set with TKIP)
- * @RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT: key ID comparison done
- * @RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME: this frame is an 11w management frame
* @RX_MPDU_RES_STATUS_CSUM_DONE: checksum was done by the hw
* @RX_MPDU_RES_STATUS_CSUM_OK: checksum found no errors
* @RX_MPDU_RES_STATUS_STA_ID_MSK: station ID mask
@@ -158,7 +152,6 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_OVERRUN_OK = BIT(1),
RX_MPDU_RES_STATUS_SRC_STA_FOUND = BIT(2),
RX_MPDU_RES_STATUS_KEY_VALID = BIT(3),
- RX_MPDU_RES_STATUS_KEY_PARAM_OK = BIT(4),
RX_MPDU_RES_STATUS_ICV_OK = BIT(5),
RX_MPDU_RES_STATUS_MIC_OK = BIT(6),
RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
@@ -172,9 +165,6 @@ enum iwl_mvm_rx_status {
RX_MPDU_RES_STATUS_SEC_ENC_ERR = (7 << 8),
RX_MPDU_RES_STATUS_SEC_ENC_MSK = (7 << 8),
RX_MPDU_RES_STATUS_DEC_DONE = BIT(11),
- RX_MPDU_RES_STATUS_EXT_IV_BIT_CMP = BIT(13),
- RX_MPDU_RES_STATUS_KEY_ID_CMP_BIT = BIT(14),
- RX_MPDU_RES_STATUS_ROBUST_MNG_FRAME = BIT(15),
RX_MPDU_RES_STATUS_CSUM_DONE = BIT(16),
RX_MPDU_RES_STATUS_CSUM_OK = BIT(17),
RX_MDPU_RES_STATUS_STA_ID_SHIFT = 24,
@@ -236,7 +226,6 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_OVERRUN_OK = BIT(1),
IWL_RX_MPDU_STATUS_SRC_STA_FOUND = BIT(2),
IWL_RX_MPDU_STATUS_KEY_VALID = BIT(3),
- IWL_RX_MPDU_STATUS_KEY_PARAM_OK = BIT(4),
IWL_RX_MPDU_STATUS_ICV_OK = BIT(5),
IWL_RX_MPDU_STATUS_MIC_OK = BIT(6),
IWL_RX_MPDU_RES_STATUS_TTAK_OK = BIT(7),
@@ -251,12 +240,8 @@ enum iwl_rx_mpdu_status {
IWL_RX_MPDU_STATUS_SEC_EXT_ENC = 0x4 << 8,
IWL_RX_MPDU_STATUS_SEC_GCM = 0x5 << 8,
IWL_RX_MPDU_STATUS_DECRYPTED = BIT(11),
- IWL_RX_MPDU_STATUS_WEP_MATCH = BIT(12),
- IWL_RX_MPDU_STATUS_EXT_IV_MATCH = BIT(13),
- IWL_RX_MPDU_STATUS_KEY_ID_MATCH = BIT(14),
IWL_RX_MPDU_STATUS_ROBUST_MNG_FRAME = BIT(15),
- IWL_RX_MPDU_STATUS_KEY = 0x3f0000,
IWL_RX_MPDU_STATUS_DUPLICATE = BIT(22),
IWL_RX_MPDU_STATUS_STA_ID = 0x1f000000,
@@ -460,7 +445,7 @@ struct iwl_rx_mpdu_desc_v1 {
__le32 phy_data1;
};
};
-} __packed;
+} __packed; /* RX_MPDU_RES_START_API_S_VER_4 */
/**
* struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
@@ -560,7 +545,8 @@ struct iwl_rx_mpdu_desc_v3 {
* @reserved: reserved
*/
__le32 reserved[2];
-} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_5 */
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
@@ -625,7 +611,9 @@ struct iwl_rx_mpdu_desc {
struct iwl_rx_mpdu_desc_v1 v1;
struct iwl_rx_mpdu_desc_v3 v3;
};
-} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3,
+ RX_MPDU_RES_START_API_S_VER_4,
+ RX_MPDU_RES_START_API_S_VER_5 */
#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
@@ -679,7 +667,8 @@ struct iwl_rx_no_data {
__le32 rate;
__le32 phy_info[2];
__le32 rx_vec[2];
-} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */
+} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1,
+ TX_NO_DATA_NTFY_API_S_VER_2 */
struct iwl_frame_release {
u8 baid;
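
The rx.h hunks above mostly retire antenna-C and obsolete key-status bits and annotate which RX_MPDU_RES_START API versions each descriptor layout serves. As a minimal sketch (not quoted from this patch), a caller picking between the v1 tail and the full v3 descriptor could look like this, with the device-family decision left to the caller:

static inline u32 example_rx_desc_len(bool use_v3_layout)
{
	/* newer devices use the whole descriptor, older ones only the v1 tail */
	return use_v3_layout ? sizeof(struct iwl_rx_mpdu_desc)
			     : IWL_RX_DESC_SIZE_V1;
}
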
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
index f1a3e14880e7..5edbe27c0922 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/sta.h
@@ -28,6 +28,8 @@
* @STA_FLG_MAX_AGG_SIZE_256K: maximal size for A-MPDU (256k supported)
* @STA_FLG_MAX_AGG_SIZE_512K: maximal size for A-MPDU (512k supported)
* @STA_FLG_MAX_AGG_SIZE_1024K: maximal size for A-MPDU (1024k supported)
+ * @STA_FLG_MAX_AGG_SIZE_2M: maximal size for A-MPDU (2M supported)
+ * @STA_FLG_MAX_AGG_SIZE_4M: maximal size for A-MPDU (4M supported)
* @STA_FLG_AGG_MPDU_DENS_MSK: maximal MPDU density for Tx aggregation
* @STA_FLG_FAT_EN_MSK: support for channel width (for Tx). This flag is
* initialised by driver and can be updated by fw upon reception of
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
index 24e4a82a55da..4a74c0ea0f31 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
#ifndef __iwl_fw_api_tx_h__
@@ -81,6 +81,10 @@ enum iwl_tx_cmd_flags {
IWL_TX_FLAGS_CMD_RATE = BIT(0),
IWL_TX_FLAGS_ENCRYPT_DIS = BIT(1),
IWL_TX_FLAGS_HIGH_PRI = BIT(2),
+ /* Use these flags only from
+ * TX_FLAGS_BITS_API_S_VER_4 and above */
+ IWL_TX_FLAGS_RTS = BIT(3),
+ IWL_TX_FLAGS_CTS = BIT(4),
}; /* TX_FLAGS_BITS_API_S_VER_3 */
/**
@@ -239,8 +243,10 @@ struct iwl_tx_cmd {
u8 tid_tspec;
__le16 pm_frame_timeout;
__le16 reserved4;
- u8 payload[0];
- struct ieee80211_hdr hdr[0];
+ union {
+ DECLARE_FLEX_ARRAY(u8, payload);
+ DECLARE_FLEX_ARRAY(struct ieee80211_hdr, hdr);
+ };
} __packed; /* TX_CMD_API_S_VER_6 */
struct iwl_dram_sec_info {
@@ -267,7 +273,8 @@ struct iwl_tx_cmd_gen2 {
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_7 */
+} __packed; /* TX_CMD_API_S_VER_7,
+ TX_CMD_API_S_VER_9 */
/**
* struct iwl_tx_cmd_gen3 - TX command struct to FW for AX210+ devices
@@ -290,7 +297,8 @@ struct iwl_tx_cmd_gen3 {
__le32 rate_n_flags;
__le64 ttl;
struct ieee80211_hdr hdr[];
-} __packed; /* TX_CMD_API_S_VER_8 */
+} __packed; /* TX_CMD_API_S_VER_8,
+ TX_CMD_API_S_VER_10 */
/*
* TX response related data
@@ -591,7 +599,8 @@ struct iwl_mvm_tx_resp {
__le16 tx_queue;
__le16 reserved2;
struct agg_tx_status status;
-} __packed; /* TX_RSP_API_S_VER_6 */
+} __packed; /* TX_RSP_API_S_VER_6,
+ TX_RSP_API_S_VER_7 */
/**
* struct iwl_mvm_ba_notif - notifies about reception of BA
@@ -713,9 +722,12 @@ struct iwl_mvm_compressed_ba_notif {
__le32 tx_rate;
__le16 tfd_cnt;
__le16 ra_tid_cnt;
- struct iwl_mvm_compressed_ba_ratid ra_tid[0];
- struct iwl_mvm_compressed_ba_tfd tfd[];
-} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
+ union {
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_ratid, ra_tid);
+ DECLARE_FLEX_ARRAY(struct iwl_mvm_compressed_ba_tfd, tfd);
+ };
+} __packed; /* COMPRESSED_BA_RES_API_S_VER_4,
+ COMPRESSED_BA_RES_API_S_VER_5 */
/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
@@ -755,12 +767,20 @@ struct iwl_mac_beacon_cmd_v7 {
struct ieee80211_hdr frame[];
} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_7 */
+/* Bit flags for BEACON_TEMPLATE_CMD_API until version 10 */
+enum iwl_mac_beacon_flags_v1 {
+ IWL_MAC_BEACON_CCK_V1 = BIT(8),
+ IWL_MAC_BEACON_ANT_A_V1 = BIT(9),
+ IWL_MAC_BEACON_ANT_B_V1 = BIT(10),
+ IWL_MAC_BEACON_FILS_V1 = BIT(12),
+};
+
+/* Bit flags for BEACON_TEMPLATE_CMD_API version 11 and above */
enum iwl_mac_beacon_flags {
- IWL_MAC_BEACON_CCK = BIT(8),
- IWL_MAC_BEACON_ANT_A = BIT(9),
- IWL_MAC_BEACON_ANT_B = BIT(10),
- IWL_MAC_BEACON_ANT_C = BIT(11),
- IWL_MAC_BEACON_FILS = BIT(12),
+ IWL_MAC_BEACON_CCK = BIT(5),
+ IWL_MAC_BEACON_ANT_A = BIT(6),
+ IWL_MAC_BEACON_ANT_B = BIT(7),
+ IWL_MAC_BEACON_FILS = BIT(8),
};
/**
@@ -788,7 +808,9 @@ struct iwl_mac_beacon_cmd {
__le32 ecsa_offset;
__le32 csa_offset;
struct ieee80211_hdr frame[];
-} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10 */
+} __packed; /* BEACON_TEMPLATE_CMD_API_S_VER_10,
+ BEACON_TEMPLATE_CMD_API_S_VER_11,
+ BEACON_TEMPLATE_CMD_API_S_VER_12 */
struct iwl_beacon_notif {
struct iwl_mvm_tx_resp beacon_notify_hdr;
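
Two of the tx.h hunks replace pairs of zero-length trailing arrays with a union of DECLARE_FLEX_ARRAY() members, keeping both typed views of the trailing bytes while conforming to the flexible-array rules. An illustrative (non-driver) struct using the same pattern; DECLARE_FLEX_ARRAY is assumed to come from linux/stddef.h on kernels of this vintage:

#include <linux/stddef.h>
#include <linux/types.h>

struct example_cmd {
	__le16 len;
	__le16 flags;
	/* two views of the same trailing payload, no zero-length members */
	union {
		DECLARE_FLEX_ARRAY(u8, bytes);
		DECLARE_FLEX_ARRAY(__le32, words);
	};
} __packed;
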
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index 6dcafd0a3d4b..a39013c401c9 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -159,11 +159,15 @@ static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt,
iwl_trans_read_prph(fwrt->trans, TXF_READ_MODIFY_DATA + offset);
/* Read FIFO */
- fifo_len /= sizeof(u32); /* Size in DWORDS */
- for (i = 0; i < fifo_len; i++)
+ for (i = 0; i < fifo_len / sizeof(u32); i++)
fifo_data[i] = iwl_trans_read_prph(fwrt->trans,
TXF_READ_MODIFY_DATA +
offset);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ fifo_data, fifo_len);
+
*dump_data = iwl_fw_error_next_data(*dump_data);
}
@@ -659,6 +663,10 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt,
iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len);
*dump_data = iwl_fw_error_next_data(*dump_data);
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, ofs,
+ dump_mem->data, len);
+
IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type);
}
@@ -752,6 +760,12 @@ static void iwl_dump_paging(struct iwl_fw_runtime *fwrt,
PAGING_BLOCK_SIZE,
DMA_BIDIRECTIONAL);
(*data) = iwl_fw_error_next_data(*data);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
+ fwrt->fw_paging_db[i].fw_offs,
+ paging->data,
+ PAGING_BLOCK_SIZE);
}
}
@@ -980,6 +994,11 @@ iwl_fw_error_dump_file(struct iwl_fw_runtime *fwrt,
dump_data->data + data_size,
data_size);
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx, addr,
+ dump_data->data + data_size,
+ data_size);
+
dump_data = iwl_fw_error_next_data(dump_data);
}
@@ -1146,6 +1165,13 @@ static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime *fwrt,
iwl_trans_read_mem_bytes(fwrt->trans, addr, range->data,
le32_to_cpu(reg->dev_addr.size));
+ if ((le32_to_cpu(reg->id) & IWL_FW_INI_REGION_V2_MASK) ==
+ IWL_FW_INI_HW_SMEM_REGION_ID &&
+ fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ range->data,
+ le32_to_cpu(reg->dev_addr.size));
+
return sizeof(*range) + le32_to_cpu(range->range_data_size);
}
@@ -1338,6 +1364,10 @@ static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime *fwrt,
for (i = 0; i < iter->fifo_size; i += sizeof(*data))
*data++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt->trans, addr));
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_txf)
+ fwrt->sanitize_ops->frob_txf(fwrt->sanitize_ctx,
+ reg_dump, iter->fifo_size);
+
out:
iwl_trans_release_nic_access(fwrt->trans);
@@ -2077,7 +2107,7 @@ static u32 iwl_dump_ini_info(struct iwl_fw_runtime *fwrt,
*/
hw_type = CSR_HW_REV_TYPE(fwrt->trans->hw_rev);
if (hw_type == IWL_AX210_HW_TYPE) {
- u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR);
+ u32 prph_val = iwl_read_prph(fwrt->trans, WFPM_OTP_CFG1_ADDR_GEN2);
u32 is_jacket = !!(prph_val & WFPM_OTP_CFG1_IS_JACKET_BIT);
u32 is_cdb = !!(prph_val & WFPM_OTP_CFG1_IS_CDB_BIT);
u32 masked_bits = is_jacket | (is_cdb << 1);
@@ -2360,7 +2390,9 @@ static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
if (dump_data->monitor_only)
dump_mask &= BIT(IWL_FW_ERROR_DUMP_FW_MONITOR);
- fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
+ fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask,
+ fwrt->sanitize_ops,
+ fwrt->sanitize_ctx);
file_len = le32_to_cpu(dump_file->file_len);
fw_error_dump.fwrt_len = file_len;
@@ -2788,6 +2820,12 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
fwrt->dump.d3_debug_data,
cfg->d3_debug_data_length);
+
+ if (fwrt->sanitize_ops && fwrt->sanitize_ops->frob_mem)
+ fwrt->sanitize_ops->frob_mem(fwrt->sanitize_ctx,
+ cfg->d3_debug_data_base_addr,
+ fwrt->dump.d3_debug_data,
+ cfg->d3_debug_data_length);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
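
The dbg.c hunks above thread optional sanitize callbacks (frob_txf/frob_mem) through every place a dump buffer is filled. A hedged sketch of what such a callback could do — blank any bytes falling inside a firmware-declared exclusion range — with the signature inferred from the call sites above and the context type assumed purely for illustration:

#include <linux/string.h>
#include <linux/types.h>

/* illustrative only; the real callbacks are supplied by the op-mode */
static void example_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
{
	const struct iwl_dump_exclude *excl = ctx;	/* assumed ctx type */
	u32 start = mem_addr, end = mem_addr + buflen;

	if (!excl->size)
		return;
	if (start < excl->addr)
		start = excl->addr;
	if (end > excl->addr + excl->size)
		end = excl->addr + excl->size;
	if (start < end)
		memset((u8 *)mem + (start - mem_addr), 0, end - start);
}
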
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dump.c b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
index a1842205e86a..016b3a4c5f51 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dump.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dump.c
@@ -214,7 +214,7 @@ static void iwl_fwrt_dump_lmac_error_log(struct iwl_fw_runtime *fwrt, u8 lmac_nu
/* reset the device */
iwl_trans_sw_reset(trans);
- err = iwl_finish_nic_init(trans, trans->trans_cfg);
+ err = iwl_finish_nic_init(trans);
if (err)
return;
}
@@ -328,6 +328,13 @@ static void iwl_fwrt_dump_tcm_error_log(struct iwl_fw_runtime *fwrt)
for (i = 0; i < ARRAY_SIZE(table.sw_status); i++)
IWL_ERR(fwrt, "0x%08X | tcm SW status[%d]\n",
table.sw_status[i], i);
+
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ u32 scratch = iwl_read32(trans, CSR_FUNC_SCRATCH);
+
+ IWL_ERR(fwrt, "Function Scratch status:\n");
+ IWL_ERR(fwrt, "0x%08X | Func Scratch\n", scratch);
+ }
}
static void iwl_fwrt_dump_iml_error_log(struct iwl_fw_runtime *fwrt)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index 521ca2bb0e92..9036b32ec765 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -342,10 +342,6 @@ struct iwl_fw_ini_dump_cfg_name {
#define IWL_AX210_HW_TYPE 0x42
/* How many bits to roll when adding to the HW type of AX210 HW */
#define IWL_AX210_HW_TYPE_ADDITION_SHIFT 12
-/* This prph is used to tell apart HW_TYPE == 0x42 NICs */
-#define WFPM_OTP_CFG1_ADDR 0xd03098
-#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
-#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
/* struct iwl_fw_ini_dump_info - ini dump information
* @version: dump version
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
index 6c8e9f3a6af2..3d572f5024bb 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
@@ -100,6 +100,9 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_PNVM_SKU = 64,
IWL_UCODE_TLV_TCM_DEBUG_ADDRS = 65,
+ IWL_UCODE_TLV_SEC_TABLE_ADDR = 66,
+ IWL_UCODE_TLV_D3_KEK_KCK_ADDR = 67,
+
IWL_UCODE_TLV_FW_NUM_STATIONS = IWL_UCODE_TLV_CONST_BASE + 0,
IWL_UCODE_TLV_TYPE_DEBUG_INFO = IWL_UCODE_TLV_DEBUG_BASE + 0,
@@ -107,6 +110,7 @@ enum iwl_ucode_tlv_type {
IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_TLV_DEBUG_BASE + 2,
IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_TLV_DEBUG_BASE + 3,
IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_TLV_DEBUG_BASE + 4,
+ IWL_UCODE_TLV_TYPE_CONF_SET = IWL_UCODE_TLV_DEBUG_BASE + 5,
IWL_UCODE_TLV_DEBUG_MAX = IWL_UCODE_TLV_TYPE_TRIGGERS,
/* TLVs 0x1000-0x2000 are for internal driver usage */
@@ -416,6 +420,8 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PASSIVE_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)58,
IWL_UCODE_TLV_CAPA_HIDDEN_6GHZ_SCAN = (__force iwl_ucode_tlv_capa_t)59,
IWL_UCODE_TLV_CAPA_BROADCAST_TWT = (__force iwl_ucode_tlv_capa_t)60,
+ IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO = (__force iwl_ucode_tlv_capa_t)61,
+ IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)62,
/* set 2 */
IWL_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = (__force iwl_ucode_tlv_capa_t)64,
@@ -449,7 +455,7 @@ enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT = (__force iwl_ucode_tlv_capa_t)98,
IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT = (__force iwl_ucode_tlv_capa_t)100,
- IWL_UCODE_TLV_CAPA_RFIM_SUPPORT = (__force iwl_ucode_tlv_capa_t)102,
+ IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT = (__force iwl_ucode_tlv_capa_t)104,
#ifdef __CHECKER__
/* sparse says it cannot increment the previous enum member */
@@ -956,6 +962,10 @@ struct iwl_fw_tcm_error_addr {
__le32 addr;
}; /* FW_TLV_TCM_ERROR_INFO_ADDRS_S */
+struct iwl_fw_dump_exclude {
+ __le32 addr, size;
+};
+
static inline size_t _iwl_tlv_array_len(const struct iwl_ucode_tlv *tlv,
size_t fixed_size, size_t var_size)
{
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.c b/drivers/net/wireless/intel/iwlwifi/fw/img.c
index c2a4e60518bc..24a966673691 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.c
@@ -1,59 +1,7 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
* Copyright(c) 2019 - 2020 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2019 - 2020 Intel Corporation
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
+ */
#include "img.h"
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h
index 153a3529e77a..993bda17fa30 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/img.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h
@@ -124,11 +124,13 @@ struct fw_img {
* @fw_paging_phys: page phy pointer
* @fw_paging_block: pointer to the allocated block
* @fw_paging_size: page size
+ * @fw_offs: offset in the device
*/
struct iwl_fw_paging {
dma_addr_t fw_paging_phys;
struct page *fw_paging_block;
u32 fw_paging_size;
+ u32 fw_offs;
};
/**
@@ -174,6 +176,10 @@ struct iwl_fw_dbg {
u32 dump_mask;
};
+struct iwl_dump_exclude {
+ u32 addr, size;
+};
+
/**
* struct iwl_fw - variables associated with the firmware
*
@@ -194,6 +200,10 @@ struct iwl_fw_dbg {
* @cipher_scheme: optional external cipher scheme.
* @human_readable: human readable version
* we get the ALIVE from the uCode
+ * @phy_integration_ver: PHY integration version string
+ * @phy_integration_ver_len: length of @phy_integration_ver
+ * @dump_excl: image dump exclusion areas for RT image
+ * @dump_excl_wowlan: image dump exclusion areas for WoWLAN image
*/
struct iwl_fw {
u32 ucode_ver;
@@ -225,6 +235,8 @@ struct iwl_fw {
u8 *phy_integration_ver;
u32 phy_integration_ver_len;
+
+ struct iwl_dump_exclude dump_excl[2], dump_excl_wowlan[2];
};
static inline const char *get_fw_dbg_mode_string(int mode)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 2ecec00db9da..566957ac4539 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
* Copyright (C) 2017 Intel Deutschland GmbH
- * Copyright (C) 2019-2020 Intel Corporation
+ * Copyright (C) 2019-2021 Intel Corporation
*/
#include "iwl-drv.h"
#include "runtime.h"
@@ -16,6 +16,8 @@
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx,
struct dentry *dbgfs_dir)
{
int i;
@@ -26,6 +28,8 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->dev = trans->dev;
fwrt->dump.conf = FW_DBG_INVALID;
fwrt->ops = ops;
+ fwrt->sanitize_ops = sanitize_ops;
+ fwrt->sanitize_ctx = sanitize_ctx;
fwrt->ops_ctx = ops_ctx;
for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++) {
fwrt->dump.wks[i].idx = i;
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/paging.c b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
index 4a8fe9641a32..58ca3849d1f3 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/paging.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/paging.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -152,6 +152,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
image->sec[sec_idx].data,
image->sec[sec_idx].len);
+ fwrt->fw_paging_db[0].fw_offs = image->sec[sec_idx].offset;
dma_sync_single_for_device(fwrt->trans->dev,
fwrt->fw_paging_db[0].fw_paging_phys,
fwrt->fw_paging_db[0].fw_paging_size,
@@ -197,6 +198,7 @@ static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
memcpy(page_address(block->fw_paging_block),
image->sec[sec_idx].data + offset, len);
+ block->fw_offs = image->sec[sec_idx].offset + offset;
dma_sync_single_for_device(fwrt->trans->dev,
block->fw_paging_phys,
block->fw_paging_size,
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
index dde22bdc8703..7d4aa398729a 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c
@@ -162,7 +162,7 @@ done:
goto out;
}
- IWL_INFO(trans, "loaded PNVM version 0x%0x\n", sha1);
+ IWL_INFO(trans, "loaded PNVM version %08x\n", sha1);
ret = iwl_trans_set_pnvm(trans, pnvm_data, size);
out:
@@ -284,16 +284,19 @@ int iwl_pnvm_load(struct iwl_trans *trans,
/* First attempt to get the PNVM from BIOS */
package = iwl_uefi_get_pnvm(trans, &len);
if (!IS_ERR_OR_NULL(package)) {
- data = kmemdup(package->data, len, GFP_KERNEL);
+ if (len >= sizeof(*package)) {
+ /* we need only the data */
+ len -= sizeof(*package);
+ data = kmemdup(package->data, len, GFP_KERNEL);
+ } else {
+ data = NULL;
+ }
/* free package regardless of whether kmemdup succeeded */
kfree(package);
- if (data) {
- /* we need only the data size */
- len -= sizeof(*package);
+ if (data)
goto parse;
- }
}
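
The reworked PNVM path above validates the UEFI package length before stripping the header, instead of adjusting the length after the copy. The same guard, reduced to a stand-alone helper with made-up names:

#include <linux/slab.h>

static void *example_copy_payload(const void *pkg, size_t pkg_len,
				  size_t hdr_len, size_t *out_len)
{
	if (pkg_len < hdr_len)
		return NULL;	/* too short to even hold the header */

	*out_len = pkg_len - hdr_len;
	return kmemdup((const u8 *)pkg + hdr_len, *out_len, GFP_KERNEL);
}
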
/* If it's not available, try from the filesystem */
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/rs.c b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
new file mode 100644
index 000000000000..a21c3befd93b
--- /dev/null
+++ b/drivers/net/wireless/intel/iwlwifi/fw/rs.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/*
+ * Copyright (C) 2021 Intel Corporation
+ */
+
+#include <net/mac80211.h>
+#include "fw/api/rs.h"
+#include "iwl-drv.h"
+#include "iwl-config.h"
+
+#define IWL_DECLARE_RATE_INFO(r) \
+ [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+
+/*
+ * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
+ */
+static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
+ IWL_DECLARE_RATE_INFO(1),
+ IWL_DECLARE_RATE_INFO(2),
+ IWL_DECLARE_RATE_INFO(5),
+ IWL_DECLARE_RATE_INFO(11),
+ IWL_DECLARE_RATE_INFO(6),
+ IWL_DECLARE_RATE_INFO(9),
+ IWL_DECLARE_RATE_INFO(12),
+ IWL_DECLARE_RATE_INFO(18),
+ IWL_DECLARE_RATE_INFO(24),
+ IWL_DECLARE_RATE_INFO(36),
+ IWL_DECLARE_RATE_INFO(48),
+ IWL_DECLARE_RATE_INFO(54),
+};
+
+/* mbps, mcs */
+static const struct iwl_rate_mcs_info rate_mcs[IWL_RATE_COUNT] = {
+ { "1", "BPSK DSSS"},
+ { "2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ { "11", "QPSK CCK"},
+ { "6", "BPSK 1/2"},
+ { "9", "BPSK 1/2"},
+ { "12", "QPSK 1/2"},
+ { "18", "QPSK 3/4"},
+ { "24", "16QAM 1/2"},
+ { "36", "16QAM 3/4"},
+ { "48", "64QAM 2/3"},
+ { "54", "64QAM 3/4"},
+ { "60", "64QAM 5/6"},
+};
+
+static const char * const ant_name[] = {
+ [ANT_NONE] = "None",
+ [ANT_A] = "A",
+ [ANT_B] = "B",
+ [ANT_AB] = "AB",
+};
+
+static const char * const pretty_bw[] = {
+ "20Mhz",
+ "40Mhz",
+ "80Mhz",
+ "160 Mhz",
+ "320Mhz",
+};
+
+u8 iwl_fw_rate_idx_to_plcp(int idx)
+{
+ return fw_rate_idx_to_plcp[idx];
+}
+IWL_EXPORT_SYMBOL(iwl_fw_rate_idx_to_plcp);
+
+const struct iwl_rate_mcs_info *iwl_rate_mcs(int idx)
+{
+ return &rate_mcs[idx];
+}
+IWL_EXPORT_SYMBOL(iwl_rate_mcs);
+
+const char *iwl_rs_pretty_ant(u8 ant)
+{
+ if (ant >= ARRAY_SIZE(ant_name))
+ return "UNKNOWN";
+
+ return ant_name[ant];
+}
+IWL_EXPORT_SYMBOL(iwl_rs_pretty_ant);
+
+const char *iwl_rs_pretty_bw(int bw)
+{
+ if (bw >= ARRAY_SIZE(pretty_bw))
+ return "unknown bw";
+
+ return pretty_bw[bw];
+}
+IWL_EXPORT_SYMBOL(iwl_rs_pretty_bw);
+
+u32 iwl_new_rate_from_v1(u32 rate_v1)
+{
+ u32 rate_v2 = 0;
+ u32 dup = 0;
+
+ if (rate_v1 == 0)
+ return rate_v1;
+ /* convert rate */
+ if (rate_v1 & RATE_MCS_HT_MSK_V1) {
+ u32 nss = 0;
+
+ rate_v2 |= RATE_MCS_HT_MSK;
+ rate_v2 |=
+ rate_v1 & RATE_HT_MCS_RATE_CODE_MSK_V1;
+ nss = (rate_v1 & RATE_HT_MCS_MIMO2_MSK) >>
+ RATE_HT_MCS_NSS_POS_V1;
+ rate_v2 |= nss << RATE_MCS_NSS_POS;
+ } else if (rate_v1 & RATE_MCS_VHT_MSK_V1 ||
+ rate_v1 & RATE_MCS_HE_MSK_V1) {
+ rate_v2 |= rate_v1 & RATE_VHT_MCS_RATE_CODE_MSK;
+
+ rate_v2 |= rate_v1 & RATE_VHT_MCS_MIMO2_MSK;
+
+ if (rate_v1 & RATE_MCS_HE_MSK_V1) {
+ u32 he_type_bits = rate_v1 & RATE_MCS_HE_TYPE_MSK_V1;
+ u32 he_type = he_type_bits >> RATE_MCS_HE_TYPE_POS_V1;
+ u32 he_106t = (rate_v1 & RATE_MCS_HE_106T_MSK_V1) >>
+ RATE_MCS_HE_106T_POS_V1;
+ u32 he_gi_ltf = (rate_v1 & RATE_MCS_HE_GI_LTF_MSK_V1) >>
+ RATE_MCS_HE_GI_LTF_POS;
+
+ if ((he_type_bits == RATE_MCS_HE_TYPE_SU ||
+ he_type_bits == RATE_MCS_HE_TYPE_EXT_SU) &&
+ he_gi_ltf == RATE_MCS_HE_SU_4_LTF)
+ /* the new rate has an additional bit to
+ * represent the value 4 rather than using the SGI
+ * bit for this purpose - as was done in the old
+ * rate */
+ he_gi_ltf += (rate_v1 & RATE_MCS_SGI_MSK_V1) >>
+ RATE_MCS_SGI_POS_V1;
+
+ rate_v2 |= he_gi_ltf << RATE_MCS_HE_GI_LTF_POS;
+ rate_v2 |= he_type << RATE_MCS_HE_TYPE_POS;
+ rate_v2 |= he_106t << RATE_MCS_HE_106T_POS;
+ rate_v2 |= rate_v1 & RATE_HE_DUAL_CARRIER_MODE_MSK;
+ rate_v2 |= RATE_MCS_HE_MSK;
+ } else {
+ rate_v2 |= RATE_MCS_VHT_MSK;
+ }
+ /* if legacy format */
+ } else {
+ u32 legacy_rate = iwl_legacy_rate_to_fw_idx(rate_v1);
+
+ WARN_ON(legacy_rate < 0);
+ rate_v2 |= legacy_rate;
+ if (!(rate_v1 & RATE_MCS_CCK_MSK_V1))
+ rate_v2 |= RATE_MCS_LEGACY_OFDM_MSK;
+ }
+
+ /* convert flags */
+ if (rate_v1 & RATE_MCS_LDPC_MSK_V1)
+ rate_v2 |= RATE_MCS_LDPC_MSK;
+ rate_v2 |= (rate_v1 & RATE_MCS_CHAN_WIDTH_MSK_V1) |
+ (rate_v1 & RATE_MCS_ANT_AB_MSK) |
+ (rate_v1 & RATE_MCS_STBC_MSK) |
+ (rate_v1 & RATE_MCS_BF_MSK);
+
+ dup = (rate_v1 & RATE_MCS_DUP_MSK_V1) >> RATE_MCS_DUP_POS_V1;
+ if (dup) {
+ rate_v2 |= RATE_MCS_DUP_MSK;
+ rate_v2 |= dup << RATE_MCS_CHAN_WIDTH_POS;
+ }
+
+ if ((!(rate_v1 & RATE_MCS_HE_MSK_V1)) &&
+ (rate_v1 & RATE_MCS_SGI_MSK_V1))
+ rate_v2 |= RATE_MCS_SGI_MSK;
+
+ return rate_v2;
+}
+IWL_EXPORT_SYMBOL(iwl_new_rate_from_v1);
+
+u32 iwl_legacy_rate_to_fw_idx(u32 rate_n_flags)
+{
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
+ int idx;
+ bool ofdm = !(rate_n_flags & RATE_MCS_CCK_MSK_V1);
+ int offset = ofdm ? IWL_FIRST_OFDM_RATE : 0;
+ int last = ofdm ? IWL_RATE_COUNT_LEGACY : IWL_FIRST_OFDM_RATE;
+
+ for (idx = offset; idx < last; idx++)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
+ return idx - offset;
+ return -1;
+}
+
+int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
+{
+ char *type;
+ u8 mcs = 0, nss = 0;
+ u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK) >>
+ RATE_MCS_CHAN_WIDTH_POS;
+ u32 format = rate & RATE_MCS_MOD_TYPE_MSK;
+ bool sgi;
+
+ if (format == RATE_MCS_CCK_MSK ||
+ format == RATE_MCS_LEGACY_OFDM_MSK) {
+ int legacy_rate = rate & RATE_LEGACY_RATE_MSK;
+ int index = format == RATE_MCS_CCK_MSK ?
+ legacy_rate :
+ legacy_rate + IWL_FIRST_OFDM_RATE;
+
+ return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
+ iwl_rs_pretty_ant(ant),
+ index == IWL_RATE_INVALID ? "BAD" :
+ iwl_rate_mcs(index)->mbps);
+ }
+
+ if (format == RATE_MCS_VHT_MSK)
+ type = "VHT";
+ else if (format == RATE_MCS_HT_MSK)
+ type = "HT";
+ else if (format == RATE_MCS_HE_MSK)
+ type = "HE";
+ else
+ type = "Unknown"; /* shouldn't happen */
+
+ mcs = format == RATE_MCS_HT_MSK ?
+ RATE_HT_MCS_INDEX(rate) :
+ rate & RATE_MCS_CODE_MSK;
+ nss = ((rate & RATE_MCS_NSS_MSK)
+ >> RATE_MCS_NSS_POS) + 1;
+ sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate) :
+ rate & RATE_MCS_SGI_MSK;
+
+ return scnprintf(buf, bufsz,
+ "0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
+ rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss,
+ (sgi) ? "SGI " : "NGI ",
+ (rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
+ (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
+ (rate & RATE_MCS_BF_MSK) ? "BF " : "");
+}
+IWL_EXPORT_SYMBOL(rs_pretty_print_rate);
+
+bool iwl_he_is_sgi(u32 rate_n_flags)
+{
+ u32 type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u32 ltf_gi = rate_n_flags & RATE_MCS_HE_GI_LTF_MSK;
+
+ if (type == RATE_MCS_HE_TYPE_SU ||
+ type == RATE_MCS_HE_TYPE_EXT_SU)
+ return ltf_gi == RATE_MCS_HE_SU_4_LTF_08_GI;
+ return false;
+}
+IWL_EXPORT_SYMBOL(iwl_he_is_sgi);
+
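
The new fw/rs.c above centralizes rate helpers that were previously duplicated in the op-modes. A small usage sketch (not taken from the patch) combining two of them: convert a v1 rate_n_flags word and pretty-print the result.

static void example_print_rate(struct iwl_trans *trans, u32 rate_v1)
{
	char buf[100];

	rs_pretty_print_rate(buf, sizeof(buf), iwl_new_rate_from_v1(rate_v1));
	IWL_INFO(trans, "rate: %s\n", buf);
}
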
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
index 35af85a5430b..69799f1ed2c4 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h
@@ -105,6 +105,9 @@ struct iwl_fw_runtime {
const struct iwl_fw_runtime_ops *ops;
void *ops_ctx;
+ const struct iwl_dump_sanitize_ops *sanitize_ops;
+ void *sanitize_ctx;
+
/* Paging */
struct iwl_fw_paging fw_paging_db[NUM_OF_FW_PAGING_BLOCKS];
u16 num_of_paging_blk;
@@ -151,7 +154,7 @@ struct iwl_fw_runtime {
struct iwl_sar_profile sar_profiles[ACPI_SAR_PROFILE_NUM];
u8 sar_chain_a_profile;
u8 sar_chain_b_profile;
- struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES];
+ struct iwl_geo_profile geo_profiles[ACPI_NUM_GEO_PROFILES_REV3];
u32 geo_rev;
union iwl_ppag_table_cmd ppag_table;
u32 ppag_ver;
@@ -161,6 +164,8 @@ struct iwl_fw_runtime {
void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
const struct iwl_fw *fw,
const struct iwl_fw_runtime_ops *ops, void *ops_ctx,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx,
struct dentry *dbgfs_dir);
static inline void iwl_fw_runtime_free(struct iwl_fw_runtime *fwrt)
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
index 45d0b36d79b5..d552c656ac9f 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/uefi.h
@@ -2,7 +2,8 @@
/*
* Copyright(c) 2021 Intel Corporation
*/
-
+#ifndef __iwl_fw_uefi__
+#define __iwl_fw_uefi__
#define IWL_UEFI_OEM_PNVM_NAME L"UefiCnvWlanOemSignedPnvm"
#define IWL_UEFI_REDUCED_POWER_NAME L"UefiCnvWlanReducedPower"
@@ -40,3 +41,5 @@ void *iwl_uefi_get_reduced_power(struct iwl_trans *trans, size_t *len)
return ERR_PTR(-EOPNOTSUPP);
}
#endif /* CONFIG_EFI */
+
+#endif /* __iwl_fw_uefi__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index 7eb534df5331..665167a223f6 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -95,7 +95,6 @@ enum iwl_nvm_type {
#define ANT_AC (ANT_A | ANT_C)
#define ANT_BC (ANT_B | ANT_C)
#define ANT_ABC (ANT_A | ANT_B | ANT_C)
-#define MAX_ANT_NUM 3
static inline u8 num_of_ant(u8 mask)
@@ -420,6 +419,7 @@ struct iwl_cfg {
#define IWL_CFG_MAC_TYPE_SOF 0x43
#define IWL_CFG_MAC_TYPE_MA 0x44
#define IWL_CFG_MAC_TYPE_BZ 0x46
+#define IWL_CFG_MAC_TYPE_GL 0x47
#define IWL_CFG_RF_TYPE_TH 0x105
#define IWL_CFG_RF_TYPE_TH1 0x108
@@ -511,6 +511,10 @@ extern const char iwl_ax210_killer_1675w_name[];
extern const char iwl_ax210_killer_1675x_name[];
extern const char iwl9560_killer_1550i_160_name[];
extern const char iwl9560_killer_1550s_160_name[];
+extern const char iwl_ax211_killer_1675s_name[];
+extern const char iwl_ax211_killer_1675i_name[];
+extern const char iwl_ax411_killer_1690s_name[];
+extern const char iwl_ax411_killer_1690i_name[];
extern const char iwl_ax211_name[];
extern const char iwl_ax221_name[];
extern const char iwl_ax231_name[];
@@ -628,6 +632,8 @@ extern const struct iwl_cfg iwl_cfg_bz_a0_hr_b0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_gf4_a0;
extern const struct iwl_cfg iwl_cfg_bz_a0_mr_a0;
+extern const struct iwl_cfg iwl_cfg_bz_a0_fm_a0;
+extern const struct iwl_cfg iwl_cfg_gl_a0_fm_a0;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
index e1fec23ac07f..5adf485db38e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h
@@ -109,12 +109,12 @@ struct iwl_prph_scratch_pnvm_cfg {
* struct iwl_prph_scratch_hwm_cfg - hwm config
* @hwm_base_addr: hwm start address
* @hwm_size: hwm size in DWs
- * @reserved: reserved
+ * @debug_token_config: debug preset
*/
struct iwl_prph_scratch_hwm_cfg {
__le64 hwm_base_addr;
__le32 hwm_size;
- __le32 reserved;
+ __le32 debug_token_config;
} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index cf796403c45c..ff79a2ecb242 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -34,6 +34,7 @@
#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
#define CSR_GP_CNTRL (CSR_BASE+0x024)
+#define CSR_FUNC_SCRATCH (CSR_BASE+0x02c) /* Scratch register - used for FW dbg */
/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
@@ -135,6 +136,12 @@
#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
+/*
+ * Scratch register initial configuration - this is set on init, and read
+ * during a FW error.
+ */
+#define CSR_FUNC_SCRATCH_INIT_VALUE (0x01010101)
+
/* Bits for CSR_HW_IF_CONFIG_REG */
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH (0x00000003)
#define CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP (0x0000000C)
@@ -598,6 +605,7 @@ enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
MSIX_HW_INT_CAUSES_REG_IML = BIT(1),
MSIX_HW_INT_CAUSES_REG_RESET_DONE = BIT(2),
+ MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
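
CSR_FUNC_SCRATCH is introduced above together with an initial value, and the dump.c hunk earlier in this series reads it back on BZ-family errors. A sketch of the intended write-on-init / read-on-error flow, inferred from those hunks rather than quoted from them:

static void example_scratch_init(struct iwl_trans *trans)
{
	/* programmed once while bringing the function up */
	iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
}

static void example_scratch_on_error(struct iwl_trans *trans)
{
	/* read back while dumping a firmware error */
	IWL_ERR(trans, "0x%08X | Func Scratch\n",
		iwl_read32(trans, CSR_FUNC_SCRATCH));
}
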
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
index 125479b5c0d6..7ab98b419cc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
@@ -16,6 +16,7 @@
* @IWL_DBG_TLV_TYPE_HCMD: host command TLV
* @IWL_DBG_TLV_TYPE_REGION: region TLV
* @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
+ * @IWL_DBG_TLV_TYPE_CONF_SET: conf set TLV
* @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
*/
enum iwl_dbg_tlv_type {
@@ -25,6 +26,7 @@ enum iwl_dbg_tlv_type {
IWL_DBG_TLV_TYPE_HCMD,
IWL_DBG_TLV_TYPE_REGION,
IWL_DBG_TLV_TYPE_TRIGGER,
+ IWL_DBG_TLV_TYPE_CONF_SET,
IWL_DBG_TLV_TYPE_NUM,
};
@@ -59,6 +61,7 @@ dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
[IWL_DBG_TLV_TYPE_HCMD] = {.min_ver = 1, .max_ver = 1,},
[IWL_DBG_TLV_TYPE_REGION] = {.min_ver = 1, .max_ver = 2,},
[IWL_DBG_TLV_TYPE_TRIGGER] = {.min_ver = 1, .max_ver = 1,},
+ [IWL_DBG_TLV_TYPE_CONF_SET] = {.min_ver = 1, .max_ver = 1,},
};
static int iwl_dbg_tlv_add(const struct iwl_ucode_tlv *tlv,
@@ -260,6 +263,30 @@ static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
return ret;
}
+static int iwl_dbg_tlv_config_set(struct iwl_trans *trans,
+ const struct iwl_ucode_tlv *tlv)
+{
+ struct iwl_fw_ini_conf_set_tlv *conf_set = (void *)tlv->data;
+ u32 tp = le32_to_cpu(conf_set->time_point);
+ u32 type = le32_to_cpu(conf_set->set_type);
+
+ if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
+ tp >= IWL_FW_INI_TIME_POINT_NUM) {
+ IWL_DEBUG_FW(trans,
+ "WRT: Invalid time point %u for config set TLV\n", tp);
+ return -EINVAL;
+ }
+
+ if (type <= IWL_FW_INI_CONFIG_SET_TYPE_INVALID ||
+ type >= IWL_FW_INI_CONFIG_SET_TYPE_MAX_NUM) {
+ IWL_DEBUG_FW(trans,
+ "WRT: Invalid config set type %u for config set TLV\n", type);
+ return -EINVAL;
+ }
+
+ return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].config_list);
+}
+
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
const struct iwl_ucode_tlv *tlv) = {
[IWL_DBG_TLV_TYPE_DEBUG_INFO] = iwl_dbg_tlv_alloc_debug_info,
@@ -267,6 +294,7 @@ static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
[IWL_DBG_TLV_TYPE_HCMD] = iwl_dbg_tlv_alloc_hcmd,
[IWL_DBG_TLV_TYPE_REGION] = iwl_dbg_tlv_alloc_region,
[IWL_DBG_TLV_TYPE_TRIGGER] = iwl_dbg_tlv_alloc_trigger,
+ [IWL_DBG_TLV_TYPE_CONF_SET] = iwl_dbg_tlv_config_set,
};
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, const struct iwl_ucode_tlv *tlv,
@@ -399,6 +427,13 @@ void iwl_dbg_tlv_free(struct iwl_trans *trans)
list_del(&tlv_node->list);
kfree(tlv_node);
}
+
+ list_for_each_entry_safe(tlv_node, tlv_node_tmp,
+ &tp->config_list, list) {
+ list_del(&tlv_node->list);
+ kfree(tlv_node);
+ }
+
}
for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
@@ -466,6 +501,7 @@ void iwl_dbg_tlv_init(struct iwl_trans *trans)
INIT_LIST_HEAD(&tp->trig_list);
INIT_LIST_HEAD(&tp->hcmd_list);
INIT_LIST_HEAD(&tp->active_trig_list);
+ INIT_LIST_HEAD(&tp->config_list);
}
}
@@ -649,6 +685,10 @@ static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
int ret, i;
+ if (fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
+ return;
+
for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
if (ret)
@@ -658,6 +698,87 @@ static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
}
}
+static int iwl_dbg_tlv_update_dram(struct iwl_fw_runtime *fwrt,
+ enum iwl_fw_ini_allocation_id alloc_id,
+ struct iwl_dram_info *dram_info)
+{
+ struct iwl_fw_mon *fw_mon;
+ u32 remain_frags, num_frags;
+ int j, fw_mon_idx = 0;
+ struct iwl_buf_alloc_cmd *data;
+
+ if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
+ IWL_FW_INI_LOCATION_DRAM_PATH) {
+ IWL_DEBUG_FW(fwrt, "DRAM_PATH is not supported alloc_id %u\n", alloc_id);
+ return -1;
+ }
+
+ fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];
+
+ /* the first fragment of DBGC1 is given to the FW via register
+ * or context info
+ */
+ if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
+ fw_mon_idx++;
+
+ remain_frags = fw_mon->num_frags - fw_mon_idx;
+ if (!remain_frags)
+ return -1;
+
+ num_frags = min_t(u32, remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);
+ data = &dram_info->dram_frags[alloc_id - 1];
+ data->alloc_id = cpu_to_le32(alloc_id);
+ data->num_frags = cpu_to_le32(num_frags);
+ data->buf_location = cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH);
+
+ IWL_DEBUG_FW(fwrt, "WRT: DRAM buffer details alloc_id=%u, num_frags=%u\n",
+ cpu_to_le32(alloc_id), cpu_to_le32(num_frags));
+
+ for (j = 0; j < num_frags; j++) {
+ struct iwl_buf_alloc_frag *frag = &data->frags[j];
+ struct iwl_dram_data *fw_mon_frag = &fw_mon->frags[fw_mon_idx++];
+
+ frag->addr = cpu_to_le64(fw_mon_frag->physical);
+ frag->size = cpu_to_le32(fw_mon_frag->size);
+ IWL_DEBUG_FW(fwrt, "WRT: DRAM fragment details\n");
+ IWL_DEBUG_FW(fwrt, "frag=%u, addr=0x%016llx, size=0x%x)\n",
+ j, cpu_to_le64(fw_mon_frag->physical),
+ cpu_to_le32(fw_mon_frag->size));
+ }
+ return 0;
+}
+
+static void iwl_dbg_tlv_update_drams(struct iwl_fw_runtime *fwrt)
+{
+ int ret, i, dram_alloc = 0;
+ struct iwl_dram_info dram_info;
+ struct iwl_dram_data *frags =
+ &fwrt->trans->dbg.fw_mon_ini[IWL_FW_INI_ALLOCATION_ID_DBGC1].frags[0];
+
+ if (!fw_has_capa(&fwrt->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_DRAM_FRAG_SUPPORT))
+ return;
+
+ dram_info.first_word = cpu_to_le32(DRAM_INFO_FIRST_MAGIC_WORD);
+ dram_info.second_word = cpu_to_le32(DRAM_INFO_SECOND_MAGIC_WORD);
+
+ for (i = IWL_FW_INI_ALLOCATION_ID_DBGC1;
+ i <= IWL_FW_INI_ALLOCATION_ID_DBGC3; i++) {
+ ret = iwl_dbg_tlv_update_dram(fwrt, i, &dram_info);
+ if (!ret)
+ dram_alloc++;
+ else
+ IWL_WARN(fwrt,
+ "WRT: Failed to set DRAM buffer for alloc id %d, ret=%d\n",
+ i, ret);
+ }
+ if (dram_alloc) {
+ memcpy(frags->block, &dram_info, sizeof(dram_info));
+ IWL_DEBUG_FW(fwrt, "block data after %016x\n",
+ *((int *)fwrt->trans->dbg.fw_mon_ini[1].frags[0].block));
+ }
+}
+
static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
struct list_head *hcmd_list)
{
@@ -677,6 +798,97 @@ static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
}
}
+static void iwl_dbg_tlv_apply_config(struct iwl_fw_runtime *fwrt,
+ struct list_head *config_list)
+{
+ struct iwl_dbg_tlv_node *node;
+
+ list_for_each_entry(node, config_list, list) {
+ struct iwl_fw_ini_conf_set_tlv *config_list = (void *)node->tlv.data;
+ u32 count, address, value;
+ u32 len = (le32_to_cpu(node->tlv.length) - sizeof(*config_list)) / 8;
+ u32 type = le32_to_cpu(config_list->set_type);
+ u32 offset = le32_to_cpu(config_list->addr_offset);
+
+ switch (type) {
+ case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_PERIPHERY_MAC: {
+ if (!iwl_trans_grab_nic_access(fwrt->trans)) {
+ IWL_DEBUG_FW(fwrt, "WRT: failed to get nic access\n");
+ IWL_DEBUG_FW(fwrt, "WRT: skipping MAC PERIPHERY config\n");
+ continue;
+ }
+ IWL_DEBUG_FW(fwrt, "WRT: MAC PERIPHERY config len: len %u\n", len);
+ for (count = 0; count < len; count++) {
+ address = le32_to_cpu(config_list->addr_val[count].address);
+ value = le32_to_cpu(config_list->addr_val[count].value);
+ iwl_trans_write_prph(fwrt->trans, address + offset, value);
+ }
+ iwl_trans_release_nic_access(fwrt->trans);
+ break;
+ }
+ case IWL_FW_INI_CONFIG_SET_TYPE_DEVICE_MEMORY: {
+ for (count = 0; count < len; count++) {
+ address = le32_to_cpu(config_list->addr_val[count].address);
+ value = le32_to_cpu(config_list->addr_val[count].value);
+ iwl_trans_write_mem32(fwrt->trans, address + offset, value);
+ IWL_DEBUG_FW(fwrt, "WRT: DEV_MEM: count %u, add: %u val: %u\n",
+ count, address, value);
+ }
+ break;
+ }
+ case IWL_FW_INI_CONFIG_SET_TYPE_CSR: {
+ for (count = 0; count < len; count++) {
+ address = le32_to_cpu(config_list->addr_val[count].address);
+ value = le32_to_cpu(config_list->addr_val[count].value);
+ iwl_write32(fwrt->trans, address + offset, value);
+ IWL_DEBUG_FW(fwrt, "WRT: CSR: count %u, add: %u val: %u\n",
+ count, address, value);
+ }
+ break;
+ }
+ case IWL_FW_INI_CONFIG_SET_TYPE_DBGC_DRAM_ADDR: {
+ struct iwl_dbgc1_info dram_info = {};
+ struct iwl_dram_data *frags = &fwrt->trans->dbg.fw_mon_ini[1].frags[0];
+ __le64 dram_base_addr = cpu_to_le64(frags->physical);
+ __le32 dram_size = cpu_to_le32(frags->size);
+ u64 dram_addr = le64_to_cpu(dram_base_addr);
+ u32 ret;
+
+ IWL_DEBUG_FW(fwrt, "WRT: dram_base_addr 0x%016llx, dram_size 0x%x\n",
+ dram_base_addr, dram_size);
+ IWL_DEBUG_FW(fwrt, "WRT: config_list->addr_offset: %u\n",
+ le32_to_cpu(config_list->addr_offset));
+ for (count = 0; count < len; count++) {
+ address = le32_to_cpu(config_list->addr_val[count].address);
+ dram_info.dbgc1_add_lsb =
+ cpu_to_le32((dram_addr & 0x00000000FFFFFFFFULL) + 0x400);
+ dram_info.dbgc1_add_msb =
+ cpu_to_le32((dram_addr & 0xFFFFFFFF00000000ULL) >> 32);
+ dram_info.dbgc1_size = cpu_to_le32(le32_to_cpu(dram_size) - 0x400);
+ ret = iwl_trans_write_mem(fwrt->trans,
+ address + offset, &dram_info, 4);
+ if (ret) {
+ IWL_ERR(fwrt, "Failed to write dram_info to HW_SMEM\n");
+ break;
+ }
+ }
+ break;
+ }
+ case IWL_FW_INI_CONFIG_SET_TYPE_PERIPH_SCRATCH_HWM: {
+ u32 debug_token_config =
+ le32_to_cpu(config_list->addr_val[0].value);
+
+ IWL_DEBUG_FW(fwrt, "WRT: Setting HWM debug token config: %u\n",
+ debug_token_config);
+ fwrt->trans->dbg.ucode_preset = debug_token_config;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
+
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
struct iwl_dbg_tlv_timer_node *timer_node =
@@ -996,8 +1208,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
&fwrt->trans->dbg.fw_mon_cfg[i];
u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);
- if (dest == IWL_FW_INI_LOCATION_INVALID)
+ if (dest == IWL_FW_INI_LOCATION_INVALID) {
+ failed_alloc |= BIT(i);
continue;
+ }
if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
*ini_dest = dest;
@@ -1024,8 +1238,10 @@ static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
&fwrt->trans->dbg.active_regions[i];
u32 reg_type;
- if (!*active_reg)
+ if (!*active_reg) {
+ fwrt->trans->dbg.unsupported_region_msk |= BIT(i);
continue;
+ }
reg = (void *)(*active_reg)->data;
reg_type = le32_to_cpu(reg->type);
@@ -1051,7 +1267,7 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
union iwl_dbg_tlv_tp_data *tp_data,
bool sync)
{
- struct list_head *hcmd_list, *trig_list;
+ struct list_head *hcmd_list, *trig_list, *conf_list;
if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
@@ -1060,15 +1276,19 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;
+ conf_list = &fwrt->trans->dbg.time_point[tp_id].config_list;
switch (tp_id) {
case IWL_FW_INI_TIME_POINT_EARLY:
iwl_dbg_tlv_init_cfg(fwrt);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
+ iwl_dbg_tlv_update_drams(fwrt);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
iwl_dbg_tlv_apply_buffers(fwrt);
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
case IWL_FW_INI_TIME_POINT_PERIODIC:
@@ -1079,11 +1299,13 @@ void _iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data,
iwl_dbg_tlv_check_fw_pkt);
break;
default:
iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
+ iwl_dbg_tlv_apply_config(fwrt, conf_list);
iwl_dbg_tlv_tp_trigger(fwrt, sync, trig_list, tp_data, NULL);
break;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
index c12b1fd3f479..79287708bd6e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h
@@ -33,11 +33,13 @@ union iwl_dbg_tlv_tp_data {
* @trig_list: list of triggers
* @active_trig_list: list of active triggers
* @hcmd_list: list of host commands
+ * @config_list: list of configuration
*/
struct iwl_dbg_tlv_time_point_data {
struct list_head trig_list;
struct list_head active_trig_list;
struct list_head hcmd_list;
+ struct list_head config_list;
};
struct iwl_trans;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
index f6ca2fc37c40..ae4c2a3d63d5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2011 Intel Corporation
+ * Copyright (C) 2005-2011, 2021 Intel Corporation
*/
#include <linux/device.h>
#include <linux/interrupt.h>
@@ -31,21 +31,31 @@ IWL_EXPORT_SYMBOL(__iwl_info);
__iwl_fn(crit)
IWL_EXPORT_SYMBOL(__iwl_crit);
-void __iwl_err(struct device *dev, bool rfkill_prefix, bool trace_only,
- const char *fmt, ...)
+void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
- va_list args;
+ va_list args, args2;
va_start(args, fmt);
- vaf.va = &args;
- if (!trace_only) {
- if (rfkill_prefix)
+ switch (mode) {
+ case IWL_ERR_MODE_RATELIMIT:
+ if (net_ratelimit())
+ break;
+ fallthrough;
+ case IWL_ERR_MODE_REGULAR:
+ case IWL_ERR_MODE_RFKILL:
+ va_copy(args2, args);
+ vaf.va = &args2;
+ if (mode == IWL_ERR_MODE_RFKILL)
dev_err(dev, "(RFKILL) %pV", &vaf);
else
dev_err(dev, "%pV", &vaf);
+ va_end(args2);
+ break;
+ default:
+ break;
}
trace_iwlwifi_err(&vaf);
va_end(args);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
index 528eba441926..1b9f16a31b54 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-debug.h
@@ -2,14 +2,9 @@
/******************************************************************************
*
* Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2018 - 2020 Intel Corporation
+ * Copyright(c) 2018 - 2021 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#ifndef __iwl_debug_h__
@@ -27,9 +22,16 @@ static inline bool iwl_have_debug_level(u32 level)
#endif
}
+enum iwl_err_mode {
+ IWL_ERR_MODE_REGULAR,
+ IWL_ERR_MODE_RFKILL,
+ IWL_ERR_MODE_TRACE_ONLY,
+ IWL_ERR_MODE_RATELIMIT,
+};
+
struct device;
-void __iwl_err(struct device *dev, bool rfkill_prefix, bool only_trace,
- const char *fmt, ...) __printf(4, 5);
+void __iwl_err(struct device *dev, enum iwl_err_mode mode, const char *fmt, ...)
+ __printf(3, 4);
void __iwl_warn(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_info(struct device *dev, const char *fmt, ...) __printf(2, 3);
void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
@@ -38,13 +40,17 @@ void __iwl_crit(struct device *dev, const char *fmt, ...) __printf(2, 3);
#define CHECK_FOR_NEWLINE(f) BUILD_BUG_ON(f[sizeof(f) - 2] != '\n')
/* No matter what is m (priv, bus, trans), this will work */
-#define IWL_ERR_DEV(d, f, a...) \
+#define __IWL_ERR_DEV(d, mode, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
- __iwl_err((d), false, false, f, ## a); \
+ __iwl_err((d), mode, f, ## a); \
} while (0)
+#define IWL_ERR_DEV(d, f, a...) \
+ __IWL_ERR_DEV(d, IWL_ERR_MODE_REGULAR, f, ## a)
#define IWL_ERR(m, f, a...) \
IWL_ERR_DEV((m)->dev, f, ## a)
+#define IWL_ERR_LIMIT(m, f, a...) \
+ __IWL_ERR_DEV((m)->dev, IWL_ERR_MODE_RATELIMIT, f, ## a)
#define IWL_WARN(m, f, a...) \
do { \
CHECK_FOR_NEWLINE(f); \
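
iwl-debug.{c,h} above grow a rate-limited error mode alongside the regular and RFKILL ones. An illustrative call site for the new macro (the function and its queue parameter are made up):

static void example_report_overflow(struct iwl_trans *trans, int queue)
{
	/* noisy on a hot path, so use the rate-limited variant */
	IWL_ERR_LIMIT(trans, "RX queue %d overflow\n", queue);
}
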
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
index 1bc6ecc32140..347fd95c4e3a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h
@@ -4,11 +4,6 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
index a57019241a78..0af9d8362c5b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-io.h
@@ -3,11 +3,6 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016-2017 Intel Deutschland GmbH
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_IO) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
index 72ca882daed5..46ed723f138a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h
@@ -5,11 +5,6 @@
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
index d0467da5af03..7dd70011fd1e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-msg.h
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_MSG) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
index 2228faefffbc..3ec0205ac9f9 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-ucode.h
@@ -2,11 +2,6 @@
/******************************************************************************
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#if !defined(__IWLWIFI_DEVICE_TRACE_UCODE) || defined(TRACE_HEADER_MULTI_READ)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
index b5037db0c381..999b7c652289 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.c
@@ -3,11 +3,6 @@
*
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright (C) 2018 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/module.h>
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
index fc8bc212ee84..1455b578358b 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace.h
@@ -4,11 +4,6 @@
* Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved.
* Copyright(C) 2016 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#ifndef __IWLWIFI_DEVICE_TRACE
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 77124b8b235e..36196e07b1a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -31,7 +31,6 @@
#define DRV_DESCRIPTION "Intel(R) Wireless WiFi driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -550,6 +549,43 @@ static int iwl_parse_v1_v2_firmware(struct iwl_drv *drv,
return 0;
}
+static void iwl_drv_set_dump_exclude(struct iwl_drv *drv,
+ enum iwl_ucode_tlv_type tlv_type,
+ const void *tlv_data, u32 tlv_len)
+{
+ const struct iwl_fw_dump_exclude *fw = tlv_data;
+ struct iwl_dump_exclude *excl;
+
+ if (tlv_len < sizeof(*fw))
+ return;
+
+ if (tlv_type == IWL_UCODE_TLV_SEC_TABLE_ADDR) {
+ excl = &drv->fw.dump_excl[0];
+
+ /* second time we find this, it's for WoWLAN */
+ if (excl->addr)
+ excl = &drv->fw.dump_excl_wowlan[0];
+ } else if (fw_has_capa(&drv->fw.ucode_capa,
+ IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG)) {
+ /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is regular image */
+ excl = &drv->fw.dump_excl[0];
+ } else {
+ /* IWL_UCODE_TLV_D3_KEK_KCK_ADDR is WoWLAN image */
+ excl = &drv->fw.dump_excl_wowlan[0];
+ }
+
+ if (excl->addr)
+ excl++;
+
+ if (excl->addr) {
+ IWL_DEBUG_FW_INFO(drv, "found too many excludes in fw file\n");
+ return;
+ }
+
+ excl->addr = le32_to_cpu(fw->addr) & ~FW_ADDR_CACHE_CONTROL;
+ excl->size = le32_to_cpu(fw->size);
+}
+
static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
const struct firmware *ucode_raw,
struct iwl_firmware_pieces *pieces,
@@ -1133,6 +1169,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
case IWL_UCODE_TLV_TYPE_HCMD:
case IWL_UCODE_TLV_TYPE_REGIONS:
case IWL_UCODE_TLV_TYPE_TRIGGERS:
+ case IWL_UCODE_TLV_TYPE_CONF_SET:
if (iwlwifi_mod_params.enable_ini)
iwl_dbg_tlv_alloc(drv->trans, tlv, false);
break;
@@ -1166,6 +1203,11 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
return -ENOMEM;
drv->fw.phy_integration_ver_len = tlv_len;
break;
+ case IWL_UCODE_TLV_SEC_TABLE_ADDR:
+ case IWL_UCODE_TLV_D3_KEK_KCK_ADDR:
+ iwl_drv_set_dump_exclude(drv, tlv_type,
+ tlv_data, tlv_len);
+ break;
default:
IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type);
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
index b6442df0c643..2e2d60a58692 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2005-2014, 2020 Intel Corporation
+ * Copyright (C) 2005-2014, 2020-2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
*/
#ifndef __iwl_drv_h__
@@ -9,7 +9,6 @@
/* for all modules */
#define DRV_NAME "iwlwifi"
-#define DRV_AUTHOR "Intel Corporation <linuxwifi@intel.com>"
/* radio config bits (actual values from NVM definition) */
#define NVM_RF_CFG_DASH_MSK(x) (x & 0x3) /* bits 0-1 */
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
index dbab2f10d750..b9e86bf972e5 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2005-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation
*/
#include <linux/types.h>
#include <linux/slab.h>
@@ -139,7 +139,7 @@ static int iwl_init_otp_access(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
index 2517c4ae07ab..46917b4216b3 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c
@@ -398,9 +398,50 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf)
return 0;
}
-int iwl_finish_nic_init(struct iwl_trans *trans,
- const struct iwl_cfg_trans_params *cfg_trans)
+#define IWL_HOST_MON_BLOCK_PEMON 0x00
+#define IWL_HOST_MON_BLOCK_HIPM 0x22
+
+#define IWL_HOST_MON_BLOCK_PEMON_VEC0 0x00
+#define IWL_HOST_MON_BLOCK_PEMON_VEC1 0x01
+#define IWL_HOST_MON_BLOCK_PEMON_WFPM 0x06
+
+static void iwl_dump_host_monitor_block(struct iwl_trans *trans,
+ u32 block, u32 vec, u32 iter)
+{
+ int i;
+
+ IWL_ERR(trans, "Host monitor block 0x%x vector 0x%x\n", block, vec);
+ iwl_write32(trans, CSR_MONITOR_CFG_REG, (block << 8) | vec);
+ for (i = 0; i < iter; i++)
+ IWL_ERR(trans, " value [iter %d]: 0x%08x\n",
+ i, iwl_read32(trans, CSR_MONITOR_STATUS_REG));
+}
+
+static void iwl_dump_host_monitor(struct iwl_trans *trans)
{
+ switch (trans->trans_cfg->device_family) {
+ case IWL_DEVICE_FAMILY_22000:
+ case IWL_DEVICE_FAMILY_AX210:
+ IWL_ERR(trans, "CSR_RESET = 0x%x\n",
+ iwl_read32(trans, CSR_RESET));
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_VEC1, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_PEMON,
+ IWL_HOST_MON_BLOCK_PEMON_WFPM, 15);
+ iwl_dump_host_monitor_block(trans, IWL_HOST_MON_BLOCK_HIPM,
+ IWL_HOST_MON_BLOCK_PEMON_VEC0, 1);
+ break;
+ default:
+ /* not supported yet */
+ return;
+ }
+}
+
+int iwl_finish_nic_init(struct iwl_trans *trans)
+{
+ const struct iwl_cfg_trans_params *cfg_trans = trans->trans_cfg;
u32 poll_ready;
int err;
@@ -433,9 +474,12 @@ int iwl_finish_nic_init(struct iwl_trans *trans,
* and accesses to uCode SRAM.
*/
err = iwl_poll_bit(trans, CSR_GP_CNTRL, poll_ready, poll_ready, 25000);
- if (err < 0)
+ if (err < 0) {
IWL_DEBUG_INFO(trans, "Failed to wake NIC\n");
+ iwl_dump_host_monitor(trans);
+ }
+
if (cfg_trans->bisr_workaround) {
/* ensure BISR shift has finished */
udelay(200);
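Aside: iwl_dump_host_monitor_block() above selects what to sample by writing (block << 8) | vec to CSR_MONITOR_CFG_REG and then reading CSR_MONITOR_STATUS_REG the requested number of times. A trivial standalone check of that selector encoding, shown only as an illustration (the main() harness is not driver code):

#include <stdint.h>
#include <stdio.h>

/* same values as the IWL_HOST_MON_BLOCK_* constants above */
#define HOST_MON_BLOCK_HIPM		0x22
#define HOST_MON_BLOCK_PEMON_VEC0	0x00

int main(void)
{
	/* selector written to CSR_MONITOR_CFG_REG for the HIPM/VEC0 dump */
	uint32_t cfg = (HOST_MON_BLOCK_HIPM << 8) | HOST_MON_BLOCK_PEMON_VEC0;

	printf("monitor cfg = 0x%04x\n", (unsigned)cfg);	/* 0x2200 */
	return 0;
}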
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
index 3c21c0e081f8..37b3bd62897e 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
- * Copyright (C) 2018-2020 Intel Corporation
+ * Copyright (C) 2018-2021 Intel Corporation
*/
#ifndef __iwl_io_h__
#define __iwl_io_h__
@@ -52,8 +52,7 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_force_nmi(struct iwl_trans *trans);
-int iwl_finish_nic_init(struct iwl_trans *trans,
- const struct iwl_cfg_trans_params *cfg_trans);
+int iwl_finish_nic_init(struct iwl_trans *trans);
/* Error handling */
int iwl_dump_fh(struct iwl_trans *trans, char **buf);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
index 475f951d4b1e..f470f9aea50f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c
@@ -534,6 +534,17 @@ static void iwl_init_vht_hw_capab(struct iwl_trans *trans,
cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
}
+static const u8 iwl_vendor_caps[] = {
+ 0xdd, /* vendor element */
+ 0x06, /* length */
+ 0x00, 0x17, 0x35, /* Intel OUI */
+ 0x08, /* type (Intel Capabilities) */
+ /* followed by 16 bits of capabilities */
+#define IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE BIT(0)
+ IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE,
+ 0x00
+};
+
static const struct ieee80211_sband_iftype_data iwl_he_capa[] = {
{
.types_mask = BIT(NL80211_IFTYPE_STATION),
@@ -781,6 +792,12 @@ iwl_nvm_fixup_sband_iftd(struct iwl_trans *trans,
if (fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_BROADCAST_TWT))
iftype_data->he_cap.he_cap_elem.mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BCAST_TWT;
+
+ if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
+ !is_ap) {
+ iftype_data->vendor_elems.data = iwl_vendor_caps;
+ iftype_data->vendor_elems.len = ARRAY_SIZE(iwl_vendor_caps);
+ }
}
static void iwl_init_he_hw_capab(struct iwl_trans *trans,
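Aside: the iwl_vendor_caps[] blob added above is an ordinary vendor-specific information element: element ID 0xdd, a one-byte length (6), the three-byte Intel OUI 00:17:35, a one-byte type (0x08) and two capability bytes, the first of which carries IWL_VENDOR_CAP_IMPROVED_BF_FDBK_HE in bit 0. A self-contained sketch of walking such an element; the helper below and its little-endian reading of the two capability bytes are illustrative assumptions, not driver or mac80211 API:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* byte layout mirrored from iwl_vendor_caps[] above */
static const uint8_t intel_oui[3] = { 0x00, 0x17, 0x35 };

/* return the 16-bit capability field, or -1 if this is not an
 * Intel capabilities element (id 0xdd, OUI type 0x08) */
static int parse_intel_caps(const uint8_t *elem, size_t len)
{
	if (len < 8 || elem[0] != 0xdd || elem[1] != 6)
		return -1;
	if (memcmp(&elem[2], intel_oui, 3) || elem[5] != 0x08)
		return -1;
	/* assumption: low capability byte first, matching the array above
	 * where BIT(0) sits in the first of the two capability bytes */
	return elem[6] | (elem[7] << 8);
}

int main(void)
{
	const uint8_t caps[] = { 0xdd, 0x06, 0x00, 0x17, 0x35, 0x08,
				 0x01, 0x00 };
	int val = parse_intel_caps(caps, sizeof(caps));

	printf("caps = 0x%04x\n", (unsigned)val);	/* 0x0001 */
	return 0;
}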
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
index d0a7d58336a9..a84ab02cf9d7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h
@@ -347,6 +347,12 @@
#define RADIO_REG_SYS_MANUAL_DFT_0 0xAD4078
#define RFIC_REG_RD 0xAD0470
#define WFPM_CTRL_REG 0xA03030
+#define WFPM_CTRL_REG_GEN2 0xd03030
+#define WFPM_OTP_CFG1_ADDR 0x00a03098
+#define WFPM_OTP_CFG1_ADDR_GEN2 0x00d03098
+#define WFPM_OTP_CFG1_IS_JACKET_BIT BIT(4)
+#define WFPM_OTP_CFG1_IS_CDB_BIT BIT(5)
+
#define WFPM_GP2 0xA030B4
/* DBGI SRAM Register details */
@@ -399,10 +405,40 @@ enum {
LMPM_PAGE_PASS_NOTIF_POS = BIT(20),
};
+/*
+ * CRF ID register
+ *
+ * type: bits 0-11
+ * reserved: bits 12-18
+ * slave_exist: bit 19
+ * dash: bits 20-23
+ * step: bits 24-26
+ * flavor: bits 27-31
+ */
+#define REG_CRF_ID_TYPE(val) (((val) & 0x00000FFF) >> 0)
+#define REG_CRF_ID_SLAVE(val) (((val) & 0x00080000) >> 19)
+#define REG_CRF_ID_DASH(val) (((val) & 0x00F00000) >> 20)
+#define REG_CRF_ID_STEP(val) (((val) & 0x07000000) >> 24)
+#define REG_CRF_ID_FLAVOR(val) (((val) & 0xF8000000) >> 27)
+
#define UREG_CHICK (0xA05C00)
#define UREG_CHICK_MSI_ENABLE BIT(24)
#define UREG_CHICK_MSIX_ENABLE BIT(25)
+#define SD_REG_VER 0xa29600
+#define SD_REG_VER_GEN2 0x00a2b800
+
+#define REG_CRF_ID_TYPE_JF_1 0x201
+#define REG_CRF_ID_TYPE_JF_2 0x202
+#define REG_CRF_ID_TYPE_HR_CDB 0x503
+#define REG_CRF_ID_TYPE_HR_NONE_CDB 0x504
+#define REG_CRF_ID_TYPE_HR_NONE_CDB_1X1 0x501
+#define REG_CRF_ID_TYPE_HR_NONE_CDB_CCP 0x532
+#define REG_CRF_ID_TYPE_GF 0x410
+#define REG_CRF_ID_TYPE_GF_TC 0xF08
+#define REG_CRF_ID_TYPE_MR 0x810
+#define REG_CRF_ID_TYPE_FM 0x910
+
#define HPM_DEBUG 0xA03440
#define PERSISTENCE_BIT BIT(12)
#define PREG_WFPM_ACCESS BIT(12)
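Aside: the REG_CRF_ID_* accessors above are plain mask-and-shift extractors over the layout documented in the comment (type in bits 0-11, slave_exist in bit 19, dash in bits 20-23, step in bits 24-26, flavor in bits 27-31). A self-contained sketch of the same decoding; the sample register value and the main() harness are invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* same field layout as the REG_CRF_ID_* macros above */
#define CRF_ID_TYPE(val)	(((val) & 0x00000FFFu) >> 0)
#define CRF_ID_SLAVE(val)	(((val) & 0x00080000u) >> 19)
#define CRF_ID_DASH(val)	(((val) & 0x00F00000u) >> 20)
#define CRF_ID_STEP(val)	(((val) & 0x07000000u) >> 24)
#define CRF_ID_FLAVOR(val)	(((val) & 0xF8000000u) >> 27)

int main(void)
{
	uint32_t crf_id = 0x09300410;	/* hypothetical sample value */

	/* type 0x410 would match REG_CRF_ID_TYPE_GF above */
	printf("type 0x%03x dash %u step %u flavor %u slave %u\n",
	       (unsigned)CRF_ID_TYPE(crf_id), (unsigned)CRF_ID_DASH(crf_id),
	       (unsigned)CRF_ID_STEP(crf_id), (unsigned)CRF_ID_FLAVOR(crf_id),
	       (unsigned)CRF_ID_SLAVE(crf_id));
	return 0;
}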
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index 8f0ff540f439..4ebb1871bd1f 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -363,6 +363,20 @@ struct iwl_hcmd_arr {
{ .arr = x, .size = ARRAY_SIZE(x) }
/**
+ * struct iwl_dump_sanitize_ops - dump sanitization operations
+ * @frob_txf: Scrub the TX FIFO data
+ * @frob_hcmd: Scrub a host command; the %hcmd pointer points to the header,
+ * which may be short or long (&struct iwl_cmd_header or
+ * &struct iwl_cmd_header_wide)
+ * @frob_mem: Scrub memory data
+ */
+struct iwl_dump_sanitize_ops {
+ void (*frob_txf)(void *ctx, void *buf, size_t buflen);
+ void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
+ void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
+};
+
+/**
* struct iwl_trans_config - transport configuration
*
* @op_mode: pointer to the upper layer.
@@ -586,7 +600,9 @@ struct iwl_trans_ops {
u32 value);
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
- u32 dump_mask);
+ u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx);
void (*debugfs_cleanup)(struct iwl_trans *trans);
void (*sync_nmi)(struct iwl_trans *trans);
int (*set_pnvm)(struct iwl_trans *trans, const void *data, u32 len);
@@ -723,8 +739,8 @@ struct iwl_self_init_dram {
* @debug_info_tlv_list: list of debug info TLVs
* @time_point: array of debug time points
* @periodic_trig_list: periodic triggers list
- * @domains_bitmap: bitmap of active domains other than
- * &IWL_FW_INI_DOMAIN_ALWAYS_ON
+ * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
+ * @ucode_preset: preset based on ucode
*/
struct iwl_trans_debug {
u8 n_dest_reg;
@@ -758,6 +774,7 @@ struct iwl_trans_debug {
struct list_head periodic_trig_list;
u32 domains_bitmap;
+ u32 ucode_preset;
};
struct iwl_dma_ptr {
@@ -1086,11 +1103,14 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
}
static inline struct iwl_trans_dump_data *
-iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask)
+iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
{
if (!trans->ops->dump_data)
return NULL;
- return trans->ops->dump_data(trans, dump_mask);
+ return trans->ops->dump_data(trans, dump_mask,
+ sanitize_ops, sanitize_ctx);
}
static inline struct iwl_device_tx_cmd *
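Aside: the extra dump_data arguments thread an optional set of scrub callbacks, plus an opaque context pointer, from the op-mode down to the transport so that sensitive material (TX FIFO payloads, host commands, SRAM contents) can be blanked while the dump is assembled; the inline iwl_trans_dump_data() wrapper above simply forwards both pointers. A minimal standalone sketch of what such callbacks could look like — the struct below mirrors iwl_dump_sanitize_ops for illustration, and the zero-fill policy is an assumption, not the driver's actual sanitization logic:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* shape mirrored from struct iwl_dump_sanitize_ops above */
struct dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, uint32_t mem_addr, void *mem, size_t buflen);
};

/* example policy: blank every buffer the transport hands us */
static void frob_zero(void *ctx, void *buf, size_t buflen)
{
	(void)ctx;
	memset(buf, 0, buflen);
}

static void frob_mem_zero(void *ctx, uint32_t mem_addr, void *mem,
			  size_t buflen)
{
	(void)ctx;
	(void)mem_addr;
	memset(mem, 0, buflen);
}

static const struct dump_sanitize_ops example_sanitize_ops = {
	.frob_txf	= frob_zero,
	.frob_hcmd	= frob_zero,
	.frob_mem	= frob_mem_zero,
};

A transport's ->dump_data implementation is then expected to run the matching callback on each TX FIFO, host command and memory chunk before copying it into the dump, as suggested by the kernel-doc above.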
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
index 9f706fffb592..a19f646a324f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c
@@ -1379,12 +1379,49 @@ int iwl_mvm_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
/* converted data from the different status responses */
struct iwl_wowlan_status_data {
- u16 pattern_number;
- u16 qos_seq_ctr[8];
+ u64 replay_ctr;
+ u32 num_of_gtk_rekeys;
+ u32 received_beacons;
u32 wakeup_reasons;
u32 wake_packet_length;
u32 wake_packet_bufsize;
- const u8 *wake_packet;
+ u16 pattern_number;
+ u16 non_qos_seq_ctr;
+ u16 qos_seq_ctr[8];
+ u8 tid_tear_down;
+
+ struct {
+ /*
+ * We store both the TKIP and AES representations
+ * coming from the firmware because we decode the
+ * data from there before we iterate the keys and
+ * know which one we need.
+ */
+ struct {
+ struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT];
+ } tkip, aes;
+ /* including RX MIC key for TKIP */
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ } gtk;
+
+ struct {
+ /* Same as above */
+ struct {
+ struct ieee80211_key_seq seq[IWL_MAX_TID_COUNT];
+ u64 tx_pn;
+ } tkip, aes;
+ } ptk;
+
+ struct {
+ u64 ipn;
+ u8 key[WOWLAN_KEY_MAX_SIZE];
+ u8 len;
+ u8 flags;
+ } igtk;
+
+ u8 wake_packet[];
};
static void iwl_mvm_report_wakeup_reasons(struct iwl_mvm *mvm,
@@ -1540,77 +1577,90 @@ static void iwl_mvm_tkip_sc_to_seq(struct tkip_sc *sc,
seq->tkip.iv16 = le16_to_cpu(sc->iv16);
}
-static void iwl_mvm_set_aes_rx_seq(struct iwl_mvm *mvm, struct aes_sc *scs,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
+static void iwl_mvm_set_key_rx_seq_tids(struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
{
int tid;
- BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ ieee80211_set_key_rx_seq(key, tid, &seq[tid]);
+}
- if (sta && iwl_mvm_has_new_rx_api(mvm)) {
- struct iwl_mvm_sta *mvmsta;
- struct iwl_mvm_key_pn *ptk_pn;
+static void iwl_mvm_set_aes_ptk_rx_seq(struct iwl_mvm *mvm,
+ struct iwl_wowlan_status_data *status,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ struct iwl_mvm_key_pn *ptk_pn;
+ int tid;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ iwl_mvm_set_key_rx_seq_tids(key, status->ptk.aes.seq);
- rcu_read_lock();
- ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
- if (WARN_ON(!ptk_pn)) {
- rcu_read_unlock();
- return;
- }
+ if (!iwl_mvm_has_new_rx_api(mvm))
+ return;
- for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
- struct ieee80211_key_seq seq = {};
- int i;
- iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
- ieee80211_set_key_rx_seq(key, tid, &seq);
- for (i = 1; i < mvm->trans->num_rx_queues; i++)
- memcpy(ptk_pn->q[i].pn[tid],
- seq.ccmp.pn, IEEE80211_CCMP_PN_LEN);
- }
+ rcu_read_lock();
+ ptk_pn = rcu_dereference(mvmsta->ptk_pn[key->keyidx]);
+ if (WARN_ON(!ptk_pn)) {
rcu_read_unlock();
- } else {
- for (tid = 0; tid < IWL_NUM_RSC; tid++) {
- struct ieee80211_key_seq seq = {};
+ return;
+ }
- iwl_mvm_aes_sc_to_seq(&scs[tid], &seq);
- ieee80211_set_key_rx_seq(key, tid, &seq);
- }
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ int i;
+
+ for (i = 1; i < mvm->trans->num_rx_queues; i++)
+ memcpy(ptk_pn->q[i].pn[tid],
+ status->ptk.aes.seq[tid].ccmp.pn,
+ IEEE80211_CCMP_PN_LEN);
}
+ rcu_read_unlock();
}
-static void iwl_mvm_set_tkip_rx_seq(struct tkip_sc *scs,
- struct ieee80211_key_conf *key)
+static void iwl_mvm_convert_key_counters(struct iwl_wowlan_status_data *status,
+ union iwl_all_tsc_rsc *sc)
{
- int tid;
+ int i;
+
+	BUILD_BUG_ON(IWL_MAX_TID_COUNT > IEEE80211_NUM_TIDS);
+ BUILD_BUG_ON(IWL_MAX_TID_COUNT > IWL_NUM_RSC);
- BUILD_BUG_ON(IWL_NUM_RSC != IEEE80211_NUM_TIDS);
+ /* GTK RX counters */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.multicast_rsc[i],
+ &status->gtk.tkip.seq[i]);
+ iwl_mvm_aes_sc_to_seq(&sc->aes.multicast_rsc[i],
+ &status->gtk.aes.seq[i]);
+ }
- for (tid = 0; tid < IWL_NUM_RSC; tid++) {
- struct ieee80211_key_seq seq = {};
+ /* PTK TX counter */
+ status->ptk.tkip.tx_pn = (u64)le16_to_cpu(sc->tkip.tsc.iv16) |
+ ((u64)le32_to_cpu(sc->tkip.tsc.iv32) << 16);
+ status->ptk.aes.tx_pn = le64_to_cpu(sc->aes.tsc.pn);
- iwl_mvm_tkip_sc_to_seq(&scs[tid], &seq);
- ieee80211_set_key_rx_seq(key, tid, &seq);
+ /* PTK RX counters */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ iwl_mvm_tkip_sc_to_seq(&sc->tkip.unicast_rsc[i],
+ &status->ptk.tkip.seq[i]);
+ iwl_mvm_aes_sc_to_seq(&sc->aes.unicast_rsc[i],
+ &status->ptk.aes.seq[i]);
}
}
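Aside: the PTK TX counter conversion above packs the firmware's TKIP TSC — a 16-bit iv16 and a 32-bit iv32 — into one 48-bit packet number with iv16 in the low 16 bits, alongside the straight le64 read for CCMP/GCMP. A tiny standalone round-trip of that packing, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* iv16 occupies bits 0-15, iv32 occupies bits 16-47, as in
 * status->ptk.tkip.tx_pn above */
static uint64_t tkip_pn(uint16_t iv16, uint32_t iv32)
{
	return (uint64_t)iv16 | ((uint64_t)iv32 << 16);
}

int main(void)
{
	uint64_t pn = tkip_pn(0x0001, 0x00000002);

	/* prints pn = 0x20001, iv16 = 0x1, iv32 = 0x2 */
	printf("pn = 0x%llx iv16 = 0x%x iv32 = 0x%x\n",
	       (unsigned long long)pn,
	       (unsigned)(pn & 0xffff), (unsigned)(pn >> 16));
	return 0;
}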
static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
struct ieee80211_key_conf *key,
- struct iwl_wowlan_status *status)
+ struct iwl_wowlan_status_data *status)
{
- union iwl_all_tsc_rsc *rsc = &status->gtk[0].rsc.all_tsc_rsc;
-
switch (key->cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- iwl_mvm_set_aes_rx_seq(mvm, rsc->aes.multicast_rsc, NULL, key);
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk.aes.seq);
break;
case WLAN_CIPHER_SUITE_TKIP:
- iwl_mvm_set_tkip_rx_seq(rsc->tkip.multicast_rsc, key);
+ iwl_mvm_set_key_rx_seq_tids(key, status->gtk.tkip.seq);
break;
default:
WARN_ON(1);
@@ -1619,7 +1669,7 @@ static void iwl_mvm_set_key_rx_seq(struct iwl_mvm *mvm,
struct iwl_mvm_d3_gtk_iter_data {
struct iwl_mvm *mvm;
- struct iwl_wowlan_status *status;
+ struct iwl_wowlan_status_data *status;
void *last_gtk;
u32 cipher;
bool find_phase, unhandled_cipher;
@@ -1633,6 +1683,7 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
void *_data)
{
struct iwl_mvm_d3_gtk_iter_data *data = _data;
+ struct iwl_wowlan_status_data *status = data->status;
if (data->unhandled_cipher)
return;
@@ -1661,10 +1712,6 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
* note that this assumes no TDLS sessions are active
*/
if (sta) {
- struct ieee80211_key_seq seq = {};
- union iwl_all_tsc_rsc *sc =
- &data->status->gtk[0].rsc.all_tsc_rsc;
-
if (data->find_phase)
return;
@@ -1672,16 +1719,12 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
case WLAN_CIPHER_SUITE_GCMP_256:
- iwl_mvm_set_aes_rx_seq(data->mvm, sc->aes.unicast_rsc,
- sta, key);
- atomic64_set(&key->tx_pn, le64_to_cpu(sc->aes.tsc.pn));
+ atomic64_set(&key->tx_pn, status->ptk.aes.tx_pn);
+ iwl_mvm_set_aes_ptk_rx_seq(data->mvm, status, sta, key);
break;
case WLAN_CIPHER_SUITE_TKIP:
- iwl_mvm_tkip_sc_to_seq(&sc->tkip.tsc, &seq);
- iwl_mvm_set_tkip_rx_seq(sc->tkip.unicast_rsc, key);
- atomic64_set(&key->tx_pn,
- (u64)seq.tkip.iv16 |
- ((u64)seq.tkip.iv32 << 16));
+ atomic64_set(&key->tx_pn, status->ptk.tkip.tx_pn);
+ iwl_mvm_set_key_rx_seq_tids(key, status->ptk.tkip.seq);
break;
}
@@ -1703,7 +1746,7 @@ static void iwl_mvm_d3_update_keys(struct ieee80211_hw *hw,
static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
- struct iwl_wowlan_status *status)
+ struct iwl_wowlan_status_data *status)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mvm_d3_gtk_iter_data gtkdata = {
@@ -1717,7 +1760,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (!status || !vif->bss_conf.bssid)
return false;
- if (le32_to_cpu(status->wakeup_reasons) & disconnection_reasons)
+ if (status->wakeup_reasons & disconnection_reasons)
return false;
/* find last GTK that we used initially, if any */
@@ -1741,7 +1784,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
iwl_mvm_d3_update_keys, &gtkdata);
IWL_DEBUG_WOWLAN(mvm, "num of GTK rekeying %d\n",
- le32_to_cpu(status->num_of_gtk_rekeys));
+ status->num_of_gtk_rekeys);
if (status->num_of_gtk_rekeys) {
struct ieee80211_key_conf *key;
struct {
@@ -1750,36 +1793,32 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
} conf = {
.conf.cipher = gtkdata.cipher,
.conf.keyidx =
- iwlmvm_wowlan_gtk_idx(&status->gtk[0]),
+ status->gtk.flags & IWL_WOWLAN_GTK_IDX_MASK,
};
__be64 replay_ctr;
IWL_DEBUG_WOWLAN(mvm,
"Received from FW GTK cipher %d, key index %d\n",
conf.conf.cipher, conf.conf.keyidx);
+
+ BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
+ BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
+ BUILD_BUG_ON(sizeof(conf.key) < sizeof(status->gtk.key));
+
+ memcpy(conf.conf.key, status->gtk.key, sizeof(status->gtk.key));
+
switch (gtkdata.cipher) {
case WLAN_CIPHER_SUITE_CCMP:
case WLAN_CIPHER_SUITE_GCMP:
- BUILD_BUG_ON(WLAN_KEY_LEN_CCMP != WLAN_KEY_LEN_GCMP);
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_CCMP);
conf.conf.keylen = WLAN_KEY_LEN_CCMP;
- memcpy(conf.conf.key, status->gtk[0].key,
- WLAN_KEY_LEN_CCMP);
break;
case WLAN_CIPHER_SUITE_GCMP_256:
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_GCMP_256);
conf.conf.keylen = WLAN_KEY_LEN_GCMP_256;
- memcpy(conf.conf.key, status->gtk[0].key,
- WLAN_KEY_LEN_GCMP_256);
break;
case WLAN_CIPHER_SUITE_TKIP:
- BUILD_BUG_ON(sizeof(conf.key) < WLAN_KEY_LEN_TKIP);
conf.conf.keylen = WLAN_KEY_LEN_TKIP;
- memcpy(conf.conf.key, status->gtk[0].key, 16);
- /* leave TX MIC key zeroed, we don't use it anyway */
- memcpy(conf.conf.key +
- NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
- status->gtk[0].tkip_mic_key, 8);
break;
}
@@ -1788,8 +1827,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
return false;
iwl_mvm_set_key_rx_seq(mvm, key, status);
- replay_ctr =
- cpu_to_be64(le64_to_cpu(status->replay_ctr));
+ replay_ctr = cpu_to_be64(status->replay_ctr);
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
@@ -1800,7 +1838,7 @@ out:
WOWLAN_GET_STATUSES, 0) < 10) {
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
- mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
+ mvmvif->seqno = status->non_qos_seq_ctr + 0x10;
}
return true;
@@ -1808,13 +1846,13 @@ out:
/* Occasionally, templates would be nice. This is one of those times ... */
#define iwl_mvm_parse_wowlan_status_common(_ver) \
-static struct iwl_wowlan_status * \
+static struct iwl_wowlan_status_data * \
iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
- void *_data, int len) \
+ struct iwl_wowlan_status_ ##_ver *data,\
+ int len) \
{ \
- struct iwl_wowlan_status *status; \
- struct iwl_wowlan_status_ ##_ver *data = _data; \
- int data_size; \
+ struct iwl_wowlan_status_data *status; \
+ int data_size, i; \
\
if (len < sizeof(*data)) { \
IWL_ERR(mvm, "Invalid WoWLAN status response!\n"); \
@@ -1832,18 +1870,22 @@ iwl_mvm_parse_wowlan_status_common_ ## _ver(struct iwl_mvm *mvm, \
return ERR_PTR(-ENOMEM); \
\
/* copy all the common fields */ \
- status->replay_ctr = data->replay_ctr; \
- status->pattern_number = data->pattern_number; \
- status->non_qos_seq_ctr = data->non_qos_seq_ctr; \
- memcpy(status->qos_seq_ctr, data->qos_seq_ctr, \
- sizeof(status->qos_seq_ctr)); \
- status->wakeup_reasons = data->wakeup_reasons; \
- status->num_of_gtk_rekeys = data->num_of_gtk_rekeys; \
- status->received_beacons = data->received_beacons; \
- status->wake_packet_length = data->wake_packet_length; \
- status->wake_packet_bufsize = data->wake_packet_bufsize; \
+ status->replay_ctr = le64_to_cpu(data->replay_ctr); \
+ status->pattern_number = le16_to_cpu(data->pattern_number); \
+ status->non_qos_seq_ctr = le16_to_cpu(data->non_qos_seq_ctr); \
+ for (i = 0; i < 8; i++) \
+ status->qos_seq_ctr[i] = \
+ le16_to_cpu(data->qos_seq_ctr[i]); \
+ status->wakeup_reasons = le32_to_cpu(data->wakeup_reasons); \
+ status->num_of_gtk_rekeys = \
+ le32_to_cpu(data->num_of_gtk_rekeys); \
+ status->received_beacons = le32_to_cpu(data->received_beacons); \
+ status->wake_packet_length = \
+ le32_to_cpu(data->wake_packet_length); \
+ status->wake_packet_bufsize = \
+ le32_to_cpu(data->wake_packet_bufsize); \
memcpy(status->wake_packet, data->wake_packet, \
- le32_to_cpu(status->wake_packet_bufsize)); \
+ status->wake_packet_bufsize); \
\
return status; \
}
@@ -1852,10 +1894,49 @@ iwl_mvm_parse_wowlan_status_common(v6)
iwl_mvm_parse_wowlan_status_common(v7)
iwl_mvm_parse_wowlan_status_common(v9)
-static struct iwl_wowlan_status *
+static void iwl_mvm_convert_gtk(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_gtk_status *data)
+{
+ BUILD_BUG_ON(sizeof(status->gtk.key) < sizeof(data->key));
+ BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
+ sizeof(data->tkip_mic_key) >
+ sizeof(status->gtk.key));
+
+ status->gtk.len = data->key_len;
+ status->gtk.flags = data->key_flags;
+
+ memcpy(status->gtk.key, data->key, sizeof(data->key));
+
+ /* if it's as long as the TKIP encryption key, copy MIC key */
+ if (status->gtk.len == NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
+ memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ data->tkip_mic_key, sizeof(data->tkip_mic_key));
+}
+
+static void iwl_mvm_convert_igtk(struct iwl_wowlan_status_data *status,
+ struct iwl_wowlan_igtk_status *data)
+{
+ const u8 *ipn = data->ipn;
+
+ BUILD_BUG_ON(sizeof(status->igtk.key) < sizeof(data->key));
+
+ status->igtk.len = data->key_len;
+ status->igtk.flags = data->key_flags;
+
+ memcpy(status->igtk.key, data->key, sizeof(data->key));
+
+ status->igtk.ipn = ((u64)ipn[5] << 0) |
+ ((u64)ipn[4] << 8) |
+ ((u64)ipn[3] << 16) |
+ ((u64)ipn[2] << 24) |
+ ((u64)ipn[1] << 32) |
+ ((u64)ipn[0] << 40);
+}
+
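Aside: iwl_mvm_convert_igtk() above rebuilds the 48-bit IGTK packet number from six bytes stored most-significant byte first. The same big-endian assembly written as a loop, as a standalone illustration (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

/* fold 6 big-endian bytes into a 48-bit packet number, equivalent to
 * the explicit (ipn[5] << 0) | ... | (ipn[0] << 40) shifts above */
static uint64_t ipn_to_u64(const uint8_t ipn[6])
{
	uint64_t pn = 0;
	int i;

	for (i = 0; i < 6; i++)
		pn = (pn << 8) | ipn[i];
	return pn;
}

int main(void)
{
	const uint8_t ipn[6] = { 0x00, 0x00, 0x00, 0x01, 0x02, 0x03 };

	/* expected: 0x010203 == 66051 */
	printf("ipn = %llu\n", (unsigned long long)ipn_to_u64(ipn));
	return 0;
}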
+static struct iwl_wowlan_status_data *
iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
{
- struct iwl_wowlan_status *status;
+ struct iwl_wowlan_status_data *status;
struct iwl_wowlan_get_status_cmd get_status_cmd = {
.sta_id = cpu_to_le32(sta_id),
};
@@ -1895,59 +1976,57 @@ iwl_mvm_send_wowlan_get_status(struct iwl_mvm *mvm, u8 sta_id)
IWL_UCODE_TLV_API_WOWLAN_KEY_MATERIAL)) {
struct iwl_wowlan_status_v6 *v6 = (void *)cmd.resp_pkt->data;
- status = iwl_mvm_parse_wowlan_status_common_v6(mvm,
- cmd.resp_pkt->data,
- len);
+ status = iwl_mvm_parse_wowlan_status_common_v6(mvm, v6, len);
if (IS_ERR(status))
goto out_free_resp;
BUILD_BUG_ON(sizeof(v6->gtk.decrypt_key) >
- sizeof(status->gtk[0].key));
- BUILD_BUG_ON(sizeof(v6->gtk.tkip_mic_key) >
- sizeof(status->gtk[0].tkip_mic_key));
+ sizeof(status->gtk.key));
+ BUILD_BUG_ON(NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY +
+ sizeof(v6->gtk.tkip_mic_key) >
+ sizeof(status->gtk.key));
/* copy GTK info to the right place */
- memcpy(status->gtk[0].key, v6->gtk.decrypt_key,
+ memcpy(status->gtk.key, v6->gtk.decrypt_key,
sizeof(v6->gtk.decrypt_key));
- memcpy(status->gtk[0].tkip_mic_key, v6->gtk.tkip_mic_key,
+ memcpy(status->gtk.key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY,
+ v6->gtk.tkip_mic_key,
sizeof(v6->gtk.tkip_mic_key));
- memcpy(&status->gtk[0].rsc, &v6->gtk.rsc,
- sizeof(status->gtk[0].rsc));
+
+ iwl_mvm_convert_key_counters(status, &v6->gtk.rsc.all_tsc_rsc);
/* hardcode the key length to 16 since v6 only supports 16 */
- status->gtk[0].key_len = 16;
+ status->gtk.len = 16;
/*
* The key index only uses 2 bits (values 0 to 3) and
* we always set bit 7 which means this is the
* currently used key.
*/
- status->gtk[0].key_flags = v6->gtk.key_index | BIT(7);
+ status->gtk.flags = v6->gtk.key_index | BIT(7);
} else if (notif_ver == 7) {
struct iwl_wowlan_status_v7 *v7 = (void *)cmd.resp_pkt->data;
- status = iwl_mvm_parse_wowlan_status_common_v7(mvm,
- cmd.resp_pkt->data,
- len);
+ status = iwl_mvm_parse_wowlan_status_common_v7(mvm, v7, len);
if (IS_ERR(status))
goto out_free_resp;
- status->gtk[0] = v7->gtk[0];
- status->igtk[0] = v7->igtk[0];
+ iwl_mvm_convert_key_counters(status, &v7->gtk[0].rsc.all_tsc_rsc);
+ iwl_mvm_convert_gtk(status, &v7->gtk[0]);
+ iwl_mvm_convert_igtk(status, &v7->igtk[0]);
} else if (notif_ver == 9 || notif_ver == 10 || notif_ver == 11) {
struct iwl_wowlan_status_v9 *v9 = (void *)cmd.resp_pkt->data;
/* these three command versions have same layout and size, the
* difference is only in a few not used (reserved) fields.
*/
- status = iwl_mvm_parse_wowlan_status_common_v9(mvm,
- cmd.resp_pkt->data,
- len);
+ status = iwl_mvm_parse_wowlan_status_common_v9(mvm, v9, len);
if (IS_ERR(status))
goto out_free_resp;
- status->gtk[0] = v9->gtk[0];
- status->igtk[0] = v9->igtk[0];
+ iwl_mvm_convert_key_counters(status, &v9->gtk[0].rsc.all_tsc_rsc);
+ iwl_mvm_convert_gtk(status, &v9->gtk[0]);
+ iwl_mvm_convert_igtk(status, &v9->igtk[0]);
status->tid_tear_down = v9->tid_tear_down;
} else {
@@ -1962,7 +2041,7 @@ out_free_resp:
return status;
}
-static struct iwl_wowlan_status *
+static struct iwl_wowlan_status_data *
iwl_mvm_get_wakeup_status(struct iwl_mvm *mvm, u8 sta_id)
{
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
@@ -1987,29 +2066,17 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_wowlan_status_data status;
- struct iwl_wowlan_status *fw_status;
+ struct iwl_wowlan_status_data *status;
int i;
bool keep;
struct iwl_mvm_sta *mvm_ap_sta;
- fw_status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
- if (IS_ERR_OR_NULL(fw_status))
+ status = iwl_mvm_get_wakeup_status(mvm, mvmvif->ap_sta_id);
+ if (IS_ERR(status))
goto out_unlock;
IWL_DEBUG_WOWLAN(mvm, "wakeup reason 0x%x\n",
- le32_to_cpu(fw_status->wakeup_reasons));
-
- status.pattern_number = le16_to_cpu(fw_status->pattern_number);
- for (i = 0; i < 8; i++)
- status.qos_seq_ctr[i] =
- le16_to_cpu(fw_status->qos_seq_ctr[i]);
- status.wakeup_reasons = le32_to_cpu(fw_status->wakeup_reasons);
- status.wake_packet_length =
- le32_to_cpu(fw_status->wake_packet_length);
- status.wake_packet_bufsize =
- le32_to_cpu(fw_status->wake_packet_bufsize);
- status.wake_packet = fw_status->wake_packet;
+ status->wakeup_reasons);
/* still at hard-coded place 0 for D3 image */
mvm_ap_sta = iwl_mvm_sta_from_staid_protected(mvm, 0);
@@ -2017,7 +2084,7 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
goto out_free;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
- u16 seq = status.qos_seq_ctr[i];
+ u16 seq = status->qos_seq_ctr[i];
/* firmware stores last-used value, we store next value */
seq += 0x10;
mvm_ap_sta->tid_data[i].seq_number = seq;
@@ -2033,15 +2100,15 @@ static bool iwl_mvm_query_wakeup_reasons(struct iwl_mvm *mvm,
/* now we have all the data we need, unlock to avoid mac80211 issues */
mutex_unlock(&mvm->mutex);
- iwl_mvm_report_wakeup_reasons(mvm, vif, &status);
+ iwl_mvm_report_wakeup_reasons(mvm, vif, status);
- keep = iwl_mvm_setup_connection_keep(mvm, vif, fw_status);
+ keep = iwl_mvm_setup_connection_keep(mvm, vif, status);
- kfree(fw_status);
+ kfree(status);
return keep;
out_free:
- kfree(fw_status);
+ kfree(status);
out_unlock:
mutex_unlock(&mvm->mutex);
return false;
@@ -2165,16 +2232,16 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
.pattern_idx = -1,
};
struct cfg80211_wowlan_wakeup *wakeup_report = &wakeup;
+ struct iwl_wowlan_status_data *status;
struct iwl_mvm_nd_query_results query;
- struct iwl_wowlan_status *fw_status;
unsigned long matched_profiles;
u32 reasons = 0;
int i, n_matches, ret;
- fw_status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
- if (!IS_ERR_OR_NULL(fw_status)) {
- reasons = le32_to_cpu(fw_status->wakeup_reasons);
- kfree(fw_status);
+ status = iwl_mvm_get_wakeup_status(mvm, IWL_MVM_INVALID_STA);
+ if (!IS_ERR(status)) {
+ reasons = status->wakeup_reasons;
+ kfree(status);
}
if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
@@ -2336,7 +2403,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert,
false, 0);
ret = 1;
- mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
goto err;
}
@@ -2385,6 +2451,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
}
}
+ /* after the successful handshake, we're out of D3 */
mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
/*
@@ -2455,6 +2522,9 @@ out:
*/
set_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
+ /* regardless of what happened, we're now out of D3 */
+ mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED;
+
return 1;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
index 5dc39fbb74d6..ff66001d507e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c
@@ -395,10 +395,9 @@ static ssize_t iwl_dbgfs_rs_data_read(struct file *file, char __user *user_buf,
"A-MPDU size limit %d\n",
lq_sta->pers.dbg_agg_frame_count_lim);
desc += scnprintf(buff + desc, bufsz - desc,
- "valid_tx_ant %s%s%s\n",
+ "valid_tx_ant %s%s\n",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
- (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
- (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "");
desc += scnprintf(buff + desc, bufsz - desc,
"last tx rate=0x%X ",
lq_sta->last_rate_n_flags);
@@ -986,8 +985,8 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
continue;
pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
(int)(ARRAY_SIZE(stats->last_rates) - i));
- pos += rs_pretty_print_rate(pos, endpos - pos,
- stats->last_rates[idx]);
+ pos += rs_pretty_print_rate_v1(pos, endpos - pos,
+ stats->last_rates[idx]);
if (pos < endpos - 1)
*pos++ = '\n';
}
@@ -1060,8 +1059,6 @@ iwl_dbgfs_scan_ant_rxchain_read(struct file *file,
pos += scnprintf(buf + pos, bufsz - pos, "A");
if (mvm->scan_rx_ant & ANT_B)
pos += scnprintf(buf + pos, bufsz - pos, "B");
- if (mvm->scan_rx_ant & ANT_C)
- pos += scnprintf(buf + pos, bufsz - pos, "C");
pos += scnprintf(buf + pos, bufsz - pos, " (%hhx)\n", mvm->scan_rx_ant);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
@@ -1196,7 +1193,6 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
struct ieee80211_tx_info *info;
struct iwl_mac_beacon_cmd beacon_cmd = {};
u8 rate;
- u16 flags;
int i;
len /= 2;
@@ -1243,12 +1239,9 @@ static int _iwl_dbgfs_inject_beacon_ie(struct iwl_mvm *mvm, char *bin, int len)
mvmvif = iwl_mvm_vif_from_mac80211(vif);
info = IEEE80211_SKB_CB(beacon);
rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
- flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
- if (rate == IWL_FIRST_CCK_RATE)
- flags |= IWL_MAC_BEACON_CCK;
-
- beacon_cmd.flags = cpu_to_le16(flags);
+ beacon_cmd.flags =
+ cpu_to_le16(iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate));
beacon_cmd.byte_cnt = cpu_to_le16((u16)beacon->len);
beacon_cmd.template_id = cpu_to_le32((u32)mvmvif->id);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
index 03e5bf5cb909..949fb790f8fb 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c
@@ -324,6 +324,7 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
u8 *ctrl_ch_position)
{
u32 freq = peer->chandef.chan->center_freq;
+ u8 cmd_ver;
*channel = ieee80211_frequency_to_channel(freq);
@@ -344,6 +345,17 @@ iwl_mvm_ftm_target_chandef_v2(struct iwl_mvm *mvm,
*format_bw = IWL_LOCATION_FRAME_FORMAT_VHT;
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
break;
+ case NL80211_CHAN_WIDTH_160:
+ cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, LOCATION_GROUP,
+ TOF_RANGE_REQ_CMD,
+ IWL_FW_CMD_VER_UNKNOWN);
+
+ if (cmd_ver >= 13) {
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ break;
+ }
+ fallthrough;
default:
IWL_ERR(mvm, "Unsupported BW in FTM request (%d)\n",
peer->chandef.width);
@@ -1142,6 +1154,7 @@ static u8 iwl_mvm_ftm_get_range_resp_ver(struct iwl_mvm *mvm)
static bool iwl_mvm_ftm_resp_size_validation(u8 ver, unsigned int pkt_len)
{
switch (ver) {
+ case 9:
case 8:
return pkt_len == sizeof(struct iwl_tof_range_rsp_ntfy_v8);
case 7:
@@ -1205,7 +1218,7 @@ void iwl_mvm_ftm_range_resp(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
int peer_idx;
if (new_api) {
- if (notif_ver == 8) {
+ if (notif_ver >= 8) {
fw_ap = &fw_resp_v8->ap[i];
iwl_mvm_ftm_pasn_update_pn(mvm, fw_ap);
} else if (notif_ver == 7) {
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
index eba5433c2626..bda6da7d988e 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c
@@ -46,8 +46,8 @@ static int iwl_mvm_ftm_responder_set_bw_v1(struct cfg80211_chan_def *chandef,
}
static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
- u8 *format_bw,
- u8 *ctrl_ch_position)
+ u8 *format_bw, u8 *ctrl_ch_position,
+ u8 cmd_ver)
{
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20_NOHT:
@@ -68,6 +68,14 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef,
*format_bw |= IWL_LOCATION_BW_80MHZ << LOCATION_BW_POS;
*ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
break;
+ case NL80211_CHAN_WIDTH_160:
+ if (cmd_ver >= 9) {
+ *format_bw = IWL_LOCATION_FRAME_FORMAT_HE;
+ *format_bw |= IWL_LOCATION_BW_160MHZ << LOCATION_BW_POS;
+ *ctrl_ch_position = iwl_mvm_get_ctrl_pos(chandef);
+ break;
+ }
+ fallthrough;
default:
return -ENOTSUPP;
}
@@ -140,7 +148,8 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm,
if (cmd_ver >= 7)
err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw,
- &cmd.ctrl_ch_position);
+ &cmd.ctrl_ch_position,
+ cmd_ver);
else
err = iwl_mvm_ftm_responder_set_bw_v1(chandef, &cmd.format_bw,
&cmd.ctrl_ch_position);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 74404c96063b..863fec150e53 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -295,6 +295,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
+ /* SecBoot info */
if (trans->trans_cfg->device_family >=
IWL_DEVICE_FAMILY_22000) {
IWL_ERR(mvm,
@@ -302,6 +303,17 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
iwl_read_umac_prph(trans,
UMAG_SB_CPU_2_STATUS));
+ } else if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_8000) {
+ IWL_ERR(mvm,
+ "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
+ iwl_read_prph(trans, SB_CPU_1_STATUS),
+ iwl_read_prph(trans, SB_CPU_2_STATUS));
+ }
+
+ /* LMAC/UMAC PC info */
+ if (trans->trans_cfg->device_family >=
+ IWL_DEVICE_FAMILY_9000) {
IWL_ERR(mvm, "UMAC PC: 0x%x\n",
iwl_read_umac_prph(trans,
UREG_UMAC_CURRENT_PC));
@@ -312,12 +324,6 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
iwl_read_umac_prph(trans,
UREG_LMAC2_CURRENT_PC));
- } else if (trans->trans_cfg->device_family >=
- IWL_DEVICE_FAMILY_8000) {
- IWL_ERR(mvm,
- "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
- iwl_read_prph(trans, SB_CPU_1_STATUS),
- iwl_read_prph(trans, SB_CPU_2_STATUS));
}
if (ret == -ETIMEDOUT)
@@ -763,14 +769,18 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
int ret;
struct iwl_host_cmd cmd;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- GEO_TX_POWER_LIMIT,
+ PER_CHAIN_LIMIT_OFFSET_CMD,
IWL_FW_CMD_VER_UNKNOWN);
/* the ops field is at the same spot for all versions, so set in v1 */
geo_tx_cmd.v1.ops =
cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);
- if (cmd_ver == 3)
+ if (cmd_ver == 5)
+ len = sizeof(geo_tx_cmd.v5);
+ else if (cmd_ver == 4)
+ len = sizeof(geo_tx_cmd.v4);
+ else if (cmd_ver == 3)
len = sizeof(geo_tx_cmd.v3);
else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER))
@@ -782,7 +792,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
return -EOPNOTSUPP;
cmd = (struct iwl_host_cmd){
- .id = WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ .id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
.len = { len, },
.flags = CMD_WANT_SKB,
.data = { &geo_tx_cmd },
@@ -797,7 +807,7 @@ int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
resp = (void *)cmd.resp_pkt->data;
ret = le32_to_cpu(resp->profile_idx);
- if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES))
+ if (WARN_ON(ret > ACPI_NUM_GEO_PROFILES_REV3))
ret = -EIO;
iwl_free_resp(&cmd);
@@ -809,36 +819,58 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
union iwl_geo_tx_power_profiles_cmd cmd;
u16 len;
u32 n_bands;
+ u32 n_profiles;
int ret;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, PHY_OPS_GROUP,
- GEO_TX_POWER_LIMIT,
+ PER_CHAIN_LIMIT_OFFSET_CMD,
IWL_FW_CMD_VER_UNKNOWN);
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, ops) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) ||
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, ops) !=
- offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops));
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, ops) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, ops));
+
/* the ops field is at the same spot for all versions, so set in v1 */
cmd.v1.ops = cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES);
- if (cmd_ver == 3) {
+ if (cmd_ver == 5) {
+ len = sizeof(cmd.v5);
+ n_bands = ARRAY_SIZE(cmd.v5.table[0]);
+ n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ } else if (cmd_ver == 4) {
+ len = sizeof(cmd.v4);
+ n_bands = ARRAY_SIZE(cmd.v4.table[0]);
+ n_profiles = ACPI_NUM_GEO_PROFILES_REV3;
+ } else if (cmd_ver == 3) {
len = sizeof(cmd.v3);
n_bands = ARRAY_SIZE(cmd.v3.table[0]);
+ n_profiles = ACPI_NUM_GEO_PROFILES;
} else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER)) {
len = sizeof(cmd.v2);
n_bands = ARRAY_SIZE(cmd.v2.table[0]);
+ n_profiles = ACPI_NUM_GEO_PROFILES;
} else {
len = sizeof(cmd.v1);
n_bands = ARRAY_SIZE(cmd.v1.table[0]);
+ n_profiles = ACPI_NUM_GEO_PROFILES;
}
BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1, table) !=
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) ||
offsetof(struct iwl_geo_tx_power_profiles_cmd_v2, table) !=
- offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table));
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v3, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) ||
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v4, table) !=
+ offsetof(struct iwl_geo_tx_power_profiles_cmd_v5, table));
	/* the table is at the same position for all versions, so set it using v1 */
- ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0], n_bands);
+ ret = iwl_sar_geo_init(&mvm->fwrt, &cmd.v1.table[0][0],
+ n_bands, n_profiles);
/*
* It is a valid scenario to not support SAR, or miss wgds table,
@@ -851,14 +883,19 @@ static int iwl_mvm_sar_geo_init(struct iwl_mvm *mvm)
* Set the revision on versions that contain it.
* This must be done after calling iwl_sar_geo_init().
*/
- if (cmd_ver == 3)
+ if (cmd_ver == 5)
+ cmd.v5.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ else if (cmd_ver == 4)
+ cmd.v4.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
+ else if (cmd_ver == 3)
cmd.v3.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
IWL_UCODE_TLV_API_SAR_TABLE_VER))
cmd.v2.table_revision = cpu_to_le32(mvm->fwrt.geo_rev);
return iwl_mvm_send_cmd_pdu(mvm,
- WIDE_ID(PHY_OPS_GROUP, GEO_TX_POWER_LIMIT),
+ WIDE_ID(PHY_OPS_GROUP,
+ PER_CHAIN_LIMIT_OFFSET_CMD),
0, len, &cmd);
}
@@ -1108,7 +1145,7 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
static u8 iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
u8 value;
- int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, DSM_RFI_FUNC_ENABLE,
+ int ret = iwl_acpi_get_dsm_u8(mvm->fwrt.dev, 0, DSM_RFI_FUNC_ENABLE,
&iwl_rfi_guid, &value);
if (ret < 0) {
@@ -1133,30 +1170,45 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
int ret;
u32 value;
- struct iwl_lari_config_change_cmd_v4 cmd = {};
+ struct iwl_lari_config_change_cmd_v5 cmd = {};
cmd.config_bitmap = iwl_acpi_get_lari_config_bitmap(&mvm->fwrt);
- ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0, DSM_FUNC_11AX_ENABLEMENT,
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0, DSM_FUNC_11AX_ENABLEMENT,
&iwl_guid, &value);
if (!ret)
cmd.oem_11ax_allow_bitmap = cpu_to_le32(value);
- /* apply more config masks here */
- ret = iwl_acpi_get_dsm_u32((&mvm->fwrt)->dev, 0,
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
DSM_FUNC_ENABLE_UNII4_CHAN,
&iwl_guid, &value);
if (!ret)
cmd.oem_unii4_allow_bitmap = cpu_to_le32(value);
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+ DSM_FUNC_ACTIVATE_CHANNEL,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.chan_state_active_bitmap = cpu_to_le32(value);
+
+ ret = iwl_acpi_get_dsm_u32(mvm->fwrt.dev, 0,
+ DSM_FUNC_ENABLE_6E,
+ &iwl_guid, &value);
+ if (!ret)
+ cmd.oem_uhb_allow_bitmap = cpu_to_le32(value);
+
if (cmd.config_bitmap ||
+ cmd.oem_uhb_allow_bitmap ||
cmd.oem_11ax_allow_bitmap ||
- cmd.oem_unii4_allow_bitmap) {
+ cmd.oem_unii4_allow_bitmap ||
+ cmd.chan_state_active_bitmap) {
size_t cmd_size;
u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw,
REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE, 1);
- if (cmd_ver == 4)
+ if (cmd_ver == 5)
+ cmd_size = sizeof(struct iwl_lari_config_change_cmd_v5);
+ else if (cmd_ver == 4)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v4);
else if (cmd_ver == 3)
cmd_size = sizeof(struct iwl_lari_config_change_cmd_v3);
@@ -1170,9 +1222,13 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
le32_to_cpu(cmd.config_bitmap),
le32_to_cpu(cmd.oem_11ax_allow_bitmap));
IWL_DEBUG_RADIO(mvm,
- "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, cmd_ver=%d\n",
+ "sending LARI_CONFIG_CHANGE, oem_unii4_allow_bitmap=0x%x, chan_state_active_bitmap=0x%x, cmd_ver=%d\n",
le32_to_cpu(cmd.oem_unii4_allow_bitmap),
+ le32_to_cpu(cmd.chan_state_active_bitmap),
cmd_ver);
+ IWL_DEBUG_RADIO(mvm,
+ "sending LARI_CONFIG_CHANGE, oem_uhb_allow_bitmap=0x%x\n",
+ le32_to_cpu(cmd.oem_uhb_allow_bitmap));
ret = iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(REGULATORY_AND_NVM_GROUP,
LARI_CONFIG_CHANGE),
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
index fd352b2624a6..fd7d4abfb454 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c
@@ -604,6 +604,12 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
ctxt_sta->is_assoc = cpu_to_le32(1);
+ if (!mvmvif->authorized &&
+ fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_COEX_HIGH_PRIO))
+ ctxt_sta->data_policy |=
+ cpu_to_le32(COEX_HIGH_PRIORITY_ENABLE);
+
/*
* allow multicast data frames only as long as the station is
* authorized, i.e., GTK keys are already installed (if needed)
@@ -812,6 +818,21 @@ u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
return rate;
}
+u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw, u8 rate_idx)
+{
+ u16 flags = iwl_mvm_mac80211_idx_to_hwrate(fw, rate_idx);
+ bool is_new_rate = iwl_fw_lookup_cmd_ver(fw,
+ LONG_GROUP,
+ BEACON_TEMPLATE_CMD,
+ 0) > 10;
+
+ if (rate_idx <= IWL_FIRST_CCK_RATE)
+ flags |= is_new_rate ? IWL_MAC_BEACON_CCK
+ : IWL_MAC_BEACON_CCK_V1;
+
+ return flags;
+}
+
static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct sk_buff *beacon,
@@ -844,9 +865,10 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm,
rate = iwl_mvm_mac_ctxt_get_lowest_rate(info, vif);
- tx->rate_n_flags |= cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(rate));
+ tx->rate_n_flags |=
+ cpu_to_le32(iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate));
if (rate == IWL_FIRST_CCK_RATE)
- tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK);
+ tx->rate_n_flags |= cpu_to_le32(RATE_MCS_CCK_MSK_V1);
}
@@ -929,11 +951,7 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
u16 flags;
struct ieee80211_chanctx_conf *ctx;
int channel;
-
- flags = iwl_mvm_mac80211_idx_to_hwrate(rate);
-
- if (rate == IWL_FIRST_CCK_RATE)
- flags |= IWL_MAC_BEACON_CCK;
+ flags = iwl_mvm_mac_ctxt_get_beacon_flags(mvm->fw, rate);
/* Enable FILS on PSC channels only */
rcu_read_lock();
@@ -942,7 +960,11 @@ static int iwl_mvm_mac_ctxt_send_beacon_v9(struct iwl_mvm *mvm,
WARN_ON(channel == 0);
if (cfg80211_channel_is_psc(ctx->def.chan) &&
!IWL_MVM_DISABLE_AP_FILS) {
- flags |= IWL_MAC_BEACON_FILS;
+ flags |= iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP,
+ BEACON_TEMPLATE_CMD,
+ 0) > 10 ?
+ IWL_MAC_BEACON_FILS :
+ IWL_MAC_BEACON_FILS_V1;
beacon_cmd.short_ssid =
cpu_to_le32(~crc32_le(~0, vif->bss_conf.ssid,
vif->bss_conf.ssid_len));
@@ -1535,11 +1557,11 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
ieee80211_beacon_set_cntdwn(vif, notif->csa_counter);
}
-void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb)
+void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_channel_switch_noa_notif *notif = (void *)pkt->data;
+ struct iwl_channel_switch_start_notif *notif = (void *)pkt->data;
struct ieee80211_vif *csa_vif, *vif;
struct iwl_mvm_vif *mvmvif;
u32 id_n_color, csa_id, mac_id;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3a4585222d6d..9fb9c7dad314 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -145,7 +145,8 @@ static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = {
.bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
BIT(NL80211_CHAN_WIDTH_20) |
BIT(NL80211_CHAN_WIDTH_40) |
- BIT(NL80211_CHAN_WIDTH_80),
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
.preambles = BIT(NL80211_PREAMBLE_LEGACY) |
BIT(NL80211_PREAMBLE_HT) |
BIT(NL80211_PREAMBLE_VHT) |
@@ -2022,7 +2023,8 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
}
sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band];
- own_he_cap = ieee80211_get_he_iftype_cap(sband, vif->type);
+ own_he_cap = ieee80211_get_he_iftype_cap(sband,
+ ieee80211_vif_type_p2p(vif));
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
if (IS_ERR_OR_NULL(sta)) {
@@ -2234,6 +2236,34 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Failed to config FW to work HE!\n");
}
+static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 duration_override)
+{
+ u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
+ u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
+
+ if (duration_override > duration)
+ duration = duration_override;
+
+ /* Try really hard to protect the session and hear a beacon
+ * The new session protection command allows us to protect the
+ * session for a much longer time since the firmware will internally
+ * create two events: a 300TU one with a very high priority that
+ * won't be fragmented which should be enough for 99% of the cases,
+ * and another one (which we configure here to be 900TU long) which
+ * will have a slightly lower priority, but more importantly, can be
+ * fragmented so that it'll allow other activities to run.
+ */
+ if (fw_has_capa(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
+ iwl_mvm_schedule_session_protection(mvm, vif, 900,
+ min_duration, false);
+ else
+ iwl_mvm_protect_session(mvm, vif, duration,
+ min_duration, 500, false);
+}
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -2317,6 +2347,20 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
u32 dur = (11 * vif->bss_conf.beacon_int) / 10;
iwl_mvm_protect_session(mvm, vif, dur, dur,
5 * dur, false);
+ } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART,
+ &mvm->status) &&
+ !vif->bss_conf.dtim_period) {
+ /*
+ * If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
iwl_mvm_sf_update(mvm, vif, false);
@@ -3192,38 +3236,52 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
if (iwl_mvm_phy_ctx_count(mvm) > 1)
iwl_mvm_teardown_tdls_peers(mvm);
- if (sta->tdls)
+ if (sta->tdls) {
iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr,
NL80211_TDLS_ENABLE_LINK);
+ } else {
+ /* enable beacon filtering */
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
- /* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0));
+ mvmvif->authorized = 1;
- /*
- * Now that the station is authorized, i.e., keys were already
- * installed, need to indicate to the FW that
- * multicast data frames can be forwarded to the driver
- */
- iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ /*
+ * Now that the station is authorized, i.e., keys were already
+ * installed, need to indicate to the FW that
+ * multicast data frames can be forwarded to the driver
+ */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ }
iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
true);
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
- /* Multicast data frames are no longer allowed */
- iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ if (!sta->tdls) {
+ /* Multicast data frames are no longer allowed */
+ iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+
+ /*
+ * Set this after the above iwl_mvm_mac_ctxt_changed()
+ * to avoid sending high prio again for a little time.
+ */
+ mvmvif->authorized = 0;
- /* disable beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
- WARN_ON(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status));
+ /* disable beacon filtering */
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
+ WARN_ON(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status));
+ }
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
if (vif->type == NL80211_IFTYPE_AP) {
mvmvif->ap_assoc_sta_count--;
iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
+ } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) {
+ /* remove session protection if still running */
+ iwl_mvm_stop_session_protection(mvm, vif);
}
ret = 0;
} else if (old_state == IEEE80211_STA_AUTH &&
@@ -3316,29 +3374,24 @@ static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw,
struct ieee80211_prep_tx_info *info)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS;
- u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS;
- if (info->duration > duration)
- duration = info->duration;
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_protect_assoc(mvm, vif, info->duration);
+ mutex_unlock(&mvm->mutex);
+}
+
+static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_prep_tx_info *info)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* for successful cases (auth/assoc), don't cancel session protection */
+ if (info->success)
+ return;
mutex_lock(&mvm->mutex);
- /* Try really hard to protect the session and hear a beacon
- * The new session protection command allows us to protect the
- * session for a much longer time since the firmware will internally
- * create two events: a 300TU one with a very high priority that
- * won't be fragmented which should be enough for 99% of the cases,
- * and another one (which we configure here to be 900TU long) which
- * will have a slightly lower priority, but more importantly, can be
- * fragmented so that it'll allow other activities to run.
- */
- if (fw_has_capa(&mvm->fw->ucode_capa,
- IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD))
- iwl_mvm_schedule_session_protection(mvm, vif, 900,
- min_duration, false);
- else
- iwl_mvm_protect_session(mvm, vif, duration,
- min_duration, 500, false);
+ iwl_mvm_stop_session_protection(mvm, vif);
mutex_unlock(&mvm->mutex);
}
@@ -4704,6 +4757,9 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY))
return;
+ IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n",
+ mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx);
+
if (chsw->count >= mvmvif->csa_count && chsw->block_tx) {
if (mvmvif->csa_misbehave) {
/* Second time, give up on this AP*/
@@ -4720,8 +4776,6 @@ static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw,
if (mvmvif->csa_failed)
goto out_unlock;
- IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d mode = %d\n",
- mvmvif->id, chsw->count, chsw->block_tx);
WARN_ON(iwl_mvm_send_cmd_pdu(mvm,
WIDE_ID(MAC_CONF_GROUP,
CHANNEL_SWITCH_TIME_EVENT_CMD),
@@ -4873,6 +4927,8 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx,
static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
{
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
rinfo->bw = RATE_INFO_BW_20;
@@ -4888,30 +4944,65 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
break;
}
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- rinfo->flags |= RATE_INFO_FLAGS_MCS;
- rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK);
- rinfo->nss = u32_get_bits(rate_n_flags,
- RATE_HT_MCS_NSS_MSK) + 1;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
- rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
- rinfo->mcs = u32_get_bits(rate_n_flags,
- RATE_VHT_MCS_RATE_CODE_MSK);
- rinfo->nss = u32_get_bits(rate_n_flags,
- RATE_VHT_MCS_NSS_MSK) + 1;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
- } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ if (format == RATE_MCS_CCK_MSK ||
+ format == RATE_MCS_LEGACY_OFDM_MSK) {
+ int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK);
+
+ /* add the offset needed to get to the legacy ofdm indices */
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ rate += IWL_FIRST_OFDM_RATE;
+
+ switch (rate) {
+ case IWL_RATE_1M_INDEX:
+ rinfo->legacy = 10;
+ break;
+ case IWL_RATE_2M_INDEX:
+ rinfo->legacy = 20;
+ break;
+ case IWL_RATE_5M_INDEX:
+ rinfo->legacy = 55;
+ break;
+ case IWL_RATE_11M_INDEX:
+ rinfo->legacy = 110;
+ break;
+ case IWL_RATE_6M_INDEX:
+ rinfo->legacy = 60;
+ break;
+ case IWL_RATE_9M_INDEX:
+ rinfo->legacy = 90;
+ break;
+ case IWL_RATE_12M_INDEX:
+ rinfo->legacy = 120;
+ break;
+ case IWL_RATE_18M_INDEX:
+ rinfo->legacy = 180;
+ break;
+ case IWL_RATE_24M_INDEX:
+ rinfo->legacy = 240;
+ break;
+ case IWL_RATE_36M_INDEX:
+ rinfo->legacy = 360;
+ break;
+ case IWL_RATE_48M_INDEX:
+ rinfo->legacy = 480;
+ break;
+ case IWL_RATE_54M_INDEX:
+ rinfo->legacy = 540;
+ }
+ return;
+ }
+
+ rinfo->nss = u32_get_bits(rate_n_flags,
+ RATE_MCS_NSS_MSK) + 1;
+ rinfo->mcs = format == RATE_MCS_HT_MSK ?
+ RATE_HT_MCS_INDEX(rate_n_flags) :
+ u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK);
+
+ if (format == RATE_MCS_HE_MSK) {
u32 gi_ltf = u32_get_bits(rate_n_flags,
RATE_MCS_HE_GI_LTF_MSK);
rinfo->flags |= RATE_INFO_FLAGS_HE_MCS;
- rinfo->mcs = u32_get_bits(rate_n_flags,
- RATE_VHT_MCS_RATE_CODE_MSK);
- rinfo->nss = u32_get_bits(rate_n_flags,
- RATE_VHT_MCS_NSS_MSK) + 1;
if (rate_n_flags & RATE_MCS_HE_106T_MSK) {
rinfo->bw = RATE_INFO_BW_HE_RU;
@@ -4925,10 +5016,10 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
else if (gi_ltf == 2)
rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
- else if (rate_n_flags & RATE_MCS_SGI_MSK)
- rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
- else
+ else if (gi_ltf == 3)
rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ else
+ rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
break;
case RATE_MCS_HE_TYPE_MU:
if (gi_ltf == 0 || gi_ltf == 1)
@@ -4948,46 +5039,19 @@ static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo)
if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK)
rinfo->he_dcm = 1;
- } else {
- switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) {
- case IWL_RATE_1M_PLCP:
- rinfo->legacy = 10;
- break;
- case IWL_RATE_2M_PLCP:
- rinfo->legacy = 20;
- break;
- case IWL_RATE_5M_PLCP:
- rinfo->legacy = 55;
- break;
- case IWL_RATE_11M_PLCP:
- rinfo->legacy = 110;
- break;
- case IWL_RATE_6M_PLCP:
- rinfo->legacy = 60;
- break;
- case IWL_RATE_9M_PLCP:
- rinfo->legacy = 90;
- break;
- case IWL_RATE_12M_PLCP:
- rinfo->legacy = 120;
- break;
- case IWL_RATE_18M_PLCP:
- rinfo->legacy = 180;
- break;
- case IWL_RATE_24M_PLCP:
- rinfo->legacy = 240;
- break;
- case IWL_RATE_36M_PLCP:
- rinfo->legacy = 360;
- break;
- case IWL_RATE_48M_PLCP:
- rinfo->legacy = 480;
- break;
- case IWL_RATE_54M_PLCP:
- rinfo->legacy = 540;
- break;
- }
+ return;
+ }
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
+
+ if (format == RATE_MCS_HT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_MCS;
+ } else if (format == RATE_MCS_VHT_MSK) {
+ rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS;
 }
 }
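/*
 * Editor's sketch, not part of the patch: the recurring refactor above is that
 * the new rate_n_flags word carries an explicit modulation-type field, so the
 * code extracts that field once and compares it, instead of testing one flag
 * bit per standard as the old format did.  The field position and values in
 * this standalone demo are made-up placeholders, not the real iwlwifi layout.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_MOD_TYPE_MSK  0x00000700u  /* placeholder "format" field */
#define DEMO_MOD_LEGACY    0x00000000u
#define DEMO_MOD_HT        0x00000100u
#define DEMO_MOD_VHT       0x00000200u
#define DEMO_MOD_HE        0x00000300u

static const char *demo_format_name(uint32_t rate_n_flags)
{
        /* mask out the format field once, then dispatch on it */
        switch (rate_n_flags & DEMO_MOD_TYPE_MSK) {
        case DEMO_MOD_LEGACY:
                return "legacy (CCK/OFDM)";
        case DEMO_MOD_HT:
                return "HT";
        case DEMO_MOD_VHT:
                return "VHT";
        case DEMO_MOD_HE:
                return "HE";
        default:
                return "unknown";
        }
}

int main(void)
{
        printf("%s\n", demo_format_name(0x00000205u)); /* VHT in this toy layout */
        return 0;
}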
static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw,
@@ -5332,6 +5396,7 @@ const struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
.mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx,
+ .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx,
.mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover,
.flush = iwl_mvm_mac_flush,
.sched_scan_start = iwl_mvm_mac_sched_scan_start,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index f877d86b038e..2b1dcd60e00f 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -297,6 +297,7 @@ struct iwl_probe_resp_data {
* see enum &iwl_mvm_low_latency_cause for causes.
* @low_latency_actual: boolean, indicates low latency is set,
* as a result from low_latency bit flags and takes force into account.
+ * @authorized: indicates the AP station was set to authorized
* @ps_disabled: indicates that this interface requires PS to be disabled
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
@@ -330,6 +331,7 @@ struct iwl_mvm_vif {
bool monitor_active;
u8 low_latency: 6;
u8 low_latency_actual: 1;
+ u8 authorized:1;
bool ps_disabled;
struct iwl_mvm_vif_bf_data bf_data;
@@ -1443,12 +1445,17 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm);
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm);
/* Utils */
+int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band);
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum nl80211_band band);
void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
enum nl80211_band band,
struct ieee80211_tx_rate *r);
-u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
+void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r);
+u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx);
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac);
static inline void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
@@ -1629,6 +1636,8 @@ int iwl_mvm_mac_ctxt_send_beacon_cmd(struct iwl_mvm *mvm,
void *data, int len);
u8 iwl_mvm_mac_ctxt_get_lowest_rate(struct ieee80211_tx_info *info,
struct ieee80211_vif *vif);
+u16 iwl_mvm_mac_ctxt_get_beacon_flags(const struct iwl_fw *fw,
+ u8 rate_idx);
void iwl_mvm_mac_ctxt_set_tim(struct iwl_mvm *mvm,
__le32 *tim_index, __le32 *tim_size,
u8 *beacon, u32 frame_size);
@@ -1649,8 +1658,8 @@ void iwl_mvm_probe_resp_data_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
void iwl_mvm_rx_missed_vap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb);
-void iwl_mvm_channel_switch_noa_notif(struct iwl_mvm *mvm,
- struct iwl_rx_cmd_buffer *rxb);
+void iwl_mvm_channel_switch_start_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb);
/* Bindings */
int iwl_mvm_binding_add_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_binding_remove_vif(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
@@ -1732,7 +1741,7 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq);
void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg);
-int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate);
+int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate);
void rs_update_last_rssi(struct iwl_mvm *mvm,
struct iwl_mvm_sta *mvmsta,
struct ieee80211_rx_status *rx_status);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
index da705fcaf0fc..6d18a1fd649b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c
@@ -583,8 +583,9 @@ void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
return;
wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
- if (wgds_tbl_idx < 0)
- IWL_DEBUG_INFO(mvm, "SAR WGDS is disabled (%d)\n",
+ if (wgds_tbl_idx < 1)
+ IWL_DEBUG_INFO(mvm,
+ "SAR WGDS is disabled or error received (%d)\n",
wgds_tbl_idx);
else
IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index 77ea2d0a3091..232ad531d612 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -29,7 +29,6 @@
#define DRV_DESCRIPTION "The new Intel(R) wireless AGN driver for Linux"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_AUTHOR(DRV_AUTHOR);
MODULE_LICENSE("GPL");
static const struct iwl_op_mode_ops iwl_mvm_ops;
@@ -384,9 +383,9 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
iwl_mvm_probe_resp_data_notif,
RX_HANDLER_ASYNC_LOCKED,
struct iwl_probe_resp_data_notif),
- RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_NOA_NOTIF,
- iwl_mvm_channel_switch_noa_notif,
- RX_HANDLER_SYNC, struct iwl_channel_switch_noa_notif),
+ RX_HANDLER_GRP(MAC_CONF_GROUP, CHANNEL_SWITCH_START_NOTIF,
+ iwl_mvm_channel_switch_start_notif,
+ RX_HANDLER_SYNC, struct iwl_channel_switch_start_notif),
RX_HANDLER_GRP(DATA_PATH_GROUP, MONITOR_NOTIF,
iwl_mvm_rx_monitor_notif, RX_HANDLER_ASYNC_LOCKED,
struct iwl_datapath_monitor_notif),
@@ -512,7 +511,7 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = {
HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD),
HCMD_NAME(SESSION_PROTECTION_CMD),
HCMD_NAME(SESSION_PROTECTION_NOTIF),
- HCMD_NAME(CHANNEL_SWITCH_NOA_NOTIF),
+ HCMD_NAME(CHANNEL_SWITCH_START_NOTIF),
};
/* Please keep this array *SORTED* by hex value.
@@ -522,7 +521,7 @@ static const struct iwl_hcmd_names iwl_mvm_phy_names[] = {
HCMD_NAME(CMD_DTS_MEASUREMENT_TRIGGER_WIDE),
HCMD_NAME(CTDP_CONFIG_CMD),
HCMD_NAME(TEMP_REPORTING_THRESHOLDS_CMD),
- HCMD_NAME(GEO_TX_POWER_LIMIT),
+ HCMD_NAME(PER_CHAIN_LIMIT_OFFSET_CMD),
HCMD_NAME(CT_KILL_NOTIFICATION),
HCMD_NAME(DTS_MEASUREMENT_NOTIF_WIDE),
};
@@ -726,6 +725,183 @@ static int iwl_mvm_start_post_nvm(struct iwl_mvm *mvm)
return 0;
}
+struct iwl_mvm_frob_txf_data {
+ u8 *buf;
+ size_t buflen;
+};
+
+static void iwl_mvm_frob_txf_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct iwl_mvm_frob_txf_data *txf = data;
+ u8 keylen, match, matchend;
+ u8 *keydata;
+ size_t i;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ keydata = key->key;
+ keylen = key->keylen;
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ case WLAN_CIPHER_SUITE_TKIP:
+ /*
+ * WEP has short keys which might show up in the payload,
+ * and then you can deduce the key, so in this case just
+ * remove all FIFO data.
+ * For TKIP, we don't know the phase 2 keys here, so same.
+ */
+ memset(txf->buf, 0xBB, txf->buflen);
+ return;
+ default:
+ return;
+ }
+
+ /* scan for key material and clear it out */
+ match = 0;
+ for (i = 0; i < txf->buflen; i++) {
+ if (txf->buf[i] != keydata[match]) {
+ match = 0;
+ continue;
+ }
+ match++;
+ if (match == keylen) {
+ memset(txf->buf + i - keylen, 0xAA, keylen);
+ match = 0;
+ }
+ }
+
+ /* we're dealing with a FIFO, so check wrapped around data */
+ matchend = match;
+ for (i = 0; match && i < keylen - match; i++) {
+ if (txf->buf[i] != keydata[match])
+ break;
+ match++;
+ if (match == keylen) {
+ memset(txf->buf, 0xAA, i + 1);
+ memset(txf->buf + txf->buflen - matchend, 0xAA,
+ matchend);
+ break;
+ }
+ }
+}
+
+static void iwl_mvm_frob_txf(void *ctx, void *buf, size_t buflen)
+{
+ struct iwl_mvm_frob_txf_data txf = {
+ .buf = buf,
+ .buflen = buflen,
+ };
+ struct iwl_mvm *mvm = ctx;
+
+ /* embedded key material exists only on old API */
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return;
+
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(mvm->hw, NULL, iwl_mvm_frob_txf_key_iter, &txf);
+ rcu_read_unlock();
+}
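/*
 * Editor's sketch, not part of the patch: a simplified, self-contained model
 * of the key-scrubbing iterator above.  It scans a FIFO snapshot for key
 * material, overwrites any full match with a filler byte, and then checks
 * whether a partial match at the tail wraps around to the head of the buffer.
 * The buffer contents, key bytes and the 0xAA filler are demo assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void scrub_key(uint8_t *buf, size_t buflen,
                      const uint8_t *key, size_t keylen)
{
        size_t i, match = 0, matchend;

        /* linear pass: blank out every full occurrence of the key */
        for (i = 0; i < buflen; i++) {
                if (buf[i] != key[match]) {
                        match = 0;
                        continue;
                }
                if (++match == keylen) {
                        memset(buf + i + 1 - keylen, 0xAA, keylen);
                        match = 0;
                }
        }

        /* wrap-around pass: a match starting near the end may finish at the head */
        matchend = match;               /* bytes already matched at the tail */
        for (i = 0; matchend && i < keylen - matchend; i++) {
                if (buf[i] != key[match])
                        break;
                if (++match == keylen) {
                        memset(buf, 0xAA, i + 1);                        /* head part */
                        memset(buf + buflen - matchend, 0xAA, matchend); /* tail part */
                        break;
                }
        }
}

int main(void)
{
        /* key split across the FIFO boundary: "EF" at the end, "ABCD" at the start */
        uint8_t fifo[] = { 'A', 'B', 'C', 'D', '1', '2', '3', 'E', 'F' };
        const uint8_t key[] = { 'E', 'F', 'A', 'B', 'C', 'D' };

        scrub_key(fifo, sizeof(fifo), key, sizeof(key));
        for (size_t i = 0; i < sizeof(fifo); i++)
                printf("%02X ", fifo[i]);
        printf("\n");
        return 0;
}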
+
+static void iwl_mvm_frob_hcmd(void *ctx, void *hcmd, size_t len)
+{
+ /* we only use wide headers for commands */
+ struct iwl_cmd_header_wide *hdr = hcmd;
+ unsigned int frob_start = sizeof(*hdr), frob_end = 0;
+
+ if (len < sizeof(*hdr))
+ return;
+
+ /* all the commands we care about are in LONG_GROUP */
+ if (hdr->group_id != LONG_GROUP)
+ return;
+
+ switch (hdr->cmd) {
+ case WEP_KEY:
+ case WOWLAN_TKIP_PARAM:
+ case WOWLAN_KEK_KCK_MATERIAL:
+ case ADD_STA_KEY:
+ /*
+ * blank out everything here, easier than dealing
+ * with the various versions of the command
+ */
+ frob_end = INT_MAX;
+ break;
+ case MGMT_MCAST_KEY:
+ frob_start = offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) !=
+ offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
+
+ frob_end = offsetofend(struct iwl_mvm_mgmt_mcast_key_cmd, igtk);
+ BUILD_BUG_ON(offsetof(struct iwl_mvm_mgmt_mcast_key_cmd, igtk) <
+ offsetof(struct iwl_mvm_mgmt_mcast_key_cmd_v1, igtk));
+ break;
+ }
+
+ if (frob_start >= frob_end)
+ return;
+
+ if (frob_end > len)
+ frob_end = len;
+
+ memset((u8 *)hcmd + frob_start, 0xAA, frob_end - frob_start);
+}
+
+static void iwl_mvm_frob_mem(void *ctx, u32 mem_addr, void *mem, size_t buflen)
+{
+ const struct iwl_dump_exclude *excl;
+ struct iwl_mvm *mvm = ctx;
+ int i;
+
+ switch (mvm->fwrt.cur_fw_img) {
+ case IWL_UCODE_INIT:
+ default:
+ /* not relevant */
+ return;
+ case IWL_UCODE_REGULAR:
+ case IWL_UCODE_REGULAR_USNIFFER:
+ excl = mvm->fw->dump_excl;
+ break;
+ case IWL_UCODE_WOWLAN:
+ excl = mvm->fw->dump_excl_wowlan;
+ break;
+ }
+
+ BUILD_BUG_ON(sizeof(mvm->fw->dump_excl) !=
+ sizeof(mvm->fw->dump_excl_wowlan));
+
+ for (i = 0; i < ARRAY_SIZE(mvm->fw->dump_excl); i++) {
+ u32 start, end;
+
+ if (!excl[i].addr || !excl[i].size)
+ continue;
+
+ start = excl[i].addr;
+ end = start + excl[i].size;
+
+ if (end <= mem_addr || start >= mem_addr + buflen)
+ continue;
+
+ if (start < mem_addr)
+ start = mem_addr;
+
+ if (end > mem_addr + buflen)
+ end = mem_addr + buflen;
+
+ memset((u8 *)mem + start - mem_addr, 0xAA, end - start);
+ }
+}
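/*
 * Editor's sketch, not part of the patch: the core of iwl_mvm_frob_mem above
 * is clamping an exclusion region against the dumped window and blanking only
 * the overlap.  This standalone demo shows that interval arithmetic; the
 * addresses, sizes and 0xAA filler in main() are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void blank_overlap(uint32_t mem_addr, uint8_t *mem, size_t buflen,
                          uint32_t excl_addr, uint32_t excl_size)
{
        uint32_t start = excl_addr;
        uint32_t end = excl_addr + excl_size;

        /* no overlap with the dumped window at all */
        if (end <= mem_addr || start >= mem_addr + buflen)
                return;

        /* clamp the exclusion region to the window */
        if (start < mem_addr)
                start = mem_addr;
        if (end > mem_addr + buflen)
                end = mem_addr + buflen;

        memset(mem + (start - mem_addr), 0xAA, end - start);
}

int main(void)
{
        uint8_t window[16];

        memset(window, 0x11, sizeof(window));
        /* window covers 0x1000..0x1010; exclusion 0x0FF8..0x1004 overlaps 4 bytes */
        blank_overlap(0x1000, window, sizeof(window), 0x0FF8, 12);

        for (size_t i = 0; i < sizeof(window); i++)
                printf("%02X ", window[i]);
        printf("\n");
        return 0;
}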
+
+static const struct iwl_dump_sanitize_ops iwl_mvm_sanitize_ops = {
+ .frob_txf = iwl_mvm_frob_txf,
+ .frob_hcmd = iwl_mvm_frob_hcmd,
+ .frob_mem = iwl_mvm_frob_mem,
+};
+
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
const struct iwl_fw *fw, struct dentry *dbgfs_dir)
@@ -775,7 +951,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->hw = hw;
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
- dbgfs_dir);
+ &iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
iwl_mvm_get_acpi_tables(mvm);
@@ -868,8 +1044,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
mvm->cmd_ver.range_resp =
iwl_fw_lookup_notif_ver(mvm->fw, LOCATION_GROUP,
TOF_RANGE_RESPONSE_NOTIF, 5);
- /* we only support up to version 8 */
- if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 8))
+ /* we only support up to version 9 */
+ if (WARN_ON_ONCE(mvm->cmd_ver.range_resp > 9))
goto out_free;
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
index f2b090be3898..b2ea2fca5376 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2019 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
* Copyright (C) 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015-2017 Intel Deutschland GmbH
*/
@@ -128,6 +128,19 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
enum ieee80211_ac_numbers ac;
bool tid_found = false;
+ if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
+ cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
+ } else {
+ cmd->rx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
+ cmd->tx_data_timeout_uapsd =
+ cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
+ }
+
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* set advanced pm flag with no uapsd ACs to enable ps-poll */
if (mvmvif->dbgfs_pm.use_ps_poll) {
@@ -182,19 +195,6 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm,
cmd->uapsd_max_sp = mvm->hw->uapsd_max_sp_len;
- if (test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status) ||
- cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_WOWLAN_PS_TX_DATA_TIMEOUT);
- } else {
- cmd->rx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_RX_DATA_TIMEOUT);
- cmd->tx_data_timeout_uapsd =
- cpu_to_le32(IWL_MVM_UAPSD_TX_DATA_TIMEOUT);
- }
-
if (cmd->flags & cpu_to_le16(POWER_FLAGS_SNOOZE_ENA_MSK)) {
cmd->heavy_tx_thld_packets =
IWL_MVM_PS_SNOOZE_HEAVY_TX_THLD_PACKETS;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
index 2d58cb969918..958702403a45 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c
@@ -32,10 +32,6 @@ static u8 rs_fw_set_active_chains(u8 chains)
fw_chains |= IWL_TLC_MNG_CHAIN_A_MSK;
if (chains & ANT_B)
fw_chains |= IWL_TLC_MNG_CHAIN_B_MSK;
- if (chains & ANT_C)
- WARN(false,
- "tlc offload doesn't support antenna C. chains: 0x%x\n",
- chains);
return fw_chains;
}
@@ -314,7 +310,19 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm,
if (flags & IWL_TLC_NOTIF_FLAG_RATE) {
char pretty_rate[100];
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ TLC_MNG_UPDATE_NOTIF, 0) < 3) {
+ rs_pretty_print_rate_v1(pretty_rate, sizeof(pretty_rate),
+ le32_to_cpu(notif->rate));
+ IWL_DEBUG_RATE(mvm,
+ "Got rate in old format. Rate: %s. Converting.\n",
+ pretty_rate);
+ lq_sta->last_rate_n_flags =
+ iwl_new_rate_from_v1(le32_to_cpu(notif->rate));
+ } else {
lq_sta->last_rate_n_flags = le32_to_cpu(notif->rate);
+ }
rs_pretty_print_rate(pretty_rate, sizeof(pretty_rate),
lq_sta->last_rate_n_flags);
IWL_DEBUG_RATE(mvm, "new rate: %s\n", pretty_rate);
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
index b97708cb869d..f4d02f9fe16d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c
@@ -4,11 +4,6 @@
* Copyright(c) 2005 - 2014, 2018 - 2021 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -335,15 +330,15 @@ static const struct rs_tx_column rs_tx_columns[] = {
static inline u8 rs_extract_rate(u32 rate_n_flags)
{
/* also works for HT because bits 7:6 are zero there */
- return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK);
+ return (u8)(rate_n_flags & RATE_LEGACY_RATE_MSK_V1);
}
static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
{
int idx = 0;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK;
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
+ idx = rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1;
idx += IWL_RATE_MCS_0_INDEX;
/* skip 9M, not supported in HT */
@@ -351,8 +346,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK ||
- rate_n_flags & RATE_MCS_HE_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1 ||
+ rate_n_flags & RATE_MCS_HE_MSK_V1) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
@@ -361,8 +356,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
- if ((rate_n_flags & RATE_MCS_HE_MSK) &&
- (idx <= IWL_LAST_HE_RATE))
+ if ((rate_n_flags & RATE_MCS_HE_MSK_V1) &&
+ idx <= IWL_LAST_HE_RATE)
return idx;
} else {
/* legacy rate format, search for match in table */
@@ -459,44 +454,8 @@ static const u16 expected_tpt_mimo2_160MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 971, 0, 1925, 2861, 3779, 5574, 7304, 8147, 8976, 10592, 11640},
};
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
- { "1", "BPSK DSSS"},
- { "2", "QPSK DSSS"},
- {"5.5", "BPSK CCK"},
- { "11", "QPSK CCK"},
- { "6", "BPSK 1/2"},
- { "9", "BPSK 1/2"},
- { "12", "QPSK 1/2"},
- { "18", "QPSK 3/4"},
- { "24", "16QAM 1/2"},
- { "36", "16QAM 3/4"},
- { "48", "64QAM 2/3"},
- { "54", "64QAM 3/4"},
- { "60", "64QAM 5/6"},
-};
-
#define MCS_INDEX_PER_STREAM (8)
-static const char *rs_pretty_ant(u8 ant)
-{
- static const char * const ant_name[] = {
- [ANT_NONE] = "None",
- [ANT_A] = "A",
- [ANT_B] = "B",
- [ANT_AB] = "AB",
- [ANT_C] = "C",
- [ANT_AC] = "AC",
- [ANT_BC] = "BC",
- [ANT_ABC] = "ABC",
- };
-
- if (ant > ANT_ABC)
- return "UNKNOWN";
-
- return ant_name[ant];
-}
-
static const char *rs_pretty_lq_type(enum iwl_table_type type)
{
static const char * const lq_types[] = {
@@ -558,7 +517,7 @@ static char *rs_pretty_rate(const struct rs_rate *rate)
rate_str = "BAD_RATE";
sprintf(buf, "(%s|%s|%s)", rs_pretty_lq_type(rate->type),
- rs_pretty_ant(rate->ant), rate_str);
+ iwl_rs_pretty_ant(rate->ant), rate_str);
return buf;
}
@@ -654,8 +613,7 @@ static void rs_tl_turn_on_agg(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
{
return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK);
}
/*
@@ -820,12 +778,12 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
int index = rate->index;
ucode_rate |= ((rate->ant << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
+ RATE_MCS_ANT_AB_MSK);
if (is_legacy(rate)) {
ucode_rate |= iwl_rates[index].plcp;
if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- ucode_rate |= RATE_MCS_CCK_MSK;
+ ucode_rate |= RATE_MCS_CCK_MSK_V1;
return ucode_rate;
}
@@ -840,7 +798,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Invalid HT rate index %d\n", index);
index = IWL_LAST_HT_RATE;
}
- ucode_rate |= RATE_MCS_HT_MSK;
+ ucode_rate |= RATE_MCS_HT_MSK_V1;
if (is_ht_siso(rate))
ucode_rate |= iwl_rates[index].plcp_ht_siso;
@@ -853,7 +811,7 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
IWL_ERR(mvm, "Invalid VHT rate index %d\n", index);
index = IWL_LAST_VHT_RATE;
}
- ucode_rate |= RATE_MCS_VHT_MSK;
+ ucode_rate |= RATE_MCS_VHT_MSK_V1;
if (is_vht_siso(rate))
ucode_rate |= iwl_rates[index].plcp_vht_siso;
else if (is_vht_mimo2(rate))
@@ -873,9 +831,9 @@ static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
ucode_rate |= rate->bw;
if (rate->sgi)
- ucode_rate |= RATE_MCS_SGI_MSK;
+ ucode_rate |= RATE_MCS_SGI_MSK_V1;
if (rate->ldpc)
- ucode_rate |= RATE_MCS_LDPC_MSK;
+ ucode_rate |= RATE_MCS_LDPC_MSK_V1;
return ucode_rate;
}
@@ -885,7 +843,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
enum nl80211_band band,
struct rs_rate *rate)
{
- u32 ant_msk = ucode_rate & RATE_MCS_ANT_ABC_MSK;
+ u32 ant_msk = ucode_rate & RATE_MCS_ANT_AB_MSK;
u8 num_of_ant = get_num_of_ant_from_rate(ucode_rate);
u8 nss;
@@ -898,9 +856,9 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
rate->ant = (ant_msk >> RATE_MCS_ANT_POS);
/* Legacy */
- if (!(ucode_rate & RATE_MCS_HT_MSK) &&
- !(ucode_rate & RATE_MCS_VHT_MSK) &&
- !(ucode_rate & RATE_MCS_HE_MSK)) {
+ if (!(ucode_rate & RATE_MCS_HT_MSK_V1) &&
+ !(ucode_rate & RATE_MCS_VHT_MSK_V1) &&
+ !(ucode_rate & RATE_MCS_HE_MSK_V1)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
@@ -912,20 +870,20 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
}
/* HT, VHT or HE */
- if (ucode_rate & RATE_MCS_SGI_MSK)
+ if (ucode_rate & RATE_MCS_SGI_MSK_V1)
rate->sgi = true;
- if (ucode_rate & RATE_MCS_LDPC_MSK)
+ if (ucode_rate & RATE_MCS_LDPC_MSK_V1)
rate->ldpc = true;
if (ucode_rate & RATE_MCS_STBC_MSK)
rate->stbc = true;
if (ucode_rate & RATE_MCS_BF_MSK)
rate->bfer = true;
- rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK;
+ rate->bw = ucode_rate & RATE_MCS_CHAN_WIDTH_MSK_V1;
- if (ucode_rate & RATE_MCS_HT_MSK) {
- nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK) >>
- RATE_HT_MCS_NSS_POS) + 1;
+ if (ucode_rate & RATE_MCS_HT_MSK_V1) {
+ nss = ((ucode_rate & RATE_HT_MCS_NSS_MSK_V1) >>
+ RATE_HT_MCS_NSS_POS_V1) + 1;
if (nss == 1) {
rate->type = LQ_HT_SISO;
@@ -938,7 +896,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
- } else if (ucode_rate & RATE_MCS_VHT_MSK) {
+ } else if (ucode_rate & RATE_MCS_VHT_MSK_V1) {
nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
@@ -953,7 +911,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
- } else if (ucode_rate & RATE_MCS_HE_MSK) {
+ } else if (ucode_rate & RATE_MCS_HE_MSK_V1) {
nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
@@ -981,9 +939,6 @@ static int rs_toggle_antenna(u32 valid_ant, struct rs_rate *rate)
{
u8 new_ant_type;
- if (!rate->ant || WARN_ON_ONCE(rate->ant & ANT_C))
- return 0;
-
if (!rs_is_valid_ant(valid_ant, rate->ant))
return 0;
@@ -2552,7 +2507,7 @@ static void rs_get_initial_rate(struct iwl_mvm *mvm,
}
IWL_DEBUG_RATE(mvm, "Best ANT: %s Best RSSI: %d\n",
- rs_pretty_ant(best_ant), best_rssi);
+ iwl_rs_pretty_ant(best_ant), best_rssi);
if (best_ant != ANT_A && best_ant != ANT_B)
rate->ant = first_antenna(valid_tx_ant);
@@ -2652,7 +2607,6 @@ void rs_update_last_rssi(struct iwl_mvm *mvm,
lq_sta->pers.chains = rx_status->chains;
lq_sta->pers.chain_signal[0] = rx_status->chain_signal[0];
lq_sta->pers.chain_signal[1] = rx_status->chain_signal[1];
- lq_sta->pers.chain_signal[2] = rx_status->chain_signal[2];
lq_sta->pers.last_rssi = S8_MIN;
for (i = 0; i < ARRAY_SIZE(lq_sta->pers.chain_signal); i++) {
@@ -2738,8 +2692,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
return;
lq_sta = mvm_sta;
- iwl_mvm_hwrate_to_tx_rate(lq_sta->last_rate_n_flags,
- info->band, &info->control.rates[0]);
+ iwl_mvm_hwrate_to_tx_rate_v1(lq_sta->last_rate_n_flags,
+ info->band, &info->control.rates[0]);
info->control.rates[0].count = 1;
/* Report the optimal rate based on rssi and STA caps if we haven't
@@ -2749,8 +2703,8 @@ static void rs_drv_get_rate(void *mvm_r, struct ieee80211_sta *sta,
optimal_rate = rs_get_optimal_rate(mvm, lq_sta);
last_ucode_rate = ucode_rate_from_rs_rate(mvm,
optimal_rate);
- iwl_mvm_hwrate_to_tx_rate(last_ucode_rate, info->band,
- &txrc->reported_rate);
+ iwl_mvm_hwrate_to_tx_rate_v1(last_ucode_rate, info->band,
+ &txrc->reported_rate);
}
}
@@ -2909,7 +2863,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
mvm->drv_rx_stats.success_frames++;
- switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
+ switch (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) {
case RATE_MCS_CHAN_WIDTH_20:
mvm->drv_rx_stats.bw_20_frames++;
break;
@@ -2926,10 +2880,10 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
WARN_ONCE(1, "bad BW. rate 0x%x", rate);
}
- if (rate & RATE_MCS_HT_MSK) {
+ if (rate & RATE_MCS_HT_MSK_V1) {
mvm->drv_rx_stats.ht_frames++;
- nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1;
- } else if (rate & RATE_MCS_VHT_MSK) {
+ nss = ((rate & RATE_HT_MCS_NSS_MSK_V1) >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ } else if (rate & RATE_MCS_VHT_MSK_V1) {
mvm->drv_rx_stats.vht_frames++;
nss = ((rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
@@ -2942,7 +2896,7 @@ void iwl_mvm_update_frame_stats(struct iwl_mvm *mvm, u32 rate, bool agg)
else if (nss == 2)
mvm->drv_rx_stats.mimo2_frames++;
- if (rate & RATE_MCS_SGI_MSK)
+ if (rate & RATE_MCS_SGI_MSK_V1)
mvm->drv_rx_stats.sgi_frames++;
else
mvm->drv_rx_stats.ngi_frames++;
@@ -3323,7 +3277,7 @@ static void rs_build_rates_table_from_fixed(struct iwl_mvm *mvm,
int i;
int num_rates = ARRAY_SIZE(lq_cmd->rs_table);
__le32 ucode_rate_le32 = cpu_to_le32(ucode_rate);
- u8 ant = (ucode_rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+ u8 ant = (ucode_rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
for (i = 0; i < num_rates; i++)
lq_cmd->rs_table[i] = ucode_rate_le32;
@@ -3688,35 +3642,37 @@ static void rs_free_sta(void *mvm_r, struct ieee80211_sta *sta, void *mvm_sta)
IWL_DEBUG_RATE(mvm, "leave\n");
}
-int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
+int rs_pretty_print_rate_v1(char *buf, int bufsz, const u32 rate)
{
- char *type, *bw;
+ char *type;
u8 mcs = 0, nss = 0;
- u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
+ u8 ant = (rate & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
+ u32 bw = (rate & RATE_MCS_CHAN_WIDTH_MSK_V1) >>
+ RATE_MCS_CHAN_WIDTH_POS;
- if (!(rate & RATE_MCS_HT_MSK) &&
- !(rate & RATE_MCS_VHT_MSK) &&
- !(rate & RATE_MCS_HE_MSK)) {
+ if (!(rate & RATE_MCS_HT_MSK_V1) &&
+ !(rate & RATE_MCS_VHT_MSK_V1) &&
+ !(rate & RATE_MCS_HE_MSK_V1)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps",
- rs_pretty_ant(ant),
+ iwl_rs_pretty_ant(ant),
index == IWL_RATE_INVALID ? "BAD" :
- iwl_rate_mcs[index].mbps);
+ iwl_rate_mcs(index)->mbps);
}
- if (rate & RATE_MCS_VHT_MSK) {
+ if (rate & RATE_MCS_VHT_MSK_V1) {
type = "VHT";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK)
>> RATE_VHT_MCS_NSS_POS) + 1;
- } else if (rate & RATE_MCS_HT_MSK) {
+ } else if (rate & RATE_MCS_HT_MSK_V1) {
type = "HT";
- mcs = rate & RATE_HT_MCS_INDEX_MSK;
- nss = ((rate & RATE_HT_MCS_NSS_MSK)
- >> RATE_HT_MCS_NSS_POS) + 1;
- } else if (rate & RATE_MCS_HE_MSK) {
+ mcs = rate & RATE_HT_MCS_INDEX_MSK_V1;
+ nss = ((rate & RATE_HT_MCS_NSS_MSK_V1)
+ >> RATE_HT_MCS_NSS_POS_V1) + 1;
+ } else if (rate & RATE_MCS_HE_MSK_V1) {
type = "HE";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK)
@@ -3725,29 +3681,12 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
type = "Unknown"; /* shouldn't happen */
}
- switch (rate & RATE_MCS_CHAN_WIDTH_MSK) {
- case RATE_MCS_CHAN_WIDTH_20:
- bw = "20Mhz";
- break;
- case RATE_MCS_CHAN_WIDTH_40:
- bw = "40Mhz";
- break;
- case RATE_MCS_CHAN_WIDTH_80:
- bw = "80Mhz";
- break;
- case RATE_MCS_CHAN_WIDTH_160:
- bw = "160Mhz";
- break;
- default:
- bw = "BAD BW";
- }
-
return scnprintf(buf, bufsz,
"0x%x: %s | ANT: %s BW: %s MCS: %d NSS: %d %s%s%s%s%s",
- rate, type, rs_pretty_ant(ant), bw, mcs, nss,
- (rate & RATE_MCS_SGI_MSK) ? "SGI " : "NGI ",
+ rate, type, iwl_rs_pretty_ant(ant), iwl_rs_pretty_bw(bw), mcs, nss,
+ (rate & RATE_MCS_SGI_MSK_V1) ? "SGI " : "NGI ",
(rate & RATE_MCS_STBC_MSK) ? "STBC " : "",
- (rate & RATE_MCS_LDPC_MSK) ? "LDPC " : "",
+ (rate & RATE_MCS_LDPC_MSK_V1) ? "LDPC " : "",
(rate & RATE_HE_DUAL_CARRIER_MODE_MSK) ? "DCM " : "",
(rate & RATE_MCS_BF_MSK) ? "BF " : "");
}
@@ -3830,10 +3769,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
lq_sta->active_legacy_rate);
desc += scnprintf(buff + desc, bufsz - desc, "fixed rate 0x%X\n",
lq_sta->pers.dbg_fixed_rate);
- desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s%s\n",
+ desc += scnprintf(buff + desc, bufsz - desc, "valid_tx_ant %s%s\n",
(iwl_mvm_get_valid_tx_ant(mvm) & ANT_A) ? "ANT_A," : "",
- (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "",
- (iwl_mvm_get_valid_tx_ant(mvm) & ANT_C) ? "ANT_C" : "");
+ (iwl_mvm_get_valid_tx_ant(mvm) & ANT_B) ? "ANT_B," : "");
desc += scnprintf(buff + desc, bufsz - desc, "lq type %s\n",
(is_legacy(rate)) ? "legacy" :
is_vht(rate) ? "VHT" : "HT");
@@ -3891,7 +3829,7 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
desc += scnprintf(buff + desc, bufsz - desc,
" rate[%d] 0x%X ", i, r);
- desc += rs_pretty_print_rate(buff + desc, bufsz - desc, r);
+ desc += rs_pretty_print_rate_v1(buff + desc, bufsz - desc, r);
if (desc < bufsz - 1)
buff[desc++] = '\n';
}
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
index 32104c9f8f5e..b7bc8c1b2dda 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h
@@ -5,11 +5,6 @@
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 - 2019 Intel Corporation
- *
- * Contact Information:
- * Intel Linux Wireless <linuxwifi@intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
*****************************************************************************/
#ifndef __rs_h__
@@ -36,11 +31,6 @@ struct iwl_rs_rate_info {
#define IWL_RATE_60M_PLCP 3
-enum {
- IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
- IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
#define LINK_QUAL_MAX_RETRY_NUM 16
enum {
@@ -211,13 +201,6 @@ struct rs_rate {
#define is_ht80(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_80)
#define is_ht160(rate) ((rate)->bw == RATE_MCS_CHAN_WIDTH_160)
-#define IWL_MAX_MCS_DISPLAY_SIZE 12
-
-struct iwl_rate_mcs_info {
- char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
- char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
/**
* struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW
* @last_rate_n_flags: last rate reported by FW
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
index 8ef5399ad9be..d22f40a5354d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2020 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2021 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -103,7 +103,7 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
struct iwl_rx_phy_info *phy_info,
struct ieee80211_rx_status *rx_status)
{
- int energy_a, energy_b, energy_c, max_energy;
+ int energy_a, energy_b, max_energy;
u32 val;
val =
@@ -114,14 +114,10 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
energy_b = (val & IWL_RX_INFO_ENERGY_ANT_B_MSK) >>
IWL_RX_INFO_ENERGY_ANT_B_POS;
energy_b = energy_b ? -energy_b : S8_MIN;
- energy_c = (val & IWL_RX_INFO_ENERGY_ANT_C_MSK) >>
- IWL_RX_INFO_ENERGY_ANT_C_POS;
- energy_c = energy_c ? -energy_c : S8_MIN;
max_energy = max(energy_a, energy_b);
- max_energy = max(max_energy, energy_c);
- IWL_DEBUG_STATS(mvm, "energy In A %d B %d C %d , and max %d\n",
- energy_a, energy_b, energy_c, max_energy);
+ IWL_DEBUG_STATS(mvm, "energy In A %d B %d , and max %d\n",
+ energy_a, energy_b, max_energy);
rx_status->signal = max_energy;
rx_status->chains = (le16_to_cpu(phy_info->phy_flags) &
@@ -129,7 +125,6 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
>> RX_RES_PHY_FLAGS_ANTENNA_POS;
rx_status->chain_signal[0] = energy_a;
rx_status->chain_signal[1] = energy_b;
- rx_status->chain_signal[2] = energy_c;
}
/*
@@ -235,7 +230,7 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
mdata->rx.airtime += le16_to_cpu(phy_info->frame_time);
}
- if (!(rate_n_flags & (RATE_MCS_HT_MSK | RATE_MCS_VHT_MSK)))
+ if (!(rate_n_flags & (RATE_MCS_HT_MSK_V1 | RATE_MCS_VHT_MSK_V1)))
return;
mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif);
@@ -249,10 +244,10 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
mvmsta->sta_id != mvmvif->ap_sta_id)
return;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK];
- thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK) >>
- RATE_HT_MCS_NSS_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
+ thr = thresh_tpt[rate_n_flags & RATE_HT_MCS_RATE_CODE_MSK_V1];
+ thr *= 1 + ((rate_n_flags & RATE_HT_MCS_NSS_MSK_V1) >>
+ RATE_HT_MCS_NSS_POS_V1);
} else {
if (WARN_ON((rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK) >=
ARRAY_SIZE(thresh_tpt)))
@@ -262,7 +257,7 @@ static void iwl_mvm_rx_handle_tcm(struct iwl_mvm *mvm,
RATE_VHT_MCS_NSS_POS);
}
- thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) >>
+ thr <<= ((rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) >>
RATE_MCS_CHAN_WIDTH_POS);
mdata->uapsd_nonagg_detect.rx_bytes += len;
@@ -455,7 +450,7 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
}
/* Set up the HT phy flags */
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) {
case RATE_MCS_CHAN_WIDTH_20:
break;
case RATE_MCS_CHAN_WIDTH_40:
@@ -468,20 +463,20 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->bw = RATE_INFO_BW_160;
break;
}
- if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
- rate_n_flags & RATE_MCS_SGI_MSK)
+ if (!(rate_n_flags & RATE_MCS_CCK_MSK_V1) &&
+ rate_n_flags & RATE_MCS_SGI_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (rate_n_flags & RATE_HT_MCS_GF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
- if (rate_n_flags & RATE_MCS_LDPC_MSK)
+ if (rate_n_flags & RATE_MCS_LDPC_MSK_V1)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->nss =
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
index c12f303cf652..e0601f802628 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c
@@ -240,8 +240,7 @@ static void iwl_mvm_add_rtap_sniffer_config(struct iwl_mvm *mvm,
static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct napi_struct *napi,
struct sk_buff *skb, int queue,
- struct ieee80211_sta *sta,
- bool csi)
+ struct ieee80211_sta *sta)
{
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
kfree_skb(skb);
@@ -269,7 +268,6 @@ static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
(rate_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS;
rx_status->chain_signal[0] = energy_a;
rx_status->chain_signal[1] = energy_b;
- rx_status->chain_signal[2] = S8_MIN;
}
static int iwl_mvm_rx_mgmt_prot(struct ieee80211_sta *sta,
@@ -620,7 +618,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm,
while ((skb = __skb_dequeue(skb_list))) {
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb,
reorder_buf->queue,
- sta, false);
+ sta);
reorder_buf->num_stored--;
}
}
@@ -1198,7 +1196,7 @@ static void iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm,
}
if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) &&
- (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) {
+ (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK_V1) != RATE_MCS_CHAN_WIDTH_20) {
he_mu->flags1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN);
@@ -1235,7 +1233,7 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
* the TSF/timers are not transmitted in HE-MU.
*/
u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK);
- u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
+ u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK_V1;
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
@@ -1290,13 +1288,13 @@ iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data,
if (he_mu)
he_mu->flags2 |=
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
rate_n_flags),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW);
- else if (he_type == RATE_MCS_HE_TYPE_TRIG)
+ else if (he_type == RATE_MCS_HE_TYPE_TRIG_V1)
he->data6 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW_KNOWN) |
- le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK,
+ le16_encode_bits(FIELD_GET(RATE_MCS_CHAN_WIDTH_MSK_V1,
rate_n_flags),
IEEE80211_RADIOTAP_HE_DATA6_TB_PPDU_BW);
}
@@ -1508,9 +1506,9 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> RATE_MCS_STBC_POS;
rx_status->nss =
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
- RATE_VHT_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ ((rate_n_flags & RATE_MCS_NSS_MSK) >>
+ RATE_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_HE;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
@@ -1562,14 +1560,15 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb,
}
break;
case 3:
- if ((he_type == RATE_MCS_HE_TYPE_SU ||
- he_type == RATE_MCS_HE_TYPE_EXT_SU) &&
- rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
- else
- rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
break;
+ case 4:
+ rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X;
+ break;
+ default:
+ ltf = IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN;
}
he->data5 |= le16_encode_bits(ltf,
@@ -1653,7 +1652,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct iwl_mvm_rx_phy_data phy_data = {
.info_type = IWL_RX_PHY_INFO_TYPE_NONE,
};
- bool csi = false;
+ u32 format;
+ bool is_sgi;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
@@ -1691,6 +1691,13 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
phy_data.d2 = desc->v1.phy_data2;
phy_data.d3 = desc->v1.phy_data3;
}
+ if (iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
+ REPLY_RX_MPDU_CMD, 0) < 4) {
+ rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ IWL_DEBUG_DROP(mvm, "Got old format rate, converting. New rate: 0x%x\n",
+ rate_n_flags);
+ }
+ format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
len = le16_to_cpu(desc->mpdu_len);
@@ -1744,7 +1751,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
- if (rate_n_flags & RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_HE_MSK)
iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
phy_info, queue);
@@ -1761,7 +1768,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
}
/* set the preamble flag if appropriate */
- if (rate_n_flags & RATE_MCS_CCK_MSK &&
+ if (format == RATE_MCS_CCK_MSK &&
phy_info & IWL_RX_MPDU_PHY_SHORT_PREAMBLE)
rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
@@ -1937,33 +1944,34 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
- if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
- rate_n_flags & RATE_MCS_SGI_MSK)
+ is_sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
+ if (format == RATE_MCS_HT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
+ RATE_MCS_STBC_POS;
rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (format == RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
- RATE_MCS_STBC_POS;
- rx_status->nss =
- ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
- RATE_VHT_MCS_NSS_POS) + 1;
- rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ RATE_MCS_STBC_POS;
+ rx_status->nss =
+ ((rate_n_flags & RATE_MCS_NSS_MSK) >>
+ RATE_MCS_NSS_POS) + 1;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
- } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) {
- int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
- rx_status->band);
+ } else if (!(format == RATE_MCS_HE_MSK)) {
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
"Invalid rate flags 0x%x, band %d,\n",
@@ -1994,7 +2002,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (!iwl_mvm_reorder(mvm, napi, queue, sta, skb, desc))
iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue,
- sta, csi);
+ sta);
out:
rcu_read_unlock();
}
@@ -2013,12 +2021,24 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 channel, energy_a, energy_b;
+ u32 format;
struct iwl_mvm_rx_phy_data phy_data = {
.info_type = le32_get_bits(desc->phy_info[1],
IWL_RX_PHY_DATA1_INFO_TYPE_MASK),
.d0 = desc->phy_info[0],
.d1 = desc->phy_info[1],
};
+ bool is_sgi;
+
+ if (iwl_fw_lookup_notif_ver(mvm->fw, DATA_PATH_GROUP,
+ RX_NO_DATA_NOTIF, 0) < 2) {
+ IWL_DEBUG_DROP(mvm, "Got an old rate format. Old rate: 0x%x\n",
+ rate_n_flags);
+ rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+ IWL_DEBUG_DROP(mvm, " Rate after conversion to the new format: 0x%x\n",
+ rate_n_flags);
+ }
+ format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
if (unlikely(iwl_rx_packet_payload_len(pkt) < sizeof(*desc)))
return;
@@ -2075,7 +2095,7 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
- if (rate_n_flags & RATE_MCS_HE_MSK)
+ if (format == RATE_MCS_HE_MSK)
iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags,
phy_info, queue);
@@ -2091,23 +2111,24 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
rcu_read_lock();
- if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
- rate_n_flags & RATE_MCS_SGI_MSK)
+ is_sgi = format == RATE_MCS_HE_MSK ?
+ iwl_he_is_sgi(rate_n_flags) :
+ rate_n_flags & RATE_MCS_SGI_MSK;
+
+ if (!(format == RATE_MCS_CCK_MSK) && is_sgi)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- rx_status->enc_flags |= RX_ENC_FLAG_HT_GF;
if (rate_n_flags & RATE_MCS_LDPC_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
+ if (format == RATE_MCS_HT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->encoding = RX_ENC_HT;
- rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
+ rx_status->rate_idx = RATE_HT_MCS_INDEX(rate_n_flags);
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ } else if (format == RATE_MCS_VHT_MSK) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
- rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
+ rx_status->rate_idx = rate_n_flags & RATE_MCS_CODE_MSK;
rx_status->encoding = RX_ENC_VHT;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
@@ -2120,12 +2141,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_VHT_NSTS_MSK) + 1;
- } else if (rate_n_flags & RATE_MCS_HE_MSK) {
+ } else if (format == RATE_MCS_HE_MSK) {
rx_status->nss =
le32_get_bits(desc->rx_vec[0],
RX_NO_DATA_RX_VEC0_HE_NSTS_MSK) + 1;
} else {
- int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
+ int rate = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
rx_status->band);
if (WARN(rate < 0 || rate > 0xFF,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index d78e436fa8b5..a138b5c4cce8 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -163,7 +163,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band,
tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS;
if (band == NL80211_BAND_2GHZ && !no_cck)
- return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK |
+ return cpu_to_le32(IWL_RATE_1M_PLCP | RATE_MCS_CCK_MSK_V1 |
tx_ant);
else
return cpu_to_le32(IWL_RATE_6M_PLCP | tx_ant);
@@ -1995,8 +1995,16 @@ static u16 iwl_mvm_scan_umac_flags_v2(struct iwl_mvm *mvm,
{
u16 flags = 0;
+ /*
+ * If no direct SSIDs are provided, perform a passive scan. Otherwise,
+ * if there is a single SSID which is not the broadcast SSID, assume
+ * that the scan is intended for roaming purposes and thus enable Rx on
+ * all chains to improve chances of hearing the beacons/probe responses.
+ */
if (params->n_ssids == 0)
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FORCE_PASSIVE;
+ else if (params->n_ssids == 1 && params->ssids[0].ssid_len)
+ flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_USE_ALL_RX_CHAINS;
if (iwl_mvm_is_scan_fragmented(params->type))
flags |= IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC1;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 0a13c2bda2ee..bdd4ee432548 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -268,6 +268,7 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
int rate_idx = -1;
u8 rate_plcp;
u32 rate_flags = 0;
+ bool is_cck;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
/* info->control is only relevant for non HW rate control */
@@ -299,11 +300,18 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
/* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx);
+ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
+ is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE);
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
+ /* Set CCK or OFDM flag */
+ if (iwl_fw_lookup_cmd_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 8) {
+ if (!is_cck)
+ rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
+ else
+ rate_flags |= RATE_MCS_CCK_MSK;
+ } else if (is_cck) {
+ rate_flags |= RATE_MCS_CCK_MSK_V1;
+ }
return (u32)rate_plcp | rate_flags;
}
@@ -1284,31 +1292,72 @@ const char *iwl_mvm_get_tx_fail_reason(u32 status)
}
#endif /* CONFIG_IWLWIFI_DEBUG */
-void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
- enum nl80211_band band,
- struct ieee80211_tx_rate *r)
+static int iwl_mvm_get_hwrate_chan_width(u32 chan_width)
{
- if (rate_n_flags & RATE_HT_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
+ switch (chan_width) {
case RATE_MCS_CHAN_WIDTH_20:
- break;
+ return 0;
case RATE_MCS_CHAN_WIDTH_40:
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- break;
+ return IEEE80211_TX_RC_40_MHZ_WIDTH;
case RATE_MCS_CHAN_WIDTH_80:
- r->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
- break;
+ return IEEE80211_TX_RC_80_MHZ_WIDTH;
case RATE_MCS_CHAN_WIDTH_160:
- r->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
- break;
+ return IEEE80211_TX_RC_160_MHZ_WIDTH;
+ default:
+ return 0;
}
+}
+
+void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r)
+{
+ u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ u32 rate = format == RATE_MCS_HT_MSK ?
+ RATE_HT_MCS_INDEX(rate_n_flags) :
+ rate_n_flags & RATE_MCS_CODE_MSK;
+
+ r->flags |=
+ iwl_mvm_get_hwrate_chan_width(rate_n_flags &
+ RATE_MCS_CHAN_WIDTH_MSK);
+
if (rate_n_flags & RATE_MCS_SGI_MSK)
r->flags |= IEEE80211_TX_RC_SHORT_GI;
- if (rate_n_flags & RATE_MCS_HT_MSK) {
+ if (format == RATE_MCS_HT_MSK) {
r->flags |= IEEE80211_TX_RC_MCS;
- r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK;
- } else if (rate_n_flags & RATE_MCS_VHT_MSK) {
+ r->idx = rate;
+ } else if (format == RATE_MCS_VHT_MSK) {
+ ieee80211_rate_set_vht(r, rate,
+ ((rate_n_flags & RATE_MCS_NSS_MSK) >>
+ RATE_MCS_NSS_POS) + 1);
+ r->flags |= IEEE80211_TX_RC_VHT_MCS;
+ } else if (format == RATE_MCS_HE_MSK) {
+ /* mac80211 cannot do this without ieee80211_tx_status_ext()
+ * but it only matters for radiotap */
+ r->idx = 0;
+ } else {
+ r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
+ band);
+ }
+}
+
+void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
+ enum nl80211_band band,
+ struct ieee80211_tx_rate *r)
+{
+ if (rate_n_flags & RATE_HT_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+
+ r->flags |=
+ iwl_mvm_get_hwrate_chan_width(rate_n_flags &
+ RATE_MCS_CHAN_WIDTH_MSK_V1);
+
+ if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
+ r->flags |= IEEE80211_TX_RC_MCS;
+ r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
+ } else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
ieee80211_rate_set_vht(
r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
@@ -1323,14 +1372,20 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
/*
* translate ucode response to mac80211 tx status control values
*/
-static void iwl_mvm_hwrate_to_tx_status(u32 rate_n_flags,
+static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
+ u32 rate_n_flags,
struct ieee80211_tx_info *info)
{
struct ieee80211_tx_rate *r = &info->status.rates[0];
+ if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
+ TX_CMD, 0) > 6)
+ rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
+
info->status.antenna =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- iwl_mvm_hwrate_to_tx_rate(rate_n_flags, info->band, r);
+ ((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
+ iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
+ info->band, r);
}
static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
@@ -1450,7 +1505,9 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
/* the FW should have stopped the queue and not
* return this status
*/
- WARN_ON(1);
+ IWL_ERR_LIMIT(mvm,
+ "FW reported TX filtered, status=0x%x, FC=0x%x\n",
+ status, le16_to_cpu(hdr->frame_control));
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
break;
default:
@@ -1472,8 +1529,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
info->status.rates[0].count = tx_resp->failure_frame + 1;
- iwl_mvm_hwrate_to_tx_status(le32_to_cpu(tx_resp->initial_rate),
+
+ iwl_mvm_hwrate_to_tx_status(mvm->fw,
+ le32_to_cpu(tx_resp->initial_rate),
info);
+
+ /* Don't assign the converted initial_rate, because driver
+ * TLC uses this and doesn't support the new FW rate
+ */
info->status.status_driver_data[1] =
(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
@@ -1835,7 +1898,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
info->flags |= IEEE80211_TX_STAT_AMPDU;
memcpy(&info->status, &tx_info->status,
sizeof(tx_info->status));
- iwl_mvm_hwrate_to_tx_status(rate, info);
+ iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
}
}
@@ -1856,7 +1919,7 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
goto out;
tx_info->band = chanctx_conf->def.chan->band;
- iwl_mvm_hwrate_to_tx_status(rate, tx_info);
+ iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
if (!iwl_mvm_has_tlc_offload(mvm)) {
IWL_DEBUG_TX_REPLY(mvm,
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
index 4a3d2971a98b..caf1dcf48888 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
@@ -135,31 +135,25 @@ int iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len,
return iwl_mvm_send_cmd_status(mvm, &cmd, status);
}
-#define IWL_DECLARE_RATE_INFO(r) \
- [IWL_RATE_##r##M_INDEX] = IWL_RATE_##r##M_PLCP
+int iwl_mvm_legacy_hw_idx_to_mac80211_idx(u32 rate_n_flags,
+ enum nl80211_band band)
+{
+ int format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ bool is_LB = band == NL80211_BAND_2GHZ;
-/*
- * Translate from fw_rate_index (IWL_RATE_XXM_INDEX) to PLCP
- */
-static const u8 fw_rate_idx_to_plcp[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1),
- IWL_DECLARE_RATE_INFO(2),
- IWL_DECLARE_RATE_INFO(5),
- IWL_DECLARE_RATE_INFO(11),
- IWL_DECLARE_RATE_INFO(6),
- IWL_DECLARE_RATE_INFO(9),
- IWL_DECLARE_RATE_INFO(12),
- IWL_DECLARE_RATE_INFO(18),
- IWL_DECLARE_RATE_INFO(24),
- IWL_DECLARE_RATE_INFO(36),
- IWL_DECLARE_RATE_INFO(48),
- IWL_DECLARE_RATE_INFO(54),
-};
+ if (format == RATE_MCS_LEGACY_OFDM_MSK)
+ return is_LB ? rate + IWL_FIRST_OFDM_RATE :
+ rate;
+
+ /* CCK is not allowed in HB */
+ return is_LB ? rate : -1;
+}
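/*
 * Editor's sketch, not part of the patch: a toy model of the index
 * translation above.  In the new format a legacy rate carries a small
 * per-modulation index (CCK 0..3, OFDM 0..7), while mac80211's 2.4 GHz
 * bitrate table lists the CCK entries first, so OFDM indices are shifted by
 * the number of CCK entries; on 5 GHz only OFDM exists and CCK is rejected.
 * The offset of 4 mirrors the CCK entries that precede OFDM in the driver's
 * rate table; treat the exact numbers here as illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define FIRST_OFDM 4  /* number of CCK entries in the 2.4 GHz table */

static int legacy_hw_idx_to_mac80211_idx(int hw_idx, bool is_ofdm, bool is_2ghz)
{
        if (is_ofdm)
                return is_2ghz ? hw_idx + FIRST_OFDM : hw_idx;

        /* CCK exists only on 2.4 GHz */
        return is_2ghz ? hw_idx : -1;
}

int main(void)
{
        /* 11 Mbps CCK (hw idx 3) on 2.4 GHz -> mac80211 idx 3 */
        printf("%d\n", legacy_hw_idx_to_mac80211_idx(3, false, true));
        /* lowest OFDM rate (hw idx 0) on 2.4 GHz -> mac80211 idx 4 */
        printf("%d\n", legacy_hw_idx_to_mac80211_idx(0, true, true));
        /* lowest OFDM rate (hw idx 0) on 5 GHz -> mac80211 idx 0 */
        printf("%d\n", legacy_hw_idx_to_mac80211_idx(0, true, false));
        return 0;
}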
int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
enum nl80211_band band)
{
- int rate = rate_n_flags & RATE_LEGACY_RATE_MSK;
+ int rate = rate_n_flags & RATE_LEGACY_RATE_MSK_V1;
int idx;
int band_offset = 0;
@@ -167,16 +161,24 @@ int iwl_mvm_legacy_rate_to_mac80211_idx(u32 rate_n_flags,
if (band != NL80211_BAND_2GHZ)
band_offset = IWL_FIRST_OFDM_RATE;
for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (fw_rate_idx_to_plcp[idx] == rate)
+ if (iwl_fw_rate_idx_to_plcp(idx) == rate)
return idx - band_offset;
return -1;
}
-u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx)
+u8 iwl_mvm_mac80211_idx_to_hwrate(const struct iwl_fw *fw, int rate_idx)
{
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- return fw_rate_idx_to_plcp[rate_idx];
+ if (iwl_fw_lookup_cmd_ver(fw, LONG_GROUP,
+ TX_CMD, 0) > 8)
+ /* In the new rate format, legacy rates are indexed:
+ * 0 - 3 for CCK and 0 - 7 for OFDM.
+ */
+ return (rate_idx >= IWL_FIRST_OFDM_RATE ?
+ rate_idx - IWL_FIRST_OFDM_RATE :
+ rate_idx);
+
+ return iwl_fw_rate_idx_to_plcp(rate_idx);
}
u8 iwl_mvm_mac80211_ac_to_ucode_ac(enum ieee80211_ac_numbers ac)
@@ -217,6 +219,7 @@ u8 first_antenna(u8 mask)
return BIT(ffs(mask) - 1);
}
+#define MAX_ANT_NUM 2
/*
* Toggles between TX antennas to send the probe request on.
* Receives the bitmask of valid TX antennas and the *index* used
@@ -405,6 +408,9 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
+ if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
+ return false;
+
if (num_of_ant(iwl_mvm_get_valid_rx_ant(mvm)) == 1)
return false;
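
For context, the new mapping above can be exercised on its own. The user-space sketch below mirrors the band-dependent logic of iwl_mvm_legacy_hw_idx_to_mac80211_idx(): in the new FW format CCK is indexed 0-3 and OFDM 0-7 per band, while mac80211 keeps one legacy table with OFDM starting after CCK on 2.4 GHz. The FIRST_OFDM_RATE value and the band enum are simplified stand-ins, not the iwlwifi definitions.

#include <stdbool.h>
#include <stdio.h>

#define FIRST_OFDM_RATE 4	/* stand-in for IWL_FIRST_OFDM_RATE */

enum band { BAND_2GHZ, BAND_5GHZ };

/* Map a new-format FW legacy rate index to a mac80211 rate index. */
static int fw_legacy_idx_to_mac80211_idx(int fw_rate, bool is_ofdm, enum band band)
{
	bool is_lb = band == BAND_2GHZ;	/* the low band carries both CCK and OFDM */

	if (is_ofdm)
		return is_lb ? fw_rate + FIRST_OFDM_RATE : fw_rate;

	/* CCK is only valid in the low band */
	return is_lb ? fw_rate : -1;
}

int main(void)
{
	/* OFDM index 0 (6 Mbps): mac80211 idx 4 on 2.4 GHz, idx 0 on 5 GHz */
	printf("%d %d\n",
	       fw_legacy_idx_to_mac80211_idx(0, true, BAND_2GHZ),
	       fw_legacy_idx_to_mac80211_idx(0, true, BAND_5GHZ));
	/* CCK index 2 (5.5 Mbps): idx 2 on 2.4 GHz, rejected on 5 GHz */
	printf("%d %d\n",
	       fw_legacy_idx_to_mac80211_idx(2, false, BAND_2GHZ),
	       fw_legacy_idx_to_mac80211_idx(2, false, BAND_5GHZ));
	return 0;
}
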
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
index 239a722cd79d..85a6da70ca78 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c
@@ -57,6 +57,10 @@ iwl_pcie_ctxt_info_dbg_enable(struct iwl_trans *trans,
dbg_flags |= IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
dbg_cfg->hwm_base_addr = cpu_to_le64(frag->physical);
dbg_cfg->hwm_size = cpu_to_le32(frag->size);
+ dbg_cfg->debug_token_config = cpu_to_le32(trans->dbg.ucode_preset);
+ IWL_DEBUG_FW(trans,
+ "WRT: Applying DRAM destination (debug_token_config=%u)\n",
+ dbg_cfg->debug_token_config);
IWL_DEBUG_FW(trans,
"WRT: Applying DRAM destination (alloc_id=%u, num_frags=%u)\n",
alloc_id,
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index e3996ff99bad..c574f041f096 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -499,6 +499,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* Ma devices */
{IWL_PCI_DEVICE(0x2729, PCI_ANY_ID, iwl_ma_trans_cfg)},
{IWL_PCI_DEVICE(0x7E40, PCI_ANY_ID, iwl_ma_trans_cfg)},
+ {IWL_PCI_DEVICE(0x7F70, PCI_ANY_ID, iwl_ma_trans_cfg)},
/* Bz devices */
{IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)},
@@ -518,7 +519,7 @@ MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
#define IWL_DEV_INFO(_device, _subdevice, _cfg, _name) \
_IWL_DEV_INFO(_device, _subdevice, IWL_CFG_ANY, IWL_CFG_ANY, \
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, \
- IWL_CFG_NO_CDB, _cfg, _name)
+ IWL_CFG_ANY, _cfg, _name)
static const struct iwl_dev_info iwl_dev_info_table[] = {
#if IS_ENABLED(CONFIG_IWLMVM)
@@ -532,15 +533,25 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x31DC, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0xA370, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_name),
IWL_DEV_INFO(0xA370, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
+ IWL_DEV_INFO(0x54F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+ IWL_DEV_INFO(0x54F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_name),
IWL_DEV_INFO(0x51F0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
IWL_DEV_INFO(0x51F0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
+ IWL_DEV_INFO(0x51F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x51F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x54F0, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x54F0, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7A70, 0x1691, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7A70, 0x1692, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_killer_1690i_name),
IWL_DEV_INFO(0x271C, 0x0214, iwl9260_2ac_cfg, iwl9260_1_name),
+ IWL_DEV_INFO(0x7E40, 0x1691, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7E40, 0x1692, iwl_cfg_ma_a0_gf4_a0, iwl_ax411_killer_1690i_name),
/* AX200 */
+ IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
IWL_DEV_INFO(0x2723, 0x1653, iwl_ax200_cfg_cc, iwl_ax200_killer_1650w_name),
IWL_DEV_INFO(0x2723, 0x1654, iwl_ax200_cfg_cc, iwl_ax200_killer_1650x_name),
- IWL_DEV_INFO(0x2723, IWL_CFG_ANY, iwl_ax200_cfg_cc, iwl_ax200_name),
/* Qu with Hr */
IWL_DEV_INFO(0x43F0, 0x0070, iwl_ax201_cfg_qu_hr, NULL),
@@ -639,6 +650,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x7AF0, 0x0510, iwlax211_2ax_cfg_so_gf_a0, NULL),
IWL_DEV_INFO(0x7AF0, 0x0A10, iwlax211_2ax_cfg_so_gf_a0, NULL),
+ /* So with JF */
+ IWL_DEV_INFO(0x7A70, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+ IWL_DEV_INFO(0x7A70, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
+ IWL_DEV_INFO(0x7AF0, 0x1551, iwl9560_2ac_cfg_soc, iwl9560_killer_1550s_160_name),
+ IWL_DEV_INFO(0x7AF0, 0x1552, iwl9560_2ac_cfg_soc, iwl9560_killer_1550i_160_name),
+
/* SnJ with HR */
IWL_DEV_INFO(0x2725, 0x00B0, iwlax411_2ax_cfg_sosnj_gf4_a0, NULL),
IWL_DEV_INFO(0x2726, 0x0090, iwlax211_cfg_snj_gf_a0, NULL),
@@ -648,6 +665,12 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_DEV_INFO(0x2726, 0x0510, iwlax211_cfg_snj_gf_a0, NULL),
IWL_DEV_INFO(0x2726, 0x1651, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650s_name),
IWL_DEV_INFO(0x2726, 0x1652, iwl_cfg_snj_hr_b0, iwl_ax201_killer_1650i_name),
+ IWL_DEV_INFO(0x2726, 0x1671, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675s_name),
+ IWL_DEV_INFO(0x2726, 0x1672, iwlax211_cfg_snj_gf_a0, iwl_ax211_killer_1675i_name),
+ IWL_DEV_INFO(0x2726, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x2726, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
+ IWL_DEV_INFO(0x7F70, 0x1691, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690s_name),
+ IWL_DEV_INFO(0x7F70, 0x1692, iwlax411_2ax_cfg_sosnj_gf4_a0, iwl_ax411_killer_1690i_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_PU, IWL_CFG_ANY,
@@ -703,17 +726,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
iwl9260_2ac_cfg, iwl9462_name),
_IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9560_160_name),
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_PNJ, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
- IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
- iwl9260_2ac_cfg, iwl9560_name),
-
- _IWL_DEV_INFO(0x2526, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_TH, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_CORES_BT_GNSS, IWL_CFG_NO_CDB,
@@ -931,9 +943,9 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr1_b0, iwl_ax101_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
+ IWL_CFG_MAC_TYPE_QU, SILICON_B_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_b0_hr_b0, iwl_ax203_name),
/* Qu C step */
@@ -945,7 +957,7 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP,
IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_qu_c0_hr_b0, iwl_ax203_name),
/* QuZ */
@@ -1053,11 +1065,6 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
- IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY,
- IWL_CFG_NO_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
- iwl_cfg_so_a0_hr_a0, iwl_ax203_name),
- _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
- IWL_CFG_MAC_TYPE_SO, IWL_CFG_ANY,
IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY,
IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_so_a0_hr_a0, iwl_ax101_name),
@@ -1112,6 +1119,50 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
IWL_CFG_RF_TYPE_MR, IWL_CFG_ANY,
IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
iwl_cfg_bz_a0_mr_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_bz_a0_fm_a0, iwl_bz_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY,
+ IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_NO_CDB,
+ iwl_cfg_gl_a0_fm_a0, iwl_bz_name),
+
+/* SoF with JF2 */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF2, IWL_CFG_RF_ID_JF,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9560_name),
+
+/* SoF with JF */
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_160_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9461_name),
+ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
+ IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY,
+ IWL_CFG_RF_TYPE_JF1, IWL_CFG_RF_ID_JF1_DIV,
+ IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB,
+ iwlax210_2ax_cfg_so_jf_b0, iwl9462_name),
/* SoF with JF2 */
_IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY,
@@ -1191,16 +1242,158 @@ static const struct iwl_dev_info iwl_dev_info_table[] = {
#endif /* CONFIG_IWLMVM */
};
+/*
+ * In case there is no OTP on the NIC, get the rf id and cdb info
+ * from the prph registers.
+ */
+static int get_crf_id(struct iwl_trans *iwl_trans)
+{
+ int ret = 0;
+ u32 wfpm_ctrl_addr;
+ u32 wfpm_otp_cfg_addr;
+ u32 sd_reg_ver_addr;
+ u32 cdb = 0;
+ u32 val;
+
+ if (iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+ wfpm_ctrl_addr = WFPM_CTRL_REG_GEN2;
+ wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR_GEN2;
+ sd_reg_ver_addr = SD_REG_VER_GEN2;
+ /* Qu/Pu families have other addresses */
+ } else {
+ wfpm_ctrl_addr = WFPM_CTRL_REG;
+ wfpm_otp_cfg_addr = WFPM_OTP_CFG1_ADDR;
+ sd_reg_ver_addr = SD_REG_VER;
+ }
+
+ if (!iwl_trans_grab_nic_access(iwl_trans)) {
+ IWL_ERR(iwl_trans, "Failed to grab nic access before reading crf id\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Enable access to peripheral registers */
+ val = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr);
+ val |= ENABLE_WFPM;
+ iwl_write_umac_prph_no_grab(iwl_trans, wfpm_ctrl_addr, val);
+
+ /* Read crf info */
+ val = iwl_read_prph_no_grab(iwl_trans, sd_reg_ver_addr);
+
+ /* Read cdb info (also contains the jacket info, if needed in the future) */
+ cdb = iwl_read_umac_prph_no_grab(iwl_trans, wfpm_otp_cfg_addr);
+
+ /* Map crf id to rf id */
+ switch (REG_CRF_ID_TYPE(val)) {
+ case REG_CRF_ID_TYPE_JF_1:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF1 << 12);
+ break;
+ case REG_CRF_ID_TYPE_JF_2:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_JF2 << 12);
+ break;
+ case REG_CRF_ID_TYPE_HR_NONE_CDB:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR1 << 12);
+ break;
+ case REG_CRF_ID_TYPE_HR_CDB:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_HR2 << 12);
+ break;
+ case REG_CRF_ID_TYPE_GF:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_GF << 12);
+ break;
+ case REG_CRF_ID_TYPE_MR:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_MR << 12);
+ break;
+ case REG_CRF_ID_TYPE_FM:
+ iwl_trans->hw_rf_id = (IWL_CFG_RF_TYPE_FM << 12);
+ break;
+ default:
+ ret = -EIO;
+ IWL_ERR(iwl_trans,
+ "Can find a correct rfid for crf id 0x%x\n",
+ REG_CRF_ID_TYPE(val));
+ goto out_release;
+
+ }
+
+ /* Set CDB capabilities */
+ if (cdb & BIT(4)) {
+ iwl_trans->hw_rf_id += BIT(28);
+ IWL_INFO(iwl_trans, "Adding cdb to rf id\n");
+ }
+
+ IWL_INFO(iwl_trans, "Detected RF 0x%x from crf id 0x%x\n",
+ iwl_trans->hw_rf_id, REG_CRF_ID_TYPE(val));
+
+out_release:
+ iwl_trans_release_nic_access(iwl_trans);
+
+out:
+ return ret;
+}
+
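
As a rough illustration of the decoding step inside get_crf_id() above, the sketch below maps a CRF-id register field onto an rf-id word and then folds in the CDB bit read from the OTP. The enum values, shift amount and bit position are invented for the example and do not match the real CSR/PRPH layout.

#include <stdint.h>
#include <stdio.h>

/* Invented register field values and rf types, for illustration only. */
enum reg_crf_type { REG_CRF_JF1 = 1, REG_CRF_JF2, REG_CRF_HR, REG_CRF_GF };
enum rf_type      { RF_TYPE_JF1 = 0x10, RF_TYPE_JF2, RF_TYPE_HR1, RF_TYPE_GF };

#define RF_TYPE_SHIFT	12
#define RF_CDB_BIT	(1u << 28)

static int crf_to_rf_id(unsigned int crf, int has_cdb, uint32_t *rf_id)
{
	switch (crf) {
	case REG_CRF_JF1: *rf_id = RF_TYPE_JF1 << RF_TYPE_SHIFT; break;
	case REG_CRF_JF2: *rf_id = RF_TYPE_JF2 << RF_TYPE_SHIFT; break;
	case REG_CRF_HR:  *rf_id = RF_TYPE_HR1 << RF_TYPE_SHIFT; break;
	case REG_CRF_GF:  *rf_id = RF_TYPE_GF  << RF_TYPE_SHIFT; break;
	default:
		return -1;		/* unknown CRF id */
	}

	if (has_cdb)
		*rf_id |= RF_CDB_BIT;	/* CDB capability flag */

	return 0;
}

int main(void)
{
	uint32_t rf_id;

	if (!crf_to_rf_id(REG_CRF_HR, 1, &rf_id))
		printf("rf_id = 0x%08x\n", rf_id);
	return 0;
}
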
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
+static const struct iwl_dev_info *
+iwl_pci_find_dev_info(u16 device, u16 subsystem_device,
+ u16 mac_type, u8 mac_step,
+ u16 rf_type, u8 cdb, u8 rf_id, u8 no_160, u8 cores)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(iwl_dev_info_table) - 1; i >= 0; i--) {
+ const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
+
+ if (dev_info->device != (u16)IWL_CFG_ANY &&
+ dev_info->device != device)
+ continue;
+
+ if (dev_info->subdevice != (u16)IWL_CFG_ANY &&
+ dev_info->subdevice != subsystem_device)
+ continue;
+
+ if (dev_info->mac_type != (u16)IWL_CFG_ANY &&
+ dev_info->mac_type != mac_type)
+ continue;
+
+ if (dev_info->mac_step != (u8)IWL_CFG_ANY &&
+ dev_info->mac_step != mac_step)
+ continue;
+
+ if (dev_info->rf_type != (u16)IWL_CFG_ANY &&
+ dev_info->rf_type != rf_type)
+ continue;
+
+ if (dev_info->cdb != (u8)IWL_CFG_ANY &&
+ dev_info->cdb != cdb)
+ continue;
+
+ if (dev_info->rf_id != (u8)IWL_CFG_ANY &&
+ dev_info->rf_id != rf_id)
+ continue;
+
+ if (dev_info->no_160 != (u8)IWL_CFG_ANY &&
+ dev_info->no_160 != no_160)
+ continue;
+
+ if (dev_info->cores != (u8)IWL_CFG_ANY &&
+ dev_info->cores != cores)
+ continue;
+
+ return dev_info;
+ }
+
+ return NULL;
+}
+
static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct iwl_cfg_trans_params *trans;
const struct iwl_cfg *cfg_7265d __maybe_unused = NULL;
+ const struct iwl_dev_info *dev_info;
struct iwl_trans *iwl_trans;
struct iwl_trans_pcie *trans_pcie;
- int i, ret;
+ int ret;
const struct iwl_cfg *cfg;
trans = (void *)(ent->driver_data & ~TRANS_CFG_MARKER);
@@ -1222,37 +1415,48 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(iwl_trans);
+ /*
+ * Let's try to grab NIC access early here. Sometimes, NICs may
+ * fail to initialize, and if that happens it's better if we see
+ * issues early on (and can reprobe, per the logic inside), than
+ * first trying to load the firmware etc. and potentially only
+ * detecting any problems when the first interface is brought up.
+ */
+ ret = iwl_finish_nic_init(iwl_trans);
+ if (ret)
+ goto out_free_trans;
+ if (iwl_trans_grab_nic_access(iwl_trans)) {
+ /* all good */
+ iwl_trans_release_nic_access(iwl_trans);
+ } else {
+ ret = -EIO;
+ goto out_free_trans;
+ }
+
iwl_trans->hw_rf_id = iwl_read32(iwl_trans, CSR_HW_RF_ID);
- for (i = 0; i < ARRAY_SIZE(iwl_dev_info_table); i++) {
- const struct iwl_dev_info *dev_info = &iwl_dev_info_table[i];
- if ((dev_info->device == (u16)IWL_CFG_ANY ||
- dev_info->device == pdev->device) &&
- (dev_info->subdevice == (u16)IWL_CFG_ANY ||
- dev_info->subdevice == pdev->subsystem_device) &&
- (dev_info->mac_type == (u16)IWL_CFG_ANY ||
- dev_info->mac_type ==
- CSR_HW_REV_TYPE(iwl_trans->hw_rev)) &&
- (dev_info->mac_step == (u8)IWL_CFG_ANY ||
- dev_info->mac_step ==
- CSR_HW_REV_STEP(iwl_trans->hw_rev)) &&
- (dev_info->rf_type == (u16)IWL_CFG_ANY ||
- dev_info->rf_type ==
- CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id)) &&
- (dev_info->cdb == IWL_CFG_NO_CDB ||
- CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id)) &&
- (dev_info->rf_id == (u8)IWL_CFG_ANY ||
- dev_info->rf_id ==
- IWL_SUBDEVICE_RF_ID(pdev->subsystem_device)) &&
- (dev_info->no_160 == (u8)IWL_CFG_ANY ||
- dev_info->no_160 ==
- IWL_SUBDEVICE_NO_160(pdev->subsystem_device)) &&
- (dev_info->cores == (u8)IWL_CFG_ANY ||
- dev_info->cores ==
- IWL_SUBDEVICE_CORES(pdev->subsystem_device))) {
- iwl_trans->cfg = dev_info->cfg;
- iwl_trans->name = dev_info->name;
- }
+ /*
+ * The RF_ID is set to zero in a blank OTP, so read the version register to
+ * extract the RF_ID.
+ * This is relevant only for family 9000 and up.
+ */
+ if (iwl_trans->trans_cfg->rf_id &&
+ iwl_trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000 &&
+ !CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id) && get_crf_id(iwl_trans))
+ goto out_free_trans;
+
+ dev_info = iwl_pci_find_dev_info(pdev->device, pdev->subsystem_device,
+ CSR_HW_REV_TYPE(iwl_trans->hw_rev),
+ CSR_HW_REV_STEP(iwl_trans->hw_rev),
+ CSR_HW_RFID_TYPE(iwl_trans->hw_rf_id),
+ CSR_HW_RFID_IS_CDB(iwl_trans->hw_rf_id),
+ IWL_SUBDEVICE_RF_ID(pdev->subsystem_device),
+ IWL_SUBDEVICE_NO_160(pdev->subsystem_device),
+ IWL_SUBDEVICE_CORES(pdev->subsystem_device));
+
+ if (dev_info) {
+ iwl_trans->cfg = dev_info->cfg;
+ iwl_trans->name = dev_info->name;
}
#if IS_ENABLED(CONFIG_IWLMVM)
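
The table lookup that replaces the open-coded loop in iwl_pci_probe() can be summarized as follows: entries are scanned from the end of the table backwards, every field equal to the wildcard matches anything, and the first hit wins, so more specific entries placed later in the table take precedence over catch-all ones (which is why the generic AX200 entry was moved above the Killer variants). The two-field table below is a reduced stand-in for the nine-field iwl_dev_info, for illustration only.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CFG_ANY 0xffff

struct dev_info {
	uint16_t device;
	uint16_t subdevice;
	const char *name;
};

static const struct dev_info table[] = {
	{ 0x2723, CFG_ANY, "generic AX200" },	/* catch-all first ... */
	{ 0x2723, 0x1653,  "Killer 1650w" },	/* ... specific later  */
	{ 0x2723, 0x1654,  "Killer 1650x" },
};

static const struct dev_info *find_dev_info(uint16_t device, uint16_t subdevice)
{
	/* Scan backwards so the most specific (later) entry wins. */
	for (int i = (int)(sizeof(table) / sizeof(table[0])) - 1; i >= 0; i--) {
		const struct dev_info *di = &table[i];

		if (di->device != CFG_ANY && di->device != device)
			continue;
		if (di->subdevice != CFG_ANY && di->subdevice != subdevice)
			continue;
		return di;
	}
	return NULL;
}

int main(void)
{
	printf("%s\n", find_dev_info(0x2723, 0x1653)->name);	/* Killer 1650w */
	printf("%s\n", find_dev_info(0x2723, 0x0000)->name);	/* generic AX200 */
	return 0;
}
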
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index 8e45eb38304b..14602d6d6699 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -2149,6 +2149,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
u32 inta_fh, inta_hw;
bool polling = false;
+ bool sw_err;
if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
@@ -2221,9 +2222,13 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
wake_up(&trans_pcie->ucode_write_waitq);
}
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
+ else
+ sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
+
/* Error detected by uCode */
- if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
- (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
+ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
index bf0c32a74ca4..645cb4dd4e5a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
@@ -47,7 +47,7 @@ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
iwl_pcie_apm_config(trans);
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (ret)
return ret;
@@ -131,21 +131,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
if (trans_pcie->is_down)
return;
- if (trans->state >= IWL_TRANS_FW_STARTED) {
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);
- iwl_poll_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
- CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
- 5000);
- msleep(100);
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_SW_RESET);
- } else if (trans_pcie->fw_reset_handshake) {
+ if (trans->state >= IWL_TRANS_FW_STARTED)
+ if (trans_pcie->fw_reset_handshake)
iwl_trans_pcie_fw_reset_handshake(trans);
- }
- }
trans_pcie->is_down = true;
@@ -175,18 +163,6 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
else
iwl_pcie_ctxt_info_free(trans);
- /* Make sure (redundant) we've released our request to stay awake */
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
- else
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
- iwl_set_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_SW_RESET);
- }
/* Stop the device, and put it in low power state */
iwl_pcie_gen2_apm_stop(trans, false);
@@ -466,13 +442,15 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
iwl_pcie_set_ltr(trans);
- if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
+ iwl_write32(trans, CSR_FUNC_SCRATCH, CSR_FUNC_SCRATCH_INIT_VALUE);
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_ROM_START);
- else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+ } else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
- else
+ } else {
iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);
+ }
/* re-check RF-Kill state since we may have missed the interrupt */
hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index f252680f18e8..1efb53f78a62 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -129,7 +129,12 @@ out:
static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_SW_RESET);
+ else
+ iwl_set_bit(trans, CSR_RESET,
+ CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(5000, 6000);
}
@@ -306,7 +311,7 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
if (trans->trans_cfg->base_params->pll_cfg)
iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (ret)
return ret;
@@ -378,7 +383,7 @@ static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
iwl_trans_pcie_sw_reset(trans);
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (WARN_ON(ret)) {
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
@@ -458,6 +463,7 @@ void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
100);
+ msleep(100);
} else {
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
@@ -1056,7 +1062,7 @@ struct iwl_causes_list {
u8 addr;
};
-static struct iwl_causes_list causes_list[] = {
+static const struct iwl_causes_list causes_list_common[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
@@ -1067,30 +1073,50 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
- {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
+static const struct iwl_causes_list causes_list_pre_bz[] = {
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+};
+
+static const struct iwl_causes_list causes_list_bz[] = {
+ {MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ, CSR_MSIX_HW_INT_MASK_AD, 0x29},
+};
+
+static void iwl_pcie_map_list(struct iwl_trans *trans,
+ const struct iwl_causes_list *causes,
+ int arr_size, int val)
+{
+ int i;
+
+ for (i = 0; i < arr_size; i++) {
+ iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
+ iwl_clear_bit(trans, causes[i].mask_reg,
+ causes[i].cause_num);
+ }
+}
+
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
- int i, arr_size = ARRAY_SIZE(causes_list);
- struct iwl_causes_list *causes = causes_list;
-
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
- for (i = 0; i < arr_size; i++) {
- iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
- iwl_clear_bit(trans, causes[i].mask_reg,
- causes[i].cause_num);
- }
+ iwl_pcie_map_list(trans, causes_list_common,
+ ARRAY_SIZE(causes_list_common), val);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_pcie_map_list(trans, causes_list_bz,
+ ARRAY_SIZE(causes_list_bz), val);
+ else
+ iwl_pcie_map_list(trans, causes_list_pre_bz,
+ ARRAY_SIZE(causes_list_pre_bz), val);
}
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
@@ -1208,8 +1234,12 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
}
/* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ iwl_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans, false);
@@ -1501,7 +1531,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
iwl_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (ret)
return ret;
@@ -1734,7 +1764,7 @@ static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
{
int ret;
- ret = iwl_finish_nic_init(trans, trans->trans_cfg);
+ ret = iwl_finish_nic_init(trans);
if (ret < 0)
return ret;
@@ -2140,9 +2170,12 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
if (trans_pcie->cmd_hold_nic_awake)
goto out;
-
- __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
+ else
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
* Above we read the CSR_GP_CNTRL register, which will flush
* any previous writes, but we need the write that clears the
@@ -3203,9 +3236,11 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
return 0;
}
-static struct iwl_trans_dump_data
-*iwl_trans_pcie_dump_data(struct iwl_trans *trans,
- u32 dump_mask)
+static struct iwl_trans_dump_data *
+iwl_trans_pcie_dump_data(struct iwl_trans *trans,
+ u32 dump_mask,
+ const struct iwl_dump_sanitize_ops *sanitize_ops,
+ void *sanitize_ctx)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_fw_error_dump_data *data;
@@ -3305,6 +3340,10 @@ static struct iwl_trans_dump_data
txcmd->caplen = cpu_to_le32(caplen);
memcpy(txcmd->data, cmdq->entries[idx].cmd,
caplen);
+ if (sanitize_ops && sanitize_ops->frob_hcmd)
+ sanitize_ops->frob_hcmd(sanitize_ctx,
+ txcmd->data,
+ caplen);
txcmd = (void *)((u8 *)txcmd->data + caplen);
}
@@ -3365,7 +3404,10 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
if (trans_pcie->msix_enabled) {
inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
- sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
+ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
+ else
+ sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
} else {
inta_addr = CSR_INT;
sw_err_bit = CSR_INT_BIT_SW_ERR;
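
The cause-list split in trans.c above follows a simple pattern: one table shared by all devices plus a tiny per-family table for the entry that differs (the SW error cause moved on Bz), both walked by the same helper. The sketch below reproduces that shape in user space; the cause numbers, addresses and the family check are placeholders, and the register writes are replaced by printf().

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct cause { unsigned int cause_num; unsigned int addr; };

static const struct cause causes_common[] = {
	{ 0x1, 0x10 },
	{ 0x2, 0x11 },
};
static const struct cause causes_pre_bz[] = { { 0x40, 0x29 } };
static const struct cause causes_bz[]     = { { 0x80, 0x29 } };

static void map_list(const struct cause *causes, size_t n, int val)
{
	/* Mirror of the shared mapping helper: one IVAR write plus unmask per cause. */
	for (size_t i = 0; i < n; i++)
		printf("map cause 0x%x at addr 0x%x -> vector %d\n",
		       causes[i].cause_num, causes[i].addr, val);
}

int main(void)
{
	bool is_bz_or_later = true;	/* pretend we probed a Bz-family device */
	int def_irq = 0;

	map_list(causes_common, sizeof(causes_common) / sizeof(causes_common[0]), def_irq);
	if (is_bz_or_later)
		map_list(causes_bz, sizeof(causes_bz) / sizeof(causes_bz[0]), def_irq);
	else
		map_list(causes_pre_bz, sizeof(causes_pre_bz) / sizeof(causes_pre_bz[0]), def_irq);
	return 0;
}
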
diff --git a/drivers/net/wireless/intersil/hostap/hostap_hw.c b/drivers/net/wireless/intersil/hostap/hostap_hw.c
index 9a19046217df..e459e7192ae9 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_hw.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_hw.c
@@ -1403,14 +1403,17 @@ static int prism2_hw_init2(struct net_device *dev, int initial)
hfa384x_events_only_cmd(dev);
if (initial) {
+ u8 addr[ETH_ALEN] = {};
struct list_head *ptr;
+
prism2_check_sta_fw_version(local);
if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR,
- dev->dev_addr, 6, 1) < 0) {
+ addr, ETH_ALEN, 1) < 0) {
printk("%s: could not get own MAC address\n",
dev->name);
}
+ eth_hw_addr_set(dev, addr);
list_for_each(ptr, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
eth_hw_addr_inherit(iface->dev, dev);
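
The hostap, orinoco, hwsim and libertas hunks in this series all apply the same conversion: instead of memcpy()ing into dev->dev_addr directly, the driver reads the MAC into a local buffer and hands it to eth_hw_addr_set() (or eth_hw_addr_inherit()), so the core owns the only write path. Below is a minimal user-space sketch of that shape, using a stand-in struct and setter rather than the real net_device API.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct fake_netdev {
	unsigned char dev_addr[ETH_ALEN];	/* the kernel keeps this read-only to drivers */
};

/* Single write path, in the spirit of eth_hw_addr_set(dev, addr). */
static void fake_hw_addr_set(struct fake_netdev *dev, const unsigned char *addr)
{
	memcpy(dev->dev_addr, addr, ETH_ALEN);
}

int main(void)
{
	struct fake_netdev dev;
	unsigned char addr[ETH_ALEN] = { 0 };

	/* Old style: read the firmware-provided MAC straight into dev->dev_addr.
	 * New style: read into a local buffer, then set it in one call.
	 */
	addr[0] = 0x02;		/* locally administered example address */
	addr[5] = 0x42;
	fake_hw_addr_set(&dev, addr);

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev.dev_addr[0], dev.dev_addr[1], dev.dev_addr[2],
	       dev.dev_addr[3], dev.dev_addr[4], dev.dev_addr[5]);
	return 0;
}
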
diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c
index 54f67b682b6a..787f685e70b4 100644
--- a/drivers/net/wireless/intersil/hostap/hostap_main.c
+++ b/drivers/net/wireless/intersil/hostap/hostap_main.c
@@ -713,9 +713,9 @@ static int prism2_set_mac_address(struct net_device *dev, void *p)
read_lock_bh(&local->iface_lock);
list_for_each(ptr, &local->hostap_interfaces) {
iface = list_entry(ptr, struct hostap_interface, list);
- memcpy(iface->dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(iface->dev, addr->sa_data);
}
- memcpy(local->dev->dev_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(local->dev, addr->sa_data);
read_unlock_bh(&local->iface_lock);
return 0;
diff --git a/drivers/net/wireless/intersil/orinoco/main.c b/drivers/net/wireless/intersil/orinoco/main.c
index 0e73a10cc06c..7df88d20ff3d 100644
--- a/drivers/net/wireless/intersil/orinoco/main.c
+++ b/drivers/net/wireless/intersil/orinoco/main.c
@@ -2265,7 +2265,7 @@ int orinoco_if_add(struct orinoco_private *priv,
netif_carrier_off(dev);
- memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
+ eth_hw_addr_set(dev, wiphy->perm_addr);
dev->base_addr = base_addr;
dev->irq = irq;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0adae76eb8df..23219f3747f8 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2802,7 +2802,6 @@ out_err:
static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
{
- /* TODO: should we support other types, e.g., P2P?*/
.types_mask = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP),
.he_cap = {
@@ -2850,7 +2849,6 @@ static const struct ieee80211_sband_iftype_data he_capa_2ghz[] = {
},
#ifdef CONFIG_MAC80211_MESH
{
- /* TODO: should we support other types, e.g., IBSS?*/
.types_mask = BIT(NL80211_IFTYPE_MESH_POINT),
.he_cap = {
.has_he = true,
@@ -2988,6 +2986,122 @@ static const struct ieee80211_sband_iftype_data he_capa_5ghz[] = {
#endif
};
+static const struct ieee80211_sband_iftype_data he_capa_6ghz[] = {
+ {
+ /* TODO: should we support other types, e.g., P2P? */
+ .types_mask = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_AP),
+ .he_6ghz_capa = {
+ .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN |
+ IEEE80211_HE_6GHZ_CAP_SM_PS |
+ IEEE80211_HE_6GHZ_CAP_RD_RESPONDER |
+ IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS),
+ },
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_BSR |
+ IEEE80211_HE_MAC_CAP2_MU_CASCADING |
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] =
+ IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO |
+ IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ },
+#ifdef CONFIG_MAC80211_MESH
+ {
+ /* TODO: should we support other types, e.g., IBSS? */
+ .types_mask = BIT(NL80211_IFTYPE_MESH_POINT),
+ .he_6ghz_capa = {
+ .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START |
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP |
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN |
+ IEEE80211_HE_6GHZ_CAP_SM_PS |
+ IEEE80211_HE_6GHZ_CAP_RD_RESPONDER |
+ IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS |
+ IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS),
+ },
+ .he_cap = {
+ .has_he = true,
+ .he_cap_elem = {
+ .mac_cap_info[0] =
+ IEEE80211_HE_MAC_CAP0_HTC_HE,
+ .mac_cap_info[1] =
+ IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8,
+ .mac_cap_info[2] =
+ IEEE80211_HE_MAC_CAP2_ACK_EN,
+ .mac_cap_info[3] =
+ IEEE80211_HE_MAC_CAP3_OMI_CONTROL |
+ IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3,
+ .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU,
+ .phy_cap_info[0] =
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G,
+ .phy_cap_info[1] =
+ IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK |
+ IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS,
+ .phy_cap_info[2] = 0,
+
+ /* Leave all the other PHY capability bytes
+ * unset, as DCM, beam forming, RU and PPE
+ * threshold information are not supported
+ */
+ },
+ .he_mcs_nss_supp = {
+ .rx_mcs_80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80 = cpu_to_le16(0xfffa),
+ .rx_mcs_160 = cpu_to_le16(0xfffa),
+ .tx_mcs_160 = cpu_to_le16(0xfffa),
+ .rx_mcs_80p80 = cpu_to_le16(0xfffa),
+ .tx_mcs_80p80 = cpu_to_le16(0xfffa),
+ },
+ },
+ },
+#endif
+};
+
static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
{
u16 n_iftype_data;
@@ -3000,6 +3114,10 @@ static void mac80211_hwsim_he_capab(struct ieee80211_supported_band *sband)
n_iftype_data = ARRAY_SIZE(he_capa_5ghz);
sband->iftype_data =
(struct ieee80211_sband_iftype_data *)he_capa_5ghz;
+ } else if (sband->band == NL80211_BAND_6GHZ) {
+ n_iftype_data = ARRAY_SIZE(he_capa_6ghz);
+ sband->iftype_data =
+ (struct ieee80211_sband_iftype_data *)he_capa_6ghz;
} else {
return;
}
@@ -3290,6 +3408,12 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
sband->vht_cap.vht_mcs.tx_mcs_map =
sband->vht_cap.vht_mcs.rx_mcs_map;
break;
+ case NL80211_BAND_6GHZ:
+ sband->channels = data->channels_6ghz;
+ sband->n_channels = ARRAY_SIZE(hwsim_channels_6ghz);
+ sband->bitrates = data->rates + 4;
+ sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
+ break;
case NL80211_BAND_S1GHZ:
memcpy(&sband->s1g_cap, &hwsim_s1g_cap,
sizeof(sband->s1g_cap));
@@ -3300,19 +3424,21 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
continue;
}
- sband->ht_cap.ht_supported = true;
- sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
- sband->ht_cap.ampdu_factor = 0x3;
- sband->ht_cap.ampdu_density = 0x6;
- memset(&sband->ht_cap.mcs, 0,
- sizeof(sband->ht_cap.mcs));
- sband->ht_cap.mcs.rx_mask[0] = 0xff;
- sband->ht_cap.mcs.rx_mask[1] = 0xff;
- sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (band != NL80211_BAND_6GHZ) {
+ sband->ht_cap.ht_supported = true;
+ sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_GRN_FLD |
+ IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+ sband->ht_cap.ampdu_factor = 0x3;
+ sband->ht_cap.ampdu_density = 0x6;
+ memset(&sband->ht_cap.mcs, 0,
+ sizeof(sband->ht_cap.mcs));
+ sband->ht_cap.mcs.rx_mask[0] = 0xff;
+ sband->ht_cap.mcs.rx_mask[1] = 0xff;
+ sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ }
mac80211_hwsim_he_capab(sband);
@@ -3527,13 +3653,16 @@ static const struct net_device_ops hwsim_netdev_ops = {
static void hwsim_mon_setup(struct net_device *dev)
{
+ u8 addr[ETH_ALEN];
+
dev->netdev_ops = &hwsim_netdev_ops;
dev->needs_free_netdev = true;
ether_setup(dev);
dev->priv_flags |= IFF_NO_QUEUE;
dev->type = ARPHRD_IEEE80211_RADIOTAP;
- eth_zero_addr(dev->dev_addr);
- dev->dev_addr[0] = 0x12;
+ eth_zero_addr(addr);
+ addr[0] = 0x12;
+ eth_hw_addr_set(dev, addr);
}
static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr)
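
The hwsim changes above wire a third capability table for 6 GHz and skip the HT block for that band; the sketch below shows that selection logic on its own, with string placeholders instead of the real ieee80211_sband_iftype_data entries.

#include <stdbool.h>
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_6GHZ };

struct sband {
	enum band band;
	const char *const *iftype_data;
	int n_iftype_data;
	bool ht_supported;
};

static const char *const capa_2ghz[] = { "station", "ap" };
static const char *const capa_5ghz[] = { "station", "ap", "mesh" };
static const char *const capa_6ghz[] = { "station", "ap", "mesh" };

static void setup_band(struct sband *sband)
{
	switch (sband->band) {
	case BAND_2GHZ:
		sband->iftype_data = capa_2ghz;
		sband->n_iftype_data = 2;
		break;
	case BAND_5GHZ:
		sband->iftype_data = capa_5ghz;
		sband->n_iftype_data = 3;
		break;
	case BAND_6GHZ:
		sband->iftype_data = capa_6ghz;
		sband->n_iftype_data = 3;
		break;
	}

	/* HT is a 2.4/5 GHz feature; 6 GHz relies on HE capabilities only. */
	sband->ht_supported = sband->band != BAND_6GHZ;
}

int main(void)
{
	struct sband sband = { .band = BAND_6GHZ };

	setup_band(&sband);
	printf("6 GHz: %d iftype entries, ht=%d\n",
	       sband.n_iftype_data, sband.ht_supported);
	return 0;
}
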
diff --git a/drivers/net/wireless/marvell/libertas/cmd.c b/drivers/net/wireless/marvell/libertas/cmd.c
index a4d9dd73b258..104d2b6dc9af 100644
--- a/drivers/net/wireless/marvell/libertas/cmd.c
+++ b/drivers/net/wireless/marvell/libertas/cmd.c
@@ -150,10 +150,9 @@ int lbs_update_hw_spec(struct lbs_private *priv)
memmove(priv->current_addr, cmd.permanentaddr, ETH_ALEN);
if (!priv->copied_hwaddr) {
- memcpy(priv->dev->dev_addr, priv->current_addr, ETH_ALEN);
+ eth_hw_addr_set(priv->dev, priv->current_addr);
if (priv->mesh_dev)
- memcpy(priv->mesh_dev->dev_addr,
- priv->current_addr, ETH_ALEN);
+ eth_hw_addr_set(priv->mesh_dev, priv->current_addr);
priv->copied_hwaddr = 1;
}
diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
index 20436a289d5c..5d6dc1dd050d 100644
--- a/drivers/net/wireless/marvell/libertas/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas/if_usb.c
@@ -292,6 +292,7 @@ err_add_card:
if_usb_reset_device(cardp);
dealloc:
if_usb_free(cardp);
+ kfree(cardp);
error:
return r;
@@ -316,6 +317,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
/* Unlink and free urb */
if_usb_free(cardp);
+ kfree(cardp);
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
diff --git a/drivers/net/wireless/marvell/libertas/main.c b/drivers/net/wireless/marvell/libertas/main.c
index 64fc5e410864..5c9f295536ea 100644
--- a/drivers/net/wireless/marvell/libertas/main.c
+++ b/drivers/net/wireless/marvell/libertas/main.c
@@ -302,9 +302,9 @@ int lbs_set_mac_address(struct net_device *dev, void *addr)
dev = priv->dev;
memcpy(priv->current_addr, phwaddr->sa_data, ETH_ALEN);
- memcpy(dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(dev, phwaddr->sa_data);
if (priv->mesh_dev)
- memcpy(priv->mesh_dev->dev_addr, phwaddr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(priv->mesh_dev, phwaddr->sa_data);
return ret;
}
diff --git a/drivers/net/wireless/marvell/libertas/mesh.c b/drivers/net/wireless/marvell/libertas/mesh.c
index 6cbba84989b8..a58c1e141f2c 100644
--- a/drivers/net/wireless/marvell/libertas/mesh.c
+++ b/drivers/net/wireless/marvell/libertas/mesh.c
@@ -169,7 +169,7 @@ static ssize_t anycast_mask_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 12, "0x%X\n", le32_to_cpu(mesh_access.data[0]));
+ return sysfs_emit(buf, "0x%X\n", le32_to_cpu(mesh_access.data[0]));
}
/**
@@ -222,7 +222,7 @@ static ssize_t prb_rsp_limit_show(struct device *dev,
return ret;
retry_limit = le32_to_cpu(mesh_access.data[1]);
- return snprintf(buf, 10, "%d\n", retry_limit);
+ return sysfs_emit(buf, "%d\n", retry_limit);
}
/**
@@ -270,7 +270,7 @@ static ssize_t lbs_mesh_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct lbs_private *priv = to_net_dev(dev)->ml_priv;
- return snprintf(buf, 5, "0x%X\n", !!priv->mesh_dev);
+ return sysfs_emit(buf, "0x%X\n", !!priv->mesh_dev);
}
/**
@@ -369,7 +369,7 @@ static ssize_t bootflag_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 12, "%d\n", le32_to_cpu(defs.bootflag));
+ return sysfs_emit(buf, "%d\n", le32_to_cpu(defs.bootflag));
}
/**
@@ -419,7 +419,7 @@ static ssize_t boottime_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 12, "%d\n", defs.boottime);
+ return sysfs_emit(buf, "%d\n", defs.boottime);
}
/**
@@ -479,7 +479,7 @@ static ssize_t channel_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 12, "%d\n", le16_to_cpu(defs.channel));
+ return sysfs_emit(buf, "%d\n", le16_to_cpu(defs.channel));
}
/**
@@ -605,7 +605,7 @@ static ssize_t protocol_id_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 5, "%d\n", defs.meshie.val.active_protocol_id);
+ return sysfs_emit(buf, "%d\n", defs.meshie.val.active_protocol_id);
}
/**
@@ -667,7 +667,7 @@ static ssize_t metric_id_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 5, "%d\n", defs.meshie.val.active_metric_id);
+ return sysfs_emit(buf, "%d\n", defs.meshie.val.active_metric_id);
}
/**
@@ -729,7 +729,7 @@ static ssize_t capability_show(struct device *dev,
if (ret)
return ret;
- return snprintf(buf, 5, "%d\n", defs.meshie.val.mesh_capability);
+ return sysfs_emit(buf, "%d\n", defs.meshie.val.mesh_capability);
}
/**
diff --git a/drivers/net/wireless/marvell/libertas_tf/if_usb.c b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
index fe0a69e804d8..75b5319d033f 100644
--- a/drivers/net/wireless/marvell/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/marvell/libertas_tf/if_usb.c
@@ -230,6 +230,7 @@ static int if_usb_probe(struct usb_interface *intf,
dealloc:
if_usb_free(cardp);
+ kfree(cardp);
error:
lbtf_deb_leave(LBTF_DEB_MAIN);
return -ENOMEM;
@@ -254,6 +255,7 @@ static void if_usb_disconnect(struct usb_interface *intf)
/* Unlink and free urb */
if_usb_free(cardp);
+ kfree(cardp);
usb_set_intfdata(intf, NULL);
usb_put_dev(interface_to_usbdev(intf));
diff --git a/drivers/net/wireless/marvell/mwifiex/11n.c b/drivers/net/wireless/marvell/mwifiex/11n.c
index 6696bce56178..9ff2058bcd7e 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n.c
@@ -125,7 +125,7 @@ int mwifiex_ret_11n_delba(struct mwifiex_private *priv,
tx_ba_tbl->ra);
} else { /*
* In case of failure, recreate the deleted stream in case
- * we initiated the ADDBA
+ * we initiated the DELBA
*/
if (!INITIATOR_BIT(del_ba_param_set))
return 0;
@@ -657,14 +657,15 @@ int mwifiex_send_delba(struct mwifiex_private *priv, int tid, u8 *peer_mac,
uint16_t del_ba_param_set;
memset(&delba, 0, sizeof(delba));
- delba.del_ba_param_set = cpu_to_le16(tid << DELBA_TID_POS);
- del_ba_param_set = le16_to_cpu(delba.del_ba_param_set);
+ del_ba_param_set = tid << DELBA_TID_POS;
+
if (initiator)
del_ba_param_set |= IEEE80211_DELBA_PARAM_INITIATOR_MASK;
else
del_ba_param_set &= ~IEEE80211_DELBA_PARAM_INITIATOR_MASK;
+ delba.del_ba_param_set = cpu_to_le16(del_ba_param_set);
memcpy(&delba.peer_mac_addr, peer_mac, ETH_ALEN);
/* We don't wait for the response of this command */
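
The DELBA change above is an ordering fix: the parameter set is now assembled entirely in host byte order and converted to little-endian once, instead of converting first and then applying host-order masks to the already-swapped field. The sketch below shows the corrected ordering using glibc's htole16()/le16toh() as stand-ins for cpu_to_le16()/le16_to_cpu(); the TID shift and initiator mask mirror the 802.11 DELBA layout but are hard-coded here for the example.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define DELBA_TID_POS		12
#define DELBA_INITIATOR_MASK	0x0800

static uint16_t build_delba_param_set(int tid, bool initiator)
{
	uint16_t param = (uint16_t)(tid << DELBA_TID_POS);

	if (initiator)
		param |= DELBA_INITIATOR_MASK;
	else
		param &= ~DELBA_INITIATOR_MASK;

	return htole16(param);		/* single byte-order conversion, at the end */
}

int main(void)
{
	uint16_t wire = build_delba_param_set(5, true);

	printf("wire value 0x%04x (host 0x%04x)\n", wire, le16toh(wire));
	return 0;
}
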
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 0961f4a5e415..6f23ec34e2e2 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -519,8 +519,14 @@ mwifiex_cfg80211_set_default_mgmt_key(struct wiphy *wiphy,
encrypt_key.is_igtk_def_key = true;
eth_broadcast_addr(encrypt_key.mac_addr);
- return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, true, &encrypt_key, true);
+ if (mwifiex_send_cmd(priv, HostCmd_CMD_802_11_KEY_MATERIAL,
+ HostCmd_ACT_GEN_SET, true, &encrypt_key, true)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "Sending KEY_MATERIAL command failed\n");
+ return -1;
+ }
+
+ return 0;
}
/*
@@ -908,16 +914,20 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
switch (type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_STA;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_role = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_P2P_GO:
- priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
break;
case NL80211_IFTYPE_AP:
priv->bss_role = MWIFIEX_BSS_ROLE_UAP;
+ priv->bss_type = MWIFIEX_BSS_TYPE_UAP;
break;
default:
mwifiex_dbg(adapter, ERROR,
@@ -939,6 +949,117 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv,
return 0;
}
+static bool
+is_vif_type_change_allowed(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype old_iftype,
+ enum nl80211_iftype new_iftype)
+{
+ switch (old_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_STATION:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_AP:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return adapter->curr_iface_comb.sta_intf !=
+ adapter->iface_limit.sta_intf;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ return adapter->curr_iface_comb.p2p_intf !=
+ adapter->iface_limit.p2p_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_GO:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ switch (new_iftype) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return true;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return true;
+ case NL80211_IFTYPE_AP:
+ return adapter->curr_iface_comb.uap_intf !=
+ adapter->iface_limit.uap_intf;
+ default:
+ return false;
+ }
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static void
+update_vif_type_counter(struct mwifiex_adapter *adapter,
+ enum nl80211_iftype iftype,
+ int change)
+{
+ switch (iftype) {
+ case NL80211_IFTYPE_UNSPECIFIED:
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ adapter->curr_iface_comb.sta_intf += change;
+ break;
+ case NL80211_IFTYPE_AP:
+ adapter->curr_iface_comb.uap_intf += change;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ case NL80211_IFTYPE_P2P_GO:
+ adapter->curr_iface_comb.p2p_intf += change;
+ break;
+ default:
+ mwifiex_dbg(adapter, ERROR,
+ "%s: Unsupported iftype passed: %d\n",
+ __func__, iftype);
+ break;
+ }
+}
+
static int
mwifiex_change_vif_to_p2p(struct net_device *dev,
enum nl80211_iftype curr_iftype,
@@ -955,13 +1076,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.p2p_intf ==
- adapter->iface_limit.p2p_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple P2P ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to p2p\n", dev->name);
@@ -970,6 +1084,10 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
switch (type) {
case NL80211_IFTYPE_P2P_CLIENT:
if (mwifiex_cfg80211_init_p2p_client(priv))
@@ -993,21 +1111,6 @@ mwifiex_change_vif_to_p2p(struct net_device *dev,
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.p2p_intf++;
- dev->ieee80211_ptr->iftype = type;
-
return 0;
}
@@ -1027,15 +1130,6 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
adapter = priv->adapter;
- if ((curr_iftype != NL80211_IFTYPE_P2P_CLIENT &&
- curr_iftype != NL80211_IFTYPE_P2P_GO) &&
- (adapter->curr_iface_comb.sta_intf ==
- adapter->iface_limit.sta_intf)) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple station/adhoc ifaces\n");
- return -1;
- }
-
if (type == NL80211_IFTYPE_STATION)
mwifiex_dbg(adapter, INFO,
"%s: changing role to station\n", dev->name);
@@ -1047,26 +1141,17 @@ mwifiex_change_vif_to_sta_adhoc(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
@@ -1086,13 +1171,6 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
adapter = priv->adapter;
- if (adapter->curr_iface_comb.uap_intf ==
- adapter->iface_limit.uap_intf) {
- mwifiex_dbg(adapter, ERROR,
- "cannot create multiple AP ifaces\n");
- return -1;
- }
-
mwifiex_dbg(adapter, INFO,
"%s: changing role to AP\n", dev->name);
@@ -1100,27 +1178,17 @@ mwifiex_change_vif_to_ap(struct net_device *dev,
return -1;
if (mwifiex_init_new_priv_params(priv, dev, type))
return -1;
+
+ update_vif_type_counter(adapter, curr_iftype, -1);
+ update_vif_type_counter(adapter, type, +1);
+ dev->ieee80211_ptr->iftype = type;
+
if (mwifiex_send_cmd(priv, HostCmd_CMD_SET_BSS_MODE,
HostCmd_ACT_GEN_SET, 0, NULL, true))
return -1;
if (mwifiex_sta_init_cmd(priv, false, false))
return -1;
- switch (curr_iftype) {
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- default:
- break;
- }
-
- adapter->curr_iface_comb.uap_intf++;
- dev->ieee80211_ptr->iftype = type;
return 0;
}
/*
@@ -1141,6 +1209,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
return -EBUSY;
}
+ if (type == NL80211_IFTYPE_UNSPECIFIED) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: no new type specified, keeping old type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (curr_iftype == type) {
+ mwifiex_dbg(priv->adapter, INFO,
+ "%s: interface already is of type %d\n",
+ dev->name, curr_iftype);
+ return 0;
+ }
+
+ if (!is_vif_type_change_allowed(priv->adapter, curr_iftype, type)) {
+ mwifiex_dbg(priv->adapter, ERROR,
+ "%s: change from type %d to %d is not allowed\n",
+ dev->name, curr_iftype, type);
+ return -EOPNOTSUPP;
+ }
+
switch (curr_iftype) {
case NL80211_IFTYPE_ADHOC:
switch (type) {
@@ -1160,19 +1249,10 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as IBSS\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_ADHOC: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_STATION:
switch (type) {
case NL80211_IFTYPE_ADHOC:
@@ -1191,22 +1271,14 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_AP:
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as STA\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_STATION: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_AP:
switch (type) {
case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
break;
@@ -1214,69 +1286,60 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy,
case NL80211_IFTYPE_P2P_GO:
return mwifiex_change_vif_to_p2p(dev, curr_iftype,
type, params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as AP\n", dev->name);
- fallthrough;
- case NL80211_IFTYPE_AP: /* This shouldn't happen */
- return 0;
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
switch (type) {
- case NL80211_IFTYPE_STATION:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
- priv->adapter->curr_iface_comb.p2p_intf--;
- priv->adapter->curr_iface_comb.sta_intf++;
- dev->ieee80211_ptr->iftype = type;
- if (mwifiex_deinit_priv_params(priv))
- return -1;
- if (mwifiex_init_new_priv_params(priv, dev, type))
- return -1;
- if (mwifiex_sta_init_cmd(priv, false, false))
- return -1;
- break;
case NL80211_IFTYPE_ADHOC:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
+ case NL80211_IFTYPE_STATION:
return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
type, params);
- break;
+ case NL80211_IFTYPE_P2P_GO:
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_AP:
- if (mwifiex_cfg80211_deinit_p2p(priv))
- return -EFAULT;
return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
params);
- case NL80211_IFTYPE_UNSPECIFIED:
- mwifiex_dbg(priv->adapter, INFO,
- "%s: kept type as P2P\n", dev->name);
- fallthrough;
+ default:
+ goto errnotsupp;
+ }
+
+ case NL80211_IFTYPE_P2P_GO:
+ if (mwifiex_cfg80211_deinit_p2p(priv))
+ return -EFAULT;
+
+ switch (type) {
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_STATION:
+ return mwifiex_change_vif_to_sta_adhoc(dev, curr_iftype,
+ type, params);
case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- return 0;
+ return mwifiex_change_vif_to_p2p(dev, curr_iftype,
+ type, params);
+ case NL80211_IFTYPE_AP:
+ return mwifiex_change_vif_to_ap(dev, curr_iftype, type,
+ params);
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: changing to %d not supported\n",
- dev->name, type);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
- break;
+
default:
- mwifiex_dbg(priv->adapter, ERROR,
- "%s: unknown iftype: %d\n",
- dev->name, dev->ieee80211_ptr->iftype);
- return -EOPNOTSUPP;
+ goto errnotsupp;
}
return 0;
+
+errnotsupp:
+ mwifiex_dbg(priv->adapter, ERROR,
+ "unsupported interface type transition: %d to %d\n",
+ curr_iftype, type);
+ return -EOPNOTSUPP;
}
static void
@@ -2997,7 +3060,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
priv->bss_type = MWIFIEX_BSS_TYPE_P2P;
priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II;
- priv->bss_priority = MWIFIEX_BSS_ROLE_STA;
+ priv->bss_priority = 0;
priv->bss_role = MWIFIEX_BSS_ROLE_STA;
priv->bss_started = 0;
@@ -3108,23 +3171,7 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
mwifiex_dev_debugfs_init(priv);
#endif
- switch (type) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf++;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf++;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- adapter->curr_iface_comb.p2p_intf++;
- break;
- default:
- /* This should be dead code; checked above */
- mwifiex_dbg(adapter, ERROR, "type not supported\n");
- return ERR_PTR(-EINVAL);
- }
+ update_vif_type_counter(adapter, type, +1);
return &priv->wdev;
@@ -3177,37 +3224,18 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
cfg80211_unregister_netdevice(wdev->netdev);
if (priv->dfs_cac_workqueue) {
- flush_workqueue(priv->dfs_cac_workqueue);
destroy_workqueue(priv->dfs_cac_workqueue);
priv->dfs_cac_workqueue = NULL;
}
if (priv->dfs_chan_sw_workqueue) {
- flush_workqueue(priv->dfs_chan_sw_workqueue);
destroy_workqueue(priv->dfs_chan_sw_workqueue);
priv->dfs_chan_sw_workqueue = NULL;
}
/* Clear the priv in adapter */
priv->netdev = NULL;
- switch (priv->bss_mode) {
- case NL80211_IFTYPE_UNSPECIFIED:
- case NL80211_IFTYPE_STATION:
- case NL80211_IFTYPE_ADHOC:
- adapter->curr_iface_comb.sta_intf--;
- break;
- case NL80211_IFTYPE_AP:
- adapter->curr_iface_comb.uap_intf--;
- break;
- case NL80211_IFTYPE_P2P_CLIENT:
- case NL80211_IFTYPE_P2P_GO:
- adapter->curr_iface_comb.p2p_intf--;
- break;
- default:
- mwifiex_dbg(adapter, ERROR,
- "del_virtual_intf: type not supported\n");
- break;
- }
+ update_vif_type_counter(adapter, priv->bss_mode, -1);
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
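Both interface-counter hunks above now delegate to update_vif_type_counter(), whose definition falls outside the lines shown here. A minimal sketch of what such a helper looks like, reconstructed from the two switch statements this patch removes (treat it as a sketch of the intent, not the patch's verbatim code):

static void update_vif_type_counter(struct mwifiex_adapter *adapter,
				    enum nl80211_iftype iftype,
				    int change)
{
	switch (iftype) {
	case NL80211_IFTYPE_UNSPECIFIED:
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
		adapter->curr_iface_comb.sta_intf += change;
		break;
	case NL80211_IFTYPE_AP:
		adapter->curr_iface_comb.uap_intf += change;
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_P2P_GO:
		adapter->curr_iface_comb.p2p_intf += change;
		break;
	default:
		mwifiex_dbg(adapter, ERROR,
			    "%s: unsupported iftype: %d\n",
			    __func__, iftype);
		break;
	}
}

Passing +1 from add_virtual_intf and -1 from del_virtual_intf keeps the bookkeeping in one place instead of two diverging switch blocks.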
@@ -3470,7 +3498,7 @@ static int mwifiex_cfg80211_suspend(struct wiphy *wiphy,
}
if (!wowlan) {
- mwifiex_dbg(adapter, ERROR,
+ mwifiex_dbg(adapter, INFO,
"None of the WOWLAN triggers enabled\n");
ret = 0;
goto done;
diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
index 171a25742600..d6a61f850c6f 100644
--- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
@@ -608,6 +608,11 @@ int mwifiex_send_cmd(struct mwifiex_private *priv, u16 cmd_no,
return -1;
}
+ if (priv->adapter->hs_activated_manually &&
+ cmd_no != HostCmd_CMD_802_11_HS_CFG_ENH) {
+ mwifiex_cancel_hs(priv, MWIFIEX_ASYNC_CMD);
+ priv->adapter->hs_activated_manually = false;
+ }
/* Get a new command node */
cmd_node = mwifiex_get_cmd_node(adapter);
@@ -714,6 +719,15 @@ mwifiex_insert_cmd_to_pending_q(struct mwifiex_adapter *adapter,
}
}
+ /* Same with the exit host sleep cmd; luckily that can't happen at the same time as EXIT_PS. */
+ if (command == HostCmd_CMD_802_11_HS_CFG_ENH) {
+ struct host_cmd_ds_802_11_hs_cfg_enh *hs_cfg =
+ &host_cmd->params.opt_hs_cfg;
+
+ if (le16_to_cpu(hs_cfg->action) == HS_ACTIVATE)
+ add_tail = false;
+ }
+
spin_lock_bh(&adapter->cmd_pending_q_lock);
if (add_tail)
list_add_tail(&cmd_node->list, &adapter->cmd_pending_q);
@@ -1216,6 +1230,13 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
__func__);
adapter->if_ops.wakeup(adapter);
+
+ if (adapter->hs_activated_manually) {
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_ASYNC_CMD);
+ adapter->hs_activated_manually = false;
+ }
+
adapter->hs_activated = false;
clear_bit(MWIFIEX_IS_HS_CONFIGURED, &adapter->work_flags);
clear_bit(MWIFIEX_IS_SUSPENDED, &adapter->work_flags);
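The check-and-cancel sequence added here for a manually activated host sleep repeats again in the three main.c hunks below. A hypothetical helper capturing that pattern (mwifiex_cancel_manual_hs() is an invented name, shown only to make the repeated logic explicit; it is not part of the patch):

static void mwifiex_cancel_manual_hs(struct mwifiex_adapter *adapter)
{
	if (!adapter->hs_activated_manually)
		return;

	/* Leave host sleep before queueing further commands or TX traffic. */
	mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY),
			  MWIFIEX_ASYNC_CMD);
	adapter->hs_activated_manually = false;
}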
diff --git a/drivers/net/wireless/marvell/mwifiex/main.c b/drivers/net/wireless/marvell/mwifiex/main.c
index 17399d4aa129..19b996c6a260 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.c
+++ b/drivers/net/wireless/marvell/mwifiex/main.c
@@ -401,6 +401,12 @@ process_start:
!adapter->scan_processing) &&
!adapter->data_sent &&
!skb_queue_empty(&adapter->tx_data_q)) {
+ if (adapter->hs_activated_manually) {
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_ASYNC_CMD);
+ adapter->hs_activated_manually = false;
+ }
+
mwifiex_process_tx_queue(adapter);
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
@@ -418,6 +424,12 @@ process_start:
!mwifiex_bypass_txlist_empty(adapter) &&
!mwifiex_is_tdls_chan_switching
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+ if (adapter->hs_activated_manually) {
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_ASYNC_CMD);
+ adapter->hs_activated_manually = false;
+ }
+
mwifiex_process_bypass_tx(adapter);
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
@@ -434,6 +446,12 @@ process_start:
!adapter->data_sent && !mwifiex_wmm_lists_empty(adapter) &&
!mwifiex_is_tdls_chan_switching
(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA))) {
+ if (adapter->hs_activated_manually) {
+ mwifiex_cancel_hs(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY),
+ MWIFIEX_ASYNC_CMD);
+ adapter->hs_activated_manually = false;
+ }
+
mwifiex_wmm_process_tx(adapter);
if (adapter->hs_activated) {
clear_bit(MWIFIEX_IS_HS_CONFIGURED,
@@ -498,13 +516,11 @@ static void mwifiex_free_adapter(struct mwifiex_adapter *adapter)
static void mwifiex_terminate_workqueue(struct mwifiex_adapter *adapter)
{
if (adapter->workqueue) {
- flush_workqueue(adapter->workqueue);
destroy_workqueue(adapter->workqueue);
adapter->workqueue = NULL;
}
if (adapter->rx_workqueue) {
- flush_workqueue(adapter->rx_workqueue);
destroy_workqueue(adapter->rx_workqueue);
adapter->rx_workqueue = NULL;
}
@@ -987,7 +1003,7 @@ int mwifiex_set_mac_address(struct mwifiex_private *priv,
return ret;
}
- ether_addr_copy(dev->dev_addr, priv->curr_addr);
+ eth_hw_addr_set(dev, priv->curr_addr);
return 0;
}
diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h
index 5923c5c14c8d..90012cbcfd15 100644
--- a/drivers/net/wireless/marvell/mwifiex/main.h
+++ b/drivers/net/wireless/marvell/mwifiex/main.h
@@ -986,6 +986,7 @@ struct mwifiex_adapter {
struct timer_list wakeup_timer;
struct mwifiex_hs_config_param hs_cfg;
u8 hs_activated;
+ u8 hs_activated_manually;
u16 hs_activate_wait_q_woken;
wait_queue_head_t hs_activate_wait_q;
u8 event_body[MAX_EVENT_SIZE];
diff --git a/drivers/net/wireless/marvell/mwifiex/pcie.c b/drivers/net/wireless/marvell/mwifiex/pcie.c
index c6ccce426b49..c3f5583ea70d 100644
--- a/drivers/net/wireless/marvell/mwifiex/pcie.c
+++ b/drivers/net/wireless/marvell/mwifiex/pcie.c
@@ -17,6 +17,7 @@
* this warranty disclaimer.
*/
+#include <linux/iopoll.h>
#include <linux/firmware.h>
#include "decl.h"
@@ -647,11 +648,15 @@ static void mwifiex_delay_for_sleep_cookie(struct mwifiex_adapter *adapter,
"max count reached while accessing sleep cookie\n");
}
+#define N_WAKEUP_TRIES_SHORT_INTERVAL 15
+#define N_WAKEUP_TRIES_LONG_INTERVAL 35
+
/* This function wakes up the card by reading fw_status register. */
static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
+ int retval;
mwifiex_dbg(adapter, EVENT,
"event: Wakeup device...\n");
@@ -659,11 +664,24 @@ static int mwifiex_pm_wakeup_card(struct mwifiex_adapter *adapter)
if (reg->sleep_cookie)
mwifiex_pcie_dev_wakeup_delay(adapter);
- /* Accessing fw_status register will wakeup device */
- if (mwifiex_write_reg(adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
- mwifiex_dbg(adapter, ERROR,
- "Writing fw_status register failed\n");
- return -1;
+ /* The 88W8897 PCIe+USB firmware (latest version 15.68.19.p21) sometimes
+ * appears to ignore or miss our wakeup request, so we continue trying
+ * until we receive an interrupt from the card.
+ */
+ if (read_poll_timeout(mwifiex_write_reg, retval,
+ READ_ONCE(adapter->int_status) != 0,
+ 500, 500 * N_WAKEUP_TRIES_SHORT_INTERVAL,
+ false,
+ adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
+ if (read_poll_timeout(mwifiex_write_reg, retval,
+ READ_ONCE(adapter->int_status) != 0,
+ 10000, 10000 * N_WAKEUP_TRIES_LONG_INTERVAL,
+ false,
+ adapter, reg->fw_status, FIRMWARE_READY_PCIE)) {
+ mwifiex_dbg(adapter, ERROR,
+ "Firmware didn't wake up\n");
+ return -EIO;
+ }
}
if (reg->sleep_cookie) {
@@ -1490,6 +1508,14 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
ret = -1;
goto done_unmap;
}
+
+ /* The firmware (latest version 15.68.19.p21) of the 88W8897 PCIe+USB card
+ * seems to crash randomly after setting the TX ring write pointer when
+ * ASPM powersaving is enabled. A workaround seems to be keeping the bus
+ * busy by reading a random register afterwards.
+ */
+ mwifiex_read_reg(adapter, PCI_VENDOR_ID, &rx_val);
+
if ((mwifiex_pcie_txbd_not_full(card)) &&
tx_param->next_pkt_len) {
/* have more packets and TxBD still can hold more */
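read_poll_timeout() (from <linux/iopoll.h>) repeatedly calls the given op with the trailing arguments, stores op's return value in val, and stops once cond becomes true or the timeout (in microseconds) expires, returning 0 or -ETIMEDOUT. In the wakeup hunk above the condition watches adapter->int_status rather than the write's return value, so the macro effectively re-issues the wakeup write until the card raises an interrupt. For reference, a self-contained sketch of the more common read-and-check use; the device, accessor, and READY bit names here are hypothetical:

#include <linux/bits.h>
#include <linux/iopoll.h>

struct example_dev;					/* hypothetical device */
u32 example_read_status(struct example_dev *edev);	/* hypothetical accessor */

#define EXAMPLE_STATUS_READY	BIT(0)			/* hypothetical ready bit */

static int example_wait_ready(struct example_dev *edev)
{
	u32 status;

	/* Call example_read_status(edev) every 500 us, give up after 10 ms.
	 * Returns 0 as soon as the READY bit is observed, -ETIMEDOUT
	 * otherwise; the last value read is left in 'status'.
	 */
	return read_poll_timeout(example_read_status, status,
				 status & EXAMPLE_STATUS_READY,
				 500, 10000, false, edev);
}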
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
index 48ea00da1fc9..1e2798dce18f 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_cmd.c
@@ -396,6 +396,10 @@ mwifiex_cmd_802_11_hs_cfg(struct mwifiex_private *priv,
if (hs_activate) {
hs_cfg->action = cpu_to_le16(HS_ACTIVATE);
hs_cfg->params.hs_activate.resp_ctrl = cpu_to_le16(RESP_NEEDED);
+
+ adapter->hs_activated_manually = true;
+ mwifiex_dbg(priv->adapter, CMD,
+ "cmd: Activating host sleep manually\n");
} else {
hs_cfg->action = cpu_to_le16(HS_CONFIGURE);
hs_cfg->params.hs_config.conditions = hscfg_param->conditions;
diff --git a/drivers/net/wireless/marvell/mwifiex/uap_event.c b/drivers/net/wireless/marvell/mwifiex/uap_event.c
index 9121447e2701..2e25d72dcac5 100644
--- a/drivers/net/wireless/marvell/mwifiex/uap_event.c
+++ b/drivers/net/wireless/marvell/mwifiex/uap_event.c
@@ -197,8 +197,7 @@ int mwifiex_process_uap_event(struct mwifiex_private *priv)
mwifiex_dbg(adapter, EVENT,
"AP EVENT: event id: %#x\n", eventcause);
priv->port_open = false;
- memcpy(priv->netdev->dev_addr, adapter->event_body + 2,
- ETH_ALEN);
+ eth_hw_addr_set(priv->netdev, adapter->event_body + 2);
if (priv->hist_data)
mwifiex_hist_data_reset(priv);
mwifiex_check_uap_capabilities(priv, adapter->event_skb);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 426e39d4ccf0..9736aa0ab7fd 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -505,6 +505,22 @@ static int mwifiex_usb_probe(struct usb_interface *intf,
}
}
+ switch (card->usb_boot_state) {
+ case USB8XXX_FW_DNLD:
+ /* Reject broken descriptors. */
+ if (!card->rx_cmd_ep || !card->tx_cmd_ep)
+ return -ENODEV;
+ if (card->bulk_out_maxpktsize == 0)
+ return -ENODEV;
+ break;
+ case USB8XXX_FW_READY:
+ /* Assume the driver can handle missing endpoints for now. */
+ break;
+ default:
+ WARN_ON(1);
+ return -ENODEV;
+ }
+
usb_set_intfdata(intf, card);
ret = mwifiex_add_card(card, &card->fw_done, &usb_ops,
diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c
index 3bf6571f4149..529e325498cd 100644
--- a/drivers/net/wireless/marvell/mwl8k.c
+++ b/drivers/net/wireless/marvell/mwl8k.c
@@ -5800,8 +5800,8 @@ static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
fail:
priv->fw_state = FW_STATE_ERROR;
complete(&priv->firmware_loading_complete);
- device_release_driver(&priv->pdev->dev);
mwl8k_release_firmware(priv);
+ device_release_driver(&priv->pdev->dev);
}
#define MAX_RESTART_ATTEMPTS 1
diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile
index 94efe3c29053..79ab850a45a2 100644
--- a/drivers/net/wireless/mediatek/mt76/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/Makefile
@@ -14,7 +14,7 @@ mt76-$(CONFIG_PCI) += pci.o
mt76-$(CONFIG_NL80211_TESTMODE) += testmode.o
mt76-usb-y := usb.o usb_trace.o
-mt76-sdio-y := sdio.o
+mt76-sdio-y := sdio.o sdio_txrx.o
CFLAGS_trace.o := -I$(src)
CFLAGS_usb_trace.o := -I$(src)
diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c
index fa48cc3a7a8f..b8bcf22a07fd 100644
--- a/drivers/net/wireless/mediatek/mt76/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/debugfs.c
@@ -56,14 +56,14 @@ int mt76_queues_read(struct seq_file *s, void *data)
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i;
+ seq_puts(s, " queue | hw-queued | head | tail |\n");
for (i = 0; i < ARRAY_SIZE(dev->phy.q_tx); i++) {
struct mt76_queue *q = dev->phy.q_tx[i];
if (!q)
continue;
- seq_printf(s,
- "%d: queued=%d head=%d tail=%d\n",
+ seq_printf(s, " %9d | %9d | %9d | %9d |\n",
i, q->queued, q->head, q->tail);
}
@@ -76,12 +76,13 @@ static int mt76_rx_queues_read(struct seq_file *s, void *data)
struct mt76_dev *dev = dev_get_drvdata(s->private);
int i, queued;
+ seq_puts(s, " queue | hw-queued | head | tail |\n");
mt76_for_each_q_rx(dev, i) {
struct mt76_queue *q = &dev->q_rx[i];
queued = mt76_is_usb(dev) ? q->ndesc - q->queued : q->queued;
- seq_printf(s, "%d: queued=%d head=%d tail=%d\n",
- i, queued, q->head, q->tail);
+ seq_printf(s, " %9d | %9d | %9d | %9d |\n",
+ i, queued, q->head, q->tail);
}
return 0;
@@ -116,18 +117,21 @@ static int mt76_read_rate_txpower(struct seq_file *s, void *data)
return 0;
}
-struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+struct dentry *
+mt76_register_debugfs_fops(struct mt76_phy *phy,
+ const struct file_operations *ops)
{
+ const struct file_operations *fops = ops ? ops : &fops_regval;
+ struct mt76_dev *dev = phy->dev;
struct dentry *dir;
- dir = debugfs_create_dir("mt76", dev->hw->wiphy->debugfsdir);
+ dir = debugfs_create_dir("mt76", phy->hw->wiphy->debugfsdir);
if (!dir)
return NULL;
debugfs_create_u8("led_pin", 0600, dir, &dev->led_pin);
debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg);
- debugfs_create_file_unsafe("regval", 0600, dir, dev,
- &fops_regval);
+ debugfs_create_file_unsafe("regval", 0600, dir, dev, fops);
debugfs_create_file_unsafe("napi_threaded", 0600, dir, dev,
&fops_napi_threaded);
debugfs_create_blob("eeprom", 0400, dir, &dev->eeprom);
@@ -140,4 +144,4 @@ struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
return dir;
}
-EXPORT_SYMBOL_GPL(mt76_register_debugfs);
+EXPORT_SYMBOL_GPL(mt76_register_debugfs_fops);
diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
index 3b47e85e95e7..2d58aa31db93 100644
--- a/drivers/net/wireless/mediatek/mt76/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
@@ -15,6 +15,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
struct device_node *np = dev->dev->of_node;
struct mtd_info *mtd;
const __be32 *list;
+ const void *data;
const char *part;
phandle phandle;
int size;
@@ -24,6 +25,16 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
if (!np)
return -ENOENT;
+ data = of_get_property(np, "mediatek,eeprom-data", &size);
+ if (data) {
+ if (size > len)
+ return -EINVAL;
+
+ memcpy(eep, data, size);
+
+ return 0;
+ }
+
list = of_get_property(np, "mediatek,mtd-eeprom", &size);
if (!list)
return -ENOENT;
@@ -54,8 +65,11 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *eep, int offset, int len)
offset = be32_to_cpup(list);
ret = mtd_read(mtd, offset, len, &retlen, eep);
put_mtd_device(mtd);
- if (ret)
+ if (ret) {
+ dev_err(dev->dev, "reading EEPROM from mtd %s failed: %i\n",
+ part, ret);
goto out_put_node;
+ }
if (retlen < len) {
ret = -EINVAL;
@@ -285,6 +299,9 @@ s8 mt76_get_rate_power_limits(struct mt76_phy *phy,
case NL80211_BAND_5GHZ:
band = '5';
break;
+ case NL80211_BAND_6GHZ:
+ band = '6';
+ break;
default:
return target_power;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
index d03aedc3286b..62807dc311c1 100644
--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
+++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
@@ -20,6 +20,13 @@
.max_power = 30, \
}
+#define CHAN6G(_idx, _freq) { \
+ .band = NL80211_BAND_6GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 30, \
+}
+
static const struct ieee80211_channel mt76_channels_2ghz[] = {
CHAN2G(1, 2412),
CHAN2G(2, 2417),
@@ -70,6 +77,72 @@ static const struct ieee80211_channel mt76_channels_5ghz[] = {
CHAN5G(173, 5865),
};
+static const struct ieee80211_channel mt76_channels_6ghz[] = {
+ /* UNII-5 */
+ CHAN6G(1, 5955),
+ CHAN6G(5, 5975),
+ CHAN6G(9, 5995),
+ CHAN6G(13, 6015),
+ CHAN6G(17, 6035),
+ CHAN6G(21, 6055),
+ CHAN6G(25, 6075),
+ CHAN6G(29, 6095),
+ CHAN6G(33, 6115),
+ CHAN6G(37, 6135),
+ CHAN6G(41, 6155),
+ CHAN6G(45, 6175),
+ CHAN6G(49, 6195),
+ CHAN6G(53, 6215),
+ CHAN6G(57, 6235),
+ CHAN6G(61, 6255),
+ CHAN6G(65, 6275),
+ CHAN6G(69, 6295),
+ CHAN6G(73, 6315),
+ CHAN6G(77, 6335),
+ CHAN6G(81, 6355),
+ CHAN6G(85, 6375),
+ CHAN6G(89, 6395),
+ CHAN6G(93, 6415),
+ /* UNII-6 */
+ CHAN6G(97, 6435),
+ CHAN6G(101, 6455),
+ CHAN6G(105, 6475),
+ CHAN6G(109, 6495),
+ CHAN6G(113, 6515),
+ CHAN6G(117, 6535),
+ /* UNII-7 */
+ CHAN6G(121, 6555),
+ CHAN6G(125, 6575),
+ CHAN6G(129, 6595),
+ CHAN6G(133, 6615),
+ CHAN6G(137, 6635),
+ CHAN6G(141, 6655),
+ CHAN6G(145, 6675),
+ CHAN6G(149, 6695),
+ CHAN6G(153, 6715),
+ CHAN6G(157, 6735),
+ CHAN6G(161, 6755),
+ CHAN6G(165, 6775),
+ CHAN6G(169, 6795),
+ CHAN6G(173, 6815),
+ CHAN6G(177, 6835),
+ CHAN6G(181, 6855),
+ CHAN6G(185, 6875),
+ /* UNII-8 */
+ CHAN6G(189, 6895),
+ CHAN6G(193, 6915),
+ CHAN6G(197, 6935),
+ CHAN6G(201, 6955),
+ CHAN6G(205, 6975),
+ CHAN6G(209, 6995),
+ CHAN6G(213, 7015),
+ CHAN6G(217, 7035),
+ CHAN6G(221, 7055),
+ CHAN6G(225, 7075),
+ CHAN6G(229, 7095),
+ CHAN6G(233, 7115),
+};
+
static const struct ieee80211_tpt_blink mt76_tpt_blink[] = {
{ .throughput = 0 * 1024, .blink_time = 334 },
{ .throughput = 1 * 1024, .blink_time = 260 },
@@ -99,6 +172,21 @@ struct ieee80211_rate mt76_rates[] = {
};
EXPORT_SYMBOL_GPL(mt76_rates);
+static const struct cfg80211_sar_freq_ranges mt76_sar_freq_ranges[] = {
+ { .start_freq = 2402, .end_freq = 2494, },
+ { .start_freq = 5150, .end_freq = 5350, },
+ { .start_freq = 5350, .end_freq = 5470, },
+ { .start_freq = 5470, .end_freq = 5725, },
+ { .start_freq = 5725, .end_freq = 5950, },
+};
+
+const struct cfg80211_sar_capa mt76_sar_capa = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(mt76_sar_freq_ranges),
+ .freq_ranges = &mt76_sar_freq_ranges[0],
+};
+EXPORT_SYMBOL_GPL(mt76_sar_capa);
+
static int mt76_led_init(struct mt76_dev *dev)
{
struct device_node *np = dev->dev->of_node;
@@ -179,13 +267,16 @@ void mt76_set_stream_caps(struct mt76_phy *phy, bool vht)
mt76_init_stream_cap(phy, &phy->sband_2g.sband, false);
if (phy->cap.has_5ghz)
mt76_init_stream_cap(phy, &phy->sband_5g.sband, vht);
+ if (phy->cap.has_6ghz)
+ mt76_init_stream_cap(phy, &phy->sband_6g.sband, vht);
}
EXPORT_SYMBOL_GPL(mt76_set_stream_caps);
static int
mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
const struct ieee80211_channel *chan, int n_chan,
- struct ieee80211_rate *rates, int n_rates, bool vht)
+ struct ieee80211_rate *rates, int n_rates,
+ bool ht, bool vht)
{
struct ieee80211_supported_band *sband = &msband->sband;
struct ieee80211_sta_vht_cap *vht_cap;
@@ -209,6 +300,9 @@ mt76_init_sband(struct mt76_phy *phy, struct mt76_sband *msband,
sband->bitrates = rates;
sband->n_bitrates = n_rates;
+ if (!ht)
+ return 0;
+
ht_cap = &sband->ht_cap;
ht_cap->ht_supported = true;
ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
@@ -245,7 +339,7 @@ mt76_init_sband_2g(struct mt76_phy *phy, struct ieee80211_rate *rates,
return mt76_init_sband(phy, &phy->sband_2g, mt76_channels_2ghz,
ARRAY_SIZE(mt76_channels_2ghz), rates,
- n_rates, false);
+ n_rates, true, false);
}
static int
@@ -256,7 +350,18 @@ mt76_init_sband_5g(struct mt76_phy *phy, struct ieee80211_rate *rates,
return mt76_init_sband(phy, &phy->sband_5g, mt76_channels_5ghz,
ARRAY_SIZE(mt76_channels_5ghz), rates,
- n_rates, vht);
+ n_rates, true, vht);
+}
+
+static int
+mt76_init_sband_6g(struct mt76_phy *phy, struct ieee80211_rate *rates,
+ int n_rates)
+{
+ phy->hw->wiphy->bands[NL80211_BAND_6GHZ] = &phy->sband_6g.sband;
+
+ return mt76_init_sband(phy, &phy->sband_6g, mt76_channels_6ghz,
+ ARRAY_SIZE(mt76_channels_6ghz), rates,
+ n_rates, false, false);
}
static void
@@ -322,12 +427,8 @@ mt76_phy_init(struct mt76_phy *phy, struct ieee80211_hw *hw)
ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
-
- if (!(dev->drv->drv_flags & MT_DRV_AMSDU_OFFLOAD)) {
- ieee80211_hw_set(hw, TX_AMSDU);
- ieee80211_hw_set(hw, TX_FRAG_LIST);
- }
-
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, TX_FRAG_LIST);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, AP_LINK_PS);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
@@ -385,9 +486,16 @@ int mt76_register_phy(struct mt76_phy *phy, bool vht,
return ret;
}
+ if (phy->cap.has_6ghz) {
+ ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
+ if (ret)
+ return ret;
+ }
+
wiphy_read_of_freq_limits(phy->hw->wiphy);
mt76_check_sband(phy, &phy->sband_2g, NL80211_BAND_2GHZ);
mt76_check_sband(phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+ mt76_check_sband(phy, &phy->sband_6g, NL80211_BAND_6GHZ);
ret = ieee80211_register_hw(phy->hw);
if (ret)
@@ -403,7 +511,7 @@ void mt76_unregister_phy(struct mt76_phy *phy)
{
struct mt76_dev *dev = phy->dev;
- mt76_tx_status_check(dev, NULL, true);
+ mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(phy->hw);
dev->phy2 = NULL;
}
@@ -435,9 +543,9 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->rx_lock);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->cc_lock);
+ spin_lock_init(&dev->status_lock);
mutex_init(&dev->mutex);
init_waitqueue_head(&dev->tx_wait);
- skb_queue_head_init(&dev->status_list);
skb_queue_head_init(&dev->mcu.res_q);
init_waitqueue_head(&dev->mcu.wait);
@@ -458,6 +566,8 @@ mt76_alloc_device(struct device *pdev, unsigned int size,
spin_lock_init(&dev->token_lock);
idr_init(&dev->token);
+ INIT_LIST_HEAD(&dev->wcid_list);
+
INIT_LIST_HEAD(&dev->txwi_cache);
for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++)
@@ -495,9 +605,16 @@ int mt76_register_device(struct mt76_dev *dev, bool vht,
return ret;
}
+ if (phy->cap.has_6ghz) {
+ ret = mt76_init_sband_6g(phy, rates + 4, n_rates - 4);
+ if (ret)
+ return ret;
+ }
+
wiphy_read_of_freq_limits(hw->wiphy);
mt76_check_sband(&dev->phy, &phy->sband_2g, NL80211_BAND_2GHZ);
mt76_check_sband(&dev->phy, &phy->sband_5g, NL80211_BAND_5GHZ);
+ mt76_check_sband(&dev->phy, &phy->sband_6g, NL80211_BAND_6GHZ);
if (IS_ENABLED(CONFIG_MT76_LEDS)) {
ret = mt76_led_init(dev);
@@ -522,7 +639,7 @@ void mt76_unregister_device(struct mt76_dev *dev)
if (IS_ENABLED(CONFIG_MT76_LEDS))
mt76_led_cleanup(dev);
- mt76_tx_status_check(dev, NULL, true);
+ mt76_tx_status_check(dev, true);
ieee80211_unregister_hw(hw);
}
EXPORT_SYMBOL_GPL(mt76_unregister_device);
@@ -642,6 +759,8 @@ mt76_channel_state(struct mt76_phy *phy, struct ieee80211_channel *c)
if (c->band == NL80211_BAND_2GHZ)
msband = &phy->sband_2g;
+ else if (c->band == NL80211_BAND_6GHZ)
+ msband = &phy->sband_6g;
else
msband = &phy->sband_5g;
@@ -717,10 +836,16 @@ int mt76_get_survey(struct ieee80211_hw *hw, int idx,
if (idx == 0 && dev->drv->update_survey)
mt76_update_survey(phy);
- sband = &phy->sband_2g;
- if (idx >= sband->sband.n_channels) {
- idx -= sband->sband.n_channels;
+ if (idx >= phy->sband_2g.sband.n_channels +
+ phy->sband_5g.sband.n_channels) {
+ idx -= (phy->sband_2g.sband.n_channels +
+ phy->sband_5g.sband.n_channels);
+ sband = &phy->sband_6g;
+ } else if (idx >= phy->sband_2g.sband.n_channels) {
+ idx -= phy->sband_2g.sband.n_channels;
sband = &phy->sband_5g;
+ } else {
+ sband = &phy->sband_2g;
}
if (idx >= sband->sband.n_channels) {
@@ -777,10 +902,17 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
return;
wcid->rx_check_pn = true;
+
+ /* data frame */
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
ieee80211_get_key_rx_seq(key, i, &seq);
memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
}
+
+ /* robust management frame */
+ ieee80211_get_key_rx_seq(key, -1, &seq);
+ memcpy(wcid->rx_key_pn[i], seq.ccmp.pn, sizeof(seq.ccmp.pn));
+
}
EXPORT_SYMBOL(mt76_wcid_key_setup);
@@ -790,6 +922,7 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
struct ieee80211_sta **sta)
{
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
+ struct ieee80211_hdr *hdr = mt76_skb_get_hdr(skb);
struct mt76_rx_status mstat;
mstat = *((struct mt76_rx_status *)skb->cb);
@@ -812,6 +945,10 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb,
status->device_timestamp = mstat.timestamp;
status->mactime = mstat.timestamp;
+ if (ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control))
+ status->boottime_ns = ktime_get_boottime_ns();
+
BUILD_BUG_ON(sizeof(mstat) > sizeof(skb->cb));
BUILD_BUG_ON(sizeof(status->chain_signal) !=
sizeof(mstat.chain_signal));
@@ -828,7 +965,7 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
struct mt76_wcid *wcid = status->wcid;
struct ieee80211_hdr *hdr;
- u8 tidno = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+ int security_idx;
int ret;
if (!(status->flag & RX_FLAG_DECRYPTED))
@@ -837,24 +974,39 @@ mt76_check_ccmp_pn(struct sk_buff *skb)
if (!wcid || !wcid->rx_check_pn)
return 0;
+ security_idx = status->qos_ctl & IEEE80211_QOS_CTL_TID_MASK;
+ if (status->flag & RX_FLAG_8023)
+ goto skip_hdr_check;
+
+ hdr = mt76_skb_get_hdr(skb);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
/*
* Validate the first fragment both here and in mac80211
* All further fragments will be validated by mac80211 only.
*/
- hdr = mt76_skb_get_hdr(skb);
if (ieee80211_is_frag(hdr) &&
!ieee80211_is_first_frag(hdr->frame_control))
return 0;
}
+ /* IEEE 802.11-2020, 12.5.3.4.4 "PN and replay detection" c):
+ *
+ * the recipient shall maintain a single replay counter for received
+ * individually addressed robust Management frames that are received
+ * with the To DS subfield equal to 0, [...]
+ */
+ if (ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_has_tods(hdr->frame_control))
+ security_idx = IEEE80211_NUM_TIDS;
+
+skip_hdr_check:
BUILD_BUG_ON(sizeof(status->iv) != sizeof(wcid->rx_key_pn[0]));
- ret = memcmp(status->iv, wcid->rx_key_pn[tidno],
+ ret = memcmp(status->iv, wcid->rx_key_pn[security_idx],
sizeof(status->iv));
if (ret <= 0)
return -EINVAL; /* replay */
- memcpy(wcid->rx_key_pn[tidno], status->iv, sizeof(status->iv));
+ memcpy(wcid->rx_key_pn[security_idx], status->iv, sizeof(status->iv));
if (status->flag & RX_FLAG_IV_STRIPPED)
status->flag |= RX_FLAG_PN_VALIDATED;
@@ -1109,6 +1261,7 @@ mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif,
wcid->ext_phy = ext_phy;
rcu_assign_pointer(dev->wcid[wcid->idx], wcid);
+ mt76_packet_id_init(wcid);
out:
mutex_unlock(&dev->mutex);
@@ -1127,7 +1280,8 @@ void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
if (dev->drv->sta_remove)
dev->drv->sta_remove(dev, vif, sta);
- mt76_tx_status_check(dev, wcid, true);
+ mt76_packet_id_flush(dev, wcid);
+
mt76_wcid_mask_clear(dev->wcid_mask, idx);
mt76_wcid_mask_clear(dev->wcid_phy_mask, idx);
}
@@ -1270,7 +1424,7 @@ int mt76_get_rate(struct mt76_dev *dev,
int i, offset = 0, len = sband->n_bitrates;
if (cck) {
- if (sband == &dev->phy.sband_5g.sband)
+ if (sband != &dev->phy.sband_2g.sband)
return 0;
idx &= ~BIT(2); /* short preamble */
@@ -1336,3 +1490,49 @@ mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
return hwq;
}
EXPORT_SYMBOL_GPL(mt76_init_queue);
+
+u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx)
+{
+ int offset = 0;
+ struct ieee80211_rate *rate;
+
+ if (phy->chandef.chan->band != NL80211_BAND_2GHZ)
+ offset = 4;
+
+ /* pick the lowest rate for hidden nodes */
+ if (rateidx < 0)
+ rateidx = 0;
+
+ rate = &mt76_rates[offset + rateidx];
+
+ return rate->hw_value;
+}
+EXPORT_SYMBOL_GPL(mt76_calculate_default_rate);
+
+void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
+ struct mt76_sta_stats *stats)
+{
+ int i, ei = wi->initial_stat_idx;
+ u64 *data = wi->data;
+
+ wi->sta_count++;
+
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_CCK];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_OFDM];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HT_GF];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_VHT];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_SU];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_EXT_SU];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_TB];
+ data[ei++] += stats->tx_mode[MT_PHY_TYPE_HE_MU];
+
+ for (i = 0; i < ARRAY_SIZE(stats->tx_bw); i++)
+ data[ei++] += stats->tx_bw[i];
+
+ for (i = 0; i < 12; i++)
+ data[ei++] += stats->tx_mcs[i];
+
+ wi->worker_stat_count = ei - wi->initial_stat_idx;
+}
+EXPORT_SYMBOL_GPL(mt76_ethtool_worker);
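mt76_ethtool_worker() is intended to be called once per station from a driver's ethtool stats path, accumulating the per-station counters into the shared data array starting at initial_stat_idx. A sketch of how a driver might wire it up; struct example_sta, its stats member, and the ei bookkeeping are assumptions for illustration, not code from this patch:

struct example_sta {
	struct mt76_sta_stats stats;
	/* ... driver-private per-station state ... */
};

static void example_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
{
	struct mt76_ethtool_worker_info *wi = wi_data;
	struct example_sta *msta = (struct example_sta *)sta->drv_priv;

	mt76_ethtool_worker(wi, &msta->stats);
}

static void example_get_et_stats(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ethtool_stats *stats, u64 *data)
{
	int ei = 0;	/* driver-global counters would be written first */
	struct mt76_ethtool_worker_info wi = {
		.data = data,
		.initial_stat_idx = ei,
	};

	ieee80211_iterate_stations_atomic(hw, example_ethtool_worker, &wi);

	ei += wi.worker_stat_count;
	WARN_ON_ONCE(ei != stats->n_stats);	/* sanity-check the layout */
}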
diff --git a/drivers/net/wireless/mediatek/mt76/mcu.c b/drivers/net/wireless/mediatek/mt76/mcu.c
index d3a5e2c4f12a..3f94c37251df 100644
--- a/drivers/net/wireless/mediatek/mt76/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mcu.c
@@ -106,13 +106,13 @@ out:
}
EXPORT_SYMBOL_GPL(mt76_mcu_skb_send_and_get_msg);
-int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
- int len)
+int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
+ int len, int max_len)
{
int err, cur_len;
while (len > 0) {
- cur_len = min_t(int, 4096 - dev->mcu_ops->headroom, len);
+ cur_len = min_t(int, max_len, len);
err = mt76_mcu_send_msg(dev, cmd, data, cur_len, false);
if (err)
@@ -129,4 +129,4 @@ int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
return 0;
}
-EXPORT_SYMBOL_GPL(mt76_mcu_send_firmware);
+EXPORT_SYMBOL_GPL(__mt76_mcu_send_firmware);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 25c5ceef5257..e2da720a91b6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -29,6 +29,7 @@
struct mt76_dev;
struct mt76_phy;
struct mt76_wcid;
+struct mt76s_intr;
struct mt76_reg_pair {
u32 reg;
@@ -244,6 +245,8 @@ struct mt76_wcid {
struct ewma_signal rssi;
int inactive_count;
+ struct rate_info rate;
+
u16 idx;
u8 hw_key_idx;
u8 hw_key_idx2;
@@ -253,13 +256,14 @@ struct mt76_wcid {
u8 amsdu:1;
u8 rx_check_pn;
- u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
+ u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6];
u16 cipher;
u32 tx_info;
bool sw_iv;
- u8 packet_id;
+ struct list_head list;
+ struct idr pktid;
};
struct mt76_txq {
@@ -305,8 +309,13 @@ struct mt76_rx_tid {
#define MT_PACKET_ID_NO_SKB 1
#define MT_PACKET_ID_FIRST 2
#define MT_PACKET_ID_HAS_RATE BIT(7)
-
-#define MT_TX_STATUS_SKB_TIMEOUT HZ
+/* Timeout for giving up on a pending TXS callback, measured from the
+ * time the DMA_DONE callback was seen. The packet has already been
+ * processed by then, so the firmware should deliver the TXS callback
+ * shortly afterwards if it is going to deliver one at all.
+ */
+#define MT_TX_STATUS_SKB_TIMEOUT (HZ / 4)
struct mt76_tx_cb {
unsigned long jiffies;
@@ -344,7 +353,6 @@ struct mt76_hw_cap {
#define MT_DRV_SW_RX_AIRTIME BIT(2)
#define MT_DRV_RX_DMA_HDR BIT(3)
#define MT_DRV_HW_MGMT_TXQ BIT(4)
-#define MT_DRV_AMSDU_OFFLOAD BIT(5)
struct mt76_driver_ops {
u32 drv_flags;
@@ -498,13 +506,18 @@ struct mt76_sdio {
struct sdio_func *func;
void *intr_data;
+ u8 hw_ver;
+ wait_queue_head_t wait;
struct {
int pse_data_quota;
int ple_data_quota;
int pse_mcu_quota;
+ int pse_page_size;
int deficit;
} sched;
+
+ int (*parse_irq)(struct mt76_dev *dev, struct mt76s_intr *intr);
};
struct mt76_mmio {
@@ -545,6 +558,11 @@ struct mt76_rx_status {
s8 chain_signal[IEEE80211_MAX_CHAINS];
};
+struct mt76_freq_range_power {
+ const struct cfg80211_sar_freq_ranges *range;
+ s8 power;
+};
+
struct mt76_testmode_ops {
int (*set_state)(struct mt76_phy *phy, enum mt76_testmode_state state);
int (*set_params)(struct mt76_phy *phy, struct nlattr **tb,
@@ -617,6 +635,7 @@ struct mt76_phy {
struct mt76_hw_cap cap;
struct mt76_sband sband_2g;
struct mt76_sband sband_5g;
+ struct mt76_sband sband_6g;
u8 macaddr[ETH_ALEN];
@@ -636,6 +655,8 @@ struct mt76_phy {
struct sk_buff **tail;
u16 seqno;
} rx_amsdu[__MT_RXQ_MAX];
+
+ struct mt76_freq_range_power *frp;
};
struct mt76_dev {
@@ -683,7 +704,8 @@ struct mt76_dev {
int token_count;
wait_queue_head_t tx_wait;
- struct sk_buff_head status_list;
+ /* spinlock used to protect the wcid pktid linked list */
+ spinlock_t status_lock;
u32 wcid_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
u32 wcid_phy_mask[DIV_ROUND_UP(MT76_N_WCIDS, 32)];
@@ -692,6 +714,7 @@ struct mt76_dev {
struct mt76_wcid global_wcid;
struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
+ struct list_head wcid_list;
u32 rev;
@@ -753,6 +776,22 @@ enum mt76_phy_type {
MT_PHY_TYPE_HE_EXT_SU,
MT_PHY_TYPE_HE_TB,
MT_PHY_TYPE_HE_MU,
+ __MT_PHY_TYPE_HE_MAX,
+};
+
+struct mt76_sta_stats {
+ u64 tx_mode[__MT_PHY_TYPE_HE_MAX];
+ u64 tx_bw[4]; /* 20, 40, 80, 160 */
+ u64 tx_nss[4]; /* 1, 2, 3, 4 */
+ u64 tx_mcs[16]; /* mcs idx */
+};
+
+struct mt76_ethtool_worker_info {
+ u64 *data;
+ int idx;
+ int initial_stat_idx;
+ int worker_stat_count;
+ int sta_count;
};
#define CCK_RATE(_idx, _rate) { \
@@ -769,6 +808,7 @@ enum mt76_phy_type {
}
extern struct ieee80211_rate mt76_rates[12];
+extern const struct cfg80211_sar_capa mt76_sar_capa;
#define __mt76_rr(dev, ...) (dev)->bus->rr((dev), __VA_ARGS__)
#define __mt76_wr(dev, ...) (dev)->bus->wr((dev), __VA_ARGS__)
@@ -869,7 +909,13 @@ struct mt76_phy *mt76_alloc_phy(struct mt76_dev *dev, unsigned int size,
int mt76_register_phy(struct mt76_phy *phy, bool vht,
struct ieee80211_rate *rates, int n_rates);
-struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
+struct dentry *mt76_register_debugfs_fops(struct mt76_phy *phy,
+ const struct file_operations *ops);
+static inline struct dentry *mt76_register_debugfs(struct mt76_dev *dev)
+{
+ return mt76_register_debugfs_fops(&dev->phy, NULL);
+}
+
int mt76_queues_read(struct seq_file *s, void *data);
void mt76_seq_puts_array(struct seq_file *file, const char *str,
s8 *val, int len);
@@ -881,6 +927,7 @@ int mt76_get_of_eeprom(struct mt76_dev *dev, void *data, int offset, int len);
struct mt76_queue *
mt76_init_queue(struct mt76_dev *dev, int qid, int idx, int n_desc,
int ring_base);
+u16 mt76_calculate_default_rate(struct mt76_phy *phy, int rateidx);
static inline int mt76_init_tx_queue(struct mt76_phy *phy, int qid, int idx,
int n_desc, int ring_base)
{
@@ -1077,9 +1124,9 @@ void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct ieee80211_key_conf *key);
void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
- __acquires(&dev->status_list.lock);
+ __acquires(&dev->status_lock);
void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
- __releases(&dev->status_list.lock);
+ __releases(&dev->status_lock);
int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct sk_buff *skb);
@@ -1096,8 +1143,7 @@ mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid, struct sk_buff *skb)
__mt76_tx_complete_skb(dev, wcid, skb, NULL);
}
-void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
- bool flush);
+void mt76_tx_status_check(struct mt76_dev *dev, bool flush);
int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
@@ -1203,6 +1249,8 @@ mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
}
+void mt76_ethtool_worker(struct mt76_ethtool_worker_info *wi,
+ struct mt76_sta_stats *stats);
int mt76_skb_adjust_pad(struct sk_buff *skb, int pad);
int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
u8 req_type, u16 val, u16 offset,
@@ -1220,8 +1268,27 @@ void mt76u_queues_deinit(struct mt76_dev *dev);
int mt76s_init(struct mt76_dev *dev, struct sdio_func *func,
const struct mt76_bus_ops *bus_ops);
-int mt76s_alloc_queues(struct mt76_dev *dev);
+int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid);
+int mt76s_alloc_tx(struct mt76_dev *dev);
void mt76s_deinit(struct mt76_dev *dev);
+void mt76s_sdio_irq(struct sdio_func *func);
+void mt76s_txrx_worker(struct mt76_sdio *sdio);
+bool mt76s_txqs_empty(struct mt76_dev *dev);
+int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func,
+ int hw_ver);
+u32 mt76s_rr(struct mt76_dev *dev, u32 offset);
+void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val);
+u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
+u32 mt76s_read_pcr(struct mt76_dev *dev);
+void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len);
+void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len);
+int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data,
+ int len);
+int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len);
struct sk_buff *
mt76_mcu_msg_alloc(struct mt76_dev *dev, const void *data,
@@ -1233,8 +1300,17 @@ int mt76_mcu_send_and_get_msg(struct mt76_dev *dev, int cmd, const void *data,
int len, bool wait_resp, struct sk_buff **ret);
int mt76_mcu_skb_send_and_get_msg(struct mt76_dev *dev, struct sk_buff *skb,
int cmd, bool wait_resp, struct sk_buff **ret);
-int mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
- int len);
+int __mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
+ int len, int max_len);
+static inline int
+mt76_mcu_send_firmware(struct mt76_dev *dev, int cmd, const void *data,
+ int len)
+{
+ int max_len = 4096 - dev->mcu_ops->headroom;
+
+ return __mt76_mcu_send_firmware(dev, cmd, data, len, max_len);
+}
+
static inline int
mt76_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, int len,
bool wait_resp)
@@ -1293,14 +1369,22 @@ mt76_token_put(struct mt76_dev *dev, int token)
return txwi;
}
-static inline int
-mt76_get_next_pkt_id(struct mt76_wcid *wcid)
+static inline void mt76_packet_id_init(struct mt76_wcid *wcid)
{
- wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
- if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
- wcid->packet_id == MT_PACKET_ID_NO_SKB)
- wcid->packet_id = MT_PACKET_ID_FIRST;
+ INIT_LIST_HEAD(&wcid->list);
+ idr_init(&wcid->pktid);
+}
- return wcid->packet_id;
+static inline void
+mt76_packet_id_flush(struct mt76_dev *dev, struct mt76_wcid *wcid)
+{
+ struct sk_buff_head list;
+
+ mt76_tx_status_lock(dev, &list);
+ mt76_tx_status_skb_get(dev, wcid, -1, &list);
+ mt76_tx_status_unlock(dev, &list);
+
+ idr_destroy(&wcid->pktid);
}
+
#endif
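The header changes above replace the per-wcid packet_id counter with an IDR plus a wcid list protected by dev->status_lock. A sketch of how the TX-status path would allocate a packet id under this scheme, inferred from the API shown in these hunks rather than copied from the patch body:

static int example_tx_status_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
				 struct sk_buff *skb)
{
	int pid;

	spin_lock_bh(&dev->status_lock);

	/* Hand out ids in [MT_PACKET_ID_FIRST, MT_PACKET_ID_MASK]; the skb
	 * can later be looked up by id and the id released with idr_remove().
	 */
	pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
			MT_PACKET_ID_MASK + 1, GFP_ATOMIC);
	if (pid >= 0 && list_empty(&wcid->list))
		list_add_tail(&wcid->list, &dev->wcid_list);

	spin_unlock_bh(&dev->status_lock);

	return pid;
}

mt76_packet_id_flush() above is the teardown counterpart: it drains any still-pending status skbs for the wcid and destroys the IDR when the station or interface goes away.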
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 3972c56136a2..fe03e31989bb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -1458,7 +1458,7 @@ static void mt7603_mac_watchdog_reset(struct mt7603_dev *dev)
mt76_queue_rx_reset(dev, i);
}
- mt76_tx_status_check(&dev->mt76, NULL, true);
+ mt76_tx_status_check(&dev->mt76, true);
mt7603_dma_sched_reset(dev);
@@ -1471,17 +1471,20 @@ skip_dma_reset:
mutex_unlock(&dev->mt76.mutex);
mt76_worker_enable(&dev->mt76.tx_worker);
- napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
mt7603_beacon_set_timer(dev, -1, beacon_int);
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+
napi_enable(&dev->mt76.napi[0]);
napi_schedule(&dev->mt76.napi[0]);
napi_enable(&dev->mt76.napi[1]);
napi_schedule(&dev->mt76.napi[1]);
+ local_bh_enable();
ieee80211_wake_queues(dev->mt76.hw);
mt76_txq_schedule_all(&dev->mphy);
@@ -1814,7 +1817,7 @@ void mt7603_mac_work(struct work_struct *work)
bool reset = false;
int i, idx;
- mt76_tx_status_check(&dev->mt76, NULL, false);
+ mt76_tx_status_check(&dev->mt76, false);
mutex_lock(&dev->mt76.mutex);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index 8edea1e7a602..7ac4cd247a73 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -69,6 +69,7 @@ mt7603_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.hw_key_idx = -1;
+ mt76_packet_id_init(&mvif->sta.wcid);
eth_broadcast_addr(bc_addr);
mt7603_wtbl_init(dev, idx, mvif->idx, bc_addr);
@@ -107,6 +108,8 @@ mt7603_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
mutex_lock(&dev->mt76.mutex);
dev->mt76.vif_mask &= ~BIT(mvif->idx);
mutex_unlock(&dev->mt76.mutex);
+
+ mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
}
void mt7603_init_edcca(struct mt7603_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
index aa6cb668b012..3d94cdb4314a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/pci.c
@@ -28,7 +28,7 @@ mt76pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
index 83f9861ff522..2b97b9dde477 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/Makefile
@@ -17,4 +17,4 @@ mt7615e-$(CONFIG_MT7622_WMAC) += soc.o
mt7663-usb-sdio-common-y := usb_sdio.o
mt7663u-y := usb.o usb_mcu.o
-mt7663s-y := sdio.o sdio_mcu.o sdio_txrx.o
+mt7663s-y := sdio.o sdio_mcu.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
index cb4659771fd9..6fd6f067da49 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/debugfs.c
@@ -3,6 +3,33 @@
#include "mt7615.h"
static int
+mt7615_reg_set(void *data, u64 val)
+{
+ struct mt7615_dev *dev = data;
+
+ mt7615_mutex_acquire(dev);
+ mt76_wr(dev, dev->mt76.debugfs_reg, val);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7615_reg_get(void *data, u64 *val)
+{
+ struct mt7615_dev *dev = data;
+
+ mt7615_mutex_acquire(dev);
+ *val = mt76_rr(dev, dev->mt76.debugfs_reg);
+ mt7615_mutex_release(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7615_reg_get, mt7615_reg_set,
+ "0x%08llx\n");
+
+static int
mt7615_radar_pattern_set(void *data, u64 val)
{
struct mt7615_dev *dev = data;
@@ -506,7 +533,7 @@ int mt7615_init_debugfs(struct mt7615_dev *dev)
{
struct dentry *dir;
- dir = mt76_register_debugfs(&dev->mt76);
+ dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval);
if (!dir)
return -ENOMEM;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/init.c b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
index 2f1ac644e018..47f23ac905a3 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/init.c
@@ -49,12 +49,14 @@ int mt7615_thermal_init(struct mt7615_dev *dev)
{
struct wiphy *wiphy = mt76_hw(dev)->wiphy;
struct device *hwmon;
+ const char *name;
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
- hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
- wiphy_name(wiphy), dev,
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7615_%s",
+ wiphy_name(wiphy));
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, dev,
mt7615_hwmon_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
index ff3f85e4087c..423f69015e3e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mac.c
@@ -755,12 +755,15 @@ int mt7615_mac_write_txwi(struct mt7615_dev *dev, __le32 *txwi,
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
txwi[3] |= cpu_to_le32(MT_TXD3_NO_ACK);
- txwi[7] = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
- FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
- if (!is_mmio)
- txwi[8] = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
- FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ val = FIELD_PREP(MT_TXD7_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD7_SUB_TYPE, fc_stype) |
+ FIELD_PREP(MT_TXD7_SPE_IDX, 0x18);
+ txwi[7] = cpu_to_le32(val);
+ if (!is_mmio) {
+ val = FIELD_PREP(MT_TXD8_L_TYPE, fc_type) |
+ FIELD_PREP(MT_TXD8_L_SUB_TYPE, fc_stype);
+ txwi[8] = cpu_to_le32(val);
+ }
return 0;
}
@@ -1494,32 +1497,41 @@ out:
}
static void
-mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
+mt7615_txwi_free(struct mt7615_dev *dev, struct mt76_txwi_cache *txwi)
{
struct mt76_dev *mdev = &dev->mt76;
- struct mt76_txwi_cache *txwi;
__le32 *txwi_data;
u32 val;
u8 wcid;
- trace_mac_tx_free(dev, token);
- txwi = mt76_token_put(mdev, token);
- if (!txwi)
- return;
+ mt7615_txp_skb_unmap(mdev, txwi);
+ if (!txwi->skb)
+ goto out;
txwi_data = (__le32 *)mt76_get_txwi_ptr(mdev, txwi);
val = le32_to_cpu(txwi_data[1]);
wcid = FIELD_GET(MT_TXD1_WLAN_IDX, val);
+ mt76_tx_complete_skb(mdev, wcid, txwi->skb);
- mt7615_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- mt76_tx_complete_skb(mdev, wcid, txwi->skb);
- txwi->skb = NULL;
- }
-
+out:
+ txwi->skb = NULL;
mt76_put_txwi(mdev, txwi);
}
+static void
+mt7615_mac_tx_free_token(struct mt7615_dev *dev, u16 token)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+
+ trace_mac_tx_free(dev, token);
+ txwi = mt76_token_put(mdev, token);
+ if (!txwi)
+ return;
+
+ mt7615_txwi_free(dev, txwi);
+}
+
static void mt7615_mac_tx_free(struct mt7615_dev *dev, struct sk_buff *skb)
{
struct mt7615_tx_free *free = (struct mt7615_tx_free *)skb->data;
@@ -2014,7 +2026,7 @@ void mt7615_mac_work(struct work_struct *work)
mt7615_mutex_release(phy->dev);
- mt76_tx_status_check(mphy->dev, NULL, false);
+ mt76_tx_status_check(mphy->dev, false);
timeout = mt7615_get_macwork_timeout(phy->dev);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work, timeout);
@@ -2026,16 +2038,8 @@ void mt7615_tx_token_put(struct mt7615_dev *dev)
int id;
spin_lock_bh(&dev->mt76.token_lock);
- idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7615_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb) {
- struct ieee80211_hw *hw;
-
- hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
- ieee80211_free_txskb(hw, txwi->skb);
- }
- mt76_put_txwi(&dev->mt76, txwi);
- }
+ idr_for_each_entry(&dev->mt76.token, txwi, id)
+ mt7615_txwi_free(dev, txwi);
spin_unlock_bh(&dev->mt76.token_lock);
idr_destroy(&dev->mt76.token);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
index dada43d6d879..890d9b07e156 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c
@@ -135,8 +135,6 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
int i;
switch (type) {
- case NL80211_IFTYPE_MESH_POINT:
- case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_STATION:
/* prefer hw bssid slot 1-3 */
i = get_free_idx(mask, HW_BSSID_1, HW_BSSID_3);
@@ -160,6 +158,8 @@ static int get_omac_idx(enum nl80211_iftype type, u64 mask)
return HW_BSSID_0;
break;
+ case NL80211_IFTYPE_ADHOC:
+ case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP:
/* ap uses hw bssid 0 and ext bssid */
@@ -231,6 +231,8 @@ static int mt7615_add_interface(struct ieee80211_hw *hw,
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
+ mt76_packet_id_init(&mvif->sta.wcid);
+
mt7615_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -281,6 +283,8 @@ static void mt7615_remove_interface(struct ieee80211_hw *hw,
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_packet_id_flush(&dev->mt76, &mvif->sta.wcid);
}
static void mt7615_init_dfs_state(struct mt7615_phy *phy)
@@ -567,8 +571,8 @@ static void mt7615_bss_info_changed(struct ieee80211_hw *hw,
mt7615_mcu_add_bss_info(phy, vif, NULL, true);
mt7615_mcu_sta_add(phy, vif, NULL, true);
- if (vif->p2p)
- mt7615_mcu_set_p2p_oppps(hw, vif);
+ if (mt7615_firmware_offload(dev) && vif->p2p)
+ mt76_connac_mcu_set_p2p_oppps(hw, vif);
}
if (changed & (BSS_CHANGED_BEACON |
@@ -858,8 +862,6 @@ mt7615_get_stats(struct ieee80211_hw *hw,
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
- memset(mib, 0, sizeof(*mib));
-
mt7615_mutex_release(phy->dev);
return 0;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
index f8a09692d3e4..25f9cbe2cd61 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c
@@ -808,7 +808,8 @@ mt7615_mcu_ctrl_pm_state(struct mt7615_dev *dev, int band, int state)
static int
mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+ struct ieee80211_sta *sta, struct mt7615_phy *phy,
+ bool enable)
{
struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
u32 type = vif->p2p ? NETWORK_P2P : NETWORK_INFRA;
@@ -821,6 +822,7 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MONITOR:
break;
case NL80211_IFTYPE_STATION:
/* TODO: enable BSS_INFO_UAPSD & BSS_INFO_PM */
@@ -840,14 +842,19 @@ mt7615_mcu_bss_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
bss = (struct bss_info_basic *)tlv;
- memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
- bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
bss->network_type = cpu_to_le32(type);
- bss->dtim_period = vif->bss_conf.dtim_period;
bss->bmc_tx_wlan_idx = wlan_idx;
bss->wmm_idx = mvif->mt76.wmm_idx;
bss->active = enable;
+ if (vif->type != NL80211_IFTYPE_MONITOR) {
+ memcpy(bss->bssid, vif->bss_conf.bssid, ETH_ALEN);
+ bss->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int);
+ bss->dtim_period = vif->bss_conf.dtim_period;
+ } else {
+ memcpy(bss->bssid, phy->mt76->macaddr, ETH_ALEN);
+ }
+
return 0;
}
@@ -863,6 +870,7 @@ mt7615_mcu_bss_omac_tlv(struct sk_buff *skb, struct ieee80211_vif *vif)
tlv = mt76_connac_mcu_add_tlv(skb, BSS_INFO_OMAC, sizeof(*omac));
switch (vif->type) {
+ case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
if (vif->p2p)
@@ -929,7 +937,7 @@ mt7615_mcu_add_bss(struct mt7615_phy *phy, struct ieee80211_vif *vif,
if (enable)
mt7615_mcu_bss_omac_tlv(skb, vif);
- mt7615_mcu_bss_basic_tlv(skb, vif, sta, enable);
+ mt7615_mcu_bss_basic_tlv(skb, vif, sta, phy, enable);
if (enable && mvif->mt76.omac_idx >= EXT_BSSID_START &&
mvif->mt76.omac_idx < REPEATER_BSSID_START)
@@ -1690,6 +1698,19 @@ int mt7615_mcu_fw_log_2_host(struct mt7615_dev *dev, u8 ctrl)
sizeof(data), true);
}
+static int mt7615_mcu_cal_cache_apply(struct mt7615_dev *dev)
+{
+ struct {
+ bool cache_enable;
+ u8 pad[3];
+ } data = {
+ .cache_enable = true
+ };
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_CAL_CACHE, &data,
+ sizeof(data), false);
+}
+
static int mt7663_load_n9(struct mt7615_dev *dev, const char *name)
{
u32 offset = 0, override_addr = 0, flag = FW_START_DLYCAL;
@@ -1898,9 +1919,14 @@ int mt7615_mcu_init(struct mt7615_dev *dev)
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
dev_dbg(dev->mt76.dev, "Firmware init done\n");
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
- mt7615_mcu_fw_log_2_host(dev, 0);
- return 0;
+ if (dev->dbdc_support) {
+ ret = mt7615_mcu_cal_cache_apply(dev);
+ if (ret)
+ return ret;
+ }
+
+ return mt7615_mcu_fw_log_2_host(dev, 0);
}
EXPORT_SYMBOL_GPL(mt7615_mcu_init);
@@ -2761,53 +2787,3 @@ int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_ROC, &req,
sizeof(req), false);
}
-
-int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv;
- int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
- struct mt7615_dev *dev = mt7615_hw_dev(hw);
- struct {
- __le32 ct_win;
- u8 bss_idx;
- u8 rsv[3];
- } __packed req = {
- .ct_win = cpu_to_le32(ct_window),
- .bss_idx = mvif->mt76.idx,
- };
-
- if (!mt7615_firmware_offload(dev))
- return -ENOTSUPP;
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_P2P_OPPPS, &req,
- sizeof(req), false);
-}
-
-u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
-{
- struct {
- __le32 addr;
- __le32 val;
- } __packed req = {
- .addr = cpu_to_le32(offset),
- };
-
- return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req),
- true);
-}
-EXPORT_SYMBOL_GPL(mt7615_mcu_reg_rr);
-
-void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
-{
- struct {
- __le32 addr;
- __le32 val;
- } __packed req = {
- .addr = cpu_to_le32(offset),
- .val = cpu_to_le32(val),
- };
-
- mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false);
-}
-EXPORT_SYMBOL_GPL(mt7615_mcu_reg_wr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
index d0c64a9b09cf..6ff6d5800918 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/mt7615.h
@@ -107,6 +107,18 @@ struct mt7615_wtbl_rate_desc {
struct mt7615_sta *sta;
};
+struct mt7663s_intr {
+ u32 isr;
+ struct {
+ u32 wtqcr[8];
+ } tx;
+ struct {
+ u16 num[2];
+ u16 len[2][16];
+ } rx;
+ u32 rec_mb[2];
+} __packed;
+
struct mt7615_sta {
struct mt76_wcid wcid; /* must be first */
@@ -541,8 +553,6 @@ int mt7615_mcu_apply_rx_dcoc(struct mt7615_phy *phy);
int mt7615_mcu_apply_tx_dpd(struct mt7615_phy *phy);
int mt7615_dfs_init_radar_detector(struct mt7615_phy *phy);
-int mt7615_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
int mt7615_mcu_set_roc(struct mt7615_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_channel *chan, int duration);
@@ -555,8 +565,6 @@ int mt7615_mac_set_beacon_filter(struct mt7615_phy *phy,
int mt7615_mcu_set_bss_pm(struct mt7615_dev *dev, struct ieee80211_vif *vif,
bool enable);
int __mt7663_load_firmware(struct mt7615_dev *dev);
-u32 mt7615_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
-void mt7615_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
void mt7615_coredump_work(struct work_struct *work);
void mt7622_trigger_hif_int(struct mt7615_dev *dev, bool en);
@@ -573,10 +581,6 @@ int mt7663_usb_sdio_register_device(struct mt7615_dev *dev);
int mt7663u_mcu_init(struct mt7615_dev *dev);
/* sdio */
-u32 mt7663s_read_pcr(struct mt7615_dev *dev);
int mt7663s_mcu_init(struct mt7615_dev *dev);
-void mt7663s_txrx_worker(struct mt76_worker *w);
-void mt7663s_rx_work(struct work_struct *work);
-void mt7663s_sdio_irq(struct sdio_func *func);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
index 11f169cdd603..b808248943ea 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci.c
@@ -39,7 +39,7 @@ static int mt7615_pci_probe(struct pci_dev *pdev,
if (ret < 0)
return ret;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto error;
@@ -164,12 +164,14 @@ static int mt7615_pci_resume(struct pci_dev *pdev)
dev_err(mdev->dev, "PDMA engine must be reinitialized\n");
mt76_worker_enable(&mdev->tx_worker);
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
+ local_bh_enable();
if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state) &&
mt7615_firmware_offload(dev))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
index da87c02a73eb..5ee52cd70a4b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/pci_mac.c
@@ -198,7 +198,7 @@ void mt7615_dma_reset(struct mt7615_dev *dev)
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
- mt76_tx_status_check(&dev->mt76, NULL, true);
+ mt76_tx_status_check(&dev->mt76, true);
mt7615_dma_start(dev);
}
@@ -326,6 +326,8 @@ void mt7615_mac_reset_work(struct work_struct *work)
clear_bit(MT76_RESET, &phy2->mt76->state);
mt76_worker_enable(&dev->mt76.tx_worker);
+
+ local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
@@ -334,6 +336,7 @@ void mt7615_mac_reset_work(struct work_struct *work)
napi_enable(&dev->mt76.napi[1]);
napi_schedule(&dev->mt76.napi[1]);
+ local_bh_enable();
ieee80211_wake_queues(mt76_hw(dev));
if (ext_phy)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
index 305bb8597531..31c4a76b7f91 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio.c
@@ -14,8 +14,8 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
+#include "../sdio.h"
#include "mt7615.h"
-#include "sdio.h"
#include "mac.h"
#include "mcu.h"
@@ -24,200 +24,19 @@ static const struct sdio_device_id mt7663s_table[] = {
{ } /* Terminating entry */
};
-static u32 mt7663s_read_whisr(struct mt76_dev *dev)
+static void mt7663s_txrx_worker(struct mt76_worker *w)
{
- return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
-}
-
-u32 mt7663s_read_pcr(struct mt7615_dev *dev)
-{
- struct mt76_sdio *sdio = &dev->mt76.sdio;
-
- return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
-}
-
-static u32 mt7663s_read_mailbox(struct mt76_dev *dev, u32 offset)
-{
- struct sdio_func *func = dev->sdio.func;
- u32 val = ~0, status;
- int err;
-
- sdio_claim_host(func);
-
- sdio_writel(func, offset, MCR_H2DSM0R, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting address [err=%d]\n", err);
- goto out;
- }
-
- sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
- goto out;
- }
-
- err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
- status & H2D_SW_INT_READ, 0, 1000000);
- if (err < 0) {
- dev_err(dev->dev, "query whisr timeout\n");
- goto out;
- }
-
- sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
- goto out;
- }
-
- val = sdio_readl(func, MCR_H2DSM0R, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
- goto out;
- }
-
- if (val != offset) {
- dev_err(dev->dev, "register mismatch\n");
- val = ~0;
- goto out;
- }
-
- val = sdio_readl(func, MCR_D2HRM1R, &err);
- if (err < 0)
- dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
-
-out:
- sdio_release_host(func);
-
- return val;
-}
-
-static void mt7663s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
-{
- struct sdio_func *func = dev->sdio.func;
- u32 status;
- int err;
-
- sdio_claim_host(func);
-
- sdio_writel(func, offset, MCR_H2DSM0R, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting address [err=%d]\n", err);
- goto out;
- }
-
- sdio_writel(func, val, MCR_H2DSM1R, &err);
- if (err < 0) {
- dev_err(dev->dev,
- "failed setting write value [err=%d]\n", err);
- goto out;
- }
-
- sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
- goto out;
- }
-
- err = readx_poll_timeout(mt7663s_read_whisr, dev, status,
- status & H2D_SW_INT_WRITE, 0, 1000000);
- if (err < 0) {
- dev_err(dev->dev, "query whisr timeout\n");
- goto out;
- }
-
- sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
- goto out;
- }
-
- val = sdio_readl(func, MCR_H2DSM0R, &err);
- if (err < 0) {
- dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
- goto out;
- }
-
- if (val != offset)
- dev_err(dev->dev, "register mismatch\n");
-
-out:
- sdio_release_host(func);
-}
-
-static u32 mt7663s_rr(struct mt76_dev *dev, u32 offset)
-{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
- return dev->mcu_ops->mcu_rr(dev, offset);
- else
- return mt7663s_read_mailbox(dev, offset);
-}
-
-static void mt7663s_wr(struct mt76_dev *dev, u32 offset, u32 val)
-{
- if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
- dev->mcu_ops->mcu_wr(dev, offset, val);
- else
- mt7663s_write_mailbox(dev, offset, val);
-}
+ struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
+ txrx_worker);
+ struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
-static u32 mt7663s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
-{
- val |= mt7663s_rr(dev, offset) & ~mask;
- mt7663s_wr(dev, offset, val);
-
- return val;
-}
-
-static void mt7663s_write_copy(struct mt76_dev *dev, u32 offset,
- const void *data, int len)
-{
- const u32 *val = data;
- int i;
-
- for (i = 0; i < len / sizeof(u32); i++) {
- mt7663s_wr(dev, offset, val[i]);
- offset += sizeof(u32);
- }
-}
-
-static void mt7663s_read_copy(struct mt76_dev *dev, u32 offset,
- void *data, int len)
-{
- u32 *val = data;
- int i;
-
- for (i = 0; i < len / sizeof(u32); i++) {
- val[i] = mt7663s_rr(dev, offset);
- offset += sizeof(u32);
- }
-}
-
-static int mt7663s_wr_rp(struct mt76_dev *dev, u32 base,
- const struct mt76_reg_pair *data,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- mt7663s_wr(dev, data->reg, data->value);
- data++;
- }
-
- return 0;
-}
-
-static int mt7663s_rd_rp(struct mt76_dev *dev, u32 base,
- struct mt76_reg_pair *data,
- int len)
-{
- int i;
-
- for (i = 0; i < len; i++) {
- data->value = mt7663s_rr(dev, data->reg);
- data++;
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ queue_work(mdev->wq, &dev->pm.wake_work);
+ return;
}
-
- return 0;
+ mt76s_txrx_worker(sdio);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
static void mt7663s_init_work(struct work_struct *work)
@@ -231,64 +50,24 @@ static void mt7663s_init_work(struct work_struct *work)
mt7615_init_work(dev);
}
-static int mt7663s_hw_init(struct mt7615_dev *dev, struct sdio_func *func)
+static int mt7663s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
{
- u32 status, ctrl;
- int ret;
-
- sdio_claim_host(func);
-
- ret = sdio_enable_func(func);
- if (ret < 0)
- goto release;
+ struct mt76_sdio *sdio = &dev->sdio;
+ struct mt7663s_intr *irq_data = sdio->intr_data;
+ int i, err;
- /* Get ownership from the device */
- sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
- MCR_WHLPCR, &ret);
- if (ret < 0)
- goto disable_func;
-
- ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
- status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
- if (ret < 0) {
- dev_err(dev->mt76.dev, "Cannot get ownership from device");
- goto disable_func;
- }
-
- ret = sdio_set_block_size(func, 512);
- if (ret < 0)
- goto disable_func;
-
- /* Enable interrupt */
- sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
- if (ret < 0)
- goto disable_func;
-
- ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
- sdio_writel(func, ctrl, MCR_WHIER, &ret);
- if (ret < 0)
- goto disable_func;
-
- /* set WHISR as read clear and Rx aggregation number as 16 */
- ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
- sdio_writel(func, ctrl, MCR_WHCR, &ret);
- if (ret < 0)
- goto disable_func;
-
- ret = sdio_claim_irq(func, mt7663s_sdio_irq);
- if (ret < 0)
- goto disable_func;
+ err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ if (err)
+ return err;
- sdio_release_host(func);
+ intr->isr = irq_data->isr;
+ intr->rec_mb = irq_data->rec_mb;
+ intr->tx.wtqcr = irq_data->tx.wtqcr;
+ intr->rx.num = irq_data->rx.num;
+ for (i = 0; i < 2 ; i++)
+ intr->rx.len[i] = irq_data->rx.len[i];
return 0;
-
-disable_func:
- sdio_disable_func(func);
-release:
- sdio_release_host(func);
-
- return ret;
}
static int mt7663s_probe(struct sdio_func *func,
@@ -307,13 +86,13 @@ static int mt7663s_probe(struct sdio_func *func,
.update_survey = mt7615_update_channel,
};
static const struct mt76_bus_ops mt7663s_ops = {
- .rr = mt7663s_rr,
- .rmw = mt7663s_rmw,
- .wr = mt7663s_wr,
- .write_copy = mt7663s_write_copy,
- .read_copy = mt7663s_read_copy,
- .wr_rp = mt7663s_wr_rp,
- .rd_rp = mt7663s_rd_rp,
+ .rr = mt76s_rr,
+ .rmw = mt76s_rmw,
+ .wr = mt76s_wr,
+ .write_copy = mt76s_write_copy,
+ .read_copy = mt76s_read_copy,
+ .wr_rp = mt76s_wr_rp,
+ .rd_rp = mt76s_rd_rp,
.type = MT76_BUS_SDIO,
};
struct ieee80211_ops *ops;
@@ -341,7 +120,7 @@ static int mt7663s_probe(struct sdio_func *func,
if (ret < 0)
goto error;
- ret = mt7663s_hw_init(dev, func);
+ ret = mt76s_hw_init(mdev, func, MT76_CONNAC_SDIO);
if (ret)
goto error;
@@ -349,8 +128,9 @@ static int mt7663s_probe(struct sdio_func *func,
(mt76_rr(dev, MT_HW_REV) & 0xff);
dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+ mdev->sdio.parse_irq = mt7663s_parse_intr;
mdev->sdio.intr_data = devm_kmalloc(mdev->dev,
- sizeof(struct mt76s_intr),
+ sizeof(struct mt7663s_intr),
GFP_KERNEL);
if (!mdev->sdio.intr_data) {
ret = -ENOMEM;
@@ -367,7 +147,11 @@ static int mt7663s_probe(struct sdio_func *func,
}
}
- ret = mt76s_alloc_queues(&dev->mt76);
+ ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
+ if (ret)
+ goto error;
+
+ ret = mt76s_alloc_tx(mdev);
if (ret)
goto error;
@@ -432,7 +216,7 @@ static int mt7663s_suspend(struct device *dev)
cancel_work_sync(&mdev->mt76.sdio.stat_work);
clear_bit(MT76_READING_STATS, &mdev->mphy.state);
- mt76_tx_status_check(&mdev->mt76, NULL, true);
+ mt76_tx_status_check(&mdev->mt76, true);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
index 45c1cd3b9f49..dc9a2f0b45a5 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_mcu.c
@@ -10,11 +10,11 @@
#include <linux/module.h>
#include <linux/iopoll.h>
+#include "../sdio.h"
#include "mt7615.h"
#include "mac.h"
#include "mcu.h"
#include "regs.h"
-#include "sdio.h"
static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
{
@@ -27,6 +27,7 @@ static int mt7663s_mcu_init_sched(struct mt7615_dev *dev)
MT_HIF1_MIN_QUOTA);
sdio->sched.ple_data_quota = mt76_get_field(dev, MT_PLE_PG_HIF0_GROUP,
MT_HIF0_MIN_QUOTA);
+ sdio->sched.pse_page_size = MT_PSE_PAGE_SZ;
txdwcnt = mt76_get_field(dev, MT_PP_TXDWCNT,
MT_PP_TXDWCNT_TX1_ADD_DW_CNT);
sdio->sched.deficit = txdwcnt << 2;
@@ -63,7 +64,7 @@ static int __mt7663s_mcu_drv_pmctrl(struct mt7615_dev *dev)
sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
- ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+ ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot get ownership from device");
@@ -111,7 +112,7 @@ static int mt7663s_mcu_fw_pmctrl(struct mt7615_dev *dev)
sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
- ret = readx_poll_timeout(mt7663s_read_pcr, dev, status,
+ ret = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
!(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
if (ret < 0) {
dev_err(dev->mt76.dev, "Cannot set ownership to device");
@@ -137,8 +138,8 @@ int mt7663s_mcu_init(struct mt7615_dev *dev)
.mcu_skb_send_msg = mt7663s_mcu_send_message,
.mcu_parse_response = mt7615_mcu_parse_response,
.mcu_restart = mt7615_mcu_restart,
- .mcu_rr = mt7615_mcu_reg_rr,
- .mcu_wr = mt7615_mcu_reg_wr,
+ .mcu_rr = mt76_connac_mcu_reg_rr,
+ .mcu_wr = mt76_connac_mcu_reg_wr,
};
struct mt7615_mcu_ops *mcu_ops;
int ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
index 996d48cca18a..bd2939ebcbf4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb_sdio.c
@@ -169,7 +169,7 @@ bool mt7663_usb_sdio_tx_status_data(struct mt76_dev *mdev, u8 *update)
mt7615_mac_sta_poll(dev);
mt7615_mutex_release(dev);
- return 0;
+ return false;
}
EXPORT_SYMBOL_GPL(mt7663_usb_sdio_tx_status_data);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac.h b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
index f49d97d0a1c5..e7f01c2978a2 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac.h
@@ -85,9 +85,14 @@ struct mt76_connac_coredump {
extern const struct wiphy_wowlan_support mt76_connac_wowlan_support;
+static inline bool is_mt7922(struct mt76_dev *dev)
+{
+ return mt76_chip(dev) == 0x7922;
+}
+
static inline bool is_mt7921(struct mt76_dev *dev)
{
- return mt76_chip(dev) == 0x7961;
+ return mt76_chip(dev) == 0x7961 || is_mt7922(dev);
}
static inline bool is_mt7663(struct mt76_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
index 5c3a81e5f559..26b4b875dcc0 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c
@@ -74,7 +74,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_init_download);
int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
{
- struct mt76_dev *dev = phy->dev;
+ int len, i, n_max_channels, n_2ch = 0, n_5ch = 0, n_6ch = 0;
struct mt76_connac_mcu_channel_domain {
u8 alpha2[4]; /* regulatory_request.alpha2 */
u8 bw_2g; /* BW_20_40M 0
@@ -84,25 +84,29 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
* BW_20_40_80_8080M 4
*/
u8 bw_5g;
- __le16 pad;
+ u8 bw_6g;
+ u8 pad;
u8 n_2ch;
u8 n_5ch;
- __le16 pad2;
+ u8 n_6ch;
+ u8 pad2;
} __packed hdr = {
.bw_2g = 0,
- .bw_5g = 3,
+ .bw_5g = 3, /* BW_20_40_80_160M */
+ .bw_6g = 3,
};
struct mt76_connac_mcu_chan {
__le16 hw_value;
__le16 pad;
__le32 flags;
} __packed channel;
- int len, i, n_max_channels, n_2ch = 0, n_5ch = 0;
+ struct mt76_dev *dev = phy->dev;
struct ieee80211_channel *chan;
struct sk_buff *skb;
n_max_channels = phy->sband_2g.sband.n_channels +
- phy->sband_5g.sband.n_channels;
+ phy->sband_5g.sband.n_channels +
+ phy->sband_6g.sband.n_channels;
len = sizeof(hdr) + n_max_channels * sizeof(channel);
skb = mt76_mcu_msg_alloc(dev, NULL, len);
@@ -135,11 +139,24 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy)
skb_put_data(skb, &channel, sizeof(channel));
n_5ch++;
}
+ for (i = 0; i < phy->sband_6g.sband.n_channels; i++) {
+ chan = &phy->sband_6g.sband.channels[i];
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ channel.hw_value = cpu_to_le16(chan->hw_value);
+ channel.flags = cpu_to_le32(chan->flags);
+ channel.pad = 0;
+
+ skb_put_data(skb, &channel, sizeof(channel));
+ n_6ch++;
+ }
BUILD_BUG_ON(sizeof(dev->alpha2) > sizeof(hdr.alpha2));
memcpy(hdr.alpha2, dev->alpha2, sizeof(dev->alpha2));
hdr.n_2ch = n_2ch;
hdr.n_5ch = n_5ch;
+ hdr.n_6ch = n_6ch;
memcpy(__skb_push(skb, sizeof(hdr)), &hdr, sizeof(hdr));
@@ -689,9 +706,9 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (ht_cap->ht_supported)
mode |= PHY_TYPE_BIT_HT;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_TYPE_BIT_HE;
- } else if (band == NL80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
mode |= PHY_TYPE_BIT_OFDM;
if (ht_cap->ht_supported)
@@ -700,7 +717,7 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_TYPE_BIT_VHT;
- if (he_cap->has_he)
+ if (he_cap && he_cap->has_he)
mode |= PHY_TYPE_BIT_HE;
}
@@ -719,6 +736,7 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
struct sta_rec_state *state;
struct sta_rec_phy *phy;
struct tlv *tlv;
+ u16 supp_rates;
/* starec ht */
if (sta->ht_cap.ht_supported) {
@@ -748,12 +766,22 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
if (!is_mt7921(dev))
return;
- if (sta->ht_cap.ht_supported)
+ if (sta->ht_cap.ht_supported || sta->he_cap.has_he)
mt76_connac_mcu_sta_amsdu_tlv(skb, sta, vif);
/* starec he */
- if (sta->he_cap.has_he)
+ if (sta->he_cap.has_he) {
mt76_connac_mcu_sta_he_tlv(skb, sta);
+ if (band == NL80211_BAND_6GHZ &&
+ sta_state == MT76_STA_INFO_STATE_ASSOC) {
+ struct sta_rec_he_6g_capa *he_6g_capa;
+
+ tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G,
+ sizeof(*he_6g_capa));
+ he_6g_capa = (struct sta_rec_he_6g_capa *)tlv;
+ he_6g_capa->capa = sta->he_6ghz_capa.capa;
+ }
+ }
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy));
phy = (struct sta_rec_phy *)tlv;
@@ -767,7 +795,15 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb,
tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info));
ra_info = (struct sta_rec_ra_info *)tlv;
- ra_info->legacy = cpu_to_le16((u16)sta->supp_rates[band]);
+
+ supp_rates = sta->supp_rates[band];
+ if (band == NL80211_BAND_2GHZ)
+ supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) |
+ FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf);
+ else
+ supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates);
+
+ ra_info->legacy = cpu_to_le16(supp_rates);
if (sta->ht_cap.ht_supported)
memcpy(ra_info->rx_mcs_bitmask, sta->ht_cap.mcs.rx_mask,
@@ -1145,7 +1181,7 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (he_cap->has_he)
mode |= PHY_MODE_AX_24G;
- } else if (band == NL80211_BAND_5GHZ) {
+ } else if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
mode |= PHY_MODE_A;
if (ht_cap->ht_supported)
@@ -1154,8 +1190,12 @@ mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif,
if (vht_cap->vht_supported)
mode |= PHY_MODE_AC;
- if (he_cap->has_he)
- mode |= PHY_MODE_AX_5G;
+ if (he_cap->has_he) {
+ if (band == NL80211_BAND_6GHZ)
+ mode |= PHY_MODE_AX_6G;
+ else
+ mode |= PHY_MODE_AX_5G;
+ }
}
return mode;
@@ -1252,7 +1292,8 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
u8 short_st;
u8 ht_op_info;
u8 sco;
- u8 pad[3];
+ u8 band;
+ u8 pad[2];
} __packed rlm;
} __packed rlm_req = {
.hdr = {
@@ -1268,13 +1309,19 @@ int mt76_connac_mcu_uni_add_bss(struct mt76_phy *phy,
.ht_op_info = 4, /* set HT 40M allowed */
.rx_streams = phy->chainmask,
.short_st = true,
+ .band = band,
},
};
int err, conn_type;
- u8 idx;
+ u8 idx, basic_phy;
idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx;
basic_req.basic.hw_bss_idx = idx;
+ if (band == NL80211_BAND_6GHZ)
+ basic_req.basic.phymode_ext = BIT(0);
+
+ basic_phy = mt76_connac_get_phy_mode_v2(phy, vif, band, NULL);
+ basic_req.basic.nonht_basic_phy = cpu_to_le16(basic_phy);
switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
@@ -1445,7 +1492,17 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif,
else
chan = &req->channels[i];
- chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+ switch (scan_list[i]->band) {
+ case NL80211_BAND_2GHZ:
+ chan->band = 1;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan->band = 3;
+ break;
+ default:
+ chan->band = 2;
+ break;
+ }
chan->channel_num = scan_list[i]->hw_value;
}
req->channel_type = sreq->n_channels ? 4 : 0;
@@ -1531,8 +1588,10 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
get_random_mask_addr(addr, sreq->mac_addr,
sreq->mac_addr_mask);
}
- if (is_mt7921(phy->dev))
+ if (is_mt7921(phy->dev)) {
req->mt7921.bss_idx = mvif->idx;
+ req->mt7921.delay = cpu_to_le32(sreq->delay);
+ }
req->ssids_num = sreq->n_ssids;
for (i = 0; i < req->ssids_num; i++) {
@@ -1554,7 +1613,18 @@ int mt76_connac_mcu_sched_scan_req(struct mt76_phy *phy,
req->channels_num = min_t(u8, sreq->n_channels, 64);
for (i = 0; i < req->channels_num; i++) {
chan = &req->channels[i];
- chan->band = scan_list[i]->band == NL80211_BAND_2GHZ ? 1 : 2;
+
+ switch (scan_list[i]->band) {
+ case NL80211_BAND_2GHZ:
+ chan->band = 1;
+ break;
+ case NL80211_BAND_6GHZ:
+ chan->band = 3;
+ break;
+ default:
+ chan->band = 2;
+ break;
+ }
chan->channel_num = scan_list[i]->hw_value;
}
@@ -1652,6 +1722,61 @@ void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_coredump_event);
+static void mt76_connac_mcu_parse_tx_resource(struct mt76_dev *dev,
+ struct sk_buff *skb)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ struct mt76_connac_tx_resource {
+ __le32 version;
+ __le32 pse_data_quota;
+ __le32 pse_mcu_quota;
+ __le32 ple_data_quota;
+ __le32 ple_mcu_quota;
+ __le16 pse_page_size;
+ __le16 ple_page_size;
+ u8 pp_padding;
+ u8 pad[3];
+ } __packed * tx_res;
+
+ tx_res = (struct mt76_connac_tx_resource *)skb->data;
+ sdio->sched.pse_data_quota = le32_to_cpu(tx_res->pse_data_quota);
+ sdio->sched.pse_mcu_quota = le32_to_cpu(tx_res->pse_mcu_quota);
+ sdio->sched.ple_data_quota = le32_to_cpu(tx_res->ple_data_quota);
+ sdio->sched.pse_page_size = le16_to_cpu(tx_res->pse_page_size);
+ sdio->sched.deficit = tx_res->pp_padding;
+}
+
+static void mt76_connac_mcu_parse_phy_cap(struct mt76_dev *dev,
+ struct sk_buff *skb)
+{
+ struct mt76_connac_phy_cap {
+ u8 ht;
+ u8 vht;
+ u8 _5g;
+ u8 max_bw;
+ u8 nss;
+ u8 dbdc;
+ u8 tx_ldpc;
+ u8 rx_ldpc;
+ u8 tx_stbc;
+ u8 rx_stbc;
+ u8 hw_path;
+ u8 he;
+ } __packed * cap;
+
+ enum {
+ WF0_24G,
+ WF0_5G
+ };
+
+ cap = (struct mt76_connac_phy_cap *)skb->data;
+
+ dev->phy.antenna_mask = BIT(cap->nss) - 1;
+ dev->phy.chainmask = dev->phy.antenna_mask;
+ dev->phy.cap.has_2ghz = cap->hw_path & BIT(WF0_24G);
+ dev->phy.cap.has_5ghz = cap->hw_path & BIT(WF0_5G);
+}
+
int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
{
struct mt76_connac_cap_hdr {
@@ -1694,6 +1819,17 @@ int mt76_connac_mcu_get_nic_capability(struct mt76_phy *phy)
case MT_NIC_CAP_6G:
phy->cap.has_6ghz = skb->data[0];
break;
+ case MT_NIC_CAP_MAC_ADDR:
+ memcpy(phy->macaddr, (void *)skb->data, ETH_ALEN);
+ break;
+ case MT_NIC_CAP_PHY:
+ mt76_connac_mcu_parse_phy_cap(phy->dev, skb);
+ break;
+ case MT_NIC_CAP_TX_RESOURCE:
+ if (mt76_is_sdio(phy->dev))
+ mt76_connac_mcu_parse_tx_resource(phy->dev,
+ skb);
+ break;
default:
break;
}
@@ -1749,6 +1885,70 @@ mt76_connac_mcu_build_sku(struct mt76_dev *dev, s8 *sku,
}
}
+static s8 mt76_connac_get_sar_power(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ s8 target_power)
+{
+ const struct cfg80211_sar_capa *capa = phy->hw->wiphy->sar_capa;
+ struct mt76_freq_range_power *frp = phy->frp;
+ int freq, i;
+
+ if (!capa || !frp)
+ return target_power;
+
+ freq = ieee80211_channel_to_frequency(chan->hw_value, chan->band);
+ for (i = 0 ; i < capa->num_freq_ranges; i++) {
+ if (frp[i].range &&
+ freq >= frp[i].range->start_freq &&
+ freq < frp[i].range->end_freq) {
+ target_power = min_t(s8, frp[i].power, target_power);
+ break;
+ }
+ }
+
+ return target_power;
+}
+
+static s8 mt76_connac_get_ch_power(struct mt76_phy *phy,
+ struct ieee80211_channel *chan,
+ s8 target_power)
+{
+ struct mt76_dev *dev = phy->dev;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ switch (chan->band) {
+ case NL80211_BAND_2GHZ:
+ sband = &phy->sband_2g.sband;
+ break;
+ case NL80211_BAND_5GHZ:
+ sband = &phy->sband_5g.sband;
+ break;
+ case NL80211_BAND_6GHZ:
+ sband = &phy->sband_6g.sband;
+ break;
+ default:
+ return target_power;
+ }
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *ch = &sband->channels[i];
+
+ if (ch->hw_value == chan->hw_value) {
+ if (!(ch->flags & IEEE80211_CHAN_DISABLED)) {
+ int power = 2 * ch->max_reg_power;
+
+ if (is_mt7663(dev) && (power > 63 || power < -64))
+ power = 63;
+ target_power = min_t(s8, power, target_power);
+ }
+ break;
+ }
+ }
+
+ return target_power;
+}
+
static int
mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
enum nl80211_band band)
@@ -1768,6 +1968,24 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
142, 144, 149, 151, 153, 155, 157,
159, 161, 165
};
+ static const u8 chan_list_6ghz[] = {
+ 1, 3, 5, 7, 9, 11, 13,
+ 15, 17, 19, 21, 23, 25, 27,
+ 29, 33, 35, 37, 39, 41, 43,
+ 45, 47, 49, 51, 53, 55, 57,
+ 59, 61, 65, 67, 69, 71, 73,
+ 75, 77, 79, 81, 83, 85, 87,
+ 89, 91, 93, 97, 99, 101, 103,
+ 105, 107, 109, 111, 113, 115, 117,
+ 119, 121, 123, 125, 129, 131, 133,
+ 135, 137, 139, 141, 143, 145, 147,
+ 149, 151, 153, 155, 157, 161, 163,
+ 165, 167, 169, 171, 173, 175, 177,
+ 179, 181, 183, 185, 187, 189, 193,
+ 195, 197, 199, 201, 203, 205, 207,
+ 209, 211, 213, 215, 217, 219, 221,
+ 225, 227, 229, 233
+ };
int i, n_chan, batch_size, idx = 0, tx_power, last_ch;
struct mt76_connac_sku_tlv sku_tlbv;
struct mt76_power_limits limits;
@@ -1781,6 +1999,9 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
if (band == NL80211_BAND_2GHZ) {
n_chan = ARRAY_SIZE(chan_list_2ghz);
ch_list = chan_list_2ghz;
+ } else if (band == NL80211_BAND_6GHZ) {
+ n_chan = ARRAY_SIZE(chan_list_6ghz);
+ ch_list = chan_list_6ghz;
} else {
n_chan = ARRAY_SIZE(chan_list_5ghz);
ch_list = chan_list_5ghz;
@@ -1789,13 +2010,13 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
if (!phy->cap.has_5ghz)
last_ch = chan_list_2ghz[n_chan - 1];
+ else if (phy->cap.has_6ghz)
+ last_ch = chan_list_6ghz[n_chan - 1];
else
last_ch = chan_list_5ghz[n_chan - 1];
for (i = 0; i < batch_size; i++) {
- struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {
- .band = band == NL80211_BAND_2GHZ ? 1 : 2,
- };
+ struct mt76_connac_tx_power_limit_tlv tx_power_tlv = {};
int j, err, msg_len, num_ch;
struct sk_buff *skb;
@@ -1811,14 +2032,32 @@ mt76_connac_mcu_rate_txpower_band(struct mt76_phy *phy,
memcpy(tx_power_tlv.alpha2, dev->alpha2, sizeof(dev->alpha2));
tx_power_tlv.n_chan = num_ch;
+ switch (band) {
+ case NL80211_BAND_2GHZ:
+ tx_power_tlv.band = 1;
+ break;
+ case NL80211_BAND_6GHZ:
+ tx_power_tlv.band = 3;
+ break;
+ default:
+ tx_power_tlv.band = 2;
+ break;
+ }
+
for (j = 0; j < num_ch; j++, idx++) {
struct ieee80211_channel chan = {
.hw_value = ch_list[idx],
.band = band,
};
+ s8 reg_power, sar_power;
+
+ reg_power = mt76_connac_get_ch_power(phy, &chan,
+ tx_power);
+ sar_power = mt76_connac_get_sar_power(phy, &chan,
+ reg_power);
mt76_get_rate_power_limits(phy, &chan, &limits,
- tx_power);
+ sar_power);
tx_power_tlv.last_msg = ch_list[idx] == last_ch;
sku_tlbv.channel = ch_list[idx];
@@ -1855,6 +2094,12 @@ int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy)
if (err < 0)
return err;
}
+ if (phy->cap.has_6ghz) {
+ err = mt76_connac_mcu_rate_txpower_band(phy,
+ NL80211_BAND_6GHZ);
+ if (err < 0)
+ return err;
+ }
return 0;
}
@@ -1902,6 +2147,26 @@ int mt76_connac_mcu_update_arp_filter(struct mt76_dev *dev,
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_update_arp_filter);
+int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv;
+ int ct_window = vif->bss_conf.p2p_noa_attr.oppps_ctwindow;
+ struct mt76_phy *phy = hw->priv;
+ struct {
+ __le32 ct_win;
+ u8 bss_idx;
+ u8 rsv[3];
+ } __packed req = {
+ .ct_win = cpu_to_le32(ct_window),
+ .bss_idx = mvif->idx,
+ };
+
+ return mt76_mcu_send_msg(phy->dev, MCU_CMD_SET_P2P_OPPPS, &req,
+ sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_p2p_oppps);
+
#ifdef CONFIG_PM
const struct wiphy_wowlan_support mt76_connac_wowlan_support = {
@@ -1929,19 +2194,22 @@ mt76_connac_mcu_key_iter(struct ieee80211_hw *hw,
key->cipher != WLAN_CIPHER_SUITE_TKIP)
return;
- if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
- gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
cipher = BIT(3);
- } else {
- gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+ else
cipher = BIT(4);
- }
/* we are assuming here to have a single pairwise key */
if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_1);
+ else
+ gtk_tlv->proto = cpu_to_le32(NL80211_WPA_VERSION_2);
+
gtk_tlv->pairwise_cipher = cpu_to_le32(cipher);
- gtk_tlv->group_cipher = cpu_to_le32(cipher);
gtk_tlv->keyid = key->keyidx;
+ } else {
+ gtk_tlv->group_cipher = cpu_to_le32(cipher);
}
}
@@ -2209,8 +2477,35 @@ void mt76_connac_mcu_set_suspend_iter(void *priv, u8 *mac,
mt76_connac_mcu_set_wow_ctrl(phy, vif, suspend, wowlan);
}
EXPORT_SYMBOL_GPL(mt76_connac_mcu_set_suspend_iter);
-
#endif /* CONFIG_PM */
+u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset)
+{
+ struct {
+ __le32 addr;
+ __le32 val;
+ } __packed req = {
+ .addr = cpu_to_le32(offset),
+ };
+
+ return mt76_mcu_send_msg(dev, MCU_CMD_REG_READ, &req, sizeof(req),
+ true);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_rr);
+
+void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct {
+ __le32 addr;
+ __le32 val;
+ } __packed req = {
+ .addr = cpu_to_le32(offset),
+ .val = cpu_to_le32(val),
+ };
+
+ mt76_mcu_send_msg(dev, MCU_CMD_REG_WRITE, &req, sizeof(req), false);
+}
+EXPORT_SYMBOL_GPL(mt76_connac_mcu_reg_wr);
+
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
index 1c73beb22677..4e2c9dafd776 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h
@@ -124,6 +124,8 @@ struct sta_rec_state {
u8 rsv[1];
} __packed;
+#define RA_LEGACY_OFDM GENMASK(13, 6)
+#define RA_LEGACY_CCK GENMASK(3, 0)
#define HT_MCS_MASK_NUM 10
struct sta_rec_ra_info {
__le16 tag;
@@ -143,6 +145,13 @@ struct sta_rec_phy {
u8 rsv[2];
} __packed;
+struct sta_rec_he_6g_capa {
+ __le16 tag;
+ __le16 len;
+ __le16 capa;
+ u8 rsv[2];
+} __packed;
+
/* wtbl_rec */
struct wtbl_req_hdr {
@@ -301,6 +310,7 @@ struct wtbl_raw {
sizeof(struct sta_rec_vht) + \
sizeof(struct sta_rec_uapsd) + \
sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct sta_rec_he_6g_capa) + \
sizeof(struct tlv) + \
MT76_CONNAC_WTBL_UPDATE_MAX_SIZE)
@@ -327,6 +337,7 @@ enum {
STA_REC_MUEDCA,
STA_REC_BFEE,
STA_REC_PHY = 0x15,
+ STA_REC_HE_6G = 0x17,
STA_REC_MAX_NUM
};
@@ -520,6 +531,7 @@ enum {
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
MCU_EXT_CMD_RXDCOC_CAL = 0x59,
MCU_EXT_CMD_TXDPD_CAL = 0x60,
+ MCU_EXT_CMD_CAL_CACHE = 0x67,
MCU_EXT_CMD_SET_RDD_TH = 0x7c,
MCU_EXT_CMD_SET_RDD_PATTERN = 0x7d,
};
@@ -548,6 +560,7 @@ enum {
/* offload mcu commands */
enum {
+ MCU_CMD_TEST_CTRL = MCU_CE_PREFIX | 0x01,
MCU_CMD_START_HW_SCAN = MCU_CE_PREFIX | 0x03,
MCU_CMD_SET_PS_PROFILE = MCU_CE_PREFIX | 0x05,
MCU_CMD_SET_CHAN_DOMAIN = MCU_CE_PREFIX | 0x0f,
@@ -560,6 +573,7 @@ enum {
MCU_CMD_SCHED_SCAN_ENABLE = MCU_CE_PREFIX | 0x61,
MCU_CMD_SCHED_SCAN_REQ = MCU_CE_PREFIX | 0x62,
MCU_CMD_GET_NIC_CAPAB = MCU_CE_PREFIX | 0x8a,
+ MCU_CMD_SET_MU_EDCA_PARMS = MCU_CE_PREFIX | 0xb0,
MCU_CMD_REG_WRITE = MCU_CE_PREFIX | 0xc0,
MCU_CMD_REG_READ = MCU_CE_PREFIX | MCU_QUERY_MASK | 0xc0,
MCU_CMD_CHIP_CONFIG = MCU_CE_PREFIX | 0xca,
@@ -656,10 +670,14 @@ struct mt76_connac_bss_basic_tlv {
* bit(3): GN
* bit(4): AN
* bit(5): AC
+ * bit(6): AX2
+ * bit(7): AX5
+ * bit(8): AX6
*/
__le16 sta_idx;
- u8 nonht_basic_phy;
- u8 pad[3];
+ __le16 nonht_basic_phy;
+ u8 phymode_ext; /* bit(0) AX_6G */
+ u8 pad[1];
} __packed;
struct mt76_connac_bss_qos_tlv {
@@ -802,7 +820,9 @@ struct mt76_connac_sched_scan_req {
} mt7663;
struct {
u8 bss_idx;
- u8 pad2[19];
+ u8 pad1[3];
+ __le32 delay;
+ u8 pad2[12];
u8 random_mac[ETH_ALEN];
u8 pad3[38];
} mt7921;
@@ -844,14 +864,14 @@ struct mt76_connac_gtk_rekey_tlv {
* 2: rekey update
*/
u8 keyid;
- u8 pad[2];
+ u8 option; /* 1: rekey data update without enabling offload */
+ u8 pad[1];
__le32 proto; /* WPA-RSN-WAPI-OPSN */
__le32 pairwise_cipher;
__le32 group_cipher;
__le32 key_mgmt; /* NONE-PSK-IEEE802.1X */
__le32 mgmt_group_cipher;
- u8 option; /* 1: rekey data update without enabling offload */
- u8 reserverd[3];
+ u8 reserverd[4];
} __packed;
#define MT76_CONNAC_WOW_MASK_MAX_LEN 16
@@ -961,7 +981,7 @@ struct mt76_connac_tx_power_limit_tlv {
__le16 len;
/* DW1 - cmd hint */
u8 n_chan; /* # channel */
- u8 band; /* 2.4GHz - 5GHz */
+ u8 band; /* 2.4GHz - 5GHz - 6GHz */
u8 last_msg;
u8 pad1;
/* DW3 */
@@ -1093,4 +1113,8 @@ int mt76_connac_mcu_set_deep_sleep(struct mt76_dev *dev, bool enable);
void mt76_connac_mcu_coredump_event(struct mt76_dev *dev, struct sk_buff *skb,
struct mt76_connac_coredump *coredump);
int mt76_connac_mcu_set_rate_txpower(struct mt76_phy *phy);
+int mt76_connac_mcu_set_p2p_oppps(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+u32 mt76_connac_mcu_reg_rr(struct mt76_dev *dev, u32 offset);
+void mt76_connac_mcu_reg_wr(struct mt76_dev *dev, u32 offset, u32 val);
#endif /* __MT76_CONNAC_MCU_H */
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
index cea24213186c..da2ca2563ac9 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c
@@ -201,7 +201,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev,
t->stbc[6] = t->stbc[7] = s6_to_s8(val >> 8);
/* vht mcs 8, 9 5GHz */
- val = mt76x02_eeprom_get(dev, 0x132);
+ val = mt76x02_eeprom_get(dev, 0x12c);
t->vht[8] = s6_to_s8(val);
t->vht[9] = s6_to_s8(val >> 8);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
index b795e7245c07..f19228fc5a70 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c
@@ -176,7 +176,7 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -276,6 +276,7 @@ static int mt76x0e_resume(struct pci_dev *pdev)
mt76_worker_enable(&mdev->tx_worker);
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
mt76_queue_rx_reset(dev, i);
napi_enable(&mdev->napi[i]);
@@ -284,6 +285,7 @@ static int mt76x0e_resume(struct pci_dev *pdev)
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
+ local_bh_enable();
return mt76x0e_init_hardware(dev, true);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index c32e6dc68773..a404fd7ea968 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -176,7 +176,7 @@ void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop)
mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop));
}
-static __le16
+static u16
mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
const struct ieee80211_tx_rate *rate, u8 *nss_val)
{
@@ -222,14 +222,14 @@ mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev,
rateval |= MT_RXWI_RATE_SGI;
*nss_val = nss;
- return cpu_to_le16(rateval);
+ return rateval;
}
void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid,
const struct ieee80211_tx_rate *rate)
{
s8 max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
- __le16 rateval;
+ u16 rateval;
u32 tx_info;
s8 nss;
@@ -342,7 +342,7 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
struct ieee80211_key_conf *key = info->control.hw_key;
u32 wcid_tx_info;
u16 rate_ht_mask = FIELD_PREP(MT_RXWI_RATE_PHY, BIT(1) | BIT(2));
- u16 txwi_flags = 0;
+ u16 txwi_flags = 0, rateval;
u8 nss;
s8 txpwr_adj, max_txpwr_adj;
u8 ccmp_pn[8], nstreams = dev->mphy.chainmask & 0xf;
@@ -380,14 +380,15 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi,
if (wcid && (rate->idx < 0 || !rate->count)) {
wcid_tx_info = wcid->tx_info;
- txwi->rate = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
+ rateval = FIELD_GET(MT_WCID_TX_INFO_RATE, wcid_tx_info);
max_txpwr_adj = FIELD_GET(MT_WCID_TX_INFO_TXPWR_ADJ,
wcid_tx_info);
nss = FIELD_GET(MT_WCID_TX_INFO_NSS, wcid_tx_info);
} else {
- txwi->rate = mt76x02_mac_tx_rate_val(dev, rate, &nss);
+ rateval = mt76x02_mac_tx_rate_val(dev, rate, &nss);
max_txpwr_adj = mt76x02_tx_get_max_txpwr_adj(dev, rate);
}
+ txwi->rate = cpu_to_le16(rateval);
txpwr_adj = mt76x02_tx_get_txpwr_adj(dev, dev->txpower_conf,
max_txpwr_adj);
@@ -1185,7 +1186,7 @@ void mt76x02_mac_work(struct work_struct *work)
mutex_unlock(&dev->mt76.mutex);
- mt76_tx_status_check(&dev->mt76, NULL, false);
+ mt76_tx_status_check(&dev->mt76, false);
ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mphy.mac_work,
MT_MAC_WORK_INTERVAL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
index b50084bbe83d..ec0de691129a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c
@@ -53,7 +53,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_skb_set_moredata(data.tail[i], false);
}
- spin_lock_bh(&q->lock);
+ spin_lock(&q->lock);
while ((skb = __skb_dequeue(&data.q)) != NULL) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
@@ -61,7 +61,7 @@ static void mt76x02_pre_tbtt_tasklet(struct tasklet_struct *t)
mt76_tx_queue_skb(dev, q, skb, &mvif->group_wcid, NULL);
}
- spin_unlock_bh(&q->lock);
+ spin_unlock(&q->lock);
}
static void mt76x02e_pre_tbtt_enable(struct mt76x02_dev *dev, bool en)
@@ -472,7 +472,7 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
mt76_queue_rx_reset(dev, i);
}
- mt76_tx_status_check(&dev->mt76, NULL, true);
+ mt76_tx_status_check(&dev->mt76, true);
mt76x02_mac_start(dev);
@@ -491,15 +491,17 @@ static void mt76x02_watchdog_reset(struct mt76x02_dev *dev)
clear_bit(MT76_RESET, &dev->mphy.state);
mt76_worker_enable(&dev->mt76.tx_worker);
+ tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
+
+ local_bh_disable();
napi_enable(&dev->mt76.tx_napi);
napi_schedule(&dev->mt76.tx_napi);
- tasklet_enable(&dev->mt76.pre_tbtt_tasklet);
-
mt76_for_each_q_rx(&dev->mt76, i) {
napi_enable(&dev->mt76.napi[i]);
napi_schedule(&dev->mt76.napi[i]);
}
+ local_bh_enable();
if (restart) {
set_bit(MT76_RESTART, &dev->mphy.state);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
index ccdbab341271..1f17d86ff755 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c
@@ -287,6 +287,8 @@ mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif,
mvif->idx = idx;
mvif->group_wcid.idx = MT_VIF_WCID(idx);
mvif->group_wcid.hw_key_idx = -1;
+ mt76_packet_id_init(&mvif->group_wcid);
+
mtxq = (struct mt76_txq *)vif->txq->drv_priv;
mtxq->wcid = &mvif->group_wcid;
}
@@ -341,6 +343,7 @@ void mt76x02_remove_interface(struct ieee80211_hw *hw,
struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv;
dev->mt76.vif_mask &= ~BIT(mvif->idx);
+ mt76_packet_id_flush(&dev->mt76, &mvif->group_wcid);
}
EXPORT_SYMBOL_GPL(mt76x02_remove_interface);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
index adf288e50e21..8a22ee581674 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci.c
@@ -47,7 +47,7 @@ mt76x2e_probe(struct pci_dev *pdev, const struct pci_device_id *id)
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
@@ -149,12 +149,15 @@ mt76x2e_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
mt76_worker_enable(&mdev->tx_worker);
+
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
+ local_bh_enable();
return mt76x2_resume_device(dev);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
index 64048243e34b..a15aa256d0cf 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/debugfs.c
@@ -7,6 +7,13 @@
/** global debugfs **/
+struct hw_queue_map {
+ const char *name;
+ u8 index;
+ u8 pid;
+ u8 qid;
+};
+
static int
mt7915_implicit_txbf_set(void *data, u64 val)
{
@@ -75,7 +82,7 @@ DEFINE_DEBUGFS_ATTRIBUTE(fops_radar_trigger, NULL,
mt7915_radar_trigger, "%lld\n");
static int
-mt7915_fw_debug_set(void *data, u64 val)
+mt7915_fw_debug_wm_set(void *data, u64 val)
{
struct mt7915_dev *dev = data;
enum {
@@ -85,41 +92,112 @@ mt7915_fw_debug_set(void *data, u64 val)
DEBUG_SPL,
DEBUG_RPT_RX,
} debug;
+ int ret;
- dev->fw_debug = !!val;
+ dev->fw_debug_wm = val ? MCU_FW_LOG_TO_HOST : 0;
- mt7915_mcu_fw_log_2_host(dev, dev->fw_debug ? 2 : 0);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, dev->fw_debug_wm);
+ if (ret)
+ return ret;
- for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++)
- mt7915_mcu_fw_dbg_ctrl(dev, debug, dev->fw_debug);
+ for (debug = DEBUG_TXCMD; debug <= DEBUG_RPT_RX; debug++) {
+ ret = mt7915_mcu_fw_dbg_ctrl(dev, debug, !!dev->fw_debug_wm);
+ if (ret)
+ return ret;
+ }
+
+ /* WM CPU info record control */
+ mt76_clear(dev, MT_CPU_UTIL_CTRL, BIT(0));
+ mt76_wr(dev, MT_DIC_CMD_REG_CMD, BIT(2) | BIT(13) | !dev->fw_debug_wm);
+ mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR, BIT(5));
+ mt76_wr(dev, MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR, BIT(5));
return 0;
}
static int
-mt7915_fw_debug_get(void *data, u64 *val)
+mt7915_fw_debug_wm_get(void *data, u64 *val)
{
struct mt7915_dev *dev = data;
- *val = dev->fw_debug;
+ *val = dev->fw_debug_wm;
return 0;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug, mt7915_fw_debug_get,
- mt7915_fw_debug_set, "%lld\n");
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wm, mt7915_fw_debug_wm_get,
+ mt7915_fw_debug_wm_set, "%lld\n");
+
+static int
+mt7915_fw_debug_wa_set(void *data, u64 val)
+{
+ struct mt7915_dev *dev = data;
+ int ret;
+
+ dev->fw_debug_wa = val ? MCU_FW_LOG_TO_HOST : 0;
+
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, dev->fw_debug_wa);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_PDMA_RX,
+ !!dev->fw_debug_wa, 0);
+}
+
+static int
+mt7915_fw_debug_wa_get(void *data, u64 *val)
+{
+ struct mt7915_dev *dev = data;
+
+ *val = dev->fw_debug_wa;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_fw_debug_wa, mt7915_fw_debug_wa_get,
+ mt7915_fw_debug_wa_set, "%lld\n");
+
+static int
+mt7915_fw_util_wm_show(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+
+ if (dev->fw_debug_wm) {
+ seq_printf(file, "Busy: %u%% Peak busy: %u%%\n",
+ mt76_rr(dev, MT_CPU_UTIL_BUSY_PCT),
+ mt76_rr(dev, MT_CPU_UTIL_PEAK_BUSY_PCT));
+ seq_printf(file, "Idle count: %u Peak idle count: %u\n",
+ mt76_rr(dev, MT_CPU_UTIL_IDLE_CNT),
+ mt76_rr(dev, MT_CPU_UTIL_PEAK_IDLE_CNT));
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wm);
+
+static int
+mt7915_fw_util_wa_show(struct seq_file *file, void *data)
+{
+ struct mt7915_dev *dev = file->private;
+
+ if (dev->fw_debug_wa)
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(QUERY),
+ MCU_WA_PARAM_CPU_UTIL, 0, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mt7915_fw_util_wa);
static void
mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
struct seq_file *file)
{
- struct mt7915_dev *dev = file->private;
+ struct mt7915_dev *dev = phy->dev;
bool ext_phy = phy != &dev->phy;
int bound[15], range[4], i, n;
- if (!phy)
- return;
-
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
range[i] = mt76_rr(dev, MT_MIB_ARNG(ext_phy, i));
@@ -146,56 +224,46 @@ mt7915_ampdu_stat_read_phy(struct mt7915_phy *phy,
static void
mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
{
- struct mt7915_dev *dev = s->private;
- bool ext_phy = phy != &dev->phy;
static const char * const bw[] = {
"BW20", "BW40", "BW80", "BW160"
};
- int cnt;
-
- if (!phy)
- return;
+ struct mib_stats *mib = &phy->mib;
/* Tx Beamformer monitor */
seq_puts(s, "\nTx Beamformer applied PPDU counts: ");
- cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
- seq_printf(s, "iBF: %ld, eBF: %ld\n",
- FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt),
- FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt));
+ seq_printf(s, "iBF: %d, eBF: %d\n",
+ mib->tx_bf_ibf_ppdu_cnt,
+ mib->tx_bf_ebf_ppdu_cnt);
/* Tx Beamformer Rx feedback monitor */
seq_puts(s, "Tx Beamformer Rx feedback statistics: ");
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
- seq_printf(s, "All: %ld, HE: %ld, VHT: %ld, HT: %ld, ",
- FIELD_GET(MT_ETBF_RX_FB_ALL, cnt),
- FIELD_GET(MT_ETBF_RX_FB_HE, cnt),
- FIELD_GET(MT_ETBF_RX_FB_VHT, cnt),
- FIELD_GET(MT_ETBF_RX_FB_HT, cnt));
- cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
- seq_printf(s, "%s, NC: %ld, NR: %ld\n",
- bw[FIELD_GET(MT_ETBF_RX_FB_BW, cnt)],
- FIELD_GET(MT_ETBF_RX_FB_NC, cnt),
- FIELD_GET(MT_ETBF_RX_FB_NR, cnt));
+ seq_printf(s, "All: %d, HE: %d, VHT: %d, HT: %d, ",
+ mib->tx_bf_rx_fb_all_cnt,
+ mib->tx_bf_rx_fb_he_cnt,
+ mib->tx_bf_rx_fb_vht_cnt,
+ mib->tx_bf_rx_fb_ht_cnt);
+
+ seq_printf(s, "%s, NC: %d, NR: %d\n",
+ bw[mib->tx_bf_rx_fb_bw],
+ mib->tx_bf_rx_fb_nc_cnt,
+ mib->tx_bf_rx_fb_nr_cnt);
/* Tx Beamformee Rx NDPA & Tx feedback report */
- cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
- seq_printf(s, "Tx Beamformee successful feedback frames: %ld\n",
- FIELD_GET(MT_ETBF_TX_FB_CPL, cnt));
- seq_printf(s, "Tx Beamformee feedback triggered counts: %ld\n",
- FIELD_GET(MT_ETBF_TX_FB_TRI, cnt));
+ seq_printf(s, "Tx Beamformee successful feedback frames: %d\n",
+ mib->tx_bf_fb_cpl_cnt);
+ seq_printf(s, "Tx Beamformee feedback triggered counts: %d\n",
+ mib->tx_bf_fb_trig_cnt);
/* Tx SU & MU counters */
- cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
- seq_printf(s, "Tx multi-user Beamforming counts: %ld\n",
- FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt));
- cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
- seq_printf(s, "Tx multi-user MPDU counts: %d\n", cnt);
- cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
- seq_printf(s, "Tx multi-user successful MPDU counts: %d\n", cnt);
- cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
- seq_printf(s, "Tx single-user successful MPDU counts: %d\n", cnt);
+ seq_printf(s, "Tx multi-user Beamforming counts: %d\n",
+ mib->tx_bf_cnt);
+ seq_printf(s, "Tx multi-user MPDU counts: %d\n", mib->tx_mu_mpdu_cnt);
+ seq_printf(s, "Tx multi-user successful MPDU counts: %d\n",
+ mib->tx_mu_acked_mpdu_cnt);
+ seq_printf(s, "Tx single-user successful MPDU counts: %d\n",
+ mib->tx_su_acked_mpdu_cnt);
seq_puts(s, "\n");
}
@@ -203,91 +271,189 @@ mt7915_txbf_stat_read_phy(struct mt7915_phy *phy, struct seq_file *s)
static int
mt7915_tx_stats_show(struct seq_file *file, void *data)
{
- struct mt7915_dev *dev = file->private;
- int stat[8], i, n;
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ int i;
- mt7915_ampdu_stat_read_phy(&dev->phy, file);
- mt7915_txbf_stat_read_phy(&dev->phy, file);
+ mutex_lock(&dev->mt76.mutex);
- mt7915_ampdu_stat_read_phy(mt7915_ext_phy(dev), file);
- mt7915_txbf_stat_read_phy(mt7915_ext_phy(dev), file);
+ mt7915_ampdu_stat_read_phy(phy, file);
+ mt7915_mac_update_stats(phy);
+ mt7915_txbf_stat_read_phy(phy, file);
/* Tx amsdu info */
seq_puts(file, "Tx MSDU statistics:\n");
- for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
- stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
- n += stat[i];
- }
-
- for (i = 0; i < ARRAY_SIZE(stat); i++) {
- seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ",
- i + 1, stat[i]);
- if (n != 0)
- seq_printf(file, "(%d%%)\n", stat[i] * 100 / n);
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ",
+ i + 1, mib->tx_amsdu[i]);
+ if (mib->tx_amsdu_cnt)
+ seq_printf(file, "(%3d%%)\n",
+ mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt);
else
seq_puts(file, "\n");
}
+ mutex_unlock(&dev->mt76.mutex);
+
return 0;
}
DEFINE_SHOW_ATTRIBUTE(mt7915_tx_stats);
-static int
-mt7915_queues_acq(struct seq_file *s, void *data)
+static void
+mt7915_hw_queue_read(struct seq_file *s, u32 base, u32 size,
+ const struct hw_queue_map *map)
{
- struct mt7915_dev *dev = dev_get_drvdata(s->private);
- int i;
+ struct mt7915_phy *phy = s->private;
+ struct mt7915_dev *dev = phy->dev;
+ u32 i, val;
- for (i = 0; i < 16; i++) {
- int j, acs = i / 4, index = i % 4;
- u32 ctrl, val, qlen = 0;
+ val = mt76_rr(dev, base + MT_FL_Q_EMPTY);
+ for (i = 0; i < size; i++) {
+ u32 ctrl, head, tail, queued;
+
+ if (val & BIT(map[i].index))
+ continue;
- val = mt76_rr(dev, MT_PLE_AC_QEMPTY(acs, index));
- ctrl = BIT(31) | BIT(15) | (acs << 8);
+ ctrl = BIT(31) | (map[i].pid << 10) | (map[i].qid << 24);
+ mt76_wr(dev, base + MT_FL_Q0_CTRL, ctrl);
- for (j = 0; j < 32; j++) {
- if (val & BIT(j))
- continue;
+ head = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ GENMASK(11, 0));
+ tail = mt76_get_field(dev, base + MT_FL_Q2_CTRL,
+ GENMASK(27, 16));
+ queued = mt76_get_field(dev, base + MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
- mt76_wr(dev, MT_PLE_FL_Q0_CTRL,
- ctrl | (j + (index << 5)));
- qlen += mt76_get_field(dev, MT_PLE_FL_Q3_CTRL,
- GENMASK(11, 0));
- }
- seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
+ seq_printf(s, "\t%s: ", map[i].name);
+ seq_printf(s, "queued:0x%03x head:0x%03x tail:0x%03x\n",
+ queued, head, tail);
}
+}
+
+static void
+mt7915_sta_hw_queue_read(void *data, struct ieee80211_sta *sta)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ struct seq_file *s = data;
+ u8 ac;
+
+ for (ac = 0; ac < 4; ac++) {
+ u32 qlen, ctrl, val;
+ u32 idx = msta->wcid.idx >> 5;
+ u8 offs = msta->wcid.idx & GENMASK(4, 0);
+
+ ctrl = BIT(31) | BIT(11) | (ac << 24);
+ val = mt76_rr(dev, MT_PLE_AC_QEMPTY(ac, idx));
+
+ if (val & BIT(offs))
+ continue;
+
+ mt76_wr(dev, MT_PLE_BASE + MT_FL_Q0_CTRL, ctrl | msta->wcid.idx);
+ qlen = mt76_get_field(dev, MT_PLE_BASE + MT_FL_Q3_CTRL,
+ GENMASK(11, 0));
+ seq_printf(s, "\tSTA %pM wcid %d: AC%d%d queued:%d\n",
+ sta->addr, msta->wcid.idx, msta->vif->wmm_idx,
+ ac, qlen);
+ }
+}
+
+static int
+mt7915_hw_queues_show(struct seq_file *file, void *data)
+{
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
+ static const struct hw_queue_map ple_queue_map[] = {
+ { "CPU_Q0", 0, 1, MT_CTX0 },
+ { "CPU_Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU_Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU_Q3", 3, 1, MT_CTX0 + 3 },
+ { "ALTX_Q0", 8, 2, MT_LMAC_ALTX0 },
+ { "BMC_Q0", 9, 2, MT_LMAC_BMC0 },
+ { "BCN_Q0", 10, 2, MT_LMAC_BCN0 },
+ { "PSMP_Q0", 11, 2, MT_LMAC_PSMP0 },
+ { "ALTX_Q1", 12, 2, MT_LMAC_ALTX0 + 4 },
+ { "BMC_Q1", 13, 2, MT_LMAC_BMC0 + 4 },
+ { "BCN_Q1", 14, 2, MT_LMAC_BCN0 + 4 },
+ { "PSMP_Q1", 15, 2, MT_LMAC_PSMP0 + 4 },
+ };
+ static const struct hw_queue_map pse_queue_map[] = {
+ { "CPU Q0", 0, 1, MT_CTX0 },
+ { "CPU Q1", 1, 1, MT_CTX0 + 1 },
+ { "CPU Q2", 2, 1, MT_CTX0 + 2 },
+ { "CPU Q3", 3, 1, MT_CTX0 + 3 },
+ { "HIF_Q0", 8, 0, MT_HIF0 },
+ { "HIF_Q1", 9, 0, MT_HIF0 + 1 },
+ { "HIF_Q2", 10, 0, MT_HIF0 + 2 },
+ { "HIF_Q3", 11, 0, MT_HIF0 + 3 },
+ { "HIF_Q4", 12, 0, MT_HIF0 + 4 },
+ { "HIF_Q5", 13, 0, MT_HIF0 + 5 },
+ { "LMAC_Q", 16, 2, 0 },
+ { "MDP_TXQ", 17, 2, 1 },
+ { "MDP_RXQ", 18, 2, 2 },
+ { "SEC_TXQ", 19, 2, 3 },
+ { "SEC_RXQ", 20, 2, 4 },
+ };
+ u32 val, head, tail;
+
+ /* ple queue */
+ val = mt76_rr(dev, MT_PLE_FREEPG_CNT);
+ head = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_FREEPG_HEAD_TAIL, GENMASK(27, 16));
+ seq_puts(file, "PLE page info:\n");
+ seq_printf(file,
+ "\tTotal free page: 0x%08x head: 0x%03x tail: 0x%03x\n",
+ val, head, tail);
+
+ val = mt76_rr(dev, MT_PLE_PG_HIF_GROUP);
+ head = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(11, 0));
+ tail = mt76_get_field(dev, MT_PLE_HIF_PG_INFO, GENMASK(27, 16));
+ seq_printf(file, "\tHIF free page: 0x%03x res: 0x%03x used: 0x%03x\n",
+ val, head, tail);
+
+ seq_puts(file, "PLE non-empty queue info:\n");
+ mt7915_hw_queue_read(file, MT_PLE_BASE, ARRAY_SIZE(ple_queue_map),
+ &ple_queue_map[0]);
+
+ /* iterate per-sta ple queue */
+ ieee80211_iterate_stations_atomic(phy->mt76->hw,
+ mt7915_sta_hw_queue_read, file);
+ /* pse queue */
+ seq_puts(file, "PSE non-empty queue info:\n");
+ mt7915_hw_queue_read(file, MT_PSE_BASE, ARRAY_SIZE(pse_queue_map),
+ &pse_queue_map[0]);
return 0;
}
+DEFINE_SHOW_ATTRIBUTE(mt7915_hw_queues);
+
static int
-mt7915_queues_read(struct seq_file *s, void *data)
+mt7915_xmit_queues_show(struct seq_file *file, void *data)
{
- struct mt7915_dev *dev = dev_get_drvdata(s->private);
- struct mt76_phy *mphy_ext = dev->mt76.phy2;
- struct mt76_queue *ext_q = mphy_ext ? mphy_ext->q_tx[MT_TXQ_BE] : NULL;
+ struct mt7915_phy *phy = file->private;
+ struct mt7915_dev *dev = phy->dev;
struct {
struct mt76_queue *q;
char *queue;
} queue_map[] = {
- { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
- { ext_q, "WFDMA1" },
- { dev->mphy.q_tx[MT_TXQ_BE], "WFDMA0" },
- { dev->mt76.q_mcu[MT_MCUQ_WM], "MCUWM" },
- { dev->mt76.q_mcu[MT_MCUQ_WA], "MCUWA" },
- { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWQ" },
+ { phy->mt76->q_tx[MT_TXQ_BE], " MAIN" },
+ { dev->mt76.q_mcu[MT_MCUQ_WM], " MCUWM" },
+ { dev->mt76.q_mcu[MT_MCUQ_WA], " MCUWA" },
+ { dev->mt76.q_mcu[MT_MCUQ_FWDL], "MCUFWDL" },
};
int i;
+ seq_puts(file, " queue | hw-queued | head | tail |\n");
for (i = 0; i < ARRAY_SIZE(queue_map); i++) {
struct mt76_queue *q = queue_map[i].q;
if (!q)
continue;
- seq_printf(s,
- "%s: queued=%d head=%d tail=%d\n",
+ seq_printf(file, " %s | %9d | %9d | %9d |\n",
queue_map[i].queue, q->queued, q->head,
q->tail);
}
@@ -295,8 +461,10 @@ mt7915_queues_read(struct seq_file *s, void *data)
return 0;
}
-static void
-mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
+DEFINE_SHOW_ATTRIBUTE(mt7915_xmit_queues);
+
+static int
+mt7915_rate_txpower_show(struct seq_file *file, void *data)
{
static const char * const sku_group_name[] = {
"CCK", "OFDM", "HT20", "HT40",
@@ -304,14 +472,11 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
"RU26", "RU52", "RU106", "RU242/SU20",
"RU484/SU40", "RU996/SU80", "RU2x996/SU160"
};
+ struct mt7915_phy *phy = file->private;
s8 txpower[MT7915_SKU_RATE_NUM], *buf;
int i;
- if (!phy)
- return;
-
- seq_printf(s, "\nBand %d\n", phy != &phy->dev->phy);
-
+ seq_printf(file, "\nBand %d\n", phy != &phy->dev->phy);
mt7915_mcu_get_txpower_sku(phy, txpower, sizeof(txpower));
for (i = 0, buf = txpower; i < ARRAY_SIZE(mt7915_sku_group_len); i++) {
u8 mcs_num = mt7915_sku_group_len[i];
@@ -319,45 +484,75 @@ mt7915_puts_rate_txpower(struct seq_file *s, struct mt7915_phy *phy)
if (i >= SKU_VHT_BW20 && i <= SKU_VHT_BW160)
mcs_num = 10;
- mt76_seq_puts_array(s, sku_group_name[i], buf, mcs_num);
+ mt76_seq_puts_array(file, sku_group_name[i], buf, mcs_num);
buf += mt7915_sku_group_len[i];
}
+
+ return 0;
}
+DEFINE_SHOW_ATTRIBUTE(mt7915_rate_txpower);
+
static int
-mt7915_read_rate_txpower(struct seq_file *s, void *data)
+mt7915_twt_stats(struct seq_file *s, void *data)
{
struct mt7915_dev *dev = dev_get_drvdata(s->private);
+ struct mt7915_twt_flow *iter;
+
+ rcu_read_lock();
- mt7915_puts_rate_txpower(s, &dev->phy);
- mt7915_puts_rate_txpower(s, mt7915_ext_phy(dev));
+ seq_puts(s, " wcid | id | flags | exp | mantissa");
+ seq_puts(s, " | duration | tsf |\n");
+ list_for_each_entry_rcu(iter, &dev->twt_list, list)
+ seq_printf(s,
+ "%9d | %8d | %5c%c%c%c | %8d | %8d | %8d | %14lld |\n",
+ iter->wcid, iter->id,
+ iter->sched ? 's' : 'u',
+ iter->protection ? 'p' : '-',
+ iter->trigger ? 't' : '-',
+ iter->flowtype ? '-' : 'a',
+ iter->exp, iter->mantissa,
+ iter->duration, iter->tsf);
+
+ rcu_read_unlock();
return 0;
}
-int mt7915_init_debugfs(struct mt7915_dev *dev)
+int mt7915_init_debugfs(struct mt7915_phy *phy)
{
+ struct mt7915_dev *dev = phy->dev;
+ bool ext_phy = phy != &dev->phy;
struct dentry *dir;
- dir = mt76_register_debugfs(&dev->mt76);
+ dir = mt76_register_debugfs_fops(phy->mt76, NULL);
if (!dir)
return -ENOMEM;
- debugfs_create_devm_seqfile(dev->mt76.dev, "queues", dir,
- mt7915_queues_read);
- debugfs_create_devm_seqfile(dev->mt76.dev, "acq", dir,
- mt7915_queues_acq);
- debugfs_create_file("tx_stats", 0400, dir, dev, &mt7915_tx_stats_fops);
- debugfs_create_file("fw_debug", 0600, dir, dev, &fops_fw_debug);
+ debugfs_create_file("hw-queues", 0400, dir, phy,
+ &mt7915_hw_queues_fops);
+ debugfs_create_file("xmit-queues", 0400, dir, phy,
+ &mt7915_xmit_queues_fops);
+ debugfs_create_file("tx_stats", 0400, dir, phy, &mt7915_tx_stats_fops);
+ debugfs_create_file("fw_debug_wm", 0600, dir, dev, &fops_fw_debug_wm);
+ debugfs_create_file("fw_debug_wa", 0600, dir, dev, &fops_fw_debug_wa);
+ debugfs_create_file("fw_util_wm", 0400, dir, dev,
+ &mt7915_fw_util_wm_fops);
+ debugfs_create_file("fw_util_wa", 0400, dir, dev,
+ &mt7915_fw_util_wa_fops);
debugfs_create_file("implicit_txbf", 0600, dir, dev,
&fops_implicit_txbf);
- debugfs_create_u32("dfs_hw_pattern", 0400, dir, &dev->hw_pattern);
- /* test knobs */
- debugfs_create_file("radar_trigger", 0200, dir, dev,
- &fops_radar_trigger);
+ debugfs_create_file("txpower_sku", 0400, dir, phy,
+ &mt7915_rate_txpower_fops);
+ debugfs_create_devm_seqfile(dev->mt76.dev, "twt_stats", dir,
+ mt7915_twt_stats);
debugfs_create_file("ser_trigger", 0200, dir, dev, &fops_ser_trigger);
- debugfs_create_devm_seqfile(dev->mt76.dev, "txpower_sku", dir,
- mt7915_read_rate_txpower);
+ if (!dev->dbdc_support || ext_phy) {
+ debugfs_create_u32("dfs_hw_pattern", 0400, dir,
+ &dev->hw_pattern);
+ debugfs_create_file("radar_trigger", 0200, dir, dev,
+ &fops_radar_trigger);
+ }
return 0;
}
@@ -365,68 +560,89 @@ int mt7915_init_debugfs(struct mt7915_dev *dev)
#ifdef CONFIG_MAC80211_DEBUGFS
/** per-station debugfs **/
-/* usage: <tx mode> <ldpc> <stbc> <bw> <gi> <nss> <mcs> */
-static int mt7915_sta_fixed_rate_set(void *data, u64 rate)
+static ssize_t mt7915_sta_fixed_rate_set(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
{
- struct ieee80211_sta *sta = data;
+ struct ieee80211_sta *sta = file->private_data;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = msta->vif->phy->dev;
+ struct ieee80211_vif *vif;
+ struct sta_phy phy = {};
+ char buf[100];
+ int ret;
+ u32 field;
+ u8 i, gi, he_ltf;
+
+ if (count >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ if (count && buf[count - 1] == '\n')
+ buf[count - 1] = '\0';
+ else
+ buf[count] = '\0';
+
+ /* mode - cck: 0, ofdm: 1, ht: 2, gf: 3, vht: 4, he_su: 8, he_er: 9
+ * bw - bw20: 0, bw40: 1, bw80: 2, bw160: 3
+ * nss - vht: 1~4, he: 1~4, others: ignore
+ * mcs - cck: 0~4, ofdm: 0~7, ht: 0~32, vht: 0~9, he_su: 0~11, he_er: 0~2
+ * gi - (ht/vht) lgi: 0, sgi: 1; (he) 0.8us: 0, 1.6us: 1, 3.2us: 2
+ * ldpc - off: 0, on: 1
+ * stbc - off: 0, on: 1
+ * he_ltf - 1xltf: 0, 2xltf: 1, 4xltf: 2
+ */
+ if (sscanf(buf, "%hhu %hhu %hhu %hhu %hhu %hhu %hhu %hhu",
+ &phy.type, &phy.bw, &phy.nss, &phy.mcs, &gi,
+ &phy.ldpc, &phy.stbc, &he_ltf) != 8) {
+ dev_warn(dev->mt76.dev,
+ "format: Mode BW NSS MCS (HE)GI LDPC STBC HE_LTF\n");
+ field = RATE_PARAM_AUTO;
+ goto out;
+ }
+
+ phy.ldpc = (phy.bw || phy.ldpc) * GENMASK(2, 0);
+ for (i = 0; i <= phy.bw; i++) {
+ phy.sgi |= gi << (i << sta->he_cap.has_he);
+ phy.he_ltf |= he_ltf << (i << sta->he_cap.has_he);
+ }
+ field = RATE_PARAM_FIXED;
+
+out:
+ vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv);
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy, field);
+ if (ret)
+ return -EFAULT;
- return mt7915_mcu_set_fixed_rate(msta->vif->phy->dev, sta, rate);
+ return count;
}
-DEFINE_DEBUGFS_ATTRIBUTE(fops_fixed_rate, NULL,
- mt7915_sta_fixed_rate_set, "%llx\n");
+static const struct file_operations fops_fixed_rate = {
+ .write = mt7915_sta_fixed_rate_set,
+ .open = simple_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
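
For reference, the per-station fixed_rate knob added above consumes one line of eight space-separated integers in the order documented in the comment (mode, bw, nss, mcs, gi, ldpc, stbc, he_ltf). A minimal user-space sketch that exercises it could look like the following; the debugfs path is illustrative only and depends on the phy/netdev/station directory layout.

/* Hedged user-space sketch: request a fixed HE-SU rate (bw80, 2ss,
 * MCS 7, 0.8us GI, LDPC on, STBC off, 2xLTF) via the per-station
 * debugfs knob. The path below is an assumption for illustration.
 */
#include <stdio.h>

int main(void)
{
        const char *path =
                "/sys/kernel/debug/ieee80211/phy0/netdev:wlan0/"
                "stations/00:11:22:33:44:55/fixed_rate";
        FILE *f = fopen(path, "w");

        if (!f) {
                perror("fopen");
                return 1;
        }
        /* mode bw nss mcs gi ldpc stbc he_ltf */
        fprintf(f, "8 2 2 7 0 1 0 1\n");
        fclose(f);
        return 0;
}
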
static int
-mt7915_sta_stats_show(struct seq_file *s, void *data)
+mt7915_queues_show(struct seq_file *s, void *data)
{
struct ieee80211_sta *sta = s->private;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_sta_stats *stats = &msta->stats;
- struct rate_info *rate = &stats->prob_rate;
- static const char * const bw[] = {
- "BW20", "BW5", "BW10", "BW40",
- "BW80", "BW160", "BW_HE_RU"
- };
-
- if (!rate->legacy && !rate->flags)
- return 0;
- seq_puts(s, "Probing rate - ");
- if (rate->flags & RATE_INFO_FLAGS_MCS)
- seq_puts(s, "HT ");
- else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
- seq_puts(s, "VHT ");
- else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
- seq_puts(s, "HE ");
- else
- seq_printf(s, "Bitrate %d\n", rate->legacy);
-
- if (rate->flags) {
- seq_printf(s, "%s NSS%d MCS%d ",
- bw[rate->bw], rate->nss, rate->mcs);
-
- if (rate->flags & RATE_INFO_FLAGS_SHORT_GI)
- seq_puts(s, "SGI ");
- else if (rate->he_gi)
- seq_puts(s, "HE GI ");
-
- if (rate->he_dcm)
- seq_puts(s, "DCM ");
- }
-
- seq_printf(s, "\nPPDU PER: %ld.%1ld%%\n",
- stats->per / 10, stats->per % 10);
+ mt7915_sta_hw_queue_read(s, sta);
return 0;
}
-DEFINE_SHOW_ATTRIBUTE(mt7915_sta_stats);
+DEFINE_SHOW_ATTRIBUTE(mt7915_queues);
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
debugfs_create_file("fixed_rate", 0600, dir, sta, &fops_fixed_rate);
- debugfs_create_file("stats", 0400, dir, sta, &mt7915_sta_stats_fops);
+ debugfs_create_file("hw-queues", 0400, dir, sta, &mt7915_queues_fops);
}
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
index 4798d6344305..4fa8e7ba93e6 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c
@@ -42,13 +42,17 @@ static const struct ieee80211_iface_combination if_comb[] = {
}
};
-static ssize_t mt7915_thermal_show_temp(struct device *dev,
+static ssize_t mt7915_thermal_temp_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct mt7915_phy *phy = dev_get_drvdata(dev);
+ int i = to_sensor_dev_attr(attr)->index;
int temperature;
+ if (i)
+ return sprintf(buf, "%u\n", phy->throttle_temp[i - 1] * 1000);
+
temperature = mt7915_mcu_get_temperature(phy);
if (temperature < 0)
return temperature;
@@ -57,11 +61,34 @@ static ssize_t mt7915_thermal_show_temp(struct device *dev,
return sprintf(buf, "%u\n", temperature * 1000);
}
-static SENSOR_DEVICE_ATTR(temp1_input, 0444, mt7915_thermal_show_temp,
- NULL, 0);
+static ssize_t mt7915_thermal_temp_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mt7915_phy *phy = dev_get_drvdata(dev);
+ int ret, i = to_sensor_dev_attr(attr)->index;
+ long val;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&phy->dev->mt76.mutex);
+ val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 60, 130);
+ phy->throttle_temp[i - 1] = val;
+ mutex_unlock(&phy->dev->mt76.mutex);
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR_RO(temp1_input, mt7915_thermal_temp, 0);
+static SENSOR_DEVICE_ATTR_RW(temp1_crit, mt7915_thermal_temp, 1);
+static SENSOR_DEVICE_ATTR_RW(temp1_max, mt7915_thermal_temp, 2);
static struct attribute *mt7915_hwmon_attrs[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
NULL,
};
ATTRIBUTE_GROUPS(mt7915_hwmon);
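
As with standard hwmon attributes, the store path above takes millidegrees Celsius, rounds to whole degrees, and clamps the result to 60-130 degC before caching it in throttle_temp[]. A hedged stand-alone sketch of the same arithmetic (valid for non-negative input, no kernel APIs):

/* Hedged sketch of the hwmon store conversion above: millidegrees in,
 * clamped whole degrees out (60..130 C).
 */
#include <stdio.h>

static long to_throttle_temp(long millideg)
{
        long val = (millideg + 500) / 1000;     /* DIV_ROUND_CLOSEST */

        if (val < 60)
                val = 60;
        else if (val > 130)
                val = 130;                      /* clamp_val */
        return val;
}

int main(void)
{
        printf("%ld\n", to_throttle_temp(118499));      /* -> 118 */
        printf("%ld\n", to_throttle_temp(20000));       /* -> 60 */
        return 0;
}
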
@@ -96,6 +123,9 @@ mt7915_thermal_set_cur_throttle_state(struct thermal_cooling_device *cdev,
if (state > MT7915_THERMAL_THROTTLE_MAX)
return -EINVAL;
+ if (phy->throttle_temp[0] > phy->throttle_temp[1])
+ return 0;
+
if (state == phy->throttle_state)
return 0;
@@ -130,9 +160,12 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
struct wiphy *wiphy = phy->mt76->hw->wiphy;
struct thermal_cooling_device *cdev;
struct device *hwmon;
+ const char *name;
- cdev = thermal_cooling_device_register(wiphy_name(wiphy), phy,
- &mt7915_thermal_ops);
+ name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7915_%s",
+ wiphy_name(wiphy));
+
+ cdev = thermal_cooling_device_register(name, phy, &mt7915_thermal_ops);
if (!IS_ERR(cdev)) {
if (sysfs_create_link(&wiphy->dev.kobj, &cdev->device.kobj,
"cooling_device") < 0)
@@ -144,15 +177,76 @@ static int mt7915_thermal_init(struct mt7915_phy *phy)
if (!IS_REACHABLE(CONFIG_HWMON))
return 0;
- hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev,
- wiphy_name(wiphy), phy,
+ hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy,
mt7915_hwmon_groups);
if (IS_ERR(hwmon))
return PTR_ERR(hwmon);
+ /* initialize critical/maximum high temperature */
+ phy->throttle_temp[0] = 110;
+ phy->throttle_temp[1] = 120;
+
+ return 0;
+}
+
+static void mt7915_led_set_config(struct led_classdev *led_cdev,
+ u8 delay_on, u8 delay_off)
+{
+ struct mt7915_dev *dev;
+ struct mt76_dev *mt76;
+ u32 val;
+
+ mt76 = container_of(led_cdev, struct mt76_dev, led_cdev);
+ dev = container_of(mt76, struct mt7915_dev, mt76);
+
+ /* select TX blink mode, 2: only data frames */
+ mt76_rmw_field(dev, MT_TMAC_TCR0(0), MT_TMAC_TCR0_TX_BLINK, 2);
+
+ /* enable LED */
+ mt76_wr(dev, MT_LED_EN(0), 1);
+
+ /* set LED Tx blink on/off time */
+ val = FIELD_PREP(MT_LED_TX_BLINK_ON_MASK, delay_on) |
+ FIELD_PREP(MT_LED_TX_BLINK_OFF_MASK, delay_off);
+ mt76_wr(dev, MT_LED_TX_BLINK(0), val);
+
+ /* control LED */
+ val = MT_LED_CTRL_BLINK_MODE | MT_LED_CTRL_KICK;
+ if (dev->mt76.led_al)
+ val |= MT_LED_CTRL_POLARITY;
+
+ mt76_wr(dev, MT_LED_CTRL(0), val);
+ mt76_clear(dev, MT_LED_CTRL(0), MT_LED_CTRL_KICK);
+}
+
+static int mt7915_led_set_blink(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ u16 delta_on = 0, delta_off = 0;
+
+#define HW_TICK 10
+#define TO_HW_TICK(_t) (((_t) > HW_TICK) ? ((_t) / HW_TICK) : HW_TICK)
+
+ if (*delay_on)
+ delta_on = TO_HW_TICK(*delay_on);
+ if (*delay_off)
+ delta_off = TO_HW_TICK(*delay_off);
+
+ mt7915_led_set_config(led_cdev, delta_on, delta_off);
+
return 0;
}
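
The blink handler above converts the requested on/off times, given in milliseconds, into 10 ms hardware ticks; a request of one tick period or less falls back to the HW_TICK default. A hedged stand-alone rendering of that conversion:

/* Hedged sketch of the TO_HW_TICK conversion above: blink delays in
 * milliseconds are divided down into 10 ms hardware ticks, with any
 * request of HW_TICK or less falling back to the HW_TICK value.
 */
#include <stdio.h>

#define HW_TICK         10
#define TO_HW_TICK(_t)  (((_t) > HW_TICK) ? ((_t) / HW_TICK) : HW_TICK)

int main(void)
{
        printf("%d\n", TO_HW_TICK(500));        /* 50 */
        printf("%d\n", TO_HW_TICK(1000));       /* 100 */
        printf("%d\n", TO_HW_TICK(4));          /* falls back to 10 */
        return 0;
}
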
+static void mt7915_led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ if (!brightness)
+ mt7915_led_set_config(led_cdev, 0, 0xff);
+ else
+ mt7915_led_set_config(led_cdev, 0xff, 0);
+}
+
static void
mt7915_init_txpower(struct mt7915_dev *dev,
struct ieee80211_supported_band *sband)
@@ -232,7 +326,12 @@ mt7915_init_wiphy(struct ieee80211_hw *hw)
wiphy->reg_notifier = mt7915_regd_notifier;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BSS_COLOR);
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_VHT);
+ wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_BEACON_RATE_HE);
ieee80211_hw_set(hw, HAS_RATE_CONTROL);
ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
@@ -287,9 +386,7 @@ mt7915_mac_init_band(struct mt7915_dev *dev, u8 band)
FIELD_PREP(MT_MDP_RCFR1_RX_DROPPED_MCAST, MT_MDP_TO_HIF);
mt76_rmw(dev, MT_MDP_BNRCFR1(band), mask, set);
- mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
-
- mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
+ mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 0x680);
/* disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
}
@@ -298,7 +395,7 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
{
int i;
- mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 1536);
+ mt76_rmw_field(dev, MT_MDP_DCR1, MT_MDP_DCR1_MAX_RX_LEN, 0x400);
/* enable hardware de-agg */
mt76_set(dev, MT_MDP_DCR0, MT_MDP_DCR0_DAMSDU_EN);
@@ -307,6 +404,11 @@ static void mt7915_mac_init(struct mt7915_dev *dev)
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
for (i = 0; i < 2; i++)
mt7915_mac_init_band(dev, i);
+
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ i = dev->mt76.led_pin ? MT_LED_GPIO_MUX3 : MT_LED_GPIO_MUX2;
+ mt76_rmw_field(dev, i, MT_LED_GPIO_SEL_MASK, 4);
+ }
}
static int mt7915_txbf_init(struct mt7915_dev *dev)
@@ -350,7 +452,6 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
mphy->chainmask = dev->chainmask & ~dev->mphy.chainmask;
mphy->antenna_mask = BIT(hweight8(mphy->chainmask)) - 1;
- INIT_LIST_HEAD(&phy->stats_list);
INIT_DELAYED_WORK(&mphy->mac_work, mt7915_mac_work);
mt7915_eeprom_parse_band_config(phy);
@@ -374,6 +475,10 @@ static int mt7915_register_ext_phy(struct mt7915_dev *dev)
if (ret)
goto error;
+ ret = mt7915_init_debugfs(phy);
+ if (ret)
+ goto error;
+
return 0;
error:
@@ -480,7 +585,7 @@ static int mt7915_init_hardware(struct mt7915_dev *dev)
}
/* Beacon and mgmt frames should occupy wcid 0 */
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx)
return -ENOSPC;
@@ -525,7 +630,6 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
int vif, int nss)
{
struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
- struct ieee80211_he_mcs_nss_supp *mcs = &he_cap->he_mcs_nss_supp;
u8 c;
#ifdef CONFIG_MAC80211_MESH
@@ -577,8 +681,11 @@ mt7915_set_stream_he_txbf_caps(struct ieee80211_sta_he_cap *he_cap,
elem->phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER;
elem->phy_cap_info[4] |= IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER;
- /* num_snd_dim */
- c = (nss - 1) | (max_t(int, le16_to_cpu(mcs->tx_mcs_160), 1) << 3);
+ /* num_snd_dim
+ * for mt7915, max supported nss is 2 for bw > 80MHz
+ */
+ c = (nss - 1) |
+ IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2;
elem->phy_cap_info[5] |= c;
c = IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
@@ -613,12 +720,19 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
{
int i, idx = 0, nss = hweight8(phy->mt76->chainmask);
u16 mcs_map = 0;
+ u16 mcs_map_160 = 0;
for (i = 0; i < 8; i++) {
if (i < nss)
mcs_map |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
else
mcs_map |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
+
+ /* Can do 1/2 of NSS streams in 160 MHz mode. */
+ if (i < nss / 2)
+ mcs_map_160 |= (IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2));
+ else
+ mcs_map_160 |= (IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2));
}
for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
@@ -667,6 +781,8 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
switch (i) {
case NL80211_IFTYPE_AP:
+ he_cap_elem->mac_cap_info[0] |=
+ IEEE80211_HE_MAC_CAP0_TWT_RES;
he_cap_elem->mac_cap_info[2] |=
IEEE80211_HE_MAC_CAP2_BSR;
he_cap_elem->mac_cap_info[4] |=
@@ -677,7 +793,11 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK;
he_cap_elem->phy_cap_info[6] |=
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE |
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT;
+ he_cap_elem->phy_cap_info[9] |=
+ IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU;
break;
case NL80211_IFTYPE_STATION:
he_cap_elem->mac_cap_info[1] |=
@@ -720,10 +840,10 @@ mt7915_init_he_caps(struct mt7915_phy *phy, enum nl80211_band band,
he_mcs->rx_mcs_80 = cpu_to_le16(mcs_map);
he_mcs->tx_mcs_80 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map);
- he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map);
- he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map);
+ he_mcs->rx_mcs_160 = cpu_to_le16(mcs_map_160);
+ he_mcs->tx_mcs_160 = cpu_to_le16(mcs_map_160);
+ he_mcs->rx_mcs_80p80 = cpu_to_le16(mcs_map_160);
+ he_mcs->tx_mcs_80p80 = cpu_to_le16(mcs_map_160);
mt7915_set_stream_he_txbf_caps(he_cap, i, nss);
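
The MCS-map loop above advertises the full NSS for the 80 MHz map but only half the chains for 160/80+80 MHz. A hedged stand-alone sketch of the same bit packing, assuming the ieee80211 HE MCS map encoding of two bits per stream (2 = MCS 0-11 supported, 3 = not supported):

/* Hedged sketch of the HE MCS map packing above: 2 bits per spatial
 * stream, half the NSS advertised for the 160 MHz map.
 */
#include <stdio.h>

#define HE_MCS_SUPPORT_0_11     2
#define HE_MCS_NOT_SUPPORTED    3

static unsigned short build_mcs_map(int nss_limit)
{
        unsigned short map = 0;
        int i;

        for (i = 0; i < 8; i++) {
                int val = i < nss_limit ? HE_MCS_SUPPORT_0_11
                                        : HE_MCS_NOT_SUPPORTED;
                map |= val << (i * 2);
        }
        return map;
}

int main(void)
{
        int nss = 4;

        printf("mcs_map     = 0x%04x\n", build_mcs_map(nss));     /* 0xffaa */
        printf("mcs_map_160 = 0x%04x\n", build_mcs_map(nss / 2)); /* 0xfffa */
        return 0;
}
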
@@ -787,11 +907,11 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->phy.dev = dev;
dev->phy.mt76 = &dev->mt76.phy;
dev->mt76.phy.priv = &dev->phy;
- INIT_LIST_HEAD(&dev->phy.stats_list);
INIT_WORK(&dev->rc_work, mt7915_mac_sta_rc_work);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7915_mac_work);
INIT_LIST_HEAD(&dev->sta_rc_list);
INIT_LIST_HEAD(&dev->sta_poll_list);
+ INIT_LIST_HEAD(&dev->twt_list);
spin_lock_init(&dev->sta_poll_lock);
init_waitqueue_head(&dev->reset_wait);
@@ -816,6 +936,12 @@ int mt7915_register_device(struct mt7915_dev *dev)
dev->mt76.test_ops = &mt7915_testmode_ops;
#endif
+ /* init led callbacks */
+ if (IS_ENABLED(CONFIG_MT76_LEDS)) {
+ dev->mt76.led_cdev.brightness_set = mt7915_led_set_brightness;
+ dev->mt76.led_cdev.blink_set = mt7915_led_set_blink;
+ }
+
ret = mt76_register_device(&dev->mt76, true, mt76_rates,
ARRAY_SIZE(mt76_rates));
if (ret)
@@ -831,7 +957,7 @@ int mt7915_register_device(struct mt7915_dev *dev)
if (ret)
return ret;
- return mt7915_init_debugfs(dev);
+ return mt7915_init_debugfs(&dev->phy);
}
void mt7915_unregister_device(struct mt7915_dev *dev)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
index 2462704094b0..5fcf35f2d9fb 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c
@@ -6,6 +6,7 @@
#include "mt7915.h"
#include "../dma.h"
#include "mac.h"
+#include "mcu.h"
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
@@ -88,15 +89,14 @@ bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask)
0, 5000);
}
-static u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid)
+u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw)
{
mt76_wr(dev, MT_WTBLON_TOP_WDUCR,
FIELD_PREP(MT_WTBLON_TOP_WDUCR_GROUP, (wcid >> 7)));
- return MT_WTBL_LMAC_OFFS(wcid, 0);
+ return MT_WTBL_LMAC_OFFS(wcid, dw);
}
-/* TODO: use txfree airtime info to avoid runtime accessing in the long run */
static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
{
static const u8 ac_to_tid[] = {
@@ -107,6 +107,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
};
struct ieee80211_sta *sta;
struct mt7915_sta *msta;
+ struct rate_info *rate;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
int i;
@@ -119,8 +120,9 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
while (true) {
bool clear = false;
- u32 addr;
+ u32 addr, val;
u16 idx;
+ u8 bw;
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&sta_poll_list)) {
@@ -133,7 +135,7 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
spin_unlock_bh(&dev->sta_poll_lock);
idx = msta->wcid.idx;
- addr = mt7915_mac_wtbl_lmac_addr(dev, idx) + 20 * 4;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 20);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 tx_last = msta->airtime_ac[i];
@@ -174,6 +176,43 @@ static void mt7915_mac_sta_poll(struct mt7915_dev *dev)
ieee80211_sta_register_airtime(sta, tid, tx_cur,
rx_cur);
}
+
+ /*
+ * We don't support reading GI info from txs packets.
+ * For accurate tx status reporting and AQL improvement, we
+ * need to make sure the flags match, so poll the GI directly
+ * from the per-sta WTBL counters instead.
+ */
+ rate = &msta->wcid.rate;
+ addr = mt7915_mac_wtbl_lmac_addr(dev, idx, 7);
+ val = mt76_rr(dev, addr);
+
+ switch (rate->bw) {
+ case RATE_INFO_BW_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ case RATE_INFO_BW_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case RATE_INFO_BW_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ }
+
+ if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ u8 offs = 24 + 2 * bw;
+
+ rate->he_gi = (val & (0x3 << offs)) >> offs;
+ } else if (rate->flags &
+ (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
+ if (val & BIT(12 + bw))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ else
+ rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
+ }
}
rcu_read_unlock();
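
The per-station poll above derives the GI from WTBL DW7: HE rates carry a 2-bit GI field per bandwidth starting at bit 24, while HT/VHT rates expose a per-bandwidth SGI bit starting at bit 12. A hedged stand-alone sketch of that extraction, with bit positions taken from the driver logic above and bw indexed 0 (20 MHz) to 3 (160 MHz):

/* Hedged sketch of the WTBL DW7 GI decoding used in the sta poll. */
#include <stdio.h>

static int wtbl_he_gi(unsigned int dw7, int bw)
{
        int offs = 24 + 2 * bw;

        return (dw7 >> offs) & 0x3;
}

static int wtbl_short_gi(unsigned int dw7, int bw)
{
        return !!(dw7 & (1u << (12 + bw)));
}

int main(void)
{
        unsigned int dw7 = (1u << 14) | (2u << 28);     /* example value */

        printf("HT/VHT bw80 SGI: %d\n", wtbl_short_gi(dw7, 2)); /* 1 */
        printf("HE bw80 GI:      %d\n", wtbl_he_gi(dw7, 2));    /* 2 */
        return 0;
}
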
@@ -229,11 +268,50 @@ mt7915_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
}
static void
+mt7915_mac_decode_he_mu_radiotap(struct sk_buff *skb,
+ struct mt76_rx_status *status,
+ __le32 *rxv)
+{
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
+ HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN),
+ .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN),
+ };
+ struct ieee80211_radiotap_he_mu *he_mu = NULL;
+
+ he_mu = skb_push(skb, sizeof(mu_known));
+ memcpy(he_mu, &mu_known, sizeof(mu_known));
+
+#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
+ if (status->he_dcm)
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
+
+ he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+ le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
+
+ he_mu->ru_ch1[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU0);
+
+ if (status->bw >= RATE_INFO_BW_40) {
+ he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+ he_mu->ru_ch2[0] = le32_get_bits(rxv[3], MT_CRXV_HE_RU1);
+ }
+
+ if (status->bw >= RATE_INFO_BW_80) {
+ he_mu->ru_ch1[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU2);
+ he_mu->ru_ch2[1] = le32_get_bits(rxv[3], MT_CRXV_HE_RU3);
+ }
+}
+
+static void
mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
struct mt76_rx_status *status,
__le32 *rxv, u32 phy)
{
- /* TODO: struct ieee80211_radiotap_he_mu */
static const struct ieee80211_radiotap_he known = {
.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
HE_BITS(DATA1_DATA_DCM_KNOWN) |
@@ -241,6 +319,7 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
HE_BITS(DATA1_CODING_KNOWN) |
HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
HE_BITS(DATA1_BSS_COLOR_KNOWN),
.data2 = HE_BITS(DATA2_GI_KNOWN) |
HE_BITS(DATA2_TXBF_KNOWN) |
@@ -255,9 +334,12 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
le16_encode_bits(ltf_size,
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ he->data5 |= HE_BITS(DATA5_TXBF);
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
@@ -265,12 +347,10 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
break;
case MT_PHY_TYPE_HE_EXT_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_EXT_SU) |
@@ -280,23 +360,20 @@ mt7915_mac_decode_he_radiotap(struct sk_buff *skb,
break;
case MT_PHY_TYPE_HE_MU:
he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
mt7915_mac_decode_he_radiotap_ru(status, he, rxv);
break;
case MT_PHY_TYPE_HE_TB:
he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
- he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE1, SR_MASK, rxv[11]) |
- HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
+ he->data4 |= HE_PREP(DATA4_TB_SPTL_REUSE2, SR1_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE3, SR2_MASK, rxv[11]) |
HE_PREP(DATA4_TB_SPTL_REUSE4, SR3_MASK, rxv[11]);
@@ -610,8 +687,11 @@ mt7915_mac_fill_rx(struct mt7915_dev *dev, struct sk_buff *skb)
status->flag |= RX_FLAG_8023;
}
- if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
+ if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) {
mt7915_mac_decode_he_radiotap(skb, status, rxv, mode);
+ if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ mt7915_mac_decode_he_mu_radiotap(skb, status, rxv);
+ }
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -825,17 +905,19 @@ mt7915_mac_write_txwi_8023(struct mt7915_dev *dev, __le32 *txwi,
static void
mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct ieee80211_key_conf *key)
+ struct sk_buff *skb, struct ieee80211_key_conf *key,
+ bool *mcast)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- bool multicast = is_multicast_ether_addr(hdr->addr1);
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
__le16 fc = hdr->frame_control;
u8 fc_type, fc_stype;
u32 val;
+ *mcast = is_multicast_ether_addr(hdr->addr1);
+
if (ieee80211_is_action(fc) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
@@ -861,15 +943,16 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
val = FIELD_PREP(MT_TXD2_FRAME_TYPE, fc_type) |
FIELD_PREP(MT_TXD2_SUB_TYPE, fc_stype) |
- FIELD_PREP(MT_TXD2_MULTICAST, multicast);
+ FIELD_PREP(MT_TXD2_MULTICAST, *mcast);
- if (key && multicast && ieee80211_is_robust_mgmt_frame(skb) &&
+ if (key && *mcast && ieee80211_is_robust_mgmt_frame(skb) &&
key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
val |= MT_TXD2_BIP;
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
- if (!ieee80211_is_data(fc) || multicast)
+ if (!ieee80211_is_data(fc) || *mcast ||
+ info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
txwi[2] |= cpu_to_le32(val);
@@ -899,6 +982,51 @@ mt7915_mac_write_txwi_80211(struct mt7915_dev *dev, __le32 *txwi,
txwi[7] |= cpu_to_le32(val);
}
+static u16
+mt7915_mac_tx_rate_val(struct mt76_phy *mphy, struct ieee80211_vif *vif,
+ bool beacon, bool mcast)
+{
+ u8 mode = 0, band = mphy->chandef.chan->band;
+ int rateidx = 0, mcast_rate;
+
+ if (beacon) {
+ struct cfg80211_bitrate_mask *mask;
+
+ mask = &vif->bss_conf.beacon_tx_rate;
+ if (hweight16(mask->control[band].he_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].he_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HE_SU;
+ goto out;
+ } else if (hweight16(mask->control[band].vht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].vht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_VHT;
+ goto out;
+ } else if (hweight8(mask->control[band].ht_mcs[0]) == 1) {
+ rateidx = ffs(mask->control[band].ht_mcs[0]) - 1;
+ mode = MT_PHY_TYPE_HT;
+ goto out;
+ } else if (hweight32(mask->control[band].legacy) == 1) {
+ rateidx = ffs(mask->control[band].legacy) - 1;
+ goto legacy;
+ }
+ }
+
+ mcast_rate = vif->bss_conf.mcast_rate[band];
+ if (mcast && mcast_rate > 0)
+ rateidx = mcast_rate - 1;
+ else
+ rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+
+legacy:
+ rateidx = mt76_calculate_default_rate(mphy, rateidx);
+ mode = rateidx >> 8;
+ rateidx &= GENMASK(7, 0);
+
+out:
+ return FIELD_PREP(MT_TX_RATE_IDX, rateidx) |
+ FIELD_PREP(MT_TX_RATE_MODE, mode);
+}
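
The helper above packs the chosen rate index and PHY mode into the TXD fixed-rate field: index in bits 5:0 and mode in bits 9:6, per the MT_TX_RATE_IDX/MODE masks updated later in this patch. A hedged stand-alone equivalent of the packing:

/* Hedged sketch of the TXD fixed-rate packing above. */
#include <stdio.h>

#define MT_TX_RATE_IDX_SHIFT    0
#define MT_TX_RATE_MODE_SHIFT   6

static unsigned short tx_rate_val(unsigned int mode, unsigned int idx)
{
        return (idx & 0x3f) << MT_TX_RATE_IDX_SHIFT |
               (mode & 0xf) << MT_TX_RATE_MODE_SHIFT;
}

int main(void)
{
        /* HE-SU (mode 8), MCS 7 -> 0x207 */
        printf("0x%03x\n", tx_rate_val(8, 7));
        return 0;
}
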
+
void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid, int pid,
struct ieee80211_key_conf *key, bool beacon)
@@ -909,6 +1037,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
bool ext_phy = info->hw_queue & MT_TX_HW_QUEUE_EXT_PHY;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
+ bool mcast = false;
u16 tx_count = 15;
u32 val;
@@ -939,7 +1068,7 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
txwi[0] = cpu_to_le32(val);
- val = MT_TXD1_LONG_FORMAT |
+ val = MT_TXD1_LONG_FORMAT | MT_TXD1_VTA |
FIELD_PREP(MT_TXD1_WLAN_IDX, wcid->idx) |
FIELD_PREP(MT_TXD1_OWN_MAC, omac_idx);
@@ -971,19 +1100,14 @@ void mt7915_mac_write_txwi(struct mt7915_dev *dev, __le32 *txwi,
if (is_8023)
mt7915_mac_write_txwi_8023(dev, txwi, skb, wcid);
else
- mt7915_mac_write_txwi_80211(dev, txwi, skb, key);
+ mt7915_mac_write_txwi_80211(dev, txwi, skb, key, &mcast);
if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- u16 rate;
+ u16 rate = mt7915_mac_tx_rate_val(mphy, vif, beacon, mcast);
/* hardware won't add HTC for mgmt/ctrl frame */
txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- rate = MT7915_5G_RATE_DEFAULT;
- else
- rate = MT7915_2G_RATE_DEFAULT;
-
val = MT_TXD6_FIXED_BW |
FIELD_PREP(MT_TXD6_TX_RATE, rate);
txwi[6] |= cpu_to_le32(val);
@@ -1016,6 +1140,17 @@ int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
if (!wcid)
wcid = &dev->mt76.global_wcid;
+ if (sta) {
+ struct mt7915_sta *msta;
+
+ msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->jiffies + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->jiffies = jiffies;
+ }
+ }
+
pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
mt7915_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, pid, key,
@@ -1162,7 +1297,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
for (i = 0; i < count; i++) {
u32 msdu, info = le32_to_cpu(free->info[i]);
- u8 stat;
/*
* 1'b1: new wcid pair.
@@ -1170,7 +1304,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
*/
if (info & MT_TX_FREE_PAIR) {
struct mt7915_sta *msta;
- struct mt7915_phy *phy;
struct mt76_wcid *wcid;
u16 idx;
@@ -1182,10 +1315,7 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
continue;
msta = container_of(wcid, struct mt7915_sta, wcid);
- phy = msta->vif->phy;
spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->stats_list))
- list_add_tail(&msta->stats_list, &phy->stats_list);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
@@ -1193,8 +1323,6 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
}
msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- stat = FIELD_GET(MT_TX_FREE_STATUS, info);
-
txwi = mt76_token_release(mdev, msdu, &wake);
if (!txwi)
continue;
@@ -1219,20 +1347,27 @@ mt7915_mac_tx_free(struct mt7915_dev *dev, struct sk_buff *skb)
static bool
mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
- __le32 *txs_data)
+ __le32 *txs_data, struct mt76_sta_stats *stats)
{
+ struct ieee80211_supported_band *sband;
struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_phy *mphy;
struct ieee80211_tx_info *info;
struct sk_buff_head list;
+ struct rate_info rate = {};
struct sk_buff *skb;
+ bool cck = false;
+ u32 txrate, txs, mode;
mt76_tx_status_lock(mdev, &list);
skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
if (!skb)
- goto out;
+ goto out_no_skb;
+
+ txs = le32_to_cpu(txs_data[0]);
info = IEEE80211_SKB_CB(skb);
- if (!(txs_data[0] & le32_to_cpu(MT_TXS0_ACK_ERROR_MASK)))
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ampdu_len = 1;
@@ -1240,9 +1375,92 @@ mt7915_mac_add_txs_skb(struct mt7915_dev *dev, struct mt76_wcid *wcid, int pid,
IEEE80211_TX_STAT_ACK);
info->status.rates[0].idx = -1;
- mt76_tx_status_skb_done(mdev, skb, &list);
+
+ txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
+
+ rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
+ rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
+
+ if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
+ stats->tx_nss[rate.nss - 1]++;
+ if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
+ stats->tx_mcs[rate.mcs]++;
+
+ mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ mphy = &dev->mphy;
+ if (wcid->ext_phy && dev->mt76.phy2)
+ mphy = dev->mt76.phy2;
+
+ if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &mphy->sband_5g.sband;
+ else
+ sband = &mphy->sband_2g.sband;
+
+ rate.mcs = mt76_get_rate(mphy->dev, sband, rate.mcs, cck);
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ rate.mcs += (rate.nss - 1) * 8;
+ if (rate.mcs > 31)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate.mcs > 9)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (rate.mcs > 11)
+ goto out;
+
+ rate.he_gi = wcid->rate.he_gi;
+ rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
+ rate.flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ goto out;
+ }
+
+ stats->tx_mode[mode]++;
+
+ switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_160:
+ rate.bw = RATE_INFO_BW_160;
+ stats->tx_bw[3]++;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate.bw = RATE_INFO_BW_80;
+ stats->tx_bw[2]++;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate.bw = RATE_INFO_BW_40;
+ stats->tx_bw[1]++;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ stats->tx_bw[0]++;
+ break;
+ }
+ wcid->rate = rate;
out:
+ mt76_tx_status_skb_done(mdev, skb, &list);
+
+out_no_skb:
mt76_tx_status_unlock(mdev, &list);
return !!skb;
@@ -1279,12 +1497,13 @@ static void mt7915_mac_add_txs(struct mt7915_dev *dev, void *data)
if (!wcid)
goto out;
- mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data);
+ msta = container_of(wcid, struct mt7915_sta, wcid);
+
+ mt7915_mac_add_txs_skb(dev, wcid, pid, txs_data, &msta->stats);
if (!wcid->sta)
goto out;
- msta = container_of(wcid, struct mt7915_sta, wcid);
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&msta->poll_list))
list_add_tail(&msta->poll_list, &dev->sta_poll_list);
@@ -1333,15 +1552,11 @@ void mt7915_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
void mt7915_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
{
- struct mt7915_dev *dev;
-
if (!e->txwi) {
dev_kfree_skb_any(e->skb);
return;
}
- dev = container_of(mdev, struct mt7915_dev, mt76);
-
/* error path */
if (e->skb == DMA_DUMMY_DATA) {
struct mt76_txwi_cache *t;
@@ -1403,17 +1618,12 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
- int sifs, offset;
+ int offset;
bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- if (is_5ghz)
- sifs = 16;
- else
- sifs = 10;
-
if (ext_phy) {
coverage_class = max_t(s16, dev->phy.coverage_class,
coverage_class);
@@ -1435,11 +1645,14 @@ void mt7915_mac_set_timing(struct mt7915_phy *phy)
mt76_wr(dev, MT_TMAC_CDTR(ext_phy), cck + reg_offset);
mt76_wr(dev, MT_TMAC_ODTR(ext_phy), ofdm + reg_offset);
mt76_wr(dev, MT_TMAC_ICR0(ext_phy),
- FIELD_PREP(MT_IFS_EIFS, 360) |
+ FIELD_PREP(MT_IFS_EIFS_OFDM, is_5ghz ? 84 : 78) |
FIELD_PREP(MT_IFS_RIFS, 2) |
- FIELD_PREP(MT_IFS_SIFS, sifs) |
+ FIELD_PREP(MT_IFS_SIFS, 10) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
+ mt76_wr(dev, MT_TMAC_ICR1(ext_phy),
+ FIELD_PREP(MT_IFS_EIFS_CCK, 314));
+
if (phy->slottime < 20 || is_5ghz)
val = MT7915_CFEND_RATE_DEFAULT;
else
@@ -1580,7 +1793,7 @@ mt7915_dma_reset(struct mt7915_dev *dev)
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_rx_reset(dev, i);
- mt76_tx_status_check(&dev->mt76, NULL, true);
+ mt76_tx_status_check(&dev->mt76, true);
/* re-init prefetch settings after reset */
mt7915_dma_prefetch(dev);
@@ -1668,6 +1881,7 @@ void mt7915_mac_reset_work(struct work_struct *work)
if (phy2)
clear_bit(MT76_RESET, &phy2->mt76->state);
+ local_bh_disable();
napi_enable(&dev->mt76.napi[0]);
napi_schedule(&dev->mt76.napi[0]);
@@ -1676,6 +1890,8 @@ void mt7915_mac_reset_work(struct work_struct *work)
napi_enable(&dev->mt76.napi[2]);
napi_schedule(&dev->mt76.napi[2]);
+ local_bh_enable();
+
tasklet_schedule(&dev->irq_tasklet);
mt76_wr(dev, MT_MCU_INT_EVENT, MT_MCU_INT_EVENT_RESET_DONE);
@@ -1702,17 +1918,117 @@ void mt7915_mac_reset_work(struct work_struct *work)
MT7915_WATCHDOG_TIME);
}
-static void
-mt7915_mac_update_stats(struct mt7915_phy *phy)
+void mt7915_mac_update_stats(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
bool ext_phy = phy != &dev->phy;
- int i, aggr0, aggr1;
+ int i, aggr0, aggr1, cnt;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(ext_phy),
MT_MIB_SDR3_FCS_ERR_MASK);
+ cnt = mt76_rr(dev, MT_MIB_SDR4(ext_phy));
+ mib->rx_fifo_full_cnt += FIELD_GET(MT_MIB_SDR4_RX_FIFO_FULL_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR5(ext_phy));
+ mib->rx_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR6(ext_phy));
+ mib->channel_idle_cnt += FIELD_GET(MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR7(ext_phy));
+ mib->rx_vector_mismatch_cnt += FIELD_GET(MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR8(ext_phy));
+ mib->rx_delimiter_fail_cnt += FIELD_GET(MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR11(ext_phy));
+ mib->rx_len_mismatch_cnt += FIELD_GET(MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR12(ext_phy));
+ mib->tx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR13(ext_phy));
+ mib->tx_stop_q_empty_cnt += FIELD_GET(MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR14(ext_phy));
+ mib->tx_mpdu_attempts_cnt += FIELD_GET(MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR15(ext_phy));
+ mib->tx_mpdu_success_cnt += FIELD_GET(MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR22(ext_phy));
+ mib->rx_ampdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR23(ext_phy));
+ mib->rx_ampdu_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR24(ext_phy));
+ mib->rx_ampdu_valid_subframe_cnt += FIELD_GET(MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR25(ext_phy));
+ mib->rx_ampdu_valid_subframe_bytes_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR27(ext_phy));
+ mib->tx_rwp_fail_cnt += FIELD_GET(MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR28(ext_phy));
+ mib->tx_rwp_need_cnt += FIELD_GET(MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR29(ext_phy));
+ mib->rx_pfdrop_cnt += FIELD_GET(MT_MIB_SDR29_RX_PFDROP_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR30(ext_phy));
+ mib->rx_vec_queue_overflow_drop_cnt +=
+ FIELD_GET(MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR31(ext_phy));
+ mib->rx_ba_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_SDR32(ext_phy));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR33(ext_phy));
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_SDR34(ext_phy));
+ mib->tx_bf_cnt += FIELD_GET(MT_MIB_MU_BF_TX_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_MIB_DR8(ext_phy));
+ mib->tx_mu_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_DR9(ext_phy));
+ mib->tx_mu_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_MIB_DR11(ext_phy));
+ mib->tx_su_acked_mpdu_cnt += cnt;
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_APP_CNT(ext_phy));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, cnt);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CNT(ext_phy));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, cnt);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, cnt);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, cnt);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_RX_FB_CONT(ext_phy));
+ mib->tx_bf_rx_fb_bw = FIELD_GET(MT_ETBF_RX_FB_BW, cnt);
+ mib->tx_bf_rx_fb_nc_cnt += FIELD_GET(MT_ETBF_RX_FB_NC, cnt);
+ mib->tx_bf_rx_fb_nr_cnt += FIELD_GET(MT_ETBF_RX_FB_NR, cnt);
+
+ cnt = mt76_rr(dev, MT_ETBF_TX_NDP_BFRP(ext_phy));
+ mib->tx_bf_fb_cpl_cnt += FIELD_GET(MT_ETBF_TX_FB_CPL, cnt);
+ mib->tx_bf_fb_trig_cnt += FIELD_GET(MT_ETBF_TX_FB_TRI, cnt);
+
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ cnt = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ mib->tx_amsdu[i] += cnt;
+ mib->tx_amsdu_cnt += cnt;
+ }
+
aggr0 = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
u32 val;
@@ -1737,30 +2053,6 @@ mt7915_mac_update_stats(struct mt7915_phy *phy)
}
}
-static void
-mt7915_mac_sta_stats_work(struct mt7915_phy *phy)
-{
- struct mt7915_dev *dev = phy->dev;
- struct mt7915_sta *msta;
- LIST_HEAD(list);
-
- spin_lock_bh(&dev->sta_poll_lock);
- list_splice_init(&phy->stats_list, &list);
-
- while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7915_sta, stats_list);
- list_del_init(&msta->stats_list);
- spin_unlock_bh(&dev->sta_poll_lock);
-
- /* use MT_TX_FREE_RATE to report Tx rate for further devices */
- mt7915_mcu_get_tx_rate(dev, RATE_CTRL_RU_INFO, msta->wcid.idx);
-
- spin_lock_bh(&dev->sta_poll_lock);
- }
-
- spin_unlock_bh(&dev->sta_poll_lock);
-}
-
void mt7915_mac_sta_rc_work(struct work_struct *work)
{
struct mt7915_dev *dev = container_of(work, struct mt7915_dev, rc_work);
@@ -1776,8 +2068,8 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
while (!list_empty(&list)) {
msta = list_first_entry(&list, struct mt7915_sta, rc_list);
list_del_init(&msta->rc_list);
- changed = msta->stats.changed;
- msta->stats.changed = 0;
+ changed = msta->changed;
+ msta->changed = 0;
spin_unlock_bh(&dev->sta_poll_lock);
sta = container_of((void *)msta, struct ieee80211_sta, drv_priv);
@@ -1785,10 +2077,8 @@ void mt7915_mac_sta_rc_work(struct work_struct *work)
if (changed & (IEEE80211_RC_SUPP_RATES_CHANGED |
IEEE80211_RC_NSS_CHANGED |
- IEEE80211_RC_BW_CHANGED)) {
- mt7915_mcu_add_he(dev, vif, sta);
- mt7915_mcu_add_rate_ctrl(dev, vif, sta);
- }
+ IEEE80211_RC_BW_CHANGED))
+ mt7915_mcu_add_rate_ctrl(dev, vif, sta, true);
if (changed & IEEE80211_RC_SMPS_CHANGED)
mt7915_mcu_add_smps(dev, vif, sta);
@@ -1817,14 +2107,9 @@ void mt7915_mac_work(struct work_struct *work)
mt7915_mac_update_stats(phy);
}
- if (++phy->sta_work_count == 10) {
- phy->sta_work_count = 0;
- mt7915_mac_sta_stats_work(phy);
- }
-
mutex_unlock(&mphy->dev->mutex);
- mt76_tx_status_check(mphy->dev, NULL, false);
+ mt76_tx_status_check(mphy->dev, false);
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7915_WATCHDOG_TIME);
@@ -1961,3 +2246,182 @@ stop:
mt7915_dfs_stop_radar_detector(phy);
return 0;
}
+
+static int
+mt7915_mac_twt_duration_align(int duration)
+{
+ return duration << 8;
+}
+
+static u64
+mt7915_mac_twt_sched_list_add(struct mt7915_dev *dev,
+ struct mt7915_twt_flow *flow)
+{
+ struct mt7915_twt_flow *iter, *iter_next;
+ u32 duration = flow->duration << 8;
+ u64 start_tsf;
+
+ iter = list_first_entry_or_null(&dev->twt_list,
+ struct mt7915_twt_flow, list);
+ if (!iter || !iter->sched || iter->start_tsf > duration) {
+ /* add flow as first entry in the list */
+ list_add(&flow->list, &dev->twt_list);
+ return 0;
+ }
+
+ list_for_each_entry_safe(iter, iter_next, &dev->twt_list, list) {
+ start_tsf = iter->start_tsf +
+ mt7915_mac_twt_duration_align(iter->duration);
+ if (list_is_last(&iter->list, &dev->twt_list))
+ break;
+
+ if (!iter_next->sched ||
+ iter_next->start_tsf > start_tsf + duration) {
+ list_add(&flow->list, &iter->list);
+ goto out;
+ }
+ }
+
+ /* add flow as last entry in the list */
+ list_add_tail(&flow->list, &dev->twt_list);
+out:
+ return start_tsf;
+}
+
+static int mt7915_mac_check_twt_req(struct ieee80211_twt_setup *twt)
+{
+ struct ieee80211_twt_params *twt_agrt;
+ u64 interval, duration;
+ u16 mantissa;
+ u8 exp;
+
+ /* only individual agreement supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_NEG_TYPE_BROADCAST)
+ return -EOPNOTSUPP;
+
+ /* only 256us unit supported */
+ if (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT)
+ return -EOPNOTSUPP;
+
+ twt_agrt = (struct ieee80211_twt_params *)twt->params;
+
+ /* explicit agreement not supported */
+ if (!(twt_agrt->req_type & cpu_to_le16(IEEE80211_TWT_REQTYPE_IMPLICIT)))
+ return -EOPNOTSUPP;
+
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP,
+ le16_to_cpu(twt_agrt->req_type));
+ mantissa = le16_to_cpu(twt_agrt->mantissa);
+ duration = twt_agrt->min_twt_dur << 8;
+
+ interval = (u64)mantissa << exp;
+ if (interval < duration)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
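
The sanity check above compares the requested wake duration, given in 256 us units (hence the << 8), against the wake interval, mantissa * 2^exp microseconds; an agreement whose service period would not fit inside its interval is rejected. A hedged stand-alone sketch of the same arithmetic:

/* Hedged sketch of the TWT request check above, all values in us. */
#include <stdio.h>
#include <stdint.h>

static int twt_req_ok(uint8_t min_twt_dur, uint16_t mantissa, uint8_t exp)
{
        uint64_t duration = (uint64_t)min_twt_dur << 8; /* 256 us units */
        uint64_t interval = (uint64_t)mantissa << exp;

        return interval >= duration;
}

int main(void)
{
        /* 64 * 256 us wake time every 512 * 2^10 us (~524 ms) interval */
        printf("%s\n", twt_req_ok(64, 512, 10) ? "accept" : "reject");
        printf("%s\n", twt_req_ok(255, 2, 4) ? "accept" : "reject");
        return 0;
}
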
+
+void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt)
+{
+ enum ieee80211_twt_setup_cmd setup_cmd = TWT_SETUP_CMD_REJECT;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct ieee80211_twt_params *twt_agrt = (void *)twt->params;
+ u16 req_type = le16_to_cpu(twt_agrt->req_type);
+ enum ieee80211_twt_setup_cmd sta_setup_cmd;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_twt_flow *flow;
+ int flowid, table_id;
+ u8 exp;
+
+ if (mt7915_mac_check_twt_req(twt))
+ goto out;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (dev->twt.n_agrt == MT7915_MAX_TWT_AGRT)
+ goto unlock;
+
+ if (hweight8(msta->twt.flowid_mask) == ARRAY_SIZE(msta->twt.flow))
+ goto unlock;
+
+ flowid = ffs(~msta->twt.flowid_mask) - 1;
+ le16p_replace_bits(&twt_agrt->req_type, flowid,
+ IEEE80211_TWT_REQTYPE_FLOWID);
+
+ table_id = ffs(~dev->twt.table_mask) - 1;
+ exp = FIELD_GET(IEEE80211_TWT_REQTYPE_WAKE_INT_EXP, req_type);
+ sta_setup_cmd = FIELD_GET(IEEE80211_TWT_REQTYPE_SETUP_CMD, req_type);
+
+ flow = &msta->twt.flow[flowid];
+ memset(flow, 0, sizeof(*flow));
+ INIT_LIST_HEAD(&flow->list);
+ flow->wcid = msta->wcid.idx;
+ flow->table_id = table_id;
+ flow->id = flowid;
+ flow->duration = twt_agrt->min_twt_dur;
+ flow->mantissa = twt_agrt->mantissa;
+ flow->exp = exp;
+ flow->protection = !!(req_type & IEEE80211_TWT_REQTYPE_PROTECTION);
+ flow->flowtype = !!(req_type & IEEE80211_TWT_REQTYPE_FLOWTYPE);
+ flow->trigger = !!(req_type & IEEE80211_TWT_REQTYPE_TRIGGER);
+
+ if (sta_setup_cmd == TWT_SETUP_CMD_REQUEST ||
+ sta_setup_cmd == TWT_SETUP_CMD_SUGGEST) {
+ u64 interval = (u64)le16_to_cpu(twt_agrt->mantissa) << exp;
+ u64 flow_tsf, curr_tsf;
+ u32 rem;
+
+ flow->sched = true;
+ flow->start_tsf = mt7915_mac_twt_sched_list_add(dev, flow);
+ curr_tsf = __mt7915_get_tsf(hw, msta->vif);
+ div_u64_rem(curr_tsf - flow->start_tsf, interval, &rem);
+ flow_tsf = curr_tsf + interval - rem;
+ twt_agrt->twt = cpu_to_le64(flow_tsf);
+ } else {
+ list_add_tail(&flow->list, &dev->twt_list);
+ }
+ flow->tsf = le64_to_cpu(twt_agrt->twt);
+
+ if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow, MCU_TWT_AGRT_ADD))
+ goto unlock;
+
+ setup_cmd = TWT_SETUP_CMD_ACCEPT;
+ dev->twt.table_mask |= BIT(table_id);
+ msta->twt.flowid_mask |= BIT(flowid);
+ dev->twt.n_agrt++;
+
+unlock:
+ mutex_unlock(&dev->mt76.mutex);
+out:
+ le16p_replace_bits(&twt_agrt->req_type, setup_cmd,
+ IEEE80211_TWT_REQTYPE_SETUP_CMD);
+ twt->control = (twt->control & IEEE80211_TWT_CONTROL_WAKE_DUR_UNIT) |
+ (twt->control & IEEE80211_TWT_CONTROL_RX_DISABLED);
+}
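
For a requested or suggested schedule, the target wake time returned above is aligned to the next service-period boundary after the current TSF: the distance from the flow's start TSF is reduced modulo the interval and the remainder is pushed forward. A hedged arithmetic sketch:

/* Hedged sketch of the TWT target-time alignment above: given the
 * current TSF, the flow start TSF and the wake interval (all in us),
 * return the next interval boundary strictly after the current TSF.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t next_twt(uint64_t curr_tsf, uint64_t start_tsf,
                         uint64_t interval)
{
        uint64_t rem = (curr_tsf - start_tsf) % interval;

        return curr_tsf + interval - rem;
}

int main(void)
{
        /* start at 1000 us, 524288 us interval, now at 2000000 us */
        printf("%llu\n",
               (unsigned long long)next_twt(2000000, 1000, 524288));
        return 0;
}
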
+
+void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
+ struct mt7915_sta *msta,
+ u8 flowid)
+{
+ struct mt7915_twt_flow *flow;
+
+ lockdep_assert_held(&dev->mt76.mutex);
+
+ if (flowid >= ARRAY_SIZE(msta->twt.flow))
+ return;
+
+ if (!(msta->twt.flowid_mask & BIT(flowid)))
+ return;
+
+ flow = &msta->twt.flow[flowid];
+ if (mt7915_mcu_twt_agrt_update(dev, msta->vif, flow,
+ MCU_TWT_AGRT_DELETE))
+ return;
+
+ list_del_init(&flow->list);
+ msta->twt.flowid_mask &= ~BIT(flowid);
+ dev->twt.table_mask &= ~BIT(flow->table_id);
+ dev->twt.n_agrt--;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
index eb1885f4bd8e..7a2c740d1464 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.h
@@ -117,6 +117,7 @@ enum rx_pkt_type {
#define MT_PRXV_TX_DCM BIT(4)
#define MT_PRXV_TX_ER_SU_106T BIT(5)
#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_TXBF BIT(10)
#define MT_PRXV_HT_AD_CODE BIT(11)
#define MT_PRXV_HE_RU_ALLOC_L GENMASK(31, 28)
#define MT_PRXV_HE_RU_ALLOC_H GENMASK(3, 0)
@@ -133,7 +134,14 @@ enum rx_pkt_type {
#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
#define MT_CRXV_HE_UPLINK BIT(31)
+#define MT_CRXV_HE_RU0 GENMASK(7, 0)
+#define MT_CRXV_HE_RU1 GENMASK(15, 8)
+#define MT_CRXV_HE_RU2 GENMASK(23, 16)
+#define MT_CRXV_HE_RU3 GENMASK(31, 24)
+
+#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
@@ -272,7 +280,8 @@ enum tx_mcu_port_q_idx {
#define MT_TX_RATE_MODE GENMASK(9, 6)
#define MT_TX_RATE_SU_EXT_TONE BIT(5)
#define MT_TX_RATE_DCM BIT(4)
-#define MT_TX_RATE_IDX GENMASK(3, 0)
+/* VHT/HE only use bits 0-3 */
+#define MT_TX_RATE_IDX GENMASK(5, 0)
#define MT_TXP_MAX_BUF_NUM 6
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
index c25f8da590dd..057ab27b7083 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c
@@ -172,6 +172,9 @@ static void mt7915_init_bitrate_mask(struct ieee80211_vif *vif)
int i;
for (i = 0; i < ARRAY_SIZE(mvif->bitrate_mask.control); i++) {
+ mvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+ mvif->bitrate_mask.control[i].he_gi = GENMASK(7, 0);
+ mvif->bitrate_mask.control[i].he_ltf = GENMASK(7, 0);
mvif->bitrate_mask.control[i].legacy = GENMASK(31, 0);
memset(mvif->bitrate_mask.control[i].ht_mcs, GENMASK(7, 0),
sizeof(mvif->bitrate_mask.control[i].ht_mcs));
@@ -215,7 +218,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
mvif->phy = phy;
mvif->band_idx = ext_phy;
- if (ext_phy)
+ if (dev->mt76.phy2)
mvif->wmm_idx = ext_phy * (MT7915_MAX_WMM_SETS / 2) +
mvif->idx % (MT7915_MAX_WMM_SETS / 2);
else
@@ -231,12 +234,13 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
idx = MT7915_WTBL_RESERVED - mvif->idx;
INIT_LIST_HEAD(&mvif->sta.rc_list);
- INIT_LIST_HEAD(&mvif->sta.stats_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.ext_phy = mvif->band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_packet_id_init(&mvif->sta.wcid);
+
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -252,6 +256,7 @@ static int mt7915_add_interface(struct ieee80211_hw *hw,
vif->offload_flags |= IEEE80211_OFFLOAD_ENCAP_4ADDR;
mt7915_init_bitrate_mask(vif);
+ memset(&mvif->cap, -1, sizeof(mvif->cap));
out:
mutex_unlock(&dev->mt76.mutex);
@@ -291,6 +296,8 @@ static void mt7915_remove_interface(struct ieee80211_hw *hw,
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
static void mt7915_init_dfs_state(struct mt7915_phy *phy)
@@ -538,6 +545,29 @@ static void mt7915_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&dev->mt76.mutex);
}
+static void
+mt7915_update_bss_color(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *bss_color)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP: {
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+
+ if (mvif->omac_idx > HW_BSSID_MAX)
+ return;
+ fallthrough;
+ }
+ case NL80211_IFTYPE_STATION:
+ mt7915_mcu_update_bss_color(dev, vif, bss_color);
+ break;
+ default:
+ break;
+ }
+}
+
static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -586,6 +616,9 @@ static void mt7915_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_HE_OBSS_PD)
mt7915_mcu_add_obss_spr(dev, vif, info->he_obss_pd.enable);
+ if (changed & BSS_CHANGED_HE_BSS_COLOR)
+ mt7915_update_bss_color(hw, vif, &info->he_bss_color);
+
if (changed & (BSS_CHANGED_BEACON |
BSS_CHANGED_BEACON_ENABLED))
mt7915_mcu_add_beacon(hw, vif, info->enable_beacon);
@@ -613,19 +646,18 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
int ret, idx;
- idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA - 1);
+ idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7915_WTBL_STA);
if (idx < 0)
return -ENOSPC;
INIT_LIST_HEAD(&msta->rc_list);
- INIT_LIST_HEAD(&msta->stats_list);
INIT_LIST_HEAD(&msta->poll_list);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
msta->wcid.ext_phy = mvif->band_idx;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->stats.jiffies = jiffies;
+ msta->jiffies = jiffies;
mt7915_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -634,7 +666,7 @@ int mt7915_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (ret)
return ret;
- return mt7915_mcu_add_sta_adv(dev, vif, sta, true);
+ return mt7915_mcu_add_rate_ctrl(dev, vif, sta, false);
}
void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -642,18 +674,19 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
{
struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ int i;
- mt7915_mcu_add_sta_adv(dev, vif, sta, false);
mt7915_mcu_add_sta(dev, vif, sta, false);
mt7915_mac_wtbl_update(dev, msta->wcid.idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
+ for (i = 0; i < ARRAY_SIZE(msta->twt.flow); i++)
+ mt7915_mac_twt_teardown_flow(dev, msta, i);
+
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
- if (!list_empty(&msta->stats_list))
- list_del_init(&msta->stats_list);
if (!list_empty(&msta->rc_list))
list_del_init(&msta->rc_list);
spin_unlock_bh(&dev->sta_poll_lock);
@@ -781,22 +814,19 @@ mt7915_get_stats(struct ieee80211_hw *hw,
struct mib_stats *mib = &phy->mib;
mutex_lock(&dev->mt76.mutex);
+
stats->dot11RTSSuccessCount = mib->rts_cnt;
stats->dot11RTSFailureCount = mib->rts_retries_cnt;
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
- memset(mib, 0, sizeof(*mib));
-
mutex_unlock(&dev->mt76.mutex);
return 0;
}
-static u64
-mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif)
{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_dev *dev = mt7915_hw_dev(hw);
struct mt7915_phy *phy = mt7915_hw_phy(hw);
bool band = phy != &dev->phy;
@@ -806,7 +836,7 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
} tsf;
u16 n;
- mutex_lock(&dev->mt76.mutex);
+ lockdep_assert_held(&dev->mt76.mutex);
n = mvif->omac_idx > HW_BSSID_MAX ? HW_BSSID_0 : mvif->omac_idx;
/* TSF software read */
@@ -815,9 +845,21 @@ mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
tsf.t32[0] = mt76_rr(dev, MT_LPON_UTTR0(band));
tsf.t32[1] = mt76_rr(dev, MT_LPON_UTTR1(band));
+ return tsf.t64;
+}
+
+static u64
+mt7915_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ u64 ret;
+
+ mutex_lock(&dev->mt76.mutex);
+ ret = __mt7915_get_tsf(hw, mvif);
mutex_unlock(&dev->mt76.mutex);
- return tsf.t64;
+ return ret;
}
static void
@@ -926,7 +968,7 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
{
struct mt7915_phy *phy = mt7915_hw_phy(hw);
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_sta_stats *stats = &msta->stats;
+ struct rate_info *txrate = &msta->wcid.rate;
struct rate_info rxrate = {};
if (!mt7915_mcu_get_rx_rate(phy, vif, sta, &rxrate)) {
@@ -934,20 +976,20 @@ static void mt7915_sta_statistics(struct ieee80211_hw *hw,
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
}
- if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ if (!txrate->legacy && !txrate->flags)
return;
- if (stats->tx_rate.legacy) {
- sinfo->txrate.legacy = stats->tx_rate.legacy;
+ if (txrate->legacy) {
+ sinfo->txrate.legacy = txrate->legacy;
} else {
- sinfo->txrate.mcs = stats->tx_rate.mcs;
- sinfo->txrate.nss = stats->tx_rate.nss;
- sinfo->txrate.bw = stats->tx_rate.bw;
- sinfo->txrate.he_gi = stats->tx_rate.he_gi;
- sinfo->txrate.he_dcm = stats->tx_rate.he_dcm;
- sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc;
+ sinfo->txrate.mcs = txrate->mcs;
+ sinfo->txrate.nss = txrate->nss;
+ sinfo->txrate.bw = txrate->bw;
+ sinfo->txrate.he_gi = txrate->he_gi;
+ sinfo->txrate.he_dcm = txrate->he_dcm;
+ sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
}
- sinfo->txrate.flags = stats->tx_rate.flags;
+ sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
@@ -955,16 +997,13 @@ static void mt7915_sta_rc_work(void *data, struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct mt7915_dev *dev = msta->vif->phy->dev;
- struct ieee80211_hw *hw = msta->vif->phy->mt76->hw;
u32 *changed = data;
spin_lock_bh(&dev->sta_poll_lock);
- msta->stats.changed |= *changed;
+ msta->changed |= *changed;
if (list_empty(&msta->rc_list))
list_add_tail(&msta->rc_list, &dev->sta_rc_list);
spin_unlock_bh(&dev->sta_poll_lock);
-
- ieee80211_queue_work(hw, &dev->rc_work);
}
static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
@@ -972,7 +1011,11 @@ static void mt7915_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta,
u32 changed)
{
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+
mt7915_sta_rc_work(&changed, sta);
+ ieee80211_queue_work(hw, &dev->rc_work);
}
static int
@@ -980,22 +1023,22 @@ mt7915_set_bitrate_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- enum nl80211_band band = mvif->phy->mt76->chandef.chan->band;
- u32 changed;
-
- if (mask->control[band].gi == NL80211_TXRATE_FORCE_LGI)
- return -EINVAL;
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_dev *dev = phy->dev;
+ u32 changed = IEEE80211_RC_SUPP_RATES_CHANGED;
- changed = IEEE80211_RC_SUPP_RATES_CHANGED;
mvif->bitrate_mask = *mask;
- /* Update firmware rate control to add a boundary on top of table
- * to limit the rate selection for each peer, so when set bitrates
- * vht-mcs-5 1:9, which actually means nss = 1 mcs = 0~9. This only
- * applies to data frames as for the other mgmt, mcast, bcast still
- * use legacy rates as it is.
+ /* if multiple rates across different preambles are given we can
+ * reconfigure this info with all peers using sta_rec command with
+ * the below exception cases.
+ * - single rate : if a rate is passed along with different preambles,
+ * we select the highest one as fixed rate. i.e VHT MCS for VHT peers.
+ * - multiple rates: if it's not in range format i.e 0-{7,8,9} for VHT
+ * then multiple MCS setting (MCS 4,5,6) is not supported.
*/
ieee80211_iterate_stations_atomic(hw, mt7915_sta_rc_work, &changed);
+ ieee80211_queue_work(hw, &dev->rc_work);
return 0;
}
@@ -1032,6 +1075,240 @@ static void mt7915_sta_set_decap_offload(struct ieee80211_hw *hw,
mt7915_mcu_sta_update_hdr_trans(dev, vif, sta);
}
+static const char mt7915_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "tx_ampdu_cnt",
+ "tx_stop_q_empty_cnt",
+ "tx_mpdu_attempts",
+ "tx_mpdu_success",
+ "tx_rwp_fail_cnt",
+ "tx_rwp_need_cnt",
+ "tx_pkt_ebf_cnt",
+ "tx_pkt_ibf_cnt",
+ "tx_ampdu_len:0-1",
+ "tx_ampdu_len:2-10",
+ "tx_ampdu_len:11-19",
+ "tx_ampdu_len:20-28",
+ "tx_ampdu_len:29-37",
+ "tx_ampdu_len:38-46",
+ "tx_ampdu_len:47-55",
+ "tx_ampdu_len:56-79",
+ "tx_ampdu_len:80-103",
+ "tx_ampdu_len:104-127",
+ "tx_ampdu_len:128-151",
+ "tx_ampdu_len:152-175",
+ "tx_ampdu_len:176-199",
+ "tx_ampdu_len:200-223",
+ "tx_ampdu_len:224-247",
+ "ba_miss_count",
+ "tx_beamformer_ppdu_iBF",
+ "tx_beamformer_ppdu_eBF",
+ "tx_beamformer_rx_feedback_all",
+ "tx_beamformer_rx_feedback_he",
+ "tx_beamformer_rx_feedback_vht",
+ "tx_beamformer_rx_feedback_ht",
+ "tx_beamformer_rx_feedback_bw", /* zero based idx: 20, 40, 80, 160 */
+ "tx_beamformer_rx_feedback_nc",
+ "tx_beamformer_rx_feedback_nr",
+ "tx_beamformee_ok_feedback_pkts",
+ "tx_beamformee_feedback_trig",
+ "tx_mu_beamforming",
+ "tx_mu_mpdu",
+ "tx_mu_successful_mpdu",
+ "tx_su_successful_mpdu",
+ "tx_msdu_pack_1",
+ "tx_msdu_pack_2",
+ "tx_msdu_pack_3",
+ "tx_msdu_pack_4",
+ "tx_msdu_pack_5",
+ "tx_msdu_pack_6",
+ "tx_msdu_pack_7",
+ "tx_msdu_pack_8",
+
+ /* rx counters */
+ "rx_fifo_full_cnt",
+ "rx_mpdu_cnt",
+ "channel_idle_cnt",
+ "rx_vector_mismatch_cnt",
+ "rx_delimiter_fail_cnt",
+ "rx_len_mismatch_cnt",
+ "rx_ampdu_cnt",
+ "rx_ampdu_bytes_cnt",
+ "rx_ampdu_valid_subframe_cnt",
+ "rx_ampdu_valid_subframe_b_cnt",
+ "rx_pfdrop_cnt",
+ "rx_vec_queue_overflow_drop_cnt",
+ "rx_ba_cnt",
+
+ /* per vif counters */
+ "v_tx_mode_cck",
+ "v_tx_mode_ofdm",
+ "v_tx_mode_ht",
+ "v_tx_mode_ht_gf",
+ "v_tx_mode_vht",
+ "v_tx_mode_he_su",
+ "v_tx_mode_he_ext_su",
+ "v_tx_mode_he_tb",
+ "v_tx_mode_he_mu",
+ "v_tx_bw_20",
+ "v_tx_bw_40",
+ "v_tx_bw_80",
+ "v_tx_bw_160",
+ "v_tx_mcs_0",
+ "v_tx_mcs_1",
+ "v_tx_mcs_2",
+ "v_tx_mcs_3",
+ "v_tx_mcs_4",
+ "v_tx_mcs_5",
+ "v_tx_mcs_6",
+ "v_tx_mcs_7",
+ "v_tx_mcs_8",
+ "v_tx_mcs_9",
+ "v_tx_mcs_10",
+ "v_tx_mcs_11",
+};
+
+#define MT7915_SSTATS_LEN ARRAY_SIZE(mt7915_gstrings_stats)
+
+/* Ethtool related API */
+static
+void mt7915_get_et_strings(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset == ETH_SS_STATS)
+ memcpy(data, *mt7915_gstrings_stats,
+ sizeof(mt7915_gstrings_stats));
+}
+
+static
+int mt7915_get_et_sset_count(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int sset)
+{
+ if (sset == ETH_SS_STATS)
+ return MT7915_SSTATS_LEN;
+
+ return 0;
+}
+
+static void mt7915_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
+{
+ struct mt76_ethtool_worker_info *wi = wi_data;
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+
+ if (msta->vif->idx != wi->idx)
+ return;
+
+ mt76_ethtool_worker(wi, &msta->stats);
+}
+
+static
+void mt7915_get_et_stats(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+ struct mt7915_phy *phy = mt7915_hw_phy(hw);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt76_ethtool_worker_info wi = {
+ .data = data,
+ .idx = mvif->idx,
+ };
+ struct mib_stats *mib = &phy->mib;
+ /* See mt7915_ampdu_stat_read_phy, etc */
+ bool ext_phy = phy != &dev->phy;
+ int i, n, ei = 0;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ mt7915_mac_update_stats(phy);
+
+ data[ei++] = mib->tx_ampdu_cnt;
+ data[ei++] = mib->tx_stop_q_empty_cnt;
+ data[ei++] = mib->tx_mpdu_attempts_cnt;
+ data[ei++] = mib->tx_mpdu_success_cnt;
+ data[ei++] = mib->tx_rwp_fail_cnt;
+ data[ei++] = mib->tx_rwp_need_cnt;
+ data[ei++] = mib->tx_pkt_ebf_cnt;
+ data[ei++] = mib->tx_pkt_ibf_cnt;
+
+ /* Tx ampdu stat */
+ n = ext_phy ? ARRAY_SIZE(dev->mt76.aggr_stats) / 2 : 0;
+ for (i = 0; i < 15 /*ARRAY_SIZE(bound)*/; i++)
+ data[ei++] = dev->mt76.aggr_stats[i + n];
+
+ data[ei++] = phy->mib.ba_miss_cnt;
+
+ /* Tx Beamformer monitor */
+ data[ei++] = mib->tx_bf_ibf_ppdu_cnt;
+ data[ei++] = mib->tx_bf_ebf_ppdu_cnt;
+
+ /* Tx Beamformer Rx feedback monitor */
+ data[ei++] = mib->tx_bf_rx_fb_all_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_he_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_vht_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_ht_cnt;
+
+ data[ei++] = mib->tx_bf_rx_fb_bw;
+ data[ei++] = mib->tx_bf_rx_fb_nc_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_nr_cnt;
+
+ /* Tx Beamformee Rx NDPA & Tx feedback report */
+ data[ei++] = mib->tx_bf_fb_cpl_cnt;
+ data[ei++] = mib->tx_bf_fb_trig_cnt;
+
+ /* Tx SU & MU counters */
+ data[ei++] = mib->tx_bf_cnt;
+ data[ei++] = mib->tx_mu_mpdu_cnt;
+ data[ei++] = mib->tx_mu_acked_mpdu_cnt;
+ data[ei++] = mib->tx_su_acked_mpdu_cnt;
+
+ /* Tx amsdu info (pack-count histogram) */
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++)
+ data[ei++] = mib->tx_amsdu[i];
+
+ /* rx counters */
+ data[ei++] = mib->rx_fifo_full_cnt;
+ data[ei++] = mib->rx_mpdu_cnt;
+ data[ei++] = mib->channel_idle_cnt;
+ data[ei++] = mib->rx_vector_mismatch_cnt;
+ data[ei++] = mib->rx_delimiter_fail_cnt;
+ data[ei++] = mib->rx_len_mismatch_cnt;
+ data[ei++] = mib->rx_ampdu_cnt;
+ data[ei++] = mib->rx_ampdu_bytes_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_cnt;
+ data[ei++] = mib->rx_ampdu_valid_subframe_bytes_cnt;
+ data[ei++] = mib->rx_pfdrop_cnt;
+ data[ei++] = mib->rx_vec_queue_overflow_drop_cnt;
+ data[ei++] = mib->rx_ba_cnt;
+
+ /* Add values for all stations owned by this vif */
+ wi.initial_stat_idx = ei;
+ ieee80211_iterate_stations_atomic(hw, mt7915_ethtool_worker, &wi);
+
+ mutex_unlock(&dev->mt76.mutex);
+
+ if (wi.sta_count == 0)
+ return;
+
+ ei += wi.worker_stat_count;
+ if (ei != MT7915_SSTATS_LEN)
+ dev_err(dev->mt76.dev, "ei: %d MT7915_SSTATS_LEN: %d",
+ ei, (int)MT7915_SSTATS_LEN);
+}
+
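
A small illustrative sketch (an assumption about the layout, not part of the patch) of the aggr_stats indexing used in mt7915_get_et_stats() above: the shared histogram array holds both bands' A-MPDU length buckets back to back, and the ext phy reads from the second half.

#include <linux/types.h>

/* pick the per-band slice of the shared A-MPDU length histogram;
 * the main phy starts at index 0, the ext phy at the array midpoint
 */
static const u32 *aggr_stats_slice(const u32 *aggr_stats, size_t total,
				   bool ext_phy)
{
	return ext_phy ? aggr_stats + total / 2 : aggr_stats;
}
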
+static void
+mt7915_twt_teardown_request(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ u8 flowid)
+{
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_dev *dev = mt7915_hw_dev(hw);
+
+ mutex_lock(&dev->mt76.mutex);
+ mt7915_mac_twt_teardown_flow(dev, msta, flowid);
+ mutex_unlock(&dev->mt76.mutex);
+}
+
const struct ieee80211_ops mt7915_ops = {
.tx = mt7915_tx,
.start = mt7915_start,
@@ -1056,6 +1333,9 @@ const struct ieee80211_ops mt7915_ops = {
.get_txpower = mt76_get_txpower,
.channel_switch_beacon = mt7915_channel_switch_beacon,
.get_stats = mt7915_get_stats,
+ .get_et_sset_count = mt7915_get_et_sset_count,
+ .get_et_stats = mt7915_get_et_stats,
+ .get_et_strings = mt7915_get_et_strings,
.get_tsf = mt7915_get_tsf,
.set_tsf = mt7915_set_tsf,
.offset_tsf = mt7915_offset_tsf,
@@ -1067,6 +1347,8 @@ const struct ieee80211_ops mt7915_ops = {
.sta_statistics = mt7915_sta_statistics,
.sta_set_4addr = mt7915_sta_set_4addr,
.sta_set_decap_offload = mt7915_sta_set_decap_offload,
+ .add_twt_setup = mt7915_mac_add_twt_setup,
+ .twt_teardown_request = mt7915_twt_teardown_request,
CFG80211_TESTMODE_CMD(mt76_testmode_cmd)
CFG80211_TESTMODE_DUMP(mt76_testmode_dump)
#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
index 43960770a9af..899957b9d0f1 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c
@@ -416,8 +416,7 @@ exit:
return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0);
}
-static void
-mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
+int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
{
struct {
__le32 args[3];
@@ -429,7 +428,7 @@ mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3)
},
};
- mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), true);
+ return mt76_mcu_send_msg(&dev->mt76, cmd, &req, sizeof(req), false);
}
static void
@@ -488,152 +487,6 @@ mt7915_mcu_rx_radar_detected(struct mt7915_dev *dev, struct sk_buff *skb)
dev->hw_pattern++;
}
-static int
-mt7915_mcu_tx_rate_parse(struct mt76_phy *mphy, struct mt7915_mcu_ra_info *ra,
- struct rate_info *rate, u16 r)
-{
- struct ieee80211_supported_band *sband;
- u16 ru_idx = le16_to_cpu(ra->ru_idx);
- bool cck = false;
-
- rate->mcs = FIELD_GET(MT_RA_RATE_MCS, r);
- rate->nss = FIELD_GET(MT_RA_RATE_NSS, r) + 1;
-
- switch (FIELD_GET(MT_RA_RATE_TX_MODE, r)) {
- case MT_PHY_TYPE_CCK:
- cck = true;
- fallthrough;
- case MT_PHY_TYPE_OFDM:
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &mphy->sband_5g.sband;
- else
- sband = &mphy->sband_2g.sband;
-
- rate->mcs = mt76_get_rate(mphy->dev, sband, rate->mcs, cck);
- rate->legacy = sband->bitrates[rate->mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- rate->mcs += (rate->nss - 1) * 8;
- if (rate->mcs > 31)
- return -EINVAL;
-
- rate->flags = RATE_INFO_FLAGS_MCS;
- if (ra->gi)
- rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- if (rate->mcs > 9)
- return -EINVAL;
-
- rate->flags = RATE_INFO_FLAGS_VHT_MCS;
- if (ra->gi)
- rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- if (ra->gi > NL80211_RATE_INFO_HE_GI_3_2 || rate->mcs > 11)
- return -EINVAL;
-
- rate->he_gi = ra->gi;
- rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
- rate->flags = RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- return -EINVAL;
- }
-
- if (ru_idx) {
- switch (ru_idx) {
- case 1 ... 2:
- rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_996;
- break;
- case 3 ... 6:
- rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_484;
- break;
- case 7 ... 14:
- rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_242;
- break;
- default:
- rate->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106;
- break;
- }
- rate->bw = RATE_INFO_BW_HE_RU;
- } else {
- u8 bw = mt7915_mcu_chan_bw(&mphy->chandef) -
- FIELD_GET(MT_RA_RATE_BW, r);
-
- switch (bw) {
- case IEEE80211_STA_RX_BW_160:
- rate->bw = RATE_INFO_BW_160;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate->bw = RATE_INFO_BW_40;
- break;
- default:
- rate->bw = RATE_INFO_BW_20;
- break;
- }
- }
-
- return 0;
-}
-
-static void
-mt7915_mcu_tx_rate_report(struct mt7915_dev *dev, struct sk_buff *skb)
-{
- struct mt7915_mcu_ra_info *ra = (struct mt7915_mcu_ra_info *)skb->data;
- struct rate_info rate = {}, prob_rate = {};
- u16 probe = le16_to_cpu(ra->prob_up_rate);
- u16 attempts = le16_to_cpu(ra->attempts);
- u16 curr = le16_to_cpu(ra->curr_rate);
- u16 wcidx = le16_to_cpu(ra->wlan_idx);
- struct ieee80211_tx_status status = {};
- struct mt76_phy *mphy = &dev->mphy;
- struct mt7915_sta_stats *stats;
- struct mt7915_sta *msta;
- struct mt76_wcid *wcid;
-
- if (wcidx >= MT76_N_WCIDS)
- return;
-
- wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
- if (!wcid)
- return;
-
- msta = container_of(wcid, struct mt7915_sta, wcid);
- stats = &msta->stats;
-
- if (msta->wcid.ext_phy && dev->mt76.phy2)
- mphy = dev->mt76.phy2;
-
- /* current rate */
- if (!mt7915_mcu_tx_rate_parse(mphy, ra, &rate, curr))
- stats->tx_rate = rate;
-
- /* probing rate */
- if (!mt7915_mcu_tx_rate_parse(mphy, ra, &prob_rate, probe))
- stats->prob_rate = prob_rate;
-
- if (attempts) {
- u16 success = le16_to_cpu(ra->success);
-
- stats->per = 1000 * (attempts - success) / attempts;
- }
-
- status.sta = wcid_to_sta(wcid);
- if (!status.sta)
- return;
-
- status.rate = &stats->tx_rate;
- ieee80211_tx_status_ext(mphy->hw, &status);
-}
-
static void
mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
{
@@ -658,6 +511,15 @@ mt7915_mcu_rx_log_message(struct mt7915_dev *dev, struct sk_buff *skb)
}
static void
+mt7915_mcu_cca_finish(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ if (!vif->color_change_active)
+ return;
+
+ ieee80211_color_change_finish(vif);
+}
+
+static void
mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
{
struct mt7915_mcu_rxd *rxd = (struct mt7915_mcu_rxd *)skb->data;
@@ -672,12 +534,14 @@ mt7915_mcu_rx_ext_event(struct mt7915_dev *dev, struct sk_buff *skb)
case MCU_EXT_EVENT_CSA_NOTIFY:
mt7915_mcu_rx_csa_notify(dev, skb);
break;
- case MCU_EXT_EVENT_RATE_REPORT:
- mt7915_mcu_tx_rate_report(dev, skb);
- break;
case MCU_EXT_EVENT_FW_LOG_2_HOST:
mt7915_mcu_rx_log_message(dev, skb);
break;
+ case MCU_EXT_EVENT_BCC_NOTIFY:
+ ieee80211_iterate_active_interfaces_atomic(dev->mt76.hw,
+ IEEE80211_IFACE_ITER_RESUME_ALL,
+ mt7915_mcu_cca_finish, dev);
+ break;
default:
break;
}
@@ -706,7 +570,7 @@ void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb)
rxd->ext_eid == MCU_EXT_EVENT_FW_LOG_2_HOST ||
rxd->ext_eid == MCU_EXT_EVENT_ASSERT_DUMP ||
rxd->ext_eid == MCU_EXT_EVENT_PS_SYNC ||
- rxd->ext_eid == MCU_EXT_EVENT_RATE_REPORT ||
+ rxd->ext_eid == MCU_EXT_EVENT_BCC_NOTIFY ||
!rxd->seq)
mt7915_mcu_rx_unsolicited_event(dev, skb);
else
@@ -721,7 +585,7 @@ mt7915_mcu_alloc_sta_req(struct mt7915_dev *dev, struct mt7915_vif *mvif,
.bss_idx = mvif->idx,
.wlan_idx_lo = msta ? to_wcid_lo(msta->wcid.idx) : 0,
.wlan_idx_hi = msta ? to_wcid_hi(msta->wcid.idx) : 0,
- .muar_idx = msta ? mvif->omac_idx : 0,
+ .muar_idx = msta && msta->wcid.sta ? mvif->omac_idx : 0xe,
.is_tlv_append = 1,
};
struct sk_buff *skb;
@@ -757,7 +621,7 @@ mt7915_mcu_alloc_wtbl_req(struct mt7915_dev *dev, struct mt7915_sta *msta,
}
if (sta_hdr)
- sta_hdr->len = cpu_to_le16(sizeof(hdr));
+ le16_add_cpu(&sta_hdr->len, sizeof(hdr));
return skb_put_data(nskb, &hdr, sizeof(hdr));
}
@@ -923,12 +787,15 @@ static void mt7915_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy,
struct mt7915_he_obss_narrow_bw_ru_data *data = _data;
const struct element *elem;
+ rcu_read_lock();
elem = ieee80211_bss_get_elem(bss, WLAN_EID_EXT_CAPABILITY);
- if (!elem || elem->datalen < 10 ||
+ if (!elem || elem->datalen <= 10 ||
!(elem->data[10] &
WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT))
data->tolerated = false;
+
+ rcu_read_unlock();
}
static bool mt7915_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw,
@@ -1201,7 +1068,7 @@ mt7915_mcu_sta_key_tlv(struct mt7915_sta *msta, struct sk_buff *skb,
u8 cipher;
cipher = mt7915_mcu_get_cipher(key->cipher);
- if (cipher == MT_CIPHER_NONE)
+ if (cipher == MCU_CIPHER_NONE)
return -EOPNOTSUPP;
sec_key = &sec->key[0];
@@ -1317,7 +1184,7 @@ mt7915_mcu_wtbl_ba_tlv(struct sk_buff *skb,
ba->rst_ba_sb = 1;
}
- if (enable && tx)
+ if (enable)
ba->ba_winsize = cpu_to_le16(params->buf_size);
}
@@ -1469,17 +1336,21 @@ mt7915_mcu_sta_basic_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
static void
-mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
- struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
enum nl80211_band band = msta->vif->phy->mt76->chandef.chan->band;
const u16 *mcs_mask = msta->vif->bitrate_mask.control[band].he_mcs;
struct sta_rec_he *he;
struct tlv *tlv;
u32 cap = 0;
+ if (!sta->he_cap.has_he)
+ return;
+
tlv = mt7915_mcu_add_tlv(skb, STA_REC_HE, sizeof(*he));
he = (struct sta_rec_he *)tlv;
@@ -1504,8 +1375,8 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G))
cap |= STA_REC_HE_CAP_BW20_RU242_SUPPORT;
- if (elem->phy_cap_info[1] &
- IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ if (mvif->cap.ldpc && (elem->phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD))
cap |= STA_REC_HE_CAP_LDPC;
if (elem->phy_cap_info[1] &
@@ -1525,6 +1396,10 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
cap |= STA_REC_HE_CAP_LE_EQ_80M_RX_STBC;
if (elem->phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB)
+ cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
+
+ if (elem->phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE)
cap |= STA_REC_HE_CAP_PARTIAL_BW_EXT_RANGE;
@@ -1549,10 +1424,6 @@ mt7915_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
cap |= STA_REC_HE_CAP_ER_SU_PPDU_1LTF_8US_GI;
if (elem->phy_cap_info[9] &
- IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK)
- cap |= STA_REC_HE_CAP_TRIG_CQI_FK;
-
- if (elem->phy_cap_info[9] &
IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU)
cap |= STA_REC_HE_CAP_TX_1024QAM_UNDER_RU242;
@@ -1640,19 +1511,45 @@ mt7915_mcu_sta_uapsd_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
- struct ieee80211_he_cap_elem *elem = &he_cap->he_cap_elem;
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct ieee80211_he_cap_elem *elem = &sta->he_cap.he_cap_elem;
struct sta_rec_muru *muru;
struct tlv *tlv;
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
+
+ if (!sta->vht_cap.vht_supported)
+ return;
+
tlv = mt7915_mcu_add_tlv(skb, STA_REC_MURU, sizeof(*muru));
muru = (struct sta_rec_muru *)tlv;
- muru->cfg.ofdma_dl_en = true;
- muru->cfg.mimo_dl_en = true;
+ muru->cfg.mimo_dl_en = mvif->cap.he_mu_ebfer ||
+ mvif->cap.vht_mu_ebfer ||
+ mvif->cap.vht_mu_ebfee;
+
+ muru->mimo_dl.vht_mu_bfee =
+ !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+
+ if (!sta->he_cap.has_he)
+ return;
+
+ muru->mimo_dl.partial_bw_dl_mimo =
+ HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+
+ muru->cfg.mimo_ul_en = true;
+ muru->mimo_ul.full_ul_mimo =
+ HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
+ muru->mimo_ul.partial_ul_mimo =
+ HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+
+ muru->cfg.ofdma_dl_en = true;
muru->ofdma_dl.punc_pream_rx =
HE_PHY(CAP1_PREAMBLE_PUNC_RX_MASK, elem->phy_cap_info[1]);
muru->ofdma_dl.he_20m_in_40m_2g =
@@ -1661,9 +1558,6 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
HE_PHY(CAP8_20MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
muru->ofdma_dl.he_80m_in_160m =
HE_PHY(CAP8_80MHZ_IN_160MHZ_HE_PPDU, elem->phy_cap_info[8]);
- muru->ofdma_dl.lt16_sigb = 0;
- muru->ofdma_dl.rx_su_comp_sigb = 0;
- muru->ofdma_dl.rx_su_non_comp_sigb = 0;
muru->ofdma_ul.t_frame_dur =
HE_MAC(CAP1_TF_MAC_PAD_DUR_MASK, elem->mac_cap_info[1]);
@@ -1671,18 +1565,18 @@ mt7915_mcu_sta_muru_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
HE_MAC(CAP2_MU_CASCADING, elem->mac_cap_info[2]);
muru->ofdma_ul.uo_ra =
HE_MAC(CAP3_OFDMA_RA, elem->mac_cap_info[3]);
- muru->ofdma_ul.he_2x996_tone = 0;
- muru->ofdma_ul.rx_t_frame_11ac = 0;
+}
- muru->mimo_dl.vht_mu_bfee =
- !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
- muru->mimo_dl.partial_bw_dl_mimo =
- HE_PHY(CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO, elem->phy_cap_info[6]);
+static void
+mt7915_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+{
+ struct sta_rec_ht *ht;
+ struct tlv *tlv;
- muru->mimo_ul.full_ul_mimo =
- HE_PHY(CAP2_UL_MU_FULL_MU_MIMO, elem->phy_cap_info[2]);
- muru->mimo_ul.partial_ul_mimo =
- HE_PHY(CAP2_UL_MU_PARTIAL_MU_MIMO, elem->phy_cap_info[2]);
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
+
+ ht = (struct sta_rec_ht *)tlv;
+ ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
}
static void
@@ -1691,6 +1585,9 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
struct sta_rec_vht *vht;
struct tlv *tlv;
+ if (!sta->vht_cap.vht_supported)
+ return;
+
tlv = mt7915_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht));
vht = (struct sta_rec_vht *)tlv;
@@ -1700,12 +1597,17 @@ mt7915_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
}
static void
-mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
+mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sta_rec_amsdu *amsdu;
struct tlv *tlv;
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return;
+
if (!sta->max_amsdu_len)
return;
@@ -1718,44 +1620,6 @@ mt7915_mcu_sta_amsdu_tlv(struct sk_buff *skb, struct ieee80211_sta *sta)
msta->wcid.amsdu = true;
}
-static bool
-mt7915_hw_amsdu_supported(struct ieee80211_vif *vif)
-{
- switch (vif->type) {
- case NL80211_IFTYPE_AP:
- case NL80211_IFTYPE_STATION:
- return true;
- default:
- return false;
- }
-}
-
-static void
-mt7915_mcu_sta_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
- struct ieee80211_sta *sta, struct ieee80211_vif *vif)
-{
- struct tlv *tlv;
-
- /* starec ht */
- if (sta->ht_cap.ht_supported) {
- struct sta_rec_ht *ht;
-
- tlv = mt7915_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht));
- ht = (struct sta_rec_ht *)tlv;
- ht->ht_cap = cpu_to_le16(sta->ht_cap.cap);
-
- if (mt7915_hw_amsdu_supported(vif))
- mt7915_mcu_sta_amsdu_tlv(skb, sta);
- }
-
- /* starec he */
- if (sta->he_cap.has_he)
- mt7915_mcu_sta_he_tlv(skb, sta);
-
- /* starec uapsd */
- mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
-}
-
static void
mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
void *sta_wtbl, void *wtbl_tlv)
@@ -1766,15 +1630,15 @@ mt7915_mcu_wtbl_smps_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_SMPS, sizeof(*smps),
wtbl_tlv, sta_wtbl);
smps = (struct wtbl_smps *)tlv;
-
- if (sta->smps_mode == IEEE80211_SMPS_DYNAMIC)
- smps->smps = true;
+ smps->smps = (sta->smps_mode == IEEE80211_SMPS_DYNAMIC);
}
static void
-mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- void *sta_wtbl, void *wtbl_tlv)
+mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, void *sta_wtbl,
+ void *wtbl_tlv)
{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct wtbl_ht *ht = NULL;
struct tlv *tlv;
@@ -1783,7 +1647,8 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_HT, sizeof(*ht),
wtbl_tlv, sta_wtbl);
ht = (struct wtbl_ht *)tlv;
- ht->ldpc = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
+ ht->ldpc = mvif->cap.ldpc &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING);
ht->af = sta->ht_cap.ampdu_factor;
ht->mm = sta->ht_cap.ampdu_density;
ht->ht = true;
@@ -1797,7 +1662,8 @@ mt7915_mcu_wtbl_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
tlv = mt7915_mcu_add_nested_tlv(skb, WTBL_VHT, sizeof(*vht),
wtbl_tlv, sta_wtbl);
vht = (struct wtbl_vht *)tlv;
- vht->ldpc = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ vht->ldpc = mvif->cap.ldpc &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
vht->vht = true;
af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
@@ -1838,6 +1704,32 @@ mt7915_mcu_wtbl_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif,
}
}
+static int
+mt7915_mcu_sta_wtbl_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_sta *msta;
+ struct wtbl_req_hdr *wtbl_hdr;
+ struct tlv *tlv;
+
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
+ tlv, &skb);
+ if (IS_ERR(wtbl_hdr))
+ return PTR_ERR(wtbl_hdr);
+
+ mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, tlv, wtbl_hdr);
+ mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, tlv, wtbl_hdr);
+
+ if (sta)
+ mt7915_mcu_wtbl_ht_tlv(skb, vif, sta, tlv, wtbl_hdr);
+
+ return 0;
+}
+
int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -1887,10 +1779,48 @@ int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
+static inline bool
+mt7915_is_ebf_supported(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, bool bfee)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ int tx_ant = hweight8(phy->mt76->chainmask) - 1;
+
+ if (vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_AP)
+ return false;
+
+ if (!bfee && tx_ant < 2)
+ return false;
+
+ if (sta->he_cap.has_he) {
+ struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
+
+ if (bfee)
+ return mvif->cap.he_su_ebfee &&
+ HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ else
+ return mvif->cap.he_su_ebfer &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ }
+
+ if (sta->vht_cap.vht_supported) {
+ u32 cap = sta->vht_cap.cap;
+
+ if (bfee)
+ return mvif->cap.vht_su_ebfee &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ else
+ return mvif->cap.vht_su_ebfer &&
+ (cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ }
+
+ return false;
+}
+
static void
mt7915_mcu_sta_sounding_rate(struct sta_rec_bf *bf)
{
- bf->bf_cap = MT_EBF;
bf->sounding_phy = MT_PHY_TYPE_OFDM;
bf->ndp_rate = 0; /* mcs0 */
bf->ndpa_rate = MT7915_CFEND_RATE_DEFAULT; /* ofdm 24m */
@@ -1905,9 +1835,8 @@ mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
u8 n = 0;
bf->tx_mode = MT_PHY_TYPE_HT;
- bf->bf_cap = MT_IBF;
- if (mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF &&
+ if ((mcs->tx_params & IEEE80211_HT_MCS_TX_RX_DIFF) &&
(mcs->tx_params & IEEE80211_HT_MCS_TX_DEFINED))
n = FIELD_GET(IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK,
mcs->tx_params);
@@ -1918,8 +1847,8 @@ mt7915_mcu_sta_bfer_ht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
else if (mcs->rx_mask[1])
n = 1;
- bf->nr = hweight8(phy->mt76->chainmask) - 1;
- bf->nc = min_t(u8, bf->nr, n);
+ bf->nrow = hweight8(phy->mt76->chainmask) - 1;
+ bf->ncol = min_t(u8, bf->nrow, n);
bf->ibf_ncol = n;
}
@@ -1936,23 +1865,23 @@ mt7915_mcu_sta_bfer_vht(struct ieee80211_sta *sta, struct mt7915_phy *phy,
bf->tx_mode = MT_PHY_TYPE_VHT;
if (explicit) {
- u8 bfee_nr, bfer_nr;
+ u8 sts, snd_dim;
mt7915_mcu_sta_sounding_rate(bf);
- bfee_nr = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
- pc->cap);
- bfer_nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+
+ sts = FIELD_GET(IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK,
+ pc->cap);
+ snd_dim = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
vc->cap);
- bf->nr = min_t(u8, min_t(u8, bfer_nr, bfee_nr), tx_ant);
- bf->nc = min_t(u8, nss_mcs, bf->nr);
- bf->ibf_ncol = bf->nc;
+ bf->nrow = min_t(u8, min_t(u8, snd_dim, sts), tx_ant);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
- bf->nr = 1;
+ bf->nrow = 1;
} else {
- bf->bf_cap = MT_IBF;
- bf->nr = tx_ant;
- bf->nc = min_t(u8, nss_mcs, bf->nr);
+ bf->nrow = tx_ant;
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
bf->ibf_ncol = nss_mcs;
if (sta->bandwidth == IEEE80211_STA_RX_BW_160)
@@ -1970,21 +1899,23 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
const struct ieee80211_he_cap_elem *ve = &vc->he_cap_elem;
u16 mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80);
u8 nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
- u8 bfee_nr, bfer_nr;
+ u8 snd_dim, sts;
bf->tx_mode = MT_PHY_TYPE_HE_SU;
+
mt7915_mcu_sta_sounding_rate(bf);
+
bf->trigger_su = HE_PHY(CAP6_TRIG_SU_BEAMFORMING_FB,
pe->phy_cap_info[6]);
bf->trigger_mu = HE_PHY(CAP6_TRIG_MU_BEAMFORMING_PARTIAL_BW_FB,
pe->phy_cap_info[6]);
- bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
ve->phy_cap_info[5]);
- bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
- pe->phy_cap_info[4]);
- bf->nr = min_t(u8, bfer_nr, bfee_nr);
- bf->nc = min_t(u8, nss_mcs, bf->nr);
- bf->ibf_ncol = bf->nc;
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[4]);
+ bf->nrow = min_t(u8, snd_dim, sts);
+ bf->ncol = min_t(u8, nss_mcs, bf->nrow);
+ bf->ibf_ncol = bf->ncol;
if (sta->bandwidth != IEEE80211_STA_RX_BW_160)
return;
@@ -1995,7 +1926,7 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_160);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
- bf->nc_bw160 = nss_mcs;
+ bf->ncol_bw160 = nss_mcs;
}
if (pe->phy_cap_info[0] &
@@ -2003,25 +1934,27 @@ mt7915_mcu_sta_bfer_he(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
mcs_map = le16_to_cpu(pc->he_mcs_nss_supp.rx_mcs_80p80);
nss_mcs = mt7915_mcu_get_sta_nss(mcs_map);
- if (bf->nc_bw160)
- bf->nc_bw160 = min_t(u8, bf->nc_bw160, nss_mcs);
+ if (bf->ncol_bw160)
+ bf->ncol_bw160 = min_t(u8, bf->ncol_bw160, nss_mcs);
else
- bf->nc_bw160 = nss_mcs;
+ bf->ncol_bw160 = nss_mcs;
}
- bfer_nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
+ snd_dim = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK,
ve->phy_cap_info[5]);
- bfee_nr = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
- pe->phy_cap_info[4]);
+ sts = HE_PHY(CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK,
+ pe->phy_cap_info[4]);
- bf->nr_bw160 = min_t(int, bfer_nr, bfee_nr);
+ bf->nrow_bw160 = min_t(int, snd_dim, sts);
}
static void
-mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct ieee80211_vif *vif, struct mt7915_phy *phy,
- bool enable, bool explicit)
+mt7915_mcu_sta_bfer_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_phy *phy =
+ mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bf *bf;
struct tlv *tlv;
@@ -2031,43 +1964,42 @@ mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
{2, 4, 4, 0}, /* 3x1, 3x2, 3x3, 3x4 */
{3, 5, 6, 0} /* 4x1, 4x2, 4x3, 4x4 */
};
+ bool ebf;
-#define MT_BFER_FREE cpu_to_le16(GENMASK(15, 0))
+ ebf = mt7915_is_ebf_supported(phy, vif, sta, false);
+ if (!ebf && !dev->ibf)
+ return;
tlv = mt7915_mcu_add_tlv(skb, STA_REC_BF, sizeof(*bf));
bf = (struct sta_rec_bf *)tlv;
- if (!enable) {
- bf->pfmu = MT_BFER_FREE;
- return;
- }
-
/* he: eBF only, in accordance with spec
* vht: support eBF and iBF
* ht: iBF only, since mac80211 lacks eBF support
*/
- if (sta->he_cap.has_he && explicit)
+ if (sta->he_cap.has_he && ebf)
mt7915_mcu_sta_bfer_he(sta, vif, phy, bf);
else if (sta->vht_cap.vht_supported)
- mt7915_mcu_sta_bfer_vht(sta, phy, bf, explicit);
+ mt7915_mcu_sta_bfer_vht(sta, phy, bf, ebf);
else if (sta->ht_cap.ht_supported)
mt7915_mcu_sta_bfer_ht(sta, phy, bf);
else
return;
+ bf->bf_cap = ebf ? ebf : dev->ibf << 1;
bf->bw = sta->bandwidth;
bf->ibf_dbw = sta->bandwidth;
bf->ibf_nrow = tx_ant;
- if (!explicit && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->nc)
+ if (!ebf && sta->bandwidth <= IEEE80211_STA_RX_BW_40 && !bf->ncol)
bf->ibf_timeout = 0x48;
else
bf->ibf_timeout = 0x18;
- if (explicit && bf->nr != tx_ant)
- bf->mem_20m = matrix[tx_ant][bf->nc];
+ if (ebf && bf->nrow != tx_ant)
+ bf->mem_20m = matrix[tx_ant][bf->ncol];
else
- bf->mem_20m = matrix[bf->nr][bf->nc];
+ bf->mem_20m = matrix[bf->nrow][bf->ncol];
switch (sta->bandwidth) {
case IEEE80211_STA_RX_BW_160:
@@ -2084,13 +2016,19 @@ mt7915_mcu_sta_bfer_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
}
static void
-mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
- struct mt7915_phy *phy)
+mt7915_mcu_sta_bfee_tlv(struct mt7915_dev *dev, struct sk_buff *skb,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta)
{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_phy *phy =
+ mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
int tx_ant = hweight8(phy->mt76->chainmask) - 1;
struct sta_rec_bfee *bfee;
struct tlv *tlv;
- u8 nr = 0;
+ u8 nrow = 0;
+
+ if (!mt7915_is_ebf_supported(phy, vif, sta, true))
+ return;
tlv = mt7915_mcu_add_tlv(skb, STA_REC_BFEE, sizeof(*bfee));
bfee = (struct sta_rec_bfee *)tlv;
@@ -2098,94 +2036,137 @@ mt7915_mcu_sta_bfee_tlv(struct sk_buff *skb, struct ieee80211_sta *sta,
if (sta->he_cap.has_he) {
struct ieee80211_he_cap_elem *pe = &sta->he_cap.he_cap_elem;
- nr = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
- pe->phy_cap_info[5]);
+ nrow = HE_PHY(CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ pe->phy_cap_info[5]);
} else if (sta->vht_cap.vht_supported) {
struct ieee80211_sta_vht_cap *pc = &sta->vht_cap;
- nr = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
- pc->cap);
+ nrow = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ pc->cap);
}
/* reply with identity matrix to avoid 2x2 BF negative gain */
- bfee->fb_identity_matrix = !!(nr == 1 && tx_ant == 2);
+ bfee->fb_identity_matrix = (nrow == 1 && tx_ant == 2);
}
-static int
-mt7915_mcu_add_txbf(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
+int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *data, u32 field)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct mt7915_phy *phy;
+ struct sta_phy *phy = data;
+ struct sta_rec_ra_fixed *ra;
struct sk_buff *skb;
- int r, len;
- bool ebfee = 0, ebf = 0;
-
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return 0;
-
- phy = mvif->band_idx ? mt7915_ext_phy(dev) : &dev->phy;
+ struct tlv *tlv;
+ int len = sizeof(struct sta_req_hdr) + sizeof(*ra);
- if (sta->he_cap.has_he) {
- struct ieee80211_he_cap_elem *pe;
- const struct ieee80211_he_cap_elem *ve;
- const struct ieee80211_sta_he_cap *vc;
-
- pe = &sta->he_cap.he_cap_elem;
- vc = mt7915_get_he_phy_cap(phy, vif);
- ve = &vc->he_cap_elem;
-
- ebfee = !!(HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]) &&
- HE_PHY(CAP4_SU_BEAMFORMEE, ve->phy_cap_info[4]));
- ebf = !!(HE_PHY(CAP3_SU_BEAMFORMER, ve->phy_cap_info[3]) &&
- HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]));
- } else if (sta->vht_cap.vht_supported) {
- struct ieee80211_sta_vht_cap *pc;
- struct ieee80211_sta_vht_cap *vc;
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
- pc = &sta->vht_cap;
- vc = &phy->mt76->sband_5g.sband.vht_cap;
+ tlv = mt7915_mcu_add_tlv(skb, STA_REC_RA_UPDATE, sizeof(*ra));
+ ra = (struct sta_rec_ra_fixed *)tlv;
- ebfee = !!((pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
- (vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
- ebf = !!((vc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
- (pc->cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE));
+ switch (field) {
+ case RATE_PARAM_AUTO:
+ break;
+ case RATE_PARAM_FIXED:
+ case RATE_PARAM_FIXED_MCS:
+ case RATE_PARAM_FIXED_GI:
+ case RATE_PARAM_FIXED_HE_LTF:
+ ra->phy = *phy;
+ break;
+ default:
+ break;
}
+ ra->field = cpu_to_le32(field);
- /* must keep each tag independent */
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+}
- /* starec bf */
- if (ebf || dev->ibf) {
- len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bf);
+static int
+mt7915_mcu_add_rate_ctrl_fixed(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct cfg80211_chan_def *chandef = &mvif->phy->mt76->chandef;
+ struct cfg80211_bitrate_mask *mask = &mvif->bitrate_mask;
+ enum nl80211_band band = chandef->chan->band;
+ struct sta_phy phy = {};
+ int ret, nrates = 0;
+
+#define __sta_phy_bitrate_mask_check(_mcs, _gi, _he) \
+ do { \
+ u8 i, gi = mask->control[band]._gi; \
+ gi = (_he) ? gi : gi == NL80211_TXRATE_FORCE_SGI; \
+ for (i = 0; i <= sta->bandwidth; i++) { \
+ phy.sgi |= gi << (i << (_he)); \
+ phy.he_ltf |= mask->control[band].he_ltf << (i << (_he));\
+ } \
+ for (i = 0; i < ARRAY_SIZE(mask->control[band]._mcs); i++) \
+ nrates += hweight16(mask->control[band]._mcs[i]); \
+ phy.mcs = ffs(mask->control[band]._mcs[0]) - 1; \
+ } while (0)
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ if (sta->he_cap.has_he) {
+ __sta_phy_bitrate_mask_check(he_mcs, he_gi, 1);
+ } else if (sta->vht_cap.vht_supported) {
+ __sta_phy_bitrate_mask_check(vht_mcs, gi, 0);
+ } else if (sta->ht_cap.ht_supported) {
+ __sta_phy_bitrate_mask_check(ht_mcs, gi, 0);
+ } else {
+ nrates = hweight32(mask->control[band].legacy);
+ phy.mcs = ffs(mask->control[band].legacy) - 1;
+ }
+#undef __sta_phy_bitrate_mask_check
- mt7915_mcu_sta_bfer_tlv(skb, sta, vif, phy, enable, ebf);
+ /* fall back to auto rate control */
+ if (mask->control[band].gi == NL80211_TXRATE_DEFAULT_GI &&
+ mask->control[band].he_gi == GENMASK(7, 0) &&
+ mask->control[band].he_ltf == GENMASK(7, 0) &&
+ nrates != 1)
+ return 0;
- r = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (r)
- return r;
+ /* fixed single rate */
+ if (nrates == 1) {
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_MCS);
+ if (ret)
+ return ret;
}
- /* starec bfee */
- if (ebfee) {
- len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_bfee);
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* fixed GI */
+ if (mask->control[band].gi != NL80211_TXRATE_DEFAULT_GI ||
+ mask->control[band].he_gi != GENMASK(7, 0)) {
+ struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ u32 addr;
+
+ /* The firmware only updates TXCMD and doesn't take the WTBL into
+ * account, so the driver should update it here to reflect the
+ * actual tx rate the hardware sends out.
+ */
+ addr = mt7915_mac_wtbl_lmac_addr(dev, msta->wcid.idx, 7);
+ if (sta->he_cap.has_he)
+ mt76_rmw_field(dev, addr, GENMASK(31, 24), phy.sgi);
+ else
+ mt76_rmw_field(dev, addr, GENMASK(15, 12), phy.sgi);
- mt7915_mcu_sta_bfee_tlv(skb, sta, phy);
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_GI);
+ if (ret)
+ return ret;
+ }
- r = mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
- if (r)
- return r;
+ /* fixed HE_LTF */
+ if (mask->control[band].he_ltf != GENMASK(7, 0)) {
+ ret = mt7915_mcu_set_fixed_rate_ctrl(dev, vif, sta, &phy,
+ RATE_PARAM_FIXED_HE_LTF);
+ if (ret)
+ return ret;
}
return 0;
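
For clarity, a minimal sketch of what the shift inside __sta_phy_bitrate_mask_check() above does with the GI selection (helper name is illustrative): non-HE rates carry one SGI bit per bandwidth, while HE rates carry a two-bit GI value per bandwidth, hence the `i << (_he)` stride.

#include <linux/types.h>

/* replicate the chosen GI across every bandwidth up to the peer's
 * maximum (0 = 20 MHz ... 3 = 160 MHz); HE carries a 2-bit GI value
 * per bandwidth, HT/VHT a single SGI bit, hence the doubled stride
 */
static u8 pack_sgi(u8 gi, int max_bw, bool he)
{
	u8 sgi = 0;
	int i;

	for (i = 0; i <= max_bw; i++)
		sgi |= gi << (i << he);

	return sgi;
}
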
@@ -2233,8 +2214,6 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
}
if (sta->ht_cap.ht_supported) {
- const u8 *mcs_mask = mask->control[band].ht_mcs;
-
ra->supp_mode |= MODE_HT;
ra->af = sta->ht_cap.ampdu_factor;
ra->ht_gf = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD);
@@ -2248,15 +2227,16 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_TX_STBC;
if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
cap |= STA_CAP_RX_STBC;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ if (mvif->cap.ldpc &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
cap |= STA_CAP_LDPC;
- mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs, mcs_mask);
+ mt7915_mcu_set_sta_ht_mcs(sta, ra->ht_mcs,
+ mask->control[band].ht_mcs);
ra->supp_ht_mcs = *(__le32 *)ra->ht_mcs;
}
if (sta->vht_cap.vht_supported) {
- const u16 *mcs_mask = mask->control[band].vht_mcs;
u8 af;
ra->supp_mode |= MODE_VHT;
@@ -2273,10 +2253,12 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
cap |= STA_CAP_VHT_TX_STBC;
if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_1)
cap |= STA_CAP_VHT_RX_STBC;
- if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ if (mvif->cap.ldpc &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC))
cap |= STA_CAP_VHT_LDPC;
- mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs, mcs_mask);
+ mt7915_mcu_set_sta_vht_mcs(sta, ra->supp_vht_mcs,
+ mask->control[band].vht_mcs);
}
if (sta->he_cap.has_he) {
@@ -2288,44 +2270,40 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev,
}
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
+ struct ieee80211_sta *sta, bool changed)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
struct sk_buff *skb;
- int len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_ra);
+ int ret;
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
+ MT7915_STA_UPDATE_MAX_SIZE);
if (IS_ERR(skb))
return PTR_ERR(skb);
- mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct sk_buff *skb;
- int len;
-
- if (!sta->he_cap.has_he)
- return 0;
-
- len = sizeof(struct sta_req_hdr) + sizeof(struct sta_rec_he);
+ /* The firmware rate-control algorithm refers to sta_rec_he for HE
+ * control; once dev->rc_work changes the settings, the driver should
+ * also update sta_rec_he here.
+ */
+ if (sta->he_cap.has_he && changed)
+ mt7915_mcu_sta_he_tlv(skb, sta, vif);
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta, len);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
+ /* sta_rec_ra accommodates BW, NSS and only the MCS range format,
+ * i.e. 0-{7,8,9} for VHT.
+ */
+ mt7915_mcu_sta_rate_ctrl_tlv(skb, dev, vif, sta);
- mt7915_mcu_sta_he_tlv(skb, sta);
+ ret = mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(STA_REC_UPDATE), true);
+ if (ret)
+ return ret;
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
+ /* sta_rec_ra_fixed accommodates a single rate, (HE) GI and HE_LTF,
+ * and applies them as the peer's fixed rate parameters, which
+ * override sta_rec_ra and the firmware rate-control algorithm.
+ */
+ return mt7915_mcu_add_rate_ctrl_fixed(dev, vif, sta);
}
static int
@@ -2334,7 +2312,7 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
{
#define MT_STA_BSS_GROUP 1
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
+ struct mt7915_sta *msta;
struct {
__le32 action;
u8 wlan_idx_lo;
@@ -2345,75 +2323,24 @@ mt7915_mcu_add_group(struct mt7915_dev *dev, struct ieee80211_vif *vif,
u8 rsv1[8];
} __packed req = {
.action = cpu_to_le32(MT_STA_BSS_GROUP),
- .wlan_idx_lo = to_wcid_lo(msta->wcid.idx),
- .wlan_idx_hi = to_wcid_hi(msta->wcid.idx),
.val = cpu_to_le32(mvif->idx % 16),
};
+ msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
+ req.wlan_idx_lo = to_wcid_lo(msta->wcid.idx);
+ req.wlan_idx_hi = to_wcid_hi(msta->wcid.idx);
+
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(SET_DRR_CTRL), &req,
sizeof(req), true);
}
-static int
-mt7915_mcu_add_mu(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct mt7915_sta *msta = (struct mt7915_sta *)sta->drv_priv;
- struct sk_buff *skb;
- int ret;
-
- if (!sta->vht_cap.vht_supported && !sta->he_cap.has_he)
- return 0;
-
- ret = mt7915_mcu_add_group(dev, vif, sta);
- if (ret)
- return ret;
-
- skb = mt7915_mcu_alloc_sta_req(dev, mvif, msta,
- MT7915_STA_UPDATE_MAX_SIZE);
- if (IS_ERR(skb))
- return PTR_ERR(skb);
-
- /* wait until TxBF and MU ready to update stare vht */
-
- /* starec muru */
- mt7915_mcu_sta_muru_tlv(skb, sta);
- /* starec vht */
- mt7915_mcu_sta_vht_tlv(skb, sta);
-
- return mt76_mcu_skb_send_msg(&dev->mt76, skb,
- MCU_EXT_CMD(STA_REC_UPDATE), true);
-}
-
-int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable)
-{
- int ret;
-
- if (!sta)
- return 0;
-
- /* must keep the order */
- ret = mt7915_mcu_add_txbf(dev, vif, sta, enable);
- if (ret || !enable)
- return ret;
-
- ret = mt7915_mcu_add_mu(dev, vif, sta);
- if (ret)
- return ret;
-
- return mt7915_mcu_add_rate_ctrl(dev, vif, sta);
-}
-
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable)
{
struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
- struct wtbl_req_hdr *wtbl_hdr;
struct mt7915_sta *msta;
- struct tlv *sta_wtbl;
struct sk_buff *skb;
+ int ret;
msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta;
@@ -2422,24 +2349,42 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
if (IS_ERR(skb))
return PTR_ERR(skb);
+ /* starec basic */
mt7915_mcu_sta_basic_tlv(skb, vif, sta, enable);
- if (enable && sta)
- mt7915_mcu_sta_tlv(dev, skb, sta, vif);
+ if (!enable)
+ goto out;
- sta_wtbl = mt7915_mcu_add_tlv(skb, STA_REC_WTBL, sizeof(struct tlv));
+ /* tag order is in accordance with firmware dependency. */
+ if (sta && sta->ht_cap.ht_supported) {
+ /* starec bfer */
+ mt7915_mcu_sta_bfer_tlv(dev, skb, vif, sta);
+ /* starec ht */
+ mt7915_mcu_sta_ht_tlv(skb, sta);
+ /* starec vht */
+ mt7915_mcu_sta_vht_tlv(skb, sta);
+ /* starec uapsd */
+ mt7915_mcu_sta_uapsd_tlv(skb, sta, vif);
+ }
- wtbl_hdr = mt7915_mcu_alloc_wtbl_req(dev, msta, WTBL_RESET_AND_SET,
- sta_wtbl, &skb);
- if (IS_ERR(wtbl_hdr))
- return PTR_ERR(wtbl_hdr);
+ ret = mt7915_mcu_sta_wtbl_tlv(dev, skb, vif, sta);
+ if (ret)
+ return ret;
- if (enable) {
- mt7915_mcu_wtbl_generic_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
- mt7915_mcu_wtbl_hdr_trans_tlv(skb, vif, sta, sta_wtbl, wtbl_hdr);
- if (sta)
- mt7915_mcu_wtbl_ht_tlv(skb, sta, sta_wtbl, wtbl_hdr);
+ if (sta && sta->ht_cap.ht_supported) {
+ /* starec amsdu */
+ mt7915_mcu_sta_amsdu_tlv(skb, vif, sta);
+ /* starec he */
+ mt7915_mcu_sta_he_tlv(skb, sta, vif);
+ /* starec muru */
+ mt7915_mcu_sta_muru_tlv(skb, sta, vif);
+ /* starec bfee */
+ mt7915_mcu_sta_bfee_tlv(dev, skb, vif, sta);
}
+ ret = mt7915_mcu_add_group(dev, vif, sta);
+ if (ret)
+ return ret;
+out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
MCU_EXT_CMD(STA_REC_UPDATE), true);
}
@@ -2464,10 +2409,9 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
if (!rate) {
ra->field = cpu_to_le32(RATE_PARAM_AUTO);
goto out;
- } else {
- ra->field = cpu_to_le32(RATE_PARAM_FIXED);
}
+ ra->field = cpu_to_le32(RATE_PARAM_FIXED);
ra->phy.type = FIELD_GET(RATE_CFG_PHY_TYPE, rate);
ra->phy.bw = FIELD_GET(RATE_CFG_BW, rate);
ra->phy.nss = FIELD_GET(RATE_CFG_NSS, rate);
@@ -2480,10 +2424,12 @@ int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
ra->phy.ldpc = FIELD_GET(RATE_CFG_LDPC, rate) * 7;
/* HT/VHT - SGI: 1, LGI: 0; HE - SGI: 0, MGI: 1, LGI: 2 */
- if (ra->phy.type > MT_PHY_TYPE_VHT)
- ra->phy.sgi = ra->phy.mcs * 85;
- else
- ra->phy.sgi = ra->phy.mcs * 15;
+ if (ra->phy.type > MT_PHY_TYPE_VHT) {
+ ra->phy.he_ltf = FIELD_GET(RATE_CFG_HE_LTF, rate) * 85;
+ ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 85;
+ } else {
+ ra->phy.sgi = FIELD_GET(RATE_CFG_GI, rate) * 15;
+ }
out:
return mt76_mcu_skb_send_msg(&dev->mt76, skb,
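
The * 85 and * 15 factors above read as bit-replication constants rather than scaling; a minimal sketch under that reading (helper name is illustrative, not part of the patch):

#include <linux/types.h>

/* copy one GI value into all four per-bandwidth sub-fields of the
 * firmware rate word: 85 == 0x55 fills four 2-bit HE GI fields,
 * 15 == 0x0f fills four 1-bit HT/VHT SGI flags
 */
static u8 replicate_gi(u8 gi, bool he)
{
	return he ? gi * 0x55 : gi * 0x0f;
}
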
@@ -2534,25 +2480,28 @@ int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
}
static void
-mt7915_mcu_beacon_csa(struct sk_buff *rskb, struct sk_buff *skb,
- struct bss_info_bcn *bcn,
- struct ieee80211_mutable_offsets *offs)
+mt7915_mcu_beacon_cntdwn(struct ieee80211_vif *vif, struct sk_buff *rskb,
+ struct sk_buff *skb, struct bss_info_bcn *bcn,
+ struct ieee80211_mutable_offsets *offs)
{
- if (offs->cntdwn_counter_offs[0]) {
- struct tlv *tlv;
- struct bss_info_bcn_csa *csa;
+ struct bss_info_bcn_cntdwn *info;
+ struct tlv *tlv;
+ int sub_tag;
- tlv = mt7915_mcu_add_nested_subtlv(rskb, BSS_INFO_BCN_CSA,
- sizeof(*csa), &bcn->sub_ntlv,
- &bcn->len);
- csa = (struct bss_info_bcn_csa *)tlv;
- csa->cnt = skb->data[offs->cntdwn_counter_offs[0]];
- }
+ if (!offs->cntdwn_counter_offs[0])
+ return;
+
+ sub_tag = vif->csa_active ? BSS_INFO_BCN_CSA : BSS_INFO_BCN_BCC;
+ tlv = mt7915_mcu_add_nested_subtlv(rskb, sub_tag, sizeof(*info),
+ &bcn->sub_ntlv, &bcn->len);
+ info = (struct bss_info_bcn_cntdwn *)tlv;
+ info->cnt = skb->data[offs->cntdwn_counter_offs[0]];
}
static void
-mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
- struct sk_buff *skb, struct bss_info_bcn *bcn,
+mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct sk_buff *rskb, struct sk_buff *skb,
+ struct bss_info_bcn *bcn,
struct ieee80211_mutable_offsets *offs)
{
struct mt76_wcid *wcid = &dev->mt76.global_wcid;
@@ -2568,8 +2517,14 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
cont->pkt_len = cpu_to_le16(MT_TXD_SIZE + skb->len);
cont->tim_ofs = cpu_to_le16(offs->tim_offset);
- if (offs->cntdwn_counter_offs[0])
- cont->csa_ofs = cpu_to_le16(offs->cntdwn_counter_offs[0] - 4);
+ if (offs->cntdwn_counter_offs[0]) {
+ u16 offset = offs->cntdwn_counter_offs[0];
+
+ if (vif->csa_active)
+ cont->csa_ofs = cpu_to_le16(offset - 4);
+ if (vif->color_change_active)
+ cont->bcc_ofs = cpu_to_le16(offset - 3);
+ }
buf = (u8 *)tlv + sizeof(*cont);
mt7915_mac_write_txwi(dev, (__le32 *)buf, skb, wcid, 0, NULL,
@@ -2577,6 +2532,82 @@ mt7915_mcu_beacon_cont(struct mt7915_dev *dev, struct sk_buff *rskb,
memcpy(buf + MT_TXD_SIZE, skb->data, skb->len);
}
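
The -4 / -3 adjustments above rewind the beacon countdown byte offset to the start of the containing element; a sketch of that arithmetic under the usual element layouts (offsets from the Element ID byte, stated here as an assumption):

#include <linux/types.h>

/* CSA element:  EID | Len | Mode | New Channel | Count       -> count at +4
 * BCCA element: EID | Len | EID Ext | Countdown | Color Info -> count at +3
 */
static u16 cntdwn_elem_offset(u16 counter_offs, bool csa)
{
	return csa ? counter_offs - 4 : counter_offs - 3;
}
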
+static void
+mt7915_mcu_beacon_check_caps(struct mt7915_phy *phy, struct ieee80211_vif *vif,
+ struct sk_buff *skb)
+{
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct mt7915_vif_cap *vc = &mvif->cap;
+ const struct ieee80211_he_cap_elem *he;
+ const struct ieee80211_vht_cap *vht;
+ const struct ieee80211_ht_cap *ht;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
+ const u8 *ie;
+ u32 len, bc;
+
+ /* Check for missing configuration options so that AP mode in mac80211
+ * remains in sync with hostapd settings, and derive the supported
+ * subset of beacon and hardware capabilities.
+ */
+ if (WARN_ON_ONCE(skb->len <= (mgmt->u.beacon.variable - skb->data)))
+ return;
+
+ memset(vc, 0, sizeof(*vc));
+
+ len = skb->len - (mgmt->u.beacon.variable - skb->data);
+
+ ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, mgmt->u.beacon.variable,
+ len);
+ if (ie && ie[1] >= sizeof(*ht)) {
+ ht = (void *)(ie + 2);
+ vc->ldpc |= !!(le16_to_cpu(ht->cap_info) &
+ IEEE80211_HT_CAP_LDPC_CODING);
+ }
+
+ ie = cfg80211_find_ie(WLAN_EID_VHT_CAPABILITY, mgmt->u.beacon.variable,
+ len);
+ if (ie && ie[1] >= sizeof(*vht)) {
+ u32 pc = phy->mt76->sband_5g.sband.vht_cap.cap;
+
+ vht = (void *)(ie + 2);
+ bc = le32_to_cpu(vht->vht_cap_info);
+
+ vc->ldpc |= !!(bc & IEEE80211_VHT_CAP_RXLDPC);
+ vc->vht_su_ebfer =
+ (bc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE);
+ vc->vht_su_ebfee =
+ (bc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE);
+ vc->vht_mu_ebfer =
+ (bc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
+ vc->vht_mu_ebfee =
+ (bc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) &&
+ (pc & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE);
+ }
+
+ ie = cfg80211_find_ext_ie(WLAN_EID_EXT_HE_CAPABILITY,
+ mgmt->u.beacon.variable, len);
+ if (ie && ie[1] >= sizeof(*he) + 1) {
+ const struct ieee80211_sta_he_cap *pc =
+ mt7915_get_he_phy_cap(phy, vif);
+ const struct ieee80211_he_cap_elem *pe = &pc->he_cap_elem;
+
+ he = (void *)(ie + 3);
+
+ vc->he_su_ebfer =
+ HE_PHY(CAP3_SU_BEAMFORMER, he->phy_cap_info[3]) &&
+ HE_PHY(CAP3_SU_BEAMFORMER, pe->phy_cap_info[3]);
+ vc->he_su_ebfee =
+ HE_PHY(CAP4_SU_BEAMFORMEE, he->phy_cap_info[4]) &&
+ HE_PHY(CAP4_SU_BEAMFORMEE, pe->phy_cap_info[4]);
+ vc->he_mu_ebfer =
+ HE_PHY(CAP4_MU_BEAMFORMER, he->phy_cap_info[4]) &&
+ HE_PHY(CAP4_MU_BEAMFORMER, pe->phy_cap_info[4]);
+ }
+}
+
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int en)
{
@@ -2617,9 +2648,11 @@ int mt7915_mcu_add_beacon(struct ieee80211_hw *hw,
info->hw_queue |= MT_TX_HW_QUEUE_EXT_PHY;
}
- /* TODO: subtag - bss color count & 11v MBSSID */
- mt7915_mcu_beacon_csa(rskb, skb, bcn, &offs);
- mt7915_mcu_beacon_cont(dev, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_check_caps(phy, vif, skb);
+
+ /* TODO: subtag - 11v MBSSID */
+ mt7915_mcu_beacon_cntdwn(vif, rskb, skb, bcn, &offs);
+ mt7915_mcu_beacon_cont(dev, vif, rskb, skb, bcn, &offs);
dev_kfree_skb(skb);
out:
@@ -2770,8 +2803,8 @@ static int mt7915_load_patch(struct mt7915_dev *dev)
goto out;
}
- ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- dl, len);
+ ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+ dl, len, 4096);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send patch\n");
goto out;
@@ -2790,7 +2823,7 @@ out:
default:
ret = -EAGAIN;
dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- goto out;
+ break;
}
release_firmware(fw);
@@ -2839,8 +2872,8 @@ mt7915_mcu_send_ram_firmware(struct mt7915_dev *dev,
return err;
}
- err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
- data + offset, len);
+ err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD(FW_SCATTER),
+ data + offset, len, 4096);
if (err) {
dev_err(dev->mt76.dev, "Failed to send firmware.\n");
return err;
@@ -2946,7 +2979,7 @@ static int mt7915_load_firmware(struct mt7915_dev *dev)
return 0;
}
-int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl)
{
struct {
u8 ctrl_val;
@@ -2955,6 +2988,10 @@ int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl)
.ctrl_val = ctrl
};
+ if (type == MCU_FW_LOG_WA)
+ return mt76_mcu_send_msg(&dev->mt76, MCU_WA_EXT_CMD(FW_LOG_2_HOST),
+ &data, sizeof(data), true);
+
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(FW_LOG_2_HOST), &data,
sizeof(data), true);
}
@@ -2990,6 +3027,62 @@ static int mt7915_mcu_set_mwds(struct mt7915_dev *dev, bool enabled)
sizeof(req), false);
}
+int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val)
+{
+ struct {
+ __le32 cmd;
+ u8 val[4];
+ } __packed req = {
+ .cmd = cpu_to_le32(cmd),
+ };
+
+ put_unaligned_le32(val, req.val);
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(MURU_CTRL), &req,
+ sizeof(req), false);
+}
+
+static int
+mt7915_mcu_init_rx_airtime(struct mt7915_dev *dev)
+{
+#define RX_AIRTIME_FEATURE_CTRL 1
+#define RX_AIRTIME_BITWISE_CTRL 2
+#define RX_AIRTIME_CLEAR_EN 1
+ struct {
+ __le16 field;
+ __le16 sub_field;
+ __le32 set_status;
+ __le32 get_status;
+ u8 _rsv[12];
+
+ bool airtime_en;
+ bool mibtime_en;
+ bool earlyend_en;
+ u8 _rsv1[9];
+
+ bool airtime_clear;
+ bool mibtime_clear;
+ u8 _rsv2[98];
+ } __packed req = {
+ .field = cpu_to_le16(RX_AIRTIME_BITWISE_CTRL),
+ .sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN),
+ .airtime_clear = true,
+ };
+ int ret;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ req.field = cpu_to_le16(RX_AIRTIME_FEATURE_CTRL);
+ req.sub_field = cpu_to_le16(RX_AIRTIME_CLEAR_EN);
+ req.airtime_en = true;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RX_AIRTIME_CTRL), &req,
+ sizeof(req), true);
+}
+
int mt7915_mcu_init(struct mt7915_dev *dev)
{
static const struct mt76_mcu_ops mt7915_mcu_ops = {
@@ -3011,11 +3104,29 @@ int mt7915_mcu_init(struct mt7915_dev *dev)
return ret;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
- mt7915_mcu_fw_log_2_host(dev, 0);
- mt7915_mcu_set_mwds(dev, 1);
- mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET), MCU_WA_PARAM_RED, 0, 0);
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WM, 0);
+ if (ret)
+ return ret;
- return 0;
+ ret = mt7915_mcu_fw_log_2_host(dev, MCU_FW_LOG_WA, 0);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_set_mwds(dev, 1);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_set_muru_ctrl(dev, MURU_SET_PLATFORM_TYPE,
+ MURU_PLATFORM_TYPE_PERF_LEVEL_2);
+ if (ret)
+ return ret;
+
+ ret = mt7915_mcu_init_rx_airtime(dev);
+ if (ret)
+ return ret;
+
+ return mt7915_mcu_wa_cmd(dev, MCU_WA_PARAM_CMD(SET),
+ MCU_WA_PARAM_RED, 0, 0);
}
void mt7915_mcu_exit(struct mt7915_dev *dev)
@@ -3391,20 +3502,20 @@ int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd)
static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
{
-#define TOTAL_PAGE_MASK GENMASK(7, 5)
+#define MAX_PAGE_IDX_MASK GENMASK(7, 5)
#define PAGE_IDX_MASK GENMASK(4, 2)
#define PER_PAGE_SIZE 0x400
struct mt7915_mcu_eeprom req = { .buffer_mode = EE_MODE_BUFFER };
- u8 total = MT7915_EEPROM_SIZE / PER_PAGE_SIZE;
+ u8 total = DIV_ROUND_UP(MT7915_EEPROM_SIZE, PER_PAGE_SIZE);
u8 *eep = (u8 *)dev->mt76.eeprom.data;
int eep_len;
int i;
- for (i = 0; i <= total; i++, eep += eep_len) {
+ for (i = 0; i < total; i++, eep += eep_len) {
struct sk_buff *skb;
int ret;
- if (i == total)
+ if (i == total - 1 && !!(MT7915_EEPROM_SIZE % PER_PAGE_SIZE))
eep_len = MT7915_EEPROM_SIZE % PER_PAGE_SIZE;
else
eep_len = PER_PAGE_SIZE;
@@ -3414,7 +3525,7 @@ static int mt7915_mcu_set_eeprom_flash(struct mt7915_dev *dev)
if (!skb)
return -ENOMEM;
- req.format = FIELD_PREP(TOTAL_PAGE_MASK, total) |
+ req.format = FIELD_PREP(MAX_PAGE_IDX_MASK, total - 1) |
FIELD_PREP(PAGE_IDX_MASK, i) | EE_FORMAT_WHOLE;
req.len = cpu_to_le16(eep_len);
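
The page accounting in this hunk can be summarised with a small stand-alone sketch (not part of the patch): the flash EEPROM is pushed in 0x400-byte pages and only the final page carries the remainder when the size is not page-aligned.

#include <linux/kernel.h>

#define PER_PAGE_SIZE	0x400

/* length of page @idx when a @size-byte EEPROM is split into
 * DIV_ROUND_UP(@size, PER_PAGE_SIZE) pages
 */
static int eeprom_page_len(int size, int idx)
{
	int total = DIV_ROUND_UP(size, PER_PAGE_SIZE);

	if (idx == total - 1 && (size % PER_PAGE_SIZE))
		return size % PER_PAGE_SIZE;

	return PER_PAGE_SIZE;
}
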
@@ -3481,7 +3592,7 @@ static int mt7915_mcu_set_pre_cal(struct mt7915_dev *dev, u8 idx,
u8 idx;
u8 rsv[4];
__le32 len;
- } req;
+ } req = {};
struct sk_buff *skb;
skb = mt76_mcu_msg_alloc(&dev->mt76, NULL, sizeof(req) + len);
@@ -3691,10 +3802,6 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
};
int level;
-#define TRIGGER_TEMPERATURE 122
-#define RESTORE_TEMPERATURE 116
-#define SUSTAIN_PERIOD 10
-
if (!state) {
req.ctrl.ctrl_id = THERMAL_PROTECT_DISABLE;
goto out;
@@ -3707,7 +3814,7 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
req.ctrl.ctrl_id = THERMAL_PROTECT_DUTY_CONFIG;
req.ctrl.duty.duty_level = level;
req.ctrl.duty.duty_cycle = state;
- state = state * 4 / 5;
+ state /= 2;
ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(THERMAL_PROT),
&req, sizeof(req.ctrl), false);
@@ -3715,15 +3822,12 @@ int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state)
return ret;
}
- /* currently use fixed values for throttling, and would be better
- * to implement thermal zone for dynamic trip in the long run.
- */
-
/* set high-temperature trigger threshold */
req.ctrl.ctrl_id = THERMAL_PROTECT_ENABLE;
- req.trigger_temp = cpu_to_le32(TRIGGER_TEMPERATURE);
- req.restore_temp = cpu_to_le32(RESTORE_TEMPERATURE);
- req.sustain_time = cpu_to_le16(SUSTAIN_PERIOD);
+ /* add a safety margin of ~10 degrees */
+ req.restore_temp = cpu_to_le32(phy->throttle_temp[0] - 10);
+ req.trigger_temp = cpu_to_le32(phy->throttle_temp[1]);
+ req.sustain_time = cpu_to_le16(10);
out:
req.ctrl.type.protect_type = 1;
@@ -3733,24 +3837,6 @@ out:
&req, sizeof(req), false);
}
-int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx)
-{
- struct {
- __le32 cmd;
- __le16 wlan_idx;
- __le16 ru_idx;
- __le16 direction;
- __le16 dump_group;
- } req = {
- .cmd = cpu_to_le32(cmd),
- .wlan_idx = cpu_to_le16(wlan_idx),
- .dump_group = cpu_to_le16(1),
- };
-
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(RATE_CTRL), &req,
- sizeof(req), false);
-}
-
int mt7915_mcu_set_txpower_sku(struct mt7915_phy *phy)
{
struct mt7915_dev *dev = phy->dev;
@@ -4069,3 +4155,75 @@ out:
return ret;
}
+
+int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color)
+{
+ int len = sizeof(struct sta_req_hdr) + sizeof(struct bss_info_color);
+ struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv;
+ struct bss_info_color *bss_color;
+ struct sk_buff *skb;
+ struct tlv *tlv;
+
+ skb = mt7915_mcu_alloc_sta_req(dev, mvif, NULL, len);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
+
+ tlv = mt7915_mcu_add_tlv(skb, BSS_INFO_BSS_COLOR, sizeof(*bss_color));
+ bss_color = (struct bss_info_color *)tlv;
+ bss_color->disable = !he_bss_color->enabled;
+ bss_color->color = he_bss_color->color;
+
+ return mt76_mcu_skb_send_msg(&dev->mt76, skb,
+ MCU_EXT_CMD(BSS_INFO_UPDATE), true);
+}
+
+#define TWT_AGRT_TRIGGER BIT(0)
+#define TWT_AGRT_ANNOUNCE BIT(1)
+#define TWT_AGRT_PROTECT BIT(2)
+
+int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
+ struct mt7915_vif *mvif,
+ struct mt7915_twt_flow *flow,
+ int cmd)
+{
+ struct {
+ u8 tbl_idx;
+ u8 cmd;
+ u8 own_mac_idx;
+ u8 flowid; /* 0xff for group id */
+ __le16 peer_id; /* specify the peer_id (msb=0)
+ * or group_id (msb=1)
+ */
+ u8 duration; /* in units of 256 us */
+ u8 bss_idx;
+ __le64 start_tsf;
+ __le16 mantissa;
+ u8 exponent;
+ u8 is_ap;
+ u8 agrt_params;
+ u8 rsv[23];
+ } __packed req = {
+ .tbl_idx = flow->table_id,
+ .cmd = cmd,
+ .own_mac_idx = mvif->omac_idx,
+ .flowid = flow->id,
+ .peer_id = cpu_to_le16(flow->wcid),
+ .duration = flow->duration,
+ .bss_idx = mvif->idx,
+ .start_tsf = cpu_to_le64(flow->tsf),
+ .mantissa = flow->mantissa,
+ .exponent = flow->exp,
+ .is_ap = true,
+ };
+
+ if (flow->protection)
+ req.agrt_params |= TWT_AGRT_PROTECT;
+ if (!flow->flowtype)
+ req.agrt_params |= TWT_AGRT_ANNOUNCE;
+ if (flow->trigger)
+ req.agrt_params |= TWT_AGRT_TRIGGER;
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD(TWT_AGRT_UPDATE),
+ &req, sizeof(req), true);
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
index e68a562cc5b4..1f5a64ba9b59 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.h
@@ -43,7 +43,7 @@ enum {
MCU_EXT_EVENT_ASSERT_DUMP = 0x23,
MCU_EXT_EVENT_RDD_REPORT = 0x3a,
MCU_EXT_EVENT_CSA_NOTIFY = 0x4f,
- MCU_EXT_EVENT_RATE_REPORT = 0x87,
+ MCU_EXT_EVENT_BCC_NOTIFY = 0x75,
};
enum {
@@ -164,41 +164,6 @@ struct mt7915_mcu_eeprom_info {
u8 data[16];
} __packed;
-struct mt7915_mcu_ra_info {
- struct mt7915_mcu_rxd rxd;
-
- __le32 event_id;
- __le16 wlan_idx;
- __le16 ru_idx;
- __le16 direction;
- __le16 dump_group;
-
- __le32 suggest_rate;
- __le32 min_rate; /* for dynamic sounding */
- __le32 max_rate; /* for dynamic sounding */
- __le32 init_rate_down_rate;
-
- __le16 curr_rate;
- __le16 init_rate_down_total;
- __le16 init_rate_down_succ;
- __le16 success;
- __le16 attempts;
-
- __le16 prev_rate;
- __le16 prob_up_rate;
- u8 no_rate_up_cnt;
- u8 ppdu_cnt;
- u8 gi;
-
- u8 try_up_fail;
- u8 try_up_total;
- u8 suggest_wf;
- u8 try_up_check;
- u8 prob_up_period;
- u8 prob_down_pending;
-} __packed;
-
-
struct mt7915_mcu_phy_rx_info {
u8 category;
u8 rate;
@@ -210,12 +175,6 @@ struct mt7915_mcu_phy_rx_info {
u8 bw;
};
-#define MT_RA_RATE_NSS GENMASK(8, 6)
-#define MT_RA_RATE_MCS GENMASK(3, 0)
-#define MT_RA_RATE_TX_MODE GENMASK(12, 9)
-#define MT_RA_RATE_DCM_EN BIT(4)
-#define MT_RA_RATE_BW GENMASK(14, 13)
-
struct mt7915_mcu_mib {
__le32 band;
__le32 offs;
@@ -270,6 +229,11 @@ enum {
MCU_S2D_H2CN
};
+enum {
+ MCU_FW_LOG_WM,
+ MCU_FW_LOG_WA,
+ MCU_FW_LOG_TO_HOST,
+};
#define __MCU_CMD_FIELD_ID GENMASK(7, 0)
#define __MCU_CMD_FIELD_EXT_ID GENMASK(15, 8)
@@ -312,15 +276,17 @@ enum {
MCU_EXT_CMD_MAC_INIT_CTRL = 0x46,
MCU_EXT_CMD_RX_HDR_TRANS = 0x47,
MCU_EXT_CMD_MUAR_UPDATE = 0x48,
+ MCU_EXT_CMD_RX_AIRTIME_CTRL = 0x4a,
MCU_EXT_CMD_SET_RX_PATH = 0x4e,
MCU_EXT_CMD_TX_POWER_FEATURE_CTRL = 0x58,
MCU_EXT_CMD_GET_MIB_INFO = 0x5a,
MCU_EXT_CMD_MWDS_SUPPORT = 0x80,
MCU_EXT_CMD_SET_SER_TRIGGER = 0x81,
MCU_EXT_CMD_SCS_CTRL = 0x82,
- MCU_EXT_CMD_RATE_CTRL = 0x87,
+ MCU_EXT_CMD_TWT_AGRT_UPDATE = 0x94,
MCU_EXT_CMD_FW_DBG_CTRL = 0x95,
MCU_EXT_CMD_SET_RDD_TH = 0x9d,
+ MCU_EXT_CMD_MURU_CTRL = 0x9f,
MCU_EXT_CMD_SET_SPR = 0xa8,
MCU_EXT_CMD_GROUP_PRE_CAL_INFO = 0xab,
MCU_EXT_CMD_DPD_PRE_CAL_INFO = 0xac,
@@ -328,6 +294,14 @@ enum {
};
enum {
+ MCU_TWT_AGRT_ADD,
+ MCU_TWT_AGRT_MODIFY,
+ MCU_TWT_AGRT_DELETE,
+ MCU_TWT_AGRT_TEARDOWN,
+ MCU_TWT_AGRT_GET_TSF,
+};
+
+enum {
MCU_WA_PARAM_CMD_QUERY,
MCU_WA_PARAM_CMD_SET,
MCU_WA_PARAM_CMD_CAPABILITY,
@@ -335,6 +309,8 @@ enum {
};
enum {
+ MCU_WA_PARAM_PDMA_RX = 0x04,
+ MCU_WA_PARAM_CPU_UTIL = 0x0b,
MCU_WA_PARAM_RED = 0x0e,
};
@@ -545,6 +521,14 @@ struct bss_info_hw_amsdu {
u8 rsv;
} __packed;
+struct bss_info_color {
+ __le16 tag;
+ __le16 len;
+ u8 disable;
+ u8 color;
+ u8 rsv[2];
+} __packed;
+
struct bss_info_he {
__le16 tag;
__le16 len;
@@ -563,14 +547,7 @@ struct bss_info_bcn {
__le16 sub_ntlv;
} __packed __aligned(4);
-struct bss_info_bcn_csa {
- __le16 tag;
- __le16 len;
- u8 cnt;
- u8 rsv[3];
-} __packed __aligned(4);
-
-struct bss_info_bcn_bcc {
+struct bss_info_bcn_cntdwn {
__le16 tag;
__le16 len;
u8 cnt;
@@ -716,6 +693,7 @@ struct wtbl_ba {
__le16 sn;
u8 ba_en;
u8 ba_winsize_idx;
+ /* originator & recipient */
__le16 ba_winsize;
/* recipient only */
u8 peer_addr[ETH_ALEN];
@@ -915,7 +893,7 @@ struct sta_rec_sec {
struct sec_key key[2];
} __packed;
-struct ra_phy {
+struct sta_phy {
u8 type;
u8 flag;
u8 stbc;
@@ -959,7 +937,7 @@ struct sta_rec_ra {
__le32 sta_cap;
- struct ra_phy phy;
+ struct sta_phy phy;
} __packed;
struct sta_rec_ra_fixed {
@@ -972,7 +950,7 @@ struct sta_rec_ra_fixed {
u8 op_vht_rx_nss;
u8 op_vht_rx_nss_type;
- struct ra_phy phy;
+ struct sta_phy phy;
u8 spe_en;
u8 short_preamble;
@@ -980,8 +958,14 @@ struct sta_rec_ra_fixed {
u8 mmps_mode;
} __packed;
-#define RATE_PARAM_FIXED 3
-#define RATE_PARAM_AUTO 20
+enum {
+ RATE_PARAM_FIXED = 3,
+ RATE_PARAM_FIXED_HE_LTF = 7,
+ RATE_PARAM_FIXED_MCS,
+ RATE_PARAM_FIXED_GI = 11,
+ RATE_PARAM_AUTO = 20,
+};
+
#define RATE_CFG_MCS GENMASK(3, 0)
#define RATE_CFG_NSS GENMASK(7, 4)
#define RATE_CFG_GI GENMASK(11, 8)
@@ -989,6 +973,7 @@ struct sta_rec_ra_fixed {
#define RATE_CFG_STBC GENMASK(19, 16)
#define RATE_CFG_LDPC GENMASK(23, 20)
#define RATE_CFG_PHY_TYPE GENMASK(27, 24)
+#define RATE_CFG_HE_LTF GENMASK(31, 28)
struct sta_rec_bf {
__le16 tag;
@@ -1002,8 +987,8 @@ struct sta_rec_bf {
u8 ndp_rate;
u8 rept_poll_rate;
u8 tx_mode; /* 0: legacy, 1: OFDM, 2: HT, 4: VHT ... */
- u8 nc;
- u8 nr;
+ u8 ncol;
+ u8 nrow;
u8 bw; /* 0: 20M, 1: 40M, 2: 80M, 3: 160M */
u8 mem_total;
@@ -1023,8 +1008,8 @@ struct sta_rec_bf {
u8 ibf_dbw;
u8 ibf_ncol;
u8 ibf_nrow;
- u8 nr_bw160;
- u8 nc_bw160;
+ u8 nrow_bw160;
+ u8 ncol_bw160;
u8 ru_start_idx;
u8 ru_end_idx;
@@ -1036,7 +1021,7 @@ struct sta_rec_bf {
bool codebook75_mu;
u8 he_ltf;
- u8 rsv[2];
+ u8 rsv[3];
} __packed;
struct sta_rec_bfee {
@@ -1116,16 +1101,21 @@ enum {
};
enum {
- MT_EBF = BIT(0), /* explicit beamforming */
- MT_IBF = BIT(1) /* implicit beamforming */
-};
-
-enum {
MT_BF_SOUNDING_ON = 1,
MT_BF_TYPE_UPDATE = 20,
MT_BF_MODULE_UPDATE = 25
};
+enum {
+ MURU_SET_ARB_OP_MODE = 14,
+ MURU_SET_PLATFORM_TYPE = 25,
+};
+
+enum {
+ MURU_PLATFORM_TYPE_PERF_LEVEL_1 = 1,
+ MURU_PLATFORM_TYPE_PERF_LEVEL_2,
+};
+
#define MT7915_WTBL_UPDATE_MAX_SIZE (sizeof(struct wtbl_req_hdr) + \
sizeof(struct wtbl_generic) + \
sizeof(struct wtbl_rx) + \
@@ -1137,12 +1127,15 @@ enum {
#define MT7915_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \
sizeof(struct sta_rec_basic) + \
+ sizeof(struct sta_rec_bf) + \
sizeof(struct sta_rec_ht) + \
sizeof(struct sta_rec_he) + \
sizeof(struct sta_rec_ba) + \
sizeof(struct sta_rec_vht) + \
sizeof(struct sta_rec_uapsd) + \
sizeof(struct sta_rec_amsdu) + \
+ sizeof(struct sta_rec_muru) + \
+ sizeof(struct sta_rec_bfee) + \
sizeof(struct tlv) + \
MT7915_WTBL_UPDATE_MAX_SIZE)
@@ -1157,8 +1150,7 @@ enum {
sizeof(struct bss_info_ext_bss))
#define MT7915_BEACON_UPDATE_SIZE (sizeof(struct sta_req_hdr) + \
- sizeof(struct bss_info_bcn_csa) + \
- sizeof(struct bss_info_bcn_bcc) + \
+ sizeof(struct bss_info_bcn_cntdwn) + \
sizeof(struct bss_info_bcn_mbss) + \
sizeof(struct bss_info_bcn_cont))
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
index af712a936ef6..1f6ba306c850 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mmio.c
@@ -34,6 +34,9 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
u32 mapped;
u32 size;
} fixed_map[] = {
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure regs) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
@@ -92,8 +95,7 @@ static u32 __mt7915_reg_addr(struct mt7915_dev *dev, u32 addr)
}
if ((addr >= 0x18000000 && addr < 0x18c00000) ||
- (addr >= 0x70000000 && addr < 0x78000000) ||
- (addr >= 0x7c000000 && addr < 0x7c400000))
+ (addr >= 0x70000000 && addr < 0x78000000))
return mt7915_reg_map_l1(dev, addr);
return mt7915_reg_map_l2(dev, addr);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
index 3f613fae6218..e69b4c8974ee 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/mt7915.h
@@ -36,13 +36,14 @@
#define MT7915_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7915_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
-#define MT7915_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
-#define MT7915_2G_RATE_DEFAULT 0x0 /* CCK 1M */
#define MT7915_THERMAL_THROTTLE_MAX 100
#define MT7915_SKU_RATE_NUM 161
+#define MT7915_MAX_TWT_AGRT 16
+#define MT7915_MAX_STA_TWT_AGRT 8
+
struct mt7915_vif;
struct mt7915_sta;
struct mt7915_dfs_pulse;
@@ -64,35 +65,59 @@ enum mt7915_rxq_id {
MT7915_RXQ_MCU_WA_EXT,
};
-struct mt7915_sta_stats {
- struct rate_info prob_rate;
- struct rate_info tx_rate;
-
- unsigned long per;
- unsigned long changed;
- unsigned long jiffies;
-};
-
struct mt7915_sta_key_conf {
s8 keyidx;
u8 key[16];
};
+struct mt7915_twt_flow {
+ struct list_head list;
+ u64 start_tsf;
+ u64 tsf;
+ u32 duration;
+ u16 wcid;
+ __le16 mantissa;
+ u8 exp;
+ u8 table_id;
+ u8 id;
+ u8 protection:1;
+ u8 flowtype:1;
+ u8 trigger:1;
+ u8 sched:1;
+};
+
struct mt7915_sta {
struct mt76_wcid wcid; /* must be first */
struct mt7915_vif *vif;
- struct list_head stats_list;
struct list_head poll_list;
struct list_head rc_list;
u32 airtime_ac[8];
- struct mt7915_sta_stats stats;
-
+ unsigned long changed;
+ unsigned long jiffies;
unsigned long ampdu_state;
+ struct mt76_sta_stats stats;
+
struct mt7915_sta_key_conf bip;
+
+ struct {
+ u8 flowid_mask;
+ struct mt7915_twt_flow flow[MT7915_MAX_STA_TWT_AGRT];
+ } twt;
+};
+
+struct mt7915_vif_cap {
+ bool ldpc:1;
+ bool vht_su_ebfer:1;
+ bool vht_su_ebfee:1;
+ bool vht_mu_ebfer:1;
+ bool vht_mu_ebfee:1;
+ bool he_su_ebfer:1;
+ bool he_su_ebfee:1;
+ bool he_mu_ebfer:1;
};
struct mt7915_vif {
@@ -101,6 +126,7 @@ struct mt7915_vif {
u8 band_idx;
u8 wmm_idx;
+ struct mt7915_vif_cap cap;
struct mt7915_sta sta;
struct mt7915_phy *phy;
@@ -108,12 +134,58 @@ struct mt7915_vif {
struct cfg80211_bitrate_mask bitrate_mask;
};
+/* per-phy stats. */
struct mib_stats {
u32 ack_fail_cnt;
u32 fcs_err_cnt;
u32 rts_cnt;
u32 rts_retries_cnt;
u32 ba_miss_cnt;
+ u32 tx_bf_cnt;
+ u32 tx_mu_mpdu_cnt;
+ u32 tx_mu_acked_mpdu_cnt;
+ u32 tx_su_acked_mpdu_cnt;
+ u32 tx_bf_ibf_ppdu_cnt;
+ u32 tx_bf_ebf_ppdu_cnt;
+
+ u32 tx_bf_rx_fb_all_cnt;
+ u32 tx_bf_rx_fb_he_cnt;
+ u32 tx_bf_rx_fb_vht_cnt;
+ u32 tx_bf_rx_fb_ht_cnt;
+
+ u32 tx_bf_rx_fb_bw; /* value of last sample, not cumulative */
+ u32 tx_bf_rx_fb_nc_cnt;
+ u32 tx_bf_rx_fb_nr_cnt;
+ u32 tx_bf_fb_cpl_cnt;
+ u32 tx_bf_fb_trig_cnt;
+
+ u32 tx_ampdu_cnt;
+ u32 tx_stop_q_empty_cnt;
+ u32 tx_mpdu_attempts_cnt;
+ u32 tx_mpdu_success_cnt;
+ u32 tx_pkt_ebf_cnt;
+ u32 tx_pkt_ibf_cnt;
+
+ u32 tx_rwp_fail_cnt;
+ u32 tx_rwp_need_cnt;
+
+ /* rx stats */
+ u32 rx_fifo_full_cnt;
+ u32 channel_idle_cnt;
+ u32 rx_vector_mismatch_cnt;
+ u32 rx_delimiter_fail_cnt;
+ u32 rx_len_mismatch_cnt;
+ u32 rx_mpdu_cnt;
+ u32 rx_ampdu_cnt;
+ u32 rx_ampdu_bytes_cnt;
+ u32 rx_ampdu_valid_subframe_cnt;
+ u32 rx_ampdu_valid_subframe_bytes_cnt;
+ u32 rx_pfdrop_cnt;
+ u32 rx_vec_queue_overflow_drop_cnt;
+ u32 rx_ba_cnt;
+
+ u32 tx_amsdu[8];
+ u32 tx_amsdu_cnt;
};
struct mt7915_hif {
@@ -134,6 +206,7 @@ struct mt7915_phy {
struct thermal_cooling_device *cdev;
u8 throttle_state;
+ u32 throttle_temp[2]; /* 0: critical high, 1: maximum */
u32 rxfilter;
u64 omac_mask;
@@ -151,9 +224,6 @@ struct mt7915_phy {
struct mib_stats mib;
struct mt76_channel_state state_ts;
- struct list_head stats_list;
-
- u8 sta_work_count;
#ifdef CONFIG_NL80211_TESTMODE
struct {
@@ -193,16 +263,23 @@ struct mt7915_dev {
struct list_head sta_rc_list;
struct list_head sta_poll_list;
+ struct list_head twt_list;
spinlock_t sta_poll_lock;
u32 hw_pattern;
bool dbdc_support;
bool flash_mode;
- bool fw_debug;
bool ibf;
+ u8 fw_debug_wm;
+ u8 fw_debug_wa;
void *cal;
+
+ struct {
+ u8 table_mask;
+ u8 n_agrt;
+ } twt;
};
enum {
@@ -220,13 +297,17 @@ enum {
};
enum {
- MT_LMAC_AC00,
+ MT_CTX0,
+ MT_HIF0 = 0x0,
+
+ MT_LMAC_AC00 = 0x0,
MT_LMAC_AC01,
MT_LMAC_AC02,
MT_LMAC_AC03,
MT_LMAC_ALTX0 = 0x10,
MT_LMAC_BMC0,
MT_LMAC_BCN0,
+ MT_LMAC_PSMP0,
};
enum {
@@ -250,13 +331,6 @@ enum mt7915_rdd_cmd {
RDD_IRQ_OFF,
};
-enum {
- RATE_CTRL_RU_INFO,
- RATE_CTRL_FIXED_RATE_INFO,
- RATE_CTRL_DUMP_INFO,
- RATE_CTRL_MU_INFO,
-};
-
static inline struct mt7915_phy *
mt7915_hw_phy(struct ieee80211_hw *hw)
{
@@ -294,7 +368,7 @@ extern const struct ieee80211_ops mt7915_ops;
extern const struct mt76_testmode_ops mt7915_testmode_ops;
u32 mt7915_reg_map(struct mt7915_dev *dev, u32 addr);
-
+u64 __mt7915_get_tsf(struct ieee80211_hw *hw, struct mt7915_vif *mvif);
int mt7915_register_device(struct mt7915_dev *dev);
void mt7915_unregister_device(struct mt7915_dev *dev);
int mt7915_eeprom_init(struct mt7915_dev *dev);
@@ -307,14 +381,16 @@ int mt7915_dma_init(struct mt7915_dev *dev);
void mt7915_dma_prefetch(struct mt7915_dev *dev);
void mt7915_dma_cleanup(struct mt7915_dev *dev);
int mt7915_mcu_init(struct mt7915_dev *dev);
+int mt7915_mcu_twt_agrt_update(struct mt7915_dev *dev,
+ struct mt7915_vif *mvif,
+ struct mt7915_twt_flow *flow,
+ int cmd);
int mt7915_mcu_add_dev_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, bool enable);
int mt7915_mcu_add_bss_info(struct mt7915_phy *phy,
struct ieee80211_vif *vif, int enable);
int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, bool enable);
-int mt7915_mcu_add_sta_adv(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, bool enable);
int mt7915_mcu_sta_update_hdr_trans(struct mt7915_dev *dev,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
@@ -327,22 +403,24 @@ int mt7915_mcu_add_rx_ba(struct mt7915_dev *dev,
int mt7915_mcu_add_key(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct mt7915_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
+int mt7915_mcu_update_bss_color(struct mt7915_dev *dev, struct ieee80211_vif *vif,
+ struct cfg80211_he_bss_color *he_bss_color);
int mt7915_mcu_add_beacon(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
int enable);
int mt7915_mcu_add_obss_spr(struct mt7915_dev *dev, struct ieee80211_vif *vif,
bool enable);
int mt7915_mcu_add_rate_ctrl(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-int mt7915_mcu_add_he(struct mt7915_dev *dev, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
+ struct ieee80211_sta *sta, bool changed);
int mt7915_mcu_add_smps(struct mt7915_dev *dev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
int mt7915_set_channel(struct mt7915_phy *phy);
int mt7915_mcu_set_chan_info(struct mt7915_phy *phy, int cmd);
int mt7915_mcu_set_tx(struct mt7915_dev *dev, struct ieee80211_vif *vif);
int mt7915_mcu_update_edca(struct mt7915_dev *dev, void *req);
-int mt7915_mcu_set_fixed_rate(struct mt7915_dev *dev,
- struct ieee80211_sta *sta, u32 rate);
+int mt7915_mcu_set_fixed_rate_ctrl(struct mt7915_dev *dev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ void *data, u32 field);
int mt7915_mcu_set_eeprom(struct mt7915_dev *dev);
int mt7915_mcu_get_eeprom(struct mt7915_dev *dev, u32 offset);
int mt7915_mcu_set_mac(struct mt7915_dev *dev, int band, bool enable,
@@ -362,17 +440,18 @@ int mt7915_mcu_set_pulse_th(struct mt7915_dev *dev,
const struct mt7915_dfs_pulse *pulse);
int mt7915_mcu_set_radar_th(struct mt7915_dev *dev, int index,
const struct mt7915_dfs_pattern *pattern);
+int mt7915_mcu_set_muru_ctrl(struct mt7915_dev *dev, u32 cmd, u32 val);
int mt7915_mcu_apply_group_cal(struct mt7915_dev *dev);
int mt7915_mcu_apply_tx_dpd(struct mt7915_phy *phy);
int mt7915_mcu_get_chan_mib_info(struct mt7915_phy *phy, bool chan_switch);
int mt7915_mcu_get_temperature(struct mt7915_phy *phy);
int mt7915_mcu_set_thermal_throttling(struct mt7915_phy *phy, u8 state);
-int mt7915_mcu_get_tx_rate(struct mt7915_dev *dev, u32 cmd, u16 wlan_idx);
int mt7915_mcu_get_rx_rate(struct mt7915_phy *phy, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct rate_info *rate);
int mt7915_mcu_rdd_cmd(struct mt7915_dev *dev, enum mt7915_rdd_cmd cmd,
u8 index, u8 rx_sel, u8 val);
-int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 ctrl);
+int mt7915_mcu_wa_cmd(struct mt7915_dev *dev, int cmd, u32 a1, u32 a2, u32 a3);
+int mt7915_mcu_fw_log_2_host(struct mt7915_dev *dev, u8 type, u8 ctrl);
int mt7915_mcu_fw_dbg_ctrl(struct mt7915_dev *dev, u32 module, u8 level);
void mt7915_mcu_rx_event(struct mt7915_dev *dev, struct sk_buff *skb);
void mt7915_mcu_exit(struct mt7915_dev *dev);
@@ -403,6 +482,7 @@ static inline void mt7915_irq_disable(struct mt7915_dev *dev, u32 mask)
mt76_set_irq_mask(&dev->mt76, MT_INT_MASK_CSR, mask, 0);
}
+u32 mt7915_mac_wtbl_lmac_addr(struct mt7915_dev *dev, u16 wcid, u8 dw);
bool mt7915_mac_wtbl_update(struct mt7915_dev *dev, int idx, u32 mask);
void mt7915_mac_reset_counters(struct mt7915_phy *phy);
void mt7915_mac_cca_stats_reset(struct mt7915_phy *phy);
@@ -418,7 +498,14 @@ void mt7915_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
void mt7915_mac_work(struct work_struct *work);
void mt7915_mac_reset_work(struct work_struct *work);
void mt7915_mac_sta_rc_work(struct work_struct *work);
+void mt7915_mac_update_stats(struct mt7915_phy *phy);
int mt7915_mmio_init(struct mt76_dev *mdev, void __iomem *mem_base, int irq);
+void mt7915_mac_twt_teardown_flow(struct mt7915_dev *dev,
+ struct mt7915_sta *msta,
+ u8 flowid);
+void mt7915_mac_add_twt_setup(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_twt_setup *twt);
int mt7915_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
enum mt76_txq_id qid, struct mt76_wcid *wcid,
struct ieee80211_sta *sta,
@@ -435,7 +522,7 @@ int mt7915_dfs_init_radar_detector(struct mt7915_phy *phy);
void mt7915_set_stream_he_caps(struct mt7915_phy *phy);
void mt7915_set_stream_vht_txbf_caps(struct mt7915_phy *phy);
void mt7915_update_channel(struct mt76_phy *mphy);
-int mt7915_init_debugfs(struct mt7915_dev *dev);
+int mt7915_init_debugfs(struct mt7915_phy *phy);
#ifdef CONFIG_MAC80211_DEBUGFS
void mt7915_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
index 340b364da5f0..0af4cdb897b7 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/pci.c
@@ -222,8 +222,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7915_txp),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
- MT_DRV_AMSDU_OFFLOAD,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
@@ -251,7 +250,7 @@ static int mt7915_pci_probe(struct pci_dev *pdev,
pci_set_master(pdev);
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
return ret;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
index a213b5cb82f8..59693535b098 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/regs.h
@@ -22,15 +22,22 @@
#define MT_PLE_BASE 0x8000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
-#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
-#define MT_PLE_FL_Q1_CTRL MT_PLE(0x1b4)
-#define MT_PLE_FL_Q2_CTRL MT_PLE(0x1b8)
-#define MT_PLE_FL_Q3_CTRL MT_PLE(0x1bc)
-
-#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x300 + 0x10 * (ac) + \
+#define MT_FL_Q_EMPTY 0x0b0
+#define MT_FL_Q0_CTRL 0x1b0
+#define MT_FL_Q2_CTRL 0x1b8
+#define MT_FL_Q3_CTRL 0x1bc
+
+#define MT_PLE_FREEPG_CNT MT_PLE(0x100)
+#define MT_PLE_FREEPG_HEAD_TAIL MT_PLE(0x104)
+#define MT_PLE_PG_HIF_GROUP MT_PLE(0x110)
+#define MT_PLE_HIF_PG_INFO MT_PLE(0x114)
+#define MT_PLE_AC_QEMPTY(ac, n) MT_PLE(0x500 + 0x40 * (ac) + \
((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
+#define MT_PSE_BASE 0xc000
+#define MT_PSE(ofs) (MT_PSE_BASE + (ofs))
+
#define MT_MDP_BASE 0xf000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
@@ -57,6 +64,7 @@
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
+#define MT_TMAC_TCR0_TX_BLINK GENMASK(7, 6)
#define MT_TMAC_TCR0_TBTT_STOP_CTRL BIT(25)
#define MT_TMAC_CDTR(_band) MT_WF_TMAC(_band, 0x090)
@@ -72,11 +80,14 @@
#define MT_TMAC_TRCR0_I2T_CHK GENMASK(24, 16)
#define MT_TMAC_ICR0(_band) MT_WF_TMAC(_band, 0x0a4)
-#define MT_IFS_EIFS GENMASK(8, 0)
+#define MT_IFS_EIFS_OFDM GENMASK(8, 0)
#define MT_IFS_RIFS GENMASK(14, 10)
#define MT_IFS_SIFS GENMASK(22, 16)
#define MT_IFS_SLOT GENMASK(30, 24)
+#define MT_TMAC_ICR1(_band) MT_WF_TMAC(_band, 0x0b4)
+#define MT_IFS_EIFS_CCK GENMASK(8, 0)
+
#define MT_TMAC_CTCR0(_band) MT_WF_TMAC(_band, 0x0f4)
#define MT_TMAC_CTCR0_INS_DDLMT_REFTIME GENMASK(5, 0)
#define MT_TMAC_CTCR0_INS_DDLMT_EN BIT(17)
@@ -128,15 +139,120 @@
#define MT_LPON_TCR_SW_READ GENMASK(1, 0)
/* MIB: band 0(0x24800), band 1(0xa4800) */
+/* These counters are (mostly?) clear-on-read, so some should not
+ * be read at all in case the firmware is already reading them; those
+ * are marked 'DNR' below and are instead obtained by querying the
+ * firmware API with the appropriate message. For the counters the
+ * driver does read, it should accumulate the values itself.
+ */
#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+#define MT_MIB_SDR0(_band) MT_WF_MIB(_band, 0x010)
+#define MT_MIB_SDR0_BERACON_TX_CNT_MASK GENMASK(15, 0)
+
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x014)
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(15, 0)
+#define MT_MIB_SDR4(_band) MT_WF_MIB(_band, 0x018)
+#define MT_MIB_SDR4_RX_FIFO_FULL_MASK GENMASK(15, 0)
+
+/* rx mpdu counter, full 32 bits */
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x01c)
+
+#define MT_MIB_SDR6(_band) MT_WF_MIB(_band, 0x020)
+#define MT_MIB_SDR6_CHANNEL_IDL_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR7(_band) MT_WF_MIB(_band, 0x024)
+#define MT_MIB_SDR7_RX_VECTOR_MISMATCH_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR8(_band) MT_WF_MIB(_band, 0x028)
+#define MT_MIB_SDR8_RX_DELIMITER_FAIL_CNT_MASK GENMASK(15, 0)
+
+/* aka CCA_NAV_TX_TIME */
+#define MT_MIB_SDR9_DNR(_band) MT_WF_MIB(_band, 0x02c)
+#define MT_MIB_SDR9_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR10_DNR(_band) MT_WF_MIB(_band, 0x030)
+#define MT_MIB_SDR10_MRDY_COUNT_MASK GENMASK(25, 0)
+
+#define MT_MIB_SDR11(_band) MT_WF_MIB(_band, 0x034)
+#define MT_MIB_SDR11_RX_LEN_MISMATCH_CNT_MASK GENMASK(15, 0)
+
+/* tx ampdu cnt, full 32 bits */
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x038)
+
+#define MT_MIB_SDR13(_band) MT_WF_MIB(_band, 0x03c)
+#define MT_MIB_SDR13_TX_STOP_Q_EMPTY_CNT_MASK GENMASK(15, 0)
+
+/* counts all mpdus in ampdu, regardless of success */
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x040)
+#define MT_MIB_SDR14_TX_MPDU_ATTEMPTS_CNT_MASK GENMASK(23, 0)
+
+/* counts all successfully tx'd mpdus in ampdu */
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x044)
+#define MT_MIB_SDR15_TX_MPDU_SUCCESS_CNT_MASK GENMASK(23, 0)
+
+/* in units of 'us' */
+#define MT_MIB_SDR16_DNR(_band) MT_WF_MIB(_band, 0x048)
+#define MT_MIB_SDR16_PRIMARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR17_DNR(_band) MT_WF_MIB(_band, 0x04c)
+#define MT_MIB_SDR17_SECONDARY_CCA_BUSY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR18(_band) MT_WF_MIB(_band, 0x050)
+#define MT_MIB_SDR18_PRIMARY_ENERGY_DETECT_TIME_MASK GENMASK(23, 0)
+
+/* units are us */
+#define MT_MIB_SDR19_DNR(_band) MT_WF_MIB(_band, 0x054)
+#define MT_MIB_SDR19_CCK_MDRDY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR20_DNR(_band) MT_WF_MIB(_band, 0x058)
+#define MT_MIB_SDR20_OFDM_VHT_MDRDY_TIME_MASK GENMASK(23, 0)
+
+#define MT_MIB_SDR21_DNR(_band) MT_WF_MIB(_band, 0x05c)
+#define MT_MIB_SDR20_GREEN_MDRDY_TIME_MASK GENMASK(23, 0)
+
+/* rx ampdu count, 32-bit */
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x060)
+
+/* rx ampdu bytes count, 32-bit */
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x064)
+
+/* rx ampdu valid subframe count */
+#define MT_MIB_SDR24(_band) MT_WF_MIB(_band, 0x068)
+#define MT_MIB_SDR24_RX_AMPDU_SF_CNT_MASK GENMASK(23, 0)
+
+/* rx ampdu valid subframe bytes count, 32 bits */
+#define MT_MIB_SDR25(_band) MT_WF_MIB(_band, 0x06c)
+
+/* remaining window protected (RWP) stats */
+#define MT_MIB_SDR27(_band) MT_WF_MIB(_band, 0x074)
+#define MT_MIB_SDR27_TX_RWP_FAIL_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR28(_band) MT_WF_MIB(_band, 0x078)
+#define MT_MIB_SDR28_TX_RWP_NEED_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR29(_band) MT_WF_MIB(_band, 0x07c)
+#define MT_MIB_SDR29_RX_PFDROP_CNT_MASK GENMASK(7, 0)
+
+#define MT_MIB_SDR30(_band) MT_WF_MIB(_band, 0x080)
+#define MT_MIB_SDR30_RX_VEC_QUEUE_OVERFLOW_DROP_CNT_MASK GENMASK(15, 0)
+
+/* rx blockack count, 32 bits */
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x084)
+
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x088)
+#define MT_MIB_SDR32_TX_PKT_EBF_CNT_MASK GENMASK(15, 0)
+
+#define MT_MIB_SDR33(_band) MT_WF_MIB(_band, 0x08c)
+#define MT_MIB_SDR33_TX_PKT_IBF_CNT_MASK GENMASK(15, 0)
+
#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
+/* SDR36 and SDR37 are both DNR */
+
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
#define MT_MIB_DR9(_band) MT_WF_MIB(_band, 0x0c4)
#define MT_MIB_DR11(_band) MT_WF_MIB(_band, 0x0cc)
@@ -248,7 +364,6 @@
#define MT_WF_RMAC_MIB_AIRTIME0(_band) MT_WF_RMAC(_band, 0x0380)
#define MT_WF_RMAC_MIB_RXTIME_CLR BIT(31)
-#define MT_WF_RMAC_MIB_RXTIME_EN BIT(30)
/* WFDMA0 */
#define MT_WFDMA0_BASE 0xd4000
@@ -413,6 +528,18 @@
#define MT_HIF_REMAP_L2_BASE GENMASK(31, 12)
#define MT_HIF_REMAP_BASE_L2 0x00000
+#define MT_DIC_CMD_REG_BASE 0x41f000
+#define MT_DIC_CMD_REG(ofs) (MT_DIC_CMD_REG_BASE + (ofs))
+#define MT_DIC_CMD_REG_CMD MT_DIC_CMD_REG(0x10)
+
+#define MT_CPU_UTIL_BASE 0x41f030
+#define MT_CPU_UTIL(ofs) (MT_CPU_UTIL_BASE + (ofs))
+#define MT_CPU_UTIL_BUSY_PCT MT_CPU_UTIL(0x00)
+#define MT_CPU_UTIL_PEAK_BUSY_PCT MT_CPU_UTIL(0x04)
+#define MT_CPU_UTIL_IDLE_CNT MT_CPU_UTIL(0x08)
+#define MT_CPU_UTIL_PEAK_IDLE_CNT MT_CPU_UTIL(0x0c)
+#define MT_CPU_UTIL_CTRL MT_CPU_UTIL(0x1c)
+
#define MT_SWDEF_BASE 0x41f200
#define MT_SWDEF(ofs) (MT_SWDEF_BASE + (ofs))
#define MT_SWDEF_MODE MT_SWDEF(0x3c)
@@ -420,6 +547,20 @@
#define MT_SWDEF_ICAP_MODE 1
#define MT_SWDEF_SPECTRUM_MODE 2
+#define MT_LED_TOP_BASE 0x18013000
+#define MT_LED_PHYS(_n) (MT_LED_TOP_BASE + (_n))
+
+#define MT_LED_CTRL(_n) MT_LED_PHYS(0x00 + ((_n) * 4))
+#define MT_LED_CTRL_KICK BIT(7)
+#define MT_LED_CTRL_BLINK_MODE BIT(2)
+#define MT_LED_CTRL_POLARITY BIT(1)
+
+#define MT_LED_TX_BLINK(_n) MT_LED_PHYS(0x10 + ((_n) * 4))
+#define MT_LED_TX_BLINK_ON_MASK GENMASK(7, 0)
+#define MT_LED_TX_BLINK_OFF_MASK GENMASK(15, 8)
+
+#define MT_LED_EN(_n) MT_LED_PHYS(0x40 + ((_n) * 4))
+
#define MT_TOP_BASE 0x18060000
#define MT_TOP(ofs) (MT_TOP_BASE + (ofs))
@@ -430,6 +571,10 @@
#define MT_TOP_MISC MT_TOP(0xf0)
#define MT_TOP_MISC_FW_STATE GENMASK(2, 0)
+#define MT_LED_GPIO_MUX2 0x70005058 /* GPIO 18 */
+#define MT_LED_GPIO_MUX3 0x7000505C /* GPIO 26 */
+#define MT_LED_GPIO_SEL_MASK GENMASK(11, 8)
+
#define MT_HW_BOUND 0x70010020
#define MT_HW_CHIPID 0x70010200
#define MT_HW_REV 0x70010204
@@ -457,4 +602,9 @@
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR_ONLY BIT(18)
#define MT_WF_PHY_RXTD12_IRPI_SW_CLR BIT(29)
+#define MT_MCU_WM_CIRQ_BASE 0x89010000
+#define MT_MCU_WM_CIRQ(ofs) (MT_MCU_WM_CIRQ_BASE + (ofs))
+#define MT_MCU_WM_CIRQ_IRQ_MASK_CLR_ADDR MT_MCU_WM_CIRQ(0x80)
+#define MT_MCU_WM_CIRQ_IRQ_SOFT_ADDR MT_MCU_WM_CIRQ(0xc0)
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
index b220b334906b..89aae323d29e 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.c
@@ -166,6 +166,22 @@ mt7915_tm_set_slot_time(struct mt7915_phy *phy, u8 slot_time, u8 sifs)
}
static int
+mt7915_tm_set_tam_arb(struct mt7915_phy *phy, bool enable, bool mu)
+{
+ struct mt7915_dev *dev = phy->dev;
+ u32 op_mode;
+
+ if (!enable)
+ op_mode = TAM_ARB_OP_MODE_NORMAL;
+ else if (mu)
+ op_mode = TAM_ARB_OP_MODE_TEST;
+ else
+ op_mode = TAM_ARB_OP_MODE_FORCE_SU;
+
+ return mt7915_mcu_set_muru_ctrl(dev, MURU_SET_ARB_OP_MODE, op_mode);
+}
+
+static int
mt7915_tm_set_wmm_qid(struct mt7915_dev *dev, u8 qid, u8 aifs, u8 cw_min,
u16 cw_max, u16 txop)
{
@@ -397,6 +413,10 @@ mt7915_tm_init(struct mt7915_phy *phy, bool en)
mt7915_tm_set_trx(phy, TM_MAC_TXRX, !en);
mt7915_mcu_add_bss_info(phy, phy->monitor_vif, en);
+ mt7915_mcu_add_sta(dev, phy->monitor_vif, NULL, en);
+
+ if (!en)
+ mt7915_tm_set_tam_arb(phy, en, 0);
}
static void
@@ -438,6 +458,9 @@ mt7915_tm_set_tx_frames(struct mt7915_phy *phy, bool en)
}
}
+ mt7915_tm_set_tam_arb(phy, en,
+ td->tx_rate_mode == MT76_TM_TX_MODE_HE_MU);
+
/* if all three params are set, duty_cycle will be ignored */
if (duty_cycle && tx_time && !ipg) {
ipg = tx_time * 100 / duty_cycle - tx_time;
diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
index 397a6b5532bc..5573ac309363 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7915/testmode.h
@@ -96,4 +96,10 @@ enum {
RF_OPER_WIFI_SPECTRUM,
};
+enum {
+ TAM_ARB_OP_MODE_NORMAL = 1,
+ TAM_ARB_OP_MODE_TEST,
+ TAM_ARB_OP_MODE_FORCE_SU = 5,
+};
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
index 001f2b9cec26..71154fc2a87c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Kconfig
@@ -1,11 +1,26 @@
# SPDX-License-Identifier: ISC
-config MT7921E
- tristate "MediaTek MT7921E (PCIe) support"
+config MT7921_COMMON
+ tristate
select MT76_CONNAC_LIB
select WANT_DEV_COREDUMP
+
+config MT7921E
+ tristate "MediaTek MT7921E (PCIe) support"
+ select MT7921_COMMON
depends on MAC80211
depends on PCI
help
This adds support for MT7921E 802.11ax 2x2:2SS wireless devices.
To compile this driver as a module, choose M here.
+
+config MT7921S
+ tristate "MediaTek MT7921S (SDIO) support"
+ select MT76_SDIO
+ select MT7921_COMMON
+ depends on MAC80211
+ depends on MMC
+ help
+ This adds support for MT7921S 802.11ax 2x2:2SS wireless devices.
+
+ To compile this driver as a module, choose M here.
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
index 0ebb59966a08..1187acedfeda 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/Makefile
@@ -1,7 +1,12 @@
# SPDX-License-Identifier: ISC
+obj-$(CONFIG_MT7921_COMMON) += mt7921-common.o
obj-$(CONFIG_MT7921E) += mt7921e.o
+obj-$(CONFIG_MT7921S) += mt7921s.o
CFLAGS_trace.o := -I$(src)
-mt7921e-y := pci.o mac.o mcu.o dma.o eeprom.o main.o init.o debugfs.o trace.o
+mt7921-common-y := mac.o mcu.o main.o init.o debugfs.o trace.o
+mt7921-common-$(CONFIG_NL80211_TESTMODE) += testmode.o
+mt7921e-y := pci.o pci_mac.o pci_mcu.o dma.o
+mt7921s-y := sdio.o sdio_mac.o sdio_mcu.o
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
index 77468bdae460..7cdfdf83529f 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/debugfs.c
@@ -5,6 +5,32 @@
#include "eeprom.h"
static int
+mt7921_reg_set(void *data, u64 val)
+{
+ struct mt7921_dev *dev = data;
+
+ mt7921_mutex_acquire(dev);
+ mt76_wr(dev, dev->mt76.debugfs_reg, val);
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+static int
+mt7921_reg_get(void *data, u64 *val)
+{
+ struct mt7921_dev *dev = data;
+
+ mt7921_mutex_acquire(dev);
+ *val = mt76_rr(dev, dev->mt76.debugfs_reg);
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt7921_reg_get, mt7921_reg_set,
+ "0x%08llx\n");
+static int
mt7921_fw_debug_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
@@ -42,6 +68,8 @@ mt7921_ampdu_stat_read_phy(struct mt7921_phy *phy,
if (!phy)
return;
+ mt7921_mac_update_mib_stats(phy);
+
/* Tx ampdu stat */
for (i = 0; i < ARRAY_SIZE(range); i++)
range[i] = mt76_rr(dev, MT_MIB_ARNG(0, i));
@@ -67,26 +95,27 @@ static int
mt7921_tx_stats_show(struct seq_file *file, void *data)
{
struct mt7921_dev *dev = file->private;
- int stat[8], i, n;
+ struct mt7921_phy *phy = &dev->phy;
+ struct mib_stats *mib = &phy->mib;
+ int i;
- mt7921_ampdu_stat_read_phy(&dev->phy, file);
+ mt7921_mutex_acquire(dev);
- /* Tx amsdu info */
- seq_puts(file, "Tx MSDU stat:\n");
- for (i = 0, n = 0; i < ARRAY_SIZE(stat); i++) {
- stat[i] = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
- n += stat[i];
- }
+ mt7921_ampdu_stat_read_phy(phy, file);
- for (i = 0; i < ARRAY_SIZE(stat); i++) {
- seq_printf(file, "AMSDU pack count of %d MSDU in TXD: 0x%x ",
- i + 1, stat[i]);
- if (n != 0)
- seq_printf(file, "(%d%%)\n", stat[i] * 100 / n);
+ seq_puts(file, "Tx MSDU stat:\n");
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ seq_printf(file, "AMSDU pack count of %d MSDU in TXD: %8d ",
+ i + 1, mib->tx_amsdu[i]);
+ if (mib->tx_amsdu_cnt)
+ seq_printf(file, "(%3d%%)\n",
+ mib->tx_amsdu[i] * 100 / mib->tx_amsdu_cnt);
else
seq_puts(file, "\n");
}
+ mt7921_mutex_release(dev);
+
return 0;
}
@@ -98,6 +127,8 @@ mt7921_queues_acq(struct seq_file *s, void *data)
struct mt7921_dev *dev = dev_get_drvdata(s->private);
int i;
+ mt7921_mutex_acquire(dev);
+
for (i = 0; i < 16; i++) {
int j, acs = i / 4, index = i % 4;
u32 ctrl, val, qlen = 0;
@@ -117,6 +148,8 @@ mt7921_queues_acq(struct seq_file *s, void *data)
seq_printf(s, "AC%d%d: queued=%d\n", acs, index, qlen);
}
+ mt7921_mutex_release(dev);
+
return 0;
}
@@ -229,30 +262,38 @@ mt7921_txpwr(struct seq_file *s, void *data)
return 0;
}
+static void
+mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct mt7921_dev *dev = priv;
+
+ mt7921_mcu_set_beacon_filter(dev, vif, dev->pm.enable);
+}
+
static int
mt7921_pm_set(void *data, u64 val)
{
struct mt7921_dev *dev = data;
struct mt76_connac_pm *pm = &dev->pm;
- struct mt76_phy *mphy = dev->phy.mt76;
-
- if (val == pm->enable)
- return 0;
mt7921_mutex_acquire(dev);
+ if (val == pm->enable)
+ goto out;
+
if (!pm->enable) {
pm->stats.last_wake_event = jiffies;
pm->stats.last_doze_event = jiffies;
}
pm->enable = val;
- ieee80211_iterate_active_interfaces(mphy->hw,
+ ieee80211_iterate_active_interfaces(mt76_hw(dev),
IEEE80211_IFACE_ITER_RESUME_ALL,
- mt7921_pm_interface_iter, mphy->priv);
+ mt7921_pm_interface_iter, dev);
mt76_connac_mcu_set_deep_sleep(&dev->mt76, pm->ds_enable);
+out:
mt7921_mutex_release(dev);
return 0;
@@ -369,11 +410,25 @@ static int mt7921_chip_reset(void *data, u64 val)
DEFINE_DEBUGFS_ATTRIBUTE(fops_reset, NULL, mt7921_chip_reset, "%lld\n");
+static int
+mt7921s_sched_quota_read(struct seq_file *s, void *data)
+{
+ struct mt7921_dev *dev = dev_get_drvdata(s->private);
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+
+ seq_printf(s, "pse_data_quota\t%d\n", sdio->sched.pse_data_quota);
+ seq_printf(s, "ple_data_quota\t%d\n", sdio->sched.ple_data_quota);
+ seq_printf(s, "pse_mcu_quota\t%d\n", sdio->sched.pse_mcu_quota);
+ seq_printf(s, "sched_deficit\t%d\n", sdio->sched.deficit);
+
+ return 0;
+}
+
int mt7921_init_debugfs(struct mt7921_dev *dev)
{
struct dentry *dir;
- dir = mt76_register_debugfs(&dev->mt76);
+ dir = mt76_register_debugfs_fops(&dev->mphy, &fops_regval);
if (!dir)
return -ENOMEM;
@@ -392,6 +447,8 @@ int mt7921_init_debugfs(struct mt7921_dev *dev)
debugfs_create_devm_seqfile(dev->mt76.dev, "runtime_pm_stats", dir,
mt7921_pm_stats);
debugfs_create_file("deep-sleep", 0600, dir, dev, &fops_ds);
-
+ if (mt76_is_sdio(&dev->mt76))
+ debugfs_create_devm_seqfile(dev->mt76.dev, "sched-quota", dir,
+ mt7921s_sched_quota_read);
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
index 7d7d43a5422f..cdff1fd52d93 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/dma.c
@@ -19,46 +19,6 @@ int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc)
return 0;
}
-void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
- struct sk_buff *skb)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- __le32 *rxd = (__le32 *)skb->data;
- enum rx_pkt_type type;
- u16 flag;
-
- type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
- flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
-
- if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
- type = PKT_TYPE_NORMAL_MCU;
-
- switch (type) {
- case PKT_TYPE_TXRX_NOTIFY:
- mt7921_mac_tx_free(dev, skb);
- break;
- case PKT_TYPE_RX_EVENT:
- mt7921_mcu_rx_event(dev, skb);
- break;
- case PKT_TYPE_NORMAL_MCU:
- case PKT_TYPE_NORMAL:
- if (!mt7921_mac_fill_rx(dev, skb)) {
- mt76_rx(&dev->mt76, q, skb);
- return;
- }
- fallthrough;
- default:
- dev_kfree_skb(skb);
- break;
- }
-}
-
-void mt7921_tx_cleanup(struct mt7921_dev *dev)
-{
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
-}
-
static int mt7921_poll_tx(struct napi_struct *napi, int budget)
{
struct mt7921_dev *dev;
@@ -71,7 +31,7 @@ static int mt7921_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
- mt7921_tx_cleanup(dev);
+ mt7921_mcu_tx_cleanup(dev);
if (napi_complete(napi))
mt7921_irq_enable(dev, MT_INT_TX_DONE_ALL);
mt76_connac_pm_unref(&dev->mphy, &dev->pm);
@@ -125,36 +85,37 @@ static u32 __mt7921_reg_addr(struct mt7921_dev *dev, u32 addr)
u32 mapped;
u32 size;
} fixed_map[] = {
- { 0x00400000, 0x80000, 0x10000}, /* WF_MCU_SYSRAM */
- { 0x00410000, 0x90000, 0x10000}, /* WF_MCU_SYSRAM (configure register) */
- { 0x40000000, 0x70000, 0x10000}, /* WF_UMAC_SYSRAM */
+ { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
+ { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
+ { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
+ { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
+ { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
+ { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
+ { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
+ { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
+ { 0x00400000, 0x80000, 0x10000 }, /* WF_MCU_SYSRAM */
+ { 0x00410000, 0x90000, 0x10000 }, /* WF_MCU_SYSRAM (configure register) */
+ { 0x40000000, 0x70000, 0x10000 }, /* WF_UMAC_SYSRAM */
{ 0x54000000, 0x02000, 0x1000 }, /* WFDMA PCIE0 MCU DMA0 */
{ 0x55000000, 0x03000, 0x1000 }, /* WFDMA PCIE0 MCU DMA1 */
{ 0x58000000, 0x06000, 0x1000 }, /* WFDMA PCIE1 MCU DMA0 (MEM_DMA) */
{ 0x59000000, 0x07000, 0x1000 }, /* WFDMA PCIE1 MCU DMA1 */
{ 0x7c000000, 0xf0000, 0x10000 }, /* CONN_INFRA */
{ 0x7c020000, 0xd0000, 0x10000 }, /* CONN_INFRA, WFDMA */
- { 0x7c060000, 0xe0000, 0x10000}, /* CONN_INFRA, conn_host_csr_top */
+ { 0x7c060000, 0xe0000, 0x10000 }, /* CONN_INFRA, conn_host_csr_top */
{ 0x80020000, 0xb0000, 0x10000 }, /* WF_TOP_MISC_OFF */
{ 0x81020000, 0xc0000, 0x10000 }, /* WF_TOP_MISC_ON */
{ 0x820c0000, 0x08000, 0x4000 }, /* WF_UMAC_TOP (PLE) */
{ 0x820c8000, 0x0c000, 0x2000 }, /* WF_UMAC_TOP (PSE) */
- { 0x820cc000, 0x0e000, 0x2000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cc000, 0x0e000, 0x1000 }, /* WF_UMAC_TOP (PP) */
+ { 0x820cd000, 0x0f000, 0x1000 }, /* WF_MDP_TOP */
{ 0x820ce000, 0x21c00, 0x0200 }, /* WF_LMAC_TOP (WF_SEC) */
{ 0x820cf000, 0x22000, 0x1000 }, /* WF_LMAC_TOP (WF_PF) */
- { 0x820d0000, 0x30000, 0x10000 }, /* WF_LMAC_TOP (WF_WTBLON) */
{ 0x820e0000, 0x20000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_CFG) */
{ 0x820e1000, 0x20400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_TRB) */
- { 0x820e2000, 0x20800, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_AGG) */
- { 0x820e3000, 0x20c00, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_ARB) */
- { 0x820e4000, 0x21000, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_TMAC) */
- { 0x820e5000, 0x21400, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_RMAC) */
- { 0x820e7000, 0x21e00, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_DMA) */
{ 0x820e9000, 0x23400, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_WTBLOFF) */
{ 0x820ea000, 0x24000, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_ETBF) */
- { 0x820eb000, 0x24200, 0x0400 }, /* WF_LMAC_TOP BN0 (WF_LPON) */
{ 0x820ec000, 0x24600, 0x0200 }, /* WF_LMAC_TOP BN0 (WF_INT) */
- { 0x820ed000, 0x24800, 0x0800 }, /* WF_LMAC_TOP BN0 (WF_MIB) */
{ 0x820f0000, 0xa0000, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_CFG) */
{ 0x820f1000, 0xa0600, 0x0200 }, /* WF_LMAC_TOP BN1 (WF_TRB) */
{ 0x820f2000, 0xa0800, 0x0400 }, /* WF_LMAC_TOP BN1 (WF_AGG) */
@@ -306,7 +267,7 @@ static int mt7921_dma_reset(struct mt7921_dev *dev, bool force)
mt76_for_each_q_rx(&dev->mt76, i)
mt76_queue_reset(dev, &dev->mt76.q_rx[i]);
- mt76_tx_status_check(&dev->mt76, NULL, true);
+ mt76_tx_status_check(&dev->mt76, true);
return mt7921_dma_enable(dev);
}
@@ -383,6 +344,9 @@ int mt7921_dma_init(struct mt7921_dev *dev)
struct mt76_bus_ops *bus_ops;
int ret;
+ dev->phy.dev = dev;
+ dev->phy.mt76 = &dev->mt76.phy;
+ dev->mt76.phy.priv = &dev->phy;
dev->bus_ops = dev->mt76.bus;
bus_ops = devm_kmemdup(dev->mt76.dev, dev->bus_ops, sizeof(*bus_ops),
GFP_KERNEL);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
deleted file mode 100644
index 691d14a1a7bf..000000000000
--- a/drivers/net/wireless/mediatek/mt76/mt7921/eeprom.c
+++ /dev/null
@@ -1,100 +0,0 @@
-// SPDX-License-Identifier: ISC
-/* Copyright (C) 2020 MediaTek Inc. */
-
-#include "mt7921.h"
-#include "eeprom.h"
-
-static u32 mt7921_eeprom_read(struct mt7921_dev *dev, u32 offset)
-{
- u8 *data = dev->mt76.eeprom.data;
-
- if (data[offset] == 0xff)
- mt7921_mcu_get_eeprom(dev, offset);
-
- return data[offset];
-}
-
-static int mt7921_eeprom_load(struct mt7921_dev *dev)
-{
- int ret;
-
- ret = mt76_eeprom_init(&dev->mt76, MT7921_EEPROM_SIZE);
- if (ret < 0)
- return ret;
-
- memset(dev->mt76.eeprom.data, -1, MT7921_EEPROM_SIZE);
-
- return 0;
-}
-
-static int mt7921_check_eeprom(struct mt7921_dev *dev)
-{
- u8 *eeprom = dev->mt76.eeprom.data;
- u16 val;
-
- mt7921_eeprom_read(dev, MT_EE_CHIP_ID);
- val = get_unaligned_le16(eeprom);
-
- switch (val) {
- case 0x7961:
- return 0;
- default:
- return -EINVAL;
- }
-}
-
-void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy)
-{
- struct mt7921_dev *dev = phy->dev;
- u32 val;
-
- val = mt7921_eeprom_read(dev, MT_EE_WIFI_CONF);
- val = FIELD_GET(MT_EE_WIFI_CONF_BAND_SEL, val);
-
- switch (val) {
- case MT_EE_5GHZ:
- phy->mt76->cap.has_5ghz = true;
- break;
- case MT_EE_2GHZ:
- phy->mt76->cap.has_2ghz = true;
- break;
- default:
- phy->mt76->cap.has_2ghz = true;
- phy->mt76->cap.has_5ghz = true;
- break;
- }
-}
-
-static void mt7921_eeprom_parse_hw_cap(struct mt7921_dev *dev)
-{
- u8 tx_mask;
-
- mt7921_eeprom_parse_band_config(&dev->phy);
-
- /* TODO: read NSS with MCU_CMD_NIC_CAPV2 */
- tx_mask = 2;
- dev->chainmask = BIT(tx_mask) - 1;
- dev->mphy.antenna_mask = dev->chainmask;
- dev->mphy.chainmask = dev->mphy.antenna_mask;
-}
-
-int mt7921_eeprom_init(struct mt7921_dev *dev)
-{
- int ret;
-
- ret = mt7921_eeprom_load(dev);
- if (ret < 0)
- return ret;
-
- ret = mt7921_check_eeprom(dev);
- if (ret)
- return ret;
-
- mt7921_eeprom_parse_hw_cap(dev);
- memcpy(dev->mphy.macaddr, dev->mt76.eeprom.data + MT_EE_MAC_ADDR,
- ETH_ALEN);
-
- mt76_eeprom_override(&dev->mphy);
-
- return 0;
-}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/init.c b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
index a9ce10b98827..210998f086ab 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/init.c
@@ -41,7 +41,7 @@ mt7921_regd_notifier(struct wiphy *wiphy,
mt7921_mutex_release(dev);
}
-static void
+static int
mt7921_init_wiphy(struct ieee80211_hw *hw)
{
struct mt7921_phy *phy = mt7921_hw_phy(hw);
@@ -62,7 +62,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
hw->vif_data_size = sizeof(struct mt7921_vif);
wiphy->iface_combinations = if_comb;
- wiphy->flags &= ~WIPHY_FLAG_IBSS_RSN;
+ wiphy->flags &= ~(WIPHY_FLAG_IBSS_RSN | WIPHY_FLAG_4ADDR_AP |
+ WIPHY_FLAG_4ADDR_STATION);
wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
wiphy->max_scan_ie_len = MT76_CONNAC_SCAN_IE_LEN;
@@ -75,6 +76,14 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
wiphy->max_sched_scan_reqs = 1;
wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
wiphy->reg_notifier = mt7921_regd_notifier;
+ wiphy->sar_capa = &mt76_sar_capa;
+
+ phy->mt76->frp = devm_kcalloc(dev->mt76.dev,
+ wiphy->sar_capa->num_freq_ranges,
+ sizeof(struct mt76_freq_range_power),
+ GFP_KERNEL);
+ if (!phy->mt76->frp)
+ return -ENOMEM;
wiphy->features |= NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
@@ -92,6 +101,8 @@ mt7921_init_wiphy(struct ieee80211_hw *hw)
ieee80211_hw_set(hw, CONNECTION_MONITOR);
hw->max_tx_fragments = 4;
+
+ return 0;
}
static void
@@ -106,6 +117,10 @@ mt7921_mac_init_band(struct mt7921_dev *dev, u8 band)
mt76_set(dev, MT_WF_RMAC_MIB_TIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
mt76_set(dev, MT_WF_RMAC_MIB_AIRTIME0(band), MT_WF_RMAC_MIB_RXTIME_EN);
+ /* enable MIB tx-rx time reporting */
+ mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_TXDUR_EN);
+ mt76_set(dev, MT_MIB_SCR1(band), MT_MIB_RXDUR_EN);
+
mt76_rmw_field(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_MAX_RX_LEN, 1536);
/* disable rx rate report by default due to hw issues */
mt76_clear(dev, MT_DMA_DCR0(band), MT_DMA_DCR0_RXD_G5_EN);
@@ -127,35 +142,53 @@ int mt7921_mac_init(struct mt7921_dev *dev)
for (i = 0; i < 2; i++)
mt7921_mac_init_band(dev, i);
+ dev->mt76.rxfilter = mt76_rr(dev, MT_WF_RFCR(0));
+
return mt76_connac_mcu_set_rts_thresh(&dev->mt76, 0x92b, 0);
}
+EXPORT_SYMBOL_GPL(mt7921_mac_init);
-static int mt7921_init_hardware(struct mt7921_dev *dev)
+static int __mt7921_init_hardware(struct mt7921_dev *dev)
{
- int ret, idx;
-
- ret = mt7921_dma_init(dev);
- if (ret)
- return ret;
-
- set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+ int ret;
/* force firmware operation mode into normal state,
* which should be set before firmware download stage.
*/
mt76_wr(dev, MT_SWDEF_MODE, MT_SWDEF_NORMAL_MODE);
-
ret = mt7921_mcu_init(dev);
if (ret)
- return ret;
+ goto out;
- ret = mt7921_eeprom_init(dev);
- if (ret < 0)
- return ret;
+ mt76_eeprom_override(&dev->mphy);
ret = mt7921_mcu_set_eeprom(dev);
if (ret)
+ goto out;
+
+ ret = mt7921_mac_init(dev);
+out:
+ return ret;
+}
+
+static int mt7921_init_hardware(struct mt7921_dev *dev)
+{
+ int ret, idx, i;
+
+ set_bit(MT76_STATE_INITIALIZED, &dev->mphy.state);
+
+ for (i = 0; i < MT7921_MCU_INIT_RETRY_COUNT; i++) {
+ ret = __mt7921_init_hardware(dev);
+ if (!ret)
+ break;
+
+ mt7921_init_reset(dev);
+ }
+
+ if (i == MT7921_MCU_INIT_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "hardware init failed\n");
return ret;
+ }
/* Beacon and mgmt frames should occupy wcid 0 */
idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT7921_WTBL_STA - 1);
@@ -167,7 +200,7 @@ static int mt7921_init_hardware(struct mt7921_dev *dev)
dev->mt76.global_wcid.tx_info |= MT_WCID_TX_INFO_SET;
rcu_assign_pointer(dev->mt76.wcid[idx], &dev->mt76.global_wcid);
- return mt7921_mac_init(dev);
+ return 0;
}
int mt7921_register_device(struct mt7921_dev *dev)
@@ -185,8 +218,9 @@ int mt7921_register_device(struct mt7921_dev *dev)
spin_lock_init(&dev->pm.wake.lock);
mutex_init(&dev->pm.mutex);
init_waitqueue_head(&dev->pm.wait);
+ if (mt76_is_sdio(&dev->mt76))
+ init_waitqueue_head(&dev->mt76.sdio.wait);
spin_lock_init(&dev->pm.txq_lock);
- INIT_LIST_HEAD(&dev->phy.stats_list);
INIT_DELAYED_WORK(&dev->mphy.mac_work, mt7921_mac_work);
INIT_DELAYED_WORK(&dev->phy.scan_work, mt7921_scan_work);
INIT_DELAYED_WORK(&dev->coredump.work, mt7921_coredump_work);
@@ -200,14 +234,24 @@ int mt7921_register_device(struct mt7921_dev *dev)
dev->pm.idle_timeout = MT7921_PM_TIMEOUT;
dev->pm.stats.last_wake_event = jiffies;
dev->pm.stats.last_doze_event = jiffies;
- dev->pm.enable = true;
- dev->pm.ds_enable = true;
+
+ /* TODO: make mt7921s run sleep mode by default */
+ if (mt76_is_mmio(&dev->mt76)) {
+ dev->pm.enable = true;
+ dev->pm.ds_enable = true;
+ }
+
+ if (mt76_is_sdio(&dev->mt76))
+ hw->extra_tx_headroom += MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
ret = mt7921_init_hardware(dev);
if (ret)
return ret;
- mt7921_init_wiphy(hw);
+ ret = mt7921_init_wiphy(hw);
+ if (ret)
+ return ret;
+
dev->mphy.sband_2g.sband.ht_cap.cap |=
IEEE80211_HT_CAP_LDPC_CODING |
IEEE80211_HT_CAP_MAX_AMSDU;
@@ -244,14 +288,4 @@ int mt7921_register_device(struct mt7921_dev *dev)
return 0;
}
-
-void mt7921_unregister_device(struct mt7921_dev *dev)
-{
- mt76_unregister_device(&dev->mt76);
- mt7921_tx_token_put(dev);
- mt7921_dma_cleanup(dev);
- mt7921_mcu_exit(dev);
-
- tasklet_disable(&dev->irq_tasklet);
- mt76_free_device(&dev->mt76);
-}
+EXPORT_SYMBOL_GPL(mt7921_register_device);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
index 7fe2e3a50428..db3302b1576a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c
@@ -39,6 +39,7 @@ static struct mt76_wcid *mt7921_rx_get_wcid(struct mt7921_dev *dev,
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
{
}
+EXPORT_SYMBOL_GPL(mt7921_sta_ps);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
{
@@ -49,7 +50,7 @@ bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask)
0, 5000);
}
-static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
+void mt7921_mac_sta_poll(struct mt7921_dev *dev)
{
static const u8 ac_to_tid[] = {
[IEEE80211_AC_BE] = 0,
@@ -61,18 +62,18 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
struct mt7921_sta *msta;
u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS];
LIST_HEAD(sta_poll_list);
+ struct rate_info *rate;
int i;
spin_lock_bh(&dev->sta_poll_lock);
list_splice_init(&dev->sta_poll_list, &sta_poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
- rcu_read_lock();
-
while (true) {
bool clear = false;
- u32 addr;
+ u32 addr, val;
u16 idx;
+ u8 bw;
spin_lock_bh(&dev->sta_poll_lock);
if (list_empty(&sta_poll_list)) {
@@ -85,7 +86,7 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
spin_unlock_bh(&dev->sta_poll_lock);
idx = msta->wcid.idx;
- addr = MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4;
+ addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET);
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
u32 tx_last = msta->airtime_ac[i];
@@ -126,10 +127,46 @@ static void mt7921_mac_sta_poll(struct mt7921_dev *dev)
ieee80211_sta_register_airtime(sta, tid, tx_cur,
rx_cur);
}
- }
- rcu_read_unlock();
+ /* We don't support reading GI info from txs packets.
+ * For accurate tx status reporting and AQL improvement,
+ * we need to make sure the flags match, so poll the GI
+ * from the per-sta counters directly.
+ */
+ rate = &msta->wcid.rate;
+ addr = mt7921_mac_wtbl_lmac_addr(idx,
+ MT_WTBL_TXRX_CAP_RATE_OFFSET);
+ val = mt76_rr(dev, addr);
+
+ switch (rate->bw) {
+ case RATE_INFO_BW_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ case RATE_INFO_BW_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case RATE_INFO_BW_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ }
+
+ if (rate->flags & RATE_INFO_FLAGS_HE_MCS) {
+ u8 offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;
+
+ rate->he_gi = (val & (0x3 << offs)) >> offs;
+ } else if (rate->flags &
+ (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_MCS)) {
+ if (val & BIT(MT_WTBL_TXRX_RATE_G2 + bw))
+ rate->flags |= RATE_INFO_FLAGS_SHORT_GI;
+ else
+ rate->flags &= ~RATE_INFO_FLAGS_SHORT_GI;
+ }
+ }
}
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_poll);
static void
mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
@@ -181,11 +218,55 @@ mt7921_mac_decode_he_radiotap_ru(struct mt76_rx_status *status,
}
static void
+mt7921_mac_decode_he_mu_radiotap(struct sk_buff *skb,
+ struct mt76_rx_status *status,
+ __le32 *rxv)
+{
+ static const struct ieee80211_radiotap_he_mu mu_known = {
+ .flags1 = HE_BITS(MU_FLAGS1_SIG_B_MCS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_DCM_KNOWN) |
+ HE_BITS(MU_FLAGS1_CH1_RU_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN) |
+ HE_BITS(MU_FLAGS1_SIG_B_COMP_KNOWN),
+ .flags2 = HE_BITS(MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN) |
+ HE_BITS(MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
+ };
+ struct ieee80211_radiotap_he_mu *he_mu;
+
+ he_mu = skb_push(skb, sizeof(mu_known));
+ memcpy(he_mu, &mu_known, sizeof(mu_known));
+
+#define MU_PREP(f, v) le16_encode_bits(v, IEEE80211_RADIOTAP_HE_MU_##f)
+
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_MCS, status->rate_idx);
+ if (status->he_dcm)
+ he_mu->flags1 |= MU_PREP(FLAGS1_SIG_B_DCM, status->he_dcm);
+
+ he_mu->flags2 |= MU_PREP(FLAGS2_BW_FROM_SIG_A_BW, status->bw) |
+ MU_PREP(FLAGS2_SIG_B_SYMS_USERS,
+ le32_get_bits(rxv[2], MT_CRXV_HE_NUM_USER));
+
+ he_mu->ru_ch1[0] = FIELD_GET(MT_CRXV_HE_RU0, le32_to_cpu(rxv[3]));
+
+ if (status->bw >= RATE_INFO_BW_40) {
+ he_mu->flags1 |= HE_BITS(MU_FLAGS1_CH2_RU_KNOWN);
+ he_mu->ru_ch2[0] =
+ FIELD_GET(MT_CRXV_HE_RU1, le32_to_cpu(rxv[3]));
+ }
+
+ if (status->bw >= RATE_INFO_BW_80) {
+ he_mu->ru_ch1[1] =
+ FIELD_GET(MT_CRXV_HE_RU2, le32_to_cpu(rxv[3]));
+ he_mu->ru_ch2[1] =
+ FIELD_GET(MT_CRXV_HE_RU3, le32_to_cpu(rxv[3]));
+ }
+}
+
+static void
mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
struct mt76_rx_status *status,
__le32 *rxv, u32 phy)
{
- /* TODO: struct ieee80211_radiotap_he_mu */
static const struct ieee80211_radiotap_he known = {
.data1 = HE_BITS(DATA1_DATA_MCS_KNOWN) |
HE_BITS(DATA1_DATA_DCM_KNOWN) |
@@ -193,6 +274,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
HE_BITS(DATA1_CODING_KNOWN) |
HE_BITS(DATA1_LDPC_XSYMSEG_KNOWN) |
HE_BITS(DATA1_DOPPLER_KNOWN) |
+ HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
HE_BITS(DATA1_BSS_COLOR_KNOWN),
.data2 = HE_BITS(DATA2_GI_KNOWN) |
HE_BITS(DATA2_TXBF_KNOWN) |
@@ -207,9 +289,12 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
he->data3 = HE_PREP(DATA3_BSS_COLOR, BSS_COLOR, rxv[14]) |
HE_PREP(DATA3_LDPC_XSYMSEG, LDPC_EXT_SYM, rxv[2]);
+ he->data4 = HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
he->data5 = HE_PREP(DATA5_PE_DISAMBIG, PE_DISAMBIG, rxv[2]) |
le16_encode_bits(ltf_size,
IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE);
+ if (le32_to_cpu(rxv[0]) & MT_PRXV_TXBF)
+ he->data5 |= HE_BITS(DATA5_TXBF);
he->data6 = HE_PREP(DATA6_TXOP, TXOP_DUR, rxv[14]) |
HE_PREP(DATA6_DOPPLER, DOPPLER, rxv[14]);
@@ -217,8 +302,7 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
case MT_PHY_TYPE_HE_SU:
he->data1 |= HE_BITS(DATA1_FORMAT_SU) |
HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_BEAM_CHANGE_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+ HE_BITS(DATA1_BEAM_CHANGE_KNOWN);
he->data3 |= HE_PREP(DATA3_BEAM_CHANGE, BEAM_CHNG, rxv[14]) |
HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
@@ -232,17 +316,15 @@ mt7921_mac_decode_he_radiotap(struct sk_buff *skb,
break;
case MT_PHY_TYPE_HE_MU:
he->data1 |= HE_BITS(DATA1_FORMAT_MU) |
- HE_BITS(DATA1_UL_DL_KNOWN) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN);
+ HE_BITS(DATA1_UL_DL_KNOWN);
he->data3 |= HE_PREP(DATA3_UL_DL, UPLINK, rxv[2]);
- he->data4 |= HE_PREP(DATA4_SU_MU_SPTL_REUSE, SR_MASK, rxv[11]);
+ he->data4 |= HE_PREP(DATA4_MU_STA_ID, MU_AID, rxv[7]);
mt7921_mac_decode_he_radiotap_ru(status, he, rxv);
break;
case MT_PHY_TYPE_HE_TB:
he->data1 |= HE_BITS(DATA1_FORMAT_TRIG) |
- HE_BITS(DATA1_SPTL_REUSE_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE2_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE3_KNOWN) |
HE_BITS(DATA1_SPTL_REUSE4_KNOWN);
@@ -271,7 +353,14 @@ mt7921_get_status_freq_info(struct mt7921_dev *dev, struct mt76_phy *mphy,
return;
}
- status->band = chfreq <= 14 ? NL80211_BAND_2GHZ : NL80211_BAND_5GHZ;
+ if (chfreq > 180) {
+ status->band = NL80211_BAND_6GHZ;
+ chfreq = (chfreq - 181) * 4 + 1;
+ } else if (chfreq > 14) {
+ status->band = NL80211_BAND_5GHZ;
+ } else {
+ status->band = NL80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(chfreq, status->band);
}
@@ -306,7 +395,8 @@ mt7921_mac_assoc_rssi(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_rssi_iter, skb);
}
-int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
+static int
+mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
{
u32 csum_mask = MT_RXD0_NORMAL_IP_SUM | MT_RXD0_NORMAL_UDP_TCP_SUM;
struct mt76_rx_status *status = (struct mt76_rx_status *)skb->cb;
@@ -356,10 +446,17 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_get_status_freq_info(dev, mphy, status, chfreq);
- if (status->band == NL80211_BAND_5GHZ)
+ switch (status->band) {
+ case NL80211_BAND_5GHZ:
sband = &mphy->sband_5g.sband;
- else
+ break;
+ case NL80211_BAND_6GHZ:
+ sband = &mphy->sband_6g.sband;
+ break;
+ default:
sband = &mphy->sband_2g.sband;
+ break;
+ }
if (!sband->channels)
return -EINVAL;
@@ -606,9 +703,13 @@ int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mac_assoc_rssi(dev, skb);
- if (rxv && status->flag & RX_FLAG_RADIOTAP_HE)
+ if (rxv && status->flag & RX_FLAG_RADIOTAP_HE) {
mt7921_mac_decode_he_radiotap(skb, status, rxv, mode);
+ if (status->flag & RX_FLAG_RADIOTAP_HE_MU)
+ mt7921_mac_decode_he_mu_radiotap(skb, status, rxv);
+ }
+
if (!status->wcid || !ieee80211_is_data_qos(fc))
return 0;
@@ -702,7 +803,8 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
txwi[3] &= ~cpu_to_le32(MT_TXD3_PROTECT_FRAME);
}
- if (!ieee80211_is_data(fc) || multicast)
+ if (!ieee80211_is_data(fc) || multicast ||
+ info->flags & IEEE80211_TX_CTL_USE_MINRATE)
val |= MT_TXD2_FIX_RATE;
txwi[2] |= cpu_to_le32(val);
@@ -732,31 +834,17 @@ mt7921_mac_write_txwi_80211(struct mt7921_dev *dev, __le32 *txwi,
txwi[7] |= cpu_to_le32(val);
}
-static void mt7921_update_txs(struct mt76_wcid *wcid, __le32 *txwi)
-{
- struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
- u32 pid, frame_type = FIELD_GET(MT_TXD2_FRAME_TYPE, txwi[2]);
-
- if (!(frame_type & (IEEE80211_FTYPE_DATA >> 2)))
- return;
-
- if (time_is_after_eq_jiffies(msta->next_txs_ts))
- return;
-
- msta->next_txs_ts = jiffies + msecs_to_jiffies(250);
- pid = mt76_get_next_pkt_id(wcid);
- txwi[5] |= cpu_to_le32(MT_TXD5_TX_STATUS_MCU |
- FIELD_PREP(MT_TXD5_PID, pid));
-}
-
void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, bool beacon)
+ struct ieee80211_key_conf *key, int pid,
+ bool beacon)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct mt76_phy *mphy = &dev->mphy;
u8 p_fmt, q_idx, omac_idx = 0, wmm_idx = 0;
+ bool is_mmio = mt76_is_mmio(&dev->mt76);
+ u32 sz_txd = is_mmio ? MT_TXD_SIZE : MT_SDIO_TXD_SIZE;
bool is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
u16 tx_count = 15;
u32 val;
@@ -772,15 +860,15 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
p_fmt = MT_TX_TYPE_FW;
q_idx = MT_LMAC_BCN0;
} else if (skb_get_queue_mapping(skb) >= MT_TXQ_PSD) {
- p_fmt = MT_TX_TYPE_CT;
+ p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = MT_LMAC_ALTX0;
} else {
- p_fmt = MT_TX_TYPE_CT;
+ p_fmt = is_mmio ? MT_TX_TYPE_CT : MT_TX_TYPE_SF;
q_idx = wmm_idx * MT7921_MAX_WMM_SETS +
mt7921_lmac_mapping(dev, skb_get_queue_mapping(skb));
}
- val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + MT_TXD_SIZE) |
+ val = FIELD_PREP(MT_TXD0_TX_BYTES, skb->len + sz_txd) |
FIELD_PREP(MT_TXD0_PKT_FMT, p_fmt) |
FIELD_PREP(MT_TXD0_Q_IDX, q_idx);
txwi[0] = cpu_to_le32(val);
@@ -800,7 +888,12 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
txwi[3] = cpu_to_le32(val);
txwi[4] = 0;
- txwi[5] = 0;
+
+ val = FIELD_PREP(MT_TXD5_PID, pid);
+ if (pid >= MT_PACKET_ID_FIRST)
+ val |= MT_TXD5_TX_STATUS_HOST;
+ txwi[5] = cpu_to_le32(val);
+
txwi[6] = 0;
txwi[7] = wcid->amsdu ? cpu_to_le32(MT_TXD7_HW_AMSDU) : 0;
@@ -810,105 +903,32 @@ void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
mt7921_mac_write_txwi_80211(dev, txwi, skb, key);
if (txwi[2] & cpu_to_le32(MT_TXD2_FIX_RATE)) {
- u16 rate;
+ int rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+ u16 rate, mode;
/* hardware won't add HTC for mgmt/ctrl frame */
txwi[2] |= cpu_to_le32(MT_TXD2_HTC_VLD);
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- rate = MT7921_5G_RATE_DEFAULT;
- else
- rate = MT7921_2G_RATE_DEFAULT;
+ rate = mt76_calculate_default_rate(mphy, rateidx);
+ mode = rate >> 8;
+ rate &= GENMASK(7, 0);
+ rate |= FIELD_PREP(MT_TX_RATE_MODE, mode);
val = MT_TXD6_FIXED_BW |
FIELD_PREP(MT_TXD6_TX_RATE, rate);
txwi[6] |= cpu_to_le32(val);
txwi[3] |= cpu_to_le32(MT_TXD3_BA_DISABLE);
}
-
- mt7921_update_txs(wcid, txwi);
-}
-
-static void
-mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
- void *txp_ptr, u32 id)
-{
- struct mt7921_hw_txp *txp = txp_ptr;
- struct mt7921_txp_ptr *ptr = &txp->ptr[0];
- int i, nbuf = tx_info->nbuf - 1;
-
- tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
- tx_info->nbuf = 1;
-
- txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
-
- for (i = 0; i < nbuf; i++) {
- u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
- u32 addr = tx_info->buf[i + 1].addr;
-
- if (i == nbuf - 1)
- len |= MT_TXD_LEN_LAST;
-
- if (i & 1) {
- ptr->buf1 = cpu_to_le32(addr);
- ptr->len1 = cpu_to_le16(len);
- ptr++;
- } else {
- ptr->buf0 = cpu_to_le32(addr);
- ptr->len0 = cpu_to_le16(len);
- }
- }
}
+EXPORT_SYMBOL_GPL(mt7921_mac_write_txwi);
-int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info)
-{
- struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
- struct ieee80211_key_conf *key = info->control.hw_key;
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(tx_info->skb);
- struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
- int id;
- u8 *txwi = (u8 *)txwi_ptr;
-
- if (unlikely(tx_info->skb->len <= ETH_HLEN))
- return -EINVAL;
-
- if (!wcid)
- wcid = &dev->mt76.global_wcid;
-
- cb->wcid = wcid->idx;
-
- t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
- t->skb = tx_info->skb;
-
- id = mt76_token_consume(mdev, &t);
- if (id < 0)
- return id;
-
- mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
- false);
-
- txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
- memset(txp, 0, sizeof(struct mt7921_txp_common));
- mt7921_write_hw_txp(dev, tx_info, txp, id);
-
- tx_info->skb = DMA_DUMMY_DATA;
-
- return 0;
-}
-
-static void
-mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
+void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
{
struct mt7921_sta *msta;
u16 fc, tid;
u32 val;
- if (!sta || !sta->ht_cap.ht_supported)
+ if (!sta || !(sta->ht_cap.ht_supported || sta->he_cap.has_he))
return;
tid = FIELD_GET(MT_TXD1_TID, le32_to_cpu(txwi[1]));
@@ -925,203 +945,209 @@ mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi)
if (!test_and_set_bit(tid, &msta->ampdu_state))
ieee80211_start_tx_ba_session(sta, tid, 0);
}
+EXPORT_SYMBOL_GPL(mt7921_tx_check_aggr);
-static void
-mt7921_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb,
- struct ieee80211_sta *sta, u8 stat,
- struct list_head *free_list)
+static bool
+mt7921_mac_add_txs_skb(struct mt7921_dev *dev, struct mt76_wcid *wcid, int pid,
+ __le32 *txs_data)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_status status = {
- .sta = sta,
- .info = info,
- .skb = skb,
- .free_list = free_list,
- };
- struct ieee80211_hw *hw;
+ struct mt7921_sta *msta = container_of(wcid, struct mt7921_sta, wcid);
+ struct mt76_sta_stats *stats = &msta->stats;
+ struct ieee80211_supported_band *sband;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct ieee80211_tx_info *info;
+ struct rate_info rate = {};
+ struct sk_buff_head list;
+ u32 txrate, txs, mode;
+ struct sk_buff *skb;
+ bool cck = false;
+
+ mt76_tx_status_lock(mdev, &list);
+ skb = mt76_tx_status_skb_get(mdev, wcid, pid, &list);
+ if (!skb)
+ goto out;
- if (sta) {
- struct mt7921_sta *msta;
+ info = IEEE80211_SKB_CB(skb);
+ txs = le32_to_cpu(txs_data[0]);
+ if (!(txs & MT_TXS0_ACK_ERROR_MASK))
+ info->flags |= IEEE80211_TX_STAT_ACK;
- msta = (struct mt7921_sta *)sta->drv_priv;
- status.rate = &msta->stats.tx_rate;
- }
+ info->status.ampdu_len = 1;
+ info->status.ampdu_ack_len = !!(info->flags &
+ IEEE80211_TX_STAT_ACK);
- hw = mt76_tx_status_get_hw(mdev, skb);
+ info->status.rates[0].idx = -1;
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- info->flags |= IEEE80211_TX_STAT_AMPDU;
+ if (!wcid->sta)
+ goto out;
- if (stat)
- ieee80211_tx_info_clear_status(info);
+ txrate = FIELD_GET(MT_TXS0_TX_RATE, txs);
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
- info->flags |= IEEE80211_TX_STAT_ACK;
+ rate.mcs = FIELD_GET(MT_TX_RATE_IDX, txrate);
+ rate.nss = FIELD_GET(MT_TX_RATE_NSS, txrate) + 1;
- info->status.tx_time = 0;
- ieee80211_tx_status_ext(hw, &status);
-}
+ if (rate.nss - 1 < ARRAY_SIZE(stats->tx_nss))
+ stats->tx_nss[rate.nss - 1]++;
+ if (rate.mcs < ARRAY_SIZE(stats->tx_mcs))
+ stats->tx_mcs[rate.mcs]++;
-void mt7921_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *t)
-{
- struct mt7921_txp_common *txp;
- int i;
+ mode = FIELD_GET(MT_TX_RATE_MODE, txrate);
+ switch (mode) {
+ case MT_PHY_TYPE_CCK:
+ cck = true;
+ fallthrough;
+ case MT_PHY_TYPE_OFDM:
+ if (dev->mphy.chandef.chan->band == NL80211_BAND_5GHZ)
+ sband = &dev->mphy.sband_5g.sband;
+ else
+ sband = &dev->mphy.sband_2g.sband;
- txp = mt7921_txwi_to_txp(dev, t);
+ rate.mcs = mt76_get_rate(dev->mphy.dev, sband, rate.mcs, cck);
+ rate.legacy = sband->bitrates[rate.mcs].bitrate;
+ break;
+ case MT_PHY_TYPE_HT:
+ case MT_PHY_TYPE_HT_GF:
+ rate.mcs += (rate.nss - 1) * 8;
+ if (rate.mcs > 31)
+ goto out;
+
+ rate.flags = RATE_INFO_FLAGS_MCS;
+ if (wcid->rate.flags & RATE_INFO_FLAGS_SHORT_GI)
+ rate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case MT_PHY_TYPE_VHT:
+ if (rate.mcs > 9)
+ goto out;
- for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
- struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
- bool last;
- u16 len;
+ rate.flags = RATE_INFO_FLAGS_VHT_MCS;
+ break;
+ case MT_PHY_TYPE_HE_SU:
+ case MT_PHY_TYPE_HE_EXT_SU:
+ case MT_PHY_TYPE_HE_TB:
+ case MT_PHY_TYPE_HE_MU:
+ if (rate.mcs > 11)
+ goto out;
- len = le16_to_cpu(ptr->len0);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
- DMA_TO_DEVICE);
- if (last)
- break;
+ rate.he_gi = wcid->rate.he_gi;
+ rate.he_dcm = FIELD_GET(MT_TX_RATE_DCM, txrate);
+ rate.flags = RATE_INFO_FLAGS_HE_MCS;
+ break;
+ default:
+ goto out;
+ }
+ stats->tx_mode[mode]++;
- len = le16_to_cpu(ptr->len1);
- last = len & MT_TXD_LEN_LAST;
- len &= MT_TXD_LEN_MASK;
- dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
- DMA_TO_DEVICE);
- if (last)
- break;
+ switch (FIELD_GET(MT_TXS0_BW, txs)) {
+ case IEEE80211_STA_RX_BW_160:
+ rate.bw = RATE_INFO_BW_160;
+ stats->tx_bw[3]++;
+ break;
+ case IEEE80211_STA_RX_BW_80:
+ rate.bw = RATE_INFO_BW_80;
+ stats->tx_bw[2]++;
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ rate.bw = RATE_INFO_BW_40;
+ stats->tx_bw[1]++;
+ break;
+ default:
+ rate.bw = RATE_INFO_BW_20;
+ stats->tx_bw[0]++;
+ break;
}
-}
+ wcid->rate = rate;
-void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
-{
- struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
- struct mt76_dev *mdev = &dev->mt76;
- struct mt76_txwi_cache *txwi;
- struct ieee80211_sta *sta = NULL;
- LIST_HEAD(free_list);
- struct sk_buff *tmp;
- bool wake = false;
- u8 i, count;
-
- /* clean DMA queues and unmap buffers first */
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
- mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
-
- /* TODO: MT_TX_FREE_LATENCY is msdu time from the TXD is queued into PLE,
- * to the time ack is received or dropped by hw (air + hw queue time).
- * Should avoid accessing WTBL to get Tx airtime, and use it instead.
- */
- count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
- for (i = 0; i < count; i++) {
- u32 msdu, info = le32_to_cpu(free->info[i]);
- u8 stat;
-
- /* 1'b1: new wcid pair.
- * 1'b0: msdu_id with the same 'wcid pair' as above.
- */
- if (info & MT_TX_FREE_PAIR) {
- struct mt7921_sta *msta;
- struct mt7921_phy *phy;
- struct mt76_wcid *wcid;
- u16 idx;
-
- count++;
- idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
- wcid = rcu_dereference(dev->mt76.wcid[idx]);
- sta = wcid_to_sta(wcid);
- if (!sta)
- continue;
+out:
+ if (skb)
+ mt76_tx_status_skb_done(mdev, skb, &list);
+ mt76_tx_status_unlock(mdev, &list);
- msta = container_of(wcid, struct mt7921_sta, wcid);
- phy = msta->vif->phy;
- spin_lock_bh(&dev->sta_poll_lock);
- if (list_empty(&msta->stats_list))
- list_add_tail(&msta->stats_list, &phy->stats_list);
- if (list_empty(&msta->poll_list))
- list_add_tail(&msta->poll_list, &dev->sta_poll_list);
- spin_unlock_bh(&dev->sta_poll_lock);
- continue;
- }
+ return !!skb;
+}
- msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
- stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+static void mt7921_mac_add_txs(struct mt7921_dev *dev, void *data)
+{
+ struct mt7921_sta *msta = NULL;
+ struct mt76_wcid *wcid;
+ __le32 *txs_data = data;
+ u16 wcidx;
+ u32 txs;
+ u8 pid;
- txwi = mt76_token_release(mdev, msdu, &wake);
- if (!txwi)
- continue;
+ txs = le32_to_cpu(txs_data[0]);
+ if (FIELD_GET(MT_TXS0_TXS_FORMAT, txs) > 1)
+ return;
- mt7921_txp_skb_unmap(mdev, txwi);
- if (txwi->skb) {
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(txwi->skb);
- void *txwi_ptr = mt76_get_txwi_ptr(mdev, txwi);
+ txs = le32_to_cpu(txs_data[2]);
+ wcidx = FIELD_GET(MT_TXS2_WCID, txs);
- if (likely(txwi->skb->protocol != cpu_to_be16(ETH_P_PAE)))
- mt7921_tx_check_aggr(sta, txwi_ptr);
+ txs = le32_to_cpu(txs_data[3]);
+ pid = FIELD_GET(MT_TXS3_PID, txs);
- if (sta && !info->tx_time_est) {
- struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
- int pending;
+ if (pid < MT_PACKET_ID_FIRST)
+ return;
- pending = atomic_dec_return(&wcid->non_aql_packets);
- if (pending < 0)
- atomic_cmpxchg(&wcid->non_aql_packets, pending, 0);
- }
+ if (wcidx >= MT7921_WTBL_SIZE)
+ return;
- mt7921_tx_complete_status(mdev, txwi->skb, sta, stat, &free_list);
- txwi->skb = NULL;
- }
+ rcu_read_lock();
- mt76_put_txwi(mdev, txwi);
- }
+ wcid = rcu_dereference(dev->mt76.wcid[wcidx]);
+ if (!wcid)
+ goto out;
- if (wake)
- mt76_set_tx_blocked(&dev->mt76, false);
+ mt7921_mac_add_txs_skb(dev, wcid, pid, txs_data);
- napi_consume_skb(skb, 1);
+ if (!wcid->sta)
+ goto out;
- list_for_each_entry_safe(skb, tmp, &free_list, list) {
- skb_list_del_init(skb);
- napi_consume_skb(skb, 1);
- }
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
- mt7921_mac_sta_poll(dev);
- mt76_worker_schedule(&dev->mt76.tx_worker);
+out:
+ rcu_read_unlock();
}
-void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
{
- struct mt7921_dev *dev;
-
- if (!e->txwi) {
- dev_kfree_skb_any(e->skb);
- return;
- }
-
- dev = container_of(mdev, struct mt7921_dev, mt76);
-
- /* error path */
- if (e->skb == DMA_DUMMY_DATA) {
- struct mt76_txwi_cache *t;
- struct mt7921_txp_common *txp;
- u16 token;
-
- txp = mt7921_txwi_to_txp(mdev, e->txwi);
- token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
- t = mt76_token_put(mdev, token);
- e->skb = t ? t->skb : NULL;
- }
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ __le32 *end = (__le32 *)&skb->data[skb->len];
+ enum rx_pkt_type type;
+ u16 flag;
- if (e->skb) {
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(e->skb);
- struct mt76_wcid *wcid;
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+ flag = FIELD_GET(MT_RXD0_PKT_FLAG, le32_to_cpu(rxd[0]));
- wcid = rcu_dereference(dev->mt76.wcid[cb->wcid]);
+ if (type == PKT_TYPE_RX_EVENT && flag == 0x1)
+ type = PKT_TYPE_NORMAL_MCU;
- mt7921_tx_complete_status(mdev, e->skb, wcid_to_sta(wcid), 0,
- NULL);
+ switch (type) {
+ case PKT_TYPE_RX_EVENT:
+ mt7921_mcu_rx_event(dev, skb);
+ break;
+ case PKT_TYPE_TXS:
+ for (rxd += 2; rxd + 8 <= end; rxd += 8)
+ mt7921_mac_add_txs(dev, rxd);
+ dev_kfree_skb(skb);
+ break;
+ case PKT_TYPE_NORMAL_MCU:
+ case PKT_TYPE_NORMAL:
+ if (!mt7921_mac_fill_rx(dev, skb)) {
+ mt76_rx(&dev->mt76, q, skb);
+ return;
+ }
+ fallthrough;
+ default:
+ dev_kfree_skb(skb);
+ break;
}
}
+EXPORT_SYMBOL_GPL(mt7921_queue_rx_skb);
void mt7921_mac_reset_counters(struct mt7921_phy *phy)
{
@@ -1154,17 +1180,12 @@ void mt7921_mac_set_timing(struct mt7921_phy *phy)
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 48);
u32 ofdm = FIELD_PREP(MT_TIMEOUT_VAL_PLCP, 60) |
FIELD_PREP(MT_TIMEOUT_VAL_CCA, 28);
- int sifs, offset;
- bool is_5ghz = phy->mt76->chandef.chan->band == NL80211_BAND_5GHZ;
+ bool is_2ghz = phy->mt76->chandef.chan->band == NL80211_BAND_2GHZ;
+ int sifs = is_2ghz ? 10 : 16, offset;
if (!test_bit(MT76_STATE_RUNNING, &phy->mt76->state))
return;
- if (is_5ghz)
- sifs = 16;
- else
- sifs = 10;
-
mt76_set(dev, MT_ARB_SCR(0),
MT_ARB_SCR_TX_DISABLE | MT_ARB_SCR_RX_DISABLE);
udelay(1);
@@ -1181,7 +1202,7 @@ void mt7921_mac_set_timing(struct mt7921_phy *phy)
FIELD_PREP(MT_IFS_SIFS, sifs) |
FIELD_PREP(MT_IFS_SLOT, phy->slottime));
- if (phy->slottime < 20 || is_5ghz)
+ if (phy->slottime < 20 || !is_2ghz)
val = MT7921_CFEND_RATE_DEFAULT;
else
val = MT7921_CFEND_RATE_11B;
@@ -1242,27 +1263,7 @@ void mt7921_update_channel(struct mt76_phy *mphy)
mt76_connac_power_save_sched(mphy, &dev->pm);
}
-
-void mt7921_tx_token_put(struct mt7921_dev *dev)
-{
- struct mt76_txwi_cache *txwi;
- int id;
-
- spin_lock_bh(&dev->mt76.token_lock);
- idr_for_each_entry(&dev->mt76.token, txwi, id) {
- mt7921_txp_skb_unmap(&dev->mt76, txwi);
- if (txwi->skb) {
- struct ieee80211_hw *hw;
-
- hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb);
- ieee80211_free_txskb(hw, txwi->skb);
- }
- mt76_put_txwi(&dev->mt76, txwi);
- dev->mt76.token_count--;
- }
- spin_unlock_bh(&dev->mt76.token_lock);
- idr_destroy(&dev->mt76.token);
-}
+EXPORT_SYMBOL_GPL(mt7921_update_channel);
static void
mt7921_vif_connect_iter(void *priv, u8 *mac,
@@ -1278,69 +1279,6 @@ mt7921_vif_connect_iter(void *priv, u8 *mac,
mt7921_mcu_set_tx(dev, vif);
}
-static int
-mt7921_mac_reset(struct mt7921_dev *dev)
-{
- int i, err;
-
- mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
-
- mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
- mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
-
- set_bit(MT76_RESET, &dev->mphy.state);
- set_bit(MT76_MCU_RESET, &dev->mphy.state);
- wake_up(&dev->mt76.mcu.wait);
- skb_queue_purge(&dev->mt76.mcu.res_q);
-
- mt76_txq_schedule_all(&dev->mphy);
-
- mt76_worker_disable(&dev->mt76.tx_worker);
- napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
- napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
- napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
- napi_disable(&dev->mt76.tx_napi);
-
- mt7921_tx_token_put(dev);
- idr_init(&dev->mt76.token);
-
- mt7921_wpdma_reset(dev, true);
-
- mt76_for_each_q_rx(&dev->mt76, i) {
- napi_enable(&dev->mt76.napi[i]);
- napi_schedule(&dev->mt76.napi[i]);
- }
-
- clear_bit(MT76_MCU_RESET, &dev->mphy.state);
-
- mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
- MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
- MT_INT_MCU_CMD);
- mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
-
- err = mt7921_run_firmware(dev);
- if (err)
- goto out;
-
- err = mt7921_mcu_set_eeprom(dev);
- if (err)
- goto out;
-
- err = mt7921_mac_init(dev);
- if (err)
- goto out;
-
- err = __mt7921_start(&dev->phy);
-out:
- clear_bit(MT76_RESET, &dev->mphy.state);
-
- napi_enable(&dev->mt76.tx_napi);
- napi_schedule(&dev->mt76.tx_napi);
- mt76_worker_enable(&dev->mt76.tx_worker);
-
- return err;
-}
-
/* system error recovery */
void mt7921_mac_reset_work(struct work_struct *work)
{
@@ -1359,12 +1297,9 @@ void mt7921_mac_reset_work(struct work_struct *work)
cancel_work_sync(&pm->wake_work);
mutex_lock(&dev->mt76.mutex);
- for (i = 0; i < 10; i++) {
- __mt7921_mcu_drv_pmctrl(dev);
-
- if (!mt7921_mac_reset(dev))
+ for (i = 0; i < 10; i++)
+ if (!mt7921_dev_reset(dev))
break;
- }
mutex_unlock(&dev->mt76.mutex);
if (i == 10)
@@ -1399,12 +1334,12 @@ void mt7921_reset(struct mt76_dev *mdev)
queue_work(dev->mt76.wq, &dev->reset_work);
}
-static void
-mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
+void mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
{
struct mt7921_dev *dev = phy->dev;
struct mib_stats *mib = &phy->mib;
int i, aggr0 = 0, aggr1;
+ u32 val;
mib->fcs_err_cnt += mt76_get_field(dev, MT_MIB_SDR3(0),
MT_MIB_SDR3_FCS_ERR_MASK);
@@ -1417,8 +1352,37 @@ mt7921_mac_update_mib_stats(struct mt7921_phy *phy)
mib->rts_retries_cnt += mt76_get_field(dev, MT_MIB_MB_BSDR1(0),
MT_MIB_RTS_FAIL_COUNT_MASK);
+ mib->tx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR12(0));
+ mib->tx_mpdu_attempts_cnt += mt76_rr(dev, MT_MIB_SDR14(0));
+ mib->tx_mpdu_success_cnt += mt76_rr(dev, MT_MIB_SDR15(0));
+
+ val = mt76_rr(dev, MT_MIB_SDR32(0));
+ mib->tx_pkt_ebf_cnt += FIELD_GET(MT_MIB_SDR9_EBF_CNT_MASK, val);
+ mib->tx_pkt_ibf_cnt += FIELD_GET(MT_MIB_SDR9_IBF_CNT_MASK, val);
+
+ val = mt76_rr(dev, MT_ETBF_TX_APP_CNT(0));
+ mib->tx_bf_ibf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_IBF_CNT, val);
+ mib->tx_bf_ebf_ppdu_cnt += FIELD_GET(MT_ETBF_TX_EBF_CNT, val);
+
+ val = mt76_rr(dev, MT_ETBF_RX_FB_CNT(0));
+ mib->tx_bf_rx_fb_all_cnt += FIELD_GET(MT_ETBF_RX_FB_ALL, val);
+ mib->tx_bf_rx_fb_he_cnt += FIELD_GET(MT_ETBF_RX_FB_HE, val);
+ mib->tx_bf_rx_fb_vht_cnt += FIELD_GET(MT_ETBF_RX_FB_VHT, val);
+ mib->tx_bf_rx_fb_ht_cnt += FIELD_GET(MT_ETBF_RX_FB_HT, val);
+
+ mib->rx_mpdu_cnt += mt76_rr(dev, MT_MIB_SDR5(0));
+ mib->rx_ampdu_cnt += mt76_rr(dev, MT_MIB_SDR22(0));
+ mib->rx_ampdu_bytes_cnt += mt76_rr(dev, MT_MIB_SDR23(0));
+ mib->rx_ba_cnt += mt76_rr(dev, MT_MIB_SDR31(0));
+
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++) {
+ val = mt76_rr(dev, MT_PLE_AMSDU_PACK_MSDU_CNT(i));
+ mib->tx_amsdu[i] += val;
+ mib->tx_amsdu_cnt += val;
+ }
+
for (i = 0, aggr1 = aggr0 + 4; i < 4; i++) {
- u32 val, val2;
+ u32 val2;
val = mt76_rr(dev, MT_TX_AGG_CNT(0, i));
val2 = mt76_rr(dev, MT_TX_AGG_CNT2(0, i));
@@ -1449,6 +1413,8 @@ void mt7921_mac_work(struct work_struct *work)
}
mt7921_mutex_release(phy->dev);
+
+ mt76_tx_status_check(mphy->dev, false);
ieee80211_queue_delayed_work(phy->mt76->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
}
@@ -1463,12 +1429,18 @@ void mt7921_pm_wake_work(struct work_struct *work)
mphy = dev->phy.mt76;
if (!mt7921_mcu_drv_pmctrl(dev)) {
+ struct mt76_dev *mdev = &dev->mt76;
int i;
- mt76_for_each_q_rx(&dev->mt76, i)
- napi_schedule(&dev->mt76.napi[i]);
- mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
- mt7921_tx_cleanup(dev);
+ if (mt76_is_sdio(mdev)) {
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ mt76_worker_schedule(&mdev->sdio.txrx_worker);
+ } else {
+ mt76_for_each_q_rx(mdev, i)
+ napi_schedule(&mdev->napi[i]);
+ mt76_connac_pm_dequeue_skbs(mphy, &dev->pm);
+ mt7921_mcu_tx_cleanup(dev);
+ }
if (test_bit(MT76_STATE_RUNNING, &mphy->state))
ieee80211_queue_delayed_work(mphy->hw, &mphy->mac_work,
MT7921_WATCHDOG_TIME);
@@ -1506,34 +1478,6 @@ out:
queue_delayed_work(dev->mt76.wq, &dev->pm.ps_work, delta);
}
-int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
- struct ieee80211_vif *vif,
- bool enable)
-{
- struct mt7921_dev *dev = phy->dev;
- bool ext_phy = phy != &dev->phy;
- int err;
-
- if (!dev->pm.enable)
- return -EOPNOTSUPP;
-
- err = mt7921_mcu_set_bss_pm(dev, vif, enable);
- if (err)
- return err;
-
- if (enable) {
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- mt76_set(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- } else {
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- mt76_clear(dev, MT_WF_RFCR(ext_phy),
- MT_WF_RFCR_DROP_OTHER_BEACON);
- }
-
- return 0;
-}
-
void mt7921_coredump_work(struct work_struct *work)
{
struct mt7921_dev *dev;
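A minimal standalone sketch (not part of the patch) of the GI-polling logic that the reworked mt7921_mac_sta_poll() now performs against the per-station WTBL entry. Only the two bit offsets are taken from the hunks above; "wtbl_word7", the function names and the plain shifts are illustrative stand-ins for the driver's mt76_rr()/FIELD_GET-based code.

/* Sketch only: "wtbl_word7" models the 32-bit word read via
 * mt76_rr(dev, mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_TXRX_CAP_RATE_OFFSET)).
 */
#include <stdbool.h>
#include <stdint.h>

#define MT_WTBL_TXRX_RATE_G2_HE	24	/* HE GI field base bit, 2 bits per bandwidth */
#define MT_WTBL_TXRX_RATE_G2	12	/* HT/VHT short-GI base bit, 1 bit per bandwidth */

/* bw: 0 = 20 MHz, 1 = 40 MHz, 2 = 80 MHz, 3 = 160 MHz */
uint8_t sketch_he_gi(uint32_t wtbl_word7, unsigned int bw)
{
	unsigned int offs = MT_WTBL_TXRX_RATE_G2_HE + 2 * bw;

	return (wtbl_word7 >> offs) & 0x3;	/* HE GI index (0.8/1.6/3.2 us) */
}

bool sketch_short_gi(uint32_t wtbl_word7, unsigned int bw)
{
	return wtbl_word7 & (1u << (MT_WTBL_TXRX_RATE_G2 + bw));	/* HT/VHT short GI */
}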
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
index 3af67fac213d..544a1c33126a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.h
@@ -116,6 +116,7 @@ enum rx_pkt_type {
#define MT_PRXV_TX_DCM BIT(4)
#define MT_PRXV_TX_ER_SU_106T BIT(5)
#define MT_PRXV_NSTS GENMASK(9, 7)
+#define MT_PRXV_TXBF BIT(10)
#define MT_PRXV_HT_AD_CODE BIT(11)
#define MT_PRXV_FRAME_MODE GENMASK(14, 12)
#define MT_PRXV_SGI GENMASK(16, 15)
@@ -138,8 +139,15 @@ enum rx_pkt_type {
#define MT_CRXV_HE_LTF_SIZE GENMASK(18, 17)
#define MT_CRXV_HE_LDPC_EXT_SYM BIT(20)
#define MT_CRXV_HE_PE_DISAMBIG BIT(23)
+#define MT_CRXV_HE_NUM_USER GENMASK(30, 24)
#define MT_CRXV_HE_UPLINK BIT(31)
+#define MT_CRXV_HE_RU0 GENMASK(7, 0)
+#define MT_CRXV_HE_RU1 GENMASK(15, 8)
+#define MT_CRXV_HE_RU2 GENMASK(23, 16)
+#define MT_CRXV_HE_RU3 GENMASK(31, 24)
+#define MT_CRXV_HE_MU_AID GENMASK(30, 20)
+
#define MT_CRXV_HE_SR_MASK GENMASK(11, 8)
#define MT_CRXV_HE_SR1_MASK GENMASK(16, 12)
#define MT_CRXV_HE_SR2_MASK GENMASK(20, 17)
@@ -191,6 +199,10 @@ enum tx_mcu_port_q_idx {
#define MT_TXD_SIZE (8 * 4)
+#define MT_SDIO_TXD_SIZE (MT_TXD_SIZE + 8 * 4)
+#define MT_SDIO_TAIL_SIZE 8
+#define MT_SDIO_HDR_SIZE 4
+
#define MT_TXD0_Q_IDX GENMASK(31, 25)
#define MT_TXD0_PKT_FMT GENMASK(24, 23)
#define MT_TXD0_ETH_TYPE_OFFSET GENMASK(22, 16)
@@ -309,6 +321,15 @@ struct mt7921_tx_free {
/* will support this field in further revision */
#define MT_TX_FREE_RATE GENMASK(13, 0)
+#define MT_TXS0_BW GENMASK(30, 29)
+#define MT_TXS0_TXS_FORMAT GENMASK(24, 23)
+#define MT_TXS0_ACK_ERROR_MASK GENMASK(18, 16)
+#define MT_TXS0_TX_RATE GENMASK(13, 0)
+
+#define MT_TXS2_WCID GENMASK(25, 16)
+
+#define MT_TXS3_PID GENMASK(31, 24)
+
static inline struct mt7921_txp_common *
mt7921_txwi_to_txp(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
@@ -350,4 +371,15 @@ struct mt7921_txp_common {
};
};
+#define MT_WTBL_TXRX_CAP_RATE_OFFSET 7
+#define MT_WTBL_TXRX_RATE_G2_HE 24
+#define MT_WTBL_TXRX_RATE_G2 12
+
+#define MT_WTBL_AC0_CTT_OFFSET 20
+
+static inline u32 mt7921_mac_wtbl_lmac_addr(int idx, u8 offset)
+{
+ return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4;
+}
+
#endif
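The new mt7921_mac_wtbl_lmac_addr() helper in mac.h replaces open-coded arithmetic such as "MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4" with a named offset. A hedged illustration of that equivalence follows; MT_WTBL_LMAC_OFFS is only stubbed here so the example is self-contained, the real macro lives in the driver's register headers.

/* Illustration only: the helper reproduces the old "+ 20 * 4" form.
 * The stubbed MT_WTBL_LMAC_OFFS just models "base + idx * entry_size".
 */
#include <assert.h>
#include <stdint.h>

#define MT_WTBL_LMAC_OFFS(idx, col)	(0x40000 + (idx) * 0x120)	/* stub, not the real layout */
#define MT_WTBL_AC0_CTT_OFFSET		20

static inline uint32_t mt7921_mac_wtbl_lmac_addr(int idx, uint8_t offset)
{
	return MT_WTBL_LMAC_OFFS(idx, 0) + offset * 4;
}

int main(void)
{
	int idx = 5;

	/* old open-coded form vs. new helper with a named offset */
	assert(MT_WTBL_LMAC_OFFS(idx, 0) + 20 * 4 ==
	       mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET));
	return 0;
}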
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
index 63ec140c9c37..633c6d2a57ac 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c
@@ -72,7 +72,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
@@ -93,7 +93,7 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
if (band == NL80211_BAND_2GHZ)
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G;
- else if (band == NL80211_BAND_5GHZ)
+ else
he_cap_elem->phy_cap_info[0] |=
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G;
@@ -142,6 +142,32 @@ mt7921_init_he_caps(struct mt7921_phy *phy, enum nl80211_band band,
he_cap_elem->phy_cap_info[9] |=
IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
}
+
+ if (band == NL80211_BAND_6GHZ) {
+ struct ieee80211_supported_band *sband =
+ &phy->mt76->sband_5g.sband;
+ struct ieee80211_sta_vht_cap *vht_cap = &sband->vht_cap;
+ struct ieee80211_sta_ht_cap *ht_cap = &sband->ht_cap;
+ u32 exp;
+ u16 cap;
+
+ cap = u16_encode_bits(ht_cap->ampdu_density,
+ IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
+ exp = u32_get_bits(vht_cap->cap,
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
+ cap |= u16_encode_bits(exp,
+ IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
+ exp = u32_get_bits(vht_cap->cap,
+ IEEE80211_VHT_CAP_MAX_MPDU_MASK);
+ cap |= u16_encode_bits(exp,
+ IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN);
+ if (vht_cap->cap & IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN)
+ cap |= IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS;
+ if (vht_cap->cap & IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN)
+ cap |= IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS;
+
+ data->he_6ghz_capa.capa = cpu_to_le16(cap);
+ }
idx++;
}
@@ -170,6 +196,15 @@ void mt7921_set_stream_he_caps(struct mt7921_phy *phy)
band = &phy->mt76->sband_5g.sband;
band->iftype_data = data;
band->n_iftype_data = n;
+
+ if (phy->mt76->cap.has_6ghz) {
+ data = phy->iftype[NL80211_BAND_6GHZ];
+ n = mt7921_init_he_caps(phy, NL80211_BAND_6GHZ, data);
+
+ band = &phy->mt76->sband_6g.sband;
+ band->iftype_data = data;
+ band->n_iftype_data = n;
+ }
}
}
@@ -202,6 +237,7 @@ int __mt7921_start(struct mt7921_phy *phy)
return 0;
}
+EXPORT_SYMBOL_GPL(__mt7921_start);
static int mt7921_start(struct ieee80211_hw *hw)
{
@@ -243,10 +279,6 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
mt7921_mutex_acquire(dev);
- if (vif->type == NL80211_IFTYPE_MONITOR &&
- is_zero_ether_addr(vif->addr))
- phy->monitor_vif = vif;
-
mvif->mt76.idx = ffs(~dev->mt76.vif_mask) - 1;
if (mvif->mt76.idx >= MT7921_MAX_INTERFACES) {
ret = -ENOSPC;
@@ -268,12 +300,13 @@ static int mt7921_add_interface(struct ieee80211_hw *hw,
idx = MT7921_WTBL_RESERVED - mvif->mt76.idx;
- INIT_LIST_HEAD(&mvif->sta.stats_list);
INIT_LIST_HEAD(&mvif->sta.poll_list);
mvif->sta.wcid.idx = idx;
mvif->sta.wcid.ext_phy = mvif->mt76.band_idx;
mvif->sta.wcid.hw_key_idx = -1;
mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET;
+ mt76_packet_id_init(&mvif->sta.wcid);
+
mt7921_mac_wtbl_update(dev, idx,
MT_WTBL_UPDATE_ADM_COUNT_CLEAR);
@@ -306,9 +339,6 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
struct mt7921_phy *phy = mt7921_hw_phy(hw);
int idx = msta->wcid.idx;
- if (vif == phy->monitor_vif)
- phy->monitor_vif = NULL;
-
mt7921_mutex_acquire(dev);
mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid);
mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false);
@@ -323,6 +353,8 @@ static void mt7921_remove_interface(struct ieee80211_hw *hw,
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
spin_unlock_bh(&dev->sta_poll_lock);
+
+ mt76_packet_id_flush(&dev->mt76, &msta->wcid);
}
static int mt7921_set_channel(struct mt7921_phy *phy)
@@ -533,36 +565,6 @@ static void mt7921_configure_filter(struct ieee80211_hw *hw,
mt7921_mutex_release(dev);
}
-static int
-mt7921_bss_bcnft_apply(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- bool assoc)
-{
- int ret;
-
- if (!dev->pm.enable)
- return 0;
-
- if (assoc) {
- ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
- if (ret)
- return ret;
-
- vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
- mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
-
- return 0;
- }
-
- ret = mt7921_mcu_set_bss_pm(dev, vif, false);
- if (ret)
- return ret;
-
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
-
- return 0;
-}
-
static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -592,7 +594,8 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
mt7921_mcu_sta_update(dev, NULL, vif, true,
MT76_STA_INFO_STATE_ASSOC);
- mt7921_bss_bcnft_apply(dev, vif, info->assoc);
+ if (dev->pm.enable)
+ mt7921_mcu_set_beacon_filter(dev, vif, info->assoc);
}
if (changed & BSS_CHANGED_ARP_FILTER) {
@@ -617,14 +620,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
if (idx < 0)
return -ENOSPC;
- INIT_LIST_HEAD(&msta->stats_list);
INIT_LIST_HEAD(&msta->poll_list);
msta->vif = mvif;
msta->wcid.sta = 1;
msta->wcid.idx = idx;
msta->wcid.ext_phy = mvif->mt76.band_idx;
msta->wcid.tx_info |= MT_WCID_TX_INFO_SET;
- msta->stats.jiffies = jiffies;
+ msta->last_txs = jiffies;
ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm);
if (ret)
@@ -645,6 +647,7 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
return 0;
}
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_add);
void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -666,6 +669,7 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
mt7921_mutex_release(dev);
}
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_assoc);
void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
@@ -693,12 +697,11 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
spin_lock_bh(&dev->sta_poll_lock);
if (!list_empty(&msta->poll_list))
list_del_init(&msta->poll_list);
- if (!list_empty(&msta->stats_list))
- list_del_init(&msta->stats_list);
spin_unlock_bh(&dev->sta_poll_lock);
mt76_connac_power_save_sched(&dev->mphy, &dev->pm);
}
+EXPORT_SYMBOL_GPL(mt7921_mac_sta_remove);
void mt7921_tx_worker(struct mt76_worker *w)
{
@@ -853,13 +856,175 @@ mt7921_get_stats(struct ieee80211_hw *hw,
stats->dot11FCSErrorCount = mib->fcs_err_cnt;
stats->dot11ACKFailureCount = mib->ack_fail_cnt;
- memset(mib, 0, sizeof(*mib));
-
mt7921_mutex_release(phy->dev);
return 0;
}
+static const char mt7921_gstrings_stats[][ETH_GSTRING_LEN] = {
+ /* tx counters */
+ "tx_ampdu_cnt",
+ "tx_mpdu_attempts",
+ "tx_mpdu_success",
+ "tx_pkt_ebf_cnt",
+ "tx_pkt_ibf_cnt",
+ "tx_ampdu_len:0-1",
+ "tx_ampdu_len:2-10",
+ "tx_ampdu_len:11-19",
+ "tx_ampdu_len:20-28",
+ "tx_ampdu_len:29-37",
+ "tx_ampdu_len:38-46",
+ "tx_ampdu_len:47-55",
+ "tx_ampdu_len:56-79",
+ "tx_ampdu_len:80-103",
+ "tx_ampdu_len:104-127",
+ "tx_ampdu_len:128-151",
+ "tx_ampdu_len:152-175",
+ "tx_ampdu_len:176-199",
+ "tx_ampdu_len:200-223",
+ "tx_ampdu_len:224-247",
+ "ba_miss_count",
+ "tx_beamformer_ppdu_iBF",
+ "tx_beamformer_ppdu_eBF",
+ "tx_beamformer_rx_feedback_all",
+ "tx_beamformer_rx_feedback_he",
+ "tx_beamformer_rx_feedback_vht",
+ "tx_beamformer_rx_feedback_ht",
+ "tx_msdu_pack_1",
+ "tx_msdu_pack_2",
+ "tx_msdu_pack_3",
+ "tx_msdu_pack_4",
+ "tx_msdu_pack_5",
+ "tx_msdu_pack_6",
+ "tx_msdu_pack_7",
+ "tx_msdu_pack_8",
+ /* rx counters */
+ "rx_mpdu_cnt",
+ "rx_ampdu_cnt",
+ "rx_ampdu_bytes_cnt",
+ "rx_ba_cnt",
+ /* per vif counters */
+ "v_tx_mode_cck",
+ "v_tx_mode_ofdm",
+ "v_tx_mode_ht",
+ "v_tx_mode_ht_gf",
+ "v_tx_mode_vht",
+ "v_tx_mode_he_su",
+ "v_tx_mode_he_ext_su",
+ "v_tx_mode_he_tb",
+ "v_tx_mode_he_mu",
+ "v_tx_bw_20",
+ "v_tx_bw_40",
+ "v_tx_bw_80",
+ "v_tx_bw_160",
+ "v_tx_mcs_0",
+ "v_tx_mcs_1",
+ "v_tx_mcs_2",
+ "v_tx_mcs_3",
+ "v_tx_mcs_4",
+ "v_tx_mcs_5",
+ "v_tx_mcs_6",
+ "v_tx_mcs_7",
+ "v_tx_mcs_8",
+ "v_tx_mcs_9",
+ "v_tx_mcs_10",
+ "v_tx_mcs_11",
+};
+
+static void
+mt7921_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 sset, u8 *data)
+{
+ if (sset != ETH_SS_STATS)
+ return;
+
+ memcpy(data, *mt7921_gstrings_stats, sizeof(mt7921_gstrings_stats));
+}
+
+static int
+mt7921_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ int sset)
+{
+ return sset == ETH_SS_STATS ? ARRAY_SIZE(mt7921_gstrings_stats) : 0;
+}
+
+static void
+mt7921_ethtool_worker(void *wi_data, struct ieee80211_sta *sta)
+{
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+ struct mt76_ethtool_worker_info *wi = wi_data;
+
+ if (msta->vif->mt76.idx != wi->idx)
+ return;
+
+ mt76_ethtool_worker(wi, &msta->stats);
+}
+
+static
+void mt7921_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
+ struct mt7921_phy *phy = mt7921_hw_phy(hw);
+ struct mt7921_dev *dev = phy->dev;
+ struct mib_stats *mib = &phy->mib;
+ struct mt76_ethtool_worker_info wi = {
+ .data = data,
+ .idx = mvif->mt76.idx,
+ };
+ int i, ei = 0;
+
+ mt7921_mutex_acquire(dev);
+
+ mt7921_mac_update_mib_stats(phy);
+
+ data[ei++] = mib->tx_ampdu_cnt;
+ data[ei++] = mib->tx_mpdu_attempts_cnt;
+ data[ei++] = mib->tx_mpdu_success_cnt;
+ data[ei++] = mib->tx_pkt_ebf_cnt;
+ data[ei++] = mib->tx_pkt_ibf_cnt;
+
+ /* Tx ampdu stat */
+ for (i = 0; i < 15; i++)
+ data[ei++] = dev->mt76.aggr_stats[i];
+
+ data[ei++] = phy->mib.ba_miss_cnt;
+
+ /* Tx Beamformer monitor */
+ data[ei++] = mib->tx_bf_ibf_ppdu_cnt;
+ data[ei++] = mib->tx_bf_ebf_ppdu_cnt;
+
+ /* Tx Beamformer Rx feedback monitor */
+ data[ei++] = mib->tx_bf_rx_fb_all_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_he_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_vht_cnt;
+ data[ei++] = mib->tx_bf_rx_fb_ht_cnt;
+
+ /* Tx amsdu info (pack-count histogram) */
+ for (i = 0; i < ARRAY_SIZE(mib->tx_amsdu); i++)
+ data[ei++] = mib->tx_amsdu[i];
+
+ /* rx counters */
+ data[ei++] = mib->rx_mpdu_cnt;
+ data[ei++] = mib->rx_ampdu_cnt;
+ data[ei++] = mib->rx_ampdu_bytes_cnt;
+ data[ei++] = mib->rx_ba_cnt;
+
+ /* Add values for all stations owned by this vif */
+ wi.initial_stat_idx = ei;
+ ieee80211_iterate_stations_atomic(hw, mt7921_ethtool_worker, &wi);
+
+ mt7921_mutex_release(dev);
+
+ if (!wi.sta_count)
+ return;
+
+ ei += wi.worker_stat_count;
+ if (ei != ARRAY_SIZE(mt7921_gstrings_stats))
+ dev_err(dev->mt76.dev, "ei: %d SSTATS_LEN: %zu",
+ ei, ARRAY_SIZE(mt7921_gstrings_stats));
+}
+
static u64
mt7921_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
@@ -1048,22 +1213,22 @@ static void mt7921_sta_statistics(struct ieee80211_hw *hw,
struct station_info *sinfo)
{
struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
- struct mt7921_sta_stats *stats = &msta->stats;
+ struct rate_info *txrate = &msta->wcid.rate;
- if (!stats->tx_rate.legacy && !stats->tx_rate.flags)
+ if (!txrate->legacy && !txrate->flags)
return;
- if (stats->tx_rate.legacy) {
- sinfo->txrate.legacy = stats->tx_rate.legacy;
+ if (txrate->legacy) {
+ sinfo->txrate.legacy = txrate->legacy;
} else {
- sinfo->txrate.mcs = stats->tx_rate.mcs;
- sinfo->txrate.nss = stats->tx_rate.nss;
- sinfo->txrate.bw = stats->tx_rate.bw;
- sinfo->txrate.he_gi = stats->tx_rate.he_gi;
- sinfo->txrate.he_dcm = stats->tx_rate.he_dcm;
- sinfo->txrate.he_ru_alloc = stats->tx_rate.he_ru_alloc;
+ sinfo->txrate.mcs = txrate->mcs;
+ sinfo->txrate.nss = txrate->nss;
+ sinfo->txrate.bw = txrate->bw;
+ sinfo->txrate.he_gi = txrate->he_gi;
+ sinfo->txrate.he_dcm = txrate->he_dcm;
+ sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc;
}
- sinfo->txrate.flags = stats->tx_rate.flags;
+ sinfo->txrate.flags = txrate->flags;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
}
@@ -1172,6 +1337,43 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw,
MCU_UNI_CMD_STA_REC_UPDATE);
}
+static int mt7921_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ const struct cfg80211_sar_capa *capa = hw->wiphy->sar_capa;
+ struct mt7921_dev *dev = mt7921_hw_dev(hw);
+ struct mt76_freq_range_power *data, *frp;
+ struct mt76_phy *mphy = hw->priv;
+ int err;
+ u32 i;
+
+ if (sar->type != NL80211_SAR_TYPE_POWER || !sar->num_sub_specs)
+ return -EINVAL;
+
+ mt7921_mutex_acquire(dev);
+
+ data = mphy->frp;
+
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ u32 index = sar->sub_specs[i].freq_range_index;
+ /* SAR specifies the power limitation in 0.25 dBm units */
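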
+ s32 power = sar->sub_specs[i].power >> 1;
+
+ if (power > 127 || power < -127)
+ power = 127;
+
+ frp = &data[index];
+ frp->range = &capa->freq_ranges[index];
+ frp->power = power;
+ }
+
+ err = mt76_connac_mcu_set_rate_txpower(mphy);
+
+ mt7921_mutex_release(dev);
+
+ return err;
+}
+
const struct ieee80211_ops mt7921_ops = {
.tx = mt7921_tx,
.start = mt7921_start,
@@ -1192,6 +1394,9 @@ const struct ieee80211_ops mt7921_ops = {
.release_buffered_frames = mt76_release_buffered_frames,
.get_txpower = mt76_get_txpower,
.get_stats = mt7921_get_stats,
+ .get_et_sset_count = mt7921_get_et_sset_count,
+ .get_et_strings = mt7921_get_et_strings,
+ .get_et_stats = mt7921_get_et_stats,
.get_tsf = mt7921_get_tsf,
.set_tsf = mt7921_set_tsf,
.get_survey = mt76_get_survey,
@@ -1203,6 +1408,8 @@ const struct ieee80211_ops mt7921_ops = {
.sta_statistics = mt7921_sta_statistics,
.sched_scan_start = mt7921_start_sched_scan,
.sched_scan_stop = mt7921_stop_sched_scan,
+ CFG80211_TESTMODE_CMD(mt7921_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(mt7921_testmode_dump)
#ifdef CONFIG_PM
.suspend = mt7921_suspend,
.resume = mt7921_resume,
@@ -1210,4 +1417,9 @@ const struct ieee80211_ops mt7921_ops = {
.set_rekey_data = mt7921_set_rekey_data,
#endif /* CONFIG_PM */
.flush = mt7921_flush,
+ .set_sar_specs = mt7921_set_sar_specs,
};
+EXPORT_SYMBOL_GPL(mt7921_ops);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
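mt7921_set_sar_specs() above converts the cfg80211 SAR limit, given in 0.25 dBm steps, into the 0.5 dBm units stored in mt76_freq_range_power by halving it and clamping out-of-range values. A small worked sketch of just that conversion, with an illustrative function name; the clamp mirrors the patch as written.

/* Sketch only: per-sub-spec power conversion from mt7921_set_sar_specs().
 * Example: an input of 44 (11 dBm in 0.25 dBm units) becomes 22
 * (11 dBm in 0.5 dBm units).
 */
#include <stdint.h>

int32_t sketch_sar_to_frp_power(int32_t quarter_dbm)
{
	int32_t power = quarter_dbm >> 1;	/* 0.25 dBm -> 0.5 dBm units */

	if (power > 127 || power < -127)	/* clamp to the signed 8-bit range, as in the patch */
		power = 127;

	return power;
}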
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index 9fbaacc67cfa..6ada1ebe7d68 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -82,9 +82,17 @@ struct mt7921_fw_region {
#define FW_START_OVERRIDE BIT(0)
#define FW_START_WORKING_PDA_CR4 BIT(2)
+#define PATCH_SEC_NOT_SUPPORT GENMASK(31, 0)
#define PATCH_SEC_TYPE_MASK GENMASK(15, 0)
#define PATCH_SEC_TYPE_INFO 0x2
+#define PATCH_SEC_ENC_TYPE_MASK GENMASK(31, 24)
+#define PATCH_SEC_ENC_TYPE_PLAIN 0x00
+#define PATCH_SEC_ENC_TYPE_AES 0x01
+#define PATCH_SEC_ENC_TYPE_SCRAMBLE 0x02
+#define PATCH_SEC_ENC_SCRAMBLE_INFO_MASK GENMASK(15, 0)
+#define PATCH_SEC_ENC_AES_KEY_MASK GENMASK(7, 0)
+
#define to_wcid_lo(id) FIELD_GET(GENMASK(7, 0), (u16)id)
#define to_wcid_hi(id) FIELD_GET(GENMASK(9, 8), (u16)id)
@@ -152,11 +160,11 @@ mt7921_mcu_parse_eeprom(struct mt76_dev *dev, struct sk_buff *skb)
return 0;
}
-static int
-mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
- struct sk_buff *skb, int seq)
+int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq)
{
struct mt7921_mcu_rxd *rxd;
+ int mcu_cmd = cmd & MCU_CMD_MASK;
int ret = 0;
if (!skb) {
@@ -194,6 +202,9 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
skb_pull(skb, sizeof(*rxd));
event = (struct mt7921_mcu_uni_event *)skb->data;
ret = le32_to_cpu(event->status);
+ /* skip invalid event */
+ if (mcu_cmd != event->cid)
+ ret = -EAGAIN;
break;
}
case MCU_CMD_REG_READ: {
@@ -211,14 +222,13 @@ mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
return ret;
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_parse_response);
-static int
-mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
- int cmd, int *wait_seq)
+int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq)
{
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
int txd_len, mcu_cmd = cmd & MCU_CMD_MASK;
- enum mt76_mcuq_id txq = MT_MCUQ_WM;
struct mt7921_uni_txd *uni_txd;
struct mt7921_mcu_txd *mcu_txd;
__le32 *txd;
@@ -240,10 +250,8 @@ mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
if (!seq)
seq = ++dev->mt76.mcu.msg_seq & 0xf;
- if (cmd == MCU_CMD_FW_SCATTER) {
- txq = MT_MCUQ_FWDL;
+ if (cmd == MCU_CMD_FW_SCATTER)
goto exit;
- }
txd_len = cmd & MCU_UNI_PREFIX ? sizeof(*uni_txd) : sizeof(*mcu_txd);
txd = (__le32 *)skb_push(skb, txd_len);
@@ -307,96 +315,9 @@ exit:
if (wait_seq)
*wait_seq = seq;
- return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
-}
-
-static void
-mt7921_mcu_tx_rate_parse(struct mt76_phy *mphy,
- struct mt7921_mcu_peer_cap *peer,
- struct rate_info *rate, u16 r)
-{
- struct ieee80211_supported_band *sband;
- u16 flags = 0;
- u8 txmode = FIELD_GET(MT_WTBL_RATE_TX_MODE, r);
- u8 gi = 0;
- u8 bw = 0;
-
- rate->mcs = FIELD_GET(MT_WTBL_RATE_MCS, r);
- rate->nss = FIELD_GET(MT_WTBL_RATE_NSS, r) + 1;
-
- switch (peer->bw) {
- case IEEE80211_STA_RX_BW_160:
- gi = peer->g16;
- break;
- case IEEE80211_STA_RX_BW_80:
- gi = peer->g8;
- break;
- case IEEE80211_STA_RX_BW_40:
- gi = peer->g4;
- break;
- default:
- gi = peer->g2;
- break;
- }
-
- gi = txmode >= MT_PHY_TYPE_HE_SU ?
- FIELD_GET(MT_WTBL_RATE_HE_GI, gi) :
- FIELD_GET(MT_WTBL_RATE_GI, gi);
-
- switch (txmode) {
- case MT_PHY_TYPE_CCK:
- case MT_PHY_TYPE_OFDM:
- if (mphy->chandef.chan->band == NL80211_BAND_5GHZ)
- sband = &mphy->sband_5g.sband;
- else
- sband = &mphy->sband_2g.sband;
-
- rate->legacy = sband->bitrates[rate->mcs].bitrate;
- break;
- case MT_PHY_TYPE_HT:
- case MT_PHY_TYPE_HT_GF:
- flags |= RATE_INFO_FLAGS_MCS;
-
- if (gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_VHT:
- flags |= RATE_INFO_FLAGS_VHT_MCS;
-
- if (gi)
- flags |= RATE_INFO_FLAGS_SHORT_GI;
- break;
- case MT_PHY_TYPE_HE_SU:
- case MT_PHY_TYPE_HE_EXT_SU:
- case MT_PHY_TYPE_HE_TB:
- case MT_PHY_TYPE_HE_MU:
- rate->he_gi = gi;
- rate->he_dcm = FIELD_GET(MT_RA_RATE_DCM_EN, r);
-
- flags |= RATE_INFO_FLAGS_HE_MCS;
- break;
- default:
- break;
- }
- rate->flags = flags;
-
- bw = mt7921_mcu_chan_bw(&mphy->chandef) - FIELD_GET(MT_RA_RATE_BW, r);
-
- switch (bw) {
- case IEEE80211_STA_RX_BW_160:
- rate->bw = RATE_INFO_BW_160;
- break;
- case IEEE80211_STA_RX_BW_80:
- rate->bw = RATE_INFO_BW_80;
- break;
- case IEEE80211_STA_RX_BW_40:
- rate->bw = RATE_INFO_BW_40;
- break;
- default:
- rate->bw = RATE_INFO_BW_20;
- break;
- }
+ return 0;
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_fill_message);
static void
mt7921_mcu_scan_event(struct mt7921_dev *dev, struct sk_buff *skb)
@@ -498,49 +419,6 @@ mt7921_mcu_low_power_event(struct mt7921_dev *dev, struct sk_buff *skb)
}
static void
-mt7921_mcu_tx_done_event(struct mt7921_dev *dev, struct sk_buff *skb)
-{
- struct mt7921_mcu_tx_done_event *event;
- struct mt7921_sta *msta;
- struct mt7921_phy *mphy = &dev->phy;
- struct mt7921_mcu_peer_cap peer;
- struct ieee80211_sta *sta;
- LIST_HEAD(list);
-
- skb_pull(skb, sizeof(struct mt7921_mcu_rxd));
- event = (struct mt7921_mcu_tx_done_event *)skb->data;
-
- spin_lock_bh(&dev->sta_poll_lock);
- list_splice_init(&mphy->stats_list, &list);
-
- while (!list_empty(&list)) {
- msta = list_first_entry(&list, struct mt7921_sta, stats_list);
- list_del_init(&msta->stats_list);
-
- if (msta->wcid.idx != event->wlan_idx)
- continue;
-
- spin_unlock_bh(&dev->sta_poll_lock);
-
- sta = wcid_to_sta(&msta->wcid);
-
- /* peer config based on IEEE SPEC */
- memset(&peer, 0x0, sizeof(peer));
- peer.bw = event->bw;
- peer.g2 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
- peer.g4 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
- peer.g8 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
- peer.g16 = !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_160);
- mt7921_mcu_tx_rate_parse(mphy->mt76, &peer,
- &msta->stats.tx_rate, event->tx_rate);
-
- spin_lock_bh(&dev->sta_poll_lock);
- break;
- }
- spin_unlock_bh(&dev->sta_poll_lock);
-}
-
-static void
mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
@@ -560,15 +438,13 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
mt7921_mcu_debug_msg_event(dev, skb);
break;
case MCU_EVENT_COREDUMP:
+ dev->fw_assert = true;
mt76_connac_mcu_coredump_event(&dev->mt76, skb,
&dev->coredump);
return;
case MCU_EVENT_LP_INFO:
mt7921_mcu_low_power_event(dev, skb);
break;
- case MCU_EVENT_TX_DONE:
- mt7921_mcu_tx_done_event(dev, skb);
- break;
default:
break;
}
@@ -577,7 +453,12 @@ mt7921_mcu_rx_unsolicited_event(struct mt7921_dev *dev, struct sk_buff *skb)
void mt7921_mcu_rx_event(struct mt7921_dev *dev, struct sk_buff *skb)
{
- struct mt7921_mcu_rxd *rxd = (struct mt7921_mcu_rxd *)skb->data;
+ struct mt7921_mcu_rxd *rxd;
+
+ if (skb_linearize(skb))
+ return;
+
+ rxd = (struct mt7921_mcu_rxd *)skb->data;
if (rxd->eid == 0x6) {
mt76_mcu_rx_event(&dev->mt76, skb);
@@ -619,7 +500,7 @@ mt7921_mcu_sta_key_tlv(struct mt7921_sta *msta, struct sk_buff *skb,
u8 cipher;
cipher = mt7921_mcu_get_cipher(key->cipher);
- if (cipher == MT_CIPHER_NONE)
+ if (cipher == MCU_CIPHER_NONE)
return -EOPNOTSUPP;
sec_key = &sec->key[0];
@@ -712,7 +593,7 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
enable, false);
}
-static int mt7921_mcu_restart(struct mt76_dev *dev)
+int mt7921_mcu_restart(struct mt76_dev *dev)
{
struct {
u8 power_mode;
@@ -724,26 +605,55 @@ static int mt7921_mcu_restart(struct mt76_dev *dev)
return mt76_mcu_send_msg(dev, MCU_CMD_NIC_POWER_CTRL, &req,
sizeof(req), false);
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_restart);
-static int mt7921_driver_own(struct mt7921_dev *dev)
+static u32 mt7921_get_data_mode(struct mt7921_dev *dev, u32 info)
{
- u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+ u32 mode = DL_MODE_NEED_RSP;
- mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
- if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
- 0, 500)) {
- dev_err(dev->mt76.dev, "Timeout for driver own\n");
- return -EIO;
+ if (info == PATCH_SEC_NOT_SUPPORT)
+ return mode;
+
+ switch (FIELD_GET(PATCH_SEC_ENC_TYPE_MASK, info)) {
+ case PATCH_SEC_ENC_TYPE_PLAIN:
+ break;
+ case PATCH_SEC_ENC_TYPE_AES:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= FIELD_PREP(DL_MODE_KEY_IDX,
+ (info & PATCH_SEC_ENC_AES_KEY_MASK)) & DL_MODE_KEY_IDX;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ case PATCH_SEC_ENC_TYPE_SCRAMBLE:
+ mode |= DL_MODE_ENCRYPT;
+ mode |= DL_CONFIG_ENCRY_MODE_SEL;
+ mode |= DL_MODE_RESET_SEC_IV;
+ break;
+ default:
+ dev_err(dev->mt76.dev, "Encryption type not support!\n");
}
- return 0;
+ return mode;
+}
+
+static char *mt7921_patch_name(struct mt7921_dev *dev)
+{
+ char *ret;
+
+ if (is_mt7922(&dev->mt76))
+ ret = MT7922_ROM_PATCH;
+ else
+ ret = MT7921_ROM_PATCH;
+
+ return ret;
}
static int mt7921_load_patch(struct mt7921_dev *dev)
{
const struct mt7921_patch_hdr *hdr;
const struct firmware *fw = NULL;
- int i, ret, sem;
+ int i, ret, sem, max_len;
+
+ max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
sem = mt76_connac_mcu_patch_sem_ctrl(&dev->mt76, true);
switch (sem) {
@@ -756,7 +666,7 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
return -EAGAIN;
}
- ret = request_firmware(&fw, MT7921_ROM_PATCH, dev->mt76.dev);
+ ret = request_firmware(&fw, mt7921_patch_name(dev), dev->mt76.dev);
if (ret)
goto out;
@@ -774,7 +684,8 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
for (i = 0; i < be32_to_cpu(hdr->desc.n_region); i++) {
struct mt7921_patch_sec *sec;
const u8 *dl;
- u32 len, addr;
+ u32 len, addr, mode;
+ u32 sec_info = 0;
sec = (struct mt7921_patch_sec *)(fw->data + sizeof(*hdr) +
i * sizeof(*sec));
@@ -787,16 +698,18 @@ static int mt7921_load_patch(struct mt7921_dev *dev)
addr = be32_to_cpu(sec->info.addr);
len = be32_to_cpu(sec->info.len);
dl = fw->data + be32_to_cpu(sec->offs);
+ sec_info = be32_to_cpu(sec->info.sec_key_idx);
+ mode = mt7921_get_data_mode(dev, sec_info);
ret = mt76_connac_mcu_init_download(&dev->mt76, addr, len,
- DL_MODE_NEED_RSP);
+ mode);
if (ret) {
dev_err(dev->mt76.dev, "Download request failed\n");
goto out;
}
- ret = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
- dl, len);
+ ret = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ dl, len, max_len);
if (ret) {
dev_err(dev->mt76.dev, "Failed to send patch\n");
goto out;
@@ -815,7 +728,7 @@ out:
default:
ret = -EAGAIN;
dev_err(dev->mt76.dev, "Failed to release patch semaphore\n");
- goto out;
+ break;
}
release_firmware(fw);
@@ -843,9 +756,11 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
const struct mt7921_fw_trailer *hdr,
const u8 *data, bool is_wa)
{
- int i, offset = 0;
+ int i, offset = 0, max_len;
u32 override = 0, option = 0;
+ max_len = mt76_is_sdio(&dev->mt76) ? 2048 : 4096;
+
for (i = 0; i < hdr->n_region; i++) {
const struct mt7921_fw_region *region;
int err;
@@ -867,8 +782,8 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
return err;
}
- err = mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
- data + offset, len);
+ err = __mt76_mcu_send_firmware(&dev->mt76, MCU_CMD_FW_SCATTER,
+ data + offset, len, max_len);
if (err) {
dev_err(dev->mt76.dev, "Failed to send firmware.\n");
return err;
@@ -886,13 +801,25 @@ mt7921_mcu_send_ram_firmware(struct mt7921_dev *dev,
return mt76_connac_mcu_start_firmware(&dev->mt76, override, option);
}
+static char *mt7921_ram_name(struct mt7921_dev *dev)
+{
+ char *ret;
+
+ if (is_mt7922(&dev->mt76))
+ ret = MT7922_FIRMWARE_WM;
+ else
+ ret = MT7921_FIRMWARE_WM;
+
+ return ret;
+}
+
static int mt7921_load_ram(struct mt7921_dev *dev)
{
const struct mt7921_fw_trailer *hdr;
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, MT7921_FIRMWARE_WM, dev->mt76.dev);
+ ret = request_firmware(&fw, mt7921_ram_name(dev), dev->mt76.dev);
if (ret)
return ret;
@@ -929,7 +856,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
int ret;
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
- if (ret) {
+ if (ret && mt76_is_mmio(&dev->mt76)) {
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
goto fw_loaded;
}
@@ -950,7 +877,6 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
}
fw_loaded:
- mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
#ifdef CONFIG_PM
dev->mt76.hw->wiphy->wowlan = &mt76_connac_wowlan_support;
@@ -978,39 +904,24 @@ int mt7921_run_firmware(struct mt7921_dev *dev)
{
int err;
- err = mt7921_driver_own(dev);
+ err = mt7921_load_firmware(dev);
if (err)
return err;
- err = mt7921_load_firmware(dev);
+ err = mt76_connac_mcu_get_nic_capability(&dev->mphy);
if (err)
return err;
set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
- mt7921_mcu_fw_log_2_host(dev, 1);
-
- return mt76_connac_mcu_get_nic_capability(&dev->mphy);
-}
-
-int mt7921_mcu_init(struct mt7921_dev *dev)
-{
- static const struct mt76_mcu_ops mt7921_mcu_ops = {
- .headroom = sizeof(struct mt7921_mcu_txd),
- .mcu_skb_send_msg = mt7921_mcu_send_message,
- .mcu_parse_response = mt7921_mcu_parse_response,
- .mcu_restart = mt7921_mcu_restart,
- };
-
- dev->mt76.mcu_ops = &mt7921_mcu_ops;
-
- return mt7921_run_firmware(dev);
+ return mt7921_mcu_fw_log_2_host(dev, 1);
}
+EXPORT_SYMBOL_GPL(mt7921_run_firmware);
void mt7921_mcu_exit(struct mt7921_dev *dev)
{
- mt7921_wfsys_reset(dev);
skb_queue_purge(&dev->mt76.mcu.res_q);
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_exit);
int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
{
@@ -1041,7 +952,30 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
.total = IEEE80211_NUM_ACS,
};
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
- int ac;
+ struct mu_edca {
+ u8 cw_min;
+ u8 cw_max;
+ u8 aifsn;
+ u8 acm;
+ u8 timer;
+ u8 padding[3];
+ };
+ struct mt7921_mcu_mu_tx {
+ u8 ver;
+ u8 pad0;
+ __le16 len;
+ u8 bss_idx;
+ u8 qos;
+ u8 wmm_idx;
+ u8 pad1;
+ struct mu_edca edca[IEEE80211_NUM_ACS];
+ u8 pad3[32];
+ } __packed req_mu = {
+ .bss_idx = mvif->mt76.idx,
+ .qos = vif->bss_conf.qos,
+ .wmm_idx = mvif->mt76.wmm_idx,
+ };
+ int ac, ret;
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac];
@@ -1062,8 +996,34 @@ int mt7921_mcu_set_tx(struct mt7921_dev *dev, struct ieee80211_vif *vif)
else
e->cw_max = cpu_to_le16(10);
}
- return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
- sizeof(req), true);
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EDCA_UPDATE, &req,
+ sizeof(req), true);
+ if (ret)
+ return ret;
+
+ if (!vif->bss_conf.he_support)
+ return 0;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
+ struct ieee80211_he_mu_edca_param_ac_rec *q;
+ struct mu_edca *e;
+ int to_aci[] = {1, 0, 2, 3};
+
+ if (!mvif->queue_params[ac].mu_edca)
+ break;
+
+ q = &mvif->queue_params[ac].mu_edca_param_rec;
+ e = &(req_mu.edca[to_aci[ac]]);
+
+ e->cw_min = q->ecw_min_max & 0xf;
+ e->cw_max = (q->ecw_min_max & 0xf0) >> 4;
+ e->aifsn = q->aifsn;
+ e->timer = q->mu_edca_timer;
+ }
+
+ return mt76_mcu_send_msg(&dev->mt76, MCU_CMD_SET_MU_EDCA_PARMS, &req_mu,
+ sizeof(req_mu), false);
}
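Illustrative sketch (not part of the patch; demo_decode_ecw is a hypothetical helper): mac80211's ieee80211_he_mu_edca_param_ac_rec carries ECWmin in the low nibble of ecw_min_max and ECWmax in the high nibble, which is exactly what the nibble masks above extract. The firmware command takes the exponents directly, while the conventional contention window follows CW = 2^ECW - 1.

/* Illustrative only: decode one ecw_min_max octet the same way as above. */
static inline void demo_decode_ecw(u8 ecw_min_max)
{
	u8 ecw_min = ecw_min_max & 0xf;		/* low nibble: ECWmin */
	u8 ecw_max = (ecw_min_max & 0xf0) >> 4;	/* high nibble: ECWmax */
	u32 cw_min = (1U << ecw_min) - 1;	/* e.g. 0x94 -> CWmin = 15 */
	u32 cw_max = (1U << ecw_max) - 1;	/*              CWmax = 511 */

	pr_debug("MU EDCA: CWmin=%u CWmax=%u\n", cw_min, cw_max);
}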
int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
@@ -1095,9 +1055,13 @@ int mt7921_mcu_set_chan_info(struct mt7921_phy *phy, int cmd)
.tx_streams_num = hweight8(phy->mt76->antenna_mask),
.rx_streams = phy->mt76->antenna_mask,
.band_idx = phy != &dev->phy,
- .channel_band = chandef->chan->band,
};
+ if (chandef->chan->band == NL80211_BAND_6GHZ)
+ req.channel_band = 2;
+ else
+ req.channel_band = chandef->chan->band;
+
if (dev->mt76.hw->conf.flags & IEEE80211_CONF_OFFCHANNEL)
req.switch_reason = CH_SWITCH_SCAN_BYPASS_DPD;
else if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) &&
@@ -1132,6 +1096,7 @@ int mt7921_mcu_set_eeprom(struct mt7921_dev *dev)
return mt76_mcu_send_msg(&dev->mt76, MCU_EXT_CMD_EFUSE_BUFFER_MODE,
&req, sizeof(req), true);
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_set_eeprom);
int mt7921_mcu_get_eeprom(struct mt7921_dev *dev, u32 offset)
{
@@ -1193,8 +1158,9 @@ int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif)
&ps_req, sizeof(ps_req), true);
}
-int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+static int
+mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct {
@@ -1228,8 +1194,9 @@ int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
&bcnft_req, sizeof(bcnft_req), true);
}
-int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- bool enable)
+static int
+mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
+ bool enable)
{
struct mt7921_vif *mvif = (struct mt7921_vif *)vif->drv_priv;
struct {
@@ -1292,35 +1259,6 @@ int mt7921_mcu_sta_update(struct mt7921_dev *dev, struct ieee80211_sta *sta,
return mt76_connac_mcu_sta_cmd(&dev->mphy, &info);
}
-int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
-{
- struct mt76_phy *mphy = &dev->mt76.phy;
- struct mt76_connac_pm *pm = &dev->pm;
- int i, err = 0;
-
- for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
- mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
- break;
- }
-
- if (i == MT7921_DRV_OWN_RETRY_COUNT) {
- dev_err(dev->mt76.dev, "driver own failed\n");
- err = -EIO;
- goto out;
- }
-
- mt7921_wpdma_reinit_cond(dev);
- clear_bit(MT76_STATE_PM, &mphy->state);
-
- pm->stats.last_wake_event = jiffies;
- pm->stats.doze_time += pm->stats.last_wake_event -
- pm->stats.last_doze_event;
-out:
- return err;
-}
-
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
@@ -1341,34 +1279,20 @@ out:
return err;
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_drv_pmctrl);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev)
{
struct mt76_phy *mphy = &dev->mt76.phy;
struct mt76_connac_pm *pm = &dev->pm;
- int i, err = 0;
+ int err = 0;
mutex_lock(&pm->mutex);
if (mt76_connac_skip_fw_pmctrl(mphy, pm))
goto out;
- for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
- mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
- if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
- PCIE_LPCR_HOST_OWN_SYNC, 4, 50))
- break;
- }
-
- if (i == MT7921_DRV_OWN_RETRY_COUNT) {
- dev_err(dev->mt76.dev, "firmware own failed\n");
- clear_bit(MT76_STATE_PM, &mphy->state);
- err = -EIO;
- }
-
- pm->stats.last_doze_event = jiffies;
- pm->stats.awake_time += pm->stats.last_doze_event -
- pm->stats.last_wake_event;
+ err = __mt7921_mcu_fw_pmctrl(dev);
out:
mutex_unlock(&pm->mutex);
@@ -1377,32 +1301,36 @@ out:
return err;
}
+EXPORT_SYMBOL_GPL(mt7921_mcu_fw_pmctrl);
-void
-mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif)
+int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
+ struct ieee80211_vif *vif,
+ bool enable)
{
- struct mt7921_phy *phy = priv;
- struct mt7921_dev *dev = phy->dev;
struct ieee80211_hw *hw = mt76_hw(dev);
- int ret;
-
- if (dev->pm.enable)
- ret = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
- else
- ret = mt7921_mcu_set_bss_pm(dev, vif, false);
+ int err;
- if (ret)
- return;
+ if (enable) {
+ err = mt7921_mcu_uni_bss_bcnft(dev, vif, true);
+ if (err)
+ return err;
- if (dev->pm.enable) {
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
ieee80211_hw_set(hw, CONNECTION_MONITOR);
mt76_set(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
- } else {
- vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
- __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
- mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ return 0;
}
+
+ err = mt7921_mcu_set_bss_pm(dev, vif, false);
+ if (err)
+ return err;
+
+ vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
+ __clear_bit(IEEE80211_HW_CONNECTION_MONITOR, hw->flags);
+ mt76_clear(dev, MT_WF_RFCR(0), MT_WF_RFCR_DROP_OTHER_BEACON);
+
+ return 0;
}
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
index de3c091f6736..edc0c73f8c01 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.h
@@ -259,25 +259,6 @@ struct mt7921_mcu_ant_id_config {
u8 ant_id[4];
} __packed;
-struct mt7921_mcu_peer_cap {
- struct mt7921_mcu_ant_id_config ant_id_config;
-
- u8 power_offset;
- u8 bw_selector;
- u8 change_bw_rate_n;
- u8 bw;
- u8 spe_idx;
-
- u8 g2;
- u8 g4;
- u8 g8;
- u8 g16;
-
- u8 mmss;
- u8 ampdu_factor;
- u8 rsv[1];
-} __packed;
-
struct mt7921_txpwr_req {
u8 ver;
u8 action;
@@ -293,31 +274,29 @@ struct mt7921_txpwr_event {
struct mt7921_txpwr txpwr;
} __packed;
-struct mt7921_mcu_tx_done_event {
- u8 pid;
- u8 status;
- u16 seq;
-
- u8 wlan_idx;
- u8 tx_cnt;
- u16 tx_rate;
-
- u8 flag;
- u8 tid;
- u8 rsp_rate;
- u8 mcs;
-
- u8 bw;
- u8 tx_pwr;
- u8 reason;
- u8 rsv0[1];
+enum {
+ TM_SWITCH_MODE,
+ TM_SET_AT_CMD,
+ TM_QUERY_AT_CMD,
+};
- u32 delay;
- u32 timestamp;
- u32 applied_flag;
+enum {
+ MT7921_TM_NORMAL,
+ MT7921_TM_TESTMODE,
+ MT7921_TM_ICAP,
+ MT7921_TM_ICAP_OVERLAP,
+ MT7921_TM_WIFISPECTRUM,
+};
- u8 txs[28];
+struct mt7921_rftest_cmd {
+ u8 action;
+ u8 rsv[3];
+ __le32 param0;
+ __le32 param1;
+} __packed;
- u8 rsv1[32];
+struct mt7921_rftest_evt {
+ __le32 param0;
+ __le32 param1;
} __packed;
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
index 2d8bd6bfc820..e9c7c3a19507 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mt7921.h
@@ -29,22 +29,47 @@
#define MT7921_RX_MCU_RING_SIZE 512
#define MT7921_DRV_OWN_RETRY_COUNT 10
+#define MT7921_MCU_INIT_RETRY_COUNT 10
#define MT7921_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7961_1.bin"
#define MT7921_ROM_PATCH "mediatek/WIFI_MT7961_patch_mcu_1_2_hdr.bin"
+#define MT7922_FIRMWARE_WM "mediatek/WIFI_RAM_CODE_MT7922_1.bin"
+#define MT7922_ROM_PATCH "mediatek/WIFI_MT7922_patch_mcu_1_1_hdr.bin"
+
#define MT7921_EEPROM_SIZE 3584
#define MT7921_TOKEN_SIZE 8192
#define MT7921_CFEND_RATE_DEFAULT 0x49 /* OFDM 24M */
#define MT7921_CFEND_RATE_11B 0x03 /* 11B LP, 11M */
-#define MT7921_5G_RATE_DEFAULT 0x4b /* OFDM 6M */
-#define MT7921_2G_RATE_DEFAULT 0x0 /* CCK 1M */
#define MT7921_SKU_RATE_NUM 161
#define MT7921_SKU_MAX_DELTA_IDX MT7921_SKU_RATE_NUM
#define MT7921_SKU_TABLE_SIZE (MT7921_SKU_RATE_NUM + 1)
+#define MT7921_SDIO_HDR_TX_BYTES GENMASK(15, 0)
+#define MT7921_SDIO_HDR_PKT_TYPE GENMASK(17, 16)
+
+enum mt7921_sdio_pkt_type {
+ MT7921_SDIO_TXD,
+ MT7921_SDIO_DATA,
+ MT7921_SDIO_CMD,
+ MT7921_SDIO_FWDL,
+};
+
+struct mt7921_sdio_intr {
+ u32 isr;
+ struct {
+ u32 wtqcr[16];
+ } tx;
+ struct {
+ u16 num[2];
+ u16 len0[16];
+ u16 len1[128];
+ } rx;
+ u32 rec_mb[2];
+} __packed;
+
#define to_rssi(field, rxv) ((FIELD_GET(field, rxv) - 220) / 2)
#define to_rcpi(rssi) (2 * (rssi) + 220)
@@ -64,15 +89,6 @@ enum mt7921_rxq_id {
MT7921_RXQ_MCU_WM = 0,
};
-struct mt7921_sta_stats {
- struct rate_info prob_rate;
- struct rate_info tx_rate;
-
- unsigned long per;
- unsigned long changed;
- unsigned long jiffies;
-};
-
struct mt7921_sta_key_conf {
s8 keyidx;
u8 key[16];
@@ -83,17 +99,14 @@ struct mt7921_sta {
struct mt7921_vif *vif;
- struct list_head stats_list;
struct list_head poll_list;
u32 airtime_ac[8];
- struct mt7921_sta_stats stats;
-
+ unsigned long last_txs;
unsigned long ampdu_state;
+ struct mt76_sta_stats stats;
struct mt7921_sta_key_conf bip;
-
- unsigned long next_txs_ts;
};
DECLARE_EWMA(rssi, 10, 8);
@@ -117,15 +130,34 @@ struct mib_stats {
u32 rts_cnt;
u32 rts_retries_cnt;
u32 ba_miss_cnt;
+
+ u32 tx_bf_ibf_ppdu_cnt;
+ u32 tx_bf_ebf_ppdu_cnt;
+ u32 tx_bf_rx_fb_all_cnt;
+ u32 tx_bf_rx_fb_he_cnt;
+ u32 tx_bf_rx_fb_vht_cnt;
+ u32 tx_bf_rx_fb_ht_cnt;
+
+ u32 tx_ampdu_cnt;
+ u32 tx_mpdu_attempts_cnt;
+ u32 tx_mpdu_success_cnt;
+ u32 tx_pkt_ebf_cnt;
+ u32 tx_pkt_ibf_cnt;
+
+ u32 rx_mpdu_cnt;
+ u32 rx_ampdu_cnt;
+ u32 rx_ampdu_bytes_cnt;
+ u32 rx_ba_cnt;
+
+ u32 tx_amsdu[8];
+ u32 tx_amsdu_cnt;
};
struct mt7921_phy {
struct mt76_phy *mt76;
struct mt7921_dev *dev;
- struct ieee80211_sband_iftype_data iftype[2][NUM_NL80211_IFTYPES];
-
- struct ieee80211_vif *monitor_vif;
+ struct ieee80211_sband_iftype_data iftype[NUM_NL80211_BANDS][NUM_NL80211_IFTYPES];
u32 rxfilter;
u64 omac_mask;
@@ -139,7 +171,6 @@ struct mt7921_phy {
u32 ampdu_ref;
struct mib_stats mib;
- struct list_head stats_list;
u8 sta_work_count;
@@ -147,6 +178,19 @@ struct mt7921_phy {
struct delayed_work scan_work;
};
+#define mt7921_init_reset(dev) ((dev)->hif_ops->init_reset(dev))
+#define mt7921_dev_reset(dev) ((dev)->hif_ops->reset(dev))
+#define mt7921_mcu_init(dev) ((dev)->hif_ops->mcu_init(dev))
+#define __mt7921_mcu_drv_pmctrl(dev) ((dev)->hif_ops->drv_own(dev))
+#define __mt7921_mcu_fw_pmctrl(dev) ((dev)->hif_ops->fw_own(dev))
+struct mt7921_hif_ops {
+ int (*init_reset)(struct mt7921_dev *dev);
+ int (*reset)(struct mt7921_dev *dev);
+ int (*mcu_init)(struct mt7921_dev *dev);
+ int (*drv_own)(struct mt7921_dev *dev);
+ int (*fw_own)(struct mt7921_dev *dev);
+};
+
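Illustrative sketch (not part of the patch; the mt7921x_* names are hypothetical stand-ins): the hif_ops table is how the bus-agnostic core reaches bus-specific reset, MCU bring-up and power-control code, and the macros above simply dispatch through dev->hif_ops, e.g. mt7921_mcu_init(dev) becomes dev->hif_ops->mcu_init(dev). A bus backend fills in the table and assigns it in its probe path, as the PCIe and SDIO glue later in this patch does:

/* Illustrative only: a minimal bus backend wiring up its hif_ops. */
static int mt7921x_stub_op(struct mt7921_dev *dev)
{
	return 0;	/* stand-in for the real bus-specific implementation */
}

static const struct mt7921_hif_ops mt7921x_hif_ops = {
	.init_reset = mt7921x_stub_op,
	.reset = mt7921x_stub_op,
	.mcu_init = mt7921x_stub_op,
	.drv_own = mt7921x_stub_op,
	.fw_own = mt7921x_stub_op,
};

/* ...and in the bus probe function, before mt7921_register_device(): */
/*	dev->hif_ops = &mt7921x_hif_ops; */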
struct mt7921_dev {
union { /* must be first */
struct mt76_dev mt76;
@@ -157,11 +201,10 @@ struct mt7921_dev {
struct mt7921_phy phy;
struct tasklet_struct irq_tasklet;
- u16 chainmask;
-
struct work_struct reset_work;
bool hw_full_reset:1;
bool hw_init_done:1;
+ bool fw_assert:1;
struct list_head sta_poll_list;
spinlock_t sta_poll_lock;
@@ -170,6 +213,7 @@ struct mt7921_dev {
struct mt76_connac_pm pm;
struct mt76_connac_coredump coredump;
+ const struct mt7921_hif_ops *hif_ops;
};
enum {
@@ -247,18 +291,11 @@ u32 mt7921_reg_map(struct mt7921_dev *dev, u32 addr);
int __mt7921_start(struct mt7921_phy *phy);
int mt7921_register_device(struct mt7921_dev *dev);
void mt7921_unregister_device(struct mt7921_dev *dev);
-int mt7921_eeprom_init(struct mt7921_dev *dev);
-void mt7921_eeprom_parse_band_config(struct mt7921_phy *phy);
-int mt7921_eeprom_get_target_power(struct mt7921_dev *dev,
- struct ieee80211_channel *chan,
- u8 chain_idx);
-void mt7921_eeprom_init_sku(struct mt7921_dev *dev);
int mt7921_dma_init(struct mt7921_dev *dev);
int mt7921_wpdma_reset(struct mt7921_dev *dev, bool force);
int mt7921_wpdma_reinit_cond(struct mt7921_dev *dev);
void mt7921_dma_cleanup(struct mt7921_dev *dev);
int mt7921_run_firmware(struct mt7921_dev *dev);
-int mt7921_mcu_init(struct mt7921_dev *dev);
int mt7921_mcu_add_key(struct mt7921_dev *dev, struct ieee80211_vif *vif,
struct mt7921_sta *msta, struct ieee80211_key_conf *key,
enum set_key_cmd cmd);
@@ -324,16 +361,27 @@ static inline bool mt7921_dma_need_reinit(struct mt7921_dev *dev)
return !mt76_get_field(dev, MT_WFDMA_DUMMY_CR, MT_WFDMA_NEED_REINIT);
}
+static inline void mt7921_mcu_tx_cleanup(struct mt7921_dev *dev)
+{
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WM], false);
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_WA], false);
+}
+
+static inline void mt7921_skb_add_sdio_hdr(struct sk_buff *skb,
+ enum mt7921_sdio_pkt_type type)
+{
+ u32 hdr;
+
+ hdr = FIELD_PREP(MT7921_SDIO_HDR_TX_BYTES, skb->len + sizeof(hdr)) |
+ FIELD_PREP(MT7921_SDIO_HDR_PKT_TYPE, type);
+
+ put_unaligned_le32(hdr, skb_push(skb, sizeof(hdr)));
+}
+
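Illustrative sketch (not part of the patch; demo_parse_sdio_hdr is a hypothetical helper and assumes <linux/bitfield.h> and <asm/unaligned.h> are available): the 4-byte header prepended above packs the total transfer length, including the header itself, into bits 15:0 and the packet type into bits 17:16, so the receiving side can recover both with the same GENMASK fields:

/* Illustrative only: recover length and type from a prepended TX header. */
static inline void demo_parse_sdio_hdr(const struct sk_buff *skb)
{
	u32 hdr = get_unaligned_le32(skb->data);
	u32 tx_bytes = FIELD_GET(MT7921_SDIO_HDR_TX_BYTES, hdr);
	u32 type = FIELD_GET(MT7921_SDIO_HDR_PKT_TYPE, hdr);

	/* tx_bytes equals skb->len on the sender side; type is one of
	 * enum mt7921_sdio_pkt_type (TXD/DATA/CMD/FWDL).
	 */
	pr_debug("sdio hdr: %u bytes, type %u\n", tx_bytes, type);
}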
int mt7921_mac_init(struct mt7921_dev *dev);
bool mt7921_mac_wtbl_update(struct mt7921_dev *dev, int idx, u32 mask);
void mt7921_mac_reset_counters(struct mt7921_phy *phy);
-void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
- struct sk_buff *skb, struct mt76_wcid *wcid,
- struct ieee80211_key_conf *key, bool beacon);
void mt7921_mac_set_timing(struct mt7921_phy *phy);
-int mt7921_mac_fill_rx(struct mt7921_dev *dev, struct sk_buff *skb);
-void mt7921_mac_fill_rx_vector(struct mt7921_dev *dev, struct sk_buff *skb);
-void mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb);
int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif,
@@ -342,27 +390,28 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif,
struct ieee80211_sta *sta);
void mt7921_mac_work(struct work_struct *work);
void mt7921_mac_reset_work(struct work_struct *work);
+void mt7921_mac_update_mib_stats(struct mt7921_phy *phy);
void mt7921_reset(struct mt76_dev *mdev);
-void mt7921_tx_cleanup(struct mt7921_dev *dev);
-int mt7921_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
- enum mt76_txq_id qid, struct mt76_wcid *wcid,
- struct ieee80211_sta *sta,
- struct mt76_tx_info *tx_info);
+int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
void mt7921_tx_worker(struct mt76_worker *w);
-void mt7921_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
int mt7921_init_tx_queues(struct mt7921_phy *phy, int idx, int n_desc);
void mt7921_tx_token_put(struct mt7921_dev *dev);
void mt7921_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
struct sk_buff *skb);
void mt7921_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps);
void mt7921_stats_work(struct work_struct *work);
-void mt7921_txp_skb_unmap(struct mt76_dev *dev,
- struct mt76_txwi_cache *txwi);
void mt7921_set_stream_he_caps(struct mt7921_phy *phy);
void mt7921_update_channel(struct mt76_phy *mphy);
int mt7921_init_debugfs(struct mt7921_dev *dev);
+int mt7921_mcu_set_beacon_filter(struct mt7921_dev *dev,
+ struct ieee80211_vif *vif,
+ bool enable);
int mt7921_mcu_uni_tx_ba(struct mt7921_dev *dev,
struct ieee80211_ampdu_params *params,
bool enable);
@@ -371,21 +420,47 @@ int mt7921_mcu_uni_rx_ba(struct mt7921_dev *dev,
bool enable);
void mt7921_scan_work(struct work_struct *work);
int mt7921_mcu_uni_bss_ps(struct mt7921_dev *dev, struct ieee80211_vif *vif);
-int mt7921_mcu_uni_bss_bcnft(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- bool enable);
-int mt7921_mcu_set_bss_pm(struct mt7921_dev *dev, struct ieee80211_vif *vif,
- bool enable);
-int __mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_drv_pmctrl(struct mt7921_dev *dev);
int mt7921_mcu_fw_pmctrl(struct mt7921_dev *dev);
void mt7921_pm_wake_work(struct work_struct *work);
void mt7921_pm_power_save_work(struct work_struct *work);
bool mt7921_wait_for_mcu_init(struct mt7921_dev *dev);
-int mt7921_mac_set_beacon_filter(struct mt7921_phy *phy,
- struct ieee80211_vif *vif,
- bool enable);
-void mt7921_pm_interface_iter(void *priv, u8 *mac, struct ieee80211_vif *vif);
void mt7921_coredump_work(struct work_struct *work);
int mt7921_wfsys_reset(struct mt7921_dev *dev);
int mt7921_get_txpwr_info(struct mt7921_dev *dev, struct mt7921_txpwr *txpwr);
+int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len);
+int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len);
+void mt7921_mac_write_txwi(struct mt7921_dev *dev, __le32 *txwi,
+ struct sk_buff *skb, struct mt76_wcid *wcid,
+ struct ieee80211_key_conf *key, int pid,
+ bool beacon);
+void mt7921_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi);
+void mt7921_mac_sta_poll(struct mt7921_dev *dev);
+int mt7921_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *wait_seq);
+int mt7921_mcu_parse_response(struct mt76_dev *mdev, int cmd,
+ struct sk_buff *skb, int seq);
+int mt7921_mcu_restart(struct mt76_dev *dev);
+
+void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb);
+int mt7921e_mac_reset(struct mt7921_dev *dev);
+int mt7921e_mcu_init(struct mt7921_dev *dev);
+int mt7921s_wfsys_reset(struct mt7921_dev *dev);
+int mt7921s_mac_reset(struct mt7921_dev *dev);
+int mt7921s_init_reset(struct mt7921_dev *dev);
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev);
+int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev);
+
+int mt7921s_mcu_init(struct mt7921_dev *dev);
+int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev);
+int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev);
+int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info);
+void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e);
+bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update);
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
index c3905bcab360..305b63fa1a8a 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c
@@ -14,9 +14,14 @@
static const struct pci_device_id mt7921_pci_device_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7961) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEDIATEK, 0x7922) },
{ },
};
+static bool mt7921_disable_aspm;
+module_param_named(disable_aspm, mt7921_disable_aspm, bool, 0644);
+MODULE_PARM_DESC(disable_aspm, "disable PCI ASPM support");
+
static void
mt7921_rx_poll_complete(struct mt76_dev *mdev, enum mt76_rxq_id q)
{
@@ -88,21 +93,46 @@ static void mt7921_irq_tasklet(unsigned long data)
napi_schedule(&dev->mt76.napi[MT_RXQ_MAIN]);
}
+static int mt7921e_init_reset(struct mt7921_dev *dev)
+{
+ return mt7921_wpdma_reset(dev, true);
+}
+
+static void mt7921e_unregister_device(struct mt7921_dev *dev)
+{
+ int i;
+ struct mt76_connac_pm *pm = &dev->pm;
+
+ mt76_unregister_device(&dev->mt76);
+ mt76_for_each_q_rx(&dev->mt76, i)
+ napi_disable(&dev->mt76.napi[i]);
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+
+ mt7921_tx_token_put(dev);
+ mt7921_mcu_drv_pmctrl(dev);
+ mt7921_dma_cleanup(dev);
+ mt7921_wfsys_reset(dev);
+ mt7921_mcu_exit(dev);
+
+ tasklet_disable(&dev->irq_tasklet);
+ mt76_free_device(&dev->mt76);
+}
+
static int mt7921_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
static const struct mt76_driver_ops drv_ops = {
/* txwi_size = txd size + txp size */
.txwi_size = MT_TXD_SIZE + sizeof(struct mt7921_txp_common),
- .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ |
- MT_DRV_AMSDU_OFFLOAD,
+ .drv_flags = MT_DRV_TXWI_NO_FREE | MT_DRV_HW_MGMT_TXQ,
.survey_flags = SURVEY_INFO_TIME_TX |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_BSS_RX,
.token_size = MT7921_TOKEN_SIZE,
- .tx_prepare_skb = mt7921_tx_prepare_skb,
- .tx_complete_skb = mt7921_tx_complete_skb,
- .rx_skb = mt7921_queue_rx_skb,
+ .tx_prepare_skb = mt7921e_tx_prepare_skb,
+ .tx_complete_skb = mt7921e_tx_complete_skb,
+ .rx_skb = mt7921e_queue_rx_skb,
.rx_poll_complete = mt7921_rx_poll_complete,
.sta_ps = mt7921_sta_ps,
.sta_add = mt7921_mac_sta_add,
@@ -110,6 +140,15 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
.sta_remove = mt7921_mac_sta_remove,
.update_survey = mt7921_update_channel,
};
+
+ static const struct mt7921_hif_ops mt7921_pcie_ops = {
+ .init_reset = mt7921e_init_reset,
+ .reset = mt7921e_mac_reset,
+ .mcu_init = mt7921e_mcu_init,
+ .drv_own = mt7921e_mcu_drv_pmctrl,
+ .fw_own = mt7921e_mcu_fw_pmctrl,
+ };
+
struct mt7921_dev *dev;
struct mt76_dev *mdev;
int ret;
@@ -128,11 +167,12 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret < 0)
return ret;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret)
goto err_free_pci_vec;
- mt76_pci_disable_aspm(pdev);
+ if (mt7921_disable_aspm)
+ mt76_pci_disable_aspm(pdev);
mdev = mt76_alloc_device(&pdev->dev, sizeof(*dev), &mt7921_ops,
&drv_ops);
@@ -142,6 +182,7 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
}
dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->hif_ops = &mt7921_pcie_ops;
mt76_mmio_init(&dev->mt76, pcim_iomap_table(pdev)[0]);
tasklet_init(&dev->irq_tasklet, mt7921_irq_tasklet, (unsigned long)dev);
@@ -158,6 +199,10 @@ static int mt7921_pci_probe(struct pci_dev *pdev,
if (ret)
goto err_free_dev;
+ ret = mt7921_dma_init(dev);
+ if (ret)
+ goto err_free_irq;
+
ret = mt7921_register_device(dev);
if (ret)
goto err_free_irq;
@@ -179,7 +224,7 @@ static void mt7921_pci_remove(struct pci_dev *pdev)
struct mt76_dev *mdev = pci_get_drvdata(pdev);
struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
- mt7921_unregister_device(dev);
+ mt7921e_unregister_device(dev);
devm_free_irq(&pdev->dev, pdev->irq, dev);
pci_free_irq_vectors(pdev);
}
@@ -297,12 +342,15 @@ static int mt7921_pci_resume(struct pci_dev *pdev)
MT_WFDMA0_GLO_CFG_TX_DMA_EN | MT_WFDMA0_GLO_CFG_RX_DMA_EN);
mt76_worker_enable(&mdev->tx_worker);
+
+ local_bh_disable();
mt76_for_each_q_rx(mdev, i) {
napi_enable(&mdev->napi[i]);
napi_schedule(&mdev->napi[i]);
}
napi_enable(&mdev->tx_napi);
napi_schedule(&mdev->tx_napi);
+ local_bh_enable();
/* restore previous ds setting */
if (!pm->ds_enable)
@@ -331,6 +379,8 @@ module_pci_driver(mt7921_pci_driver);
MODULE_DEVICE_TABLE(pci, mt7921_pci_device_table);
MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
MODULE_FIRMWARE(MT7921_ROM_PATCH);
+MODULE_FIRMWARE(MT7922_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7922_ROM_PATCH);
MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
MODULE_AUTHOR("Lorenzo Bianconi <lorenzo@kernel.org>");
MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
new file mode 100644
index 000000000000..f9547d27356e
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#include "mt7921.h"
+#include "../dma.h"
+#include "mac.h"
+
+static void
+mt7921_write_hw_txp(struct mt7921_dev *dev, struct mt76_tx_info *tx_info,
+ void *txp_ptr, u32 id)
+{
+ struct mt7921_hw_txp *txp = txp_ptr;
+ struct mt7921_txp_ptr *ptr = &txp->ptr[0];
+ int i, nbuf = tx_info->nbuf - 1;
+
+ tx_info->buf[0].len = MT_TXD_SIZE + sizeof(*txp);
+ tx_info->nbuf = 1;
+
+ txp->msdu_id[0] = cpu_to_le16(id | MT_MSDU_ID_VALID);
+
+ for (i = 0; i < nbuf; i++) {
+ u16 len = tx_info->buf[i + 1].len & MT_TXD_LEN_MASK;
+ u32 addr = tx_info->buf[i + 1].addr;
+
+ if (i == nbuf - 1)
+ len |= MT_TXD_LEN_LAST;
+
+ if (i & 1) {
+ ptr->buf1 = cpu_to_le32(addr);
+ ptr->len1 = cpu_to_le16(len);
+ ptr++;
+ } else {
+ ptr->buf0 = cpu_to_le32(addr);
+ ptr->len0 = cpu_to_le16(len);
+ }
+ }
+}
+
+int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ struct mt76_txwi_cache *t;
+ struct mt7921_txp_common *txp;
+ int id, pid;
+ u8 *txwi = (u8 *)txwi_ptr;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ t = (struct mt76_txwi_cache *)(txwi + mdev->drv->txwi_size);
+ t->skb = tx_info->skb;
+
+ id = mt76_token_consume(mdev, &t);
+ if (id < 0)
+ return id;
+
+ if (sta) {
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->last_txs = jiffies;
+ }
+ }
+
+ pid = mt76_tx_status_skb_add(mdev, wcid, tx_info->skb);
+ mt7921_mac_write_txwi(dev, txwi_ptr, tx_info->skb, wcid, key,
+ pid, false);
+
+ txp = (struct mt7921_txp_common *)(txwi + MT_TXD_SIZE);
+ memset(txp, 0, sizeof(struct mt7921_txp_common));
+ mt7921_write_hw_txp(dev, tx_info, txp, id);
+
+ tx_info->skb = DMA_DUMMY_DATA;
+
+ return 0;
+}
+
+static void
+mt7921_txp_skb_unmap(struct mt76_dev *dev, struct mt76_txwi_cache *t)
+{
+ struct mt7921_txp_common *txp;
+ int i;
+
+ txp = mt7921_txwi_to_txp(dev, t);
+
+ for (i = 0; i < ARRAY_SIZE(txp->hw.ptr); i++) {
+ struct mt7921_txp_ptr *ptr = &txp->hw.ptr[i];
+ bool last;
+ u16 len;
+
+ len = le16_to_cpu(ptr->len0);
+ last = len & MT_TXD_LEN_LAST;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf0), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+
+ len = le16_to_cpu(ptr->len1);
+ last = len & MT_TXD_LEN_LAST;
+ len &= MT_TXD_LEN_MASK;
+ dma_unmap_single(dev->dev, le32_to_cpu(ptr->buf1), len,
+ DMA_TO_DEVICE);
+ if (last)
+ break;
+ }
+}
+
+static void
+mt7921_txwi_free(struct mt7921_dev *dev, struct mt76_txwi_cache *t,
+ struct ieee80211_sta *sta, bool clear_status,
+ struct list_head *free_list)
+{
+ struct mt76_dev *mdev = &dev->mt76;
+ __le32 *txwi;
+ u16 wcid_idx;
+
+ mt7921_txp_skb_unmap(mdev, t);
+ if (!t->skb)
+ goto out;
+
+ txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t);
+ if (sta) {
+ struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
+
+ if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ wcid_idx = wcid->idx;
+ } else {
+ wcid_idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ }
+
+ __mt76_tx_complete_skb(mdev, wcid_idx, t->skb, free_list);
+
+out:
+ t->skb = NULL;
+ mt76_put_txwi(mdev, t);
+}
+
+static void
+mt7921_mac_tx_free(struct mt7921_dev *dev, struct sk_buff *skb)
+{
+ struct mt7921_tx_free *free = (struct mt7921_tx_free *)skb->data;
+ struct mt76_dev *mdev = &dev->mt76;
+ struct mt76_txwi_cache *txwi;
+ struct ieee80211_sta *sta = NULL;
+ LIST_HEAD(free_list);
+ struct sk_buff *tmp;
+ bool wake = false;
+ u8 i, count;
+
+ /* clean DMA queues and unmap buffers first */
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_PSD], false);
+ mt76_queue_tx_cleanup(dev, dev->mphy.q_tx[MT_TXQ_BE], false);
+
+ /* TODO: MT_TX_FREE_LATENCY is the MSDU time from when the TXD is queued
+ * into the PLE until the ack is received or the frame is dropped by hw
+ * (air + hw queue time). Use it for the Tx airtime instead of accessing
+ * the WTBL.
+ */
+ count = FIELD_GET(MT_TX_FREE_MSDU_CNT, le16_to_cpu(free->ctrl));
+ for (i = 0; i < count; i++) {
+ u32 msdu, info = le32_to_cpu(free->info[i]);
+ u8 stat;
+
+ /* 1'b1: new wcid pair.
+ * 1'b0: msdu_id with the same 'wcid pair' as above.
+ */
+ if (info & MT_TX_FREE_PAIR) {
+ struct mt7921_sta *msta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ count++;
+ idx = FIELD_GET(MT_TX_FREE_WLAN_ID, info);
+ wcid = rcu_dereference(dev->mt76.wcid[idx]);
+ sta = wcid_to_sta(wcid);
+ if (!sta)
+ continue;
+
+ msta = container_of(wcid, struct mt7921_sta, wcid);
+ spin_lock_bh(&dev->sta_poll_lock);
+ if (list_empty(&msta->poll_list))
+ list_add_tail(&msta->poll_list, &dev->sta_poll_list);
+ spin_unlock_bh(&dev->sta_poll_lock);
+ continue;
+ }
+
+ msdu = FIELD_GET(MT_TX_FREE_MSDU_ID, info);
+ stat = FIELD_GET(MT_TX_FREE_STATUS, info);
+
+ txwi = mt76_token_release(mdev, msdu, &wake);
+ if (!txwi)
+ continue;
+
+ mt7921_txwi_free(dev, txwi, sta, stat, &free_list);
+ }
+
+ if (wake)
+ mt76_set_tx_blocked(&dev->mt76, false);
+
+ napi_consume_skb(skb, 1);
+
+ list_for_each_entry_safe(skb, tmp, &free_list, list) {
+ skb_list_del_init(skb);
+ napi_consume_skb(skb, 1);
+ }
+
+ rcu_read_lock();
+ mt7921_mac_sta_poll(dev);
+ rcu_read_unlock();
+
+ mt76_worker_schedule(&dev->mt76.tx_worker);
+}
+
+void mt7921e_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q,
+ struct sk_buff *skb)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ __le32 *rxd = (__le32 *)skb->data;
+ enum rx_pkt_type type;
+
+ type = FIELD_GET(MT_RXD0_PKT_TYPE, le32_to_cpu(rxd[0]));
+
+ switch (type) {
+ case PKT_TYPE_TXRX_NOTIFY:
+ mt7921_mac_tx_free(dev, skb);
+ break;
+ default:
+ mt7921_queue_rx_skb(mdev, q, skb);
+ break;
+ }
+}
+
+void mt7921e_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ if (!e->txwi) {
+ dev_kfree_skb_any(e->skb);
+ return;
+ }
+
+ /* error path */
+ if (e->skb == DMA_DUMMY_DATA) {
+ struct mt76_txwi_cache *t;
+ struct mt7921_txp_common *txp;
+ u16 token;
+
+ txp = mt7921_txwi_to_txp(mdev, e->txwi);
+ token = le16_to_cpu(txp->hw.msdu_id[0]) & ~MT_MSDU_ID_VALID;
+ t = mt76_token_put(mdev, token);
+ e->skb = t ? t->skb : NULL;
+ }
+
+ if (e->skb)
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+
+void mt7921_tx_token_put(struct mt7921_dev *dev)
+{
+ struct mt76_txwi_cache *txwi;
+ int id;
+
+ spin_lock_bh(&dev->mt76.token_lock);
+ idr_for_each_entry(&dev->mt76.token, txwi, id) {
+ mt7921_txwi_free(dev, txwi, NULL, false, NULL);
+ dev->mt76.token_count--;
+ }
+ spin_unlock_bh(&dev->mt76.token_lock);
+ idr_destroy(&dev->mt76.token);
+}
+
+int mt7921e_mac_reset(struct mt7921_dev *dev)
+{
+ int i, err;
+
+ mt7921e_mcu_drv_pmctrl(dev);
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA, 0);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0x0);
+
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+
+ mt76_txq_schedule_all(&dev->mphy);
+
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MAIN]);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MCU]);
+ napi_disable(&dev->mt76.napi[MT_RXQ_MCU_WA]);
+ napi_disable(&dev->mt76.tx_napi);
+
+ mt7921_tx_token_put(dev);
+ idr_init(&dev->mt76.token);
+
+ mt7921_wpdma_reset(dev, true);
+
+ local_bh_disable();
+ mt76_for_each_q_rx(&dev->mt76, i) {
+ napi_enable(&dev->mt76.napi[i]);
+ napi_schedule(&dev->mt76.napi[i]);
+ }
+ local_bh_enable();
+
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+ mt76_wr(dev, MT_WFDMA0_HOST_INT_ENA,
+ MT_INT_RX_DONE_ALL | MT_INT_TX_DONE_ALL |
+ MT_INT_MCU_CMD);
+ mt76_wr(dev, MT_PCIE_MAC_INT_ENABLE, 0xff);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ local_bh_disable();
+ napi_enable(&dev->mt76.tx_napi);
+ napi_schedule(&dev->mt76.tx_napi);
+ local_bh_enable();
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
new file mode 100644
index 000000000000..583a89a34734
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mcu.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#include "mt7921.h"
+#include "mcu.h"
+
+static int mt7921e_driver_own(struct mt7921_dev *dev)
+{
+ u32 reg = mt7921_reg_map_l1(dev, MT_TOP_LPCR_HOST_BAND0);
+
+ mt76_wr(dev, reg, MT_TOP_LPCR_HOST_DRV_OWN);
+ if (!mt76_poll_msec(dev, reg, MT_TOP_LPCR_HOST_FW_OWN,
+ 0, 500)) {
+ dev_err(dev->mt76.dev, "Timeout for driver own\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int
+mt7921_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ enum mt76_mcuq_id txq = MT_MCUQ_WM;
+ int ret;
+
+ ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ if (ret)
+ return ret;
+
+ if (cmd == MCU_CMD_FW_SCATTER)
+ txq = MT_MCUQ_FWDL;
+
+ return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+}
+
+int mt7921e_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7921_mcu_ops = {
+ .headroom = sizeof(struct mt7921_mcu_txd),
+ .mcu_skb_send_msg = mt7921_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_restart = mt7921_mcu_restart,
+ };
+ int err;
+
+ dev->mt76.mcu_ops = &mt7921_mcu_ops;
+
+ err = mt7921e_driver_own(dev);
+ if (err)
+ return err;
+
+ err = mt7921_run_firmware(dev);
+
+ mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
+
+ return err;
+}
+
+int mt7921e_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int i, err = 0;
+
+ for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 0, 50))
+ break;
+ }
+
+ if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ err = -EIO;
+ goto out;
+ }
+
+ mt7921_wpdma_reinit_cond(dev);
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
+out:
+ return err;
+}
+
+int mt7921e_mcu_fw_pmctrl(struct mt7921_dev *dev)
+{
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int i, err = 0;
+
+ for (i = 0; i < MT7921_DRV_OWN_RETRY_COUNT; i++) {
+ mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_SET_OWN);
+ if (mt76_poll_msec(dev, MT_CONN_ON_LPCTL,
+ PCIE_LPCR_HOST_OWN_SYNC, 4, 50))
+ break;
+ }
+
+ if (i == MT7921_DRV_OWN_RETRY_COUNT) {
+ dev_err(dev->mt76.dev, "firmware own failed\n");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
+ }
+
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+
+ return err;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
index b6944c867a57..cbd38122c510 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/regs.h
@@ -14,7 +14,7 @@
#define MT_MCU_INT_EVENT_SER_TRIGGER BIT(2)
#define MT_MCU_INT_EVENT_RESET_DONE BIT(3)
-#define MT_PLE_BASE 0x8000
+#define MT_PLE_BASE 0x820c0000
#define MT_PLE(ofs) (MT_PLE_BASE + (ofs))
#define MT_PLE_FL_Q0_CTRL MT_PLE(0x1b0)
@@ -26,7 +26,7 @@
((n) << 2))
#define MT_PLE_AMSDU_PACK_MSDU_CNT(n) MT_PLE(0x10e0 + ((n) << 2))
-#define MT_MDP_BASE 0xf000
+#define MT_MDP_BASE 0x820cd000
#define MT_MDP(ofs) (MT_MDP_BASE + (ofs))
#define MT_MDP_DCR0 MT_MDP(0x000)
@@ -49,7 +49,7 @@
#define MT_MDP_TO_WM 1
/* TMAC: band 0(0x21000), band 1(0xa1000) */
-#define MT_WF_TMAC_BASE(_band) ((_band) ? 0xa1000 : 0x21000)
+#define MT_WF_TMAC_BASE(_band) ((_band) ? 0x820f4000 : 0x820e4000)
#define MT_WF_TMAC(_band, ofs) (MT_WF_TMAC_BASE(_band) + (ofs))
#define MT_TMAC_TCR0(_band) MT_WF_TMAC(_band, 0)
@@ -74,7 +74,7 @@
#define MT_TMAC_TRCR0(_band) MT_WF_TMAC(_band, 0x09c)
#define MT_TMAC_TFCR0(_band) MT_WF_TMAC(_band, 0x1e0)
-#define MT_WF_DMA_BASE(_band) ((_band) ? 0xa1e00 : 0x21e00)
+#define MT_WF_DMA_BASE(_band) ((_band) ? 0x820f7000 : 0x820e7000)
#define MT_WF_DMA(_band, ofs) (MT_WF_DMA_BASE(_band) + (ofs))
#define MT_DMA_DCR0(_band) MT_WF_DMA(_band, 0x000)
@@ -82,7 +82,7 @@
#define MT_DMA_DCR0_RXD_G5_EN BIT(23)
/* LPON: band 0(0x24200), band 1(0xa4200) */
-#define MT_WF_LPON_BASE(_band) ((_band) ? 0xa4200 : 0x24200)
+#define MT_WF_LPON_BASE(_band) ((_band) ? 0x820fb000 : 0x820eb000)
#define MT_WF_LPON(_band, ofs) (MT_WF_LPON_BASE(_band) + (ofs))
#define MT_LPON_UTTR0(_band) MT_WF_LPON(_band, 0x080)
@@ -92,25 +92,57 @@
#define MT_LPON_TCR_SW_MODE GENMASK(1, 0)
#define MT_LPON_TCR_SW_WRITE BIT(0)
+/* ETBF: band 0(0x24000), band 1(0xa4000) */
+#define MT_WF_ETBF_BASE(_band) ((_band) ? 0x820fa000 : 0x820ea000)
+#define MT_WF_ETBF(_band, ofs) (MT_WF_ETBF_BASE(_band) + (ofs))
+
+#define MT_ETBF_TX_APP_CNT(_band) MT_WF_ETBF(_band, 0x150)
+#define MT_ETBF_TX_IBF_CNT GENMASK(31, 16)
+#define MT_ETBF_TX_EBF_CNT GENMASK(15, 0)
+
+#define MT_ETBF_RX_FB_CNT(_band) MT_WF_ETBF(_band, 0x158)
+#define MT_ETBF_RX_FB_ALL GENMASK(31, 24)
+#define MT_ETBF_RX_FB_HE GENMASK(23, 16)
+#define MT_ETBF_RX_FB_VHT GENMASK(15, 8)
+#define MT_ETBF_RX_FB_HT GENMASK(7, 0)
+
/* MIB: band 0(0x24800), band 1(0xa4800) */
-#define MT_WF_MIB_BASE(_band) ((_band) ? 0xa4800 : 0x24800)
+#define MT_WF_MIB_BASE(_band) ((_band) ? 0x820fd000 : 0x820ed000)
#define MT_WF_MIB(_band, ofs) (MT_WF_MIB_BASE(_band) + (ofs))
+#define MT_MIB_SCR1(_band) MT_WF_MIB(_band, 0x004)
+#define MT_MIB_TXDUR_EN BIT(8)
+#define MT_MIB_RXDUR_EN BIT(9)
+
#define MT_MIB_SDR3(_band) MT_WF_MIB(_band, 0x698)
#define MT_MIB_SDR3_FCS_ERR_MASK GENMASK(31, 16)
+#define MT_MIB_SDR5(_band) MT_WF_MIB(_band, 0x780)
+
#define MT_MIB_SDR9(_band) MT_WF_MIB(_band, 0x02c)
#define MT_MIB_SDR9_BUSY_MASK GENMASK(23, 0)
+#define MT_MIB_SDR12(_band) MT_WF_MIB(_band, 0x558)
+#define MT_MIB_SDR14(_band) MT_WF_MIB(_band, 0x564)
+#define MT_MIB_SDR15(_band) MT_WF_MIB(_band, 0x568)
+
#define MT_MIB_SDR16(_band) MT_WF_MIB(_band, 0x048)
#define MT_MIB_SDR16_BUSY_MASK GENMASK(23, 0)
+#define MT_MIB_SDR22(_band) MT_WF_MIB(_band, 0x770)
+#define MT_MIB_SDR23(_band) MT_WF_MIB(_band, 0x774)
+#define MT_MIB_SDR31(_band) MT_WF_MIB(_band, 0x55c)
+
+#define MT_MIB_SDR32(_band) MT_WF_MIB(_band, 0x7a8)
+#define MT_MIB_SDR9_IBF_CNT_MASK GENMASK(31, 16)
+#define MT_MIB_SDR9_EBF_CNT_MASK GENMASK(15, 0)
+
#define MT_MIB_SDR34(_band) MT_WF_MIB(_band, 0x090)
#define MT_MIB_MU_BF_TX_CNT GENMASK(15, 0)
-#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x098)
+#define MT_MIB_SDR36(_band) MT_WF_MIB(_band, 0x054)
#define MT_MIB_SDR36_TXTIME_MASK GENMASK(23, 0)
-#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x09c)
+#define MT_MIB_SDR37(_band) MT_WF_MIB(_band, 0x058)
#define MT_MIB_SDR37_RXTIME_MASK GENMASK(23, 0)
#define MT_MIB_DR8(_band) MT_WF_MIB(_band, 0x0c0)
@@ -138,7 +170,7 @@
#define MT_MIB_ARNG(_band, n) MT_WF_MIB(_band, 0x0b0 + ((n) << 2))
#define MT_MIB_ARNCR_RANGE(val, n) (((val) >> ((n) << 3)) & GENMASK(7, 0))
-#define MT_WTBLON_TOP_BASE 0x34000
+#define MT_WTBLON_TOP_BASE 0x820d4000
#define MT_WTBLON_TOP(ofs) (MT_WTBLON_TOP_BASE + (ofs))
#define MT_WTBLON_TOP_WDUCR MT_WTBLON_TOP(0x200)
#define MT_WTBLON_TOP_WDUCR_GROUP GENMASK(2, 0)
@@ -148,7 +180,7 @@
#define MT_WTBL_UPDATE_ADM_COUNT_CLEAR BIT(12)
#define MT_WTBL_UPDATE_BUSY BIT(31)
-#define MT_WTBL_BASE 0x38000
+#define MT_WTBL_BASE 0x820d8000
#define MT_WTBL_LMAC_ID GENMASK(14, 8)
#define MT_WTBL_LMAC_DW GENMASK(7, 2)
#define MT_WTBL_LMAC_OFFS(_id, _dw) (MT_WTBL_BASE | \
@@ -156,7 +188,7 @@
FIELD_PREP(MT_WTBL_LMAC_DW, _dw))
/* AGG: band 0(0x20800), band 1(0xa0800) */
-#define MT_WF_AGG_BASE(_band) ((_band) ? 0xa0800 : 0x20800)
+#define MT_WF_AGG_BASE(_band) ((_band) ? 0x820f2000 : 0x820e2000)
#define MT_WF_AGG(_band, ofs) (MT_WF_AGG_BASE(_band) + (ofs))
#define MT_AGG_AWSCR0(_band, _n) MT_WF_AGG(_band, 0x05c + (_n) * 4)
@@ -187,7 +219,7 @@
#define MT_AGG_ATCR3(_band) MT_WF_AGG(_band, 0x0f4)
/* ARB: band 0(0x20c00), band 1(0xa0c00) */
-#define MT_WF_ARB_BASE(_band) ((_band) ? 0xa0c00 : 0x20c00)
+#define MT_WF_ARB_BASE(_band) ((_band) ? 0x820f3000 : 0x820e3000)
#define MT_WF_ARB(_band, ofs) (MT_WF_ARB_BASE(_band) + (ofs))
#define MT_ARB_SCR(_band) MT_WF_ARB(_band, 0x080)
@@ -197,7 +229,7 @@
#define MT_ARB_DRNGR0(_band, _n) MT_WF_ARB(_band, 0x194 + (_n) * 4)
/* RMAC: band 0(0x21400), band 1(0xa1400) */
-#define MT_WF_RMAC_BASE(_band) ((_band) ? 0xa1400 : 0x21400)
+#define MT_WF_RMAC_BASE(_band) ((_band) ? 0x820f5000 : 0x820e5000)
#define MT_WF_RMAC(_band, ofs) (MT_WF_RMAC_BASE(_band) + (ofs))
#define MT_WF_RFCR(_band) MT_WF_RMAC(_band, 0x000)
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
new file mode 100644
index 000000000000..ddf0eeb8b688
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio.c
@@ -0,0 +1,317 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+
+#include <linux/mmc/host.h>
+#include <linux/mmc/sdio_ids.h>
+#include <linux/mmc/sdio_func.h>
+
+#include "mt7921.h"
+#include "../sdio.h"
+#include "mac.h"
+#include "mcu.h"
+
+static const struct sdio_device_id mt7921s_table[] = {
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, 0x7901) },
+ { } /* Terminating entry */
+};
+
+static void mt7921s_txrx_worker(struct mt76_worker *w)
+{
+ struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
+ txrx_worker);
+ struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
+ queue_work(mdev->wq, &dev->pm.wake_work);
+ return;
+ }
+
+ mt76s_txrx_worker(sdio);
+ mt76_connac_pm_unref(&dev->mphy, &dev->pm);
+}
+
+static void mt7921s_unregister_device(struct mt7921_dev *dev)
+{
+ struct mt76_connac_pm *pm = &dev->pm;
+
+ mt76_unregister_device(&dev->mt76);
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+
+ mt76s_deinit(&dev->mt76);
+ mt7921s_wfsys_reset(dev);
+ mt7921_mcu_exit(dev);
+
+ mt76_free_device(&dev->mt76);
+}
+
+static int mt7921s_parse_intr(struct mt76_dev *dev, struct mt76s_intr *intr)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+ struct mt7921_sdio_intr *irq_data = sdio->intr_data;
+ int i, err;
+
+ err = sdio_readsb(sdio->func, irq_data, MCR_WHISR, sizeof(*irq_data));
+ if (err < 0)
+ return err;
+
+ intr->isr = irq_data->isr;
+ intr->rec_mb = irq_data->rec_mb;
+ intr->tx.wtqcr = irq_data->tx.wtqcr;
+ intr->rx.num = irq_data->rx.num;
+ for (i = 0; i < 2 ; i++) {
+ if (!i)
+ intr->rx.len[0] = irq_data->rx.len0;
+ else
+ intr->rx.len[1] = irq_data->rx.len1;
+ }
+
+ return 0;
+}
+
+static int mt7921s_probe(struct sdio_func *func,
+ const struct sdio_device_id *id)
+{
+ static const struct mt76_driver_ops drv_ops = {
+ .txwi_size = MT_SDIO_TXD_SIZE,
+ .survey_flags = SURVEY_INFO_TIME_TX |
+ SURVEY_INFO_TIME_RX |
+ SURVEY_INFO_TIME_BSS_RX,
+ .tx_prepare_skb = mt7921s_tx_prepare_skb,
+ .tx_complete_skb = mt7921s_tx_complete_skb,
+ .tx_status_data = mt7921s_tx_status_data,
+ .rx_skb = mt7921_queue_rx_skb,
+ .sta_ps = mt7921_sta_ps,
+ .sta_add = mt7921_mac_sta_add,
+ .sta_assoc = mt7921_mac_sta_assoc,
+ .sta_remove = mt7921_mac_sta_remove,
+ .update_survey = mt7921_update_channel,
+ };
+ static const struct mt76_bus_ops mt7921s_ops = {
+ .rr = mt76s_rr,
+ .rmw = mt76s_rmw,
+ .wr = mt76s_wr,
+ .write_copy = mt76s_write_copy,
+ .read_copy = mt76s_read_copy,
+ .wr_rp = mt76s_wr_rp,
+ .rd_rp = mt76s_rd_rp,
+ .type = MT76_BUS_SDIO,
+ };
+ static const struct mt7921_hif_ops mt7921_sdio_ops = {
+ .init_reset = mt7921s_init_reset,
+ .reset = mt7921s_mac_reset,
+ .mcu_init = mt7921s_mcu_init,
+ .drv_own = mt7921s_mcu_drv_pmctrl,
+ .fw_own = mt7921s_mcu_fw_pmctrl,
+ };
+
+ struct mt7921_dev *dev;
+ struct mt76_dev *mdev;
+ int ret, i;
+
+ mdev = mt76_alloc_device(&func->dev, sizeof(*dev), &mt7921_ops,
+ &drv_ops);
+ if (!mdev)
+ return -ENOMEM;
+
+ dev = container_of(mdev, struct mt7921_dev, mt76);
+ dev->hif_ops = &mt7921_sdio_ops;
+
+ sdio_set_drvdata(func, dev);
+
+ ret = mt76s_init(mdev, func, &mt7921s_ops);
+ if (ret < 0)
+ goto error;
+
+ ret = mt76s_hw_init(mdev, func, MT76_CONNAC2_SDIO);
+ if (ret)
+ goto error;
+
+ mdev->rev = (mt76_rr(dev, MT_HW_CHIPID) << 16) |
+ (mt76_rr(dev, MT_HW_REV) & 0xff);
+ dev_dbg(mdev->dev, "ASIC revision: %04x\n", mdev->rev);
+
+ mdev->sdio.parse_irq = mt7921s_parse_intr;
+ mdev->sdio.intr_data = devm_kmalloc(mdev->dev,
+ sizeof(struct mt7921_sdio_intr),
+ GFP_KERNEL);
+ if (!mdev->sdio.intr_data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mdev->sdio.xmit_buf); i++) {
+ mdev->sdio.xmit_buf[i] = devm_kmalloc(mdev->dev,
+ MT76S_XMIT_BUF_SZ,
+ GFP_KERNEL);
+ if (!mdev->sdio.xmit_buf[i]) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ }
+
+ ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MAIN);
+ if (ret)
+ goto error;
+
+ ret = mt76s_alloc_rx_queue(mdev, MT_RXQ_MCU);
+ if (ret)
+ goto error;
+
+ ret = mt76s_alloc_tx(mdev);
+ if (ret)
+ goto error;
+
+ ret = mt76_worker_setup(mt76_hw(dev), &mdev->sdio.txrx_worker,
+ mt7921s_txrx_worker, "sdio-txrx");
+ if (ret)
+ goto error;
+
+ sched_set_fifo_low(mdev->sdio.txrx_worker.task);
+
+ ret = mt7921_register_device(dev);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ mt76s_deinit(&dev->mt76);
+ mt76_free_device(&dev->mt76);
+
+ return ret;
+}
+
+static void mt7921s_remove(struct sdio_func *func)
+{
+ struct mt7921_dev *dev = sdio_get_drvdata(func);
+
+ mt7921s_unregister_device(dev);
+}
+
+#ifdef CONFIG_PM
+static int mt7921s_suspend(struct device *__dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(__dev);
+ struct mt7921_dev *dev = sdio_get_drvdata(func);
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_dev *mdev = &dev->mt76;
+ bool hif_suspend;
+ int err;
+
+ pm->suspended = true;
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+
+ err = mt7921_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ goto restore_suspend;
+
+ hif_suspend = !test_bit(MT76_STATE_SUSPEND, &dev->mphy.state);
+ if (hif_suspend) {
+ err = mt76_connac_mcu_set_hif_suspend(mdev, true);
+ if (err)
+ goto restore_suspend;
+ }
+
+ /* always enable deep sleep during suspend to reduce
+ * power consumption
+ */
+ mt76_connac_mcu_set_deep_sleep(mdev, true);
+
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&mdev->tx_worker);
+ mt76_worker_disable(&mdev->sdio.txrx_worker);
+ mt76_worker_disable(&mdev->sdio.status_worker);
+ mt76_worker_disable(&mdev->sdio.net_worker);
+ cancel_work_sync(&mdev->sdio.stat_work);
+ clear_bit(MT76_READING_STATS, &dev->mphy.state);
+
+ mt76_tx_status_check(mdev, true);
+
+ err = mt7921_mcu_fw_pmctrl(dev);
+ if (err)
+ goto restore_worker;
+
+ sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
+ return 0;
+
+restore_worker:
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_worker_enable(&mdev->sdio.txrx_worker);
+ mt76_worker_enable(&mdev->sdio.status_worker);
+ mt76_worker_enable(&mdev->sdio.net_worker);
+
+ if (!pm->ds_enable)
+ mt76_connac_mcu_set_deep_sleep(mdev, false);
+
+ if (hif_suspend)
+ mt76_connac_mcu_set_hif_suspend(mdev, false);
+
+restore_suspend:
+ pm->suspended = false;
+
+ return err;
+}
+
+static int mt7921s_resume(struct device *__dev)
+{
+ struct sdio_func *func = dev_to_sdio_func(__dev);
+ struct mt7921_dev *dev = sdio_get_drvdata(func);
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_dev *mdev = &dev->mt76;
+ int err;
+
+ pm->suspended = false;
+
+ err = mt7921_mcu_drv_pmctrl(dev);
+ if (err < 0)
+ return err;
+
+ mt76_worker_enable(&mdev->tx_worker);
+ mt76_worker_enable(&mdev->sdio.txrx_worker);
+ mt76_worker_enable(&mdev->sdio.status_worker);
+ mt76_worker_enable(&mdev->sdio.net_worker);
+
+ /* restore previous ds setting */
+ if (!pm->ds_enable)
+ mt76_connac_mcu_set_deep_sleep(mdev, false);
+
+ if (!test_bit(MT76_STATE_SUSPEND, &dev->mphy.state))
+ err = mt76_connac_mcu_set_hif_suspend(mdev, false);
+
+ return err;
+}
+
+static const struct dev_pm_ops mt7921s_pm_ops = {
+ .suspend = mt7921s_suspend,
+ .resume = mt7921s_resume,
+};
+#endif
+
+MODULE_DEVICE_TABLE(sdio, mt7921s_table);
+MODULE_FIRMWARE(MT7921_FIRMWARE_WM);
+MODULE_FIRMWARE(MT7921_ROM_PATCH);
+
+static struct sdio_driver mt7921s_driver = {
+ .name = KBUILD_MODNAME,
+ .probe = mt7921s_probe,
+ .remove = mt7921s_remove,
+ .id_table = mt7921s_table,
+#ifdef CONFIG_PM
+ .drv = {
+ .pm = &mt7921s_pm_ops,
+ }
+#endif
+};
+module_sdio_driver(mt7921s_driver);
+MODULE_AUTHOR("Sean Wang <sean.wang@mediatek.com>");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
new file mode 100644
index 000000000000..137f86a6dbf8
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mac.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#include <linux/iopoll.h>
+#include <linux/mmc/sdio_func.h>
+#include "mt7921.h"
+#include "mac.h"
+#include "../sdio.h"
+
+static void mt7921s_enable_irq(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
+}
+
+static void mt7921s_disable_irq(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ sdio_claim_host(sdio->func);
+ sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
+ sdio_release_host(sdio->func);
+}
+
+static u32 mt7921s_read_whcr(struct mt76_dev *dev)
+{
+ return sdio_readl(dev->sdio.func, MCR_WHCR, NULL);
+}
+
+int mt7921s_wfsys_reset(struct mt7921_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->mt76.sdio;
+ u32 val, status;
+
+ mt7921s_mcu_drv_pmctrl(dev);
+
+ sdio_claim_host(sdio->func);
+
+ val = sdio_readl(sdio->func, MCR_WHCR, NULL);
+ val &= ~WF_WHOLE_PATH_RSTB;
+ sdio_writel(sdio->func, val, MCR_WHCR, NULL);
+
+ msleep(50);
+
+ val = sdio_readl(sdio->func, MCR_WHCR, NULL);
+ val &= ~WF_SDIO_WF_PATH_RSTB;
+ sdio_writel(sdio->func, val, MCR_WHCR, NULL);
+
+ usleep_range(1000, 2000);
+
+ val = sdio_readl(sdio->func, MCR_WHCR, NULL);
+ val |= WF_WHOLE_PATH_RSTB;
+ sdio_writel(sdio->func, val, MCR_WHCR, NULL);
+
+ readx_poll_timeout(mt7921s_read_whcr, &dev->mt76, status,
+ status & WF_RST_DONE, 50000, 2000000);
+
+ sdio_release_host(sdio->func);
+
+ /* activate mt7921s again */
+ mt7921s_mcu_fw_pmctrl(dev);
+ mt7921s_mcu_drv_pmctrl(dev);
+
+ return 0;
+}
+
+int mt7921s_init_reset(struct mt7921_dev *dev)
+{
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+ wait_event_timeout(dev->mt76.sdio.wait,
+ mt76s_txqs_empty(&dev->mt76), 5 * HZ);
+ mt76_worker_disable(&dev->mt76.sdio.txrx_worker);
+
+ mt7921s_disable_irq(&dev->mt76);
+ mt7921s_wfsys_reset(dev);
+
+ mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7921s_enable_irq(&dev->mt76);
+
+ return 0;
+}
+
+int mt7921s_mac_reset(struct mt7921_dev *dev)
+{
+ int err;
+
+ mt76_connac_free_pending_tx_skbs(&dev->pm, NULL);
+ mt76_txq_schedule_all(&dev->mphy);
+ mt76_worker_disable(&dev->mt76.tx_worker);
+ set_bit(MT76_RESET, &dev->mphy.state);
+ set_bit(MT76_MCU_RESET, &dev->mphy.state);
+ wake_up(&dev->mt76.mcu.wait);
+ skb_queue_purge(&dev->mt76.mcu.res_q);
+ wait_event_timeout(dev->mt76.sdio.wait,
+ mt76s_txqs_empty(&dev->mt76), 5 * HZ);
+ mt76_worker_disable(&dev->mt76.sdio.txrx_worker);
+ mt76_worker_disable(&dev->mt76.sdio.status_worker);
+ mt76_worker_disable(&dev->mt76.sdio.net_worker);
+ cancel_work_sync(&dev->mt76.sdio.stat_work);
+
+ mt7921s_disable_irq(&dev->mt76);
+ mt7921s_wfsys_reset(dev);
+
+ mt76_worker_enable(&dev->mt76.sdio.txrx_worker);
+ mt76_worker_enable(&dev->mt76.sdio.status_worker);
+ mt76_worker_enable(&dev->mt76.sdio.net_worker);
+
+ dev->fw_assert = false;
+ clear_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+ clear_bit(MT76_MCU_RESET, &dev->mphy.state);
+ mt7921s_enable_irq(&dev->mt76);
+
+ err = mt7921_run_firmware(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mcu_set_eeprom(dev);
+ if (err)
+ goto out;
+
+ err = mt7921_mac_init(dev);
+ if (err)
+ goto out;
+
+ err = __mt7921_start(&dev->phy);
+out:
+ clear_bit(MT76_RESET, &dev->mphy.state);
+
+ mt76_worker_enable(&dev->mt76.tx_worker);
+
+ return err;
+}
+
+static void
+mt7921s_write_txwi(struct mt7921_dev *dev, struct mt76_wcid *wcid,
+ enum mt76_txq_id qid, struct ieee80211_sta *sta,
+ struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_key_conf *key = info->control.hw_key;
+ __le32 *txwi;
+ int pid;
+
+ pid = mt76_tx_status_skb_add(&dev->mt76, wcid, skb);
+ txwi = (__le32 *)(skb->data - MT_SDIO_TXD_SIZE);
+ memset(txwi, 0, MT_SDIO_TXD_SIZE);
+ mt7921_mac_write_txwi(dev, txwi, skb, wcid, key, pid, false);
+ skb_push(skb, MT_SDIO_TXD_SIZE);
+}
+
+int mt7921s_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr,
+ enum mt76_txq_id qid, struct mt76_wcid *wcid,
+ struct ieee80211_sta *sta,
+ struct mt76_tx_info *tx_info)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_info->skb);
+ struct sk_buff *skb = tx_info->skb;
+ int pad;
+
+ if (unlikely(tx_info->skb->len <= ETH_HLEN))
+ return -EINVAL;
+
+ if (!wcid)
+ wcid = &dev->mt76.global_wcid;
+
+ if (sta) {
+ struct mt7921_sta *msta = (struct mt7921_sta *)sta->drv_priv;
+
+ if (time_after(jiffies, msta->last_txs + HZ / 4)) {
+ info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+ msta->last_txs = jiffies;
+ }
+ }
+
+ mt7921s_write_txwi(dev, wcid, qid, sta, skb);
+
+ mt7921_skb_add_sdio_hdr(skb, MT7921_SDIO_DATA);
+ pad = round_up(skb->len, 4) - skb->len;
+
+ return mt76_skb_adjust_pad(skb, pad);
+}
+
+void mt7921s_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue_entry *e)
+{
+ __le32 *txwi = (__le32 *)(e->skb->data + MT_SDIO_HDR_SIZE);
+ unsigned int headroom = MT_SDIO_TXD_SIZE + MT_SDIO_HDR_SIZE;
+ struct ieee80211_sta *sta;
+ struct mt76_wcid *wcid;
+ u16 idx;
+
+ idx = FIELD_GET(MT_TXD1_WLAN_IDX, le32_to_cpu(txwi[1]));
+ wcid = rcu_dereference(mdev->wcid[idx]);
+ sta = wcid_to_sta(wcid);
+
+ if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE)))
+ mt7921_tx_check_aggr(sta, txwi);
+
+ skb_pull(e->skb, headroom);
+ mt76_tx_complete_skb(mdev, e->wcid, e->skb);
+}
+
+bool mt7921s_tx_status_data(struct mt76_dev *mdev, u8 *update)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+
+ mt7921_mutex_acquire(dev);
+ mt7921_mac_sta_poll(dev);
+ mt7921_mutex_release(dev);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
new file mode 100644
index 000000000000..437cddad9a90
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/sdio_mcu.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: ISC
+/* Copyright (C) 2021 MediaTek Inc. */
+
+#include <linux/kernel.h>
+#include <linux/mmc/sdio_func.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+
+#include "mt7921.h"
+#include "../sdio.h"
+#include "mac.h"
+#include "mcu.h"
+#include "regs.h"
+
+static int
+mt7921s_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb,
+ int cmd, int *seq)
+{
+ struct mt7921_dev *dev = container_of(mdev, struct mt7921_dev, mt76);
+ enum mt7921_sdio_pkt_type type = MT7921_SDIO_CMD;
+ enum mt76_mcuq_id txq = MT_MCUQ_WM;
+ int ret, pad;
+
+ /* Return immediately if the firmware has asserted, to avoid blocking
+ * the common workqueue; for example, the coredump work could otherwise
+ * be blocked by mt7921_mac_work executing register accesses over the
+ * SDIO bus.
+ */
+ if (dev->fw_assert)
+ return -EBUSY;
+
+ ret = mt7921_mcu_fill_message(mdev, skb, cmd, seq);
+ if (ret)
+ return ret;
+
+ if (cmd == MCU_CMD_FW_SCATTER)
+ type = MT7921_SDIO_FWDL;
+
+ mt7921_skb_add_sdio_hdr(skb, type);
+ pad = round_up(skb->len, 4) - skb->len;
+ __skb_put_zero(skb, pad);
+
+ ret = mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0);
+ if (ret)
+ return ret;
+
+ mt76_queue_kick(dev, mdev->q_mcu[txq]);
+
+ return ret;
+}
+
+int mt7921s_mcu_init(struct mt7921_dev *dev)
+{
+ static const struct mt76_mcu_ops mt7921s_mcu_ops = {
+ .headroom = MT_SDIO_HDR_SIZE + sizeof(struct mt7921_mcu_txd),
+ .tailroom = MT_SDIO_TAIL_SIZE,
+ .mcu_skb_send_msg = mt7921s_mcu_send_message,
+ .mcu_parse_response = mt7921_mcu_parse_response,
+ .mcu_rr = mt76_connac_mcu_reg_rr,
+ .mcu_wr = mt76_connac_mcu_reg_wr,
+ };
+ int ret;
+
+ mt7921s_mcu_drv_pmctrl(dev);
+
+ dev->mt76.mcu_ops = &mt7921s_mcu_ops;
+
+ ret = mt7921_run_firmware(dev);
+ if (ret)
+ return ret;
+
+ set_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state);
+
+ return 0;
+}
+
+int mt7921s_mcu_drv_pmctrl(struct mt7921_dev *dev)
+{
+ struct sdio_func *func = dev->mt76.sdio.func;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err = 0;
+ u32 status;
+
+ sdio_claim_host(func);
+
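+	/* Clear the FW-own request so the driver takes ownership of the chip */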
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_CLR, MCR_WHLPCR, NULL);
+
+ err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
+ status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+ sdio_release_host(func);
+
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "driver own failed\n");
+ err = -EIO;
+ goto out;
+ }
+
+ clear_bit(MT76_STATE_PM, &mphy->state);
+
+ pm->stats.last_wake_event = jiffies;
+ pm->stats.doze_time += pm->stats.last_wake_event -
+ pm->stats.last_doze_event;
+out:
+ return err;
+}
+
+int mt7921s_mcu_fw_pmctrl(struct mt7921_dev *dev)
+{
+ struct sdio_func *func = dev->mt76.sdio.func;
+ struct mt76_phy *mphy = &dev->mt76.phy;
+ struct mt76_connac_pm *pm = &dev->pm;
+ int err = 0;
+ u32 status;
+
+ sdio_claim_host(func);
+
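+	/* Set the FW-own request to hand ownership back to the firmware */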
+ sdio_writel(func, WHLPCR_FW_OWN_REQ_SET, MCR_WHLPCR, NULL);
+
+ err = readx_poll_timeout(mt76s_read_pcr, &dev->mt76, status,
+ !(status & WHLPCR_IS_DRIVER_OWN), 2000, 1000000);
+ sdio_release_host(func);
+
+ if (err < 0) {
+ dev_err(dev->mt76.dev, "firmware own failed\n");
+ clear_bit(MT76_STATE_PM, &mphy->state);
+ err = -EIO;
+ }
+
+ pm->stats.last_doze_event = jiffies;
+ pm->stats.awake_time += pm->stats.last_doze_event -
+ pm->stats.last_wake_event;
+
+ return err;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
new file mode 100644
index 000000000000..8bd43879dd6f
--- /dev/null
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/testmode.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: ISC
+
+#include "mt7921.h"
+#include "mcu.h"
+
+enum mt7921_testmode_attr {
+ MT7921_TM_ATTR_UNSPEC,
+ MT7921_TM_ATTR_SET,
+ MT7921_TM_ATTR_QUERY,
+ MT7921_TM_ATTR_RSP,
+
+ /* keep last */
+ NUM_MT7921_TM_ATTRS,
+ MT7921_TM_ATTR_MAX = NUM_MT7921_TM_ATTRS - 1,
+};
+
+struct mt7921_tm_cmd {
+ u8 action;
+ u32 param0;
+ u32 param1;
+};
+
+struct mt7921_tm_evt {
+ u32 param0;
+ u32 param1;
+};
+
+static const struct nla_policy mt7921_tm_policy[NUM_MT7921_TM_ATTRS] = {
+ [MT7921_TM_ATTR_SET] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7921_tm_cmd)),
+ [MT7921_TM_ATTR_QUERY] = NLA_POLICY_EXACT_LEN(sizeof(struct mt7921_tm_cmd)),
+};
+
+static int
+mt7921_tm_set(struct mt7921_dev *dev, struct mt7921_tm_cmd *req)
+{
+ struct mt7921_rftest_cmd cmd = {
+ .action = req->action,
+ .param0 = cpu_to_le32(req->param0),
+ .param1 = cpu_to_le32(req->param1),
+ };
+ bool testmode = false, normal = false;
+ struct mt76_connac_pm *pm = &dev->pm;
+ struct mt76_phy *phy = &dev->mphy;
+ int ret = -ENOTCONN;
+
+ mutex_lock(&dev->mt76.mutex);
+
+ if (req->action == TM_SWITCH_MODE) {
+ if (req->param0 == MT7921_TM_NORMAL)
+ normal = true;
+ else
+ testmode = true;
+ }
+
+ if (testmode) {
+		/* Make sure testmode is running in full power mode */
+ pm->enable = false;
+ cancel_delayed_work_sync(&pm->ps_work);
+ cancel_work_sync(&pm->wake_work);
+ __mt7921_mcu_drv_pmctrl(dev);
+
+ mt76_wr(dev, MT_WF_RFCR(0), dev->mt76.rxfilter);
+ phy->test.state = MT76_TM_STATE_ON;
+ }
+
+ if (!mt76_testmode_enabled(phy))
+ goto out;
+
+ ret = mt76_mcu_send_msg(&dev->mt76, MCU_CMD_TEST_CTRL, &cmd,
+ sizeof(cmd), false);
+ if (ret)
+ goto out;
+
+ if (normal) {
+ /* Switch back to the normal world */
+ phy->test.state = MT76_TM_STATE_OFF;
+ pm->enable = true;
+ }
+out:
+ mutex_unlock(&dev->mt76.mutex);
+
+ return ret;
+}
+
+static int
+mt7921_tm_query(struct mt7921_dev *dev, struct mt7921_tm_cmd *req,
+ struct mt7921_tm_evt *evt_resp)
+{
+ struct mt7921_rftest_cmd cmd = {
+ .action = req->action,
+ .param0 = cpu_to_le32(req->param0),
+ .param1 = cpu_to_le32(req->param1),
+ };
+ struct mt7921_rftest_evt *evt;
+ struct sk_buff *skb;
+ int ret;
+
+ ret = mt76_mcu_send_and_get_msg(&dev->mt76, MCU_CMD_TEST_CTRL,
+ &cmd, sizeof(cmd), true, &skb);
+ if (ret)
+ goto out;
+
+ evt = (struct mt7921_rftest_evt *)skb->data;
+ evt_resp->param0 = le32_to_cpu(evt->param0);
+ evt_resp->param1 = le32_to_cpu(evt->param1);
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+int mt7921_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7921_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR))
+ return -ENOTCONN;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7921_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7921_TM_ATTR_MAX,
+ data, mt7921_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7921_TM_ATTR_SET];
+ if (data)
+ return mt7921_tm_set(phy->dev, nla_data(data));
+ }
+
+ return -EINVAL;
+}
+
+int mt7921_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *msg,
+ struct netlink_callback *cb, void *data, int len)
+{
+ struct nlattr *tb[NUM_MT76_TM_ATTRS];
+ struct mt76_phy *mphy = hw->priv;
+ struct mt7921_phy *phy = mphy->priv;
+ int err;
+
+ if (!test_bit(MT76_STATE_RUNNING, &mphy->state) ||
+ !(hw->conf.flags & IEEE80211_CONF_MONITOR) ||
+ !mt76_testmode_enabled(mphy))
+ return -ENOTCONN;
+
+ if (cb->args[2]++ > 0)
+ return -ENOENT;
+
+ err = nla_parse_deprecated(tb, MT76_TM_ATTR_MAX, data, len,
+ mt76_tm_policy, NULL);
+ if (err)
+ return err;
+
+ if (tb[MT76_TM_ATTR_DRV_DATA]) {
+ struct nlattr *drv_tb[NUM_MT7921_TM_ATTRS], *data;
+ int ret;
+
+ data = tb[MT76_TM_ATTR_DRV_DATA];
+ ret = nla_parse_nested_deprecated(drv_tb,
+ MT7921_TM_ATTR_MAX,
+ data, mt7921_tm_policy,
+ NULL);
+ if (ret)
+ return ret;
+
+ data = drv_tb[MT7921_TM_ATTR_QUERY];
+ if (data) {
+ struct mt7921_tm_evt evt_resp;
+
+ err = mt7921_tm_query(phy->dev, nla_data(data),
+ &evt_resp);
+ if (err)
+ return err;
+
+ return nla_put(msg, MT7921_TM_ATTR_RSP,
+ sizeof(evt_resp), &evt_resp);
+ }
+ }
+
+ return -EINVAL;
+}
diff --git a/drivers/net/wireless/mediatek/mt76/sdio.c b/drivers/net/wireless/mediatek/mt76/sdio.c
index 783a15635ec5..c99acc21225e 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio.c
@@ -16,9 +16,290 @@
#include <linux/kthread.h>
#include "mt76.h"
+#include "sdio.h"
-static int
-mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
+static u32 mt76s_read_whisr(struct mt76_dev *dev)
+{
+ return sdio_readl(dev->sdio.func, MCR_WHISR, NULL);
+}
+
+u32 mt76s_read_pcr(struct mt76_dev *dev)
+{
+ struct mt76_sdio *sdio = &dev->sdio;
+
+ return sdio_readl(sdio->func, MCR_WHLPCR, NULL);
+}
+EXPORT_SYMBOL_GPL(mt76s_read_pcr);
+
+static u32 mt76s_read_mailbox(struct mt76_dev *dev, u32 offset)
+{
+ struct sdio_func *func = dev->sdio.func;
+ u32 val = ~0, status;
+ int err;
+
+ sdio_claim_host(func);
+
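+	/* Tell the firmware which register offset to read via the mailbox */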
+ sdio_writel(func, offset, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_READ, MCR_WSICR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+ goto out;
+ }
+
+ err = readx_poll_timeout(mt76s_read_whisr, dev, status,
+ status & H2D_SW_INT_READ, 0, 1000000);
+ if (err < 0) {
+ dev_err(dev->dev, "query whisr timeout\n");
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_READ, MCR_WHISR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting read mode [err=%d]\n", err);
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+ goto out;
+ }
+
+ if (val != offset) {
+ dev_err(dev->dev, "register mismatch\n");
+ val = ~0;
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_D2HRM1R, &err);
+ if (err < 0)
+ dev_err(dev->dev, "failed reading d2hrm1r [err=%d]\n", err);
+
+out:
+ sdio_release_host(func);
+
+ return val;
+}
+
+static void mt76s_write_mailbox(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ struct sdio_func *func = dev->sdio.func;
+ u32 status;
+ int err;
+
+ sdio_claim_host(func);
+
+ sdio_writel(func, offset, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting address [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, val, MCR_H2DSM1R, &err);
+ if (err < 0) {
+ dev_err(dev->dev,
+ "failed setting write value [err=%d]\n", err);
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_WRITE, MCR_WSICR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+ goto out;
+ }
+
+ err = readx_poll_timeout(mt76s_read_whisr, dev, status,
+ status & H2D_SW_INT_WRITE, 0, 1000000);
+ if (err < 0) {
+ dev_err(dev->dev, "query whisr timeout\n");
+ goto out;
+ }
+
+ sdio_writel(func, H2D_SW_INT_WRITE, MCR_WHISR, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed setting write mode [err=%d]\n", err);
+ goto out;
+ }
+
+ val = sdio_readl(func, MCR_H2DSM0R, &err);
+ if (err < 0) {
+ dev_err(dev->dev, "failed reading h2dsm0r [err=%d]\n", err);
+ goto out;
+ }
+
+ if (val != offset)
+ dev_err(dev->dev, "register mismatch\n");
+
+out:
+ sdio_release_host(func);
+}
+
+u32 mt76s_rr(struct mt76_dev *dev, u32 offset)
+{
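+	/* Once the MCU is running, route register access through MCU commands;
+	 * before that, fall back to the SDIO mailbox protocol.
+	 */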
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ return dev->mcu_ops->mcu_rr(dev, offset);
+ else
+ return mt76s_read_mailbox(dev, offset);
+}
+EXPORT_SYMBOL_GPL(mt76s_rr);
+
+void mt76s_wr(struct mt76_dev *dev, u32 offset, u32 val)
+{
+ if (test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state))
+ dev->mcu_ops->mcu_wr(dev, offset, val);
+ else
+ mt76s_write_mailbox(dev, offset, val);
+}
+EXPORT_SYMBOL_GPL(mt76s_wr);
+
+u32 mt76s_rmw(struct mt76_dev *dev, u32 offset, u32 mask, u32 val)
+{
+ val |= mt76s_rr(dev, offset) & ~mask;
+ mt76s_wr(dev, offset, val);
+
+ return val;
+}
+EXPORT_SYMBOL_GPL(mt76s_rmw);
+
+void mt76s_write_copy(struct mt76_dev *dev, u32 offset,
+ const void *data, int len)
+{
+ const u32 *val = data;
+ int i;
+
+ for (i = 0; i < len / sizeof(u32); i++) {
+ mt76s_wr(dev, offset, val[i]);
+ offset += sizeof(u32);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76s_write_copy);
+
+void mt76s_read_copy(struct mt76_dev *dev, u32 offset,
+ void *data, int len)
+{
+ u32 *val = data;
+ int i;
+
+ for (i = 0; i < len / sizeof(u32); i++) {
+ val[i] = mt76s_rr(dev, offset);
+ offset += sizeof(u32);
+ }
+}
+EXPORT_SYMBOL_GPL(mt76s_read_copy);
+
+int mt76s_wr_rp(struct mt76_dev *dev, u32 base,
+ const struct mt76_reg_pair *data,
+ int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ mt76s_wr(dev, data->reg, data->value);
+ data++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76s_wr_rp);
+
+int mt76s_rd_rp(struct mt76_dev *dev, u32 base,
+ struct mt76_reg_pair *data, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ data->value = mt76s_rr(dev, data->reg);
+ data++;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mt76s_rd_rp);
+
+int mt76s_hw_init(struct mt76_dev *dev, struct sdio_func *func, int hw_ver)
+{
+ u32 status, ctrl;
+ int ret;
+
+ dev->sdio.hw_ver = hw_ver;
+
+ sdio_claim_host(func);
+
+ ret = sdio_enable_func(func);
+ if (ret < 0)
+ goto release;
+
+ /* Get ownership from the device */
+ sdio_writel(func, WHLPCR_INT_EN_CLR | WHLPCR_FW_OWN_REQ_CLR,
+ MCR_WHLPCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ret = readx_poll_timeout(mt76s_read_pcr, dev, status,
+ status & WHLPCR_IS_DRIVER_OWN, 2000, 1000000);
+ if (ret < 0) {
+ dev_err(dev->dev, "Cannot get ownership from device");
+ goto disable_func;
+ }
+
+ ret = sdio_set_block_size(func, 512);
+ if (ret < 0)
+ goto disable_func;
+
+ /* Enable interrupt */
+ sdio_writel(func, WHLPCR_INT_EN_SET, MCR_WHLPCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ctrl = WHIER_RX0_DONE_INT_EN | WHIER_TX_DONE_INT_EN;
+ if (hw_ver == MT76_CONNAC2_SDIO)
+ ctrl |= WHIER_RX1_DONE_INT_EN;
+ sdio_writel(func, ctrl, MCR_WHIER, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ switch (hw_ver) {
+ case MT76_CONNAC_SDIO:
+ /* set WHISR as read clear and Rx aggregation number as 16 */
+ ctrl = FIELD_PREP(MAX_HIF_RX_LEN_NUM, 16);
+ break;
+ default:
+ ctrl = sdio_readl(func, MCR_WHCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+ ctrl &= ~MAX_HIF_RX_LEN_NUM_CONNAC2;
+ ctrl &= ~W_INT_CLR_CTRL; /* read clear */
+ ctrl |= FIELD_PREP(MAX_HIF_RX_LEN_NUM_CONNAC2, 0);
+ break;
+ }
+
+ sdio_writel(func, ctrl, MCR_WHCR, &ret);
+ if (ret < 0)
+ goto disable_func;
+
+ ret = sdio_claim_irq(func, mt76s_sdio_irq);
+ if (ret < 0)
+ goto disable_func;
+
+ sdio_release_host(func);
+
+ return 0;
+
+disable_func:
+ sdio_disable_func(func);
+release:
+ sdio_release_host(func);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(mt76s_hw_init);
+
+int mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
struct mt76_queue *q = &dev->q_rx[qid];
@@ -35,6 +316,7 @@ mt76s_alloc_rx_queue(struct mt76_dev *dev, enum mt76_rxq_id qid)
return 0;
}
+EXPORT_SYMBOL_GPL(mt76s_alloc_rx_queue);
static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
{
@@ -56,7 +338,7 @@ static struct mt76_queue *mt76s_alloc_tx_queue(struct mt76_dev *dev)
return q;
}
-static int mt76s_alloc_tx(struct mt76_dev *dev)
+int mt76s_alloc_tx(struct mt76_dev *dev)
{
struct mt76_queue *q;
int i;
@@ -79,18 +361,7 @@ static int mt76s_alloc_tx(struct mt76_dev *dev)
return 0;
}
-
-int mt76s_alloc_queues(struct mt76_dev *dev)
-{
- int err;
-
- err = mt76s_alloc_rx_queue(dev, MT_RXQ_MAIN);
- if (err < 0)
- return err;
-
- return mt76s_alloc_tx(dev);
-}
-EXPORT_SYMBOL_GPL(mt76s_alloc_queues);
+EXPORT_SYMBOL_GPL(mt76s_alloc_tx);
static struct mt76_queue_entry *
mt76s_get_next_rx_entry(struct mt76_queue *q)
@@ -328,7 +599,7 @@ void mt76s_deinit(struct mt76_dev *dev)
cancel_work_sync(&sdio->stat_work);
clear_bit(MT76_READING_STATS, &dev->phy.state);
- mt76_tx_status_check(dev, NULL, true);
+ mt76_tx_status_check(dev, true);
sdio_claim_host(sdio->func);
sdio_release_irq(sdio->func);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h b/drivers/net/wireless/mediatek/mt76/sdio.h
index 03877d89e152..99db4ad93b7c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio.h
+++ b/drivers/net/wireless/mediatek/mt76/sdio.h
@@ -21,7 +21,12 @@
#define MCR_WHCR 0x000C
#define W_INT_CLR_CTRL BIT(1)
#define RECV_MAILBOX_RD_CLR_EN BIT(2)
+#define WF_SYS_RSTB BIT(4) /* supported in CONNAC2 */
+#define WF_WHOLE_PATH_RSTB BIT(5) /* supported in CONNAC2 */
+#define WF_SDIO_WF_PATH_RSTB BIT(6) /* supported in CONNAC2 */
#define MAX_HIF_RX_LEN_NUM GENMASK(13, 8)
+#define MAX_HIF_RX_LEN_NUM_CONNAC2 GENMASK(14, 8) /* supported in CONNAC2 */
+#define WF_RST_DONE BIT(15) /* supported in CONNAC2 */
#define RX_ENHANCE_MODE BIT(16)
#define MCR_WHISR 0x0010
@@ -29,6 +34,7 @@
#define WHIER_D2H_SW_INT GENMASK(31, 8)
#define WHIER_FW_OWN_BACK_INT_EN BIT(7)
#define WHIER_ABNORMAL_INT_EN BIT(6)
+#define WHIER_WDT_INT_EN BIT(5) /* supported in CONNAC2 */
#define WHIER_RX1_DONE_INT_EN BIT(2)
#define WHIER_RX0_DONE_INT_EN BIT(1)
#define WHIER_TX_DONE_INT_EN BIT(0)
@@ -100,16 +106,33 @@
#define MCR_SWPCDBGR 0x0154
+#define MCR_H2DSM2R 0x0160 /* supported in CONNAC2 */
+#define MCR_H2DSM3R 0x0164 /* supported in CONNAC2 */
+#define MCR_D2HRM3R 0x0174 /* supported in CONNAC2 */
+#define MCR_WTQCR8 0x0190 /* supported in CONNAC2 */
+#define MCR_WTQCR9 0x0194 /* supported in CONNAC2 */
+#define MCR_WTQCR10 0x0198 /* supported in CONNAC2 */
+#define MCR_WTQCR11 0x019C /* supported in CONNAC2 */
+#define MCR_WTQCR12 0x01A0 /* supported in CONNAC2 */
+#define MCR_WTQCR13 0x01A4 /* supported in CONNAC2 */
+#define MCR_WTQCR14 0x01A8 /* supported in CONNAC2 */
+#define MCR_WTQCR15 0x01AC /* supported in CONNAC2 */
+
+enum mt76_connac_sdio_ver {
+ MT76_CONNAC_SDIO,
+ MT76_CONNAC2_SDIO,
+};
+
struct mt76s_intr {
u32 isr;
+ u32 *rec_mb;
struct {
- u32 wtqcr[8];
+ u32 *wtqcr;
} tx;
struct {
- u16 num[2];
- u16 len[2][16];
+ u16 *len[2];
+ u16 *num;
} rx;
- u32 rec_mb[2];
-} __packed;
+};
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 04f4c89b7499..649a56790b89 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -14,12 +14,11 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
-#include "../trace.h"
-#include "mt7615.h"
+#include "trace.h"
#include "sdio.h"
-#include "mac.h"
+#include "mt76.h"
-static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
+static int mt76s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
{
u32 ple_ac_data_quota[] = {
FIELD_GET(TXQ_CNT_L, data[4]), /* VO */
@@ -53,8 +52,8 @@ static int mt7663s_refill_sched_quota(struct mt76_dev *dev, u32 *data)
return pse_data_quota + ple_data_quota + pse_mcu_quota;
}
-static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
- int buf_len)
+static struct sk_buff *
+mt76s_build_rx_skb(void *data, int data_len, int buf_len)
{
int len = min_t(int, data_len, MT_SKB_HEAD_LEN);
struct sk_buff *skb;
@@ -78,8 +77,9 @@ static struct sk_buff *mt7663s_build_rx_skb(void *data, int data_len,
return skb;
}
-static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
- struct mt76s_intr *intr)
+static int
+mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
+ struct mt76s_intr *intr)
{
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
@@ -112,9 +112,11 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
for (i = 0; i < intr->rx.num[qid]; i++) {
int index = (q->head + i) % q->ndesc;
struct mt76_queue_entry *e = &q->entry[index];
+ __le32 *rxd = (__le32 *)buf;
- len = intr->rx.len[qid][i];
- e->skb = mt7663s_build_rx_skb(buf, len, round_up(len + 4, 4));
+ /* parse rxd to get the actual packet length */
+ len = FIELD_GET(GENMASK(15, 0), le32_to_cpu(rxd[0]));
+ e->skb = mt76s_build_rx_skb(buf, len, round_up(len + 4, 4));
if (!e->skb)
break;
@@ -132,45 +134,50 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
return i;
}
-static int mt7663s_rx_handler(struct mt76_dev *dev)
+static int mt76s_rx_handler(struct mt76_dev *dev)
{
struct mt76_sdio *sdio = &dev->sdio;
- struct mt76s_intr *intr = sdio->intr_data;
+ struct mt76s_intr intr;
int nframes = 0, ret;
- ret = sdio_readsb(sdio->func, intr, MCR_WHISR, sizeof(*intr));
- if (ret < 0)
+ ret = sdio->parse_irq(dev, &intr);
+ if (ret)
return ret;
- trace_dev_irq(dev, intr->isr, 0);
+ trace_dev_irq(dev, intr.isr, 0);
- if (intr->isr & WHIER_RX0_DONE_INT_EN) {
- ret = mt7663s_rx_run_queue(dev, 0, intr);
+ if (intr.isr & WHIER_RX0_DONE_INT_EN) {
+ ret = mt76s_rx_run_queue(dev, 0, &intr);
if (ret > 0) {
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
- if (intr->isr & WHIER_RX1_DONE_INT_EN) {
- ret = mt7663s_rx_run_queue(dev, 1, intr);
+ if (intr.isr & WHIER_RX1_DONE_INT_EN) {
+ ret = mt76s_rx_run_queue(dev, 1, &intr);
if (ret > 0) {
mt76_worker_schedule(&sdio->net_worker);
nframes += ret;
}
}
- nframes += !!mt7663s_refill_sched_quota(dev, intr->tx.wtqcr);
+ nframes += !!mt76s_refill_sched_quota(dev, intr.tx.wtqcr);
return nframes;
}
-static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
- int *pse_size, int *ple_size)
+static int
+mt76s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
+ int *pse_size, int *ple_size)
{
int pse_sz;
- pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit, MT_PSE_PAGE_SZ);
+ pse_sz = DIV_ROUND_UP(buf_sz + sdio->sched.deficit,
+ sdio->sched.pse_page_size);
+
+ if (mcu && sdio->hw_ver == MT76_CONNAC2_SDIO)
+ pse_sz = 1;
if (mcu) {
if (sdio->sched.pse_mcu_quota < *pse_size + pse_sz)
@@ -187,8 +194,9 @@ static int mt7663s_tx_pick_quota(struct mt76_sdio *sdio, bool mcu, int buf_sz,
return 0;
}
-static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu,
- int pse_size, int ple_size)
+static void
+mt76s_tx_update_quota(struct mt76_sdio *sdio, bool mcu, int pse_size,
+ int ple_size)
{
if (mcu) {
sdio->sched.pse_mcu_quota -= pse_size;
@@ -198,7 +206,7 @@ static void mt7663s_tx_update_quota(struct mt76_sdio *sdio, bool mcu,
}
}
-static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
+static int __mt76s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
{
struct mt76_sdio *sdio = &dev->sdio;
int err;
@@ -213,7 +221,7 @@ static int __mt7663s_xmit_queue(struct mt76_dev *dev, u8 *data, int len)
return err;
}
-static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
+static int mt76s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
int qid, err, nframes = 0, len = 0, pse_sz = 0, ple_sz = 0;
bool mcu = q == dev->q_mcu[MT_MCUQ_WM];
@@ -227,10 +235,13 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
smp_rmb();
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state))
+ goto next;
+
if (!test_bit(MT76_STATE_MCU_RUNNING, &dev->phy.state)) {
__skb_put_zero(e->skb, 4);
- err = __mt7663s_xmit_queue(dev, e->skb->data,
- e->skb->len);
+ err = __mt76s_xmit_queue(dev, e->skb->data,
+ e->skb->len);
if (err)
return err;
@@ -241,8 +252,8 @@ static int mt7663s_tx_run_queue(struct mt76_dev *dev, struct mt76_queue *q)
if (len + e->skb->len + pad + 4 > MT76S_XMIT_BUF_SZ)
break;
- if (mt7663s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
- &ple_sz))
+ if (mt76s_tx_pick_quota(sdio, mcu, e->buf_sz, &pse_sz,
+ &ple_sz))
break;
memcpy(sdio->xmit_buf[qid] + len, e->skb->data,
@@ -268,30 +279,22 @@ next:
if (nframes) {
memset(sdio->xmit_buf[qid] + len, 0, 4);
- err = __mt7663s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
+ err = __mt76s_xmit_queue(dev, sdio->xmit_buf[qid], len + 4);
if (err)
return err;
}
- mt7663s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);
+ mt76s_tx_update_quota(sdio, mcu, pse_sz, ple_sz);
mt76_worker_schedule(&sdio->status_worker);
return nframes;
}
-void mt7663s_txrx_worker(struct mt76_worker *w)
+void mt76s_txrx_worker(struct mt76_sdio *sdio)
{
- struct mt76_sdio *sdio = container_of(w, struct mt76_sdio,
- txrx_worker);
- struct mt76_dev *mdev = container_of(sdio, struct mt76_dev, sdio);
- struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76);
+ struct mt76_dev *dev = container_of(sdio, struct mt76_dev, sdio);
int i, nframes, ret;
- if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) {
- queue_work(mdev->wq, &dev->pm.wake_work);
- return;
- }
-
/* disable interrupt */
sdio_claim_host(sdio->func);
sdio_writel(sdio->func, WHLPCR_INT_EN_CLR, MCR_WHLPCR, NULL);
@@ -301,34 +304,61 @@ void mt7663s_txrx_worker(struct mt76_worker *w)
/* tx */
for (i = 0; i <= MT_TXQ_PSD; i++) {
- ret = mt7663s_tx_run_queue(mdev, mdev->phy.q_tx[i]);
+ ret = mt76s_tx_run_queue(dev, dev->phy.q_tx[i]);
if (ret > 0)
nframes += ret;
}
- ret = mt7663s_tx_run_queue(mdev, mdev->q_mcu[MT_MCUQ_WM]);
+ ret = mt76s_tx_run_queue(dev, dev->q_mcu[MT_MCUQ_WM]);
if (ret > 0)
nframes += ret;
/* rx */
- ret = mt7663s_rx_handler(mdev);
+ ret = mt76s_rx_handler(dev);
if (ret > 0)
nframes += ret;
+
+ if (test_bit(MT76_MCU_RESET, &dev->phy.state)) {
+ if (!mt76s_txqs_empty(dev))
+ continue;
+ else
+ wake_up(&sdio->wait);
+ }
} while (nframes > 0);
/* enable interrupt */
sdio_writel(sdio->func, WHLPCR_INT_EN_SET, MCR_WHLPCR, NULL);
sdio_release_host(sdio->func);
-
- mt76_connac_pm_unref(&dev->mphy, &dev->pm);
}
+EXPORT_SYMBOL_GPL(mt76s_txrx_worker);
-void mt7663s_sdio_irq(struct sdio_func *func)
+void mt76s_sdio_irq(struct sdio_func *func)
{
- struct mt7615_dev *dev = sdio_get_drvdata(func);
- struct mt76_sdio *sdio = &dev->mt76.sdio;
+ struct mt76_dev *dev = sdio_get_drvdata(func);
+ struct mt76_sdio *sdio = &dev->sdio;
- if (!test_bit(MT76_STATE_INITIALIZED, &dev->mt76.phy.state))
+ if (!test_bit(MT76_STATE_INITIALIZED, &dev->phy.state) ||
+ test_bit(MT76_MCU_RESET, &dev->phy.state))
return;
mt76_worker_schedule(&sdio->txrx_worker);
}
+EXPORT_SYMBOL_GPL(mt76s_sdio_irq);
+
+bool mt76s_txqs_empty(struct mt76_dev *dev)
+{
+ struct mt76_queue *q;
+ int i;
+
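+	/* Check the per-AC data queues plus the WM MCU queue */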
+ for (i = 0; i <= MT_TXQ_PSD + 1; i++) {
+ if (i <= MT_TXQ_PSD)
+ q = dev->phy.q_tx[i];
+ else
+ q = dev->q_mcu[MT_MCUQ_WM];
+
+ if (q->first != q->head)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(mt76s_txqs_empty);
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.c b/drivers/net/wireless/mediatek/mt76/testmode.c
index f73ffbd6e622..66afc2b0a935 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.c
+++ b/drivers/net/wireless/mediatek/mt76/testmode.c
@@ -2,7 +2,7 @@
/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
#include "mt76.h"
-static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
+const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
[MT76_TM_ATTR_RESET] = { .type = NLA_FLAG },
[MT76_TM_ATTR_STATE] = { .type = NLA_U8 },
[MT76_TM_ATTR_TX_COUNT] = { .type = NLA_U32 },
@@ -21,7 +21,9 @@ static const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS] = {
[MT76_TM_ATTR_TX_IPG] = { .type = NLA_U32 },
[MT76_TM_ATTR_TX_TIME] = { .type = NLA_U32 },
[MT76_TM_ATTR_FREQ_OFFSET] = { .type = NLA_U32 },
+ [MT76_TM_ATTR_DRV_DATA] = { .type = NLA_NESTED },
};
+EXPORT_SYMBOL_GPL(mt76_tm_policy);
void mt76_testmode_tx_pending(struct mt76_phy *phy)
{
diff --git a/drivers/net/wireless/mediatek/mt76/testmode.h b/drivers/net/wireless/mediatek/mt76/testmode.h
index d32a7654c47e..d1f9c036dd1f 100644
--- a/drivers/net/wireless/mediatek/mt76/testmode.h
+++ b/drivers/net/wireless/mediatek/mt76/testmode.h
@@ -44,6 +44,7 @@
* @MT76_TM_ATTR_TX_IPG: tx inter-packet gap, in unit of us (u32)
* @MT76_TM_ATTR_TX_TIME: packet transmission time, in unit of us (u32)
*
+ * @MT76_TM_ATTR_DRV_DATA: driver specific netlink attrs (nested)
*/
enum mt76_testmode_attr {
MT76_TM_ATTR_UNSPEC,
@@ -78,6 +79,8 @@ enum mt76_testmode_attr {
MT76_TM_ATTR_TX_IPG,
MT76_TM_ATTR_TX_TIME,
+ MT76_TM_ATTR_DRV_DATA,
+
/* keep last */
NUM_MT76_TM_ATTRS,
MT76_TM_ATTR_MAX = NUM_MT76_TM_ATTRS - 1,
@@ -144,6 +147,7 @@ enum mt76_testmode_rx_attr {
* @MT76_TM_STATE_TX_FRAMES: send a fixed number of test frames
* @MT76_TM_STATE_RX_FRAMES: receive packets and keep statistics
* @MT76_TM_STATE_TX_CONT: waveform tx without time gap
+ * @MT76_TM_STATE_ON: test mode enabled, used with offload firmware
*/
enum mt76_testmode_state {
MT76_TM_STATE_OFF,
@@ -151,6 +155,7 @@ enum mt76_testmode_state {
MT76_TM_STATE_TX_FRAMES,
MT76_TM_STATE_RX_FRAMES,
MT76_TM_STATE_TX_CONT,
+ MT76_TM_STATE_ON,
/* keep last */
NUM_MT76_TM_STATES,
@@ -184,4 +189,6 @@ enum mt76_testmode_tx_mode {
MT76_TM_TX_MODE_MAX = NUM_MT76_TM_TX_MODES - 1,
};
+extern const struct nla_policy mt76_tm_policy[NUM_MT76_TM_ATTRS];
+
#endif
diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c
index f0f7a913eaab..11719ef034d8 100644
--- a/drivers/net/wireless/mediatek/mt76/tx.c
+++ b/drivers/net/wireless/mediatek/mt76/tx.c
@@ -38,21 +38,21 @@ EXPORT_SYMBOL_GPL(mt76_tx_check_agg_ssn);
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
- __acquires(&dev->status_list.lock)
+ __acquires(&dev->status_lock)
{
__skb_queue_head_init(list);
- spin_lock_bh(&dev->status_list.lock);
+ spin_lock_bh(&dev->status_lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);
void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
- __releases(&dev->status_list.lock)
+ __releases(&dev->status_lock)
{
struct ieee80211_hw *hw;
struct sk_buff *skb;
- spin_unlock_bh(&dev->status_list.lock);
+ spin_unlock_bh(&dev->status_lock);
rcu_read_lock();
while ((skb = __skb_dequeue(list)) != NULL) {
@@ -64,9 +64,13 @@ mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
struct mt76_wcid *wcid;
wcid = rcu_dereference(dev->wcid[cb->wcid]);
- if (wcid)
+ if (wcid) {
status.sta = wcid_to_sta(wcid);
+ if (status.sta)
+ status.rate = &wcid->rate;
+ }
+
hw = mt76_tx_status_get_hw(dev, skb);
ieee80211_tx_status_ext(hw, &status);
}
@@ -88,8 +92,6 @@ __mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
if ((flags & done) != done)
return;
- __skb_unlink(skb, &dev->status_list);
-
/* Tx status can be unreliable. if it fails, mark the frame as ACKed */
if (flags & MT_TX_CB_TXS_FAILED) {
info->status.rates[0].count = 0;
@@ -116,6 +118,8 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
int pid;
+ memset(cb, 0, sizeof(*cb));
+
if (!wcid)
return MT_PACKET_ID_NO_ACK;
@@ -126,16 +130,23 @@ mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
return MT_PACKET_ID_NO_SKB;
- spin_lock_bh(&dev->status_list.lock);
+ spin_lock_bh(&dev->status_lock);
+
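+	/* Allocate a packet ID so the TXS callback can find this frame later */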
+ pid = idr_alloc(&wcid->pktid, skb, MT_PACKET_ID_FIRST,
+ MT_PACKET_ID_MASK, GFP_ATOMIC);
+ if (pid < 0) {
+ pid = MT_PACKET_ID_NO_SKB;
+ goto out;
+ }
- memset(cb, 0, sizeof(*cb));
- pid = mt76_get_next_pkt_id(wcid);
cb->wcid = wcid->idx;
cb->pktid = pid;
- cb->jiffies = jiffies;
- __skb_queue_tail(&dev->status_list, skb);
- spin_unlock_bh(&dev->status_list.lock);
+ if (list_empty(&wcid->list))
+ list_add_tail(&wcid->list, &dev->wcid_list);
+
+out:
+ spin_unlock_bh(&dev->status_lock);
return pid;
}
@@ -145,36 +156,53 @@ struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
struct sk_buff_head *list)
{
- struct sk_buff *skb, *tmp;
+ struct sk_buff *skb;
+ int id;
- skb_queue_walk_safe(&dev->status_list, skb, tmp) {
- struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
+ lockdep_assert_held(&dev->status_lock);
- if (wcid && cb->wcid != wcid->idx)
- continue;
+ skb = idr_remove(&wcid->pktid, pktid);
+ if (skb)
+ goto out;
- if (cb->pktid == pktid)
- return skb;
+ /* look for stale entries in the wcid idr queue */
+ idr_for_each_entry(&wcid->pktid, skb, id) {
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
- if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
- MT_TX_STATUS_SKB_TIMEOUT))
- continue;
+ if (pktid >= 0) {
+ if (!(cb->flags & MT_TX_CB_DMA_DONE))
+ continue;
+
+ if (!time_is_after_jiffies(cb->jiffies +
+ MT_TX_STATUS_SKB_TIMEOUT))
+ continue;
+ }
+ /* It has been too long since DMA_DONE, time out this packet
+ * and stop waiting for TXS callback.
+ */
+ idr_remove(&wcid->pktid, cb->pktid);
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
MT_TX_CB_TXS_DONE, list);
}
- return NULL;
+out:
+ if (idr_is_empty(&wcid->pktid))
+ list_del_init(&wcid->list);
+
+ return skb;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);
void
-mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
+mt76_tx_status_check(struct mt76_dev *dev, bool flush)
{
+ struct mt76_wcid *wcid, *tmp;
struct sk_buff_head list;
mt76_tx_status_lock(dev, &list);
- mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
+ list_for_each_entry_safe(wcid, tmp, &dev->wcid_list, list)
+ mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);
@@ -197,6 +225,7 @@ mt76_tx_check_non_aql(struct mt76_dev *dev, struct mt76_wcid *wcid,
void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *skb,
struct list_head *free_list)
{
+ struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
struct ieee80211_tx_status status = {
.skb = skb,
.free_list = free_list,
@@ -226,7 +255,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
}
#endif
- if (!skb->prev) {
+ if (cb->pktid < MT_PACKET_ID_FIRST) {
hw = mt76_tx_status_get_hw(dev, skb);
status.sta = wcid_to_sta(wcid);
ieee80211_tx_status_ext(hw, &status);
@@ -234,6 +263,7 @@ void __mt76_tx_complete_skb(struct mt76_dev *dev, u16 wcid_idx, struct sk_buff *
}
mt76_tx_status_lock(dev, &list);
+ cb->jiffies = jiffies;
__mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
mt76_tx_status_unlock(dev, &list);
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index 1e9f60bb811a..0a7006c8959b 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -1081,7 +1081,7 @@ void mt76u_stop_tx(struct mt76_dev *dev)
mt76_worker_enable(&dev->usb.status_worker);
- mt76_tx_status_check(dev, NULL, true);
+ mt76_tx_status_check(dev, true);
}
EXPORT_SYMBOL_GPL(mt76u_stop_tx);
diff --git a/drivers/net/wireless/mediatek/mt76/util.h b/drivers/net/wireless/mediatek/mt76/util.h
index 1c363ea9ab9c..49c52d781f40 100644
--- a/drivers/net/wireless/mediatek/mt76/util.h
+++ b/drivers/net/wireless/mediatek/mt76/util.h
@@ -70,17 +70,15 @@ mt76_worker_setup(struct ieee80211_hw *hw, struct mt76_worker *w,
if (fn)
w->fn = fn;
- w->task = kthread_create(__mt76_worker_fn, w, "mt76-%s %s",
- name, dev_name);
+ w->task = kthread_run(__mt76_worker_fn, w,
+ "mt76-%s %s", name, dev_name);
- ret = PTR_ERR_OR_ZERO(w->task);
- if (ret) {
+ if (IS_ERR(w->task)) {
+ ret = PTR_ERR(w->task);
w->task = NULL;
return ret;
}
- wake_up_process(w->task);
-
return 0;
}
diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c
index ed78d2cb35e3..457147394edc 100644
--- a/drivers/net/wireless/mediatek/mt7601u/dma.c
+++ b/drivers/net/wireless/mediatek/mt7601u/dma.c
@@ -515,7 +515,7 @@ static int mt7601u_alloc_tx(struct mt7601u_dev *dev)
int mt7601u_dma_init(struct mt7601u_dev *dev)
{
- int ret = -ENOMEM;
+ int ret;
tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet);
tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet);
diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
index 96973ec7bd9a..dc4bfe7be378 100644
--- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c
+++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c
@@ -129,8 +129,7 @@ static void cfg_scan_result(enum scan_event scan_event,
info->frame_len,
(s32)info->rssi * 100,
GFP_KERNEL);
- if (!bss)
- cfg80211_put_bss(wiphy, bss);
+ cfg80211_put_bss(wiphy, bss);
} else if (scan_event == SCAN_EVENT_DONE) {
mutex_lock(&priv->scan_req_lock);
@@ -729,6 +728,7 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
{
struct wilc_vif *vif = netdev_priv(dev);
struct wilc_priv *priv = &vif->priv;
+ struct wilc *wilc = vif->wilc;
u32 i = 0;
u32 associatedsta = ~0;
u32 inactive_time = 0;
@@ -755,6 +755,9 @@ static int get_station(struct wiphy *wiphy, struct net_device *dev,
} else if (vif->iftype == WILC_STATION_MODE) {
struct rf_info stats;
+ if (!wilc->initialized)
+ return -EBUSY;
+
wilc_get_statistics(vif, &stats);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) |
@@ -1581,6 +1584,7 @@ static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled)
}
netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled);
+ wilc_set_wowlan_trigger(vif, enabled);
srcu_read_unlock(&wl->srcu, srcu_idx);
}
@@ -1683,6 +1687,7 @@ static void wlan_init_locks(struct wilc *wl)
mutex_init(&wl->rxq_cs);
mutex_init(&wl->cfg_cmd_lock);
mutex_init(&wl->vif_mutex);
+ mutex_init(&wl->deinit_lock);
spin_lock_init(&wl->txq_spinlock);
mutex_init(&wl->txq_add_to_head_cs);
@@ -1701,6 +1706,7 @@ void wlan_deinit_locks(struct wilc *wilc)
mutex_destroy(&wilc->cfg_cmd_lock);
mutex_destroy(&wilc->txq_add_to_head_cs);
mutex_destroy(&wilc->vif_mutex);
+ mutex_destroy(&wilc->deinit_lock);
cleanup_srcu_struct(&wilc->srcu);
}
@@ -1724,7 +1730,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type,
*wilc = wl;
wl->io_type = io_type;
wl->hif_func = ops;
- wl->chip_ps_state = WILC_CHIP_WAKEDUP;
for (i = 0; i < NQUEUES; i++)
INIT_LIST_HEAD(&wl->txq[i].txq_head.list);
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c
index a133736a7821..e69b9c7f3d31 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.c
+++ b/drivers/net/wireless/microchip/wilc1000/hif.c
@@ -23,6 +23,10 @@ struct wilc_set_multicast {
u8 *mc_list;
};
+struct host_if_wowlan_trigger {
+ u8 wowlan_trigger;
+};
+
struct wilc_del_all_sta {
u8 assoc_sta;
u8 mac[WILC_MAX_NUM_STA][ETH_ALEN];
@@ -34,6 +38,7 @@ union wilc_message_body {
struct wilc_set_multicast mc_info;
struct wilc_remain_ch remain_on_ch;
char *data;
+ struct host_if_wowlan_trigger wow_trigger;
};
struct host_if_msg {
@@ -962,6 +967,25 @@ error:
kfree(msg);
}
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled)
+{
+ int ret;
+ struct wid wid;
+ u8 wowlan_trigger = 0;
+
+ if (enabled)
+ wowlan_trigger = 1;
+
+ wid.id = WID_WOWLAN_TRIGGER;
+ wid.type = WID_CHAR;
+ wid.val = &wowlan_trigger;
+ wid.size = sizeof(char);
+
+ ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1);
+ if (ret)
+ pr_err("Failed to send wowlan trigger config packet\n");
+}
+
static void handle_scan_timer(struct work_struct *work)
{
struct host_if_msg *msg = container_of(work, struct host_if_msg, work);
@@ -1494,7 +1518,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
{
struct host_if_drv *hif_drv;
struct wilc_vif *vif = netdev_priv(dev);
- struct wilc *wilc = vif->wilc;
hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL);
if (!hif_drv)
@@ -1504,9 +1527,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
vif->hif_drv = hif_drv;
- if (wilc->clients_count == 0)
- mutex_init(&wilc->deinit_lock);
-
timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0);
mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000));
@@ -1518,8 +1538,6 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
hif_drv->p2p_timeout = 0;
- wilc->clients_count++;
-
return 0;
}
@@ -1550,7 +1568,6 @@ int wilc_deinit(struct wilc_vif *vif)
kfree(hif_drv);
vif->hif_drv = NULL;
- vif->wilc->clients_count--;
mutex_unlock(&vif->wilc->deinit_lock);
return result;
}
diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h
index 58811911213b..cccd54ed0518 100644
--- a/drivers/net/wireless/microchip/wilc1000/hif.h
+++ b/drivers/net/wireless/microchip/wilc1000/hif.h
@@ -207,6 +207,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats);
int wilc_get_vif_idx(struct wilc_vif *vif);
int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power);
int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power);
+void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled);
void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length);
void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length);
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c
index 7e4d9235251c..690572e01a2a 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.c
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.c
@@ -111,7 +111,8 @@ out:
return ndev;
}
-void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode)
+void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
+ u8 mode)
{
struct wilc_vif *vif = netdev_priv(wilc_netdev);
@@ -594,10 +595,14 @@ static int wilc_mac_open(struct net_device *ndev)
wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype,
vif->idx);
- if (is_valid_ether_addr(ndev->dev_addr))
+ if (is_valid_ether_addr(ndev->dev_addr)) {
wilc_set_mac_address(vif, ndev->dev_addr);
- else
- wilc_get_mac_address(vif, ndev->dev_addr);
+ } else {
+ u8 addr[ETH_ALEN];
+
+ wilc_get_mac_address(vif, addr);
+ eth_hw_addr_set(ndev, addr);
+ }
netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr);
if (!is_valid_ether_addr(ndev->dev_addr)) {
@@ -880,7 +885,6 @@ void wilc_netdev_cleanup(struct wilc *wilc)
srcu_read_unlock(&wilc->srcu, srcu_idx);
wilc_wfi_deinit_mon_interface(wilc, false);
- flush_workqueue(wilc->hif_workqueue);
destroy_workqueue(wilc->hif_workqueue);
while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) {
diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h
index 86209b391a3d..b9a88b3e322f 100644
--- a/drivers/net/wireless/microchip/wilc1000/netdev.h
+++ b/drivers/net/wireless/microchip/wilc1000/netdev.h
@@ -264,9 +264,7 @@ struct wilc {
struct device *dev;
bool suspend_event;
- int clients_count;
struct workqueue_struct *hif_workqueue;
- enum chip_ps_states chip_ps_state;
struct wilc_cfg cfg;
void *bus_data;
struct net_device *monitor_dev;
@@ -289,7 +287,8 @@ void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset);
void wilc_mac_indicate(struct wilc *wilc);
void wilc_netdev_cleanup(struct wilc *wilc);
void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size);
-void wilc_wlan_set_bssid(struct net_device *wilc_netdev, u8 *bssid, u8 mode);
+void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid,
+ u8 mode);
struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name,
int vif_type, enum nl80211_iftype type,
bool rtnl_locked);
diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c
index 42e03a701ae1..26ebf6664342 100644
--- a/drivers/net/wireless/microchip/wilc1000/sdio.c
+++ b/drivers/net/wireless/microchip/wilc1000/sdio.c
@@ -978,6 +978,7 @@ static const struct wilc_hif_func wilc_hif_sdio = {
.hif_sync_ext = wilc_sdio_sync_ext,
.enable_interrupt = wilc_sdio_enable_interrupt,
.disable_interrupt = wilc_sdio_disable_interrupt,
+ .hif_reset = wilc_sdio_reset,
};
static int wilc_sdio_resume(struct device *dev)
diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c
index dd481dc0b5ce..640850f989dd 100644
--- a/drivers/net/wireless/microchip/wilc1000/spi.c
+++ b/drivers/net/wireless/microchip/wilc1000/spi.c
@@ -47,6 +47,8 @@ struct wilc_spi {
static const struct wilc_hif_func wilc_hif_spi;
+static int wilc_spi_reset(struct wilc *wilc);
+
/********************************************
*
* Spi protocol Function
@@ -144,6 +146,12 @@ struct wilc_spi_rsp_data {
u8 data[];
} __packed;
+struct wilc_spi_special_cmd_rsp {
+ u8 skip_byte;
+ u8 rsp_cmd_type;
+ u8 status;
+} __packed;
+
static int wilc_bus_probe(struct spi_device *spi)
{
int ret;
@@ -466,7 +474,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ if (r->rsp_cmd_type != cmd && !clockless) {
if (!spi_priv->probing_crc)
dev_err(&spi->dev,
"Failed cmd, cmd (%02x), resp (%02x)\n",
@@ -474,7 +482,7 @@ static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b,
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -563,14 +571,18 @@ static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data,
}
r = (struct wilc_spi_rsp_data *)&rb[cmd_len];
- if (r->rsp_cmd_type != cmd) {
+ /*
+	 * Clockless register operations might return unexpected responses,
+	 * even when successful.
+ */
+ if (r->rsp_cmd_type != cmd && !clockless) {
dev_err(&spi->dev,
"Failed cmd response, cmd (%02x), resp (%02x)\n",
cmd, r->rsp_cmd_type);
return -EINVAL;
}
- if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) {
dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
r->status);
return -EINVAL;
@@ -709,6 +721,61 @@ static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz)
return 0;
}
+static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ u8 wb[32], rb[32];
+ int cmd_len, resp_len = 0;
+ struct wilc_spi_cmd *c;
+ struct wilc_spi_special_cmd_rsp *r;
+
+ if (cmd != CMD_TERMINATE && cmd != CMD_REPEAT && cmd != CMD_RESET)
+ return -EINVAL;
+
+ memset(wb, 0x0, sizeof(wb));
+ memset(rb, 0x0, sizeof(rb));
+ c = (struct wilc_spi_cmd *)wb;
+ c->cmd_type = cmd;
+
+ if (cmd == CMD_RESET)
+ memset(c->u.simple_cmd.addr, 0xFF, 3);
+
+ cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc);
+ resp_len = sizeof(*r);
+
+ if (spi_priv->crc7_enabled) {
+ c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len);
+ cmd_len += 1;
+ }
+ if (cmd_len + resp_len > ARRAY_SIZE(wb)) {
+ dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n",
+ cmd_len, resp_len, ARRAY_SIZE(wb));
+ return -EINVAL;
+ }
+
+ if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) {
+ dev_err(&spi->dev, "Failed cmd write, bus error...\n");
+ return -EINVAL;
+ }
+
+ r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len];
+ if (r->rsp_cmd_type != cmd) {
+ if (!spi_priv->probing_crc)
+ dev_err(&spi->dev,
+ "Failed cmd response, cmd (%02x), resp (%02x)\n",
+ cmd, r->rsp_cmd_type);
+ return -EINVAL;
+ }
+
+ if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) {
+ dev_err(&spi->dev, "Failed cmd state response state (%02x)\n",
+ r->status);
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data)
{
struct spi_device *spi = to_spi_device(wilc->dev);
@@ -895,6 +962,19 @@ static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size)
*
********************************************/
+static int wilc_spi_reset(struct wilc *wilc)
+{
+ struct spi_device *spi = to_spi_device(wilc->dev);
+ struct wilc_spi *spi_priv = wilc->bus_data;
+ int result;
+
+ result = wilc_spi_special_cmd(wilc, CMD_RESET);
+ if (result && !spi_priv->probing_crc)
+ dev_err(&spi->dev, "Failed cmd reset\n");
+
+ return result;
+}
+
static int wilc_spi_deinit(struct wilc *wilc)
{
/*
@@ -1087,7 +1167,7 @@ static int wilc_spi_sync_ext(struct wilc *wilc, int nint)
for (i = 0; (i < 3) && (nint > 0); i++, nint--)
reg |= BIT(i);
- ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg);
+ ret = wilc_spi_write_reg(wilc, WILC_INTR2_ENABLE, reg);
if (ret) {
dev_err(&spi->dev, "Failed write reg (%08x)...\n",
WILC_INTR2_ENABLE);
@@ -1112,4 +1192,5 @@ static const struct wilc_hif_func wilc_hif_spi = {
.hif_block_tx_ext = wilc_spi_write,
.hif_block_rx_ext = wilc_spi_read,
.hif_sync_ext = wilc_spi_sync_ext,
+ .hif_reset = wilc_spi_reset,
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c
index 200a103a0a85..ea81ef120fd1 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.c
@@ -10,6 +10,8 @@
#include "cfg80211.h"
#include "wlan_cfg.h"
+#define WAKE_UP_TRIAL_RETRY 10000
+
static inline bool is_wilc1000(u32 id)
{
return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID;
@@ -425,6 +427,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(tx_data, 0);
+ return 0;
+ }
+
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -474,6 +481,10 @@ int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
return 0;
}
+ if (!wilc->initialized) {
+ tx_complete_fn(priv, 0);
+ return 0;
+ }
tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC);
if (!tqe) {
@@ -611,60 +622,67 @@ EXPORT_SYMBOL_GPL(chip_allow_sleep);
void chip_wakeup(struct wilc *wilc)
{
- u32 reg, clk_status_reg;
- const struct wilc_hif_func *h = wilc->hif_func;
-
- if (wilc->io_type == WILC_HIF_SPI) {
- do {
- h->hif_read_reg(wilc, WILC_SPI_WAKEUP_REG, &reg);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg | WILC_SPI_WAKEUP_BIT);
- h->hif_write_reg(wilc, WILC_SPI_WAKEUP_REG,
- reg & ~WILC_SPI_WAKEUP_BIT);
-
- do {
- usleep_range(2000, 2500);
- wilc_get_chipid(wilc, true);
- } while (wilc_get_chipid(wilc, true) == 0);
- } while (wilc_get_chipid(wilc, true) == 0);
- } else if (wilc->io_type == WILC_HIF_SDIO) {
- h->hif_write_reg(wilc, WILC_SDIO_HOST_TO_FW_REG,
- WILC_SDIO_HOST_TO_FW_BIT);
- usleep_range(200, 400);
- h->hif_read_reg(wilc, WILC_SDIO_WAKEUP_REG, &reg);
- do {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg | WILC_SDIO_WAKEUP_BIT);
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
-
- while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- usleep_range(2000, 2500);
+ u32 ret = 0;
+ u32 clk_status_val = 0, trials = 0;
+ u32 wakeup_reg, wakeup_bit;
+ u32 clk_status_reg, clk_status_bit;
+ u32 to_host_from_fw_reg, to_host_from_fw_bit;
+ u32 from_host_to_fw_reg, from_host_to_fw_bit;
+ const struct wilc_hif_func *hif_func = wilc->hif_func;
- h->hif_read_reg(wilc, WILC_SDIO_CLK_STATUS_REG,
- &clk_status_reg);
- }
- if (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT)) {
- h->hif_write_reg(wilc, WILC_SDIO_WAKEUP_REG,
- reg & ~WILC_SDIO_WAKEUP_BIT);
- }
- } while (!(clk_status_reg & WILC_SDIO_CLK_STATUS_BIT));
+ if (wilc->io_type == WILC_HIF_SDIO) {
+ wakeup_reg = WILC_SDIO_WAKEUP_REG;
+ wakeup_bit = WILC_SDIO_WAKEUP_BIT;
+ clk_status_reg = WILC_SDIO_CLK_STATUS_REG;
+ clk_status_bit = WILC_SDIO_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT;
+ } else {
+ wakeup_reg = WILC_SPI_WAKEUP_REG;
+ wakeup_bit = WILC_SPI_WAKEUP_BIT;
+ clk_status_reg = WILC_SPI_CLK_STATUS_REG;
+ clk_status_bit = WILC_SPI_CLK_STATUS_BIT;
+ from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG;
+ from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT;
+ to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG;
+ to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT;
}
- if (wilc->chip_ps_state == WILC_CHIP_SLEEPING_MANUAL) {
- if (wilc_get_chipid(wilc, false) < WILC_1000_BASE_ID_2B) {
- u32 val32;
+ /* indicate host wakeup */
+ ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg,
+ from_host_to_fw_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_RX, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_RX, val32);
+ /* Set wake-up bit */
+ ret = hif_func->hif_write_reg(wilc, wakeup_reg,
+ wakeup_bit);
+ if (ret)
+ return;
- h->hif_read_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, &val32);
- val32 |= BIT(6);
- h->hif_write_reg(wilc, WILC_REG_4_TO_1_TX_BANK0, val32);
+ while (trials < WAKE_UP_TRIAL_RETRY) {
+ ret = hif_func->hif_read_reg(wilc, clk_status_reg,
+ &clk_status_val);
+ if (ret) {
+ pr_err("Bus error %d %x\n", ret, clk_status_val);
+ return;
}
+ if (clk_status_val & clk_status_bit)
+ break;
+
+ trials++;
}
- wilc->chip_ps_state = WILC_CHIP_WAKEDUP;
+ if (trials >= WAKE_UP_TRIAL_RETRY) {
+ pr_err("Failed to wake-up the chip\n");
+ return;
+ }
+	/* Sometimes SPI fails to read the clock registers after reading or
+	 * writing clockless registers.
+ */
+ if (wilc->io_type == WILC_HIF_SPI)
+ wilc->hif_func->hif_reset(wilc);
}
EXPORT_SYMBOL_GPL(chip_wakeup);
@@ -1071,6 +1089,7 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
u32 addr, size, size2, blksz;
u8 *dma_buffer;
int ret = 0;
+ u32 reg = 0;
blksz = BIT(12);
@@ -1079,10 +1098,22 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
return -EIO;
offset = 0;
+ pr_debug("%s: Downloading firmware size = %d\n", __func__, buffer_size);
+
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
+
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ reg &= ~BIT(10);
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg);
+ wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg);
+ if (reg & BIT(10))
+ pr_err("%s: Failed to reset\n", __func__);
+
+ release_bus(wilc, WILC_BUS_RELEASE_ONLY);
do {
addr = get_unaligned_le32(&buffer[offset]);
size = get_unaligned_le32(&buffer[offset + 4]);
- acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY);
+ acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP);
offset += 8;
while (((int)size) && (offset < buffer_size)) {
if (size <= blksz)
@@ -1100,10 +1131,13 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
offset += size2;
size -= size2;
}
- release_bus(wilc, WILC_BUS_RELEASE_ONLY);
+ release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP);
- if (ret)
+ if (ret) {
+ pr_err("%s Bus error\n", __func__);
goto fail;
+ }
+ pr_debug("%s Offset = %d\n", __func__, offset);
} while (offset < buffer_size);
fail:
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
index 771c25fa849b..13fde636aa0e 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan.h
@@ -97,6 +97,8 @@
#define WILC_SPI_WAKEUP_REG 0x1
#define WILC_SPI_WAKEUP_BIT BIT(1)
+#define WILC_SPI_CLK_STATUS_REG 0x0f
+#define WILC_SPI_CLK_STATUS_BIT BIT(2)
#define WILC_SPI_HOST_TO_FW_REG 0x0b
#define WILC_SPI_HOST_TO_FW_BIT BIT(0)
@@ -300,7 +302,7 @@
#define ENABLE_RX_VMM (SEL_VMM_TBL1 | EN_VMM)
#define ENABLE_TX_VMM (SEL_VMM_TBL0 | EN_VMM)
/* time for expiring the completion of cfg packets */
-#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(2000)
+#define WILC_CFG_PKTS_TIMEOUT msecs_to_jiffies(3000)
#define IS_MANAGMEMENT 0x100
#define IS_MANAGMEMENT_CALLBACK 0x080
@@ -371,6 +373,7 @@ struct wilc_hif_func {
int (*hif_sync_ext)(struct wilc *wilc, int nint);
int (*enable_interrupt)(struct wilc *nic);
void (*disable_interrupt)(struct wilc *nic);
+ int (*hif_reset)(struct wilc *wilc);
};
#define WILC_MAX_CFG_FRAME_SIZE 1468
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
index fe2a7ed8e5cd..dba301378b7f 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
@@ -22,6 +22,7 @@ static const struct wilc_cfg_byte g_cfg_byte[] = {
{WID_STATUS, 0},
{WID_RSSI, 0},
{WID_LINKSPEED, 0},
+ {WID_WOWLAN_TRIGGER, 0},
{WID_NIL, 0}
};
diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_if.h b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
index f85fd575136d..6eb7eb4ac294 100644
--- a/drivers/net/wireless/microchip/wilc1000/wlan_if.h
+++ b/drivers/net/wireless/microchip/wilc1000/wlan_if.h
@@ -48,12 +48,6 @@ enum {
WILC_FW_MAX_PSPOLL_PS = 4
};
-enum chip_ps_states {
- WILC_CHIP_WAKEDUP = 0,
- WILC_CHIP_SLEEPING_AUTO = 1,
- WILC_CHIP_SLEEPING_MANUAL = 2
-};
-
enum bus_acquire {
WILC_BUS_ACQUIRE_ONLY = 0,
WILC_BUS_ACQUIRE_AND_WAKEUP = 1,
@@ -662,6 +656,7 @@ enum {
WID_LOG_TERMINAL_SWITCH = 0x00CD,
WID_TX_POWER = 0x00CE,
+ WID_WOWLAN_TRIGGER = 0X00CF,
/* EMAC Short WID list */
/* RTS Threshold */
/*
diff --git a/drivers/net/wireless/quantenna/qtnfmac/core.c b/drivers/net/wireless/quantenna/qtnfmac/core.c
index b4dd60b2ebc9..2a63ffdc4b2c 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/core.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/core.c
@@ -179,7 +179,7 @@ static int qtnf_netdev_set_mac_address(struct net_device *ndev, void *addr)
sa->sa_data);
if (ret)
- memcpy(ndev->dev_addr, old_addr, ETH_ALEN);
+ eth_hw_addr_set(ndev, old_addr);
return ret;
}
@@ -478,7 +478,7 @@ int qtnf_core_net_attach(struct qtnf_wmac *mac, struct qtnf_vif *vif,
dev->needs_free_netdev = true;
dev_net_set(dev, wiphy_net(wiphy));
dev->ieee80211_ptr = &vif->wdev;
- ether_addr_copy(dev->dev_addr, vif->mac_addr);
+ eth_hw_addr_set(dev, vif->mac_addr);
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
dev->watchdog_timeo = QTNF_DEF_WDOG_TIMEOUT;
dev->tx_queue_len = 100;
@@ -811,13 +811,11 @@ void qtnf_core_detach(struct qtnf_bus *bus)
bus->fw_state = QTNF_FW_STATE_DETACHED;
if (bus->workqueue) {
- flush_workqueue(bus->workqueue);
destroy_workqueue(bus->workqueue);
bus->workqueue = NULL;
}
if (bus->hprio_workqueue) {
- flush_workqueue(bus->hprio_workqueue);
destroy_workqueue(bus->hprio_workqueue);
bus->hprio_workqueue = NULL;
}
diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
index 5d93c874d666..9ad4c120fa28 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c
@@ -387,7 +387,6 @@ static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
error:
- flush_workqueue(pcie_priv->workqueue);
destroy_workqueue(pcie_priv->workqueue);
pci_set_drvdata(pdev, NULL);
return ret;
@@ -416,7 +415,6 @@ static void qtnf_pcie_remove(struct pci_dev *dev)
qtnf_core_detach(bus);
netif_napi_del(&bus->mux_napi);
- flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
tasklet_kill(&priv->reclaim_tq);
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
index b5c67f656cfd..a3ffd1b0c9bc 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2800usb.c
@@ -1101,7 +1101,6 @@ static const struct usb_device_id rt2800usb_device_table[] = {
#ifdef CONFIG_RT2800USB_RT53XX
/* Arcadyan */
{ USB_DEVICE(0x043e, 0x7a12) },
- { USB_DEVICE(0x043e, 0x7a32) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x17e8) },
/* Azurewave */
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0f5009c47cd0..e3a3dc3e45b4 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -791,7 +791,7 @@ static int ray_dev_init(struct net_device *dev)
#endif /* RAY_IMMEDIATE_INIT */
/* copy mac and broadcast addresses to linux device */
- memcpy(dev->dev_addr, &local->sparm.b4.a_mac_addr, ADDRLEN);
+ eth_hw_addr_set(dev, local->sparm.b4.a_mac_addr);
eth_broadcast_addr(dev->broadcast);
dev_dbg(&link->dev, "ray_dev_init ending\n");
diff --git a/drivers/net/wireless/realtek/Kconfig b/drivers/net/wireless/realtek/Kconfig
index 474843277fa1..4a1f0e64df03 100644
--- a/drivers/net/wireless/realtek/Kconfig
+++ b/drivers/net/wireless/realtek/Kconfig
@@ -16,5 +16,6 @@ source "drivers/net/wireless/realtek/rtl818x/Kconfig"
source "drivers/net/wireless/realtek/rtlwifi/Kconfig"
source "drivers/net/wireless/realtek/rtl8xxxu/Kconfig"
source "drivers/net/wireless/realtek/rtw88/Kconfig"
+source "drivers/net/wireless/realtek/rtw89/Kconfig"
endif # WLAN_VENDOR_REALTEK
diff --git a/drivers/net/wireless/realtek/Makefile b/drivers/net/wireless/realtek/Makefile
index 888b5d594e79..ab25419f56c6 100644
--- a/drivers/net/wireless/realtek/Makefile
+++ b/drivers/net/wireless/realtek/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_RTL8187) += rtl818x/
obj-$(CONFIG_RTLWIFI) += rtlwifi/
obj-$(CONFIG_RTL8XXXU) += rtl8xxxu/
obj-$(CONFIG_RTW88) += rtw88/
+obj-$(CONFIG_RTW89) += rtw89/
diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
index 585784258c66..4efab907a3ac 100644
--- a/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
+++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/rtl8225.c
@@ -28,7 +28,7 @@ u8 rtl818x_ioread8_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits8, sizeof(val), 500);
val = priv->io_dmabuf->bits8;
mutex_unlock(&priv->io_mutex);
@@ -45,7 +45,7 @@ u16 rtl818x_ioread16_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits16, sizeof(val), 500);
val = priv->io_dmabuf->bits16;
mutex_unlock(&priv->io_mutex);
@@ -62,7 +62,7 @@ u32 rtl818x_ioread32_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits32, sizeof(val), 500);
val = priv->io_dmabuf->bits32;
mutex_unlock(&priv->io_mutex);
@@ -79,7 +79,7 @@ void rtl818x_iowrite8_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits8, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits8, sizeof(val), 500);
mutex_unlock(&priv->io_mutex);
}
@@ -93,7 +93,7 @@ void rtl818x_iowrite16_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits16, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits16, sizeof(val), 500);
mutex_unlock(&priv->io_mutex);
}
@@ -107,7 +107,7 @@ void rtl818x_iowrite32_idx(struct rtl8187_priv *priv,
usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
(unsigned long)addr, idx & 0x03,
- &priv->io_dmabuf->bits32, sizeof(val), HZ / 2);
+ &priv->io_dmabuf->bits32, sizeof(val), 500);
mutex_unlock(&priv->io_mutex);
}
@@ -183,7 +183,7 @@ static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, __le16 data)
usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
addr, 0x8225, &priv->io_dmabuf->bits16, sizeof(data),
- HZ / 2);
+ 500);
mutex_unlock(&priv->io_mutex);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
index 774341b0005a..a42e2081b75f 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c
@@ -4460,13 +4460,17 @@ void rtl8xxxu_gen1_init_aggregation(struct rtl8xxxu_priv *priv)
static void rtl8xxxu_set_basic_rates(struct rtl8xxxu_priv *priv, u32 rate_cfg)
{
+ struct ieee80211_hw *hw = priv->hw;
u32 val32;
u8 rate_idx = 0;
rate_cfg &= RESPONSE_RATE_BITMAP_ALL;
val32 = rtl8xxxu_read32(priv, REG_RESPONSE_RATE_SET);
- val32 &= ~RESPONSE_RATE_BITMAP_ALL;
+ if (hw->conf.chandef.chan->band == NL80211_BAND_5GHZ)
+ val32 &= RESPONSE_RATE_RRSR_INIT_5G;
+ else
+ val32 &= RESPONSE_RATE_RRSR_INIT_2G;
val32 |= rate_cfg;
rtl8xxxu_write32(priv, REG_RESPONSE_RATE_SET, val32);
diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
index a2a31f374a82..438b65ba9640 100644
--- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
+++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_regs.h
@@ -516,6 +516,8 @@
#define REG_RESPONSE_RATE_SET 0x0440
#define RESPONSE_RATE_BITMAP_ALL 0xfffff
#define RESPONSE_RATE_RRSR_CCK_ONLY_1M 0xffff1
+#define RESPONSE_RATE_RRSR_INIT_2G 0x15f
+#define RESPONSE_RATE_RRSR_INIT_5G 0x150
#define RSR_1M BIT(0)
#define RSR_2M BIT(1)
#define RSR_5_5M BIT(2)
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index 3776495fd9d0..ad327bae754b 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -1743,7 +1743,6 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
tasklet_kill(&rtlpriv->works.irq_tasklet);
cancel_work_sync(&rtlpriv->works.lps_change_work);
- flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
}
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
index 88fa2e593fef..76189283104c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/hw.c
@@ -1430,7 +1430,7 @@ static enum version_8192e _rtl92ee_read_chip_version(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_phy *rtlphy = &rtlpriv->phy;
- enum version_8192e version = VERSION_UNKNOWN;
+ enum version_8192e version;
u32 value32;
rtlphy->rf_type = RF_2T2R;
diff --git a/drivers/net/wireless/realtek/rtw88/debug.c b/drivers/net/wireless/realtek/rtw88/debug.c
index dfd52cff5d02..682b23502e6e 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.c
+++ b/drivers/net/wireless/realtek/rtw88/debug.c
@@ -12,6 +12,7 @@
#include "phy.h"
#include "reg.h"
#include "ps.h"
+#include "regd.h"
#ifdef CONFIG_RTW88_DEBUGFS
@@ -587,7 +588,7 @@ static int rtw_debugfs_get_tx_pwr_tbl(struct seq_file *m, void *v)
struct rtw_power_params pwr_param = {0};
u8 bw = hal->current_band_width;
u8 ch = hal->current_channel;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
seq_printf(m, "regulatory: %s\n", rtw_get_regd_string(regd));
seq_printf(m, "%-4s %-10s %-3s%6s %-4s %4s (%-4s %-4s) %-4s\n",
@@ -828,6 +829,38 @@ static int rtw_debugfs_get_coex_enable(struct seq_file *m, void *v)
return 0;
}
+static ssize_t rtw_debugfs_set_edcca_enable(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw_debugfs_priv *debugfs_priv = seqpriv->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ bool input;
+ int err;
+
+ err = kstrtobool_from_user(buffer, count, &input);
+ if (err)
+ return err;
+
+ rtw_edcca_enabled = input;
+ rtw_phy_adaptivity_set_mode(rtwdev);
+
+ return count;
+}
+
+static int rtw_debugfs_get_edcca_enable(struct seq_file *m, void *v)
+{
+ struct rtw_debugfs_priv *debugfs_priv = m->private;
+ struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ seq_printf(m, "EDCCA %s: EDCCA mode %d\n",
+ rtw_edcca_enabled ? "enabled" : "disabled",
+ dm_info->edcca_mode);
+ return 0;
+}
+
static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
const char __user *buffer,
size_t count, loff_t *loff)
@@ -853,6 +886,7 @@ static ssize_t rtw_debugfs_set_fw_crash(struct file *filp,
mutex_lock(&rtwdev->mutex);
rtw_leave_lps_deep(rtwdev);
+ set_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
rtw_write8(rtwdev, REG_HRCV_MSG, 1);
mutex_unlock(&rtwdev->mutex);
@@ -864,7 +898,9 @@ static int rtw_debugfs_get_fw_crash(struct seq_file *m, void *v)
struct rtw_debugfs_priv *debugfs_priv = m->private;
struct rtw_dev *rtwdev = debugfs_priv->rtwdev;
- seq_printf(m, "%d\n", test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
+ seq_printf(m, "%d\n",
+ test_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags) ||
+ test_bit(RTW_FLAG_RESTARTING, rtwdev->flags));
return 0;
}
@@ -1048,6 +1084,11 @@ static struct rtw_debugfs_priv rtw_debug_priv_coex_info = {
.cb_read = rtw_debugfs_get_coex_info,
};
+static struct rtw_debugfs_priv rtw_debug_priv_edcca_enable = {
+ .cb_write = rtw_debugfs_set_edcca_enable,
+ .cb_read = rtw_debugfs_get_edcca_enable,
+};
+
static struct rtw_debugfs_priv rtw_debug_priv_fw_crash = {
.cb_write = rtw_debugfs_set_fw_crash,
.cb_read = rtw_debugfs_get_fw_crash,
@@ -1131,6 +1172,7 @@ void rtw_debugfs_init(struct rtw_dev *rtwdev)
}
rtw_debugfs_add_r(rf_dump);
rtw_debugfs_add_r(tx_pwr_tbl);
+ rtw_debugfs_add_rw(edcca_enable);
rtw_debugfs_add_rw(fw_crash);
rtw_debugfs_add_rw(dm_cap);
}
diff --git a/drivers/net/wireless/realtek/rtw88/debug.h b/drivers/net/wireless/realtek/rtw88/debug.h
index 0dd3f9a88c8d..47c57f395f52 100644
--- a/drivers/net/wireless/realtek/rtw88/debug.h
+++ b/drivers/net/wireless/realtek/rtw88/debug.h
@@ -21,6 +21,7 @@ enum rtw_debug_mask {
RTW_DBG_WOW = 0x00001000,
RTW_DBG_CFO = 0x00002000,
RTW_DBG_PATH_DIV = 0x00004000,
+ RTW_DBG_ADAPTIVITY = 0x00008000,
RTW_DBG_ALL = 0xffffffff
};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.c b/drivers/net/wireless/realtek/rtw88/fw.c
index e6399519584b..0c4f2a2f2d7f 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.c
+++ b/drivers/net/wireless/realtek/rtw88/fw.c
@@ -183,6 +183,28 @@ static void rtw_fw_scan_result(struct rtw_dev *rtwdev, u8 *payload,
dm_info->scan_density);
}
+static void rtw_fw_adaptivity_result(struct rtw_dev *rtwdev, u8 *payload,
+ u8 length)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+ struct rtw_c2h_adaptivity *result = (struct rtw_c2h_adaptivity *)payload;
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "Adaptivity: density %x igi %x l2h_th_init %x l2h %x h2l %x option %x\n",
+ result->density, result->igi, result->l2h_th_init, result->l2h,
+ result->h2l, result->option);
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "Reg Setting: L2H %x H2L %x\n",
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask),
+ rtw_read32_mask(rtwdev, edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask));
+
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY, "EDCCA Flag %s\n",
+ rtw_read32_mask(rtwdev, REG_EDCCA_REPORT, BIT_EDCCA_FLAG) ?
+ "Set" : "Unset");
+}
+
void rtw_fw_c2h_cmd_handle(struct rtw_dev *rtwdev, struct sk_buff *skb)
{
struct rtw_c2h_cmd *c2h;
@@ -252,6 +274,10 @@ void rtw_fw_c2h_cmd_rx_irqsafe(struct rtw_dev *rtwdev, u32 pkt_offset,
rtw_fw_scan_result(rtwdev, c2h->payload, len);
dev_kfree_skb_any(skb);
break;
+ case C2H_ADAPTIVITY:
+ rtw_fw_adaptivity_result(rtwdev, c2h->payload, len);
+ dev_kfree_skb_any(skb);
+ break;
default:
/* pass offset for further operation */
*((u32 *)skb->cb) = pkt_offset;
@@ -1556,12 +1582,10 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
u32 i;
u16 idx = 0;
u16 ctl;
- u8 rcr;
- rcr = rtw_read8(rtwdev, REG_RCR + 2);
ctl = rtw_read16(rtwdev, REG_PKTBUF_DBG_CTRL) & 0xf000;
/* disable rx clock gate */
- rtw_write8(rtwdev, REG_RCR, rcr | BIT(3));
+ rtw_write32_set(rtwdev, REG_RCR, BIT_DISGCLK);
do {
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, start_pg | ctl);
@@ -1580,7 +1604,8 @@ static void rtw_fw_read_fifo_page(struct rtw_dev *rtwdev, u32 offset, u32 size,
out:
rtw_write16(rtwdev, REG_PKTBUF_DBG_CTRL, ctl);
- rtw_write8(rtwdev, REG_RCR + 2, rcr);
+ /* restore rx clock gate */
+ rtw_write32_clr(rtwdev, REG_RCR, BIT_DISGCLK);
}
static void rtw_fw_read_fifo(struct rtw_dev *rtwdev, enum rtw_fw_fifo_sel sel,
@@ -1722,6 +1747,27 @@ void rtw_fw_channel_switch(struct rtw_dev *rtwdev, bool enable)
rtw_fw_send_h2c_packet(rtwdev, h2c_pkt);
}
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ u8 h2c_pkt[H2C_PKT_SIZE] = {0};
+
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_ADAPTIVITY,
+ "EDCCA disabled by debugfs\n");
+ }
+
+ SET_H2C_CMD_ID_CLASS(h2c_pkt, H2C_CMD_ADAPTIVITY);
+ SET_ADAPTIVITY_MODE(h2c_pkt, dm_info->edcca_mode);
+ SET_ADAPTIVITY_OPTION(h2c_pkt, 2);
+ SET_ADAPTIVITY_IGI(h2c_pkt, dm_info->igi_history[0]);
+ SET_ADAPTIVITY_L2H(h2c_pkt, dm_info->l2h_th_ini);
+ SET_ADAPTIVITY_DENSITY(h2c_pkt, dm_info->scan_density);
+
+ rtw_fw_send_h2c_command(rtwdev, h2c_pkt);
+}
+
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start)
{
u8 h2c_pkt[H2C_PKT_SIZE] = {0};
diff --git a/drivers/net/wireless/realtek/rtw88/fw.h b/drivers/net/wireless/realtek/rtw88/fw.h
index 64dcde35a021..09c7afb99e63 100644
--- a/drivers/net/wireless/realtek/rtw88/fw.h
+++ b/drivers/net/wireless/realtek/rtw88/fw.h
@@ -41,6 +41,7 @@ enum rtw_c2h_cmd_id {
C2H_WLAN_INFO = 0x27,
C2H_WLAN_RFON = 0x32,
C2H_BCN_FILTER_NOTIFY = 0x36,
+ C2H_ADAPTIVITY = 0x37,
C2H_SCAN_RESULT = 0x38,
C2H_HW_FEATURE_DUMP = 0xfd,
C2H_HALMAC = 0xff,
@@ -56,6 +57,15 @@ struct rtw_c2h_cmd {
u8 payload[];
} __packed;
+struct rtw_c2h_adaptivity {
+ u8 density;
+ u8 igi;
+ u8 l2h_th_init;
+ u8 l2h;
+ u8 h2l;
+ u8 option;
+} __packed;
+
enum rtw_rsvd_packet_type {
RSVD_BEACON,
RSVD_DUMMY,
@@ -90,6 +100,7 @@ enum rtw_fw_feature {
FW_FEATURE_PG = BIT(3),
FW_FEATURE_BCN_FILTER = BIT(5),
FW_FEATURE_NOTIFY_SCAN = BIT(6),
+ FW_FEATURE_ADAPTIVITY = BIT(7),
FW_FEATURE_MAX = BIT(31),
};
@@ -375,6 +386,7 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define H2C_CMD_BCN_FILTER_OFFLOAD_P1 0x57
#define H2C_CMD_WL_PHY_INFO 0x58
#define H2C_CMD_SCAN 0x59
+#define H2C_CMD_ADAPTIVITY 0x5A
#define H2C_CMD_COEX_TDMA_TYPE 0x60
#define H2C_CMD_QUERY_BT_INFO 0x61
@@ -428,6 +440,17 @@ static inline void rtw_h2c_pkt_set_header(u8 *h2c_pkt, u8 sub_id)
#define SET_SCAN_START(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, BIT(8))
+#define SET_ADAPTIVITY_MODE(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(11, 8))
+#define SET_ADAPTIVITY_OPTION(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(15, 12))
+#define SET_ADAPTIVITY_IGI(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(23, 16))
+#define SET_ADAPTIVITY_L2H(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(31, 24))
+#define SET_ADAPTIVITY_DENSITY(h2c_pkt, value) \
+ le32p_replace_bits((__le32 *)(h2c_pkt) + 0x01, value, GENMASK(7, 0))
+
#define SET_PWR_MODE_SET_MODE(h2c_pkt, value) \
le32p_replace_bits((__le32 *)(h2c_pkt) + 0x00, value, GENMASK(14, 8))
#define SET_PWR_MODE_SET_RLBM(h2c_pkt, value) \
@@ -662,4 +685,5 @@ void rtw_fw_c2h_cmd_isr(struct rtw_dev *rtwdev);
int rtw_fw_dump_fifo(struct rtw_dev *rtwdev, u8 fifo_sel, u32 addr, u32 size,
u32 *buffer);
void rtw_fw_scan_notify(struct rtw_dev *rtwdev, bool start);
+void rtw_fw_adaptivity(struct rtw_dev *rtwdev);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/main.c b/drivers/net/wireless/realtek/rtw88/main.c
index 6bb55e663fc3..a0d4d6e31fb4 100644
--- a/drivers/net/wireless/realtek/rtw88/main.c
+++ b/drivers/net/wireless/realtek/rtw88/main.c
@@ -23,6 +23,14 @@ EXPORT_SYMBOL(rtw_disable_lps_deep_mode);
bool rtw_bf_support = true;
unsigned int rtw_debug_mask;
EXPORT_SYMBOL(rtw_debug_mask);
+/* EDCCA is enabled during normal operation. For debugging in a noisy
+ * environment, it can be disabled via the edcca debugfs entry. Because a
+ * noisy environment will likely affect all rtw88 devices, rtw_edcca_enabled
+ * is declared per driver rather than per device, so disabling it takes
+ * effect for every rtw88 device until there is a compelling reason to
+ * track the setting per device.
+ */
+bool rtw_edcca_enabled = true;
module_param_named(disable_lps_deep, rtw_disable_lps_deep_mode, bool, 0644);
module_param_named(support_bf, rtw_bf_support, bool, 0644);
@@ -556,6 +564,7 @@ static void __fw_recovery_work(struct rtw_dev *rtwdev)
int ret = 0;
set_bit(RTW_FLAG_RESTARTING, rtwdev->flags);
+ clear_bit(RTW_FLAG_RESTART_TRIGGERING, rtwdev->flags);
ret = rtw_fwcd_prep(rtwdev);
if (ret)
@@ -1964,7 +1973,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
rtw_set_supported_band(hw, rtwdev->chip);
SET_IEEE80211_PERM_ADDR(hw, rtwdev->efuse.addr);
- rtw_regd_init(rtwdev, rtw_regd_notifier);
+ ret = rtw_regd_init(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to init regd\n");
+ return ret;
+ }
ret = ieee80211_register_hw(hw);
if (ret) {
@@ -1972,8 +1985,11 @@ int rtw_register_hw(struct rtw_dev *rtwdev, struct ieee80211_hw *hw)
return ret;
}
- if (regulatory_hint(hw->wiphy, rtwdev->regd.alpha2))
- rtw_err(rtwdev, "regulatory_hint fail\n");
+ ret = rtw_regd_hint(rtwdev);
+ if (ret) {
+ rtw_err(rtwdev, "failed to hint regd\n");
+ return ret;
+ }
rtw_debugfs_init(rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h
index 56812127a053..bbdd535b64e7 100644
--- a/drivers/net/wireless/realtek/rtw88/main.h
+++ b/drivers/net/wireless/realtek/rtw88/main.h
@@ -41,6 +41,7 @@
extern bool rtw_bf_support;
extern bool rtw_disable_lps_deep_mode;
extern unsigned int rtw_debug_mask;
+extern bool rtw_edcca_enabled;
extern const struct ieee80211_ops rtw_ops;
#define RTW_MAX_CHANNEL_NUM_2G 14
@@ -362,6 +363,7 @@ enum rtw_flags {
RTW_FLAG_BUSY_TRAFFIC,
RTW_FLAG_WOWLAN,
RTW_FLAG_RESTARTING,
+ RTW_FLAG_RESTART_TRIGGERING,
NUM_OF_RTW_FLAGS,
};
@@ -545,6 +547,11 @@ struct rtw_rf_sipi_addr {
u32 lssi_read_pi;
};
+struct rtw_hw_reg_offset {
+ struct rtw_hw_reg hw_reg;
+ u8 offset;
+};
+
struct rtw_backup_info {
u8 len;
u32 reg;
@@ -800,8 +807,22 @@ struct rtw_vif {
struct rtw_regulatory {
char alpha2[2];
- u8 chplan;
- u8 txpwr_regd;
+ u8 txpwr_regd_2g;
+ u8 txpwr_regd_5g;
+};
+
+enum rtw_regd_state {
+ RTW_REGD_STATE_WORLDWIDE,
+ RTW_REGD_STATE_PROGRAMMED,
+ RTW_REGD_STATE_SETTING,
+
+ RTW_REGD_STATE_NR,
+};
+
+struct rtw_regd {
+ enum rtw_regd_state state;
+ const struct rtw_regulatory *regulatory;
+ enum nl80211_dfs_regions dfs_region;
};
struct rtw_chip_ops {
@@ -839,6 +860,8 @@ struct rtw_chip_ops {
struct ieee80211_bss_conf *conf);
void (*cfg_csi_rate)(struct rtw_dev *rtwdev, u8 rssi, u8 cur_rate,
u8 fixrate_en, u8 *new_rate);
+ void (*adaptivity_init)(struct rtw_dev *rtwdev);
+ void (*adaptivity)(struct rtw_dev *rtwdev);
void (*cfo_init)(struct rtw_dev *rtwdev);
void (*cfo_track)(struct rtw_dev *rtwdev);
void (*config_tx_path)(struct rtw_dev *rtwdev, u8 tx_path,
@@ -1194,6 +1217,10 @@ struct rtw_chip_info {
u8 bfer_su_max_num;
u8 bfer_mu_max_num;
+ struct rtw_hw_reg_offset *edcca_th;
+ s8 l2h_th_ini_cs;
+ s8 l2h_th_ini_ad;
+
const char *wow_fw_name;
const struct wiphy_wowlan_support *wowlan_stub;
const u8 max_sched_scan_ssids;
@@ -1542,6 +1569,20 @@ struct rtw_gapk_info {
u8 channel;
};
+#define EDCCA_TH_L2H_IDX 0
+#define EDCCA_TH_H2L_IDX 1
+#define EDCCA_TH_L2H_LB 48
+#define EDCCA_ADC_BACKOFF 12
+#define EDCCA_IGI_BASE 50
+#define EDCCA_IGI_L2H_DIFF 8
+#define EDCCA_L2H_H2L_DIFF 7
+#define EDCCA_L2H_H2L_DIFF_NORMAL 8
+
+enum rtw_edcca_mode {
+ RTW_EDCCA_NORMAL = 0,
+ RTW_EDCCA_ADAPTIVITY = 1,
+};
+
struct rtw_cfo_track {
bool is_adjust;
u8 crystal_cap;
@@ -1633,6 +1674,8 @@ struct rtw_dm_info {
struct rtw_gapk_info gapk;
bool is_bt_iqk_timeout;
+ s8 l2h_th_ini;
+ enum rtw_edcca_mode edcca_mode;
u8 scan_density;
};
@@ -1833,7 +1876,7 @@ struct rtw_dev {
struct rtw_efuse efuse;
struct rtw_sec_desc sec;
struct rtw_traffic_stats stats;
- struct rtw_regulatory regd;
+ struct rtw_regd regd;
struct rtw_bf_info bf_info;
struct rtw_dm_info dm_info;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.c b/drivers/net/wireless/realtek/rtw88/phy.c
index 569dd3cfde35..bfddfcbe63f5 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.c
+++ b/drivers/net/wireless/realtek/rtw88/phy.c
@@ -9,6 +9,7 @@
#include "fw.h"
#include "phy.h"
#include "debug.h"
+#include "regd.h"
struct phy_cfg_pair {
u32 addr;
@@ -119,6 +120,63 @@ static void rtw_phy_cck_pd_init(struct rtw_dev *rtwdev)
dm_info->cck_fa_avg = CCK_FA_AVG_RESET;
}
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l)
+{
+ struct rtw_hw_reg_offset *edcca_th = rtwdev->chip->edcca_th;
+
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_L2H_IDX].hw_reg.mask,
+ l2h + edcca_th[EDCCA_TH_L2H_IDX].offset);
+ rtw_write32_mask(rtwdev,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.addr,
+ edcca_th[EDCCA_TH_H2L_IDX].hw_reg.mask,
+ h2l + edcca_th[EDCCA_TH_H2L_IDX].offset);
+}
+EXPORT_SYMBOL(rtw_phy_set_edcca_th);
+
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+
+ /* turned off via debugfs for debugging */
+ if (!rtw_edcca_enabled) {
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ rtw_dbg(rtwdev, RTW_DBG_PHY, "EDCCA disabled, cannot be set\n");
+ return;
+ }
+
+ switch (rtwdev->regd.dfs_region) {
+ case NL80211_DFS_ETSI:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_ad;
+ break;
+ case NL80211_DFS_JP:
+ dm_info->edcca_mode = RTW_EDCCA_ADAPTIVITY;
+ dm_info->l2h_th_ini = chip->l2h_th_ini_cs;
+ break;
+ default:
+ dm_info->edcca_mode = RTW_EDCCA_NORMAL;
+ break;
+ }
+}
+
+static void rtw_phy_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ struct rtw_chip_info *chip = rtwdev->chip;
+
+ rtw_phy_adaptivity_set_mode(rtwdev);
+ if (chip->ops->adaptivity_init)
+ chip->ops->adaptivity_init(rtwdev);
+}
+
+static void rtw_phy_adaptivity(struct rtw_dev *rtwdev)
+{
+ if (rtwdev->chip->ops->adaptivity)
+ rtwdev->chip->ops->adaptivity(rtwdev);
+}
+
static void rtw_phy_cfo_init(struct rtw_dev *rtwdev)
{
struct rtw_chip_info *chip = rtwdev->chip;
@@ -159,6 +217,7 @@ void rtw_phy_init(struct rtw_dev *rtwdev)
rtw_phy_cck_pd_init(rtwdev);
dm_info->iqk.done = false;
+ rtw_phy_adaptivity_init(rtwdev);
rtw_phy_cfo_init(rtwdev);
rtw_phy_tx_path_div_init(rtwdev);
}
@@ -711,6 +770,11 @@ void rtw_phy_dynamic_mechanism(struct rtw_dev *rtwdev)
rtw_phy_cfo_track(rtwdev);
rtw_phy_dpk_track(rtwdev);
rtw_phy_pwr_track(rtwdev);
+
+ if (rtw_fw_feature_check(&rtwdev->fw, FW_FEATURE_ADAPTIVITY))
+ rtw_fw_adaptivity(rtwdev);
+ else
+ rtw_phy_adaptivity(rtwdev);
}
#define FRAC_BITS 3
@@ -1564,17 +1628,70 @@ static void rtw_xref_txpwr_lmt(struct rtw_dev *rtwdev)
rtw_xref_txpwr_lmt_by_bw(rtwdev, regd);
}
+static void
+__cfg_txpwr_lmt_by_alt(struct rtw_hal *hal, u8 regd, u8 regd_alt, u8 bw, u8 rs)
+{
+ u8 ch;
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_2G; ch++)
+ hal->tx_pwr_limit_2g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_2g[regd_alt][bw][rs][ch];
+
+ for (ch = 0; ch < RTW_MAX_CHANNEL_NUM_5G; ch++)
+ hal->tx_pwr_limit_5g[regd][bw][rs][ch] =
+ hal->tx_pwr_limit_5g[regd_alt][bw][rs][ch];
+}
+
+static void
+rtw_cfg_txpwr_lmt_by_alt(struct rtw_dev *rtwdev, u8 regd, u8 regd_alt)
+{
+ u8 bw, rs;
+
+ for (bw = 0; bw < RTW_CHANNEL_WIDTH_MAX; bw++)
+ for (rs = 0; rs < RTW_RATE_SECTION_MAX; rs++)
+ __cfg_txpwr_lmt_by_alt(&rtwdev->hal, regd, regd_alt,
+ bw, rs);
+}
+
void rtw_parse_tbl_txpwr_lmt(struct rtw_dev *rtwdev,
const struct rtw_table *tbl)
{
const struct rtw_txpwr_lmt_cfg_pair *p = tbl->data;
const struct rtw_txpwr_lmt_cfg_pair *end = p + tbl->size;
+ u32 regd_cfg_flag = 0;
+ u8 regd_alt;
+ u8 i;
for (; p < end; p++) {
+ regd_cfg_flag |= BIT(p->regd);
rtw_phy_set_tx_power_limit(rtwdev, p->regd, p->band,
p->bw, p->rs, p->ch, p->txpwr_lmt);
}
+ for (i = 0; i < RTW_REGD_MAX; i++) {
+ if (i == RTW_REGD_WW)
+ continue;
+
+ if (regd_cfg_flag & BIT(i))
+ continue;
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "txpwr regd %d is not configured\n", i);
+
+ if (rtw_regd_has_alt(i, &regd_alt) &&
+ regd_cfg_flag & BIT(regd_alt)) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "cfg txpwr regd %d by regd %d as alternative\n",
+ i, regd_alt);
+
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, regd_alt);
+ continue;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "cfg txpwr regd %d by WW\n", i);
+ rtw_cfg_txpwr_lmt_by_alt(rtwdev, i, RTW_REGD_WW);
+ }
+
rtw_xref_txpwr_lmt(rtwdev);
}
EXPORT_SYMBOL(rtw_parse_tbl_txpwr_lmt);
@@ -2014,7 +2131,7 @@ static void rtw_phy_set_tx_power_index_by_rs(struct rtw_dev *rtwdev,
u8 ch, u8 path, u8 rs)
{
struct rtw_hal *hal = &rtwdev->hal;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 *rates;
u8 size;
u8 rate;
diff --git a/drivers/net/wireless/realtek/rtw88/phy.h b/drivers/net/wireless/realtek/rtw88/phy.h
index 112ed125970a..02d1ec47ffb1 100644
--- a/drivers/net/wireless/realtek/rtw88/phy.h
+++ b/drivers/net/wireless/realtek/rtw88/phy.h
@@ -59,6 +59,8 @@ bool rtw_phy_pwrtrack_need_lck(struct rtw_dev *rtwdev);
bool rtw_phy_pwrtrack_need_iqk(struct rtw_dev *rtwdev);
void rtw_phy_config_swing_table(struct rtw_dev *rtwdev,
struct rtw_swing_table *swing_table);
+void rtw_phy_set_edcca_th(struct rtw_dev *rtwdev, u8 l2h, u8 h2l);
+void rtw_phy_adaptivity_set_mode(struct rtw_dev *rtwdev);
void rtw_phy_parsing_cfo(struct rtw_dev *rtwdev,
struct rtw_rx_pkt_stat *pkt_stat);
void rtw_phy_tx_path_diversity(struct rtw_dev *rtwdev);
diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h
index f5ce75095e90..84ba9ec489c3 100644
--- a/drivers/net/wireless/realtek/rtw88/reg.h
+++ b/drivers/net/wireless/realtek/rtw88/reg.h
@@ -361,10 +361,12 @@
#define REG_AGGR_BREAK_TIME 0x051A
#define REG_SLOT 0x051B
#define REG_TX_PTCL_CTRL 0x0520
+#define BIT_DIS_EDCCA BIT(15)
#define BIT_SIFS_BK_EN BIT(12)
#define REG_TXPAUSE 0x0522
#define BIT_AC_QUEUE GENMASK(7, 0)
#define REG_RD_CTRL 0x0524
+#define BIT_EDCCA_MSK_CNTDOWN_EN BIT(11)
#define BIT_DIS_TXOP_CFE BIT(10)
#define BIT_DIS_LSIG_CFE BIT(9)
#define BIT_DIS_STBC_CFE BIT(8)
@@ -406,6 +408,7 @@
#define BIT_MFBEN BIT(22)
#define BIT_DISCHKPPDLLEN BIT(21)
#define BIT_PKTCTL_DLEN BIT(20)
+#define BIT_DISGCLK BIT(19)
#define BIT_TIM_PARSER_EN BIT(18)
#define BIT_BC_MD_EN BIT(17)
#define BIT_UC_MD_EN BIT(16)
@@ -640,6 +643,9 @@
#define REG_HRCV_MSG 0x1cf
+#define REG_EDCCA_REPORT 0x2d38
+#define BIT_EDCCA_FLAG BIT(24)
+
#define REG_IGN_GNTBT4 0x4160
#define RF_MODE 0x00
diff --git a/drivers/net/wireless/realtek/rtw88/regd.c b/drivers/net/wireless/realtek/rtw88/regd.c
index 69744dd65968..315c2b193e92 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.c
+++ b/drivers/net/wireless/realtek/rtw88/regd.c
@@ -7,288 +7,274 @@
#include "debug.h"
#include "phy.h"
-#define COUNTRY_CHPLAN_ENT(_alpha2, _chplan, _txpwr_regd) \
+#define COUNTRY_REGD_ENT(_alpha2, _regd_2g, _regd_5g) \
{.alpha2 = (_alpha2), \
- .chplan = (_chplan), \
- .txpwr_regd = (_txpwr_regd) \
+ .txpwr_regd_2g = (_regd_2g), \
+ .txpwr_regd_5g = (_regd_5g), \
}
+#define rtw_dbg_regd_dump(_dev, _msg, _args...) \
+do { \
+ struct rtw_dev *__d = (_dev); \
+ const struct rtw_regd *__r = &__d->regd; \
+ rtw_dbg(__d, RTW_DBG_REGD, _msg \
+ "apply alpha2 %c%c, regd {%d, %d}, dfs_region %d\n",\
+ ##_args, \
+ __r->regulatory->alpha2[0], \
+ __r->regulatory->alpha2[1], \
+ __r->regulatory->txpwr_regd_2g, \
+ __r->regulatory->txpwr_regd_5g, \
+ __r->dfs_region); \
+} while (0)
+
/* If country code is not correctly defined in efuse,
* use worldwide country code and txpwr regd.
*/
-static const struct rtw_regulatory rtw_defined_chplan =
- COUNTRY_CHPLAN_ENT("00", RTW_CHPLAN_REALTEK_DEFINE, RTW_REGD_WW);
-
-static const struct rtw_regulatory all_chplan_map[] = {
- COUNTRY_CHPLAN_ENT("AD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AE", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AF", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AO", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AR", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("AU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("AW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("AZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BB", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BH", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BO", RTW_CHPLAN_WORLD_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BR", RTW_CHPLAN_FCC2_FCC1, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("BT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BW", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("BZ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CA", RTW_CHPLAN_IC1_IC2, RTW_REGD_IC),
- COUNTRY_CHPLAN_ENT("CC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CI", RTW_CHPLAN_ETSI1_ETSI4, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CL", RTW_CHPLAN_WORLD_CHILE1, RTW_REGD_CHILE),
- COUNTRY_CHPLAN_ENT("CM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("CV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CX", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("CY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("CZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("DM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DO", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("DZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("EE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EG", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("EH", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ER", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ES", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ET", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FJ", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FM", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("FO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("FR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GD", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GP", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GT", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GU", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("GW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("GY", RTW_CHPLAN_FCC1_NCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HK", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HM", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("HN", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("HT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("HU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ID", RTW_CHPLAN_ETSI1_ETSI12, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IL", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IN", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("IT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JM", RTW_CHPLAN_WORLD_FCC5, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("JO", RTW_CHPLAN_WORLD_ETSI8, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("JP", RTW_CHPLAN_MKK1_MKK1, RTW_REGD_MKK),
- COUNTRY_CHPLAN_ENT("KE", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KN", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KR", RTW_CHPLAN_KCC1_KCC3, RTW_REGD_KCC),
- COUNTRY_CHPLAN_ENT("KW", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("KY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("KZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("LI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LV", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("LY", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MA", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ME", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MF", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MH", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ML", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MO", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MP", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MQ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MV", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MX", RTW_CHPLAN_FCC2_FCC7, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("MY", RTW_CHPLAN_WORLD_ETSI15, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("MZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NF", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NG", RTW_CHPLAN_WORLD_ETSI20, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("NL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NP", RTW_CHPLAN_WORLD_ETSI7, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("NU", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("NZ", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("OM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PA", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PE", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PK", RTW_CHPLAN_WORLD_ETSI10, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PR", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("PW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("PY", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("QA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RS", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RU", RTW_CHPLAN_WORLD_ETSI14, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("RW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SB", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SG", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SH", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SI", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SK", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SL", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SN", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("SR", RTW_CHPLAN_FCC2_FCC17, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("ST", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SV", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SX", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("SZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TC", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TD", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TH", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TJ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TK", RTW_CHPLAN_WORLD_ACMA1, RTW_REGD_ACMA),
- COUNTRY_CHPLAN_ENT("TM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TN", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TO", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TR", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TT", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TV", RTW_CHPLAN_ETSI1_NULL, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("TW", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("TZ", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UA", RTW_CHPLAN_WORLD_ETSI3, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("UG", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("US", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UY", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("UZ", RTW_CHPLAN_WORLD_ETSI6, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VA", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VC", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VE", RTW_CHPLAN_WORLD_FCC3, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VG", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VI", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("VN", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("VU", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WF", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("WS", RTW_CHPLAN_FCC2_FCC11, RTW_REGD_FCC),
- COUNTRY_CHPLAN_ENT("YE", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("YT", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZA", RTW_CHPLAN_WORLD_ETSI2, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZM", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
- COUNTRY_CHPLAN_ENT("ZW", RTW_CHPLAN_WORLD_ETSI1, RTW_REGD_ETSI),
+static const struct rtw_regulatory rtw_reg_ww =
+ COUNTRY_REGD_ENT("00", RTW_REGD_WW, RTW_REGD_WW);
+
+static const struct rtw_regulatory rtw_reg_map[] = {
+ COUNTRY_REGD_ENT("AD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AR", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("AS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("AU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("AW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("AZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BB", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("BT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("BZ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CA", RTW_REGD_IC, RTW_REGD_IC),
+ COUNTRY_REGD_ENT("CC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CL", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("CV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CX", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("CY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("CZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("DM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DO", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("DZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("EE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("EH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ER", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ES", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ET", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FJ", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("FO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("FR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GD", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GU", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("GW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("GY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HM", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("HN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("HT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("HU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ID", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("IT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JM", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("JO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("JP", RTW_REGD_MKK, RTW_REGD_MKK),
+ COUNTRY_REGD_ENT("KE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KN", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KR", RTW_REGD_KCC, RTW_REGD_KCC),
+ COUNTRY_REGD_ENT("KW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("KY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("KZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("LI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("LY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ME", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MF", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MH", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ML", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MP", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("MQ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MV", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MX", RTW_REGD_MEXICO, RTW_REGD_MEXICO),
+ COUNTRY_REGD_ENT("MY", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("MZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NF", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("NL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NP", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("NU", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("NZ", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("OM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PA", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("PW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("PY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("QA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RS", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("RW", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SB", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SI", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SL", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("SR", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("ST", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SV", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SX", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("SZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TC", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TD", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TH", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TJ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TK", RTW_REGD_ACMA, RTW_REGD_ACMA),
+ COUNTRY_REGD_ENT("TM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TO", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TR", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("TT", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TV", RTW_REGD_ETSI, RTW_REGD_WW),
+ COUNTRY_REGD_ENT("TW", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("TZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("UG", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("US", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UY", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("UZ", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VC", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VE", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VG", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VI", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("VN", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("VU", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WF", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("WS", RTW_REGD_FCC, RTW_REGD_FCC),
+ COUNTRY_REGD_ENT("XK", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YE", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("YT", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZA", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZM", RTW_REGD_ETSI, RTW_REGD_ETSI),
+ COUNTRY_REGD_ENT("ZW", RTW_REGD_ETSI, RTW_REGD_ETSI),
};
-static void rtw_regd_apply_beaconing_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
-{
- enum nl80211_band band;
- struct ieee80211_supported_band *sband;
- const struct ieee80211_reg_rule *reg_rule;
- struct ieee80211_channel *ch;
- unsigned int i;
-
- for (band = 0; band < NUM_NL80211_BANDS; band++) {
- if (!wiphy->bands[band])
- continue;
-
- sband = wiphy->bands[band];
- for (i = 0; i < sband->n_channels; i++) {
- ch = &sband->channels[i];
-
- reg_rule = freq_reg_info(wiphy,
- MHZ_TO_KHZ(ch->center_freq));
- if (IS_ERR(reg_rule))
- continue;
-
- ch->flags &= ~IEEE80211_CHAN_DISABLED;
-
- if (!(reg_rule->flags & NL80211_RRF_NO_IR))
- ch->flags &= ~IEEE80211_CHAN_NO_IR;
- }
- }
-}
-
static void rtw_regd_apply_hw_cap_flags(struct wiphy *wiphy)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
@@ -321,78 +307,223 @@ out_5g:
}
}
-static void rtw_regd_apply_world_flags(struct wiphy *wiphy,
- enum nl80211_reg_initiator initiator)
+static bool rtw_reg_is_ww(const struct rtw_regulatory *reg)
{
- rtw_regd_apply_beaconing_flags(wiphy, initiator);
+ return reg == &rtw_reg_ww;
}
-static struct rtw_regulatory rtw_regd_find_reg_by_name(char *alpha2)
+static bool rtw_reg_match(const struct rtw_regulatory *reg, const char *alpha2)
+{
+ return memcmp(reg->alpha2, alpha2, 2) == 0;
+}
+
+static const struct rtw_regulatory *rtw_reg_find_by_name(const char *alpha2)
{
unsigned int i;
- for (i = 0; i < ARRAY_SIZE(all_chplan_map); i++) {
- if (!memcmp(all_chplan_map[i].alpha2, alpha2, 2))
- return all_chplan_map[i];
+ for (i = 0; i < ARRAY_SIZE(rtw_reg_map); i++) {
+ if (rtw_reg_match(&rtw_reg_map[i], alpha2))
+ return &rtw_reg_map[i];
}
- return rtw_defined_chplan;
+ return &rtw_reg_ww;
}
-static int rtw_regd_notifier_apply(struct rtw_dev *rtwdev,
- struct wiphy *wiphy,
- struct regulatory_request *request)
+static
+void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+
+/* call this before ieee80211_register_hw() */
+int rtw_regd_init(struct rtw_dev *rtwdev)
{
- if (request->initiator == NL80211_REGDOM_SET_BY_USER)
- return 0;
- rtwdev->regd = rtw_regd_find_reg_by_name(request->alpha2);
- rtw_regd_apply_world_flags(wiphy, request->initiator);
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ const struct rtw_regulatory *chip_reg;
- return 0;
-}
+ if (!wiphy)
+ return -EINVAL;
-static int
-rtw_regd_init_wiphy(struct rtw_regulatory *reg, struct wiphy *wiphy,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
-{
- wiphy->reg_notifier = reg_notifier;
+ wiphy->reg_notifier = rtw_regd_notifier;
- wiphy->regulatory_flags &= ~REGULATORY_CUSTOM_REG;
- wiphy->regulatory_flags &= ~REGULATORY_STRICT_REG;
- wiphy->regulatory_flags &= ~REGULATORY_DISABLE_BEACON_HINTS;
+ chip_reg = rtw_reg_find_by_name(rtwdev->efuse.country_code);
+ if (!rtw_reg_is_ww(chip_reg)) {
+ rtwdev->regd.state = RTW_REGD_STATE_PROGRAMMED;
- rtw_regd_apply_hw_cap_flags(wiphy);
+		/* Set REGULATORY_STRICT_REG before ieee80211_register_hw(), so
+		 * the stack will wait for regulatory_hint() and consider it
+		 * the superset of our regulatory rules.
+		 */
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ } else {
+ rtwdev->regd.state = RTW_REGD_STATE_WORLDWIDE;
+ }
+ rtwdev->regd.regulatory = &rtw_reg_ww;
+ rtwdev->regd.dfs_region = NL80211_DFS_UNSET;
+ rtw_dbg_regd_dump(rtwdev, "regd init state %d: ", rtwdev->regd.state);
+
+ rtw_regd_apply_hw_cap_flags(wiphy);
return 0;
}
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+/* call this after ieee80211_register_hw() */
+int rtw_regd_hint(struct rtw_dev *rtwdev)
{
struct wiphy *wiphy = rtwdev->hw->wiphy;
+ int ret;
if (!wiphy)
return -EINVAL;
- rtwdev->regd = rtw_regd_find_reg_by_name(rtwdev->efuse.country_code);
- rtw_regd_init_wiphy(&rtwdev->regd, wiphy, reg_notifier);
+ if (rtwdev->regd.state == RTW_REGD_STATE_PROGRAMMED) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "country domain %c%c is PGed on efuse",
+ rtwdev->efuse.country_code[0],
+ rtwdev->efuse.country_code[1]);
+
+ ret = regulatory_hint(wiphy, rtwdev->efuse.country_code);
+ if (ret) {
+ rtw_warn(rtwdev,
+ "failed to hint regulatory: %d\n", ret);
+ return ret;
+ }
+ }
return 0;
}
+static bool rtw_regd_mgmt_worldwide(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ !rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_SETTING;
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool rtw_regd_mgmt_programmed(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
+ rtw_reg_match(next_regd->regulatory, rtwdev->efuse.country_code)) {
+ next_regd->state = RTW_REGD_STATE_PROGRAMMED;
+ return true;
+ }
+
+ return false;
+}
+
+static bool rtw_regd_mgmt_setting(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+
+ if (request->initiator != NL80211_REGDOM_SET_BY_USER)
+ return false;
+
+ next_regd->state = RTW_REGD_STATE_SETTING;
+
+ if (rtw_reg_is_ww(next_regd->regulatory)) {
+ next_regd->state = RTW_REGD_STATE_WORLDWIDE;
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+ }
+
+ return true;
+}
+
+static bool (*const rtw_regd_handler[RTW_REGD_STATE_NR])
+ (struct rtw_dev *, struct rtw_regd *, struct regulatory_request *) = {
+ [RTW_REGD_STATE_WORLDWIDE] = rtw_regd_mgmt_worldwide,
+ [RTW_REGD_STATE_PROGRAMMED] = rtw_regd_mgmt_programmed,
+ [RTW_REGD_STATE_SETTING] = rtw_regd_mgmt_setting,
+};
+
+static bool rtw_regd_state_hdl(struct rtw_dev *rtwdev,
+ struct rtw_regd *next_regd,
+ struct regulatory_request *request)
+{
+ next_regd->regulatory = rtw_reg_find_by_name(request->alpha2);
+ next_regd->dfs_region = request->dfs_region;
+ return rtw_regd_handler[rtwdev->regd.state](rtwdev, next_regd, request);
+}
+
+static
void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct rtw_dev *rtwdev = hw->priv;
struct rtw_hal *hal = &rtwdev->hal;
+ struct rtw_regd next_regd = {0};
+ bool hdl;
+
+ hdl = rtw_regd_state_hdl(rtwdev, &next_regd, request);
+ if (!hdl) {
+ rtw_dbg(rtwdev, RTW_DBG_REGD,
+ "regd state %d: ignore request %c%c of initiator %d\n",
+ rtwdev->regd.state,
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ return;
+ }
+
+ rtw_dbg(rtwdev, RTW_DBG_REGD, "regd state: %d -> %d\n",
+ rtwdev->regd.state, next_regd.state);
- rtw_regd_notifier_apply(rtwdev, wiphy, request);
- rtw_dbg(rtwdev, RTW_DBG_REGD,
- "get alpha2 %c%c from initiator %d, mapping to chplan 0x%x, txregd %d\n",
- request->alpha2[0], request->alpha2[1], request->initiator,
- rtwdev->regd.chplan, rtwdev->regd.txpwr_regd);
+ rtwdev->regd = next_regd;
+ rtw_dbg_regd_dump(rtwdev, "get alpha2 %c%c from initiator %d: ",
+ request->alpha2[0],
+ request->alpha2[1],
+ request->initiator);
+ rtw_phy_adaptivity_set_mode(rtwdev);
rtw_phy_set_tx_power_level(rtwdev, hal->current_channel);
}
+
+u8 rtw_regd_get(struct rtw_dev *rtwdev)
+{
+ struct rtw_hal *hal = &rtwdev->hal;
+ u8 band = hal->current_band_type;
+
+ return band == RTW_BAND_2G ?
+ rtwdev->regd.regulatory->txpwr_regd_2g :
+ rtwdev->regd.regulatory->txpwr_regd_5g;
+}
+EXPORT_SYMBOL(rtw_regd_get);
+
+struct rtw_regd_alternative_t {
+ bool set;
+ u8 alt;
+};
+
+#define DECL_REGD_ALT(_regd, _regd_alt) \
+ [(_regd)] = {.set = true, .alt = (_regd_alt)}
+
+static const struct rtw_regd_alternative_t
+rtw_regd_alt[RTW_REGD_MAX] = {
+ DECL_REGD_ALT(RTW_REGD_IC, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_KCC, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_ACMA, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_CHILE, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_UKRAINE, RTW_REGD_ETSI),
+ DECL_REGD_ALT(RTW_REGD_MEXICO, RTW_REGD_FCC),
+ DECL_REGD_ALT(RTW_REGD_CN, RTW_REGD_ETSI),
+};
+
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt)
+{
+ if (!rtw_regd_alt[regd].set)
+ return false;
+
+ *regd_alt = rtw_regd_alt[regd].alt;
+ return true;
+}
diff --git a/drivers/net/wireless/realtek/rtw88/regd.h b/drivers/net/wireless/realtek/rtw88/regd.h
index 5d4578331788..34cb13d0cd9e 100644
--- a/drivers/net/wireless/realtek/rtw88/regd.h
+++ b/drivers/net/wireless/realtek/rtw88/regd.h
@@ -64,8 +64,8 @@ enum country_code_type {
COUNTRY_CODE_MAX
};
-int rtw_regd_init(struct rtw_dev *rtwdev,
- void (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request));
-void rtw_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+int rtw_regd_init(struct rtw_dev *rtwdev);
+int rtw_regd_hint(struct rtw_dev *rtwdev);
+u8 rtw_regd_get(struct rtw_dev *rtwdev);
+bool rtw_regd_has_alt(u8 regd, u8 *regd_alt);
#endif
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
index 785b8181513f..80a6f4da6acd 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c
@@ -14,6 +14,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static const s8 lna_gain_table_0[8] = {22, 8, -6, -22, -31, -40, -46, -52};
static const s8 lna_gain_table_1[16] = {10, 6, 2, -2, -6, -10, -14, -17,
@@ -60,6 +61,9 @@ static int rtw8821c_read_efuse(struct rtw_dev *rtwdev, u8 *log_map)
for (i = 0; i < 4; i++)
efuse->txpwr_idx_table[i] = map->txpwr_idx_table[i];
+ if (rtwdev->efuse.rfe_option == 2 || rtwdev->efuse.rfe_option == 4)
+ efuse->txpwr_idx_table[0].pwr_idx_2g = map->txpwr_idx_table[1].pwr_idx_2g;
+
switch (rtw_hci_type(rtwdev)) {
case RTW_HCI_TYPE_PCIE:
rtw8821ce_efuse_parsing(efuse, map);
@@ -304,7 +308,8 @@ static void rtw8821c_set_channel_rf(struct rtw_dev *rtwdev, u8 channel, u8 bw)
if (channel <= 14) {
if (rtwdev->efuse.rfe_option == 0)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_WLG);
- else if (rtwdev->efuse.rfe_option == 2)
+ else if (rtwdev->efuse.rfe_option == 2 ||
+ rtwdev->efuse.rfe_option == 4)
rtw8821c_switch_rf_set(rtwdev, SWITCH_TO_BTG);
rtw_write_rf(rtwdev, RF_PATH_A, RF_LUTDBG, BIT(6), 0x1);
rtw_write_rf(rtwdev, RF_PATH_A, 0x64, 0xf, 0xf);
@@ -773,6 +778,15 @@ static void rtw8821c_coex_cfg_ant_switch(struct rtw_dev *rtwdev, u8 ctrl_type,
if (switch_status == coex_dm->cur_switch_status)
return;
+ if (coex_rfe->wlg_at_btg) {
+ ctrl_type = COEX_SWITCH_CTRL_BY_BBSW;
+
+ if (coex_rfe->ant_switch_polarity)
+ pos_type = COEX_SWITCH_TO_WLA;
+ else
+ pos_type = COEX_SWITCH_TO_WLG_BT;
+ }
+
coex_dm->cur_switch_status = switch_status;
if (coex_rfe->ant_switch_diversity &&
@@ -993,7 +1007,7 @@ static void rtw8821c_pwrtrack_set(struct rtw_dev *rtwdev)
s8 pwr_idx_offset_lower;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1498,6 +1512,7 @@ static const struct rtw_intf_phy_para_table phy_para_table_8821c = {
static const struct rtw_rfe_def rtw8821c_rfe_defs[] = {
[0] = RTW_DEF_RFE(8821c, 0, 0),
[2] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
+ [4] = RTW_DEF_RFE_EXT(8821c, 0, 0, 2),
};
static struct rtw_hw_reg rtw8821c_dig[] = {
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
index f1789155e901..c409c8c29ec8 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c
@@ -15,6 +15,7 @@
#include "reg.h"
#include "debug.h"
#include "bf.h"
+#include "regd.h"
static void rtw8822b_config_trx_mode(struct rtw_dev *rtwdev, u8 tx_path,
u8 rx_path, bool is_tx2_path);
@@ -1436,7 +1437,7 @@ static void rtw8822b_pwrtrack_set(struct rtw_dev *rtwdev, u8 path)
u8 pwr_idx_offset, tx_pwr_idx;
u8 channel = rtwdev->hal.current_channel;
u8 band_width = rtwdev->hal.current_band_width;
- u8 regd = rtwdev->regd.txpwr_regd;
+ u8 regd = rtw_regd_get(rtwdev);
u8 tx_rate = dm_info->tx_rate;
u8 max_pwr_idx = rtwdev->chip->max_power_index;
@@ -1552,6 +1553,39 @@ static void rtw8822b_bf_config_bfee(struct rtw_dev *rtwdev, struct rtw_vif *vif,
rtw_warn(rtwdev, "wrong bfee role\n");
}
+static void rtw8822b_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822B_EDCCA_MAX, RTW8822B_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+ rtw_write32_mask(rtwdev, REG_EDCCA_SOURCE, BIT_SOURCE_OPTION,
+ RTW8822B_EDCCA_SRC_DEF);
+ rtw_write32_mask(rtwdev, REG_EDCCA_POW_MA, BIT_MA_LEVEL, 0);
+
+ /* edcca decision opt */
+ rtw_write32_set(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822b_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ l2h = min_t(s8, igi, dm_info->l2h_th_ini);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822b[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -2125,6 +2159,8 @@ static struct rtw_chip_ops rtw8822b_ops = {
.config_bfee = rtw8822b_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822b_adaptivity_init,
+ .adaptivity = rtw8822b_adaptivity,
.coex_set_init = rtw8822b_coex_cfg_init,
.coex_set_ant_switch = rtw8822b_coex_cfg_ant_switch,
@@ -2454,6 +2490,11 @@ static const struct rtw_reg_domain coex_info_hw_regs_8822b[] = {
{0xc50, MASKBYTE0, RTW_REG_DOMAIN_MAC8},
};
+static struct rtw_hw_reg_offset rtw8822b_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE0}, .offset = 0},
+ [EDCCA_TH_H2L_IDX] = {{.addr = 0x8a4, .mask = MASKBYTE1}, .offset = 0},
+};
+
struct rtw_chip_info rtw8822b_hw_spec = {
.ops = &rtw8822b_ops,
.id = RTW_CHIP_TYPE_8822B,
@@ -2502,6 +2543,9 @@ struct rtw_chip_info rtw8822b_hw_spec = {
.bfer_su_max_num = 2,
.bfer_mu_max_num = 1,
.rx_ldpc = true,
+ .edcca_th = rtw8822b_edcca_th,
+ .l2h_th_ini_cs = 10 + EDCCA_IGI_BASE,
+ .l2h_th_ini_ad = -14 + EDCCA_IGI_BASE,
.coex_para_ver = 0x20070206,
.bt_desired_ver = 0x6,
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.h b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
index 6211f4b547b9..3fff8b881854 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822b.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.h
@@ -140,6 +140,8 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822B_EDCCA_MAX 0x7f
+#define RTW8822B_EDCCA_SRC_DEF 1
#define REG_HTSTFWT 0x800
#define REG_RXPSEL 0x808
#define BIT_RX_PSEL_RST (BIT(28) | BIT(29))
@@ -152,11 +154,17 @@ _rtw_write32s_mask(struct rtw_dev *rtwdev, u32 addr, u32 mask, u32 data)
#define REG_L1PKWT 0x840
#define REG_MRC 0x850
#define REG_CLKTRK 0x860
+#define REG_EDCCA_POW_MA 0x8a0
+#define BIT_MA_LEVEL GENMASK(1, 0)
#define REG_ADCCLK 0x8ac
#define REG_ADC160 0x8c4
#define REG_ADC40 0x8c8
+#define REG_EDCCA_DECISION 0x8dc
+#define BIT_EDCCA_OPTION BIT(5)
#define REG_CDDTXP 0x93c
#define REG_TXPSEL1 0x940
+#define REG_EDCCA_SOURCE 0x944
+#define BIT_SOURCE_OPTION GENMASK(29, 28)
#define REG_ACBB0 0x948
#define REG_ACBBRXFIR 0x94c
#define REG_ACGG2TBL 0x958
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
index f3ad079967a6..46b881e8e4fe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c
@@ -4497,6 +4497,39 @@ static void rtw8822c_pwr_track(struct rtw_dev *rtwdev)
dm_info->pwr_trk_triggered = false;
}
+static void rtw8822c_adaptivity_init(struct rtw_dev *rtwdev)
+{
+ rtw_phy_set_edcca_th(rtwdev, RTW8822C_EDCCA_MAX, RTW8822C_EDCCA_MAX);
+
+ /* mac edcca state setting */
+ rtw_write32_clr(rtwdev, REG_TX_PTCL_CTRL, BIT_DIS_EDCCA);
+ rtw_write32_set(rtwdev, REG_RD_CTRL, BIT_EDCCA_MSK_CNTDOWN_EN);
+
+	/* edcca decision opt */
+ rtw_write32_clr(rtwdev, REG_EDCCA_DECISION, BIT_EDCCA_OPTION);
+}
+
+static void rtw8822c_adaptivity(struct rtw_dev *rtwdev)
+{
+ struct rtw_dm_info *dm_info = &rtwdev->dm_info;
+ s8 l2h, h2l;
+ u8 igi;
+
+ igi = dm_info->igi_history[0];
+ if (dm_info->edcca_mode == RTW_EDCCA_NORMAL) {
+ l2h = max_t(s8, igi + EDCCA_IGI_L2H_DIFF, EDCCA_TH_L2H_LB);
+ h2l = l2h - EDCCA_L2H_H2L_DIFF_NORMAL;
+ } else {
+ if (igi < dm_info->l2h_th_ini - EDCCA_ADC_BACKOFF)
+ l2h = igi + EDCCA_ADC_BACKOFF;
+ else
+ l2h = dm_info->l2h_th_ini;
+ h2l = l2h - EDCCA_L2H_H2L_DIFF;
+ }
+
+ rtw_phy_set_edcca_th(rtwdev, l2h, h2l);
+}
+
static const struct rtw_pwr_seq_cmd trans_carddis_to_cardemu_8822c[] = {
{0x0086,
RTW_PWR_CUT_ALL_MSK,
@@ -4912,6 +4945,8 @@ static struct rtw_chip_ops rtw8822c_ops = {
.config_bfee = rtw8822c_bf_config_bfee,
.set_gid_table = rtw_bf_set_gid_table,
.cfg_csi_rate = rtw_bf_cfg_csi_rate,
+ .adaptivity_init = rtw8822c_adaptivity_init,
+ .adaptivity = rtw8822c_adaptivity,
.cfo_init = rtw8822c_cfo_init,
.cfo_track = rtw8822c_cfo_track,
.config_tx_path = rtw8822c_config_tx_path,
@@ -5197,6 +5232,15 @@ static const struct rtw_pwr_track_tbl rtw8822c_rtw_pwr_track_tbl = {
.pwrtrk_2g_ccka_p = rtw8822c_pwrtrk_2g_cck_a_p,
};
+static struct rtw_hw_reg_offset rtw8822c_edcca_th[] = {
+ [EDCCA_TH_L2H_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE2}, .offset = 0x80
+ },
+ [EDCCA_TH_H2L_IDX] = {
+ {.addr = 0x84c, .mask = MASKBYTE3}, .offset = 0x80
+ },
+};
+
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support rtw_wowlan_stub_8822c = {
.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_GTK_REKEY_FAILURE |
@@ -5289,6 +5333,9 @@ struct rtw_chip_info rtw8822c_hw_spec = {
.bfer_mu_max_num = 1,
.rx_ldpc = true,
.tx_stbc = true,
+ .edcca_th = rtw8822c_edcca_th,
+ .l2h_th_ini_cs = 60,
+ .l2h_th_ini_ad = 45,
#ifdef CONFIG_PM
.wow_fw_name = "rtw88/rtw8822c_wow_fw.bin",
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.h b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
index 364afc6d851b..3df627419d81 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8822c.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.h
@@ -162,6 +162,7 @@ const struct rtw_table name ## _tbl = { \
#define GET_PHY_STAT_P1_RXSNR_B(phy_stat) \
le32_get_bits(*((__le32 *)(phy_stat) + 0x06), GENMASK(15, 8))
+#define RTW8822C_EDCCA_MAX 0x7f
#define REG_ANAPARLDO_POW_MAC 0x0029
#define BIT_LDOE25_PON BIT(0)
#define XCAP_MASK GENMASK(6, 0)
@@ -174,6 +175,8 @@ const struct rtw_table name ## _tbl = { \
#define REG_ANTMAP0 0x820
#define BIT_ANT_PATH GENMASK(1, 0)
#define REG_ANTMAP 0x824
+#define REG_EDCCA_DECISION 0x844
+#define BIT_EDCCA_OPTION GENMASK(30, 29)
#define REG_DYMPRITH 0x86c
#define REG_DYMENTH0 0x870
#define REG_DYMENTH 0x874
diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig
new file mode 100644
index 000000000000..37e5def24d9f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/Kconfig
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+menuconfig RTW89
+ tristate "Realtek 802.11ax wireless chips support"
+ depends on MAC80211
+ help
+	  This module adds support for mac80211-based wireless drivers that
+	  enable Realtek IEEE 802.11ax wireless chipsets.
+
+	  If you choose to build it as a module, it will be called rtw89.
+
+if RTW89
+
+config RTW89_CORE
+ tristate
+
+config RTW89_PCI
+ tristate
+
+config RTW89_8852AE
+ tristate "Realtek 8852AE PCI wireless network adapter"
+ depends on PCI
+ select RTW89_CORE
+ select RTW89_PCI
+ help
+	  Selecting this option will enable support for the 8852AE chipset,
+	  an 802.11ax PCIe wireless network adapter.
+
+config RTW89_DEBUG
+ bool
+
+config RTW89_DEBUGMSG
+ bool "Realtek rtw89 debug message support"
+ depends on RTW89_CORE
+ select RTW89_DEBUG
+ help
+ Enable debug message support
+
+	  If unsure, say Y to make it easier to debug problems
+
+config RTW89_DEBUGFS
+ bool "Realtek rtw89 debugfs support"
+ depends on RTW89_CORE
+ select RTW89_DEBUG
+ help
+ Enable debugfs support
+
+	  If unsure, say Y to make it easier to debug problems
+
+endif
diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile
new file mode 100644
index 000000000000..077e8fe23f60
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/Makefile
@@ -0,0 +1,25 @@
+# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+
+obj-$(CONFIG_RTW89_CORE) += rtw89_core.o
+rtw89_core-y += core.o \
+ mac80211.o \
+ mac.o \
+ phy.o \
+ fw.o \
+ rtw8852a.o \
+ rtw8852a_table.o \
+ rtw8852a_rfk.o \
+ rtw8852a_rfk_table.o \
+ cam.o \
+ efuse.o \
+ regd.o \
+ sar.o \
+ coex.o \
+ ps.o \
+ ser.o
+
+rtw89_core-$(CONFIG_RTW89_DEBUG) += debug.o
+
+obj-$(CONFIG_RTW89_PCI) += rtw89_pci.o
+rtw89_pci-y := pci.o
+
diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c
new file mode 100644
index 000000000000..ad7a8155dbed
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/cam.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "cam.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+
+static struct sk_buff *
+rtw89_cam_get_sec_key_cmd(struct rtw89_dev *rtwdev,
+ struct rtw89_sec_cam_entry *sec_cam,
+ bool ext_key)
+{
+ struct sk_buff *skb;
+ u32 cmd_len = H2C_SEC_CAM_LEN;
+ u32 key32[4];
+ u8 *cmd;
+ int i, j;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(cmd_len);
+ if (!skb)
+ return NULL;
+
+ skb_put_zero(skb, cmd_len);
+
+ for (i = 0; i < 4; i++) {
+ j = i * 4;
+ j += ext_key ? 16 : 0;
+ key32[i] = FIELD_PREP(GENMASK(7, 0), sec_cam->key[j + 0]) |
+ FIELD_PREP(GENMASK(15, 8), sec_cam->key[j + 1]) |
+ FIELD_PREP(GENMASK(23, 16), sec_cam->key[j + 2]) |
+ FIELD_PREP(GENMASK(31, 24), sec_cam->key[j + 3]);
+ }
+
+ cmd = skb->data;
+ RTW89_SET_FWCMD_SEC_IDX(cmd, sec_cam->sec_cam_idx + (ext_key ? 1 : 0));
+ RTW89_SET_FWCMD_SEC_OFFSET(cmd, sec_cam->offset);
+ RTW89_SET_FWCMD_SEC_LEN(cmd, sec_cam->len);
+ RTW89_SET_FWCMD_SEC_TYPE(cmd, sec_cam->type);
+ RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, ext_key);
+ RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, sec_cam->spp_mode);
+ RTW89_SET_FWCMD_SEC_KEY0(cmd, key32[0]);
+ RTW89_SET_FWCMD_SEC_KEY1(cmd, key32[1]);
+ RTW89_SET_FWCMD_SEC_KEY2(cmd, key32[2]);
+ RTW89_SET_FWCMD_SEC_KEY3(cmd, key32[3]);
+
+ return skb;
+}
+
+static int rtw89_cam_send_sec_key_cmd(struct rtw89_dev *rtwdev,
+ struct rtw89_sec_cam_entry *sec_cam)
+{
+ struct sk_buff *skb, *ext_skb;
+ int ret;
+
+ skb = rtw89_cam_get_sec_key_cmd(rtwdev, sec_cam, false);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to get sec key command\n");
+ return -ENOMEM;
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb,
+ FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_SEC_CAM,
+ H2C_FUNC_MAC_SEC_UPD, 1, 0,
+ H2C_SEC_CAM_LEN);
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send sec key h2c: %d\n", ret);
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ if (!sec_cam->ext_key)
+ return 0;
+
+ ext_skb = rtw89_cam_get_sec_key_cmd(rtwdev, sec_cam, true);
+ if (!ext_skb) {
+ rtw89_err(rtwdev, "failed to get ext sec key command\n");
+ return -ENOMEM;
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, ext_skb,
+ FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_SEC_CAM,
+ H2C_FUNC_MAC_SEC_UPD,
+ 1, 0, H2C_SEC_CAM_LEN);
+ ret = rtw89_h2c_tx(rtwdev, ext_skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send ext sec key h2c: %d\n", ret);
+ dev_kfree_skb(ext_skb);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_cam_get_avail_sec_cam(struct rtw89_dev *rtwdev,
+ u8 *sec_cam_idx, bool ext_key)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 sec_cam_num = chip->scam_num;
+ u8 idx = 0;
+
+ if (!ext_key) {
+ idx = find_first_zero_bit(cam_info->sec_cam_map, sec_cam_num);
+ if (idx >= sec_cam_num)
+ return -EBUSY;
+
+ set_bit(idx, cam_info->sec_cam_map);
+ *sec_cam_idx = idx;
+
+ return 0;
+ }
+
+again:
+ idx = find_next_zero_bit(cam_info->sec_cam_map, sec_cam_num, idx);
+ if (idx >= sec_cam_num - 1)
+ return -EBUSY;
+ /* ext keys need two cam entries for 256-bit key */
+ if (test_bit(idx + 1, cam_info->sec_cam_map)) {
+ idx++;
+ goto again;
+ }
+
+ set_bit(idx, cam_info->sec_cam_map);
+ set_bit(idx + 1, cam_info->sec_cam_map);
+ *sec_cam_idx = idx;
+
+ return 0;
+}
+
+static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam,
+ struct rtw89_sec_cam_entry *sec_cam,
+ struct ieee80211_key_conf *key,
+ u8 *key_idx)
+{
+ u8 idx;
+
+ /* RTW89_ADDR_CAM_SEC_NONE : not enabled
+ * RTW89_ADDR_CAM_SEC_ALL_UNI : 0 - 6 unicast
+ * RTW89_ADDR_CAM_SEC_NORMAL : 0 - 1 unicast, 2 - 4 group, 5 - 6 BIP
+ * RTW89_ADDR_CAM_SEC_4GROUP : 0 - 1 unicast, 2 - 5 group, 6 BIP
+ */
+ switch (addr_cam->sec_ent_mode) {
+ case RTW89_ADDR_CAM_SEC_NONE:
+ return -EINVAL;
+ case RTW89_ADDR_CAM_SEC_ALL_UNI:
+ if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EINVAL;
+ idx = find_first_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM);
+ if (idx >= RTW89_SEC_CAM_IN_ADDR_CAM)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ case RTW89_ADDR_CAM_SEC_NORMAL:
+ if (sec_cam->type == RTW89_SEC_KEY_TYPE_BIP_CCMP128) {
+ idx = find_next_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM, 5);
+ if (idx > 6)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ idx = find_next_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM, 0);
+ if (idx > 1)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ }
+
+ /* Group keys */
+ idx = find_next_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM, 2);
+ if (idx > 4)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ case RTW89_ADDR_CAM_SEC_4GROUP:
+ if (sec_cam->type == RTW89_SEC_KEY_TYPE_BIP_CCMP128) {
+ if (test_bit(6, addr_cam->sec_cam_map))
+ return -EINVAL;
+ *key_idx = 6;
+ break;
+ }
+
+ if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
+ idx = find_next_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM, 0);
+ if (idx > 1)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ }
+
+ /* Group keys */
+ idx = find_next_zero_bit(addr_cam->sec_cam_map,
+ RTW89_SEC_CAM_IN_ADDR_CAM, 2);
+ if (idx > 5)
+ return -EBUSY;
+ *key_idx = idx;
+ break;
+ }
+
+ return 0;
+}
+
+static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ struct rtw89_sec_cam_entry *sec_cam)
+{
+ struct rtw89_vif *rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam;
+ u8 key_idx = 0;
+ int ret;
+
+ if (!vif) {
+ rtw89_err(rtwdev, "No iface for adding sec cam\n");
+ return -EINVAL;
+ }
+
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ addr_cam = &rtwvif->addr_cam;
+ ret = rtw89_cam_get_addr_cam_key_idx(addr_cam, sec_cam, key, &key_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to get addr cam key idx %d, %d\n",
+ addr_cam->sec_ent_mode, sec_cam->type);
+ return ret;
+ }
+
+ key->hw_key_idx = key_idx;
+ addr_cam->sec_ent_keyid[key_idx] = key->keyidx;
+ addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx;
+ addr_cam->sec_entries[key_idx] = sec_cam;
+ set_bit(key_idx, addr_cam->sec_cam_map);
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n",
+ ret);
+ clear_bit(key_idx, addr_cam->sec_cam_map);
+ addr_cam->sec_entries[key_idx] = NULL;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ u8 hw_key_type, bool ext_key)
+{
+ struct rtw89_sec_cam_entry *sec_cam = NULL;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 sec_cam_idx;
+ int ret;
+
+ /* maximum key length 256-bit */
+ if (key->keylen > 32) {
+ rtw89_err(rtwdev, "invalid sec key length %d\n", key->keylen);
+ return -EINVAL;
+ }
+
+ ret = rtw89_cam_get_avail_sec_cam(rtwdev, &sec_cam_idx, ext_key);
+ if (ret) {
+ rtw89_warn(rtwdev, "no available sec cam: %d ext: %d\n",
+ ret, ext_key);
+ return ret;
+ }
+
+ sec_cam = kzalloc(sizeof(*sec_cam), GFP_KERNEL);
+ if (!sec_cam) {
+ ret = -ENOMEM;
+ goto err_release_cam;
+ }
+
+ sec_cam->sec_cam_idx = sec_cam_idx;
+ sec_cam->type = hw_key_type;
+ sec_cam->len = RTW89_SEC_CAM_LEN;
+ sec_cam->ext_key = ext_key;
+ memcpy(sec_cam->key, key->key, key->keylen);
+ ret = rtw89_cam_send_sec_key_cmd(rtwdev, sec_cam);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send sec key cmd: %d\n", ret);
+ goto err_release_cam;
+ }
+
+ /* associate with addr cam */
+ ret = rtw89_cam_attach_sec_cam(rtwdev, vif, sta, key, sec_cam);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to attach sec cam: %d\n", ret);
+ goto err_release_cam;
+ }
+
+ return 0;
+
+err_release_cam:
+ kfree(sec_cam);
+ clear_bit(sec_cam_idx, cam_info->sec_cam_map);
+ if (ext_key)
+ clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map);
+
+ return ret;
+}
+
+int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ u8 hw_key_type;
+ bool ext_key = false;
+ int ret;
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ hw_key_type = RTW89_SEC_KEY_TYPE_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ hw_key_type = RTW89_SEC_KEY_TYPE_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ hw_key_type = RTW89_SEC_KEY_TYPE_CCMP128;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ hw_key_type = RTW89_SEC_KEY_TYPE_CCMP256;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ ext_key = true;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ hw_key_type = RTW89_SEC_KEY_TYPE_GCMP128;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ hw_key_type = RTW89_SEC_KEY_TYPE_GCMP256;
+ key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
+ ext_key = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ ret = rtw89_cam_sec_key_install(rtwdev, vif, sta, key, hw_key_type,
+ ext_key);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to install key type %d ext %d: %d\n",
+ hw_key_type, ext_key, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool inform_fw)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam;
+ struct rtw89_sec_cam_entry *sec_cam;
+ u8 key_idx = key->hw_key_idx;
+ u8 sec_cam_idx;
+ int ret = 0;
+
+ if (!vif) {
+ rtw89_err(rtwdev, "No iface for deleting sec cam\n");
+ return -EINVAL;
+ }
+
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ addr_cam = &rtwvif->addr_cam;
+ sec_cam = addr_cam->sec_entries[key_idx];
+ if (!sec_cam)
+ return -EINVAL;
+
+ /* detach sec cam from addr cam */
+ clear_bit(key_idx, addr_cam->sec_cam_map);
+ addr_cam->sec_entries[key_idx] = NULL;
+ if (inform_fw) {
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret)
+ rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret);
+ }
+
+	/* Clearing the valid bit in the addr cam disables the sec cam,
+	 * so we don't need to send the H2C command again.
+	 */
+ sec_cam_idx = sec_cam->sec_cam_idx;
+ clear_bit(sec_cam_idx, cam_info->sec_cam_map);
+ if (sec_cam->ext_key)
+ clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map);
+
+ kfree(sec_cam);
+
+ return ret;
+}
+
+static void rtw89_cam_reset_key_iter(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *data)
+{
+ struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ rtw89_cam_sec_key_del(rtwdev, vif, sta, key, false);
+ rtw89_cam_deinit(rtwdev, rtwvif);
+}
+
+void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ addr_cam->valid = false;
+ bssid_cam->valid = false;
+ clear_bit(addr_cam->addr_cam_idx, cam_info->addr_cam_map);
+ clear_bit(bssid_cam->bssid_cam_idx, cam_info->bssid_cam_map);
+}
+
+void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev)
+{
+ rcu_read_lock();
+ ieee80211_iter_keys_rcu(rtwdev->hw, NULL, rtw89_cam_reset_key_iter, rtwdev);
+ rcu_read_unlock();
+}
+
+static int rtw89_cam_get_avail_addr_cam(struct rtw89_dev *rtwdev,
+ u8 *addr_cam_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 addr_cam_num = chip->acam_num;
+ u8 idx;
+
+ idx = find_first_zero_bit(cam_info->addr_cam_map, addr_cam_num);
+ if (idx >= addr_cam_num)
+ return -EBUSY;
+
+ set_bit(idx, cam_info->addr_cam_map);
+ *addr_cam_idx = idx;
+
+ return 0;
+}
+
+static int rtw89_cam_init_addr_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ u8 addr_cam_idx;
+ int i;
+ int ret;
+
+ ret = rtw89_cam_get_avail_addr_cam(rtwdev, &addr_cam_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to get available addr cam\n");
+ return ret;
+ }
+
+ addr_cam->addr_cam_idx = addr_cam_idx;
+ addr_cam->len = ADDR_CAM_ENT_SIZE;
+ addr_cam->offset = 0;
+ addr_cam->valid = true;
+ addr_cam->addr_mask = 0;
+ addr_cam->mask_sel = RTW89_NO_MSK;
+ bitmap_zero(addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
+ ether_addr_copy(addr_cam->sma, rtwvif->mac_addr);
+
+ for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) {
+ addr_cam->sec_ent_keyid[i] = 0;
+ addr_cam->sec_ent[i] = 0;
+ }
+
+ return 0;
+}
+
+static int rtw89_cam_get_avail_bssid_cam(struct rtw89_dev *rtwdev,
+ u8 *bssid_cam_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_cam_info *cam_info = &rtwdev->cam_info;
+ u8 bssid_cam_num = chip->bcam_num;
+ u8 idx;
+
+ idx = find_first_zero_bit(cam_info->bssid_cam_map, bssid_cam_num);
+ if (idx >= bssid_cam_num)
+ return -EBUSY;
+
+ set_bit(idx, cam_info->bssid_cam_map);
+ *bssid_cam_idx = idx;
+
+ return 0;
+}
+
+static int rtw89_cam_init_bssid_cam(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+ u8 bssid_cam_idx;
+ int ret;
+
+ ret = rtw89_cam_get_avail_bssid_cam(rtwdev, &bssid_cam_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to get available bssid cam\n");
+ return ret;
+ }
+
+ bssid_cam->bssid_cam_idx = bssid_cam_idx;
+ bssid_cam->phy_idx = rtwvif->phy_idx;
+ bssid_cam->len = BSSID_CAM_ENT_SIZE;
+ bssid_cam->offset = 0;
+ bssid_cam->valid = true;
+ ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
+
+ return 0;
+}
+
+void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ ether_addr_copy(addr_cam->tma, rtwvif->bssid);
+ ether_addr_copy(bssid_cam->bssid, rtwvif->bssid);
+}
+
+int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+ int ret;
+
+ ret = rtw89_cam_init_addr_cam(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init addr cam\n");
+ return ret;
+ }
+
+ ret = rtw89_cam_init_bssid_cam(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init bssid cam\n");
+ return ret;
+ }
+
+ /* associate addr cam with bssid cam */
+ addr_cam->bssid_cam_idx = bssid_cam->bssid_cam_idx;
+
+ return 0;
+}
+
+int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u8 *cmd)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam;
+ u8 bss_color = vif->bss_conf.he_bss_color.color;
+
+ FWCMD_SET_ADDR_BSSID_IDX(cmd, bssid_cam->bssid_cam_idx);
+ FWCMD_SET_ADDR_BSSID_OFFSET(cmd, bssid_cam->offset);
+ FWCMD_SET_ADDR_BSSID_LEN(cmd, bssid_cam->len);
+ FWCMD_SET_ADDR_BSSID_VALID(cmd, bssid_cam->valid);
+ FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, bssid_cam->phy_idx);
+ FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, bss_color);
+
+ FWCMD_SET_ADDR_BSSID_BSSID0(cmd, bssid_cam->bssid[0]);
+ FWCMD_SET_ADDR_BSSID_BSSID1(cmd, bssid_cam->bssid[1]);
+ FWCMD_SET_ADDR_BSSID_BSSID2(cmd, bssid_cam->bssid[2]);
+ FWCMD_SET_ADDR_BSSID_BSSID3(cmd, bssid_cam->bssid[3]);
+ FWCMD_SET_ADDR_BSSID_BSSID4(cmd, bssid_cam->bssid[4]);
+ FWCMD_SET_ADDR_BSSID_BSSID5(cmd, bssid_cam->bssid[5]);
+
+ return 0;
+}
+
+static u8 rtw89_cam_addr_hash(u8 start, u8 *addr)
+{
+ u8 hash = 0;
+ u8 i;
+
+ for (i = start; i < ETH_ALEN; i++)
+ hash ^= addr[i];
+
+ return hash;
+}
+
+void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ u8 *cmd)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_sta *sta;
+ struct rtw89_sta *rtwsta;
+ struct rtw89_addr_cam_entry *addr_cam = &rtwvif->addr_cam;
+ u8 sma_hash, tma_hash, addr_msk_start;
+ u8 sma_start = 0;
+ u8 tma_start = 0;
+
+ if (addr_cam->addr_mask != 0) {
+ addr_msk_start = __ffs(addr_cam->addr_mask);
+ if (addr_cam->mask_sel == RTW89_SMA)
+ sma_start = addr_msk_start;
+ else if (addr_cam->mask_sel == RTW89_TMA)
+ tma_start = addr_msk_start;
+ }
+ sma_hash = rtw89_cam_addr_hash(sma_start, rtwvif->mac_addr);
+ tma_hash = rtw89_cam_addr_hash(tma_start, addr_cam->tma);
+
+ FWCMD_SET_ADDR_IDX(cmd, addr_cam->addr_cam_idx);
+ FWCMD_SET_ADDR_OFFSET(cmd, addr_cam->offset);
+ FWCMD_SET_ADDR_LEN(cmd, addr_cam->len);
+
+ FWCMD_SET_ADDR_VALID(cmd, addr_cam->valid);
+ FWCMD_SET_ADDR_NET_TYPE(cmd, rtwvif->net_type);
+ FWCMD_SET_ADDR_BCN_HIT_COND(cmd, rtwvif->bcn_hit_cond);
+ FWCMD_SET_ADDR_HIT_RULE(cmd, rtwvif->hit_rule);
+ FWCMD_SET_ADDR_BB_SEL(cmd, rtwvif->phy_idx);
+ FWCMD_SET_ADDR_ADDR_MASK(cmd, addr_cam->addr_mask);
+ FWCMD_SET_ADDR_MASK_SEL(cmd, addr_cam->mask_sel);
+ FWCMD_SET_ADDR_SMA_HASH(cmd, sma_hash);
+ FWCMD_SET_ADDR_TMA_HASH(cmd, tma_hash);
+
+ FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, addr_cam->bssid_cam_idx);
+
+ FWCMD_SET_ADDR_SMA0(cmd, rtwvif->mac_addr[0]);
+ FWCMD_SET_ADDR_SMA1(cmd, rtwvif->mac_addr[1]);
+ FWCMD_SET_ADDR_SMA2(cmd, rtwvif->mac_addr[2]);
+ FWCMD_SET_ADDR_SMA3(cmd, rtwvif->mac_addr[3]);
+ FWCMD_SET_ADDR_SMA4(cmd, rtwvif->mac_addr[4]);
+ FWCMD_SET_ADDR_SMA5(cmd, rtwvif->mac_addr[5]);
+
+ FWCMD_SET_ADDR_TMA0(cmd, addr_cam->tma[0]);
+ FWCMD_SET_ADDR_TMA1(cmd, addr_cam->tma[1]);
+ FWCMD_SET_ADDR_TMA2(cmd, addr_cam->tma[2]);
+ FWCMD_SET_ADDR_TMA3(cmd, addr_cam->tma[3]);
+ FWCMD_SET_ADDR_TMA4(cmd, addr_cam->tma[4]);
+ FWCMD_SET_ADDR_TMA5(cmd, addr_cam->tma[5]);
+
+ FWCMD_SET_ADDR_PORT_INT(cmd, rtwvif->port);
+ FWCMD_SET_ADDR_TSF_SYNC(cmd, rtwvif->port);
+ FWCMD_SET_ADDR_TF_TRS(cmd, rtwvif->trigger);
+ FWCMD_SET_ADDR_LSIG_TXOP(cmd, rtwvif->lsig_txop);
+ FWCMD_SET_ADDR_TGT_IND(cmd, rtwvif->tgt_ind);
+ FWCMD_SET_ADDR_FRM_TGT_IND(cmd, rtwvif->frm_tgt_ind);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ sta = rtwvif->mgd.ap;
+ if (sta) {
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ FWCMD_SET_ADDR_MACID(cmd, rtwsta->mac_id);
+ FWCMD_SET_ADDR_AID12(cmd, vif->bss_conf.aid & 0xfff);
+ }
+ }
+ FWCMD_SET_ADDR_WOL_PATTERN(cmd, rtwvif->wowlan_pattern);
+ FWCMD_SET_ADDR_WOL_UC(cmd, rtwvif->wowlan_uc);
+ FWCMD_SET_ADDR_WOL_MAGIC(cmd, rtwvif->wowlan_magic);
+ FWCMD_SET_ADDR_WAPI(cmd, addr_cam->wapi);
+ FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, addr_cam->sec_ent_mode);
+ FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, addr_cam->sec_ent_keyid[0]);
+ FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, addr_cam->sec_ent_keyid[1]);
+ FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, addr_cam->sec_ent_keyid[2]);
+ FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, addr_cam->sec_ent_keyid[3]);
+ FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, addr_cam->sec_ent_keyid[4]);
+ FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, addr_cam->sec_ent_keyid[5]);
+ FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, addr_cam->sec_ent_keyid[6]);
+
+ FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, addr_cam->sec_cam_map[0] & 0xff);
+ FWCMD_SET_ADDR_SEC_ENT0(cmd, addr_cam->sec_ent[0]);
+ FWCMD_SET_ADDR_SEC_ENT1(cmd, addr_cam->sec_ent[1]);
+ FWCMD_SET_ADDR_SEC_ENT2(cmd, addr_cam->sec_ent[2]);
+ FWCMD_SET_ADDR_SEC_ENT3(cmd, addr_cam->sec_ent[3]);
+ FWCMD_SET_ADDR_SEC_ENT4(cmd, addr_cam->sec_ent[4]);
+ FWCMD_SET_ADDR_SEC_ENT5(cmd, addr_cam->sec_ent[5]);
+ FWCMD_SET_ADDR_SEC_ENT6(cmd, addr_cam->sec_ent[6]);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/cam.h b/drivers/net/wireless/realtek/rtw89/cam.h
new file mode 100644
index 000000000000..90a20a5375c6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/cam.h
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_CAM_H__
+#define __RTW89_CAM_H__
+
+#include "core.h"
+
+#define RTW89_SEC_CAM_LEN 20
+
+#define FWCMD_SET_ADDR_IDX(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_OFFSET(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_LEN(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 1, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_VALID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(0))
+#define FWCMD_SET_ADDR_NET_TYPE(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(2, 1))
+#define FWCMD_SET_ADDR_BCN_HIT_COND(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(4, 3))
+#define FWCMD_SET_ADDR_HIT_RULE(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(6, 5))
+#define FWCMD_SET_ADDR_BB_SEL(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, BIT(7))
+#define FWCMD_SET_ADDR_ADDR_MASK(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(13, 8))
+#define FWCMD_SET_ADDR_MASK_SEL(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(15, 14))
+#define FWCMD_SET_ADDR_SMA_HASH(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_TMA_HASH(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 2, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_BSSID_CAM_IDX(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 3, value, GENMASK(5, 0))
+#define FWCMD_SET_ADDR_SMA0(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_SMA1(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_SMA2(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_SMA3(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 4, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_SMA4(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_SMA5(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_TMA0(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_TMA1(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 5, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_TMA2(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_TMA3(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_TMA4(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_TMA5(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 6, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_MACID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_PORT_INT(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(10, 8))
+#define FWCMD_SET_ADDR_TSF_SYNC(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(13, 11))
+#define FWCMD_SET_ADDR_TF_TRS(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(14))
+#define FWCMD_SET_ADDR_LSIG_TXOP(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, BIT(15))
+#define FWCMD_SET_ADDR_TGT_IND(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(26, 24))
+#define FWCMD_SET_ADDR_FRM_TGT_IND(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 8, value, GENMASK(29, 27))
+#define FWCMD_SET_ADDR_AID12(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 0))
+#define FWCMD_SET_ADDR_AID12_0(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_AID12_1(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(11, 8))
+#define FWCMD_SET_ADDR_WOL_PATTERN(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(12))
+#define FWCMD_SET_ADDR_WOL_UC(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(13))
+#define FWCMD_SET_ADDR_WOL_MAGIC(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(14))
+#define FWCMD_SET_ADDR_WAPI(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, BIT(15))
+#define FWCMD_SET_ADDR_SEC_ENT_MODE(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(17, 16))
+#define FWCMD_SET_ADDR_SEC_ENT0_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(19, 18))
+#define FWCMD_SET_ADDR_SEC_ENT1_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(21, 20))
+#define FWCMD_SET_ADDR_SEC_ENT2_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(23, 22))
+#define FWCMD_SET_ADDR_SEC_ENT3_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(25, 24))
+#define FWCMD_SET_ADDR_SEC_ENT4_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(27, 26))
+#define FWCMD_SET_ADDR_SEC_ENT5_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(29, 28))
+#define FWCMD_SET_ADDR_SEC_ENT6_KEYID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 9, value, GENMASK(31, 30))
+#define FWCMD_SET_ADDR_SEC_ENT_VALID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_SEC_ENT0(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_SEC_ENT1(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_SEC_ENT2(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 10, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_SEC_ENT3(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_SEC_ENT4(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_SEC_ENT5(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_SEC_ENT6(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 11, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_BSSID_IDX(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_BSSID_OFFSET(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_BSSID_LEN(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 12, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_BSSID_VALID(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(0))
+#define FWCMD_SET_ADDR_BSSID_BB_SEL(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, BIT(1))
+#define FWCMD_SET_ADDR_BSSID_BSS_COLOR(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(13, 8))
+#define FWCMD_SET_ADDR_BSSID_BSSID0(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_BSSID_BSSID1(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 13, value, GENMASK(31, 24))
+#define FWCMD_SET_ADDR_BSSID_BSSID2(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(7, 0))
+#define FWCMD_SET_ADDR_BSSID_BSSID3(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(15, 8))
+#define FWCMD_SET_ADDR_BSSID_BSSID4(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(23, 16))
+#define FWCMD_SET_ADDR_BSSID_BSSID5(cmd, value) \
+ le32p_replace_bits((__le32 *)(cmd) + 14, value, GENMASK(31, 24))
+
+int rtw89_cam_init(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+void rtw89_cam_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+void rtw89_cam_fill_addr_cam_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *vif, u8 *cmd);
+int rtw89_cam_fill_bssid_cam_info(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *vif, u8 *cmd);
+int rtw89_cam_sec_key_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ bool inform_fw);
+void rtw89_cam_bssid_changed(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif);
+void rtw89_cam_reset_keys(struct rtw89_dev *rtwdev);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c
new file mode 100644
index 000000000000..abe4b6549ab2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/coex.c
@@ -0,0 +1,5716 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+
+#define FCXDEF_STEP 50 /* MUST be <= FCXMAX_STEP and match the wl fw */
+
+enum btc_fbtc_tdma_template {
+ CXTD_OFF = 0x0,
+ CXTD_OFF_B2,
+ CXTD_OFF_EXT,
+ CXTD_FIX,
+ CXTD_PFIX,
+ CXTD_AUTO,
+ CXTD_PAUTO,
+ CXTD_AUTO2,
+ CXTD_PAUTO2,
+ CXTD_MAX,
+};
+
+enum btc_fbtc_tdma_type {
+ CXTDMA_OFF = 0x0,
+ CXTDMA_FIX = 0x1,
+ CXTDMA_AUTO = 0x2,
+ CXTDMA_AUTO2 = 0x3,
+ CXTDMA_MAX
+};
+
+enum btc_fbtc_tdma_rx_flow_ctrl {
+ CXFLC_OFF = 0x0,
+ CXFLC_NULLP = 0x1,
+ CXFLC_QOSNULL = 0x2,
+ CXFLC_CTS = 0x3,
+ CXFLC_MAX
+};
+
+enum btc_fbtc_tdma_wlan_tx_pause {
+	CXTPS_OFF = 0x0, /* no wl tx pause */
+ CXTPS_ON = 0x1,
+ CXTPS_MAX
+};
+
+enum btc_mlme_state {
+ MLME_NO_LINK,
+ MLME_LINKING,
+ MLME_LINKED,
+};
+
+#define FCXONESLOT_VER 1
+struct btc_fbtc_1slot {
+ u8 fver;
+ u8 sid; /* slot id */
+ struct rtw89_btc_fbtc_slot slot;
+} __packed;
+
+static const struct rtw89_btc_fbtc_tdma t_def[] = {
+ [CXTD_OFF] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
+ [CXTD_OFF_B2] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 1, 0, 0},
+ [CXTD_OFF_EXT] = { CXTDMA_OFF, CXFLC_OFF, CXTPS_OFF, 0, 0, 3, 0, 0},
+ [CXTD_FIX] = { CXTDMA_FIX, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
+ [CXTD_PFIX] = { CXTDMA_FIX, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0},
+ [CXTD_AUTO] = { CXTDMA_AUTO, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
+ [CXTD_PAUTO] = { CXTDMA_AUTO, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0},
+ [CXTD_AUTO2] = {CXTDMA_AUTO2, CXFLC_OFF, CXTPS_OFF, 0, 0, 0, 0, 0},
+ [CXTD_PAUTO2] = {CXTDMA_AUTO2, CXFLC_NULLP, CXTPS_ON, 0, 5, 0, 0, 0}
+};
+
+#define __DEF_FBTC_SLOT(__dur, __cxtbl, __cxtype) \
+ { .dur = cpu_to_le16(__dur), .cxtbl = cpu_to_le32(__cxtbl), \
+ .cxtype = cpu_to_le16(__cxtype),}
+
+static const struct rtw89_btc_fbtc_slot s_def[] = {
+ [CXST_OFF] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
+ [CXST_B2W] = __DEF_FBTC_SLOT(5, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_W1] = __DEF_FBTC_SLOT(70, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_W2] = __DEF_FBTC_SLOT(70, 0x5a5a5aaa, SLOT_ISO),
+ [CXST_W2B] = __DEF_FBTC_SLOT(15, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_B1] = __DEF_FBTC_SLOT(100, 0x55555555, SLOT_MIX),
+ [CXST_B2] = __DEF_FBTC_SLOT(7, 0x6a5a5a5a, SLOT_MIX),
+ [CXST_B3] = __DEF_FBTC_SLOT(5, 0x55555555, SLOT_MIX),
+ [CXST_B4] = __DEF_FBTC_SLOT(50, 0x55555555, SLOT_MIX),
+ [CXST_LK] = __DEF_FBTC_SLOT(20, 0x5a5a5a5a, SLOT_ISO),
+ [CXST_BLK] = __DEF_FBTC_SLOT(250, 0x55555555, SLOT_MIX),
+ [CXST_E2G] = __DEF_FBTC_SLOT(20, 0x6a5a5a5a, SLOT_MIX),
+ [CXST_E5G] = __DEF_FBTC_SLOT(20, 0xffffffff, SLOT_MIX),
+ [CXST_EBT] = __DEF_FBTC_SLOT(20, 0x55555555, SLOT_MIX),
+ [CXST_ENULL] = __DEF_FBTC_SLOT(7, 0xaaaaaaaa, SLOT_ISO),
+ [CXST_WLK] = __DEF_FBTC_SLOT(250, 0x6a5a6a5a, SLOT_MIX),
+ [CXST_W1FDD] = __DEF_FBTC_SLOT(35, 0xfafafafa, SLOT_ISO),
+ [CXST_B1FDD] = __DEF_FBTC_SLOT(100, 0xffffffff, SLOT_MIX),
+};
+
+static const u32 cxtbl[] = {
+ 0xffffffff, /* 0 */
+ 0xaaaaaaaa, /* 1 */
+ 0x55555555, /* 2 */
+ 0x66555555, /* 3 */
+ 0x66556655, /* 4 */
+ 0x5a5a5a5a, /* 5 */
+ 0x5a5a5aaa, /* 6 */
+ 0xaa5a5a5a, /* 7 */
+ 0x6a5a5a5a, /* 8 */
+ 0x6a5a5aaa, /* 9 */
+ 0x6a5a6a5a, /* 10 */
+ 0x6a5a6aaa, /* 11 */
+ 0x6afa5afa, /* 12 */
+ 0xaaaa5aaa, /* 13 */
+ 0xaaffffaa, /* 14 */
+ 0xaa5555aa, /* 15 */
+ 0xfafafafa, /* 16 */
+ 0xffffddff, /* 17 */
+ 0xdaffdaff, /* 18 */
+ 0xfafadafa /* 19 */
+};
+
+struct rtw89_btc_btf_tlv {
+ u8 type;
+ u8 len;
+ u8 val[1];
+} __packed;
+
+enum btc_btf_set_report_en {
+ RPT_EN_TDMA = BIT(0),
+ RPT_EN_CYCLE = BIT(1),
+ RPT_EN_MREG = BIT(2),
+ RPT_EN_BT_VER_INFO = BIT(3),
+ RPT_EN_BT_SCAN_INFO = BIT(4),
+ RPT_EN_BT_AFH_MAP = BIT(5),
+ RPT_EN_BT_DEVICE_INFO = BIT(6),
+ RPT_EN_WL_ALL = GENMASK(2, 0),
+ RPT_EN_BT_ALL = GENMASK(6, 3),
+ RPT_EN_ALL = GENMASK(6, 0),
+};
+
+#define BTF_SET_REPORT_VER 1
+struct rtw89_btc_btf_set_report {
+ u8 fver;
+ __le32 enable;
+ __le32 para;
+} __packed;
+
+#define BTF_SET_SLOT_TABLE_VER 1
+struct rtw89_btc_btf_set_slot_table {
+ u8 fver;
+ u8 tbl_num;
+ u8 buf[];
+} __packed;
+
+#define BTF_SET_MON_REG_VER 1
+struct rtw89_btc_btf_set_mon_reg {
+ u8 fver;
+ u8 reg_num;
+ u8 buf[];
+} __packed;
+
+enum btc_btf_set_cx_policy {
+ CXPOLICY_TDMA = 0x0,
+ CXPOLICY_SLOT = 0x1,
+ CXPOLICY_TYPE = 0x2,
+ CXPOLICY_MAX,
+};
+
+enum btc_b2w_scoreboard {
+ BTC_BSCB_ACT = BIT(0),
+ BTC_BSCB_ON = BIT(1),
+ BTC_BSCB_WHQL = BIT(2),
+ BTC_BSCB_BT_S1 = BIT(3),
+ BTC_BSCB_A2DP_ACT = BIT(4),
+ BTC_BSCB_RFK_RUN = BIT(5),
+ BTC_BSCB_RFK_REQ = BIT(6),
+ BTC_BSCB_LPS = BIT(7),
+ BTC_BSCB_WLRFK = BIT(11),
+ BTC_BSCB_BT_HILNA = BIT(13),
+ BTC_BSCB_BT_CONNECT = BIT(16),
+ BTC_BSCB_PATCH_CODE = BIT(30),
+ BTC_BSCB_ALL = GENMASK(30, 0),
+};
+
+enum btc_phymap {
+ BTC_PHY_0 = BIT(0),
+ BTC_PHY_1 = BIT(1),
+ BTC_PHY_ALL = BIT(0) | BIT(1),
+};
+
+enum btc_cx_state_map {
+ BTC_WIDLE = 0,
+ BTC_WBUSY_BNOSCAN,
+ BTC_WBUSY_BSCAN,
+ BTC_WSCAN_BNOSCAN,
+ BTC_WSCAN_BSCAN,
+ BTC_WLINKING
+};
+
+enum btc_ant_phase {
+ BTC_ANT_WPOWERON = 0,
+ BTC_ANT_WINIT,
+ BTC_ANT_WONLY,
+ BTC_ANT_WOFF,
+ BTC_ANT_W2G,
+ BTC_ANT_W5G,
+ BTC_ANT_W25G,
+ BTC_ANT_FREERUN,
+ BTC_ANT_WRFK,
+ BTC_ANT_BRFK,
+ BTC_ANT_MAX
+};
+
+enum btc_plt {
+ BTC_PLT_NONE = 0,
+ BTC_PLT_LTE_RX = BIT(0),
+ BTC_PLT_GNT_BT_TX = BIT(1),
+ BTC_PLT_GNT_BT_RX = BIT(2),
+ BTC_PLT_GNT_WL = BIT(3),
+ BTC_PLT_BT = BIT(1) | BIT(2),
+ BTC_PLT_ALL = 0xf
+};
+
+enum btc_cx_poicy_main_type {
+ BTC_CXP_OFF = 0,
+ BTC_CXP_OFFB,
+ BTC_CXP_OFFE,
+ BTC_CXP_FIX,
+ BTC_CXP_PFIX,
+ BTC_CXP_AUTO,
+ BTC_CXP_PAUTO,
+ BTC_CXP_AUTO2,
+ BTC_CXP_PAUTO2,
+ BTC_CXP_MANUAL,
+ BTC_CXP_USERDEF0,
+ BTC_CXP_MAIN_MAX
+};
+
+enum btc_cx_poicy_type {
+ /* TDMA off + pri: BT > WL */
+ BTC_CXP_OFF_BT = (BTC_CXP_OFF << 8) | 0,
+
+ /* TDMA off + pri: WL > BT */
+ BTC_CXP_OFF_WL = (BTC_CXP_OFF << 8) | 1,
+
+ /* TDMA off + pri: BT = WL */
+ BTC_CXP_OFF_EQ0 = (BTC_CXP_OFF << 8) | 2,
+
+ /* TDMA off + pri: BT = WL > BT_Lo */
+ BTC_CXP_OFF_EQ1 = (BTC_CXP_OFF << 8) | 3,
+
+ /* TDMA off + pri: WL = BT, BT_Rx > WL_Lo_Tx */
+ BTC_CXP_OFF_EQ2 = (BTC_CXP_OFF << 8) | 4,
+
+ /* TDMA off + pri: WL_Rx = BT, BT_HI > WL_Tx > BT_Lo */
+ BTC_CXP_OFF_EQ3 = (BTC_CXP_OFF << 8) | 5,
+
+ /* TDMA off + pri: BT_Hi > WL > BT_Lo */
+ BTC_CXP_OFF_BWB0 = (BTC_CXP_OFF << 8) | 6,
+
+ /* TDMA off + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
+ BTC_CXP_OFF_BWB1 = (BTC_CXP_OFF << 8) | 7,
+
+ /* TDMA off + Bcn-Protect + pri: WL_Hi-Tx > BT_Hi_Rx, BT_Hi > WL > BT_Lo */
+ BTC_CXP_OFFB_BWB0 = (BTC_CXP_OFFB << 8) | 0,
+
+ /* TDMA off + Ext-Ctrl + pri: default */
+ BTC_CXP_OFFE_DEF = (BTC_CXP_OFFE << 8) | 0,
+
+ /* TDMA off + Ext-Ctrl + pri: E2G-slot block all BT */
+ BTC_CXP_OFFE_DEF2 = (BTC_CXP_OFFE << 8) | 1,
+
+ /* TDMA Fix slot-0: W1:B1 = 30:30 */
+ BTC_CXP_FIX_TD3030 = (BTC_CXP_FIX << 8) | 0,
+
+ /* TDMA Fix slot-1: W1:B1 = 50:50 */
+ BTC_CXP_FIX_TD5050 = (BTC_CXP_FIX << 8) | 1,
+
+ /* TDMA Fix slot-2: W1:B1 = 20:30 */
+ BTC_CXP_FIX_TD2030 = (BTC_CXP_FIX << 8) | 2,
+
+ /* TDMA Fix slot-3: W1:B1 = 40:10 */
+ BTC_CXP_FIX_TD4010 = (BTC_CXP_FIX << 8) | 3,
+
+ /* TDMA Fix slot-4: W1:B1 = 70:10 */
+ BTC_CXP_FIX_TD7010 = (BTC_CXP_FIX << 8) | 4,
+
+ /* TDMA Fix slot-5: W1:B1 = 20:60 */
+ BTC_CXP_FIX_TD2060 = (BTC_CXP_FIX << 8) | 5,
+
+ /* TDMA Fix slot-6: W1:B1 = 30:60 */
+ BTC_CXP_FIX_TD3060 = (BTC_CXP_FIX << 8) | 6,
+
+ /* TDMA Fix slot-7: W1:B1 = 20:80 */
+ BTC_CXP_FIX_TD2080 = (BTC_CXP_FIX << 8) | 7,
+
+ /* TDMA Fix slot-8: W1:B1 = user-define */
+ BTC_CXP_FIX_TDW1B1 = (BTC_CXP_FIX << 8) | 8,
+
+ /* TDMA Fix slot-9: W1:B1 = 40:20 */
+ BTC_CXP_FIX_TD4020 = (BTC_CXP_FIX << 8) | 9,
+
+ /* PS-TDMA Fix slot-0: W1:B1 = 30:30 */
+ BTC_CXP_PFIX_TD3030 = (BTC_CXP_PFIX << 8) | 0,
+
+ /* PS-TDMA Fix slot-1: W1:B1 = 50:50 */
+ BTC_CXP_PFIX_TD5050 = (BTC_CXP_PFIX << 8) | 1,
+
+ /* PS-TDMA Fix slot-2: W1:B1 = 20:30 */
+ BTC_CXP_PFIX_TD2030 = (BTC_CXP_PFIX << 8) | 2,
+
+ /* PS-TDMA Fix slot-3: W1:B1 = 20:60 */
+ BTC_CXP_PFIX_TD2060 = (BTC_CXP_PFIX << 8) | 3,
+
+ /* PS-TDMA Fix slot-4: W1:B1 = 30:70 */
+ BTC_CXP_PFIX_TD3070 = (BTC_CXP_PFIX << 8) | 4,
+
+ /* PS-TDMA Fix slot-5: W1:B1 = 20:80 */
+ BTC_CXP_PFIX_TD2080 = (BTC_CXP_PFIX << 8) | 5,
+
+ /* PS-TDMA Fix slot-6: W1:B1 = user-define */
+ BTC_CXP_PFIX_TDW1B1 = (BTC_CXP_PFIX << 8) | 6,
+
+ /* TDMA Auto slot-0: W1:B1 = 50:200 */
+ BTC_CXP_AUTO_TD50200 = (BTC_CXP_AUTO << 8) | 0,
+
+ /* TDMA Auto slot-1: W1:B1 = 60:200 */
+ BTC_CXP_AUTO_TD60200 = (BTC_CXP_AUTO << 8) | 1,
+
+ /* TDMA Auto slot-2: W1:B1 = 20:200 */
+ BTC_CXP_AUTO_TD20200 = (BTC_CXP_AUTO << 8) | 2,
+
+ /* TDMA Auto slot-3: W1:B1 = user-define */
+ BTC_CXP_AUTO_TDW1B1 = (BTC_CXP_AUTO << 8) | 3,
+
+ /* PS-TDMA Auto slot-0: W1:B1 = 50:200 */
+ BTC_CXP_PAUTO_TD50200 = (BTC_CXP_PAUTO << 8) | 0,
+
+ /* PS-TDMA Auto slot-1: W1:B1 = 60:200 */
+ BTC_CXP_PAUTO_TD60200 = (BTC_CXP_PAUTO << 8) | 1,
+
+ /* PS-TDMA Auto slot-2: W1:B1 = 20:200 */
+ BTC_CXP_PAUTO_TD20200 = (BTC_CXP_PAUTO << 8) | 2,
+
+ /* PS-TDMA Auto slot-3: W1:B1 = user-define */
+ BTC_CXP_PAUTO_TDW1B1 = (BTC_CXP_PAUTO << 8) | 3,
+
+ /* TDMA Auto slot2-0: W1:B4 = 30:50 */
+ BTC_CXP_AUTO2_TD3050 = (BTC_CXP_AUTO2 << 8) | 0,
+
+ /* TDMA Auto slot2-1: W1:B4 = 30:70 */
+ BTC_CXP_AUTO2_TD3070 = (BTC_CXP_AUTO2 << 8) | 1,
+
+ /* TDMA Auto slot2-2: W1:B4 = 50:50 */
+ BTC_CXP_AUTO2_TD5050 = (BTC_CXP_AUTO2 << 8) | 2,
+
+ /* TDMA Auto slot2-3: W1:B4 = 60:60 */
+ BTC_CXP_AUTO2_TD6060 = (BTC_CXP_AUTO2 << 8) | 3,
+
+ /* TDMA Auto slot2-4: W1:B4 = 20:80 */
+ BTC_CXP_AUTO2_TD2080 = (BTC_CXP_AUTO2 << 8) | 4,
+
+ /* TDMA Auto slot2-5: W1:B4 = user-define */
+ BTC_CXP_AUTO2_TDW1B4 = (BTC_CXP_AUTO2 << 8) | 5,
+
+ /* PS-TDMA Auto slot2-0: W1:B4 = 30:50 */
+ BTC_CXP_PAUTO2_TD3050 = (BTC_CXP_PAUTO2 << 8) | 0,
+
+ /* PS-TDMA Auto slot2-1: W1:B4 = 30:70 */
+ BTC_CXP_PAUTO2_TD3070 = (BTC_CXP_PAUTO2 << 8) | 1,
+
+ /* PS-TDMA Auto slot2-2: W1:B4 = 50:50 */
+ BTC_CXP_PAUTO2_TD5050 = (BTC_CXP_PAUTO2 << 8) | 2,
+
+ /* PS-TDMA Auto slot2-3: W1:B4 = 60:60 */
+ BTC_CXP_PAUTO2_TD6060 = (BTC_CXP_PAUTO2 << 8) | 3,
+
+ /* PS-TDMA Auto slot2-4: W1:B4 = 20:80 */
+ BTC_CXP_PAUTO2_TD2080 = (BTC_CXP_PAUTO2 << 8) | 4,
+
+ /* PS-TDMA Auto slot2-5: W1:B4 = user-define */
+ BTC_CXP_PAUTO2_TDW1B4 = (BTC_CXP_PAUTO2 << 8) | 5,
+
+ BTC_CXP_MAX = 0xffff
+};
+
+enum btc_wl_rfk_result {
+ BTC_WRFK_REJECT = 0,
+ BTC_WRFK_ALLOW = 1,
+};
+
+enum btc_coex_info_map_en {
+ BTC_COEX_INFO_CX = BIT(0),
+ BTC_COEX_INFO_WL = BIT(1),
+ BTC_COEX_INFO_BT = BIT(2),
+ BTC_COEX_INFO_DM = BIT(3),
+ BTC_COEX_INFO_MREG = BIT(4),
+ BTC_COEX_INFO_SUMMARY = BIT(5),
+ BTC_COEX_INFO_ALL = GENMASK(7, 0),
+};
+
+#define BTC_CXP_MASK GENMASK(15, 8)
+
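/*
 * Illustrative sketch only (the helper below is hypothetical and not part
 * of this patch): a policy type packs its main type in bits 15:8 and a
 * sub-index in bits 7:0, e.g. BTC_CXP_FIX_TD5050 = (BTC_CXP_FIX << 8) | 1,
 * so the main type can be recovered with BTC_CXP_MASK, exactly as
 * _set_policy() does later in this file.
 */
static inline u8 btc_cxp_main_type(u16 policy_type)
{
	return (u8)((policy_type & BTC_CXP_MASK) >> 8);
}
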
+enum btc_w2b_scoreboard {
+ BTC_WSCB_ACTIVE = BIT(0),
+ BTC_WSCB_ON = BIT(1),
+ BTC_WSCB_SCAN = BIT(2),
+ BTC_WSCB_UNDERTEST = BIT(3),
+ BTC_WSCB_RXGAIN = BIT(4),
+ BTC_WSCB_WLBUSY = BIT(7),
+ BTC_WSCB_EXTFEM = BIT(8),
+ BTC_WSCB_TDMA = BIT(9),
+ BTC_WSCB_FIX2M = BIT(10),
+ BTC_WSCB_WLRFK = BIT(11),
+ BTC_WSCB_BTRFK_GNT = BIT(12), /* not used, use mailbox to inform BT */
+ BTC_WSCB_BT_HILNA = BIT(13),
+ BTC_WSCB_BTLOG = BIT(14),
+ BTC_WSCB_ALL = GENMASK(23, 0),
+};
+
+enum btc_wl_link_mode {
+ BTC_WLINK_NOLINK = 0x0,
+ BTC_WLINK_2G_STA,
+ BTC_WLINK_2G_AP,
+ BTC_WLINK_2G_GO,
+ BTC_WLINK_2G_GC,
+ BTC_WLINK_2G_SCC,
+ BTC_WLINK_2G_MCC,
+ BTC_WLINK_25G_MCC,
+ BTC_WLINK_25G_DBCC,
+ BTC_WLINK_5G,
+ BTC_WLINK_2G_NAN,
+ BTC_WLINK_OTHER,
+ BTC_WLINK_MAX
+};
+
+enum btc_bt_hid_type {
+ BTC_HID_218 = BIT(0),
+ BTC_HID_418 = BIT(1),
+ BTC_HID_BLE = BIT(2),
+ BTC_HID_RCU = BIT(3),
+ BTC_HID_RCU_VOICE = BIT(4),
+ BTC_HID_OTHER_LEGACY = BIT(5)
+};
+
+enum btc_reset_module {
+ BTC_RESET_CX = BIT(0),
+ BTC_RESET_DM = BIT(1),
+ BTC_RESET_CTRL = BIT(2),
+ BTC_RESET_CXDM = BIT(0) | BIT(1),
+ BTC_RESET_BTINFO = BIT(3),
+ BTC_RESET_MDINFO = BIT(4),
+ BTC_RESET_ALL = GENMASK(7, 0),
+};
+
+enum btc_gnt_state {
+ BTC_GNT_HW = 0,
+ BTC_GNT_SW_LO,
+ BTC_GNT_SW_HI,
+ BTC_GNT_MAX
+};
+
+enum btc_wl_max_tx_time {
+ BTC_MAX_TX_TIME_L1 = 500,
+ BTC_MAX_TX_TIME_L2 = 1000,
+ BTC_MAX_TX_TIME_L3 = 2000,
+ BTC_MAX_TX_TIME_DEF = 5280
+};
+
+enum btc_wl_max_tx_retry {
+ BTC_MAX_TX_RETRY_L1 = 7,
+ BTC_MAX_TX_RETRY_L2 = 15,
+ BTC_MAX_TX_RETRY_DEF = 31,
+};
+
+enum btc_reason_and_action {
+ BTC_RSN_NONE,
+ BTC_RSN_NTFY_INIT,
+ BTC_RSN_NTFY_SWBAND,
+ BTC_RSN_NTFY_WL_STA,
+ BTC_RSN_NTFY_RADIO_STATE,
+ BTC_RSN_UPDATE_BT_SCBD,
+ BTC_RSN_NTFY_WL_RFK,
+ BTC_RSN_UPDATE_BT_INFO,
+ BTC_RSN_NTFY_SCAN_START,
+ BTC_RSN_NTFY_SCAN_FINISH,
+ BTC_RSN_NTFY_SPECIFIC_PACKET,
+ BTC_RSN_NTFY_POWEROFF,
+ BTC_RSN_NTFY_ROLE_INFO,
+ BTC_RSN_CMD_SET_COEX,
+ BTC_RSN_ACT1_WORK,
+ BTC_RSN_BT_DEVINFO_WORK,
+ BTC_RSN_RFK_CHK_WORK,
+ BTC_RSN_NUM,
+ BTC_ACT_NONE = 100,
+ BTC_ACT_WL_ONLY,
+ BTC_ACT_WL_5G,
+ BTC_ACT_WL_OTHER,
+ BTC_ACT_WL_IDLE,
+ BTC_ACT_WL_NC,
+ BTC_ACT_WL_RFK,
+ BTC_ACT_WL_INIT,
+ BTC_ACT_WL_OFF,
+ BTC_ACT_FREERUN,
+ BTC_ACT_BT_WHQL,
+ BTC_ACT_BT_RFK,
+ BTC_ACT_BT_OFF,
+ BTC_ACT_BT_IDLE,
+ BTC_ACT_BT_HFP,
+ BTC_ACT_BT_HID,
+ BTC_ACT_BT_A2DP,
+ BTC_ACT_BT_A2DPSINK,
+ BTC_ACT_BT_PAN,
+ BTC_ACT_BT_A2DP_HID,
+ BTC_ACT_BT_A2DP_PAN,
+ BTC_ACT_BT_PAN_HID,
+ BTC_ACT_BT_A2DP_PAN_HID,
+ BTC_ACT_WL_25G_MCC,
+ BTC_ACT_WL_2G_MCC,
+ BTC_ACT_WL_2G_SCC,
+ BTC_ACT_WL_2G_AP,
+ BTC_ACT_WL_2G_GO,
+ BTC_ACT_WL_2G_GC,
+ BTC_ACT_WL_2G_NAN,
+ BTC_ACT_LAST,
+ BTC_ACT_NUM = BTC_ACT_LAST - BTC_ACT_NONE,
+ BTC_ACT_EXT_BIT = BIT(14),
+ BTC_POLICY_EXT_BIT = BIT(15),
+};
+
+#define BTC_FREERUN_ANTISO_MIN 30
+#define BTC_TDMA_BTHID_MAX 2
+#define BTC_BLINK_NOCONNECT 0
+
+static void _run_coex(struct rtw89_dev *rtwdev,
+ enum btc_reason_and_action reason);
+static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state);
+static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update);
+
+static void _send_fw_cmd(struct rtw89_dev *rtwdev, u8 h2c_class, u8 h2c_func,
+ void *param, u16 len)
+{
+ rtw89_fw_h2c_raw_with_hdr(rtwdev, h2c_class, h2c_func, param, len,
+ false, true);
+}
+
+static void _reset_btc_var(struct rtw89_dev *rtwdev, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__);
+
+ if (type & BTC_RESET_CX)
+ memset(cx, 0, sizeof(*cx));
+ else if (type & BTC_RESET_BTINFO) /* only for BT enable */
+ memset(bt, 0, sizeof(*bt));
+
+ if (type & BTC_RESET_CTRL) {
+ memset(&btc->ctrl, 0, sizeof(btc->ctrl));
+ btc->ctrl.trace_step = FCXDEF_STEP;
+ }
+
+ /* Init Coex variables that are not zero */
+ if (type & BTC_RESET_DM) {
+ memset(&btc->dm, 0, sizeof(btc->dm));
+ memset(bt_linfo->rssi_state, 0, sizeof(bt_linfo->rssi_state));
+
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++)
+ memset(wl_linfo[i].rssi_state, 0,
+ sizeof(wl_linfo[i].rssi_state));
+
+ /* set the slot_now table to original */
+ btc->dm.tdma_now = t_def[CXTD_OFF];
+ btc->dm.tdma = t_def[CXTD_OFF];
+ memcpy(&btc->dm.slot_now, s_def, sizeof(btc->dm.slot_now));
+ memcpy(&btc->dm.slot, s_def, sizeof(btc->dm.slot));
+
+ btc->policy_len = 0;
+ btc->bt_req_len = 0;
+
+ btc->dm.coex_info_map = BTC_COEX_INFO_ALL;
+ btc->dm.wl_tx_limit.tx_time = BTC_MAX_TX_TIME_DEF;
+ btc->dm.wl_tx_limit.tx_retry = BTC_MAX_TX_RETRY_DEF;
+ }
+
+ if (type & BTC_RESET_MDINFO)
+ memset(&btc->mdinfo, 0, sizeof(btc->mdinfo));
+}
+
+#define BTC_FWINFO_BUF 1024
+
+#define BTC_RPT_HDR_SIZE 3
+#define BTC_CHK_WLSLOT_DRIFT_MAX 15
+#define BTC_CHK_HANG_MAX 3
+
+static void _chk_btc_err(struct rtw89_dev *rtwdev, u8 type, u32 cnt)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): type:%d cnt:%d\n",
+ __func__, type, cnt);
+
+ switch (type) {
+ case BTC_DCNT_RPT_FREEZE:
+ if (dm->cnt_dm[BTC_DCNT_RPT] == cnt && btc->fwinfo.rpt_en_map)
+ dm->cnt_dm[BTC_DCNT_RPT_FREEZE]++;
+ else
+ dm->cnt_dm[BTC_DCNT_RPT_FREEZE] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_RPT_FREEZE] >= BTC_CHK_HANG_MAX)
+ dm->error.map.wl_fw_hang = true;
+ else
+ dm->error.map.wl_fw_hang = false;
+
+ dm->cnt_dm[BTC_DCNT_RPT] = cnt;
+ break;
+ case BTC_DCNT_CYCLE_FREEZE:
+ if (dm->cnt_dm[BTC_DCNT_CYCLE] == cnt &&
+ (dm->tdma_now.type != CXTDMA_OFF ||
+ dm->tdma_now.ext_ctrl == CXECTL_EXT))
+ dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE]++;
+ else
+ dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_CYCLE_FREEZE] >= BTC_CHK_HANG_MAX)
+ dm->error.map.cycle_hang = true;
+ else
+ dm->error.map.cycle_hang = false;
+
+ dm->cnt_dm[BTC_DCNT_CYCLE] = cnt;
+ break;
+ case BTC_DCNT_W1_FREEZE:
+ if (dm->cnt_dm[BTC_DCNT_W1] == cnt &&
+ dm->tdma_now.type != CXTDMA_OFF)
+ dm->cnt_dm[BTC_DCNT_W1_FREEZE]++;
+ else
+ dm->cnt_dm[BTC_DCNT_W1_FREEZE] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_W1_FREEZE] >= BTC_CHK_HANG_MAX)
+ dm->error.map.w1_hang = true;
+ else
+ dm->error.map.w1_hang = false;
+
+ dm->cnt_dm[BTC_DCNT_W1] = cnt;
+ break;
+ case BTC_DCNT_B1_FREEZE:
+ if (dm->cnt_dm[BTC_DCNT_B1] == cnt &&
+ dm->tdma_now.type != CXTDMA_OFF)
+ dm->cnt_dm[BTC_DCNT_B1_FREEZE]++;
+ else
+ dm->cnt_dm[BTC_DCNT_B1_FREEZE] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_B1_FREEZE] >= BTC_CHK_HANG_MAX)
+ dm->error.map.b1_hang = true;
+ else
+ dm->error.map.b1_hang = false;
+
+ dm->cnt_dm[BTC_DCNT_B1] = cnt;
+ break;
+ case BTC_DCNT_TDMA_NONSYNC:
+ if (cnt != 0) /* if tdma not sync between drv/fw */
+ dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC]++;
+ else
+ dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_TDMA_NONSYNC] >= BTC_CHK_HANG_MAX)
+ dm->error.map.tdma_no_sync = true;
+ else
+ dm->error.map.tdma_no_sync = false;
+ break;
+ case BTC_DCNT_SLOT_NONSYNC:
+ if (cnt != 0) /* if slot not sync between drv/fw */
+ dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC]++;
+ else
+ dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_SLOT_NONSYNC] >= BTC_CHK_HANG_MAX)
+ dm->error.map.tdma_no_sync = true;
+ else
+ dm->error.map.tdma_no_sync = false;
+ break;
+ case BTC_DCNT_BTCNT_FREEZE:
+ cnt = cx->cnt_bt[BTC_BCNT_HIPRI_RX] +
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX] +
+ cx->cnt_bt[BTC_BCNT_LOPRI_RX] +
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX];
+
+ if (cnt == 0)
+ dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE]++;
+ else
+ dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] = 0;
+
+ if ((dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] >= BTC_CHK_HANG_MAX &&
+ bt->enable.now) || (!dm->cnt_dm[BTC_DCNT_BTCNT_FREEZE] &&
+ !bt->enable.now))
+ _update_bt_scbd(rtwdev, false);
+ break;
+ case BTC_DCNT_WL_SLOT_DRIFT:
+ if (cnt >= BTC_CHK_WLSLOT_DRIFT_MAX)
+ dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT]++;
+ else
+ dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT] = 0;
+
+ if (dm->cnt_dm[BTC_DCNT_WL_SLOT_DRIFT] >= BTC_CHK_HANG_MAX)
+ dm->error.map.wl_slot_drift = true;
+ else
+ dm->error.map.wl_slot_drift = false;
+ break;
+ }
+}
+
+static void _update_bt_report(struct rtw89_dev *rtwdev, u8 rpt_type, u8 *pfinfo)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &bt_linfo->a2dp_desc;
+ struct rtw89_btc_fbtc_btver *pver = NULL;
+ struct rtw89_btc_fbtc_btscan *pscan = NULL;
+ struct rtw89_btc_fbtc_btafh *pafh = NULL;
+ struct rtw89_btc_fbtc_btdevinfo *pdev = NULL;
+
+ pver = (struct rtw89_btc_fbtc_btver *)pfinfo;
+ pscan = (struct rtw89_btc_fbtc_btscan *)pfinfo;
+ pafh = (struct rtw89_btc_fbtc_btafh *)pfinfo;
+ pdev = (struct rtw89_btc_fbtc_btdevinfo *)pfinfo;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): rpt_type:%d\n",
+ __func__, rpt_type);
+
+ switch (rpt_type) {
+ case BTC_RPT_TYPE_BT_VER:
+ bt->ver_info.fw = le32_to_cpu(pver->fw_ver);
+ bt->ver_info.fw_coex = le32_get_bits(pver->coex_ver, GENMASK(7, 0));
+ bt->feature = le32_to_cpu(pver->feature);
+ break;
+ case BTC_RPT_TYPE_BT_SCAN:
+ memcpy(bt->scan_info, pscan->scan, BTC_SCAN_MAX1);
+ break;
+ case BTC_RPT_TYPE_BT_AFH:
+ memcpy(&bt_linfo->afh_map[0], pafh->afh_l, 4);
+ memcpy(&bt_linfo->afh_map[4], pafh->afh_m, 4);
+ memcpy(&bt_linfo->afh_map[8], pafh->afh_h, 2);
+ break;
+ case BTC_RPT_TYPE_BT_DEVICE:
+ a2dp->device_name = le32_to_cpu(pdev->dev_name);
+ a2dp->vendor_id = le16_to_cpu(pdev->vendor_id);
+ a2dp->flush_time = le32_to_cpu(pdev->flush_time);
+ break;
+ default:
+ break;
+ }
+}
+
+struct rtw89_btc_fbtc_cysta_cpu {
+ u8 fver;
+ u8 rsvd;
+ u16 cycles;
+ u16 cycles_a2dp[CXT_FLCTRL_MAX];
+ u16 a2dpept;
+ u16 a2dpeptto;
+ u16 tavg_cycle[CXT_MAX];
+ u16 tmax_cycle[CXT_MAX];
+ u16 tmaxdiff_cycle[CXT_MAX];
+ u16 tavg_a2dp[CXT_FLCTRL_MAX];
+ u16 tmax_a2dp[CXT_FLCTRL_MAX];
+ u16 tavg_a2dpept;
+ u16 tmax_a2dpept;
+ u16 tavg_lk;
+ u16 tmax_lk;
+ u32 slot_cnt[CXST_MAX];
+ u32 bcn_cnt[CXBCN_MAX];
+ u32 leakrx_cnt;
+ u32 collision_cnt;
+ u32 skip_cnt;
+ u32 exception;
+ u32 except_cnt;
+ u16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
+};
+
+static void rtw89_btc_fbtc_cysta_to_cpu(const struct rtw89_btc_fbtc_cysta *src,
+ struct rtw89_btc_fbtc_cysta_cpu *dst)
+{
+ static_assert(sizeof(*src) == sizeof(*dst));
+
+#define __CPY_U8(_x) ({dst->_x = src->_x; })
+#define __CPY_LE16(_x) ({dst->_x = le16_to_cpu(src->_x); })
+#define __CPY_LE16S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
+ dst->_x[_i] = le16_to_cpu(src->_x[_i]); })
+#define __CPY_LE32(_x) ({dst->_x = le32_to_cpu(src->_x); })
+#define __CPY_LE32S(_x) ({int _i; for (_i = 0; _i < ARRAY_SIZE(dst->_x); _i++) \
+ dst->_x[_i] = le32_to_cpu(src->_x[_i]); })
+
+ __CPY_U8(fver);
+ __CPY_U8(rsvd);
+ __CPY_LE16(cycles);
+ __CPY_LE16S(cycles_a2dp);
+ __CPY_LE16(a2dpept);
+ __CPY_LE16(a2dpeptto);
+ __CPY_LE16S(tavg_cycle);
+ __CPY_LE16S(tmax_cycle);
+ __CPY_LE16S(tmaxdiff_cycle);
+ __CPY_LE16S(tavg_a2dp);
+ __CPY_LE16S(tmax_a2dp);
+ __CPY_LE16(tavg_a2dpept);
+ __CPY_LE16(tmax_a2dpept);
+ __CPY_LE16(tavg_lk);
+ __CPY_LE16(tmax_lk);
+ __CPY_LE32S(slot_cnt);
+ __CPY_LE32S(bcn_cnt);
+ __CPY_LE32(leakrx_cnt);
+ __CPY_LE32(collision_cnt);
+ __CPY_LE32(skip_cnt);
+ __CPY_LE32(exception);
+ __CPY_LE32(except_cnt);
+ __CPY_LE16S(tslot_cycle);
+
+#undef __CPY_U8
+#undef __CPY_LE16
+#undef __CPY_LE16S
+#undef __CPY_LE32
+#undef __CPY_LE32S
+}
+
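/*
 * Illustrative expansion (hypothetical function, not part of the driver):
 * each __CPY_LE16S()/__CPY_LE32S() use above is a per-element
 * little-endian to CPU-order copy; for example, __CPY_LE16S(tavg_cycle)
 * behaves like the loop below.
 */
static void __maybe_unused
btc_cysta_copy_tavg_cycle(struct rtw89_btc_fbtc_cysta_cpu *dst,
			  const struct rtw89_btc_fbtc_cysta *src)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dst->tavg_cycle); i++)
		dst->tavg_cycle[i] = le16_to_cpu(src->tavg_cycle[i]);
}
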
+#define BTC_LEAK_AP_TH 10
+#define BTC_CYSTA_CHK_PERIOD 100
+
+struct rtw89_btc_prpt {
+ u8 type;
+ __le16 len;
+ u8 content[];
+} __packed;
+
+static u32 _chk_btc_report(struct rtw89_dev *rtwdev,
+ struct rtw89_btc_btf_fwinfo *pfwinfo,
+ u8 *prptbuf, u32 index)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_fbtc_rpt_ctrl *prpt = NULL;
+ struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
+ struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ struct rtw89_btc_prpt *btc_prpt = NULL;
+ struct rtw89_btc_fbtc_slot *rtp_slot = NULL;
+ u8 rpt_type = 0, *rpt_content = NULL, *pfinfo = NULL;
+ u16 wl_slot_set = 0;
+ u32 trace_step = btc->ctrl.trace_step, rpt_len = 0, diff_t;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): index:%d\n",
+ __func__, index);
+
+ if (!prptbuf) {
+ pfwinfo->err[BTFRE_INVALID_INPUT]++;
+ return 0;
+ }
+
+ btc_prpt = (struct rtw89_btc_prpt *)&prptbuf[index];
+ rpt_type = btc_prpt->type;
+ rpt_len = le16_to_cpu(btc_prpt->len);
+ rpt_content = btc_prpt->content;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): rpt_type:%d\n",
+ __func__, rpt_type);
+
+ switch (rpt_type) {
+ case BTC_RPT_TYPE_CTRL:
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_ctrl.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_ctrl.finfo);
+ pcinfo->req_fver = BTCRPT_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_TDMA:
+ pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_tdma.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_tdma.finfo);
+ pcinfo->req_fver = FCXTDMA_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_SLOT:
+ pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_slots.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_slots.finfo);
+ pcinfo->req_fver = FCXSLOTS_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_CYSTA:
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_cysta.finfo);
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
+ rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_cysta.finfo);
+ pcinfo->req_fver = FCXCYSTA_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_STEP:
+ pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_step.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_step.finfo.step[0]) *
+ trace_step + 8;
+ pcinfo->req_fver = FCXSTEP_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_NULLSTA:
+ pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_nullsta.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_nullsta.finfo);
+ pcinfo->req_fver = FCXNULLSTA_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_MREG:
+ pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_mregval.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_mregval.finfo);
+ pcinfo->req_fver = FCXMREG_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_GPIO_DBG:
+ pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_gpio_dbg.finfo);
+ pcinfo->req_fver = FCXGPIODBG_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_BT_VER:
+ pcinfo = &pfwinfo->rpt_fbtc_btver.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btver.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btver.finfo);
+ pcinfo->req_fver = FCX_BTVER_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_BT_SCAN:
+ pcinfo = &pfwinfo->rpt_fbtc_btscan.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btscan.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btscan.finfo);
+ pcinfo->req_fver = FCX_BTSCAN_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_BT_AFH:
+ pcinfo = &pfwinfo->rpt_fbtc_btafh.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btafh.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btafh.finfo);
+ pcinfo->req_fver = FCX_BTAFH_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ case BTC_RPT_TYPE_BT_DEVICE:
+ pcinfo = &pfwinfo->rpt_fbtc_btdev.cinfo;
+ pfinfo = (u8 *)(&pfwinfo->rpt_fbtc_btdev.finfo);
+ pcinfo->req_len = sizeof(pfwinfo->rpt_fbtc_btdev.finfo);
+ pcinfo->req_fver = FCX_BTDEVINFO_VER;
+ pcinfo->rx_len = rpt_len;
+ pcinfo->rx_cnt++;
+ break;
+ default:
+ pfwinfo->err[BTFRE_UNDEF_TYPE]++;
+ return 0;
+ }
+
+ if (rpt_len != pcinfo->req_len) {
+ if (rpt_type < BTC_RPT_TYPE_MAX)
+ pfwinfo->len_mismch |= (0x1 << rpt_type);
+ else
+ pfwinfo->len_mismch |= BIT(31);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_len:%d!=req_len:%d\n",
+ __func__, rpt_type, rpt_len, pcinfo->req_len);
+
+ pcinfo->valid = 0;
+ return 0;
+ } else if (!pfinfo || !rpt_content || !pcinfo->req_len) {
+ pfwinfo->err[BTFRE_EXCEPTION]++;
+ pcinfo->valid = 0;
+ return 0;
+ }
+
+ memcpy(pfinfo, rpt_content, pcinfo->req_len);
+ pcinfo->valid = 1;
+
+ if (rpt_type == BTC_RPT_TYPE_TDMA) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n", __func__,
+ BTC_DCNT_TDMA_NONSYNC, sizeof(dm->tdma_now));
+
+ if (memcmp(&dm->tdma_now, &pfwinfo->rpt_fbtc_tdma.finfo,
+ sizeof(dm->tdma_now)) != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d tdma_now %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ dm->tdma_now.type, dm->tdma_now.rxflctrl,
+ dm->tdma_now.txpause, dm->tdma_now.wtgle_n,
+ dm->tdma_now.leak_n, dm->tdma_now.ext_ctrl,
+ dm->tdma_now.rsvd0, dm->tdma_now.rsvd1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_fbtc_tdma %x %x %x %x %x %x %x %x\n",
+ __func__, BTC_DCNT_TDMA_NONSYNC,
+ pfwinfo->rpt_fbtc_tdma.finfo.type,
+ pfwinfo->rpt_fbtc_tdma.finfo.rxflctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo.txpause,
+ pfwinfo->rpt_fbtc_tdma.finfo.wtgle_n,
+ pfwinfo->rpt_fbtc_tdma.finfo.leak_n,
+ pfwinfo->rpt_fbtc_tdma.finfo.ext_ctrl,
+ pfwinfo->rpt_fbtc_tdma.finfo.rsvd0,
+ pfwinfo->rpt_fbtc_tdma.finfo.rsvd1);
+ }
+
+ _chk_btc_err(rtwdev, BTC_DCNT_TDMA_NONSYNC,
+ memcmp(&dm->tdma_now,
+ &pfwinfo->rpt_fbtc_tdma.finfo,
+ sizeof(dm->tdma_now)));
+ }
+
+ if (rpt_type == BTC_RPT_TYPE_SLOT) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): check %d %zu\n",
+ __func__, BTC_DCNT_SLOT_NONSYNC,
+ sizeof(dm->slot_now));
+
+ if (memcmp(dm->slot_now, pfwinfo->rpt_fbtc_slots.finfo.slot,
+ sizeof(dm->slot_now)) != 0) {
+ for (i = 0; i < CXST_MAX; i++) {
+ rtp_slot =
+ &pfwinfo->rpt_fbtc_slots.finfo.slot[i];
+ if (memcmp(&dm->slot_now[i], rtp_slot,
+ sizeof(dm->slot_now[i])) != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d slot_now[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
+ __func__,
+ BTC_DCNT_SLOT_NONSYNC, i,
+ dm->slot_now[i].dur,
+ dm->slot_now[i].cxtbl,
+ dm->slot_now[i].cxtype);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): %d rpt_fbtc_slots[%d] dur=0x%04x tbl=%08x type=0x%04x\n",
+ __func__,
+ BTC_DCNT_SLOT_NONSYNC, i,
+ rtp_slot->dur,
+ rtp_slot->cxtbl,
+ rtp_slot->cxtype);
+ }
+ }
+ }
+ _chk_btc_err(rtwdev, BTC_DCNT_SLOT_NONSYNC,
+ memcmp(dm->slot_now,
+ pfwinfo->rpt_fbtc_slots.finfo.slot,
+ sizeof(dm->slot_now)));
+ }
+
+ if (rpt_type == BTC_RPT_TYPE_CYSTA &&
+ pcysta->cycles >= BTC_CYSTA_CHK_PERIOD) {
+ /* Check Leak-AP */
+ if (pcysta->slot_cnt[CXST_LK] != 0 &&
+ pcysta->leakrx_cnt != 0 && dm->tdma_now.rxflctrl) {
+ if (pcysta->slot_cnt[CXST_LK] <
+ BTC_LEAK_AP_TH * pcysta->leakrx_cnt)
+ dm->leak_ap = 1;
+ }
+
+ /* Check diff time between WL slot and W1/E2G slot */
+ if (dm->tdma_now.type == CXTDMA_OFF &&
+ dm->tdma_now.ext_ctrl == CXECTL_EXT)
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_E2G].dur);
+ else
+ wl_slot_set = le16_to_cpu(dm->slot_now[CXST_W1].dur);
+
+ if (pcysta->tavg_cycle[CXT_WL] > wl_slot_set) {
+ diff_t = pcysta->tavg_cycle[CXT_WL] - wl_slot_set;
+ _chk_btc_err(rtwdev, BTC_DCNT_WL_SLOT_DRIFT, diff_t);
+ }
+ }
+
+ if (rpt_type == BTC_RPT_TYPE_CTRL) {
+ prpt = &pfwinfo->rpt_ctrl.finfo;
+ btc->fwinfo.rpt_en_map = prpt->rpt_enable;
+ wl->ver_info.fw_coex = prpt->wl_fw_coex_ver;
+ wl->ver_info.fw = prpt->wl_fw_ver;
+ dm->wl_fw_cx_offload = !!(prpt->wl_fw_cx_offload);
+ }
+
+ if (rpt_type >= BTC_RPT_TYPE_BT_VER &&
+ rpt_type <= BTC_RPT_TYPE_BT_DEVICE)
+ _update_bt_report(rtwdev, rpt_type, pfinfo);
+
+ return (rpt_len + BTC_RPT_HDR_SIZE);
+}
+
+static void _parse_btc_report(struct rtw89_dev *rtwdev,
+ struct rtw89_btc_btf_fwinfo *pfwinfo,
+ u8 *pbuf, u32 buf_len)
+{
+ struct rtw89_btc_prpt *btc_prpt = NULL;
+ u32 index = 0, rpt_len = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): buf_len:%d\n",
+ __func__, buf_len);
+
+ while (pbuf) {
+ btc_prpt = (struct rtw89_btc_prpt *)&pbuf[index];
+ if (index + 2 >= BTC_FWINFO_BUF)
+ break;
+ /* At least 3 bytes: type(1) & len(2) */
+ rpt_len = le16_to_cpu(btc_prpt->len);
+ if ((index + rpt_len + BTC_RPT_HDR_SIZE) > buf_len)
+ break;
+
+ rpt_len = _chk_btc_report(rtwdev, pfwinfo, pbuf, index);
+ if (!rpt_len)
+ break;
+ index += rpt_len;
+ }
+}
+
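/*
 * Illustrative layout (informational only): the firmware report buffer
 * walked by _parse_btc_report() is a sequence of records, each headed by
 * struct rtw89_btc_prpt (1-byte type + 2-byte little-endian length):
 *
 *   [type][len_lo][len_hi][content: len bytes][type][len_lo][len_hi]...
 *
 * A hypothetical helper for stepping to the next record would be:
 */
static u32 __maybe_unused btc_rpt_next_index(const u8 *buf, u32 index)
{
	const struct rtw89_btc_prpt *p =
		(const struct rtw89_btc_prpt *)&buf[index];

	return index + le16_to_cpu(p->len) + BTC_RPT_HDR_SIZE;
}
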
+#define BTC_TLV_HDR_LEN 2
+
+static void _append_tdma(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_btf_tlv *tlv = NULL;
+ struct rtw89_btc_fbtc_tdma *v = NULL;
+ u16 len = btc->policy_len;
+
+ if (!btc->update_policy_force &&
+ !memcmp(&dm->tdma, &dm->tdma_now, sizeof(dm->tdma))) {
+ rtw89_debug(rtwdev,
+ RTW89_DBG_BTC, "[BTC], %s(): tdma no change!\n",
+ __func__);
+ return;
+ }
+
+ tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
+ v = (struct rtw89_btc_fbtc_tdma *)&tlv->val[0];
+ tlv->type = CXPOLICY_TDMA;
+ tlv->len = sizeof(*v);
+
+ memcpy(v, &dm->tdma, sizeof(*v));
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): type:%d, rxflctrl=%d, txpause=%d, wtgle_n=%d, leak_n=%d, ext_ctrl=%d\n",
+ __func__, dm->tdma.type, dm->tdma.rxflctrl,
+ dm->tdma.txpause, dm->tdma.wtgle_n, dm->tdma.leak_n,
+ dm->tdma.ext_ctrl);
+}
+
+static void _append_slot(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_btf_tlv *tlv = NULL;
+ struct btc_fbtc_1slot *v = NULL;
+ u16 len = 0;
+ u8 i, cnt = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): A:btc->policy_len = %d\n",
+ __func__, btc->policy_len);
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!btc->update_policy_force &&
+ !memcmp(&dm->slot[i], &dm->slot_now[i],
+ sizeof(dm->slot[i])))
+ continue;
+
+ len = btc->policy_len;
+
+ tlv = (struct rtw89_btc_btf_tlv *)&btc->policy[len];
+ v = (struct btc_fbtc_1slot *)&tlv->val[0];
+ tlv->type = CXPOLICY_SLOT;
+ tlv->len = sizeof(*v);
+
+ v->fver = FCXONESLOT_VER;
+ v->sid = i;
+ v->slot = dm->slot[i];
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): slot-%d: dur=%d, table=0x%08x, type=%d\n",
+ __func__, i, dm->slot[i].dur, dm->slot[i].cxtbl,
+ dm->slot[i].cxtype);
+ cnt++;
+
+ btc->policy_len += BTC_TLV_HDR_LEN + sizeof(*v);
+ }
+
+ if (cnt > 0)
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): slot update (cnt=%d)!!\n",
+ __func__, cnt);
+}
+
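/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * _append_tdma() and _append_slot() above both append one
 * struct rtw89_btc_btf_tlv entry to btc->policy, so the buffer later sent
 * by _fw_set_policy() is a flat TLV stream:
 *
 *   [CXPOLICY_TDMA][len][rtw89_btc_fbtc_tdma]
 *   [CXPOLICY_SLOT][len][btc_fbtc_1slot]   (once per changed slot)
 *
 * The shared append pattern could be factored as:
 */
static void __maybe_unused btc_policy_append(struct rtw89_btc *btc, u8 type,
					     const void *val, u8 len)
{
	struct rtw89_btc_btf_tlv *tlv =
		(struct rtw89_btc_btf_tlv *)&btc->policy[btc->policy_len];

	tlv->type = type;
	tlv->len = len;
	memcpy(tlv->val, val, len);
	btc->policy_len += BTC_TLV_HDR_LEN + len;
}
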
+static void rtw89_btc_fw_en_rpt(struct rtw89_dev *rtwdev,
+ u32 rpt_map, bool rpt_state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *fwinfo = &btc->fwinfo;
+ struct rtw89_btc_btf_set_report r = {0};
+ u32 val = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): rpt_map=%x, rpt_state=%x\n",
+ __func__, rpt_map, rpt_state);
+
+ if (rpt_state)
+ val = fwinfo->rpt_en_map | rpt_map;
+ else
+ val = fwinfo->rpt_en_map & ~rpt_map;
+
+ if (val == fwinfo->rpt_en_map)
+ return;
+
+ fwinfo->rpt_en_map = val;
+
+ r.fver = BTF_SET_REPORT_VER;
+ r.enable = cpu_to_le32(val);
+ r.para = cpu_to_le32(rpt_state);
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_REPORT_EN, &r, sizeof(r));
+}
+
+static void rtw89_btc_fw_set_slots(struct rtw89_dev *rtwdev, u8 num,
+ struct rtw89_btc_fbtc_slot *s)
+{
+ struct rtw89_btc_btf_set_slot_table *tbl = NULL;
+ u8 *ptr = NULL;
+ u16 n = 0;
+
+ n = sizeof(*s) * num + sizeof(*tbl);
+ tbl = kmalloc(n, GFP_KERNEL);
+ if (!tbl)
+ return;
+
+ tbl->fver = BTF_SET_SLOT_TABLE_VER;
+ tbl->tbl_num = num;
+ ptr = &tbl->buf[0];
+ memcpy(ptr, s, num * sizeof(*s));
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_SLOT_TABLE, tbl, n);
+
+ kfree(tbl);
+}
+
+static void btc_fw_set_monreg(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc_btf_set_mon_reg *monreg = NULL;
+ u8 n, *ptr = NULL, ulen;
+ u16 sz = 0;
+
+ n = chip->mon_reg_num;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): mon_reg_num=%d\n", __func__, n);
+ if (n > CXMREG_MAX) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): mon reg count %d > %d\n",
+ __func__, n, CXMREG_MAX);
+ return;
+ }
+
+ ulen = sizeof(struct rtw89_btc_fbtc_mreg);
+ sz = (ulen * n) + sizeof(*monreg);
+ monreg = kmalloc(sz, GFP_KERNEL);
+ if (!monreg)
+ return;
+
+ monreg->fver = BTF_SET_MON_REG_VER;
+ monreg->reg_num = n;
+ ptr = &monreg->buf[0];
+ memcpy(ptr, chip->mon_reg, n * ulen);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): sz=%d ulen=%d n=%d\n",
+ __func__, sz, ulen, n);
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_MREG_TABLE, (u8 *)monreg, sz);
+ kfree(monreg);
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_MREG, 1);
+}
+
+static void _update_dm_step(struct rtw89_dev *rtwdev,
+ enum btc_reason_and_action reason_or_action)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+
+ /* use ring-structure to store dm step */
+ dm->dm_step.step[dm->dm_step.step_pos] = reason_or_action;
+ dm->dm_step.step_pos++;
+
+ if (dm->dm_step.step_pos >= ARRAY_SIZE(dm->dm_step.step)) {
+ dm->dm_step.step_pos = 0;
+ dm->dm_step.step_ov = true;
+ }
+}
+
+static void _fw_set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
+ enum btc_reason_and_action action)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+
+ dm->run_action = action;
+
+ _update_dm_step(rtwdev, action | BTC_ACT_EXT_BIT);
+ _update_dm_step(rtwdev, policy_type | BTC_POLICY_EXT_BIT);
+
+ btc->policy_len = 0;
+ btc->policy_type = policy_type;
+
+ _append_tdma(rtwdev);
+ _append_slot(rtwdev);
+
+ if (btc->policy_len == 0 || btc->policy_len > RTW89_BTC_POLICY_MAXLEN)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): action = %d -> policy type/len: 0x%04x/%d\n",
+ __func__, action, policy_type, btc->policy_len);
+
+ if (dm->tdma.rxflctrl == CXFLC_NULLP ||
+ dm->tdma.rxflctrl == CXFLC_QOSNULL)
+ btc->lps = 1;
+ else
+ btc->lps = 0;
+
+ if (btc->lps == 1)
+ rtw89_set_coex_ctrl_lps(rtwdev, btc->lps);
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_CX_POLICY,
+ btc->policy, btc->policy_len);
+
+ memcpy(&dm->tdma_now, &dm->tdma, sizeof(dm->tdma_now));
+ memcpy(&dm->slot_now, &dm->slot, sizeof(dm->slot_now));
+
+ if (btc->update_policy_force)
+ btc->update_policy_force = false;
+
+ if (btc->lps == 0)
+ rtw89_set_coex_ctrl_lps(rtwdev, btc->lps);
+}
+
+static void _fw_set_drv_info(struct rtw89_dev *rtwdev, u8 type)
+{
+ switch (type) {
+ case CXDRVINFO_INIT:
+ rtw89_fw_h2c_cxdrv_init(rtwdev);
+ break;
+ case CXDRVINFO_ROLE:
+ rtw89_fw_h2c_cxdrv_role(rtwdev);
+ break;
+ case CXDRVINFO_CTRL:
+ rtw89_fw_h2c_cxdrv_ctrl(rtwdev);
+ break;
+ case CXDRVINFO_RFK:
+ rtw89_fw_h2c_cxdrv_rfk(rtwdev);
+ break;
+ default:
+ break;
+ }
+}
+
+static
+void btc_fw_event(struct rtw89_dev *rtwdev, u8 evt_id, void *data, u32 len)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): evt_id:%d len:%d\n",
+ __func__, evt_id, len);
+
+ if (!len || !data)
+ return;
+
+ switch (evt_id) {
+ case BTF_EVNT_RPT:
+ _parse_btc_report(rtwdev, pfwinfo, data, len);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _set_gnt_wl(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ u8 i;
+
+ if (phy_map > BTC_PHY_ALL)
+ return;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (!(phy_map & BIT(i)))
+ continue;
+
+ switch (state) {
+ case BTC_GNT_HW:
+ g[i].gnt_wl_sw_en = 0;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_wl_sw_en = 1;
+ g[i].gnt_wl = 1;
+ break;
+ }
+ }
+
+ rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+}
+
+#define BTC_TDMA_WLROLE_MAX 2
+
+static void _set_bt_ignore_wlan_act(struct rtw89_dev *rtwdev, u8 enable)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): set bt %s wlan_act\n", __func__,
+ enable ? "ignore" : "do not ignore");
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_IGNORE_WLAN_ACT, &enable, 1);
+}
+
+#define WL_TX_POWER_NO_BTC_CTRL GENMASK(31, 0)
+#define WL_TX_POWER_ALL_TIME GENMASK(15, 0)
+#define WL_TX_POWER_WITH_BT GENMASK(31, 16)
+#define WL_TX_POWER_INT_PART GENMASK(8, 2)
+#define WL_TX_POWER_FRA_PART GENMASK(1, 0)
+#define B_BTC_WL_TX_POWER_SIGN BIT(7)
+#define B_TSSI_WL_TX_POWER_SIGN BIT(8)
+
+static void _set_wl_tx_power(struct rtw89_dev *rtwdev, u32 level)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 pwr_val;
+
+ if (wl->rf_para.tx_pwr_freerun == level)
+ return;
+
+ wl->rf_para.tx_pwr_freerun = level;
+ btc->dm.rf_trx_para.wl_tx_power = level;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): level = %d\n",
+ __func__, level);
+
+ if (level == RTW89_BTC_WL_DEF_TX_PWR) {
+ pwr_val = WL_TX_POWER_NO_BTC_CTRL;
+ } else { /* only apply "force tx power" */
+ pwr_val = FIELD_PREP(WL_TX_POWER_INT_PART, level);
+ if (pwr_val > RTW89_BTC_WL_DEF_TX_PWR)
+ pwr_val = RTW89_BTC_WL_DEF_TX_PWR;
+
+ if (level & B_BTC_WL_TX_POWER_SIGN)
+ pwr_val |= B_TSSI_WL_TX_POWER_SIGN;
+ pwr_val |= WL_TX_POWER_WITH_BT;
+ }
+
+ chip->ops->btc_set_wl_txpwr_ctrl(rtwdev, pwr_val);
+}
+
+static void _set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ if (wl->rf_para.rx_gain_freerun == level)
+ return;
+
+ wl->rf_para.rx_gain_freerun = level;
+ btc->dm.rf_trx_para.wl_rx_gain = level;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): level = %d\n",
+ __func__, level);
+}
+
+static void _set_bt_tx_power(struct rtw89_dev *rtwdev, u8 level)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u8 buf;
+
+ if (bt->rf_para.tx_pwr_freerun == level)
+ return;
+
+ bt->rf_para.tx_pwr_freerun = level;
+ btc->dm.rf_trx_para.bt_tx_power = level;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): level = %d\n",
+ __func__, level);
+
+ buf = (s8)(-level);
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_TX_PWR, &buf, 1);
+}
+
+#define BTC_BT_RX_NORMAL_LVL 7
+
+static void _set_bt_rx_gain(struct rtw89_dev *rtwdev, u8 level)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+
+ if (bt->rf_para.rx_gain_freerun == level ||
+ level > BTC_BT_RX_NORMAL_LVL)
+ return;
+
+ bt->rf_para.rx_gain_freerun = level;
+ btc->dm.rf_trx_para.bt_rx_gain = level;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): level = %d\n",
+ __func__, level);
+
+ if (level == BTC_BT_RX_NORMAL_LVL)
+ _write_scbd(rtwdev, BTC_WSCB_RXGAIN, false);
+ else
+ _write_scbd(rtwdev, BTC_WSCB_RXGAIN, true);
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_LNA_CONSTRAIN, &level, 1);
+}
+
+static void _set_rf_trx_para(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_rf_trx_para para;
+ u32 wl_stb_chg = 0;
+ u8 level_id = 0;
+
+ if (!dm->freerun) {
+ dm->trx_para_level = 0;
+ chip->ops->btc_bt_aci_imp(rtwdev);
+ }
+
+ level_id = (u8)dm->trx_para_level;
+
+ if (level_id >= chip->rf_para_dlink_num ||
+ level_id >= chip->rf_para_ulink_num) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): invalid level_id: %d\n",
+ __func__, level_id);
+ return;
+ }
+
+ if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL))
+ para = chip->rf_para_ulink[level_id];
+ else
+ para = chip->rf_para_dlink[level_id];
+
+ if (para.wl_tx_power != RTW89_BTC_WL_DEF_TX_PWR)
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): wl_tx_power=%d\n",
+ __func__, para.wl_tx_power);
+ _set_wl_tx_power(rtwdev, para.wl_tx_power);
+ _set_wl_rx_gain(rtwdev, para.wl_rx_gain);
+ _set_bt_tx_power(rtwdev, para.bt_tx_power);
+ _set_bt_rx_gain(rtwdev, para.bt_rx_gain);
+
+ if (bt->enable.now == 0 || wl->status.map.rf_off == 1 ||
+ wl->status.map.lps == 1)
+ wl_stb_chg = 0;
+ else
+ wl_stb_chg = 1;
+
+ if (wl_stb_chg != dm->wl_stb_chg) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): wl_stb_chg=%d\n",
+ __func__, wl_stb_chg);
+ dm->wl_stb_chg = wl_stb_chg;
+ chip->ops->btc_wl_s1_standby(rtwdev, dm->wl_stb_chg);
+ }
+}
+
+static void _update_btc_state_map(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+
+ if (wl->status.map.connecting || wl->status.map._4way ||
+ wl->status.map.roaming) {
+ cx->state_map = BTC_WLINKING;
+ } else if (wl->status.map.scan) { /* wl scan */
+ if (bt_linfo->status.map.inq_pag)
+ cx->state_map = BTC_WSCAN_BSCAN;
+ else
+ cx->state_map = BTC_WSCAN_BNOSCAN;
+ } else if (wl->status.map.busy) { /* only busy */
+ if (bt_linfo->status.map.inq_pag)
+ cx->state_map = BTC_WBUSY_BSCAN;
+ else
+ cx->state_map = BTC_WBUSY_BNOSCAN;
+ } else { /* wl idle */
+ cx->state_map = BTC_WIDLE;
+ }
+}
+
+static void _set_bt_afh_info(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ u8 en = 0, i, ch = 0, bw = 0;
+
+ if (btc->ctrl.manual || wl->status.map.scan)
+ return;
+
+ /* TODO: also handle the module->ant.type == BTC_ANT_SHARED case */
+ if (wl->status.map.rf_off || bt->whql_test ||
+ wl_rinfo->link_mode == BTC_WLINK_NOLINK ||
+ wl_rinfo->link_mode == BTC_WLINK_5G ||
+ wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ en = false;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_MCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_SCC) {
+ en = true;
+ /* get p2p channel */
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ if (wl_rinfo->active_role[i].role ==
+ RTW89_WIFI_ROLE_P2P_GO ||
+ wl_rinfo->active_role[i].role ==
+ RTW89_WIFI_ROLE_P2P_CLIENT) {
+ ch = wl_rinfo->active_role[i].ch;
+ bw = wl_rinfo->active_role[i].bw;
+ break;
+ }
+ }
+ } else {
+ en = true;
+ /* get 2g channel */
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ if (wl_rinfo->active_role[i].connected &&
+ wl_rinfo->active_role[i].band == RTW89_BAND_2G) {
+ ch = wl_rinfo->active_role[i].ch;
+ bw = wl_rinfo->active_role[i].bw;
+ break;
+ }
+ }
+ }
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ bw = 20 + chip->afh_guard_ch * 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ bw = 40 + chip->afh_guard_ch * 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_5:
+ bw = 5 + chip->afh_guard_ch * 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ bw = 10 + chip->afh_guard_ch * 2;
+ break;
+ default:
+ bw = 0;
+ en = false; /* turn off AFH info if BW > 40 */
+ break;
+ }
+
+ if (wl->afh_info.en == en &&
+ wl->afh_info.ch == ch &&
+ wl->afh_info.bw == bw &&
+ b->profile_cnt.last == b->profile_cnt.now) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return because no change!\n",
+ __func__);
+ return;
+ }
+
+ wl->afh_info.en = en;
+ wl->afh_info.ch = ch;
+ wl->afh_info.bw = bw;
+
+ _send_fw_cmd(rtwdev, BTFC_SET, SET_BT_WL_CH_INFO, &wl->afh_info, 3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): en=%d, ch=%d, bw=%d\n",
+ __func__, en, ch, bw);
+ btc->cx.cnt_wl[BTC_WCNT_CH_UPDATE]++;
+}
+
+static bool _check_freerun(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ struct rtw89_btc_bt_hid_desc *hid = &bt_linfo->hid_desc;
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ btc->dm.trx_para_level = 0;
+ return false;
+ }
+
+ /* Below is the dedicated-antenna case */
+ if (wl_rinfo->connect_cnt > BTC_TDMA_WLROLE_MAX) {
+ btc->dm.trx_para_level = 5;
+ return true;
+ }
+
+ if (bt_linfo->profile_cnt.now == 0) {
+ btc->dm.trx_para_level = 5;
+ return true;
+ }
+
+ if (hid->pair_cnt > BTC_TDMA_BTHID_MAX) {
+ btc->dm.trx_para_level = 5;
+ return true;
+ }
+
+ /* TODO get isolation by BT psd */
+ if (btc->mdinfo.ant.isolation >= BTC_FREERUN_ANTISO_MIN) {
+ btc->dm.trx_para_level = 5;
+ return true;
+ }
+
+ if (!wl->status.map.busy) { /* wl idle -> freerun */
+ btc->dm.trx_para_level = 5;
+ return true;
+ } else if (wl->rssi_level > 1) {/* WL rssi < 50% (-60dBm) */
+ btc->dm.trx_para_level = 0;
+ return false;
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_UL)) {
+ if (wl->rssi_level == 0 && bt_linfo->rssi > 31) {
+ btc->dm.trx_para_level = 6;
+ return true;
+ } else if (wl->rssi_level == 1 && bt_linfo->rssi > 36) {
+ btc->dm.trx_para_level = 7;
+ return true;
+ }
+ btc->dm.trx_para_level = 0;
+ return false;
+ } else if (wl->status.map.traffic_dir & BIT(RTW89_TFC_DL)) {
+ if (bt_linfo->rssi > 28) {
+ btc->dm.trx_para_level = 6;
+ return true;
+ }
+ }
+
+ btc->dm.trx_para_level = 0;
+ return false;
+}
+
+#define _tdma_set_flctrl(btc, flc) ({(btc)->dm.tdma.rxflctrl = flc; })
+#define _tdma_set_tog(btc, wtg) ({(btc)->dm.tdma.wtgle_n = wtg; })
+#define _tdma_set_lek(btc, lek) ({(btc)->dm.tdma.leak_n = lek; })
+
+#define _slot_set(btc, sid, dura, tbl, type) \
+ do { \
+ typeof(sid) _sid = (sid); \
+ typeof(btc) _btc = (btc); \
+ _btc->dm.slot[_sid].dur = cpu_to_le16(dura);\
+ _btc->dm.slot[_sid].cxtbl = cpu_to_le32(tbl); \
+ _btc->dm.slot[_sid].cxtype = cpu_to_le16(type); \
+ } while (0)
+
+#define _slot_set_dur(btc, sid, dura) (btc)->dm.slot[sid].dur = cpu_to_le16(dura)
+#define _slot_set_tbl(btc, sid, tbl) (btc)->dm.slot[sid].cxtbl = cpu_to_le32(tbl)
+#define _slot_set_type(btc, sid, type) (btc)->dm.slot[sid].cxtype = cpu_to_le16(type)
+
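/*
 * Illustrative usage (informational only; values taken from the policy
 * code below): _slot_set() writes one slot descriptor in firmware
 * (little-endian) byte order, e.g.
 *
 *	_slot_set(btc, CXST_W1, 30, cxtbl[16], SLOT_ISO);
 *
 * is equivalent to
 *
 *	_slot_set_dur(btc, CXST_W1, 30);
 *	_slot_set_tbl(btc, CXST_W1, cxtbl[16]);
 *	_slot_set_type(btc, CXST_W1, SLOT_ISO);
 */
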
+struct btc_btinfo_lb2 {
+ u8 connect: 1;
+ u8 sco_busy: 1;
+ u8 inq_pag: 1;
+ u8 acl_busy: 1;
+ u8 hfp: 1;
+ u8 hid: 1;
+ u8 a2dp: 1;
+ u8 pan: 1;
+};
+
+struct btc_btinfo_lb3 {
+ u8 retry: 4;
+ u8 cqddr: 1;
+ u8 inq: 1;
+ u8 mesh_busy: 1;
+ u8 pag: 1;
+};
+
+struct btc_btinfo_hb0 {
+ s8 rssi;
+};
+
+struct btc_btinfo_hb1 {
+ u8 ble_connect: 1;
+ u8 reinit: 1;
+ u8 relink: 1;
+ u8 igno_wl: 1;
+ u8 voice: 1;
+ u8 ble_scan: 1;
+ u8 role_sw: 1;
+ u8 multi_link: 1;
+};
+
+struct btc_btinfo_hb2 {
+ u8 pan_active: 1;
+ u8 afh_update: 1;
+ u8 a2dp_active: 1;
+ u8 slave: 1;
+ u8 hid_slot: 2;
+ u8 hid_cnt: 2;
+};
+
+struct btc_btinfo_hb3 {
+ u8 a2dp_bitpool: 6;
+ u8 tx_3m: 1;
+ u8 a2dp_sink: 1;
+};
+
+union btc_btinfo {
+ u8 val;
+ struct btc_btinfo_lb2 lb2;
+ struct btc_btinfo_lb3 lb3;
+ struct btc_btinfo_hb0 hb0;
+ struct btc_btinfo_hb1 hb1;
+ struct btc_btinfo_hb2 hb2;
+ struct btc_btinfo_hb3 hb3;
+};
+
+static void _set_policy(struct rtw89_dev *rtwdev, u16 policy_type,
+ enum btc_reason_and_action action)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_fbtc_tdma *t = &dm->tdma;
+ struct rtw89_btc_fbtc_slot *s = dm->slot;
+ u8 type;
+ u32 tbl_w1, tbl_b1, tbl_b4;
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ tbl_w1 = cxtbl[1];
+ else
+ tbl_w1 = cxtbl[8];
+ tbl_b1 = cxtbl[3];
+ tbl_b4 = cxtbl[3];
+ } else {
+ tbl_w1 = cxtbl[16];
+ tbl_b1 = cxtbl[17];
+ tbl_b4 = cxtbl[17];
+ }
+
+ type = (u8)((policy_type & BTC_CXP_MASK) >> 8);
+ btc->bt_req_en = false;
+
+ switch (type) {
+ case BTC_CXP_USERDEF0:
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ btc->update_policy_force = true;
+ break;
+ case BTC_CXP_OFF: /* TDMA off */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF];
+ s[CXST_OFF] = s_def[CXST_OFF];
+
+ switch (policy_type) {
+ case BTC_CXP_OFF_BT:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[2]);
+ break;
+ case BTC_CXP_OFF_WL:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[1]);
+ break;
+ case BTC_CXP_OFF_EQ0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[0]);
+ break;
+ case BTC_CXP_OFF_EQ1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[16]);
+ break;
+ case BTC_CXP_OFF_EQ2:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[17]);
+ break;
+ case BTC_CXP_OFF_EQ3:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[18]);
+ break;
+ case BTC_CXP_OFF_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[5]);
+ break;
+ case BTC_CXP_OFF_BWB1:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ }
+ break;
+ case BTC_CXP_OFFB: /* TDMA off + beacon protect */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, false);
+ *t = t_def[CXTD_OFF_B2];
+ s[CXST_OFF] = s_def[CXST_OFF];
+ switch (policy_type) {
+ case BTC_CXP_OFFB_BWB0:
+ _slot_set_tbl(btc, CXST_OFF, cxtbl[8]);
+ break;
+ }
+ break;
+ case BTC_CXP_OFFE: /* TDMA off + beacon protect + Ext_control */
+ btc->bt_req_en = true;
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_OFF_EXT];
+ switch (policy_type) {
+ case BTC_CXP_OFFE_DEF:
+ s[CXST_E2G] = s_def[CXST_E2G];
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ case BTC_CXP_OFFE_DEF2:
+ _slot_set(btc, CXST_E2G, 20, cxtbl[1], SLOT_ISO);
+ s[CXST_E5G] = s_def[CXST_E5G];
+ s[CXST_EBT] = s_def[CXST_EBT];
+ s[CXST_ENULL] = s_def[CXST_ENULL];
+ break;
+ }
+ break;
+ case BTC_CXP_FIX: /* TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_FIX];
+ switch (policy_type) {
+ case BTC_CXP_FIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4010:
+ _slot_set(btc, CXST_W1, 40, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD4020:
+ _slot_set(btc, CXST_W1, 40, cxtbl[1], SLOT_MIX);
+ _slot_set(btc, CXST_B1, 20, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD7010:
+ _slot_set(btc, CXST_W1, 70, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 10, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD3060:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_FIX_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PFIX: /* PS-TDMA Fix-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PFIX];
+ if (btc->cx.wl.role_info.role_map.role.ap)
+ _tdma_set_flctrl(btc, CXFLC_QOSNULL);
+
+ switch (policy_type) {
+ case BTC_CXP_PFIX_TD3030:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 50, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2030:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 30, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2060:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 60, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PFIX_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 80, tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO: /* TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO];
+ switch (policy_type) {
+ case BTC_CXP_AUTO_TD50200:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD60200:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TD20200:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO_TDW1B1: /* W1:B1 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO: /* PS-TDMA Auto-Slot */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO_TD50200:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD60200:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TD20200:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO_TDW1B1:
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, dm->slot_dur[CXST_B1],
+ tbl_b1, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_AUTO2: /* TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_AUTO2];
+ switch (policy_type) {
+ case BTC_CXP_AUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_AUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ case BTC_CXP_PAUTO2: /* PS-TDMA Auto-Slot2 */
+ _write_scbd(rtwdev, BTC_WSCB_TDMA, true);
+ *t = t_def[CXTD_PAUTO2];
+ switch (policy_type) {
+ case BTC_CXP_PAUTO2_TD3050:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD3070:
+ _slot_set(btc, CXST_W1, 30, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 70, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD5050:
+ _slot_set(btc, CXST_W1, 50, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 50, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD6060:
+ _slot_set(btc, CXST_W1, 60, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 60, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TD2080:
+ _slot_set(btc, CXST_W1, 20, tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B1, 200, tbl_b1, SLOT_MIX);
+ _slot_set(btc, CXST_B4, 80, tbl_b4, SLOT_MIX);
+ break;
+ case BTC_CXP_PAUTO2_TDW1B4: /* W1:B4 = user-define */
+ _slot_set(btc, CXST_W1, dm->slot_dur[CXST_W1],
+ tbl_w1, SLOT_ISO);
+ _slot_set(btc, CXST_B4, dm->slot_dur[CXST_B4],
+ tbl_b4, SLOT_MIX);
+ break;
+ }
+ break;
+ }
+
+ _fw_set_policy(rtwdev, policy_type, action);
+}
+
+static void _set_gnt_bt(struct rtw89_dev *rtwdev, u8 phy_map, u8 state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_mac_ax_gnt *g = dm->gnt.band;
+ u8 i;
+
+ if (phy_map > BTC_PHY_ALL)
+ return;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ if (!(phy_map & BIT(i)))
+ continue;
+
+ switch (state) {
+ case BTC_GNT_HW:
+ g[i].gnt_bt_sw_en = 0;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_LO:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 0;
+ break;
+ case BTC_GNT_SW_HI:
+ g[i].gnt_bt_sw_en = 1;
+ g[i].gnt_bt = 1;
+ break;
+ }
+ }
+
+ rtw89_mac_cfg_gnt(rtwdev, &dm->gnt);
+}
+
+static void _set_bt_plut(struct rtw89_dev *rtwdev, u8 phy_map,
+ u8 tx_val, u8 rx_val)
+{
+ struct rtw89_mac_ax_plt plt;
+
+ plt.band = RTW89_MAC_0;
+ plt.tx = tx_val;
+ plt.rx = rx_val;
+
+ if (phy_map & BTC_PHY_0)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ plt.band = RTW89_MAC_1;
+ if (phy_map & BTC_PHY_1)
+ rtw89_mac_cfg_plt(rtwdev, &plt);
+}
+
+static void _set_ant(struct rtw89_dev *rtwdev, bool force_exec,
+ u8 phy_map, u8 type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ u8 gnt_wl_ctrl, gnt_bt_ctrl, plt_ctrl, i, b2g = 0;
+ u32 ant_path_type;
+
+ ant_path_type = ((phy_map << 8) + type);
+
+ if (btc->dm.run_reason == BTC_RSN_NTFY_POWEROFF ||
+ btc->dm.run_reason == BTC_RSN_NTFY_RADIO_STATE ||
+ btc->dm.run_reason == BTC_RSN_CMD_SET_COEX)
+ force_exec = FC_EXEC;
+
+ if (!force_exec && ant_path_type == dm->set_ant_path) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by no change!!\n",
+ __func__);
+ return;
+ } else if (bt->rfk_info.map.run) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by bt rfk!!\n", __func__);
+ return;
+ } else if (btc->dm.run_reason != BTC_RSN_NTFY_WL_RFK &&
+ wl->rfk_info.state != BTC_WRFK_STOP) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by wl rfk!!\n", __func__);
+ return;
+ }
+
+ dm->set_ant_path = ant_path_type;
+
+ rtw89_debug(rtwdev,
+ RTW89_DBG_BTC,
+ "[BTC], %s(): path=0x%x, set_type=0x%x\n",
+ __func__, phy_map, dm->set_ant_path & 0xff);
+
+ switch (type) {
+ case BTC_ANT_WPOWERON:
+ rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ break;
+ case BTC_ANT_WINIT:
+ if (bt->enable.now) {
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ } else {
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ }
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_BT, BTC_PLT_BT);
+ break;
+ case BTC_ANT_WONLY:
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ case BTC_ANT_WOFF:
+ rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ case BTC_ANT_W2G:
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ if (rtwdev->dbcc_en) {
+ for (i = 0; i < RTW89_PHY_MAX; i++) {
+ b2g = (wl_dinfo->real_band[i] == RTW89_BAND_2G);
+
+ gnt_wl_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
+ _set_gnt_wl(rtwdev, BIT(i), gnt_wl_ctrl);
+
+ gnt_bt_ctrl = b2g ? BTC_GNT_HW : BTC_GNT_SW_HI;
+ /* BT should be controlled by GNT_BT if WL_2G is at S0 */
+ if (i == 1 &&
+ wl_dinfo->real_band[0] == RTW89_BAND_2G &&
+ wl_dinfo->real_band[1] == RTW89_BAND_5G)
+ gnt_bt_ctrl = BTC_GNT_HW;
+ _set_gnt_bt(rtwdev, BIT(i), gnt_bt_ctrl);
+
+ plt_ctrl = b2g ? BTC_PLT_BT : BTC_PLT_NONE;
+ _set_bt_plut(rtwdev, BIT(i),
+ plt_ctrl, plt_ctrl);
+ }
+ } else {
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL,
+ BTC_PLT_BT, BTC_PLT_BT);
+ }
+ break;
+ case BTC_ANT_W5G:
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ case BTC_ANT_W25G:
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_HW);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_HW);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL,
+ BTC_PLT_GNT_WL, BTC_PLT_GNT_WL);
+ break;
+ case BTC_ANT_FREERUN:
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_bt_plut(rtwdev, BTC_PHY_ALL, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ case BTC_ANT_WRFK:
+ rtw89_mac_cfg_ctrl_path(rtwdev, true);
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_LO);
+ _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ case BTC_ANT_BRFK:
+ rtw89_mac_cfg_ctrl_path(rtwdev, false);
+ _set_gnt_wl(rtwdev, phy_map, BTC_GNT_SW_LO);
+ _set_gnt_bt(rtwdev, phy_map, BTC_GNT_SW_HI);
+ _set_bt_plut(rtwdev, phy_map, BTC_PLT_NONE, BTC_PLT_NONE);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _action_wl_only(struct rtw89_dev *rtwdev)
+{
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_ONLY);
+}
+
+static void _action_wl_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WINIT);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_INIT);
+}
+
+static void _action_wl_off(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+
+ if (wl->status.map.rf_off || btc->dm.bt_only)
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_WOFF);
+
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_OFF);
+}
+
+static void _action_freerun(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_FREERUN);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_FREERUN);
+
+ btc->dm.freerun = true;
+}
+
+static void _action_bt_whql(struct rtw89_dev *rtwdev)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_WHQL);
+}
+
+static void _action_bt_off(struct rtw89_dev *rtwdev)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WONLY);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_BT_OFF);
+}
+
+static void _action_bt_idle(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_link_info *b = &btc->cx.bt.link_info;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-idle */
+ if (b->profile_cnt.now > 0)
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010,
+ BTC_ACT_BT_IDLE);
+ else
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4020,
+ BTC_ACT_BT_IDLE);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD5050,
+ BTC_ACT_BT_IDLE);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-idle */
+ if (b->profile_cnt.now > 0)
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010,
+ BTC_ACT_BT_IDLE);
+ else
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4020,
+ BTC_ACT_BT_IDLE);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD5050,
+ BTC_ACT_BT_IDLE);
+ break;
+ case BTC_WLINKING: /* wl-connecting + bt-inq or bt-idle */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD7010,
+ BTC_ACT_BT_IDLE);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-idle */
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB1, BTC_ACT_BT_IDLE);
+ break;
+ }
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_BT_IDLE);
+ }
+}
+
+static void _action_bt_hfp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.wl.status.map._4way)
+ _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HFP);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HFP);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ2, BTC_ACT_BT_HFP);
+ }
+}
+
+static void _action_bt_hid(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->cx.wl.status.map._4way)
+ _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_BT_HID);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_BWB0, BTC_ACT_BT_HID);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ3, BTC_ACT_BT_HID);
+ }
+}
+
+static void _action_bt_a2dp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP */
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
+ dm->slot_dur[CXST_W1] = 40;
+ dm->slot_dur[CXST_B1] = 200;
+ _set_policy(rtwdev,
+ BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP);
+ } else {
+ _set_policy(rtwdev,
+ BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP);
+ }
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP */
+ case BTC_WLINKING: /* wl-connecting + bt-A2DP */
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
+ dm->slot_dur[CXST_W1] = 40;
+ dm->slot_dur[CXST_B1] = 200;
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
+ BTC_ACT_BT_A2DP);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ BTC_ACT_BT_A2DP);
+ }
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-A2DP */
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD20200, BTC_ACT_BT_A2DP);
+ break;
+ }
+}
+
+static void _action_bt_a2dpsink(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD2030, BTC_ACT_BT_A2DPSINK);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD2060, BTC_ACT_BT_A2DPSINK);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD2030, BTC_ACT_BT_A2DPSINK);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD2060, BTC_ACT_BT_A2DPSINK);
+ break;
+ case BTC_WLINKING: /* wl-connecting + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_A2DPSINK);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-A2dp_Sink */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD2080, BTC_ACT_BT_A2DPSINK);
+ break;
+ }
+}
+
+static void _action_bt_pan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-PAN */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD5050, BTC_ACT_BT_PAN);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-PAN */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD3070, BTC_ACT_BT_PAN);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-PAN */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_PAN);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-PAN */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3060, BTC_ACT_BT_PAN);
+ break;
+ case BTC_WLINKING: /* wl-connecting + bt-PAN */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4020, BTC_ACT_BT_PAN);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-pan */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD2080, BTC_ACT_BT_PAN);
+ break;
+ }
+}
+
+static void _action_bt_a2dp_hid(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+HID */
+ case BTC_WIDLE: /* wl-idle + bt-A2DP */
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
+ dm->slot_dur[CXST_W1] = 40;
+ dm->slot_dur[CXST_B1] = 200;
+ _set_policy(rtwdev,
+ BTC_CXP_PAUTO_TDW1B1, BTC_ACT_BT_A2DP_HID);
+ } else {
+ _set_policy(rtwdev,
+ BTC_CXP_PAUTO_TD50200, BTC_ACT_BT_A2DP_HID);
+ }
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+HID */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3050, BTC_ACT_BT_A2DP_HID);
+ break;
+
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+HID */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP_HID);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+HID */
+ case BTC_WLINKING: /* wl-connecting + bt-A2DP+HID */
+ if (a2dp.vendor_id == 0x4c || dm->leak_ap) {
+ dm->slot_dur[CXST_W1] = 40;
+ dm->slot_dur[CXST_B1] = 200;
+ _set_policy(rtwdev, BTC_CXP_AUTO_TDW1B1,
+ BTC_ACT_BT_A2DP_HID);
+ } else {
+ _set_policy(rtwdev, BTC_CXP_AUTO_TD50200,
+ BTC_ACT_BT_A2DP_HID);
+ }
+ break;
+ }
+}
+
+static void _action_bt_a2dp_pan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_PAN);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070, BTC_ACT_BT_A2DP_PAN);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD5050, BTC_ACT_BT_A2DP_PAN);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3070, BTC_ACT_BT_A2DP_PAN);
+ break;
+ case BTC_WLINKING: /* wl-connecting + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050, BTC_ACT_BT_A2DP_PAN);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-A2DP+PAN */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD2080, BTC_ACT_BT_A2DP_PAN);
+ break;
+ }
+}
+
+static void _action_bt_pan_hid(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD3030, BTC_ACT_BT_PAN_HID);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD3070, BTC_ACT_BT_PAN_HID);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3030, BTC_ACT_BT_PAN_HID);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD3060, BTC_ACT_BT_PAN_HID);
+ break;
+ case BTC_WLINKING: /* wl-connecting + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_FIX_TD4010, BTC_ACT_BT_PAN_HID);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PFIX_TD2080, BTC_ACT_BT_PAN_HID);
+ break;
+ }
+}
+
+static void _action_bt_a2dp_pan_hid(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ switch (btc->cx.state_map) {
+ case BTC_WBUSY_BNOSCAN: /* wl-busy + bt-A2DP+PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070,
+ BTC_ACT_BT_A2DP_PAN_HID);
+ break;
+ case BTC_WBUSY_BSCAN: /* wl-busy + bt-inq + bt-A2DP+PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD3070,
+ BTC_ACT_BT_A2DP_PAN_HID);
+ break;
+ case BTC_WSCAN_BSCAN: /* wl-scan + bt-inq + bt-A2DP+PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3070,
+ BTC_ACT_BT_A2DP_PAN_HID);
+ break;
+ case BTC_WSCAN_BNOSCAN: /* wl-scan + bt-A2DP+PAN+HID */
+ case BTC_WLINKING: /* wl-connecting + bt-A2DP+PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_AUTO2_TD3050,
+ BTC_ACT_BT_A2DP_PAN_HID);
+ break;
+ case BTC_WIDLE: /* wl-idle + bt-A2DP+PAN+HID */
+ _set_policy(rtwdev, BTC_CXP_PAUTO2_TD2080,
+ BTC_ACT_BT_A2DP_PAN_HID);
+ break;
+ }
+}
+
+static void _action_wl_5g(struct rtw89_dev *rtwdev)
+{
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W5G);
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_5G);
+}
+
+static void _action_wl_other(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED)
+ _set_policy(rtwdev, BTC_CXP_OFFB_BWB0, BTC_ACT_WL_OTHER);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_OTHER);
+}
+
+static void _action_wl_nc(struct rtw89_dev *rtwdev)
+{
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+ _set_policy(rtwdev, BTC_CXP_OFF_BT, BTC_ACT_WL_NC);
+}
+
+static void _action_wl_rfk(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_rfk_info rfk = btc->cx.wl.rfk_info;
+
+ if (rfk.state != BTC_WRFK_START)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): band = %d\n",
+ __func__, rfk.band);
+
+ _set_ant(rtwdev, FC_EXEC, BTC_PHY_ALL, BTC_ANT_WRFK);
+ _set_policy(rtwdev, BTC_CXP_OFF_WL, BTC_ACT_WL_RFK);
+}
+
+static void _set_btg_ctrl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ bool is_btg = false;
+
+ if (btc->ctrl.manual)
+ return;
+
+ /* notify halbb whether to ignore GNT_BT for WL BB Rx-AGC control */
+ if (wl_rinfo->link_mode == BTC_WLINK_5G) /* always 0 if 5G */
+ is_btg = false;
+ else if (wl_rinfo->link_mode == BTC_WLINK_25G_DBCC &&
+ wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
+ is_btg = false;
+ else
+ is_btg = true;
+
+ if (btc->dm.run_reason != BTC_RSN_NTFY_INIT &&
+ is_btg == btc->dm.wl_btg_rx)
+ return;
+
+ btc->dm.wl_btg_rx = is_btg;
+
+ if (wl_rinfo->link_mode == BTC_WLINK_25G_MCC)
+ return;
+
+ rtw89_ctrl_btg(rtwdev, is_btg);
+}
+
+struct rtw89_txtime_data {
+ struct rtw89_dev *rtwdev;
+ int type;
+ u32 tx_time;
+ u8 tx_retry;
+ u16 enable;
+ bool reenable;
+};
+
+static void rtw89_tx_time_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_txtime_data *iter_data =
+ (struct rtw89_txtime_data *)data;
+ struct rtw89_dev *rtwdev = iter_data->rtwdev;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_wl_link_info *plink = NULL;
+ u8 port = rtwvif->port;
+ u32 tx_time = iter_data->tx_time;
+ u8 tx_retry = iter_data->tx_retry;
+ u16 enable = iter_data->enable;
+ bool reenable = iter_data->reenable;
+
+ plink = &wl->link_info[port];
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): port = %d\n", __func__, port);
+
+ if (!plink->connected) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): connected = %d\n",
+ __func__, plink->connected);
+ return;
+ }
+
+ /* back up the original tx time before the tx-limit is enabled */
+ if (reenable) {
+ rtw89_mac_get_tx_time(rtwdev, rtwsta, &plink->tx_time);
+ rtw89_mac_get_tx_retry_limit(rtwdev, rtwsta, &plink->tx_retry);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): reenable, tx_time=%d tx_retry= %d\n",
+ __func__, plink->tx_time, plink->tx_retry);
+ }
+
+ /* restore the original tx time if no tx-limit */
+ if (!enable) {
+ rtw89_mac_set_tx_time(rtwdev, rtwsta, true, plink->tx_time);
+ rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, true,
+ plink->tx_retry);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): restore, tx_time=%d tx_retry= %d\n",
+ __func__, plink->tx_time, plink->tx_retry);
+
+ } else {
+ rtw89_mac_set_tx_time(rtwdev, rtwsta, false, tx_time);
+ rtw89_mac_set_tx_retry_limit(rtwdev, rtwsta, false, tx_retry);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): set, tx_time=%d tx_retry= %d\n",
+ __func__, tx_time, tx_retry);
+ }
+}
+
+static void _set_wl_tx_limit(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+ struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
+ struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_txtime_data data = {.rtwdev = rtwdev};
+ u8 mode = wl_rinfo->link_mode;
+ u8 tx_retry = 0;
+ u32 tx_time = 0;
+ u16 enable = 0;
+ bool reenable = false;
+
+ if (btc->ctrl.manual)
+ return;
+
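+ /* skip the tx-limit for freerun/ignore-BT/no-BT-profile/5G/no-link; limit WL tx time while BT HFP or HID is active */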
+ if (btc->dm.freerun || btc->ctrl.igno_bt || b->profile_cnt.now == 0 ||
+ mode == BTC_WLINK_5G || mode == BTC_WLINK_NOLINK) {
+ enable = 0;
+ tx_time = BTC_MAX_TX_TIME_DEF;
+ tx_retry = BTC_MAX_TX_RETRY_DEF;
+ } else if ((hfp->exist && hid->exist) || hid->pair_cnt > 1) {
+ enable = 1;
+ tx_time = BTC_MAX_TX_TIME_L2;
+ tx_retry = BTC_MAX_TX_RETRY_L1;
+ } else if (hfp->exist || hid->exist) {
+ enable = 1;
+ tx_time = BTC_MAX_TX_TIME_L3;
+ tx_retry = BTC_MAX_TX_RETRY_L1;
+ } else {
+ enable = 0;
+ tx_time = BTC_MAX_TX_TIME_DEF;
+ tx_retry = BTC_MAX_TX_RETRY_DEF;
+ }
+
+ if (dm->wl_tx_limit.enable == enable &&
+ dm->wl_tx_limit.tx_time == tx_time &&
+ dm->wl_tx_limit.tx_retry == tx_retry)
+ return;
+
+ if (!dm->wl_tx_limit.enable && enable)
+ reenable = true;
+
+ dm->wl_tx_limit.enable = enable;
+ dm->wl_tx_limit.tx_time = tx_time;
+ dm->wl_tx_limit.tx_retry = tx_retry;
+
+ data.enable = enable;
+ data.tx_time = tx_time;
+ data.tx_retry = tx_retry;
+ data.reenable = reenable;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_tx_time_iter,
+ &data);
+}
+
+static void _set_bt_rx_agc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ bool bt_hi_lna_rx = false;
+
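+ /* request BT high-LNA RX only when WL is linked and WL BTG RX is enabled */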
+ if (wl_rinfo->link_mode != BTC_WLINK_NOLINK && btc->dm.wl_btg_rx)
+ bt_hi_lna_rx = true;
+
+ if (bt_hi_lna_rx == bt->hi_lna_rx)
+ return;
+
+ _write_scbd(rtwdev, BTC_WSCB_BT_HILNA, bt_hi_lna_rx);
+}
+
+/* TODO add these functions */
+static void _action_common(struct rtw89_dev *rtwdev)
+{
+ _set_btg_ctrl(rtwdev);
+ _set_wl_tx_limit(rtwdev);
+ _set_bt_afh_info(rtwdev);
+ _set_bt_rx_agc(rtwdev);
+ _set_rf_trx_para(rtwdev);
+}
+
+static void _action_by_bt(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+ struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc;
+ u8 profile_map = 0;
+
+ if (bt_linfo->hfp_desc.exist)
+ profile_map |= BTC_BT_HFP;
+
+ if (bt_linfo->hid_desc.exist)
+ profile_map |= BTC_BT_HID;
+
+ if (bt_linfo->a2dp_desc.exist)
+ profile_map |= BTC_BT_A2DP;
+
+ if (bt_linfo->pan_desc.exist)
+ profile_map |= BTC_BT_PAN;
+
+ switch (profile_map) {
+ case BTC_BT_NOPROFILE:
+ if (_check_freerun(rtwdev))
+ _action_freerun(rtwdev);
+ else if (a2dp.active || pan.active)
+ _action_bt_pan(rtwdev);
+ else
+ _action_bt_idle(rtwdev);
+ break;
+ case BTC_BT_HFP:
+ if (_check_freerun(rtwdev))
+ _action_freerun(rtwdev);
+ else
+ _action_bt_hfp(rtwdev);
+ break;
+ case BTC_BT_HFP | BTC_BT_HID:
+ case BTC_BT_HID:
+ if (_check_freerun(rtwdev))
+ _action_freerun(rtwdev);
+ else
+ _action_bt_hid(rtwdev);
+ break;
+ case BTC_BT_A2DP:
+ if (_check_freerun(rtwdev))
+ _action_freerun(rtwdev);
+ else if (a2dp.sink)
+ _action_bt_a2dpsink(rtwdev);
+ else if (bt_linfo->multi_link.now && !hid.pair_cnt)
+ _action_bt_a2dp_pan(rtwdev);
+ else
+ _action_bt_a2dp(rtwdev);
+ break;
+ case BTC_BT_PAN:
+ _action_bt_pan(rtwdev);
+ break;
+ case BTC_BT_A2DP | BTC_BT_HFP:
+ case BTC_BT_A2DP | BTC_BT_HID:
+ case BTC_BT_A2DP | BTC_BT_HFP | BTC_BT_HID:
+ if (_check_freerun(rtwdev))
+ _action_freerun(rtwdev);
+ else
+ _action_bt_a2dp_hid(rtwdev);
+ break;
+ case BTC_BT_A2DP | BTC_BT_PAN:
+ _action_bt_a2dp_pan(rtwdev);
+ break;
+ case BTC_BT_PAN | BTC_BT_HFP:
+ case BTC_BT_PAN | BTC_BT_HID:
+ case BTC_BT_PAN | BTC_BT_HFP | BTC_BT_HID:
+ _action_bt_pan_hid(rtwdev);
+ break;
+ case BTC_BT_A2DP | BTC_BT_PAN | BTC_BT_HID:
+ case BTC_BT_A2DP | BTC_BT_PAN | BTC_BT_HFP:
+ default:
+ _action_bt_a2dp_pan_hid(rtwdev);
+ break;
+ }
+}
+
+static void _action_wl_2g_sta(struct rtw89_dev *rtwdev)
+{
+ _action_by_bt(rtwdev);
+}
+
+static void _action_wl_scan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+
+ if (rtwdev->dbcc_en) {
+ if (wl_dinfo->real_band[RTW89_PHY_0] != RTW89_BAND_2G &&
+ wl_dinfo->real_band[RTW89_PHY_1] != RTW89_BAND_2G)
+ _action_wl_5g(rtwdev);
+ else
+ _action_by_bt(rtwdev);
+ } else {
+ if (wl->scan_info.band[RTW89_PHY_0] != RTW89_BAND_2G)
+ _action_wl_5g(rtwdev);
+ else
+ _action_by_bt(rtwdev);
+ }
+}
+
+static void _action_wl_25g_mcc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W25G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
+ BTC_ACT_WL_25G_MCC);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
+ BTC_ACT_WL_25G_MCC);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_25G_MCC);
+ }
+}
+
+static void _action_wl_2g_mcc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
+ BTC_ACT_WL_2G_MCC);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF,
+ BTC_ACT_WL_2G_MCC);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_MCC);
+ }
+}
+
+static void _action_wl_2g_scc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_SCC);
+ else
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_SCC);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_SCC);
+ }
+}
+
+static void _action_wl_2g_ap(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) {
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF2,
+ BTC_ACT_WL_2G_AP);
+ else
+ _set_policy(rtwdev, BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_AP);
+ } else {/* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_AP);
+ }
+}
+
+static void _action_wl_2g_go(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_GO);
+ else
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_GO);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GO);
+ }
+}
+
+static void _action_wl_2g_gc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ _action_by_bt(rtwdev);
+ } else {/* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_GC);
+ }
+}
+
+static void _action_wl_2g_nan(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ _set_ant(rtwdev, NM_EXEC, BTC_PHY_ALL, BTC_ANT_W2G);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_SHARED) { /* shared-antenna */
+ if (btc->cx.bt.link_info.profile_cnt.now == 0)
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF2, BTC_ACT_WL_2G_NAN);
+ else
+ _set_policy(rtwdev,
+ BTC_CXP_OFFE_DEF, BTC_ACT_WL_2G_NAN);
+ } else { /* dedicated-antenna */
+ _set_policy(rtwdev, BTC_CXP_OFF_EQ0, BTC_ACT_WL_2G_NAN);
+ }
+}
+
+static u32 _read_scbd(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ u32 scbd_val = 0;
+
+ if (!chip->scbd)
+ return 0;
+
+ scbd_val = rtw89_mac_get_sb(rtwdev);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], read scbd: 0x%08x\n",
+ scbd_val);
+
+ btc->cx.cnt_bt[BTC_BCNT_SCBDREAD]++;
+ return scbd_val;
+}
+
+static void _write_scbd(struct rtw89_dev *rtwdev, u32 val, bool state)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 scbd_val = 0;
+
+ if (!chip->scbd)
+ return;
+
+ scbd_val = state ? wl->scbd | val : wl->scbd & ~val;
+
+ if (scbd_val == wl->scbd)
+ return;
+ rtw89_mac_cfg_sb(rtwdev, scbd_val);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], write scbd: 0x%08x\n",
+ scbd_val);
+ wl->scbd = scbd_val;
+
+ btc->cx.cnt_wl[BTC_WCNT_SCBDUPDATE]++;
+}
+
+static u8
+_update_rssi_state(struct rtw89_dev *rtwdev, u8 pre_state, u8 rssi, u8 thresh)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 next_state, tol = chip->rssi_tol;
+
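+ /* apply rssi_tol hysteresis only on the low -> high transition to avoid toggling around the threshold */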
+ if (pre_state == BTC_RSSI_ST_LOW ||
+ pre_state == BTC_RSSI_ST_STAY_LOW) {
+ if (rssi >= (thresh + tol))
+ next_state = BTC_RSSI_ST_HIGH;
+ else
+ next_state = BTC_RSSI_ST_STAY_LOW;
+ } else {
+ if (rssi < thresh)
+ next_state = BTC_RSSI_ST_LOW;
+ else
+ next_state = BTC_RSSI_ST_STAY_HIGH;
+ }
+
+ return next_state;
+}
+
+static
+void _update_dbcc_band(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
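+ /* use the scan band while this PHY is scanning, otherwise the operating band */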
+ btc->cx.wl.dbcc_info.real_band[phy_idx] =
+ btc->cx.wl.scan_info.phy_map & BIT(phy_idx) ?
+ btc->cx.wl.dbcc_info.scan_band[phy_idx] :
+ btc->cx.wl.dbcc_info.op_band[phy_idx];
+}
+
+static void _update_wl_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info *wl_linfo = wl->link_info;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ u8 i, cnt_connect = 0, cnt_connecting = 0, cnt_active = 0;
+ u8 cnt_2g = 0, cnt_5g = 0, phy;
+ u32 wl_2g_ch[2] = {0}, wl_5g_ch[2] = {0};
+ bool b2g = false, b5g = false, client_joined = false;
+
+ memset(wl_rinfo, 0, sizeof(*wl_rinfo));
+
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ /* check if the role is active */
+ if (!wl_linfo[i].active)
+ continue;
+
+ cnt_active++;
+ wl_rinfo->active_role[cnt_active - 1].role = wl_linfo[i].role;
+ wl_rinfo->active_role[cnt_active - 1].pid = wl_linfo[i].pid;
+ wl_rinfo->active_role[cnt_active - 1].phy = wl_linfo[i].phy;
+ wl_rinfo->active_role[cnt_active - 1].band = wl_linfo[i].band;
+ wl_rinfo->active_role[cnt_active - 1].noa = (u8)wl_linfo[i].noa;
+ wl_rinfo->active_role[cnt_active - 1].connected = 0;
+
+ wl->port_id[wl_linfo[i].role] = wl_linfo[i].pid;
+
+ phy = wl_linfo[i].phy;
+
+ /* check dbcc role */
+ if (rtwdev->dbcc_en && phy < RTW89_PHY_MAX) {
+ wl_dinfo->role[phy] = wl_linfo[i].role;
+ wl_dinfo->op_band[phy] = wl_linfo[i].band;
+ _update_dbcc_band(rtwdev, phy);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ if (wl_linfo[i].connected == MLME_NO_LINK) {
+ continue;
+ } else if (wl_linfo[i].connected == MLME_LINKING) {
+ cnt_connecting++;
+ } else {
+ cnt_connect++;
+ if ((wl_linfo[i].role == RTW89_WIFI_ROLE_P2P_GO ||
+ wl_linfo[i].role == RTW89_WIFI_ROLE_AP) &&
+ wl_linfo[i].client_cnt > 1)
+ client_joined = true;
+ }
+
+ wl_rinfo->role_map.val |= BIT(wl_linfo[i].role);
+ wl_rinfo->active_role[cnt_active - 1].ch = wl_linfo[i].ch;
+ wl_rinfo->active_role[cnt_active - 1].bw = wl_linfo[i].bw;
+ wl_rinfo->active_role[cnt_active - 1].connected = 1;
+
+ /* only care about 2 roles + BT coex */
+ if (wl_linfo[i].band != RTW89_BAND_2G) {
+ if (cnt_5g <= ARRAY_SIZE(wl_5g_ch) - 1)
+ wl_5g_ch[cnt_5g] = wl_linfo[i].ch;
+ cnt_5g++;
+ b5g = true;
+ } else {
+ if (cnt_2g <= ARRAY_SIZE(wl_2g_ch) - 1)
+ wl_2g_ch[cnt_2g] = wl_linfo[i].ch;
+ cnt_2g++;
+ b2g = true;
+ }
+ }
+
+ wl_rinfo->connect_cnt = cnt_connect;
+
+ /* Be careful when changing the following sequence!! */
+ if (cnt_connect == 0) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->role_map.role.none = 1;
+ } else if (!b2g && b5g) {
+ wl_rinfo->link_mode = BTC_WLINK_5G;
+ } else if (wl_rinfo->role_map.role.nan) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_NAN;
+ } else if (cnt_connect > BTC_TDMA_WLROLE_MAX) {
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ } else if (b2g && b5g && cnt_connect == 2) {
+ if (rtwdev->dbcc_en) {
+ switch (wl_dinfo->role[RTW89_PHY_0]) {
+ case RTW89_WIFI_ROLE_STATION:
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ break;
+ case RTW89_WIFI_ROLE_P2P_GO:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ break;
+ case RTW89_WIFI_ROLE_P2P_CLIENT:
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ break;
+ case RTW89_WIFI_ROLE_AP:
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ break;
+ default:
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ break;
+ }
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_25G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 2) {
+ if (wl_rinfo->role_map.role.station &&
+ (wl_rinfo->role_map.role.p2p_go ||
+ wl_rinfo->role_map.role.p2p_gc ||
+ wl_rinfo->role_map.role.ap)) {
+ if (wl_2g_ch[0] == wl_2g_ch[1])
+ wl_rinfo->link_mode = BTC_WLINK_2G_SCC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ } else {
+ wl_rinfo->link_mode = BTC_WLINK_2G_MCC;
+ }
+ } else if (!b5g && cnt_connect == 1) {
+ if (wl_rinfo->role_map.role.station)
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ else if (wl_rinfo->role_map.role.ap)
+ wl_rinfo->link_mode = BTC_WLINK_2G_AP;
+ else if (wl_rinfo->role_map.role.p2p_go)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GO;
+ else if (wl_rinfo->role_map.role.p2p_gc)
+ wl_rinfo->link_mode = BTC_WLINK_2G_GC;
+ else
+ wl_rinfo->link_mode = BTC_WLINK_OTHER;
+ }
+
+ /* if no client_joined, don't care about the P2P-GO/AP role */
+ if (wl_rinfo->role_map.role.p2p_go || wl_rinfo->role_map.role.ap) {
+ if (!client_joined) {
+ if (wl_rinfo->link_mode == BTC_WLINK_2G_SCC ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_MCC) {
+ wl_rinfo->link_mode = BTC_WLINK_2G_STA;
+ wl_rinfo->connect_cnt = 1;
+ } else if (wl_rinfo->link_mode == BTC_WLINK_2G_GO ||
+ wl_rinfo->link_mode == BTC_WLINK_2G_AP) {
+ wl_rinfo->link_mode = BTC_WLINK_NOLINK;
+ wl_rinfo->connect_cnt = 0;
+ }
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], cnt_connect = %d, link_mode = %d\n",
+ cnt_connect, wl_rinfo->link_mode);
+
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+}
+
+#define BTC_CHK_HANG_MAX 3
+#define BTC_SCB_INV_VALUE GENMASK(31, 0)
+
+void rtw89_coex_act1_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ coex_act1_work.work);
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
+ dm->cnt_notify[BTC_NCNT_TIMER]++;
+ if (wl->status.map._4way)
+ wl->status.map._4way = false;
+ if (wl->status.map.connecting)
+ wl->status.map.connecting = false;
+
+ _run_coex(rtwdev, BTC_RSN_ACT1_WORK);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_coex_bt_devinfo_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ coex_bt_devinfo_work.work);
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
+ dm->cnt_notify[BTC_NCNT_TIMER]++;
+ a2dp->play_latency = 0;
+ _run_coex(rtwdev, BTC_RSN_BT_DEVINFO_WORK);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_coex_rfk_chk_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ coex_rfk_chk_work.work);
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): enter\n", __func__);
+ dm->cnt_notify[BTC_NCNT_TIMER]++;
+ if (wl->rfk_info.state != BTC_WRFK_STOP) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): RFK timeout\n", __func__);
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]++;
+ dm->error.map.wl_rfk_timeout = true;
+ wl->rfk_info.state = BTC_WRFK_STOP;
+ _write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
+ _run_coex(rtwdev, BTC_RSN_RFK_CHK_WORK);
+ }
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void _update_bt_scbd(struct rtw89_dev *rtwdev, bool only_update)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ u32 val;
+ bool status_change = false;
+
+ if (!chip->scbd)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s\n", __func__);
+
+ val = _read_scbd(rtwdev);
+ if (val == BTC_SCB_INV_VALUE) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by invalid scbd value\n",
+ __func__);
+ return;
+ }
+
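+ /* treat BT as disabled if the scoreboard ON bit is clear or the BT counters stayed frozen too long */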
+ if (!(val & BTC_BSCB_ON) ||
+ btc->dm.cnt_dm[BTC_DCNT_BTCNT_FREEZE] >= BTC_CHK_HANG_MAX)
+ bt->enable.now = 0;
+ else
+ bt->enable.now = 1;
+
+ if (bt->enable.now != bt->enable.last)
+ status_change = true;
+
+ /* reset bt info if bt is re-enabled */
+ if (bt->enable.now && !bt->enable.last) {
+ _reset_btc_var(rtwdev, BTC_RESET_BTINFO);
+ cx->cnt_bt[BTC_BCNT_REENABLE]++;
+ bt->enable.now = 1;
+ }
+
+ bt->enable.last = bt->enable.now;
+ bt->scbd = val;
+ bt->mbx_avl = !!(val & BTC_BSCB_ACT);
+
+ if (bt->whql_test != !!(val & BTC_BSCB_WHQL))
+ status_change = true;
+
+ bt->whql_test = !!(val & BTC_BSCB_WHQL);
+ bt->btg_type = val & BTC_BSCB_BT_S1 ? BTC_BT_BTG : BTC_BT_ALONE;
+ bt->link_info.a2dp_desc.active = !!(val & BTC_BSCB_A2DP_ACT);
+
+ /* if bt rfk run flag transitions 1->0 */
+ if (bt->rfk_info.map.run && !(val & BTC_BSCB_RFK_RUN))
+ status_change = true;
+
+ bt->rfk_info.map.run = !!(val & BTC_BSCB_RFK_RUN);
+ bt->rfk_info.map.req = !!(val & BTC_BSCB_RFK_REQ);
+ bt->hi_lna_rx = !!(val & BTC_BSCB_BT_HILNA);
+ bt->link_info.status.map.connect = !!(val & BTC_BSCB_BT_CONNECT);
+ bt->run_patch_code = !!(val & BTC_BSCB_PATCH_CODE);
+
+ if (!only_update && status_change)
+ _run_coex(rtwdev, BTC_RSN_UPDATE_BT_SCBD);
+}
+
+static bool _chk_wl_rfk_request(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+
+ _update_bt_scbd(rtwdev, true);
+
+ cx->cnt_wl[BTC_WCNT_RFK_REQ]++;
+
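+ /* reject the WL RFK while BT RFK is running or requested, unless the BT RFK already timed out */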
+ if ((bt->rfk_info.map.run || bt->rfk_info.map.req) &&
+ !bt->rfk_info.map.timeout) {
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT]++;
+ } else {
+ cx->cnt_wl[BTC_WCNT_RFK_GO]++;
+ return true;
+ }
+ return false;
+}
+
+static
+void _run_coex(struct rtw89_dev *rtwdev, enum btc_reason_and_action reason)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+ u8 mode = wl_rinfo->link_mode;
+
+ lockdep_assert_held(&rtwdev->mutex);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): reason=%d, mode=%d\n",
+ __func__, reason, mode);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): wl_only=%d, bt_only=%d\n",
+ __func__, dm->wl_only, dm->bt_only);
+
+ dm->run_reason = reason;
+ _update_dm_step(rtwdev, reason);
+ _update_btc_state_map(rtwdev);
+
+ /* Be careful when changing the following function sequence!! */
+ if (btc->ctrl.manual) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for Manual CTRL!!\n",
+ __func__);
+ return;
+ }
+
+ if (btc->ctrl.igno_bt &&
+ (reason == BTC_RSN_UPDATE_BT_INFO ||
+ reason == BTC_RSN_UPDATE_BT_SCBD)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for Stop Coex DM!!\n",
+ __func__);
+ return;
+ }
+
+ if (!wl->status.map.init_ok) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for WL init fail!!\n",
+ __func__);
+ return;
+ }
+
+ if (wl->status.map.rf_off_pre == wl->status.map.rf_off &&
+ wl->status.map.lps_pre == wl->status.map.lps &&
+ (reason == BTC_RSN_NTFY_POWEROFF ||
+ reason == BTC_RSN_NTFY_RADIO_STATE)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for WL rf off state no change!!\n",
+ __func__);
+ return;
+ }
+
+ dm->cnt_dm[BTC_DCNT_RUN]++;
+
+ if (btc->ctrl.always_freerun) {
+ _action_freerun(rtwdev);
+ btc->ctrl.igno_bt = true;
+ goto exit;
+ }
+
+ if (dm->wl_only) {
+ _action_wl_only(rtwdev);
+ btc->ctrl.igno_bt = true;
+ goto exit;
+ }
+
+ if (wl->status.map.rf_off || wl->status.map.lps || dm->bt_only) {
+ _action_wl_off(rtwdev);
+ btc->ctrl.igno_bt = true;
+ goto exit;
+ }
+
+ btc->ctrl.igno_bt = false;
+ dm->freerun = false;
+
+ if (reason == BTC_RSN_NTFY_INIT) {
+ _action_wl_init(rtwdev);
+ goto exit;
+ }
+
+ if (!cx->bt.enable.now && !cx->other.type) {
+ _action_bt_off(rtwdev);
+ goto exit;
+ }
+
+ if (cx->bt.whql_test) {
+ _action_bt_whql(rtwdev);
+ goto exit;
+ }
+
+ if (wl->rfk_info.state != BTC_WRFK_STOP) {
+ _action_wl_rfk(rtwdev);
+ goto exit;
+ }
+
+ if (cx->state_map == BTC_WLINKING) {
+ if (mode == BTC_WLINK_NOLINK || mode == BTC_WLINK_2G_STA ||
+ mode == BTC_WLINK_5G) {
+ _action_wl_scan(rtwdev);
+ goto exit;
+ }
+ }
+
+ if (wl->status.map.scan) {
+ _action_wl_scan(rtwdev);
+ goto exit;
+ }
+
+ switch (mode) {
+ case BTC_WLINK_NOLINK:
+ _action_wl_nc(rtwdev);
+ break;
+ case BTC_WLINK_2G_STA:
+ _action_wl_2g_sta(rtwdev);
+ break;
+ case BTC_WLINK_2G_AP:
+ _action_wl_2g_ap(rtwdev);
+ break;
+ case BTC_WLINK_2G_GO:
+ _action_wl_2g_go(rtwdev);
+ break;
+ case BTC_WLINK_2G_GC:
+ _action_wl_2g_gc(rtwdev);
+ break;
+ case BTC_WLINK_2G_SCC:
+ _action_wl_2g_scc(rtwdev);
+ break;
+ case BTC_WLINK_2G_MCC:
+ _action_wl_2g_mcc(rtwdev);
+ break;
+ case BTC_WLINK_25G_MCC:
+ _action_wl_25g_mcc(rtwdev);
+ break;
+ case BTC_WLINK_5G:
+ _action_wl_5g(rtwdev);
+ break;
+ case BTC_WLINK_2G_NAN:
+ _action_wl_2g_nan(rtwdev);
+ break;
+ default:
+ _action_wl_other(rtwdev);
+ break;
+ }
+
+exit:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): exit\n", __func__);
+ _action_common(rtwdev);
+}
+
+void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+ btc->dm.cnt_notify[BTC_NCNT_POWER_ON]++;
+}
+
+void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): !!\n", __func__);
+ btc->dm.cnt_notify[BTC_NCNT_POWER_OFF]++;
+
+ btc->cx.wl.status.map.rf_off = 1;
+
+ _write_scbd(rtwdev, BTC_WSCB_ALL, false);
+ _run_coex(rtwdev, BTC_RSN_NTFY_POWEROFF);
+
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, 0);
+
+ btc->cx.wl.status.map.rf_off_pre = btc->cx.wl.status.map.rf_off;
+}
+
+static void _set_init_info(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ dm->init_info.wl_only = (u8)dm->wl_only;
+ dm->init_info.bt_only = (u8)dm->bt_only;
+ dm->init_info.wl_init_ok = (u8)wl->status.map.init_ok;
+ dm->init_info.dbcc_en = rtwdev->dbcc_en;
+ dm->init_info.cx_other = btc->cx.other.type;
+ dm->init_info.wl_guard_ch = chip->afh_guard_ch;
+ dm->init_info.module = btc->mdinfo;
+}
+
+void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &rtwdev->btc.dm;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ _reset_btc_var(rtwdev, BTC_RESET_ALL);
+ btc->dm.run_reason = BTC_RSN_NONE;
+ btc->dm.run_action = BTC_ACT_NONE;
+ btc->ctrl.igno_bt = true;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): mode=%d\n", __func__, mode);
+
+ dm->cnt_notify[BTC_NCNT_INIT_COEX]++;
+ dm->wl_only = mode == BTC_MODE_WL ? 1 : 0;
+ dm->bt_only = mode == BTC_MODE_BT ? 1 : 0;
+ wl->status.map.rf_off = mode == BTC_MODE_WLOFF ? 1 : 0;
+
+ chip->ops->btc_set_rfe(rtwdev);
+ chip->ops->btc_init_cfg(rtwdev);
+
+ if (!wl->status.map.init_ok) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return for WL init fail!!\n",
+ __func__);
+ dm->error.map.init = true;
+ return;
+ }
+
+ _write_scbd(rtwdev,
+ BTC_WSCB_ACTIVE | BTC_WSCB_ON | BTC_WSCB_BTLOG, true);
+ _update_bt_scbd(rtwdev, true);
+ if (rtw89_mac_get_ctrl_path(rtwdev)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): PTA owner warning!!\n",
+ __func__);
+ dm->error.map.pta_owner = true;
+ }
+
+ _set_init_info(rtwdev);
+ _set_wl_tx_power(rtwdev, RTW89_BTC_WL_DEF_TX_PWR);
+ rtw89_btc_fw_set_slots(rtwdev, CXST_MAX, dm->slot);
+ btc_fw_set_monreg(rtwdev);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_INIT);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_CTRL);
+
+ _run_coex(rtwdev, BTC_RSN_NTFY_INIT);
+}
+
+void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): phy_idx=%d, band=%d\n",
+ __func__, phy_idx, band);
+ btc->dm.cnt_notify[BTC_NCNT_SCAN_START]++;
+ wl->status.map.scan = true;
+ wl->scan_info.band[phy_idx] = band;
+ wl->scan_info.phy_map |= BIT(phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN);
+
+ if (rtwdev->dbcc_en) {
+ wl->dbcc_info.scan_band[phy_idx] = band;
+ _update_dbcc_band(rtwdev, phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ _run_coex(rtwdev, BTC_RSN_NTFY_SCAN_START);
+}
+
+void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): phy_idx=%d\n", __func__, phy_idx);
+ btc->dm.cnt_notify[BTC_NCNT_SCAN_FINISH]++;
+
+ wl->status.map.scan = false;
+ wl->scan_info.phy_map &= ~BIT(phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN);
+
+ if (rtwdev->dbcc_en) {
+ _update_dbcc_band(rtwdev, phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+
+ _run_coex(rtwdev, BTC_RSN_NTFY_SCAN_FINISH);
+}
+
+void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): phy_idx=%d, band=%d\n",
+ __func__, phy_idx, band);
+ btc->dm.cnt_notify[BTC_NCNT_SWITCH_BAND]++;
+
+ wl->scan_info.band[phy_idx] = band;
+ wl->scan_info.phy_map |= BIT(phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_SCAN);
+
+ if (rtwdev->dbcc_en) {
+ wl->dbcc_info.scan_band[phy_idx] = band;
+ _update_dbcc_band(rtwdev, phy_idx);
+ _fw_set_drv_info(rtwdev, CXDRVINFO_DBCC);
+ }
+ _run_coex(rtwdev, BTC_RSN_NTFY_SWBAND);
+}
+
+void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
+ enum btc_pkt_type pkt_type)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_link_info *b = &cx->bt.link_info;
+ struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
+ struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
+ u32 cnt;
+ u32 delay = RTW89_COEX_ACT1_WORK_PERIOD;
+ bool delay_work = false;
+
+ switch (pkt_type) {
+ case PACKET_DHCP:
+ cnt = ++cx->cnt_wl[BTC_WCNT_DHCP];
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): DHCP cnt=%d\n", __func__, cnt);
+ wl->status.map.connecting = true;
+ delay_work = true;
+ break;
+ case PACKET_EAPOL:
+ cnt = ++cx->cnt_wl[BTC_WCNT_EAPOL];
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): EAPOL cnt=%d\n", __func__, cnt);
+ wl->status.map._4way = true;
+ delay_work = true;
+ if (hfp->exist || hid->exist)
+ delay /= 2;
+ break;
+ case PACKET_EAPOL_END:
+ cnt = ++cx->cnt_wl[BTC_WCNT_EAPOL];
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): EAPOL_End cnt=%d\n",
+ __func__, cnt);
+ wl->status.map._4way = false;
+ cancel_delayed_work(&rtwdev->coex_act1_work);
+ break;
+ case PACKET_ARP:
+ cnt = ++cx->cnt_wl[BTC_WCNT_ARP];
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): ARP cnt=%d\n", __func__, cnt);
+ return;
+ case PACKET_ICMP:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): ICMP pkt\n", __func__);
+ return;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): unknown packet type %d\n",
+ __func__, pkt_type);
+ return;
+ }
+
+ if (delay_work) {
+ cancel_delayed_work(&rtwdev->coex_act1_work);
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &rtwdev->coex_act1_work, delay);
+ }
+
+ btc->dm.cnt_notify[BTC_NCNT_SPECIAL_PACKET]++;
+ _run_coex(rtwdev, BTC_RSN_NTFY_SPECIFIC_PACKET);
+}
+
+void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ btc.eapol_notify_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ btc.arp_notify_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ARP);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ btc.dhcp_notify_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_DHCP);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ btc.icmp_notify_work);
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_ICMP);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void _update_bt_info(struct rtw89_dev *rtwdev, u8 *buf, u32 len)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+ struct rtw89_btc_bt_hfp_desc *hfp = &b->hfp_desc;
+ struct rtw89_btc_bt_hid_desc *hid = &b->hid_desc;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &b->a2dp_desc;
+ struct rtw89_btc_bt_pan_desc *pan = &b->pan_desc;
+ union btc_btinfo btinfo;
+
+ if (buf[BTC_BTINFO_L1] != 6)
+ return;
+
+ if (!memcmp(bt->raw_info, buf, BTC_BTINFO_MAX)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): return by bt-info duplicate!!\n",
+ __func__);
+ cx->cnt_bt[BTC_BCNT_INFOSAME]++;
+ return;
+ }
+
+ memcpy(bt->raw_info, buf, BTC_BTINFO_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): bt_info[2]=0x%02x\n",
+ __func__, bt->raw_info[2]);
+
+ /* reset to no-connect before update */
+ b->status.val = BTC_BLINK_NOCONNECT;
+ b->profile_cnt.last = b->profile_cnt.now;
+ b->relink.last = b->relink.now;
+ a2dp->exist_last = a2dp->exist;
+ b->multi_link.last = b->multi_link.now;
+ bt->inq_pag.last = bt->inq_pag.now;
+ b->profile_cnt.now = 0;
+ hid->type = 0;
+
+ /* parse raw info low-Byte2 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_L2];
+ b->status.map.connect = btinfo.lb2.connect;
+ b->status.map.sco_busy = btinfo.lb2.sco_busy;
+ b->status.map.acl_busy = btinfo.lb2.acl_busy;
+ b->status.map.inq_pag = btinfo.lb2.inq_pag;
+ bt->inq_pag.now = btinfo.lb2.inq_pag;
+ cx->cnt_bt[BTC_BCNT_INQPAG] += !!(bt->inq_pag.now && !bt->inq_pag.last);
+
+ hfp->exist = btinfo.lb2.hfp;
+ b->profile_cnt.now += (u8)hfp->exist;
+ hid->exist = btinfo.lb2.hid;
+ b->profile_cnt.now += (u8)hid->exist;
+ a2dp->exist = btinfo.lb2.a2dp;
+ b->profile_cnt.now += (u8)a2dp->exist;
+ pan->active = btinfo.lb2.pan;
+
+ /* parse raw info low-Byte3 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_L3];
+ if (btinfo.lb3.retry != 0)
+ cx->cnt_bt[BTC_BCNT_RETRY]++;
+ b->cqddr = btinfo.lb3.cqddr;
+ cx->cnt_bt[BTC_BCNT_INQ] += !!(btinfo.lb3.inq && !bt->inq);
+ bt->inq = btinfo.lb3.inq;
+ cx->cnt_bt[BTC_BCNT_PAGE] += !!(btinfo.lb3.pag && !bt->pag);
+ bt->pag = btinfo.lb3.pag;
+
+ b->status.map.mesh_busy = btinfo.lb3.mesh_busy;
+ /* parse raw info high-Byte0 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_H0];
+ /* raw val is in dBm, translate from -100~0 dBm to 0~100% */
+ b->rssi = chip->ops->btc_get_bt_rssi(rtwdev, btinfo.hb0.rssi);
+
+ /* parse raw info high-Byte1 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_H1];
+ b->status.map.ble_connect = btinfo.hb1.ble_connect;
+ if (btinfo.hb1.ble_connect)
+ hid->type |= (hid->exist ? BTC_HID_BLE : BTC_HID_RCU);
+
+ cx->cnt_bt[BTC_BCNT_REINIT] += !!(btinfo.hb1.reinit && !bt->reinit);
+ bt->reinit = btinfo.hb1.reinit;
+ cx->cnt_bt[BTC_BCNT_RELINK] += !!(btinfo.hb1.relink && !b->relink.now);
+ b->relink.now = btinfo.hb1.relink;
+ cx->cnt_bt[BTC_BCNT_IGNOWL] += !!(btinfo.hb1.igno_wl && !bt->igno_wl);
+ bt->igno_wl = btinfo.hb1.igno_wl;
+
+ if (bt->igno_wl && !cx->wl.status.map.rf_off)
+ _set_bt_ignore_wlan_act(rtwdev, false);
+
+ hid->type |= (btinfo.hb1.voice ? BTC_HID_RCU_VOICE : 0);
+ bt->ble_scan_en = btinfo.hb1.ble_scan;
+
+ cx->cnt_bt[BTC_BCNT_ROLESW] += !!(btinfo.hb1.role_sw && !b->role_sw);
+ b->role_sw = btinfo.hb1.role_sw;
+
+ b->multi_link.now = btinfo.hb1.multi_link;
+
+ /* parse raw info high-Byte2 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_H2];
+ pan->exist = btinfo.hb2.pan_active;
+ b->profile_cnt.now += (u8)pan->exist;
+
+ cx->cnt_bt[BTC_BCNT_AFH] += !!(btinfo.hb2.afh_update && !b->afh_update);
+ b->afh_update = btinfo.hb2.afh_update;
+ a2dp->active = btinfo.hb2.a2dp_active;
+ b->slave_role = btinfo.hb2.slave;
+ hid->slot_info = btinfo.hb2.hid_slot;
+ hid->pair_cnt = btinfo.hb2.hid_cnt;
+ hid->type |= (hid->slot_info == BTC_HID_218 ?
+ BTC_HID_218 : BTC_HID_418);
+ /* parse raw info high-Byte3 */
+ btinfo.val = bt->raw_info[BTC_BTINFO_H3];
+ a2dp->bitpool = btinfo.hb3.a2dp_bitpool;
+
+ if (b->tx_3m != (u32)btinfo.hb3.tx_3m)
+ cx->cnt_bt[BTC_BCNT_RATECHG]++;
+ b->tx_3m = (u32)btinfo.hb3.tx_3m;
+
+ a2dp->sink = btinfo.hb3.a2dp_sink;
+
+ if (b->profile_cnt.now || b->status.map.ble_connect)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 1);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_AFH_MAP, 0);
+
+ if (!a2dp->exist_last && a2dp->exist) {
+ a2dp->vendor_id = 0;
+ a2dp->flush_time = 0;
+ a2dp->play_latency = 1;
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &rtwdev->coex_bt_devinfo_work,
+ RTW89_COEX_BT_DEVINFO_WORK_PERIOD);
+ }
+
+ if (a2dp->exist && (a2dp->flush_time == 0 || a2dp->vendor_id == 0 ||
+ a2dp->play_latency == 1))
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 1);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_DEVICE_INFO, 0);
+
+ _run_coex(rtwdev, BTC_RSN_UPDATE_BT_INFO);
+}
+
+enum btc_wl_mode {
+ BTC_WL_MODE_HT = 0,
+ BTC_WL_MODE_VHT = 1,
+ BTC_WL_MODE_HE = 2,
+ BTC_WL_MODE_NUM,
+};
+
+void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, enum btc_role_state state)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info r = {0};
+ struct rtw89_btc_wl_link_info *wlinfo = NULL;
+ u8 mode = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], state=%d\n", state);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], role is STA=%d\n",
+ vif->type == NL80211_IFTYPE_STATION);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], port=%d\n", rtwvif->port);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], band=%d ch=%d bw=%d\n",
+ hal->current_band_type, hal->current_channel,
+ hal->current_band_width);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], associated=%d\n",
+ state == BTC_ROLE_MSTS_STA_CONN_END);
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], bcn_period=%d dtim_period=%d\n",
+ vif->bss_conf.beacon_int, vif->bss_conf.dtim_period);
+
+ if (rtwsta) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], STA mac_id=%d\n",
+ rtwsta->mac_id);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], STA support HE=%d VHT=%d HT=%d\n",
+ sta->he_cap.has_he,
+ sta->vht_cap.vht_supported,
+ sta->ht_cap.ht_supported);
+ if (sta->he_cap.has_he)
+ mode |= BIT(BTC_WL_MODE_HE);
+ if (sta->vht_cap.vht_supported)
+ mode |= BIT(BTC_WL_MODE_VHT);
+ if (sta->ht_cap.ht_supported)
+ mode |= BIT(BTC_WL_MODE_HT);
+
+ r.mode = mode;
+ }
+
+ if (rtwvif->wifi_role >= RTW89_WIFI_ROLE_MLME_MAX)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], wifi_role=%d\n", rtwvif->wifi_role);
+
+ r.role = rtwvif->wifi_role;
+ r.phy = rtwvif->phy_idx;
+ r.pid = rtwvif->port;
+ r.active = true;
+ r.connected = MLME_LINKED;
+ r.bcn_period = vif->bss_conf.beacon_int;
+ r.dtim_period = vif->bss_conf.dtim_period;
+ r.band = hal->current_band_type;
+ r.ch = hal->current_channel;
+ r.bw = hal->current_band_width;
+ ether_addr_copy(r.mac_addr, rtwvif->mac_addr);
+
+ if (rtwsta && vif->type == NL80211_IFTYPE_STATION)
+ r.mac_id = rtwsta->mac_id;
+
+ btc->dm.cnt_notify[BTC_NCNT_ROLE_INFO]++;
+
+ wlinfo = &wl->link_info[r.pid];
+
+ memcpy(wlinfo, &r, sizeof(*wlinfo));
+ _update_wl_info(rtwdev);
+
+ if (wlinfo->role == RTW89_WIFI_ROLE_STATION &&
+ wlinfo->connected == MLME_NO_LINK)
+ btc->dm.leak_ap = 0;
+
+ if (state == BTC_ROLE_MSTS_STA_CONN_START)
+ wl->status.map.connecting = 1;
+ else
+ wl->status.map.connecting = 0;
+
+ if (state == BTC_ROLE_MSTS_STA_DIS_CONN)
+ wl->status.map._4way = false;
+
+ _run_coex(rtwdev, BTC_RSN_NTFY_ROLE_INFO);
+}
+
+void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_state)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): rf_state = %d\n",
+ __func__, rf_state);
+ btc->dm.cnt_notify[BTC_NCNT_RADIO_STATE]++;
+
+ switch (rf_state) {
+ case BTC_RFCTRL_WL_OFF:
+ wl->status.map.rf_off = 1;
+ wl->status.map.lps = 0;
+ break;
+ case BTC_RFCTRL_FW_CTRL:
+ wl->status.map.rf_off = 0;
+ wl->status.map.lps = 1;
+ break;
+ case BTC_RFCTRL_WL_ON:
+ default:
+ wl->status.map.rf_off = 0;
+ wl->status.map.lps = 0;
+ break;
+ }
+
+ if (rf_state == BTC_RFCTRL_WL_ON) {
+ rtw89_btc_fw_en_rpt(rtwdev,
+ RPT_EN_MREG | RPT_EN_BT_VER_INFO, true);
+ _write_scbd(rtwdev, BTC_WSCB_ACTIVE, true);
+ _update_bt_scbd(rtwdev, true);
+ chip->ops->btc_init_cfg(rtwdev);
+ } else {
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_ALL, false);
+ _write_scbd(rtwdev, BTC_WSCB_ACTIVE | BTC_WSCB_WLBUSY, false);
+ }
+
+ _run_coex(rtwdev, BTC_RSN_NTFY_RADIO_STATE);
+
+ wl->status.map.rf_off_pre = wl->status.map.rf_off;
+ wl->status.map.lps_pre = wl->status.map.lps;
+}
+
+static bool _ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_path,
+ enum btc_wl_rfk_type type,
+ enum btc_wl_rfk_state state)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ bool result = BTC_WRFK_REJECT;
+
+ wl->rfk_info.type = type;
+ wl->rfk_info.path_map = FIELD_GET(BTC_RFK_PATH_MAP, phy_path);
+ wl->rfk_info.phy_map = FIELD_GET(BTC_RFK_PHY_MAP, phy_path);
+ wl->rfk_info.band = FIELD_GET(BTC_RFK_BAND_MAP, phy_path);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s()_start: phy=0x%x, path=0x%x, type=%d, state=%d\n",
+ __func__, wl->rfk_info.phy_map, wl->rfk_info.path_map,
+ type, state);
+
+ switch (state) {
+ case BTC_WRFK_START:
+ result = _chk_wl_rfk_request(rtwdev);
+ wl->rfk_info.state = result ? BTC_WRFK_START : BTC_WRFK_STOP;
+
+ _write_scbd(rtwdev, BTC_WSCB_WLRFK, result);
+
+ btc->dm.cnt_notify[BTC_NCNT_WL_RFK]++;
+ break;
+ case BTC_WRFK_ONESHOT_START:
+ case BTC_WRFK_ONESHOT_STOP:
+ if (wl->rfk_info.state == BTC_WRFK_STOP) {
+ result = BTC_WRFK_REJECT;
+ } else {
+ result = BTC_WRFK_ALLOW;
+ wl->rfk_info.state = state;
+ }
+ break;
+ case BTC_WRFK_STOP:
+ result = BTC_WRFK_ALLOW;
+ wl->rfk_info.state = BTC_WRFK_STOP;
+
+ _write_scbd(rtwdev, BTC_WSCB_WLRFK, false);
+ cancel_delayed_work(&rtwdev->coex_rfk_chk_work);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s() warning state=%d\n", __func__, state);
+ break;
+ }
+
+ if (result == BTC_WRFK_ALLOW) {
+ if (wl->rfk_info.state == BTC_WRFK_START ||
+ wl->rfk_info.state == BTC_WRFK_STOP)
+ _run_coex(rtwdev, BTC_RSN_NTFY_WL_RFK);
+
+ if (wl->rfk_info.state == BTC_WRFK_START)
+ ieee80211_queue_delayed_work(rtwdev->hw,
+ &rtwdev->coex_rfk_chk_work,
+ RTW89_COEX_RFK_CHK_WORK_PERIOD);
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s()_finish: rfk_cnt=%d, result=%d\n",
+ __func__, btc->dm.cnt_notify[BTC_NCNT_WL_RFK], result);
+
+ return result == BTC_WRFK_ALLOW;
+}
+
+void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
+ enum btc_wl_rfk_type type,
+ enum btc_wl_rfk_state state)
+{
+ u8 band;
+ bool allow;
+ int ret;
+
+ band = FIELD_GET(BTC_RFK_BAND_MAP, phy_map);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] RFK notify (%s / PHY%u / K_type = %u / path_idx = %lu / process = %s)\n",
+ band == RTW89_BAND_2G ? "2G" :
+ band == RTW89_BAND_5G ? "5G" : "6G",
+ !!(FIELD_GET(BTC_RFK_PHY_MAP, phy_map) & BIT(RTW89_PHY_1)),
+ type,
+ FIELD_GET(BTC_RFK_PATH_MAP, phy_map),
+ state == BTC_WRFK_STOP ? "RFK_STOP" :
+ state == BTC_WRFK_START ? "RFK_START" :
+ state == BTC_WRFK_ONESHOT_START ? "ONE-SHOT_START" :
+ "ONE-SHOT_STOP");
+
+ if (state != BTC_WRFK_START || rtwdev->is_bt_iqk_timeout) {
+ _ntfy_wl_rfk(rtwdev, phy_map, type, state);
+ return;
+ }
+
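+	/* poll every 40us, up to 100ms, until coex allows the RFK to proceed */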
+ ret = read_poll_timeout(_ntfy_wl_rfk, allow, allow, 40, 100000, false,
+ rtwdev, phy_map, type, state);
+ if (ret) {
+ rtw89_warn(rtwdev, "RFK notify timeout\n");
+ rtwdev->is_bt_iqk_timeout = true;
+ }
+}
+
+struct rtw89_btc_wl_sta_iter_data {
+ struct rtw89_dev *rtwdev;
+ u8 busy_all;
+ u8 dir_all;
+ u8 rssi_map_all;
+ bool is_sta_change;
+ bool is_traffic_change;
+};
+
+static void rtw89_btc_ntfy_wl_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_btc_wl_sta_iter_data *iter_data =
+ (struct rtw89_btc_wl_sta_iter_data *)data;
+ struct rtw89_dev *rtwdev = iter_data->rtwdev;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_link_info *link_info = NULL;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_traffic_stats *link_info_t = NULL;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_traffic_stats *stats = &rtwvif->stats;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 last_tx_rate, last_rx_rate;
+ u16 last_tx_lvl, last_rx_lvl;
+ u8 port = rtwvif->port;
+ u8 rssi;
+ u8 busy = 0;
+ u8 dir = 0;
+ u8 rssi_map = 0;
+ u8 i = 0;
+ bool is_sta_change = false, is_traffic_change = false;
+
+ rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], rssi=%d\n", rssi);
+
+ link_info = &wl->link_info[port];
+ link_info->stat.traffic = rtwvif->stats;
+ link_info_t = &link_info->stat.traffic;
+
+ if (link_info->connected == MLME_NO_LINK) {
+ link_info->rx_rate_drop_cnt = 0;
+ return;
+ }
+
+ link_info->stat.rssi = rssi;
+ for (i = 0; i < BTC_WL_RSSI_THMAX; i++) {
+ link_info->rssi_state[i] =
+ _update_rssi_state(rtwdev,
+ link_info->rssi_state[i],
+ link_info->stat.rssi,
+ chip->wl_rssi_thres[i]);
+ if (BTC_RSSI_LOW(link_info->rssi_state[i]))
+ rssi_map |= BIT(i);
+
+ if (btc->mdinfo.ant.type == BTC_ANT_DEDICATED &&
+ BTC_RSSI_CHANGE(link_info->rssi_state[i]))
+ is_sta_change = true;
+ }
+ iter_data->rssi_map_all |= rssi_map;
+
+ last_tx_rate = link_info_t->tx_rate;
+ last_rx_rate = link_info_t->rx_rate;
+ last_tx_lvl = (u16)link_info_t->tx_tfc_lv;
+ last_rx_lvl = (u16)link_info_t->rx_tfc_lv;
+
+ if (stats->tx_tfc_lv != RTW89_TFC_IDLE ||
+ stats->rx_tfc_lv != RTW89_TFC_IDLE)
+ busy = 1;
+
+ if (stats->tx_tfc_lv > stats->rx_tfc_lv)
+ dir = RTW89_TFC_UL;
+ else
+ dir = RTW89_TFC_DL;
+
+ link_info = &wl->link_info[port];
+ if (link_info->busy != busy || link_info->dir != dir) {
+ is_sta_change = true;
+ link_info->busy = busy;
+ link_info->dir = dir;
+ }
+
+ iter_data->busy_all |= busy;
+ iter_data->dir_all |= BIT(dir);
+
+ if (rtwsta->rx_hw_rate <= RTW89_HW_RATE_CCK2 &&
+ last_rx_rate > RTW89_HW_RATE_CCK2 &&
+ link_info_t->rx_tfc_lv > RTW89_TFC_IDLE)
+ link_info->rx_rate_drop_cnt++;
+
+ if (last_tx_rate != rtwsta->ra_report.hw_rate ||
+ last_rx_rate != rtwsta->rx_hw_rate ||
+ last_tx_lvl != link_info_t->tx_tfc_lv ||
+ last_rx_lvl != link_info_t->rx_tfc_lv)
+ is_traffic_change = true;
+
+ link_info_t->tx_rate = rtwsta->ra_report.hw_rate;
+ link_info_t->rx_rate = rtwsta->rx_hw_rate;
+
+ wl->role_info.active_role[port].tx_lvl = (u16)stats->tx_tfc_lv;
+ wl->role_info.active_role[port].rx_lvl = (u16)stats->rx_tfc_lv;
+ wl->role_info.active_role[port].tx_rate = rtwsta->ra_report.hw_rate;
+ wl->role_info.active_role[port].rx_rate = rtwsta->rx_hw_rate;
+
+ if (is_sta_change)
+ iter_data->is_sta_change = true;
+
+ if (is_traffic_change)
+ iter_data->is_traffic_change = true;
+}
+
+#define BTC_NHM_CHK_INTVL 20
+
+void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_sta_iter_data data = {.rtwdev = rtwdev};
+ u8 i;
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_btc_ntfy_wl_sta_iter,
+ &data);
+
+ wl->rssi_level = 0;
+ btc->dm.cnt_notify[BTC_NCNT_WL_STA]++;
+ for (i = BTC_WL_RSSI_THMAX; i > 0; i--) {
+		/* set RSSI level 4 ~ 0 if the RSSI bit map matches */
+ if (data.rssi_map_all & BIT(i - 1)) {
+ wl->rssi_level = i;
+ break;
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC, "[BTC], %s(): busy=%d\n",
+ __func__, !!wl->status.map.busy);
+
+ _write_scbd(rtwdev, BTC_WSCB_WLBUSY, (!!wl->status.map.busy));
+
+ if (data.is_traffic_change)
+ _fw_set_drv_info(rtwdev, CXDRVINFO_ROLE);
+ if (data.is_sta_change) {
+ wl->status.map.busy = data.busy_all;
+ wl->status.map.traffic_dir = data.dir_all;
+ _run_coex(rtwdev, BTC_RSN_NTFY_WL_STA);
+ } else if (btc->dm.cnt_notify[BTC_NCNT_WL_STA] >=
+ btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] + BTC_NHM_CHK_INTVL) {
+ btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] =
+ btc->dm.cnt_notify[BTC_NCNT_WL_STA];
+ } else if (btc->dm.cnt_notify[BTC_NCNT_WL_STA] <
+ btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST]) {
+ btc->dm.cnt_dm[BTC_DCNT_WL_STA_LAST] =
+ btc->dm.cnt_notify[BTC_NCNT_WL_STA];
+ }
+}
+
+void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ u8 *buf = &skb->data[RTW89_C2H_HEADER_LEN];
+
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): C2H BT len:%d class:%d fun:%d\n",
+ __func__, len, class, func);
+
+ if (class != BTFC_FW_EVENT)
+ return;
+
+ switch (func) {
+ case BTF_EVNT_RPT:
+ case BTF_EVNT_BUF_OVERFLOW:
+ pfwinfo->event[func]++;
+ /* Don't need rtw89_leave_ps_mode() */
+ btc_fw_event(rtwdev, func, buf, len);
+ break;
+ case BTF_EVNT_BT_INFO:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], handle C2H BT INFO with data %8ph\n", buf);
+ btc->cx.cnt_bt[BTC_BCNT_INFOUPDATE]++;
+ rtw89_leave_ps_mode(rtwdev);
+ _update_bt_info(rtwdev, buf, len);
+ break;
+ case BTF_EVNT_BT_SCBD:
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], handle C2H BT SCBD with data %8ph\n", buf);
+ btc->cx.cnt_bt[BTC_BCNT_SCBDUPDATE]++;
+ rtw89_leave_ps_mode(rtwdev);
+ _update_bt_scbd(rtwdev, false);
+ break;
+ case BTF_EVNT_BT_PSD:
+ break;
+ case BTF_EVNT_BT_REG:
+ btc->dbg.rb_done = true;
+ btc->dbg.rb_val = le32_to_cpu(*((__le32 *)buf));
+
+ break;
+ case BTF_EVNT_C2H_LOOPBACK:
+ btc->dbg.rb_done = true;
+ btc->dbg.rb_val = buf[0];
+ break;
+ case BTF_EVNT_CX_RUNINFO:
+ btc->dm.cnt_dm[BTC_DCNT_CX_RUNINFO]++;
+ break;
+ }
+}
+
+#define BTC_CX_FW_OFFLOAD 0
+
+static void _show_cx_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ u32 ver_main = 0, ver_sub = 0, ver_hotfix = 0, id_branch = 0;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_CX))
+ return;
+
+ dm->cnt_notify[BTC_NCNT_SHOW_COEX_INFO]++;
+
+ seq_printf(m, "========== [BTC COEX INFO (%d)] ==========\n",
+ chip->chip_id);
+
+ ver_main = FIELD_GET(GENMASK(31, 24), chip->para_ver);
+ ver_sub = FIELD_GET(GENMASK(23, 16), chip->para_ver);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->para_ver);
+ id_branch = FIELD_GET(GENMASK(7, 0), chip->para_ver);
+ seq_printf(m, " %-15s : Coex:%d.%d.%d(branch:%d), ",
+ "[coex_version]", ver_main, ver_sub, ver_hotfix, id_branch);
+
+ if (dm->wl_fw_cx_offload != BTC_CX_FW_OFFLOAD)
+ dm->error.map.offload_mismatch = true;
+ else
+ dm->error.map.offload_mismatch = false;
+
+ ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw_coex);
+ ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw_coex);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw_coex);
+ id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw_coex);
+ seq_printf(m, "WL_FW_coex:%d.%d.%d(branch:%d)",
+ ver_main, ver_sub, ver_hotfix, id_branch);
+
+ ver_main = FIELD_GET(GENMASK(31, 24), chip->wlcx_desired);
+ ver_sub = FIELD_GET(GENMASK(23, 16), chip->wlcx_desired);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), chip->wlcx_desired);
+ seq_printf(m, "(%s, desired:%d.%d.%d), ",
+ (wl->ver_info.fw_coex >= chip->wlcx_desired ?
+ "Match" : "Mis-Match"), ver_main, ver_sub, ver_hotfix);
+
+ seq_printf(m, "BT_FW_coex:%d(%s, desired:%d)\n",
+ bt->ver_info.fw_coex,
+ (bt->ver_info.fw_coex >= chip->btcx_desired ?
+ "Match" : "Mis-Match"), chip->btcx_desired);
+
+ if (bt->enable.now && bt->ver_info.fw == 0)
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, true);
+ else
+ rtw89_btc_fw_en_rpt(rtwdev, RPT_EN_BT_VER_INFO, false);
+
+ ver_main = FIELD_GET(GENMASK(31, 24), wl->ver_info.fw);
+ ver_sub = FIELD_GET(GENMASK(23, 16), wl->ver_info.fw);
+ ver_hotfix = FIELD_GET(GENMASK(15, 8), wl->ver_info.fw);
+ id_branch = FIELD_GET(GENMASK(7, 0), wl->ver_info.fw);
+ seq_printf(m, " %-15s : WL_FW:%d.%d.%d.%d, BT_FW:0x%x(%s)\n",
+ "[sub_module]",
+ ver_main, ver_sub, ver_hotfix, id_branch,
+ bt->ver_info.fw, bt->run_patch_code ? "patch" : "ROM");
+
+ seq_printf(m, " %-15s : cv:%x, rfe_type:0x%x, ant_iso:%d, ant_pg:%d, %s",
+ "[hw_info]", btc->mdinfo.cv, btc->mdinfo.rfe_type,
+ btc->mdinfo.ant.isolation, btc->mdinfo.ant.num,
+ (btc->mdinfo.ant.num > 1 ? "" : (btc->mdinfo.ant.single_pos ?
+ "1Ant_Pos:S1, " : "1Ant_Pos:S0, ")));
+
+ seq_printf(m, "3rd_coex:%d, dbcc:%d, tx_num:%d, rx_num:%d\n",
+ btc->cx.other.type, rtwdev->dbcc_en, hal->tx_nss,
+ hal->rx_nss);
+}
+
+static void _show_wl_role_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_link_info *plink = NULL;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_dbcc_info *wl_dinfo = &wl->dbcc_info;
+ struct rtw89_traffic_stats *t;
+ u8 i;
+
+ if (rtwdev->dbcc_en) {
+ seq_printf(m,
+ " %-15s : PHY0_band(op:%d/scan:%d/real:%d), ",
+ "[dbcc_info]", wl_dinfo->op_band[RTW89_PHY_0],
+ wl_dinfo->scan_band[RTW89_PHY_0],
+ wl_dinfo->real_band[RTW89_PHY_0]);
+ seq_printf(m,
+ "PHY1_band(op:%d/scan:%d/real:%d)\n",
+ wl_dinfo->op_band[RTW89_PHY_1],
+ wl_dinfo->scan_band[RTW89_PHY_1],
+ wl_dinfo->real_band[RTW89_PHY_1]);
+ }
+
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++) {
+ plink = &btc->cx.wl.link_info[i];
+
+ if (!plink->active)
+ continue;
+
+ seq_printf(m,
+ " [port_%d] : role=%d(phy-%d), connect=%d(client_cnt=%d), mode=%d, center_ch=%d, bw=%d",
+ plink->pid, (u32)plink->role, plink->phy,
+ (u32)plink->connected, plink->client_cnt - 1,
+ (u32)plink->mode, plink->ch, (u32)plink->bw);
+
+ if (plink->connected == MLME_NO_LINK)
+ continue;
+
+ seq_printf(m,
+ ", mac_id=%d, max_tx_time=%dus, max_tx_retry=%d\n",
+ plink->mac_id, plink->tx_time, plink->tx_retry);
+
+ seq_printf(m,
+ " [port_%d] : rssi=-%ddBm(%d), busy=%d, dir=%s, ",
+ plink->pid, 110 - plink->stat.rssi,
+ plink->stat.rssi, plink->busy,
+ plink->dir == RTW89_TFC_UL ? "UL" : "DL");
+
+ t = &plink->stat.traffic;
+
+ seq_printf(m,
+ "tx[rate:%d/busy_level:%d], ",
+ (u32)t->tx_rate, t->tx_tfc_lv);
+
+ seq_printf(m, "rx[rate:%d/busy_level:%d/drop:%d]\n",
+ (u32)t->rx_rate,
+ t->rx_tfc_lv, plink->rx_rate_drop_cnt);
+ }
+}
+
+static void _show_wl_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_wl_role_info *wl_rinfo = &wl->role_info;
+
+ if (!(btc->dm.coex_info_map & BTC_COEX_INFO_WL))
+ return;
+
+ seq_puts(m, "========== [WL Status] ==========\n");
+
+ seq_printf(m, " %-15s : link_mode:%d, ",
+ "[status]", (u32)wl_rinfo->link_mode);
+
+ seq_printf(m,
+ "rf_off:%s, power_save:%s, scan:%s(band:%d/phy_map:0x%x), ",
+ wl->status.map.rf_off ? "Y" : "N",
+ wl->status.map.lps ? "Y" : "N",
+ wl->status.map.scan ? "Y" : "N",
+ wl->scan_info.band[RTW89_PHY_0], wl->scan_info.phy_map);
+
+ seq_printf(m,
+ "connecting:%s, roam:%s, 4way:%s, init_ok:%s\n",
+ wl->status.map.connecting ? "Y" : "N",
+ wl->status.map.roaming ? "Y" : "N",
+ wl->status.map._4way ? "Y" : "N",
+ wl->status.map.init_ok ? "Y" : "N");
+
+ _show_wl_role_info(rtwdev, m);
+}
+
+enum btc_bt_a2dp_type {
+ BTC_A2DP_LEGACY = 0,
+ BTC_A2DP_TWS_SNIFF = 1,
+ BTC_A2DP_TWS_RELAY = 2,
+};
+
+static void _show_bt_profile_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_bt_link_info *bt_linfo = &btc->cx.bt.link_info;
+ struct rtw89_btc_bt_hfp_desc hfp = bt_linfo->hfp_desc;
+ struct rtw89_btc_bt_hid_desc hid = bt_linfo->hid_desc;
+ struct rtw89_btc_bt_a2dp_desc a2dp = bt_linfo->a2dp_desc;
+ struct rtw89_btc_bt_pan_desc pan = bt_linfo->pan_desc;
+
+ if (hfp.exist) {
+ seq_printf(m, " %-15s : type:%s, sut_pwr:%d, golden-rx:%d",
+ "[HFP]", (hfp.type == 0 ? "SCO" : "eSCO"),
+ bt_linfo->sut_pwr_level[0],
+ bt_linfo->golden_rx_shift[0]);
+ }
+
+ if (hid.exist) {
+ seq_printf(m,
+ "\n\r %-15s : type:%s%s%s%s%s pair-cnt:%d, sut_pwr:%d, golden-rx:%d\n",
+ "[HID]",
+ hid.type & BTC_HID_218 ? "2/18," : "",
+ hid.type & BTC_HID_418 ? "4/18," : "",
+ hid.type & BTC_HID_BLE ? "BLE," : "",
+ hid.type & BTC_HID_RCU ? "RCU," : "",
+ hid.type & BTC_HID_RCU_VOICE ? "RCU-Voice," : "",
+ hid.pair_cnt, bt_linfo->sut_pwr_level[1],
+ bt_linfo->golden_rx_shift[1]);
+ }
+
+ if (a2dp.exist) {
+ seq_printf(m,
+ " %-15s : type:%s, bit-pool:%d, flush-time:%d, ",
+ "[A2DP]",
+ a2dp.type == BTC_A2DP_LEGACY ? "Legacy" : "TWS",
+ a2dp.bitpool, a2dp.flush_time);
+
+ seq_printf(m,
+ "vid:0x%x, Dev-name:0x%x, sut_pwr:%d, golden-rx:%d\n",
+ a2dp.vendor_id, a2dp.device_name,
+ bt_linfo->sut_pwr_level[2],
+ bt_linfo->golden_rx_shift[2]);
+ }
+
+ if (pan.exist) {
+ seq_printf(m, " %-15s : sut_pwr:%d, golden-rx:%d\n",
+ "[PAN]",
+ bt_linfo->sut_pwr_level[3],
+ bt_linfo->golden_rx_shift[3]);
+ }
+}
+
+static void _show_bt_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ struct rtw89_btc_bt_link_info *bt_linfo = &bt->link_info;
+ u8 *afh = bt_linfo->afh_map;
+ u16 polt_cnt = 0;
+
+ if (!(btc->dm.coex_info_map & BTC_COEX_INFO_BT))
+ return;
+
+ seq_puts(m, "========== [BT Status] ==========\n");
+
+ seq_printf(m, " %-15s : enable:%s, btg:%s%s, connect:%s, ",
+ "[status]", bt->enable.now ? "Y" : "N",
+ bt->btg_type ? "Y" : "N",
+ (bt->enable.now && (bt->btg_type != module->bt_pos) ?
+ "(efuse-mismatch!!)" : ""),
+ (bt_linfo->status.map.connect ? "Y" : "N"));
+
+ seq_printf(m, "igno_wl:%s, mailbox_avl:%s, rfk_state:0x%x\n",
+ bt->igno_wl ? "Y" : "N",
+ bt->mbx_avl ? "Y" : "N", bt->rfk_info.val);
+
+ seq_printf(m, " %-15s : profile:%s%s%s%s%s ",
+ "[profile]",
+ (bt_linfo->profile_cnt.now == 0) ? "None," : "",
+ bt_linfo->hfp_desc.exist ? "HFP," : "",
+ bt_linfo->hid_desc.exist ? "HID," : "",
+ bt_linfo->a2dp_desc.exist ?
+ (bt_linfo->a2dp_desc.sink ? "A2DP_sink," : "A2DP,") : "",
+ bt_linfo->pan_desc.exist ? "PAN," : "");
+
+ seq_printf(m,
+ "multi-link:%s, role:%s, ble-connect:%s, CQDDR:%s, A2DP_active:%s, PAN_active:%s\n",
+ bt_linfo->multi_link.now ? "Y" : "N",
+ bt_linfo->slave_role ? "Slave" : "Master",
+ bt_linfo->status.map.ble_connect ? "Y" : "N",
+ bt_linfo->cqddr ? "Y" : "N",
+ bt_linfo->a2dp_desc.active ? "Y" : "N",
+ bt_linfo->pan_desc.active ? "Y" : "N");
+
+ seq_printf(m,
+ " %-15s : rssi:%ddBm, tx_rate:%dM, %s%s%s",
+ "[link]", bt_linfo->rssi - 100,
+ bt_linfo->tx_3m ? 3 : 2,
+ bt_linfo->status.map.inq_pag ? " inq-page!!" : "",
+ bt_linfo->status.map.acl_busy ? " acl_busy!!" : "",
+ bt_linfo->status.map.mesh_busy ? " mesh_busy!!" : "");
+
+ seq_printf(m,
+ "%s afh_map[%02x%02x_%02x%02x_%02x%02x_%02x%02x_%02x%02x], ",
+ bt_linfo->relink.now ? " ReLink!!" : "",
+ afh[0], afh[1], afh[2], afh[3], afh[4],
+ afh[5], afh[6], afh[7], afh[8], afh[9]);
+
+ seq_printf(m, "wl_ch_map[en:%d/ch:%d/bw:%d]\n",
+ wl->afh_info.en, wl->afh_info.ch, wl->afh_info.bw);
+
+ seq_printf(m,
+ " %-15s : retry:%d, relink:%d, rate_chg:%d, reinit:%d, reenable:%d, ",
+ "[stat_cnt]", cx->cnt_bt[BTC_BCNT_RETRY],
+ cx->cnt_bt[BTC_BCNT_RELINK], cx->cnt_bt[BTC_BCNT_RATECHG],
+ cx->cnt_bt[BTC_BCNT_REINIT], cx->cnt_bt[BTC_BCNT_REENABLE]);
+
+ seq_printf(m,
+ "role-switch:%d, afh:%d, inq_page:%d(inq:%d/page:%d), igno_wl:%d\n",
+ cx->cnt_bt[BTC_BCNT_ROLESW], cx->cnt_bt[BTC_BCNT_AFH],
+ cx->cnt_bt[BTC_BCNT_INQPAG], cx->cnt_bt[BTC_BCNT_INQ],
+ cx->cnt_bt[BTC_BCNT_PAGE], cx->cnt_bt[BTC_BCNT_IGNOWL]);
+
+ _show_bt_profile_info(rtwdev, m);
+
+ seq_printf(m,
+ " %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)\n",
+ "[bt_info]", bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5], bt->raw_info[6],
+ bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ if (wl->status.map.lps || wl->status.map.rf_off)
+ return;
+
+ chip->ops->btc_update_bt_cnt(rtwdev);
+ _chk_btc_err(rtwdev, BTC_DCNT_BTCNT_FREEZE, 0);
+
+ seq_printf(m,
+ " %-15s : Hi-rx = %d, Hi-tx = %d, Lo-rx = %d, Lo-tx = %d (bt_polut_wl_tx = %d)\n",
+ "[trx_req_cnt]", cx->cnt_bt[BTC_BCNT_HIPRI_RX],
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX], cx->cnt_bt[BTC_BCNT_LOPRI_RX],
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX], polt_cnt);
+}
+
+#define CASE_BTC_RSN_STR(e) case BTC_RSN_ ## e: return #e
+#define CASE_BTC_ACT_STR(e) case BTC_ACT_ ## e | BTC_ACT_EXT_BIT: return #e
+#define CASE_BTC_POLICY_STR(e) \
+ case BTC_CXP_ ## e | BTC_POLICY_EXT_BIT: return #e
+
+static const char *steps_to_str(u16 step)
+{
+ switch (step) {
+ CASE_BTC_RSN_STR(NONE);
+ CASE_BTC_RSN_STR(NTFY_INIT);
+ CASE_BTC_RSN_STR(NTFY_SWBAND);
+ CASE_BTC_RSN_STR(NTFY_WL_STA);
+ CASE_BTC_RSN_STR(NTFY_RADIO_STATE);
+ CASE_BTC_RSN_STR(UPDATE_BT_SCBD);
+ CASE_BTC_RSN_STR(NTFY_WL_RFK);
+ CASE_BTC_RSN_STR(UPDATE_BT_INFO);
+ CASE_BTC_RSN_STR(NTFY_SCAN_START);
+ CASE_BTC_RSN_STR(NTFY_SCAN_FINISH);
+ CASE_BTC_RSN_STR(NTFY_SPECIFIC_PACKET);
+ CASE_BTC_RSN_STR(NTFY_POWEROFF);
+ CASE_BTC_RSN_STR(NTFY_ROLE_INFO);
+ CASE_BTC_RSN_STR(CMD_SET_COEX);
+ CASE_BTC_RSN_STR(ACT1_WORK);
+ CASE_BTC_RSN_STR(BT_DEVINFO_WORK);
+ CASE_BTC_RSN_STR(RFK_CHK_WORK);
+
+ CASE_BTC_ACT_STR(NONE);
+ CASE_BTC_ACT_STR(WL_ONLY);
+ CASE_BTC_ACT_STR(WL_5G);
+ CASE_BTC_ACT_STR(WL_OTHER);
+ CASE_BTC_ACT_STR(WL_IDLE);
+ CASE_BTC_ACT_STR(WL_NC);
+ CASE_BTC_ACT_STR(WL_RFK);
+ CASE_BTC_ACT_STR(WL_INIT);
+ CASE_BTC_ACT_STR(WL_OFF);
+ CASE_BTC_ACT_STR(FREERUN);
+ CASE_BTC_ACT_STR(BT_WHQL);
+ CASE_BTC_ACT_STR(BT_RFK);
+ CASE_BTC_ACT_STR(BT_OFF);
+ CASE_BTC_ACT_STR(BT_IDLE);
+ CASE_BTC_ACT_STR(BT_HFP);
+ CASE_BTC_ACT_STR(BT_HID);
+ CASE_BTC_ACT_STR(BT_A2DP);
+ CASE_BTC_ACT_STR(BT_A2DPSINK);
+ CASE_BTC_ACT_STR(BT_PAN);
+ CASE_BTC_ACT_STR(BT_A2DP_HID);
+ CASE_BTC_ACT_STR(BT_A2DP_PAN);
+ CASE_BTC_ACT_STR(BT_PAN_HID);
+ CASE_BTC_ACT_STR(BT_A2DP_PAN_HID);
+ CASE_BTC_ACT_STR(WL_25G_MCC);
+ CASE_BTC_ACT_STR(WL_2G_MCC);
+ CASE_BTC_ACT_STR(WL_2G_SCC);
+ CASE_BTC_ACT_STR(WL_2G_AP);
+ CASE_BTC_ACT_STR(WL_2G_GO);
+ CASE_BTC_ACT_STR(WL_2G_GC);
+ CASE_BTC_ACT_STR(WL_2G_NAN);
+
+ CASE_BTC_POLICY_STR(OFF_BT);
+ CASE_BTC_POLICY_STR(OFF_WL);
+ CASE_BTC_POLICY_STR(OFF_EQ0);
+ CASE_BTC_POLICY_STR(OFF_EQ1);
+ CASE_BTC_POLICY_STR(OFF_EQ2);
+ CASE_BTC_POLICY_STR(OFF_EQ3);
+ CASE_BTC_POLICY_STR(OFF_BWB0);
+ CASE_BTC_POLICY_STR(OFF_BWB1);
+ CASE_BTC_POLICY_STR(OFFB_BWB0);
+ CASE_BTC_POLICY_STR(OFFE_DEF);
+ CASE_BTC_POLICY_STR(OFFE_DEF2);
+ CASE_BTC_POLICY_STR(FIX_TD3030);
+ CASE_BTC_POLICY_STR(FIX_TD5050);
+ CASE_BTC_POLICY_STR(FIX_TD2030);
+ CASE_BTC_POLICY_STR(FIX_TD4010);
+ CASE_BTC_POLICY_STR(FIX_TD7010);
+ CASE_BTC_POLICY_STR(FIX_TD2060);
+ CASE_BTC_POLICY_STR(FIX_TD3060);
+ CASE_BTC_POLICY_STR(FIX_TD2080);
+ CASE_BTC_POLICY_STR(FIX_TDW1B1);
+ CASE_BTC_POLICY_STR(FIX_TD4020);
+ CASE_BTC_POLICY_STR(PFIX_TD3030);
+ CASE_BTC_POLICY_STR(PFIX_TD5050);
+ CASE_BTC_POLICY_STR(PFIX_TD2030);
+ CASE_BTC_POLICY_STR(PFIX_TD2060);
+ CASE_BTC_POLICY_STR(PFIX_TD3070);
+ CASE_BTC_POLICY_STR(PFIX_TD2080);
+ CASE_BTC_POLICY_STR(PFIX_TDW1B1);
+ CASE_BTC_POLICY_STR(AUTO_TD50200);
+ CASE_BTC_POLICY_STR(AUTO_TD60200);
+ CASE_BTC_POLICY_STR(AUTO_TD20200);
+ CASE_BTC_POLICY_STR(AUTO_TDW1B1);
+ CASE_BTC_POLICY_STR(PAUTO_TD50200);
+ CASE_BTC_POLICY_STR(PAUTO_TD60200);
+ CASE_BTC_POLICY_STR(PAUTO_TD20200);
+ CASE_BTC_POLICY_STR(PAUTO_TDW1B1);
+ CASE_BTC_POLICY_STR(AUTO2_TD3050);
+ CASE_BTC_POLICY_STR(AUTO2_TD3070);
+ CASE_BTC_POLICY_STR(AUTO2_TD5050);
+ CASE_BTC_POLICY_STR(AUTO2_TD6060);
+ CASE_BTC_POLICY_STR(AUTO2_TD2080);
+ CASE_BTC_POLICY_STR(AUTO2_TDW1B4);
+ CASE_BTC_POLICY_STR(PAUTO2_TD3050);
+ CASE_BTC_POLICY_STR(PAUTO2_TD3070);
+ CASE_BTC_POLICY_STR(PAUTO2_TD5050);
+ CASE_BTC_POLICY_STR(PAUTO2_TD6060);
+ CASE_BTC_POLICY_STR(PAUTO2_TD2080);
+ CASE_BTC_POLICY_STR(PAUTO2_TDW1B4);
+ default:
+ return "unknown step";
+ }
+}
+
+static
+void seq_print_segment(struct seq_file *m, const char *prefix, u16 *data,
+ u8 len, u8 seg_len, u8 start_idx, u8 ring_len)
+{
+ u8 i;
+ u8 cur_index;
+
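+	/* dump the step ring starting at start_idx, seg_len entries per output line */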
+ for (i = 0; i < len ; i++) {
+ if ((i % seg_len) == 0)
+ seq_printf(m, " %-15s : ", prefix);
+ cur_index = (start_idx + i) % ring_len;
+ if (i % 3 == 0)
+ seq_printf(m, "-> %-20s",
+ steps_to_str(*(data + cur_index)));
+ else if (i % 3 == 1)
+ seq_printf(m, "-> %-15s",
+ steps_to_str(*(data + cur_index)));
+ else
+ seq_printf(m, "-> %-13s",
+ steps_to_str(*(data + cur_index)));
+ if (i == (len - 1) || (i % seg_len) == (seg_len - 1))
+ seq_puts(m, "\n");
+ }
+}
+
+static void _show_dm_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 start_idx;
+ u8 len;
+
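+	/* if the step ring overflowed, dump the whole ring starting from the
+	 * oldest entry (step_pos); otherwise dump entries 0 ~ step_pos.
+	 */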
+ len = dm->dm_step.step_ov ? RTW89_BTC_DM_MAXSTEP : dm->dm_step.step_pos;
+ start_idx = dm->dm_step.step_ov ? dm->dm_step.step_pos : 0;
+
+ seq_print_segment(m, "[dm_steps]", dm->dm_step.step, len, 6, start_idx,
+ ARRAY_SIZE(dm->dm_step.step));
+}
+
+static void _show_dm_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_DM))
+ return;
+
+ seq_printf(m, "========== [Mechanism Status %s] ==========\n",
+ (btc->ctrl.manual ? "(Manual)" : "(Auto)"));
+
+ seq_printf(m,
+ " %-15s : type:%s, reason:%s(), action:%s(), ant_path:%ld, run_cnt:%d\n",
+ "[status]",
+ module->ant.type == BTC_ANT_SHARED ? "shared" : "dedicated",
+ steps_to_str(dm->run_reason),
+ steps_to_str(dm->run_action | BTC_ACT_EXT_BIT),
+ FIELD_GET(GENMASK(7, 0), dm->set_ant_path),
+ dm->cnt_dm[BTC_DCNT_RUN]);
+
+ _show_dm_step(rtwdev, m);
+
+ seq_printf(m, " %-15s : wl_only:%d, bt_only:%d, igno_bt:%d, free_run:%d, wl_ps_ctrl:%d, wl_mimo_ps:%d, ",
+ "[dm_flag]", dm->wl_only, dm->bt_only, btc->ctrl.igno_bt,
+ dm->freerun, btc->lps, dm->wl_mimo_ps);
+
+ seq_printf(m, "leak_ap:%d, fw_offload:%s%s\n", dm->leak_ap,
+ (BTC_CX_FW_OFFLOAD ? "Y" : "N"),
+ (dm->wl_fw_cx_offload == BTC_CX_FW_OFFLOAD ?
+ "" : "(Mis-Match!!)"));
+
+ if (dm->rf_trx_para.wl_tx_power == 0xff)
+ seq_printf(m,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:orig, ",
+ "[trx_ctrl]", wl->rssi_level, dm->trx_para_level);
+
+ else
+ seq_printf(m,
+ " %-15s : wl_rssi_lvl:%d, para_lvl:%d, wl_tx_pwr:%d, ",
+ "[trx_ctrl]", wl->rssi_level, dm->trx_para_level,
+ dm->rf_trx_para.wl_tx_power);
+
+ seq_printf(m,
+ "wl_rx_lvl:%d, bt_tx_pwr_dec:%d, bt_rx_lna:%d(%s-tbl), wl_btg_rx:%d\n",
+ dm->rf_trx_para.wl_rx_gain, dm->rf_trx_para.bt_tx_power,
+ dm->rf_trx_para.bt_rx_gain,
+ (bt->hi_lna_rx ? "Hi" : "Ori"), dm->wl_btg_rx);
+
+ seq_printf(m,
+ " %-15s : wl_tx_limit[en:%d/max_t:%dus/max_retry:%d], bt_slot_reg:%d-TU\n",
+ "[dm_ctrl]", dm->wl_tx_limit.enable, dm->wl_tx_limit.tx_time,
+ dm->wl_tx_limit.tx_retry, btc->bt_req_len);
+}
+
+static void _show_error(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_fbtc_cysta *pcysta = NULL;
+
+ pcysta = &pfwinfo->rpt_fbtc_cysta.finfo;
+
+ if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW] == 0 &&
+ pcysta->except_cnt == 0 &&
+ !pfwinfo->len_mismch && !pfwinfo->fver_mismch)
+ return;
+
+ seq_printf(m, " %-15s : ", "[error]");
+
+ if (pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]) {
+ seq_printf(m,
+ "overflow-cnt: %d, ",
+ pfwinfo->event[BTF_EVNT_BUF_OVERFLOW]);
+ }
+
+ if (pfwinfo->len_mismch) {
+ seq_printf(m,
+ "len-mismatch: 0x%x, ",
+ pfwinfo->len_mismch);
+ }
+
+ if (pfwinfo->fver_mismch) {
+ seq_printf(m,
+ "fver-mismatch: 0x%x, ",
+ pfwinfo->fver_mismch);
+ }
+
+ /* cycle statistics exceptions */
+ if (pcysta->exception || pcysta->except_cnt) {
+ seq_printf(m,
+ "exception-type: 0x%x, exception-cnt = %d",
+ pcysta->exception, pcysta->except_cnt);
+ }
+ seq_puts(m, "\n");
+}
+
+static void _show_fbtc_tdma(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_tdma *t = NULL;
+ struct rtw89_btc_fbtc_slot *s = NULL;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ u8 i, cnt = 0;
+
+ pcinfo = &pfwinfo->rpt_fbtc_tdma.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ t = &pfwinfo->rpt_fbtc_tdma.finfo;
+
+ seq_printf(m,
+ " %-15s : ", "[tdma_policy]");
+ seq_printf(m,
+ "type:%d, rx_flow_ctrl:%d, tx_pause:%d, ",
+ (u32)t->type,
+ t->rxflctrl, t->txpause);
+
+ seq_printf(m,
+ "wl_toggle_n:%d, leak_n:%d, ext_ctrl:%d, ",
+ t->wtgle_n, t->leak_n, t->ext_ctrl);
+
+ seq_printf(m,
+ "policy_type:%d",
+ (u32)btc->policy_type);
+
+ s = pfwinfo->rpt_fbtc_slots.finfo.slot;
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (dm->update_slot_map == BIT(CXST_MAX) - 1)
+ break;
+
+ if (!(dm->update_slot_map & BIT(i)))
+ continue;
+
+ if (cnt % 6 == 0)
+ seq_printf(m,
+ " %-15s : %d[%d/0x%x/%d]",
+ "[slot_policy]",
+ (u32)i,
+ s[i].dur, s[i].cxtbl, s[i].cxtype);
+ else
+ seq_printf(m,
+ ", %d[%d/0x%x/%d]",
+ (u32)i,
+ s[i].dur, s[i].cxtbl, s[i].cxtype);
+ if (cnt % 6 == 5)
+ seq_puts(m, "\n");
+ cnt++;
+ }
+ seq_puts(m, "\n");
+}
+
+static void _show_fbtc_slots(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_slots *pslots = NULL;
+ struct rtw89_btc_fbtc_slot s;
+ u8 i = 0;
+
+ pcinfo = &pfwinfo->rpt_fbtc_slots.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pslots = &pfwinfo->rpt_fbtc_slots.finfo;
+
+ for (i = 0; i < CXST_MAX; i++) {
+ s = pslots->slot[i];
+ if (i % 6 == 0)
+ seq_printf(m,
+ " %-15s : %02d[%03d/0x%x/%d]",
+ "[slot_list]",
+ (u32)i,
+ s.dur, s.cxtbl, s.cxtype);
+ else
+ seq_printf(m,
+ ", %02d[%03d/0x%x/%d]",
+ (u32)i,
+ s.dur, s.cxtbl, s.cxtype);
+ if (i % 6 == 5)
+ seq_puts(m, "\n");
+ }
+}
+
+static void _show_fbtc_cysta(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_a2dp_desc *a2dp = &btc->cx.bt.link_info.a2dp_desc;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_cysta *pcysta_le32 = NULL;
+ struct rtw89_btc_fbtc_cysta_cpu pcysta[1];
+ union rtw89_btc_fbtc_rxflct r;
+ u8 i, cnt = 0, slot_pair;
+ u16 cycle, c_begin, c_end, store_index;
+
+ pcinfo = &pfwinfo->rpt_fbtc_cysta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pcysta_le32 = &pfwinfo->rpt_fbtc_cysta.finfo;
+ rtw89_btc_fbtc_cysta_to_cpu(pcysta_le32, pcysta);
+ seq_printf(m,
+ " %-15s : cycle:%d, bcn[all:%d/all_ok:%d/bt:%d/bt_ok:%d]",
+ "[cycle_cnt]", pcysta->cycles, pcysta->bcn_cnt[CXBCN_ALL],
+ pcysta->bcn_cnt[CXBCN_ALL_OK],
+ pcysta->bcn_cnt[CXBCN_BT_SLOT],
+ pcysta->bcn_cnt[CXBCN_BT_OK]);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_CYCLE_FREEZE, (u32)pcysta->cycles);
+
+ for (i = 0; i < CXST_MAX; i++) {
+ if (!pcysta->slot_cnt[i])
+ continue;
+ seq_printf(m,
+ ", %d:%d", (u32)i, pcysta->slot_cnt[i]);
+ }
+
+ if (dm->tdma_now.rxflctrl) {
+ seq_printf(m,
+ ", leak_rx:%d", pcysta->leakrx_cnt);
+ }
+
+ if (pcysta->collision_cnt) {
+ seq_printf(m,
+ ", collision:%d", pcysta->collision_cnt);
+ }
+
+ if (pcysta->skip_cnt) {
+ seq_printf(m,
+ ", skip:%d", pcysta->skip_cnt);
+ }
+ seq_puts(m, "\n");
+
+ _chk_btc_err(rtwdev, BTC_DCNT_W1_FREEZE, pcysta->slot_cnt[CXST_W1]);
+ _chk_btc_err(rtwdev, BTC_DCNT_B1_FREEZE, pcysta->slot_cnt[CXST_B1]);
+
+ seq_printf(m, " %-15s : avg_t[wl:%d/bt:%d/lk:%d.%03d]",
+ "[cycle_time]",
+ pcysta->tavg_cycle[CXT_WL],
+ pcysta->tavg_cycle[CXT_BT],
+ pcysta->tavg_lk / 1000, pcysta->tavg_lk % 1000);
+ seq_printf(m,
+ ", max_t[wl:%d/bt:%d/lk:%d.%03d]",
+ pcysta->tmax_cycle[CXT_WL],
+ pcysta->tmax_cycle[CXT_BT],
+ pcysta->tmax_lk / 1000, pcysta->tmax_lk % 1000);
+ seq_printf(m,
+ ", maxdiff_t[wl:%d/bt:%d]\n",
+ pcysta->tmaxdiff_cycle[CXT_WL],
+ pcysta->tmaxdiff_cycle[CXT_BT]);
+
+ if (pcysta->cycles == 0)
+ return;
+
+	/* 1 cycle records 1 wl-slot and 1 bt-slot */
+ slot_pair = BTC_CYCLE_SLOT_MAX / 2;
+
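+	/* only the last slot_pair cycles are recorded; start from the oldest cycle kept */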
+ if (pcysta->cycles <= slot_pair)
+ c_begin = 1;
+ else
+ c_begin = pcysta->cycles - slot_pair + 1;
+
+ c_end = pcysta->cycles;
+
+ for (cycle = c_begin; cycle <= c_end; cycle++) {
+ cnt++;
+ store_index = ((cycle - 1) % slot_pair) * 2;
+
+ if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 1)
+ seq_printf(m,
+ " %-15s : ->b%02d->w%02d", "[cycle_step]",
+ pcysta->tslot_cycle[store_index],
+ pcysta->tslot_cycle[store_index + 1]);
+ else
+ seq_printf(m,
+ "->b%02d->w%02d",
+ pcysta->tslot_cycle[store_index],
+ pcysta->tslot_cycle[store_index + 1]);
+ if (cnt % (BTC_CYCLE_SLOT_MAX / 4) == 0 || cnt == c_end)
+ seq_puts(m, "\n");
+ }
+
+ if (a2dp->exist) {
+ seq_printf(m,
+ " %-15s : a2dp_ept:%d, a2dp_late:%d",
+ "[a2dp_t_sta]",
+ pcysta->a2dpept, pcysta->a2dpeptto);
+
+ seq_printf(m,
+ ", avg_t:%d, max_t:%d",
+ pcysta->tavg_a2dpept, pcysta->tmax_a2dpept);
+ r.val = dm->tdma_now.rxflctrl;
+
+ if (r.type && r.tgln_n) {
+ seq_printf(m,
+ ", cycle[PSTDMA:%d/TDMA:%d], ",
+ pcysta->cycles_a2dp[CXT_FLCTRL_ON],
+ pcysta->cycles_a2dp[CXT_FLCTRL_OFF]);
+
+ seq_printf(m,
+ "avg_t[PSTDMA:%d/TDMA:%d], ",
+ pcysta->tavg_a2dp[CXT_FLCTRL_ON],
+ pcysta->tavg_a2dp[CXT_FLCTRL_OFF]);
+
+ seq_printf(m,
+ "max_t[PSTDMA:%d/TDMA:%d]",
+ pcysta->tmax_a2dp[CXT_FLCTRL_ON],
+ pcysta->tmax_a2dp[CXT_FLCTRL_OFF]);
+ }
+ seq_puts(m, "\n");
+ }
+}
+
+static void _show_fbtc_nullsta(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_cynullsta *ns = NULL;
+ u8 i = 0;
+
+ if (!btc->dm.tdma_now.rxflctrl)
+ return;
+
+ pcinfo = &pfwinfo->rpt_fbtc_nullsta.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ ns = &pfwinfo->rpt_fbtc_nullsta.finfo;
+
+ seq_printf(m, " %-15s : ", "[null_sta]");
+
+ for (i = 0; i < 2; i++) {
+ if (i != 0)
+ seq_printf(m, ", null-%d", i);
+ else
+ seq_printf(m, "null-%d", i);
+ seq_printf(m, "[ok:%d/", le32_to_cpu(ns->result[i][1]));
+ seq_printf(m, "fail:%d/", le32_to_cpu(ns->result[i][0]));
+ seq_printf(m, "on_time:%d/", le32_to_cpu(ns->result[i][2]));
+ seq_printf(m, "retry:%d/", le32_to_cpu(ns->result[i][3]));
+ seq_printf(m, "avg_t:%d.%03d/",
+ le32_to_cpu(ns->avg_t[i]) / 1000,
+ le32_to_cpu(ns->avg_t[i]) % 1000);
+ seq_printf(m, "max_t:%d.%03d]",
+ le32_to_cpu(ns->max_t[i]) / 1000,
+ le32_to_cpu(ns->max_t[i]) % 1000);
+ }
+ seq_puts(m, "\n");
+}
+
+static void _show_fbtc_step(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_steps *pstep = NULL;
+ u8 type, val, cnt = 0, state = 0;
+ bool outloop = false;
+ u16 i, diff_t, n_start = 0, n_stop = 0;
+ u16 pos_old, pos_new;
+
+ pcinfo = &pfwinfo->rpt_fbtc_step.cinfo;
+ if (!pcinfo->valid)
+ return;
+
+ pstep = &pfwinfo->rpt_fbtc_step.finfo;
+ pos_old = le16_to_cpu(pstep->pos_old);
+ pos_new = le16_to_cpu(pstep->pos_new);
+
+ if (pcinfo->req_fver != pstep->fver)
+ return;
+
+	/* store step info by using a ring instead of a FIFO */
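+	/* state 0: pick range [pos_old, pos_new] (or to ring end if wrapped);
+	 * state 1: print that range;
+	 * state 2: continue from index 0 to pos_new when the ring wrapped.
+	 */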
+ do {
+ switch (state) {
+ case 0:
+ n_start = pos_old;
+ if (pos_new >= pos_old)
+ n_stop = pos_new;
+ else
+ n_stop = btc->ctrl.trace_step - 1;
+
+ state = 1;
+ break;
+ case 1:
+ for (i = n_start; i <= n_stop; i++) {
+ type = pstep->step[i].type;
+ val = pstep->step[i].val;
+ diff_t = le16_to_cpu(pstep->step[i].difft);
+
+ if (type == CXSTEP_NONE || type >= CXSTEP_MAX)
+ continue;
+
+ if (cnt % 10 == 0)
+ seq_printf(m, " %-15s : ", "[steps]");
+
+ seq_printf(m, "-> %s(%02d)(%02d)",
+ (type == CXSTEP_SLOT ? "SLT" :
+ "EVT"), (u32)val, diff_t);
+ if (cnt % 10 == 9)
+ seq_puts(m, "\n");
+ cnt++;
+ }
+
+ state = 2;
+ break;
+ case 2:
+ if (pos_new < pos_old && n_start != 0) {
+ n_start = 0;
+ n_stop = pos_new;
+ state = 1;
+ } else {
+ outloop = true;
+ }
+ break;
+ }
+ } while (!outloop);
+}
+
+static void _show_fw_dm_msg(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ if (!(btc->dm.coex_info_map & BTC_COEX_INFO_DM))
+ return;
+
+ _show_error(rtwdev, m);
+ _show_fbtc_tdma(rtwdev, m);
+ _show_fbtc_slots(rtwdev, m);
+ _show_fbtc_cysta(rtwdev, m);
+ _show_fbtc_nullsta(rtwdev, m);
+ _show_fbtc_step(rtwdev, m);
+}
+
+static void _show_mreg(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_mreg_val *pmreg = NULL;
+ struct rtw89_btc_fbtc_gpio_dbg *gdbg = NULL;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_mac_ax_gnt gnt[2] = {0};
+ u8 i = 0, type = 0, cnt = 0;
+ u32 val, offset;
+
+ if (!(btc->dm.coex_info_map & BTC_COEX_INFO_MREG))
+ return;
+
+ seq_puts(m, "========== [HW Status] ==========\n");
+
+ seq_printf(m,
+ " %-15s : WL->BT:0x%08x(cnt:%d), BT->WL:0x%08x(total:%d, bt_update:%d)\n",
+ "[scoreboard]", wl->scbd, cx->cnt_wl[BTC_WCNT_SCBDUPDATE],
+ bt->scbd, cx->cnt_bt[BTC_BCNT_SCBDREAD],
+ cx->cnt_bt[BTC_BCNT_SCBDUPDATE]);
+
+	/* Avoid register I/O while WL is in LPS or RF-off */
+ if (!wl->status.map.lps && !wl->status.map.rf_off) {
+ rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
+ if (val & (B_AX_GNT_BT_RFC_S0_SW_VAL |
+ B_AX_GNT_BT_BB_S0_SW_VAL))
+ gnt[0].gnt_bt = true;
+ if (val & (B_AX_GNT_BT_RFC_S0_SW_CTRL |
+ B_AX_GNT_BT_BB_S0_SW_CTRL))
+ gnt[0].gnt_bt_sw_en = true;
+ if (val & (B_AX_GNT_WL_RFC_S0_SW_VAL |
+ B_AX_GNT_WL_BB_S0_SW_VAL))
+ gnt[0].gnt_wl = true;
+ if (val & (B_AX_GNT_WL_RFC_S0_SW_CTRL |
+ B_AX_GNT_WL_BB_S0_SW_CTRL))
+ gnt[0].gnt_wl_sw_en = true;
+
+ if (val & (B_AX_GNT_BT_RFC_S1_SW_VAL |
+ B_AX_GNT_BT_BB_S1_SW_VAL))
+ gnt[1].gnt_bt = true;
+ if (val & (B_AX_GNT_BT_RFC_S1_SW_CTRL |
+ B_AX_GNT_BT_BB_S1_SW_CTRL))
+ gnt[1].gnt_bt_sw_en = true;
+ if (val & (B_AX_GNT_WL_RFC_S1_SW_VAL |
+ B_AX_GNT_WL_BB_S1_SW_VAL))
+ gnt[1].gnt_wl = true;
+ if (val & (B_AX_GNT_WL_RFC_S1_SW_CTRL |
+ B_AX_GNT_WL_BB_S1_SW_CTRL))
+ gnt[1].gnt_wl_sw_en = true;
+
+ seq_printf(m,
+ " %-15s : pta_owner:%s, phy-0[gnt_wl:%s-%d/gnt_bt:%s-%d], ",
+ "[gnt_status]",
+ (rtw89_mac_get_ctrl_path(rtwdev) ? "WL" : "BT"),
+ (gnt[0].gnt_wl_sw_en ? "SW" : "HW"), gnt[0].gnt_wl,
+ (gnt[0].gnt_bt_sw_en ? "SW" : "HW"), gnt[0].gnt_bt);
+
+ seq_printf(m, "phy-1[gnt_wl:%s-%d/gnt_bt:%s-%d]\n",
+ (gnt[1].gnt_wl_sw_en ? "SW" : "HW"), gnt[1].gnt_wl,
+ (gnt[1].gnt_bt_sw_en ? "SW" : "HW"), gnt[1].gnt_bt);
+ }
+
+ pcinfo = &pfwinfo->rpt_fbtc_mregval.cinfo;
+ if (!pcinfo->valid) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): stop due rpt_fbtc_mregval.cinfo\n",
+ __func__);
+ return;
+ }
+
+ pmreg = &pfwinfo->rpt_fbtc_mregval.finfo;
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): rpt_fbtc_mregval reg_num = %d\n",
+ __func__, pmreg->reg_num);
+
+ for (i = 0; i < pmreg->reg_num; i++) {
+ type = (u8)le16_to_cpu(chip->mon_reg[i].type);
+ offset = le32_to_cpu(chip->mon_reg[i].offset);
+ val = le32_to_cpu(pmreg->mreg_val[i]);
+
+ if (cnt % 6 == 0)
+ seq_printf(m, " %-15s : %d_0x%04x=0x%08x",
+ "[reg]", (u32)type, offset, val);
+ else
+ seq_printf(m, ", %d_0x%04x=0x%08x", (u32)type,
+ offset, val);
+ if (cnt % 6 == 5)
+ seq_puts(m, "\n");
+ cnt++;
+ }
+
+ pcinfo = &pfwinfo->rpt_fbtc_gpio_dbg.cinfo;
+ if (!pcinfo->valid) {
+ rtw89_debug(rtwdev, RTW89_DBG_BTC,
+ "[BTC], %s(): stop due rpt_fbtc_gpio_dbg.cinfo\n",
+ __func__);
+ return;
+ }
+
+ gdbg = &pfwinfo->rpt_fbtc_gpio_dbg.finfo;
+ if (!gdbg->en_map)
+ return;
+
+ seq_printf(m, " %-15s : enable_map:0x%08x",
+ "[gpio_dbg]", gdbg->en_map);
+
+ for (i = 0; i < BTC_DBG_MAX1; i++) {
+ if (!(gdbg->en_map & BIT(i)))
+ continue;
+ seq_printf(m, ", %d->GPIO%d", (u32)i, gdbg->gpio_map[i]);
+ }
+ seq_puts(m, "\n");
+}
+
+static void _show_summary(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_btf_fwinfo *pfwinfo = &btc->fwinfo;
+ struct rtw89_btc_rpt_cmn_info *pcinfo = NULL;
+ struct rtw89_btc_fbtc_rpt_ctrl *prptctrl = NULL;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_wl_info *wl = &cx->wl;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+ u32 cnt_sum = 0, *cnt = btc->dm.cnt_notify;
+ u8 i;
+
+ if (!(dm->coex_info_map & BTC_COEX_INFO_SUMMARY))
+ return;
+
+ seq_puts(m, "========== [Statistics] ==========\n");
+
+ pcinfo = &pfwinfo->rpt_ctrl.cinfo;
+ if (pcinfo->valid && !wl->status.map.lps && !wl->status.map.rf_off) {
+ prptctrl = &pfwinfo->rpt_ctrl.finfo;
+
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d, fw_recv:%d), c2h_cnt=%d(fw_send:%d), ",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, prptctrl->h2c_cnt,
+ pfwinfo->cnt_c2h, prptctrl->c2h_cnt);
+
+ seq_printf(m,
+ "rpt_cnt=%d(fw_send:%d), rpt_map=0x%x, dm_error_map:0x%x",
+ pfwinfo->event[BTF_EVNT_RPT], prptctrl->rpt_cnt,
+ prptctrl->rpt_enable, dm->error.val);
+
+ _chk_btc_err(rtwdev, BTC_DCNT_RPT_FREEZE,
+ pfwinfo->event[BTF_EVNT_RPT]);
+
+ if (dm->error.map.wl_fw_hang)
+ seq_puts(m, " (WL FW Hang!!)");
+ seq_puts(m, "\n");
+ seq_printf(m,
+ " %-15s : send_ok:%d, send_fail:%d, recv:%d",
+ "[mailbox]", prptctrl->mb_send_ok_cnt,
+ prptctrl->mb_send_fail_cnt, prptctrl->mb_recv_cnt);
+
+ seq_printf(m,
+ "(A2DP_empty:%d, A2DP_flowstop:%d, A2DP_full:%d)\n",
+ prptctrl->mb_a2dp_empty_cnt,
+ prptctrl->mb_a2dp_flct_cnt,
+ prptctrl->mb_a2dp_full_cnt);
+
+ seq_printf(m,
+ " %-15s : wl_rfk[req:%d/go:%d/reject:%d/timeout:%d]",
+ "[RFK]", cx->cnt_wl[BTC_WCNT_RFK_REQ],
+ cx->cnt_wl[BTC_WCNT_RFK_GO],
+ cx->cnt_wl[BTC_WCNT_RFK_REJECT],
+ cx->cnt_wl[BTC_WCNT_RFK_TIMEOUT]);
+
+ seq_printf(m,
+ ", bt_rfk[req:%d/go:%d/reject:%d/timeout:%d/fail:%d]\n",
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REQ],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_GO],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_REJECT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT],
+ prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_FAIL]);
+
+ if (prptctrl->bt_rfk_cnt[BTC_BCNT_RFK_TIMEOUT] > 0)
+ bt->rfk_info.map.timeout = 1;
+ else
+ bt->rfk_info.map.timeout = 0;
+
+ dm->error.map.wl_rfk_timeout = bt->rfk_info.map.timeout;
+ } else {
+ seq_printf(m,
+ " %-15s : h2c_cnt=%d(fail:%d), c2h_cnt=%d, rpt_cnt=%d, rpt_map=0x%x",
+ "[summary]", pfwinfo->cnt_h2c,
+ pfwinfo->cnt_h2c_fail, pfwinfo->cnt_c2h,
+ pfwinfo->event[BTF_EVNT_RPT],
+ btc->fwinfo.rpt_en_map);
+ seq_puts(m, " (WL FW report invalid!!)\n");
+ }
+
+ for (i = 0; i < BTC_NCNT_NUM; i++)
+ cnt_sum += dm->cnt_notify[i];
+
+ seq_printf(m,
+ " %-15s : total=%d, show_coex_info=%d, power_on=%d, init_coex=%d, ",
+ "[notify_cnt]", cnt_sum, cnt[BTC_NCNT_SHOW_COEX_INFO],
+ cnt[BTC_NCNT_POWER_ON], cnt[BTC_NCNT_INIT_COEX]);
+
+ seq_printf(m,
+ "power_off=%d, radio_state=%d, role_info=%d, wl_rfk=%d, wl_sta=%d\n",
+ cnt[BTC_NCNT_POWER_OFF], cnt[BTC_NCNT_RADIO_STATE],
+ cnt[BTC_NCNT_ROLE_INFO], cnt[BTC_NCNT_WL_RFK],
+ cnt[BTC_NCNT_WL_STA]);
+
+ seq_printf(m,
+ " %-15s : scan_start=%d, scan_finish=%d, switch_band=%d, special_pkt=%d, ",
+ "[notify_cnt]", cnt[BTC_NCNT_SCAN_START],
+ cnt[BTC_NCNT_SCAN_FINISH], cnt[BTC_NCNT_SWITCH_BAND],
+ cnt[BTC_NCNT_SPECIAL_PACKET]);
+
+ seq_printf(m,
+ "timer=%d, control=%d, customerize=%d\n",
+ cnt[BTC_NCNT_TIMER], cnt[BTC_NCNT_CONTROL],
+ cnt[BTC_NCNT_CUSTOMERIZE]);
+}
+
+void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m)
+{
+ struct rtw89_fw_suit *fw_suit = &rtwdev->fw.normal;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ struct rtw89_btc_bt_info *bt = &cx->bt;
+
+ seq_puts(m, "=========================================\n");
+ seq_printf(m, "WL FW / BT FW %d.%d.%d.%d / NA\n",
+ fw_suit->major_ver, fw_suit->minor_ver,
+ fw_suit->sub_ver, fw_suit->sub_idex);
+ seq_printf(m, "manual %d\n", btc->ctrl.manual);
+
+ seq_puts(m, "=========================================\n");
+
+ seq_printf(m, "\n\r %-15s : raw_data[%02x %02x %02x %02x %02x %02x] (type:%s/cnt:%d/same:%d)",
+ "[bt_info]",
+ bt->raw_info[2], bt->raw_info[3],
+ bt->raw_info[4], bt->raw_info[5],
+ bt->raw_info[6], bt->raw_info[7],
+ bt->raw_info[0] == BTC_BTINFO_AUTO ? "auto" : "reply",
+ cx->cnt_bt[BTC_BCNT_INFOUPDATE],
+ cx->cnt_bt[BTC_BCNT_INFOSAME]);
+
+ seq_puts(m, "\n=========================================\n");
+
+ _show_cx_info(rtwdev, m);
+ _show_wl_info(rtwdev, m);
+ _show_bt_info(rtwdev, m);
+ _show_dm_info(rtwdev, m);
+ _show_fw_dm_msg(rtwdev, m);
+ _show_mreg(rtwdev, m);
+ _show_summary(rtwdev, m);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/coex.h b/drivers/net/wireless/realtek/rtw89/coex.h
new file mode 100644
index 000000000000..4b4565d15c9e
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/coex.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_COEX_H__
+#define __RTW89_COEX_H__
+
+#include "core.h"
+
+enum btc_mode {
+ BTC_MODE_NORMAL,
+ BTC_MODE_WL,
+ BTC_MODE_BT,
+ BTC_MODE_WLOFF,
+ BTC_MODE_MAX
+};
+
+enum btc_wl_rfk_type {
+ BTC_WRFKT_IQK = 0,
+ BTC_WRFKT_LCK = 1,
+ BTC_WRFKT_DPK = 2,
+ BTC_WRFKT_TXGAPK = 3,
+ BTC_WRFKT_DACK = 4,
+ BTC_WRFKT_RXDCK = 5,
+ BTC_WRFKT_TSSI = 6,
+};
+
+#define NM_EXEC false
+#define FC_EXEC true
+
+#define RTW89_COEX_ACT1_WORK_PERIOD round_jiffies_relative(HZ * 4)
+#define RTW89_COEX_BT_DEVINFO_WORK_PERIOD round_jiffies_relative(HZ * 16)
+#define RTW89_COEX_RFK_CHK_WORK_PERIOD msecs_to_jiffies(300)
+#define BTC_RFK_PATH_MAP GENMASK(3, 0)
+#define BTC_RFK_PHY_MAP GENMASK(5, 4)
+#define BTC_RFK_BAND_MAP GENMASK(7, 6)
+
+enum btc_wl_rfk_state {
+ BTC_WRFK_STOP = 0,
+ BTC_WRFK_START = 1,
+ BTC_WRFK_ONESHOT_START = 2,
+ BTC_WRFK_ONESHOT_STOP = 3,
+};
+
+enum btc_pri {
+ BTC_PRI_MASK_RX_RESP = 0,
+ BTC_PRI_MASK_TX_RESP,
+ BTC_PRI_MASK_BEACON,
+ BTC_PRI_MASK_RX_CCK,
+ BTC_PRI_MASK_TX_MNGQ,
+ BTC_PRI_MASK_MAX,
+};
+
+enum btc_bt_trs {
+ BTC_BT_SS_GROUP = 0x0,
+ BTC_BT_TX_GROUP = 0x2,
+ BTC_BT_RX_GROUP = 0x3,
+ BTC_BT_MAX_GROUP,
+};
+
+enum btc_rssi_st {
+ BTC_RSSI_ST_LOW = 0x0,
+ BTC_RSSI_ST_HIGH,
+ BTC_RSSI_ST_STAY_LOW,
+ BTC_RSSI_ST_STAY_HIGH,
+ BTC_RSSI_ST_MAX
+};
+
+#define BTC_RSSI_HIGH(_rssi_) \
+ ({typeof(_rssi_) __rssi = (_rssi_); \
+ ((__rssi == BTC_RSSI_ST_HIGH || \
+ __rssi == BTC_RSSI_ST_STAY_HIGH) ? 1 : 0); })
+
+#define BTC_RSSI_LOW(_rssi_) \
+ ({typeof(_rssi_) __rssi = (_rssi_); \
+ ((__rssi == BTC_RSSI_ST_LOW || \
+ __rssi == BTC_RSSI_ST_STAY_LOW) ? 1 : 0); })
+
+#define BTC_RSSI_CHANGE(_rssi_) \
+ ({typeof(_rssi_) __rssi = (_rssi_); \
+ ((__rssi == BTC_RSSI_ST_LOW || \
+ __rssi == BTC_RSSI_ST_HIGH) ? 1 : 0); })
+
+enum btc_ant {
+ BTC_ANT_SHARED = 0,
+ BTC_ANT_DEDICATED,
+ BTC_ANTTYPE_MAX
+};
+
+enum btc_bt_btg {
+ BTC_BT_ALONE = 0,
+ BTC_BT_BTG
+};
+
+enum btc_switch {
+ BTC_SWITCH_INTERNAL = 0,
+ BTC_SWITCH_EXTERNAL
+};
+
+enum btc_pkt_type {
+ PACKET_DHCP,
+ PACKET_ARP,
+ PACKET_EAPOL,
+ PACKET_EAPOL_END,
+ PACKET_ICMP,
+ PACKET_MAX
+};
+
+enum btc_bt_mailbox_id {
+ BTC_BTINFO_REPLY = 0x23,
+ BTC_BTINFO_AUTO = 0x27
+};
+
+enum btc_role_state {
+ BTC_ROLE_START,
+ BTC_ROLE_STOP,
+ BTC_ROLE_CHG_TYPE,
+ BTC_ROLE_MSTS_STA_CONN_START,
+ BTC_ROLE_MSTS_STA_CONN_END,
+ BTC_ROLE_MSTS_STA_DIS_CONN,
+ BTC_ROLE_MSTS_AP_START,
+ BTC_ROLE_MSTS_AP_STOP,
+ BTC_ROLE_STATE_UNKNOWN
+};
+
+enum btc_rfctrl {
+ BTC_RFCTRL_WL_OFF,
+ BTC_RFCTRL_WL_ON,
+ BTC_RFCTRL_FW_CTRL,
+ BTC_RFCTRL_MAX
+};
+
+void rtw89_btc_ntfy_poweron(struct rtw89_dev *rtwdev);
+void rtw89_btc_ntfy_poweroff(struct rtw89_dev *rtwdev);
+void rtw89_btc_ntfy_init(struct rtw89_dev *rtwdev, u8 mode);
+void rtw89_btc_ntfy_scan_start(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band);
+void rtw89_btc_ntfy_scan_finish(struct rtw89_dev *rtwdev, u8 phy_idx);
+void rtw89_btc_ntfy_switch_band(struct rtw89_dev *rtwdev, u8 phy_idx, u8 band);
+void rtw89_btc_ntfy_specific_packet(struct rtw89_dev *rtwdev,
+ enum btc_pkt_type pkt_type);
+void rtw89_btc_ntfy_eapol_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_arp_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_dhcp_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_icmp_packet_work(struct work_struct *work);
+void rtw89_btc_ntfy_role_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ struct rtw89_sta *rtwsta, enum btc_role_state state);
+void rtw89_btc_ntfy_radio_state(struct rtw89_dev *rtwdev, enum btc_rfctrl rf_state);
+void rtw89_btc_ntfy_wl_rfk(struct rtw89_dev *rtwdev, u8 phy_map,
+ enum btc_wl_rfk_type type,
+ enum btc_wl_rfk_state state);
+void rtw89_btc_ntfy_wl_sta(struct rtw89_dev *rtwdev);
+void rtw89_btc_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func);
+void rtw89_btc_dump_info(struct rtw89_dev *rtwdev, struct seq_file *m);
+void rtw89_coex_act1_work(struct work_struct *work);
+void rtw89_coex_bt_devinfo_work(struct work_struct *work);
+void rtw89_coex_rfk_chk_work(struct work_struct *work);
+void rtw89_coex_power_on(struct rtw89_dev *rtwdev);
+
+static inline u8 rtw89_btc_phymap(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_rf_path_bit paths)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 phy_map;
+
+ phy_map = FIELD_PREP(BTC_RFK_PATH_MAP, paths) |
+ FIELD_PREP(BTC_RFK_PHY_MAP, BIT(phy_idx)) |
+ FIELD_PREP(BTC_RFK_BAND_MAP, hal->current_band_type);
+
+ return phy_map;
+}
+
+static inline u8 rtw89_btc_path_phymap(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ enum rtw89_rf_path path)
+{
+ return rtw89_btc_phymap(rtwdev, phy_idx, BIT(path));
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c
new file mode 100644
index 000000000000..d02ec5a735cb
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/core.c
@@ -0,0 +1,2502 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "core.h"
+#include "efuse.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "ps.h"
+#include "reg.h"
+#include "sar.h"
+#include "ser.h"
+#include "txrx.h"
+#include "util.h"
+
+static bool rtw89_disable_ps_mode;
+module_param_named(disable_ps_mode, rtw89_disable_ps_mode, bool, 0644);
+MODULE_PARM_DESC(disable_ps_mode, "Set Y to disable low power mode");
+
+static struct ieee80211_channel rtw89_channels_2ghz[] = {
+ { .center_freq = 2412, .hw_value = 1, },
+ { .center_freq = 2417, .hw_value = 2, },
+ { .center_freq = 2422, .hw_value = 3, },
+ { .center_freq = 2427, .hw_value = 4, },
+ { .center_freq = 2432, .hw_value = 5, },
+ { .center_freq = 2437, .hw_value = 6, },
+ { .center_freq = 2442, .hw_value = 7, },
+ { .center_freq = 2447, .hw_value = 8, },
+ { .center_freq = 2452, .hw_value = 9, },
+ { .center_freq = 2457, .hw_value = 10, },
+ { .center_freq = 2462, .hw_value = 11, },
+ { .center_freq = 2467, .hw_value = 12, },
+ { .center_freq = 2472, .hw_value = 13, },
+ { .center_freq = 2484, .hw_value = 14, },
+};
+
+static struct ieee80211_channel rtw89_channels_5ghz[] = {
+ {.center_freq = 5180, .hw_value = 36,},
+ {.center_freq = 5200, .hw_value = 40,},
+ {.center_freq = 5220, .hw_value = 44,},
+ {.center_freq = 5240, .hw_value = 48,},
+ {.center_freq = 5260, .hw_value = 52,},
+ {.center_freq = 5280, .hw_value = 56,},
+ {.center_freq = 5300, .hw_value = 60,},
+ {.center_freq = 5320, .hw_value = 64,},
+ {.center_freq = 5500, .hw_value = 100,},
+ {.center_freq = 5520, .hw_value = 104,},
+ {.center_freq = 5540, .hw_value = 108,},
+ {.center_freq = 5560, .hw_value = 112,},
+ {.center_freq = 5580, .hw_value = 116,},
+ {.center_freq = 5600, .hw_value = 120,},
+ {.center_freq = 5620, .hw_value = 124,},
+ {.center_freq = 5640, .hw_value = 128,},
+ {.center_freq = 5660, .hw_value = 132,},
+ {.center_freq = 5680, .hw_value = 136,},
+ {.center_freq = 5700, .hw_value = 140,},
+ {.center_freq = 5720, .hw_value = 144,},
+ {.center_freq = 5745, .hw_value = 149,},
+ {.center_freq = 5765, .hw_value = 153,},
+ {.center_freq = 5785, .hw_value = 157,},
+ {.center_freq = 5805, .hw_value = 161,},
+ {.center_freq = 5825, .hw_value = 165,
+ .flags = IEEE80211_CHAN_NO_HT40MINUS},
+};
+
+static struct ieee80211_rate rtw89_bitrates[] = {
+ { .bitrate = 10, .hw_value = 0x00, },
+ { .bitrate = 20, .hw_value = 0x01, },
+ { .bitrate = 55, .hw_value = 0x02, },
+ { .bitrate = 110, .hw_value = 0x03, },
+ { .bitrate = 60, .hw_value = 0x04, },
+ { .bitrate = 90, .hw_value = 0x05, },
+ { .bitrate = 120, .hw_value = 0x06, },
+ { .bitrate = 180, .hw_value = 0x07, },
+ { .bitrate = 240, .hw_value = 0x08, },
+ { .bitrate = 360, .hw_value = 0x09, },
+ { .bitrate = 480, .hw_value = 0x0a, },
+ { .bitrate = 540, .hw_value = 0x0b, },
+};
+
+u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate)
+{
+ struct ieee80211_rate rate;
+
+ if (unlikely(rpt_rate >= ARRAY_SIZE(rtw89_bitrates))) {
+ rtw89_info(rtwdev, "invalid rpt rate %d\n", rpt_rate);
+ return 0;
+ }
+
+ rate = rtw89_bitrates[rpt_rate];
+
+ return rate.bitrate;
+}
+
+static struct ieee80211_supported_band rtw89_sband_2ghz = {
+ .band = NL80211_BAND_2GHZ,
+ .channels = rtw89_channels_2ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_2ghz),
+ .bitrates = rtw89_bitrates,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates),
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+static struct ieee80211_supported_band rtw89_sband_5ghz = {
+ .band = NL80211_BAND_5GHZ,
+ .channels = rtw89_channels_5ghz,
+ .n_channels = ARRAY_SIZE(rtw89_channels_5ghz),
+
+	/* 5G has no CCK rates (1M/2M/5.5M/11M), so skip the first 4 bitrates */
+ .bitrates = rtw89_bitrates + 4,
+ .n_bitrates = ARRAY_SIZE(rtw89_bitrates) - 4,
+ .ht_cap = {0},
+ .vht_cap = {0},
+};
+
+static void rtw89_traffic_stats_accu(struct rtw89_dev *rtwdev,
+ struct rtw89_traffic_stats *stats,
+ struct sk_buff *skb, bool tx)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+
+ if (!ieee80211_is_data(hdr->frame_control))
+ return;
+
+ if (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1))
+ return;
+
+ if (tx) {
+ stats->tx_cnt++;
+ stats->tx_unicast += skb->len;
+ } else {
+ stats->rx_cnt++;
+ stats->rx_unicast += skb->len;
+ }
+}
+
+static void rtw89_get_channel_params(struct cfg80211_chan_def *chandef,
+ struct rtw89_channel_params *chan_param)
+{
+ struct ieee80211_channel *channel = chandef->chan;
+ enum nl80211_chan_width width = chandef->width;
+ u8 *cch_by_bw = chan_param->cch_by_bw;
+ u32 primary_freq, center_freq;
+ u8 center_chan;
+ u8 bandwidth = RTW89_CHANNEL_WIDTH_20;
+ u8 primary_chan_idx = 0;
+ u8 i;
+
+ center_chan = channel->hw_value;
+ primary_freq = channel->center_freq;
+ center_freq = chandef->center_freq1;
+
+ /* assign the center channel used while 20M bw is selected */
+ cch_by_bw[RTW89_CHANNEL_WIDTH_20] = channel->hw_value;
+
+ switch (width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ bandwidth = RTW89_CHANNEL_WIDTH_20;
+ primary_chan_idx = RTW89_SC_DONT_CARE;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bandwidth = RTW89_CHANNEL_WIDTH_40;
+ if (primary_freq > center_freq) {
+ primary_chan_idx = RTW89_SC_20_UPPER;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = RTW89_SC_20_LOWER;
+ center_chan += 2;
+ }
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bandwidth = RTW89_CHANNEL_WIDTH_80;
+ if (primary_freq > center_freq) {
+ if (primary_freq - center_freq == 10) {
+ primary_chan_idx = RTW89_SC_20_UPPER;
+ center_chan -= 2;
+ } else {
+ primary_chan_idx = RTW89_SC_20_UPMOST;
+ center_chan -= 6;
+ }
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan + 4;
+ } else {
+ if (center_freq - primary_freq == 10) {
+ primary_chan_idx = RTW89_SC_20_LOWER;
+ center_chan += 2;
+ } else {
+ primary_chan_idx = RTW89_SC_20_LOWEST;
+ center_chan += 6;
+ }
+ /* assign the center channel used
+ * while 40M bw is selected
+ */
+ cch_by_bw[RTW89_CHANNEL_WIDTH_40] = center_chan - 4;
+ }
+ break;
+ default:
+ center_chan = 0;
+ break;
+ }
+
+ chan_param->center_chan = center_chan;
+ chan_param->primary_chan = channel->hw_value;
+ chan_param->bandwidth = bandwidth;
+ chan_param->pri_ch_idx = primary_chan_idx;
+
+ /* assign the center channel used while current bw is selected */
+ cch_by_bw[bandwidth] = center_chan;
+
+ for (i = bandwidth + 1; i <= RTW89_MAX_CHANNEL_WIDTH; i++)
+ cch_by_bw[i] = 0;
+}
+
+void rtw89_set_channel(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_channel_params ch_param;
+ struct rtw89_channel_help_params bak;
+ u8 center_chan, bandwidth;
+ u8 band_type;
+ bool band_changed;
+ u8 i;
+
+ rtw89_get_channel_params(&hw->conf.chandef, &ch_param);
+ if (WARN(ch_param.center_chan == 0, "Invalid channel\n"))
+ return;
+
+ center_chan = ch_param.center_chan;
+ bandwidth = ch_param.bandwidth;
+ band_type = center_chan > 14 ? RTW89_BAND_5G : RTW89_BAND_2G;
+ band_changed = hal->current_band_type != band_type ||
+ hal->current_channel == 0;
+
+ hal->current_band_width = bandwidth;
+ hal->current_channel = center_chan;
+ hal->current_primary_channel = ch_param.primary_chan;
+ hal->current_band_type = band_type;
+
+ switch (center_chan) {
+ case 1 ... 14:
+ hal->current_subband = RTW89_CH_2G;
+ break;
+ case 36 ... 64:
+ hal->current_subband = RTW89_CH_5G_BAND_1;
+ break;
+ case 100 ... 144:
+ hal->current_subband = RTW89_CH_5G_BAND_3;
+ break;
+ case 149 ... 177:
+ hal->current_subband = RTW89_CH_5G_BAND_4;
+ break;
+ }
+
+ for (i = RTW89_CHANNEL_WIDTH_20; i <= RTW89_MAX_CHANNEL_WIDTH; i++)
+ hal->cch_by_bw[i] = ch_param.cch_by_bw[i];
+
+ rtw89_chip_set_channel_prepare(rtwdev, &bak);
+
+ chip->ops->set_channel(rtwdev, &ch_param);
+
+ rtw89_chip_set_txpwr(rtwdev);
+
+ rtw89_chip_set_channel_done(rtwdev, &bak);
+
+ if (band_changed) {
+ rtw89_btc_ntfy_switch_band(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_chip_rfk_band_changed(rtwdev);
+ }
+}
+
+static enum rtw89_core_tx_type
+rtw89_core_get_tx_type(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_is_mgmt(fc) || ieee80211_is_nullfunc(fc))
+ return RTW89_CORE_TX_TYPE_MGMT;
+
+ return RTW89_CORE_TX_TYPE_DATA;
+}
+
+static void
+rtw89_core_tx_update_ampdu_info(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req, u8 tid)
+{
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_sta *rtwsta;
+ u8 ampdu_num;
+
+ if (!sta) {
+ rtw89_warn(rtwdev, "cannot set ampdu info without sta\n");
+ return;
+ }
+
+ rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ ampdu_num = (u8)((rtwsta->ampdu_params[tid].agg_num ?
+ rtwsta->ampdu_params[tid].agg_num :
+ 4 << sta->ht_cap.ampdu_factor) - 1);
+
+ desc_info->agg_en = true;
+ desc_info->ampdu_density = sta->ht_cap.ampdu_density;
+ desc_info->ampdu_num = ampdu_num;
+}
+
+static void
+rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct ieee80211_tx_info *info;
+ struct ieee80211_key_conf *key;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_addr_cam_entry *addr_cam;
+ struct rtw89_sec_cam_entry *sec_cam;
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct sk_buff *skb = tx_req->skb;
+ u8 sec_type = RTW89_SEC_KEY_TYPE_NONE;
+
+ if (!vif) {
+ rtw89_warn(rtwdev, "cannot set sec key without vif\n");
+ return;
+ }
+
+ rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ addr_cam = &rtwvif->addr_cam;
+
+ info = IEEE80211_SKB_CB(skb);
+ key = info->control.hw_key;
+ sec_cam = addr_cam->sec_entries[key->hw_key_idx];
+ if (!sec_cam) {
+ rtw89_warn(rtwdev, "sec cam entry is empty\n");
+ return;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_WEP40:
+ sec_type = RTW89_SEC_KEY_TYPE_WEP40;
+ break;
+ case WLAN_CIPHER_SUITE_WEP104:
+ sec_type = RTW89_SEC_KEY_TYPE_WEP104;
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ sec_type = RTW89_SEC_KEY_TYPE_TKIP;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ sec_type = RTW89_SEC_KEY_TYPE_CCMP128;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP_256:
+ sec_type = RTW89_SEC_KEY_TYPE_CCMP256;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP:
+ sec_type = RTW89_SEC_KEY_TYPE_GCMP128;
+ break;
+ case WLAN_CIPHER_SUITE_GCMP_256:
+ sec_type = RTW89_SEC_KEY_TYPE_GCMP256;
+ break;
+ default:
+ rtw89_warn(rtwdev, "key cipher not supported %d\n", key->cipher);
+ return;
+ }
+
+ desc_info->sec_en = true;
+ desc_info->sec_type = sec_type;
+ desc_info->sec_cam_idx = sec_cam->sec_cam_idx;
+}
+
+static u16 rtw89_core_get_mgmt_rate(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = tx_info->control.vif;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u16 lowest_rate = hal->current_band_type == RTW89_BAND_2G ?
+ RTW89_HW_RATE_CCK1 : RTW89_HW_RATE_OFDM6;
+
+ if (!vif || !vif->bss_conf.basic_rates || !tx_req->sta)
+ return lowest_rate;
+
+ return __ffs(vif->bss_conf.basic_rates) + lowest_rate;
+}
+
+static void
+rtw89_core_tx_update_mgmt_info(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ u8 qsel, ch_dma;
+
+ qsel = RTW89_TX_QSEL_B0_MGMT;
+ ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+
+ desc_info->qsel = RTW89_TX_QSEL_B0_MGMT;
+ desc_info->ch_dma = ch_dma;
+
+ /* fixed data rate for mgmt frames */
+ desc_info->en_wd_info = true;
+ desc_info->use_rate = true;
+ desc_info->dis_data_fb = true;
+ desc_info->data_rate = rtw89_core_get_mgmt_rate(rtwdev, tx_req);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "tx mgmt frame with rate 0x%x on channel %d (bw %d)\n",
+ desc_info->data_rate, rtwdev->hal.current_channel,
+ rtwdev->hal.current_band_width);
+}
+
+static void
+rtw89_core_tx_update_h2c_info(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+
+ desc_info->is_bmc = false;
+ desc_info->wd_page = false;
+ desc_info->ch_dma = RTW89_DMA_H2C;
+}
+
+static void rtw89_core_get_no_ul_ofdma_htc(struct rtw89_dev *rtwdev, __le32 *htc)
+{
+ static const u8 rtw89_bandwidth_to_om[] = {
+ [RTW89_CHANNEL_WIDTH_20] = HTC_OM_CHANNEL_WIDTH_20,
+ [RTW89_CHANNEL_WIDTH_40] = HTC_OM_CHANNEL_WIDTH_40,
+ [RTW89_CHANNEL_WIDTH_80] = HTC_OM_CHANNEL_WIDTH_80,
+ [RTW89_CHANNEL_WIDTH_160] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
+ [RTW89_CHANNEL_WIDTH_80_80] = HTC_OM_CHANNEL_WIDTH_160_OR_80_80,
+ };
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 om_bandwidth;
+
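+ /* only chips that must disable UL OFDMA on 2 GHz 40 MHz need this OM HTC field */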
+ if (!chip->dis_2g_40m_ul_ofdma ||
+ hal->current_band_type != RTW89_BAND_2G ||
+ hal->current_band_width != RTW89_CHANNEL_WIDTH_40)
+ return;
+
+ om_bandwidth = hal->current_band_width < ARRAY_SIZE(rtw89_bandwidth_to_om) ?
+ rtw89_bandwidth_to_om[hal->current_band_width] : 0;
+ *htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
+ le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
+ le32_encode_bits(hal->rx_nss - 1, RTW89_HTC_MASK_HTC_OM_RX_NSS) |
+ le32_encode_bits(om_bandwidth, RTW89_HTC_MASK_HTC_OM_CH_WIDTH) |
+ le32_encode_bits(1, RTW89_HTC_MASK_HTC_OM_UL_MU_DIS) |
+ le32_encode_bits(hal->tx_nss - 1, RTW89_HTC_MASK_HTC_OM_TX_NSTS) |
+ le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_ER_SU_DIS) |
+ le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR) |
+ le32_encode_bits(0, RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS);
+}
+
+static bool
+__rtw89_core_tx_check_he_qos_htc(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req,
+ enum btc_pkt_type pkt_type)
+{
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+
+ /* AP IOT issue with EAPoL, ARP and DHCP */
+ if (pkt_type < PACKET_MAX)
+ return false;
+
+ if (!sta || !sta->he_cap.has_he)
+ return false;
+
+ if (!ieee80211_is_data_qos(fc))
+ return false;
+
+ if (skb_headroom(skb) < IEEE80211_HT_CTL_LEN)
+ return false;
+
+ return true;
+}
+
+static void
+__rtw89_core_tx_adjust_he_qos_htc(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_sta *sta = tx_req->sta;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ __le16 fc = hdr->frame_control;
+ void *data;
+ __le32 *htc;
+ u8 *qc;
+ int hdr_len;
+
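+ /* QoS data header length: 24-byte (or 30-byte with a 4th address) MAC header plus 2-byte QoS control */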
+ hdr_len = ieee80211_has_a4(fc) ? 32 : 26;
+ data = skb_push(skb, IEEE80211_HT_CTL_LEN);
+ memmove(data, data + IEEE80211_HT_CTL_LEN, hdr_len);
+
+ hdr = data;
+ htc = data + hdr_len;
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_ORDER);
+ *htc = rtwsta->htc_template ? rtwsta->htc_template :
+ le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
+ le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_CAS, RTW89_HTC_MASK_CTL_ID);
+
+ qc = data + hdr_len - IEEE80211_QOS_CTL_LEN;
+ qc[0] |= IEEE80211_QOS_CTL_EOSP;
+}
+
+static void
+rtw89_core_tx_update_he_qos_htc(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req,
+ enum btc_pkt_type pkt_type)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ if (!__rtw89_core_tx_check_he_qos_htc(rtwdev, tx_req, pkt_type))
+ goto desc_bk;
+
+ __rtw89_core_tx_adjust_he_qos_htc(rtwdev, tx_req);
+
+ desc_info->pkt_size += IEEE80211_HT_CTL_LEN;
+ desc_info->a_ctrl_bsr = true;
+
+desc_bk:
+ if (!rtwvif || rtwvif->last_a_ctrl == desc_info->a_ctrl_bsr)
+ return;
+
+ rtwvif->last_a_ctrl = desc_info->a_ctrl_bsr;
+ desc_info->bk = true;
+}
+
+static void
+rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct ieee80211_vif *vif = tx_req->vif;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct sk_buff *skb = tx_req->skb;
+ u8 tid, tid_indicate;
+ u8 qsel, ch_dma;
+
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ tid_indicate = rtw89_core_get_tid_indicate(rtwdev, tid);
+ qsel = rtw89_core_get_qsel(rtwdev, tid);
+ ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+
+ desc_info->ch_dma = ch_dma;
+ desc_info->tid_indicate = tid_indicate;
+ desc_info->qsel = qsel;
+
+ /* enable wd_info for AMPDU */
+ desc_info->en_wd_info = true;
+
+ if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU)
+ rtw89_core_tx_update_ampdu_info(rtwdev, tx_req, tid);
+ if (IEEE80211_SKB_CB(skb)->control.hw_key)
+ rtw89_core_tx_update_sec_key(rtwdev, tx_req);
+
+ if (rate_pattern->enable)
+ desc_info->data_retry_lowest_rate = rate_pattern->rate;
+ else if (hal->current_band_type == RTW89_BAND_2G)
+ desc_info->data_retry_lowest_rate = RTW89_HW_RATE_CCK1;
+ else
+ desc_info->data_retry_lowest_rate = RTW89_HW_RATE_OFDM6;
+}
+
+static enum btc_pkt_type
+rtw89_core_tx_btc_spec_pkt_notify(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct sk_buff *skb = tx_req->skb;
+ struct udphdr *udphdr;
+
+ if (IEEE80211_SKB_CB(skb)->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.eapol_notify_work);
+ return PACKET_EAPOL;
+ }
+
+ if (skb->protocol == htons(ETH_P_ARP)) {
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.arp_notify_work);
+ return PACKET_ARP;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_UDP) {
+ udphdr = udp_hdr(skb);
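+ /* DHCP detection: BOOTP server/client ports (67/68) plus a minimum frame-length heuristic */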
+ if (((udphdr->source == htons(67) && udphdr->dest == htons(68)) ||
+ (udphdr->source == htons(68) && udphdr->dest == htons(67))) &&
+ skb->len > 282) {
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.dhcp_notify_work);
+ return PACKET_DHCP;
+ }
+ }
+
+ if (skb->protocol == htons(ETH_P_IP) &&
+ ip_hdr(skb)->protocol == IPPROTO_ICMP) {
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->btc.icmp_notify_work);
+ return PACKET_ICMP;
+ }
+
+ return PACKET_MAX;
+}
+
+static void
+rtw89_core_tx_update_desc_info(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct sk_buff *skb = tx_req->skb;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ enum rtw89_core_tx_type tx_type;
+ enum btc_pkt_type pkt_type;
+ bool is_bmc;
+ u16 seq;
+
+ seq = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
+ if (tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD) {
+ tx_type = rtw89_core_get_tx_type(rtwdev, skb);
+ tx_req->tx_type = tx_type;
+ }
+ is_bmc = (is_broadcast_ether_addr(hdr->addr1) ||
+ is_multicast_ether_addr(hdr->addr1));
+
+ desc_info->seq = seq;
+ desc_info->pkt_size = skb->len;
+ desc_info->is_bmc = is_bmc;
+ desc_info->wd_page = true;
+
+ switch (tx_req->tx_type) {
+ case RTW89_CORE_TX_TYPE_MGMT:
+ rtw89_core_tx_update_mgmt_info(rtwdev, tx_req);
+ break;
+ case RTW89_CORE_TX_TYPE_DATA:
+ rtw89_core_tx_update_data_info(rtwdev, tx_req);
+ pkt_type = rtw89_core_tx_btc_spec_pkt_notify(rtwdev, tx_req);
+ rtw89_core_tx_update_he_qos_htc(rtwdev, tx_req, pkt_type);
+ break;
+ case RTW89_CORE_TX_TYPE_FWCMD:
+ rtw89_core_tx_update_h2c_info(rtwdev, tx_req);
+ break;
+ }
+}
+
+void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ u8 ch_dma;
+
+ ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+
+ rtw89_hci_tx_kick_off(rtwdev, ch_dma);
+}
+
+int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb, bool fwdl)
+{
+ struct rtw89_core_tx_request tx_req = {0};
+ u32 cnt;
+ int ret;
+
+ tx_req.skb = skb;
+ tx_req.tx_type = RTW89_CORE_TX_TYPE_FWCMD;
+ if (fwdl)
+ tx_req.desc_info.fw_dl = true;
+
+ rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+
+ if (!fwdl)
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "H2C: ", skb->data, skb->len);
+
+ cnt = rtw89_hci_check_and_reclaim_tx_resource(rtwdev, RTW89_TXCH_CH12);
+ if (cnt == 0) {
+ rtw89_err(rtwdev, "no tx fwcmd resource\n");
+ return -ENOSPC;
+ }
+
+ ret = rtw89_hci_tx_write(rtwdev, &tx_req);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
+ return ret;
+ }
+ rtw89_hci_tx_kick_off(rtwdev, RTW89_TXCH_CH12);
+
+ return 0;
+}
+
+int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel)
+{
+ struct rtw89_core_tx_request tx_req = {0};
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret;
+
+ tx_req.skb = skb;
+ tx_req.sta = sta;
+ tx_req.vif = vif;
+
+ rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, true);
+ rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, true);
+ rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ ret = rtw89_hci_tx_write(rtwdev, &tx_req);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
+ return ret;
+ }
+
+ if (qsel)
+ *qsel = tx_req.desc_info.qsel;
+
+ return 0;
+}
+
+static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY0_WP_OFFSET, desc_info->wp_offset) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_INFO_EN, desc_info->en_wd_info) |
+ FIELD_PREP(RTW89_TXWD_BODY0_CHANNEL_DMA, desc_info->ch_dma) |
+ FIELD_PREP(RTW89_TXWD_BODY0_HDR_LLC_LEN, desc_info->hdr_llc_len) |
+ FIELD_PREP(RTW89_TXWD_BODY0_WD_PAGE, desc_info->wd_page) |
+ FIELD_PREP(RTW89_TXWD_BODY0_FW_DL, desc_info->fw_dl);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body2(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY2_TID_INDICATE, desc_info->tid_indicate) |
+ FIELD_PREP(RTW89_TXWD_BODY2_QSEL, desc_info->qsel) |
+ FIELD_PREP(RTW89_TXWD_BODY2_TXPKT_SIZE, desc_info->pkt_size);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_body3(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_BODY3_SW_SEQ, desc_info->seq) |
+ FIELD_PREP(RTW89_TXWD_BODY3_AGG_EN, desc_info->agg_en) |
+ FIELD_PREP(RTW89_TXWD_BODY3_BK, desc_info->bk);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) |
+ FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info1(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO1_MAX_AGGNUM, desc_info->ampdu_num) |
+ FIELD_PREP(RTW89_TXWD_INFO1_A_CTRL_BSR, desc_info->a_ctrl_bsr) |
+ FIELD_PREP(RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE,
+ desc_info->data_retry_lowest_rate);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info2(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO2_AMPDU_DENSITY, desc_info->ampdu_density) |
+ FIELD_PREP(RTW89_TXWD_INFO2_SEC_TYPE, desc_info->sec_type) |
+ FIELD_PREP(RTW89_TXWD_INFO2_SEC_HW_ENC, desc_info->sec_en) |
+ FIELD_PREP(RTW89_TXWD_INFO2_SEC_CAM_IDX, desc_info->sec_cam_idx);
+
+ return cpu_to_le32(dword);
+}
+
+static __le32 rtw89_build_txwd_info4(struct rtw89_tx_desc_info *desc_info)
+{
+ u32 dword = FIELD_PREP(RTW89_TXWD_INFO4_RTS_EN, 1) |
+ FIELD_PREP(RTW89_TXWD_INFO4_HW_RTS_EN, 1);
+
+ return cpu_to_le32(dword);
+}
+
+void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc)
+{
+ struct rtw89_txwd_body *txwd_body = (struct rtw89_txwd_body *)txdesc;
+ struct rtw89_txwd_info *txwd_info;
+
+ txwd_body->dword0 = rtw89_build_txwd_body0(desc_info);
+ txwd_body->dword2 = rtw89_build_txwd_body2(desc_info);
+ txwd_body->dword3 = rtw89_build_txwd_body3(desc_info);
+
+ if (!desc_info->en_wd_info)
+ return;
+
+ txwd_info = (struct rtw89_txwd_info *)(txwd_body + 1);
+ txwd_info->dword0 = rtw89_build_txwd_info0(desc_info);
+ txwd_info->dword1 = rtw89_build_txwd_info1(desc_info);
+ txwd_info->dword2 = rtw89_build_txwd_info2(desc_info);
+ txwd_info->dword4 = rtw89_build_txwd_info4(desc_info);
+}
+EXPORT_SYMBOL(rtw89_core_fill_txdesc);
+
+static int rtw89_core_rx_process_mac_ppdu(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ bool rx_cnt_valid = false;
+ u8 plcp_size = 0;
+ u8 usr_num = 0;
+ u8 *phy_sts;
+
+ rx_cnt_valid = RTW89_GET_RXINFO_RX_CNT_VLD(skb->data);
+ plcp_size = RTW89_GET_RXINFO_PLCP_LEN(skb->data) << 3;
+ usr_num = RTW89_GET_RXINFO_USR_NUM(skb->data);
+ if (usr_num > RTW89_PPDU_MAX_USR) {
+ rtw89_warn(rtwdev, "Invalid user number in mac info\n");
+ return -EINVAL;
+ }
+
+ phy_sts = skb->data + RTW89_PPDU_MAC_INFO_SIZE;
+ phy_sts += usr_num * RTW89_PPDU_MAC_INFO_USR_SIZE;
+ /* 8-byte alignment */
+ if (usr_num & BIT(0))
+ phy_sts += RTW89_PPDU_MAC_INFO_USR_SIZE;
+ if (rx_cnt_valid)
+ phy_sts += RTW89_PPDU_MAC_RX_CNT_SIZE;
+ phy_sts += plcp_size;
+
+ phy_ppdu->buf = phy_sts;
+ phy_ppdu->len = skb->data + skb->len - phy_sts;
+
+ return 0;
+}
+
+static void rtw89_core_rx_process_phy_ppdu_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_rx_phy_ppdu *phy_ppdu = (struct rtw89_rx_phy_ppdu *)data;
+
+ if (rtwsta->mac_id == phy_ppdu->mac_id && phy_ppdu->to_self)
+ ewma_rssi_add(&rtwsta->avg_rssi, phy_ppdu->rssi_avg);
+}
+
+#define VAR_LEN 0xff
+#define VAR_LEN_UNIT 8
+static u16 rtw89_core_get_phy_status_ie_len(struct rtw89_dev *rtwdev, u8 *addr)
+{
+ static const u8 physts_ie_len_tab[32] = {
+ 16, 32, 24, 24, 8, 8, 8, 8, VAR_LEN, 8, VAR_LEN, 176, VAR_LEN,
+ VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, VAR_LEN, 16, 24, VAR_LEN,
+ VAR_LEN, VAR_LEN, 0, 24, 24, 24, 24, 32, 32, 32, 32
+ };
+ u16 ie_len;
+ u8 ie;
+
+ ie = RTW89_GET_PHY_STS_IE_TYPE(addr);
+ if (physts_ie_len_tab[ie] != VAR_LEN)
+ ie_len = physts_ie_len_tab[ie];
+ else
+ ie_len = RTW89_GET_PHY_STS_IE_LEN(addr) * VAR_LEN_UNIT;
+
+ return ie_len;
+}
+
+static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u8 *addr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ s16 cfo;
+
+ /* sign conversion for S(12,2) */
+ cfo = sign_extend32(RTW89_GET_PHY_STS_IE0_CFO(addr), 11);
+ rtw89_phy_cfo_parse(rtwdev, cfo, phy_ppdu);
+}
+
+static int rtw89_core_process_phy_status_ie(struct rtw89_dev *rtwdev, u8 *addr,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u8 ie;
+
+ ie = RTW89_GET_PHY_STS_IE_TYPE(addr);
+ switch (ie) {
+ case RTW89_PHYSTS_IE01_CMN_OFDM:
+ rtw89_core_parse_phy_status_ie01(rtwdev, addr, phy_ppdu);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void rtw89_core_update_phy_ppdu(struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ s8 *rssi = phy_ppdu->rssi;
+ u8 *buf = phy_ppdu->buf;
+
+ phy_ppdu->rssi_avg = RTW89_GET_PHY_STS_RSSI_AVG(buf);
+ rssi[RF_PATH_A] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_A(buf));
+ rssi[RF_PATH_B] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_B(buf));
+ rssi[RF_PATH_C] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_C(buf));
+ rssi[RF_PATH_D] = RTW89_RSSI_RAW_TO_DBM(RTW89_GET_PHY_STS_RSSI_D(buf));
+}
+
+static int rtw89_core_rx_process_phy_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ if (RTW89_GET_PHY_STS_LEN(phy_ppdu->buf) << 3 != phy_ppdu->len) {
+ rtw89_warn(rtwdev, "phy ppdu len mismatch\n");
+ return -EINVAL;
+ }
+ rtw89_core_update_phy_ppdu(phy_ppdu);
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_core_rx_process_phy_ppdu_iter,
+ phy_ppdu);
+
+ return 0;
+}
+
+static int rtw89_core_rx_parse_phy_sts(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ u16 ie_len;
+ u8 *pos, *end;
+
+ if (!phy_ppdu->to_self)
+ return 0;
+
+ pos = (u8 *)phy_ppdu->buf + PHY_STS_HDR_LEN;
+ end = (u8 *)phy_ppdu->buf + phy_ppdu->len;
+ while (pos < end) {
+ ie_len = rtw89_core_get_phy_status_ie_len(rtwdev, pos);
+ rtw89_core_process_phy_status_ie(rtwdev, pos, phy_ppdu);
+ pos += ie_len;
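+ /* stop on zero-length or overrunning IEs to avoid an endless walk */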
+ if (pos > end || ie_len == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "phy status parse failed\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static void rtw89_core_rx_process_phy_sts(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ int ret;
+
+ ret = rtw89_core_rx_parse_phy_sts(rtwdev, phy_ppdu);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX, "parse phy sts failed\n");
+ else
+ phy_ppdu->valid = true;
+}
+
+static u8 rtw89_rxdesc_to_nl_he_gi(struct rtw89_dev *rtwdev,
+ const struct rtw89_rx_desc_info *desc_info,
+ bool rx_status)
+{
+ switch (desc_info->gi_ltf) {
+ case RTW89_GILTF_SGI_4XHE08:
+ case RTW89_GILTF_2XHE08:
+ case RTW89_GILTF_1XHE08:
+ return NL80211_RATE_INFO_HE_GI_0_8;
+ case RTW89_GILTF_2XHE16:
+ case RTW89_GILTF_1XHE16:
+ return NL80211_RATE_INFO_HE_GI_1_6;
+ case RTW89_GILTF_LGI_4XHE32:
+ return NL80211_RATE_INFO_HE_GI_3_2;
+ default:
+ rtw89_warn(rtwdev, "invalid gi_ltf=%d", desc_info->gi_ltf);
+ return rx_status ? NL80211_RATE_INFO_HE_GI_3_2 : U8_MAX;
+ }
+}
+
+static bool rtw89_core_rx_ppdu_match(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *status)
+{
+ u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
+ u8 data_rate_mode, bw, rate_idx = MASKBYTE0, gi_ltf;
+ u16 data_rate;
+ bool ret;
+
+ data_rate = desc_info->data_rate;
+ data_rate_mode = GET_DATA_RATE_MODE(data_rate);
+ if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
+ rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
+ /* No 4 CCK rates for 5G */
+ if (status->band == NL80211_BAND_5GHZ)
+ rate_idx -= 4;
+ } else if (data_rate_mode == DATA_RATE_MODE_HT) {
+ rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
+ } else if (data_rate_mode == DATA_RATE_MODE_VHT) {
+ rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
+ } else if (data_rate_mode == DATA_RATE_MODE_HE) {
+ rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
+ } else {
+ rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
+ }
+
+ if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
+ bw = RATE_INFO_BW_80;
+ else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
+ bw = RATE_INFO_BW_40;
+ else
+ bw = RATE_INFO_BW_20;
+
+ gi_ltf = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, false);
+ ret = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band] == desc_info->ppdu_cnt &&
+ status->rate_idx == rate_idx &&
+ status->he_gi == gi_ltf &&
+ status->bw == bw;
+
+ return ret;
+}
+
+struct rtw89_vif_rx_stats_iter_data {
+ struct rtw89_dev *rtwdev;
+ struct rtw89_rx_phy_ppdu *phy_ppdu;
+ struct rtw89_rx_desc_info *desc_info;
+ struct sk_buff *skb;
+ const u8 *bssid;
+};
+
+static void rtw89_vif_rx_stats_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_vif_rx_stats_iter_data *iter_data = data;
+ struct rtw89_dev *rtwdev = iter_data->rtwdev;
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.cur_pkt_stat;
+ struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
+ struct sk_buff *skb = iter_data->skb;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ const u8 *bssid = iter_data->bssid;
+
+ if (!ether_addr_equal(vif->bss_conf.bssid, bssid))
+ return;
+
+ if (ieee80211_is_beacon(hdr->frame_control))
+ pkt_stat->beacon_nr++;
+
+ if (!ether_addr_equal(vif->addr, hdr->addr1))
+ return;
+
+ if (desc_info->data_rate < RTW89_HW_RATE_NR)
+ pkt_stat->rx_rate_cnt[desc_info->data_rate]++;
+
+ rtw89_traffic_stats_accu(rtwdev, &rtwvif->stats, skb, false);
+}
+
+static void rtw89_core_rx_stats(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct rtw89_vif_rx_stats_iter_data iter_data;
+
+ rtw89_traffic_stats_accu(rtwdev, &rtwdev->stats, skb, false);
+
+ iter_data.rtwdev = rtwdev;
+ iter_data.phy_ppdu = phy_ppdu;
+ iter_data.desc_info = desc_info;
+ iter_data.skb = skb;
+ iter_data.bssid = get_hdr_bssid((struct ieee80211_hdr *)skb->data);
+ rtw89_iterate_vifs_bh(rtwdev, rtw89_vif_rx_stats_iter, &iter_data);
+}
+
+static void rtw89_core_rx_pending_skb(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
+ int curr = rtwdev->ppdu_sts.curr_rx_ppdu_cnt[band];
+ struct sk_buff *skb_ppdu = NULL, *tmp;
+ struct ieee80211_rx_status *rx_status;
+
+ if (curr > RTW89_MAX_PPDU_CNT)
+ return;
+
+ skb_queue_walk_safe(&rtwdev->ppdu_sts.rx_queue[band], skb_ppdu, tmp) {
+ skb_unlink(skb_ppdu, &rtwdev->ppdu_sts.rx_queue[band]);
+ rx_status = IEEE80211_SKB_RXCB(skb_ppdu);
+ if (rtw89_core_rx_ppdu_match(rtwdev, desc_info, rx_status))
+ rtw89_chip_query_ppdu(rtwdev, phy_ppdu, rx_status);
+ rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+ }
+}
+
+static void rtw89_core_rx_process_ppdu_sts(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct rtw89_rx_phy_ppdu phy_ppdu = {.buf = skb->data, .valid = false,
+ .len = skb->len,
+ .to_self = desc_info->addr1_match,
+ .mac_id = desc_info->mac_id};
+ int ret;
+
+ if (desc_info->mac_info_valid)
+ rtw89_core_rx_process_mac_ppdu(rtwdev, skb, &phy_ppdu);
+ ret = rtw89_core_rx_process_phy_ppdu(rtwdev, &phy_ppdu);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX, "process ppdu failed\n");
+
+ rtw89_core_rx_process_phy_sts(rtwdev, &phy_ppdu);
+ rtw89_core_rx_pending_skb(rtwdev, &phy_ppdu, desc_info, skb);
+ dev_kfree_skb_any(skb);
+}
+
+static void rtw89_core_rx_process_report(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ switch (desc_info->pkt_type) {
+ case RTW89_CORE_RX_TYPE_C2H:
+ rtw89_fw_c2h_irqsafe(rtwdev, skb);
+ break;
+ case RTW89_CORE_RX_TYPE_PPDU_STAT:
+ rtw89_core_rx_process_ppdu_sts(rtwdev, desc_info, skb);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX, "unhandled pkt_type=%d\n",
+ desc_info->pkt_type);
+ dev_kfree_skb_any(skb);
+ break;
+ }
+}
+
+void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ u8 *data, u32 data_offset)
+{
+ struct rtw89_rxdesc_short *rxd_s;
+ struct rtw89_rxdesc_long *rxd_l;
+ u8 shift_len, drv_info_len;
+
+ rxd_s = (struct rtw89_rxdesc_short *)(data + data_offset);
+ desc_info->pkt_size = RTW89_GET_RXWD_PKT_SIZE(rxd_s);
+ desc_info->drv_info_size = RTW89_GET_RXWD_DRV_INFO_SIZE(rxd_s);
+ desc_info->long_rxdesc = RTW89_GET_RXWD_LONG_RXD(rxd_s);
+ desc_info->pkt_type = RTW89_GET_RXWD_RPKT_TYPE(rxd_s);
+ desc_info->mac_info_valid = RTW89_GET_RXWD_MAC_INFO_VALID(rxd_s);
+ desc_info->bw = RTW89_GET_RXWD_BW(rxd_s);
+ desc_info->data_rate = RTW89_GET_RXWD_DATA_RATE(rxd_s);
+ desc_info->gi_ltf = RTW89_GET_RXWD_GI_LTF(rxd_s);
+ desc_info->user_id = RTW89_GET_RXWD_USER_ID(rxd_s);
+ desc_info->sr_en = RTW89_GET_RXWD_SR_EN(rxd_s);
+ desc_info->ppdu_cnt = RTW89_GET_RXWD_PPDU_CNT(rxd_s);
+ desc_info->ppdu_type = RTW89_GET_RXWD_PPDU_TYPE(rxd_s);
+ desc_info->free_run_cnt = RTW89_GET_RXWD_FREE_RUN_CNT(rxd_s);
+ desc_info->icv_err = RTW89_GET_RXWD_ICV_ERR(rxd_s);
+ desc_info->crc32_err = RTW89_GET_RXWD_CRC32_ERR(rxd_s);
+ desc_info->hw_dec = RTW89_GET_RXWD_HW_DEC(rxd_s);
+ desc_info->sw_dec = RTW89_GET_RXWD_SW_DEC(rxd_s);
+ desc_info->addr1_match = RTW89_GET_RXWD_A1_MATCH(rxd_s);
+
+ shift_len = desc_info->shift << 1; /* 2-byte unit */
+ drv_info_len = desc_info->drv_info_size << 3; /* 8-byte unit */
+ desc_info->offset = data_offset + shift_len + drv_info_len;
+ desc_info->ready = true;
+
+ if (!desc_info->long_rxdesc)
+ return;
+
+ rxd_l = (struct rtw89_rxdesc_long *)(data + data_offset);
+ desc_info->frame_type = RTW89_GET_RXWD_TYPE(rxd_l);
+ desc_info->addr_cam_valid = RTW89_GET_RXWD_ADDR_CAM_VLD(rxd_l);
+ desc_info->addr_cam_id = RTW89_GET_RXWD_ADDR_CAM_ID(rxd_l);
+ desc_info->sec_cam_id = RTW89_GET_RXWD_SEC_CAM_ID(rxd_l);
+ desc_info->mac_id = RTW89_GET_RXWD_MAC_ID(rxd_l);
+ desc_info->rx_pl_id = RTW89_GET_RXWD_RX_PL_ID(rxd_l);
+}
+EXPORT_SYMBOL(rtw89_core_query_rxdesc);
+
+struct rtw89_core_iter_rx_status {
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_rx_status *rx_status;
+ struct rtw89_rx_desc_info *desc_info;
+ u8 mac_id;
+};
+
+static
+void rtw89_core_stats_sta_rx_status_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_core_iter_rx_status *iter_data =
+ (struct rtw89_core_iter_rx_status *)data;
+ struct ieee80211_rx_status *rx_status = iter_data->rx_status;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_rx_desc_info *desc_info = iter_data->desc_info;
+ u8 mac_id = iter_data->mac_id;
+
+ if (mac_id != rtwsta->mac_id)
+ return;
+
+ rtwsta->rx_status = *rx_status;
+ rtwsta->rx_hw_rate = desc_info->data_rate;
+}
+
+static void rtw89_core_stats_sta_rx_status(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct rtw89_core_iter_rx_status iter_data;
+
+ if (!desc_info->addr1_match || !desc_info->long_rxdesc)
+ return;
+
+ if (desc_info->frame_type != RTW89_RX_TYPE_DATA)
+ return;
+
+ iter_data.rtwdev = rtwdev;
+ iter_data.rx_status = rx_status;
+ iter_data.desc_info = desc_info;
+ iter_data.mac_id = desc_info->mac_id;
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_core_stats_sta_rx_status_iter,
+ &iter_data);
+}
+
+static void rtw89_core_update_rx_status(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct ieee80211_rx_status *rx_status)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ u16 data_rate;
+ u8 data_rate_mode;
+
+ /* currently using single PHY */
+ rx_status->freq = hw->conf.chandef.chan->center_freq;
+ rx_status->band = hw->conf.chandef.chan->band;
+
+ if (desc_info->icv_err || desc_info->crc32_err)
+ rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
+
+ if (desc_info->hw_dec &&
+ !(desc_info->sw_dec || desc_info->icv_err))
+ rx_status->flag |= RX_FLAG_DECRYPTED;
+
+ if (desc_info->bw == RTW89_CHANNEL_WIDTH_80)
+ rx_status->bw = RATE_INFO_BW_80;
+ else if (desc_info->bw == RTW89_CHANNEL_WIDTH_40)
+ rx_status->bw = RATE_INFO_BW_40;
+ else
+ rx_status->bw = RATE_INFO_BW_20;
+
+ data_rate = desc_info->data_rate;
+ data_rate_mode = GET_DATA_RATE_MODE(data_rate);
+ if (data_rate_mode == DATA_RATE_MODE_NON_HT) {
+ rx_status->encoding = RX_ENC_LEGACY;
+ rx_status->rate_idx = GET_DATA_RATE_NOT_HT_IDX(data_rate);
+ /* No 4 CCK rates for 5G */
+ if (rx_status->band == NL80211_BAND_5GHZ)
+ rx_status->rate_idx -= 4;
+ if (rtwdev->scanning)
+ rx_status->rate_idx = min_t(u8, rx_status->rate_idx,
+ ARRAY_SIZE(rtw89_bitrates) - 5);
+ } else if (data_rate_mode == DATA_RATE_MODE_HT) {
+ rx_status->encoding = RX_ENC_HT;
+ rx_status->rate_idx = GET_DATA_RATE_HT_IDX(data_rate);
+ if (desc_info->gi_ltf)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ } else if (data_rate_mode == DATA_RATE_MODE_VHT) {
+ rx_status->encoding = RX_ENC_VHT;
+ rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
+ rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1;
+ if (desc_info->gi_ltf)
+ rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+ } else if (data_rate_mode == DATA_RATE_MODE_HE) {
+ rx_status->encoding = RX_ENC_HE;
+ rx_status->rate_idx = GET_DATA_RATE_VHT_HE_IDX(data_rate);
+ rx_status->nss = GET_DATA_RATE_NSS(data_rate) + 1;
+ } else {
+ rtw89_warn(rtwdev, "invalid RX rate mode %d\n", data_rate_mode);
+ }
+
+ /* he_gi is used to match ppdu, so we always fill it. */
+ rx_status->he_gi = rtw89_rxdesc_to_nl_he_gi(rtwdev, desc_info, true);
+ rx_status->flag |= RX_FLAG_MACTIME_START;
+ rx_status->mactime = desc_info->free_run_cnt;
+
+ rtw89_core_stats_sta_rx_status(rtwdev, desc_info, rx_status);
+}
+
+static enum rtw89_ps_mode rtw89_update_ps_mode(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (rtw89_disable_ps_mode || !chip->ps_mode_supported)
+ return RTW89_PS_MODE_NONE;
+
+ if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_PWR_GATED))
+ return RTW89_PS_MODE_PWR_GATED;
+
+ if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_CLK_GATED))
+ return RTW89_PS_MODE_CLK_GATED;
+
+ if (chip->ps_mode_supported & BIT(RTW89_PS_MODE_RFOFF))
+ return RTW89_PS_MODE_RFOFF;
+
+ return RTW89_PS_MODE_NONE;
+}
+
+static void rtw89_core_flush_ppdu_rx_queue(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info)
+{
+ struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
+ u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
+ struct sk_buff *skb_ppdu, *tmp;
+
+ skb_queue_walk_safe(&ppdu_sts->rx_queue[band], skb_ppdu, tmp) {
+ skb_unlink(skb_ppdu, &ppdu_sts->rx_queue[band]);
+ rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb_ppdu);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb_ppdu, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+ }
+}
+
+void rtw89_core_rx(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb)
+{
+ struct ieee80211_rx_status *rx_status;
+ struct rtw89_ppdu_sts_info *ppdu_sts = &rtwdev->ppdu_sts;
+ u8 ppdu_cnt = desc_info->ppdu_cnt;
+ u8 band = desc_info->bb_sel ? RTW89_PHY_1 : RTW89_PHY_0;
+
+ if (desc_info->pkt_type != RTW89_CORE_RX_TYPE_WIFI) {
+ rtw89_core_rx_process_report(rtwdev, desc_info, skb);
+ return;
+ }
+
+ if (ppdu_sts->curr_rx_ppdu_cnt[band] != ppdu_cnt) {
+ rtw89_core_flush_ppdu_rx_queue(rtwdev, desc_info);
+ ppdu_sts->curr_rx_ppdu_cnt[band] = ppdu_cnt;
+ }
+
+ rx_status = IEEE80211_SKB_RXCB(skb);
+ memset(rx_status, 0, sizeof(*rx_status));
+ rtw89_core_update_rx_status(rtwdev, desc_info, rx_status);
+ if (desc_info->long_rxdesc &&
+ BIT(desc_info->frame_type) & PPDU_FILTER_BITMAP) {
+ skb_queue_tail(&ppdu_sts->rx_queue[band], skb);
+ } else {
+ rtw89_core_rx_stats(rtwdev, NULL, desc_info, skb);
+ ieee80211_rx_napi(rtwdev->hw, NULL, skb, &rtwdev->napi);
+ rtwdev->napi_budget_countdown--;
+ }
+}
+EXPORT_SYMBOL(rtw89_core_rx);
+
+void rtw89_core_napi_start(struct rtw89_dev *rtwdev)
+{
+ if (test_and_set_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
+ return;
+
+ napi_enable(&rtwdev->napi);
+}
+EXPORT_SYMBOL(rtw89_core_napi_start);
+
+void rtw89_core_napi_stop(struct rtw89_dev *rtwdev)
+{
+ if (!test_and_clear_bit(RTW89_FLAG_NAPI_RUNNING, rtwdev->flags))
+ return;
+
+ napi_synchronize(&rtwdev->napi);
+ napi_disable(&rtwdev->napi);
+}
+EXPORT_SYMBOL(rtw89_core_napi_stop);
+
+void rtw89_core_napi_init(struct rtw89_dev *rtwdev)
+{
+ init_dummy_netdev(&rtwdev->netdev);
+ netif_napi_add(&rtwdev->netdev, &rtwdev->napi,
+ rtwdev->hci.ops->napi_poll, NAPI_POLL_WEIGHT);
+}
+EXPORT_SYMBOL(rtw89_core_napi_init);
+
+void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev)
+{
+ rtw89_core_napi_stop(rtwdev);
+ netif_napi_del(&rtwdev->napi);
+}
+EXPORT_SYMBOL(rtw89_core_napi_deinit);
+
+static void rtw89_core_ba_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev =
+ container_of(work, struct rtw89_dev, ba_work);
+ struct rtw89_txq *rtwtxq, *tmp;
+ int ret;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+ u8 tid = txq->tid;
+
+ if (!sta) {
+ rtw89_warn(rtwdev, "cannot start BA without sta\n");
+ goto skip_ba_work;
+ }
+
+ if (rtwsta->disassoc) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "cannot start BA with disassoc sta\n");
+ goto skip_ba_work;
+ }
+
+ ret = ieee80211_start_tx_ba_session(sta, tid, 0);
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "failed to setup BA session for %pM:%2d: %d\n",
+ sta->addr, tid, ret);
+ if (ret == -EINVAL)
+ set_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags);
+ }
+skip_ba_work:
+ list_del_init(&rtwtxq->list);
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_free_sta_pending_ba(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_txq *rtwtxq, *tmp;
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ list_for_each_entry_safe(rtwtxq, tmp, &rtwdev->ba_list, list) {
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+
+ if (sta == txq->sta)
+ list_del_init(&rtwtxq->list);
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_txq_check_agg(struct rtw89_dev *rtwdev,
+ struct rtw89_txq *rtwtxq,
+ struct sk_buff *skb)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ struct ieee80211_sta *sta = txq->sta;
+ struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+
+ if (unlikely(skb_get_queue_mapping(skb) == IEEE80211_AC_VO))
+ return;
+
+ if (unlikely(skb->protocol == cpu_to_be16(ETH_P_PAE)))
+ return;
+
+ if (unlikely(!sta))
+ return;
+
+ if (unlikely(test_bit(RTW89_TXQ_F_BLOCK_BA, &rtwtxq->flags)))
+ return;
+
+ if (test_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags)) {
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_AMPDU;
+ return;
+ }
+
+ spin_lock_bh(&rtwdev->ba_lock);
+ if (!rtwsta->disassoc && list_empty(&rtwtxq->list)) {
+ list_add_tail(&rtwtxq->list, &rtwdev->ba_list);
+ ieee80211_queue_work(hw, &rtwdev->ba_work);
+ }
+ spin_unlock_bh(&rtwdev->ba_lock);
+}
+
+static void rtw89_core_txq_push(struct rtw89_dev *rtwdev,
+ struct rtw89_txq *rtwtxq,
+ unsigned long frame_cnt,
+ unsigned long byte_cnt)
+{
+ struct ieee80211_txq *txq = rtw89_txq_to_txq(rtwtxq);
+ struct ieee80211_vif *vif = txq->vif;
+ struct ieee80211_sta *sta = txq->sta;
+ struct sk_buff *skb;
+ unsigned long i;
+ int ret;
+
+ for (i = 0; i < frame_cnt; i++) {
+ skb = ieee80211_tx_dequeue_ni(rtwdev->hw, txq);
+ if (!skb) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX, "dequeue a NULL skb\n");
+ return;
+ }
+ rtw89_core_txq_check_agg(rtwdev, rtwtxq, skb);
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, NULL);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to push txq: %d\n", ret);
+ ieee80211_free_txskb(rtwdev->hw, skb);
+ break;
+ }
+ }
+}
+
+static u32 rtw89_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 tid)
+{
+ u8 qsel, ch_dma;
+
+ qsel = rtw89_core_get_qsel(rtwdev, tid);
+ ch_dma = rtw89_core_get_ch_dma(rtwdev, qsel);
+
+ return rtw89_hci_check_and_reclaim_tx_resource(rtwdev, ch_dma);
+}
+
+static bool rtw89_core_txq_agg_wait(struct rtw89_dev *rtwdev,
+ struct ieee80211_txq *txq,
+ unsigned long *frame_cnt,
+ bool *sched_txq, bool *reinvoke)
+{
+ struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+ struct ieee80211_sta *sta = txq->sta;
+ struct rtw89_sta *rtwsta = sta ? (struct rtw89_sta *)sta->drv_priv : NULL;
+
+ if (!sta || rtwsta->max_agg_wait <= 0)
+ return false;
+
+ if (rtwdev->stats.tx_tfc_lv <= RTW89_TFC_MID)
+ return false;
+
+ if (*frame_cnt > 1) {
+ *frame_cnt -= 1;
+ *sched_txq = true;
+ *reinvoke = true;
+ rtwtxq->wait_cnt = 1;
+ return false;
+ }
+
+ if (*frame_cnt == 1 && rtwtxq->wait_cnt < rtwsta->max_agg_wait) {
+ *reinvoke = true;
+ rtwtxq->wait_cnt++;
+ return true;
+ }
+
+ rtwtxq->wait_cnt = 0;
+ return false;
+}
+
+static void rtw89_core_txq_schedule(struct rtw89_dev *rtwdev, u8 ac, bool *reinvoke)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct ieee80211_txq *txq;
+ struct rtw89_txq *rtwtxq;
+ unsigned long frame_cnt;
+ unsigned long byte_cnt;
+ u32 tx_resource;
+ bool sched_txq;
+
+ ieee80211_txq_schedule_start(hw, ac);
+ while ((txq = ieee80211_next_txq(hw, ac))) {
+ rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+ tx_resource = rtw89_check_and_reclaim_tx_resource(rtwdev, txq->tid);
+ sched_txq = false;
+
+ ieee80211_txq_get_depth(txq, &frame_cnt, &byte_cnt);
+ if (rtw89_core_txq_agg_wait(rtwdev, txq, &frame_cnt, &sched_txq, reinvoke)) {
+ ieee80211_return_txq(hw, txq, true);
+ continue;
+ }
+ frame_cnt = min_t(unsigned long, frame_cnt, tx_resource);
+ rtw89_core_txq_push(rtwdev, rtwtxq, frame_cnt, byte_cnt);
+ ieee80211_return_txq(hw, txq, sched_txq);
+ if (frame_cnt != 0)
+ rtw89_core_tx_kick_off(rtwdev, rtw89_core_get_qsel(rtwdev, txq->tid));
+ }
+ ieee80211_txq_schedule_end(hw, ac);
+}
+
+static void rtw89_core_txq_work(struct work_struct *w)
+{
+ struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev, txq_work);
+ bool reinvoke = false;
+ u8 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ rtw89_core_txq_schedule(rtwdev, ac, &reinvoke);
+
+ if (reinvoke) {
+ /* reinvoke to process the last frame */
+ mod_delayed_work(rtwdev->txq_wq, &rtwdev->txq_reinvoke_work, 1);
+ }
+}
+
+static void rtw89_core_txq_reinvoke_work(struct work_struct *w)
+{
+ struct rtw89_dev *rtwdev = container_of(w, struct rtw89_dev,
+ txq_reinvoke_work.work);
+
+ queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
+}
+
+static enum rtw89_tfc_lv rtw89_get_traffic_level(struct rtw89_dev *rtwdev,
+ u32 throughput, u64 cnt)
+{
+ if (cnt < 100)
+ return RTW89_TFC_IDLE;
+ if (throughput > 50)
+ return RTW89_TFC_HIGH;
+ if (throughput > 10)
+ return RTW89_TFC_MID;
+ if (throughput > 2)
+ return RTW89_TFC_LOW;
+ return RTW89_TFC_ULTRA_LOW;
+}
+
+static bool rtw89_traffic_stats_calc(struct rtw89_dev *rtwdev,
+ struct rtw89_traffic_stats *stats)
+{
+ enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
+ enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
+
+ stats->tx_throughput_raw = (u32)(stats->tx_unicast >> RTW89_TP_SHIFT);
+ stats->rx_throughput_raw = (u32)(stats->rx_unicast >> RTW89_TP_SHIFT);
+
+ ewma_tp_add(&stats->tx_ewma_tp, stats->tx_throughput_raw);
+ ewma_tp_add(&stats->rx_ewma_tp, stats->rx_throughput_raw);
+
+ stats->tx_throughput = ewma_tp_read(&stats->tx_ewma_tp);
+ stats->rx_throughput = ewma_tp_read(&stats->rx_ewma_tp);
+ stats->tx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->tx_throughput,
+ stats->tx_cnt);
+ stats->rx_tfc_lv = rtw89_get_traffic_level(rtwdev, stats->rx_throughput,
+ stats->rx_cnt);
+ stats->tx_avg_len = stats->tx_cnt ?
+ DIV_ROUND_DOWN_ULL(stats->tx_unicast, stats->tx_cnt) : 0;
+ stats->rx_avg_len = stats->rx_cnt ?
+ DIV_ROUND_DOWN_ULL(stats->rx_unicast, stats->rx_cnt) : 0;
+
+ stats->tx_unicast = 0;
+ stats->rx_unicast = 0;
+ stats->tx_cnt = 0;
+ stats->rx_cnt = 0;
+
+ if (tx_tfc_lv != stats->tx_tfc_lv || rx_tfc_lv != stats->rx_tfc_lv)
+ return true;
+
+ return false;
+}
+
+static bool rtw89_traffic_stats_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+ bool tfc_changed;
+
+ tfc_changed = rtw89_traffic_stats_calc(rtwdev, &rtwdev->stats);
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_traffic_stats_calc(rtwdev, &rtwvif->stats);
+
+ return tfc_changed;
+}
+
+static void rtw89_vif_enter_lps(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ if (rtwvif->stats.tx_tfc_lv == RTW89_TFC_IDLE &&
+ rtwvif->stats.rx_tfc_lv == RTW89_TFC_IDLE)
+ rtw89_enter_lps(rtwdev, rtwvif->mac_id);
+}
+
+static void rtw89_enter_lps_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_vif_enter_lps(rtwdev, rtwvif);
+}
+
+void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
+ struct rtw89_traffic_stats *stats)
+{
+ stats->tx_unicast = 0;
+ stats->rx_unicast = 0;
+ stats->tx_cnt = 0;
+ stats->rx_cnt = 0;
+ ewma_tp_init(&stats->tx_ewma_tp);
+ ewma_tp_init(&stats->rx_ewma_tp);
+}
+
+static void rtw89_track_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ track_work.work);
+ bool tfc_changed;
+
+ mutex_lock(&rtwdev->mutex);
+
+ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ goto out;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
+
+ tfc_changed = rtw89_traffic_stats_track(rtwdev);
+ if (rtwdev->scanning)
+ goto out;
+
+ rtw89_leave_lps(rtwdev);
+
+ if (tfc_changed) {
+ rtw89_hci_recalc_int_mit(rtwdev);
+ rtw89_btc_ntfy_wl_sta(rtwdev);
+ }
+ rtw89_mac_bf_monitor_track(rtwdev);
+ rtw89_phy_stat_track(rtwdev);
+ rtw89_phy_env_monitor_track(rtwdev);
+ rtw89_phy_dig(rtwdev);
+ rtw89_chip_rfk_track(rtwdev);
+ rtw89_phy_ra_update(rtwdev);
+ rtw89_phy_cfo_track(rtwdev);
+
+ if (rtwdev->lps_enabled && !rtwdev->btc.lps)
+ rtw89_enter_lps_track(rtwdev);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
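+/* find and claim the first free bit; returns @size if the map is full */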
+u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size)
+{
+ unsigned long bit;
+
+ bit = find_first_zero_bit(addr, size);
+ if (bit < size)
+ set_bit(bit, addr);
+
+ return bit;
+}
+
+void rtw89_core_release_bit_map(unsigned long *addr, u8 bit)
+{
+ clear_bit(bit, addr);
+}
+
+void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits)
+{
+ bitmap_zero(addr, nbits);
+}
+
+#define RTW89_TYPE_MAPPING(_type) \
+ case NL80211_IFTYPE_ ## _type: \
+ rtwvif->wifi_role = RTW89_WIFI_ROLE_ ## _type; \
+ break
+void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ switch (vif->type) {
+ RTW89_TYPE_MAPPING(ADHOC);
+ RTW89_TYPE_MAPPING(STATION);
+ RTW89_TYPE_MAPPING(AP);
+ RTW89_TYPE_MAPPING(MONITOR);
+ RTW89_TYPE_MAPPING(MESH_POINT);
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_MESH_POINT:
+ rtwvif->net_type = RTW89_NET_TYPE_AP_MODE;
+ rtwvif->self_role = RTW89_SELF_ROLE_AP;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ rtwvif->net_type = RTW89_NET_TYPE_AD_HOC;
+ rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
+ break;
+ case NL80211_IFTYPE_STATION:
+ if (assoc) {
+ rtwvif->net_type = RTW89_NET_TYPE_INFRA;
+ rtwvif->trigger = vif->bss_conf.he_support;
+ } else {
+ rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
+ rtwvif->trigger = false;
+ }
+ rtwvif->self_role = RTW89_SELF_ROLE_CLIENT;
+ rtwvif->addr_cam.sec_ent_mode = RTW89_ADDR_CAM_SEC_NORMAL;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ int i;
+
+ rtwsta->rtwvif = rtwvif;
+ rtwsta->prev_rssi = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
+ rtw89_core_txq_init(rtwdev, sta->txq[i]);
+
+ ewma_rssi_init(&rtwsta->avg_rssi);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtwvif->mgd.ap = sta;
+ rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
+ BTC_ROLE_MSTS_STA_CONN_START);
+ rtw89_chip_rfk_channel(rtwdev);
+ }
+
+ return 0;
+}
+
+int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ rtwdev->total_sta_assoc--;
+ rtwsta->disassoc = true;
+
+ return 0;
+}
+
+int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret;
+
+ rtw89_mac_bf_monitor_calc(rtwdev, sta, true);
+ rtw89_mac_bf_disassoc(rtwdev, vif, sta);
+ rtw89_core_free_sta_pending_ba(rtwdev, sta);
+
+ rtw89_vif_type_mapping(vif, false);
+
+ ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 1);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ return ret;
+ }
+
+ /* update CAM aid, mac_id and net_type */
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ int ret;
+
+ rtw89_vif_type_mapping(vif, true);
+
+ ret = rtw89_fw_h2c_assoc_cmac_tbl(rtwdev, vif, sta);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cmac table\n");
+ return ret;
+ }
+
+ /* in station mode, the STA reuses the vif's own mac_id */
+ if (vif->type == NL80211_IFTYPE_STATION)
+ rtwsta->mac_id = rtwvif->mac_id;
+
+ ret = rtw89_fw_h2c_join_info(rtwdev, rtwvif, 0);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c join info\n");
+ return ret;
+ }
+
+ /* update CAM aid, mac_id and net_type */
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c cam\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_h2c_general_pkt(rtwdev, rtwsta->mac_id);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to send h2c general packet\n");
+ return ret;
+ }
+
+ rtwdev->total_sta_assoc++;
+ rtw89_phy_ra_assoc(rtwdev, sta);
+ rtw89_mac_bf_assoc(rtwdev, vif, sta);
+ rtw89_mac_bf_monitor_calc(rtwdev, sta, false);
+
+ if (vif->type == NL80211_IFTYPE_STATION) {
+ rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
+ BTC_ROLE_MSTS_STA_CONN_END);
+ rtw89_core_get_no_ul_ofdma_htc(rtwdev, &rtwsta->htc_template);
+ }
+
+ return ret;
+}
+
+int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ if (vif->type == NL80211_IFTYPE_STATION)
+ rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta,
+ BTC_ROLE_MSTS_STA_DIS_CONN);
+
+ return 0;
+}
+
+static void rtw89_init_ht_cap(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ static const __le16 highest[RF_PATH_MAX] = {
+ cpu_to_le16(150), cpu_to_le16(300), cpu_to_le16(450), cpu_to_le16(600),
+ };
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u8 nss = hal->rx_nss;
+ int i;
+
+ ht_cap->ht_supported = true;
+ ht_cap->cap = 0;
+ ht_cap->cap |= IEEE80211_HT_CAP_SGI_20 |
+ IEEE80211_HT_CAP_MAX_AMSDU |
+ IEEE80211_HT_CAP_TX_STBC |
+ (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT);
+ ht_cap->cap |= IEEE80211_HT_CAP_LDPC_CODING;
+ ht_cap->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_DSSSCCK40 |
+ IEEE80211_HT_CAP_SGI_40;
+ ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ for (i = 0; i < nss; i++)
+ ht_cap->mcs.rx_mask[i] = 0xFF;
+ ht_cap->mcs.rx_mask[4] = 0x01;
+ ht_cap->mcs.rx_highest = highest[nss - 1];
+}
+
+static void rtw89_init_vht_cap(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta_vht_cap *vht_cap)
+{
+ static const __le16 highest[RF_PATH_MAX] = {
+ cpu_to_le16(433), cpu_to_le16(867), cpu_to_le16(1300), cpu_to_le16(1733),
+ };
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u16 tx_mcs_map = 0, rx_mcs_map = 0;
+ u8 sts_cap = 3;
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (i < hal->tx_nss)
+ tx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ tx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ if (i < hal->rx_nss)
+ rx_mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
+ else
+ rx_mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ vht_cap->vht_supported = true;
+ vht_cap->cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+ IEEE80211_VHT_CAP_SHORT_GI_80 |
+ IEEE80211_VHT_CAP_RXSTBC_1 |
+ IEEE80211_VHT_CAP_HTC_VHT |
+ IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK |
+ 0;
+ vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
+ vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
+ vht_cap->cap |= IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
+ IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ vht_cap->cap |= sts_cap << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
+ vht_cap->vht_mcs.rx_mcs_map = cpu_to_le16(rx_mcs_map);
+ vht_cap->vht_mcs.tx_mcs_map = cpu_to_le16(tx_mcs_map);
+ vht_cap->vht_mcs.rx_highest = highest[hal->rx_nss - 1];
+ vht_cap->vht_mcs.tx_highest = highest[hal->tx_nss - 1];
+}
+
+#define RTW89_SBAND_IFTYPES_NR 2
+
+static void rtw89_init_he_cap(struct rtw89_dev *rtwdev,
+ enum nl80211_band band,
+ struct ieee80211_supported_band *sband)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_sband_iftype_data *iftype_data;
+ bool no_ng16 = (chip->chip_id == RTL8852A && hal->cv == CHIP_CBV) ||
+ (chip->chip_id == RTL8852B && hal->cv == CHIP_CAV);
+ u16 mcs_map = 0;
+ int i;
+ int nss = hal->rx_nss;
+ int idx = 0;
+
+ iftype_data = kcalloc(RTW89_SBAND_IFTYPES_NR, sizeof(*iftype_data), GFP_KERNEL);
+ if (!iftype_data)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ if (i < nss)
+ mcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+ else
+ mcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+ }
+
+ for (i = 0; i < NUM_NL80211_IFTYPES; i++) {
+ struct ieee80211_sta_he_cap *he_cap;
+ u8 *mac_cap_info;
+ u8 *phy_cap_info;
+
+ switch (i) {
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_AP:
+ break;
+ default:
+ continue;
+ }
+
+ if (idx >= RTW89_SBAND_IFTYPES_NR) {
+ rtw89_warn(rtwdev, "run out of iftype_data\n");
+ break;
+ }
+
+ iftype_data[idx].types_mask = BIT(i);
+ he_cap = &iftype_data[idx].he_cap;
+ mac_cap_info = he_cap->he_cap_elem.mac_cap_info;
+ phy_cap_info = he_cap->he_cap_elem.phy_cap_info;
+
+ he_cap->has_he = true;
+ if (i == NL80211_IFTYPE_AP)
+ mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE;
+ if (i == NL80211_IFTYPE_STATION)
+ mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US;
+ mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ALL_ACK |
+ IEEE80211_HE_MAC_CAP2_BSR;
+ mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_2;
+ if (i == NL80211_IFTYPE_AP)
+ mac_cap_info[3] |= IEEE80211_HE_MAC_CAP3_OMI_CONTROL;
+ mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_OPS |
+ IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU;
+ if (i == NL80211_IFTYPE_STATION)
+ mac_cap_info[5] = IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX;
+ phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G;
+ phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
+ IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US;
+ phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
+ IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ |
+ IEEE80211_HE_PHY_CAP2_DOPPLER_TX;
+ phy_cap_info[3] = IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM;
+ if (i == NL80211_IFTYPE_STATION)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM |
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2;
+ if (i == NL80211_IFTYPE_AP)
+ phy_cap_info[3] |= IEEE80211_HE_PHY_CAP3_RX_PARTIAL_BW_SU_IN_20MHZ_MU;
+ phy_cap_info[4] = IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
+ IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4;
+ phy_cap_info[5] = no_ng16 ? 0 :
+ IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK |
+ IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK;
+ phy_cap_info[6] = IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU |
+ IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU |
+ IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMING_FB |
+ IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE;
+ phy_cap_info[7] = IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_SUPP |
+ IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP7_MAX_NC_1;
+ phy_cap_info[8] = IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI |
+ IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996;
+ phy_cap_info[9] = IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM |
+ IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB |
+ IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US;
+ if (i == NL80211_IFTYPE_STATION)
+ phy_cap_info[9] |= IEEE80211_HE_PHY_CAP9_TX_1024_QAM_LESS_THAN_242_TONE_RU;
+ he_cap->he_mcs_nss_supp.rx_mcs_80 = cpu_to_le16(mcs_map);
+ he_cap->he_mcs_nss_supp.tx_mcs_80 = cpu_to_le16(mcs_map);
+
+ idx++;
+ }
+
+ sband->iftype_data = iftype_data;
+ sband->n_iftype_data = idx;
+}
+
+static int rtw89_core_set_supported_band(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct ieee80211_supported_band *sband_2ghz = NULL, *sband_5ghz = NULL;
+ u32 size = sizeof(struct ieee80211_supported_band);
+
+ sband_2ghz = kmemdup(&rtw89_sband_2ghz, size, GFP_KERNEL);
+ if (!sband_2ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_2ghz->ht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_2GHZ, sband_2ghz);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = sband_2ghz;
+
+ sband_5ghz = kmemdup(&rtw89_sband_5ghz, size, GFP_KERNEL);
+ if (!sband_5ghz)
+ goto err;
+ rtw89_init_ht_cap(rtwdev, &sband_5ghz->ht_cap);
+ rtw89_init_vht_cap(rtwdev, &sband_5ghz->vht_cap);
+ rtw89_init_he_cap(rtwdev, NL80211_BAND_5GHZ, sband_5ghz);
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = sband_5ghz;
+
+ return 0;
+
+err:
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+ if (sband_2ghz)
+ kfree(sband_2ghz->iftype_data);
+ if (sband_5ghz)
+ kfree(sband_5ghz->iftype_data);
+ kfree(sband_2ghz);
+ kfree(sband_5ghz);
+ return -ENOMEM;
+}
+
+static void rtw89_core_clr_supported_band(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+
+ kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]->iftype_data);
+ kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]->iftype_data);
+ kfree(hw->wiphy->bands[NL80211_BAND_2GHZ]);
+ kfree(hw->wiphy->bands[NL80211_BAND_5GHZ]);
+ hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL;
+ hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL;
+}
+
+static void rtw89_core_ppdu_sts_init(struct rtw89_dev *rtwdev)
+{
+ int i;
+
+ for (i = 0; i < RTW89_PHY_MAX; i++)
+ skb_queue_head_init(&rtwdev->ppdu_sts.rx_queue[i]);
+ for (i = 0; i < RTW89_PHY_MAX; i++)
+ rtwdev->ppdu_sts.curr_rx_ppdu_cnt[i] = U8_MAX;
+}
+
+int rtw89_core_start(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtwdev->mac.qta_mode = RTW89_QTA_SCC;
+ ret = rtw89_mac_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "mac init fail, ret:%d\n", ret);
+ return ret;
+ }
+
+ rtw89_btc_ntfy_poweron(rtwdev);
+
+ /* efuse process */
+
+ /* pre-config BB/RF, BB reset/RFC reset */
+ rtw89_mac_disable_bb_rf(rtwdev);
+ rtw89_mac_enable_bb_rf(rtwdev);
+ rtw89_phy_init_bb_reg(rtwdev);
+ rtw89_phy_init_rf_reg(rtwdev);
+
+ rtw89_btc_ntfy_init(rtwdev, BTC_MODE_NORMAL);
+
+ rtw89_phy_dm_init(rtwdev);
+
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0);
+
+ ret = rtw89_hci_start(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to start hci\n");
+ return ret;
+ }
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
+ RTW89_TRACK_WORK_PERIOD);
+
+ set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+
+ rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+ rtw89_fw_h2c_fw_log(rtwdev, rtwdev->fw.fw_log_enable);
+
+ return 0;
+}
+
+void rtw89_core_stop(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+
+ /* Prevent stopping twice: enter_ips and ops_stop */
+ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_OFF);
+
+ clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+
+ mutex_unlock(&rtwdev->mutex);
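+ /* Note: the mutex is dropped across the cancel_*work_sync() calls
+ * below, presumably so that work items which themselves take
+ * rtwdev->mutex can run to completion without deadlocking here.
+ */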
+
+ cancel_work_sync(&rtwdev->c2h_work);
+ cancel_work_sync(&btc->eapol_notify_work);
+ cancel_work_sync(&btc->arp_notify_work);
+ cancel_work_sync(&btc->dhcp_notify_work);
+ cancel_work_sync(&btc->icmp_notify_work);
+ cancel_delayed_work_sync(&rtwdev->txq_reinvoke_work);
+ cancel_delayed_work_sync(&rtwdev->track_work);
+ cancel_delayed_work_sync(&rtwdev->coex_act1_work);
+ cancel_delayed_work_sync(&rtwdev->coex_bt_devinfo_work);
+ cancel_delayed_work_sync(&rtwdev->coex_rfk_chk_work);
+ cancel_delayed_work_sync(&rtwdev->cfo_track_work);
+
+ mutex_lock(&rtwdev->mutex);
+
+ rtw89_btc_ntfy_poweroff(rtwdev);
+ rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
+ rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, true);
+ rtw89_hci_stop(rtwdev);
+ rtw89_hci_deinit(rtwdev);
+ rtw89_mac_pwr_off(rtwdev);
+ rtw89_hci_reset(rtwdev);
+}
+
+int rtw89_core_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ int ret;
+
+ INIT_LIST_HEAD(&rtwdev->ba_list);
+ INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
+ INIT_LIST_HEAD(&rtwdev->early_h2c_list);
+ INIT_WORK(&rtwdev->ba_work, rtw89_core_ba_work);
+ INIT_WORK(&rtwdev->txq_work, rtw89_core_txq_work);
+ INIT_DELAYED_WORK(&rtwdev->txq_reinvoke_work, rtw89_core_txq_reinvoke_work);
+ INIT_DELAYED_WORK(&rtwdev->track_work, rtw89_track_work);
+ INIT_DELAYED_WORK(&rtwdev->coex_act1_work, rtw89_coex_act1_work);
+ INIT_DELAYED_WORK(&rtwdev->coex_bt_devinfo_work, rtw89_coex_bt_devinfo_work);
+ INIT_DELAYED_WORK(&rtwdev->coex_rfk_chk_work, rtw89_coex_rfk_chk_work);
+ INIT_DELAYED_WORK(&rtwdev->cfo_track_work, rtw89_phy_cfo_track_work);
+ rtwdev->txq_wq = alloc_workqueue("rtw89_tx_wq", WQ_UNBOUND | WQ_HIGHPRI, 0);
+ spin_lock_init(&rtwdev->ba_lock);
+ mutex_init(&rtwdev->mutex);
+ mutex_init(&rtwdev->rf_mutex);
+ rtwdev->total_sta_assoc = 0;
+
+ INIT_WORK(&rtwdev->c2h_work, rtw89_fw_c2h_work);
+ skb_queue_head_init(&rtwdev->c2h_queue);
+ rtw89_core_ppdu_sts_init(rtwdev);
+ rtw89_traffic_stats_init(rtwdev, &rtwdev->stats);
+
+ rtwdev->ps_mode = rtw89_update_ps_mode(rtwdev);
+ rtwdev->hal.rx_fltr = DEFAULT_AX_RX_FLTR;
+
+ INIT_WORK(&btc->eapol_notify_work, rtw89_btc_ntfy_eapol_packet_work);
+ INIT_WORK(&btc->arp_notify_work, rtw89_btc_ntfy_arp_packet_work);
+ INIT_WORK(&btc->dhcp_notify_work, rtw89_btc_ntfy_dhcp_packet_work);
+ INIT_WORK(&btc->icmp_notify_work, rtw89_btc_ntfy_icmp_packet_work);
+
+ ret = rtw89_load_firmware(rtwdev);
+ if (ret) {
+ rtw89_warn(rtwdev, "no firmware loaded\n");
+ return ret;
+ }
+ rtw89_ser_init(rtwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_core_init);
+
+void rtw89_core_deinit(struct rtw89_dev *rtwdev)
+{
+ rtw89_ser_deinit(rtwdev);
+ rtw89_unload_firmware(rtwdev);
+ rtw89_fw_free_all_early_h2c(rtwdev);
+
+ destroy_workqueue(rtwdev->txq_wq);
+ mutex_destroy(&rtwdev->rf_mutex);
+ mutex_destroy(&rtwdev->mutex);
+}
+EXPORT_SYMBOL(rtw89_core_deinit);
+
+static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev)
+{
+ u8 cv;
+
+ cv = rtw89_read32_mask(rtwdev, R_AX_SYS_CFG1, B_AX_CHIP_VER_MASK);
+ if (cv <= CHIP_CBV) {
+ if (rtw89_read32(rtwdev, R_AX_GPIO0_7_FUNC_SEL) == RTW89_R32_DEAD)
+ cv = CHIP_CAV;
+ else
+ cv = CHIP_CBV;
+ }
+
+ rtwdev->hal.cv = cv;
+}
+
+static int rtw89_chip_efuse_info_setup(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_partial_init(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw89_parse_efuse_map(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw89_parse_phycap_map(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_setup_phycap(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_mac_pwr_off(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_chip_board_info_setup(struct rtw89_dev *rtwdev)
+{
+ rtw89_chip_fem_setup(rtwdev);
+
+ return 0;
+}
+
+int rtw89_chip_info_setup(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_read_chip_ver(rtwdev);
+
+ ret = rtw89_wait_firmware_completion(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to wait firmware completion\n");
+ return ret;
+ }
+
+ ret = rtw89_fw_recognize(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to recognize firmware\n");
+ return ret;
+ }
+
+ ret = rtw89_chip_efuse_info_setup(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw89_chip_board_info_setup(rtwdev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_chip_info_setup);
+
+static int rtw89_core_register_hw(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ int ret;
+ int tx_headroom = IEEE80211_HT_CTL_LEN;
+
+ hw->vif_data_size = sizeof(struct rtw89_vif);
+ hw->sta_data_size = sizeof(struct rtw89_sta);
+ hw->txq_data_size = sizeof(struct rtw89_txq);
+
+ SET_IEEE80211_PERM_ADDR(hw, efuse->addr);
+
+ hw->extra_tx_headroom = tx_headroom;
+ hw->queues = IEEE80211_NUM_ACS;
+ hw->max_rx_aggregation_subframes = RTW89_MAX_RX_AGG_NUM;
+ hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM;
+
+ ieee80211_hw_set(hw, SIGNAL_DBM);
+ ieee80211_hw_set(hw, HAS_RATE_CONTROL);
+ ieee80211_hw_set(hw, MFP_CAPABLE);
+ ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
+ ieee80211_hw_set(hw, AMPDU_AGGREGATION);
+ ieee80211_hw_set(hw, RX_INCLUDES_FCS);
+ ieee80211_hw_set(hw, TX_AMSDU);
+ ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
+ ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
+ ieee80211_hw_set(hw, SUPPORTS_PS);
+ ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
+
+ hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ hw->wiphy->available_antennas_tx = BIT(rtwdev->chip->rf_path_num) - 1;
+ hw->wiphy->available_antennas_rx = BIT(rtwdev->chip->rf_path_num) - 1;
+
+ hw->wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
+
+ wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CAN_REPLACE_PTK0);
+
+ ret = rtw89_core_set_supported_band(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to set supported band\n");
+ return ret;
+ }
+
+ hw->wiphy->reg_notifier = rtw89_regd_notifier;
+ hw->wiphy->sar_capa = &rtw89_sar_capa;
+
+ ret = ieee80211_register_hw(hw);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to register hw\n");
+ goto err;
+ }
+
+ ret = rtw89_regd_init(rtwdev, rtw89_regd_notifier);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init regd\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static void rtw89_core_unregister_hw(struct rtw89_dev *rtwdev)
+{
+ struct ieee80211_hw *hw = rtwdev->hw;
+
+ ieee80211_unregister_hw(hw);
+ rtw89_core_clr_supported_band(rtwdev);
+}
+
+int rtw89_core_register(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_core_register_hw(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to register core hw\n");
+ return ret;
+ }
+
+ rtw89_debugfs_init(rtwdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_core_register);
+
+void rtw89_core_unregister(struct rtw89_dev *rtwdev)
+{
+ rtw89_core_unregister_hw(rtwdev);
+}
+EXPORT_SYMBOL(rtw89_core_unregister);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless core module");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h
new file mode 100644
index 000000000000..c2885e4dd882
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/core.h
@@ -0,0 +1,3384 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_CORE_H__
+#define __RTW89_CORE_H__
+
+#include <linux/average.h>
+#include <linux/bitfield.h>
+#include <linux/firmware.h>
+#include <linux/iopoll.h>
+#include <linux/workqueue.h>
+#include <net/mac80211.h>
+
+struct rtw89_dev;
+
+extern const struct ieee80211_ops rtw89_ops;
+extern const struct rtw89_chip_info rtw8852a_chip_info;
+
+#define MASKBYTE0 0xff
+#define MASKBYTE1 0xff00
+#define MASKBYTE2 0xff0000
+#define MASKBYTE3 0xff000000
+#define MASKBYTE4 0xff00000000ULL
+#define MASKHWORD 0xffff0000
+#define MASKLWORD 0x0000ffff
+#define MASKDWORD 0xffffffff
+#define RFREG_MASK 0xfffff
+#define INV_RF_DATA 0xffffffff
+
+#define RTW89_TRACK_WORK_PERIOD round_jiffies_relative(HZ * 2)
+#define CFO_TRACK_MAX_USER 64
+#define MAX_RSSI 110
+#define RSSI_FACTOR 1
+#define RTW89_RSSI_RAW_TO_DBM(rssi) ((s8)((rssi) >> RSSI_FACTOR) - MAX_RSSI)
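+/* Worked example: with RSSI_FACTOR = 1, a raw value of 120 converts to
+ * (120 >> 1) - 110 = -50 dBm, and raw 0 maps to the -110 dBm floor.
+ */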
+#define RTW89_MAX_HW_PORT_NUM 5
+
+#define RTW89_HTC_MASK_VARIANT GENMASK(1, 0)
+#define RTW89_HTC_VARIANT_HE 3
+#define RTW89_HTC_MASK_CTL_ID GENMASK(5, 2)
+#define RTW89_HTC_VARIANT_HE_CID_OM 1
+#define RTW89_HTC_VARIANT_HE_CID_CAS 6
+#define RTW89_HTC_MASK_CTL_INFO GENMASK(31, 6)
+
+#define RTW89_HTC_MASK_HTC_OM_RX_NSS GENMASK(8, 6)
+enum htc_om_channel_width {
+ HTC_OM_CHANNEL_WIDTH_20 = 0,
+ HTC_OM_CHANNEL_WIDTH_40 = 1,
+ HTC_OM_CHANNEL_WIDTH_80 = 2,
+ HTC_OM_CHANNEL_WIDTH_160_OR_80_80 = 3,
+};
+#define RTW89_HTC_MASK_HTC_OM_CH_WIDTH GENMASK(10, 9)
+#define RTW89_HTC_MASK_HTC_OM_UL_MU_DIS BIT(11)
+#define RTW89_HTC_MASK_HTC_OM_TX_NSTS GENMASK(14, 12)
+#define RTW89_HTC_MASK_HTC_OM_ER_SU_DIS BIT(15)
+#define RTW89_HTC_MASK_HTC_OM_DL_MU_MIMO_RR BIT(16)
+#define RTW89_HTC_MASK_HTC_OM_UL_MU_DATA_DIS BIT(17)
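+/* Illustrative sketch (not taken from the driver itself): an HE variant
+ * HT Control field carrying an OM Control subfield can be composed from
+ * the masks above roughly as
+ *
+ *	__le32 htc = le32_encode_bits(RTW89_HTC_VARIANT_HE, RTW89_HTC_MASK_VARIANT) |
+ *		     le32_encode_bits(RTW89_HTC_VARIANT_HE_CID_OM, RTW89_HTC_MASK_CTL_ID) |
+ *		     le32_encode_bits(om, RTW89_HTC_MASK_CTL_INFO);
+ *
+ * where "om" is a placeholder value packed from the RTW89_HTC_MASK_HTC_OM_*
+ * fields (RX NSS, channel width, UL MU disable, ...).
+ */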
+
+enum rtw89_subband {
+ RTW89_CH_2G = 0,
+ RTW89_CH_5G_BAND_1 = 1,
+ /* RTW89_CH_5G_BAND_2 = 2, unused */
+ RTW89_CH_5G_BAND_3 = 3,
+ RTW89_CH_5G_BAND_4 = 4,
+
+ RTW89_SUBBAND_NR,
+};
+
+enum rtw89_hci_type {
+ RTW89_HCI_TYPE_PCIE,
+ RTW89_HCI_TYPE_USB,
+ RTW89_HCI_TYPE_SDIO,
+};
+
+enum rtw89_core_chip_id {
+ RTL8852A,
+ RTL8852B,
+ RTL8852C,
+};
+
+enum rtw89_cv {
+ CHIP_CAV,
+ CHIP_CBV,
+ CHIP_CCV,
+ CHIP_CDV,
+ CHIP_CEV,
+ CHIP_CFV,
+ CHIP_CV_MAX,
+ CHIP_CV_INVALID = CHIP_CV_MAX,
+};
+
+enum rtw89_core_tx_type {
+ RTW89_CORE_TX_TYPE_DATA,
+ RTW89_CORE_TX_TYPE_MGMT,
+ RTW89_CORE_TX_TYPE_FWCMD,
+};
+
+enum rtw89_core_rx_type {
+ RTW89_CORE_RX_TYPE_WIFI = 0,
+ RTW89_CORE_RX_TYPE_PPDU_STAT = 1,
+ RTW89_CORE_RX_TYPE_CHAN_INFO = 2,
+ RTW89_CORE_RX_TYPE_BB_SCOPE = 3,
+ RTW89_CORE_RX_TYPE_F2P_TXCMD = 4,
+ RTW89_CORE_RX_TYPE_SS2FW = 5,
+ RTW89_CORE_RX_TYPE_TX_REPORT = 6,
+ RTW89_CORE_RX_TYPE_TX_REL_HOST = 7,
+ RTW89_CORE_RX_TYPE_DFS_REPORT = 8,
+ RTW89_CORE_RX_TYPE_TX_REL_CPU = 9,
+ RTW89_CORE_RX_TYPE_C2H = 10,
+ RTW89_CORE_RX_TYPE_CSI = 11,
+ RTW89_CORE_RX_TYPE_CQI = 12,
+};
+
+enum rtw89_txq_flags {
+ RTW89_TXQ_F_AMPDU = 0,
+ RTW89_TXQ_F_BLOCK_BA = 1,
+};
+
+enum rtw89_net_type {
+ RTW89_NET_TYPE_NO_LINK = 0,
+ RTW89_NET_TYPE_AD_HOC = 1,
+ RTW89_NET_TYPE_INFRA = 2,
+ RTW89_NET_TYPE_AP_MODE = 3,
+};
+
+enum rtw89_wifi_role {
+ RTW89_WIFI_ROLE_NONE,
+ RTW89_WIFI_ROLE_STATION,
+ RTW89_WIFI_ROLE_AP,
+ RTW89_WIFI_ROLE_AP_VLAN,
+ RTW89_WIFI_ROLE_ADHOC,
+ RTW89_WIFI_ROLE_ADHOC_MASTER,
+ RTW89_WIFI_ROLE_MESH_POINT,
+ RTW89_WIFI_ROLE_MONITOR,
+ RTW89_WIFI_ROLE_P2P_DEVICE,
+ RTW89_WIFI_ROLE_P2P_CLIENT,
+ RTW89_WIFI_ROLE_P2P_GO,
+ RTW89_WIFI_ROLE_NAN,
+ RTW89_WIFI_ROLE_MLME_MAX
+};
+
+enum rtw89_upd_mode {
+ RTW89_VIF_CREATE,
+ RTW89_VIF_REMOVE,
+ RTW89_VIF_TYPE_CHANGE,
+ RTW89_VIF_INFO_CHANGE,
+ RTW89_VIF_CON_DISCONN
+};
+
+enum rtw89_self_role {
+ RTW89_SELF_ROLE_CLIENT,
+ RTW89_SELF_ROLE_AP,
+ RTW89_SELF_ROLE_AP_CLIENT
+};
+
+enum rtw89_msk_sO_el {
+ RTW89_NO_MSK,
+ RTW89_SMA,
+ RTW89_TMA,
+ RTW89_BSSID
+};
+
+enum rtw89_sch_tx_sel {
+ RTW89_SCH_TX_SEL_ALL,
+ RTW89_SCH_TX_SEL_HIQ,
+ RTW89_SCH_TX_SEL_MG0,
+ RTW89_SCH_TX_SEL_MACID,
+};
+
+/* RTW89_ADDR_CAM_SEC_NONE : not enabled
+ * RTW89_ADDR_CAM_SEC_ALL_UNI : 0 - 6 unicast
+ * RTW89_ADDR_CAM_SEC_NORMAL : 0 - 1 unicast, 2 - 4 group, 5 - 6 BIP
+ * RTW89_ADDR_CAM_SEC_4GROUP : 0 - 1 unicast, 2 - 5 group, 6 BIP
+ */
+enum rtw89_add_cam_sec_mode {
+ RTW89_ADDR_CAM_SEC_NONE = 0,
+ RTW89_ADDR_CAM_SEC_ALL_UNI = 1,
+ RTW89_ADDR_CAM_SEC_NORMAL = 2,
+ RTW89_ADDR_CAM_SEC_4GROUP = 3,
+};
+
+enum rtw89_sec_key_type {
+ RTW89_SEC_KEY_TYPE_NONE = 0,
+ RTW89_SEC_KEY_TYPE_WEP40 = 1,
+ RTW89_SEC_KEY_TYPE_WEP104 = 2,
+ RTW89_SEC_KEY_TYPE_TKIP = 3,
+ RTW89_SEC_KEY_TYPE_WAPI = 4,
+ RTW89_SEC_KEY_TYPE_GCMSMS4 = 5,
+ RTW89_SEC_KEY_TYPE_CCMP128 = 6,
+ RTW89_SEC_KEY_TYPE_CCMP256 = 7,
+ RTW89_SEC_KEY_TYPE_GCMP128 = 8,
+ RTW89_SEC_KEY_TYPE_GCMP256 = 9,
+ RTW89_SEC_KEY_TYPE_BIP_CCMP128 = 10,
+};
+
+enum rtw89_port {
+ RTW89_PORT_0 = 0,
+ RTW89_PORT_1 = 1,
+ RTW89_PORT_2 = 2,
+ RTW89_PORT_3 = 3,
+ RTW89_PORT_4 = 4,
+ RTW89_PORT_NUM
+};
+
+enum rtw89_band {
+ RTW89_BAND_2G = 0,
+ RTW89_BAND_5G = 1,
+ RTW89_BAND_MAX,
+};
+
+enum rtw89_hw_rate {
+ RTW89_HW_RATE_CCK1 = 0x0,
+ RTW89_HW_RATE_CCK2 = 0x1,
+ RTW89_HW_RATE_CCK5_5 = 0x2,
+ RTW89_HW_RATE_CCK11 = 0x3,
+ RTW89_HW_RATE_OFDM6 = 0x4,
+ RTW89_HW_RATE_OFDM9 = 0x5,
+ RTW89_HW_RATE_OFDM12 = 0x6,
+ RTW89_HW_RATE_OFDM18 = 0x7,
+ RTW89_HW_RATE_OFDM24 = 0x8,
+ RTW89_HW_RATE_OFDM36 = 0x9,
+ RTW89_HW_RATE_OFDM48 = 0xA,
+ RTW89_HW_RATE_OFDM54 = 0xB,
+ RTW89_HW_RATE_MCS0 = 0x80,
+ RTW89_HW_RATE_MCS1 = 0x81,
+ RTW89_HW_RATE_MCS2 = 0x82,
+ RTW89_HW_RATE_MCS3 = 0x83,
+ RTW89_HW_RATE_MCS4 = 0x84,
+ RTW89_HW_RATE_MCS5 = 0x85,
+ RTW89_HW_RATE_MCS6 = 0x86,
+ RTW89_HW_RATE_MCS7 = 0x87,
+ RTW89_HW_RATE_MCS8 = 0x88,
+ RTW89_HW_RATE_MCS9 = 0x89,
+ RTW89_HW_RATE_MCS10 = 0x8A,
+ RTW89_HW_RATE_MCS11 = 0x8B,
+ RTW89_HW_RATE_MCS12 = 0x8C,
+ RTW89_HW_RATE_MCS13 = 0x8D,
+ RTW89_HW_RATE_MCS14 = 0x8E,
+ RTW89_HW_RATE_MCS15 = 0x8F,
+ RTW89_HW_RATE_MCS16 = 0x90,
+ RTW89_HW_RATE_MCS17 = 0x91,
+ RTW89_HW_RATE_MCS18 = 0x92,
+ RTW89_HW_RATE_MCS19 = 0x93,
+ RTW89_HW_RATE_MCS20 = 0x94,
+ RTW89_HW_RATE_MCS21 = 0x95,
+ RTW89_HW_RATE_MCS22 = 0x96,
+ RTW89_HW_RATE_MCS23 = 0x97,
+ RTW89_HW_RATE_MCS24 = 0x98,
+ RTW89_HW_RATE_MCS25 = 0x99,
+ RTW89_HW_RATE_MCS26 = 0x9A,
+ RTW89_HW_RATE_MCS27 = 0x9B,
+ RTW89_HW_RATE_MCS28 = 0x9C,
+ RTW89_HW_RATE_MCS29 = 0x9D,
+ RTW89_HW_RATE_MCS30 = 0x9E,
+ RTW89_HW_RATE_MCS31 = 0x9F,
+ RTW89_HW_RATE_VHT_NSS1_MCS0 = 0x100,
+ RTW89_HW_RATE_VHT_NSS1_MCS1 = 0x101,
+ RTW89_HW_RATE_VHT_NSS1_MCS2 = 0x102,
+ RTW89_HW_RATE_VHT_NSS1_MCS3 = 0x103,
+ RTW89_HW_RATE_VHT_NSS1_MCS4 = 0x104,
+ RTW89_HW_RATE_VHT_NSS1_MCS5 = 0x105,
+ RTW89_HW_RATE_VHT_NSS1_MCS6 = 0x106,
+ RTW89_HW_RATE_VHT_NSS1_MCS7 = 0x107,
+ RTW89_HW_RATE_VHT_NSS1_MCS8 = 0x108,
+ RTW89_HW_RATE_VHT_NSS1_MCS9 = 0x109,
+ RTW89_HW_RATE_VHT_NSS2_MCS0 = 0x110,
+ RTW89_HW_RATE_VHT_NSS2_MCS1 = 0x111,
+ RTW89_HW_RATE_VHT_NSS2_MCS2 = 0x112,
+ RTW89_HW_RATE_VHT_NSS2_MCS3 = 0x113,
+ RTW89_HW_RATE_VHT_NSS2_MCS4 = 0x114,
+ RTW89_HW_RATE_VHT_NSS2_MCS5 = 0x115,
+ RTW89_HW_RATE_VHT_NSS2_MCS6 = 0x116,
+ RTW89_HW_RATE_VHT_NSS2_MCS7 = 0x117,
+ RTW89_HW_RATE_VHT_NSS2_MCS8 = 0x118,
+ RTW89_HW_RATE_VHT_NSS2_MCS9 = 0x119,
+ RTW89_HW_RATE_VHT_NSS3_MCS0 = 0x120,
+ RTW89_HW_RATE_VHT_NSS3_MCS1 = 0x121,
+ RTW89_HW_RATE_VHT_NSS3_MCS2 = 0x122,
+ RTW89_HW_RATE_VHT_NSS3_MCS3 = 0x123,
+ RTW89_HW_RATE_VHT_NSS3_MCS4 = 0x124,
+ RTW89_HW_RATE_VHT_NSS3_MCS5 = 0x125,
+ RTW89_HW_RATE_VHT_NSS3_MCS6 = 0x126,
+ RTW89_HW_RATE_VHT_NSS3_MCS7 = 0x127,
+ RTW89_HW_RATE_VHT_NSS3_MCS8 = 0x128,
+ RTW89_HW_RATE_VHT_NSS3_MCS9 = 0x129,
+ RTW89_HW_RATE_VHT_NSS4_MCS0 = 0x130,
+ RTW89_HW_RATE_VHT_NSS4_MCS1 = 0x131,
+ RTW89_HW_RATE_VHT_NSS4_MCS2 = 0x132,
+ RTW89_HW_RATE_VHT_NSS4_MCS3 = 0x133,
+ RTW89_HW_RATE_VHT_NSS4_MCS4 = 0x134,
+ RTW89_HW_RATE_VHT_NSS4_MCS5 = 0x135,
+ RTW89_HW_RATE_VHT_NSS4_MCS6 = 0x136,
+ RTW89_HW_RATE_VHT_NSS4_MCS7 = 0x137,
+ RTW89_HW_RATE_VHT_NSS4_MCS8 = 0x138,
+ RTW89_HW_RATE_VHT_NSS4_MCS9 = 0x139,
+ RTW89_HW_RATE_HE_NSS1_MCS0 = 0x180,
+ RTW89_HW_RATE_HE_NSS1_MCS1 = 0x181,
+ RTW89_HW_RATE_HE_NSS1_MCS2 = 0x182,
+ RTW89_HW_RATE_HE_NSS1_MCS3 = 0x183,
+ RTW89_HW_RATE_HE_NSS1_MCS4 = 0x184,
+ RTW89_HW_RATE_HE_NSS1_MCS5 = 0x185,
+ RTW89_HW_RATE_HE_NSS1_MCS6 = 0x186,
+ RTW89_HW_RATE_HE_NSS1_MCS7 = 0x187,
+ RTW89_HW_RATE_HE_NSS1_MCS8 = 0x188,
+ RTW89_HW_RATE_HE_NSS1_MCS9 = 0x189,
+ RTW89_HW_RATE_HE_NSS1_MCS10 = 0x18A,
+ RTW89_HW_RATE_HE_NSS1_MCS11 = 0x18B,
+ RTW89_HW_RATE_HE_NSS2_MCS0 = 0x190,
+ RTW89_HW_RATE_HE_NSS2_MCS1 = 0x191,
+ RTW89_HW_RATE_HE_NSS2_MCS2 = 0x192,
+ RTW89_HW_RATE_HE_NSS2_MCS3 = 0x193,
+ RTW89_HW_RATE_HE_NSS2_MCS4 = 0x194,
+ RTW89_HW_RATE_HE_NSS2_MCS5 = 0x195,
+ RTW89_HW_RATE_HE_NSS2_MCS6 = 0x196,
+ RTW89_HW_RATE_HE_NSS2_MCS7 = 0x197,
+ RTW89_HW_RATE_HE_NSS2_MCS8 = 0x198,
+ RTW89_HW_RATE_HE_NSS2_MCS9 = 0x199,
+ RTW89_HW_RATE_HE_NSS2_MCS10 = 0x19A,
+ RTW89_HW_RATE_HE_NSS2_MCS11 = 0x19B,
+ RTW89_HW_RATE_HE_NSS3_MCS0 = 0x1A0,
+ RTW89_HW_RATE_HE_NSS3_MCS1 = 0x1A1,
+ RTW89_HW_RATE_HE_NSS3_MCS2 = 0x1A2,
+ RTW89_HW_RATE_HE_NSS3_MCS3 = 0x1A3,
+ RTW89_HW_RATE_HE_NSS3_MCS4 = 0x1A4,
+ RTW89_HW_RATE_HE_NSS3_MCS5 = 0x1A5,
+ RTW89_HW_RATE_HE_NSS3_MCS6 = 0x1A6,
+ RTW89_HW_RATE_HE_NSS3_MCS7 = 0x1A7,
+ RTW89_HW_RATE_HE_NSS3_MCS8 = 0x1A8,
+ RTW89_HW_RATE_HE_NSS3_MCS9 = 0x1A9,
+ RTW89_HW_RATE_HE_NSS3_MCS10 = 0x1AA,
+ RTW89_HW_RATE_HE_NSS3_MCS11 = 0x1AB,
+ RTW89_HW_RATE_HE_NSS4_MCS0 = 0x1B0,
+ RTW89_HW_RATE_HE_NSS4_MCS1 = 0x1B1,
+ RTW89_HW_RATE_HE_NSS4_MCS2 = 0x1B2,
+ RTW89_HW_RATE_HE_NSS4_MCS3 = 0x1B3,
+ RTW89_HW_RATE_HE_NSS4_MCS4 = 0x1B4,
+ RTW89_HW_RATE_HE_NSS4_MCS5 = 0x1B5,
+ RTW89_HW_RATE_HE_NSS4_MCS6 = 0x1B6,
+ RTW89_HW_RATE_HE_NSS4_MCS7 = 0x1B7,
+ RTW89_HW_RATE_HE_NSS4_MCS8 = 0x1B8,
+ RTW89_HW_RATE_HE_NSS4_MCS9 = 0x1B9,
+ RTW89_HW_RATE_HE_NSS4_MCS10 = 0x1BA,
+ RTW89_HW_RATE_HE_NSS4_MCS11 = 0x1BB,
+ RTW89_HW_RATE_NR,
+
+ RTW89_HW_RATE_MASK_MOD = GENMASK(8, 7),
+ RTW89_HW_RATE_MASK_VAL = GENMASK(6, 0),
+};
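+/* Decoding note (derived from the enum values above): the MOD field
+ * (GENMASK(8, 7)) is 0 for CCK/OFDM, 1 for HT (0x80..), 2 for VHT (0x100..)
+ * and 3 for HE (0x180..); the VAL field (GENMASK(6, 0)) is the index within
+ * that class, e.g.
+ *
+ *	FIELD_GET(RTW89_HW_RATE_MASK_MOD, RTW89_HW_RATE_HE_NSS2_MCS7) == 3
+ *	FIELD_GET(RTW89_HW_RATE_MASK_VAL, RTW89_HW_RATE_HE_NSS2_MCS7) == 0x17
+ */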
+
+/* 2G channels,
+ * 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define RTW89_2G_CH_NUM 14
+
+/* 5G channels,
+ * 36, 38, 40, 42, 44, 46, 48, 50,
+ * 52, 54, 56, 58, 60, 62, 64,
+ * 100, 102, 104, 106, 108, 110, 112, 114,
+ * 116, 118, 120, 122, 124, 126, 128, 130,
+ * 132, 134, 136, 138, 140, 142, 144,
+ * 149, 151, 153, 155, 157, 159, 161, 163,
+ * 165, 167, 169, 171, 173, 175, 177
+ */
+#define RTW89_5G_CH_NUM 53
+
+enum rtw89_rate_section {
+ RTW89_RS_CCK,
+ RTW89_RS_OFDM,
+ RTW89_RS_MCS, /* for HT/VHT/HE */
+ RTW89_RS_HEDCM,
+ RTW89_RS_OFFSET,
+ RTW89_RS_MAX,
+ RTW89_RS_LMT_NUM = RTW89_RS_MCS + 1,
+};
+
+enum rtw89_rate_max {
+ RTW89_RATE_CCK_MAX = 4,
+ RTW89_RATE_OFDM_MAX = 8,
+ RTW89_RATE_MCS_MAX = 12,
+ RTW89_RATE_HEDCM_MAX = 4, /* for HEDCM MCS0/1/3/4 */
+ RTW89_RATE_OFFSET_MAX = 5, /* for HE(HEDCM)/VHT/HT/OFDM/CCK offset */
+};
+
+enum rtw89_nss {
+ RTW89_NSS_1 = 0,
+ RTW89_NSS_2 = 1,
+ /* HE DCM only support 1ss and 2ss */
+ RTW89_NSS_HEDCM_MAX = RTW89_NSS_2 + 1,
+ RTW89_NSS_3 = 2,
+ RTW89_NSS_4 = 3,
+ RTW89_NSS_MAX,
+};
+
+enum rtw89_ntx {
+ RTW89_1TX = 0,
+ RTW89_2TX = 1,
+ RTW89_NTX_NUM,
+};
+
+enum rtw89_beamforming_type {
+ RTW89_NONBF = 0,
+ RTW89_BF = 1,
+ RTW89_BF_NUM,
+};
+
+enum rtw89_regulation_type {
+ RTW89_WW = 0,
+ RTW89_ETSI = 1,
+ RTW89_FCC = 2,
+ RTW89_MKK = 3,
+ RTW89_NA = 4,
+ RTW89_IC = 5,
+ RTW89_KCC = 6,
+ RTW89_NCC = 7,
+ RTW89_CHILE = 8,
+ RTW89_ACMA = 9,
+ RTW89_MEXICO = 10,
+ RTW89_UKRAINE = 11,
+ RTW89_CN = 12,
+ RTW89_REGD_NUM,
+};
+
+extern const u8 rtw89_rs_idx_max[RTW89_RS_MAX];
+extern const u8 rtw89_rs_nss_max[RTW89_RS_MAX];
+
+struct rtw89_txpwr_byrate {
+ s8 cck[RTW89_RATE_CCK_MAX];
+ s8 ofdm[RTW89_RATE_OFDM_MAX];
+ s8 mcs[RTW89_NSS_MAX][RTW89_RATE_MCS_MAX];
+ s8 hedcm[RTW89_NSS_HEDCM_MAX][RTW89_RATE_HEDCM_MAX];
+ s8 offset[RTW89_RATE_OFFSET_MAX];
+};
+
+enum rtw89_bandwidth_section_num {
+ RTW89_BW20_SEC_NUM = 8,
+ RTW89_BW40_SEC_NUM = 4,
+ RTW89_BW80_SEC_NUM = 2,
+};
+
+struct rtw89_txpwr_limit {
+ s8 cck_20m[RTW89_BF_NUM];
+ s8 cck_40m[RTW89_BF_NUM];
+ s8 ofdm[RTW89_BF_NUM];
+ s8 mcs_20m[RTW89_BW20_SEC_NUM][RTW89_BF_NUM];
+ s8 mcs_40m[RTW89_BW40_SEC_NUM][RTW89_BF_NUM];
+ s8 mcs_80m[RTW89_BW80_SEC_NUM][RTW89_BF_NUM];
+ s8 mcs_160m[RTW89_BF_NUM];
+ s8 mcs_40m_0p5[RTW89_BF_NUM];
+ s8 mcs_40m_2p5[RTW89_BF_NUM];
+};
+
+#define RTW89_RU_SEC_NUM 8
+
+struct rtw89_txpwr_limit_ru {
+ s8 ru26[RTW89_RU_SEC_NUM];
+ s8 ru52[RTW89_RU_SEC_NUM];
+ s8 ru106[RTW89_RU_SEC_NUM];
+};
+
+struct rtw89_rate_desc {
+ enum rtw89_nss nss;
+ enum rtw89_rate_section rs;
+ u8 idx;
+};
+
+#define PHY_STS_HDR_LEN 8
+#define RF_PATH_MAX 4
+#define RTW89_MAX_PPDU_CNT 8
+struct rtw89_rx_phy_ppdu {
+ u8 *buf;
+ u32 len;
+ u8 rssi_avg;
+ s8 rssi[RF_PATH_MAX];
+ u8 mac_id;
+ bool to_self;
+ bool valid;
+};
+
+enum rtw89_mac_idx {
+ RTW89_MAC_0 = 0,
+ RTW89_MAC_1 = 1,
+};
+
+enum rtw89_phy_idx {
+ RTW89_PHY_0 = 0,
+ RTW89_PHY_1 = 1,
+ RTW89_PHY_MAX
+};
+
+enum rtw89_rf_path {
+ RF_PATH_A = 0,
+ RF_PATH_B = 1,
+ RF_PATH_C = 2,
+ RF_PATH_D = 3,
+ RF_PATH_AB,
+ RF_PATH_AC,
+ RF_PATH_AD,
+ RF_PATH_BC,
+ RF_PATH_BD,
+ RF_PATH_CD,
+ RF_PATH_ABC,
+ RF_PATH_ABD,
+ RF_PATH_ACD,
+ RF_PATH_BCD,
+ RF_PATH_ABCD,
+};
+
+enum rtw89_rf_path_bit {
+ RF_A = BIT(0),
+ RF_B = BIT(1),
+ RF_C = BIT(2),
+ RF_D = BIT(3),
+
+ RF_AB = (RF_A | RF_B),
+ RF_AC = (RF_A | RF_C),
+ RF_AD = (RF_A | RF_D),
+ RF_BC = (RF_B | RF_C),
+ RF_BD = (RF_B | RF_D),
+ RF_CD = (RF_C | RF_D),
+
+ RF_ABC = (RF_A | RF_B | RF_C),
+ RF_ABD = (RF_A | RF_B | RF_D),
+ RF_ACD = (RF_A | RF_C | RF_D),
+ RF_BCD = (RF_B | RF_C | RF_D),
+
+ RF_ABCD = (RF_A | RF_B | RF_C | RF_D),
+};
+
+enum rtw89_bandwidth {
+ RTW89_CHANNEL_WIDTH_20 = 0,
+ RTW89_CHANNEL_WIDTH_40 = 1,
+ RTW89_CHANNEL_WIDTH_80 = 2,
+ RTW89_CHANNEL_WIDTH_160 = 3,
+ RTW89_CHANNEL_WIDTH_80_80 = 4,
+ RTW89_CHANNEL_WIDTH_5 = 5,
+ RTW89_CHANNEL_WIDTH_10 = 6,
+};
+
+enum rtw89_ps_mode {
+ RTW89_PS_MODE_NONE = 0,
+ RTW89_PS_MODE_RFOFF = 1,
+ RTW89_PS_MODE_CLK_GATED = 2,
+ RTW89_PS_MODE_PWR_GATED = 3,
+};
+
+#define RTW89_MAX_CHANNEL_WIDTH RTW89_CHANNEL_WIDTH_80
+#define RTW89_2G_BW_NUM (RTW89_CHANNEL_WIDTH_40 + 1)
+#define RTW89_5G_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
+#define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_80 + 1)
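+/* With the bandwidth enum above, RTW89_2G_BW_NUM evaluates to 2 (20/40 MHz)
+ * while RTW89_5G_BW_NUM and RTW89_PPE_BW_NUM evaluate to 3 (20/40/80 MHz);
+ * these sizes dimension the per-bandwidth TX power limit tables below.
+ */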
+
+enum rtw89_ru_bandwidth {
+ RTW89_RU26 = 0,
+ RTW89_RU52 = 1,
+ RTW89_RU106 = 2,
+ RTW89_RU_NUM,
+};
+
+enum rtw89_sc_offset {
+ RTW89_SC_DONT_CARE = 0,
+ RTW89_SC_20_UPPER = 1,
+ RTW89_SC_20_LOWER = 2,
+ RTW89_SC_20_UPMOST = 3,
+ RTW89_SC_20_LOWEST = 4,
+ RTW89_SC_40_UPPER = 9,
+ RTW89_SC_40_LOWER = 10,
+};
+
+struct rtw89_channel_params {
+ u8 center_chan;
+ u8 primary_chan;
+ u8 bandwidth;
+ u8 pri_ch_idx;
+ u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1];
+};
+
+struct rtw89_channel_help_params {
+ u16 tx_en;
+};
+
+struct rtw89_port_reg {
+ u32 port_cfg;
+ u32 tbtt_prohib;
+ u32 bcn_area;
+ u32 bcn_early;
+ u32 tbtt_early;
+ u32 tbtt_agg;
+ u32 bcn_space;
+ u32 bcn_forcetx;
+ u32 bcn_err_cnt;
+ u32 bcn_err_flag;
+ u32 dtim_ctrl;
+ u32 tbtt_shift;
+ u32 bcn_cnt_tmr;
+ u32 tsftr_l;
+ u32 tsftr_h;
+};
+
+struct rtw89_txwd_body {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+} __packed;
+
+struct rtw89_txwd_info {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+} __packed;
+
+struct rtw89_rx_desc_info {
+ u16 pkt_size;
+ u8 pkt_type;
+ u8 drv_info_size;
+ u8 shift;
+ u8 wl_hd_iv_len;
+ bool long_rxdesc;
+ bool bb_sel;
+ bool mac_info_valid;
+ u16 data_rate;
+ u8 gi_ltf;
+ u8 bw;
+ u32 free_run_cnt;
+ u8 user_id;
+ bool sr_en;
+ u8 ppdu_cnt;
+ u8 ppdu_type;
+ bool icv_err;
+ bool crc32_err;
+ bool hw_dec;
+ bool sw_dec;
+ bool addr1_match;
+ u8 frag;
+ u16 seq;
+ u8 frame_type;
+ u8 rx_pl_id;
+ bool addr_cam_valid;
+ u8 addr_cam_id;
+ u8 sec_cam_id;
+ u8 mac_id;
+ u16 offset;
+ bool ready;
+};
+
+struct rtw89_rxdesc_short {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+} __packed;
+
+struct rtw89_rxdesc_long {
+ __le32 dword0;
+ __le32 dword1;
+ __le32 dword2;
+ __le32 dword3;
+ __le32 dword4;
+ __le32 dword5;
+ __le32 dword6;
+ __le32 dword7;
+} __packed;
+
+struct rtw89_tx_desc_info {
+ u16 pkt_size;
+ u8 wp_offset;
+ u8 qsel;
+ u8 ch_dma;
+ u8 hdr_llc_len;
+ bool is_bmc;
+ bool en_wd_info;
+ bool wd_page;
+ bool use_rate;
+ bool dis_data_fb;
+ bool tid_indicate;
+ bool agg_en;
+ bool bk;
+ u8 ampdu_density;
+ u8 ampdu_num;
+ bool sec_en;
+ u8 sec_type;
+ u8 sec_cam_idx;
+ u16 data_rate;
+ u16 data_retry_lowest_rate;
+ bool fw_dl;
+ u16 seq;
+ bool a_ctrl_bsr;
+};
+
+struct rtw89_core_tx_request {
+ enum rtw89_core_tx_type tx_type;
+
+ struct sk_buff *skb;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ struct rtw89_tx_desc_info desc_info;
+};
+
+struct rtw89_txq {
+ struct list_head list;
+ unsigned long flags;
+ int wait_cnt;
+};
+
+struct rtw89_mac_ax_gnt {
+ u8 gnt_bt_sw_en;
+ u8 gnt_bt;
+ u8 gnt_wl_sw_en;
+ u8 gnt_wl;
+};
+
+#define RTW89_MAC_AX_COEX_GNT_NR 2
+struct rtw89_mac_ax_coex_gnt {
+ struct rtw89_mac_ax_gnt band[RTW89_MAC_AX_COEX_GNT_NR];
+};
+
+enum rtw89_btc_ncnt {
+ BTC_NCNT_POWER_ON = 0x0,
+ BTC_NCNT_POWER_OFF,
+ BTC_NCNT_INIT_COEX,
+ BTC_NCNT_SCAN_START,
+ BTC_NCNT_SCAN_FINISH,
+ BTC_NCNT_SPECIAL_PACKET,
+ BTC_NCNT_SWITCH_BAND,
+ BTC_NCNT_RFK_TIMEOUT,
+ BTC_NCNT_SHOW_COEX_INFO,
+ BTC_NCNT_ROLE_INFO,
+ BTC_NCNT_CONTROL,
+ BTC_NCNT_RADIO_STATE,
+ BTC_NCNT_CUSTOMERIZE,
+ BTC_NCNT_WL_RFK,
+ BTC_NCNT_WL_STA,
+ BTC_NCNT_FWINFO,
+ BTC_NCNT_TIMER,
+ BTC_NCNT_NUM
+};
+
+enum rtw89_btc_btinfo {
+ BTC_BTINFO_L0 = 0,
+ BTC_BTINFO_L1,
+ BTC_BTINFO_L2,
+ BTC_BTINFO_L3,
+ BTC_BTINFO_H0,
+ BTC_BTINFO_H1,
+ BTC_BTINFO_H2,
+ BTC_BTINFO_H3,
+ BTC_BTINFO_MAX
+};
+
+enum rtw89_btc_dcnt {
+ BTC_DCNT_RUN = 0x0,
+ BTC_DCNT_CX_RUNINFO,
+ BTC_DCNT_RPT,
+ BTC_DCNT_RPT_FREEZE,
+ BTC_DCNT_CYCLE,
+ BTC_DCNT_CYCLE_FREEZE,
+ BTC_DCNT_W1,
+ BTC_DCNT_W1_FREEZE,
+ BTC_DCNT_B1,
+ BTC_DCNT_B1_FREEZE,
+ BTC_DCNT_TDMA_NONSYNC,
+ BTC_DCNT_SLOT_NONSYNC,
+ BTC_DCNT_BTCNT_FREEZE,
+ BTC_DCNT_WL_SLOT_DRIFT,
+ BTC_DCNT_WL_STA_LAST,
+ BTC_DCNT_NUM,
+};
+
+enum rtw89_btc_wl_state_cnt {
+ BTC_WCNT_SCANAP = 0x0,
+ BTC_WCNT_DHCP,
+ BTC_WCNT_EAPOL,
+ BTC_WCNT_ARP,
+ BTC_WCNT_SCBDUPDATE,
+ BTC_WCNT_RFK_REQ,
+ BTC_WCNT_RFK_GO,
+ BTC_WCNT_RFK_REJECT,
+ BTC_WCNT_RFK_TIMEOUT,
+ BTC_WCNT_CH_UPDATE,
+ BTC_WCNT_NUM
+};
+
+enum rtw89_btc_bt_state_cnt {
+ BTC_BCNT_RETRY = 0x0,
+ BTC_BCNT_REINIT,
+ BTC_BCNT_REENABLE,
+ BTC_BCNT_SCBDREAD,
+ BTC_BCNT_RELINK,
+ BTC_BCNT_IGNOWL,
+ BTC_BCNT_INQPAG,
+ BTC_BCNT_INQ,
+ BTC_BCNT_PAGE,
+ BTC_BCNT_ROLESW,
+ BTC_BCNT_AFH,
+ BTC_BCNT_INFOUPDATE,
+ BTC_BCNT_INFOSAME,
+ BTC_BCNT_SCBDUPDATE,
+ BTC_BCNT_HIPRI_TX,
+ BTC_BCNT_HIPRI_RX,
+ BTC_BCNT_LOPRI_TX,
+ BTC_BCNT_LOPRI_RX,
+ BTC_BCNT_RATECHG,
+ BTC_BCNT_NUM
+};
+
+enum rtw89_btc_bt_profile {
+ BTC_BT_NOPROFILE = 0,
+ BTC_BT_HFP = BIT(0),
+ BTC_BT_HID = BIT(1),
+ BTC_BT_A2DP = BIT(2),
+ BTC_BT_PAN = BIT(3),
+ BTC_PROFILE_MAX = 4,
+};
+
+struct rtw89_btc_ant_info {
+ u8 type; /* shared, dedicated */
+ u8 num;
+ u8 isolation;
+
+ u8 single_pos: 1;/* Single antenna at S0 or S1 */
+ u8 diversity: 1;
+};
+
+enum rtw89_tfc_dir {
+ RTW89_TFC_UL,
+ RTW89_TFC_DL,
+};
+
+struct rtw89_btc_wl_smap {
+ u32 busy: 1;
+ u32 scan: 1;
+ u32 connecting: 1;
+ u32 roaming: 1;
+ u32 _4way: 1;
+ u32 rf_off: 1;
+ u32 lps: 1;
+ u32 ips: 1;
+ u32 init_ok: 1;
+ u32 traffic_dir : 2;
+ u32 rf_off_pre: 1;
+ u32 lps_pre: 1;
+};
+
+enum rtw89_tfc_lv {
+ RTW89_TFC_IDLE,
+ RTW89_TFC_ULTRA_LOW,
+ RTW89_TFC_LOW,
+ RTW89_TFC_MID,
+ RTW89_TFC_HIGH,
+};
+
+#define RTW89_TP_SHIFT 18 /* bytes/2s --> Mbps */
+DECLARE_EWMA(tp, 10, 2);
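+/* Arithmetic behind the shift: throughput in Mbps over a 2 s window is
+ * bytes * 8 / (2 * 10^6) = bytes / 250000, which bytes >> 18
+ * (i.e. bytes / 262144) approximates to within about 5%.
+ */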
+
+struct rtw89_traffic_stats {
+ /* units in bytes */
+ u64 tx_unicast;
+ u64 rx_unicast;
+ u32 tx_avg_len;
+ u32 rx_avg_len;
+
+ /* count for packets */
+ u64 tx_cnt;
+ u64 rx_cnt;
+
+ /* units in Mbps */
+ u32 tx_throughput;
+ u32 rx_throughput;
+ u32 tx_throughput_raw;
+ u32 rx_throughput_raw;
+ enum rtw89_tfc_lv tx_tfc_lv;
+ enum rtw89_tfc_lv rx_tfc_lv;
+ struct ewma_tp tx_ewma_tp;
+ struct ewma_tp rx_ewma_tp;
+
+ u16 tx_rate;
+ u16 rx_rate;
+};
+
+struct rtw89_btc_statistic {
+ u8 rssi; /* 0%~110% (dBm = rssi -110) */
+ struct rtw89_traffic_stats traffic;
+};
+
+#define BTC_WL_RSSI_THMAX 4
+
+struct rtw89_btc_wl_link_info {
+ struct rtw89_btc_statistic stat;
+ enum rtw89_tfc_dir dir;
+ u8 rssi_state[BTC_WL_RSSI_THMAX];
+ u8 mac_addr[ETH_ALEN];
+ u8 busy;
+ u8 ch;
+ u8 bw;
+ u8 band;
+ u8 role;
+ u8 pid;
+ u8 phy;
+ u8 dtim_period;
+ u8 mode;
+
+ u8 mac_id;
+ u8 tx_retry;
+
+ u32 bcn_period;
+ u32 busy_t;
+ u32 tx_time;
+ u32 client_cnt;
+ u32 rx_rate_drop_cnt;
+
+ u32 active: 1;
+ u32 noa: 1;
+ u32 client_ps: 1;
+ u32 connected: 2;
+};
+
+union rtw89_btc_wl_state_map {
+ u32 val;
+ struct rtw89_btc_wl_smap map;
+};
+
+struct rtw89_btc_bt_hfp_desc {
+ u32 exist: 1;
+ u32 type: 2;
+ u32 rsvd: 29;
+};
+
+struct rtw89_btc_bt_hid_desc {
+ u32 exist: 1;
+ u32 slot_info: 2;
+ u32 pair_cnt: 2;
+ u32 type: 8;
+ u32 rsvd: 19;
+};
+
+struct rtw89_btc_bt_a2dp_desc {
+ u8 exist: 1;
+ u8 exist_last: 1;
+ u8 play_latency: 1;
+ u8 type: 3;
+ u8 active: 1;
+ u8 sink: 1;
+
+ u8 bitpool;
+ u16 vendor_id;
+ u32 device_name;
+ u32 flush_time;
+};
+
+struct rtw89_btc_bt_pan_desc {
+ u32 exist: 1;
+ u32 type: 1;
+ u32 active: 1;
+ u32 rsvd: 29;
+};
+
+struct rtw89_btc_bt_rfk_info {
+ u32 run: 1;
+ u32 req: 1;
+ u32 timeout: 1;
+ u32 rsvd: 29;
+};
+
+union rtw89_btc_bt_rfk_info_map {
+ u32 val;
+ struct rtw89_btc_bt_rfk_info map;
+};
+
+struct rtw89_btc_bt_ver_info {
+ u32 fw_coex; /* match with which coex_ver */
+ u32 fw;
+};
+
+struct rtw89_btc_bool_sta_chg {
+ u32 now: 1;
+ u32 last: 1;
+ u32 remain: 1;
+ u32 srvd: 29;
+};
+
+struct rtw89_btc_u8_sta_chg {
+ u8 now;
+ u8 last;
+ u8 remain;
+ u8 rsvd;
+};
+
+struct rtw89_btc_wl_scan_info {
+ u8 band[RTW89_PHY_MAX];
+ u8 phy_map;
+ u8 rsvd;
+};
+
+struct rtw89_btc_wl_dbcc_info {
+ u8 op_band[RTW89_PHY_MAX]; /* op band in each phy */
+ u8 scan_band[RTW89_PHY_MAX]; /* scan band in each phy */
+ u8 real_band[RTW89_PHY_MAX];
+ u8 role[RTW89_PHY_MAX]; /* role in each phy */
+};
+
+struct rtw89_btc_wl_active_role {
+ u8 connected: 1;
+ u8 pid: 3;
+ u8 phy: 1;
+ u8 noa: 1;
+ u8 band: 2;
+
+ u8 client_ps: 1;
+ u8 bw: 7;
+
+ u8 role;
+ u8 ch;
+
+ u16 tx_lvl;
+ u16 rx_lvl;
+ u16 tx_rate;
+ u16 rx_rate;
+};
+
+struct rtw89_btc_wl_role_info_bpos {
+ u16 none: 1;
+ u16 station: 1;
+ u16 ap: 1;
+ u16 vap: 1;
+ u16 adhoc: 1;
+ u16 adhoc_master: 1;
+ u16 mesh: 1;
+ u16 moniter: 1;
+ u16 p2p_device: 1;
+ u16 p2p_gc: 1;
+ u16 p2p_go: 1;
+ u16 nan: 1;
+};
+
+union rtw89_btc_wl_role_info_map {
+ u16 val;
+ struct rtw89_btc_wl_role_info_bpos role;
+};
+
+struct rtw89_btc_wl_role_info { /* struct size must be n*4 bytes */
+ u8 connect_cnt;
+ u8 link_mode;
+ union rtw89_btc_wl_role_info_map role_map;
+ struct rtw89_btc_wl_active_role active_role[RTW89_MAX_HW_PORT_NUM];
+};
+
+struct rtw89_btc_wl_ver_info {
+ u32 fw_coex; /* match with which coex_ver */
+ u32 fw;
+ u32 mac;
+ u32 bb;
+ u32 rf;
+};
+
+struct rtw89_btc_wl_afh_info {
+ u8 en;
+ u8 ch;
+ u8 bw;
+ u8 rsvd;
+} __packed;
+
+struct rtw89_btc_wl_rfk_info {
+ u32 state: 2;
+ u32 path_map: 4;
+ u32 phy_map: 2;
+ u32 band: 2;
+ u32 type: 8;
+ u32 rsvd: 14;
+};
+
+struct rtw89_btc_bt_smap {
+ u32 connect: 1;
+ u32 ble_connect: 1;
+ u32 acl_busy: 1;
+ u32 sco_busy: 1;
+ u32 mesh_busy: 1;
+ u32 inq_pag: 1;
+};
+
+union rtw89_btc_bt_state_map {
+ u32 val;
+ struct rtw89_btc_bt_smap map;
+};
+
+#define BTC_BT_RSSI_THMAX 4
+#define BTC_BT_AFH_GROUP 12
+
+struct rtw89_btc_bt_link_info {
+ struct rtw89_btc_u8_sta_chg profile_cnt;
+ struct rtw89_btc_bool_sta_chg multi_link;
+ struct rtw89_btc_bool_sta_chg relink;
+ struct rtw89_btc_bt_hfp_desc hfp_desc;
+ struct rtw89_btc_bt_hid_desc hid_desc;
+ struct rtw89_btc_bt_a2dp_desc a2dp_desc;
+ struct rtw89_btc_bt_pan_desc pan_desc;
+ union rtw89_btc_bt_state_map status;
+
+ u8 sut_pwr_level[BTC_PROFILE_MAX];
+ u8 golden_rx_shift[BTC_PROFILE_MAX];
+ u8 rssi_state[BTC_BT_RSSI_THMAX];
+ u8 afh_map[BTC_BT_AFH_GROUP];
+
+ u32 role_sw: 1;
+ u32 slave_role: 1;
+ u32 afh_update: 1;
+ u32 cqddr: 1;
+ u32 rssi: 8;
+ u32 tx_3m: 1;
+ u32 rsvd: 19;
+};
+
+struct rtw89_btc_3rdcx_info {
+ u8 type; /* 0: none, 1:zigbee, 2:LTE */
+ u8 hw_coex;
+ u16 rsvd;
+};
+
+struct rtw89_btc_dm_emap {
+ u32 init: 1;
+ u32 pta_owner: 1;
+ u32 wl_rfk_timeout: 1;
+ u32 bt_rfk_timeout: 1;
+
+ u32 wl_fw_hang: 1;
+ u32 offload_mismatch: 1;
+ u32 cycle_hang: 1;
+ u32 w1_hang: 1;
+
+ u32 b1_hang: 1;
+ u32 tdma_no_sync: 1;
+ u32 wl_slot_drift: 1;
+};
+
+union rtw89_btc_dm_error_map {
+ u32 val;
+ struct rtw89_btc_dm_emap map;
+};
+
+struct rtw89_btc_rf_para {
+ u32 tx_pwr_freerun;
+ u32 rx_gain_freerun;
+ u32 tx_pwr_perpkt;
+ u32 rx_gain_perpkt;
+};
+
+struct rtw89_btc_wl_info {
+ struct rtw89_btc_wl_link_info link_info[RTW89_MAX_HW_PORT_NUM];
+ struct rtw89_btc_wl_rfk_info rfk_info;
+ struct rtw89_btc_wl_ver_info ver_info;
+ struct rtw89_btc_wl_afh_info afh_info;
+ struct rtw89_btc_wl_role_info role_info;
+ struct rtw89_btc_wl_scan_info scan_info;
+ struct rtw89_btc_wl_dbcc_info dbcc_info;
+ struct rtw89_btc_rf_para rf_para;
+ union rtw89_btc_wl_state_map status;
+
+ u8 port_id[RTW89_WIFI_ROLE_MLME_MAX];
+ u8 rssi_level;
+
+ u32 scbd;
+};
+
+struct rtw89_btc_module {
+ struct rtw89_btc_ant_info ant;
+ u8 rfe_type;
+ u8 cv;
+
+ u8 bt_solo: 1;
+ u8 bt_pos: 1;
+ u8 switch_type: 1;
+
+ u8 rsvd;
+};
+
+#define RTW89_BTC_DM_MAXSTEP 30
+#define RTW89_BTC_DM_CNT_MAX (RTW89_BTC_DM_MAXSTEP * 8)
+
+struct rtw89_btc_dm_step {
+ u16 step[RTW89_BTC_DM_MAXSTEP];
+ u8 step_pos;
+ bool step_ov;
+};
+
+struct rtw89_btc_init_info {
+ struct rtw89_btc_module module;
+ u8 wl_guard_ch;
+
+ u8 wl_only: 1;
+ u8 wl_init_ok: 1;
+ u8 dbcc_en: 1;
+ u8 cx_other: 1;
+ u8 bt_only: 1;
+
+ u16 rsvd;
+};
+
+struct rtw89_btc_wl_tx_limit_para {
+ u16 enable;
+ u32 tx_time; /* unit: us */
+ u16 tx_retry;
+};
+
+struct rtw89_btc_bt_scan_info {
+ u16 win;
+ u16 intvl;
+ u32 enable: 1;
+ u32 interlace: 1;
+ u32 rsvd: 30;
+};
+
+enum rtw89_btc_bt_scan_type {
+ BTC_SCAN_INQ = 0,
+ BTC_SCAN_PAGE,
+ BTC_SCAN_BLE,
+ BTC_SCAN_INIT,
+ BTC_SCAN_TV,
+ BTC_SCAN_ADV,
+ BTC_SCAN_MAX1,
+};
+
+struct rtw89_btc_bt_info {
+ struct rtw89_btc_bt_link_info link_info;
+ struct rtw89_btc_bt_scan_info scan_info[BTC_SCAN_MAX1];
+ struct rtw89_btc_bt_ver_info ver_info;
+ struct rtw89_btc_bool_sta_chg enable;
+ struct rtw89_btc_bool_sta_chg inq_pag;
+ struct rtw89_btc_rf_para rf_para;
+ union rtw89_btc_bt_rfk_info_map rfk_info;
+
+ u8 raw_info[BTC_BTINFO_MAX]; /* raw bt info from mailbox */
+
+ u32 scbd;
+ u32 feature;
+
+ u32 mbx_avl: 1;
+ u32 whql_test: 1;
+ u32 igno_wl: 1;
+ u32 reinit: 1;
+ u32 ble_scan_en: 1;
+ u32 btg_type: 1;
+ u32 inq: 1;
+ u32 pag: 1;
+ u32 run_patch_code: 1;
+ u32 hi_lna_rx: 1;
+ u32 rsvd: 22;
+};
+
+struct rtw89_btc_cx {
+ struct rtw89_btc_wl_info wl;
+ struct rtw89_btc_bt_info bt;
+ struct rtw89_btc_3rdcx_info other;
+ u32 state_map;
+ u32 cnt_bt[BTC_BCNT_NUM];
+ u32 cnt_wl[BTC_WCNT_NUM];
+};
+
+struct rtw89_btc_fbtc_tdma {
+ u8 type;
+ u8 rxflctrl;
+ u8 txpause;
+ u8 wtgle_n;
+ u8 leak_n;
+ u8 ext_ctrl;
+ u8 rsvd0;
+ u8 rsvd1;
+} __packed;
+
+#define CXMREG_MAX 30
+#define FCXMAX_STEP 255 /* STEP trace record cnt, max: 65535, default: 255 */
+#define BTCRPT_VER 1
+#define BTC_CYCLE_SLOT_MAX 48 /* must be even number, non-zero */
+
+enum rtw89_btc_bt_rfk_counter {
+ BTC_BCNT_RFK_REQ = 0,
+ BTC_BCNT_RFK_GO = 1,
+ BTC_BCNT_RFK_REJECT = 2,
+ BTC_BCNT_RFK_FAIL = 3,
+ BTC_BCNT_RFK_TIMEOUT = 4,
+ BTC_BCNT_RFK_MAX
+};
+
+struct rtw89_btc_fbtc_rpt_ctrl {
+ u16 fver;
+ u16 rpt_cnt; /* tmr counters */
+ u32 wl_fw_coex_ver; /* match which driver's coex version */
+ u32 wl_fw_cx_offload;
+ u32 wl_fw_ver;
+ u32 rpt_enable;
+ u32 rpt_para; /* ms */
+ u32 mb_send_fail_cnt; /* fw send mailbox fail counter */
+ u32 mb_send_ok_cnt; /* fw send mailbox ok counter */
+ u32 mb_recv_cnt; /* fw recv mailbox counter */
+ u32 mb_a2dp_empty_cnt; /* a2dp empty count */
+ u32 mb_a2dp_flct_cnt; /* a2dp empty flow control counter */
+ u32 mb_a2dp_full_cnt; /* a2dp buffer full counter */
+ u32 bt_rfk_cnt[BTC_BCNT_RFK_MAX];
+ u32 c2h_cnt; /* fw send c2h counter */
+ u32 h2c_cnt; /* fw recv h2c counter */
+} __packed;
+
+enum rtw89_fbtc_ext_ctrl_type {
+ CXECTL_OFF = 0x0, /* tdma off */
+ CXECTL_B2 = 0x1, /* allow B2 (beacon-early) */
+ CXECTL_EXT = 0x2,
+ CXECTL_MAX
+};
+
+union rtw89_btc_fbtc_rxflct {
+ u8 val;
+ u8 type: 3;
+ u8 tgln_n: 5;
+};
+
+enum rtw89_btc_cxst_state {
+ CXST_OFF = 0x0,
+ CXST_B2W = 0x1,
+ CXST_W1 = 0x2,
+ CXST_W2 = 0x3,
+ CXST_W2B = 0x4,
+ CXST_B1 = 0x5,
+ CXST_B2 = 0x6,
+ CXST_B3 = 0x7,
+ CXST_B4 = 0x8,
+ CXST_LK = 0x9,
+ CXST_BLK = 0xa,
+ CXST_E2G = 0xb,
+ CXST_E5G = 0xc,
+ CXST_EBT = 0xd,
+ CXST_ENULL = 0xe,
+ CXST_WLK = 0xf,
+ CXST_W1FDD = 0x10,
+ CXST_B1FDD = 0x11,
+ CXST_MAX = 0x12,
+};
+
+enum {
+ CXBCN_ALL = 0x0,
+ CXBCN_ALL_OK,
+ CXBCN_BT_SLOT,
+ CXBCN_BT_OK,
+ CXBCN_MAX
+};
+
+enum btc_slot_type {
+ SLOT_MIX = 0x0, /* accept BT Lower-Pri Tx/Rx request 0x778 = 1 */
+ SLOT_ISO = 0x1, /* do not accept BT Lower-Pri Tx/Rx request 0x778 = d */
+ CXSTYPE_NUM,
+};
+
+enum { /* TIME */
+ CXT_BT = 0x0,
+ CXT_WL = 0x1,
+ CXT_MAX
+};
+
+enum { /* TIME-A2DP */
+ CXT_FLCTRL_OFF = 0x0,
+ CXT_FLCTRL_ON = 0x1,
+ CXT_FLCTRL_MAX
+};
+
+enum { /* STEP TYPE */
+ CXSTEP_NONE = 0x0,
+ CXSTEP_EVNT = 0x1,
+ CXSTEP_SLOT = 0x2,
+ CXSTEP_MAX,
+};
+
+#define FCXGPIODBG_VER 1
+#define BTC_DBG_MAX1 32
+struct rtw89_btc_fbtc_gpio_dbg {
+ u8 fver;
+ u8 rsvd;
+ u16 rsvd2;
+ u32 en_map; /* which debug signal (see btc_wl_gpio_debug) is enabled */
+ u32 pre_state; /* the debug signal is 1 or 0 */
+ u8 gpio_map[BTC_DBG_MAX1]; /* mapping of the debug signals to GPIO positions */
+} __packed;
+
+#define FCXMREG_VER 1
+struct rtw89_btc_fbtc_mreg_val {
+ u8 fver;
+ u8 reg_num;
+ __le16 rsvd;
+ __le32 mreg_val[CXMREG_MAX];
+} __packed;
+
+#define RTW89_DEF_FBTC_MREG(__type, __bytes, __offset) \
+ { .type = cpu_to_le16(__type), .bytes = cpu_to_le16(__bytes), \
+ .offset = cpu_to_le32(__offset), }
+
+struct rtw89_btc_fbtc_mreg {
+ __le16 type;
+ __le16 bytes;
+ __le32 offset;
+} __packed;
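+/* Usage sketch (register address below is hypothetical, for illustration
+ * only): per-chip monitor register tables can be built with the macro
+ * above, e.g.
+ *
+ *	static const struct rtw89_btc_fbtc_mreg mon_reg[] = {
+ *		RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda00),
+ *	};
+ *
+ * each entry expanding to { .type, .bytes, .offset } in little-endian form.
+ */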
+
+struct rtw89_btc_fbtc_slot {
+ __le16 dur;
+ __le32 cxtbl;
+ __le16 cxtype;
+} __packed;
+
+#define FCXSLOTS_VER 1
+struct rtw89_btc_fbtc_slots {
+ u8 fver;
+ u8 tbl_num;
+ __le16 rsvd;
+ __le32 update_map;
+ struct rtw89_btc_fbtc_slot slot[CXST_MAX];
+} __packed;
+
+#define FCXSTEP_VER 2
+struct rtw89_btc_fbtc_step {
+ u8 type;
+ u8 val;
+ __le16 difft;
+} __packed;
+
+struct rtw89_btc_fbtc_steps {
+ u8 fver;
+ u8 rsvd;
+ __le16 cnt;
+ __le16 pos_old;
+ __le16 pos_new;
+ struct rtw89_btc_fbtc_step step[FCXMAX_STEP];
+} __packed;
+
+#define FCXCYSTA_VER 2
+struct rtw89_btc_fbtc_cysta { /* statistics for cycles */
+ u8 fver;
+ u8 rsvd;
+ __le16 cycles; /* total cycle number */
+ __le16 cycles_a2dp[CXT_FLCTRL_MAX];
+ __le16 a2dpept; /* a2dp empty cnt */
+ __le16 a2dpeptto; /* a2dp empty timeout cnt*/
+ __le16 tavg_cycle[CXT_MAX]; /* avg wl/bt cycle time */
+ __le16 tmax_cycle[CXT_MAX]; /* max wl/bt cycle time */
+ __le16 tmaxdiff_cycle[CXT_MAX]; /* max wl-wl bt-bt cycle diff time */
+ __le16 tavg_a2dp[CXT_FLCTRL_MAX]; /* avg a2dp PSTDMA/TDMA time */
+ __le16 tmax_a2dp[CXT_FLCTRL_MAX]; /* max a2dp PSTDMA/TDMA time */
+ __le16 tavg_a2dpept; /* avg a2dp empty time */
+ __le16 tmax_a2dpept; /* max a2dp empty time */
+ __le16 tavg_lk; /* avg leak-slot time */
+ __le16 tmax_lk; /* max leak-slot time */
+ __le32 slot_cnt[CXST_MAX]; /* slot count */
+ __le32 bcn_cnt[CXBCN_MAX];
+ __le32 leakrx_cnt; /* rximr events that occurred in the leak slot */
+ __le32 collision_cnt; /* events/timers that occurred at the same time */
+ __le32 skip_cnt;
+ __le32 exception;
+ __le32 except_cnt;
+ __le16 tslot_cycle[BTC_CYCLE_SLOT_MAX];
+} __packed;
+
+#define FCXNULLSTA_VER 1
+struct rtw89_btc_fbtc_cynullsta { /* cycle null statistics */
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd2;
+ __le32 max_t[2]; /* max_t for 0:null0/1:null1 */
+ __le32 avg_t[2]; /* avg_t for 0:null0/1:null1 */
+ __le32 result[2][4]; /* 0:fail, 1:ok, 2:on_time, 3:retry */
+} __packed;
+
+#define FCX_BTVER_VER 1
+struct rtw89_btc_fbtc_btver {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd2;
+ __le32 coex_ver; /*bit[15:8]->shared, bit[7:0]->non-shared */
+ __le32 fw_ver;
+ __le32 feature;
+} __packed;
+
+#define FCX_BTSCAN_VER 1
+struct rtw89_btc_fbtc_btscan {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd2;
+ u8 scan[6];
+} __packed;
+
+#define FCX_BTAFH_VER 1
+struct rtw89_btc_fbtc_btafh {
+ u8 fver;
+ u8 rsvd;
+ __le16 rsvd2;
+ u8 afh_l[4]; /*bit0:2402, bit1: 2403.... bit31:2433 */
+ u8 afh_m[4]; /*bit0:2434, bit1: 2435.... bit31:2465 */
+ u8 afh_h[4]; /*bit0:2466, bit1:2467......bit14:2480 */
+} __packed;
+
+#define FCX_BTDEVINFO_VER 1
+struct rtw89_btc_fbtc_btdevinfo {
+ u8 fver;
+ u8 rsvd;
+ __le16 vendor_id;
+ __le32 dev_name; /* only 24 bits valid */
+ __le32 flush_time;
+} __packed;
+
+#define RTW89_BTC_WL_DEF_TX_PWR GENMASK(7, 0)
+struct rtw89_btc_rf_trx_para {
+ u32 wl_tx_power; /* absolute Tx power (dBm), 0xff-> no BTC control */
+ u32 wl_rx_gain; /* rx gain table index (TBD.) */
+ u8 bt_tx_power; /* decrease Tx power (dB) */
+ u8 bt_rx_gain; /* LNA constrain level */
+};
+
+struct rtw89_btc_dm {
+ struct rtw89_btc_fbtc_slot slot[CXST_MAX];
+ struct rtw89_btc_fbtc_slot slot_now[CXST_MAX];
+ struct rtw89_btc_fbtc_tdma tdma;
+ struct rtw89_btc_fbtc_tdma tdma_now;
+ struct rtw89_mac_ax_coex_gnt gnt;
+ struct rtw89_btc_init_info init_info; /* pass to wl_fw if offload */
+ struct rtw89_btc_rf_trx_para rf_trx_para;
+ struct rtw89_btc_wl_tx_limit_para wl_tx_limit;
+ struct rtw89_btc_dm_step dm_step;
+ union rtw89_btc_dm_error_map error;
+ u32 cnt_dm[BTC_DCNT_NUM];
+ u32 cnt_notify[BTC_NCNT_NUM];
+
+ u32 update_slot_map;
+ u32 set_ant_path;
+
+ u32 wl_only: 1;
+ u32 wl_fw_cx_offload: 1;
+ u32 freerun: 1;
+ u32 wl_ps_ctrl: 2;
+ u32 wl_mimo_ps: 1;
+ u32 leak_ap: 1;
+ u32 noisy_level: 3;
+ u32 coex_info_map: 8;
+ u32 bt_only: 1;
+ u32 wl_btg_rx: 1;
+ u32 trx_para_level: 8;
+ u32 wl_stb_chg: 1;
+ u32 rsvd: 3;
+
+ u16 slot_dur[CXST_MAX];
+
+ u8 run_reason;
+ u8 run_action;
+};
+
+struct rtw89_btc_ctrl {
+ u32 manual: 1;
+ u32 igno_bt: 1;
+ u32 always_freerun: 1;
+ u32 trace_step: 16;
+ u32 rsvd: 12;
+};
+
+struct rtw89_btc_dbg {
+ /* cmd "rb" */
+ bool rb_done;
+ u32 rb_val;
+};
+
+#define FCXTDMA_VER 1
+
+enum rtw89_btc_btf_fw_event {
+ BTF_EVNT_RPT = 0,
+ BTF_EVNT_BT_INFO = 1,
+ BTF_EVNT_BT_SCBD = 2,
+ BTF_EVNT_BT_REG = 3,
+ BTF_EVNT_CX_RUNINFO = 4,
+ BTF_EVNT_BT_PSD = 5,
+ BTF_EVNT_BUF_OVERFLOW,
+ BTF_EVNT_C2H_LOOPBACK,
+ BTF_EVNT_MAX,
+};
+
+enum btf_fw_event_report {
+ BTC_RPT_TYPE_CTRL = 0x0,
+ BTC_RPT_TYPE_TDMA,
+ BTC_RPT_TYPE_SLOT,
+ BTC_RPT_TYPE_CYSTA,
+ BTC_RPT_TYPE_STEP,
+ BTC_RPT_TYPE_NULLSTA,
+ BTC_RPT_TYPE_MREG,
+ BTC_RPT_TYPE_GPIO_DBG,
+ BTC_RPT_TYPE_BT_VER,
+ BTC_RPT_TYPE_BT_SCAN,
+ BTC_RPT_TYPE_BT_AFH,
+ BTC_RPT_TYPE_BT_DEVICE,
+ BTC_RPT_TYPE_TEST,
+ BTC_RPT_TYPE_MAX = 31
+};
+
+enum rtw_btc_btf_reg_type {
+ REG_MAC = 0x0,
+ REG_BB = 0x1,
+ REG_RF = 0x2,
+ REG_BT_RF = 0x3,
+ REG_BT_MODEM = 0x4,
+ REG_BT_BLUEWIZE = 0x5,
+ REG_BT_VENDOR = 0x6,
+ REG_BT_LE = 0x7,
+ REG_MAX_TYPE,
+};
+
+struct rtw89_btc_rpt_cmn_info {
+ u32 rx_cnt;
+ u32 rx_len;
+ u32 req_len; /* expected rsp len */
+ u8 req_fver; /* expected rsp fver */
+ u8 rsp_fver; /* fver from fw */
+ u8 valid;
+} __packed;
+
+struct rtw89_btc_report_ctrl_state {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_rpt_ctrl finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_tdma {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_tdma finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_slots {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_slots finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_cysta {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_cysta finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_step {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_steps finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_nullsta {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_cynullsta finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_mreg {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_mreg_val finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_gpio_dbg {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_gpio_dbg finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_btver {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_btver finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_btscan {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_btscan finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_btafh {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_btafh finfo; /* info from fw */
+};
+
+struct rtw89_btc_rpt_fbtc_btdev {
+ struct rtw89_btc_rpt_cmn_info cinfo; /* common info, by driver */
+ struct rtw89_btc_fbtc_btdevinfo finfo; /* info from fw */
+};
+
+enum rtw89_btc_btfre_type {
+ BTFRE_INVALID_INPUT = 0x0, /* invalid input parameters */
+ BTFRE_UNDEF_TYPE,
+ BTFRE_EXCEPTION,
+ BTFRE_MAX,
+};
+
+struct rtw89_btc_btf_fwinfo {
+ u32 cnt_c2h;
+ u32 cnt_h2c;
+ u32 cnt_h2c_fail;
+ u32 event[BTF_EVNT_MAX];
+
+ u32 err[BTFRE_MAX];
+ u32 len_mismch;
+ u32 fver_mismch;
+ u32 rpt_en_map;
+
+ struct rtw89_btc_report_ctrl_state rpt_ctrl;
+ struct rtw89_btc_rpt_fbtc_tdma rpt_fbtc_tdma;
+ struct rtw89_btc_rpt_fbtc_slots rpt_fbtc_slots;
+ struct rtw89_btc_rpt_fbtc_cysta rpt_fbtc_cysta;
+ struct rtw89_btc_rpt_fbtc_step rpt_fbtc_step;
+ struct rtw89_btc_rpt_fbtc_nullsta rpt_fbtc_nullsta;
+ struct rtw89_btc_rpt_fbtc_mreg rpt_fbtc_mregval;
+ struct rtw89_btc_rpt_fbtc_gpio_dbg rpt_fbtc_gpio_dbg;
+ struct rtw89_btc_rpt_fbtc_btver rpt_fbtc_btver;
+ struct rtw89_btc_rpt_fbtc_btscan rpt_fbtc_btscan;
+ struct rtw89_btc_rpt_fbtc_btafh rpt_fbtc_btafh;
+ struct rtw89_btc_rpt_fbtc_btdev rpt_fbtc_btdev;
+};
+
+#define RTW89_BTC_POLICY_MAXLEN 512
+
+struct rtw89_btc {
+ struct rtw89_btc_cx cx;
+ struct rtw89_btc_dm dm;
+ struct rtw89_btc_ctrl ctrl;
+ struct rtw89_btc_module mdinfo;
+ struct rtw89_btc_btf_fwinfo fwinfo;
+ struct rtw89_btc_dbg dbg;
+
+ struct work_struct eapol_notify_work;
+ struct work_struct arp_notify_work;
+ struct work_struct dhcp_notify_work;
+ struct work_struct icmp_notify_work;
+
+ u32 bt_req_len;
+
+ u8 policy[RTW89_BTC_POLICY_MAXLEN];
+ u16 policy_len;
+ u16 policy_type;
+ bool bt_req_en;
+ bool update_policy_force;
+ bool lps;
+};
+
+enum rtw89_ra_mode {
+ RTW89_RA_MODE_CCK = BIT(0),
+ RTW89_RA_MODE_OFDM = BIT(1),
+ RTW89_RA_MODE_HT = BIT(2),
+ RTW89_RA_MODE_VHT = BIT(3),
+ RTW89_RA_MODE_HE = BIT(4),
+};
+
+enum rtw89_ra_report_mode {
+ RTW89_RA_RPT_MODE_LEGACY,
+ RTW89_RA_RPT_MODE_HT,
+ RTW89_RA_RPT_MODE_VHT,
+ RTW89_RA_RPT_MODE_HE,
+};
+
+enum rtw89_dig_noisy_level {
+ RTW89_DIG_NOISY_LEVEL0 = -1,
+ RTW89_DIG_NOISY_LEVEL1 = 0,
+ RTW89_DIG_NOISY_LEVEL2 = 1,
+ RTW89_DIG_NOISY_LEVEL3 = 2,
+ RTW89_DIG_NOISY_LEVEL_MAX = 3,
+};
+
+enum rtw89_gi_ltf {
+ RTW89_GILTF_LGI_4XHE32 = 0,
+ RTW89_GILTF_SGI_4XHE08 = 1,
+ RTW89_GILTF_2XHE16 = 2,
+ RTW89_GILTF_2XHE08 = 3,
+ RTW89_GILTF_1XHE16 = 4,
+ RTW89_GILTF_1XHE08 = 5,
+ RTW89_GILTF_MAX
+};
+
+enum rtw89_rx_frame_type {
+ RTW89_RX_TYPE_MGNT = 0,
+ RTW89_RX_TYPE_CTRL = 1,
+ RTW89_RX_TYPE_DATA = 2,
+ RTW89_RX_TYPE_RSVD = 3,
+};
+
+struct rtw89_ra_info {
+ u8 is_dis_ra:1;
+ /* Bit0 : CCK
+ * Bit1 : OFDM
+ * Bit2 : HT
+ * Bit3 : VHT
+ * Bit4 : HE
+ */
+ u8 mode_ctrl:5;
+ u8 bw_cap:2;
+ u8 macid;
+ u8 dcm_cap:1;
+ u8 er_cap:1;
+ u8 init_rate_lv:2;
+ u8 upd_all:1;
+ u8 en_sgi:1;
+ u8 ldpc_cap:1;
+ u8 stbc_cap:1;
+ u8 ss_num:3;
+ u8 giltf:3;
+ u8 upd_bw_nss_mask:1;
+ u8 upd_mask:1;
+ u64 ra_mask; /* 63 bits ra_mask + 1 bit CSI ctrl */
+ /* BFee CSI */
+ u8 band_num;
+ u8 ra_csi_rate_en:1;
+ u8 fixed_csi_rate_en:1;
+ u8 cr_tbl_sel:1;
+ u8 rsvd2:5;
+ u8 csi_mcs_ss_idx;
+ u8 csi_mode:2;
+ u8 csi_gi_ltf:3;
+ u8 csi_bw:3;
+};
+
+#define RTW89_PPDU_MAX_USR 4
+#define RTW89_PPDU_MAC_INFO_USR_SIZE 4
+#define RTW89_PPDU_MAC_INFO_SIZE 8
+#define RTW89_PPDU_MAC_RX_CNT_SIZE 96
+
+#define RTW89_MAX_RX_AGG_NUM 64
+#define RTW89_MAX_TX_AGG_NUM 128
+
+struct rtw89_ampdu_params {
+ u16 agg_num;
+ bool amsdu;
+};
+
+struct rtw89_ra_report {
+ struct rate_info txrate;
+ u32 bit_rate;
+ u16 hw_rate;
+};
+
+DECLARE_EWMA(rssi, 10, 16);
+
+struct rtw89_sta {
+ u8 mac_id;
+ bool disassoc;
+ struct rtw89_vif *rtwvif;
+ struct rtw89_ra_info ra;
+ struct rtw89_ra_report ra_report;
+ int max_agg_wait;
+ u8 prev_rssi;
+ struct ewma_rssi avg_rssi;
+ struct rtw89_ampdu_params ampdu_params[IEEE80211_NUM_TIDS];
+ struct ieee80211_rx_status rx_status;
+ u16 rx_hw_rate;
+ __le32 htc_template;
+
+ bool use_cfg_mask;
+ struct cfg80211_bitrate_mask mask;
+
+ bool cctl_tx_time;
+ u32 ampdu_max_time:4;
+ bool cctl_tx_retry_limit;
+ u32 data_tx_cnt_lmt:6;
+};
+
+#define RTW89_MAX_ADDR_CAM_NUM 128
+#define RTW89_MAX_BSSID_CAM_NUM 20
+#define RTW89_MAX_SEC_CAM_NUM 128
+#define RTW89_SEC_CAM_IN_ADDR_CAM 7
+
+struct rtw89_addr_cam_entry {
+ u8 addr_cam_idx;
+ u8 offset;
+ u8 len;
+ u8 valid : 1;
+ u8 addr_mask : 6;
+ u8 wapi : 1;
+ u8 mask_sel : 2;
+ u8 bssid_cam_idx: 6;
+ u8 tma[ETH_ALEN];
+ u8 sma[ETH_ALEN];
+
+ u8 sec_ent_mode;
+ DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM);
+ u8 sec_ent_keyid[RTW89_SEC_CAM_IN_ADDR_CAM];
+ u8 sec_ent[RTW89_SEC_CAM_IN_ADDR_CAM];
+ struct rtw89_sec_cam_entry *sec_entries[RTW89_SEC_CAM_IN_ADDR_CAM];
+};
+
+struct rtw89_bssid_cam_entry {
+ u8 bssid[ETH_ALEN];
+ u8 phy_idx;
+ u8 bssid_cam_idx;
+ u8 offset;
+ u8 len;
+ u8 valid : 1;
+ u8 num;
+};
+
+struct rtw89_sec_cam_entry {
+ u8 sec_cam_idx;
+ u8 offset;
+ u8 len;
+ u8 type : 4;
+ u8 ext_key : 1;
+ u8 spp_mode : 1;
+ /* 256 bits */
+ u8 key[32];
+};
+
+struct rtw89_efuse {
+ bool valid;
+ u8 xtal_cap;
+ u8 addr[ETH_ALEN];
+ u8 rfe_type;
+ char country_code[2];
+};
+
+struct rtw89_phy_rate_pattern {
+ u64 ra_mask;
+ u16 rate;
+ u8 ra_mode;
+ bool enable;
+};
+
+struct rtw89_vif {
+ struct list_head list;
+ u8 mac_id;
+ u8 port;
+ u8 mac_addr[ETH_ALEN];
+ u8 bssid[ETH_ALEN];
+ u8 phy_idx;
+ u8 mac_idx;
+ u8 net_type;
+ u8 wifi_role;
+ u8 self_role;
+ u8 wmm;
+ u8 bcn_hit_cond;
+ u8 hit_rule;
+ bool trigger;
+ bool lsig_txop;
+ u8 tgt_ind;
+ u8 frm_tgt_ind;
+ bool wowlan_pattern;
+ bool wowlan_uc;
+ bool wowlan_magic;
+ bool is_hesta;
+ bool last_a_ctrl;
+ union {
+ struct {
+ struct ieee80211_sta *ap;
+ } mgd;
+ struct {
+ struct list_head sta_list;
+ } ap;
+ };
+ struct rtw89_addr_cam_entry addr_cam;
+ struct rtw89_bssid_cam_entry bssid_cam;
+ struct ieee80211_tx_queue_params tx_params[IEEE80211_NUM_ACS];
+ struct rtw89_traffic_stats stats;
+ struct rtw89_phy_rate_pattern rate_pattern;
+};
+
+enum rtw89_lv1_rcvy_step {
+ RTW89_LV1_RCVY_STEP_1,
+ RTW89_LV1_RCVY_STEP_2,
+};
+
+struct rtw89_hci_ops {
+ int (*tx_write)(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req);
+ void (*tx_kick_off)(struct rtw89_dev *rtwdev, u8 txch);
+ void (*flush_queues)(struct rtw89_dev *rtwdev, u32 queues, bool drop);
+ void (*reset)(struct rtw89_dev *rtwdev);
+ int (*start)(struct rtw89_dev *rtwdev);
+ void (*stop)(struct rtw89_dev *rtwdev);
+ void (*recalc_int_mit)(struct rtw89_dev *rtwdev);
+
+ u8 (*read8)(struct rtw89_dev *rtwdev, u32 addr);
+ u16 (*read16)(struct rtw89_dev *rtwdev, u32 addr);
+ u32 (*read32)(struct rtw89_dev *rtwdev, u32 addr);
+ void (*write8)(struct rtw89_dev *rtwdev, u32 addr, u8 data);
+ void (*write16)(struct rtw89_dev *rtwdev, u32 addr, u16 data);
+ void (*write32)(struct rtw89_dev *rtwdev, u32 addr, u32 data);
+
+ int (*mac_pre_init)(struct rtw89_dev *rtwdev);
+ int (*mac_post_init)(struct rtw89_dev *rtwdev);
+ int (*deinit)(struct rtw89_dev *rtwdev);
+
+ u32 (*check_and_reclaim_tx_resource)(struct rtw89_dev *rtwdev, u8 txch);
+ int (*mac_lv1_rcvy)(struct rtw89_dev *rtwdev, enum rtw89_lv1_rcvy_step step);
+ void (*dump_err_status)(struct rtw89_dev *rtwdev);
+ int (*napi_poll)(struct napi_struct *napi, int budget);
+};
+
+struct rtw89_hci_info {
+ const struct rtw89_hci_ops *ops;
+ enum rtw89_hci_type type;
+ u32 rpwm_addr;
+ u32 cpwm_addr;
+};
+
+struct rtw89_chip_ops {
+ void (*bb_reset)(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx);
+ void (*bb_sethw)(struct rtw89_dev *rtwdev);
+ u32 (*read_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
+ bool (*write_rf)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+ void (*set_channel)(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param);
+ void (*set_channel_help)(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p);
+ int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map);
+ int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map);
+ void (*fem_setup)(struct rtw89_dev *rtwdev);
+ void (*rfk_init)(struct rtw89_dev *rtwdev);
+ void (*rfk_channel)(struct rtw89_dev *rtwdev);
+ void (*rfk_band_changed)(struct rtw89_dev *rtwdev);
+ void (*rfk_scan)(struct rtw89_dev *rtwdev, bool start);
+ void (*rfk_track)(struct rtw89_dev *rtwdev);
+ void (*power_trim)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr)(struct rtw89_dev *rtwdev);
+ void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev);
+ int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+ u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path);
+ void (*ctrl_btg)(struct rtw89_dev *rtwdev, bool btg);
+ void (*query_ppdu)(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status);
+ void (*bb_ctrl_btc_preagc)(struct rtw89_dev *rtwdev, bool bt_en);
+ void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev,
+ s16 pw_ofst, enum rtw89_mac_idx mac_idx);
+
+ void (*btc_set_rfe)(struct rtw89_dev *rtwdev);
+ void (*btc_init_cfg)(struct rtw89_dev *rtwdev);
+ void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state);
+ void (*btc_set_wl_txpwr_ctrl)(struct rtw89_dev *rtwdev, u32 txpwr_val);
+ s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val);
+ void (*btc_bt_aci_imp)(struct rtw89_dev *rtwdev);
+ void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev);
+ void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state);
+};
+
+enum rtw89_dma_ch {
+ RTW89_DMA_ACH0 = 0,
+ RTW89_DMA_ACH1 = 1,
+ RTW89_DMA_ACH2 = 2,
+ RTW89_DMA_ACH3 = 3,
+ RTW89_DMA_ACH4 = 4,
+ RTW89_DMA_ACH5 = 5,
+ RTW89_DMA_ACH6 = 6,
+ RTW89_DMA_ACH7 = 7,
+ RTW89_DMA_B0MG = 8,
+ RTW89_DMA_B0HI = 9,
+ RTW89_DMA_B1MG = 10,
+ RTW89_DMA_B1HI = 11,
+ RTW89_DMA_H2C = 12,
+ RTW89_DMA_CH_NUM = 13
+};
+
+enum rtw89_qta_mode {
+ RTW89_QTA_SCC,
+ RTW89_QTA_DLFW,
+
+ /* keep last */
+ RTW89_QTA_INVALID,
+};
+
+struct rtw89_hfc_ch_cfg {
+ u16 min;
+ u16 max;
+#define grp_0 0
+#define grp_1 1
+#define grp_num 2
+ u8 grp;
+};
+
+struct rtw89_hfc_ch_info {
+ u16 aval;
+ u16 used;
+};
+
+struct rtw89_hfc_pub_cfg {
+ u16 grp0;
+ u16 grp1;
+ u16 pub_max;
+ u16 wp_thrd;
+};
+
+struct rtw89_hfc_pub_info {
+ u16 g0_used;
+ u16 g1_used;
+ u16 g0_aval;
+ u16 g1_aval;
+ u16 pub_aval;
+ u16 wp_aval;
+};
+
+struct rtw89_hfc_prec_cfg {
+ u16 ch011_prec;
+ u16 h2c_prec;
+ u16 wp_ch07_prec;
+ u16 wp_ch811_prec;
+ u8 ch011_full_cond;
+ u8 h2c_full_cond;
+ u8 wp_ch07_full_cond;
+ u8 wp_ch811_full_cond;
+};
+
+struct rtw89_hfc_param {
+ bool en;
+ bool h2c_en;
+ u8 mode;
+ const struct rtw89_hfc_ch_cfg *ch_cfg;
+ struct rtw89_hfc_ch_info ch_info[RTW89_DMA_CH_NUM];
+ struct rtw89_hfc_pub_cfg pub_cfg;
+ struct rtw89_hfc_pub_info pub_info;
+ struct rtw89_hfc_prec_cfg prec_cfg;
+};
+
+struct rtw89_hfc_param_ini {
+ const struct rtw89_hfc_ch_cfg *ch_cfg;
+ const struct rtw89_hfc_pub_cfg *pub_cfg;
+ const struct rtw89_hfc_prec_cfg *prec_cfg;
+ u8 mode;
+};
+
+struct rtw89_dle_size {
+ u16 pge_size;
+ u16 lnk_pge_num;
+ u16 unlnk_pge_num;
+};
+
+struct rtw89_wde_quota {
+ u16 hif;
+ u16 wcpu;
+ u16 pkt_in;
+ u16 cpu_io;
+};
+
+struct rtw89_ple_quota {
+ u16 cma0_tx;
+ u16 cma1_tx;
+ u16 c2h;
+ u16 h2c;
+ u16 wcpu;
+ u16 mpdu_proc;
+ u16 cma0_dma;
+ u16 cma1_dma;
+ u16 bb_rpt;
+ u16 wd_rel;
+ u16 cpu_io;
+};
+
+struct rtw89_dle_mem {
+ enum rtw89_qta_mode mode;
+ const struct rtw89_dle_size *wde_size;
+ const struct rtw89_dle_size *ple_size;
+ const struct rtw89_wde_quota *wde_min_qt;
+ const struct rtw89_wde_quota *wde_max_qt;
+ const struct rtw89_ple_quota *ple_min_qt;
+ const struct rtw89_ple_quota *ple_max_qt;
+};
+
+struct rtw89_reg_def {
+ u32 addr;
+ u32 mask;
+};
+
+struct rtw89_reg2_def {
+ u32 addr;
+ u32 data;
+};
+
+struct rtw89_reg3_def {
+ u32 addr;
+ u32 mask;
+ u32 data;
+};
+
+struct rtw89_reg5_def {
+ u8 flag; /* recognized by parsers */
+ u8 path;
+ u32 addr;
+ u32 mask;
+ u32 data;
+};
+
+struct rtw89_phy_table {
+ const struct rtw89_reg2_def *regs;
+ u32 n_regs;
+ enum rtw89_rf_path rf_path;
+};
+
+struct rtw89_txpwr_table {
+ const void *data;
+ u32 size;
+ void (*load)(struct rtw89_dev *rtwdev,
+ const struct rtw89_txpwr_table *tbl);
+};
+
+struct rtw89_chip_info {
+ enum rtw89_core_chip_id chip_id;
+ const struct rtw89_chip_ops *ops;
+ const char *fw_name;
+ u32 fifo_size;
+ u16 max_amsdu_limit;
+ bool dis_2g_40m_ul_ofdma;
+ const struct rtw89_hfc_param_ini *hfc_param_ini;
+ const struct rtw89_dle_mem *dle_mem;
+ u32 rf_base_addr[2];
+ u8 rf_path_num;
+ u8 tx_nss;
+ u8 rx_nss;
+ u8 acam_num;
+ u8 bcam_num;
+ u8 scam_num;
+
+ u8 sec_ctrl_efuse_size;
+ u32 physical_efuse_size;
+ u32 logical_efuse_size;
+ u32 limit_efuse_size;
+ u32 phycap_addr;
+ u32 phycap_size;
+
+ const struct rtw89_pwr_cfg * const *pwr_on_seq;
+ const struct rtw89_pwr_cfg * const *pwr_off_seq;
+ const struct rtw89_phy_table *bb_table;
+ const struct rtw89_phy_table *rf_table[RF_PATH_MAX];
+ const struct rtw89_phy_table *nctl_table;
+ const struct rtw89_txpwr_table *byr_table;
+ const struct rtw89_phy_dig_gain_table *dig_table;
+ const s8 (*txpwr_lmt_2g)[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+ const s8 (*txpwr_lmt_5g)[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+ const s8 (*txpwr_lmt_ru_2g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+ const s8 (*txpwr_lmt_ru_5g)[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+
+ u8 txpwr_factor_rf;
+ u8 txpwr_factor_mac;
+
+ u32 para_ver;
+ u32 wlcx_desired;
+ u8 btcx_desired;
+ u8 scbd;
+ u8 mailbox;
+
+ u8 afh_guard_ch;
+ const u8 *wl_rssi_thres;
+ const u8 *bt_rssi_thres;
+ u8 rssi_tol;
+
+ u8 mon_reg_num;
+ const struct rtw89_btc_fbtc_mreg *mon_reg;
+ u8 rf_para_ulink_num;
+ const struct rtw89_btc_rf_trx_para *rf_para_ulink;
+ u8 rf_para_dlink_num;
+ const struct rtw89_btc_rf_trx_para *rf_para_dlink;
+ u8 ps_mode_supported;
+};
+
+enum rtw89_hcifc_mode {
+ RTW89_HCIFC_POH = 0,
+ RTW89_HCIFC_STF = 1,
+ RTW89_HCIFC_SDIO = 2,
+
+ /* keep last */
+ RTW89_HCIFC_MODE_INVALID,
+};
+
+struct rtw89_dle_info {
+ enum rtw89_qta_mode qta_mode;
+ u16 wde_pg_size;
+ u16 ple_pg_size;
+ u16 c0_rx_qta;
+ u16 c1_rx_qta;
+};
+
+enum rtw89_host_rpr_mode {
+ RTW89_RPR_MODE_POH = 0,
+ RTW89_RPR_MODE_STF
+};
+
+struct rtw89_mac_info {
+ struct rtw89_dle_info dle_info;
+ struct rtw89_hfc_param hfc_param;
+ enum rtw89_qta_mode qta_mode;
+ u8 rpwm_seq_num;
+ u8 cpwm_seq_num;
+};
+
+enum rtw89_fw_type {
+ RTW89_FW_NORMAL = 1,
+ RTW89_FW_WOWLAN = 3,
+};
+
+struct rtw89_fw_suit {
+ const u8 *data;
+ u32 size;
+ u8 major_ver;
+ u8 minor_ver;
+ u8 sub_ver;
+ u8 sub_idex;
+ u16 build_year;
+ u16 build_mon;
+ u16 build_date;
+ u16 build_hour;
+ u16 build_min;
+ u8 cmd_ver;
+};
+
+#define RTW89_FW_VER_CODE(major, minor, sub, idx) \
+ (((major) << 24) | ((minor) << 16) | ((sub) << 8) | (idx))
+#define RTW89_FW_SUIT_VER_CODE(s) \
+ RTW89_FW_VER_CODE((s)->major_ver, (s)->minor_ver, (s)->sub_ver, (s)->sub_idex)
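+
+/*
+ * Editor's note: the macros above pack major.minor.sub.idx into one u32 so
+ * firmware versions compare as plain integers. For example (hypothetical
+ * feature check, not from this patch):
+ *
+ *	// 0.13.8.0 packs to 0x000d0800
+ *	if (RTW89_FW_SUIT_VER_CODE(fw_suit) >= RTW89_FW_VER_CODE(0, 13, 8, 0))
+ *		enable_new_ra_format(rtwdev);
+ */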
+
+struct rtw89_fw_info {
+ const struct firmware *firmware;
+ struct rtw89_dev *rtwdev;
+ struct completion completion;
+ u8 h2c_seq;
+ u8 rec_seq;
+ struct rtw89_fw_suit normal;
+ struct rtw89_fw_suit wowlan;
+ bool fw_log_enable;
+ bool old_ht_ra_format;
+};
+
+struct rtw89_cam_info {
+ DECLARE_BITMAP(addr_cam_map, RTW89_MAX_ADDR_CAM_NUM);
+ DECLARE_BITMAP(bssid_cam_map, RTW89_MAX_BSSID_CAM_NUM);
+ DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM);
+};
+
+enum rtw89_sar_sources {
+ RTW89_SAR_SOURCE_NONE,
+ RTW89_SAR_SOURCE_COMMON,
+
+ RTW89_SAR_SOURCE_NR,
+};
+
+struct rtw89_sar_cfg_common {
+ bool set[RTW89_SUBBAND_NR];
+ s32 cfg[RTW89_SUBBAND_NR];
+};
+
+struct rtw89_sar_info {
+ /* used to decide how to access SAR cfg union */
+ enum rtw89_sar_sources src;
+
+ /* reserved for different kinds of SAR cfg struct.
+ * We assume a single cfg struct cannot handle the various SAR sources.
+ */
+ union {
+ struct rtw89_sar_cfg_common cfg_common;
+ };
+};
+
+struct rtw89_hal {
+ u32 rx_fltr;
+ u8 cv;
+ u8 current_channel;
+ u8 current_primary_channel;
+ enum rtw89_subband current_subband;
+ u8 current_band_width;
+ u8 current_band_type;
+ /* center channel for each available bandwidth;
+ * entries for (bw > current_band_width) are invalid
+ */
+ u8 cch_by_bw[RTW89_MAX_CHANNEL_WIDTH + 1];
+ u32 sw_amsdu_max_size;
+ u32 antenna_tx;
+ u32 antenna_rx;
+ u8 tx_nss;
+ u8 rx_nss;
+};
+
+#define RTW89_MAX_MAC_ID_NUM 128
+
+enum rtw89_flags {
+ RTW89_FLAG_POWERON,
+ RTW89_FLAG_FW_RDY,
+ RTW89_FLAG_RUNNING,
+ RTW89_FLAG_BFEE_MON,
+ RTW89_FLAG_BFEE_EN,
+ RTW89_FLAG_NAPI_RUNNING,
+ RTW89_FLAG_LEISURE_PS,
+ RTW89_FLAG_LOW_POWER_MODE,
+ RTW89_FLAG_INACTIVE_PS,
+
+ NUM_OF_RTW89_FLAGS,
+};
+
+struct rtw89_pkt_stat {
+ u16 beacon_nr;
+ u32 rx_rate_cnt[RTW89_HW_RATE_NR];
+};
+
+DECLARE_EWMA(thermal, 4, 4);
+
+struct rtw89_phy_stat {
+ struct ewma_thermal avg_thermal[RF_PATH_MAX];
+ struct rtw89_pkt_stat cur_pkt_stat;
+ struct rtw89_pkt_stat last_pkt_stat;
+};
+
+#define RTW89_DACK_PATH_NR 2
+#define RTW89_DACK_IDX_NR 2
+#define RTW89_DACK_MSBK_NR 16
+struct rtw89_dack_info {
+ bool dack_done;
+ u8 msbk_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR][RTW89_DACK_MSBK_NR];
+ u8 dadck_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR];
+ u16 addck_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR];
+ u16 biask_d[RTW89_DACK_PATH_NR][RTW89_DACK_IDX_NR];
+ u32 dack_cnt;
+ bool addck_timeout[RTW89_DACK_PATH_NR];
+ bool dadck_timeout[RTW89_DACK_PATH_NR];
+ bool msbk_timeout[RTW89_DACK_PATH_NR];
+};
+
+#define RTW89_IQK_CHS_NR 2
+#define RTW89_IQK_PATH_NR 4
+struct rtw89_iqk_info {
+ bool lok_cor_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ bool lok_fin_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ bool iqk_tx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ bool iqk_rx_fail[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ u32 iqk_fail_cnt;
+ bool is_iqk_init;
+ u32 iqk_channel[RTW89_IQK_CHS_NR];
+ u8 iqk_band[RTW89_IQK_PATH_NR];
+ u8 iqk_ch[RTW89_IQK_PATH_NR];
+ u8 iqk_bw[RTW89_IQK_PATH_NR];
+ u8 kcount;
+ u8 iqk_times;
+ u8 version;
+ u32 nb_txcfir[RTW89_IQK_PATH_NR];
+ u32 nb_rxcfir[RTW89_IQK_PATH_NR];
+ u32 bp_txkresult[RTW89_IQK_PATH_NR];
+ u32 bp_rxkresult[RTW89_IQK_PATH_NR];
+ u32 bp_iqkenable[RTW89_IQK_PATH_NR];
+ bool is_wb_txiqk[RTW89_IQK_PATH_NR];
+ bool is_wb_rxiqk[RTW89_IQK_PATH_NR];
+ bool is_nbiqk;
+ bool iqk_fft_en;
+ bool iqk_xym_en;
+ bool iqk_sram_en;
+ bool iqk_cfir_en;
+ u8 thermal[RTW89_IQK_PATH_NR];
+ bool thermal_rek_en;
+ u32 syn1to2;
+ u8 iqk_mcc_ch[RTW89_IQK_CHS_NR][RTW89_IQK_PATH_NR];
+ u8 iqk_table_idx[RTW89_IQK_PATH_NR];
+};
+
+#define RTW89_DPK_RF_PATH 2
+#define RTW89_DPK_AVG_THERMAL_NUM 8
+#define RTW89_DPK_BKUP_NUM 2
+struct rtw89_dpk_bkup_para {
+ enum rtw89_band band;
+ enum rtw89_bandwidth bw;
+ u8 ch;
+ bool path_ok;
+ u8 txagc_dpk;
+ u8 ther_dpk;
+ u8 gs;
+ u16 pwsf;
+};
+
+struct rtw89_dpk_info {
+ bool is_dpk_enable;
+ bool is_dpk_reload_en;
+ u16 dc_i[RTW89_DPK_RF_PATH];
+ u16 dc_q[RTW89_DPK_RF_PATH];
+ u8 corr_val[RTW89_DPK_RF_PATH];
+ u8 corr_idx[RTW89_DPK_RF_PATH];
+ u8 cur_idx[RTW89_DPK_RF_PATH];
+ struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM];
+};
+
+struct rtw89_fem_info {
+ bool elna_2g;
+ bool elna_5g;
+ bool epa_2g;
+ bool epa_5g;
+};
+
+struct rtw89_phy_ch_info {
+ u8 rssi_min;
+ u16 rssi_min_macid;
+ u8 pre_rssi_min;
+ u8 rssi_max;
+ u16 rssi_max_macid;
+ u8 rxsc_160;
+ u8 rxsc_80;
+ u8 rxsc_40;
+ u8 rxsc_20;
+ u8 rxsc_l;
+ u8 is_noisy;
+};
+
+struct rtw89_agc_gaincode_set {
+ u8 lna_idx;
+ u8 tia_idx;
+ u8 rxb_idx;
+};
+
+#define IGI_RSSI_TH_NUM 5
+#define FA_TH_NUM 4
+#define LNA_GAIN_NUM 7
+#define TIA_GAIN_NUM 2
+struct rtw89_dig_info {
+ struct rtw89_agc_gaincode_set cur_gaincode;
+ bool force_gaincode_idx_en;
+ struct rtw89_agc_gaincode_set force_gaincode;
+ u8 igi_rssi_th[IGI_RSSI_TH_NUM];
+ u16 fa_th[FA_TH_NUM];
+ u8 igi_rssi;
+ u8 igi_fa_rssi;
+ u8 fa_rssi_ofst;
+ u8 dyn_igi_max;
+ u8 dyn_igi_min;
+ bool dyn_pd_th_en;
+ u8 dyn_pd_th_max;
+ u8 pd_low_th_ofst;
+ u8 ib_pbk;
+ s8 ib_pkpwr;
+ s8 lna_gain_a[LNA_GAIN_NUM];
+ s8 lna_gain_g[LNA_GAIN_NUM];
+ s8 *lna_gain;
+ s8 tia_gain_a[TIA_GAIN_NUM];
+ s8 tia_gain_g[TIA_GAIN_NUM];
+ s8 *tia_gain;
+ bool is_linked_pre;
+ bool bypass_dig;
+};
+
+enum rtw89_multi_cfo_mode {
+ RTW89_PKT_BASED_AVG_MODE = 0,
+ RTW89_ENTRY_BASED_AVG_MODE = 1,
+ RTW89_TP_BASED_AVG_MODE = 2,
+};
+
+enum rtw89_phy_cfo_status {
+ RTW89_PHY_DCFO_STATE_NORMAL = 0,
+ RTW89_PHY_DCFO_STATE_ENHANCE = 1,
+ RTW89_PHY_DCFO_STATE_MAX
+};
+
+struct rtw89_cfo_tracking_info {
+ u16 cfo_timer_ms;
+ bool cfo_trig_by_timer_en;
+ enum rtw89_phy_cfo_status phy_cfo_status;
+ u8 phy_cfo_trk_cnt;
+ bool is_adjust;
+ enum rtw89_multi_cfo_mode rtw89_multi_cfo_mode;
+ bool apply_compensation;
+ u8 crystal_cap;
+ u8 crystal_cap_default;
+ u8 def_x_cap;
+ s8 x_cap_ofst;
+ u32 sta_cfo_tolerance;
+ s32 cfo_tail[CFO_TRACK_MAX_USER];
+ u16 cfo_cnt[CFO_TRACK_MAX_USER];
+ s32 cfo_avg_pre;
+ s32 cfo_avg[CFO_TRACK_MAX_USER];
+ s32 pre_cfo_avg[CFO_TRACK_MAX_USER];
+ u32 packet_count;
+ u32 packet_count_pre;
+ s32 residual_cfo_acc;
+ u8 phy_cfotrk_state;
+ u8 phy_cfotrk_cnt;
+};
+
+/* 2GL, 2GH, 5GL1, 5GL2, 5GM1, 5GM2, 5GH1, 5GH2 */
+#define TSSI_TRIM_CH_GROUP_NUM 8
+
+#define TSSI_CCK_CH_GROUP_NUM 6
+#define TSSI_MCS_2G_CH_GROUP_NUM 5
+#define TSSI_MCS_5G_CH_GROUP_NUM 14
+#define TSSI_MCS_CH_GROUP_NUM \
+ (TSSI_MCS_2G_CH_GROUP_NUM + TSSI_MCS_5G_CH_GROUP_NUM)
+
+struct rtw89_tssi_info {
+ u8 thermal[RF_PATH_MAX];
+ s8 tssi_trim[RF_PATH_MAX][TSSI_TRIM_CH_GROUP_NUM];
+ s8 tssi_cck[RF_PATH_MAX][TSSI_CCK_CH_GROUP_NUM];
+ s8 tssi_mcs[RF_PATH_MAX][TSSI_MCS_CH_GROUP_NUM];
+ s8 extra_ofst[RF_PATH_MAX];
+ bool tssi_tracking_check[RF_PATH_MAX];
+ u8 default_txagc_offset[RF_PATH_MAX];
+ u32 base_thermal[RF_PATH_MAX];
+};
+
+struct rtw89_power_trim_info {
+ bool pg_thermal_trim;
+ bool pg_pa_bias_trim;
+ u8 thermal_trim[RF_PATH_MAX];
+ u8 pa_bias_trim[RF_PATH_MAX];
+};
+
+struct rtw89_regulatory {
+ char alpha2[3];
+ u8 txpwr_regd[RTW89_BAND_MAX];
+};
+
+enum rtw89_ifs_clm_application {
+ RTW89_IFS_CLM_INIT = 0,
+ RTW89_IFS_CLM_BACKGROUND = 1,
+ RTW89_IFS_CLM_ACS = 2,
+ RTW89_IFS_CLM_DIG = 3,
+ RTW89_IFS_CLM_TDMA_DIG = 4,
+ RTW89_IFS_CLM_DBG = 5,
+ RTW89_IFS_CLM_DBG_MANUAL = 6
+};
+
+enum rtw89_env_racing_lv {
+ RTW89_RAC_RELEASE = 0,
+ RTW89_RAC_LV_1 = 1,
+ RTW89_RAC_LV_2 = 2,
+ RTW89_RAC_LV_3 = 3,
+ RTW89_RAC_LV_4 = 4,
+ RTW89_RAC_MAX_NUM = 5
+};
+
+struct rtw89_ccx_para_info {
+ enum rtw89_env_racing_lv rac_lv;
+ u16 mntr_time;
+ u8 nhm_manual_th_ofst;
+ u8 nhm_manual_th0;
+ enum rtw89_ifs_clm_application ifs_clm_app;
+ u32 ifs_clm_manual_th_times;
+ u32 ifs_clm_manual_th0;
+ u8 fahm_manual_th_ofst;
+ u8 fahm_manual_th0;
+ u8 fahm_numer_opt;
+ u8 fahm_denom_opt;
+};
+
+enum rtw89_ccx_edcca_opt_sc_idx {
+ RTW89_CCX_EDCCA_SEG0_P0 = 0,
+ RTW89_CCX_EDCCA_SEG0_S1 = 1,
+ RTW89_CCX_EDCCA_SEG0_S2 = 2,
+ RTW89_CCX_EDCCA_SEG0_S3 = 3,
+ RTW89_CCX_EDCCA_SEG1_P0 = 4,
+ RTW89_CCX_EDCCA_SEG1_S1 = 5,
+ RTW89_CCX_EDCCA_SEG1_S2 = 6,
+ RTW89_CCX_EDCCA_SEG1_S3 = 7
+};
+
+enum rtw89_ccx_edcca_opt_bw_idx {
+ RTW89_CCX_EDCCA_BW20_0 = 0,
+ RTW89_CCX_EDCCA_BW20_1 = 1,
+ RTW89_CCX_EDCCA_BW20_2 = 2,
+ RTW89_CCX_EDCCA_BW20_3 = 3,
+ RTW89_CCX_EDCCA_BW20_4 = 4,
+ RTW89_CCX_EDCCA_BW20_5 = 5,
+ RTW89_CCX_EDCCA_BW20_6 = 6,
+ RTW89_CCX_EDCCA_BW20_7 = 7
+};
+
+#define RTW89_NHM_TH_NUM 11
+#define RTW89_FAHM_TH_NUM 11
+#define RTW89_NHM_RPT_NUM 12
+#define RTW89_FAHM_RPT_NUM 12
+#define RTW89_IFS_CLM_NUM 4
+struct rtw89_env_monitor_info {
+ u32 ccx_trigger_time;
+ u64 start_time;
+ u8 ccx_rpt_stamp;
+ u8 ccx_watchdog_result;
+ bool ccx_ongoing;
+ u8 ccx_rac_lv;
+ bool ccx_manual_ctrl;
+ u8 ccx_pre_rssi;
+ u16 clm_mntr_time;
+ u16 nhm_mntr_time;
+ u16 ifs_clm_mntr_time;
+ enum rtw89_ifs_clm_application ifs_clm_app;
+ u16 fahm_mntr_time;
+ u16 edcca_clm_mntr_time;
+ u16 ccx_period;
+ u8 ccx_unit_idx;
+ enum rtw89_ccx_edcca_opt_bw_idx ccx_edcca_opt_bw_idx;
+ u8 nhm_th[RTW89_NHM_TH_NUM];
+ u16 ifs_clm_th_l[RTW89_IFS_CLM_NUM];
+ u16 ifs_clm_th_h[RTW89_IFS_CLM_NUM];
+ u8 fahm_numer_opt;
+ u8 fahm_denom_opt;
+ u8 fahm_th[RTW89_FAHM_TH_NUM];
+ u16 clm_result;
+ u16 nhm_result[RTW89_NHM_RPT_NUM];
+ u8 nhm_wgt[RTW89_NHM_RPT_NUM];
+ u16 nhm_tx_cnt;
+ u16 nhm_cca_cnt;
+ u16 nhm_idle_cnt;
+ u16 ifs_clm_tx;
+ u16 ifs_clm_edcca_excl_cca;
+ u16 ifs_clm_ofdmfa;
+ u16 ifs_clm_ofdmcca_excl_fa;
+ u16 ifs_clm_cckfa;
+ u16 ifs_clm_cckcca_excl_fa;
+ u16 ifs_clm_total_ifs;
+ u8 ifs_clm_his[RTW89_IFS_CLM_NUM];
+ u16 ifs_clm_avg[RTW89_IFS_CLM_NUM];
+ u16 ifs_clm_cca[RTW89_IFS_CLM_NUM];
+ u16 fahm_result[RTW89_FAHM_RPT_NUM];
+ u16 fahm_denom_result;
+ u16 edcca_clm_result;
+ u8 clm_ratio;
+ u8 nhm_rpt[RTW89_NHM_RPT_NUM];
+ u8 nhm_tx_ratio;
+ u8 nhm_cca_ratio;
+ u8 nhm_idle_ratio;
+ u8 nhm_ratio;
+ u16 nhm_result_sum;
+ u8 nhm_pwr;
+ u8 ifs_clm_tx_ratio;
+ u8 ifs_clm_edcca_excl_cca_ratio;
+ u8 ifs_clm_cck_fa_ratio;
+ u8 ifs_clm_ofdm_fa_ratio;
+ u8 ifs_clm_cck_cca_excl_fa_ratio;
+ u8 ifs_clm_ofdm_cca_excl_fa_ratio;
+ u16 ifs_clm_cck_fa_permil;
+ u16 ifs_clm_ofdm_fa_permil;
+ u32 ifs_clm_ifs_avg[RTW89_IFS_CLM_NUM];
+ u32 ifs_clm_cca_avg[RTW89_IFS_CLM_NUM];
+ u8 fahm_rpt[RTW89_FAHM_RPT_NUM];
+ u16 fahm_result_sum;
+ u8 fahm_ratio;
+ u8 fahm_denom_ratio;
+ u8 fahm_pwr;
+ u8 edcca_clm_ratio;
+};
+
+enum rtw89_ser_rcvy_step {
+ RTW89_SER_DRV_STOP_TX,
+ RTW89_SER_DRV_STOP_RX,
+ RTW89_SER_DRV_STOP_RUN,
+ RTW89_SER_HAL_STOP_DMA,
+ RTW89_NUM_OF_SER_FLAGS
+};
+
+struct rtw89_ser {
+ u8 state;
+ u8 alarm_event;
+
+ struct work_struct ser_hdl_work;
+ struct delayed_work ser_alarm_work;
+ struct state_ent *st_tbl;
+ struct event_ent *ev_tbl;
+ struct list_head msg_q;
+ spinlock_t msg_q_lock; /* lock when read/write ser msg */
+ DECLARE_BITMAP(flags, RTW89_NUM_OF_SER_FLAGS);
+};
+
+enum rtw89_mac_ax_ps_mode {
+ RTW89_MAC_AX_PS_MODE_ACTIVE = 0,
+ RTW89_MAC_AX_PS_MODE_LEGACY = 1,
+ RTW89_MAC_AX_PS_MODE_WMMPS = 2,
+ RTW89_MAC_AX_PS_MODE_MAX = 3,
+};
+
+enum rtw89_last_rpwm_mode {
+ RTW89_LAST_RPWM_PS = 0x0,
+ RTW89_LAST_RPWM_ACTIVE = 0x6,
+};
+
+struct rtw89_lps_parm {
+ u8 macid;
+ u8 psmode; /* enum rtw89_mac_ax_ps_mode */
+ u8 lastrpwm; /* enum rtw89_last_rpwm_mode */
+};
+
+struct rtw89_ppdu_sts_info {
+ struct sk_buff_head rx_queue[RTW89_PHY_MAX];
+ u8 curr_rx_ppdu_cnt[RTW89_PHY_MAX];
+};
+
+struct rtw89_early_h2c {
+ struct list_head list;
+ u8 *h2c;
+ u16 h2c_len;
+};
+
+struct rtw89_dev {
+ struct ieee80211_hw *hw;
+ struct device *dev;
+
+ bool dbcc_en;
+ const struct rtw89_chip_info *chip;
+ struct rtw89_hal hal;
+ struct rtw89_mac_info mac;
+ struct rtw89_fw_info fw;
+ struct rtw89_hci_info hci;
+ struct rtw89_efuse efuse;
+ struct rtw89_traffic_stats stats;
+
+ /* ensures exclusive access from mac80211 callbacks */
+ struct mutex mutex;
+ struct list_head rtwvifs_list;
+ /* used to protect rf read write */
+ struct mutex rf_mutex;
+ struct workqueue_struct *txq_wq;
+ struct work_struct txq_work;
+ struct delayed_work txq_reinvoke_work;
+ /* used to protect ba_list */
+ spinlock_t ba_lock;
+ /* txqs to setup ba session */
+ struct list_head ba_list;
+ struct work_struct ba_work;
+
+ struct rtw89_cam_info cam_info;
+
+ struct sk_buff_head c2h_queue;
+ struct work_struct c2h_work;
+
+ struct list_head early_h2c_list;
+
+ struct rtw89_ser ser;
+
+ DECLARE_BITMAP(hw_port, RTW89_MAX_HW_PORT_NUM);
+ DECLARE_BITMAP(mac_id_map, RTW89_MAX_MAC_ID_NUM);
+ DECLARE_BITMAP(flags, NUM_OF_RTW89_FLAGS);
+
+ struct rtw89_phy_stat phystat;
+ struct rtw89_dack_info dack;
+ struct rtw89_iqk_info iqk;
+ struct rtw89_dpk_info dpk;
+ bool is_tssi_mode[RF_PATH_MAX];
+ bool is_bt_iqk_timeout;
+
+ struct rtw89_fem_info fem;
+ struct rtw89_txpwr_byrate byr[RTW89_BAND_MAX];
+ struct rtw89_tssi_info tssi;
+ struct rtw89_power_trim_info pwr_trim;
+
+ struct rtw89_cfo_tracking_info cfo_tracking;
+ struct rtw89_env_monitor_info env_monitor;
+ struct rtw89_dig_info dig;
+ struct rtw89_phy_ch_info ch_info;
+ struct delayed_work track_work;
+ struct delayed_work coex_act1_work;
+ struct delayed_work coex_bt_devinfo_work;
+ struct delayed_work coex_rfk_chk_work;
+ struct delayed_work cfo_track_work;
+ struct rtw89_ppdu_sts_info ppdu_sts;
+ u8 total_sta_assoc;
+ bool scanning;
+
+ const struct rtw89_regulatory *regd;
+ struct rtw89_sar_info sar;
+
+ struct rtw89_btc btc;
+ enum rtw89_ps_mode ps_mode;
+ bool lps_enabled;
+
+ /* napi structure */
+ struct net_device netdev;
+ struct napi_struct napi;
+ int napi_budget_countdown;
+
+ /* HCI related data, keep last */
+ u8 priv[0] __aligned(sizeof(void *));
+};
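+
+/*
+ * Editor's sketch of how the trailing priv[] area is intended to be used:
+ * the HCI backend allocates ieee80211_hw with room for rtw89_dev plus its
+ * own private data and recovers that data from rtwdev->priv. The rtw89_pci
+ * type and the rtw89_ops mac80211 ops table are assumed here purely for
+ * illustration:
+ *
+ *	hw = ieee80211_alloc_hw(sizeof(struct rtw89_dev) +
+ *				sizeof(struct rtw89_pci), &rtw89_ops);
+ *	rtwdev = hw->priv;
+ *	rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ */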
+
+static inline int rtw89_hci_tx_write(struct rtw89_dev *rtwdev,
+ struct rtw89_core_tx_request *tx_req)
+{
+ return rtwdev->hci.ops->tx_write(rtwdev, tx_req);
+}
+
+static inline void rtw89_hci_reset(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hci.ops->reset(rtwdev);
+}
+
+static inline int rtw89_hci_start(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.ops->start(rtwdev);
+}
+
+static inline void rtw89_hci_stop(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hci.ops->stop(rtwdev);
+}
+
+static inline int rtw89_hci_deinit(struct rtw89_dev *rtwdev)
+{
+ return rtwdev->hci.ops->deinit(rtwdev);
+}
+
+static inline void rtw89_hci_recalc_int_mit(struct rtw89_dev *rtwdev)
+{
+ rtwdev->hci.ops->recalc_int_mit(rtwdev);
+}
+
+static inline u32 rtw89_hci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev, u8 txch)
+{
+ return rtwdev->hci.ops->check_and_reclaim_tx_resource(rtwdev, txch);
+}
+
+static inline void rtw89_hci_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
+{
+ return rtwdev->hci.ops->tx_kick_off(rtwdev, txch);
+}
+
+static inline void rtw89_hci_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
+ bool drop)
+{
+ if (rtwdev->hci.ops->flush_queues)
+ return rtwdev->hci.ops->flush_queues(rtwdev, queues, drop);
+}
+
+static inline u8 rtw89_read8(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read8(rtwdev, addr);
+}
+
+static inline u16 rtw89_read16(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read16(rtwdev, addr);
+}
+
+static inline u32 rtw89_read32(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtwdev->hci.ops->read32(rtwdev, addr);
+}
+
+static inline void rtw89_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
+{
+ rtwdev->hci.ops->write8(rtwdev, addr, data);
+}
+
+static inline void rtw89_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
+{
+ rtwdev->hci.ops->write16(rtwdev, addr, data);
+}
+
+static inline void rtw89_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
+{
+ rtwdev->hci.ops->write32(rtwdev, addr, data);
+}
+
+static inline void
+rtw89_write8_set(struct rtw89_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw89_read8(rtwdev, addr);
+ rtw89_write8(rtwdev, addr, val | bit);
+}
+
+static inline void
+rtw89_write16_set(struct rtw89_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw89_read16(rtwdev, addr);
+ rtw89_write16(rtwdev, addr, val | bit);
+}
+
+static inline void
+rtw89_write32_set(struct rtw89_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw89_read32(rtwdev, addr);
+ rtw89_write32(rtwdev, addr, val | bit);
+}
+
+static inline void
+rtw89_write8_clr(struct rtw89_dev *rtwdev, u32 addr, u8 bit)
+{
+ u8 val;
+
+ val = rtw89_read8(rtwdev, addr);
+ rtw89_write8(rtwdev, addr, val & ~bit);
+}
+
+static inline void
+rtw89_write16_clr(struct rtw89_dev *rtwdev, u32 addr, u16 bit)
+{
+ u16 val;
+
+ val = rtw89_read16(rtwdev, addr);
+ rtw89_write16(rtwdev, addr, val & ~bit);
+}
+
+static inline void
+rtw89_write32_clr(struct rtw89_dev *rtwdev, u32 addr, u32 bit)
+{
+ u32 val;
+
+ val = rtw89_read32(rtwdev, addr);
+ rtw89_write32(rtwdev, addr, val & ~bit);
+}
+
+static inline u32
+rtw89_read32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw89_read32(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline u16
+rtw89_read16_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw89_read16(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline u8
+rtw89_read8_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 ret;
+
+ orig = rtw89_read8(rtwdev, addr);
+ ret = (orig & mask) >> shift;
+
+ return ret;
+}
+
+static inline void
+rtw89_write32_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u32 data)
+{
+ u32 shift = __ffs(mask);
+ u32 orig;
+ u32 set;
+
+ WARN(addr & 0x3, "should be 4-byte aligned, addr = 0x%08x\n", addr);
+
+ orig = rtw89_read32(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw89_write32(rtwdev, addr, set);
+}
+
+static inline void
+rtw89_write16_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u16 data)
+{
+ u32 shift;
+ u16 orig, set;
+
+ mask &= 0xffff;
+ shift = __ffs(mask);
+
+ orig = rtw89_read16(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw89_write16(rtwdev, addr, set);
+}
+
+static inline void
+rtw89_write8_mask(struct rtw89_dev *rtwdev, u32 addr, u32 mask, u8 data)
+{
+ u32 shift;
+ u8 orig, set;
+
+ mask &= 0xff;
+ shift = __ffs(mask);
+
+ orig = rtw89_read8(rtwdev, addr);
+ set = (orig & ~mask) | ((data << shift) & mask);
+ rtw89_write8(rtwdev, addr, set);
+}
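+
+/*
+ * Editor's example of the *_mask helpers above: the mask selects a bit
+ * field and data is shifted into place by __ffs(mask), so
+ *
+ *	u32 v = rtw89_read32_mask(rtwdev, addr, GENMASK(11, 8));
+ *
+ *	rtw89_write32_mask(rtwdev, addr, GENMASK(11, 8), v + 1);
+ *
+ * reads bits [11:8] of a 4-byte aligned register and writes the field back
+ * incremented, leaving the other bits untouched (addr is a placeholder).
+ */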
+
+static inline u32
+rtw89_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ u32 val;
+
+ mutex_lock(&rtwdev->rf_mutex);
+ val = rtwdev->chip->ops->read_rf(rtwdev, rf_path, addr, mask);
+ mutex_unlock(&rtwdev->rf_mutex);
+
+ return val;
+}
+
+static inline void
+rtw89_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ mutex_lock(&rtwdev->rf_mutex);
+ rtwdev->chip->ops->write_rf(rtwdev, rf_path, addr, mask, data);
+ mutex_unlock(&rtwdev->rf_mutex);
+}
+
+static inline struct ieee80211_txq *rtw89_txq_to_txq(struct rtw89_txq *rtwtxq)
+{
+ void *p = rtwtxq;
+
+ return container_of(p, struct ieee80211_txq, drv_priv);
+}
+
+static inline void rtw89_core_txq_init(struct rtw89_dev *rtwdev,
+ struct ieee80211_txq *txq)
+{
+ struct rtw89_txq *rtwtxq;
+
+ if (!txq)
+ return;
+
+ rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+ INIT_LIST_HEAD(&rtwtxq->list);
+}
+
+static inline struct ieee80211_vif *rtwvif_to_vif(struct rtw89_vif *rtwvif)
+{
+ void *p = rtwvif;
+
+ return container_of(p, struct ieee80211_vif, drv_priv);
+}
+
+static inline struct ieee80211_sta *rtwsta_to_sta(struct rtw89_sta *rtwsta)
+{
+ void *p = rtwsta;
+
+ return container_of(p, struct ieee80211_sta, drv_priv);
+}
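+
+/*
+ * Editor's note: the three helpers above rely on mac80211 embedding the
+ * driver-private area right after the public struct, so container_of() on
+ * drv_priv recovers the outer object. Going the other way is a plain cast:
+ *
+ *	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ *
+ * and rtwsta_to_sta(rtwsta) then yields the original sta again.
+ */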
+
+static inline
+void rtw89_chip_set_channel_prepare(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_help_params *p)
+{
+ rtwdev->chip->ops->set_channel_help(rtwdev, true, p);
+}
+
+static inline
+void rtw89_chip_set_channel_done(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_help_params *p)
+{
+ rtwdev->chip->ops->set_channel_help(rtwdev, false, p);
+}
+
+static inline void rtw89_chip_fem_setup(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->fem_setup)
+ chip->ops->fem_setup(rtwdev);
+}
+
+static inline void rtw89_chip_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->bb_sethw)
+ chip->ops->bb_sethw(rtwdev);
+}
+
+static inline void rtw89_chip_rfk_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_init)
+ chip->ops->rfk_init(rtwdev);
+}
+
+static inline void rtw89_chip_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_channel)
+ chip->ops->rfk_channel(rtwdev);
+}
+
+static inline void rtw89_chip_rfk_band_changed(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_band_changed)
+ chip->ops->rfk_band_changed(rtwdev);
+}
+
+static inline void rtw89_chip_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_scan)
+ chip->ops->rfk_scan(rtwdev, start);
+}
+
+static inline void rtw89_chip_rfk_track(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->rfk_track)
+ chip->ops->rfk_track(rtwdev);
+}
+
+static inline void rtw89_chip_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->set_txpwr_ctrl)
+ chip->ops->set_txpwr_ctrl(rtwdev);
+}
+
+static inline void rtw89_chip_set_txpwr(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 ch = rtwdev->hal.current_channel;
+
+ if (!ch)
+ return;
+
+ if (chip->ops->set_txpwr)
+ chip->ops->set_txpwr(rtwdev);
+}
+
+static inline void rtw89_chip_power_trim(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->power_trim)
+ chip->ops->power_trim(rtwdev);
+}
+
+static inline void rtw89_chip_init_txpwr_unit(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->init_txpwr_unit)
+ chip->ops->init_txpwr_unit(rtwdev, phy_idx);
+}
+
+static inline u8 rtw89_chip_get_thermal(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path rf_path)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!chip->ops->get_thermal)
+ return 0x10;
+
+ return chip->ops->get_thermal(rtwdev, rf_path);
+}
+
+static inline void rtw89_chip_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->query_ppdu)
+ chip->ops->query_ppdu(rtwdev, phy_ppdu, status);
+}
+
+static inline void rtw89_chip_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev,
+ bool bt_en)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->bb_ctrl_btc_preagc)
+ chip->ops->bb_ctrl_btc_preagc(rtwdev, bt_en);
+}
+
+static inline
+void rtw89_chip_cfg_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ return;
+
+ if (chip->ops->set_txpwr_ul_tb_offset)
+ chip->ops->set_txpwr_ul_tb_offset(rtwdev, 0, rtwvif->mac_idx);
+}
+
+static inline void rtw89_load_txpwr_table(struct rtw89_dev *rtwdev,
+ const struct rtw89_txpwr_table *tbl)
+{
+ tbl->load(rtwdev, tbl);
+}
+
+static inline u8 rtw89_regd_get(struct rtw89_dev *rtwdev, u8 band)
+{
+ return rtwdev->regd->txpwr_regd[band];
+}
+
+static inline void rtw89_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ if (chip->ops->ctrl_btg)
+ chip->ops->ctrl_btg(rtwdev, btg);
+}
+
+static inline u8 *get_hdr_bssid(struct ieee80211_hdr *hdr)
+{
+ __le16 fc = hdr->frame_control;
+
+ if (ieee80211_has_tods(fc))
+ return hdr->addr1;
+ else if (ieee80211_has_fromds(fc))
+ return hdr->addr2;
+ else
+ return hdr->addr3;
+}
+
+static inline bool rtw89_sta_has_beamformer_cap(struct ieee80211_sta *sta)
+{
+ if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) ||
+ (sta->he_cap.he_cap_elem.phy_cap_info[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ (sta->he_cap.he_cap_elem.phy_cap_info[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER))
+ return true;
+ return false;
+}
+
+static inline struct rtw89_fw_suit *rtw89_fw_suit_get(struct rtw89_dev *rtwdev,
+ enum rtw89_fw_type type)
+{
+ struct rtw89_fw_info *fw_info = &rtwdev->fw;
+
+ if (type == RTW89_FW_WOWLAN)
+ return &fw_info->wowlan;
+ return &fw_info->normal;
+}
+
+int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel);
+int rtw89_h2c_tx(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb, bool fwdl);
+void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
+void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+void rtw89_core_rx(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ struct sk_buff *skb);
+void rtw89_core_query_rxdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_desc_info *desc_info,
+ u8 *data, u32 data_offset);
+void rtw89_core_napi_start(struct rtw89_dev *rtwdev);
+void rtw89_core_napi_stop(struct rtw89_dev *rtwdev);
+void rtw89_core_napi_init(struct rtw89_dev *rtwdev);
+void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev);
+int rtw89_core_sta_add(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_core_sta_assoc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_core_sta_disassoc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_core_sta_disconnect(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_core_sta_remove(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_core_init(struct rtw89_dev *rtwdev);
+void rtw89_core_deinit(struct rtw89_dev *rtwdev);
+int rtw89_core_register(struct rtw89_dev *rtwdev);
+void rtw89_core_unregister(struct rtw89_dev *rtwdev);
+void rtw89_set_channel(struct rtw89_dev *rtwdev);
+u8 rtw89_core_acquire_bit_map(unsigned long *addr, unsigned long size);
+void rtw89_core_release_bit_map(unsigned long *addr, u8 bit);
+void rtw89_core_release_all_bits_map(unsigned long *addr, unsigned int nbits);
+void rtw89_vif_type_mapping(struct ieee80211_vif *vif, bool assoc);
+int rtw89_chip_info_setup(struct rtw89_dev *rtwdev);
+u16 rtw89_ra_report_to_bitrate(struct rtw89_dev *rtwdev, u8 rpt_rate);
+int rtw89_regd_init(struct rtw89_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy, struct regulatory_request *request));
+void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request);
+void rtw89_traffic_stats_init(struct rtw89_dev *rtwdev,
+ struct rtw89_traffic_stats *stats);
+int rtw89_core_start(struct rtw89_dev *rtwdev);
+void rtw89_core_stop(struct rtw89_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c
new file mode 100644
index 000000000000..29eb188c888c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/debug.c
@@ -0,0 +1,2489 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+#include "sar.h"
+
+#ifdef CONFIG_RTW89_DEBUGMSG
+unsigned int rtw89_debug_mask;
+EXPORT_SYMBOL(rtw89_debug_mask);
+module_param_named(debug_mask, rtw89_debug_mask, uint, 0644);
+MODULE_PARM_DESC(debug_mask, "Debugging mask");
+#endif
+
+#ifdef CONFIG_RTW89_DEBUGFS
+struct rtw89_debugfs_priv {
+ struct rtw89_dev *rtwdev;
+ int (*cb_read)(struct seq_file *m, void *v);
+ ssize_t (*cb_write)(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *loff);
+ union {
+ u32 cb_data;
+ struct {
+ u32 addr;
+ u8 len;
+ } read_reg;
+ struct {
+ u32 addr;
+ u32 mask;
+ u8 path;
+ } read_rf;
+ struct {
+ u8 ss_dbg:1;
+ u8 dle_dbg:1;
+ u8 dmac_dbg:1;
+ u8 cmac_dbg:1;
+ u8 dbg_port:1;
+ } dbgpkg_en;
+ struct {
+ u32 start;
+ u32 len;
+ u8 sel;
+ } mac_mem;
+ };
+};
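+
+/*
+ * Editor's sketch of the assumed wiring (the real registration helpers come
+ * later in this file): each debugfs entry pairs one rtw89_debugfs_priv with
+ * a file_ops table, roughly
+ *
+ *	static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
+ *		.cb_read  = rtw89_debug_priv_read_reg_get,
+ *		.cb_write = rtw89_debug_priv_read_reg_select,
+ *	};
+ *
+ * and is handed to debugfs_create_file() so that ->private reaches the
+ * show/write callbacks above.
+ */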
+
+static int rtw89_debugfs_single_show(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+
+ return debugfs_priv->cb_read(m, v);
+}
+
+static ssize_t rtw89_debugfs_single_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static ssize_t rtw89_debugfs_seq_file_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *seqpriv = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = seqpriv->private;
+
+ return debugfs_priv->cb_write(filp, buffer, count, loff);
+}
+
+static int rtw89_debugfs_single_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, rtw89_debugfs_single_show, inode->i_private);
+}
+
+static int rtw89_debugfs_close(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations file_ops_single_r = {
+ .owner = THIS_MODULE,
+ .open = rtw89_debugfs_single_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations file_ops_common_rw = {
+ .owner = THIS_MODULE,
+ .open = rtw89_debugfs_single_open,
+ .release = single_release,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = rtw89_debugfs_seq_file_write,
+};
+
+static const struct file_operations file_ops_single_w = {
+ .owner = THIS_MODULE,
+ .write = rtw89_debugfs_single_write,
+ .open = simple_open,
+ .release = rtw89_debugfs_close,
+};
+
+static ssize_t
+rtw89_debug_priv_read_reg_select(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ u32 addr, len;
+ int num;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%x %x", &addr, &len);
+ if (num != 2) {
+ rtw89_info(rtwdev, "invalid format: <addr> <len>\n");
+ return -EINVAL;
+ }
+
+ debugfs_priv->read_reg.addr = addr;
+ debugfs_priv->read_reg.len = len;
+
+ rtw89_info(rtwdev, "select read %d bytes from 0x%08x\n", len, addr);
+
+ return count;
+}
+
+static int rtw89_debug_priv_read_reg_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 addr, data;
+ u8 len;
+
+ len = debugfs_priv->read_reg.len;
+ addr = debugfs_priv->read_reg.addr;
+
+ switch (len) {
+ case 1:
+ data = rtw89_read8(rtwdev, addr);
+ break;
+ case 2:
+ data = rtw89_read16(rtwdev, addr);
+ break;
+ case 4:
+ data = rtw89_read32(rtwdev, addr);
+ break;
+ default:
+ rtw89_info(rtwdev, "invalid read reg len %d\n", len);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "get %d bytes at 0x%08x=0x%08x\n", len, addr, data);
+
+ return 0;
+}
+
+static ssize_t rtw89_debug_priv_write_reg_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ u32 addr, val, len;
+ int num;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%x %x %x", &addr, &val, &len);
+ if (num != 3) {
+ rtw89_info(rtwdev, "invalid format: <addr> <val> <len>\n");
+ return -EINVAL;
+ }
+
+ switch (len) {
+ case 1:
+ rtw89_info(rtwdev, "reg write8 0x%08x: 0x%02x\n", addr, val);
+ rtw89_write8(rtwdev, addr, (u8)val);
+ break;
+ case 2:
+ rtw89_info(rtwdev, "reg write16 0x%08x: 0x%04x\n", addr, val);
+ rtw89_write16(rtwdev, addr, (u16)val);
+ break;
+ case 4:
+ rtw89_info(rtwdev, "reg write32 0x%08x: 0x%08x\n", addr, val);
+ rtw89_write32(rtwdev, addr, (u32)val);
+ break;
+ default:
+ rtw89_info(rtwdev, "invalid write len %d\n", len);
+ break;
+ }
+
+ return count;
+}
+
+static ssize_t
+rtw89_debug_priv_read_rf_select(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ u32 addr, mask;
+ u8 path;
+ int num;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%hhd %x %x", &path, &addr, &mask);
+ if (num != 3) {
+ rtw89_info(rtwdev, "invalid format: <path> <addr> <mask>\n");
+ return -EINVAL;
+ }
+
+ if (path >= rtwdev->chip->rf_path_num) {
+ rtw89_info(rtwdev, "wrong rf path\n");
+ return -EINVAL;
+ }
+ debugfs_priv->read_rf.addr = addr;
+ debugfs_priv->read_rf.mask = mask;
+ debugfs_priv->read_rf.path = path;
+
+ rtw89_info(rtwdev, "select read rf path %d from 0x%08x\n", path, addr);
+
+ return count;
+}
+
+static int rtw89_debug_priv_read_rf_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ u32 addr, data, mask;
+ u8 path;
+
+ addr = debugfs_priv->read_rf.addr;
+ mask = debugfs_priv->read_rf.mask;
+ path = debugfs_priv->read_rf.path;
+
+ data = rtw89_read_rf(rtwdev, path, addr, mask);
+
+ seq_printf(m, "path %d, rf register 0x%08x=0x%08x\n", path, addr, data);
+
+ return 0;
+}
+
+static ssize_t rtw89_debug_priv_write_rf_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ u32 addr, val, mask;
+ u8 path;
+ int num;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%hhd %x %x %x", &path, &addr, &mask, &val);
+ if (num != 4) {
+ rtw89_info(rtwdev, "invalid format: <path> <addr> <mask> <val>\n");
+ return -EINVAL;
+ }
+
+ if (path >= rtwdev->chip->rf_path_num) {
+ rtw89_info(rtwdev, "wrong rf path\n");
+ return -EINVAL;
+ }
+
+ rtw89_info(rtwdev, "path %d, rf register write 0x%08x=0x%08x (mask = 0x%08x)\n",
+ path, addr, val, mask);
+ rtw89_write_rf(rtwdev, path, addr, mask, val);
+
+ return count;
+}
+
+static int rtw89_debug_priv_rf_reg_dump_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u32 addr, offset, data;
+ u8 path;
+
+ for (path = 0; path < chip->rf_path_num; path++) {
+ seq_printf(m, "RF path %d:\n\n", path);
+ for (addr = 0; addr < 0x100; addr += 4) {
+ seq_printf(m, "0x%08x: ", addr);
+ for (offset = 0; offset < 4; offset++) {
+ data = rtw89_read_rf(rtwdev, path,
+ addr + offset, RFREG_MASK);
+ seq_printf(m, "0x%05x ", data);
+ }
+ seq_puts(m, "\n");
+ }
+ seq_puts(m, "\n");
+ }
+
+ return 0;
+}
+
+struct txpwr_ent {
+ const char *txt;
+ u8 len;
+};
+
+struct txpwr_map {
+ const struct txpwr_ent *ent;
+ u8 size;
+ u32 addr_from;
+ u32 addr_to;
+};
+
+#define __GEN_TXPWR_ENT2(_t, _e0, _e1) \
+ { .len = 2, .txt = _t "\t- " _e0 " " _e1 }
+
+#define __GEN_TXPWR_ENT4(_t, _e0, _e1, _e2, _e3) \
+ { .len = 4, .txt = _t "\t- " _e0 " " _e1 " " _e2 " " _e3 }
+
+#define __GEN_TXPWR_ENT8(_t, _e0, _e1, _e2, _e3, _e4, _e5, _e6, _e7) \
+ { .len = 8, .txt = _t "\t- " \
+ _e0 " " _e1 " " _e2 " " _e3 " " \
+ _e4 " " _e5 " " _e6 " " _e7 }
+
+static const struct txpwr_ent __txpwr_ent_byr[] = {
+ __GEN_TXPWR_ENT4("CCK ", "1M ", "2M ", "5.5M ", "11M "),
+ __GEN_TXPWR_ENT4("LEGACY ", "6M ", "9M ", "12M ", "18M "),
+ __GEN_TXPWR_ENT4("LEGACY ", "24M ", "36M ", "48M ", "54M "),
+ /* 1NSS */
+ __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "),
+ __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "),
+ __GEN_TXPWR_ENT4("MCS_1NSS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"),
+ __GEN_TXPWR_ENT4("HEDCM_1NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "),
+ /* 2NSS */
+ __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS0 ", "MCS1 ", "MCS2 ", "MCS3 "),
+ __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS4 ", "MCS5 ", "MCS6 ", "MCS7 "),
+ __GEN_TXPWR_ENT4("MCS_2NSS ", "MCS8 ", "MCS9 ", "MCS10", "MCS11"),
+ __GEN_TXPWR_ENT4("HEDCM_2NSS", "MCS0 ", "MCS1 ", "MCS3 ", "MCS4 "),
+};
+
+static_assert((ARRAY_SIZE(__txpwr_ent_byr) * 4) ==
+ (R_AX_PWR_BY_RATE_MAX - R_AX_PWR_BY_RATE + 4));
+
+static const struct txpwr_map __txpwr_map_byr = {
+ .ent = __txpwr_ent_byr,
+ .size = ARRAY_SIZE(__txpwr_ent_byr),
+ .addr_from = R_AX_PWR_BY_RATE,
+ .addr_to = R_AX_PWR_BY_RATE_MAX,
+};
+
+static const struct txpwr_ent __txpwr_ent_lmt[] = {
+ /* 1TX */
+ __GEN_TXPWR_ENT2("CCK_1TX_20M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("CCK_1TX_40M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("OFDM_1TX ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_2 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_3 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_4 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_5 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_6 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_20M_7 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_2 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_3 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_80M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_80M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_160M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_0p5", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_1TX_40M_2p5", "NON_BF", "BF"),
+ /* 2TX */
+ __GEN_TXPWR_ENT2("CCK_2TX_20M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("CCK_2TX_40M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("OFDM_2TX ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_2 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_3 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_4 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_5 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_6 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_20M_7 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_2 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_3 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_80M_0 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_80M_1 ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_160M ", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_0p5", "NON_BF", "BF"),
+ __GEN_TXPWR_ENT2("MCS_2TX_40M_2p5", "NON_BF", "BF"),
+};
+
+static_assert((ARRAY_SIZE(__txpwr_ent_lmt) * 2) ==
+ (R_AX_PWR_LMT_MAX - R_AX_PWR_LMT + 4));
+
+static const struct txpwr_map __txpwr_map_lmt = {
+ .ent = __txpwr_ent_lmt,
+ .size = ARRAY_SIZE(__txpwr_ent_lmt),
+ .addr_from = R_AX_PWR_LMT,
+ .addr_to = R_AX_PWR_LMT_MAX,
+};
+
+static const struct txpwr_ent __txpwr_ent_lmt_ru[] = {
+ /* 1TX */
+ __GEN_TXPWR_ENT8("1TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3",
+ "RU26__4", "RU26__5", "RU26__6", "RU26__7"),
+ __GEN_TXPWR_ENT8("1TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3",
+ "RU52__4", "RU52__5", "RU52__6", "RU52__7"),
+ __GEN_TXPWR_ENT8("1TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3",
+ "RU106_4", "RU106_5", "RU106_6", "RU106_7"),
+ /* 2TX */
+ __GEN_TXPWR_ENT8("2TX", "RU26__0", "RU26__1", "RU26__2", "RU26__3",
+ "RU26__4", "RU26__5", "RU26__6", "RU26__7"),
+ __GEN_TXPWR_ENT8("2TX", "RU52__0", "RU52__1", "RU52__2", "RU52__3",
+ "RU52__4", "RU52__5", "RU52__6", "RU52__7"),
+ __GEN_TXPWR_ENT8("2TX", "RU106_0", "RU106_1", "RU106_2", "RU106_3",
+ "RU106_4", "RU106_5", "RU106_6", "RU106_7"),
+};
+
+static_assert((ARRAY_SIZE(__txpwr_ent_lmt_ru) * 8) ==
+ (R_AX_PWR_RU_LMT_MAX - R_AX_PWR_RU_LMT + 4));
+
+static const struct txpwr_map __txpwr_map_lmt_ru = {
+ .ent = __txpwr_ent_lmt_ru,
+ .size = ARRAY_SIZE(__txpwr_ent_lmt_ru),
+ .addr_from = R_AX_PWR_RU_LMT,
+ .addr_to = R_AX_PWR_RU_LMT_MAX,
+};
+
+static u8 __print_txpwr_ent(struct seq_file *m, const struct txpwr_ent *ent,
+ const u8 *buf, const u8 cur)
+{
+ char *fmt;
+
+ switch (ent->len) {
+ case 2:
+ fmt = "%s\t| %3d, %3d,\tdBm\n";
+ seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1]);
+ return 2;
+ case 4:
+ fmt = "%s\t| %3d, %3d, %3d, %3d,\tdBm\n";
+ seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
+ buf[cur + 2], buf[cur + 3]);
+ return 4;
+ case 8:
+ fmt = "%s\t| %3d, %3d, %3d, %3d, %3d, %3d, %3d, %3d,\tdBm\n";
+ seq_printf(m, fmt, ent->txt, buf[cur], buf[cur + 1],
+ buf[cur + 2], buf[cur + 3], buf[cur + 4],
+ buf[cur + 5], buf[cur + 6], buf[cur + 7]);
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+static int __print_txpwr_map(struct seq_file *m, struct rtw89_dev *rtwdev,
+ const struct txpwr_map *map)
+{
+ u8 fct = rtwdev->chip->txpwr_factor_mac;
+ u8 *buf, cur, i;
+ u32 val, addr;
+ int ret;
+
+ buf = vzalloc(map->addr_to - map->addr_from + 4);
+ if (!buf)
+ return -ENOMEM;
+
+ for (addr = map->addr_from; addr <= map->addr_to; addr += 4) {
+ ret = rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, addr, &val);
+ if (ret)
+ val = MASKDWORD;
+
+ cur = addr - map->addr_from;
+ for (i = 0; i < 4; i++, val >>= 8)
+ buf[cur + i] = FIELD_GET(MASKBYTE0, val) >> fct;
+ }
+
+ for (cur = 0, i = 0; i < map->size; i++)
+ cur += __print_txpwr_ent(m, &map->ent[i], buf, cur);
+
+ vfree(buf);
+ return 0;
+}
+
+#define case_REGD(_regd) \
+ case RTW89_ ## _regd: \
+ seq_puts(m, #_regd "\n"); \
+ break
+
+static void __print_regd(struct seq_file *m, struct rtw89_dev *rtwdev)
+{
+ u8 band = rtwdev->hal.current_band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+
+ switch (regd) {
+ default:
+ seq_printf(m, "UNKNOWN: %d\n", regd);
+ break;
+ case_REGD(WW);
+ case_REGD(ETSI);
+ case_REGD(FCC);
+ case_REGD(MKK);
+ case_REGD(NA);
+ case_REGD(IC);
+ case_REGD(KCC);
+ case_REGD(NCC);
+ case_REGD(CHILE);
+ case_REGD(ACMA);
+ case_REGD(MEXICO);
+ case_REGD(UKRAINE);
+ case_REGD(CN);
+ }
+}
+
+#undef case_REGD
+
+static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
+ seq_puts(m, "[Regulatory] ");
+ __print_regd(m, rtwdev);
+
+ seq_puts(m, "[SAR]\n");
+ rtw89_print_sar(m, rtwdev);
+
+ seq_puts(m, "\n[TX power byrate]\n");
+ ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_byr);
+ if (ret)
+ goto err;
+
+ seq_puts(m, "\n[TX power limit]\n");
+ ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt);
+ if (ret)
+ goto err;
+
+ seq_puts(m, "\n[TX power limit_ru]\n");
+ ret = __print_txpwr_map(m, rtwdev, &__txpwr_map_lmt_ru);
+ if (ret)
+ goto err;
+
+err:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static ssize_t
+rtw89_debug_priv_mac_reg_dump_select(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ int sel;
+ int ret;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ ret = kstrtoint(buf, 0, &sel);
+ if (ret)
+ return ret;
+
+ if (sel < RTW89_DBG_SEL_MAC_00 || sel > RTW89_DBG_SEL_RFC) {
+ rtw89_info(rtwdev, "invalid args: %d\n", sel);
+ return -EINVAL;
+ }
+
+ debugfs_priv->cb_data = sel;
+ rtw89_info(rtwdev, "select mac page dump %d\n", debugfs_priv->cb_data);
+
+ return count;
+}
+
+#define RTW89_MAC_PAGE_SIZE 0x100
+
+static int rtw89_debug_priv_mac_reg_dump_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ enum rtw89_debug_mac_reg_sel reg_sel = debugfs_priv->cb_data;
+ u32 start, end;
+ u32 i, j, k, page;
+ u32 val;
+
+ switch (reg_sel) {
+ case RTW89_DBG_SEL_MAC_00:
+ seq_puts(m, "Debug selected MAC page 0x00\n");
+ start = 0x000;
+ end = 0x014;
+ break;
+ case RTW89_DBG_SEL_MAC_40:
+ seq_puts(m, "Debug selected MAC page 0x40\n");
+ start = 0x040;
+ end = 0x07f;
+ break;
+ case RTW89_DBG_SEL_MAC_80:
+ seq_puts(m, "Debug selected MAC page 0x80\n");
+ start = 0x080;
+ end = 0x09f;
+ break;
+ case RTW89_DBG_SEL_MAC_C0:
+ seq_puts(m, "Debug selected MAC page 0xc0\n");
+ start = 0x0c0;
+ end = 0x0df;
+ break;
+ case RTW89_DBG_SEL_MAC_E0:
+ seq_puts(m, "Debug selected MAC page 0xe0\n");
+ start = 0x0e0;
+ end = 0x0ff;
+ break;
+ case RTW89_DBG_SEL_BB:
+ seq_puts(m, "Debug selected BB register\n");
+ start = 0x100;
+ end = 0x17f;
+ break;
+ case RTW89_DBG_SEL_IQK:
+ seq_puts(m, "Debug selected IQK register\n");
+ start = 0x180;
+ end = 0x1bf;
+ break;
+ case RTW89_DBG_SEL_RFC:
+ seq_puts(m, "Debug selected RFC register\n");
+ start = 0x1c0;
+ end = 0x1ff;
+ break;
+ default:
+ seq_puts(m, "Selected invalid register page\n");
+ return -EINVAL;
+ }
+
+ for (i = start; i <= end; i++) {
+ page = i << 8;
+ for (j = page; j < page + RTW89_MAC_PAGE_SIZE; j += 16) {
+ seq_printf(m, "%08xh : ", 0x18600000 + j);
+ for (k = 0; k < 4; k++) {
+ val = rtw89_read32(rtwdev, j + (k << 2));
+ seq_printf(m, "%08x ", val);
+ }
+ seq_puts(m, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t
+rtw89_debug_priv_mac_mem_dump_select(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ u32 sel, start_addr, len;
+ int num;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%x %x %x", &sel, &start_addr, &len);
+ if (num != 3) {
+ rtw89_info(rtwdev, "invalid format: <sel> <start> <len>\n");
+ return -EINVAL;
+ }
+
+ debugfs_priv->mac_mem.sel = sel;
+ debugfs_priv->mac_mem.start = start_addr;
+ debugfs_priv->mac_mem.len = len;
+
+ rtw89_info(rtwdev, "select mem %d start %d len %d\n",
+ sel, start_addr, len);
+
+ return count;
+}
+
+static const u32 mac_mem_base_addr_table[RTW89_MAC_MEM_MAX] = {
+ [RTW89_MAC_MEM_SHARED_BUF] = SHARED_BUF_BASE_ADDR,
+ [RTW89_MAC_MEM_DMAC_TBL] = DMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_SHCUT_MACHDR] = SHCUT_MACHDR_BASE_ADDR,
+ [RTW89_MAC_MEM_STA_SCHED] = STA_SCHED_BASE_ADDR,
+ [RTW89_MAC_MEM_RXPLD_FLTR_CAM] = RXPLD_FLTR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_SECURITY_CAM] = SECURITY_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_WOW_CAM] = WOW_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_CMAC_TBL] = CMAC_TBL_BASE_ADDR,
+ [RTW89_MAC_MEM_ADDR_CAM] = ADDR_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BA_CAM] = BA_CAM_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM0] = BCN_IE_CAM0_BASE_ADDR,
+ [RTW89_MAC_MEM_BCN_IE_CAM1] = BCN_IE_CAM1_BASE_ADDR,
+};
+
+static void rtw89_debug_dump_mac_mem(struct seq_file *m,
+ struct rtw89_dev *rtwdev,
+ u8 sel, u32 start_addr, u32 len)
+{
+ u32 base_addr, start_page, residue;
+ u32 i, j, p, pages;
+ u32 dump_len, remain;
+ u32 val;
+
+ remain = len;
+ pages = len / MAC_MEM_DUMP_PAGE_SIZE + 1;
+ start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
+ residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
+ base_addr = mac_mem_base_addr_table[sel];
+ base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;
+
+ for (p = 0; p < pages; p++) {
+ dump_len = min_t(u32, remain, MAC_MEM_DUMP_PAGE_SIZE);
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR, base_addr);
+ for (i = R_AX_INDIR_ACCESS_ENTRY + residue;
+ i < R_AX_INDIR_ACCESS_ENTRY + dump_len;) {
+ seq_printf(m, "%08xh:", i);
+ for (j = 0;
+ j < 4 && i < R_AX_INDIR_ACCESS_ENTRY + dump_len;
+ j++, i += 4) {
+ val = rtw89_read32(rtwdev, i);
+ seq_printf(m, " %08x", val);
+ remain -= 4;
+ }
+ seq_puts(m, "\n");
+ }
+ base_addr += MAC_MEM_DUMP_PAGE_SIZE;
+ }
+}
+
+static int
+rtw89_debug_priv_mac_mem_dump_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_debug_dump_mac_mem(m, rtwdev,
+ debugfs_priv->mac_mem.sel,
+ debugfs_priv->mac_mem.start,
+ debugfs_priv->mac_mem.len);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static ssize_t
+rtw89_debug_priv_mac_dbg_port_dump_select(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ char buf[32];
+ size_t buf_size;
+ int sel, set;
+ int num;
+ bool enable;
+
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size] = '\0';
+ num = sscanf(buf, "%d %d", &sel, &set);
+ if (num != 2) {
+ rtw89_info(rtwdev, "invalid format: <sel> <set>\n");
+ return -EINVAL;
+ }
+
+ enable = set == 0 ? false : true;
+ switch (sel) {
+ case 0:
+ debugfs_priv->dbgpkg_en.ss_dbg = enable;
+ break;
+ case 1:
+ debugfs_priv->dbgpkg_en.dle_dbg = enable;
+ break;
+ case 2:
+ debugfs_priv->dbgpkg_en.dmac_dbg = enable;
+ break;
+ case 3:
+ debugfs_priv->dbgpkg_en.cmac_dbg = enable;
+ break;
+ case 4:
+ debugfs_priv->dbgpkg_en.dbg_port = enable;
+ break;
+ default:
+ rtw89_info(rtwdev, "invalid args: sel %d set %d\n", sel, set);
+ return -EINVAL;
+ }
+
+ rtw89_info(rtwdev, "%s debug port dump %d\n",
+ enable ? "Enable" : "Disable", sel);
+
+ return count;
+}
+
+static int rtw89_debug_mac_dump_ss_dbg(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+ return 0;
+}
+
+static int rtw89_debug_mac_dump_dle_dbg(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+#define DLE_DFI_DUMP(__type, __target, __sel) \
+({ \
+ u32 __ctrl; \
+ u32 __reg_ctrl = R_AX_##__type##_DBG_FUN_INTF_CTL; \
+ u32 __reg_data = R_AX_##__type##_DBG_FUN_INTF_DATA; \
+ u32 __data, __val32; \
+ int __ret; \
+ \
+ __ctrl = FIELD_PREP(B_AX_##__type##_DFI_TRGSEL_MASK, \
+ DLE_DFI_TYPE_##__target) | \
+ FIELD_PREP(B_AX_##__type##_DFI_ADDR_MASK, __sel) | \
+ B_AX_WDE_DFI_ACTIVE; \
+ rtw89_write32(rtwdev, __reg_ctrl, __ctrl); \
+ __ret = read_poll_timeout(rtw89_read32, __val32, \
+ !(__val32 & B_AX_##__type##_DFI_ACTIVE), \
+ 1000, 50000, false, \
+ rtwdev, __reg_ctrl); \
+ if (__ret) { \
+ rtw89_err(rtwdev, "failed to dump DLE %s %s %d\n", \
+ #__type, #__target, __sel); \
+ return __ret; \
+ } \
+ \
+ __data = rtw89_read32(rtwdev, __reg_data); \
+ __data; \
+})
+
+#define DLE_DFI_FREE_PAGE_DUMP(__m, __type) \
+({ \
+ u32 __freepg, __pubpg; \
+ u32 __freepg_head, __freepg_tail, __pubpg_num; \
+ \
+ __freepg = DLE_DFI_DUMP(__type, FREEPG, 0); \
+ __pubpg = DLE_DFI_DUMP(__type, FREEPG, 1); \
+ __freepg_head = FIELD_GET(B_AX_DLE_FREE_HEADPG, __freepg); \
+ __freepg_tail = FIELD_GET(B_AX_DLE_FREE_TAILPG, __freepg); \
+ __pubpg_num = FIELD_GET(B_AX_DLE_PUB_PGNUM, __pubpg); \
+ seq_printf(__m, "[%s] freepg head: %d\n", \
+ #__type, __freepg_head); \
+ seq_printf(__m, "[%s] freepg tail: %d\n", \
+ #__type, __freepg_tail); \
+ seq_printf(__m, "[%s] pubpg num : %d\n", \
+ #__type, __pubpg_num); \
+})
+
+#define case_QUOTA(__m, __type, __id) \
+ case __type##_QTAID_##__id: \
+ val32 = DLE_DFI_DUMP(__type, QUOTA, __type##_QTAID_##__id); \
+ rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, val32); \
+ use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, val32); \
+ seq_printf(__m, "[%s][%s] rsv_pgnum: %d\n", \
+ #__type, #__id, rsv_pgnum); \
+ seq_printf(__m, "[%s][%s] use_pgnum: %d\n", \
+ #__type, #__id, use_pgnum); \
+ break
+ u32 quota_id;
+ u32 val32;
+ u16 rsv_pgnum, use_pgnum;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
+ if (ret) {
+ seq_puts(m, "[DLE] : DMAC not enabled\n");
+ return ret;
+ }
+
+ DLE_DFI_FREE_PAGE_DUMP(m, WDE);
+ DLE_DFI_FREE_PAGE_DUMP(m, PLE);
+ for (quota_id = 0; quota_id <= WDE_QTAID_CPUIO; quota_id++) {
+ switch (quota_id) {
+ case_QUOTA(m, WDE, HOST_IF);
+ case_QUOTA(m, WDE, WLAN_CPU);
+ case_QUOTA(m, WDE, DATA_CPU);
+ case_QUOTA(m, WDE, PKTIN);
+ case_QUOTA(m, WDE, CPUIO);
+ }
+ }
+ for (quota_id = 0; quota_id <= PLE_QTAID_CPUIO; quota_id++) {
+ switch (quota_id) {
+ case_QUOTA(m, PLE, B0_TXPL);
+ case_QUOTA(m, PLE, B1_TXPL);
+ case_QUOTA(m, PLE, C2H);
+ case_QUOTA(m, PLE, H2C);
+ case_QUOTA(m, PLE, WLAN_CPU);
+ case_QUOTA(m, PLE, MPDU);
+ case_QUOTA(m, PLE, CMAC0_RX);
+ case_QUOTA(m, PLE, CMAC1_RX);
+ case_QUOTA(m, PLE, CMAC1_BBRPT);
+ case_QUOTA(m, PLE, WDRLS);
+ case_QUOTA(m, PLE, CPUIO);
+ }
+ }
+
+ return 0;
+
+#undef case_QUOTA
+#undef DLE_DFI_DUMP
+#undef DLE_DFI_FREE_PAGE_DUMP
+}
+
+static int rtw89_debug_mac_dump_dmac_dbg(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL);
+ if (ret) {
+ seq_puts(m, "[DMAC] : DMAC not enabled\n");
+ return ret;
+ }
+
+ seq_printf(m, "R_AX_DMAC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR));
+ seq_printf(m, "[0]R_AX_WDRLS_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ seq_printf(m, "[1]R_AX_SEC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ERR_IMR_ISR));
+ seq_printf(m, "[2.1]R_AX_MPDU_TX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
+ seq_printf(m, "[2.2]R_AX_MPDU_RX_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
+ seq_printf(m, "[3]R_AX_STA_SCHEDULER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ seq_printf(m, "[4]R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ seq_printf(m, "[5.1]R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ seq_printf(m, "[5.2]R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ seq_printf(m, "[6]R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ seq_printf(m, "[7]R_AX_PKTIN_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ seq_printf(m, "[8.1]R_AX_OTHER_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+ seq_printf(m, "[8.2]R_AX_HOST_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ seq_printf(m, "[8.3]R_AX_CPU_DISPATCHER_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ seq_printf(m, "[10]R_AX_CPUIO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
+ seq_printf(m, "[11.1]R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ seq_printf(m, "[11.2]R_AX_BBRPT_CHINFO_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_CHINFO_ERR_IMR_ISR));
+ seq_printf(m, "[11.3]R_AX_BBRPT_DFS_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_DFS_ERR_IMR_ISR));
+ seq_printf(m, "[11.4]R_AX_LA_ERRFLAG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_LA_ERRFLAG));
+
+ return 0;
+}
+
+static int rtw89_debug_mac_dump_cmac_dbg(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL);
+ if (ret) {
+ seq_puts(m, "[CMAC] : CMAC 0 not enabled\n");
+ return ret;
+ }
+
+ seq_printf(m, "R_AX_CMAC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR));
+ seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR));
+ seq_printf(m, "[1]R_AX_PTCL_ISR0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
+ seq_printf(m, "[3]R_AX_DLE_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL));
+ seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
+ seq_printf(m, "[5]R_AX_TXPWR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
+ seq_printf(m, "[6]R_AX_RMAC_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR));
+ seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));
+
+ ret = rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL);
+ if (ret) {
+ seq_puts(m, "[CMAC] : CMAC 1 not enabled\n");
+ return ret;
+ }
+
+ seq_printf(m, "R_AX_CMAC_ERR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR_C1));
+ seq_printf(m, "[0]R_AX_SCHEDULE_ERR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_ISR_C1));
+ seq_printf(m, "[1]R_AX_PTCL_ISR0_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0_C1));
+ seq_printf(m, "[3]R_AX_DLE_CTRL_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL_C1));
+ seq_printf(m, "[4]R_AX_PHYINFO_ERR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR_C1));
+ seq_printf(m, "[5]R_AX_TXPWR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR_C1));
+ seq_printf(m, "[6]R_AX_RMAC_ERR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RMAC_ERR_ISR_C1));
+ seq_printf(m, "[7]R_AX_TMAC_ERR_IMR_ISR_C1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR_C1));
+
+ return 0;
+}
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c0 = {
+ .sel_addr = R_AX_PTCL_DBG,
+ .sel_byte = 1,
+ .sel_msk = B_AX_PTCL_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x3F,
+ .rd_addr = R_AX_PTCL_DBG_INFO,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PTCL_DBG_INFO_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ptcl_c1 = {
+ .sel_addr = R_AX_PTCL_DBG_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_PTCL_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x3F,
+ .rd_addr = R_AX_PTCL_DBG_INFO_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PTCL_DBG_INFO_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_sch_c0 = {
+ .sel_addr = R_AX_SCH_DBG_SEL,
+ .sel_byte = 1,
+ .sel_msk = B_AX_SCH_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x2F,
+ .rd_addr = R_AX_SCH_DBG,
+ .rd_byte = 4,
+ .rd_msk = B_AX_SCHEDULER_DBG_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_sch_c1 = {
+ .sel_addr = R_AX_SCH_DBG_SEL_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_SCH_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x2F,
+ .rd_addr = R_AX_SCH_DBG_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_SCHEDULER_DBG_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c0 = {
+ .sel_addr = R_AX_MACTX_DBG_SEL_CNT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DBGSEL_MACTX_MASK,
+ .srt = 0x00,
+ .end = 0x19,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tmac_c1 = {
+ .sel_addr = R_AX_MACTX_DBG_SEL_CNT_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DBGSEL_MACTX_MASK,
+ .srt = 0x00,
+ .end = 0x19,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c0 = {
+ .sel_addr = R_AX_RX_DEBUG_SELECT,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x58,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmac_c1 = {
+ .sel_addr = R_AX_RX_DEBUG_SELECT_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x58,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c0 = {
+ .sel_addr = R_AX_RX_STATE_MONITOR,
+ .sel_byte = 1,
+ .sel_msk = B_AX_STATE_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x17,
+ .rd_addr = R_AX_RX_STATE_MONITOR,
+ .rd_byte = 4,
+ .rd_msk = B_AX_RX_STATE_MONITOR_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmacst_c1 = {
+ .sel_addr = R_AX_RX_STATE_MONITOR_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_STATE_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x17,
+ .rd_addr = R_AX_RX_STATE_MONITOR_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_RX_STATE_MONITOR_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c0 = {
+ .sel_addr = R_AX_RMAC_PLCP_MON,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PCLP_MON_SEL_MASK,
+ .srt = 0x0,
+ .end = 0xF,
+ .rd_addr = R_AX_RMAC_PLCP_MON,
+ .rd_byte = 4,
+ .rd_msk = B_AX_RMAC_PLCP_MON_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_rmac_plcp_c1 = {
+ .sel_addr = R_AX_RMAC_PLCP_MON_C1,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PCLP_MON_SEL_MASK,
+ .srt = 0x0,
+ .end = 0xF,
+ .rd_addr = R_AX_RMAC_PLCP_MON_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_RMAC_PLCP_MON_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c0 = {
+ .sel_addr = R_AX_DBGSEL_TRXPTCL,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DBGSEL_TRXPTCL_MASK,
+ .srt = 0x08,
+ .end = 0x10,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_trxptcl_c1 = {
+ .sel_addr = R_AX_DBGSEL_TRXPTCL_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DBGSEL_TRXPTCL_MASK,
+ .srt = 0x08,
+ .end = 0x10,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c0 = {
+ .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG,
+ .sel_byte = 1,
+ .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x07,
+ .rd_addr = R_AX_WMAC_TX_INFO0_DEBUG,
+ .rd_byte = 4,
+ .rd_msk = B_AX_TX_CTRL_INFO_P0_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c0 = {
+ .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG,
+ .sel_byte = 1,
+ .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x07,
+ .rd_addr = R_AX_WMAC_TX_INFO1_DEBUG,
+ .rd_byte = 4,
+ .rd_msk = B_AX_TX_CTRL_INFO_P1_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tx_infol_c1 = {
+ .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x07,
+ .rd_addr = R_AX_WMAC_TX_INFO0_DEBUG_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_TX_CTRL_INFO_P0_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_tx_infoh_c1 = {
+ .sel_addr = R_AX_WMAC_TX_CTRL_DEBUG_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_TX_CTRL_DEBUG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x07,
+ .rd_addr = R_AX_WMAC_TX_INFO1_DEBUG_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_TX_CTRL_INFO_P1_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c0 = {
+ .sel_addr = R_AX_WMAC_TX_TF_INFO_0,
+ .sel_byte = 1,
+ .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x04,
+ .rd_addr = R_AX_WMAC_TX_TF_INFO_1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c0 = {
+ .sel_addr = R_AX_WMAC_TX_TF_INFO_0,
+ .sel_byte = 1,
+ .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x04,
+ .rd_addr = R_AX_WMAC_TX_TF_INFO_2,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infol_c1 = {
+ .sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x04,
+ .rd_addr = R_AX_WMAC_TX_TF_INFO_1_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WMAC_TX_TF_INFO_P0_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_txtf_infoh_c1 = {
+ .sel_addr = R_AX_WMAC_TX_TF_INFO_0_C1,
+ .sel_byte = 1,
+ .sel_msk = B_AX_WMAC_TX_TF_INFO_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x04,
+ .rd_addr = R_AX_WMAC_TX_TF_INFO_2_C1,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WMAC_TX_TF_INFO_P1_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_freepg = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80000000,
+ .end = 0x80000001,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_quota = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80010000,
+ .end = 0x80010004,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pagellt = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80020000,
+ .end = 0x80020FFF,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_bufmgn_pktinfo = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80030000,
+ .end = 0x80030FFF,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_prepkt = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80040000,
+ .end = 0x80040FFF,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_nxtpkt = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80050000,
+ .end = 0x80050FFF,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qlnktbl = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80060000,
+ .end = 0x80060453,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_wde_quemgn_qempty = {
+ .sel_addr = R_AX_WDE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_WDE_DFI_DATA_MASK,
+ .srt = 0x80070000,
+ .end = 0x80070011,
+ .rd_addr = R_AX_WDE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_WDE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_freepg = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80000000,
+ .end = 0x80000001,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_quota = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80010000,
+ .end = 0x8001000A,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pagellt = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80020000,
+ .end = 0x80020DBF,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_bufmgn_pktinfo = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80030000,
+ .end = 0x80030DBF,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_prepkt = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80040000,
+ .end = 0x80040DBF,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_nxtpkt = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80050000,
+ .end = 0x80050DBF,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qlnktbl = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80060000,
+ .end = 0x80060041,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_ple_quemgn_qempty = {
+ .sel_addr = R_AX_PLE_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_PLE_DFI_DATA_MASK,
+ .srt = 0x80070000,
+ .end = 0x80070001,
+ .rd_addr = R_AX_PLE_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_PLE_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pktinfo = {
+ .sel_addr = R_AX_DBG_FUN_INTF_CTL,
+ .sel_byte = 4,
+ .sel_msk = B_AX_DFI_DATA_MASK,
+ .srt = 0x80000000,
+ .end = 0x8000017f,
+ .rd_addr = R_AX_DBG_FUN_INTF_DATA,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DFI_DATA_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_txdma = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x03,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_rxdma = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x04,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cvt = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x01,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_cxpl = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x05,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_io = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x05,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc = {
+ .sel_addr = R_AX_PCIE_DBG_CTRL,
+ .sel_byte = 2,
+ .sel_msk = B_AX_DBG_SEL_MASK,
+ .srt = 0x00,
+ .end = 0x06,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
+static const struct rtw89_mac_dbg_port_info dbg_port_pcie_misc2 = {
+ .sel_addr = R_AX_DBG_CTRL,
+ .sel_byte = 1,
+ .sel_msk = B_AX_DBG_SEL0,
+ .srt = 0x34,
+ .end = 0x3C,
+ .rd_addr = R_AX_DBG_PORT_SEL,
+ .rd_byte = 4,
+ .rd_msk = B_AX_DEBUG_ST_MASK
+};
+
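+/* Enable the selected debug port, programming the per-block select
+ * registers where required, and return its dump descriptor, or NULL for
+ * an unknown selection.
+ */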
+static const struct rtw89_mac_dbg_port_info *
+rtw89_debug_mac_dbg_port_sel(struct seq_file *m,
+ struct rtw89_dev *rtwdev, u32 sel)
+{
+ const struct rtw89_mac_dbg_port_info *info;
+ u32 val32;
+ u16 val16;
+ u8 val8;
+
+ switch (sel) {
+ case RTW89_DBG_PORT_SEL_PTCL_C0:
+ info = &dbg_port_ptcl_c0;
+ val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG);
+ val16 |= B_AX_PTCL_DBG_EN;
+ rtw89_write16(rtwdev, R_AX_PTCL_DBG, val16);
+ seq_puts(m, "Enable PTCL C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PTCL_C1:
+ info = &dbg_port_ptcl_c1;
+ val16 = rtw89_read16(rtwdev, R_AX_PTCL_DBG_C1);
+ val16 |= B_AX_PTCL_DBG_EN;
+ rtw89_write16(rtwdev, R_AX_PTCL_DBG_C1, val16);
+ seq_puts(m, "Enable PTCL C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_SCH_C0:
+ info = &dbg_port_sch_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL);
+ val32 |= B_AX_SCH_DBG_EN;
+ rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL, val32);
+ seq_puts(m, "Enable SCH C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_SCH_C1:
+ info = &dbg_port_sch_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_SCH_DBG_SEL_C1);
+ val32 |= B_AX_SCH_DBG_EN;
+ rtw89_write32(rtwdev, R_AX_SCH_DBG_SEL_C1, val32);
+ seq_puts(m, "Enable SCH C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TMAC_C0:
+ info = &dbg_port_tmac_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL);
+ val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C0, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+ seq_puts(m, "Enable TMAC C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TMAC_C1:
+ info = &dbg_port_tmac_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
+ val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_TMAC,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, TMAC_DBG_SEL_C1, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+ seq_puts(m, "Enable TMAC C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMAC_C0:
+ info = &dbg_port_rmac_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL);
+ val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C0, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+
+ val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL);
+ val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL, val8);
+ seq_puts(m, "Enable RMAC C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMAC_C1:
+ info = &dbg_port_rmac_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
+ val32 = u32_replace_bits(val32, TRXPTRL_DBG_SEL_RMAC,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write32(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, RMAC_DBG_SEL_C1, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+
+ val8 = rtw89_read8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1);
+ val8 = u8_replace_bits(val8, RMAC_CMAC_DBG_SEL,
+ B_AX_DBGSEL_TRXPTCL_MASK);
+ rtw89_write8(rtwdev, R_AX_DBGSEL_TRXPTCL_C1, val8);
+ seq_puts(m, "Enable RMAC C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMACST_C0:
+ info = &dbg_port_rmacst_c0;
+ seq_puts(m, "Enable RMAC state C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMACST_C1:
+ info = &dbg_port_rmacst_c1;
+ seq_puts(m, "Enable RMAC state C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMAC_PLCP_C0:
+ info = &dbg_port_rmac_plcp_c0;
+ seq_puts(m, "Enable RMAC PLCP C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_RMAC_PLCP_C1:
+ info = &dbg_port_rmac_plcp_c1;
+ seq_puts(m, "Enable RMAC PLCP C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TRXPTCL_C0:
+ info = &dbg_port_trxptcl_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C0, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+ seq_puts(m, "Enable TRXPTCL C0 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TRXPTCL_C1:
+ info = &dbg_port_trxptcl_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, TRXPTCL_DBG_SEL_C1, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+
+ val32 = rtw89_read32(rtwdev, R_AX_SYS_STATUS1);
+ val32 = u32_replace_bits(val32, MAC_DBG_SEL, B_AX_SEL_0XC0_MASK);
+ rtw89_write32(rtwdev, R_AX_SYS_STATUS1, val32);
+ seq_puts(m, "Enable TRXPTCL C1 dbgport.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TX_INFOL_C0:
+ info = &dbg_port_tx_infol_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1, val32);
+ seq_puts(m, "Enable tx infol dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TX_INFOH_C0:
+ info = &dbg_port_tx_infoh_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1, val32);
+ seq_puts(m, "Enable tx infoh dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TX_INFOL_C1:
+ info = &dbg_port_tx_infol_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
+ seq_puts(m, "Enable tx infol dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TX_INFOH_C1:
+ info = &dbg_port_tx_infoh_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
+ seq_puts(m, "Enable tx infoh dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TXTF_INFOL_C0:
+ info = &dbg_port_txtf_infol_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1, val32);
+ seq_puts(m, "Enable tx tf infol dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TXTF_INFOH_C0:
+ info = &dbg_port_txtf_infoh_c0;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1, val32);
+ seq_puts(m, "Enable tx tf infoh dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TXTF_INFOL_C1:
+ info = &dbg_port_txtf_infol_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
+ seq_puts(m, "Enable tx tf infol dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_TXTF_INFOH_C1:
+ info = &dbg_port_txtf_infoh_c1;
+ val32 = rtw89_read32(rtwdev, R_AX_TCR1_C1);
+ val32 |= B_AX_TCR_FORCE_READ_TXDFIFO;
+ rtw89_write32(rtwdev, R_AX_TCR1_C1, val32);
+ seq_puts(m, "Enable tx tf infoh dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG:
+ info = &dbg_port_wde_bufmgn_freepg;
+ seq_puts(m, "Enable wde bufmgn freepg dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA:
+ info = &dbg_port_wde_bufmgn_quota;
+ seq_puts(m, "Enable wde bufmgn quota dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT:
+ info = &dbg_port_wde_bufmgn_pagellt;
+ seq_puts(m, "Enable wde bufmgn pagellt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO:
+ info = &dbg_port_wde_bufmgn_pktinfo;
+ seq_puts(m, "Enable wde bufmgn pktinfo dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT:
+ info = &dbg_port_wde_quemgn_prepkt;
+ seq_puts(m, "Enable wde quemgn prepkt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT:
+ info = &dbg_port_wde_quemgn_nxtpkt;
+ seq_puts(m, "Enable wde quemgn nxtpkt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL:
+ info = &dbg_port_wde_quemgn_qlnktbl;
+ seq_puts(m, "Enable wde quemgn qlnktbl dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY:
+ info = &dbg_port_wde_quemgn_qempty;
+ seq_puts(m, "Enable wde quemgn qempty dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG:
+ info = &dbg_port_ple_bufmgn_freepg;
+ seq_puts(m, "Enable ple bufmgn freepg dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA:
+ info = &dbg_port_ple_bufmgn_quota;
+ seq_puts(m, "Enable ple bufmgn quota dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT:
+ info = &dbg_port_ple_bufmgn_pagellt;
+ seq_puts(m, "Enable ple bufmgn pagellt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO:
+ info = &dbg_port_ple_bufmgn_pktinfo;
+ seq_puts(m, "Enable ple bufmgn pktinfo dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT:
+ info = &dbg_port_ple_quemgn_prepkt;
+ seq_puts(m, "Enable ple quemgn prepkt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT:
+ info = &dbg_port_ple_quemgn_nxtpkt;
+ seq_puts(m, "Enable ple quemgn nxtpkt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL:
+ info = &dbg_port_ple_quemgn_qlnktbl;
+ seq_puts(m, "Enable ple quemgn qlnktbl dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY:
+ info = &dbg_port_ple_quemgn_qempty;
+ seq_puts(m, "Enable ple quemgn qempty dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PKTINFO:
+ info = &dbg_port_pktinfo;
+ seq_puts(m, "Enable pktinfo dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_TXDMA:
+ info = &dbg_port_pcie_txdma;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_TXDMA_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie txdma dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_RXDMA:
+ info = &dbg_port_pcie_rxdma;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_RXDMA_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie rxdma dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_CVT:
+ info = &dbg_port_pcie_cvt;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_CVT_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie cvt dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_CXPL:
+ info = &dbg_port_pcie_cxpl;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_CXPL_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie cxpl dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_IO:
+ info = &dbg_port_pcie_io;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_IO_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie io dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_MISC:
+ info = &dbg_port_pcie_misc;
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_CTRL);
+ val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL0);
+ val32 = u32_replace_bits(val32, PCIE_MISC_DBG_SEL, B_AX_DBG_SEL1);
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL, val32);
+ seq_puts(m, "Enable pcie misc dump.\n");
+ break;
+ case RTW89_DBG_PORT_SEL_PCIE_MISC2:
+ info = &dbg_port_pcie_misc2;
+ val16 = rtw89_read16(rtwdev, R_AX_PCIE_DBG_CTRL);
+ val16 = u16_replace_bits(val16, PCIE_MISC2_DBG_SEL,
+ B_AX_DBG_SEL_MASK);
+ rtw89_write16(rtwdev, R_AX_PCIE_DBG_CTRL, val16);
+ seq_puts(m, "Enable pcie misc2 dump.\n");
+ break;
+ default:
+ seq_puts(m, "Dbg port select err\n");
+ return NULL;
+ }
+
+ return info;
+}
+
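+/* Skip debug ports whose backing block is unavailable: PCIE ports on a
+ * non-PCIE HCI, band 1 (C1) ports on RTL8852B, and DMAC/CMAC ports when
+ * the corresponding MAC block is not enabled.
+ */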
+static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel)
+{
+ if (rtwdev->hci.type != RTW89_HCI_TYPE_PCIE &&
+ sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA &&
+ sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2)
+ return false;
+ if (rtwdev->chip->chip_id == RTL8852B &&
+ sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
+ sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
+ return false;
+ if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_DMAC_SEL) &&
+ sel >= RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG &&
+ sel <= RTW89_DBG_PORT_SEL_PKTINFO)
+ return false;
+ if (rtw89_mac_check_mac_en(rtwdev, 0, RTW89_CMAC_SEL) &&
+ sel >= RTW89_DBG_PORT_SEL_PTCL_C0 &&
+ sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C0)
+ return false;
+ if (rtw89_mac_check_mac_en(rtwdev, 1, RTW89_CMAC_SEL) &&
+ sel >= RTW89_DBG_PORT_SEL_PTCL_C1 &&
+ sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1)
+ return false;
+
+ return true;
+}
+
+static int rtw89_debug_mac_dbg_port_dump(struct rtw89_dev *rtwdev,
+ struct seq_file *m, u32 sel)
+{
+ const struct rtw89_mac_dbg_port_info *info;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ u32 i;
+
+ info = rtw89_debug_mac_dbg_port_sel(m, rtwdev, sel);
+ if (!info) {
+ rtw89_err(rtwdev, "failed to select debug port %d\n", sel);
+ return -EINVAL;
+ }
+
+#define case_DBG_SEL(__sel) \
+ case RTW89_DBG_PORT_SEL_##__sel: \
+ seq_puts(m, "Dump debug port " #__sel ":\n"); \
+ break
+
+ switch (sel) {
+ case_DBG_SEL(PTCL_C0);
+ case_DBG_SEL(PTCL_C1);
+ case_DBG_SEL(SCH_C0);
+ case_DBG_SEL(SCH_C1);
+ case_DBG_SEL(TMAC_C0);
+ case_DBG_SEL(TMAC_C1);
+ case_DBG_SEL(RMAC_C0);
+ case_DBG_SEL(RMAC_C1);
+ case_DBG_SEL(RMACST_C0);
+ case_DBG_SEL(RMACST_C1);
+ case_DBG_SEL(TRXPTCL_C0);
+ case_DBG_SEL(TRXPTCL_C1);
+ case_DBG_SEL(TX_INFOL_C0);
+ case_DBG_SEL(TX_INFOH_C0);
+ case_DBG_SEL(TX_INFOL_C1);
+ case_DBG_SEL(TX_INFOH_C1);
+ case_DBG_SEL(TXTF_INFOL_C0);
+ case_DBG_SEL(TXTF_INFOH_C0);
+ case_DBG_SEL(TXTF_INFOL_C1);
+ case_DBG_SEL(TXTF_INFOH_C1);
+ case_DBG_SEL(WDE_BUFMGN_FREEPG);
+ case_DBG_SEL(WDE_BUFMGN_QUOTA);
+ case_DBG_SEL(WDE_BUFMGN_PAGELLT);
+ case_DBG_SEL(WDE_BUFMGN_PKTINFO);
+ case_DBG_SEL(WDE_QUEMGN_PREPKT);
+ case_DBG_SEL(WDE_QUEMGN_NXTPKT);
+ case_DBG_SEL(WDE_QUEMGN_QLNKTBL);
+ case_DBG_SEL(WDE_QUEMGN_QEMPTY);
+ case_DBG_SEL(PLE_BUFMGN_FREEPG);
+ case_DBG_SEL(PLE_BUFMGN_QUOTA);
+ case_DBG_SEL(PLE_BUFMGN_PAGELLT);
+ case_DBG_SEL(PLE_BUFMGN_PKTINFO);
+ case_DBG_SEL(PLE_QUEMGN_PREPKT);
+ case_DBG_SEL(PLE_QUEMGN_NXTPKT);
+ case_DBG_SEL(PLE_QUEMGN_QLNKTBL);
+ case_DBG_SEL(PLE_QUEMGN_QEMPTY);
+ case_DBG_SEL(PKTINFO);
+ case_DBG_SEL(PCIE_TXDMA);
+ case_DBG_SEL(PCIE_RXDMA);
+ case_DBG_SEL(PCIE_CVT);
+ case_DBG_SEL(PCIE_CXPL);
+ case_DBG_SEL(PCIE_IO);
+ case_DBG_SEL(PCIE_MISC);
+ case_DBG_SEL(PCIE_MISC2);
+ }
+
+#undef case_DBG_SEL
+
+ seq_printf(m, "Sel addr = 0x%X\n", info->sel_addr);
+ seq_printf(m, "Read addr = 0x%X\n", info->rd_addr);
+
+ for (i = info->srt; i <= info->end; i++) {
+ switch (info->sel_byte) {
+ case 1:
+ default:
+ rtw89_write8_mask(rtwdev, info->sel_addr,
+ info->sel_msk, i);
+ seq_printf(m, "0x%02X: ", i);
+ break;
+ case 2:
+ rtw89_write16_mask(rtwdev, info->sel_addr,
+ info->sel_msk, i);
+ seq_printf(m, "0x%04X: ", i);
+ break;
+ case 4:
+ rtw89_write32_mask(rtwdev, info->sel_addr,
+ info->sel_msk, i);
+ seq_printf(m, "0x%04X: ", i);
+ break;
+ }
+
+ udelay(10);
+
+ switch (info->rd_byte) {
+ case 1:
+ default:
+ val8 = rtw89_read8_mask(rtwdev,
+ info->rd_addr, info->rd_msk);
+ seq_printf(m, "0x%02X\n", val8);
+ break;
+ case 2:
+ val16 = rtw89_read16_mask(rtwdev,
+ info->rd_addr, info->rd_msk);
+ seq_printf(m, "0x%04X\n", val16);
+ break;
+ case 4:
+ val32 = rtw89_read32_mask(rtwdev,
+ info->rd_addr, info->rd_msk);
+ seq_printf(m, "0x%08X\n", val32);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw89_debug_mac_dump_dbg_port(struct rtw89_dev *rtwdev,
+ struct seq_file *m)
+{
+ u32 sel;
+ int ret = 0;
+
+ for (sel = RTW89_DBG_PORT_SEL_PTCL_C0;
+ sel < RTW89_DBG_PORT_SEL_LAST; sel++) {
+ if (!is_dbg_port_valid(rtwdev, sel))
+ continue;
+ ret = rtw89_debug_mac_dbg_port_dump(rtwdev, m, sel);
+ if (ret) {
+ rtw89_err(rtwdev,
+ "failed to dump debug port %d\n", sel);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int
+rtw89_debug_priv_mac_dbg_port_dump_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ if (debugfs_priv->dbgpkg_en.ss_dbg)
+ rtw89_debug_mac_dump_ss_dbg(rtwdev, m);
+ if (debugfs_priv->dbgpkg_en.dle_dbg)
+ rtw89_debug_mac_dump_dle_dbg(rtwdev, m);
+ if (debugfs_priv->dbgpkg_en.dmac_dbg)
+ rtw89_debug_mac_dump_dmac_dbg(rtwdev, m);
+ if (debugfs_priv->dbgpkg_en.cmac_dbg)
+ rtw89_debug_mac_dump_cmac_dbg(rtwdev, m);
+ if (debugfs_priv->dbgpkg_en.dbg_port)
+ rtw89_debug_mac_dump_dbg_port(rtwdev, m);
+
+ return 0;
+}
+
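+/* Copy a hex string ("H1H2H3...") from user space and convert it to a
+ * binary buffer. Returns an ERR_PTR on failure; the caller must kfree()
+ * the returned buffer.
+ */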
+static u8 *rtw89_hex2bin_user(struct rtw89_dev *rtwdev,
+ const char __user *user_buf, size_t count)
+{
+ char *buf;
+ u8 *bin;
+ int num;
+ int err = 0;
+
+ buf = memdup_user(user_buf, count);
+ if (IS_ERR(buf))
+ return buf;
+
+ num = count / 2;
+ bin = kmalloc(num, GFP_KERNEL);
+ if (!bin) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (hex2bin(bin, buf, num)) {
+ rtw89_info(rtwdev, "valid format: H1H2H3...\n");
+ kfree(bin);
+ err = -EINVAL;
+ }
+
+out:
+ kfree(buf);
+
+ return err ? ERR_PTR(err) : bin;
+}
+
+static ssize_t rtw89_debug_priv_send_h2c_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ u8 *h2c;
+ u16 h2c_len = count / 2;
+
+ h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ if (IS_ERR(h2c))
+ return -EFAULT;
+
+ rtw89_fw_h2c_raw(rtwdev, h2c, h2c_len);
+
+ kfree(h2c);
+
+ return count;
+}
+
+static int
+rtw89_debug_priv_early_h2c_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_early_h2c *early_h2c;
+ int seq = 0;
+
+ mutex_lock(&rtwdev->mutex);
+ list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list)
+ seq_printf(m, "%d: %*ph\n", ++seq, early_h2c->h2c_len, early_h2c->h2c);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static ssize_t
+rtw89_debug_priv_early_h2c_set(struct file *filp, const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct seq_file *m = (struct seq_file *)filp->private_data;
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_early_h2c *early_h2c;
+ u8 *h2c;
+ u16 h2c_len = count / 2;
+
+ h2c = rtw89_hex2bin_user(rtwdev, user_buf, count);
+ if (IS_ERR(h2c))
+ return -EFAULT;
+
+ if (h2c_len >= 2 && h2c[0] == 0x00 && h2c[1] == 0x00) {
+ kfree(h2c);
+ rtw89_fw_free_all_early_h2c(rtwdev);
+ goto out;
+ }
+
+ early_h2c = kmalloc(sizeof(*early_h2c), GFP_KERNEL);
+ if (!early_h2c) {
+ kfree(h2c);
+ return -ENOMEM;
+ }
+
+ early_h2c->h2c = h2c;
+ early_h2c->h2c_len = h2c_len;
+
+ mutex_lock(&rtwdev->mutex);
+ list_add_tail(&early_h2c->list, &rtwdev->early_h2c_list);
+ mutex_unlock(&rtwdev->mutex);
+
+out:
+ return count;
+}
+
+static int rtw89_debug_priv_btc_info_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+
+ rtw89_btc_dump_info(rtwdev, m);
+
+ return 0;
+}
+
+static ssize_t rtw89_debug_priv_btc_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_btc *btc = &rtwdev->btc;
+ bool btc_manual;
+
+ if (kstrtobool_from_user(user_buf, count, &btc_manual))
+ goto out;
+
+ btc->ctrl.manual = btc_manual;
+out:
+ return count;
+}
+
+static ssize_t rtw89_debug_fw_log_btc_manual_set(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *loff)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = filp->private_data;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_fw_info *fw_info = &rtwdev->fw;
+ bool fw_log_manual;
+
+ if (kstrtobool_from_user(user_buf, count, &fw_log_manual))
+ goto out;
+
+ mutex_lock(&rtwdev->mutex);
+ fw_info->fw_log_enable = fw_log_manual;
+ rtw89_fw_h2c_fw_log(rtwdev, fw_log_manual);
+ mutex_unlock(&rtwdev->mutex);
+out:
+ return count;
+}
+
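+/* Per-station iterator: print the last reported TX rate, the most recent
+ * RX rate/encoding, and the averaged RSSI.
+ */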
+static void rtw89_sta_info_get_iter(void *data, struct ieee80211_sta *sta)
+{
+ static const char * const he_gi_str[] = {
+ [NL80211_RATE_INFO_HE_GI_0_8] = "0.8",
+ [NL80211_RATE_INFO_HE_GI_1_6] = "1.6",
+ [NL80211_RATE_INFO_HE_GI_3_2] = "3.2",
+ };
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rate_info *rate = &rtwsta->ra_report.txrate;
+ struct ieee80211_rx_status *status = &rtwsta->rx_status;
+ struct seq_file *m = (struct seq_file *)data;
+ u8 rssi;
+
+ seq_printf(m, "TX rate [%d]: ", rtwsta->mac_id);
+
+ if (rate->flags & RATE_INFO_FLAGS_MCS)
+ seq_printf(m, "HT MCS-%d%s", rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ else if (rate->flags & RATE_INFO_FLAGS_VHT_MCS)
+ seq_printf(m, "VHT %dSS MCS-%d%s", rate->nss, rate->mcs,
+ rate->flags & RATE_INFO_FLAGS_SHORT_GI ? " SGI" : "");
+ else if (rate->flags & RATE_INFO_FLAGS_HE_MCS)
+ seq_printf(m, "HE %dSS MCS-%d GI:%s", rate->nss, rate->mcs,
+ rate->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[rate->he_gi] : "N/A");
+ else
+ seq_printf(m, "Legacy %d", rate->legacy);
+ seq_printf(m, "\t(hw_rate=0x%x)", rtwsta->ra_report.hw_rate);
+ seq_printf(m, "\t==> agg_wait=%d (%d)\n", rtwsta->max_agg_wait,
+ sta->max_rc_amsdu_len);
+
+ seq_printf(m, "RX rate [%d]: ", rtwsta->mac_id);
+
+ switch (status->encoding) {
+ case RX_ENC_LEGACY:
+ seq_printf(m, "Legacy %d", status->rate_idx +
+ (status->band == NL80211_BAND_5GHZ ? 4 : 0));
+ break;
+ case RX_ENC_HT:
+ seq_printf(m, "HT MCS-%d%s", status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ break;
+ case RX_ENC_VHT:
+ seq_printf(m, "VHT %dSS MCS-%d%s", status->nss, status->rate_idx,
+ status->enc_flags & RX_ENC_FLAG_SHORT_GI ? " SGI" : "");
+ break;
+ case RX_ENC_HE:
+ seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx,
+ status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ?
+ he_gi_str[status->he_gi] : "N/A");
+ break;
+ }
+ seq_printf(m, "\t(hw_rate=0x%x)\n", rtwsta->rx_hw_rate);
+
+ rssi = ewma_rssi_read(&rtwsta->avg_rssi);
+ seq_printf(m, "RSSI: %d dBm (raw=%d, prev=%d)\n",
+ RTW89_RSSI_RAW_TO_DBM(rssi), rssi, rtwsta->prev_rssi);
+}
+
+static void
+rtw89_debug_append_rx_rate(struct seq_file *m, struct rtw89_pkt_stat *pkt_stat,
+ enum rtw89_hw_rate first_rate, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ seq_printf(m, "%s%u", i == 0 ? "" : ", ",
+ pkt_stat->rx_rate_cnt[first_rate + i]);
+}
+
+static const struct rtw89_rx_rate_cnt_info {
+ enum rtw89_hw_rate first_rate;
+ int len;
+ const char *rate_mode;
+} rtw89_rx_rate_cnt_infos[] = {
+ {RTW89_HW_RATE_CCK1, 4, "Legacy:"},
+ {RTW89_HW_RATE_OFDM6, 8, "OFDM:"},
+ {RTW89_HW_RATE_MCS0, 8, "HT 0:"},
+ {RTW89_HW_RATE_MCS8, 8, "HT 1:"},
+ {RTW89_HW_RATE_VHT_NSS1_MCS0, 10, "VHT 1SS:"},
+ {RTW89_HW_RATE_VHT_NSS2_MCS0, 10, "VHT 2SS:"},
+ {RTW89_HW_RATE_HE_NSS1_MCS0, 12, "HE 1SS:"},
+ {RTW89_HW_RATE_HE_NSS2_MCS0, 12, "HE 2SS:"},
+};
+
+static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v)
+{
+ struct rtw89_debugfs_priv *debugfs_priv = m->private;
+ struct rtw89_dev *rtwdev = debugfs_priv->rtwdev;
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ struct rtw89_pkt_stat *pkt_stat = &rtwdev->phystat.last_pkt_stat;
+ const struct rtw89_rx_rate_cnt_info *info;
+ int i;
+
+ seq_printf(m, "TP TX: %u [%u] Mbps (lv: %d), RX: %u [%u] Mbps (lv: %d)\n",
+ stats->tx_throughput, stats->tx_throughput_raw, stats->tx_tfc_lv,
+ stats->rx_throughput, stats->rx_throughput_raw, stats->rx_tfc_lv);
+ seq_printf(m, "Beacon: %u\n", pkt_stat->beacon_nr);
+ seq_printf(m, "Avg packet length: TX=%u, RX=%u\n", stats->tx_avg_len,
+ stats->rx_avg_len);
+
+ seq_puts(m, "RX count:\n");
+ for (i = 0; i < ARRAY_SIZE(rtw89_rx_rate_cnt_infos); i++) {
+ info = &rtw89_rx_rate_cnt_infos[i];
+ seq_printf(m, "%10s [", info->rate_mode);
+ rtw89_debug_append_rx_rate(m, pkt_stat,
+ info->first_rate, info->len);
+ seq_puts(m, "]\n");
+ }
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_sta_info_get_iter, m);
+
+ return 0;
+}
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_read_reg = {
+ .cb_read = rtw89_debug_priv_read_reg_get,
+ .cb_write = rtw89_debug_priv_read_reg_select,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_write_reg = {
+ .cb_write = rtw89_debug_priv_write_reg_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_read_rf = {
+ .cb_read = rtw89_debug_priv_read_rf_get,
+ .cb_write = rtw89_debug_priv_read_rf_select,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_write_rf = {
+ .cb_write = rtw89_debug_priv_write_rf_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_rf_reg_dump = {
+ .cb_read = rtw89_debug_priv_rf_reg_dump_get,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_txpwr_table = {
+ .cb_read = rtw89_debug_priv_txpwr_table_get,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_mac_reg_dump = {
+ .cb_read = rtw89_debug_priv_mac_reg_dump_get,
+ .cb_write = rtw89_debug_priv_mac_reg_dump_select,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_mac_mem_dump = {
+ .cb_read = rtw89_debug_priv_mac_mem_dump_get,
+ .cb_write = rtw89_debug_priv_mac_mem_dump_select,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_mac_dbg_port_dump = {
+ .cb_read = rtw89_debug_priv_mac_dbg_port_dump_get,
+ .cb_write = rtw89_debug_priv_mac_dbg_port_dump_select,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_send_h2c = {
+ .cb_write = rtw89_debug_priv_send_h2c_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_early_h2c = {
+ .cb_read = rtw89_debug_priv_early_h2c_get,
+ .cb_write = rtw89_debug_priv_early_h2c_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_btc_info = {
+ .cb_read = rtw89_debug_priv_btc_info_get,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_btc_manual = {
+ .cb_write = rtw89_debug_priv_btc_manual_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_fw_log_manual = {
+ .cb_write = rtw89_debug_fw_log_btc_manual_set,
+};
+
+static struct rtw89_debugfs_priv rtw89_debug_priv_phy_info = {
+ .cb_read = rtw89_debug_priv_phy_info_get,
+};
+
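+/* Bind a rtw89_debug_priv instance to the device and create its debugfs
+ * file with the matching file operations.
+ */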
+#define rtw89_debugfs_add(name, mode, fopname, parent) \
+ do { \
+ rtw89_debug_priv_ ##name.rtwdev = rtwdev; \
+ if (!debugfs_create_file(#name, mode, \
+ parent, &rtw89_debug_priv_ ##name, \
+ &file_ops_ ##fopname)) \
+ pr_debug("Unable to initialize debugfs:%s\n", #name); \
+ } while (0)
+
+#define rtw89_debugfs_add_w(name) \
+ rtw89_debugfs_add(name, S_IFREG | 0222, single_w, debugfs_topdir)
+#define rtw89_debugfs_add_rw(name) \
+ rtw89_debugfs_add(name, S_IFREG | 0666, common_rw, debugfs_topdir)
+#define rtw89_debugfs_add_r(name) \
+ rtw89_debugfs_add(name, S_IFREG | 0444, single_r, debugfs_topdir)
+
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev)
+{
+ struct dentry *debugfs_topdir;
+
+ debugfs_topdir = debugfs_create_dir("rtw89",
+ rtwdev->hw->wiphy->debugfsdir);
+
+ rtw89_debugfs_add_rw(read_reg);
+ rtw89_debugfs_add_w(write_reg);
+ rtw89_debugfs_add_rw(read_rf);
+ rtw89_debugfs_add_w(write_rf);
+ rtw89_debugfs_add_r(rf_reg_dump);
+ rtw89_debugfs_add_r(txpwr_table);
+ rtw89_debugfs_add_rw(mac_reg_dump);
+ rtw89_debugfs_add_rw(mac_mem_dump);
+ rtw89_debugfs_add_rw(mac_dbg_port_dump);
+ rtw89_debugfs_add_w(send_h2c);
+ rtw89_debugfs_add_rw(early_h2c);
+ rtw89_debugfs_add_r(btc_info);
+ rtw89_debugfs_add_w(btc_manual);
+ rtw89_debugfs_add_w(fw_log_manual);
+ rtw89_debugfs_add_r(phy_info);
+}
+#endif
+
+#ifdef CONFIG_RTW89_DEBUGMSG
+void __rtw89_debug(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask,
+ const char *fmt, ...)
+{
+ struct va_format vaf = {
+ .fmt = fmt,
+ };
+
+ va_list args;
+
+ va_start(args, fmt);
+ vaf.va = &args;
+
+ if (rtw89_debug_mask & mask)
+ dev_printk(KERN_DEBUG, rtwdev->dev, "%pV", &vaf);
+
+ va_end(args);
+}
+EXPORT_SYMBOL(__rtw89_debug);
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/debug.h b/drivers/net/wireless/realtek/rtw89/debug.h
new file mode 100644
index 000000000000..f14b726c1a9f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/debug.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_DEBUG_H__
+#define __RTW89_DEBUG_H__
+
+#include "core.h"
+
+enum rtw89_debug_mask {
+ RTW89_DBG_TXRX = BIT(0),
+ RTW89_DBG_RFK = BIT(1),
+ RTW89_DBG_RFK_TRACK = BIT(2),
+ RTW89_DBG_CFO = BIT(3),
+ RTW89_DBG_TSSI = BIT(4),
+ RTW89_DBG_TXPWR = BIT(5),
+ RTW89_DBG_HCI = BIT(6),
+ RTW89_DBG_RA = BIT(7),
+ RTW89_DBG_REGD = BIT(8),
+ RTW89_DBG_PHY_TRACK = BIT(9),
+ RTW89_DBG_DIG = BIT(10),
+ RTW89_DBG_SER = BIT(11),
+ RTW89_DBG_FW = BIT(12),
+ RTW89_DBG_BTC = BIT(13),
+ RTW89_DBG_BF = BIT(14),
+};
+
+enum rtw89_debug_mac_reg_sel {
+ RTW89_DBG_SEL_MAC_00,
+ RTW89_DBG_SEL_MAC_40,
+ RTW89_DBG_SEL_MAC_80,
+ RTW89_DBG_SEL_MAC_C0,
+ RTW89_DBG_SEL_MAC_E0,
+ RTW89_DBG_SEL_BB,
+ RTW89_DBG_SEL_IQK,
+ RTW89_DBG_SEL_RFC,
+};
+
+#ifdef CONFIG_RTW89_DEBUGFS
+void rtw89_debugfs_init(struct rtw89_dev *rtwdev);
+#else
+static inline void rtw89_debugfs_init(struct rtw89_dev *rtwdev) {}
+#endif
+
+#define rtw89_info(rtwdev, a...) dev_info((rtwdev)->dev, ##a)
+#define rtw89_warn(rtwdev, a...) dev_warn((rtwdev)->dev, ##a)
+#define rtw89_err(rtwdev, a...) dev_err((rtwdev)->dev, ##a)
+
+#ifdef CONFIG_RTW89_DEBUGMSG
+extern unsigned int rtw89_debug_mask;
+#define rtw89_debug(rtwdev, a...) __rtw89_debug(rtwdev, ##a)
+
+__printf(3, 4)
+void __rtw89_debug(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask,
+ const char *fmt, ...);
+static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask,
+ const char *prefix_str,
+ const void *buf, size_t len)
+{
+ if (!(rtw89_debug_mask & mask))
+ return;
+
+ print_hex_dump_bytes(prefix_str, DUMP_PREFIX_OFFSET, buf, len);
+}
+#else
+static inline void rtw89_debug(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask,
+ const char *fmt, ...) {}
+static inline void rtw89_hex_dump(struct rtw89_dev *rtwdev,
+ enum rtw89_debug_mask mask,
+ const char *prefix_str,
+ const void *buf, size_t len) {}
+#endif
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.c b/drivers/net/wireless/realtek/rtw89/efuse.c
new file mode 100644
index 000000000000..c0b80f3da56c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/efuse.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "efuse.h"
+#include "reg.h"
+
+enum rtw89_efuse_bank {
+ RTW89_EFUSE_BANK_WIFI,
+ RTW89_EFUSE_BANK_BT,
+};
+
+static int rtw89_switch_efuse_bank(struct rtw89_dev *rtwdev,
+ enum rtw89_efuse_bank bank)
+{
+ u8 val;
+
+ val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
+ B_AX_EF_CELL_SEL_MASK);
+ if (bank == val)
+ return 0;
+
+ rtw89_write32_mask(rtwdev, R_AX_EFUSE_CTRL_1, B_AX_EF_CELL_SEL_MASK,
+ bank);
+
+ val = rtw89_read32_mask(rtwdev, R_AX_EFUSE_CTRL_1,
+ B_AX_EF_CELL_SEL_MASK);
+ if (bank == val)
+ return 0;
+
+ return -EBUSY;
+}
+
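+/* Read the physical efuse byte by byte: trigger a read through
+ * R_AX_EFUSE_CTRL and poll B_AX_EF_RDY before latching each byte.
+ */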
+static int rtw89_dump_physical_efuse_map(struct rtw89_dev *rtwdev, u8 *map,
+ u32 dump_addr, u32 dump_size)
+{
+ u32 efuse_ctl;
+ u32 addr;
+ int ret;
+
+ rtw89_switch_efuse_bank(rtwdev, RTW89_EFUSE_BANK_WIFI);
+
+ for (addr = dump_addr; addr < dump_addr + dump_size; addr++) {
+ efuse_ctl = u32_encode_bits(addr, B_AX_EF_ADDR_MASK);
+ rtw89_write32(rtwdev, R_AX_EFUSE_CTRL, efuse_ctl & ~B_AX_EF_RDY);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, efuse_ctl,
+ efuse_ctl & B_AX_EF_RDY, 1, 1000000,
+ true, rtwdev, R_AX_EFUSE_CTRL);
+ if (ret)
+ return -EBUSY;
+
+ *map++ = (u8)(efuse_ctl & 0xff);
+ }
+
+ return 0;
+}
+
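+/* Logical map layout: each block starts with a two-byte header encoding
+ * the block index and a word-enable bitmap (a cleared bit marks a present
+ * word); each present word carries two payload bytes at logical offset
+ * (blk_idx << 3) + (word << 1).
+ */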
+#define invalid_efuse_header(hdr1, hdr2) \
+ ((hdr1) == 0xff || (hdr2) == 0xff)
+#define invalid_efuse_content(word_en, i) \
+ (((word_en) & BIT(i)) != 0x0)
+#define get_efuse_blk_idx(hdr1, hdr2) \
+ ((((hdr2) & 0xf0) >> 4) | (((hdr1) & 0x0f) << 4))
+#define block_idx_to_logical_idx(blk_idx, i) \
+ (((blk_idx) << 3) + ((i) << 1))
+static int rtw89_dump_logical_efuse_map(struct rtw89_dev *rtwdev, u8 *phy_map,
+ u8 *log_map)
+{
+ u32 physical_size = rtwdev->chip->physical_efuse_size;
+ u32 logical_size = rtwdev->chip->logical_efuse_size;
+ u8 sec_ctrl_size = rtwdev->chip->sec_ctrl_efuse_size;
+ u32 phy_idx = sec_ctrl_size;
+ u32 log_idx;
+ u8 hdr1, hdr2;
+ u8 blk_idx;
+ u8 word_en;
+ int i;
+
+ while (phy_idx < physical_size - sec_ctrl_size) {
+ hdr1 = phy_map[phy_idx];
+ hdr2 = phy_map[phy_idx + 1];
+ if (invalid_efuse_header(hdr1, hdr2))
+ break;
+
+ blk_idx = get_efuse_blk_idx(hdr1, hdr2);
+ word_en = hdr2 & 0xf;
+ phy_idx += 2;
+
+ for (i = 0; i < 4; i++) {
+ if (invalid_efuse_content(word_en, i))
+ continue;
+
+ log_idx = block_idx_to_logical_idx(blk_idx, i);
+ if (phy_idx + 1 > physical_size - sec_ctrl_size - 1 ||
+ log_idx + 1 > logical_size)
+ return -EINVAL;
+
+ log_map[log_idx] = phy_map[phy_idx];
+ log_map[log_idx + 1] = phy_map[phy_idx + 1];
+ phy_idx += 2;
+ }
+ }
+ return 0;
+}
+
+int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev)
+{
+ u32 phy_size = rtwdev->chip->physical_efuse_size;
+ u32 log_size = rtwdev->chip->logical_efuse_size;
+ u8 *phy_map = NULL;
+ u8 *log_map = NULL;
+ int ret;
+
+ if (rtw89_read16(rtwdev, R_AX_SYS_WL_EFUSE_CTRL) & B_AX_AUTOLOAD_SUS)
+ rtwdev->efuse.valid = true;
+ else
+ rtw89_warn(rtwdev, "failed to check efuse autoload\n");
+
+ phy_map = kmalloc(phy_size, GFP_KERNEL);
+ log_map = kmalloc(log_size, GFP_KERNEL);
+
+ if (!phy_map || !log_map) {
+ ret = -ENOMEM;
+ goto out_free;
+ }
+
+ ret = rtw89_dump_physical_efuse_map(rtwdev, phy_map, 0, phy_size);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse physical map\n");
+ goto out_free;
+ }
+
+ memset(log_map, 0xff, log_size);
+ ret = rtw89_dump_logical_efuse_map(rtwdev, phy_map, log_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump efuse logical map\n");
+ goto out_free;
+ }
+
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "log_map: ", log_map, log_size);
+
+ ret = rtwdev->chip->ops->read_efuse(rtwdev, log_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read efuse map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(log_map);
+ kfree(phy_map);
+
+ return ret;
+}
+
+int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev)
+{
+ u32 phycap_addr = rtwdev->chip->phycap_addr;
+ u32 phycap_size = rtwdev->chip->phycap_size;
+ u8 *phycap_map = NULL;
+ int ret = 0;
+
+ if (!phycap_size)
+ return 0;
+
+ phycap_map = kmalloc(phycap_size, GFP_KERNEL);
+ if (!phycap_map)
+ return -ENOMEM;
+
+ ret = rtw89_dump_physical_efuse_map(rtwdev, phycap_map,
+ phycap_addr, phycap_size);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to dump phycap map\n");
+ goto out_free;
+ }
+
+ ret = rtwdev->chip->ops->read_phycap(rtwdev, phycap_map);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to read phycap map\n");
+ goto out_free;
+ }
+
+out_free:
+ kfree(phycap_map);
+
+ return ret;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/efuse.h b/drivers/net/wireless/realtek/rtw89/efuse.h
new file mode 100644
index 000000000000..622ff95e7476
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/efuse.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_EFUSE_H__
+#define __RTW89_EFUSE_H__
+
+#include "core.h"
+
+int rtw89_parse_efuse_map(struct rtw89_dev *rtwdev);
+int rtw89_parse_phycap_map(struct rtw89_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c
new file mode 100644
index 000000000000..212aaf577d3c
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/fw.c
@@ -0,0 +1,1641 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "cam.h"
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+
+static struct sk_buff *rtw89_fw_h2c_alloc_skb(u32 len, bool header)
+{
+ struct sk_buff *skb;
+ u32 header_len = 0;
+
+ if (header)
+ header_len = H2C_HEADER_LEN;
+
+ skb = dev_alloc_skb(len + header_len + 24);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, header_len + 24);
+ memset(skb->data, 0, len);
+
+ return skb;
+}
+
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len)
+{
+ return rtw89_fw_h2c_alloc_skb(len, true);
+}
+
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len)
+{
+ return rtw89_fw_h2c_alloc_skb(len, false);
+}
+
+static u8 _fw_get_rdy(struct rtw89_dev *rtwdev)
+{
+ u8 val = rtw89_read8(rtwdev, R_AX_WCPU_FW_CTRL);
+
+ return FIELD_GET(B_AX_WCPU_FWDL_STS_MASK, val);
+}
+
+#define FWDL_WAIT_CNT 400000
+int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev)
+{
+ u8 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(_fw_get_rdy, val,
+ val == RTW89_FWDL_WCPU_FW_INIT_RDY,
+ 1, FWDL_WAIT_CNT, false, rtwdev);
+ if (ret) {
+ switch (val) {
+ case RTW89_FWDL_CHECKSUM_FAIL:
+ rtw89_err(rtwdev, "fw checksum fail\n");
+ return -EINVAL;
+
+ case RTW89_FWDL_SECURITY_FAIL:
+ rtw89_err(rtwdev, "fw security fail\n");
+ return -EINVAL;
+
+ case RTW89_FWDL_CV_NOT_MATCH:
+ rtw89_err(rtwdev, "fw cv not match\n");
+ return -EINVAL;
+
+ default:
+ return -EBUSY;
+ }
+ }
+
+ set_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
+
+ return 0;
+}
+
+static int rtw89_fw_hdr_parser(struct rtw89_dev *rtwdev, const u8 *fw, u32 len,
+ struct rtw89_fw_bin_info *info)
+{
+ struct rtw89_fw_hdr_section_info *section_info;
+ const u8 *fw_end = fw + len;
+ const u8 *bin;
+ u32 i;
+
+ if (!info)
+ return -EINVAL;
+
+ info->section_num = GET_FW_HDR_SEC_NUM(fw);
+ info->hdr_len = RTW89_FW_HDR_SIZE +
+ info->section_num * RTW89_FW_SECTION_HDR_SIZE;
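+	/* advertise the per-packet download size in the firmware header */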
+ SET_FW_HDR_PART_SIZE(fw, FWDL_SECTION_PER_PKT_LEN);
+
+ bin = fw + info->hdr_len;
+
+ /* jump to section header */
+ fw += RTW89_FW_HDR_SIZE;
+ section_info = info->section_info;
+ for (i = 0; i < info->section_num; i++) {
+ section_info->len = GET_FWSECTION_HDR_SEC_SIZE(fw);
+ if (GET_FWSECTION_HDR_CHECKSUM(fw))
+ section_info->len += FWDL_SECTION_CHKSUM_LEN;
+ section_info->redl = GET_FWSECTION_HDR_REDL(fw);
+ section_info->dladdr =
+ GET_FWSECTION_HDR_DL_ADDR(fw) & 0x1fffffff;
+ section_info->addr = bin;
+ bin += section_info->len;
+ fw += RTW89_FW_SECTION_HDR_SIZE;
+ section_info++;
+ }
+
+ if (fw_end != bin) {
+ rtw89_err(rtwdev, "[ERR]fw bin size\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type,
+ struct rtw89_fw_suit *fw_suit)
+{
+ struct rtw89_fw_info *fw_info = &rtwdev->fw;
+ const u8 *mfw = fw_info->firmware->data;
+ u32 mfw_len = fw_info->firmware->size;
+ const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw;
+ const struct rtw89_mfw_info *mfw_info;
+ int i;
+
+ if (mfw_hdr->sig != RTW89_MFW_SIG) {
+ rtw89_debug(rtwdev, RTW89_DBG_FW, "use legacy firmware\n");
+		/* legacy firmware supports the normal type only */
+ if (type != RTW89_FW_NORMAL)
+ return -EINVAL;
+ fw_suit->data = mfw;
+ fw_suit->size = mfw_len;
+ return 0;
+ }
+
+ for (i = 0; i < mfw_hdr->fw_nr; i++) {
+ mfw_info = &mfw_hdr->info[i];
+ if (mfw_info->cv != rtwdev->hal.cv ||
+ mfw_info->type != type ||
+ mfw_info->mp)
+ continue;
+
+ fw_suit->data = mfw + le32_to_cpu(mfw_info->shift);
+ fw_suit->size = le32_to_cpu(mfw_info->size);
+ return 0;
+ }
+
+ rtw89_err(rtwdev, "no suitable firmware found\n");
+ return -ENOENT;
+}
+
+static void rtw89_fw_update_ver(struct rtw89_dev *rtwdev,
+ enum rtw89_fw_type type,
+ struct rtw89_fw_suit *fw_suit)
+{
+ const u8 *hdr = fw_suit->data;
+
+ fw_suit->major_ver = GET_FW_HDR_MAJOR_VERSION(hdr);
+ fw_suit->minor_ver = GET_FW_HDR_MINOR_VERSION(hdr);
+ fw_suit->sub_ver = GET_FW_HDR_SUBVERSION(hdr);
+ fw_suit->sub_idex = GET_FW_HDR_SUBINDEX(hdr);
+ fw_suit->build_year = GET_FW_HDR_YEAR(hdr);
+ fw_suit->build_mon = GET_FW_HDR_MONTH(hdr);
+ fw_suit->build_date = GET_FW_HDR_DATE(hdr);
+ fw_suit->build_hour = GET_FW_HDR_HOUR(hdr);
+ fw_suit->build_min = GET_FW_HDR_MIN(hdr);
+	fw_suit->cmd_ver = GET_FW_HDR_CMD_VERSION(hdr);
+
+ rtw89_info(rtwdev,
+ "Firmware version %u.%u.%u.%u, cmd version %u, type %u\n",
+ fw_suit->major_ver, fw_suit->minor_ver, fw_suit->sub_ver,
+ fw_suit->sub_idex, fw_suit->cmd_ver, type);
+}
+
+static
+int __rtw89_fw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
+{
+ struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
+ int ret;
+
+ ret = rtw89_mfw_recognize(rtwdev, type, fw_suit);
+ if (ret)
+ return ret;
+
+ rtw89_fw_update_ver(rtwdev, type, fw_suit);
+
+ return 0;
+}
+
+static void rtw89_fw_recognize_features(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, RTW89_FW_NORMAL);
+
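+	/* 8852A firmware up to 0.13.29.0 reports HT rate adaptation in the old format */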
+ if (chip->chip_id == RTL8852A &&
+ RTW89_FW_SUIT_VER_CODE(fw_suit) <= RTW89_FW_VER_CODE(0, 13, 29, 0))
+ rtwdev->fw.old_ht_ra_format = true;
+}
+
+int rtw89_fw_recognize(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = __rtw89_fw_recognize(rtwdev, RTW89_FW_NORMAL);
+ if (ret)
+ return ret;
+
+	/* It still works even if the WoWLAN firmware does not exist. */
+ __rtw89_fw_recognize(rtwdev, RTW89_FW_WOWLAN);
+
+ rtw89_fw_recognize_features(rtwdev);
+
+ return 0;
+}
+
+void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u8 type, u8 cat, u8 class, u8 func,
+ bool rack, bool dack, u32 len)
+{
+ struct fwcmd_hdr *hdr;
+
+ hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
+
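+	/* request a firmware receive-ack on every fourth H2C command */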
+ if (!(rtwdev->fw.h2c_seq % 4))
+ rack = true;
+ hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
+ FIELD_PREP(H2C_HDR_CAT, cat) |
+ FIELD_PREP(H2C_HDR_CLASS, class) |
+ FIELD_PREP(H2C_HDR_FUNC, func) |
+ FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
+
+ hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
+ len + H2C_HEADER_LEN) |
+ (rack ? H2C_HDR_REC_ACK : 0) |
+ (dack ? H2C_HDR_DONE_ACK : 0));
+
+ rtwdev->fw.h2c_seq++;
+}
+
+static void rtw89_h2c_pkt_set_hdr_fwdl(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb,
+ u8 type, u8 cat, u8 class, u8 func,
+ u32 len)
+{
+ struct fwcmd_hdr *hdr;
+
+ hdr = (struct fwcmd_hdr *)skb_push(skb, 8);
+
+ hdr->hdr0 = cpu_to_le32(FIELD_PREP(H2C_HDR_DEL_TYPE, type) |
+ FIELD_PREP(H2C_HDR_CAT, cat) |
+ FIELD_PREP(H2C_HDR_CLASS, class) |
+ FIELD_PREP(H2C_HDR_FUNC, func) |
+ FIELD_PREP(H2C_HDR_H2C_SEQ, rtwdev->fw.h2c_seq));
+
+ hdr->hdr1 = cpu_to_le32(FIELD_PREP(H2C_HDR_TOTAL_LEN,
+ len + H2C_HEADER_LEN));
+}
+
+static int __rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+{
+ struct sk_buff *skb;
+	int ret;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw hdr dl\n");
+ return -ENOMEM;
+ }
+
+ skb_put_data(skb, fw, len);
+ rtw89_h2c_pkt_set_hdr_fwdl(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FWDL,
+ H2C_FUNC_MAC_FWHDR_DL, len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, false);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ ret = -1;
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int rtw89_fw_download_hdr(struct rtw89_dev *rtwdev, const u8 *fw, u32 len)
+{
+ u8 val;
+ int ret;
+
+ ret = __rtw89_fw_download_hdr(rtwdev, fw, len);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]FW header download\n");
+ return ret;
+ }
+
+ ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_FWDL_PATH_RDY,
+ 1, FWDL_WAIT_CNT, false,
+ rtwdev, R_AX_WCPU_FW_CTRL);
+ if (ret) {
+		rtw89_err(rtwdev, "[ERR]FWDL path not ready\n");
+ return ret;
+ }
+
+ rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+
+ return 0;
+}
+
+static int __rtw89_fw_download_main(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_hdr_section_info *info)
+{
+ struct sk_buff *skb;
+ const u8 *section = info->addr;
+ u32 residue_len = info->len;
+ u32 pkt_len;
+ int ret;
+
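+	/* send the section as a series of FWDL_SECTION_PER_PKT_LEN-sized H2C packets */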
+ while (residue_len) {
+ if (residue_len >= FWDL_SECTION_PER_PKT_LEN)
+ pkt_len = FWDL_SECTION_PER_PKT_LEN;
+ else
+ pkt_len = residue_len;
+
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(pkt_len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw dl\n");
+ return -ENOMEM;
+ }
+ skb_put_data(skb, section, pkt_len);
+
+ ret = rtw89_h2c_tx(rtwdev, skb, true);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ ret = -1;
+ goto fail;
+ }
+
+ section += pkt_len;
+ residue_len -= pkt_len;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return ret;
+}
+
+static int rtw89_fw_download_main(struct rtw89_dev *rtwdev, const u8 *fw,
+ struct rtw89_fw_bin_info *info)
+{
+ struct rtw89_fw_hdr_section_info *section_info = info->section_info;
+ u8 section_num = info->section_num;
+ int ret;
+
+ while (section_num--) {
+ ret = __rtw89_fw_download_main(rtwdev, section_info);
+ if (ret)
+ return ret;
+ section_info++;
+ }
+
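+	/* give the WCPU a moment to process the last section before polling the ready flag */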
+ mdelay(5);
+
+ ret = rtw89_fw_check_rdy(rtwdev);
+ if (ret) {
+		rtw89_warn(rtwdev, "failed to download firmware\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_fw_prog_cnt_dump(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u16 index;
+
+ rtw89_write32(rtwdev, R_AX_DBG_CTRL,
+ FIELD_PREP(B_AX_DBG_SEL0, FW_PROG_CNTR_DBG_SEL) |
+ FIELD_PREP(B_AX_DBG_SEL1, FW_PROG_CNTR_DBG_SEL));
+ rtw89_write32_mask(rtwdev, R_AX_SYS_STATUS1, B_AX_SEL_0XC0_MASK, MAC_DBG_SEL);
+
+ for (index = 0; index < 15; index++) {
+ val32 = rtw89_read32(rtwdev, R_AX_DBG_PORT_SEL);
+ rtw89_err(rtwdev, "[ERR]fw PC = 0x%x\n", val32);
+ fsleep(10);
+ }
+}
+
+static void rtw89_fw_dl_fail_dump(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ u16 val16;
+
+ val32 = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
+ rtw89_err(rtwdev, "[ERR]fwdl 0x1E0 = 0x%x\n", val32);
+
+ val16 = rtw89_read16(rtwdev, R_AX_BOOT_DBG + 2);
+ rtw89_err(rtwdev, "[ERR]fwdl 0x83F2 = 0x%x\n", val16);
+
+ rtw89_fw_prog_cnt_dump(rtwdev);
+}
+
+int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type)
+{
+ struct rtw89_fw_info *fw_info = &rtwdev->fw;
+ struct rtw89_fw_suit *fw_suit = rtw89_fw_suit_get(rtwdev, type);
+ struct rtw89_fw_bin_info info;
+ const u8 *fw = fw_suit->data;
+ u32 len = fw_suit->size;
+ u8 val;
+ int ret;
+
+ if (!fw || !len) {
+ rtw89_err(rtwdev, "fw type %d isn't recognized\n", type);
+ return -ENOENT;
+ }
+
+ ret = rtw89_fw_hdr_parser(rtwdev, fw, len, &info);
+ if (ret) {
+		rtw89_err(rtwdev, "failed to parse fw header\n");
+ goto fwdl_err;
+ }
+
+ ret = read_poll_timeout_atomic(rtw89_read8, val, val & B_AX_H2C_PATH_RDY,
+ 1, FWDL_WAIT_CNT, false,
+ rtwdev, R_AX_WCPU_FW_CTRL);
+ if (ret) {
+		rtw89_err(rtwdev, "[ERR]H2C path not ready\n");
+ goto fwdl_err;
+ }
+
+ ret = rtw89_fw_download_hdr(rtwdev, fw, info.hdr_len);
+ if (ret) {
+ ret = -EBUSY;
+ goto fwdl_err;
+ }
+
+ ret = rtw89_fw_download_main(rtwdev, fw, &info);
+ if (ret) {
+ ret = -EBUSY;
+ goto fwdl_err;
+ }
+
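+	/* reset H2C/C2H and power-mode sequence counters for the freshly loaded firmware */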
+ fw_info->h2c_seq = 0;
+ fw_info->rec_seq = 0;
+ rtwdev->mac.rpwm_seq_num = RPWM_SEQ_NUM_MAX;
+ rtwdev->mac.cpwm_seq_num = CPWM_SEQ_NUM_MAX;
+
+ return ret;
+
+fwdl_err:
+ rtw89_fw_dl_fail_dump(rtwdev);
+ return ret;
+}
+
+int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_info *fw = &rtwdev->fw;
+
+ wait_for_completion(&fw->completion);
+ if (!fw->firmware)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void rtw89_load_firmware_cb(const struct firmware *firmware, void *context)
+{
+ struct rtw89_fw_info *fw = context;
+ struct rtw89_dev *rtwdev = fw->rtwdev;
+
+ if (!firmware || !firmware->data) {
+ rtw89_err(rtwdev, "failed to request firmware\n");
+ complete_all(&fw->completion);
+ return;
+ }
+
+ fw->firmware = firmware;
+ complete_all(&fw->completion);
+}
+
+int rtw89_load_firmware(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_info *fw = &rtwdev->fw;
+ const char *fw_name = rtwdev->chip->fw_name;
+ int ret;
+
+ fw->rtwdev = rtwdev;
+ init_completion(&fw->completion);
+
+ ret = request_firmware_nowait(THIS_MODULE, true, fw_name, rtwdev->dev,
+ GFP_KERNEL, fw, rtw89_load_firmware_cb);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to async firmware request\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void rtw89_unload_firmware(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_fw_info *fw = &rtwdev->fw;
+
+ rtw89_wait_firmware_completion(rtwdev);
+
+ if (fw->firmware)
+ release_firmware(fw->firmware);
+}
+
+#define H2C_CAM_LEN 60
+int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CAM_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CAM_LEN);
+ rtw89_cam_fill_addr_cam_info(rtwdev, rtwvif, skb->data);
+ rtw89_cam_fill_bssid_cam_info(rtwdev, rtwvif, skb->data);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_ADDR_CAM_UPDATE,
+ H2C_FUNC_MAC_ADDR_CAM_UPD, 0, 1,
+ H2C_CAM_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_BA_CAM_LEN 4
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
+ struct ieee80211_ampdu_params *params)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_BA_CAM_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ba cam\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_BA_CAM_LEN);
+ SET_BA_CAM_MACID(skb->data, macid);
+ if (!valid)
+ goto end;
+ SET_BA_CAM_VALID(skb->data, valid);
+ SET_BA_CAM_TID(skb->data, params->tid);
+ if (params->buf_size > 64)
+ SET_BA_CAM_BMAP_SIZE(skb->data, 4);
+ else
+ SET_BA_CAM_BMAP_SIZE(skb->data, 0);
+ /* If init req is set, hw will set the ssn */
+ SET_BA_CAM_INIT_REQ(skb->data, 0);
+ SET_BA_CAM_SSN(skb->data, params->ssn);
+
+end:
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_BA_CAM,
+ H2C_FUNC_MAC_BA_CAM, 0, 1,
+ H2C_BA_CAM_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LOG_CFG_LEN 12
+int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable)
+{
+ struct sk_buff *skb;
+ u32 comp = enable ? BIT(RTW89_FW_LOG_COMP_INIT) | BIT(RTW89_FW_LOG_COMP_TASK) |
+ BIT(RTW89_FW_LOG_COMP_PS) | BIT(RTW89_FW_LOG_COMP_ERROR) : 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LOG_CFG_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for fw log cfg\n");
+ return -ENOMEM;
+ }
+
+ skb_put(skb, H2C_LOG_CFG_LEN);
+ SET_LOG_CFG_LEVEL(skb->data, RTW89_FW_LOG_LEVEL_SER);
+ SET_LOG_CFG_PATH(skb->data, BIT(RTW89_FW_LOG_LEVEL_C2H));
+ SET_LOG_CFG_COMP(skb->data, comp);
+ SET_LOG_CFG_COMP_EXT(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_FW_INFO,
+ H2C_FUNC_LOG_CFG, 0, 0,
+ H2C_LOG_CFG_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_GENERAL_PKT_LEN 6
+#define H2C_GENERAL_PKT_ID_UND 0xff
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_GENERAL_PKT_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c general packet\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_GENERAL_PKT_LEN);
+ SET_GENERAL_PKT_MACID(skb->data, macid);
+ SET_GENERAL_PKT_PROBRSP_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_PSPOLL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_QOS_NULL_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+ SET_GENERAL_PKT_CTS2SELF_ID(skb->data, H2C_GENERAL_PKT_ID_UND);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_FW_INFO,
+ H2C_FUNC_MAC_GENERAL_PKT, 0, 1,
+ H2C_GENERAL_PKT_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LPS_PARM_LEN 8
+int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
+ struct rtw89_lps_parm *lps_param)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LPS_PARM_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c lps parm\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LPS_PARM_LEN);
+
+ SET_LPS_PARM_MACID(skb->data, lps_param->macid);
+ SET_LPS_PARM_PSMODE(skb->data, lps_param->psmode);
+ SET_LPS_PARM_LASTRPWM(skb->data, lps_param->lastrpwm);
+ SET_LPS_PARM_RLBM(skb->data, 1);
+ SET_LPS_PARM_SMARTPS(skb->data, 1);
+ SET_LPS_PARM_AWAKEINTERVAL(skb->data, 1);
+ SET_LPS_PARM_VOUAPSD(skb->data, 0);
+ SET_LPS_PARM_VIUAPSD(skb->data, 0);
+ SET_LPS_PARM_BEUAPSD(skb->data, 0);
+ SET_LPS_PARM_BKUAPSD(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC,
+ H2C_CL_MAC_PS,
+ H2C_FUNC_MAC_LPS_PARM, 0, 1,
+ H2C_LPS_PARM_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_CMC_TBL_LEN 68
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct sk_buff *skb;
+ u8 ntx_path = hal->antenna_tx ? hal->antenna_tx : RF_B;
+ u8 map_b = hal->antenna_tx == RF_AB ? 1 : 0;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, macid);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+ SET_CMC_TBL_TXPWR_MODE(skb->data, 0);
+ SET_CMC_TBL_NTX_PATH_EN(skb->data, ntx_path);
+ SET_CMC_TBL_PATH_MAP_A(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_B(skb->data, map_b);
+ SET_CMC_TBL_PATH_MAP_C(skb->data, 0);
+ SET_CMC_TBL_PATH_MAP_D(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_A(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_B(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_C(skb->data, 0);
+ SET_CMC_TBL_ANTSEL_D(skb->data, 0);
+ SET_CMC_TBL_DOPPLER_CTRL(skb->data, 0);
+ SET_CMC_TBL_TXPWR_TOLERENCE(skb->data, 0);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, u8 *pads)
+{
+ bool ppe_th;
+ u8 ppe16, ppe8;
+ u8 nss = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ u8 ppe_thres_hdr = sta->he_cap.ppe_thres[0];
+ u8 ru_bitmap;
+ u8 n, idx, sh;
+ u16 ppe;
+ int i;
+
+ if (!sta->he_cap.has_he)
+ return;
+
+ ppe_th = FIELD_GET(IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
+ sta->he_cap.he_cap_elem.phy_cap_info[6]);
+ if (!ppe_th) {
+ u8 pad;
+
+ pad = FIELD_GET(IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK,
+ sta->he_cap.he_cap_elem.phy_cap_info[9]);
+
+		for (i = 0; i < RTW89_PPE_BW_NUM; i++)
+			pads[i] = pad;
+
+		return;
+	}
+
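+	/* PPE thresholds: a 7-bit header, then PPET16/PPET8 pairs per RU for each NSS */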
+ ru_bitmap = FIELD_GET(IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK, ppe_thres_hdr);
+ n = hweight8(ru_bitmap);
+ n = 7 + (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) * nss;
+
+ for (i = 0; i < RTW89_PPE_BW_NUM; i++) {
+ if (!(ru_bitmap & BIT(i))) {
+ pads[i] = 1;
+ continue;
+ }
+
+ idx = n >> 3;
+ sh = n & 7;
+ n += IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2;
+
+ ppe = le16_to_cpu(*((__le16 *)&sta->he_cap.ppe_thres[idx]));
+ ppe16 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+ sh += IEEE80211_PPE_THRES_INFO_PPET_SIZE;
+ ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK;
+
+ if (ppe16 != 7 && ppe8 == 7)
+ pads[i] = 2;
+ else if (ppe8 != 7)
+ pads[i] = 1;
+ else
+ pads[i] = 0;
+ }
+}
+
+int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct sk_buff *skb;
+ u8 pads[RTW89_PPE_BW_NUM];
+
+ memset(pads, 0, sizeof(pads));
+ __get_sta_he_pkt_padding(rtwdev, sta, pads);
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+ SET_CMC_TBL_DISRTSFB(skb->data, 1);
+ SET_CMC_TBL_DISDATAFB(skb->data, 1);
+ if (hal->current_band_type == RTW89_BAND_2G)
+ SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_CCK1);
+ else
+ SET_CMC_TBL_RTS_RTY_LOWEST_RATE(skb->data, RTW89_HW_RATE_OFDM6);
+ SET_CMC_TBL_RTS_TXCNT_LMT_SEL(skb->data, 0);
+ SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 0);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ SET_CMC_TBL_ULDL(skb->data, 1);
+ else
+ SET_CMC_TBL_ULDL(skb->data, 0);
+ SET_CMC_TBL_MULTI_PORT_ID(skb->data, rtwvif->port);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING(skb->data, pads[RTW89_CHANNEL_WIDTH_20]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING40(skb->data, pads[RTW89_CHANNEL_WIDTH_40]);
+ SET_CMC_TBL_NOMINAL_PKT_PADDING80(skb->data, pads[RTW89_CHANNEL_WIDTH_80]);
+ SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(skb->data, sta->he_cap.has_he);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_CMC_TBL_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c cmac table\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_CMC_TBL_LEN);
+ SET_CTRL_INFO_MACID(skb->data, rtwsta->mac_id);
+ SET_CTRL_INFO_OPERATION(skb->data, 1);
+ if (rtwsta->cctl_tx_time) {
+ SET_CMC_TBL_AMPDU_TIME_SEL(skb->data, 1);
+ SET_CMC_TBL_AMPDU_MAX_TIME(skb->data, rtwsta->ampdu_max_time);
+ }
+ if (rtwsta->cctl_tx_retry_limit) {
+ SET_CMC_TBL_DATA_TXCNT_LMT_SEL(skb->data, 1);
+ SET_CMC_TBL_DATA_TX_CNT_LMT(skb->data, rtwsta->data_tx_cnt_lmt);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FR_EXCHG,
+ H2C_FUNC_MAC_CCTLINFO_UD, 0, 1,
+ H2C_CMC_TBL_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_VIF_MAINTAIN_LEN 4
+int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ enum rtw89_upd_mode upd_mode)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_VIF_MAINTAIN_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c vif maintain\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_VIF_MAINTAIN_LEN);
+ SET_FWROLE_MAINTAIN_MACID(skb->data, rtwvif->mac_id);
+ SET_FWROLE_MAINTAIN_SELF_ROLE(skb->data, rtwvif->self_role);
+ SET_FWROLE_MAINTAIN_UPD_MODE(skb->data, upd_mode);
+ SET_FWROLE_MAINTAIN_WIFI_ROLE(skb->data, rtwvif->wifi_role);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
+ H2C_FUNC_MAC_FWROLE_MAINTAIN, 0, 1,
+ H2C_VIF_MAINTAIN_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_JOIN_INFO_LEN 4
+int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 dis_conn)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_JOIN_INFO_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c join\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_JOIN_INFO_LEN);
+ SET_JOININFO_MACID(skb->data, rtwvif->mac_id);
+ SET_JOININFO_OP(skb->data, dis_conn);
+ SET_JOININFO_BAND(skb->data, rtwvif->mac_idx);
+ SET_JOININFO_WMM(skb->data, rtwvif->wmm);
+ SET_JOININFO_TGR(skb->data, rtwvif->trigger);
+ SET_JOININFO_ISHESTA(skb->data, 0);
+ SET_JOININFO_DLBW(skb->data, 0);
+ SET_JOININFO_TF_MAC_PAD(skb->data, 0);
+ SET_JOININFO_DL_T_PE(skb->data, 0);
+ SET_JOININFO_PORT_ID(skb->data, rtwvif->port);
+ SET_JOININFO_NET_TYPE(skb->data, rtwvif->net_type);
+ SET_JOININFO_WIFI_ROLE(skb->data, rtwvif->wifi_role);
+ SET_JOININFO_SELF_ROLE(skb->data, rtwvif->self_role);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_MEDIA_RPT,
+ H2C_FUNC_MAC_JOININFO, 0, 1,
+ H2C_JOIN_INFO_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
+ bool pause)
+{
+ struct rtw89_fw_macid_pause_grp h2c = {{0}};
+ u8 len = sizeof(struct rtw89_fw_macid_pause_grp);
+ struct sk_buff *skb;
+
+	skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+	if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c macid pause\n");
+ return -ENOMEM;
+ }
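+	/* mark the macid's bit in its 32-bit group; set the matching pause bit only when pausing */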
+ h2c.mask_grp[grp] = cpu_to_le32(BIT(sh));
+ if (pause)
+ h2c.pause_grp[grp] = cpu_to_le32(BIT(sh));
+ skb_put_data(skb, &h2c, len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_MAC_MACID_PAUSE, 1, 0,
+ len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_EDCA_LEN 12
+int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 ac, u32 val)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_EDCA_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c edca\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_EDCA_LEN);
+ RTW89_SET_EDCA_SEL(skb->data, 0);
+ RTW89_SET_EDCA_BAND(skb->data, rtwvif->mac_idx);
+ RTW89_SET_EDCA_WMM(skb->data, 0);
+ RTW89_SET_EDCA_AC(skb->data, ac);
+ RTW89_SET_EDCA_PARAM(skb->data, val);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_USR_EDCA, 0, 1,
+ H2C_EDCA_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_OFLD_CFG_LEN 8
+int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev)
+{
+ static const u8 cfg[] = {0x09, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x00, 0x00};
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_OFLD_CFG_LEN);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c ofld\n");
+ return -ENOMEM;
+ }
+ skb_put_data(skb, cfg, H2C_OFLD_CFG_LEN);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_MAC, H2C_CL_MAC_FW_OFLD,
+ H2C_FUNC_OFLD_CFG, 0, 1,
+ H2C_OFLD_CFG_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_RA_LEN 16
+int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi)
+{
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_RA_LEN);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c ra\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_RA_LEN);
+ cmd = skb->data;
+	rtw89_debug(rtwdev, RTW89_DBG_RA,
+		    "ra cmd msk: %llx\n", ra->ra_mask);
+
+ RTW89_SET_FWCMD_RA_MODE(cmd, ra->mode_ctrl);
+ RTW89_SET_FWCMD_RA_BW_CAP(cmd, ra->bw_cap);
+ RTW89_SET_FWCMD_RA_MACID(cmd, ra->macid);
+ RTW89_SET_FWCMD_RA_DCM(cmd, ra->dcm_cap);
+ RTW89_SET_FWCMD_RA_ER(cmd, ra->er_cap);
+ RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, ra->init_rate_lv);
+ RTW89_SET_FWCMD_RA_UPD_ALL(cmd, ra->upd_all);
+ RTW89_SET_FWCMD_RA_SGI(cmd, ra->en_sgi);
+ RTW89_SET_FWCMD_RA_LDPC(cmd, ra->ldpc_cap);
+ RTW89_SET_FWCMD_RA_STBC(cmd, ra->stbc_cap);
+ RTW89_SET_FWCMD_RA_SS_NUM(cmd, ra->ss_num);
+ RTW89_SET_FWCMD_RA_GILTF(cmd, ra->giltf);
+ RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, ra->upd_bw_nss_mask);
+ RTW89_SET_FWCMD_RA_UPD_MASK(cmd, ra->upd_mask);
+ RTW89_SET_FWCMD_RA_MASK_0(cmd, FIELD_GET(MASKBYTE0, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_MASK_1(cmd, FIELD_GET(MASKBYTE1, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_MASK_2(cmd, FIELD_GET(MASKBYTE2, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_MASK_3(cmd, FIELD_GET(MASKBYTE3, ra->ra_mask));
+ RTW89_SET_FWCMD_RA_MASK_4(cmd, FIELD_GET(MASKBYTE4, ra->ra_mask));
+
+ if (csi) {
+ RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, 1);
+ RTW89_SET_FWCMD_RA_BAND_NUM(cmd, ra->band_num);
+ RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, ra->cr_tbl_sel);
+ RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, ra->fixed_csi_rate_en);
+ RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, ra->ra_csi_rate_en);
+ RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, ra->csi_mcs_ss_idx);
+ RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, ra->csi_mode);
+ RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, ra->csi_gi_ltf);
+ RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, ra->csi_bw);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, H2C_CL_OUTSRC_RA,
+ H2C_FUNC_OUTSRC_RA_MACIDCFG, 0, 0,
+ H2C_RA_LEN);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_CXDRVHDR 2
+#define H2C_LEN_CXDRVINFO_INIT (12 + H2C_LEN_CXDRVHDR)
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_init_info *init_info = &dm->init_info;
+ struct rtw89_btc_module *module = &init_info->module;
+ struct rtw89_btc_ant_info *ant = &module->ant;
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_INIT);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_init\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_INIT);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_INIT);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_INIT - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, ant->type);
+ RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, ant->num);
+ RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, ant->isolation);
+ RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, ant->single_pos);
+ RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, ant->diversity);
+
+ RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, module->rfe_type);
+ RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, module->cv);
+ RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, module->bt_solo);
+ RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, module->bt_pos);
+ RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, module->switch_type);
+
+ RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, init_info->wl_guard_ch);
+ RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, init_info->wl_only);
+ RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, init_info->wl_init_ok);
+ RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, init_info->dbcc_en);
+ RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, init_info->cx_other);
+ RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, init_info->bt_only);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_INIT);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_CXDRVINFO_ROLE (4 + 12 * RTW89_MAX_HW_PORT_NUM + H2C_LEN_CXDRVHDR)
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_role_info *role_info = &wl->role_info;
+ struct rtw89_btc_wl_role_info_bpos *bpos = &role_info->role_map.role;
+ struct rtw89_btc_wl_active_role *active = role_info->active_role;
+ struct sk_buff *skb;
+ u8 *cmd;
+ int i;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_ROLE);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_role\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_ROLE);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_ROLE);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_ROLE - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, role_info->connect_cnt);
+ RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, role_info->link_mode);
+
+ RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, bpos->none);
+ RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, bpos->station);
+ RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, bpos->ap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, bpos->vap);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, bpos->adhoc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, bpos->adhoc_master);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, bpos->mesh);
+ RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, bpos->moniter);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, bpos->p2p_device);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, bpos->p2p_gc);
+ RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, bpos->p2p_go);
+ RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, bpos->nan);
+
+ for (i = 0; i < RTW89_MAX_HW_PORT_NUM; i++, active++) {
+ RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, active->connected, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, active->pid, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, active->phy, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, active->noa, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, active->band, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, active->client_ps, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, active->bw, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, active->role, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, active->ch, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, active->tx_lvl, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, active->rx_lvl, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, active->tx_rate, i);
+ RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, active->rx_rate, i);
+ }
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_ROLE);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_CXDRVINFO_CTRL (4 + H2C_LEN_CXDRVHDR)
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_ctrl *ctrl = &btc->ctrl;
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_CTRL);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_ctrl\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_CTRL);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_CTRL);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_CTRL - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, ctrl->manual);
+ RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, ctrl->igno_bt);
+ RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, ctrl->always_freerun);
+ RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, ctrl->trace_step);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_CTRL);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+#define H2C_LEN_CXDRVINFO_RFK (4 + H2C_LEN_CXDRVHDR)
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_wl_info *wl = &btc->cx.wl;
+ struct rtw89_btc_wl_rfk_info *rfk_info = &wl->rfk_info;
+ struct sk_buff *skb;
+ u8 *cmd;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(H2C_LEN_CXDRVINFO_RFK);
+ if (!skb) {
+		rtw89_err(rtwdev, "failed to alloc skb for h2c cxdrv_rfk\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, H2C_LEN_CXDRVINFO_RFK);
+ cmd = skb->data;
+
+ RTW89_SET_FWCMD_CXHDR_TYPE(cmd, CXDRVINFO_RFK);
+ RTW89_SET_FWCMD_CXHDR_LEN(cmd, H2C_LEN_CXDRVINFO_RFK - H2C_LEN_CXDRVHDR);
+
+ RTW89_SET_FWCMD_CXRFK_STATE(cmd, rfk_info->state);
+ RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, rfk_info->path_map);
+ RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, rfk_info->phy_map);
+ RTW89_SET_FWCMD_CXRFK_BAND(cmd, rfk_info->band);
+ RTW89_SET_FWCMD_CXRFK_TYPE(cmd, rfk_info->type);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, BTFC_SET,
+ SET_DRV_INFO, 0, 0,
+ H2C_LEN_CXDRVINFO_RFK);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_h2c_rf_reg_info *info,
+ u16 len, u8 page)
+{
+ struct sk_buff *skb;
+ u8 class = info->rf_path == RF_PATH_A ?
+ H2C_CL_OUTSRC_RF_REG_A : H2C_CL_OUTSRC_RF_REG_B;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c rf reg\n");
+ return -ENOMEM;
+ }
+ skb_put_data(skb, info->rtw89_phy_config_rf_h2c[page], len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, class, page, 0, 0,
+ len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
+ u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
+ bool rack, bool dack)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_with_hdr(len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for raw with hdr\n");
+ return -ENOMEM;
+ }
+ skb_put_data(skb, buf, len);
+
+ rtw89_h2c_pkt_set_hdr(rtwdev, skb, FWCMD_TYPE_H2C,
+ H2C_CAT_OUTSRC, h2c_class, h2c_func, rack, dack,
+ len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len)
+{
+ struct sk_buff *skb;
+
+ skb = rtw89_fw_h2c_alloc_skb_no_hdr(len);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to alloc skb for h2c raw\n");
+ return -ENOMEM;
+ }
+ skb_put_data(skb, buf, len);
+
+ if (rtw89_h2c_tx(rtwdev, skb, false)) {
+ rtw89_err(rtwdev, "failed to send h2c\n");
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dev_kfree_skb_any(skb);
+
+ return -EBUSY;
+}
+
+void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_early_h2c *early_h2c;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ list_for_each_entry(early_h2c, &rtwdev->early_h2c_list, list) {
+ rtw89_fw_h2c_raw(rtwdev, early_h2c->h2c, early_h2c->h2c_len);
+ }
+}
+
+void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_early_h2c *early_h2c, *tmp;
+
+ mutex_lock(&rtwdev->mutex);
+ list_for_each_entry_safe(early_h2c, tmp, &rtwdev->early_h2c_list, list) {
+ list_del(&early_h2c->list);
+ kfree(early_h2c->h2c);
+ kfree(early_h2c);
+ }
+ mutex_unlock(&rtwdev->mutex);
+}
+
+void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h)
+{
+ skb_queue_tail(&rtwdev->c2h_queue, c2h);
+ ieee80211_queue_work(rtwdev->hw, &rtwdev->c2h_work);
+}
+
+static void rtw89_fw_c2h_cmd_handle(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ u8 category = RTW89_GET_C2H_CATEGORY(skb->data);
+ u8 class = RTW89_GET_C2H_CLASS(skb->data);
+ u8 func = RTW89_GET_C2H_FUNC(skb->data);
+ u16 len = RTW89_GET_C2H_LEN(skb->data);
+ bool dump = true;
+
+ if (!test_bit(RTW89_FLAG_RUNNING, rtwdev->flags))
+ return;
+
+ switch (category) {
+ case RTW89_C2H_CAT_TEST:
+ break;
+ case RTW89_C2H_CAT_MAC:
+ rtw89_mac_c2h_handle(rtwdev, skb, len, class, func);
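+		/* skip the raw hex dump below for firmware log C2Hs */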
+ if (class == RTW89_MAC_C2H_CLASS_INFO &&
+ func == RTW89_MAC_C2H_FUNC_C2H_LOG)
+ dump = false;
+ break;
+ case RTW89_C2H_CAT_OUTSRC:
+ if (class >= RTW89_PHY_C2H_CLASS_BTC_MIN &&
+ class <= RTW89_PHY_C2H_CLASS_BTC_MAX)
+ rtw89_btc_c2h_handle(rtwdev, skb, len, class, func);
+ else
+ rtw89_phy_c2h_handle(rtwdev, skb, len, class, func);
+ break;
+ }
+
+ if (dump)
+ rtw89_hex_dump(rtwdev, RTW89_DBG_FW, "C2H: ", skb->data, skb->len);
+}
+
+void rtw89_fw_c2h_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ c2h_work);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&rtwdev->c2h_queue, skb, tmp) {
+ skb_unlink(skb, &rtwdev->c2h_queue);
+ mutex_lock(&rtwdev->mutex);
+ rtw89_fw_c2h_cmd_handle(rtwdev, skb);
+ mutex_unlock(&rtwdev->mutex);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static int rtw89_fw_write_h2c_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_h2c_info *info)
+{
+ static const u32 h2c_reg[RTW89_H2CREG_MAX] = {
+ R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1,
+ R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3
+ };
+ u8 i, val, len;
+ int ret;
+
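+	/* wait for the firmware to consume the previous H2C register content */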
+ ret = read_poll_timeout(rtw89_read8, val, val == 0, 1000, 5000, false,
+ rtwdev, R_AX_H2CREG_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "FW does not process h2c registers\n");
+ return ret;
+ }
+
+ len = DIV_ROUND_UP(info->content_len + RTW89_H2CREG_HDR_LEN,
+ sizeof(info->h2creg[0]));
+
+ RTW89_SET_H2CREG_HDR_FUNC(&info->h2creg[0], info->id);
+ RTW89_SET_H2CREG_HDR_LEN(&info->h2creg[0], len);
+ for (i = 0; i < RTW89_H2CREG_MAX; i++)
+ rtw89_write32(rtwdev, h2c_reg[i], info->h2creg[i]);
+
+ rtw89_write8(rtwdev, R_AX_H2CREG_CTRL, B_AX_H2CREG_TRIGGER);
+
+ return 0;
+}
+
+static int rtw89_fw_read_c2h_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_c2h_info *info)
+{
+ static const u32 c2h_reg[RTW89_C2HREG_MAX] = {
+ R_AX_C2HREG_DATA0, R_AX_C2HREG_DATA1,
+ R_AX_C2HREG_DATA2, R_AX_C2HREG_DATA3
+ };
+	int ret;
+ u8 i, val;
+
+ info->id = RTW89_FWCMD_C2HREG_FUNC_NULL;
+
+ ret = read_poll_timeout_atomic(rtw89_read8, val, val, 1,
+ RTW89_C2H_TIMEOUT, false, rtwdev,
+ R_AX_C2HREG_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "c2h reg timeout\n");
+ return ret;
+ }
+
+ for (i = 0; i < RTW89_C2HREG_MAX; i++)
+ info->c2hreg[i] = rtw89_read32(rtwdev, c2h_reg[i]);
+
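+	/* clear the trigger so the firmware can post the next C2H message */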
+ rtw89_write8(rtwdev, R_AX_C2HREG_CTRL, 0);
+
+ info->id = RTW89_GET_C2H_HDR_FUNC(*info->c2hreg);
+ info->content_len = (RTW89_GET_C2H_HDR_LEN(*info->c2hreg) << 2) -
+ RTW89_C2HREG_HDR_LEN;
+
+ return 0;
+}
+
+int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_h2c_info *h2c_info,
+ struct rtw89_mac_c2h_info *c2h_info)
+{
+	int ret;
+
+ if (h2c_info && h2c_info->id != RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE)
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (!h2c_info && !c2h_info)
+ return -EINVAL;
+
+ if (!h2c_info)
+ goto recv_c2h;
+
+ ret = rtw89_fw_write_h2c_reg(rtwdev, h2c_info);
+ if (ret)
+ return ret;
+
+recv_c2h:
+ if (!c2h_info)
+ return 0;
+
+ ret = rtw89_fw_read_c2h_reg(rtwdev, c2h_info);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags)) {
+ rtw89_err(rtwdev, "[ERR]pwr is off\n");
+ return;
+ }
+
+ rtw89_info(rtwdev, "FW status = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM0));
+ rtw89_info(rtwdev, "FW BADADDR = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM1));
+ rtw89_info(rtwdev, "FW EPC/RA = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM2));
+ rtw89_info(rtwdev, "FW MISC = 0x%x\n", rtw89_read32(rtwdev, R_AX_UDM3));
+ rtw89_info(rtwdev, "R_AX_HALT_C2H = 0x%x\n",
+ rtw89_read32(rtwdev, R_AX_HALT_C2H));
+ rtw89_info(rtwdev, "R_AX_SER_DBG_INFO = 0x%x\n",
+ rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
+
+ rtw89_fw_prog_cnt_dump(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h
new file mode 100644
index 000000000000..7ee0d9323310
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/fw.h
@@ -0,0 +1,1378 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_FW_H__
+#define __RTW89_FW_H__
+
+#include "core.h"
+
+enum rtw89_fw_dl_status {
+ RTW89_FWDL_INITIAL_STATE = 0,
+ RTW89_FWDL_FWDL_ONGOING = 1,
+ RTW89_FWDL_CHECKSUM_FAIL = 2,
+ RTW89_FWDL_SECURITY_FAIL = 3,
+ RTW89_FWDL_CV_NOT_MATCH = 4,
+ RTW89_FWDL_RSVD0 = 5,
+ RTW89_FWDL_WCPU_FWDL_RDY = 6,
+ RTW89_FWDL_WCPU_FW_INIT_RDY = 7
+};
+
+#define RTW89_GET_C2H_HDR_FUNC(info) \
+ u32_get_bits(info, GENMASK(6, 0))
+#define RTW89_GET_C2H_HDR_LEN(info) \
+ u32_get_bits(info, GENMASK(11, 8))
+
+#define RTW89_SET_H2CREG_HDR_FUNC(info, val) \
+ u32p_replace_bits(info, val, GENMASK(6, 0))
+#define RTW89_SET_H2CREG_HDR_LEN(info, val) \
+ u32p_replace_bits(info, val, GENMASK(11, 8))
+
+#define RTW89_H2CREG_MAX 4
+#define RTW89_C2HREG_MAX 4
+#define RTW89_C2HREG_HDR_LEN 2
+#define RTW89_H2CREG_HDR_LEN 2
+#define RTW89_C2H_TIMEOUT 1000000
+struct rtw89_mac_c2h_info {
+ u8 id;
+ u8 content_len;
+ u32 c2hreg[RTW89_C2HREG_MAX];
+};
+
+struct rtw89_mac_h2c_info {
+ u8 id;
+ u8 content_len;
+ u32 h2creg[RTW89_H2CREG_MAX];
+};
+
+enum rtw89_mac_h2c_type {
+ RTW89_FWCMD_H2CREG_FUNC_H2CREG_LB = 0,
+ RTW89_FWCMD_H2CREG_FUNC_CNSL_CMD,
+ RTW89_FWCMD_H2CREG_FUNC_FWERR,
+ RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE,
+ RTW89_FWCMD_H2CREG_FUNC_GETPKT_INFORM,
+ RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN
+};
+
+enum rtw89_mac_c2h_type {
+ RTW89_FWCMD_C2HREG_FUNC_C2HREG_LB = 0,
+ RTW89_FWCMD_C2HREG_FUNC_ERR_RPT,
+ RTW89_FWCMD_C2HREG_FUNC_ERR_MSG,
+ RTW89_FWCMD_C2HREG_FUNC_PHY_CAP,
+ RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT,
+ RTW89_FWCMD_C2HREG_FUNC_NULL = 0xFF
+};
+
+struct rtw89_c2h_phy_cap {
+ u32 func:7;
+ u32 ack:1;
+ u32 len:4;
+ u32 seq:4;
+ u32 rx_nss:8;
+ u32 bw:8;
+
+ u32 tx_nss:8;
+ u32 prot:8;
+ u32 nic:8;
+ u32 wl_func:8;
+
+ u32 hw_type:8;
+} __packed;
+
+enum rtw89_fw_c2h_category {
+ RTW89_C2H_CAT_TEST,
+ RTW89_C2H_CAT_MAC,
+ RTW89_C2H_CAT_OUTSRC,
+};
+
+enum rtw89_fw_log_level {
+ RTW89_FW_LOG_LEVEL_OFF,
+ RTW89_FW_LOG_LEVEL_CRT,
+ RTW89_FW_LOG_LEVEL_SER,
+ RTW89_FW_LOG_LEVEL_WARN,
+ RTW89_FW_LOG_LEVEL_LOUD,
+ RTW89_FW_LOG_LEVEL_TR,
+};
+
+enum rtw89_fw_log_path {
+ RTW89_FW_LOG_LEVEL_UART,
+ RTW89_FW_LOG_LEVEL_C2H,
+ RTW89_FW_LOG_LEVEL_SNI,
+};
+
+enum rtw89_fw_log_comp {
+ RTW89_FW_LOG_COMP_VER,
+ RTW89_FW_LOG_COMP_INIT,
+ RTW89_FW_LOG_COMP_TASK,
+ RTW89_FW_LOG_COMP_CNS,
+ RTW89_FW_LOG_COMP_H2C,
+ RTW89_FW_LOG_COMP_C2H,
+ RTW89_FW_LOG_COMP_TX,
+ RTW89_FW_LOG_COMP_RX,
+ RTW89_FW_LOG_COMP_IPSEC,
+ RTW89_FW_LOG_COMP_TIMER,
+ RTW89_FW_LOG_COMP_DBGPKT,
+ RTW89_FW_LOG_COMP_PS,
+ RTW89_FW_LOG_COMP_ERROR,
+ RTW89_FW_LOG_COMP_WOWLAN,
+ RTW89_FW_LOG_COMP_SECURE_BOOT,
+ RTW89_FW_LOG_COMP_BTC,
+ RTW89_FW_LOG_COMP_BB,
+ RTW89_FW_LOG_COMP_TWT,
+ RTW89_FW_LOG_COMP_RF,
+ RTW89_FW_LOG_COMP_MCC = 20,
+};
+
+#define FWDL_SECTION_MAX_NUM 10
+#define FWDL_SECTION_CHKSUM_LEN 8
+#define FWDL_SECTION_PER_PKT_LEN 2020
+
+struct rtw89_fw_hdr_section_info {
+ u8 redl;
+ const u8 *addr;
+ u32 len;
+ u32 dladdr;
+};
+
+struct rtw89_fw_bin_info {
+ u8 section_num;
+ u32 hdr_len;
+ struct rtw89_fw_hdr_section_info section_info[FWDL_SECTION_MAX_NUM];
+};
+
+struct rtw89_fw_macid_pause_grp {
+ __le32 pause_grp[4];
+ __le32 mask_grp[4];
+} __packed;
+
+struct rtw89_h2creg_sch_tx_en {
+ u8 func:7;
+ u8 ack:1;
+ u8 total_len:4;
+ u8 seq_num:4;
+ u16 tx_en:16;
+ u16 mask:16;
+ u8 band:1;
+ u16 rsvd:15;
+} __packed;
+
+#define RTW89_SET_FWCMD_RA_IS_DIS(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(0))
+#define RTW89_SET_FWCMD_RA_MODE(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(5, 1))
+#define RTW89_SET_FWCMD_RA_BW_CAP(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 6))
+#define RTW89_SET_FWCMD_RA_MACID(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8))
+#define RTW89_SET_FWCMD_RA_DCM(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(16))
+#define RTW89_SET_FWCMD_RA_ER(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(17))
+#define RTW89_SET_FWCMD_RA_INIT_RATE_LV(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(19, 18))
+#define RTW89_SET_FWCMD_RA_UPD_ALL(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(20))
+#define RTW89_SET_FWCMD_RA_SGI(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(21))
+#define RTW89_SET_FWCMD_RA_LDPC(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(22))
+#define RTW89_SET_FWCMD_RA_STBC(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(23))
+#define RTW89_SET_FWCMD_RA_SS_NUM(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(26, 24))
+#define RTW89_SET_FWCMD_RA_GILTF(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(29, 27))
+#define RTW89_SET_FWCMD_RA_UPD_BW_NSS_MASK(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(30))
+#define RTW89_SET_FWCMD_RA_UPD_MASK(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(31))
+#define RTW89_SET_FWCMD_RA_MASK_0(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_RA_MASK_1(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(15, 8))
+#define RTW89_SET_FWCMD_RA_MASK_2(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(23, 16))
+#define RTW89_SET_FWCMD_RA_MASK_3(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 24))
+#define RTW89_SET_FWCMD_RA_MASK_4(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_RA_BFEE_CSI_CTL(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, BIT(31))
+#define RTW89_SET_FWCMD_RA_BAND_NUM(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_RA_RA_CSI_RATE_EN(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(8))
+#define RTW89_SET_FWCMD_RA_FIXED_CSI_RATE_EN(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(9))
+#define RTW89_SET_FWCMD_RA_CR_TBL_SEL(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, BIT(10))
+#define RTW89_SET_FWCMD_RA_FIXED_CSI_MCS_SS_IDX(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(23, 16))
+#define RTW89_SET_FWCMD_RA_FIXED_CSI_MODE(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(25, 24))
+#define RTW89_SET_FWCMD_RA_FIXED_CSI_GI_LTF(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(28, 26))
+#define RTW89_SET_FWCMD_RA_FIXED_CSI_BW(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 29))
+
+#define RTW89_SET_FWCMD_SEC_IDX(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_SEC_OFFSET(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(15, 8))
+#define RTW89_SET_FWCMD_SEC_LEN(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(23, 16))
+#define RTW89_SET_FWCMD_SEC_TYPE(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(3, 0))
+#define RTW89_SET_FWCMD_SEC_EXT_KEY(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(4))
+#define RTW89_SET_FWCMD_SEC_SPP_MODE(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, BIT(5))
+#define RTW89_SET_FWCMD_SEC_KEY0(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x02, val, GENMASK(31, 0))
+#define RTW89_SET_FWCMD_SEC_KEY1(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x03, val, GENMASK(31, 0))
+#define RTW89_SET_FWCMD_SEC_KEY2(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x04, val, GENMASK(31, 0))
+#define RTW89_SET_FWCMD_SEC_KEY3(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x05, val, GENMASK(31, 0))
+
+#define RTW89_SET_EDCA_SEL(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(1, 0))
+#define RTW89_SET_EDCA_BAND(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(3))
+#define RTW89_SET_EDCA_WMM(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, BIT(4))
+#define RTW89_SET_EDCA_AC(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x00, val, GENMASK(6, 5))
+#define RTW89_SET_EDCA_PARAM(cmd, val) \
+ le32p_replace_bits((__le32 *)(cmd) + 0x01, val, GENMASK(31, 0))
+#define FW_EDCA_PARAM_TXOPLMT_MSK GENMASK(26, 16)
+#define FW_EDCA_PARAM_CWMAX_MSK GENMASK(15, 12)
+#define FW_EDCA_PARAM_CWMIN_MSK GENMASK(11, 8)
+#define FW_EDCA_PARAM_AIFS_MSK GENMASK(7, 0)
+
+#define GET_FWSECTION_HDR_SEC_SIZE(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 0))
+#define GET_FWSECTION_HDR_CHECKSUM(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(28))
+#define GET_FWSECTION_HDR_REDL(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), BIT(29))
+#define GET_FWSECTION_HDR_DL_ADDR(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr)), GENMASK(31, 0))
+
+#define GET_FW_HDR_MAJOR_VERSION(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(7, 0))
+#define GET_FW_HDR_MINOR_VERSION(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(15, 8))
+#define GET_FW_HDR_SUBVERSION(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(23, 16))
+#define GET_FW_HDR_SUBINDEX(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 1), GENMASK(31, 24))
+#define GET_FW_HDR_MONTH(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(7, 0))
+#define GET_FW_HDR_DATE(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(15, 8))
+#define GET_FW_HDR_HOUR(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(23, 16))
+#define GET_FW_HDR_MIN(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 4), GENMASK(31, 24))
+#define GET_FW_HDR_YEAR(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 5), GENMASK(31, 0))
+#define GET_FW_HDR_SEC_NUM(fwhdr) \
+ le32_get_bits(*((__le32 *)(fwhdr) + 6), GENMASK(15, 8))
+#define GET_FW_HDR_CMD_VERSION(fwhdr) \
+	le32_get_bits(*((__le32 *)(fwhdr) + 7), GENMASK(31, 24))
+#define SET_FW_HDR_PART_SIZE(fwhdr, val) \
+ le32p_replace_bits((__le32 *)(fwhdr) + 7, val, GENMASK(15, 0))
+
+#define SET_CTRL_INFO_MACID(table, val) \
+ le32p_replace_bits((__le32 *)(table) + 0, val, GENMASK(6, 0))
+#define SET_CTRL_INFO_OPERATION(table, val) \
+ le32p_replace_bits((__le32 *)(table) + 0, val, BIT(7))
+#define SET_CMC_TBL_MASK_DATARATE GENMASK(8, 0)
+#define SET_CMC_TBL_DATARATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(8, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATARATE, \
+ GENMASK(8, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_FORCE_TXOP BIT(0)
+#define SET_CMC_TBL_FORCE_TXOP(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(9)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_FORCE_TXOP, \
+ BIT(9)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_BW GENMASK(1, 0)
+#define SET_CMC_TBL_DATA_BW(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(11, 10)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_BW, \
+ GENMASK(11, 10)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_GI_LTF GENMASK(2, 0)
+#define SET_CMC_TBL_DATA_GI_LTF(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(14, 12)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DATA_GI_LTF, \
+ GENMASK(14, 12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DARF_TC_INDEX BIT(0)
+#define SET_CMC_TBL_DARF_TC_INDEX(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(15)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DARF_TC_INDEX, \
+ BIT(15)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ARFR_CTRL GENMASK(3, 0)
+#define SET_CMC_TBL_ARFR_CTRL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(19, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ARFR_CTRL, \
+ GENMASK(19, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ACQ_RPT_EN BIT(0)
+#define SET_CMC_TBL_ACQ_RPT_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(20)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ACQ_RPT_EN, \
+ BIT(20)); \
+} while (0)
+#define SET_CMC_TBL_MASK_MGQ_RPT_EN BIT(0)
+#define SET_CMC_TBL_MGQ_RPT_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(21)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_MGQ_RPT_EN, \
+ BIT(21)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ULQ_RPT_EN BIT(0)
+#define SET_CMC_TBL_ULQ_RPT_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(22)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_ULQ_RPT_EN, \
+ BIT(22)); \
+} while (0)
+#define SET_CMC_TBL_MASK_TWTQ_RPT_EN BIT(0)
+#define SET_CMC_TBL_TWTQ_RPT_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(23)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TWTQ_RPT_EN, \
+ BIT(23)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DISRTSFB BIT(0)
+#define SET_CMC_TBL_DISRTSFB(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(25)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISRTSFB, \
+ BIT(25)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DISDATAFB BIT(0)
+#define SET_CMC_TBL_DISDATAFB(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(26)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_DISDATAFB, \
+ BIT(26)); \
+} while (0)
+#define SET_CMC_TBL_MASK_TRYRATE BIT(0)
+#define SET_CMC_TBL_TRYRATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, BIT(27)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_TRYRATE, \
+ BIT(27)); \
+} while (0)
+#define SET_CMC_TBL_MASK_AMPDU_DENSITY GENMASK(3, 0)
+#define SET_CMC_TBL_AMPDU_DENSITY(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 1, val, GENMASK(31, 28)); \
+ le32p_replace_bits((__le32 *)(table) + 9, SET_CMC_TBL_MASK_AMPDU_DENSITY, \
+ GENMASK(31, 28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE GENMASK(8, 0)
+#define SET_CMC_TBL_DATA_RTY_LOWEST_RATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(8, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_DATA_RTY_LOWEST_RATE, \
+ GENMASK(8, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_AMPDU_TIME_SEL BIT(0)
+#define SET_CMC_TBL_AMPDU_TIME_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(9)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_TIME_SEL, \
+ BIT(9)); \
+} while (0)
+#define SET_CMC_TBL_MASK_AMPDU_LEN_SEL BIT(0)
+#define SET_CMC_TBL_AMPDU_LEN_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(10)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_AMPDU_LEN_SEL, \
+ BIT(10)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL BIT(0)
+#define SET_CMC_TBL_RTS_TXCNT_LMT_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(11)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT_SEL, \
+ BIT(11)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTS_TXCNT_LMT GENMASK(3, 0)
+#define SET_CMC_TBL_RTS_TXCNT_LMT(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(15, 12)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_TXCNT_LMT, \
+ GENMASK(15, 12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTSRATE GENMASK(8, 0)
+#define SET_CMC_TBL_RTSRATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(24, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTSRATE, \
+ GENMASK(24, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_VCS_STBC BIT(0)
+#define SET_CMC_TBL_VCS_STBC(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, BIT(27)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_VCS_STBC, \
+ BIT(27)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE GENMASK(3, 0)
+#define SET_CMC_TBL_RTS_RTY_LOWEST_RATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 2, val, GENMASK(31, 28)); \
+ le32p_replace_bits((__le32 *)(table) + 10, SET_CMC_TBL_MASK_RTS_RTY_LOWEST_RATE, \
+ GENMASK(31, 28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_TX_CNT_LMT GENMASK(5, 0)
+#define SET_CMC_TBL_DATA_TX_CNT_LMT(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(5, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TX_CNT_LMT, \
+ GENMASK(5, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL BIT(0)
+#define SET_CMC_TBL_DATA_TXCNT_LMT_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(6)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_DATA_TXCNT_LMT_SEL, \
+ BIT(6)); \
+} while (0)
+#define SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL BIT(0)
+#define SET_CMC_TBL_MAX_AGG_NUM_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(7)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_MAX_AGG_NUM_SEL, \
+ BIT(7)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTS_EN BIT(0)
+#define SET_CMC_TBL_RTS_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(8)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_EN, \
+ BIT(8)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CTS2SELF_EN BIT(0)
+#define SET_CMC_TBL_CTS2SELF_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(9)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CTS2SELF_EN, \
+ BIT(9)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CCA_RTS GENMASK(1, 0)
+#define SET_CMC_TBL_CCA_RTS(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(11, 10)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_CCA_RTS, \
+ GENMASK(11, 10)); \
+} while (0)
+#define SET_CMC_TBL_MASK_HW_RTS_EN BIT(0)
+#define SET_CMC_TBL_HW_RTS_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(12)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_HW_RTS_EN, \
+ BIT(12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE GENMASK(1, 0)
+#define SET_CMC_TBL_RTS_DROP_DATA_MODE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(14, 13)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_RTS_DROP_DATA_MODE, \
+ GENMASK(14, 13)); \
+} while (0)
+#define SET_CMC_TBL_MASK_AMPDU_MAX_LEN GENMASK(10, 0)
+#define SET_CMC_TBL_AMPDU_MAX_LEN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(26, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_LEN, \
+ GENMASK(26, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_UL_MU_DIS BIT(0)
+#define SET_CMC_TBL_UL_MU_DIS(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, BIT(27)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_UL_MU_DIS, \
+ BIT(27)); \
+} while (0)
+#define SET_CMC_TBL_MASK_AMPDU_MAX_TIME GENMASK(3, 0)
+#define SET_CMC_TBL_AMPDU_MAX_TIME(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 3, val, GENMASK(31, 28)); \
+ le32p_replace_bits((__le32 *)(table) + 11, SET_CMC_TBL_MASK_AMPDU_MAX_TIME, \
+ GENMASK(31, 28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_MAX_AGG_NUM GENMASK(7, 0)
+#define SET_CMC_TBL_MAX_AGG_NUM(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(7, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_MAX_AGG_NUM, \
+ GENMASK(7, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_BA_BMAP GENMASK(1, 0)
+#define SET_CMC_TBL_BA_BMAP(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(9, 8)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BA_BMAP, \
+ GENMASK(9, 8)); \
+} while (0)
+#define SET_CMC_TBL_MASK_VO_LFTIME_SEL GENMASK(2, 0)
+#define SET_CMC_TBL_VO_LFTIME_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(18, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VO_LFTIME_SEL, \
+ GENMASK(18, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_VI_LFTIME_SEL GENMASK(2, 0)
+#define SET_CMC_TBL_VI_LFTIME_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(21, 19)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_VI_LFTIME_SEL, \
+ GENMASK(21, 19)); \
+} while (0)
+#define SET_CMC_TBL_MASK_BE_LFTIME_SEL GENMASK(2, 0)
+#define SET_CMC_TBL_BE_LFTIME_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(24, 22)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BE_LFTIME_SEL, \
+ GENMASK(24, 22)); \
+} while (0)
+#define SET_CMC_TBL_MASK_BK_LFTIME_SEL GENMASK(2, 0)
+#define SET_CMC_TBL_BK_LFTIME_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(27, 25)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_BK_LFTIME_SEL, \
+ GENMASK(27, 25)); \
+} while (0)
+#define SET_CMC_TBL_MASK_SECTYPE GENMASK(3, 0)
+#define SET_CMC_TBL_SECTYPE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 4, val, GENMASK(31, 28)); \
+ le32p_replace_bits((__le32 *)(table) + 12, SET_CMC_TBL_MASK_SECTYPE, \
+ GENMASK(31, 28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_MULTI_PORT_ID GENMASK(2, 0)
+#define SET_CMC_TBL_MULTI_PORT_ID(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(2, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MULTI_PORT_ID, \
+ GENMASK(2, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_BMC BIT(0)
+#define SET_CMC_TBL_BMC(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(3)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_BMC, \
+ BIT(3)); \
+} while (0)
+#define SET_CMC_TBL_MASK_MBSSID GENMASK(3, 0)
+#define SET_CMC_TBL_MBSSID(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(7, 4)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_MBSSID, \
+ GENMASK(7, 4)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NAVUSEHDR BIT(0)
+#define SET_CMC_TBL_NAVUSEHDR(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(8)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_NAVUSEHDR, \
+ BIT(8)); \
+} while (0)
+#define SET_CMC_TBL_MASK_TXPWR_MODE GENMASK(2, 0)
+#define SET_CMC_TBL_TXPWR_MODE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(11, 9)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_TXPWR_MODE, \
+ GENMASK(11, 9)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_DCM BIT(0)
+#define SET_CMC_TBL_DATA_DCM(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(12)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_DCM, \
+ BIT(12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_ER BIT(0)
+#define SET_CMC_TBL_DATA_ER(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(13)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_ER, \
+ BIT(13)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_LDPC BIT(0)
+#define SET_CMC_TBL_DATA_LDPC(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(14)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_LDPC, \
+ BIT(14)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_STBC BIT(0)
+#define SET_CMC_TBL_DATA_STBC(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(15)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_STBC, \
+ BIT(15)); \
+} while (0)
+#define SET_CMC_TBL_MASK_A_CTRL_BQR BIT(0)
+#define SET_CMC_TBL_A_CTRL_BQR(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(16)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BQR, \
+ BIT(16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_A_CTRL_UPH BIT(0)
+#define SET_CMC_TBL_A_CTRL_UPH(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(17)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_UPH, \
+ BIT(17)); \
+} while (0)
+#define SET_CMC_TBL_MASK_A_CTRL_BSR BIT(0)
+#define SET_CMC_TBL_A_CTRL_BSR(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(18)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_BSR, \
+ BIT(18)); \
+} while (0)
+#define SET_CMC_TBL_MASK_A_CTRL_CAS BIT(0)
+#define SET_CMC_TBL_A_CTRL_CAS(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(19)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_A_CTRL_CAS, \
+ BIT(19)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DATA_BW_ER BIT(0)
+#define SET_CMC_TBL_DATA_BW_ER(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(20)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_DATA_BW_ER, \
+ BIT(20)); \
+} while (0)
+#define SET_CMC_TBL_MASK_LSIG_TXOP_EN BIT(0)
+#define SET_CMC_TBL_LSIG_TXOP_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(21)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_LSIG_TXOP_EN, \
+ BIT(21)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CTRL_CNT_VLD BIT(0)
+#define SET_CMC_TBL_CTRL_CNT_VLD(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, BIT(27)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT_VLD, \
+ BIT(27)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CTRL_CNT GENMASK(3, 0)
+#define SET_CMC_TBL_CTRL_CNT(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 5, val, GENMASK(31, 28)); \
+ le32p_replace_bits((__le32 *)(table) + 13, SET_CMC_TBL_MASK_CTRL_CNT, \
+ GENMASK(31, 28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_RESP_REF_RATE GENMASK(8, 0)
+#define SET_CMC_TBL_RESP_REF_RATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(8, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_RESP_REF_RATE, \
+ GENMASK(8, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ALL_ACK_SUPPORT BIT(0)
+#define SET_CMC_TBL_ALL_ACK_SUPPORT(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(12)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ALL_ACK_SUPPORT, \
+ BIT(12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT BIT(0)
+#define SET_CMC_TBL_BSR_QUEUE_SIZE_FORMAT(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(13)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_BSR_QUEUE_SIZE_FORMAT, \
+ BIT(13)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NTX_PATH_EN GENMASK(3, 0)
+#define SET_CMC_TBL_NTX_PATH_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(19, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_NTX_PATH_EN, \
+ GENMASK(19, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_PATH_MAP_A GENMASK(1, 0)
+#define SET_CMC_TBL_PATH_MAP_A(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(21, 20)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_A, \
+ GENMASK(21, 20)); \
+} while (0)
+#define SET_CMC_TBL_MASK_PATH_MAP_B GENMASK(1, 0)
+#define SET_CMC_TBL_PATH_MAP_B(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(23, 22)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_B, \
+ GENMASK(23, 22)); \
+} while (0)
+#define SET_CMC_TBL_MASK_PATH_MAP_C GENMASK(1, 0)
+#define SET_CMC_TBL_PATH_MAP_C(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(25, 24)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_C, \
+ GENMASK(25, 24)); \
+} while (0)
+#define SET_CMC_TBL_MASK_PATH_MAP_D GENMASK(1, 0)
+#define SET_CMC_TBL_PATH_MAP_D(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, GENMASK(27, 26)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_PATH_MAP_D, \
+ GENMASK(27, 26)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ANTSEL_A BIT(0)
+#define SET_CMC_TBL_ANTSEL_A(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(28)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_A, \
+ BIT(28)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ANTSEL_B BIT(0)
+#define SET_CMC_TBL_ANTSEL_B(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(29)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_B, \
+ BIT(29)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ANTSEL_C BIT(0)
+#define SET_CMC_TBL_ANTSEL_C(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(30)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_C, \
+ BIT(30)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ANTSEL_D BIT(0)
+#define SET_CMC_TBL_ANTSEL_D(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 6, val, BIT(31)); \
+ le32p_replace_bits((__le32 *)(table) + 14, SET_CMC_TBL_MASK_ANTSEL_D, \
+ BIT(31)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ADDR_CAM_INDEX GENMASK(7, 0)
+#define SET_CMC_TBL_ADDR_CAM_INDEX(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(7, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ADDR_CAM_INDEX, \
+ GENMASK(7, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_PAID GENMASK(8, 0)
+#define SET_CMC_TBL_PAID(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(16, 8)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_PAID, \
+ GENMASK(16, 8)); \
+} while (0)
+#define SET_CMC_TBL_MASK_ULDL BIT(0)
+#define SET_CMC_TBL_ULDL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, BIT(17)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_ULDL, \
+ BIT(17)); \
+} while (0)
+#define SET_CMC_TBL_MASK_DOPPLER_CTRL GENMASK(1, 0)
+#define SET_CMC_TBL_DOPPLER_CTRL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(19, 18)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_DOPPLER_CTRL, \
+ GENMASK(19, 18)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING GENMASK(1, 0)
+#define SET_CMC_TBL_NOMINAL_PKT_PADDING(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(21, 20)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
+ GENMASK(21, 20)); \
+} while (0)
+#define SET_CMC_TBL_NOMINAL_PKT_PADDING40(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(23, 22)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
+ GENMASK(23, 22)); \
+} while (0)
+#define SET_CMC_TBL_MASK_TXPWR_TOLERENCE GENMASK(3, 0)
+#define SET_CMC_TBL_TXPWR_TOLERENCE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(27, 24)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_TXPWR_TOLERENCE, \
+ GENMASK(27, 24)); \
+} while (0)
+#define SET_CMC_TBL_NOMINAL_PKT_PADDING80(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 7, val, GENMASK(31, 30)); \
+ le32p_replace_bits((__le32 *)(table) + 15, SET_CMC_TBL_MASK_NOMINAL_PKT_PADDING, \
+ GENMASK(31, 30)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NC GENMASK(2, 0)
+#define SET_CMC_TBL_NC(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(2, 0)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NC, \
+ GENMASK(2, 0)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NR GENMASK(2, 0)
+#define SET_CMC_TBL_NR(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(5, 3)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NR, \
+ GENMASK(5, 3)); \
+} while (0)
+#define SET_CMC_TBL_MASK_NG GENMASK(1, 0)
+#define SET_CMC_TBL_NG(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(7, 6)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_NG, \
+ GENMASK(7, 6)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CB GENMASK(1, 0)
+#define SET_CMC_TBL_CB(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(9, 8)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CB, \
+ GENMASK(9, 8)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CS GENMASK(1, 0)
+#define SET_CMC_TBL_CS(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(11, 10)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CS, \
+ GENMASK(11, 10)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_TXBF_EN BIT(0)
+#define SET_CMC_TBL_CSI_TXBF_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(12)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_TXBF_EN, \
+ BIT(12)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_STBC_EN BIT(0)
+#define SET_CMC_TBL_CSI_STBC_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(13)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_STBC_EN, \
+ BIT(13)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_LDPC_EN BIT(0)
+#define SET_CMC_TBL_CSI_LDPC_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(14)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_LDPC_EN, \
+ BIT(14)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_PARA_EN BIT(0)
+#define SET_CMC_TBL_CSI_PARA_EN(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(15)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_PARA_EN, \
+ BIT(15)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_FIX_RATE GENMASK(8, 0)
+#define SET_CMC_TBL_CSI_FIX_RATE(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(24, 16)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_FIX_RATE, \
+ GENMASK(24, 16)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_GI_LTF GENMASK(2, 0)
+#define SET_CMC_TBL_CSI_GI_LTF(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(27, 25)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GI_LTF, \
+ GENMASK(27, 25)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_GID_SEL BIT(0)
+#define SET_CMC_TBL_CSI_GID_SEL(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, BIT(29)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_GID_SEL, \
+ BIT(29)); \
+} while (0)
+#define SET_CMC_TBL_MASK_CSI_BW GENMASK(1, 0)
+#define SET_CMC_TBL_CSI_BW(table, val) \
+do { \
+ le32p_replace_bits((__le32 *)(table) + 8, val, GENMASK(31, 30)); \
+ le32p_replace_bits((__le32 *)(table) + 16, SET_CMC_TBL_MASK_CSI_BW, \
+ GENMASK(31, 30)); \
+} while (0)
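+/* Each SET_CMC_TBL_* helper above updates one field of the CMAC control
+ * table H2C body: the value is written into dword N of the table and the
+ * matching SET_CMC_TBL_MASK_* bits are raised in dword N + 8, so that the
+ * firmware can apply only the fields that are explicitly masked in.
+ */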
+
+#define SET_FWROLE_MAINTAIN_MACID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
+#define SET_FWROLE_MAINTAIN_SELF_ROLE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(9, 8))
+#define SET_FWROLE_MAINTAIN_UPD_MODE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(12, 10))
+#define SET_FWROLE_MAINTAIN_WIFI_ROLE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(16, 13))
+
+#define SET_JOININFO_MACID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
+#define SET_JOININFO_OP(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(8))
+#define SET_JOININFO_BAND(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(9))
+#define SET_JOININFO_WMM(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(11, 10))
+#define SET_JOININFO_TGR(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(12))
+#define SET_JOININFO_ISHESTA(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(13))
+#define SET_JOININFO_DLBW(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 14))
+#define SET_JOININFO_TF_MAC_PAD(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(17, 16))
+#define SET_JOININFO_DL_T_PE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(20, 18))
+#define SET_JOININFO_PORT_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 21))
+#define SET_JOININFO_NET_TYPE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(25, 24))
+#define SET_JOININFO_WIFI_ROLE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(29, 26))
+#define SET_JOININFO_SELF_ROLE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 30))
+
+#define SET_GENERAL_PKT_MACID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
+#define SET_GENERAL_PKT_PROBRSP_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
+#define SET_GENERAL_PKT_PSPOLL_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 16))
+#define SET_GENERAL_PKT_NULL_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24))
+#define SET_GENERAL_PKT_QOS_NULL_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(7, 0))
+#define SET_GENERAL_PKT_CTS2SELF_ID(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8))
+
+#define SET_LOG_CFG_LEVEL(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
+#define SET_LOG_CFG_PATH(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
+#define SET_LOG_CFG_COMP(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(31, 0))
+#define SET_LOG_CFG_COMP_EXT(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 2, val, GENMASK(31, 0))
+
+#define SET_BA_CAM_VALID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(0))
+#define SET_BA_CAM_INIT_REQ(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, BIT(1))
+#define SET_BA_CAM_ENTRY_IDX(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(3, 2))
+#define SET_BA_CAM_TID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 4))
+#define SET_BA_CAM_MACID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
+#define SET_BA_CAM_BMAP_SIZE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16))
+#define SET_BA_CAM_SSN(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 20))
+
+#define SET_LPS_PARM_MACID(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(7, 0))
+#define SET_LPS_PARM_PSMODE(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(15, 8))
+#define SET_LPS_PARM_RLBM(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(19, 16))
+#define SET_LPS_PARM_SMARTPS(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(23, 20))
+#define SET_LPS_PARM_AWAKEINTERVAL(h2c, val) \
+ le32p_replace_bits((__le32 *)h2c, val, GENMASK(31, 24))
+#define SET_LPS_PARM_VOUAPSD(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(0))
+#define SET_LPS_PARM_VIUAPSD(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(1))
+#define SET_LPS_PARM_BEUAPSD(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(2))
+#define SET_LPS_PARM_BKUAPSD(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, BIT(3))
+#define SET_LPS_PARM_LASTRPWM(h2c, val) \
+ le32p_replace_bits((__le32 *)(h2c) + 1, val, GENMASK(15, 8))
+
+enum rtw89_btc_btf_h2c_class {
+ BTFC_SET = 0x10,
+ BTFC_GET = 0x11,
+ BTFC_FW_EVENT = 0x12,
+};
+
+enum rtw89_btc_btf_set {
+ SET_REPORT_EN = 0x0,
+ SET_SLOT_TABLE,
+ SET_MREG_TABLE,
+ SET_CX_POLICY,
+ SET_GPIO_DBG,
+ SET_DRV_INFO,
+ SET_DRV_EVENT,
+ SET_BT_WREG_ADDR,
+ SET_BT_WREG_VAL,
+ SET_BT_RREG_ADDR,
+ SET_BT_WL_CH_INFO,
+ SET_BT_INFO_REPORT,
+ SET_BT_IGNORE_WLAN_ACT,
+ SET_BT_TX_PWR,
+ SET_BT_LNA_CONSTRAIN,
+ SET_BT_GOLDEN_RX_RANGE,
+ SET_BT_PSD_REPORT,
+ SET_H2C_TEST,
+ SET_MAX1,
+};
+
+enum rtw89_btc_cxdrvinfo {
+ CXDRVINFO_INIT = 0,
+ CXDRVINFO_ROLE,
+ CXDRVINFO_DBCC,
+ CXDRVINFO_SMAP,
+ CXDRVINFO_RFK,
+ CXDRVINFO_RUN,
+ CXDRVINFO_CTRL,
+ CXDRVINFO_SCAN,
+ CXDRVINFO_MAX,
+};
+
+#define RTW89_SET_FWCMD_CXHDR_TYPE(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 0, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXHDR_LEN(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 1, val, GENMASK(7, 0))
+
+#define RTW89_SET_FWCMD_CXINIT_ANT_TYPE(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_ANT_NUM(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_ANT_ISO(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 4, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_ANT_POS(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(0))
+#define RTW89_SET_FWCMD_CXINIT_ANT_DIVERSITY(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 5, val, BIT(1))
+#define RTW89_SET_FWCMD_CXINIT_MOD_RFE(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 6, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_MOD_CV(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 7, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_MOD_BT_SOLO(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(0))
+#define RTW89_SET_FWCMD_CXINIT_MOD_BT_POS(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(1))
+#define RTW89_SET_FWCMD_CXINIT_MOD_SW_TYPE(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 8, val, BIT(2))
+#define RTW89_SET_FWCMD_CXINIT_WL_GCH(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 10, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXINIT_WL_ONLY(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(0))
+#define RTW89_SET_FWCMD_CXINIT_WL_INITOK(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(1))
+#define RTW89_SET_FWCMD_CXINIT_DBCC_EN(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(2))
+#define RTW89_SET_FWCMD_CXINIT_CX_OTHER(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(3))
+#define RTW89_SET_FWCMD_CXINIT_BT_ONLY(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 11, val, BIT(4))
+
+#define RTW89_SET_FWCMD_CXROLE_CONNECT_CNT(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 2, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXROLE_LINK_MODE(cmd, val) \
+ u8p_replace_bits((u8 *)(cmd) + 3, val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_NONE(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(0))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_STA(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(1))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_AP(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(2))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_VAP(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(3))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(4))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_ADHOC_MASTER(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(5))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_MESH(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(6))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_MONITOR(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(7))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_DEV(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(8))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GC(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(9))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_P2P_GO(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(10))
+#define RTW89_SET_FWCMD_CXROLE_ROLE_NAN(cmd, val) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + 4), val, BIT(11))
+#define RTW89_SET_FWCMD_CXROLE_ACT_CONNECTED(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_PID(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(3, 1))
+#define RTW89_SET_FWCMD_CXROLE_ACT_PHY(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(4))
+#define RTW89_SET_FWCMD_CXROLE_ACT_NOA(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, BIT(5))
+#define RTW89_SET_FWCMD_CXROLE_ACT_BAND(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (6 + 12 * (n)), val, GENMASK(7, 6))
+#define RTW89_SET_FWCMD_CXROLE_ACT_CLIENT_PS(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, BIT(0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_BW(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (7 + 12 * (n)), val, GENMASK(7, 1))
+#define RTW89_SET_FWCMD_CXROLE_ACT_ROLE(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (8 + 12 * (n)), val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_CH(cmd, val, n) \
+ u8p_replace_bits((u8 *)(cmd) + (9 + 12 * (n)), val, GENMASK(7, 0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_TX_LVL(cmd, val, n) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (10 + 12 * (n))), val, GENMASK(15, 0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_RX_LVL(cmd, val, n) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (12 + 12 * (n))), val, GENMASK(15, 0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_TX_RATE(cmd, val, n) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (14 + 12 * (n))), val, GENMASK(15, 0))
+#define RTW89_SET_FWCMD_CXROLE_ACT_RX_RATE(cmd, val, n) \
+ le16p_replace_bits((__le16 *)((u8 *)(cmd) + (16 + 12 * (n))), val, GENMASK(15, 0))
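+/* The CXROLE_ACT_* setters address per-role slots: slot n occupies the
+ * 12 bytes starting at byte offset 6 + 12 * n of the command body.
+ */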
+
+#define RTW89_SET_FWCMD_CXCTRL_MANUAL(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(0))
+#define RTW89_SET_FWCMD_CXCTRL_IGNORE_BT(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(1))
+#define RTW89_SET_FWCMD_CXCTRL_ALWAYS_FREERUN(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, BIT(2))
+#define RTW89_SET_FWCMD_CXCTRL_TRACE_STEP(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(18, 3))
+
+#define RTW89_SET_FWCMD_CXRFK_STATE(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(1, 0))
+#define RTW89_SET_FWCMD_CXRFK_PATH_MAP(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(5, 2))
+#define RTW89_SET_FWCMD_CXRFK_PHY_MAP(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(7, 6))
+#define RTW89_SET_FWCMD_CXRFK_BAND(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(9, 8))
+#define RTW89_SET_FWCMD_CXRFK_TYPE(cmd, val) \
+ le32p_replace_bits((__le32 *)((u8 *)(cmd) + 2), val, GENMASK(17, 10))
+
+#define RTW89_C2H_HEADER_LEN 8
+
+#define RTW89_GET_C2H_CATEGORY(c2h) \
+ le32_get_bits(*((__le32 *)c2h), GENMASK(1, 0))
+#define RTW89_GET_C2H_CLASS(c2h) \
+ le32_get_bits(*((__le32 *)c2h), GENMASK(7, 2))
+#define RTW89_GET_C2H_FUNC(c2h) \
+ le32_get_bits(*((__le32 *)c2h), GENMASK(15, 8))
+#define RTW89_GET_C2H_LEN(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 1), GENMASK(13, 0))
+
+#define RTW89_GET_C2H_LOG_SRT_PRT(c2h) (char *)((__le32 *)(c2h) + 2)
+#define RTW89_GET_C2H_LOG_LEN(len) ((len) - RTW89_C2H_HEADER_LEN)
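+/* A firmware log C2H carries its log payload immediately after the
+ * RTW89_C2H_HEADER_LEN-byte header; the two helpers above return a pointer
+ * to that payload and its remaining length.
+ */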
+
+#define RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0))
+#define RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2))
+#define RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8))
+#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+#define RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(31, 24))
+
+#define RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(1, 0))
+#define RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(7, 2))
+#define RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 8))
+#define RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+
+#define RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(15, 0))
+#define RTW89_GET_PHY_C2H_RA_RPT_RETRY_RATIO(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 2), GENMASK(23, 16))
+#define RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(6, 0))
+#define RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(9, 8))
+#define RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(12, 10))
+#define RTW89_GET_PHY_C2H_RA_RPT_BW(c2h) \
+ le32_get_bits(*((__le32 *)(c2h) + 3), GENMASK(14, 13))
+
+/* VHT, HE, HT-old: [6:4]: NSS, [3:0]: MCS
+ * HT-new: [6:5]: NA, [4:0]: MCS
+ */
+#define RTW89_RA_RATE_MASK_NSS GENMASK(6, 4)
+#define RTW89_RA_RATE_MASK_MCS GENMASK(3, 0)
+#define RTW89_RA_RATE_MASK_HT_MCS GENMASK(4, 0)
+#define RTW89_MK_HT_RATE(nss, mcs) (FIELD_PREP(GENMASK(4, 3), nss) | \
+ FIELD_PREP(GENMASK(2, 0), mcs))
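+/* e.g. a VHT/HE rate report word decodes as
+ * nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rpt) and
+ * mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rpt), while the HT-new layout
+ * takes the whole index via RTW89_RA_RATE_MASK_HT_MCS.
+ */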
+
+#define RTW89_FW_HDR_SIZE 32
+#define RTW89_FW_SECTION_HDR_SIZE 16
+
+#define RTW89_MFW_SIG 0xFF
+
+struct rtw89_mfw_info {
+ u8 cv;
+ u8 type; /* enum rtw89_fw_type */
+ u8 mp;
+ u8 rsvd;
+ __le32 shift;
+ __le32 size;
+ u8 rsvd2[4];
+} __packed;
+
+struct rtw89_mfw_hdr {
+ u8 sig; /* RTW89_MFW_SIG */
+ u8 fw_nr;
+ u8 rsvd[14];
+ struct rtw89_mfw_info info[];
+} __packed;
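+/* A multi-firmware (MFW) image begins with struct rtw89_mfw_hdr
+ * (sig == RTW89_MFW_SIG) followed by fw_nr rtw89_mfw_info entries; each
+ * entry locates one embedded firmware by byte offset (shift) and size.
+ */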
+
+struct fwcmd_hdr {
+ __le32 hdr0;
+ __le32 hdr1;
+};
+
+#define RTW89_H2C_RF_PAGE_SIZE 500
+#define RTW89_H2C_RF_PAGE_NUM 3
+struct rtw89_fw_h2c_rf_reg_info {
+ enum rtw89_rf_path rf_path;
+ __le32 rtw89_phy_config_rf_h2c[RTW89_H2C_RF_PAGE_NUM][RTW89_H2C_RF_PAGE_SIZE];
+ u16 curr_idx;
+};
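+/* RF register H2C payloads are chunked into up to RTW89_H2C_RF_PAGE_NUM
+ * pages of RTW89_H2C_RF_PAGE_SIZE dwords each; curr_idx tracks the current
+ * fill position.
+ */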
+
+#define H2C_SEC_CAM_LEN 24
+
+#define H2C_HEADER_LEN 8
+#define H2C_HDR_CAT GENMASK(1, 0)
+#define H2C_HDR_CLASS GENMASK(7, 2)
+#define H2C_HDR_FUNC GENMASK(15, 8)
+#define H2C_HDR_DEL_TYPE GENMASK(19, 16)
+#define H2C_HDR_H2C_SEQ GENMASK(31, 24)
+#define H2C_HDR_TOTAL_LEN GENMASK(13, 0)
+#define H2C_HDR_REC_ACK BIT(14)
+#define H2C_HDR_DONE_ACK BIT(15)
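+/* fwcmd_hdr.hdr0 carries H2C_HDR_CAT/CLASS/FUNC/DEL_TYPE/H2C_SEQ;
+ * fwcmd_hdr.hdr1 carries H2C_HDR_TOTAL_LEN plus the REC_ACK and DONE_ACK
+ * request bits.
+ */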
+
+#define FWCMD_TYPE_H2C 0
+
+#define H2C_CAT_MAC 0x1
+
+/* CLASS 0 - FW INFO */
+#define H2C_CL_FW_INFO 0x0
+#define H2C_FUNC_LOG_CFG 0x0
+#define H2C_FUNC_MAC_GENERAL_PKT 0x1
+
+/* CLASS 2 - PS */
+#define H2C_CL_MAC_PS 0x2
+#define H2C_FUNC_MAC_LPS_PARM 0x0
+
+/* CLASS 3 - FW download */
+#define H2C_CL_MAC_FWDL 0x3
+#define H2C_FUNC_MAC_FWHDR_DL 0x0
+
+/* CLASS 5 - Frame Exchange */
+#define H2C_CL_MAC_FR_EXCHG 0x5
+#define H2C_FUNC_MAC_CCTLINFO_UD 0x2
+
+/* CLASS 6 - Address CAM */
+#define H2C_CL_MAC_ADDR_CAM_UPDATE 0x6
+#define H2C_FUNC_MAC_ADDR_CAM_UPD 0x0
+
+/* CLASS 8 - Media Status Report */
+#define H2C_CL_MAC_MEDIA_RPT 0x8
+#define H2C_FUNC_MAC_JOININFO 0x0
+#define H2C_FUNC_MAC_FWROLE_MAINTAIN 0x4
+
+/* CLASS 9 - FW offload */
+#define H2C_CL_MAC_FW_OFLD 0x9
+#define H2C_FUNC_MAC_MACID_PAUSE 0x8
+#define H2C_FUNC_USR_EDCA 0xF
+#define H2C_FUNC_OFLD_CFG 0x14
+
+/* CLASS 10 - Security CAM */
+#define H2C_CL_MAC_SEC_CAM 0xa
+#define H2C_FUNC_MAC_SEC_UPD 0x1
+
+/* CLASS 12 - BA CAM */
+#define H2C_CL_BA_CAM 0xc
+#define H2C_FUNC_MAC_BA_CAM 0x0
+
+#define H2C_CAT_OUTSRC 0x2
+
+#define H2C_CL_OUTSRC_RA 0x1
+#define H2C_FUNC_OUTSRC_RA_MACIDCFG 0x0
+
+#define H2C_CL_OUTSRC_RF_REG_A 0x8
+#define H2C_CL_OUTSRC_RF_REG_B 0x9
+
+int rtw89_fw_check_rdy(struct rtw89_dev *rtwdev);
+int rtw89_fw_recognize(struct rtw89_dev *rtwdev);
+int rtw89_fw_download(struct rtw89_dev *rtwdev, enum rtw89_fw_type type);
+int rtw89_load_firmware(struct rtw89_dev *rtwdev);
+void rtw89_unload_firmware(struct rtw89_dev *rtwdev);
+int rtw89_wait_firmware_completion(struct rtw89_dev *rtwdev);
+void rtw89_h2c_pkt_set_hdr(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u8 type, u8 cat, u8 class, u8 func,
+ bool rack, bool dack, u32 len);
+int rtw89_fw_h2c_default_cmac_tbl(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_assoc_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+int rtw89_fw_h2c_txtime_cmac_tbl(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta);
+int rtw89_fw_h2c_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+void rtw89_fw_c2h_irqsafe(struct rtw89_dev *rtwdev, struct sk_buff *c2h);
+void rtw89_fw_c2h_work(struct work_struct *work);
+int rtw89_fw_h2c_vif_maintain(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif,
+ enum rtw89_upd_mode upd_mode);
+int rtw89_fw_h2c_join_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 dis_conn);
+int rtw89_fw_h2c_macid_pause(struct rtw89_dev *rtwdev, u8 sh, u8 grp,
+ bool pause);
+int rtw89_fw_h2c_set_edca(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u8 ac, u32 val);
+int rtw89_fw_h2c_set_ofld_cfg(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_ra(struct rtw89_dev *rtwdev, struct rtw89_ra_info *ra, bool csi);
+int rtw89_fw_h2c_cxdrv_init(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_role(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_ctrl(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_cxdrv_rfk(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_rf_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_h2c_rf_reg_info *info,
+ u16 len, u8 page);
+int rtw89_fw_h2c_raw_with_hdr(struct rtw89_dev *rtwdev,
+ u8 h2c_class, u8 h2c_func, u8 *buf, u16 len,
+ bool rack, bool dack);
+int rtw89_fw_h2c_raw(struct rtw89_dev *rtwdev, const u8 *buf, u16 len);
+void rtw89_fw_send_all_early_h2c(struct rtw89_dev *rtwdev);
+void rtw89_fw_free_all_early_h2c(struct rtw89_dev *rtwdev);
+int rtw89_fw_h2c_general_pkt(struct rtw89_dev *rtwdev, u8 macid);
+int rtw89_fw_h2c_ba_cam(struct rtw89_dev *rtwdev, bool valid, u8 macid,
+ struct ieee80211_ampdu_params *params);
+int rtw89_fw_h2c_lps_parm(struct rtw89_dev *rtwdev,
+ struct rtw89_lps_parm *lps_param);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_with_hdr(u32 len);
+struct sk_buff *rtw89_fw_h2c_alloc_skb_no_hdr(u32 len);
+int rtw89_fw_msg_reg(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_h2c_info *h2c_info,
+ struct rtw89_mac_c2h_info *c2h_info);
+int rtw89_fw_h2c_fw_log(struct rtw89_dev *rtwdev, bool enable);
+void rtw89_fw_st_dbg_dump(struct rtw89_dev *rtwdev);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
new file mode 100644
index 000000000000..afcd07ab1de7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -0,0 +1,3836 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "cam.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+#include "util.h"
+
+int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
+{
+ u32 val, r_val;
+
+ if (sel == RTW89_DMAC_SEL) {
+ r_val = rtw89_read32(rtwdev, R_AX_DMAC_FUNC_EN);
+ val = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN);
+ } else if (sel == RTW89_CMAC_SEL && mac_idx == 0) {
+ r_val = rtw89_read32(rtwdev, R_AX_CMAC_FUNC_EN);
+ val = B_AX_CMAC_EN;
+ } else if (sel == RTW89_CMAC_SEL && mac_idx == 1) {
+ r_val = rtw89_read32(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND);
+ val = B_AX_CMAC1_FEN;
+ } else {
+ return -EINVAL;
+ }
+ if (r_val == RTW89_R32_EA || r_val == RTW89_R32_DEAD ||
+ (val & r_val) != val)
+ return -EFAULT;
+
+ return 0;
+}
+
+int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val)
+{
+ u8 lte_ctrl;
+ int ret;
+
+ ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
+ 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
+ if (ret)
+ rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");
+
+ rtw89_write32(rtwdev, R_AX_LTE_WDATA, val);
+ rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0xC00F0000 | offset);
+
+ return ret;
+}
+
+int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val)
+{
+ u8 lte_ctrl;
+ int ret;
+
+ ret = read_poll_timeout(rtw89_read8, lte_ctrl, (lte_ctrl & BIT(5)) != 0,
+ 50, 50000, false, rtwdev, R_AX_LTE_CTRL + 3);
+ if (ret)
+ rtw89_err(rtwdev, "[ERR]lte not ready(W)\n");
+
+ rtw89_write32(rtwdev, R_AX_LTE_CTRL, 0x800F0000 | offset);
+ *val = rtw89_read32(rtwdev, R_AX_LTE_RDATA);
+
+ return ret;
+}
+
+static
+int dle_dfi_ctrl(struct rtw89_dev *rtwdev, struct rtw89_mac_dle_dfi_ctrl *ctrl)
+{
+ u32 ctrl_reg, data_reg, ctrl_data;
+ u32 val;
+ int ret;
+
+ switch (ctrl->type) {
+ case DLE_CTRL_TYPE_WDE:
+ ctrl_reg = R_AX_WDE_DBG_FUN_INTF_CTL;
+ data_reg = R_AX_WDE_DBG_FUN_INTF_DATA;
+ ctrl_data = FIELD_PREP(B_AX_WDE_DFI_TRGSEL_MASK, ctrl->target) |
+ FIELD_PREP(B_AX_WDE_DFI_ADDR_MASK, ctrl->addr) |
+ B_AX_WDE_DFI_ACTIVE;
+ break;
+ case DLE_CTRL_TYPE_PLE:
+ ctrl_reg = R_AX_PLE_DBG_FUN_INTF_CTL;
+ data_reg = R_AX_PLE_DBG_FUN_INTF_DATA;
+ ctrl_data = FIELD_PREP(B_AX_PLE_DFI_TRGSEL_MASK, ctrl->target) |
+ FIELD_PREP(B_AX_PLE_DFI_ADDR_MASK, ctrl->addr) |
+ B_AX_PLE_DFI_ACTIVE;
+ break;
+ default:
+ rtw89_warn(rtwdev, "[ERR] dfi ctrl type %d\n", ctrl->type);
+ return -EINVAL;
+ }
+
+ rtw89_write32(rtwdev, ctrl_reg, ctrl_data);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_WDE_DFI_ACTIVE),
+ 1, 1000, false, rtwdev, ctrl_reg);
+ if (ret) {
+ rtw89_warn(rtwdev, "[ERR] dle dfi ctrl 0x%X set 0x%X timeout\n",
+ ctrl_reg, ctrl_data);
+ return ret;
+ }
+
+ ctrl->out_data = rtw89_read32(rtwdev, data_reg);
+ return 0;
+}
+
+static int dle_dfi_quota(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_quota *quota)
+{
+ struct rtw89_mac_dle_dfi_ctrl ctrl;
+ int ret;
+
+ ctrl.type = quota->dle_type;
+ ctrl.target = DLE_DFI_TYPE_QUOTA;
+ ctrl.addr = quota->qtaid;
+ ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ if (ret) {
+ rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
+ return ret;
+ }
+
+ quota->rsv_pgnum = FIELD_GET(B_AX_DLE_RSV_PGNUM, ctrl.out_data);
+ quota->use_pgnum = FIELD_GET(B_AX_DLE_USE_PGNUM, ctrl.out_data);
+ return 0;
+}
+
+static int dle_dfi_qempty(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_dle_dfi_qempty *qempty)
+{
+ struct rtw89_mac_dle_dfi_ctrl ctrl;
+ int ret;
+
+ ctrl.type = qempty->dle_type;
+ ctrl.target = DLE_DFI_TYPE_QEMPTY;
+ ctrl.addr = qempty->grpsel;
+ ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ if (ret) {
+ rtw89_warn(rtwdev, "[ERR]dle_dfi_ctrl %d\n", ret);
+ return ret;
+ }
+
+ qempty->qempty = FIELD_GET(B_AX_DLE_QEMPTY_GRP, ctrl.out_data);
+ return 0;
+}
+
+static void dump_err_status_dispatcher(struct rtw89_dev *rtwdev)
+{
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_HOST_DISPATCHER_ALWAYS_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_HOST_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_CPU_DISPATCHER_ALWAYS_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPU_DISPATCHER_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_OTHER_DISPATCHER_ALWAYS_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_OTHER_DISPATCHER_ERR_ISR));
+}
+
+static void rtw89_mac_dump_qta_lost(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_mac_dle_dfi_qempty qempty;
+ struct rtw89_mac_dle_dfi_quota quota;
+ struct rtw89_mac_dle_dfi_ctrl ctrl;
+ u32 val, not_empty, i;
+ int ret;
+
+ qempty.dle_type = DLE_CTRL_TYPE_PLE;
+ qempty.grpsel = 0;
+ ret = dle_dfi_qempty(rtwdev, &qempty);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "DLE group0 empty: 0x%x\n", qempty.qempty);
+
+ for (not_empty = ~qempty.qempty, i = 0; not_empty != 0; not_empty >>= 1, i++) {
+ if (!(not_empty & BIT(0)))
+ continue;
+ ctrl.type = DLE_CTRL_TYPE_PLE;
+ ctrl.target = DLE_DFI_TYPE_QLNKTBL;
+ ctrl.addr = (QLNKTBL_ADDR_INFO_SEL_0 ? QLNKTBL_ADDR_INFO_SEL : 0) |
+ FIELD_PREP(QLNKTBL_ADDR_TBL_IDX_MASK, i);
+ ret = dle_dfi_ctrl(rtwdev, &ctrl);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "qidx%d pktcnt = %ld\n", i,
+ FIELD_GET(QLNKTBL_DATA_SEL1_PKT_CNT_MASK,
+ ctrl.out_data));
+ }
+
+ quota.dle_type = DLE_CTRL_TYPE_PLE;
+ quota.qtaid = 6;
+ ret = dle_dfi_quota(rtwdev, &quota);
+ if (ret)
+ rtw89_warn(rtwdev, "%s: query DLE fail\n", __func__);
+ else
+ rtw89_info(rtwdev, "quota6 rsv/use: 0x%x/0x%x\n",
+ quota.rsv_pgnum, quota.use_pgnum);
+
+ val = rtw89_read32(rtwdev, R_AX_PLE_QTA6_CFG);
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]min_pgnum=0x%lx\n",
+ FIELD_GET(B_AX_PLE_Q6_MIN_SIZE_MASK, val));
+ rtw89_info(rtwdev, "[PLE][CMAC0_RX]max_pgnum=0x%lx\n",
+ FIELD_GET(B_AX_PLE_Q6_MAX_SIZE_MASK, val));
+
+ dump_err_status_dispatcher(rtwdev);
+}
+
+static void rtw89_mac_dump_l0_to_l1(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
+{
+ u32 dbg, event;
+
+ dbg = rtw89_read32(rtwdev, R_AX_SER_DBG_INFO);
+ event = FIELD_GET(B_AX_L0_TO_L1_EVENT_MASK, dbg);
+
+ switch (event) {
+ case MAC_AX_L0_TO_L1_RX_QTA_LOST:
+ rtw89_info(rtwdev, "quota lost!\n");
+ rtw89_mac_dump_qta_lost(rtwdev);
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw89_mac_dump_err_status(struct rtw89_dev *rtwdev,
+ enum mac_ax_err_info err)
+{
+ u32 dmac_err, cmac_err;
+
+ if (err != MAC_AX_ERR_L1_ERR_DMAC &&
+ err != MAC_AX_ERR_L0_PROMOTE_TO_L1)
+ return;
+
+ rtw89_info(rtwdev, "--->\nerr=0x%x\n", err);
+ rtw89_info(rtwdev, "R_AX_SER_DBG_INFO =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SER_DBG_INFO));
+
+ cmac_err = rtw89_read32(rtwdev, R_AX_CMAC_ERR_ISR);
+ rtw89_info(rtwdev, "R_AX_CMAC_ERR_ISR =0x%08x\n", cmac_err);
+ dmac_err = rtw89_read32(rtwdev, R_AX_DMAC_ERR_ISR);
+ rtw89_info(rtwdev, "R_AX_DMAC_ERR_ISR =0x%08x\n", dmac_err);
+
+ if (dmac_err) {
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_FLAG_CFG =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_FLAG_CFG));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_CFG =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_CFG));
+ }
+
+ if (dmac_err & B_AX_WDRLS_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_WDRLS_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_WDRLS_ERR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDRLS_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_WSEC_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_SEC_ERR_IMR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_DEBUG));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D00 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D04 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D10 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_ACCESS));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D14 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_RDATA));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D18 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_CAM_WDATA));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D20 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TX_DEBUG));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D24 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_RX_DEBUG));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D28 =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_PKT_CNT));
+ rtw89_info(rtwdev, "SEC_local_Register 0x9D2C =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_SEC_TRX_BLK_CNT));
+ }
+
+ if (dmac_err & B_AX_MPDU_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_MPDU_TX_ERR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_TX_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_MPDU_RX_ERR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_MPDU_RX_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_STA_SCHEDULER_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_STA_SCHEDULER_ERR_ISR= 0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_STA_SCHEDULER_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_WDE_DLE_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ dump_err_status_dispatcher(rtwdev);
+ }
+
+ if (dmac_err & B_AX_TXPKTCTRL_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR));
+ rtw89_info(rtwdev, "R_AX_TXPKTCTL_ERR_IMR_ISR_B1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1));
+ }
+
+ if (dmac_err & B_AX_PLE_DLE_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_WDE_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_PLE_ERR_FLAG_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_ERR_FLAG_ISR));
+ rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_0));
+ rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_1));
+ rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_2));
+ rtw89_info(rtwdev, "R_AX_WD_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_WD_CPUQ_OP_STATUS));
+ rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_0));
+ rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_1));
+ rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_2));
+ rtw89_info(rtwdev, "R_AX_PL_CPUQ_OP_STATUS=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PL_CPUQ_OP_STATUS));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_0));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_1=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_1));
+ rtw89_info(rtwdev, "R_AX_RXDMA_PKT_INFO_2=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RXDMA_PKT_INFO_2));
+ dump_err_status_dispatcher(rtwdev);
+ }
+
+ if (dmac_err & B_AX_PKTIN_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_IMR =0x%08x ",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_PKTIN_ERR_ISR =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PKTIN_ERR_ISR));
+ }
+
+ if (dmac_err & B_AX_DISPATCH_ERR_FLAG)
+ dump_err_status_dispatcher(rtwdev);
+
+ if (dmac_err & B_AX_DLE_CPUIO_ERR_FLAG) {
+ rtw89_info(rtwdev, "R_AX_CPUIO_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_CPUIO_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_CPUIO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_CPUIO_ERR_ISR));
+ }
+
+ if (dmac_err & BIT(11)) {
+ rtw89_info(rtwdev, "R_AX_BBRPT_COM_ERR_IMR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_BBRPT_COM_ERR_IMR_ISR));
+ }
+
+ if (cmac_err & B_AX_SCHEDULE_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_SCHEDULE_ERR_IMR));
+ rtw89_info(rtwdev, "R_AX_SCHEDULE_ERR_ISR=0x%04x\n",
+ rtw89_read16(rtwdev, R_AX_SCHEDULE_ERR_ISR));
+ }
+
+ if (cmac_err & B_AX_PTCL_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_PTCL_IMR0=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_PTCL_IMR0));
+ rtw89_info(rtwdev, "R_AX_PTCL_ISR0=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PTCL_ISR0));
+ }
+
+ if (cmac_err & B_AX_DMA_TOP_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_DLE_CTRL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DLE_CTRL));
+ }
+
+ if (cmac_err & B_AX_PHYINTF_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_IMR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_IMR));
+ }
+
+ if (cmac_err & B_AX_TXPWR_CTRL_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_TXPWR_IMR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_TXPWR_IMR));
+ rtw89_info(rtwdev, "R_AX_TXPWR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_TXPWR_ISR));
+ }
+
+ if (cmac_err & B_AX_WMAC_RX_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
+ rtw89_info(rtwdev, "R_AX_PHYINFO_ERR_ISR=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_PHYINFO_ERR_ISR));
+ }
+
+ if (cmac_err & B_AX_WMAC_TX_ERR_IND) {
+ rtw89_info(rtwdev, "R_AX_TMAC_ERR_IMR_ISR=0x%08x ",
+ rtw89_read32(rtwdev, R_AX_TMAC_ERR_IMR_ISR));
+ rtw89_info(rtwdev, "R_AX_DBGSEL_TRXPTCL=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DBGSEL_TRXPTCL));
+ }
+
+ rtwdev->hci.ops->dump_err_status(rtwdev);
+
+ if (err == MAC_AX_ERR_L0_PROMOTE_TO_L1)
+ rtw89_mac_dump_l0_to_l1(rtwdev, err);
+
+ rtw89_info(rtwdev, "<---\n");
+}
+
+u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev)
+{
+ u32 err;
+ int ret;
+
+ ret = read_poll_timeout(rtw89_read32, err, (err != 0), 1000, 100000,
+ false, rtwdev, R_AX_HALT_C2H_CTRL);
+ if (ret) {
+ rtw89_warn(rtwdev, "Polling FW err status fail\n");
+ return ret;
+ }
+
+ err = rtw89_read32(rtwdev, R_AX_HALT_C2H);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+
+ rtw89_fw_st_dbg_dump(rtwdev);
+ rtw89_mac_dump_err_status(rtwdev, err);
+
+ return err;
+}
+EXPORT_SYMBOL(rtw89_mac_get_err_status);
+
+int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err)
+{
+ u32 halt;
+ int ret = 0;
+
+ if (err > MAC_AX_SET_ERR_MAX) {
+ rtw89_err(rtwdev, "Bad set-err-status value 0x%08x\n", err);
+ return -EINVAL;
+ }
+
+ ret = read_poll_timeout(rtw89_read32, halt, (halt == 0x0), 1000,
+ 100000, false, rtwdev, R_AX_HALT_H2C_CTRL);
+ if (ret) {
+ rtw89_err(rtwdev, "FW doesn't receive previous msg\n");
+ return -EFAULT;
+ }
+
+ rtw89_write32(rtwdev, R_AX_HALT_H2C, err);
+ rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, B_AX_HALT_H2C_TRIGGER);
+
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_mac_set_err_status);
+
+const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie = {
+ 2, 40, 0, 0, 1, 0, 0, 0
+};
+
+static int hfc_reset_param(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ struct rtw89_hfc_param_ini param_ini = {NULL};
+ u8 qta_mode = rtwdev->mac.dle_info.qta_mode;
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ param_ini = rtwdev->chip->hfc_param_ini[qta_mode];
+ param->en = 0;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (param_ini.pub_cfg)
+ param->pub_cfg = *param_ini.pub_cfg;
+
+ if (param_ini.prec_cfg) {
+ param->prec_cfg = *param_ini.prec_cfg;
+ rtwdev->hal.sw_amsdu_max_size =
+ param->prec_cfg.wp_ch07_prec * HFC_PAGE_UNIT;
+ }
+
+ if (param_ini.ch_cfg)
+ param->ch_cfg = param_ini.ch_cfg;
+
+ memset(&param->ch_info, 0, sizeof(param->ch_info));
+ memset(&param->pub_info, 0, sizeof(param->pub_info));
+ param->mode = param_ini.mode;
+
+ return 0;
+}
+
+static int hfc_ch_cfg_chk(struct rtw89_dev *rtwdev, u8 ch)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_ch_cfg *ch_cfg = param->ch_cfg;
+ const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+
+ if (ch >= RTW89_DMA_CH_NUM)
+ return -EINVAL;
+
+ if ((ch_cfg[ch].min && ch_cfg[ch].min < prec_cfg->ch011_prec) ||
+ ch_cfg[ch].max > pub_cfg->pub_max)
+ return -EINVAL;
+ if (ch_cfg[ch].grp >= grp_num)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hfc_pub_info_chk(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_pub_cfg *cfg = &param->pub_cfg;
+ struct rtw89_hfc_pub_info *info = &param->pub_info;
+
+ if (info->g0_used + info->g1_used + info->pub_aval != cfg->pub_max) {
+ if (rtwdev->chip->chip_id == RTL8852A)
+ return 0;
+ else
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hfc_pub_cfg_chk(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+
+ if (pub_cfg->grp0 + pub_cfg->grp1 != pub_cfg->pub_max)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int hfc_ch_ctrl(struct rtw89_dev *rtwdev, u8 ch)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
+ int ret = 0;
+ u32 val = 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ ret = hfc_ch_cfg_chk(rtwdev, ch);
+ if (ret)
+ return ret;
+
+ if (ch > RTW89_DMA_B1HI)
+ return -EINVAL;
+
+ val = u32_encode_bits(cfg[ch].min, B_AX_MIN_PG_MASK) |
+ u32_encode_bits(cfg[ch].max, B_AX_MAX_PG_MASK) |
+ (cfg[ch].grp ? B_AX_GRP : 0);
+ rtw89_write32(rtwdev, R_AX_ACH0_PAGE_CTRL + ch * 4, val);
+
+ return 0;
+}
+
+static int hfc_upd_ch_info(struct rtw89_dev *rtwdev, u8 ch)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ struct rtw89_hfc_ch_info *info = param->ch_info;
+ const struct rtw89_hfc_ch_cfg *cfg = param->ch_cfg;
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (ch > RTW89_DMA_H2C)
+ return -EINVAL;
+
+ val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO + ch * 4);
+ info[ch].aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);
+ if (ch < RTW89_DMA_H2C)
+ info[ch].used = u32_get_bits(val, B_AX_USE_PG_MASK);
+ else
+ info[ch].used = cfg[ch].min - info[ch].aval;
+
+ return 0;
+}
+
+static int hfc_pub_ctrl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_hfc_pub_cfg *cfg = &rtwdev->mac.hfc_param.pub_cfg;
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ ret = hfc_pub_cfg_chk(rtwdev);
+ if (ret)
+ return ret;
+
+ val = u32_encode_bits(cfg->grp0, B_AX_PUBPG_G0_MASK) |
+ u32_encode_bits(cfg->grp1, B_AX_PUBPG_G1_MASK);
+ rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL1, val);
+
+ val = u32_encode_bits(cfg->wp_thrd, B_AX_WP_THRD_MASK);
+ rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL2, val);
+
+ return 0;
+}
+
+static int hfc_upd_mix_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ struct rtw89_hfc_pub_info *info = &param->pub_info;
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO1);
+ info->g0_used = u32_get_bits(val, B_AX_G0_USE_PG_MASK);
+ info->g1_used = u32_get_bits(val, B_AX_G1_USE_PG_MASK);
+ val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO3);
+ info->g0_aval = u32_get_bits(val, B_AX_G0_AVAL_PG_MASK);
+ info->g1_aval = u32_get_bits(val, B_AX_G1_AVAL_PG_MASK);
+ info->pub_aval =
+ u32_get_bits(rtw89_read32(rtwdev, R_AX_PUB_PAGE_INFO2),
+ B_AX_PUB_AVAL_PG_MASK);
+ info->wp_aval =
+ u32_get_bits(rtw89_read32(rtwdev, R_AX_WP_PAGE_INFO1),
+ B_AX_WP_AVAL_PG_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ param->en = val & B_AX_HCI_FC_EN ? 1 : 0;
+ param->h2c_en = val & B_AX_HCI_FC_CH12_EN ? 1 : 0;
+ param->mode = u32_get_bits(val, B_AX_HCI_FC_MODE_MASK);
+ prec_cfg->ch011_full_cond =
+ u32_get_bits(val, B_AX_HCI_FC_WD_FULL_COND_MASK);
+ prec_cfg->h2c_full_cond =
+ u32_get_bits(val, B_AX_HCI_FC_CH12_FULL_COND_MASK);
+ prec_cfg->wp_ch07_full_cond =
+ u32_get_bits(val, B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
+ prec_cfg->wp_ch811_full_cond =
+ u32_get_bits(val, B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_CH_PAGE_CTRL);
+ prec_cfg->ch011_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH011_MASK);
+ prec_cfg->h2c_prec = u32_get_bits(val, B_AX_PREC_PAGE_CH12_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL2);
+ pub_cfg->pub_max = u32_get_bits(val, B_AX_PUBPG_ALL_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL1);
+ prec_cfg->wp_ch07_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH07_MASK);
+ prec_cfg->wp_ch811_prec = u32_get_bits(val, B_AX_PREC_PAGE_WP_CH811_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_WP_PAGE_CTRL2);
+ pub_cfg->wp_thrd = u32_get_bits(val, B_AX_WP_THRD_MASK);
+
+ val = rtw89_read32(rtwdev, R_AX_PUB_PAGE_CTRL1);
+ pub_cfg->grp0 = u32_get_bits(val, B_AX_PUBPG_G0_MASK);
+ pub_cfg->grp1 = u32_get_bits(val, B_AX_PUBPG_G1_MASK);
+
+ ret = hfc_pub_info_chk(rtwdev);
+ if (param->en && ret)
+ return ret;
+
+ return 0;
+}
+
+static void hfc_h2c_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ u32 val;
+
+ val = u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
+ rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+
+ rtw89_write32_mask(rtwdev, R_AX_HCI_FC_CTRL,
+ B_AX_HCI_FC_CH12_FULL_COND_MASK,
+ prec_cfg->h2c_full_cond);
+}
+
+static void hfc_mix_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ const struct rtw89_hfc_pub_cfg *pub_cfg = &param->pub_cfg;
+ const struct rtw89_hfc_prec_cfg *prec_cfg = &param->prec_cfg;
+ u32 val;
+
+ val = u32_encode_bits(prec_cfg->ch011_prec, B_AX_PREC_PAGE_CH011_MASK) |
+ u32_encode_bits(prec_cfg->h2c_prec, B_AX_PREC_PAGE_CH12_MASK);
+ rtw89_write32(rtwdev, R_AX_CH_PAGE_CTRL, val);
+
+ val = u32_encode_bits(pub_cfg->pub_max, B_AX_PUBPG_ALL_MASK);
+ rtw89_write32(rtwdev, R_AX_PUB_PAGE_CTRL2, val);
+
+ val = u32_encode_bits(prec_cfg->wp_ch07_prec,
+ B_AX_PREC_PAGE_WP_CH07_MASK) |
+ u32_encode_bits(prec_cfg->wp_ch811_prec,
+ B_AX_PREC_PAGE_WP_CH811_MASK);
+ rtw89_write32(rtwdev, R_AX_WP_PAGE_CTRL1, val);
+
+ val = u32_replace_bits(rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL),
+ param->mode, B_AX_HCI_FC_MODE_MASK);
+ val = u32_replace_bits(val, prec_cfg->ch011_full_cond,
+ B_AX_HCI_FC_WD_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->h2c_full_cond,
+ B_AX_HCI_FC_CH12_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->wp_ch07_full_cond,
+ B_AX_HCI_FC_WP_CH07_FULL_COND_MASK);
+ val = u32_replace_bits(val, prec_cfg->wp_ch811_full_cond,
+ B_AX_HCI_FC_WP_CH811_FULL_COND_MASK);
+ rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+}
+
+static void hfc_func_en(struct rtw89_dev *rtwdev, bool en, bool h2c_en)
+{
+ struct rtw89_hfc_param *param = &rtwdev->mac.hfc_param;
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_AX_HCI_FC_CTRL);
+ param->en = en;
+ param->h2c_en = h2c_en;
+ val = en ? (val | B_AX_HCI_FC_EN) : (val & ~B_AX_HCI_FC_EN);
+ val = h2c_en ? (val | B_AX_HCI_FC_CH12_EN) :
+ (val & ~B_AX_HCI_FC_CH12_EN);
+ rtw89_write32(rtwdev, R_AX_HCI_FC_CTRL, val);
+}
+
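+/* Bring up HCI flow control: optionally reset the parameters, disable flow
+ * control, program the per-channel and public page quotas, re-enable it,
+ * then read the resulting configuration back into hfc_param.
+ */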
+static int hfc_init(struct rtw89_dev *rtwdev, bool reset, bool en, bool h2c_en)
+{
+ u8 ch;
+ int ret = 0;
+
+ if (reset)
+ ret = hfc_reset_param(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ hfc_func_en(rtwdev, false, false);
+
+ if (!en && h2c_en) {
+ hfc_h2c_cfg(rtwdev);
+ hfc_func_en(rtwdev, en, h2c_en);
+ return ret;
+ }
+
+ for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ ret = hfc_ch_ctrl(rtwdev, ch);
+ if (ret)
+ return ret;
+ }
+
+ ret = hfc_pub_ctrl(rtwdev);
+ if (ret)
+ return ret;
+
+ hfc_mix_cfg(rtwdev);
+ if (en || h2c_en) {
+ hfc_func_en(rtwdev, en, h2c_en);
+ udelay(10);
+ }
+ for (ch = RTW89_DMA_ACH0; ch < RTW89_DMA_H2C; ch++) {
+ ret = hfc_upd_ch_info(rtwdev, ch);
+ if (ret)
+ return ret;
+ }
+ ret = hfc_upd_mix_info(rtwdev);
+
+ return ret;
+}
+
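+/* Poll one power-sequence register until the masked value matches the
+ * expected value, sleeping 1 ms per try for up to PWR_POLL_CNT tries.
+ */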
+#define PWR_POLL_CNT 2000
+static int pwr_cmd_poll(struct rtw89_dev *rtwdev,
+ const struct rtw89_pwr_cfg *cfg)
+{
+ u8 val = 0;
+ int ret;
+ u32 addr = cfg->base == PWR_BASE_SDIO ?
+ cfg->addr | SDIO_LOCAL_BASE_ADDR : cfg->addr;
+
+ ret = read_poll_timeout(rtw89_read8, val, !((val ^ cfg->val) & cfg->msk),
+ 1000, 1000 * PWR_POLL_CNT, false, rtwdev, addr);
+
+ if (!ret)
+ return 0;
+
+ rtw89_warn(rtwdev, "[ERR] Polling timeout\n");
+ rtw89_warn(rtwdev, "[ERR] addr: %X, %X\n", addr, cfg->addr);
+ rtw89_warn(rtwdev, "[ERR] val: %X, %X\n", val, cfg->val);
+
+ return -EBUSY;
+}
+
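+/* Execute one power on/off command table, skipping entries that do not
+ * match the chip cut (cv) or host interface mask; supports masked byte
+ * writes, register polling and micro/millisecond delays.
+ */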
+static int rtw89_mac_sub_pwr_seq(struct rtw89_dev *rtwdev, u8 cv_msk,
+ u8 intf_msk, const struct rtw89_pwr_cfg *cfg)
+{
+ const struct rtw89_pwr_cfg *cur_cfg;
+ u32 addr;
+ u8 val;
+
+ for (cur_cfg = cfg; cur_cfg->cmd != PWR_CMD_END; cur_cfg++) {
+ if (!(cur_cfg->intf_msk & intf_msk) ||
+ !(cur_cfg->cv_msk & cv_msk))
+ continue;
+
+ switch (cur_cfg->cmd) {
+ case PWR_CMD_WRITE:
+ addr = cur_cfg->addr;
+
+ if (cur_cfg->base == PWR_BASE_SDIO)
+ addr |= SDIO_LOCAL_BASE_ADDR;
+
+ val = rtw89_read8(rtwdev, addr);
+ val &= ~(cur_cfg->msk);
+ val |= (cur_cfg->val & cur_cfg->msk);
+
+ rtw89_write8(rtwdev, addr, val);
+ break;
+ case PWR_CMD_POLL:
+ if (pwr_cmd_poll(rtwdev, cur_cfg))
+ return -EBUSY;
+ break;
+ case PWR_CMD_DELAY:
+ if (cur_cfg->val == PWR_DELAY_US)
+ udelay(cur_cfg->addr);
+ else
+ fsleep(cur_cfg->addr * 1000);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int rtw89_mac_pwr_seq(struct rtw89_dev *rtwdev,
+ const struct rtw89_pwr_cfg * const *cfg_seq)
+{
+ int ret;
+
+ for (; *cfg_seq; cfg_seq++) {
+ ret = rtw89_mac_sub_pwr_seq(rtwdev, BIT(rtwdev->hal.cv),
+ PWR_INTF_MSK_PCIE, *cfg_seq);
+ if (ret)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static enum rtw89_rpwm_req_pwr_state
+rtw89_mac_get_req_pwr_state(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_rpwm_req_pwr_state state;
+
+ switch (rtwdev->ps_mode) {
+ case RTW89_PS_MODE_RFOFF:
+ state = RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF;
+ break;
+ case RTW89_PS_MODE_CLK_GATED:
+ state = RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED;
+ break;
+ case RTW89_PS_MODE_PWR_GATED:
+ state = RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED;
+ break;
+ default:
+ state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
+ break;
+ }
+ return state;
+}
+
+static void rtw89_mac_send_rpwm(struct rtw89_dev *rtwdev,
+ enum rtw89_rpwm_req_pwr_state req_pwr_state)
+{
+ u16 request;
+
+ request = rtw89_read16(rtwdev, R_AX_RPWM);
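+ /* Keep only the toggle bit, inverted from its previous value; the
+  * remaining request fields are rebuilt below.
+  */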
+ request ^= request | PS_RPWM_TOGGLE;
+
+ rtwdev->mac.rpwm_seq_num = (rtwdev->mac.rpwm_seq_num + 1) &
+ RPWM_SEQ_NUM_MAX;
+ request |= FIELD_PREP(PS_RPWM_SEQ_NUM, rtwdev->mac.rpwm_seq_num);
+
+ request |= req_pwr_state;
+
+ if (req_pwr_state < RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+ request |= PS_RPWM_ACK;
+
+ rtw89_write16(rtwdev, rtwdev->hci.rpwm_addr, request);
+}
+
+static int rtw89_mac_check_cpwm_state(struct rtw89_dev *rtwdev,
+ enum rtw89_rpwm_req_pwr_state req_pwr_state)
+{
+ bool request_deep_mode;
+ bool in_deep_mode;
+ u8 rpwm_req_num;
+ u8 cpwm_rsp_seq;
+ u8 cpwm_seq;
+ u8 cpwm_status;
+
+ if (req_pwr_state >= RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED)
+ request_deep_mode = true;
+ else
+ request_deep_mode = false;
+
+ if (rtw89_read32_mask(rtwdev, R_AX_LDM, B_AX_EN_32K))
+ in_deep_mode = true;
+ else
+ in_deep_mode = false;
+
+ if (request_deep_mode != in_deep_mode)
+ return -EPERM;
+
+ if (request_deep_mode)
+ return 0;
+
+ rpwm_req_num = rtwdev->mac.rpwm_seq_num;
+ cpwm_rsp_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM,
+ PS_CPWM_RSP_SEQ_NUM);
+
+ if (rpwm_req_num != cpwm_rsp_seq)
+ return -EPERM;
+
+ rtwdev->mac.cpwm_seq_num = (rtwdev->mac.cpwm_seq_num + 1) &
+ CPWM_SEQ_NUM_MAX;
+
+ cpwm_seq = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_SEQ_NUM);
+ if (cpwm_seq != rtwdev->mac.cpwm_seq_num)
+ return -EPERM;
+
+ cpwm_status = rtw89_read16_mask(rtwdev, R_AX_CPWM, PS_CPWM_STATE);
+ if (cpwm_status != req_pwr_state)
+ return -EPERM;
+
+ return 0;
+}
+
+void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter)
+{
+ enum rtw89_rpwm_req_pwr_state state;
+ int ret;
+
+ if (enter)
+ state = rtw89_mac_get_req_pwr_state(rtwdev);
+ else
+ state = RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE;
+
+ rtw89_mac_send_rpwm(rtwdev, state);
+ ret = read_poll_timeout_atomic(rtw89_mac_check_cpwm_state, ret, !ret,
+ 1000, 15000, false, rtwdev, state);
+ if (ret)
+ rtw89_err(rtwdev, "firmware failed to ack for %s ps mode\n",
+ enter ? "entering" : "leaving");
+}
+
+static int rtw89_mac_power_switch(struct rtw89_dev *rtwdev, bool on)
+{
+#define PWR_ACT 1
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_pwr_cfg * const *cfg_seq;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ int ret;
+ u8 val;
+
+ if (on)
+ cfg_seq = chip->pwr_on_seq;
+ else
+ cfg_seq = chip->pwr_off_seq;
+
+ if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ __rtw89_leave_ps_mode(rtwdev);
+
+ val = rtw89_read32_mask(rtwdev, R_AX_IC_PWR_STATE, B_AX_WLMAC_PWR_STE_MASK);
+ if (on && val == PWR_ACT) {
+ rtw89_err(rtwdev, "MAC has already powered on\n");
+ return -EBUSY;
+ }
+
+ ret = rtw89_mac_pwr_seq(rtwdev, cfg_seq);
+ if (ret)
+ return ret;
+
+ if (on) {
+ set_bit(RTW89_FLAG_POWERON, rtwdev->flags);
+ rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_TP_MAJOR);
+ } else {
+ clear_bit(RTW89_FLAG_POWERON, rtwdev->flags);
+ clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
+ rtw89_write8(rtwdev, R_AX_SCOREBOARD + 3, MAC_AX_NOTIFY_PWR_MAJOR);
+ hal->current_channel = 0;
+ }
+
+ return 0;
+#undef PWR_ACT
+}
+
+void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev)
+{
+ rtw89_mac_power_switch(rtwdev, false);
+}
+
+static int cmac_func_en(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
+{
+ u32 func_en = 0;
+ u32 ck_en = 0;
+ u32 c1pc_en = 0;
+ u32 addrl_func_en[] = {R_AX_CMAC_FUNC_EN, R_AX_CMAC_FUNC_EN_C1};
+ u32 addrl_ck_en[] = {R_AX_CK_EN, R_AX_CK_EN_C1};
+
+ func_en = B_AX_CMAC_EN | B_AX_CMAC_TXEN | B_AX_CMAC_RXEN |
+ B_AX_PHYINTF_EN | B_AX_CMAC_DMA_EN | B_AX_PTCLTOP_EN |
+ B_AX_SCHEDULER_EN | B_AX_TMAC_EN | B_AX_RMAC_EN;
+ ck_en = B_AX_CMAC_CKEN | B_AX_PHYINTF_CKEN | B_AX_CMAC_DMA_CKEN |
+ B_AX_PTCLTOP_CKEN | B_AX_SCHEDULER_CKEN | B_AX_TMAC_CKEN |
+ B_AX_RMAC_CKEN;
+ c1pc_en = B_AX_R_SYM_WLCMAC1_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P1_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P2_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P3_PC_EN |
+ B_AX_R_SYM_WLCMAC1_P4_PC_EN;
+
+ if (en) {
+ if (mac_idx == RTW89_MAC_1) {
+ rtw89_write32_set(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_CMAC1_FEN);
+ }
+ rtw89_write32_set(rtwdev, addrl_ck_en[mac_idx], ck_en);
+ rtw89_write32_set(rtwdev, addrl_func_en[mac_idx], func_en);
+ } else {
+ rtw89_write32_clr(rtwdev, addrl_func_en[mac_idx], func_en);
+ rtw89_write32_clr(rtwdev, addrl_ck_en[mac_idx], ck_en);
+ if (mac_idx == RTW89_MAC_1) {
+ rtw89_write32_clr(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_CMAC1_FEN);
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_ISO_CMAC12PP);
+ rtw89_write32_clr(rtwdev, R_AX_AFE_CTRL1, c1pc_en);
+ }
+ }
+
+ return 0;
+}
+
+static int dmac_func_en(struct rtw89_dev *rtwdev)
+{
+ u32 val32;
+ int ret = 0;
+
+ val32 = (B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_MAC_SEC_EN |
+ B_AX_DISPATCHER_EN | B_AX_DLE_CPUIO_EN | B_AX_PKT_IN_EN |
+ B_AX_DMAC_TBL_EN | B_AX_PKT_BUF_EN | B_AX_STA_SCH_EN |
+ B_AX_TXPKT_CTRL_EN | B_AX_WD_RLS_EN | B_AX_MPDU_PROC_EN);
+ rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val32);
+
+ val32 = (B_AX_MAC_SEC_CLK_EN | B_AX_DISPATCHER_CLK_EN |
+ B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN |
+ B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN |
+ B_AX_WD_RLS_CLK_EN);
+ rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32);
+
+ return ret;
+}
+
+static int chip_func_en(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_SPSLDO_ON_CTRL0, B_AX_OCP_L1_MASK);
+
+ return 0;
+}
+
+static int rtw89_mac_sys_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = dmac_func_en(rtwdev);
+ if (ret)
+ return ret;
+
+ ret = cmac_func_en(rtwdev, 0, true);
+ if (ret)
+ return ret;
+
+ ret = chip_func_en(rtwdev);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
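+/* DLE (WDE/PLE) buffer-size and quota presets referenced by the chip's
+ * dle_mem table; the positional values follow struct rtw89_dle_size and
+ * the quota structs, covering normal PCIe operation and firmware download.
+ */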
+/* PCIE 64 */
+const struct rtw89_dle_size wde_size0 = {
+ RTW89_WDE_PG_64, 4095, 1,
+};
+
+/* DLFW */
+const struct rtw89_dle_size wde_size4 = {
+ RTW89_WDE_PG_64, 0, 4096,
+};
+
+/* PCIE */
+const struct rtw89_dle_size ple_size0 = {
+ RTW89_PLE_PG_128, 1520, 16,
+};
+
+/* DLFW */
+const struct rtw89_dle_size ple_size4 = {
+ RTW89_PLE_PG_128, 64, 1472,
+};
+
+/* PCIE 64 */
+const struct rtw89_wde_quota wde_qt0 = {
+ 3792, 196, 0, 107,
+};
+
+/* DLFW */
+const struct rtw89_wde_quota wde_qt4 = {
+ 0, 0, 0, 0,
+};
+
+/* PCIE SCC */
+const struct rtw89_ple_quota ple_qt4 = {
+ 264, 0, 16, 20, 26, 13, 356, 0, 32, 40, 8,
+};
+
+/* PCIE SCC */
+const struct rtw89_ple_quota ple_qt5 = {
+ 264, 0, 32, 20, 64, 13, 1101, 0, 64, 128, 120,
+};
+
+/* DLFW */
+const struct rtw89_ple_quota ple_qt13 = {
+ 0, 0, 16, 48, 0, 0, 0, 0, 0, 0, 0
+};
+
+static const struct rtw89_dle_mem *get_dle_mem_cfg(struct rtw89_dev *rtwdev,
+ enum rtw89_qta_mode mode)
+{
+ struct rtw89_mac_info *mac = &rtwdev->mac;
+ const struct rtw89_dle_mem *cfg;
+
+ cfg = &rtwdev->chip->dle_mem[mode];
+ if (!cfg)
+ return NULL;
+
+ if (cfg->mode != mode) {
+ rtw89_warn(rtwdev, "qta mode unmatch!\n");
+ return NULL;
+ }
+
+ mac->dle_info.wde_pg_size = cfg->wde_size->pge_size;
+ mac->dle_info.ple_pg_size = cfg->ple_size->pge_size;
+ mac->dle_info.qta_mode = mode;
+ mac->dle_info.c0_rx_qta = cfg->ple_min_qt->cma0_dma;
+ mac->dle_info.c1_rx_qta = cfg->ple_min_qt->cma1_dma;
+
+ return cfg;
+}
+
+static inline u32 dle_used_size(const struct rtw89_dle_size *wde,
+ const struct rtw89_dle_size *ple)
+{
+ return wde->pge_size * (wde->lnk_pge_num + wde->unlnk_pge_num) +
+ ple->pge_size * (ple->lnk_pge_num + ple->unlnk_pge_num);
+}
+
+static void dle_func_en(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_DMAC_FUNC_EN,
+ B_AX_DLE_WDE_EN | B_AX_DLE_PLE_EN);
+}
+
+static void dle_clk_en(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable)
+ rtw89_write32_set(rtwdev, R_AX_DMAC_CLK_EN,
+ B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
+ else
+ rtw89_write32_clr(rtwdev, R_AX_DMAC_CLK_EN,
+ B_AX_DLE_WDE_CLK_EN | B_AX_DLE_PLE_CLK_EN);
+}
+
+static int dle_mix_cfg(struct rtw89_dev *rtwdev, const struct rtw89_dle_mem *cfg)
+{
+ const struct rtw89_dle_size *size_cfg;
+ u32 val;
+ u8 bound = 0;
+
+ val = rtw89_read32(rtwdev, R_AX_WDE_PKTBUF_CFG);
+ size_cfg = cfg->wde_size;
+
+ switch (size_cfg->pge_size) {
+ default:
+ case RTW89_WDE_PG_64:
+ val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_64,
+ B_AX_WDE_PAGE_SEL_MASK);
+ break;
+ case RTW89_WDE_PG_128:
+ val = u32_replace_bits(val, S_AX_WDE_PAGE_SEL_128,
+ B_AX_WDE_PAGE_SEL_MASK);
+ break;
+ case RTW89_WDE_PG_256:
+ rtw89_err(rtwdev, "[ERR]WDE DLE doesn't support 256 byte!\n");
+ return -EINVAL;
+ }
+
+ val = u32_replace_bits(val, bound, B_AX_WDE_START_BOUND_MASK);
+ val = u32_replace_bits(val, size_cfg->lnk_pge_num,
+ B_AX_WDE_FREE_PAGE_NUM_MASK);
+ rtw89_write32(rtwdev, R_AX_WDE_PKTBUF_CFG, val);
+
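+ /* Derive the PLE start boundary from the total WDE area size (in
+  * DLE_BOUND_UNIT units) before switching size_cfg to the PLE config.
+  */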
+ val = rtw89_read32(rtwdev, R_AX_PLE_PKTBUF_CFG);
+ bound = (size_cfg->lnk_pge_num + size_cfg->unlnk_pge_num)
+ * size_cfg->pge_size / DLE_BOUND_UNIT;
+ size_cfg = cfg->ple_size;
+
+ switch (size_cfg->pge_size) {
+ default:
+ case RTW89_PLE_PG_64:
+ rtw89_err(rtwdev, "[ERR]PLE DLE doesn't support 64 byte!\n");
+ return -EINVAL;
+ case RTW89_PLE_PG_128:
+ val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_128,
+ B_AX_PLE_PAGE_SEL_MASK);
+ break;
+ case RTW89_PLE_PG_256:
+ val = u32_replace_bits(val, S_AX_PLE_PAGE_SEL_256,
+ B_AX_PLE_PAGE_SEL_MASK);
+ break;
+ }
+
+ val = u32_replace_bits(val, bound, B_AX_PLE_START_BOUND_MASK);
+ val = u32_replace_bits(val, size_cfg->lnk_pge_num,
+ B_AX_PLE_FREE_PAGE_NUM_MASK);
+ rtw89_write32(rtwdev, R_AX_PLE_PKTBUF_CFG, val);
+
+ return 0;
+}
+
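+/* Program one DLE quota register per module/index: the minimum page count
+ * goes in the low half of the register and the maximum page count in the
+ * high half.
+ */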
+#define INVALID_QT_WCPU U16_MAX
+#define SET_QUOTA_VAL(_min_x, _max_x, _module, _idx) \
+ do { \
+ val = ((_min_x) & \
+ B_AX_ ## _module ## _MIN_SIZE_MASK) | \
+ (((_max_x) << 16) & \
+ B_AX_ ## _module ## _MAX_SIZE_MASK); \
+ rtw89_write32(rtwdev, \
+ R_AX_ ## _module ## _QTA ## _idx ## _CFG, \
+ val); \
+ } while (0)
+#define SET_QUOTA(_x, _module, _idx) \
+ SET_QUOTA_VAL(min_cfg->_x, max_cfg->_x, _module, _idx)
+
+static void wde_quota_cfg(struct rtw89_dev *rtwdev,
+ const struct rtw89_wde_quota *min_cfg,
+ const struct rtw89_wde_quota *max_cfg,
+ u16 ext_wde_min_qt_wcpu)
+{
+ u16 min_qt_wcpu = ext_wde_min_qt_wcpu != INVALID_QT_WCPU ?
+ ext_wde_min_qt_wcpu : min_cfg->wcpu;
+ u32 val;
+
+ SET_QUOTA(hif, WDE, 0);
+ SET_QUOTA_VAL(min_qt_wcpu, max_cfg->wcpu, WDE, 1);
+ SET_QUOTA(pkt_in, WDE, 3);
+ SET_QUOTA(cpu_io, WDE, 4);
+}
+
+static void ple_quota_cfg(struct rtw89_dev *rtwdev,
+ const struct rtw89_ple_quota *min_cfg,
+ const struct rtw89_ple_quota *max_cfg)
+{
+ u32 val;
+
+ SET_QUOTA(cma0_tx, PLE, 0);
+ SET_QUOTA(cma1_tx, PLE, 1);
+ SET_QUOTA(c2h, PLE, 2);
+ SET_QUOTA(h2c, PLE, 3);
+ SET_QUOTA(wcpu, PLE, 4);
+ SET_QUOTA(mpdu_proc, PLE, 5);
+ SET_QUOTA(cma0_dma, PLE, 6);
+ SET_QUOTA(cma1_dma, PLE, 7);
+ SET_QUOTA(bb_rpt, PLE, 8);
+ SET_QUOTA(wd_rel, PLE, 9);
+ SET_QUOTA(cpu_io, PLE, 10);
+}
+
+#undef SET_QUOTA
+
+static void dle_quota_cfg(struct rtw89_dev *rtwdev,
+ const struct rtw89_dle_mem *cfg,
+ u16 ext_wde_min_qt_wcpu)
+{
+ wde_quota_cfg(rtwdev, cfg->wde_min_qt, cfg->wde_max_qt, ext_wde_min_qt_wcpu);
+ ple_quota_cfg(rtwdev, cfg->ple_min_qt, cfg->ple_max_qt);
+}
+
+static int dle_init(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode,
+ enum rtw89_qta_mode ext_mode)
+{
+ const struct rtw89_dle_mem *cfg, *ext_cfg;
+ u16 ext_wde_min_qt_wcpu = INVALID_QT_WCPU;
+ int ret = 0;
+ u32 ini;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ cfg = get_dle_mem_cfg(rtwdev, mode);
+ if (!cfg) {
+ rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (mode == RTW89_QTA_DLFW) {
+ ext_cfg = get_dle_mem_cfg(rtwdev, ext_mode);
+ if (!ext_cfg) {
+ rtw89_err(rtwdev, "[ERR]get_dle_ext_mem_cfg %d\n",
+ ext_mode);
+ ret = -EINVAL;
+ goto error;
+ }
+ ext_wde_min_qt_wcpu = ext_cfg->wde_min_qt->wcpu;
+ }
+
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ dle_func_en(rtwdev, false);
+ dle_clk_en(rtwdev, true);
+
+ ret = dle_mix_cfg(rtwdev, cfg);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] dle mix cfg\n");
+ goto error;
+ }
+ dle_quota_cfg(rtwdev, cfg, ext_wde_min_qt_wcpu);
+
+ dle_func_en(rtwdev, true);
+
+ ret = read_poll_timeout(rtw89_read32, ini,
+ (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
+ 2000, false, rtwdev, R_AX_WDE_INI_STATUS);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]WDE cfg ready\n");
+ return ret;
+ }
+
+ ret = read_poll_timeout(rtw89_read32, ini,
+ (ini & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY, 1,
+ 2000, false, rtwdev, R_AX_PLE_INI_STATUS);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]PLE cfg ready\n");
+ return ret;
+ }
+
+ return 0;
+error:
+ dle_func_en(rtwdev, false);
+ rtw89_err(rtwdev, "[ERR]trxcfg wde 0x8900 = %x\n",
+ rtw89_read32(rtwdev, R_AX_WDE_INI_STATUS));
+ rtw89_err(rtwdev, "[ERR]trxcfg ple 0x8D00 = %x\n",
+ rtw89_read32(rtwdev, R_AX_PLE_INI_STATUS));
+
+ return ret;
+}
+
+static bool dle_is_txq_empty(struct rtw89_dev *rtwdev)
+{
+ u32 msk32;
+ u32 val32;
+
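+ /* All WDE/PLE empty-queue and empty-quota bits that must be set for
+  * the TX path to be considered idle.
+  */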
+ msk32 = B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC | B_AX_WDE_EMPTY_QUE_CMAC0_MBH |
+ B_AX_WDE_EMPTY_QUE_CMAC1_MBH | B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 |
+ B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 | B_AX_WDE_EMPTY_QUE_OTHERS |
+ B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX | B_AX_PLE_EMPTY_QTA_DMAC_H2C |
+ B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX | B_AX_WDE_EMPTY_QUE_DMAC_PKTIN |
+ B_AX_WDE_EMPTY_QTA_DMAC_HIF | B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU |
+ B_AX_WDE_EMPTY_QTA_DMAC_PKTIN | B_AX_WDE_EMPTY_QTA_DMAC_CPUIO |
+ B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL |
+ B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL |
+ B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX |
+ B_AX_PLE_EMPTY_QTA_DMAC_CPUIO |
+ B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU |
+ B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU;
+ val32 = rtw89_read32(rtwdev, R_AX_DLE_EMPTY0);
+
+ if ((val32 & msk32) == msk32)
+ return true;
+
+ return false;
+}
+
+static int sta_sch_init(struct rtw89_dev *rtwdev)
+{
+ u32 p_val;
+ u8 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read8(rtwdev, R_AX_SS_CTRL);
+ val |= B_AX_SS_EN;
+ rtw89_write8(rtwdev, R_AX_SS_CTRL, val);
+
+ ret = read_poll_timeout(rtw89_read32, p_val, p_val & B_AX_SS_INIT_DONE_1,
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, R_AX_SS_CTRL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]STA scheduler init\n");
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_AX_SS_CTRL, B_AX_SS_WARM_INIT_FLG);
+
+ return 0;
+}
+
+static int mpdu_proc_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD);
+ rtw89_write32(rtwdev, R_AX_TF_FWD, TRXCFG_MPDU_PROC_TF_FRWD);
+ rtw89_write32_set(rtwdev, R_AX_MPDU_PROC,
+ B_AX_APPEND_FCS | B_AX_A_ICV_ERR);
+ rtw89_write32(rtwdev, R_AX_CUT_AMSDU_CTRL, TRXCFG_MPDU_PROC_CUT_CTRL);
+
+ return 0;
+}
+
+static int sec_eng_init(struct rtw89_dev *rtwdev)
+{
+ u32 val = 0;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret)
+ return ret;
+
+ val = rtw89_read32(rtwdev, R_AX_SEC_ENG_CTRL);
+ /* init clock */
+ val |= (B_AX_CLK_EN_CGCMP | B_AX_CLK_EN_WAPI | B_AX_CLK_EN_WEP_TKIP);
+ /* init TX encryption */
+ val |= (B_AX_SEC_TX_ENC | B_AX_SEC_RX_DEC);
+ val |= (B_AX_MC_DEC | B_AX_BC_DEC);
+ val &= ~B_AX_TX_PARTIAL_MODE;
+ rtw89_write32(rtwdev, R_AX_SEC_ENG_CTRL, val);
+
+ /* init MIC ICV append */
+ val = rtw89_read32(rtwdev, R_AX_SEC_MPDU_PROC);
+ val |= (B_AX_APPEND_ICV | B_AX_APPEND_MIC);
+
+ /* option init */
+ rtw89_write32(rtwdev, R_AX_SEC_MPDU_PROC, val);
+
+ return 0;
+}
+
+static int dmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+
+ ret = dle_init(rtwdev, rtwdev->mac.qta_mode, RTW89_QTA_INVALID);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE init %d\n", ret);
+ return ret;
+ }
+
+ ret = hfc_init(rtwdev, true, true, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]HCI FC init %d\n", ret);
+ return ret;
+ }
+
+ ret = sta_sch_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]STA SCH init %d\n", ret);
+ return ret;
+ }
+
+ ret = mpdu_proc_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]MPDU Proc init %d\n", ret);
+ return ret;
+ }
+
+ ret = sec_eng_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]Security Engine init %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int addr_cam_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val, reg;
+ u16 p_val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_ADDR_CAM_CTRL, mac_idx);
+
+ val = rtw89_read32(rtwdev, reg);
+ val |= u32_encode_bits(0x7f, B_AX_ADDR_CAM_RANGE_MASK) |
+ B_AX_ADDR_CAM_CLR | B_AX_ADDR_CAM_EN;
+ rtw89_write32(rtwdev, reg, val);
+
+ ret = read_poll_timeout(rtw89_read16, p_val, !(p_val & B_AX_ADDR_CAM_CLR),
+ 1, TRXCFG_WAIT_CNT, false, rtwdev, reg);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]ADDR_CAM reset\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int scheduler_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+ u32 reg;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PREBKF_CFG_0, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PREBKF_TIME_MASK, SCH_PREBKF_24US);
+
+ return 0;
+}
+
+static int rtw89_mac_typ_fltr_opt(struct rtw89_dev *rtwdev,
+ enum rtw89_machdr_frame_type type,
+ enum rtw89_mac_fwd_target fwd_target,
+ u8 mac_idx)
+{
+ u32 reg;
+ u32 val;
+
+ switch (fwd_target) {
+ case RTW89_FWD_DONT_CARE:
+ val = RX_FLTR_FRAME_DROP;
+ break;
+ case RTW89_FWD_TO_HOST:
+ val = RX_FLTR_FRAME_TO_HOST;
+ break;
+ case RTW89_FWD_TO_WLAN_CPU:
+ val = RX_FLTR_FRAME_TO_WLCPU;
+ break;
+ default:
+ rtw89_err(rtwdev, "[ERR]set rx filter fwd target err\n");
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case RTW89_MGNT:
+ reg = rtw89_mac_reg_by_idx(R_AX_MGNT_FLTR, mac_idx);
+ break;
+ case RTW89_CTRL:
+ reg = rtw89_mac_reg_by_idx(R_AX_CTRL_FLTR, mac_idx);
+ break;
+ case RTW89_DATA:
+ reg = rtw89_mac_reg_by_idx(R_AX_DATA_FLTR, mac_idx);
+ break;
+ default:
+ rtw89_err(rtwdev, "[ERR]set rx filter type err\n");
+ return -EINVAL;
+ }
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+static int rx_fltr_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret, i;
+ u32 mac_ftlr, plcp_ftlr;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ for (i = RTW89_MGNT; i <= RTW89_DATA; i++) {
+ ret = rtw89_mac_typ_fltr_opt(rtwdev, i, RTW89_FWD_TO_HOST,
+ mac_idx);
+ if (ret)
+ return ret;
+ }
+ mac_ftlr = rtwdev->hal.rx_fltr;
+ plcp_ftlr = B_AX_CCK_CRC_CHK | B_AX_CCK_SIG_CHK |
+ B_AX_LSIG_PARITY_CHK_EN | B_AX_SIGA_CRC_CHK |
+ B_AX_VHT_SU_SIGB_CRC_CHK | B_AX_VHT_MU_SIGB_CRC_CHK |
+ B_AX_HE_SIGB_CRC_CHK;
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx),
+ mac_ftlr);
+ rtw89_write16(rtwdev, rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx),
+ plcp_ftlr);
+
+ return 0;
+}
+
+static void _patch_dis_resp_chk(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg, val32;
+ u32 b_rsp_chk_nav, b_rsp_chk_cca;
+
+ b_rsp_chk_nav = B_AX_RSP_CHK_TXNAV | B_AX_RSP_CHK_INTRA_NAV |
+ B_AX_RSP_CHK_BASIC_NAV;
+ b_rsp_chk_cca = B_AX_RSP_CHK_SEC_CCA_80 | B_AX_RSP_CHK_SEC_CCA_40 |
+ B_AX_RSP_CHK_SEC_CCA_20 | B_AX_RSP_CHK_BTCCA |
+ B_AX_RSP_CHK_EDCCA | B_AX_RSP_CHK_CCA;
+
+ switch (rtwdev->chip->chip_id) {
+ case RTL8852A:
+ case RTL8852B:
+ reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_nav;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg) & ~b_rsp_chk_cca;
+ rtw89_write32(rtwdev, reg, val32);
+ break;
+ default:
+ reg = rtw89_mac_reg_by_idx(R_AX_RSP_CHK_SIG, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_nav;
+ rtw89_write32(rtwdev, reg, val32);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
+ val32 = rtw89_read32(rtwdev, reg) | b_rsp_chk_cca;
+ rtw89_write32(rtwdev, reg, val32);
+ break;
+ }
+}
+
+static int cca_ctrl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_CCA_CONTROL, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val |= (B_AX_TB_CHK_BASIC_NAV | B_AX_TB_CHK_BTCCA |
+ B_AX_TB_CHK_EDCCA | B_AX_TB_CHK_CCA_P20 |
+ B_AX_SIFS_CHK_BTCCA | B_AX_SIFS_CHK_CCA_P20 |
+ B_AX_CTN_CHK_INTRA_NAV |
+ B_AX_CTN_CHK_BASIC_NAV | B_AX_CTN_CHK_BTCCA |
+ B_AX_CTN_CHK_EDCCA | B_AX_CTN_CHK_CCA_S80 |
+ B_AX_CTN_CHK_CCA_S40 | B_AX_CTN_CHK_CCA_S20 |
+ B_AX_CTN_CHK_CCA_P20 | B_AX_SIFS_CHK_EDCCA);
+ val &= ~(B_AX_TB_CHK_TX_NAV | B_AX_TB_CHK_CCA_S80 |
+ B_AX_TB_CHK_CCA_S40 | B_AX_TB_CHK_CCA_S20 |
+ B_AX_SIFS_CHK_CCA_S80 | B_AX_SIFS_CHK_CCA_S40 |
+ B_AX_SIFS_CHK_CCA_S20 | B_AX_CTN_CHK_TXNAV);
+
+ rtw89_write32(rtwdev, reg, val);
+
+ _patch_dis_resp_chk(rtwdev, mac_idx);
+
+ return 0;
+}
+
+static int spatial_reuse_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+ reg = rtw89_mac_reg_by_idx(R_AX_RX_SR_CTRL, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, B_AX_SR_EN);
+
+ return 0;
+}
+
+static int tmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_MAC_LOOPBACK, mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_MACLBK_EN);
+
+ return 0;
+}
+
+static int trxptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg, val, sifs;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_0, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val &= ~B_AX_WMAC_SPEC_SIFS_CCK_MASK;
+ val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_CCK_MASK, WMAC_SPEC_SIFS_CCK);
+
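+ /* OFDM SIFS timing differs per chip generation. */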
+ switch (rtwdev->chip->chip_id) {
+ case RTL8852A:
+ sifs = WMAC_SPEC_SIFS_OFDM_52A;
+ break;
+ case RTL8852B:
+ sifs = WMAC_SPEC_SIFS_OFDM_52B;
+ break;
+ default:
+ sifs = WMAC_SPEC_SIFS_OFDM_52C;
+ break;
+ }
+ val &= ~B_AX_WMAC_SPEC_SIFS_OFDM_MASK;
+ val |= FIELD_PREP(B_AX_WMAC_SPEC_SIFS_OFDM_MASK, sifs);
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RXTRIG_TEST_USER_2, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_RXTRIG_FCSCHK_EN);
+
+ return 0;
+}
+
+static int rmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+#define TRXCFG_RMAC_CCA_TO 32
+#define TRXCFG_RMAC_DATA_TO 15
+#define RX_MAX_LEN_UNIT 512
+#define PLD_RLS_MAX_PG 127
+ int ret;
+ u32 reg, rx_max_len, rx_qta;
+ u16 val;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RESPBA_CAM_CTRL, mac_idx);
+ rtw89_write8_set(rtwdev, reg, B_AX_SSN_SEL);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx);
+ val = rtw89_read16(rtwdev, reg);
+ val = u16_replace_bits(val, TRXCFG_RMAC_DATA_TO,
+ B_AX_RX_DLK_DATA_TIME_MASK);
+ val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO,
+ B_AX_RX_DLK_CCA_TIME_MASK);
+ rtw89_write16(rtwdev, reg, val);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx);
+ rtw89_write8_mask(rtwdev, reg, B_AX_CH_EN_MASK, 0x1);
+
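+ /* Derive the maximum RX MPDU length from this band's RX PLE quota:
+  * (pages - 1) * PLE page size, in RX_MAX_LEN_UNIT bytes, clamped to
+  * the field maximum.
+  */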
+ reg = rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, mac_idx);
+ if (mac_idx == RTW89_MAC_0)
+ rx_qta = rtwdev->mac.dle_info.c0_rx_qta;
+ else
+ rx_qta = rtwdev->mac.dle_info.c1_rx_qta;
+ rx_qta = rx_qta > PLD_RLS_MAX_PG ? PLD_RLS_MAX_PG : rx_qta;
+ rx_max_len = (rx_qta - 1) * rtwdev->mac.dle_info.ple_pg_size /
+ RX_MAX_LEN_UNIT;
+ rx_max_len = rx_max_len > B_AX_RX_MPDU_MAX_LEN_SIZE ?
+ B_AX_RX_MPDU_MAX_LEN_SIZE : rx_max_len;
+ rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len);
+
+ if (rtwdev->chip->chip_id == RTL8852A &&
+ rtwdev->hal.cv == CHIP_CBV) {
+ rtw89_write16_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_DLK_PROTECT_CTL, mac_idx),
+ B_AX_RX_DLK_CCA_TIME_MASK, 0);
+ rtw89_write16_set(rtwdev, rtw89_mac_reg_by_idx(R_AX_RCR, mac_idx),
+ BIT(12));
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PLCP_HDR_FLTR, mac_idx);
+ rtw89_write8_clr(rtwdev, reg, B_AX_VHT_SU_SIGB_CRC_CHK);
+
+ return ret;
+}
+
+static int cmac_com_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val = u32_replace_bits(val, 0, B_AX_TXSC_20M_MASK);
+ val = u32_replace_bits(val, 0, B_AX_TXSC_40M_MASK);
+ val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK);
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+static bool is_qta_dbcc(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+{
+ const struct rtw89_dle_mem *cfg;
+
+ cfg = get_dle_mem_cfg(rtwdev, mode);
+ if (!cfg) {
+ rtw89_err(rtwdev, "[ERR]get_dle_mem_cfg\n");
+ return false;
+ }
+
+ return (cfg->ple_min_qt->cma1_dma && cfg->ple_max_qt->cma1_dma);
+}
+
+static int ptcl_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 val, reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
+ reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val = u32_replace_bits(val, S_AX_CTS2S_TH_1K,
+ B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK);
+ val |= B_AX_HW_CTS2SELF_EN;
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_FSM_MON, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val = u32_replace_bits(val, S_AX_PTCL_TO_2MS, B_AX_PTCL_TX_ARB_TO_THR_MASK);
+ val &= ~B_AX_PTCL_TX_ARB_TO_MODE;
+ rtw89_write32(rtwdev, reg, val);
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_SIFS_SETTING, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val = u32_replace_bits(val, S_AX_CTS2S_TH_SEC_256B, B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK);
+ val |= B_AX_HW_CTS2SELF_EN;
+ rtw89_write32(rtwdev, reg, val);
+
+ return 0;
+}
+
+static int cmac_init(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+
+ ret = scheduler_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d SCH init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = addr_cam_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d ADDR_CAM reset %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = rx_fltr_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d RX filter init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = cca_ctrl_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d CCA CTRL init %d\n", mac_idx,
+ ret);
+ return ret;
+ }
+
+ ret = spatial_reuse_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d Spatial Reuse init %d\n",
+ mac_idx, ret);
+ return ret;
+ }
+
+ ret = tmac_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d TMAC init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = trxptcl_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d TRXPTCL init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = rmac_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d RMAC init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = cmac_com_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d Com init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ ret = ptcl_init(rtwdev, mac_idx);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d PTCL init %d\n", mac_idx, ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int rtw89_mac_read_phycap(struct rtw89_dev *rtwdev,
+ struct rtw89_mac_c2h_info *c2h_info)
+{
+ struct rtw89_mac_h2c_info h2c_info = {0};
+ int ret;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_GET_FEATURE;
+ h2c_info.content_len = 0;
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info->id != RTW89_FWCMD_C2HREG_FUNC_PHY_CAP)
+ return -EINVAL;
+
+ return 0;
+}
+
+int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_mac_c2h_info c2h_info = {0};
+ struct rtw89_c2h_phy_cap *cap =
+ (struct rtw89_c2h_phy_cap *)&c2h_info.c2hreg[0];
+ int ret;
+
+ ret = rtw89_mac_read_phycap(rtwdev, &c2h_info);
+ if (ret)
+ return ret;
+
+ hal->tx_nss = cap->tx_nss ?
+ min_t(u8, cap->tx_nss, chip->tx_nss) : chip->tx_nss;
+ hal->rx_nss = cap->rx_nss ?
+ min_t(u8, cap->rx_nss, chip->rx_nss) : chip->rx_nss;
+
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "phycap hal/phy/chip: tx_nss=0x%x/0x%x/0x%x rx_nss=0x%x/0x%x/0x%x\n",
+ hal->tx_nss, cap->tx_nss, chip->tx_nss,
+ hal->rx_nss, cap->rx_nss, chip->rx_nss);
+
+ return 0;
+}
+
+static int rtw89_hw_sch_tx_en_h2c(struct rtw89_dev *rtwdev, u8 band,
+ u16 tx_en_u16, u16 mask_u16)
+{
+ int ret;
+ struct rtw89_mac_c2h_info c2h_info = {0};
+ struct rtw89_mac_h2c_info h2c_info = {0};
+ struct rtw89_h2creg_sch_tx_en *h2creg =
+ (struct rtw89_h2creg_sch_tx_en *)h2c_info.h2creg;
+
+ h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_SCH_TX_EN;
+ h2c_info.content_len = sizeof(*h2creg) - RTW89_H2CREG_HDR_LEN;
+ h2creg->tx_en = tx_en_u16;
+ h2creg->mask = mask_u16;
+ h2creg->band = band;
+
+ ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info);
+ if (ret)
+ return ret;
+
+ if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_TX_PAUSE_RPT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int rtw89_set_hw_sch_tx_en(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u16 tx_en, u16 tx_en_mask)
+{
+ u32 reg = rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx);
+ u16 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (test_bit(RTW89_FLAG_FW_RDY, rtwdev->flags))
+ return rtw89_hw_sch_tx_en_h2c(rtwdev, mac_idx,
+ tx_en, tx_en_mask);
+
+ val = rtw89_read16(rtwdev, reg);
+ val = (val & ~tx_en_mask) | (tx_en & tx_en_mask);
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u16 *tx_en, enum rtw89_sch_tx_sel sel)
+{
+ int ret;
+
+ *tx_en = rtw89_read16(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_CTN_TXEN, mac_idx));
+
+ switch (sel) {
+ case RTW89_SCH_TX_SEL_ALL:
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_HIQ:
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_HGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MG0:
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx,
+ 0, B_AX_CTN_TXEN_MGQ);
+ if (ret)
+ return ret;
+ break;
+ case RTW89_SCH_TX_SEL_MACID:
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, 0, 0xffff);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en)
+{
+ int ret;
+
+ ret = rtw89_set_hw_sch_tx_en(rtwdev, mac_idx, tx_en, 0xffff);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static u16 rtw89_mac_dle_buf_req(struct rtw89_dev *rtwdev, u16 buf_len,
+ bool wd)
+{
+ u32 val, reg;
+ int ret;
+
+ reg = wd ? R_AX_WD_BUF_REQ : R_AX_PL_BUF_REQ;
+ val = buf_len;
+ val |= B_AX_WD_BUF_REQ_EXEC;
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = wd ? R_AX_WD_BUF_STATUS : R_AX_PL_BUF_STATUS;
+
+ ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_BUF_STAT_DONE,
+ 1, 2000, false, rtwdev, reg);
+ if (ret)
+ return 0xffff;
+
+ return FIELD_GET(B_AX_WD_BUF_STAT_PKTID_MASK, val);
+}
+
+static int rtw89_mac_set_cpuio(struct rtw89_dev *rtwdev,
+ struct rtw89_cpuio_ctrl *ctrl_para,
+ bool wd)
+{
+ u32 val, cmd_type, reg;
+ int ret;
+
+ cmd_type = ctrl_para->cmd_type;
+
+ reg = wd ? R_AX_WD_CPUQ_OP_2 : R_AX_PL_CPUQ_OP_2;
+ val = 0;
+ val = u32_replace_bits(val, ctrl_para->start_pktid,
+ B_AX_WD_CPUQ_OP_STRT_PKTID_MASK);
+ val = u32_replace_bits(val, ctrl_para->end_pktid,
+ B_AX_WD_CPUQ_OP_END_PKTID_MASK);
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = wd ? R_AX_WD_CPUQ_OP_1 : R_AX_PL_CPUQ_OP_1;
+ val = 0;
+ val = u32_replace_bits(val, ctrl_para->src_pid,
+ B_AX_CPUQ_OP_SRC_PID_MASK);
+ val = u32_replace_bits(val, ctrl_para->src_qid,
+ B_AX_CPUQ_OP_SRC_QID_MASK);
+ val = u32_replace_bits(val, ctrl_para->dst_pid,
+ B_AX_CPUQ_OP_DST_PID_MASK);
+ val = u32_replace_bits(val, ctrl_para->dst_qid,
+ B_AX_CPUQ_OP_DST_QID_MASK);
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = wd ? R_AX_WD_CPUQ_OP_0 : R_AX_PL_CPUQ_OP_0;
+ val = 0;
+ val = u32_replace_bits(val, cmd_type,
+ B_AX_CPUQ_OP_CMD_TYPE_MASK);
+ val = u32_replace_bits(val, ctrl_para->macid,
+ B_AX_CPUQ_OP_MACID_MASK);
+ val = u32_replace_bits(val, ctrl_para->pkt_num,
+ B_AX_CPUQ_OP_PKTNUM_MASK);
+ val |= B_AX_WD_CPUQ_OP_EXEC;
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = wd ? R_AX_WD_CPUQ_OP_STATUS : R_AX_PL_CPUQ_OP_STATUS;
+
+ ret = read_poll_timeout(rtw89_read32, val, val & B_AX_WD_CPUQ_OP_STAT_DONE,
+ 1, 2000, false, rtwdev, reg);
+ if (ret)
+ return ret;
+
+ if (cmd_type == CPUIO_OP_CMD_GET_1ST_PID ||
+ cmd_type == CPUIO_OP_CMD_GET_NEXT_PID)
+ ctrl_para->pktid = FIELD_GET(B_AX_WD_CPUQ_OP_PKTID_MASK, val);
+
+ return 0;
+}
+
+static int dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mode)
+{
+ const struct rtw89_dle_mem *cfg;
+ struct rtw89_cpuio_ctrl ctrl_para = {0};
+ u16 pkt_id;
+ int ret;
+
+ cfg = get_dle_mem_cfg(rtwdev, mode);
+ if (!cfg) {
+ rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
+ return -EINVAL;
+ }
+
+ if (dle_used_size(cfg->wde_size, cfg->ple_size) != rtwdev->chip->fifo_size) {
+ rtw89_err(rtwdev, "[ERR]wd/dle mem cfg\n");
+ return -EINVAL;
+ }
+
+ dle_quota_cfg(rtwdev, cfg, INVALID_QT_WCPU);
+
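+ /* Claim one small WDE buffer and enqueue it to the WD release port's
+  * no-report queue (likewise for PLE below), presumably to refresh the
+  * DLE accounting after the quota change.
+  */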
+ pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, true);
+ if (pkt_id == 0xffff) {
+ rtw89_err(rtwdev, "[ERR]WDE DLE buf req\n");
+ return -ENOMEM;
+ }
+
+ ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
+ ctrl_para.start_pktid = pkt_id;
+ ctrl_para.end_pktid = pkt_id;
+ ctrl_para.pkt_num = 0;
+ ctrl_para.dst_pid = WDE_DLE_PORT_ID_WDRLS;
+ ctrl_para.dst_qid = WDE_DLE_QUEID_NO_REPORT;
+ ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]WDE DLE enqueue to head\n");
+ return -EFAULT;
+ }
+
+ pkt_id = rtw89_mac_dle_buf_req(rtwdev, 0x20, false);
+ if (pkt_id == 0xffff) {
+ rtw89_err(rtwdev, "[ERR]PLE DLE buf req\n");
+ return -ENOMEM;
+ }
+
+ ctrl_para.cmd_type = CPUIO_OP_CMD_ENQ_TO_HEAD;
+ ctrl_para.start_pktid = pkt_id;
+ ctrl_para.end_pktid = pkt_id;
+ ctrl_para.pkt_num = 0;
+ ctrl_para.dst_pid = PLE_DLE_PORT_ID_PLRLS;
+ ctrl_para.dst_qid = PLE_DLE_QUEID_NO_REPORT;
+ ret = rtw89_mac_set_cpuio(rtwdev, &ctrl_para, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]PLE DLE enqueue to head\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int band_idle_ck_b(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ int ret;
+ u32 reg;
+ u8 val;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_TX_CTN_SEL, mac_idx);
+
+ ret = read_poll_timeout(rtw89_read8, val,
+ (val & B_AX_PTCL_TX_ON_STAT) == 0,
+ SW_CVR_DUR_US,
+ SW_CVR_DUR_US * PTCL_IDLE_POLL_CNT,
+ false, rtwdev, reg);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
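+/* Enable the second band (CMAC1): pause all MACIDs and stop scheduled TX on
+ * band 0, wait for the PTCL state machine to go idle, reprogram the DLE
+ * quota for the current QTA mode, then restore TX and bring up CMAC1.
+ */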
+static int band1_enable(struct rtw89_dev *rtwdev)
+{
+ int ret, i;
+ u32 sleep_bak[4] = {0};
+ u32 pause_bak[4] = {0};
+ u16 tx_en;
+
+ ret = rtw89_mac_stop_sch_tx(rtwdev, 0, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]stop sch tx %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < 4; i++) {
+ sleep_bak[i] = rtw89_read32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4);
+ pause_bak[i] = rtw89_read32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4);
+ rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, U32_MAX);
+ rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, U32_MAX);
+ }
+
+ ret = band_idle_ck_b(rtwdev, 0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]tx idle poll %d\n", ret);
+ return ret;
+ }
+
+ ret = dle_quota_change(rtwdev, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE quota change %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < 4; i++) {
+ rtw89_write32(rtwdev, R_AX_MACID_SLEEP_0 + i * 4, sleep_bak[i]);
+ rtw89_write32(rtwdev, R_AX_SS_MACID_PAUSE_0 + i * 4, pause_bak[i]);
+ }
+
+ ret = rtw89_mac_resume_sch_tx(rtwdev, 0, tx_en);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC1 resume sch tx %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_func_en(rtwdev, 1, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC1 func en %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_init(rtwdev, 1);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC1 init %d\n", ret);
+ return ret;
+ }
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_ISO_CTRL_EXTEND,
+ B_AX_R_SYM_FEN_WLBBFUN_1 | B_AX_R_SYM_FEN_WLBBGLB_1);
+
+ return 0;
+}
+
+static int rtw89_mac_enable_imr(struct rtw89_dev *rtwdev, u8 mac_idx,
+ enum rtw89_mac_hwmod_sel sel)
+{
+ u32 reg, val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, sel);
+ if (ret) {
+ rtw89_err(rtwdev, "MAC%d mac_idx%d is not ready\n",
+ sel, mac_idx);
+ return ret;
+ }
+
+ if (sel == RTW89_DMAC_SEL) {
+ rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN |
+ B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR_B1,
+ B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN |
+ B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_HOST_DISPATCHER_ERR_IMR,
+ B_AX_HDT_PKT_FAIL_DBG_INT_EN |
+ B_AX_HDT_OFFSET_UNMATCH_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_CPU_DISPATCHER_ERR_IMR,
+ B_AX_CPU_SHIFT_EN_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PLE_ERR_IMR,
+ B_AX_PLE_GETNPG_STRPG_ERR_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_WDRLS_ERR_IMR,
+ B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN);
+ rtw89_write32_set(rtwdev, R_AX_HD0IMR, B_AX_WDT_PTFM_INT_EN);
+ rtw89_write32_clr(rtwdev, R_AX_TXPKTCTL_ERR_IMR_ISR,
+ B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN);
+ } else if (sel == RTW89_CMAC_SEL) {
+ reg = rtw89_mac_reg_by_idx(R_AX_SCHEDULE_ERR_IMR, mac_idx);
+ rtw89_write32_clr(rtwdev, reg,
+ B_AX_SORT_NON_IDLE_ERR_INT_EN);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_DLE_CTRL, mac_idx);
+ rtw89_write32_clr(rtwdev, reg,
+ B_AX_NO_RESERVE_PAGE_ERR_IMR |
+ B_AX_RXDATA_FSM_HANG_ERROR_IMR);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PTCL_IMR0, mac_idx);
+ val = B_AX_F2PCMD_USER_ALLC_ERR_INT_EN |
+ B_AX_TX_RECORD_PKTID_ERR_INT_EN |
+ B_AX_FSM_TIMEOUT_ERR_INT_EN;
+ rtw89_write32(rtwdev, reg, val);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_PHYINFO_ERR_IMR, mac_idx);
+ rtw89_write32_set(rtwdev, reg,
+ B_AX_PHY_TXON_TIMEOUT_INT_EN |
+ B_AX_CCK_CCA_TIMEOUT_INT_EN |
+ B_AX_OFDM_CCA_TIMEOUT_INT_EN |
+ B_AX_DATA_ON_TIMEOUT_INT_EN |
+ B_AX_STS_ON_TIMEOUT_INT_EN |
+ B_AX_CSI_ON_TIMEOUT_INT_EN);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_RMAC_ERR_ISR, mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val |= (B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN |
+ B_AX_RMAC_RX_TIMEOUT_INT_EN |
+ B_AX_RMAC_CSI_TIMEOUT_INT_EN);
+ val &= ~(B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN |
+ B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN |
+ B_AX_RMAC_CCA_TIMEOUT_INT_EN |
+ B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN);
+ rtw89_write32(rtwdev, reg, val);
+ } else {
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int rtw89_mac_dbcc_enable(struct rtw89_dev *rtwdev, bool enable)
+{
+ int ret = 0;
+
+ if (enable) {
+ ret = band1_enable(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] band1_enable %d\n", ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_1, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] enable CMAC1 IMR %d\n", ret);
+ return ret;
+ }
+ } else {
+ rtw89_err(rtwdev, "[ERR] disable dbcc is not implemented not\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int set_host_rpr(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->hci.type == RTW89_HCI_TYPE_PCIE) {
+ rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
+ B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_POH);
+ rtw89_write32_set(rtwdev, R_AX_RLSRPT0_CFG0,
+ B_AX_RLSRPT0_FLTR_MAP_MASK);
+ } else {
+ rtw89_write32_mask(rtwdev, R_AX_WDRLS_CFG,
+ B_AX_WDRLS_MODE_MASK, RTW89_RPR_MODE_STF);
+ rtw89_write32_clr(rtwdev, R_AX_RLSRPT0_CFG0,
+ B_AX_RLSRPT0_FLTR_MAP_MASK);
+ }
+
+ rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_AGGNUM_MASK, 30);
+ rtw89_write32_mask(rtwdev, R_AX_RLSRPT0_CFG1, B_AX_RLSRPT0_TO_MASK, 255);
+
+ return 0;
+}
+
+static int rtw89_mac_trx_init(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_qta_mode qta_mode = rtwdev->mac.qta_mode;
+ int ret;
+
+ ret = dmac_init(rtwdev, 0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DMAC init %d\n", ret);
+ return ret;
+ }
+
+ ret = cmac_init(rtwdev, 0);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]CMAC%d init %d\n", 0, ret);
+ return ret;
+ }
+
+ if (is_qta_dbcc(rtwdev, qta_mode)) {
+ ret = rtw89_mac_dbcc_enable(rtwdev, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbcc_enable init %d\n", ret);
+ return ret;
+ }
+ }
+
+ ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_DMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] enable DMAC IMR %d\n", ret);
+ return ret;
+ }
+
+ ret = rtw89_mac_enable_imr(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] to enable CMAC0 IMR %d\n", ret);
+ return ret;
+ }
+
+ ret = set_host_rpr(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] set host rpr %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void rtw89_mac_disable_cpu(struct rtw89_dev *rtwdev)
+{
+ clear_bit(RTW89_FLAG_FW_RDY, rtwdev->flags);
+
+ rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+}
+
+static int rtw89_mac_enable_cpu(struct rtw89_dev *rtwdev, u8 boot_reason,
+ bool dlfw)
+{
+ u32 val;
+ int ret;
+
+ if (rtw89_read32(rtwdev, R_AX_PLATFORM_ENABLE) & B_AX_WCPU_EN)
+ return -EFAULT;
+
+ rtw89_write32(rtwdev, R_AX_HALT_H2C_CTRL, 0);
+ rtw89_write32(rtwdev, R_AX_HALT_C2H_CTRL, 0);
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_CLK_CTRL, B_AX_CPU_CLK_EN);
+
+ val = rtw89_read32(rtwdev, R_AX_WCPU_FW_CTRL);
+ val &= ~(B_AX_WCPU_FWDL_EN | B_AX_H2C_PATH_RDY | B_AX_FWDL_PATH_RDY);
+ val = u32_replace_bits(val, RTW89_FWDL_INITIAL_STATE,
+ B_AX_WCPU_FWDL_STS_MASK);
+
+ if (dlfw)
+ val |= B_AX_WCPU_FWDL_EN;
+
+ rtw89_write32(rtwdev, R_AX_WCPU_FW_CTRL, val);
+ rtw89_write16_mask(rtwdev, R_AX_BOOT_REASON, B_AX_BOOT_REASON_MASK,
+ boot_reason);
+ rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_WCPU_EN);
+
+ if (!dlfw) {
+ mdelay(5);
+
+ ret = rtw89_fw_check_rdy(rtwdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_mac_fw_dl_pre_init(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ val = B_AX_MAC_FUNC_EN | B_AX_DMAC_FUNC_EN | B_AX_DISPATCHER_EN |
+ B_AX_PKT_BUF_EN;
+ rtw89_write32(rtwdev, R_AX_DMAC_FUNC_EN, val);
+
+ val = B_AX_DISPATCHER_CLK_EN;
+ rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val);
+
+ ret = dle_init(rtwdev, RTW89_QTA_DLFW, rtwdev->mac.qta_mode);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]DLE pre init %d\n", ret);
+ return ret;
+ }
+
+ ret = hfc_init(rtwdev, true, false, true);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]HCI FC pre init %d\n", ret);
+ return ret;
+ }
+
+ return ret;
+}
+
+static void rtw89_mac_hci_func_en(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN,
+ B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN);
+}
+
+void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL,
+ B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
+ B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
+ rtw89_write8_set(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+}
+
+void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev)
+{
+ rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN,
+ B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN);
+ rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL,
+ B_AX_WLRF1_CTRL_7 | B_AX_WLRF1_CTRL_1 |
+ B_AX_WLRF_CTRL_7 | B_AX_WLRF_CTRL_1);
+ rtw89_write8_clr(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_ALL_CYCLE);
+}
+
+int rtw89_mac_partial_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret) {
+ rtw89_mac_power_switch(rtwdev, false);
+ ret = rtw89_mac_power_switch(rtwdev, true);
+ if (ret)
+ return ret;
+ }
+
+ rtw89_mac_hci_func_en(rtwdev);
+
+ if (rtwdev->hci.ops->mac_pre_init) {
+ ret = rtwdev->hci.ops->mac_pre_init(rtwdev);
+ if (ret)
+ return ret;
+ }
+
+ ret = rtw89_mac_fw_dl_pre_init(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_mac_disable_cpu(rtwdev);
+ ret = rtw89_mac_enable_cpu(rtwdev, 0, true);
+ if (ret)
+ return ret;
+
+ ret = rtw89_fw_download(rtwdev, RTW89_FW_NORMAL);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int rtw89_mac_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_mac_partial_init(rtwdev);
+ if (ret)
+ goto fail;
+
+ rtw89_mac_enable_bb_rf(rtwdev);
+
+ ret = rtw89_mac_sys_init(rtwdev);
+ if (ret)
+ goto fail;
+
+ ret = rtw89_mac_trx_init(rtwdev);
+ if (ret)
+ goto fail;
+
+ if (rtwdev->hci.ops->mac_post_init) {
+ ret = rtwdev->hci.ops->mac_post_init(rtwdev);
+ if (ret)
+ goto fail;
+ }
+
+ rtw89_fw_send_all_early_h2c(rtwdev);
+ rtw89_fw_h2c_set_ofld_cfg(rtwdev);
+
+ return ret;
+fail:
+ rtw89_mac_power_switch(rtwdev, false);
+
+ return ret;
+}
+
+static void rtw89_mac_dmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
+{
+ u8 i;
+
+ for (i = 0; i < 4; i++) {
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR,
+ DMAC_TBL_BASE_ADDR + (macid << 4) + (i << 2));
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0);
+ }
+}
+
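+/* Write the default per-MACID CMAC table entry through the indirect access
+ * window; the non-zero words are assumed to be vendor hardware defaults.
+ */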
+static void rtw89_mac_cmac_tbl_init(struct rtw89_dev *rtwdev, u8 macid)
+{
+ rtw89_write32(rtwdev, R_AX_FILTER_MODEL_ADDR,
+ CMAC_TBL_BASE_ADDR + macid * CCTL_INFO_SIZE);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY, 0x4);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 4, 0x400A0004);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 8, 0);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 12, 0);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 16, 0);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 20, 0xE43000B);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 24, 0);
+ rtw89_write32(rtwdev, R_AX_INDIR_ACCESS_ENTRY + 28, 0xB8109);
+}
+
+static int rtw89_set_macid_pause(struct rtw89_dev *rtwdev, u8 macid, bool pause)
+{
+ u8 sh = FIELD_GET(GENMASK(4, 0), macid);
+ u8 grp = macid >> 5;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, RTW89_MAC_0, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ rtw89_fw_h2c_macid_pause(rtwdev, sh, grp, pause);
+
+ return 0;
+}
+
+static const struct rtw89_port_reg rtw_port_base = {
+ .port_cfg = R_AX_PORT_CFG_P0,
+ .tbtt_prohib = R_AX_TBTT_PROHIB_P0,
+ .bcn_area = R_AX_BCN_AREA_P0,
+ .bcn_early = R_AX_BCNERLYINT_CFG_P0,
+ .tbtt_early = R_AX_TBTTERLYINT_CFG_P0,
+ .tbtt_agg = R_AX_TBTT_AGG_P0,
+ .bcn_space = R_AX_BCN_SPACE_CFG_P0,
+ .bcn_forcetx = R_AX_BCN_FORCETX_P0,
+ .bcn_err_cnt = R_AX_BCN_ERR_CNT_P0,
+ .bcn_err_flag = R_AX_BCN_ERR_FLAG_P0,
+ .dtim_ctrl = R_AX_DTIM_CTRL_P0,
+ .tbtt_shift = R_AX_TBTT_SHIFT_P0,
+ .bcn_cnt_tmr = R_AX_BCN_CNT_TMR_P0,
+ .tsftr_l = R_AX_TSFTR_LOW_P0,
+ .tsftr_h = R_AX_TSFTR_HIGH_P0
+};
+
+#define BCN_INTERVAL 100
+#define BCN_ERLY_DEF 160
+#define BCN_SETUP_DEF 2
+#define BCN_HOLD_DEF 200
+#define BCN_MASK_DEF 0
+#define TBTT_ERLY_DEF 5
+#define BCN_SET_UNIT 32
+#define BCN_ERLY_SET_DLY (10 * 2)
+
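+/* If the port function is already enabled, quiesce its TBTT and beacon-early
+ * timers, wait one beacon interval, then disable the port and reset its TSF
+ * and beacon counter.
+ */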
+static void rtw89_mac_port_cfg_func_sw(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ if (!rtw89_read32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN))
+ return;
+
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_SETUP_MASK);
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib, B_AX_TBTT_HOLD_MASK, 1);
+ rtw89_write16_port_clr(rtwdev, rtwvif, p->tbtt_early, B_AX_TBTTERLY_MASK);
+ rtw89_write16_port_clr(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK);
+
+ msleep(vif->bss_conf.beacon_int + 1);
+
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN |
+ B_AX_BRK_SETUP);
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSFTR_RST);
+ rtw89_write32_port(rtwdev, rtwvif, p->bcn_cnt_tmr, 0);
+}
+
+static void rtw89_mac_port_cfg_tx_rpt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TXBCN_RPT_EN);
+}
+
+static void rtw89_mac_port_cfg_rx_rpt(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_RXBCN_RPT_EN);
+}
+
+static void rtw89_mac_port_cfg_net_type(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->port_cfg, B_AX_NET_TYPE_MASK,
+ rtwvif->net_type);
+}
+
+static void rtw89_mac_port_cfg_bcn_prct(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ bool en = rtwvif->net_type != RTW89_NET_TYPE_NO_LINK;
+ u32 bits = B_AX_TBTT_PROHIB_EN | B_AX_BRK_SETUP;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bits);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bits);
+}
+
+static void rtw89_mac_port_cfg_rx_sw(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+ u32 bit = B_AX_RX_BSSID_FIT_EN;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, bit);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, bit);
+}
+
+static void rtw89_mac_port_cfg_rx_sync(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_INFRA ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_TSF_UDT_EN);
+}
+
+static void rtw89_mac_port_cfg_tx_sw(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ bool en = rtwvif->net_type == RTW89_NET_TYPE_AP_MODE ||
+ rtwvif->net_type == RTW89_NET_TYPE_AD_HOC;
+
+ if (en)
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
+ else
+ rtw89_write32_port_clr(rtwdev, rtwvif, p->port_cfg, B_AX_BCNTX_EN);
+}
+
+static void rtw89_mac_port_cfg_bcn_intv(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ const struct rtw89_port_reg *p = &rtw_port_base;
+ u16 bcn_int = vif->bss_conf.beacon_int ? vif->bss_conf.beacon_int : BCN_INTERVAL;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_space, B_AX_BCN_SPACE_MASK,
+ bcn_int);
+}
+
+static void rtw89_mac_port_cfg_bcn_setup_time(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
+ B_AX_TBTT_SETUP_MASK, BCN_SETUP_DEF);
+}
+
+static void rtw89_mac_port_cfg_bcn_hold_time(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->tbtt_prohib,
+ B_AX_TBTT_HOLD_MASK, BCN_HOLD_DEF);
+}
+
+static void rtw89_mac_port_cfg_bcn_mask_area(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_area,
+ B_AX_BCN_MSK_AREA_MASK, BCN_MASK_DEF);
+}
+
+static void rtw89_mac_port_cfg_tbtt_early(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write16_port_mask(rtwdev, rtwvif, p->tbtt_early,
+ B_AX_TBTTERLY_MASK, TBTT_ERLY_DEF);
+}
+
+static void rtw89_mac_port_cfg_bss_color(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ static const u32 masks[RTW89_PORT_NUM] = {
+ B_AX_BSS_COLOB_AX_PORT_0_MASK, B_AX_BSS_COLOB_AX_PORT_1_MASK,
+ B_AX_BSS_COLOB_AX_PORT_2_MASK, B_AX_BSS_COLOB_AX_PORT_3_MASK,
+ B_AX_BSS_COLOB_AX_PORT_4_MASK,
+ };
+ u8 port = rtwvif->port;
+ u32 reg_base;
+ u32 reg;
+ u8 bss_color;
+
+ bss_color = vif->bss_conf.he_bss_color.color;
+ reg_base = port >= 4 ? R_AX_PTCL_BSS_COLOR_1 : R_AX_PTCL_BSS_COLOR_0;
+ reg = rtw89_mac_reg_by_idx(reg_base, rtwvif->mac_idx);
+ rtw89_write32_mask(rtwdev, reg, masks[port], bss_color);
+}
+
+static void rtw89_mac_port_cfg_mbssid(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ u8 port = rtwvif->port;
+ u32 reg;
+
+ if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE)
+ return;
+
+ if (port == 0) {
+ reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_CTRL, rtwvif->mac_idx);
+ rtw89_write32_clr(rtwdev, reg, B_AX_P0MB_ALL_MASK);
+ }
+}
+
+static void rtw89_mac_port_cfg_hiq_drop(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ u8 port = rtwvif->port;
+ u32 reg;
+ u32 val;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_MBSSID_DROP_0, rtwvif->mac_idx);
+ val = rtw89_read32(rtwdev, reg);
+ val &= ~FIELD_PREP(B_AX_PORT_DROP_4_0_MASK, BIT(port));
+ if (port == 0)
+ val &= ~BIT(0);
+ rtw89_write32(rtwdev, reg, val);
+}
+
+static void rtw89_mac_port_cfg_func_en(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_set(rtwdev, rtwvif, p->port_cfg, B_AX_PORT_FUNC_EN);
+}
+
+static void rtw89_mac_port_cfg_bcn_early(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ const struct rtw89_port_reg *p = &rtw_port_base;
+
+ rtw89_write32_port_mask(rtwdev, rtwvif, p->bcn_early, B_AX_BCNERLY_MASK,
+ BCN_ERLY_DEF);
+}
+
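+/* Per-vif MAC setup: configure the port, clear the DMAC/CMAC tables for the
+ * new MAC ID, unpause it, then create the firmware-side vif, CAM entries and
+ * default CMAC control table.
+ */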
+int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_mac_port_update(rtwdev, rtwvif);
+ if (ret)
+ return ret;
+
+ rtw89_mac_dmac_tbl_init(rtwdev, rtwvif->mac_id);
+ rtw89_mac_cmac_tbl_init(rtwdev, rtwvif->mac_id);
+
+ ret = rtw89_set_macid_pause(rtwdev, rtwvif->mac_id, false);
+ if (ret)
+ return ret;
+
+ ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_CREATE);
+ if (ret)
+ return ret;
+
+ ret = rtw89_cam_init(rtwdev, rtwvif);
+ if (ret)
+ return ret;
+
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret)
+ return ret;
+
+ ret = rtw89_fw_h2c_default_cmac_tbl(rtwdev, rtwvif->mac_id);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_fw_h2c_vif_maintain(rtwdev, rtwvif, RTW89_VIF_REMOVE);
+ if (ret)
+ return ret;
+
+ rtw89_cam_deinit(rtwdev, rtwvif);
+
+ ret = rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
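+/* Apply the whole per-port register configuration for a vif, finishing with
+ * the beacon-early setting after a short settling delay.
+ */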
+int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ u8 port = rtwvif->port;
+
+ if (port >= RTW89_PORT_NUM)
+ return -EINVAL;
+
+ rtw89_mac_port_cfg_func_sw(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tx_rpt(rtwdev, rtwvif, false);
+ rtw89_mac_port_cfg_rx_rpt(rtwdev, rtwvif, false);
+ rtw89_mac_port_cfg_net_type(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_prct(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sw(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_rx_sync(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tx_sw(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_intv(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_setup_time(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_hold_time(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bcn_mask_area(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_tbtt_early(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_bss_color(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_mbssid(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_hiq_drop(rtwdev, rtwvif);
+ rtw89_mac_port_cfg_func_en(rtwdev, rtwvif);
+ fsleep(BCN_ERLY_SET_DLY);
+ rtw89_mac_port_cfg_bcn_early(rtwdev, rtwvif);
+
+ return 0;
+}
+
+int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map,
+ RTW89_MAX_MAC_ID_NUM);
+ if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM)
+ return -ENOSPC;
+
+ ret = rtw89_mac_vif_init(rtwdev, rtwvif);
+ if (ret)
+ goto release_mac_id;
+
+ return 0;
+
+release_mac_id:
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+
+ return ret;
+}
+
+int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ int ret;
+
+ ret = rtw89_mac_vif_deinit(rtwdev, rtwvif);
+ rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id);
+
+ return ret;
+}
+
+static void
+rtw89_mac_c2h_macid_pause(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+}
+
+static void
+rtw89_mac_c2h_rec_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "C2H rev ack recv, cat: %d, class: %d, func: %d, seq : %d\n",
+ RTW89_GET_MAC_C2H_REV_ACK_CAT(c2h->data),
+ RTW89_GET_MAC_C2H_REV_ACK_CLASS(c2h->data),
+ RTW89_GET_MAC_C2H_REV_ACK_FUNC(c2h->data),
+ RTW89_GET_MAC_C2H_REV_ACK_H2C_SEQ(c2h->data));
+}
+
+static void
+rtw89_mac_c2h_done_ack(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "C2H done ack recv, cat: %d, class: %d, func: %d, ret: %d, seq : %d\n",
+ RTW89_GET_MAC_C2H_DONE_ACK_CAT(c2h->data),
+ RTW89_GET_MAC_C2H_DONE_ACK_CLASS(c2h->data),
+ RTW89_GET_MAC_C2H_DONE_ACK_FUNC(c2h->data),
+ RTW89_GET_MAC_C2H_DONE_ACK_H2C_RETURN(c2h->data),
+ RTW89_GET_MAC_C2H_DONE_ACK_H2C_SEQ(c2h->data));
+}
+
+static void
+rtw89_mac_c2h_log(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ rtw89_info(rtwdev, "%*s", RTW89_GET_C2H_LOG_LEN(len),
+ RTW89_GET_C2H_LOG_SRT_PRT(c2h->data));
+}
+
+static
+void (* const rtw89_mac_c2h_ofld_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_EFUSE_DUMP] = NULL,
+ [RTW89_MAC_C2H_FUNC_READ_RSP] = NULL,
+ [RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP] = NULL,
+ [RTW89_MAC_C2H_FUNC_BCN_RESEND] = NULL,
+ [RTW89_MAC_C2H_FUNC_MACID_PAUSE] = rtw89_mac_c2h_macid_pause,
+};
+
+static
+void (* const rtw89_mac_c2h_info_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_MAC_C2H_FUNC_REC_ACK] = rtw89_mac_c2h_rec_ack,
+ [RTW89_MAC_C2H_FUNC_DONE_ACK] = rtw89_mac_c2h_done_ack,
+ [RTW89_MAC_C2H_FUNC_C2H_LOG] = rtw89_mac_c2h_log,
+};
+
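+/* Dispatch a MAC C2H event to its class/function handler; FWDBG events are
+ * silently ignored, unknown classes or functions are only logged.
+ */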
+void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func)
+{
+ void (*handler)(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = NULL;
+
+ switch (class) {
+ case RTW89_MAC_C2H_CLASS_INFO:
+ if (func < RTW89_MAC_C2H_FUNC_INFO_MAX)
+ handler = rtw89_mac_c2h_info_handler[func];
+ break;
+ case RTW89_MAC_C2H_CLASS_OFLD:
+ if (func < RTW89_MAC_C2H_FUNC_OFLD_MAX)
+ handler = rtw89_mac_c2h_ofld_handler[func];
+ break;
+ case RTW89_MAC_C2H_CLASS_FWDBG:
+ return;
+ default:
+ rtw89_info(rtwdev, "c2h class %d not support\n", class);
+ return;
+ }
+ if (!handler) {
+ rtw89_info(rtwdev, "c2h class %d func %d not support\n", class,
+ func);
+ return;
+ }
+ handler(rtwdev, skb, len);
+}
+
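+/* Translate a TX-power register base into an absolute address, rejecting
+ * addresses outside the TX-power CR range and CMAC1 addresses when the DLE
+ * quota mode only enables a single CMAC (SCC).
+ */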
+bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ u32 reg_base, u32 *cr)
+{
+ const struct rtw89_dle_mem *dle_mem = rtwdev->chip->dle_mem;
+ enum rtw89_qta_mode mode = dle_mem->mode;
+ u32 addr = rtw89_mac_reg_by_idx(reg_base, phy_idx);
+
+ if (addr < R_AX_PWR_RATE_CTRL || addr > CMAC1_END_ADDR) {
+ rtw89_err(rtwdev, "[TXPWR] addr=0x%x exceed txpwr cr\n",
+ addr);
+ goto error;
+ }
+
+ if (addr >= CMAC1_START_ADDR && addr <= CMAC1_END_ADDR &&
+ mode == RTW89_QTA_SCC) {
+ rtw89_err(rtwdev,
+ "[TXPWR] addr=0x%x but hw not enabled\n",
+ addr);
+ goto error;
+ }
+
+ *cr = addr;
+ return true;
+
+error:
+ rtw89_err(rtwdev, "[TXPWR] check txpwr cr 0x%x(phy%d) fail\n",
+ addr, phy_idx);
+
+ return false;
+}
+
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_idx, bool enable)
+{
+ u32 reg = rtw89_mac_reg_by_idx(R_AX_PPDU_STAT, mac_idx);
+ int ret = 0;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (!enable) {
+ rtw89_write32_clr(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN);
+ return ret;
+ }
+
+ rtw89_write32(rtwdev, reg, B_AX_PPDU_STAT_RPT_EN |
+ B_AX_APP_MAC_INFO_RPT |
+ B_AX_APP_RX_CNT_RPT | B_AX_APP_PLCP_HDR_RPT |
+ B_AX_PPDU_STAT_RPT_CRC32);
+ rtw89_write32_mask(rtwdev, R_AX_HW_RPT_FWD, B_AX_FWD_PPDU_STAT_MASK,
+ RTW89_PRPT_DEST_HOST);
+
+ return ret;
+}
+
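+/* Program the RTS length and TX-time thresholds: an explicit wiphy RTS
+ * threshold sets the length threshold (16-byte units) and relaxes the time
+ * threshold to its maximum, while -1 falls back to the defaults.
+ */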
+void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+#define MAC_AX_TIME_TH_SH 5
+#define MAC_AX_LEN_TH_SH 4
+#define MAC_AX_TIME_TH_MAX 255
+#define MAC_AX_LEN_TH_MAX 255
+#define MAC_AX_TIME_TH_DEF 88
+#define MAC_AX_LEN_TH_DEF 4080
+ struct ieee80211_hw *hw = rtwdev->hw;
+ u32 rts_threshold = hw->wiphy->rts_threshold;
+ u32 time_th, len_th;
+ u32 reg;
+
+ if (rts_threshold == (u32)-1) {
+ time_th = MAC_AX_TIME_TH_DEF;
+ len_th = MAC_AX_LEN_TH_DEF;
+ } else {
+ time_th = MAC_AX_TIME_TH_MAX << MAC_AX_TIME_TH_SH;
+ len_th = rts_threshold;
+ }
+
+ time_th = min_t(u32, time_th >> MAC_AX_TIME_TH_SH, MAC_AX_TIME_TH_MAX);
+ len_th = min_t(u32, len_th >> MAC_AX_LEN_TH_SH, MAC_AX_LEN_TH_MAX);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_AGG_LEN_HT_0, mac_idx);
+ rtw89_write16_mask(rtwdev, reg, B_AX_RTS_TXTIME_TH_MASK, time_th);
+ rtw89_write16_mask(rtwdev, reg, B_AX_RTS_LEN_TH_MASK, len_th);
+}
+
+void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop)
+{
+ bool empty;
+ int ret;
+
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ return;
+
+ ret = read_poll_timeout(dle_is_txq_empty, empty, empty,
+ 10000, 200000, false, rtwdev);
+ if (ret && !drop && (rtwdev->total_sta_assoc || rtwdev->scanning))
+ rtw89_info(rtwdev, "timed out to flush queues\n");
+}
+
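+/* Basic BT-coexistence setup: enable PTA and BT statistics, keep only WL RX
+ * control in the LTE coex register, then apply the requested PTA mode (RTK
+ * or CSR) and GNT signal direction.
+ */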
+int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex)
+{
+ u8 val;
+ u16 val16;
+ u32 val32;
+ int ret;
+
+ rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT);
+ rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN);
+ rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8);
+ rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK);
+ rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16);
+ rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24);
+
+ val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0);
+ val16 = (val16 | B_AX_BTCCA_EN) & ~B_AX_BTCCA_BRK_TXOP_EN;
+ rtw89_write16(rtwdev, R_AX_CCA_CFG_0, val16);
+
+ ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_2, &val32);
+ if (ret) {
+ rtw89_err(rtwdev, "Read R_AX_LTE_SW_CFG_2 fail!\n");
+ return ret;
+ }
+ val32 = val32 & B_AX_WL_RX_CTRL;
+ ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_2, val32);
+ if (ret) {
+ rtw89_err(rtwdev, "Write R_AX_LTE_SW_CFG_2 fail!\n");
+ return ret;
+ }
+
+ switch (coex->pta_mode) {
+ case RTW89_MAC_AX_COEX_RTK_MODE:
+ val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG);
+ val &= ~B_AX_BTMODE_MASK;
+ val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_0_3);
+ rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val);
+
+ val = rtw89_read8(rtwdev, R_AX_TDMA_MODE);
+ rtw89_write8(rtwdev, R_AX_TDMA_MODE, val | B_AX_RTK_BT_ENABLE);
+
+ val = rtw89_read8(rtwdev, R_AX_BT_COEX_CFG_5);
+ val &= ~B_AX_BT_RPT_SAMPLE_RATE_MASK;
+ val |= FIELD_PREP(B_AX_BT_RPT_SAMPLE_RATE_MASK, MAC_AX_RTK_RATE);
+ rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_5, val);
+ break;
+ case RTW89_MAC_AX_COEX_CSR_MODE:
+ val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG);
+ val &= ~B_AX_BTMODE_MASK;
+ val |= FIELD_PREP(B_AX_BTMODE_MASK, MAC_AX_BT_MODE_2);
+ rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG, val);
+
+ val16 = rtw89_read16(rtwdev, R_AX_CSR_MODE);
+ val16 &= ~B_AX_BT_PRI_DETECT_TO_MASK;
+ val16 |= FIELD_PREP(B_AX_BT_PRI_DETECT_TO_MASK, MAC_AX_CSR_PRI_TO);
+ val16 &= ~B_AX_BT_TRX_INIT_DETECT_MASK;
+ val16 |= FIELD_PREP(B_AX_BT_TRX_INIT_DETECT_MASK, MAC_AX_CSR_TRX_TO);
+ val16 &= ~B_AX_BT_STAT_DELAY_MASK;
+ val16 |= FIELD_PREP(B_AX_BT_STAT_DELAY_MASK, MAC_AX_CSR_DELAY);
+ val16 |= B_AX_ENHANCED_BT;
+ rtw89_write16(rtwdev, R_AX_CSR_MODE, val16);
+
+ rtw89_write8(rtwdev, R_AX_BT_COEX_CFG_2, MAC_AX_CSR_RATE);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (coex->direction) {
+ case RTW89_MAC_AX_COEX_INNER:
+ val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
+ val = (val & ~BIT(2)) | BIT(1);
+ rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
+ break;
+ case RTW89_MAC_AX_COEX_OUTPUT:
+ val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
+ val = val | BIT(1) | BIT(0);
+ rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
+ break;
+ case RTW89_MAC_AX_COEX_INPUT:
+ val = rtw89_read8(rtwdev, R_AX_GPIO_MUXCFG + 1);
+ val = val & ~(BIT(2) | BIT(1));
+ rtw89_write8(rtwdev, R_AX_GPIO_MUXCFG + 1, val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
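+/* Program the software GNT_BT/GNT_WL values and their software-control
+ * enables for both RF paths through the LTE coex configuration register.
+ */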
+int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg)
+{
+ u32 val;
+ int ret;
+
+ ret = rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val);
+ if (ret) {
+ rtw89_err(rtwdev, "Read LTE fail!\n");
+ return ret;
+ }
+ val = (gnt_cfg->band[0].gnt_bt ?
+ B_AX_GNT_BT_RFC_S0_SW_VAL | B_AX_GNT_BT_BB_S0_SW_VAL : 0) |
+ (gnt_cfg->band[0].gnt_bt_sw_en ?
+ B_AX_GNT_BT_RFC_S0_SW_CTRL | B_AX_GNT_BT_BB_S0_SW_CTRL : 0) |
+ (gnt_cfg->band[0].gnt_wl ?
+ B_AX_GNT_WL_RFC_S0_SW_VAL | B_AX_GNT_WL_BB_S0_SW_VAL : 0) |
+ (gnt_cfg->band[0].gnt_wl_sw_en ?
+ B_AX_GNT_WL_RFC_S0_SW_CTRL | B_AX_GNT_WL_BB_S0_SW_CTRL : 0) |
+ (gnt_cfg->band[1].gnt_bt ?
+ B_AX_GNT_BT_RFC_S1_SW_VAL | B_AX_GNT_BT_BB_S1_SW_VAL : 0) |
+ (gnt_cfg->band[1].gnt_bt_sw_en ?
+ B_AX_GNT_BT_RFC_S1_SW_CTRL | B_AX_GNT_BT_BB_S1_SW_CTRL : 0) |
+ (gnt_cfg->band[1].gnt_wl ?
+ B_AX_GNT_WL_RFC_S1_SW_VAL | B_AX_GNT_WL_BB_S1_SW_VAL : 0) |
+ (gnt_cfg->band[1].gnt_wl_sw_en ?
+ B_AX_GNT_WL_RFC_S1_SW_CTRL | B_AX_GNT_WL_BB_S1_SW_CTRL : 0);
+ ret = rtw89_mac_write_lte(rtwdev, R_AX_LTE_SW_CFG_1, val);
+ if (ret) {
+ rtw89_err(rtwdev, "Write LTE fail!\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt)
+{
+ u32 reg;
+ u8 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, plt->band, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_BT_PLT, plt->band);
+ val = (plt->tx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_TX_PLT_GNT_LTE_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_TX_PLT_GNT_BT_TX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_TX_PLT_GNT_BT_RX : 0) |
+ (plt->tx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_TX_PLT_GNT_WL : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_LTE_RX ? B_AX_RX_PLT_GNT_LTE_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_TX ? B_AX_RX_PLT_GNT_BT_TX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_BT_RX ? B_AX_RX_PLT_GNT_BT_RX : 0) |
+ (plt->rx & RTW89_MAC_AX_PLT_GNT_WL ? B_AX_RX_PLT_GNT_WL : 0);
+ rtw89_write8(rtwdev, reg, val);
+
+ return 0;
+}
+
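+/* Update the driver half of the WL/BT scoreboard while preserving the
+ * firmware half and toggling the notify bit; the trailing delay gives BT
+ * firmware time to latch the update.
+ */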
+void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val)
+{
+ u32 fw_sb;
+
+ fw_sb = rtw89_read32(rtwdev, R_AX_SCOREBOARD);
+ fw_sb = FIELD_GET(B_MAC_AX_SB_FW_MASK, fw_sb);
+ fw_sb = fw_sb & ~B_MAC_AX_BTGS1_NOTIFY;
+ if (!test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ fw_sb = fw_sb | MAC_AX_NOTIFY_PWR_MAJOR;
+ else
+ fw_sb = fw_sb | MAC_AX_NOTIFY_TP_MAJOR;
+ val = FIELD_GET(B_MAC_AX_SB_DRV_MASK, val);
+ val = B_AX_TOGGLE |
+ FIELD_PREP(B_MAC_AX_SB_DRV_MASK, val) |
+ FIELD_PREP(B_MAC_AX_SB_FW_MASK, fw_sb);
+ rtw89_write32(rtwdev, R_AX_SCOREBOARD, val);
+ fsleep(1000); /* avoid BT FW loss information */
+}
+
+u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev)
+{
+ return rtw89_read32(rtwdev, R_AX_SCOREBOARD);
+}
+
+int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl)
+{
+ u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);
+
+ val = wl ? val | BIT(2) : val & ~BIT(2);
+ rtw89_write8(rtwdev, R_AX_SYS_SDIO_CTRL + 3, val);
+
+ return 0;
+}
+
+bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev)
+{
+ u8 val = rtw89_read8(rtwdev, R_AX_SYS_SDIO_CTRL + 3);
+
+ return FIELD_GET(B_AX_LTE_MUX_CTRL_PATH >> 24, val);
+}
+
+static void rtw89_mac_bfee_ctrl(struct rtw89_dev *rtwdev, u8 mac_idx, bool en)
+{
+ u32 reg;
+ u32 mask = B_AX_BFMEE_HT_NDPA_EN | B_AX_BFMEE_VHT_NDPA_EN |
+ B_AX_BFMEE_HE_NDPA_EN;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BF, "set bfee ndpa_en to %d\n", en);
+ reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx);
+ if (en) {
+ set_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
+ rtw89_write32_set(rtwdev, reg, mask);
+ } else {
+ clear_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
+ rtw89_write32_clr(rtwdev, reg, mask);
+ }
+}
+
+static int rtw89_mac_init_bfee(struct rtw89_dev *rtwdev, u8 mac_idx)
+{
+ u32 reg;
+ u32 val32;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ /* AP mode sets tx gid to 63; STA mode uses the default tx gid 0 */
+ reg = rtw89_mac_reg_by_idx(R_AX_BFMER_CTRL_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_BFMER_NDP_BFEN);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx);
+ rtw89_write32(rtwdev, reg, CSI_RRSC_BMAP);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_BFMEE_RESP_OPTION, mac_idx);
+ val32 = FIELD_PREP(B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK, BFRP_RX_STANDBY_TIMER);
+ val32 |= FIELD_PREP(B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK, NDP_RX_STANDBY_TIMER);
+ rtw89_write32(rtwdev, reg, val32);
+ rtw89_mac_bfee_ctrl(rtwdev, mac_idx, true);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL |
+ B_AX_BFMEE_USE_NSTS |
+ B_AX_BFMEE_CSI_GID_SEL |
+ B_AX_BFMEE_CSI_FORCE_RETE_EN);
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RATE, mac_idx);
+ rtw89_write32(rtwdev, reg,
+ u32_encode_bits(CSI_INIT_RATE_HT, B_AX_BFMEE_HT_CSI_RATE_MASK) |
+ u32_encode_bits(CSI_INIT_RATE_VHT, B_AX_BFMEE_VHT_CSI_RATE_MASK) |
+ u32_encode_bits(CSI_INIT_RATE_HE, B_AX_BFMEE_HE_CSI_RATE_MASK));
+
+ return 0;
+}
+
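+/* Derive the beamformee CSI parameters (Nc/Nr, sounding dimensions, LDPC and
+ * STBC support) from the peer's HE/VHT capabilities and program them into
+ * the CSI control register of the vif's port.
+ */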
+static int rtw89_mac_set_csi_para_reg(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ u8 mac_idx = rtwvif->mac_idx;
+ u8 nc = 1, nr = 3, ng = 0, cb = 1, cs = 1, ldpc_en = 1, stbc_en = 1;
+ u8 port_sel = rtwvif->port;
+ u8 sound_dim = 3, t;
+ u8 *phy_cap = sta->he_cap.he_cap_elem.phy_cap_info;
+ u32 reg;
+ u16 val;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if ((phy_cap[3] & IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER) ||
+ (phy_cap[4] & IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)) {
+ ldpc_en &= !!(phy_cap[1] & IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD);
+ stbc_en &= !!(phy_cap[2] & IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ);
+ t = FIELD_GET(IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK,
+ phy_cap[5]);
+ sound_dim = min(sound_dim, t);
+ }
+ if ((sta->vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) ||
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)) {
+ ldpc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC);
+ stbc_en &= !!(sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK);
+ t = FIELD_GET(IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK,
+ sta->vht_cap.cap);
+ sound_dim = min(sound_dim, t);
+ }
+ nc = min(nc, sound_dim);
+ nr = min(nr, sound_dim);
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
+
+ val = FIELD_PREP(B_AX_BFMEE_CSIINFO0_NC_MASK, nc) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_NR_MASK, nr) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_NG_MASK, ng) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_CB_MASK, cb) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_CS_MASK, cs) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_LDPC_EN, ldpc_en) |
+ FIELD_PREP(B_AX_BFMEE_CSIINFO0_STBC_EN, stbc_en);
+
+ if (port_sel == 0)
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ else
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_1, mac_idx);
+
+ rtw89_write16(rtwdev, reg, val);
+
+ return 0;
+}
+
+static int rtw89_mac_csi_rrsc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ u32 rrsc = BIT(RTW89_MAC_BF_RRSC_6M) | BIT(RTW89_MAC_BF_RRSC_24M);
+ u32 reg;
+ u8 mac_idx = rtwvif->mac_idx;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ if (sta->he_cap.has_he) {
+ rrsc |= (BIT(RTW89_MAC_BF_RRSC_HE_MSC0) |
+ BIT(RTW89_MAC_BF_RRSC_HE_MSC3) |
+ BIT(RTW89_MAC_BF_RRSC_HE_MSC5));
+ }
+ if (sta->vht_cap.vht_supported) {
+ rrsc |= (BIT(RTW89_MAC_BF_RRSC_VHT_MSC0) |
+ BIT(RTW89_MAC_BF_RRSC_VHT_MSC3) |
+ BIT(RTW89_MAC_BF_RRSC_VHT_MSC5));
+ }
+ if (sta->ht_cap.ht_supported) {
+ rrsc |= (BIT(RTW89_MAC_BF_RRSC_HT_MSC0) |
+ BIT(RTW89_MAC_BF_RRSC_HT_MSC3) |
+ BIT(RTW89_MAC_BF_RRSC_HT_MSC5));
+ }
+ reg = rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_CTRL_0, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_BFMEE_BFPARAM_SEL);
+ rtw89_write32_clr(rtwdev, reg, B_AX_BFMEE_CSI_FORCE_RETE_EN);
+ rtw89_write32(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_TRXPTCL_RESP_CSI_RRSC, mac_idx),
+ rrsc);
+
+ return 0;
+}
+
+void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ if (rtw89_sta_has_beamformer_cap(sta)) {
+ rtw89_debug(rtwdev, RTW89_DBG_BF,
+ "initialize bfee for new association\n");
+ rtw89_mac_init_bfee(rtwdev, rtwvif->mac_idx);
+ rtw89_mac_set_csi_para_reg(rtwdev, vif, sta);
+ rtw89_mac_csi_rrsc(rtwdev, vif, sta);
+ }
+}
+
+void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, false);
+}
+
+void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ u8 mac_idx = rtwvif->mac_idx;
+ __le32 *p;
+
+ rtw89_debug(rtwdev, RTW89_DBG_BF, "update bf GID table\n");
+
+ p = (__le32 *)conf->mu_group.membership;
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN0, mac_idx),
+ le32_to_cpu(p[0]));
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION_EN1, mac_idx),
+ le32_to_cpu(p[1]));
+
+ p = (__le32 *)conf->mu_group.position;
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION0, mac_idx),
+ le32_to_cpu(p[0]));
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION1, mac_idx),
+ le32_to_cpu(p[1]));
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION2, mac_idx),
+ le32_to_cpu(p[2]));
+ rtw89_write32(rtwdev, rtw89_mac_reg_by_idx(R_AX_GID_POSITION3, mac_idx),
+ le32_to_cpu(p[3]));
+}
+
+struct rtw89_mac_bf_monitor_iter_data {
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_sta *down_sta;
+ int count;
+};
+
+static
+void rtw89_mac_bf_monitor_calc_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_mac_bf_monitor_iter_data *iter_data =
+ (struct rtw89_mac_bf_monitor_iter_data *)data;
+ struct ieee80211_sta *down_sta = iter_data->down_sta;
+ int *count = &iter_data->count;
+
+ if (down_sta == sta)
+ return;
+
+ if (rtw89_sta_has_beamformer_cap(sta))
+ (*count)++;
+}
+
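+/* Recount associated stations that advertise beamformer capability, optionally
+ * excluding one that is disconnecting, and update the BFEE monitor flag.
+ */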
+void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, bool disconnect)
+{
+ struct rtw89_mac_bf_monitor_iter_data data;
+
+ data.rtwdev = rtwdev;
+ data.down_sta = disconnect ? sta : NULL;
+ data.count = 0;
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_mac_bf_monitor_calc_iter,
+ &data);
+
+ rtw89_debug(rtwdev, RTW89_DBG_BF, "bfee STA count=%d\n", data.count);
+ if (data.count)
+ set_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
+ else
+ clear_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags);
+}
+
+void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ struct rtw89_vif *rtwvif;
+ bool en = stats->tx_tfc_lv <= stats->rx_tfc_lv;
+ bool old = test_bit(RTW89_FLAG_BFEE_EN, rtwdev->flags);
+
+ if (en == old)
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_bfee_ctrl(rtwdev, rtwvif->mac_idx, en);
+}
+
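+/* Set the A-MPDU TX time limit, either per station via the CMAC control
+ * table when cctl_tx_time is in use or globally via the aggregation-limit
+ * register; a tx_time of 0 selects the default limit.
+ */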
+static int
+__rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ u32 tx_time)
+{
+#define MAC_AX_DFLT_TX_TIME 5280
+ u8 mac_idx = rtwsta->rtwvif->mac_idx;
+ u32 max_tx_time = tx_time == 0 ? MAC_AX_DFLT_TX_TIME : tx_time;
+ u32 reg;
+ int ret = 0;
+
+ if (rtwsta->cctl_tx_time) {
+ rtwsta->ampdu_max_time = (max_tx_time - 512) >> 9;
+ ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
+ } else {
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to check cmac in set txtime\n");
+ return ret;
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK,
+ max_tx_time >> 5);
+ }
+
+ return ret;
+}
+
+int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool resume, u32 tx_time)
+{
+ int ret = 0;
+
+ if (!resume) {
+ rtwsta->cctl_tx_time = true;
+ ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
+ } else {
+ ret = __rtw89_mac_set_tx_time(rtwdev, rtwsta, tx_time);
+ rtwsta->cctl_tx_time = false;
+ }
+
+ return ret;
+}
+
+int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ u32 *tx_time)
+{
+ u8 mac_idx = rtwsta->rtwvif->mac_idx;
+ u32 reg;
+ int ret = 0;
+
+ if (rtwsta->cctl_tx_time) {
+ *tx_time = (rtwsta->ampdu_max_time + 1) << 9;
+ } else {
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to check cmac in tx_time\n");
+ return ret;
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_AMPDU_AGG_LIMIT, mac_idx);
+ *tx_time = rtw89_read32_mask(rtwdev, reg, B_AX_AMPDU_MAX_TIME_MASK) << 5;
+ }
+
+ return ret;
+}
+
+int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ bool resume, u8 tx_retry)
+{
+ int ret = 0;
+
+ rtwsta->data_tx_cnt_lmt = tx_retry;
+
+ if (!resume) {
+ rtwsta->cctl_tx_retry_limit = true;
+ ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
+ } else {
+ ret = rtw89_fw_h2c_txtime_cmac_tbl(rtwdev, rtwsta);
+ rtwsta->cctl_tx_retry_limit = false;
+ }
+
+ return ret;
+}
+
+int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 *tx_retry)
+{
+ u8 mac_idx = rtwsta->rtwvif->mac_idx;
+ u32 reg;
+ int ret = 0;
+
+ if (rtwsta->cctl_tx_retry_limit) {
+ *tx_retry = rtwsta->data_tx_cnt_lmt;
+ } else {
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret) {
+ rtw89_warn(rtwdev, "failed to check cmac in rty_lmt\n");
+ return ret;
+ }
+
+ reg = rtw89_mac_reg_by_idx(R_AX_TXCNT, mac_idx);
+ *tx_retry = rtw89_read32_mask(rtwdev, reg, B_AX_L_TXCNT_LMT_MASK);
+ }
+
+ return ret;
+}
+
+int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en)
+{
+ u8 mac_idx = rtwvif->mac_idx;
+ u16 set = B_AX_MUEDCA_EN_0 | B_AX_SET_MUEDCATIMER_TF_0;
+ u32 reg;
+ int ret;
+
+ ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
+ if (ret)
+ return ret;
+
+ reg = rtw89_mac_reg_by_idx(R_AX_MUEDCA_EN, mac_idx);
+ if (en)
+ rtw89_write16_set(rtwdev, reg, set);
+ else
+ rtw89_write16_clr(rtwdev, reg, set);
+
+ return 0;
+}
diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h
new file mode 100644
index 000000000000..6f3db8a2a9c2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/mac.h
@@ -0,0 +1,860 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_MAC_H__
+#define __RTW89_MAC_H__
+
+#include "core.h"
+
+#define MAC_MEM_DUMP_PAGE_SIZE 0x40000
+#define ADDR_CAM_ENT_SIZE 0x40
+#define BSSID_CAM_ENT_SIZE 0x08
+#define HFC_PAGE_UNIT 64
+
+enum rtw89_mac_hwmod_sel {
+ RTW89_DMAC_SEL = 0,
+ RTW89_CMAC_SEL = 1,
+
+ RTW89_MAC_INVALID,
+};
+
+enum rtw89_mac_fwd_target {
+ RTW89_FWD_DONT_CARE = 0,
+ RTW89_FWD_TO_HOST = 1,
+ RTW89_FWD_TO_WLAN_CPU = 2
+};
+
+enum rtw89_mac_wd_dma_intvl {
+ RTW89_MAC_WD_DMA_INTVL_0S,
+ RTW89_MAC_WD_DMA_INTVL_256NS,
+ RTW89_MAC_WD_DMA_INTVL_512NS,
+ RTW89_MAC_WD_DMA_INTVL_768NS,
+ RTW89_MAC_WD_DMA_INTVL_1US,
+ RTW89_MAC_WD_DMA_INTVL_1_5US,
+ RTW89_MAC_WD_DMA_INTVL_2US,
+ RTW89_MAC_WD_DMA_INTVL_4US,
+ RTW89_MAC_WD_DMA_INTVL_8US,
+ RTW89_MAC_WD_DMA_INTVL_16US,
+ RTW89_MAC_WD_DMA_INTVL_DEF = 0xFE
+};
+
+enum rtw89_mac_multi_tag_num {
+ RTW89_MAC_TAG_NUM_1,
+ RTW89_MAC_TAG_NUM_2,
+ RTW89_MAC_TAG_NUM_3,
+ RTW89_MAC_TAG_NUM_4,
+ RTW89_MAC_TAG_NUM_5,
+ RTW89_MAC_TAG_NUM_6,
+ RTW89_MAC_TAG_NUM_7,
+ RTW89_MAC_TAG_NUM_8,
+ RTW89_MAC_TAG_NUM_DEF = 0xFE
+};
+
+enum rtw89_mac_lbc_tmr {
+ RTW89_MAC_LBC_TMR_8US = 0,
+ RTW89_MAC_LBC_TMR_16US,
+ RTW89_MAC_LBC_TMR_32US,
+ RTW89_MAC_LBC_TMR_64US,
+ RTW89_MAC_LBC_TMR_128US,
+ RTW89_MAC_LBC_TMR_256US,
+ RTW89_MAC_LBC_TMR_512US,
+ RTW89_MAC_LBC_TMR_1MS,
+ RTW89_MAC_LBC_TMR_2MS,
+ RTW89_MAC_LBC_TMR_4MS,
+ RTW89_MAC_LBC_TMR_8MS,
+ RTW89_MAC_LBC_TMR_DEF = 0xFE
+};
+
+enum rtw89_mac_cpuio_op_cmd_type {
+ CPUIO_OP_CMD_GET_1ST_PID = 0,
+ CPUIO_OP_CMD_GET_NEXT_PID = 1,
+ CPUIO_OP_CMD_ENQ_TO_TAIL = 4,
+ CPUIO_OP_CMD_ENQ_TO_HEAD = 5,
+ CPUIO_OP_CMD_DEQ = 8,
+ CPUIO_OP_CMD_DEQ_ENQ_ALL = 9,
+ CPUIO_OP_CMD_DEQ_ENQ_TO_TAIL = 12
+};
+
+enum rtw89_mac_wde_dle_port_id {
+ WDE_DLE_PORT_ID_DISPATCH = 0,
+ WDE_DLE_PORT_ID_PKTIN = 1,
+ WDE_DLE_PORT_ID_CMAC0 = 3,
+ WDE_DLE_PORT_ID_CMAC1 = 4,
+ WDE_DLE_PORT_ID_CPU_IO = 6,
+ WDE_DLE_PORT_ID_WDRLS = 7,
+ WDE_DLE_PORT_ID_END = 8
+};
+
+enum rtw89_mac_wde_dle_queid_wdrls {
+ WDE_DLE_QUEID_TXOK = 0,
+ WDE_DLE_QUEID_DROP_RETRY_LIMIT = 1,
+ WDE_DLE_QUEID_DROP_LIFETIME_TO = 2,
+ WDE_DLE_QUEID_DROP_MACID_DROP = 3,
+ WDE_DLE_QUEID_NO_REPORT = 4
+};
+
+enum rtw89_mac_ple_dle_port_id {
+ PLE_DLE_PORT_ID_DISPATCH = 0,
+ PLE_DLE_PORT_ID_MPDU = 1,
+ PLE_DLE_PORT_ID_SEC = 2,
+ PLE_DLE_PORT_ID_CMAC0 = 3,
+ PLE_DLE_PORT_ID_CMAC1 = 4,
+ PLE_DLE_PORT_ID_WDRLS = 5,
+ PLE_DLE_PORT_ID_CPU_IO = 6,
+ PLE_DLE_PORT_ID_PLRLS = 7,
+ PLE_DLE_PORT_ID_END = 8
+};
+
+enum rtw89_mac_ple_dle_queid_plrls {
+ PLE_DLE_QUEID_NO_REPORT = 0x0
+};
+
+enum rtw89_machdr_frame_type {
+ RTW89_MGNT = 0,
+ RTW89_CTRL = 1,
+ RTW89_DATA = 2,
+};
+
+enum rtw89_mac_dle_dfi_type {
+ DLE_DFI_TYPE_FREEPG = 0,
+ DLE_DFI_TYPE_QUOTA = 1,
+ DLE_DFI_TYPE_PAGELLT = 2,
+ DLE_DFI_TYPE_PKTINFO = 3,
+ DLE_DFI_TYPE_PREPKTLLT = 4,
+ DLE_DFI_TYPE_NXTPKTLLT = 5,
+ DLE_DFI_TYPE_QLNKTBL = 6,
+ DLE_DFI_TYPE_QEMPTY = 7,
+};
+
+enum rtw89_mac_dle_wde_quota_id {
+ WDE_QTAID_HOST_IF = 0,
+ WDE_QTAID_WLAN_CPU = 1,
+ WDE_QTAID_DATA_CPU = 2,
+ WDE_QTAID_PKTIN = 3,
+ WDE_QTAID_CPUIO = 4,
+};
+
+enum rtw89_mac_dle_ple_quota_id {
+ PLE_QTAID_B0_TXPL = 0,
+ PLE_QTAID_B1_TXPL = 1,
+ PLE_QTAID_C2H = 2,
+ PLE_QTAID_H2C = 3,
+ PLE_QTAID_WLAN_CPU = 4,
+ PLE_QTAID_MPDU = 5,
+ PLE_QTAID_CMAC0_RX = 6,
+ PLE_QTAID_CMAC1_RX = 7,
+ PLE_QTAID_CMAC1_BBRPT = 8,
+ PLE_QTAID_WDRLS = 9,
+ PLE_QTAID_CPUIO = 10,
+};
+
+enum rtw89_mac_dle_ctrl_type {
+ DLE_CTRL_TYPE_WDE = 0,
+ DLE_CTRL_TYPE_PLE = 1,
+ DLE_CTRL_TYPE_NUM = 2,
+};
+
+enum rtw89_mac_ax_l0_to_l1_event {
+ MAC_AX_L0_TO_L1_CHIF_IDLE = 0,
+ MAC_AX_L0_TO_L1_CMAC_DMA_IDLE = 1,
+ MAC_AX_L0_TO_L1_RLS_PKID = 2,
+ MAC_AX_L0_TO_L1_PTCL_IDLE = 3,
+ MAC_AX_L0_TO_L1_RX_QTA_LOST = 4,
+ MAC_AX_L0_TO_L1_DLE_STAT_HANG = 5,
+ MAC_AX_L0_TO_L1_PCIE_STUCK = 6,
+ MAC_AX_L0_TO_L1_EVENT_MAX = 15,
+};
+
+enum rtw89_mac_dbg_port_sel {
+ /* CMAC 0 related */
+ RTW89_DBG_PORT_SEL_PTCL_C0 = 0,
+ RTW89_DBG_PORT_SEL_SCH_C0,
+ RTW89_DBG_PORT_SEL_TMAC_C0,
+ RTW89_DBG_PORT_SEL_RMAC_C0,
+ RTW89_DBG_PORT_SEL_RMACST_C0,
+ RTW89_DBG_PORT_SEL_RMAC_PLCP_C0,
+ RTW89_DBG_PORT_SEL_TRXPTCL_C0,
+ RTW89_DBG_PORT_SEL_TX_INFOL_C0,
+ RTW89_DBG_PORT_SEL_TX_INFOH_C0,
+ RTW89_DBG_PORT_SEL_TXTF_INFOL_C0,
+ RTW89_DBG_PORT_SEL_TXTF_INFOH_C0,
+ /* CMAC 1 related */
+ RTW89_DBG_PORT_SEL_PTCL_C1,
+ RTW89_DBG_PORT_SEL_SCH_C1,
+ RTW89_DBG_PORT_SEL_TMAC_C1,
+ RTW89_DBG_PORT_SEL_RMAC_C1,
+ RTW89_DBG_PORT_SEL_RMACST_C1,
+ RTW89_DBG_PORT_SEL_RMAC_PLCP_C1,
+ RTW89_DBG_PORT_SEL_TRXPTCL_C1,
+ RTW89_DBG_PORT_SEL_TX_INFOL_C1,
+ RTW89_DBG_PORT_SEL_TX_INFOH_C1,
+ RTW89_DBG_PORT_SEL_TXTF_INFOL_C1,
+ RTW89_DBG_PORT_SEL_TXTF_INFOH_C1,
+ /* DLE related */
+ RTW89_DBG_PORT_SEL_WDE_BUFMGN_FREEPG,
+ RTW89_DBG_PORT_SEL_WDE_BUFMGN_QUOTA,
+ RTW89_DBG_PORT_SEL_WDE_BUFMGN_PAGELLT,
+ RTW89_DBG_PORT_SEL_WDE_BUFMGN_PKTINFO,
+ RTW89_DBG_PORT_SEL_WDE_QUEMGN_PREPKT,
+ RTW89_DBG_PORT_SEL_WDE_QUEMGN_NXTPKT,
+ RTW89_DBG_PORT_SEL_WDE_QUEMGN_QLNKTBL,
+ RTW89_DBG_PORT_SEL_WDE_QUEMGN_QEMPTY,
+ RTW89_DBG_PORT_SEL_PLE_BUFMGN_FREEPG,
+ RTW89_DBG_PORT_SEL_PLE_BUFMGN_QUOTA,
+ RTW89_DBG_PORT_SEL_PLE_BUFMGN_PAGELLT,
+ RTW89_DBG_PORT_SEL_PLE_BUFMGN_PKTINFO,
+ RTW89_DBG_PORT_SEL_PLE_QUEMGN_PREPKT,
+ RTW89_DBG_PORT_SEL_PLE_QUEMGN_NXTPKT,
+ RTW89_DBG_PORT_SEL_PLE_QUEMGN_QLNKTBL,
+ RTW89_DBG_PORT_SEL_PLE_QUEMGN_QEMPTY,
+ RTW89_DBG_PORT_SEL_PKTINFO,
+ /* PCIE related */
+ RTW89_DBG_PORT_SEL_PCIE_TXDMA,
+ RTW89_DBG_PORT_SEL_PCIE_RXDMA,
+ RTW89_DBG_PORT_SEL_PCIE_CVT,
+ RTW89_DBG_PORT_SEL_PCIE_CXPL,
+ RTW89_DBG_PORT_SEL_PCIE_IO,
+ RTW89_DBG_PORT_SEL_PCIE_MISC,
+ RTW89_DBG_PORT_SEL_PCIE_MISC2,
+
+ /* keep last */
+ RTW89_DBG_PORT_SEL_LAST,
+ RTW89_DBG_PORT_SEL_MAX = RTW89_DBG_PORT_SEL_LAST,
+ RTW89_DBG_PORT_SEL_INVALID = RTW89_DBG_PORT_SEL_LAST,
+};
+
+/* SRAM mem dump */
+#define R_AX_INDIR_ACCESS_ENTRY 0x40000
+
+#define STA_SCHED_BASE_ADDR 0x18808000
+#define RXPLD_FLTR_CAM_BASE_ADDR 0x18813000
+#define SECURITY_CAM_BASE_ADDR 0x18814000
+#define WOW_CAM_BASE_ADDR 0x18815000
+#define CMAC_TBL_BASE_ADDR 0x18840000
+#define ADDR_CAM_BASE_ADDR 0x18850000
+#define BSSID_CAM_BASE_ADDR 0x18853000
+#define BA_CAM_BASE_ADDR 0x18854000
+#define BCN_IE_CAM0_BASE_ADDR 0x18855000
+#define SHARED_BUF_BASE_ADDR 0x18700000
+#define DMAC_TBL_BASE_ADDR 0x18800000
+#define SHCUT_MACHDR_BASE_ADDR 0x18800800
+#define BCN_IE_CAM1_BASE_ADDR 0x188A0000
+
+#define CCTL_INFO_SIZE 32
+
+enum rtw89_mac_mem_sel {
+ RTW89_MAC_MEM_SHARED_BUF,
+ RTW89_MAC_MEM_DMAC_TBL,
+ RTW89_MAC_MEM_SHCUT_MACHDR,
+ RTW89_MAC_MEM_STA_SCHED,
+ RTW89_MAC_MEM_RXPLD_FLTR_CAM,
+ RTW89_MAC_MEM_SECURITY_CAM,
+ RTW89_MAC_MEM_WOW_CAM,
+ RTW89_MAC_MEM_CMAC_TBL,
+ RTW89_MAC_MEM_ADDR_CAM,
+ RTW89_MAC_MEM_BA_CAM,
+ RTW89_MAC_MEM_BCN_IE_CAM0,
+ RTW89_MAC_MEM_BCN_IE_CAM1,
+
+ /* keep last */
+ RTW89_MAC_MEM_LAST,
+ RTW89_MAC_MEM_MAX = RTW89_MAC_MEM_LAST,
+ RTW89_MAC_MEM_INVALID = RTW89_MAC_MEM_LAST,
+};
+
+enum rtw89_rpwm_req_pwr_state {
+ RTW89_MAC_RPWM_REQ_PWR_STATE_ACTIVE = 0,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFON = 1,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_BAND1_RFON = 2,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_BAND0_RFOFF = 3,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_BAND1_RFOFF = 4,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_CLK_GATED = 5,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_PWR_GATED = 6,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_HIOE_PWR_GATED = 7,
+ RTW89_MAC_RPWM_REQ_PWR_STATE_MAX,
+};
+
+struct rtw89_pwr_cfg {
+ u16 addr;
+ u8 cv_msk;
+ u8 intf_msk;
+ u8 base:4;
+ u8 cmd:4;
+ u8 msk;
+ u8 val;
+};
+
+enum rtw89_mac_c2h_ofld_func {
+ RTW89_MAC_C2H_FUNC_EFUSE_DUMP,
+ RTW89_MAC_C2H_FUNC_READ_RSP,
+ RTW89_MAC_C2H_FUNC_PKT_OFLD_RSP,
+ RTW89_MAC_C2H_FUNC_BCN_RESEND,
+ RTW89_MAC_C2H_FUNC_MACID_PAUSE,
+ RTW89_MAC_C2H_FUNC_OFLD_MAX,
+};
+
+enum rtw89_mac_c2h_info_func {
+ RTW89_MAC_C2H_FUNC_REC_ACK,
+ RTW89_MAC_C2H_FUNC_DONE_ACK,
+ RTW89_MAC_C2H_FUNC_C2H_LOG,
+ RTW89_MAC_C2H_FUNC_INFO_MAX,
+};
+
+enum rtw89_mac_c2h_class {
+ RTW89_MAC_C2H_CLASS_INFO,
+ RTW89_MAC_C2H_CLASS_OFLD,
+ RTW89_MAC_C2H_CLASS_TWT,
+ RTW89_MAC_C2H_CLASS_WOW,
+ RTW89_MAC_C2H_CLASS_MCC,
+ RTW89_MAC_C2H_CLASS_FWDBG,
+ RTW89_MAC_C2H_CLASS_MAX,
+};
+
+struct rtw89_mac_ax_coex {
+#define RTW89_MAC_AX_COEX_RTK_MODE 0
+#define RTW89_MAC_AX_COEX_CSR_MODE 1
+ u8 pta_mode;
+#define RTW89_MAC_AX_COEX_INNER 0
+#define RTW89_MAC_AX_COEX_OUTPUT 1
+#define RTW89_MAC_AX_COEX_INPUT 2
+ u8 direction;
+};
+
+struct rtw89_mac_ax_plt {
+#define RTW89_MAC_AX_PLT_LTE_RX BIT(0)
+#define RTW89_MAC_AX_PLT_GNT_BT_TX BIT(1)
+#define RTW89_MAC_AX_PLT_GNT_BT_RX BIT(2)
+#define RTW89_MAC_AX_PLT_GNT_WL BIT(3)
+ u8 band;
+ u8 tx;
+ u8 rx;
+};
+
+enum rtw89_mac_bf_rrsc_rate {
+ RTW89_MAC_BF_RRSC_6M = 0,
+ RTW89_MAC_BF_RRSC_9M = 1,
+ RTW89_MAC_BF_RRSC_12M,
+ RTW89_MAC_BF_RRSC_18M,
+ RTW89_MAC_BF_RRSC_24M,
+ RTW89_MAC_BF_RRSC_36M,
+ RTW89_MAC_BF_RRSC_48M,
+ RTW89_MAC_BF_RRSC_54M,
+ RTW89_MAC_BF_RRSC_HT_MSC0,
+ RTW89_MAC_BF_RRSC_HT_MSC1,
+ RTW89_MAC_BF_RRSC_HT_MSC2,
+ RTW89_MAC_BF_RRSC_HT_MSC3,
+ RTW89_MAC_BF_RRSC_HT_MSC4,
+ RTW89_MAC_BF_RRSC_HT_MSC5,
+ RTW89_MAC_BF_RRSC_HT_MSC6,
+ RTW89_MAC_BF_RRSC_HT_MSC7,
+ RTW89_MAC_BF_RRSC_VHT_MSC0,
+ RTW89_MAC_BF_RRSC_VHT_MSC1,
+ RTW89_MAC_BF_RRSC_VHT_MSC2,
+ RTW89_MAC_BF_RRSC_VHT_MSC3,
+ RTW89_MAC_BF_RRSC_VHT_MSC4,
+ RTW89_MAC_BF_RRSC_VHT_MSC5,
+ RTW89_MAC_BF_RRSC_VHT_MSC6,
+ RTW89_MAC_BF_RRSC_VHT_MSC7,
+ RTW89_MAC_BF_RRSC_HE_MSC0,
+ RTW89_MAC_BF_RRSC_HE_MSC1,
+ RTW89_MAC_BF_RRSC_HE_MSC2,
+ RTW89_MAC_BF_RRSC_HE_MSC3,
+ RTW89_MAC_BF_RRSC_HE_MSC4,
+ RTW89_MAC_BF_RRSC_HE_MSC5,
+ RTW89_MAC_BF_RRSC_HE_MSC6,
+ RTW89_MAC_BF_RRSC_HE_MSC7 = 31,
+ RTW89_MAC_BF_RRSC_MAX = 32
+};
+
+#define RTW89_R32_EA 0xEAEAEAEA
+#define RTW89_R32_DEAD 0xDEADBEEF
+#define MAC_REG_POOL_COUNT 10
+#define ACCESS_CMAC(_addr) \
+ ({typeof(_addr) __addr = (_addr); \
+ __addr >= R_AX_CMAC_REG_START && __addr <= R_AX_CMAC_REG_END; })
+
+#define PTCL_IDLE_POLL_CNT 10000
+#define SW_CVR_DUR_US 8
+#define SW_CVR_CNT 8
+
+#define DLE_BOUND_UNIT (8 * 1024)
+#define DLE_WAIT_CNT 2000
+#define TRXCFG_WAIT_CNT 2000
+
+#define RTW89_WDE_PG_64 64
+#define RTW89_WDE_PG_128 128
+#define RTW89_WDE_PG_256 256
+
+#define S_AX_WDE_PAGE_SEL_64 0
+#define S_AX_WDE_PAGE_SEL_128 1
+#define S_AX_WDE_PAGE_SEL_256 2
+
+#define RTW89_PLE_PG_64 64
+#define RTW89_PLE_PG_128 128
+#define RTW89_PLE_PG_256 256
+
+#define S_AX_PLE_PAGE_SEL_64 0
+#define S_AX_PLE_PAGE_SEL_128 1
+#define S_AX_PLE_PAGE_SEL_256 2
+
+#define SDIO_LOCAL_BASE_ADDR 0x80000000
+
+#define PWR_CMD_WRITE 0
+#define PWR_CMD_POLL 1
+#define PWR_CMD_DELAY 2
+#define PWR_CMD_END 3
+
+#define PWR_INTF_MSK_SDIO BIT(0)
+#define PWR_INTF_MSK_USB BIT(1)
+#define PWR_INTF_MSK_PCIE BIT(2)
+#define PWR_INTF_MSK_ALL 0x7
+
+#define PWR_BASE_MAC 0
+#define PWR_BASE_USB 1
+#define PWR_BASE_PCIE 2
+#define PWR_BASE_SDIO 3
+
+#define PWR_CV_MSK_A BIT(0)
+#define PWR_CV_MSK_B BIT(1)
+#define PWR_CV_MSK_C BIT(2)
+#define PWR_CV_MSK_D BIT(3)
+#define PWR_CV_MSK_E BIT(4)
+#define PWR_CV_MSK_F BIT(5)
+#define PWR_CV_MSK_G BIT(6)
+#define PWR_CV_MSK_TEST BIT(7)
+#define PWR_CV_MSK_ALL 0xFF
+
+#define PWR_DELAY_US 0
+#define PWR_DELAY_MS 1
+
+/* STA scheduler */
+#define SS_MACID_SH 8
+#define SS_TX_LEN_MSK 0x1FFFFF
+#define SS_CTRL1_R_TX_LEN 5
+#define SS_CTRL1_R_NEXT_LINK 20
+#define SS_LINK_SIZE 256
+
+/* MAC debug port */
+#define TMAC_DBG_SEL_C0 0xA5
+#define RMAC_DBG_SEL_C0 0xA6
+#define TRXPTCL_DBG_SEL_C0 0xA7
+#define TMAC_DBG_SEL_C1 0xB5
+#define RMAC_DBG_SEL_C1 0xB6
+#define TRXPTCL_DBG_SEL_C1 0xB7
+#define FW_PROG_CNTR_DBG_SEL 0xF2
+#define PCIE_TXDMA_DBG_SEL 0x30
+#define PCIE_RXDMA_DBG_SEL 0x31
+#define PCIE_CVT_DBG_SEL 0x32
+#define PCIE_CXPL_DBG_SEL 0x33
+#define PCIE_IO_DBG_SEL 0x37
+#define PCIE_MISC_DBG_SEL 0x38
+#define PCIE_MISC2_DBG_SEL 0x00
+#define MAC_DBG_SEL 1
+#define RMAC_CMAC_DBG_SEL 1
+
+/* TRXPTCL dbg port sel */
+#define TRXPTRL_DBG_SEL_TMAC 0
+#define TRXPTRL_DBG_SEL_RMAC 1
+
+struct rtw89_cpuio_ctrl {
+ u16 pkt_num;
+ u16 start_pktid;
+ u16 end_pktid;
+ u8 cmd_type;
+ u8 macid;
+ u8 src_pid;
+ u8 src_qid;
+ u8 dst_pid;
+ u8 dst_qid;
+ u16 pktid;
+};
+
+struct rtw89_mac_dbg_port_info {
+ u32 sel_addr;
+ u8 sel_byte;
+ u32 sel_msk;
+ u32 srt;
+ u32 end;
+ u32 rd_addr;
+ u8 rd_byte;
+ u32 rd_msk;
+};
+
+#define QLNKTBL_ADDR_INFO_SEL BIT(0)
+#define QLNKTBL_ADDR_INFO_SEL_0 0
+#define QLNKTBL_ADDR_INFO_SEL_1 1
+#define QLNKTBL_ADDR_TBL_IDX_MASK GENMASK(10, 1)
+#define QLNKTBL_DATA_SEL1_PKT_CNT_MASK GENMASK(11, 0)
+
+struct rtw89_mac_dle_dfi_ctrl {
+ enum rtw89_mac_dle_ctrl_type type;
+ u32 target;
+ u32 addr;
+ u32 out_data;
+};
+
+struct rtw89_mac_dle_dfi_quota {
+ enum rtw89_mac_dle_ctrl_type dle_type;
+ u32 qtaid;
+ u16 rsv_pgnum;
+ u16 use_pgnum;
+};
+
+struct rtw89_mac_dle_dfi_qempty {
+ enum rtw89_mac_dle_ctrl_type dle_type;
+ u32 grpsel;
+ u32 qempty;
+};
+
+/* Define DBG and recovery enum */
+enum mac_ax_err_info {
+ /* Get error info */
+
+ /* L0 */
+ MAC_AX_ERR_L0_ERR_CMAC0 = 0x0001,
+ MAC_AX_ERR_L0_ERR_CMAC1 = 0x0002,
+ MAC_AX_ERR_L0_RESET_DONE = 0x0003,
+ MAC_AX_ERR_L0_PROMOTE_TO_L1 = 0x0010,
+
+ /* L1 */
+ MAC_AX_ERR_L1_ERR_DMAC = 0x1000,
+ MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE = 0x1001,
+ MAC_AX_ERR_L1_RESET_RECOVERY_DONE = 0x1002,
+ MAC_AX_ERR_L1_PROMOTE_TO_L2 = 0x1010,
+ MAC_AX_ERR_L1_RCVY_STOP_DONE = 0x1011,
+
+ /* L2 */
+ /* address hole (master) */
+ MAC_AX_ERR_L2_ERR_AH_DMA = 0x2000,
+ MAC_AX_ERR_L2_ERR_AH_HCI = 0x2010,
+ MAC_AX_ERR_L2_ERR_AH_RLX4081 = 0x2020,
+ MAC_AX_ERR_L2_ERR_AH_IDDMA = 0x2030,
+ MAC_AX_ERR_L2_ERR_AH_HIOE = 0x2040,
+ MAC_AX_ERR_L2_ERR_AH_IPSEC = 0x2050,
+ MAC_AX_ERR_L2_ERR_AH_RX4281 = 0x2060,
+ MAC_AX_ERR_L2_ERR_AH_OTHERS = 0x2070,
+
+ /* AHB bridge timeout (master) */
+ MAC_AX_ERR_L2_ERR_AHB_TO_DMA = 0x2100,
+ MAC_AX_ERR_L2_ERR_AHB_TO_HCI = 0x2110,
+ MAC_AX_ERR_L2_ERR_AHB_TO_RLX4081 = 0x2120,
+ MAC_AX_ERR_L2_ERR_AHB_TO_IDDMA = 0x2130,
+ MAC_AX_ERR_L2_ERR_AHB_TO_HIOE = 0x2140,
+ MAC_AX_ERR_L2_ERR_AHB_TO_IPSEC = 0x2150,
+ MAC_AX_ERR_L2_ERR_AHB_TO_RX4281 = 0x2160,
+ MAC_AX_ERR_L2_ERR_AHB_TO_OTHERS = 0x2170,
+
+ /* APB_SA bridge timeout (master + slave) */
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WVA = 0x2200,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_UART = 0x2201,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_CPULOCAL = 0x2202,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_AXIDMA = 0x2203,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_HIOE = 0x2204,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_IDDMA = 0x2205,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_IPSEC = 0x2206,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WON = 0x2207,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WDMAC = 0x2208,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_WCMAC = 0x2209,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_DMA_OTHERS = 0x220A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WVA = 0x2210,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_UART = 0x2211,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_CPULOCAL = 0x2212,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_AXIDMA = 0x2213,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_HIOE = 0x2214,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_IDDMA = 0x2215,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_IPSEC = 0x2216,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WDMAC = 0x2218,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_WCMAC = 0x2219,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HCI_OTHERS = 0x221A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WVA = 0x2220,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_UART = 0x2221,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_CPULOCAL = 0x2222,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_AXIDMA = 0x2223,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_HIOE = 0x2224,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_IDDMA = 0x2225,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_IPSEC = 0x2226,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WON = 0x2227,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WDMAC = 0x2228,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_WCMAC = 0x2229,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RLX4081_OTHERS = 0x222A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WVA = 0x2230,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_UART = 0x2231,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_CPULOCAL = 0x2232,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_AXIDMA = 0x2233,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_HIOE = 0x2234,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_IDDMA = 0x2235,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_IPSEC = 0x2236,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WON = 0x2237,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WDMAC = 0x2238,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_WCMAC = 0x2239,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IDDMA_OTHERS = 0x223A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WVA = 0x2240,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_UART = 0x2241,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_CPULOCAL = 0x2242,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_AXIDMA = 0x2243,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_HIOE = 0x2244,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_IDDMA = 0x2245,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_IPSEC = 0x2246,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WON = 0x2247,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WDMAC = 0x2248,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_WCMAC = 0x2249,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_HIOE_OTHERS = 0x224A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WVA = 0x2250,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_UART = 0x2251,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_CPULOCAL = 0x2252,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_AXIDMA = 0x2253,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_HIOE = 0x2254,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_IDDMA = 0x2255,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_IPSEC = 0x2256,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WON = 0x2257,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WDMAC = 0x2258,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_WCMAC = 0x2259,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_IPSEC_OTHERS = 0x225A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WVA = 0x2260,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_UART = 0x2261,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_CPULOCAL = 0x2262,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_AXIDMA = 0x2263,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_HIOE = 0x2264,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_IDDMA = 0x2265,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_IPSEC = 0x2266,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WON = 0x2267,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WDMAC = 0x2268,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_WCMAC = 0x2269,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_RX4281_OTHERS = 0x226A,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WVA = 0x2270,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_UART = 0x2271,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_CPULOCAL = 0x2272,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_AXIDMA = 0x2273,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_HIOE = 0x2274,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_IDDMA = 0x2275,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_IPSEC = 0x2276,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WON = 0x2277,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WDMAC = 0x2278,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_WCMAC = 0x2279,
+ MAC_AX_ERR_L2_ERR_APB_SA_TO_OTHERS_OTHERS = 0x227A,
+
+ /* APB_BBRF bridge timeout (master) */
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_DMA = 0x2300,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_HCI = 0x2310,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RLX4081 = 0x2320,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_IDDMA = 0x2330,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_HIOE = 0x2340,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_IPSEC = 0x2350,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_RX4281 = 0x2360,
+ MAC_AX_ERR_L2_ERR_APB_BBRF_TO_OTHERS = 0x2370,
+ MAC_AX_ERR_L2_RESET_DONE = 0x2400,
+ MAC_AX_ERR_CPU_EXCEPTION = 0x3000,
+ MAC_AX_GET_ERR_MAX,
+ MAC_AX_DUMP_SHAREBUFF_INDICATOR = 0x80000000,
+
+ /* set error info */
+ MAC_AX_ERR_L1_DISABLE_EN = 0x0001,
+ MAC_AX_ERR_L1_RCVY_EN = 0x0002,
+ MAC_AX_ERR_L1_RCVY_STOP_REQ = 0x0003,
+ MAC_AX_ERR_L1_RCVY_START_REQ = 0x0004,
+ MAC_AX_ERR_L0_CFG_NOTIFY = 0x0010,
+ MAC_AX_ERR_L0_CFG_DIS_NOTIFY = 0x0011,
+ MAC_AX_ERR_L0_CFG_HANDSHAKE = 0x0012,
+ MAC_AX_ERR_L0_RCVY_EN = 0x0013,
+ MAC_AX_SET_ERR_MAX,
+};
+
+extern const struct rtw89_hfc_prec_cfg rtw_hfc_preccfg_pcie;
+extern const struct rtw89_dle_size wde_size0;
+extern const struct rtw89_dle_size wde_size4;
+extern const struct rtw89_dle_size ple_size0;
+extern const struct rtw89_dle_size ple_size4;
+extern const struct rtw89_wde_quota wde_qt0;
+extern const struct rtw89_wde_quota wde_qt4;
+extern const struct rtw89_ple_quota ple_qt4;
+extern const struct rtw89_ple_quota ple_qt5;
+extern const struct rtw89_ple_quota ple_qt13;
+
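+/* Band 1 (CMAC1) registers mirror band 0 at a fixed 0x2000 offset. */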
+static inline u32 rtw89_mac_reg_by_idx(u32 reg_base, u8 band)
+{
+ return band == 0 ? reg_base : (reg_base + 0x2000);
+}
+
+static inline u32 rtw89_mac_reg_by_port(u32 base, u8 port, u8 mac_idx)
+{
+ return rtw89_mac_reg_by_idx(base + port * 0x40, mac_idx);
+}
+
+static inline u32
+rtw89_read32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u32 mask)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ return rtw89_read32_mask(rtwdev, reg, mask);
+}
+
+static inline void
+rtw89_write32_port(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, u32 base,
+ u32 data)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write32(rtwdev, reg, data);
+}
+
+static inline void
+rtw89_write32_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u32 mask, u32 data)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write32_mask(rtwdev, reg, mask, data);
+}
+
+static inline void
+rtw89_write16_port_mask(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u32 mask, u16 data)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write16_mask(rtwdev, reg, mask, data);
+}
+
+static inline void
+rtw89_write32_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u32 bit)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write32_clr(rtwdev, reg, bit);
+}
+
+static inline void
+rtw89_write16_port_clr(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u16 bit)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write16_clr(rtwdev, reg, bit);
+}
+
+static inline void
+rtw89_write32_port_set(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif,
+ u32 base, u32 bit)
+{
+ u32 reg;
+
+ reg = rtw89_mac_reg_by_port(base, rtwvif->port, rtwvif->mac_idx);
+ rtw89_write32_set(rtwdev, reg, bit);
+}
+
+void rtw89_mac_pwr_off(struct rtw89_dev *rtwdev);
+int rtw89_mac_partial_init(struct rtw89_dev *rtwdev);
+int rtw89_mac_init(struct rtw89_dev *rtwdev);
+int rtw89_mac_check_mac_en(struct rtw89_dev *rtwdev, u8 band,
+ enum rtw89_mac_hwmod_sel sel);
+int rtw89_mac_write_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 val);
+int rtw89_mac_read_lte(struct rtw89_dev *rtwdev, const u32 offset, u32 *val);
+int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+int rtw89_mac_port_update(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *vif);
+void rtw89_mac_enable_bb_rf(struct rtw89_dev *rtwdev);
+void rtw89_mac_disable_bb_rf(struct rtw89_dev *rtwdev);
+u32 rtw89_mac_get_err_status(struct rtw89_dev *rtwdev);
+int rtw89_mac_set_err_status(struct rtw89_dev *rtwdev, u32 err);
+void rtw89_mac_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func);
+int rtw89_mac_setup_phycap(struct rtw89_dev *rtwdev);
+int rtw89_mac_stop_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx,
+ u16 *tx_en, enum rtw89_sch_tx_sel sel);
+int rtw89_mac_resume_sch_tx(struct rtw89_dev *rtwdev, u8 mac_idx, u16 tx_en);
+int rtw89_mac_cfg_ppdu_status(struct rtw89_dev *rtwdev, u8 mac_ids, bool enable);
+void rtw89_mac_update_rts_threshold(struct rtw89_dev *rtwdev, u8 mac_idx);
+void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop);
+int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex);
+int rtw89_mac_cfg_gnt(struct rtw89_dev *rtwdev,
+ const struct rtw89_mac_ax_coex_gnt *gnt_cfg);
+int rtw89_mac_cfg_plt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_plt *plt);
+void rtw89_mac_cfg_sb(struct rtw89_dev *rtwdev, u32 val);
+u32 rtw89_mac_get_sb(struct rtw89_dev *rtwdev);
+bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev);
+int rtw89_mac_cfg_ctrl_path(struct rtw89_dev *rtwdev, bool wl);
+bool rtw89_mac_get_txpwr_cr(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ u32 reg_base, u32 *cr);
+void rtw89_mac_power_mode_change(struct rtw89_dev *rtwdev, bool enter);
+void rtw89_mac_bf_assoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void rtw89_mac_bf_disassoc(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void rtw89_mac_bf_set_gid_table(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf);
+void rtw89_mac_bf_monitor_calc(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, bool disconnect);
+void _rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev);
+int rtw89_mac_vif_init(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_mac_vif_deinit(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif);
+int rtw89_mac_set_hw_muedca_ctrl(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, bool en);
+
+static inline void rtw89_mac_bf_monitor_track(struct rtw89_dev *rtwdev)
+{
+ if (!test_bit(RTW89_FLAG_BFEE_MON, rtwdev->flags))
+ return;
+
+ _rtw89_mac_bf_monitor_track(rtwdev);
+}
+
+static inline int rtw89_mac_txpwr_read32(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ u32 reg_base, u32 *val)
+{
+ u32 cr;
+
+ if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr))
+ return -EINVAL;
+
+ *val = rtw89_read32(rtwdev, cr);
+ return 0;
+}
+
+static inline int rtw89_mac_txpwr_write32(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ u32 reg_base, u32 val)
+{
+ u32 cr;
+
+ if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr))
+ return -EINVAL;
+
+ rtw89_write32(rtwdev, cr, val);
+ return 0;
+}
+
+static inline int rtw89_mac_txpwr_write32_mask(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx,
+ u32 reg_base, u32 mask, u32 val)
+{
+ u32 cr;
+
+ if (!rtw89_mac_get_txpwr_cr(rtwdev, phy_idx, reg_base, &cr))
+ return -EINVAL;
+
+ rtw89_write32_mask(rtwdev, cr, mask, val);
+ return 0;
+}
+
+int rtw89_mac_set_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ bool resume, u32 tx_time);
+int rtw89_mac_get_tx_time(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta,
+ u32 *tx_time);
+int rtw89_mac_set_tx_retry_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta,
+ bool resume, u8 tx_retry);
+int rtw89_mac_get_tx_retry_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_sta *rtwsta, u8 *tx_retry);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c
new file mode 100644
index 000000000000..16dc6fb7dbb0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/mac80211.c
@@ -0,0 +1,676 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "cam.h"
+#include "coex.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "phy.h"
+#include "ps.h"
+#include "reg.h"
+#include "sar.h"
+#include "ser.h"
+
+static void rtw89_ops_tx(struct ieee80211_hw *hw,
+ struct ieee80211_tx_control *control,
+ struct sk_buff *skb)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct ieee80211_sta *sta = control->sta;
+ int ret, qsel;
+
+ ret = rtw89_core_tx_write(rtwdev, vif, sta, skb, &qsel);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to transmit skb: %d\n", ret);
+ ieee80211_free_txskb(hw, skb);
+ return;
+ }
+ rtw89_core_tx_kick_off(rtwdev, qsel);
+}
+
+static void rtw89_ops_wake_tx_queue(struct ieee80211_hw *hw,
+ struct ieee80211_txq *txq)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ ieee80211_schedule_txq(hw, txq);
+ queue_work(rtwdev->txq_wq, &rtwdev->txq_work);
+}
+
+static int rtw89_ops_start(struct ieee80211_hw *hw)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ ret = rtw89_core_start(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_stop(struct ieee80211_hw *hw)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_core_stop(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
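+ /* leave idle power save before touching the hardware below; IPS is
+ * (re-)entered only at the end, after the other changes are applied
+ */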
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ !(hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw89_leave_ips(rtwdev);
+
+ if (changed & IEEE80211_CONF_CHANGE_PS) {
+ if (hw->conf.flags & IEEE80211_CONF_PS) {
+ rtwdev->lps_enabled = true;
+ } else {
+ rtw89_leave_lps(rtwdev);
+ rtwdev->lps_enabled = false;
+ }
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL)
+ rtw89_set_channel(rtwdev);
+
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (hw->conf.flags & IEEE80211_CONF_IDLE))
+ rtw89_enter_ips(rtwdev);
+
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static int rtw89_ops_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+ list_add_tail(&rtwvif->list, &rtwdev->rtwvifs_list);
+ rtw89_leave_ps_mode(rtwdev);
+
+ rtw89_traffic_stats_init(rtwdev, &rtwvif->stats);
+ rtw89_vif_type_mapping(vif, false);
+ rtwvif->port = rtw89_core_acquire_bit_map(rtwdev->hw_port,
+ RTW89_MAX_HW_PORT_NUM);
+ if (rtwvif->port == RTW89_MAX_HW_PORT_NUM) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ rtwvif->bcn_hit_cond = 0;
+ rtwvif->mac_idx = RTW89_MAC_0;
+ rtwvif->phy_idx = RTW89_PHY_0;
+ rtwvif->hit_rule = 0;
+ ether_addr_copy(rtwvif->mac_addr, vif->addr);
+
+ ret = rtw89_mac_add_vif(rtwdev, rtwvif);
+ if (ret) {
+ rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+ goto out;
+ }
+
+ rtw89_core_txq_init(rtwdev, vif->txq);
+
+ rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_START);
+out:
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static void rtw89_ops_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_btc_ntfy_role_info(rtwdev, rtwvif, NULL, BTC_ROLE_STOP);
+ rtw89_mac_remove_vif(rtwdev, rtwvif);
+ rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+ list_del_init(&rtwvif->list);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *new_flags,
+ u64 multicast)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
+ *new_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_FCSFAIL |
+ FIF_BCN_PRBRESP_PROMISC;
+
+ if (changed_flags & FIF_ALLMULTI) {
+ if (*new_flags & FIF_ALLMULTI)
+ rtwdev->hal.rx_fltr &= ~B_AX_A_MC;
+ else
+ rtwdev->hal.rx_fltr |= B_AX_A_MC;
+ }
+ if (changed_flags & FIF_FCSFAIL) {
+ if (*new_flags & FIF_FCSFAIL)
+ rtwdev->hal.rx_fltr |= B_AX_A_CRC32_ERR;
+ else
+ rtwdev->hal.rx_fltr &= ~B_AX_A_CRC32_ERR;
+ }
+ if (changed_flags & FIF_OTHER_BSS) {
+ if (*new_flags & FIF_OTHER_BSS)
+ rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ else
+ rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
+ }
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
+ if (*new_flags & FIF_BCN_PRBRESP_PROMISC) {
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_BC;
+ rtwdev->hal.rx_fltr &= ~B_AX_A_A1_MATCH;
+ } else {
+ rtwdev->hal.rx_fltr |= B_AX_A_BCN_CHK_EN;
+ rtwdev->hal.rx_fltr |= B_AX_A_BC;
+ rtwdev->hal.rx_fltr |= B_AX_A_A1_MATCH;
+ }
+ }
+
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_0),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+ if (!rtwdev->dbcc_en)
+ goto out;
+ rtw89_write32_mask(rtwdev,
+ rtw89_mac_reg_by_idx(R_AX_RX_FLTR_OPT, RTW89_MAC_1),
+ B_AX_RX_FLTR_CFG_MASK,
+ rtwdev->hal.rx_fltr);
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static const u8 ac_to_fw_idx[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = 3,
+ [IEEE80211_AC_VI] = 2,
+ [IEEE80211_AC_BE] = 0,
+ [IEEE80211_AC_BK] = 1,
+};
+
+static u8 rtw89_aifsn_to_aifs(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u8 aifsn)
+{
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif);
+ u8 slot_time;
+ u8 sifs;
+
+ slot_time = vif->bss_conf.use_short_slot ? 9 : 20;
+ sifs = rtwdev->hal.current_band_type == RTW89_BAND_5G ? 16 : 10;
+
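+ /* AIFS[us] = AIFSN * slot_time + SIFS, the usual EDCA timing; e.g.
+ * AIFSN = 2 with short slots (9 us) on 5 GHz (SIFS = 16 us) gives
+ * 2 * 9 + 16 = 34 us
+ */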
+ return aifsn * slot_time + sifs;
+}
+
+static void ____rtw89_conf_tx_edca(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u16 ac)
+{
+ struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
+ u32 val;
+ u8 ecw_max, ecw_min;
+ u8 aifs;
+
+ /* 2^ecw - 1 = cw; ecw = log2(cw + 1) */
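+ /* e.g. a typical cw_min of 15 gives ecw_min = ilog2(16) = 4, and a
+ * cw_max of 1023 gives ecw_max = ilog2(1024) = 10
+ */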
+ ecw_max = ilog2(params->cw_max + 1);
+ ecw_min = ilog2(params->cw_min + 1);
+ aifs = rtw89_aifsn_to_aifs(rtwdev, rtwvif, params->aifs);
+ val = FIELD_PREP(FW_EDCA_PARAM_TXOPLMT_MSK, params->txop) |
+ FIELD_PREP(FW_EDCA_PARAM_CWMAX_MSK, ecw_max) |
+ FIELD_PREP(FW_EDCA_PARAM_CWMIN_MSK, ecw_min) |
+ FIELD_PREP(FW_EDCA_PARAM_AIFS_MSK, aifs);
+ rtw89_fw_h2c_set_edca(rtwdev, rtwvif, ac_to_fw_idx[ac], val);
+}
+
+static const u32 ac_to_mu_edca_param[IEEE80211_NUM_ACS] = {
+ [IEEE80211_AC_VO] = R_AX_MUEDCA_VO_PARAM_0,
+ [IEEE80211_AC_VI] = R_AX_MUEDCA_VI_PARAM_0,
+ [IEEE80211_AC_BE] = R_AX_MUEDCA_BE_PARAM_0,
+ [IEEE80211_AC_BK] = R_AX_MUEDCA_BK_PARAM_0,
+};
+
+static void ____rtw89_conf_tx_mu_edca(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u16 ac)
+{
+ struct ieee80211_tx_queue_params *params = &rtwvif->tx_params[ac];
+ struct ieee80211_he_mu_edca_param_ac_rec *mu_edca;
+ u8 aifs, aifsn;
+ u16 timer_32us;
+ u32 reg;
+ u32 val;
+
+ if (!params->mu_edca)
+ return;
+
+ mu_edca = &params->mu_edca_param_rec;
+ aifsn = FIELD_GET(GENMASK(3, 0), mu_edca->aifsn);
+ aifs = aifsn ? rtw89_aifsn_to_aifs(rtwdev, rtwvif, aifsn) : 0;
+ timer_32us = mu_edca->mu_edca_timer << 8;
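+ /* the MU EDCA timer field is in units of 8 TUs (8 * 1024 us); the
+ * register presumably takes 32 us units, hence 8192 / 32 = 256, i.e. << 8
+ */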
+
+ val = FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK, timer_32us) |
+ FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_CW_MASK, mu_edca->ecw_min_max) |
+ FIELD_PREP(B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK, aifs);
+ reg = rtw89_mac_reg_by_idx(ac_to_mu_edca_param[ac], rtwvif->mac_idx);
+ rtw89_write32(rtwdev, reg, val);
+
+ rtw89_mac_set_hw_muedca_ctrl(rtwdev, rtwvif, true);
+}
+
+static void __rtw89_conf_tx(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif, u16 ac)
+{
+ ____rtw89_conf_tx_edca(rtwdev, rtwvif, ac);
+ ____rtw89_conf_tx_mu_edca(rtwdev, rtwvif, ac);
+}
+
+static void rtw89_conf_tx(struct rtw89_dev *rtwdev,
+ struct rtw89_vif *rtwvif)
+{
+ u16 ac;
+
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
+ __rtw89_conf_tx(rtwdev, rtwvif, ac);
+}
+
+static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf)
+{
+ struct ieee80211_sta *sta;
+
+ if (vif->type != NL80211_IFTYPE_STATION)
+ return;
+
+ sta = ieee80211_find_sta(vif, conf->bssid);
+ if (!sta) {
+ rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n");
+ return;
+ }
+ rtw89_core_sta_assoc(rtwdev, vif, sta);
+}
+
+static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *conf,
+ u32 changed)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
+ if (changed & BSS_CHANGED_ASSOC) {
+ if (conf->assoc) {
+ rtw89_station_mode_sta_assoc(rtwdev, vif, conf);
+ rtw89_phy_set_bss_color(rtwdev, vif);
+ rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif);
+ rtw89_mac_port_update(rtwdev, rtwvif);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BSSID) {
+ ether_addr_copy(rtwvif->bssid, conf->bssid);
+ rtw89_cam_bssid_changed(rtwdev, rtwvif);
+ rtw89_fw_h2c_cam(rtwdev, rtwvif);
+ }
+
+ if (changed & BSS_CHANGED_ERP_SLOT)
+ rtw89_conf_tx(rtwdev, rtwvif);
+
+ if (changed & BSS_CHANGED_HE_BSS_COLOR)
+ rtw89_phy_set_bss_color(rtwdev, vif);
+
+ if (changed & BSS_CHANGED_MU_GROUPS)
+ rtw89_mac_bf_set_gid_table(rtwdev, vif, conf);
+
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static int rtw89_ops_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 ac,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ rtwvif->tx_params[ac] = *params;
+ __rtw89_conf_tx(rtwdev, rtwvif, ac);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static int __rtw89_ops_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ if (old_state == IEEE80211_STA_NOTEXIST &&
+ new_state == IEEE80211_STA_NONE)
+ return rtw89_core_sta_add(rtwdev, vif, sta);
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_ASSOC) {
+ if (vif->type == NL80211_IFTYPE_STATION)
+ return 0; /* defer to bss_info_changed to have vif info */
+ return rtw89_core_sta_assoc(rtwdev, vif, sta);
+ }
+
+ if (old_state == IEEE80211_STA_ASSOC &&
+ new_state == IEEE80211_STA_AUTH)
+ return rtw89_core_sta_disassoc(rtwdev, vif, sta);
+
+ if (old_state == IEEE80211_STA_AUTH &&
+ new_state == IEEE80211_STA_NONE)
+ return rtw89_core_sta_disconnect(rtwdev, vif, sta);
+
+ if (old_state == IEEE80211_STA_NONE &&
+ new_state == IEEE80211_STA_NOTEXIST)
+ return rtw89_core_sta_remove(rtwdev, vif, sta);
+
+ return 0;
+}
+
+static int rtw89_ops_sta_state(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ enum ieee80211_sta_state old_state,
+ enum ieee80211_sta_state new_state)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ ret = __rtw89_ops_sta_state(hw, vif, sta, old_state, new_state);
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw89_ops_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
+ switch (cmd) {
+ case SET_KEY:
+ rtw89_btc_ntfy_specific_packet(rtwdev, PACKET_EAPOL_END);
+ ret = rtw89_cam_sec_key_add(rtwdev, vif, sta, key);
+ if (ret && ret != -EOPNOTSUPP) {
+ rtw89_err(rtwdev, "failed to add key to sec cam\n");
+ goto out;
+ }
+ break;
+ case DISABLE_KEY:
+ rtw89_hci_flush_queues(rtwdev, BIT(rtwdev->hw->queues) - 1,
+ false);
+ rtw89_mac_flush_txq(rtwdev, BIT(rtwdev->hw->queues) - 1, false);
+ ret = rtw89_cam_sec_key_del(rtwdev, vif, sta, key, true);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to remove key from sec cam\n");
+ goto out;
+ }
+ break;
+ }
+
+out:
+ mutex_unlock(&rtwdev->mutex);
+
+ return ret;
+}
+
+static int rtw89_ops_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_ampdu_params *params)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct ieee80211_sta *sta = params->sta;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ u16 tid = params->tid;
+ struct ieee80211_txq *txq = sta->txq[tid];
+ struct rtw89_txq *rtwtxq = (struct rtw89_txq *)txq->drv_priv;
+
+ switch (params->action) {
+ case IEEE80211_AMPDU_TX_START:
+ return IEEE80211_AMPDU_TX_START_IMMEDIATE;
+ case IEEE80211_AMPDU_TX_STOP_CONT:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH:
+ case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
+ mutex_lock(&rtwdev->mutex);
+ clear_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ rtw89_fw_h2c_ba_cam(rtwdev, false, rtwsta->mac_id, params);
+ mutex_unlock(&rtwdev->mutex);
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ mutex_lock(&rtwdev->mutex);
+ set_bit(RTW89_TXQ_F_AMPDU, &rtwtxq->flags);
+ rtwsta->ampdu_params[tid].agg_num = params->buf_size;
+ rtwsta->ampdu_params[tid].amsdu = params->amsdu;
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_fw_h2c_ba_cam(rtwdev, true, rtwsta->mac_id, params);
+ mutex_unlock(&rtwdev->mutex);
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ WARN_ON(1);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static int rtw89_ops_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+ if (test_bit(RTW89_FLAG_POWERON, rtwdev->flags))
+ rtw89_mac_update_rts_threshold(rtwdev, RTW89_MAC_0);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static void rtw89_ops_sta_statistics(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct station_info *sinfo)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+
+ sinfo->txrate = rtwsta->ra_report.txrate;
+ sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+}
+
+static void rtw89_ops_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 queues, bool drop)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_lps(rtwdev);
+ rtw89_hci_flush_queues(rtwdev, queues, drop);
+ rtw89_mac_flush_txq(rtwdev, queues, drop);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+struct rtw89_iter_bitrate_mask_data {
+ struct rtw89_dev *rtwdev;
+ struct ieee80211_vif *vif;
+ const struct cfg80211_bitrate_mask *mask;
+};
+
+static void rtw89_ra_mask_info_update_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_iter_bitrate_mask_data *br_data = data;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct ieee80211_vif *vif = rtwvif_to_vif(rtwsta->rtwvif);
+
+ if (vif != br_data->vif)
+ return;
+
+ rtwsta->use_cfg_mask = true;
+ rtwsta->mask = *br_data->mask;
+ rtw89_phy_ra_updata_sta(br_data->rtwdev, sta);
+}
+
+static void rtw89_ra_mask_info_update(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw89_iter_bitrate_mask_data br_data = { .rtwdev = rtwdev,
+ .vif = vif,
+ .mask = mask};
+
+ ieee80211_iterate_stations_atomic(rtwdev->hw, rtw89_ra_mask_info_update_iter,
+ &br_data);
+}
+
+static int rtw89_ops_set_bitrate_mask(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_phy_rate_pattern_vif(rtwdev, vif, mask);
+ rtw89_ra_mask_info_update(rtwdev, vif, mask);
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+int rtw89_ops_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ if (rx_ant != hw->wiphy->available_antennas_rx)
+ return -EINVAL;
+
+ mutex_lock(&rtwdev->mutex);
+ hal->antenna_tx = tx_ant;
+ hal->antenna_rx = rx_ant;
+ mutex_unlock(&rtwdev->mutex);
+
+ return 0;
+}
+
+static
+int rtw89_ops_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ *tx_ant = hal->antenna_tx;
+ *rx_ant = hal->antenna_rx;
+
+ return 0;
+}
+
+static void rtw89_ops_sw_scan_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *mac_addr)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ mutex_lock(&rtwdev->mutex);
+ rtwdev->scanning = true;
+ rtw89_leave_lps(rtwdev);
+ rtw89_btc_ntfy_scan_start(rtwdev, RTW89_PHY_0, hal->current_band_type);
+ rtw89_chip_rfk_scan(rtwdev, true);
+ rtw89_hci_recalc_int_mit(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_sw_scan_complete(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_chip_rfk_scan(rtwdev, false);
+ rtw89_btc_ntfy_scan_finish(rtwdev, RTW89_PHY_0);
+ rtwdev->scanning = false;
+ rtwdev->dig.bypass_dig = true;
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_ops_reconfig_complete(struct ieee80211_hw *hw,
+ enum ieee80211_reconfig_type reconfig_type)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART)
+ rtw89_ser_recfg_done(rtwdev);
+}
+
+const struct ieee80211_ops rtw89_ops = {
+ .tx = rtw89_ops_tx,
+ .wake_tx_queue = rtw89_ops_wake_tx_queue,
+ .start = rtw89_ops_start,
+ .stop = rtw89_ops_stop,
+ .config = rtw89_ops_config,
+ .add_interface = rtw89_ops_add_interface,
+ .remove_interface = rtw89_ops_remove_interface,
+ .configure_filter = rtw89_ops_configure_filter,
+ .bss_info_changed = rtw89_ops_bss_info_changed,
+ .conf_tx = rtw89_ops_conf_tx,
+ .sta_state = rtw89_ops_sta_state,
+ .set_key = rtw89_ops_set_key,
+ .ampdu_action = rtw89_ops_ampdu_action,
+ .set_rts_threshold = rtw89_ops_set_rts_threshold,
+ .sta_statistics = rtw89_ops_sta_statistics,
+ .flush = rtw89_ops_flush,
+ .set_bitrate_mask = rtw89_ops_set_bitrate_mask,
+ .set_antenna = rtw89_ops_set_antenna,
+ .get_antenna = rtw89_ops_get_antenna,
+ .sw_scan_start = rtw89_ops_sw_scan_start,
+ .sw_scan_complete = rtw89_ops_sw_scan_complete,
+ .reconfig_complete = rtw89_ops_reconfig_complete,
+ .set_sar_specs = rtw89_ops_set_sar_specs,
+};
+EXPORT_SYMBOL(rtw89_ops);
diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c
new file mode 100644
index 000000000000..2c94762e4f93
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/pci.c
@@ -0,0 +1,3060 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2020 Realtek Corporation
+ */
+
+#include <linux/pci.h>
+
+#include "mac.h"
+#include "pci.h"
+#include "reg.h"
+#include "ser.h"
+
+static bool rtw89_pci_disable_clkreq;
+static bool rtw89_pci_disable_aspm_l1;
+static bool rtw89_pci_disable_l1ss;
+module_param_named(disable_clkreq, rtw89_pci_disable_clkreq, bool, 0644);
+module_param_named(disable_aspm_l1, rtw89_pci_disable_aspm_l1, bool, 0644);
+module_param_named(disable_aspm_l1ss, rtw89_pci_disable_l1ss, bool, 0644);
+MODULE_PARM_DESC(disable_clkreq, "Set Y to disable PCI clkreq support");
+MODULE_PARM_DESC(disable_aspm_l1, "Set Y to disable PCI ASPM L1 support");
+MODULE_PARM_DESC(disable_aspm_l1ss, "Set Y to disable PCI L1SS support");
+
+static int rtw89_pci_rst_bdram_pcie(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+ int ret;
+
+ rtw89_write32(rtwdev, R_AX_PCIE_INIT_CFG1,
+ rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) | B_AX_RST_BDRAM);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, val, !(val & B_AX_RST_BDRAM),
+ 1, RTW89_PCI_POLL_BDRAM_RST_CNT, false,
+ rtwdev, R_AX_PCIE_INIT_CFG1);
+
+ if (ret)
+ return -EBUSY;
+
+ return 0;
+}
+
+static u32 rtw89_pci_dma_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_dma_ring *bd_ring,
+ u32 cur_idx, bool tx)
+{
+ u32 cnt, cur_rp, wp, rp, len;
+
+ rp = bd_ring->rp;
+ wp = bd_ring->wp;
+ len = bd_ring->len;
+
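+ /* number of descriptors the hardware has completed (TX) or filled (RX)
+ * since the last poll, accounting for ring wrap-around
+ */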
+ cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
+ if (tx)
+ cnt = cur_rp >= rp ? cur_rp - rp : len - (rp - cur_rp);
+ else
+ cnt = cur_rp >= wp ? cur_rp - wp : len - (wp - cur_rp);
+
+ bd_ring->rp = cur_rp;
+
+ return cnt;
+}
+
+static u32 rtw89_pci_txbd_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
+ u32 addr_idx = bd_ring->addr_idx;
+ u32 cnt, idx;
+
+ idx = rtw89_read32(rtwdev, addr_idx);
+ cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, true);
+
+ return cnt;
+}
+
+static void rtw89_pci_release_fwcmd(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ u32 cnt, bool release_all)
+{
+ struct rtw89_pci_tx_data *tx_data;
+ struct sk_buff *skb;
+ u32 qlen;
+
+ while (cnt--) {
+ skb = skb_dequeue(&rtwpci->h2c_queue);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to pre-release fwcmd\n");
+ return;
+ }
+ skb_queue_tail(&rtwpci->h2c_release_queue, skb);
+ }
+
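+ /* unless releasing everything, keep the latest RTW89_PCI_MULTITAG skbs
+ * queued; presumably the hardware may still reference those H2C buffers
+ */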
+ qlen = skb_queue_len(&rtwpci->h2c_release_queue);
+ if (!release_all)
+ qlen = qlen > RTW89_PCI_MULTITAG ? qlen - RTW89_PCI_MULTITAG : 0;
+
+ while (qlen--) {
+ skb = skb_dequeue(&rtwpci->h2c_release_queue);
+ if (!skb) {
+ rtw89_err(rtwdev, "failed to release fwcmd\n");
+ return;
+ }
+ tx_data = RTW89_PCI_TX_SKB_CB(skb);
+ dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ }
+}
+
+static void rtw89_pci_reclaim_tx_fwcmd(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci)
+{
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ u32 cnt;
+
+ cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
+ if (!cnt)
+ return;
+ rtw89_pci_release_fwcmd(rtwdev, rtwpci, cnt, false);
+}
+
+static u32 rtw89_pci_rxbd_recalc(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ u32 addr_idx = bd_ring->addr_idx;
+ u32 cnt, idx;
+
+ idx = rtw89_read32(rtwdev, addr_idx);
+ cnt = rtw89_pci_dma_recalc(rtwdev, bd_ring, idx, false);
+
+ return cnt;
+}
+
+static void rtw89_pci_sync_skb_for_cpu(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info;
+ dma_addr_t dma;
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ dma = rx_info->dma;
+ dma_sync_single_for_cpu(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+}
+
+static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rx_info *rx_info;
+ dma_addr_t dma;
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ dma = rx_info->dma;
+ dma_sync_single_for_device(rtwdev->dev, dma, RTW89_PCI_RX_BUF_SIZE,
+ DMA_FROM_DEVICE);
+}
+
+static int rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev,
+ struct sk_buff *skb)
+{
+ struct rtw89_pci_rxbd_info *rxbd_info;
+ struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb);
+
+ rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data;
+ rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS);
+ rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS);
+ rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE);
+ rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG);
+
+ return 0;
+}
+
+static bool
+rtw89_skb_put_rx_data(struct rtw89_dev *rtwdev, bool fs, bool ls,
+ struct sk_buff *new,
+ const struct sk_buff *skb, u32 offset,
+ const struct rtw89_pci_rx_info *rx_info,
+ const struct rtw89_rx_desc_info *desc_info)
+{
+ u32 copy_len = rx_info->len - offset;
+
+ if (unlikely(skb_tailroom(new) < copy_len)) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "invalid rx data length bd_len=%d desc_len=%d offset=%d (fs=%d ls=%d)\n",
+ rx_info->len, desc_info->pkt_size, offset, fs, ls);
+ rtw89_hex_dump(rtwdev, RTW89_DBG_TXRX, "rx_data: ",
+ skb->data, rx_info->len);
+ /* length of a single segment skb is desc_info->pkt_size */
+ if (fs && ls) {
+ copy_len = desc_info->pkt_size;
+ } else {
+ rtw89_info(rtwdev, "drop rx data due to invalid length\n");
+ return false;
+ }
+ }
+
+ skb_put_data(new, skb->data + offset, copy_len);
+
+ return true;
+}
+
+static u32 rtw89_pci_rxbd_deliver_skbs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ struct rtw89_pci_rx_info *rx_info;
+ struct rtw89_rx_desc_info *desc_info = &rx_ring->diliver_desc;
+ struct sk_buff *new = rx_ring->diliver_skb;
+ struct sk_buff *skb;
+ u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 offset;
+ u32 cnt = 1;
+ bool fs, ls;
+ int ret;
+
+ skb = rx_ring->buf[bd_ring->wp];
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+
+ ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
+ bd_ring->wp, ret);
+ goto err_sync_device;
+ }
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ fs = rx_info->fs;
+ ls = rx_info->ls;
+
+ if (fs) {
+ if (new) {
+ rtw89_err(rtwdev, "skb should not be ready before first segment start\n");
+ goto err_sync_device;
+ }
+ if (desc_info->ready) {
+ rtw89_warn(rtwdev, "desc info should not be ready before first segment start\n");
+ goto err_sync_device;
+ }
+
+ rtw89_core_query_rxdesc(rtwdev, desc_info, skb->data, rxinfo_size);
+
+ new = dev_alloc_skb(desc_info->pkt_size);
+ if (!new)
+ goto err_sync_device;
+
+ rx_ring->diliver_skb = new;
+
+ /* first segment has RX desc */
+ offset = desc_info->offset;
+ offset += desc_info->long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
+ sizeof(struct rtw89_rxdesc_short);
+ } else {
+ offset = sizeof(struct rtw89_pci_rxbd_info);
+ if (!new) {
+ rtw89_warn(rtwdev, "no last skb\n");
+ goto err_sync_device;
+ }
+ }
+ if (!rtw89_skb_put_rx_data(rtwdev, fs, ls, new, skb, offset, rx_info, desc_info))
+ goto err_sync_device;
+ rtw89_pci_sync_skb_for_device(rtwdev, skb);
+ rtw89_pci_rxbd_increase(rx_ring, 1);
+
+ if (!desc_info->ready) {
+ rtw89_warn(rtwdev, "no rx desc information\n");
+ goto err_free_resource;
+ }
+ if (ls) {
+ rtw89_core_rx(rtwdev, desc_info, new);
+ rx_ring->diliver_skb = NULL;
+ desc_info->ready = false;
+ }
+
+ return cnt;
+
+err_sync_device:
+ rtw89_pci_sync_skb_for_device(rtwdev, skb);
+ rtw89_pci_rxbd_increase(rx_ring, 1);
+err_free_resource:
+ if (new)
+ dev_kfree_skb_any(new);
+ rx_ring->diliver_skb = NULL;
+ desc_info->ready = false;
+
+ return cnt;
+}
+
+static void rtw89_pci_rxbd_deliver(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ u32 cnt)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ u32 rx_cnt;
+
+ while (cnt && rtwdev->napi_budget_countdown > 0) {
+ rx_cnt = rtw89_pci_rxbd_deliver_skbs(rtwdev, rx_ring);
+ if (!rx_cnt) {
+ rtw89_err(rtwdev, "failed to deliver RXBD skb\n");
+
+ /* skip the rest of the RXBD bufs */
+ rtw89_pci_rxbd_increase(rx_ring, cnt);
+ break;
+ }
+
+ cnt -= rx_cnt;
+ }
+
+ rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+}
+
+static int rtw89_pci_poll_rxq_dma(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci, int budget)
+{
+ struct rtw89_pci_rx_ring *rx_ring;
+ int countdown = rtwdev->napi_budget_countdown;
+ u32 cnt;
+
+ rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RXQ];
+
+ cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
+ if (!cnt)
+ return 0;
+
+ cnt = min_t(u32, budget, cnt);
+
+ rtw89_pci_rxbd_deliver(rtwdev, rx_ring, cnt);
+
+ /* When flushing pending SKBs, the budget countdown may go below zero. */
+ if (rtwdev->napi_budget_countdown <= 0)
+ return budget;
+
+ return budget - countdown;
+}
+
+static void rtw89_pci_tx_status(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ struct sk_buff *skb, u8 tx_status)
+{
+ struct ieee80211_tx_info *info;
+
+ info = IEEE80211_SKB_CB(skb);
+ ieee80211_tx_info_clear_status(info);
+
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+ info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+ if (tx_status == RTW89_TX_DONE) {
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ tx_ring->tx_acked++;
+ } else {
+ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS)
+ rtw89_debug(rtwdev, RTW89_DBG_FW,
+ "failed to TX of status %x\n", tx_status);
+ switch (tx_status) {
+ case RTW89_TX_RETRY_LIMIT:
+ tx_ring->tx_retry_lmt++;
+ break;
+ case RTW89_TX_LIFE_TIME:
+ tx_ring->tx_life_time++;
+ break;
+ case RTW89_TX_MACID_DROP:
+ tx_ring->tx_mac_id_drop++;
+ break;
+ default:
+ rtw89_warn(rtwdev, "invalid TX status %x\n", tx_status);
+ break;
+ }
+ }
+
+ ieee80211_tx_status_ni(rtwdev->hw, skb);
+}
+
+static void rtw89_pci_reclaim_txbd(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_tx_wd *txwd;
+ u32 cnt;
+
+ cnt = rtw89_pci_txbd_recalc(rtwdev, tx_ring);
+ while (cnt--) {
+ txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
+ if (!txwd) {
+ rtw89_warn(rtwdev, "No busy txwd pages available\n");
+ break;
+ }
+
+ list_del_init(&txwd->list);
+ }
+}
+
+static void rtw89_pci_release_busy_txwd(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ struct rtw89_pci_tx_wd *txwd;
+ int i;
+
+ for (i = 0; i < wd_ring->page_num; i++) {
+ txwd = list_first_entry_or_null(&tx_ring->busy_pages, struct rtw89_pci_tx_wd, list);
+ if (!txwd)
+ break;
+
+ list_del_init(&txwd->list);
+ }
+}
+
+static void rtw89_pci_release_txwd_skb(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ struct rtw89_pci_tx_wd *txwd, u16 seq,
+ u8 tx_status)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_data *tx_data;
+ struct sk_buff *skb, *tmp;
+ u8 txch = tx_ring->txch;
+
+ if (!list_empty(&txwd->list)) {
+ rtw89_warn(rtwdev, "queue %d txwd %d is not idle\n",
+ txch, seq);
+ return;
+ }
+
+ /* currently, only a single frame per TXWD is supported */
+ if (skb_queue_len(&txwd->queue) != 1) {
+ rtw89_warn(rtwdev, "empty pending queue %d page %d\n",
+ txch, seq);
+ return;
+ }
+
+ skb_queue_walk_safe(&txwd->queue, skb, tmp) {
+ skb_unlink(skb, &txwd->queue);
+
+ tx_data = RTW89_PCI_TX_SKB_CB(skb);
+ dma_unmap_single(&rtwpci->pdev->dev, tx_data->dma, skb->len,
+ DMA_TO_DEVICE);
+
+ rtw89_pci_tx_status(rtwdev, tx_ring, skb, tx_status);
+ }
+
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
+}
+
+static void rtw89_pci_release_rpp(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rpp_fmt *rpp)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ struct rtw89_pci_tx_wd_ring *wd_ring;
+ struct rtw89_pci_tx_wd *txwd;
+ u16 seq;
+ u8 qsel, tx_status, txch;
+
+ seq = le32_get_bits(rpp->dword, RTW89_PCI_RPP_SEQ);
+ qsel = le32_get_bits(rpp->dword, RTW89_PCI_RPP_QSEL);
+ tx_status = le32_get_bits(rpp->dword, RTW89_PCI_RPP_TX_STATUS);
+ txch = rtw89_core_get_ch_dma(rtwdev, qsel);
+
+ if (txch == RTW89_TXCH_CH12) {
+ rtw89_warn(rtwdev, "should no fwcmd release report\n");
+ return;
+ }
+
+ tx_ring = &rtwpci->tx_rings[txch];
+ rtw89_pci_reclaim_txbd(rtwdev, tx_ring);
+ wd_ring = &tx_ring->wd_ring;
+ txwd = &wd_ring->pages[seq];
+
+ rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, seq, tx_status);
+}
+
+static void rtw89_pci_release_pending_txwd_skb(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ struct rtw89_pci_tx_wd *txwd;
+ int i;
+
+ for (i = 0; i < wd_ring->page_num; i++) {
+ txwd = &wd_ring->pages[i];
+
+ if (!list_empty(&txwd->list))
+ continue;
+
+ rtw89_pci_release_txwd_skb(rtwdev, tx_ring, txwd, i, RTW89_TX_MACID_DROP);
+ }
+}
+
+static u32 rtw89_pci_release_tx_skbs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ u32 max_cnt)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ struct rtw89_pci_rx_info *rx_info;
+ struct rtw89_pci_rpp_fmt *rpp;
+ struct rtw89_rx_desc_info desc_info = {};
+ struct sk_buff *skb;
+ u32 cnt = 0;
+ u32 rpp_size = sizeof(struct rtw89_pci_rpp_fmt);
+ u32 rxinfo_size = sizeof(struct rtw89_pci_rxbd_info);
+ u32 offset;
+ int ret;
+
+ skb = rx_ring->buf[bd_ring->wp];
+ rtw89_pci_sync_skb_for_cpu(rtwdev, skb);
+
+ ret = rtw89_pci_rxbd_info_update(rtwdev, skb);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to update %d RXBD info: %d\n",
+ bd_ring->wp, ret);
+ goto err_sync_device;
+ }
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ if (!rx_info->fs || !rx_info->ls) {
+ rtw89_err(rtwdev, "cannot process RP frame not set FS/LS\n");
+ return cnt;
+ }
+
+ rtw89_core_query_rxdesc(rtwdev, &desc_info, skb->data, rxinfo_size);
+
+ /* first segment has RX desc */
+ offset = desc_info.offset;
+ offset += desc_info.long_rxdesc ? sizeof(struct rtw89_rxdesc_long) :
+ sizeof(struct rtw89_rxdesc_short);
+ for (; offset + rpp_size <= rx_info->len; offset += rpp_size) {
+ rpp = (struct rtw89_pci_rpp_fmt *)(skb->data + offset);
+ rtw89_pci_release_rpp(rtwdev, rpp);
+ }
+
+ rtw89_pci_sync_skb_for_device(rtwdev, skb);
+ rtw89_pci_rxbd_increase(rx_ring, 1);
+ cnt++;
+
+ return cnt;
+
+err_sync_device:
+ rtw89_pci_sync_skb_for_device(rtwdev, skb);
+ return 0;
+}
+
+static void rtw89_pci_release_tx(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ u32 cnt)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ u32 release_cnt;
+
+ while (cnt) {
+ release_cnt = rtw89_pci_release_tx_skbs(rtwdev, rx_ring, cnt);
+ if (!release_cnt) {
+ rtw89_err(rtwdev, "failed to release TX skbs\n");
+
+ /* skip the rest of the RXBD bufs */
+ rtw89_pci_rxbd_increase(rx_ring, cnt);
+ break;
+ }
+
+ cnt -= release_cnt;
+ }
+
+ rtw89_write16(rtwdev, bd_ring->addr_idx, bd_ring->wp);
+}
+
+static int rtw89_pci_poll_rpq_dma(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci, int budget)
+{
+ struct rtw89_pci_rx_ring *rx_ring;
+ u32 cnt;
+ int work_done;
+
+ rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+
+ spin_lock_bh(&rtwpci->trx_lock);
+
+ cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
+ if (cnt == 0)
+ goto out_unlock;
+
+ rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+
+out_unlock:
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ /* always release all RPQ */
+ work_done = min_t(int, cnt, budget);
+ rtwdev->napi_budget_countdown -= work_done;
+
+ return work_done;
+}
+
+static void rtw89_pci_isr_rxd_unavail(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci)
+{
+ struct rtw89_pci_rx_ring *rx_ring;
+ struct rtw89_pci_dma_ring *bd_ring;
+ u32 reg_idx;
+ u16 hw_idx, hw_idx_next, host_idx;
+ int i;
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ bd_ring = &rx_ring->bd_ring;
+
+ reg_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ hw_idx = FIELD_GET(TXBD_HW_IDX_MASK, reg_idx);
+ host_idx = FIELD_GET(TXBD_HOST_IDX_MASK, reg_idx);
+ hw_idx_next = (hw_idx + 1) % bd_ring->len;
+
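+ /* if the next hardware index would catch up with the host index, the
+ * device has no free RX buffer descriptor left in this ring
+ */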
+ if (hw_idx_next == host_idx)
+ rtw89_warn(rtwdev, "%d RXD unavailable\n", i);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXRX,
+ "%d RXD unavailable, idx=0x%08x, len=%d\n",
+ i, reg_idx, bd_ring->len);
+ }
+}
+
+static void rtw89_pci_recognize_intrs(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci,
+ struct rtw89_pci_isrs *isrs)
+{
+ isrs->halt_c2h_isrs = rtw89_read32(rtwdev, R_AX_HISR0) & rtwpci->halt_c2h_intrs;
+ isrs->isrs[0] = rtw89_read32(rtwdev, R_AX_PCIE_HISR00) & rtwpci->intrs[0];
+ isrs->isrs[1] = rtw89_read32(rtwdev, R_AX_PCIE_HISR10) & rtwpci->intrs[1];
+
+ rtw89_write32(rtwdev, R_AX_HISR0, isrs->halt_c2h_isrs);
+ rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isrs->isrs[0]);
+ rtw89_write32(rtwdev, R_AX_PCIE_HISR10, isrs->isrs[1]);
+}
+
+static void rtw89_pci_clear_isr0(struct rtw89_dev *rtwdev, u32 isr00)
+{
+ /* write 1 clear */
+ rtw89_write32(rtwdev, R_AX_PCIE_HISR00, isr00);
+}
+
+static void rtw89_pci_enable_intr(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_HIMR0, rtwpci->halt_c2h_intrs);
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, rtwpci->intrs[0]);
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, rtwpci->intrs[1]);
+}
+
+static void rtw89_pci_disable_intr(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci)
+{
+ rtw89_write32(rtwdev, R_AX_HIMR0, 0);
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR00, 0);
+ rtw89_write32(rtwdev, R_AX_PCIE_HIMR10, 0);
+}
+
+static irqreturn_t rtw89_pci_interrupt_threadfn(int irq, void *dev)
+{
+ struct rtw89_dev *rtwdev = dev;
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_isrs isrs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtw89_pci_recognize_intrs(rtwdev, rtwpci, &isrs);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ if (unlikely(isrs.isrs[0] & B_AX_RDU_INT))
+ rtw89_pci_isr_rxd_unavail(rtwdev, rtwpci);
+
+ if (unlikely(isrs.halt_c2h_isrs & B_AX_HALT_C2H_INT_EN))
+ rtw89_ser_notify(rtwdev, rtw89_mac_get_err_status(rtwdev));
+
+ if (likely(rtwpci->running)) {
+ local_bh_disable();
+ napi_schedule(&rtwdev->napi);
+ local_bh_enable();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rtw89_pci_interrupt_handler(int irq, void *dev)
+{
+ struct rtw89_dev *rtwdev = dev;
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+ irqreturn_t irqret = IRQ_WAKE_THREAD;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+
+ /* An interrupt event already in flight can still trigger an interrupt
+ * even though we have done pci_stop() to turn off the IMR.
+ */
+ if (unlikely(!rtwpci->running)) {
+ irqret = IRQ_HANDLED;
+ goto exit;
+ }
+
+ rtw89_pci_disable_intr(rtwdev, rtwpci);
+exit:
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ return irqret;
+}
+
+#define case_TXCHADDRS(txch) \
+ case RTW89_TXCH_##txch: \
+ *addr_num = R_AX_##txch##_TXBD_NUM; \
+ *addr_idx = R_AX_##txch##_TXBD_IDX; \
+ *addr_bdram = R_AX_##txch##_BDRAM_CTRL; \
+ *addr_desa_l = R_AX_##txch##_TXBD_DESA_L; \
+ *addr_desa_h = R_AX_##txch##_TXBD_DESA_H; \
+ break
+
+static int rtw89_pci_get_txch_addrs(enum rtw89_tx_channel txch,
+ u32 *addr_num,
+ u32 *addr_idx,
+ u32 *addr_bdram,
+ u32 *addr_desa_l,
+ u32 *addr_desa_h)
+{
+ switch (txch) {
+ case_TXCHADDRS(ACH0);
+ case_TXCHADDRS(ACH1);
+ case_TXCHADDRS(ACH2);
+ case_TXCHADDRS(ACH3);
+ case_TXCHADDRS(ACH4);
+ case_TXCHADDRS(ACH5);
+ case_TXCHADDRS(ACH6);
+ case_TXCHADDRS(ACH7);
+ case_TXCHADDRS(CH8);
+ case_TXCHADDRS(CH9);
+ case_TXCHADDRS(CH10);
+ case_TXCHADDRS(CH11);
+ case_TXCHADDRS(CH12);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#undef case_TXCHADDRS
+
+#define case_RXCHADDRS(rxch) \
+ case RTW89_RXCH_##rxch: \
+ *addr_num = R_AX_##rxch##_RXBD_NUM; \
+ *addr_idx = R_AX_##rxch##_RXBD_IDX; \
+ *addr_desa_l = R_AX_##rxch##_RXBD_DESA_L; \
+ *addr_desa_h = R_AX_##rxch##_RXBD_DESA_H; \
+ break
+
+static int rtw89_pci_get_rxch_addrs(enum rtw89_rx_channel rxch,
+ u32 *addr_num,
+ u32 *addr_idx,
+ u32 *addr_desa_l,
+ u32 *addr_desa_h)
+{
+ switch (rxch) {
+ case_RXCHADDRS(RXQ);
+ case_RXCHADDRS(RPQ);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#undef case_RXCHADDRS
+
+static u32 rtw89_pci_get_avail_txbd_num(struct rtw89_pci_tx_ring *ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &ring->bd_ring;
+
+ /* one descriptor is reserved to tell whether the ring is full */
+ if (bd_ring->rp > bd_ring->wp)
+ return bd_ring->rp - bd_ring->wp - 1;
+
+ return bd_ring->len - (bd_ring->wp - bd_ring->rp) - 1;
+}
+
+static
+u32 __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[RTW89_TXCH_CH12];
+ u32 cnt;
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ rtw89_pci_reclaim_tx_fwcmd(rtwdev, rtwpci);
+ cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ return cnt;
+}
+
+static u32 __rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
+ u8 txch)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ u32 bd_cnt, wd_cnt, min_cnt = 0;
+ struct rtw89_pci_rx_ring *rx_ring;
+ u32 cnt;
+
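+ /* TX completions arrive as release reports on the RPQ ring; polling it
+ * below can free BD/WD resources when none appear to be available
+ */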
+ rx_ring = &rtwpci->rx_rings[RTW89_RXCH_RPQ];
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ wd_cnt = wd_ring->curr_num;
+
+ if (wd_cnt == 0 || bd_cnt == 0) {
+ cnt = rtw89_pci_rxbd_recalc(rtwdev, rx_ring);
+ if (!cnt)
+ goto out_unlock;
+ rtw89_pci_release_tx(rtwdev, rx_ring, cnt);
+ }
+
+ bd_cnt = rtw89_pci_get_avail_txbd_num(tx_ring);
+ wd_cnt = wd_ring->curr_num;
+ min_cnt = min(bd_cnt, wd_cnt);
+ if (min_cnt == 0)
+ rtw89_warn(rtwdev, "still no tx resource after reclaim\n");
+
+out_unlock:
+ spin_unlock_bh(&rtwpci->trx_lock);
+
+ return min_cnt;
+}
+
+static u32 rtw89_pci_check_and_reclaim_tx_resource(struct rtw89_dev *rtwdev,
+ u8 txch)
+{
+ if (txch == RTW89_TXCH_CH12)
+ return __rtw89_pci_check_and_reclaim_tx_fwcmd_resource(rtwdev);
+
+ return __rtw89_pci_check_and_reclaim_tx_resource(rtwdev, txch);
+}
+
+static void __rtw89_pci_tx_kick_off(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
+ u32 host_idx, addr;
+
+ addr = bd_ring->addr_idx;
+ host_idx = bd_ring->wp;
+ rtw89_write16(rtwdev, addr, host_idx);
+}
+
+static void rtw89_pci_tx_bd_ring_update(struct rtw89_dev *rtwdev, struct rtw89_pci_tx_ring *tx_ring,
+ int n_txbd)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
+ u32 host_idx, len;
+
+ len = bd_ring->len;
+ host_idx = bd_ring->wp + n_txbd;
+ host_idx = host_idx < len ? host_idx : host_idx - len;
+
+ bd_ring->wp = host_idx;
+}
+
+static void rtw89_pci_ops_tx_kick_off(struct rtw89_dev *rtwdev, u8 txch)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ __rtw89_pci_tx_kick_off(rtwdev, tx_ring);
+ spin_unlock_bh(&rtwpci->trx_lock);
+}
+
+static void __pci_flush_txch(struct rtw89_dev *rtwdev, u8 txch, bool drop)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring = &rtwpci->tx_rings[txch];
+ struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
+ u32 cur_idx, cur_rp;
+ u8 i;
+
+ /* Because the time taken by the I/O is somewhat dynamic, it's hard to
+ * define a reasonable fixed total timeout for the read_poll_timeout*
+ * helpers. Instead, we bound the number of polling iterations, so we
+ * just use a for loop with udelay here.
+ */
+ for (i = 0; i < 60; i++) {
+ cur_idx = rtw89_read32(rtwdev, bd_ring->addr_idx);
+ cur_rp = FIELD_GET(TXBD_HW_IDX_MASK, cur_idx);
+ if (cur_rp == bd_ring->wp)
+ return;
+
+ udelay(1);
+ }
+
+ if (!drop)
+ rtw89_info(rtwdev, "timed out flushing pci txch: %d\n", txch);
+}
+
+static void __rtw89_pci_ops_flush_txchs(struct rtw89_dev *rtwdev, u32 txchs,
+ bool drop)
+{
+ u8 i;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ /* It may be unnecessary to flush FWCMD queue. */
+ if (i == RTW89_TXCH_CH12)
+ continue;
+
+ if (txchs & BIT(i))
+ __pci_flush_txch(rtwdev, i, drop);
+ }
+}
+
+static void rtw89_pci_ops_flush_queues(struct rtw89_dev *rtwdev, u32 queues,
+ bool drop)
+{
+ __rtw89_pci_ops_flush_txchs(rtwdev, BIT(RTW89_TXCH_NUM) - 1, drop);
+}
+
+static int rtw89_pci_txwd_submit(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ struct rtw89_pci_tx_wd *txwd,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_txwd_body *txwd_body;
+ struct rtw89_txwd_info *txwd_info;
+ struct rtw89_pci_tx_wp_info *txwp_info;
+ struct rtw89_pci_tx_addr_info_32 *txaddr_info;
+ struct pci_dev *pdev = rtwpci->pdev;
+ struct sk_buff *skb = tx_req->skb;
+ struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
+ bool en_wd_info = desc_info->en_wd_info;
+ u32 txwd_len;
+ u32 txwp_len;
+ u32 txaddr_info_len;
+ dma_addr_t dma;
+ int ret;
+
+ rtw89_core_fill_txdesc(rtwdev, desc_info, txwd->vaddr);
+
+ dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma)) {
+ rtw89_err(rtwdev, "failed to map skb dma data\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ tx_data->dma = dma;
+
+ txaddr_info_len = sizeof(*txaddr_info);
+ txwp_len = sizeof(*txwp_info);
+ txwd_len = sizeof(*txwd_body);
+ txwd_len += en_wd_info ? sizeof(*txwd_info) : 0;
+
+ txwp_info = txwd->vaddr + txwd_len;
+ txwp_info->seq0 = cpu_to_le16(txwd->seq | RTW89_PCI_TXWP_VALID);
+ txwp_info->seq1 = 0;
+ txwp_info->seq2 = 0;
+ txwp_info->seq3 = 0;
+
+ tx_ring->tx_cnt++;
+ txaddr_info = txwd->vaddr + txwd_len + txwp_len;
+ txaddr_info->length = cpu_to_le16(skb->len);
+ txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS |
+ RTW89_PCI_ADDR_NUM(1));
+ txaddr_info->dma = cpu_to_le32(dma);
+
+ txwd->len = txwd_len + txwp_len + txaddr_info_len;
+
+ skb_queue_tail(&txwd->queue, skb);
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ struct rtw89_pci_tx_bd_32 *txbd,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ struct rtw89_txwd_body *txwd_body;
+ struct pci_dev *pdev = rtwpci->pdev;
+ struct sk_buff *skb = tx_req->skb;
+ struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
+ dma_addr_t dma;
+
+ txwd_body = (struct rtw89_txwd_body *)skb_push(skb, sizeof(*txwd_body));
+ memset(txwd_body, 0, sizeof(*txwd_body));
+ rtw89_core_fill_txdesc(rtwdev, desc_info, txwd_body);
+
+ dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma)) {
+ rtw89_err(rtwdev, "failed to map fwcmd dma data\n");
+ return -EBUSY;
+ }
+
+ tx_data->dma = dma;
+ txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ txbd->length = cpu_to_le16(skb->len);
+ txbd->dma = cpu_to_le32(tx_data->dma);
+ skb_queue_tail(&rtwpci->h2c_queue, skb);
+
+ rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
+
+ return 0;
+}
+
+static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ struct rtw89_pci_tx_bd_32 *txbd,
+ struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_pci_tx_wd *txwd;
+ int ret;
+
+ /* The FWCMD queue doesn't have WD pages. Instead, it submits the CMD
+ * buffer with the WD BODY only, so there is no need to check the free
+ * pages of the WD ring here.
+ */
+ if (tx_ring->txch == RTW89_TXCH_CH12)
+ return rtw89_pci_fwcmd_submit(rtwdev, tx_ring, txbd, tx_req);
+
+ txwd = rtw89_pci_dequeue_txwd(tx_ring);
+ if (!txwd) {
+ rtw89_err(rtwdev, "no available TXWD\n");
+ ret = -ENOSPC;
+ goto err;
+ }
+
+ ret = rtw89_pci_txwd_submit(rtwdev, tx_ring, txwd, tx_req);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to submit TXWD %d\n", txwd->seq);
+ goto err_enqueue_wd;
+ }
+
+ list_add_tail(&txwd->list, &tx_ring->busy_pages);
+
+ txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS);
+ txbd->length = cpu_to_le16(txwd->len);
+ txbd->dma = cpu_to_le32(txwd->paddr);
+
+ rtw89_pci_tx_bd_ring_update(rtwdev, tx_ring, 1);
+
+ return 0;
+
+err_enqueue_wd:
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
+err:
+ return ret;
+}
+
+static int rtw89_pci_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req,
+ u8 txch)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ struct rtw89_pci_tx_bd_32 *txbd;
+ u32 n_avail_txbd;
+ int ret = 0;
+
+ /* check the tx type and dma channel for fw cmd queue */
+ if ((txch == RTW89_TXCH_CH12 ||
+ tx_req->tx_type == RTW89_CORE_TX_TYPE_FWCMD) &&
+ (txch != RTW89_TXCH_CH12 ||
+ tx_req->tx_type != RTW89_CORE_TX_TYPE_FWCMD)) {
+ rtw89_err(rtwdev, "only fw cmd uses dma channel 12\n");
+ return -EINVAL;
+ }
+
+ tx_ring = &rtwpci->tx_rings[txch];
+ spin_lock_bh(&rtwpci->trx_lock);
+
+ n_avail_txbd = rtw89_pci_get_avail_txbd_num(tx_ring);
+ if (n_avail_txbd == 0) {
+ rtw89_err(rtwdev, "no available TXBD\n");
+ ret = -ENOSPC;
+ goto err_unlock;
+ }
+
+ txbd = rtw89_pci_get_next_txbd(tx_ring);
+ ret = rtw89_pci_txbd_submit(rtwdev, tx_ring, txbd, tx_req);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to submit TXBD\n");
+ goto err_unlock;
+ }
+
+ spin_unlock_bh(&rtwpci->trx_lock);
+ return 0;
+
+err_unlock:
+ spin_unlock_bh(&rtwpci->trx_lock);
+ return ret;
+}
+
+static int rtw89_pci_ops_tx_write(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req)
+{
+ struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info;
+ int ret;
+
+ ret = rtw89_pci_tx_write(rtwdev, tx_req, desc_info->ch_dma);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to TX Queue %d\n", desc_info->ch_dma);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct rtw89_pci_bd_ram bd_ram_table[RTW89_TXCH_NUM] = {
+ [RTW89_TXCH_ACH0] = {.start_idx = 0, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH1] = {.start_idx = 5, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH2] = {.start_idx = 10, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH3] = {.start_idx = 15, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH4] = {.start_idx = 20, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH5] = {.start_idx = 25, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH6] = {.start_idx = 30, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_ACH7] = {.start_idx = 35, .max_num = 5, .min_num = 2},
+ [RTW89_TXCH_CH8] = {.start_idx = 40, .max_num = 5, .min_num = 1},
+ [RTW89_TXCH_CH9] = {.start_idx = 45, .max_num = 5, .min_num = 1},
+ [RTW89_TXCH_CH10] = {.start_idx = 50, .max_num = 5, .min_num = 1},
+ [RTW89_TXCH_CH11] = {.start_idx = 55, .max_num = 5, .min_num = 1},
+ [RTW89_TXCH_CH12] = {.start_idx = 60, .max_num = 4, .min_num = 1},
+};
+
+static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ struct rtw89_pci_rx_ring *rx_ring;
+ struct rtw89_pci_dma_ring *bd_ring;
+ const struct rtw89_pci_bd_ram *bd_ram;
+ u32 addr_num;
+ u32 addr_bdram;
+ u32 addr_desa_l;
+ u32 val32;
+ int i;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ bd_ring = &tx_ring->bd_ring;
+ bd_ram = &bd_ram_table[i];
+ addr_num = bd_ring->addr_num;
+ addr_bdram = bd_ring->addr_bdram;
+ addr_desa_l = bd_ring->addr_desa_l;
+ bd_ring->wp = 0;
+ bd_ring->rp = 0;
+
+ val32 = FIELD_PREP(BDRAM_SIDX_MASK, bd_ram->start_idx) |
+ FIELD_PREP(BDRAM_MAX_MASK, bd_ram->max_num) |
+ FIELD_PREP(BDRAM_MIN_MASK, bd_ram->min_num);
+
+ rtw89_write16(rtwdev, addr_num, bd_ring->len);
+ rtw89_write32(rtwdev, addr_bdram, val32);
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ }
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ bd_ring = &rx_ring->bd_ring;
+ addr_num = bd_ring->addr_num;
+ addr_desa_l = bd_ring->addr_desa_l;
+ bd_ring->wp = 0;
+ bd_ring->rp = 0;
+ rx_ring->diliver_skb = NULL;
+ rx_ring->diliver_desc.ready = false;
+
+ rtw89_write16(rtwdev, addr_num, bd_ring->len);
+ rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma);
+ }
+}
+
+static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ rtw89_pci_release_busy_txwd(rtwdev, tx_ring);
+ rtw89_pci_release_pending_txwd_skb(rtwdev, tx_ring);
+}
+
+static void rtw89_pci_ops_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ int txch;
+
+ rtw89_pci_reset_trx_rings(rtwdev);
+
+ spin_lock_bh(&rtwpci->trx_lock);
+ for (txch = 0; txch < RTW89_TXCH_NUM; txch++) {
+ if (txch == RTW89_TXCH_CH12) {
+ rtw89_pci_release_fwcmd(rtwdev, rtwpci,
+ skb_queue_len(&rtwpci->h2c_queue), true);
+ continue;
+ }
+ rtw89_pci_release_tx_ring(rtwdev, &rtwpci->tx_rings[txch]);
+ }
+ spin_unlock_bh(&rtwpci->trx_lock);
+}
+
+static int rtw89_pci_ops_start(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+
+ rtw89_core_napi_start(rtwdev);
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtwpci->running = true;
+ rtw89_pci_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ return 0;
+}
+
+static void rtw89_pci_ops_stop(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ rtwpci->running = false;
+ rtw89_pci_disable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+
+ synchronize_irq(pdev->irq);
+ rtw89_core_napi_stop(rtwdev);
+}
+
+static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data);
+
+static u32 rtw89_pci_ops_read32_cmac(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ u32 val = readl(rtwpci->mmap + addr);
+ int count;
+
+ for (count = 0; ; count++) {
+ if (val != RTW89_R32_DEAD)
+ return val;
+ if (count >= MAC_REG_POOL_COUNT) {
+ rtw89_warn(rtwdev, "addr %#x = %#x\n", addr, val);
+ return RTW89_R32_DEAD;
+ }
+ rtw89_pci_ops_write32(rtwdev, R_AX_CK_EN, B_AX_CMAC_ALLCKEN);
+ val = readl(rtwpci->mmap + addr);
+ }
+
+ return val;
+}
+
+static u8 rtw89_pci_ops_read8(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ u32 addr32, val32, shift;
+
+ if (!ACCESS_CMAC(addr))
+ return readb(rtwpci->mmap + addr);
+
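+ /* CMAC addresses go through the 32-bit read path with its dead-value
+ * retry, so fetch the aligned dword and shift out the requested byte
+ */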
+ addr32 = addr & ~0x3;
+ shift = (addr & 0x3) * 8;
+ val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
+ return val32 >> shift;
+}
+
+static u16 rtw89_pci_ops_read16(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ u32 addr32, val32, shift;
+
+ if (!ACCESS_CMAC(addr))
+ return readw(rtwpci->mmap + addr);
+
+ addr32 = addr & ~0x3;
+ shift = (addr & 0x3) * 8;
+ val32 = rtw89_pci_ops_read32_cmac(rtwdev, addr32);
+ return val32 >> shift;
+}
+
+static u32 rtw89_pci_ops_read32(struct rtw89_dev *rtwdev, u32 addr)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (!ACCESS_CMAC(addr))
+ return readl(rtwpci->mmap + addr);
+
+ return rtw89_pci_ops_read32_cmac(rtwdev, addr);
+}
+
+static void rtw89_pci_ops_write8(struct rtw89_dev *rtwdev, u32 addr, u8 data)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ writeb(data, rtwpci->mmap + addr);
+}
+
+static void rtw89_pci_ops_write16(struct rtw89_dev *rtwdev, u32 addr, u16 data)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ writew(data, rtwpci->mmap + addr);
+}
+
+static void rtw89_pci_ops_write32(struct rtw89_dev *rtwdev, u32 addr, u32 data)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ writel(data, rtwpci->mmap + addr);
+}
+
+static void rtw89_pci_ctrl_dma_all(struct rtw89_dev *rtwdev, bool enable)
+{
+ if (enable) {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_TXHCI_EN | B_AX_RXHCI_EN);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
+ B_AX_STOP_PCIEIO);
+ } else {
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1,
+ B_AX_STOP_PCIEIO);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_TXHCI_EN | B_AX_RXHCI_EN);
+ }
+}
+
+static int rtw89_pci_check_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 rw_bit)
+{
+ u16 val;
+
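+ /* Program the MDIO address, select the PHY page for this address
+ * range and PCIe generation, trigger the access and poll until the
+ * hardware clears the read/write flag.
+ */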
+ rtw89_write8(rtwdev, R_AX_MDIO_CFG, addr & 0x1F);
+
+ val = rtw89_read16(rtwdev, R_AX_MDIO_CFG);
+ switch (speed) {
+ case PCIE_PHY_GEN1:
+ if (addr < 0x20)
+ val = u16_replace_bits(val, MDIO_PG0_G1, B_AX_MDIO_PHY_ADDR_MASK);
+ else
+ val = u16_replace_bits(val, MDIO_PG1_G1, B_AX_MDIO_PHY_ADDR_MASK);
+ break;
+ case PCIE_PHY_GEN2:
+ if (addr < 0x20)
+ val = u16_replace_bits(val, MDIO_PG0_G2, B_AX_MDIO_PHY_ADDR_MASK);
+ else
+ val = u16_replace_bits(val, MDIO_PG1_G2, B_AX_MDIO_PHY_ADDR_MASK);
+ break;
+ default:
+ rtw89_err(rtwdev, "[ERR]Error Speed %d!\n", speed);
+ return -EINVAL;
+ }
+ rtw89_write16(rtwdev, R_AX_MDIO_CFG, val);
+ rtw89_write16_set(rtwdev, R_AX_MDIO_CFG, rw_bit);
+
+ return read_poll_timeout(rtw89_read16, val, !(val & rw_bit), 10, 2000,
+ false, rtwdev, R_AX_MDIO_CFG);
+}
+
+static int
+rtw89_read16_mdio(struct rtw89_dev *rtwdev, u8 addr, u8 speed, u16 *val)
+{
+ int ret;
+
+ ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_RFLAG);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]MDIO R16 0x%X fail ret=%d!\n", addr, ret);
+ return ret;
+ }
+ *val = rtw89_read16(rtwdev, R_AX_MDIO_RDATA);
+
+ return 0;
+}
+
+static int
+rtw89_write16_mdio(struct rtw89_dev *rtwdev, u8 addr, u16 data, u8 speed)
+{
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_MDIO_WDATA, data);
+ ret = rtw89_pci_check_mdio(rtwdev, addr, speed, B_AX_MDIO_WFLAG);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]MDIO W16 0x%X = %x fail ret=%d!\n", addr, data, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int rtw89_write16_mdio_set(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
+{
+ int ret;
+ u16 val;
+
+ ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, addr, val | mask, speed);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_write16_mdio_clr(struct rtw89_dev *rtwdev, u8 addr, u16 mask, u8 speed)
+{
+ int ret;
+ u16 val;
+
+ ret = rtw89_read16_mdio(rtwdev, addr, speed, &val);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, addr, val & ~mask, speed);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int rtw89_dbi_write8(struct rtw89_dev *rtwdev, u16 addr, u8 data)
+{
+ u16 write_addr;
+ u16 remainder = addr & ~(B_AX_DBI_ADDR_MSK | B_AX_DBI_WREN_MSK);
+ u8 flag;
+ int ret;
+
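+ /* DBI accesses are word-aligned: encode the byte lane as a write
+ * enable bit, place the data in the matching lane and poll the write
+ * flag until the hardware finishes.
+ */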
+ write_addr = addr & B_AX_DBI_ADDR_MSK;
+ write_addr |= u16_encode_bits(BIT(remainder), B_AX_DBI_WREN_MSK);
+ rtw89_write8(rtwdev, R_AX_DBI_WDATA + remainder, data);
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, write_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_WFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+ if (ret)
+ WARN(flag, "failed to write to DBI register, addr=0x%04x\n",
+ addr);
+
+ return ret;
+}
+
+static int rtw89_dbi_read8(struct rtw89_dev *rtwdev, u16 addr, u8 *value)
+{
+ u16 read_addr = addr & B_AX_DBI_ADDR_MSK;
+ u8 flag;
+ int ret;
+
+ rtw89_write16(rtwdev, R_AX_DBI_FLAG, read_addr);
+ rtw89_write8(rtwdev, R_AX_DBI_FLAG + 2, B_AX_DBI_RFLAG >> 16);
+
+ ret = read_poll_timeout_atomic(rtw89_read8, flag, !flag, 10,
+ 10 * RTW89_PCI_WR_RETRY_CNT, false,
+ rtwdev, R_AX_DBI_FLAG + 2);
+
+ if (!ret) {
+ read_addr = R_AX_DBI_RDATA + (addr & 3);
+ *value = rtw89_read8(rtwdev, read_addr);
+ } else {
+ WARN(1, "failed to read DBI register, addr=0x%04x\n", addr);
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int rtw89_dbi_write8_set(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ if (ret)
+ return ret;
+
+ value |= bit;
+ ret = rtw89_dbi_write8(rtwdev, addr, value);
+
+ return ret;
+}
+
+static int rtw89_dbi_write8_clr(struct rtw89_dev *rtwdev, u16 addr, u8 bit)
+{
+ u8 value;
+ int ret;
+
+ ret = rtw89_dbi_read8(rtwdev, addr, &value);
+ if (ret)
+ return ret;
+
+ value &= ~bit;
+ ret = rtw89_dbi_write8(rtwdev, addr, value);
+
+ return ret;
+}
+
+static int
+__get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate)
+{
+ u16 val, tar;
+ int ret;
+
+ /* Enable counter */
+ ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
+ phy_rate);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val | B_AX_CLK_CALIB_EN,
+ phy_rate);
+ if (ret)
+ return ret;
+
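+ /* Let the calibration counter accumulate before sampling the target */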
+ fsleep(300);
+
+ ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &tar);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val & ~B_AX_CLK_CALIB_EN,
+ phy_rate);
+ if (ret)
+ return ret;
+
+ tar = tar & 0x0FFF;
+ if (tar == 0 || tar == 0x0FFF) {
+ rtw89_err(rtwdev, "[ERR]Get target failed.\n");
+ return -EINVAL;
+ }
+
+ *target = tar;
+
+ return 0;
+}
+
+static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en)
+{
+ enum rtw89_pcie_phy phy_rate;
+ u16 val16, mgn_set, div_set, tar;
+ u8 val8, bdr_ori;
+ bool l1_flag = false;
+ int ret = 0;
+
+ if ((rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) ||
+ rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_PHY_RATE, &val8);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_PHY_RATE);
+ return ret;
+ }
+
+ if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x1) {
+ phy_rate = PCIE_PHY_GEN1;
+ } else if (FIELD_GET(RTW89_PCIE_PHY_RATE_MASK, val8) == 0x2) {
+ phy_rate = PCIE_PHY_GEN2;
+ } else {
+ rtw89_err(rtwdev, "[ERR]PCIe PHY rate %#x not support\n", val8);
+ return -EOPNOTSUPP;
+ }
+ /* Disable L1BD */
+ ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_L1_CTRL, &bdr_ori);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbi_r8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ return ret;
+ }
+
+ if (bdr_ori & RTW89_PCIE_BIT_L1) {
+ ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL,
+ bdr_ori & ~RTW89_PCIE_BIT_L1);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ return ret;
+ }
+ l1_flag = true;
+ }
+
+ ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+
+ if (val16 & B_AX_CALIB_EN) {
+ ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1,
+ val16 & ~B_AX_CALIB_EN, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+ }
+
+ if (!autook_en)
+ goto end;
+ /* Set div */
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_CTRL_PPR_V1, B_AX_DIV, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+
+ /* Obtain div and margin */
+ ret = __get_target(rtwdev, &tar, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]1st get target fail %d\n", ret);
+ goto end;
+ }
+
+ mgn_set = tar * INTF_INTGRA_HOSTREF_V1 / INTF_INTGRA_MINREF_V1 - tar;
+
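+ /* Scale the margin down by a divider so it fits the 4-bit margin field */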
+ if (mgn_set >= 128) {
+ div_set = 0x0003;
+ mgn_set = 0x000F;
+ } else if (mgn_set >= 64) {
+ div_set = 0x0003;
+ mgn_set >>= 3;
+ } else if (mgn_set >= 32) {
+ div_set = 0x0002;
+ mgn_set >>= 2;
+ } else if (mgn_set >= 16) {
+ div_set = 0x0001;
+ mgn_set >>= 1;
+ } else if (mgn_set == 0) {
+ rtw89_err(rtwdev, "[ERR]cal mgn is 0,tar = %d\n", tar);
+ goto end;
+ } else {
+ div_set = 0x0000;
+ }
+
+ ret = rtw89_read16_mdio(rtwdev, RAC_CTRL_PPR_V1, phy_rate, &val16);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_r16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+
+ val16 |= u16_encode_bits(div_set, B_AX_DIV);
+
+ ret = rtw89_write16_mdio(rtwdev, RAC_CTRL_PPR_V1, val16, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+
+ ret = __get_target(rtwdev, &tar, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]2nd get target fail %d\n", ret);
+ goto end;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_HCI, "[TRACE]target = 0x%X, div = 0x%X, margin = 0x%X\n",
+ tar, div_set, mgn_set);
+ ret = rtw89_write16_mdio(rtwdev, RAC_SET_PPR_V1,
+ (tar & 0x0FFF) | (mgn_set << 12), phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_SET_PPR_V1);
+ goto end;
+ }
+
+ /* Enable function */
+ ret = rtw89_write16_mdio_set(rtwdev, RAC_CTRL_PPR_V1, B_AX_CALIB_EN, phy_rate);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]mdio_w16_pcie %X\n", RAC_CTRL_PPR_V1);
+ goto end;
+ }
+
+ /* CLK delay = 0 */
+ ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL, PCIE_CLKDLY_HW_0);
+
+end:
+ /* Restore L1BD to its original value */
+ if (l1_flag) {
+ ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_L1_CTRL, bdr_ori);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR]dbi_w8_pcie %X\n", RTW89_PCIE_L1_CTRL);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int rtw89_pci_deglitch_setting(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return 0;
+
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN1);
+ if (ret)
+ return ret;
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA24, B_AX_DEGLITCH,
+ PCIE_PHY_GEN2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rtw89_pci_rxdma_prefth(struct rtw89_dev *rtwdev)
+{
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_DIS_RXDMA_PRE);
+}
+
+static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN);
+}
+
+static u32 rtw89_pci_l2_rxen_lat(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return 0;
+
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
+ PCIE_PHY_GEN1);
+ if (ret)
+ return ret;
+
+ ret = rtw89_write16_mdio_clr(rtwdev, RAC_ANA26, B_AX_RXEN,
+ PCIE_PHY_GEN2);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN);
+}
+
+static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id != RTL8852A)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_WLSUS_AFT_PDN);
+}
+
+static void rtw89_pci_set_sic(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_EXP_CTRL,
+ B_AX_SIC_EN_FORCE_CLKREQ);
+}
+
+static void rtw89_pci_set_dbg(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DBG_CTRL,
+ B_AX_ASFF_FULL_NO_STK | B_AX_EN_STUCK_DBG);
+
+ if (rtwdev->chip->chip_id == RTL8852A)
+ rtw89_write32_set(rtwdev, R_AX_PCIE_EXP_CTRL,
+ B_AX_EN_CHKDSC_NO_RX_STUCK);
+}
+
+static void rtw89_pci_clr_idx_all(struct rtw89_dev *rtwdev)
+{
+ u32 val = B_AX_CLR_ACH0_IDX | B_AX_CLR_ACH1_IDX | B_AX_CLR_ACH2_IDX |
+ B_AX_CLR_ACH3_IDX | B_AX_CLR_CH8_IDX | B_AX_CLR_CH9_IDX |
+ B_AX_CLR_CH12_IDX;
+
+ if (rtwdev->chip->chip_id == RTL8852A)
+ val |= B_AX_CLR_ACH4_IDX | B_AX_CLR_ACH5_IDX |
+ B_AX_CLR_ACH6_IDX | B_AX_CLR_ACH7_IDX;
+ /* clear DMA indexes */
+ rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR1, val);
+ if (rtwdev->chip->chip_id == RTL8852A)
+ rtw89_write32_set(rtwdev, R_AX_TXBD_RWPTR_CLR2,
+ B_AX_CLR_CH10_IDX | B_AX_CLR_CH11_IDX);
+ rtw89_write32_set(rtwdev, R_AX_RXBD_RWPTR_CLR,
+ B_AX_CLR_RXQ_IDX | B_AX_CLR_RPQ_IDX);
+}
+
+static int rtw89_pci_ops_deinit(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852A) {
+ /* ltr sw trigger */
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_IDLE);
+ }
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
+ rtw89_pci_clr_idx_all(rtwdev);
+
+ return 0;
+}
+
+static int rtw89_pci_ops_mac_pre_init(struct rtw89_dev *rtwdev)
+{
+ u32 dma_busy;
+ u32 check;
+ u32 lbc;
+ int ret;
+
+ rtw89_pci_rxdma_prefth(rtwdev);
+ rtw89_pci_l1off_pwroff(rtwdev);
+ rtw89_pci_deglitch_setting(rtwdev);
+ ret = rtw89_pci_l2_rxen_lat(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] pcie l2 rxen lat %d\n", ret);
+ return ret;
+ }
+
+ rtw89_pci_aphy_pwrcut(rtwdev);
+ rtw89_pci_hci_ldo(rtwdev);
+
+ ret = rtw89_pci_auto_refclk_cal(rtwdev, false);
+ if (ret) {
+ rtw89_err(rtwdev, "[ERR] pcie autok fail %d\n", ret);
+ return ret;
+ }
+
+ rtw89_pci_set_sic(rtwdev);
+ rtw89_pci_set_dbg(rtwdev);
+
+ if (rtwdev->chip->chip_id == RTL8852A)
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_AUXCLK_GATE);
+
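+ /* Enable the local-bus watchdog with a 128us timer */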
+ lbc = rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG);
+ lbc = u32_replace_bits(lbc, RTW89_MAC_LBC_TMR_128US, B_AX_LBC_TIMER);
+ lbc |= B_AX_LBC_FLAG | B_AX_LBC_EN;
+ rtw89_write32(rtwdev, R_AX_LBC_WATCHDOG, lbc);
+
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_TXRST_KEEP_REG | B_AX_PCIE_RXRST_KEEP_REG);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_WPDMA);
+
+ /* stop DMA activities */
+ rtw89_pci_ctrl_dma_all(rtwdev, false);
+
+ /* check PCI at idle state */
+ check = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
+ ret = read_poll_timeout(rtw89_read32, dma_busy, (dma_busy & check) == 0,
+ 100, 3000, false, rtwdev, R_AX_PCIE_DMA_BUSY1);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to poll io busy\n");
+ return ret;
+ }
+
+ rtw89_pci_clr_idx_all(rtwdev);
+
+ /* configure TX/RX op modes */
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_TX_TRUNC_MODE |
+ B_AX_RX_TRUNC_MODE);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_RXBD_MODE);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, 7);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, 3);
+ /* multi-tag mode */
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_LATENCY_CONTROL);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_EXP_CTRL, B_AX_MAX_TAG_NUM,
+ RTW89_MAC_TAG_NUM_8);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE,
+ RTW89_MAC_WD_DMA_INTVL_256NS);
+ rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT,
+ RTW89_MAC_WD_DMA_INTVL_256NS);
+
+ /* fill TRX BD indexes */
+ rtw89_pci_ops_reset(rtwdev);
+
+ ret = rtw89_pci_rst_bdram_pcie(rtwdev);
+ if (ret) {
+ rtw89_warn(rtwdev, "reset bdram busy\n");
+ return ret;
+ }
+
+ /* enable FW CMD queue to download firmware */
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_STOP_CH12);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+
+ /* start DMA activities */
+ rtw89_pci_ctrl_dma_all(rtwdev, true);
+
+ return 0;
+}
+
+static int rtw89_pci_ltr_set(struct rtw89_dev *rtwdev)
+{
+ u32 val;
+
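+ /* Make sure the LTR registers read back sane values before reprogramming them */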
+ val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_0);
+ if (rtw89_pci_ltr_is_err_reg_val(val))
+ return -EINVAL;
+ val = rtw89_read32(rtwdev, R_AX_LTR_CTRL_1);
+ if (rtw89_pci_ltr_is_err_reg_val(val))
+ return -EINVAL;
+ val = rtw89_read32(rtwdev, R_AX_LTR_IDLE_LATENCY);
+ if (rtw89_pci_ltr_is_err_reg_val(val))
+ return -EINVAL;
+ val = rtw89_read32(rtwdev, R_AX_LTR_ACTIVE_LATENCY);
+ if (rtw89_pci_ltr_is_err_reg_val(val))
+ return -EINVAL;
+
+ rtw89_write32_clr(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_HW_EN);
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_EN);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_SPACE_IDX_MASK,
+ PCI_LTR_SPC_500US);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_0, B_AX_LTR_IDLE_TIMER_IDX_MASK,
+ PCI_LTR_IDLE_TIMER_800US);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX0_TH_MASK, 0x28);
+ rtw89_write32_mask(rtwdev, R_AX_LTR_CTRL_1, B_AX_LTR_RX1_TH_MASK, 0x28);
+ rtw89_write32(rtwdev, R_AX_LTR_IDLE_LATENCY, 0x88e088e0);
+ rtw89_write32(rtwdev, R_AX_LTR_ACTIVE_LATENCY, 0x880b880b);
+
+ return 0;
+}
+
+static int rtw89_pci_ops_mac_post_init(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ ret = rtw89_pci_ltr_set(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "pci ltr set fail\n");
+ return ret;
+ }
+ if (rtwdev->chip->chip_id == RTL8852A) {
+ /* ltr sw trigger */
+ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT);
+ }
+ /* ADDR info 8-byte mode */
+ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING,
+ B_AX_HOST_ADDR_INFO_8B_SEL);
+ rtw89_write32_clr(rtwdev, R_AX_PKTIN_SETTING, B_AX_WD_ADDR_INFO_LENGTH);
+
+ /* enable DMA for all queues */
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, B_AX_TX_STOP1_ALL);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP2, B_AX_TX_STOP2_ALL);
+
+ /* Release PCI IO */
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1,
+ B_AX_STOP_WPDMA | B_AX_STOP_PCIEIO);
+
+ return 0;
+}
+
+static int rtw89_pci_claim_device(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to enable pci device\n");
+ return ret;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, rtwdev->hw);
+
+ rtwpci->pdev = pdev;
+
+ return 0;
+}
+
+static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long resource_len;
+ u8 bar_id = 2;
+ int ret;
+
+ ret = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to request pci regions\n");
+ goto err;
+ }
+
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n");
+ goto err_release_regions;
+ }
+
+ ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n");
+ goto err_release_regions;
+ }
+
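+ /* Map the device MMIO registers behind BAR 2 */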
+ resource_len = pci_resource_len(pdev, bar_id);
+ rtwpci->mmap = pci_iomap(pdev, bar_id, resource_len);
+ if (!rtwpci->mmap) {
+ rtw89_err(rtwdev, "failed to map pci io\n");
+ ret = -EIO;
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_regions(pdev);
+err:
+ return ret;
+}
+
+static void rtw89_pci_clear_mapping(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ if (rtwpci->mmap) {
+ pci_iounmap(pdev, rtwpci->mmap);
+ pci_release_regions(pdev);
+ }
+}
+
+static void rtw89_pci_free_tx_wd_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ u8 *head = wd_ring->head;
+ dma_addr_t dma = wd_ring->dma;
+ u32 page_size = wd_ring->page_size;
+ u32 page_num = wd_ring->page_num;
+ u32 ring_sz = page_size * page_num;
+
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
+ wd_ring->head = NULL;
+}
+
+static void rtw89_pci_free_tx_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_tx_ring *tx_ring)
+{
+ int ring_sz;
+ u8 *head;
+ dma_addr_t dma;
+
+ head = tx_ring->bd_ring.head;
+ dma = tx_ring->bd_ring.dma;
+ ring_sz = tx_ring->bd_ring.desc_size * tx_ring->bd_ring.len;
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
+
+ tx_ring->bd_ring.head = NULL;
+}
+
+static void rtw89_pci_free_tx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ int i;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
+ rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
+ }
+}
+
+static void rtw89_pci_free_rx_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_rx_ring *rx_ring)
+{
+ struct rtw89_pci_rx_info *rx_info;
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ u32 buf_sz;
+ u8 *head;
+ int ring_sz = rx_ring->bd_ring.desc_size * rx_ring->bd_ring.len;
+ int i;
+
+ buf_sz = rx_ring->buf_sz;
+ for (i = 0; i < rx_ring->bd_ring.len; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ dma = rx_info->dma;
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ rx_ring->buf[i] = NULL;
+ }
+
+ head = rx_ring->bd_ring.head;
+ dma = rx_ring->bd_ring.dma;
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
+
+ rx_ring->bd_ring.head = NULL;
+}
+
+static void rtw89_pci_free_rx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_rx_ring *rx_ring;
+ int i;
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
+ }
+}
+
+static void rtw89_pci_free_trx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ rtw89_pci_free_rx_rings(rtwdev, pdev);
+ rtw89_pci_free_tx_rings(rtwdev, pdev);
+}
+
+static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ struct sk_buff *skb, int buf_sz, u32 idx)
+{
+ struct rtw89_pci_rx_info *rx_info;
+ struct rtw89_pci_rx_bd_32 *rx_bd;
+ dma_addr_t dma;
+
+ if (!skb)
+ return -EINVAL;
+
+ dma = dma_map_single(&pdev->dev, skb->data, buf_sz, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, dma))
+ return -EBUSY;
+
+ rx_info = RTW89_PCI_RX_SKB_CB(skb);
+ rx_bd = RTW89_PCI_RX_BD(rx_ring, idx);
+
+ memset(rx_bd, 0, sizeof(*rx_bd));
+ rx_bd->buf_size = cpu_to_le16(buf_sz);
+ rx_bd->dma = cpu_to_le32(dma);
+ rx_info->dma = dma;
+
+ return 0;
+}
+
+static int rtw89_pci_alloc_tx_wd_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ enum rtw89_tx_channel txch)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ struct rtw89_pci_tx_wd *txwd;
+ dma_addr_t dma;
+ dma_addr_t cur_paddr;
+ u8 *head;
+ u8 *cur_vaddr;
+ u32 page_size = RTW89_PCI_TXWD_PAGE_SIZE;
+ u32 page_num = RTW89_PCI_TXWD_NUM_MAX;
+ u32 ring_sz = page_size * page_num;
+ u32 page_offset;
+ int i;
+
+ /* FWCMD queue doesn't use txwd as pages */
+ if (txch == RTW89_TXCH_CH12)
+ return 0;
+
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
+ if (!head)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&wd_ring->free_pages);
+ wd_ring->head = head;
+ wd_ring->dma = dma;
+ wd_ring->page_size = page_size;
+ wd_ring->page_num = page_num;
+
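+ /* Carve the coherent buffer into fixed-size TXWD pages and enqueue them as free pages */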
+ page_offset = 0;
+ for (i = 0; i < page_num; i++) {
+ txwd = &wd_ring->pages[i];
+ cur_paddr = dma + page_offset;
+ cur_vaddr = head + page_offset;
+
+ skb_queue_head_init(&txwd->queue);
+ INIT_LIST_HEAD(&txwd->list);
+ txwd->paddr = cur_paddr;
+ txwd->vaddr = cur_vaddr;
+ txwd->len = page_size;
+ txwd->seq = i;
+ rtw89_pci_enqueue_txwd(tx_ring, txwd);
+
+ page_offset += page_size;
+ }
+
+ return 0;
+}
+
+static int rtw89_pci_alloc_tx_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_tx_ring *tx_ring,
+ u32 desc_size, u32 len,
+ enum rtw89_tx_channel txch)
+{
+ int ring_sz = desc_size * len;
+ u8 *head;
+ dma_addr_t dma;
+ u32 addr_num;
+ u32 addr_idx;
+ u32 addr_bdram;
+ u32 addr_desa_l;
+ u32 addr_desa_h;
+ int ret;
+
+ ret = rtw89_pci_alloc_tx_wd_ring(rtwdev, pdev, tx_ring, txch);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc txwd ring of txch %d\n", txch);
+ goto err;
+ }
+
+ ret = rtw89_pci_get_txch_addrs(txch, &addr_num, &addr_idx, &addr_bdram,
+ &addr_desa_l, &addr_desa_h);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to get address of txch %d", txch);
+ goto err_free_wd_ring;
+ }
+
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
+ if (!head) {
+ ret = -ENOMEM;
+ goto err_free_wd_ring;
+ }
+
+ INIT_LIST_HEAD(&tx_ring->busy_pages);
+ tx_ring->bd_ring.head = head;
+ tx_ring->bd_ring.dma = dma;
+ tx_ring->bd_ring.len = len;
+ tx_ring->bd_ring.desc_size = desc_size;
+ tx_ring->bd_ring.addr_num = addr_num;
+ tx_ring->bd_ring.addr_idx = addr_idx;
+ tx_ring->bd_ring.addr_bdram = addr_bdram;
+ tx_ring->bd_ring.addr_desa_l = addr_desa_l;
+ tx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ tx_ring->bd_ring.wp = 0;
+ tx_ring->bd_ring.rp = 0;
+ tx_ring->txch = txch;
+
+ return 0;
+
+err_free_wd_ring:
+ rtw89_pci_free_tx_wd_ring(rtwdev, pdev, tx_ring);
+err:
+ return ret;
+}
+
+static int rtw89_pci_alloc_tx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_tx_ring *tx_ring;
+ u32 desc_size;
+ u32 len;
+ u32 i, tx_allocated;
+ int ret;
+
+ for (i = 0; i < RTW89_TXCH_NUM; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ desc_size = sizeof(struct rtw89_pci_tx_bd_32);
+ len = RTW89_PCI_TXBD_NUM_MAX;
+ ret = rtw89_pci_alloc_tx_ring(rtwdev, pdev, tx_ring,
+ desc_size, len, i);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc tx ring %d\n", i);
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ tx_allocated = i;
+ for (i = 0; i < tx_allocated; i++) {
+ tx_ring = &rtwpci->tx_rings[i];
+ rtw89_pci_free_tx_ring(rtwdev, pdev, tx_ring);
+ }
+
+ return ret;
+}
+
+static int rtw89_pci_alloc_rx_ring(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev,
+ struct rtw89_pci_rx_ring *rx_ring,
+ u32 desc_size, u32 len, u32 rxch)
+{
+ struct sk_buff *skb;
+ u8 *head;
+ dma_addr_t dma;
+ u32 addr_num;
+ u32 addr_idx;
+ u32 addr_desa_l;
+ u32 addr_desa_h;
+ int ring_sz = desc_size * len;
+ int buf_sz = RTW89_PCI_RX_BUF_SIZE;
+ int i, allocated;
+ int ret;
+
+ ret = rtw89_pci_get_rxch_addrs(rxch, &addr_num, &addr_idx,
+ &addr_desa_l, &addr_desa_h);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to get address of rxch %d", rxch);
+ return ret;
+ }
+
+ head = dma_alloc_coherent(&pdev->dev, ring_sz, &dma, GFP_KERNEL);
+ if (!head) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ rx_ring->bd_ring.head = head;
+ rx_ring->bd_ring.dma = dma;
+ rx_ring->bd_ring.len = len;
+ rx_ring->bd_ring.desc_size = desc_size;
+ rx_ring->bd_ring.addr_num = addr_num;
+ rx_ring->bd_ring.addr_idx = addr_idx;
+ rx_ring->bd_ring.addr_desa_l = addr_desa_l;
+ rx_ring->bd_ring.addr_desa_h = addr_desa_h;
+ rx_ring->bd_ring.wp = 0;
+ rx_ring->bd_ring.rp = 0;
+ rx_ring->buf_sz = buf_sz;
+ rx_ring->diliver_skb = NULL;
+ rx_ring->diliver_desc.ready = false;
+
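+ /* Pre-allocate an RX skb for every buffer descriptor and map it for DMA */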
+ for (i = 0; i < len; i++) {
+ skb = dev_alloc_skb(buf_sz);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+
+ memset(skb->data, 0, buf_sz);
+ rx_ring->buf[i] = skb;
+ ret = rtw89_pci_init_rx_bd(rtwdev, pdev, rx_ring, skb,
+ buf_sz, i);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to init rx buf %d\n", i);
+ dev_kfree_skb_any(skb);
+ rx_ring->buf[i] = NULL;
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ allocated = i;
+ for (i = 0; i < allocated; i++) {
+ skb = rx_ring->buf[i];
+ if (!skb)
+ continue;
+ dma = *((dma_addr_t *)skb->cb);
+ dma_unmap_single(&pdev->dev, dma, buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb(skb);
+ rx_ring->buf[i] = NULL;
+ }
+
+ head = rx_ring->bd_ring.head;
+ dma = rx_ring->bd_ring.dma;
+ dma_free_coherent(&pdev->dev, ring_sz, head, dma);
+
+ rx_ring->bd_ring.head = NULL;
+err:
+ return ret;
+}
+
+static int rtw89_pci_alloc_rx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct rtw89_pci_rx_ring *rx_ring;
+ u32 desc_size;
+ u32 len;
+ int i, rx_allocated;
+ int ret;
+
+ for (i = 0; i < RTW89_RXCH_NUM; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ desc_size = sizeof(struct rtw89_pci_rx_bd_32);
+ len = RTW89_PCI_RXBD_NUM_MAX;
+ ret = rtw89_pci_alloc_rx_ring(rtwdev, pdev, rx_ring,
+ desc_size, len, i);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc rx ring %d\n", i);
+ goto err_free;
+ }
+ }
+
+ return 0;
+
+err_free:
+ rx_allocated = i;
+ for (i = 0; i < rx_allocated; i++) {
+ rx_ring = &rtwpci->rx_rings[i];
+ rtw89_pci_free_rx_ring(rtwdev, pdev, rx_ring);
+ }
+
+ return ret;
+}
+
+static int rtw89_pci_alloc_trx_rings(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ int ret;
+
+ ret = rtw89_pci_alloc_tx_rings(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc dma tx rings\n");
+ goto err;
+ }
+
+ ret = rtw89_pci_alloc_rx_rings(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc dma rx rings\n");
+ goto err_free_tx_rings;
+ }
+
+ return 0;
+
+err_free_tx_rings:
+ rtw89_pci_free_tx_rings(rtwdev, pdev);
+err:
+ return ret;
+}
+
+static void rtw89_pci_h2c_init(struct rtw89_dev *rtwdev,
+ struct rtw89_pci *rtwpci)
+{
+ skb_queue_head_init(&rtwpci->h2c_queue);
+ skb_queue_head_init(&rtwpci->h2c_release_queue);
+}
+
+static int rtw89_pci_setup_resource(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ int ret;
+
+ ret = rtw89_pci_setup_mapping(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to setup pci mapping\n");
+ goto err;
+ }
+
+ ret = rtw89_pci_alloc_trx_rings(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to alloc pci trx rings\n");
+ goto err_pci_unmap;
+ }
+
+ rtw89_pci_h2c_init(rtwdev, rtwpci);
+
+ spin_lock_init(&rtwpci->irq_lock);
+ spin_lock_init(&rtwpci->trx_lock);
+
+ return 0;
+
+err_pci_unmap:
+ rtw89_pci_clear_mapping(rtwdev, pdev);
+err:
+ return ret;
+}
+
+static void rtw89_pci_clear_resource(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtw89_pci_free_trx_rings(rtwdev, pdev);
+ rtw89_pci_clear_mapping(rtwdev, pdev);
+ rtw89_pci_release_fwcmd(rtwdev, rtwpci,
+ skb_queue_len(&rtwpci->h2c_queue), true);
+}
+
+static void rtw89_pci_default_intr_mask(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+
+ rtwpci->halt_c2h_intrs = B_AX_HALT_C2H_INT_EN;
+ rtwpci->intrs[0] = B_AX_TXDMA_STUCK_INT_EN |
+ B_AX_RXDMA_INT_EN |
+ B_AX_RXP1DMA_INT_EN |
+ B_AX_RPQDMA_INT_EN |
+ B_AX_RXDMA_STUCK_INT_EN |
+ B_AX_RDU_INT_EN |
+ B_AX_RPQBD_FULL_INT_EN |
+ B_AX_HS0ISR_IND_INT_EN;
+
+ rtwpci->intrs[1] = B_AX_HC10ISR_IND_INT_EN;
+}
+
+static int rtw89_pci_request_irq(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ unsigned long flags = 0;
+ int ret;
+
+ flags |= PCI_IRQ_LEGACY | PCI_IRQ_MSI;
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, flags);
+ if (ret < 0) {
+ rtw89_err(rtwdev, "failed to alloc irq vectors, ret %d\n", ret);
+ goto err;
+ }
+
+ ret = devm_request_threaded_irq(rtwdev->dev, pdev->irq,
+ rtw89_pci_interrupt_handler,
+ rtw89_pci_interrupt_threadfn,
+ IRQF_SHARED, KBUILD_MODNAME, rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to request threaded irq\n");
+ goto err_free_vector;
+ }
+
+ rtw89_pci_default_intr_mask(rtwdev);
+
+ return 0;
+
+err_free_vector:
+ pci_free_irq_vectors(pdev);
+err:
+ return ret;
+}
+
+static void rtw89_pci_free_irq(struct rtw89_dev *rtwdev,
+ struct pci_dev *pdev)
+{
+ devm_free_irq(rtwdev->dev, pdev->irq, rtwdev);
+ pci_free_irq_vectors(pdev);
+}
+
+static void rtw89_pci_clkreq_set(struct rtw89_dev *rtwdev, bool enable)
+{
+ int ret;
+
+ if (rtw89_pci_disable_clkreq)
+ return;
+
+ ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_CLK_CTRL,
+ PCIE_CLKDLY_HW_30US);
+ if (ret)
+ rtw89_err(rtwdev, "failed to set CLKREQ Delay\n");
+
+ if (enable)
+ ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ else
+ ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_CLK);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s CLKREQ_L1, ret=%d",
+ enable ? "set" : "unset", ret);
+}
+
+static void rtw89_pci_aspm_set(struct rtw89_dev *rtwdev, bool enable)
+{
+ u8 value = 0;
+ int ret;
+
+ if (rtw89_pci_disable_aspm_l1)
+ return;
+
+ ret = rtw89_dbi_read8(rtwdev, RTW89_PCIE_ASPM_CTRL, &value);
+ if (ret)
+ rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+
+ value &= ~(RTW89_L1DLY_MASK | RTW89_L0DLY_MASK);
+ value |= FIELD_PREP(RTW89_L1DLY_MASK, PCIE_L1DLY_16US) |
+ FIELD_PREP(RTW89_L0DLY_MASK, PCIE_L0SDLY_4US);
+
+ ret = rtw89_dbi_write8(rtwdev, RTW89_PCIE_ASPM_CTRL, value);
+ if (ret)
+ rtw89_err(rtwdev, "failed to read ASPM Delay\n");
+
+ if (enable)
+ ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ else
+ ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_L1_CTRL,
+ RTW89_PCIE_BIT_L1);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s ASPM L1, ret=%d",
+ enable ? "set" : "unset", ret);
+}
+
+static void rtw89_pci_recalc_int_mit(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ enum rtw89_tfc_lv tx_tfc_lv = stats->tx_tfc_lv;
+ enum rtw89_tfc_lv rx_tfc_lv = stats->rx_tfc_lv;
+ u32 val = 0;
+
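+ /* Enable RX interrupt mitigation (batching by count and timer) only
+ * under high traffic while not scanning; otherwise leave it disabled.
+ */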
+ if (!rtwdev->scanning &&
+ (tx_tfc_lv >= RTW89_TFC_HIGH || rx_tfc_lv >= RTW89_TFC_HIGH))
+ val = B_AX_RXMIT_RXP2_SEL | B_AX_RXMIT_RXP1_SEL |
+ FIELD_PREP(B_AX_RXCOUNTER_MATCH_MASK, RTW89_PCI_RXBD_NUM_MAX / 2) |
+ FIELD_PREP(B_AX_RXTIMER_UNIT_MASK, AX_RXTIMER_UNIT_64US) |
+ FIELD_PREP(B_AX_RXTIMER_MATCH_MASK, 2048 / 64);
+
+ rtw89_write32(rtwdev, R_AX_INT_MIT_RX, val);
+}
+
+static void rtw89_pci_link_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u16 link_ctrl;
+ int ret;
+
+ /* Although the standard PCIe configuration space has a link control
+ * register for this, by Realtek's design the driver must check whether
+ * the host has enabled CLKREQ/ASPM before turning on the HW module.
+ *
+ * These features are implemented by two associated HW modules: one
+ * accesses the PCIe configuration space to follow the host settings,
+ * while the other performs the actual CLKREQ/ASPM mechanisms and is
+ * disabled by default. If the host does not support them, or if the
+ * settings are wrong (e.g. CLKREQ# is not bi-directional), the device
+ * could be lost when the HW misbehaves on the link.
+ *
+ * Hence the driver first checks that the PCIe configuration space is
+ * synced and enabled, and only then turns on the module that actually
+ * implements the mechanism.
+ */
+ ret = pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &link_ctrl);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to read PCI cap, ret=%d\n", ret);
+ return;
+ }
+
+ if (link_ctrl & PCI_EXP_LNKCTL_CLKREQ_EN)
+ rtw89_pci_clkreq_set(rtwdev, true);
+
+ if (link_ctrl & PCI_EXP_LNKCTL_ASPM_L1)
+ rtw89_pci_aspm_set(rtwdev, true);
+}
+
+static void rtw89_pci_l1ss_set(struct rtw89_dev *rtwdev, bool enable)
+{
+ int ret;
+
+ if (enable)
+ ret = rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ else
+ ret = rtw89_dbi_write8_clr(rtwdev, RTW89_PCIE_TIMER_CTRL,
+ RTW89_PCIE_BIT_L1SUB);
+ if (ret)
+ rtw89_err(rtwdev, "failed to %s L1SS, ret=%d",
+ enable ? "set" : "unset", ret);
+}
+
+static void rtw89_pci_l1ss_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ struct pci_dev *pdev = rtwpci->pdev;
+ u32 l1ss_cap_ptr, l1ss_ctrl;
+
+ if (rtw89_pci_disable_l1ss)
+ return;
+
+ l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
+ if (!l1ss_cap_ptr)
+ return;
+
+ pci_read_config_dword(pdev, l1ss_cap_ptr + PCI_L1SS_CTL1, &l1ss_ctrl);
+
+ if (l1ss_ctrl & PCI_L1SS_CTL1_L1SS_MASK)
+ rtw89_pci_l1ss_set(rtwdev, true);
+}
+
+static void rtw89_pci_ctrl_dma_all_pcie(struct rtw89_dev *rtwdev, u8 en)
+{
+ u32 val32;
+
+ if (en == MAC_AX_FUNC_EN) {
+ val32 = B_AX_STOP_PCIEIO;
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
+
+ val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ } else {
+ val32 = B_AX_STOP_PCIEIO;
+ rtw89_write32_set(rtwdev, R_AX_PCIE_DMA_STOP1, val32);
+
+ val32 = B_AX_TXHCI_EN | B_AX_RXHCI_EN;
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+ }
+}
+
+static int rtw89_pci_poll_io_idle(struct rtw89_dev *rtwdev)
+{
+ int ret = 0;
+ u32 sts;
+ u32 busy = B_AX_PCIEIO_BUSY | B_AX_PCIEIO_TX_BUSY | B_AX_PCIEIO_RX_BUSY;
+
+ ret = read_poll_timeout_atomic(rtw89_read32, sts, (sts & busy) == 0x0,
+ 10, 1000, false, rtwdev,
+ R_AX_PCIE_DMA_BUSY1);
+ if (ret) {
+ rtw89_err(rtwdev, "pci dmach busy1 0x%X\n",
+ rtw89_read32(rtwdev, R_AX_PCIE_DMA_BUSY1));
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static int rtw89_pci_lv1rst_stop_dma(struct rtw89_dev *rtwdev)
+{
+ u32 val, dma_rst = 0;
+ int ret;
+
+ rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_DIS);
+ ret = rtw89_pci_poll_io_idle(rtwdev);
+ if (ret) {
+ val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "[PCIe] poll_io_idle fail, before 0x%08x: 0x%08x\n",
+ R_AX_DBG_ERR_FLAG, val);
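+ /* If TX/RX DMA is stuck, toggle the matching HCI DMA function
+ * enable bits to reset the engines, then poll again.
+ */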
+ if (val & B_AX_TX_STUCK || val & B_AX_PCIE_TXBD_LEN0)
+ dma_rst |= B_AX_HCI_TXDMA_EN;
+ if (val & B_AX_RX_STUCK)
+ dma_rst |= B_AX_HCI_RXDMA_EN;
+ val = rtw89_read32(rtwdev, R_AX_HCI_FUNC_EN);
+ rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val & ~dma_rst);
+ rtw89_write32(rtwdev, R_AX_HCI_FUNC_EN, val | dma_rst);
+ ret = rtw89_pci_poll_io_idle(rtwdev);
+ val = rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG);
+ rtw89_debug(rtwdev, RTW89_DBG_HCI,
+ "[PCIe] poll_io_idle fail, after 0x%08x: 0x%08x\n",
+ R_AX_DBG_ERR_FLAG, val);
+ }
+
+ return ret;
+}
+
+static void rtw89_pci_ctrl_hci_dma_en(struct rtw89_dev *rtwdev, u8 en)
+{
+ u32 val32;
+
+ if (en == MAC_AX_FUNC_EN) {
+ val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
+ rtw89_write32_set(rtwdev, R_AX_HCI_FUNC_EN, val32);
+ } else {
+ val32 = B_AX_HCI_TXDMA_EN | B_AX_HCI_RXDMA_EN;
+ rtw89_write32_clr(rtwdev, R_AX_HCI_FUNC_EN, val32);
+ }
+}
+
+static int rtw89_pci_rst_bdram(struct rtw89_dev *rtwdev)
+{
+ int ret = 0;
+ u32 val32, sts;
+
+ val32 = B_AX_RST_BDRAM;
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, val32);
+
+ ret = read_poll_timeout_atomic(rtw89_read32, sts,
+ (sts & B_AX_RST_BDRAM) == 0x0, 1, 100,
+ true, rtwdev, R_AX_PCIE_INIT_CFG1);
+ return ret;
+}
+
+static int rtw89_pci_lv1rst_start_dma(struct rtw89_dev *rtwdev)
+{
+ int ret;
+
+ rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_DIS);
+ rtw89_pci_ctrl_hci_dma_en(rtwdev, MAC_AX_FUNC_EN);
+ rtw89_pci_clr_idx_all(rtwdev);
+
+ ret = rtw89_pci_rst_bdram(rtwdev);
+ if (ret)
+ return ret;
+
+ rtw89_pci_ctrl_dma_all_pcie(rtwdev, MAC_AX_FUNC_EN);
+ return ret;
+}
+
+static int rtw89_pci_ops_mac_lv1_recovery(struct rtw89_dev *rtwdev,
+ enum rtw89_lv1_rcvy_step step)
+{
+ int ret;
+
+ switch (step) {
+ case RTW89_LV1_RCVY_STEP_1:
+ ret = rtw89_pci_lv1rst_stop_dma(rtwdev);
+ if (ret)
+ rtw89_err(rtwdev, "lv1 rcvy pci stop dma fail\n");
+
+ break;
+
+ case RTW89_LV1_RCVY_STEP_2:
+ ret = rtw89_pci_lv1rst_start_dma(rtwdev);
+ if (ret)
+ rtw89_err(rtwdev, "lv1 rcvy pci start dma fail\n");
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static void rtw89_pci_ops_dump_err_status(struct rtw89_dev *rtwdev)
+{
+ rtw89_info(rtwdev, "R_AX_RPQ_RXBD_IDX =0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_RPQ_RXBD_IDX));
+ rtw89_info(rtwdev, "R_AX_DBG_ERR_FLAG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_DBG_ERR_FLAG));
+ rtw89_info(rtwdev, "R_AX_LBC_WATCHDOG=0x%08x\n",
+ rtw89_read32(rtwdev, R_AX_LBC_WATCHDOG));
+}
+
+static int rtw89_pci_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct rtw89_dev *rtwdev = container_of(napi, struct rtw89_dev, napi);
+ struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv;
+ unsigned long flags;
+ int work_done;
+
+ rtwdev->napi_budget_countdown = budget;
+
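+ /* Poll the release-report queue (RPQ) first to reclaim TX resources,
+ * then the RX queue; interrupts are re-enabled only once the budget
+ * is not exhausted and NAPI is complete.
+ */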
+ rtw89_pci_clear_isr0(rtwdev, B_AX_RPQDMA_INT | B_AX_RPQBD_FULL_INT);
+ work_done = rtw89_pci_poll_rpq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
+ if (work_done == budget)
+ return budget;
+
+ rtw89_pci_clear_isr0(rtwdev, B_AX_RXP1DMA_INT | B_AX_RXDMA_INT | B_AX_RDU_INT);
+ work_done += rtw89_pci_poll_rxq_dma(rtwdev, rtwpci, rtwdev->napi_budget_countdown);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ spin_lock_irqsave(&rtwpci->irq_lock, flags);
+ if (likely(rtwpci->running))
+ rtw89_pci_enable_intr(rtwdev, rtwpci);
+ spin_unlock_irqrestore(&rtwpci->irq_lock, flags);
+ }
+
+ return work_done;
+}
+
+static int __maybe_unused rtw89_pci_suspend(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
+ rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+
+ return 0;
+}
+
+static void rtw89_pci_l2_hci_ldo(struct rtw89_dev *rtwdev)
+{
+ if (rtwdev->chip->chip_id == RTL8852C)
+ return;
+
+ /* The hardware requires writing this register twice for the setting to take effect */
+ rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+ rtw89_dbi_write8_set(rtwdev, RTW89_PCIE_RST_MSTATE,
+ RTW89_PCIE_BIT_CFG_RST_MSTATE);
+}
+
+static int __maybe_unused rtw89_pci_resume(struct device *dev)
+{
+ struct ieee80211_hw *hw = dev_get_drvdata(dev);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL,
+ B_AX_PCIE_DIS_L2_CTRL_LDO_HCI);
+ rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST);
+ rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6);
+ rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1,
+ B_AX_PCIE_PERST_KEEP_REG | B_AX_PCIE_TRAIN_KEEP_REG);
+ rtw89_pci_l2_hci_ldo(rtwdev);
+ rtw89_pci_link_cfg(rtwdev);
+ rtw89_pci_l1ss_cfg(rtwdev);
+
+ return 0;
+}
+
+SIMPLE_DEV_PM_OPS(rtw89_pm_ops, rtw89_pci_suspend, rtw89_pci_resume);
+EXPORT_SYMBOL(rtw89_pm_ops);
+
+static const struct rtw89_hci_ops rtw89_pci_ops = {
+ .tx_write = rtw89_pci_ops_tx_write,
+ .tx_kick_off = rtw89_pci_ops_tx_kick_off,
+ .flush_queues = rtw89_pci_ops_flush_queues,
+ .reset = rtw89_pci_ops_reset,
+ .start = rtw89_pci_ops_start,
+ .stop = rtw89_pci_ops_stop,
+ .recalc_int_mit = rtw89_pci_recalc_int_mit,
+
+ .read8 = rtw89_pci_ops_read8,
+ .read16 = rtw89_pci_ops_read16,
+ .read32 = rtw89_pci_ops_read32,
+ .write8 = rtw89_pci_ops_write8,
+ .write16 = rtw89_pci_ops_write16,
+ .write32 = rtw89_pci_ops_write32,
+
+ .mac_pre_init = rtw89_pci_ops_mac_pre_init,
+ .mac_post_init = rtw89_pci_ops_mac_post_init,
+ .deinit = rtw89_pci_ops_deinit,
+
+ .check_and_reclaim_tx_resource = rtw89_pci_check_and_reclaim_tx_resource,
+ .mac_lv1_rcvy = rtw89_pci_ops_mac_lv1_recovery,
+ .dump_err_status = rtw89_pci_ops_dump_err_status,
+ .napi_poll = rtw89_pci_napi_poll,
+};
+
+static int rtw89_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct ieee80211_hw *hw;
+ struct rtw89_dev *rtwdev;
+ int driver_data_size;
+ int ret;
+
+ driver_data_size = sizeof(struct rtw89_dev) + sizeof(struct rtw89_pci);
+ hw = ieee80211_alloc_hw(driver_data_size, &rtw89_ops);
+ if (!hw) {
+ dev_err(&pdev->dev, "failed to allocate hw\n");
+ return -ENOMEM;
+ }
+
+ rtwdev = hw->priv;
+ rtwdev->hw = hw;
+ rtwdev->dev = &pdev->dev;
+ rtwdev->hci.ops = &rtw89_pci_ops;
+ rtwdev->hci.type = RTW89_HCI_TYPE_PCIE;
+ rtwdev->hci.rpwm_addr = R_AX_PCIE_HRPWM;
+ rtwdev->hci.cpwm_addr = R_AX_CPWM;
+
+ SET_IEEE80211_DEV(rtwdev->hw, &pdev->dev);
+
+ switch (id->driver_data) {
+ case RTL8852A:
+ rtwdev->chip = &rtw8852a_chip_info;
+ break;
+ default:
+ return -ENOENT;
+ }
+
+ ret = rtw89_core_init(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to initialise core\n");
+ goto err_release_hw;
+ }
+
+ ret = rtw89_pci_claim_device(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to claim pci device\n");
+ goto err_core_deinit;
+ }
+
+ ret = rtw89_pci_setup_resource(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to setup pci resource\n");
+ goto err_declaim_pci;
+ }
+
+ ret = rtw89_chip_info_setup(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to setup chip information\n");
+ goto err_clear_resource;
+ }
+
+ rtw89_pci_link_cfg(rtwdev);
+ rtw89_pci_l1ss_cfg(rtwdev);
+
+ ret = rtw89_core_register(rtwdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to register core\n");
+ goto err_clear_resource;
+ }
+
+ rtw89_core_napi_init(rtwdev);
+
+ ret = rtw89_pci_request_irq(rtwdev, pdev);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to request pci irq\n");
+ goto err_unregister;
+ }
+
+ return 0;
+
+err_unregister:
+ rtw89_core_napi_deinit(rtwdev);
+ rtw89_core_unregister(rtwdev);
+err_clear_resource:
+ rtw89_pci_clear_resource(rtwdev, pdev);
+err_declaim_pci:
+ rtw89_pci_declaim_device(rtwdev, pdev);
+err_core_deinit:
+ rtw89_core_deinit(rtwdev);
+err_release_hw:
+ ieee80211_free_hw(hw);
+
+ return ret;
+}
+
+static void rtw89_pci_remove(struct pci_dev *pdev)
+{
+ struct ieee80211_hw *hw = pci_get_drvdata(pdev);
+ struct rtw89_dev *rtwdev;
+
+ rtwdev = hw->priv;
+
+ rtw89_pci_free_irq(rtwdev, pdev);
+ rtw89_core_napi_deinit(rtwdev);
+ rtw89_core_unregister(rtwdev);
+ rtw89_pci_clear_resource(rtwdev, pdev);
+ rtw89_pci_declaim_device(rtwdev, pdev);
+ rtw89_core_deinit(rtwdev);
+ ieee80211_free_hw(hw);
+}
+
+static const struct pci_device_id rtw89_pci_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8852), .driver_data = RTL8852A },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0xa85a), .driver_data = RTL8852A },
+ {},
+};
+MODULE_DEVICE_TABLE(pci, rtw89_pci_id_table);
+
+static struct pci_driver rtw89_pci_driver = {
+ .name = "rtw89_pci",
+ .id_table = rtw89_pci_id_table,
+ .probe = rtw89_pci_probe,
+ .remove = rtw89_pci_remove,
+ .driver.pm = &rtw89_pm_ops,
+};
+module_pci_driver(rtw89_pci_driver);
+
+MODULE_AUTHOR("Realtek Corporation");
+MODULE_DESCRIPTION("Realtek 802.11ax wireless PCI driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h
new file mode 100644
index 000000000000..20e6767ea5c4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/pci.h
@@ -0,0 +1,630 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_PCI_H__
+#define __RTW89_PCI_H__
+
+#include "txrx.h"
+
+#define MDIO_PG0_G1 0
+#define MDIO_PG1_G1 1
+#define MDIO_PG0_G2 2
+#define MDIO_PG1_G2 3
+#define RAC_ANA10 0x10
+#define RAC_ANA19 0x19
+#define RAC_ANA1F 0x1F
+#define RAC_ANA24 0x24
+#define B_AX_DEGLITCH GENMASK(11, 8)
+#define RAC_ANA26 0x26
+#define B_AX_RXEN GENMASK(15, 14)
+#define RAC_CTRL_PPR_V1 0x30
+#define B_AX_CLK_CALIB_EN BIT(12)
+#define B_AX_CALIB_EN BIT(13)
+#define B_AX_DIV GENMASK(15, 14)
+#define RAC_SET_PPR_V1 0x31
+
+#define R_AX_DBI_FLAG 0x1090
+#define B_AX_DBI_RFLAG BIT(17)
+#define B_AX_DBI_WFLAG BIT(16)
+#define B_AX_DBI_WREN_MSK GENMASK(15, 12)
+#define B_AX_DBI_ADDR_MSK GENMASK(11, 2)
+#define R_AX_DBI_WDATA 0x1094
+#define R_AX_DBI_RDATA 0x1098
+
+#define R_AX_MDIO_WDATA 0x10A4
+#define R_AX_MDIO_RDATA 0x10A6
+
+#define RTW89_PCI_WR_RETRY_CNT 20
+
+/* Interrupts */
+#define R_AX_HIMR0 0x01A0
+#define B_AX_HALT_C2H_INT_EN BIT(21)
+#define R_AX_HISR0 0x01A4
+
+#define R_AX_MDIO_CFG 0x10A0
+#define B_AX_MDIO_PHY_ADDR_MASK GENMASK(13, 12)
+#define B_AX_MDIO_RFLAG BIT(9)
+#define B_AX_MDIO_WFLAG BIT(8)
+#define B_AX_MDIO_ADDR_MASK GENMASK(4, 0)
+
+#define R_AX_PCIE_HIMR00 0x10B0
+#define B_AX_HC00ISR_IND_INT_EN BIT(27)
+#define B_AX_HD1ISR_IND_INT_EN BIT(26)
+#define B_AX_HD0ISR_IND_INT_EN BIT(25)
+#define B_AX_HS0ISR_IND_INT_EN BIT(24)
+#define B_AX_RETRAIN_INT_EN BIT(21)
+#define B_AX_RPQBD_FULL_INT_EN BIT(20)
+#define B_AX_RDU_INT_EN BIT(19)
+#define B_AX_RXDMA_STUCK_INT_EN BIT(18)
+#define B_AX_TXDMA_STUCK_INT_EN BIT(17)
+#define B_AX_PCIE_HOTRST_INT_EN BIT(16)
+#define B_AX_PCIE_FLR_INT_EN BIT(15)
+#define B_AX_PCIE_PERST_INT_EN BIT(14)
+#define B_AX_TXDMA_CH12_INT_EN BIT(13)
+#define B_AX_TXDMA_CH9_INT_EN BIT(12)
+#define B_AX_TXDMA_CH8_INT_EN BIT(11)
+#define B_AX_TXDMA_ACH7_INT_EN BIT(10)
+#define B_AX_TXDMA_ACH6_INT_EN BIT(9)
+#define B_AX_TXDMA_ACH5_INT_EN BIT(8)
+#define B_AX_TXDMA_ACH4_INT_EN BIT(7)
+#define B_AX_TXDMA_ACH3_INT_EN BIT(6)
+#define B_AX_TXDMA_ACH2_INT_EN BIT(5)
+#define B_AX_TXDMA_ACH1_INT_EN BIT(4)
+#define B_AX_TXDMA_ACH0_INT_EN BIT(3)
+#define B_AX_RPQDMA_INT_EN BIT(2)
+#define B_AX_RXP1DMA_INT_EN BIT(1)
+#define B_AX_RXDMA_INT_EN BIT(0)
+
+#define R_AX_PCIE_HISR00 0x10B4
+#define B_AX_HC00ISR_IND_INT BIT(27)
+#define B_AX_HD1ISR_IND_INT BIT(26)
+#define B_AX_HD0ISR_IND_INT BIT(25)
+#define B_AX_HS0ISR_IND_INT BIT(24)
+#define B_AX_RETRAIN_INT BIT(21)
+#define B_AX_RPQBD_FULL_INT BIT(20)
+#define B_AX_RDU_INT BIT(19)
+#define B_AX_RXDMA_STUCK_INT BIT(18)
+#define B_AX_TXDMA_STUCK_INT BIT(17)
+#define B_AX_PCIE_HOTRST_INT BIT(16)
+#define B_AX_PCIE_FLR_INT BIT(15)
+#define B_AX_PCIE_PERST_INT BIT(14)
+#define B_AX_TXDMA_CH12_INT BIT(13)
+#define B_AX_TXDMA_CH9_INT BIT(12)
+#define B_AX_TXDMA_CH8_INT BIT(11)
+#define B_AX_TXDMA_ACH7_INT BIT(10)
+#define B_AX_TXDMA_ACH6_INT BIT(9)
+#define B_AX_TXDMA_ACH5_INT BIT(8)
+#define B_AX_TXDMA_ACH4_INT BIT(7)
+#define B_AX_TXDMA_ACH3_INT BIT(6)
+#define B_AX_TXDMA_ACH2_INT BIT(5)
+#define B_AX_TXDMA_ACH1_INT BIT(4)
+#define B_AX_TXDMA_ACH0_INT BIT(3)
+#define B_AX_RPQDMA_INT BIT(2)
+#define B_AX_RXP1DMA_INT BIT(1)
+#define B_AX_RXDMA_INT BIT(0)
+
+#define R_AX_PCIE_HIMR10 0x13B0
+#define B_AX_HC10ISR_IND_INT_EN BIT(28)
+#define B_AX_TXDMA_CH11_INT_EN BIT(12)
+#define B_AX_TXDMA_CH10_INT_EN BIT(11)
+
+#define R_AX_PCIE_HISR10 0x13B4
+#define B_AX_HC10ISR_IND_INT BIT(28)
+#define B_AX_TXDMA_CH11_INT BIT(12)
+#define B_AX_TXDMA_CH10_INT BIT(11)
+
+/* TX/RX */
+#define R_AX_RXQ_RXBD_IDX 0x1050
+#define R_AX_RPQ_RXBD_IDX 0x1054
+#define R_AX_ACH0_TXBD_IDX 0x1058
+#define R_AX_ACH1_TXBD_IDX 0x105C
+#define R_AX_ACH2_TXBD_IDX 0x1060
+#define R_AX_ACH3_TXBD_IDX 0x1064
+#define R_AX_ACH4_TXBD_IDX 0x1068
+#define R_AX_ACH5_TXBD_IDX 0x106C
+#define R_AX_ACH6_TXBD_IDX 0x1070
+#define R_AX_ACH7_TXBD_IDX 0x1074
+#define R_AX_CH8_TXBD_IDX 0x1078 /* Management Queue band 0 */
+#define R_AX_CH9_TXBD_IDX 0x107C /* HI Queue band 0 */
+#define R_AX_CH10_TXBD_IDX 0x137C /* Management Queue band 1 */
+#define R_AX_CH11_TXBD_IDX 0x1380 /* HI Queue band 1 */
+#define R_AX_CH12_TXBD_IDX 0x1080 /* FWCMD Queue */
+#define TXBD_HW_IDX_MASK GENMASK(27, 16)
+#define TXBD_HOST_IDX_MASK GENMASK(11, 0)
+
+#define R_AX_ACH0_TXBD_DESA_L 0x1110
+#define R_AX_ACH0_TXBD_DESA_H 0x1114
+#define R_AX_ACH1_TXBD_DESA_L 0x1118
+#define R_AX_ACH1_TXBD_DESA_H 0x111C
+#define R_AX_ACH2_TXBD_DESA_L 0x1120
+#define R_AX_ACH2_TXBD_DESA_H 0x1124
+#define R_AX_ACH3_TXBD_DESA_L 0x1128
+#define R_AX_ACH3_TXBD_DESA_H 0x112C
+#define R_AX_ACH4_TXBD_DESA_L 0x1130
+#define R_AX_ACH4_TXBD_DESA_H 0x1134
+#define R_AX_ACH5_TXBD_DESA_L 0x1138
+#define R_AX_ACH5_TXBD_DESA_H 0x113C
+#define R_AX_ACH6_TXBD_DESA_L 0x1140
+#define R_AX_ACH6_TXBD_DESA_H 0x1144
+#define R_AX_ACH7_TXBD_DESA_L 0x1148
+#define R_AX_ACH7_TXBD_DESA_H 0x114C
+#define R_AX_CH8_TXBD_DESA_L 0x1150
+#define R_AX_CH8_TXBD_DESA_H 0x1154
+#define R_AX_CH9_TXBD_DESA_L 0x1158
+#define R_AX_CH9_TXBD_DESA_H 0x115C
+#define R_AX_CH10_TXBD_DESA_L 0x1358
+#define R_AX_CH10_TXBD_DESA_H 0x135C
+#define R_AX_CH11_TXBD_DESA_L 0x1360
+#define R_AX_CH11_TXBD_DESA_H 0x1364
+#define R_AX_CH12_TXBD_DESA_L 0x1160
+#define R_AX_CH12_TXBD_DESA_H 0x1164
+#define R_AX_RXQ_RXBD_DESA_L 0x1100
+#define R_AX_RXQ_RXBD_DESA_H 0x1104
+#define R_AX_RPQ_RXBD_DESA_L 0x1108
+#define R_AX_RPQ_RXBD_DESA_H 0x110C
+#define B_AX_DESC_NUM_MSK GENMASK(11, 0)
+
+#define R_AX_RXQ_RXBD_NUM 0x1020
+#define R_AX_RPQ_RXBD_NUM 0x1022
+#define R_AX_ACH0_TXBD_NUM 0x1024
+#define R_AX_ACH1_TXBD_NUM 0x1026
+#define R_AX_ACH2_TXBD_NUM 0x1028
+#define R_AX_ACH3_TXBD_NUM 0x102A
+#define R_AX_ACH4_TXBD_NUM 0x102C
+#define R_AX_ACH5_TXBD_NUM 0x102E
+#define R_AX_ACH6_TXBD_NUM 0x1030
+#define R_AX_ACH7_TXBD_NUM 0x1032
+#define R_AX_CH8_TXBD_NUM 0x1034
+#define R_AX_CH9_TXBD_NUM 0x1036
+#define R_AX_CH10_TXBD_NUM 0x1338
+#define R_AX_CH11_TXBD_NUM 0x133A
+#define R_AX_CH12_TXBD_NUM 0x1038
+
+#define R_AX_ACH0_BDRAM_CTRL 0x1200
+#define R_AX_ACH1_BDRAM_CTRL 0x1204
+#define R_AX_ACH2_BDRAM_CTRL 0x1208
+#define R_AX_ACH3_BDRAM_CTRL 0x120C
+#define R_AX_ACH4_BDRAM_CTRL 0x1210
+#define R_AX_ACH5_BDRAM_CTRL 0x1214
+#define R_AX_ACH6_BDRAM_CTRL 0x1218
+#define R_AX_ACH7_BDRAM_CTRL 0x121C
+#define R_AX_CH8_BDRAM_CTRL 0x1220
+#define R_AX_CH9_BDRAM_CTRL 0x1224
+#define R_AX_CH10_BDRAM_CTRL 0x1320
+#define R_AX_CH11_BDRAM_CTRL 0x1324
+#define R_AX_CH12_BDRAM_CTRL 0x1228
+#define BDRAM_SIDX_MASK GENMASK(7, 0)
+#define BDRAM_MAX_MASK GENMASK(15, 8)
+#define BDRAM_MIN_MASK GENMASK(23, 16)
+
+#define R_AX_PCIE_INIT_CFG1 0x1000
+#define B_AX_PCIE_RXRST_KEEP_REG BIT(23)
+#define B_AX_PCIE_TXRST_KEEP_REG BIT(22)
+#define B_AX_PCIE_PERST_KEEP_REG BIT(21)
+#define B_AX_PCIE_FLR_KEEP_REG BIT(20)
+#define B_AX_PCIE_TRAIN_KEEP_REG BIT(19)
+#define B_AX_RXBD_MODE BIT(18)
+#define B_AX_PCIE_MAX_RXDMA_MASK GENMASK(16, 14)
+#define B_AX_RXHCI_EN BIT(13)
+#define B_AX_LATENCY_CONTROL BIT(12)
+#define B_AX_TXHCI_EN BIT(11)
+#define B_AX_PCIE_MAX_TXDMA_MASK GENMASK(10, 8)
+#define B_AX_TX_TRUNC_MODE BIT(5)
+#define B_AX_RX_TRUNC_MODE BIT(4)
+#define B_AX_RST_BDRAM BIT(3)
+#define B_AX_DIS_RXDMA_PRE BIT(2)
+
+#define R_AX_TXDMA_ADDR_H 0x10F0
+#define R_AX_RXDMA_ADDR_H 0x10F4
+
+#define R_AX_PCIE_DMA_STOP1 0x1010
+#define B_AX_STOP_PCIEIO BIT(20)
+#define B_AX_STOP_WPDMA BIT(19)
+#define B_AX_STOP_CH12 BIT(18)
+#define B_AX_STOP_CH9 BIT(17)
+#define B_AX_STOP_CH8 BIT(16)
+#define B_AX_STOP_ACH7 BIT(15)
+#define B_AX_STOP_ACH6 BIT(14)
+#define B_AX_STOP_ACH5 BIT(13)
+#define B_AX_STOP_ACH4 BIT(12)
+#define B_AX_STOP_ACH3 BIT(11)
+#define B_AX_STOP_ACH2 BIT(10)
+#define B_AX_STOP_ACH1 BIT(9)
+#define B_AX_STOP_ACH0 BIT(8)
+#define B_AX_STOP_RPQ BIT(1)
+#define B_AX_STOP_RXQ BIT(0)
+#define B_AX_TX_STOP1_ALL GENMASK(18, 8)
+
+#define R_AX_PCIE_DMA_STOP2 0x1310
+#define B_AX_STOP_CH11 BIT(1)
+#define B_AX_STOP_CH10 BIT(0)
+#define B_AX_TX_STOP2_ALL GENMASK(1, 0)
+
+#define R_AX_TXBD_RWPTR_CLR1 0x1014
+#define B_AX_CLR_CH12_IDX BIT(10)
+#define B_AX_CLR_CH9_IDX BIT(9)
+#define B_AX_CLR_CH8_IDX BIT(8)
+#define B_AX_CLR_ACH7_IDX BIT(7)
+#define B_AX_CLR_ACH6_IDX BIT(6)
+#define B_AX_CLR_ACH5_IDX BIT(5)
+#define B_AX_CLR_ACH4_IDX BIT(4)
+#define B_AX_CLR_ACH3_IDX BIT(3)
+#define B_AX_CLR_ACH2_IDX BIT(2)
+#define B_AX_CLR_ACH1_IDX BIT(1)
+#define B_AX_CLR_ACH0_IDX BIT(0)
+#define B_AX_TXBD_CLR1_ALL GENMASK(10, 0)
+
+#define R_AX_RXBD_RWPTR_CLR 0x1018
+#define B_AX_CLR_RPQ_IDX BIT(1)
+#define B_AX_CLR_RXQ_IDX BIT(0)
+#define B_AX_RXBD_CLR_ALL GENMASK(1, 0)
+
+#define R_AX_TXBD_RWPTR_CLR2 0x1314
+#define B_AX_CLR_CH11_IDX BIT(1)
+#define B_AX_CLR_CH10_IDX BIT(0)
+#define B_AX_TXBD_CLR2_ALL GENMASK(1, 0)
+
+#define R_AX_PCIE_DMA_BUSY1 0x101C
+#define B_AX_PCIEIO_RX_BUSY BIT(22)
+#define B_AX_PCIEIO_TX_BUSY BIT(21)
+#define B_AX_PCIEIO_BUSY BIT(20)
+#define B_AX_WPDMA_BUSY BIT(19)
+
+#define R_AX_PCIE_DMA_BUSY2 0x131C
+#define B_AX_CH11_BUSY BIT(1)
+#define B_AX_CH10_BUSY BIT(0)
+
+/* Configure */
+#define R_AX_PCIE_INIT_CFG2 0x1004
+#define B_AX_WD_ITVL_IDLE GENMASK(27, 24)
+#define B_AX_WD_ITVL_ACT GENMASK(19, 16)
+
+#define R_AX_PCIE_PS_CTRL 0x1008
+#define B_AX_L1OFF_PWR_OFF_EN BIT(5)
+
+#define R_AX_INT_MIT_RX 0x10D4
+#define B_AX_RXMIT_RXP2_SEL BIT(19)
+#define B_AX_RXMIT_RXP1_SEL BIT(18)
+#define B_AX_RXTIMER_UNIT_MASK GENMASK(17, 16)
+#define AX_RXTIMER_UNIT_64US 0
+#define AX_RXTIMER_UNIT_128US 1
+#define AX_RXTIMER_UNIT_256US 2
+#define AX_RXTIMER_UNIT_512US 3
+#define B_AX_RXCOUNTER_MATCH_MASK GENMASK(15, 8)
+#define B_AX_RXTIMER_MATCH_MASK GENMASK(7, 0)
+
+#define R_AX_DBG_ERR_FLAG 0x11C4
+#define B_AX_PCIE_RPQ_FULL BIT(29)
+#define B_AX_PCIE_RXQ_FULL BIT(28)
+#define B_AX_CPL_STATUS_MASK GENMASK(27, 25)
+#define B_AX_RX_STUCK BIT(22)
+#define B_AX_TX_STUCK BIT(21)
+#define B_AX_PCIEDBG_TXERR0 BIT(16)
+#define B_AX_PCIE_RXP1_ERR0 BIT(4)
+#define B_AX_PCIE_TXBD_LEN0 BIT(1)
+#define B_AX_PCIE_TXBD_4KBOUD_LENERR BIT(0)
+
+#define R_AX_LBC_WATCHDOG 0x11D8
+#define B_AX_LBC_TIMER GENMASK(7, 4)
+#define B_AX_LBC_FLAG BIT(1)
+#define B_AX_LBC_EN BIT(0)
+
+#define R_AX_PCIE_EXP_CTRL 0x13F0
+#define B_AX_EN_CHKDSC_NO_RX_STUCK BIT(20)
+#define B_AX_MAX_TAG_NUM GENMASK(18, 16)
+#define B_AX_SIC_EN_FORCE_CLKREQ BIT(4)
+
+#define R_AX_PCIE_RX_PREF_ADV 0x13F4
+#define B_AX_RXDMA_PREF_ADV_EN BIT(0)
+
+#define RTW89_PCI_TXBD_NUM_MAX 256
+#define RTW89_PCI_RXBD_NUM_MAX 256
+#define RTW89_PCI_TXWD_NUM_MAX 512
+#define RTW89_PCI_TXWD_PAGE_SIZE 128
+#define RTW89_PCI_ADDRINFO_MAX 4
+#define RTW89_PCI_RX_BUF_SIZE 11460
+
+#define RTW89_PCI_POLL_BDRAM_RST_CNT 100
+#define RTW89_PCI_MULTITAG 8
+
+/* PCIE CFG register */
+#define RTW89_PCIE_ASPM_CTRL 0x070F
+#define RTW89_L1DLY_MASK GENMASK(5, 3)
+#define RTW89_L0DLY_MASK GENMASK(2, 0)
+#define RTW89_PCIE_TIMER_CTRL 0x0718
+#define RTW89_PCIE_BIT_L1SUB BIT(5)
+#define RTW89_PCIE_L1_CTRL 0x0719
+#define RTW89_PCIE_BIT_CLK BIT(4)
+#define RTW89_PCIE_BIT_L1 BIT(3)
+#define RTW89_PCIE_CLK_CTRL 0x0725
+#define RTW89_PCIE_RST_MSTATE 0x0B48
+#define RTW89_PCIE_BIT_CFG_RST_MSTATE BIT(0)
+#define RTW89_PCIE_PHY_RATE 0x82
+#define RTW89_PCIE_PHY_RATE_MASK GENMASK(1, 0)
+#define INTF_INTGRA_MINREF_V1 90
+#define INTF_INTGRA_HOSTREF_V1 100
+
+enum rtw89_pcie_phy {
+ PCIE_PHY_GEN1,
+ PCIE_PHY_GEN2,
+ PCIE_PHY_GEN1_UNDEFINE = 0x7F,
+};
+
+enum mac_ax_func_sw {
+ MAC_AX_FUNC_DIS,
+ MAC_AX_FUNC_EN,
+};
+
+enum rtw89_pcie_l0sdly {
+ PCIE_L0SDLY_1US = 0,
+ PCIE_L0SDLY_2US = 1,
+ PCIE_L0SDLY_3US = 2,
+ PCIE_L0SDLY_4US = 3,
+ PCIE_L0SDLY_5US = 4,
+ PCIE_L0SDLY_6US = 5,
+ PCIE_L0SDLY_7US = 6,
+};
+
+enum rtw89_pcie_l1dly {
+ PCIE_L1DLY_16US = 4,
+ PCIE_L1DLY_32US = 5,
+ PCIE_L1DLY_64US = 6,
+ PCIE_L1DLY_HW_INFI = 7,
+};
+
+enum rtw89_pcie_clkdly_hw {
+ PCIE_CLKDLY_HW_0 = 0,
+ PCIE_CLKDLY_HW_30US = 0x1,
+ PCIE_CLKDLY_HW_50US = 0x2,
+ PCIE_CLKDLY_HW_100US = 0x3,
+ PCIE_CLKDLY_HW_150US = 0x4,
+ PCIE_CLKDLY_HW_200US = 0x5,
+};
+
+struct rtw89_pci_bd_ram {
+ u8 start_idx;
+ u8 max_num;
+ u8 min_num;
+};
+
+struct rtw89_pci_tx_data {
+ dma_addr_t dma;
+};
+
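+/* Per-skb RX metadata parsed from the RXBD info dword: fs/ls flag the
+ * first/last segment of a received packet, and tag/len carry the RXBD
+ * tag and write-size fields (see RTW89_PCI_RXBD_* below).
+ */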
+struct rtw89_pci_rx_info {
+ dma_addr_t dma;
+ u32 fs:1, ls:1, tag:11, len:14;
+};
+
+#define RTW89_PCI_TXBD_OPTION_LS BIT(14)
+
+struct rtw89_pci_tx_bd_32 {
+ __le16 length;
+ __le16 option;
+ __le32 dma;
+} __packed;
+
+#define RTW89_PCI_TXWP_VALID BIT(15)
+
+struct rtw89_pci_tx_wp_info {
+ __le16 seq0;
+ __le16 seq1;
+ __le16 seq2;
+ __le16 seq3;
+} __packed;
+
+#define RTW89_PCI_ADDR_MSDU_LS BIT(15)
+#define RTW89_PCI_ADDR_LS BIT(14)
+#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6))
+#define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0))
+
+struct rtw89_pci_tx_addr_info_32 {
+ __le16 length;
+ __le16 option;
+ __le32 dma;
+} __packed;
+
+#define RTW89_PCI_RPP_POLLUTED BIT(31)
+#define RTW89_PCI_RPP_SEQ GENMASK(30, 16)
+#define RTW89_PCI_RPP_TX_STATUS GENMASK(15, 13)
+#define RTW89_TX_DONE 0x0
+#define RTW89_TX_RETRY_LIMIT 0x1
+#define RTW89_TX_LIFE_TIME 0x2
+#define RTW89_TX_MACID_DROP 0x3
+#define RTW89_PCI_RPP_QSEL GENMASK(12, 8)
+#define RTW89_PCI_RPP_MACID GENMASK(7, 0)
+
+struct rtw89_pci_rpp_fmt {
+ __le32 dword;
+} __packed;
+
+struct rtw89_pci_rx_bd_32 {
+ __le16 buf_size;
+ __le16 rsvd;
+ __le32 dma;
+} __packed;
+
+#define RTW89_PCI_RXBD_FS BIT(15)
+#define RTW89_PCI_RXBD_LS BIT(14)
+#define RTW89_PCI_RXBD_WRITE_SIZE GENMASK(13, 0)
+#define RTW89_PCI_RXBD_TAG GENMASK(28, 16)
+
+struct rtw89_pci_rxbd_info {
+ __le32 dword;
+};
+
+struct rtw89_pci_tx_wd {
+ struct list_head list;
+ struct sk_buff_head queue;
+
+ void *vaddr;
+ dma_addr_t paddr;
+ u32 len;
+ u32 seq;
+};
+
+struct rtw89_pci_dma_ring {
+ void *head;
+ u8 desc_size;
+ dma_addr_t dma;
+
+ u32 addr_num;
+ u32 addr_idx;
+ u32 addr_bdram;
+ u32 addr_desa_l;
+ u32 addr_desa_h;
+
+ u32 len;
+ u32 wp; /* host idx */
+ u32 rp; /* hw idx */
+};
+
+struct rtw89_pci_tx_wd_ring {
+ void *head;
+ dma_addr_t dma;
+
+ struct rtw89_pci_tx_wd pages[RTW89_PCI_TXWD_NUM_MAX];
+ struct list_head free_pages;
+
+ u32 page_size;
+ u32 page_num;
+ u32 curr_num;
+};
+
+#define RTW89_RX_TAG_MAX 0x1fff
+
+struct rtw89_pci_tx_ring {
+ struct rtw89_pci_tx_wd_ring wd_ring;
+ struct rtw89_pci_dma_ring bd_ring;
+ struct list_head busy_pages;
+ u8 txch;
+ bool dma_enabled;
+	u16 tag; /* valid range: 0x0001 to 0x1fff */
+
+ u64 tx_cnt;
+ u64 tx_acked;
+ u64 tx_retry_lmt;
+ u64 tx_life_time;
+ u64 tx_mac_id_drop;
+};
+
+struct rtw89_pci_rx_ring {
+ struct rtw89_pci_dma_ring bd_ring;
+ struct sk_buff *buf[RTW89_PCI_RXBD_NUM_MAX];
+ u32 buf_sz;
+ struct sk_buff *diliver_skb;
+ struct rtw89_rx_desc_info diliver_desc;
+};
+
+struct rtw89_pci_isrs {
+ u32 halt_c2h_isrs;
+ u32 isrs[2];
+};
+
+struct rtw89_pci {
+ struct pci_dev *pdev;
+
+ /* protect HW irq related registers */
+ spinlock_t irq_lock;
+ /* protect TRX resources (exclude RXQ) */
+ spinlock_t trx_lock;
+ bool running;
+ struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM];
+ struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM];
+ struct sk_buff_head h2c_queue;
+ struct sk_buff_head h2c_release_queue;
+
+ u32 halt_c2h_intrs;
+ u32 intrs[2];
+ void __iomem *mmap;
+};
+
+static inline struct rtw89_pci_rx_info *RTW89_PCI_RX_SKB_CB(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ BUILD_BUG_ON(sizeof(struct rtw89_pci_tx_data) >
+ sizeof(info->status.status_driver_data));
+
+ return (struct rtw89_pci_rx_info *)skb->cb;
+}
+
+static inline struct rtw89_pci_rx_bd_32 *
+RTW89_PCI_RX_BD(struct rtw89_pci_rx_ring *rx_ring, u32 idx)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+ u8 *head = bd_ring->head;
+ u32 desc_size = bd_ring->desc_size;
+ u32 offset = idx * desc_size;
+
+ return (struct rtw89_pci_rx_bd_32 *)(head + offset);
+}
+
+static inline void
+rtw89_pci_rxbd_increase(struct rtw89_pci_rx_ring *rx_ring, u32 cnt)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &rx_ring->bd_ring;
+
+ bd_ring->wp += cnt;
+
+ if (bd_ring->wp >= bd_ring->len)
+ bd_ring->wp -= bd_ring->len;
+}
+
+static inline struct rtw89_pci_tx_data *RTW89_PCI_TX_SKB_CB(struct sk_buff *skb)
+{
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ return (struct rtw89_pci_tx_data *)info->status.status_driver_data;
+}
+
+static inline struct rtw89_pci_tx_bd_32 *
+rtw89_pci_get_next_txbd(struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_dma_ring *bd_ring = &tx_ring->bd_ring;
+ struct rtw89_pci_tx_bd_32 *tx_bd, *head;
+
+ head = bd_ring->head;
+ tx_bd = head + bd_ring->wp;
+
+ return tx_bd;
+}
+
+static inline struct rtw89_pci_tx_wd *
+rtw89_pci_dequeue_txwd(struct rtw89_pci_tx_ring *tx_ring)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+ struct rtw89_pci_tx_wd *txwd;
+
+ txwd = list_first_entry_or_null(&wd_ring->free_pages,
+ struct rtw89_pci_tx_wd, list);
+ if (!txwd)
+ return NULL;
+
+ list_del_init(&txwd->list);
+ txwd->len = 0;
+ wd_ring->curr_num--;
+
+ return txwd;
+}
+
+static inline void
+rtw89_pci_enqueue_txwd(struct rtw89_pci_tx_ring *tx_ring,
+ struct rtw89_pci_tx_wd *txwd)
+{
+ struct rtw89_pci_tx_wd_ring *wd_ring = &tx_ring->wd_ring;
+
+ memset(txwd->vaddr, 0, wd_ring->page_size);
+ list_add_tail(&txwd->list, &wd_ring->free_pages);
+ wd_ring->curr_num++;
+}
+
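+/* All-ones (and 0xeaeaeaea) are the patterns typically read back when the
+ * register access fails, so treat them as invalid LTR values.
+ */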
+static inline bool rtw89_pci_ltr_is_err_reg_val(u32 val)
+{
+ return val == 0xffffffff || val == 0xeaeaeaea;
+}
+
+extern const struct dev_pm_ops rtw89_pm_ops;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c
new file mode 100644
index 000000000000..ab134856baac
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/phy.c
@@ -0,0 +1,2868 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "fw.h"
+#include "phy.h"
+#include "ps.h"
+#include "reg.h"
+#include "sar.h"
+#include "coex.h"
+
+static u16 get_max_amsdu_len(struct rtw89_dev *rtwdev,
+ const struct rtw89_ra_report *report)
+{
+ const struct rate_info *txrate = &report->txrate;
+ u32 bit_rate = report->bit_rate;
+ u8 mcs;
+
+ /* lower than ofdm, do not aggregate */
+ if (bit_rate < 550)
+ return 1;
+
+ /* prevent hardware rate fallback to G mode rate */
+ if (txrate->flags & RATE_INFO_FLAGS_MCS)
+ mcs = txrate->mcs & 0x07;
+ else if (txrate->flags & (RATE_INFO_FLAGS_VHT_MCS | RATE_INFO_FLAGS_HE_MCS))
+ mcs = txrate->mcs;
+ else
+ mcs = 0;
+
+ if (mcs <= 2)
+ return 1;
+
+ /* lower than 20M vht 2ss mcs8, make it small */
+ if (bit_rate < 1800)
+ return 1200;
+
+ /* lower than 40M vht 2ss mcs9, make it medium */
+ if (bit_rate < 4000)
+ return 2600;
+
+ /* not yet 80M vht 2ss mcs8/9, make it twice regular packet size */
+ if (bit_rate < 7000)
+ return 3500;
+
+ return rtwdev->chip->max_amsdu_limit;
+}
+
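+/* Expand a 2-bit-per-stream MCS map (VHT/HE style: 2 = highest MCS
+ * supported, 1 = highest minus 'gap', 0 = highest minus 2 * 'gap',
+ * 3 = stream not supported) into the RA mask, which reserves 12 rate
+ * bits per spatial stream after the legacy (CCK/OFDM) rate bits.
+ */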
+static u64 get_mcs_ra_mask(u16 mcs_map, u8 highest_mcs, u8 gap)
+{
+ u64 ra_mask = 0;
+ u8 mcs_cap;
+ int i, nss;
+
+ for (i = 0, nss = 12; i < 4; i++, mcs_map >>= 2, nss += 12) {
+ mcs_cap = mcs_map & 0x3;
+ switch (mcs_cap) {
+ case 2:
+ ra_mask |= GENMASK_ULL(highest_mcs, 0) << nss;
+ break;
+ case 1:
+ ra_mask |= GENMASK_ULL(highest_mcs - gap, 0) << nss;
+ break;
+ case 0:
+ ra_mask |= GENMASK_ULL(highest_mcs - gap * 2, 0) << nss;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return ra_mask;
+}
+
+static u64 get_he_ra_mask(struct ieee80211_sta *sta)
+{
+ struct ieee80211_sta_he_cap cap = sta->he_cap;
+ u16 mcs_map;
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_160:
+ if (cap.he_cap_elem.phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80p80);
+ else
+ mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_160);
+ break;
+ default:
+ mcs_map = le16_to_cpu(cap.he_mcs_nss_supp.rx_mcs_80);
+ }
+
+ /* MCS11, MCS9, MCS7 */
+ return get_mcs_ra_mask(mcs_map, 11, 2);
+}
+
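+/* RSSI-based rate floor: the stronger the averaged RSSI, the more of the
+ * lowest rate bits get masked out so a good link does not fall back to
+ * the slowest rates. Thresholds at or above the current level
+ * (ratr_state) are raised by RA_FLOOR_UP_GAP for hysteresis.
+ */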
+#define RA_FLOOR_TABLE_SIZE 7
+#define RA_FLOOR_UP_GAP 3
+static u64 rtw89_phy_ra_mask_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+ u8 ratr_state)
+{
+ u8 rssi_lv_t[RA_FLOOR_TABLE_SIZE] = {30, 44, 48, 52, 56, 60, 100};
+ u8 rssi_lv = 0;
+ u8 i;
+
+ rssi >>= 1;
+ for (i = 0; i < RA_FLOOR_TABLE_SIZE; i++) {
+ if (i >= ratr_state)
+ rssi_lv_t[i] += RA_FLOOR_UP_GAP;
+ if (rssi < rssi_lv_t[i]) {
+ rssi_lv = i;
+ break;
+ }
+ }
+ if (rssi_lv == 0)
+ return 0xffffffffffffffffULL;
+ else if (rssi_lv == 1)
+ return 0xfffffffffffffff0ULL;
+ else if (rssi_lv == 2)
+ return 0xffffffffffffffe0ULL;
+ else if (rssi_lv == 3)
+ return 0xffffffffffffffc0ULL;
+ else if (rssi_lv == 4)
+ return 0xffffffffffffff80ULL;
+ else if (rssi_lv >= 5)
+ return 0xffffffffffffff00ULL;
+
+ return 0xffffffffffffffffULL;
+}
+
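+/* Translate the cfg80211 bitrate mask configured from userspace into the
+ * firmware RA mask layout; returns an all-ones mask (no restriction) when
+ * no mask is in use or the band is unhandled.
+ */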
+static u64 rtw89_phy_ra_mask_cfg(struct rtw89_dev *rtwdev, struct rtw89_sta *rtwsta)
+{
+ struct rtw89_hal *hal = &rtwdev->hal;
+ struct ieee80211_sta *sta = rtwsta_to_sta(rtwsta);
+ struct cfg80211_bitrate_mask *mask = &rtwsta->mask;
+ enum nl80211_band band;
+ u64 cfg_mask;
+
+ if (!rtwsta->use_cfg_mask)
+ return -1;
+
+ switch (hal->current_band_type) {
+ case RTW89_BAND_2G:
+ band = NL80211_BAND_2GHZ;
+ cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_2GHZ].legacy,
+ RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES);
+ break;
+ case RTW89_BAND_5G:
+ band = NL80211_BAND_5GHZ;
+ cfg_mask = u64_encode_bits(mask->control[NL80211_BAND_5GHZ].legacy,
+ RA_MASK_OFDM_RATES);
+ break;
+ default:
+ rtw89_warn(rtwdev, "unhandled band type %d\n", hal->current_band_type);
+ return -1;
+ }
+
+ if (sta->he_cap.has_he) {
+ cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[0],
+ RA_MASK_HE_1SS_RATES);
+ cfg_mask |= u64_encode_bits(mask->control[band].he_mcs[1],
+ RA_MASK_HE_2SS_RATES);
+ } else if (sta->vht_cap.vht_supported) {
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[0],
+ RA_MASK_VHT_1SS_RATES);
+ cfg_mask |= u64_encode_bits(mask->control[band].vht_mcs[1],
+ RA_MASK_VHT_2SS_RATES);
+ } else if (sta->ht_cap.ht_supported) {
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[0],
+ RA_MASK_HT_1SS_RATES);
+ cfg_mask |= u64_encode_bits(mask->control[band].ht_mcs[1],
+ RA_MASK_HT_2SS_RATES);
+ }
+
+ return cfg_mask;
+}
+
+static const u64
+rtw89_ra_mask_ht_rates[4] = {RA_MASK_HT_1SS_RATES, RA_MASK_HT_2SS_RATES,
+ RA_MASK_HT_3SS_RATES, RA_MASK_HT_4SS_RATES};
+static const u64
+rtw89_ra_mask_vht_rates[4] = {RA_MASK_VHT_1SS_RATES, RA_MASK_VHT_2SS_RATES,
+ RA_MASK_VHT_3SS_RATES, RA_MASK_VHT_4SS_RATES};
+static const u64
+rtw89_ra_mask_he_rates[4] = {RA_MASK_HE_1SS_RATES, RA_MASK_HE_2SS_RATES,
+ RA_MASK_HE_3SS_RATES, RA_MASK_HE_4SS_RATES};
+
+static void rtw89_phy_ra_sta_update(struct rtw89_dev *rtwdev,
+ struct ieee80211_sta *sta, bool csi)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_vif *rtwvif = rtwsta->rtwvif;
+ struct rtw89_phy_rate_pattern *rate_pattern = &rtwvif->rate_pattern;
+ struct rtw89_ra_info *ra = &rtwsta->ra;
+ const u64 *high_rate_masks = rtw89_ra_mask_ht_rates;
+ u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi);
+ u64 high_rate_mask = 0;
+ u64 ra_mask = 0;
+ u8 mode = 0;
+ u8 csi_mode = RTW89_RA_RPT_MODE_LEGACY;
+ u8 bw_mode = 0;
+ u8 stbc_en = 0;
+ u8 ldpc_en = 0;
+ u8 i;
+ bool sgi = false;
+
+ memset(ra, 0, sizeof(*ra));
+ /* Set the ra mask from sta's capability */
+ if (sta->he_cap.has_he) {
+ mode |= RTW89_RA_MODE_HE;
+ csi_mode = RTW89_RA_RPT_MODE_HE;
+ ra_mask |= get_he_ra_mask(sta);
+ high_rate_masks = rtw89_ra_mask_he_rates;
+ if (sta->he_cap.he_cap_elem.phy_cap_info[2] &
+ IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ)
+ stbc_en = 1;
+ if (sta->he_cap.he_cap_elem.phy_cap_info[1] &
+ IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD)
+ ldpc_en = 1;
+ } else if (sta->vht_cap.vht_supported) {
+ u16 mcs_map = le16_to_cpu(sta->vht_cap.vht_mcs.rx_mcs_map);
+
+ mode |= RTW89_RA_MODE_VHT;
+ csi_mode = RTW89_RA_RPT_MODE_VHT;
+ /* MCS9, MCS8, MCS7 */
+ ra_mask |= get_mcs_ra_mask(mcs_map, 9, 1);
+ high_rate_masks = rtw89_ra_mask_vht_rates;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXSTBC_MASK)
+ stbc_en = 1;
+ if (sta->vht_cap.cap & IEEE80211_VHT_CAP_RXLDPC)
+ ldpc_en = 1;
+ } else if (sta->ht_cap.ht_supported) {
+ mode |= RTW89_RA_MODE_HT;
+ csi_mode = RTW89_RA_RPT_MODE_HT;
+ ra_mask |= ((u64)sta->ht_cap.mcs.rx_mask[3] << 48) |
+ ((u64)sta->ht_cap.mcs.rx_mask[2] << 36) |
+ (sta->ht_cap.mcs.rx_mask[1] << 24) |
+ (sta->ht_cap.mcs.rx_mask[0] << 12);
+ high_rate_masks = rtw89_ra_mask_ht_rates;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_RX_STBC)
+ stbc_en = 1;
+ if (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING)
+ ldpc_en = 1;
+ }
+
+ if (rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ if (sta->supp_rates[NL80211_BAND_2GHZ] <= 0xf)
+ mode |= RTW89_RA_MODE_CCK;
+ else
+ mode |= RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM;
+ } else {
+ mode |= RTW89_RA_MODE_OFDM;
+ }
+
+ if (mode >= RTW89_RA_MODE_HT) {
+ for (i = 0; i < rtwdev->hal.tx_nss; i++)
+ high_rate_mask |= high_rate_masks[i];
+ ra_mask &= high_rate_mask;
+ if (mode & RTW89_RA_MODE_OFDM)
+ ra_mask |= RA_MASK_SUBOFDM_RATES;
+ if (mode & RTW89_RA_MODE_CCK)
+ ra_mask |= RA_MASK_SUBCCK_RATES;
+ } else if (mode & RTW89_RA_MODE_OFDM) {
+ if (mode & RTW89_RA_MODE_CCK)
+ ra_mask |= RA_MASK_SUBCCK_RATES;
+ ra_mask |= RA_MASK_OFDM_RATES;
+ } else {
+ ra_mask = RA_MASK_CCK_RATES;
+ }
+
+ if (mode != RTW89_RA_MODE_CCK) {
+ ra_mask &= rtw89_phy_ra_mask_rssi(rtwdev, rssi, 0);
+ ra_mask &= rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
+ }
+
+ switch (sta->bandwidth) {
+ case IEEE80211_STA_RX_BW_80:
+ bw_mode = RTW89_CHANNEL_WIDTH_80;
+ sgi = sta->vht_cap.vht_supported &&
+ (sta->vht_cap.cap & IEEE80211_VHT_CAP_SHORT_GI_80);
+ break;
+ case IEEE80211_STA_RX_BW_40:
+ bw_mode = RTW89_CHANNEL_WIDTH_40;
+ sgi = sta->ht_cap.ht_supported &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
+ break;
+ default:
+ bw_mode = RTW89_CHANNEL_WIDTH_20;
+ sgi = sta->ht_cap.ht_supported &&
+ (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
+ break;
+ }
+
+ if (sta->he_cap.he_cap_elem.phy_cap_info[3] &
+ IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM)
+ ra->dcm_cap = 1;
+
+ if (rate_pattern->enable) {
+ ra_mask = rtw89_phy_ra_mask_cfg(rtwdev, rtwsta);
+ ra_mask &= rate_pattern->ra_mask;
+ mode = rate_pattern->ra_mode;
+ }
+
+ ra->bw_cap = bw_mode;
+ ra->mode_ctrl = mode;
+ ra->macid = rtwsta->mac_id;
+ ra->stbc_cap = stbc_en;
+ ra->ldpc_cap = ldpc_en;
+ ra->ss_num = min(sta->rx_nss, rtwdev->hal.tx_nss) - 1;
+ ra->en_sgi = sgi;
+ ra->ra_mask = ra_mask;
+
+ if (!csi)
+ return;
+
+ ra->fixed_csi_rate_en = false;
+ ra->ra_csi_rate_en = true;
+ ra->cr_tbl_sel = false;
+ ra->band_num = rtwvif->phy_idx;
+ ra->csi_bw = bw_mode;
+ ra->csi_gi_ltf = RTW89_GILTF_LGI_4XHE32;
+ ra->csi_mcs_ss_idx = 5;
+ ra->csi_mode = csi_mode;
+}
+
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_ra_info *ra = &rtwsta->ra;
+
+ rtw89_phy_ra_sta_update(rtwdev, sta, false);
+ ra->upd_mask = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RA,
+		    "ra update: macid = %d, bw = %d, nss = %d, gi = %d %d",
+ ra->macid,
+ ra->bw_cap,
+ ra->ss_num,
+ ra->en_sgi,
+ ra->giltf);
+
+ rtw89_fw_h2c_ra(rtwdev, ra, false);
+}
+
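+/* Evaluate one section of the bitrate mask for the fixed-rate pattern.
+ * rate_ctrl equal to ctrl_skip (or empty) means the section is untouched;
+ * otherwise, with 'force', exactly one bit may be set, and only one
+ * section overall may select a rate, which is then recorded in 'next'.
+ */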
+static bool __check_rate_pattern(struct rtw89_phy_rate_pattern *next,
+ u16 rate_base, u64 ra_mask, u8 ra_mode,
+ u32 rate_ctrl, u32 ctrl_skip, bool force)
+{
+ u8 n, c;
+
+ if (rate_ctrl == ctrl_skip)
+ return true;
+
+ n = hweight32(rate_ctrl);
+ if (n == 0)
+ return true;
+
+ if (force && n != 1)
+ return false;
+
+ if (next->enable)
+ return false;
+
+ c = __fls(rate_ctrl);
+ next->rate = rate_base + c;
+ next->ra_mode = ra_mode;
+ next->ra_mask = ra_mask;
+ next->enable = true;
+
+ return true;
+}
+
+void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask)
+{
+ struct ieee80211_supported_band *sband;
+ struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv;
+ struct rtw89_phy_rate_pattern next_pattern = {0};
+ static const u16 hw_rate_he[] = {RTW89_HW_RATE_HE_NSS1_MCS0,
+ RTW89_HW_RATE_HE_NSS2_MCS0,
+ RTW89_HW_RATE_HE_NSS3_MCS0,
+ RTW89_HW_RATE_HE_NSS4_MCS0};
+ static const u16 hw_rate_vht[] = {RTW89_HW_RATE_VHT_NSS1_MCS0,
+ RTW89_HW_RATE_VHT_NSS2_MCS0,
+ RTW89_HW_RATE_VHT_NSS3_MCS0,
+ RTW89_HW_RATE_VHT_NSS4_MCS0};
+ static const u16 hw_rate_ht[] = {RTW89_HW_RATE_MCS0,
+ RTW89_HW_RATE_MCS8,
+ RTW89_HW_RATE_MCS16,
+ RTW89_HW_RATE_MCS24};
+ u8 band = rtwdev->hal.current_band_type;
+ u8 tx_nss = rtwdev->hal.tx_nss;
+ u8 i;
+
+ for (i = 0; i < tx_nss; i++)
+ if (!__check_rate_pattern(&next_pattern, hw_rate_he[i],
+ RA_MASK_HE_RATES, RTW89_RA_MODE_HE,
+ mask->control[band].he_mcs[i],
+ 0, true))
+ goto out;
+
+ for (i = 0; i < tx_nss; i++)
+ if (!__check_rate_pattern(&next_pattern, hw_rate_vht[i],
+ RA_MASK_VHT_RATES, RTW89_RA_MODE_VHT,
+ mask->control[band].vht_mcs[i],
+ 0, true))
+ goto out;
+
+ for (i = 0; i < tx_nss; i++)
+ if (!__check_rate_pattern(&next_pattern, hw_rate_ht[i],
+ RA_MASK_HT_RATES, RTW89_RA_MODE_HT,
+ mask->control[band].ht_mcs[i],
+ 0, true))
+ goto out;
+
+	/* legacy cannot be empty for nl80211_parse_tx_bitrate_mask(), and
+	 * ieee80211_set_bitrate_mask() requires at least one basic rate,
+	 * so the decision depends only on whether all bitrates are set.
+	 */
+ sband = rtwdev->hw->wiphy->bands[band];
+ if (band == RTW89_BAND_2G) {
+ if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_CCK1,
+ RA_MASK_CCK_RATES | RA_MASK_OFDM_RATES,
+ RTW89_RA_MODE_CCK | RTW89_RA_MODE_OFDM,
+ mask->control[band].legacy,
+ BIT(sband->n_bitrates) - 1, false))
+ goto out;
+ } else {
+ if (!__check_rate_pattern(&next_pattern, RTW89_HW_RATE_OFDM6,
+ RA_MASK_OFDM_RATES, RTW89_RA_MODE_OFDM,
+ mask->control[band].legacy,
+ BIT(sband->n_bitrates) - 1, false))
+ goto out;
+ }
+
+ if (!next_pattern.enable)
+ goto out;
+
+ rtwvif->rate_pattern = next_pattern;
+ rtw89_debug(rtwdev, RTW89_DBG_RA,
+ "configure pattern: rate 0x%x, mask 0x%llx, mode 0x%x\n",
+ next_pattern.rate,
+ next_pattern.ra_mask,
+ next_pattern.ra_mode);
+ return;
+
+out:
+ rtwvif->rate_pattern.enable = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RA, "unset rate pattern\n");
+}
+
+static void rtw89_phy_ra_updata_sta_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_dev *rtwdev = (struct rtw89_dev *)data;
+
+ rtw89_phy_ra_updata_sta(rtwdev, sta);
+}
+
+void rtw89_phy_ra_update(struct rtw89_dev *rtwdev)
+{
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_ra_updata_sta_iter,
+ rtwdev);
+}
+
+void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_ra_info *ra = &rtwsta->ra;
+ u8 rssi = ewma_rssi_read(&rtwsta->avg_rssi) >> RSSI_FACTOR;
+ bool csi = rtw89_sta_has_beamformer_cap(sta);
+
+ rtw89_phy_ra_sta_update(rtwdev, sta, csi);
+
+ if (rssi > 40)
+ ra->init_rate_lv = 1;
+ else if (rssi > 20)
+ ra->init_rate_lv = 2;
+ else if (rssi > 1)
+ ra->init_rate_lv = 3;
+ else
+ ra->init_rate_lv = 0;
+ ra->upd_all = 1;
+ rtw89_debug(rtwdev, RTW89_DBG_RA,
+ "ra assoc: macid = %d, mode = %d, bw = %d, nss = %d, lv = %d",
+ ra->macid,
+ ra->mode_ctrl,
+ ra->bw_cap,
+ ra->ss_num,
+ ra->init_rate_lv);
+ rtw89_debug(rtwdev, RTW89_DBG_RA,
+ "ra assoc: dcm = %d, er = %d, ldpc = %d, stbc = %d, gi = %d %d",
+ ra->dcm_cap,
+ ra->er_cap,
+ ra->ldpc_cap,
+ ra->stbc_cap,
+ ra->en_sgi,
+ ra->giltf);
+
+ rtw89_fw_h2c_ra(rtwdev, ra, csi);
+}
+
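+/* Derive the TX sub-channel (TXSC) index from where the primary channel
+ * sits inside the current channel bandwidth for a transmission of data
+ * bandwidth 'dbw'; returns 0 when the data bandwidth covers the whole
+ * channel.
+ */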
+u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_bandwidth dbw)
+{
+ enum rtw89_bandwidth cbw = param->bandwidth;
+ u8 pri_ch = param->primary_chan;
+ u8 central_ch = param->center_chan;
+ u8 txsc_idx = 0;
+ u8 tmp = 0;
+
+ if (cbw == dbw || cbw == RTW89_CHANNEL_WIDTH_20)
+ return txsc_idx;
+
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc_idx = pri_ch > central_ch ? 1 : 2;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20) {
+ if (pri_ch > central_ch)
+ txsc_idx = (pri_ch - central_ch) >> 1;
+ else
+ txsc_idx = ((central_ch - pri_ch) >> 1) + 1;
+ } else {
+ txsc_idx = pri_ch > central_ch ? 9 : 10;
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_160:
+ if (pri_ch > central_ch)
+ tmp = (pri_ch - central_ch) >> 1;
+ else
+ tmp = ((central_ch - pri_ch) >> 1) + 1;
+
+ if (dbw == RTW89_CHANNEL_WIDTH_20) {
+ txsc_idx = tmp;
+ } else if (dbw == RTW89_CHANNEL_WIDTH_40) {
+ if (tmp == 1 || tmp == 3)
+ txsc_idx = 9;
+ else if (tmp == 5 || tmp == 7)
+ txsc_idx = 11;
+ else if (tmp == 2 || tmp == 4)
+ txsc_idx = 10;
+ else if (tmp == 6 || tmp == 8)
+ txsc_idx = 12;
+ else
+ return 0xff;
+ } else {
+ txsc_idx = pri_ch > central_ch ? 13 : 14;
+ }
+ break;
+ case RTW89_CHANNEL_WIDTH_80_80:
+ if (dbw == RTW89_CHANNEL_WIDTH_20) {
+ if (pri_ch > central_ch)
+ txsc_idx = (10 - (pri_ch - central_ch)) >> 1;
+ else
+ txsc_idx = ((central_ch - pri_ch) >> 1) + 5;
+ } else if (dbw == RTW89_CHANNEL_WIDTH_40) {
+ txsc_idx = pri_ch > central_ch ? 10 : 12;
+ } else {
+ txsc_idx = 14;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return txsc_idx;
+}
+
+u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 val, direct_addr;
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return INV_RF_DATA;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ val = rtw89_phy_read32_mask(rtwdev, direct_addr, mask);
+
+ return val;
+}
+EXPORT_SYMBOL(rtw89_phy_read_rf);
+
+bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const u32 *base_addr = chip->rf_base_addr;
+ u32 direct_addr;
+
+ if (rf_path >= rtwdev->chip->rf_path_num) {
+ rtw89_err(rtwdev, "unsupported rf path (%d)\n", rf_path);
+ return false;
+ }
+
+ addr &= 0xff;
+ direct_addr = base_addr[rf_path] + (addr << 2);
+ mask &= RFREG_MASK;
+
+ rtw89_phy_write32_mask(rtwdev, direct_addr, mask, data);
+
+	/* short delay to make sure the RF write has taken effect */
+ udelay(1);
+
+ return true;
+}
+EXPORT_SYMBOL(rtw89_phy_write_rf);
+
+static void rtw89_phy_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ chip->ops->bb_reset(rtwdev, phy_idx);
+}
+
+static void rtw89_phy_config_bb_reg(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ if (reg->addr == 0xfe)
+ mdelay(50);
+ else if (reg->addr == 0xfd)
+ mdelay(5);
+ else if (reg->addr == 0xfc)
+ mdelay(1);
+ else if (reg->addr == 0xfb)
+ udelay(50);
+ else if (reg->addr == 0xfa)
+ udelay(5);
+ else if (reg->addr == 0xf9)
+ udelay(1);
+ else
+ rtw89_phy_write32(rtwdev, reg->addr, reg->data);
+}
+
+static void
+rtw89_phy_cofig_rf_reg_store(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ struct rtw89_fw_h2c_rf_reg_info *info)
+{
+ u16 idx = info->curr_idx % RTW89_H2C_RF_PAGE_SIZE;
+ u8 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
+
+ info->rtw89_phy_config_rf_h2c[page][idx] =
+ cpu_to_le32((reg->addr << 20) | reg->data);
+ info->curr_idx++;
+}
+
+static int rtw89_phy_config_rf_reg_fw(struct rtw89_dev *rtwdev,
+ struct rtw89_fw_h2c_rf_reg_info *info)
+{
+ u16 page = info->curr_idx / RTW89_H2C_RF_PAGE_SIZE;
+ u16 len = (info->curr_idx % RTW89_H2C_RF_PAGE_SIZE) * 4;
+ u8 i;
+ int ret = 0;
+
+ if (page > RTW89_H2C_RF_PAGE_NUM) {
+ rtw89_warn(rtwdev,
+ "rf reg h2c total page num %d larger than %d (RTW89_H2C_RF_PAGE_NUM)\n",
+ page, RTW89_H2C_RF_PAGE_NUM);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < page; i++) {
+ ret = rtw89_fw_h2c_rf_reg(rtwdev, info,
+ RTW89_H2C_RF_PAGE_SIZE * 4, i);
+ if (ret)
+ return ret;
+ }
+ ret = rtw89_fw_h2c_rf_reg(rtwdev, info, len, i);
+ if (ret)
+ return ret;
+ info->curr_idx = 0;
+
+ return 0;
+}
+
+static void rtw89_phy_config_rf_reg(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *extra_data)
+{
+ if (reg->addr == 0xfe) {
+ mdelay(50);
+ } else if (reg->addr == 0xfd) {
+ mdelay(5);
+ } else if (reg->addr == 0xfc) {
+ mdelay(1);
+ } else if (reg->addr == 0xfb) {
+ udelay(50);
+ } else if (reg->addr == 0xfa) {
+ udelay(5);
+ } else if (reg->addr == 0xf9) {
+ udelay(1);
+ } else {
+ rtw89_write_rf(rtwdev, rf_path, reg->addr, 0xfffff, reg->data);
+ rtw89_phy_cofig_rf_reg_store(rtwdev, reg, rf_path,
+ (struct rtw89_fw_h2c_rf_reg_info *)extra_data);
+ }
+}
+
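+/* PHY parameter tables begin with "headline" entries, each tagging a
+ * combination of RFE (RF front-end) type and chip version (CV). Select
+ * the headline matching this device, falling back through the cases
+ * below when there is no exact match.
+ */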
+static int rtw89_phy_sel_headline(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_table *table,
+ u32 *headline_size, u32 *headline_idx,
+ u8 rfe, u8 cv)
+{
+ const struct rtw89_reg2_def *reg;
+ u32 headline;
+ u32 compare, target;
+ u8 rfe_para, cv_para;
+ u8 cv_max = 0;
+ bool case_matched = false;
+ u32 i;
+
+ for (i = 0; i < table->n_regs; i++) {
+ reg = &table->regs[i];
+ headline = get_phy_headline(reg->addr);
+ if (headline != PHY_HEADLINE_VALID)
+ break;
+ }
+ *headline_size = i;
+ if (*headline_size == 0)
+ return 0;
+
+ /* case 1: RFE match, CV match */
+ compare = get_phy_compare(rfe, cv);
+ for (i = 0; i < *headline_size; i++) {
+ reg = &table->regs[i];
+ target = get_phy_target(reg->addr);
+ if (target == compare) {
+ *headline_idx = i;
+ return 0;
+ }
+ }
+
+ /* case 2: RFE match, CV don't care */
+ compare = get_phy_compare(rfe, PHY_COND_DONT_CARE);
+ for (i = 0; i < *headline_size; i++) {
+ reg = &table->regs[i];
+ target = get_phy_target(reg->addr);
+ if (target == compare) {
+ *headline_idx = i;
+ return 0;
+ }
+ }
+
+ /* case 3: RFE match, CV max in table */
+ for (i = 0; i < *headline_size; i++) {
+ reg = &table->regs[i];
+ rfe_para = get_phy_cond_rfe(reg->addr);
+ cv_para = get_phy_cond_cv(reg->addr);
+ if (rfe_para == rfe) {
+ if (cv_para >= cv_max) {
+ cv_max = cv_para;
+ *headline_idx = i;
+ case_matched = true;
+ }
+ }
+ }
+
+ if (case_matched)
+ return 0;
+
+ /* case 4: RFE don't care, CV max in table */
+ for (i = 0; i < *headline_size; i++) {
+ reg = &table->regs[i];
+ rfe_para = get_phy_cond_rfe(reg->addr);
+ cv_para = get_phy_cond_cv(reg->addr);
+ if (rfe_para == PHY_COND_DONT_CARE) {
+ if (cv_para >= cv_max) {
+ cv_max = cv_para;
+ *headline_idx = i;
+ case_matched = true;
+ }
+ }
+ }
+
+ if (case_matched)
+ return 0;
+
+ return -EINVAL;
+}
+
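+/* Walk a PHY parameter table and apply the entries selected by the chosen
+ * headline. Non-headline entries may encode if/elif/else branches
+ * (PHY_COND_*) keyed on the headline target; only the matching branch is
+ * handed to config().
+ */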
+static void rtw89_phy_init_reg(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_table *table,
+ void (*config)(struct rtw89_dev *rtwdev,
+ const struct rtw89_reg2_def *reg,
+ enum rtw89_rf_path rf_path,
+ void *data),
+ void *extra_data)
+{
+ const struct rtw89_reg2_def *reg;
+ enum rtw89_rf_path rf_path = table->rf_path;
+ u8 rfe = rtwdev->efuse.rfe_type;
+ u8 cv = rtwdev->hal.cv;
+ u32 i;
+ u32 headline_size = 0, headline_idx = 0;
+ u32 target = 0, cfg_target;
+ u8 cond;
+ bool is_matched = true;
+ bool target_found = false;
+ int ret;
+
+ ret = rtw89_phy_sel_headline(rtwdev, table, &headline_size,
+ &headline_idx, rfe, cv);
+ if (ret) {
+ rtw89_err(rtwdev, "invalid PHY package: %d/%d\n", rfe, cv);
+ return;
+ }
+
+ cfg_target = get_phy_target(table->regs[headline_idx].addr);
+ for (i = headline_size; i < table->n_regs; i++) {
+ reg = &table->regs[i];
+ cond = get_phy_cond(reg->addr);
+ switch (cond) {
+ case PHY_COND_BRANCH_IF:
+ case PHY_COND_BRANCH_ELIF:
+ target = get_phy_target(reg->addr);
+ break;
+ case PHY_COND_BRANCH_ELSE:
+ is_matched = false;
+ if (!target_found) {
+ rtw89_warn(rtwdev, "failed to load CR %x/%x\n",
+ reg->addr, reg->data);
+ return;
+ }
+ break;
+ case PHY_COND_BRANCH_END:
+ is_matched = true;
+ target_found = false;
+ break;
+ case PHY_COND_CHECK:
+ if (target_found) {
+ is_matched = false;
+ break;
+ }
+
+ if (target == cfg_target) {
+ is_matched = true;
+ target_found = true;
+ } else {
+ is_matched = false;
+ target_found = false;
+ }
+ break;
+ default:
+ if (is_matched)
+ config(rtwdev, reg, rf_path, extra_data);
+ break;
+ }
+ }
+}
+
+void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *bb_table = chip->bb_table;
+
+ rtw89_phy_init_reg(rtwdev, bb_table, rtw89_phy_config_bb_reg, NULL);
+ rtw89_chip_init_txpwr_unit(rtwdev, RTW89_PHY_0);
+ rtw89_phy_bb_reset(rtwdev, RTW89_PHY_0);
+}
+
+static u32 rtw89_phy_nctl_poll(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32(rtwdev, 0x8080, 0x4);
+ udelay(1);
+ return rtw89_phy_read32(rtwdev, 0x8080);
+}
+
+void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *rf_table;
+ struct rtw89_fw_h2c_rf_reg_info *rf_reg_info;
+ u8 path;
+
+ rf_reg_info = kzalloc(sizeof(*rf_reg_info), GFP_KERNEL);
+ if (!rf_reg_info)
+ return;
+
+ for (path = RF_PATH_A; path < chip->rf_path_num; path++) {
+ rf_reg_info->rf_path = path;
+ rf_table = chip->rf_table[path];
+ rtw89_phy_init_reg(rtwdev, rf_table, rtw89_phy_config_rf_reg,
+ (void *)rf_reg_info);
+ if (rtw89_phy_config_rf_reg_fw(rtwdev, rf_reg_info))
+ rtw89_warn(rtwdev, "rf path %d reg h2c config failed\n",
+ path);
+ }
+ kfree(rf_reg_info);
+}
+
+static void rtw89_phy_init_rf_nctl(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_phy_table *nctl_table;
+ u32 val;
+ int ret;
+
+ /* IQK/DPK clock & reset */
+ rtw89_phy_write32_set(rtwdev, 0x0c60, 0x3);
+ rtw89_phy_write32_set(rtwdev, 0x0c6c, 0x1);
+ rtw89_phy_write32_set(rtwdev, 0x58ac, 0x8000000);
+ rtw89_phy_write32_set(rtwdev, 0x78ac, 0x8000000);
+
+ /* check 0x8080 */
+ rtw89_phy_write32(rtwdev, 0x8000, 0x8);
+
+ ret = read_poll_timeout(rtw89_phy_nctl_poll, val, val == 0x4, 10,
+ 1000, false, rtwdev);
+ if (ret)
+ rtw89_err(rtwdev, "failed to poll nctl block\n");
+
+ nctl_table = chip->nctl_table;
+ rtw89_phy_init_reg(rtwdev, nctl_table, rtw89_phy_config_bb_reg, NULL);
+}
+
+static u32 rtw89_phy0_phy1_offset(struct rtw89_dev *rtwdev, u32 addr)
+{
+ u32 phy_page = addr >> 8;
+ u32 ofst = 0;
+
+ switch (phy_page) {
+ case 0x6:
+ case 0x7:
+ case 0x8:
+ case 0x9:
+ case 0xa:
+ case 0xb:
+ case 0xc:
+ case 0xd:
+ case 0x19:
+ case 0x1a:
+ case 0x1b:
+ ofst = 0x2000;
+ break;
+ default:
+ /* warning case */
+ ofst = 0;
+ break;
+ }
+
+ if (phy_page >= 0x40 && phy_page <= 0x4f)
+ ofst = 0x2000;
+
+ return ofst;
+}
+
+void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ u32 data, enum rtw89_phy_idx phy_idx)
+{
+ if (rtwdev->dbcc_en && phy_idx == RTW89_PHY_1)
+ addr += rtw89_phy0_phy1_offset(rtwdev, addr);
+ rtw89_phy_write32_mask(rtwdev, addr, mask, data);
+}
+
+void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ u32 val)
+{
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_0);
+
+ if (!rtwdev->dbcc_en)
+ return;
+
+ rtw89_phy_write32_idx(rtwdev, addr, mask, val, RTW89_PHY_1);
+}
+
+void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_reg3_tbl *tbl)
+{
+ const struct rtw89_reg3_def *reg3;
+ int i;
+
+ for (i = 0; i < tbl->size; i++) {
+ reg3 = &tbl->reg3[i];
+ rtw89_phy_write32_mask(rtwdev, reg3->addr, reg3->mask, reg3->data);
+ }
+}
+
+const u8 rtw89_rs_idx_max[] = {
+ [RTW89_RS_CCK] = RTW89_RATE_CCK_MAX,
+ [RTW89_RS_OFDM] = RTW89_RATE_OFDM_MAX,
+ [RTW89_RS_MCS] = RTW89_RATE_MCS_MAX,
+ [RTW89_RS_HEDCM] = RTW89_RATE_HEDCM_MAX,
+ [RTW89_RS_OFFSET] = RTW89_RATE_OFFSET_MAX,
+};
+
+const u8 rtw89_rs_nss_max[] = {
+ [RTW89_RS_CCK] = 1,
+ [RTW89_RS_OFDM] = 1,
+ [RTW89_RS_MCS] = RTW89_NSS_MAX,
+ [RTW89_RS_HEDCM] = RTW89_NSS_HEDCM_MAX,
+ [RTW89_RS_OFFSET] = 1,
+};
+
+static const u8 _byr_of_rs[] = {
+ [RTW89_RS_CCK] = offsetof(struct rtw89_txpwr_byrate, cck),
+ [RTW89_RS_OFDM] = offsetof(struct rtw89_txpwr_byrate, ofdm),
+ [RTW89_RS_MCS] = offsetof(struct rtw89_txpwr_byrate, mcs),
+ [RTW89_RS_HEDCM] = offsetof(struct rtw89_txpwr_byrate, hedcm),
+ [RTW89_RS_OFFSET] = offsetof(struct rtw89_txpwr_byrate, offset),
+};
+
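+/* Accessors for the per-rate-section TX power (byrate) storage: _byr_seek
+ * returns the array of one rate section inside struct rtw89_txpwr_byrate,
+ * _byr_idx flattens (nss, rate index) into that array, and _byr_chk
+ * bounds-checks both against the tables above.
+ */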
+#define _byr_seek(rs, raw) ((s8 *)(raw) + _byr_of_rs[rs])
+#define _byr_idx(rs, nss, idx) ((nss) * rtw89_rs_idx_max[rs] + (idx))
+#define _byr_chk(rs, nss, idx) \
+ ((nss) < rtw89_rs_nss_max[rs] && (idx) < rtw89_rs_idx_max[rs])
+
+void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_txpwr_table *tbl)
+{
+ const struct rtw89_txpwr_byrate_cfg *cfg = tbl->data;
+ const struct rtw89_txpwr_byrate_cfg *end = cfg + tbl->size;
+ s8 *byr;
+ u32 data;
+ u8 i, idx;
+
+ for (; cfg < end; cfg++) {
+ byr = _byr_seek(cfg->rs, &rtwdev->byr[cfg->band]);
+ data = cfg->data;
+
+ for (i = 0; i < cfg->len; i++, data >>= 8) {
+ idx = _byr_idx(cfg->rs, cfg->nss, (cfg->shf + i));
+ byr[idx] = (s8)(data & 0xff);
+ }
+ }
+}
+
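+/* RF and MAC use different fixed-point precision for TX power values;
+ * drop the extra fractional bits when converting an RF-domain value for
+ * the MAC.
+ */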
+#define _phy_txpwr_rf_to_mac(rtwdev, txpwr_rf) \
+({ \
+ const struct rtw89_chip_info *__c = (rtwdev)->chip; \
+ (txpwr_rf) >> (__c->txpwr_factor_rf - __c->txpwr_factor_mac); \
+})
+
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_rate_desc *rate_desc)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+ s8 *byr;
+ u8 idx;
+
+ if (rate_desc->rs == RTW89_RS_CCK)
+ band = RTW89_BAND_2G;
+
+ if (!_byr_chk(rate_desc->rs, rate_desc->nss, rate_desc->idx)) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] unknown byrate desc rs=%d nss=%d idx=%d\n",
+ rate_desc->rs, rate_desc->nss, rate_desc->idx);
+
+ return 0;
+ }
+
+ byr = _byr_seek(rate_desc->rs, &rtwdev->byr[band]);
+ idx = _byr_idx(rate_desc->rs, rate_desc->nss, rate_desc->idx);
+
+ return _phy_txpwr_rf_to_mac(rtwdev, byr[idx]);
+}
+
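+/* Map a channel number to the per-band index used by the TX power limit
+ * tables: channels 1-14 map to 0-13, and the 5 GHz sub-bands are packed
+ * back to back.
+ */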
+static u8 rtw89_channel_to_idx(struct rtw89_dev *rtwdev, u8 channel)
+{
+ switch (channel) {
+ case 1 ... 14:
+ return channel - 1;
+ case 36 ... 64:
+ return (channel - 36) / 2;
+ case 100 ... 144:
+ return ((channel - 100) / 2) + 15;
+ case 149 ... 177:
+ return ((channel - 149) / 2) + 38;
+ default:
+ rtw89_warn(rtwdev, "unknown channel: %d\n", channel);
+ return 0;
+ }
+}
+
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+ u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
+ u8 band = rtwdev->hal.current_band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ s8 lmt = 0, sar;
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ lmt = (*chip->txpwr_lmt_2g)[bw][ntx][rs][bf][regd][ch_idx];
+ break;
+ case RTW89_BAND_5G:
+ lmt = (*chip->txpwr_lmt_5g)[bw][ntx][rs][bf][regd][ch_idx];
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown band type: %d\n", band);
+ return 0;
+ }
+
+ lmt = _phy_txpwr_rf_to_mac(rtwdev, lmt);
+ sar = rtw89_query_sar(rtwdev);
+
+ return min(lmt, sar);
+}
+
+#define __fill_txpwr_limit_nonbf_bf(ptr, bw, ntx, rs, ch) \
+ do { \
+ u8 __i; \
+ for (__i = 0; __i < RTW89_BF_NUM; __i++) \
+ ptr[__i] = rtw89_phy_read_txpwr_limit(rtwdev, \
+ bw, ntx, \
+ rs, __i, \
+ (ch)); \
+ } while (0)
+
+static void rtw89_phy_fill_txpwr_limit_20m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx, u8 ch)
+{
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_CCK, ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_CCK, ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch);
+}
+
+static void rtw89_phy_fill_txpwr_limit_40m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx, u8 ch)
+{
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_20m, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_CCK, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->cck_40m, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_CCK, ch);
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch);
+}
+
+static void rtw89_phy_fill_txpwr_limit_80m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx, u8 ch)
+{
+ s8 val_0p5_n[RTW89_BF_NUM];
+ s8 val_0p5_p[RTW89_BF_NUM];
+ u8 i;
+
+ __fill_txpwr_limit_nonbf_bf(lmt->ofdm, RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_OFDM, ch - 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[0], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[1], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch - 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[2], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 2);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_20m[3], RTW89_CHANNEL_WIDTH_20,
+ ntx, RTW89_RS_MCS, ch + 6);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[0], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_40m[1], RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+ __fill_txpwr_limit_nonbf_bf(lmt->mcs_80m[0], RTW89_CHANNEL_WIDTH_80,
+ ntx, RTW89_RS_MCS, ch);
+
+ __fill_txpwr_limit_nonbf_bf(val_0p5_n, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch - 4);
+ __fill_txpwr_limit_nonbf_bf(val_0p5_p, RTW89_CHANNEL_WIDTH_40,
+ ntx, RTW89_RS_MCS, ch + 4);
+
+ for (i = 0; i < RTW89_BF_NUM; i++)
+ lmt->mcs_40m_0p5[i] = min_t(s8, val_0p5_n[i], val_0p5_p[i]);
+}
+
+void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx)
+{
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+
+ memset(lmt, 0, sizeof(*lmt));
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_fill_txpwr_limit_20m(rtwdev, lmt, ntx, ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_fill_txpwr_limit_40m(rtwdev, lmt, ntx, ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_fill_txpwr_limit_80m(rtwdev, lmt, ntx, ch);
+ break;
+ }
+}
+
+static s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ u8 ru, u8 ntx, u8 ch)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ u8 ch_idx = rtw89_channel_to_idx(rtwdev, ch);
+ u8 band = rtwdev->hal.current_band_type;
+ u8 regd = rtw89_regd_get(rtwdev, band);
+ s8 lmt_ru = 0, sar;
+
+ switch (band) {
+ case RTW89_BAND_2G:
+ lmt_ru = (*chip->txpwr_lmt_ru_2g)[ru][ntx][regd][ch_idx];
+ break;
+ case RTW89_BAND_5G:
+ lmt_ru = (*chip->txpwr_lmt_ru_5g)[ru][ntx][regd][ch_idx];
+ break;
+ default:
+ rtw89_warn(rtwdev, "unknown band type: %d\n", band);
+ return 0;
+ }
+
+ lmt_ru = _phy_txpwr_rf_to_mac(rtwdev, lmt_ru);
+ sar = rtw89_query_sar(rtwdev);
+
+ return min(lmt_ru, sar);
+}
+
+static void
+rtw89_phy_fill_txpwr_limit_ru_20m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx, u8 ch)
+{
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch);
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch);
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch);
+}
+
+static void
+rtw89_phy_fill_txpwr_limit_ru_40m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx, u8 ch)
+{
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch - 2);
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch + 2);
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch - 2);
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch + 2);
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch - 2);
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch + 2);
+}
+
+static void
+rtw89_phy_fill_txpwr_limit_ru_80m(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx, u8 ch)
+{
+ lmt_ru->ru26[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch - 6);
+ lmt_ru->ru26[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch - 2);
+ lmt_ru->ru26[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch + 2);
+ lmt_ru->ru26[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU26,
+ ntx, ch + 6);
+ lmt_ru->ru52[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch - 6);
+ lmt_ru->ru52[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch - 2);
+ lmt_ru->ru52[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch + 2);
+ lmt_ru->ru52[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU52,
+ ntx, ch + 6);
+ lmt_ru->ru106[0] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch - 6);
+ lmt_ru->ru106[1] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch - 2);
+ lmt_ru->ru106[2] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch + 2);
+ lmt_ru->ru106[3] = rtw89_phy_read_txpwr_limit_ru(rtwdev, RTW89_RU106,
+ ntx, ch + 6);
+}
+
+void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx)
+{
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+
+ memset(lmt_ru, 0, sizeof(*lmt_ru));
+
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_fill_txpwr_limit_ru_20m(rtwdev, lmt_ru, ntx, ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_fill_txpwr_limit_ru_40m(rtwdev, lmt_ru, ntx, ch);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_fill_txpwr_limit_ru_80m(rtwdev, lmt_ru, ntx, ch);
+ break;
+ }
+}
+
+struct rtw89_phy_iter_ra_data {
+ struct rtw89_dev *rtwdev;
+ struct sk_buff *c2h;
+};
+
+static void rtw89_phy_c2h_ra_rpt_iter(void *data, struct ieee80211_sta *sta)
+{
+ struct rtw89_phy_iter_ra_data *ra_data = (struct rtw89_phy_iter_ra_data *)data;
+ struct rtw89_dev *rtwdev = ra_data->rtwdev;
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_ra_report *ra_report = &rtwsta->ra_report;
+ struct sk_buff *c2h = ra_data->c2h;
+ u8 mode, rate, bw, giltf, mac_id;
+
+ mac_id = RTW89_GET_PHY_C2H_RA_RPT_MACID(c2h->data);
+ if (mac_id != rtwsta->mac_id)
+ return;
+
+ memset(ra_report, 0, sizeof(*ra_report));
+
+ rate = RTW89_GET_PHY_C2H_RA_RPT_MCSNSS(c2h->data);
+ bw = RTW89_GET_PHY_C2H_RA_RPT_BW(c2h->data);
+ giltf = RTW89_GET_PHY_C2H_RA_RPT_GILTF(c2h->data);
+ mode = RTW89_GET_PHY_C2H_RA_RPT_MD_SEL(c2h->data);
+
+ switch (mode) {
+ case RTW89_RA_RPT_MODE_LEGACY:
+ ra_report->txrate.legacy = rtw89_ra_report_to_bitrate(rtwdev, rate);
+ break;
+ case RTW89_RA_RPT_MODE_HT:
+ ra_report->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ if (rtwdev->fw.old_ht_ra_format)
+ rate = RTW89_MK_HT_RATE(FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate),
+ FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate));
+ else
+ rate = FIELD_GET(RTW89_RA_RATE_MASK_HT_MCS, rate);
+ ra_report->txrate.mcs = rate;
+ if (giltf)
+ ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case RTW89_RA_RPT_MODE_VHT:
+ ra_report->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate);
+ ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1;
+ if (giltf)
+ ra_report->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+ break;
+ case RTW89_RA_RPT_MODE_HE:
+ ra_report->txrate.flags |= RATE_INFO_FLAGS_HE_MCS;
+ ra_report->txrate.mcs = FIELD_GET(RTW89_RA_RATE_MASK_MCS, rate);
+ ra_report->txrate.nss = FIELD_GET(RTW89_RA_RATE_MASK_NSS, rate) + 1;
+ if (giltf == RTW89_GILTF_2XHE08 || giltf == RTW89_GILTF_1XHE08)
+ ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_0_8;
+ else if (giltf == RTW89_GILTF_2XHE16 || giltf == RTW89_GILTF_1XHE16)
+ ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_1_6;
+ else
+ ra_report->txrate.he_gi = NL80211_RATE_INFO_HE_GI_3_2;
+ break;
+ }
+
+ if (bw == RTW89_CHANNEL_WIDTH_80)
+ ra_report->txrate.bw = RATE_INFO_BW_80;
+ else if (bw == RTW89_CHANNEL_WIDTH_40)
+ ra_report->txrate.bw = RATE_INFO_BW_40;
+ else
+ ra_report->txrate.bw = RATE_INFO_BW_20;
+
+ ra_report->bit_rate = cfg80211_calculate_bitrate(&ra_report->txrate);
+ ra_report->hw_rate = FIELD_PREP(RTW89_HW_RATE_MASK_MOD, mode) |
+ FIELD_PREP(RTW89_HW_RATE_MASK_VAL, rate);
+ sta->max_rc_amsdu_len = get_max_amsdu_len(rtwdev, ra_report);
+ rtwsta->max_agg_wait = sta->max_rc_amsdu_len / 1500 - 1;
+}
+
+static void
+rtw89_phy_c2h_ra_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 len)
+{
+ struct rtw89_phy_iter_ra_data ra_data;
+
+ ra_data.rtwdev = rtwdev;
+ ra_data.c2h = c2h;
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_c2h_ra_rpt_iter,
+ &ra_data);
+}
+
+static
+void (* const rtw89_phy_c2h_ra_handler[])(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = {
+ [RTW89_PHY_C2H_FUNC_STS_RPT] = rtw89_phy_c2h_ra_rpt,
+ [RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT] = NULL,
+ [RTW89_PHY_C2H_FUNC_TXSTS] = NULL,
+};
+
+void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func)
+{
+ void (*handler)(struct rtw89_dev *rtwdev,
+ struct sk_buff *c2h, u32 len) = NULL;
+
+ switch (class) {
+ case RTW89_PHY_C2H_CLASS_RA:
+ if (func < RTW89_PHY_C2H_FUNC_RA_MAX)
+ handler = rtw89_phy_c2h_ra_handler[func];
+ break;
+ default:
+		rtw89_info(rtwdev, "c2h class %d not supported\n", class);
+ return;
+ }
+ if (!handler) {
+		rtw89_info(rtwdev, "c2h class %d func %d not supported\n", class,
+ func);
+ return;
+ }
+ handler(rtwdev, skb, len);
+}
+
+static u8 rtw89_phy_cfo_get_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo)
+{
+ u32 reg_mask;
+
+ if (sc_xo)
+ reg_mask = B_AX_XTAL_SC_XO_MASK;
+ else
+ reg_mask = B_AX_XTAL_SC_XI_MASK;
+
+ return (u8)rtw89_read32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask);
+}
+
+static void rtw89_phy_cfo_set_xcap_reg(struct rtw89_dev *rtwdev, bool sc_xo,
+ u8 val)
+{
+ u32 reg_mask;
+
+ if (sc_xo)
+ reg_mask = B_AX_XTAL_SC_XO_MASK;
+ else
+ reg_mask = B_AX_XTAL_SC_XI_MASK;
+
+ rtw89_write32_mask(rtwdev, R_AX_XTAL_ON_CTRL0, reg_mask, val);
+}
+
+static void rtw89_phy_cfo_set_crystal_cap(struct rtw89_dev *rtwdev,
+ u8 crystal_cap, bool force)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ u8 sc_xi_val, sc_xo_val;
+
+ if (!force && cfo->crystal_cap == crystal_cap)
+ return;
+ crystal_cap = clamp_t(u8, crystal_cap, 0, 127);
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, true, crystal_cap);
+ rtw89_phy_cfo_set_xcap_reg(rtwdev, false, crystal_cap);
+ sc_xo_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, true);
+ sc_xi_val = rtw89_phy_cfo_get_xcap_reg(rtwdev, false);
+ cfo->crystal_cap = sc_xi_val;
+ cfo->x_cap_ofst = (s8)((int)cfo->crystal_cap - cfo->def_x_cap);
+
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xi=0x%x\n", sc_xi_val);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set sc_xo=0x%x\n", sc_xo_val);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Get xcap_ofst=%d\n",
+ cfo->x_cap_ofst);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Set xcap OK\n");
+}
+
+static void rtw89_phy_cfo_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ u8 cap;
+
+ cfo->def_x_cap = cfo->crystal_cap_default & B_AX_XTAL_SC_MASK;
+ cfo->is_adjust = false;
+ if (cfo->crystal_cap == cfo->def_x_cap)
+ return;
+ cap = cfo->crystal_cap;
+ cap += (cap > cfo->def_x_cap ? -1 : 1);
+ rtw89_phy_cfo_set_crystal_cap(rtwdev, cap, false);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "(0x%x) approach to dflt_val=(0x%x)\n", cfo->crystal_cap,
+ cfo->def_x_cap);
+}
+
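+/* Program digital CFO compensation from the averaged CFO measurement.
+ * The (curr_cfo << 3) / 5 term rescales the measurement into the
+ * compensation register unit (a factor that appears to be hardware
+ * defined) before folding in the currently programmed value.
+ */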
+static void rtw89_dcfo_comp(struct rtw89_dev *rtwdev, s32 curr_cfo)
+{
+ bool is_linked = rtwdev->total_sta_assoc > 0;
+ s32 cfo_avg_312;
+ s32 dcfo_comp;
+ int sign;
+
+ if (!is_linked) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: is_linked=%d\n",
+ is_linked);
+ return;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: curr_cfo=%d\n", curr_cfo);
+ if (curr_cfo == 0)
+ return;
+ dcfo_comp = rtw89_phy_read32_mask(rtwdev, R_DCFO, B_DCFO);
+ sign = curr_cfo > 0 ? 1 : -1;
+ cfo_avg_312 = (curr_cfo << 3) / 5 + sign * dcfo_comp;
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "DCFO: avg_cfo=%d\n", cfo_avg_312);
+ if (rtwdev->chip->chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV)
+ cfo_avg_312 = -cfo_avg_312;
+ rtw89_phy_set_phy_regs(rtwdev, R_DCFO_COMP_S0, B_DCFO_COMP_S0_MSK,
+ cfo_avg_312);
+}
+
+static void rtw89_dcfo_comp_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_set_phy_regs(rtwdev, R_DCFO_OPT, B_DCFO_OPT_EN, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_DCFO_WEIGHT, B_DCFO_WEIGHT_MSK, 8);
+ rtw89_write32_clr(rtwdev, R_AX_PWR_UL_CTRL2, B_AX_PWR_UL_CFO_MASK);
+}
+
+static void rtw89_phy_cfo_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ cfo->crystal_cap_default = efuse->xtal_cap & B_AX_XTAL_SC_MASK;
+ cfo->crystal_cap = cfo->crystal_cap_default;
+ cfo->def_x_cap = cfo->crystal_cap;
+ cfo->is_adjust = false;
+ cfo->x_cap_ofst = 0;
+ cfo->rtw89_multi_cfo_mode = RTW89_TP_BASED_AVG_MODE;
+ cfo->apply_compensation = false;
+ cfo->residual_cfo_acc = 0;
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Default xcap=%0x\n",
+ cfo->crystal_cap_default);
+ rtw89_phy_cfo_set_crystal_cap(rtwdev, cfo->crystal_cap_default, true);
+ rtw89_phy_set_phy_regs(rtwdev, R_DCFO, B_DCFO, 1);
+ rtw89_dcfo_comp_init(rtwdev);
+ cfo->cfo_timer_ms = 2000;
+ cfo->cfo_trig_by_timer_en = false;
+ cfo->phy_cfo_trk_cnt = 0;
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+}
+
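+/* Step the crystal capacitance toward cancelling the measured CFO: the
+ * larger the residual CFO, the bigger the step. Tracking only engages
+ * once |CFO| exceeds CFO_TRK_ENABLE_TH and releases again once it drops
+ * below CFO_TRK_STOP_TH.
+ */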
+static void rtw89_phy_cfo_crystal_cap_adjust(struct rtw89_dev *rtwdev,
+ s32 curr_cfo)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ s8 crystal_cap = cfo->crystal_cap;
+ s32 cfo_abs = abs(curr_cfo);
+ int sign;
+
+ if (!cfo->is_adjust) {
+ if (cfo_abs > CFO_TRK_ENABLE_TH)
+ cfo->is_adjust = true;
+ } else {
+ if (cfo_abs < CFO_TRK_STOP_TH)
+ cfo->is_adjust = false;
+ }
+ if (!cfo->is_adjust) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Stop CFO tracking\n");
+ return;
+ }
+ sign = curr_cfo > 0 ? 1 : -1;
+ if (cfo_abs > CFO_TRK_STOP_TH_4)
+ crystal_cap += 7 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_3)
+ crystal_cap += 5 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_2)
+ crystal_cap += 3 * sign;
+ else if (cfo_abs > CFO_TRK_STOP_TH_1)
+ crystal_cap += 1 * sign;
+ else
+ return;
+ rtw89_phy_cfo_set_crystal_cap(rtwdev, (u8)crystal_cap, false);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "X_cap{Curr,Default}={0x%x,0x%x}\n",
+ cfo->crystal_cap, cfo->def_x_cap);
+}
+
+static s32 rtw89_phy_average_cfo_calc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ s32 cfo_khz_all = 0;
+ s32 cfo_cnt_all = 0;
+ s32 cfo_all_avg = 0;
+ u8 i;
+
+ if (rtwdev->total_sta_assoc != 1)
+ return 0;
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "one_entry_only\n");
+ for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
+ if (cfo->cfo_cnt[i] == 0)
+ continue;
+ cfo_khz_all += cfo->cfo_tail[i];
+ cfo_cnt_all += cfo->cfo_cnt[i];
+ cfo_all_avg = phy_div(cfo_khz_all, cfo_cnt_all);
+ cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "CFO track for macid = %d\n", i);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "Total cfo=%dK, pkt_cnt=%d, avg_cfo=%dK\n",
+ cfo_khz_all, cfo_cnt_all, cfo_all_avg);
+ return cfo_all_avg;
+}
+
+static s32 rtw89_phy_multi_sta_cfo_calc(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+ s32 target_cfo = 0;
+ s32 cfo_khz_all = 0;
+ s32 cfo_khz_all_tp_wgt = 0;
+ s32 cfo_avg = 0;
+ s32 max_cfo_lb = BIT(31);
+ s32 min_cfo_ub = GENMASK(30, 0);
+ u16 cfo_cnt_all = 0;
+ u8 active_entry_cnt = 0;
+ u8 sta_cnt = 0;
+ u32 tp_all = 0;
+ u8 i;
+ u8 cfo_tol = 0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Multi entry cfo_trk\n");
+ if (cfo->rtw89_multi_cfo_mode == RTW89_PKT_BASED_AVG_MODE) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt based avg mode\n");
+ for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
+ if (cfo->cfo_cnt[i] == 0)
+ continue;
+ cfo_khz_all += cfo->cfo_tail[i];
+ cfo_cnt_all += cfo->cfo_cnt[i];
+ cfo_avg = phy_div(cfo_khz_all, (s32)cfo_cnt_all);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "Msta cfo=%d, pkt_cnt=%d, avg_cfo=%d\n",
+ cfo_khz_all, cfo_cnt_all, cfo_avg);
+ target_cfo = cfo_avg;
+ }
+ } else if (cfo->rtw89_multi_cfo_mode == RTW89_ENTRY_BASED_AVG_MODE) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Entry based avg mode\n");
+ for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
+ if (cfo->cfo_cnt[i] == 0)
+ continue;
+ cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
+ (s32)cfo->cfo_cnt[i]);
+ cfo_khz_all += cfo->cfo_avg[i];
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "Macid=%d, cfo_avg=%d\n", i,
+ cfo->cfo_avg[i]);
+ }
+ sta_cnt = rtwdev->total_sta_assoc;
+ cfo_avg = phy_div(cfo_khz_all, (s32)sta_cnt);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "Msta cfo_acc=%d, ent_cnt=%d, avg_cfo=%d\n",
+ cfo_khz_all, sta_cnt, cfo_avg);
+ target_cfo = cfo_avg;
+ } else if (cfo->rtw89_multi_cfo_mode == RTW89_TP_BASED_AVG_MODE) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "TP based avg mode\n");
+ cfo_tol = cfo->sta_cfo_tolerance;
+ for (i = 0; i < CFO_TRACK_MAX_USER; i++) {
+ sta_cnt++;
+ if (cfo->cfo_cnt[i] != 0) {
+ cfo->cfo_avg[i] = phy_div(cfo->cfo_tail[i],
+ (s32)cfo->cfo_cnt[i]);
+ active_entry_cnt++;
+ } else {
+ cfo->cfo_avg[i] = cfo->pre_cfo_avg[i];
+ }
+ max_cfo_lb = max(cfo->cfo_avg[i] - cfo_tol, max_cfo_lb);
+ min_cfo_ub = min(cfo->cfo_avg[i] + cfo_tol, min_cfo_ub);
+ cfo_khz_all += cfo->cfo_avg[i];
+ /* need tp for each entry */
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "[%d] cfo_avg=%d, tp=tbd\n",
+ i, cfo->cfo_avg[i]);
+ if (sta_cnt >= rtwdev->total_sta_assoc)
+ break;
+ }
+ tp_all = stats->rx_throughput; /* need tp for each entry */
+ cfo_avg = phy_div(cfo_khz_all_tp_wgt, (s32)tp_all);
+
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Assoc sta cnt=%d\n",
+ sta_cnt);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Active sta cnt=%d\n",
+ active_entry_cnt);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "Msta cfo with tp_wgt=%d, avg_cfo=%d\n",
+ cfo_khz_all_tp_wgt, cfo_avg);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "cfo_lb=%d,cfo_ub=%d\n",
+ max_cfo_lb, min_cfo_ub);
+ if (max_cfo_lb <= min_cfo_ub) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "cfo win_size=%d\n",
+ min_cfo_ub - max_cfo_lb);
+ target_cfo = clamp(cfo_avg, max_cfo_lb, min_cfo_ub);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "No intersection of cfo tolerance windows\n");
+ target_cfo = phy_div(cfo_khz_all, (s32)sta_cnt);
+ }
+ for (i = 0; i < CFO_TRACK_MAX_USER; i++)
+ cfo->pre_cfo_avg[i] = cfo->cfo_avg[i];
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Target cfo=%d\n", target_cfo);
+ return target_cfo;
+}
+
+static void rtw89_phy_cfo_statistics_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+
+ memset(&cfo->cfo_tail, 0, sizeof(cfo->cfo_tail));
+ memset(&cfo->cfo_cnt, 0, sizeof(cfo->cfo_cnt));
+ cfo->packet_count = 0;
+ cfo->packet_count_pre = 0;
+ cfo->cfo_avg_pre = 0;
+}
+
+static void rtw89_phy_cfo_dm(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ s32 new_cfo = 0;
+ bool x_cap_update = false;
+ u8 pre_x_cap = cfo->crystal_cap;
+
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "CFO:total_sta_assoc=%d\n",
+ rtwdev->total_sta_assoc);
+ if (rtwdev->total_sta_assoc == 0) {
+ rtw89_phy_cfo_reset(rtwdev);
+ return;
+ }
+ if (cfo->packet_count == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt = 0\n");
+ return;
+ }
+ if (cfo->packet_count == cfo->packet_count_pre) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Pkt cnt doesn't change\n");
+ return;
+ }
+ if (rtwdev->total_sta_assoc == 1)
+ new_cfo = rtw89_phy_average_cfo_calc(rtwdev);
+ else
+ new_cfo = rtw89_phy_multi_sta_cfo_calc(rtwdev);
+ if (new_cfo == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "curr_cfo=0\n");
+ return;
+ }
+ rtw89_phy_cfo_crystal_cap_adjust(rtwdev, new_cfo);
+ cfo->cfo_avg_pre = new_cfo;
+	x_cap_update = cfo->crystal_cap != pre_x_cap;
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap_up=%d\n", x_cap_update);
+ rtw89_debug(rtwdev, RTW89_DBG_CFO, "Xcap: D:%x C:%x->%x, ofst=%d\n",
+ cfo->def_x_cap, pre_x_cap, cfo->crystal_cap,
+ cfo->x_cap_ofst);
+ if (x_cap_update) {
+ if (new_cfo > 0)
+ new_cfo -= CFO_SW_COMP_FINE_TUNE;
+ else
+ new_cfo += CFO_SW_COMP_FINE_TUNE;
+ }
+ rtw89_dcfo_comp(rtwdev, new_cfo);
+ rtw89_phy_cfo_statistics_reset(rtwdev);
+}
+
+void rtw89_phy_cfo_track_work(struct work_struct *work)
+{
+ struct rtw89_dev *rtwdev = container_of(work, struct rtw89_dev,
+ cfo_track_work.work);
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+
+ mutex_lock(&rtwdev->mutex);
+ if (!cfo->cfo_trig_by_timer_en)
+ goto out;
+ rtw89_leave_ps_mode(rtwdev);
+ rtw89_phy_cfo_dm(rtwdev);
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
+out:
+ mutex_unlock(&rtwdev->mutex);
+}
+
+static void rtw89_phy_cfo_start_work(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+
+ ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->cfo_track_work,
+ msecs_to_jiffies(cfo->cfo_timer_ms));
+}
+
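+/* Throughput-driven scheduler for CFO tracking: in the NORMAL state the
+ * watchdog path below runs rtw89_phy_cfo_dm() directly; once TX throughput
+ * exceeds CFO_TP_UPPER we enter ENHANCE and let a CFO_COMP_PERIOD-ms delayed
+ * work do the tracking, falling back after CFO_PERIOD_CNT rounds or when
+ * throughput drops below CFO_TP_LOWER.
+ */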
+void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ struct rtw89_traffic_stats *stats = &rtwdev->stats;
+
+ switch (cfo->phy_cfo_status) {
+ case RTW89_PHY_DCFO_STATE_NORMAL:
+ if (stats->tx_throughput >= CFO_TP_UPPER) {
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_ENHANCE;
+ cfo->cfo_trig_by_timer_en = true;
+ cfo->cfo_timer_ms = CFO_COMP_PERIOD;
+ rtw89_phy_cfo_start_work(rtwdev);
+ }
+ break;
+ case RTW89_PHY_DCFO_STATE_ENHANCE:
+ if (cfo->phy_cfo_trk_cnt >= CFO_PERIOD_CNT) {
+ cfo->phy_cfo_trk_cnt = 0;
+ cfo->cfo_trig_by_timer_en = false;
+ }
+ if (cfo->cfo_trig_by_timer_en)
+ cfo->phy_cfo_trk_cnt++;
+ if (stats->tx_throughput <= CFO_TP_LOWER) {
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ cfo->phy_cfo_trk_cnt = 0;
+ cfo->cfo_trig_by_timer_en = false;
+ }
+ break;
+ default:
+ cfo->phy_cfo_status = RTW89_PHY_DCFO_STATE_NORMAL;
+ cfo->phy_cfo_trk_cnt = 0;
+ break;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_CFO,
+ "[CFO]WatchDog tp=%d,state=%d,timer_en=%d,trk_cnt=%d,thermal=%ld\n",
+ stats->tx_throughput, cfo->phy_cfo_status,
+ cfo->cfo_trig_by_timer_en, cfo->phy_cfo_trk_cnt,
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[0]));
+ if (cfo->cfo_trig_by_timer_en)
+ return;
+ rtw89_phy_cfo_dm(rtwdev);
+}
+
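+/* Accumulate the CFO value reported with each RX PPDU per MACID; the sums and
+ * counts are averaged and cleared again by rtw89_phy_cfo_dm().
+ */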
+void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
+ struct rtw89_rx_phy_ppdu *phy_ppdu)
+{
+ struct rtw89_cfo_tracking_info *cfo = &rtwdev->cfo_tracking;
+ u8 macid = phy_ppdu->mac_id;
+
+ cfo->cfo_tail[macid] += cfo_val;
+ cfo->cfo_cnt[macid]++;
+ cfo->packet_count++;
+}
+
+static void rtw89_phy_stat_thermal_update(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_stat *phystat = &rtwdev->phystat;
+ int i;
+ u8 th;
+
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++) {
+ th = rtw89_chip_get_thermal(rtwdev, i);
+ if (th)
+ ewma_thermal_add(&phystat->avg_thermal[i], th);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "path(%d) thermal cur=%u avg=%ld", i, th,
+ ewma_thermal_read(&phystat->avg_thermal[i]));
+ }
+}
+
+struct rtw89_phy_iter_rssi_data {
+ struct rtw89_dev *rtwdev;
+ struct rtw89_phy_ch_info *ch_info;
+ bool rssi_changed;
+};
+
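+/* Track the lowest average RSSI (and its MACID) across all stations, and mark
+ * rssi_changed when a station's RSSI moved by more than 3 << RSSI_FACTOR so
+ * the caller can notify the BT coexistence code.
+ */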
+static void rtw89_phy_stat_rssi_update_iter(void *data,
+ struct ieee80211_sta *sta)
+{
+ struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;
+ struct rtw89_phy_iter_rssi_data *rssi_data =
+ (struct rtw89_phy_iter_rssi_data *)data;
+ struct rtw89_phy_ch_info *ch_info = rssi_data->ch_info;
+ unsigned long rssi_curr;
+
+ rssi_curr = ewma_rssi_read(&rtwsta->avg_rssi);
+
+ if (rssi_curr < ch_info->rssi_min) {
+ ch_info->rssi_min = rssi_curr;
+ ch_info->rssi_min_macid = rtwsta->mac_id;
+ }
+
+ if (rtwsta->prev_rssi == 0) {
+ rtwsta->prev_rssi = rssi_curr;
+ } else if (abs((int)rtwsta->prev_rssi - (int)rssi_curr) > (3 << RSSI_FACTOR)) {
+ rtwsta->prev_rssi = rssi_curr;
+ rssi_data->rssi_changed = true;
+ }
+}
+
+static void rtw89_phy_stat_rssi_update(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_iter_rssi_data rssi_data = {0};
+
+ rssi_data.rtwdev = rtwdev;
+ rssi_data.ch_info = &rtwdev->ch_info;
+ rssi_data.ch_info->rssi_min = U8_MAX;
+ ieee80211_iterate_stations_atomic(rtwdev->hw,
+ rtw89_phy_stat_rssi_update_iter,
+ &rssi_data);
+ if (rssi_data.rssi_changed)
+ rtw89_btc_ntfy_wl_sta(rtwdev);
+}
+
+static void rtw89_phy_stat_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_stat *phystat = &rtwdev->phystat;
+ int i;
+
+ for (i = 0; i < rtwdev->chip->rf_path_num; i++)
+ ewma_thermal_init(&phystat->avg_thermal[i]);
+
+ rtw89_phy_stat_thermal_update(rtwdev);
+
+ memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
+ memset(&phystat->last_pkt_stat, 0, sizeof(phystat->last_pkt_stat));
+}
+
+void rtw89_phy_stat_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_stat *phystat = &rtwdev->phystat;
+
+ rtw89_phy_stat_thermal_update(rtwdev);
+ rtw89_phy_stat_rssi_update(rtwdev);
+
+ phystat->last_pkt_stat = phystat->cur_pkt_stat;
+ memset(&phystat->cur_pkt_stat, 0, sizeof(phystat->cur_pkt_stat));
+}
+
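+/* The CCX/IFS counters tick in units of CCX_US_BASE_RATIO << ccx_unit_idx
+ * microseconds (4/8/16/32 us); these helpers convert between microseconds
+ * and raw counter values.
+ */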
+static u16 rtw89_phy_ccx_us_to_idx(struct rtw89_dev *rtwdev, u32 time_us)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+
+ return time_us >> (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
+}
+
+static u32 rtw89_phy_ccx_idx_to_us(struct rtw89_dev *rtwdev, u16 idx)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+
+ return idx << (ilog2(CCX_US_BASE_RATIO) + env->ccx_unit_idx);
+}
+
+static void rtw89_phy_ccx_top_setting_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+
+ env->ccx_manual_ctrl = false;
+ env->ccx_ongoing = false;
+ env->ccx_rac_lv = RTW89_RAC_RELEASE;
+ env->ccx_rpt_stamp = 0;
+ env->ccx_period = 0;
+ env->ccx_unit_idx = RTW89_CCX_32_US;
+ env->ccx_trigger_time = 0;
+ env->ccx_edcca_opt_bw_idx = RTW89_CCX_EDCCA_BW20_0;
+
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EN_MSK, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_TRIG_OPT_MSK, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_CCX_EDCCA_OPT_MSK,
+ RTW89_CCX_EDCCA_BW20_0);
+}
+
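+/* Scale a raw counter into a fraction of the measurement period expressed in
+ * "score" units (PERCENT or PERMIL), rounding to nearest and capping the
+ * result at score - 1.
+ */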
+static u16 rtw89_phy_ccx_get_report(struct rtw89_dev *rtwdev, u16 report,
+ u16 score)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ u32 numer = 0;
+ u16 ret = 0;
+
+ numer = report * score + (env->ccx_period >> 1);
+ if (env->ccx_period)
+ ret = numer / env->ccx_period;
+
+ return ret >= score ? score - 1 : ret;
+}
+
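+/* Convert a monitor time in ms into a counter unit and period: the quotient
+ * test picks the finest of the 4/8/16/32 us units that keeps the resulting
+ * period under 16 bits. E.g. the 1900 ms background measurement used below
+ * yields the 32 us unit and period = (1900 * MS_TO_4US_RATIO) >> 3 = 59375.
+ */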
+static void rtw89_phy_ccx_ms_to_period_unit(struct rtw89_dev *rtwdev,
+ u16 time_ms, u32 *period,
+ u32 *unit_idx)
+{
+ u32 idx;
+ u8 quotient;
+
+ if (time_ms >= CCX_MAX_PERIOD)
+ time_ms = CCX_MAX_PERIOD;
+
+ quotient = CCX_MAX_PERIOD_UNIT * time_ms / CCX_MAX_PERIOD;
+
+ if (quotient < 4)
+ idx = RTW89_CCX_4_US;
+ else if (quotient < 8)
+ idx = RTW89_CCX_8_US;
+ else if (quotient < 16)
+ idx = RTW89_CCX_16_US;
+ else
+ idx = RTW89_CCX_32_US;
+
+ *unit_idx = idx;
+ *period = (time_ms * MS_TO_4US_RATIO) >> idx;
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[Trigger Time] period:%d, unit_idx:%d\n",
+ *period, *unit_idx);
+}
+
+static void rtw89_phy_ccx_racing_release(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "lv:(%d)->(0)\n", env->ccx_rac_lv);
+
+ env->ccx_ongoing = false;
+ env->ccx_rac_lv = RTW89_RAC_RELEASE;
+ env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
+}
+
+static bool rtw89_phy_ifs_clm_th_update_check(struct rtw89_dev *rtwdev,
+ struct rtw89_ccx_para_info *para)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ bool is_update = env->ifs_clm_app != para->ifs_clm_app;
+ u8 i = 0;
+ u16 *ifs_th_l = env->ifs_clm_th_l;
+ u16 *ifs_th_h = env->ifs_clm_th_h;
+ u32 ifs_th0_us = 0, ifs_th_times = 0;
+ u32 ifs_th_h_us[RTW89_IFS_CLM_NUM] = {0};
+
+ if (!is_update)
+ goto ifs_update_finished;
+
+ switch (para->ifs_clm_app) {
+ case RTW89_IFS_CLM_INIT:
+ case RTW89_IFS_CLM_BACKGROUND:
+ case RTW89_IFS_CLM_ACS:
+ case RTW89_IFS_CLM_DBG:
+ case RTW89_IFS_CLM_DIG:
+ case RTW89_IFS_CLM_TDMA_DIG:
+ ifs_th0_us = IFS_CLM_TH0_UPPER;
+ ifs_th_times = IFS_CLM_TH_MUL;
+ break;
+ case RTW89_IFS_CLM_DBG_MANUAL:
+ ifs_th0_us = para->ifs_clm_manual_th0;
+ ifs_th_times = para->ifs_clm_manual_th_times;
+ break;
+ default:
+ break;
+ }
+
+ /* Set sampling threshold for 4 different regions, unit in idx_cnt.
+ * low[i] = high[i-1] + 1
+ * high[i] = high[i-1] * ifs_th_times
+ */
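+ /* With the non-manual defaults (ifs_th0_us = IFS_CLM_TH0_UPPER = 64 us,
+ * ifs_th_times = IFS_CLM_TH_MUL = 4) the high thresholds become
+ * 64/256/1024/4096 us, so the four bins roughly cover 0-64, 64-256,
+ * 256-1024 and 1024-4096 us before conversion to counter units.
+ */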
+ ifs_th_l[IFS_CLM_TH_START_IDX] = 0;
+ ifs_th_h_us[IFS_CLM_TH_START_IDX] = ifs_th0_us;
+ ifs_th_h[IFS_CLM_TH_START_IDX] = rtw89_phy_ccx_us_to_idx(rtwdev,
+ ifs_th0_us);
+ for (i = 1; i < RTW89_IFS_CLM_NUM; i++) {
+ ifs_th_l[i] = ifs_th_h[i - 1] + 1;
+ ifs_th_h_us[i] = ifs_th_h_us[i - 1] * ifs_th_times;
+ ifs_th_h[i] = rtw89_phy_ccx_us_to_idx(rtwdev, ifs_th_h_us[i]);
+ }
+
+ifs_update_finished:
+ if (!is_update)
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "No need to update IFS_TH\n");
+
+ return is_update;
+}
+
+static void rtw89_phy_ifs_clm_set_th_reg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ u8 i = 0;
+
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_LOW_MSK,
+ env->ifs_clm_th_l[0]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_LOW_MSK,
+ env->ifs_clm_th_l[1]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_LOW_MSK,
+ env->ifs_clm_th_l[2]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_LOW_MSK,
+ env->ifs_clm_th_l[3]);
+
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_TH_HIGH_MSK,
+ env->ifs_clm_th_h[0]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_TH_HIGH_MSK,
+ env->ifs_clm_th_h[1]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_TH_HIGH_MSK,
+ env->ifs_clm_th_h[2]);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_TH_HIGH_MSK,
+ env->ifs_clm_th_h[3]);
+
+ for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "Update IFS_T%d_th{low, high} : {%d, %d}\n",
+ i + 1, env->ifs_clm_th_l[i], env->ifs_clm_th_h[i]);
+}
+
+static void rtw89_phy_ifs_clm_setting_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_ccx_para_info para = {0};
+
+ env->ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
+ env->ifs_clm_mntr_time = 0;
+
+ para.ifs_clm_app = RTW89_IFS_CLM_INIT;
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, &para))
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COLLECT_EN,
+ true);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T1, B_IFS_T1_EN_MSK, true);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T2, B_IFS_T2_EN_MSK, true);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T3, B_IFS_T3_EN_MSK, true);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_T4, B_IFS_T4_EN_MSK, true);
+}
+
+static int rtw89_phy_ccx_racing_ctrl(struct rtw89_dev *rtwdev,
+ enum rtw89_env_racing_lv level)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ int ret = 0;
+
+ if (level >= RTW89_RAC_MAX_NUM) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[WARNING] Wrong LV=%d\n", level);
+ return -EINVAL;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "ccx_ongoing=%d, level:(%d)->(%d)\n", env->ccx_ongoing,
+ env->ccx_rac_lv, level);
+
+ if (env->ccx_ongoing) {
+ if (level <= env->ccx_rac_lv)
+ ret = -EINVAL;
+ else
+ env->ccx_ongoing = false;
+ }
+
+ if (ret == 0)
+ env->ccx_rac_lv = level;
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "ccx racing success=%d\n",
+ !ret);
+
+ return ret;
+}
+
+static void rtw89_phy_ccx_trigger(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 0);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER, B_IFS_COUNTER_CLR_MSK, 1);
+ rtw89_phy_set_phy_regs(rtwdev, R_CCX, B_MEASUREMENT_TRIG_MSK, 1);
+
+ env->ccx_rpt_stamp++;
+ env->ccx_ongoing = true;
+}
+
+static void rtw89_phy_ifs_clm_get_utility(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ u8 i = 0;
+ u32 res = 0;
+
+ env->ifs_clm_tx_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_tx, PERCENT);
+ env->ifs_clm_edcca_excl_cca_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_edcca_excl_cca,
+ PERCENT);
+ env->ifs_clm_cck_fa_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERCENT);
+ env->ifs_clm_ofdm_fa_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERCENT);
+ env->ifs_clm_cck_cca_excl_fa_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckcca_excl_fa,
+ PERCENT);
+ env->ifs_clm_ofdm_cca_excl_fa_ratio =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmcca_excl_fa,
+ PERCENT);
+ env->ifs_clm_cck_fa_permil =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_cckfa, PERMIL);
+ env->ifs_clm_ofdm_fa_permil =
+ rtw89_phy_ccx_get_report(rtwdev, env->ifs_clm_ofdmfa, PERMIL);
+
+ for (i = 0; i < RTW89_IFS_CLM_NUM; i++) {
+ if (env->ifs_clm_his[i] > ENV_MNTR_IFSCLM_HIS_MAX) {
+ env->ifs_clm_ifs_avg[i] = ENV_MNTR_FAIL_DWORD;
+ } else {
+ env->ifs_clm_ifs_avg[i] =
+ rtw89_phy_ccx_idx_to_us(rtwdev,
+ env->ifs_clm_avg[i]);
+ }
+
+ res = rtw89_phy_ccx_idx_to_us(rtwdev, env->ifs_clm_cca[i]);
+ res += env->ifs_clm_his[i] >> 1;
+ if (env->ifs_clm_his[i])
+ res /= env->ifs_clm_his[i];
+ else
+ res = 0;
+ env->ifs_clm_cca_avg[i] = res;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM ratio {Tx, EDCCA_exclu_cca} = {%d, %d}\n",
+ env->ifs_clm_tx_ratio, env->ifs_clm_edcca_excl_cca_ratio);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM FA ratio {CCK, OFDM} = {%d, %d}\n",
+ env->ifs_clm_cck_fa_ratio, env->ifs_clm_ofdm_fa_ratio);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM FA permil {CCK, OFDM} = {%d, %d}\n",
+ env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM CCA_exclu_FA ratio {CCK, OFDM} = {%d, %d}\n",
+ env->ifs_clm_cck_cca_excl_fa_ratio,
+ env->ifs_clm_ofdm_cca_excl_fa_ratio);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "Time:[his, ifs_avg(us), cca_avg(us)]\n");
+ for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "T%d:[%d, %d, %d]\n",
+ i + 1, env->ifs_clm_his[i], env->ifs_clm_ifs_avg[i],
+ env->ifs_clm_cca_avg[i]);
+}
+
+static bool rtw89_phy_ifs_clm_get_result(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ u8 i = 0;
+
+ if (rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_DONE_MSK) == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "Get IFS_CLM report Fail\n");
+ return false;
+ }
+
+ env->ifs_clm_tx =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT,
+ B_IFS_CLM_TX_CNT_MSK);
+ env->ifs_clm_edcca_excl_cca =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_TX_CNT,
+ B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK);
+ env->ifs_clm_cckcca_excl_fa =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA,
+ B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK);
+ env->ifs_clm_ofdmcca_excl_fa =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_CCA,
+ B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK);
+ env->ifs_clm_cckfa =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA,
+ B_IFS_CLM_CCK_FA_MSK);
+ env->ifs_clm_ofdmfa =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CLM_FA,
+ B_IFS_CLM_OFDM_FA_MSK);
+
+ env->ifs_clm_his[0] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T1_HIS_MSK);
+ env->ifs_clm_his[1] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T2_HIS_MSK);
+ env->ifs_clm_his[2] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T3_HIS_MSK);
+ env->ifs_clm_his[3] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_HIS, B_IFS_T4_HIS_MSK);
+
+ env->ifs_clm_avg[0] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T1_AVG_MSK);
+ env->ifs_clm_avg[1] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_L, B_IFS_T2_AVG_MSK);
+ env->ifs_clm_avg[2] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T3_AVG_MSK);
+ env->ifs_clm_avg[3] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_AVG_H, B_IFS_T4_AVG_MSK);
+
+ env->ifs_clm_cca[0] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T1_CCA_MSK);
+ env->ifs_clm_cca[1] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_L, B_IFS_T2_CCA_MSK);
+ env->ifs_clm_cca[2] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T3_CCA_MSK);
+ env->ifs_clm_cca[3] =
+ rtw89_phy_read32_mask(rtwdev, R_IFS_CCA_H, B_IFS_T4_CCA_MSK);
+
+ env->ifs_clm_total_ifs =
+ rtw89_phy_read32_mask(rtwdev, R_IFSCNT, B_IFSCNT_TOTAL_CNT_MSK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "IFS-CLM total_ifs = %d\n",
+ env->ifs_clm_total_ifs);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "{Tx, EDCCA_exclu_cca} = {%d, %d}\n",
+ env->ifs_clm_tx, env->ifs_clm_edcca_excl_cca);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM FA{CCK, OFDM} = {%d, %d}\n",
+ env->ifs_clm_cckfa, env->ifs_clm_ofdmfa);
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "IFS-CLM CCA_exclu_FA{CCK, OFDM} = {%d, %d}\n",
+ env->ifs_clm_cckcca_excl_fa, env->ifs_clm_ofdmcca_excl_fa);
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK, "Time:[his, avg, cca]\n");
+ for (i = 0; i < RTW89_IFS_CLM_NUM; i++)
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "T%d:[%d, %d, %d]\n", i + 1, env->ifs_clm_his[i],
+ env->ifs_clm_avg[i], env->ifs_clm_cca[i]);
+
+ rtw89_phy_ifs_clm_get_utility(rtwdev);
+
+ return true;
+}
+
+static int rtw89_phy_ifs_clm_set(struct rtw89_dev *rtwdev,
+ struct rtw89_ccx_para_info *para)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ u32 period = 0;
+ u32 unit_idx = 0;
+
+ if (para->mntr_time == 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "[WARN] MNTR_TIME is 0\n");
+ return -EINVAL;
+ }
+
+ if (rtw89_phy_ccx_racing_ctrl(rtwdev, para->rac_lv))
+ return -EINVAL;
+
+ if (para->mntr_time != env->ifs_clm_mntr_time) {
+ rtw89_phy_ccx_ms_to_period_unit(rtwdev, para->mntr_time,
+ &period, &unit_idx);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER,
+ B_IFS_CLM_PERIOD_MSK, period);
+ rtw89_phy_set_phy_regs(rtwdev, R_IFS_COUNTER,
+ B_IFS_CLM_COUNTER_UNIT_MSK, unit_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "Update IFS-CLM time ((%d)) -> ((%d))\n",
+ env->ifs_clm_mntr_time, para->mntr_time);
+
+ env->ifs_clm_mntr_time = para->mntr_time;
+ env->ccx_period = (u16)period;
+ env->ccx_unit_idx = (u8)unit_idx;
+ }
+
+ if (rtw89_phy_ifs_clm_th_update_check(rtwdev, para)) {
+ env->ifs_clm_app = para->ifs_clm_app;
+ rtw89_phy_ifs_clm_set_th_reg(rtwdev);
+ }
+
+ return 0;
+}
+
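+/* Watchdog hook: harvest the IFS-CLM counters of the previous round, then
+ * re-arm a 1900 ms background measurement unless CCX is under manual control.
+ */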
+void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ struct rtw89_ccx_para_info para = {0};
+ u8 chk_result = RTW89_PHY_ENV_MON_CCX_FAIL;
+
+ env->ccx_watchdog_result = RTW89_PHY_ENV_MON_CCX_FAIL;
+ if (env->ccx_manual_ctrl) {
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "CCX in manual ctrl\n");
+ return;
+ }
+
+ /* only ifs_clm for now */
+ if (rtw89_phy_ifs_clm_get_result(rtwdev))
+ env->ccx_watchdog_result |= RTW89_PHY_ENV_MON_IFS_CLM;
+
+ rtw89_phy_ccx_racing_release(rtwdev);
+ para.mntr_time = 1900;
+ para.rac_lv = RTW89_RAC_LV_1;
+ para.ifs_clm_app = RTW89_IFS_CLM_BACKGROUND;
+
+ if (rtw89_phy_ifs_clm_set(rtwdev, &para) == 0)
+ chk_result |= RTW89_PHY_ENV_MON_IFS_CLM;
+ if (chk_result)
+ rtw89_phy_ccx_trigger(rtwdev);
+
+ rtw89_debug(rtwdev, RTW89_DBG_PHY_TRACK,
+ "get_result=0x%x, chk_result:0x%x\n",
+ env->ccx_watchdog_result, chk_result);
+}
+
+static void rtw89_phy_dig_read_gain_table(struct rtw89_dev *rtwdev, int type)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ const struct rtw89_phy_dig_gain_cfg *cfg;
+ const char *msg;
+ u8 i;
+ s8 gain_base;
+ s8 *gain_arr;
+ u32 tmp;
+
+ switch (type) {
+ case RTW89_DIG_GAIN_LNA_G:
+ gain_arr = dig->lna_gain_g;
+ gain_base = LNA0_GAIN;
+ cfg = chip->dig_table->cfg_lna_g;
+ msg = "lna_gain_g";
+ break;
+ case RTW89_DIG_GAIN_TIA_G:
+ gain_arr = dig->tia_gain_g;
+ gain_base = TIA0_GAIN_G;
+ cfg = chip->dig_table->cfg_tia_g;
+ msg = "tia_gain_g";
+ break;
+ case RTW89_DIG_GAIN_LNA_A:
+ gain_arr = dig->lna_gain_a;
+ gain_base = LNA0_GAIN;
+ cfg = chip->dig_table->cfg_lna_a;
+ msg = "lna_gain_a";
+ break;
+ case RTW89_DIG_GAIN_TIA_A:
+ gain_arr = dig->tia_gain_a;
+ gain_base = TIA0_GAIN_A;
+ cfg = chip->dig_table->cfg_tia_a;
+ msg = "tia_gain_a";
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < cfg->size; i++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, cfg->table[i].addr,
+ cfg->table[i].mask);
+ tmp >>= DIG_GAIN_SHIFT;
+ gain_arr[i] = sign_extend32(tmp, U4_MAX_BIT) + gain_base;
+ gain_base += DIG_GAIN;
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "%s[%d]=%d\n",
+ msg, i, gain_arr[i]);
+ }
+}
+
+static void rtw89_phy_dig_update_gain_para(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ u32 tmp;
+ u8 i;
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PKPW,
+ B_PATH0_IB_PKPW_MSK);
+ dig->ib_pkpwr = sign_extend32(tmp >> DIG_GAIN_SHIFT, U8_MAX_BIT);
+ dig->ib_pbk = rtw89_phy_read32_mask(rtwdev, R_PATH0_IB_PBK,
+ B_PATH0_IB_PBK_MSK);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "ib_pkpwr=%d, ib_pbk=%d\n",
+ dig->ib_pkpwr, dig->ib_pbk);
+
+ for (i = RTW89_DIG_GAIN_LNA_G; i < RTW89_DIG_GAIN_MAX; i++)
+ rtw89_phy_dig_read_gain_table(rtwdev, i);
+}
+
+static const u8 rssi_nolink = 22;
+static const u8 igi_rssi_th[IGI_RSSI_TH_NUM] = {68, 84, 90, 98, 104};
+static const u16 fa_th_2g[FA_TH_NUM] = {22, 44, 66, 88};
+static const u16 fa_th_5g[FA_TH_NUM] = {4, 8, 12, 16};
+static const u16 fa_th_nolink[FA_TH_NUM] = {196, 352, 440, 528};
+
+static void rtw89_phy_dig_update_rssi_info(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_phy_ch_info *ch_info = &rtwdev->ch_info;
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ bool is_linked = rtwdev->total_sta_assoc > 0;
+
+ if (is_linked) {
+ dig->igi_rssi = ch_info->rssi_min >> 1;
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "RSSI update : NO Link\n");
+ dig->igi_rssi = rssi_nolink;
+ }
+}
+
+static void rtw89_phy_dig_update_para(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ bool is_linked = rtwdev->total_sta_assoc > 0;
+ const u16 *fa_th_src = NULL;
+
+ switch (rtwdev->hal.current_band_type) {
+ case RTW89_BAND_2G:
+ dig->lna_gain = dig->lna_gain_g;
+ dig->tia_gain = dig->tia_gain_g;
+ fa_th_src = is_linked ? fa_th_2g : fa_th_nolink;
+ dig->force_gaincode_idx_en = false;
+ dig->dyn_pd_th_en = true;
+ break;
+ case RTW89_BAND_5G:
+ default:
+ dig->lna_gain = dig->lna_gain_a;
+ dig->tia_gain = dig->tia_gain_a;
+ fa_th_src = is_linked ? fa_th_5g : fa_th_nolink;
+ dig->force_gaincode_idx_en = true;
+ dig->dyn_pd_th_en = true;
+ break;
+ }
+ memcpy(dig->fa_th, fa_th_src, sizeof(dig->fa_th));
+ memcpy(dig->igi_rssi_th, igi_rssi_th, sizeof(dig->igi_rssi_th));
+}
+
+static const u8 pd_low_th_offset = 20, dynamic_igi_min = 0x20;
+static const u8 igi_max_performance_mode = 0x5a;
+static const u8 dynamic_pd_threshold_max;
+
+static void rtw89_phy_dig_para_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+
+ dig->cur_gaincode.lna_idx = LNA_IDX_MAX;
+ dig->cur_gaincode.tia_idx = TIA_IDX_MAX;
+ dig->cur_gaincode.rxb_idx = RXB_IDX_MAX;
+ dig->force_gaincode.lna_idx = LNA_IDX_MAX;
+ dig->force_gaincode.tia_idx = TIA_IDX_MAX;
+ dig->force_gaincode.rxb_idx = RXB_IDX_MAX;
+
+ dig->dyn_igi_max = igi_max_performance_mode;
+ dig->dyn_igi_min = dynamic_igi_min;
+ dig->dyn_pd_th_max = dynamic_pd_threshold_max;
+ dig->pd_low_th_ofst = pd_low_th_offset;
+ dig->is_linked_pre = false;
+}
+
+static void rtw89_phy_dig_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_dig_update_gain_para(rtwdev);
+ rtw89_phy_dig_reset(rtwdev);
+}
+
+static u8 rtw89_phy_dig_lna_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ u8 lna_idx;
+
+ if (rssi < dig->igi_rssi_th[0])
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX6;
+ else if (rssi < dig->igi_rssi_th[1])
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX5;
+ else if (rssi < dig->igi_rssi_th[2])
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX4;
+ else if (rssi < dig->igi_rssi_th[3])
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX3;
+ else if (rssi < dig->igi_rssi_th[4])
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX2;
+ else
+ lna_idx = RTW89_DIG_GAIN_LNA_IDX1;
+
+ return lna_idx;
+}
+
+static u8 rtw89_phy_dig_tia_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ u8 tia_idx;
+
+ if (rssi < dig->igi_rssi_th[0])
+ tia_idx = RTW89_DIG_GAIN_TIA_IDX1;
+ else
+ tia_idx = RTW89_DIG_GAIN_TIA_IDX0;
+
+ return tia_idx;
+}
+
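+/* Derive the RXB index from the wideband RSSI implied by the selected LNA/TIA
+ * gains and the in-band peak power/backoff values read in
+ * rtw89_phy_dig_update_gain_para(), clamped to [RXB_IDX_MIN, RXB_IDX_MAX].
+ */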
+#define IB_PBK_BASE 110
+#define WB_RSSI_BASE 10
+static u8 rtw89_phy_dig_rxb_idx_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+ struct rtw89_agc_gaincode_set *set)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ s8 lna_gain = dig->lna_gain[set->lna_idx];
+ s8 tia_gain = dig->tia_gain[set->tia_idx];
+ s32 wb_rssi = rssi + lna_gain + tia_gain;
+ s32 rxb_idx_tmp = IB_PBK_BASE + WB_RSSI_BASE;
+ u8 rxb_idx;
+
+ rxb_idx_tmp += dig->ib_pkpwr - dig->ib_pbk - wb_rssi;
+ rxb_idx = clamp_t(s32, rxb_idx_tmp, RXB_IDX_MIN, RXB_IDX_MAX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "wb_rssi=%03d, rxb_idx_tmp=%03d\n",
+ wb_rssi, rxb_idx_tmp);
+
+ return rxb_idx;
+}
+
+static void rtw89_phy_dig_gaincode_by_rssi(struct rtw89_dev *rtwdev, u8 rssi,
+ struct rtw89_agc_gaincode_set *set)
+{
+ set->lna_idx = rtw89_phy_dig_lna_idx_by_rssi(rtwdev, rssi);
+ set->tia_idx = rtw89_phy_dig_tia_idx_by_rssi(rtwdev, rssi);
+ set->rxb_idx = rtw89_phy_dig_rxb_idx_by_rssi(rtwdev, rssi, set);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "final_rssi=%03d, (lna,tia,rab)=(%d,%d,%02d)\n",
+ rssi, set->lna_idx, set->tia_idx, set->rxb_idx);
+}
+
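+/* Map the summed CCK + OFDM false-alarm rate (permil, from IFS-CLM) onto a
+ * noisy level via dig->fa_th and turn it into an IGI offset of
+ * IGI_OFFSET_MUL per level, capped at IGI_OFFSET_MAX.
+ */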
+#define IGI_OFFSET_MAX 25
+#define IGI_OFFSET_MUL 2
+static void rtw89_phy_dig_igi_offset_by_env(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ struct rtw89_env_monitor_info *env = &rtwdev->env_monitor;
+ enum rtw89_dig_noisy_level noisy_lv;
+ u8 igi_offset = dig->fa_rssi_ofst;
+ u16 fa_ratio = 0;
+
+ fa_ratio = env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil;
+
+ if (fa_ratio < dig->fa_th[0])
+ noisy_lv = RTW89_DIG_NOISY_LEVEL0;
+ else if (fa_ratio < dig->fa_th[1])
+ noisy_lv = RTW89_DIG_NOISY_LEVEL1;
+ else if (fa_ratio < dig->fa_th[2])
+ noisy_lv = RTW89_DIG_NOISY_LEVEL2;
+ else if (fa_ratio < dig->fa_th[3])
+ noisy_lv = RTW89_DIG_NOISY_LEVEL3;
+ else
+ noisy_lv = RTW89_DIG_NOISY_LEVEL_MAX;
+
+ if (noisy_lv == RTW89_DIG_NOISY_LEVEL0 && igi_offset < 2)
+ igi_offset = 0;
+ else
+ igi_offset += noisy_lv * IGI_OFFSET_MUL;
+
+ igi_offset = min_t(u8, igi_offset, IGI_OFFSET_MAX);
+ dig->fa_rssi_ofst = igi_offset;
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "fa_th: [+6 (%d) +4 (%d) +2 (%d) 0 (%d) -2 ]\n",
+ dig->fa_th[3], dig->fa_th[2], dig->fa_th[1], dig->fa_th[0]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "fa(CCK,OFDM,ALL)=(%d,%d,%d)%%, noisy_lv=%d, ofst=%d\n",
+ env->ifs_clm_cck_fa_permil, env->ifs_clm_ofdm_fa_permil,
+ env->ifs_clm_cck_fa_permil + env->ifs_clm_ofdm_fa_permil,
+ noisy_lv, igi_offset);
+}
+
+static void rtw89_phy_dig_set_lna_idx(struct rtw89_dev *rtwdev, u8 lna_idx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_LNA_INIT,
+ B_PATH0_LNA_INIT_IDX_MSK, lna_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_LNA_INIT,
+ B_PATH1_LNA_INIT_IDX_MSK, lna_idx);
+}
+
+static void rtw89_phy_dig_set_tia_idx(struct rtw89_dev *rtwdev, u8 tia_idx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_TIA_INIT,
+ B_PATH0_TIA_INIT_IDX_MSK, tia_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_TIA_INIT,
+ B_PATH1_TIA_INIT_IDX_MSK, tia_idx);
+}
+
+static void rtw89_phy_dig_set_rxb_idx(struct rtw89_dev *rtwdev, u8 rxb_idx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_RXB_INIT,
+ B_PATH0_RXB_INIT_IDX_MSK, rxb_idx);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_RXB_INIT,
+ B_PATH1_RXB_INIT_IDX_MSK, rxb_idx);
+}
+
+static void rtw89_phy_dig_set_igi_cr(struct rtw89_dev *rtwdev,
+ const struct rtw89_agc_gaincode_set set)
+{
+ rtw89_phy_dig_set_lna_idx(rtwdev, set.lna_idx);
+ rtw89_phy_dig_set_tia_idx(rtwdev, set.tia_idx);
+ rtw89_phy_dig_set_rxb_idx(rtwdev, set.rxb_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "Set (lna,tia,rxb)=((%d,%d,%02d))\n",
+ set.lna_idx, set.tia_idx, set.rxb_idx);
+}
+
+static const struct rtw89_reg_def sdagc_config[4] = {
+ {R_PATH0_P20_FOLLOW_BY_PAGCUGC, B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ {R_PATH0_S20_FOLLOW_BY_PAGCUGC, B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ {R_PATH1_P20_FOLLOW_BY_PAGCUGC, B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK},
+ {R_PATH1_S20_FOLLOW_BY_PAGCUGC, B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK},
+};
+
+static void rtw89_phy_dig_sdagc_follow_pagc_config(struct rtw89_dev *rtwdev,
+ bool enable)
+{
+ u8 i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sdagc_config); i++)
+ rtw89_phy_write32_mask(rtwdev, sdagc_config[i].addr,
+ sdagc_config[i].mask, enable);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "sdagc_follow_pagc=%d\n", enable);
+}
+
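+/* Program the packet-detection lower bound from the clamped RSSI minus a
+ * bandwidth-dependent margin; when disabled, write 0 and clear the
+ * spatial-reuse PD enable instead.
+ */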
+static void rtw89_phy_dig_dyn_pd_th(struct rtw89_dev *rtwdev, u8 rssi,
+ bool enable)
+{
+ enum rtw89_bandwidth cbw = rtwdev->hal.current_band_width;
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ u8 final_rssi = 0, under_region = dig->pd_low_th_ofst;
+ u32 val = 0;
+
+ under_region += PD_TH_SB_FLTR_CMP_VAL;
+
+ switch (cbw) {
+ case RTW89_CHANNEL_WIDTH_40:
+ under_region += PD_TH_BW40_CMP_VAL;
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ under_region += PD_TH_BW80_CMP_VAL;
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ fallthrough;
+ default:
+ under_region += PD_TH_BW20_CMP_VAL;
+ break;
+ }
+
+ dig->dyn_pd_th_max = dig->igi_rssi;
+
+ final_rssi = min_t(u8, rssi, dig->igi_rssi);
+ final_rssi = clamp_t(u8, final_rssi, PD_TH_MIN_RSSI + under_region,
+ PD_TH_MAX_RSSI + under_region);
+
+ if (enable) {
+ val = (final_rssi - under_region - PD_TH_MIN_RSSI) >> 1;
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "dyn_max=%d, final_rssi=%d, total=%d, PD_low=%d\n",
+ dig->igi_rssi, final_rssi, under_region, val);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "Dynamic PD th disabled, Set PD_low_bd=0\n");
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD, B_SEG0R_PD_LOWER_BOUND_MSK,
+ val);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0R_PD,
+ B_SEG0R_PD_SPATIAL_REUSE_EN_MSK, enable);
+}
+
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+
+ dig->bypass_dig = false;
+ rtw89_phy_dig_para_reset(rtwdev);
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_phy_dig_dyn_pd_th(rtwdev, rssi_nolink, false);
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
+ rtw89_phy_dig_update_para(rtwdev);
+}
+
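+/* Main DIG routine: refresh parameters on link-state changes, derive the
+ * dynamic IGI window from the minimum RSSI plus the false-alarm offset,
+ * program either the forced or the RSSI-derived gain code, and update the
+ * dynamic PD threshold, letting SDAGC follow PAGC when the final IGI exceeds
+ * dyn_pd_th_max.
+ */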
+#define IGI_RSSI_MIN 10
+void rtw89_phy_dig(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dig_info *dig = &rtwdev->dig;
+ bool is_linked = rtwdev->total_sta_assoc > 0;
+
+ if (unlikely(dig->bypass_dig)) {
+ dig->bypass_dig = false;
+ return;
+ }
+
+ if (!dig->is_linked_pre && is_linked) {
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "First connected\n");
+ rtw89_phy_dig_update_para(rtwdev);
+ } else if (dig->is_linked_pre && !is_linked) {
+ rtw89_debug(rtwdev, RTW89_DBG_DIG, "First disconnected\n");
+ rtw89_phy_dig_update_para(rtwdev);
+ }
+ dig->is_linked_pre = is_linked;
+
+ rtw89_phy_dig_igi_offset_by_env(rtwdev);
+ rtw89_phy_dig_update_rssi_info(rtwdev);
+
+ dig->dyn_igi_min = (dig->igi_rssi > IGI_RSSI_MIN) ?
+ dig->igi_rssi - IGI_RSSI_MIN : 0;
+ dig->dyn_igi_max = dig->dyn_igi_min + IGI_OFFSET_MAX;
+ dig->igi_fa_rssi = dig->dyn_igi_min + dig->fa_rssi_ofst;
+
+ dig->igi_fa_rssi = clamp(dig->igi_fa_rssi, dig->dyn_igi_min,
+ dig->dyn_igi_max);
+
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "rssi=%03d, dyn(max,min)=(%d,%d), final_rssi=%d\n",
+ dig->igi_rssi, dig->dyn_igi_max, dig->dyn_igi_min,
+ dig->igi_fa_rssi);
+
+ if (dig->force_gaincode_idx_en) {
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->force_gaincode);
+ rtw89_debug(rtwdev, RTW89_DBG_DIG,
+ "Force gaincode index enabled.\n");
+ } else {
+ rtw89_phy_dig_gaincode_by_rssi(rtwdev, dig->igi_fa_rssi,
+ &dig->cur_gaincode);
+ rtw89_phy_dig_set_igi_cr(rtwdev, dig->cur_gaincode);
+ }
+
+ rtw89_phy_dig_dyn_pd_th(rtwdev, dig->igi_fa_rssi, dig->dyn_pd_th_en);
+
+ if (dig->dyn_pd_th_en && dig->igi_fa_rssi > dig->dyn_pd_th_max)
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, true);
+ else
+ rtw89_phy_dig_sdagc_follow_pagc_config(rtwdev, false);
+}
+
+static void rtw89_phy_env_monitor_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_ccx_top_setting_init(rtwdev);
+ rtw89_phy_ifs_clm_setting_init(rtwdev);
+}
+
+void rtw89_phy_dm_init(struct rtw89_dev *rtwdev)
+{
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+
+ rtw89_phy_stat_init(rtwdev);
+
+ rtw89_chip_bb_sethw(rtwdev);
+
+ rtw89_phy_env_monitor_init(rtwdev);
+ rtw89_phy_dig_init(rtwdev);
+ rtw89_phy_cfo_init(rtwdev);
+
+ rtw89_phy_init_rf_nctl(rtwdev);
+ rtw89_chip_rfk_init(rtwdev);
+ rtw89_load_txpwr_table(rtwdev, chip->byr_table);
+ rtw89_chip_set_txpwr_ctrl(rtwdev);
+ rtw89_chip_power_trim(rtwdev);
+}
+
+void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+ u8 bss_color;
+
+ if (!vif->bss_conf.he_support || !vif->bss_conf.assoc)
+ return;
+
+ bss_color = vif->bss_conf.he_bss_color.color;
+
+ rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_VLD0, 0x1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_TGT, bss_color,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_BSS_CLR_MAP, B_BSS_CLR_MAP_STAID,
+ vif->bss_conf.aid, phy_idx);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h
new file mode 100644
index 000000000000..370129345e0f
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/phy.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_PHY_H__
+#define __RTW89_PHY_H__
+
+#include "core.h"
+
+#define RTW89_PHY_ADDR_OFFSET 0x10000
+
+#define get_phy_headline(addr) FIELD_GET(GENMASK(31, 28), addr)
+#define PHY_HEADLINE_VALID 0xf
+#define get_phy_target(addr) FIELD_GET(GENMASK(27, 0), addr)
+#define get_phy_compare(rfe, cv) (FIELD_PREP(GENMASK(23, 16), rfe) | \
+ FIELD_PREP(GENMASK(7, 0), cv))
+
+#define get_phy_cond(addr) FIELD_GET(GENMASK(31, 28), addr)
+#define get_phy_cond_rfe(addr) FIELD_GET(GENMASK(23, 16), addr)
+#define get_phy_cond_pkg(addr) FIELD_GET(GENMASK(15, 8), addr)
+#define get_phy_cond_cv(addr) FIELD_GET(GENMASK(7, 0), addr)
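+/* integer division that yields 0 instead of faulting when the divisor is 0 */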
+#define phy_div(a, b) ({typeof(b) _b = (b); (_b) ? ((a) / (_b)) : 0; })
+#define PHY_COND_BRANCH_IF 0x8
+#define PHY_COND_BRANCH_ELIF 0x9
+#define PHY_COND_BRANCH_ELSE 0xa
+#define PHY_COND_BRANCH_END 0xb
+#define PHY_COND_CHECK 0x4
+#define PHY_COND_DONT_CARE 0xff
+
+#define RA_MASK_CCK_RATES GENMASK_ULL(3, 0)
+#define RA_MASK_OFDM_RATES GENMASK_ULL(11, 4)
+#define RA_MASK_SUBCCK_RATES 0x5ULL
+#define RA_MASK_SUBOFDM_RATES 0x10ULL
+#define RA_MASK_HT_1SS_RATES GENMASK_ULL(19, 12)
+#define RA_MASK_HT_2SS_RATES GENMASK_ULL(31, 24)
+#define RA_MASK_HT_3SS_RATES GENMASK_ULL(43, 36)
+#define RA_MASK_HT_4SS_RATES GENMASK_ULL(55, 48)
+#define RA_MASK_HT_RATES GENMASK_ULL(55, 12)
+#define RA_MASK_VHT_1SS_RATES GENMASK_ULL(21, 12)
+#define RA_MASK_VHT_2SS_RATES GENMASK_ULL(33, 24)
+#define RA_MASK_VHT_3SS_RATES GENMASK_ULL(45, 36)
+#define RA_MASK_VHT_4SS_RATES GENMASK_ULL(57, 48)
+#define RA_MASK_VHT_RATES GENMASK_ULL(57, 12)
+#define RA_MASK_HE_1SS_RATES GENMASK_ULL(23, 12)
+#define RA_MASK_HE_2SS_RATES GENMASK_ULL(35, 24)
+#define RA_MASK_HE_3SS_RATES GENMASK_ULL(47, 36)
+#define RA_MASK_HE_4SS_RATES GENMASK_ULL(59, 48)
+#define RA_MASK_HE_RATES GENMASK_ULL(59, 12)
+
+#define CFO_TRK_ENABLE_TH (2 << 2)
+#define CFO_TRK_STOP_TH_4 (30 << 2)
+#define CFO_TRK_STOP_TH_3 (20 << 2)
+#define CFO_TRK_STOP_TH_2 (10 << 2)
+#define CFO_TRK_STOP_TH_1 (00 << 2)
+#define CFO_TRK_STOP_TH (2 << 2)
+#define CFO_SW_COMP_FINE_TUNE (2 << 2)
+#define CFO_PERIOD_CNT 15
+#define CFO_TP_UPPER 100
+#define CFO_TP_LOWER 50
+#define CFO_COMP_PERIOD 250
+#define CFO_COMP_WEIGHT 8
+#define MAX_CFO_TOLERANCE 30
+
+#define CCX_MAX_PERIOD 2097
+#define CCX_MAX_PERIOD_UNIT 32
+#define MS_TO_4US_RATIO 250
+#define ENV_MNTR_FAIL_DWORD 0xffffffff
+#define ENV_MNTR_IFSCLM_HIS_MAX 127
+#define PERMIL 1000
+#define PERCENT 100
+#define IFS_CLM_TH0_UPPER 64
+#define IFS_CLM_TH_MUL 4
+#define IFS_CLM_TH_START_IDX 0
+
+#define TIA0_GAIN_A 12
+#define TIA0_GAIN_G 16
+#define LNA0_GAIN (-24)
+#define U4_MAX_BIT 3
+#define U8_MAX_BIT 7
+#define DIG_GAIN_SHIFT 2
+#define DIG_GAIN 8
+
+#define LNA_IDX_MAX 6
+#define LNA_IDX_MIN 0
+#define TIA_IDX_MAX 1
+#define TIA_IDX_MIN 0
+#define RXB_IDX_MAX 31
+#define RXB_IDX_MIN 0
+
+#define PD_TH_MAX_RSSI 70
+#define PD_TH_MIN_RSSI 8
+#define PD_TH_BW80_CMP_VAL 6
+#define PD_TH_BW40_CMP_VAL 3
+#define PD_TH_BW20_CMP_VAL 0
+#define PD_TH_CMP_VAL 3
+#define PD_TH_SB_FLTR_CMP_VAL 7
+
+#define PHYSTS_MGNT BIT(RTW89_RX_TYPE_MGNT)
+#define PHYSTS_CTRL BIT(RTW89_RX_TYPE_CTRL)
+#define PHYSTS_DATA BIT(RTW89_RX_TYPE_DATA)
+#define PHYSTS_RSVD BIT(RTW89_RX_TYPE_RSVD)
+#define PPDU_FILTER_BITMAP (PHYSTS_MGNT | PHYSTS_DATA)
+
+enum rtw89_phy_c2h_ra_func {
+ RTW89_PHY_C2H_FUNC_STS_RPT,
+ RTW89_PHY_C2H_FUNC_MU_GPTBL_RPT,
+ RTW89_PHY_C2H_FUNC_TXSTS,
+ RTW89_PHY_C2H_FUNC_RA_MAX,
+};
+
+enum rtw89_phy_c2h_class {
+ RTW89_PHY_C2H_CLASS_RUA,
+ RTW89_PHY_C2H_CLASS_RA,
+ RTW89_PHY_C2H_CLASS_DM,
+ RTW89_PHY_C2H_CLASS_BTC_MIN = 0x10,
+ RTW89_PHY_C2H_CLASS_BTC_MAX = 0x17,
+ RTW89_PHY_C2H_CLASS_MAX,
+};
+
+enum rtw89_env_monitor_result_level {
+ RTW89_PHY_ENV_MON_CCX_FAIL = 0,
+ RTW89_PHY_ENV_MON_NHM = BIT(0),
+ RTW89_PHY_ENV_MON_CLM = BIT(1),
+ RTW89_PHY_ENV_MON_FAHM = BIT(2),
+ RTW89_PHY_ENV_MON_IFS_CLM = BIT(3),
+ RTW89_PHY_ENV_MON_EDCCA_CLM = BIT(4),
+};
+
+#define CCX_US_BASE_RATIO 4
+enum rtw89_ccx_unit {
+ RTW89_CCX_4_US = 0,
+ RTW89_CCX_8_US = 1,
+ RTW89_CCX_16_US = 2,
+ RTW89_CCX_32_US = 3
+};
+
+enum rtw89_dig_gain_type {
+ RTW89_DIG_GAIN_LNA_G = 0,
+ RTW89_DIG_GAIN_TIA_G = 1,
+ RTW89_DIG_GAIN_LNA_A = 2,
+ RTW89_DIG_GAIN_TIA_A = 3,
+ RTW89_DIG_GAIN_MAX = 4
+};
+
+enum rtw89_dig_gain_lna_idx {
+ RTW89_DIG_GAIN_LNA_IDX1 = 1,
+ RTW89_DIG_GAIN_LNA_IDX2 = 2,
+ RTW89_DIG_GAIN_LNA_IDX3 = 3,
+ RTW89_DIG_GAIN_LNA_IDX4 = 4,
+ RTW89_DIG_GAIN_LNA_IDX5 = 5,
+ RTW89_DIG_GAIN_LNA_IDX6 = 6
+};
+
+enum rtw89_dig_gain_tia_idx {
+ RTW89_DIG_GAIN_TIA_IDX0 = 0,
+ RTW89_DIG_GAIN_TIA_IDX1 = 1
+};
+
+struct rtw89_txpwr_byrate_cfg {
+ enum rtw89_band band;
+ enum rtw89_nss nss;
+ enum rtw89_rate_section rs;
+ u8 shf;
+ u8 len;
+ u32 data;
+};
+
+#define DELTA_SWINGIDX_SIZE 30
+
+struct rtw89_txpwr_track_cfg {
+ const u8 (*delta_swingidx_5gb_n)[DELTA_SWINGIDX_SIZE];
+ const u8 (*delta_swingidx_5gb_p)[DELTA_SWINGIDX_SIZE];
+ const u8 (*delta_swingidx_5ga_n)[DELTA_SWINGIDX_SIZE];
+ const u8 (*delta_swingidx_5ga_p)[DELTA_SWINGIDX_SIZE];
+ const u8 *delta_swingidx_2gb_n;
+ const u8 *delta_swingidx_2gb_p;
+ const u8 *delta_swingidx_2ga_n;
+ const u8 *delta_swingidx_2ga_p;
+ const u8 *delta_swingidx_2g_cck_b_n;
+ const u8 *delta_swingidx_2g_cck_b_p;
+ const u8 *delta_swingidx_2g_cck_a_n;
+ const u8 *delta_swingidx_2g_cck_a_p;
+};
+
+struct rtw89_phy_dig_gain_cfg {
+ const struct rtw89_reg_def *table;
+ u8 size;
+};
+
+struct rtw89_phy_dig_gain_table {
+ const struct rtw89_phy_dig_gain_cfg *cfg_lna_g;
+ const struct rtw89_phy_dig_gain_cfg *cfg_tia_g;
+ const struct rtw89_phy_dig_gain_cfg *cfg_lna_a;
+ const struct rtw89_phy_dig_gain_cfg *cfg_tia_a;
+};
+
+struct rtw89_phy_reg3_tbl {
+ const struct rtw89_reg3_def *reg3;
+ int size;
+};
+
+#define DECLARE_PHY_REG3_TBL(_name) \
+const struct rtw89_phy_reg3_tbl _name ## _tbl = { \
+ .reg3 = _name, \
+ .size = ARRAY_SIZE(_name), \
+}
+
+static inline void rtw89_phy_write8(struct rtw89_dev *rtwdev,
+ u32 addr, u8 data)
+{
+ rtw89_write8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data);
+}
+
+static inline void rtw89_phy_write16(struct rtw89_dev *rtwdev,
+ u32 addr, u16 data)
+{
+ rtw89_write16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data);
+}
+
+static inline void rtw89_phy_write32(struct rtw89_dev *rtwdev,
+ u32 addr, u32 data)
+{
+ rtw89_write32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, data);
+}
+
+static inline void rtw89_phy_write32_set(struct rtw89_dev *rtwdev,
+ u32 addr, u32 bits)
+{
+ rtw89_write32_set(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits);
+}
+
+static inline void rtw89_phy_write32_clr(struct rtw89_dev *rtwdev,
+ u32 addr, u32 bits)
+{
+ rtw89_write32_clr(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, bits);
+}
+
+static inline void rtw89_phy_write32_mask(struct rtw89_dev *rtwdev,
+ u32 addr, u32 mask, u32 data)
+{
+ rtw89_write32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask, data);
+}
+
+static inline u8 rtw89_phy_read8(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtw89_read8(rtwdev, addr | RTW89_PHY_ADDR_OFFSET);
+}
+
+static inline u16 rtw89_phy_read16(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtw89_read16(rtwdev, addr | RTW89_PHY_ADDR_OFFSET);
+}
+
+static inline u32 rtw89_phy_read32(struct rtw89_dev *rtwdev, u32 addr)
+{
+ return rtw89_read32(rtwdev, addr | RTW89_PHY_ADDR_OFFSET);
+}
+
+static inline u32 rtw89_phy_read32_mask(struct rtw89_dev *rtwdev,
+ u32 addr, u32 mask)
+{
+ return rtw89_read32_mask(rtwdev, addr | RTW89_PHY_ADDR_OFFSET, mask);
+}
+
+void rtw89_phy_write_reg3_tbl(struct rtw89_dev *rtwdev,
+ const struct rtw89_phy_reg3_tbl *tbl);
+u8 rtw89_phy_get_txsc(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_bandwidth dbw);
+u32 rtw89_phy_read_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask);
+bool rtw89_phy_write_rf(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path,
+ u32 addr, u32 mask, u32 data);
+void rtw89_phy_init_bb_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_init_rf_reg(struct rtw89_dev *rtwdev);
+void rtw89_phy_dm_init(struct rtw89_dev *rtwdev);
+void rtw89_phy_write32_idx(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ u32 data, enum rtw89_phy_idx phy_idx);
+void rtw89_phy_load_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_txpwr_table *tbl);
+s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev,
+ const struct rtw89_rate_desc *rate_desc);
+void rtw89_phy_fill_txpwr_limit(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit *lmt,
+ u8 ntx);
+void rtw89_phy_fill_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ struct rtw89_txpwr_limit_ru *lmt_ru,
+ u8 ntx);
+s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev,
+ u8 bw, u8 ntx, u8 rs, u8 bf, u8 ch);
+void rtw89_phy_ra_assoc(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
+void rtw89_phy_ra_update(struct rtw89_dev *rtwdev);
+void rtw89_phy_ra_updata_sta(struct rtw89_dev *rtwdev, struct ieee80211_sta *sta);
+void rtw89_phy_rate_pattern_vif(struct rtw89_dev *rtwdev,
+ struct ieee80211_vif *vif,
+ const struct cfg80211_bitrate_mask *mask);
+void rtw89_phy_c2h_handle(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+ u32 len, u8 class, u8 func);
+void rtw89_phy_cfo_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_cfo_track_work(struct work_struct *work);
+void rtw89_phy_cfo_parse(struct rtw89_dev *rtwdev, s16 cfo_val,
+ struct rtw89_rx_phy_ppdu *phy_ppdu);
+void rtw89_phy_stat_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_env_monitor_track(struct rtw89_dev *rtwdev);
+void rtw89_phy_set_phy_regs(struct rtw89_dev *rtwdev, u32 addr, u32 mask,
+ u32 val);
+void rtw89_phy_dig_reset(struct rtw89_dev *rtwdev);
+void rtw89_phy_dig(struct rtw89_dev *rtwdev);
+void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ps.c b/drivers/net/wireless/realtek/rtw89/ps.c
new file mode 100644
index 000000000000..7eaa01e41ef2
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/ps.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "core.h"
+#include "debug.h"
+#include "fw.h"
+#include "mac.h"
+#include "ps.h"
+#include "reg.h"
+#include "util.h"
+
+static int rtw89_fw_leave_lps_check(struct rtw89_dev *rtwdev, u8 macid)
+{
+ u32 pwr_en_bit = 0xE;
+ u32 chk_msk = pwr_en_bit << (4 * macid);
+ u32 polling;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_read32_mask, polling, !polling,
+ 1000, 50000, false, rtwdev,
+ R_AX_PPWRBIT_SETTING, chk_msk);
+ if (ret) {
+ rtw89_info(rtwdev, "rtw89: failed to leave lps state\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void __rtw89_enter_ps_mode(struct rtw89_dev *rtwdev)
+{
+ if (!rtwdev->ps_mode)
+ return;
+
+ if (test_and_set_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ return;
+
+ rtw89_mac_power_mode_change(rtwdev, true);
+}
+
+void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
+{
+ if (!rtwdev->ps_mode)
+ return;
+
+ if (test_and_clear_bit(RTW89_FLAG_LOW_POWER_MODE, rtwdev->flags))
+ rtw89_mac_power_mode_change(rtwdev, false);
+}
+
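+/* Leisure PS is entered and left by sending H2C LPS parameters to the
+ * firmware; leaving additionally polls R_AX_PPWRBIT_SETTING until the
+ * per-MACID power bits clear. The BT coexistence code is notified of the
+ * radio ownership change on both paths.
+ */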
+static void __rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+{
+ struct rtw89_lps_parm lps_param = {
+ .macid = mac_id,
+ .psmode = RTW89_MAC_AX_PS_MODE_LEGACY,
+ .lastrpwm = RTW89_LAST_RPWM_PS,
+ };
+
+ rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_FW_CTRL);
+ rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+}
+
+static void __rtw89_leave_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+{
+ struct rtw89_lps_parm lps_param = {
+ .macid = mac_id,
+ .psmode = RTW89_MAC_AX_PS_MODE_ACTIVE,
+ .lastrpwm = RTW89_LAST_RPWM_ACTIVE,
+ };
+
+ rtw89_fw_h2c_lps_parm(rtwdev, &lps_param);
+ rtw89_fw_leave_lps_check(rtwdev, 0);
+ rtw89_btc_ntfy_radio_state(rtwdev, BTC_RFCTRL_WL_ON);
+}
+
+void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev)
+{
+ lockdep_assert_held(&rtwdev->mutex);
+
+ __rtw89_leave_ps_mode(rtwdev);
+}
+
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id)
+{
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (test_and_set_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
+ return;
+
+ __rtw89_enter_lps(rtwdev, mac_id);
+ __rtw89_enter_ps_mode(rtwdev);
+}
+
+static void rtw89_leave_lps_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ if (rtwvif->wifi_role != RTW89_WIFI_ROLE_STATION)
+ return;
+
+ __rtw89_leave_ps_mode(rtwdev);
+ __rtw89_leave_lps(rtwdev, rtwvif->mac_id);
+}
+
+void rtw89_leave_lps(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (!test_and_clear_bit(RTW89_FLAG_LEISURE_PS, rtwdev->flags))
+ return;
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_leave_lps_vif(rtwdev, rtwvif);
+}
+
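+/* Inactive PS tears down all vifs at the MAC level and stops the core
+ * entirely; leaving it restarts the core, re-tunes the channel and
+ * re-initializes every vif.
+ */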
+void rtw89_enter_ips(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+
+ set_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_vif_deinit(rtwdev, rtwvif);
+
+ rtw89_core_stop(rtwdev);
+}
+
+void rtw89_leave_ips(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+ int ret;
+
+ ret = rtw89_core_start(rtwdev);
+ if (ret)
+ rtw89_err(rtwdev, "failed to leave idle state\n");
+
+ rtw89_set_channel(rtwdev);
+
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ rtw89_mac_vif_init(rtwdev, rtwvif);
+
+ clear_bit(RTW89_FLAG_INACTIVE_PS, rtwdev->flags);
+}
+
+void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl)
+{
+ if (btc_ctrl)
+ rtw89_leave_lps(rtwdev);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/ps.h b/drivers/net/wireless/realtek/rtw89/ps.h
new file mode 100644
index 000000000000..a184b68994aa
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/ps.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_PS_H_
+#define __RTW89_PS_H_
+
+void rtw89_enter_lps(struct rtw89_dev *rtwdev, u8 mac_id);
+void rtw89_leave_lps(struct rtw89_dev *rtwdev);
+void __rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
+void rtw89_leave_ps_mode(struct rtw89_dev *rtwdev);
+void rtw89_enter_ips(struct rtw89_dev *rtwdev);
+void rtw89_leave_ips(struct rtw89_dev *rtwdev);
+void rtw89_set_coex_ctrl_lps(struct rtw89_dev *rtwdev, bool btc_ctrl);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h
new file mode 100644
index 000000000000..365d8c8ce57b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/reg.h
@@ -0,0 +1,2159 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_REG_H__
+#define __RTW89_REG_H__
+
+#define R_AX_SYS_WL_EFUSE_CTRL 0x000A
+#define B_AX_AUTOLOAD_SUS BIT(5)
+
+#define R_AX_SYS_FUNC_EN 0x0002
+#define B_AX_FEN_BB_GLB_RSTN BIT(1)
+#define B_AX_FEN_BBRSTB BIT(0)
+
+#define R_AX_SYS_PW_CTRL 0x0004
+#define B_AX_PSUS_OFF_CAPC_EN BIT(14)
+
+#define R_AX_SYS_CLK_CTRL 0x0008
+#define B_AX_CPU_CLK_EN BIT(14)
+
+#define R_AX_RSV_CTRL 0x001C
+#define B_AX_R_DIS_PRST BIT(6)
+#define B_AX_WLOCK_1C_BIT6 BIT(5)
+
+#define R_AX_EFUSE_CTRL_1 0x0038
+#define B_AX_EF_PGPD_MASK GENMASK(30, 28)
+#define B_AX_EF_RDT BIT(27)
+#define B_AX_EF_VDDQST_MASK GENMASK(26, 24)
+#define B_AX_EF_PGTS_MASK GENMASK(23, 20)
+#define B_AX_EF_PD_DIS BIT(11)
+#define B_AX_EF_POR BIT(10)
+#define B_AX_EF_CELL_SEL_MASK GENMASK(9, 8)
+
+#define R_AX_SPSLDO_ON_CTRL0 0x0200
+#define B_AX_OCP_L1_MASK GENMASK(15, 13)
+
+#define R_AX_EFUSE_CTRL 0x0030
+#define B_AX_EF_MODE_SEL_MASK GENMASK(31, 30)
+#define B_AX_EF_RDY BIT(29)
+#define B_AX_EF_COMP_RESULT BIT(28)
+#define B_AX_EF_ADDR_MASK GENMASK(26, 16)
+#define B_AX_EF_DATA_MASK GENMASK(15, 0)
+
+#define R_AX_GPIO_MUXCFG 0x0040
+#define B_AX_BOOT_MODE BIT(19)
+#define B_AX_WL_EECS_EXT_32K_SEL BIT(18)
+#define B_AX_WL_SEC_BONDING_OPT_STS BIT(17)
+#define B_AX_SECSIC_SEL BIT(16)
+#define B_AX_ENHTP BIT(14)
+#define B_AX_BT_AOD_GPIO3 BIT(13)
+#define B_AX_ENSIC BIT(12)
+#define B_AX_SIC_SWRST BIT(11)
+#define B_AX_PO_WIFI_PTA_PINS BIT(10)
+#define B_AX_PO_BT_PTA_PINS BIT(9)
+#define B_AX_ENUARTTX BIT(8)
+#define B_AX_BTMODE_MASK GENMASK(7, 6)
+#define MAC_AX_BT_MODE_0_3 0
+#define MAC_AX_BT_MODE_2 2
+#define B_AX_ENBT BIT(5)
+#define B_AX_EROM_EN BIT(4)
+#define B_AX_ENUARTRX BIT(2)
+#define B_AX_GPIOSEL_MASK GENMASK(1, 0)
+
+#define R_AX_DBG_CTRL 0x0058
+#define B_AX_DBG_SEL1_4BIT GENMASK(31, 30)
+#define B_AX_DBG_SEL1_16BIT BIT(27)
+#define B_AX_DBG_SEL1 GENMASK(23, 16)
+#define B_AX_DBG_SEL0_4BIT GENMASK(15, 14)
+#define B_AX_DBG_SEL0_16BIT BIT(11)
+#define B_AX_DBG_SEL0 GENMASK(7, 0)
+
+#define R_AX_SYS_SDIO_CTRL 0x0070
+#define B_AX_PCIE_DIS_L2_CTRL_LDO_HCI BIT(15)
+#define B_AX_PCIE_DIS_WLSUS_AFT_PDN BIT(14)
+#define B_AX_PCIE_AUXCLK_GATE BIT(11)
+#define B_AX_LTE_MUX_CTRL_PATH BIT(26)
+
+#define R_AX_PLATFORM_ENABLE 0x0088
+#define B_AX_WCPU_EN BIT(1)
+
+#define R_AX_SCOREBOARD 0x00AC
+#define B_AX_TOGGLE BIT(31)
+#define B_MAC_AX_SB_FW_MASK GENMASK(30, 24)
+#define B_MAC_AX_SB_DRV_MASK GENMASK(23, 0)
+#define B_MAC_AX_BTGS1_NOTIFY BIT(0)
+#define MAC_AX_NOTIFY_TP_MAJOR 0x81
+#define MAC_AX_NOTIFY_PWR_MAJOR 0x80
+
+#define R_AX_DBG_PORT_SEL 0x00C0
+#define B_AX_DEBUG_ST_MASK GENMASK(31, 0)
+
+#define R_AX_SYS_CFG1 0x00F0
+#define B_AX_CHIP_VER_MASK GENMASK(15, 12)
+
+#define R_AX_SYS_STATUS1 0x00F4
+#define B_AX_SEL_0XC0_MASK GENMASK(17, 16)
+
+#define R_AX_HALT_H2C_CTRL 0x0160
+#define R_AX_HALT_H2C 0x0168
+#define B_AX_HALT_H2C_TRIGGER BIT(0)
+#define R_AX_HALT_C2H_CTRL 0x0164
+#define R_AX_HALT_C2H 0x016C
+
+#define R_AX_WCPU_FW_CTRL 0x01E0
+#define B_AX_WCPU_FWDL_STS_MASK GENMASK(7, 5)
+#define B_AX_FWDL_PATH_RDY BIT(2)
+#define B_AX_H2C_PATH_RDY BIT(1)
+#define B_AX_WCPU_FWDL_EN BIT(0)
+
+#define R_AX_RPWM 0x01E4
+#define R_AX_PCIE_HRPWM 0x10C0
+#define PS_RPWM_TOGGLE BIT(15)
+#define PS_RPWM_ACK BIT(14)
+#define PS_RPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_RPWM_STATE 0x7
+#define RPWM_SEQ_NUM_MAX 3
+#define PS_CPWM_SEQ_NUM GENMASK(13, 12)
+#define PS_CPWM_RSP_SEQ_NUM GENMASK(9, 8)
+#define PS_CPWM_STATE GENMASK(2, 0)
+#define CPWM_SEQ_NUM_MAX 3
+
+#define R_AX_BOOT_REASON 0x01E6
+#define B_AX_BOOT_REASON_MASK GENMASK(2, 0)
+
+#define R_AX_LDM 0x01E8
+#define B_AX_EN_32K BIT(31)
+
+#define R_AX_UDM0 0x01F0
+#define R_AX_UDM1 0x01F4
+#define R_AX_UDM2 0x01F8
+#define R_AX_UDM3 0x01FC
+
+#define R_AX_XTAL_ON_CTRL0 0x0280
+#define B_AX_XTAL_SC_LPS BIT(31)
+#define B_AX_XTAL_SC_XO_MASK GENMASK(23, 17)
+#define B_AX_XTAL_SC_XI_MASK GENMASK(16, 10)
+#define B_AX_XTAL_SC_MASK GENMASK(6, 0)
+
+#define R_AX_GPIO0_7_FUNC_SEL 0x02D0
+
+#define R_AX_WLRF_CTRL 0x02F0
+#define B_AX_WLRF1_CTRL_7 BIT(15)
+#define B_AX_WLRF1_CTRL_1 BIT(9)
+#define B_AX_WLRF_CTRL_7 BIT(7)
+#define B_AX_WLRF_CTRL_1 BIT(1)
+
+#define R_AX_IC_PWR_STATE 0x03F0
+#define B_AX_WHOLE_SYS_PWR_STE_MASK GENMASK(25, 16)
+#define B_AX_WLMAC_PWR_STE_MASK GENMASK(9, 8)
+#define B_AX_UART_HCISYS_PWR_STE_MASK GENMASK(7, 6)
+#define B_AX_SDIO_HCISYS_PWR_STE_MASK GENMASK(5, 4)
+#define B_AX_USB_HCISYS_PWR_STE_MASK GENMASK(3, 2)
+#define B_AX_PCIE_HCISYS_PWR_STE_MASK GENMASK(1, 0)
+
+#define R_AX_FILTER_MODEL_ADDR 0x0C04
+
+#define R_AX_PCIE_DBG_CTRL 0x11C0
+#define B_AX_DBG_DUMMY_MASK GENMASK(23, 16)
+#define B_AX_DBG_SEL_MASK GENMASK(15, 13)
+#define B_AX_PCIE_DBG_SEL BIT(12)
+#define B_AX_MRD_TIMEOUT_EN BIT(10)
+#define B_AX_ASFF_FULL_NO_STK BIT(1)
+#define B_AX_EN_STUCK_DBG BIT(0)
+
+#define R_AX_PHYREG_SET 0x8040
+#define PHYREG_SET_ALL_CYCLE 0x8
+
+#define R_AX_HD0IMR 0x8110
+#define B_AX_WDT_PTFM_INT_EN BIT(5)
+#define B_AX_CPWM_INT_EN BIT(2)
+#define B_AX_GT3_INT_EN BIT(1)
+#define B_AX_C2H_INT_EN BIT(0)
+#define R_AX_HD0ISR 0x8114
+#define B_AX_C2H_INT BIT(0)
+
+#define R_AX_H2CREG_DATA0 0x8140
+#define R_AX_H2CREG_DATA1 0x8144
+#define R_AX_H2CREG_DATA2 0x8148
+#define R_AX_H2CREG_DATA3 0x814C
+#define R_AX_C2HREG_DATA0 0x8150
+#define R_AX_C2HREG_DATA1 0x8154
+#define R_AX_C2HREG_DATA2 0x8158
+#define R_AX_C2HREG_DATA3 0x815C
+#define R_AX_H2CREG_CTRL 0x8160
+#define B_AX_H2CREG_TRIGGER BIT(0)
+#define R_AX_C2HREG_CTRL 0x8164
+#define B_AX_C2HREG_TRIGGER BIT(0)
+#define R_AX_CPWM 0x8170
+
+#define R_AX_HCI_FUNC_EN 0x8380
+#define B_AX_HCI_RXDMA_EN BIT(1)
+#define B_AX_HCI_TXDMA_EN BIT(0)
+
+#define R_AX_BOOT_DBG 0x83F0
+
+#define R_AX_DMAC_FUNC_EN 0x8400
+#define B_AX_MAC_FUNC_EN BIT(30)
+#define B_AX_DMAC_FUNC_EN BIT(29)
+#define B_AX_MPDU_PROC_EN BIT(28)
+#define B_AX_WD_RLS_EN BIT(27)
+#define B_AX_DLE_WDE_EN BIT(26)
+#define B_AX_TXPKT_CTRL_EN BIT(25)
+#define B_AX_STA_SCH_EN BIT(24)
+#define B_AX_DLE_PLE_EN BIT(23)
+#define B_AX_PKT_BUF_EN BIT(22)
+#define B_AX_DMAC_TBL_EN BIT(21)
+#define B_AX_PKT_IN_EN BIT(20)
+#define B_AX_DLE_CPUIO_EN BIT(19)
+#define B_AX_DISPATCHER_EN BIT(18)
+#define B_AX_MAC_SEC_EN BIT(16)
+
+#define R_AX_DMAC_CLK_EN 0x8404
+#define B_AX_WD_RLS_CLK_EN BIT(27)
+#define B_AX_DLE_WDE_CLK_EN BIT(26)
+#define B_AX_TXPKT_CTRL_CLK_EN BIT(25)
+#define B_AX_STA_SCH_CLK_EN BIT(24)
+#define B_AX_DLE_PLE_CLK_EN BIT(23)
+#define B_AX_PKT_IN_CLK_EN BIT(20)
+#define B_AX_DLE_CPUIO_CLK_EN BIT(19)
+#define B_AX_DISPATCHER_CLK_EN BIT(18)
+#define B_AX_MAC_SEC_CLK_EN BIT(16)
+
+#define PCI_LTR_IDLE_TIMER_1US 0
+#define PCI_LTR_IDLE_TIMER_10US 1
+#define PCI_LTR_IDLE_TIMER_100US 2
+#define PCI_LTR_IDLE_TIMER_200US 3
+#define PCI_LTR_IDLE_TIMER_400US 4
+#define PCI_LTR_IDLE_TIMER_800US 5
+#define PCI_LTR_IDLE_TIMER_1_6MS 6
+#define PCI_LTR_IDLE_TIMER_3_2MS 7
+#define PCI_LTR_IDLE_TIMER_R_ERR 0xFD
+#define PCI_LTR_IDLE_TIMER_DEF 0xFE
+#define PCI_LTR_IDLE_TIMER_IGNORE 0xFF
+
+#define PCI_LTR_SPC_10US 0
+#define PCI_LTR_SPC_100US 1
+#define PCI_LTR_SPC_500US 2
+#define PCI_LTR_SPC_1MS 3
+#define PCI_LTR_SPC_R_ERR 0xFD
+#define PCI_LTR_SPC_DEF 0xFE
+#define PCI_LTR_SPC_IGNORE 0xFF
+
+#define R_AX_LTR_CTRL_0 0x8410
+#define B_AX_LTR_SPACE_IDX_MASK GENMASK(13, 12)
+#define B_AX_LTR_IDLE_TIMER_IDX_MASK GENMASK(10, 8)
+#define B_AX_APP_LTR_ACT BIT(5)
+#define B_AX_APP_LTR_IDLE BIT(4)
+#define B_AX_LTR_EN BIT(1)
+#define B_AX_LTR_HW_EN BIT(0)
+
+#define R_AX_LTR_CTRL_1 0x8414
+#define B_AX_LTR_RX1_TH_MASK GENMASK(27, 16)
+#define B_AX_LTR_RX0_TH_MASK GENMASK(11, 0)
+
+#define R_AX_LTR_IDLE_LATENCY 0x8418
+
+#define R_AX_LTR_ACTIVE_LATENCY 0x841C
+
+#define R_AX_SER_DBG_INFO 0x8424
+#define B_AX_L0_TO_L1_EVENT_MASK GENMASK(31, 28)
+
+#define R_AX_DLE_EMPTY0 0x8430
+#define B_AX_PLE_EMPTY_QTA_DMAC_CPUIO BIT(26)
+#define B_AX_PLE_EMPTY_QTA_DMAC_MPDU_TX BIT(25)
+#define B_AX_PLE_EMPTY_QTA_DMAC_WLAN_CPU BIT(24)
+#define B_AX_PLE_EMPTY_QTA_DMAC_H2C BIT(23)
+#define B_AX_PLE_EMPTY_QTA_DMAC_B1_TXPL BIT(22)
+#define B_AX_PLE_EMPTY_QTA_DMAC_B0_TXPL BIT(21)
+#define B_AX_WDE_EMPTY_QTA_DMAC_CPUIO BIT(20)
+#define B_AX_WDE_EMPTY_QTA_DMAC_PKTIN BIT(19)
+#define B_AX_WDE_EMPTY_QTA_DMAC_DATA_CPU BIT(18)
+#define B_AX_WDE_EMPTY_QTA_DMAC_WLAN_CPU BIT(17)
+#define B_AX_WDE_EMPTY_QTA_DMAC_HIF BIT(16)
+#define B_AX_WDE_EMPTY_QUE_DMAC_PKTIN BIT(10)
+#define B_AX_PLE_EMPTY_QUE_DMAC_SEC_TX BIT(9)
+#define B_AX_PLE_EMPTY_QUE_DMAC_MPDU_TX BIT(8)
+#define B_AX_WDE_EMPTY_QUE_OTHERS BIT(7)
+#define B_AX_WDE_EMPTY_QUE_CMAC0_WMM1 BIT(4)
+#define B_AX_WDE_EMPTY_QUE_CMAC0_WMM0 BIT(3)
+#define B_AX_WDE_EMPTY_QUE_CMAC1_MBH BIT(2)
+#define B_AX_WDE_EMPTY_QUE_CMAC0_MBH BIT(1)
+#define B_AX_WDE_EMPTY_QUE_CMAC0_ALL_AC BIT(0)
+
+#define R_AX_DMAC_ERR_ISR 0x8524
+#define B_AX_DLE_CPUIO_ERR_FLAG BIT(10)
+#define B_AX_APB_BRIDGE_ERR_FLAG BIT(9)
+#define B_AX_DISPATCH_ERR_FLAG BIT(8)
+#define B_AX_PKTIN_ERR_FLAG BIT(7)
+#define B_AX_PLE_DLE_ERR_FLAG BIT(6)
+#define B_AX_TXPKTCTRL_ERR_FLAG BIT(5)
+#define B_AX_WDE_DLE_ERR_FLAG BIT(4)
+#define B_AX_STA_SCHEDULER_ERR_FLAG BIT(3)
+#define B_AX_MPDU_ERR_FLAG BIT(2)
+#define B_AX_WSEC_ERR_FLAG BIT(1)
+#define B_AX_WDRLS_ERR_FLAG BIT(0)
+
+#define R_AX_DISPATCHER_GLOBAL_SETTING_0 0x8800
+#define B_AX_PL_PAGE_128B_SEL BIT(9)
+#define B_AX_WD_PAGE_64B_SEL BIT(8)
+#define R_AX_OTHER_DISPATCHER_ERR_ISR 0x8804
+#define R_AX_HOST_DISPATCHER_ERR_ISR 0x8808
+#define R_AX_CPU_DISPATCHER_ERR_ISR 0x880C
+#define R_AX_TX_ADDRESS_INFO_MODE_SETTING 0x8810
+#define B_AX_HOST_ADDR_INFO_8B_SEL BIT(0)
+
+#define R_AX_HOST_DISPATCHER_ERR_IMR 0x8850
+#define B_AX_HDT_OFFSET_UNMATCH_INT_EN BIT(7)
+#define B_AX_HDT_PKT_FAIL_DBG_INT_EN BIT(2)
+
+#define R_AX_CPU_DISPATCHER_ERR_IMR 0x8854
+#define B_AX_CPU_SHIFT_EN_ERR_INT_EN BIT(25)
+
+#define R_AX_OTHER_DISPATCHER_ERR_IMR 0x8858
+
+#define R_AX_HCI_FC_CTRL 0x8A00
+#define B_AX_HCI_FC_CH12_FULL_COND_MASK GENMASK(11, 10)
+#define B_AX_HCI_FC_WP_CH811_FULL_COND_MASK GENMASK(9, 8)
+#define B_AX_HCI_FC_WP_CH07_FULL_COND_MASK GENMASK(7, 6)
+#define B_AX_HCI_FC_WD_FULL_COND_MASK GENMASK(5, 4)
+#define B_AX_HCI_FC_CH12_EN BIT(3)
+#define B_AX_HCI_FC_MODE_MASK GENMASK(2, 1)
+#define B_AX_HCI_FC_EN BIT(0)
+
+#define R_AX_CH_PAGE_CTRL 0x8A04
+#define B_AX_PREC_PAGE_CH12_MASK GENMASK(24, 16)
+#define B_AX_PREC_PAGE_CH011_MASK GENMASK(8, 0)
+
+#define B_AX_MAX_PG_MASK GENMASK(28, 16)
+#define B_AX_MIN_PG_MASK GENMASK(12, 0)
+#define B_AX_GRP BIT(31)
+#define R_AX_ACH0_PAGE_CTRL 0x8A10
+#define R_AX_ACH1_PAGE_CTRL 0x8A14
+#define R_AX_ACH2_PAGE_CTRL 0x8A18
+#define R_AX_ACH3_PAGE_CTRL 0x8A1C
+#define R_AX_ACH4_PAGE_CTRL 0x8A20
+#define R_AX_ACH5_PAGE_CTRL 0x8A24
+#define R_AX_ACH6_PAGE_CTRL 0x8A28
+#define R_AX_ACH7_PAGE_CTRL 0x8A2C
+#define R_AX_CH8_PAGE_CTRL 0x8A30
+#define R_AX_CH9_PAGE_CTRL 0x8A34
+#define R_AX_CH10_PAGE_CTRL 0x8A38
+#define R_AX_CH11_PAGE_CTRL 0x8A3C
+
+#define B_AX_AVAL_PG_MASK GENMASK(27, 16)
+#define B_AX_USE_PG_MASK GENMASK(12, 0)
+#define R_AX_ACH0_PAGE_INFO 0x8A50
+#define R_AX_ACH1_PAGE_INFO 0x8A54
+#define R_AX_ACH2_PAGE_INFO 0x8A58
+#define R_AX_ACH3_PAGE_INFO 0x8A5C
+#define R_AX_ACH4_PAGE_INFO 0x8A60
+#define R_AX_ACH5_PAGE_INFO 0x8A64
+#define R_AX_ACH6_PAGE_INFO 0x8A68
+#define R_AX_ACH7_PAGE_INFO 0x8A6C
+#define R_AX_CH8_PAGE_INFO 0x8A70
+#define R_AX_CH9_PAGE_INFO 0x8A74
+#define R_AX_CH10_PAGE_INFO 0x8A78
+#define R_AX_CH11_PAGE_INFO 0x8A7C
+#define R_AX_CH12_PAGE_INFO 0x8A80
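/*
 * Illustrative sketch (editorial, not part of this patch): all of the
 * per-channel PAGE_INFO registers above share the B_AX_AVAL_PG_MASK /
 * B_AX_USE_PG_MASK layout, so the available/used page counts can be extracted
 * with u32_get_bits() from <linux/bitfield.h>.  rtw89_read32() is assumed to
 * be the driver's read accessor.
 */
static inline void example_read_ach0_pages(struct rtw89_dev *rtwdev,
					   u16 *aval, u16 *use)
{
	u32 val = rtw89_read32(rtwdev, R_AX_ACH0_PAGE_INFO);

	*aval = u32_get_bits(val, B_AX_AVAL_PG_MASK);	/* pages still available */
	*use = u32_get_bits(val, B_AX_USE_PG_MASK);	/* pages currently in use */
}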
+
+#define R_AX_PUB_PAGE_INFO3 0x8A8C
+#define B_AX_G1_AVAL_PG_MASK GENMASK(28, 16)
+#define B_AX_G0_AVAL_PG_MASK GENMASK(12, 0)
+
+#define R_AX_PUB_PAGE_CTRL1 0x8A90
+#define B_AX_PUBPG_G1_MASK GENMASK(28, 16)
+#define B_AX_PUBPG_G0_MASK GENMASK(12, 0)
+
+#define R_AX_PUB_PAGE_CTRL2 0x8A94
+#define B_AX_PUBPG_ALL_MASK GENMASK(12, 0)
+
+#define R_AX_PUB_PAGE_INFO1 0x8A98
+#define B_AX_G1_USE_PG_MASK GENMASK(28, 16)
+#define B_AX_G0_USE_PG_MASK GENMASK(12, 0)
+
+#define R_AX_PUB_PAGE_INFO2 0x8A9C
+#define B_AX_PUB_AVAL_PG_MASK GENMASK(12, 0)
+
+#define R_AX_WP_PAGE_CTRL1 0x8AA0
+#define B_AX_PREC_PAGE_WP_CH811_MASK GENMASK(24, 16)
+#define B_AX_PREC_PAGE_WP_CH07_MASK GENMASK(8, 0)
+
+#define R_AX_WP_PAGE_CTRL2 0x8AA4
+#define B_AX_WP_THRD_MASK GENMASK(12, 0)
+
+#define R_AX_WP_PAGE_INFO1 0x8AA8
+#define B_AX_WP_AVAL_PG_MASK GENMASK(28, 16)
+
+#define R_AX_WDE_PKTBUF_CFG 0x8C08
+#define B_AX_WDE_START_BOUND_MASK GENMASK(13, 8)
+#define B_AX_WDE_PAGE_SEL_MASK GENMASK(1, 0)
+#define B_AX_WDE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+#define R_AX_WDE_ERR_FLAG_CFG 0x8C34
+#define R_AX_WDE_ERR_IMR 0x8C38
+#define R_AX_WDE_ERR_ISR 0x8C3C
+
+#define B_AX_WDE_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_AX_WDE_MIN_SIZE_MASK GENMASK(11, 0)
+#define R_AX_WDE_QTA0_CFG 0x8C40
+#define R_AX_WDE_QTA1_CFG 0x8C44
+#define R_AX_WDE_QTA2_CFG 0x8C48
+#define R_AX_WDE_QTA3_CFG 0x8C4C
+#define R_AX_WDE_QTA4_CFG 0x8C50
+
+#define B_AX_DLE_PUB_PGNUM GENMASK(12, 0)
+#define B_AX_DLE_FREE_HEADPG GENMASK(11, 0)
+#define B_AX_DLE_FREE_TAILPG GENMASK(27, 16)
+#define B_AX_DLE_USE_PGNUM GENMASK(27, 16)
+#define B_AX_DLE_RSV_PGNUM GENMASK(11, 0)
+#define B_AX_DLE_QEMPTY_GRP GENMASK(31, 0)
+
+#define R_AX_WDE_INI_STATUS 0x8D00
+#define B_AX_WDE_Q_MGN_INI_RDY BIT(1)
+#define B_AX_WDE_BUF_MGN_INI_RDY BIT(0)
+#define WDE_MGN_INI_RDY (B_AX_WDE_Q_MGN_INI_RDY | B_AX_WDE_BUF_MGN_INI_RDY)
+#define R_AX_WDE_DBG_FUN_INTF_CTL 0x8D10
+#define B_AX_WDE_DFI_ACTIVE BIT(31)
+#define B_AX_WDE_DFI_TRGSEL_MASK GENMASK(19, 16)
+#define B_AX_WDE_DFI_ADDR_MASK GENMASK(15, 0)
+#define R_AX_WDE_DBG_FUN_INTF_DATA 0x8D14
+#define B_AX_WDE_DFI_DATA_MASK GENMASK(31, 0)
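/*
 * Illustrative sketch (editorial, not part of this patch): WDE_MGN_INI_RDY
 * ORs both ready bits together, so DLE/WDE bring-up can poll
 * R_AX_WDE_INI_STATUS until both are set.  read_poll_timeout() comes from
 * <linux/iopoll.h>; rtw89_read32() and the 1us/2000us interval/timeout are
 * assumptions for the sketch.
 */
static inline int example_wait_wde_ready(struct rtw89_dev *rtwdev)
{
	u32 val;

	/* Poll every 1us, give up after 2ms, no sleep before the first read. */
	return read_poll_timeout(rtw89_read32, val,
				 (val & WDE_MGN_INI_RDY) == WDE_MGN_INI_RDY,
				 1, 2000, false, rtwdev, R_AX_WDE_INI_STATUS);
}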
+
+#define R_AX_PLE_PKTBUF_CFG 0x9008
+#define B_AX_PLE_START_BOUND_MASK GENMASK(13, 8)
+#define B_AX_PLE_PAGE_SEL_MASK GENMASK(1, 0)
+#define B_AX_PLE_FREE_PAGE_NUM_MASK GENMASK(28, 16)
+#define R_AX_PLE_ERR_FLAG_CFG 0x9034
+
+#define R_AX_PLE_ERR_IMR 0x9038
+#define B_AX_PLE_GETNPG_STRPG_ERR_INT_EN BIT(5)
+
+#define R_AX_PLE_ERR_FLAG_ISR 0x903C
+#define B_AX_PLE_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_AX_PLE_MIN_SIZE_MASK GENMASK(11, 0)
+#define R_AX_PLE_QTA0_CFG 0x9040
+#define R_AX_PLE_QTA1_CFG 0x9044
+#define R_AX_PLE_QTA2_CFG 0x9048
+#define R_AX_PLE_QTA3_CFG 0x904C
+#define R_AX_PLE_QTA4_CFG 0x9050
+#define R_AX_PLE_QTA5_CFG 0x9054
+#define R_AX_PLE_QTA6_CFG 0x9058
+#define B_AX_PLE_Q6_MAX_SIZE_MASK GENMASK(27, 16)
+#define B_AX_PLE_Q6_MIN_SIZE_MASK GENMASK(11, 0)
+#define R_AX_PLE_QTA7_CFG 0x905C
+#define R_AX_PLE_QTA8_CFG 0x9060
+#define R_AX_PLE_QTA9_CFG 0x9064
+#define R_AX_PLE_QTA10_CFG 0x9068
+
+#define R_AX_PLE_INI_STATUS 0x9100
+#define B_AX_PLE_Q_MGN_INI_RDY BIT(1)
+#define B_AX_PLE_BUF_MGN_INI_RDY BIT(0)
+#define PLE_MGN_INI_RDY (B_AX_PLE_Q_MGN_INI_RDY | B_AX_PLE_BUF_MGN_INI_RDY)
+#define R_AX_PLE_DBG_FUN_INTF_CTL 0x9110
+#define B_AX_PLE_DFI_ACTIVE BIT(31)
+#define B_AX_PLE_DFI_TRGSEL_MASK GENMASK(19, 16)
+#define B_AX_PLE_DFI_ADDR_MASK GENMASK(15, 0)
+#define R_AX_PLE_DBG_FUN_INTF_DATA 0x9114
+#define B_AX_PLE_DFI_DATA_MASK GENMASK(31, 0)
+
+#define R_AX_WDRLS_CFG 0x9408
+#define B_AX_RLSRPT_BUFREQ_TO_MASK GENMASK(15, 8)
+#define B_AX_WDRLS_MODE_MASK GENMASK(1, 0)
+
+#define R_AX_RLSRPT0_CFG0 0x9410
+#define B_AX_RLSRPT0_FLTR_MAP_MASK GENMASK(27, 24)
+#define B_AX_RLSRPT0_PKTTYPE_MASK GENMASK(19, 16)
+#define B_AX_RLSRPT0_PID_MASK GENMASK(10, 8)
+#define B_AX_RLSRPT0_QID_MASK GENMASK(5, 0)
+
+#define R_AX_RLSRPT0_CFG1 0x9414
+#define B_AX_RLSRPT0_TO_MASK GENMASK(23, 16)
+#define B_AX_RLSRPT0_AGGNUM_MASK GENMASK(7, 0)
+
+#define R_AX_WDRLS_ERR_IMR 0x9430
+#define B_AX_WDRLS_RPT1_FRZTO_ERR_INT_EN BIT(13)
+#define B_AX_WDRLS_RPT1_AGGNUM0_ERR_INT_EN BIT(12)
+#define B_AX_WDRLS_RPT0_FRZTO_ERR_INT_EN BIT(9)
+#define B_AX_WDRLS_RPT0_AGGNUM0_ERR_INT_EN BIT(8)
+#define B_AX_WDRLS_PLEBREQ_PKTID_ISNULL_ERR_INT_EN BIT(5)
+#define B_AX_WDRLS_PLEBREQ_TO_ERR_INT_EN BIT(4)
+#define B_AX_WDRLS_CTL_FRZTO_ERR_INT_EN BIT(2)
+#define B_AX_WDRLS_CTL_PLPKTID_ISNULL_ERR_INT_EN BIT(1)
+#define B_AX_WDRLS_CTL_WDPKTID_ISNULL_ERR_INT_EN BIT(0)
+#define R_AX_WDRLS_ERR_ISR 0x9434
+
+#define R_AX_BBRPT_COM_ERR_IMR_ISR 0x960C
+#define R_AX_BBRPT_CHINFO_ERR_IMR_ISR 0x962C
+#define R_AX_BBRPT_DFS_ERR_IMR_ISR 0x963C
+#define R_AX_LA_ERRFLAG 0x966C
+
+#define R_AX_WD_BUF_REQ 0x9800
+#define R_AX_PL_BUF_REQ 0x9820
+#define B_AX_WD_BUF_REQ_EXEC BIT(31)
+#define B_AX_WD_BUF_REQ_QUOTA_ID_MASK GENMASK(23, 16)
+#define B_AX_WD_BUF_REQ_LEN_MASK GENMASK(15, 0)
+
+#define R_AX_WD_BUF_STATUS 0x9804
+#define R_AX_PL_BUF_STATUS 0x9824
+#define B_AX_WD_BUF_STAT_DONE BIT(31)
+#define B_AX_WD_BUF_STAT_PKTID_MASK GENMASK(11, 0)
+
+#define R_AX_WD_CPUQ_OP_0 0x9810
+#define R_AX_PL_CPUQ_OP_0 0x9830
+#define B_AX_WD_CPUQ_OP_EXEC BIT(31)
+#define B_AX_CPUQ_OP_CMD_TYPE_MASK GENMASK(27, 24)
+#define B_AX_CPUQ_OP_MACID_MASK GENMASK(23, 16)
+#define B_AX_CPUQ_OP_PKTNUM_MASK GENMASK(7, 0)
+
+#define R_AX_WD_CPUQ_OP_1 0x9814
+#define R_AX_PL_CPUQ_OP_1 0x9834
+#define B_AX_CPUQ_OP_SRC_PID_MASK GENMASK(24, 22)
+#define B_AX_CPUQ_OP_SRC_QID_MASK GENMASK(21, 16)
+#define B_AX_CPUQ_OP_DST_PID_MASK GENMASK(8, 6)
+#define B_AX_CPUQ_OP_DST_QID_MASK GENMASK(5, 0)
+
+#define R_AX_WD_CPUQ_OP_2 0x9818
+#define R_AX_PL_CPUQ_OP_2 0x9838
+#define B_AX_WD_CPUQ_OP_STRT_PKTID_MASK GENMASK(27, 16)
+#define B_AX_WD_CPUQ_OP_END_PKTID_MASK GENMASK(11, 0)
+
+#define R_AX_WD_CPUQ_OP_STATUS 0x981C
+#define R_AX_PL_CPUQ_OP_STATUS 0x983C
+#define B_AX_WD_CPUQ_OP_STAT_DONE BIT(31)
+#define B_AX_WD_CPUQ_OP_PKTID_MASK GENMASK(11, 0)
+#define R_AX_CPUIO_ERR_IMR 0x9840
+#define R_AX_CPUIO_ERR_ISR 0x9844
+
+#define R_AX_SEC_ERR_IMR_ISR 0x991C
+
+#define R_AX_PKTIN_SETTING 0x9A00
+#define B_AX_WD_ADDR_INFO_LENGTH BIT(1)
+#define R_AX_PKTIN_ERR_IMR 0x9A20
+#define R_AX_PKTIN_ERR_ISR 0x9A24
+
+#define R_AX_MPDU_TX_ERR_ISR 0x9BF0
+#define R_AX_MPDU_TX_ERR_IMR 0x9BF4
+
+#define R_AX_MPDU_PROC 0x9C00
+#define B_AX_A_ICV_ERR BIT(1)
+#define B_AX_APPEND_FCS BIT(0)
+
+#define R_AX_ACTION_FWD0 0x9C04
+#define TRXCFG_MPDU_PROC_ACT_FRWD 0x02A95A95
+
+#define R_AX_TF_FWD 0x9C14
+#define TRXCFG_MPDU_PROC_TF_FRWD 0x0000AA55
+
+#define R_AX_HW_RPT_FWD 0x9C18
+#define B_AX_FWD_PPDU_STAT_MASK GENMASK(1, 0)
+#define RTW89_PRPT_DEST_HOST 1
+#define RTW89_PRPT_DEST_WLCPU 2
+
+#define R_AX_CUT_AMSDU_CTRL 0x9C40
+#define TRXCFG_MPDU_PROC_CUT_CTRL 0x010E05F0
+
+#define R_AX_MPDU_RX_ERR_ISR 0x9CF0
+#define R_AX_MPDU_RX_ERR_IMR 0x9CF4
+
+#define R_AX_SEC_ENG_CTRL 0x9D00
+#define B_AX_TX_PARTIAL_MODE BIT(11)
+#define B_AX_CLK_EN_CGCMP BIT(10)
+#define B_AX_CLK_EN_WAPI BIT(9)
+#define B_AX_CLK_EN_WEP_TKIP BIT(8)
+#define B_AX_BMC_MGNT_DEC BIT(5)
+#define B_AX_UC_MGNT_DEC BIT(4)
+#define B_AX_MC_DEC BIT(3)
+#define B_AX_BC_DEC BIT(2)
+#define B_AX_SEC_RX_DEC BIT(1)
+#define B_AX_SEC_TX_ENC BIT(0)
+
+#define R_AX_SEC_MPDU_PROC 0x9D04
+#define B_AX_APPEND_ICV BIT(1)
+#define B_AX_APPEND_MIC BIT(0)
+
+#define R_AX_SEC_CAM_ACCESS 0x9D10
+#define R_AX_SEC_CAM_RDATA 0x9D14
+#define R_AX_SEC_CAM_WDATA 0x9D18
+#define R_AX_SEC_DEBUG 0x9D1C
+#define R_AX_SEC_TX_DEBUG 0x9D20
+#define R_AX_SEC_RX_DEBUG 0x9D24
+#define R_AX_SEC_TRX_PKT_CNT 0x9D28
+#define R_AX_SEC_TRX_BLK_CNT 0x9D2C
+
+#define R_AX_SS_CTRL 0x9E10
+#define B_AX_SS_INIT_DONE_1 BIT(31)
+#define B_AX_SS_WARM_INIT_FLG BIT(29)
+#define B_AX_SS_EN BIT(0)
+
+#define R_AX_SS_MACID_PAUSE_0 0x9EB0
+#define B_AX_SS_MACID31_0_PAUSE_SH 0
+#define B_AX_SS_MACID31_0_PAUSE_MASK GENMASK(31, 0)
+
+#define R_AX_SS_MACID_PAUSE_1 0x9EB4
+#define B_AX_SS_MACID63_32_PAUSE_SH 0
+#define B_AX_SS_MACID63_32_PAUSE_MASK GENMASK(31, 0)
+
+#define R_AX_SS_MACID_PAUSE_2 0x9EB8
+#define B_AX_SS_MACID95_64_PAUSE_SH 0
+#define B_AX_SS_MACID95_64_PAUSE_MASK GENMASK(31, 0)
+
+#define R_AX_SS_MACID_PAUSE_3 0x9EBC
+#define B_AX_SS_MACID127_96_PAUSE_SH 0
+#define B_AX_SS_MACID127_96_PAUSE_MASK GENMASK(31, 0)
+
+#define R_AX_STA_SCHEDULER_ERR_IMR 0x9EF0
+#define R_AX_STA_SCHEDULER_ERR_ISR 0x9EF4
+
+#define R_AX_TXPKTCTL_ERR_IMR_ISR 0x9F1C
+#define R_AX_TXPKTCTL_ERR_IMR_ISR_B1 0x9F2C
+#define B_AX_TXPKTCTL_CMDPSR_FRZTO_ERR_INT_EN BIT(9)
+#define B_AX_TXPKTCTL_USRCTL_RLSBMPLEN_ERR_INT_EN BIT(3)
+#define B_AX_TXPKTCTL_USRCTL_RDNRLSCMD_ERR_INT_EN BIT(2)
+#define B_AX_TXPKTCTL_USRCTL_NOINIT_ERR_INT_EN BIT(1)
+
+#define R_AX_DBG_FUN_INTF_CTL 0x9F30
+#define B_AX_DFI_ACTIVE BIT(31)
+#define B_AX_DFI_TRGSEL_MASK GENMASK(19, 16)
+#define B_AX_DFI_ADDR_MASK GENMASK(15, 0)
+#define R_AX_DBG_FUN_INTF_DATA 0x9F34
+#define B_AX_DFI_DATA_MASK GENMASK(31, 0)
+
+#define R_AX_AFE_CTRL1 0x0024
+
+#define B_AX_R_SYM_WLCMAC1_P4_PC_EN BIT(4)
+#define B_AX_R_SYM_WLCMAC1_P3_PC_EN BIT(3)
+#define B_AX_R_SYM_WLCMAC1_P2_PC_EN BIT(2)
+#define B_AX_R_SYM_WLCMAC1_P1_PC_EN BIT(1)
+#define B_AX_R_SYM_WLCMAC1_PC_EN BIT(0)
+
+#define R_AX_SYS_ISO_CTRL_EXTEND 0x0080
+#define B_AX_CMAC1_FEN BIT(30)
+#define B_AX_R_SYM_FEN_WLBBGLB_1 BIT(17)
+#define B_AX_R_SYM_FEN_WLBBFUN_1 BIT(16)
+#define B_AX_R_SYM_ISO_CMAC12PP BIT(5)
+
+#define R_AX_CMAC_REG_START 0xC000
+
+#define R_AX_CMAC_FUNC_EN 0xC000
+#define R_AX_CMAC_FUNC_EN_C1 0xE000
+#define B_AX_CMAC_CRPRT BIT(31)
+#define B_AX_CMAC_EN BIT(30)
+#define B_AX_CMAC_TXEN BIT(29)
+#define B_AX_CMAC_RXEN BIT(28)
+#define B_AX_FORCE_CMACREG_GCKEN BIT(15)
+#define B_AX_PHYINTF_EN BIT(5)
+#define B_AX_CMAC_DMA_EN BIT(4)
+#define B_AX_PTCLTOP_EN BIT(3)
+#define B_AX_SCHEDULER_EN BIT(2)
+#define B_AX_TMAC_EN BIT(1)
+#define B_AX_RMAC_EN BIT(0)
+
+#define R_AX_CK_EN 0xC004
+#define R_AX_CK_EN_C1 0xE004
+#define B_AX_CMAC_ALLCKEN GENMASK(31, 0)
+#define B_AX_CMAC_CKEN BIT(30)
+#define B_AX_PHYINTF_CKEN BIT(5)
+#define B_AX_CMAC_DMA_CKEN BIT(4)
+#define B_AX_PTCLTOP_CKEN BIT(3)
+#define B_AX_SCHEDULER_CKEN BIT(2)
+#define B_AX_TMAC_CKEN BIT(1)
+#define B_AX_RMAC_CKEN BIT(0)
+
+#define R_AX_WMAC_RFMOD 0xC010
+#define R_AX_WMAC_RFMOD_C1 0xE010
+#define B_AX_WMAC_RFMOD_MASK GENMASK(1, 0)
+
+#define R_AX_GID_POSITION0 0xC070
+#define R_AX_GID_POSITION0_C1 0xE070
+#define R_AX_GID_POSITION1 0xC074
+#define R_AX_GID_POSITION1_C1 0xE074
+#define R_AX_GID_POSITION2 0xC078
+#define R_AX_GID_POSITION2_C1 0xE078
+#define R_AX_GID_POSITION3 0xC07C
+#define R_AX_GID_POSITION3_C1 0xE07C
+#define R_AX_GID_POSITION_EN0 0xC080
+#define R_AX_GID_POSITION_EN0_C1 0xE080
+#define R_AX_GID_POSITION_EN1 0xC084
+#define R_AX_GID_POSITION_EN1_C1 0xE084
+
+#define R_AX_TX_SUB_CARRIER_VALUE 0xC088
+#define R_AX_TX_SUB_CARRIER_VALUE_C1 0xE088
+#define B_AX_TXSC_80M_MASK GENMASK(11, 8)
+#define B_AX_TXSC_40M_MASK GENMASK(7, 4)
+#define B_AX_TXSC_20M_MASK GENMASK(3, 0)
+
+#define R_AX_CMAC_ERR_ISR 0xC164
+#define R_AX_CMAC_ERR_ISR_C1 0xE164
+#define B_AX_WMAC_TX_ERR_IND BIT(7)
+#define B_AX_WMAC_RX_ERR_IND BIT(6)
+#define B_AX_TXPWR_CTRL_ERR_IND BIT(5)
+#define B_AX_PHYINTF_ERR_IND BIT(4)
+#define B_AX_DMA_TOP_ERR_IND BIT(3)
+#define B_AX_PTCL_TOP_ERR_IND BIT(1)
+#define B_AX_SCHEDULE_TOP_ERR_IND BIT(0)
+
+#define R_AX_MACID_SLEEP_0 0xC2C0
+#define R_AX_MACID_SLEEP_0_C1 0xE2C0
+#define B_AX_MACID31_0_SLEEP_SH 0
+#define B_AX_MACID31_0_SLEEP_MASK GENMASK(31, 0)
+
+#define R_AX_MACID_SLEEP_1 0xC2C4
+#define R_AX_MACID_SLEEP_1_C1 0xE2C4
+#define B_AX_MACID63_32_SLEEP_SH 0
+#define B_AX_MACID63_32_SLEEP_MASK GENMASK(31, 0)
+
+#define R_AX_MACID_SLEEP_2 0xC2C8
+#define R_AX_MACID_SLEEP_2_C1 0xE2C8
+#define B_AX_MACID95_64_SLEEP_SH 0
+#define B_AX_MACID95_64_SLEEP_MASK GENMASK(31, 0)
+
+#define R_AX_MACID_SLEEP_3 0xC2CC
+#define R_AX_MACID_SLEEP_3_C1 0xE2CC
+#define B_AX_MACID127_96_SLEEP_SH 0
+#define B_AX_MACID127_96_SLEEP_MASK GENMASK(31, 0)
+
+#define SCH_PREBKF_24US 0x18
+#define R_AX_PREBKF_CFG_0 0xC338
+#define R_AX_PREBKF_CFG_0_C1 0xE338
+#define B_AX_PREBKF_TIME_MASK GENMASK(4, 0)
+
+#define R_AX_CCA_CFG_0 0xC340
+#define R_AX_CCA_CFG_0_C1 0xE340
+#define B_AX_BTCCA_BRK_TXOP_EN BIT(9)
+#define B_AX_BTCCA_EN BIT(5)
+#define B_AX_EDCCA_EN BIT(4)
+#define B_AX_SEC80_EN BIT(3)
+#define B_AX_SEC40_EN BIT(2)
+#define B_AX_SEC20_EN BIT(1)
+#define B_AX_CCA_EN BIT(0)
+
+#define R_AX_CTN_TXEN 0xC348
+#define R_AX_CTN_TXEN_C1 0xE348
+#define B_AX_CTN_TXEN_TWT_1 BIT(15)
+#define B_AX_CTN_TXEN_TWT_0 BIT(14)
+#define B_AX_CTN_TXEN_ULQ BIT(13)
+#define B_AX_CTN_TXEN_BCNQ BIT(12)
+#define B_AX_CTN_TXEN_HGQ BIT(11)
+#define B_AX_CTN_TXEN_CPUMGQ BIT(10)
+#define B_AX_CTN_TXEN_MGQ1 BIT(9)
+#define B_AX_CTN_TXEN_MGQ BIT(8)
+#define B_AX_CTN_TXEN_VO_1 BIT(7)
+#define B_AX_CTN_TXEN_VI_1 BIT(6)
+#define B_AX_CTN_TXEN_BK_1 BIT(5)
+#define B_AX_CTN_TXEN_BE_1 BIT(4)
+#define B_AX_CTN_TXEN_VO_0 BIT(3)
+#define B_AX_CTN_TXEN_VI_0 BIT(2)
+#define B_AX_CTN_TXEN_BK_0 BIT(1)
+#define B_AX_CTN_TXEN_BE_0 BIT(0)
+
+#define R_AX_MUEDCA_BE_PARAM_0 0xC350
+#define R_AX_MUEDCA_BE_PARAM_0_C1 0xE350
+#define B_AX_MUEDCA_BE_PARAM_0_TIMER_MASK GENMASK(31, 16)
+#define B_AX_MUEDCA_BE_PARAM_0_CW_MASK GENMASK(15, 8)
+#define B_AX_MUEDCA_BE_PARAM_0_AIFS_MASK GENMASK(7, 0)
+
+#define R_AX_MUEDCA_BK_PARAM_0 0xC354
+#define R_AX_MUEDCA_BK_PARAM_0_C1 0xE354
+#define R_AX_MUEDCA_VI_PARAM_0 0xC358
+#define R_AX_MUEDCA_VI_PARAM_0_C1 0xE358
+#define R_AX_MUEDCA_VO_PARAM_0 0xC35C
+#define R_AX_MUEDCA_VO_PARAM_0_C1 0xE35C
+
+#define R_AX_MUEDCA_EN 0xC370
+#define R_AX_MUEDCA_EN_C1 0xE370
+#define B_AX_MUEDCA_WMM_SEL BIT(8)
+#define B_AX_SET_MUEDCATIMER_TF_0 BIT(4)
+#define B_AX_MUEDCA_EN_0 BIT(0)
+
+#define R_AX_CCA_CONTROL 0xC390
+#define R_AX_CCA_CONTROL_C1 0xE390
+#define B_AX_TB_CHK_TX_NAV BIT(31)
+#define B_AX_TB_CHK_BASIC_NAV BIT(30)
+#define B_AX_TB_CHK_BTCCA BIT(29)
+#define B_AX_TB_CHK_EDCCA BIT(28)
+#define B_AX_TB_CHK_CCA_S80 BIT(27)
+#define B_AX_TB_CHK_CCA_S40 BIT(26)
+#define B_AX_TB_CHK_CCA_S20 BIT(25)
+#define B_AX_TB_CHK_CCA_P20 BIT(24)
+#define B_AX_SIFS_CHK_BTCCA BIT(21)
+#define B_AX_SIFS_CHK_EDCCA BIT(20)
+#define B_AX_SIFS_CHK_CCA_S80 BIT(19)
+#define B_AX_SIFS_CHK_CCA_S40 BIT(18)
+#define B_AX_SIFS_CHK_CCA_S20 BIT(17)
+#define B_AX_SIFS_CHK_CCA_P20 BIT(16)
+#define B_AX_CTN_CHK_TXNAV BIT(8)
+#define B_AX_CTN_CHK_INTRA_NAV BIT(7)
+#define B_AX_CTN_CHK_BASIC_NAV BIT(6)
+#define B_AX_CTN_CHK_BTCCA BIT(5)
+#define B_AX_CTN_CHK_EDCCA BIT(4)
+#define B_AX_CTN_CHK_CCA_S80 BIT(3)
+#define B_AX_CTN_CHK_CCA_S40 BIT(2)
+#define B_AX_CTN_CHK_CCA_S20 BIT(1)
+#define B_AX_CTN_CHK_CCA_P20 BIT(0)
+
+#define R_AX_SCHEDULE_ERR_IMR 0xC3E8
+#define R_AX_SCHEDULE_ERR_IMR_C1 0xE3E8
+#define B_AX_SORT_NON_IDLE_ERR_INT_EN BIT(1)
+#define B_AX_FSM_TIMEOUT_ERR_INT_EN BIT(0)
+
+#define R_AX_SCHEDULE_ERR_ISR 0xC3EC
+#define R_AX_SCHEDULE_ERR_ISR_C1 0xE3EC
+
+#define R_AX_SCH_DBG_SEL 0xC3F4
+#define R_AX_SCH_DBG_SEL_C1 0xE3F4
+#define B_AX_SCH_DBG_EN BIT(16)
+#define B_AX_SCH_CFG_CMD_SEL GENMASK(15, 8)
+#define B_AX_SCH_DBG_SEL_MASK GENMASK(7, 0)
+
+#define R_AX_SCH_DBG 0xC3F8
+#define R_AX_SCH_DBG_C1 0xE3F8
+#define B_AX_SCHEDULER_DBG_MASK GENMASK(31, 0)
+
+#define R_AX_PORT_CFG_P0 0xC400
+#define R_AX_PORT_CFG_P1 0xC440
+#define R_AX_PORT_CFG_P2 0xC480
+#define R_AX_PORT_CFG_P3 0xC4C0
+#define R_AX_PORT_CFG_P4 0xC500
+#define B_AX_BRK_SETUP BIT(16)
+#define B_AX_TBTT_UPD_SHIFT_SEL BIT(15)
+#define B_AX_BCN_DROP_ALLOW BIT(14)
+#define B_AX_TBTT_PROHIB_EN BIT(13)
+#define B_AX_BCNTX_EN BIT(12)
+#define B_AX_NET_TYPE_MASK GENMASK(11, 10)
+#define B_AX_BCN_FORCETX_EN BIT(9)
+#define B_AX_TXBCN_BTCCA_EN BIT(8)
+#define B_AX_BCNERR_CNT_EN BIT(7)
+#define B_AX_BCN_AGRES BIT(6)
+#define B_AX_TSFTR_RST BIT(5)
+#define B_AX_RX_BSSID_FIT_EN BIT(4)
+#define B_AX_TSF_UDT_EN BIT(3)
+#define B_AX_PORT_FUNC_EN BIT(2)
+#define B_AX_TXBCN_RPT_EN BIT(1)
+#define B_AX_RXBCN_RPT_EN BIT(0)
+
+#define R_AX_TBTT_PROHIB_P0 0xC404
+#define R_AX_TBTT_PROHIB_P1 0xC444
+#define R_AX_TBTT_PROHIB_P2 0xC484
+#define R_AX_TBTT_PROHIB_P3 0xC4C4
+#define R_AX_TBTT_PROHIB_P4 0xC504
+#define B_AX_TBTT_HOLD_MASK GENMASK(27, 16)
+#define B_AX_TBTT_SETUP_MASK GENMASK(7, 0)
+
+#define R_AX_BCN_AREA_P0 0xC408
+#define R_AX_BCN_AREA_P1 0xC448
+#define R_AX_BCN_AREA_P2 0xC488
+#define R_AX_BCN_AREA_P3 0xC4C8
+#define R_AX_BCN_AREA_P4 0xC508
+#define B_AX_BCN_MSK_AREA_MASK GENMASK(27, 16)
+#define B_AX_BCN_CTN_AREA_MASK GENMASK(11, 0)
+
+#define R_AX_BCNERLYINT_CFG_P0 0xC40C
+#define R_AX_BCNERLYINT_CFG_P1 0xC44C
+#define R_AX_BCNERLYINT_CFG_P2 0xC48C
+#define R_AX_BCNERLYINT_CFG_P3 0xC4CC
+#define R_AX_BCNERLYINT_CFG_P4 0xC50C
+#define B_AX_BCNERLY_MASK GENMASK(11, 0)
+
+#define R_AX_TBTTERLYINT_CFG_P0 0xC40E
+#define R_AX_TBTTERLYINT_CFG_P1 0xC44E
+#define R_AX_TBTTERLYINT_CFG_P2 0xC48E
+#define R_AX_TBTTERLYINT_CFG_P3 0xC4CE
+#define R_AX_TBTTERLYINT_CFG_P4 0xC50E
+#define B_AX_TBTTERLY_MASK GENMASK(11, 0)
+
+#define R_AX_TBTT_AGG_P0 0xC412
+#define R_AX_TBTT_AGG_P1 0xC452
+#define R_AX_TBTT_AGG_P2 0xC492
+#define R_AX_TBTT_AGG_P3 0xC4D2
+#define R_AX_TBTT_AGG_P4 0xC512
+#define B_AX_TBTT_AGG_NUM_MASK GENMASK(15, 8)
+
+#define R_AX_BCN_SPACE_CFG_P0 0xC414
+#define R_AX_BCN_SPACE_CFG_P1 0xC454
+#define R_AX_BCN_SPACE_CFG_P2 0xC494
+#define R_AX_BCN_SPACE_CFG_P3 0xC4D4
+#define R_AX_BCN_SPACE_CFG_P4 0xC514
+#define B_AX_SUB_BCN_SPACE_MASK GENMASK(23, 16)
+#define B_AX_BCN_SPACE_MASK GENMASK(15, 0)
+
+#define R_AX_BCN_FORCETX_P0 0xC418
+#define R_AX_BCN_FORCETX_P1 0xC458
+#define R_AX_BCN_FORCETX_P2 0xC498
+#define R_AX_BCN_FORCETX_P3 0xC4D8
+#define R_AX_BCN_FORCETX_P4 0xC518
+#define B_AX_FORCE_BCN_CURRCNT_MASK GENMASK(23, 16)
+#define B_AX_FORCE_BCN_NUM_MASK GENMASK(15, 0)
+#define B_AX_BCN_MAX_ERR_MASK GENMASK(7, 0)
+
+#define R_AX_BCN_ERR_CNT_P0 0xC420
+#define R_AX_BCN_ERR_CNT_P1 0xC460
+#define R_AX_BCN_ERR_CNT_P2 0xC4A0
+#define R_AX_BCN_ERR_CNT_P3 0xC4E0
+#define R_AX_BCN_ERR_CNT_P4 0xC520
+#define B_AX_BCN_ERR_CNT_SUM_MASK GENMASK(31, 24)
+#define B_AX_BCN_ERR_CNT_NAV_MASK GENMASK(23, 16)
+#define B_AX_BCN_ERR_CNT_EDCCA_MASK GENMASK(15, 0)
+#define B_AX_BCN_ERR_CNT_CCA_MASK GENMASK(7, 0)
+
+#define R_AX_BCN_ERR_FLAG_P0 0xC424
+#define R_AX_BCN_ERR_FLAG_P1 0xC464
+#define R_AX_BCN_ERR_FLAG_P2 0xC4A4
+#define R_AX_BCN_ERR_FLAG_P3 0xC4E4
+#define R_AX_BCN_ERR_FLAG_P4 0xC524
+#define B_AX_BCN_ERR_FLAG_OTHERS BIT(6)
+#define B_AX_BCN_ERR_FLAG_MAC BIT(5)
+#define B_AX_BCN_ERR_FLAG_TXON BIT(4)
+#define B_AX_BCN_ERR_FLAG_SRCHEND BIT(3)
+#define B_AX_BCN_ERR_FLAG_INVALID BIT(2)
+#define B_AX_BCN_ERR_FLAG_CMP BIT(1)
+#define B_AX_BCN_ERR_FLAG_LOCK BIT(0)
+
+#define R_AX_DTIM_CTRL_P0 0xC426
+#define R_AX_DTIM_CTRL_P1 0xC466
+#define R_AX_DTIM_CTRL_P2 0xC4A6
+#define R_AX_DTIM_CTRL_P3 0xC4E6
+#define R_AX_DTIM_CTRL_P4 0xC526
+#define B_AX_DTIM_NUM_MASK GENMASK(15, 0)
+#define B_AX_DTIM_CURRCNT_MASK GENMASK(7, 0)
+
+#define R_AX_TBTT_SHIFT_P0 0xC428
+#define R_AX_TBTT_SHIFT_P1 0xC468
+#define R_AX_TBTT_SHIFT_P2 0xC4A8
+#define R_AX_TBTT_SHIFT_P3 0xC4E8
+#define R_AX_TBTT_SHIFT_P4 0xC528
+#define B_AX_TBTT_SHIFT_OFST_MASK GENMASK(11, 0)
+
+#define R_AX_BCN_CNT_TMR_P0 0xC434
+#define R_AX_BCN_CNT_TMR_P1 0xC474
+#define R_AX_BCN_CNT_TMR_P2 0xC4B4
+#define R_AX_BCN_CNT_TMR_P3 0xC4F4
+#define R_AX_BCN_CNT_TMR_P4 0xC534
+#define B_AX_BCN_CNT_TMR_MASK GENMASK(31, 0)
+
+#define R_AX_TSFTR_LOW_P0 0xC438
+#define R_AX_TSFTR_LOW_P1 0xC478
+#define R_AX_TSFTR_LOW_P2 0xC4B8
+#define R_AX_TSFTR_LOW_P3 0xC4F8
+#define R_AX_TSFTR_LOW_P4 0xC538
+#define B_AX_TSFTR_LOW_MASK GENMASK(31, 0)
+
+#define R_AX_TSFTR_HIGH_P0 0xC43C
+#define R_AX_TSFTR_HIGH_P1 0xC47C
+#define R_AX_TSFTR_HIGH_P2 0xC4BC
+#define R_AX_TSFTR_HIGH_P3 0xC4FC
+#define R_AX_TSFTR_HIGH_P4 0xC53C
+#define B_AX_TSFTR_HIGH_MASK GENMASK(31, 0)
+
+#define R_AX_MBSSID_CTRL 0xC568
+#define R_AX_MBSSID_CTRL_C1 0xE568
+#define B_AX_P0MB_ALL_MASK GENMASK(23, 1)
+#define B_AX_P0MB_NUM_MASK GENMASK(19, 16)
+#define B_AX_P0MB15_EN BIT(15)
+#define B_AX_P0MB14_EN BIT(14)
+#define B_AX_P0MB13_EN BIT(13)
+#define B_AX_P0MB12_EN BIT(12)
+#define B_AX_P0MB11_EN BIT(11)
+#define B_AX_P0MB10_EN BIT(10)
+#define B_AX_P0MB9_EN BIT(9)
+#define B_AX_P0MB8_EN BIT(8)
+#define B_AX_P0MB7_EN BIT(7)
+#define B_AX_P0MB6_EN BIT(6)
+#define B_AX_P0MB5_EN BIT(5)
+#define B_AX_P0MB4_EN BIT(4)
+#define B_AX_P0MB3_EN BIT(3)
+#define B_AX_P0MB2_EN BIT(2)
+#define B_AX_P0MB1_EN BIT(1)
+
+#define R_AX_AMPDU_AGG_LIMIT 0xC610
+#define B_AX_AMPDU_MAX_TIME_MASK GENMASK(31, 24)
+#define B_AX_RA_TRY_RATE_AGG_LMT_MASK GENMASK(23, 16)
+#define B_AX_RTS_MAX_AGG_NUM_MASK GENMASK(15, 8)
+#define B_AX_MAX_AGG_NUM_MASK GENMASK(7, 0)
+
+#define R_AX_AGG_LEN_HT_0 0xC614
+#define R_AX_AGG_LEN_HT_0_C1 0xE614
+#define B_AX_AMPDU_MAX_LEN_HT_MASK GENMASK(31, 16)
+#define B_AX_RTS_TXTIME_TH_MASK GENMASK(15, 8)
+#define B_AX_RTS_LEN_TH_MASK GENMASK(7, 0)
+
+#define S_AX_CTS2S_TH_SEC_256B 1
+#define R_AX_SIFS_SETTING 0xC624
+#define R_AX_SIFS_SETTING_C1 0xE624
+#define B_AX_HW_CTS2SELF_PKT_LEN_TH_MASK GENMASK(31, 24)
+#define B_AX_HW_CTS2SELF_PKT_LEN_TH_TWW_MASK GENMASK(23, 18)
+#define B_AX_HW_CTS2SELF_EN BIT(16)
+#define B_AX_SPEC_SIFS_OFDM_PTCL_SH 8
+#define B_AX_SPEC_SIFS_OFDM_PTCL_MASK GENMASK(15, 8)
+#define B_AX_SPEC_SIFS_CCK_PTCL_MASK GENMASK(7, 0)
+#define S_AX_CTS2S_TH_1K 4
+
+#define R_AX_TXRATE_CHK 0xC628
+#define R_AX_TXRATE_CHK_C1 0xE628
+#define B_AX_DEFT_RATE_MASK GENMASK(15, 7)
+#define B_AX_BAND_MODE BIT(4)
+#define B_AX_MAX_TXNSS_MASK GENMASK(3, 2)
+#define B_AX_RTS_LIMIT_IN_OFDM6 BIT(1)
+#define B_AX_CHECK_CCK_EN BIT(0)
+
+#define R_AX_TXCNT 0xC62C
+#define R_AX_TXCNT_C1 0xE62C
+#define B_AX_ADD_TXCNT_BY BIT(31)
+#define B_AX_S_TXCNT_LMT_MASK GENMASK(29, 24)
+#define B_AX_L_TXCNT_LMT_MASK GENMASK(21, 16)
+
+#define R_AX_MBSSID_DROP_0 0xC63C
+#define R_AX_MBSSID_DROP_0_C1 0xE63C
+#define B_AX_GI_LTF_FB_SEL BIT(30)
+#define B_AX_RATE_SEL_MASK GENMASK(29, 24)
+#define B_AX_PORT_DROP_4_0_MASK GENMASK(20, 16)
+#define B_AX_MBSSID_DROP_15_0_MASK GENMASK(15, 0)
+
+#define R_AX_BT_PLT 0xC67C
+#define R_AX_BT_PLT_C1 0xE67C
+#define B_AX_BT_PLT_PKT_CNT_MASK GENMASK(31, 16)
+#define B_AX_BT_PLT_RST BIT(9)
+#define B_AX_PLT_EN BIT(8)
+#define B_AX_RX_PLT_GNT_LTE_RX BIT(7)
+#define B_AX_RX_PLT_GNT_BT_RX BIT(6)
+#define B_AX_RX_PLT_GNT_BT_TX BIT(5)
+#define B_AX_RX_PLT_GNT_WL BIT(4)
+#define B_AX_TX_PLT_GNT_LTE_RX BIT(3)
+#define B_AX_TX_PLT_GNT_BT_RX BIT(2)
+#define B_AX_TX_PLT_GNT_BT_TX BIT(1)
+#define B_AX_TX_PLT_GNT_WL BIT(0)
+
+#define R_AX_PTCL_BSS_COLOR_0 0xC6A0
+#define R_AX_PTCL_BSS_COLOR_0_C1 0xE6A0
+#define B_AX_BSS_COLOB_AX_PORT_3_MASK GENMASK(29, 24)
+#define B_AX_BSS_COLOB_AX_PORT_2_MASK GENMASK(21, 16)
+#define B_AX_BSS_COLOB_AX_PORT_1_MASK GENMASK(13, 8)
+#define B_AX_BSS_COLOB_AX_PORT_0_MASK GENMASK(5, 0)
+
+#define R_AX_PTCL_BSS_COLOR_1 0xC6A4
+#define R_AX_PTCL_BSS_COLOR_1_C1 0xE6A4
+#define B_AX_BSS_COLOB_AX_PORT_4_MASK GENMASK(5, 0)
+
+#define R_AX_PTCL_IMR0 0xC6C0
+#define R_AX_PTCL_IMR0_C1 0xE6C0
+#define B_AX_F2PCMD_USER_ALLC_ERR_INT_EN BIT(28)
+#define B_AX_TX_RECORD_PKTID_ERR_INT_EN BIT(23)
+
+#define R_AX_PTCL_ISR0 0xC6C4
+#define R_AX_PTCL_ISR0_C1 0xE6C4
+
+#define S_AX_PTCL_TO_2MS 0x3F
+#define R_AX_PTCL_FSM_MON 0xC6E8
+#define R_AX_PTCL_FSM_MON_C1 0xE6E8
+#define B_AX_PTCL_TX_ARB_TO_MODE BIT(6)
+#define B_AX_PTCL_TX_ARB_TO_THR_MASK GENMASK(5, 0)
+
+#define R_AX_PTCL_TX_CTN_SEL 0xC6EC
+#define R_AX_PTCL_TX_CTN_SEL_C1 0xE6EC
+#define B_AX_PTCL_TX_ON_STAT BIT(7)
+
+#define R_AX_PTCL_DBG_INFO 0xC6F0
+#define R_AX_PTCL_DBG_INFO_C1 0xE6F0
+#define B_AX_PTCL_DBG_INFO_MASK GENMASK(31, 0)
+#define R_AX_PTCL_DBG 0xC6F4
+#define R_AX_PTCL_DBG_C1 0xE6F4
+#define B_AX_PTCL_DBG_EN BIT(8)
+#define B_AX_PTCL_DBG_SEL_MASK GENMASK(7, 0)
+
+#define R_AX_DLE_CTRL 0xC800
+#define R_AX_DLE_CTRL_C1 0xE800
+#define B_AX_NO_RESERVE_PAGE_ERR_IMR BIT(23)
+#define B_AX_RXDATA_FSM_HANG_ERROR_IMR BIT(15)
+#define R_AX_RXDMA_PKT_INFO_0 0xC814
+#define R_AX_RXDMA_PKT_INFO_1 0xC818
+#define R_AX_RXDMA_PKT_INFO_2 0xC81C
+
+#define R_AX_TCR1 0xCA04
+#define R_AX_TCR1_C1 0xEA04
+#define B_AX_TXDFIFO_THRESHOLD GENMASK(31, 28)
+#define B_AX_TCR_CCK_LOCK_CLK BIT(27)
+#define B_AX_TCR_FORCE_READ_TXDFIFO BIT(26)
+#define B_AX_TCR_USTIME GENMASK(23, 16)
+#define B_AX_TCR_SMOOTH_VAL BIT(15)
+#define B_AX_TCR_SMOOTH_CTRL BIT(14)
+#define B_AX_CS_REQ_VAL BIT(13)
+#define B_AX_CS_REQ_SEL BIT(12)
+#define B_AX_TCR_ZLD_USTIME_AFTERPHYTXON GENMASK(11, 8)
+#define B_AX_TCR_TXTIMEOUT GENMASK(7, 0)
+
+#define R_AX_PPWRBIT_SETTING 0xCA0C
+#define R_AX_PPWRBIT_SETTING_C1 0xEA0C
+
+#define R_AX_MACTX_DBG_SEL_CNT 0xCA20
+#define R_AX_MACTX_DBG_SEL_CNT_C1 0xEA20
+#define B_AX_MACTX_MPDU_CNT GENMASK(31, 24)
+#define B_AX_MACTX_DMA_CNT GENMASK(23, 16)
+#define B_AX_LENGTH_ERR_FLAG_U3 BIT(11)
+#define B_AX_LENGTH_ERR_FLAG_U2 BIT(10)
+#define B_AX_LENGTH_ERR_FLAG_U1 BIT(9)
+#define B_AX_LENGTH_ERR_FLAG_U0 BIT(8)
+#define B_AX_DBGSEL_MACTX_MASK GENMASK(5, 0)
+
+#define R_AX_WMAC_TX_CTRL_DEBUG 0xCAE4
+#define R_AX_WMAC_TX_CTRL_DEBUG_C1 0xEAE4
+#define B_AX_TX_CTRL_DEBUG_SEL_MASK GENMASK(3, 0)
+
+#define R_AX_WMAC_TX_INFO0_DEBUG 0xCAE8
+#define R_AX_WMAC_TX_INFO0_DEBUG_C1 0xEAE8
+#define B_AX_TX_CTRL_INFO_P0_MASK GENMASK(31, 0)
+
+#define R_AX_WMAC_TX_INFO1_DEBUG 0xCAEC
+#define R_AX_WMAC_TX_INFO1_DEBUG_C1 0xEAEC
+#define B_AX_TX_CTRL_INFO_P1_MASK GENMASK(31, 0)
+
+#define R_AX_RSP_CHK_SIG 0xCC00
+#define R_AX_RSP_CHK_SIG_C1 0xEC00
+#define B_AX_RSP_STATIC_RTS_CHK_SERV_BW_EN BIT(30)
+#define B_AX_RSP_TBPPDU_CHK_PWR BIT(29)
+#define B_AX_RSP_CHK_BASIC_NAV BIT(21)
+#define B_AX_RSP_CHK_INTRA_NAV BIT(20)
+#define B_AX_RSP_CHK_TXNAV BIT(19)
+#define B_AX_TXDATA_END_PS_OPT BIT(18)
+#define B_AX_CHECK_SOUNDING_SEQ BIT(17)
+#define B_AX_RXBA_IGNOREA2 BIT(16)
+#define B_AX_ACKTO_CCK_MASK GENMASK(15, 8)
+#define B_AX_ACKTO_MASK GENMASK(7, 0)
+
+#define R_AX_TRXPTCL_RESP_0 0xCC04
+#define R_AX_TRXPTCL_RESP_0_C1 0xEC04
+#define B_AX_WMAC_RESP_STBC_EN BIT(31)
+#define B_AX_WMAC_RXFTM_TXACK_SC BIT(30)
+#define B_AX_WMAC_RXFTM_TXACKBWEQ BIT(29)
+#define B_AX_RSP_CHK_SEC_CCA_80 BIT(28)
+#define B_AX_RSP_CHK_SEC_CCA_40 BIT(27)
+#define B_AX_RSP_CHK_SEC_CCA_20 BIT(26)
+#define B_AX_RSP_CHK_BTCCA BIT(25)
+#define B_AX_RSP_CHK_EDCCA BIT(24)
+#define B_AX_RSP_CHK_CCA BIT(23)
+#define B_AX_WMAC_LDPC_EN BIT(22)
+#define B_AX_WMAC_SGIEN BIT(21)
+#define B_AX_WMAC_SPLCPEN BIT(20)
+#define B_AX_WMAC_BESP_EARLY_TXBA BIT(17)
+#define B_AX_WMAC_SPEC_SIFS_OFDM_MASK GENMASK(15, 8)
+#define B_AX_WMAC_SPEC_SIFS_CCK_MASK GENMASK(7, 0)
+#define WMAC_SPEC_SIFS_OFDM_52A 0x15
+#define WMAC_SPEC_SIFS_OFDM_52B 0x11
+#define WMAC_SPEC_SIFS_OFDM_52C 0x11
+#define WMAC_SPEC_SIFS_CCK 0xA
+
+#define R_AX_MAC_LOOPBACK 0xCC20
+#define R_AX_MAC_LOOPBACK_C1 0xEC20
+#define B_AX_MACLBK_EN BIT(0)
+
+#define R_AX_RXTRIG_TEST_USER_2 0xCCB0
+#define R_AX_RXTRIG_TEST_USER_2_C1 0xECB0
+#define B_AX_RXTRIG_MACID_MASK GENMASK(31, 24)
+#define B_AX_RXTRIG_RU26_DIS BIT(21)
+#define B_AX_RXTRIG_FCSCHK_EN BIT(20)
+#define B_AX_RXTRIG_PORT_SEL_MASK GENMASK(19, 17)
+#define B_AX_RXTRIG_EN BIT(16)
+#define B_AX_RXTRIG_USERINFO_2_MASK GENMASK(15, 0)
+
+#define R_AX_WMAC_TX_TF_INFO_0 0xCCD0
+#define R_AX_WMAC_TX_TF_INFO_0_C1 0xECD0
+#define B_AX_WMAC_TX_TF_INFO_SEL_MASK GENMASK(2, 0)
+
+#define R_AX_WMAC_TX_TF_INFO_1 0xCCD4
+#define R_AX_WMAC_TX_TF_INFO_1_C1 0xECD4
+#define B_AX_WMAC_TX_TF_INFO_P0_MASK GENMASK(31, 0)
+
+#define R_AX_WMAC_TX_TF_INFO_2 0xCCD8
+#define R_AX_WMAC_TX_TF_INFO_2_C1 0xECD8
+#define B_AX_WMAC_TX_TF_INFO_P1_MASK GENMASK(31, 0)
+
+#define R_AX_TMAC_ERR_IMR_ISR 0xCCEC
+#define R_AX_TMAC_ERR_IMR_ISR_C1 0xECEC
+
+#define R_AX_DBGSEL_TRXPTCL 0xCCF4
+#define R_AX_DBGSEL_TRXPTCL_C1 0xECF4
+#define B_AX_DBGSEL_TRXPTCL_MASK GENMASK(7, 0)
+
+#define R_AX_PHYINFO_ERR_IMR 0xCCFC
+#define R_AX_PHYINFO_ERR_IMR_C1 0xECFC
+#define B_AX_CSI_ON_TIMEOUT BIT(29)
+#define B_AX_STS_ON_TIMEOUT BIT(28)
+#define B_AX_DATA_ON_TIMEOUT BIT(27)
+#define B_AX_OFDM_CCA_TIMEOUT BIT(26)
+#define B_AX_CCK_CCA_TIMEOUT BIT(25)
+#define B_AXC_PHY_TXON_TIMEOUT BIT(24)
+#define B_AX_CSI_ON_TIMEOUT_INT_EN BIT(21)
+#define B_AX_STS_ON_TIMEOUT_INT_EN BIT(20)
+#define B_AX_DATA_ON_TIMEOUT_INT_EN BIT(19)
+#define B_AX_OFDM_CCA_TIMEOUT_INT_EN BIT(18)
+#define B_AX_CCK_CCA_TIMEOUT_INT_EN BIT(17)
+#define B_AX_PHY_TXON_TIMEOUT_INT_EN BIT(16)
+#define B_AX_PHYINTF_TIMEOUT_THR_MSAK GENMASK(5, 0)
+
+#define R_AX_PHYINFO_ERR_ISR 0xCCFC
+#define R_AX_PHYINFO_ERR_ISR_C1 0xECFC
+
+#define R_AX_BFMER_CTRL_0 0xCD78
+#define R_AX_BFMER_CTRL_0_C1 0xED78
+#define B_AX_BFMER_HE_CSI_OFFSET_MASK GENMASK(31, 24)
+#define B_AX_BFMER_VHT_CSI_OFFSET_MASK GENMASK(23, 16)
+#define B_AX_BFMER_HT_CSI_OFFSET_MASK GENMASK(15, 8)
+#define B_AX_BFMER_NDP_BFEN BIT(2)
+#define B_AX_BFMER_VHT_BFPRT_CHK BIT(0)
+
+#define R_AX_BFMEE_RESP_OPTION 0xCD80
+#define R_AX_BFMEE_RESP_OPTION_C1 0xED80
+#define B_AX_BFMEE_NDP_RX_STANDBY_TIMER_MASK GENMASK(31, 24)
+#define B_AX_BFMEE_BFRP_RX_STANDBY_TIMER_MASK GENMASK(23, 20)
+#define B_AX_MU_BFRPTSEG_SEL_MASK GENMASK(18, 17)
+#define B_AX_BFMEE_NDP_RXSTDBY_SEL BIT(16)
+#define BFRP_RX_STANDBY_TIMER 0x0
+#define NDP_RX_STANDBY_TIMER 0xFF
+#define B_AX_BFMEE_HE_NDPA_EN BIT(2)
+#define B_AX_BFMEE_VHT_NDPA_EN BIT(1)
+#define B_AX_BFMEE_HT_NDPA_EN BIT(0)
+
+#define R_AX_TRXPTCL_RESP_CSI_CTRL_0 0xCD88
+#define R_AX_TRXPTCL_RESP_CSI_CTRL_0_C1 0xED88
+#define R_AX_TRXPTCL_RESP_CSI_CTRL_1 0xCD94
+#define R_AX_TRXPTCL_RESP_CSI_CTRL_1_C1 0xED94
+#define B_AX_BFMEE_CSISEQ_SEL BIT(29)
+#define B_AX_BFMEE_BFPARAM_SEL BIT(28)
+#define B_AX_BFMEE_OFDM_LEN_TH_MASK GENMASK(27, 24)
+#define B_AX_BFMEE_BF_PORT_SEL BIT(23)
+#define B_AX_BFMEE_USE_NSTS BIT(22)
+#define B_AX_BFMEE_CSI_RATE_FB_EN BIT(21)
+#define B_AX_BFMEE_CSI_GID_SEL BIT(20)
+#define B_AX_BFMEE_CSI_RSC_MASK GENMASK(19, 18)
+#define B_AX_BFMEE_CSI_FORCE_RETE_EN BIT(17)
+#define B_AX_BFMEE_CSI_USE_NDPARATE BIT(16)
+#define B_AX_BFMEE_CSI_WITHHTC_EN BIT(15)
+#define B_AX_BFMEE_CSIINFO0_BF_EN BIT(14)
+#define B_AX_BFMEE_CSIINFO0_STBC_EN BIT(13)
+#define B_AX_BFMEE_CSIINFO0_LDPC_EN BIT(12)
+#define B_AX_BFMEE_CSIINFO0_CS_MASK GENMASK(11, 10)
+#define B_AX_BFMEE_CSIINFO0_CB_MASK GENMASK(9, 8)
+#define B_AX_BFMEE_CSIINFO0_NG_MASK GENMASK(7, 6)
+#define B_AX_BFMEE_CSIINFO0_NR_MASK GENMASK(5, 3)
+#define B_AX_BFMEE_CSIINFO0_NC_MASK GENMASK(2, 0)
+
+#define R_AX_TRXPTCL_RESP_CSI_RRSC 0xCD8C
+#define R_AX_TRXPTCL_RESP_CSI_RRSC_C1 0xED8C
+#define CSI_RRSC_BMAP 0x29292911
+
+#define R_AX_TRXPTCL_RESP_CSI_RATE 0xCD90
+#define R_AX_TRXPTCL_RESP_CSI_RATE_C1 0xED90
+#define B_AX_BFMEE_HE_CSI_RATE_MASK GENMASK(22, 16)
+#define B_AX_BFMEE_VHT_CSI_RATE_MASK GENMASK(14, 8)
+#define B_AX_BFMEE_HT_CSI_RATE_MASK GENMASK(6, 0)
+#define CSI_INIT_RATE_HE 0x3
+#define CSI_INIT_RATE_VHT 0x3
+#define CSI_INIT_RATE_HT 0x3
+
+#define R_AX_RCR 0xCE00
+#define R_AX_RCR_C1 0xEE00
+#define B_AX_STOP_RX_IN BIT(11)
+#define B_AX_DRV_INFO_SIZE_MASK GENMASK(10, 8)
+#define B_AX_CH_EN_MASK GENMASK(3, 0)
+
+#define R_AX_DLK_PROTECT_CTL 0xCE02
+#define R_AX_DLK_PROTECT_CTL_C1 0xEE02
+#define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8)
+#define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4)
+
+#define R_AX_PLCP_HDR_FLTR 0xCE04
+#define R_AX_PLCP_HDR_FLTR_C1 0xEE04
+#define B_AX_DIS_CHK_MIN_LEN BIT(8)
+#define B_AX_HE_SIGB_CRC_CHK BIT(6)
+#define B_AX_VHT_MU_SIGB_CRC_CHK BIT(5)
+#define B_AX_VHT_SU_SIGB_CRC_CHK BIT(4)
+#define B_AX_SIGA_CRC_CHK BIT(3)
+#define B_AX_LSIG_PARITY_CHK_EN BIT(2)
+#define B_AX_CCK_SIG_CHK BIT(1)
+#define B_AX_CCK_CRC_CHK BIT(0)
+
+#define R_AX_RX_FLTR_OPT 0xCE20
+#define R_AX_RX_FLTR_OPT_C1 0xEE20
+#define B_AX_UID_FILTER_MASK GENMASK(31, 24)
+#define B_AX_UNSPT_FILTER_SH 22
+#define B_AX_UNSPT_FILTER_MASK GENMASK(23, 22)
+#define B_AX_RX_MPDU_MAX_LEN_MASK GENMASK(21, 16)
+#define B_AX_RX_MPDU_MAX_LEN_SIZE 0x3f
+#define B_AX_A_FTM_REQ BIT(14)
+#define B_AX_A_ERR_PKT BIT(13)
+#define B_AX_A_UNSUP_PKT BIT(12)
+#define B_AX_A_CRC32_ERR BIT(11)
+#define B_AX_A_PWR_MGNT BIT(10)
+#define B_AX_A_BCN_CHK_RULE_MASK GENMASK(9, 8)
+#define B_AX_A_BCN_CHK_EN BIT(7)
+#define B_AX_A_MC_LIST_CAM_MATCH BIT(6)
+#define B_AX_A_BC_CAM_MATCH BIT(5)
+#define B_AX_A_UC_CAM_MATCH BIT(4)
+#define B_AX_A_MC BIT(3)
+#define B_AX_A_BC BIT(2)
+#define B_AX_A_A1_MATCH BIT(1)
+#define B_AX_SNIFFER_MODE BIT(0)
+#define DEFAULT_AX_RX_FLTR (B_AX_A_A1_MATCH | B_AX_A_BC | B_AX_A_MC | \
+ B_AX_A_UC_CAM_MATCH | B_AX_A_BC_CAM_MATCH | \
+ B_AX_A_PWR_MGNT | B_AX_A_FTM_REQ | \
+ u32_encode_bits(3, B_AX_UID_FILTER_MASK) | \
+ B_AX_A_BCN_CHK_EN)
+#define B_AX_RX_FLTR_CFG_MASK ((u32)~B_AX_RX_MPDU_MAX_LEN_MASK)
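/*
 * Illustrative sketch (editorial, not part of this patch): DEFAULT_AX_RX_FLTR
 * packs the usual accept bits plus a UID filter value, while
 * B_AX_RX_FLTR_CFG_MASK excludes the MPDU-max-length field so an update can
 * leave that field untouched.  A minimal read-modify-write, assuming
 * rtw89_read32()/rtw89_write32() as the driver's accessors.
 */
static inline void example_set_default_rx_filter(struct rtw89_dev *rtwdev)
{
	u32 val = rtw89_read32(rtwdev, R_AX_RX_FLTR_OPT);

	val &= ~B_AX_RX_FLTR_CFG_MASK;	/* keep only RX_MPDU_MAX_LEN bits */
	val |= DEFAULT_AX_RX_FLTR;	/* apply the default accept policy */
	rtw89_write32(rtwdev, R_AX_RX_FLTR_OPT, val);
}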
+
+#define R_AX_CTRL_FLTR 0xCE24
+#define R_AX_CTRL_FLTR_C1 0xEE24
+#define R_AX_MGNT_FLTR 0xCE28
+#define R_AX_MGNT_FLTR_C1 0xEE28
+#define R_AX_DATA_FLTR 0xCE2C
+#define R_AX_DATA_FLTR_C1 0xEE2C
+#define RX_FLTR_FRAME_DROP 0x00000000
+#define RX_FLTR_FRAME_TO_HOST 0x55555555
+#define RX_FLTR_FRAME_TO_WLCPU 0xAAAAAAAA
+
+#define R_AX_ADDR_CAM_CTRL 0xCE34
+#define R_AX_ADDR_CAM_CTRL_C1 0xEE34
+#define B_AX_ADDR_CAM_RANGE_MASK GENMASK(23, 16)
+#define B_AX_ADDR_CAM_CMPLIMT_MASK GENMASK(15, 12)
+#define B_AX_ADDR_CAM_CLR BIT(8)
+#define B_AX_ADDR_CAM_A2_B0_CHK BIT(2)
+#define B_AX_ADDR_CAM_SRCH_PERPKT BIT(1)
+#define B_AX_ADDR_CAM_EN BIT(0)
+
+#define R_AX_RESPBA_CAM_CTRL 0xCE3C
+#define R_AX_RESPBA_CAM_CTRL_C1 0xEE3C
+#define B_AX_SSN_SEL BIT(2)
+
+#define R_AX_PPDU_STAT 0xCE40
+#define R_AX_PPDU_STAT_C1 0xEE40
+#define B_AX_PPDU_STAT_RPT_TRIG BIT(8)
+#define B_AX_PPDU_STAT_RPT_CRC32 BIT(5)
+#define B_AX_PPDU_STAT_RPT_A1M BIT(4)
+#define B_AX_APP_PLCP_HDR_RPT BIT(3)
+#define B_AX_APP_RX_CNT_RPT BIT(2)
+#define B_AX_APP_MAC_INFO_RPT BIT(1)
+#define B_AX_PPDU_STAT_RPT_EN BIT(0)
+
+#define R_AX_RX_SR_CTRL 0xCE4A
+#define R_AX_RX_SR_CTRL_C1 0xEE4A
+#define B_AX_SR_EN BIT(0)
+
+#define R_AX_RX_STATE_MONITOR 0xCEF0
+#define R_AX_RX_STATE_MONITOR_C1 0xEEF0
+#define B_AX_RX_STATE_MONITOR_MASK GENMASK(31, 0)
+#define B_AX_STATE_CUR_MASK GENMASK(31, 16)
+#define B_AX_STATE_NXT_MASK GENMASK(13, 8)
+#define B_AX_STATE_UPD BIT(7)
+#define B_AX_STATE_SEL_MASK GENMASK(4, 0)
+
+#define R_AX_RMAC_ERR_ISR 0xCEF4
+#define R_AX_RMAC_ERR_ISR_C1 0xEEF4
+#define B_AX_RXERR_INTPS_EN BIT(31)
+#define B_AX_RMAC_RX_CSI_TIMEOUT_INT_EN BIT(19)
+#define B_AX_RMAC_RX_TIMEOUT_INT_EN BIT(18)
+#define B_AX_RMAC_CSI_TIMEOUT_INT_EN BIT(17)
+#define B_AX_RMAC_DATA_ON_TIMEOUT_INT_EN BIT(16)
+#define B_AX_RMAC_CCA_TIMEOUT_INT_EN BIT(15)
+#define B_AX_RMAC_DMA_TIMEOUT_INT_EN BIT(14)
+#define B_AX_RMAC_DATA_ON_TO_IDLE_TIMEOUT_INT_EN BIT(13)
+#define B_AX_RMAC_CCA_TO_IDLE_TIMEOUT_INT_EN BIT(12)
+#define B_AX_RMAC_RX_CSI_TIMEOUT_FLAG BIT(7)
+#define B_AX_RMAC_RX_TIMEOUT_FLAG BIT(6)
+#define B_AX_BMAC_CSI_TIMEOUT_FLAG BIT(5)
+#define B_AX_BMAC_DATA_ON_TIMEOUT_FLAG BIT(4)
+#define B_AX_BMAC_CCA_TIMEOUT_FLAG BIT(3)
+#define B_AX_BMAC_DMA_TIMEOUT_FLAG BIT(2)
+#define B_AX_BMAC_DATA_ON_TO_IDLE_TIMEOUT_FLAG BIT(1)
+#define B_AX_BMAC_CCA_TO_IDLE_TIMEOUT_FLAG BIT(0)
+
+#define R_AX_RMAC_PLCP_MON 0xCEF8
+#define R_AX_RMAC_PLCP_MON_C1 0xEEF8
+#define B_AX_RMAC_PLCP_MON_MASK GENMASK(31, 0)
+#define B_AX_PCLP_MON_SEL_MASK GENMASK(31, 28)
+#define B_AX_PCLP_MON_CONT_MASK GENMASK(27, 0)
+
+#define R_AX_RX_DEBUG_SELECT 0xCEFC
+#define R_AX_RX_DEBUG_SELECT_C1 0xEEFC
+#define B_AX_DEBUG_SEL_MASK GENMASK(7, 0)
+
+#define R_AX_PWR_RATE_CTRL 0xD200
+#define R_AX_PWR_RATE_CTRL_C1 0xF200
+#define B_AX_FORCE_PWR_BY_RATE_EN BIT(9)
+#define B_AX_FORCE_PWR_BY_RATE_VALUE_MASK GENMASK(8, 0)
+
+#define R_AX_PWR_RATE_OFST_CTRL 0xD204
+#define R_AX_PWR_COEXT_CTRL 0xD220
+#define B_AX_TXAGC_BT_EN BIT(1)
+#define B_AX_TXAGC_BT_MASK GENMASK(11, 3)
+
+#define R_AX_PWR_UL_CTRL0 0xD240
+#define R_AX_PWR_UL_CTRL2 0xD248
+#define B_AX_PWR_UL_CFO_MASK GENMASK(2, 0)
+#define B_AX_PWR_UL_CTRL2_MASK 0x07700007
+#define R_AX_PWR_UL_TB_CTRL 0xD288
+#define B_AX_PWR_UL_TB_CTRL_EN BIT(31)
+#define R_AX_PWR_UL_TB_1T 0xD28C
+#define B_AX_PWR_UL_TB_1T_MASK GENMASK(4, 0)
+#define R_AX_PWR_UL_TB_2T 0xD290
+#define B_AX_PWR_UL_TB_2T_MASK GENMASK(4, 0)
+#define R_AX_PWR_BY_RATE_TABLE0 0xD2C0
+#define R_AX_PWR_BY_RATE_TABLE10 0xD2E8
+#define R_AX_PWR_BY_RATE R_AX_PWR_BY_RATE_TABLE0
+#define R_AX_PWR_BY_RATE_MAX R_AX_PWR_BY_RATE_TABLE10
+#define R_AX_PWR_LMT_TABLE0 0xD2EC
+#define R_AX_PWR_LMT_TABLE19 0xD338
+#define R_AX_PWR_LMT R_AX_PWR_LMT_TABLE0
+#define R_AX_PWR_LMT_MAX R_AX_PWR_LMT_TABLE19
+#define R_AX_PWR_RU_LMT_TABLE0 0xD33C
+#define R_AX_PWR_RU_LMT_TABLE11 0xD368
+#define R_AX_PWR_RU_LMT R_AX_PWR_RU_LMT_TABLE0
+#define R_AX_PWR_RU_LMT_MAX R_AX_PWR_RU_LMT_TABLE11
+#define R_AX_PWR_MACID_LMT_TABLE0 0xD36C
+#define R_AX_PWR_MACID_LMT_TABLE127 0xD568
+
+#define R_AX_TXPWR_IMR 0xD9E0
+#define R_AX_TXPWR_IMR_C1 0xF9E0
+#define R_AX_TXPWR_ISR 0xD9E4
+#define R_AX_TXPWR_ISR_C1 0xF9E4
+
+#define R_AX_BTC_CFG 0xDA00
+#define B_AX_DIS_BTC_CLK_G BIT(2)
+
+#define R_AX_WL_PRI_MSK 0xDA10
+#define B_AX_PTA_WL_PRI_MASK_BCNQ BIT(8)
+
+#define R_AX_BTC_FUNC_EN 0xDA20
+#define R_AX_BTC_FUNC_EN_C1 0xFA20
+#define B_AX_PTA_WL_TX_EN BIT(1)
+#define B_AX_PTA_EDCCA_EN BIT(0)
+
+#define R_BTC_BREAK_TABLE 0xDA2C
+#define BTC_BREAK_PARAM 0xf0ffffff
+
+#define R_BTC_BT_COEX_MSK_TABLE 0xDA30
+#define B_BTC_PRI_MASK_TX_RESP_V1 BIT(3)
+
+#define R_AX_BT_COEX_CFG_2 0xDA34
+#define R_AX_BT_COEX_CFG_2_C1 0xFA34
+#define B_AX_GNT_BT_BYPASS_PRIORITY BIT(12)
+#define B_AX_GNT_BT_POLARITY BIT(8)
+#define B_AX_TIMER_MASK GENMASK(7, 0)
+#define MAC_AX_CSR_RATE 80
+
+#define R_AX_CSR_MODE 0xDA40
+#define R_AX_CSR_MODE_C1 0xFA40
+#define B_AX_BT_CNT_RST BIT(16)
+#define B_AX_BT_STAT_DELAY_MASK GENMASK(15, 12)
+#define MAC_AX_CSR_DELAY 0
+#define B_AX_BT_TRX_INIT_DETECT_MASK GENMASK(11, 8)
+#define MAC_AX_CSR_TRX_TO 4
+#define B_AX_BT_PRI_DETECT_TO_MASK GENMASK(7, 4)
+#define MAC_AX_CSR_PRI_TO 5
+#define B_AX_WL_ACT_MSK BIT(3)
+#define B_AX_STATIS_BT_EN BIT(2)
+#define B_AX_WL_ACT_MASK_ENABLE BIT(1)
+#define B_AX_ENHANCED_BT BIT(0)
+
+#define R_AX_BT_STAST_HIGH 0xDA44
+#define B_AX_STATIS_BT_HI_RX_MASK GENMASK(31, 16)
+#define B_AX_STATIS_BT_HI_TX_MASK GENMASK(15, 0)
+#define R_AX_BT_STAST_LOW 0xDA48
+#define B_AX_STATIS_BT_LO_TX_1_MASK GENMASK(15, 0)
+#define B_AX_STATIS_BT_LO_RX_1_MASK GENMASK(31, 16)
+
+#define R_AX_TDMA_MODE 0xDA4C
+#define R_AX_TDMA_MODE_C1 0xFA4C
+#define B_AX_R_BT_CMD_RPT_MASK GENMASK(31, 16)
+#define B_AX_R_RPT_FROM_BT_MASK GENMASK(15, 8)
+#define B_AX_BT_HID_ISR_SET_MASK GENMASK(7, 6)
+#define B_AX_TDMA_BT_START_NOTIFY BIT(5)
+#define B_AX_ENABLE_TDMA_FW_MODE BIT(4)
+#define B_AX_ENABLE_PTA_TDMA_MODE BIT(3)
+#define B_AX_ENABLE_COEXIST_TAB_IN_TDMA BIT(2)
+#define B_AX_GPIO2_GPIO3_EXANGE_OR_NO_BT_CCA BIT(1)
+#define B_AX_RTK_BT_ENABLE BIT(0)
+
+#define R_AX_BT_COEX_CFG_5 0xDA6C
+#define R_AX_BT_COEX_CFG_5_C1 0xFA6C
+#define B_AX_BT_TIME_MASK GENMASK(31, 6)
+#define B_AX_BT_RPT_SAMPLE_RATE_MASK GENMASK(5, 0)
+#define MAC_AX_RTK_RATE 5
+
+#define R_AX_LTE_CTRL 0xDAF0
+#define R_AX_LTE_WDATA 0xDAF4
+#define R_AX_LTE_RDATA 0xDAF8
+
+#define CMAC1_START_ADDR 0xE000
+#define CMAC1_END_ADDR 0xFFFF
+#define R_AX_CMAC_REG_END 0xFFFF
+
+#define R_AX_LTE_SW_CFG_1 0x0038
+#define R_AX_LTE_SW_CFG_1_C1 0x2038
+#define B_AX_GNT_BT_RFC_S1_SW_VAL BIT(31)
+#define B_AX_GNT_BT_RFC_S1_SW_CTRL BIT(30)
+#define B_AX_GNT_WL_RFC_S1_SW_VAL BIT(29)
+#define B_AX_GNT_WL_RFC_S1_SW_CTRL BIT(28)
+#define B_AX_GNT_BT_BB_S1_SW_VAL BIT(27)
+#define B_AX_GNT_BT_BB_S1_SW_CTRL BIT(26)
+#define B_AX_GNT_WL_BB_S1_SW_VAL BIT(25)
+#define B_AX_GNT_WL_BB_S1_SW_CTRL BIT(24)
+#define B_AX_BT_SW_CTRL_WL_PRIORITY BIT(19)
+#define B_AX_WL_SW_CTRL_WL_PRIORITY BIT(18)
+#define B_AX_LTE_PATTERN_2_EN BIT(17)
+#define B_AX_LTE_PATTERN_1_EN BIT(16)
+#define B_AX_GNT_BT_RFC_S0_SW_VAL BIT(15)
+#define B_AX_GNT_BT_RFC_S0_SW_CTRL BIT(14)
+#define B_AX_GNT_WL_RFC_S0_SW_VAL BIT(13)
+#define B_AX_GNT_WL_RFC_S0_SW_CTRL BIT(12)
+#define B_AX_GNT_BT_BB_S0_SW_VAL BIT(11)
+#define B_AX_GNT_BT_BB_S0_SW_CTRL BIT(10)
+#define B_AX_GNT_WL_BB_S0_SW_VAL BIT(9)
+#define B_AX_GNT_WL_BB_S0_SW_CTRL BIT(8)
+#define B_AX_LTECOEX_FUN_EN BIT(7)
+#define B_AX_LTECOEX_3WIRE_CTRL_MUX BIT(6)
+#define B_AX_LTECOEX_OP_MODE_SEL_MASK GENMASK(5, 4)
+#define B_AX_LTECOEX_UART_MUX BIT(3)
+#define B_AX_LTECOEX_UART_MODE_SEL_MASK GENMASK(2, 0)
+
+#define R_AX_LTE_SW_CFG_2 0x003C
+#define R_AX_LTE_SW_CFG_2_C1 0x203C
+#define B_AX_WL_RX_CTRL BIT(8)
+#define B_AX_GNT_WL_RX_SW_VAL BIT(7)
+#define B_AX_GNT_WL_RX_SW_CTRL BIT(6)
+#define B_AX_GNT_WL_TX_SW_VAL BIT(5)
+#define B_AX_GNT_WL_TX_SW_CTRL BIT(4)
+#define B_AX_GNT_BT_RX_SW_VAL BIT(3)
+#define B_AX_GNT_BT_RX_SW_CTRL BIT(2)
+#define B_AX_GNT_BT_TX_SW_VAL BIT(1)
+#define B_AX_GNT_BT_TX_SW_CTRL BIT(0)
+
+#define RR_MOD 0x00
+#define RR_MOD_IQK GENMASK(19, 4)
+#define RR_MOD_DPK GENMASK(19, 5)
+#define RR_MOD_MASK GENMASK(19, 16)
+#define RR_MOD_V_DOWN 0x0
+#define RR_MOD_V_STANDBY 0x1
+#define RR_MOD_V_TX 0x2
+#define RR_MOD_V_RX 0x3
+#define RR_MOD_V_TXIQK 0x4
+#define RR_MOD_V_DPK 0x5
+#define RR_MOD_V_RXK1 0x6
+#define RR_MOD_V_RXK2 0x7
+#define RR_MOD_M_RXG GENMASK(13, 4)
+#define RR_MOD_M_RXBB GENMASK(9, 5)
+#define RR_MODOPT 0x01
+#define RR_MODOPT_M_TXPWR GENMASK(5, 0)
+#define RR_WLSEL 0x02
+#define RR_WLSEL_AG GENMASK(18, 16)
+#define RR_RSV1 0x05
+#define RR_RSV1_RST BIT(0)
+#define RR_DTXLOK 0x08
+#define RR_RSV2 0x09
+#define RR_CFGCH 0x18
+#define RR_BTC 0x1a
+#define RR_BTC_TXBB GENMASK(14, 12)
+#define RR_BTC_RXBB GENMASK(11, 10)
+#define RR_RCKC 0x1b
+#define RR_RCKC_CA GENMASK(14, 10)
+#define RR_RCKS 0x1c
+#define RR_RCKO 0x1d
+#define RR_RCKO_OFF GENMASK(13, 9)
+#define RR_RXKPLL 0x1e
+#define RR_RXKPLL_OFF GENMASK(5, 0)
+#define RR_RXKPLL_POW BIT(19)
+#define RR_RSV4 0x1f
+#define RR_RXK 0x20
+#define RR_RXK_PLLEN BIT(5)
+#define RR_RXK_SEL5G BIT(7)
+#define RR_RXK_SEL2G BIT(8)
+#define RR_LUTWA 0x33
+#define RR_LUTWA_MASK GENMASK(9, 0)
+#define RR_LUTWD1 0x3e
+#define RR_LUTWD0 0x3f
+#define RR_TM 0x42
+#define RR_TM_TRI BIT(19)
+#define RR_TM_VAL GENMASK(6, 1)
+#define RR_TM2 0x43
+#define RR_TM2_OFF GENMASK(19, 16)
+#define RR_TXG1 0x51
+#define RR_TXG1_ATT2 BIT(19)
+#define RR_TXG1_ATT1 BIT(11)
+#define RR_TXG2 0x52
+#define RR_TXG2_ATT0 BIT(11)
+#define RR_BSPAD 0x54
+#define RR_TXGA 0x55
+#define RR_TXGA_LOK_EN BIT(0)
+#define RR_TXGA_TRK_EN BIT(7)
+#define RR_GAINTX 0x56
+#define RR_GAINTX_ALL GENMASK(15, 0)
+#define RR_GAINTX_PAD GENMASK(9, 5)
+#define RR_GAINTX_BB GENMASK(4, 0)
+#define RR_TXMO 0x58
+#define RR_TXMO_COI GENMASK(19, 15)
+#define RR_TXMO_COQ GENMASK(14, 10)
+#define RR_TXMO_FII GENMASK(9, 6)
+#define RR_TXMO_FIQ GENMASK(5, 2)
+#define RR_TXA 0x5d
+#define RR_TXA_TRK GENMASK(19, 14)
+#define RR_TXRSV 0x5c
+#define RR_TXRSV_GAPK BIT(19)
+#define RR_BIAS 0x5e
+#define RR_BIAS_GAPK BIT(19)
+#define RR_BIASA 0x60
+#define RR_BIASA_TXG GENMASK(15, 12)
+#define RR_BIASA_TXA GENMASK(19, 16)
+#define RR_BIASA_A GENMASK(2, 0)
+#define RR_BIASA2 0x63
+#define RR_BIASA2_LB GENMASK(4, 2)
+#define RR_TXATANK 0x64
+#define RR_TXATANK_LBSW GENMASK(16, 15)
+#define RR_TRXIQ 0x66
+#define RR_RSV6 0x6d
+#define RR_TXPOW 0x7f
+#define RR_TXPOW_TXG BIT(1)
+#define RR_TXPOW_TXA BIT(8)
+#define RR_RXPOW 0x80
+#define RR_RXPOW_IQK GENMASK(17, 16)
+#define RR_RXBB 0x83
+#define RR_RXBB_C2G GENMASK(16, 10)
+#define RR_RXBB_C1G GENMASK(9, 8)
+#define RR_RXBB_ATTR GENMASK(7, 4)
+#define RR_RXBB_ATTC GENMASK(2, 0)
+#define RR_XGLNA2 0x85
+#define RR_XGLNA2_SW GENMASK(1, 0)
+#define RR_RXA 0x8a
+#define RR_RXA_DPK GENMASK(9, 8)
+#define RR_RXA2 0x8c
+#define RR_RXA2_C2 GENMASK(9, 3)
+#define RR_RXA2_C1 GENMASK(12, 10)
+#define RR_RXIQGEN 0x8d
+#define RR_RXIQGEN_ATTL GENMASK(12, 8)
+#define RR_RXIQGEN_ATTH GENMASK(14, 13)
+#define RR_RXBB2 0x8f
+#define RR_EN_TIA_IDA GENMASK(11, 10)
+#define RR_RXBB2_DAC_EN BIT(13)
+#define RR_XALNA2 0x90
+#define RR_XALNA2_SW GENMASK(1, 0)
+#define RR_DCK 0x92
+#define RR_DCK_FINE BIT(1)
+#define RR_DCK_LV BIT(0)
+#define RR_DCK1 0x93
+#define RR_DCK1_SEL BIT(3)
+#define RR_DCK2 0x94
+#define RR_DCK2_CYCLE GENMASK(7, 2)
+#define RR_MIXER 0x9f
+#define RR_MIXER_GN GENMASK(4, 3)
+#define RR_XTALX2 0xb8
+#define RR_MALSEL 0xbe
+#define RR_RCKD 0xde
+#define RR_RCKD_POW GENMASK(19, 13)
+#define RR_RCKD_BW BIT(2)
+#define RR_TXADBG 0xde
+#define RR_LUTDBG 0xdf
+#define RR_LUTDBG_LOK BIT(2)
+#define RR_LUTWE2 0xee
+#define RR_LUTWE 0xef
+#define RR_LUTWE_LOK BIT(2)
+#define RR_RFC 0xf0
+#define RR_RFC_CKEN BIT(1)
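/*
 * Illustrative sketch (editorial, not part of this patch): unlike the R_AX_*
 * MAC registers, the RR_* values above are RF-register addresses and field
 * masks, e.g. RR_MOD_MASK selects the RF operating mode.  A hedged example of
 * forcing one path into standby; rtw89_write_rf() and RF_PATH_A are assumed
 * to be the driver's RF accessor and path enumerator.
 */
static inline void example_rf_standby(struct rtw89_dev *rtwdev)
{
	/* Put RF path A into standby via the mode field of RR_MOD. */
	rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RR_MOD_MASK, RR_MOD_V_STANDBY);
}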
+
+#define R_UPD_P0 0x0000
+#define R_RSTB_WATCH_DOG 0x000C
+#define B_P0_RSTB_WATCH_DOG BIT(0)
+#define B_P1_RSTB_WATCH_DOG BIT(1)
+#define B_UPD_P0_EN BIT(30)
+#define R_ANAPAR_PW15 0x030C
+#define B_ANAPAR_PW15 GENMASK(31, 24)
+#define B_ANAPAR_PW15_H GENMASK(27, 24)
+#define B_ANAPAR_PW15_H2 GENMASK(27, 26)
+#define R_ANAPAR 0x032C
+#define B_ANAPAR_15 GENMASK(31, 16)
+#define B_ANAPAR_ADCCLK BIT(30)
+#define B_ANAPAR_FLTRST BIT(22)
+#define B_ANAPAR_CRXBB GENMASK(18, 16)
+#define B_ANAPAR_14 GENMASK(15, 0)
+#define R_UPD_CLK_ADC 0x0700
+#define B_UPD_CLK_ADC_ON BIT(24)
+#define B_UPD_CLK_ADC_VAL GENMASK(26, 25)
+#define R_RSTB_ASYNC 0x0704
+#define B_RSTB_ASYNC_ALL BIT(1)
+#define R_PMAC_GNT 0x0980
+#define B_PMAC_GNT_TXEN BIT(0)
+#define B_PMAC_GNT_RXEN BIT(16)
+#define B_PMAC_GNT_P1 GENMASK(20, 17)
+#define B_PMAC_GNT_P2 GENMASK(29, 26)
+#define R_PMAC_RX_CFG1 0x0988
+#define B_PMAC_OPT1_MSK GENMASK(11, 0)
+#define R_PMAC_RXMOD 0x0994
+#define B_PMAC_RXMOD_MSK GENMASK(7, 4)
+#define R_MAC_SEL 0x09A4
+#define B_MAC_SEL_MOD GENMASK(4, 2)
+#define B_MAC_SEL_DPD_EN BIT(10)
+#define B_MAC_SEL_PWR_EN BIT(16)
+#define R_PMAC_TX_CTRL 0x09C0
+#define B_PMAC_TXEN_DIS BIT(0)
+#define R_PMAC_TX_PRD 0x09C4
+#define B_PMAC_TX_PRD_MSK GENMASK(31, 8)
+#define B_PMAC_CTX_EN BIT(0)
+#define B_PMAC_PTX_EN BIT(4)
+#define R_PMAC_TX_CNT 0x09C8
+#define B_PMAC_TX_CNT_MSK GENMASK(31, 0)
+#define R_CCX 0x0C00
+#define B_CCX_EDCCA_OPT_MSK GENMASK(6, 4)
+#define B_MEASUREMENT_TRIG_MSK BIT(2)
+#define B_CCX_TRIG_OPT_MSK BIT(1)
+#define B_CCX_EN_MSK BIT(0)
+#define R_IFS_COUNTER 0x0C28
+#define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16)
+#define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14)
+#define B_IFS_COUNTER_CLR_MSK BIT(13)
+#define B_IFS_COLLECT_EN BIT(12)
+#define R_IFS_T1 0x0C2C
+#define B_IFS_T1_TH_HIGH_MSK GENMASK(31, 16)
+#define B_IFS_T1_EN_MSK BIT(15)
+#define B_IFS_T1_TH_LOW_MSK GENMASK(14, 0)
+#define R_IFS_T2 0x0C30
+#define B_IFS_T2_TH_HIGH_MSK GENMASK(31, 16)
+#define B_IFS_T2_EN_MSK BIT(15)
+#define B_IFS_T2_TH_LOW_MSK GENMASK(14, 0)
+#define R_IFS_T3 0x0C34
+#define B_IFS_T3_TH_HIGH_MSK GENMASK(31, 16)
+#define B_IFS_T3_EN_MSK BIT(15)
+#define B_IFS_T3_TH_LOW_MSK GENMASK(14, 0)
+#define R_IFS_T4 0x0C38
+#define B_IFS_T4_TH_HIGH_MSK GENMASK(31, 16)
+#define B_IFS_T4_EN_MSK BIT(15)
+#define B_IFS_T4_TH_LOW_MSK GENMASK(14, 0)
+#define R_PD_CTRL 0x0C3C
+#define B_PD_HIT_DIS BIT(9)
+#define R_IOQ_IQK_DPK 0x0C60
+#define B_IOQ_IQK_DPK_EN BIT(1)
+#define R_P0_EN_SOUND_WO_NDP 0x0D7C
+#define B_P0_EN_SOUND_WO_NDP BIT(1)
+#define R_SPOOF_ASYNC_RST 0x0D84
+#define B_SPOOF_ASYNC_RST BIT(15)
+#define R_NDP_BRK0 0xDA0
+#define R_NDP_BRK1 0xDA4
+#define B_NDP_RU_BRK BIT(0)
+#define R_BRK_ASYNC_RST_EN_1 0x0DC0
+#define R_BRK_ASYNC_RST_EN_2 0x0DC4
+#define R_BRK_ASYNC_RST_EN_3 0x0DC8
+#define R_P0_RXCK 0x12A0
+#define B_P0_RXCK_VAL GENMASK(18, 16)
+#define B_P0_RXCK_ON BIT(19)
+#define B_P0_RXCK_BW3 BIT(30)
+#define R_P0_NRBW 0x12B8
+#define B_P0_NRBW_DBG BIT(30)
+#define R_S0_RXDC 0x12D4
+#define B_S0_RXDC_I GENMASK(25, 16)
+#define B_S0_RXDC_Q GENMASK(31, 26)
+#define R_S0_RXDC2 0x12D8
+#define B_S0_RXDC2_SEL GENMASK(9, 8)
+#define B_S0_RXDC2_AVG GENMASK(7, 6)
+#define B_S0_RXDC2_MEN GENMASK(5, 4)
+#define B_S0_RXDC2_Q2 GENMASK(3, 0)
+#define R_CFO_COMP_SEG0_L 0x1384
+#define R_CFO_COMP_SEG0_H 0x1388
+#define R_CFO_COMP_SEG0_CTRL 0x138C
+#define R_DBG32_D 0x1730
+#define R_TX_COUNTER 0x1A40
+#define R_IFS_CLM_TX_CNT 0x1ACC
+#define B_IFS_CLM_EDCCA_EXCLUDE_CCA_FA_MSK GENMASK(31, 16)
+#define B_IFS_CLM_TX_CNT_MSK GENMASK(15, 0)
+#define R_IFS_CLM_CCA 0x1AD0
+#define B_IFS_CLM_OFDMCCA_EXCLUDE_FA_MSK GENMASK(31, 16)
+#define B_IFS_CLM_CCKCCA_EXCLUDE_FA_MSK GENMASK(15, 0)
+#define R_IFS_CLM_FA 0x1AD4
+#define B_IFS_CLM_OFDM_FA_MSK GENMASK(31, 16)
+#define B_IFS_CLM_CCK_FA_MSK GENMASK(15, 0)
+#define R_IFS_HIS 0x1AD8
+#define B_IFS_T4_HIS_MSK GENMASK(31, 24)
+#define B_IFS_T3_HIS_MSK GENMASK(23, 16)
+#define B_IFS_T2_HIS_MSK GENMASK(15, 8)
+#define B_IFS_T1_HIS_MSK GENMASK(7, 0)
+#define R_IFS_AVG_L 0x1ADC
+#define B_IFS_T2_AVG_MSK GENMASK(31, 16)
+#define B_IFS_T1_AVG_MSK GENMASK(15, 0)
+#define R_IFS_AVG_H 0x1AE0
+#define B_IFS_T4_AVG_MSK GENMASK(31, 16)
+#define B_IFS_T3_AVG_MSK GENMASK(15, 0)
+#define R_IFS_CCA_L 0x1AE4
+#define B_IFS_T2_CCA_MSK GENMASK(31, 16)
+#define B_IFS_T1_CCA_MSK GENMASK(15, 0)
+#define R_IFS_CCA_H 0x1AE8
+#define B_IFS_T4_CCA_MSK GENMASK(31, 16)
+#define B_IFS_T3_CCA_MSK GENMASK(15, 0)
+#define R_IFSCNT 0x1AEC
+#define B_IFSCNT_DONE_MSK BIT(16)
+#define B_IFSCNT_TOTAL_CNT_MSK GENMASK(15, 0)
+#define R_TXAGC_TP 0x1C04
+#define B_TXAGC_TP GENMASK(2, 0)
+#define R_TSSI_THER 0x1C10
+#define B_TSSI_THER GENMASK(29, 24)
+#define R_TXAGC_BB 0x1C60
+#define B_TXAGC_BB_OFT GENMASK(31, 16)
+#define B_TXAGC_BB GENMASK(31, 24)
+#define R_S0_ADDCK 0x1E00
+#define B_S0_ADDCK_I GENMASK(9, 0)
+#define B_S0_ADDCK_Q GENMASK(19, 10)
+#define R_ADC_FIFO 0x20fc
+#define B_ADC_FIFO_RST GENMASK(31, 24)
+#define R_TXFIR0 0x2300
+#define B_TXFIR_C01 GENMASK(23, 0)
+#define R_TXFIR2 0x2304
+#define B_TXFIR_C23 GENMASK(23, 0)
+#define R_TXFIR4 0x2308
+#define B_TXFIR_C45 GENMASK(23, 0)
+#define R_TXFIR6 0x230c
+#define B_TXFIR_C67 GENMASK(23, 0)
+#define R_TXFIR8 0x2310
+#define B_TXFIR_C89 GENMASK(23, 0)
+#define R_TXFIRA 0x2314
+#define B_TXFIR_CAB GENMASK(23, 0)
+#define R_TXFIRC 0x2318
+#define B_TXFIR_CCD GENMASK(23, 0)
+#define R_TXFIRE 0x231c
+#define B_TXFIR_CEF GENMASK(23, 0)
+#define R_RXCCA 0x2344
+#define B_RXCCA_DIS BIT(31)
+#define R_RXSC 0x237C
+#define B_RXSC_EN BIT(0)
+#define R_RXSCOBC 0x23B0
+#define B_RXSCOBC_TH GENMASK(18, 0)
+#define R_RXSCOCCK 0x23B4
+#define B_RXSCOCCK_TH GENMASK(18, 0)
+#define R_P1_EN_SOUND_WO_NDP 0x2D7C
+#define B_P1_EN_SOUND_WO_NDP BIT(1)
+#define R_P1_DBGMOD 0x32B8
+#define B_P1_DBGMOD_ON BIT(30)
+#define R_S1_RXDC 0x32D4
+#define B_S1_RXDC_I GENMASK(25, 16)
+#define B_S1_RXDC_Q GENMASK(31, 26)
+#define R_S1_RXDC2 0x32D8
+#define B_S1_RXDC2_EN GENMASK(5, 4)
+#define B_S1_RXDC2_SEL GENMASK(9, 8)
+#define B_S1_RXDC2_Q2 GENMASK(3, 0)
+#define R_TXAGC_BB_S1 0x3C60
+#define B_TXAGC_BB_S1_OFT GENMASK(31, 16)
+#define B_TXAGC_BB_S1 GENMASK(31, 24)
+#define R_S1_ADDCK 0x3E00
+#define B_S1_ADDCK_I GENMASK(9, 0)
+#define B_S1_ADDCK_Q GENMASK(19, 10)
+#define R_DCFO 0x4264
+#define B_DCFO GENMASK(1, 0)
+#define R_SEG0CSI 0x42AC
+#define B_SEG0CSI_IDX GENMASK(10, 0)
+#define R_SEG0CSI_EN 0x42C4
+#define B_SEG0CSI_EN BIT(23)
+#define R_BSS_CLR_MAP 0x43ac
+#define B_BSS_CLR_MAP_VLD0 BIT(28)
+#define B_BSS_CLR_MAP_TGT GENMASK(27, 22)
+#define B_BSS_CLR_MAP_STAID GENMASK(21, 11)
+#define R_CFO_TRK0 0x4404
+#define R_CFO_TRK1 0x440C
+#define B_CFO_TRK_MSK GENMASK(14, 10)
+#define R_DCFO_COMP_S0 0x448C
+#define B_DCFO_COMP_S0_MSK GENMASK(11, 0)
+#define R_DCFO_WEIGHT 0x4490
+#define B_DCFO_WEIGHT_MSK GENMASK(27, 24)
+#define R_DCFO_OPT 0x4494
+#define B_DCFO_OPT_EN BIT(29)
+#define R_BANDEDGE 0x4498
+#define B_BANDEDGE_EN BIT(30)
+#define R_TXPATH_SEL 0x458C
+#define B_TXPATH_SEL_MSK GENMASK(31, 28)
+#define R_TXPWR 0x4594
+#define B_TXPWR_MSK GENMASK(30, 22)
+#define R_TXNSS_MAP 0x45B4
+#define B_TXNSS_MAP_MSK GENMASK(20, 17)
+#define R_PATH0_IB_PKPW 0x4628
+#define B_PATH0_IB_PKPW_MSK GENMASK(11, 6)
+#define R_PATH0_LNA_ERR1 0x462C
+#define B_PATH0_LNA_ERR_G1_A_MSK GENMASK(29, 24)
+#define B_PATH0_LNA_ERR_G0_G_MSK GENMASK(17, 12)
+#define B_PATH0_LNA_ERR_G0_A_MSK GENMASK(11, 6)
+#define R_PATH0_LNA_ERR2 0x4630
+#define B_PATH0_LNA_ERR_G2_G_MSK GENMASK(23, 18)
+#define B_PATH0_LNA_ERR_G2_A_MSK GENMASK(17, 12)
+#define B_PATH0_LNA_ERR_G1_G_MSK GENMASK(5, 0)
+#define R_PATH0_LNA_ERR3 0x4634
+#define B_PATH0_LNA_ERR_G4_G_MSK GENMASK(29, 24)
+#define B_PATH0_LNA_ERR_G4_A_MSK GENMASK(23, 18)
+#define B_PATH0_LNA_ERR_G3_G_MSK GENMASK(11, 6)
+#define B_PATH0_LNA_ERR_G3_A_MSK GENMASK(5, 0)
+#define R_PATH0_LNA_ERR4 0x4638
+#define B_PATH0_LNA_ERR_G6_A_MSK GENMASK(29, 24)
+#define B_PATH0_LNA_ERR_G5_G_MSK GENMASK(17, 12)
+#define B_PATH0_LNA_ERR_G5_A_MSK GENMASK(11, 6)
+#define R_PATH0_LNA_ERR5 0x463C
+#define B_PATH0_LNA_ERR_G6_G_MSK GENMASK(5, 0)
+#define R_PATH0_TIA_ERR_G0 0x4640
+#define B_PATH0_TIA_ERR_G0_G_MSK GENMASK(23, 18)
+#define B_PATH0_TIA_ERR_G0_A_MSK GENMASK(17, 12)
+#define R_PATH0_TIA_ERR_G1 0x4644
+#define B_PATH0_TIA_ERR_G1_SEL GENMASK(31, 30)
+#define B_PATH0_TIA_ERR_G1_G_MSK GENMASK(11, 6)
+#define B_PATH0_TIA_ERR_G1_A_MSK GENMASK(5, 0)
+#define R_PATH0_IB_PBK 0x4650
+#define B_PATH0_IB_PBK_MSK GENMASK(14, 10)
+#define R_PATH0_RXB_INIT 0x4658
+#define B_PATH0_RXB_INIT_IDX_MSK GENMASK(9, 5)
+#define R_PATH0_LNA_INIT 0x4668
+#define B_PATH0_LNA_INIT_IDX_MSK GENMASK(26, 24)
+#define R_PATH0_BTG 0x466C
+#define B_PATH0_BTG_SHEN GENMASK(18, 17)
+#define R_PATH0_TIA_INIT 0x4674
+#define B_PATH0_TIA_INIT_IDX_MSK BIT(17)
+#define R_PATH0_P20_FOLLOW_BY_PAGCUGC 0x46A0
+#define B_PATH0_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH0_S20_FOLLOW_BY_PAGCUGC 0x46A4
+#define B_PATH0_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_P0_NBIIDX 0x469C
+#define B_P0_NBIIDX_VAL GENMASK(11, 0)
+#define B_P0_NBIIDX_NOTCH_EN BIT(12)
+#define R_P1_MODE 0x4718
+#define B_P1_MODE_SEL GENMASK(31, 30)
+#define R_PATH1_LNA_INIT 0x473C
+#define B_PATH1_LNA_INIT_IDX_MSK GENMASK(26, 24)
+#define R_PATH1_TIA_INIT 0x4748
+#define B_PATH1_TIA_INIT_IDX_MSK BIT(17)
+#define R_PATH1_BTG 0x4740
+#define B_PATH1_BTG_SHEN GENMASK(18, 17)
+#define R_PATH1_RXB_INIT 0x472C
+#define B_PATH1_RXB_INIT_IDX_MSK GENMASK(9, 5)
+#define R_PATH1_P20_FOLLOW_BY_PAGCUGC 0x4774
+#define B_PATH1_P20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_PATH1_S20_FOLLOW_BY_PAGCUGC 0x4778
+#define B_PATH1_S20_FOLLOW_BY_PAGCUGC_EN_MSK BIT(5)
+#define R_P1_NBIIDX 0x4770
+#define B_P1_NBIIDX_VAL GENMASK(11, 0)
+#define B_P1_NBIIDX_NOTCH_EN BIT(12)
+#define R_SEG0R_PD 0x481C
+#define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29)
+#define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6)
+#define R_2P4G_BAND 0x4970
+#define B_2P4G_BAND_SEL BIT(1)
+#define R_FC0_BW 0x4974
+#define B_FC0_BW_INV GENMASK(6, 0)
+#define B_FC0_BW_SET GENMASK(31, 30)
+#define R_CHBW_MOD 0x4978
+#define B_CHBW_MOD_PRICH GENMASK(11, 8)
+#define B_CHBW_MOD_SBW GENMASK(13, 12)
+#define R_CFO_COMP_SEG1_L 0x5384
+#define R_CFO_COMP_SEG1_H 0x5388
+#define R_CFO_COMP_SEG1_CTRL 0x538C
+#define B_CFO_COMP_VALID_BIT BIT(29)
+#define B_CFO_COMP_WEIGHT_MSK GENMASK(27, 24)
+#define B_CFO_COMP_VAL_MSK GENMASK(11, 0)
+#define R_DPD_OFT_EN 0x5800
+#define B_DPD_OFT_EN BIT(28)
+#define R_DPD_OFT_ADDR 0x5804
+#define B_DPD_OFT_ADDR GENMASK(31, 27)
+#define R_P0_TMETER 0x5810
+#define B_P0_TMETER GENMASK(15, 10)
+#define B_P0_TMETER_DIS BIT(16)
+#define B_P0_TMETER_TRK BIT(24)
+#define R_P0_TSSI_TRK 0x5818
+#define B_P0_TSSI_TRK_EN BIT(30)
+#define B_P0_TSSI_OFT_EN BIT(28)
+#define B_P0_TSSI_OFT GENMASK(7, 0)
+#define R_P0_TSSI_AVG 0x5820
+#define B_P0_TSSI_AVG GENMASK(15, 12)
+#define R_P0_RFCTM 0x5864
+#define B_P0_RFCTM_VAL GENMASK(25, 20)
+#define R_P0_RFCTM_RDY BIT(26)
+#define R_P0_TXDPD 0x58D4
+#define B_P0_TXDPD GENMASK(31, 28)
+#define R_P0_TXPW_RSTB 0x58DC
+#define B_P0_TXPW_RSTB_MANON BIT(30)
+#define B_P0_TXPW_RSTB_TSSI BIT(31)
+#define R_P0_TSSI_MV_AVG 0x58E4
+#define B_P0_TSSI_MV_AVG GENMASK(13, 11)
+#define R_TXGAIN_SCALE 0x58F0
+#define B_TXGAIN_SCALE_EN BIT(19)
+#define B_TXGAIN_SCALE_OFT GENMASK(31, 24)
+#define R_P0_TSSI_BASE 0x5C00
+#define R_S0_DACKI 0x5E00
+#define B_S0_DACKI_AR GENMASK(31, 28)
+#define B_S0_DACKI_EN BIT(3)
+#define R_S0_DACKI2 0x5E30
+#define B_S0_DACKI2_K GENMASK(21, 12)
+#define R_S0_DACKI7 0x5E44
+#define B_S0_DACKI7_K GENMASK(15, 8)
+#define R_S0_DACKI8 0x5E48
+#define B_S0_DACKI8_K GENMASK(15, 8)
+#define R_S0_DACKQ 0x5E50
+#define B_S0_DACKQ_AR GENMASK(31, 28)
+#define B_S0_DACKQ_EN BIT(3)
+#define R_S0_DACKQ2 0x5E80
+#define B_S0_DACKQ2_K GENMASK(21, 12)
+#define R_S0_DACKQ7 0x5E94
+#define B_S0_DACKQ7_K GENMASK(15, 8)
+#define R_S0_DACKQ8 0x5E98
+#define B_S0_DACKQ8_K GENMASK(15, 8)
+#define R_P1_TMETER 0x7810
+#define B_P1_TMETER GENMASK(15, 10)
+#define B_P1_TMETER_DIS BIT(16)
+#define B_P1_TMETER_TRK BIT(24)
+#define R_P1_TSSI_TRK 0x7818
+#define B_P1_TSSI_TRK_EN BIT(30)
+#define B_P1_TSSI_OFT_EN BIT(28)
+#define B_P1_TSSI_OFT GENMASK(7, 0)
+#define R_P1_TSSI_AVG 0x7820
+#define B_P1_TSSI_AVG GENMASK(15, 12)
+#define R_P1_RFCTM 0x7864
+#define R_P1_RFCTM_RDY BIT(26)
+#define B_P1_RFCTM_VAL GENMASK(25, 20)
+#define R_P1_TXPW_RSTB 0x78DC
+#define B_P1_TXPW_RSTB_MANON BIT(30)
+#define B_P1_TXPW_RSTB_TSSI BIT(31)
+#define R_P1_TSSI_MV_AVG 0x78E4
+#define B_P1_TSSI_MV_AVG GENMASK(13, 11)
+#define R_TSSI_THOF 0x7C00
+#define R_S1_DACKI 0x7E00
+#define B_S1_DACKI_AR GENMASK(31, 28)
+#define B_S1_DACKI_EN BIT(3)
+#define R_S1_DACKI2 0x7E30
+#define B_S1_DACKI2_K GENMASK(21, 12)
+#define R_S1_DACKI7 0x7E44
+#define B_S1_DACKI_K GENMASK(15, 8)
+#define R_S1_DACKI8 0x7E48
+#define B_S1_DACKI8_K GENMASK(15, 8)
+#define R_S1_DACKQ 0x7E50
+#define B_S1_DACKQ_AR GENMASK(31, 28)
+#define B_S1_DACKQ_EN BIT(3)
+#define R_S1_DACKQ2 0x7E80
+#define B_S1_DACKQ2_K GENMASK(21, 12)
+#define R_S1_DACKQ7 0x7E94
+#define B_S1_DACKQ7_K GENMASK(15, 8)
+#define R_S1_DACKQ8 0x7E98
+#define B_S1_DACKQ8_K GENMASK(15, 8)
+#define R_NCTL_CFG 0x8000
+#define B_NCTL_CFG_SPAGE GENMASK(2, 1)
+#define R_NCTL_RPT 0x8008
+#define B_NCTL_RPT_FLG BIT(26)
+#define R_NCTL_N1 0x8010
+#define B_NCTL_N1_CIP GENMASK(7, 0)
+#define R_NCTL_N2 0x8014
+#define R_IQK_COM 0x8018
+#define R_IQK_DIF 0x801C
+#define B_IQK_DIF_TRX GENMASK(1, 0)
+#define R_IQK_DIF1 0x8020
+#define B_IQK_DIF1_TXPI GENMASK(19, 0)
+#define R_IQK_DIF2 0x8024
+#define B_IQK_DIF2_RXPI GENMASK(19, 0)
+#define R_IQK_DIF4 0x802C
+#define B_IQK_DIF4_TXT GENMASK(11, 0)
+#define B_IQK_DIF4_RXT GENMASK(27, 16)
+#define R_IQK_CFG 0x8034
+#define B_IQK_CFG_SET GENMASK(5, 4)
+#define R_TPG_MOD 0x806C
+#define B_TPG_MOD_F GENMASK(2, 1)
+#define R_MDPK_SYNC 0x8070
+#define B_MDPK_SYNC_SEL BIT(31)
+#define B_MDPK_SYNC_MAN GENMASK(31, 28)
+#define R_MDPK_RX_DCK 0x8074
+#define R_NCTL_RW 0x8080
+#define R_KIP_SYSCFG 0x8088
+#define R_KIP_CLK 0x808C
+#define R_LDL_NORM 0x80A0
+#define B_LDL_NORM_PN GENMASK(12, 8)
+#define B_LDL_NORM_OP GENMASK(1, 0)
+#define R_DPK_CTL 0x80B0
+#define B_DPK_CTL_EN BIT(28)
+#define R_DPK_CFG 0x80B8
+#define B_DPK_CFG_IDX GENMASK(14, 12)
+#define R_DPK_CFG2 0x80BC
+#define B_DPK_CFG2_ST BIT(14)
+#define R_DPK_CFG3 0x80C0
+#define R_KPATH_CFG 0x80D0
+#define R_KIP_RPT1 0x80D4
+#define B_KIP_RPT1_SEL GENMASK(21, 16)
+#define R_SRAM_IQRX 0x80D8
+#define R_GAPK 0x80E0
+#define B_GAPK_ADR BIT(0)
+#define R_SRAM_IQRX2 0x80E8
+#define R_DPK_TRK 0x80f0
+#define B_DPK_TRK_DIS BIT(31)
+#define R_RPT_COM 0x80FC
+#define B_PRT_COM_SYNERR BIT(30)
+#define B_PRT_COM_DCI GENMASK(27, 16)
+#define B_PRT_COM_CORV GENMASK(15, 8)
+#define B_PRT_COM_DCQ GENMASK(11, 0)
+#define B_PRT_COM_GL GENMASK(7, 4)
+#define B_PRT_COM_CORI GENMASK(7, 0)
+#define R_COEF_SEL 0x8104
+#define B_COEF_SEL_IQC BIT(0)
+#define B_COEF_SEL_MDPD BIT(8)
+#define R_CFIR_SYS 0x8120
+#define R_IQK_RES 0x8124
+#define B_IQK_RES_TXCFIR GENMASK(11, 8)
+#define B_IQK_RES_RXCFIR GENMASK(3, 0)
+#define R_TXIQC 0x8138
+#define R_RXIQC 0x813c
+#define B_RXIQC_BYPASS BIT(0)
+#define B_RXIQC_BYPASS2 BIT(2)
+#define B_RXIQC_NEWP GENMASK(19, 8)
+#define B_RXIQC_NEWX GENMASK(31, 20)
+#define R_KIP 0x8140
+#define B_KIP_DBCC BIT(0)
+#define B_KIP_RFGAIN BIT(8)
+#define R_RFGAIN 0x8144
+#define B_RFGAIN_PAD GENMASK(4, 0)
+#define B_RFGAIN_TXBB GENMASK(12, 8)
+#define R_RFGAIN_BND 0x8148
+#define B_RFGAIN_BND GENMASK(4, 0)
+#define R_CFIR_MAP 0x8150
+#define R_CFIR_LUT 0x8154
+#define B_CFIR_LUT_SEL BIT(8)
+#define B_CFIR_LUT_G3 BIT(3)
+#define B_CFIR_LUT_G2 BIT(2)
+#define B_CFIR_LUT_GP GENMASK(1, 0)
+#define R_DPD_V1 0x81a0
+#define R_DPD_CH0 0x81AC
+#define R_DPD_BND 0x81B4
+#define R_DPD_CH0A 0x81BC
+#define R_TXAGC_RFK 0x81C4
+#define B_TXAGC_RFK_CH0 GENMASK(5, 0)
+#define R_DPD_COM 0x81C8
+#define R_KIP_IQP 0x81CC
+#define B_KIP_IQP_IQSW GENMASK(5, 0)
+#define R_KIP_RPT 0x81D4
+#define B_KIP_RPT_SEL GENMASK(21, 16)
+#define R_W_COEF 0x81D8
+#define R_LOAD_COEF 0x81DC
+#define B_LOAD_COEF_MDPD BIT(16)
+#define B_LOAD_COEF_CFIR GENMASK(1, 0)
+#define B_LOAD_COEF_AUTO BIT(0)
+#define R_RPT_PER 0x81FC
+#define R_RXCFIR_P0C0 0x8D40
+#define R_RXCFIR_P0C1 0x8D84
+#define R_RXCFIR_P0C2 0x8DC8
+#define R_RXCFIR_P0C3 0x8E0C
+#define R_TXCFIR_P0C0 0x8F50
+#define R_TXCFIR_P0C1 0x8F84
+#define R_TXCFIR_P0C2 0x8FB8
+#define R_TXCFIR_P0C3 0x8FEC
+#define R_RXCFIR_P1C0 0x9140
+#define R_RXCFIR_P1C1 0x9184
+#define R_RXCFIR_P1C2 0x91C8
+#define R_RXCFIR_P1C3 0x920C
+#define R_TXCFIR_P1C0 0x9350
+#define R_TXCFIR_P1C1 0x9384
+#define R_TXCFIR_P1C2 0x93B8
+#define R_TXCFIR_P1C3 0x93EC
+#define R_IQKINF 0x9FE0
+#define B_IQKINF_VER GENMASK(31, 24)
+#define B_IQKINF_FAIL_RXGRP GENMASK(23, 16)
+#define B_IQKINF_FAIL_TXGRP GENMASK(15, 8)
+#define B_IQKINF_FAIL GENMASK(3, 0)
+#define B_IQKINF_F_RX BIT(3)
+#define B_IQKINF_FTX BIT(2)
+#define B_IQKINF_FFIN BIT(1)
+#define B_IQKINF_FCOR BIT(0)
+#define R_IQKCH 0x9FE4
+#define B_IQKCH_CH GENMASK(15, 8)
+#define B_IQKCH_BW GENMASK(7, 4)
+#define B_IQKCH_BAND GENMASK(3, 0)
+#define R_IQKINF2 0x9FE8
+#define B_IQKINF2_FCNT GENMASK(23, 16)
+#define B_IQKINF2_KCNT GENMASK(15, 8)
+#define B_IQKINF2_NCTLV GENMASK(7, 0)
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c
new file mode 100644
index 000000000000..f00b94ecfff4
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/regd.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "ps.h"
+
+#define COUNTRY_REGD(_alpha2, _txpwr_regd_2g, _txpwr_regd_5g) \
+ {.alpha2 = (_alpha2), \
+ .txpwr_regd[RTW89_BAND_2G] = (_txpwr_regd_2g), \
+ .txpwr_regd[RTW89_BAND_5G] = (_txpwr_regd_5g) \
+ }
+
+static const struct rtw89_regulatory rtw89_ww_regd =
+ COUNTRY_REGD("00", RTW89_WW, RTW89_WW);
+
+static const struct rtw89_regulatory rtw89_regd_map[] = {
+ COUNTRY_REGD("AR", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BO", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("BR", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("CL", RTW89_WW, RTW89_CHILE),
+ COUNTRY_REGD("CO", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("CR", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("EC", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SV", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("GT", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("HN", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("MX", RTW89_FCC, RTW89_MEXICO),
+ COUNTRY_REGD("NI", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PA", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PY", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PE", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("US", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("UY", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("VE", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("PR", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("DO", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("AT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CY", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("DK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("EE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("DE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("HU", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LV", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LU", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MC", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ES", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GB", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("HR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("EG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IQ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("JO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LB", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("OM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("QA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("RO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("RU", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("RS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ME", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ZA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("UA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("YE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ZW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BD", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("HK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ID", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("KR", RTW89_KCC, RTW89_KCC),
+ COUNTRY_REGD("MY", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TW", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("TH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("VN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AU", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("NZ", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("PG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CA", RTW89_IC, RTW89_IC),
+ COUNTRY_REGD("JP", RTW89_MKK, RTW89_MKK),
+ COUNTRY_REGD("JM", RTW89_WW, RTW89_FCC),
+ COUNTRY_REGD("AN", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("TT", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("TN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AF", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AS", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("AD", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AQ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AG", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("AM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("AW", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BS", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BB", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BY", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BZ", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BM", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BV", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("VG", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("BN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("BI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CV", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KY", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("CF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TD", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CX", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("CC", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CD", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("CI", RTW89_ETSI, RTW89_ETSI),
+ COUNTRY_REGD("DJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("DM", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("GQ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ER", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ET", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FK", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FJ", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("GF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("PF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GD", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("GP", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GU", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("GG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GY", RTW89_FCC, RTW89_NCC),
+ COUNTRY_REGD("HT", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("HM", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("VA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("JE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KI", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LA", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("LY", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MV", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ML", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MH", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MQ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MU", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("YT", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("FM", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MD", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MN", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("MS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NR", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NP", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NC", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("NU", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("NF", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("MP", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PW", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("RE", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("RW", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("KN", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("LC", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("MF", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SX", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("PM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("VC", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("WS", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ST", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SC", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SL", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SB", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("GS", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SR", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("SJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("SZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TJ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TK", RTW89_WW, RTW89_ACMA),
+ COUNTRY_REGD("TO", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TC", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("TV", RTW89_ETSI, RTW89_NA),
+ COUNTRY_REGD("UG", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("VI", RTW89_FCC, RTW89_FCC),
+ COUNTRY_REGD("UZ", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("VU", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("WF", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("EH", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("ZM", RTW89_WW, RTW89_ETSI),
+ COUNTRY_REGD("IR", RTW89_WW, RTW89_ETSI),
+};
+
+static const struct rtw89_regulatory *rtw89_regd_find_reg_by_name(char *alpha2)
+{
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(rtw89_regd_map); i++) {
+ if (!memcmp(rtw89_regd_map[i].alpha2, alpha2, 2))
+ return &rtw89_regd_map[i];
+ }
+
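+ /* no exact match: fall back to the worldwide regulatory domain */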
+ return &rtw89_ww_regd;
+}
+
+static bool rtw89_regd_is_ww(const struct rtw89_regulatory *regd)
+{
+ return regd == &rtw89_ww_regd;
+}
+
+int rtw89_regd_init(struct rtw89_dev *rtwdev,
+ void (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ const struct rtw89_regulatory *chip_regd;
+ struct wiphy *wiphy = rtwdev->hw->wiphy;
+ int ret;
+
+ if (!wiphy)
+ return -EINVAL;
+
+ chip_regd = rtw89_regd_find_reg_by_name(rtwdev->efuse.country_code);
+ if (!rtw89_regd_is_ww(chip_regd)) {
+ rtwdev->regd = chip_regd;
+ /* Ignore country IE if a country domain is already programmed in the chip */
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ wiphy->regulatory_flags |= REGULATORY_STRICT_REG;
+
+ ret = regulatory_hint(rtwdev->hw->wiphy, rtwdev->regd->alpha2);
+ if (ret)
+ rtw89_warn(rtwdev, "failed to hint regulatory:%d\n", ret);
+
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "efuse country code %c%c, mapping to 2g txregd %d, 5g txregd %d\n",
+ rtwdev->efuse.country_code[0], rtwdev->efuse.country_code[1],
+ rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
+ rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+
+ return 0;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "worldwide roaming chip, follow the setting of stack(%c%c), mapping to 2g txregd %d, 5g txregd %d\n",
+ rtwdev->regd->alpha2[0], rtwdev->regd->alpha2[1],
+ rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
+ rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+
+ return 0;
+}
+
+static void rtw89_regd_notifier_apply(struct rtw89_dev *rtwdev,
+ struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ rtwdev->regd = rtw89_regd_find_reg_by_name(request->alpha2);
+ /* This notification may come from the userspace of a distro,
+ * which does not expect the regulatory domain to be changed by
+ * connecting to an AP (i.e. by a country IE).
+ */
+ if (request->initiator == NL80211_REGDOM_SET_BY_USER &&
+ !rtw89_regd_is_ww(rtwdev->regd))
+ wiphy->regulatory_flags |= REGULATORY_COUNTRY_IE_IGNORE;
+ else
+ wiphy->regulatory_flags &= ~REGULATORY_COUNTRY_IE_IGNORE;
+}
+
+void rtw89_regd_notifier(struct wiphy *wiphy, struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct rtw89_dev *rtwdev = hw->priv;
+
+ mutex_lock(&rtwdev->mutex);
+ rtw89_leave_ps_mode(rtwdev);
+
+ if (wiphy->regd) {
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "There is a country domain programmed in chip, ignore notifications\n");
+ goto exit;
+ }
+ rtw89_regd_notifier_apply(rtwdev, wiphy, request);
+ rtw89_debug(rtwdev, RTW89_DBG_REGD,
+ "get alpha2 %c%c from initiator %d, mapping to 2g txregd %d, 5g txregd %d\n",
+ request->alpha2[0], request->alpha2[1], request->initiator,
+ rtwdev->regd->txpwr_regd[RTW89_BAND_2G],
+ rtwdev->regd->txpwr_regd[RTW89_BAND_5G]);
+
+ rtw89_chip_set_txpwr(rtwdev);
+
+exit:
+ mutex_unlock(&rtwdev->mutex);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
new file mode 100644
index 000000000000..5c6ffca3a324
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c
@@ -0,0 +1,2036 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852a.h"
+#include "rtw8852a_rfk.h"
+#include "rtw8852a_table.h"
+#include "txrx.h"
+
+static const struct rtw89_hfc_ch_cfg rtw8852a_hfc_chcfg_pcie[] = {
+ {128, 1896, grp_0}, /* ACH 0 */
+ {128, 1896, grp_0}, /* ACH 1 */
+ {128, 1896, grp_0}, /* ACH 2 */
+ {128, 1896, grp_0}, /* ACH 3 */
+ {128, 1896, grp_1}, /* ACH 4 */
+ {128, 1896, grp_1}, /* ACH 5 */
+ {128, 1896, grp_1}, /* ACH 6 */
+ {128, 1896, grp_1}, /* ACH 7 */
+ {32, 1896, grp_0}, /* B0MGQ */
+ {128, 1896, grp_0}, /* B0HIQ */
+ {32, 1896, grp_1}, /* B1MGQ */
+ {128, 1896, grp_1}, /* B1HIQ */
+ {40, 0, 0} /* FWCMDQ */
+};
+
+static const struct rtw89_hfc_pub_cfg rtw8852a_hfc_pubcfg_pcie = {
+ 1896, /* Group 0 */
+ 1896, /* Group 1 */
+ 3792, /* Public Max */
+ 0 /* WP threshold */
+};
+
+static const struct rtw89_hfc_param_ini rtw8852a_hfc_param_ini_pcie[] = {
+ [RTW89_QTA_SCC] = {rtw8852a_hfc_chcfg_pcie, &rtw8852a_hfc_pubcfg_pcie,
+ &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_DLFW] = {NULL, NULL, &rtw_hfc_preccfg_pcie, RTW89_HCIFC_POH},
+ [RTW89_QTA_INVALID] = {NULL},
+};
+
+static const struct rtw89_dle_mem rtw8852a_dle_mem_pcie[] = {
+ [RTW89_QTA_SCC] = {RTW89_QTA_SCC, &wde_size0, &ple_size0, &wde_qt0,
+ &wde_qt0, &ple_qt4, &ple_qt5},
+ [RTW89_QTA_DLFW] = {RTW89_QTA_DLFW, &wde_size4, &ple_size4,
+ &wde_qt4, &wde_qt4, &ple_qt13, &ple_qt13},
+ [RTW89_QTA_INVALID] = {RTW89_QTA_INVALID, NULL, NULL, NULL, NULL, NULL,
+ NULL},
+};
+
+static const struct rtw89_reg2_def rtw8852a_pmac_ht20_mcs7_tbl[] = {
+ {0x44AC, 0x00000000},
+ {0x44B0, 0x00000000},
+ {0x44B4, 0x00000000},
+ {0x44B8, 0x00000000},
+ {0x44BC, 0x00000000},
+ {0x44C0, 0x00000000},
+ {0x44C4, 0x00000000},
+ {0x44C8, 0x00000000},
+ {0x44CC, 0x00000000},
+ {0x44D0, 0x00000000},
+ {0x44D4, 0x00000000},
+ {0x44D8, 0x00000000},
+ {0x44DC, 0x00000000},
+ {0x44E0, 0x00000000},
+ {0x44E4, 0x00000000},
+ {0x44E8, 0x00000000},
+ {0x44EC, 0x00000000},
+ {0x44F0, 0x00000000},
+ {0x44F4, 0x00000000},
+ {0x44F8, 0x00000000},
+ {0x44FC, 0x00000000},
+ {0x4500, 0x00000000},
+ {0x4504, 0x00000000},
+ {0x4508, 0x00000000},
+ {0x450C, 0x00000000},
+ {0x4510, 0x00000000},
+ {0x4514, 0x00000000},
+ {0x4518, 0x00000000},
+ {0x451C, 0x00000000},
+ {0x4520, 0x00000000},
+ {0x4524, 0x00000000},
+ {0x4528, 0x00000000},
+ {0x452C, 0x00000000},
+ {0x4530, 0x4E1F3E81},
+ {0x4534, 0x00000000},
+ {0x4538, 0x0000005A},
+ {0x453C, 0x00000000},
+ {0x4540, 0x00000000},
+ {0x4544, 0x00000000},
+ {0x4548, 0x00000000},
+ {0x454C, 0x00000000},
+ {0x4550, 0x00000000},
+ {0x4554, 0x00000000},
+ {0x4558, 0x00000000},
+ {0x455C, 0x00000000},
+ {0x4560, 0x4060001A},
+ {0x4564, 0x40000000},
+ {0x4568, 0x00000000},
+ {0x456C, 0x00000000},
+ {0x4570, 0x04000007},
+ {0x4574, 0x0000DC87},
+ {0x4578, 0x00000BAB},
+ {0x457C, 0x03E00000},
+ {0x4580, 0x00000048},
+ {0x4584, 0x00000000},
+ {0x4588, 0x000003E8},
+ {0x458C, 0x30000000},
+ {0x4590, 0x00000000},
+ {0x4594, 0x10000000},
+ {0x4598, 0x00000001},
+ {0x459C, 0x00030000},
+ {0x45A0, 0x01000000},
+ {0x45A4, 0x03000200},
+ {0x45A8, 0xC00001C0},
+ {0x45AC, 0x78018000},
+ {0x45B0, 0x80000000},
+ {0x45B4, 0x01C80600},
+ {0x45B8, 0x00000002},
+ {0x4594, 0x10000000}
+};
+
+static const struct rtw89_reg3_def rtw8852a_btc_preagc_en_defs[] = {
+ {0x4624, GENMASK(20, 14), 0x40},
+ {0x46f8, GENMASK(20, 14), 0x40},
+ {0x4674, GENMASK(20, 19), 0x2},
+ {0x4748, GENMASK(20, 19), 0x2},
+ {0x4650, GENMASK(14, 10), 0x18},
+ {0x4724, GENMASK(14, 10), 0x18},
+ {0x4688, GENMASK(1, 0), 0x3},
+ {0x475c, GENMASK(1, 0), 0x3},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852a_btc_preagc_en_defs);
+
+static const struct rtw89_reg3_def rtw8852a_btc_preagc_dis_defs[] = {
+ {0x4624, GENMASK(20, 14), 0x1a},
+ {0x46f8, GENMASK(20, 14), 0x1a},
+ {0x4674, GENMASK(20, 19), 0x1},
+ {0x4748, GENMASK(20, 19), 0x1},
+ {0x4650, GENMASK(14, 10), 0x12},
+ {0x4724, GENMASK(14, 10), 0x12},
+ {0x4688, GENMASK(1, 0), 0x0},
+ {0x475c, GENMASK(1, 0), 0x0},
+};
+
+static DECLARE_PHY_REG3_TBL(rtw8852a_btc_preagc_dis_defs);
+
+static const struct rtw89_pwr_cfg rtw8852a_pwron[] = {
+ {0x00C6,
+ PWR_CV_MSK_B,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x1086,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), 0},
+ {0x1086,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_POLL, BIT(1), BIT(1)},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4) | BIT(3), 0},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(7), 0},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(2), 0},
+ {0x0006,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_POLL, BIT(1), BIT(1)},
+ {0x0006,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_POLL, BIT(0), 0},
+ {0x106D,
+ PWR_CV_MSK_B | PWR_CV_MSK_C,
+ PWR_INTF_MSK_USB,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(6), 0},
+ {0x0088,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0088,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), 0},
+ {0x0088,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0088,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), 0},
+ {0x0088,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0083,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(6), 0},
+ {0x0080,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(5), BIT(5)},
+ {0x0024,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4) | BIT(3) | BIT(2) | BIT(1) | BIT(0), 0},
+ {0x02A0,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x02A2,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(7) | BIT(6) | BIT(5), 0},
+ {0x0071,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), 0},
+ {0x0010,
+ PWR_CV_MSK_A,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x02A0,
+ PWR_CV_MSK_A,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(7) | BIT(6), 0},
+ {0xFFFF,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ 0,
+ PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw89_pwr_cfg rtw8852a_pwroff[] = {
+ {0x02F0,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, 0xFF, 0},
+ {0x02F1,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, 0xFF, 0},
+ {0x0006,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x0002,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(1) | BIT(0), 0},
+ {0x0082,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(1) | BIT(0), 0},
+ {0x106D,
+ PWR_CV_MSK_B | PWR_CV_MSK_C,
+ PWR_INTF_MSK_USB,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(6), BIT(6)},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(1), BIT(1)},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ PWR_BASE_MAC,
+ PWR_CMD_POLL, BIT(1), 0},
+ {0x0091,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), 0},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_PCIE,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(2), BIT(2)},
+ {0x0007,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_USB,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4), 0},
+ {0x0007,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(6) | BIT(4), 0},
+ {0x0005,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4) | BIT(3), BIT(3)},
+ {0x0005,
+ PWR_CV_MSK_C | PWR_CV_MSK_D | PWR_CV_MSK_E | PWR_CV_MSK_F |
+ PWR_CV_MSK_G,
+ PWR_INTF_MSK_USB,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(4) | BIT(3), BIT(3)},
+ {0x1086,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_WRITE, BIT(0), BIT(0)},
+ {0x1086,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_SDIO,
+ PWR_BASE_MAC,
+ PWR_CMD_POLL, BIT(1), 0},
+ {0xFFFF,
+ PWR_CV_MSK_ALL,
+ PWR_INTF_MSK_ALL,
+ 0,
+ PWR_CMD_END, 0, 0},
+};
+
+static const struct rtw89_pwr_cfg * const pwr_on_seq_8852a[] = {
+ rtw8852a_pwron, NULL
+};
+
+static const struct rtw89_pwr_cfg * const pwr_off_seq_8852a[] = {
+ rtw8852a_pwroff, NULL
+};
+
+static void rtw8852ae_efuse_parsing(struct rtw89_efuse *efuse,
+ struct rtw8852a_efuse *map)
+{
+ ether_addr_copy(efuse->addr, map->e.mac_addr);
+ efuse->rfe_type = map->rfe_type;
+ efuse->xtal_cap = map->xtal_k;
+}
+
+static void rtw8852a_efuse_parsing_tssi(struct rtw89_dev *rtwdev,
+ struct rtw8852a_efuse *map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ struct rtw8852a_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi};
+ u8 i, j;
+
+ tssi->thermal[RF_PATH_A] = map->path_a_therm;
+ tssi->thermal[RF_PATH_B] = map->path_b_therm;
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi,
+ sizeof(ofst[i]->cck_tssi));
+
+ for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n",
+ i, j, tssi->tssi_cck[i][j]);
+
+ memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi,
+ sizeof(ofst[i]->bw40_tssi));
+ memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM,
+ ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g));
+
+ for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n",
+ i, j, tssi->tssi_mcs[i][j]);
+ }
+}
+
+static int rtw8852a_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+ struct rtw8852a_efuse *map;
+
+ map = (struct rtw8852a_efuse *)log_map;
+
+ efuse->country_code[0] = map->country_code[0];
+ efuse->country_code[1] = map->country_code[1];
+ rtw8852a_efuse_parsing_tssi(rtwdev, map);
+
+ switch (rtwdev->hci.type) {
+ case RTW89_HCI_TYPE_PCIE:
+ rtw8852ae_efuse_parsing(efuse, map);
+ break;
+ default:
+ return -ENOTSUPP;
+ }
+
+ rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type);
+
+ return 0;
+}
+
+static void rtw8852a_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ struct rtw89_tssi_info *tssi = &rtwdev->tssi;
+ static const u32 tssi_trim_addr[RF_PATH_NUM_8852A] = {0x5D6, 0x5AB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ bool pg = false;
+ u32 ofst;
+ u8 i, j;
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) {
+ /* addrs are in decreasing order */
+ ofst = tssi_trim_addr[i] - addr - j;
+ tssi->tssi_trim[i][j] = phycap_map[ofst];
+
+ if (phycap_map[ofst] != 0xff)
+ pg = true;
+ }
+ }
+
+ if (!pg) {
+ memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim));
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM] no PG, set all trim info to 0\n");
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++)
+ for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++)
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n",
+ i, j, tssi->tssi_trim[i][j],
+ tssi_trim_addr[i] - j);
+}
+
+static void rtw8852a_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 thm_trim_addr[RF_PATH_NUM_8852A] = {0x5DF, 0x5DC};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n",
+ i, info->thermal_trim[i]);
+
+ if (info->thermal_trim[i] != 0xff)
+ info->pg_thermal_trim = true;
+ }
+}
+
+static void rtw8852a_thermal_trim(struct rtw89_dev *rtwdev)
+{
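+ /* remap the raw 5-bit efuse trim value into the layout expected by the RR_TM2_OFF field */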
+#define __thm_setting(raw) \
+({ \
+ u8 __v = (raw); \
+ ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \
+})
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 i, val;
+
+ if (!info->pg_thermal_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ val = __thm_setting(info->thermal_trim[i]);
+ rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n",
+ i, val);
+ }
+#undef __thm_setting
+}
+
+static void rtw8852a_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev,
+ u8 *phycap_map)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ static const u32 pabias_trim_addr[RF_PATH_NUM_8852A] = {0x5DE, 0x5DB};
+ u32 addr = rtwdev->chip->phycap_addr;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n",
+ i, info->pa_bias_trim[i]);
+
+ if (info->pa_bias_trim[i] != 0xff)
+ info->pg_pa_bias_trim = true;
+ }
+}
+
+static void rtw8852a_pa_bias_trim(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_power_trim_info *info = &rtwdev->pwr_trim;
+ u8 pabias_2g, pabias_5g;
+ u8 i;
+
+ if (!info->pg_pa_bias_trim) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] no PG, do nothing\n");
+
+ return;
+ }
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]);
+ pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n",
+ i, pabias_2g, pabias_5g);
+
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g);
+ rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g);
+ }
+}
+
+static int rtw8852a_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map)
+{
+ rtw8852a_phycap_parsing_tssi(rtwdev, phycap_map);
+ rtw8852a_phycap_parsing_thermal_trim(rtwdev, phycap_map);
+ rtw8852a_phycap_parsing_pa_bias_trim(rtwdev, phycap_map);
+
+ return 0;
+}
+
+static void rtw8852a_power_trim(struct rtw89_dev *rtwdev)
+{
+ rtw8852a_thermal_trim(rtwdev);
+ rtw8852a_pa_bias_trim(rtwdev);
+}
+
+static void rtw8852a_set_channel_mac(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ u8 mac_idx)
+{
+ u32 rf_mod = rtw89_mac_reg_by_idx(R_AX_WMAC_RFMOD, mac_idx);
+ u32 sub_carr = rtw89_mac_reg_by_idx(R_AX_TX_SUB_CARRIER_VALUE,
+ mac_idx);
+ u32 chk_rate = rtw89_mac_reg_by_idx(R_AX_TXRATE_CHK, mac_idx);
+ u8 txsc20 = 0, txsc40 = 0;
+
+ switch (param->bandwidth) {
+ case RTW89_CHANNEL_WIDTH_80:
+ txsc40 = rtw89_phy_get_txsc(rtwdev, param,
+ RTW89_CHANNEL_WIDTH_40);
+ fallthrough;
+ case RTW89_CHANNEL_WIDTH_40:
+ txsc20 = rtw89_phy_get_txsc(rtwdev, param,
+ RTW89_CHANNEL_WIDTH_20);
+ break;
+ default:
+ break;
+ }
+
+ switch (param->bandwidth) {
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1));
+ rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4));
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0));
+ rtw89_write32(rtwdev, sub_carr, txsc20);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK);
+ rtw89_write32(rtwdev, sub_carr, 0);
+ break;
+ default:
+ break;
+ }
+
+ if (param->center_chan > 14)
+ rtw89_write8_set(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+ else
+ rtw89_write8_clr(rtwdev, chk_rate,
+ B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6);
+}
+
+static const u32 rtw8852a_sco_barker_threshold[14] = {
+ 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6,
+ 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4
+};
+
+static const u32 rtw8852a_sco_cck_threshold[14] = {
+ 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724,
+ 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed
+};
+
+static int rtw8852a_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 central_ch,
+ u8 primary_ch, enum rtw89_bandwidth bw)
+{
+ u8 ch_element;
+
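+ /* pick the per-channel threshold entry; for 40 MHz the index is shifted by two according to the primary channel position */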
+ if (bw == RTW89_CHANNEL_WIDTH_20) {
+ ch_element = central_ch - 1;
+ } else if (bw == RTW89_CHANNEL_WIDTH_40) {
+ if (primary_ch == 1)
+ ch_element = central_ch - 1 + 2;
+ else
+ ch_element = central_ch - 1 - 2;
+ } else {
+ rtw89_warn(rtwdev, "Invalid BW:%d for CCK\n", bw);
+ return -EINVAL;
+ }
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH,
+ rtw8852a_sco_barker_threshold[ch_element]);
+ rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH,
+ rtw8852a_sco_cck_threshold[ch_element]);
+
+ return 0;
+}
+
+static void rtw8852a_ch_setting(struct rtw89_dev *rtwdev, u8 central_ch,
+ u8 path)
+{
+ u32 val;
+
+ val = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ if (val == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
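+ /* clear the channel/band bits of RF_0x18, program the new channel, and set the band bits for channels above 14 */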
+ val &= ~0x303ff;
+ val |= central_ch;
+ if (central_ch > 14)
+ val |= (BIT(16) | BIT(8));
+ rtw89_write_rf(rtwdev, path, RR_CFGCH, RFREG_MASK, val);
+}
+
+static u8 rtw8852a_sco_mapping(u8 central_ch)
+{
+ if (central_ch == 1)
+ return 109;
+ else if (central_ch >= 2 && central_ch <= 6)
+ return 108;
+ else if (central_ch >= 7 && central_ch <= 10)
+ return 107;
+ else if (central_ch >= 11 && central_ch <= 14)
+ return 106;
+ else if (central_ch == 36 || central_ch == 38)
+ return 51;
+ else if (central_ch >= 40 && central_ch <= 58)
+ return 50;
+ else if (central_ch >= 60 && central_ch <= 64)
+ return 49;
+ else if (central_ch == 100 || central_ch == 102)
+ return 48;
+ else if (central_ch >= 104 && central_ch <= 126)
+ return 47;
+ else if (central_ch >= 128 && central_ch <= 151)
+ return 46;
+ else if (central_ch >= 153 && central_ch <= 177)
+ return 45;
+ else
+ return 0;
+}
+
+static void rtw8852a_ctrl_ch(struct rtw89_dev *rtwdev, u8 central_ch,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 sco_comp;
+ bool is_2g = central_ch <= 14;
+
+ if (phy_idx == RTW89_PHY_0) {
+ /* Path A */
+ rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_A);
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_TIA_ERR_G1,
+ B_PATH0_TIA_ERR_G1_SEL, 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_PATH0_TIA_ERR_G1,
+ B_PATH0_TIA_ERR_G1_SEL, 0,
+ phy_idx);
+
+ /* Path B */
+ if (!rtwdev->dbcc_en) {
+ rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_B);
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_P1_MODE,
+ B_P1_MODE_SEL,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_P1_MODE,
+ B_P1_MODE_SEL,
+ 0, phy_idx);
+ } else {
+ if (is_2g)
+ rtw89_phy_write32_clr(rtwdev, R_2P4G_BAND,
+ B_2P4G_BAND_SEL);
+ else
+ rtw89_phy_write32_set(rtwdev, R_2P4G_BAND,
+ B_2P4G_BAND_SEL);
+ }
+ /* SCO compensate FC setting */
+ sco_comp = rtw8852a_sco_mapping(central_ch);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV,
+ sco_comp, phy_idx);
+ } else {
+ /* Path B */
+ rtw8852a_ch_setting(rtwdev, central_ch, RF_PATH_B);
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_P1_MODE,
+ B_P1_MODE_SEL,
+ 1, phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_P1_MODE,
+ B_P1_MODE_SEL,
+ 0, phy_idx);
+ /* SCO compensate FC setting */
+ sco_comp = rtw8852a_sco_mapping(central_ch);
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_INV,
+ sco_comp, phy_idx);
+ }
+
+ /* Band edge */
+ if (is_2g)
+ rtw89_phy_write32_idx(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_BANDEDGE, B_BANDEDGE_EN, 0,
+ phy_idx);
+
+ /* CCK parameters */
+ if (central_ch == 14) {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01,
+ 0x3b13ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23,
+ 0x1c42de);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45,
+ 0xfdb0ad);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67,
+ 0xf60f6e);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89,
+ 0xfd8f92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF,
+ 0xfff00a);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01,
+ 0x3d23ff);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23,
+ 0x29b354);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67,
+ 0xfdb053);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89,
+ 0xf86f9a);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB,
+ 0xfaef92);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD,
+ 0xfe5fcc);
+ rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF,
+ 0xffdff5);
+ }
+}
+
+static void rtw8852a_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path)
+{
+ u32 val = 0;
+ u32 adc_sel[2] = {0x12d0, 0x32d0};
+ u32 wbadc_sel[2] = {0x12ec, 0x32ec};
+
+ val = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ if (val == INV_RF_DATA) {
+ rtw89_warn(rtwdev, "Invalid RF_0x18 for Path-%d\n", path);
+ return;
+ }
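+ /* BIT(11)/BIT(10) of RF_0x18 carry the RF bandwidth; the ADC and WBADC selects are adjusted alongside */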
+ val &= ~(BIT(11) | BIT(10));
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0);
+ val |= (BIT(11) | BIT(10));
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1);
+ val |= (BIT(11) | BIT(10));
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ val |= (BIT(11) | BIT(10));
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ val |= BIT(11);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0);
+ rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2);
+ val |= BIT(10);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to set ADC\n");
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_CFGCH, RFREG_MASK, val);
+}
+
+static void
+rtw8852a_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw,
+ enum rtw89_phy_idx phy_idx)
+{
+ /* Switch bandwidth */
+ switch (bw) {
+ case RTW89_CHANNEL_WIDTH_5:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_10:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x2,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_20:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ 0x0, phy_idx);
+ break;
+ case RTW89_CHANNEL_WIDTH_40:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ if (pri_ch == RTW89_SC_20_UPPER)
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0);
+ break;
+ case RTW89_CHANNEL_WIDTH_80:
+ rtw89_phy_write32_idx(rtwdev, R_FC0_BW, B_FC0_BW_SET, 0x2,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_SBW, 0x0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD, B_CHBW_MOD_PRICH,
+ pri_ch,
+ phy_idx);
+ break;
+ default:
+ rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw,
+ pri_ch);
+ }
+
+ if (phy_idx == RTW89_PHY_0) {
+ rtw8852a_bw_setting(rtwdev, bw, RF_PATH_A);
+ if (!rtwdev->dbcc_en)
+ rtw8852a_bw_setting(rtwdev, bw, RF_PATH_B);
+ } else {
+ rtw8852a_bw_setting(rtwdev, bw, RF_PATH_B);
+ }
+}
+
+static void rtw8852a_spur_elimination(struct rtw89_dev *rtwdev, u8 central_ch)
+{
+ if (central_ch == 153) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL,
+ 0x210);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
+ 0x210);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x7c0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
+ B_P0_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
+ B_P1_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN,
+ 0x1);
+ } else if (central_ch == 151) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL,
+ 0x210);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
+ 0x210);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x40);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
+ B_P0_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
+ B_P1_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN,
+ 0x1);
+ } else if (central_ch == 155) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX, B_P0_NBIIDX_VAL,
+ 0x2d0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX, B_P1_NBIIDX_VAL,
+ 0x2d0);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI, 0xfff, 0x740);
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
+ B_P0_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
+ B_P1_NBIIDX_NOTCH_EN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN,
+ 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_NBIIDX,
+ B_P0_NBIIDX_NOTCH_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_NBIIDX,
+ B_P1_NBIIDX_NOTCH_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_SEG0CSI_EN, B_SEG0CSI_EN,
+ 0x0);
+ }
+}
+
+static void rtw8852a_bb_reset_all(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0,
+ phy_idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1,
+ phy_idx);
+}
+
+static void rtw8852a_bb_reset_en(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, bool en)
+{
+ if (en)
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL,
+ 1,
+ phy_idx);
+ else
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL,
+ 0,
+ phy_idx);
+}
+
+static void rtw8852a_bb_reset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ rtw8852a_bb_reset_all(rtwdev, phy_idx);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+}
+
+static void rtw8852a_bb_macid_ctrl_init(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u32 addr;
+
+ for (addr = R_AX_PWR_MACID_LMT_TABLE0;
+ addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4)
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0);
+}
+
+static void rtw8852a_bb_sethw(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP);
+ rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP);
+
+ if (rtwdev->hal.cv <= CHIP_CCV) {
+ rtw89_phy_write32_set(rtwdev, R_RSTB_WATCH_DOG, B_P0_RSTB_WATCH_DOG);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_1, 0x864FA000);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_2, 0x3F);
+ rtw89_phy_write32(rtwdev, R_BRK_ASYNC_RST_EN_3, 0x7FFF);
+ rtw89_phy_write32_set(rtwdev, R_SPOOF_ASYNC_RST, B_SPOOF_ASYNC_RST);
+ rtw89_phy_write32_set(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON);
+ rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON);
+ }
+ rtw89_phy_write32_mask(rtwdev, R_CFO_TRK0, B_CFO_TRK_MSK, 0x1f);
+ rtw89_phy_write32_mask(rtwdev, R_CFO_TRK1, B_CFO_TRK_MSK, 0x0c);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_1);
+ rtw89_phy_write32_clr(rtwdev, R_NDP_BRK0, B_NDP_RU_BRK);
+ rtw89_phy_write32_set(rtwdev, R_NDP_BRK1, B_NDP_RU_BRK);
+
+ rtw8852a_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852a_bbrst_for_rfk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ rtw8852a_bb_reset_all(rtwdev, phy_idx);
+ rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN);
+ rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN);
+ udelay(1);
+}
+
+static void rtw8852a_set_channel_bb(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *param,
+ enum rtw89_phy_idx phy_idx)
+{
+ bool cck_en = param->center_chan <= 14;
+ u8 pri_ch_idx = param->pri_ch_idx;
+
+ if (param->center_chan <= 14)
+ rtw8852a_ctrl_sco_cck(rtwdev, param->center_chan,
+ param->primary_chan, param->bandwidth);
+
+ rtw8852a_ctrl_ch(rtwdev, param->center_chan, phy_idx);
+ rtw8852a_ctrl_bw(rtwdev, pri_ch_idx, param->bandwidth, phy_idx);
+ if (cck_en) {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1);
+ rtw8852a_bbrst_for_rfk(rtwdev, phy_idx);
+ }
+ rtw8852a_spur_elimination(rtwdev, param->center_chan);
+ rtw8852a_bb_reset_all(rtwdev, phy_idx);
+}
+
+static void rtw8852a_set_channel(struct rtw89_dev *rtwdev,
+ struct rtw89_channel_params *params)
+{
+ rtw8852a_set_channel_mac(rtwdev, params, RTW89_MAC_0);
+ rtw8852a_set_channel_bb(rtwdev, params, RTW89_PHY_0);
+}
+
+static void rtw8852a_dfs_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 1);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_UPD_P0, B_UPD_P0_EN, 0);
+}
+
+static void rtw8852a_tssi_cont_en(struct rtw89_dev *rtwdev, bool en,
+ enum rtw89_rf_path path)
+{
+ static const u32 tssi_trk[2] = {0x5818, 0x7818};
+ static const u32 ctrl_bbrst[2] = {0x58dc, 0x78dc};
+
+ if (en) {
+ rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], BIT(30), 0x0);
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, ctrl_bbrst[path], BIT(30), 0x1);
+ rtw89_phy_write32_mask(rtwdev, tssi_trk[path], BIT(30), 0x1);
+ }
+}
+
+static void rtw8852a_tssi_cont_en_phyidx(struct rtw89_dev *rtwdev, bool en,
+ u8 phy_idx)
+{
+ if (!rtwdev->dbcc_en) {
+ rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ } else {
+ if (phy_idx == RTW89_PHY_0)
+ rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_A);
+ else
+ rtw8852a_tssi_cont_en(rtwdev, en, RF_PATH_B);
+ }
+}
+
+static void rtw8852a_adc_en(struct rtw89_dev *rtwdev, bool en)
+{
+ if (en)
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0x0);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST,
+ 0xf);
+}
+
+static void rtw8852a_set_channel_help(struct rtw89_dev *rtwdev, bool enter,
+ struct rtw89_channel_help_params *p)
+{
+ u8 phy_idx = RTW89_PHY_0;
+
+ if (enter) {
+ rtw89_mac_stop_sch_tx(rtwdev, RTW89_MAC_0, &p->tx_en, RTW89_SCH_TX_SEL_ALL);
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false);
+ rtw8852a_dfs_en(rtwdev, false);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, false, RTW89_PHY_0);
+ rtw8852a_adc_en(rtwdev, false);
+ fsleep(40);
+ rtw8852a_bb_reset_en(rtwdev, phy_idx, false);
+ } else {
+ rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true);
+ rtw8852a_adc_en(rtwdev, true);
+ rtw8852a_dfs_en(rtwdev, true);
+ rtw8852a_tssi_cont_en_phyidx(rtwdev, true, RTW89_PHY_0);
+ rtw8852a_bb_reset_en(rtwdev, phy_idx, true);
+ rtw89_mac_resume_sch_tx(rtwdev, RTW89_MAC_0, p->tx_en);
+ }
+}
+
+static void rtw8852a_fem_setup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_efuse *efuse = &rtwdev->efuse;
+
+ switch (efuse->rfe_type) {
+ case 11:
+ case 12:
+ case 17:
+ case 18:
+ case 51:
+ case 53:
+ rtwdev->fem.epa_2g = true;
+ rtwdev->fem.elna_2g = true;
+ fallthrough;
+ case 9:
+ case 10:
+ case 15:
+ case 16:
+ rtwdev->fem.epa_5g = true;
+ rtwdev->fem.elna_5g = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void rtw8852a_rfk_init(struct rtw89_dev *rtwdev)
+{
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+
+ rtw8852a_rck(rtwdev);
+ rtw8852a_dack(rtwdev);
+ rtw8852a_rx_dck(rtwdev, RTW89_PHY_0, true);
+}
+
+static void rtw8852a_rfk_channel(struct rtw89_dev *rtwdev)
+{
+ enum rtw89_phy_idx phy_idx = RTW89_PHY_0;
+
+ rtw8852a_rx_dck(rtwdev, phy_idx, true);
+ rtw8852a_iqk(rtwdev, phy_idx);
+ rtw8852a_tssi(rtwdev, phy_idx);
+ rtw8852a_dpk(rtwdev, phy_idx);
+}
+
+static void rtw8852a_rfk_band_changed(struct rtw89_dev *rtwdev)
+{
+ rtw8852a_tssi_scan(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852a_rfk_scan(struct rtw89_dev *rtwdev, bool start)
+{
+ rtw8852a_wifi_scan_notify(rtwdev, start, RTW89_PHY_0);
+}
+
+static void rtw8852a_rfk_track(struct rtw89_dev *rtwdev)
+{
+ rtw8852a_dpk_track(rtwdev);
+ rtw8852a_iqk_track(rtwdev);
+ rtw8852a_tssi_track(rtwdev);
+}
+
+static u32 rtw8852a_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, s16 ref)
+{
+ s8 ofst_int = 0;
+ u8 base_cw_0db = 0x27;
+ u16 tssi_16dbm_cw = 0x12c;
+ s16 pwr_s10_3 = 0;
+ s16 rf_pwr_cw = 0;
+ u16 bb_pwr_cw = 0;
+ u32 pwr_cw = 0;
+ u32 tssi_ofst_cw = 0;
+
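+ /* build the power codeword in s(10,3) fixed-point: the low 3 bits go to the BB codeword, bits 8:3 to the RF codeword */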
+ pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3);
+ bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3);
+ rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3);
+ rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63);
+ pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw;
+
+ tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3));
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n",
+ tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw);
+
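+ /* pack TSSI offset, power codeword and reference into a single register value */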
+ return (tssi_ofst_cw << 18) | (pwr_cw << 9) | (ref & GENMASK(8, 0));
+}
+
+static
+void rtw8852a_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev,
+ s16 pw_ofst, enum rtw89_mac_idx mac_idx)
+{
+ s32 val_1t = 0;
+ s32 val_2t = 0;
+ u32 reg;
+
+ if (pw_ofst < -16 || pw_ofst > 15) {
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Err pwr_offset=%d\n",
+ pw_ofst);
+ return;
+ }
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_CTRL, mac_idx);
+ rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN);
+ val_1t = (s32)pw_ofst;
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_1T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, val_1t);
+ val_2t = max(val_1t - 3, -16);
+ reg = rtw89_mac_reg_by_idx(R_AX_PWR_UL_TB_2T, mac_idx);
+ rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, val_2t);
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[ULTB] Set TB pwr_offset=(%d, %d)\n",
+ val_1t, val_2t);
+}
+
+static void rtw8852a_set_txpwr_ref(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ static const u32 addr[RF_PATH_NUM_8852A] = {0x5800, 0x7800};
+ const u32 mask = 0x7FFFFFF;
+ const u8 ofst_ofdm = 0x4;
+ const u8 ofst_cck = 0x8;
+ s16 ref_ofdm = 0;
+ s16 ref_cck = 0;
+ u32 val;
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n");
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL,
+ GENMASK(27, 10), 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n");
+ val = rtw8852a_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm);
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val,
+ phy_idx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n");
+ val = rtw8852a_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck);
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++)
+ rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val,
+ phy_idx);
+}
+
+static void rtw8852a_set_txpwr_byrate(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ u8 ch = rtwdev->hal.current_channel;
+ static const u8 rs[] = {
+ RTW89_RS_CCK,
+ RTW89_RS_OFDM,
+ RTW89_RS_MCS,
+ RTW89_RS_HEDCM,
+ };
+ s8 tmp;
+ u8 i, j;
+ u32 val, shf, addr = R_AX_PWR_BY_RATE;
+ struct rtw89_rate_desc cur;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr byrate with ch=%d\n", ch);
+
+ for (cur.nss = 0; cur.nss <= RTW89_NSS_2; cur.nss++) {
+ for (i = 0; i < ARRAY_SIZE(rs); i++) {
+ if (cur.nss >= rtw89_rs_nss_max[rs[i]])
+ continue;
+
+ val = 0;
+ cur.rs = rs[i];
+
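+ /* pack four s8 by-rate entries per 32-bit word; flush to the MAC
+  * every fourth entry and advance the register address
+  */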
+ for (j = 0; j < rtw89_rs_idx_max[rs[i]]; j++) {
+ cur.idx = j;
+ shf = (j % 4) * 8;
+ tmp = rtw89_phy_read_txpwr_byrate(rtwdev, &cur);
+ val |= (tmp << shf);
+
+ if ((j + 1) % 4)
+ continue;
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ val = 0;
+ addr += 4;
+ }
+ }
+ }
+}
+
+static void rtw8852a_set_txpwr_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+ struct rtw89_rate_desc desc = {
+ .nss = RTW89_NSS_1,
+ .rs = RTW89_RS_OFFSET,
+ };
+ u32 val = 0;
+ s8 v;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr offset\n");
+
+ for (desc.idx = 0; desc.idx < RTW89_RATE_OFFSET_MAX; desc.idx++) {
+ v = rtw89_phy_read_txpwr_byrate(rtwdev, &desc);
+ val |= ((v & 0xf) << (4 * desc.idx));
+ }
+
+ rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_OFST_CTRL,
+ GENMASK(19, 0), val);
+}
+
+static void rtw8852a_set_txpwr_limit(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_PAGE_SIZE 40
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+ struct rtw89_txpwr_limit lmt[NTX_NUM_8852A];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j, k;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852A; i++) {
+ rtw89_phy_fill_txpwr_limit(rtwdev, &lmt[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_LMT + j + __MAC_TXPWR_LMT_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt[i] + j;
+ val = 0;
+
+ for (k = 0; k < 4; k++)
+ val |= (ptr[k] << (8 * k));
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+#undef __MAC_TXPWR_LMT_PAGE_SIZE
+}
+
+static void rtw8852a_set_txpwr_limit_ru(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx)
+{
+#define __MAC_TXPWR_LMT_RU_PAGE_SIZE 24
+ u8 ch = rtwdev->hal.current_channel;
+ u8 bw = rtwdev->hal.current_band_width;
+ struct rtw89_txpwr_limit_ru lmt_ru[NTX_NUM_8852A];
+ u32 addr, val;
+ const s8 *ptr;
+ u8 i, j, k;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR,
+ "[TXPWR] set txpwr limit ru with ch=%d bw=%d\n", ch, bw);
+
+ for (i = 0; i < NTX_NUM_8852A; i++) {
+ rtw89_phy_fill_txpwr_limit_ru(rtwdev, &lmt_ru[i], i);
+
+ for (j = 0; j < __MAC_TXPWR_LMT_RU_PAGE_SIZE; j += 4) {
+ addr = R_AX_PWR_RU_LMT + j +
+ __MAC_TXPWR_LMT_RU_PAGE_SIZE * i;
+ ptr = (s8 *)&lmt_ru[i] + j;
+ val = 0;
+
+ for (k = 0; k < 4; k++)
+ val |= (ptr[k] << (8 * k));
+
+ rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, val);
+ }
+ }
+
+#undef __MAC_TXPWR_LMT_RU_PAGE_SIZE
+}
+
+static void rtw8852a_set_txpwr(struct rtw89_dev *rtwdev)
+{
+ rtw8852a_set_txpwr_byrate(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_limit(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_limit_ru(rtwdev, RTW89_PHY_0);
+}
+
+static void rtw8852a_set_txpwr_ctrl(struct rtw89_dev *rtwdev)
+{
+ rtw8852a_set_txpwr_ref(rtwdev, RTW89_PHY_0);
+ rtw8852a_set_txpwr_offset(rtwdev, RTW89_PHY_0);
+}
+
+static int
+rtw8852a_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ int ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf004);
+ if (ret)
+ return ret;
+
+ ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev)
+{
+ u8 i = 0;
+ u32 addr, val;
+
+ for (i = 0; i < ARRAY_SIZE(rtw8852a_pmac_ht20_mcs7_tbl); i++) {
+ addr = rtw8852a_pmac_ht20_mcs7_tbl[i].addr;
+ val = rtw8852a_pmac_ht20_mcs7_tbl[i].data;
+ rtw89_phy_write32(rtwdev, addr, val);
+ }
+}
+
+static void rtw8852a_stop_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852a_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx");
+ if (tx_info->mode == CONT_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0,
+ idx);
+ else if (tx_info->mode == PKTS_TX)
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0,
+ idx);
+}
+
+static void rtw8852a_start_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852a_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ enum rtw8852a_pmac_mode mode = tx_info->mode;
+ u32 pkt_cnt = tx_info->tx_cnt;
+ u16 period = tx_info->period;
+
+ if (mode == CONT_TX && !tx_info->is_cck) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1,
+ idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start");
+ } else if (mode == PKTS_TX) {
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1,
+ idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD,
+ B_PMAC_TX_PRD_MSK, period, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK,
+ pkt_cnt, idx);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start");
+ }
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx);
+}
+
+void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852a_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx)
+{
+ if (!tx_info->en_pmac_tx) {
+ rtw8852a_stop_pmac_tx(rtwdev, tx_info, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx);
+ if (rtwdev->hal.current_band_type == RTW89_BAND_2G)
+ rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ return;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable");
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f,
+ idx);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx);
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+ rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx);
+ rtw8852a_start_pmac_tx(rtwdev, tx_info, idx);
+}
+
+void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx)
+{
+ struct rtw8852a_bb_pmac_info tx_info = {0};
+
+ tx_info.en_pmac_tx = enable;
+ tx_info.is_cck = 0;
+ tx_info.mode = PKTS_TX;
+ tx_info.tx_cnt = tx_cnt;
+ tx_info.period = period;
+ tx_info.tx_time = tx_time;
+ rtw8852a_bb_set_pmac_tx(rtwdev, &tx_info, idx);
+}
+
+void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx);
+ rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx);
+}
+
+void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path)
+{
+ u32 rst_mask0 = 0;
+ u32 rst_mask1 = 0;
+
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_1);
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path);
+ if (!rtwdev->dbcc_en) {
+ if (tx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL,
+ B_TXPATH_SEL_MSK, 1);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP,
+ B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_B) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL,
+ B_TXPATH_SEL_MSK, 2);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP,
+ B_TXNSS_MAP_MSK, 0);
+ } else if (tx_path == RF_PATH_AB) {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL,
+ B_TXPATH_SEL_MSK, 3);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP,
+ B_TXNSS_MAP_MSK, 4);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path");
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK,
+ 1);
+ rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2,
+ RTW89_PHY_1);
+ rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK,
+ 0);
+ rtw89_phy_write32_idx(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4,
+ RTW89_PHY_1);
+ }
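+ /* write 0x1 then 0x3 to the MANON/TSSI reset field to re-latch the
+  * new path selection (sequence kept as in the vendor flow)
+  */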
+ rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI;
+ rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI;
+ if (tx_path == RF_PATH_A) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3);
+ }
+}
+
+void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode)
+{
+ if (mode != 0)
+ return;
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch");
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx);
+ rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx);
+}
+
+static void rtw8852a_bb_ctrl_btc_preagc(struct rtw89_dev *rtwdev, bool bt_en)
+{
+ rtw89_phy_write_reg3_tbl(rtwdev, bt_en ? &rtw8852a_btc_preagc_en_defs_tbl :
+ &rtw8852a_btc_preagc_dis_defs_tbl);
+}
+
+static u8 rtw8852a_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path)
+{
+ if (rtwdev->is_tssi_mode[rf_path]) {
+ u32 addr = 0x1c10 + (rf_path << 13);
+
+ return (u8)rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000);
+ }
+
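+ /* not in TSSI mode: pulse the RF thermal meter trigger and let the
+  * measurement settle before reading the result
+  */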
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0);
+ rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1);
+
+ fsleep(200);
+
+ return (u8)rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL);
+}
+
+static void rtw8852a_btc_set_rfe(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+
+ module->rfe_type = rtwdev->efuse.rfe_type;
+ module->cv = rtwdev->hal.cv;
+ module->bt_solo = 0;
+ module->switch_type = BTC_SWITCH_INTERNAL;
+
+ if (module->rfe_type > 0)
+ module->ant.num = (module->rfe_type % 2 ? 2 : 3);
+ else
+ module->ant.num = 2;
+
+ module->ant.diversity = 0;
+ module->ant.isolation = 10;
+
+ if (module->ant.num == 3) {
+ module->ant.type = BTC_ANT_DEDICATED;
+ module->bt_pos = BTC_BT_ALONE;
+ } else {
+ module->ant.type = BTC_ANT_SHARED;
+ module->bt_pos = BTC_BT_BTG;
+ }
+}
+
+static
+void rtw8852a_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val)
+{
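+ /* indirect RF LUT write: enable LUT write mode, select the group as
+  * address, write the data word, then disable write mode
+  */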
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x20000);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, 0xfffff, group);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, 0xfffff, val);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, 0xfffff, 0x0);
+}
+
+static void rtw8852a_ctrl_btg(struct rtw89_dev *rtwdev, bool btg)
+{
+ if (btg) {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG, B_PATH0_BTG_SHEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG, B_PATH1_BTG_SHEN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xf);
+ rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P2, 0x4);
+ }
+}
+
+static void rtw8852a_btc_init_cfg(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_module *module = &btc->mdinfo;
+ const struct rtw89_chip_info *chip = rtwdev->chip;
+ const struct rtw89_mac_ax_coex coex_params = {
+ .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE,
+ .direction = RTW89_MAC_AX_COEX_INNER,
+ };
+
+ /* PTA init */
+ rtw89_mac_coex_init(rtwdev, &coex_params);
+
+ /* set WL Tx response = Hi-Pri */
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true);
+ chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true);
+
+ /* set rf gnt debug off */
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, 0xfffff, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, 0xfffff, 0x0);
+
+ /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */
+ if (module->ant.type == BTC_ANT_SHARED) {
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff);
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff);
+ } else { /* set WL Tx standby if GNT_WL = 0 && BT_S1 = ss group for 3-ant */
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_A, BTC_BT_SS_GROUP, 0x5df);
+ rtw8852a_set_trx_mask(rtwdev,
+ RF_PATH_B, BTC_BT_SS_GROUP, 0x5df);
+ }
+
+ /* set PTA break table */
+ rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM);
+
+ /* enable BT counters: 0xda40 bits [16] and [2] = 1 */
+ rtw89_write32_set(rtwdev,
+ R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN);
+ btc->cx.wl.status.map.init_ok = true;
+}
+
+static
+void rtw8852a_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state)
+{
+ u32 bitmap = 0;
+ u32 reg = 0;
+
+ switch (map) {
+ case BTC_PRI_MASK_TX_RESP:
+ reg = R_BTC_BT_COEX_MSK_TABLE;
+ bitmap = B_BTC_PRI_MASK_TX_RESP_V1;
+ break;
+ case BTC_PRI_MASK_BEACON:
+ reg = R_AX_WL_PRI_MSK;
+ bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ;
+ break;
+ default:
+ return;
+ }
+
+ if (state)
+ rtw89_write32_set(rtwdev, reg, bitmap);
+ else
+ rtw89_write32_clr(rtwdev, reg, bitmap);
+}
+
+static inline u32 __btc_ctrl_val_all_time(u32 ctrl)
+{
+ return FIELD_GET(GENMASK(15, 0), ctrl);
+}
+
+static inline u32 __btc_ctrl_rst_all_time(u32 cur)
+{
+ return cur & ~B_AX_FORCE_PWR_BY_RATE_EN;
+}
+
+static inline u32 __btc_ctrl_gen_all_time(u32 cur, u32 val)
+{
+ u32 hv = cur & ~B_AX_FORCE_PWR_BY_RATE_VALUE_MASK;
+ u32 lv = val & B_AX_FORCE_PWR_BY_RATE_VALUE_MASK;
+
+ return hv | lv | B_AX_FORCE_PWR_BY_RATE_EN;
+}
+
+static inline u32 __btc_ctrl_val_gnt_bt(u32 ctrl)
+{
+ return FIELD_GET(GENMASK(31, 16), ctrl);
+}
+
+static inline u32 __btc_ctrl_rst_gnt_bt(u32 cur)
+{
+ return cur & ~B_AX_TXAGC_BT_EN;
+}
+
+static inline u32 __btc_ctrl_gen_gnt_bt(u32 cur, u32 val)
+{
+ u32 ov = cur & ~B_AX_TXAGC_BT_MASK;
+ u32 iv = FIELD_PREP(B_AX_TXAGC_BT_MASK, val);
+
+ return ov | iv | B_AX_TXAGC_BT_EN;
+}
+
+static void
+rtw8852a_btc_set_wl_txpwr_ctrl(struct rtw89_dev *rtwdev, u32 txpwr_val)
+{
+ const u32 __btc_cr_all_time = R_AX_PWR_RATE_CTRL;
+ const u32 __btc_cr_gnt_bt = R_AX_PWR_COEXT_CTRL;
+
+#define __do_clr(_chk) ((_chk) == GENMASK(15, 0))
+#define __handle(_case) \
+ do { \
+ const u32 _reg = __btc_cr_ ## _case; \
+ u32 _val = __btc_ctrl_val_ ## _case(txpwr_val); \
+ u32 _cur, _wrt; \
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
+ "btc ctrl %s: 0x%x\n", #_case, _val); \
+ rtw89_mac_txpwr_read32(rtwdev, RTW89_PHY_0, _reg, &_cur);\
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
+ "btc ctrl ori 0x%x: 0x%x\n", _reg, _cur); \
+ _wrt = __do_clr(_val) ? \
+ __btc_ctrl_rst_ ## _case(_cur) : \
+ __btc_ctrl_gen_ ## _case(_cur, _val); \
+ rtw89_mac_txpwr_write32(rtwdev, RTW89_PHY_0, _reg, _wrt);\
+ rtw89_debug(rtwdev, RTW89_DBG_TXPWR, \
+ "btc ctrl set 0x%x: 0x%x\n", _reg, _wrt); \
+ } while (0)
+
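+ /* txpwr_val packs two 16-bit fields: bits 15:0 for all-time power,
+  * bits 31:16 for GNT_BT; a field of 0xffff clears the force-enable
+  * bit instead (see __do_clr)
+  */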
+ __handle(all_time);
+ __handle(gnt_bt);
+
+#undef __handle
+#undef __do_clr
+}
+
+static
+s8 rtw8852a_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val)
+{
+ return clamp_t(s8, val, -100, 0) + 100;
+}
+
+static struct rtw89_btc_rf_trx_para rtw89_btc_8852a_rf_ul[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> for BT-connected ACI issue && BTG co-rx */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* entries below are for non-shared-antenna free-run */
+ {6, 1, 0, 7},
+ {13, 1, 0, 7},
+ {13, 1, 0, 7}
+};
+
+static struct rtw89_btc_rf_trx_para rtw89_btc_8852a_rf_dl[] = {
+ {255, 0, 0, 7}, /* 0 -> original */
+ {255, 2, 0, 7}, /* 1 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 2 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 3 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* 4 -> reserved for shared-antenna */
+ {255, 0, 0, 7}, /* entries below are for non-shared-antenna free-run */
+ {255, 1, 0, 7},
+ {255, 1, 0, 7},
+ {255, 1, 0, 7}
+};
+
+static const
+u8 rtw89_btc_8852a_wl_rssi_thres[BTC_WL_RSSI_THMAX] = {60, 50, 40, 30};
+static const
+u8 rtw89_btc_8852a_bt_rssi_thres[BTC_BT_RSSI_THMAX] = {40, 36, 31, 28};
+
+static struct rtw89_btc_fbtc_mreg rtw89_btc_8852a_mon_reg[] = {
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda24),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda28),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda2c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda30),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda4c),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda10),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda20),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xda34),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0xcef4),
+ RTW89_DEF_FBTC_MREG(REG_MAC, 4, 0x8424),
+ RTW89_DEF_FBTC_MREG(REG_BB, 4, 0x980),
+ RTW89_DEF_FBTC_MREG(REG_BT_MODEM, 4, 0x178),
+};
+
+static
+void rtw8852a_btc_bt_aci_imp(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_dm *dm = &btc->dm;
+ struct rtw89_btc_bt_info *bt = &btc->cx.bt;
+ struct rtw89_btc_bt_link_info *b = &bt->link_info;
+
+ /* fix LNA2 = level-5 for BT ACI issue at BTG */
+ if (btc->dm.wl_btg_rx && b->profile_cnt.now != 0)
+ dm->trx_para_level = 1;
+}
+
+static
+void rtw8852a_btc_update_bt_cnt(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_btc *btc = &rtwdev->btc;
+ struct rtw89_btc_cx *cx = &btc->cx;
+ u32 val;
+
+ val = rtw89_read32(rtwdev, R_AX_BT_STAST_HIGH);
+ cx->cnt_bt[BTC_BCNT_HIPRI_TX] = FIELD_GET(B_AX_STATIS_BT_HI_TX_MASK, val);
+ cx->cnt_bt[BTC_BCNT_HIPRI_RX] = FIELD_GET(B_AX_STATIS_BT_HI_RX_MASK, val);
+
+ val = rtw89_read32(rtwdev, R_AX_BT_STAST_LOW);
+ cx->cnt_bt[BTC_BCNT_LOPRI_TX] = FIELD_GET(B_AX_STATIS_BT_LO_TX_1_MASK, val);
+ cx->cnt_bt[BTC_BCNT_LOPRI_RX] = FIELD_GET(B_AX_STATIS_BT_LO_RX_1_MASK, val);
+
+ /* clock-gate off before resetting the counters */
+ rtw89_write32_set(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+ rtw89_write32_clr(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST);
+ rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST);
+ rtw89_write32_clr(rtwdev, R_AX_BTC_CFG, B_AX_DIS_BTC_CLK_G);
+}
+
+static
+void rtw8852a_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state)
+{
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x1);
+
+ /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */
+ if (state)
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0xa2d7c);
+ else
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0,
+ RFREG_MASK, 0xa2020);
+
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
+static void rtw8852a_query_ppdu(struct rtw89_dev *rtwdev,
+ struct rtw89_rx_phy_ppdu *phy_ppdu,
+ struct ieee80211_rx_status *status)
+{
+ u8 path;
+ s8 *rx_power = phy_ppdu->rssi;
+
+ status->signal = max_t(s8, rx_power[RF_PATH_A], rx_power[RF_PATH_B]);
+ for (path = 0; path < rtwdev->chip->rf_path_num; path++) {
+ status->chains |= BIT(path);
+ status->chain_signal[path] = rx_power[path];
+ }
+}
+
+static const struct rtw89_chip_ops rtw8852a_chip_ops = {
+ .bb_reset = rtw8852a_bb_reset,
+ .bb_sethw = rtw8852a_bb_sethw,
+ .read_rf = rtw89_phy_read_rf,
+ .write_rf = rtw89_phy_write_rf,
+ .set_channel = rtw8852a_set_channel,
+ .set_channel_help = rtw8852a_set_channel_help,
+ .read_efuse = rtw8852a_read_efuse,
+ .read_phycap = rtw8852a_read_phycap,
+ .fem_setup = rtw8852a_fem_setup,
+ .rfk_init = rtw8852a_rfk_init,
+ .rfk_channel = rtw8852a_rfk_channel,
+ .rfk_band_changed = rtw8852a_rfk_band_changed,
+ .rfk_scan = rtw8852a_rfk_scan,
+ .rfk_track = rtw8852a_rfk_track,
+ .power_trim = rtw8852a_power_trim,
+ .set_txpwr = rtw8852a_set_txpwr,
+ .set_txpwr_ctrl = rtw8852a_set_txpwr_ctrl,
+ .init_txpwr_unit = rtw8852a_init_txpwr_unit,
+ .get_thermal = rtw8852a_get_thermal,
+ .ctrl_btg = rtw8852a_ctrl_btg,
+ .query_ppdu = rtw8852a_query_ppdu,
+ .bb_ctrl_btc_preagc = rtw8852a_bb_ctrl_btc_preagc,
+ .set_txpwr_ul_tb_offset = rtw8852a_set_txpwr_ul_tb_offset,
+
+ .btc_set_rfe = rtw8852a_btc_set_rfe,
+ .btc_init_cfg = rtw8852a_btc_init_cfg,
+ .btc_set_wl_pri = rtw8852a_btc_set_wl_pri,
+ .btc_set_wl_txpwr_ctrl = rtw8852a_btc_set_wl_txpwr_ctrl,
+ .btc_get_bt_rssi = rtw8852a_btc_get_bt_rssi,
+ .btc_bt_aci_imp = rtw8852a_btc_bt_aci_imp,
+ .btc_update_bt_cnt = rtw8852a_btc_update_bt_cnt,
+ .btc_wl_s1_standby = rtw8852a_btc_wl_s1_standby,
+};
+
+const struct rtw89_chip_info rtw8852a_chip_info = {
+ .chip_id = RTL8852A,
+ .ops = &rtw8852a_chip_ops,
+ .fw_name = "rtw89/rtw8852a_fw.bin",
+ .fifo_size = 458752,
+ .max_amsdu_limit = 3500,
+ .dis_2g_40m_ul_ofdma = true,
+ .hfc_param_ini = rtw8852a_hfc_param_ini_pcie,
+ .dle_mem = rtw8852a_dle_mem_pcie,
+ .rf_base_addr = {0xc000, 0xd000},
+ .pwr_on_seq = pwr_on_seq_8852a,
+ .pwr_off_seq = pwr_off_seq_8852a,
+ .bb_table = &rtw89_8852a_phy_bb_table,
+ .rf_table = {&rtw89_8852a_phy_radioa_table,
+ &rtw89_8852a_phy_radiob_table,},
+ .nctl_table = &rtw89_8852a_phy_nctl_table,
+ .byr_table = &rtw89_8852a_byr_table,
+ .txpwr_lmt_2g = &rtw89_8852a_txpwr_lmt_2g,
+ .txpwr_lmt_5g = &rtw89_8852a_txpwr_lmt_5g,
+ .txpwr_lmt_ru_2g = &rtw89_8852a_txpwr_lmt_ru_2g,
+ .txpwr_lmt_ru_5g = &rtw89_8852a_txpwr_lmt_ru_5g,
+ .txpwr_factor_rf = 2,
+ .txpwr_factor_mac = 1,
+ .dig_table = &rtw89_8852a_phy_dig_table,
+ .rf_path_num = 2,
+ .tx_nss = 2,
+ .rx_nss = 2,
+ .acam_num = 128,
+ .bcam_num = 10,
+ .scam_num = 128,
+ .sec_ctrl_efuse_size = 4,
+ .physical_efuse_size = 1216,
+ .logical_efuse_size = 1536,
+ .limit_efuse_size = 1152,
+ .phycap_addr = 0x580,
+ .phycap_size = 128,
+ .para_ver = 0x05050764,
+ .wlcx_desired = 0x05050000,
+ .btcx_desired = 0x5,
+ .scbd = 0x1,
+ .mailbox = 0x1,
+ .afh_guard_ch = 6,
+ .wl_rssi_thres = rtw89_btc_8852a_wl_rssi_thres,
+ .bt_rssi_thres = rtw89_btc_8852a_bt_rssi_thres,
+ .rssi_tol = 2,
+ .mon_reg_num = ARRAY_SIZE(rtw89_btc_8852a_mon_reg),
+ .mon_reg = rtw89_btc_8852a_mon_reg,
+ .rf_para_ulink_num = ARRAY_SIZE(rtw89_btc_8852a_rf_ul),
+ .rf_para_ulink = rtw89_btc_8852a_rf_ul,
+ .rf_para_dlink_num = ARRAY_SIZE(rtw89_btc_8852a_rf_dl),
+ .rf_para_dlink = rtw89_btc_8852a_rf_dl,
+ .ps_mode_supported = BIT(RTW89_PS_MODE_RFOFF) |
+ BIT(RTW89_PS_MODE_CLK_GATED) |
+ BIT(RTW89_PS_MODE_PWR_GATED),
+};
+EXPORT_SYMBOL(rtw8852a_chip_info);
+
+MODULE_FIRMWARE("rtw89/rtw8852a_fw.bin");
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.h b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
new file mode 100644
index 000000000000..633384374de0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.h
@@ -0,0 +1,109 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852A_H__
+#define __RTW89_8852A_H__
+
+#include "core.h"
+
+#define RF_PATH_NUM_8852A 2
+#define NTX_NUM_8852A 2
+
+enum rtw8852a_pmac_mode {
+ NONE_TEST,
+ PKTS_TX,
+ PKTS_RX,
+ CONT_TX
+};
+
+struct rtw8852au_efuse {
+ u8 rsvd[0x38];
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852ae_efuse {
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct rtw8852a_tssi_offset {
+ u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM];
+ u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM];
+ u8 rsvd[7];
+ u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM];
+} __packed;
+
+struct rtw8852a_efuse {
+ u8 rsvd[0x210];
+ struct rtw8852a_tssi_offset path_a_tssi;
+ u8 rsvd1[10];
+ struct rtw8852a_tssi_offset path_b_tssi;
+ u8 rsvd2[94];
+ u8 channel_plan;
+ u8 xtal_k;
+ u8 rsvd3;
+ u8 iqk_lck;
+ u8 rsvd4[5];
+ u8 reg_setting:2;
+ u8 tx_diversity:1;
+ u8 rx_diversity:2;
+ u8 ac_mode:1;
+ u8 module_type:2;
+ u8 rsvd5;
+ u8 shared_ant:1;
+ u8 coex_type:3;
+ u8 ant_iso:1;
+ u8 radio_on_off:1;
+ u8 rsvd6:2;
+ u8 eeprom_version;
+ u8 customer_id;
+ u8 tx_bb_swing_2g;
+ u8 tx_bb_swing_5g;
+ u8 tx_cali_pwr_trk_mode;
+ u8 trx_path_selection;
+ u8 rfe_type;
+ u8 country_code[2];
+ u8 rsvd7[3];
+ u8 path_a_therm;
+ u8 path_b_therm;
+ u8 rsvd8[46];
+ u8 path_a_cck_pwr_idx[6];
+ u8 path_a_bw40_1tx_pwr_idx[5];
+ u8 path_a_ofdm_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_1tx_pwr_idx_diff:4;
+ u8 path_a_bw20_2tx_pwr_idx_diff:4;
+ u8 path_a_bw40_2tx_pwr_idx_diff:4;
+ u8 path_a_cck_2tx_pwr_idx_diff:4;
+ u8 path_a_ofdm_2tx_pwr_idx_diff:4;
+ u8 rsvd9[0xf2];
+ union {
+ struct rtw8852au_efuse u;
+ struct rtw8852ae_efuse e;
+ };
+} __packed;
+
+struct rtw8852a_bb_pmac_info {
+ u8 en_pmac_tx:1;
+ u8 is_cck:1;
+ u8 mode:3;
+ u8 rsvd:3;
+ u16 tx_cnt;
+ u16 period;
+ u16 tx_time;
+ u8 duty_cycle;
+};
+
+void rtw8852a_bb_set_plcp_tx(struct rtw89_dev *rtwdev);
+void rtw8852a_bb_set_pmac_tx(struct rtw89_dev *rtwdev,
+ struct rtw8852a_bb_pmac_info *tx_info,
+ enum rtw89_phy_idx idx);
+void rtw8852a_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable,
+ u16 tx_cnt, u16 period, u16 tx_time,
+ enum rtw89_phy_idx idx);
+void rtw8852a_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm,
+ enum rtw89_phy_idx idx);
+void rtw8852a_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path);
+void rtw8852a_bb_tx_mode_switch(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx idx, u8 mode);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
new file mode 100644
index 000000000000..c021e93eb07b
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.c
@@ -0,0 +1,3911 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "coex.h"
+#include "debug.h"
+#include "mac.h"
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852a.h"
+#include "rtw8852a_rfk.h"
+#include "rtw8852a_rfk_table.h"
+#include "rtw8852a_table.h"
+
+static void
+_rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_write_rf(rtwdev, def->path, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_mask(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data);
+}
+
+static void
+_rfk_write32_set(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_set(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_write32_clr(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ rtw89_phy_write32_clr(rtwdev, def->addr, def->mask);
+}
+
+static void
+_rfk_delay(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def)
+{
+ udelay(def->data);
+}
+
+static void
+(*_rfk_handler[])(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) = {
+ [RTW89_RFK_F_WRF] = _rfk_write_rf,
+ [RTW89_RFK_F_WM] = _rfk_write32_mask,
+ [RTW89_RFK_F_WS] = _rfk_write32_set,
+ [RTW89_RFK_F_WC] = _rfk_write32_clr,
+ [RTW89_RFK_F_DELAY] = _rfk_delay,
+};
+
+static_assert(ARRAY_SIZE(_rfk_handler) == RTW89_RFK_F_NUM);
+
+static void
+rtw89_rfk_parser(struct rtw89_dev *rtwdev, const struct rtw89_rfk_tbl *tbl)
+{
+ const struct rtw89_reg5_def *p = tbl->defs;
+ const struct rtw89_reg5_def *end = tbl->defs + tbl->size;
+
+ for (; p < end; p++)
+ _rfk_handler[p->flag](rtwdev, p);
+}
+
+#define rtw89_rfk_parser_by_cond(rtwdev, cond, tbl_t, tbl_f) \
+ do { \
+ typeof(rtwdev) _dev = (rtwdev); \
+ if (cond) \
+ rtw89_rfk_parser(_dev, (tbl_t)); \
+ else \
+ rtw89_rfk_parser(_dev, (tbl_f)); \
+ } while (0)
+
+static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x, PHY%d\n",
+ rtwdev->dbcc_en, phy_idx);
+
+ if (!rtwdev->dbcc_en)
+ return RF_AB;
+
+ if (phy_idx == RTW89_PHY_0)
+ return RF_A;
+ else
+ return RF_B;
+}
+
+static const u32 rtw8852a_backup_bb_regs[] = {0x2344, 0x58f0, 0x78f0};
+static const u32 rtw8852a_backup_rf_regs[] = {0xef, 0xde, 0x0, 0x1e, 0x2, 0x85, 0x90, 0x5};
+#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852a_backup_bb_regs)
+#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852a_backup_rf_regs)
+
+static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ backup_bb_reg_val[i] =
+ rtw89_phy_read32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup bb reg : %x, value =%x\n",
+ rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[],
+ u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ backup_rf_reg_val[i] =
+ rtw89_read_rf(rtwdev, rf_path,
+ rtw8852a_backup_rf_regs[i], RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]backup rf S%d reg : %x, value =%x\n", rf_path,
+ rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_bb_reg(struct rtw89_dev *rtwdev,
+ u32 backup_bb_reg_val[])
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_BB_REGS_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, rtw8852a_backup_bb_regs[i],
+ MASKDWORD, backup_bb_reg_val[i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore bb reg : %x, value =%x\n",
+ rtw8852a_backup_bb_regs[i], backup_bb_reg_val[i]);
+ }
+}
+
+static void _rfk_restore_rf_reg(struct rtw89_dev *rtwdev,
+ u32 backup_rf_reg_val[], u8 rf_path)
+{
+ u32 i;
+
+ for (i = 0; i < BACKUP_RF_REGS_NR; i++) {
+ rtw89_write_rf(rtwdev, rf_path, rtw8852a_backup_rf_regs[i],
+ RFREG_MASK, backup_rf_reg_val[i]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]restore rf S%d reg: %x, value =%x\n", rf_path,
+ rtw8852a_backup_rf_regs[i], backup_rf_reg_val[i]);
+ }
+}
+
+static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath)
+{
+ u8 path;
+ u32 rf_mode;
+ int ret;
+
+ for (path = 0; path < RF_PATH_MAX; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, rf_mode != 2,
+ 2, 5000, false, rtwdev, path, 0x00,
+ RR_MOD_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RFK] Wait S%d to Rx mode!! (ret = %d)\n",
+ path, ret);
+ }
+}
+
+static void _dack_dump(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+ u8 t;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[0][0], dack->addck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->addck_d[1][0], dack->addck_d[1][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[0][0], dack->dadck_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n",
+ dack->dadck_d[1][0], dack->dadck_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[0][0], dack->biask_d[0][1]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n",
+ dack->biask_d[1][0], dack->biask_d[1][1]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[0][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][0][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n");
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ t = dack->msbk_d[1][1][i];
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t);
+ }
+}
+
+static void _afe_init(struct rtw89_dev *rtwdev)
+{
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_afe_init_defs_tbl);
+}
+
+static void _addck_backup(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_clr(rtwdev, R_S0_RXDC2, B_S0_RXDC2_SEL);
+ dack->addck_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
+ B_S0_ADDCK_Q);
+ dack->addck_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_ADDCK,
+ B_S0_ADDCK_I);
+
+ rtw89_phy_write32_clr(rtwdev, R_S1_RXDC2, B_S1_RXDC2_SEL);
+ dack->addck_d[1][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
+ B_S1_ADDCK_Q);
+ dack->addck_d[1][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S1_ADDCK,
+ B_S1_ADDCK_I);
+}
+
+static void _addck_reload(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+
+ rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_I, dack->addck_d[0][0]);
+ rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2, B_S0_RXDC2_Q2,
+ (dack->addck_d[0][1] >> 6));
+ rtw89_phy_write32_mask(rtwdev, R_S0_RXDC, B_S0_RXDC_Q,
+ (dack->addck_d[0][1] & 0x3f));
+ rtw89_phy_write32_set(rtwdev, R_S0_RXDC2, B_S0_RXDC2_MEN);
+ rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_I, dack->addck_d[1][0]);
+ rtw89_phy_write32_mask(rtwdev, R_S1_RXDC2, B_S1_RXDC2_Q2,
+ (dack->addck_d[1][1] >> 6));
+ rtw89_phy_write32_mask(rtwdev, R_S1_RXDC, B_S1_RXDC_Q,
+ (dack->addck_d[1][1] & 0x3f));
+ rtw89_phy_write32_set(rtwdev, R_S1_RXDC2, B_S1_RXDC2_EN);
+}
+
+static void _dack_backup_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_set(rtwdev, R_S0_DACKI, B_S0_DACKI_EN);
+ rtw89_phy_write32_set(rtwdev, R_S0_DACKQ, B_S0_DACKQ_EN);
+ rtw89_phy_write32_set(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
+
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_S0_DACKI, B_S0_DACKI_AR, i);
+ dack->msbk_d[0][0][i] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI7, B_S0_DACKI7_K);
+ rtw89_phy_write32_mask(rtwdev, R_S0_DACKQ, B_S0_DACKQ_AR, i);
+ dack->msbk_d[0][1][i] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ7, B_S0_DACKQ7_K);
+ }
+ dack->biask_d[0][0] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI2,
+ B_S0_DACKI2_K);
+ dack->biask_d[0][1] = (u16)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ2,
+ B_S0_DACKQ2_K);
+ dack->dadck_d[0][0] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKI8,
+ B_S0_DACKI8_K) - 8;
+ dack->dadck_d[0][1] = (u8)rtw89_phy_read32_mask(rtwdev, R_S0_DACKQ8,
+ B_S0_DACKQ8_K) - 8;
+}
+
+static void _dack_backup_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u8 i;
+
+ rtw89_phy_write32_set(rtwdev, R_S1_DACKI, B_S1_DACKI_EN);
+ rtw89_phy_write32_set(rtwdev, R_S1_DACKQ, B_S1_DACKQ_EN);
+ rtw89_phy_write32_set(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
+
+ for (i = 0; i < RTW89_DACK_MSBK_NR; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_S1_DACKI, B_S1_DACKI_AR, i);
+ dack->msbk_d[1][0][i] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI7, B_S1_DACKI_K);
+ rtw89_phy_write32_mask(rtwdev, R_S1_DACKQ, B_S1_DACKQ_AR, i);
+ dack->msbk_d[1][1][i] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ7, B_S1_DACKQ7_K);
+ }
+ dack->biask_d[1][0] =
+ (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI2, B_S1_DACKI2_K);
+ dack->biask_d[1][1] =
+ (u16)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ2, B_S1_DACKQ2_K);
+ dack->dadck_d[1][0] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKI8, B_S1_DACKI8_K) - 8;
+ dack->dadck_d[1][1] =
+ (u8)rtw89_phy_read32_mask(rtwdev, R_S1_DACKQ8, B_S1_DACKQ8_K) - 8;
+}
+
+static void _dack_reload_by_path(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 index)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 tmp = 0, tmp_offset, tmp_reg;
+ u8 i;
+ u32 idx_offset, path_offset;
+
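+ /* layout inferred from the writes below: the index-1 copy sits 0x50
+  * above index 0, and path B registers sit 0x2000 above path A
+  */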
+ if (index == 0)
+ idx_offset = 0;
+ else
+ idx_offset = 0x50;
+
+ if (path == RF_PATH_A)
+ path_offset = 0;
+ else
+ path_offset = 0x2000;
+
+ tmp_offset = idx_offset + path_offset;
+ /* msbk_d: 15/14/13/12 */
+ tmp = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 12] << (i * 8);
+ tmp_reg = 0x5e14 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+ /* msbk_d: 11/10/9/8 */
+ tmp = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 8] << (i * 8);
+ tmp_reg = 0x5e18 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+ /* msbk_d: 7/6/5/4 */
+ tmp = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ tmp |= dack->msbk_d[path][index][i + 4] << (i * 8);
+ tmp_reg = 0x5e1c + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+ /* msbk_d: 3/2/1/0 */
+ tmp = 0x0;
+ for (i = 0; i < RTW89_DACK_MSBK_NR / 4; i++)
+ tmp |= dack->msbk_d[path][index][i] << (i * 8);
+ tmp_reg = 0x5e20 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg,
+ rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD));
+ /* dadck_d/biask_d */
+ tmp = 0x0;
+ tmp = (dack->biask_d[path][index] << 22) |
+ (dack->dadck_d[path][index] << 14);
+ tmp_reg = 0x5e24 + tmp_offset;
+ rtw89_phy_write32(rtwdev, tmp_reg, tmp);
+}
+
+static void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u8 i;
+
+ for (i = 0; i < 2; i++)
+ _dack_reload_by_path(rtwdev, path, i);
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_rfk_dack_reload_defs_a_tbl,
+ &rtw8852a_rfk_dack_reload_defs_b_tbl);
+}
+
+#define ADDC_T_AVG 100
+static void _check_addc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ s32 dc_re = 0, dc_im = 0;
+ u32 tmp;
+ u32 i;
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_rfk_check_addc_defs_a_tbl,
+ &rtw8852a_rfk_check_addc_defs_b_tbl);
+
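+ /* average the reported DC offset over ADDC_T_AVG reads; the 12-bit
+  * I and Q fields are sign-extended before accumulation
+  */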
+ for (i = 0; i < ADDC_T_AVG; i++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, R_DBG32_D, MASKDWORD);
+ dc_re += sign_extend32(FIELD_GET(0xfff000, tmp), 11);
+ dc_im += sign_extend32(FIELD_GET(0xfff, tmp), 11);
+ }
+
+ dc_re /= ADDC_T_AVG;
+ dc_im /= ADDC_T_AVG;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DACK]S%d,dc_re = 0x%x,dc_im =0x%x\n", path, dc_re, dc_im);
+}
+
+static void _addck(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ /* S0 */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_a_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S0 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_A);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_a_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x1e00, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n");
+ dack->addck_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_A);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_a_tbl);
+
+ /* S1 */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_reset_defs_b_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]before S1 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_B);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_trigger_defs_b_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x3e00, BIT(0));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n");
+ dack->addck_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]ADDCK ret = %d\n", ret);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 ADDCK\n");
+ _check_addc(rtwdev, RF_PATH_B);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_addck_restore_defs_b_tbl);
+}
+
+static void _check_dadc(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_rfk_check_dadc_defs_f_a_tbl,
+ &rtw8852a_rfk_check_dadc_defs_f_b_tbl);
+
+ _check_addc(rtwdev, path);
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_rfk_check_dadc_defs_r_a_tbl,
+ &rtw8852a_rfk_check_dadc_defs_r_b_tbl);
+}
+
+static void _dack_s0(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_a_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x5e28, BIT(15));
+ ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x5e78, BIT(15));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK timeout\n");
+ dack->msbk_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_a_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x5e48, BIT(17));
+ ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x5e98, BIT(17));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DADACK timeout\n");
+ dack->dadck_timeout[0] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_a_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S0 DADCK\n");
+ _check_dadc(rtwdev, RF_PATH_A);
+
+ _dack_backup_s0(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_A);
+
+ rtw89_phy_write32_clr(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG);
+}
+
+static void _dack_s1(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 val;
+ int ret;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_f_b_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x7e28, BIT(15));
+ ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x7e78, BIT(15));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK timeout\n");
+ dack->msbk_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_m_b_tbl);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x7e48, BIT(17));
+ ret |= read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, 1, 10000,
+ false, rtwdev, 0x7e98, BIT(17));
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DADCK timeout\n");
+ dack->dadck_timeout[1] = true;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK ret = %d\n", ret);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dack_defs_r_b_tbl);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]after S1 DADCK\n");
+ _check_dadc(rtwdev, RF_PATH_B);
+
+ _dack_backup_s1(rtwdev);
+ _dack_reload(rtwdev, RF_PATH_B);
+
+ rtw89_phy_write32_clr(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON);
+}
+
+static void _dack(struct rtw89_dev *rtwdev)
+{
+ _dack_s0(rtwdev);
+ _dack_s1(rtwdev);
+}
+
+static void _dac_cal(struct rtw89_dev *rtwdev, bool force)
+{
+ struct rtw89_dack_info *dack = &rtwdev->dack;
+ u32 rf0_0, rf1_0;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, RF_AB);
+
+ dack->dack_done = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK b\n");
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n");
+ rf0_0 = rtw89_read_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK);
+ rf1_0 = rtw89_read_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK);
+ _afe_init(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x30001);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x30001);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _addck(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+ _addck_backup(rtwdev);
+ _addck_reload(rtwdev);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x40001);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x40001);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_START);
+ _dack(rtwdev);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_ONESHOT_STOP);
+ _dack_dump(rtwdev);
+ dack->dack_done = true;
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, rf0_0);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, rf1_0);
+ rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RR_RSV1_RST, 0x1);
+ rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RR_RSV1_RST, 0x1);
+ dack->dack_cnt++;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n");
+}
+
+#define RTW8852A_NCTL_VER 0xd
+#define RTW8852A_IQK_VER 0x2a
+#define RTW8852A_IQK_SS 2
+#define RTW8852A_IQK_THR_REK 8
+#define RTW8852A_IQK_CFIR_GROUP_NR 4
+
+enum rtw8852a_iqk_type {
+ ID_TXAGC,
+ ID_FLOK_COARSE,
+ ID_FLOK_FINE,
+ ID_TXK,
+ ID_RXAGC,
+ ID_RXK,
+ ID_NBTXK,
+ ID_NBRXK,
+};
+
+static void _iqk_read_fft_dbcc0(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i = 0x0;
+ u32 fft[6] = {0x0};
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00160000);
+ fft[0] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00170000);
+ fft[1] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00180000);
+ fft[2] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00190000);
+ fft[3] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001a0000);
+ fft[4] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x001b0000);
+ fft[5] = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD);
+ for (i = 0; i < 6; i++)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x,fft[%x]= %x\n",
+ path, i, fft[i]);
+}
+
+static void _iqk_read_xym_dbcc0(struct rtw89_dev *rtwdev, u8 path)
+{
+ u8 i = 0x0;
+ u32 tmp = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX, 0x1);
+
+ for (i = 0x0; i < 0x18; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x000000c0 + i);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N2, MASKDWORD);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = %x\n",
+ path, BIT(path), tmp);
+ udelay(1);
+ }
+ rtw89_phy_write32_clr(rtwdev, R_IQK_DIF, B_IQK_DIF_TRX);
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_N2, MASKDWORD, 0x80010100);
+ udelay(1);
+}
+
+static void _iqk_read_txcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
+ u8 group)
+{
+ static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
+ {0x8f20, 0x8f54, 0x8f88, 0x8fbc},
+ {0x9320, 0x9354, 0x9388, 0x93bc},
+ };
+ u8 idx = 0x0;
+ u32 tmp = 0x0;
+ u32 base_addr;
+
+ if (path >= RTW8852A_IQK_SS) {
+ rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
+ return;
+ }
+ if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
+ rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);
+
+ base_addr = base_addrs[path][group];
+
+ for (idx = 0; idx < 0x0d; idx++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK] %x = %x\n",
+ base_addr + (idx << 2), tmp);
+ }
+
+ if (path == 0x0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C0, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f50 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C1, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8f84 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C2, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fb8 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P0C3, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8fec = %x\n", tmp);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C0, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9350 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C1, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9384 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C2, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93b8 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXCFIR_P1C3, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x93ec = %x\n", tmp);
+ }
+ rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xc);
+ udelay(1);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
+ BIT(path), tmp);
+}
+
+static void _iqk_read_rxcfir_dbcc0(struct rtw89_dev *rtwdev, u8 path,
+ u8 group)
+{
+ static const u32 base_addrs[RTW8852A_IQK_SS][RTW8852A_IQK_CFIR_GROUP_NR] = {
+ {0x8d00, 0x8d44, 0x8d88, 0x8dcc},
+ {0x9100, 0x9144, 0x9188, 0x91cc},
+ };
+ u8 idx = 0x0;
+ u32 tmp = 0x0;
+ u32 base_addr;
+
+ if (path >= RTW8852A_IQK_SS) {
+ rtw89_warn(rtwdev, "cfir path %d out of range\n", path);
+ return;
+ }
+ if (group >= RTW8852A_IQK_CFIR_GROUP_NR) {
+ rtw89_warn(rtwdev, "cfir group %d out of range\n", group);
+ return;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_W_COEF + (path << 8), MASKDWORD, 0x00000001);
+
+ base_addr = base_addrs[path][group];
+ for (idx = 0; idx < 0x10; idx++) {
+ tmp = rtw89_phy_read32_mask(rtwdev, base_addr + (idx << 2), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]%x = %x\n",
+ base_addr + (idx << 2), tmp);
+ }
+
+ if (path == 0x0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C0, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d40 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C1, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8d84 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C2, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8dc8 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P0C3, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x8e0c = %x\n", tmp);
+ } else {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]\n");
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C0, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9140 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C1, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x9184 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C2, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x91c8 = %x\n", tmp);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXCFIR_P1C3, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] 0x920c = %x\n", tmp);
+ }
+ rtw89_phy_write32_clr(rtwdev, R_W_COEF + (path << 8), MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT + (path << 8), B_KIP_RPT_SEL, 0xd);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_PER + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lxfc = %x\n", path,
+ BIT(path), tmp);
+}
+
+static void _iqk_sram(struct rtw89_dev *rtwdev, u8 path)
+{
+ u32 tmp = 0x0;
+ u32 i = 0x0;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00020000);
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX2, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
+
+ for (i = 0; i <= 0x9f; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
+ }
+
+ for (i = 0; i <= 0x9f; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_SRAM_IQRX, MASKDWORD, 0x00010000 + i);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]0x%x\n", tmp);
+ }
+ rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX2, MASKDWORD);
+ rtw89_phy_write32_clr(rtwdev, R_SRAM_IQRX, MASKDWORD);
+}
+
+static void _iqk_rxk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp = 0x0;
+
+ rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H2, 0x0);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RXK2);
+ rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x5);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1);
+ break;
+ default:
+ break;
+ }
+ tmp = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, tmp);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
+ fsleep(128);
+}
+
+static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype)
+{
+ u32 tmp;
+ u32 val;
+ int ret;
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, 1, 8200,
+ false, rtwdev, 0xbff8, MASKBYTE0);
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]IQK timeout!!!\n");
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ret=%d\n", path, ret);
+ tmp = rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%x, type= %x, 0x8008 = 0x%x\n", path, ktype, tmp);
+
+ return false;
+}
+
+static bool _iqk_one_shot(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path, u8 ktype)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail = false;
+ u32 iqk_cmd = 0x0;
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy_idx, path);
+ u32 addr_rfc_ctl = 0x0;
+
+ if (path == RF_PATH_A)
+ addr_rfc_ctl = 0x5864;
+ else
+ addr_rfc_ctl = 0x7864;
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+ switch (ktype) {
+ case ID_TXAGC:
+ iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_FLOK_COARSE:
+ rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
+ iqk_cmd = 0x108 | (1 << (4 + path));
+ break;
+ case ID_FLOK_FINE:
+ rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x009);
+ iqk_cmd = 0x208 | (1 << (4 + path));
+ break;
+ case ID_TXK:
+ rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_RXAGC:
+ iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1);
+ break;
+ case ID_RXK:
+ rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ iqk_cmd = 0x008 | (1 << (path + 4)) |
+ (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8);
+ break;
+ case ID_NBTXK:
+ rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x025);
+ iqk_cmd = 0x308 | (1 << (4 + path));
+ break;
+ case ID_NBRXK:
+ rtw89_phy_write32_set(rtwdev, addr_rfc_ctl, 0x20000000);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011);
+ iqk_cmd = 0x608 | (1 << (4 + path));
+ break;
+ default:
+ return false;
+ }
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1);
+ rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
+ udelay(1);
+ fail = _iqk_check_cal(rtwdev, path, ktype);
+ if (iqk_info->iqk_xym_en)
+ _iqk_read_xym_dbcc0(rtwdev, path);
+ if (iqk_info->iqk_fft_en)
+ _iqk_read_fft_dbcc0(rtwdev, path);
+ if (iqk_info->iqk_sram_en)
+ _iqk_sram(rtwdev, path);
+ if (iqk_info->iqk_cfir_en) {
+ if (ktype == ID_TXK) {
+ _iqk_read_txcfir_dbcc0(rtwdev, path, 0x0);
+ _iqk_read_txcfir_dbcc0(rtwdev, path, 0x1);
+ _iqk_read_txcfir_dbcc0(rtwdev, path, 0x2);
+ _iqk_read_txcfir_dbcc0(rtwdev, path, 0x3);
+ } else {
+ _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x0);
+ _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x1);
+ _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x2);
+ _iqk_read_rxcfir_dbcc0(rtwdev, path, 0x3);
+ }
+ }
+
+ rtw89_phy_write32_clr(rtwdev, addr_rfc_ctl, 0x20000000);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+
+ return fail;
+}
+
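+/* Wideband RX IQK: sweep four RX gain/attenuation groups for the current
+ * band, run a one-shot RXK per group and latch the per-group result bits
+ * into R_IQKINF, then restore the RXK switches and mark the path as
+ * wideband RX calibrated.
+ */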
+static bool _rxk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ static const u32 rxgn_a[4] = {0x18C, 0x1A0, 0x28C, 0x2A0};
+ static const u32 attc2_a[4] = {0x0, 0x0, 0x07, 0x30};
+ static const u32 attc1_a[4] = {0x7, 0x5, 0x1, 0x1};
+ static const u32 rxgn_g[4] = {0x1CC, 0x1E0, 0x2CC, 0x2E0};
+ static const u32 attc2_g[4] = {0x0, 0x15, 0x3, 0x1a};
+ static const u32 attc1_g[4] = {0x1, 0x0, 0x1, 0x0};
+ u8 gp = 0x0;
+ bool fail = false;
+ u32 rf0 = 0x0;
+
+ for (gp = 0; gp < 0x4; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_g[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, attc2_g[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, attc1_g[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, rxgn_a[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, attc2_a[gp]);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, attc1_a[gp]);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
+ rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
+ rf0 | iqk_info->syn1to2);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
+ rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
+ rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN, 0x1);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK);
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(16 + gp + path * 4), fail);
+ }
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
+ break;
+ default:
+ break;
+ }
+ iqk_info->nb_rxcfir[path] = 0x40000000;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_RXCFIR, 0x5);
+ iqk_info->is_wb_rxiqk[path] = true;
+ return false;
+}
+
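+/* Narrowband RX IQK with a single fixed gain/attenuation set per band; on
+ * success the resulting RX CFIR coefficient is stored, otherwise a default
+ * value is used.
+ */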
+static bool _iqk_nbrxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 group = 0x0;
+ u32 rf0 = 0x0, tmp = 0x0;
+ u32 idxrxgain_a = 0x1a0;
+ u32 idxattc2_a = 0x00;
+ u32 idxattc1_a = 0x5;
+ u32 idxrxgain_g = 0x1E0;
+ u32 idxattc2_g = 0x15;
+ u32 idxattc1_g = 0x0;
+ bool fail = false;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_g);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, idxattc2_g);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, idxattc1_g);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXG, idxrxgain_a);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C2, idxattc2_a);
+ rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_C1, idxattc1_a);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
+ rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI,
+ rf0 | iqk_info->syn1to2);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_COM, MASKDWORD, 0x40010100);
+ rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
+ rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP, group);
+ rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK);
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_WLSEL, RR_WLSEL_AG, 0x0);
+ break;
+ default:
+ break;
+ }
+ if (!fail) {
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
+ iqk_info->nb_rxcfir[path] = tmp | 0x2;
+ } else {
+ iqk_info->nb_rxcfir[path] = 0x40000002;
+ }
+ return fail;
+}
+
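+/* Select the RX ADC clock and CFIR system configuration depending on
+ * whether the path is operating at 80 MHz.
+ */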
+static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
+ MASKDWORD, 0x4d000a08);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
+ B_P0_RXCK_VAL, 0x2);
+ rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
+ rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
+ rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8),
+ MASKDWORD, 0x44000a08);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
+ B_P0_RXCK_VAL, 0x1);
+ rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
+ rtw89_phy_write32_set(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON);
+ rtw89_phy_write32_clr(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL);
+ }
+}
+
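+/* Wideband TX IQK: sweep four TX gain groups (plus 2G attenuation/mixer
+ * settings), run a one-shot TXK per group and record the per-group result
+ * bits in R_IQKINF, then mark the path as wideband TX calibrated.
+ */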
+static bool _txk_group_sel(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ static const u32 a_txgain[4] = {0xE466, 0x646D, 0xE4E2, 0x64ED};
+ static const u32 g_txgain[4] = {0x60e8, 0x60f0, 0x61e8, 0x61ED};
+ static const u32 a_itqt[4] = {0x12, 0x12, 0x12, 0x1b};
+ static const u32 g_itqt[4] = {0x09, 0x12, 0x12, 0x12};
+ static const u32 g_attsmxr[4] = {0x0, 0x1, 0x1, 0x1};
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool fail = false;
+ u8 gp = 0x0;
+ u32 tmp = 0x0;
+
+ for (gp = 0x0; gp < 0x4; gp++) {
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
+ B_RFGAIN_BND, 0x08);
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
+ g_txgain[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1,
+ g_attsmxr[gp]);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0,
+ g_attsmxr[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, g_itqt[gp]);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
+ B_RFGAIN_BND, 0x04);
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL,
+ a_txgain[gp]);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8),
+ MASKDWORD, a_itqt[gp]);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_GP, gp);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK);
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(8 + gp + path * 4), fail);
+ }
+
+ iqk_info->nb_txcfir[path] = 0x40000000;
+ rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8),
+ B_IQK_RES_TXCFIR, 0x5);
+ iqk_info->is_wb_txiqk[path] = true;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
+ BIT(path), tmp);
+ return false;
+}
+
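+/* Narrowband TX IQK using one fixed gain group; store the TX CFIR result
+ * on success, otherwise fall back to the default coefficient.
+ */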
+static bool _iqk_nbtxk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 group = 0x2;
+ u32 a_mode_txgain = 0x64e2;
+ u32 g_mode_txgain = 0x61e8;
+ u32 attsmxr = 0x1;
+ u32 itqt = 0x12;
+ u32 tmp = 0x0;
+ bool fail = false;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
+ B_RFGAIN_BND, 0x08);
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, g_mode_txgain);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, attsmxr);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, attsmxr);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN_BND + (path << 8),
+ B_RFGAIN_BND, 0x04);
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, a_mode_txgain);
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
+ rtw89_phy_write32_set(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, group);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
+ fail = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK);
+ if (!fail) {
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ iqk_info->nb_txcfir[path] = tmp | 0x2;
+ } else {
+ iqk_info->nb_txcfir[path] = 0x40000002;
+ }
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x8%lx38 = 0x%x\n", path,
+ BIT(path), tmp);
+ return fail;
+}
+
+static void _lok_res_table(struct rtw89_dev *rtwdev, u8 path, u8 ibias)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, ibias = %x\n", path, ibias);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x2);
+ if (iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x0);
+ else
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, ibias);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0);
+}
+
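+/* Read back the LOK I/Q codes from RR_TXMO and treat codes at the edge of
+ * the usable range (outside 0x2..0x1d) as a calibration failure.
+ */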
+static bool _lok_finetune_check(struct rtw89_dev *rtwdev, u8 path)
+{
+ bool is_fail = false;
+ u32 tmp = 0x0;
+ u32 core_i = 0x0;
+ u32 core_q = 0x0;
+
+ tmp = rtw89_read_rf(rtwdev, path, RR_TXMO, RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK][FineLOK] S%x, 0x58 = 0x%x\n",
+ path, tmp);
+ core_i = FIELD_GET(RR_TXMO_COI, tmp);
+ core_q = FIELD_GET(RR_TXMO_COQ, tmp);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, i = 0x%x\n", path, core_i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, q = 0x%x\n", path, core_q);
+
+ if (core_i < 0x2 || core_i > 0x1d || core_q < 0x2 || core_q > 0x1d)
+ is_fail = true;
+ return is_fail;
+}
+
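+/* LO leakage calibration: run the coarse and then the fine FLOK one-shot
+ * with a band-specific TX gain and IQ swing, record the per-stage results
+ * and validate the final I/Q codes.
+ */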
+static bool _iqk_lok(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 rf0 = 0x0;
+ u8 itqt = 0x12;
+ bool fail = false;
+ bool tmp = false;
+
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe5e0);
+ itqt = 0x09;
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_ALL, 0xe4e0);
+ itqt = 0x12;
+ break;
+ default:
+ break;
+ }
+ rtw89_phy_write32_set(rtwdev, R_IQK_CFG, B_IQK_CFG_SET);
+ rf0 = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK);
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF1, B_IQK_DIF1_TXPI,
+ rf0 | iqk_info->syn1to2);
+ rtw89_phy_write32_clr(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_TXCFIR);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_G3, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_GP, 0x0);
+ rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_EN);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_COARSE);
+ iqk_info->lok_cor_fail[0][path] = tmp;
+ fsleep(10);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), MASKDWORD, itqt);
+ tmp = _iqk_one_shot(rtwdev, phy_idx, path, ID_FLOK_FINE);
+ iqk_info->lok_fin_fail[0][path] = tmp;
+ fail = _lok_finetune_check(rtwdev, path);
+ return fail;
+}
+
+static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041);
+ udelay(1);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0303);
+ rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RST, 0x0000);
+ switch (iqk_info->iqk_band[path]) {
+ case RTW89_BAND_2G:
+ rtw89_write_rf(rtwdev, path, RR_XALNA2, RR_XALNA2_SW, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x000);
+ rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ udelay(1);
+ break;
+ case RTW89_BAND_5G:
+ rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x00);
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x3f);
+ rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x7);
+ rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EN, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_MASK, 0x100);
+ rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK,
+ 0x403e0 | iqk_info->syn1to2);
+ udelay(1);
+ break;
+ default:
+ break;
+ }
+}
+
+static void _iqk_txclk_setting(struct rtw89_dev *rtwdev, u8 path)
+{
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+}
+
+static void _iqk_info_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 tmp = 0x0;
+ bool flag = false;
+
+ iqk_info->thermal[path] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+ iqk_info->thermal_rek_en = false;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_thermal = %d\n", path,
+ iqk_info->thermal[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_COR_fail= %d\n", path,
+ iqk_info->lok_cor_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_LOK_FIN_fail= %d\n", path,
+ iqk_info->lok_fin_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_TXIQK_fail = %d\n", path,
+ iqk_info->iqk_tx_fail[0][path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%d_RXIQK_fail= %d,\n", path,
+ iqk_info->iqk_rx_fail[0][path]);
+ flag = iqk_info->lok_cor_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(0) << (path * 4), flag);
+ flag = iqk_info->lok_fin_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(1) << (path * 4), flag);
+ flag = iqk_info->iqk_tx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(2) << (path * 4), flag);
+ flag = iqk_info->iqk_rx_fail[0][path];
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, BIT(3) << (path * 4), flag);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQK_RES + (path << 8), MASKDWORD);
+ iqk_info->bp_iqkenable[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_txkresult[path] = tmp;
+ tmp = rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD);
+ iqk_info->bp_rxkresult[path] = tmp;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, B_IQKINF2_KCNT,
+ (u8)iqk_info->iqk_times);
+
+ tmp = rtw89_phy_read32_mask(rtwdev, R_IQKINF, 0x0000000f << (path * 4));
+ if (tmp != 0x0)
+ iqk_info->iqk_fail_cnt++;
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x00ff0000 << (path * 4),
+ iqk_info->iqk_fail_cnt);
+}
+
+static
+void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ bool lok_is_fail = false;
+ u8 ibias = 0x1;
+ u8 i = 0;
+
+ _iqk_txclk_setting(rtwdev, path);
+
+ for (i = 0; i < 3; i++) {
+ _lok_res_table(rtwdev, path, ibias++);
+ _iqk_txk_setting(rtwdev, path);
+ lok_is_fail = _iqk_lok(rtwdev, phy_idx, path);
+ if (!lok_is_fail)
+ break;
+ }
+ if (iqk_info->is_nbiqk)
+ iqk_info->iqk_tx_fail[0][path] = _iqk_nbtxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_tx_fail[0][path] = _txk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_rxclk_setting(rtwdev, path);
+ _iqk_rxk_setting(rtwdev, path);
+ if (iqk_info->is_nbiqk || rtwdev->dbcc_en || iqk_info->iqk_band[path] == RTW89_BAND_2G)
+ iqk_info->iqk_rx_fail[0][path] = _iqk_nbrxk(rtwdev, phy_idx, path);
+ else
+ iqk_info->iqk_rx_fail[0][path] = _rxk_group_sel(rtwdev, phy_idx, path);
+
+ _iqk_info_iqk(rtwdev, phy_idx, path);
+}
+
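+/* Scan the IQK table for an unused slot, snapshot the current band,
+ * bandwidth and channel for this path, derive the syn1to2 flag from
+ * register 0x35c, and publish band/bw/channel plus the IQK/NCTL version
+ * codes to the report registers.
+ */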
+static void _iqk_get_ch_info(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ struct rtw89_hal *hal = &rtwdev->hal;
+ u32 reg_rf18 = 0x0, reg_35c = 0x0;
+ u8 idx = 0;
+ bool get_empty_table = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) {
+ if (iqk_info->iqk_mcc_ch[idx][path] == 0) {
+ get_empty_table = true;
+ break;
+ }
+ }
+ if (!get_empty_table) {
+ idx = iqk_info->iqk_table_idx[path] + 1;
+ if (idx > RTW89_IQK_CHS_NR - 1)
+ idx = 0;
+ }
+ reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]cfg ch = %d\n", reg_rf18);
+ reg_35c = rtw89_phy_read32_mask(rtwdev, 0x35c, 0x00000c00);
+
+ iqk_info->iqk_band[path] = hal->current_band_type;
+ iqk_info->iqk_bw[path] = hal->current_band_width;
+ iqk_info->iqk_ch[path] = hal->current_channel;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]iqk_info->iqk_band[%x] = 0x%x\n", path,
+ iqk_info->iqk_band[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_bw[%x] = 0x%x\n",
+ path, iqk_info->iqk_bw[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_info->iqk_ch[%x] = 0x%x\n",
+ path, iqk_info->iqk_ch[path]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]S%d (PHY%d): / DBCC %s/ %s/ CH%d/ %s\n", path, phy,
+ rtwdev->dbcc_en ? "on" : "off",
+ iqk_info->iqk_band[path] == 0 ? "2G" :
+ iqk_info->iqk_band[path] == 1 ? "5G" : "6G",
+ iqk_info->iqk_ch[path],
+ iqk_info->iqk_bw[path] == 0 ? "20M" :
+ iqk_info->iqk_bw[path] == 1 ? "40M" : "80M");
+ if (reg_35c == 0x01)
+ iqk_info->syn1to2 = 0x1;
+ else
+ iqk_info->syn1to2 = 0x0;
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF, B_IQKINF_VER, RTW8852A_IQK_VER);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x000f << (path * 16),
+ (u8)iqk_info->iqk_band[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0x00f0 << (path * 16),
+ (u8)iqk_info->iqk_bw[path]);
+ rtw89_phy_write32_mask(rtwdev, R_IQKCH, 0xff00 << (path * 16),
+ (u8)iqk_info->iqk_ch[path]);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQKINF2, 0x000000ff, RTW8852A_NCTL_VER);
+}
+
+static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ u8 path)
+{
+ _iqk_by_path(rtwdev, phy_idx, path);
+}
+
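+/* Write the narrowband TX/RX CFIR results back and restore the KIP, CFIR
+ * and RF registers touched during calibration, leaving the RF path in RX
+ * mode.
+ */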
+static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+
+ rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_txcfir[path]);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD,
+ iqk_info->nb_rxcfir[path]);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_RX_DCK, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+ rtw89_phy_write32_clr(rtwdev, R_KPATH_CFG, MASKDWORD);
+ rtw89_phy_write32_clr(rtwdev, R_GAPK, B_GAPK_ADR);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
+ rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_MAP + (path << 8), MASKDWORD, 0xe4e4e4e4);
+ rtw89_phy_write32_clr(rtwdev, R_CFIR_LUT + (path << 8), B_CFIR_LUT_SEL);
+ rtw89_phy_write32_clr(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW);
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD, 0x00000002);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ rtw89_write_rf(rtwdev, path, RR_TXRSV, RR_TXRSV_GAPK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_BIAS, RR_BIAS_GAPK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+}
+
+static void _iqk_afebb_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ const struct rtw89_rfk_tbl *tbl;
+
+ switch (_kpath(rtwdev, phy_idx)) {
+ case RF_A:
+ tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
+ break;
+ case RF_B:
+ tbl = &rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
+ break;
+ default:
+ tbl = &rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
+ break;
+ }
+
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 idx = iqk_info->iqk_table_idx[path];
+
+ if (rtwdev->dbcc_en) {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_IQC, path & 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_G2, path & 0x1);
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_IQC, idx);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8),
+ B_CFIR_LUT_G2, idx);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_RW, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a);
+ rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, MASKDWORD, 0x00200000);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, MASKDWORD, 0x80000000);
+ rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), MASKDWORD);
+}
+
+static void _iqk_macbb_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ const struct rtw89_rfk_tbl *tbl;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===> %s\n", __func__);
+
+ switch (_kpath(rtwdev, phy_idx)) {
+ case RF_A:
+ tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
+ break;
+ case RF_B:
+ tbl = &rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
+ break;
+ default:
+ tbl = &rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
+ break;
+ }
+
+ rtw89_rfk_parser(rtwdev, tbl);
+}
+
+static void _iqk_dbcc(struct rtw89_dev *rtwdev, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 phy_idx = 0x0;
+
+ iqk_info->iqk_times++;
+
+ if (path == 0x0)
+ phy_idx = RTW89_PHY_0;
+ else
+ phy_idx = RTW89_PHY_1;
+
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+}
+
+static void _iqk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk = &rtwdev->iqk;
+ u8 path = 0x0;
+ u8 cur_ther;
+
+ if (iqk->iqk_band[0] == RTW89_BAND_2G)
+ return;
+ if (iqk->iqk_bw[0] < RTW89_CHANNEL_WIDTH_80)
+ return;
+
+ /* only check path 0 */
+ for (path = 0; path < 1; path++) {
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ if (abs(cur_ther - iqk->thermal[path]) > RTW8852A_IQK_THR_REK)
+ iqk->thermal_rek_en = true;
+ else
+ iqk->thermal_rek_en = false;
+ }
+}
+
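+/* RF RC calibration: trigger RCK with the path in RX mode, poll RF 0x1c
+ * for completion, write the resulting code back to RR_RCKC, set the ADC
+ * offset and pulse the RF clock enable before restoring RR_RSV1.
+ */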
+static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path)
+{
+ u32 rf_reg5, rck_val = 0;
+ u32 val;
+ int ret;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path);
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK));
+
+ /* RCK trigger */
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240);
+
+ ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 20,
+ false, rtwdev, path, 0x1c, BIT(3));
+ if (ret)
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RCK timeout\n");
+
+ rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA);
+ rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val);
+
+ /* RCK_ADC_OFFSET */
+ rtw89_write_rf(rtwdev, path, RR_RCKO, RR_RCKO_OFF, 0x4);
+
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RFC, RR_RFC_CKEN, 0x0);
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RCK] RF 0x1b / 0x1c / 0x1d = 0x%x / 0x%x / 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RCKS, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_RCKO, RFREG_MASK));
+}
+
+static void _iqk_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u8 ch, path;
+
+ rtw89_phy_write32_clr(rtwdev, R_IQKINF, MASKDWORD);
+ if (iqk_info->is_iqk_init)
+ return;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__);
+ iqk_info->is_iqk_init = true;
+ iqk_info->is_nbiqk = false;
+ iqk_info->iqk_fft_en = false;
+ iqk_info->iqk_sram_en = false;
+ iqk_info->iqk_cfir_en = false;
+ iqk_info->iqk_xym_en = false;
+ iqk_info->thermal_rek_en = false;
+ iqk_info->iqk_times = 0x0;
+
+ for (ch = 0; ch < RTW89_IQK_CHS_NR; ch++) {
+ iqk_info->iqk_channel[ch] = 0x0;
+ for (path = 0; path < RTW8852A_IQK_SS; path++) {
+ iqk_info->lok_cor_fail[ch][path] = false;
+ iqk_info->lok_fin_fail[ch][path] = false;
+ iqk_info->iqk_tx_fail[ch][path] = false;
+ iqk_info->iqk_rx_fail[ch][path] = false;
+ iqk_info->iqk_mcc_ch[ch][path] = 0x0;
+ iqk_info->iqk_table_idx[path] = 0x0;
+ }
+ }
+}
+
+static void _doiqk(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy_idx, u8 path)
+{
+ struct rtw89_iqk_info *iqk_info = &rtwdev->iqk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852A_IQK_SS][BACKUP_RF_REGS_NR];
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[IQK]==========IQK strat!!!!!==========\n");
+ iqk_info->iqk_times++;
+ iqk_info->kcount = 0;
+ iqk_info->version = RTW8852A_IQK_VER;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version);
+ _iqk_get_ch_info(rtwdev, phy_idx, path);
+ _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ _iqk_macbb_setting(rtwdev, phy_idx, path);
+ _iqk_preset(rtwdev, path);
+ _iqk_start_iqk(rtwdev, phy_idx, path);
+ _iqk_restore(rtwdev, path);
+ _iqk_afebb_restore(rtwdev, phy_idx, path);
+ _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+ _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP);
+}
+
+static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force)
+{
+ switch (_kpath(rtwdev, phy_idx)) {
+ case RF_A:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ break;
+ case RF_B:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ case RF_AB:
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_A);
+ _doiqk(rtwdev, force, phy_idx, RF_PATH_B);
+ break;
+ default:
+ break;
+ }
+}
+
+#define RXDCK_VER_8852A 0xe
+
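+/* RX DC offset calibration for one path, clocked either by the AFE or the
+ * RF controller: optionally switch the ADC/RXDC blocks to AFE mode, pulse
+ * RR_DCK_LV inside a BTC-notified calibration window, then restore the
+ * original RX clock setting.
+ */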
+static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, bool is_afe)
+{
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u32 ori_val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ==== S%d RX DCK (by %s)====\n",
+ path, is_afe ? "AFE" : "RFC");
+
+ ori_val = rtw89_phy_read32_mask(rtwdev, R_P0_RXCK + (path << 13), MASKDWORD);
+
+ if (is_afe) {
+ rtw89_phy_write32_set(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
+ rtw89_phy_write32_set(rtwdev, R_P0_RXCK + (path << 13), B_P0_RXCK_ON);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
+ B_P0_RXCK_VAL, 0x3);
+ rtw89_phy_write32_set(rtwdev, R_S0_RXDC2 + (path << 13), B_S0_RXDC2_MEN);
+ rtw89_phy_write32_mask(rtwdev, R_S0_RXDC2 + (path << 13),
+ B_S0_RXDC2_AVG, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3);
+ rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK);
+ rtw89_phy_write32_clr(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
+ rtw89_phy_write32_set(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST);
+ rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_CRXBB, 0x1);
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_DCK2, RR_DCK2_CYCLE, 0x3f);
+ rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_SEL, is_afe);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1);
+
+ fsleep(600);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_ONESHOT_STOP);
+
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0);
+
+ if (is_afe) {
+ rtw89_phy_write32_clr(rtwdev, R_P0_NRBW + (path << 13), B_P0_NRBW_DBG);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RXCK + (path << 13),
+ MASKDWORD, ori_val);
+ }
+}
+
+static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ bool is_afe)
+{
+ u8 path, kpath, dck_tune;
+ u32 rf_reg5;
+ u32 addr;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, Cv: %d) ******\n",
+ RXDCK_VER_8852A, rtwdev->hal.cv);
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < 2; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK);
+ dck_tune = (u8)rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ addr = 0x5818 + (path << 13);
+ /* TSSI pause */
+ rtw89_phy_write32_set(rtwdev, addr, BIT(30));
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _set_rx_dck(rtwdev, phy, path, is_afe);
+ rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune);
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ addr = 0x5818 + (path << 13);
+ /* TSSI resume */
+ rtw89_phy_write32_clr(rtwdev, addr, BIT(30));
+ }
+ }
+}
+
+#define RTW8852A_RF_REL_VERSION 34
+#define RTW8852A_DPK_VER 0x10
+#define RTW8852A_DPK_TH_AVG_NUM 4
+#define RTW8852A_DPK_RF_PATH 2
+#define RTW8852A_DPK_KIP_REG_NUM 2
+
+enum rtw8852a_dpk_id {
+ LBK_RXIQK = 0x06,
+ SYNC = 0x10,
+ MDPK_IDL = 0x11,
+ MDPK_MPA = 0x12,
+ GAIN_LOSS = 0x13,
+ GAIN_CAL = 0x14,
+};
+
+static void _rf_direct_cntrl(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bybb)
+{
+ if (is_bybb)
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0);
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off);
+
+static void _dpk_bkup_kip(struct rtw89_dev *rtwdev, u32 *reg,
+ u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM],
+ u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
+ reg_bkup[path][i] = rtw89_phy_read32_mask(rtwdev,
+ reg[i] + (path << 8),
+ MASKDWORD);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static void _dpk_reload_kip(struct rtw89_dev *rtwdev, u32 *reg,
+ u32 reg_bkup[][RTW8852A_DPK_KIP_REG_NUM], u8 path)
+{
+ u8 i;
+
+ for (i = 0; i < RTW8852A_DPK_KIP_REG_NUM; i++) {
+ rtw89_phy_write32_mask(rtwdev, reg[i] + (path << 8),
+ MASKDWORD, reg_bkup[path][i]);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Reload 0x%x = %x\n",
+ reg[i] + (path << 8), reg_bkup[path][i]);
+ }
+}
+
+static u8 _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, enum rtw8852a_dpk_id id)
+{
+ u8 phy_map = rtw89_btc_path_phymap(rtwdev, phy, path);
+ u16 dpk_cmd = 0x0;
+ u32 val;
+ int ret;
+
+ dpk_cmd = (u16)((id << 8) | (0x19 + (path << 4)));
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_START);
+
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd);
+ rtw89_phy_write32_set(rtwdev, R_DPK_CTL, B_DPK_CTL_EN);
+
+ ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55,
+ 10, 20000, false, rtwdev, 0xbff8, MASKBYTE0);
+
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_N1, MASKBYTE0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_ONESHOT_STOP);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot for %s = 0x%x (ret=%d)\n",
+ id == 0x06 ? "LBK_RXIQK" :
+ id == 0x10 ? "SYNC" :
+ id == 0x11 ? "MDPK_IDL" :
+ id == 0x12 ? "MDPK_MPA" :
+ id == 0x13 ? "GAIN_LOSS" : "GAIN_CAL",
+ dpk_cmd, ret);
+
+ if (ret) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] one-shot over 20ms!!!!\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void _dpk_rx_dck(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_write_rf(rtwdev, path, RR_RXBB2, RR_EN_TIA_IDA, 0x3);
+ _set_rx_dck(rtwdev, phy, path, false);
+}
+
+static void _dpk_information(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ struct rtw89_hal *hal = &rtwdev->hal;
+
+ u8 kidx = dpk->cur_idx[path];
+
+ dpk->bp[path][kidx].band = hal->current_band_type;
+ dpk->bp[path][kidx].ch = hal->current_channel;
+ dpk->bp[path][kidx].bw = hal->current_band_width;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n",
+ path, dpk->cur_idx[path], phy,
+ rtwdev->is_tssi_mode[path] ? "on" : "off",
+ rtwdev->dbcc_en ? "on" : "off",
+ dpk->bp[path][kidx].band == 0 ? "2G" :
+ dpk->bp[path][kidx].band == 1 ? "5G" : "6G",
+ dpk->bp[path][kidx].ch,
+ dpk->bp[path][kidx].bw == 0 ? "20M" :
+ dpk->bp[path][kidx].bw == 1 ? "40M" : "80M");
+}
+
+static void _dpk_bb_afe_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ switch (kpath) {
+ case RF_A:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x0)
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl);
+ break;
+ case RF_B:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_2P4G_BAND, B_2P4G_BAND_SEL) == 0x1)
+ rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl);
+ break;
+ case RF_AB:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl);
+ break;
+ default:
+ break;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+}
+
+static void _dpk_bb_afe_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kpath)
+{
+ switch (kpath) {
+ case RF_A:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl);
+ break;
+ case RF_B:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl);
+ break;
+ case RF_AB:
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl);
+ break;
+ default:
+ break;
+ }
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Restore BB/AFE for PHY%d (kpath=%d)\n", phy, kpath);
+}
+
+static void _dpk_tssi_pause(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_pause)
+{
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13),
+ B_P0_TSSI_TRK_EN, is_pause);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path,
+ is_pause ? "pause" : "resume");
+}
+
+static void _dpk_kip_setting(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x00093f3f);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG, B_DPK_CFG_IDX, 0x2);
+ rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, B_NCTL_CFG_SPAGE, path); /*subpage_id*/
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x003f2e2e);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x005b5b5b);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP setting for S%d[%d]!!\n",
+ path, kidx);
+}
+
+static void _dpk_kip_restore(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+ rtw89_phy_write32_clr(rtwdev, R_NCTL_RPT, MASKDWORD);
+ rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000);
+ rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0x10010000);
+ rtw89_phy_write32_clr(rtwdev, R_KIP_CLK, MASKDWORD);
+
+ if (rtwdev->hal.cv > CHIP_CBV)
+ rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), BIT(15), 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path);
+}
+
+static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ u8 cur_rxbb;
+
+ cur_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl);
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc);
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x2);
+ rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK,
+ rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK));
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_OFF, 0x13);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x1);
+
+ fsleep(70);
+
+ rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTL, 0x1f);
+
+ if (cur_rxbb <= 0xa)
+ rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x3);
+ else if (cur_rxbb <= 0x10 && cur_rxbb >= 0xb)
+ rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_RXIQGEN, RR_RXIQGEN_ATTH, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11);
+
+ _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d LBK RXIQC = 0x%x\n", path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC, MASKDWORD));
+
+ rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXPOW, RR_RXPOW_IQK, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); /*POW IQKPLL*/
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_DPK);
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl);
+}
+
+static void _dpk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ dpk->bp[path][kidx].ther_dpk =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n",
+ dpk->bp[path][kidx].ther_dpk);
+}
+
+static u8 _dpk_set_tx_pwr(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path)
+{
+ u8 txagc_ori = 0x38;
+
+ rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc_ori);
+
+ return txagc_ori;
+}
+
+static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G) {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x280b);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTC, 0x0);
+ rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_ATTR, 0x4);
+ rtw89_write_rf(rtwdev, path, RR_MIXER, RR_MIXER_GN, 0x0);
+ } else {
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_DPK, 0x282e);
+ rtw89_write_rf(rtwdev, path, RR_BIASA2, RR_BIASA2_LB, 0x7);
+ rtw89_write_rf(rtwdev, path, RR_TXATANK, RR_TXATANK_LBSW, 0x3);
+ rtw89_write_rf(rtwdev, path, RR_RXA, RR_RXA_DPK, 0x3);
+ }
+ rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1);
+ rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] RF 0x0/0x1/0x1a = 0x%x/ 0x%x/ 0x%x\n",
+ rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK),
+ rtw89_read_rf(rtwdev, path, RR_BTC, RFREG_MASK));
+}
+
+static void _dpk_manual_txcfir(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_manual)
+{
+ u8 tmp_pad, tmp_txbb;
+
+ if (is_manual) {
+ rtw89_phy_write32_mask(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN, 0x1);
+ tmp_pad = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_PAD);
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
+ B_RFGAIN_PAD, tmp_pad);
+
+ tmp_txbb = (u8)rtw89_read_rf(rtwdev, path, RR_GAINTX, RR_GAINTX_BB);
+ rtw89_phy_write32_mask(rtwdev, R_RFGAIN + (path << 8),
+ B_RFGAIN_TXBB, tmp_txbb);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8),
+ B_LOAD_COEF_CFIR, 0x1);
+ rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8),
+ B_LOAD_COEF_CFIR);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), BIT(1), 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] PAD_man / TXBB_man = 0x%x / 0x%x\n", tmp_pad,
+ tmp_txbb);
+ } else {
+ rtw89_phy_write32_clr(rtwdev, R_KIP + (path << 8), B_KIP_RFGAIN);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] disable manual switch TXCFIR\n");
+ }
+}
+
+static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool is_bypass)
+{
+ if (is_bypass) {
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS2, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8),
+ B_RXIQC_BYPASS, 0x1);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Bypass RXIQC (0x8%d3c = 0x%x)\n", 1 + path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD));
+ } else {
+ rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS2);
+ rtw89_phy_write32_clr(rtwdev, R_RXIQC + (path << 8), B_RXIQC_BYPASS);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] restore 0x8%d3c = 0x%x\n", 1 + path,
+ rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8),
+ MASKDWORD));
+ }
+}
+
+static
+void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80)
+ rtw89_phy_write32_clr(rtwdev, R_TPG_MOD, B_TPG_MOD_F);
+ else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40)
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2);
+ else
+ rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n",
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" :
+ dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? "40M" : "20M");
+}
+
+static void _dpk_table_select(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ u8 val;
+
+ val = 0x80 + kidx * 0x20 + gain * 0x10;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx,
+ gain, val);
+}
+
+static bool _dpk_sync_check(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path)
+{
+#define DPK_SYNC_TH_DC_I 200
+#define DPK_SYNC_TH_DC_Q 200
+#define DPK_SYNC_TH_CORR 170
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u16 dc_i, dc_q;
+ u8 corr_val, corr_idx;
+
+ rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+ corr_idx = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORI);
+ corr_val = (u8)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_CORV);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", path, corr_idx,
+ corr_val);
+
+ dpk->corr_idx[path] = corr_idx;
+ dpk->corr_val[path] = corr_val;
+
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9);
+
+ dc_i = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+ dc_q = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCQ);
+
+ dc_i = abs(sign_extend32(dc_i, 11));
+ dc_q = abs(sign_extend32(dc_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n",
+ path, dc_i, dc_q);
+
+ dpk->dc_i[path] = dc_i;
+ dpk->dc_q[path] = dc_q;
+
+ if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q ||
+ corr_val < DPK_SYNC_TH_CORR)
+ return true;
+ else
+ return false;
+}
+
+static bool _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx)
+{
+ _dpk_tpg_sel(rtwdev, path, kidx);
+ _dpk_one_shot(rtwdev, phy, path, SYNC);
+ return _dpk_sync_check(rtwdev, path); /*1= fail*/
+}
+
+static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev)
+{
+ u16 dgain = 0x0;
+
+ rtw89_phy_write32_clr(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL);
+
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_SYNERR);
+
+ dgain = (u16)rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x (%d)\n", dgain,
+ dgain);
+
+ return dgain;
+}
+
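+/* Map the measured digital gain onto an RXBB index adjustment (+6 down to
+ * -12 in steps of 3) used by the DPK AGC loop.
+ */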
+static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain)
+{
+ s8 offset;
+
+ if (dgain >= 0x783)
+ offset = 0x6;
+ else if (dgain <= 0x782 && dgain >= 0x551)
+ offset = 0x3;
+ else if (dgain <= 0x550 && dgain >= 0x3c4)
+ offset = 0x0;
+ else if (dgain <= 0x3c3 && dgain >= 0x2aa)
+ offset = -3;
+ else if (dgain <= 0x2a9 && dgain >= 0x1e3)
+ offset = -6;
+ else if (dgain <= 0x1e2 && dgain >= 0x156)
+ offset = -9;
+ else if (dgain <= 0x155)
+ offset = -12;
+ else
+ offset = 0x0;
+
+ return offset;
+}
+
+static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev)
+{
+ rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6);
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1);
+ return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL);
+}
+
+static void _dpk_gainloss(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, enum rtw89_rf_path path,
+ u8 kidx)
+{
+ _dpk_table_select(rtwdev, path, kidx, 1);
+ _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS);
+}
+
+#define DPK_TXAGC_LOWER 0x2e
+#define DPK_TXAGC_UPPER 0x3f
+#define DPK_TXAGC_INVAL 0xff
+
+static u8 _dpk_set_offset(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, s8 gain_offset)
+{
+ u8 txagc;
+
+ txagc = (u8)rtw89_read_rf(rtwdev, path, RR_MODOPT, RFREG_MASK);
+
+ if (txagc - gain_offset < DPK_TXAGC_LOWER)
+ txagc = DPK_TXAGC_LOWER;
+ else if (txagc - gain_offset > DPK_TXAGC_UPPER)
+ txagc = DPK_TXAGC_UPPER;
+ else
+ txagc = txagc - gain_offset;
+
+ rtw89_write_rf(rtwdev, path, RR_MODOPT, RFREG_MASK, txagc);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n",
+ gain_offset, txagc);
+ return txagc;
+}
+
+enum dpk_agc_step {
+ DPK_AGC_STEP_SYNC_DGAIN,
+ DPK_AGC_STEP_GAIN_ADJ,
+ DPK_AGC_STEP_GAIN_LOSS_IDX,
+ DPK_AGC_STEP_GL_GT_CRITERION,
+ DPK_AGC_STEP_GL_LT_CRITERION,
+ DPK_AGC_STEP_SET_TX_GAIN,
+};
+
+static u8 _dpk_pas_read(struct rtw89_dev *rtwdev, bool is_check)
+{
+ u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0;
+ u8 i;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_rfk_dpk_pas_read_defs_tbl);
+
+ if (is_check) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00);
+ val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val1_i = abs(sign_extend32(val1_i, 11));
+ val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val1_q = abs(sign_extend32(val1_q, 11));
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f);
+ val2_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD);
+ val2_i = abs(sign_extend32(val2_i, 11));
+ val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD);
+ val2_q = abs(sign_extend32(val2_q, 11));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n",
+ (val1_i * val1_i + val1_q * val1_q) /
+ (val2_i * val2_i + val2_q * val2_q));
+
+ } else {
+ for (i = 0; i < 32; i++) {
+ rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] PAS_Read[%02d]= 0x%08x\n", i,
+ rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD));
+ }
+ }
+ if ((val1_i * val1_i + val1_q * val1_q) >=
+ ((val2_i * val2_i + val2_q * val2_q) * 8 / 5))
+ return 1;
+ else
+ return 0;
+}
+
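+/* DPK AGC state machine: alternate sync/digital-gain measurement, RXBB
+ * adjustment and gain-loss evaluation, stepping the TX AGC up or down
+ * until the gain-loss index is acceptable or the adjustment limit is hit.
+ * Returns the final TX AGC, or DPK_TXAGC_INVAL when sync fails.
+ */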
+static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 init_txagc,
+ bool loss_only)
+{
+#define DPK_AGC_ADJ_LMT 6
+#define DPK_DGAIN_UPPER 1922
+#define DPK_DGAIN_LOWER 342
+#define DPK_RXBB_UPPER 0x1f
+#define DPK_RXBB_LOWER 0
+#define DPK_GL_CRIT 7
+ u8 tmp_txagc, tmp_rxbb = 0, tmp_gl_idx = 0;
+ u8 agc_cnt = 0;
+ bool limited_rxbb = false;
+ s8 offset = 0;
+ u16 dgain = 0;
+ u8 step = DPK_AGC_STEP_SYNC_DGAIN;
+ bool goout = false;
+
+ tmp_txagc = init_txagc;
+
+ do {
+ switch (step) {
+ case DPK_AGC_STEP_SYNC_DGAIN:
+ if (_dpk_sync(rtwdev, phy, path, kidx)) {
+ tmp_txagc = DPK_TXAGC_INVAL;
+ goout = true;
+ break;
+ }
+
+ dgain = _dpk_dgain_read(rtwdev);
+
+ if (loss_only || limited_rxbb)
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ else
+ step = DPK_AGC_STEP_GAIN_ADJ;
+ break;
+
+ case DPK_AGC_STEP_GAIN_ADJ:
+ tmp_rxbb = (u8)rtw89_read_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB);
+ offset = _dpk_dgain_mapping(rtwdev, dgain);
+
+ if (tmp_rxbb + offset > DPK_RXBB_UPPER) {
+ tmp_rxbb = DPK_RXBB_UPPER;
+ limited_rxbb = true;
+ } else if (tmp_rxbb + offset < DPK_RXBB_LOWER) {
+ tmp_rxbb = DPK_RXBB_LOWER;
+ limited_rxbb = true;
+ } else {
+ tmp_rxbb = tmp_rxbb + offset;
+ }
+
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_M_RXBB, tmp_rxbb);
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Adjust RXBB (%d) = 0x%x\n", offset,
+ tmp_rxbb);
+ if (offset != 0 || agc_cnt == 0) {
+ if (rtwdev->hal.current_band_width < RTW89_CHANNEL_WIDTH_80)
+ _dpk_bypass_rxcfir(rtwdev, path, true);
+ else
+ _dpk_lbk_rxiqk(rtwdev, phy, path);
+ }
+ if (dgain > DPK_DGAIN_UPPER || dgain < DPK_DGAIN_LOWER)
+ step = DPK_AGC_STEP_SYNC_DGAIN;
+ else
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GAIN_LOSS_IDX:
+ _dpk_gainloss(rtwdev, phy, path, kidx);
+ tmp_gl_idx = _dpk_gainloss_read(rtwdev);
+
+ if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, true)) ||
+ tmp_gl_idx > DPK_GL_CRIT)
+ step = DPK_AGC_STEP_GL_GT_CRITERION;
+ else if (tmp_gl_idx == 0)
+ step = DPK_AGC_STEP_GL_LT_CRITERION;
+ else
+ step = DPK_AGC_STEP_SET_TX_GAIN;
+ break;
+
+ case DPK_AGC_STEP_GL_GT_CRITERION:
+ if (tmp_txagc == DPK_TXAGC_LOWER) {
+ goout = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@lower bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, path, 3);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_GL_LT_CRITERION:
+ if (tmp_txagc == DPK_TXAGC_UPPER) {
+ goout = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc@upper bound!!\n");
+ } else {
+ tmp_txagc = _dpk_set_offset(rtwdev, path, -2);
+ }
+ step = DPK_AGC_STEP_GAIN_LOSS_IDX;
+ agc_cnt++;
+ break;
+
+ case DPK_AGC_STEP_SET_TX_GAIN:
+ tmp_txagc = _dpk_set_offset(rtwdev, path, tmp_gl_idx);
+ goout = true;
+ agc_cnt++;
+ break;
+
+ default:
+ goout = true;
+ break;
+ }
+ } while (!goout && (agc_cnt < DPK_AGC_ADJ_LMT));
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc,
+ tmp_rxbb);
+
+ return tmp_txagc;
+}
+
+static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, u8 order)
+{
+ switch (order) {
+ case 0:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3);
+ rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1);
+ break;
+ case 1:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
+ break;
+ case 2:
+ rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order);
+ rtw89_phy_write32_clr(rtwdev, R_LDL_NORM, B_LDL_NORM_PN);
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN);
+ break;
+ default:
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Wrong MDPD order!!(0x%x)\n", order);
+ break;
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Set MDPD order to 0x%x for IDL\n", order);
+}
+
+static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 kidx, u8 gain)
+{
+ _dpk_set_mdpd_para(rtwdev, 0x0);
+ _dpk_table_select(rtwdev, path, kidx, 1);
+ _dpk_one_shot(rtwdev, phy, path, MDPK_IDL);
+}
+
+static void _dpk_fill_result(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, u8 kidx, u8 gain,
+ u8 txagc)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+
+ u16 pwsf = 0x78;
+ u8 gs = 0x5b;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), B_COEF_SEL_MDPD, kidx);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", txagc,
+ pwsf, gs);
+
+ dpk->bp[path][kidx].txagc_dpk = txagc;
+ rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8),
+ 0x3F << ((gain << 3) + (kidx << 4)), txagc);
+
+ dpk->bp[path][kidx].pwsf = pwsf;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x1FF << (gain << 4), pwsf);
+
+ rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1);
+ rtw89_phy_write32_clr(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD);
+
+ dpk->bp[path][kidx].gs = gs;
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKDWORD, 0x065b5b5b);
+
+ rtw89_phy_write32_clr(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD);
+
+ rtw89_phy_write32_clr(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL);
+}
+
+static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ bool is_reload = false;
+ u8 idx, cur_band, cur_ch;
+
+ cur_band = rtwdev->hal.current_band_type;
+ cur_ch = rtwdev->hal.current_channel;
+
+ for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) {
+ if (cur_band != dpk->bp[path][idx].band ||
+ cur_ch != dpk->bp[path][idx].ch)
+ continue;
+
+ rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8),
+ B_COEF_SEL_MDPD, idx);
+ dpk->cur_idx[path] = idx;
+ is_reload = true;
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] reload S%d[%d] success\n", path, idx);
+ }
+
+ return is_reload;
+}
+
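+/* Run the full DPK sequence for one path/bank: RF and KIP setup,
+ * RX DCK, gain-loss AGC search, thermal capture, IDL one-shot and
+ * result fill. Returns true on failure.
+ */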
+static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path, u8 gain)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 txagc = 0, kidx = dpk->cur_idx[path];
+ bool is_fail = false;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ========= S%d[%d] DPK Start =========\n", path,
+ kidx);
+
+ _rf_direct_cntrl(rtwdev, path, false);
+ txagc = _dpk_set_tx_pwr(rtwdev, gain, path);
+ _dpk_rf_setting(rtwdev, gain, path, kidx);
+ _dpk_rx_dck(rtwdev, phy, path);
+
+ _dpk_kip_setting(rtwdev, path, kidx);
+ _dpk_manual_txcfir(rtwdev, path, true);
+ txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false);
+ if (txagc == DPK_TXAGC_INVAL)
+ is_fail = true;
+ _dpk_get_thermal(rtwdev, kidx, path);
+
+ _dpk_idl_mpa(rtwdev, phy, path, kidx, gain);
+ rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX);
+ _dpk_fill_result(rtwdev, path, kidx, gain, txagc);
+ _dpk_manual_txcfir(rtwdev, path, false);
+
+ dpk->bp[path][kidx].path_ok = !is_fail;
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx,
+ is_fail ? "Failure" : "Success");
+
+ return is_fail;
+}
+
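+/* Top-level DPK flow: when reload is enabled, try to reuse results
+ * recorded for the current channel; otherwise calibrate into bank 0.
+ * Back up BB/RF/KIP state, run _dpk_main() for every requested path
+ * that was not reloaded and restore the original state afterwards.
+ */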
+static void _dpk_cal_select(struct rtw89_dev *rtwdev, bool force,
+ enum rtw89_phy_idx phy, u8 kpath)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u32 backup_bb_val[BACKUP_BB_REGS_NR];
+ u32 backup_rf_val[RTW8852A_DPK_RF_PATH][BACKUP_RF_REGS_NR];
+ u32 kip_bkup[RTW8852A_DPK_RF_PATH][RTW8852A_DPK_KIP_REG_NUM] = {{0}};
+ u32 kip_reg[] = {R_RXIQC, R_IQK_RES};
+ u8 path;
+ bool is_fail = true, reloaded[RTW8852A_DPK_RF_PATH] = {false};
+
+ if (dpk->is_dpk_reload_en) {
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)))
+ continue;
+
+ reloaded[path] = _dpk_reload_check(rtwdev, phy, path);
+ if (!reloaded[path] && dpk->bp[path][0].ch != 0)
+ dpk->cur_idx[path] = !dpk->cur_idx[path];
+ else
+ _dpk_onoff(rtwdev, path, false);
+ }
+ } else {
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++)
+ dpk->cur_idx[path] = 0;
+ }
+
+ if ((kpath == RF_A && reloaded[RF_PATH_A]) ||
+ (kpath == RF_B && reloaded[RF_PATH_B]) ||
+ (kpath == RF_AB && reloaded[RF_PATH_A] && reloaded[RF_PATH_B]))
+ return;
+
+ _rfk_backup_bb_reg(rtwdev, &backup_bb_val[0]);
+
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)) || reloaded[path])
+ continue;
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, true);
+ _dpk_bkup_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_backup_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ _dpk_information(rtwdev, phy, path);
+ }
+
+ _dpk_bb_afe_setting(rtwdev, phy, path, kpath);
+
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)) || reloaded[path])
+ continue;
+
+ is_fail = _dpk_main(rtwdev, phy, path, 1);
+ _dpk_onoff(rtwdev, path, is_fail);
+ }
+
+ _dpk_bb_afe_restore(rtwdev, phy, path, kpath);
+ _rfk_restore_bb_reg(rtwdev, &backup_bb_val[0]);
+
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ if (!(kpath & BIT(path)) || reloaded[path])
+ continue;
+
+ _dpk_kip_restore(rtwdev, path);
+ _dpk_reload_kip(rtwdev, kip_reg, kip_bkup, path);
+ _rfk_restore_rf_reg(rtwdev, &backup_rf_val[path][0], path);
+ if (rtwdev->is_tssi_mode[path])
+ _dpk_tssi_pause(rtwdev, path, false);
+ }
+}
+
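+/* DPK is skipped when an external PA is used on the current band. */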
+static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_fem_info *fem = &rtwdev->fem;
+
+ if (fem->epa_2g && rtwdev->hal.current_band_type == RTW89_BAND_2G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 2G_ext_PA exist!!\n");
+ return true;
+ } else if (fem->epa_5g && rtwdev->hal.current_band_type == RTW89_BAND_5G) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] Skip DPK due to 5G_ext_PA exist!!\n");
+ return true;
+ }
+
+ return false;
+}
+
+static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 path, kpath;
+
+ kpath = _kpath(rtwdev, phy);
+
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ if (kpath & BIT(path))
+ _dpk_onoff(rtwdev, path, true);
+ }
+}
+
+static void _dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool force)
+{
+ rtw89_debug(rtwdev, RTW89_DBG_RFK,
+ "[DPK] ****** DPK Start (Ver: 0x%x, Cv: %d, RF_para: %d) ******\n",
+ RTW8852A_DPK_VER, rtwdev->hal.cv,
+ RTW8852A_RF_REL_VERSION);
+
+ if (_dpk_bypass_check(rtwdev, phy))
+ _dpk_force_bypass(rtwdev, phy);
+ else
+ _dpk_cal_select(rtwdev, force, phy, _kpath(rtwdev, phy));
+}
+
+static void _dpk_onoff(struct rtw89_dev *rtwdev,
+ enum rtw89_rf_path path, bool off)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ u8 val, kidx = dpk->cur_idx[path];
+
+ val = dpk->is_dpk_enable && !off && dpk->bp[path][kidx].path_ok;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2),
+ MASKBYTE3, 0x6 | val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path,
+ kidx, dpk->is_dpk_enable && !off ? "enable" : "disable");
+}
+
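+/* Thermal tracking: recompute the pwsf of the active bank from the
+ * thermal delta since calibration (plus the txagc offsets in TSSI
+ * mode) and write it back to R_DPD_BND unless tracking is disabled.
+ */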
+static void _dpk_track(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_dpk_info *dpk = &rtwdev->dpk;
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 path, kidx;
+ u8 trk_idx = 0, txagc_rf = 0;
+ s8 txagc_bb = 0, txagc_bb_tp = 0, ini_diff = 0, txagc_ofst = 0;
+ u16 pwsf[2];
+ u8 cur_ther;
+ s8 delta_ther[2] = {0};
+
+ for (path = 0; path < RTW8852A_DPK_RF_PATH; path++) {
+ kidx = dpk->cur_idx[path];
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n",
+ path, kidx, dpk->bp[path][kidx].ch);
+
+ cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] thermal now = %d\n", cur_ther);
+
+ if (dpk->bp[path][kidx].ch != 0 && cur_ther != 0)
+ delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther;
+
+ if (dpk->bp[path][kidx].band == RTW89_BAND_2G)
+ delta_ther[path] = delta_ther[path] * 3 / 2;
+ else
+ delta_ther[path] = delta_ther[path] * 5 / 2;
+
+ txagc_rf = (u8)rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13),
+ RR_MODOPT_M_TXPWR);
+
+ if (rtwdev->is_tssi_mode[path]) {
+ trk_idx = (u8)rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n",
+ txagc_rf, trk_idx);
+
+ txagc_bb =
+ (s8)rtw89_phy_read32_mask(rtwdev,
+ R_TXAGC_BB + (path << 13),
+ MASKBYTE2);
+ txagc_bb_tp =
+ (u8)rtw89_phy_read32_mask(rtwdev,
+ R_TXAGC_TP + (path << 13),
+ B_TXAGC_TP);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n",
+ txagc_bb_tp, txagc_bb);
+
+ txagc_ofst =
+ (s8)rtw89_phy_read32_mask(rtwdev,
+ R_TXAGC_BB + (path << 13),
+ MASKBYTE3);
+
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n",
+ txagc_ofst, delta_ther[path]);
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8),
+ BIT(15)) == 0x1)
+ txagc_ofst = 0;
+
+ if (txagc_rf != 0 && cur_ther != 0)
+ ini_diff = txagc_ofst + delta_ther[path];
+
+ if (rtw89_phy_read32_mask(rtwdev, R_P0_TXDPD + (path << 13),
+ B_P0_TXDPD) == 0x0) {
+ pwsf[0] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
+ txagc_bb + ini_diff +
+ tssi_info->extra_ofst[path];
+ pwsf[1] = dpk->bp[path][kidx].pwsf + txagc_bb_tp -
+ txagc_bb + ini_diff +
+ tssi_info->extra_ofst[path];
+ } else {
+ pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff +
+ tssi_info->extra_ofst[path];
+ pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff +
+ tssi_info->extra_ofst[path];
+ }
+ } else {
+ pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff;
+ }
+
+ if (rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS) == 0x0 &&
+ txagc_rf != 0) {
+ rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK,
+ "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n",
+ pwsf[0], pwsf[1]);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x000001FF, pwsf[0]);
+ rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2),
+ 0x01FF0000, pwsf[1]);
+ }
+ }
+}
+
+static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ if (band == RTW89_BAND_2G)
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1);
+ else
+ rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1);
+}
+
+static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_sys_defs_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852a_tssi_sys_defs_2g_tbl,
+ &rtw8852a_tssi_sys_defs_5g_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ enum rtw89_band band = rtwdev->hal.current_band_type;
+
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl,
+ &rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl);
+ rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G,
+ &rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl,
+ &rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl);
+}
+
+static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl,
+ &rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl);
+}
+
+static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_dck_defs_a_tbl,
+ &rtw8852a_tssi_dck_defs_b_tbl);
+}
+
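+/* Build the 64-entry thermal offset table for this path from the
+ * per-band delta swing tables and program it together with the stored
+ * thermal reading; fall back to a mid-scale value (32) and zero
+ * offsets when the stored thermal is invalid (0xff).
+ */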
+static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+#define __get_val(ptr, idx) \
+({ \
+ s8 *__ptr = (ptr); \
+ u8 __idx = (idx), __i, __v; \
+ u32 __val = 0; \
+ for (__i = 0; __i < 4; __i++) { \
+ __v = (__ptr[__idx + __i]); \
+ __val |= (__v << (8 * __i)); \
+ } \
+ __val; \
+})
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel;
+ u8 subband = rtwdev->hal.current_subband;
+ const u8 *thm_up_a = NULL;
+ const u8 *thm_down_a = NULL;
+ const u8 *thm_up_b = NULL;
+ const u8 *thm_down_b = NULL;
+ u8 thermal = 0xff;
+ s8 thm_ofst[64] = {0};
+ u32 tmp = 0;
+ u8 i, j;
+
+ switch (subband) {
+ case RTW89_CH_2G:
+ thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_p;
+ thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_2ga_n;
+ thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_p;
+ thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_2gb_n;
+ break;
+ case RTW89_CH_5G_BAND_1:
+ thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[0];
+ thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[0];
+ thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[0];
+ thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[0];
+ break;
+ case RTW89_CH_5G_BAND_3:
+ thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[1];
+ thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[1];
+ thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[1];
+ thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[1];
+ break;
+ case RTW89_CH_5G_BAND_4:
+ thm_up_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_p[2];
+ thm_down_a = rtw89_8852a_trk_cfg.delta_swingidx_5ga_n[2];
+ thm_up_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_p[2];
+ thm_down_b = rtw89_8852a_trk_cfg.delta_swingidx_5gb_n[2];
+ break;
+ }
+
+ if (path == RF_PATH_A) {
+ thermal = tssi_info->thermal[RF_PATH_A];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, 0x0);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_a[i++] :
+ -thm_down_a[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_a[i++] :
+ thm_up_a[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = __get_val(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x5c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0);
+ } else {
+ thermal = tssi_info->thermal[RF_PATH_B];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1);
+
+ if (thermal == 0xff) {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32);
+
+ for (i = 0; i < 64; i += 4) {
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, 0x0);
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, thermal);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL,
+ thermal);
+
+ i = 0;
+ for (j = 0; j < 32; j++)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ -thm_down_b[i++] :
+ -thm_down_b[DELTA_SWINGIDX_SIZE - 1];
+
+ i = 1;
+ for (j = 63; j >= 32; j--)
+ thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ?
+ thm_up_b[i++] :
+ thm_up_b[DELTA_SWINGIDX_SIZE - 1];
+
+ for (i = 0; i < 64; i += 4) {
+ tmp = __get_val(thm_ofst, i);
+ rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] write 0x%x val=0x%08x\n",
+ 0x7c00 + i, tmp);
+ }
+ }
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1);
+ rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0);
+ }
+#undef __get_val
+}
+
+static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_dac_gain_tbl_defs_a_tbl,
+ &rtw8852a_tssi_dac_gain_tbl_defs_b_tbl);
+}
+
+static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_slope_cal_org_defs_a_tbl,
+ &rtw8852a_tssi_slope_cal_org_defs_b_tbl);
+}
+
+static void _tssi_set_rf_gap_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_rf_gap_tbl_defs_a_tbl,
+ &rtw8852a_tssi_rf_gap_tbl_defs_b_tbl);
+}
+
+static void _tssi_set_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_slope_defs_a_tbl,
+ &rtw8852a_tssi_slope_defs_b_tbl);
+}
+
+static void _tssi_set_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_track_defs_a_tbl,
+ &rtw8852a_tssi_track_defs_b_tbl);
+}
+
+static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl,
+ &rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl);
+}
+
+static void _tssi_pak(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ u8 subband = rtwdev->hal.current_subband;
+
+ switch (subband) {
+ case RTW89_CH_2G:
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_pak_defs_a_2g_tbl,
+ &rtw8852a_tssi_pak_defs_b_2g_tbl);
+ break;
+ case RTW89_CH_5G_BAND_1:
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_pak_defs_a_5g_1_tbl,
+ &rtw8852a_tssi_pak_defs_b_5g_1_tbl);
+ break;
+ case RTW89_CH_5G_BAND_3:
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_pak_defs_a_5g_3_tbl,
+ &rtw8852a_tssi_pak_defs_b_5g_3_tbl);
+ break;
+ case RTW89_CH_5G_BAND_4:
+ rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A,
+ &rtw8852a_tssi_pak_defs_a_5g_4_tbl,
+ &rtw8852a_tssi_pak_defs_b_5g_4_tbl);
+ break;
+ }
+}
+
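+/* Enable TSSI on both paths, record the thermal baseline used by
+ * _tssi_track() and flag TSSI mode as active.
+ */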
+static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 i;
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ _tssi_set_track(rtwdev, phy, i);
+ _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i);
+
+ rtw89_rfk_parser_by_cond(rtwdev, i == RF_PATH_A,
+ &rtw8852a_tssi_enable_defs_a_tbl,
+ &rtw8852a_tssi_enable_defs_b_tbl);
+
+ tssi_info->base_thermal[i] =
+ ewma_thermal_read(&rtwdev->phystat.avg_thermal[i]);
+ rtwdev->is_tssi_mode[i] = true;
+ }
+}
+
+static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
+
+ rtwdev->is_tssi_mode[RF_PATH_A] = false;
+ rtwdev->is_tssi_mode[RF_PATH_B] = false;
+}
+
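+/* Map a 2GHz channel to its CCK TSSI DE group index. */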
+static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 13:
+ return 4;
+ case 14:
+ return 5;
+ }
+
+ return 0;
+}
+
+#define TSSI_EXTRA_GROUP_BIT (BIT(31))
+#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx))
+#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT)
+#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1)
+
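+/* Map a channel to its OFDM/MCS TSSI DE group. Channels sitting on a
+ * group boundary return an "extra" group, for which the DE is later
+ * taken as the average of the two neighbouring groups.
+ */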
+static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 2:
+ return 0;
+ case 3 ... 5:
+ return 1;
+ case 6 ... 8:
+ return 2;
+ case 9 ... 11:
+ return 3;
+ case 12 ... 14:
+ return 4;
+ case 36 ... 40:
+ return 5;
+ case 41 ... 43:
+ return TSSI_EXTRA_GROUP(5);
+ case 44 ... 48:
+ return 6;
+ case 49 ... 51:
+ return TSSI_EXTRA_GROUP(6);
+ case 52 ... 56:
+ return 7;
+ case 57 ... 59:
+ return TSSI_EXTRA_GROUP(7);
+ case 60 ... 64:
+ return 8;
+ case 100 ... 104:
+ return 9;
+ case 105 ... 107:
+ return TSSI_EXTRA_GROUP(9);
+ case 108 ... 112:
+ return 10;
+ case 113 ... 115:
+ return TSSI_EXTRA_GROUP(10);
+ case 116 ... 120:
+ return 11;
+ case 121 ... 123:
+ return TSSI_EXTRA_GROUP(11);
+ case 124 ... 128:
+ return 12;
+ case 129 ... 131:
+ return TSSI_EXTRA_GROUP(12);
+ case 132 ... 136:
+ return 13;
+ case 137 ... 139:
+ return TSSI_EXTRA_GROUP(13);
+ case 140 ... 144:
+ return 14;
+ case 149 ... 153:
+ return 15;
+ case 154 ... 156:
+ return TSSI_EXTRA_GROUP(15);
+ case 157 ... 161:
+ return 16;
+ case 162 ... 164:
+ return TSSI_EXTRA_GROUP(16);
+ case 165 ... 169:
+ return 17;
+ case 170 ... 172:
+ return TSSI_EXTRA_GROUP(17);
+ case 173 ... 177:
+ return 18;
+ }
+
+ return 0;
+}
+
+static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch)
+{
+ switch (ch) {
+ case 1 ... 8:
+ return 0;
+ case 9 ... 14:
+ return 1;
+ case 36 ... 48:
+ return 2;
+ case 52 ... 64:
+ return 3;
+ case 100 ... 112:
+ return 4;
+ case 116 ... 128:
+ return 5;
+ case 132 ... 144:
+ return 6;
+ case 149 ... 177:
+ return 7;
+ }
+
+ return 0;
+}
+
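+/* Look up the efuse MCS DE for the current channel, averaging the two
+ * neighbouring groups when the channel falls on a group boundary.
+ */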
+static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel;
+ u32 gidx, gidx_1st, gidx_2nd;
+ s8 de_1st = 0;
+ s8 de_2nd = 0;
+ s8 val;
+
+ gidx = _tssi_get_ofdm_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n",
+ path, gidx);
+
+ if (IS_TSSI_EXTRA_GROUP(gidx)) {
+ gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx);
+ gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx);
+ de_1st = tssi_info->tssi_mcs[path][gidx_1st];
+ de_2nd = tssi_info->tssi_mcs[path][gidx_2nd];
+ val = (de_1st + de_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n",
+ path, val, de_1st, de_2nd);
+ } else {
+ val = tssi_info->tssi_mcs[path][gidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val);
+ }
+
+ return val;
+}
+
+static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy,
+ enum rtw89_rf_path path)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel;
+ u32 tgidx, tgidx_1st, tgidx_2nd;
+ s8 tde_1st = 0;
+ s8 tde_2nd = 0;
+ s8 val;
+
+ tgidx = _tssi_get_trim_group(rtwdev, ch);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n",
+ path, tgidx);
+
+ if (IS_TSSI_EXTRA_GROUP(tgidx)) {
+ tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx);
+ tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx);
+ tde_1st = tssi_info->tssi_trim[path][tgidx_1st];
+ tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd];
+ val = (tde_1st + tde_2nd) / 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n",
+ path, val, tde_1st, tde_2nd);
+ } else {
+ val = tssi_info->tssi_trim[path][tgidx];
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs trim_de=%d\n",
+ path, val);
+ }
+
+ return val;
+}
+
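+/* Write the efuse-derived CCK and MCS DE values, adjusted by the trim
+ * DE, into the per-rate DE fields of both paths.
+ */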
+static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy)
+{
+#define __DE_MASK 0x003ff000
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ static const u32 r_cck_long[RF_PATH_NUM_8852A] = {0x5858, 0x7858};
+ static const u32 r_cck_short[RF_PATH_NUM_8852A] = {0x5860, 0x7860};
+ static const u32 r_mcs_20m[RF_PATH_NUM_8852A] = {0x5838, 0x7838};
+ static const u32 r_mcs_40m[RF_PATH_NUM_8852A] = {0x5840, 0x7840};
+ static const u32 r_mcs_80m[RF_PATH_NUM_8852A] = {0x5848, 0x7848};
+ static const u32 r_mcs_80m_80m[RF_PATH_NUM_8852A] = {0x5850, 0x7850};
+ static const u32 r_mcs_5m[RF_PATH_NUM_8852A] = {0x5828, 0x7828};
+ static const u32 r_mcs_10m[RF_PATH_NUM_8852A] = {0x5830, 0x7830};
+ u8 ch = rtwdev->hal.current_channel;
+ u8 i, gidx;
+ s8 ofdm_de;
+ s8 trim_de;
+ s32 val;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n",
+ phy, ch);
+
+ for (i = 0; i < RF_PATH_NUM_8852A; i++) {
+ gidx = _tssi_get_cck_group(rtwdev, ch);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = tssi_info->tssi_cck[i][gidx] + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n",
+ i, gidx, tssi_info->tssi_cck[i][gidx], trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, r_cck_long[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_cck_short[i], __DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n",
+ r_cck_long[i],
+ rtw89_phy_read32_mask(rtwdev, r_cck_long[i],
+ __DE_MASK));
+
+ ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i);
+ trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i);
+ val = ofdm_de + trim_de;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n",
+ i, ofdm_de, trim_de);
+
+ rtw89_phy_write32_mask(rtwdev, r_mcs_20m[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_mcs_40m[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_mcs_80m[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_mcs_80m_80m[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_mcs_5m[i], __DE_MASK, val);
+ rtw89_phy_write32_mask(rtwdev, r_mcs_10m[i], __DE_MASK, val);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n",
+ r_mcs_20m[i],
+ rtw89_phy_read32_mask(rtwdev, r_mcs_20m[i],
+ __DE_MASK));
+ }
+#undef __DE_MASK
+}
+
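+/* TSSI tracking: scale the thermal delta since enable by 1.5 to get a
+ * gain offset, split it into integer (>>3) and fractional (&7) parts
+ * and program the DPD offset and TX gain scaling table entry.
+ */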
+static void _tssi_track(struct rtw89_dev *rtwdev)
+{
+ static const u32 tx_gain_scale_table[] = {
+ 0x400, 0x40e, 0x41d, 0x427, 0x43c, 0x44c, 0x45c, 0x46c,
+ 0x400, 0x39d, 0x3ab, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f1
+ };
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 path;
+ u8 cur_ther;
+ s32 delta_ther = 0, gain_offset_int, gain_offset_float;
+ s8 gain_offset;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] %s:\n",
+ __func__);
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A])
+ return;
+ if (!rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ for (path = RF_PATH_A; path < RF_PATH_NUM_8852A; path++) {
+ if (!tssi_info->tssi_tracking_check[path]) {
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRK] skip!!!\n");
+ continue;
+ }
+
+ cur_ther = (u8)rtw89_phy_read32_mask(rtwdev,
+ R_TSSI_THER + (path << 13),
+ B_TSSI_THER);
+
+ if (cur_ther == 0 || tssi_info->base_thermal[path] == 0)
+ continue;
+
+ delta_ther = cur_ther - tssi_info->base_thermal[path];
+
+ gain_offset = (s8)delta_ther * 15 / 10;
+
+ tssi_info->extra_ofst[path] = gain_offset;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI][TRK] base_thermal=%d gain_offset=0x%x path=%d\n",
+ tssi_info->base_thermal[path], gain_offset, path);
+
+ gain_offset_int = gain_offset >> 3;
+ gain_offset_float = gain_offset & 7;
+
+ if (gain_offset_int > 15)
+ gain_offset_int = 15;
+ else if (gain_offset_int < -16)
+ gain_offset_int = -16;
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN + (path << 13),
+ B_DPD_OFT_EN, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
+ B_TXGAIN_SCALE_EN, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_ADDR + (path << 13),
+ B_DPD_OFT_ADDR, gain_offset_int);
+
+ rtw89_phy_write32_mask(rtwdev, R_TXGAIN_SCALE + (path << 13),
+ B_TXGAIN_SCALE_OFT,
+ tx_gain_scale_table[gain_offset_float]);
+ }
+}
+
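+/* Keep the extra TSSI tracking compensation only when the 2GHz MCS TX
+ * power limit exceeds 18 dBm; otherwise load the default tracking
+ * table and clear the extra offsets.
+ */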
+static void _tssi_high_power(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 ch = rtwdev->hal.current_channel, ch_tmp;
+ u8 bw = rtwdev->hal.current_band_width;
+ u8 subband = rtwdev->hal.current_subband;
+ s8 power;
+ s32 xdbm;
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ ch_tmp = ch - 2;
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ ch_tmp = ch - 6;
+ else
+ ch_tmp = ch;
+
+ power = rtw89_phy_read_txpwr_limit(rtwdev, bw, RTW89_1TX,
+ RTW89_RS_MCS, RTW89_NONBF, ch_tmp);
+
+ xdbm = power * 100 / 4;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d xdbm=%d\n",
+ __func__, phy, xdbm);
+
+ if (xdbm > 1800 && subband == RTW89_CH_2G) {
+ tssi_info->tssi_tracking_check[RF_PATH_A] = true;
+ tssi_info->tssi_tracking_check[RF_PATH_B] = true;
+ } else {
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_tracking_defs_tbl);
+ tssi_info->extra_ofst[RF_PATH_A] = 0;
+ tssi_info->extra_ofst[RF_PATH_B] = 0;
+ tssi_info->tssi_tracking_check[RF_PATH_A] = false;
+ tssi_info->tssi_tracking_check[RF_PATH_B] = false;
+ }
+}
+
+static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy,
+ u8 path, s16 pwr_dbm, u8 enable)
+{
+ rtw8852a_bb_set_plcp_tx(rtwdev);
+ rtw8852a_bb_cfg_tx_path(rtwdev, path);
+ rtw8852a_bb_set_power(rtwdev, pwr_dbm, phy);
+ rtw8852a_bb_set_pmac_pkt_tx(rtwdev, enable, 20, 5000, 0, phy);
+}
+
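+/* With scheduler TX paused, send a short PMAC packet burst at a power
+ * derived from the OFDM TX power limit and capture the resulting
+ * default txagc offsets of both paths for later restore after scans.
+ */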
+static void _tssi_pre_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ const struct rtw89_chip_info *mac_reg = rtwdev->chip;
+ u8 ch = rtwdev->hal.current_channel, ch_tmp;
+ u8 bw = rtwdev->hal.current_band_width;
+ u16 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy, 0);
+ s8 power;
+ s16 xdbm;
+ u32 i, tx_counter = 0;
+
+ if (bw == RTW89_CHANNEL_WIDTH_40)
+ ch_tmp = ch - 2;
+ else if (bw == RTW89_CHANNEL_WIDTH_80)
+ ch_tmp = ch - 6;
+ else
+ ch_tmp = ch;
+
+ power = rtw89_phy_read_txpwr_limit(rtwdev, RTW89_CHANNEL_WIDTH_20, RTW89_1TX,
+ RTW89_RS_OFDM, RTW89_NONBF, ch_tmp);
+
+ xdbm = (power * 100) >> mac_reg->txpwr_factor_mac;
+
+ if (xdbm > 1800)
+ xdbm = 68;
+ else
+ xdbm = power * 2;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] %s: phy=%d org_power=%d xdbm=%d\n",
+ __func__, phy, power, xdbm);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+ rtw89_mac_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy));
+ tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD);
+
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, true);
+ mdelay(15);
+ _tssi_hw_tx(rtwdev, phy, RF_PATH_AB, xdbm, false);
+
+ tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD) -
+ tx_counter;
+
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, MASKHWORD) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_A] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
+ MASKBYTE3);
+
+ if (tssi_info->default_txagc_offset[RF_PATH_A] != 0x0)
+ break;
+ }
+ }
+
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, MASKHWORD) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_B] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
+ MASKBYTE3);
+
+ if (tssi_info->default_txagc_offset[RF_PATH_B] != 0x0)
+ break;
+ }
+ }
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] %s: tx counter=%d\n",
+ __func__, tx_counter);
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI,
+ "[TSSI] Backup R_TXAGC_BB=0x%x R_TXAGC_BB_S1=0x%x\n",
+ tssi_info->default_txagc_offset[RF_PATH_A],
+ tssi_info->default_txagc_offset[RF_PATH_B]);
+
+ rtw8852a_bb_tx_mode_switch(rtwdev, phy, 0);
+
+ rtw89_mac_resume_sch_tx(rtwdev, phy, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852a_rck(struct rtw89_dev *rtwdev)
+{
+ u8 path;
+
+ for (path = 0; path < 2; path++)
+ _rck(rtwdev, path);
+}
+
+void rtw8852a_dack(struct rtw89_dev *rtwdev)
+{
+ u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START);
+ _dac_cal(rtwdev, false);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP);
+}
+
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u16 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START);
+ rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _iqk_init(rtwdev);
+ if (rtwdev->dbcc_en)
+ _iqk_dbcc(rtwdev, phy_idx);
+ else
+ _iqk(rtwdev, phy_idx, false);
+
+ rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP);
+}
+
+void rtw8852a_iqk_track(struct rtw89_dev *rtwdev)
+{
+ _iqk_track(rtwdev);
+}
+
+void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool is_afe)
+{
+ u16 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START);
+ rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ _rx_dck(rtwdev, phy_idx, is_afe);
+
+ rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP);
+}
+
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx)
+{
+ u16 tx_en;
+ u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0);
+
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_START);
+ rtw89_mac_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL);
+ _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx));
+
+ rtwdev->dpk.is_dpk_enable = true;
+ rtwdev->dpk.is_dpk_reload_en = false;
+ _dpk(rtwdev, phy_idx, false);
+
+ rtw89_mac_resume_sch_tx(rtwdev, phy_idx, tx_en);
+ rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DPK, BTC_WRFK_STOP);
+}
+
+void rtw8852a_dpk_track(struct rtw89_dev *rtwdev)
+{
+ _dpk_track(rtwdev);
+}
+
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
+ __func__, phy);
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy);
+ _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i);
+ _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i);
+ _tssi_set_dck(rtwdev, phy, i);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_set_dac_gain_tbl(rtwdev, phy, i);
+ _tssi_slope_cal_org(rtwdev, phy, i);
+ _tssi_set_rf_gap_tbl(rtwdev, phy, i);
+ _tssi_set_slope(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+ _tssi_high_power(rtwdev, phy);
+ _tssi_pre_tx(rtwdev, phy);
+}
+
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ u8 i;
+
+ rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n",
+ __func__, phy);
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A])
+ return;
+ if (!rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ _tssi_disable(rtwdev, phy);
+
+ for (i = RF_PATH_A; i < RF_PATH_NUM_8852A; i++) {
+ _tssi_rf_setting(rtwdev, phy, i);
+ _tssi_set_sys(rtwdev, phy);
+ _tssi_set_tmeter_tbl(rtwdev, phy, i);
+ _tssi_pak(rtwdev, phy, i);
+ }
+
+ _tssi_enable(rtwdev, phy);
+ _tssi_set_efuse_to_de(rtwdev, phy);
+}
+
+void rtw8852a_tssi_track(struct rtw89_dev *rtwdev)
+{
+ _tssi_track(rtwdev);
+}
+
+static
+void _rtw8852a_tssi_avg_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ /* disable */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x0);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x0);
+
+ /* enable */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
+}
+
+static
+void _rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy)
+{
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ /* disable */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_disable_defs_tbl);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x4);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2);
+
+ /* enable */
+ rtw89_rfk_parser(rtwdev, &rtw8852a_tssi_enable_defs_ab_tbl);
+}
+
+static void rtw8852a_tssi_set_avg(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (enable) {
+ /* SCAN_START */
+ _rtw8852a_tssi_avg_scan(rtwdev, phy);
+ } else {
+ /* SCAN_END */
+ _rtw8852a_tssi_set_avg(rtwdev, phy);
+ }
+}
+
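+/* On scan start, latch the current txagc offsets of both paths; on
+ * scan end, write the captured offsets back and re-trigger the TSSI
+ * offset enable bits.
+ */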
+static void rtw8852a_tssi_default_txagc(struct rtw89_dev *rtwdev,
+ enum rtw89_phy_idx phy, bool enable)
+{
+ struct rtw89_tssi_info *tssi_info = &rtwdev->tssi;
+ u8 i;
+
+ if (!rtwdev->is_tssi_mode[RF_PATH_A] && !rtwdev->is_tssi_mode[RF_PATH_B])
+ return;
+
+ if (enable) {
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB, B_TXAGC_BB_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_A] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB,
+ B_TXAGC_BB);
+ if (tssi_info->default_txagc_offset[RF_PATH_A])
+ break;
+ }
+ }
+
+ if (rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0xc000 &&
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1, B_TXAGC_BB_S1_OFT) != 0x0) {
+ for (i = 0; i < 6; i++) {
+ tssi_info->default_txagc_offset[RF_PATH_B] =
+ rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB_S1,
+ B_TXAGC_BB_S1);
+ if (tssi_info->default_txagc_offset[RF_PATH_B])
+ break;
+ }
+ }
+ } else {
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_A]);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT,
+ tssi_info->default_txagc_offset[RF_PATH_B]);
+
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1);
+
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0);
+ rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1);
+ }
+}
+
+void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev,
+ bool scan_start, enum rtw89_phy_idx phy_idx)
+{
+ if (scan_start) {
+ rtw8852a_tssi_default_txagc(rtwdev, phy_idx, true);
+ rtw8852a_tssi_set_avg(rtwdev, phy_idx, true);
+ } else {
+ rtw8852a_tssi_default_txagc(rtwdev, phy_idx, false);
+ rtw8852a_tssi_set_avg(rtwdev, phy_idx, false);
+ }
+}
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
new file mode 100644
index 000000000000..ea36553a76b7
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852A_RFK_H__
+#define __RTW89_8852A_RFK_H__
+
+#include "core.h"
+
+void rtw8852a_rck(struct rtw89_dev *rtwdev);
+void rtw8852a_dack(struct rtw89_dev *rtwdev);
+void rtw8852a_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx);
+void rtw8852a_iqk_track(struct rtw89_dev *rtwdev);
+void rtw8852a_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx,
+ bool is_afe);
+void rtw8852a_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_dpk_track(struct rtw89_dev *rtwdev);
+void rtw8852a_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy);
+void rtw8852a_tssi_track(struct rtw89_dev *rtwdev);
+void rtw8852a_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start,
+ enum rtw89_phy_idx phy_idx);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
new file mode 100644
index 000000000000..510570090502
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.c
@@ -0,0 +1,1607 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "rtw8852a_rfk_table.h"
+
+static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs[] = {
+ DECL_RFK_WM(0x12a8, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x12a8, 0x0000000e, 0x00000002),
+ DECL_RFK_WM(0x32a8, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x32a8, 0x0000000e, 0x00000002),
+ DECL_RFK_WM(0x12bc, 0x000000f0, 0x00000005),
+ DECL_RFK_WM(0x12bc, 0x00000f00, 0x00000005),
+ DECL_RFK_WM(0x12bc, 0x000f0000, 0x00000005),
+ DECL_RFK_WM(0x12bc, 0x0000f000, 0x00000005),
+ DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ DECL_RFK_WM(0x32bc, 0x000000f0, 0x00000005),
+ DECL_RFK_WM(0x32bc, 0x00000f00, 0x00000005),
+ DECL_RFK_WM(0x32bc, 0x000f0000, 0x00000005),
+ DECL_RFK_WM(0x32bc, 0x0000f000, 0x00000005),
+ DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+ DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ DECL_RFK_WM(0x0300, 0xff000000, 0x00000019),
+ DECL_RFK_WM(0x0304, 0x000000ff, 0x00000019),
+ DECL_RFK_WM(0x0304, 0x0000ff00, 0x0000001d),
+ DECL_RFK_WM(0x0314, 0xffff0000, 0x00002044),
+ DECL_RFK_WM(0x0318, 0x0000ffff, 0x00002042),
+ DECL_RFK_WM(0x0318, 0xffff0000, 0x00002002),
+ DECL_RFK_WM(0x0020, 0x00006000, 0x00000003),
+ DECL_RFK_WM(0x0024, 0x00006000, 0x00000003),
+ DECL_RFK_WM(0x0704, 0xffff0000, 0x0000601e),
+ DECL_RFK_WM(0x2704, 0xffff0000, 0x0000601e),
+ DECL_RFK_WM(0x0700, 0xf0000000, 0x00000004),
+ DECL_RFK_WM(0x2700, 0xf0000000, 0x00000004),
+ DECL_RFK_WM(0x0650, 0x3c000000, 0x00000000),
+ DECL_RFK_WM(0x2650, 0x3c000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_2g[] = {
+ DECL_RFK_WM(0x120c, 0x000000ff, 0x00000033),
+ DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000033),
+ DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000033),
+ DECL_RFK_WM(0x320c, 0x000000ff, 0x00000033),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_sys_defs_5g[] = {
+ DECL_RFK_WM(0x120c, 0x000000ff, 0x00000044),
+ DECL_RFK_WM(0x12c0, 0x0ff00000, 0x00000044),
+ DECL_RFK_WM(0x32c0, 0x0ff00000, 0x00000044),
+ DECL_RFK_WM(0x320c, 0x000000ff, 0x00000044),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_sys_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_a[] = {
+ DECL_RFK_WM(0x5800, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x5800, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x5800, 0x003f0000, 0x0000003f),
+ DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x5800, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x5800, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ DECL_RFK_WM(0x580c, 0x0000007f, 0x00000040),
+ DECL_RFK_WM(0x580c, 0x00007f00, 0x00000040),
+ DECL_RFK_WM(0x580c, 0x00008000, 0x00000000),
+ DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x0000fc00, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x00010000, 0x00000001),
+ DECL_RFK_WM(0x5810, 0x00fe0000, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x5810, 0x06000000, 0x00000000),
+ DECL_RFK_WM(0x5810, 0x38000000, 0x00000003),
+ DECL_RFK_WM(0x5810, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x5810, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00000c00, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x5814, 0x00002000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ DECL_RFK_WM(0x5814, 0x003c0000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x01c00000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0xe0000000, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x000000ff, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x0001ff00, 0x00000018),
+ DECL_RFK_WM(0x5818, 0x03fe0000, 0x00000016),
+ DECL_RFK_WM(0x5818, 0xfc000000, 0x00000000),
+ DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ DECL_RFK_WM(0x581c, 0x01e00000, 0x00000008),
+ DECL_RFK_WM(0x581c, 0x01e00000, 0x0000000e),
+ DECL_RFK_WM(0x581c, 0x1e000000, 0x00000008),
+ DECL_RFK_WM(0x581c, 0x1e000000, 0x0000000e),
+ DECL_RFK_WM(0x581c, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ DECL_RFK_WM(0x5820, 0x0000f000, 0x0000000f),
+ DECL_RFK_WM(0x5820, 0x001f0000, 0x00000000),
+ DECL_RFK_WM(0x5820, 0xffe00000, 0x00000000),
+ DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x5864, 0x03f00000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0x04000000, 0x00000000),
+ DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x58a0, 0x000000ff, 0x000000fd),
+ DECL_RFK_WM(0x58a0, 0x0000ff00, 0x000000e5),
+ DECL_RFK_WM(0x58a0, 0x00ff0000, 0x000000cd),
+ DECL_RFK_WM(0x58a0, 0xff000000, 0x000000b5),
+ DECL_RFK_WM(0x58a4, 0x000000ff, 0x00000016),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x0000001f, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x00000020, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x000001c0, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x0000f000, 0x00000002),
+ DECL_RFK_WM(0x58b4, 0x00ff0000, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ DECL_RFK_WM(0x58bc, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x58bc, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x58bc, 0x00030000, 0x00000003),
+ DECL_RFK_WM(0x58bc, 0x000c0000, 0x00000001),
+ DECL_RFK_WM(0x58bc, 0x00300000, 0x00000002),
+ DECL_RFK_WM(0x58bc, 0x00c00000, 0x00000002),
+ DECL_RFK_WM(0x58bc, 0x07000000, 0x00000007),
+ DECL_RFK_WM(0x58c0, 0x00fe0000, 0x0000003f),
+ DECL_RFK_WM(0x58c0, 0xff000000, 0x00000000),
+ DECL_RFK_WM(0x58c4, 0x0003ffff, 0x0003ffff),
+ DECL_RFK_WM(0x58c4, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x58c4, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x58c8, 0x00ffffff, 0x00000000),
+ DECL_RFK_WM(0x58c8, 0xf0000000, 0x00000000),
+ DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x58d0, 0x00001fff, 0x00000101),
+ DECL_RFK_WM(0x58d0, 0x0001e000, 0x00000004),
+ DECL_RFK_WM(0x58d0, 0x03fe0000, 0x00000100),
+ DECL_RFK_WM(0x58d0, 0x04000000, 0x00000000),
+ DECL_RFK_WM(0x58d4, 0x000000ff, 0x00000000),
+ DECL_RFK_WM(0x58d4, 0x0003fe00, 0x000000ff),
+ DECL_RFK_WM(0x58d4, 0x07fc0000, 0x00000100),
+ DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ DECL_RFK_WM(0x58d8, 0x0003fe00, 0x0000005c),
+ DECL_RFK_WM(0x58d8, 0x000c0000, 0x00000002),
+ DECL_RFK_WM(0x58d8, 0xfff00000, 0x00000800),
+ DECL_RFK_WM(0x58dc, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x58dc, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x58dc, 0x00010000, 0x00000000),
+ DECL_RFK_WM(0x58dc, 0x3ff00000, 0x00000000),
+ DECL_RFK_WM(0x58dc, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x58f0, 0x000001ff, 0x000001ff),
+ DECL_RFK_WM(0x58f0, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_b[] = {
+ DECL_RFK_WM(0x7800, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x7800, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x7800, 0x003f0000, 0x0000003f),
+ DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x7800, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x7800, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ DECL_RFK_WM(0x780c, 0x0000007f, 0x00000040),
+ DECL_RFK_WM(0x780c, 0x00007f00, 0x00000040),
+ DECL_RFK_WM(0x780c, 0x00008000, 0x00000000),
+ DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x0000fc00, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x00010000, 0x00000001),
+ DECL_RFK_WM(0x7810, 0x00fe0000, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x7810, 0x06000000, 0x00000000),
+ DECL_RFK_WM(0x7810, 0x38000000, 0x00000003),
+ DECL_RFK_WM(0x7810, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x7810, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00000c00, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x7814, 0x00002000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ DECL_RFK_WM(0x7814, 0x003c0000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x01c00000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0xe0000000, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x000000ff, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x0001ff00, 0x00000018),
+ DECL_RFK_WM(0x7818, 0x03fe0000, 0x00000016),
+ DECL_RFK_WM(0x7818, 0xfc000000, 0x00000000),
+ DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ DECL_RFK_WM(0x781c, 0x01e00000, 0x00000008),
+ DECL_RFK_WM(0x781c, 0x01e00000, 0x0000000e),
+ DECL_RFK_WM(0x781c, 0x1e000000, 0x00000008),
+ DECL_RFK_WM(0x781c, 0x1e000000, 0x0000000e),
+ DECL_RFK_WM(0x781c, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ DECL_RFK_WM(0x7820, 0x0000f000, 0x00000000),
+ DECL_RFK_WM(0x7820, 0x001f0000, 0x00000000),
+ DECL_RFK_WM(0x7820, 0xffe00000, 0x00000000),
+ DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7858, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x7864, 0x03f00000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0x04000000, 0x00000000),
+ DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x78a0, 0x000000ff, 0x000000fd),
+ DECL_RFK_WM(0x78a0, 0x0000ff00, 0x000000e5),
+ DECL_RFK_WM(0x78a0, 0x00ff0000, 0x000000cd),
+ DECL_RFK_WM(0x78a0, 0xff000000, 0x000000b5),
+ DECL_RFK_WM(0x78a4, 0x000000ff, 0x00000016),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x0000001f, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x00000020, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x000001c0, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x0000f000, 0x00000002),
+ DECL_RFK_WM(0x78b4, 0x00ff0000, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ DECL_RFK_WM(0x78bc, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x78bc, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x78bc, 0x00030000, 0x00000003),
+ DECL_RFK_WM(0x78bc, 0x000c0000, 0x00000001),
+ DECL_RFK_WM(0x78bc, 0x00300000, 0x00000002),
+ DECL_RFK_WM(0x78bc, 0x00c00000, 0x00000002),
+ DECL_RFK_WM(0x78bc, 0x07000000, 0x00000007),
+ DECL_RFK_WM(0x78c0, 0x00fe0000, 0x0000003f),
+ DECL_RFK_WM(0x78c0, 0xff000000, 0x00000000),
+ DECL_RFK_WM(0x78c4, 0x0003ffff, 0x0003ffff),
+ DECL_RFK_WM(0x78c4, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x78c4, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x78c8, 0x00ffffff, 0x00000000),
+ DECL_RFK_WM(0x78c8, 0xf0000000, 0x00000000),
+ DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x78d0, 0x00001fff, 0x00000101),
+ DECL_RFK_WM(0x78d0, 0x0001e000, 0x00000004),
+ DECL_RFK_WM(0x78d0, 0x03fe0000, 0x00000100),
+ DECL_RFK_WM(0x78d0, 0x04000000, 0x00000000),
+ DECL_RFK_WM(0x78d4, 0x000000ff, 0x00000000),
+ DECL_RFK_WM(0x78d4, 0x0003fe00, 0x000000ff),
+ DECL_RFK_WM(0x78d4, 0x07fc0000, 0x00000100),
+ DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+ DECL_RFK_WM(0x78d8, 0x0003fe00, 0x0000005c),
+ DECL_RFK_WM(0x78d8, 0x000c0000, 0x00000002),
+ DECL_RFK_WM(0x78d8, 0xfff00000, 0x00000800),
+ DECL_RFK_WM(0x78dc, 0x000000ff, 0x0000007f),
+ DECL_RFK_WM(0x78dc, 0x0000ff00, 0x00000080),
+ DECL_RFK_WM(0x78dc, 0x00010000, 0x00000000),
+ DECL_RFK_WM(0x78dc, 0x3ff00000, 0x00000000),
+ DECL_RFK_WM(0x78dc, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x78f0, 0x000001ff, 0x000001ff),
+ DECL_RFK_WM(0x78f0, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_2g[] = {
+ DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000013c),
+ DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000013c),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_2g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_defs_5g[] = {
+ DECL_RFK_WM(0x58d8, 0x000001ff, 0x0000016c),
+ DECL_RFK_WM(0x78d8, 0x000001ff, 0x0000016c),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_defs_5g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a[] = {
+ DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fc),
+ DECL_RFK_WM(0x58e4, 0x0000007f, 0x00000020),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b[] = {
+ DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fc),
+ DECL_RFK_WM(0x78e4, 0x0000007f, 0x00000020),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_a[] = {
+ DECL_RFK_WM(0x580c, 0x0fff0000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x5814, 0x00002000, 0x00000001),
+ DECL_RFK_WM(0x5814, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x5814, 0x00038000, 0x00000005),
+ DECL_RFK_WM(0x5814, 0x003c0000, 0x00000003),
+ DECL_RFK_WM(0x5814, 0x18000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_dck_defs_b[] = {
+ DECL_RFK_WM(0x780c, 0x0fff0000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x7814, 0x00002000, 0x00000001),
+ DECL_RFK_WM(0x7814, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x7814, 0x00038000, 0x00000005),
+ DECL_RFK_WM(0x7814, 0x003c0000, 0x00000003),
+ DECL_RFK_WM(0x7814, 0x18000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_a[] = {
+ DECL_RFK_WM(0x58b0, 0x00000fff, 0x00000000),
+ DECL_RFK_WM(0x58b0, 0x00000800, 0x00000001),
+ DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5ab4, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_dac_gain_tbl_defs_b[] = {
+ DECL_RFK_WM(0x78b0, 0x00000fff, 0x00000000),
+ DECL_RFK_WM(0x78b0, 0x00000800, 0x00000001),
+ DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000),
+ DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_dac_gain_tbl_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_a[] = {
+ DECL_RFK_WM(0x581c, 0x00100000, 0x00000000),
+ DECL_RFK_WM(0x58cc, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x58cc, 0x00000007, 0x00000000),
+ DECL_RFK_WM(0x58cc, 0x00000038, 0x00000001),
+ DECL_RFK_WM(0x58cc, 0x000001c0, 0x00000002),
+ DECL_RFK_WM(0x58cc, 0x00000e00, 0x00000003),
+ DECL_RFK_WM(0x5828, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x5898, 0x000000ff, 0x00000040),
+ DECL_RFK_WM(0x5830, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x5898, 0x0000ff00, 0x00000040),
+ DECL_RFK_WM(0x5838, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x5898, 0x00ff0000, 0x00000040),
+ DECL_RFK_WM(0x5840, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x5898, 0xff000000, 0x00000040),
+ DECL_RFK_WM(0x5848, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x589c, 0x000000ff, 0x00000040),
+ DECL_RFK_WM(0x5850, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x589c, 0x0000ff00, 0x00000040),
+ DECL_RFK_WM(0x5858, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x589c, 0x00ff0000, 0x00000040),
+ DECL_RFK_WM(0x5860, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x589c, 0xff000000, 0x00000040),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_slope_cal_org_defs_b[] = {
+ DECL_RFK_WM(0x781c, 0x00100000, 0x00000000),
+ DECL_RFK_WM(0x78cc, 0x00001000, 0x00000001),
+ DECL_RFK_WM(0x78cc, 0x00000007, 0x00000000),
+ DECL_RFK_WM(0x78cc, 0x00000038, 0x00000001),
+ DECL_RFK_WM(0x78cc, 0x000001c0, 0x00000002),
+ DECL_RFK_WM(0x78cc, 0x00000e00, 0x00000003),
+ DECL_RFK_WM(0x7828, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x7898, 0x000000ff, 0x00000040),
+ DECL_RFK_WM(0x7830, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x7898, 0x0000ff00, 0x00000040),
+ DECL_RFK_WM(0x7838, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x7898, 0x00ff0000, 0x00000040),
+ DECL_RFK_WM(0x7840, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x7898, 0xff000000, 0x00000040),
+ DECL_RFK_WM(0x7848, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x789c, 0x000000ff, 0x00000040),
+ DECL_RFK_WM(0x7850, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x789c, 0x0000ff00, 0x00000040),
+ DECL_RFK_WM(0x7878, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x789c, 0x00ff0000, 0x00000040),
+ DECL_RFK_WM(0x7860, 0x7fc00000, 0x00000040),
+ DECL_RFK_WM(0x789c, 0xff000000, 0x00000040),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_slope_cal_org_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_a[] = {
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_rf_gap_tbl_defs_b[] = {
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_rf_gap_tbl_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_a[] = {
+ DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00000800, 0x00000001),
+ DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x5820, 0x0000f000, 0x00000001),
+ DECL_RFK_WM(0x581c, 0x000003ff, 0x00000280),
+ DECL_RFK_WM(0x581c, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x58b8, 0x007f0000, 0x00000000),
+ DECL_RFK_WM(0x58b8, 0x7f000000, 0x00000000),
+ DECL_RFK_WM(0x58b4, 0x7f000000, 0x0000000a),
+ DECL_RFK_WM(0x58b8, 0x0000007f, 0x00000028),
+ DECL_RFK_WM(0x58b8, 0x00007f00, 0x00000076),
+ DECL_RFK_WM(0x5810, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x580c, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x580c, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x5838, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5858, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5834, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5834, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5838, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5854, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5854, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5858, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5824, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5824, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5828, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x582c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x582c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5830, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x583c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x583c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5840, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5844, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x5844, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5848, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x584c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x584c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5850, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x585c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x585c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x5828, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5830, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5840, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5848, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5850, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x5860, 0x003ff000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_slope_defs_b[] = {
+ DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00000800, 0x00000001),
+ DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x7820, 0x0000f000, 0x00000001),
+ DECL_RFK_WM(0x781c, 0x000003ff, 0x00000280),
+ DECL_RFK_WM(0x781c, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x78b8, 0x007f0000, 0x00000000),
+ DECL_RFK_WM(0x78b8, 0x7f000000, 0x00000000),
+ DECL_RFK_WM(0x78b4, 0x7f000000, 0x0000000a),
+ DECL_RFK_WM(0x78b8, 0x0000007f, 0x00000028),
+ DECL_RFK_WM(0x78b8, 0x00007f00, 0x00000076),
+ DECL_RFK_WM(0x7810, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x780c, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x780c, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x7838, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7858, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7834, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7834, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7838, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7854, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7854, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7858, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7824, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7824, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7828, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x782c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x782c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7830, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x783c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x783c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7840, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7844, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x7844, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7848, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x784c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x784c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7850, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x785c, 0x0003ffff, 0x000115f2),
+ DECL_RFK_WM(0x785c, 0x3ffc0000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x00000fff, 0x00000121),
+ DECL_RFK_WM(0x7828, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7830, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7840, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7848, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7850, 0x003ff000, 0x00000000),
+ DECL_RFK_WM(0x7860, 0x003ff000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_slope_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_a[] = {
+ DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x18000000, 0x00000000),
+ DECL_RFK_WM(0x5814, 0x00000800, 0x00000000),
+ DECL_RFK_WM(0x581c, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x5864, 0x000003ff, 0x000001ff),
+ DECL_RFK_WM(0x5864, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x5820, 0x00000fff, 0x00000080),
+ DECL_RFK_WM(0x5814, 0x01000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_track_defs_b[] = {
+ DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x18000000, 0x00000000),
+ DECL_RFK_WM(0x7814, 0x00000800, 0x00000000),
+ DECL_RFK_WM(0x781c, 0x20000000, 0x00000001),
+ DECL_RFK_WM(0x7864, 0x000003ff, 0x000001ff),
+ DECL_RFK_WM(0x7864, 0x000ffc00, 0x00000200),
+ DECL_RFK_WM(0x7820, 0x00000fff, 0x00000080),
+ DECL_RFK_WM(0x7814, 0x01000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_track_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_a[] = {
+ DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ DECL_RFK_WM(0x58e4, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x58e4, 0x00004000, 0x00000000),
+ DECL_RFK_WM(0x58e4, 0x00008000, 0x00000000),
+ DECL_RFK_WM(0x58e4, 0x000f0000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_txagc_ofst_mv_avg_defs_b[] = {
+ DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ DECL_RFK_WM(0x78e4, 0x00004000, 0x00000001),
+ DECL_RFK_WM(0x78e4, 0x00004000, 0x00000000),
+ DECL_RFK_WM(0x78e4, 0x00008000, 0x00000000),
+ DECL_RFK_WM(0x78e4, 0x000f0000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_txagc_ofst_mv_avg_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_2g[] = {
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d0),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001e8),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x0000000b),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000088),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_2g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_1[] = {
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d7),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fb),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000005),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x0000007c),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_1);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_3[] = {
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001d8),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x000001fc),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000006),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000078),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_3);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_a_5g_4[] = {
+ DECL_RFK_WM(0x5814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x58f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x58a4, 0x03fe0000, 0x000001e5),
+ DECL_RFK_WM(0x58a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58a8, 0x0003fe00, 0x0000000a),
+ DECL_RFK_WM(0x58a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x58ac, 0x0003fe00, 0x00000011),
+ DECL_RFK_WM(0x58ac, 0x07fc0000, 0x00000075),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_a_5g_4);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_2g[] = {
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001cc),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001e2),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000005),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000089),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_2g);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_1[] = {
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001d5),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x000001fc),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x00000005),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000079),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_1);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_3[] = {
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001dc),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000002),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000000b),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000076),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_3);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_pak_defs_b_5g_4[] = {
+ DECL_RFK_WM(0x7814, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f4, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000003ff, 0x00000000),
+ DECL_RFK_WM(0x78f8, 0x000ffc00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x0001ff00, 0x00000000),
+ DECL_RFK_WM(0x78a4, 0x03fe0000, 0x000001f0),
+ DECL_RFK_WM(0x78a8, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78a8, 0x0003fe00, 0x00000016),
+ DECL_RFK_WM(0x78a8, 0x07fc0000, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x000001ff, 0x00000000),
+ DECL_RFK_WM(0x78ac, 0x0003fe00, 0x0000001f),
+ DECL_RFK_WM(0x78ac, 0x07fc0000, 0x00000072),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_pak_defs_b_5g_4);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_a[] = {
+ DECL_RFK_WRF(0x0, 0x55, 0x00080, 0x00001),
+ DECL_RFK_WM(0x5818, 0x000000ff, 0x000000c0),
+ DECL_RFK_WM(0x5818, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5820, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x5818, 0x18000000, 0x00000003),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_b[] = {
+ DECL_RFK_WRF(0x1, 0x55, 0x00080, 0x00001),
+ DECL_RFK_WM(0x7818, 0x000000ff, 0x000000c0),
+ DECL_RFK_WM(0x7818, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7820, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x7818, 0x18000000, 0x00000003),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_disable_defs[] = {
+ DECL_RFK_WM(0x5820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x5818, 0x18000000, 0x00000001),
+ DECL_RFK_WM(0x7820, 0x80000000, 0x00000000),
+ DECL_RFK_WM(0x7818, 0x18000000, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_disable_defs);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_enable_defs_ab[] = {
+ DECL_RFK_WM(0x5820, 0x80000000, 0x0),
+ DECL_RFK_WM(0x5820, 0x80000000, 0x1),
+ DECL_RFK_WM(0x5818, 0x18000000, 0x3),
+ DECL_RFK_WM(0x7820, 0x80000000, 0x0),
+ DECL_RFK_WM(0x7820, 0x80000000, 0x1),
+ DECL_RFK_WM(0x7818, 0x18000000, 0x3),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_enable_defs_ab);
+
+static const struct rtw89_reg5_def rtw8852a_tssi_tracking_defs[] = {
+ DECL_RFK_WM(0x5800, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ DECL_RFK_WM(0x5804, 0xf8000000, 0x00000000),
+ DECL_RFK_WM(0x58f0, 0xfff00000, 0x00000400),
+ DECL_RFK_WM(0x7800, 0x10000000, 0x00000000),
+ DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+ DECL_RFK_WM(0x7804, 0xf8000000, 0x00000000),
+ DECL_RFK_WM(0x78f0, 0xfff00000, 0x00000400),
+};
+
+DECLARE_RFK_TBL(rtw8852a_tssi_tracking_defs);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_afe_init_defs[] = {
+ DECL_RFK_WC(0x12ec, 0x00008000),
+ DECL_RFK_WS(0x12ec, 0x00008000),
+ DECL_RFK_WC(0x5e00, 0x00000001),
+ DECL_RFK_WS(0x5e00, 0x00000001),
+ DECL_RFK_WC(0x32ec, 0x00008000),
+ DECL_RFK_WS(0x32ec, 0x00008000),
+ DECL_RFK_WC(0x7e00, 0x00000001),
+ DECL_RFK_WS(0x7e00, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_afe_init_defs);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_a[] = {
+ DECL_RFK_WS(0x5e00, 0x00000008),
+ DECL_RFK_WS(0x5e50, 0x00000008),
+ DECL_RFK_WS(0x5e10, 0x80000000),
+ DECL_RFK_WS(0x5e60, 0x80000000),
+ DECL_RFK_WC(0x5e00, 0x00000008),
+ DECL_RFK_WC(0x5e50, 0x00000008),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_reload_defs_b[] = {
+ DECL_RFK_WS(0x7e00, 0x00000008),
+ DECL_RFK_WS(0x7e50, 0x00000008),
+ DECL_RFK_WS(0x7e10, 0x80000000),
+ DECL_RFK_WS(0x7e60, 0x80000000),
+ DECL_RFK_WC(0x7e00, 0x00000008),
+ DECL_RFK_WC(0x7e50, 0x00000008),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_reload_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_a[] = {
+ DECL_RFK_WC(0x20f4, 0x01000000),
+ DECL_RFK_WS(0x20f8, 0x80000000),
+ DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ DECL_RFK_WC(0x20f0, 0x0000000f),
+ DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000002),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_addc_defs_b[] = {
+ DECL_RFK_WC(0x20f4, 0x01000000),
+ DECL_RFK_WS(0x20f8, 0x80000000),
+ DECL_RFK_WM(0x20f0, 0x00ff0000, 0x00000001),
+ DECL_RFK_WM(0x20f0, 0x00000f00, 0x00000002),
+ DECL_RFK_WC(0x20f0, 0x0000000f),
+ DECL_RFK_WM(0x20f0, 0x000000c0, 0x00000003),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_addc_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_a[] = {
+ DECL_RFK_WC(0x12d8, 0x00000030),
+ DECL_RFK_WC(0x32d8, 0x00000030),
+ DECL_RFK_WS(0x12b8, 0x40000000),
+ DECL_RFK_WC(0x032c, 0x40000000),
+ DECL_RFK_WC(0x032c, 0x00400000),
+ DECL_RFK_WS(0x032c, 0x00400000),
+ DECL_RFK_WS(0x030c, 0x0f000000),
+ DECL_RFK_WC(0x032c, 0x00010000),
+ DECL_RFK_WS(0x12dc, 0x00000002),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_a[] = {
+ DECL_RFK_WS(0x12d8, 0x000000c0),
+ DECL_RFK_WS(0x12d8, 0x00000800),
+ DECL_RFK_WC(0x12d8, 0x00000800),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x12d8, 0x00000300, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_a[] = {
+ DECL_RFK_WC(0x12dc, 0x00000002),
+ DECL_RFK_WS(0x032c, 0x00010000),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ DECL_RFK_WS(0x032c, 0x40000000),
+ DECL_RFK_WC(0x12b8, 0x40000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_reset_defs_b[] = {
+ DECL_RFK_WS(0x32b8, 0x40000000),
+ DECL_RFK_WC(0x032c, 0x40000000),
+ DECL_RFK_WC(0x032c, 0x00400000),
+ DECL_RFK_WS(0x032c, 0x00400000),
+ DECL_RFK_WS(0x030c, 0x0f000000),
+ DECL_RFK_WC(0x032c, 0x00010000),
+ DECL_RFK_WS(0x32dc, 0x00000002),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_reset_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_trigger_defs_b[] = {
+ DECL_RFK_WS(0x32d8, 0x000000c0),
+ DECL_RFK_WS(0x32d8, 0x00000800),
+ DECL_RFK_WC(0x32d8, 0x00000800),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x32d8, 0x00000300, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_trigger_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_addck_restore_defs_b[] = {
+ DECL_RFK_WC(0x32dc, 0x00000002),
+ DECL_RFK_WS(0x032c, 0x00010000),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x0000000c),
+ DECL_RFK_WS(0x032c, 0x40000000),
+ DECL_RFK_WC(0x32b8, 0x40000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_addck_restore_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_a[] = {
+ DECL_RFK_WC(0x032c, 0x40000000),
+ DECL_RFK_WS(0x030c, 0x0f000000),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ DECL_RFK_WC(0x032c, 0x00010000),
+ DECL_RFK_WS(0x12dc, 0x00000001),
+ DECL_RFK_WS(0x12e8, 0x00000004),
+ DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_f_b[] = {
+ DECL_RFK_WC(0x032c, 0x40000000),
+ DECL_RFK_WS(0x030c, 0x0f000000),
+ DECL_RFK_WM(0x030c, 0x0f000000, 0x00000003),
+ DECL_RFK_WC(0x032c, 0x00010000),
+ DECL_RFK_WS(0x32dc, 0x00000001),
+ DECL_RFK_WS(0x32e8, 0x00000004),
+ DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_f_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_a[] = {
+ DECL_RFK_WC(0x12dc, 0x00000001),
+ DECL_RFK_WC(0x12e8, 0x00000004),
+ DECL_RFK_WRF(0x0, 0x8f, 0x02000, 0x00000),
+ DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_check_dadc_defs_r_b[] = {
+ DECL_RFK_WC(0x32dc, 0x00000001),
+ DECL_RFK_WC(0x32e8, 0x00000004),
+ DECL_RFK_WRF(0x1, 0x8f, 0x02000, 0x00000),
+ DECL_RFK_WM(0x032c, 0x00010000, 0x00000001),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_check_dadc_defs_r_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_a[] = {
+ DECL_RFK_WS(0x5e00, 0x00000008),
+ DECL_RFK_WC(0x5e10, 0x80000000),
+ DECL_RFK_WS(0x5e50, 0x00000008),
+ DECL_RFK_WC(0x5e60, 0x80000000),
+ DECL_RFK_WS(0x12a0, 0x00008000),
+ DECL_RFK_WM(0x12a0, 0x00007000, 0x00000003),
+ DECL_RFK_WS(0x12b8, 0x40000000),
+ DECL_RFK_WS(0x030c, 0x10000000),
+ DECL_RFK_WC(0x032c, 0x80000000),
+ DECL_RFK_WS(0x12e0, 0x00010000),
+ DECL_RFK_WS(0x12e4, 0x0c000000),
+ DECL_RFK_WM(0x5e00, 0x03ff0000, 0x00000030),
+ DECL_RFK_WM(0x5e50, 0x03ff0000, 0x00000030),
+ DECL_RFK_WC(0x5e00, 0x0c000000),
+ DECL_RFK_WC(0x5e50, 0x0c000000),
+ DECL_RFK_WC(0x5e0c, 0x00000008),
+ DECL_RFK_WC(0x5e5c, 0x00000008),
+ DECL_RFK_WS(0x5e0c, 0x00000001),
+ DECL_RFK_WS(0x5e5c, 0x00000001),
+ DECL_RFK_DELAY(1),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_a[] = {
+ DECL_RFK_WC(0x12e4, 0x0c000000),
+ DECL_RFK_WS(0x5e0c, 0x00000008),
+ DECL_RFK_WS(0x5e5c, 0x00000008),
+ DECL_RFK_DELAY(1),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_a[] = {
+ DECL_RFK_WC(0x5e0c, 0x00000001),
+ DECL_RFK_WC(0x5e5c, 0x00000001),
+ DECL_RFK_WC(0x12e0, 0x00010000),
+ DECL_RFK_WC(0x12a0, 0x00008000),
+ DECL_RFK_WS(0x12a0, 0x00007000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_f_b[] = {
+ DECL_RFK_WS(0x7e00, 0x00000008),
+ DECL_RFK_WC(0x7e10, 0x80000000),
+ DECL_RFK_WS(0x7e50, 0x00000008),
+ DECL_RFK_WC(0x7e60, 0x80000000),
+ DECL_RFK_WS(0x32a0, 0x00008000),
+ DECL_RFK_WM(0x32a0, 0x00007000, 0x00000003),
+ DECL_RFK_WS(0x32b8, 0x40000000),
+ DECL_RFK_WS(0x030c, 0x10000000),
+ DECL_RFK_WC(0x032c, 0x80000000),
+ DECL_RFK_WS(0x32e0, 0x00010000),
+ DECL_RFK_WS(0x32e4, 0x0c000000),
+ DECL_RFK_WM(0x7e00, 0x03ff0000, 0x00000030),
+ DECL_RFK_WM(0x7e50, 0x03ff0000, 0x00000030),
+ DECL_RFK_WC(0x7e00, 0x0c000000),
+ DECL_RFK_WC(0x7e50, 0x0c000000),
+ DECL_RFK_WC(0x7e0c, 0x00000008),
+ DECL_RFK_WC(0x7e5c, 0x00000008),
+ DECL_RFK_WS(0x7e0c, 0x00000001),
+ DECL_RFK_WS(0x7e5c, 0x00000001),
+ DECL_RFK_DELAY(1),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_f_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_m_b[] = {
+ DECL_RFK_WC(0x32e4, 0x0c000000),
+ DECL_RFK_WM(0x7e0c, 0x00000008, 0x00000001),
+ DECL_RFK_WM(0x7e5c, 0x00000008, 0x00000001),
+ DECL_RFK_DELAY(1),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_m_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dack_defs_r_b[] = {
+ DECL_RFK_WC(0x7e0c, 0x00000001),
+ DECL_RFK_WC(0x7e5c, 0x00000001),
+ DECL_RFK_WC(0x32e0, 0x00010000),
+ DECL_RFK_WC(0x32a0, 0x00008000),
+ DECL_RFK_WS(0x32a0, 0x00007000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dack_defs_r_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_a[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ DECL_RFK_WS(0x12b8, 0x40000000),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ DECL_RFK_WS(0x12b8, 0x10000000),
+ DECL_RFK_WS(0x58c8, 0x01000000),
+ DECL_RFK_WS(0x5864, 0xc0000000),
+ DECL_RFK_WS(0x2008, 0x01ffffff),
+ DECL_RFK_WS(0x0c1c, 0x00000004),
+ DECL_RFK_WS(0x0700, 0x08000000),
+ DECL_RFK_WS(0x0c70, 0x000003ff),
+ DECL_RFK_WS(0x0c60, 0x00000003),
+ DECL_RFK_WS(0x0c6c, 0x00000001),
+ DECL_RFK_WS(0x58ac, 0x08000000),
+ DECL_RFK_WS(0x0c3c, 0x00000200),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_a[] = {
+ DECL_RFK_WS(0x4490, 0x80000000),
+ DECL_RFK_WS(0x12a0, 0x00007000),
+ DECL_RFK_WS(0x12a0, 0x00008000),
+ DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ DECL_RFK_WS(0x12a0, 0x00080000),
+ DECL_RFK_WS(0x0700, 0x01000000),
+ DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sf_defs_b[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ DECL_RFK_WS(0x32b8, 0x40000000),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ DECL_RFK_WS(0x32b8, 0x10000000),
+ DECL_RFK_WS(0x78c8, 0x01000000),
+ DECL_RFK_WS(0x7864, 0xc0000000),
+ DECL_RFK_WS(0x2008, 0x01ffffff),
+ DECL_RFK_WS(0x2c1c, 0x00000004),
+ DECL_RFK_WS(0x2700, 0x08000000),
+ DECL_RFK_WS(0x0c70, 0x000003ff),
+ DECL_RFK_WS(0x0c60, 0x00000003),
+ DECL_RFK_WS(0x0c6c, 0x00000001),
+ DECL_RFK_WS(0x78ac, 0x08000000),
+ DECL_RFK_WS(0x2c3c, 0x00000200),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sf_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_sr_defs_b[] = {
+ DECL_RFK_WS(0x6490, 0x80000000),
+ DECL_RFK_WS(0x32a0, 0x00007000),
+ DECL_RFK_WS(0x32a0, 0x00008000),
+ DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ DECL_RFK_WS(0x32a0, 0x00080000),
+ DECL_RFK_WS(0x2700, 0x01000000),
+ DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_sr_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_s_defs_ab[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ DECL_RFK_WS(0x12b8, 0x40000000),
+ DECL_RFK_WS(0x32b8, 0x40000000),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ DECL_RFK_WS(0x12b8, 0x10000000),
+ DECL_RFK_WS(0x58c8, 0x01000000),
+ DECL_RFK_WS(0x78c8, 0x01000000),
+ DECL_RFK_WS(0x5864, 0xc0000000),
+ DECL_RFK_WS(0x7864, 0xc0000000),
+ DECL_RFK_WS(0x2008, 0x01ffffff),
+ DECL_RFK_WS(0x0c1c, 0x00000004),
+ DECL_RFK_WS(0x0700, 0x08000000),
+ DECL_RFK_WS(0x0c70, 0x000003ff),
+ DECL_RFK_WS(0x0c60, 0x00000003),
+ DECL_RFK_WS(0x0c6c, 0x00000001),
+ DECL_RFK_WS(0x58ac, 0x08000000),
+ DECL_RFK_WS(0x78ac, 0x08000000),
+ DECL_RFK_WS(0x0c3c, 0x00000200),
+ DECL_RFK_WS(0x2344, 0x80000000),
+ DECL_RFK_WS(0x4490, 0x80000000),
+ DECL_RFK_WS(0x12a0, 0x00007000),
+ DECL_RFK_WS(0x12a0, 0x00008000),
+ DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ DECL_RFK_WS(0x12a0, 0x00080000),
+ DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ DECL_RFK_WS(0x32a0, 0x00080000),
+ DECL_RFK_WS(0x0700, 0x01000000),
+ DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_s_defs_ab);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_a[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ DECL_RFK_WC(0x12b8, 0x40000000),
+ DECL_RFK_WC(0x5864, 0xc0000000),
+ DECL_RFK_WC(0x2008, 0x01ffffff),
+ DECL_RFK_WC(0x0c1c, 0x00000004),
+ DECL_RFK_WC(0x0700, 0x08000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WC(0x12a0, 0x000ff000),
+ DECL_RFK_WC(0x0700, 0x07000000),
+ DECL_RFK_WC(0x5864, 0x20000000),
+ DECL_RFK_WC(0x0c3c, 0x00000200),
+ DECL_RFK_WC(0x20fc, 0xffff0000),
+ DECL_RFK_WC(0x58c8, 0x01000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_a);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_b[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ DECL_RFK_WC(0x32b8, 0x40000000),
+ DECL_RFK_WC(0x7864, 0xc0000000),
+ DECL_RFK_WC(0x2008, 0x01ffffff),
+ DECL_RFK_WC(0x2c1c, 0x00000004),
+ DECL_RFK_WC(0x2700, 0x08000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WC(0x32a0, 0x000ff000),
+ DECL_RFK_WC(0x2700, 0x07000000),
+ DECL_RFK_WC(0x7864, 0x20000000),
+ DECL_RFK_WC(0x2c3c, 0x00000200),
+ DECL_RFK_WC(0x20fc, 0xffff0000),
+ DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_b);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_bb_afe_r_defs_ab[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ DECL_RFK_WC(0x12b8, 0x40000000),
+ DECL_RFK_WC(0x32b8, 0x40000000),
+ DECL_RFK_WC(0x5864, 0xc0000000),
+ DECL_RFK_WC(0x7864, 0xc0000000),
+ DECL_RFK_WC(0x2008, 0x01ffffff),
+ DECL_RFK_WC(0x0c1c, 0x00000004),
+ DECL_RFK_WC(0x0700, 0x08000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WC(0x12a0, 0x000ff000),
+ DECL_RFK_WC(0x32a0, 0x000ff000),
+ DECL_RFK_WC(0x0700, 0x07000000),
+ DECL_RFK_WC(0x5864, 0x20000000),
+ DECL_RFK_WC(0x7864, 0x20000000),
+ DECL_RFK_WC(0x0c3c, 0x00000200),
+ DECL_RFK_WC(0x20fc, 0xffff0000),
+ DECL_RFK_WC(0x58c8, 0x01000000),
+ DECL_RFK_WC(0x78c8, 0x01000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_bb_afe_r_defs_ab);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_f[] = {
+ DECL_RFK_WM(0x030c, 0xff000000, 0x0000000f),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000003),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a001),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x0000a041),
+ DECL_RFK_WS(0x8074, 0x80000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_f);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_lbk_rxiqk_defs_r[] = {
+ DECL_RFK_WC(0x8074, 0x80000000),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x0000001f),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ DECL_RFK_DELAY(1),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000041),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_lbk_rxiqk_defs_r);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_dpk_pas_read_defs[] = {
+ DECL_RFK_WM(0x80d4, 0x00ff0000, 0x00000006),
+ DECL_RFK_WC(0x80bc, 0x00004000),
+ DECL_RFK_WM(0x80c0, 0x00ff0000, 0x00000008),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_dpk_pas_read_defs);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_nondbcc_path01[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ DECL_RFK_WM(0x2344, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00003333),
+ DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+ DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_nondbcc_path01);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path0[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ DECL_RFK_WM(0x5864, 0x18000000, 0x00000003),
+ DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ DECL_RFK_WM(0x12b8, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ DECL_RFK_WM(0x12b8, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x58c8, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x5864, 0xc0000000, 0x00000003),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x58ac, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000001),
+ DECL_RFK_WM(0x2320, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x4490, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x12a0, 0x00007000, 0x00000007),
+ DECL_RFK_WM(0x12a0, 0x00008000, 0x00000001),
+ DECL_RFK_WM(0x12a0, 0x00070000, 0x00000003),
+ DECL_RFK_WM(0x12a0, 0x00080000, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x0700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00001111),
+ DECL_RFK_WM(0x58f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path0);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_set_defs_dbcc_path1[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ DECL_RFK_WM(0x7864, 0x18000000, 0x00000003),
+ DECL_RFK_WM(0x32b8, 0x40000000, 0x00000001),
+ DECL_RFK_WM(0x030c, 0xff000000, 0x00000013),
+ DECL_RFK_WM(0x032c, 0xffff0000, 0x00000001),
+ DECL_RFK_WM(0x32b8, 0x10000000, 0x00000001),
+ DECL_RFK_WM(0x78c8, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x7864, 0xc0000000, 0x00000003),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x01ffffff),
+ DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000001),
+ DECL_RFK_WM(0x2700, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x0c70, 0x000003ff, 0x000003ff),
+ DECL_RFK_WM(0x0c60, 0x00000003, 0x00000003),
+ DECL_RFK_WM(0x0c6c, 0x00000001, 0x00000001),
+ DECL_RFK_WM(0x78ac, 0x08000000, 0x00000001),
+ DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000001),
+ DECL_RFK_WM(0x6490, 0x80000000, 0x00000001),
+ DECL_RFK_WM(0x32a0, 0x00007000, 0x00000007),
+ DECL_RFK_WM(0x32a0, 0x00008000, 0x00000001),
+ DECL_RFK_WM(0x32a0, 0x00070000, 0x00000003),
+ DECL_RFK_WM(0x32a0, 0x00080000, 0x00000001),
+ DECL_RFK_WM(0x2700, 0x01000000, 0x00000001),
+ DECL_RFK_WM(0x2700, 0x06000000, 0x00000002),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00002222),
+ DECL_RFK_WM(0x78f0, 0x00080000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_set_defs_dbcc_path1);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_nondbcc_path01[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000303),
+ DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x2320, 0x00000001, 0x00000000),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+ DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_nondbcc_path01);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path0[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000101),
+ DECL_RFK_WM(0x12b8, 0x40000000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ DECL_RFK_WM(0x0c1c, 0x00000004, 0x00000000),
+ DECL_RFK_WM(0x0700, 0x08000000, 0x00000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WM(0x12a0, 0x000ff000, 0x00000000),
+ DECL_RFK_WM(0x0700, 0x07000000, 0x00000000),
+ DECL_RFK_WM(0x5864, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x0c3c, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ DECL_RFK_WM(0x58c8, 0x01000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path0);
+
+static const struct rtw89_reg5_def rtw8852a_rfk_iqk_restore_defs_dbcc_path1[] = {
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000202),
+ DECL_RFK_WM(0x32b8, 0x40000000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0xc0000000, 0x00000000),
+ DECL_RFK_WM(0x2008, 0x01ffffff, 0x00000000),
+ DECL_RFK_WM(0x2c1c, 0x00000004, 0x00000000),
+ DECL_RFK_WM(0x2700, 0x08000000, 0x00000000),
+ DECL_RFK_WM(0x0c70, 0x0000001f, 0x00000003),
+ DECL_RFK_WM(0x0c70, 0x000003e0, 0x00000003),
+ DECL_RFK_WM(0x32a0, 0x000ff000, 0x00000000),
+ DECL_RFK_WM(0x2700, 0x07000000, 0x00000000),
+ DECL_RFK_WM(0x7864, 0x20000000, 0x00000000),
+ DECL_RFK_WM(0x2c3c, 0x00000200, 0x00000000),
+ DECL_RFK_WM(0x20fc, 0xffff0000, 0x00000000),
+ DECL_RFK_WM(0x78c8, 0x01000000, 0x00000000),
+};
+
+DECLARE_RFK_TBL(rtw8852a_rfk_iqk_restore_defs_dbcc_path1);
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
new file mode 100644
index 000000000000..4a4a45d778ff
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_rfk_table.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852A_RFK_TABLE_H__
+#define __RTW89_8852A_RFK_TABLE_H__
+
+#include "core.h"
+
+enum rtw89_rfk_flag {
+ RTW89_RFK_F_WRF = 0,
+ RTW89_RFK_F_WM = 1,
+ RTW89_RFK_F_WS = 2,
+ RTW89_RFK_F_WC = 3,
+ RTW89_RFK_F_DELAY = 4,
+ RTW89_RFK_F_NUM,
+};
+
+struct rtw89_rfk_tbl {
+ const struct rtw89_reg5_def *defs;
+ u32 size;
+};
+
+#define DECLARE_RFK_TBL(_name) \
+const struct rtw89_rfk_tbl _name ## _tbl = { \
+ .defs = _name, \
+ .size = ARRAY_SIZE(_name), \
+}
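+
+/*
+ * Note: each DECLARE_RFK_TBL(x) in rtw8852a_rfk_table.c defines the
+ * matching "x_tbl" object that is declared extern at the end of this
+ * header.  For example, DECLARE_RFK_TBL(rtw8852a_tssi_dck_defs_a)
+ * expands to:
+ *
+ *	const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_a_tbl = {
+ *		.defs = rtw8852a_tssi_dck_defs_a,
+ *		.size = ARRAY_SIZE(rtw8852a_tssi_dck_defs_a),
+ *	};
+ */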
+
+#define DECL_RFK_WRF(_path, _addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WRF, \
+ .path = _path, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define DECL_RFK_WM(_addr, _mask, _data) \
+ {.flag = RTW89_RFK_F_WM, \
+ .addr = _addr, \
+ .mask = _mask, \
+ .data = _data,}
+
+#define DECL_RFK_WS(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WS, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define DECL_RFK_WC(_addr, _mask) \
+ {.flag = RTW89_RFK_F_WC, \
+ .addr = _addr, \
+ .mask = _mask,}
+
+#define DECL_RFK_DELAY(_data) \
+ {.flag = RTW89_RFK_F_DELAY, \
+ .data = _data,}
+
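+/*
+ * Each DECL_RFK_* entry above describes a single operation: an RF-register
+ * write (WRF), a masked baseband-register write (WM), a bit set (WS), a bit
+ * clear (WC), or a delay, with the operands held in the .path/.addr/.mask/
+ * .data members of struct rtw89_reg5_def.  A table is therefore a flat
+ * script, meant to be replayed in order by walking tbl->defs[0..size-1]
+ * and dispatching on .flag, roughly along these lines (the helper name
+ * here is illustrative, not an API defined by this header):
+ *
+ *	for (i = 0; i < tbl->size; i++)
+ *		rtw8852a_rfk_apply_def(rtwdev, &tbl->defs[i]);
+ */
+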
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_sys_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_defs_5g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txpwr_ctrl_bb_he_tb_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_dck_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_dac_gain_tbl_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_cal_org_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_rf_gap_tbl_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_slope_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_track_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_txagc_ofst_mv_avg_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_3_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_a_5g_4_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_2g_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_3_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_pak_defs_b_5g_4_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_enable_defs_ab_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_disable_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_tssi_tracking_defs_tbl;
+
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_afe_init_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_reload_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_addc_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_reset_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_trigger_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_addck_restore_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_f_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_f_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_check_dadc_defs_r_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_f_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_m_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dack_defs_r_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sf_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_sr_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_s_defs_ab_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_a_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_b_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_bb_afe_r_defs_ab_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_f_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_lbk_rxiqk_defs_r_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_dpk_pas_read_defs_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_nondbcc_path01_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path0_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_set_defs_dbcc_path1_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_nondbcc_path01_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path0_tbl;
+extern const struct rtw89_rfk_tbl rtw8852a_rfk_iqk_restore_defs_dbcc_path1_tbl;
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
new file mode 100644
index 000000000000..3a4fe7207420
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.c
@@ -0,0 +1,48725 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "phy.h"
+#include "reg.h"
+#include "rtw8852a_table.h"
+
+static const struct rtw89_reg2_def rtw89_8852a_phy_bb_regs[] = {
+ {0xF0FF0001, 0x00000000},
+ {0xF03300FF, 0x00000001},
+ {0xF03500FF, 0x00000002},
+ {0xF03200FF, 0x00000003},
+ {0xF03400FF, 0x00000004},
+ {0xF03600FF, 0x00000005},
+ {0x704, 0x601E0100},
+ {0x714, 0x00000000},
+ {0x718, 0x13332333},
+ {0x714, 0x00010000},
+ {0x720, 0x20000000},
+ {0x980, 0x10002250},
+ {0x994, 0x00000010},
+ {0x644, 0x2314283C},
+ {0x644, 0x3426283C},
+ {0x994, 0x00000010},
+ {0xC3C, 0x2840E1BF},
+ {0xC40, 0x00000000},
+ {0xC44, 0x00000007},
+ {0xC48, 0x410E4000},
+ {0xC54, 0x1001436E},
+ {0xC58, 0x41000000},
+ {0x730, 0x00000002},
+ {0xC60, 0x017FFFF2},
+ {0xC64, 0x0010A130},
+ {0xC64, 0x0010A130},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x10000068},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x90000068},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x90000068},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x10000068},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x90000068},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0xC68, 0x90000068},
+ {0xA0000000, 0x00000000},
+ {0xC68, 0x10000068},
+ {0xB0000000, 0x00000000},
+ {0xC64, 0x0010A130},
+ {0xC54, 0x1EE1436E},
+ {0xC6C, 0x00000020},
+ {0x708, 0x00000000},
+ {0xC6C, 0x00000020},
+ {0x804, 0x0043F01D},
+ {0x12D0, 0x00000000},
+ {0x12EC, 0x888CA72B},
+ {0x32D0, 0x00000000},
+ {0x32EC, 0x888CA72B},
+ {0xD40, 0xF64FA0F7},
+ {0xD44, 0x0400063F},
+ {0xD48, 0x0003FF7F},
+ {0xD4C, 0x00000000},
+ {0xD50, 0xF64FA0F7},
+ {0xD54, 0x04100437},
+ {0xD58, 0x0000FF7F},
+ {0xD5C, 0x00000000},
+ {0xD60, 0x00000000},
+ {0xD64, 0x00000000},
+ {0xD70, 0x0000001D},
+ {0xD90, 0x000003FF},
+ {0xD94, 0x00000000},
+ {0xD98, 0x0000003F},
+ {0xD9C, 0x00000000},
+ {0xDA0, 0x000003FF},
+ {0xDA4, 0x00000000},
+ {0xDA8, 0x0000003F},
+ {0xDAC, 0x00000000},
+ {0xD00, 0x77777777},
+ {0xD04, 0xBBBBBBBB},
+ {0xD08, 0xBBBBBBBB},
+ {0xD0C, 0x00000070},
+ {0xD10, 0x20110900},
+ {0xD10, 0x20110FFF},
+ {0xD7C, 0x001D050C},
+ {0xD84, 0x00006207},
+ {0xD18, 0x50209900},
+ {0xD80, 0x00804100},
+ {0x714, 0x00010000},
+ {0x704, 0x601E00FD},
+ {0x710, 0xF3810000},
+ {0x000, 0x0580801F},
+ {0x000, 0x8580801F},
+ {0x334, 0xFFFFFFFF},
+ {0x33C, 0x55000000},
+ {0x340, 0x00005555},
+ {0x724, 0x00111200},
+ {0x5868, 0xA9550000},
+ {0x5870, 0x33221100},
+ {0x5874, 0x77665544},
+ {0x5878, 0xBBAA9988},
+ {0x587C, 0xFFEEDDCC},
+ {0x5880, 0x76543210},
+ {0x5884, 0xFEDCBA98},
+ {0x5888, 0x00000000},
+ {0x588C, 0x00000000},
+ {0x5894, 0x00000008},
+ {0x7868, 0xA9550000},
+ {0x7870, 0x33221100},
+ {0x7874, 0x77665544},
+ {0x7878, 0xBBAA9988},
+ {0x787C, 0xFFEEDDCC},
+ {0x7880, 0x76543210},
+ {0x7884, 0xFEDCBA98},
+ {0x7888, 0x00000000},
+ {0x788C, 0x00000000},
+ {0x7894, 0x00000008},
+ {0x240C, 0x00000000},
+ {0xC70, 0x00000400},
+ {0x700, 0x00000030},
+ {0x704, 0x601E00FF},
+ {0x704, 0x601E00FD},
+ {0x704, 0x601E00FF},
+ {0x586C, 0x000000F0},
+ {0x586C, 0x000000E0},
+ {0x586C, 0x000000D0},
+ {0x586C, 0x000000C0},
+ {0x586C, 0x000000B0},
+ {0x586C, 0x000000A0},
+ {0x586C, 0x00000090},
+ {0x586C, 0x00000080},
+ {0x586C, 0x00000070},
+ {0x586C, 0x00000060},
+ {0x586C, 0x00000050},
+ {0x586C, 0x00000040},
+ {0x586C, 0x00000030},
+ {0x586C, 0x00000020},
+ {0x586C, 0x00000010},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x00000000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x03E00000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x03E00000},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x03E00000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x586C, 0x03E00000},
+ {0xA0000000, 0x00000000},
+ {0x586C, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x786C, 0x000000F0},
+ {0x786C, 0x000000E0},
+ {0x786C, 0x000000D0},
+ {0x786C, 0x000000C0},
+ {0x786C, 0x000000B0},
+ {0x786C, 0x000000A0},
+ {0x786C, 0x00000090},
+ {0x786C, 0x00000080},
+ {0x786C, 0x00000070},
+ {0x786C, 0x00000060},
+ {0x786C, 0x00000050},
+ {0x786C, 0x00000040},
+ {0x786C, 0x00000030},
+ {0x786C, 0x00000020},
+ {0x786C, 0x00000010},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x00000000},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x03E00000},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x03E00000},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x00000000},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x03E00000},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x786C, 0x03E00000},
+ {0xA0000000, 0x00000000},
+ {0x786C, 0x00000000},
+ {0xB0000000, 0x00000000},
+ {0x5864, 0x080801FF},
+ {0x7864, 0x080801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC6C, 0x00000021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0xC60, 0x017FFFF3},
+ {0xC60, 0x017FFFF3},
+ {0x2C60, 0x013FFF0A},
+ {0xC70, 0x00000600},
+ {0xC70, 0x00000660},
+ {0xC6C, 0x10001021},
+ {0x58AC, 0x08000000},
+ {0x78AC, 0x08000000},
+ {0x5864, 0x100801FF},
+ {0x7864, 0x100801FF},
+ {0x5864, 0x180801FF},
+ {0x7864, 0x180801FF},
+ {0x704, 0x601C01FF},
+ {0x58D4, 0x0401FE00},
+ {0x78D4, 0x0401FE00},
+ {0x58F0, 0x000401FF},
+ {0x78F0, 0x000401FF},
+ {0x58F0, 0x400401FF},
+ {0x78F0, 0x400401FF},
+ {0x12A8, 0x333378A5},
+ {0x32A8, 0x333378A5},
+ {0x2300, 0x02748790},
+ {0x2304, 0x00558670},
+ {0x2308, 0x002883F0},
+ {0x230C, 0x00090120},
+ {0x2310, 0x00000000},
+ {0x2314, 0x06000000},
+ {0x2318, 0x00000000},
+ {0x231C, 0x00000000},
+ {0x2320, 0x03020100},
+ {0x2324, 0x07060504},
+ {0x2328, 0x0B0A0908},
+ {0x232C, 0x0F0E0D0C},
+ {0x2330, 0x13121110},
+ {0x2334, 0x17161514},
+ {0x2338, 0x0C700022},
+ {0x233C, 0x0A05298F},
+ {0x2340, 0x0005298E},
+ {0x2344, 0x0006318A},
+ {0x2348, 0xB7E6318A},
+ {0x234C, 0x80039CE7},
+ {0x2350, 0x80039CE7},
+ {0x2354, 0x0005298F},
+ {0x2358, 0x0015296E},
+ {0x235C, 0x0C07FC31},
+ {0x2360, 0x0219A6AE},
+ {0x2364, 0xE4F624C3},
+ {0x2368, 0x53626F15},
+ {0x236C, 0x48000000},
+ {0x2370, 0x48000000},
+ {0x2374, 0x074C0000},
+ {0x2378, 0x202401B5},
+ {0x237C, 0x00F7000E},
+ {0x2380, 0x0F0A1111},
+ {0x2384, 0x30D9000F},
+ {0x2388, 0x0400EA02},
+ {0x238C, 0x003CB061},
+ {0x2390, 0x69C00000},
+ {0x2394, 0x00000000},
+ {0x2398, 0x000000F0},
+ {0x239C, 0x0001FFFF},
+ {0x23A0, 0x00C80064},
+ {0x23A4, 0x0190012C},
+ {0x23A8, 0x001917BE},
+ {0x23AC, 0x0B308800},
+ {0x23B0, 0x0001D5B0},
+ {0x23B4, 0x000285D2},
+ {0x23B8, 0x00000000},
+ {0x23BC, 0x00000000},
+ {0x23C0, 0x00000000},
+ {0x23C4, 0x00000000},
+ {0x23C8, 0x00000000},
+ {0x23CC, 0x00000000},
+ {0x23D0, 0x00000000},
+ {0x23D4, 0x00000000},
+ {0x23D8, 0x00000000},
+ {0x23DC, 0x00000000},
+ {0x23E0, 0x00000000},
+ {0x23E4, 0x00000000},
+ {0x23E8, 0x00000000},
+ {0x23EC, 0x00000000},
+ {0x23F0, 0x00000000},
+ {0x23F4, 0x00000000},
+ {0x23F8, 0x00000000},
+ {0x23FC, 0x00000000},
+ {0x804, 0x0043F01D},
+ {0x300, 0xF30CE31C},
+ {0x304, 0x13EF1F19},
+ {0x308, 0x0C0CF3F3},
+ {0x30C, 0x0C0C0C0C},
+ {0x310, 0x80416000},
+ {0x314, 0x0041E000},
+ {0x318, 0x20022042},
+ {0x31C, 0x20448001},
+ {0x320, 0x00410040},
+ {0x324, 0xE000E000},
+ {0x328, 0xE000E000},
+ {0x32C, 0xE000E000},
+ {0x12BC, 0x10104041},
+ {0x12C0, 0x14411111},
+ {0x32BC, 0x10104041},
+ {0x32C0, 0x14411111},
+ {0x010, 0x0005FFFF},
+ {0x604, 0x1E1E1E28},
+ {0x650, 0x00200888},
+ {0x620, 0x00141230},
+ {0x35C, 0x000004C4},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04237040},
+ {0x7804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7808, 0x04237040},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04231040},
+ {0x7804, 0x04231040},
+ {0x5808, 0x04231040},
+ {0x7808, 0x04231040},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04231040},
+ {0x7804, 0x04231040},
+ {0x5808, 0x04231040},
+ {0x7808, 0x04231040},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04237040},
+ {0x7804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7808, 0x04237040},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04231040},
+ {0x7804, 0x04231040},
+ {0x5808, 0x04231040},
+ {0x7808, 0x04231040},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5804, 0x04231040},
+ {0x7804, 0x04231040},
+ {0x5808, 0x04231040},
+ {0x7808, 0x04231040},
+ {0xA0000000, 0x00000000},
+ {0x5804, 0x04237040},
+ {0x7804, 0x04237040},
+ {0x5808, 0x04237040},
+ {0x7808, 0x04237040},
+ {0xB0000000, 0x00000000},
+ {0x12A0, 0x24903156},
+ {0x32A0, 0x24903156},
+ {0x640, 0x1414141E},
+ {0x12B8, 0x30020000},
+ {0x12AC, 0x02333121},
+ {0x9A4, 0x0000001C},
+ {0x624, 0x01010301},
+ {0x628, 0x00010101},
+ {0x5800, 0x03FF807F},
+ {0x7800, 0x03FF807F},
+ {0x4000, 0x00000000},
+ {0x4004, 0xCA014000},
+ {0x4008, 0xC751D4F0},
+ {0x400C, 0x44511475},
+ {0x4010, 0x00000000},
+ {0x4014, 0x00000000},
+ {0x4018, 0x4F4C084B},
+ {0x401C, 0x084A4E52},
+ {0x4020, 0x4D504E4B},
+ {0x4024, 0x4F4C0849},
+ {0x4028, 0x08484C50},
+ {0x402C, 0x4C50504C},
+ {0x4030, 0x5454084A},
+ {0x4034, 0x084B5654},
+ {0x4038, 0x6A6C605A},
+ {0x403C, 0x4C4C084C},
+ {0x4040, 0x084B4E4D},
+ {0x4044, 0x4E4C4B4B},
+ {0x4048, 0x4B4B084A},
+ {0x404C, 0x084A4E4C},
+ {0x4050, 0x514F4C4A},
+ {0x4054, 0x524E084A},
+ {0x4058, 0x084A5154},
+ {0x405C, 0x53555554},
+ {0x4060, 0x45450845},
+ {0x4064, 0x08454144},
+ {0x4068, 0x40434445},
+ {0x406C, 0x44450845},
+ {0x4070, 0x08444043},
+ {0x4074, 0x42434444},
+ {0x4078, 0x46450844},
+ {0x407C, 0x08444843},
+ {0x4080, 0x4B4E4A47},
+ {0x4084, 0x4F4C084B},
+ {0x4088, 0x084A4E52},
+ {0x408C, 0x4D504E4B},
+ {0x4090, 0x4F4C0849},
+ {0x4094, 0x08484C50},
+ {0x4098, 0x4C50504C},
+ {0x409C, 0x5454084A},
+ {0x40A0, 0x084B5654},
+ {0x40A4, 0x6A6C605A},
+ {0x40A8, 0x4C4C084C},
+ {0x40AC, 0x084B4E4D},
+ {0x40B0, 0x4E4C4B4B},
+ {0x40B4, 0x4B4B084A},
+ {0x40B8, 0x084A4E4C},
+ {0x40BC, 0x514F4C4A},
+ {0x40C0, 0x524E084A},
+ {0x40C4, 0x084A5154},
+ {0x40C8, 0x53555554},
+ {0x40CC, 0x45450845},
+ {0x40D0, 0x08454144},
+ {0x40D4, 0x40434445},
+ {0x40D8, 0x44450845},
+ {0x40DC, 0x08444043},
+ {0x40E0, 0x42434444},
+ {0x40E4, 0x46450844},
+ {0x40E8, 0x08444843},
+ {0x40EC, 0x4B4E4A47},
+ {0x40F0, 0x00000000},
+ {0x40F4, 0x00000006},
+ {0x40F8, 0x00000001},
+ {0x40FC, 0x8C30C30C},
+ {0x4100, 0x4C30C30C},
+ {0x4104, 0x0C30C30C},
+ {0x4108, 0x0C30C30C},
+ {0x410C, 0x0C30C30C},
+ {0x4110, 0x0C30C30C},
+ {0x4114, 0x28A28A28},
+ {0x4118, 0x28A28A28},
+ {0x411C, 0x28A28A28},
+ {0x4120, 0x28A28A28},
+ {0x4124, 0x28A28A28},
+ {0x4128, 0x28A28A28},
+ {0x412C, 0x06666666},
+ {0x4130, 0x33333333},
+ {0x4134, 0x33333333},
+ {0x4138, 0x33333333},
+ {0x413C, 0x00000031},
+ {0x4140, 0x5100600A},
+ {0x4144, 0x18363113},
+ {0x4148, 0x1D976DDC},
+ {0x414C, 0x1C072DD7},
+ {0x4150, 0x1127CDF4},
+ {0x4154, 0x1E37BDF1},
+ {0x4158, 0x1FB7F1D6},
+ {0x415C, 0x1EA7DDF9},
+ {0x4160, 0x1FE445DD},
+ {0x4164, 0x1F97F1FE},
+ {0x4168, 0x1FF781ED},
+ {0x416C, 0x1FA7F5FE},
+ {0x4170, 0x1E07B913},
+ {0x4174, 0x1FD7FDFF},
+ {0x4178, 0x1E17B9FA},
+ {0x417C, 0x19A66914},
+ {0x4180, 0x10F65598},
+ {0x4184, 0x14A5A111},
+ {0x4188, 0x1D3765DB},
+ {0x418C, 0x17C685CA},
+ {0x4190, 0x1107C5F3},
+ {0x4194, 0x1B5785EB},
+ {0x4198, 0x1F97ED8F},
+ {0x419C, 0x1BC7A5F3},
+ {0x41A0, 0x1FE43595},
+ {0x41A4, 0x1EB7D9FC},
+ {0x41A8, 0x1FE65DBE},
+ {0x41AC, 0x1EC7D9FC},
+ {0x41B0, 0x1976FCFF},
+ {0x41B4, 0x1F77F5FF},
+ {0x41B8, 0x1976FDEC},
+ {0x41BC, 0x198664EF},
+ {0x41C0, 0x11062D93},
+ {0x41C4, 0x10C4E910},
+ {0x41C8, 0x1CA759DB},
+ {0x41CC, 0x1335A9B5},
+ {0x41D0, 0x1097B9F3},
+ {0x41D4, 0x17B72DE1},
+ {0x41D8, 0x1F67ED42},
+ {0x41DC, 0x18074DE9},
+ {0x41E0, 0x1FD40547},
+ {0x41E4, 0x1D57ADF9},
+ {0x41E8, 0x1FE52182},
+ {0x41EC, 0x1D67B1F9},
+ {0x41F0, 0x14860CE1},
+ {0x41F4, 0x1EC7E9FE},
+ {0x41F8, 0x14860DD6},
+ {0x41FC, 0x195664C7},
+ {0x4200, 0x0005E58A},
+ {0x4204, 0x00000000},
+ {0x4208, 0x00000000},
+ {0x420C, 0x7A000000},
+ {0x4210, 0x0F9F3D7A},
+ {0x4214, 0x0040817C},
+ {0x4218, 0x00E10204},
+ {0x421C, 0x227D94CD},
+ {0x4220, 0x080238E3},
+ {0x4224, 0x00000210},
+ {0x4228, 0x04688000},
+ {0x422C, 0x0060B002},
+ {0x4230, 0x9A8249A8},
+ {0x4234, 0x26A1469E},
+ {0x4238, 0x2099A824},
+ {0x423C, 0x2359461C},
+ {0x4240, 0x1631A675},
+ {0x4244, 0x2C6B1D63},
+ {0x4248, 0x0000000E},
+ {0x424C, 0x00000001},
+ {0x4250, 0x00000001},
+ {0x4254, 0x00000000},
+ {0x4258, 0x00000000},
+ {0x425C, 0x00000000},
+ {0x4260, 0x0020000C},
+ {0x4264, 0x00000000},
+ {0x4268, 0x00000000},
+ {0x426C, 0x0418317C},
+ {0x4270, 0x00D6135C},
+ {0x4274, 0x00000000},
+ {0x4278, 0x00000000},
+ {0x427C, 0x00000000},
+ {0x4280, 0x00000000},
+ {0x4284, 0x00000000},
+ {0x4288, 0x00000000},
+ {0x428C, 0x00000000},
+ {0x4290, 0x00000000},
+ {0x4294, 0x00000000},
+ {0x4298, 0x84026000},
+ {0x429C, 0x0051AC20},
+ {0x42A0, 0x02024008},
+ {0x42A4, 0x00000000},
+ {0x42A8, 0x00000000},
+ {0x42AC, 0x22CE803C},
+ {0x42B0, 0x80000000},
+ {0x42B4, 0x00E7D03D},
+ {0x42B8, 0x3D67D67D},
+ {0x42BC, 0x7D67D65B},
+ {0x42C0, 0x2802AF59},
+ {0x42C4, 0x00280280},
+ {0x42C8, 0x00000000},
+ {0x42CC, 0x00000000},
+ {0x42D0, 0x00000003},
+ {0x42D4, 0x00000001},
+ {0x42D8, 0x61861800},
+ {0x42DC, 0x830C30C3},
+ {0x42E0, 0xC30C30C3},
+ {0x42E4, 0x830C30C3},
+ {0x42E8, 0x451450C3},
+ {0x42EC, 0x05145145},
+ {0x42F0, 0x05145145},
+ {0x42F4, 0x05145145},
+ {0x42F8, 0x0F0C3145},
+ {0x42FC, 0x030C30CF},
+ {0x4300, 0x030C30C3},
+ {0x4304, 0x030CF3C3},
+ {0x4308, 0x030C30C3},
+ {0x430C, 0x0F3CF3C3},
+ {0x4310, 0x0F3CF3CF},
+ {0x4314, 0x0F3CF3CF},
+ {0x4318, 0x0F3CF3CF},
+ {0x431C, 0x0F3CF3CF},
+ {0x4320, 0x030C10C3},
+ {0x4324, 0x051430C3},
+ {0x4328, 0x051490CB},
+ {0x432C, 0x030CD151},
+ {0x4330, 0x050C50C7},
+ {0x4334, 0x051492CB},
+ {0x4338, 0x05145145},
+ {0x433C, 0x05145145},
+ {0x4340, 0x05145145},
+ {0x4344, 0x05145145},
+ {0x4348, 0x090CD3CF},
+ {0x434C, 0x071491C5},
+ {0x4350, 0x073CF143},
+ {0x4354, 0x071431C3},
+ {0x4358, 0x0F3CF1C5},
+ {0x435C, 0x0F3CF3CF},
+ {0x4360, 0x0F3CF3CF},
+ {0x4364, 0x0F3CF3CF},
+ {0x4368, 0x0F3CF3CF},
+ {0x436C, 0x090C91CF},
+ {0x4370, 0x11243143},
+ {0x4374, 0x9777A777},
+ {0x4378, 0xBB7BAC95},
+ {0x437C, 0xB667B889},
+ {0x4380, 0x7B9B8899},
+ {0x4384, 0x7A5567C8},
+ {0x4388, 0x2278CCCC},
+ {0x438C, 0x7C222222},
+ {0x4390, 0x0000069B},
+ {0x4394, 0x001CCCCC},
+ {0x4398, 0x00000000},
+ {0x439C, 0x00000008},
+ {0x49A4, 0x00000000},
+ {0x43A0, 0x00000000},
+ {0x43A4, 0x00000000},
+ {0x43A8, 0x00000000},
+ {0x43AC, 0x10000800},
+ {0x43B0, 0x00401802},
+ {0x43B4, 0x00061004},
+ {0x43B8, 0x000024D8},
+ {0x43BC, 0x00000000},
+ {0x43C0, 0x10000020},
+ {0x43C4, 0x20000200},
+ {0x43C8, 0x00000000},
+ {0x43CC, 0x04000000},
+ {0x43D0, 0x44000100},
+ {0x43D4, 0x60804060},
+ {0x43D8, 0x44204210},
+ {0x43DC, 0x82108082},
+ {0x43E0, 0x82108402},
+ {0x43E4, 0xC8082108},
+ {0x43E8, 0xC8202084},
+ {0x43EC, 0x44208208},
+ {0x43F0, 0x84108204},
+ {0x43F4, 0xD0108104},
+ {0x43F8, 0xF8210108},
+ {0x43FC, 0x6431E930},
+ {0x4400, 0x02109468},
+ {0x4404, 0x10C61C22},
+ {0x4408, 0x02109469},
+ {0x440C, 0x10C61C22},
+ {0x4410, 0x00041049},
+ {0x4414, 0x00000000},
+ {0x4418, 0x00000000},
+ {0x441C, 0x6C000000},
+ {0x4420, 0xB0200020},
+ {0x4424, 0x00001FF0},
+ {0x4428, 0x00000000},
+ {0x442C, 0x00000000},
+ {0x4430, 0x00000000},
+ {0x4434, 0x00000000},
+ {0x4438, 0x65F962F8},
+ {0x443C, 0x280668A0},
+ {0x4440, 0x64100820},
+ {0x4444, 0x4A146304},
+ {0x4448, 0x0C59008F},
+ {0x444C, 0x6E30498A},
+ {0x4450, 0x656E371B},
+ {0x4454, 0x00000F52},
+ {0x4458, 0x00000000},
+ {0x445C, 0x4801442E},
+ {0x4460, 0x0041A0B8},
+ {0x4464, 0x00000000},
+ {0x4468, 0x00000000},
+ {0x446C, 0x00000000},
+ {0x4470, 0x00000000},
+ {0x4474, 0x00000000},
+ {0x4478, 0x00000000},
+ {0x447C, 0x00000000},
+ {0x4480, 0x2A0A6040},
+ {0x4484, 0x0A0A6829},
+ {0x4488, 0x00000004},
+ {0x448C, 0x00000000},
+ {0x4490, 0x80000000},
+ {0x4494, 0x10000000},
+ {0x4498, 0xA0000000},
+ {0x449C, 0x0000001E},
+ {0x44A0, 0x02B29397},
+ {0x44A4, 0x00000400},
+ {0x44A8, 0x00000001},
+ {0x44AC, 0x00000000},
+ {0x44B0, 0x00000000},
+ {0x44B4, 0x00000000},
+ {0x44B8, 0x00000000},
+ {0x44BC, 0x00000000},
+ {0x44C0, 0x00000000},
+ {0x44C4, 0x00000000},
+ {0x44C8, 0x00000000},
+ {0x44CC, 0x00000000},
+ {0x44D0, 0x00000000},
+ {0x44D4, 0x00000000},
+ {0x44D8, 0x00000000},
+ {0x44DC, 0x00000000},
+ {0x44E0, 0x00000000},
+ {0x44E4, 0x00000000},
+ {0x44E8, 0x00000000},
+ {0x44EC, 0x00000000},
+ {0x44F0, 0x00000000},
+ {0x44F4, 0x00000000},
+ {0x44F8, 0x00000000},
+ {0x44FC, 0x00000000},
+ {0x4500, 0x00000000},
+ {0x4504, 0x00000000},
+ {0x4508, 0x00000000},
+ {0x450C, 0x00000000},
+ {0x4510, 0x00000000},
+ {0x4514, 0x00000000},
+ {0x4518, 0x00000000},
+ {0x451C, 0x00000000},
+ {0x4520, 0x00000000},
+ {0x4524, 0x00000000},
+ {0x4528, 0x00000000},
+ {0x452C, 0x00000000},
+ {0x4530, 0x4EA20631},
+ {0x4534, 0x000005C8},
+ {0x4538, 0x000000FF},
+ {0x453C, 0x00000000},
+ {0x4540, 0x00000000},
+ {0x4544, 0x00000000},
+ {0x4548, 0x00000000},
+ {0x454C, 0x00000000},
+ {0x4550, 0x00000000},
+ {0x4554, 0x00000000},
+ {0x4558, 0x00000000},
+ {0x455C, 0x00000000},
+ {0x4560, 0x4060001A},
+ {0x4564, 0x40000000},
+ {0x4568, 0x00000000},
+ {0x456C, 0x20000000},
+ {0x4570, 0x04800406},
+ {0x4574, 0x00022270},
+ {0x4578, 0x0002024B},
+ {0x457C, 0x00200000},
+ {0x4580, 0x00009B40},
+ {0x4584, 0x00000000},
+ {0x4588, 0x00000063},
+ {0x458C, 0x30000000},
+ {0x4590, 0x00000000},
+ {0x4594, 0x05000000},
+ {0x4598, 0x00000001},
+ {0x459C, 0x0003FE00},
+ {0x45A0, 0x00000000},
+ {0x45A4, 0x00000000},
+ {0x45A8, 0xC00001C0},
+ {0x45AC, 0x78028000},
+ {0x45B0, 0x80000048},
+ {0x45B4, 0x01C90800},
+ {0x45B8, 0x00000002},
+ {0x45BC, 0x06748790},
+ {0x45C0, 0x80000000},
+ {0x45C4, 0x00000000},
+ {0x45C8, 0x00000000},
+ {0x45CC, 0x00558670},
+ {0x45D0, 0x002883F0},
+ {0x45D4, 0x00090120},
+ {0x45D8, 0x00000000},
+ {0x45DC, 0xA3A6D3C4},
+ {0x49A8, 0xAB27B126},
+ {0x49AC, 0x00006778},
+ {0x49FC, 0x000001B5},
+ {0x49B0, 0x11110F0A},
+ {0x49B4, 0x00000007},
+ {0x49B8, 0x0000000A},
+ {0x49BC, 0x0058BC3F},
+ {0x49C0, 0x00000003},
+ {0x49C4, 0x000003D9},
+ {0x49C8, 0x002B1CB0},
+ {0x4A00, 0x00000000},
+ {0x49CC, 0x00000001},
+ {0x49D0, 0x00000010},
+ {0x49D4, 0x00000001},
+ {0x49D8, 0x85298FBF},
+ {0x49DC, 0x18A5296E},
+ {0x49E0, 0x18C6298C},
+ {0x49E4, 0x0A739CA7},
+ {0x49E8, 0x001A50E7},
+ {0x49EC, 0x00000001},
+ {0x49F0, 0x00005924},
+ {0x49F4, 0x0003AAA6},
+ {0x49F8, 0x0000C4C3},
+ {0x45E0, 0x00000000},
+ {0x45E4, 0x00000000},
+ {0x45E8, 0x00E2E100},
+ {0x45EC, 0xCB00B6B6},
+ {0x45F0, 0x59100FCA},
+ {0x45F4, 0x08882550},
+ {0x45F8, 0x08CC2660},
+ {0x45FC, 0x09102660},
+ {0x4600, 0x00000154},
+ {0x4604, 0x00000800},
+ {0x4608, 0x31BF0400},
+ {0x460C, 0x00E0C0A0},
+ {0x4610, 0x30604020},
+ {0x4614, 0x2F346D50},
+ {0x4618, 0x2E36B6E2},
+ {0x461C, 0x3E7EF86B},
+ {0x4620, 0x001FC004},
+ {0x4624, 0xA8068010},
+ {0x4628, 0x4602CA80},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76067E8C},
+ {0x4630, 0x8EA350E8},
+ {0x4634, 0xB3B8D8F5},
+ {0x4638, 0x6FFF0C06},
+ {0x463C, 0xB8FA4435},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76078E8C},
+ {0x4630, 0x8EDB50F6},
+ {0x4634, 0xB5B8DD75},
+ {0x4638, 0x6FFF4C06},
+ {0x463C, 0xB8FA4434},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76078E8C},
+ {0x4630, 0x8EDB50F6},
+ {0x4634, 0xB5B8DD75},
+ {0x4638, 0x6FFF4C06},
+ {0x463C, 0xB8FA4434},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76067E8C},
+ {0x4630, 0x8EA350E8},
+ {0x4634, 0xB3B8D8F5},
+ {0x4638, 0x6FFF0C06},
+ {0x463C, 0xB8FA4435},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76078E8C},
+ {0x4630, 0x8EDB50F6},
+ {0x4634, 0xB5B8DD75},
+ {0x4638, 0x6FFF4C06},
+ {0x463C, 0xB8FA4434},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x462C, 0x76078E8C},
+ {0x4630, 0x8EDB50F6},
+ {0x4634, 0xB5B8DD75},
+ {0x4638, 0x6FFF4C06},
+ {0x463C, 0xB8FA4434},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0xA0000000, 0x00000000},
+ {0x462C, 0x76067E8C},
+ {0x4630, 0x8EA350E8},
+ {0x4634, 0xB3B8D8F5},
+ {0x4638, 0x6FFF0C06},
+ {0x463C, 0xB8FA4435},
+ {0x4640, 0xB7C4FEF8},
+ {0x4644, 0x2A72AD07},
+ {0xB0000000, 0x00000000},
+ {0x4648, 0x64204FB2},
+ {0x464C, 0x4C823404},
+ {0x4650, 0x9084C800},
+ {0x4654, 0x9889314F},
+ {0x4658, 0x5ECC3FF4},
+ {0x465C, 0xFEECAECE},
+ {0x4660, 0x47806638},
+ {0x4664, 0x0F5AF843},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x56452994},
+ {0x466C, 0x54D89ADB},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x55452994},
+ {0x466C, 0x56D89ADB},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x55452994},
+ {0x466C, 0x56D89ADB},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x56452994},
+ {0x466C, 0x54D89ADB},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x55452994},
+ {0x466C, 0x56D89ADB},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4668, 0x55452994},
+ {0x466C, 0x56D89ADB},
+ {0xA0000000, 0x00000000},
+ {0x4668, 0x56452994},
+ {0x466C, 0x54D89ADB},
+ {0xB0000000, 0x00000000},
+ {0x4670, 0xE8DF38D8},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x002ACC30},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x0028CC30},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x0028CC30},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x002ACC30},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x0028CC30},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4674, 0x0028CC30},
+ {0xA0000000, 0x00000000},
+ {0x4674, 0x002ACC30},
+ {0xB0000000, 0x00000000},
+ {0x4678, 0x00000000},
+ {0x467C, 0x00000000},
+ {0x4680, 0x00000219},
+ {0x4684, 0x00000000},
+ {0x4688, 0x00000000},
+ {0x468C, 0x00000001},
+ {0x4690, 0x00000001},
+ {0x4694, 0x00000000},
+ {0x4698, 0x00000000},
+ {0x469C, 0x00000151},
+ {0x46A0, 0x00000498},
+ {0x46A4, 0x00000498},
+ {0x46A8, 0x00000000},
+ {0x46AC, 0x00000000},
+ {0x46B0, 0x00001146},
+ {0x46B4, 0x00000000},
+ {0x46B8, 0x00000000},
+ {0x46BC, 0x00E2E100},
+ {0x46C0, 0xCB00B6B6},
+ {0x46C4, 0x59100FCA},
+ {0x46C8, 0x08882550},
+ {0x46CC, 0x08CC2660},
+ {0x46D0, 0x09102660},
+ {0x46D4, 0x00000154},
+ {0x46D8, 0x00000800},
+ {0x46DC, 0x31BF0400},
+ {0x46E0, 0x00E0C0A0},
+ {0x46E4, 0x30604020},
+ {0x46E8, 0x4F346D50},
+ {0x46EC, 0x2E36B6E2},
+ {0x46F0, 0x3E7EF86B},
+ {0x46F4, 0x001FC004},
+ {0x46F8, 0xA8068010},
+ {0x46FC, 0x4602CA80},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x7806FECC},
+ {0x4704, 0x8EC360F1},
+ {0x4708, 0xB4C4DA7A},
+ {0x470C, 0x72FF2CC6},
+ {0x4710, 0xB8FA4439},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x78078ECC},
+ {0x4704, 0x8EDB60F6},
+ {0x4708, 0xB5C4DD7A},
+ {0x470C, 0x72FF4CC6},
+ {0x4710, 0xB8FA4434},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x78078ECC},
+ {0x4704, 0x8EDB60F6},
+ {0x4708, 0xB5C4DD7A},
+ {0x470C, 0x72FF4CC6},
+ {0x4710, 0xB8FA4434},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x7806FECC},
+ {0x4704, 0x8EC360F1},
+ {0x4708, 0xB4C4DA7A},
+ {0x470C, 0x72FF2CC6},
+ {0x4710, 0xB8FA4439},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x78078ECC},
+ {0x4704, 0x8EDB60F6},
+ {0x4708, 0xB5C4DD7A},
+ {0x470C, 0x72FF4CC6},
+ {0x4710, 0xB8FA4434},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4700, 0x78078ECC},
+ {0x4704, 0x8EDB60F6},
+ {0x4708, 0xB5C4DD7A},
+ {0x470C, 0x72FF4CC6},
+ {0x4710, 0xB8FA4434},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0xA0000000, 0x00000000},
+ {0x4700, 0x7806FECC},
+ {0x4704, 0x8EC360F1},
+ {0x4708, 0xB4C4DA7A},
+ {0x470C, 0x72FF2CC6},
+ {0x4710, 0xB8FA4439},
+ {0x4714, 0xB7C4FEF8},
+ {0x4718, 0x2A72AD09},
+ {0xB0000000, 0x00000000},
+ {0x471C, 0x64204FB2},
+ {0x4720, 0x4C823404},
+ {0x4724, 0x9084C800},
+ {0x4728, 0x9889314F},
+ {0x472C, 0x5ECC3FF4},
+ {0x4730, 0xFEECAECE},
+ {0x4734, 0x47806638},
+ {0x4738, 0x0F4A7843},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x56452994},
+ {0x4740, 0x54D89ADB},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x55452994},
+ {0x4740, 0x56D89ADB},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x55452994},
+ {0x4740, 0x56D89ADB},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x56452994},
+ {0x4740, 0x54D89ADB},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x55452994},
+ {0x4740, 0x56D89ADB},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x473C, 0x55452994},
+ {0x4740, 0x56D89ADB},
+ {0xA0000000, 0x00000000},
+ {0x473C, 0x56452994},
+ {0x4740, 0x54D89ADB},
+ {0xB0000000, 0x00000000},
+ {0x4744, 0xE8DF38D8},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x002ACC30},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x0028CC30},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x0028CC30},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x002ACC30},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x0028CC30},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x4748, 0x0028CC30},
+ {0xA0000000, 0x00000000},
+ {0x4748, 0x002ACC30},
+ {0xB0000000, 0x00000000},
+ {0x474C, 0x00000000},
+ {0x4750, 0x00000000},
+ {0x4754, 0x00000219},
+ {0x4758, 0x00000000},
+ {0x475C, 0x00000000},
+ {0x4760, 0x00000001},
+ {0x4764, 0x00000001},
+ {0x4768, 0x00000000},
+ {0x476C, 0x00000000},
+ {0x4770, 0x00000151},
+ {0x4774, 0x00000498},
+ {0x4778, 0x00000498},
+ {0x477C, 0x00000000},
+ {0x4780, 0x00000000},
+ {0x4784, 0x00001147},
+ {0x4788, 0x00000000},
+ {0x478C, 0xA32103FE},
+ {0x4790, 0x320A7B28},
+ {0x4794, 0xC6A7B14F},
+ {0x4798, 0x000006D7},
+ {0x479C, 0x009B902A},
+ {0x47A0, 0x009B902A},
+ {0x47A4, 0x98682C18},
+ {0x47A8, 0x6308C4C1},
+ {0x47AC, 0x6248C631},
+ {0x47B0, 0x922A8253},
+ {0x47B4, 0x00000005},
+ {0x47B8, 0x00001759},
+ {0x47BC, 0x4B802000},
+ {0x47C0, 0x831408BE},
+ {0x47C4, 0x9ABBCACB},
+ {0x47C8, 0x56767578},
+ {0x47CC, 0xBBCCBBB3},
+ {0x47D0, 0x57889989},
+ {0x47D4, 0x00000F45},
+ {0x47D8, 0x27039CE9},
+ {0x47DC, 0x31413432},
+ {0x47E0, 0x26058342},
+ {0x47E4, 0x00000006},
+ {0x47E8, 0x00000005},
+ {0x47EC, 0x00000005},
+ {0x47F0, 0xC7013016},
+ {0x47F4, 0x84413016},
+ {0x47F8, 0x84413016},
+ {0x47FC, 0x8C413016},
+ {0x4800, 0x8C40B028},
+ {0x4804, 0x3140B028},
+ {0x4808, 0x2940B028},
+ {0x480C, 0x8440B028},
+ {0x4810, 0x2318C610},
+ {0x4814, 0x45334753},
+ {0x4818, 0x236A6A88},
+ {0x481C, 0x576DF814},
+ {0x4820, 0xA08877AC},
+ {0x4824, 0x0000087A},
+ {0x4828, 0xBCEB4A14},
+ {0x482C, 0x000A3A4A},
+ {0x4830, 0xBCEB4A14},
+ {0x4834, 0x000A3A4A},
+ {0x4838, 0xBCBDBD85},
+ {0x483C, 0x0CABB99A},
+ {0x4840, 0x38384242},
+ {0x4844, 0x8086102E},
+ {0x4848, 0xCA24C82A},
+ {0x484C, 0x00008A62},
+ {0x4850, 0x00000008},
+ {0x4854, 0x009B902A},
+ {0x4858, 0x009B902A},
+ {0x485C, 0x98682C18},
+ {0x4860, 0x6308C4C1},
+ {0x4864, 0x6248C631},
+ {0x4868, 0x922A8253},
+ {0x486C, 0x00000005},
+ {0x4870, 0x00001759},
+ {0x4874, 0x4B802000},
+ {0x4878, 0x831408BE},
+ {0x487C, 0x9898A8BB},
+ {0x4880, 0x54535368},
+ {0x4884, 0x999999B3},
+ {0x4888, 0x35555589},
+ {0x488C, 0x00000745},
+ {0x4890, 0x27039CE9},
+ {0x4894, 0x31413432},
+ {0x4898, 0x26058342},
+ {0x489C, 0x00000006},
+ {0x48A0, 0x00000005},
+ {0x48A4, 0x00000005},
+ {0x48A8, 0xC7013016},
+ {0x48AC, 0x84413016},
+ {0x48B0, 0x84413016},
+ {0x48B4, 0x8C413016},
+ {0x48B8, 0x8C40B028},
+ {0x48BC, 0x3140B028},
+ {0x48C0, 0x2940B028},
+ {0x48C4, 0x8440B028},
+ {0x48C8, 0x2318C610},
+ {0x48CC, 0x45334753},
+ {0x48D0, 0x236A6A88},
+ {0x48D4, 0x576DF814},
+ {0x48D8, 0xA08877AC},
+ {0x48DC, 0x0000007A},
+ {0x48E0, 0xBCEB4A14},
+ {0x48E4, 0x000A3A4A},
+ {0x48E8, 0xBCEB4A14},
+ {0x48EC, 0x000A3A4A},
+ {0x48F0, 0x9A8A8A85},
+ {0x48F4, 0x0CA3B99A},
+ {0x48F8, 0x38384242},
+ {0x48FC, 0x8086102E},
+ {0x4900, 0xCA24C82A},
+ {0x4904, 0x00008A62},
+ {0x4908, 0x00000008},
+ {0x490C, 0x80040000},
+ {0x4910, 0x80040000},
+ {0x4914, 0xFE800000},
+ {0x4918, 0x834C0000},
+ {0x491C, 0x00000000},
+ {0x4920, 0x00000000},
+ {0x4924, 0x00000000},
+ {0x4928, 0x00000000},
+ {0x492C, 0x00000000},
+ {0x4930, 0x00000000},
+ {0x4934, 0x40000000},
+ {0x4938, 0x00000000},
+ {0x493C, 0x00000000},
+ {0x4940, 0x00000000},
+ {0x4944, 0x00000000},
+ {0x4948, 0x04065800},
+ {0x494C, 0x32004080},
+ {0x4950, 0x0E1E3E05},
+ {0x4954, 0x0A163068},
+ {0x4958, 0x00206040},
+ {0x495C, 0x02020202},
+ {0x4960, 0x00A16020},
+ {0x4964, 0x031F4284},
+ {0x4968, 0x00A10285},
+ {0x496C, 0x00000005},
+ {0x4970, 0x00000000},
+ {0x4974, 0x800CD62D},
+ {0x4978, 0x00000103},
+ {0x497C, 0x00000000},
+ {0x4980, 0x00000000},
+ {0x4984, 0x00000000},
+ {0x4988, 0x00000000},
+ {0x498C, 0x00000000},
+ {0x4990, 0x00000000},
+ {0x4994, 0x00000000},
+ {0x4998, 0x00000000},
+ {0x499C, 0x00000000},
+ {0x49A0, 0x00000000},
+ {0x2404, 0x00000001},
+ {0xC7C, 0x0000BFE0},
+ {0x020, 0x0000F381},
+ {0x024, 0x0000F381},
+ {0x028, 0x0000F381},
+ {0x02C, 0x0000F381},
+ {0xD78, 0x00000005},
+ {0x12CC, 0x00000CC1},
+ {0x12D0, 0x00000000},
+ {0x12D4, 0x00000000},
+ {0x12D8, 0x00000040},
+ {0x12DC, 0x4486888C},
+ {0x12E0, 0xC43A10E1},
+ {0x12E4, 0x30D52C68},
+ {0x12E8, 0x02024128},
+ {0x12EC, 0x888C272B},
+ {0x12EC, 0x888CA72B},
+ {0x32CC, 0x00000CC1},
+ {0x32D0, 0x00000000},
+ {0x32D4, 0x00000000},
+ {0x32D8, 0x00000040},
+ {0x32DC, 0x4486888C},
+ {0x32E0, 0xC43A10E1},
+ {0x32E4, 0x30D52C68},
+ {0x32E8, 0x02024128},
+ {0x32EC, 0x888C272B},
+ {0x32EC, 0x888CA72B},
+ {0x12AC, 0x12333121},
+ {0x32AC, 0x12333121},
+ {0x738, 0x004100CC},
+ {0x80ff0001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0x80001080},
+ {0x7820, 0x80001080},
+ {0x903300ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0xC0001080},
+ {0x7820, 0xC0001080},
+ {0x903500ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0xC0001080},
+ {0x7820, 0xC0001080},
+ {0x903200ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0x80001080},
+ {0x7820, 0x80001080},
+ {0x903400ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0x80001080},
+ {0x7820, 0x80001080},
+ {0x903600ff, 0x00000000}, {0x40000000, 0x00000000},
+ {0x5820, 0x80001080},
+ {0x7820, 0x80001080},
+ {0xA0000000, 0x00000000},
+ {0x5820, 0x80001080},
+ {0x7820, 0x80001080},
+ {0xB0000000, 0x00000000},
+ {0x2000, 0x18BBBF84},
+ {0x0F0, 0x00000002},
+ {0x0F4, 0x00000016},
+ {0x0F8, 0x20201013},
+};
+
+static const struct rtw89_reg2_def rtw89_8852a_phy_radioa_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0010001, 0x00000001},
+ {0xF0020001, 0x00000002},
+ {0xF0030001, 0x00000003},
+ {0xF0250001, 0x00000004},
+ {0xF0260001, 0x00000005},
+ {0xF0320001, 0x00000006},
+ {0xF0330001, 0x00000007},
+ {0xF0340001, 0x00000008},
+ {0xF0350001, 0x00000009},
+ {0xF0360001, 0x0000000A},
+ {0xF0010002, 0x0000000B},
+ {0xF0020002, 0x0000000C},
+ {0xF0030002, 0x0000000D},
+ {0xF0250002, 0x0000000E},
+ {0xF0260002, 0x0000000F},
+ {0xF0320002, 0x00000010},
+ {0xF0330002, 0x00000011},
+ {0xF0340002, 0x00000012},
+ {0xF0350002, 0x00000013},
+ {0xF0360002, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x005, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x0FE, 0x00000000},
+ {0x055, 0x00080000},
+ {0x056, 0x0008FFF0},
+ {0x057, 0x0000C485},
+ {0x058, 0x000A4164},
+ {0x059, 0x00010000},
+ {0x05A, 0x00060000},
+ {0x05B, 0x0000A000},
+ {0x05C, 0x00000000},
+ {0x05D, 0x0001C013},
+ {0x05E, 0x00000000},
+ {0x05F, 0x00001FF0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0xA0000000, 0x00000000},
+ {0x060, 0x00011000},
+ {0xB0000000, 0x00000000},
+ {0x061, 0x0009F338},
+ {0x062, 0x0009233A},
+ {0x063, 0x000D6002},
+ {0x064, 0x000A0CB0},
+ {0x065, 0x00030EFE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0xA0000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0xB0000000, 0x00000000},
+ {0x068, 0x00000000},
+ {0x069, 0x00030F0A},
+ {0x06A, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000AD6A4},
+ {0x052, 0x00091345},
+ {0x053, 0x00080081},
+ {0x054, 0x0009BC24},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0xA0000000, 0x00000000},
+ {0x051, 0x000AD6A4},
+ {0x052, 0x00091345},
+ {0x053, 0x00080081},
+ {0x054, 0x0009BC24},
+ {0xB0000000, 0x00000000},
+ {0x0D3, 0x00000143},
+ {0x043, 0x00005000},
+ {0x0DD, 0x000003A0},
+ {0x0B0, 0x000E6700},
+ {0x0AF, 0x0001F82E},
+ {0x0B2, 0x000210A7},
+ {0x0B1, 0x00065FFF},
+ {0x0BB, 0x000F7A00},
+ {0x0B3, 0x00013F7A},
+ {0x0D4, 0x0000000E},
+ {0x0B7, 0x00001E0C},
+ {0x0A0, 0x0000004F},
+ {0x0B4, 0x0007C03E},
+ {0x0B5, 0x0007E301},
+ {0x0B6, 0x00080800},
+ {0x0CA, 0x00002000},
+ {0x0DD, 0x000003A0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x00080000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0xA0000000, 0x00000000},
+ {0x0CC, 0x00080000},
+ {0xB0000000, 0x00000000},
+ {0x0A1, 0x0006F300},
+ {0x0A2, 0x00080500},
+ {0x0A3, 0x0008050B},
+ {0x0A4, 0x0006DB12},
+ {0x0A5, 0x00000000},
+ {0x0A6, 0x00000000},
+ {0x0A7, 0x00000000},
+ {0x0A8, 0x00000000},
+ {0x0A9, 0x00000000},
+ {0x0AA, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x000B0000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0A5, 0x000B0000},
+ {0xB0000000, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00008000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0ED, 0x00008000},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000000},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000E1333},
+ {0x033, 0x00000001},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000E7333},
+ {0x033, 0x00000002},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000FA000},
+ {0x033, 0x00000003},
+ {0x03E, 0x00004000},
+ {0x03F, 0x000FA400},
+ {0x033, 0x00000004},
+ {0x03E, 0x00004000},
+ {0x03F, 0x000F5000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00004001},
+ {0x03F, 0x00029400},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x00041999},
+ {0x033, 0x00000007},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x00034444},
+ {0x033, 0x00000008},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x0004D555},
+ {0x033, 0x00000009},
+ {0x03E, 0x00005551},
+ {0x03F, 0x00046AAA},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00005551},
+ {0x03F, 0x00046AAA},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00005551},
+ {0x03F, 0x0008C555},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00081EB8},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00071EB8},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00090000},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00006661},
+ {0x03F, 0x00088000},
+ {0x033, 0x00000010},
+ {0x03E, 0x00006661},
+ {0x03F, 0x00088000},
+ {0x033, 0x00000011},
+ {0x03E, 0x00006661},
+ {0x03F, 0x000DB999},
+ {0x0ED, 0x00000000},
+ {0x0ED, 0x00002000},
+ {0x033, 0x00000002},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000006},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x0ED, 0x00000000},
+ {0x018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x0EE, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0xA0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x08F, 0x000D0F7A},
+ {0x08C, 0x00084584},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000500},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000500},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008B00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00008B00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000001},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x00000002},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x00000003},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x00000004},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x00000005},
+ {0x03F, 0x000103FC},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000008},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000009},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x0000000D},
+ {0x03F, 0x000103FC},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000007C},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000010},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000011},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x00000012},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x00000013},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x00000014},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x00000015},
+ {0x03F, 0x000103FC},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000007C},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000020},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000021},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000022},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000023},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000024},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000025},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000026},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000027},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000028},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000029},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000030},
+ {0x03F, 0x00003338},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000001},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000002},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000003},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000004},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000005},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000006},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000007},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000008},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000009},
+ {0x03F, 0x000004BA},
+ {0x033, 0x0000000A},
+ {0x03F, 0x000004BA},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000004BA},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000870},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000870},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000870},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000870},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000430},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000430},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F358},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F358},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00023458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00023858},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x03E, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00023758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002E758},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002E658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00027758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C758},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000E},
+ {0x03F, 0x0002F658},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00002000},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000068},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000040},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000041},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000042},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000043},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000044},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000045},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000046},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000047},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000048},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000049},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000050},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000051},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000052},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000053},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000054},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000055},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000056},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000057},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000058},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000059},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00004000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003BEF},
+ {0x033, 0x00000001},
+ {0x03F, 0x00003BE9},
+ {0x033, 0x00000002},
+ {0x03F, 0x00003BE3},
+ {0x033, 0x00000003},
+ {0x03F, 0x00003BDD},
+ {0x033, 0x00000004},
+ {0x03F, 0x00003BD7},
+ {0x033, 0x00000005},
+ {0x03F, 0x00003BD1},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001BD9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000BD9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000859},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000859},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000819},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000819},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000013},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000014},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000015},
+ {0x03F, 0x000039DC},
+ {0x033, 0x00000016},
+ {0x03F, 0x000039D6},
+ {0x033, 0x00000017},
+ {0x03F, 0x000039D0},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x03F, 0x000009D2},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x03F, 0x000008D3},
+ {0x033, 0x0000001E},
+ {0x03F, 0x000008CD},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000024},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000025},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000026},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000027},
+ {0x03F, 0x000039DC},
+ {0x033, 0x00000028},
+ {0x03F, 0x000039D6},
+ {0x033, 0x00000029},
+ {0x03F, 0x000039D0},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x03F, 0x000009D2},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000030},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000036},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000037},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000038},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000039},
+ {0x03F, 0x000039DC},
+ {0x033, 0x0000003A},
+ {0x03F, 0x000039D6},
+ {0x033, 0x0000003B},
+ {0x03F, 0x000039D0},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x03F, 0x000009D2},
+ {0x033, 0x00000040},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000041},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000042},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000043},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000044},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000045},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000046},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000047},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000048},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000049},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000050},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000051},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000052},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000053},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000054},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000055},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000056},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000057},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000058},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000059},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000005A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000065},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000066},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000077},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000078},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x03F, 0x0000084D},
+ {0x033, 0x0000007B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000088},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000089},
+ {0x03F, 0x000008D3},
+ {0x033, 0x0000008A},
+ {0x03F, 0x000008CD},
+ {0x033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000008C},
+ {0x03F, 0x0000084D},
+ {0x033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000008E},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000008F},
+ {0x03F, 0x00000807},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000001},
+ {0x03F, 0x00020F3C},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00080000},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000001},
+ {0x03F, 0x000305BC},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0EC, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x03C, 0x00000020},
+ {0x03D, 0x00000078},
+ {0x03E, 0x00080000},
+ {0x03F, 0x00001999},
+ {0x0EC, 0x00000000},
+ {0x02F, 0x0002260D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0DE, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000002},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000003},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000008},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000009},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000010},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000011},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000012},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000013},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000014},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000015},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000016},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000017},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000018},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000019},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000020},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000021},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000022},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000023},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000024},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000025},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000026},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000027},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0xA0000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0xB0000000, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0xA0000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0xB0000000, 0x00000000},
+ {0x06D, 0x00000C31},
+ {0x0EF, 0x00020000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000005FF},
+ {0x0EF, 0x00000000},
+ {0x005, 0x00000001},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000001},
+ {0x03F, 0x00022020},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0xA0000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0xB0000000, 0x00000000},
+ {0x002, 0x00000000},
+ {0x067, 0x00000052},
+};
+
+static const struct rtw89_reg2_def rtw89_8852a_phy_radiob_regs[] = {
+ {0xF0010000, 0x00000000},
+ {0xF0010001, 0x00000001},
+ {0xF0020001, 0x00000002},
+ {0xF0030001, 0x00000003},
+ {0xF0250001, 0x00000004},
+ {0xF0260001, 0x00000005},
+ {0xF0320001, 0x00000006},
+ {0xF0330001, 0x00000007},
+ {0xF0340001, 0x00000008},
+ {0xF0350001, 0x00000009},
+ {0xF0360001, 0x0000000A},
+ {0xF0010002, 0x0000000B},
+ {0xF0020002, 0x0000000C},
+ {0xF0030002, 0x0000000D},
+ {0xF0250002, 0x0000000E},
+ {0xF0260002, 0x0000000F},
+ {0xF0320002, 0x00000010},
+ {0xF0330002, 0x00000011},
+ {0xF0340002, 0x00000012},
+ {0xF0350002, 0x00000013},
+ {0xF0360002, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x005, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x005, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x000, 0x00030000},
+ {0x018, 0x00011124},
+ {0x000, 0x00033C00},
+ {0x01A, 0x00040004},
+ {0x0FE, 0x00000000},
+ {0x055, 0x00080000},
+ {0x056, 0x0008FFF0},
+ {0x057, 0x0000C485},
+ {0x058, 0x000A4164},
+ {0x059, 0x00010000},
+ {0x05A, 0x00060000},
+ {0x05B, 0x0000A000},
+ {0x05C, 0x00000000},
+ {0x05D, 0x0001C013},
+ {0x05E, 0x00000000},
+ {0x05F, 0x00001FF0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x060, 0x00011008},
+ {0xA0000000, 0x00000000},
+ {0x060, 0x00011000},
+ {0xB0000000, 0x00000000},
+ {0x061, 0x0009F338},
+ {0x062, 0x0009233A},
+ {0x063, 0x000D6002},
+ {0x064, 0x000A0CB0},
+ {0x065, 0x00030EFE},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x066, 0x00020000},
+ {0xA0000000, 0x00000000},
+ {0x066, 0x00010000},
+ {0xB0000000, 0x00000000},
+ {0x068, 0x00000000},
+ {0x069, 0x00030F0A},
+ {0x06A, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000AD6A4},
+ {0x052, 0x00091345},
+ {0x053, 0x00080081},
+ {0x054, 0x0007BC24},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x051, 0x000BD267},
+ {0x052, 0x00091345},
+ {0x053, 0x000B0081},
+ {0x054, 0x0007BCA4},
+ {0xA0000000, 0x00000000},
+ {0x051, 0x000AD6A4},
+ {0x052, 0x00091345},
+ {0x053, 0x00080081},
+ {0x054, 0x0007BC24},
+ {0xB0000000, 0x00000000},
+ {0x0D3, 0x00000143},
+ {0x043, 0x00005000},
+ {0x0DD, 0x000003A0},
+ {0x0B0, 0x000E6700},
+ {0x0AF, 0x0001F82E},
+ {0x0B2, 0x000210A7},
+ {0x0B1, 0x00065FFF},
+ {0x0BB, 0x000F7A00},
+ {0x0B3, 0x00013F7A},
+ {0x0D4, 0x0000000E},
+ {0x0B7, 0x00001E0C},
+ {0x0A0, 0x0000004F},
+ {0x0B4, 0x0007C03E},
+ {0x0B5, 0x0007E301},
+ {0x0B6, 0x00080800},
+ {0x0CA, 0x00002000},
+ {0x0DD, 0x000003A0},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x00080000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0CC, 0x000E0000},
+ {0xA0000000, 0x00000000},
+ {0x0CC, 0x00080000},
+ {0xB0000000, 0x00000000},
+ {0x0A1, 0x0006F300},
+ {0x0A2, 0x00080500},
+ {0x0A3, 0x0008050B},
+ {0x0A4, 0x0006DB12},
+ {0x0A5, 0x00000000},
+ {0x0A6, 0x00000000},
+ {0x0A7, 0x00000000},
+ {0x0A8, 0x00000000},
+ {0x0A9, 0x00000000},
+ {0x0AA, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x000B0000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0A5, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0A5, 0x000B0000},
+ {0xB0000000, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00008000},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0ED, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0ED, 0x00008000},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000000},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000E1333},
+ {0x033, 0x00000001},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000E7333},
+ {0x033, 0x00000002},
+ {0x03E, 0x00008000},
+ {0x03F, 0x000FA000},
+ {0x033, 0x00000003},
+ {0x03E, 0x00004000},
+ {0x03F, 0x000FA400},
+ {0x033, 0x00000004},
+ {0x03E, 0x00004000},
+ {0x03F, 0x000F5000},
+ {0x033, 0x00000005},
+ {0x03E, 0x00004001},
+ {0x03F, 0x00029400},
+ {0x033, 0x00000006},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x00041999},
+ {0x033, 0x00000007},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x00034444},
+ {0x033, 0x00000008},
+ {0x03E, 0x0000AAA1},
+ {0x03F, 0x0004D555},
+ {0x033, 0x00000009},
+ {0x03E, 0x00005551},
+ {0x03F, 0x00046AAA},
+ {0x033, 0x0000000A},
+ {0x03E, 0x00005551},
+ {0x03F, 0x00046AAA},
+ {0x033, 0x0000000B},
+ {0x03E, 0x00005551},
+ {0x03F, 0x0008C555},
+ {0x033, 0x0000000C},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00081EB8},
+ {0x033, 0x0000000D},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00071EB8},
+ {0x033, 0x0000000E},
+ {0x03E, 0x0000CCC1},
+ {0x03F, 0x00090000},
+ {0x033, 0x0000000F},
+ {0x03E, 0x00006661},
+ {0x03F, 0x00088000},
+ {0x033, 0x00000010},
+ {0x03E, 0x00006661},
+ {0x03F, 0x00088000},
+ {0x033, 0x00000011},
+ {0x03E, 0x00006661},
+ {0x03F, 0x000DB999},
+ {0x0ED, 0x00000000},
+ {0x0ED, 0x00002000},
+ {0x033, 0x00000002},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x033, 0x00000006},
+ {0x03D, 0x0004A883},
+ {0x03E, 0x00000000},
+ {0x03F, 0x00000001},
+ {0x0ED, 0x00000000},
+ {0x018, 0x00001001},
+ {0x002, 0x0000000D},
+ {0x0EE, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000A},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000011},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000018},
+ {0xA0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0000000B},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000012},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000019},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x08F, 0x000D0F7A},
+ {0x08C, 0x00084584},
+ {0x0EF, 0x00004000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004700},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000700},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000500},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000B0600},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00094600},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000500},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4500},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000400},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00008B00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00038B00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000D4400},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00008B00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00014B00},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000B00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004A00},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001A00},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00004900},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00002900},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00001000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000017},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000015},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000017},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00008000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000001},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x00000002},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x00000003},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x00000004},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x00000005},
+ {0x03F, 0x000103FC},
+ {0x033, 0x00000006},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000007},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000008},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000009},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x0000000A},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x0000000B},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x0000000C},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x0000000D},
+ {0x03F, 0x000103FC},
+ {0x033, 0x0000000E},
+ {0x03F, 0x0000007C},
+ {0x033, 0x0000000F},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000010},
+ {0x03F, 0x000FECFC},
+ {0x033, 0x00000011},
+ {0x03F, 0x000BECFC},
+ {0x033, 0x00000012},
+ {0x03F, 0x0003E4FC},
+ {0x033, 0x00000013},
+ {0x03F, 0x0001D0FC},
+ {0x033, 0x00000014},
+ {0x03F, 0x0001C3FC},
+ {0x033, 0x00000015},
+ {0x03F, 0x000103FC},
+ {0x033, 0x00000016},
+ {0x03F, 0x0000007C},
+ {0x033, 0x00000017},
+ {0x03F, 0x0000007C},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000100},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x03F, 0x00003317},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003336},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003338},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003337},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00003356},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001A},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001B},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001C},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001D},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001E},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000001F},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000020},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000021},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000022},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000023},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000024},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000025},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000026},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000027},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000028},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000029},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002A},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002B},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002C},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002D},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002E},
+ {0x03F, 0x00003338},
+ {0x033, 0x0000002F},
+ {0x03F, 0x00003338},
+ {0x033, 0x00000030},
+ {0x03F, 0x00003338},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000040},
+ {0x033, 0x00000001},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000002},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000003},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000004},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000005},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000006},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000007},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000008},
+ {0x03F, 0x000004BA},
+ {0x033, 0x00000009},
+ {0x03F, 0x000004BA},
+ {0x033, 0x0000000A},
+ {0x03F, 0x000004BA},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000004BA},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000010},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000002},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000003},
+ {0x03F, 0x00000870},
+ {0x033, 0x00000004},
+ {0x03F, 0x00000870},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000730},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000430},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000008},
+ {0x03F, 0x00000CB0},
+ {0x033, 0x00000009},
+ {0x03F, 0x00000870},
+ {0x033, 0x0000000A},
+ {0x03F, 0x00000870},
+ {0x033, 0x0000000B},
+ {0x03F, 0x00000430},
+ {0x033, 0x0000000C},
+ {0x03F, 0x00000430},
+ {0x033, 0x0000000D},
+ {0x03F, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x03F, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000080},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F258},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F258},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00036458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0003C458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026658},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00026458},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00028558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C358},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0002C558},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F358},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002F358},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F258},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023758},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00023558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00025558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x00024558},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000C},
+ {0x03F, 0x0002C558},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xA0000000, 0x00000000},
+ {0x03E, 0x0000000B},
+ {0x03F, 0x0006F458},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EE, 0x00002000},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000166},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000163},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000068},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000068},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F5},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A5},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000053},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000070},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000067},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000064},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000061},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000058},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E5},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000055},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000E2},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000052},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000DF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000065},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000062},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000040},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000049},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000041},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000046},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000042},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000043},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000059},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000043},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000040},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000056},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000044},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000045},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000046},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000047},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000048},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000049},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000050},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000051},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000052},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000053},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000054},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000055},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000056},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000057},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000058},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000059},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000065},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000066},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000077},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000078},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000078},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000075},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000072},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F6},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001EA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001F0},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001E4},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001ED},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AC},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001AA},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000E0},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001A4},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000006C},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000069},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000066},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000063},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004E},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000057},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000060},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000004B},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000054},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005D},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000048},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000051},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000005A},
+ {0xB0000000, 0x00000000},
+ {0x0EE, 0x00000000},
+ {0x0EE, 0x00004000},
+ {0x033, 0x00000000},
+ {0x03F, 0x00003BEF},
+ {0x033, 0x00000001},
+ {0x03F, 0x00003BE9},
+ {0x033, 0x00000002},
+ {0x03F, 0x00003BE3},
+ {0x033, 0x00000003},
+ {0x03F, 0x00003BDD},
+ {0x033, 0x00000004},
+ {0x03F, 0x00003BD7},
+ {0x033, 0x00000005},
+ {0x03F, 0x00003BD1},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00003BCB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001BD9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BD1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00001BD3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00001BCD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000BD9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BD1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000BD3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000BCD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000859},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000857},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000859},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000851},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000819},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000084D},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000817},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000819},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000811},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000013},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000014},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000015},
+ {0x03F, 0x000039DC},
+ {0x033, 0x00000016},
+ {0x03F, 0x000039D6},
+ {0x033, 0x00000017},
+ {0x03F, 0x000039D0},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x03F, 0x000009D2},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x03F, 0x000008D3},
+ {0x033, 0x0000001E},
+ {0x03F, 0x000008CD},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000023},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000024},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000025},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000026},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000027},
+ {0x03F, 0x000039DC},
+ {0x033, 0x00000028},
+ {0x03F, 0x000039D6},
+ {0x033, 0x00000029},
+ {0x03F, 0x000039D0},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x03F, 0x000009D2},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000030},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000035},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000036},
+ {0x03F, 0x000039EE},
+ {0x033, 0x00000037},
+ {0x03F, 0x000039E8},
+ {0x033, 0x00000038},
+ {0x03F, 0x000039E2},
+ {0x033, 0x00000039},
+ {0x03F, 0x000039DC},
+ {0x033, 0x0000003A},
+ {0x03F, 0x000039D6},
+ {0x033, 0x0000003B},
+ {0x03F, 0x000039D0},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x03F, 0x000019D2},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D8},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x03F, 0x000009D2},
+ {0x033, 0x00000040},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CC},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000008D9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000041},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000042},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000043},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000044},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000045},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000046},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000047},
+ {0x03F, 0x00000807},
+ {0x033, 0x00000048},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000049},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000004F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000050},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000051},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000052},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000053},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000054},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000055},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000056},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000057},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000058},
+ {0x03F, 0x0000080D},
+ {0x033, 0x00000059},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000005A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000005F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000060},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000061},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000062},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000063},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000064},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000065},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000066},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000067},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000068},
+ {0x03F, 0x0000084D},
+ {0x033, 0x00000069},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006A},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000006B},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000006C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000006F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000070},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000071},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000072},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000073},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000074},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000075},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000076},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000077},
+ {0x03F, 0x000008D3},
+ {0x033, 0x00000078},
+ {0x03F, 0x000008CD},
+ {0x033, 0x00000079},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007A},
+ {0x03F, 0x0000084D},
+ {0x033, 0x0000007B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007C},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000007D},
+ {0x03F, 0x00000807},
+ {0x033, 0x0000007E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EE},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039EF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000007F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E8},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E9},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000080},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039E3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000081},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039DD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000082},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D6},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D7},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000083},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D0},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039D1},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000084},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CA},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000039CB},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000085},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000086},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CC},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000019CD},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000087},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D2},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000009D3},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000088},
+ {0x03F, 0x000009CD},
+ {0x033, 0x00000089},
+ {0x03F, 0x000008D3},
+ {0x033, 0x0000008A},
+ {0x03F, 0x000008CD},
+ {0x033, 0x0000008B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000008C7},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000853},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000008C},
+ {0x03F, 0x0000084D},
+ {0x033, 0x0000008D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000847},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x00000813},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000008E},
+ {0x03F, 0x0000080D},
+ {0x033, 0x0000008F},
+ {0x03F, 0x00000807},
+ {0x0EE, 0x00000000},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000007},
+ {0x03E, 0x00000001},
+ {0x03F, 0x00020F3C},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00080000},
+ {0x033, 0x0000000C},
+ {0x03E, 0x00000001},
+ {0x03F, 0x000305BC},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0EC, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0EC, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x03C, 0x00000020},
+ {0x03D, 0x00000078},
+ {0x03E, 0x00080000},
+ {0x03F, 0x00001999},
+ {0x0EC, 0x00000000},
+ {0x02F, 0x0002260D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000001},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x0DE, 0x00000000},
+ {0xA0000000, 0x00000000},
+ {0x0DE, 0x00000001},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000002},
+ {0x033, 0x00000000},
+ {0x03F, 0x00000002},
+ {0x033, 0x00000001},
+ {0x03F, 0x00000002},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000400},
+ {0x033, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000001},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000002},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000003},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000004},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000005},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000006},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000007},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000008},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000009},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000000F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000010},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000011},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000012},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000013},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000014},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000015},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000016},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000017},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000018},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000019},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000001F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000020},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000021},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000022},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000023},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000024},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000025},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000026},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000027},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000017F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000013F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FB},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FA},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x0EF, 0x00000200},
+ {0x033, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000001},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000002},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000003},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000004},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000005},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000006},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000007},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000008},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000009},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000A},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000B},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000C},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000D},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000E},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000000F},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000010},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000011},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000012},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000013},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000014},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000015},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000016},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000017},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000018},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000019},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001A},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001B},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001C},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001D},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001E},
+ {0x03F, 0x000001FF},
+ {0x033, 0x0000001F},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000020},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000021},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000022},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000023},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000024},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000025},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000026},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000027},
+ {0x03F, 0x000001FF},
+ {0x033, 0x00000028},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000029},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x000001FF},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x000000FF},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000002F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000030},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000031},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000032},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000033},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000034},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000035},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000036},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000037},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000038},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x00000039},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003A},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003B},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003C},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003D},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003E},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x033, 0x0000003F},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003B},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xA0000000, 0x00000000},
+ {0x03F, 0x0000003F},
+ {0xB0000000, 0x00000000},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06E, 0x00067A7C},
+ {0xA0000000, 0x00000000},
+ {0x06E, 0x00077A7C},
+ {0xB0000000, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x06F, 0x00067A7C},
+ {0xA0000000, 0x00000000},
+ {0x06F, 0x00077A7C},
+ {0xB0000000, 0x00000000},
+ {0x06D, 0x00000C31},
+ {0x0EF, 0x00020000},
+ {0x033, 0x00000000},
+ {0x03F, 0x000005FF},
+ {0x0EF, 0x00000000},
+ {0x0A0, 0x00000043},
+ {0x005, 0x00000001},
+ {0x0EF, 0x00080000},
+ {0x033, 0x00000001},
+ {0x03E, 0x00000001},
+ {0x03F, 0x00022020},
+ {0x0EF, 0x00000000},
+ {0x80010000, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90010001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90020001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90030001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90250001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90260001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90320001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90330001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90340001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90350001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90360001, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90010002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90020002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90030002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90250002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90260002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0x90320002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90330002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90340002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90350002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0x90360002, 0x00000000}, {0x40000000, 0x00000000},
+ {0x087, 0x0000042F},
+ {0xA0000000, 0x00000000},
+ {0x087, 0x00000427},
+ {0xB0000000, 0x00000000},
+ {0x002, 0x00000000},
+ {0x067, 0x00000052},
+};
+
+static const struct rtw89_reg2_def rtw89_8852a_phy_nctl_regs[] = {
+ {0x8000, 0x00000008},
+ {0x8008, 0x00000000},
+ {0x8004, 0xf0862966},
+ {0x800c, 0x78000000},
+ {0x8010, 0x88015000},
+ {0x8014, 0x80010100},
+ {0x8018, 0x10010100},
+ {0x801c, 0xa210bc00},
+ {0x8020, 0x000403e0},
+ {0x8024, 0x00072160},
+ {0x8028, 0x00180e00},
+ {0x8030, 0x400000c0},
+ {0x8034, 0x56000800},
+ {0x8038, 0x00000009},
+ {0x803c, 0x00000008},
+ {0x8040, 0x00000046},
+ {0x8044, 0x0010001f},
+ {0x8048, 0xf0000003},
+ {0x804c, 0x62ac6162},
+ {0x8050, 0xf2acf162},
+ {0x8054, 0x62ac6162},
+ {0x8058, 0xf2acf162},
+ {0x805c, 0x150c0b02},
+ {0x8060, 0x150c0b02},
+ {0x8064, 0x2aa00047},
+ {0x8074, 0x80000000},
+ {0x807c, 0x000000ee},
+ {0x8088, 0x80000000},
+ {0x8098, 0x0000ff00},
+ {0x809c, 0x0000001f},
+ {0x80a0, 0x00010300},
+ {0x80b0, 0x00000000},
+ {0x80d0, 0x00000000},
+ {0x8114, 0x00000000},
+ {0x8120, 0x10010000},
+ {0x8124, 0x00000000},
+ {0x812c, 0x0000c000},
+ {0x8138, 0x40000002},
+ {0x813c, 0x40000002},
+ {0x8140, 0x00000000},
+ {0x8144, 0x0b040b03},
+ {0x8148, 0x0b040b04},
+ {0x814c, 0x0b040b03},
+ {0x8150, 0x00000000},
+ {0x8158, 0xffffffff},
+ {0x815c, 0xffffffff},
+ {0x8160, 0xffffffff},
+ {0x8164, 0xffffffff},
+ {0x8168, 0xffffffff},
+ {0x816c, 0x1fffffff},
+ {0x81ac, 0x003f1a00},
+ {0x81b0, 0x003f1a00},
+ {0x81bc, 0x005b5b5b},
+ {0x81c0, 0x005b5b5b},
+ {0x81b4, 0x00600060},
+ {0x81b8, 0x00600060},
+ {0x81cc, 0x00000000},
+ {0x81dc, 0x00000002},
+ {0x8214, 0x00000000},
+ {0x8220, 0x10010000},
+ {0x8224, 0x00000000},
+ {0x822c, 0x0000d000},
+ {0x8238, 0x40000002},
+ {0x823c, 0x40000002},
+ {0x8240, 0x00000000},
+ {0x8244, 0x0b040b03},
+ {0x8248, 0x0b040b03},
+ {0x824c, 0x0b030b03},
+ {0x8250, 0x00000000},
+ {0x8258, 0xffffffff},
+ {0x825c, 0xffffffff},
+ {0x8260, 0xffffffff},
+ {0x8264, 0xffffffff},
+ {0x8268, 0xffffffff},
+ {0x826c, 0x1fffffff},
+ {0x82ac, 0x003f1a00},
+ {0x82b0, 0x003f1a00},
+ {0x82bc, 0x005b5b5b},
+ {0x82c0, 0x005b5b5b},
+ {0x82b4, 0x00600060},
+ {0x82b8, 0x00600060},
+ {0x82cc, 0x00000000},
+ {0x82dc, 0x00000002},
+ {0x81d8, 0x00000001},
+ {0x82d8, 0x00000001},
+ {0x8d00, 0x00000000},
+ {0x8d04, 0x00000000},
+ {0x8d08, 0x00000000},
+ {0x8d0c, 0x00000000},
+ {0x8d10, 0x00000000},
+ {0x8d14, 0x00000000},
+ {0x8d18, 0x00000000},
+ {0x8d1c, 0x00000000},
+ {0x8d20, 0x00000000},
+ {0x8d24, 0x00000000},
+ {0x8d28, 0x00000000},
+ {0x8d2c, 0x00000000},
+ {0x8d30, 0x00000000},
+ {0x8d34, 0x00000000},
+ {0x8d38, 0x00000000},
+ {0x8d3c, 0x00000000},
+ {0x8d40, 0x00000000},
+ {0x8d44, 0x00000000},
+ {0x8d48, 0x00000000},
+ {0x8d4c, 0x00000000},
+ {0x8d50, 0x00000000},
+ {0x8d54, 0x00000000},
+ {0x8d58, 0x00000000},
+ {0x8d5c, 0x00000000},
+ {0x8d60, 0x00000000},
+ {0x8d64, 0x00000000},
+ {0x8d68, 0x00000000},
+ {0x8d6c, 0x00000000},
+ {0x8d70, 0x00000000},
+ {0x8d74, 0x00000000},
+ {0x8d78, 0x00000000},
+ {0x8d7c, 0x00000000},
+ {0x8d80, 0x00000000},
+ {0x8d84, 0x00000000},
+ {0x8d88, 0x00000000},
+ {0x8d8c, 0x00000000},
+ {0x8d90, 0x00000000},
+ {0x8d94, 0x00000000},
+ {0x8d98, 0x00000000},
+ {0x8d9c, 0x00000000},
+ {0x8da0, 0x00000000},
+ {0x8da4, 0x00000000},
+ {0x8da8, 0x00000000},
+ {0x8dac, 0x00000000},
+ {0x8db0, 0x00000000},
+ {0x8db4, 0x00000000},
+ {0x8db8, 0x00000000},
+ {0x8dbc, 0x00000000},
+ {0x8dc0, 0x00000000},
+ {0x8dc4, 0x00000000},
+ {0x8dc8, 0x00000000},
+ {0x8dcc, 0x00000000},
+ {0x8dd0, 0x00000000},
+ {0x8dd4, 0x00000000},
+ {0x8dd8, 0x00000000},
+ {0x8ddc, 0x00000000},
+ {0x8de0, 0x00000000},
+ {0x8de4, 0x00000000},
+ {0x8de8, 0x00000000},
+ {0x8dec, 0x00000000},
+ {0x8df0, 0x00000000},
+ {0x8df4, 0x00000000},
+ {0x8df8, 0x00000000},
+ {0x8dfc, 0x00000000},
+ {0x8e00, 0x00000000},
+ {0x8e04, 0x00000000},
+ {0x8e08, 0x00000000},
+ {0x8e0c, 0x00000000},
+ {0x8e10, 0x00000000},
+ {0x8e14, 0x00000000},
+ {0x8e18, 0x00000000},
+ {0x8e1c, 0x00000000},
+ {0x8e20, 0x00000000},
+ {0x8e24, 0x00000000},
+ {0x8e28, 0x00000000},
+ {0x8e2c, 0x00000000},
+ {0x8e30, 0x00000000},
+ {0x8e34, 0x00000000},
+ {0x8e38, 0x00000000},
+ {0x8e3c, 0x00000000},
+ {0x8e40, 0x00000000},
+ {0x8e44, 0x00000000},
+ {0x8e48, 0x00000000},
+ {0x8e4c, 0x00000000},
+ {0x8e50, 0x00000000},
+ {0x8e54, 0x00000000},
+ {0x8e58, 0x00000000},
+ {0x8e5c, 0x00000000},
+ {0x8e60, 0x00000000},
+ {0x8e64, 0x00000000},
+ {0x8e68, 0x00000000},
+ {0x8e6c, 0x00000000},
+ {0x8e70, 0x00000000},
+ {0x8e74, 0x00000000},
+ {0x8e78, 0x00000000},
+ {0x8e7c, 0x00000000},
+ {0x8e80, 0x00000000},
+ {0x8e84, 0x00000000},
+ {0x8e88, 0x00000000},
+ {0x8e8c, 0x00000000},
+ {0x8e90, 0x00000000},
+ {0x8e94, 0x00000000},
+ {0x8e98, 0x00000000},
+ {0x8e9c, 0x00000000},
+ {0x8ea0, 0x00000000},
+ {0x8ea4, 0x00000000},
+ {0x8ea8, 0x00000000},
+ {0x8eac, 0x00000000},
+ {0x8eb0, 0x00000000},
+ {0x8eb4, 0x00000000},
+ {0x8eb8, 0x00000000},
+ {0x8ebc, 0x00000000},
+ {0x8ec0, 0x00000000},
+ {0x8ec4, 0x00000000},
+ {0x8ec8, 0x00000000},
+ {0x8ecc, 0x00000000},
+ {0x8ed0, 0x00000000},
+ {0x8ed4, 0x00000000},
+ {0x8ed8, 0x00000000},
+ {0x8edc, 0x00000000},
+ {0x8ee0, 0x00000000},
+ {0x8ee4, 0x00000000},
+ {0x8ee8, 0x00000000},
+ {0x8eec, 0x00000000},
+ {0x8ef0, 0x00000000},
+ {0x8ef4, 0x00000000},
+ {0x8ef8, 0x00000000},
+ {0x8efc, 0x00000000},
+ {0x8f00, 0x00000000},
+ {0x8f04, 0x00000000},
+ {0x8f08, 0x00000000},
+ {0x8f0c, 0x00000000},
+ {0x8f10, 0x00000000},
+ {0x8f14, 0x00000000},
+ {0x8f18, 0x00000000},
+ {0x8f1c, 0x00000000},
+ {0x8f20, 0x00000000},
+ {0x8f24, 0x00000000},
+ {0x8f28, 0x00000000},
+ {0x8f2c, 0x00000000},
+ {0x8f30, 0x00000000},
+ {0x8f34, 0x00000000},
+ {0x8f38, 0x00000000},
+ {0x8f3c, 0x00000000},
+ {0x8f40, 0x00000000},
+ {0x8f44, 0x00000000},
+ {0x8f48, 0x00000000},
+ {0x8f4c, 0x00000000},
+ {0x8f50, 0x00000000},
+ {0x8f54, 0x00000000},
+ {0x8f58, 0x00000000},
+ {0x8f5c, 0x00000000},
+ {0x8f60, 0x00000000},
+ {0x8f64, 0x00000000},
+ {0x8f68, 0x00000000},
+ {0x8f6c, 0x00000000},
+ {0x8f70, 0x00000000},
+ {0x8f74, 0x00000000},
+ {0x8f78, 0x00000000},
+ {0x8f7c, 0x00000000},
+ {0x8f80, 0x00000000},
+ {0x8f84, 0x00000000},
+ {0x8f88, 0x00000000},
+ {0x8f8c, 0x00000000},
+ {0x8f90, 0x00000000},
+ {0x8f94, 0x00000000},
+ {0x8f98, 0x00000000},
+ {0x8f9c, 0x00000000},
+ {0x8fa0, 0x00000000},
+ {0x8fa4, 0x00000000},
+ {0x8fa8, 0x00000000},
+ {0x8fac, 0x00000000},
+ {0x8fb0, 0x00000000},
+ {0x8fb4, 0x00000000},
+ {0x8fb8, 0x00000000},
+ {0x8fbc, 0x00000000},
+ {0x8fc0, 0x00000000},
+ {0x8fc4, 0x00000000},
+ {0x8fc8, 0x00000000},
+ {0x8fcc, 0x00000000},
+ {0x8fd0, 0x00000000},
+ {0x8fd4, 0x00000000},
+ {0x8fd8, 0x00000000},
+ {0x8fdc, 0x00000000},
+ {0x8fe0, 0x00000000},
+ {0x8fe4, 0x00000000},
+ {0x8fe8, 0x00000000},
+ {0x8fec, 0x00000000},
+ {0x8ff0, 0x00000000},
+ {0x8ff4, 0x00000000},
+ {0x8ff8, 0x00000000},
+ {0x8ffc, 0x00000000},
+ {0x9000, 0x00000000},
+ {0x9004, 0x00000000},
+ {0x9008, 0x00000000},
+ {0x900c, 0x00000000},
+ {0x9010, 0x00000000},
+ {0x9014, 0x00000000},
+ {0x9018, 0x00000000},
+ {0x901c, 0x00000000},
+ {0x9020, 0x00000000},
+ {0x9024, 0x00000000},
+ {0x9028, 0x00000000},
+ {0x902c, 0x00000000},
+ {0x9030, 0x00000000},
+ {0x9034, 0x00000000},
+ {0x9038, 0x00000000},
+ {0x903c, 0x00000000},
+ {0x9040, 0x00000000},
+ {0x9044, 0x00000000},
+ {0x9048, 0x00000000},
+ {0x904c, 0x00000000},
+ {0x9050, 0x00000000},
+ {0x9054, 0x00000000},
+ {0x9058, 0x00000000},
+ {0x905c, 0x00000000},
+ {0x9060, 0x00000000},
+ {0x9064, 0x00000000},
+ {0x9068, 0x00000000},
+ {0x906c, 0x00000000},
+ {0x9070, 0x00000000},
+ {0x9074, 0x00000000},
+ {0x9078, 0x00000000},
+ {0x907c, 0x00000000},
+ {0x9080, 0x00000000},
+ {0x9084, 0x00000000},
+ {0x9088, 0x00000000},
+ {0x908c, 0x00000000},
+ {0x9090, 0x00000000},
+ {0x9094, 0x00000000},
+ {0x9098, 0x00000000},
+ {0x909c, 0x00000000},
+ {0x90a0, 0x00000000},
+ {0x90a4, 0x00000000},
+ {0x90a8, 0x00000000},
+ {0x90ac, 0x00000000},
+ {0x90b0, 0x00000000},
+ {0x90b4, 0x00000000},
+ {0x90b8, 0x00000000},
+ {0x90bc, 0x00000000},
+ {0x9100, 0x00000000},
+ {0x9104, 0x00000000},
+ {0x9108, 0x00000000},
+ {0x910c, 0x00000000},
+ {0x9110, 0x00000000},
+ {0x9114, 0x00000000},
+ {0x9118, 0x00000000},
+ {0x911c, 0x00000000},
+ {0x9120, 0x00000000},
+ {0x9124, 0x00000000},
+ {0x9128, 0x00000000},
+ {0x912c, 0x00000000},
+ {0x9130, 0x00000000},
+ {0x9134, 0x00000000},
+ {0x9138, 0x00000000},
+ {0x913c, 0x00000000},
+ {0x9140, 0x00000000},
+ {0x9144, 0x00000000},
+ {0x9148, 0x00000000},
+ {0x914c, 0x00000000},
+ {0x9150, 0x00000000},
+ {0x9154, 0x00000000},
+ {0x9158, 0x00000000},
+ {0x915c, 0x00000000},
+ {0x9160, 0x00000000},
+ {0x9164, 0x00000000},
+ {0x9168, 0x00000000},
+ {0x916c, 0x00000000},
+ {0x9170, 0x00000000},
+ {0x9174, 0x00000000},
+ {0x9178, 0x00000000},
+ {0x917c, 0x00000000},
+ {0x9180, 0x00000000},
+ {0x9184, 0x00000000},
+ {0x9188, 0x00000000},
+ {0x918c, 0x00000000},
+ {0x9190, 0x00000000},
+ {0x9194, 0x00000000},
+ {0x9198, 0x00000000},
+ {0x919c, 0x00000000},
+ {0x91a0, 0x00000000},
+ {0x91a4, 0x00000000},
+ {0x91a8, 0x00000000},
+ {0x91ac, 0x00000000},
+ {0x91b0, 0x00000000},
+ {0x91b4, 0x00000000},
+ {0x91b8, 0x00000000},
+ {0x91bc, 0x00000000},
+ {0x91c0, 0x00000000},
+ {0x91c4, 0x00000000},
+ {0x91c8, 0x00000000},
+ {0x91cc, 0x00000000},
+ {0x91d0, 0x00000000},
+ {0x91d4, 0x00000000},
+ {0x91d8, 0x00000000},
+ {0x91dc, 0x00000000},
+ {0x91e0, 0x00000000},
+ {0x91e4, 0x00000000},
+ {0x91e8, 0x00000000},
+ {0x91ec, 0x00000000},
+ {0x91f0, 0x00000000},
+ {0x91f4, 0x00000000},
+ {0x91f8, 0x00000000},
+ {0x91fc, 0x00000000},
+ {0x9200, 0x00000000},
+ {0x9204, 0x00000000},
+ {0x9208, 0x00000000},
+ {0x920c, 0x00000000},
+ {0x9210, 0x00000000},
+ {0x9214, 0x00000000},
+ {0x9218, 0x00000000},
+ {0x921c, 0x00000000},
+ {0x9220, 0x00000000},
+ {0x9224, 0x00000000},
+ {0x9228, 0x00000000},
+ {0x922c, 0x00000000},
+ {0x9230, 0x00000000},
+ {0x9234, 0x00000000},
+ {0x9238, 0x00000000},
+ {0x923c, 0x00000000},
+ {0x9240, 0x00000000},
+ {0x9244, 0x00000000},
+ {0x9248, 0x00000000},
+ {0x924c, 0x00000000},
+ {0x9250, 0x00000000},
+ {0x9254, 0x00000000},
+ {0x9258, 0x00000000},
+ {0x925c, 0x00000000},
+ {0x9260, 0x00000000},
+ {0x9264, 0x00000000},
+ {0x9268, 0x00000000},
+ {0x926c, 0x00000000},
+ {0x9270, 0x00000000},
+ {0x9274, 0x00000000},
+ {0x9278, 0x00000000},
+ {0x927c, 0x00000000},
+ {0x9280, 0x00000000},
+ {0x9284, 0x00000000},
+ {0x9288, 0x00000000},
+ {0x928c, 0x00000000},
+ {0x9290, 0x00000000},
+ {0x9294, 0x00000000},
+ {0x9298, 0x00000000},
+ {0x929c, 0x00000000},
+ {0x92a0, 0x00000000},
+ {0x92a4, 0x00000000},
+ {0x92a8, 0x00000000},
+ {0x92ac, 0x00000000},
+ {0x92b0, 0x00000000},
+ {0x92b4, 0x00000000},
+ {0x92b8, 0x00000000},
+ {0x92bc, 0x00000000},
+ {0x92c0, 0x00000000},
+ {0x92c4, 0x00000000},
+ {0x92c8, 0x00000000},
+ {0x92cc, 0x00000000},
+ {0x92d0, 0x00000000},
+ {0x92d4, 0x00000000},
+ {0x92d8, 0x00000000},
+ {0x92dc, 0x00000000},
+ {0x92e0, 0x00000000},
+ {0x92e4, 0x00000000},
+ {0x92e8, 0x00000000},
+ {0x92ec, 0x00000000},
+ {0x92f0, 0x00000000},
+ {0x92f4, 0x00000000},
+ {0x92f8, 0x00000000},
+ {0x92fc, 0x00000000},
+ {0x9300, 0x00000000},
+ {0x9304, 0x00000000},
+ {0x9308, 0x00000000},
+ {0x930c, 0x00000000},
+ {0x9310, 0x00000000},
+ {0x9314, 0x00000000},
+ {0x9318, 0x00000000},
+ {0x931c, 0x00000000},
+ {0x9320, 0x00000000},
+ {0x9324, 0x00000000},
+ {0x9328, 0x00000000},
+ {0x932c, 0x00000000},
+ {0x9330, 0x00000000},
+ {0x9334, 0x00000000},
+ {0x9338, 0x00000000},
+ {0x933c, 0x00000000},
+ {0x9340, 0x00000000},
+ {0x9344, 0x00000000},
+ {0x9348, 0x00000000},
+ {0x934c, 0x00000000},
+ {0x9350, 0x00000000},
+ {0x9354, 0x00000000},
+ {0x9358, 0x00000000},
+ {0x935c, 0x00000000},
+ {0x9360, 0x00000000},
+ {0x9364, 0x00000000},
+ {0x9368, 0x00000000},
+ {0x936c, 0x00000000},
+ {0x9370, 0x00000000},
+ {0x9374, 0x00000000},
+ {0x9378, 0x00000000},
+ {0x937c, 0x00000000},
+ {0x9380, 0x00000000},
+ {0x9384, 0x00000000},
+ {0x9388, 0x00000000},
+ {0x938c, 0x00000000},
+ {0x9390, 0x00000000},
+ {0x9394, 0x00000000},
+ {0x9398, 0x00000000},
+ {0x939c, 0x00000000},
+ {0x93a0, 0x00000000},
+ {0x93a4, 0x00000000},
+ {0x93a8, 0x00000000},
+ {0x93ac, 0x00000000},
+ {0x93b0, 0x00000000},
+ {0x93b4, 0x00000000},
+ {0x93b8, 0x00000000},
+ {0x93bc, 0x00000000},
+ {0x93c0, 0x00000000},
+ {0x93c4, 0x00000000},
+ {0x93c8, 0x00000000},
+ {0x93cc, 0x00000000},
+ {0x93d0, 0x00000000},
+ {0x93d4, 0x00000000},
+ {0x93d8, 0x00000000},
+ {0x93dc, 0x00000000},
+ {0x93e0, 0x00000000},
+ {0x93e4, 0x00000000},
+ {0x93e8, 0x00000000},
+ {0x93ec, 0x00000000},
+ {0x93f0, 0x00000000},
+ {0x93f4, 0x00000000},
+ {0x93f8, 0x00000000},
+ {0x93fc, 0x00000000},
+ {0x9400, 0x00000000},
+ {0x9404, 0x00000000},
+ {0x9408, 0x00000000},
+ {0x940c, 0x00000000},
+ {0x9410, 0x00000000},
+ {0x9414, 0x00000000},
+ {0x9418, 0x00000000},
+ {0x941c, 0x00000000},
+ {0x9420, 0x00000000},
+ {0x9424, 0x00000000},
+ {0x9428, 0x00000000},
+ {0x942c, 0x00000000},
+ {0x9430, 0x00000000},
+ {0x9434, 0x00000000},
+ {0x9438, 0x00000000},
+ {0x943c, 0x00000000},
+ {0x9440, 0x00000000},
+ {0x9444, 0x00000000},
+ {0x9448, 0x00000000},
+ {0x944c, 0x00000000},
+ {0x9450, 0x00000000},
+ {0x9454, 0x00000000},
+ {0x9458, 0x00000000},
+ {0x945c, 0x00000000},
+ {0x9460, 0x00000000},
+ {0x9464, 0x00000000},
+ {0x9468, 0x00000000},
+ {0x946c, 0x00000000},
+ {0x9470, 0x00000000},
+ {0x9474, 0x00000000},
+ {0x9478, 0x00000000},
+ {0x947c, 0x00000000},
+ {0x9480, 0x00000000},
+ {0x9484, 0x00000000},
+ {0x9488, 0x00000000},
+ {0x948c, 0x00000000},
+ {0x9490, 0x00000000},
+ {0x9494, 0x00000000},
+ {0x9498, 0x00000000},
+ {0x949c, 0x00000000},
+ {0x94a0, 0x00000000},
+ {0x94a4, 0x00000000},
+ {0x94a8, 0x00000000},
+ {0x94ac, 0x00000000},
+ {0x94b0, 0x00000000},
+ {0x94b4, 0x00000000},
+ {0x94b8, 0x00000000},
+ {0x94bc, 0x00000000},
+ {0x81d8, 0x00000000},
+ {0x82d8, 0x00000000},
+ {0x9f04, 0x2b251f19},
+ {0x9f08, 0x433d3731},
+ {0x9f0c, 0x5b554f49},
+ {0x9f10, 0x736d6761},
+ {0x9f14, 0x7f7f7f79},
+ {0x9f18, 0x120f7f7f},
+ {0x9f1c, 0x1e1b1815},
+ {0x9f20, 0x2a272421},
+ {0x9f24, 0x3633302d},
+ {0x9f28, 0x3f3f3c39},
+ {0x9f2c, 0x3f3f3f3f},
+ {0x8088, 0x00000110},
+ {0x8000, 0x00000008},
+ {0x8080, 0x00000005},
+ {0x8500, 0x00060009},
+ {0x8504, 0x000418b0},
+ {0x8508, 0x00089c00},
+ {0x850c, 0x43000004},
+ {0x8510, 0x4b044a00},
+ {0x8514, 0x40098603},
+ {0x8518, 0x4b05e01f},
+ {0x851c, 0x400b8703},
+ {0x8520, 0x4b00e01f},
+ {0x8524, 0x43800004},
+ {0x8528, 0x4c000007},
+ {0x852c, 0x43000004},
+ {0x8530, 0x57007430},
+ {0x8534, 0x73000006},
+ {0x8538, 0x50550004},
+ {0x853c, 0xb4163000},
+ {0x8540, 0xe37ea510},
+ {0x8544, 0xf117f017},
+ {0x8548, 0xf317f217},
+ {0x854c, 0xf517f417},
+ {0x8550, 0xf717f617},
+ {0x8554, 0xf917f817},
+ {0x8558, 0xfb17fa17},
+ {0x855c, 0xfd17fc17},
+ {0x8560, 0xf117f017},
+ {0x8564, 0xf317f217},
+ {0x8568, 0xa503f417},
+ {0x856c, 0xf116f016},
+ {0x8570, 0x304e0001},
+ {0x8574, 0x30873053},
+ {0x8578, 0x30ab30a8},
+ {0x857c, 0x30b330ae},
+ {0x8580, 0x30ba30b6},
+ {0x8584, 0x30d430c7},
+ {0x8588, 0x310d3100},
+ {0x858c, 0x31ed3112},
+ {0x8590, 0x320a31f1},
+ {0x8594, 0x3243320b},
+ {0x8598, 0x31e631b1},
+ {0x859c, 0x5b00e283},
+ {0x85a0, 0xe2d15500},
+ {0x85a4, 0xe2830001},
+ {0x85a8, 0x5b10e2e3},
+ {0x85ac, 0x20987410},
+ {0x85b0, 0xe3750200},
+ {0x85b4, 0x00002080},
+ {0x85b8, 0x23f0e375},
+ {0x85bc, 0xe3750001},
+ {0x85c0, 0x000023f0},
+ {0x85c4, 0x5507e375},
+ {0x85c8, 0xe2d5e2d5},
+ {0x85cc, 0x20887410},
+ {0x85d0, 0xe3750200},
+ {0x85d4, 0x000123f0},
+ {0x85d8, 0x23f0e375},
+ {0x85dc, 0xe3750000},
+ {0x85e0, 0xe2d55517},
+ {0x85e4, 0x4e004f02},
+ {0x85e8, 0x52015302},
+ {0x85ec, 0x7508e2d9},
+ {0x85f0, 0x74207900},
+ {0x85f4, 0x57005710},
+ {0x85f8, 0x75fbe375},
+ {0x85fc, 0x23f07410},
+ {0x8600, 0xe3750001},
+ {0x8604, 0x000023f0},
+ {0x8608, 0x7430e375},
+ {0x860c, 0x5b100001},
+ {0x8610, 0x20907410},
+ {0x8614, 0xe3750000},
+ {0x8618, 0x000123f0},
+ {0x861c, 0x23f0e375},
+ {0x8620, 0xe3750000},
+ {0x8624, 0xe2d55507},
+ {0x8628, 0x7410e2d5},
+ {0x862c, 0x02002098},
+ {0x8630, 0x23f0e375},
+ {0x8634, 0xe3750001},
+ {0x8638, 0x000023f0},
+ {0x863c, 0x5517e375},
+ {0x8640, 0x4f02e2d5},
+ {0x8644, 0x53024e00},
+ {0x8648, 0xe2d95201},
+ {0x864c, 0x30787509},
+ {0x8650, 0xe2e3e283},
+ {0x8654, 0xe27b0001},
+ {0x8658, 0x0001e2e3},
+ {0x865c, 0x5b30e28f},
+ {0x8660, 0xe2d15500},
+ {0x8664, 0xe28f0001},
+ {0x8668, 0x0001e312},
+ {0x866c, 0x4380e287},
+ {0x8670, 0x0001e312},
+ {0x8674, 0x30e2e283},
+ {0x8678, 0xe3600023},
+ {0x867c, 0x54ed0002},
+ {0x8680, 0x00230baa},
+ {0x8684, 0x0002e360},
+ {0x8688, 0xe27be330},
+ {0x868c, 0xe2830001},
+ {0x8690, 0x002230dd},
+ {0x8694, 0x0002e360},
+ {0x8698, 0x0baa54ec},
+ {0x869c, 0xe3600022},
+ {0x86a0, 0xe3300002},
+ {0x86a4, 0x0001e27b},
+ {0x86a8, 0x0baae283},
+ {0x86ac, 0x6d0f6c67},
+ {0x86b0, 0xe360e2e3},
+ {0x86b4, 0xe2e36c8b},
+ {0x86b8, 0x0bace360},
+ {0x86bc, 0x6d0f6cb3},
+ {0x86c0, 0xe360e2e3},
+ {0x86c4, 0x6cdb0bad},
+ {0x86c8, 0xe2e36d0f},
+ {0x86cc, 0x6cf7e360},
+ {0x86d0, 0xe2e36d0f},
+ {0x86d4, 0x6c09e360},
+ {0x86d8, 0xe2e36d00},
+ {0x86dc, 0x6c25e360},
+ {0x86e0, 0xe360e2e3},
+ {0x86e4, 0x6c4df8ca},
+ {0x86e8, 0xe360e2e3},
+ {0x86ec, 0x6c75f9d3},
+ {0x86f0, 0xe360e2e3},
+ {0x86f4, 0xe2e36c99},
+ {0x86f8, 0xe330e360},
+ {0x86fc, 0x0001e27b},
+ {0x8700, 0x314de28f},
+ {0x8704, 0xe3650022},
+ {0x8708, 0x54ec0002},
+ {0x870c, 0x00220baa},
+ {0x8710, 0x0002e365},
+ {0x8714, 0xe287e330},
+ {0x8718, 0xe28f0001},
+ {0x871c, 0xe3303139},
+ {0x8720, 0x0001e287},
+ {0x8724, 0x0ba6e28f},
+ {0x8728, 0x21e07410},
+ {0x872c, 0x21e80009},
+ {0x8730, 0x6e670009},
+ {0x8734, 0xe32b6f0f},
+ {0x8738, 0xe365e312},
+ {0x873c, 0x21e07410},
+ {0x8740, 0x21e8000a},
+ {0x8744, 0x6e77000a},
+ {0x8748, 0xe312e32b},
+ {0x874c, 0x7410e365},
+ {0x8750, 0x000b21e0},
+ {0x8754, 0x000b21e8},
+ {0x8758, 0xe32b6e8b},
+ {0x875c, 0xe365e312},
+ {0x8760, 0x21e07410},
+ {0x8764, 0x21e8000c},
+ {0x8768, 0x6e9f000c},
+ {0x876c, 0xe312e32b},
+ {0x8770, 0x0baae365},
+ {0x8774, 0x21e07410},
+ {0x8778, 0x21e8000d},
+ {0x877c, 0x6eb3000d},
+ {0x8780, 0xe32b6f0f},
+ {0x8784, 0xe365e312},
+ {0x8788, 0x21e07410},
+ {0x878c, 0x21e8000e},
+ {0x8790, 0x6ec7000e},
+ {0x8794, 0xe312e32b},
+ {0x8798, 0x0bace365},
+ {0x879c, 0x21e07410},
+ {0x87a0, 0x21e8000f},
+ {0x87a4, 0x6edb000f},
+ {0x87a8, 0xe32b6f0f},
+ {0x87ac, 0xe365e312},
+ {0x87b0, 0x21e07410},
+ {0x87b4, 0x21e80010},
+ {0x87b8, 0x6eef0010},
+ {0x87bc, 0xe312e32b},
+ {0x87c0, 0xe365e365},
+ {0x87c4, 0x21e07410},
+ {0x87c8, 0x21e80013},
+ {0x87cc, 0x6e110013},
+ {0x87d0, 0xe32b6f00},
+ {0x87d4, 0xe365e312},
+ {0x87d8, 0x7410e365},
+ {0x87dc, 0x001421e0},
+ {0x87e0, 0x001421e8},
+ {0x87e4, 0xe32b6e25},
+ {0x87e8, 0xe365e312},
+ {0x87ec, 0x7410fb8c},
+ {0x87f0, 0x001521e0},
+ {0x87f4, 0x001521e8},
+ {0x87f8, 0xe32b6e39},
+ {0x87fc, 0xe365e312},
+ {0x8800, 0x21e07410},
+ {0x8804, 0x21e80016},
+ {0x8808, 0x6e4d0016},
+ {0x880c, 0xe312e32b},
+ {0x8810, 0xfc86e365},
+ {0x8814, 0x21e07410},
+ {0x8818, 0x21e80017},
+ {0x881c, 0x6e610017},
+ {0x8820, 0xe312e32b},
+ {0x8824, 0x7410e365},
+ {0x8828, 0x001821e0},
+ {0x882c, 0x001821e8},
+ {0x8830, 0xe32b6e75},
+ {0x8834, 0xe365e312},
+ {0x8838, 0x21e07410},
+ {0x883c, 0x21e80019},
+ {0x8840, 0x6e890019},
+ {0x8844, 0xe312e32b},
+ {0x8848, 0x7410e365},
+ {0x884c, 0x001a21e0},
+ {0x8850, 0x001a21e8},
+ {0x8854, 0xe32b6e99},
+ {0x8858, 0xe365e312},
+ {0x885c, 0xe287e330},
+ {0x8860, 0x00040001},
+ {0x8864, 0x0007775c},
+ {0x8868, 0x62006220},
+ {0x886c, 0x55010004},
+ {0x8870, 0xe2d15b00},
+ {0x8874, 0x66055b40},
+ {0x8878, 0x62000007},
+ {0x887c, 0xe3506300},
+ {0x8880, 0xe2d10004},
+ {0x8884, 0x0a010900},
+ {0x8888, 0x0d000b40},
+ {0x888c, 0x00320e01},
+ {0x8890, 0x95060004},
+ {0x8894, 0x00074380},
+ {0x8898, 0x00044d01},
+ {0x889c, 0x00074300},
+ {0x88a0, 0x05a30562},
+ {0x88a4, 0xe3509617},
+ {0x88a8, 0xe2d10004},
+ {0x88ac, 0x06a20007},
+ {0x88b0, 0xe35007a3},
+ {0x88b4, 0xe2d10004},
+ {0x88b8, 0x0002e340},
+ {0x88bc, 0x4380e348},
+ {0x88c0, 0x4d000007},
+ {0x88c4, 0x43000004},
+ {0x88c8, 0x00017900},
+ {0x88cc, 0x775e0004},
+ {0x88d0, 0x000731b3},
+ {0x88d4, 0x07a306a2},
+ {0x88d8, 0xe29331dd},
+ {0x88dc, 0x73000005},
+ {0x88e0, 0xe2930001},
+ {0x88e4, 0x5d000006},
+ {0x88e8, 0x42f70004},
+ {0x88ec, 0x6c000005},
+ {0x88f0, 0x42000004},
+ {0x88f4, 0x0004e2ab},
+ {0x88f8, 0x00074380},
+ {0x88fc, 0x4a004e00},
+ {0x8900, 0x00064c00},
+ {0x8904, 0x60007f00},
+ {0x8908, 0x00046f00},
+ {0x890c, 0x00054300},
+ {0x8910, 0x00017300},
+ {0x8914, 0xe2930001},
+ {0x8918, 0x5d010006},
+ {0x891c, 0x61006002},
+ {0x8920, 0x00055601},
+ {0x8924, 0xe2ab7710},
+ {0x8928, 0x73000005},
+ {0x892c, 0x43800004},
+ {0x8930, 0x5e010007},
+ {0x8934, 0x4d205e00},
+ {0x8938, 0x4a084e20},
+ {0x893c, 0x4c3f4960},
+ {0x8940, 0x00064301},
+ {0x8944, 0x63807f01},
+ {0x8948, 0x00046010},
+ {0x894c, 0x00064300},
+ {0x8950, 0x00077402},
+ {0x8954, 0x40004001},
+ {0x8958, 0x0006ab00},
+ {0x895c, 0x00077404},
+ {0x8960, 0x40004001},
+ {0x8964, 0x0004ab00},
+ {0x8968, 0x00074380},
+ {0x896c, 0x4e004d00},
+ {0x8970, 0x4c004a00},
+ {0x8974, 0x00064300},
+ {0x8978, 0x63007f00},
+ {0x897c, 0x6f006000},
+ {0x8980, 0x43000004},
+ {0x8984, 0x00040001},
+ {0x8988, 0x42bf4380},
+ {0x898c, 0x48400007},
+ {0x8990, 0x42ef0004},
+ {0x8994, 0x4d100007},
+ {0x8998, 0x42000004},
+ {0x899c, 0x5f800006},
+ {0x89a0, 0x5a010007},
+ {0x89a4, 0x00044a08},
+ {0x89a8, 0x00054300},
+ {0x89ac, 0x73807381},
+ {0x89b0, 0x003f9300},
+ {0x89b4, 0x00000000},
+ {0x89b8, 0x00000000},
+ {0x89bc, 0x00020000},
+ {0x89c0, 0x5f800006},
+ {0x89c4, 0x99005f00},
+ {0x89c8, 0x43800004},
+ {0x89cc, 0x00074280},
+ {0x89d0, 0x00044800},
+ {0x89d4, 0x000742ef},
+ {0x89d8, 0x00044d00},
+ {0x89dc, 0x00064200},
+ {0x89e0, 0x60005f00},
+ {0x89e4, 0x5a000007},
+ {0x89e8, 0x48004a00},
+ {0x89ec, 0x43000004},
+ {0x89f0, 0x73000005},
+ {0x89f4, 0x43800001},
+ {0x89f8, 0x78006505},
+ {0x89fc, 0x7a007900},
+ {0x8a00, 0x43007b00},
+ {0x8a04, 0x43800001},
+ {0x8a08, 0x43006500},
+ {0x8a0c, 0x43800001},
+ {0x8a10, 0x7c006405},
+ {0x8a14, 0x7e007d00},
+ {0x8a18, 0x43007f00},
+ {0x8a1c, 0x43800001},
+ {0x8a20, 0x43006400},
+ {0x8a24, 0x00060001},
+ {0x8a28, 0x55025601},
+ {0x8a2c, 0x00055400},
+ {0x8a30, 0x7e127f00},
+ {0x8a34, 0x76007710},
+ {0x8a38, 0x74007500},
+ {0x8a3c, 0x42700004},
+ {0x8a40, 0x73810005},
+ {0x8a44, 0x00047380},
+ {0x8a48, 0x93004200},
+ {0x8a4c, 0x77000005},
+ {0x8a50, 0x56000006},
+ {0x8a54, 0x00060001},
+ {0x8a58, 0x5f005f80},
+ {0x8a5c, 0x00059900},
+ {0x8a60, 0x00067300},
+ {0x8a64, 0x63006380},
+ {0x8a68, 0x00019800},
+ {0x8a6c, 0x7b484380},
+ {0x8a70, 0x79007a90},
+ {0x8a74, 0x43007802},
+ {0x8a78, 0x32cd5503},
+ {0x8a7c, 0x7b384380},
+ {0x8a80, 0x79007a80},
+ {0x8a84, 0x43007802},
+ {0x8a88, 0x32cd5513},
+ {0x8a8c, 0x7b404380},
+ {0x8a90, 0x79007a00},
+ {0x8a94, 0x43007802},
+ {0x8a98, 0x74315523},
+ {0x8a9c, 0x8e007430},
+ {0x8aa0, 0x74010001},
+ {0x8aa4, 0x8e007400},
+ {0x8aa8, 0x74310001},
+ {0x8aac, 0x8e007430},
+ {0x8ab0, 0x57020001},
+ {0x8ab4, 0x97005700},
+ {0x8ab8, 0x42ef0001},
+ {0x8abc, 0x56005610},
+ {0x8ac0, 0x8c004200},
+ {0x8ac4, 0x4f780001},
+ {0x8ac8, 0x53884e00},
+ {0x8acc, 0x5b205201},
+ {0x8ad0, 0x5480e2f2},
+ {0x8ad4, 0x54815400},
+ {0x8ad8, 0x54825400},
+ {0x8adc, 0xe2fd5400},
+ {0x8ae0, 0x3012bf1d},
+ {0x8ae4, 0xe2bee2b6},
+ {0x8ae8, 0xe2d9e2c6},
+ {0x8aec, 0x5523e359},
+ {0x8af0, 0x5525e2cd},
+ {0x8af4, 0xe359e2d9},
+ {0x8af8, 0x54bf0001},
+ {0x8afc, 0x54a354c0},
+ {0x8b00, 0x54a454c1},
+ {0x8b04, 0xbf074c18},
+ {0x8b08, 0x54a454c2},
+ {0x8b0c, 0x54c1bf04},
+ {0x8b10, 0xbf0154a3},
+ {0x8b14, 0x54dfe36a},
+ {0x8b18, 0x54bf0001},
+ {0x8b1c, 0x050a54e5},
+ {0x8b20, 0x000154df},
+ {0x8b24, 0x43807b80},
+ {0x8b28, 0x7e007f40},
+ {0x8b2c, 0x7c027d00},
+ {0x8b30, 0x5b404300},
+ {0x8b34, 0x5c015501},
+ {0x8b38, 0x5480e2dd},
+ {0x8b3c, 0x54815400},
+ {0x8b40, 0x54825400},
+ {0x8b44, 0x7b005400},
+ {0x8b48, 0xbfe8e2fd},
+ {0x8b4c, 0x56103012},
+ {0x8b50, 0x8c005600},
+ {0x8b54, 0xe36d0001},
+ {0x8b58, 0xe36de36d},
+ {0x8b5c, 0x0001e36d},
+ {0x8b60, 0x57005704},
+ {0x8b64, 0x57089700},
+ {0x8b68, 0x97005700},
+ {0x8b6c, 0x57805781},
+ {0x8b70, 0x43809700},
+ {0x8b74, 0x5c010007},
+ {0x8b78, 0x00045c00},
+ {0x8b7c, 0x00014300},
+ {0x8b80, 0x0007427f},
+ {0x8b84, 0x62006280},
+ {0x8b88, 0x00049200},
+ {0x8b8c, 0x00014200},
+ {0x8b90, 0x0007427f},
+ {0x8b94, 0x63146394},
+ {0x8b98, 0x00049100},
+ {0x8b9c, 0x00014200},
+ {0x8ba0, 0x79010004},
+ {0x8ba4, 0xe3757420},
+ {0x8ba8, 0x57005710},
+ {0x8bac, 0xe375e375},
+ {0x8bb0, 0x549f0001},
+ {0x8bb4, 0x5c015400},
+ {0x8bb8, 0x540054df},
+ {0x8bbc, 0x00015c02},
+ {0x8bc0, 0x07145c01},
+ {0x8bc4, 0x5c025400},
+ {0x8bc8, 0x5c020001},
+ {0x8bcc, 0x54000714},
+ {0x8bd0, 0x00015c01},
+ {0x8bd4, 0x4c184c98},
+ {0x8bd8, 0x003f0001},
+ {0x8bdc, 0x00000000},
+ {0x8be0, 0x00000000},
+ {0x8be4, 0x00020000},
+ {0x8be8, 0x00000001},
+ {0x8bec, 0x00000000},
+ {0x8bf0, 0x00000000},
+ {0x8bf4, 0x00000000},
+ {0x8bf8, 0x00010000},
+ {0x8bfc, 0x5c020004},
+ {0x8c00, 0x66076204},
+ {0x8c04, 0x743070c0},
+ {0x8c08, 0x0c010901},
+ {0x8c0c, 0x00010ba6},
+ {0x8080, 0x00000004},
+ {0x8080, 0x00000000},
+ {0x8088, 0x00000000},
+};
+
+static const struct rtw89_txpwr_byrate_cfg rtw89_8852a_txpwr_byrate[] = {
+ { 0, 0, 0, 0, 4, 0x50505050, },
+ { 0, 0, 1, 0, 4, 0x50505050, },
+ { 0, 0, 1, 4, 4, 0x484c5050, },
+ { 0, 0, 2, 0, 4, 0x50505050, },
+ { 0, 0, 2, 4, 4, 0x44484c50, },
+ { 0, 0, 2, 8, 4, 0x34383c40, },
+ { 0, 0, 3, 0, 4, 0x50505050, },
+ { 0, 1, 2, 0, 4, 0x50505050, },
+ { 0, 1, 2, 4, 4, 0x44484c50, },
+ { 0, 1, 2, 8, 4, 0x34383c40, },
+ { 0, 1, 3, 0, 4, 0x50505050, },
+ { 0, 0, 4, 1, 4, 0x00000000, },
+ { 0, 0, 4, 0, 1, 0x00000000, },
+ { 1, 0, 1, 0, 4, 0x50505050, },
+ { 1, 0, 1, 4, 4, 0x484c5050, },
+ { 1, 0, 2, 0, 4, 0x50505050, },
+ { 1, 0, 2, 4, 4, 0x44484c50, },
+ { 1, 0, 2, 8, 4, 0x34383c40, },
+ { 1, 0, 3, 0, 4, 0x50505050, },
+ { 1, 1, 2, 0, 4, 0x50505050, },
+ { 1, 1, 2, 4, 4, 0x44484c50, },
+ { 1, 1, 2, 8, 4, 0x34383c40, },
+ { 1, 1, 3, 0, 4, 0x50505050, },
+ { 1, 0, 4, 0, 4, 0x00000000, },
+};
+
+static const u8 _txpwr_track_delta_swingidx_5gb_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+};
+
+static const u8 _txpwr_track_delta_swingidx_5gb_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+};
+
+static const u8 _txpwr_track_delta_swingidx_5ga_n[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 6, 7,
+ 7, 7, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 11},
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+};
+
+static const u8 _txpwr_track_delta_swingidx_5ga_p[][DELTA_SWINGIDX_SIZE] = {
+ {0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 6,
+ 6, 7, 7, 7, 8, 8, 8, 9, 9, 10, 10, 10, 11, 11},
+ {0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4,
+ 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8},
+ {0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5,
+ 5, 6, 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9},
+};
+
+static const u8 _txpwr_track_delta_swingidx_2gb_n[] = {
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
+
+static const u8 _txpwr_track_delta_swingidx_2gb_p[] = {
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
+
+static const u8 _txpwr_track_delta_swingidx_2ga_n[] = {
+ 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
+
+static const u8 _txpwr_track_delta_swingidx_2ga_p[] = {
+ 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
+
+static const u8 _txpwr_track_delta_swingidx_2g_cck_b_n[] = {
+ 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 4,
+ 4, 4, 4, 5, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7};
+
+static const u8 _txpwr_track_delta_swingidx_2g_cck_b_p[] = {
+ 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2,
+ 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3};
+
+static const u8 _txpwr_track_delta_swingidx_2g_cck_a_n[] = {
+ 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
+ 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5};
+
+static const u8 _txpwr_track_delta_swingidx_2g_cck_a_p[] = {
+ 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5,
+ 6, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10};
+
+const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][0][0][0][0] = 56,
+ [0][0][0][0][0][1] = 56,
+ [0][0][0][0][0][2] = 56,
+ [0][0][0][0][0][3] = 56,
+ [0][0][0][0][0][4] = 56,
+ [0][0][0][0][0][5] = 56,
+ [0][0][0][0][0][6] = 56,
+ [0][0][0][0][0][7] = 56,
+ [0][0][0][0][0][8] = 56,
+ [0][0][0][0][0][9] = 56,
+ [0][0][0][0][0][10] = 56,
+ [0][0][0][0][0][11] = 56,
+ [0][0][0][0][0][12] = 48,
+ [0][0][0][0][0][13] = 76,
+ [0][1][0][0][0][0] = 44,
+ [0][1][0][0][0][1] = 44,
+ [0][1][0][0][0][2] = 44,
+ [0][1][0][0][0][3] = 44,
+ [0][1][0][0][0][4] = 44,
+ [0][1][0][0][0][5] = 44,
+ [0][1][0][0][0][6] = 44,
+ [0][1][0][0][0][7] = 44,
+ [0][1][0][0][0][8] = 44,
+ [0][1][0][0][0][9] = 44,
+ [0][1][0][0][0][10] = 44,
+ [0][1][0][0][0][11] = 44,
+ [0][1][0][0][0][12] = 38,
+ [0][1][0][0][0][13] = 64,
+ [1][0][0][0][0][0] = 0,
+ [1][0][0][0][0][1] = 0,
+ [1][0][0][0][0][2] = 58,
+ [1][0][0][0][0][3] = 58,
+ [1][0][0][0][0][4] = 58,
+ [1][0][0][0][0][5] = 58,
+ [1][0][0][0][0][6] = 46,
+ [1][0][0][0][0][7] = 46,
+ [1][0][0][0][0][8] = 46,
+ [1][0][0][0][0][9] = 32,
+ [1][0][0][0][0][10] = 32,
+ [1][0][0][0][0][11] = 0,
+ [1][0][0][0][0][12] = 0,
+ [1][0][0][0][0][13] = 0,
+ [1][1][0][0][0][0] = 0,
+ [1][1][0][0][0][1] = 0,
+ [1][1][0][0][0][2] = 46,
+ [1][1][0][0][0][3] = 46,
+ [1][1][0][0][0][4] = 46,
+ [1][1][0][0][0][5] = 46,
+ [1][1][0][0][0][6] = 46,
+ [1][1][0][0][0][7] = 46,
+ [1][1][0][0][0][8] = 46,
+ [1][1][0][0][0][9] = 24,
+ [1][1][0][0][0][10] = 24,
+ [1][1][0][0][0][11] = 0,
+ [1][1][0][0][0][12] = 0,
+ [1][1][0][0][0][13] = 0,
+ [0][0][1][0][0][0] = 58,
+ [0][0][1][0][0][1] = 58,
+ [0][0][1][0][0][2] = 58,
+ [0][0][1][0][0][3] = 58,
+ [0][0][1][0][0][4] = 58,
+ [0][0][1][0][0][5] = 58,
+ [0][0][1][0][0][6] = 58,
+ [0][0][1][0][0][7] = 58,
+ [0][0][1][0][0][8] = 58,
+ [0][0][1][0][0][9] = 58,
+ [0][0][1][0][0][10] = 58,
+ [0][0][1][0][0][11] = 56,
+ [0][0][1][0][0][12] = 52,
+ [0][0][1][0][0][13] = 0,
+ [0][1][1][0][0][0] = 46,
+ [0][1][1][0][0][1] = 46,
+ [0][1][1][0][0][2] = 46,
+ [0][1][1][0][0][3] = 46,
+ [0][1][1][0][0][4] = 46,
+ [0][1][1][0][0][5] = 46,
+ [0][1][1][0][0][6] = 46,
+ [0][1][1][0][0][7] = 46,
+ [0][1][1][0][0][8] = 46,
+ [0][1][1][0][0][9] = 46,
+ [0][1][1][0][0][10] = 46,
+ [0][1][1][0][0][11] = 42,
+ [0][1][1][0][0][12] = 40,
+ [0][1][1][0][0][13] = 0,
+ [0][0][2][0][0][0] = 58,
+ [0][0][2][0][0][1] = 58,
+ [0][0][2][0][0][2] = 58,
+ [0][0][2][0][0][3] = 58,
+ [0][0][2][0][0][4] = 58,
+ [0][0][2][0][0][5] = 58,
+ [0][0][2][0][0][6] = 58,
+ [0][0][2][0][0][7] = 58,
+ [0][0][2][0][0][8] = 58,
+ [0][0][2][0][0][9] = 58,
+ [0][0][2][0][0][10] = 58,
+ [0][0][2][0][0][11] = 54,
+ [0][0][2][0][0][12] = 50,
+ [0][0][2][0][0][13] = 0,
+ [0][1][2][0][0][0] = 46,
+ [0][1][2][0][0][1] = 46,
+ [0][1][2][0][0][2] = 46,
+ [0][1][2][0][0][3] = 46,
+ [0][1][2][0][0][4] = 46,
+ [0][1][2][0][0][5] = 46,
+ [0][1][2][0][0][6] = 46,
+ [0][1][2][0][0][7] = 46,
+ [0][1][2][0][0][8] = 46,
+ [0][1][2][0][0][9] = 46,
+ [0][1][2][0][0][10] = 46,
+ [0][1][2][0][0][11] = 42,
+ [0][1][2][0][0][12] = 40,
+ [0][1][2][0][0][13] = 0,
+ [0][1][2][1][0][0] = 34,
+ [0][1][2][1][0][1] = 34,
+ [0][1][2][1][0][2] = 34,
+ [0][1][2][1][0][3] = 34,
+ [0][1][2][1][0][4] = 34,
+ [0][1][2][1][0][5] = 34,
+ [0][1][2][1][0][6] = 34,
+ [0][1][2][1][0][7] = 34,
+ [0][1][2][1][0][8] = 34,
+ [0][1][2][1][0][9] = 34,
+ [0][1][2][1][0][10] = 34,
+ [0][1][2][1][0][11] = 34,
+ [0][1][2][1][0][12] = 34,
+ [0][1][2][1][0][13] = 0,
+ [1][0][2][0][0][0] = 0,
+ [1][0][2][0][0][1] = 0,
+ [1][0][2][0][0][2] = 56,
+ [1][0][2][0][0][3] = 56,
+ [1][0][2][0][0][4] = 58,
+ [1][0][2][0][0][5] = 58,
+ [1][0][2][0][0][6] = 54,
+ [1][0][2][0][0][7] = 50,
+ [1][0][2][0][0][8] = 50,
+ [1][0][2][0][0][9] = 42,
+ [1][0][2][0][0][10] = 40,
+ [1][0][2][0][0][11] = 0,
+ [1][0][2][0][0][12] = 0,
+ [1][0][2][0][0][13] = 0,
+ [1][1][2][0][0][0] = 0,
+ [1][1][2][0][0][1] = 0,
+ [1][1][2][0][0][2] = 46,
+ [1][1][2][0][0][3] = 46,
+ [1][1][2][0][0][4] = 46,
+ [1][1][2][0][0][5] = 46,
+ [1][1][2][0][0][6] = 46,
+ [1][1][2][0][0][7] = 46,
+ [1][1][2][0][0][8] = 46,
+ [1][1][2][0][0][9] = 38,
+ [1][1][2][0][0][10] = 36,
+ [1][1][2][0][0][11] = 0,
+ [1][1][2][0][0][12] = 0,
+ [1][1][2][0][0][13] = 0,
+ [1][1][2][1][0][0] = 0,
+ [1][1][2][1][0][1] = 0,
+ [1][1][2][1][0][2] = 34,
+ [1][1][2][1][0][3] = 34,
+ [1][1][2][1][0][4] = 34,
+ [1][1][2][1][0][5] = 34,
+ [1][1][2][1][0][6] = 34,
+ [1][1][2][1][0][7] = 34,
+ [1][1][2][1][0][8] = 34,
+ [1][1][2][1][0][9] = 34,
+ [1][1][2][1][0][10] = 34,
+ [1][1][2][1][0][11] = 0,
+ [1][1][2][1][0][12] = 0,
+ [1][1][2][1][0][13] = 0,
+ [0][0][0][0][2][0] = 76,
+ [0][0][0][0][1][0] = 56,
+ [0][0][0][0][3][0] = 68,
+ [0][0][0][0][5][0] = 76,
+ [0][0][0][0][6][0] = 56,
+ [0][0][0][0][9][0] = 56,
+ [0][0][0][0][8][0] = 60,
+ [0][0][0][0][11][0] = 56,
+ [0][0][0][0][2][1] = 76,
+ [0][0][0][0][1][1] = 56,
+ [0][0][0][0][3][1] = 68,
+ [0][0][0][0][5][1] = 76,
+ [0][0][0][0][6][1] = 56,
+ [0][0][0][0][9][1] = 56,
+ [0][0][0][0][8][1] = 60,
+ [0][0][0][0][11][1] = 56,
+ [0][0][0][0][2][2] = 76,
+ [0][0][0][0][1][2] = 56,
+ [0][0][0][0][3][2] = 68,
+ [0][0][0][0][5][2] = 76,
+ [0][0][0][0][6][2] = 56,
+ [0][0][0][0][9][2] = 56,
+ [0][0][0][0][8][2] = 60,
+ [0][0][0][0][11][2] = 56,
+ [0][0][0][0][2][3] = 76,
+ [0][0][0][0][1][3] = 56,
+ [0][0][0][0][3][3] = 68,
+ [0][0][0][0][5][3] = 76,
+ [0][0][0][0][6][3] = 56,
+ [0][0][0][0][9][3] = 56,
+ [0][0][0][0][8][3] = 60,
+ [0][0][0][0][11][3] = 56,
+ [0][0][0][0][2][4] = 76,
+ [0][0][0][0][1][4] = 56,
+ [0][0][0][0][3][4] = 68,
+ [0][0][0][0][5][4] = 76,
+ [0][0][0][0][6][4] = 56,
+ [0][0][0][0][9][4] = 56,
+ [0][0][0][0][8][4] = 60,
+ [0][0][0][0][11][4] = 56,
+ [0][0][0][0][2][5] = 76,
+ [0][0][0][0][1][5] = 56,
+ [0][0][0][0][3][5] = 68,
+ [0][0][0][0][5][5] = 76,
+ [0][0][0][0][6][5] = 56,
+ [0][0][0][0][9][5] = 56,
+ [0][0][0][0][8][5] = 60,
+ [0][0][0][0][11][5] = 56,
+ [0][0][0][0][2][6] = 76,
+ [0][0][0][0][1][6] = 56,
+ [0][0][0][0][3][6] = 68,
+ [0][0][0][0][5][6] = 76,
+ [0][0][0][0][6][6] = 56,
+ [0][0][0][0][9][6] = 56,
+ [0][0][0][0][8][6] = 60,
+ [0][0][0][0][11][6] = 56,
+ [0][0][0][0][2][7] = 76,
+ [0][0][0][0][1][7] = 56,
+ [0][0][0][0][3][7] = 68,
+ [0][0][0][0][5][7] = 76,
+ [0][0][0][0][6][7] = 56,
+ [0][0][0][0][9][7] = 56,
+ [0][0][0][0][8][7] = 60,
+ [0][0][0][0][11][7] = 56,
+ [0][0][0][0][2][8] = 76,
+ [0][0][0][0][1][8] = 56,
+ [0][0][0][0][3][8] = 68,
+ [0][0][0][0][5][8] = 76,
+ [0][0][0][0][6][8] = 56,
+ [0][0][0][0][9][8] = 56,
+ [0][0][0][0][8][8] = 60,
+ [0][0][0][0][11][8] = 56,
+ [0][0][0][0][2][9] = 76,
+ [0][0][0][0][1][9] = 56,
+ [0][0][0][0][3][9] = 68,
+ [0][0][0][0][5][9] = 76,
+ [0][0][0][0][6][9] = 56,
+ [0][0][0][0][9][9] = 56,
+ [0][0][0][0][8][9] = 60,
+ [0][0][0][0][11][9] = 56,
+ [0][0][0][0][2][10] = 76,
+ [0][0][0][0][1][10] = 56,
+ [0][0][0][0][3][10] = 68,
+ [0][0][0][0][5][10] = 76,
+ [0][0][0][0][6][10] = 56,
+ [0][0][0][0][9][10] = 56,
+ [0][0][0][0][8][10] = 60,
+ [0][0][0][0][11][10] = 56,
+ [0][0][0][0][2][11] = 68,
+ [0][0][0][0][1][11] = 56,
+ [0][0][0][0][3][11] = 68,
+ [0][0][0][0][5][11] = 68,
+ [0][0][0][0][6][11] = 56,
+ [0][0][0][0][9][11] = 56,
+ [0][0][0][0][8][11] = 60,
+ [0][0][0][0][11][11] = 56,
+ [0][0][0][0][2][12] = 48,
+ [0][0][0][0][1][12] = 56,
+ [0][0][0][0][3][12] = 68,
+ [0][0][0][0][5][12] = 48,
+ [0][0][0][0][6][12] = 56,
+ [0][0][0][0][9][12] = 56,
+ [0][0][0][0][8][12] = 60,
+ [0][0][0][0][11][12] = 56,
+ [0][0][0][0][2][13] = 127,
+ [0][0][0][0][1][13] = 127,
+ [0][0][0][0][3][13] = 76,
+ [0][0][0][0][5][13] = 127,
+ [0][0][0][0][6][13] = 127,
+ [0][0][0][0][9][13] = 127,
+ [0][0][0][0][8][13] = 127,
+ [0][0][0][0][11][13] = 127,
+ [0][1][0][0][2][0] = 74,
+ [0][1][0][0][1][0] = 44,
+ [0][1][0][0][3][0] = 56,
+ [0][1][0][0][5][0] = 74,
+ [0][1][0][0][6][0] = 44,
+ [0][1][0][0][9][0] = 44,
+ [0][1][0][0][8][0] = 48,
+ [0][1][0][0][11][0] = 44,
+ [0][1][0][0][2][1] = 76,
+ [0][1][0][0][1][1] = 44,
+ [0][1][0][0][3][1] = 56,
+ [0][1][0][0][5][1] = 76,
+ [0][1][0][0][6][1] = 44,
+ [0][1][0][0][9][1] = 44,
+ [0][1][0][0][8][1] = 48,
+ [0][1][0][0][11][1] = 44,
+ [0][1][0][0][2][2] = 76,
+ [0][1][0][0][1][2] = 44,
+ [0][1][0][0][3][2] = 56,
+ [0][1][0][0][5][2] = 76,
+ [0][1][0][0][6][2] = 44,
+ [0][1][0][0][9][2] = 44,
+ [0][1][0][0][8][2] = 48,
+ [0][1][0][0][11][2] = 44,
+ [0][1][0][0][2][3] = 76,
+ [0][1][0][0][1][3] = 44,
+ [0][1][0][0][3][3] = 56,
+ [0][1][0][0][5][3] = 76,
+ [0][1][0][0][6][3] = 44,
+ [0][1][0][0][9][3] = 44,
+ [0][1][0][0][8][3] = 48,
+ [0][1][0][0][11][3] = 44,
+ [0][1][0][0][2][4] = 76,
+ [0][1][0][0][1][4] = 44,
+ [0][1][0][0][3][4] = 56,
+ [0][1][0][0][5][4] = 76,
+ [0][1][0][0][6][4] = 44,
+ [0][1][0][0][9][4] = 44,
+ [0][1][0][0][8][4] = 48,
+ [0][1][0][0][11][4] = 44,
+ [0][1][0][0][2][5] = 76,
+ [0][1][0][0][1][5] = 44,
+ [0][1][0][0][3][5] = 56,
+ [0][1][0][0][5][5] = 76,
+ [0][1][0][0][6][5] = 44,
+ [0][1][0][0][9][5] = 44,
+ [0][1][0][0][8][5] = 48,
+ [0][1][0][0][11][5] = 44,
+ [0][1][0][0][2][6] = 76,
+ [0][1][0][0][1][6] = 44,
+ [0][1][0][0][3][6] = 56,
+ [0][1][0][0][5][6] = 76,
+ [0][1][0][0][6][6] = 44,
+ [0][1][0][0][9][6] = 44,
+ [0][1][0][0][8][6] = 48,
+ [0][1][0][0][11][6] = 44,
+ [0][1][0][0][2][7] = 76,
+ [0][1][0][0][1][7] = 44,
+ [0][1][0][0][3][7] = 56,
+ [0][1][0][0][5][7] = 76,
+ [0][1][0][0][6][7] = 44,
+ [0][1][0][0][9][7] = 44,
+ [0][1][0][0][8][7] = 48,
+ [0][1][0][0][11][7] = 44,
+ [0][1][0][0][2][8] = 76,
+ [0][1][0][0][1][8] = 44,
+ [0][1][0][0][3][8] = 56,
+ [0][1][0][0][5][8] = 76,
+ [0][1][0][0][6][8] = 44,
+ [0][1][0][0][9][8] = 44,
+ [0][1][0][0][8][8] = 48,
+ [0][1][0][0][11][8] = 44,
+ [0][1][0][0][2][9] = 76,
+ [0][1][0][0][1][9] = 44,
+ [0][1][0][0][3][9] = 56,
+ [0][1][0][0][5][9] = 76,
+ [0][1][0][0][6][9] = 44,
+ [0][1][0][0][9][9] = 44,
+ [0][1][0][0][8][9] = 48,
+ [0][1][0][0][11][9] = 44,
+ [0][1][0][0][2][10] = 62,
+ [0][1][0][0][1][10] = 44,
+ [0][1][0][0][3][10] = 56,
+ [0][1][0][0][5][10] = 62,
+ [0][1][0][0][6][10] = 44,
+ [0][1][0][0][9][10] = 44,
+ [0][1][0][0][8][10] = 48,
+ [0][1][0][0][11][10] = 44,
+ [0][1][0][0][2][11] = 52,
+ [0][1][0][0][1][11] = 44,
+ [0][1][0][0][3][11] = 56,
+ [0][1][0][0][5][11] = 52,
+ [0][1][0][0][6][11] = 44,
+ [0][1][0][0][9][11] = 44,
+ [0][1][0][0][8][11] = 48,
+ [0][1][0][0][11][11] = 44,
+ [0][1][0][0][2][12] = 38,
+ [0][1][0][0][1][12] = 44,
+ [0][1][0][0][3][12] = 56,
+ [0][1][0][0][5][12] = 38,
+ [0][1][0][0][6][12] = 44,
+ [0][1][0][0][9][12] = 44,
+ [0][1][0][0][8][12] = 48,
+ [0][1][0][0][11][12] = 44,
+ [0][1][0][0][2][13] = 127,
+ [0][1][0][0][1][13] = 127,
+ [0][1][0][0][3][13] = 64,
+ [0][1][0][0][5][13] = 127,
+ [0][1][0][0][6][13] = 127,
+ [0][1][0][0][9][13] = 127,
+ [0][1][0][0][8][13] = 127,
+ [0][1][0][0][11][13] = 127,
+ [1][0][0][0][2][0] = 127,
+ [1][0][0][0][1][0] = 127,
+ [1][0][0][0][3][0] = 127,
+ [1][0][0][0][5][0] = 127,
+ [1][0][0][0][6][0] = 127,
+ [1][0][0][0][9][0] = 127,
+ [1][0][0][0][8][0] = 127,
+ [1][0][0][0][11][0] = 127,
+ [1][0][0][0][2][1] = 127,
+ [1][0][0][0][1][1] = 127,
+ [1][0][0][0][3][1] = 127,
+ [1][0][0][0][5][1] = 127,
+ [1][0][0][0][6][1] = 127,
+ [1][0][0][0][9][1] = 127,
+ [1][0][0][0][8][1] = 127,
+ [1][0][0][0][11][1] = 127,
+ [1][0][0][0][2][2] = 60,
+ [1][0][0][0][1][2] = 58,
+ [1][0][0][0][3][2] = 68,
+ [1][0][0][0][5][2] = 60,
+ [1][0][0][0][6][2] = 58,
+ [1][0][0][0][9][2] = 58,
+ [1][0][0][0][8][2] = 60,
+ [1][0][0][0][11][2] = 58,
+ [1][0][0][0][2][3] = 60,
+ [1][0][0][0][1][3] = 58,
+ [1][0][0][0][3][3] = 68,
+ [1][0][0][0][5][3] = 60,
+ [1][0][0][0][6][3] = 58,
+ [1][0][0][0][9][3] = 58,
+ [1][0][0][0][8][3] = 60,
+ [1][0][0][0][11][3] = 58,
+ [1][0][0][0][2][4] = 60,
+ [1][0][0][0][1][4] = 58,
+ [1][0][0][0][3][4] = 68,
+ [1][0][0][0][5][4] = 60,
+ [1][0][0][0][6][4] = 58,
+ [1][0][0][0][9][4] = 58,
+ [1][0][0][0][8][4] = 60,
+ [1][0][0][0][11][4] = 58,
+ [1][0][0][0][2][5] = 60,
+ [1][0][0][0][1][5] = 58,
+ [1][0][0][0][3][5] = 68,
+ [1][0][0][0][5][5] = 60,
+ [1][0][0][0][6][5] = 58,
+ [1][0][0][0][9][5] = 58,
+ [1][0][0][0][8][5] = 60,
+ [1][0][0][0][11][5] = 58,
+ [1][0][0][0][2][6] = 46,
+ [1][0][0][0][1][6] = 58,
+ [1][0][0][0][3][6] = 68,
+ [1][0][0][0][5][6] = 46,
+ [1][0][0][0][6][6] = 58,
+ [1][0][0][0][9][6] = 58,
+ [1][0][0][0][8][6] = 60,
+ [1][0][0][0][11][6] = 58,
+ [1][0][0][0][2][7] = 46,
+ [1][0][0][0][1][7] = 58,
+ [1][0][0][0][3][7] = 68,
+ [1][0][0][0][5][7] = 46,
+ [1][0][0][0][6][7] = 58,
+ [1][0][0][0][9][7] = 58,
+ [1][0][0][0][8][7] = 60,
+ [1][0][0][0][11][7] = 58,
+ [1][0][0][0][2][8] = 46,
+ [1][0][0][0][1][8] = 58,
+ [1][0][0][0][3][8] = 68,
+ [1][0][0][0][5][8] = 46,
+ [1][0][0][0][6][8] = 58,
+ [1][0][0][0][9][8] = 58,
+ [1][0][0][0][8][8] = 60,
+ [1][0][0][0][11][8] = 58,
+ [1][0][0][0][2][9] = 32,
+ [1][0][0][0][1][9] = 58,
+ [1][0][0][0][3][9] = 68,
+ [1][0][0][0][5][9] = 32,
+ [1][0][0][0][6][9] = 58,
+ [1][0][0][0][9][9] = 58,
+ [1][0][0][0][8][9] = 60,
+ [1][0][0][0][11][9] = 58,
+ [1][0][0][0][2][10] = 32,
+ [1][0][0][0][1][10] = 58,
+ [1][0][0][0][3][10] = 68,
+ [1][0][0][0][5][10] = 32,
+ [1][0][0][0][6][10] = 58,
+ [1][0][0][0][9][10] = 58,
+ [1][0][0][0][8][10] = 60,
+ [1][0][0][0][11][10] = 58,
+ [1][0][0][0][2][11] = 127,
+ [1][0][0][0][1][11] = 127,
+ [1][0][0][0][3][11] = 127,
+ [1][0][0][0][5][11] = 127,
+ [1][0][0][0][6][11] = 127,
+ [1][0][0][0][9][11] = 127,
+ [1][0][0][0][8][11] = 127,
+ [1][0][0][0][11][11] = 127,
+ [1][0][0][0][2][12] = 127,
+ [1][0][0][0][1][12] = 127,
+ [1][0][0][0][3][12] = 127,
+ [1][0][0][0][5][12] = 127,
+ [1][0][0][0][6][12] = 127,
+ [1][0][0][0][9][12] = 127,
+ [1][0][0][0][8][12] = 127,
+ [1][0][0][0][11][12] = 127,
+ [1][0][0][0][2][13] = 127,
+ [1][0][0][0][1][13] = 127,
+ [1][0][0][0][3][13] = 127,
+ [1][0][0][0][5][13] = 127,
+ [1][0][0][0][6][13] = 127,
+ [1][0][0][0][9][13] = 127,
+ [1][0][0][0][8][13] = 127,
+ [1][0][0][0][11][13] = 127,
+ [1][1][0][0][2][0] = 127,
+ [1][1][0][0][1][0] = 127,
+ [1][1][0][0][3][0] = 127,
+ [1][1][0][0][5][0] = 127,
+ [1][1][0][0][6][0] = 127,
+ [1][1][0][0][9][0] = 127,
+ [1][1][0][0][8][0] = 127,
+ [1][1][0][0][11][0] = 127,
+ [1][1][0][0][2][1] = 127,
+ [1][1][0][0][1][1] = 127,
+ [1][1][0][0][3][1] = 127,
+ [1][1][0][0][5][1] = 127,
+ [1][1][0][0][6][1] = 127,
+ [1][1][0][0][9][1] = 127,
+ [1][1][0][0][8][1] = 127,
+ [1][1][0][0][11][1] = 127,
+ [1][1][0][0][2][2] = 48,
+ [1][1][0][0][1][2] = 46,
+ [1][1][0][0][3][2] = 56,
+ [1][1][0][0][5][2] = 48,
+ [1][1][0][0][6][2] = 46,
+ [1][1][0][0][9][2] = 46,
+ [1][1][0][0][8][2] = 48,
+ [1][1][0][0][11][2] = 46,
+ [1][1][0][0][2][3] = 48,
+ [1][1][0][0][1][3] = 46,
+ [1][1][0][0][3][3] = 56,
+ [1][1][0][0][5][3] = 48,
+ [1][1][0][0][6][3] = 46,
+ [1][1][0][0][9][3] = 46,
+ [1][1][0][0][8][3] = 48,
+ [1][1][0][0][11][3] = 46,
+ [1][1][0][0][2][4] = 48,
+ [1][1][0][0][1][4] = 46,
+ [1][1][0][0][3][4] = 56,
+ [1][1][0][0][5][4] = 48,
+ [1][1][0][0][6][4] = 46,
+ [1][1][0][0][9][4] = 46,
+ [1][1][0][0][8][4] = 48,
+ [1][1][0][0][11][4] = 46,
+ [1][1][0][0][2][5] = 58,
+ [1][1][0][0][1][5] = 46,
+ [1][1][0][0][3][5] = 56,
+ [1][1][0][0][5][5] = 58,
+ [1][1][0][0][6][5] = 46,
+ [1][1][0][0][9][5] = 46,
+ [1][1][0][0][8][5] = 48,
+ [1][1][0][0][11][5] = 46,
+ [1][1][0][0][2][6] = 46,
+ [1][1][0][0][1][6] = 46,
+ [1][1][0][0][3][6] = 56,
+ [1][1][0][0][5][6] = 46,
+ [1][1][0][0][6][6] = 46,
+ [1][1][0][0][9][6] = 46,
+ [1][1][0][0][8][6] = 48,
+ [1][1][0][0][11][6] = 46,
+ [1][1][0][0][2][7] = 46,
+ [1][1][0][0][1][7] = 46,
+ [1][1][0][0][3][7] = 56,
+ [1][1][0][0][5][7] = 46,
+ [1][1][0][0][6][7] = 46,
+ [1][1][0][0][9][7] = 46,
+ [1][1][0][0][8][7] = 48,
+ [1][1][0][0][11][7] = 46,
+ [1][1][0][0][2][8] = 46,
+ [1][1][0][0][1][8] = 46,
+ [1][1][0][0][3][8] = 56,
+ [1][1][0][0][5][8] = 46,
+ [1][1][0][0][6][8] = 46,
+ [1][1][0][0][9][8] = 46,
+ [1][1][0][0][8][8] = 48,
+ [1][1][0][0][11][8] = 46,
+ [1][1][0][0][2][9] = 24,
+ [1][1][0][0][1][9] = 46,
+ [1][1][0][0][3][9] = 56,
+ [1][1][0][0][5][9] = 24,
+ [1][1][0][0][6][9] = 46,
+ [1][1][0][0][9][9] = 46,
+ [1][1][0][0][8][9] = 48,
+ [1][1][0][0][11][9] = 46,
+ [1][1][0][0][2][10] = 24,
+ [1][1][0][0][1][10] = 46,
+ [1][1][0][0][3][10] = 56,
+ [1][1][0][0][5][10] = 24,
+ [1][1][0][0][6][10] = 46,
+ [1][1][0][0][9][10] = 46,
+ [1][1][0][0][8][10] = 48,
+ [1][1][0][0][11][10] = 46,
+ [1][1][0][0][2][11] = 127,
+ [1][1][0][0][1][11] = 127,
+ [1][1][0][0][3][11] = 127,
+ [1][1][0][0][5][11] = 127,
+ [1][1][0][0][6][11] = 127,
+ [1][1][0][0][9][11] = 127,
+ [1][1][0][0][8][11] = 127,
+ [1][1][0][0][11][11] = 127,
+ [1][1][0][0][2][12] = 127,
+ [1][1][0][0][1][12] = 127,
+ [1][1][0][0][3][12] = 127,
+ [1][1][0][0][5][12] = 127,
+ [1][1][0][0][6][12] = 127,
+ [1][1][0][0][9][12] = 127,
+ [1][1][0][0][8][12] = 127,
+ [1][1][0][0][11][12] = 127,
+ [1][1][0][0][2][13] = 127,
+ [1][1][0][0][1][13] = 127,
+ [1][1][0][0][3][13] = 127,
+ [1][1][0][0][5][13] = 127,
+ [1][1][0][0][6][13] = 127,
+ [1][1][0][0][9][13] = 127,
+ [1][1][0][0][8][13] = 127,
+ [1][1][0][0][11][13] = 127,
+ [0][0][1][0][2][0] = 66,
+ [0][0][1][0][1][0] = 58,
+ [0][0][1][0][3][0] = 76,
+ [0][0][1][0][5][0] = 66,
+ [0][0][1][0][6][0] = 58,
+ [0][0][1][0][9][0] = 58,
+ [0][0][1][0][8][0] = 60,
+ [0][0][1][0][11][0] = 58,
+ [0][0][1][0][2][1] = 66,
+ [0][0][1][0][1][1] = 58,
+ [0][0][1][0][3][1] = 76,
+ [0][0][1][0][5][1] = 66,
+ [0][0][1][0][6][1] = 58,
+ [0][0][1][0][9][1] = 58,
+ [0][0][1][0][8][1] = 60,
+ [0][0][1][0][11][1] = 58,
+ [0][0][1][0][2][2] = 70,
+ [0][0][1][0][1][2] = 58,
+ [0][0][1][0][3][2] = 76,
+ [0][0][1][0][5][2] = 70,
+ [0][0][1][0][6][2] = 58,
+ [0][0][1][0][9][2] = 58,
+ [0][0][1][0][8][2] = 60,
+ [0][0][1][0][11][2] = 58,
+ [0][0][1][0][2][3] = 74,
+ [0][0][1][0][1][3] = 58,
+ [0][0][1][0][3][3] = 76,
+ [0][0][1][0][5][3] = 74,
+ [0][0][1][0][6][3] = 58,
+ [0][0][1][0][9][3] = 58,
+ [0][0][1][0][8][3] = 60,
+ [0][0][1][0][11][3] = 58,
+ [0][0][1][0][2][4] = 78,
+ [0][0][1][0][1][4] = 58,
+ [0][0][1][0][3][4] = 76,
+ [0][0][1][0][5][4] = 78,
+ [0][0][1][0][6][4] = 58,
+ [0][0][1][0][9][4] = 58,
+ [0][0][1][0][8][4] = 60,
+ [0][0][1][0][11][4] = 58,
+ [0][0][1][0][2][5] = 78,
+ [0][0][1][0][1][5] = 58,
+ [0][0][1][0][3][5] = 76,
+ [0][0][1][0][5][5] = 78,
+ [0][0][1][0][6][5] = 58,
+ [0][0][1][0][9][5] = 58,
+ [0][0][1][0][8][5] = 60,
+ [0][0][1][0][11][5] = 58,
+ [0][0][1][0][2][6] = 78,
+ [0][0][1][0][1][6] = 58,
+ [0][0][1][0][3][6] = 76,
+ [0][0][1][0][5][6] = 78,
+ [0][0][1][0][6][6] = 58,
+ [0][0][1][0][9][6] = 58,
+ [0][0][1][0][8][6] = 60,
+ [0][0][1][0][11][6] = 58,
+ [0][0][1][0][2][7] = 74,
+ [0][0][1][0][1][7] = 58,
+ [0][0][1][0][3][7] = 76,
+ [0][0][1][0][5][7] = 74,
+ [0][0][1][0][6][7] = 58,
+ [0][0][1][0][9][7] = 58,
+ [0][0][1][0][8][7] = 60,
+ [0][0][1][0][11][7] = 58,
+ [0][0][1][0][2][8] = 70,
+ [0][0][1][0][1][8] = 58,
+ [0][0][1][0][3][8] = 76,
+ [0][0][1][0][5][8] = 70,
+ [0][0][1][0][6][8] = 58,
+ [0][0][1][0][9][8] = 58,
+ [0][0][1][0][8][8] = 60,
+ [0][0][1][0][11][8] = 58,
+ [0][0][1][0][2][9] = 66,
+ [0][0][1][0][1][9] = 58,
+ [0][0][1][0][3][9] = 76,
+ [0][0][1][0][5][9] = 66,
+ [0][0][1][0][6][9] = 58,
+ [0][0][1][0][9][9] = 58,
+ [0][0][1][0][8][9] = 60,
+ [0][0][1][0][11][9] = 58,
+ [0][0][1][0][2][10] = 66,
+ [0][0][1][0][1][10] = 58,
+ [0][0][1][0][3][10] = 76,
+ [0][0][1][0][5][10] = 66,
+ [0][0][1][0][6][10] = 58,
+ [0][0][1][0][9][10] = 58,
+ [0][0][1][0][8][10] = 60,
+ [0][0][1][0][11][10] = 58,
+ [0][0][1][0][2][11] = 56,
+ [0][0][1][0][1][11] = 58,
+ [0][0][1][0][3][11] = 76,
+ [0][0][1][0][5][11] = 56,
+ [0][0][1][0][6][11] = 58,
+ [0][0][1][0][9][11] = 58,
+ [0][0][1][0][8][11] = 60,
+ [0][0][1][0][11][11] = 58,
+ [0][0][1][0][2][12] = 52,
+ [0][0][1][0][1][12] = 58,
+ [0][0][1][0][3][12] = 76,
+ [0][0][1][0][5][12] = 52,
+ [0][0][1][0][6][12] = 58,
+ [0][0][1][0][9][12] = 58,
+ [0][0][1][0][8][12] = 60,
+ [0][0][1][0][11][12] = 58,
+ [0][0][1][0][2][13] = 127,
+ [0][0][1][0][1][13] = 127,
+ [0][0][1][0][3][13] = 127,
+ [0][0][1][0][5][13] = 127,
+ [0][0][1][0][6][13] = 127,
+ [0][0][1][0][9][13] = 127,
+ [0][0][1][0][8][13] = 127,
+ [0][0][1][0][11][13] = 127,
+ [0][1][1][0][2][0] = 62,
+ [0][1][1][0][1][0] = 46,
+ [0][1][1][0][3][0] = 64,
+ [0][1][1][0][5][0] = 62,
+ [0][1][1][0][6][0] = 46,
+ [0][1][1][0][9][0] = 46,
+ [0][1][1][0][8][0] = 48,
+ [0][1][1][0][11][0] = 46,
+ [0][1][1][0][2][1] = 62,
+ [0][1][1][0][1][1] = 46,
+ [0][1][1][0][3][1] = 64,
+ [0][1][1][0][5][1] = 62,
+ [0][1][1][0][6][1] = 46,
+ [0][1][1][0][9][1] = 46,
+ [0][1][1][0][8][1] = 48,
+ [0][1][1][0][11][1] = 46,
+ [0][1][1][0][2][2] = 66,
+ [0][1][1][0][1][2] = 46,
+ [0][1][1][0][3][2] = 64,
+ [0][1][1][0][5][2] = 66,
+ [0][1][1][0][6][2] = 46,
+ [0][1][1][0][9][2] = 46,
+ [0][1][1][0][8][2] = 48,
+ [0][1][1][0][11][2] = 46,
+ [0][1][1][0][2][3] = 70,
+ [0][1][1][0][1][3] = 46,
+ [0][1][1][0][3][3] = 64,
+ [0][1][1][0][5][3] = 70,
+ [0][1][1][0][6][3] = 46,
+ [0][1][1][0][9][3] = 46,
+ [0][1][1][0][8][3] = 48,
+ [0][1][1][0][11][3] = 46,
+ [0][1][1][0][2][4] = 78,
+ [0][1][1][0][1][4] = 46,
+ [0][1][1][0][3][4] = 64,
+ [0][1][1][0][5][4] = 78,
+ [0][1][1][0][6][4] = 46,
+ [0][1][1][0][9][4] = 46,
+ [0][1][1][0][8][4] = 48,
+ [0][1][1][0][11][4] = 46,
+ [0][1][1][0][2][5] = 78,
+ [0][1][1][0][1][5] = 46,
+ [0][1][1][0][3][5] = 64,
+ [0][1][1][0][5][5] = 78,
+ [0][1][1][0][6][5] = 46,
+ [0][1][1][0][9][5] = 46,
+ [0][1][1][0][8][5] = 48,
+ [0][1][1][0][11][5] = 46,
+ [0][1][1][0][2][6] = 78,
+ [0][1][1][0][1][6] = 46,
+ [0][1][1][0][3][6] = 64,
+ [0][1][1][0][5][6] = 78,
+ [0][1][1][0][6][6] = 46,
+ [0][1][1][0][9][6] = 46,
+ [0][1][1][0][8][6] = 48,
+ [0][1][1][0][11][6] = 46,
+ [0][1][1][0][2][7] = 70,
+ [0][1][1][0][1][7] = 46,
+ [0][1][1][0][3][7] = 64,
+ [0][1][1][0][5][7] = 70,
+ [0][1][1][0][6][7] = 46,
+ [0][1][1][0][9][7] = 46,
+ [0][1][1][0][8][7] = 48,
+ [0][1][1][0][11][7] = 46,
+ [0][1][1][0][2][8] = 66,
+ [0][1][1][0][1][8] = 46,
+ [0][1][1][0][3][8] = 64,
+ [0][1][1][0][5][8] = 66,
+ [0][1][1][0][6][8] = 46,
+ [0][1][1][0][9][8] = 46,
+ [0][1][1][0][8][8] = 48,
+ [0][1][1][0][11][8] = 46,
+ [0][1][1][0][2][9] = 62,
+ [0][1][1][0][1][9] = 46,
+ [0][1][1][0][3][9] = 64,
+ [0][1][1][0][5][9] = 62,
+ [0][1][1][0][6][9] = 46,
+ [0][1][1][0][9][9] = 46,
+ [0][1][1][0][8][9] = 48,
+ [0][1][1][0][11][9] = 46,
+ [0][1][1][0][2][10] = 62,
+ [0][1][1][0][1][10] = 46,
+ [0][1][1][0][3][10] = 64,
+ [0][1][1][0][5][10] = 62,
+ [0][1][1][0][6][10] = 46,
+ [0][1][1][0][9][10] = 46,
+ [0][1][1][0][8][10] = 48,
+ [0][1][1][0][11][10] = 46,
+ [0][1][1][0][2][11] = 42,
+ [0][1][1][0][1][11] = 46,
+ [0][1][1][0][3][11] = 64,
+ [0][1][1][0][5][11] = 42,
+ [0][1][1][0][6][11] = 46,
+ [0][1][1][0][9][11] = 46,
+ [0][1][1][0][8][11] = 48,
+ [0][1][1][0][11][11] = 46,
+ [0][1][1][0][2][12] = 40,
+ [0][1][1][0][1][12] = 46,
+ [0][1][1][0][3][12] = 64,
+ [0][1][1][0][5][12] = 40,
+ [0][1][1][0][6][12] = 46,
+ [0][1][1][0][9][12] = 46,
+ [0][1][1][0][8][12] = 48,
+ [0][1][1][0][11][12] = 46,
+ [0][1][1][0][2][13] = 127,
+ [0][1][1][0][1][13] = 127,
+ [0][1][1][0][3][13] = 127,
+ [0][1][1][0][5][13] = 127,
+ [0][1][1][0][6][13] = 127,
+ [0][1][1][0][9][13] = 127,
+ [0][1][1][0][8][13] = 127,
+ [0][1][1][0][11][13] = 127,
+ [0][0][2][0][2][0] = 66,
+ [0][0][2][0][1][0] = 58,
+ [0][0][2][0][3][0] = 76,
+ [0][0][2][0][5][0] = 66,
+ [0][0][2][0][6][0] = 58,
+ [0][0][2][0][9][0] = 58,
+ [0][0][2][0][8][0] = 60,
+ [0][0][2][0][11][0] = 58,
+ [0][0][2][0][2][1] = 66,
+ [0][0][2][0][1][1] = 58,
+ [0][0][2][0][3][1] = 76,
+ [0][0][2][0][5][1] = 66,
+ [0][0][2][0][6][1] = 58,
+ [0][0][2][0][9][1] = 58,
+ [0][0][2][0][8][1] = 60,
+ [0][0][2][0][11][1] = 58,
+ [0][0][2][0][2][2] = 70,
+ [0][0][2][0][1][2] = 58,
+ [0][0][2][0][3][2] = 76,
+ [0][0][2][0][5][2] = 70,
+ [0][0][2][0][6][2] = 58,
+ [0][0][2][0][9][2] = 58,
+ [0][0][2][0][8][2] = 60,
+ [0][0][2][0][11][2] = 58,
+ [0][0][2][0][2][3] = 74,
+ [0][0][2][0][1][3] = 58,
+ [0][0][2][0][3][3] = 76,
+ [0][0][2][0][5][3] = 74,
+ [0][0][2][0][6][3] = 58,
+ [0][0][2][0][9][3] = 58,
+ [0][0][2][0][8][3] = 60,
+ [0][0][2][0][11][3] = 58,
+ [0][0][2][0][2][4] = 76,
+ [0][0][2][0][1][4] = 58,
+ [0][0][2][0][3][4] = 76,
+ [0][0][2][0][5][4] = 76,
+ [0][0][2][0][6][4] = 58,
+ [0][0][2][0][9][4] = 58,
+ [0][0][2][0][8][4] = 60,
+ [0][0][2][0][11][4] = 58,
+ [0][0][2][0][2][5] = 76,
+ [0][0][2][0][1][5] = 58,
+ [0][0][2][0][3][5] = 76,
+ [0][0][2][0][5][5] = 76,
+ [0][0][2][0][6][5] = 58,
+ [0][0][2][0][9][5] = 58,
+ [0][0][2][0][8][5] = 60,
+ [0][0][2][0][11][5] = 58,
+ [0][0][2][0][2][6] = 76,
+ [0][0][2][0][1][6] = 58,
+ [0][0][2][0][3][6] = 76,
+ [0][0][2][0][5][6] = 76,
+ [0][0][2][0][6][6] = 58,
+ [0][0][2][0][9][6] = 58,
+ [0][0][2][0][8][6] = 60,
+ [0][0][2][0][11][6] = 58,
+ [0][0][2][0][2][7] = 74,
+ [0][0][2][0][1][7] = 58,
+ [0][0][2][0][3][7] = 76,
+ [0][0][2][0][5][7] = 74,
+ [0][0][2][0][6][7] = 58,
+ [0][0][2][0][9][7] = 58,
+ [0][0][2][0][8][7] = 60,
+ [0][0][2][0][11][7] = 58,
+ [0][0][2][0][2][8] = 70,
+ [0][0][2][0][1][8] = 58,
+ [0][0][2][0][3][8] = 76,
+ [0][0][2][0][5][8] = 70,
+ [0][0][2][0][6][8] = 58,
+ [0][0][2][0][9][8] = 58,
+ [0][0][2][0][8][8] = 60,
+ [0][0][2][0][11][8] = 58,
+ [0][0][2][0][2][9] = 66,
+ [0][0][2][0][1][9] = 58,
+ [0][0][2][0][3][9] = 76,
+ [0][0][2][0][5][9] = 66,
+ [0][0][2][0][6][9] = 58,
+ [0][0][2][0][9][9] = 58,
+ [0][0][2][0][8][9] = 60,
+ [0][0][2][0][11][9] = 58,
+ [0][0][2][0][2][10] = 66,
+ [0][0][2][0][1][10] = 58,
+ [0][0][2][0][3][10] = 76,
+ [0][0][2][0][5][10] = 66,
+ [0][0][2][0][6][10] = 58,
+ [0][0][2][0][9][10] = 58,
+ [0][0][2][0][8][10] = 60,
+ [0][0][2][0][11][10] = 58,
+ [0][0][2][0][2][11] = 54,
+ [0][0][2][0][1][11] = 58,
+ [0][0][2][0][3][11] = 76,
+ [0][0][2][0][5][11] = 54,
+ [0][0][2][0][6][11] = 58,
+ [0][0][2][0][9][11] = 58,
+ [0][0][2][0][8][11] = 60,
+ [0][0][2][0][11][11] = 58,
+ [0][0][2][0][2][12] = 50,
+ [0][0][2][0][1][12] = 58,
+ [0][0][2][0][3][12] = 76,
+ [0][0][2][0][5][12] = 50,
+ [0][0][2][0][6][12] = 58,
+ [0][0][2][0][9][12] = 58,
+ [0][0][2][0][8][12] = 60,
+ [0][0][2][0][11][12] = 58,
+ [0][0][2][0][2][13] = 127,
+ [0][0][2][0][1][13] = 127,
+ [0][0][2][0][3][13] = 127,
+ [0][0][2][0][5][13] = 127,
+ [0][0][2][0][6][13] = 127,
+ [0][0][2][0][9][13] = 127,
+ [0][0][2][0][8][13] = 127,
+ [0][0][2][0][11][13] = 127,
+ [0][1][2][0][2][0] = 62,
+ [0][1][2][0][1][0] = 46,
+ [0][1][2][0][3][0] = 64,
+ [0][1][2][0][5][0] = 62,
+ [0][1][2][0][6][0] = 46,
+ [0][1][2][0][9][0] = 46,
+ [0][1][2][0][8][0] = 48,
+ [0][1][2][0][11][0] = 46,
+ [0][1][2][0][2][1] = 62,
+ [0][1][2][0][1][1] = 46,
+ [0][1][2][0][3][1] = 64,
+ [0][1][2][0][5][1] = 62,
+ [0][1][2][0][6][1] = 46,
+ [0][1][2][0][9][1] = 46,
+ [0][1][2][0][8][1] = 48,
+ [0][1][2][0][11][1] = 46,
+ [0][1][2][0][2][2] = 66,
+ [0][1][2][0][1][2] = 46,
+ [0][1][2][0][3][2] = 64,
+ [0][1][2][0][5][2] = 66,
+ [0][1][2][0][6][2] = 46,
+ [0][1][2][0][9][2] = 46,
+ [0][1][2][0][8][2] = 48,
+ [0][1][2][0][11][2] = 46,
+ [0][1][2][0][2][3] = 70,
+ [0][1][2][0][1][3] = 46,
+ [0][1][2][0][3][3] = 64,
+ [0][1][2][0][5][3] = 70,
+ [0][1][2][0][6][3] = 46,
+ [0][1][2][0][9][3] = 46,
+ [0][1][2][0][8][3] = 48,
+ [0][1][2][0][11][3] = 46,
+ [0][1][2][0][2][4] = 76,
+ [0][1][2][0][1][4] = 46,
+ [0][1][2][0][3][4] = 64,
+ [0][1][2][0][5][4] = 76,
+ [0][1][2][0][6][4] = 46,
+ [0][1][2][0][9][4] = 46,
+ [0][1][2][0][8][4] = 48,
+ [0][1][2][0][11][4] = 46,
+ [0][1][2][0][2][5] = 76,
+ [0][1][2][0][1][5] = 46,
+ [0][1][2][0][3][5] = 64,
+ [0][1][2][0][5][5] = 76,
+ [0][1][2][0][6][5] = 46,
+ [0][1][2][0][9][5] = 46,
+ [0][1][2][0][8][5] = 48,
+ [0][1][2][0][11][5] = 46,
+ [0][1][2][0][2][6] = 76,
+ [0][1][2][0][1][6] = 46,
+ [0][1][2][0][3][6] = 64,
+ [0][1][2][0][5][6] = 76,
+ [0][1][2][0][6][6] = 46,
+ [0][1][2][0][9][6] = 46,
+ [0][1][2][0][8][6] = 48,
+ [0][1][2][0][11][6] = 46,
+ [0][1][2][0][2][7] = 68,
+ [0][1][2][0][1][7] = 46,
+ [0][1][2][0][3][7] = 64,
+ [0][1][2][0][5][7] = 68,
+ [0][1][2][0][6][7] = 46,
+ [0][1][2][0][9][7] = 46,
+ [0][1][2][0][8][7] = 48,
+ [0][1][2][0][11][7] = 46,
+ [0][1][2][0][2][8] = 64,
+ [0][1][2][0][1][8] = 46,
+ [0][1][2][0][3][8] = 64,
+ [0][1][2][0][5][8] = 64,
+ [0][1][2][0][6][8] = 46,
+ [0][1][2][0][9][8] = 46,
+ [0][1][2][0][8][8] = 48,
+ [0][1][2][0][11][8] = 46,
+ [0][1][2][0][2][9] = 60,
+ [0][1][2][0][1][9] = 46,
+ [0][1][2][0][3][9] = 64,
+ [0][1][2][0][5][9] = 60,
+ [0][1][2][0][6][9] = 46,
+ [0][1][2][0][9][9] = 46,
+ [0][1][2][0][8][9] = 48,
+ [0][1][2][0][11][9] = 46,
+ [0][1][2][0][2][10] = 60,
+ [0][1][2][0][1][10] = 46,
+ [0][1][2][0][3][10] = 64,
+ [0][1][2][0][5][10] = 60,
+ [0][1][2][0][6][10] = 46,
+ [0][1][2][0][9][10] = 46,
+ [0][1][2][0][8][10] = 48,
+ [0][1][2][0][11][10] = 46,
+ [0][1][2][0][2][11] = 42,
+ [0][1][2][0][1][11] = 46,
+ [0][1][2][0][3][11] = 64,
+ [0][1][2][0][5][11] = 42,
+ [0][1][2][0][6][11] = 46,
+ [0][1][2][0][9][11] = 46,
+ [0][1][2][0][8][11] = 48,
+ [0][1][2][0][11][11] = 46,
+ [0][1][2][0][2][12] = 40,
+ [0][1][2][0][1][12] = 46,
+ [0][1][2][0][3][12] = 64,
+ [0][1][2][0][5][12] = 40,
+ [0][1][2][0][6][12] = 46,
+ [0][1][2][0][9][12] = 46,
+ [0][1][2][0][8][12] = 48,
+ [0][1][2][0][11][12] = 46,
+ [0][1][2][0][2][13] = 127,
+ [0][1][2][0][1][13] = 127,
+ [0][1][2][0][3][13] = 127,
+ [0][1][2][0][5][13] = 127,
+ [0][1][2][0][6][13] = 127,
+ [0][1][2][0][9][13] = 127,
+ [0][1][2][0][8][13] = 127,
+ [0][1][2][0][11][13] = 127,
+ [0][1][2][1][2][0] = 62,
+ [0][1][2][1][1][0] = 34,
+ [0][1][2][1][3][0] = 64,
+ [0][1][2][1][5][0] = 62,
+ [0][1][2][1][6][0] = 34,
+ [0][1][2][1][9][0] = 34,
+ [0][1][2][1][8][0] = 36,
+ [0][1][2][1][11][0] = 34,
+ [0][1][2][1][2][1] = 62,
+ [0][1][2][1][1][1] = 34,
+ [0][1][2][1][3][1] = 64,
+ [0][1][2][1][5][1] = 62,
+ [0][1][2][1][6][1] = 34,
+ [0][1][2][1][9][1] = 34,
+ [0][1][2][1][8][1] = 36,
+ [0][1][2][1][11][1] = 34,
+ [0][1][2][1][2][2] = 66,
+ [0][1][2][1][1][2] = 34,
+ [0][1][2][1][3][2] = 64,
+ [0][1][2][1][5][2] = 66,
+ [0][1][2][1][6][2] = 34,
+ [0][1][2][1][9][2] = 34,
+ [0][1][2][1][8][2] = 36,
+ [0][1][2][1][11][2] = 34,
+ [0][1][2][1][2][3] = 70,
+ [0][1][2][1][1][3] = 34,
+ [0][1][2][1][3][3] = 64,
+ [0][1][2][1][5][3] = 70,
+ [0][1][2][1][6][3] = 34,
+ [0][1][2][1][9][3] = 34,
+ [0][1][2][1][8][3] = 36,
+ [0][1][2][1][11][3] = 34,
+ [0][1][2][1][2][4] = 76,
+ [0][1][2][1][1][4] = 34,
+ [0][1][2][1][3][4] = 64,
+ [0][1][2][1][5][4] = 76,
+ [0][1][2][1][6][4] = 34,
+ [0][1][2][1][9][4] = 34,
+ [0][1][2][1][8][4] = 36,
+ [0][1][2][1][11][4] = 34,
+ [0][1][2][1][2][5] = 76,
+ [0][1][2][1][1][5] = 34,
+ [0][1][2][1][3][5] = 64,
+ [0][1][2][1][5][5] = 76,
+ [0][1][2][1][6][5] = 34,
+ [0][1][2][1][9][5] = 34,
+ [0][1][2][1][8][5] = 36,
+ [0][1][2][1][11][5] = 34,
+ [0][1][2][1][2][6] = 76,
+ [0][1][2][1][1][6] = 34,
+ [0][1][2][1][3][6] = 64,
+ [0][1][2][1][5][6] = 76,
+ [0][1][2][1][6][6] = 34,
+ [0][1][2][1][9][6] = 34,
+ [0][1][2][1][8][6] = 36,
+ [0][1][2][1][11][6] = 34,
+ [0][1][2][1][2][7] = 68,
+ [0][1][2][1][1][7] = 34,
+ [0][1][2][1][3][7] = 64,
+ [0][1][2][1][5][7] = 68,
+ [0][1][2][1][6][7] = 34,
+ [0][1][2][1][9][7] = 34,
+ [0][1][2][1][8][7] = 36,
+ [0][1][2][1][11][7] = 34,
+ [0][1][2][1][2][8] = 64,
+ [0][1][2][1][1][8] = 34,
+ [0][1][2][1][3][8] = 64,
+ [0][1][2][1][5][8] = 64,
+ [0][1][2][1][6][8] = 34,
+ [0][1][2][1][9][8] = 34,
+ [0][1][2][1][8][8] = 36,
+ [0][1][2][1][11][8] = 34,
+ [0][1][2][1][2][9] = 60,
+ [0][1][2][1][1][9] = 34,
+ [0][1][2][1][3][9] = 64,
+ [0][1][2][1][5][9] = 60,
+ [0][1][2][1][6][9] = 34,
+ [0][1][2][1][9][9] = 34,
+ [0][1][2][1][8][9] = 36,
+ [0][1][2][1][11][9] = 34,
+ [0][1][2][1][2][10] = 60,
+ [0][1][2][1][1][10] = 34,
+ [0][1][2][1][3][10] = 64,
+ [0][1][2][1][5][10] = 60,
+ [0][1][2][1][6][10] = 34,
+ [0][1][2][1][9][10] = 34,
+ [0][1][2][1][8][10] = 36,
+ [0][1][2][1][11][10] = 34,
+ [0][1][2][1][2][11] = 42,
+ [0][1][2][1][1][11] = 34,
+ [0][1][2][1][3][11] = 64,
+ [0][1][2][1][5][11] = 42,
+ [0][1][2][1][6][11] = 34,
+ [0][1][2][1][9][11] = 34,
+ [0][1][2][1][8][11] = 36,
+ [0][1][2][1][11][11] = 34,
+ [0][1][2][1][2][12] = 40,
+ [0][1][2][1][1][12] = 34,
+ [0][1][2][1][3][12] = 64,
+ [0][1][2][1][5][12] = 40,
+ [0][1][2][1][6][12] = 34,
+ [0][1][2][1][9][12] = 34,
+ [0][1][2][1][8][12] = 36,
+ [0][1][2][1][11][12] = 34,
+ [0][1][2][1][2][13] = 127,
+ [0][1][2][1][1][13] = 127,
+ [0][1][2][1][3][13] = 127,
+ [0][1][2][1][5][13] = 127,
+ [0][1][2][1][6][13] = 127,
+ [0][1][2][1][9][13] = 127,
+ [0][1][2][1][8][13] = 127,
+ [0][1][2][1][11][13] = 127,
+ [1][0][2][0][2][0] = 127,
+ [1][0][2][0][1][0] = 127,
+ [1][0][2][0][3][0] = 127,
+ [1][0][2][0][5][0] = 127,
+ [1][0][2][0][6][0] = 127,
+ [1][0][2][0][9][0] = 127,
+ [1][0][2][0][8][0] = 127,
+ [1][0][2][0][11][0] = 127,
+ [1][0][2][0][2][1] = 127,
+ [1][0][2][0][1][1] = 127,
+ [1][0][2][0][3][1] = 127,
+ [1][0][2][0][5][1] = 127,
+ [1][0][2][0][6][1] = 127,
+ [1][0][2][0][9][1] = 127,
+ [1][0][2][0][8][1] = 127,
+ [1][0][2][0][11][1] = 127,
+ [1][0][2][0][2][2] = 56,
+ [1][0][2][0][1][2] = 58,
+ [1][0][2][0][3][2] = 76,
+ [1][0][2][0][5][2] = 56,
+ [1][0][2][0][6][2] = 58,
+ [1][0][2][0][9][2] = 58,
+ [1][0][2][0][8][2] = 60,
+ [1][0][2][0][11][2] = 58,
+ [1][0][2][0][2][3] = 56,
+ [1][0][2][0][1][3] = 58,
+ [1][0][2][0][3][3] = 76,
+ [1][0][2][0][5][3] = 56,
+ [1][0][2][0][6][3] = 58,
+ [1][0][2][0][9][3] = 58,
+ [1][0][2][0][8][3] = 60,
+ [1][0][2][0][11][3] = 58,
+ [1][0][2][0][2][4] = 60,
+ [1][0][2][0][1][4] = 58,
+ [1][0][2][0][3][4] = 76,
+ [1][0][2][0][5][4] = 60,
+ [1][0][2][0][6][4] = 58,
+ [1][0][2][0][9][4] = 58,
+ [1][0][2][0][8][4] = 60,
+ [1][0][2][0][11][4] = 58,
+ [1][0][2][0][2][5] = 64,
+ [1][0][2][0][1][5] = 58,
+ [1][0][2][0][3][5] = 76,
+ [1][0][2][0][5][5] = 64,
+ [1][0][2][0][6][5] = 58,
+ [1][0][2][0][9][5] = 58,
+ [1][0][2][0][8][5] = 60,
+ [1][0][2][0][11][5] = 58,
+ [1][0][2][0][2][6] = 54,
+ [1][0][2][0][1][6] = 58,
+ [1][0][2][0][3][6] = 76,
+ [1][0][2][0][5][6] = 54,
+ [1][0][2][0][6][6] = 58,
+ [1][0][2][0][9][6] = 58,
+ [1][0][2][0][8][6] = 60,
+ [1][0][2][0][11][6] = 58,
+ [1][0][2][0][2][7] = 50,
+ [1][0][2][0][1][7] = 58,
+ [1][0][2][0][3][7] = 76,
+ [1][0][2][0][5][7] = 50,
+ [1][0][2][0][6][7] = 58,
+ [1][0][2][0][9][7] = 58,
+ [1][0][2][0][8][7] = 60,
+ [1][0][2][0][11][7] = 58,
+ [1][0][2][0][2][8] = 50,
+ [1][0][2][0][1][8] = 58,
+ [1][0][2][0][3][8] = 76,
+ [1][0][2][0][5][8] = 50,
+ [1][0][2][0][6][8] = 58,
+ [1][0][2][0][9][8] = 58,
+ [1][0][2][0][8][8] = 60,
+ [1][0][2][0][11][8] = 58,
+ [1][0][2][0][2][9] = 42,
+ [1][0][2][0][1][9] = 58,
+ [1][0][2][0][3][9] = 76,
+ [1][0][2][0][5][9] = 42,
+ [1][0][2][0][6][9] = 58,
+ [1][0][2][0][9][9] = 58,
+ [1][0][2][0][8][9] = 60,
+ [1][0][2][0][11][9] = 58,
+ [1][0][2][0][2][10] = 40,
+ [1][0][2][0][1][10] = 58,
+ [1][0][2][0][3][10] = 76,
+ [1][0][2][0][5][10] = 40,
+ [1][0][2][0][6][10] = 58,
+ [1][0][2][0][9][10] = 58,
+ [1][0][2][0][8][10] = 60,
+ [1][0][2][0][11][10] = 58,
+ [1][0][2][0][2][11] = 127,
+ [1][0][2][0][1][11] = 127,
+ [1][0][2][0][3][11] = 127,
+ [1][0][2][0][5][11] = 127,
+ [1][0][2][0][6][11] = 127,
+ [1][0][2][0][9][11] = 127,
+ [1][0][2][0][8][11] = 127,
+ [1][0][2][0][11][11] = 127,
+ [1][0][2][0][2][12] = 127,
+ [1][0][2][0][1][12] = 127,
+ [1][0][2][0][3][12] = 127,
+ [1][0][2][0][5][12] = 127,
+ [1][0][2][0][6][12] = 127,
+ [1][0][2][0][9][12] = 127,
+ [1][0][2][0][8][12] = 127,
+ [1][0][2][0][11][12] = 127,
+ [1][0][2][0][2][13] = 127,
+ [1][0][2][0][1][13] = 127,
+ [1][0][2][0][3][13] = 127,
+ [1][0][2][0][5][13] = 127,
+ [1][0][2][0][6][13] = 127,
+ [1][0][2][0][9][13] = 127,
+ [1][0][2][0][8][13] = 127,
+ [1][0][2][0][11][13] = 127,
+ [1][1][2][0][2][0] = 127,
+ [1][1][2][0][1][0] = 127,
+ [1][1][2][0][3][0] = 127,
+ [1][1][2][0][5][0] = 127,
+ [1][1][2][0][6][0] = 127,
+ [1][1][2][0][9][0] = 127,
+ [1][1][2][0][8][0] = 127,
+ [1][1][2][0][11][0] = 127,
+ [1][1][2][0][2][1] = 127,
+ [1][1][2][0][1][1] = 127,
+ [1][1][2][0][3][1] = 127,
+ [1][1][2][0][5][1] = 127,
+ [1][1][2][0][6][1] = 127,
+ [1][1][2][0][9][1] = 127,
+ [1][1][2][0][8][1] = 127,
+ [1][1][2][0][11][1] = 127,
+ [1][1][2][0][2][2] = 52,
+ [1][1][2][0][1][2] = 46,
+ [1][1][2][0][3][2] = 64,
+ [1][1][2][0][5][2] = 52,
+ [1][1][2][0][6][2] = 46,
+ [1][1][2][0][9][2] = 46,
+ [1][1][2][0][8][2] = 48,
+ [1][1][2][0][11][2] = 46,
+ [1][1][2][0][2][3] = 52,
+ [1][1][2][0][1][3] = 46,
+ [1][1][2][0][3][3] = 64,
+ [1][1][2][0][5][3] = 52,
+ [1][1][2][0][6][3] = 46,
+ [1][1][2][0][9][3] = 46,
+ [1][1][2][0][8][3] = 48,
+ [1][1][2][0][11][3] = 46,
+ [1][1][2][0][2][4] = 56,
+ [1][1][2][0][1][4] = 46,
+ [1][1][2][0][3][4] = 64,
+ [1][1][2][0][5][4] = 56,
+ [1][1][2][0][6][4] = 46,
+ [1][1][2][0][9][4] = 46,
+ [1][1][2][0][8][4] = 48,
+ [1][1][2][0][11][4] = 46,
+ [1][1][2][0][2][5] = 60,
+ [1][1][2][0][1][5] = 46,
+ [1][1][2][0][3][5] = 64,
+ [1][1][2][0][5][5] = 60,
+ [1][1][2][0][6][5] = 46,
+ [1][1][2][0][9][5] = 46,
+ [1][1][2][0][8][5] = 48,
+ [1][1][2][0][11][5] = 46,
+ [1][1][2][0][2][6] = 54,
+ [1][1][2][0][1][6] = 46,
+ [1][1][2][0][3][6] = 64,
+ [1][1][2][0][5][6] = 52,
+ [1][1][2][0][6][6] = 46,
+ [1][1][2][0][9][6] = 46,
+ [1][1][2][0][8][6] = 48,
+ [1][1][2][0][11][6] = 46,
+ [1][1][2][0][2][7] = 50,
+ [1][1][2][0][1][7] = 46,
+ [1][1][2][0][3][7] = 64,
+ [1][1][2][0][5][7] = 48,
+ [1][1][2][0][6][7] = 46,
+ [1][1][2][0][9][7] = 46,
+ [1][1][2][0][8][7] = 48,
+ [1][1][2][0][11][7] = 46,
+ [1][1][2][0][2][8] = 50,
+ [1][1][2][0][1][8] = 46,
+ [1][1][2][0][3][8] = 64,
+ [1][1][2][0][5][8] = 48,
+ [1][1][2][0][6][8] = 46,
+ [1][1][2][0][9][8] = 46,
+ [1][1][2][0][8][8] = 48,
+ [1][1][2][0][11][8] = 46,
+ [1][1][2][0][2][9] = 38,
+ [1][1][2][0][1][9] = 46,
+ [1][1][2][0][3][9] = 64,
+ [1][1][2][0][5][9] = 38,
+ [1][1][2][0][6][9] = 46,
+ [1][1][2][0][9][9] = 46,
+ [1][1][2][0][8][9] = 48,
+ [1][1][2][0][11][9] = 46,
+ [1][1][2][0][2][10] = 36,
+ [1][1][2][0][1][10] = 46,
+ [1][1][2][0][3][10] = 64,
+ [1][1][2][0][5][10] = 36,
+ [1][1][2][0][6][10] = 46,
+ [1][1][2][0][9][10] = 46,
+ [1][1][2][0][8][10] = 48,
+ [1][1][2][0][11][10] = 46,
+ [1][1][2][0][2][11] = 127,
+ [1][1][2][0][1][11] = 127,
+ [1][1][2][0][3][11] = 127,
+ [1][1][2][0][5][11] = 127,
+ [1][1][2][0][6][11] = 127,
+ [1][1][2][0][9][11] = 127,
+ [1][1][2][0][8][11] = 127,
+ [1][1][2][0][11][11] = 127,
+ [1][1][2][0][2][12] = 127,
+ [1][1][2][0][1][12] = 127,
+ [1][1][2][0][3][12] = 127,
+ [1][1][2][0][5][12] = 127,
+ [1][1][2][0][6][12] = 127,
+ [1][1][2][0][9][12] = 127,
+ [1][1][2][0][8][12] = 127,
+ [1][1][2][0][11][12] = 127,
+ [1][1][2][0][2][13] = 127,
+ [1][1][2][0][1][13] = 127,
+ [1][1][2][0][3][13] = 127,
+ [1][1][2][0][5][13] = 127,
+ [1][1][2][0][6][13] = 127,
+ [1][1][2][0][9][13] = 127,
+ [1][1][2][0][8][13] = 127,
+ [1][1][2][0][11][13] = 127,
+ [1][1][2][1][2][0] = 127,
+ [1][1][2][1][1][0] = 127,
+ [1][1][2][1][3][0] = 127,
+ [1][1][2][1][5][0] = 127,
+ [1][1][2][1][6][0] = 127,
+ [1][1][2][1][9][0] = 127,
+ [1][1][2][1][8][0] = 127,
+ [1][1][2][1][11][0] = 127,
+ [1][1][2][1][2][1] = 127,
+ [1][1][2][1][1][1] = 127,
+ [1][1][2][1][3][1] = 127,
+ [1][1][2][1][5][1] = 127,
+ [1][1][2][1][6][1] = 127,
+ [1][1][2][1][9][1] = 127,
+ [1][1][2][1][8][1] = 127,
+ [1][1][2][1][11][1] = 127,
+ [1][1][2][1][2][2] = 52,
+ [1][1][2][1][1][2] = 34,
+ [1][1][2][1][3][2] = 64,
+ [1][1][2][1][5][2] = 52,
+ [1][1][2][1][6][2] = 34,
+ [1][1][2][1][9][2] = 34,
+ [1][1][2][1][8][2] = 36,
+ [1][1][2][1][11][2] = 34,
+ [1][1][2][1][2][3] = 52,
+ [1][1][2][1][1][3] = 34,
+ [1][1][2][1][3][3] = 64,
+ [1][1][2][1][5][3] = 52,
+ [1][1][2][1][6][3] = 34,
+ [1][1][2][1][9][3] = 34,
+ [1][1][2][1][8][3] = 36,
+ [1][1][2][1][11][3] = 34,
+ [1][1][2][1][2][4] = 56,
+ [1][1][2][1][1][4] = 34,
+ [1][1][2][1][3][4] = 64,
+ [1][1][2][1][5][4] = 56,
+ [1][1][2][1][6][4] = 34,
+ [1][1][2][1][9][4] = 34,
+ [1][1][2][1][8][4] = 36,
+ [1][1][2][1][11][4] = 34,
+ [1][1][2][1][2][5] = 60,
+ [1][1][2][1][1][5] = 34,
+ [1][1][2][1][3][5] = 64,
+ [1][1][2][1][5][5] = 60,
+ [1][1][2][1][6][5] = 34,
+ [1][1][2][1][9][5] = 34,
+ [1][1][2][1][8][5] = 36,
+ [1][1][2][1][11][5] = 34,
+ [1][1][2][1][2][6] = 54,
+ [1][1][2][1][1][6] = 34,
+ [1][1][2][1][3][6] = 64,
+ [1][1][2][1][5][6] = 52,
+ [1][1][2][1][6][6] = 34,
+ [1][1][2][1][9][6] = 34,
+ [1][1][2][1][8][6] = 36,
+ [1][1][2][1][11][6] = 34,
+ [1][1][2][1][2][7] = 50,
+ [1][1][2][1][1][7] = 34,
+ [1][1][2][1][3][7] = 64,
+ [1][1][2][1][5][7] = 48,
+ [1][1][2][1][6][7] = 34,
+ [1][1][2][1][9][7] = 34,
+ [1][1][2][1][8][7] = 36,
+ [1][1][2][1][11][7] = 34,
+ [1][1][2][1][2][8] = 50,
+ [1][1][2][1][1][8] = 34,
+ [1][1][2][1][3][8] = 64,
+ [1][1][2][1][5][8] = 48,
+ [1][1][2][1][6][8] = 34,
+ [1][1][2][1][9][8] = 34,
+ [1][1][2][1][8][8] = 36,
+ [1][1][2][1][11][8] = 34,
+ [1][1][2][1][2][9] = 38,
+ [1][1][2][1][1][9] = 34,
+ [1][1][2][1][3][9] = 64,
+ [1][1][2][1][5][9] = 38,
+ [1][1][2][1][6][9] = 34,
+ [1][1][2][1][9][9] = 34,
+ [1][1][2][1][8][9] = 36,
+ [1][1][2][1][11][9] = 34,
+ [1][1][2][1][2][10] = 36,
+ [1][1][2][1][1][10] = 34,
+ [1][1][2][1][3][10] = 64,
+ [1][1][2][1][5][10] = 36,
+ [1][1][2][1][6][10] = 34,
+ [1][1][2][1][9][10] = 34,
+ [1][1][2][1][8][10] = 36,
+ [1][1][2][1][11][10] = 34,
+ [1][1][2][1][2][11] = 127,
+ [1][1][2][1][1][11] = 127,
+ [1][1][2][1][3][11] = 127,
+ [1][1][2][1][5][11] = 127,
+ [1][1][2][1][6][11] = 127,
+ [1][1][2][1][9][11] = 127,
+ [1][1][2][1][8][11] = 127,
+ [1][1][2][1][11][11] = 127,
+ [1][1][2][1][2][12] = 127,
+ [1][1][2][1][1][12] = 127,
+ [1][1][2][1][3][12] = 127,
+ [1][1][2][1][5][12] = 127,
+ [1][1][2][1][6][12] = 127,
+ [1][1][2][1][9][12] = 127,
+ [1][1][2][1][8][12] = 127,
+ [1][1][2][1][11][12] = 127,
+ [1][1][2][1][2][13] = 127,
+ [1][1][2][1][1][13] = 127,
+ [1][1][2][1][3][13] = 127,
+ [1][1][2][1][5][13] = 127,
+ [1][1][2][1][6][13] = 127,
+ [1][1][2][1][9][13] = 127,
+ [1][1][2][1][8][13] = 127,
+ [1][1][2][1][11][13] = 127,
+};
+
+const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][1][0][0][0] = 30,
+ [0][0][1][0][0][2] = 30,
+ [0][0][1][0][0][4] = 30,
+ [0][0][1][0][0][6] = 30,
+ [0][0][1][0][0][8] = 52,
+ [0][0][1][0][0][10] = 52,
+ [0][0][1][0][0][12] = 52,
+ [0][0][1][0][0][14] = 52,
+ [0][0][1][0][0][15] = 52,
+ [0][0][1][0][0][17] = 52,
+ [0][0][1][0][0][19] = 52,
+ [0][0][1][0][0][21] = 52,
+ [0][0][1][0][0][23] = 52,
+ [0][0][1][0][0][25] = 52,
+ [0][0][1][0][0][27] = 52,
+ [0][0][1][0][0][29] = 52,
+ [0][0][1][0][0][31] = 52,
+ [0][0][1][0][0][33] = 52,
+ [0][0][1][0][0][35] = 52,
+ [0][0][1][0][0][37] = 54,
+ [0][0][1][0][0][38] = 28,
+ [0][0][1][0][0][40] = 28,
+ [0][0][1][0][0][42] = 28,
+ [0][0][1][0][0][44] = 28,
+ [0][0][1][0][0][46] = 28,
+ [0][1][1][0][0][0] = 18,
+ [0][1][1][0][0][2] = 18,
+ [0][1][1][0][0][4] = 18,
+ [0][1][1][0][0][6] = 18,
+ [0][1][1][0][0][8] = 40,
+ [0][1][1][0][0][10] = 40,
+ [0][1][1][0][0][12] = 40,
+ [0][1][1][0][0][14] = 40,
+ [0][1][1][0][0][15] = 40,
+ [0][1][1][0][0][17] = 40,
+ [0][1][1][0][0][19] = 40,
+ [0][1][1][0][0][21] = 40,
+ [0][1][1][0][0][23] = 40,
+ [0][1][1][0][0][25] = 40,
+ [0][1][1][0][0][27] = 40,
+ [0][1][1][0][0][29] = 40,
+ [0][1][1][0][0][31] = 40,
+ [0][1][1][0][0][33] = 40,
+ [0][1][1][0][0][35] = 40,
+ [0][1][1][0][0][37] = 42,
+ [0][1][1][0][0][38] = 16,
+ [0][1][1][0][0][40] = 16,
+ [0][1][1][0][0][42] = 16,
+ [0][1][1][0][0][44] = 16,
+ [0][1][1][0][0][46] = 16,
+ [0][0][2][0][0][0] = 30,
+ [0][0][2][0][0][2] = 30,
+ [0][0][2][0][0][4] = 30,
+ [0][0][2][0][0][6] = 30,
+ [0][0][2][0][0][8] = 52,
+ [0][0][2][0][0][10] = 52,
+ [0][0][2][0][0][12] = 52,
+ [0][0][2][0][0][14] = 52,
+ [0][0][2][0][0][15] = 52,
+ [0][0][2][0][0][17] = 52,
+ [0][0][2][0][0][19] = 52,
+ [0][0][2][0][0][21] = 52,
+ [0][0][2][0][0][23] = 52,
+ [0][0][2][0][0][25] = 52,
+ [0][0][2][0][0][27] = 52,
+ [0][0][2][0][0][29] = 52,
+ [0][0][2][0][0][31] = 52,
+ [0][0][2][0][0][33] = 52,
+ [0][0][2][0][0][35] = 52,
+ [0][0][2][0][0][37] = 54,
+ [0][0][2][0][0][38] = 28,
+ [0][0][2][0][0][40] = 28,
+ [0][0][2][0][0][42] = 28,
+ [0][0][2][0][0][44] = 28,
+ [0][0][2][0][0][46] = 28,
+ [0][1][2][0][0][0] = 18,
+ [0][1][2][0][0][2] = 18,
+ [0][1][2][0][0][4] = 18,
+ [0][1][2][0][0][6] = 18,
+ [0][1][2][0][0][8] = 40,
+ [0][1][2][0][0][10] = 40,
+ [0][1][2][0][0][12] = 40,
+ [0][1][2][0][0][14] = 40,
+ [0][1][2][0][0][15] = 40,
+ [0][1][2][0][0][17] = 40,
+ [0][1][2][0][0][19] = 40,
+ [0][1][2][0][0][21] = 40,
+ [0][1][2][0][0][23] = 40,
+ [0][1][2][0][0][25] = 40,
+ [0][1][2][0][0][27] = 40,
+ [0][1][2][0][0][29] = 40,
+ [0][1][2][0][0][31] = 40,
+ [0][1][2][0][0][33] = 40,
+ [0][1][2][0][0][35] = 40,
+ [0][1][2][0][0][37] = 42,
+ [0][1][2][0][0][38] = 16,
+ [0][1][2][0][0][40] = 16,
+ [0][1][2][0][0][42] = 16,
+ [0][1][2][0][0][44] = 16,
+ [0][1][2][0][0][46] = 16,
+ [0][1][2][1][0][0] = 6,
+ [0][1][2][1][0][2] = 6,
+ [0][1][2][1][0][4] = 6,
+ [0][1][2][1][0][6] = 6,
+ [0][1][2][1][0][8] = 28,
+ [0][1][2][1][0][10] = 28,
+ [0][1][2][1][0][12] = 28,
+ [0][1][2][1][0][14] = 28,
+ [0][1][2][1][0][15] = 28,
+ [0][1][2][1][0][17] = 28,
+ [0][1][2][1][0][19] = 28,
+ [0][1][2][1][0][21] = 28,
+ [0][1][2][1][0][23] = 28,
+ [0][1][2][1][0][25] = 28,
+ [0][1][2][1][0][27] = 28,
+ [0][1][2][1][0][29] = 28,
+ [0][1][2][1][0][31] = 28,
+ [0][1][2][1][0][33] = 28,
+ [0][1][2][1][0][35] = 28,
+ [0][1][2][1][0][37] = 30,
+ [0][1][2][1][0][38] = 4,
+ [0][1][2][1][0][40] = 4,
+ [0][1][2][1][0][42] = 4,
+ [0][1][2][1][0][44] = 4,
+ [0][1][2][1][0][46] = 4,
+ [1][0][2][0][0][1] = 30,
+ [1][0][2][0][0][5] = 30,
+ [1][0][2][0][0][9] = 52,
+ [1][0][2][0][0][13] = 52,
+ [1][0][2][0][0][16] = 52,
+ [1][0][2][0][0][20] = 52,
+ [1][0][2][0][0][24] = 52,
+ [1][0][2][0][0][28] = 52,
+ [1][0][2][0][0][32] = 52,
+ [1][0][2][0][0][36] = 54,
+ [1][0][2][0][0][39] = 28,
+ [1][0][2][0][0][43] = 28,
+ [1][1][2][0][0][1] = 18,
+ [1][1][2][0][0][5] = 18,
+ [1][1][2][0][0][9] = 40,
+ [1][1][2][0][0][13] = 40,
+ [1][1][2][0][0][16] = 40,
+ [1][1][2][0][0][20] = 40,
+ [1][1][2][0][0][24] = 40,
+ [1][1][2][0][0][28] = 40,
+ [1][1][2][0][0][32] = 40,
+ [1][1][2][0][0][36] = 42,
+ [1][1][2][0][0][39] = 16,
+ [1][1][2][0][0][43] = 16,
+ [1][1][2][1][0][1] = 6,
+ [1][1][2][1][0][5] = 6,
+ [1][1][2][1][0][9] = 28,
+ [1][1][2][1][0][13] = 28,
+ [1][1][2][1][0][16] = 28,
+ [1][1][2][1][0][20] = 28,
+ [1][1][2][1][0][24] = 28,
+ [1][1][2][1][0][28] = 28,
+ [1][1][2][1][0][32] = 28,
+ [1][1][2][1][0][36] = 30,
+ [1][1][2][1][0][39] = 4,
+ [1][1][2][1][0][43] = 4,
+ [2][0][2][0][0][3] = 30,
+ [2][0][2][0][0][11] = 52,
+ [2][0][2][0][0][18] = 52,
+ [2][0][2][0][0][26] = 52,
+ [2][0][2][0][0][34] = 54,
+ [2][0][2][0][0][41] = 28,
+ [2][1][2][0][0][3] = 18,
+ [2][1][2][0][0][11] = 40,
+ [2][1][2][0][0][18] = 40,
+ [2][1][2][0][0][26] = 40,
+ [2][1][2][0][0][34] = 42,
+ [2][1][2][0][0][41] = 16,
+ [2][1][2][1][0][3] = 6,
+ [2][1][2][1][0][11] = 28,
+ [2][1][2][1][0][18] = 28,
+ [2][1][2][1][0][26] = 28,
+ [2][1][2][1][0][34] = 30,
+ [2][1][2][1][0][41] = 4,
+ [0][0][1][0][2][0] = 76,
+ [0][0][1][0][1][0] = 58,
+ [0][0][1][0][3][0] = 62,
+ [0][0][1][0][5][0] = 62,
+ [0][0][1][0][6][0] = 58,
+ [0][0][1][0][9][0] = 58,
+ [0][0][1][0][8][0] = 30,
+ [0][0][1][0][11][0] = 52,
+ [0][0][1][0][2][2] = 76,
+ [0][0][1][0][1][2] = 58,
+ [0][0][1][0][3][2] = 62,
+ [0][0][1][0][5][2] = 62,
+ [0][0][1][0][6][2] = 58,
+ [0][0][1][0][9][2] = 58,
+ [0][0][1][0][8][2] = 30,
+ [0][0][1][0][11][2] = 52,
+ [0][0][1][0][2][4] = 76,
+ [0][0][1][0][1][4] = 58,
+ [0][0][1][0][3][4] = 62,
+ [0][0][1][0][5][4] = 62,
+ [0][0][1][0][6][4] = 58,
+ [0][0][1][0][9][4] = 58,
+ [0][0][1][0][8][4] = 30,
+ [0][0][1][0][11][4] = 52,
+ [0][0][1][0][2][6] = 76,
+ [0][0][1][0][1][6] = 58,
+ [0][0][1][0][3][6] = 62,
+ [0][0][1][0][5][6] = 62,
+ [0][0][1][0][6][6] = 54,
+ [0][0][1][0][9][6] = 58,
+ [0][0][1][0][8][6] = 30,
+ [0][0][1][0][11][6] = 52,
+ [0][0][1][0][2][8] = 76,
+ [0][0][1][0][1][8] = 58,
+ [0][0][1][0][3][8] = 62,
+ [0][0][1][0][5][8] = 64,
+ [0][0][1][0][6][8] = 58,
+ [0][0][1][0][9][8] = 58,
+ [0][0][1][0][8][8] = 54,
+ [0][0][1][0][11][8] = 52,
+ [0][0][1][0][2][10] = 76,
+ [0][0][1][0][1][10] = 58,
+ [0][0][1][0][3][10] = 62,
+ [0][0][1][0][5][10] = 64,
+ [0][0][1][0][6][10] = 58,
+ [0][0][1][0][9][10] = 58,
+ [0][0][1][0][8][10] = 54,
+ [0][0][1][0][11][10] = 52,
+ [0][0][1][0][2][12] = 76,
+ [0][0][1][0][1][12] = 58,
+ [0][0][1][0][3][12] = 62,
+ [0][0][1][0][5][12] = 64,
+ [0][0][1][0][6][12] = 58,
+ [0][0][1][0][9][12] = 58,
+ [0][0][1][0][8][12] = 54,
+ [0][0][1][0][11][12] = 52,
+ [0][0][1][0][2][14] = 76,
+ [0][0][1][0][1][14] = 58,
+ [0][0][1][0][3][14] = 62,
+ [0][0][1][0][5][14] = 64,
+ [0][0][1][0][6][14] = 58,
+ [0][0][1][0][9][14] = 58,
+ [0][0][1][0][8][14] = 54,
+ [0][0][1][0][11][14] = 52,
+ [0][0][1][0][2][15] = 76,
+ [0][0][1][0][1][15] = 58,
+ [0][0][1][0][3][15] = 76,
+ [0][0][1][0][5][15] = 76,
+ [0][0][1][0][6][15] = 58,
+ [0][0][1][0][9][15] = 58,
+ [0][0][1][0][8][15] = 54,
+ [0][0][1][0][11][15] = 52,
+ [0][0][1][0][2][17] = 76,
+ [0][0][1][0][1][17] = 58,
+ [0][0][1][0][3][17] = 76,
+ [0][0][1][0][5][17] = 76,
+ [0][0][1][0][6][17] = 58,
+ [0][0][1][0][9][17] = 58,
+ [0][0][1][0][8][17] = 54,
+ [0][0][1][0][11][17] = 52,
+ [0][0][1][0][2][19] = 76,
+ [0][0][1][0][1][19] = 58,
+ [0][0][1][0][3][19] = 76,
+ [0][0][1][0][5][19] = 76,
+ [0][0][1][0][6][19] = 58,
+ [0][0][1][0][9][19] = 58,
+ [0][0][1][0][8][19] = 54,
+ [0][0][1][0][11][19] = 52,
+ [0][0][1][0][2][21] = 76,
+ [0][0][1][0][1][21] = 58,
+ [0][0][1][0][3][21] = 76,
+ [0][0][1][0][5][21] = 76,
+ [0][0][1][0][6][21] = 58,
+ [0][0][1][0][9][21] = 58,
+ [0][0][1][0][8][21] = 54,
+ [0][0][1][0][11][21] = 52,
+ [0][0][1][0][2][23] = 76,
+ [0][0][1][0][1][23] = 58,
+ [0][0][1][0][3][23] = 76,
+ [0][0][1][0][5][23] = 76,
+ [0][0][1][0][6][23] = 58,
+ [0][0][1][0][9][23] = 58,
+ [0][0][1][0][8][23] = 54,
+ [0][0][1][0][11][23] = 52,
+ [0][0][1][0][2][25] = 76,
+ [0][0][1][0][1][25] = 58,
+ [0][0][1][0][3][25] = 76,
+ [0][0][1][0][5][25] = 127,
+ [0][0][1][0][6][25] = 58,
+ [0][0][1][0][9][25] = 127,
+ [0][0][1][0][8][25] = 54,
+ [0][0][1][0][11][25] = 52,
+ [0][0][1][0][2][27] = 76,
+ [0][0][1][0][1][27] = 58,
+ [0][0][1][0][3][27] = 76,
+ [0][0][1][0][5][27] = 127,
+ [0][0][1][0][6][27] = 58,
+ [0][0][1][0][9][27] = 127,
+ [0][0][1][0][8][27] = 54,
+ [0][0][1][0][11][27] = 52,
+ [0][0][1][0][2][29] = 76,
+ [0][0][1][0][1][29] = 58,
+ [0][0][1][0][3][29] = 76,
+ [0][0][1][0][5][29] = 127,
+ [0][0][1][0][6][29] = 58,
+ [0][0][1][0][9][29] = 127,
+ [0][0][1][0][8][29] = 54,
+ [0][0][1][0][11][29] = 52,
+ [0][0][1][0][2][31] = 76,
+ [0][0][1][0][1][31] = 58,
+ [0][0][1][0][3][31] = 76,
+ [0][0][1][0][5][31] = 76,
+ [0][0][1][0][6][31] = 58,
+ [0][0][1][0][9][31] = 58,
+ [0][0][1][0][8][31] = 54,
+ [0][0][1][0][11][31] = 52,
+ [0][0][1][0][2][33] = 76,
+ [0][0][1][0][1][33] = 58,
+ [0][0][1][0][3][33] = 76,
+ [0][0][1][0][5][33] = 76,
+ [0][0][1][0][6][33] = 58,
+ [0][0][1][0][9][33] = 58,
+ [0][0][1][0][8][33] = 54,
+ [0][0][1][0][11][33] = 52,
+ [0][0][1][0][2][35] = 74,
+ [0][0][1][0][1][35] = 58,
+ [0][0][1][0][3][35] = 76,
+ [0][0][1][0][5][35] = 74,
+ [0][0][1][0][6][35] = 58,
+ [0][0][1][0][9][35] = 58,
+ [0][0][1][0][8][35] = 54,
+ [0][0][1][0][11][35] = 52,
+ [0][0][1][0][2][37] = 76,
+ [0][0][1][0][1][37] = 127,
+ [0][0][1][0][3][37] = 76,
+ [0][0][1][0][5][37] = 76,
+ [0][0][1][0][6][37] = 58,
+ [0][0][1][0][9][37] = 76,
+ [0][0][1][0][8][37] = 54,
+ [0][0][1][0][11][37] = 127,
+ [0][0][1][0][2][38] = 76,
+ [0][0][1][0][1][38] = 28,
+ [0][0][1][0][3][38] = 127,
+ [0][0][1][0][5][38] = 76,
+ [0][0][1][0][6][38] = 28,
+ [0][0][1][0][9][38] = 76,
+ [0][0][1][0][8][38] = 54,
+ [0][0][1][0][11][38] = 52,
+ [0][0][1][0][2][40] = 76,
+ [0][0][1][0][1][40] = 28,
+ [0][0][1][0][3][40] = 127,
+ [0][0][1][0][5][40] = 76,
+ [0][0][1][0][6][40] = 28,
+ [0][0][1][0][9][40] = 76,
+ [0][0][1][0][8][40] = 54,
+ [0][0][1][0][11][40] = 52,
+ [0][0][1][0][2][42] = 76,
+ [0][0][1][0][1][42] = 28,
+ [0][0][1][0][3][42] = 127,
+ [0][0][1][0][5][42] = 76,
+ [0][0][1][0][6][42] = 28,
+ [0][0][1][0][9][42] = 76,
+ [0][0][1][0][8][42] = 54,
+ [0][0][1][0][11][42] = 52,
+ [0][0][1][0][2][44] = 76,
+ [0][0][1][0][1][44] = 28,
+ [0][0][1][0][3][44] = 127,
+ [0][0][1][0][5][44] = 76,
+ [0][0][1][0][6][44] = 28,
+ [0][0][1][0][9][44] = 76,
+ [0][0][1][0][8][44] = 54,
+ [0][0][1][0][11][44] = 52,
+ [0][0][1][0][2][46] = 76,
+ [0][0][1][0][1][46] = 28,
+ [0][0][1][0][3][46] = 127,
+ [0][0][1][0][5][46] = 76,
+ [0][0][1][0][6][46] = 28,
+ [0][0][1][0][9][46] = 76,
+ [0][0][1][0][8][46] = 54,
+ [0][0][1][0][11][46] = 52,
+ [0][1][1][0][2][0] = 68,
+ [0][1][1][0][1][0] = 46,
+ [0][1][1][0][3][0] = 50,
+ [0][1][1][0][5][0] = 40,
+ [0][1][1][0][6][0] = 46,
+ [0][1][1][0][9][0] = 46,
+ [0][1][1][0][8][0] = 18,
+ [0][1][1][0][11][0] = 40,
+ [0][1][1][0][2][2] = 68,
+ [0][1][1][0][1][2] = 46,
+ [0][1][1][0][3][2] = 50,
+ [0][1][1][0][5][2] = 40,
+ [0][1][1][0][6][2] = 46,
+ [0][1][1][0][9][2] = 46,
+ [0][1][1][0][8][2] = 18,
+ [0][1][1][0][11][2] = 40,
+ [0][1][1][0][2][4] = 68,
+ [0][1][1][0][1][4] = 46,
+ [0][1][1][0][3][4] = 50,
+ [0][1][1][0][5][4] = 40,
+ [0][1][1][0][6][4] = 46,
+ [0][1][1][0][9][4] = 46,
+ [0][1][1][0][8][4] = 18,
+ [0][1][1][0][11][4] = 40,
+ [0][1][1][0][2][6] = 68,
+ [0][1][1][0][1][6] = 46,
+ [0][1][1][0][3][6] = 50,
+ [0][1][1][0][5][6] = 40,
+ [0][1][1][0][6][6] = 36,
+ [0][1][1][0][9][6] = 46,
+ [0][1][1][0][8][6] = 18,
+ [0][1][1][0][11][6] = 40,
+ [0][1][1][0][2][8] = 68,
+ [0][1][1][0][1][8] = 46,
+ [0][1][1][0][3][8] = 50,
+ [0][1][1][0][5][8] = 52,
+ [0][1][1][0][6][8] = 46,
+ [0][1][1][0][9][8] = 46,
+ [0][1][1][0][8][8] = 42,
+ [0][1][1][0][11][8] = 40,
+ [0][1][1][0][2][10] = 68,
+ [0][1][1][0][1][10] = 46,
+ [0][1][1][0][3][10] = 50,
+ [0][1][1][0][5][10] = 52,
+ [0][1][1][0][6][10] = 46,
+ [0][1][1][0][9][10] = 46,
+ [0][1][1][0][8][10] = 42,
+ [0][1][1][0][11][10] = 40,
+ [0][1][1][0][2][12] = 68,
+ [0][1][1][0][1][12] = 46,
+ [0][1][1][0][3][12] = 50,
+ [0][1][1][0][5][12] = 52,
+ [0][1][1][0][6][12] = 46,
+ [0][1][1][0][9][12] = 46,
+ [0][1][1][0][8][12] = 42,
+ [0][1][1][0][11][12] = 40,
+ [0][1][1][0][2][14] = 68,
+ [0][1][1][0][1][14] = 46,
+ [0][1][1][0][3][14] = 50,
+ [0][1][1][0][5][14] = 52,
+ [0][1][1][0][6][14] = 46,
+ [0][1][1][0][9][14] = 46,
+ [0][1][1][0][8][14] = 42,
+ [0][1][1][0][11][14] = 40,
+ [0][1][1][0][2][15] = 68,
+ [0][1][1][0][1][15] = 46,
+ [0][1][1][0][3][15] = 70,
+ [0][1][1][0][5][15] = 68,
+ [0][1][1][0][6][15] = 46,
+ [0][1][1][0][9][15] = 46,
+ [0][1][1][0][8][15] = 42,
+ [0][1][1][0][11][15] = 40,
+ [0][1][1][0][2][17] = 68,
+ [0][1][1][0][1][17] = 46,
+ [0][1][1][0][3][17] = 70,
+ [0][1][1][0][5][17] = 68,
+ [0][1][1][0][6][17] = 46,
+ [0][1][1][0][9][17] = 46,
+ [0][1][1][0][8][17] = 42,
+ [0][1][1][0][11][17] = 40,
+ [0][1][1][0][2][19] = 68,
+ [0][1][1][0][1][19] = 46,
+ [0][1][1][0][3][19] = 70,
+ [0][1][1][0][5][19] = 68,
+ [0][1][1][0][6][19] = 46,
+ [0][1][1][0][9][19] = 46,
+ [0][1][1][0][8][19] = 42,
+ [0][1][1][0][11][19] = 40,
+ [0][1][1][0][2][21] = 68,
+ [0][1][1][0][1][21] = 46,
+ [0][1][1][0][3][21] = 70,
+ [0][1][1][0][5][21] = 68,
+ [0][1][1][0][6][21] = 46,
+ [0][1][1][0][9][21] = 46,
+ [0][1][1][0][8][21] = 42,
+ [0][1][1][0][11][21] = 40,
+ [0][1][1][0][2][23] = 68,
+ [0][1][1][0][1][23] = 46,
+ [0][1][1][0][3][23] = 70,
+ [0][1][1][0][5][23] = 68,
+ [0][1][1][0][6][23] = 46,
+ [0][1][1][0][9][23] = 46,
+ [0][1][1][0][8][23] = 42,
+ [0][1][1][0][11][23] = 40,
+ [0][1][1][0][2][25] = 68,
+ [0][1][1][0][1][25] = 46,
+ [0][1][1][0][3][25] = 70,
+ [0][1][1][0][5][25] = 127,
+ [0][1][1][0][6][25] = 46,
+ [0][1][1][0][9][25] = 127,
+ [0][1][1][0][8][25] = 42,
+ [0][1][1][0][11][25] = 40,
+ [0][1][1][0][2][27] = 68,
+ [0][1][1][0][1][27] = 46,
+ [0][1][1][0][3][27] = 70,
+ [0][1][1][0][5][27] = 127,
+ [0][1][1][0][6][27] = 46,
+ [0][1][1][0][9][27] = 127,
+ [0][1][1][0][8][27] = 42,
+ [0][1][1][0][11][27] = 40,
+ [0][1][1][0][2][29] = 68,
+ [0][1][1][0][1][29] = 46,
+ [0][1][1][0][3][29] = 70,
+ [0][1][1][0][5][29] = 127,
+ [0][1][1][0][6][29] = 46,
+ [0][1][1][0][9][29] = 127,
+ [0][1][1][0][8][29] = 42,
+ [0][1][1][0][11][29] = 40,
+ [0][1][1][0][2][31] = 68,
+ [0][1][1][0][1][31] = 46,
+ [0][1][1][0][3][31] = 70,
+ [0][1][1][0][5][31] = 68,
+ [0][1][1][0][6][31] = 46,
+ [0][1][1][0][9][31] = 46,
+ [0][1][1][0][8][31] = 42,
+ [0][1][1][0][11][31] = 40,
+ [0][1][1][0][2][33] = 68,
+ [0][1][1][0][1][33] = 46,
+ [0][1][1][0][3][33] = 70,
+ [0][1][1][0][5][33] = 68,
+ [0][1][1][0][6][33] = 46,
+ [0][1][1][0][9][33] = 46,
+ [0][1][1][0][8][33] = 42,
+ [0][1][1][0][11][33] = 40,
+ [0][1][1][0][2][35] = 66,
+ [0][1][1][0][1][35] = 46,
+ [0][1][1][0][3][35] = 70,
+ [0][1][1][0][5][35] = 66,
+ [0][1][1][0][6][35] = 46,
+ [0][1][1][0][9][35] = 46,
+ [0][1][1][0][8][35] = 42,
+ [0][1][1][0][11][35] = 40,
+ [0][1][1][0][2][37] = 68,
+ [0][1][1][0][1][37] = 127,
+ [0][1][1][0][3][37] = 70,
+ [0][1][1][0][5][37] = 68,
+ [0][1][1][0][6][37] = 46,
+ [0][1][1][0][9][37] = 68,
+ [0][1][1][0][8][37] = 42,
+ [0][1][1][0][11][37] = 127,
+ [0][1][1][0][2][38] = 76,
+ [0][1][1][0][1][38] = 16,
+ [0][1][1][0][3][38] = 127,
+ [0][1][1][0][5][38] = 76,
+ [0][1][1][0][6][38] = 16,
+ [0][1][1][0][9][38] = 76,
+ [0][1][1][0][8][38] = 42,
+ [0][1][1][0][11][38] = 40,
+ [0][1][1][0][2][40] = 76,
+ [0][1][1][0][1][40] = 16,
+ [0][1][1][0][3][40] = 127,
+ [0][1][1][0][5][40] = 76,
+ [0][1][1][0][6][40] = 16,
+ [0][1][1][0][9][40] = 76,
+ [0][1][1][0][8][40] = 42,
+ [0][1][1][0][11][40] = 40,
+ [0][1][1][0][2][42] = 76,
+ [0][1][1][0][1][42] = 16,
+ [0][1][1][0][3][42] = 127,
+ [0][1][1][0][5][42] = 76,
+ [0][1][1][0][6][42] = 16,
+ [0][1][1][0][9][42] = 76,
+ [0][1][1][0][8][42] = 42,
+ [0][1][1][0][11][42] = 40,
+ [0][1][1][0][2][44] = 76,
+ [0][1][1][0][1][44] = 16,
+ [0][1][1][0][3][44] = 127,
+ [0][1][1][0][5][44] = 76,
+ [0][1][1][0][6][44] = 16,
+ [0][1][1][0][9][44] = 76,
+ [0][1][1][0][8][44] = 42,
+ [0][1][1][0][11][44] = 40,
+ [0][1][1][0][2][46] = 76,
+ [0][1][1][0][1][46] = 16,
+ [0][1][1][0][3][46] = 127,
+ [0][1][1][0][5][46] = 76,
+ [0][1][1][0][6][46] = 16,
+ [0][1][1][0][9][46] = 76,
+ [0][1][1][0][8][46] = 42,
+ [0][1][1][0][11][46] = 40,
+ [0][0][2][0][2][0] = 76,
+ [0][0][2][0][1][0] = 58,
+ [0][0][2][0][3][0] = 62,
+ [0][0][2][0][5][0] = 62,
+ [0][0][2][0][6][0] = 58,
+ [0][0][2][0][9][0] = 58,
+ [0][0][2][0][8][0] = 30,
+ [0][0][2][0][11][0] = 52,
+ [0][0][2][0][2][2] = 76,
+ [0][0][2][0][1][2] = 58,
+ [0][0][2][0][3][2] = 62,
+ [0][0][2][0][5][2] = 62,
+ [0][0][2][0][6][2] = 58,
+ [0][0][2][0][9][2] = 58,
+ [0][0][2][0][8][2] = 30,
+ [0][0][2][0][11][2] = 52,
+ [0][0][2][0][2][4] = 76,
+ [0][0][2][0][1][4] = 58,
+ [0][0][2][0][3][4] = 62,
+ [0][0][2][0][5][4] = 62,
+ [0][0][2][0][6][4] = 58,
+ [0][0][2][0][9][4] = 58,
+ [0][0][2][0][8][4] = 30,
+ [0][0][2][0][11][4] = 52,
+ [0][0][2][0][2][6] = 76,
+ [0][0][2][0][1][6] = 58,
+ [0][0][2][0][3][6] = 62,
+ [0][0][2][0][5][6] = 62,
+ [0][0][2][0][6][6] = 54,
+ [0][0][2][0][9][6] = 58,
+ [0][0][2][0][8][6] = 30,
+ [0][0][2][0][11][6] = 52,
+ [0][0][2][0][2][8] = 76,
+ [0][0][2][0][1][8] = 58,
+ [0][0][2][0][3][8] = 62,
+ [0][0][2][0][5][8] = 64,
+ [0][0][2][0][6][8] = 58,
+ [0][0][2][0][9][8] = 58,
+ [0][0][2][0][8][8] = 54,
+ [0][0][2][0][11][8] = 52,
+ [0][0][2][0][2][10] = 76,
+ [0][0][2][0][1][10] = 58,
+ [0][0][2][0][3][10] = 62,
+ [0][0][2][0][5][10] = 64,
+ [0][0][2][0][6][10] = 58,
+ [0][0][2][0][9][10] = 58,
+ [0][0][2][0][8][10] = 54,
+ [0][0][2][0][11][10] = 52,
+ [0][0][2][0][2][12] = 76,
+ [0][0][2][0][1][12] = 58,
+ [0][0][2][0][3][12] = 62,
+ [0][0][2][0][5][12] = 64,
+ [0][0][2][0][6][12] = 58,
+ [0][0][2][0][9][12] = 58,
+ [0][0][2][0][8][12] = 54,
+ [0][0][2][0][11][12] = 52,
+ [0][0][2][0][2][14] = 76,
+ [0][0][2][0][1][14] = 58,
+ [0][0][2][0][3][14] = 62,
+ [0][0][2][0][5][14] = 64,
+ [0][0][2][0][6][14] = 58,
+ [0][0][2][0][9][14] = 58,
+ [0][0][2][0][8][14] = 54,
+ [0][0][2][0][11][14] = 52,
+ [0][0][2][0][2][15] = 74,
+ [0][0][2][0][1][15] = 58,
+ [0][0][2][0][3][15] = 76,
+ [0][0][2][0][5][15] = 74,
+ [0][0][2][0][6][15] = 58,
+ [0][0][2][0][9][15] = 58,
+ [0][0][2][0][8][15] = 54,
+ [0][0][2][0][11][15] = 52,
+ [0][0][2][0][2][17] = 76,
+ [0][0][2][0][1][17] = 58,
+ [0][0][2][0][3][17] = 76,
+ [0][0][2][0][5][17] = 76,
+ [0][0][2][0][6][17] = 58,
+ [0][0][2][0][9][17] = 58,
+ [0][0][2][0][8][17] = 54,
+ [0][0][2][0][11][17] = 52,
+ [0][0][2][0][2][19] = 76,
+ [0][0][2][0][1][19] = 58,
+ [0][0][2][0][3][19] = 76,
+ [0][0][2][0][5][19] = 76,
+ [0][0][2][0][6][19] = 58,
+ [0][0][2][0][9][19] = 58,
+ [0][0][2][0][8][19] = 54,
+ [0][0][2][0][11][19] = 52,
+ [0][0][2][0][2][21] = 76,
+ [0][0][2][0][1][21] = 58,
+ [0][0][2][0][3][21] = 76,
+ [0][0][2][0][5][21] = 76,
+ [0][0][2][0][6][21] = 58,
+ [0][0][2][0][9][21] = 58,
+ [0][0][2][0][8][21] = 54,
+ [0][0][2][0][11][21] = 52,
+ [0][0][2][0][2][23] = 76,
+ [0][0][2][0][1][23] = 58,
+ [0][0][2][0][3][23] = 76,
+ [0][0][2][0][5][23] = 76,
+ [0][0][2][0][6][23] = 58,
+ [0][0][2][0][9][23] = 58,
+ [0][0][2][0][8][23] = 54,
+ [0][0][2][0][11][23] = 52,
+ [0][0][2][0][2][25] = 76,
+ [0][0][2][0][1][25] = 58,
+ [0][0][2][0][3][25] = 76,
+ [0][0][2][0][5][25] = 127,
+ [0][0][2][0][6][25] = 58,
+ [0][0][2][0][9][25] = 127,
+ [0][0][2][0][8][25] = 54,
+ [0][0][2][0][11][25] = 52,
+ [0][0][2][0][2][27] = 76,
+ [0][0][2][0][1][27] = 58,
+ [0][0][2][0][3][27] = 76,
+ [0][0][2][0][5][27] = 127,
+ [0][0][2][0][6][27] = 58,
+ [0][0][2][0][9][27] = 127,
+ [0][0][2][0][8][27] = 54,
+ [0][0][2][0][11][27] = 52,
+ [0][0][2][0][2][29] = 76,
+ [0][0][2][0][1][29] = 58,
+ [0][0][2][0][3][29] = 76,
+ [0][0][2][0][5][29] = 127,
+ [0][0][2][0][6][29] = 58,
+ [0][0][2][0][9][29] = 127,
+ [0][0][2][0][8][29] = 54,
+ [0][0][2][0][11][29] = 52,
+ [0][0][2][0][2][31] = 76,
+ [0][0][2][0][1][31] = 58,
+ [0][0][2][0][3][31] = 76,
+ [0][0][2][0][5][31] = 76,
+ [0][0][2][0][6][31] = 58,
+ [0][0][2][0][9][31] = 58,
+ [0][0][2][0][8][31] = 54,
+ [0][0][2][0][11][31] = 52,
+ [0][0][2][0][2][33] = 76,
+ [0][0][2][0][1][33] = 58,
+ [0][0][2][0][3][33] = 76,
+ [0][0][2][0][5][33] = 76,
+ [0][0][2][0][6][33] = 58,
+ [0][0][2][0][9][33] = 58,
+ [0][0][2][0][8][33] = 54,
+ [0][0][2][0][11][33] = 52,
+ [0][0][2][0][2][35] = 70,
+ [0][0][2][0][1][35] = 58,
+ [0][0][2][0][3][35] = 76,
+ [0][0][2][0][5][35] = 70,
+ [0][0][2][0][6][35] = 58,
+ [0][0][2][0][9][35] = 58,
+ [0][0][2][0][8][35] = 54,
+ [0][0][2][0][11][35] = 52,
+ [0][0][2][0][2][37] = 76,
+ [0][0][2][0][1][37] = 127,
+ [0][0][2][0][3][37] = 76,
+ [0][0][2][0][5][37] = 76,
+ [0][0][2][0][6][37] = 58,
+ [0][0][2][0][9][37] = 76,
+ [0][0][2][0][8][37] = 54,
+ [0][0][2][0][11][37] = 127,
+ [0][0][2][0][2][38] = 76,
+ [0][0][2][0][1][38] = 28,
+ [0][0][2][0][3][38] = 127,
+ [0][0][2][0][5][38] = 76,
+ [0][0][2][0][6][38] = 28,
+ [0][0][2][0][9][38] = 76,
+ [0][0][2][0][8][38] = 54,
+ [0][0][2][0][11][38] = 52,
+ [0][0][2][0][2][40] = 76,
+ [0][0][2][0][1][40] = 28,
+ [0][0][2][0][3][40] = 127,
+ [0][0][2][0][5][40] = 76,
+ [0][0][2][0][6][40] = 28,
+ [0][0][2][0][9][40] = 76,
+ [0][0][2][0][8][40] = 54,
+ [0][0][2][0][11][40] = 52,
+ [0][0][2][0][2][42] = 76,
+ [0][0][2][0][1][42] = 28,
+ [0][0][2][0][3][42] = 127,
+ [0][0][2][0][5][42] = 76,
+ [0][0][2][0][6][42] = 28,
+ [0][0][2][0][9][42] = 76,
+ [0][0][2][0][8][42] = 54,
+ [0][0][2][0][11][42] = 52,
+ [0][0][2][0][2][44] = 76,
+ [0][0][2][0][1][44] = 28,
+ [0][0][2][0][3][44] = 127,
+ [0][0][2][0][5][44] = 76,
+ [0][0][2][0][6][44] = 28,
+ [0][0][2][0][9][44] = 76,
+ [0][0][2][0][8][44] = 54,
+ [0][0][2][0][11][44] = 52,
+ [0][0][2][0][2][46] = 76,
+ [0][0][2][0][1][46] = 28,
+ [0][0][2][0][3][46] = 127,
+ [0][0][2][0][5][46] = 76,
+ [0][0][2][0][6][46] = 28,
+ [0][0][2][0][9][46] = 76,
+ [0][0][2][0][8][46] = 54,
+ [0][0][2][0][11][46] = 52,
+ [0][1][2][0][2][0] = 68,
+ [0][1][2][0][1][0] = 46,
+ [0][1][2][0][3][0] = 50,
+ [0][1][2][0][5][0] = 40,
+ [0][1][2][0][6][0] = 46,
+ [0][1][2][0][9][0] = 46,
+ [0][1][2][0][8][0] = 18,
+ [0][1][2][0][11][0] = 40,
+ [0][1][2][0][2][2] = 68,
+ [0][1][2][0][1][2] = 46,
+ [0][1][2][0][3][2] = 50,
+ [0][1][2][0][5][2] = 40,
+ [0][1][2][0][6][2] = 46,
+ [0][1][2][0][9][2] = 46,
+ [0][1][2][0][8][2] = 18,
+ [0][1][2][0][11][2] = 40,
+ [0][1][2][0][2][4] = 68,
+ [0][1][2][0][1][4] = 46,
+ [0][1][2][0][3][4] = 50,
+ [0][1][2][0][5][4] = 40,
+ [0][1][2][0][6][4] = 46,
+ [0][1][2][0][9][4] = 46,
+ [0][1][2][0][8][4] = 18,
+ [0][1][2][0][11][4] = 40,
+ [0][1][2][0][2][6] = 68,
+ [0][1][2][0][1][6] = 46,
+ [0][1][2][0][3][6] = 50,
+ [0][1][2][0][5][6] = 40,
+ [0][1][2][0][6][6] = 36,
+ [0][1][2][0][9][6] = 46,
+ [0][1][2][0][8][6] = 18,
+ [0][1][2][0][11][6] = 40,
+ [0][1][2][0][2][8] = 68,
+ [0][1][2][0][1][8] = 46,
+ [0][1][2][0][3][8] = 50,
+ [0][1][2][0][5][8] = 52,
+ [0][1][2][0][6][8] = 46,
+ [0][1][2][0][9][8] = 46,
+ [0][1][2][0][8][8] = 42,
+ [0][1][2][0][11][8] = 40,
+ [0][1][2][0][2][10] = 68,
+ [0][1][2][0][1][10] = 46,
+ [0][1][2][0][3][10] = 50,
+ [0][1][2][0][5][10] = 52,
+ [0][1][2][0][6][10] = 46,
+ [0][1][2][0][9][10] = 46,
+ [0][1][2][0][8][10] = 42,
+ [0][1][2][0][11][10] = 40,
+ [0][1][2][0][2][12] = 68,
+ [0][1][2][0][1][12] = 46,
+ [0][1][2][0][3][12] = 50,
+ [0][1][2][0][5][12] = 52,
+ [0][1][2][0][6][12] = 46,
+ [0][1][2][0][9][12] = 46,
+ [0][1][2][0][8][12] = 42,
+ [0][1][2][0][11][12] = 40,
+ [0][1][2][0][2][14] = 68,
+ [0][1][2][0][1][14] = 46,
+ [0][1][2][0][3][14] = 50,
+ [0][1][2][0][5][14] = 52,
+ [0][1][2][0][6][14] = 46,
+ [0][1][2][0][9][14] = 46,
+ [0][1][2][0][8][14] = 42,
+ [0][1][2][0][11][14] = 40,
+ [0][1][2][0][2][15] = 68,
+ [0][1][2][0][1][15] = 46,
+ [0][1][2][0][3][15] = 70,
+ [0][1][2][0][5][15] = 68,
+ [0][1][2][0][6][15] = 46,
+ [0][1][2][0][9][15] = 46,
+ [0][1][2][0][8][15] = 42,
+ [0][1][2][0][11][15] = 40,
+ [0][1][2][0][2][17] = 68,
+ [0][1][2][0][1][17] = 46,
+ [0][1][2][0][3][17] = 70,
+ [0][1][2][0][5][17] = 68,
+ [0][1][2][0][6][17] = 46,
+ [0][1][2][0][9][17] = 46,
+ [0][1][2][0][8][17] = 42,
+ [0][1][2][0][11][17] = 40,
+ [0][1][2][0][2][19] = 68,
+ [0][1][2][0][1][19] = 46,
+ [0][1][2][0][3][19] = 70,
+ [0][1][2][0][5][19] = 68,
+ [0][1][2][0][6][19] = 46,
+ [0][1][2][0][9][19] = 46,
+ [0][1][2][0][8][19] = 42,
+ [0][1][2][0][11][19] = 40,
+ [0][1][2][0][2][21] = 68,
+ [0][1][2][0][1][21] = 46,
+ [0][1][2][0][3][21] = 70,
+ [0][1][2][0][5][21] = 68,
+ [0][1][2][0][6][21] = 46,
+ [0][1][2][0][9][21] = 46,
+ [0][1][2][0][8][21] = 42,
+ [0][1][2][0][11][21] = 40,
+ [0][1][2][0][2][23] = 68,
+ [0][1][2][0][1][23] = 46,
+ [0][1][2][0][3][23] = 70,
+ [0][1][2][0][5][23] = 68,
+ [0][1][2][0][6][23] = 46,
+ [0][1][2][0][9][23] = 46,
+ [0][1][2][0][8][23] = 42,
+ [0][1][2][0][11][23] = 40,
+ [0][1][2][0][2][25] = 68,
+ [0][1][2][0][1][25] = 46,
+ [0][1][2][0][3][25] = 70,
+ [0][1][2][0][5][25] = 127,
+ [0][1][2][0][6][25] = 46,
+ [0][1][2][0][9][25] = 127,
+ [0][1][2][0][8][25] = 42,
+ [0][1][2][0][11][25] = 40,
+ [0][1][2][0][2][27] = 68,
+ [0][1][2][0][1][27] = 46,
+ [0][1][2][0][3][27] = 70,
+ [0][1][2][0][5][27] = 127,
+ [0][1][2][0][6][27] = 46,
+ [0][1][2][0][9][27] = 127,
+ [0][1][2][0][8][27] = 42,
+ [0][1][2][0][11][27] = 40,
+ [0][1][2][0][2][29] = 68,
+ [0][1][2][0][1][29] = 46,
+ [0][1][2][0][3][29] = 70,
+ [0][1][2][0][5][29] = 127,
+ [0][1][2][0][6][29] = 46,
+ [0][1][2][0][9][29] = 127,
+ [0][1][2][0][8][29] = 42,
+ [0][1][2][0][11][29] = 40,
+ [0][1][2][0][2][31] = 68,
+ [0][1][2][0][1][31] = 46,
+ [0][1][2][0][3][31] = 70,
+ [0][1][2][0][5][31] = 68,
+ [0][1][2][0][6][31] = 46,
+ [0][1][2][0][9][31] = 46,
+ [0][1][2][0][8][31] = 42,
+ [0][1][2][0][11][31] = 40,
+ [0][1][2][0][2][33] = 68,
+ [0][1][2][0][1][33] = 46,
+ [0][1][2][0][3][33] = 70,
+ [0][1][2][0][5][33] = 68,
+ [0][1][2][0][6][33] = 46,
+ [0][1][2][0][9][33] = 46,
+ [0][1][2][0][8][33] = 42,
+ [0][1][2][0][11][33] = 40,
+ [0][1][2][0][2][35] = 64,
+ [0][1][2][0][1][35] = 46,
+ [0][1][2][0][3][35] = 70,
+ [0][1][2][0][5][35] = 64,
+ [0][1][2][0][6][35] = 46,
+ [0][1][2][0][9][35] = 46,
+ [0][1][2][0][8][35] = 42,
+ [0][1][2][0][11][35] = 40,
+ [0][1][2][0][2][37] = 68,
+ [0][1][2][0][1][37] = 127,
+ [0][1][2][0][3][37] = 70,
+ [0][1][2][0][5][37] = 68,
+ [0][1][2][0][6][37] = 46,
+ [0][1][2][0][9][37] = 68,
+ [0][1][2][0][8][37] = 42,
+ [0][1][2][0][11][37] = 127,
+ [0][1][2][0][2][38] = 76,
+ [0][1][2][0][1][38] = 16,
+ [0][1][2][0][3][38] = 127,
+ [0][1][2][0][5][38] = 76,
+ [0][1][2][0][6][38] = 16,
+ [0][1][2][0][9][38] = 76,
+ [0][1][2][0][8][38] = 42,
+ [0][1][2][0][11][38] = 40,
+ [0][1][2][0][2][40] = 76,
+ [0][1][2][0][1][40] = 16,
+ [0][1][2][0][3][40] = 127,
+ [0][1][2][0][5][40] = 76,
+ [0][1][2][0][6][40] = 16,
+ [0][1][2][0][9][40] = 76,
+ [0][1][2][0][8][40] = 42,
+ [0][1][2][0][11][40] = 40,
+ [0][1][2][0][2][42] = 76,
+ [0][1][2][0][1][42] = 16,
+ [0][1][2][0][3][42] = 127,
+ [0][1][2][0][5][42] = 76,
+ [0][1][2][0][6][42] = 16,
+ [0][1][2][0][9][42] = 76,
+ [0][1][2][0][8][42] = 42,
+ [0][1][2][0][11][42] = 40,
+ [0][1][2][0][2][44] = 76,
+ [0][1][2][0][1][44] = 16,
+ [0][1][2][0][3][44] = 127,
+ [0][1][2][0][5][44] = 76,
+ [0][1][2][0][6][44] = 16,
+ [0][1][2][0][9][44] = 76,
+ [0][1][2][0][8][44] = 42,
+ [0][1][2][0][11][44] = 40,
+ [0][1][2][0][2][46] = 76,
+ [0][1][2][0][1][46] = 16,
+ [0][1][2][0][3][46] = 127,
+ [0][1][2][0][5][46] = 76,
+ [0][1][2][0][6][46] = 16,
+ [0][1][2][0][9][46] = 76,
+ [0][1][2][0][8][46] = 42,
+ [0][1][2][0][11][46] = 40,
+ [0][1][2][1][2][0] = 68,
+ [0][1][2][1][1][0] = 34,
+ [0][1][2][1][3][0] = 50,
+ [0][1][2][1][5][0] = 38,
+ [0][1][2][1][6][0] = 34,
+ [0][1][2][1][9][0] = 34,
+ [0][1][2][1][8][0] = 6,
+ [0][1][2][1][11][0] = 28,
+ [0][1][2][1][2][2] = 68,
+ [0][1][2][1][1][2] = 34,
+ [0][1][2][1][3][2] = 50,
+ [0][1][2][1][5][2] = 38,
+ [0][1][2][1][6][2] = 34,
+ [0][1][2][1][9][2] = 34,
+ [0][1][2][1][8][2] = 6,
+ [0][1][2][1][11][2] = 28,
+ [0][1][2][1][2][4] = 68,
+ [0][1][2][1][1][4] = 34,
+ [0][1][2][1][3][4] = 50,
+ [0][1][2][1][5][4] = 38,
+ [0][1][2][1][6][4] = 34,
+ [0][1][2][1][9][4] = 34,
+ [0][1][2][1][8][4] = 6,
+ [0][1][2][1][11][4] = 28,
+ [0][1][2][1][2][6] = 68,
+ [0][1][2][1][1][6] = 34,
+ [0][1][2][1][3][6] = 50,
+ [0][1][2][1][5][6] = 38,
+ [0][1][2][1][6][6] = 34,
+ [0][1][2][1][9][6] = 34,
+ [0][1][2][1][8][6] = 6,
+ [0][1][2][1][11][6] = 28,
+ [0][1][2][1][2][8] = 68,
+ [0][1][2][1][1][8] = 34,
+ [0][1][2][1][3][8] = 50,
+ [0][1][2][1][5][8] = 38,
+ [0][1][2][1][6][8] = 34,
+ [0][1][2][1][9][8] = 34,
+ [0][1][2][1][8][8] = 30,
+ [0][1][2][1][11][8] = 28,
+ [0][1][2][1][2][10] = 68,
+ [0][1][2][1][1][10] = 34,
+ [0][1][2][1][3][10] = 50,
+ [0][1][2][1][5][10] = 38,
+ [0][1][2][1][6][10] = 34,
+ [0][1][2][1][9][10] = 34,
+ [0][1][2][1][8][10] = 30,
+ [0][1][2][1][11][10] = 28,
+ [0][1][2][1][2][12] = 68,
+ [0][1][2][1][1][12] = 34,
+ [0][1][2][1][3][12] = 50,
+ [0][1][2][1][5][12] = 38,
+ [0][1][2][1][6][12] = 34,
+ [0][1][2][1][9][12] = 34,
+ [0][1][2][1][8][12] = 30,
+ [0][1][2][1][11][12] = 28,
+ [0][1][2][1][2][14] = 68,
+ [0][1][2][1][1][14] = 34,
+ [0][1][2][1][3][14] = 50,
+ [0][1][2][1][5][14] = 38,
+ [0][1][2][1][6][14] = 34,
+ [0][1][2][1][9][14] = 34,
+ [0][1][2][1][8][14] = 30,
+ [0][1][2][1][11][14] = 28,
+ [0][1][2][1][2][15] = 68,
+ [0][1][2][1][1][15] = 34,
+ [0][1][2][1][3][15] = 70,
+ [0][1][2][1][5][15] = 62,
+ [0][1][2][1][6][15] = 34,
+ [0][1][2][1][9][15] = 34,
+ [0][1][2][1][8][15] = 30,
+ [0][1][2][1][11][15] = 28,
+ [0][1][2][1][2][17] = 68,
+ [0][1][2][1][1][17] = 34,
+ [0][1][2][1][3][17] = 70,
+ [0][1][2][1][5][17] = 62,
+ [0][1][2][1][6][17] = 34,
+ [0][1][2][1][9][17] = 34,
+ [0][1][2][1][8][17] = 30,
+ [0][1][2][1][11][17] = 28,
+ [0][1][2][1][2][19] = 68,
+ [0][1][2][1][1][19] = 34,
+ [0][1][2][1][3][19] = 70,
+ [0][1][2][1][5][19] = 62,
+ [0][1][2][1][6][19] = 34,
+ [0][1][2][1][9][19] = 34,
+ [0][1][2][1][8][19] = 30,
+ [0][1][2][1][11][19] = 28,
+ [0][1][2][1][2][21] = 68,
+ [0][1][2][1][1][21] = 34,
+ [0][1][2][1][3][21] = 70,
+ [0][1][2][1][5][21] = 62,
+ [0][1][2][1][6][21] = 34,
+ [0][1][2][1][9][21] = 34,
+ [0][1][2][1][8][21] = 30,
+ [0][1][2][1][11][21] = 28,
+ [0][1][2][1][2][23] = 68,
+ [0][1][2][1][1][23] = 34,
+ [0][1][2][1][3][23] = 70,
+ [0][1][2][1][5][23] = 62,
+ [0][1][2][1][6][23] = 34,
+ [0][1][2][1][9][23] = 34,
+ [0][1][2][1][8][23] = 30,
+ [0][1][2][1][11][23] = 28,
+ [0][1][2][1][2][25] = 68,
+ [0][1][2][1][1][25] = 34,
+ [0][1][2][1][3][25] = 70,
+ [0][1][2][1][5][25] = 127,
+ [0][1][2][1][6][25] = 34,
+ [0][1][2][1][9][25] = 127,
+ [0][1][2][1][8][25] = 30,
+ [0][1][2][1][11][25] = 28,
+ [0][1][2][1][2][27] = 68,
+ [0][1][2][1][1][27] = 34,
+ [0][1][2][1][3][27] = 70,
+ [0][1][2][1][5][27] = 127,
+ [0][1][2][1][6][27] = 34,
+ [0][1][2][1][9][27] = 127,
+ [0][1][2][1][8][27] = 30,
+ [0][1][2][1][11][27] = 28,
+ [0][1][2][1][2][29] = 68,
+ [0][1][2][1][1][29] = 34,
+ [0][1][2][1][3][29] = 70,
+ [0][1][2][1][5][29] = 127,
+ [0][1][2][1][6][29] = 34,
+ [0][1][2][1][9][29] = 127,
+ [0][1][2][1][8][29] = 30,
+ [0][1][2][1][11][29] = 28,
+ [0][1][2][1][2][31] = 68,
+ [0][1][2][1][1][31] = 34,
+ [0][1][2][1][3][31] = 70,
+ [0][1][2][1][5][31] = 62,
+ [0][1][2][1][6][31] = 34,
+ [0][1][2][1][9][31] = 34,
+ [0][1][2][1][8][31] = 30,
+ [0][1][2][1][11][31] = 28,
+ [0][1][2][1][2][33] = 68,
+ [0][1][2][1][1][33] = 34,
+ [0][1][2][1][3][33] = 70,
+ [0][1][2][1][5][33] = 62,
+ [0][1][2][1][6][33] = 34,
+ [0][1][2][1][9][33] = 34,
+ [0][1][2][1][8][33] = 30,
+ [0][1][2][1][11][33] = 28,
+ [0][1][2][1][2][35] = 64,
+ [0][1][2][1][1][35] = 34,
+ [0][1][2][1][3][35] = 70,
+ [0][1][2][1][5][35] = 62,
+ [0][1][2][1][6][35] = 34,
+ [0][1][2][1][9][35] = 34,
+ [0][1][2][1][8][35] = 30,
+ [0][1][2][1][11][35] = 28,
+ [0][1][2][1][2][37] = 68,
+ [0][1][2][1][1][37] = 127,
+ [0][1][2][1][3][37] = 70,
+ [0][1][2][1][5][37] = 62,
+ [0][1][2][1][6][37] = 34,
+ [0][1][2][1][9][37] = 68,
+ [0][1][2][1][8][37] = 30,
+ [0][1][2][1][11][37] = 127,
+ [0][1][2][1][2][38] = 76,
+ [0][1][2][1][1][38] = 4,
+ [0][1][2][1][3][38] = 127,
+ [0][1][2][1][5][38] = 76,
+ [0][1][2][1][6][38] = 4,
+ [0][1][2][1][9][38] = 76,
+ [0][1][2][1][8][38] = 30,
+ [0][1][2][1][11][38] = 28,
+ [0][1][2][1][2][40] = 76,
+ [0][1][2][1][1][40] = 4,
+ [0][1][2][1][3][40] = 127,
+ [0][1][2][1][5][40] = 76,
+ [0][1][2][1][6][40] = 4,
+ [0][1][2][1][9][40] = 76,
+ [0][1][2][1][8][40] = 30,
+ [0][1][2][1][11][40] = 28,
+ [0][1][2][1][2][42] = 76,
+ [0][1][2][1][1][42] = 4,
+ [0][1][2][1][3][42] = 127,
+ [0][1][2][1][5][42] = 76,
+ [0][1][2][1][6][42] = 4,
+ [0][1][2][1][9][42] = 76,
+ [0][1][2][1][8][42] = 30,
+ [0][1][2][1][11][42] = 28,
+ [0][1][2][1][2][44] = 76,
+ [0][1][2][1][1][44] = 4,
+ [0][1][2][1][3][44] = 127,
+ [0][1][2][1][5][44] = 76,
+ [0][1][2][1][6][44] = 4,
+ [0][1][2][1][9][44] = 76,
+ [0][1][2][1][8][44] = 30,
+ [0][1][2][1][11][44] = 28,
+ [0][1][2][1][2][46] = 76,
+ [0][1][2][1][1][46] = 4,
+ [0][1][2][1][3][46] = 127,
+ [0][1][2][1][5][46] = 76,
+ [0][1][2][1][6][46] = 4,
+ [0][1][2][1][9][46] = 76,
+ [0][1][2][1][8][46] = 30,
+ [0][1][2][1][11][46] = 28,
+ [1][0][2][0][2][1] = 68,
+ [1][0][2][0][1][1] = 64,
+ [1][0][2][0][3][1] = 62,
+ [1][0][2][0][5][1] = 64,
+ [1][0][2][0][6][1] = 64,
+ [1][0][2][0][9][1] = 64,
+ [1][0][2][0][8][1] = 30,
+ [1][0][2][0][11][1] = 52,
+ [1][0][2][0][2][5] = 72,
+ [1][0][2][0][1][5] = 64,
+ [1][0][2][0][3][5] = 62,
+ [1][0][2][0][5][5] = 64,
+ [1][0][2][0][6][5] = 60,
+ [1][0][2][0][9][5] = 64,
+ [1][0][2][0][8][5] = 30,
+ [1][0][2][0][11][5] = 52,
+ [1][0][2][0][2][9] = 72,
+ [1][0][2][0][1][9] = 64,
+ [1][0][2][0][3][9] = 62,
+ [1][0][2][0][5][9] = 64,
+ [1][0][2][0][6][9] = 64,
+ [1][0][2][0][9][9] = 64,
+ [1][0][2][0][8][9] = 54,
+ [1][0][2][0][11][9] = 52,
+ [1][0][2][0][2][13] = 66,
+ [1][0][2][0][1][13] = 64,
+ [1][0][2][0][3][13] = 62,
+ [1][0][2][0][5][13] = 64,
+ [1][0][2][0][6][13] = 64,
+ [1][0][2][0][9][13] = 64,
+ [1][0][2][0][8][13] = 54,
+ [1][0][2][0][11][13] = 52,
+ [1][0][2][0][2][16] = 62,
+ [1][0][2][0][1][16] = 64,
+ [1][0][2][0][3][16] = 72,
+ [1][0][2][0][5][16] = 62,
+ [1][0][2][0][6][16] = 64,
+ [1][0][2][0][9][16] = 64,
+ [1][0][2][0][8][16] = 54,
+ [1][0][2][0][11][16] = 52,
+ [1][0][2][0][2][20] = 72,
+ [1][0][2][0][1][20] = 64,
+ [1][0][2][0][3][20] = 72,
+ [1][0][2][0][5][20] = 72,
+ [1][0][2][0][6][20] = 64,
+ [1][0][2][0][9][20] = 64,
+ [1][0][2][0][8][20] = 54,
+ [1][0][2][0][11][20] = 52,
+ [1][0][2][0][2][24] = 72,
+ [1][0][2][0][1][24] = 64,
+ [1][0][2][0][3][24] = 72,
+ [1][0][2][0][5][24] = 127,
+ [1][0][2][0][6][24] = 64,
+ [1][0][2][0][9][24] = 127,
+ [1][0][2][0][8][24] = 54,
+ [1][0][2][0][11][24] = 52,
+ [1][0][2][0][2][28] = 72,
+ [1][0][2][0][1][28] = 64,
+ [1][0][2][0][3][28] = 72,
+ [1][0][2][0][5][28] = 127,
+ [1][0][2][0][6][28] = 64,
+ [1][0][2][0][9][28] = 127,
+ [1][0][2][0][8][28] = 54,
+ [1][0][2][0][11][28] = 52,
+ [1][0][2][0][2][32] = 72,
+ [1][0][2][0][1][32] = 64,
+ [1][0][2][0][3][32] = 72,
+ [1][0][2][0][5][32] = 72,
+ [1][0][2][0][6][32] = 64,
+ [1][0][2][0][9][32] = 64,
+ [1][0][2][0][8][32] = 54,
+ [1][0][2][0][11][32] = 52,
+ [1][0][2][0][2][36] = 72,
+ [1][0][2][0][1][36] = 127,
+ [1][0][2][0][3][36] = 72,
+ [1][0][2][0][5][36] = 72,
+ [1][0][2][0][6][36] = 64,
+ [1][0][2][0][9][36] = 72,
+ [1][0][2][0][8][36] = 54,
+ [1][0][2][0][11][36] = 127,
+ [1][0][2][0][2][39] = 72,
+ [1][0][2][0][1][39] = 28,
+ [1][0][2][0][3][39] = 127,
+ [1][0][2][0][5][39] = 72,
+ [1][0][2][0][6][39] = 28,
+ [1][0][2][0][9][39] = 72,
+ [1][0][2][0][8][39] = 54,
+ [1][0][2][0][11][39] = 52,
+ [1][0][2][0][2][43] = 72,
+ [1][0][2][0][1][43] = 28,
+ [1][0][2][0][3][43] = 127,
+ [1][0][2][0][5][43] = 72,
+ [1][0][2][0][6][43] = 28,
+ [1][0][2][0][9][43] = 72,
+ [1][0][2][0][8][43] = 54,
+ [1][0][2][0][11][43] = 52,
+ [1][1][2][0][2][1] = 58,
+ [1][1][2][0][1][1] = 52,
+ [1][1][2][0][3][1] = 50,
+ [1][1][2][0][5][1] = 52,
+ [1][1][2][0][6][1] = 52,
+ [1][1][2][0][9][1] = 52,
+ [1][1][2][0][8][1] = 18,
+ [1][1][2][0][11][1] = 40,
+ [1][1][2][0][2][5] = 72,
+ [1][1][2][0][1][5] = 52,
+ [1][1][2][0][3][5] = 50,
+ [1][1][2][0][5][5] = 52,
+ [1][1][2][0][6][5] = 46,
+ [1][1][2][0][9][5] = 52,
+ [1][1][2][0][8][5] = 18,
+ [1][1][2][0][11][5] = 40,
+ [1][1][2][0][2][9] = 72,
+ [1][1][2][0][1][9] = 52,
+ [1][1][2][0][3][9] = 50,
+ [1][1][2][0][5][9] = 52,
+ [1][1][2][0][6][9] = 52,
+ [1][1][2][0][9][9] = 52,
+ [1][1][2][0][8][9] = 42,
+ [1][1][2][0][11][9] = 40,
+ [1][1][2][0][2][13] = 58,
+ [1][1][2][0][1][13] = 52,
+ [1][1][2][0][3][13] = 50,
+ [1][1][2][0][5][13] = 52,
+ [1][1][2][0][6][13] = 52,
+ [1][1][2][0][9][13] = 52,
+ [1][1][2][0][8][13] = 42,
+ [1][1][2][0][11][13] = 40,
+ [1][1][2][0][2][16] = 56,
+ [1][1][2][0][1][16] = 52,
+ [1][1][2][0][3][16] = 72,
+ [1][1][2][0][5][16] = 56,
+ [1][1][2][0][6][16] = 52,
+ [1][1][2][0][9][16] = 52,
+ [1][1][2][0][8][16] = 42,
+ [1][1][2][0][11][16] = 40,
+ [1][1][2][0][2][20] = 72,
+ [1][1][2][0][1][20] = 52,
+ [1][1][2][0][3][20] = 72,
+ [1][1][2][0][5][20] = 72,
+ [1][1][2][0][6][20] = 52,
+ [1][1][2][0][9][20] = 52,
+ [1][1][2][0][8][20] = 42,
+ [1][1][2][0][11][20] = 40,
+ [1][1][2][0][2][24] = 72,
+ [1][1][2][0][1][24] = 52,
+ [1][1][2][0][3][24] = 72,
+ [1][1][2][0][5][24] = 127,
+ [1][1][2][0][6][24] = 52,
+ [1][1][2][0][9][24] = 127,
+ [1][1][2][0][8][24] = 42,
+ [1][1][2][0][11][24] = 40,
+ [1][1][2][0][2][28] = 72,
+ [1][1][2][0][1][28] = 52,
+ [1][1][2][0][3][28] = 72,
+ [1][1][2][0][5][28] = 127,
+ [1][1][2][0][6][28] = 52,
+ [1][1][2][0][9][28] = 127,
+ [1][1][2][0][8][28] = 42,
+ [1][1][2][0][11][28] = 40,
+ [1][1][2][0][2][32] = 68,
+ [1][1][2][0][1][32] = 52,
+ [1][1][2][0][3][32] = 72,
+ [1][1][2][0][5][32] = 68,
+ [1][1][2][0][6][32] = 52,
+ [1][1][2][0][9][32] = 52,
+ [1][1][2][0][8][32] = 42,
+ [1][1][2][0][11][32] = 40,
+ [1][1][2][0][2][36] = 72,
+ [1][1][2][0][1][36] = 127,
+ [1][1][2][0][3][36] = 72,
+ [1][1][2][0][5][36] = 72,
+ [1][1][2][0][6][36] = 52,
+ [1][1][2][0][9][36] = 72,
+ [1][1][2][0][8][36] = 42,
+ [1][1][2][0][11][36] = 127,
+ [1][1][2][0][2][39] = 72,
+ [1][1][2][0][1][39] = 16,
+ [1][1][2][0][3][39] = 127,
+ [1][1][2][0][5][39] = 72,
+ [1][1][2][0][6][39] = 16,
+ [1][1][2][0][9][39] = 72,
+ [1][1][2][0][8][39] = 42,
+ [1][1][2][0][11][39] = 40,
+ [1][1][2][0][2][43] = 72,
+ [1][1][2][0][1][43] = 16,
+ [1][1][2][0][3][43] = 127,
+ [1][1][2][0][5][43] = 72,
+ [1][1][2][0][6][43] = 16,
+ [1][1][2][0][9][43] = 72,
+ [1][1][2][0][8][43] = 42,
+ [1][1][2][0][11][43] = 40,
+ [1][1][2][1][2][1] = 58,
+ [1][1][2][1][1][1] = 40,
+ [1][1][2][1][3][1] = 50,
+ [1][1][2][1][5][1] = 40,
+ [1][1][2][1][6][1] = 40,
+ [1][1][2][1][9][1] = 40,
+ [1][1][2][1][8][1] = 6,
+ [1][1][2][1][11][1] = 28,
+ [1][1][2][1][2][5] = 68,
+ [1][1][2][1][1][5] = 40,
+ [1][1][2][1][3][5] = 50,
+ [1][1][2][1][5][5] = 40,
+ [1][1][2][1][6][5] = 40,
+ [1][1][2][1][9][5] = 40,
+ [1][1][2][1][8][5] = 6,
+ [1][1][2][1][11][5] = 28,
+ [1][1][2][1][2][9] = 68,
+ [1][1][2][1][1][9] = 40,
+ [1][1][2][1][3][9] = 50,
+ [1][1][2][1][5][9] = 40,
+ [1][1][2][1][6][9] = 40,
+ [1][1][2][1][9][9] = 40,
+ [1][1][2][1][8][9] = 30,
+ [1][1][2][1][11][9] = 28,
+ [1][1][2][1][2][13] = 58,
+ [1][1][2][1][1][13] = 40,
+ [1][1][2][1][3][13] = 50,
+ [1][1][2][1][5][13] = 40,
+ [1][1][2][1][6][13] = 40,
+ [1][1][2][1][9][13] = 40,
+ [1][1][2][1][8][13] = 30,
+ [1][1][2][1][11][13] = 28,
+ [1][1][2][1][2][16] = 56,
+ [1][1][2][1][1][16] = 40,
+ [1][1][2][1][3][16] = 72,
+ [1][1][2][1][5][16] = 56,
+ [1][1][2][1][6][16] = 40,
+ [1][1][2][1][9][16] = 40,
+ [1][1][2][1][8][16] = 30,
+ [1][1][2][1][11][16] = 28,
+ [1][1][2][1][2][20] = 68,
+ [1][1][2][1][1][20] = 40,
+ [1][1][2][1][3][20] = 72,
+ [1][1][2][1][5][20] = 68,
+ [1][1][2][1][6][20] = 40,
+ [1][1][2][1][9][20] = 40,
+ [1][1][2][1][8][20] = 30,
+ [1][1][2][1][11][20] = 28,
+ [1][1][2][1][2][24] = 68,
+ [1][1][2][1][1][24] = 40,
+ [1][1][2][1][3][24] = 72,
+ [1][1][2][1][5][24] = 127,
+ [1][1][2][1][6][24] = 40,
+ [1][1][2][1][9][24] = 127,
+ [1][1][2][1][8][24] = 30,
+ [1][1][2][1][11][24] = 28,
+ [1][1][2][1][2][28] = 68,
+ [1][1][2][1][1][28] = 40,
+ [1][1][2][1][3][28] = 72,
+ [1][1][2][1][5][28] = 127,
+ [1][1][2][1][6][28] = 40,
+ [1][1][2][1][9][28] = 127,
+ [1][1][2][1][8][28] = 30,
+ [1][1][2][1][11][28] = 28,
+ [1][1][2][1][2][32] = 68,
+ [1][1][2][1][1][32] = 40,
+ [1][1][2][1][3][32] = 72,
+ [1][1][2][1][5][32] = 68,
+ [1][1][2][1][6][32] = 40,
+ [1][1][2][1][9][32] = 40,
+ [1][1][2][1][8][32] = 30,
+ [1][1][2][1][11][32] = 28,
+ [1][1][2][1][2][36] = 68,
+ [1][1][2][1][1][36] = 127,
+ [1][1][2][1][3][36] = 72,
+ [1][1][2][1][5][36] = 68,
+ [1][1][2][1][6][36] = 40,
+ [1][1][2][1][9][36] = 68,
+ [1][1][2][1][8][36] = 30,
+ [1][1][2][1][11][36] = 127,
+ [1][1][2][1][2][39] = 72,
+ [1][1][2][1][1][39] = 4,
+ [1][1][2][1][3][39] = 127,
+ [1][1][2][1][5][39] = 72,
+ [1][1][2][1][6][39] = 4,
+ [1][1][2][1][9][39] = 72,
+ [1][1][2][1][8][39] = 30,
+ [1][1][2][1][11][39] = 28,
+ [1][1][2][1][2][43] = 72,
+ [1][1][2][1][1][43] = 4,
+ [1][1][2][1][3][43] = 127,
+ [1][1][2][1][5][43] = 72,
+ [1][1][2][1][6][43] = 4,
+ [1][1][2][1][9][43] = 72,
+ [1][1][2][1][8][43] = 30,
+ [1][1][2][1][11][43] = 28,
+ [2][0][2][0][2][3] = 64,
+ [2][0][2][0][1][3] = 64,
+ [2][0][2][0][3][3] = 64,
+ [2][0][2][0][5][3] = 62,
+ [2][0][2][0][6][3] = 64,
+ [2][0][2][0][9][3] = 64,
+ [2][0][2][0][8][3] = 30,
+ [2][0][2][0][11][3] = 52,
+ [2][0][2][0][2][11] = 64,
+ [2][0][2][0][1][11] = 64,
+ [2][0][2][0][3][11] = 64,
+ [2][0][2][0][5][11] = 62,
+ [2][0][2][0][6][11] = 64,
+ [2][0][2][0][9][11] = 64,
+ [2][0][2][0][8][11] = 54,
+ [2][0][2][0][11][11] = 52,
+ [2][0][2][0][2][18] = 62,
+ [2][0][2][0][1][18] = 64,
+ [2][0][2][0][3][18] = 72,
+ [2][0][2][0][5][18] = 66,
+ [2][0][2][0][6][18] = 64,
+ [2][0][2][0][9][18] = 64,
+ [2][0][2][0][8][18] = 54,
+ [2][0][2][0][11][18] = 52,
+ [2][0][2][0][2][26] = 72,
+ [2][0][2][0][1][26] = 64,
+ [2][0][2][0][3][26] = 72,
+ [2][0][2][0][5][26] = 127,
+ [2][0][2][0][6][26] = 64,
+ [2][0][2][0][9][26] = 127,
+ [2][0][2][0][8][26] = 54,
+ [2][0][2][0][11][26] = 52,
+ [2][0][2][0][2][34] = 72,
+ [2][0][2][0][1][34] = 127,
+ [2][0][2][0][3][34] = 72,
+ [2][0][2][0][5][34] = 72,
+ [2][0][2][0][6][34] = 64,
+ [2][0][2][0][9][34] = 72,
+ [2][0][2][0][8][34] = 54,
+ [2][0][2][0][11][34] = 127,
+ [2][0][2][0][2][41] = 72,
+ [2][0][2][0][1][41] = 28,
+ [2][0][2][0][3][41] = 127,
+ [2][0][2][0][5][41] = 72,
+ [2][0][2][0][6][41] = 28,
+ [2][0][2][0][9][41] = 72,
+ [2][0][2][0][8][41] = 54,
+ [2][0][2][0][11][41] = 52,
+ [2][1][2][0][2][3] = 56,
+ [2][1][2][0][1][3] = 52,
+ [2][1][2][0][3][3] = 52,
+ [2][1][2][0][5][3] = 52,
+ [2][1][2][0][6][3] = 52,
+ [2][1][2][0][9][3] = 52,
+ [2][1][2][0][8][3] = 18,
+ [2][1][2][0][11][3] = 40,
+ [2][1][2][0][2][11] = 56,
+ [2][1][2][0][1][11] = 52,
+ [2][1][2][0][3][11] = 52,
+ [2][1][2][0][5][11] = 52,
+ [2][1][2][0][6][11] = 52,
+ [2][1][2][0][9][11] = 52,
+ [2][1][2][0][8][11] = 42,
+ [2][1][2][0][11][11] = 40,
+ [2][1][2][0][2][18] = 56,
+ [2][1][2][0][1][18] = 52,
+ [2][1][2][0][3][18] = 72,
+ [2][1][2][0][5][18] = 56,
+ [2][1][2][0][6][18] = 52,
+ [2][1][2][0][9][18] = 52,
+ [2][1][2][0][8][18] = 42,
+ [2][1][2][0][11][18] = 40,
+ [2][1][2][0][2][26] = 72,
+ [2][1][2][0][1][26] = 52,
+ [2][1][2][0][3][26] = 72,
+ [2][1][2][0][5][26] = 127,
+ [2][1][2][0][6][26] = 52,
+ [2][1][2][0][9][26] = 127,
+ [2][1][2][0][8][26] = 42,
+ [2][1][2][0][11][26] = 40,
+ [2][1][2][0][2][34] = 72,
+ [2][1][2][0][1][34] = 127,
+ [2][1][2][0][3][34] = 72,
+ [2][1][2][0][5][34] = 72,
+ [2][1][2][0][6][34] = 52,
+ [2][1][2][0][9][34] = 72,
+ [2][1][2][0][8][34] = 42,
+ [2][1][2][0][11][34] = 127,
+ [2][1][2][0][2][41] = 72,
+ [2][1][2][0][1][41] = 16,
+ [2][1][2][0][3][41] = 127,
+ [2][1][2][0][5][41] = 72,
+ [2][1][2][0][6][41] = 16,
+ [2][1][2][0][9][41] = 72,
+ [2][1][2][0][8][41] = 42,
+ [2][1][2][0][11][41] = 40,
+ [2][1][2][1][2][3] = 56,
+ [2][1][2][1][1][3] = 40,
+ [2][1][2][1][3][3] = 52,
+ [2][1][2][1][5][3] = 40,
+ [2][1][2][1][6][3] = 40,
+ [2][1][2][1][9][3] = 40,
+ [2][1][2][1][8][3] = 6,
+ [2][1][2][1][11][3] = 28,
+ [2][1][2][1][2][11] = 56,
+ [2][1][2][1][1][11] = 40,
+ [2][1][2][1][3][11] = 52,
+ [2][1][2][1][5][11] = 40,
+ [2][1][2][1][6][11] = 40,
+ [2][1][2][1][9][11] = 40,
+ [2][1][2][1][8][11] = 30,
+ [2][1][2][1][11][11] = 28,
+ [2][1][2][1][2][18] = 56,
+ [2][1][2][1][1][18] = 40,
+ [2][1][2][1][3][18] = 72,
+ [2][1][2][1][5][18] = 56,
+ [2][1][2][1][6][18] = 40,
+ [2][1][2][1][9][18] = 40,
+ [2][1][2][1][8][18] = 30,
+ [2][1][2][1][11][18] = 28,
+ [2][1][2][1][2][26] = 68,
+ [2][1][2][1][1][26] = 40,
+ [2][1][2][1][3][26] = 72,
+ [2][1][2][1][5][26] = 127,
+ [2][1][2][1][6][26] = 40,
+ [2][1][2][1][9][26] = 127,
+ [2][1][2][1][8][26] = 30,
+ [2][1][2][1][11][26] = 28,
+ [2][1][2][1][2][34] = 68,
+ [2][1][2][1][1][34] = 127,
+ [2][1][2][1][3][34] = 72,
+ [2][1][2][1][5][34] = 68,
+ [2][1][2][1][6][34] = 40,
+ [2][1][2][1][9][34] = 68,
+ [2][1][2][1][8][34] = 30,
+ [2][1][2][1][11][34] = 127,
+ [2][1][2][1][2][41] = 72,
+ [2][1][2][1][1][41] = 4,
+ [2][1][2][1][3][41] = 127,
+ [2][1][2][1][5][41] = 72,
+ [2][1][2][1][6][41] = 4,
+ [2][1][2][1][9][41] = 72,
+ [2][1][2][1][8][41] = 30,
+ [2][1][2][1][11][41] = 28,
+};
+
+const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM] = {
+ [0][0][0][0] = 32,
+ [0][0][0][1] = 32,
+ [0][0][0][2] = 32,
+ [0][0][0][3] = 32,
+ [0][0][0][4] = 32,
+ [0][0][0][5] = 32,
+ [0][0][0][6] = 32,
+ [0][0][0][7] = 32,
+ [0][0][0][8] = 32,
+ [0][0][0][9] = 32,
+ [0][0][0][10] = 32,
+ [0][0][0][11] = 32,
+ [0][0][0][12] = 32,
+ [0][0][0][13] = 0,
+ [0][1][0][0] = 20,
+ [0][1][0][1] = 20,
+ [0][1][0][2] = 20,
+ [0][1][0][3] = 20,
+ [0][1][0][4] = 20,
+ [0][1][0][5] = 20,
+ [0][1][0][6] = 20,
+ [0][1][0][7] = 20,
+ [0][1][0][8] = 20,
+ [0][1][0][9] = 20,
+ [0][1][0][10] = 20,
+ [0][1][0][11] = 20,
+ [0][1][0][12] = 20,
+ [0][1][0][13] = 0,
+ [1][0][0][0] = 42,
+ [1][0][0][1] = 42,
+ [1][0][0][2] = 42,
+ [1][0][0][3] = 42,
+ [1][0][0][4] = 42,
+ [1][0][0][5] = 42,
+ [1][0][0][6] = 42,
+ [1][0][0][7] = 42,
+ [1][0][0][8] = 42,
+ [1][0][0][9] = 42,
+ [1][0][0][10] = 42,
+ [1][0][0][11] = 42,
+ [1][0][0][12] = 36,
+ [1][0][0][13] = 0,
+ [1][1][0][0] = 30,
+ [1][1][0][1] = 30,
+ [1][1][0][2] = 30,
+ [1][1][0][3] = 30,
+ [1][1][0][4] = 30,
+ [1][1][0][5] = 30,
+ [1][1][0][6] = 30,
+ [1][1][0][7] = 30,
+ [1][1][0][8] = 30,
+ [1][1][0][9] = 30,
+ [1][1][0][10] = 30,
+ [1][1][0][11] = 30,
+ [1][1][0][12] = 30,
+ [1][1][0][13] = 0,
+ [2][0][0][0] = 52,
+ [2][0][0][1] = 52,
+ [2][0][0][2] = 52,
+ [2][0][0][3] = 52,
+ [2][0][0][4] = 52,
+ [2][0][0][5] = 52,
+ [2][0][0][6] = 52,
+ [2][0][0][7] = 52,
+ [2][0][0][8] = 52,
+ [2][0][0][9] = 52,
+ [2][0][0][10] = 52,
+ [2][0][0][11] = 52,
+ [2][0][0][12] = 40,
+ [2][0][0][13] = 0,
+ [2][1][0][0] = 40,
+ [2][1][0][1] = 40,
+ [2][1][0][2] = 40,
+ [2][1][0][3] = 40,
+ [2][1][0][4] = 40,
+ [2][1][0][5] = 40,
+ [2][1][0][6] = 40,
+ [2][1][0][7] = 40,
+ [2][1][0][8] = 40,
+ [2][1][0][9] = 40,
+ [2][1][0][10] = 40,
+ [2][1][0][11] = 40,
+ [2][1][0][12] = 26,
+ [2][1][0][13] = 0,
+ [0][0][2][0] = 70,
+ [0][0][1][0] = 32,
+ [0][0][3][0] = 40,
+ [0][0][5][0] = 70,
+ [0][0][6][0] = 32,
+ [0][0][9][0] = 32,
+ [0][0][8][0] = 60,
+ [0][0][11][0] = 32,
+ [0][0][2][1] = 70,
+ [0][0][1][1] = 32,
+ [0][0][3][1] = 40,
+ [0][0][5][1] = 70,
+ [0][0][6][1] = 32,
+ [0][0][9][1] = 32,
+ [0][0][8][1] = 60,
+ [0][0][11][1] = 32,
+ [0][0][2][2] = 74,
+ [0][0][1][2] = 32,
+ [0][0][3][2] = 40,
+ [0][0][5][2] = 74,
+ [0][0][6][2] = 32,
+ [0][0][9][2] = 32,
+ [0][0][8][2] = 60,
+ [0][0][11][2] = 32,
+ [0][0][2][3] = 78,
+ [0][0][1][3] = 32,
+ [0][0][3][3] = 40,
+ [0][0][5][3] = 78,
+ [0][0][6][3] = 32,
+ [0][0][9][3] = 32,
+ [0][0][8][3] = 60,
+ [0][0][11][3] = 32,
+ [0][0][2][4] = 78,
+ [0][0][1][4] = 32,
+ [0][0][3][4] = 40,
+ [0][0][5][4] = 78,
+ [0][0][6][4] = 32,
+ [0][0][9][4] = 32,
+ [0][0][8][4] = 60,
+ [0][0][11][4] = 32,
+ [0][0][2][5] = 78,
+ [0][0][1][5] = 32,
+ [0][0][3][5] = 40,
+ [0][0][5][5] = 78,
+ [0][0][6][5] = 32,
+ [0][0][9][5] = 32,
+ [0][0][8][5] = 60,
+ [0][0][11][5] = 32,
+ [0][0][2][6] = 78,
+ [0][0][1][6] = 32,
+ [0][0][3][6] = 40,
+ [0][0][5][6] = 78,
+ [0][0][6][6] = 32,
+ [0][0][9][6] = 32,
+ [0][0][8][6] = 60,
+ [0][0][11][6] = 32,
+ [0][0][2][7] = 78,
+ [0][0][1][7] = 32,
+ [0][0][3][7] = 40,
+ [0][0][5][7] = 78,
+ [0][0][6][7] = 32,
+ [0][0][9][7] = 32,
+ [0][0][8][7] = 60,
+ [0][0][11][7] = 32,
+ [0][0][2][8] = 74,
+ [0][0][1][8] = 32,
+ [0][0][3][8] = 40,
+ [0][0][5][8] = 74,
+ [0][0][6][8] = 32,
+ [0][0][9][8] = 32,
+ [0][0][8][8] = 60,
+ [0][0][11][8] = 32,
+ [0][0][2][9] = 70,
+ [0][0][1][9] = 32,
+ [0][0][3][9] = 40,
+ [0][0][5][9] = 70,
+ [0][0][6][9] = 32,
+ [0][0][9][9] = 32,
+ [0][0][8][9] = 60,
+ [0][0][11][9] = 32,
+ [0][0][2][10] = 70,
+ [0][0][1][10] = 32,
+ [0][0][3][10] = 40,
+ [0][0][5][10] = 70,
+ [0][0][6][10] = 32,
+ [0][0][9][10] = 32,
+ [0][0][8][10] = 60,
+ [0][0][11][10] = 32,
+ [0][0][2][11] = 58,
+ [0][0][1][11] = 32,
+ [0][0][3][11] = 40,
+ [0][0][5][11] = 58,
+ [0][0][6][11] = 32,
+ [0][0][9][11] = 32,
+ [0][0][8][11] = 60,
+ [0][0][11][11] = 32,
+ [0][0][2][12] = 34,
+ [0][0][1][12] = 32,
+ [0][0][3][12] = 40,
+ [0][0][5][12] = 34,
+ [0][0][6][12] = 32,
+ [0][0][9][12] = 32,
+ [0][0][8][12] = 60,
+ [0][0][11][12] = 32,
+ [0][0][2][13] = 127,
+ [0][0][1][13] = 127,
+ [0][0][3][13] = 127,
+ [0][0][5][13] = 127,
+ [0][0][6][13] = 127,
+ [0][0][9][13] = 127,
+ [0][0][8][13] = 127,
+ [0][0][11][13] = 127,
+ [0][1][2][0] = 64,
+ [0][1][1][0] = 20,
+ [0][1][3][0] = 28,
+ [0][1][5][0] = 64,
+ [0][1][6][0] = 20,
+ [0][1][9][0] = 20,
+ [0][1][8][0] = 48,
+ [0][1][11][0] = 20,
+ [0][1][2][1] = 64,
+ [0][1][1][1] = 20,
+ [0][1][3][1] = 28,
+ [0][1][5][1] = 64,
+ [0][1][6][1] = 20,
+ [0][1][9][1] = 20,
+ [0][1][8][1] = 48,
+ [0][1][11][1] = 20,
+ [0][1][2][2] = 68,
+ [0][1][1][2] = 20,
+ [0][1][3][2] = 28,
+ [0][1][5][2] = 68,
+ [0][1][6][2] = 20,
+ [0][1][9][2] = 20,
+ [0][1][8][2] = 48,
+ [0][1][11][2] = 20,
+ [0][1][2][3] = 72,
+ [0][1][1][3] = 20,
+ [0][1][3][3] = 28,
+ [0][1][5][3] = 72,
+ [0][1][6][3] = 20,
+ [0][1][9][3] = 20,
+ [0][1][8][3] = 48,
+ [0][1][11][3] = 20,
+ [0][1][2][4] = 76,
+ [0][1][1][4] = 20,
+ [0][1][3][4] = 28,
+ [0][1][5][4] = 76,
+ [0][1][6][4] = 20,
+ [0][1][9][4] = 20,
+ [0][1][8][4] = 48,
+ [0][1][11][4] = 20,
+ [0][1][2][5] = 78,
+ [0][1][1][5] = 20,
+ [0][1][3][5] = 28,
+ [0][1][5][5] = 78,
+ [0][1][6][5] = 20,
+ [0][1][9][5] = 20,
+ [0][1][8][5] = 48,
+ [0][1][11][5] = 20,
+ [0][1][2][6] = 76,
+ [0][1][1][6] = 20,
+ [0][1][3][6] = 28,
+ [0][1][5][6] = 76,
+ [0][1][6][6] = 20,
+ [0][1][9][6] = 20,
+ [0][1][8][6] = 48,
+ [0][1][11][6] = 20,
+ [0][1][2][7] = 72,
+ [0][1][1][7] = 20,
+ [0][1][3][7] = 28,
+ [0][1][5][7] = 72,
+ [0][1][6][7] = 20,
+ [0][1][9][7] = 20,
+ [0][1][8][7] = 48,
+ [0][1][11][7] = 20,
+ [0][1][2][8] = 68,
+ [0][1][1][8] = 20,
+ [0][1][3][8] = 28,
+ [0][1][5][8] = 68,
+ [0][1][6][8] = 20,
+ [0][1][9][8] = 20,
+ [0][1][8][8] = 48,
+ [0][1][11][8] = 20,
+ [0][1][2][9] = 64,
+ [0][1][1][9] = 20,
+ [0][1][3][9] = 28,
+ [0][1][5][9] = 64,
+ [0][1][6][9] = 20,
+ [0][1][9][9] = 20,
+ [0][1][8][9] = 48,
+ [0][1][11][9] = 20,
+ [0][1][2][10] = 64,
+ [0][1][1][10] = 20,
+ [0][1][3][10] = 28,
+ [0][1][5][10] = 64,
+ [0][1][6][10] = 20,
+ [0][1][9][10] = 20,
+ [0][1][8][10] = 48,
+ [0][1][11][10] = 20,
+ [0][1][2][11] = 54,
+ [0][1][1][11] = 20,
+ [0][1][3][11] = 28,
+ [0][1][5][11] = 54,
+ [0][1][6][11] = 20,
+ [0][1][9][11] = 20,
+ [0][1][8][11] = 48,
+ [0][1][11][11] = 20,
+ [0][1][2][12] = 32,
+ [0][1][1][12] = 20,
+ [0][1][3][12] = 28,
+ [0][1][5][12] = 32,
+ [0][1][6][12] = 20,
+ [0][1][9][12] = 20,
+ [0][1][8][12] = 48,
+ [0][1][11][12] = 20,
+ [0][1][2][13] = 127,
+ [0][1][1][13] = 127,
+ [0][1][3][13] = 127,
+ [0][1][5][13] = 127,
+ [0][1][6][13] = 127,
+ [0][1][9][13] = 127,
+ [0][1][8][13] = 127,
+ [0][1][11][13] = 127,
+ [1][0][2][0] = 72,
+ [1][0][1][0] = 42,
+ [1][0][3][0] = 50,
+ [1][0][5][0] = 72,
+ [1][0][6][0] = 42,
+ [1][0][9][0] = 42,
+ [1][0][8][0] = 60,
+ [1][0][11][0] = 42,
+ [1][0][2][1] = 72,
+ [1][0][1][1] = 42,
+ [1][0][3][1] = 50,
+ [1][0][5][1] = 72,
+ [1][0][6][1] = 42,
+ [1][0][9][1] = 42,
+ [1][0][8][1] = 60,
+ [1][0][11][1] = 42,
+ [1][0][2][2] = 76,
+ [1][0][1][2] = 42,
+ [1][0][3][2] = 50,
+ [1][0][5][2] = 76,
+ [1][0][6][2] = 42,
+ [1][0][9][2] = 42,
+ [1][0][8][2] = 60,
+ [1][0][11][2] = 42,
+ [1][0][2][3] = 78,
+ [1][0][1][3] = 42,
+ [1][0][3][3] = 50,
+ [1][0][5][3] = 78,
+ [1][0][6][3] = 42,
+ [1][0][9][3] = 42,
+ [1][0][8][3] = 60,
+ [1][0][11][3] = 42,
+ [1][0][2][4] = 78,
+ [1][0][1][4] = 42,
+ [1][0][3][4] = 50,
+ [1][0][5][4] = 78,
+ [1][0][6][4] = 42,
+ [1][0][9][4] = 42,
+ [1][0][8][4] = 60,
+ [1][0][11][4] = 42,
+ [1][0][2][5] = 78,
+ [1][0][1][5] = 42,
+ [1][0][3][5] = 50,
+ [1][0][5][5] = 78,
+ [1][0][6][5] = 42,
+ [1][0][9][5] = 42,
+ [1][0][8][5] = 60,
+ [1][0][11][5] = 42,
+ [1][0][2][6] = 78,
+ [1][0][1][6] = 42,
+ [1][0][3][6] = 50,
+ [1][0][5][6] = 78,
+ [1][0][6][6] = 42,
+ [1][0][9][6] = 42,
+ [1][0][8][6] = 60,
+ [1][0][11][6] = 42,
+ [1][0][2][7] = 78,
+ [1][0][1][7] = 42,
+ [1][0][3][7] = 50,
+ [1][0][5][7] = 78,
+ [1][0][6][7] = 42,
+ [1][0][9][7] = 42,
+ [1][0][8][7] = 60,
+ [1][0][11][7] = 42,
+ [1][0][2][8] = 78,
+ [1][0][1][8] = 42,
+ [1][0][3][8] = 50,
+ [1][0][5][8] = 78,
+ [1][0][6][8] = 42,
+ [1][0][9][8] = 42,
+ [1][0][8][8] = 60,
+ [1][0][11][8] = 42,
+ [1][0][2][9] = 74,
+ [1][0][1][9] = 42,
+ [1][0][3][9] = 50,
+ [1][0][5][9] = 74,
+ [1][0][6][9] = 42,
+ [1][0][9][9] = 42,
+ [1][0][8][9] = 60,
+ [1][0][11][9] = 42,
+ [1][0][2][10] = 74,
+ [1][0][1][10] = 42,
+ [1][0][3][10] = 50,
+ [1][0][5][10] = 74,
+ [1][0][6][10] = 42,
+ [1][0][9][10] = 42,
+ [1][0][8][10] = 60,
+ [1][0][11][10] = 42,
+ [1][0][2][11] = 64,
+ [1][0][1][11] = 42,
+ [1][0][3][11] = 50,
+ [1][0][5][11] = 64,
+ [1][0][6][11] = 42,
+ [1][0][9][11] = 42,
+ [1][0][8][11] = 60,
+ [1][0][11][11] = 42,
+ [1][0][2][12] = 36,
+ [1][0][1][12] = 42,
+ [1][0][3][12] = 50,
+ [1][0][5][12] = 36,
+ [1][0][6][12] = 42,
+ [1][0][9][12] = 42,
+ [1][0][8][12] = 60,
+ [1][0][11][12] = 42,
+ [1][0][2][13] = 127,
+ [1][0][1][13] = 127,
+ [1][0][3][13] = 127,
+ [1][0][5][13] = 127,
+ [1][0][6][13] = 127,
+ [1][0][9][13] = 127,
+ [1][0][8][13] = 127,
+ [1][0][11][13] = 127,
+ [1][1][2][0] = 66,
+ [1][1][1][0] = 30,
+ [1][1][3][0] = 38,
+ [1][1][5][0] = 66,
+ [1][1][6][0] = 30,
+ [1][1][9][0] = 30,
+ [1][1][8][0] = 48,
+ [1][1][11][0] = 30,
+ [1][1][2][1] = 66,
+ [1][1][1][1] = 30,
+ [1][1][3][1] = 38,
+ [1][1][5][1] = 66,
+ [1][1][6][1] = 30,
+ [1][1][9][1] = 30,
+ [1][1][8][1] = 48,
+ [1][1][11][1] = 30,
+ [1][1][2][2] = 70,
+ [1][1][1][2] = 30,
+ [1][1][3][2] = 38,
+ [1][1][5][2] = 70,
+ [1][1][6][2] = 30,
+ [1][1][9][2] = 30,
+ [1][1][8][2] = 48,
+ [1][1][11][2] = 30,
+ [1][1][2][3] = 74,
+ [1][1][1][3] = 30,
+ [1][1][3][3] = 38,
+ [1][1][5][3] = 74,
+ [1][1][6][3] = 30,
+ [1][1][9][3] = 30,
+ [1][1][8][3] = 48,
+ [1][1][11][3] = 30,
+ [1][1][2][4] = 78,
+ [1][1][1][4] = 30,
+ [1][1][3][4] = 38,
+ [1][1][5][4] = 78,
+ [1][1][6][4] = 30,
+ [1][1][9][4] = 30,
+ [1][1][8][4] = 48,
+ [1][1][11][4] = 30,
+ [1][1][2][5] = 78,
+ [1][1][1][5] = 30,
+ [1][1][3][5] = 38,
+ [1][1][5][5] = 78,
+ [1][1][6][5] = 30,
+ [1][1][9][5] = 30,
+ [1][1][8][5] = 48,
+ [1][1][11][5] = 30,
+ [1][1][2][6] = 78,
+ [1][1][1][6] = 30,
+ [1][1][3][6] = 38,
+ [1][1][5][6] = 78,
+ [1][1][6][6] = 30,
+ [1][1][9][6] = 30,
+ [1][1][8][6] = 48,
+ [1][1][11][6] = 30,
+ [1][1][2][7] = 74,
+ [1][1][1][7] = 30,
+ [1][1][3][7] = 38,
+ [1][1][5][7] = 74,
+ [1][1][6][7] = 30,
+ [1][1][9][7] = 30,
+ [1][1][8][7] = 48,
+ [1][1][11][7] = 30,
+ [1][1][2][8] = 70,
+ [1][1][1][8] = 30,
+ [1][1][3][8] = 38,
+ [1][1][5][8] = 70,
+ [1][1][6][8] = 30,
+ [1][1][9][8] = 30,
+ [1][1][8][8] = 48,
+ [1][1][11][8] = 30,
+ [1][1][2][9] = 66,
+ [1][1][1][9] = 30,
+ [1][1][3][9] = 38,
+ [1][1][5][9] = 66,
+ [1][1][6][9] = 30,
+ [1][1][9][9] = 30,
+ [1][1][8][9] = 48,
+ [1][1][11][9] = 30,
+ [1][1][2][10] = 66,
+ [1][1][1][10] = 30,
+ [1][1][3][10] = 38,
+ [1][1][5][10] = 66,
+ [1][1][6][10] = 30,
+ [1][1][9][10] = 30,
+ [1][1][8][10] = 48,
+ [1][1][11][10] = 30,
+ [1][1][2][11] = 60,
+ [1][1][1][11] = 30,
+ [1][1][3][11] = 38,
+ [1][1][5][11] = 60,
+ [1][1][6][11] = 30,
+ [1][1][9][11] = 30,
+ [1][1][8][11] = 48,
+ [1][1][11][11] = 30,
+ [1][1][2][12] = 32,
+ [1][1][1][12] = 30,
+ [1][1][3][12] = 38,
+ [1][1][5][12] = 32,
+ [1][1][6][12] = 30,
+ [1][1][9][12] = 30,
+ [1][1][8][12] = 48,
+ [1][1][11][12] = 30,
+ [1][1][2][13] = 127,
+ [1][1][1][13] = 127,
+ [1][1][3][13] = 127,
+ [1][1][5][13] = 127,
+ [1][1][6][13] = 127,
+ [1][1][9][13] = 127,
+ [1][1][8][13] = 127,
+ [1][1][11][13] = 127,
+ [2][0][2][0] = 76,
+ [2][0][1][0] = 52,
+ [2][0][3][0] = 64,
+ [2][0][5][0] = 76,
+ [2][0][6][0] = 52,
+ [2][0][9][0] = 52,
+ [2][0][8][0] = 60,
+ [2][0][11][0] = 52,
+ [2][0][2][1] = 76,
+ [2][0][1][1] = 52,
+ [2][0][3][1] = 64,
+ [2][0][5][1] = 76,
+ [2][0][6][1] = 52,
+ [2][0][9][1] = 52,
+ [2][0][8][1] = 60,
+ [2][0][11][1] = 52,
+ [2][0][2][2] = 78,
+ [2][0][1][2] = 52,
+ [2][0][3][2] = 64,
+ [2][0][5][2] = 78,
+ [2][0][6][2] = 52,
+ [2][0][9][2] = 52,
+ [2][0][8][2] = 60,
+ [2][0][11][2] = 52,
+ [2][0][2][3] = 78,
+ [2][0][1][3] = 52,
+ [2][0][3][3] = 64,
+ [2][0][5][3] = 78,
+ [2][0][6][3] = 52,
+ [2][0][9][3] = 52,
+ [2][0][8][3] = 60,
+ [2][0][11][3] = 52,
+ [2][0][2][4] = 78,
+ [2][0][1][4] = 52,
+ [2][0][3][4] = 64,
+ [2][0][5][4] = 78,
+ [2][0][6][4] = 52,
+ [2][0][9][4] = 52,
+ [2][0][8][4] = 60,
+ [2][0][11][4] = 52,
+ [2][0][2][5] = 78,
+ [2][0][1][5] = 52,
+ [2][0][3][5] = 64,
+ [2][0][5][5] = 78,
+ [2][0][6][5] = 52,
+ [2][0][9][5] = 52,
+ [2][0][8][5] = 60,
+ [2][0][11][5] = 52,
+ [2][0][2][6] = 78,
+ [2][0][1][6] = 52,
+ [2][0][3][6] = 64,
+ [2][0][5][6] = 78,
+ [2][0][6][6] = 52,
+ [2][0][9][6] = 52,
+ [2][0][8][6] = 60,
+ [2][0][11][6] = 52,
+ [2][0][2][7] = 78,
+ [2][0][1][7] = 52,
+ [2][0][3][7] = 64,
+ [2][0][5][7] = 78,
+ [2][0][6][7] = 52,
+ [2][0][9][7] = 52,
+ [2][0][8][7] = 60,
+ [2][0][11][7] = 52,
+ [2][0][2][8] = 78,
+ [2][0][1][8] = 52,
+ [2][0][3][8] = 64,
+ [2][0][5][8] = 78,
+ [2][0][6][8] = 52,
+ [2][0][9][8] = 52,
+ [2][0][8][8] = 60,
+ [2][0][11][8] = 52,
+ [2][0][2][9] = 76,
+ [2][0][1][9] = 52,
+ [2][0][3][9] = 64,
+ [2][0][5][9] = 76,
+ [2][0][6][9] = 52,
+ [2][0][9][9] = 52,
+ [2][0][8][9] = 60,
+ [2][0][11][9] = 52,
+ [2][0][2][10] = 76,
+ [2][0][1][10] = 52,
+ [2][0][3][10] = 64,
+ [2][0][5][10] = 76,
+ [2][0][6][10] = 52,
+ [2][0][9][10] = 52,
+ [2][0][8][10] = 60,
+ [2][0][11][10] = 52,
+ [2][0][2][11] = 68,
+ [2][0][1][11] = 52,
+ [2][0][3][11] = 64,
+ [2][0][5][11] = 68,
+ [2][0][6][11] = 52,
+ [2][0][9][11] = 52,
+ [2][0][8][11] = 60,
+ [2][0][11][11] = 52,
+ [2][0][2][12] = 40,
+ [2][0][1][12] = 52,
+ [2][0][3][12] = 64,
+ [2][0][5][12] = 40,
+ [2][0][6][12] = 52,
+ [2][0][9][12] = 52,
+ [2][0][8][12] = 60,
+ [2][0][11][12] = 52,
+ [2][0][2][13] = 127,
+ [2][0][1][13] = 127,
+ [2][0][3][13] = 127,
+ [2][0][5][13] = 127,
+ [2][0][6][13] = 127,
+ [2][0][9][13] = 127,
+ [2][0][8][13] = 127,
+ [2][0][11][13] = 127,
+ [2][1][2][0] = 68,
+ [2][1][1][0] = 40,
+ [2][1][3][0] = 52,
+ [2][1][5][0] = 68,
+ [2][1][6][0] = 40,
+ [2][1][9][0] = 40,
+ [2][1][8][0] = 48,
+ [2][1][11][0] = 40,
+ [2][1][2][1] = 68,
+ [2][1][1][1] = 40,
+ [2][1][3][1] = 52,
+ [2][1][5][1] = 68,
+ [2][1][6][1] = 40,
+ [2][1][9][1] = 40,
+ [2][1][8][1] = 48,
+ [2][1][11][1] = 40,
+ [2][1][2][2] = 72,
+ [2][1][1][2] = 40,
+ [2][1][3][2] = 52,
+ [2][1][5][2] = 72,
+ [2][1][6][2] = 40,
+ [2][1][9][2] = 40,
+ [2][1][8][2] = 48,
+ [2][1][11][2] = 40,
+ [2][1][2][3] = 76,
+ [2][1][1][3] = 40,
+ [2][1][3][3] = 52,
+ [2][1][5][3] = 76,
+ [2][1][6][3] = 40,
+ [2][1][9][3] = 40,
+ [2][1][8][3] = 48,
+ [2][1][11][3] = 40,
+ [2][1][2][4] = 78,
+ [2][1][1][4] = 40,
+ [2][1][3][4] = 52,
+ [2][1][5][4] = 78,
+ [2][1][6][4] = 40,
+ [2][1][9][4] = 40,
+ [2][1][8][4] = 48,
+ [2][1][11][4] = 40,
+ [2][1][2][5] = 78,
+ [2][1][1][5] = 40,
+ [2][1][3][5] = 52,
+ [2][1][5][5] = 78,
+ [2][1][6][5] = 40,
+ [2][1][9][5] = 40,
+ [2][1][8][5] = 48,
+ [2][1][11][5] = 40,
+ [2][1][2][6] = 78,
+ [2][1][1][6] = 40,
+ [2][1][3][6] = 52,
+ [2][1][5][6] = 78,
+ [2][1][6][6] = 40,
+ [2][1][9][6] = 40,
+ [2][1][8][6] = 48,
+ [2][1][11][6] = 40,
+ [2][1][2][7] = 78,
+ [2][1][1][7] = 40,
+ [2][1][3][7] = 52,
+ [2][1][5][7] = 78,
+ [2][1][6][7] = 40,
+ [2][1][9][7] = 40,
+ [2][1][8][7] = 48,
+ [2][1][11][7] = 40,
+ [2][1][2][8] = 74,
+ [2][1][1][8] = 40,
+ [2][1][3][8] = 52,
+ [2][1][5][8] = 74,
+ [2][1][6][8] = 40,
+ [2][1][9][8] = 40,
+ [2][1][8][8] = 48,
+ [2][1][11][8] = 40,
+ [2][1][2][9] = 70,
+ [2][1][1][9] = 40,
+ [2][1][3][9] = 52,
+ [2][1][5][9] = 70,
+ [2][1][6][9] = 40,
+ [2][1][9][9] = 40,
+ [2][1][8][9] = 48,
+ [2][1][11][9] = 40,
+ [2][1][2][10] = 70,
+ [2][1][1][10] = 40,
+ [2][1][3][10] = 52,
+ [2][1][5][10] = 70,
+ [2][1][6][10] = 40,
+ [2][1][9][10] = 40,
+ [2][1][8][10] = 48,
+ [2][1][11][10] = 40,
+ [2][1][2][11] = 48,
+ [2][1][1][11] = 40,
+ [2][1][3][11] = 52,
+ [2][1][5][11] = 48,
+ [2][1][6][11] = 40,
+ [2][1][9][11] = 40,
+ [2][1][8][11] = 48,
+ [2][1][11][11] = 40,
+ [2][1][2][12] = 26,
+ [2][1][1][12] = 40,
+ [2][1][3][12] = 52,
+ [2][1][5][12] = 26,
+ [2][1][6][12] = 40,
+ [2][1][9][12] = 40,
+ [2][1][8][12] = 48,
+ [2][1][11][12] = 40,
+ [2][1][2][13] = 127,
+ [2][1][1][13] = 127,
+ [2][1][3][13] = 127,
+ [2][1][5][13] = 127,
+ [2][1][6][13] = 127,
+ [2][1][9][13] = 127,
+ [2][1][8][13] = 127,
+ [2][1][11][13] = 127,
+};
+
+const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM] = {
+ [0][0][0][0] = 22,
+ [0][0][0][2] = 22,
+ [0][0][0][4] = 22,
+ [0][0][0][6] = 22,
+ [0][0][0][8] = 24,
+ [0][0][0][10] = 24,
+ [0][0][0][12] = 24,
+ [0][0][0][14] = 24,
+ [0][0][0][15] = 24,
+ [0][0][0][17] = 24,
+ [0][0][0][19] = 24,
+ [0][0][0][21] = 24,
+ [0][0][0][23] = 24,
+ [0][0][0][25] = 24,
+ [0][0][0][27] = 24,
+ [0][0][0][29] = 24,
+ [0][0][0][31] = 24,
+ [0][0][0][33] = 24,
+ [0][0][0][35] = 24,
+ [0][0][0][37] = 24,
+ [0][0][0][38] = 28,
+ [0][0][0][40] = 28,
+ [0][0][0][42] = 28,
+ [0][0][0][44] = 28,
+ [0][0][0][46] = 28,
+ [0][1][0][0] = 8,
+ [0][1][0][2] = 8,
+ [0][1][0][4] = 8,
+ [0][1][0][6] = 8,
+ [0][1][0][8] = 12,
+ [0][1][0][10] = 12,
+ [0][1][0][12] = 12,
+ [0][1][0][14] = 12,
+ [0][1][0][15] = 12,
+ [0][1][0][17] = 12,
+ [0][1][0][19] = 12,
+ [0][1][0][21] = 12,
+ [0][1][0][23] = 12,
+ [0][1][0][25] = 12,
+ [0][1][0][27] = 12,
+ [0][1][0][29] = 12,
+ [0][1][0][31] = 12,
+ [0][1][0][33] = 12,
+ [0][1][0][35] = 12,
+ [0][1][0][37] = 12,
+ [0][1][0][38] = 16,
+ [0][1][0][40] = 16,
+ [0][1][0][42] = 16,
+ [0][1][0][44] = 16,
+ [0][1][0][46] = 16,
+ [1][0][0][0] = 30,
+ [1][0][0][2] = 30,
+ [1][0][0][4] = 30,
+ [1][0][0][6] = 30,
+ [1][0][0][8] = 36,
+ [1][0][0][10] = 36,
+ [1][0][0][12] = 36,
+ [1][0][0][14] = 36,
+ [1][0][0][15] = 36,
+ [1][0][0][17] = 36,
+ [1][0][0][19] = 36,
+ [1][0][0][21] = 36,
+ [1][0][0][23] = 36,
+ [1][0][0][25] = 36,
+ [1][0][0][27] = 36,
+ [1][0][0][29] = 36,
+ [1][0][0][31] = 36,
+ [1][0][0][33] = 36,
+ [1][0][0][35] = 36,
+ [1][0][0][37] = 36,
+ [1][0][0][38] = 28,
+ [1][0][0][40] = 28,
+ [1][0][0][42] = 28,
+ [1][0][0][44] = 28,
+ [1][0][0][46] = 28,
+ [1][1][0][0] = 18,
+ [1][1][0][2] = 18,
+ [1][1][0][4] = 18,
+ [1][1][0][6] = 18,
+ [1][1][0][8] = 22,
+ [1][1][0][10] = 22,
+ [1][1][0][12] = 22,
+ [1][1][0][14] = 22,
+ [1][1][0][15] = 22,
+ [1][1][0][17] = 22,
+ [1][1][0][19] = 22,
+ [1][1][0][21] = 22,
+ [1][1][0][23] = 22,
+ [1][1][0][25] = 22,
+ [1][1][0][27] = 22,
+ [1][1][0][29] = 22,
+ [1][1][0][31] = 22,
+ [1][1][0][33] = 22,
+ [1][1][0][35] = 22,
+ [1][1][0][37] = 22,
+ [1][1][0][38] = 16,
+ [1][1][0][40] = 16,
+ [1][1][0][42] = 16,
+ [1][1][0][44] = 16,
+ [1][1][0][46] = 16,
+ [2][0][0][0] = 30,
+ [2][0][0][2] = 30,
+ [2][0][0][4] = 30,
+ [2][0][0][6] = 30,
+ [2][0][0][8] = 46,
+ [2][0][0][10] = 46,
+ [2][0][0][12] = 46,
+ [2][0][0][14] = 46,
+ [2][0][0][15] = 46,
+ [2][0][0][17] = 46,
+ [2][0][0][19] = 46,
+ [2][0][0][21] = 46,
+ [2][0][0][23] = 46,
+ [2][0][0][25] = 46,
+ [2][0][0][27] = 46,
+ [2][0][0][29] = 46,
+ [2][0][0][31] = 46,
+ [2][0][0][33] = 46,
+ [2][0][0][35] = 46,
+ [2][0][0][37] = 46,
+ [2][0][0][38] = 28,
+ [2][0][0][40] = 28,
+ [2][0][0][42] = 28,
+ [2][0][0][44] = 28,
+ [2][0][0][46] = 28,
+ [2][1][0][0] = 18,
+ [2][1][0][2] = 18,
+ [2][1][0][4] = 18,
+ [2][1][0][6] = 18,
+ [2][1][0][8] = 32,
+ [2][1][0][10] = 32,
+ [2][1][0][12] = 32,
+ [2][1][0][14] = 32,
+ [2][1][0][15] = 32,
+ [2][1][0][17] = 32,
+ [2][1][0][19] = 32,
+ [2][1][0][21] = 32,
+ [2][1][0][23] = 32,
+ [2][1][0][25] = 32,
+ [2][1][0][27] = 32,
+ [2][1][0][29] = 32,
+ [2][1][0][31] = 32,
+ [2][1][0][33] = 32,
+ [2][1][0][35] = 32,
+ [2][1][0][37] = 32,
+ [2][1][0][38] = 16,
+ [2][1][0][40] = 16,
+ [2][1][0][42] = 16,
+ [2][1][0][44] = 16,
+ [2][1][0][46] = 16,
+ [0][0][2][0] = 48,
+ [0][0][1][0] = 24,
+ [0][0][3][0] = 26,
+ [0][0][5][0] = 22,
+ [0][0][6][0] = 24,
+ [0][0][9][0] = 24,
+ [0][0][8][0] = 30,
+ [0][0][11][0] = 24,
+ [0][0][2][2] = 48,
+ [0][0][1][2] = 24,
+ [0][0][3][2] = 26,
+ [0][0][5][2] = 22,
+ [0][0][6][2] = 24,
+ [0][0][9][2] = 24,
+ [0][0][8][2] = 30,
+ [0][0][11][2] = 24,
+ [0][0][2][4] = 48,
+ [0][0][1][4] = 24,
+ [0][0][3][4] = 26,
+ [0][0][5][4] = 22,
+ [0][0][6][4] = 24,
+ [0][0][9][4] = 24,
+ [0][0][8][4] = 30,
+ [0][0][11][4] = 24,
+ [0][0][2][6] = 48,
+ [0][0][1][6] = 24,
+ [0][0][3][6] = 26,
+ [0][0][5][6] = 22,
+ [0][0][6][6] = 24,
+ [0][0][9][6] = 24,
+ [0][0][8][6] = 30,
+ [0][0][11][6] = 24,
+ [0][0][2][8] = 48,
+ [0][0][1][8] = 24,
+ [0][0][3][8] = 26,
+ [0][0][5][8] = 48,
+ [0][0][6][8] = 24,
+ [0][0][9][8] = 24,
+ [0][0][8][8] = 54,
+ [0][0][11][8] = 24,
+ [0][0][2][10] = 48,
+ [0][0][1][10] = 24,
+ [0][0][3][10] = 26,
+ [0][0][5][10] = 48,
+ [0][0][6][10] = 24,
+ [0][0][9][10] = 24,
+ [0][0][8][10] = 54,
+ [0][0][11][10] = 24,
+ [0][0][2][12] = 48,
+ [0][0][1][12] = 24,
+ [0][0][3][12] = 26,
+ [0][0][5][12] = 48,
+ [0][0][6][12] = 24,
+ [0][0][9][12] = 24,
+ [0][0][8][12] = 54,
+ [0][0][11][12] = 24,
+ [0][0][2][14] = 48,
+ [0][0][1][14] = 24,
+ [0][0][3][14] = 26,
+ [0][0][5][14] = 48,
+ [0][0][6][14] = 24,
+ [0][0][9][14] = 24,
+ [0][0][8][14] = 54,
+ [0][0][11][14] = 24,
+ [0][0][2][15] = 48,
+ [0][0][1][15] = 24,
+ [0][0][3][15] = 44,
+ [0][0][5][15] = 48,
+ [0][0][6][15] = 24,
+ [0][0][9][15] = 24,
+ [0][0][8][15] = 54,
+ [0][0][11][15] = 24,
+ [0][0][2][17] = 48,
+ [0][0][1][17] = 24,
+ [0][0][3][17] = 44,
+ [0][0][5][17] = 48,
+ [0][0][6][17] = 24,
+ [0][0][9][17] = 24,
+ [0][0][8][17] = 54,
+ [0][0][11][17] = 24,
+ [0][0][2][19] = 48,
+ [0][0][1][19] = 24,
+ [0][0][3][19] = 44,
+ [0][0][5][19] = 48,
+ [0][0][6][19] = 24,
+ [0][0][9][19] = 24,
+ [0][0][8][19] = 54,
+ [0][0][11][19] = 24,
+ [0][0][2][21] = 48,
+ [0][0][1][21] = 24,
+ [0][0][3][21] = 44,
+ [0][0][5][21] = 48,
+ [0][0][6][21] = 24,
+ [0][0][9][21] = 24,
+ [0][0][8][21] = 54,
+ [0][0][11][21] = 24,
+ [0][0][2][23] = 48,
+ [0][0][1][23] = 24,
+ [0][0][3][23] = 44,
+ [0][0][5][23] = 48,
+ [0][0][6][23] = 24,
+ [0][0][9][23] = 24,
+ [0][0][8][23] = 54,
+ [0][0][11][23] = 24,
+ [0][0][2][25] = 48,
+ [0][0][1][25] = 24,
+ [0][0][3][25] = 44,
+ [0][0][5][25] = 127,
+ [0][0][6][25] = 24,
+ [0][0][9][25] = 127,
+ [0][0][8][25] = 54,
+ [0][0][11][25] = 24,
+ [0][0][2][27] = 48,
+ [0][0][1][27] = 24,
+ [0][0][3][27] = 44,
+ [0][0][5][27] = 127,
+ [0][0][6][27] = 24,
+ [0][0][9][27] = 127,
+ [0][0][8][27] = 54,
+ [0][0][11][27] = 24,
+ [0][0][2][29] = 48,
+ [0][0][1][29] = 24,
+ [0][0][3][29] = 44,
+ [0][0][5][29] = 127,
+ [0][0][6][29] = 24,
+ [0][0][9][29] = 127,
+ [0][0][8][29] = 54,
+ [0][0][11][29] = 24,
+ [0][0][2][31] = 48,
+ [0][0][1][31] = 24,
+ [0][0][3][31] = 44,
+ [0][0][5][31] = 48,
+ [0][0][6][31] = 24,
+ [0][0][9][31] = 24,
+ [0][0][8][31] = 54,
+ [0][0][11][31] = 24,
+ [0][0][2][33] = 48,
+ [0][0][1][33] = 24,
+ [0][0][3][33] = 44,
+ [0][0][5][33] = 48,
+ [0][0][6][33] = 24,
+ [0][0][9][33] = 24,
+ [0][0][8][33] = 54,
+ [0][0][11][33] = 24,
+ [0][0][2][35] = 48,
+ [0][0][1][35] = 24,
+ [0][0][3][35] = 44,
+ [0][0][5][35] = 48,
+ [0][0][6][35] = 24,
+ [0][0][9][35] = 24,
+ [0][0][8][35] = 54,
+ [0][0][11][35] = 24,
+ [0][0][2][37] = 48,
+ [0][0][1][37] = 127,
+ [0][0][3][37] = 44,
+ [0][0][5][37] = 48,
+ [0][0][6][37] = 24,
+ [0][0][9][37] = 48,
+ [0][0][8][37] = 54,
+ [0][0][11][37] = 127,
+ [0][0][2][38] = 76,
+ [0][0][1][38] = 28,
+ [0][0][3][38] = 127,
+ [0][0][5][38] = 76,
+ [0][0][6][38] = 28,
+ [0][0][9][38] = 76,
+ [0][0][8][38] = 54,
+ [0][0][11][38] = 28,
+ [0][0][2][40] = 76,
+ [0][0][1][40] = 28,
+ [0][0][3][40] = 127,
+ [0][0][5][40] = 76,
+ [0][0][6][40] = 28,
+ [0][0][9][40] = 76,
+ [0][0][8][40] = 54,
+ [0][0][11][40] = 28,
+ [0][0][2][42] = 76,
+ [0][0][1][42] = 28,
+ [0][0][3][42] = 127,
+ [0][0][5][42] = 76,
+ [0][0][6][42] = 28,
+ [0][0][9][42] = 76,
+ [0][0][8][42] = 54,
+ [0][0][11][42] = 28,
+ [0][0][2][44] = 76,
+ [0][0][1][44] = 28,
+ [0][0][3][44] = 127,
+ [0][0][5][44] = 76,
+ [0][0][6][44] = 28,
+ [0][0][9][44] = 76,
+ [0][0][8][44] = 54,
+ [0][0][11][44] = 28,
+ [0][0][2][46] = 76,
+ [0][0][1][46] = 28,
+ [0][0][3][46] = 127,
+ [0][0][5][46] = 76,
+ [0][0][6][46] = 28,
+ [0][0][9][46] = 76,
+ [0][0][8][46] = 54,
+ [0][0][11][46] = 28,
+ [0][1][2][0] = 36,
+ [0][1][1][0] = 12,
+ [0][1][3][0] = 14,
+ [0][1][5][0] = 8,
+ [0][1][6][0] = 12,
+ [0][1][9][0] = 12,
+ [0][1][8][0] = 18,
+ [0][1][11][0] = 12,
+ [0][1][2][2] = 36,
+ [0][1][1][2] = 12,
+ [0][1][3][2] = 14,
+ [0][1][5][2] = 8,
+ [0][1][6][2] = 12,
+ [0][1][9][2] = 12,
+ [0][1][8][2] = 18,
+ [0][1][11][2] = 12,
+ [0][1][2][4] = 36,
+ [0][1][1][4] = 12,
+ [0][1][3][4] = 14,
+ [0][1][5][4] = 8,
+ [0][1][6][4] = 12,
+ [0][1][9][4] = 12,
+ [0][1][8][4] = 18,
+ [0][1][11][4] = 12,
+ [0][1][2][6] = 36,
+ [0][1][1][6] = 12,
+ [0][1][3][6] = 14,
+ [0][1][5][6] = 8,
+ [0][1][6][6] = 12,
+ [0][1][9][6] = 12,
+ [0][1][8][6] = 18,
+ [0][1][11][6] = 12,
+ [0][1][2][8] = 36,
+ [0][1][1][8] = 12,
+ [0][1][3][8] = 14,
+ [0][1][5][8] = 36,
+ [0][1][6][8] = 12,
+ [0][1][9][8] = 12,
+ [0][1][8][8] = 42,
+ [0][1][11][8] = 12,
+ [0][1][2][10] = 36,
+ [0][1][1][10] = 12,
+ [0][1][3][10] = 14,
+ [0][1][5][10] = 36,
+ [0][1][6][10] = 12,
+ [0][1][9][10] = 12,
+ [0][1][8][10] = 42,
+ [0][1][11][10] = 12,
+ [0][1][2][12] = 36,
+ [0][1][1][12] = 12,
+ [0][1][3][12] = 14,
+ [0][1][5][12] = 36,
+ [0][1][6][12] = 12,
+ [0][1][9][12] = 12,
+ [0][1][8][12] = 42,
+ [0][1][11][12] = 12,
+ [0][1][2][14] = 36,
+ [0][1][1][14] = 12,
+ [0][1][3][14] = 14,
+ [0][1][5][14] = 36,
+ [0][1][6][14] = 12,
+ [0][1][9][14] = 12,
+ [0][1][8][14] = 42,
+ [0][1][11][14] = 12,
+ [0][1][2][15] = 36,
+ [0][1][1][15] = 12,
+ [0][1][3][15] = 32,
+ [0][1][5][15] = 36,
+ [0][1][6][15] = 12,
+ [0][1][9][15] = 12,
+ [0][1][8][15] = 42,
+ [0][1][11][15] = 12,
+ [0][1][2][17] = 36,
+ [0][1][1][17] = 12,
+ [0][1][3][17] = 32,
+ [0][1][5][17] = 36,
+ [0][1][6][17] = 12,
+ [0][1][9][17] = 12,
+ [0][1][8][17] = 42,
+ [0][1][11][17] = 12,
+ [0][1][2][19] = 36,
+ [0][1][1][19] = 12,
+ [0][1][3][19] = 32,
+ [0][1][5][19] = 36,
+ [0][1][6][19] = 12,
+ [0][1][9][19] = 12,
+ [0][1][8][19] = 42,
+ [0][1][11][19] = 12,
+ [0][1][2][21] = 36,
+ [0][1][1][21] = 12,
+ [0][1][3][21] = 32,
+ [0][1][5][21] = 36,
+ [0][1][6][21] = 12,
+ [0][1][9][21] = 12,
+ [0][1][8][21] = 42,
+ [0][1][11][21] = 12,
+ [0][1][2][23] = 36,
+ [0][1][1][23] = 12,
+ [0][1][3][23] = 32,
+ [0][1][5][23] = 36,
+ [0][1][6][23] = 12,
+ [0][1][9][23] = 12,
+ [0][1][8][23] = 42,
+ [0][1][11][23] = 12,
+ [0][1][2][25] = 36,
+ [0][1][1][25] = 12,
+ [0][1][3][25] = 32,
+ [0][1][5][25] = 127,
+ [0][1][6][25] = 12,
+ [0][1][9][25] = 127,
+ [0][1][8][25] = 42,
+ [0][1][11][25] = 12,
+ [0][1][2][27] = 36,
+ [0][1][1][27] = 12,
+ [0][1][3][27] = 32,
+ [0][1][5][27] = 127,
+ [0][1][6][27] = 12,
+ [0][1][9][27] = 127,
+ [0][1][8][27] = 42,
+ [0][1][11][27] = 12,
+ [0][1][2][29] = 36,
+ [0][1][1][29] = 12,
+ [0][1][3][29] = 32,
+ [0][1][5][29] = 127,
+ [0][1][6][29] = 12,
+ [0][1][9][29] = 127,
+ [0][1][8][29] = 42,
+ [0][1][11][29] = 12,
+ [0][1][2][31] = 36,
+ [0][1][1][31] = 12,
+ [0][1][3][31] = 32,
+ [0][1][5][31] = 36,
+ [0][1][6][31] = 12,
+ [0][1][9][31] = 12,
+ [0][1][8][31] = 42,
+ [0][1][11][31] = 12,
+ [0][1][2][33] = 36,
+ [0][1][1][33] = 12,
+ [0][1][3][33] = 32,
+ [0][1][5][33] = 36,
+ [0][1][6][33] = 12,
+ [0][1][9][33] = 12,
+ [0][1][8][33] = 42,
+ [0][1][11][33] = 12,
+ [0][1][2][35] = 36,
+ [0][1][1][35] = 12,
+ [0][1][3][35] = 32,
+ [0][1][5][35] = 36,
+ [0][1][6][35] = 12,
+ [0][1][9][35] = 12,
+ [0][1][8][35] = 42,
+ [0][1][11][35] = 12,
+ [0][1][2][37] = 36,
+ [0][1][1][37] = 127,
+ [0][1][3][37] = 32,
+ [0][1][5][37] = 36,
+ [0][1][6][37] = 12,
+ [0][1][9][37] = 36,
+ [0][1][8][37] = 42,
+ [0][1][11][37] = 127,
+ [0][1][2][38] = 72,
+ [0][1][1][38] = 16,
+ [0][1][3][38] = 127,
+ [0][1][5][38] = 72,
+ [0][1][6][38] = 16,
+ [0][1][9][38] = 76,
+ [0][1][8][38] = 42,
+ [0][1][11][38] = 16,
+ [0][1][2][40] = 76,
+ [0][1][1][40] = 16,
+ [0][1][3][40] = 127,
+ [0][1][5][40] = 76,
+ [0][1][6][40] = 16,
+ [0][1][9][40] = 76,
+ [0][1][8][40] = 42,
+ [0][1][11][40] = 16,
+ [0][1][2][42] = 76,
+ [0][1][1][42] = 16,
+ [0][1][3][42] = 127,
+ [0][1][5][42] = 76,
+ [0][1][6][42] = 16,
+ [0][1][9][42] = 76,
+ [0][1][8][42] = 42,
+ [0][1][11][42] = 16,
+ [0][1][2][44] = 76,
+ [0][1][1][44] = 16,
+ [0][1][3][44] = 127,
+ [0][1][5][44] = 76,
+ [0][1][6][44] = 16,
+ [0][1][9][44] = 76,
+ [0][1][8][44] = 42,
+ [0][1][11][44] = 16,
+ [0][1][2][46] = 76,
+ [0][1][1][46] = 16,
+ [0][1][3][46] = 127,
+ [0][1][5][46] = 76,
+ [0][1][6][46] = 16,
+ [0][1][9][46] = 76,
+ [0][1][8][46] = 42,
+ [0][1][11][46] = 16,
+ [1][0][2][0] = 62,
+ [1][0][1][0] = 36,
+ [1][0][3][0] = 36,
+ [1][0][5][0] = 34,
+ [1][0][6][0] = 36,
+ [1][0][9][0] = 36,
+ [1][0][8][0] = 30,
+ [1][0][11][0] = 36,
+ [1][0][2][2] = 62,
+ [1][0][1][2] = 36,
+ [1][0][3][2] = 36,
+ [1][0][5][2] = 34,
+ [1][0][6][2] = 36,
+ [1][0][9][2] = 36,
+ [1][0][8][2] = 30,
+ [1][0][11][2] = 36,
+ [1][0][2][4] = 62,
+ [1][0][1][4] = 36,
+ [1][0][3][4] = 36,
+ [1][0][5][4] = 34,
+ [1][0][6][4] = 36,
+ [1][0][9][4] = 36,
+ [1][0][8][4] = 30,
+ [1][0][11][4] = 36,
+ [1][0][2][6] = 62,
+ [1][0][1][6] = 36,
+ [1][0][3][6] = 36,
+ [1][0][5][6] = 34,
+ [1][0][6][6] = 36,
+ [1][0][9][6] = 36,
+ [1][0][8][6] = 30,
+ [1][0][11][6] = 36,
+ [1][0][2][8] = 62,
+ [1][0][1][8] = 36,
+ [1][0][3][8] = 36,
+ [1][0][5][8] = 62,
+ [1][0][6][8] = 36,
+ [1][0][9][8] = 36,
+ [1][0][8][8] = 54,
+ [1][0][11][8] = 36,
+ [1][0][2][10] = 62,
+ [1][0][1][10] = 36,
+ [1][0][3][10] = 36,
+ [1][0][5][10] = 62,
+ [1][0][6][10] = 36,
+ [1][0][9][10] = 36,
+ [1][0][8][10] = 54,
+ [1][0][11][10] = 36,
+ [1][0][2][12] = 62,
+ [1][0][1][12] = 36,
+ [1][0][3][12] = 36,
+ [1][0][5][12] = 62,
+ [1][0][6][12] = 36,
+ [1][0][9][12] = 36,
+ [1][0][8][12] = 54,
+ [1][0][11][12] = 36,
+ [1][0][2][14] = 62,
+ [1][0][1][14] = 36,
+ [1][0][3][14] = 36,
+ [1][0][5][14] = 62,
+ [1][0][6][14] = 36,
+ [1][0][9][14] = 36,
+ [1][0][8][14] = 54,
+ [1][0][11][14] = 36,
+ [1][0][2][15] = 62,
+ [1][0][1][15] = 36,
+ [1][0][3][15] = 58,
+ [1][0][5][15] = 62,
+ [1][0][6][15] = 36,
+ [1][0][9][15] = 36,
+ [1][0][8][15] = 54,
+ [1][0][11][15] = 36,
+ [1][0][2][17] = 62,
+ [1][0][1][17] = 36,
+ [1][0][3][17] = 58,
+ [1][0][5][17] = 62,
+ [1][0][6][17] = 36,
+ [1][0][9][17] = 36,
+ [1][0][8][17] = 54,
+ [1][0][11][17] = 36,
+ [1][0][2][19] = 62,
+ [1][0][1][19] = 36,
+ [1][0][3][19] = 58,
+ [1][0][5][19] = 62,
+ [1][0][6][19] = 36,
+ [1][0][9][19] = 36,
+ [1][0][8][19] = 54,
+ [1][0][11][19] = 36,
+ [1][0][2][21] = 62,
+ [1][0][1][21] = 36,
+ [1][0][3][21] = 58,
+ [1][0][5][21] = 62,
+ [1][0][6][21] = 36,
+ [1][0][9][21] = 36,
+ [1][0][8][21] = 54,
+ [1][0][11][21] = 36,
+ [1][0][2][23] = 62,
+ [1][0][1][23] = 36,
+ [1][0][3][23] = 58,
+ [1][0][5][23] = 62,
+ [1][0][6][23] = 36,
+ [1][0][9][23] = 36,
+ [1][0][8][23] = 54,
+ [1][0][11][23] = 36,
+ [1][0][2][25] = 62,
+ [1][0][1][25] = 36,
+ [1][0][3][25] = 58,
+ [1][0][5][25] = 127,
+ [1][0][6][25] = 36,
+ [1][0][9][25] = 127,
+ [1][0][8][25] = 54,
+ [1][0][11][25] = 36,
+ [1][0][2][27] = 62,
+ [1][0][1][27] = 36,
+ [1][0][3][27] = 58,
+ [1][0][5][27] = 127,
+ [1][0][6][27] = 36,
+ [1][0][9][27] = 127,
+ [1][0][8][27] = 54,
+ [1][0][11][27] = 36,
+ [1][0][2][29] = 62,
+ [1][0][1][29] = 36,
+ [1][0][3][29] = 58,
+ [1][0][5][29] = 127,
+ [1][0][6][29] = 36,
+ [1][0][9][29] = 127,
+ [1][0][8][29] = 54,
+ [1][0][11][29] = 36,
+ [1][0][2][31] = 62,
+ [1][0][1][31] = 36,
+ [1][0][3][31] = 58,
+ [1][0][5][31] = 62,
+ [1][0][6][31] = 36,
+ [1][0][9][31] = 36,
+ [1][0][8][31] = 54,
+ [1][0][11][31] = 36,
+ [1][0][2][33] = 62,
+ [1][0][1][33] = 36,
+ [1][0][3][33] = 58,
+ [1][0][5][33] = 62,
+ [1][0][6][33] = 36,
+ [1][0][9][33] = 36,
+ [1][0][8][33] = 54,
+ [1][0][11][33] = 36,
+ [1][0][2][35] = 62,
+ [1][0][1][35] = 36,
+ [1][0][3][35] = 58,
+ [1][0][5][35] = 62,
+ [1][0][6][35] = 36,
+ [1][0][9][35] = 36,
+ [1][0][8][35] = 54,
+ [1][0][11][35] = 36,
+ [1][0][2][37] = 56,
+ [1][0][1][37] = 62,
+ [1][0][3][37] = 127,
+ [1][0][5][37] = 58,
+ [1][0][6][37] = 62,
+ [1][0][9][37] = 36,
+ [1][0][8][37] = 62,
+ [1][0][11][37] = 54,
+ [1][0][2][38] = 76,
+ [1][0][1][38] = 28,
+ [1][0][3][38] = 127,
+ [1][0][5][38] = 76,
+ [1][0][6][38] = 28,
+ [1][0][9][38] = 76,
+ [1][0][8][38] = 54,
+ [1][0][11][38] = 28,
+ [1][0][2][40] = 76,
+ [1][0][1][40] = 28,
+ [1][0][3][40] = 127,
+ [1][0][5][40] = 76,
+ [1][0][6][40] = 28,
+ [1][0][9][40] = 76,
+ [1][0][8][40] = 54,
+ [1][0][11][40] = 28,
+ [1][0][2][42] = 76,
+ [1][0][1][42] = 28,
+ [1][0][3][42] = 127,
+ [1][0][5][42] = 76,
+ [1][0][6][42] = 28,
+ [1][0][9][42] = 76,
+ [1][0][8][42] = 54,
+ [1][0][11][42] = 28,
+ [1][0][2][44] = 76,
+ [1][0][1][44] = 28,
+ [1][0][3][44] = 127,
+ [1][0][5][44] = 76,
+ [1][0][6][44] = 28,
+ [1][0][9][44] = 76,
+ [1][0][8][44] = 54,
+ [1][0][11][44] = 28,
+ [1][0][2][46] = 76,
+ [1][0][1][46] = 28,
+ [1][0][3][46] = 127,
+ [1][0][5][46] = 76,
+ [1][0][6][46] = 28,
+ [1][0][9][46] = 76,
+ [1][0][8][46] = 54,
+ [1][0][11][46] = 28,
+ [1][1][2][0] = 46,
+ [1][1][1][0] = 22,
+ [1][1][3][0] = 24,
+ [1][1][5][0] = 18,
+ [1][1][6][0] = 22,
+ [1][1][9][0] = 22,
+ [1][1][8][0] = 18,
+ [1][1][11][0] = 22,
+ [1][1][2][2] = 46,
+ [1][1][1][2] = 22,
+ [1][1][3][2] = 24,
+ [1][1][5][2] = 18,
+ [1][1][6][2] = 22,
+ [1][1][9][2] = 22,
+ [1][1][8][2] = 18,
+ [1][1][11][2] = 22,
+ [1][1][2][4] = 46,
+ [1][1][1][4] = 22,
+ [1][1][3][4] = 24,
+ [1][1][5][4] = 18,
+ [1][1][6][4] = 22,
+ [1][1][9][4] = 22,
+ [1][1][8][4] = 18,
+ [1][1][11][4] = 22,
+ [1][1][2][6] = 46,
+ [1][1][1][6] = 22,
+ [1][1][3][6] = 24,
+ [1][1][5][6] = 18,
+ [1][1][6][6] = 22,
+ [1][1][9][6] = 22,
+ [1][1][8][6] = 18,
+ [1][1][11][6] = 22,
+ [1][1][2][8] = 46,
+ [1][1][1][8] = 22,
+ [1][1][3][8] = 24,
+ [1][1][5][8] = 46,
+ [1][1][6][8] = 22,
+ [1][1][9][8] = 22,
+ [1][1][8][8] = 42,
+ [1][1][11][8] = 22,
+ [1][1][2][10] = 46,
+ [1][1][1][10] = 22,
+ [1][1][3][10] = 24,
+ [1][1][5][10] = 46,
+ [1][1][6][10] = 22,
+ [1][1][9][10] = 22,
+ [1][1][8][10] = 42,
+ [1][1][11][10] = 22,
+ [1][1][2][12] = 46,
+ [1][1][1][12] = 22,
+ [1][1][3][12] = 24,
+ [1][1][5][12] = 46,
+ [1][1][6][12] = 22,
+ [1][1][9][12] = 22,
+ [1][1][8][12] = 42,
+ [1][1][11][12] = 22,
+ [1][1][2][14] = 46,
+ [1][1][1][14] = 22,
+ [1][1][3][14] = 24,
+ [1][1][5][14] = 46,
+ [1][1][6][14] = 22,
+ [1][1][9][14] = 22,
+ [1][1][8][14] = 42,
+ [1][1][11][14] = 22,
+ [1][1][2][15] = 46,
+ [1][1][1][15] = 22,
+ [1][1][3][15] = 46,
+ [1][1][5][15] = 46,
+ [1][1][6][15] = 22,
+ [1][1][9][15] = 22,
+ [1][1][8][15] = 42,
+ [1][1][11][15] = 22,
+ [1][1][2][17] = 46,
+ [1][1][1][17] = 22,
+ [1][1][3][17] = 46,
+ [1][1][5][17] = 46,
+ [1][1][6][17] = 22,
+ [1][1][9][17] = 22,
+ [1][1][8][17] = 42,
+ [1][1][11][17] = 22,
+ [1][1][2][19] = 46,
+ [1][1][1][19] = 22,
+ [1][1][3][19] = 46,
+ [1][1][5][19] = 46,
+ [1][1][6][19] = 22,
+ [1][1][9][19] = 22,
+ [1][1][8][19] = 42,
+ [1][1][11][19] = 22,
+ [1][1][2][21] = 46,
+ [1][1][1][21] = 22,
+ [1][1][3][21] = 46,
+ [1][1][5][21] = 46,
+ [1][1][6][21] = 22,
+ [1][1][9][21] = 22,
+ [1][1][8][21] = 42,
+ [1][1][11][21] = 22,
+ [1][1][2][23] = 46,
+ [1][1][1][23] = 22,
+ [1][1][3][23] = 46,
+ [1][1][5][23] = 46,
+ [1][1][6][23] = 22,
+ [1][1][9][23] = 22,
+ [1][1][8][23] = 42,
+ [1][1][11][23] = 22,
+ [1][1][2][25] = 46,
+ [1][1][1][25] = 22,
+ [1][1][3][25] = 46,
+ [1][1][5][25] = 127,
+ [1][1][6][25] = 22,
+ [1][1][9][25] = 127,
+ [1][1][8][25] = 42,
+ [1][1][11][25] = 22,
+ [1][1][2][27] = 46,
+ [1][1][1][27] = 22,
+ [1][1][3][27] = 46,
+ [1][1][5][27] = 127,
+ [1][1][6][27] = 22,
+ [1][1][9][27] = 127,
+ [1][1][8][27] = 42,
+ [1][1][11][27] = 22,
+ [1][1][2][29] = 46,
+ [1][1][1][29] = 22,
+ [1][1][3][29] = 46,
+ [1][1][5][29] = 127,
+ [1][1][6][29] = 22,
+ [1][1][9][29] = 127,
+ [1][1][8][29] = 42,
+ [1][1][11][29] = 22,
+ [1][1][2][31] = 46,
+ [1][1][1][31] = 22,
+ [1][1][3][31] = 46,
+ [1][1][5][31] = 46,
+ [1][1][6][31] = 22,
+ [1][1][9][31] = 22,
+ [1][1][8][31] = 42,
+ [1][1][11][31] = 22,
+ [1][1][2][33] = 46,
+ [1][1][1][33] = 22,
+ [1][1][3][33] = 46,
+ [1][1][5][33] = 46,
+ [1][1][6][33] = 22,
+ [1][1][9][33] = 22,
+ [1][1][8][33] = 42,
+ [1][1][11][33] = 22,
+ [1][1][2][35] = 46,
+ [1][1][1][35] = 22,
+ [1][1][3][35] = 46,
+ [1][1][5][35] = 46,
+ [1][1][6][35] = 22,
+ [1][1][9][35] = 22,
+ [1][1][8][35] = 42,
+ [1][1][11][35] = 22,
+ [1][1][2][37] = 46,
+ [1][1][1][37] = 127,
+ [1][1][3][37] = 46,
+ [1][1][5][37] = 46,
+ [1][1][6][37] = 22,
+ [1][1][9][37] = 50,
+ [1][1][8][37] = 42,
+ [1][1][11][37] = 127,
+ [1][1][2][38] = 74,
+ [1][1][1][38] = 16,
+ [1][1][3][38] = 127,
+ [1][1][5][38] = 74,
+ [1][1][6][38] = 16,
+ [1][1][9][38] = 76,
+ [1][1][8][38] = 42,
+ [1][1][11][38] = 16,
+ [1][1][2][40] = 76,
+ [1][1][1][40] = 16,
+ [1][1][3][40] = 127,
+ [1][1][5][40] = 76,
+ [1][1][6][40] = 16,
+ [1][1][9][40] = 76,
+ [1][1][8][40] = 42,
+ [1][1][11][40] = 16,
+ [1][1][2][42] = 76,
+ [1][1][1][42] = 16,
+ [1][1][3][42] = 127,
+ [1][1][5][42] = 76,
+ [1][1][6][42] = 16,
+ [1][1][9][42] = 76,
+ [1][1][8][42] = 42,
+ [1][1][11][42] = 16,
+ [1][1][2][44] = 76,
+ [1][1][1][44] = 16,
+ [1][1][3][44] = 127,
+ [1][1][5][44] = 76,
+ [1][1][6][44] = 16,
+ [1][1][9][44] = 76,
+ [1][1][8][44] = 42,
+ [1][1][11][44] = 16,
+ [1][1][2][46] = 76,
+ [1][1][1][46] = 16,
+ [1][1][3][46] = 127,
+ [1][1][5][46] = 76,
+ [1][1][6][46] = 16,
+ [1][1][9][46] = 76,
+ [1][1][8][46] = 42,
+ [1][1][11][46] = 16,
+ [2][0][2][0] = 74,
+ [2][0][1][0] = 46,
+ [2][0][3][0] = 50,
+ [2][0][5][0] = 46,
+ [2][0][6][0] = 46,
+ [2][0][9][0] = 46,
+ [2][0][8][0] = 30,
+ [2][0][11][0] = 46,
+ [2][0][2][2] = 74,
+ [2][0][1][2] = 46,
+ [2][0][3][2] = 50,
+ [2][0][5][2] = 46,
+ [2][0][6][2] = 46,
+ [2][0][9][2] = 46,
+ [2][0][8][2] = 30,
+ [2][0][11][2] = 46,
+ [2][0][2][4] = 74,
+ [2][0][1][4] = 46,
+ [2][0][3][4] = 50,
+ [2][0][5][4] = 46,
+ [2][0][6][4] = 46,
+ [2][0][9][4] = 46,
+ [2][0][8][4] = 30,
+ [2][0][11][4] = 46,
+ [2][0][2][6] = 74,
+ [2][0][1][6] = 46,
+ [2][0][3][6] = 50,
+ [2][0][5][6] = 46,
+ [2][0][6][6] = 46,
+ [2][0][9][6] = 46,
+ [2][0][8][6] = 30,
+ [2][0][11][6] = 46,
+ [2][0][2][8] = 74,
+ [2][0][1][8] = 46,
+ [2][0][3][8] = 50,
+ [2][0][5][8] = 66,
+ [2][0][6][8] = 46,
+ [2][0][9][8] = 46,
+ [2][0][8][8] = 54,
+ [2][0][11][8] = 46,
+ [2][0][2][10] = 74,
+ [2][0][1][10] = 46,
+ [2][0][3][10] = 50,
+ [2][0][5][10] = 66,
+ [2][0][6][10] = 46,
+ [2][0][9][10] = 46,
+ [2][0][8][10] = 54,
+ [2][0][11][10] = 46,
+ [2][0][2][12] = 74,
+ [2][0][1][12] = 46,
+ [2][0][3][12] = 50,
+ [2][0][5][12] = 66,
+ [2][0][6][12] = 46,
+ [2][0][9][12] = 46,
+ [2][0][8][12] = 54,
+ [2][0][11][12] = 46,
+ [2][0][2][14] = 74,
+ [2][0][1][14] = 46,
+ [2][0][3][14] = 50,
+ [2][0][5][14] = 66,
+ [2][0][6][14] = 46,
+ [2][0][9][14] = 46,
+ [2][0][8][14] = 54,
+ [2][0][11][14] = 46,
+ [2][0][2][15] = 74,
+ [2][0][1][15] = 46,
+ [2][0][3][15] = 70,
+ [2][0][5][15] = 74,
+ [2][0][6][15] = 46,
+ [2][0][9][15] = 46,
+ [2][0][8][15] = 54,
+ [2][0][11][15] = 46,
+ [2][0][2][17] = 74,
+ [2][0][1][17] = 46,
+ [2][0][3][17] = 70,
+ [2][0][5][17] = 74,
+ [2][0][6][17] = 46,
+ [2][0][9][17] = 46,
+ [2][0][8][17] = 54,
+ [2][0][11][17] = 46,
+ [2][0][2][19] = 74,
+ [2][0][1][19] = 46,
+ [2][0][3][19] = 70,
+ [2][0][5][19] = 74,
+ [2][0][6][19] = 46,
+ [2][0][9][19] = 46,
+ [2][0][8][19] = 54,
+ [2][0][11][19] = 46,
+ [2][0][2][21] = 74,
+ [2][0][1][21] = 46,
+ [2][0][3][21] = 70,
+ [2][0][5][21] = 74,
+ [2][0][6][21] = 46,
+ [2][0][9][21] = 46,
+ [2][0][8][21] = 54,
+ [2][0][11][21] = 46,
+ [2][0][2][23] = 74,
+ [2][0][1][23] = 46,
+ [2][0][3][23] = 70,
+ [2][0][5][23] = 74,
+ [2][0][6][23] = 46,
+ [2][0][9][23] = 46,
+ [2][0][8][23] = 54,
+ [2][0][11][23] = 46,
+ [2][0][2][25] = 74,
+ [2][0][1][25] = 46,
+ [2][0][3][25] = 70,
+ [2][0][5][25] = 127,
+ [2][0][6][25] = 46,
+ [2][0][9][25] = 127,
+ [2][0][8][25] = 54,
+ [2][0][11][25] = 46,
+ [2][0][2][27] = 74,
+ [2][0][1][27] = 46,
+ [2][0][3][27] = 70,
+ [2][0][5][27] = 127,
+ [2][0][6][27] = 46,
+ [2][0][9][27] = 127,
+ [2][0][8][27] = 54,
+ [2][0][11][27] = 46,
+ [2][0][2][29] = 74,
+ [2][0][1][29] = 46,
+ [2][0][3][29] = 70,
+ [2][0][5][29] = 127,
+ [2][0][6][29] = 46,
+ [2][0][9][29] = 127,
+ [2][0][8][29] = 54,
+ [2][0][11][29] = 46,
+ [2][0][2][31] = 74,
+ [2][0][1][31] = 46,
+ [2][0][3][31] = 70,
+ [2][0][5][31] = 74,
+ [2][0][6][31] = 46,
+ [2][0][9][31] = 46,
+ [2][0][8][31] = 54,
+ [2][0][11][31] = 46,
+ [2][0][2][33] = 74,
+ [2][0][1][33] = 46,
+ [2][0][3][33] = 70,
+ [2][0][5][33] = 74,
+ [2][0][6][33] = 46,
+ [2][0][9][33] = 46,
+ [2][0][8][33] = 54,
+ [2][0][11][33] = 46,
+ [2][0][2][35] = 74,
+ [2][0][1][35] = 46,
+ [2][0][3][35] = 70,
+ [2][0][5][35] = 74,
+ [2][0][6][35] = 46,
+ [2][0][9][35] = 46,
+ [2][0][8][35] = 54,
+ [2][0][11][35] = 46,
+ [2][0][2][37] = 74,
+ [2][0][1][37] = 127,
+ [2][0][3][37] = 70,
+ [2][0][5][37] = 74,
+ [2][0][6][37] = 46,
+ [2][0][9][37] = 74,
+ [2][0][8][37] = 54,
+ [2][0][11][37] = 127,
+ [2][0][2][38] = 76,
+ [2][0][1][38] = 28,
+ [2][0][3][38] = 127,
+ [2][0][5][38] = 76,
+ [2][0][6][38] = 28,
+ [2][0][9][38] = 76,
+ [2][0][8][38] = 54,
+ [2][0][11][38] = 28,
+ [2][0][2][40] = 76,
+ [2][0][1][40] = 28,
+ [2][0][3][40] = 127,
+ [2][0][5][40] = 76,
+ [2][0][6][40] = 28,
+ [2][0][9][40] = 76,
+ [2][0][8][40] = 54,
+ [2][0][11][40] = 28,
+ [2][0][2][42] = 76,
+ [2][0][1][42] = 28,
+ [2][0][3][42] = 127,
+ [2][0][5][42] = 76,
+ [2][0][6][42] = 28,
+ [2][0][9][42] = 76,
+ [2][0][8][42] = 54,
+ [2][0][11][42] = 28,
+ [2][0][2][44] = 76,
+ [2][0][1][44] = 28,
+ [2][0][3][44] = 127,
+ [2][0][5][44] = 76,
+ [2][0][6][44] = 28,
+ [2][0][9][44] = 76,
+ [2][0][8][44] = 54,
+ [2][0][11][44] = 28,
+ [2][0][2][46] = 76,
+ [2][0][1][46] = 28,
+ [2][0][3][46] = 127,
+ [2][0][5][46] = 76,
+ [2][0][6][46] = 28,
+ [2][0][9][46] = 76,
+ [2][0][8][46] = 54,
+ [2][0][11][46] = 28,
+ [2][1][2][0] = 58,
+ [2][1][1][0] = 32,
+ [2][1][3][0] = 38,
+ [2][1][5][0] = 30,
+ [2][1][6][0] = 32,
+ [2][1][9][0] = 32,
+ [2][1][8][0] = 18,
+ [2][1][11][0] = 32,
+ [2][1][2][2] = 58,
+ [2][1][1][2] = 32,
+ [2][1][3][2] = 38,
+ [2][1][5][2] = 30,
+ [2][1][6][2] = 32,
+ [2][1][9][2] = 32,
+ [2][1][8][2] = 18,
+ [2][1][11][2] = 32,
+ [2][1][2][4] = 58,
+ [2][1][1][4] = 32,
+ [2][1][3][4] = 38,
+ [2][1][5][4] = 30,
+ [2][1][6][4] = 32,
+ [2][1][9][4] = 32,
+ [2][1][8][4] = 18,
+ [2][1][11][4] = 32,
+ [2][1][2][6] = 58,
+ [2][1][1][6] = 32,
+ [2][1][3][6] = 38,
+ [2][1][5][6] = 30,
+ [2][1][6][6] = 32,
+ [2][1][9][6] = 32,
+ [2][1][8][6] = 18,
+ [2][1][11][6] = 32,
+ [2][1][2][8] = 58,
+ [2][1][1][8] = 32,
+ [2][1][3][8] = 38,
+ [2][1][5][8] = 52,
+ [2][1][6][8] = 32,
+ [2][1][9][8] = 32,
+ [2][1][8][8] = 42,
+ [2][1][11][8] = 32,
+ [2][1][2][10] = 58,
+ [2][1][1][10] = 32,
+ [2][1][3][10] = 38,
+ [2][1][5][10] = 52,
+ [2][1][6][10] = 32,
+ [2][1][9][10] = 32,
+ [2][1][8][10] = 42,
+ [2][1][11][10] = 32,
+ [2][1][2][12] = 58,
+ [2][1][1][12] = 32,
+ [2][1][3][12] = 38,
+ [2][1][5][12] = 52,
+ [2][1][6][12] = 32,
+ [2][1][9][12] = 32,
+ [2][1][8][12] = 42,
+ [2][1][11][12] = 32,
+ [2][1][2][14] = 58,
+ [2][1][1][14] = 32,
+ [2][1][3][14] = 38,
+ [2][1][5][14] = 52,
+ [2][1][6][14] = 32,
+ [2][1][9][14] = 32,
+ [2][1][8][14] = 42,
+ [2][1][11][14] = 32,
+ [2][1][2][15] = 58,
+ [2][1][1][15] = 32,
+ [2][1][3][15] = 58,
+ [2][1][5][15] = 58,
+ [2][1][6][15] = 32,
+ [2][1][9][15] = 32,
+ [2][1][8][15] = 42,
+ [2][1][11][15] = 32,
+ [2][1][2][17] = 58,
+ [2][1][1][17] = 32,
+ [2][1][3][17] = 58,
+ [2][1][5][17] = 58,
+ [2][1][6][17] = 32,
+ [2][1][9][17] = 32,
+ [2][1][8][17] = 42,
+ [2][1][11][17] = 32,
+ [2][1][2][19] = 58,
+ [2][1][1][19] = 32,
+ [2][1][3][19] = 58,
+ [2][1][5][19] = 58,
+ [2][1][6][19] = 32,
+ [2][1][9][19] = 32,
+ [2][1][8][19] = 42,
+ [2][1][11][19] = 32,
+ [2][1][2][21] = 58,
+ [2][1][1][21] = 32,
+ [2][1][3][21] = 58,
+ [2][1][5][21] = 58,
+ [2][1][6][21] = 32,
+ [2][1][9][21] = 32,
+ [2][1][8][21] = 42,
+ [2][1][11][21] = 32,
+ [2][1][2][23] = 58,
+ [2][1][1][23] = 32,
+ [2][1][3][23] = 58,
+ [2][1][5][23] = 58,
+ [2][1][6][23] = 32,
+ [2][1][9][23] = 32,
+ [2][1][8][23] = 42,
+ [2][1][11][23] = 32,
+ [2][1][2][25] = 58,
+ [2][1][1][25] = 32,
+ [2][1][3][25] = 58,
+ [2][1][5][25] = 127,
+ [2][1][6][25] = 32,
+ [2][1][9][25] = 127,
+ [2][1][8][25] = 42,
+ [2][1][11][25] = 32,
+ [2][1][2][27] = 58,
+ [2][1][1][27] = 32,
+ [2][1][3][27] = 58,
+ [2][1][5][27] = 127,
+ [2][1][6][27] = 32,
+ [2][1][9][27] = 127,
+ [2][1][8][27] = 42,
+ [2][1][11][27] = 32,
+ [2][1][2][29] = 58,
+ [2][1][1][29] = 32,
+ [2][1][3][29] = 58,
+ [2][1][5][29] = 127,
+ [2][1][6][29] = 32,
+ [2][1][9][29] = 127,
+ [2][1][8][29] = 42,
+ [2][1][11][29] = 32,
+ [2][1][2][31] = 58,
+ [2][1][1][31] = 32,
+ [2][1][3][31] = 58,
+ [2][1][5][31] = 58,
+ [2][1][6][31] = 32,
+ [2][1][9][31] = 32,
+ [2][1][8][31] = 42,
+ [2][1][11][31] = 32,
+ [2][1][2][33] = 58,
+ [2][1][1][33] = 32,
+ [2][1][3][33] = 58,
+ [2][1][5][33] = 58,
+ [2][1][6][33] = 32,
+ [2][1][9][33] = 32,
+ [2][1][8][33] = 42,
+ [2][1][11][33] = 32,
+ [2][1][2][35] = 58,
+ [2][1][1][35] = 32,
+ [2][1][3][35] = 58,
+ [2][1][5][35] = 58,
+ [2][1][6][35] = 32,
+ [2][1][9][35] = 32,
+ [2][1][8][35] = 42,
+ [2][1][11][35] = 32,
+ [2][1][2][37] = 58,
+ [2][1][1][37] = 127,
+ [2][1][3][37] = 58,
+ [2][1][5][37] = 58,
+ [2][1][6][37] = 32,
+ [2][1][9][37] = 62,
+ [2][1][8][37] = 42,
+ [2][1][11][37] = 127,
+ [2][1][2][38] = 76,
+ [2][1][1][38] = 16,
+ [2][1][3][38] = 127,
+ [2][1][5][38] = 76,
+ [2][1][6][38] = 16,
+ [2][1][9][38] = 76,
+ [2][1][8][38] = 42,
+ [2][1][11][38] = 16,
+ [2][1][2][40] = 76,
+ [2][1][1][40] = 16,
+ [2][1][3][40] = 127,
+ [2][1][5][40] = 76,
+ [2][1][6][40] = 16,
+ [2][1][9][40] = 76,
+ [2][1][8][40] = 42,
+ [2][1][11][40] = 16,
+ [2][1][2][42] = 76,
+ [2][1][1][42] = 16,
+ [2][1][3][42] = 127,
+ [2][1][5][42] = 76,
+ [2][1][6][42] = 16,
+ [2][1][9][42] = 76,
+ [2][1][8][42] = 42,
+ [2][1][11][42] = 16,
+ [2][1][2][44] = 76,
+ [2][1][1][44] = 16,
+ [2][1][3][44] = 127,
+ [2][1][5][44] = 76,
+ [2][1][6][44] = 16,
+ [2][1][9][44] = 76,
+ [2][1][8][44] = 42,
+ [2][1][11][44] = 16,
+ [2][1][2][46] = 76,
+ [2][1][1][46] = 16,
+ [2][1][3][46] = 127,
+ [2][1][5][46] = 76,
+ [2][1][6][46] = 16,
+ [2][1][9][46] = 76,
+ [2][1][8][46] = 42,
+ [2][1][11][46] = 16,
+};
+
+#define DECLARE_DIG_TABLE(name) \
+static const struct rtw89_phy_dig_gain_cfg name##_table = { \
+ .table = name, \
+ .size = ARRAY_SIZE(name) \
+}
+
+static const struct rtw89_reg_def rtw89_8852a_lna_gain_g[] = {
+ {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G0_G_MSK},
+ {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G1_G_MSK},
+ {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G2_G_MSK},
+ {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G3_G_MSK},
+ {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G4_G_MSK},
+ {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G5_G_MSK},
+ {R_PATH0_LNA_ERR5, B_PATH0_LNA_ERR_G6_G_MSK},
+};
+
+DECLARE_DIG_TABLE(rtw89_8852a_lna_gain_g);
+
+static const struct rtw89_reg_def rtw89_8852a_tia_gain_g[] = {
+ {R_PATH0_TIA_ERR_G0, B_PATH0_TIA_ERR_G0_G_MSK},
+ {R_PATH0_TIA_ERR_G1, B_PATH0_TIA_ERR_G1_G_MSK},
+};
+
+DECLARE_DIG_TABLE(rtw89_8852a_tia_gain_g);
+
+static const struct rtw89_reg_def rtw89_8852a_lna_gain_a[] = {
+ {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G0_A_MSK},
+ {R_PATH0_LNA_ERR1, B_PATH0_LNA_ERR_G1_A_MSK},
+ {R_PATH0_LNA_ERR2, B_PATH0_LNA_ERR_G2_A_MSK},
+ {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G3_A_MSK},
+ {R_PATH0_LNA_ERR3, B_PATH0_LNA_ERR_G4_A_MSK},
+ {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G5_A_MSK},
+ {R_PATH0_LNA_ERR4, B_PATH0_LNA_ERR_G6_A_MSK},
+};
+
+DECLARE_DIG_TABLE(rtw89_8852a_lna_gain_a);
+
+static const struct rtw89_reg_def rtw89_8852a_tia_gain_a[] = {
+ {R_PATH0_TIA_ERR_G0, B_PATH0_TIA_ERR_G0_A_MSK},
+ {R_PATH0_TIA_ERR_G1, B_PATH0_TIA_ERR_G1_A_MSK},
+};
+
+DECLARE_DIG_TABLE(rtw89_8852a_tia_gain_a);
+
+const struct rtw89_phy_table rtw89_8852a_phy_bb_table = {
+ .regs = rtw89_8852a_phy_bb_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852a_phy_bb_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_phy_table rtw89_8852a_phy_radioa_table = {
+ .regs = rtw89_8852a_phy_radioa_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852a_phy_radioa_regs),
+ .rf_path = RF_PATH_A,
+};
+
+const struct rtw89_phy_table rtw89_8852a_phy_radiob_table = {
+ .regs = rtw89_8852a_phy_radiob_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852a_phy_radiob_regs),
+ .rf_path = RF_PATH_B,
+};
+
+const struct rtw89_phy_table rtw89_8852a_phy_nctl_table = {
+ .regs = rtw89_8852a_phy_nctl_regs,
+ .n_regs = ARRAY_SIZE(rtw89_8852a_phy_nctl_regs),
+ .rf_path = 0, /* don't care */
+};
+
+const struct rtw89_txpwr_table rtw89_8852a_byr_table = {
+ .data = rtw89_8852a_txpwr_byrate,
+ .size = ARRAY_SIZE(rtw89_8852a_txpwr_byrate),
+ .load = rtw89_phy_load_txpwr_byrate,
+};
+
+const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg = {
+ .delta_swingidx_5gb_n = _txpwr_track_delta_swingidx_5gb_n,
+ .delta_swingidx_5gb_p = _txpwr_track_delta_swingidx_5gb_p,
+ .delta_swingidx_5ga_n = _txpwr_track_delta_swingidx_5ga_n,
+ .delta_swingidx_5ga_p = _txpwr_track_delta_swingidx_5ga_p,
+ .delta_swingidx_2gb_n = _txpwr_track_delta_swingidx_2gb_n,
+ .delta_swingidx_2gb_p = _txpwr_track_delta_swingidx_2gb_p,
+ .delta_swingidx_2ga_n = _txpwr_track_delta_swingidx_2ga_n,
+ .delta_swingidx_2ga_p = _txpwr_track_delta_swingidx_2ga_p,
+ .delta_swingidx_2g_cck_b_n = _txpwr_track_delta_swingidx_2g_cck_b_n,
+ .delta_swingidx_2g_cck_b_p = _txpwr_track_delta_swingidx_2g_cck_b_p,
+ .delta_swingidx_2g_cck_a_n = _txpwr_track_delta_swingidx_2g_cck_a_n,
+ .delta_swingidx_2g_cck_a_p = _txpwr_track_delta_swingidx_2g_cck_a_p,
+};
+
+const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table = {
+ .cfg_lna_g = &rtw89_8852a_lna_gain_g_table,
+ .cfg_tia_g = &rtw89_8852a_tia_gain_g_table,
+ .cfg_lna_a = &rtw89_8852a_lna_gain_a_table,
+ .cfg_tia_a = &rtw89_8852a_tia_gain_a_table
+};
diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h
new file mode 100644
index 000000000000..913796506286
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/rtw8852a_table.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_8852A_TABLE_H__
+#define __RTW89_8852A_TABLE_H__
+
+#include "core.h"
+
+extern const struct rtw89_phy_table rtw89_8852a_phy_bb_table;
+extern const struct rtw89_phy_table rtw89_8852a_phy_radioa_table;
+extern const struct rtw89_phy_table rtw89_8852a_phy_radiob_table;
+extern const struct rtw89_phy_table rtw89_8852a_phy_nctl_table;
+extern const struct rtw89_txpwr_table rtw89_8852a_byr_table;
+extern const struct rtw89_phy_dig_gain_table rtw89_8852a_phy_dig_table;
+extern const struct rtw89_txpwr_track_cfg rtw89_8852a_trk_cfg;
+extern const s8 rtw89_8852a_txpwr_lmt_2g[RTW89_2G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852a_txpwr_lmt_5g[RTW89_5G_BW_NUM][RTW89_NTX_NUM]
+ [RTW89_RS_LMT_NUM][RTW89_BF_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+extern const s8 rtw89_8852a_txpwr_lmt_ru_2g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_2G_CH_NUM];
+extern const s8 rtw89_8852a_txpwr_lmt_ru_5g[RTW89_RU_NUM][RTW89_NTX_NUM]
+ [RTW89_REGD_NUM][RTW89_5G_CH_NUM];
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/sar.c b/drivers/net/wireless/realtek/rtw89/sar.c
new file mode 100644
index 000000000000..097c87899cea
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/sar.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "debug.h"
+#include "sar.h"
+
+static int rtw89_query_sar_config_common(struct rtw89_dev *rtwdev, s32 *cfg)
+{
+ struct rtw89_sar_cfg_common *rtwsar = &rtwdev->sar.cfg_common;
+ enum rtw89_subband subband = rtwdev->hal.current_subband;
+
+ if (!rtwsar->set[subband])
+ return -ENODATA;
+
+ *cfg = rtwsar->cfg[subband];
+ return 0;
+}
+
+static const
+struct rtw89_sar_handler rtw89_sar_handlers[RTW89_SAR_SOURCE_NR] = {
+ [RTW89_SAR_SOURCE_COMMON] = {
+ .descr_sar_source = "RTW89_SAR_SOURCE_COMMON",
+ .txpwr_factor_sar = 2,
+ .query_sar_config = rtw89_query_sar_config_common,
+ },
+};
+
+#define rtw89_sar_set_src(_dev, _src, _cfg_name, _cfg_data) \
+ do { \
+ typeof(_src) _s = (_src); \
+ typeof(_dev) _d = (_dev); \
+ BUILD_BUG_ON(!rtw89_sar_handlers[_s].descr_sar_source); \
+ BUILD_BUG_ON(!rtw89_sar_handlers[_s].query_sar_config); \
+ lockdep_assert_held(&_d->mutex); \
+ _d->sar._cfg_name = *(_cfg_data); \
+ _d->sar.src = _s; \
+ } while (0)
+
+static s8 rtw89_txpwr_sar_to_mac(struct rtw89_dev *rtwdev, u8 fct, s32 cfg)
+{
+ const u8 fct_mac = rtwdev->chip->txpwr_factor_mac;
+ s32 cfg_mac;
+
+ cfg_mac = fct > fct_mac ?
+ cfg >> (fct - fct_mac) : cfg << (fct_mac - fct);
+
+ return (s8)clamp_t(s32, cfg_mac,
+ RTW89_SAR_TXPWR_MAC_MIN,
+ RTW89_SAR_TXPWR_MAC_MAX);
+}
+
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev)
+{
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ /* its members are protected by rtw89_sar_set_src() */
+ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
+ int ret;
+ s32 cfg;
+ u8 fct;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (src == RTW89_SAR_SOURCE_NONE)
+ return RTW89_SAR_TXPWR_MAC_MAX;
+
+ ret = sar_hdl->query_sar_config(rtwdev, &cfg);
+ if (ret)
+ return RTW89_SAR_TXPWR_MAC_MAX;
+
+ fct = sar_hdl->txpwr_factor_sar;
+
+ return rtw89_txpwr_sar_to_mac(rtwdev, fct, cfg);
+}
+
+void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev)
+{
+ const enum rtw89_sar_sources src = rtwdev->sar.src;
+ /* its members are protected by rtw89_sar_set_src() */
+ const struct rtw89_sar_handler *sar_hdl = &rtw89_sar_handlers[src];
+ const u8 fct_mac = rtwdev->chip->txpwr_factor_mac;
+ int ret;
+ s32 cfg;
+ u8 fct;
+
+ lockdep_assert_held(&rtwdev->mutex);
+
+ if (src == RTW89_SAR_SOURCE_NONE) {
+ seq_puts(m, "no SAR is applied\n");
+ return;
+ }
+
+ seq_printf(m, "source: %d (%s)\n", src, sar_hdl->descr_sar_source);
+
+ ret = sar_hdl->query_sar_config(rtwdev, &cfg);
+ if (ret) {
+ seq_printf(m, "config: return code: %d\n", ret);
+ seq_printf(m, "assign: max setting: %d (unit: 1/%lu dBm)\n",
+ RTW89_SAR_TXPWR_MAC_MAX, BIT(fct_mac));
+ return;
+ }
+
+ fct = sar_hdl->txpwr_factor_sar;
+
+ seq_printf(m, "config: %d (unit: 1/%lu dBm)\n", cfg, BIT(fct));
+}
+
+static int rtw89_apply_sar_common(struct rtw89_dev *rtwdev,
+ const struct rtw89_sar_cfg_common *sar)
+{
+ enum rtw89_sar_sources src;
+ int ret = 0;
+
+ mutex_lock(&rtwdev->mutex);
+
+ src = rtwdev->sar.src;
+ if (src != RTW89_SAR_SOURCE_NONE && src != RTW89_SAR_SOURCE_COMMON) {
+ rtw89_warn(rtwdev, "SAR source: %d is in use", src);
+ ret = -EBUSY;
+ goto exit;
+ }
+
+ rtw89_sar_set_src(rtwdev, RTW89_SAR_SOURCE_COMMON, cfg_common, sar);
+ rtw89_chip_set_txpwr(rtwdev);
+
+exit:
+ mutex_unlock(&rtwdev->mutex);
+ return ret;
+}
+
+static const u8 rtw89_common_sar_subband_map[] = {
+ RTW89_CH_2G,
+ RTW89_CH_5G_BAND_1,
+ RTW89_CH_5G_BAND_3,
+ RTW89_CH_5G_BAND_4,
+};
+
+static const struct cfg80211_sar_freq_ranges rtw89_common_sar_freq_ranges[] = {
+ { .start_freq = 2412, .end_freq = 2484, },
+ { .start_freq = 5180, .end_freq = 5320, },
+ { .start_freq = 5500, .end_freq = 5720, },
+ { .start_freq = 5745, .end_freq = 5825, },
+};
+
+static_assert(ARRAY_SIZE(rtw89_common_sar_subband_map) ==
+ ARRAY_SIZE(rtw89_common_sar_freq_ranges));
+
+const struct cfg80211_sar_capa rtw89_sar_capa = {
+ .type = NL80211_SAR_TYPE_POWER,
+ .num_freq_ranges = ARRAY_SIZE(rtw89_common_sar_freq_ranges),
+ .freq_ranges = rtw89_common_sar_freq_ranges,
+};
+
+int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar)
+{
+ struct rtw89_dev *rtwdev = hw->priv;
+ struct rtw89_sar_cfg_common sar_common = {0};
+ u8 fct;
+ u32 freq_start;
+ u32 freq_end;
+ u32 band;
+ s32 power;
+ u32 i, idx;
+
+ if (sar->type != NL80211_SAR_TYPE_POWER)
+ return -EINVAL;
+
+ fct = rtw89_sar_handlers[RTW89_SAR_SOURCE_COMMON].txpwr_factor_sar;
+
+ for (i = 0; i < sar->num_sub_specs; i++) {
+ idx = sar->sub_specs[i].freq_range_index;
+ if (idx >= ARRAY_SIZE(rtw89_common_sar_freq_ranges))
+ return -EINVAL;
+
+ freq_start = rtw89_common_sar_freq_ranges[idx].start_freq;
+ freq_end = rtw89_common_sar_freq_ranges[idx].end_freq;
+ band = rtw89_common_sar_subband_map[idx];
+ power = sar->sub_specs[i].power;
+
+ rtw89_info(rtwdev, "On freq %u to %u, ", freq_start, freq_end);
+ rtw89_info(rtwdev, "set SAR power limit %d (unit: 1/%lu dBm)\n",
+ power, BIT(fct));
+
+ sar_common.set[band] = true;
+ sar_common.cfg[band] = power;
+ }
+
+ return rtw89_apply_sar_common(rtwdev, &sar_common);
+}
diff --git a/drivers/net/wireless/realtek/rtw89/sar.h b/drivers/net/wireless/realtek/rtw89/sar.h
new file mode 100644
index 000000000000..7b5484c84eb1
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/sar.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_SAR_H__
+#define __RTW89_SAR_H__
+
+#include "core.h"
+
+#define RTW89_SAR_TXPWR_MAC_MAX S8_MAX
+#define RTW89_SAR_TXPWR_MAC_MIN S8_MIN
+
+struct rtw89_sar_handler {
+ const char *descr_sar_source;
+ u8 txpwr_factor_sar;
+ int (*query_sar_config)(struct rtw89_dev *rtwdev, s32 *cfg);
+};
+
+extern const struct cfg80211_sar_capa rtw89_sar_capa;
+
+s8 rtw89_query_sar(struct rtw89_dev *rtwdev);
+void rtw89_print_sar(struct seq_file *m, struct rtw89_dev *rtwdev);
+int rtw89_ops_set_sar_specs(struct ieee80211_hw *hw,
+ const struct cfg80211_sar_specs *sar);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c
new file mode 100644
index 000000000000..837cdc366a61
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/ser.c
@@ -0,0 +1,491 @@
+// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+/* Copyright(c) 2019-2020 Realtek Corporation
+ */
+
+#include "cam.h"
+#include "debug.h"
+#include "mac.h"
+#include "ps.h"
+#include "ser.h"
+#include "util.h"
+
+#define SER_RECFG_TIMEOUT 1000
+
+enum ser_evt {
+ SER_EV_NONE,
+ SER_EV_STATE_IN,
+ SER_EV_STATE_OUT,
+ SER_EV_L1_RESET, /* M1 */
+ SER_EV_DO_RECOVERY, /* M3 */
+ SER_EV_MAC_RESET_DONE, /* M5 */
+ SER_EV_L2_RESET,
+ SER_EV_L2_RECFG_DONE,
+ SER_EV_L2_RECFG_TIMEOUT,
+ SER_EV_M3_TIMEOUT,
+ SER_EV_FW_M5_TIMEOUT,
+ SER_EV_L0_RESET,
+ SER_EV_MAXX
+};
+
+enum ser_state {
+ SER_IDLE_ST,
+ SER_RESET_TRX_ST,
+ SER_DO_HCI_ST,
+ SER_L2_RESET_ST,
+ SER_ST_MAX_ST
+};
+
+struct ser_msg {
+ struct list_head list;
+ u8 event;
+};
+
+struct state_ent {
+ u8 state;
+ char *name;
+ void (*st_func)(struct rtw89_ser *ser, u8 event);
+};
+
+struct event_ent {
+ u8 event;
+ char *name;
+};
+
+static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
+{
+ if (event < SER_EV_MAXX)
+ return ser->ev_tbl[event].name;
+
+ return "err_ev_name";
+}
+
+static char *ser_st_name(struct rtw89_ser *ser)
+{
+ if (ser->state < SER_ST_MAX_ST)
+ return ser->st_tbl[ser->state].name;
+
+ return "err_st_name";
+}
+
+static void ser_state_run(struct rtw89_ser *ser, u8 evt)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
+ ser_st_name(ser), ser_ev_name(ser, evt));
+
+ rtw89_leave_lps(rtwdev);
+ ser->st_tbl[ser->state].st_func(ser, evt);
+}
+
+static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
+ return;
+ ser_state_run(ser, SER_EV_STATE_OUT);
+
+ rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
+ ser_st_name(ser), ser->st_tbl[new_state].name);
+
+ ser->state = new_state;
+ ser_state_run(ser, SER_EV_STATE_IN);
+}
+
+static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
+{
+ struct ser_msg *msg;
+
+ spin_lock_irq(&ser->msg_q_lock);
+ msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
+ if (msg)
+ list_del(&msg->list);
+ spin_unlock_irq(&ser->msg_q_lock);
+
+ return msg;
+}
+
+static void rtw89_ser_hdl_work(struct work_struct *work)
+{
+ struct ser_msg *msg;
+ struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
+ ser_hdl_work);
+
+ while ((msg = __rtw89_ser_dequeue_msg(ser))) {
+ ser_state_run(ser, msg->event);
+ kfree(msg);
+ }
+}
+
+static int ser_send_msg(struct rtw89_ser *ser, u8 event)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ struct ser_msg *msg = NULL;
+
+ if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
+ return -EIO;
+
+ msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+ if (!msg)
+ return -ENOMEM;
+
+ msg->event = event;
+
+ spin_lock_irq(&ser->msg_q_lock);
+ list_add(&msg->list, &ser->msg_q);
+ spin_unlock_irq(&ser->msg_q_lock);
+
+ ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
+ return 0;
+}
+
+static void rtw89_ser_alarm_work(struct work_struct *work)
+{
+ struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
+ ser_alarm_work.work);
+
+ ser_send_msg(ser, ser->alarm_event);
+ ser->alarm_event = SER_EV_NONE;
+}
+
+static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
+ return;
+
+ ser->alarm_event = event;
+ ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
+ msecs_to_jiffies(ms));
+}
+
+static void ser_del_alarm(struct rtw89_ser *ser)
+{
+ cancel_delayed_work(&ser->ser_alarm_work);
+ ser->alarm_event = SER_EV_NONE;
+}
+
+/* driver function */
+static void drv_stop_tx(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ ieee80211_stop_queues(rtwdev->hw);
+ set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
+}
+
+static void drv_stop_rx(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
+}
+
+static void drv_trx_reset(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ rtw89_hci_reset(rtwdev);
+}
+
+static void drv_resume_tx(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
+ return;
+
+ ieee80211_wake_queues(rtwdev->hw);
+ clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
+}
+
+static void drv_resume_rx(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
+ return;
+
+ set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
+ clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
+}
+
+static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
+{
+ rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
+ rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
+ rtwvif->trigger = false;
+}
+
+static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_vif *rtwvif;
+
+ rtw89_cam_reset_keys(rtwdev);
+ rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
+ rtw89_for_each_rtwvif(rtwdev, rtwvif)
+ ser_reset_vif(rtwdev, rtwvif);
+}
+
+/* hal function */
+static int hal_enable_dma(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ int ret;
+
+ if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
+ return 0;
+
+ if (!rtwdev->hci.ops->mac_lv1_rcvy)
+ return -EIO;
+
+ ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
+ if (!ret)
+ clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
+
+ return ret;
+}
+
+static int hal_stop_dma(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+ int ret;
+
+ if (!rtwdev->hci.ops->mac_lv1_rcvy)
+ return -EIO;
+
+ ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
+ if (!ret)
+ set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);
+
+ return ret;
+}
+
+static void hal_send_m2_event(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
+}
+
+static void hal_send_m4_event(struct rtw89_ser *ser)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
+}
+
+/* state handler */
+static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
+{
+ switch (evt) {
+ case SER_EV_STATE_IN:
+ break;
+ case SER_EV_L1_RESET:
+ ser_state_goto(ser, SER_RESET_TRX_ST);
+ break;
+ case SER_EV_L2_RESET:
+ ser_state_goto(ser, SER_L2_RESET_ST);
+ break;
+ case SER_EV_STATE_OUT:
+ default:
+ break;
+ }
+}
+
+static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
+{
+ switch (evt) {
+ case SER_EV_STATE_IN:
+ drv_stop_tx(ser);
+
+ if (hal_stop_dma(ser)) {
+ ser_state_goto(ser, SER_L2_RESET_ST);
+ break;
+ }
+
+ drv_stop_rx(ser);
+ drv_trx_reset(ser);
+
+ /* wait m3 */
+ hal_send_m2_event(ser);
+
+ /* set alarm to prevent FW response timeout */
+ ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
+ break;
+
+ case SER_EV_DO_RECOVERY:
+ ser_state_goto(ser, SER_DO_HCI_ST);
+ break;
+
+ case SER_EV_M3_TIMEOUT:
+ ser_state_goto(ser, SER_L2_RESET_ST);
+ break;
+
+ case SER_EV_STATE_OUT:
+ ser_del_alarm(ser);
+ hal_enable_dma(ser);
+ drv_resume_rx(ser);
+ drv_resume_tx(ser);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
+{
+ switch (evt) {
+ case SER_EV_STATE_IN:
+ /* wait m5 */
+ hal_send_m4_event(ser);
+
+ /* prevent FW response timeout */
+ ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
+ break;
+
+ case SER_EV_FW_M5_TIMEOUT:
+ ser_state_goto(ser, SER_L2_RESET_ST);
+ break;
+
+ case SER_EV_MAC_RESET_DONE:
+ ser_state_goto(ser, SER_IDLE_ST);
+ break;
+
+ case SER_EV_STATE_OUT:
+ ser_del_alarm(ser);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
+{
+ struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
+
+ switch (evt) {
+ case SER_EV_STATE_IN:
+ mutex_lock(&rtwdev->mutex);
+ ser_reset_mac_binding(rtwdev);
+ rtw89_core_stop(rtwdev);
+ mutex_unlock(&rtwdev->mutex);
+
+ ieee80211_restart_hw(rtwdev->hw);
+ ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
+ break;
+
+ case SER_EV_L2_RECFG_TIMEOUT:
+ rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
+ fallthrough;
+ case SER_EV_L2_RECFG_DONE:
+ ser_state_goto(ser, SER_IDLE_ST);
+ break;
+
+ case SER_EV_STATE_OUT:
+ ser_del_alarm(ser);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static struct event_ent ser_ev_tbl[] = {
+ {SER_EV_NONE, "SER_EV_NONE"},
+ {SER_EV_STATE_IN, "SER_EV_STATE_IN"},
+ {SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
+ {SER_EV_L1_RESET, "SER_EV_L1_RESET"},
+ {SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
+ {SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
+ {SER_EV_L2_RESET, "SER_EV_L2_RESET"},
+ {SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
+ {SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
+ {SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
+ {SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
+ {SER_EV_L0_RESET, "SER_EV_L0_RESET"},
+ {SER_EV_MAXX, "SER_EV_MAX"}
+};
+
+static struct state_ent ser_st_tbl[] = {
+ {SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
+ {SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
+ {SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
+ {SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
+};
+
+int rtw89_ser_init(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ser *ser = &rtwdev->ser;
+
+ memset(ser, 0, sizeof(*ser));
+ INIT_LIST_HEAD(&ser->msg_q);
+ ser->state = SER_IDLE_ST;
+ ser->st_tbl = ser_st_tbl;
+ ser->ev_tbl = ser_ev_tbl;
+
+ bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
+ spin_lock_init(&ser->msg_q_lock);
+ INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
+ INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
+ return 0;
+}
+
+int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
+{
+ struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;
+
+ set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
+ cancel_delayed_work_sync(&ser->ser_alarm_work);
+ cancel_work_sync(&ser->ser_hdl_work);
+ clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
+ return 0;
+}
+
+void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
+{
+ ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
+}
+
+int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
+{
+ u8 event = SER_EV_NONE;
+
+ rtw89_info(rtwdev, "ser event = 0x%04x\n", err);
+
+ switch (err) {
+ case MAC_AX_ERR_L1_ERR_DMAC:
+ case MAC_AX_ERR_L0_PROMOTE_TO_L1:
+ event = SER_EV_L1_RESET; /* M1 */
+ break;
+ case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
+ event = SER_EV_DO_RECOVERY; /* M3 */
+ break;
+ case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
+ event = SER_EV_MAC_RESET_DONE; /* M5 */
+ break;
+ case MAC_AX_ERR_L0_ERR_CMAC0:
+ case MAC_AX_ERR_L0_ERR_CMAC1:
+ case MAC_AX_ERR_L0_RESET_DONE:
+ event = SER_EV_L0_RESET;
+ break;
+ default:
+ if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
+ (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
+ err <= MAC_AX_GET_ERR_MAX))
+ event = SER_EV_L2_RESET;
+ break;
+ }
+
+ if (event == SER_EV_NONE)
+ return -EINVAL;
+
+ ser_send_msg(&rtwdev->ser, event);
+ return 0;
+}
+EXPORT_SYMBOL(rtw89_ser_notify);
diff --git a/drivers/net/wireless/realtek/rtw89/ser.h b/drivers/net/wireless/realtek/rtw89/ser.h
new file mode 100644
index 000000000000..6b8e62019942
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/ser.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ * Copyright(c) 2019-2020 Realtek Corporation
+ */
+#ifndef __SER_H__
+#define __SER_H__
+
+#include "core.h"
+
+int rtw89_ser_init(struct rtw89_dev *rtwdev);
+int rtw89_ser_deinit(struct rtw89_dev *rtwdev);
+int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err);
+void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev);
+
+#endif /* __SER_H__*/
+
diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h
new file mode 100644
index 000000000000..f1e0fe36107d
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/txrx.h
@@ -0,0 +1,358 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
+/* Copyright(c) 2020 Realtek Corporation
+ */
+
+#ifndef __RTW89_TXRX_H__
+#define __RTW89_TXRX_H__
+
+#include "debug.h"
+
+#define DATA_RATE_MODE_CTRL_MASK GENMASK(8, 7)
+#define DATA_RATE_NOT_HT_IDX_MASK GENMASK(3, 0)
+#define DATA_RATE_MODE_NON_HT 0x0
+#define DATA_RATE_HT_IDX_MASK GENMASK(4, 0)
+#define DATA_RATE_MODE_HT 0x1
+#define DATA_RATE_VHT_HE_NSS_MASK GENMASK(6, 4)
+#define DATA_RATE_VHT_HE_IDX_MASK GENMASK(3, 0)
+#define DATA_RATE_MODE_VHT 0x2
+#define DATA_RATE_MODE_HE 0x3
+#define GET_DATA_RATE_MODE(r) FIELD_GET(DATA_RATE_MODE_CTRL_MASK, r)
+#define GET_DATA_RATE_NOT_HT_IDX(r) FIELD_GET(DATA_RATE_NOT_HT_IDX_MASK, r)
+#define GET_DATA_RATE_HT_IDX(r) FIELD_GET(DATA_RATE_HT_IDX_MASK, r)
+#define GET_DATA_RATE_VHT_HE_IDX(r) FIELD_GET(DATA_RATE_VHT_HE_IDX_MASK, r)
+#define GET_DATA_RATE_NSS(r) FIELD_GET(DATA_RATE_VHT_HE_NSS_MASK, r)
+
+/* TX WD BODY DWORD 0 */
+#define RTW89_TXWD_BODY0_WP_OFFSET GENMASK(31, 24)
+#define RTW89_TXWD_BODY0_MORE_DATA BIT(23)
+#define RTW89_TXWD_BODY0_WD_INFO_EN BIT(22)
+#define RTW89_TXWD_BODY0_FW_DL BIT(20)
+#define RTW89_TXWD_BODY0_CHANNEL_DMA GENMASK(19, 16)
+#define RTW89_TXWD_BODY0_HDR_LLC_LEN GENMASK(15, 11)
+#define RTW89_TXWD_BODY0_WD_PAGE BIT(7)
+#define RTW89_TXWD_BODY0_HW_AMSDU BIT(5)
+
+/* TX WD BODY DWORD 1 */
+#define RTW89_TXWD_BODY1_PAYLOAD_ID GENMASK(31, 16)
+
+/* TX WD BODY DWORD 2 */
+#define RTW89_TXWD_BODY2_MACID GENMASK(30, 24)
+#define RTW89_TXWD_BODY2_TID_INDICATE BIT(23)
+#define RTW89_TXWD_BODY2_QSEL GENMASK(22, 17)
+#define RTW89_TXWD_BODY2_TXPKT_SIZE GENMASK(13, 0)
+
+/* TX WD BODY DWORD 3 */
+#define RTW89_TXWD_BODY3_BK BIT(13)
+#define RTW89_TXWD_BODY3_AGG_EN BIT(12)
+#define RTW89_TXWD_BODY3_SW_SEQ GENMASK(11, 0)
+
+/* TX WD BODY DWORD 4 */
+
+/* TX WD BODY DWORD 5 */
+
+/* TX WD INFO DWORD 0 */
+#define RTW89_TXWD_INFO0_USE_RATE BIT(30)
+#define RTW89_TXWD_INFO0_DATA_BW GENMASK(29, 28)
+#define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25)
+#define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16)
+#define RTW89_TXWD_INFO0_DISDATAFB BIT(10)
+
+/* TX WD INFO DWORD 1 */
+#define RTW89_TXWD_INFO1_DATA_RTY_LOWEST_RATE GENMASK(24, 16)
+#define RTW89_TXWD_INFO1_A_CTRL_BSR BIT(14)
+#define RTW89_TXWD_INFO1_MAX_AGGNUM GENMASK(7, 0)
+
+/* TX WD INFO DWORD 2 */
+#define RTW89_TXWD_INFO2_AMPDU_DENSITY GENMASK(20, 18)
+#define RTW89_TXWD_INFO2_SEC_TYPE GENMASK(12, 9)
+#define RTW89_TXWD_INFO2_SEC_HW_ENC BIT(8)
+#define RTW89_TXWD_INFO2_SEC_CAM_IDX GENMASK(7, 0)
+
+/* TX WD INFO DWORD 3 */
+
+/* TX WD INFO DWORD 4 */
+#define RTW89_TXWD_INFO4_RTS_EN BIT(27)
+#define RTW89_TXWD_INFO4_HW_RTS_EN BIT(31)
+
+/* TX WD INFO DWORD 5 */
+
+/* RX DESC helpers */
+/* Short Descriptor */
+#define RTW89_GET_RXWD_LONG_RXD(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, BIT(31))
+#define RTW89_GET_RXWD_DRV_INFO_SIZE(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, GENMASK(30, 28))
+#define RTW89_GET_RXWD_RPKT_TYPE(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, GENMASK(27, 24))
+#define RTW89_GET_RXWD_MAC_INFO_VALID(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, BIT(23))
+#define RTW89_GET_RXWD_BB_SEL(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, BIT(22))
+#define RTW89_GET_RXWD_HD_IV_LEN(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, GENMASK(21, 16))
+#define RTW89_GET_RXWD_SHIFT(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, GENMASK(15, 14))
+#define RTW89_GET_RXWD_PKT_SIZE(rxdesc) \
+ le32_get_bits((rxdesc)->dword0, GENMASK(13, 0))
+#define RTW89_GET_RXWD_BW(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(31, 30))
+#define RTW89_GET_RXWD_GI_LTF(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(27, 25))
+#define RTW89_GET_RXWD_DATA_RATE(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(24, 16))
+#define RTW89_GET_RXWD_USER_ID(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(15, 8))
+#define RTW89_GET_RXWD_SR_EN(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, BIT(7))
+#define RTW89_GET_RXWD_PPDU_CNT(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(6, 4))
+#define RTW89_GET_RXWD_PPDU_TYPE(rxdesc) \
+ le32_get_bits((rxdesc)->dword1, GENMASK(3, 0))
+#define RTW89_GET_RXWD_FREE_RUN_CNT(rxdesc) \
+ le32_get_bits((rxdesc)->dword2, GENMASK(31, 0))
+#define RTW89_GET_RXWD_ICV_ERR(rxdesc) \
+ le32_get_bits((rxdesc)->dword3, BIT(10))
+#define RTW89_GET_RXWD_CRC32_ERR(rxdesc) \
+ le32_get_bits((rxdesc)->dword3, BIT(9))
+#define RTW89_GET_RXWD_HW_DEC(rxdesc) \
+ le32_get_bits((rxdesc)->dword3, BIT(2))
+#define RTW89_GET_RXWD_SW_DEC(rxdesc) \
+ le32_get_bits((rxdesc)->dword3, BIT(1))
+#define RTW89_GET_RXWD_A1_MATCH(rxdesc) \
+ le32_get_bits((rxdesc)->dword3, BIT(0))
+
+/* Long Descriptor */
+#define RTW89_GET_RXWD_FRAG(rxdesc) \
+ le32_get_bits((rxdesc)->dword4, GENMASK(31, 28))
+#define RTW89_GET_RXWD_SEQ(rxdesc) \
+ le32_get_bits((rxdesc)->dword4, GENMASK(27, 16))
+#define RTW89_GET_RXWD_TYPE(rxdesc) \
+ le32_get_bits((rxdesc)->dword4, GENMASK(1, 0))
+#define RTW89_GET_RXWD_ADDR_CAM_VLD(rxdesc) \
+ le32_get_bits((rxdesc)->dword5, BIT(28))
+#define RTW89_GET_RXWD_RX_PL_ID(rxdesc) \
+ le32_get_bits((rxdesc)->dword5, GENMASK(27, 24))
+#define RTW89_GET_RXWD_MAC_ID(rxdesc) \
+ le32_get_bits((rxdesc)->dword5, GENMASK(23, 16))
+#define RTW89_GET_RXWD_ADDR_CAM_ID(rxdesc) \
+ le32_get_bits((rxdesc)->dword5, GENMASK(15, 8))
+#define RTW89_GET_RXWD_SEC_CAM_ID(rxdesc) \
+ le32_get_bits((rxdesc)->dword5, GENMASK(7, 0))
+
+#define RTW89_GET_RXINFO_USR_NUM(rpt) \
+ le32_get_bits(*((__le32 *)rpt), GENMASK(3, 0))
+#define RTW89_GET_RXINFO_FW_DEFINE(rpt) \
+ le32_get_bits(*((__le32 *)rpt), GENMASK(15, 8))
+#define RTW89_GET_RXINFO_LSIG_LEN(rpt) \
+ le32_get_bits(*((__le32 *)rpt), GENMASK(27, 16))
+#define RTW89_GET_RXINFO_IS_TO_SELF(rpt) \
+ le32_get_bits(*((__le32 *)rpt), BIT(28))
+#define RTW89_GET_RXINFO_RX_CNT_VLD(rpt) \
+ le32_get_bits(*((__le32 *)rpt), BIT(29))
+#define RTW89_GET_RXINFO_LONG_RXD(rpt) \
+ le32_get_bits(*((__le32 *)rpt), GENMASK(31, 30))
+#define RTW89_GET_RXINFO_SERVICE(rpt) \
+ le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(15, 0))
+#define RTW89_GET_RXINFO_PLCP_LEN(rpt) \
+ le32_get_bits(*((__le32 *)(rpt) + 1), GENMASK(23, 16))
+#define RTW89_GET_RXINFO_MAC_ID_VALID(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(0))
+#define RTW89_GET_RXINFO_DATA(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(1))
+#define RTW89_GET_RXINFO_CTRL(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(2))
+#define RTW89_GET_RXINFO_MGMT(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(3))
+#define RTW89_GET_RXINFO_BCM(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), BIT(4))
+#define RTW89_GET_RXINFO_MACID(rpt, usr) \
+ le32_get_bits(*((__le32 *)(rpt) + (usr) + 2), GENMASK(15, 8))
+
+#define RTW89_GET_PHY_STS_RSSI_A(sts) \
+ le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(7, 0))
+#define RTW89_GET_PHY_STS_RSSI_B(sts) \
+ le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(15, 8))
+#define RTW89_GET_PHY_STS_RSSI_C(sts) \
+ le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(23, 16))
+#define RTW89_GET_PHY_STS_RSSI_D(sts) \
+ le32_get_bits(*((__le32 *)(sts) + 1), GENMASK(31, 24))
+#define RTW89_GET_PHY_STS_LEN(sts) \
+ le32_get_bits(*((__le32 *)sts), GENMASK(15, 8))
+#define RTW89_GET_PHY_STS_RSSI_AVG(sts) \
+ le32_get_bits(*((__le32 *)sts), GENMASK(31, 24))
+#define RTW89_GET_PHY_STS_IE_TYPE(ie) \
+ le32_get_bits(*((__le32 *)ie), GENMASK(4, 0))
+#define RTW89_GET_PHY_STS_IE_LEN(ie) \
+ le32_get_bits(*((__le32 *)ie), GENMASK(11, 5))
+#define RTW89_GET_PHY_STS_IE0_CFO(ie) \
+ le32_get_bits(*((__le32 *)(ie) + 1), GENMASK(31, 20))
+
+enum rtw89_tx_channel {
+ RTW89_TXCH_ACH0 = 0,
+ RTW89_TXCH_ACH1 = 1,
+ RTW89_TXCH_ACH2 = 2,
+ RTW89_TXCH_ACH3 = 3,
+ RTW89_TXCH_ACH4 = 4,
+ RTW89_TXCH_ACH5 = 5,
+ RTW89_TXCH_ACH6 = 6,
+ RTW89_TXCH_ACH7 = 7,
+ RTW89_TXCH_CH8 = 8, /* MGMT Band 0 */
+ RTW89_TXCH_CH9 = 9, /* HI Band 0 */
+ RTW89_TXCH_CH10 = 10, /* MGMT Band 1 */
+ RTW89_TXCH_CH11 = 11, /* HI Band 1 */
+ RTW89_TXCH_CH12 = 12, /* FW CMD */
+
+ /* keep last */
+ RTW89_TXCH_NUM,
+ RTW89_TXCH_MAX = RTW89_TXCH_NUM - 1
+};
+
+enum rtw89_rx_channel {
+ RTW89_RXCH_RXQ = 0,
+ RTW89_RXCH_RPQ = 1,
+
+ /* keep last */
+ RTW89_RXCH_NUM,
+ RTW89_RXCH_MAX = RTW89_RXCH_NUM - 1
+};
+
+enum rtw89_tx_qsel {
+ RTW89_TX_QSEL_BE_0 = 0x00,
+ RTW89_TX_QSEL_BK_0 = 0x01,
+ RTW89_TX_QSEL_VI_0 = 0x02,
+ RTW89_TX_QSEL_VO_0 = 0x03,
+ RTW89_TX_QSEL_BE_1 = 0x04,
+ RTW89_TX_QSEL_BK_1 = 0x05,
+ RTW89_TX_QSEL_VI_1 = 0x06,
+ RTW89_TX_QSEL_VO_1 = 0x07,
+ RTW89_TX_QSEL_BE_2 = 0x08,
+ RTW89_TX_QSEL_BK_2 = 0x09,
+ RTW89_TX_QSEL_VI_2 = 0x0a,
+ RTW89_TX_QSEL_VO_2 = 0x0b,
+ RTW89_TX_QSEL_BE_3 = 0x0c,
+ RTW89_TX_QSEL_BK_3 = 0x0d,
+ RTW89_TX_QSEL_VI_3 = 0x0e,
+ RTW89_TX_QSEL_VO_3 = 0x0f,
+ RTW89_TX_QSEL_B0_BCN = 0x10,
+ RTW89_TX_QSEL_B0_HI = 0x11,
+ RTW89_TX_QSEL_B0_MGMT = 0x12,
+ RTW89_TX_QSEL_B0_NOPS = 0x13,
+ RTW89_TX_QSEL_B0_MGMT_FAST = 0x14,
+ /* reserved */
+ /* reserved */
+ /* reserved */
+ RTW89_TX_QSEL_B1_BCN = 0x18,
+ RTW89_TX_QSEL_B1_HI = 0x19,
+ RTW89_TX_QSEL_B1_MGMT = 0x1a,
+ RTW89_TX_QSEL_B1_NOPS = 0x1b,
+ RTW89_TX_QSEL_B1_MGMT_FAST = 0x1c,
+ /* reserved */
+ /* reserved */
+ /* reserved */
+};
+
+enum rtw89_phy_status_ie_type {
+ RTW89_PHYSTS_IE00_CMN_CCK = 0,
+ RTW89_PHYSTS_IE01_CMN_OFDM = 1,
+ RTW89_PHYSTS_IE02_CMN_EXT_AX = 2,
+ RTW89_PHYSTS_IE03_CMN_EXT_SEG_1 = 3,
+ RTW89_PHYSTS_IE04_CMN_EXT_PATH_A = 4,
+ RTW89_PHYSTS_IE05_CMN_EXT_PATH_B = 5,
+ RTW89_PHYSTS_IE06_CMN_EXT_PATH_C = 6,
+ RTW89_PHYSTS_IE07_CMN_EXT_PATH_D = 7,
+ RTW89_PHYSTS_IE08_FTR_CH = 8,
+ RTW89_PHYSTS_IE09_FTR_PLCP_0 = 9,
+ RTW89_PHYSTS_IE10_FTR_PLCP_EXT = 10,
+ RTW89_PHYSTS_IE11_FTR_PLCP_HISTOGRAM = 11,
+ RTW89_PHYSTS_IE12_MU_EIGEN_INFO = 12,
+ RTW89_PHYSTS_IE13_DL_MU_DEF = 13,
+ RTW89_PHYSTS_IE14_TB_UL_CQI = 14,
+ RTW89_PHYSTS_IE15_TB_UL_DEF = 15,
+ RTW89_PHYSTS_IE16_RSVD16 = 16,
+ RTW89_PHYSTS_IE17_TB_UL_CTRL = 17,
+ RTW89_PHYSTS_IE18_DBG_OFDM_FD_CMN = 18,
+ RTW89_PHYSTS_IE19_DBG_OFDM_TD_CMN = 19,
+ RTW89_PHYSTS_IE20_DBG_OFDM_FD_USER_SEG_0 = 20,
+ RTW89_PHYSTS_IE21_DBG_OFDM_FD_USER_SEG_1 = 21,
+ RTW89_PHYSTS_IE22_DBG_OFDM_FD_USER_AGC = 22,
+ RTW89_PHYSTS_IE23_RSVD23 = 23,
+ RTW89_PHYSTS_IE24_DBG_OFDM_TD_PATH_A = 24,
+ RTW89_PHYSTS_IE25_DBG_OFDM_TD_PATH_B = 25,
+ RTW89_PHYSTS_IE26_DBG_OFDM_TD_PATH_C = 26,
+ RTW89_PHYSTS_IE27_DBG_OFDM_TD_PATH_D = 27,
+ RTW89_PHYSTS_IE28_DBG_CCK_PATH_A = 28,
+ RTW89_PHYSTS_IE29_DBG_CCK_PATH_B = 29,
+ RTW89_PHYSTS_IE30_DBG_CCK_PATH_C = 30,
+ RTW89_PHYSTS_IE31_DBG_CCK_PATH_D = 31,
+
+ /* keep last */
+ RTW89_PHYSTS_IE_NUM,
+ RTW89_PHYSTS_IE_MAX = RTW89_PHYSTS_IE_NUM - 1
+};
+
+static inline u8 rtw89_core_get_qsel(struct rtw89_dev *rtwdev, u8 tid)
+{
+ switch (tid) {
+ default:
+ rtw89_warn(rtwdev, "Should use tag 1d: %d\n", tid);
+ fallthrough;
+ case 0:
+ case 3:
+ return RTW89_TX_QSEL_BE_0;
+ case 1:
+ case 2:
+ return RTW89_TX_QSEL_BK_0;
+ case 4:
+ case 5:
+ return RTW89_TX_QSEL_VI_0;
+ case 6:
+ case 7:
+ return RTW89_TX_QSEL_VO_0;
+ }
+}
+
+static inline u8 rtw89_core_get_ch_dma(struct rtw89_dev *rtwdev, u8 qsel)
+{
+ switch (qsel) {
+ default:
+ rtw89_warn(rtwdev, "Cannot map qsel to dma: %d\n", qsel);
+ fallthrough;
+ case RTW89_TX_QSEL_BE_0:
+ return RTW89_TXCH_ACH0;
+ case RTW89_TX_QSEL_BK_0:
+ return RTW89_TXCH_ACH1;
+ case RTW89_TX_QSEL_VI_0:
+ return RTW89_TXCH_ACH2;
+ case RTW89_TX_QSEL_VO_0:
+ return RTW89_TXCH_ACH3;
+ case RTW89_TX_QSEL_B0_MGMT:
+ return RTW89_TXCH_CH8;
+ case RTW89_TX_QSEL_B0_HI:
+ return RTW89_TXCH_CH9;
+ case RTW89_TX_QSEL_B1_MGMT:
+ return RTW89_TXCH_CH10;
+ case RTW89_TX_QSEL_B1_HI:
+ return RTW89_TXCH_CH11;
+ }
+}
+
+static inline u8 rtw89_core_get_tid_indicate(struct rtw89_dev *rtwdev, u8 tid)
+{
+ switch (tid) {
+ case 3:
+ case 2:
+ case 5:
+ case 7:
+ return 1;
+ default:
+ rtw89_warn(rtwdev, "Should use tag 1d: %d\n", tid);
+ fallthrough;
+ case 0:
+ case 1:
+ case 4:
+ case 6:
+ return 0;
+ }
+}
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h
new file mode 100644
index 000000000000..229e81009de6
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtw89/util.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
+ * Copyright(c) 2019-2020 Realtek Corporation
+ */
+#ifndef __RTW89_UTIL_H__
+#define __RTW89_UTIL_H__
+
+#include "core.h"
+
+#define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \
+ ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \
+ IEEE80211_IFACE_ITER_NORMAL, iterator, data)
+
+/* call this function with rtwdev->mutex held */
+#define rtw89_for_each_rtwvif(rtwdev, rtwvif) \
+ list_for_each_entry(rtwvif, &(rtwdev)->rtwvifs_list, list)
+
+#endif
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 63ce2443f136..ff2448394a1e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -3501,7 +3501,6 @@ fail:
cancel_delayed_work_sync(&priv->dev_poller_work);
cancel_delayed_work_sync(&priv->scan_work);
cancel_work_sync(&priv->work);
- flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
wiphy_free(wiphy);
@@ -3518,7 +3517,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
cancel_delayed_work_sync(&priv->dev_poller_work);
cancel_delayed_work_sync(&priv->scan_work);
cancel_work_sync(&priv->work);
- flush_workqueue(priv->workqueue);
destroy_workqueue(priv->workqueue);
rndis_unbind(usbdev, intf);
diff --git a/drivers/net/wireless/rsi/rsi_91x_core.c b/drivers/net/wireless/rsi/rsi_91x_core.c
index a48e616e0fb9..6bfaab48b507 100644
--- a/drivers/net/wireless/rsi/rsi_91x_core.c
+++ b/drivers/net/wireless/rsi/rsi_91x_core.c
@@ -399,6 +399,8 @@ void rsi_core_xmit(struct rsi_common *common, struct sk_buff *skb)
info = IEEE80211_SKB_CB(skb);
tx_params = (struct skb_info *)info->driver_data;
+ /* info->driver_data and info->control are part of a union, so make a copy */
+ tx_params->have_key = !!info->control.hw_key;
wh = (struct ieee80211_hdr *)&skb->data[0];
tx_params->sta_id = 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_hal.c b/drivers/net/wireless/rsi/rsi_91x_hal.c
index f4a26f16f00f..dca81a4bbdd7 100644
--- a/drivers/net/wireless/rsi/rsi_91x_hal.c
+++ b/drivers/net/wireless/rsi/rsi_91x_hal.c
@@ -203,7 +203,7 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
wh->frame_control |= cpu_to_le16(RSI_SET_PS_ENABLE);
if ((!(info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT)) &&
- info->control.hw_key) {
+ tx_params->have_key) {
if (rsi_is_cipher_wep(common))
ieee80211_size += 4;
else
@@ -214,15 +214,17 @@ int rsi_prepare_data_desc(struct rsi_common *common, struct sk_buff *skb)
RSI_WIFI_DATA_Q);
data_desc->header_len = ieee80211_size;
- if (common->min_rate != RSI_RATE_AUTO) {
+ if (common->rate_config[common->band].fixed_enabled) {
/* Send fixed rate */
+ u16 fixed_rate = common->rate_config[common->band].fixed_hw_rate;
+
data_desc->frame_info = cpu_to_le16(RATE_INFO_ENABLE);
- data_desc->rate_info = cpu_to_le16(common->min_rate);
+ data_desc->rate_info = cpu_to_le16(fixed_rate);
if (conf_is_ht40(&common->priv->hw->conf))
data_desc->bbp_info = cpu_to_le16(FULL40M_ENABLE);
- if ((common->vif_info[0].sgi) && (common->min_rate & 0x100)) {
+ if (common->vif_info[0].sgi && (fixed_rate & 0x100)) {
/* Only MCS rates */
data_desc->rate_info |=
cpu_to_le16(ENABLE_SHORTGI_RATE);
diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
index b66975f54567..e70c1c7fdf59 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
@@ -510,7 +510,6 @@ static int rsi_mac80211_add_interface(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_AP) ||
(vif->type == NL80211_IFTYPE_P2P_GO)) {
rsi_send_rx_filter_frame(common, DISALLOW_BEACONS);
- common->min_rate = RSI_RATE_AUTO;
for (i = 0; i < common->max_stations; i++)
common->stations[i].sta = NULL;
}
@@ -1228,20 +1227,32 @@ static int rsi_mac80211_set_rate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ const unsigned int mcs_offset = ARRAY_SIZE(rsi_rates);
struct rsi_hw *adapter = hw->priv;
struct rsi_common *common = adapter->priv;
- enum nl80211_band band = hw->conf.chandef.chan->band;
+ int i;
mutex_lock(&common->mutex);
- common->fixedrate_mask[band] = 0;
- if (mask->control[band].legacy == 0xfff) {
- common->fixedrate_mask[band] =
- (mask->control[band].ht_mcs[0] << 12);
- } else {
- common->fixedrate_mask[band] =
- mask->control[band].legacy;
+ for (i = 0; i < ARRAY_SIZE(common->rate_config); i++) {
+ struct rsi_rate_config *cfg = &common->rate_config[i];
+ u32 bm;
+
+ bm = mask->control[i].legacy | (mask->control[i].ht_mcs[0] << mcs_offset);
+ if (hweight32(bm) == 1) { /* single rate */
+ int rate_index = ffs(bm) - 1;
+
+ if (rate_index < mcs_offset)
+ cfg->fixed_hw_rate = rsi_rates[rate_index].hw_value;
+ else
+ cfg->fixed_hw_rate = rsi_mcsrates[rate_index - mcs_offset];
+ cfg->fixed_enabled = true;
+ } else {
+ cfg->configured_mask = bm;
+ cfg->fixed_enabled = false;
+ }
}
+
mutex_unlock(&common->mutex);
return 0;
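The decode above folds the cfg80211 bitrate mask into one bitmap, legacy rates in the low ARRAY_SIZE(rsi_rates) bits and HT MCS rates above them, and only switches to fixed-rate mode when exactly one bit survives (hweight32() == 1), picking its index with ffs(). A standalone sketch of the same bit arithmetic, assuming the driver's 12-entry legacy table; the names below are illustrative:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_LEGACY_RATES 12	/* stands in for ARRAY_SIZE(rsi_rates) */

static bool decode_rate_mask(uint32_t legacy, uint32_t ht_mcs0,
			     unsigned int *fixed_index)
{
	uint32_t bm = legacy | (ht_mcs0 << NUM_LEGACY_RATES);

	if (__builtin_popcount(bm) != 1)	/* kernel: hweight32(bm) */
		return false;			/* keep auto-rate, store mask */

	*fixed_index = __builtin_ffs(bm) - 1;	/* kernel: ffs(bm) - 1 */
	return true;				/* index < 12: legacy, else MCS */
}

int main(void)
{
	unsigned int idx;

	/* e.g. "iw ... set bitrates mcs-2.4 7" leaves only HT MCS7 set */
	if (decode_rate_mask(0x000, 1u << 7, &idx))
		printf("fixed rate, index %u -> MCS%u\n",
		       idx, idx - NUM_LEGACY_RATES);
	return 0;
}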
@@ -1378,46 +1389,6 @@ void rsi_indicate_pkt_to_os(struct rsi_common *common,
ieee80211_rx_irqsafe(hw, skb);
}
-static void rsi_set_min_rate(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct rsi_common *common)
-{
- u8 band = hw->conf.chandef.chan->band;
- u8 ii;
- u32 rate_bitmap;
- bool matched = false;
-
- common->bitrate_mask[band] = sta->supp_rates[band];
-
- rate_bitmap = (common->fixedrate_mask[band] & sta->supp_rates[band]);
-
- if (rate_bitmap & 0xfff) {
- /* Find out the min rate */
- for (ii = 0; ii < ARRAY_SIZE(rsi_rates); ii++) {
- if (rate_bitmap & BIT(ii)) {
- common->min_rate = rsi_rates[ii].hw_value;
- matched = true;
- break;
- }
- }
- }
-
- common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
-
- if ((common->vif_info[0].is_ht) && (rate_bitmap >> 12)) {
- for (ii = 0; ii < ARRAY_SIZE(rsi_mcsrates); ii++) {
- if ((rate_bitmap >> 12) & BIT(ii)) {
- common->min_rate = rsi_mcsrates[ii];
- matched = true;
- break;
- }
- }
- }
-
- if (!matched)
- common->min_rate = 0xffff;
-}
-
/**
* rsi_mac80211_sta_add() - This function notifies driver about a peer getting
* connected.
@@ -1516,9 +1487,9 @@ static int rsi_mac80211_sta_add(struct ieee80211_hw *hw,
if ((vif->type == NL80211_IFTYPE_STATION) ||
(vif->type == NL80211_IFTYPE_P2P_CLIENT)) {
- rsi_set_min_rate(hw, sta, common);
+ common->bitrate_mask[common->band] = sta->supp_rates[common->band];
+ common->vif_info[0].is_ht = sta->ht_cap.ht_supported;
if (sta->ht_cap.ht_supported) {
- common->vif_info[0].is_ht = true;
common->bitrate_mask[NL80211_BAND_2GHZ] =
sta->supp_rates[NL80211_BAND_2GHZ];
if ((sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20) ||
@@ -1592,7 +1563,6 @@ static int rsi_mac80211_sta_remove(struct ieee80211_hw *hw,
bss->qos = sta->wme;
common->bitrate_mask[NL80211_BAND_2GHZ] = 0;
common->bitrate_mask[NL80211_BAND_5GHZ] = 0;
- common->min_rate = 0xffff;
common->vif_info[0].is_ht = false;
common->vif_info[0].sgi = false;
common->vif_info[0].seq_start = 0;
diff --git a/drivers/net/wireless/rsi/rsi_91x_main.c b/drivers/net/wireless/rsi/rsi_91x_main.c
index d98483298555..f1bf71e6c608 100644
--- a/drivers/net/wireless/rsi/rsi_91x_main.c
+++ b/drivers/net/wireless/rsi/rsi_91x_main.c
@@ -211,9 +211,10 @@ int rsi_read_pkt(struct rsi_common *common, u8 *rx_pkt, s32 rcv_pkt_len)
bt_pkt_type = frame_desc[offset + BT_RX_PKT_TYPE_OFST];
if (bt_pkt_type == BT_CARD_READY_IND) {
rsi_dbg(INFO_ZONE, "BT Card ready recvd\n");
- if (rsi_bt_ops.attach(common, &g_proto_ops))
- rsi_dbg(ERR_ZONE,
- "Failed to attach BT module\n");
+ if (common->fsm_state == FSM_MAC_INIT_DONE)
+ rsi_attach_bt(common);
+ else
+ common->bt_defer_attach = true;
} else {
if (common->bt_adapter)
rsi_bt_ops.recv_pkt(common->bt_adapter,
@@ -278,6 +279,15 @@ void rsi_set_bt_context(void *priv, void *bt_context)
}
#endif
+void rsi_attach_bt(struct rsi_common *common)
+{
+#ifdef CONFIG_RSI_COEX
+ if (rsi_bt_ops.attach(common, &g_proto_ops))
+ rsi_dbg(ERR_ZONE,
+ "Failed to attach BT module\n");
+#endif
+}
+
/**
* rsi_91x_init() - This function initializes os interface operations.
* @oper_mode: One of DEV_OPMODE_*.
@@ -359,6 +369,7 @@ struct rsi_hw *rsi_91x_init(u16 oper_mode)
if (common->coex_mode > 1) {
if (rsi_coex_attach(common)) {
rsi_dbg(ERR_ZONE, "Failed to init coex module\n");
+ rsi_kill_thread(&common->tx_thread);
goto err;
}
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_mgmt.c b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
index 891fd5f0fa76..0848f7a7e76c 100644
--- a/drivers/net/wireless/rsi/rsi_91x_mgmt.c
+++ b/drivers/net/wireless/rsi/rsi_91x_mgmt.c
@@ -276,7 +276,7 @@ static void rsi_set_default_parameters(struct rsi_common *common)
common->channel_width = BW_20MHZ;
common->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
common->channel = 1;
- common->min_rate = 0xffff;
+ memset(&common->rate_config, 0, sizeof(common->rate_config));
common->fsm_state = FSM_CARD_NOT_READY;
common->iface_down = true;
common->endpoint = EP_2GHZ_20MHZ;
@@ -1314,7 +1314,7 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
u8 band = hw->conf.chandef.chan->band;
u8 num_supported_rates = 0;
u8 rate_table_offset, rate_offset = 0;
- u32 rate_bitmap;
+ u32 rate_bitmap, configured_rates;
u16 *selected_rates, min_rate;
bool is_ht = false, is_sgi = false;
u16 frame_len = sizeof(struct rsi_auto_rate);
@@ -1364,6 +1364,10 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
is_sgi = true;
}
+ /* Limit to any rates administratively configured by cfg80211 */
+ configured_rates = common->rate_config[band].configured_mask ?: 0xffffffff;
+ rate_bitmap &= configured_rates;
+
if (band == NL80211_BAND_2GHZ) {
if ((rate_bitmap == 0) && (is_ht))
min_rate = RSI_RATE_MCS0;
@@ -1389,10 +1393,13 @@ static int rsi_send_auto_rate_request(struct rsi_common *common,
num_supported_rates = jj;
if (is_ht) {
- for (ii = 0; ii < ARRAY_SIZE(mcs); ii++)
- selected_rates[jj++] = mcs[ii];
- num_supported_rates += ARRAY_SIZE(mcs);
- rate_offset += ARRAY_SIZE(mcs);
+ for (ii = 0; ii < ARRAY_SIZE(mcs); ii++) {
+ if (configured_rates & BIT(ii + ARRAY_SIZE(rsi_rates))) {
+ selected_rates[jj++] = mcs[ii];
+ num_supported_rates++;
+ rate_offset++;
+ }
+ }
}
sort(selected_rates, jj, sizeof(u16), &rsi_compare, NULL);
@@ -1482,7 +1489,7 @@ void rsi_inform_bss_status(struct rsi_common *common,
qos_enable,
aid, sta_id,
vif);
- if (common->min_rate == 0xffff)
+ if (!common->rate_config[common->band].fixed_enabled)
rsi_send_auto_rate_request(common, sta, sta_id, vif);
if (opmode == RSI_OPMODE_STA &&
!(assoc_cap & WLAN_CAPABILITY_PRIVACY) &&
@@ -2071,6 +2078,9 @@ static int rsi_handle_ta_confirm_type(struct rsi_common *common,
if (common->reinit_hw) {
complete(&common->wlan_init_completion);
} else {
+ if (common->bt_defer_attach)
+ rsi_attach_bt(common);
+
return rsi_mac80211_attach(common);
}
}
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio.c b/drivers/net/wireless/rsi/rsi_91x_sdio.c
index e0c502bc4270..9f16128e4ffa 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio.c
@@ -24,10 +24,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
/**
* rsi_sdio_set_cmd52_arg() - This function prepares cmd 52 read/write arg.
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb.c b/drivers/net/wireless/rsi/rsi_91x_usb.c
index 416976f09888..6821ea991895 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb.c
@@ -25,10 +25,7 @@
/* Default operating mode is wlan STA + BT */
static u16 dev_oper_mode = DEV_OPMODE_STA_BT_DUAL;
module_param(dev_oper_mode, ushort, 0444);
-MODULE_PARM_DESC(dev_oper_mode,
- "1[Wi-Fi], 4[BT], 8[BT LE], 5[Wi-Fi STA + BT classic]\n"
- "9[Wi-Fi STA + BT LE], 13[Wi-Fi STA + BT classic + BT LE]\n"
- "6[AP + BT classic], 14[AP + BT classic + BT LE]");
+MODULE_PARM_DESC(dev_oper_mode, DEV_OPMODE_PARAM_DESC);
static int rsi_rx_urb_submit(struct rsi_hw *adapter, u8 ep_num, gfp_t flags);
@@ -61,7 +58,7 @@ static int rsi_usb_card_write(struct rsi_hw *adapter,
(void *)seg,
(int)len,
&transfer,
- HZ * 5);
+ USB_CTRL_SET_TIMEOUT);
if (status < 0) {
rsi_dbg(ERR_ZONE,
diff --git a/drivers/net/wireless/rsi/rsi_hal.h b/drivers/net/wireless/rsi/rsi_hal.h
index d044a440fa08..5b07262a9740 100644
--- a/drivers/net/wireless/rsi/rsi_hal.h
+++ b/drivers/net/wireless/rsi/rsi_hal.h
@@ -28,6 +28,17 @@
#define DEV_OPMODE_AP_BT 6
#define DEV_OPMODE_AP_BT_DUAL 14
+#define DEV_OPMODE_PARAM_DESC \
+ __stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \
+ __stringify(DEV_OPMODE_BT_ALONE) "[BT classic alone], " \
+ __stringify(DEV_OPMODE_BT_LE_ALONE) "[BT LE alone], " \
+ __stringify(DEV_OPMODE_BT_DUAL) "[BT classic + BT LE alone], " \
+ __stringify(DEV_OPMODE_STA_BT) "[Wi-Fi STA + BT classic], " \
+ __stringify(DEV_OPMODE_STA_BT_LE) "[Wi-Fi STA + BT LE], " \
+ __stringify(DEV_OPMODE_STA_BT_DUAL) "[Wi-Fi STA + BT classic + BT LE], " \
+ __stringify(DEV_OPMODE_AP_BT) "[Wi-Fi AP + BT classic], " \
+ __stringify(DEV_OPMODE_AP_BT_DUAL) "[Wi-Fi AP + BT classic + BT LE]"
+
#define FLASH_WRITE_CHUNK_SIZE (4 * 1024)
#define FLASH_SECTOR_SIZE (4 * 1024)
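The description string is now generated from the DEV_OPMODE_* values themselves, so the help text cannot drift from the numbers module_param() actually accepts. A trimmed, userspace-compilable sketch of the stringification-plus-concatenation trick (only two of the nine modes shown; the local __stringify stands in for the kernel's <linux/stringify.h> helper):

#include <stdio.h>

/* Userspace stand-in for the kernel's __stringify() */
#define __stringify_1(x)	#x
#define __stringify(x)		__stringify_1(x)

#define DEV_OPMODE_WIFI_ALONE	1
#define DEV_OPMODE_STA_BT_DUAL	13

/* Adjacent string literals concatenate, so this expands to one string */
#define PARAM_DESC_SKETCH \
	__stringify(DEV_OPMODE_WIFI_ALONE) "[Wi-Fi alone], " \
	__stringify(DEV_OPMODE_STA_BT_DUAL) "[Wi-Fi STA + BT classic + BT LE]"

int main(void)
{
	/* prints: 1[Wi-Fi alone], 13[Wi-Fi STA + BT classic + BT LE] */
	puts(PARAM_DESC_SKETCH);
	return 0;
}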
diff --git a/drivers/net/wireless/rsi/rsi_main.h b/drivers/net/wireless/rsi/rsi_main.h
index 0f535850a383..dcf8fb40698b 100644
--- a/drivers/net/wireless/rsi/rsi_main.h
+++ b/drivers/net/wireless/rsi/rsi_main.h
@@ -61,6 +61,7 @@ enum RSI_FSM_STATES {
extern u32 rsi_zone_enabled;
extern __printf(2, 3) void rsi_dbg(u32 zone, const char *fmt, ...);
+#define RSI_MAX_BANDS 2
#define RSI_MAX_VIFS 3
#define NUM_EDCA_QUEUES 4
#define IEEE80211_ADDR_LEN 6
@@ -139,6 +140,7 @@ struct skb_info {
u8 internal_hdr_size;
struct ieee80211_vif *vif;
u8 vap_id;
+ bool have_key;
};
enum edca_queue {
@@ -229,6 +231,12 @@ struct rsi_9116_features {
u32 ps_options;
};
+struct rsi_rate_config {
+ u32 configured_mask; /* configured by mac80211: bits 0-11 = legacy, bits 12+ = MCS */
+ u16 fixed_hw_rate;
+ bool fixed_enabled;
+};
+
struct rsi_common {
struct rsi_hw *priv;
struct vif_priv vif_info[RSI_MAX_VIFS];
@@ -254,8 +262,8 @@ struct rsi_common {
u8 channel_width;
u16 rts_threshold;
- u16 bitrate_mask[2];
- u32 fixedrate_mask[2];
+ u32 bitrate_mask[RSI_MAX_BANDS];
+ struct rsi_rate_config rate_config[RSI_MAX_BANDS];
u8 rf_reset;
struct transmit_q_stats tx_stats;
@@ -276,7 +284,6 @@ struct rsi_common {
u8 mac_id;
u8 radio_id;
u16 rate_pwr[20];
- u16 min_rate;
/* WMM algo related */
u8 selected_qnum;
@@ -320,6 +327,7 @@ struct rsi_common {
struct ieee80211_vif *roc_vif;
bool eapol4_confirm;
+ bool bt_defer_attach;
void *bt_adapter;
struct cfg80211_scan_request *hwscan;
@@ -401,5 +409,6 @@ struct rsi_host_intf_ops {
enum rsi_host_intf rsi_get_host_intf(void *priv);
void rsi_set_bt_context(void *priv, void *bt_context);
+void rsi_attach_bt(struct rsi_common *common);
#endif
diff --git a/drivers/net/wireless/st/cw1200/bh.c b/drivers/net/wireless/st/cw1200/bh.c
index 8bade5d89f12..10e019cddcc6 100644
--- a/drivers/net/wireless/st/cw1200/bh.c
+++ b/drivers/net/wireless/st/cw1200/bh.c
@@ -85,8 +85,6 @@ void cw1200_unregister_bh(struct cw1200_common *priv)
atomic_inc(&priv->bh_term);
wake_up(&priv->bh_wq);
- flush_workqueue(priv->bh_workqueue);
-
destroy_workqueue(priv->bh_workqueue);
priv->bh_workqueue = NULL;
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index f26fc150ecd0..354a7e1c3315 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -488,12 +488,9 @@ static int wl1271_probe(struct spi_device *spi)
spi->bits_per_word = 32;
glue->reg = devm_regulator_get(&spi->dev, "vwlan");
- if (PTR_ERR(glue->reg) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- if (IS_ERR(glue->reg)) {
- dev_err(glue->dev, "can't get regulator\n");
- return PTR_ERR(glue->reg);
- }
+ if (IS_ERR(glue->reg))
+ return dev_err_probe(glue->dev, PTR_ERR(glue->reg),
+ "can't get regulator\n");
ret = wlcore_probe_of(spi, glue, pdev_data);
if (ret) {
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 672f5d5f3f2c..dad38fc04243 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1945,8 +1945,7 @@ static int wl3501_config(struct pcmcia_device *link)
goto failed;
}
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = ((char *)&this->mac_addr)[i];
+ eth_hw_addr_set(dev, this->mac_addr);
/* print probe information */
printk(KERN_INFO "%s: wl3501 @ 0x%3.3x, IRQ %d, "
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 097805b55c59..e64e4e579518 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -507,7 +507,7 @@ static int zd1201_getconfig(struct zd1201 *zd, int rid, void *riddata,
* byte data[12]
* total: 16
*/
-static int zd1201_setconfig(struct zd1201 *zd, int rid, void *buf, int len, int wait)
+static int zd1201_setconfig(struct zd1201 *zd, int rid, const void *buf, int len, int wait)
{
int err;
unsigned char *request;
@@ -857,7 +857,7 @@ static int zd1201_set_mac_address(struct net_device *dev, void *p)
addr->sa_data, dev->addr_len, 1);
if (err)
return err;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, addr->sa_data);
return zd1201_mac_reset(zd);
}
@@ -1729,6 +1729,7 @@ static int zd1201_probe(struct usb_interface *interface,
int err;
short porttype;
char buf[IW_ESSID_MAX_SIZE+2];
+ u8 addr[ETH_ALEN];
usb = interface_to_usbdev(interface);
@@ -1779,10 +1780,10 @@ static int zd1201_probe(struct usb_interface *interface,
dev->watchdog_timeo = ZD1201_TX_TIMEOUT;
strcpy(dev->name, "wlan%d");
- err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR,
- dev->dev_addr, dev->addr_len);
+ err = zd1201_getconfig(zd, ZD1201_RID_CNFOWNMACADDR, addr, ETH_ALEN);
if (err)
goto err_start;
+ eth_hw_addr_set(dev, addr);
/* Set wildcard essid to match zd->essid */
*(__le16 *)buf = cpu_to_le16(0);
diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
index a7ceef10bf6a..850c26bc9524 100644
--- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c
@@ -65,7 +65,6 @@ static const struct usb_device_id usb_ids[] = {
{ USB_DEVICE(0x0586, 0x3412), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x0586, 0x3413), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
- { USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0x4505), .driver_info = DEVICE_ZD1211B },
{ USB_DEVICE(0x083a, 0xe501), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig
index 77dbfc418bce..17543be14665 100644
--- a/drivers/net/wwan/Kconfig
+++ b/drivers/net/wwan/Kconfig
@@ -71,6 +71,7 @@ config RPMSG_WWAN_CTRL
config IOSM
tristate "IOSM Driver for Intel M.2 WWAN Device"
depends on INTEL_IOMMU
+ select NET_DEVLINK
help
This driver enables Intel M.2 WWAN Device communication.
diff --git a/drivers/net/wwan/iosm/Makefile b/drivers/net/wwan/iosm/Makefile
index 4f9f0ae398e1..b838034bb120 100644
--- a/drivers/net/wwan/iosm/Makefile
+++ b/drivers/net/wwan/iosm/Makefile
@@ -18,6 +18,9 @@ iosm-y = \
iosm_ipc_protocol.o \
iosm_ipc_protocol_ops.o \
iosm_ipc_mux.o \
- iosm_ipc_mux_codec.o
+ iosm_ipc_mux_codec.o \
+ iosm_ipc_devlink.o \
+ iosm_ipc_flash.o \
+ iosm_ipc_coredump.o
obj-$(CONFIG_IOSM) := iosm.o
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
index 519361ec40df..128c999e08bb 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.c
@@ -8,7 +8,7 @@
#include "iosm_ipc_chnl_cfg.h"
/* Max. sizes of downlink buffers */
-#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (16 * 1024)
+#define IPC_MEM_MAX_DL_FLASH_BUF_SIZE (64 * 1024)
#define IPC_MEM_MAX_DL_LOOPBACK_SIZE (1 * 1024 * 1024)
#define IPC_MEM_MAX_DL_AT_BUF_SIZE 2048
#define IPC_MEM_MAX_DL_RPC_BUF_SIZE (32 * 1024)
@@ -60,6 +60,10 @@ static struct ipc_chnl_cfg modem_cfg[] = {
{ IPC_MEM_CTRL_CHL_ID_6, IPC_MEM_PIPE_12, IPC_MEM_PIPE_13,
IPC_MEM_MAX_TDS_MBIM, IPC_MEM_MAX_TDS_MBIM,
IPC_MEM_MAX_DL_MBIM_BUF_SIZE, WWAN_PORT_MBIM },
+ /* Flash Channel/Coredump Channel */
+ { IPC_MEM_CTRL_CHL_ID_7, IPC_MEM_PIPE_0, IPC_MEM_PIPE_1,
+ IPC_MEM_MAX_TDS_FLASH_UL, IPC_MEM_MAX_TDS_FLASH_DL,
+ IPC_MEM_MAX_DL_FLASH_BUF_SIZE, WWAN_PORT_UNKNOWN },
};
int ipc_chnl_cfg_get(struct ipc_chnl_cfg *chnl_cfg, int index)
diff --git a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
index 422471367f78..e77084e76718 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_chnl_cfg.h
@@ -23,6 +23,7 @@ enum ipc_channel_id {
IPC_MEM_CTRL_CHL_ID_4,
IPC_MEM_CTRL_CHL_ID_5,
IPC_MEM_CTRL_CHL_ID_6,
+ IPC_MEM_CTRL_CHL_ID_7,
};
/**
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
new file mode 100644
index 000000000000..9acd87724c9d
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_coredump.h"
+
+/**
+ * ipc_coredump_collect - To collect coredump
+ * @devlink: Pointer to devlink instance.
+ * @data: Pointer to snapshot
+ * @entry: ID of requested snapshot
+ * @region_size: Region size
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size)
+{
+ int ret, bytes_to_read, bytes_read = 0, i = 0;
+ s32 remaining;
+ u8 *data_ptr;
+
+ data_ptr = vmalloc(region_size);
+ if (!data_ptr)
+ return -ENOMEM;
+
+ remaining = devlink->cd_file_info[entry].actual_size;
+ ret = ipc_devlink_send_cmd(devlink, rpsi_cmd_coredump_get, entry);
+ if (ret) {
+ dev_err(devlink->dev, "Send coredump_get cmd failed");
+ goto get_cd_fail;
+ }
+ while (remaining > 0) {
+ bytes_to_read = min(remaining, MAX_DATA_SIZE);
+ bytes_read = 0;
+ ret = ipc_imem_sys_devlink_read(devlink, data_ptr + i,
+ bytes_to_read, &bytes_read);
+ if (ret) {
+ dev_err(devlink->dev, "CD data read failed");
+ goto get_cd_fail;
+ }
+ remaining -= bytes_read;
+ i += bytes_read;
+ }
+
+ *data = data_ptr;
+
+ return 0;
+
+get_cd_fail:
+ vfree(data_ptr);
+ return ret;
+}
+
+/**
+ * ipc_coredump_get_list - Get coredump list from modem
+ * @devlink: Pointer to devlink instance.
+ * @cmd: RPSI command to be sent
+ *
+ * Returns: 0 on success, error on failure
+ */
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd)
+{
+ u32 byte_read, num_entries, file_size;
+ struct iosm_cd_table *cd_table;
+ u8 size[MAX_SIZE_LEN], i;
+ char *filename;
+ int ret;
+
+ cd_table = kzalloc(MAX_CD_LIST_SIZE, GFP_KERNEL);
+ if (!cd_table) {
+ ret = -ENOMEM;
+ goto cd_init_fail;
+ }
+
+ ret = ipc_devlink_send_cmd(devlink, cmd, MAX_CD_LIST_SIZE);
+ if (ret) {
+ dev_err(devlink->dev, "rpsi_cmd_coredump_start failed");
+ goto cd_init_fail;
+ }
+
+ ret = ipc_imem_sys_devlink_read(devlink, (u8 *)cd_table,
+ MAX_CD_LIST_SIZE, &byte_read);
+ if (ret) {
+ dev_err(devlink->dev, "Coredump data is invalid");
+ goto cd_init_fail;
+ }
+
+ if (byte_read != MAX_CD_LIST_SIZE) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ if (cmd == rpsi_cmd_coredump_start) {
+ num_entries = le32_to_cpu(cd_table->list.num_entries);
+ if (num_entries == 0 || num_entries > IOSM_NOF_CD_REGION) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ for (i = 0; i < num_entries; i++) {
+ file_size = le32_to_cpu(cd_table->list.entry[i].size);
+ filename = cd_table->list.entry[i].filename;
+
+ if (file_size > devlink->cd_file_info[i].default_size) {
+ ret = -EINVAL;
+ goto cd_init_fail;
+ }
+
+ devlink->cd_file_info[i].actual_size = file_size;
+ dev_dbg(devlink->dev, "file: %s actual size %d",
+ filename, file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ filename,
+ "FILENAME", 0, 0);
+ snprintf(size, sizeof(size), "%d", file_size);
+ devlink_flash_update_status_notify(devlink->devlink_ctx,
+ size, "FILE SIZE",
+ 0, 0);
+ }
+ }
+
+cd_init_fail:
+ kfree(cd_table);
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.h b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
new file mode 100644
index 000000000000..0809ba664276
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_COREDUMP_H_
+#define _IOSM_IPC_COREDUMP_H_
+
+#include "iosm_ipc_devlink.h"
+
+/* Max number of bytes to receive for Coredump list structure */
+#define MAX_CD_LIST_SIZE 0x1000
+
+/* Max buffer allocated to receive coredump data */
+#define MAX_DATA_SIZE 0x00010000
+
+/* Max number of file entries */
+#define MAX_NOF_ENTRY 256
+
+/* Max length of the file size string */
+#define MAX_SIZE_LEN 32
+
+/**
+ * struct iosm_cd_list_entry - Structure to hold coredump file info.
+ * @size: Number of bytes for the entry
+ * @filename: Coredump filename to be generated on host
+ */
+struct iosm_cd_list_entry {
+ __le32 size;
+ char filename[IOSM_MAX_FILENAME_LEN];
+} __packed;
+
+/**
+ * struct iosm_cd_list - Structure to hold list of coredump files
+ * to be collected.
+ * @num_entries: Number of entries to be received
+ * @entry: Contains File info
+ */
+struct iosm_cd_list {
+ __le32 num_entries;
+ struct iosm_cd_list_entry entry[MAX_NOF_ENTRY];
+} __packed;
+
+/**
+ * struct iosm_cd_table - Common Coredump table
+ * @version: Version of coredump structure
+ * @list: Coredump list structure
+ */
+struct iosm_cd_table {
+ __le32 version;
+ struct iosm_cd_list list;
+} __packed;
+
+int ipc_coredump_collect(struct iosm_devlink *devlink, u8 **data, int entry,
+ u32 region_size);
+
+int ipc_coredump_get_list(struct iosm_devlink *devlink, u16 cmd);
+
+#endif /* _IOSM_IPC_COREDUMP_H_ */
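Only a small prefix of struct iosm_cd_table is ever consumed: num_entries is bounded by IOSM_NOF_CD_REGION, so at most 6 of the 256 entry slots are read and the used prefix comfortably fits in the MAX_CD_LIST_SIZE buffer. A standalone parsing sketch under those assumptions; the struct and function names are illustrative, and the driver's __le32 fields are simplified to host-order uint32_t for a little-endian machine:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FILENAME_LEN	32	/* mirrors IOSM_MAX_FILENAME_LEN */
#define MAX_ENTRIES	256	/* mirrors MAX_NOF_ENTRY */
#define NOF_REGIONS	6	/* mirrors IOSM_NOF_CD_REGION */

struct cd_entry {
	uint32_t size;
	char filename[FILENAME_LEN];
} __attribute__((packed));

struct cd_table {
	uint32_t version;
	uint32_t num_entries;
	struct cd_entry entry[MAX_ENTRIES];
} __attribute__((packed));

/* Walk only the announced entries and reject obviously bogus counts. */
static int parse_cd_table(const uint8_t *buf, size_t len)
{
	const struct cd_table *tbl = (const void *)buf;
	uint32_t i, n;

	if (len < offsetof(struct cd_table, entry))
		return -1;
	n = tbl->num_entries;
	if (n == 0 || n > NOF_REGIONS ||
	    len < offsetof(struct cd_table, entry) + n * sizeof(struct cd_entry))
		return -1;

	for (i = 0; i < n; i++)
		printf("region %u: %.*s (%u bytes)\n", i, FILENAME_LEN,
		       tbl->entry[i].filename, (unsigned int)tbl->entry[i].size);
	return 0;
}

int main(void)
{
	static uint8_t buf[0x1000];	/* mirrors MAX_CD_LIST_SIZE */
	struct cd_table tbl = { .version = 1, .num_entries = 1 };

	tbl.entry[0].size = 0x800;
	strcpy(tbl.entry[0].filename, "report.json");
	memcpy(buf, &tbl, sizeof(buf));	/* only the prefix is meaningful */

	return parse_cd_table(buf, sizeof(buf));
}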
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
new file mode 100644
index 000000000000..17da85a8f337
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* Coredump list */
+static struct iosm_coredump_file_info list[IOSM_NOF_CD_REGION] = {
+ {"report.json", REPORT_JSON_SIZE,},
+ {"coredump.fcd", COREDUMP_FCD_SIZE,},
+ {"cdd.log", CDD_LOG_SIZE,},
+ {"eeprom.bin", EEPROM_BIN_SIZE,},
+ {"bootcore_trace.bin", BOOTCORE_TRC_BIN_SIZE,},
+ {"bootcore_prev_trace.bin", BOOTCORE_PREV_TRC_BIN_SIZE,},
+};
+
+/* Get the param values for the specific param ID's */
+static int ipc_devlink_get_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ctx->val.vu8 = ipc_devlink->param.erase_full_flash;
+
+ return 0;
+}
+
+/* Set the param values for the specific param ID's */
+static int ipc_devlink_set_param(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+
+ if (id == IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH)
+ ipc_devlink->param.erase_full_flash = ctx->val.vu8;
+
+ return 0;
+}
+
+/* Devlink param structure array */
+static const struct devlink_param iosm_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+ "erase_full_flash", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ ipc_devlink_get_param, ipc_devlink_set_param,
+ NULL),
+};
+
+/* Get devlink flash component type */
+static enum iosm_flash_comp_type
+ipc_devlink_get_flash_comp_type(const char comp_str[], u32 len)
+{
+ enum iosm_flash_comp_type fls_type;
+
+ if (!strncmp("PSI", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_PSI;
+ else if (!strncmp("EBL", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_EBL;
+ else if (!strncmp("FLS", comp_str, len))
+ fls_type = FLASH_COMP_TYPE_FLS;
+ else
+ fls_type = FLASH_COMP_TYPE_INVAL;
+
+ return fls_type;
+}
+
+/* Function triggered on devlink flash command
+ * Flash update function which calls multiple functions based on
+ * component type specified in the flash command
+ */
+static int ipc_devlink_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(devlink);
+ enum iosm_flash_comp_type fls_type;
+ struct iosm_devlink_image *header;
+ int rc = -EINVAL;
+ u8 *mdm_rsp;
+
+ header = (struct iosm_devlink_image *)params->fw->data;
+
+ if (!header || params->fw->size <= IOSM_DEVLINK_HDR_SIZE ||
+ (memcmp(header->magic_header, IOSM_DEVLINK_MAGIC_HEADER,
+ IOSM_DEVLINK_MAGIC_HEADER_LEN) != 0))
+ return -EINVAL;
+
+ mdm_rsp = kzalloc(IOSM_EBL_DW_PACK_SIZE, GFP_KERNEL);
+ if (!mdm_rsp)
+ return -ENOMEM;
+
+ fls_type = ipc_devlink_get_flash_comp_type(header->image_type,
+ IOSM_DEVLINK_MAX_IMG_LEN);
+
+ switch (fls_type) {
+ case FLASH_COMP_TYPE_PSI:
+ rc = ipc_flash_boot_psi(ipc_devlink, params->fw);
+ break;
+ case FLASH_COMP_TYPE_EBL:
+ rc = ipc_flash_boot_ebl(ipc_devlink, params->fw);
+ if (rc)
+ break;
+ rc = ipc_flash_boot_set_capabilities(ipc_devlink, mdm_rsp);
+ if (rc)
+ break;
+ rc = ipc_flash_read_swid(ipc_devlink, mdm_rsp);
+ break;
+ case FLASH_COMP_TYPE_FLS:
+ rc = ipc_flash_send_fls(ipc_devlink, params->fw, mdm_rsp);
+ break;
+ default:
+ devlink_flash_update_status_notify(devlink, "Invalid component",
+ NULL, 0, 0);
+ break;
+ }
+
+ if (!rc)
+ devlink_flash_update_status_notify(devlink, "Flashing success",
+ header->image_type, 0, 0);
+ else
+ devlink_flash_update_status_notify(devlink, "Flashing failed",
+ header->image_type, 0, 0);
+
+ kfree(mdm_rsp);
+ return rc;
+}
+
+/* Call back function for devlink ops */
+static const struct devlink_ops devlink_flash_ops = {
+ .flash_update = ipc_devlink_flash_update,
+};
+
+/**
+ * ipc_devlink_send_cmd - Send command to Modem
+ * @ipc_devlink: Pointer to struct iosm_devlink
+ * @cmd: Command to be sent to modem
+ * @entry: Command entry number
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry)
+{
+ struct iosm_rpsi_cmd rpsi_cmd;
+
+ rpsi_cmd.param.dword = cpu_to_le32(entry);
+ rpsi_cmd.cmd = cpu_to_le16(cmd);
+ rpsi_cmd.crc = rpsi_cmd.param.word[0] ^ rpsi_cmd.param.word[1] ^
+ rpsi_cmd.cmd;
+
+ return ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&rpsi_cmd,
+ sizeof(rpsi_cmd));
+}
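The crc field filled in above is not a true CRC; it is a 16-bit XOR fold of the command word and the two halves of the 32-bit parameter. A little-endian userspace sketch of the same folding (on LE hosts the driver's __le16/__le32 values match native order; the function name is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the rpsi "crc": XOR of the two 16-bit parameter words and the cmd */
static uint16_t rpsi_fold(uint16_t cmd, uint32_t entry)
{
	uint16_t lo = entry & 0xffff;
	uint16_t hi = entry >> 16;

	return lo ^ hi ^ cmd;
}

int main(void)
{
	/* e.g. rpsi_cmd_coredump_get (0x11) for coredump entry 2 -> 0x0013 */
	printf("crc = 0x%04x\n", rpsi_fold(0x11, 2));
	return 0;
}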
+
+/* Function to create snapshot */
+static int ipc_devlink_coredump_snapshot(struct devlink *dl,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ struct iosm_devlink *ipc_devlink = devlink_priv(dl);
+ struct iosm_coredump_file_info *cd_list = ops->priv;
+ u32 region_size;
+ int rc;
+
+ dev_dbg(ipc_devlink->dev, "Region:%s, ID:%d", ops->name,
+ cd_list->entry);
+ region_size = cd_list->default_size;
+ rc = ipc_coredump_collect(ipc_devlink, data, cd_list->entry,
+ region_size);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Fail to create snapshot,err %d", rc);
+ goto coredump_collect_err;
+ }
+
+ /* Send coredump end cmd indicating end of coredump collection */
+ if (cd_list->entry == (IOSM_NOF_CD_REGION - 1))
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+
+ return 0;
+
+coredump_collect_err:
+ ipc_coredump_get_list(ipc_devlink, rpsi_cmd_coredump_end);
+ return rc;
+}
+
+/* To create regions for coredump files */
+static int ipc_devlink_create_region(struct iosm_devlink *devlink)
+{
+ struct devlink_region_ops *mdm_coredump;
+ int rc = 0;
+ int i;
+
+ mdm_coredump = devlink->iosm_devlink_mdm_coredump;
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++) {
+ mdm_coredump[i].name = list[i].filename;
+ mdm_coredump[i].snapshot = ipc_devlink_coredump_snapshot;
+ mdm_coredump[i].destructor = vfree;
+ devlink->cd_regions[i] =
+ devlink_region_create(devlink->devlink_ctx,
+ &mdm_coredump[i], MAX_SNAPSHOTS,
+ list[i].default_size);
+
+ if (IS_ERR(devlink->cd_regions[i])) {
+ rc = PTR_ERR(devlink->cd_regions[i]);
+ dev_err(devlink->dev, "Devlink region fail,err %d", rc);
+ /* Delete previously created regions */
+ for (i--; i >= 0; i--)
+ devlink_region_destroy(devlink->cd_regions[i]);
+ goto region_create_fail;
+ }
+ list[i].entry = i;
+ mdm_coredump[i].priv = list + i;
+ }
+region_create_fail:
+ return rc;
+}
+
+/* To Destroy devlink regions */
+static void ipc_devlink_destroy_region(struct iosm_devlink *ipc_devlink)
+{
+ u8 i;
+
+ for (i = 0; i < IOSM_NOF_CD_REGION; i++)
+ devlink_region_destroy(ipc_devlink->cd_regions[i]);
+}
+
+/**
+ * ipc_devlink_init - Initialize/register devlink to IOSM driver
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: Pointer to iosm_devlink on success and NULL on failure
+ */
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem)
+{
+ struct ipc_chnl_cfg chnl_cfg_flash = { 0 };
+ struct iosm_devlink *ipc_devlink;
+ struct devlink *devlink_ctx;
+ int rc;
+
+ devlink_ctx = devlink_alloc(&devlink_flash_ops,
+ sizeof(struct iosm_devlink),
+ ipc_imem->dev);
+ if (!devlink_ctx) {
+ dev_err(ipc_imem->dev, "devlink_alloc failed");
+ goto devlink_alloc_fail;
+ }
+
+ ipc_devlink = devlink_priv(devlink_ctx);
+ ipc_devlink->devlink_ctx = devlink_ctx;
+ ipc_devlink->pcie = ipc_imem->pcie;
+ ipc_devlink->dev = ipc_imem->dev;
+
+ rc = devlink_params_register(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (rc) {
+ dev_err(ipc_devlink->dev,
+ "devlink_params_register failed. rc %d", rc);
+ goto param_reg_fail;
+ }
+
+ ipc_devlink->cd_file_info = list;
+
+ rc = ipc_devlink_create_region(ipc_devlink);
+ if (rc) {
+ dev_err(ipc_devlink->dev, "Devlink Region create failed, rc %d",
+ rc);
+ goto region_create_fail;
+ }
+
+ if (ipc_chnl_cfg_get(&chnl_cfg_flash, IPC_MEM_CTRL_CHL_ID_7) < 0)
+ goto chnl_get_fail;
+
+ ipc_imem_channel_init(ipc_imem, IPC_CTYPE_CTRL,
+ chnl_cfg_flash, IRQ_MOD_OFF);
+
+ init_completion(&ipc_devlink->devlink_sio.read_sem);
+ skb_queue_head_init(&ipc_devlink->devlink_sio.rx_list);
+
+ devlink_register(devlink_ctx);
+ dev_dbg(ipc_devlink->dev, "iosm devlink register success");
+
+ return ipc_devlink;
+
+chnl_get_fail:
+ ipc_devlink_destroy_region(ipc_devlink);
+region_create_fail:
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+param_reg_fail:
+ devlink_free(devlink_ctx);
+devlink_alloc_fail:
+ return NULL;
+}
+
+/**
+ * ipc_devlink_deinit - To uninitialize the devlink from IOSM driver.
+ * @ipc_devlink: Devlink instance
+ */
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink)
+{
+ struct devlink *devlink_ctx = ipc_devlink->devlink_ctx;
+
+ devlink_unregister(devlink_ctx);
+ ipc_devlink_destroy_region(ipc_devlink);
+ devlink_params_unregister(devlink_ctx, iosm_devlink_params,
+ ARRAY_SIZE(iosm_devlink_params));
+ if (ipc_devlink->devlink_sio.devlink_read_pend) {
+ complete(&ipc_devlink->devlink_sio.read_sem);
+ complete(&ipc_devlink->devlink_sio.channel->ul_sem);
+ }
+ if (!ipc_devlink->devlink_sio.devlink_read_pend)
+ skb_queue_purge(&ipc_devlink->devlink_sio.rx_list);
+
+ ipc_imem_sys_devlink_close(ipc_devlink);
+ devlink_free(devlink_ctx);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.h b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
new file mode 100644
index 000000000000..35c2d013b9cc
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_DEVLINK_H_
+#define _IOSM_IPC_DEVLINK_H_
+
+#include <net/devlink.h>
+
+#include "iosm_ipc_imem.h"
+#include "iosm_ipc_imem_ops.h"
+#include "iosm_ipc_pcie.h"
+
+/* Image ext max len */
+#define IOSM_DEVLINK_MAX_IMG_LEN 3
+/* Magic Header */
+#define IOSM_DEVLINK_MAGIC_HEADER "IOSM_DEVLINK_HEADER"
+/* Magic Header len */
+#define IOSM_DEVLINK_MAGIC_HEADER_LEN 20
+/* Devlink image type */
+#define IOSM_DEVLINK_IMG_TYPE 4
+/* Reserve header size */
+#define IOSM_DEVLINK_RESERVED 34
+/* Devlink Image Header size */
+#define IOSM_DEVLINK_HDR_SIZE sizeof(struct iosm_devlink_image)
+/* MAX file name length */
+#define IOSM_MAX_FILENAME_LEN 32
+/* EBL response size */
+#define IOSM_EBL_RSP_SIZE 76
+/* MAX number of regions supported */
+#define IOSM_NOF_CD_REGION 6
+/* MAX number of SNAPSHOTS supported */
+#define MAX_SNAPSHOTS 1
+/* Default Coredump file size */
+#define REPORT_JSON_SIZE 0x800
+#define COREDUMP_FCD_SIZE 0x10E00000
+#define CDD_LOG_SIZE 0x30000
+#define EEPROM_BIN_SIZE 0x10000
+#define BOOTCORE_TRC_BIN_SIZE 0x8000
+#define BOOTCORE_PREV_TRC_BIN_SIZE 0x20000
+
+/**
+ * enum iosm_devlink_param_id - Enum type for the different devlink params
+ * @IOSM_DEVLINK_PARAM_ID_BASE: Devlink param base ID
+ * @IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH: Set if full erase required
+ */
+
+enum iosm_devlink_param_id {
+ IOSM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ IOSM_DEVLINK_PARAM_ID_ERASE_FULL_FLASH,
+};
+
+/**
+ * enum iosm_rpsi_cmd_code - Enum type for RPSI command list
+ * @rpsi_cmd_code_ebl: Command to load ebl
+ * @rpsi_cmd_coredump_start: Command to get list of files and
+ * file size info from PSI
+ * @rpsi_cmd_coredump_get: Command to get the coredump data
+ * @rpsi_cmd_coredump_end: Command to stop receiving the coredump
+ */
+enum iosm_rpsi_cmd_code {
+ rpsi_cmd_code_ebl = 0x02,
+ rpsi_cmd_coredump_start = 0x10,
+ rpsi_cmd_coredump_get = 0x11,
+ rpsi_cmd_coredump_end = 0x12,
+};
+
+/**
+ * enum iosm_flash_comp_type - Enum for different flash component types
+ * @FLASH_COMP_TYPE_PSI: PSI flash comp type
+ * @FLASH_COMP_TYPE_EBL: EBL flash comp type
+ * @FLASH_COMP_TYPE_FLS: FLS flash comp type
+ * @FLASH_COMP_TYPE_INVAL: Invalid flash comp type
+ */
+enum iosm_flash_comp_type {
+ FLASH_COMP_TYPE_PSI,
+ FLASH_COMP_TYPE_EBL,
+ FLASH_COMP_TYPE_FLS,
+ FLASH_COMP_TYPE_INVAL,
+};
+
+/**
+ * struct iosm_devlink_sio - SIO instance
+ * @rx_list: Downlink skbuf list received from CP
+ * @read_sem: Needed for the blocking read or downlink transfer
+ * @channel_id: Reserved channel id for flashing/CD collection to RAM
+ * @channel: Channel instance for flashing and coredump
+ * @devlink_read_pend: Check if read is pending
+ */
+struct iosm_devlink_sio {
+ struct sk_buff_head rx_list;
+ struct completion read_sem;
+ int channel_id;
+ struct ipc_mem_channel *channel;
+ u32 devlink_read_pend;
+};
+
+/**
+ * struct iosm_flash_params - List of flash params required for flashing
+ * @erase_full_flash: To set the flashing mode
+ * erase_full_flash = 1; full erase
+ * erase_full_flash = 0; no erase
+ * @erase_full_flash_done: Flag to check if it is a full erase
+ */
+struct iosm_flash_params {
+ u8 erase_full_flash;
+ u8 erase_full_flash_done;
+};
+
+/**
+ * struct iosm_devlink_image - Structure with Fls file header info
+ * @magic_header: Header of the firmware image
+ * @image_type: Firmware image type
+ * @region_address: Address of the region to be flashed
+ * @download_region: Field to identify if it is a region
+ * @last_region: Field to identify if it is the last region
+ * @reserved: Reserved field
+ */
+struct iosm_devlink_image {
+ char magic_header[IOSM_DEVLINK_MAGIC_HEADER_LEN];
+ char image_type[IOSM_DEVLINK_IMG_TYPE];
+ __le32 region_address;
+ u8 download_region;
+ u8 last_region;
+ u8 reserved[IOSM_DEVLINK_RESERVED];
+} __packed;
+
+/**
+ * struct iosm_ebl_ctx_data - EBL ctx data used during flashing
+ * @ebl_sw_info_version: SWID version info obtained from EBL
+ * @m_ebl_resp: Buffer used to read and write the ebl data
+ */
+struct iosm_ebl_ctx_data {
+ u8 ebl_sw_info_version;
+ u8 m_ebl_resp[IOSM_EBL_RSP_SIZE];
+};
+
+/**
+ * struct iosm_coredump_file_info - Coredump file info
+ * @filename: Name of coredump file
+ * @default_size: Default size of coredump file
+ * @actual_size: Actual size of coredump file
+ * @entry: Index of the coredump file
+ */
+struct iosm_coredump_file_info {
+ char filename[IOSM_MAX_FILENAME_LEN];
+ u32 default_size;
+ u32 actual_size;
+ u32 entry;
+};
+
+/**
+ * struct iosm_devlink - IOSM Devlink structure
+ * @devlink_sio: SIO instance for read/write functionality
+ * @pcie: Pointer to PCIe component
+ * @dev: Pointer to device struct
+ * @devlink_ctx: Pointer to devlink context
+ * @param: Params required for flashing
+ * @ebl_ctx: Data to be read and written to Modem
+ * @cd_file_info: coredump file info
+ * @iosm_devlink_mdm_coredump: region ops for coredump collection
+ * @cd_regions: coredump regions
+ */
+struct iosm_devlink {
+ struct iosm_devlink_sio devlink_sio;
+ struct iosm_pcie *pcie;
+ struct device *dev;
+ struct devlink *devlink_ctx;
+ struct iosm_flash_params param;
+ struct iosm_ebl_ctx_data ebl_ctx;
+ struct iosm_coredump_file_info *cd_file_info;
+ struct devlink_region_ops iosm_devlink_mdm_coredump[IOSM_NOF_CD_REGION];
+ struct devlink_region *cd_regions[IOSM_NOF_CD_REGION];
+};
+
+/**
+ * union iosm_rpsi_param_u - RPSI cmd param for CRC calculation
+ * @word: Words member used in CRC calculation
+ * @dword: Actual data
+ */
+union iosm_rpsi_param_u {
+ __le16 word[2];
+ __le32 dword;
+};
+
+/**
+ * struct iosm_rpsi_cmd - Structure for RPSI Command
+ * @param: Used to calculate CRC
+ * @cmd: Stores the RPSI command
+ * @crc: Stores the CRC value
+ */
+struct iosm_rpsi_cmd {
+ union iosm_rpsi_param_u param;
+ __le16 cmd;
+ __le16 crc;
+};
+
+struct iosm_devlink *ipc_devlink_init(struct iosm_imem *ipc_imem);
+
+void ipc_devlink_deinit(struct iosm_devlink *ipc_devlink);
+
+int ipc_devlink_send_cmd(struct iosm_devlink *ipc_devlink, u16 cmd, u32 entry);
+
+#endif /* _IOSM_IPC_DEVLINK_H_ */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.c b/drivers/net/wwan/iosm/iosm_ipc_flash.c
new file mode 100644
index 000000000000..d890914aa349
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.c
@@ -0,0 +1,594 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#include "iosm_ipc_coredump.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
+
+/* This function will pack the data to be sent to the modem using the
+ * payload, payload length and pack id
+ */
+static int ipc_flash_proc_format_ebl_pack(struct iosm_flash_data *flash_req,
+ u32 pack_length, u16 pack_id,
+ u8 *payload, u32 payload_length)
+{
+ u16 checksum = pack_id;
+ u32 i;
+
+ if (payload_length + IOSM_EBL_HEAD_SIZE > pack_length)
+ return -EINVAL;
+
+ flash_req->pack_id = cpu_to_le16(pack_id);
+ flash_req->msg_length = cpu_to_le32(payload_length);
+ checksum += (payload_length >> IOSM_EBL_PAYL_SHIFT) +
+ (payload_length & IOSM_EBL_CKSM);
+
+ for (i = 0; i < payload_length; i++)
+ checksum += payload[i];
+
+ flash_req->checksum = cpu_to_le16(checksum);
+
+ return 0;
+}
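For reference, the EBL header checksum built above is a plain 16-bit additive sum over the pack id, the two 16-bit halves of the payload length and every payload byte. A standalone sketch with an illustrative payload; the helper name is not from the driver:

#include <stdint.h>
#include <stdio.h>

/* 16-bit additive checksum as used for the EBL pack header */
static uint16_t ebl_checksum(uint16_t pack_id, const uint8_t *payload,
			     uint32_t payload_len)
{
	uint16_t sum = pack_id;
	uint32_t i;

	sum += (payload_len >> 16) + (payload_len & 0xffff);
	for (i = 0; i < payload_len; i++)
		sum += payload[i];

	return sum;
}

int main(void)
{
	const uint8_t payload[] = { 0x01, 0x02, 0x03 };

	/* 0x0802 (FLASH_SET_ADDRESS) + 3 + 0x01 + 0x02 + 0x03 = 0x080b */
	printf("checksum = 0x%04x\n",
	       ebl_checksum(0x0802, payload, sizeof(payload)));
	return 0;
}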
+
+/* validate the response received from modem and
+ * check the type of errors received
+ */
+static int ipc_flash_proc_check_ebl_rsp(void *hdr_rsp, void *payload_rsp)
+{
+ struct iosm_ebl_error *err_info = payload_rsp;
+ u16 *rsp_code = hdr_rsp;
+ u32 i;
+
+ if (*rsp_code == IOSM_EBL_RSP_BUFF) {
+ for (i = 0; i < IOSM_MAX_ERRORS; i++) {
+ if (!err_info->error[i].error_code) {
+ pr_err("EBL: error_class = %d, error_code = %d",
+ err_info->error[i].error_class,
+ err_info->error[i].error_code);
+ }
+ }
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Send data to the modem */
+static int ipc_flash_send_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u16 pack_id, u8 *payload, u32 payload_length)
+{
+ struct iosm_flash_data flash_req;
+ int ret;
+
+ ret = ipc_flash_proc_format_ebl_pack(&flash_req, size,
+ pack_id, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL2 pack failed for pack_id:%d",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&flash_req,
+ IOSM_EBL_HEAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Header write failed for Id:%x",
+ pack_id);
+ goto ipc_free_payload;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, payload, payload_length);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL Payload write failed for Id:%x",
+ pack_id);
+ }
+
+ipc_free_payload:
+ return ret;
+}
+
+/**
+ * ipc_flash_link_establish - Flash link establishment
+ * @ipc_imem: Pointer to struct iosm_imem
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem)
+{
+ u8 ler_data[IOSM_LER_RSP_SIZE];
+ u32 bytes_read;
+
+ /* Allocate channel for flashing/cd collection */
+ ipc_imem->ipc_devlink->devlink_sio.channel =
+ ipc_imem_sys_devlink_open(ipc_imem);
+
+ if (!ipc_imem->ipc_devlink->devlink_sio.channel)
+ goto chl_open_fail;
+
+ if (ipc_imem_sys_devlink_read(ipc_imem->ipc_devlink, ler_data,
+ IOSM_LER_RSP_SIZE, &bytes_read))
+ goto devlink_read_fail;
+
+ if (bytes_read != IOSM_LER_RSP_SIZE)
+ goto devlink_read_fail;
+
+ return 0;
+
+devlink_read_fail:
+ ipc_imem_sys_devlink_close(ipc_imem->ipc_devlink);
+chl_open_fail:
+ return -EIO;
+}
+
+/* Receive data from the modem */
+static int ipc_flash_receive_data(struct iosm_devlink *ipc_devlink, u32 size,
+ u8 *mdm_rsp)
+{
+ u8 mdm_rsp_hdr[IOSM_EBL_HEAD_SIZE];
+ u32 bytes_read;
+ int ret;
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp_hdr,
+ IOSM_EBL_HEAD_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ IOSM_EBL_HEAD_SIZE);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != IOSM_EBL_HEAD_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, mdm_rsp, size,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL rsp to read %d bytes failed",
+ size);
+ goto ipc_flash_recv_err;
+ }
+
+ if (bytes_read != size) {
+ ret = -EINVAL;
+ goto ipc_flash_recv_err;
+ }
+
+ ret = ipc_flash_proc_check_ebl_rsp(mdm_rsp_hdr + 2, mdm_rsp);
+
+ipc_flash_recv_err:
+ return ret;
+}
+
+/* Function to send command to modem and receive response */
+static int ipc_flash_send_receive(struct iosm_devlink *ipc_devlink, u16 pack_id,
+ u8 *payload, u32 payload_length, u8 *mdm_rsp)
+{
+ size_t frame_len = IOSM_EBL_DW_PACK_SIZE;
+ int ret;
+
+ if (pack_id == FLASH_SET_PROT_CONF)
+ frame_len = IOSM_EBL_W_PACK_SIZE;
+
+ ret = ipc_flash_send_data(ipc_devlink, frame_len, pack_id, payload,
+ payload_length);
+ if (ret)
+ goto ipc_flash_send_rcv;
+
+ ret = ipc_flash_receive_data(ipc_devlink,
+ frame_len - IOSM_EBL_HEAD_SIZE, mdm_rsp);
+
+ipc_flash_send_rcv:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_set_capabilities - Set modem boot capabilities in flash
+ * @ipc_devlink: Pointer to devlink structure
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp)
+{
+ ipc_devlink->ebl_ctx.ebl_sw_info_version =
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_RSP_SW_INFO_VER];
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_ERASE] = IOSM_CAP_NOT_ENHANCED;
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_SKIP_CRC] = IOSM_CAP_NOT_ENHANCED;
+
+ if (ipc_devlink->ebl_ctx.m_ebl_resp[EBL_CAPS_FLAG] &
+ IOSM_CAP_USE_EXT_CAP) {
+ if (ipc_devlink->param.erase_full_flash)
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_ERASE_ALL);
+ else
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_OOS_CONFIG] &=
+ ~((u8)IOSM_EXT_CAP_COMMIT_ALL);
+ ipc_devlink->ebl_ctx.m_ebl_resp[EBL_EXT_CAPS_HANDLED] =
+ IOSM_CAP_USE_EXT_CAP;
+ }
+
+ /* Write back the EBL capability to modem
+ * Request Set Prot Conf command
+ */
+ return ipc_flash_send_receive(ipc_devlink, FLASH_SET_PROT_CONF,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, mdm_rsp);
+}
+
+/* Read the SWID type and SWID value from the EBL */
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ struct iosm_flash_msg_control cmd_msg;
+ struct iosm_swid_table *swid;
+ char ebl_swid[IOSM_SWID_STR];
+ int ret;
+
+ if (ipc_devlink->ebl_ctx.ebl_sw_info_version !=
+ IOSM_EXT_CAP_SWID_OOS_PACK)
+ return -EINVAL;
+
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_READ);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_SWID_TABLE);
+ cmd_msg.length = cpu_to_le32(IOSM_MSG_LEN_ARG);
+ cmd_msg.arguments = cpu_to_le32(IOSM_MSG_LEN_ARG);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ cmd_msg.action = cpu_to_le32(*((u32 *)mdm_rsp));
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_DATA_READ,
+ (u8 *)&cmd_msg, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto ipc_swid_err;
+
+ swid = (struct iosm_swid_table *)mdm_rsp;
+ dev_dbg(ipc_devlink->dev, "SWID %x RF_ENGINE_ID %x", swid->sw_id_val,
+ swid->rf_engine_id_val);
+
+ snprintf(ebl_swid, sizeof(ebl_swid), "SWID: %x, RF_ENGINE_ID: %x",
+ swid->sw_id_val, swid->rf_engine_id_val);
+
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx, ebl_swid,
+ NULL, 0, 0);
+ipc_swid_err:
+ return ret;
+}
+
+/* Function to check if full erase or conditional erase was successful */
+static int ipc_flash_erase_check(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ int ret, count = 0;
+ u16 mdm_rsp_data;
+
+ /* Request Flash Erase Check */
+ do {
+ mdm_rsp_data = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_CHECK,
+ (u8 *)&mdm_rsp_data,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ if (ret)
+ goto ipc_erase_chk_err;
+
+ mdm_rsp_data = *((u16 *)mdm_rsp);
+ if (mdm_rsp_data > IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev,
+ "Flash Erase Check resp wrong 0x%04X",
+ mdm_rsp_data);
+ ret = -EINVAL;
+ goto ipc_erase_chk_err;
+ }
+ count++;
+ msleep(IOSM_FLASH_ERASE_CHECK_INTERVAL);
+ } while ((mdm_rsp_data != IOSM_MDM_ERASE_RSP) &&
+ (count < (IOSM_FLASH_ERASE_CHECK_TIMEOUT /
+ IOSM_FLASH_ERASE_CHECK_INTERVAL)));
+
+ if (mdm_rsp_data != IOSM_MDM_ERASE_RSP) {
+ dev_err(ipc_devlink->dev, "Modem erase check timeout failure!");
+ ret = -ETIMEDOUT;
+ }
+
+ipc_erase_chk_err:
+ return ret;
+}
+
+/* Full erase function which will erase the nand flash through EBL command */
+static int ipc_flash_full_erase(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp)
+{
+ u32 erase_address = IOSM_ERASE_START_ADDR;
+ struct iosm_flash_msg_control cmd_msg;
+ u32 erase_length = IOSM_ERASE_LEN;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Erase full nand flash");
+ cmd_msg.action = cpu_to_le32(FLASH_OOSC_ACTION_ERASE);
+ cmd_msg.type = cpu_to_le32(FLASH_OOSC_TYPE_ALL_FLASH);
+ cmd_msg.length = cpu_to_le32(erase_length);
+ cmd_msg.arguments = cpu_to_le32(erase_address);
+
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_OOS_CONTROL,
+ (unsigned char *)&cmd_msg,
+ IOSM_MDM_SEND_16, mdm_rsp);
+ if (ret)
+ goto ipc_flash_erase_err;
+
+ ipc_devlink->param.erase_full_flash_done = IOSM_SET_FLAG;
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+
+ipc_flash_erase_err:
+ return ret;
+}
+
+/* Logic for flashing all the Loadmaps available for individual fls file */
+static int ipc_flash_download_region(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 raw_len, rest_len = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ __le32 reg_info[2]; /* 0th position region address, 1st position size */
+ u32 nand_address;
+ char *file_ptr;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ file_ptr = (void *)(fls_data + 1);
+ nand_address = le32_to_cpu(fls_data->region_address);
+ reg_info[0] = cpu_to_le32(nand_address);
+
+ if (!ipc_devlink->param.erase_full_flash_done) {
+ reg_info[1] = cpu_to_le32(nand_address + rest_len - 2);
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_ERASE_START,
+ (u8 *)reg_info, IOSM_MDM_SEND_8,
+ mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ ret = ipc_flash_erase_check(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+ }
+
+ /* Request Flash Set Address */
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SET_ADDRESS,
+ (u8 *)reg_info, IOSM_MDM_SEND_4, mdm_rsp);
+ if (ret)
+ goto dl_region_fail;
+
+ /* Request Flash Write Raw Image */
+ ret = ipc_flash_send_data(ipc_devlink, IOSM_EBL_DW_PACK_SIZE,
+ FLASH_WRITE_IMAGE_RAW, (u8 *)&rest_len,
+ IOSM_MDM_SEND_4);
+ if (ret)
+ goto dl_region_fail;
+
+ do {
+ raw_len = (rest_len > IOSM_FLS_BUF_SIZE) ? IOSM_FLS_BUF_SIZE :
+ rest_len;
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, file_ptr,
+ raw_len);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Image write failed");
+ goto dl_region_fail;
+ }
+ file_ptr += raw_len;
+ rest_len -= raw_len;
+ } while (rest_len);
+
+ ret = ipc_flash_receive_data(ipc_devlink, IOSM_EBL_DW_PAYL_SIZE,
+ mdm_rsp);
+
+dl_region_fail:
+ return ret;
+}
+
+/**
+ * ipc_flash_send_fls - Inject Modem subsystem fls file to device
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ * @mdm_rsp: Pointer to modem response buffer
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp)
+{
+ u32 fw_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ struct iosm_devlink_image *fls_data;
+ u16 flash_cmd;
+ int ret;
+
+ fls_data = (struct iosm_devlink_image *)fw->data;
+ if (ipc_devlink->param.erase_full_flash) {
+ ipc_devlink->param.erase_full_flash = false;
+ ret = ipc_flash_full_erase(ipc_devlink, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ }
+
+ /* Request Sec Start */
+ if (!fls_data->download_region) {
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_START,
+ (u8 *)fw->data +
+ IOSM_DEVLINK_HDR_SIZE, fw_size,
+ mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+ } else {
+ /* Download regions */
+ ret = ipc_flash_download_region(ipc_devlink, fw, mdm_rsp);
+ if (ret)
+ goto ipc_flash_err;
+
+ if (fls_data->last_region) {
+ /* Request Sec End */
+ flash_cmd = IOSM_MDM_SEND_DATA;
+ ret = ipc_flash_send_receive(ipc_devlink, FLASH_SEC_END,
+ (u8 *)&flash_cmd,
+ IOSM_MDM_SEND_2, mdm_rsp);
+ }
+ }
+
+ipc_flash_err:
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_psi - Inject PSI image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 bytes_read, psi_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 psi_ack_byte[IOSM_PSI_ACK], read_data[2];
+ u8 *psi_code;
+ int ret;
+
+ dev_dbg(ipc_devlink->dev, "Boot transfer PSI");
+ psi_code = kmemdup(fw->data + IOSM_DEVLINK_HDR_SIZE, psi_size,
+ GFP_KERNEL);
+ if (!psi_code)
+ return -ENOMEM;
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, psi_code, psi_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "RPSI Image write failed");
+ goto ipc_flash_psi_free;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data,
+ IOSM_LER_ACK_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "ipc_devlink_sio_read ACK failed");
+ goto ipc_flash_psi_free;
+ }
+
+ if (bytes_read != IOSM_LER_ACK_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_psi_free;
+ }
+
+ snprintf(psi_ack_byte, sizeof(psi_ack_byte), "%x%x", read_data[0],
+ read_data[1]);
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ psi_ack_byte, "PSI ACK", 0, 0);
+
+ if (read_data[0] == 0x00 && read_data[1] == 0xCD) {
+ dev_dbg(ipc_devlink->dev, "Coredump detected");
+ ret = ipc_coredump_get_list(ipc_devlink,
+ rpsi_cmd_coredump_start);
+ if (ret)
+ dev_err(ipc_devlink->dev, "Failed to get cd list");
+ }
+
+ipc_flash_psi_free:
+ kfree(psi_code);
+ return ret;
+}
+
+/**
+ * ipc_flash_boot_ebl - Inject EBL image
+ * @ipc_devlink: Pointer to devlink structure
+ * @fw: FW image
+ *
+ * Returns: 0 on success and failure value on error
+ */
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw)
+{
+ u32 ebl_size = fw->size - IOSM_DEVLINK_HDR_SIZE;
+ u8 read_data[2];
+ u32 bytes_read;
+ int ret;
+
+ if (ipc_mmio_get_exec_stage(ipc_devlink->pcie->imem->mmio) !=
+ IPC_MEM_EXEC_STAGE_PSI) {
+ devlink_flash_update_status_notify(ipc_devlink->devlink_ctx,
+ "Invalid execution stage",
+ NULL, 0, 0);
+ return -EINVAL;
+ }
+
+ dev_dbg(ipc_devlink->dev, "Boot transfer EBL");
+ ret = ipc_devlink_send_cmd(ipc_devlink, rpsi_cmd_code_ebl,
+ IOSM_RPSI_LOAD_SIZE);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "Sending rpsi_cmd_code_ebl failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "rpsi_cmd_code_ebl read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink, (u8 *)&ebl_size,
+ sizeof(ebl_size));
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL length write failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_write(ipc_devlink,
+ (u8 *)fw->data + IOSM_DEVLINK_HDR_SIZE,
+ ebl_size);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL data transfer failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink, read_data, IOSM_READ_SIZE,
+ &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_READ_SIZE) {
+ ret = -EINVAL;
+ goto ipc_flash_ebl_err;
+ }
+
+ ret = ipc_imem_sys_devlink_read(ipc_devlink,
+ ipc_devlink->ebl_ctx.m_ebl_resp,
+ IOSM_EBL_RSP_SIZE, &bytes_read);
+ if (ret) {
+ dev_err(ipc_devlink->dev, "EBL response read failed");
+ goto ipc_flash_ebl_err;
+ }
+
+ if (bytes_read != IOSM_EBL_RSP_SIZE)
+ ret = -EINVAL;
+
+ipc_flash_ebl_err:
+ return ret;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_flash.h b/drivers/net/wwan/iosm/iosm_ipc_flash.h
new file mode 100644
index 000000000000..132d59d60fbe
--- /dev/null
+++ b/drivers/net/wwan/iosm/iosm_ipc_flash.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2020-2021 Intel Corporation.
+ */
+
+#ifndef _IOSM_IPC_FLASH_H
+#define _IOSM_IPC_FLASH_H
+
+/* Buffer size used to read the fls image */
+#define IOSM_FLS_BUF_SIZE 0x00100000
+/* Full erase start address */
+#define IOSM_ERASE_START_ADDR 0x00000000
+/* Erase length for NAND flash */
+#define IOSM_ERASE_LEN 0xFFFFFFFF
+/* EBL response Header size */
+#define IOSM_EBL_HEAD_SIZE 8
+/* EBL payload size */
+#define IOSM_EBL_W_PAYL_SIZE 2048
+/* Total EBL pack size */
+#define IOSM_EBL_W_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_W_PAYL_SIZE)
+/* EBL payload size */
+#define IOSM_EBL_DW_PAYL_SIZE 16384
+/* Total EBL pack size */
+#define IOSM_EBL_DW_PACK_SIZE (IOSM_EBL_HEAD_SIZE + IOSM_EBL_DW_PAYL_SIZE)
+/* EBL name size */
+#define IOSM_EBL_NAME 32
+/* Maximum supported error types */
+#define IOSM_MAX_ERRORS 8
+/* Read size for RPSI/EBL response */
+#define IOSM_READ_SIZE 2
+/* Link establishment response ack size */
+#define IOSM_LER_ACK_SIZE 2
+/* PSI ACK len */
+#define IOSM_PSI_ACK 8
+/* SWID capability for packed swid type */
+#define IOSM_EXT_CAP_SWID_OOS_PACK 0x02
+/* EBL error response buffer */
+#define IOSM_EBL_RSP_BUFF 0x0041
+/* SWID string length */
+#define IOSM_SWID_STR 64
+/* Load EBL command size */
+#define IOSM_RPSI_LOAD_SIZE 0
+/* EBL payload checksum */
+#define IOSM_EBL_CKSM 0x0000FFFF
+/* SWID msg len and argument */
+#define IOSM_MSG_LEN_ARG 0
+/* Data to be sent to modem */
+#define IOSM_MDM_SEND_DATA 0x0000
+/* Data received from modem as part of erase check */
+#define IOSM_MDM_ERASE_RSP 0x0001
+/* Bit shift to calculate Checksum */
+#define IOSM_EBL_PAYL_SHIFT 16
+/* Flag To be set */
+#define IOSM_SET_FLAG 1
+/* Set flash erase check timeout to 100 msec */
+#define IOSM_FLASH_ERASE_CHECK_TIMEOUT 100
+/* Set flash erase check interval to 20 msec */
+#define IOSM_FLASH_ERASE_CHECK_INTERVAL 20
+/* Link establishment response ack size */
+#define IOSM_LER_RSP_SIZE 60
+
+/**
+ * enum iosm_flash_package_type - Enum for the flashing operations
+ * @FLASH_SET_PROT_CONF: Write EBL capabilities
+ * @FLASH_SEC_START: Start writing the secpack
+ * @FLASH_SEC_END: Validate secpack end
+ * @FLASH_SET_ADDRESS: Set the address for flashing
+ * @FLASH_ERASE_START: Start erase before flashing
+ * @FLASH_ERASE_CHECK: Validate the erase functionality
+ * @FLASH_OOS_CONTROL: Retrieve data based on oos actions
+ * @FLASH_OOS_DATA_READ: Read data from EBL
+ * @FLASH_WRITE_IMAGE_RAW: Write the raw image to flash
+ */
+enum iosm_flash_package_type {
+ FLASH_SET_PROT_CONF = 0x0086,
+ FLASH_SEC_START = 0x0204,
+ FLASH_SEC_END,
+ FLASH_SET_ADDRESS = 0x0802,
+ FLASH_ERASE_START = 0x0805,
+ FLASH_ERASE_CHECK,
+ FLASH_OOS_CONTROL = 0x080C,
+ FLASH_OOS_DATA_READ = 0x080E,
+ FLASH_WRITE_IMAGE_RAW,
+};
+
+/**
+ * enum iosm_out_of_session_action - Actions possible over the
+ * OutOfSession command interface
+ * @FLASH_OOSC_ACTION_READ: Read data according to its type
+ * @FLASH_OOSC_ACTION_ERASE: Erase data according to its type
+ */
+enum iosm_out_of_session_action {
+ FLASH_OOSC_ACTION_READ = 2,
+ FLASH_OOSC_ACTION_ERASE = 3,
+};
+
+/**
+ * enum iosm_out_of_session_type - Data types that can be handled over the
+ * Out Of Session command Interface
+ * @FLASH_OOSC_TYPE_ALL_FLASH: The whole flash area
+ * @FLASH_OOSC_TYPE_SWID_TABLE: Read the swid table from the target
+ */
+enum iosm_out_of_session_type {
+ FLASH_OOSC_TYPE_ALL_FLASH = 8,
+ FLASH_OOSC_TYPE_SWID_TABLE = 16,
+};
+
+/**
+ * enum iosm_ebl_caps - EBL capability settings
+ * @IOSM_CAP_NOT_ENHANCED: If capability not supported
+ * @IOSM_CAP_USE_EXT_CAP: To be set if extended capability is set
+ * @IOSM_EXT_CAP_ERASE_ALL: Set Erase all capability
+ * @IOSM_EXT_CAP_COMMIT_ALL: Set the commit all capability
+ */
+enum iosm_ebl_caps {
+ IOSM_CAP_NOT_ENHANCED = 0x00,
+ IOSM_CAP_USE_EXT_CAP = 0x01,
+ IOSM_EXT_CAP_ERASE_ALL = 0x08,
+ IOSM_EXT_CAP_COMMIT_ALL = 0x20,
+};
+
+/**
+ * enum iosm_ebl_rsp - EBL response field
+ * @EBL_CAPS_FLAG: EBL capability flag
+ * @EBL_SKIP_ERASE: EBL skip erase flag
+ * @EBL_SKIP_CRC: EBL skip wr_pack crc
+ * @EBL_EXT_CAPS_HANDLED: EBL extended capability handled flag
+ * @EBL_OOS_CONFIG: EBL oos configuration
+ * @EBL_RSP_SW_INFO_VER: EBL SW info version
+ */
+enum iosm_ebl_rsp {
+ EBL_CAPS_FLAG = 50,
+ EBL_SKIP_ERASE = 54,
+ EBL_SKIP_CRC = 55,
+ EBL_EXT_CAPS_HANDLED = 57,
+ EBL_OOS_CONFIG = 64,
+ EBL_RSP_SW_INFO_VER = 70,
+};
+
+/**
+ * enum iosm_mdm_send_recv_data - Data to send to modem
+ * @IOSM_MDM_SEND_2: Send 2 bytes of payload
+ * @IOSM_MDM_SEND_4: Send 4 bytes of payload
+ * @IOSM_MDM_SEND_8: Send 8 bytes of payload
+ * @IOSM_MDM_SEND_16: Send 16 bytes of payload
+ */
+enum iosm_mdm_send_recv_data {
+ IOSM_MDM_SEND_2 = 2,
+ IOSM_MDM_SEND_4 = 4,
+ IOSM_MDM_SEND_8 = 8,
+ IOSM_MDM_SEND_16 = 16,
+};
+
+/**
+ * struct iosm_ebl_one_error - Structure containing error details
+ * @error_class: Error type - standard, security or text error
+ * @error_code: Specific error from error type
+ */
+struct iosm_ebl_one_error {
+ u16 error_class;
+ u16 error_code;
+};
+
+/**
+ * struct iosm_ebl_error - Structure holding the maximum supported errors
+ * @error: Array of iosm_ebl_one_error entries, one per supported error
+ */
+struct iosm_ebl_error {
+ struct iosm_ebl_one_error error[IOSM_MAX_ERRORS];
+};
+
+/**
+ * struct iosm_swid_table - SWID table data for modem
+ * @number_of_data_sets: Number of swid types
+ * @sw_id_type: SWID type - SWID
+ * @sw_id_val: SWID value
+ * @rf_engine_id_type: RF engine ID type - RF_ENGINE_ID
+ * @rf_engine_id_val: RF engine ID value
+ */
+struct iosm_swid_table {
+ u32 number_of_data_sets;
+ char sw_id_type[IOSM_EBL_NAME];
+ u32 sw_id_val;
+ char rf_engine_id_type[IOSM_EBL_NAME];
+ u32 rf_engine_id_val;
+};
+
+/**
+ * struct iosm_flash_msg_control - Data sent to modem
+ * @action: Action to be performed
+ * @type: Type of action
+ * @length: Length of the action
+ * @arguments: Argument value sent to modem
+ */
+struct iosm_flash_msg_control {
+ __le32 action;
+ __le32 type;
+ __le32 length;
+ __le32 arguments;
+};
+
+/**
+ * struct iosm_flash_data - Header Data to be sent to modem
+ * @checksum: Checksum value calculated for the payload data
+ * @pack_id: Flash Action type
+ * @msg_length: Payload length
+ */
+struct iosm_flash_data {
+ __le16 checksum;
+ __le16 pack_id;
+ __le32 msg_length;
+};
+
+int ipc_flash_boot_psi(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_ebl(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw);
+
+int ipc_flash_boot_set_capabilities(struct iosm_devlink *ipc_devlink,
+ u8 *mdm_rsp);
+
+int ipc_flash_link_establish(struct iosm_imem *ipc_imem);
+
+int ipc_flash_read_swid(struct iosm_devlink *ipc_devlink, u8 *mdm_rsp);
+
+int ipc_flash_send_fls(struct iosm_devlink *ipc_devlink,
+ const struct firmware *fw, u8 *mdm_rsp);
+#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.c b/drivers/net/wwan/iosm/iosm_ipc_imem.c
index 9f00e36b7f79..cff3b43ca4d7 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.c
@@ -6,6 +6,8 @@
#include <linux/delay.h>
#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
+#include "iosm_ipc_flash.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_port.h"
@@ -263,9 +265,12 @@ static void ipc_imem_dl_skb_process(struct iosm_imem *ipc_imem,
switch (pipe->channel->ctype) {
case IPC_CTYPE_CTRL:
port_id = pipe->channel->channel_id;
-
- /* Pass the packet to the wwan layer. */
- wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port, skb);
+ if (port_id == IPC_MEM_CTRL_CHL_ID_7)
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink,
+ skb);
+ else
+ wwan_port_rx(ipc_imem->ipc_port[port_id]->iosm_port,
+ skb);
break;
case IPC_CTYPE_WWAN:
@@ -399,19 +404,8 @@ static void ipc_imem_rom_irq_exec(struct iosm_imem *ipc_imem)
{
struct ipc_mem_channel *channel;
- if (ipc_imem->flash_channel_id < 0) {
- ipc_imem->rom_exit_code = IMEM_ROM_EXIT_FAIL;
- dev_err(ipc_imem->dev, "Missing flash app:%d",
- ipc_imem->flash_channel_id);
- return;
- }
-
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
ipc_imem->rom_exit_code = ipc_mmio_get_rom_exit_code(ipc_imem->mmio);
-
- /* Wake up the flash app to continue or to terminate depending
- * on the CP ROM exit code.
- */
- channel = &ipc_imem->channels[ipc_imem->flash_channel_id];
complete(&channel->ul_sem);
}
@@ -482,8 +476,8 @@ static enum hrtimer_restart ipc_imem_startup_timer_cb(struct hrtimer *hr_timer)
container_of(hr_timer, struct iosm_imem, startup_timer);
if (ktime_to_ns(ipc_imem->hrtimer_period)) {
- hrtimer_forward(&ipc_imem->startup_timer, ktime_get(),
- ipc_imem->hrtimer_period);
+ hrtimer_forward_now(&ipc_imem->startup_timer,
+ ipc_imem->hrtimer_period);
result = HRTIMER_RESTART;
}
@@ -572,7 +566,7 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
enum ipc_phase old_phase, phase;
bool retry_allocation = false;
bool ul_pending = false;
- int ch_id, i;
+ int i;
if (irq != IMEM_IRQ_DONT_CARE)
ipc_imem->ev_irq_pending[irq] = false;
@@ -696,11 +690,8 @@ static void ipc_imem_handle_irq(struct iosm_imem *ipc_imem, int irq)
if ((phase == IPC_P_PSI || phase == IPC_P_EBL) &&
ipc_imem->ipc_requested_state == IPC_MEM_DEVICE_IPC_RUNNING &&
ipc_mmio_get_ipc_state(ipc_imem->mmio) ==
- IPC_MEM_DEVICE_IPC_RUNNING &&
- ipc_imem->flash_channel_id >= 0) {
- /* Wake up the flash app to open the pipes. */
- ch_id = ipc_imem->flash_channel_id;
- complete(&ipc_imem->channels[ch_id].ul_sem);
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ complete(&ipc_imem->ipc_devlink->devlink_sio.channel->ul_sem);
}
/* Reset the expected CP state. */
@@ -1176,6 +1167,9 @@ void ipc_imem_cleanup(struct iosm_imem *ipc_imem)
ipc_port_deinit(ipc_imem->ipc_port);
}
+ if (ipc_imem->ipc_devlink)
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
+
ipc_imem_device_ipc_uninit(ipc_imem);
ipc_imem_channel_reset(ipc_imem);
@@ -1258,6 +1252,7 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
void __iomem *mmio, struct device *dev)
{
struct iosm_imem *ipc_imem = kzalloc(sizeof(*pcie->imem), GFP_KERNEL);
+ enum ipc_mem_exec_stage stage;
if (!ipc_imem)
return NULL;
@@ -1272,9 +1267,6 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
ipc_imem->cp_version = 0;
ipc_imem->device_sleep = IPC_HOST_SLEEP_ENTER_SLEEP;
- /* Reset the flash channel id. */
- ipc_imem->flash_channel_id = -1;
-
/* Reset the max number of configured channels */
ipc_imem->nr_of_channels = 0;
@@ -1328,8 +1320,21 @@ struct iosm_imem *ipc_imem_init(struct iosm_pcie *pcie, unsigned int device_id,
goto imem_config_fail;
}
- return ipc_imem;
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage == IPC_MEM_EXEC_STAGE_BOOT) {
+ /* Alloc and Register devlink */
+ ipc_imem->ipc_devlink = ipc_devlink_init(ipc_imem);
+ if (!ipc_imem->ipc_devlink) {
+ dev_err(ipc_imem->dev, "Devlink register failed");
+ goto imem_config_fail;
+ }
+ if (ipc_flash_link_establish(ipc_imem))
+ goto devlink_channel_fail;
+ }
+ return ipc_imem;
+devlink_channel_fail:
+ ipc_devlink_deinit(ipc_imem->ipc_devlink);
imem_config_fail:
hrtimer_cancel(&ipc_imem->td_alloc_timer);
hrtimer_cancel(&ipc_imem->fast_update_timer);
@@ -1361,3 +1366,51 @@ void ipc_imem_td_update_timer_suspend(struct iosm_imem *ipc_imem, bool suspend)
{
ipc_imem->td_update_timer_suspended = suspend;
}
+
+/* Verify the CP execution state, copy the chip info,
+ * change the execution phase to ROM
+ */
+static int ipc_imem_devlink_trigger_chip_info_cb(struct iosm_imem *ipc_imem,
+ int arg, void *msg,
+ size_t msgsize)
+{
+ enum ipc_mem_exec_stage stage;
+ struct sk_buff *skb;
+ int rc = -EINVAL;
+ size_t size;
+
+ /* Test the CP execution state. */
+ stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (stage != IPC_MEM_EXEC_STAGE_BOOT) {
+ dev_err(ipc_imem->dev,
+ "Execution_stage: expected BOOT, received = %X", stage);
+ goto trigger_chip_info_fail;
+ }
+ /* Allocate a new sk buf for the chip info. */
+ size = ipc_imem->mmio->chip_info_size;
+ if (size > IOSM_CHIP_INFO_SIZE_MAX)
+ goto trigger_chip_info_fail;
+
+ skb = ipc_pcie_alloc_local_skb(ipc_imem->pcie, GFP_ATOMIC, size);
+ if (!skb) {
+ dev_err(ipc_imem->dev, "exhausted skbuf kernel DL memory");
+ rc = -ENOMEM;
+ goto trigger_chip_info_fail;
+ }
+ /* Copy the chip info characters into the ipc_skb. */
+ ipc_mmio_copy_chip_info(ipc_imem->mmio, skb_put(skb, size), size);
+ /* First change to the ROM boot phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. BOOT", stage);
+ ipc_imem->phase = ipc_imem_phase_update(ipc_imem);
+ ipc_imem_sys_devlink_notify_rx(ipc_imem->ipc_devlink, skb);
+ rc = 0;
+trigger_chip_info_fail:
+ return rc;
+}
+
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem)
+{
+ return ipc_task_queue_send_task(ipc_imem,
+ ipc_imem_devlink_trigger_chip_info_cb,
+ 0, NULL, 0, true);
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem.h b/drivers/net/wwan/iosm/iosm_ipc_imem.h
index dc65b0712261..6be6708b4eec 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem.h
@@ -69,7 +69,7 @@ struct ipc_chnl_cfg;
#define IMEM_IRQ_DONT_CARE (-1)
-#define IPC_MEM_MAX_CHANNELS 7
+#define IPC_MEM_MAX_CHANNELS 8
#define IPC_MEM_MUX_IP_SESSION_ENTRIES 8
@@ -98,6 +98,7 @@ struct ipc_chnl_cfg;
#define IPC_MEM_DL_ETH_OFFSET 16
#define IPC_CB(skb) ((struct ipc_skb_cb *)((skb)->cb))
+#define IOSM_CHIP_INFO_SIZE_MAX 100
#define FULLY_FUNCTIONAL 0
@@ -304,9 +305,9 @@ enum ipc_phase {
* @ipc_port: IPC PORT data structure pointer
* @pcie: IPC PCIe
* @dev: Pointer to device structure
- * @flash_channel_id: Reserved channel id for flashing to RAM.
* @ipc_requested_state: Expected IPC state on CP.
* @channels: Channel list with UL/DL pipe pairs.
+ * @ipc_devlink: IPC Devlink data structure pointer
* @ipc_status: local ipc_status
* @nr_of_channels: number of configured channels
* @startup_timer: startup timer for NAND support.
@@ -349,9 +350,9 @@ struct iosm_imem {
struct iosm_cdev *ipc_port[IPC_MEM_MAX_CHANNELS];
struct iosm_pcie *pcie;
struct device *dev;
- int flash_channel_id;
enum ipc_mem_device_ipc_state ipc_requested_state;
struct ipc_mem_channel channels[IPC_MEM_MAX_CHANNELS];
+ struct iosm_devlink *ipc_devlink;
u32 ipc_status;
u32 nr_of_channels;
struct hrtimer startup_timer;
@@ -575,4 +576,15 @@ void ipc_imem_ipc_init_check(struct iosm_imem *ipc_imem);
*/
void ipc_imem_channel_init(struct iosm_imem *ipc_imem, enum ipc_ctype ctype,
struct ipc_chnl_cfg chnl_cfg, u32 irq_moderation);
+
+/**
+ * ipc_imem_devlink_trigger_chip_info - Inform devlink that the chip
+ * information is available if the flashing to RAM interworking is to be
+ * executed.
+ * @ipc_imem: Pointer to imem structure
+ *
+ * Return: 0 on success, -1 on failure
+ */
+int ipc_imem_devlink_trigger_chip_info(struct iosm_imem *ipc_imem);
#endif
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 0a472ce77370..b885a6570235 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -6,6 +6,7 @@
#include <linux/delay.h>
#include "iosm_ipc_chnl_cfg.h"
+#include "iosm_ipc_devlink.h"
#include "iosm_ipc_imem.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_port.h"
@@ -331,3 +332,319 @@ int ipc_imem_sys_cdev_write(struct iosm_cdev *ipc_cdev, struct sk_buff *skb)
out:
return ret;
}
+
+/* Open a SIO link to CP and return the channel instance */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem)
+{
+ struct ipc_mem_channel *channel;
+ enum ipc_phase phase;
+ int channel_id;
+
+ phase = ipc_imem_phase_update(ipc_imem);
+ switch (phase) {
+ case IPC_P_OFF:
+ case IPC_P_ROM:
+ /* Get a channel id as flash id and reserve it. */
+ channel_id = ipc_imem_channel_alloc(ipc_imem,
+ IPC_MEM_CTRL_CHL_ID_7,
+ IPC_CTYPE_CTRL);
+
+ if (channel_id < 0) {
+ dev_err(ipc_imem->dev,
+ "reservation of a flash channel id failed");
+ goto error;
+ }
+
+ ipc_imem->ipc_devlink->devlink_sio.channel_id = channel_id;
+ channel = &ipc_imem->channels[channel_id];
+
+ /* Enqueue chip info data to be read */
+ if (ipc_imem_devlink_trigger_chip_info(ipc_imem)) {
+ dev_err(ipc_imem->dev, "Enqueue of chip info failed");
+ channel->state = IMEM_CHANNEL_FREE;
+ goto error;
+ }
+
+ return channel;
+
+ case IPC_P_PSI:
+ case IPC_P_EBL:
+ ipc_imem->cp_version = ipc_mmio_get_cp_version(ipc_imem->mmio);
+ if (ipc_imem->cp_version == -1) {
+ dev_err(ipc_imem->dev, "invalid CP version");
+ goto error;
+ }
+
+ channel_id = ipc_imem->ipc_devlink->devlink_sio.channel_id;
+ return ipc_imem_channel_open(ipc_imem, channel_id,
+ IPC_HP_CDEV_OPEN);
+
+ default:
+ /* CP is in the wrong state (e.g. CRASH or CD_READY) */
+ dev_err(ipc_imem->dev, "SIO open refused, phase %d", phase);
+ }
+error:
+ return NULL;
+}
+
+/* Release a SIO channel link to CP. */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ int boot_check_timeout = BOOT_CHECK_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+ struct ipc_mem_channel *channel;
+ enum ipc_phase curr_phase;
+ int status = 0;
+ u32 tail = 0;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+ curr_phase = ipc_imem->phase;
+	/* Wait up to boot_check_timeout ms for CP to reach the RUN or PSI stage */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+ if (exec_stage == IPC_MEM_EXEC_STAGE_RUN ||
+ exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+ msleep(20);
+ boot_check_timeout -= 20;
+ } while (boot_check_timeout > 0);
+
+ /* If there are any pending TDs then wait for Timeout/Completion before
+ * closing pipe.
+ */
+ if (channel->ul_pipe.old_tail != channel->ul_pipe.old_head) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->ul_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on UL-Pipe:%d Head:%d Tail:%d",
+ channel->ul_pipe.pipe_nr,
+ channel->ul_pipe.old_head,
+ channel->ul_pipe.old_tail);
+ }
+ }
+
+ ipc_protocol_get_head_tail_index(ipc_imem->ipc_protocol,
+ &channel->dl_pipe, NULL, &tail);
+
+ if (tail != channel->dl_pipe.old_tail) {
+ status = wait_for_completion_interruptible_timeout
+ (&ipc_imem->dl_pend_sem,
+ msecs_to_jiffies(IPC_PEND_DATA_TIMEOUT));
+ if (status == 0) {
+ dev_dbg(ipc_imem->dev,
+ "Data Timeout on DL-Pipe:%d Head:%d Tail:%d",
+ channel->dl_pipe.pipe_nr,
+ channel->dl_pipe.old_head,
+ channel->dl_pipe.old_tail);
+ }
+ }
+
+	/* Due to the wait for completion in messages, there is a small window
+	 * between closing the pipe and marking the channel as closed. In this
+	 * window there could be an HP update from the host driver. Hence set
+	 * the channel state to CLOSING to avoid unnecessary interrupts
+	 * towards CP.
+	 */
+ channel->state = IMEM_CHANNEL_CLOSING;
+ /* Release the pipe resources */
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->ul_pipe);
+ ipc_imem_pipe_cleanup(ipc_imem, &channel->dl_pipe);
+}
+
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb)
+{
+ skb_queue_tail(&ipc_devlink->devlink_sio.rx_list, skb);
+ complete(&ipc_devlink->devlink_sio.read_sem);
+}
+
+/* PSI transfer */
+static int ipc_imem_sys_psi_transfer(struct iosm_imem *ipc_imem,
+ struct ipc_mem_channel *channel,
+ unsigned char *buf, int count)
+{
+ int psi_start_timeout = PSI_START_DEFAULT_TIMEOUT;
+ enum ipc_mem_exec_stage exec_stage;
+
+ dma_addr_t mapping = 0;
+ int ret;
+
+ ret = ipc_pcie_addr_map(ipc_imem->pcie, buf, count, &mapping,
+ DMA_TO_DEVICE);
+ if (ret)
+ goto pcie_addr_map_fail;
+
+ /* Save the PSI information for the CP ROM driver on the doorbell
+ * scratchpad.
+ */
+ ipc_mmio_set_psi_addr_and_size(ipc_imem->mmio, mapping, count);
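+	/* Notify CP via the doorbell that the PSI address and size are set */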
+ ipc_doorbell_fire(ipc_imem->pcie, 0, IPC_MEM_EXEC_STAGE_BOOT);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem,
+ msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev, "Failed PSI transfer to CP, Error-%d",
+ ret);
+ goto psi_transfer_fail;
+ }
+ /* If the PSI download fails, return the CP boot ROM exit code */
+ if (ipc_imem->rom_exit_code != IMEM_ROM_EXIT_OPEN_EXT &&
+ ipc_imem->rom_exit_code != IMEM_ROM_EXIT_CERT_EXT) {
+ ret = (-1) * ((int)ipc_imem->rom_exit_code);
+ goto psi_transfer_fail;
+ }
+
+ dev_dbg(ipc_imem->dev, "PSI image successfully downloaded");
+
+ /* Wait psi_start_timeout milliseconds until the CP PSI image is
+ * running and updates the execution_stage field with
+ * IPC_MEM_EXEC_STAGE_PSI. Verify the execution stage.
+ */
+ do {
+ exec_stage = ipc_mmio_get_exec_stage(ipc_imem->mmio);
+
+ if (exec_stage == IPC_MEM_EXEC_STAGE_PSI)
+ break;
+
+ msleep(20);
+ psi_start_timeout -= 20;
+ } while (psi_start_timeout > 0);
+
+ if (exec_stage != IPC_MEM_EXEC_STAGE_PSI)
+ goto psi_transfer_fail; /* Unknown status of CP PSI process. */
+
+ ipc_imem->phase = IPC_P_PSI;
+
+ /* Enter the PSI phase. */
+ dev_dbg(ipc_imem->dev, "execution_stage[%X] eq. PSI", exec_stage);
+
+ /* Request the RUNNING state from CP and wait until it was reached
+ * or timeout.
+ */
+ ipc_imem_ipc_init_check(ipc_imem);
+
+ ret = wait_for_completion_interruptible_timeout
+ (&channel->ul_sem, msecs_to_jiffies(IPC_PSI_TRANSFER_TIMEOUT));
+ if (ret <= 0) {
+ dev_err(ipc_imem->dev,
+ "Failed PSI RUNNING state on CP, Error-%d", ret);
+ goto psi_transfer_fail;
+ }
+
+ if (ipc_mmio_get_ipc_state(ipc_imem->mmio) !=
+ IPC_MEM_DEVICE_IPC_RUNNING) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] %s: unexpected CP IPC state %d, not RUNNING",
+ channel->channel_id,
+ ipc_imem_phase_get_string(ipc_imem->phase),
+ ipc_mmio_get_ipc_state(ipc_imem->mmio));
+
+ goto psi_transfer_fail;
+ }
+
+ /* Create the flash channel for the transfer of the images. */
+ if (!ipc_imem_sys_devlink_open(ipc_imem)) {
+ dev_err(ipc_imem->dev, "can't open flash_channel");
+ goto psi_transfer_fail;
+ }
+
+ ret = 0;
+psi_transfer_fail:
+ ipc_pcie_addr_unmap(ipc_imem->pcie, count, mapping, DMA_TO_DEVICE);
+pcie_addr_map_fail:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count)
+{
+ struct iosm_imem *ipc_imem = ipc_devlink->pcie->imem;
+ struct ipc_mem_channel *channel;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ channel = ipc_imem->ipc_devlink->devlink_sio.channel;
+
+	/* In the ROM phase the PSI image is passed to CP directly via a
+	 * dedicated shared memory area and the doorbell scratchpad.
+	 */
+ if (ipc_imem->phase == IPC_P_ROM) {
+ ret = ipc_imem_sys_psi_transfer(ipc_imem, channel, buf, count);
+		/* If the PSI transfer fails then send the crash signature. */
+ if (ret > 0)
+ ipc_imem_msg_send_feature_set(ipc_imem,
+ IPC_MEM_INBAND_CRASH_SIG,
+ false);
+ goto out;
+ }
+
+ /* Allocate skb memory for the uplink buffer. */
+ skb = ipc_pcie_alloc_skb(ipc_devlink->pcie, count, GFP_KERNEL, &mapping,
+ DMA_TO_DEVICE, 0);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(skb_put(skb, count), buf, count);
+
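+	/* Blocking uplink operation: the caller waits below for CP confirmation */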
+ IPC_CB(skb)->op_type = UL_USR_OP_BLOCKED;
+
+ /* Add skb to the uplink skbuf accumulator. */
+ skb_queue_tail(&channel->ul_list, skb);
+
+ /* Inform the IPC tasklet to pass uplink IP packets to CP. */
+ if (!ipc_imem_call_cdev_write(ipc_imem)) {
+ ret = wait_for_completion_interruptible(&channel->ul_sem);
+
+ if (ret < 0) {
+ dev_err(ipc_imem->dev,
+ "ch[%d] no CP confirmation, status = %d",
+ channel->channel_id, ret);
+ ipc_pcie_kfree_skb(ipc_devlink->pcie, skb);
+ goto out;
+ }
+ }
+ ret = 0;
+out:
+ return ret;
+}
+
+int ipc_imem_sys_devlink_read(struct iosm_devlink *devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read)
+{
+ struct sk_buff *skb = NULL;
+ int rc = 0;
+
+ /* check skb is available in rx_list or wait for skb */
+ devlink->devlink_sio.devlink_read_pend = 1;
+ while (!skb && !(skb = skb_dequeue(&devlink->devlink_sio.rx_list))) {
+ if (!wait_for_completion_interruptible_timeout
+ (&devlink->devlink_sio.read_sem,
+ msecs_to_jiffies(IPC_READ_TIMEOUT))) {
+ dev_err(devlink->dev, "Read timedout");
+ rc = -ETIMEDOUT;
+ goto devlink_read_fail;
+ }
+ }
+ devlink->devlink_sio.devlink_read_pend = 0;
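+	/* Ensure the caller's buffer is large enough for the received skb */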
+ if (bytes_to_read < skb->len) {
+ dev_err(devlink->dev, "Invalid size,expected len %d", skb->len);
+ rc = -EINVAL;
+ goto devlink_read_fail;
+ }
+ *bytes_read = skb->len;
+ memcpy(data, skb->data, skb->len);
+
+devlink_read_fail:
+ ipc_pcie_kfree_skb(devlink->pcie, skb);
+ return rc;
+}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index 2007fe23e9a5..f0c88ac5643c 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -9,7 +9,7 @@
#include "iosm_ipc_mux_codec.h"
/* Maximum wait time for blocking read */
-#define IPC_READ_TIMEOUT 500
+#define IPC_READ_TIMEOUT 3000
/* The delay in ms for defering the unregister */
#define SIO_UNREGISTER_DEFER_DELAY_MS 1
@@ -98,4 +98,51 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, int if_id,
*/
void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem,
enum ipc_mux_protocol mux_type);
+
+/**
+ * ipc_imem_sys_devlink_open - Open a Flash/CD Channel link to CP
+ * @ipc_imem: iosm_imem instance
+ *
+ * Return: channel instance on success, NULL for failure
+ */
+struct ipc_mem_channel *ipc_imem_sys_devlink_open(struct iosm_imem *ipc_imem);
+
+/**
+ * ipc_imem_sys_devlink_close - Release a Flash/CD channel link to CP
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ *
+ */
+void ipc_imem_sys_devlink_close(struct iosm_devlink *ipc_devlink);
+
+/**
+ * ipc_imem_sys_devlink_notify_rx - Receive downlink characters from CP;
+ *				    the downlink skbuf is added to the end of
+ *				    the downlink (rx) list
+ * @ipc_devlink: Pointer to ipc_devlink data-struct
+ * @skb: Pointer to sk buffer
+ */
+void ipc_imem_sys_devlink_notify_rx(struct iosm_devlink *ipc_devlink,
+ struct sk_buff *skb);
+
+/**
+ * ipc_imem_sys_devlink_read - Copy the rx data and free the skbuf
+ * @ipc_devlink: Devlink instance
+ * @data: Destination buffer for the data read from the modem
+ * @bytes_to_read: Size of destination buffer
+ * @bytes_read: Number of bytes read
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_read(struct iosm_devlink *ipc_devlink, u8 *data,
+ u32 bytes_to_read, u32 *bytes_read);
+
+/**
+ * ipc_imem_sys_devlink_write - Route the uplink buffer to CP
+ * @ipc_devlink: Devlink instance
+ * @buf: Pointer to the data buffer
+ * @count: Number of data bytes to write
+ *
+ * Return: 0 on success and failure value on error
+ */
+int ipc_imem_sys_devlink_write(struct iosm_devlink *ipc_devlink,
+ unsigned char *buf, int count);
#endif
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index c58996c1e230..fe8e21ad8ed9 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -494,6 +494,9 @@ static const struct net_device_ops xenvif_netdev_ops = {
struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
unsigned int handle)
{
+ static const u8 dummy_addr[ETH_ALEN] = {
+ 0xfe, 0xff, 0xff, 0xff, 0xff, 0xff,
+ };
int err;
struct net_device *dev;
struct xenvif *vif;
@@ -551,8 +554,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
* stolen by an Ethernet bridge for STP purposes.
* (FE:FF:FF:FF:FF:FF)
*/
- eth_broadcast_addr(dev->dev_addr);
- dev->dev_addr[0] &= ~0x01;
+ eth_hw_addr_set(dev, dummy_addr);
netif_carrier_off(dev);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 32d5bc4919d8..0f7fd159f0f2 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1474,7 +1474,7 @@ int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
struct xen_netif_tx_sring *txs;
struct xen_netif_rx_sring *rxs;
RING_IDX rsp_prod, req_prod;
- int err = -ENOMEM;
+ int err;
err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(queue->vif),
&tx_ring_ref, 1, &addr);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e31b98403f31..911f43986a8c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1730,6 +1730,10 @@ static int netfront_resume(struct xenbus_device *dev)
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ netif_tx_lock_bh(info->netdev);
+ netif_device_detach(info->netdev);
+ netif_tx_unlock_bh(info->netdev);
+
xennet_disconnect_backend(info);
return 0;
}
@@ -2157,6 +2161,7 @@ static int talk_to_netback(struct xenbus_device *dev,
unsigned int max_queues = 0;
struct netfront_queue *queue = NULL;
unsigned int num_queues = 1;
+ u8 addr[ETH_ALEN];
info->netdev->irq = 0;
@@ -2170,11 +2175,12 @@ static int talk_to_netback(struct xenbus_device *dev,
"feature-split-event-channels", 0);
/* Read mac addr. */
- err = xen_net_read_mac(dev, info->netdev->dev_addr);
+ err = xen_net_read_mac(dev, addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
goto out_unlocked;
}
+ eth_hw_addr_set(info->netdev, addr);
info->netback_has_xdp_headroom = xenbus_read_unsigned(info->xbdev->otherend,
"feature-xdp-headroom", 0);
@@ -2349,6 +2355,10 @@ static int xennet_connect(struct net_device *dev)
* domain a kick because we've probably just requeued some
* packets.
*/
+ netif_tx_lock_bh(np->netdev);
+ netif_device_attach(np->netdev);
+ netif_tx_unlock_bh(np->netdev);
+
netif_carrier_on(np->netdev);
for (j = 0; j < num_queues; ++j) {
queue = &np->queues[j];
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 051c43a2a52f..f78670bf41e0 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -335,7 +335,6 @@ static int fdp_nci_i2c_probe(struct i2c_client *client)
return r;
}
- dev_dbg(dev, "I2C driver loaded\n");
return 0;
}
diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c
index 86f593c73ed6..067295124eb9 100644
--- a/drivers/nfc/microread/i2c.c
+++ b/drivers/nfc/microread/i2c.c
@@ -237,8 +237,6 @@ static int microread_i2c_probe(struct i2c_client *client,
struct microread_i2c_phy *phy;
int r;
- dev_dbg(&client->dev, "client %p\n", client);
-
phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy),
GFP_KERNEL);
if (!phy)
@@ -262,8 +260,6 @@ static int microread_i2c_probe(struct i2c_client *client,
if (r < 0)
goto err_irq;
- nfc_info(&client->dev, "Probed\n");
-
return 0;
err_irq:
diff --git a/drivers/nfc/microread/mei.c b/drivers/nfc/microread/mei.c
index 8edf761a6b2a..e2a77a5fc887 100644
--- a/drivers/nfc/microread/mei.c
+++ b/drivers/nfc/microread/mei.c
@@ -23,13 +23,9 @@ static int microread_mei_probe(struct mei_cl_device *cldev,
struct nfc_mei_phy *phy;
int r;
- pr_info("Probing NFC microread\n");
-
phy = nfc_mei_phy_alloc(cldev);
- if (!phy) {
- pr_err("Cannot allocate memory for microread mei phy.\n");
+ if (!phy)
return -ENOMEM;
- }
r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index edac56b01fd1..e83f65596a88 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -76,10 +76,8 @@ static struct sk_buff *alloc_lc_skb(struct nfcmrvl_private *priv, uint8_t plen)
struct nci_data_hdr *hdr;
skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL);
- if (!skb) {
- pr_err("no memory for data\n");
+ if (!skb)
return NULL;
- }
hdr = skb_put(skb, NCI_DATA_HDR_SIZE);
hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL;
diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c
index e6bf8cfe3aa7..673eb5e9b887 100644
--- a/drivers/nfc/pn533/i2c.c
+++ b/drivers/nfc/pn533/i2c.c
@@ -128,7 +128,6 @@ static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb)
static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
{
struct pn533_i2c_phy *phy = data;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -137,9 +136,6 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
@@ -160,7 +156,7 @@ static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data)
return IRQ_HANDLED;
}
-static struct pn533_phy_ops i2c_phy_ops = {
+static const struct pn533_phy_ops i2c_phy_ops = {
.send_frame = pn533_i2c_send_frame,
.send_ack = pn533_i2c_send_ack,
.abort_cmd = pn533_i2c_abort_cmd,
diff --git a/drivers/nfc/pn533/pn533.c b/drivers/nfc/pn533/pn533.c
index 2f3f3fe9a0ba..787bcbd290f7 100644
--- a/drivers/nfc/pn533/pn533.c
+++ b/drivers/nfc/pn533/pn533.c
@@ -1235,8 +1235,6 @@ static void pn533_listen_mode_timer(struct timer_list *t)
{
struct pn533 *dev = from_timer(dev, t, listen_timer);
- dev_dbg(dev->dev, "Listen mode timeout\n");
-
dev->cancel_listen = 1;
pn533_poll_next_mod(dev);
@@ -2173,7 +2171,7 @@ void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status)
}
if (skb == NULL) {
- pr_err("NULL Frame -> link is dead\n");
+ dev_err(dev->dev, "NULL Frame -> link is dead\n");
goto sched_wq;
}
@@ -2735,7 +2733,7 @@ EXPORT_SYMBOL_GPL(pn533_finalize_setup);
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev)
{
diff --git a/drivers/nfc/pn533/pn533.h b/drivers/nfc/pn533/pn533.h
index 5f94f38a2a08..09e35b8693f5 100644
--- a/drivers/nfc/pn533/pn533.h
+++ b/drivers/nfc/pn533/pn533.h
@@ -177,7 +177,7 @@ struct pn533 {
struct device *dev;
void *phy;
- struct pn533_phy_ops *phy_ops;
+ const struct pn533_phy_ops *phy_ops;
};
typedef int (*pn533_send_async_complete_t) (struct pn533 *dev, void *arg,
@@ -232,7 +232,7 @@ struct pn533_phy_ops {
struct pn533 *pn53x_common_init(u32 device_type,
enum pn533_protocol_type protocol_type,
void *phy,
- struct pn533_phy_ops *phy_ops,
+ const struct pn533_phy_ops *phy_ops,
struct pn533_frame_ops *fops,
struct device *dev);
diff --git a/drivers/nfc/pn533/uart.c b/drivers/nfc/pn533/uart.c
index 7bdaf8263070..2caf997f9bc9 100644
--- a/drivers/nfc/pn533/uart.c
+++ b/drivers/nfc/pn533/uart.c
@@ -123,7 +123,7 @@ static int pn532_dev_down(struct pn533 *dev)
return 0;
}
-static struct pn533_phy_ops uart_phy_ops = {
+static const struct pn533_phy_ops uart_phy_ops = {
.send_frame = pn532_uart_send_frame,
.send_ack = pn532_uart_send_ack,
.abort_cmd = pn532_uart_abort_cmd,
@@ -224,7 +224,7 @@ static int pn532_receive_buf(struct serdev_device *serdev,
return i;
}
-static struct serdev_device_ops pn532_serdev_ops = {
+static const struct serdev_device_ops pn532_serdev_ops = {
.receive_buf = pn532_receive_buf,
.write_wakeup = serdev_device_write_wakeup,
};
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index bd7f7478d189..6f71ac72012e 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -429,7 +429,7 @@ static void pn533_send_complete(struct urb *urb)
}
}
-static struct pn533_phy_ops usb_phy_ops = {
+static const struct pn533_phy_ops usb_phy_ops = {
.send_frame = pn533_usb_send_frame,
.send_ack = pn533_usb_send_ack,
.abort_cmd = pn533_usb_abort_cmd,
diff --git a/drivers/nfc/pn544/mei.c b/drivers/nfc/pn544/mei.c
index 5c10aac085a4..c493f2dbd0e2 100644
--- a/drivers/nfc/pn544/mei.c
+++ b/drivers/nfc/pn544/mei.c
@@ -22,13 +22,9 @@ static int pn544_mei_probe(struct mei_cl_device *cldev,
struct nfc_mei_phy *phy;
int r;
- pr_info("Probing NFC pn544\n");
-
phy = nfc_mei_phy_alloc(cldev);
- if (!phy) {
- pr_err("Cannot allocate memory for pn544 mei phy.\n");
+ if (!phy)
return -ENOMEM;
- }
r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME,
MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD,
@@ -46,8 +42,6 @@ static void pn544_mei_remove(struct mei_cl_device *cldev)
{
struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev);
- pr_info("Removing pn544\n");
-
pn544_hci_remove(phy->hdev);
nfc_mei_phy_free(phy);
diff --git a/drivers/nfc/port100.c b/drivers/nfc/port100.c
index 517376c43b86..16ceb763594f 100644
--- a/drivers/nfc/port100.c
+++ b/drivers/nfc/port100.c
@@ -1006,11 +1006,11 @@ static u64 port100_get_command_type_mask(struct port100 *dev)
skb = port100_alloc_skb(dev, 0);
if (!skb)
- return -ENOMEM;
+ return 0;
resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb);
if (IS_ERR(resp))
- return PTR_ERR(resp);
+ return 0;
if (resp->len < 8)
mask = 0;
diff --git a/drivers/nfc/s3fwrn5/firmware.c b/drivers/nfc/s3fwrn5/firmware.c
index 1af7a1e632cf..c20fdbac51c5 100644
--- a/drivers/nfc/s3fwrn5/firmware.c
+++ b/drivers/nfc/s3fwrn5/firmware.c
@@ -357,6 +357,7 @@ s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo)
int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo;
int ret;
@@ -364,8 +365,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Failed to get bootinfo, ret=%02x\n", ret);
+ dev_err(dev, "Failed to get bootinfo, ret=%02x\n", ret);
goto err;
}
@@ -373,8 +373,7 @@ int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unknown hardware version\n");
+ dev_err(dev, "Unknown hardware version\n");
goto err;
}
@@ -409,6 +408,7 @@ bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version
int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
{
+ struct device *dev = &fw_info->ndev->nfc_dev->dev;
struct s3fwrn5_fw_image *fw = &fw_info->fw;
u8 hash_data[SHA1_DIGEST_SIZE];
struct crypto_shash *tfm;
@@ -421,8 +421,7 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
tfm = crypto_alloc_shash("sha1", 0, 0);
if (IS_ERR(tfm)) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot allocate shash (code=%pe)\n", tfm);
+ dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm);
return PTR_ERR(tfm);
}
@@ -430,21 +429,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
crypto_free_shash(tfm);
if (ret) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Cannot compute hash (code=%d)\n", ret);
+ dev_err(dev, "Cannot compute hash (code=%d)\n", ret);
return ret;
}
/* Firmware update process */
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: %s\n", fw_info->fw_name);
+ dev_info(dev, "Firmware update: %s\n", fw_info->fw_name);
ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data,
SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to enter update mode\n");
+ dev_err(dev, "Unable to enter update mode\n");
return ret;
}
@@ -452,21 +448,18 @@ int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info)
ret = s3fwrn5_fw_update_sector(fw_info,
fw_info->base_addr + off, fw->image + off);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Firmware update error (code=%d)\n", ret);
+ dev_err(dev, "Firmware update error (code=%d)\n", ret);
return ret;
}
}
ret = s3fwrn5_fw_complete_update_mode(fw_info);
if (ret < 0) {
- dev_err(&fw_info->ndev->nfc_dev->dev,
- "Unable to complete update mode\n");
+ dev_err(dev, "Unable to complete update mode\n");
return ret;
}
- dev_info(&fw_info->ndev->nfc_dev->dev,
- "Firmware update: success\n");
+ dev_info(dev, "Firmware update: success\n");
return ret;
}
diff --git a/drivers/nfc/s3fwrn5/nci.c b/drivers/nfc/s3fwrn5/nci.c
index e374e670b36b..ca6828f55ba0 100644
--- a/drivers/nfc/s3fwrn5/nci.c
+++ b/drivers/nfc/s3fwrn5/nci.c
@@ -47,6 +47,7 @@ const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = {
int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
{
+ struct device *dev = &info->ndev->nfc_dev->dev;
const struct firmware *fw;
struct nci_prop_fw_cfg_cmd fw_cfg;
struct nci_prop_set_rfreg_cmd set_rfreg;
@@ -55,7 +56,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
int i, len;
int ret;
- ret = request_firmware(&fw, fw_name, &info->ndev->nfc_dev->dev);
+ ret = request_firmware(&fw, fw_name, dev);
if (ret < 0)
return ret;
@@ -77,13 +78,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
/* Start rfreg configuration */
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: %s\n", fw_name);
+ dev_info(dev, "rfreg configuration update: %s\n", fw_name);
ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to start rfreg update\n");
+ dev_err(dev, "Unable to start rfreg update\n");
goto out;
}
@@ -97,8 +96,7 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG,
len+1, (__u8 *)&set_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "rfreg update error (code=%d)\n", ret);
+ dev_err(dev, "rfreg update error (code=%d)\n", ret);
goto out;
}
set_rfreg.index++;
@@ -110,13 +108,11 @@ int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name)
ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG,
sizeof(stop_rfreg), (__u8 *)&stop_rfreg);
if (ret < 0) {
- dev_err(&info->ndev->nfc_dev->dev,
- "Unable to stop rfreg update\n");
+ dev_err(dev, "Unable to stop rfreg update\n");
goto out;
}
- dev_info(&info->ndev->nfc_dev->dev,
- "rfreg configuration update: success\n");
+ dev_info(dev, "rfreg configuration update: success\n");
out:
release_firmware(fw);
return ret;
diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c
index ccf6152ebb9f..cbd968f013c7 100644
--- a/drivers/nfc/st-nci/i2c.c
+++ b/drivers/nfc/st-nci/i2c.c
@@ -157,7 +157,6 @@ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_i2c_phy *phy = phy_id;
- struct i2c_client *client;
struct sk_buff *skb = NULL;
int r;
@@ -166,9 +165,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c
index e9dc313b333e..755460a73c0d 100644
--- a/drivers/nfc/st-nci/ndlc.c
+++ b/drivers/nfc/st-nci/ndlc.c
@@ -239,8 +239,6 @@ static void ndlc_t1_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
@@ -248,8 +246,6 @@ static void ndlc_t2_timeout(struct timer_list *t)
{
struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
- pr_debug("\n");
-
schedule_work(&ndlc->sm_work);
}
diff --git a/drivers/nfc/st-nci/se.c b/drivers/nfc/st-nci/se.c
index 5fd89f72969d..7764b1a4c3cf 100644
--- a/drivers/nfc/st-nci/se.c
+++ b/drivers/nfc/st-nci/se.c
@@ -638,8 +638,6 @@ int st_nci_se_io(struct nci_dev *ndev, u32 se_idx,
{
struct st_nci_info *info = nci_get_drvdata(ndev);
- pr_debug("\n");
-
switch (se_idx) {
case ST_NCI_ESE_HOST_ID:
info->se_info.cb = cb;
@@ -671,8 +669,6 @@ static void st_nci_se_wt_timeout(struct timer_list *t)
u8 param = 0x01;
struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -692,8 +688,6 @@ static void st_nci_se_activation_timeout(struct timer_list *t)
struct st_nci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/st-nci/spi.c b/drivers/nfc/st-nci/spi.c
index 0875b773fb41..4e723992e74c 100644
--- a/drivers/nfc/st-nci/spi.c
+++ b/drivers/nfc/st-nci/spi.c
@@ -169,7 +169,6 @@ static int st_nci_spi_read(struct st_nci_spi_phy *phy,
static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
{
struct st_nci_spi_phy *phy = phy_id;
- struct spi_device *dev;
struct sk_buff *skb = NULL;
int r;
@@ -178,9 +177,6 @@ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- dev = phy->spi_dev;
- dev_dbg(&dev->dev, "IRQ\n");
-
if (phy->ndlc->hard_fault)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c
index 279d88128b2e..f126ce96a7df 100644
--- a/drivers/nfc/st21nfca/i2c.c
+++ b/drivers/nfc/st21nfca/i2c.c
@@ -421,7 +421,6 @@ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy,
static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
{
struct st21nfca_i2c_phy *phy = phy_id;
- struct i2c_client *client;
int r;
@@ -430,9 +429,6 @@ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id)
return IRQ_NONE;
}
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "IRQ\n");
-
if (phy->hard_fault != 0)
return IRQ_HANDLED;
diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c
index c8bdf078d111..a43fc4117fa5 100644
--- a/drivers/nfc/st21nfca/se.c
+++ b/drivers/nfc/st21nfca/se.c
@@ -257,8 +257,6 @@ static void st21nfca_se_wt_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.bwi_timer);
- pr_debug("\n");
-
info->se_info.bwi_active = false;
if (!info->se_info.xch_error) {
@@ -278,8 +276,6 @@ static void st21nfca_se_activation_timeout(struct timer_list *t)
struct st21nfca_hci_info *info = from_timer(info, t,
se_info.se_active_timer);
- pr_debug("\n");
-
info->se_info.se_active = false;
complete(&info->se_info.req_completion);
diff --git a/drivers/nfc/st95hf/core.c b/drivers/nfc/st95hf/core.c
index d16cf3ff644e..b23f47936473 100644
--- a/drivers/nfc/st95hf/core.c
+++ b/drivers/nfc/st95hf/core.c
@@ -1226,11 +1226,9 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
&reset_cmd,
ST95HF_RESET_CMD_LEN,
ASYNC);
- if (result) {
+ if (result)
dev_err(&spictx->spidev->dev,
"ST95HF reset failed in remove() err = %d\n", result);
- return result;
- }
/* wait for 3 ms to complete the controller reset process */
usleep_range(3000, 4000);
@@ -1239,7 +1237,7 @@ static int st95hf_remove(struct spi_device *nfc_spi_dev)
if (stcontext->st95hf_supply)
regulator_disable(stcontext->st95hf_supply);
- return result;
+ return 0;
}
/* Register as SPI protocol driver */
diff --git a/drivers/nfc/trf7970a.c b/drivers/nfc/trf7970a.c
index 8890fcd59c39..29ca9c328df2 100644
--- a/drivers/nfc/trf7970a.c
+++ b/drivers/nfc/trf7970a.c
@@ -2170,8 +2170,6 @@ static int trf7970a_suspend(struct device *dev)
struct spi_device *spi = to_spi_device(dev);
struct trf7970a *trf = spi_get_drvdata(spi);
- dev_dbg(dev, "Suspend\n");
-
mutex_lock(&trf->lock);
trf7970a_shutdown(trf);
@@ -2187,8 +2185,6 @@ static int trf7970a_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Resume\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_startup(trf);
@@ -2206,8 +2202,6 @@ static int trf7970a_pm_runtime_suspend(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime suspend\n");
-
mutex_lock(&trf->lock);
ret = trf7970a_power_down(trf);
@@ -2223,8 +2217,6 @@ static int trf7970a_pm_runtime_resume(struct device *dev)
struct trf7970a *trf = spi_get_drvdata(spi);
int ret;
- dev_dbg(dev, "Runtime resume\n");
-
ret = trf7970a_power_up(trf);
if (!ret)
pm_runtime_mark_last_busy(dev);
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 088d3dd6f6fa..b6c6866f9259 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -162,7 +162,7 @@ static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
return err;
}
-static blk_qc_t nd_blk_submit_bio(struct bio *bio)
+static void nd_blk_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip;
struct nd_namespace_blk *nsblk = bio->bi_bdev->bd_disk->private_data;
@@ -173,7 +173,7 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
+ return;
bip = bio_integrity(bio);
rw = bio_data_dir(bio);
@@ -199,7 +199,6 @@ static blk_qc_t nd_blk_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 92dec4952297..4295fa809420 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1440,7 +1440,7 @@ static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
return ret;
}
-static blk_qc_t btt_submit_bio(struct bio *bio)
+static void btt_submit_bio(struct bio *bio)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
struct btt *btt = bio->bi_bdev->bd_disk->private_data;
@@ -1451,7 +1451,7 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bool do_acct;
if (!bio_integrity_prep(bio))
- return BLK_QC_T_NONE;
+ return;
do_acct = blk_queue_io_stat(bio->bi_bdev->bd_disk->queue);
if (do_acct)
@@ -1483,7 +1483,6 @@ static blk_qc_t btt_submit_bio(struct bio *bio)
bio_end_io_acct(bio, start);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 7de592d7eff4..6a45fa91e8a3 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -7,6 +7,7 @@
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index ef4950f80832..c74d7bceb222 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -190,7 +190,7 @@ static blk_status_t pmem_do_write(struct pmem_device *pmem,
return rc;
}
-static blk_qc_t pmem_submit_bio(struct bio *bio)
+static void pmem_submit_bio(struct bio *bio)
{
int ret = 0;
blk_status_t rc = 0;
@@ -229,7 +229,6 @@ static blk_qc_t pmem_submit_bio(struct bio *bio)
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
- return BLK_QC_T_NONE;
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
@@ -333,26 +332,6 @@ static const struct attribute_group *pmem_attribute_groups[] = {
NULL,
};
-static void pmem_pagemap_cleanup(struct dev_pagemap *pgmap)
-{
- struct pmem_device *pmem = pgmap->owner;
-
- blk_cleanup_disk(pmem->disk);
-}
-
-static void pmem_release_queue(void *pgmap)
-{
- pmem_pagemap_cleanup(pgmap);
-}
-
-static void pmem_pagemap_kill(struct dev_pagemap *pgmap)
-{
- struct request_queue *q =
- container_of(pgmap->ref, struct request_queue, q_usage_counter);
-
- blk_freeze_queue_start(q);
-}
-
static void pmem_release_disk(void *__pmem)
{
struct pmem_device *pmem = __pmem;
@@ -360,12 +339,9 @@ static void pmem_release_disk(void *__pmem)
kill_dax(pmem->dax_dev);
put_dax(pmem->dax_dev);
del_gendisk(pmem->disk);
-}
-static const struct dev_pagemap_ops fsdax_pagemap_ops = {
- .kill = pmem_pagemap_kill,
- .cleanup = pmem_pagemap_cleanup,
-};
+ blk_cleanup_disk(pmem->disk);
+}
static int pmem_attach_disk(struct device *dev,
struct nd_namespace_common *ndns)
@@ -427,10 +403,8 @@ static int pmem_attach_disk(struct device *dev,
pmem->disk = disk;
pmem->pgmap.owner = pmem;
pmem->pfn_flags = PFN_DEV;
- pmem->pgmap.ref = &q->q_usage_counter;
if (is_nd_pfn(dev)) {
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
- pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pfn_sb = nd_pfn->pfn_sb;
pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
@@ -444,16 +418,12 @@ static int pmem_attach_disk(struct device *dev,
pmem->pgmap.range.end = res->end;
pmem->pgmap.nr_range = 1;
pmem->pgmap.type = MEMORY_DEVICE_FS_DAX;
- pmem->pgmap.ops = &fsdax_pagemap_ops;
addr = devm_memremap_pages(dev, &pmem->pgmap);
pmem->pfn_flags |= PFN_MAP;
bb_range = pmem->pgmap.range;
} else {
addr = devm_memremap(dev, pmem->phys_addr,
pmem->size, ARCH_MEMREMAP_PMEM);
- if (devm_add_action_or_reset(dev, pmem_release_queue,
- &pmem->pgmap))
- return -ENOMEM;
bb_range.start = res->start;
bb_range.end = res->end;
}
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f8dd664b2eda..838b5e2058be 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -6,6 +6,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
@@ -118,25 +119,6 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
static void nvme_update_keep_alive(struct nvme_ctrl *ctrl,
struct nvme_command *cmd);
-/*
- * Prepare a queue for teardown.
- *
- * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
- * the capacity to 0 after that to avoid blocking dispatchers that may be
- * holding bd_butex. This will end buffered writers dirtying pages that can't
- * be synced.
- */
-static void nvme_set_queue_dying(struct nvme_ns *ns)
-{
- if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- return;
-
- blk_set_queue_dying(ns->queue);
- blk_mq_unquiesce_queue(ns->queue);
-
- set_capacity_and_notify(ns->disk, 0);
-}
-
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
/*
@@ -221,7 +203,7 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
static void nvme_do_delete_ctrl(struct nvme_ctrl *ctrl)
{
dev_info(ctrl->device,
- "Removing ctrl: NQN \"%s\"\n", ctrl->opts->subsysnqn);
+ "Removing ctrl: NQN \"%s\"\n", nvmf_ctrl_subsysnqn(ctrl));
flush_work(&ctrl->reset_work);
nvme_stop_ctrl(ctrl);
@@ -345,15 +327,19 @@ static inline enum nvme_disposition nvme_decide_disposition(struct request *req)
return RETRY;
}
-static inline void nvme_end_req(struct request *req)
+static inline void nvme_end_req_zoned(struct request *req)
{
- blk_status_t status = nvme_error_status(nvme_req(req)->status);
-
if (IS_ENABLED(CONFIG_BLK_DEV_ZONED) &&
req_op(req) == REQ_OP_ZONE_APPEND)
req->__sector = nvme_lba_to_sect(req->q->queuedata,
le64_to_cpu(nvme_req(req)->result.u64));
+}
+
+static inline void nvme_end_req(struct request *req)
+{
+ blk_status_t status = nvme_error_status(nvme_req(req)->status);
+ nvme_end_req_zoned(req);
nvme_trace_bio_complete(req);
blk_mq_end_request(req, status);
}
@@ -380,6 +366,13 @@ void nvme_complete_rq(struct request *req)
}
EXPORT_SYMBOL_GPL(nvme_complete_rq);
+void nvme_complete_batch_req(struct request *req)
+{
+ nvme_cleanup_cmd(req);
+ nvme_end_req_zoned(req);
+}
+EXPORT_SYMBOL_GPL(nvme_complete_batch_req);
+
/*
* Called to unwind from ->queue_rq on a failed command submission so that the
* multipathing code gets called to potentially failover to another path.
@@ -631,7 +624,7 @@ static inline void nvme_init_request(struct request *req,
req->cmd_flags |= REQ_FAILFAST_DRIVER;
if (req->mq_hctx->type == HCTX_TYPE_POLL)
- req->cmd_flags |= REQ_HIPRI;
+ req->cmd_flags |= REQ_POLLED;
nvme_clear_nvme_request(req);
memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd));
}
@@ -822,6 +815,7 @@ static void nvme_assign_write_stream(struct nvme_ctrl *ctrl,
static inline void nvme_setup_flush(struct nvme_ns *ns,
struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->common.opcode = nvme_cmd_flush;
cmnd->common.nsid = cpu_to_le32(ns->head->ns_id);
}
@@ -873,6 +867,7 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
return BLK_STS_IOERR;
}
+ memset(cmnd, 0, sizeof(*cmnd));
cmnd->dsm.opcode = nvme_cmd_dsm;
cmnd->dsm.nsid = cpu_to_le32(ns->head->ns_id);
cmnd->dsm.nr = cpu_to_le32(segments - 1);
@@ -889,6 +884,8 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
struct request *req, struct nvme_command *cmnd)
{
+ memset(cmnd, 0, sizeof(*cmnd));
+
if (ns->ctrl->quirks & NVME_QUIRK_DEALLOCATE_ZEROES)
return nvme_setup_discard(ns, req, cmnd);
@@ -922,9 +919,15 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
cmnd->rw.opcode = op;
+ cmnd->rw.flags = 0;
cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
+ cmnd->rw.rsvd2 = 0;
+ cmnd->rw.metadata = 0;
cmnd->rw.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+ cmnd->rw.reftag = 0;
+ cmnd->rw.apptag = 0;
+ cmnd->rw.appmask = 0;
if (req_op(req) == REQ_OP_WRITE && ctrl->nr_streams)
nvme_assign_write_stream(ctrl, req, &control, &dsmgmt);
@@ -981,10 +984,8 @@ blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req)
struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
blk_status_t ret = BLK_STS_OK;
- if (!(req->rq_flags & RQF_DONTPREP)) {
+ if (!(req->rq_flags & RQF_DONTPREP))
nvme_clear_nvme_request(req);
- memset(cmd, 0, sizeof(*cmd));
- }
switch (req_op(req)) {
case REQ_OP_DRV_IN:
@@ -2600,6 +2601,24 @@ static ssize_t nvme_subsys_show_nqn(struct device *dev,
}
static SUBSYS_ATTR_RO(subsysnqn, S_IRUGO, nvme_subsys_show_nqn);
+static ssize_t nvme_subsys_show_type(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct nvme_subsystem *subsys =
+ container_of(dev, struct nvme_subsystem, dev);
+
+ switch (subsys->subtype) {
+ case NVME_NQN_DISC:
+ return sysfs_emit(buf, "discovery\n");
+ case NVME_NQN_NVME:
+ return sysfs_emit(buf, "nvm\n");
+ default:
+ return sysfs_emit(buf, "reserved\n");
+ }
+}
+static SUBSYS_ATTR_RO(subsystype, S_IRUGO, nvme_subsys_show_type);
+
#define nvme_subsys_show_str_function(field) \
static ssize_t subsys_##field##_show(struct device *dev, \
struct device_attribute *attr, char *buf) \
@@ -2620,6 +2639,7 @@ static struct attribute *nvme_subsys_attrs[] = {
&subsys_attr_serial.attr,
&subsys_attr_firmware_rev.attr,
&subsys_attr_subsysnqn.attr,
+ &subsys_attr_subsystype.attr,
#ifdef CONFIG_NVME_MULTIPATH
&subsys_attr_iopolicy.attr,
#endif
@@ -2690,6 +2710,21 @@ static int nvme_init_subsystem(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
memcpy(subsys->firmware_rev, id->fr, sizeof(subsys->firmware_rev));
subsys->vendor_id = le16_to_cpu(id->vid);
subsys->cmic = id->cmic;
+
+ /* Versions prior to 1.4 don't necessarily report a valid type */
+ if (id->cntrltype == NVME_CTRL_DISC ||
+ !strcmp(subsys->subnqn, NVME_DISC_SUBSYS_NAME))
+ subsys->subtype = NVME_NQN_DISC;
+ else
+ subsys->subtype = NVME_NQN_NVME;
+
+ if (nvme_discovery_ctrl(ctrl) && subsys->subtype != NVME_NQN_DISC) {
+ dev_err(ctrl->device,
+ "Subsystem %s is not a discovery controller",
+ subsys->subnqn);
+ kfree(subsys);
+ return -EINVAL;
+ }
subsys->awupf = le16_to_cpu(id->awupf);
#ifdef CONFIG_NVME_MULTIPATH
subsys->iopolicy = NVME_IOPOLICY_NUMA;
@@ -4473,6 +4508,37 @@ out:
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
+static void nvme_start_ns_queue(struct nvme_ns *ns)
+{
+ if (test_and_clear_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_unquiesce_queue(ns->queue);
+}
+
+static void nvme_stop_ns_queue(struct nvme_ns *ns)
+{
+ if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
+ blk_mq_quiesce_queue(ns->queue);
+}
+
+/*
+ * Prepare a queue for teardown.
+ *
+ * This must forcibly unquiesce queues to avoid blocking dispatch, and only set
+ * the capacity to 0 after that to avoid blocking dispatchers that may be
+ * holding bd_mutex. This will end buffered writers dirtying pages that can't
+ * be synced.
+ */
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+ if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+
+ blk_set_queue_dying(ns->queue);
+ nvme_start_ns_queue(ns);
+
+ set_capacity_and_notify(ns->disk, 0);
+}
+
/**
* nvme_kill_queues(): Ends all namespace queues
* @ctrl: the dead controller that needs to end
@@ -4488,7 +4554,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
/* Forcibly unquiesce queues to avoid blocking dispatch */
if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
list_for_each_entry(ns, &ctrl->namespaces, list)
nvme_set_queue_dying(ns);
@@ -4551,7 +4617,7 @@ void nvme_stop_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_quiesce_queue(ns->queue);
+ nvme_stop_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);
@@ -4562,11 +4628,25 @@ void nvme_start_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list)
- blk_mq_unquiesce_queue(ns->queue);
+ nvme_start_ns_queue(ns);
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_quiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
+
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl)
+{
+ if (test_and_clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
+ blk_mq_unquiesce_queue(ctrl->admin_q);
+}
+EXPORT_SYMBOL_GPL(nvme_start_admin_queue);
+
void nvme_sync_io_queues(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
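The nvme_stop_ns_queue()/nvme_start_ns_queue() and nvme_stop_admin_queue()/nvme_start_admin_queue() helpers added above gate the blk-mq quiesce calls on a flag bit so that stop and start become idempotent and stay balanced. Below is a minimal userspace sketch of that pattern; the demo_* names are hypothetical and stdatomic's atomic_exchange() merely stands in for the kernel's test_and_set_bit()/test_and_clear_bit().

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_queue {
        atomic_bool stopped;            /* plays the role of NVME_NS_STOPPED */
};

static void demo_stop_queue(struct demo_queue *q)
{
        /* test_and_set_bit() analogue: only the first caller quiesces */
        if (!atomic_exchange(&q->stopped, true))
                puts("quiesce issued");         /* kernel: blk_mq_quiesce_queue() */
}

static void demo_start_queue(struct demo_queue *q)
{
        /* test_and_clear_bit() analogue: only a stopped queue is unquiesced */
        if (atomic_exchange(&q->stopped, false))
                puts("unquiesce issued");       /* kernel: blk_mq_unquiesce_queue() */
}

int main(void)
{
        struct demo_queue q = { .stopped = false };

        demo_stop_queue(&q);    /* quiesces */
        demo_stop_queue(&q);    /* no-op: already stopped */
        demo_start_queue(&q);   /* unquiesces exactly once */
        demo_start_queue(&q);   /* no-op: already running */
        return 0;
}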
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 668c6bb7a567..c5a2b71c5268 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -548,6 +548,7 @@ static const match_table_t opt_tokens = {
{ NVMF_OPT_NR_POLL_QUEUES, "nr_poll_queues=%d" },
{ NVMF_OPT_TOS, "tos=%d" },
{ NVMF_OPT_FAIL_FAST_TMO, "fast_io_fail_tmo=%d" },
+ { NVMF_OPT_DISCOVERY, "discovery" },
{ NVMF_OPT_ERR, NULL }
};
@@ -823,6 +824,9 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts,
}
opts->tos = token;
break;
+ case NVMF_OPT_DISCOVERY:
+ opts->discovery_nqn = true;
+ break;
default:
pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n",
p);
@@ -949,7 +953,7 @@ EXPORT_SYMBOL_GPL(nvmf_free_options);
#define NVMF_ALLOWED_OPTS (NVMF_OPT_QUEUE_SIZE | NVMF_OPT_NR_IO_QUEUES | \
NVMF_OPT_KATO | NVMF_OPT_HOSTNQN | \
NVMF_OPT_HOST_ID | NVMF_OPT_DUP_CONNECT |\
- NVMF_OPT_DISABLE_SQFLOW |\
+ NVMF_OPT_DISABLE_SQFLOW | NVMF_OPT_DISCOVERY |\
NVMF_OPT_FAIL_FAST_TMO)
static struct nvme_ctrl *
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index a146cb903869..c3203ff1c654 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -67,6 +67,7 @@ enum {
NVMF_OPT_TOS = 1 << 19,
NVMF_OPT_FAIL_FAST_TMO = 1 << 20,
NVMF_OPT_HOST_IFACE = 1 << 21,
+ NVMF_OPT_DISCOVERY = 1 << 22,
};
/**
@@ -178,6 +179,13 @@ nvmf_ctlr_matches_baseopts(struct nvme_ctrl *ctrl,
return true;
}
+static inline char *nvmf_ctrl_subsysnqn(struct nvme_ctrl *ctrl)
+{
+ if (!ctrl->subsys)
+ return ctrl->opts->subsysnqn;
+ return ctrl->subsys->subnqn;
+}
+
int nvmf_reg_read32(struct nvme_ctrl *ctrl, u32 off, u32 *val);
int nvmf_reg_read64(struct nvme_ctrl *ctrl, u32 off, u64 *val);
int nvmf_reg_write32(struct nvme_ctrl *ctrl, u32 off, u32 val);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa14ad963d91..71b3108c22f0 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -16,6 +16,7 @@
#include <linux/nvme-fc.h>
#include "fc.h"
#include <scsi/scsi_transport_fc.h>
+#include <linux/blk-mq-pci.h>
/* *************************** Data Structures/Defines ****************** */
@@ -2382,7 +2383,7 @@ nvme_fc_ctrl_free(struct kref *ref)
list_del(&ctrl->ctrl_list);
spin_unlock_irqrestore(&ctrl->rport->lock, flags);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_cleanup_queue(ctrl->ctrl.fabrics_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
@@ -2510,7 +2511,7 @@ __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues)
/*
* clean up the admin queue. Same thing as above.
*/
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_fc_terminate_exchange, &ctrl->ctrl);
@@ -2841,6 +2842,28 @@ nvme_fc_complete_rq(struct request *rq)
nvme_fc_ctrl_put(ctrl);
}
+static int nvme_fc_map_queues(struct blk_mq_tag_set *set)
+{
+ struct nvme_fc_ctrl *ctrl = set->driver_data;
+ int i;
+
+ for (i = 0; i < set->nr_maps; i++) {
+ struct blk_mq_queue_map *map = &set->map[i];
+
+ if (!map->nr_queues) {
+ WARN_ON(i == HCTX_TYPE_DEFAULT);
+ continue;
+ }
+
+ /* Call LLDD map queue functionality if defined */
+ if (ctrl->lport->ops->map_queues)
+ ctrl->lport->ops->map_queues(&ctrl->lport->localport,
+ map);
+ else
+ blk_mq_map_queues(map);
+ }
+ return 0;
+}
static const struct blk_mq_ops nvme_fc_mq_ops = {
.queue_rq = nvme_fc_queue_rq,
@@ -2849,6 +2872,7 @@ static const struct blk_mq_ops nvme_fc_mq_ops = {
.exit_request = nvme_fc_exit_request,
.init_hctx = nvme_fc_init_hctx,
.timeout = nvme_fc_timeout,
+ .map_queues = nvme_fc_map_queues,
};
static int
@@ -3095,7 +3119,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
(ilog2(SZ_4K) - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
ret = nvme_init_ctrl_finish(&ctrl->ctrl);
if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
@@ -3249,7 +3273,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
nvme_fc_free_queue(&ctrl->queues[0]);
/* re-enable the admin_q so anything new can fast fail */
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
/* resume the io queues so that things will fast fail */
nvme_start_queues(&ctrl->ctrl);
@@ -3572,7 +3596,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
- ctrl->cnum, ctrl->ctrl.opts->subsysnqn);
+ ctrl->cnum, nvmf_ctrl_subsysnqn(&ctrl->ctrl));
return &ctrl->ctrl;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index fba06618c6c2..7f2071f2460c 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -85,8 +85,13 @@ void nvme_failover_req(struct request *req)
}
spin_lock_irqsave(&ns->head->requeue_lock, flags);
- for (bio = req->bio; bio; bio = bio->bi_next)
+ for (bio = req->bio; bio; bio = bio->bi_next) {
bio_set_dev(bio, ns->head->disk->part0);
+ if (bio->bi_opf & REQ_POLLED) {
+ bio->bi_opf &= ~REQ_POLLED;
+ bio->bi_cookie = BLK_QC_T_NONE;
+ }
+ }
blk_steal_bios(&ns->head->requeue_list, req);
spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
@@ -100,8 +105,11 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->head->disk)
- kblockd_schedule_work(&ns->head->requeue_work);
+ if (!ns->head->disk)
+ continue;
+ kblockd_schedule_work(&ns->head->requeue_work);
+ if (ctrl->state == NVME_CTRL_LIVE)
+ disk_uevent(ns->head->disk, KOBJ_CHANGE);
}
up_read(&ctrl->namespaces_rwsem);
}
@@ -138,13 +146,12 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
{
struct nvme_ns *ns;
- mutex_lock(&ctrl->scan_lock);
down_read(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list)
- if (nvme_mpath_clear_current_path(ns))
- kblockd_schedule_work(&ns->head->requeue_work);
+ list_for_each_entry(ns, &ctrl->namespaces, list) {
+ nvme_mpath_clear_current_path(ns);
+ kblockd_schedule_work(&ns->head->requeue_work);
+ }
up_read(&ctrl->namespaces_rwsem);
- mutex_unlock(&ctrl->scan_lock);
}
void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
@@ -312,12 +319,11 @@ static bool nvme_available_path(struct nvme_ns_head *head)
return false;
}
-static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
+static void nvme_ns_head_submit_bio(struct bio *bio)
{
struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
struct device *dev = disk_to_dev(head->disk);
struct nvme_ns *ns;
- blk_qc_t ret = BLK_QC_T_NONE;
int srcu_idx;
/*
@@ -334,7 +340,7 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
bio->bi_opf |= REQ_NVME_MPATH;
trace_block_bio_remap(bio, disk_devt(ns->head->disk),
bio->bi_iter.bi_sector);
- ret = submit_bio_noacct(bio);
+ submit_bio_noacct(bio);
} else if (nvme_available_path(head)) {
dev_warn_ratelimited(dev, "no usable path - requeuing I/O\n");
@@ -349,7 +355,6 @@ static blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
}
srcu_read_unlock(&head->srcu, srcu_idx);
- return ret;
}
static int nvme_ns_head_open(struct block_device *bdev, fmode_t mode)
@@ -479,6 +484,15 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);
+ /*
+ * This assumes all controllers that refer to a namespace either
+ * support poll queues or not. That is not a strict guarantee, but
+ * if the assumption is wrong the effect is only suboptimal
+ * performance, not a correctness problem.
+ */
+ if (ctrl->tagset->nr_maps > HCTX_TYPE_POLL &&
+ ctrl->tagset->map[HCTX_TYPE_POLL].nr_queues)
+ blk_queue_flag_set(QUEUE_FLAG_POLL, head->disk->queue);
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(head->disk->queue, 512);
@@ -494,13 +508,23 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
static void nvme_mpath_set_live(struct nvme_ns *ns)
{
struct nvme_ns_head *head = ns->head;
+ int rc;
if (!head->disk)
return;
+ /*
+ * test_and_set_bit() is used to protect against two nvme paths
+ * simultaneously calling device_add_disk() on the same namespace
+ * head.
+ */
if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
- device_add_disk(&head->subsys->dev, head->disk,
- nvme_ns_id_attr_groups);
+ rc = device_add_disk(&head->subsys->dev, head->disk,
+ nvme_ns_id_attr_groups);
+ if (rc) {
+ clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags);
+ return;
+ }
nvme_add_ns_head_cdev(head);
}
@@ -538,7 +562,7 @@ static int nvme_parse_ana_log(struct nvme_ctrl *ctrl, void *data,
return -EINVAL;
nr_nsids = le32_to_cpu(desc->nnsids);
- nsid_buf_size = nr_nsids * sizeof(__le32);
+ nsid_buf_size = flex_array_size(desc, nsids, nr_nsids);
if (WARN_ON_ONCE(desc->grpid == 0))
return -EINVAL;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ed79a6c7e804..b334af8aa264 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -342,6 +342,7 @@ struct nvme_ctrl {
int nr_reconnects;
unsigned long flags;
#define NVME_CTRL_FAILFAST_EXPIRED 0
+#define NVME_CTRL_ADMIN_Q_STOPPED 1
struct nvmf_ctrl_options *opts;
struct page *discard_page;
@@ -372,6 +373,7 @@ struct nvme_subsystem {
char model[40];
char firmware_rev[8];
u8 cmic;
+ enum nvme_subsys_type subtype;
u16 vendor_id;
u16 awupf; /* 0's based awupf value. */
struct ida ns_ida;
@@ -463,6 +465,7 @@ struct nvme_ns {
#define NVME_NS_ANA_PENDING 2
#define NVME_NS_FORCE_RO 3
#define NVME_NS_READY 4
+#define NVME_NS_STOPPED 5
struct cdev cdev;
struct device cdev_device;
@@ -638,6 +641,20 @@ static inline bool nvme_is_aen_req(u16 qid, __u16 command_id)
}
void nvme_complete_rq(struct request *req);
+void nvme_complete_batch_req(struct request *req);
+
+static __always_inline void nvme_complete_batch(struct io_comp_batch *iob,
+ void (*fn)(struct request *rq))
+{
+ struct request *req;
+
+ rq_list_for_each(&iob->req_list, req) {
+ fn(req);
+ nvme_complete_batch_req(req);
+ }
+ blk_mq_end_request_batch(iob);
+}
+
blk_status_t nvme_host_path_error(struct request *req);
bool nvme_cancel_request(struct request *req, void *data, bool reserved);
void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
@@ -665,6 +682,8 @@ void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
void nvme_stop_queues(struct nvme_ctrl *ctrl);
void nvme_start_queues(struct nvme_ctrl *ctrl);
+void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
+void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
void nvme_kill_queues(struct nvme_ctrl *ctrl);
void nvme_sync_queues(struct nvme_ctrl *ctrl);
void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 149ecf73df38..ca2ee806d74b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -10,6 +10,7 @@
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
+#include <linux/blk-integrity.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -244,8 +245,15 @@ static int nvme_dbbuf_dma_alloc(struct nvme_dev *dev)
{
unsigned int mem_size = nvme_dbbuf_size(dev);
- if (dev->dbbuf_dbs)
+ if (dev->dbbuf_dbs) {
+ /*
+ * Clear the dbbuf memory so the driver doesn't observe stale
+ * values from the previous instantiation.
+ */
+ memset(dev->dbbuf_dbs, 0, mem_size);
+ memset(dev->dbbuf_eis, 0, mem_size);
return 0;
+ }
dev->dbbuf_dbs = dma_alloc_coherent(dev->dev, mem_size,
&dev->dbbuf_dbs_dma_addr,
@@ -958,7 +966,7 @@ out_free_cmd:
return ret;
}
-static void nvme_pci_complete_rq(struct request *req)
+static __always_inline void nvme_pci_unmap_rq(struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct nvme_dev *dev = iod->nvmeq->dev;
@@ -968,9 +976,19 @@ static void nvme_pci_complete_rq(struct request *req)
rq_integrity_vec(req)->bv_len, rq_data_dir(req));
if (blk_rq_nr_phys_segments(req))
nvme_unmap_data(dev, req);
+}
+
+static void nvme_pci_complete_rq(struct request *req)
+{
+ nvme_pci_unmap_rq(req);
nvme_complete_rq(req);
}
+static void nvme_pci_complete_batch(struct io_comp_batch *iob)
+{
+ nvme_complete_batch(iob, nvme_pci_unmap_rq);
+}
+
/* We read the CQE phase first to check if the rest of the entry is valid */
static inline bool nvme_cqe_pending(struct nvme_queue *nvmeq)
{
@@ -995,7 +1013,8 @@ static inline struct blk_mq_tags *nvme_queue_tagset(struct nvme_queue *nvmeq)
return nvmeq->dev->tagset.tags[nvmeq->qid - 1];
}
-static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
+static inline void nvme_handle_cqe(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob, u16 idx)
{
struct nvme_completion *cqe = &nvmeq->cqes[idx];
__u16 command_id = READ_ONCE(cqe->command_id);
@@ -1022,7 +1041,9 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx)
}
trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail);
- if (!nvme_try_complete_req(req, cqe->status, cqe->result))
+ if (!nvme_try_complete_req(req, cqe->status, cqe->result) &&
+ !blk_mq_add_to_batch(req, iob, nvme_req(req)->status,
+ nvme_pci_complete_batch))
nvme_pci_complete_rq(req);
}
@@ -1038,7 +1059,8 @@ static inline void nvme_update_cq_head(struct nvme_queue *nvmeq)
}
}
-static inline int nvme_process_cq(struct nvme_queue *nvmeq)
+static inline int nvme_poll_cq(struct nvme_queue *nvmeq,
+ struct io_comp_batch *iob)
{
int found = 0;
@@ -1049,7 +1071,7 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
* the cqe requires a full read memory barrier
*/
dma_rmb();
- nvme_handle_cqe(nvmeq, nvmeq->cq_head);
+ nvme_handle_cqe(nvmeq, iob, nvmeq->cq_head);
nvme_update_cq_head(nvmeq);
}
@@ -1061,9 +1083,13 @@ static inline int nvme_process_cq(struct nvme_queue *nvmeq)
static irqreturn_t nvme_irq(int irq, void *data)
{
struct nvme_queue *nvmeq = data;
+ DEFINE_IO_COMP_BATCH(iob);
- if (nvme_process_cq(nvmeq))
+ if (nvme_poll_cq(nvmeq, &iob)) {
+ if (!rq_list_empty(iob.req_list))
+ nvme_pci_complete_batch(&iob);
return IRQ_HANDLED;
+ }
return IRQ_NONE;
}
@@ -1087,11 +1113,11 @@ static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
- nvme_process_cq(nvmeq);
+ nvme_poll_cq(nvmeq, NULL);
enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
}
-static int nvme_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_queue *nvmeq = hctx->driver_data;
bool found;
@@ -1100,7 +1126,7 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
return 0;
spin_lock(&nvmeq->cq_poll_lock);
- found = nvme_process_cq(nvmeq);
+ found = nvme_poll_cq(nvmeq, iob);
spin_unlock(&nvmeq->cq_poll_lock);
return found;
@@ -1273,7 +1299,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
* Did we miss an interrupt?
*/
if (test_bit(NVMEQ_POLLED, &nvmeq->flags))
- nvme_poll(req->mq_hctx);
+ nvme_poll(req->mq_hctx, NULL);
else
nvme_poll_irqdisable(nvmeq);
@@ -1395,7 +1421,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
nvmeq->dev->online_queues--;
if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
- blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q);
+ nvme_stop_admin_queue(&nvmeq->dev->ctrl);
if (!test_and_clear_bit(NVMEQ_POLLED, &nvmeq->flags))
pci_free_irq(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector, nvmeq);
return 0;
@@ -1433,7 +1459,7 @@ static void nvme_reap_pending_cqes(struct nvme_dev *dev)
for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
spin_lock(&dev->queues[i].cq_poll_lock);
- nvme_process_cq(&dev->queues[i]);
+ nvme_poll_cq(&dev->queues[i], NULL);
spin_unlock(&dev->queues[i].cq_poll_lock);
}
}
@@ -1654,7 +1680,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev)
* user requests may be waiting on a stopped queue. Start the
* queue to flush these to completion.
*/
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
blk_cleanup_queue(dev->ctrl.admin_q);
blk_mq_free_tag_set(&dev->admin_tagset);
}
@@ -1688,7 +1714,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return -ENODEV;
}
} else
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
return 0;
}
@@ -2623,7 +2649,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
if (shutdown) {
nvme_start_queues(&dev->ctrl);
if (dev->ctrl.admin_q && !blk_queue_dying(dev->ctrl.admin_q))
- blk_mq_unquiesce_queue(dev->ctrl.admin_q);
+ nvme_start_admin_queue(&dev->ctrl);
}
mutex_unlock(&dev->shutdown_lock);
}
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 042c594bc57e..850f84d204d0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -13,6 +13,7 @@
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-rdma.h>
+#include <linux/blk-integrity.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -918,7 +919,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
else
ctrl->ctrl.max_integrity_segments = 0;
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
@@ -927,7 +928,7 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
return 0;
out_quiesce_queue:
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
out_stop_queue:
nvme_rdma_stop_queue(&ctrl->queues[0]);
@@ -1025,12 +1026,12 @@ out_free_io_queues:
static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
if (remove)
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, remove);
}
@@ -1095,11 +1096,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
return ret;
if (ctrl->ctrl.icdoff) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device, "icdoff is not supported!\n");
goto destroy_admin;
}
if (!(ctrl->ctrl.sgls & (1 << 2))) {
+ ret = -EOPNOTSUPP;
dev_err(ctrl->ctrl.device,
"Mandatory keyed sgls are not supported!\n");
goto destroy_admin;
@@ -1111,6 +1114,13 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1);
}
+ if (ctrl->ctrl.sqsize + 1 > NVME_RDMA_MAX_QUEUE_SIZE) {
+ dev_warn(ctrl->ctrl.device,
+ "ctrl sqsize %u > max queue size %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, NVME_RDMA_MAX_QUEUE_SIZE);
+ ctrl->ctrl.sqsize = NVME_RDMA_MAX_QUEUE_SIZE - 1;
+ }
+
if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
dev_warn(ctrl->ctrl.device,
"sqsize %u > ctrl maxcmd %u, clamping down\n",
@@ -1153,7 +1163,7 @@ destroy_io:
nvme_rdma_destroy_io_queues(ctrl, new);
}
destroy_admin:
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
blk_sync_queue(ctrl->ctrl.admin_q);
nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_cancel_admin_tagset(&ctrl->ctrl);
@@ -1193,7 +1203,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
nvme_rdma_teardown_io_queues(ctrl, false);
nvme_start_queues(&ctrl->ctrl);
nvme_rdma_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2105,7 +2115,7 @@ unmap_qe:
return ret;
}
-static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_rdma_queue *queue = hctx->driver_data;
@@ -2231,7 +2241,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&ctrl->reconnect_work);
nvme_rdma_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (shutdown)
nvme_shutdown_ctrl(&ctrl->ctrl);
else
@@ -2385,7 +2395,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_rdma_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list);
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 3c1c29dd3020..33bc83d8d992 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -926,12 +926,14 @@ static void nvme_tcp_fail_request(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ int req_data_len = req->data_len;
while (true) {
struct page *page = nvme_tcp_req_cur_page(req);
size_t offset = nvme_tcp_req_cur_offset(req);
size_t len = nvme_tcp_req_cur_length(req);
bool last = nvme_tcp_pdu_last_send(req, len);
+ int req_data_sent = req->data_sent;
int ret, flags = MSG_DONTWAIT;
if (last && !queue->data_digest && !nvme_tcp_queue_more(queue))
@@ -958,7 +960,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req)
* in the request where we don't want to modify it as we may
* compete with the RX path completing the request.
*/
- if (req->data_sent + ret < req->data_len)
+ if (req_data_sent + ret < req_data_len)
nvme_tcp_advance_req(req, ret);
/* fully successful last send in current PDU */
@@ -1048,10 +1050,11 @@ static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req)
static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
{
struct nvme_tcp_queue *queue = req->queue;
+ size_t offset = req->offset;
int ret;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &req->ddgst + req->offset,
+ .iov_base = (u8 *)&req->ddgst + req->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - req->offset
};
@@ -1064,7 +1067,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req)
if (unlikely(ret <= 0))
return ret;
- if (req->offset + ret == NVME_TCP_DIGEST_LENGTH) {
+ if (offset + ret == NVME_TCP_DIGEST_LENGTH) {
nvme_tcp_done_send_req(queue);
return 1;
}
@@ -1915,7 +1918,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
if (error)
goto out_stop_queue;
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
error = nvme_init_ctrl_finish(ctrl);
if (error)
@@ -1924,7 +1927,7 @@ static int nvme_tcp_configure_admin_queue(struct nvme_ctrl *ctrl, bool new)
return 0;
out_quiesce_queue:
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
out_stop_queue:
nvme_tcp_stop_queue(ctrl, 0);
@@ -1946,12 +1949,12 @@ out_free_queue:
static void nvme_tcp_teardown_admin_queue(struct nvme_ctrl *ctrl,
bool remove)
{
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
if (remove)
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
nvme_tcp_destroy_admin_queue(ctrl, remove);
}
@@ -1960,7 +1963,7 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl,
{
if (ctrl->queue_count <= 1)
return;
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
nvme_start_freeze(ctrl);
nvme_stop_queues(ctrl);
nvme_sync_io_queues(ctrl);
@@ -2055,7 +2058,7 @@ destroy_io:
nvme_tcp_destroy_io_queues(ctrl, new);
}
destroy_admin:
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
blk_sync_queue(ctrl->admin_q);
nvme_tcp_stop_queue(ctrl, 0);
nvme_cancel_admin_tagset(ctrl);
@@ -2098,7 +2101,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work)
/* unquiesce to fail fast pending requests */
nvme_start_queues(ctrl);
nvme_tcp_teardown_admin_queue(ctrl, false);
- blk_mq_unquiesce_queue(ctrl->admin_q);
+ nvme_start_admin_queue(ctrl);
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_CONNECTING)) {
/* state change failure is ok if we started ctrl delete */
@@ -2116,7 +2119,7 @@ static void nvme_tcp_teardown_ctrl(struct nvme_ctrl *ctrl, bool shutdown)
cancel_delayed_work_sync(&to_tcp_ctrl(ctrl)->connect_work);
nvme_tcp_teardown_io_queues(ctrl, shutdown);
- blk_mq_quiesce_queue(ctrl->admin_q);
+ nvme_stop_admin_queue(ctrl);
if (shutdown)
nvme_shutdown_ctrl(ctrl);
else
@@ -2429,7 +2432,7 @@ static int nvme_tcp_map_queues(struct blk_mq_tag_set *set)
return 0;
}
-static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx)
+static int nvme_tcp_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct nvme_tcp_queue *queue = hctx->driver_data;
struct sock *sk = queue->sock->sk;
@@ -2582,7 +2585,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev,
goto out_uninit_ctrl;
dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISp\n",
- ctrl->ctrl.opts->subsysnqn, &ctrl->addr);
+ nvmf_ctrl_subsysnqn(&ctrl->ctrl), &ctrl->addr);
mutex_lock(&nvme_tcp_ctrl_mutex);
list_add_tail(&ctrl->list, &nvme_tcp_ctrl_list);
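The nvme_tcp_try_send_ddgst() hunk above (and the matching nvmet_try_send_ddgst() hunk in the target tcp.c later in this diff) adds a (u8 *) cast before adding the byte offset, because pointer arithmetic on &req->ddgst scales by the size of the digest type rather than by bytes. A small self-contained sketch of the difference, with ddgst and offset as stand-in local variables:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ddgst = 0;             /* stand-in for the 4-byte data digest */
        unsigned int offset = 1;        /* one byte of the digest already sent */

        void *bytewise = (uint8_t *)&ddgst + offset;    /* the fix: advances 1 byte  */
        void *scaled   = &ddgst + offset;               /* the bug: advances 4 bytes */

        printf("with cast:    +%td bytes\n", (char *)bytewise - (char *)&ddgst);
        printf("without cast: +%td bytes\n", (char *)scaled - (char *)&ddgst);
        return 0;
}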
diff --git a/drivers/nvme/host/zns.c b/drivers/nvme/host/zns.c
index d95010481fce..bfc259e0d7b8 100644
--- a/drivers/nvme/host/zns.c
+++ b/drivers/nvme/host/zns.c
@@ -233,6 +233,8 @@ out_free:
blk_status_t nvme_setup_zone_mgmt_send(struct nvme_ns *ns, struct request *req,
struct nvme_command *c, enum nvme_zone_mgmt_action action)
{
+ memset(c, 0, sizeof(*c));
+
c->zms.opcode = nvme_cmd_zone_mgmt_send;
c->zms.nsid = cpu_to_le32(ns->head->ns_id);
c->zms.slba = cpu_to_le64(nvme_sect_to_lba(ns, blk_rq_pos(req)));
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index aa6d84d8848e..6fb24746de06 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -264,7 +264,7 @@ static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid,
desc->chgcnt = cpu_to_le64(nvmet_ana_chgcnt);
desc->state = req->port->ana_state[grpid];
memset(desc->rsvd17, 0, sizeof(desc->rsvd17));
- return sizeof(struct nvme_ana_group_desc) + count * sizeof(__le32);
+ return struct_size(desc, nsids, count);
}
static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
@@ -278,8 +278,8 @@ static void nvmet_execute_get_log_page_ana(struct nvmet_req *req)
u16 status;
status = NVME_SC_INTERNAL;
- desc = kmalloc(sizeof(struct nvme_ana_group_desc) +
- NVMET_MAX_NAMESPACES * sizeof(__le32), GFP_KERNEL);
+ desc = kmalloc(struct_size(desc, nsids, NVMET_MAX_NAMESPACES),
+ GFP_KERNEL);
if (!desc)
goto out;
@@ -374,13 +374,19 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
id->rab = 6;
+ if (nvmet_is_disc_subsys(ctrl->subsys))
+ id->cntrltype = NVME_CTRL_DISC;
+ else
+ id->cntrltype = NVME_CTRL_IO;
+
/*
* XXX: figure out how we can assign a IEEE OUI, but until then
* the safest is to leave it as zeroes.
*/
/* we support multiple ports, multiples hosts and ANA: */
- id->cmic = (1 << 0) | (1 << 1) | (1 << 3);
+ id->cmic = NVME_CTRL_CMIC_MULTI_PORT | NVME_CTRL_CMIC_MULTI_CTRL |
+ NVME_CTRL_CMIC_ANA;
/* Limit MDTS according to transport capability */
if (ctrl->ops->get_mdts)
@@ -536,7 +542,7 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req)
* Our namespace might always be shared. Not just with other
* controllers, but also with any other user of the block device.
*/
- id->nmic = (1 << 0);
+ id->nmic = NVME_NS_NMIC_SHARED;
id->anagrpid = cpu_to_le32(req->ns->anagrpid);
memcpy(&id->nguid, &req->ns->nguid, sizeof(id->nguid));
@@ -1008,7 +1014,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (nvme_is_fabrics(cmd))
return nvmet_parse_fabrics_cmd(req);
- if (nvmet_req_subsys(req)->type == NVME_NQN_DISC)
+ if (nvmet_is_disc_subsys(nvmet_req_subsys(req)))
return nvmet_parse_discovery_cmd(req);
ret = nvmet_check_ctrl_status(req);
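The multipath.c and admin-cmd.c hunks swap open-coded flexible-array sizing for the flex_array_size() and struct_size() helpers from <linux/overflow.h>, which also saturate on overflow. The sketch below shows only the arithmetic those helpers encapsulate, using a simplified, hypothetical stand-in for struct nvme_ana_group_desc:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ana_group_desc {
        uint32_t grpid;
        uint32_t nnsids;
        uint32_t nsids[];               /* flexible array member */
};

int main(void)
{
        uint32_t nr_nsids = 8;

        /* struct_size(desc, nsids, nr_nsids): header plus the array */
        size_t total = sizeof(struct ana_group_desc) + nr_nsids * sizeof(uint32_t);
        /* flex_array_size(desc, nsids, nr_nsids): just the array */
        size_t payload = nr_nsids * sizeof(uint32_t);

        printf("descriptor: %zu bytes, nsid array: %zu bytes\n", total, payload);
        return 0;
}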
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index be5d82421e3a..091a0ca16361 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1233,6 +1233,44 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
}
CONFIGFS_ATTR(nvmet_subsys_, attr_model);
+static ssize_t nvmet_subsys_attr_discovery_nqn_show(struct config_item *item,
+ char *page)
+{
+ return snprintf(page, PAGE_SIZE, "%s\n",
+ nvmet_disc_subsys->subsysnqn);
+}
+
+static ssize_t nvmet_subsys_attr_discovery_nqn_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct nvmet_subsys *subsys = to_subsys(item);
+ char *subsysnqn;
+ int len;
+
+ len = strcspn(page, "\n");
+ if (!len)
+ return -EINVAL;
+
+ subsysnqn = kmemdup_nul(page, len, GFP_KERNEL);
+ if (!subsysnqn)
+ return -ENOMEM;
+
+ /*
+ * The discovery NQN must be different from the subsystem NQN.
+ */
+ if (!strcmp(subsysnqn, subsys->subsysnqn)) {
+ kfree(subsysnqn);
+ return -EBUSY;
+ }
+ down_write(&nvmet_config_sem);
+ kfree(nvmet_disc_subsys->subsysnqn);
+ nvmet_disc_subsys->subsysnqn = subsysnqn;
+ up_write(&nvmet_config_sem);
+
+ return count;
+}
+CONFIGFS_ATTR(nvmet_subsys_, attr_discovery_nqn);
+
#ifdef CONFIG_BLK_DEV_INTEGRITY
static ssize_t nvmet_subsys_attr_pi_enable_show(struct config_item *item,
char *page)
@@ -1262,6 +1300,7 @@ static struct configfs_attribute *nvmet_subsys_attrs[] = {
&nvmet_subsys_attr_attr_cntlid_min,
&nvmet_subsys_attr_attr_cntlid_max,
&nvmet_subsys_attr_attr_model,
+ &nvmet_subsys_attr_attr_discovery_nqn,
#ifdef CONFIG_BLK_DEV_INTEGRITY
&nvmet_subsys_attr_attr_pi_enable,
#endif
@@ -1553,6 +1592,8 @@ static void nvmet_port_release(struct config_item *item)
{
struct nvmet_port *port = to_nvmet_port(item);
+ /* Let in-flight controller teardown complete */
+ flush_scheduled_work();
list_del(&port->global_entry);
kfree(port->ana_state);
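The new nvmet_subsys_attr_discovery_nqn_store() above trims the configfs input at the first newline with strcspn() and duplicates it with kmemdup_nul() before publishing it under nvmet_config_sem. A userspace sketch of just that parse step, where the hypothetical dup_first_line() plays the role of strcspn() plus kmemdup_nul():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *dup_first_line(const char *page)
{
        size_t len = strcspn(page, "\n");       /* length up to the first newline */
        char *s;

        if (!len)
                return NULL;                    /* empty input: the handler returns -EINVAL */

        s = malloc(len + 1);                    /* kmemdup_nul() analogue */
        if (!s)
                return NULL;
        memcpy(s, page, len);
        s[len] = '\0';
        return s;
}

int main(void)
{
        char *nqn = dup_first_line("nqn.2014-08.org.example:discovery\n");

        if (nqn) {
                printf("parsed NQN: %s\n", nqn);
                free(nqn);
        }
        return 0;
}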
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b8425fa34300..5119c687de68 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -1140,7 +1140,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
* should verify iosqes,iocqes are zeroed, however that
* would break backwards compatibility, so don't enforce it.
*/
- if (ctrl->subsys->type != NVME_NQN_DISC &&
+ if (!nvmet_is_disc_subsys(ctrl->subsys) &&
(nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
ctrl->csts = NVME_CSTS_CFS;
@@ -1205,7 +1205,10 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
/* CC.EN timeout in 500msec units: */
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
- ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+ if (ctrl->ops->get_max_queue_size)
+ ctrl->cap |= ctrl->ops->get_max_queue_size(ctrl) - 1;
+ else
+ ctrl->cap |= NVMET_QUEUE_SIZE - 1;
if (nvmet_is_passthru_subsys(ctrl->subsys))
nvmet_passthrough_override_cap(ctrl);
@@ -1278,7 +1281,7 @@ bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
if (subsys->allow_any_host)
return true;
- if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
+ if (nvmet_is_disc_subsys(subsys)) /* allow all access to disc subsys */
return true;
list_for_each_entry(p, &subsys->hosts, entry) {
@@ -1367,6 +1370,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
mutex_init(&ctrl->lock);
ctrl->port = req->port;
+ ctrl->ops = req->ops;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
INIT_LIST_HEAD(&ctrl->async_events);
@@ -1405,13 +1409,11 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
}
ctrl->cntlid = ret;
- ctrl->ops = req->ops;
-
/*
* Discovery controllers may use some arbitrary high value
* in order to cleanup stale discovery sessions
*/
- if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
+ if (nvmet_is_disc_subsys(ctrl->subsys) && !kato)
kato = NVMET_DISC_KATO_MS;
/* keep-alive timeout in seconds */
@@ -1491,7 +1493,8 @@ static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
if (!port)
return NULL;
- if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
+ if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn) ||
+ !strcmp(nvmet_disc_subsys->subsysnqn, subsysnqn)) {
if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
return NULL;
return nvmet_disc_subsys;
@@ -1538,6 +1541,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys->max_qid = NVMET_NR_QUEUES;
break;
case NVME_NQN_DISC:
+ case NVME_NQN_CURR:
subsys->max_qid = 0;
break;
default:
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index 7aa62bc6ae84..c2162eef8ce1 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -146,7 +146,7 @@ static size_t discovery_log_entries(struct nvmet_req *req)
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
- size_t entries = 0;
+ size_t entries = 1;
list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
@@ -171,6 +171,7 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
u32 numrec = 0;
u16 status = 0;
void *buffer;
+ char traddr[NVMF_TRADDR_SIZE];
if (!nvmet_check_transfer_len(req, data_len))
return;
@@ -203,15 +204,19 @@ static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
status = NVME_SC_INTERNAL;
goto out;
}
-
hdr = buffer;
- list_for_each_entry(p, &req->port->subsystems, entry) {
- char traddr[NVMF_TRADDR_SIZE];
+ nvmet_set_disc_traddr(req, req->port, traddr);
+
+ nvmet_format_discovery_entry(hdr, req->port,
+ nvmet_disc_subsys->subsysnqn,
+ traddr, NVME_NQN_CURR, numrec);
+ numrec++;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- nvmet_set_disc_traddr(req, req->port, traddr);
nvmet_format_discovery_entry(hdr, req->port,
p->subsys->subsysnqn, traddr,
NVME_NQN_NVME, numrec);
@@ -268,6 +273,8 @@ static void nvmet_execute_disc_identify(struct nvmet_req *req)
memcpy_and_pad(id->fr, sizeof(id->fr),
UTS_RELEASE, strlen(UTS_RELEASE), ' ');
+ id->cntrltype = NVME_CTRL_DISC;
+
/* no limit on data transfer sizes for now */
id->mdts = 0;
id->cntlid = cpu_to_le16(ctrl->cntlid);
@@ -387,7 +394,7 @@ u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
- nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
+ nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_CURR);
return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0454cee920..70fb587e9413 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -221,7 +221,8 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
}
- pr_info("creating controller %d for subsystem %s for NQN %s%s.\n",
+ pr_info("creating %s controller %d for subsystem %s for NQN %s%s.\n",
+ nvmet_is_disc_subsys(ctrl->subsys) ? "discovery" : "nvm",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn,
ctrl->pi_support ? " T10-PI is enabled" : "");
req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 0fc2781ab970..70ca9dfc1771 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/module.h>
#include "nvmet.h"
@@ -86,7 +87,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
ns->bdev = NULL;
return ret;
}
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
ns->blksize_shift = blksize_bits(bdev_logical_block_size(ns->bdev));
ns->pi_type = 0;
@@ -107,7 +108,7 @@ int nvmet_bdev_ns_enable(struct nvmet_ns *ns)
void nvmet_bdev_ns_revalidate(struct nvmet_ns *ns)
{
- ns->size = i_size_read(ns->bdev->bd_inode);
+ ns->size = bdev_nr_bytes(ns->bdev);
}
u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index 1dd1a0fe2e81..6aa30f30b572 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -125,7 +125,7 @@ static ssize_t nvmet_file_submit_bvec(struct nvmet_req *req, loff_t pos,
return call_iter(iocb, &iter);
}
-static void nvmet_file_io_done(struct kiocb *iocb, long ret, long ret2)
+static void nvmet_file_io_done(struct kiocb *iocb, long ret)
{
struct nvmet_req *req = container_of(iocb, struct nvmet_req, f.iocb);
u16 status = NVME_SC_SUCCESS;
@@ -222,7 +222,7 @@ static bool nvmet_file_execute_io(struct nvmet_req *req, int ki_flags)
}
complete:
- nvmet_file_io_done(&req->f.iocb, ret, 0);
+ nvmet_file_io_done(&req->f.iocb, ret);
return true;
}
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 0285ccc7541f..eb1094254c82 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -384,6 +384,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = PTR_ERR(ctrl->ctrl.admin_q);
goto out_cleanup_fabrics_q;
}
+ /* reset stopped state for the fresh admin queue */
+ clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
@@ -398,7 +400,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
- blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ nvme_start_admin_queue(&ctrl->ctrl);
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
@@ -428,7 +430,7 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
nvme_loop_destroy_io_queues(ctrl);
}
- blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_stop_admin_queue(&ctrl->ctrl);
if (ctrl->ctrl.state == NVME_CTRL_LIVE)
nvme_shutdown_ctrl(&ctrl->ctrl);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 7143c7fa7464..af193423c10b 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -309,6 +309,7 @@ struct nvmet_fabrics_ops {
u16 (*install_queue)(struct nvmet_sq *nvme_sq);
void (*discovery_chg)(struct nvmet_port *port);
u8 (*get_mdts)(const struct nvmet_ctrl *ctrl);
+ u16 (*get_max_queue_size)(const struct nvmet_ctrl *ctrl);
};
#define NVMET_MAX_INLINE_BIOVEC 8
@@ -576,6 +577,11 @@ static inline struct nvmet_subsys *nvmet_req_subsys(struct nvmet_req *req)
return req->sq->ctrl->subsys;
}
+static inline bool nvmet_is_disc_subsys(struct nvmet_subsys *subsys)
+{
+ return subsys->type != NVME_NQN_NVME;
+}
+
#ifdef CONFIG_NVME_TARGET_PASSTHRU
void nvmet_passthru_subsys_free(struct nvmet_subsys *subsys);
int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 891174ccd44b..1deb4043e242 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -5,6 +5,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/atomic.h>
+#include <linux/blk-integrity.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/err.h>
@@ -1818,12 +1819,36 @@ restart:
mutex_unlock(&nvmet_rdma_queue_mutex);
}
+static void nvmet_rdma_destroy_port_queues(struct nvmet_rdma_port *port)
+{
+ struct nvmet_rdma_queue *queue, *tmp;
+ struct nvmet_port *nport = port->nport;
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+ list_for_each_entry_safe(queue, tmp, &nvmet_rdma_queue_list,
+ queue_list) {
+ if (queue->port != nport)
+ continue;
+
+ list_del_init(&queue->queue_list);
+ __nvmet_rdma_queue_disconnect(queue);
+ }
+ mutex_unlock(&nvmet_rdma_queue_mutex);
+}
+
static void nvmet_rdma_disable_port(struct nvmet_rdma_port *port)
{
struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL);
if (cm_id)
rdma_destroy_id(cm_id);
+
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet. Doing it here, after the RDMA-CM ID has been
+ * destroyed, guarantees that no new queue will be created.
+ */
+ nvmet_rdma_destroy_port_queues(port);
}
static int nvmet_rdma_enable_port(struct nvmet_rdma_port *port)
@@ -1975,6 +2000,11 @@ static u8 nvmet_rdma_get_mdts(const struct nvmet_ctrl *ctrl)
return NVMET_RDMA_MAX_MDTS;
}
+static u16 nvmet_rdma_get_max_queue_size(const struct nvmet_ctrl *ctrl)
+{
+ return NVME_RDMA_MAX_QUEUE_SIZE;
+}
+
static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.owner = THIS_MODULE,
.type = NVMF_TRTYPE_RDMA,
@@ -1986,6 +2016,7 @@ static const struct nvmet_fabrics_ops nvmet_rdma_ops = {
.delete_ctrl = nvmet_rdma_delete_ctrl,
.disc_traddr = nvmet_rdma_disc_port_addr,
.get_mdts = nvmet_rdma_get_mdts,
+ .get_max_queue_size = nvmet_rdma_get_max_queue_size,
};
static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 07ee347ea3f3..84c387e4bf43 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -702,7 +702,7 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd, bool last_in_batch)
struct nvmet_tcp_queue *queue = cmd->queue;
struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
struct kvec iov = {
- .iov_base = &cmd->exp_ddgst + cmd->offset,
+ .iov_base = (u8 *)&cmd->exp_ddgst + cmd->offset,
.iov_len = NVME_TCP_DIGEST_LENGTH - cmd->offset
};
int ret;
@@ -1096,7 +1096,7 @@ recv:
}
if (queue->hdr_digest &&
- nvmet_tcp_verify_hdgst(queue, &queue->pdu, queue->offset)) {
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
nvmet_tcp_fatal_error(queue); /* fatal */
return -EPROTO;
}
@@ -1428,6 +1428,7 @@ static void nvmet_tcp_uninit_data_in_cmds(struct nvmet_tcp_queue *queue)
static void nvmet_tcp_release_queue_work(struct work_struct *w)
{
+ struct page *page;
struct nvmet_tcp_queue *queue =
container_of(w, struct nvmet_tcp_queue, release_work);
@@ -1447,6 +1448,8 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
nvmet_tcp_free_crypto(queue);
ida_simple_remove(&nvmet_tcp_queue_ida, queue->idx);
+ page = virt_to_head_page(queue->pf_cache.va);
+ __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
kfree(queue);
}
@@ -1737,6 +1740,17 @@ err_port:
return ret;
}
+static void nvmet_tcp_destroy_port_queues(struct nvmet_tcp_port *port)
+{
+ struct nvmet_tcp_queue *queue;
+
+ mutex_lock(&nvmet_tcp_queue_mutex);
+ list_for_each_entry(queue, &nvmet_tcp_queue_list, queue_list)
+ if (queue->port == port)
+ kernel_sock_shutdown(queue->sock, SHUT_RDWR);
+ mutex_unlock(&nvmet_tcp_queue_mutex);
+}
+
static void nvmet_tcp_remove_port(struct nvmet_port *nport)
{
struct nvmet_tcp_port *port = nport->priv;
@@ -1746,6 +1760,11 @@ static void nvmet_tcp_remove_port(struct nvmet_port *nport)
port->sock->sk->sk_user_data = NULL;
write_unlock_bh(&port->sock->sk->sk_callback_lock);
cancel_work_sync(&port->accept_work);
+ /*
+ * Destroy the remaining queues, which do not belong to any
+ * controller yet.
+ */
+ nvmet_tcp_destroy_port_queues(port);
sock_release(port->sock);
kfree(port);
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 3dfeae8912df..80b5fd44ab1c 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -70,10 +70,6 @@ config OF_IRQ
def_bool y
depends on !SPARC && IRQ_DOMAIN
-config OF_NET
- depends on NETDEVICES
- def_bool y
-
config OF_RESERVED_MEM
def_bool OF_EARLY_FLATTREE
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index c13b982084a3..e0360a44306e 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
obj-$(CONFIG_OF_PROMTREE) += pdt.o
obj-$(CONFIG_OF_ADDRESS) += address.o
obj-$(CONFIG_OF_IRQ) += irq.o
-obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_UNITTEST) += unittest.o
obj-$(CONFIG_OF_RESERVED_MEM) += of_reserved_mem.o
obj-$(CONFIG_OF_RESOLVE) += resolver.o
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 0ac17256258d..61de453b885c 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -287,6 +287,28 @@ const void *of_get_property(const struct device_node *np, const char *name,
}
EXPORT_SYMBOL(of_get_property);
+/**
+ * of_get_cpu_hwid - Get the hardware ID from a CPU device node
+ *
+ * @cpun: CPU device node to get the hardware ID from
+ * @thread: The local thread number to get the hardware ID for.
+ *
+ * Return: The hardware ID for the CPU node or ~0ULL if not found.
+ */
+u64 of_get_cpu_hwid(struct device_node *cpun, unsigned int thread)
+{
+ const __be32 *cell;
+ int ac, len;
+
+ ac = of_n_addr_cells(cpun);
+ cell = of_get_property(cpun, "reg", &len);
+ if (!cell || !ac || ((sizeof(*cell) * ac * (thread + 1)) > len))
+ return ~0ULL;
+
+ cell += ac * thread;
+ return of_read_number(cell, ac);
+}
+
/*
* arch_match_cpu_phys_id - Match the given logical CPU and physical id
*
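of_get_cpu_hwid() above bounds-checks the "reg" property against #address-cells and the thread index, then lets of_read_number() fold the big-endian cells into a u64. A userspace sketch of that decode, where read_number() is a hypothetical stand-in, ntohl() replaces be32_to_cpu(), and the cell values are arbitrary examples:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* ntohl(), standing in for be32_to_cpu() */

static uint64_t read_number(const uint32_t *cells, int ac)
{
        uint64_t r = 0;

        while (ac--)
                r = (r << 32) | ntohl(*cells++);
        return r;
}

int main(void)
{
        /* #address-cells = <2>: hardware ID 0x100000001 split into two cells */
        uint32_t reg[2] = { htonl(0x00000001), htonl(0x00000001) };

        printf("hwid = 0x%llx\n", (unsigned long long)read_number(reg, 2));
        return 0;
}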
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 4546572af24b..bdca35284ceb 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -562,39 +562,35 @@ static int __init __reserved_mem_check_root(unsigned long node)
}
/*
- * __fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
+ * fdt_scan_reserved_mem() - scan a single FDT node for reserved memory
*/
-static int __init __fdt_scan_reserved_mem(unsigned long node, const char *uname,
- int depth, void *data)
+static int __init fdt_scan_reserved_mem(void)
{
- static int found;
- int err;
-
- if (!found && depth == 1 && strcmp(uname, "reserved-memory") == 0) {
- if (__reserved_mem_check_root(node) != 0) {
- pr_err("Reserved memory: unsupported node format, ignoring\n");
- /* break scan */
- return 1;
- }
- found = 1;
- /* scan next node */
- return 0;
- } else if (!found) {
- /* scan next node */
- return 0;
- } else if (found && depth < 2) {
- /* scanning of /reserved-memory has been finished */
- return 1;
+ int node, child;
+ const void *fdt = initial_boot_params;
+
+ node = fdt_path_offset(fdt, "/reserved-memory");
+ if (node < 0)
+ return -ENODEV;
+
+ if (__reserved_mem_check_root(node) != 0) {
+ pr_err("Reserved memory: unsupported node format, ignoring\n");
+ return -EINVAL;
}
- if (!of_fdt_device_is_available(initial_boot_params, node))
- return 0;
+ fdt_for_each_subnode(child, fdt, node) {
+ const char *uname;
+ int err;
- err = __reserved_mem_reserve_reg(node, uname);
- if (err == -ENOENT && of_get_flat_dt_prop(node, "size", NULL))
- fdt_reserved_mem_save_node(node, uname, 0, 0);
+ if (!of_fdt_device_is_available(fdt, child))
+ continue;
- /* scan next node */
+ uname = fdt_get_name(fdt, child, NULL);
+
+ err = __reserved_mem_reserve_reg(child, uname);
+ if (err == -ENOENT && of_get_flat_dt_prop(child, "size", NULL))
+ fdt_reserved_mem_save_node(child, uname, 0, 0);
+ }
return 0;
}
@@ -645,7 +641,7 @@ void __init early_init_fdt_scan_reserved_mem(void)
early_init_dt_reserve_memory_arch(base, size, false);
}
- of_scan_flat_dt(__fdt_scan_reserved_mem, NULL);
+ fdt_scan_reserved_mem();
fdt_init_reserved_mem();
fdt_reserve_elfcorehdr();
}
diff --git a/drivers/of/kobj.c b/drivers/of/kobj.c
index 6675b5e56960..7d3853a5a09a 100644
--- a/drivers/of/kobj.c
+++ b/drivers/of/kobj.c
@@ -5,13 +5,13 @@
#include "of_private.h"
/* true when node is initialized */
-static int of_node_is_initialized(struct device_node *node)
+static int of_node_is_initialized(const struct device_node *node)
{
return node && node->kobj.state_initialized;
}
/* true when node is attached (i.e. present on sysfs) */
-int of_node_is_attached(struct device_node *node)
+int of_node_is_attached(const struct device_node *node)
{
return node && node->kobj.state_in_sysfs;
}
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
deleted file mode 100644
index dbac3a172a11..000000000000
--- a/drivers/of/of_net.c
+++ /dev/null
@@ -1,145 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OF helpers for network devices.
- *
- * Initially copied out of arch/powerpc/kernel/prom_parse.c
- */
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/of_net.h>
-#include <linux/of_platform.h>
-#include <linux/phy.h>
-#include <linux/export.h>
-#include <linux/device.h>
-#include <linux/nvmem-consumer.h>
-
-/**
- * of_get_phy_mode - Get phy mode for given device_node
- * @np: Pointer to the given device_node
- * @interface: Pointer to the result
- *
- * The function gets phy interface string from property 'phy-mode' or
- * 'phy-connection-type'. The index in phy_modes table is set in
- * interface and 0 returned. In case of error interface is set to
- * PHY_INTERFACE_MODE_NA and an errno is returned, e.g. -ENODEV.
- */
-int of_get_phy_mode(struct device_node *np, phy_interface_t *interface)
-{
- const char *pm;
- int err, i;
-
- *interface = PHY_INTERFACE_MODE_NA;
-
- err = of_property_read_string(np, "phy-mode", &pm);
- if (err < 0)
- err = of_property_read_string(np, "phy-connection-type", &pm);
- if (err < 0)
- return err;
-
- for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
- if (!strcasecmp(pm, phy_modes(i))) {
- *interface = i;
- return 0;
- }
-
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(of_get_phy_mode);
-
-static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr)
-{
- struct property *pp = of_find_property(np, name, NULL);
-
- if (pp && pp->length == ETH_ALEN && is_valid_ether_addr(pp->value)) {
- memcpy(addr, pp->value, ETH_ALEN);
- return 0;
- }
- return -ENODEV;
-}
-
-static int of_get_mac_addr_nvmem(struct device_node *np, u8 *addr)
-{
- struct platform_device *pdev = of_find_device_by_node(np);
- struct nvmem_cell *cell;
- const void *mac;
- size_t len;
- int ret;
-
- /* Try lookup by device first, there might be a nvmem_cell_lookup
- * associated with a given device.
- */
- if (pdev) {
- ret = nvmem_get_mac_address(&pdev->dev, addr);
- put_device(&pdev->dev);
- return ret;
- }
-
- cell = of_nvmem_cell_get(np, "mac-address");
- if (IS_ERR(cell))
- return PTR_ERR(cell);
-
- mac = nvmem_cell_read(cell, &len);
- nvmem_cell_put(cell);
-
- if (IS_ERR(mac))
- return PTR_ERR(mac);
-
- if (len != ETH_ALEN || !is_valid_ether_addr(mac)) {
- kfree(mac);
- return -EINVAL;
- }
-
- memcpy(addr, mac, ETH_ALEN);
- kfree(mac);
-
- return 0;
-}
-
-/**
- * of_get_mac_address()
- * @np: Caller's Device Node
- * @addr: Pointer to a six-byte array for the result
- *
- * Search the device tree for the best MAC address to use. 'mac-address' is
- * checked first, because that is supposed to contain to "most recent" MAC
- * address. If that isn't set, then 'local-mac-address' is checked next,
- * because that is the default address. If that isn't set, then the obsolete
- * 'address' is checked, just in case we're using an old device tree. If any
- * of the above isn't set, then try to get MAC address from nvmem cell named
- * 'mac-address'.
- *
- * Note that the 'address' property is supposed to contain a virtual address of
- * the register set, but some DTS files have redefined that property to be the
- * MAC address.
- *
- * All-zero MAC addresses are rejected, because those could be properties that
- * exist in the device tree, but were not set by U-Boot. For example, the
- * DTS could define 'mac-address' and 'local-mac-address', with zero MAC
- * addresses. Some older U-Boots only initialized 'local-mac-address'. In
- * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists
- * but is all zeros.
- *
- * Return: 0 on success and errno in case of error.
-*/
-int of_get_mac_address(struct device_node *np, u8 *addr)
-{
- int ret;
-
- if (!np)
- return -ENODEV;
-
- ret = of_get_mac_addr(np, "mac-address", addr);
- if (!ret)
- return 0;
-
- ret = of_get_mac_addr(np, "local-mac-address", addr);
- if (!ret)
- return 0;
-
- ret = of_get_mac_addr(np, "address", addr);
- if (!ret)
- return 0;
-
- return of_get_mac_addr_nvmem(np, addr);
-}
-EXPORT_SYMBOL(of_get_mac_address);
diff --git a/drivers/of/of_numa.c b/drivers/of/of_numa.c
index fe6b13608e51..5949829a1b00 100644
--- a/drivers/of/of_numa.c
+++ b/drivers/of/of_numa.c
@@ -111,6 +111,8 @@ static int __init of_numa_parse_distance_map_v1(struct device_node *map)
return -EINVAL;
}
+ node_set(nodea, numa_nodes_parsed);
+
numa_set_distance(nodea, nodeb, distance);
/* Set default distance of node B->A same as A->B */
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 631489f7f8c0..9324483397f6 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -60,7 +60,7 @@ static inline int of_property_notify(int action, struct device_node *np,
#endif /* CONFIG_OF_DYNAMIC */
#if defined(CONFIG_OF_KOBJ)
-int of_node_is_attached(struct device_node *node);
+int of_node_is_attached(const struct device_node *node);
int __of_add_property_sysfs(struct device_node *np, struct property *pp);
void __of_remove_property_sysfs(struct device_node *np, struct property *prop);
void __of_update_property_sysfs(struct device_node *np, struct property *newprop,
@@ -127,19 +127,11 @@ struct device_node *__of_find_node_by_full_path(struct device_node *node,
extern const void *__of_get_property(const struct device_node *np,
const char *name, int *lenp);
extern int __of_add_property(struct device_node *np, struct property *prop);
-extern int __of_add_property_sysfs(struct device_node *np,
- struct property *prop);
extern int __of_remove_property(struct device_node *np, struct property *prop);
-extern void __of_remove_property_sysfs(struct device_node *np,
- struct property *prop);
extern int __of_update_property(struct device_node *np,
struct property *newprop, struct property **oldprop);
-extern void __of_update_property_sysfs(struct device_node *np,
- struct property *newprop, struct property *oldprop);
-extern int __of_attach_node_sysfs(struct device_node *np);
extern void __of_detach_node(struct device_node *np);
-extern void __of_detach_node_sysfs(struct device_node *np);
extern void __of_sysfs_remove_bin_file(struct device_node *np,
struct property *prop);
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59c1390cdf42..9da8835ba5a5 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -21,6 +21,7 @@
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/memblock.h>
+#include <linux/kmemleak.h>
#include "of_private.h"
@@ -46,6 +47,7 @@ static int __init early_init_dt_alloc_reserved_memory_arch(phys_addr_t size,
err = memblock_mark_nomap(base, size);
if (err)
memblock_free(base, size);
+ kmemleak_ignore_phys(base);
}
return err;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 74afbb7a4f5e..07813fb1ef37 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -222,7 +222,7 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
{
struct amba_device *dev;
const void *prop;
- int i, ret;
+ int ret;
pr_debug("Creating amba device %pOF\n", node);
@@ -253,10 +253,6 @@ static struct amba_device *of_amba_device_create(struct device_node *node,
if (prop)
dev->periphid = of_read_ulong(prop, 1);
- /* Decode the IRQs and address ranges */
- for (i = 0; i < AMBA_NR_IRQS; i++)
- dev->irq[i] = irq_of_parse_and_map(node, i);
-
ret = of_address_to_resource(node, 0, &dev->res);
if (ret) {
pr_err("amba: of_address_to_resource() failed (%d) for %pOF\n",
@@ -509,6 +505,7 @@ EXPORT_SYMBOL_GPL(of_platform_default_populate);
static const struct of_device_id reserved_mem_matches[] = {
{ .compatible = "qcom,rmtfs-mem" },
{ .compatible = "qcom,cmd-db" },
+ { .compatible = "qcom,smem" },
{ .compatible = "ramoops" },
{ .compatible = "nvmem-rmem" },
{}
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index a5d2d9254b2c..fbded24c608c 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -37,7 +37,9 @@ DTC_FLAGS_overlay_base += -@
DTC_FLAGS_testcases += -@
# suppress warnings about intentional errors
-DTC_FLAGS_testcases += -Wno-interrupts_property
+DTC_FLAGS_testcases += -Wno-interrupts_property \
+ -Wno-node_name_vs_property_name \
+ -Wno-interrupt_map
# Apply overlays statically with fdtoverlay. This is a build time test that
# the overlays can be applied successfully by fdtoverlay. This does not
@@ -82,6 +84,10 @@ apply_static_overlay_1 := overlay_0.dtbo \
apply_static_overlay_2 := overlay.dtbo
+DTC_FLAGS_static_base_1 += -Wno-interrupts_property \
+ -Wno-node_name_vs_property_name \
+ -Wno-interrupt_map
+
static_test_1-dtbs := static_base_1.dtb $(apply_static_overlay_1)
static_test_2-dtbs := static_base_2.dtb $(apply_static_overlay_2)
diff --git a/drivers/of/unittest-data/tests-interrupts.dtsi b/drivers/of/unittest-data/tests-interrupts.dtsi
index 9b60a549f502..ecc74dbcc373 100644
--- a/drivers/of/unittest-data/tests-interrupts.dtsi
+++ b/drivers/of/unittest-data/tests-interrupts.dtsi
@@ -31,6 +31,21 @@
test_intmap1: intmap1 {
#interrupt-cells = <2>;
+ /*
+ * #address-cells is required
+ *
+ * The property is not provided in this node to
+ * test that the code will properly handle
+ * this case for legacy .dts files.
+ *
+ * Not having #address-cells will result in a
+ * warning from dtc starting with
+ * version v1.6.1-19-g0a3a9d3449c8
+ * The warning is suppressed by adding
+ * -Wno-interrupt_map to the Makefile for all
+			 * .dts files that include this .dtsi
+ #address-cells = <1>;
+ */
interrupt-map = <0x5000 1 2 &test_intc0 15>;
};
@@ -46,6 +61,10 @@
interrupts-extended0 {
reg = <0x5000 0x100>;
+ /*
+ * Do not remove &test_intmap1 from this
+ * property - see comment in node intmap1
+ */
interrupts-extended = <&test_intc0 1>,
<&test_intc1 2 3 4>,
<&test_intc2 5 6>,
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 8c056972a6dd..481ba8682ebf 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -1129,6 +1129,12 @@ static void __init of_unittest_parse_interrupts_extended(void)
passed &= (args.args[1] == 14);
break;
case 6:
+ /*
+ * Tests child node that is missing property
+ * #address-cells. See the comments in
+ * drivers/of/unittest-data/tests-interrupts.dtsi
+ * nodes intmap1 and interrupts-extended0
+ */
passed &= !rc;
passed &= (args.args_count == 1);
passed &= (args.args[0] == 15);
@@ -1688,19 +1694,19 @@ static void __init of_unittest_overlay_gpio(void)
*/
EXPECT_BEGIN(KERN_INFO,
- "GPIO line <<int>> (line-B-input) hogged as input\n");
+ "gpio-<<int>> (line-B-input): hogged as input\n");
EXPECT_BEGIN(KERN_INFO,
- "GPIO line <<int>> (line-A-input) hogged as input\n");
+ "gpio-<<int>> (line-A-input): hogged as input\n");
ret = platform_driver_register(&unittest_gpio_driver);
if (unittest(ret == 0, "could not register unittest gpio driver\n"))
return;
EXPECT_END(KERN_INFO,
- "GPIO line <<int>> (line-A-input) hogged as input\n");
+ "gpio-<<int>> (line-A-input): hogged as input\n");
EXPECT_END(KERN_INFO,
- "GPIO line <<int>> (line-B-input) hogged as input\n");
+ "gpio-<<int>> (line-B-input): hogged as input\n");
unittest(probe_pass_count + 2 == unittest_gpio_probe_pass_count,
"unittest_gpio_probe() failed or not called\n");
@@ -1727,7 +1733,7 @@ static void __init of_unittest_overlay_gpio(void)
chip_request_count = unittest_gpio_chip_request_count;
EXPECT_BEGIN(KERN_INFO,
- "GPIO line <<int>> (line-D-input) hogged as input\n");
+ "gpio-<<int>> (line-D-input): hogged as input\n");
/* overlay_gpio_03 contains gpio node and child gpio hog node */
@@ -1735,7 +1741,7 @@ static void __init of_unittest_overlay_gpio(void)
"Adding overlay 'overlay_gpio_03' failed\n");
EXPECT_END(KERN_INFO,
- "GPIO line <<int>> (line-D-input) hogged as input\n");
+ "gpio-<<int>> (line-D-input): hogged as input\n");
unittest(probe_pass_count + 1 == unittest_gpio_probe_pass_count,
"unittest_gpio_probe() failed or not called\n");
@@ -1774,7 +1780,7 @@ static void __init of_unittest_overlay_gpio(void)
*/
EXPECT_BEGIN(KERN_INFO,
- "GPIO line <<int>> (line-C-input) hogged as input\n");
+ "gpio-<<int>> (line-C-input): hogged as input\n");
/* overlay_gpio_04b contains child gpio hog node */
@@ -1782,7 +1788,7 @@ static void __init of_unittest_overlay_gpio(void)
"Adding overlay 'overlay_gpio_04b' failed\n");
EXPECT_END(KERN_INFO,
- "GPIO line <<int>> (line-C-input) hogged as input\n");
+ "gpio-<<int>> (line-C-input): hogged as input\n");
unittest(chip_request_count + 1 == unittest_gpio_chip_request_count,
"unittest_gpio_chip_request() called %d times (expected 1 time)\n",
@@ -3094,6 +3100,8 @@ static __init void of_unittest_overlay_high_level(void)
if (!strcmp(np->full_name, base_child->full_name)) {
unittest(0, "illegal node name in overlay_base %pOFn",
np);
+ of_node_put(np);
+ of_node_put(base_child);
return;
}
}
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 260a06fb78a6..a42dbf448860 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -906,7 +906,7 @@ acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev,
* choose highest power _SxD or any lower power
*/
-static pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
{
int acpi_state, d_max;
@@ -965,22 +965,20 @@ int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
return 0;
}
-static bool acpi_pci_power_manageable(struct pci_dev *dev)
+bool acpi_pci_power_manageable(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
- if (!adev)
- return false;
- return acpi_device_power_manageable(adev);
+ return adev && acpi_device_power_manageable(adev);
}
-static bool acpi_pci_bridge_d3(struct pci_dev *dev)
+bool acpi_pci_bridge_d3(struct pci_dev *dev)
{
const union acpi_object *obj;
struct acpi_device *adev;
struct pci_dev *rpdev;
- if (!dev->is_hotplug_bridge)
+ if (acpi_pci_disabled || !dev->is_hotplug_bridge)
return false;
/* Assume D3 support if the bridge is power-manageable by ACPI. */
@@ -1008,7 +1006,7 @@ static bool acpi_pci_bridge_d3(struct pci_dev *dev)
return obj->integer.value == 1;
}
-static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
static const u8 state_conv[] = {
@@ -1046,7 +1044,7 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
return error;
}
-static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
+pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
static const pci_power_t state_conv[] = {
@@ -1068,7 +1066,7 @@ static pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
return state_conv[state];
}
-static void acpi_pci_refresh_power_state(struct pci_dev *dev)
+void acpi_pci_refresh_power_state(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
@@ -1093,17 +1091,23 @@ static int acpi_pci_propagate_wakeup(struct pci_bus *bus, bool enable)
return 0;
}
-static int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
+int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
{
+ if (acpi_pci_disabled)
+ return 0;
+
if (acpi_pm_device_can_wakeup(&dev->dev))
return acpi_pm_set_device_wakeup(&dev->dev, enable);
return acpi_pci_propagate_wakeup(dev->bus, enable);
}
-static bool acpi_pci_need_resume(struct pci_dev *dev)
+bool acpi_pci_need_resume(struct pci_dev *dev)
{
- struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ struct acpi_device *adev;
+
+ if (acpi_pci_disabled)
+ return false;
/*
* In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
@@ -1115,6 +1119,7 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
if (pci_is_bridge(dev) && acpi_target_system_state() != ACPI_STATE_S0)
return true;
+ adev = ACPI_COMPANION(&dev->dev);
if (!adev || !acpi_device_power_manageable(adev))
return false;
@@ -1128,17 +1133,6 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
return !!adev->power.flags.dsw_present;
}
-static const struct pci_platform_pm_ops acpi_pci_platform_pm = {
- .bridge_d3 = acpi_pci_bridge_d3,
- .is_manageable = acpi_pci_power_manageable,
- .set_state = acpi_pci_set_power_state,
- .get_state = acpi_pci_get_power_state,
- .refresh_state = acpi_pci_refresh_power_state,
- .choose_state = acpi_pci_choose_state,
- .set_wakeup = acpi_pci_wakeup,
- .need_resume = acpi_pci_need_resume,
-};
-
void acpi_pci_add_bus(struct pci_bus *bus)
{
union acpi_object *obj;
@@ -1356,13 +1350,9 @@ static void pci_acpi_set_external_facing(struct pci_dev *dev)
dev->external_facing = 1;
}
-static void pci_acpi_setup(struct device *dev)
+void pci_acpi_setup(struct device *dev, struct acpi_device *adev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- struct acpi_device *adev = ACPI_COMPANION(dev);
-
- if (!adev)
- return;
pci_acpi_optimize_delay(pci_dev, adev->handle);
pci_acpi_set_external_facing(pci_dev);
@@ -1386,14 +1376,10 @@ static void pci_acpi_setup(struct device *dev)
acpi_device_power_add_dependent(adev, dev);
}
-static void pci_acpi_cleanup(struct device *dev)
+void pci_acpi_cleanup(struct device *dev, struct acpi_device *adev)
{
- struct acpi_device *adev = ACPI_COMPANION(dev);
struct pci_dev *pci_dev = to_pci_dev(dev);
- if (!adev)
- return;
-
pci_acpi_remove_edr_notifier(pci_dev);
pci_acpi_remove_pm_notifier(adev);
if (adev->wakeup.flags.valid) {
@@ -1405,20 +1391,6 @@ static void pci_acpi_cleanup(struct device *dev)
}
}
-static bool pci_acpi_bus_match(struct device *dev)
-{
- return dev_is_pci(dev);
-}
-
-static struct acpi_bus_type acpi_pci_bus = {
- .name = "PCI",
- .match = pci_acpi_bus_match,
- .find_companion = acpi_pci_find_companion,
- .setup = pci_acpi_setup,
- .cleanup = pci_acpi_cleanup,
-};
-
-
static struct fwnode_handle *(*pci_msi_get_fwnode_cb)(struct device *dev);
/**
@@ -1460,8 +1432,6 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus)
static int __init acpi_pci_init(void)
{
- int ret;
-
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_MSI) {
pr_info("ACPI FADT declares the system doesn't support MSI, so disable it\n");
pci_no_msi();
@@ -1472,11 +1442,9 @@ static int __init acpi_pci_init(void)
pcie_no_aspm();
}
- ret = register_acpi_bus_type(&acpi_pci_bus);
- if (ret)
+ if (acpi_pci_disabled)
return 0;
- pci_set_platform_pm(&acpi_pci_platform_pm);
acpi_pci_slot_init();
acpiphp_init();
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c
index aafd58da3a89..fbfd78127123 100644
--- a/drivers/pci/pci-mid.c
+++ b/drivers/pci/pci-mid.c
@@ -16,45 +16,23 @@
#include "pci.h"
-static bool mid_pci_power_manageable(struct pci_dev *dev)
+static bool pci_mid_pm_enabled __read_mostly;
+
+bool pci_use_mid_pm(void)
{
- return true;
+ return pci_mid_pm_enabled;
}
-static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
{
return intel_mid_pci_set_power_state(pdev, state);
}
-static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
{
return intel_mid_pci_get_power_state(pdev);
}
-static pci_power_t mid_pci_choose_state(struct pci_dev *pdev)
-{
- return PCI_D3hot;
-}
-
-static int mid_pci_wakeup(struct pci_dev *dev, bool enable)
-{
- return 0;
-}
-
-static bool mid_pci_need_resume(struct pci_dev *dev)
-{
- return false;
-}
-
-static const struct pci_platform_pm_ops mid_pci_platform_pm = {
- .is_manageable = mid_pci_power_manageable,
- .set_state = mid_pci_set_power_state,
- .get_state = mid_pci_get_power_state,
- .choose_state = mid_pci_choose_state,
- .set_wakeup = mid_pci_wakeup,
- .need_resume = mid_pci_need_resume,
-};
-
/*
* This table should be in sync with the one in
* arch/x86/platform/intel-mid/pwr.c.
@@ -71,7 +49,8 @@ static int __init mid_pci_init(void)
id = x86_match_cpu(lpss_cpu_ids);
if (id)
- pci_set_platform_pm(&mid_pci_platform_pm);
+ pci_mid_pm_enabled = true;
+
return 0;
}
arch_initcall(mid_pci_init);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index ce2ab62b64cf..39c65b5f92c1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -972,61 +972,67 @@ static void pci_restore_bars(struct pci_dev *dev)
pci_update_resource(dev, i);
}
-static const struct pci_platform_pm_ops *pci_platform_pm;
-
-int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
-{
- if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
- !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
- return -EINVAL;
- pci_platform_pm = ops;
- return 0;
-}
-
static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
+ if (pci_use_mid_pm())
+ return true;
+
+ return acpi_pci_power_manageable(dev);
}
static inline int platform_pci_set_power_state(struct pci_dev *dev,
pci_power_t t)
{
- return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
+ if (pci_use_mid_pm())
+ return mid_pci_set_power_state(dev, t);
+
+ return acpi_pci_set_power_state(dev, t);
}
static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
+ if (pci_use_mid_pm())
+ return mid_pci_get_power_state(dev);
+
+ return acpi_pci_get_power_state(dev);
}
static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
- if (pci_platform_pm && pci_platform_pm->refresh_state)
- pci_platform_pm->refresh_state(dev);
+ if (!pci_use_mid_pm())
+ acpi_pci_refresh_power_state(dev);
}
static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
- return pci_platform_pm ?
- pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
+ if (pci_use_mid_pm())
+ return PCI_POWER_ERROR;
+
+ return acpi_pci_choose_state(dev);
}
static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
- return pci_platform_pm ?
- pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
+ if (pci_use_mid_pm())
+ return PCI_POWER_ERROR;
+
+ return acpi_pci_wakeup(dev, enable);
}
static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
- return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
+ if (pci_use_mid_pm())
+ return false;
+
+ return acpi_pci_need_resume(dev);
}
static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
- if (pci_platform_pm && pci_platform_pm->bridge_d3)
- return pci_platform_pm->bridge_d3(dev);
- return false;
+ if (pci_use_mid_pm())
+ return false;
+
+ return acpi_pci_bridge_d3(dev);
}
/**
@@ -1185,9 +1191,7 @@ void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
*/
void pci_refresh_power_state(struct pci_dev *dev)
{
- if (platform_pci_power_manageable(dev))
- platform_pci_refresh_power_state(dev);
-
+ platform_pci_refresh_power_state(dev);
pci_update_current_state(dev, dev->current_state);
}
@@ -1200,14 +1204,10 @@ int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
int error;
- if (platform_pci_power_manageable(dev)) {
- error = platform_pci_set_power_state(dev, state);
- if (!error)
- pci_update_current_state(dev, state);
- } else
- error = -ENODEV;
-
- if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
+ error = platform_pci_set_power_state(dev, state);
+ if (!error)
+ pci_update_current_state(dev, state);
+ else if (!dev->pm_cap) /* Fall back to PCI_D0 */
dev->current_state = PCI_D0;
return error;
@@ -1388,44 +1388,6 @@ int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
}
EXPORT_SYMBOL(pci_set_power_state);
-/**
- * pci_choose_state - Choose the power state of a PCI device
- * @dev: PCI device to be suspended
- * @state: target sleep state for the whole system. This is the value
- * that is passed to suspend() function.
- *
- * Returns PCI power state suitable for given device and given system
- * message.
- */
-pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
-{
- pci_power_t ret;
-
- if (!dev->pm_cap)
- return PCI_D0;
-
- ret = platform_pci_choose_state(dev);
- if (ret != PCI_POWER_ERROR)
- return ret;
-
- switch (state.event) {
- case PM_EVENT_ON:
- return PCI_D0;
- case PM_EVENT_FREEZE:
- case PM_EVENT_PRETHAW:
- /* REVISIT both freeze and pre-thaw "should" use D0 */
- case PM_EVENT_SUSPEND:
- case PM_EVENT_HIBERNATE:
- return PCI_D3hot;
- default:
- pci_info(dev, "unrecognized suspend event %d\n",
- state.event);
- BUG();
- }
- return PCI_D0;
-}
-EXPORT_SYMBOL(pci_choose_state);
-
#define PCI_EXP_SAVE_REGS 7
static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
@@ -2577,8 +2539,6 @@ EXPORT_SYMBOL(pci_wake_from_d3);
*/
static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
{
- pci_power_t target_state = PCI_D3hot;
-
if (platform_pci_power_manageable(dev)) {
/*
* Call the platform to find the target state for the device.
@@ -2588,32 +2548,29 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
switch (state) {
case PCI_POWER_ERROR:
case PCI_UNKNOWN:
- break;
+ return PCI_D3hot;
+
case PCI_D1:
case PCI_D2:
if (pci_no_d1d2(dev))
- break;
- fallthrough;
- default:
- target_state = state;
+ return PCI_D3hot;
}
- return target_state;
+ return state;
}
- if (!dev->pm_cap)
- target_state = PCI_D0;
-
/*
* If the device is in D3cold even though it's not power-manageable by
* the platform, it may have been powered down by non-standard means.
* Best to let it slumber.
*/
if (dev->current_state == PCI_D3cold)
- target_state = PCI_D3cold;
+ return PCI_D3cold;
+ else if (!dev->pm_cap)
+ return PCI_D0;
if (wakeup && dev->pme_support) {
- pci_power_t state = target_state;
+ pci_power_t state = PCI_D3hot;
/*
* Find the deepest state from which the device can generate
@@ -2628,7 +2585,7 @@ static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
return PCI_D0;
}
- return target_state;
+ return PCI_D3hot;
}
/**
@@ -2681,8 +2638,13 @@ EXPORT_SYMBOL(pci_prepare_to_sleep);
*/
int pci_back_from_sleep(struct pci_dev *dev)
{
+ int ret = pci_set_power_state(dev, PCI_D0);
+
+ if (ret)
+ return ret;
+
pci_enable_wake(dev, PCI_D0, false);
- return pci_set_power_state(dev, PCI_D0);
+ return 0;
}
EXPORT_SYMBOL(pci_back_from_sleep);
@@ -2842,6 +2804,22 @@ void pci_dev_complete_resume(struct pci_dev *pci_dev)
spin_unlock_irq(&dev->power.lock);
}
+/**
+ * pci_choose_state - Choose the power state of a PCI device.
+ * @dev: Target PCI device.
+ * @state: Target state for the whole system.
+ *
+ * Returns PCI power state suitable for @dev and @state.
+ */
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
+{
+ if (state.event == PM_EVENT_ON)
+ return PCI_D0;
+
+ return pci_target_state(dev, false);
+}
+EXPORT_SYMBOL(pci_choose_state);
+
void pci_config_pm_runtime_get(struct pci_dev *pdev)
{
struct device *dev = &pdev->dev;
@@ -4123,6 +4101,7 @@ unsigned long __weak pci_address_to_pio(phys_addr_t address)
* architectures that have memory mapped IO functions defined (and the
* PCI_IOBASE value defined) should call this function.
*/
+#ifndef pci_remap_iospace
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
{
#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
@@ -4146,6 +4125,7 @@ int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
#endif
}
EXPORT_SYMBOL(pci_remap_iospace);
+#endif
/**
* pci_unmap_iospace - Unmap the memory mapped I/O space
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 1cce56c2aea0..bd39098c57da 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -63,45 +63,6 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
#define PCI_PM_D3HOT_WAIT 10 /* msec */
#define PCI_PM_D3COLD_WAIT 100 /* msec */
-/**
- * struct pci_platform_pm_ops - Firmware PM callbacks
- *
- * @bridge_d3: Does the bridge allow entering into D3
- *
- * @is_manageable: returns 'true' if given device is power manageable by the
- * platform firmware
- *
- * @set_state: invokes the platform firmware to set the device's power state
- *
- * @get_state: queries the platform firmware for a device's current power state
- *
- * @refresh_state: asks the platform to refresh the device's power state data
- *
- * @choose_state: returns PCI power state of given device preferred by the
- * platform; to be used during system-wide transitions from a
- * sleeping state to the working state and vice versa
- *
- * @set_wakeup: enables/disables wakeup capability for the device
- *
- * @need_resume: returns 'true' if the given device (which is currently
- * suspended) needs to be resumed to be configured for system
- * wakeup.
- *
- * If given platform is generally capable of power managing PCI devices, all of
- * these callbacks are mandatory.
- */
-struct pci_platform_pm_ops {
- bool (*bridge_d3)(struct pci_dev *dev);
- bool (*is_manageable)(struct pci_dev *dev);
- int (*set_state)(struct pci_dev *dev, pci_power_t state);
- pci_power_t (*get_state)(struct pci_dev *dev);
- void (*refresh_state)(struct pci_dev *dev);
- pci_power_t (*choose_state)(struct pci_dev *dev);
- int (*set_wakeup)(struct pci_dev *dev, bool enable);
- bool (*need_resume)(struct pci_dev *dev);
-};
-
-int pci_set_platform_pm(const struct pci_platform_pm_ops *ops);
void pci_update_current_state(struct pci_dev *dev, pci_power_t state);
void pci_refresh_power_state(struct pci_dev *dev);
int pci_power_up(struct pci_dev *dev);
@@ -725,17 +686,53 @@ int pci_acpi_program_hp_params(struct pci_dev *dev);
extern const struct attribute_group pci_dev_acpi_attr_group;
void pci_set_acpi_fwnode(struct pci_dev *dev);
int pci_dev_acpi_reset(struct pci_dev *dev, bool probe);
+bool acpi_pci_power_manageable(struct pci_dev *dev);
+bool acpi_pci_bridge_d3(struct pci_dev *dev);
+int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state);
+pci_power_t acpi_pci_get_power_state(struct pci_dev *dev);
+void acpi_pci_refresh_power_state(struct pci_dev *dev);
+int acpi_pci_wakeup(struct pci_dev *dev, bool enable);
+bool acpi_pci_need_resume(struct pci_dev *dev);
+pci_power_t acpi_pci_choose_state(struct pci_dev *pdev);
#else
static inline int pci_dev_acpi_reset(struct pci_dev *dev, bool probe)
{
return -ENOTTY;
}
-
static inline void pci_set_acpi_fwnode(struct pci_dev *dev) {}
static inline int pci_acpi_program_hp_params(struct pci_dev *dev)
{
return -ENODEV;
}
+static inline bool acpi_pci_power_manageable(struct pci_dev *dev)
+{
+ return false;
+}
+static inline bool acpi_pci_bridge_d3(struct pci_dev *dev)
+{
+ return false;
+}
+static inline int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ return -ENODEV;
+}
+static inline pci_power_t acpi_pci_get_power_state(struct pci_dev *dev)
+{
+ return PCI_UNKNOWN;
+}
+static inline void acpi_pci_refresh_power_state(struct pci_dev *dev) {}
+static inline int acpi_pci_wakeup(struct pci_dev *dev, bool enable)
+{
+ return -ENODEV;
+}
+static inline bool acpi_pci_need_resume(struct pci_dev *dev)
+{
+ return false;
+}
+static inline pci_power_t acpi_pci_choose_state(struct pci_dev *pdev)
+{
+ return PCI_POWER_ERROR;
+}
#endif
#ifdef CONFIG_PCIEASPM
@@ -744,4 +741,23 @@ extern const struct attribute_group aspm_ctrl_attr_group;
extern const struct attribute_group pci_dev_reset_method_attr_group;
+#ifdef CONFIG_X86_INTEL_MID
+bool pci_use_mid_pm(void);
+int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
+pci_power_t mid_pci_get_power_state(struct pci_dev *pdev);
+#else
+static inline bool pci_use_mid_pm(void)
+{
+ return false;
+}
+static inline int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ return -ENODEV;
+}
+static inline pci_power_t mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ return PCI_UNKNOWN;
+}
+#endif
+
#endif /* DRIVERS_PCI_H */
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index a6fbc709913e..87a33ecc2cf1 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -356,6 +356,7 @@ static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
case 0:
case 2:
status |= SS_3VCARD; /* 3V card */
+ break;
case 3:
break; /* 5V card: set nothing */
default:
diff --git a/drivers/pcmcia/pcmcia_cis.c b/drivers/pcmcia/pcmcia_cis.c
index d2d0ed4b27c8..f650e19a315c 100644
--- a/drivers/pcmcia/pcmcia_cis.c
+++ b/drivers/pcmcia/pcmcia_cis.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/cistpl.h>
@@ -398,7 +399,6 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
void *priv)
{
struct net_device *dev = priv;
- int i;
if (tuple->TupleData[0] != CISTPL_FUNCE_LAN_NODE_ID)
return -EINVAL;
@@ -412,8 +412,7 @@ static int pcmcia_do_get_mac(struct pcmcia_device *p_dev, tuple_t *tuple,
dev_warn(&p_dev->dev, "Invalid header for LAN_NODE_ID\n");
return -EINVAL;
}
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = tuple->TupleData[i+2];
+ eth_hw_addr_set(dev, &tuple->TupleData[2]);
return 0;
}
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 77522e5efe11..4374af292e6d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -36,7 +36,7 @@ config ARM_CCI5xx_PMU
config ARM_CCN
tristate "ARM CCN driver support"
- depends on ARM || ARM64
+ depends on ARM || ARM64 || COMPILE_TEST
help
PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
interconnect.
@@ -62,7 +62,8 @@ config ARM_PMU_ACPI
config ARM_SMMU_V3_PMU
tristate "ARM SMMUv3 Performance Monitors Extension"
- depends on ARM64 && ACPI
+ depends on (ARM64 && ACPI) || (COMPILE_TEST && 64BIT)
+ depends on GENERIC_MSI_IRQ_DOMAIN
help
Provides support for the ARM SMMUv3 Performance Monitor Counter
Groups (PMCG), which provide monitoring of transactions passing
@@ -80,7 +81,7 @@ config ARM_DSU_PMU
config FSL_IMX8_DDR_PMU
tristate "Freescale i.MX8 DDR perf monitor"
- depends on ARCH_MXC
+ depends on ARCH_MXC || COMPILE_TEST
help
Provides support for the DDR performance monitor in i.MX8, which
can give information about memory throughput and other related
@@ -108,7 +109,8 @@ config QCOM_L3_PMU
config THUNDERX2_PMU
tristate "Cavium ThunderX2 SoC PMU UNCORE"
- depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
+ depends on ARCH_THUNDER2 || COMPILE_TEST
+ depends on NUMA && ACPI
default m
help
Provides support for ThunderX2 UNCORE events.
@@ -116,7 +118,7 @@ config THUNDERX2_PMU
in the DDR4 Memory Controller (DMC).
config XGENE_PMU
- depends on ARCH_XGENE
+ depends on ARCH_XGENE || (COMPILE_TEST && 64BIT)
bool "APM X-Gene SoC PMU"
default n
help
diff --git a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
index 83264ec0a957..bad99d149172 100644
--- a/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
+++ b/drivers/perf/hisilicon/hisi_uncore_pa_pmu.c
@@ -27,7 +27,7 @@
#define PA_INT_CLEAR 0x1c7c
#define PA_EVENT_TYPE0 0x1c80
#define PA_PMU_VERSION 0x1cf0
-#define PA_EVENT_CNT0_L 0x1f00
+#define PA_EVENT_CNT0_L 0x1d00
#define PA_EVTYPE_MASK 0xff
#define PA_NR_COUNTERS 0x8
diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c
index 5b093badd0f6..7640491aab12 100644
--- a/drivers/perf/qcom_l2_pmu.c
+++ b/drivers/perf/qcom_l2_pmu.c
@@ -840,17 +840,14 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev->parent);
struct platform_device *sdev = to_platform_device(dev);
+ struct acpi_device *adev = ACPI_COMPANION(dev);
struct l2cache_pmu *l2cache_pmu = data;
struct cluster_pmu *cluster;
- struct acpi_device *device;
unsigned long fw_cluster_id;
int err;
int irq;
- if (acpi_bus_get_device(ACPI_HANDLE(dev), &device))
- return -ENODEV;
-
- if (kstrtoul(device->pnp.unique_id, 10, &fw_cluster_id) < 0) {
+ if (!adev || kstrtoul(adev->pnp.unique_id, 10, &fw_cluster_id) < 0) {
dev_err(&pdev->dev, "unable to read ACPI uid\n");
return -ENODEV;
}
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
index fc1a376ee906..05378c0fd8f3 100644
--- a/drivers/perf/thunderx2_pmu.c
+++ b/drivers/perf/thunderx2_pmu.c
@@ -487,7 +487,7 @@ static void tx2_uncore_event_update(struct perf_event *event)
new = reg_readl(hwc->event_base);
prev = local64_xchg(&hwc->prev_count, new);
/* handles rollover of 32 bit counter */
- delta = (u32)(((1UL << 32) - prev) + new);
+ delta = (u32)(((1ULL << 32) - prev) + new);
}
/* DMC event data_transfers granularity is 16 Bytes, convert it to 64 */
diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
index b1adaecc26f8..bbfad209c890 100644
--- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c
+++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c
@@ -183,7 +183,7 @@ static int bcm_ns_usb3_mdio_phy_write(struct bcm_ns_usb3 *usb3, u16 reg,
{
struct mdio_device *mdiodev = usb3->mdiodev;
- return mdiobus_write(mdiodev->bus, mdiodev->addr, reg, value);
+ return mdiodev_write(mdiodev, reg, value);
}
static int bcm_ns_usb3_mdio_probe(struct mdio_device *mdiodev)
diff --git a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
index 4c7d11d2b378..9e7434a0d3e0 100644
--- a/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
+++ b/drivers/phy/broadcom/phy-bcm-ns2-pcie.c
@@ -29,14 +29,12 @@ static int ns2_pci_phy_init(struct phy *p)
int rc;
/* select the AFE 100MHz block page */
- rc = mdiobus_write(mdiodev->bus, mdiodev->addr,
- BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
+ rc = mdiodev_write(mdiodev, BLK_ADDR_REG_OFFSET, PLL_AFE1_100MHZ_BLK);
if (rc)
goto err;
/* set the 100 MHz reference clock amplitude to 2.05 v */
- rc = mdiobus_write(mdiodev->bus, mdiodev->addr,
- PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
+ rc = mdiodev_write(mdiodev, PLL_CLK_AMP_OFFSET, PLL_CLK_AMP_2P05V);
if (rc)
goto err;
diff --git a/drivers/pinctrl/bcm/pinctrl-ns.c b/drivers/pinctrl/bcm/pinctrl-ns.c
index e79690bd8b85..d7f8175d2c1c 100644
--- a/drivers/pinctrl/bcm/pinctrl-ns.c
+++ b/drivers/pinctrl/bcm/pinctrl-ns.c
@@ -5,7 +5,6 @@
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -13,7 +12,6 @@
#include <linux/pinctrl/pinctrl.h>
#include <linux/pinctrl/pinmux.h>
#include <linux/platform_device.h>
-#include <linux/regmap.h>
#include <linux/slab.h>
#define FLAG_BCM4708 BIT(1)
@@ -24,8 +22,7 @@ struct ns_pinctrl {
struct device *dev;
unsigned int chipset_flag;
struct pinctrl_dev *pctldev;
- struct regmap *regmap;
- u32 offset;
+ void __iomem *base;
struct pinctrl_desc pctldesc;
struct ns_pinctrl_group *groups;
@@ -232,9 +229,9 @@ static int ns_pinctrl_set_mux(struct pinctrl_dev *pctrl_dev,
unset |= BIT(pin_number);
}
- regmap_read(ns_pinctrl->regmap, ns_pinctrl->offset, &tmp);
+ tmp = readl(ns_pinctrl->base);
tmp &= ~unset;
- regmap_write(ns_pinctrl->regmap, ns_pinctrl->offset, tmp);
+ writel(tmp, ns_pinctrl->base);
return 0;
}
@@ -266,13 +263,13 @@ static const struct of_device_id ns_pinctrl_of_match_table[] = {
static int ns_pinctrl_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct device_node *np = dev->of_node;
const struct of_device_id *of_id;
struct ns_pinctrl *ns_pinctrl;
struct pinctrl_desc *pctldesc;
struct pinctrl_pin_desc *pin;
struct ns_pinctrl_group *group;
struct ns_pinctrl_function *function;
+ struct resource *res;
int i;
ns_pinctrl = devm_kzalloc(dev, sizeof(*ns_pinctrl), GFP_KERNEL);
@@ -290,18 +287,12 @@ static int ns_pinctrl_probe(struct platform_device *pdev)
return -EINVAL;
ns_pinctrl->chipset_flag = (uintptr_t)of_id->data;
- ns_pinctrl->regmap = syscon_node_to_regmap(of_get_parent(np));
- if (IS_ERR(ns_pinctrl->regmap)) {
- int err = PTR_ERR(ns_pinctrl->regmap);
-
- dev_err(dev, "Failed to map pinctrl regs: %d\n", err);
-
- return err;
- }
-
- if (of_property_read_u32(np, "offset", &ns_pinctrl->offset)) {
- dev_err(dev, "Failed to get register offset\n");
- return -ENOENT;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "cru_gpio_control");
+ ns_pinctrl->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(ns_pinctrl->base)) {
+ dev_err(dev, "Failed to map pinctrl regs\n");
+ return PTR_ERR(ns_pinctrl->base);
}
memcpy(pctldesc, &ns_pinctrl_desc, sizeof(*pctldesc));
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index 8d0f88e9ca88..bae9d429b813 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -840,6 +840,34 @@ static const struct pinconf_ops amd_pinconf_ops = {
.pin_config_group_set = amd_pinconf_group_set,
};
+static void amd_gpio_irq_init(struct amd_gpio *gpio_dev)
+{
+ struct pinctrl_desc *desc = gpio_dev->pctrl->desc;
+ unsigned long flags;
+ u32 pin_reg, mask;
+ int i;
+
+ mask = BIT(WAKE_CNTRL_OFF_S0I3) | BIT(WAKE_CNTRL_OFF_S3) |
+ BIT(INTERRUPT_MASK_OFF) | BIT(INTERRUPT_ENABLE_OFF) |
+ BIT(WAKE_CNTRL_OFF_S4);
+
+ for (i = 0; i < desc->npins; i++) {
+ int pin = desc->pins[i].number;
+ const struct pin_desc *pd = pin_desc_get(gpio_dev->pctrl, pin);
+
+ if (!pd)
+ continue;
+
+ raw_spin_lock_irqsave(&gpio_dev->lock, flags);
+
+ pin_reg = readl(gpio_dev->base + i * 4);
+ pin_reg &= ~mask;
+ writel(pin_reg, gpio_dev->base + i * 4);
+
+ raw_spin_unlock_irqrestore(&gpio_dev->lock, flags);
+ }
+}
+
#ifdef CONFIG_PM_SLEEP
static bool amd_gpio_should_save(struct amd_gpio *gpio_dev, unsigned int pin)
{
@@ -976,6 +1004,9 @@ static int amd_gpio_probe(struct platform_device *pdev)
return PTR_ERR(gpio_dev->pctrl);
}
+ /* Disable and mask interrupts */
+ amd_gpio_irq_init(gpio_dev);
+
girq = &gpio_dev->gc.irq;
girq->chip = &amd_gpio_irqchip;
/* This will let us handle the parent IRQ in the driver */
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 68b3886f9f0f..dfd8888a222a 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1644,8 +1644,8 @@ int __maybe_unused stm32_pinctrl_resume(struct device *dev)
struct stm32_pinctrl_group *g = pctl->groups;
int i;
- for (i = g->pin; i < g->pin + pctl->ngroups; i++)
- stm32_pinctrl_restore_gpio_regs(pctl, i);
+ for (i = 0; i < pctl->ngroups; i++, g++)
+ stm32_pinctrl_restore_gpio_regs(pctl, g->pin);
return 0;
}
diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig
index edd17e1a1f88..d4c5c170bca0 100644
--- a/drivers/platform/mellanox/Kconfig
+++ b/drivers/platform/mellanox/Kconfig
@@ -34,6 +34,18 @@ config MLXREG_IO
to system resets operation, system reset causes monitoring and some
kinds of mux selection.
+config MLXREG_LC
+ tristate "Mellanox line card platform driver support"
+ depends on REGMAP
+ depends on HWMON
+ depends on I2C
+ help
+	  This driver provides support for the Mellanox MSN4800-XX line cards,
+	  which are part of the MSN4800 Ethernet modular switch systems that
+	  provide a high-performance switching solution for Enterprise Data
+	  Centers (EDC), for building Ethernet-based clusters, High-Performance
+	  Computing (HPC) and embedded environments.
+
config MLXBF_TMFIFO
tristate "Mellanox BlueField SoC TmFifo platform driver"
depends on ARM64
diff --git a/drivers/platform/mellanox/Makefile b/drivers/platform/mellanox/Makefile
index 000ddaa74c98..a4868366ff18 100644
--- a/drivers/platform/mellanox/Makefile
+++ b/drivers/platform/mellanox/Makefile
@@ -8,3 +8,4 @@ obj-$(CONFIG_MLXBF_PMC) += mlxbf-pmc.o
obj-$(CONFIG_MLXBF_TMFIFO) += mlxbf-tmfifo.o
obj-$(CONFIG_MLXREG_HOTPLUG) += mlxreg-hotplug.o
obj-$(CONFIG_MLXREG_IO) += mlxreg-io.o
+obj-$(CONFIG_MLXREG_LC) += mlxreg-lc.o
diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
index b013445147dd..117bc3f395fd 100644
--- a/drivers/platform/mellanox/mlxreg-hotplug.c
+++ b/drivers/platform/mellanox/mlxreg-hotplug.c
@@ -28,7 +28,7 @@
/* ASIC good health mask. */
#define MLXREG_HOTPLUG_GOOD_HEALTH_MASK 0x02
-#define MLXREG_HOTPLUG_ATTRS_MAX 24
+#define MLXREG_HOTPLUG_ATTRS_MAX 128
#define MLXREG_HOTPLUG_NOT_ASSERT 3
/**
@@ -89,9 +89,20 @@ mlxreg_hotplug_udev_event_send(struct kobject *kobj,
return kobject_uevent_env(kobj, KOBJ_CHANGE, mlxreg_hotplug_udev_envp);
}
+static void
+mlxreg_hotplug_pdata_export(void *pdata, void *regmap)
+{
+ struct mlxreg_core_hotplug_platform_data *dev_pdata = pdata;
+
+ /* Export regmap to underlying device. */
+ dev_pdata->regmap = regmap;
+}
+
static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
- struct mlxreg_core_data *data)
+ struct mlxreg_core_data *data,
+ enum mlxreg_hotplug_kind kind)
{
+ struct i2c_board_info *brdinfo = data->hpdev.brdinfo;
struct mlxreg_core_hotplug_platform_data *pdata;
struct i2c_client *client;
@@ -106,46 +117,88 @@ static int mlxreg_hotplug_device_create(struct mlxreg_hotplug_priv_data *priv,
return 0;
pdata = dev_get_platdata(&priv->pdev->dev);
- data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
- pdata->shift_nr);
- if (!data->hpdev.adapter) {
- dev_err(priv->dev, "Failed to get adapter for bus %d\n",
- data->hpdev.nr + pdata->shift_nr);
- return -EFAULT;
- }
+ switch (data->hpdev.action) {
+ case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
+ data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr +
+ pdata->shift_nr);
+ if (!data->hpdev.adapter) {
+ dev_err(priv->dev, "Failed to get adapter for bus %d\n",
+ data->hpdev.nr + pdata->shift_nr);
+ return -EFAULT;
+ }
- client = i2c_new_client_device(data->hpdev.adapter,
- data->hpdev.brdinfo);
- if (IS_ERR(client)) {
- dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
- data->hpdev.brdinfo->type, data->hpdev.nr +
- pdata->shift_nr, data->hpdev.brdinfo->addr);
+ /* Export platform data to underlying device. */
+ if (brdinfo->platform_data)
+ mlxreg_hotplug_pdata_export(brdinfo->platform_data, pdata->regmap);
- i2c_put_adapter(data->hpdev.adapter);
- data->hpdev.adapter = NULL;
- return PTR_ERR(client);
+ client = i2c_new_client_device(data->hpdev.adapter,
+ brdinfo);
+ if (IS_ERR(client)) {
+ dev_err(priv->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ brdinfo->type, data->hpdev.nr +
+ pdata->shift_nr, brdinfo->addr);
+
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ return PTR_ERR(client);
+ }
+
+ data->hpdev.client = client;
+ break;
+ case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
+ /* Export platform data to underlying device. */
+ if (data->hpdev.brdinfo && data->hpdev.brdinfo->platform_data)
+ mlxreg_hotplug_pdata_export(data->hpdev.brdinfo->platform_data,
+ pdata->regmap);
+ /* Pass parent hotplug device handle to underlying device. */
+ data->notifier = data->hpdev.notifier;
+ data->hpdev.pdev = platform_device_register_resndata(&priv->pdev->dev,
+ brdinfo->type,
+ data->hpdev.nr,
+ NULL, 0, data,
+ sizeof(*data));
+ if (IS_ERR(data->hpdev.pdev))
+ return PTR_ERR(data->hpdev.pdev);
+
+ break;
+ default:
+ break;
}
- data->hpdev.client = client;
+ if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
+ return data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 1);
return 0;
}
static void
mlxreg_hotplug_device_destroy(struct mlxreg_hotplug_priv_data *priv,
- struct mlxreg_core_data *data)
+ struct mlxreg_core_data *data,
+ enum mlxreg_hotplug_kind kind)
{
/* Notify user by sending hwmon uevent. */
mlxreg_hotplug_udev_event_send(&priv->hwmon->kobj, data, false);
+ if (data->hpdev.notifier && data->hpdev.notifier->user_handler)
+ data->hpdev.notifier->user_handler(data->hpdev.notifier->handle, kind, 0);
+
+ switch (data->hpdev.action) {
+ case MLXREG_HOTPLUG_DEVICE_DEFAULT_ACTION:
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ }
- if (data->hpdev.client) {
- i2c_unregister_device(data->hpdev.client);
- data->hpdev.client = NULL;
- }
-
- if (data->hpdev.adapter) {
- i2c_put_adapter(data->hpdev.adapter);
- data->hpdev.adapter = NULL;
+ if (data->hpdev.adapter) {
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ }
+ break;
+ case MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION:
+ if (data->hpdev.pdev)
+ platform_device_unregister(data->hpdev.pdev);
+ break;
+ default:
+ break;
}
}
@@ -317,14 +370,14 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
data = item->data + bit;
if (regval & BIT(bit)) {
if (item->inversed)
- mlxreg_hotplug_device_destroy(priv, data);
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
else
- mlxreg_hotplug_device_create(priv, data);
+ mlxreg_hotplug_device_create(priv, data, item->kind);
} else {
if (item->inversed)
- mlxreg_hotplug_device_create(priv, data);
+ mlxreg_hotplug_device_create(priv, data, item->kind);
else
- mlxreg_hotplug_device_destroy(priv, data);
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
}
}
@@ -381,7 +434,7 @@ mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
* ASIC is in steady state. Connect associated
* device, if configured.
*/
- mlxreg_hotplug_device_create(priv, data);
+ mlxreg_hotplug_device_create(priv, data, item->kind);
data->attached = true;
}
} else {
@@ -391,7 +444,7 @@ mlxreg_hotplug_health_work_helper(struct mlxreg_hotplug_priv_data *priv,
* in steady state. Disconnect associated
* device, if it has been connected.
*/
- mlxreg_hotplug_device_destroy(priv, data);
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
data->attached = false;
data->health_cntr = 0;
}
@@ -630,7 +683,7 @@ static void mlxreg_hotplug_unset_irq(struct mlxreg_hotplug_priv_data *priv)
/* Remove all the attached devices in group. */
count = item->count;
for (j = 0; j < count; j++, data++)
- mlxreg_hotplug_device_destroy(priv, data);
+ mlxreg_hotplug_device_destroy(priv, data, item->kind);
}
}
diff --git a/drivers/platform/mellanox/mlxreg-io.c b/drivers/platform/mellanox/mlxreg-io.c
index a916cd89cbbe..2c2686d5c2fc 100644
--- a/drivers/platform/mellanox/mlxreg-io.c
+++ b/drivers/platform/mellanox/mlxreg-io.c
@@ -18,7 +18,7 @@
/* Attribute parameters. */
#define MLXREG_IO_ATT_SIZE 10
-#define MLXREG_IO_ATT_NUM 48
+#define MLXREG_IO_ATT_NUM 96
/**
* struct mlxreg_io_priv_data - driver's private data:
diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c
new file mode 100644
index 000000000000..0b7f58feb701
--- /dev/null
+++ b/drivers/platform/mellanox/mlxreg-lc.c
@@ -0,0 +1,906 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Nvidia line card driver
+ *
+ * Copyright (C) 2020 Nvidia Technologies Ltd.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_data/mlxcpld.h>
+#include <linux/platform_data/mlxreg.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+/* I2C bus IO offsets */
+#define MLXREG_LC_REG_CPLD1_VER_OFFSET 0x2500
+#define MLXREG_LC_REG_FPGA1_VER_OFFSET 0x2501
+#define MLXREG_LC_REG_CPLD1_PN_OFFSET 0x2504
+#define MLXREG_LC_REG_FPGA1_PN_OFFSET 0x2506
+#define MLXREG_LC_REG_RESET_CAUSE_OFFSET 0x251d
+#define MLXREG_LC_REG_LED1_OFFSET 0x2520
+#define MLXREG_LC_REG_GP0_OFFSET 0x252e
+#define MLXREG_LC_REG_FIELD_UPGRADE 0x2534
+#define MLXREG_LC_CHANNEL_I2C_REG 0x25dc
+#define MLXREG_LC_REG_CPLD1_MVER_OFFSET 0x25de
+#define MLXREG_LC_REG_FPGA1_MVER_OFFSET 0x25df
+#define MLXREG_LC_REG_MAX_POWER_OFFSET 0x25f1
+#define MLXREG_LC_REG_CONFIG_OFFSET 0x25fb
+#define MLXREG_LC_REG_MAX 0x3fff
+
+/**
+ * enum mlxreg_lc_type - line card types
+ *
+ * @MLXREG_LC_SN4800_C16: 100GbE line card with 16 QSFP28 ports;
+ */
+enum mlxreg_lc_type {
+ MLXREG_LC_SN4800_C16 = 0x0000,
+};
+
+/**
+ * enum mlxreg_lc_state - line card states
+ *
+ * @MLXREG_LC_INITIALIZED: line card is initialized;
+ * @MLXREG_LC_POWERED: line card is powered;
+ * @MLXREG_LC_SYNCED: line card is synchronized between hardware and firmware;
+ */
+enum mlxreg_lc_state {
+ MLXREG_LC_INITIALIZED = BIT(0),
+ MLXREG_LC_POWERED = BIT(1),
+ MLXREG_LC_SYNCED = BIT(2),
+};
+
+#define MLXREG_LC_CONFIGURED (MLXREG_LC_INITIALIZED | MLXREG_LC_POWERED | MLXREG_LC_SYNCED)
+
+/* mlxreg_lc - device private data
+ * @dev: platform device;
+ * @lock: line card lock;
+ * @par_regmap: parent device regmap handle;
+ * @data: platform core data;
+ * @io_data: register access platform data;
+ * @led_data: LED platform data;
+ * @mux_data: MUX platform data;
+ * @led: LED device;
+ * @io_regs: register access device;
+ * @mux_brdinfo: mux configuration;
+ * @mux: mux devices;
+ * @aux_devs: I2C devices fed by auxiliary power;
+ * @aux_devs_num: number of I2C devices fed by auxiliary power;
+ * @main_devs: I2C devices fed by main power;
+ * @main_devs_num: number of I2C devices fed by main power;
+ * @state: line card state;
+ */
+struct mlxreg_lc {
+ struct device *dev;
+ struct mutex lock; /* line card access lock */
+ void *par_regmap;
+ struct mlxreg_core_data *data;
+ struct mlxreg_core_platform_data *io_data;
+ struct mlxreg_core_platform_data *led_data;
+ struct mlxcpld_mux_plat_data *mux_data;
+ struct platform_device *led;
+ struct platform_device *io_regs;
+ struct i2c_board_info *mux_brdinfo;
+ struct platform_device *mux;
+ struct mlxreg_hotplug_device *aux_devs;
+ int aux_devs_num;
+ struct mlxreg_hotplug_device *main_devs;
+ int main_devs_num;
+ enum mlxreg_lc_state state;
+};
+
+static bool mlxreg_lc_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_lc_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_CPLD1_VER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_VER_OFFSET:
+ case MLXREG_LC_REG_CPLD1_PN_OFFSET:
+ case MLXREG_LC_REG_FPGA1_PN_OFFSET:
+ case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_LC_REG_MAX_POWER_OFFSET:
+ case MLXREG_LC_REG_CONFIG_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static bool mlxreg_lc_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MLXREG_LC_REG_CPLD1_VER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_VER_OFFSET:
+ case MLXREG_LC_REG_CPLD1_PN_OFFSET:
+ case MLXREG_LC_REG_FPGA1_PN_OFFSET:
+ case MLXREG_LC_REG_RESET_CAUSE_OFFSET:
+ case MLXREG_LC_REG_LED1_OFFSET:
+ case MLXREG_LC_REG_GP0_OFFSET:
+ case MLXREG_LC_REG_FIELD_UPGRADE:
+ case MLXREG_LC_CHANNEL_I2C_REG:
+ case MLXREG_LC_REG_CPLD1_MVER_OFFSET:
+ case MLXREG_LC_REG_FPGA1_MVER_OFFSET:
+ case MLXREG_LC_REG_MAX_POWER_OFFSET:
+ case MLXREG_LC_REG_CONFIG_OFFSET:
+ return true;
+ }
+ return false;
+}
+
+static const struct reg_default mlxreg_lc_regmap_default[] = {
+ { MLXREG_LC_CHANNEL_I2C_REG, 0x00 },
+};
+
+/* Configuration for the register map of a device with 2 bytes address space. */
+static const struct regmap_config mlxreg_lc_regmap_conf = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .max_register = MLXREG_LC_REG_MAX,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxreg_lc_writeable_reg,
+ .readable_reg = mlxreg_lc_readable_reg,
+ .volatile_reg = mlxreg_lc_volatile_reg,
+ .reg_defaults = mlxreg_lc_regmap_default,
+ .num_reg_defaults = ARRAY_SIZE(mlxreg_lc_regmap_default),
+};
+
+/* Default channels vector.
+ * It contains only the channels which are physically connected to devices;
+ * empty channels are skipped.
+ */
+static int mlxreg_lc_chan[] = {
+ 0x04, 0x05, 0x06, 0x07, 0x08, 0x10, 0x20, 0x21, 0x22, 0x23, 0x40, 0x41,
+ 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d,
+ 0x4e, 0x4f
+};
+
+/* Default mux configuration. */
+static struct mlxcpld_mux_plat_data mlxreg_lc_mux_data[] = {
+ {
+ .chan_ids = mlxreg_lc_chan,
+ .num_adaps = ARRAY_SIZE(mlxreg_lc_chan),
+ .sel_reg_addr = MLXREG_LC_CHANNEL_I2C_REG,
+ .reg_size = 2,
+ },
+};
+
+/* Default mux board info. */
+static struct i2c_board_info mlxreg_lc_mux_brdinfo = {
+ I2C_BOARD_INFO("i2c-mux-mlxcpld", 0x32),
+};
+
+/* Line card default auxiliary power static devices. */
+static struct i2c_board_info mlxreg_lc_aux_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+ {
+ I2C_BOARD_INFO("24c32", 0x51),
+ },
+};
+
+/* Line card default auxiliary power board info. */
+static struct mlxreg_hotplug_device mlxreg_lc_aux_pwr_brdinfo[] = {
+ {
+ .brdinfo = &mlxreg_lc_aux_pwr_devices[0],
+ .nr = 3,
+ },
+ {
+ .brdinfo = &mlxreg_lc_aux_pwr_devices[1],
+ .nr = 4,
+ },
+};
+
+/* Line card default main power static devices. */
+static struct i2c_board_info mlxreg_lc_main_pwr_devices[] = {
+ {
+ I2C_BOARD_INFO("mp2975", 0x62),
+ },
+ {
+ I2C_BOARD_INFO("mp2975", 0x64),
+ },
+ {
+ I2C_BOARD_INFO("max11603", 0x6d),
+ },
+ {
+ I2C_BOARD_INFO("lm25066", 0x15),
+ },
+};
+
+/* Line card default main power board info. */
+static struct mlxreg_hotplug_device mlxreg_lc_main_pwr_brdinfo[] = {
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[0],
+ .nr = 0,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[1],
+ .nr = 0,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[2],
+ .nr = 1,
+ },
+ {
+ .brdinfo = &mlxreg_lc_main_pwr_devices[3],
+ .nr = 2,
+ },
+};
+
+/* LED default data. */
+static struct mlxreg_core_data mlxreg_lc_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXREG_LC_REG_LED1_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXREG_LC_REG_LED1_OFFSET,
+ .mask = GENMASK(7, 4),
+ },
+};
+
+static struct mlxreg_core_platform_data mlxreg_lc_led = {
+ .identity = "pci",
+ .data = mlxreg_lc_led_data,
+ .counter = ARRAY_SIZE(mlxreg_lc_led_data),
+};
+
+/* Default register access data. */
+static struct mlxreg_core_data mlxreg_lc_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXREG_LC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "fpga1_version",
+ .reg = MLXREG_LC_REG_FPGA1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXREG_LC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "fpga1_pn",
+ .reg = MLXREG_LC_REG_FPGA1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXREG_LC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "fpga1_version_min",
+ .reg = MLXREG_LC_REG_FPGA1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_fpga_not_done",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_ref",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_dc_dc_pwr_fail",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_from_chassis",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_off_from_chassis",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_line_card",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_line_card_pwr_en",
+ .reg = MLXREG_LC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld_upgrade_en",
+ .reg = MLXREG_LC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "fpga_upgrade_en",
+ .reg = MLXREG_LC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "qsfp_pwr_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "agb_spi_burn_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "fpga_spi_burn_en",
+ .reg = MLXREG_LC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ .secured = 1,
+ },
+ {
+ .label = "max_power",
+ .reg = MLXREG_LC_REG_MAX_POWER_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "config",
+ .reg = MLXREG_LC_REG_CONFIG_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxreg_lc_regs_io = {
+ .data = mlxreg_lc_io_data,
+ .counter = ARRAY_SIZE(mlxreg_lc_io_data),
+};
+
+static int
+mlxreg_lc_create_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
+ int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int i;
+
+	/* Create static I2C devices fed by auxiliary or main power. */
+ for (i = 0; i < size; i++, dev++) {
+ dev->client = i2c_new_client_device(dev->adapter, dev->brdinfo);
+ if (IS_ERR(dev->client)) {
+ dev_err(mlxreg_lc->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ dev->brdinfo->type, dev->nr, dev->brdinfo->addr);
+
+ dev->adapter = NULL;
+ goto fail_create_static_devices;
+ }
+ }
+
+ return 0;
+
+fail_create_static_devices:
+ while (--i >= 0) {
+ dev = devs + i;
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ }
+ return IS_ERR(dev->client);
+}
+
+static void
+mlxreg_lc_destroy_static_devices(struct mlxreg_lc *mlxreg_lc, struct mlxreg_hotplug_device *devs,
+ int size)
+{
+ struct mlxreg_hotplug_device *dev = devs;
+ int i;
+
+	/* Destroy static I2C devices fed by auxiliary or main power. */
+ for (i = 0; i < size; i++, dev++) {
+ if (dev->client) {
+ i2c_unregister_device(dev->client);
+ dev->client = NULL;
+ }
+ }
+}
+
+static int mlxreg_lc_power_on_off(struct mlxreg_lc *mlxreg_lc, u8 action)
+{
+ u32 regval;
+ int err;
+
+ mutex_lock(&mlxreg_lc->lock);
+
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
+ if (err)
+ goto regmap_read_fail;
+
+ if (action)
+ regval |= BIT(mlxreg_lc->data->slot - 1);
+ else
+ regval &= ~BIT(mlxreg_lc->data->slot - 1);
+
+ err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, regval);
+
+regmap_read_fail:
+ mutex_unlock(&mlxreg_lc->lock);
+ return err;
+}
+
+static int mlxreg_lc_enable_disable(struct mlxreg_lc *mlxreg_lc, bool action)
+{
+ u32 regval;
+ int err;
+
+ /*
+	 * After powering on, hardware holds the line card in the disabled state. Holding the line
+	 * card in the disabled state protects access to the line card components, such as the FPGA
+	 * and gearboxes. The line card should be enabled to bring it into the operational state and
+	 * can be disabled to move it back to the non-operational state. Enabling a line card that
+	 * has already been enabled has no effect, and disabling an already disabled line card has
+	 * no effect either.
+ */
+ mutex_lock(&mlxreg_lc->lock);
+
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, &regval);
+ if (err)
+ goto regmap_read_fail;
+
+ if (action)
+ regval |= BIT(mlxreg_lc->data->slot - 1);
+ else
+ regval &= ~BIT(mlxreg_lc->data->slot - 1);
+
+ err = regmap_write(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_ena, regval);
+
+regmap_read_fail:
+ mutex_unlock(&mlxreg_lc->lock);
+ return err;
+}
+
+static int
+mlxreg_lc_sn4800_c16_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
+ struct mlxreg_core_data *data)
+{
+ struct device *dev = &data->hpdev.client->dev;
+
+ /* Set line card configuration according to the type. */
+ mlxreg_lc->mux_data = mlxreg_lc_mux_data;
+ mlxreg_lc->io_data = &mlxreg_lc_regs_io;
+ mlxreg_lc->led_data = &mlxreg_lc_led;
+ mlxreg_lc->mux_brdinfo = &mlxreg_lc_mux_brdinfo;
+
+ mlxreg_lc->aux_devs = devm_kmemdup(dev, mlxreg_lc_aux_pwr_brdinfo,
+ sizeof(mlxreg_lc_aux_pwr_brdinfo), GFP_KERNEL);
+ if (!mlxreg_lc->aux_devs)
+ return -ENOMEM;
+ mlxreg_lc->aux_devs_num = ARRAY_SIZE(mlxreg_lc_aux_pwr_brdinfo);
+ mlxreg_lc->main_devs = devm_kmemdup(dev, mlxreg_lc_main_pwr_brdinfo,
+ sizeof(mlxreg_lc_main_pwr_brdinfo), GFP_KERNEL);
+ if (!mlxreg_lc->main_devs)
+ return -ENOMEM;
+ mlxreg_lc->main_devs_num = ARRAY_SIZE(mlxreg_lc_main_pwr_brdinfo);
+
+ return 0;
+}
+
+static void
+mlxreg_lc_state_update(struct mlxreg_lc *mlxreg_lc, enum mlxreg_lc_state state, u8 action)
+{
+ mutex_lock(&mlxreg_lc->lock);
+
+ if (action)
+ mlxreg_lc->state |= state;
+ else
+ mlxreg_lc->state &= ~state;
+
+ mutex_unlock(&mlxreg_lc->lock);
+}
+
+/*
+ * Callback to be called from the mlxreg-hotplug driver to notify the line card about a received
+ * event.
+ */
+static int mlxreg_lc_event_handler(void *handle, enum mlxreg_hotplug_kind kind, u8 action)
+{
+ struct mlxreg_lc *mlxreg_lc = handle;
+ int err = 0;
+
+ dev_info(mlxreg_lc->dev, "linecard#%d state %d event kind %d action %d\n",
+ mlxreg_lc->data->slot, mlxreg_lc->state, kind, action);
+
+ if (!(mlxreg_lc->state & MLXREG_LC_INITIALIZED))
+ return 0;
+
+ switch (kind) {
+ case MLXREG_HOTPLUG_LC_SYNCED:
+ /*
+		 * Synchronization event - hardware and firmware are synchronized. Power the line
+		 * card on or off to allow or disallow the main power source.
+ */
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, action);
+ /* Power line card if it is not powered yet. */
+ if (!(mlxreg_lc->state & MLXREG_LC_POWERED) && action) {
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
+ if (err)
+ return err;
+ }
+		/* If the line card is configured, enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED && action)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ break;
+ case MLXREG_HOTPLUG_LC_POWERED:
+		/* Power event - attach or detach line card devices fed by the main power. */
+ if (action) {
+			/* Do not create devices if the line card is already powered. */
+ if (mlxreg_lc->state & MLXREG_LC_POWERED) {
+				/* If the line card is configured, enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ return err;
+ }
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ if (err)
+ return err;
+
+			/* If the line card is already in the ready state, enable it. */
+ if (mlxreg_lc->state & MLXREG_LC_CONFIGURED)
+ err = mlxreg_lc_enable_disable(mlxreg_lc, 1);
+ } else {
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ }
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, action);
+ break;
+ case MLXREG_HOTPLUG_LC_READY:
+ /*
+		 * Ready event - enable the line card by releasing it from reset, or disable it by
+		 * putting it back into the reset state.
+ */
+ err = mlxreg_lc_enable_disable(mlxreg_lc, !!action);
+ break;
+ case MLXREG_HOTPLUG_LC_THERMAL:
+		/* Thermal shutdown event - power off the line card. */
+ if (action)
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 0);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+/*
+ * Callback invoked by the i2c-mux-mlxcpld driver to indicate that all adapter devices have been
+ * created.
+ */
+static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent,
+ struct i2c_adapter *adapters[])
+{
+ struct mlxreg_hotplug_device *main_dev, *aux_dev;
+ struct mlxreg_lc *mlxreg_lc = handle;
+ u32 regval;
+ int i, err;
+
+	/* Update I2C devices fed by auxiliary power. */
+ aux_dev = mlxreg_lc->aux_devs;
+ for (i = 0; i < mlxreg_lc->aux_devs_num; i++, aux_dev++) {
+ aux_dev->adapter = adapters[aux_dev->nr];
+ aux_dev->nr = adapters[aux_dev->nr]->nr;
+ }
+
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->aux_devs,
+ mlxreg_lc->aux_devs_num);
+ if (err)
+ return err;
+
+	/* Update I2C devices fed by main power. */
+ main_dev = mlxreg_lc->main_devs;
+ for (i = 0; i < mlxreg_lc->main_devs_num; i++, main_dev++) {
+ main_dev->adapter = adapters[main_dev->nr];
+ main_dev->nr = adapters[main_dev->nr]->nr;
+ }
+
+ /* Verify if line card is powered. */
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_pwr, &regval);
+ if (err)
+ goto mlxreg_lc_regmap_read_power_fail;
+
+ if (regval & mlxreg_lc->data->mask) {
+ err = mlxreg_lc_create_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+ if (err)
+ goto mlxreg_lc_create_static_devices_failed;
+
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_POWERED, 1);
+ }
+
+ /* Verify if line card is synchronized. */
+ err = regmap_read(mlxreg_lc->par_regmap, mlxreg_lc->data->reg_sync, &regval);
+ if (err)
+ goto mlxreg_lc_regmap_read_sync_fail;
+
+ /* Power on line card if necessary. */
+ if (regval & mlxreg_lc->data->mask) {
+		mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_SYNCED, 1);
+		if (!(mlxreg_lc->state & MLXREG_LC_POWERED)) {
+ err = mlxreg_lc_power_on_off(mlxreg_lc, 1);
+ if (err)
+ goto mlxreg_lc_regmap_power_on_off_fail;
+ }
+ }
+
+ mlxreg_lc_state_update(mlxreg_lc, MLXREG_LC_INITIALIZED, 1);
+
+ return 0;
+
+mlxreg_lc_regmap_power_on_off_fail:
+mlxreg_lc_regmap_read_sync_fail:
+ if (mlxreg_lc->state & MLXREG_LC_POWERED)
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+mlxreg_lc_create_static_devices_failed:
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
+mlxreg_lc_regmap_read_power_fail:
+ return err;
+}
+
+static int
+mlxreg_lc_config_init(struct mlxreg_lc *mlxreg_lc, void *regmap,
+ struct mlxreg_core_data *data)
+{
+ struct device *dev = &data->hpdev.client->dev;
+	u32 lsb, regval;
+	int err;
+
+ /* Validate line card type. */
+ err = regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET, &lsb);
+	if (!err)
+		err = regmap_read(regmap, MLXREG_LC_REG_CONFIG_OFFSET, &regval);
+ if (err)
+ return err;
+ regval = (regval & GENMASK(7, 0)) << 8 | (lsb & GENMASK(7, 0));
+ switch (regval) {
+ case MLXREG_LC_SN4800_C16:
+ err = mlxreg_lc_sn4800_c16_config_init(mlxreg_lc, regmap, data);
+ if (err)
+ return err;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ /* Create mux infrastructure. */
+ mlxreg_lc->mux_data->handle = mlxreg_lc;
+ mlxreg_lc->mux_data->completion_notify = mlxreg_lc_completion_notify;
+ mlxreg_lc->mux_brdinfo->platform_data = mlxreg_lc->mux_data;
+ mlxreg_lc->mux = platform_device_register_resndata(dev, "i2c-mux-mlxcpld", data->hpdev.nr,
+ NULL, 0, mlxreg_lc->mux_data,
+ sizeof(*mlxreg_lc->mux_data));
+ if (IS_ERR(mlxreg_lc->mux))
+ return PTR_ERR(mlxreg_lc->mux);
+
+ /* Register IO access driver. */
+ if (mlxreg_lc->io_data) {
+ mlxreg_lc->io_data->regmap = regmap;
+ mlxreg_lc->io_regs =
+ platform_device_register_resndata(dev, "mlxreg-io", data->hpdev.nr, NULL, 0,
+ mlxreg_lc->io_data, sizeof(*mlxreg_lc->io_data));
+ if (IS_ERR(mlxreg_lc->io_regs)) {
+ err = PTR_ERR(mlxreg_lc->io_regs);
+ goto fail_register_io;
+ }
+ }
+
+ /* Register LED driver. */
+ if (mlxreg_lc->led_data) {
+ mlxreg_lc->led_data->regmap = regmap;
+ mlxreg_lc->led =
+ platform_device_register_resndata(dev, "leds-mlxreg", data->hpdev.nr, NULL, 0,
+ mlxreg_lc->led_data,
+ sizeof(*mlxreg_lc->led_data));
+ if (IS_ERR(mlxreg_lc->led)) {
+ err = PTR_ERR(mlxreg_lc->led);
+ goto fail_register_led;
+ }
+ }
+
+ return 0;
+
+fail_register_led:
+ if (mlxreg_lc->io_regs)
+ platform_device_unregister(mlxreg_lc->io_regs);
+fail_register_io:
+ if (mlxreg_lc->mux)
+ platform_device_unregister(mlxreg_lc->mux);
+
+ return err;
+}
+
+static void mlxreg_lc_config_exit(struct mlxreg_lc *mlxreg_lc)
+{
+ /* Unregister LED driver. */
+ if (mlxreg_lc->led)
+ platform_device_unregister(mlxreg_lc->led);
+ /* Unregister IO access driver. */
+ if (mlxreg_lc->io_regs)
+ platform_device_unregister(mlxreg_lc->io_regs);
+ /* Remove mux infrastructure. */
+ if (mlxreg_lc->mux)
+ platform_device_unregister(mlxreg_lc->mux);
+}
+
+static int mlxreg_lc_probe(struct platform_device *pdev)
+{
+ struct mlxreg_core_hotplug_platform_data *par_pdata;
+ struct mlxreg_core_data *data;
+ struct mlxreg_lc *mlxreg_lc;
+ void *regmap;
+ int i, err;
+
+ data = dev_get_platdata(&pdev->dev);
+ if (!data)
+ return -EINVAL;
+
+ mlxreg_lc = devm_kzalloc(&pdev->dev, sizeof(*mlxreg_lc), GFP_KERNEL);
+ if (!mlxreg_lc)
+ return -ENOMEM;
+
+ mutex_init(&mlxreg_lc->lock);
+ /* Set event notification callback. */
+ if (data->notifier) {
+ data->notifier->user_handler = mlxreg_lc_event_handler;
+ data->notifier->handle = mlxreg_lc;
+ }
+ data->hpdev.adapter = i2c_get_adapter(data->hpdev.nr);
+ if (!data->hpdev.adapter) {
+ dev_err(&pdev->dev, "Failed to get adapter for bus %d\n",
+ data->hpdev.nr);
+ return -EFAULT;
+ }
+
+	/* Create device at the top of the line card I2C tree. */
+ data->hpdev.client = i2c_new_client_device(data->hpdev.adapter,
+ data->hpdev.brdinfo);
+ if (IS_ERR(data->hpdev.client)) {
+ dev_err(&pdev->dev, "Failed to create client %s at bus %d at addr 0x%02x\n",
+ data->hpdev.brdinfo->type, data->hpdev.nr, data->hpdev.brdinfo->addr);
+
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ return PTR_ERR(data->hpdev.client);
+ }
+
+ regmap = devm_regmap_init_i2c(data->hpdev.client,
+ &mlxreg_lc_regmap_conf);
+ if (IS_ERR(regmap)) {
+ err = PTR_ERR(regmap);
+ goto mlxreg_lc_probe_fail;
+ }
+
+ /* Set default registers. */
+ for (i = 0; i < mlxreg_lc_regmap_conf.num_reg_defaults; i++) {
+ err = regmap_write(regmap, mlxreg_lc_regmap_default[i].reg,
+ mlxreg_lc_regmap_default[i].def);
+ if (err)
+ goto mlxreg_lc_probe_fail;
+ }
+
+ /* Sync registers with hardware. */
+ regcache_mark_dirty(regmap);
+ err = regcache_sync(regmap);
+ if (err)
+ goto mlxreg_lc_probe_fail;
+
+ par_pdata = data->hpdev.brdinfo->platform_data;
+ mlxreg_lc->par_regmap = par_pdata->regmap;
+ mlxreg_lc->data = data;
+ mlxreg_lc->dev = &pdev->dev;
+ platform_set_drvdata(pdev, mlxreg_lc);
+
+ /* Configure line card. */
+ err = mlxreg_lc_config_init(mlxreg_lc, regmap, data);
+ if (err)
+ goto mlxreg_lc_probe_fail;
+
+ return err;
+
+mlxreg_lc_probe_fail:
+	i2c_unregister_device(data->hpdev.client);
+	data->hpdev.client = NULL;
+	i2c_put_adapter(data->hpdev.adapter);
+	data->hpdev.adapter = NULL;
+	return err;
+}
+
+static int mlxreg_lc_remove(struct platform_device *pdev)
+{
+ struct mlxreg_core_data *data = dev_get_platdata(&pdev->dev);
+ struct mlxreg_lc *mlxreg_lc = platform_get_drvdata(pdev);
+
+ /* Clear event notification callback. */
+ if (data->notifier) {
+ data->notifier->user_handler = NULL;
+ data->notifier->handle = NULL;
+ }
+
+	/* Destroy static I2C devices fed by main power. */
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->main_devs,
+ mlxreg_lc->main_devs_num);
+	/* Destroy static I2C devices fed by auxiliary power. */
+ mlxreg_lc_destroy_static_devices(mlxreg_lc, mlxreg_lc->aux_devs, mlxreg_lc->aux_devs_num);
+ /* Unregister underlying drivers. */
+ mlxreg_lc_config_exit(mlxreg_lc);
+ if (data->hpdev.client) {
+ i2c_unregister_device(data->hpdev.client);
+ data->hpdev.client = NULL;
+ i2c_put_adapter(data->hpdev.adapter);
+ data->hpdev.adapter = NULL;
+ }
+
+ return 0;
+}
+
+static struct platform_driver mlxreg_lc_driver = {
+ .probe = mlxreg_lc_probe,
+ .remove = mlxreg_lc_remove,
+ .driver = {
+ .name = "mlxreg-lc",
+ },
+};
+
+module_platform_driver(mlxreg_lc_driver);
+
+MODULE_AUTHOR("Vadim Pasternak <vadimp@nvidia.com>");
+MODULE_DESCRIPTION("Nvidia line card platform driver");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_ALIAS("platform:mlxreg-lc");
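For readers tracing how the notifier wired up in mlxreg_lc_probe() above gets exercised, the minimal sketch below shows the call the parent mlxreg-hotplug driver is expected to make when an event fires. It only uses the fields assigned in the probe path (user_handler, handle); the surrounding function name is invented for illustration and is not part of the patch.

static int example_forward_hotplug_event(struct mlxreg_core_data *data,
					 enum mlxreg_hotplug_kind kind, u8 action)
{
	/* Forward the event to mlxreg_lc_event_handler() via the stored handle. */
	if (data->notifier && data->notifier->user_handler)
		return data->notifier->user_handler(data->notifier->handle, kind, action);

	return 0;
}

With that plumbing in place, MLXREG_HOTPLUG_LC_SYNCED, _POWERED, _READY and _THERMAL all funnel into the switch in mlxreg_lc_event_handler() shown above.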
diff --git a/drivers/platform/surface/surface3-wmi.c b/drivers/platform/surface/surface3-wmi.c
index fcd1d4fb94d5..09ac9cfc40d8 100644
--- a/drivers/platform/surface/surface3-wmi.c
+++ b/drivers/platform/surface/surface3-wmi.c
@@ -139,13 +139,12 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
- struct acpi_device *adev, *ts_adev = NULL;
- acpi_handle handle;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+ struct acpi_device *ts_adev = NULL;
acpi_status status;
/* ignore non ACPI devices */
- handle = ACPI_HANDLE(dev);
- if (!handle || acpi_bus_get_device(handle, &adev))
+ if (!adev)
return 0;
/* check for LID ACPI switch */
@@ -159,7 +158,7 @@ static int s3_wmi_check_platform_device(struct device *dev, void *data)
strlen(SPI_CTL_OBJ_NAME)))
return 0;
- status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, adev->handle, 1,
s3_wmi_attach_spi_device, NULL,
&ts_adev, NULL);
if (ACPI_FAILURE(status))
diff --git a/drivers/platform/surface/surface3_power.c b/drivers/platform/surface/surface3_power.c
index 90c1568ea4e0..abac3eec565e 100644
--- a/drivers/platform/surface/surface3_power.c
+++ b/drivers/platform/surface/surface3_power.c
@@ -159,12 +159,11 @@ mshw0011_notify(struct mshw0011_data *cdata, u8 arg1, u8 arg2,
unsigned int *ret_value)
{
union acpi_object *obj;
- struct acpi_device *adev;
acpi_handle handle;
unsigned int i;
handle = ACPI_HANDLE(&cdata->adp1->dev);
- if (!handle || acpi_bus_get_device(handle, &adev))
+ if (!handle)
return -ENODEV;
obj = acpi_evaluate_dsm_typed(handle, &mshw0011_guid, arg1, arg2, NULL,
diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c
index 4428c4330229..e70f4c63554e 100644
--- a/drivers/platform/surface/surface_aggregator_registry.c
+++ b/drivers/platform/surface/surface_aggregator_registry.c
@@ -77,6 +77,42 @@ static const struct software_node ssam_node_bas_dtx = {
.parent = &ssam_node_root,
};
+/* HID keyboard (TID1). */
+static const struct software_node ssam_node_hid_tid1_keyboard = {
+ .name = "ssam:01:15:01:01:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID pen stash (TID1; pen taken / stashed away events). */
+static const struct software_node ssam_node_hid_tid1_penstash = {
+ .name = "ssam:01:15:01:02:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID touchpad (TID1). */
+static const struct software_node ssam_node_hid_tid1_touchpad = {
+ .name = "ssam:01:15:01:03:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 6 (TID1, unknown HID device). */
+static const struct software_node ssam_node_hid_tid1_iid6 = {
+ .name = "ssam:01:15:01:06:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID device instance 7 (TID1, unknown HID device). */
+static const struct software_node ssam_node_hid_tid1_iid7 = {
+ .name = "ssam:01:15:01:07:00",
+ .parent = &ssam_node_root,
+};
+
+/* HID system controls (TID1). */
+static const struct software_node ssam_node_hid_tid1_sysctrl = {
+ .name = "ssam:01:15:01:08:00",
+ .parent = &ssam_node_root,
+};
+
/* HID keyboard. */
static const struct software_node ssam_node_hid_main_keyboard = {
.name = "ssam:01:15:02:01:00",
@@ -159,6 +195,21 @@ static const struct software_node *ssam_node_group_sl3[] = {
NULL,
};
+/* Devices for Surface Laptop Studio. */
+static const struct software_node *ssam_node_group_sls[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ &ssam_node_hid_tid1_keyboard,
+ &ssam_node_hid_tid1_penstash,
+ &ssam_node_hid_tid1_touchpad,
+ &ssam_node_hid_tid1_iid6,
+ &ssam_node_hid_tid1_iid7,
+ &ssam_node_hid_tid1_sysctrl,
+ NULL,
+};
+
/* Devices for Surface Laptop Go. */
static const struct software_node *ssam_node_group_slg1[] = {
&ssam_node_root,
@@ -177,6 +228,15 @@ static const struct software_node *ssam_node_group_sp7[] = {
NULL,
};
+static const struct software_node *ssam_node_group_sp8[] = {
+ &ssam_node_root,
+ &ssam_node_bat_ac,
+ &ssam_node_bat_main,
+ &ssam_node_tmp_pprof,
+ /* TODO: Add support for keyboard cover. */
+ NULL,
+};
+
/* -- Device registry helper functions. ------------------------------------- */
@@ -483,6 +543,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Pro 7+ */
{ "MSHW0119", (unsigned long)ssam_node_group_sp7 },
+ /* Surface Pro 8 */
+ { "MSHW0263", (unsigned long)ssam_node_group_sp8 },
+
/* Surface Book 2 */
{ "MSHW0107", (unsigned long)ssam_node_group_gen5 },
@@ -507,6 +570,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = {
/* Surface Laptop Go 1 */
{ "MSHW0118", (unsigned long)ssam_node_group_slg1 },
+ /* Surface Laptop Studio */
+ { "MSHW0123", (unsigned long)ssam_node_group_sls },
+
{ },
};
MODULE_DEVICE_TABLE(acpi, ssam_platform_hub_match);
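As a reading aid for the node names added above: each SSAM software node name encodes the device address as "ssam:<domain>:<category>:<target>:<instance>:<function>" in hex, so the TID1 touchpad entry is domain 01 (serial hub), category 15 (HID), target 01, instance 03, function 00. A hypothetical extra HID instance on the same target would follow the same pattern; the node below is illustrative only and is not part of the patch.

static const struct software_node ssam_node_hid_tid1_example = {
	.name = "ssam:01:15:01:09:00",	/* instance 09 is made up for this sketch */
	.parent = &ssam_node_root,
};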
diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c
index 86f6991b1215..c1775db29efb 100644
--- a/drivers/platform/surface/surface_gpe.c
+++ b/drivers/platform/surface/surface_gpe.c
@@ -26,6 +26,11 @@ static const struct property_entry lid_device_props_l17[] = {
{},
};
+static const struct property_entry lid_device_props_l4B[] = {
+ PROPERTY_ENTRY_U32("gpe", 0x4B),
+ {},
+};
+
static const struct property_entry lid_device_props_l4D[] = {
PROPERTY_ENTRY_U32("gpe", 0x4D),
{},
@@ -158,6 +163,14 @@ static const struct dmi_system_id dmi_lid_device_table[] = {
},
.driver_data = (void *)lid_device_props_l4D,
},
+ {
+ .ident = "Surface Laptop Studio",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Surface Laptop Studio"),
+ },
+ .driver_data = (void *)lid_device_props_l4B,
+ },
{ }
};
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e21ea3d23e6f..d4c079f4afc6 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -91,6 +91,21 @@ config PEAQ_WMI
help
Say Y here if you want to support WMI-based hotkeys on PEAQ 2-in-1s.
+config NVIDIA_WMI_EC_BACKLIGHT
+ tristate "EC Backlight Driver for Hybrid Graphics Notebook Systems"
+ depends on ACPI_WMI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ This driver provides a sysfs backlight interface for notebook systems
+ which are equipped with NVIDIA hybrid graphics and drive LCD backlight
+ levels through the Embedded Controller (EC).
+
+ Say Y or M here if you want to control the backlight on a notebook
+ system with an EC-driven backlight.
+
+	  If you choose to compile this driver as a module, the module will be
+ called nvidia-wmi-ec-backlight.
+
config XIAOMI_WMI
tristate "Xiaomi WMI key driver"
depends on ACPI_WMI
@@ -426,6 +441,7 @@ config HP_WMI
depends on RFKILL || RFKILL = n
select INPUT_SPARSEKMAP
select ACPI_PLATFORM_PROFILE
+ select HWMON
help
Say Y here if you want to support WMI-based hotkeys on HP laptops and
to read data from WMI such as docking or ambient light sensor state.
@@ -713,6 +729,16 @@ config PCENGINES_APU2
To compile this driver as a module, choose M here: the module
will be called pcengines-apuv2.
+config BARCO_P50_GPIO
+ tristate "Barco P50 GPIO driver for identify LED/button"
+ depends on GPIOLIB
+ help
+	  This driver provides access to the GPIOs for the identify button
+	  and LED present on the Barco P50 board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called barco-p50-gpio.
+
config SAMSUNG_LAPTOP
tristate "Samsung Laptop driver"
depends on RFKILL || RFKILL = n
@@ -905,6 +931,9 @@ config SONYPI_COMPAT
config SYSTEM76_ACPI
tristate "System76 ACPI Driver"
depends on ACPI
+ depends on ACPI_BATTERY
+ depends on HWMON
+ depends on INPUT
select NEW_LEDS
select LEDS_CLASS
select LEDS_TRIGGERS
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 69690e26bb6d..219478061683 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
obj-$(CONFIG_HUAWEI_WMI) += huawei-wmi.o
obj-$(CONFIG_MXM_WMI) += mxm-wmi.o
+obj-$(CONFIG_NVIDIA_WMI_EC_BACKLIGHT) += nvidia-wmi-ec-backlight.o
obj-$(CONFIG_PEAQ_WMI) += peaq-wmi.o
obj-$(CONFIG_XIAOMI_WMI) += xiaomi-wmi.o
obj-$(CONFIG_GIGABYTE_WMI) += gigabyte-wmi.o
@@ -80,6 +81,9 @@ obj-$(CONFIG_XO1_RFKILL) += xo1-rfkill.o
# PC Engines
obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o
+# Barco
+obj-$(CONFIG_BARCO_P50_GPIO) += barco-p50-gpio.o
+
# Samsung
obj-$(CONFIG_SAMSUNG_LAPTOP) += samsung-laptop.o
obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 694b45ed06a2..9c6943e401a6 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -138,7 +138,7 @@ struct event_return_value {
u16 reserved1;
u8 kbd_dock_state;
u8 reserved2;
-} __attribute__((packed));
+} __packed;
/*
* GUID3 Get Device Status device flags
@@ -172,33 +172,33 @@ struct func_input_params {
u8 app_status; /* Acer Device Status. LM, ePM, RF Button... */
u8 app_mask; /* Bit mask to app_status */
u8 reserved;
-} __attribute__((packed));
+} __packed;
struct func_return_value {
u8 error_code; /* Error Code */
u8 ec_return_value; /* EC Return Value */
u16 reserved;
-} __attribute__((packed));
+} __packed;
struct wmid3_gds_set_input_param { /* Set Device Status input parameter */
u8 function_num; /* Function Number */
u8 hotkey_number; /* Hotkey Number */
u16 devices; /* Set Device */
u8 volume_value; /* Volume Value */
-} __attribute__((packed));
+} __packed;
struct wmid3_gds_get_input_param { /* Get Device Status input parameter */
u8 function_num; /* Function Number */
u8 hotkey_number; /* Hotkey Number */
u16 devices; /* Get Device */
-} __attribute__((packed));
+} __packed;
struct wmid3_gds_return_value { /* Get Device Status return value*/
u8 error_code; /* Error Code */
u8 ec_return_value; /* EC Return Value */
u16 devices; /* Current Device Status */
u32 reserved;
-} __attribute__((packed));
+} __packed;
struct hotkey_function_type_aa {
u8 type;
@@ -210,7 +210,7 @@ struct hotkey_function_type_aa {
u16 display_func_bitmap;
u16 others_func_bitmap;
u8 commun_fn_key_number;
-} __attribute__((packed));
+} __packed;
/*
* Interface capability flags
diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c
index fc95620101e8..b7e50ed050a8 100644
--- a/drivers/platform/x86/amd-pmc.c
+++ b/drivers/platform/x86/amd-pmc.c
@@ -17,9 +17,11 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/iopoll.h>
+#include <linux/limits.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/rtc.h>
#include <linux/suspend.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
@@ -29,6 +31,10 @@
#define AMD_PMC_REGISTER_RESPONSE 0x980
#define AMD_PMC_REGISTER_ARGUMENT 0x9BC
+/* PMC Scratch Registers */
+#define AMD_PMC_SCRATCH_REG_CZN 0x94
+#define AMD_PMC_SCRATCH_REG_YC 0xD14
+
/* Base address of SMU for mapping physical address to virtual address */
#define AMD_PMC_SMU_INDEX_ADDRESS 0xB8
#define AMD_PMC_SMU_INDEX_DATA 0xBC
@@ -110,6 +116,10 @@ struct amd_pmc_dev {
u32 base_addr;
u32 cpu_id;
u32 active_ips;
+	/* SMU version information */
+ u16 major;
+ u16 minor;
+ u16 rev;
struct device *dev;
struct mutex lock; /* generic mutex lock */
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -118,7 +128,7 @@ struct amd_pmc_dev {
};
static struct amd_pmc_dev pmc;
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret);
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret);
static inline u32 amd_pmc_reg_read(struct amd_pmc_dev *dev, int reg_offset)
{
@@ -133,7 +143,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3
struct smu_metrics {
u32 table_version;
u32 hint_count;
- u32 s0i3_cyclecount;
+ u32 s0i3_last_entry_status;
u32 timein_s0i2;
u64 timeentering_s0i3_lastcapture;
u64 timeentering_s0i3_totaltime;
@@ -147,6 +157,49 @@ struct smu_metrics {
u64 timecondition_notmet_totaltime[SOC_SUBSYSTEM_IP_MAX];
} __packed;
+static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev)
+{
+ int rc;
+ u32 val;
+
+ rc = amd_pmc_send_cmd(dev, 0, &val, SMU_MSG_GETSMUVERSION, 1);
+ if (rc)
+ return rc;
+
+ dev->major = (val >> 16) & GENMASK(15, 0);
+ dev->minor = (val >> 8) & GENMASK(7, 0);
+ dev->rev = (val >> 0) & GENMASK(7, 0);
+
+ dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev);
+
+ return 0;
+}
+
+static int amd_pmc_idlemask_read(struct amd_pmc_dev *pdev, struct device *dev,
+ struct seq_file *s)
+{
+ u32 val;
+
+ switch (pdev->cpu_id) {
+ case AMD_CPU_ID_CZN:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_CZN);
+ break;
+ case AMD_CPU_ID_YC:
+ val = amd_pmc_reg_read(pdev, AMD_PMC_SCRATCH_REG_YC);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (dev)
+ dev_dbg(pdev->dev, "SMU idlemask s0i3: 0x%x\n", val);
+
+ if (s)
+ seq_printf(s, "SMU idlemask : 0x%x\n", val);
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
static int smu_fw_info_show(struct seq_file *s, void *unused)
{
@@ -162,9 +215,12 @@ static int smu_fw_info_show(struct seq_file *s, void *unused)
seq_puts(s, "\n=== SMU Statistics ===\n");
seq_printf(s, "Table Version: %d\n", table.table_version);
seq_printf(s, "Hint Count: %d\n", table.hint_count);
- seq_printf(s, "S0i3 Cycle Count: %d\n", table.s0i3_cyclecount);
+ seq_printf(s, "Last S0i3 Status: %s\n", table.s0i3_last_entry_status ? "Success" :
+ "Unknown/Fail");
seq_printf(s, "Time (in us) to S0i3: %lld\n", table.timeentering_s0i3_lastcapture);
seq_printf(s, "Time (in us) in S0i3: %lld\n", table.timein_s0i3_lastcapture);
+ seq_printf(s, "Time (in us) to resume from S0i3: %lld\n",
+ table.timeto_resume_to_os_lastcapture);
seq_puts(s, "\n=== Active time (in us) ===\n");
for (idx = 0 ; idx < SOC_SUBSYSTEM_IP_MAX ; idx++) {
@@ -201,6 +257,23 @@ static int s0ix_stats_show(struct seq_file *s, void *unused)
}
DEFINE_SHOW_ATTRIBUTE(s0ix_stats);
+static int amd_pmc_idlemask_show(struct seq_file *s, void *unused)
+{
+ struct amd_pmc_dev *dev = s->private;
+ int rc;
+
+ if (dev->major > 56 || (dev->major >= 55 && dev->minor >= 37)) {
+ rc = amd_pmc_idlemask_read(dev, NULL, s);
+ if (rc)
+ return rc;
+ } else {
+ seq_puts(s, "Unsupported SMU version for Idlemask\n");
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(amd_pmc_idlemask);
+
static void amd_pmc_dbgfs_unregister(struct amd_pmc_dev *dev)
{
debugfs_remove_recursive(dev->dbgfs_dir);
@@ -213,6 +286,8 @@ static void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
&smu_fw_info_fops);
debugfs_create_file("s0ix_stats", 0644, dev->dbgfs_dir, dev,
&s0ix_stats_fops);
+ debugfs_create_file("amd_pmc_idlemask", 0644, dev->dbgfs_dir, dev,
+ &amd_pmc_idlemask_fops);
}
#else
static inline void amd_pmc_dbgfs_register(struct amd_pmc_dev *dev)
@@ -264,7 +339,7 @@ static void amd_pmc_dump_registers(struct amd_pmc_dev *dev)
dev_dbg(dev->dev, "AMD_PMC_REGISTER_MESSAGE:%x\n", value);
}
-static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg, bool ret)
+static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, u32 arg, u32 *data, u8 msg, bool ret)
{
int rc;
u32 val;
@@ -283,7 +358,7 @@ static int amd_pmc_send_cmd(struct amd_pmc_dev *dev, bool set, u32 *data, u8 msg
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_RESPONSE, 0);
/* Write argument into response register */
- amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, set);
+ amd_pmc_reg_write(dev, AMD_PMC_REGISTER_ARGUMENT, arg);
/* Write message ID to message ID register */
amd_pmc_reg_write(dev, AMD_PMC_REGISTER_MESSAGE, msg);
@@ -339,18 +414,73 @@ static int amd_pmc_get_os_hint(struct amd_pmc_dev *dev)
return -EINVAL;
}
+static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg)
+{
+ struct rtc_device *rtc_device;
+ time64_t then, now, duration;
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+ int rc;
+
+ if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53))
+ return 0;
+
+ rtc_device = rtc_class_open("rtc0");
+ if (!rtc_device)
+ return 0;
+ rc = rtc_read_alarm(rtc_device, &alarm);
+ if (rc)
+ return rc;
+ if (!alarm.enabled) {
+ dev_dbg(pdev->dev, "alarm not enabled\n");
+ return 0;
+ }
+ rc = rtc_read_time(rtc_device, &tm);
+ if (rc)
+ return rc;
+ then = rtc_tm_to_time64(&alarm.time);
+ now = rtc_tm_to_time64(&tm);
+	duration = then - now;
+
+ /* in the past */
+ if (then < now)
+ return 0;
+
+	/*
+	 * The duration will be stored in the upper 16 bits of the s0i3 hint
+	 * argument, so timer wakeup from s0i3 is limited to ~18 hours or less.
+	 */
+ if (duration <= 4 || duration > U16_MAX)
+ return -EINVAL;
+
+ *arg |= (duration << 16);
+ rc = rtc_alarm_irq_enable(rtc_device, 0);
+ dev_dbg(pdev->dev, "wakeup timer programmed for %lld seconds\n", duration);
+
+ return rc;
+}
+
static int __maybe_unused amd_pmc_suspend(struct device *dev)
{
struct amd_pmc_dev *pdev = dev_get_drvdata(dev);
int rc;
u8 msg;
+ u32 arg = 1;
/* Reset and Start SMU logging - to monitor the s0i3 stats */
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_RESET, 0);
amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_START, 0);
+ /* Activate CZN specific RTC functionality */
+ if (pdev->cpu_id == AMD_CPU_ID_CZN) {
+ rc = amd_pmc_verify_czn_rtc(pdev, &arg);
+ if (rc < 0)
+ return rc;
+ }
+
+ /* Dump the IdleMask before we send hint to SMU */
+ amd_pmc_idlemask_read(pdev, dev, NULL);
msg = amd_pmc_get_os_hint(pdev);
- rc = amd_pmc_send_cmd(pdev, 1, NULL, msg, 0);
+ rc = amd_pmc_send_cmd(pdev, arg, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "suspend failed\n");
@@ -363,14 +493,17 @@ static int __maybe_unused amd_pmc_resume(struct device *dev)
int rc;
u8 msg;
- /* Let SMU know that we are looking for stats */
- amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
-
msg = amd_pmc_get_os_hint(pdev);
rc = amd_pmc_send_cmd(pdev, 0, NULL, msg, 0);
if (rc)
dev_err(pdev->dev, "resume failed\n");
+ /* Let SMU know that we are looking for stats */
+ amd_pmc_send_cmd(pdev, 0, NULL, SMU_MSG_LOG_DUMP_DATA, 0);
+
+ /* Dump the IdleMask to see the blockers */
+ amd_pmc_idlemask_read(pdev, dev, NULL);
+
return 0;
}
@@ -457,6 +590,7 @@ static int amd_pmc_probe(struct platform_device *pdev)
if (err)
dev_err(dev->dev, "SMU debugging info not supported on this platform\n");
+ amd_pmc_get_smu_version(dev);
platform_set_drvdata(pdev, dev);
amd_pmc_dbgfs_register(dev);
return 0;
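The RTC handling added above packs the wake duration into the upper 16 bits of the s0i3 hint argument, which is why wakeups are capped at U16_MAX seconds (roughly 18.2 hours). A minimal sketch of that packing follows, with an invented helper name, under the assumption that the low bits carry the usual OS hint payload of 1.

static u32 amd_pmc_pack_s0i3_arg(u32 os_hint_arg, u16 wake_secs)
{
	/* Low bits: OS hint payload; upper 16 bits: RTC wake duration in seconds. */
	return os_hint_arg | ((u32)wake_secs << 16);
}

For a 3600 s alarm this yields 0x0e100001, which amd_pmc_suspend() then writes to AMD_PMC_REGISTER_ARGUMENT via amd_pmc_send_cmd().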
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index e14fb5fa7324..8f067ac4e952 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -2169,8 +2169,8 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
static DEVICE_ATTR_RW(throttle_thermal_policy);
/* Platform profile ***********************************************************/
-static int platform_profile_get(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile)
+static int asus_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
{
struct asus_wmi *asus;
int tp;
@@ -2196,8 +2196,8 @@ static int platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int platform_profile_set(struct platform_profile_handler *pprof,
- enum platform_profile_option profile)
+static int asus_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
{
struct asus_wmi *asus;
int tp;
@@ -2236,8 +2236,8 @@ static int platform_profile_setup(struct asus_wmi *asus)
dev_info(dev, "Using throttle_thermal_policy for platform_profile support\n");
- asus->platform_profile_handler.profile_get = platform_profile_get;
- asus->platform_profile_handler.profile_set = platform_profile_set;
+ asus->platform_profile_handler.profile_get = asus_wmi_platform_profile_get;
+ asus->platform_profile_handler.profile_set = asus_wmi_platform_profile_set;
set_bit(PLATFORM_PROFILE_QUIET, asus->platform_profile_handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED,
diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c
new file mode 100644
index 000000000000..f5c72e33f9ae
--- /dev/null
+++ b/drivers/platform/x86/barco-p50-gpio.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Support for EC-connected GPIOs for identify
+ * LED/button on Barco P50 board
+ *
+ * Copyright (C) 2021 Barco NV
+ * Author: Santosh Kumar Yadav <santoshkumar.yadav@barco.com>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio/machine.h>
+#include <linux/input.h>
+
+
+#define DRIVER_NAME "barco-p50-gpio"
+
+/* GPIO lines */
+#define P50_GPIO_LINE_LED 0
+#define P50_GPIO_LINE_BTN 1
+
+/* GPIO IO Ports */
+#define P50_GPIO_IO_PORT_BASE 0x299
+
+#define P50_PORT_DATA 0x00
+#define P50_PORT_CMD 0x01
+
+#define P50_STATUS_OBF 0x01 /* EC output buffer full */
+#define P50_STATUS_IBF 0x02 /* EC input buffer full */
+
+#define P50_CMD_READ 0xa0
+#define P50_CMD_WRITE 0x50
+
+/* EC mailbox registers */
+#define P50_MBOX_REG_CMD 0x00
+#define P50_MBOX_REG_STATUS 0x01
+#define P50_MBOX_REG_PARAM 0x02
+#define P50_MBOX_REG_DATA 0x03
+
+#define P50_MBOX_CMD_READ_GPIO 0x11
+#define P50_MBOX_CMD_WRITE_GPIO 0x12
+#define P50_MBOX_CMD_CLEAR 0xff
+
+#define P50_MBOX_STATUS_SUCCESS 0x01
+
+#define P50_MBOX_PARAM_LED 0x12
+#define P50_MBOX_PARAM_BTN 0x13
+
+
+struct p50_gpio {
+ struct gpio_chip gc;
+ struct mutex lock;
+ unsigned long base;
+ struct platform_device *leds_pdev;
+ struct platform_device *keys_pdev;
+};
+
+static struct platform_device *gpio_pdev;
+
+static int gpio_params[] = {
+ [P50_GPIO_LINE_LED] = P50_MBOX_PARAM_LED,
+ [P50_GPIO_LINE_BTN] = P50_MBOX_PARAM_BTN,
+};
+
+static const char * const gpio_names[] = {
+ [P50_GPIO_LINE_LED] = "identify-led",
+ [P50_GPIO_LINE_BTN] = "identify-button",
+};
+
+
+static struct gpiod_lookup_table p50_gpio_led_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ GPIO_LOOKUP_IDX(DRIVER_NAME, P50_GPIO_LINE_LED, NULL, 0, GPIO_ACTIVE_HIGH),
+ {}
+ }
+};
+
+/* GPIO LEDs */
+static struct gpio_led leds[] = {
+ { .name = "identify" }
+};
+
+static struct gpio_led_platform_data leds_pdata = {
+ .num_leds = ARRAY_SIZE(leds),
+ .leds = leds,
+};
+
+/* GPIO keyboard */
+static struct gpio_keys_button buttons[] = {
+ {
+ .code = KEY_VENDOR,
+ .gpio = P50_GPIO_LINE_BTN,
+ .active_low = 1,
+ .type = EV_KEY,
+ .value = 1,
+ },
+};
+
+static struct gpio_keys_platform_data keys_pdata = {
+ .buttons = buttons,
+ .nbuttons = ARRAY_SIZE(buttons),
+ .poll_interval = 100,
+ .rep = 0,
+ .name = "identify",
+};
+
+
+/* low level access routines */
+
+static int p50_wait_ec(struct p50_gpio *p50, int mask, int expected)
+{
+ int i, val;
+
+ for (i = 0; i < 100; i++) {
+ val = inb(p50->base + P50_PORT_CMD) & mask;
+ if (val == expected)
+ return 0;
+ usleep_range(500, 2000);
+ }
+
+ dev_err(p50->gc.parent, "Timed out waiting for EC (0x%x)\n", val);
+ return -ETIMEDOUT;
+}
+
+
+static int p50_read_mbox_reg(struct p50_gpio *p50, int reg)
+{
+ int ret;
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* clear output buffer flag, prevent unfinished commands */
+ inb(p50->base + P50_PORT_DATA);
+
+ /* cmd/address */
+ outb(P50_CMD_READ | reg, p50->base + P50_PORT_CMD);
+
+ ret = p50_wait_ec(p50, P50_STATUS_OBF, P50_STATUS_OBF);
+ if (ret)
+ return ret;
+
+ return inb(p50->base + P50_PORT_DATA);
+}
+
+static int p50_write_mbox_reg(struct p50_gpio *p50, int reg, int val)
+{
+ int ret;
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* cmd/address */
+ outb(P50_CMD_WRITE | reg, p50->base + P50_PORT_CMD);
+
+ ret = p50_wait_ec(p50, P50_STATUS_IBF, 0);
+ if (ret)
+ return ret;
+
+ /* data */
+ outb(val, p50->base + P50_PORT_DATA);
+
+ return 0;
+}
+
+
+/* mbox routines */
+
+static int p50_wait_mbox_idle(struct p50_gpio *p50)
+{
+ int i, val;
+
+ for (i = 0; i < 1000; i++) {
+ val = p50_read_mbox_reg(p50, P50_MBOX_REG_CMD);
+ /* cmd is 0 when idle */
+ if (val <= 0)
+ return val;
+
+ usleep_range(500, 2000);
+ }
+
+ dev_err(p50->gc.parent, "Timed out waiting for EC mbox idle (CMD: 0x%x)\n", val);
+
+ return -ETIMEDOUT;
+}
+
+static int p50_send_mbox_cmd(struct p50_gpio *p50, int cmd, int param, int data)
+{
+ int ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_DATA, data);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_PARAM, param);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_CMD, cmd);
+ if (ret)
+ return ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_read_mbox_reg(p50, P50_MBOX_REG_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret == P50_MBOX_STATUS_SUCCESS)
+ return 0;
+
+ dev_err(p50->gc.parent, "Mbox command failed (CMD=0x%x STAT=0x%x PARAM=0x%x DATA=0x%x)\n",
+ cmd, ret, param, data);
+
+ return -EIO;
+}
+
+
+/* gpio routines */
+
+static int p50_gpio_get_direction(struct gpio_chip *gc, unsigned int offset)
+{
+ switch (offset) {
+ case P50_GPIO_LINE_BTN:
+ return GPIO_LINE_DIRECTION_IN;
+
+ case P50_GPIO_LINE_LED:
+ return GPIO_LINE_DIRECTION_OUT;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int p50_gpio_get(struct gpio_chip *gc, unsigned int offset)
+{
+ struct p50_gpio *p50 = gpiochip_get_data(gc);
+ int ret;
+
+ mutex_lock(&p50->lock);
+
+ ret = p50_send_mbox_cmd(p50, P50_MBOX_CMD_READ_GPIO, gpio_params[offset], 0);
+ if (ret == 0)
+ ret = p50_read_mbox_reg(p50, P50_MBOX_REG_DATA);
+
+ mutex_unlock(&p50->lock);
+
+ return ret;
+}
+
+static void p50_gpio_set(struct gpio_chip *gc, unsigned int offset, int value)
+{
+ struct p50_gpio *p50 = gpiochip_get_data(gc);
+
+ mutex_lock(&p50->lock);
+
+ p50_send_mbox_cmd(p50, P50_MBOX_CMD_WRITE_GPIO, gpio_params[offset], value);
+
+ mutex_unlock(&p50->lock);
+}
+
+static int p50_gpio_probe(struct platform_device *pdev)
+{
+ struct p50_gpio *p50;
+ struct resource *res;
+ int ret;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Cannot get I/O ports\n");
+ return -ENODEV;
+ }
+
+ if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) {
+ dev_err(&pdev->dev, "Unable to reserve I/O region\n");
+ return -EBUSY;
+ }
+
+ p50 = devm_kzalloc(&pdev->dev, sizeof(*p50), GFP_KERNEL);
+ if (!p50)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, p50);
+ mutex_init(&p50->lock);
+ p50->base = res->start;
+ p50->gc.owner = THIS_MODULE;
+ p50->gc.parent = &pdev->dev;
+ p50->gc.label = dev_name(&pdev->dev);
+ p50->gc.ngpio = ARRAY_SIZE(gpio_names);
+ p50->gc.names = gpio_names;
+ p50->gc.can_sleep = true;
+ p50->gc.base = -1;
+ p50->gc.get_direction = p50_gpio_get_direction;
+ p50->gc.get = p50_gpio_get;
+ p50->gc.set = p50_gpio_set;
+
+
+ /* reset mbox */
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+ ret = p50_write_mbox_reg(p50, P50_MBOX_REG_CMD, P50_MBOX_CMD_CLEAR);
+ if (ret)
+ return ret;
+
+ ret = p50_wait_mbox_idle(p50);
+ if (ret)
+ return ret;
+
+
+ ret = devm_gpiochip_add_data(&pdev->dev, &p50->gc, p50);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Could not register gpiochip: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_add_lookup_table(&p50_gpio_led_table);
+
+ p50->leds_pdev = platform_device_register_data(&pdev->dev,
+ "leds-gpio", PLATFORM_DEVID_NONE, &leds_pdata, sizeof(leds_pdata));
+
+ if (IS_ERR(p50->leds_pdev)) {
+ ret = PTR_ERR(p50->leds_pdev);
+ dev_err(&pdev->dev, "Could not register leds-gpio: %d\n", ret);
+ goto err_leds;
+ }
+
+ /* gpio-keys-polled uses old-style gpio interface, pass the right identifier */
+ buttons[0].gpio += p50->gc.base;
+
+ p50->keys_pdev =
+ platform_device_register_data(&pdev->dev, "gpio-keys-polled",
+ PLATFORM_DEVID_NONE,
+ &keys_pdata, sizeof(keys_pdata));
+
+ if (IS_ERR(p50->keys_pdev)) {
+ ret = PTR_ERR(p50->keys_pdev);
+ dev_err(&pdev->dev, "Could not register gpio-keys-polled: %d\n", ret);
+ goto err_keys;
+ }
+
+ return 0;
+
+err_keys:
+ platform_device_unregister(p50->leds_pdev);
+err_leds:
+ gpiod_remove_lookup_table(&p50_gpio_led_table);
+
+ return ret;
+}
+
+static int p50_gpio_remove(struct platform_device *pdev)
+{
+ struct p50_gpio *p50 = platform_get_drvdata(pdev);
+
+ platform_device_unregister(p50->keys_pdev);
+ platform_device_unregister(p50->leds_pdev);
+
+ gpiod_remove_lookup_table(&p50_gpio_led_table);
+
+ return 0;
+}
+
+static struct platform_driver p50_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = p50_gpio_probe,
+ .remove = p50_gpio_remove,
+};
+
+/* Board setup */
+static const struct dmi_system_id dmi_ids[] __initconst = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Barco"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_FAMILY, "P50")
+ },
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(dmi, dmi_ids);
+
+static int __init p50_module_init(void)
+{
+ struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1);
+
+ if (!dmi_first_match(dmi_ids))
+ return -ENODEV;
+
+ platform_driver_register(&p50_gpio_driver);
+
+ gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1);
+ if (IS_ERR(gpio_pdev)) {
+ pr_err("failed registering %s: %ld\n", DRIVER_NAME, PTR_ERR(gpio_pdev));
+ platform_driver_unregister(&p50_gpio_driver);
+ return PTR_ERR(gpio_pdev);
+ }
+
+ return 0;
+}
+
+static void __exit p50_module_exit(void)
+{
+ platform_device_unregister(gpio_pdev);
+ platform_driver_unregister(&p50_gpio_driver);
+}
+
+module_init(p50_module_init);
+module_exit(p50_module_exit);
+
+MODULE_AUTHOR("Santosh Kumar Yadav, Barco NV <santoshkumar.yadav@barco.com>");
+MODULE_DESCRIPTION("Barco P50 identify GPIOs driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/dell-wmi-base.c b/drivers/platform/x86/dell/dell-wmi-base.c
index 089c125e18f7..e07d3ba85a3f 100644
--- a/drivers/platform/x86/dell/dell-wmi-base.c
+++ b/drivers/platform/x86/dell/dell-wmi-base.c
@@ -40,6 +40,7 @@ static bool wmi_requires_smbios_request;
struct dell_wmi_priv {
struct input_dev *input_dev;
+ struct input_dev *tabletswitch_dev;
u32 interface_version;
};
@@ -309,6 +310,9 @@ static const struct key_entry dell_wmi_keymap_type_0010[] = {
* Keymap for WMI events of type 0x0011
*/
static const struct key_entry dell_wmi_keymap_type_0011[] = {
+	/* Reflex keyboard switch on 2-in-1 devices */
+ { KE_IGNORE, 0xe070, { KEY_RESERVED } },
+
/* Battery unplugged */
{ KE_IGNORE, 0xfff0, { KEY_RESERVED } },
@@ -340,21 +344,55 @@ static const struct key_entry dell_wmi_keymap_type_0011[] = {
* They are events with extended data
*/
static const struct key_entry dell_wmi_keymap_type_0012[] = {
+ /* Ultra-performance mode switch request */
+ { KE_IGNORE, 0x000d, { KEY_RESERVED } },
+
/* Fn-lock button pressed */
{ KE_IGNORE, 0xe035, { KEY_RESERVED } },
};
-static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code)
+static void dell_wmi_switch_event(struct input_dev **subdev,
+ const char *devname,
+ int switchid,
+ int value)
+{
+ if (!*subdev) {
+ struct input_dev *dev = input_allocate_device();
+
+ if (!dev) {
+ pr_warn("could not allocate device for %s\n", devname);
+ return;
+ }
+		__set_bit(EV_SW, dev->evbit);
+		__set_bit(switchid, dev->swbit);
+
+		dev->name = devname;
+		dev->id.bustype = BUS_HOST;
+ if (input_register_device(dev)) {
+ input_free_device(dev);
+ pr_warn("could not register device for %s\n", devname);
+ return;
+ }
+ *subdev = dev;
+ }
+
+ input_report_switch(*subdev, switchid, value);
+ input_sync(*subdev);
+}
+
+static int dell_wmi_process_key(struct wmi_device *wdev, int type, int code, u16 *buffer, int remaining)
{
struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
const struct key_entry *key;
+ int used = 0;
+ int value = 1;
key = sparse_keymap_entry_from_scancode(priv->input_dev,
(type << 16) | code);
if (!key) {
pr_info("Unknown key with type 0x%04x and code 0x%04x pressed\n",
type, code);
- return;
+ return 0;
}
pr_debug("Key with type 0x%04x and code 0x%04x pressed\n", type, code);
@@ -363,16 +401,27 @@ static void dell_wmi_process_key(struct wmi_device *wdev, int type, int code)
if ((key->keycode == KEY_BRIGHTNESSUP ||
key->keycode == KEY_BRIGHTNESSDOWN) &&
acpi_video_handles_brightness_key_presses())
- return;
+ return 0;
if (type == 0x0000 && code == 0xe025 && !wmi_requires_smbios_request)
- return;
+ return 0;
- if (key->keycode == KEY_KBDILLUMTOGGLE)
+ if (key->keycode == KEY_KBDILLUMTOGGLE) {
dell_laptop_call_notifier(
DELL_LAPTOP_KBD_BACKLIGHT_BRIGHTNESS_CHANGED, NULL);
+ } else if (type == 0x0011 && code == 0xe070 && remaining > 0) {
+ dell_wmi_switch_event(&priv->tabletswitch_dev,
+ "Dell tablet mode switch",
+ SW_TABLET_MODE, !buffer[0]);
+ return 1;
+ } else if (type == 0x0012 && code == 0x000d && remaining > 0) {
+ value = (buffer[2] == 2);
+ used = 1;
+ }
- sparse_keymap_report_entry(priv->input_dev, key, 1, true);
+ sparse_keymap_report_entry(priv->input_dev, key, value, true);
+
+ return used;
}
static void dell_wmi_notify(struct wmi_device *wdev,
@@ -430,21 +479,26 @@ static void dell_wmi_notify(struct wmi_device *wdev,
case 0x0000: /* One key pressed or event occurred */
if (len > 2)
dell_wmi_process_key(wdev, buffer_entry[1],
- buffer_entry[2]);
+ buffer_entry[2],
+ buffer_entry + 3,
+ len - 3);
/* Extended data is currently ignored */
break;
case 0x0010: /* Sequence of keys pressed */
case 0x0011: /* Sequence of events occurred */
for (i = 2; i < len; ++i)
- dell_wmi_process_key(wdev, buffer_entry[1],
- buffer_entry[i]);
+ i += dell_wmi_process_key(wdev, buffer_entry[1],
+ buffer_entry[i],
+ buffer_entry + i,
+ len - i - 1);
break;
case 0x0012:
if ((len > 4) && dell_privacy_process_event(buffer_entry[1], buffer_entry[3],
buffer_entry[4]))
/* dell_privacy_process_event has handled the event */;
else if (len > 2)
- dell_wmi_process_key(wdev, buffer_entry[1], buffer_entry[2]);
+ dell_wmi_process_key(wdev, buffer_entry[1], buffer_entry[2],
+ buffer_entry + 3, len - 3);
break;
default: /* Unknown event */
pr_info("Unknown WMI event type 0x%x\n",
@@ -661,6 +715,8 @@ static void dell_wmi_input_destroy(struct wmi_device *wdev)
struct dell_wmi_priv *priv = dev_get_drvdata(&wdev->dev);
input_unregister_device(priv->input_dev);
+ if (priv->tabletswitch_dev)
+ input_unregister_device(priv->tabletswitch_dev);
}
/*
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 027a1467d009..48a46466f086 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -22,9 +22,11 @@
#include <linux/input/sparse-keymap.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>
+#include <linux/hwmon.h>
#include <linux/acpi.h>
#include <linux/rfkill.h>
#include <linux/string.h>
+#include <linux/dmi.h>
MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>");
MODULE_DESCRIPTION("HP laptop WMI hotkeys driver");
@@ -39,6 +41,25 @@ MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=aut
#define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C"
#define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4"
+#define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95
+
+/*
+ * DMI board names of devices that should use the OMEN-specific path for
+ * thermal profiles. The list was obtained by inspecting the Windows OMEN
+ * Command Center app, which parses a JSON file to determine device
+ * capabilities. A device is considered an OMEN if the DisplayName in that
+ * list contains "OMEN", and it supports thermal profiles if the "Feature"
+ * array contains "PerformanceControl".
+ */
+static const char * const omen_thermal_profile_boards[] = {
+ "84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
+ "8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
+ "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
+ "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
+ "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
+ "8917", "8918", "8949", "894A", "89EB"
+};
enum hp_wmi_radio {
HPWMI_WIFI = 0x0,
@@ -89,10 +110,18 @@ enum hp_wmi_commandtype {
HPWMI_THERMAL_PROFILE_QUERY = 0x4c,
};
+enum hp_wmi_gm_commandtype {
+ HPWMI_FAN_SPEED_GET_QUERY = 0x11,
+ HPWMI_SET_PERFORMANCE_MODE = 0x1A,
+ HPWMI_FAN_SPEED_MAX_GET_QUERY = 0x26,
+ HPWMI_FAN_SPEED_MAX_SET_QUERY = 0x27,
+};
+
enum hp_wmi_command {
HPWMI_READ = 0x01,
HPWMI_WRITE = 0x02,
HPWMI_ODM = 0x03,
+ HPWMI_GM = 0x20008,
};
enum hp_wmi_hardware_mask {
@@ -120,6 +149,12 @@ enum hp_wireless2_bits {
HPWMI_POWER_FW_OR_HW = HPWMI_POWER_BIOS | HPWMI_POWER_HARD,
};
+enum hp_thermal_profile_omen {
+ HP_OMEN_THERMAL_PROFILE_DEFAULT = 0x00,
+ HP_OMEN_THERMAL_PROFILE_PERFORMANCE = 0x01,
+ HP_OMEN_THERMAL_PROFILE_COOL = 0x02,
+};
+
enum hp_thermal_profile {
HP_THERMAL_PROFILE_PERFORMANCE = 0x00,
HP_THERMAL_PROFILE_DEFAULT = 0x01,
@@ -279,6 +314,24 @@ out_free:
return ret;
}
+static int hp_wmi_get_fan_speed(int fan)
+{
+ u8 fsh, fsl;
+ char fan_data[4] = { fan, 0, 0, 0 };
+
+ int ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_GET_QUERY, HPWMI_GM,
+ &fan_data, sizeof(fan_data),
+ sizeof(fan_data));
+
+ if (ret != 0)
+ return -EINVAL;
+
+ fsh = fan_data[2];
+ fsl = fan_data[3];
+
+ return (fsh << 8) | fsl;
+}
+
static int hp_wmi_read_int(int query)
{
int val = 0, ret;
@@ -302,6 +355,73 @@ static int hp_wmi_hw_state(int mask)
return !!(state & mask);
}
+static int omen_thermal_profile_set(int mode)
+{
+ char buffer[2] = {0, mode};
+ int ret;
+
+ if (mode < 0 || mode > 2)
+ return -EINVAL;
+
+ ret = hp_wmi_perform_query(HPWMI_SET_PERFORMANCE_MODE, HPWMI_GM,
+ &buffer, sizeof(buffer), sizeof(buffer));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return mode;
+}
+
+static bool is_omen_thermal_profile(void)
+{
+ const char *board_name = dmi_get_system_info(DMI_BOARD_NAME);
+
+ if (!board_name)
+ return false;
+
+ return match_string(omen_thermal_profile_boards,
+ ARRAY_SIZE(omen_thermal_profile_boards),
+ board_name) >= 0;
+}
+
+static int omen_thermal_profile_get(void)
+{
+ u8 data;
+
+ int ret = ec_read(HP_OMEN_EC_THERMAL_PROFILE_OFFSET, &data);
+
+ if (ret)
+ return ret;
+
+ return data;
+}
+
+static int hp_wmi_fan_speed_max_set(int enabled)
+{
+ int ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_SET_QUERY, HPWMI_GM,
+ &enabled, sizeof(enabled), sizeof(enabled));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return enabled;
+}
+
+static int hp_wmi_fan_speed_max_get(void)
+{
+ int val = 0, ret;
+
+ ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM,
+ &val, sizeof(val), sizeof(val));
+
+ if (ret)
+ return ret < 0 ? ret : -EINVAL;
+
+ return val;
+}
+
static int __init hp_wmi_bios_2008_later(void)
{
int state = 0;
@@ -878,6 +998,58 @@ fail:
return err;
}
+static int platform_profile_omen_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
+{
+ int tp;
+
+ tp = omen_thermal_profile_get();
+ if (tp < 0)
+ return tp;
+
+ switch (tp) {
+ case HP_OMEN_THERMAL_PROFILE_PERFORMANCE:
+ *profile = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case HP_OMEN_THERMAL_PROFILE_DEFAULT:
+ *profile = PLATFORM_PROFILE_BALANCED;
+ break;
+ case HP_OMEN_THERMAL_PROFILE_COOL:
+ *profile = PLATFORM_PROFILE_COOL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int platform_profile_omen_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
+{
+ int err, tp;
+
+ switch (profile) {
+ case PLATFORM_PROFILE_PERFORMANCE:
+ tp = HP_OMEN_THERMAL_PROFILE_PERFORMANCE;
+ break;
+ case PLATFORM_PROFILE_BALANCED:
+ tp = HP_OMEN_THERMAL_PROFILE_DEFAULT;
+ break;
+ case PLATFORM_PROFILE_COOL:
+ tp = HP_OMEN_THERMAL_PROFILE_COOL;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
static int thermal_profile_get(void)
{
return hp_wmi_read_int(HPWMI_THERMAL_PROFILE_QUERY);
@@ -889,8 +1061,8 @@ static int thermal_profile_set(int thermal_profile)
sizeof(thermal_profile), 0);
}
-static int platform_profile_get(struct platform_profile_handler *pprof,
- enum platform_profile_option *profile)
+static int hp_wmi_platform_profile_get(struct platform_profile_handler *pprof,
+ enum platform_profile_option *profile)
{
int tp;
@@ -915,8 +1087,8 @@ static int platform_profile_get(struct platform_profile_handler *pprof,
return 0;
}
-static int platform_profile_set(struct platform_profile_handler *pprof,
- enum platform_profile_option profile)
+static int hp_wmi_platform_profile_set(struct platform_profile_handler *pprof,
+ enum platform_profile_option profile)
{
int err, tp;
@@ -945,20 +1117,39 @@ static int thermal_profile_setup(void)
{
int err, tp;
- tp = thermal_profile_get();
- if (tp < 0)
- return tp;
+ if (is_omen_thermal_profile()) {
+ tp = omen_thermal_profile_get();
+ if (tp < 0)
+ return tp;
- /*
- * call thermal profile write command to ensure that the firmware correctly
- * sets the OEM variables for the DPTF
- */
- err = thermal_profile_set(tp);
- if (err)
- return err;
+ /*
+ * call thermal profile write command to ensure that the
+ * firmware correctly sets the OEM variables
+ */
+
+ err = omen_thermal_profile_set(tp);
+ if (err < 0)
+ return err;
- platform_profile_handler.profile_get = platform_profile_get,
- platform_profile_handler.profile_set = platform_profile_set,
+ platform_profile_handler.profile_get = platform_profile_omen_get;
+ platform_profile_handler.profile_set = platform_profile_omen_set;
+ } else {
+ tp = thermal_profile_get();
+
+ if (tp < 0)
+ return tp;
+
+ /*
+ * call thermal profile write command to ensure that the
+ * firmware correctly sets the OEM variables for the DPTF
+ */
+ err = thermal_profile_set(tp);
+ if (err)
+ return err;
+
+ platform_profile_handler.profile_get = hp_wmi_platform_profile_get;
+ platform_profile_handler.profile_set = hp_wmi_platform_profile_set;
+ }
set_bit(PLATFORM_PROFILE_COOL, platform_profile_handler.choices);
set_bit(PLATFORM_PROFILE_BALANCED, platform_profile_handler.choices);
@@ -973,8 +1164,11 @@ static int thermal_profile_setup(void)
return 0;
}
+static int hp_wmi_hwmon_init(void);
+
static int __init hp_wmi_bios_setup(struct platform_device *device)
{
+ int err;
/* clear detected rfkill devices */
wifi_rfkill = NULL;
bluetooth_rfkill = NULL;
@@ -984,6 +1178,11 @@ static int __init hp_wmi_bios_setup(struct platform_device *device)
if (hp_wmi_rfkill_setup(device))
hp_wmi_rfkill2_setup(device);
+ err = hp_wmi_hwmon_init();
+
+ if (err < 0)
+ return err;
+
thermal_profile_setup();
return 0;
@@ -1068,6 +1267,112 @@ static struct platform_driver hp_wmi_driver = {
.remove = __exit_p(hp_wmi_bios_remove),
};
+static umode_t hp_wmi_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ switch (type) {
+ case hwmon_pwm:
+ return 0644;
+ case hwmon_fan:
+ if (hp_wmi_get_fan_speed(channel) >= 0)
+ return 0444;
+ break;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int hp_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ ret = hp_wmi_get_fan_speed(channel);
+
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return 0;
+ case hwmon_pwm:
+ switch (hp_wmi_fan_speed_max_get()) {
+ case 0:
+ /* 0 is automatic fan, which is 2 for hwmon */
+ *val = 2;
+ return 0;
+ case 1:
+		case 1:
+			/*
+			 * 1 is max fan, which is 0
+			 * (no fan speed control) for hwmon
+			 */
+ *val = 0;
+ return 0;
+ default:
+ /* shouldn't happen */
+ return -ENODATA;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hp_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ switch (type) {
+ case hwmon_pwm:
+ switch (val) {
+ case 0:
+ /* 0 is no fan speed control (max), which is 1 for us */
+ return hp_wmi_fan_speed_max_set(1);
+ case 2:
+ /* 2 is automatic speed control, which is 0 for us */
+ return hp_wmi_fan_speed_max_set(0);
+ default:
+ /* we don't support manual fan speed control */
+ return -EINVAL;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_channel_info *info[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_ENABLE),
+ NULL
+};
+
+static const struct hwmon_ops ops = {
+ .is_visible = hp_wmi_hwmon_is_visible,
+ .read = hp_wmi_hwmon_read,
+ .write = hp_wmi_hwmon_write,
+};
+
+static const struct hwmon_chip_info chip_info = {
+ .ops = &ops,
+ .info = info,
+};
+
+static int hp_wmi_hwmon_init(void)
+{
+ struct device *dev = &hp_wmi_platform_dev->dev;
+ struct device *hwmon;
+
+ hwmon = devm_hwmon_device_register_with_info(dev, "hp", &hp_wmi_driver,
+ &chip_info, NULL);
+
+ if (IS_ERR(hwmon)) {
+ dev_err(dev, "Could not register hp hwmon device\n");
+ return PTR_ERR(hwmon);
+ }
+
+ return 0;
+}
+
static int __init hp_wmi_init(void)
{
int event_capable = wmi_has_guid(HPWMI_EVENT_GUID);
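To summarize the fan-control convention implemented by hp_wmi_hwmon_read() and hp_wmi_hwmon_write() above: pwm1_enable 2 maps to the BIOS "automatic" value 0, pwm1_enable 0 maps to the BIOS "maximum speed" value 1, and manual duty cycles are rejected. The helper below is only a sketch of that mapping, not part of the driver.

static int hp_wmi_pwm_enable_to_bios(long pwm_enable)
{
	switch (pwm_enable) {
	case 2:
		return 0;	/* automatic fan control */
	case 0:
		return 1;	/* fans pinned at maximum speed */
	default:
		return -EINVAL;	/* manual duty cycles are not supported */
	}
}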
diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c
index a50153ecd560..4956a1df5b90 100644
--- a/drivers/platform/x86/i2c-multi-instantiate.c
+++ b/drivers/platform/x86/i2c-multi-instantiate.c
@@ -139,29 +139,13 @@ static const struct i2c_inst_data bsg2150_data[] = {
{}
};
-/*
- * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt
- * issues. The most common problem seen is interrupt flood.
- *
- * There are at least two known causes. Firstly, on some boards, the
- * I2CSerialBus resource index does not match the Interrupt resource, i.e. they
- * are not one-to-one mapped like in the array below. Secondly, on some boards
- * the IRQ line from the PD controller is not actually connected at all. But the
- * interrupt flood is also seen on some boards where those are not a problem, so
- * there are some other problems as well.
- *
- * Because of the issues with the interrupt, the device is disabled for now. If
- * you wish to debug the issues, uncomment the below, and add an entry for the
- * INT3515 device to the i2c_multi_instance_ids table.
- *
- * static const struct i2c_inst_data int3515_data[] = {
- * { "tps6598x", IRQ_RESOURCE_APIC, 0 },
- * { "tps6598x", IRQ_RESOURCE_APIC, 1 },
- * { "tps6598x", IRQ_RESOURCE_APIC, 2 },
- * { "tps6598x", IRQ_RESOURCE_APIC, 3 },
- * { }
- * };
- */
+static const struct i2c_inst_data int3515_data[] = {
+ { "tps6598x", IRQ_RESOURCE_APIC, 0 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 1 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 2 },
+ { "tps6598x", IRQ_RESOURCE_APIC, 3 },
+ {}
+};
/*
* Note new device-ids must also be added to i2c_multi_instantiate_ids in
@@ -170,6 +154,7 @@ static const struct i2c_inst_data bsg2150_data[] = {
static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = {
{ "BSG1160", (unsigned long)bsg1160_data },
{ "BSG2150", (unsigned long)bsg2150_data },
+ { "INT3515", (unsigned long)int3515_data },
{ }
};
MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids);
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index e7a1299e3776..3ccb7b71dfb1 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -868,6 +868,18 @@ static void dytc_profile_refresh(struct ideapad_private *priv)
}
}
+static const struct dmi_system_id ideapad_dytc_v4_allow_table[] = {
+ {
+ /* Ideapad 5 Pro 16ACH6 */
+ .ident = "LENOVO 82L5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "82L5")
+ }
+ },
+ {}
+};
+
static int ideapad_dytc_profile_init(struct ideapad_private *priv)
{
int err, dytc_version;
@@ -882,12 +894,21 @@ static int ideapad_dytc_profile_init(struct ideapad_private *priv)
return err;
/* Check DYTC is enabled and supports mode setting */
- if (!test_bit(DYTC_QUERY_ENABLE_BIT, &output))
+ if (!test_bit(DYTC_QUERY_ENABLE_BIT, &output)) {
+ dev_info(&priv->platform_device->dev, "DYTC_QUERY_ENABLE_BIT returned false\n");
return -ENODEV;
+ }
dytc_version = (output >> DYTC_QUERY_REV_BIT) & 0xF;
- if (dytc_version < 5)
- return -ENODEV;
+
+ if (dytc_version < 5) {
+ if (dytc_version < 4 || !dmi_check_system(ideapad_dytc_v4_allow_table)) {
+ dev_info(&priv->platform_device->dev,
+ "DYTC_VERSION is less than 4 or is not allowed: %d\n",
+ dytc_version);
+ return -ENODEV;
+ }
+ }
priv->dytc = kzalloc(sizeof(*priv->dytc), GFP_KERNEL);
if (!priv->dytc)
@@ -1534,17 +1555,13 @@ static void ideapad_check_features(struct ideapad_private *priv)
static int ideapad_acpi_add(struct platform_device *pdev)
{
+ struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
struct ideapad_private *priv;
- struct acpi_device *adev;
acpi_status status;
unsigned long cfg;
int err, i;
- err = acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev);
- if (err)
- return -ENODEV;
-
- if (eval_int(adev->handle, "_CFG", &cfg))
+ if (!adev || eval_int(adev->handle, "_CFG", &cfg))
return -ENODEV;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
diff --git a/drivers/platform/x86/intel/Kconfig b/drivers/platform/x86/intel/Kconfig
index 0b21468e1bd0..38ce3e344589 100644
--- a/drivers/platform/x86/intel/Kconfig
+++ b/drivers/platform/x86/intel/Kconfig
@@ -102,6 +102,22 @@ config INTEL_CHTDC_TI_PWRBTN
To compile this driver as a module, choose M here: the module
will be called intel_chtdc_ti_pwrbtn.
+config INTEL_ISHTP_ECLITE
+ tristate "Intel ISHTP eclite controller Driver"
+ depends on INTEL_ISH_HID
+ depends on ACPI
+ help
+ This driver is for accessing the PSE (Programmable Service Engine) -
+ an Embedded Controller-like IP - using ISHTP (Integrated Sensor Hub
+ Transport Protocol) to get battery, thermal and UCSI (USB Type-C
+ Connector System Software Interface) related data from the platform.
+ Users who don't want to use a discrete Embedded Controller on Intel's
+ Elkhartlake platform can leverage the ECLite integrated solution,
+ which is part of the PSE subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called intel_ishtp_eclite.
+
config INTEL_MRFLD_PWRBTN
tristate "Intel Merrifield Basin Cove power button driver"
depends on INTEL_SOC_PMIC_MRFLD
diff --git a/drivers/platform/x86/intel/Makefile b/drivers/platform/x86/intel/Makefile
index 8b3a3f7bab49..7c24be2423d8 100644
--- a/drivers/platform/x86/intel/Makefile
+++ b/drivers/platform/x86/intel/Makefile
@@ -21,6 +21,7 @@ intel-vbtn-y := vbtn.o
obj-$(CONFIG_INTEL_VBTN) += intel-vbtn.o
# Intel miscellaneous drivers
+obj-$(CONFIG_INTEL_ISHTP_ECLITE) += ishtp_eclite.o
intel_int0002_vgpio-y := int0002_vgpio.o
obj-$(CONFIG_INTEL_INT0002_VGPIO) += intel_int0002_vgpio.o
intel_oaktrail-y := oaktrail.o
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 569342aa8926..617dbf98980e 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -34,13 +34,11 @@
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_data/x86/soc.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/suspend.h>
-#include <asm/cpu_device_id.h>
-#include <asm/intel-family.h>
-
#define DRV_NAME "INT0002 Virtual GPIO"
/* For some reason the virtual GPIO pin tied to the GPE is numbered pin 2 */
@@ -151,12 +149,6 @@ static struct irq_chip int0002_irqchip = {
.irq_set_wake = int0002_irq_set_wake,
};
-static const struct x86_cpu_id int0002_cpu_ids[] = {
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT, NULL),
- X86_MATCH_INTEL_FAM6_MODEL(ATOM_AIRMONT, NULL),
- {}
-};
-
static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
unsigned long *valid_mask,
unsigned int ngpios)
@@ -167,15 +159,13 @@ static void int0002_init_irq_valid_mask(struct gpio_chip *chip,
static int int0002_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- const struct x86_cpu_id *cpu_id;
struct int0002_data *int0002;
struct gpio_irq_chip *girq;
struct gpio_chip *chip;
int irq, ret;
/* Menlow has a different INT0002 device? <sigh> */
- cpu_id = x86_match_cpu(int0002_cpu_ids);
- if (!cpu_id)
+ if (!soc_intel_is_byt() && !soc_intel_is_cht())
return -ENODEV;
irq = platform_get_irq(pdev, 0);
diff --git a/drivers/platform/x86/intel/ishtp_eclite.c b/drivers/platform/x86/intel/ishtp_eclite.c
new file mode 100644
index 000000000000..12fc98a48657
--- /dev/null
+++ b/drivers/platform/x86/intel/ishtp_eclite.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel ECLite opregion driver for talking to ECLite firmware running on
+ * Intel Integrated Sensor Hub (ISH) using ISH Transport Protocol (ISHTP)
+ *
+ * Copyright (c) 2021, Intel Corporation.
+ */
+
+#include <linux/acpi.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/intel-ish-client-if.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+#include <linux/types.h>
+#include <linux/uuid.h>
+#include <linux/uaccess.h>
+
+#define ECLITE_DATA_OPREGION_ID 0x9E
+#define ECLITE_CMD_OPREGION_ID 0x9F
+
+#define ECL_MSG_DATA 0x1
+#define ECL_MSG_EVENT 0x2
+
+#define ECL_ISH_READ 0x1
+#define ECL_ISH_WRITE 0x2
+#define ECL_ISH_HEADER_VERSION 0
+
+#define ECL_CL_RX_RING_SIZE 16
+#define ECL_CL_TX_RING_SIZE 8
+
+#define ECL_DATA_OPR_BUFLEN 384
+#define ECL_EVENTS_NOTIFY 333
+
+#define cmd_opr_offsetof(element) offsetof(struct opregion_cmd, element)
+#define cl_data_to_dev(opr_dev) ishtp_device((opr_dev)->cl_device)
+
+#ifndef BITS_TO_BYTES
+#define BITS_TO_BYTES(x) ((x) / 8)
+#endif
+
+struct opregion_cmd {
+ unsigned int command;
+ unsigned int offset;
+ unsigned int length;
+ unsigned int event_id;
+};
+
+struct opregion_data {
+ char data[ECL_DATA_OPR_BUFLEN];
+};
+
+struct opregion_context {
+ struct opregion_cmd cmd_area;
+ struct opregion_data data_area;
+};
+
+struct ecl_message_header {
+ unsigned int version:2;
+ unsigned int data_type:2;
+ unsigned int request_type:2;
+ unsigned int offset:9;
+ unsigned int data_len:9;
+ unsigned int event:8;
+};
+
+struct ecl_message {
+ struct ecl_message_header header;
+ char payload[ECL_DATA_OPR_BUFLEN];
+};
+
+struct ishtp_opregion_dev {
+ struct opregion_context opr_context;
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_cl_rb *rb;
+ struct acpi_device *adev;
+ unsigned int dsm_event_id;
+ unsigned int ish_link_ready;
+ unsigned int ish_read_done;
+ unsigned int acpi_init_done;
+ wait_queue_head_t read_wait;
+ struct work_struct event_work;
+ struct work_struct reset_work;
+ /* lock for opregion context */
+ struct mutex lock;
+
+};
+
+/* eclite ishtp client UUID: 6a19cc4b-d760-4de3-b14d-f25ebd0fbcd9 */
+static const guid_t ecl_ishtp_guid =
+ GUID_INIT(0x6a19cc4b, 0xd760, 0x4de3,
+ 0xb1, 0x4d, 0xf2, 0x5e, 0xbd, 0xf, 0xbc, 0xd9);
+
+/* ACPI DSM UUID: 91d936a7-1f01-49c6-a6b4-72f00ad8d8a5 */
+static const guid_t ecl_acpi_guid =
+ GUID_INIT(0x91d936a7, 0x1f01, 0x49c6, 0xa6,
+ 0xb4, 0x72, 0xf0, 0x0a, 0xd8, 0xd8, 0xa5);
+
+/**
+ * ecl_ish_cl_read() - Read data from eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function issues a read request to the eclite FW and waits until it
+ * receives a response. When the response is received, the read data is
+ * copied to the opregion data buffer.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int ecl_ish_cl_read(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header header;
+ int len, rv;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ header.version = ECL_ISH_HEADER_VERSION;
+ header.data_type = ECL_MSG_DATA;
+ header.request_type = ECL_ISH_READ;
+ header.offset = opr_dev->opr_context.cmd_area.offset;
+ header.data_len = opr_dev->opr_context.cmd_area.length;
+ header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(header);
+
+ opr_dev->ish_read_done = false;
+ rv = ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&header, len);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ish-read : send failed\n");
+ return -EIO;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Req: off : %x, len : %x\n",
+ header.offset,
+ header.data_len);
+
+ rv = wait_event_interruptible_timeout(opr_dev->read_wait,
+ opr_dev->ish_read_done,
+ 2 * HZ);
+ if (!rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rd] No response from firmware\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * ecl_ish_cl_write() - Write data to eclite FW
+ *
+ * @opr_dev: pointer to opregion device
+ *
+ * This function writes the requested range of the opregion data buffer
+ * to the eclite FW.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int ecl_ish_cl_write(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message message;
+ int len;
+
+ if (!opr_dev->ish_link_ready)
+ return -EIO;
+
+ if ((opr_dev->opr_context.cmd_area.offset +
+ opr_dev->opr_context.cmd_area.length) > ECL_DATA_OPR_BUFLEN) {
+ return -EINVAL;
+ }
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = opr_dev->opr_context.cmd_area.offset;
+ message.header.data_len = opr_dev->opr_context.cmd_area.length;
+ message.header.event = opr_dev->opr_context.cmd_area.event_id;
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ memcpy(message.payload,
+ opr_dev->opr_context.data_area.data + message.header.offset,
+ message.header.data_len);
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_wr] off : %x, len : %x\n",
+ message.header.offset,
+ message.header.data_len);
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
+static acpi_status
+ecl_opregion_cmd_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct opregion_cmd *cmd;
+ acpi_status status = AE_OK;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (function == ACPI_READ)
+ return AE_ERROR;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ cmd = &opr_dev->opr_context.cmd_area;
+
+ switch (address) {
+ case cmd_opr_offsetof(command):
+ cmd->command = (u32)*value64;
+
+ if (cmd->command == ECL_ISH_READ)
+ status = ecl_ish_cl_read(opr_dev);
+ else if (cmd->command == ECL_ISH_WRITE)
+ status = ecl_ish_cl_write(opr_dev);
+ else
+ status = AE_ERROR;
+ break;
+ case cmd_opr_offsetof(offset):
+ cmd->offset = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(length):
+ cmd->length = (u32)*value64;
+ break;
+ case cmd_opr_offsetof(event_id):
+ cmd->event_id = (u32)*value64;
+ break;
+ default:
+ status = AE_ERROR;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return status;
+}
+
+static acpi_status
+ecl_opregion_data_handler(u32 function, acpi_physical_address address,
+ u32 bits, u64 *value64,
+ void *handler_context, void *region_context)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ unsigned int bytes = BITS_TO_BYTES(bits);
+ void *data_addr;
+
+ if (!region_context || !value64)
+ return AE_BAD_PARAMETER;
+
+ if (address + bytes > ECL_DATA_OPR_BUFLEN)
+ return AE_BAD_PARAMETER;
+
+ opr_dev = (struct ishtp_opregion_dev *)region_context;
+
+ mutex_lock(&opr_dev->lock);
+
+ data_addr = &opr_dev->opr_context.data_area.data[address];
+
+ if (function == ACPI_READ) {
+ memcpy(value64, data_addr, bytes);
+ } else if (function == ACPI_WRITE) {
+ memcpy(data_addr, value64, bytes);
+ } else {
+ mutex_unlock(&opr_dev->lock);
+ return AE_BAD_PARAMETER;
+ }
+
+ mutex_unlock(&opr_dev->lock);
+
+ return AE_OK;
+}
+
+static int acpi_find_eclite_device(struct ishtp_opregion_dev *opr_dev)
+{
+ struct acpi_device *adev;
+
+ /* Find ECLite device and save reference */
+ adev = acpi_dev_get_first_match_dev("INTC1035", NULL, -1);
+ if (!adev) {
+ dev_err(cl_data_to_dev(opr_dev), "eclite ACPI device not found\n");
+ return -ENODEV;
+ }
+
+ opr_dev->adev = adev;
+
+ return 0;
+}
+
+static int acpi_opregion_init(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_status status;
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "cmd space handler install failed\n");
+ return -ENODEV;
+ }
+
+ status = acpi_install_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler,
+ NULL, opr_dev);
+ if (ACPI_FAILURE(status)) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "data space handler install failed\n");
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+ return -ENODEV;
+ }
+ opr_dev->acpi_init_done = true;
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Opregion handlers are installed\n");
+
+ return 0;
+}
+
+static void acpi_opregion_deinit(struct ishtp_opregion_dev *opr_dev)
+{
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_CMD_OPREGION_ID,
+ ecl_opregion_cmd_handler);
+
+ acpi_remove_address_space_handler(opr_dev->adev->handle,
+ ECLITE_DATA_OPREGION_ID,
+ ecl_opregion_data_handler);
+ opr_dev->acpi_init_done = false;
+}
+
+static void ecl_acpi_invoke_dsm(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ union acpi_object *obj;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, event_work);
+ if (!opr_dev->acpi_init_done)
+ return;
+
+ obj = acpi_evaluate_dsm(opr_dev->adev->handle, &ecl_acpi_guid, 0,
+ opr_dev->dsm_event_id, NULL);
+ if (!obj) {
+ dev_warn(cl_data_to_dev(opr_dev), "_DSM fn call failed\n");
+ return;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Exec DSM function code: %d success\n",
+ opr_dev->dsm_event_id);
+
+ ACPI_FREE(obj);
+}
+
+static void ecl_ish_process_rx_data(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message *message =
+ (struct ecl_message *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_rd] Resp: off : %x, len : %x\n",
+ message->header.offset,
+ message->header.data_len);
+
+ if ((message->header.offset + message->header.data_len) >
+ ECL_DATA_OPR_BUFLEN) {
+ return;
+ }
+
+ memcpy(opr_dev->opr_context.data_area.data + message->header.offset,
+ message->payload, message->header.data_len);
+
+ opr_dev->ish_read_done = true;
+ wake_up_interruptible(&opr_dev->read_wait);
+}
+
+static void ecl_ish_process_rx_event(struct ishtp_opregion_dev *opr_dev)
+{
+ struct ecl_message_header *header =
+ (struct ecl_message_header *)opr_dev->rb->buffer.data;
+
+ dev_dbg(cl_data_to_dev(opr_dev),
+ "[ish_ev] Evt received: %8x\n", header->event);
+
+ opr_dev->dsm_event_id = header->event;
+ schedule_work(&opr_dev->event_work);
+}
+
+static int ecl_ish_cl_enable_events(struct ishtp_opregion_dev *opr_dev,
+ bool config_enable)
+{
+ struct ecl_message message;
+ int len;
+
+ message.header.version = ECL_ISH_HEADER_VERSION;
+ message.header.data_type = ECL_MSG_DATA;
+ message.header.request_type = ECL_ISH_WRITE;
+ message.header.offset = ECL_EVENTS_NOTIFY;
+ message.header.data_len = 1;
+ message.payload[0] = config_enable;
+
+ len = sizeof(struct ecl_message_header) + message.header.data_len;
+
+ return ishtp_cl_send(opr_dev->ecl_ishtp_cl, (uint8_t *)&message, len);
+}
+
+static void ecl_ishtp_cl_event_cb(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev;
+ struct ecl_message_header *header;
+ struct ishtp_cl_rb *rb;
+
+ opr_dev = ishtp_get_client_data(ecl_ishtp_cl);
+ while ((rb = ishtp_cl_rx_get_rb(opr_dev->ecl_ishtp_cl)) != NULL) {
+ opr_dev->rb = rb;
+ header = (struct ecl_message_header *)rb->buffer.data;
+
+ if (header->data_type == ECL_MSG_DATA)
+ ecl_ish_process_rx_data(opr_dev);
+ else if (header->data_type == ECL_MSG_EVENT)
+ ecl_ish_process_rx_event(opr_dev);
+ else
+ /* Got an event with wrong data_type, ignore it */
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_cb] Received wrong data_type\n");
+
+ ishtp_cl_io_rb_recycle(rb);
+ }
+}
+
+static int ecl_ishtp_cl_init(struct ishtp_cl *ecl_ishtp_cl)
+{
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+ struct ishtp_fw_client *fw_client;
+ struct ishtp_device *dev;
+ int rv;
+
+ rv = ishtp_cl_link(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ishtp_cl_link failed\n");
+ return rv;
+ }
+
+ dev = ishtp_get_ishtp_device(ecl_ishtp_cl);
+
+ /* Connect to FW client */
+ ishtp_set_tx_ring_size(ecl_ishtp_cl, ECL_CL_TX_RING_SIZE);
+ ishtp_set_rx_ring_size(ecl_ishtp_cl, ECL_CL_RX_RING_SIZE);
+
+ fw_client = ishtp_fw_cl_get_client(dev, &ecl_ishtp_guid);
+ if (!fw_client) {
+ dev_err(cl_data_to_dev(opr_dev), "fw client not found\n");
+ return -ENOENT;
+ }
+
+ ishtp_cl_set_fw_client_id(ecl_ishtp_cl,
+ ishtp_get_fw_client_id(fw_client));
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_CONNECTING);
+
+ rv = ishtp_cl_connect(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "client connect failed\n");
+
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ return rv;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "Host connected to fw client\n");
+
+ return 0;
+}
+
+static void ecl_ishtp_cl_deinit(struct ishtp_cl *ecl_ishtp_cl)
+{
+ ishtp_cl_unlink(ecl_ishtp_cl);
+ ishtp_cl_flush_queues(ecl_ishtp_cl);
+ ishtp_cl_free(ecl_ishtp_cl);
+}
+
+static void ecl_ishtp_cl_reset_handler(struct work_struct *work)
+{
+ struct ishtp_opregion_dev *opr_dev;
+ struct ishtp_cl_device *cl_device;
+ struct ishtp_cl *ecl_ishtp_cl;
+ int rv;
+ int retry;
+
+ opr_dev = container_of(work, struct ishtp_opregion_dev, reset_work);
+
+ opr_dev->ish_link_ready = false;
+
+ cl_device = opr_dev->cl_device;
+ ecl_ishtp_cl = opr_dev->ecl_ishtp_cl;
+
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+
+ for (retry = 0; retry < 3; ++retry) {
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (!rv)
+ break;
+ }
+ if (rv) {
+ ishtp_cl_free(ecl_ishtp_cl);
+ opr_dev->ecl_ishtp_cl = NULL;
+ dev_err(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset failed. Link not ready.\n");
+ return;
+ }
+
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+ dev_info(cl_data_to_dev(opr_dev),
+ "[ish_rst] Reset Success. Link ready.\n");
+
+ opr_dev->ish_link_ready = true;
+
+ if (opr_dev->acpi_init_done)
+ return;
+
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev),
+ "ACPI opregion init failed\n");
+ }
+}
+
+static int ecl_ishtp_cl_probe(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl;
+ struct ishtp_opregion_dev *opr_dev;
+ int rv;
+
+ opr_dev = devm_kzalloc(ishtp_device(cl_device), sizeof(*opr_dev),
+ GFP_KERNEL);
+ if (!opr_dev)
+ return -ENOMEM;
+
+ ecl_ishtp_cl = ishtp_cl_allocate(cl_device);
+ if (!ecl_ishtp_cl)
+ return -ENOMEM;
+
+ ishtp_set_drvdata(cl_device, ecl_ishtp_cl);
+ ishtp_set_client_data(ecl_ishtp_cl, opr_dev);
+ opr_dev->ecl_ishtp_cl = ecl_ishtp_cl;
+ opr_dev->cl_device = cl_device;
+
+ init_waitqueue_head(&opr_dev->read_wait);
+ INIT_WORK(&opr_dev->event_work, ecl_acpi_invoke_dsm);
+ INIT_WORK(&opr_dev->reset_work, ecl_ishtp_cl_reset_handler);
+
+ /* Initialize ish client device */
+ rv = ecl_ishtp_cl_init(ecl_ishtp_cl);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "Client init failed\n");
+ goto err_exit;
+ }
+
+ dev_dbg(cl_data_to_dev(opr_dev), "eclite-ishtp client initialised\n");
+
+ opr_dev->ish_link_ready = true;
+ mutex_init(&opr_dev->lock);
+
+ rv = acpi_find_eclite_device(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ECLite ACPI ID not found\n");
+ goto err_exit;
+ }
+
+ /* Register a handler for eclite fw events */
+ ishtp_register_event_cb(cl_device, ecl_ishtp_cl_event_cb);
+
+ /* Now init opregion handlers */
+ rv = acpi_opregion_init(opr_dev);
+ if (rv) {
+ dev_err(cl_data_to_dev(opr_dev), "ACPI opregion init failed\n");
+ goto err_exit;
+ }
+
+ /* Reprobe devices depending on ECLite - battery, fan, etc. */
+ acpi_dev_clear_dependencies(opr_dev->adev);
+
+ return 0;
+err_exit:
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ return rv;
+}
+
+static void ecl_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ if (opr_dev->acpi_init_done)
+ acpi_opregion_deinit(opr_dev);
+
+ acpi_dev_put(opr_dev->adev);
+
+ ishtp_set_connection_state(ecl_ishtp_cl, ISHTP_CL_DISCONNECTING);
+ ishtp_cl_disconnect(ecl_ishtp_cl);
+ ecl_ishtp_cl_deinit(ecl_ishtp_cl);
+
+ cancel_work_sync(&opr_dev->reset_work);
+ cancel_work_sync(&opr_dev->event_work);
+}
+
+static int ecl_ishtp_cl_reset(struct ishtp_cl_device *cl_device)
+{
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ schedule_work(&opr_dev->reset_work);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_suspend(struct device *device)
+{
+ struct ishtp_cl_device *cl_device = ishtp_dev_to_cl_device(device);
+ struct ishtp_cl *ecl_ishtp_cl = ishtp_get_drvdata(cl_device);
+ struct ishtp_opregion_dev *opr_dev =
+ ishtp_get_client_data(ecl_ishtp_cl);
+
+ if (acpi_target_system_state() == ACPI_STATE_S0)
+ return 0;
+
+ acpi_opregion_deinit(opr_dev);
+ ecl_ish_cl_enable_events(opr_dev, false);
+
+ return 0;
+}
+
+static int ecl_ishtp_cl_resume(struct device *device)
+{
+ /* A reset is expected after an Sx transition. At this point we
+ * cannot tell whether the link is up, so there is nothing to
+ * restore; do nothing in the resume path.
+ */
+ return 0;
+}
+
+static const struct dev_pm_ops ecl_ishtp_pm_ops = {
+ .suspend = ecl_ishtp_cl_suspend,
+ .resume = ecl_ishtp_cl_resume,
+};
+
+static struct ishtp_cl_driver ecl_ishtp_cl_driver = {
+ .name = "ishtp-eclite",
+ .guid = &ecl_ishtp_guid,
+ .probe = ecl_ishtp_cl_probe,
+ .remove = ecl_ishtp_cl_remove,
+ .reset = ecl_ishtp_cl_reset,
+ .driver.pm = &ecl_ishtp_pm_ops,
+};
+
+static int __init ecl_ishtp_init(void)
+{
+ return ishtp_cl_driver_register(&ecl_ishtp_cl_driver, THIS_MODULE);
+}
+
+static void __exit ecl_ishtp_exit(void)
+{
+ return ishtp_cl_driver_unregister(&ecl_ishtp_cl_driver);
+}
+
+late_initcall(ecl_ishtp_init);
+module_exit(ecl_ishtp_exit);
+
+MODULE_DESCRIPTION("ISH ISHTP eclite client opregion driver");
+MODULE_AUTHOR("K Naduvalath, Sumesh <sumesh.k.naduvalath@intel.com>");
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("ishtp:*");
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 88b551caeaaf..ae9293024c77 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -60,7 +60,6 @@ MODULE_ALIAS("wmi:" WMI_EVENT_GUID2);
MODULE_ALIAS("wmi:" WMI_EVENT_GUID3);
MODULE_ALIAS("wmi:" WMI_METHOD_WMAB);
MODULE_ALIAS("wmi:" WMI_METHOD_WMBB);
-MODULE_ALIAS("acpi*:LGEX0815:*");
static struct platform_device *pf_device;
static struct input_dev *wmi_input_dev;
@@ -331,7 +330,7 @@ static ssize_t fan_mode_show(struct device *dev,
status = r->integer.value & 0x01;
kfree(r);
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t usb_charge_store(struct device *dev,
@@ -373,7 +372,7 @@ static ssize_t usb_charge_show(struct device *dev,
kfree(r);
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t reader_mode_store(struct device *dev,
@@ -415,7 +414,7 @@ static ssize_t reader_mode_show(struct device *dev,
kfree(r);
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t fn_lock_store(struct device *dev,
@@ -456,7 +455,7 @@ static ssize_t fn_lock_show(struct device *dev,
status = !!r->buffer.pointer[0];
kfree(r);
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t battery_care_limit_store(struct device *dev,
@@ -521,7 +520,7 @@ static ssize_t battery_care_limit_show(struct device *dev,
if (status != 80 && status != 100)
status = 0;
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static DEVICE_ATTR_RW(fan_mode);
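The snprintf() to sysfs_emit() conversions above follow the standard sysfs show() pattern: sysfs_emit() verifies it is writing into a page-sized, page-aligned sysfs buffer and bounds the output itself, so the callback no longer needs to pass PAGE_SIZE. A minimal sketch of the resulting pattern (example_show() and its attribute are hypothetical, not lg-laptop code):

#include <linux/device.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buffer)
{
	int status = 1;		/* hypothetical value read from firmware */

	/* sysfs_emit() caps output at PAGE_SIZE and warns on misuse */
	return sysfs_emit(buffer, "%d\n", status);
}
static DEVICE_ATTR_RO(example);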
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 8bce3da32a42..447044fdcb77 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -27,9 +27,14 @@
#define MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET 0x02
#define MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET 0x03
#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET 0x04
+#define MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET 0x05
#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET 0x06
+#define MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET 0x07
#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET 0x08
+#define MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET 0x09
#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET 0x0a
+#define MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET 0x0b
+#define MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET 0x1c
#define MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET 0x1d
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET 0x1e
#define MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET 0x1f
@@ -38,13 +43,20 @@
#define MLXPLAT_CPLD_LPC_REG_LED3_OFFSET 0x22
#define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23
#define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24
+#define MLXPLAT_CPLD_LPC_REG_LED6_OFFSET 0x25
+#define MLXPLAT_CPLD_LPC_REG_LED7_OFFSET 0x26
#define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a
#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b
+#define MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET 0x2d
#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e
+#define MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET 0x2f
#define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30
#define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31
#define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32
#define MLXPLAT_CPLD_LPC_REG_WP2_OFFSET 0x33
+#define MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE 0x34
+#define MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET 0x35
+#define MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET 0x36
#define MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET 0x37
#define MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET 0x3a
#define MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET 0x3b
@@ -57,15 +69,39 @@
#define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50
#define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51
#define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52
+#define MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET 0x56
+#define MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET 0x57
#define MLXPLAT_CPLD_LPC_REG_PSU_OFFSET 0x58
#define MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET 0x59
#define MLXPLAT_CPLD_LPC_REG_PSU_MASK_OFFSET 0x5a
#define MLXPLAT_CPLD_LPC_REG_PWR_OFFSET 0x64
#define MLXPLAT_CPLD_LPC_REG_PWR_EVENT_OFFSET 0x65
#define MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET 0x66
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET 0x70
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET 0x71
+#define MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET 0x72
#define MLXPLAT_CPLD_LPC_REG_FAN_OFFSET 0x88
#define MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET 0x89
#define MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET 0x8a
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET 0x9a
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET 0x9b
+#define MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET 0x9c
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET 0x9d
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET 0x9e
+#define MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET 0x9f
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET 0xa0
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET 0xa1
+#define MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET 0xa2
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET 0xa3
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET 0xa4
+#define MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET 0xa5
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET 0xa6
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET 0xa7
+#define MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET 0xa8
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET 0xa9
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET 0xaa
+#define MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET 0xab
+#define MLXPLAT_CPLD_LPC_REG_LC_PWR_ON 0xb2
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET 0xc7
#define MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET 0xc8
#define MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET 0xc9
@@ -88,23 +124,30 @@
#define MLXPLAT_CPLD_LPC_REG_TACHO4_OFFSET 0xe7
#define MLXPLAT_CPLD_LPC_REG_TACHO5_OFFSET 0xe8
#define MLXPLAT_CPLD_LPC_REG_TACHO6_OFFSET 0xe9
+#define MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET 0xea
#define MLXPLAT_CPLD_LPC_REG_TACHO7_OFFSET 0xeb
#define MLXPLAT_CPLD_LPC_REG_TACHO8_OFFSET 0xec
#define MLXPLAT_CPLD_LPC_REG_TACHO9_OFFSET 0xed
#define MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET 0xee
#define MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET 0xef
#define MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET 0xf0
+#define MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET 0xf1
+#define MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET 0xf2
+#define MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET 0xf3
+#define MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET 0xf4
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET 0xf5
#define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6
#define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7
#define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8
#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9
+#define MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET 0xfa
#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb
#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc
#define MLXPLAT_CPLD_LPC_IO_RANGE 0x100
#define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb
#define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda
#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc
+#define MLXPLAT_CPLD_LPC_I2C_CH4_OFF 0xdd
#define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL
#define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
@@ -116,6 +159,9 @@
#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \
MLXPLAT_CPLD_LPC_PIO_OFFSET)
+#define MLXPLAT_CPLD_LPC_REG4 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \
+ MLXPLAT_CPLD_LPC_I2C_CH4_OFF) | \
+ MLXPLAT_CPLD_LPC_PIO_OFFSET)
/* Masks for aggregation, psu, pwr and fan event in CPLD related registers. */
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04
@@ -128,6 +174,24 @@
#define MLXPLAT_CPLD_AGGR_ASIC_MASK_NG 0x01
#define MLXPLAT_CPLD_AGGR_MASK_NG_DEF 0x04
#define MLXPLAT_CPLD_AGGR_MASK_COMEX BIT(0)
+#define MLXPLAT_CPLD_AGGR_MASK_LC BIT(3)
+#define MLXPLAT_CPLD_AGGR_MASK_MODULAR (MLXPLAT_CPLD_AGGR_MASK_NG_DEF | \
+ MLXPLAT_CPLD_AGGR_MASK_COMEX | \
+ MLXPLAT_CPLD_AGGR_MASK_LC)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT BIT(0)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_RDY BIT(1)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_PG BIT(2)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SCRD BIT(3)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SYNC BIT(4)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_ACT BIT(5)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_SDWN BIT(6)
+#define MLXPLAT_CPLD_AGGR_MASK_LC_LOW (MLXPLAT_CPLD_AGGR_MASK_LC_PRSNT | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_RDY | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_PG | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SCRD | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SYNC | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_ACT | \
+ MLXPLAT_CPLD_AGGR_MASK_LC_SDWN)
#define MLXPLAT_CPLD_LOW_AGGR_MASK_LOW 0xc1
#define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6)
#define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0)
@@ -136,7 +200,7 @@
#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0)
-#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0)
+#define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(6, 0)
#define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4)
#define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0)
#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4)
@@ -149,6 +213,9 @@
MLXPLAT_CPLD_AGGR_MASK_CARRIER)
#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1
+/* Masks for aggregation for modular systems */
+#define MLXPLAT_CPLD_LPC_LC_MASK GENMASK(7, 0)
+
/* Default I2C parent bus number */
#define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1
@@ -163,9 +230,12 @@
#define MLXPLAT_CPLD_CH1 2
#define MLXPLAT_CPLD_CH2 10
#define MLXPLAT_CPLD_CH3 18
+#define MLXPLAT_CPLD_CH2_ETH_MODULAR 3
+#define MLXPLAT_CPLD_CH3_ETH_MODULAR 43
+#define MLXPLAT_CPLD_CH4_ETH_MODULAR 51
/* Number of LPC attached MUX platform devices */
-#define MLXPLAT_CPLD_LPC_MUX_DEVS 3
+#define MLXPLAT_CPLD_LPC_MUX_DEVS 4
/* Hotplug devices adapter numbers */
#define MLXPLAT_CPLD_NR_NONE -1
@@ -175,6 +245,11 @@
#define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12
#define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13
#define MLXPLAT_CPLD_FAN4_DEFAULT_NR 14
+#define MLXPLAT_CPLD_NR_ASIC 3
+#define MLXPLAT_CPLD_NR_LC_BASE 34
+
+#define MLXPLAT_CPLD_NR_LC_SET(nr) (MLXPLAT_CPLD_NR_LC_BASE + (nr))
+#define MLXPLAT_CPLD_LC_ADDR 0x32
/* Masks and default values for watchdogs */
#define MLXPLAT_CPLD_WD1_CLEAR_MASK GENMASK(7, 1)
@@ -190,6 +265,11 @@
#define MLXPLAT_CPLD_WD3_DFLT_TIMEOUT 600
#define MLXPLAT_CPLD_WD_MAX_DEVS 2
+#define MLXPLAT_CPLD_LPC_SYSIRQ 17
+
+/* Minimum power required for turning on Ethernet modular system (in Watts) */
+#define MLXPLAT_CPLD_ETH_MODULAR_PWR_MIN 50
+
/* mlxplat_priv - platform private data
* @pdev_i2c - i2c controller platform device
* @pdev_mux - array of mux platform devices
@@ -318,6 +398,58 @@ static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = {
};
+/* Platform channels for modular system family */
+static const int mlxplat_modular_upper_channel[] = { 1 };
+static const int mlxplat_modular_channels[] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
+ 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
+ 38, 39, 40
+};
+
+/* Platform modular mux data */
+static struct i2c_mux_reg_platform_data mlxplat_modular_mux_data[] = {
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH1,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG4,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_modular_upper_channel,
+ .n_values = ARRAY_SIZE(mlxplat_modular_upper_channel),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH2_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_modular_channels,
+ .n_values = ARRAY_SIZE(mlxplat_modular_channels),
+ },
+ {
+ .parent = MLXPLAT_CPLD_CH1,
+ .base_nr = MLXPLAT_CPLD_CH3_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+ {
+ .parent = 1,
+ .base_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR,
+ .write_only = 1,
+ .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2,
+ .reg_size = 1,
+ .idle_in_use = 1,
+ .values = mlxplat_msn21xx_channels,
+ .n_values = ARRAY_SIZE(mlxplat_msn21xx_channels),
+ },
+};
+
/* Platform hotplug devices */
static struct i2c_board_info mlxplat_mlxcpld_pwr[] = {
{
@@ -401,6 +533,21 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_items_data[] = {
},
};
+static struct mlxreg_core_data mlxplat_mlxcpld_default_pwr_wc_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_items_data[] = {
{
.label = "fan1",
@@ -529,6 +676,46 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static struct mlxreg_core_item mlxplat_mlxcpld_default_wc_items[] = {
+ {
+ .data = mlxplat_mlxcpld_comex_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_pwr_wc_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_wc_data = {
+ .items = mlxplat_mlxcpld_default_wc_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_wc_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_DEF,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
static
struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = {
.items = mlxplat_mlxcpld_comex_items,
@@ -807,6 +994,14 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_fan_items_data[] = {
.bit = BIT(5),
.hpdev.nr = MLXPLAT_CPLD_NR_NONE,
},
+ {
+ .label = "fan7",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = BIT(6),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
};
static struct mlxreg_core_item mlxplat_mlxcpld_default_ng_items[] = {
@@ -968,6 +1163,847 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = {
.mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
};
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_pwr_items_data[] = {
+ {
+ .label = "pwr1",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[0],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+ {
+ .label = "pwr4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_ext_pwr[1],
+ .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_lc_act = {
+ .irq = MLXPLAT_CPLD_LPC_SYSIRQ,
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_asic_items_data[] = {
+ {
+ .label = "asic1",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .hpdev.nr = MLXPLAT_CPLD_NR_NONE,
+ },
+};
+
+static struct i2c_board_info mlxplat_mlxcpld_lc_i2c_dev[] = {
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+ {
+ I2C_BOARD_INFO("mlxreg-lc", MLXPLAT_CPLD_LC_ADDR),
+ .platform_data = &mlxplat_mlxcpld_lc_act,
+ },
+};
+
+static struct mlxreg_core_hotplug_notifier mlxplat_mlxcpld_modular_lc_notifier[] = {
+ {
+ .identity = "lc1",
+ },
+ {
+ .identity = "lc2",
+ },
+ {
+ .identity = "lc3",
+ },
+ {
+ .identity = "lc4",
+ },
+ {
+ .identity = "lc5",
+ },
+ {
+ .identity = "lc6",
+ },
+ {
+ .identity = "lc7",
+ },
+ {
+ .identity = "lc8",
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pr_items_data[] = {
+ {
+ .label = "lc1_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_present",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ver_items_data[] = {
+ {
+ .label = "lc1_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(0),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(1),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(2),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(3),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(4),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(5),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(6),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_verified",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = BIT(7),
+ .reg_prsnt = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .reg_sync = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .reg_pwr = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .reg_ena = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_PLATFORM_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_pg_data[] = {
+ {
+ .label = "lc1_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_powered",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_ready_data[] = {
+ {
+ .label = "lc1_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_ready",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_synced_data[] = {
+ {
+ .label = "lc1_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_synced",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_act_data[] = {
+ {
+ .label = "lc1_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_active",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_lc_sd_data[] = {
+ {
+ .label = "lc1_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(0),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[0],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(0),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[0],
+ .slot = 1,
+ },
+ {
+ .label = "lc2_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(1),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[1],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(1),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[1],
+ .slot = 2,
+ },
+ {
+ .label = "lc3_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(2),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[2],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(2),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[2],
+ .slot = 3,
+ },
+ {
+ .label = "lc4_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(3),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[3],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(3),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[3],
+ .slot = 4,
+ },
+ {
+ .label = "lc5_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(4),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[4],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(4),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[4],
+ .slot = 5,
+ },
+ {
+ .label = "lc6_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(5),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[5],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(5),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[5],
+ .slot = 6,
+ },
+ {
+ .label = "lc7_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(6),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[6],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(6),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[6],
+ .slot = 7,
+ },
+ {
+ .label = "lc8_shutdown",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = BIT(7),
+ .hpdev.brdinfo = &mlxplat_mlxcpld_lc_i2c_dev[7],
+ .hpdev.nr = MLXPLAT_CPLD_NR_LC_SET(7),
+ .hpdev.action = MLXREG_HOTPLUG_DEVICE_NO_ACTION,
+ .hpdev.notifier = &mlxplat_mlxcpld_modular_lc_notifier[7],
+ .slot = 8,
+ },
+};
+
+static struct mlxreg_core_item mlxplat_mlxcpld_modular_items[] = {
+ {
+ .data = mlxplat_mlxcpld_ext_psu_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET,
+ .mask = MLXPLAT_CPLD_PSU_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_pwr_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET,
+ .mask = MLXPLAT_CPLD_PWR_EXT_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_default_ng_fan_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
+ .mask = MLXPLAT_CPLD_FAN_NG_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_asic_items_data,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF,
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_asic_items_data),
+ .inversed = 0,
+ .health = true,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_pr_items_data,
+ .kind = MLXREG_HOTPLUG_LC_PRESENT,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pr_items_data),
+ .inversed = 1,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_ver_items_data,
+ .kind = MLXREG_HOTPLUG_LC_VERIFIED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ver_items_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_pg_data,
+ .kind = MLXREG_HOTPLUG_LC_POWERED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_pg_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_ready_data,
+ .kind = MLXREG_HOTPLUG_LC_READY,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_ready_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_synced_data,
+ .kind = MLXREG_HOTPLUG_LC_SYNCED,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_synced_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_act_data,
+ .kind = MLXREG_HOTPLUG_LC_ACTIVE,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_act_data),
+ .inversed = 0,
+ .health = false,
+ },
+ {
+ .data = mlxplat_mlxcpld_modular_lc_sd_data,
+ .kind = MLXREG_HOTPLUG_LC_THERMAL,
+ .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_LC,
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET,
+ .mask = MLXPLAT_CPLD_LPC_LC_MASK,
+ .count = ARRAY_SIZE(mlxplat_mlxcpld_modular_lc_sd_data),
+ .inversed = 0,
+ .health = false,
+ },
+};
+
+static
+struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_modular_data = {
+ .items = mlxplat_mlxcpld_modular_items,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_items),
+ .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET,
+ .mask = MLXPLAT_CPLD_AGGR_MASK_MODULAR,
+ .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET,
+ .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW,
+};
+
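For orientation, the following is a minimal sketch (not part of this patch) of how a hotplug set such as mlxplat_mlxcpld_modular_data is typically handed to the "mlxreg-hotplug" platform device at probe time; the local variable name is an assumption:

	/*
	 * Illustrative sketch only: mlxplat_hotplug points at
	 * mlxplat_mlxcpld_modular_data once the modular DMI callback has run,
	 * and is passed as platform data of the "mlxreg-hotplug" device.
	 */
	struct platform_device *hotplug_pdev;	/* assumed local name */

	hotplug_pdev = platform_device_register_resndata(&mlxplat_dev->dev,
					"mlxreg-hotplug", PLATFORM_DEVID_NONE,
					mlxplat_mlxcpld_resources,
					ARRAY_SIZE(mlxplat_mlxcpld_resources),
					mlxplat_hotplug,
					sizeof(*mlxplat_hotplug));
	if (IS_ERR(hotplug_pdev))
		return PTR_ERR(hotplug_pdev);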
/* Platform led default data */
static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = {
{
@@ -1037,6 +2073,35 @@ static struct mlxreg_core_platform_data mlxplat_default_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_data),
};
+/* Platform led default data for water cooling */
+static struct mlxreg_core_data mlxplat_mlxcpld_default_led_wc_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:red",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_default_led_wc_data = {
+ .data = mlxplat_mlxcpld_default_led_wc_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_led_wc_data),
+};
+
/* Platform led MSN21xx system family data */
static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_led_data[] = {
{
@@ -1198,6 +2263,20 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_led_data[] = {
.bit = BIT(5),
},
{
+ .label = "fan7:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "fan7:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
.label = "uid:blue",
.reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
.mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
@@ -1283,6 +2362,158 @@ static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data),
};
+/* Platform led data for modular systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_led_data[] = {
+ {
+ .label = "status:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "status:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "psu:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ },
+ {
+ .label = "fan1:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan1:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(0),
+ },
+ {
+ .label = "fan2:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan2:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(1),
+ },
+ {
+ .label = "fan3:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan3:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(2),
+ },
+ {
+ .label = "fan4:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan4:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(3),
+ },
+ {
+ .label = "fan5:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan5:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "fan6:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan6:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED4_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(5),
+ },
+ {
+ .label = "fan7:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "fan7:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET,
+ .bit = BIT(6),
+ },
+ {
+ .label = "uid:blue",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan_front:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "fan_front:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED6_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "mgmt:green",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+ {
+ .label = "mgmt:orange",
+ .reg = MLXPLAT_CPLD_LPC_REG_LED7_OFFSET,
+ .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_modular_led_data = {
+ .data = mlxplat_mlxcpld_modular_led_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_led_data),
+};
+
/* Platform register access default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = {
{
@@ -1771,6 +3002,30 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = {
.mode = 0444,
},
{
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
.label = "voltreg_update_status",
.reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
.mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
@@ -1814,6 +3069,484 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = {
.counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_regs_io_data),
};
+/* Platform register access data for modular systems */
+static struct mlxreg_core_data mlxplat_mlxcpld_modular_regs_io_data[] = {
+ {
+ .label = "cpld1_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld1_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld2_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld3_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld4_pn",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET,
+ .bit = GENMASK(15, 0),
+ .mode = 0444,
+ .regnum = 2,
+ },
+ {
+ .label = "cpld1_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD1_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld2_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD2_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld3_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "cpld4_version_min",
+ .reg = MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "lc1_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "lc2_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "lc3_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "lc4_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "lc5_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "lc6_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc7_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "lc8_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "reset_long_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_short_pb",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_fu",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_mgmt_dc_dc_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sys_comex_bios",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_sw_reset",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_aux_pwr_or_reload",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_comex_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_platform",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_soc",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_pwr_off_from_carrier",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_wd",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_aux_pwr_or_fu",
+ .reg = MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_dc_dc_pwr_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_swb_12v_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_system",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "reset_thermal_spc_or_pciesw",
+ .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_safe_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_active_image",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_auth_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0444,
+ },
+ {
+ .label = "bios_upgrade_fail",
+ .reg = MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0444,
+ },
+ {
+ .label = "voltreg_update_status",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET,
+ .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK,
+ .bit = 5,
+ .mode = 0444,
+ },
+ {
+ .label = "vpd_wp",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "pcie_asic_reset_dis",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "shutdown_unlock",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc1_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "lc2_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "lc3_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "lc4_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "lc5_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0200,
+ },
+ {
+ .label = "lc6_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
+ .label = "lc7_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0200,
+ },
+ {
+ .label = "lc8_rst_mask",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0200,
+ },
+ {
+ .label = "psu1_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0200,
+ },
+ {
+ .label = "psu2_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_cycle",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0200,
+ },
+ {
+ .label = "pwr_down",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0200,
+ },
+ {
+ .label = "psu3_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0200,
+ },
+ {
+ .label = "psu4_on",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0200,
+ },
+ {
+ .label = "auto_power_mode",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "pm_mgmt_en",
+ .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "jtag_enable",
+ .reg = MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE,
+ .mask = GENMASK(3, 0),
+ .bit = 1,
+ .mode = 0644,
+ },
+ {
+ .label = "safe_bios_dis",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "safe_bios_dis_wp",
+ .reg = MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "asic_health",
+ .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET,
+ .mask = MLXPLAT_CPLD_ASIC_MASK,
+ .bit = 1,
+ .mode = 0444,
+ },
+ {
+ .label = "fan_dir",
+ .reg = MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "lc1_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(0),
+ .mode = 0644,
+ },
+ {
+ .label = "lc2_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(1),
+ .mode = 0644,
+ },
+ {
+ .label = "lc3_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(2),
+ .mode = 0644,
+ },
+ {
+ .label = "lc4_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(3),
+ .mode = 0644,
+ },
+ {
+ .label = "lc5_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(4),
+ .mode = 0644,
+ },
+ {
+ .label = "lc6_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(5),
+ .mode = 0644,
+ },
+ {
+ .label = "lc7_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(6),
+ .mode = 0644,
+ },
+ {
+ .label = "lc8_pwr",
+ .reg = MLXPLAT_CPLD_LPC_REG_LC_PWR_ON,
+ .mask = GENMASK(7, 0) & ~BIT(7),
+ .mode = 0644,
+ },
+ {
+ .label = "config1",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "config2",
+ .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+ {
+ .label = "ufm_version",
+ .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET,
+ .bit = GENMASK(7, 0),
+ .mode = 0444,
+ },
+};
+
+static struct mlxreg_core_platform_data mlxplat_modular_regs_io_data = {
+ .data = mlxplat_mlxcpld_modular_regs_io_data,
+ .counter = ARRAY_SIZE(mlxplat_mlxcpld_modular_regs_io_data),
+};
+
/* Platform FAN default */
static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
{
@@ -1821,6 +3554,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.reg = MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET,
},
{
+ .label = "pwm2",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET,
+ },
+ {
+ .label = "pwm3",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET,
+ },
+ {
+ .label = "pwm4",
+ .reg = MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET,
+ },
+ {
.label = "tacho1",
.reg = MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET,
.mask = GENMASK(7, 0),
@@ -1918,6 +3663,20 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_fan_data[] = {
.reg_prsnt = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET,
},
{
+ .label = "tacho13",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(4),
+ },
+ {
+ .label = "tacho14",
+ .reg = MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET,
+ .mask = GENMASK(7, 0),
+ .capability = MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET,
+ .bit = BIT(5),
+ },
+ {
.label = "conf",
.capability = MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET,
},
@@ -2152,16 +3911,23 @@ static struct mlxreg_core_platform_data mlxplat_mlxcpld_wd_set_type3[] = {
static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET:
@@ -2174,6 +3940,23 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_PWR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
@@ -2185,6 +3968,9 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
return true;
}
@@ -2199,9 +3985,14 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -2210,13 +4001,20 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
@@ -2237,6 +4035,30 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD1_TMR_OFFSET:
@@ -2252,6 +4074,9 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
@@ -2264,12 +4089,15 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
@@ -2286,9 +4114,14 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_VER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD1_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD1_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD2_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD2_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD3_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD3_PN1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_PN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_CPLD4_PN1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_RESET_GP4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RESET_CAUSE_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET:
@@ -2297,11 +4130,18 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED6_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LED7_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION:
case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GPCOM0_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_GP_RST_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_FIELD_UPGRADE:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_OFFSET:
+ case MLXPLAT_CPLD_LPC_SAFE_BIOS_WP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET:
case MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET:
@@ -2322,6 +4162,30 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_FAN_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_EVENT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_IN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_VR_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PG_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_RD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_OK_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SN_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_EVENT_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_SD_MASK_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_LC_PWR_ON:
case MLXPLAT_CPLD_LPC_REG_WD2_TMR_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD2_TLEFT_OFFSET:
case MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET:
@@ -2331,6 +4195,9 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_CPLD3_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CPLD4_MVER_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO3_OFFSET:
@@ -2343,12 +4210,15 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg)
case MLXPLAT_CPLD_LPC_REG_TACHO10_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO11_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO12_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO13_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_TACHO14_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET:
case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET:
case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET:
+ case MLXPLAT_CPLD_LPC_REG_SLOT_QTY_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET:
case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET:
case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET:
@@ -2382,6 +4252,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = {
{ MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
};
+static const struct reg_default mlxplat_mlxcpld_regmap_eth_modular[] = {
+ { MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, 0x61 },
+ { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM2_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM3_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_PWM4_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 },
+ { MLXPLAT_CPLD_LPC_REG_AGGRLC_MASK_OFFSET,
+ MLXPLAT_CPLD_AGGR_MASK_LC_LOW },
+};
+
struct mlxplat_mlxcpld_regmap_context {
void __iomem *base;
};
@@ -2462,8 +4345,22 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = {
.reg_write = mlxplat_mlxcpld_reg_write,
};
+static const struct regmap_config mlxplat_mlxcpld_regmap_config_eth_modular = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = 255,
+ .cache_type = REGCACHE_FLAT,
+ .writeable_reg = mlxplat_mlxcpld_writeable_reg,
+ .readable_reg = mlxplat_mlxcpld_readable_reg,
+ .volatile_reg = mlxplat_mlxcpld_volatile_reg,
+ .reg_defaults = mlxplat_mlxcpld_regmap_eth_modular,
+ .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_eth_modular),
+ .reg_read = mlxplat_mlxcpld_reg_read,
+ .reg_write = mlxplat_mlxcpld_reg_write,
+};
+
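A minimal sketch (not part of this patch) of how the selected register map configuration is typically bound to the LPC I/O context with devm_regmap_init(); on modular systems mlxplat_regmap_config points at mlxplat_mlxcpld_regmap_config_eth_modular, and the context instance name below is an assumption:

	/*
	 * Illustrative sketch only: create the regmap over the LPC window
	 * using whichever config the DMI match selected.
	 */
	struct mlxplat_mlxcpld_regmap_context regmap_ctx;	/* assumed name */
	struct regmap *regmap;

	regmap = devm_regmap_init(&mlxplat_dev->dev, NULL, &regmap_ctx,
				  mlxplat_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);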
static struct resource mlxplat_mlxcpld_resources[] = {
- [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"),
+ [0] = DEFINE_RES_IRQ_NAMED(MLXPLAT_CPLD_LPC_SYSIRQ, "mlxreg-hotplug"),
};
static struct platform_device *mlxplat_dev;
@@ -2498,6 +4395,28 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init mlxplat_dmi_default_wc_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data);
+ mlxplat_mux_data = mlxplat_default_mux_data;
+ for (i = 0; i < mlxplat_mux_num; i++) {
+ mlxplat_mux_data[i].values = mlxplat_default_channels[i];
+ mlxplat_mux_data[i].n_values =
+ ARRAY_SIZE(mlxplat_default_channels[i]);
+ }
+ mlxplat_hotplug = &mlxplat_mlxcpld_default_wc_data;
+ mlxplat_hotplug->deferred_nr =
+ mlxplat_default_channels[i - 1][MLXPLAT_CPLD_GRP_CHNL_NUM - 1];
+ mlxplat_led = &mlxplat_default_led_wc_data;
+ mlxplat_regs_io = &mlxplat_default_regs_io_data;
+ mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0];
+
+ return 1;
+}
+
static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi)
{
int i;
@@ -2640,8 +4559,35 @@ static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi)
return 1;
}
+static int __init mlxplat_dmi_modular_matched(const struct dmi_system_id *dmi)
+{
+ int i;
+
+ mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM;
+ mlxplat_mux_num = ARRAY_SIZE(mlxplat_modular_mux_data);
+ mlxplat_mux_data = mlxplat_modular_mux_data;
+ mlxplat_hotplug = &mlxplat_mlxcpld_modular_data;
+ mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_CH4_ETH_MODULAR;
+ mlxplat_led = &mlxplat_modular_led_data;
+ mlxplat_regs_io = &mlxplat_modular_regs_io_data;
+ mlxplat_fan = &mlxplat_default_fan_data;
+ for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++)
+ mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i];
+ mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data;
+ mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_eth_modular;
+
+ return 1;
+}
+
static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
{
+ .callback = mlxplat_dmi_default_wc_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_SKU, "HI138"),
+ },
+ },
+ {
.callback = mlxplat_dmi_default_matched,
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "VMOD0001"),
@@ -2690,6 +4636,12 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = {
},
},
{
+ .callback = mlxplat_dmi_modular_matched,
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "VMOD0011"),
+ },
+ },
+ {
.callback = mlxplat_dmi_msn274x_matched,
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"),
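The DMI entries added above are walked at module init; a minimal sketch (not part of this patch), assuming the usual dmi_check_system() pattern in the driver's init path:

	/*
	 * Illustrative sketch only: the first matching entry's callback
	 * selects the hotplug, led, regs_io and fan data sets used later.
	 */
	if (!dmi_check_system(mlxplat_dmi_table))
		return -ENODEV;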
diff --git a/drivers/platform/x86/nvidia-wmi-ec-backlight.c b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
new file mode 100644
index 000000000000..61e37194df70
--- /dev/null
+++ b/drivers/platform/x86/nvidia-wmi-ec-backlight.c
@@ -0,0 +1,213 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/backlight.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+/**
+ * enum wmi_brightness_method - WMI method IDs
+ * @WMI_BRIGHTNESS_METHOD_LEVEL: Get/Set EC brightness level status
+ * @WMI_BRIGHTNESS_METHOD_SOURCE: Get/Set EC Brightness Source
+ */
+enum wmi_brightness_method {
+ WMI_BRIGHTNESS_METHOD_LEVEL = 1,
+ WMI_BRIGHTNESS_METHOD_SOURCE = 2,
+ WMI_BRIGHTNESS_METHOD_MAX
+};
+
+/**
+ * enum wmi_brightness_mode - Operation mode for WMI-wrapped method
+ * @WMI_BRIGHTNESS_MODE_GET: Get the current brightness level/source.
+ * @WMI_BRIGHTNESS_MODE_SET: Set the brightness level.
+ * @WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL: Get the maximum brightness level. This
+ * is only valid when the WMI method is
+ * %WMI_BRIGHTNESS_METHOD_LEVEL.
+ */
+enum wmi_brightness_mode {
+ WMI_BRIGHTNESS_MODE_GET = 0,
+ WMI_BRIGHTNESS_MODE_SET = 1,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL = 2,
+ WMI_BRIGHTNESS_MODE_MAX
+};
+
+/**
+ * enum wmi_brightness_source - Backlight brightness control source selection
+ * @WMI_BRIGHTNESS_SOURCE_GPU: Backlight brightness is controlled by the GPU.
+ * @WMI_BRIGHTNESS_SOURCE_EC: Backlight brightness is controlled by the
+ * system's Embedded Controller (EC).
+ * @WMI_BRIGHTNESS_SOURCE_AUX: Backlight brightness is controlled over the
+ * DisplayPort AUX channel.
+ */
+enum wmi_brightness_source {
+ WMI_BRIGHTNESS_SOURCE_GPU = 1,
+ WMI_BRIGHTNESS_SOURCE_EC = 2,
+ WMI_BRIGHTNESS_SOURCE_AUX = 3,
+ WMI_BRIGHTNESS_SOURCE_MAX
+};
+
+/**
+ * struct wmi_brightness_args - arguments for the WMI-wrapped ACPI method
+ * @mode: Pass in an &enum wmi_brightness_mode value to select between
+ * getting or setting a value.
+ * @val: In parameter for value to set when using %WMI_BRIGHTNESS_MODE_SET
+ * mode. Not used in conjunction with %WMI_BRIGHTNESS_MODE_GET or
+ * %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL mode.
+ * @ret: Out parameter returning retrieved value when operating in
+ * %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL
+ * mode. Not used in %WMI_BRIGHTNESS_MODE_SET mode.
+ * @ignored: Padding; not used. The ACPI method expects a 24-byte params struct.
+ *
+ * This is the parameters structure for the WmiBrightnessNotify ACPI method as
+ * wrapped by WMI. The value passed in to @val or returned by @ret will be a
+ * brightness value when the WMI method ID is %WMI_BRIGHTNESS_METHOD_LEVEL, or
+ * an &enum wmi_brightness_source value with %WMI_BRIGHTNESS_METHOD_SOURCE.
+ */
+struct wmi_brightness_args {
+ u32 mode;
+ u32 val;
+ u32 ret;
+ u32 ignored[3];
+};
+
+/**
+ * wmi_brightness_notify() - helper function for calling WMI-wrapped ACPI method
+ * @w: Pointer to the struct wmi_device identified by %WMI_BRIGHTNESS_GUID
+ * @id: The WMI method ID to call (e.g. %WMI_BRIGHTNESS_METHOD_LEVEL or
+ * %WMI_BRIGHTNESS_METHOD_SOURCE)
+ * @mode: The operation to perform on the method (e.g. %WMI_BRIGHTNESS_MODE_SET
+ * or %WMI_BRIGHTNESS_MODE_GET)
+ * @val: Pointer to a value passed in by the caller when @mode is
+ * %WMI_BRIGHTNESS_MODE_SET, or a value passed out to caller when @mode
+ * is %WMI_BRIGHTNESS_MODE_GET or %WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL.
+ *
+ * Returns 0 on success, or a negative error number on failure.
+ */
+static int wmi_brightness_notify(struct wmi_device *w, enum wmi_brightness_method id, enum wmi_brightness_mode mode, u32 *val)
+{
+ struct wmi_brightness_args args = {
+ .mode = mode,
+ .val = 0,
+ .ret = 0,
+ };
+ struct acpi_buffer buf = { (acpi_size)sizeof(args), &args };
+ acpi_status status;
+
+ if (id < WMI_BRIGHTNESS_METHOD_LEVEL ||
+ id >= WMI_BRIGHTNESS_METHOD_MAX ||
+ mode < WMI_BRIGHTNESS_MODE_GET || mode >= WMI_BRIGHTNESS_MODE_MAX)
+ return -EINVAL;
+
+ if (mode == WMI_BRIGHTNESS_MODE_SET)
+ args.val = *val;
+
+ status = wmidev_evaluate_method(w, 0, id, &buf, &buf);
+ if (ACPI_FAILURE(status)) {
+ dev_err(&w->dev, "EC backlight control failed: %s\n",
+ acpi_format_exception(status));
+ return -EIO;
+ }
+
+ if (mode != WMI_BRIGHTNESS_MODE_SET)
+ *val = args.ret;
+
+ return 0;
+}
+
+static int nvidia_wmi_ec_backlight_update_status(struct backlight_device *bd)
+{
+ struct wmi_device *wdev = bl_get_data(bd);
+
+ return wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_SET,
+ &bd->props.brightness);
+}
+
+static int nvidia_wmi_ec_backlight_get_brightness(struct backlight_device *bd)
+{
+ struct wmi_device *wdev = bl_get_data(bd);
+ u32 level;
+ int ret;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET, &level);
+ if (ret < 0)
+ return ret;
+
+ return level;
+}
+
+static const struct backlight_ops nvidia_wmi_ec_backlight_ops = {
+ .update_status = nvidia_wmi_ec_backlight_update_status,
+ .get_brightness = nvidia_wmi_ec_backlight_get_brightness,
+};
+
+static int nvidia_wmi_ec_backlight_probe(struct wmi_device *wdev, const void *ctx)
+{
+ struct backlight_properties props = {};
+ struct backlight_device *bdev;
+ u32 source;
+ int ret;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_SOURCE,
+ WMI_BRIGHTNESS_MODE_GET, &source);
+ if (ret)
+ return ret;
+
+ /*
+ * This driver is only to be used when brightness control is handled
+ * by the EC; otherwise, the GPU driver(s) should control brightness.
+ */
+ if (source != WMI_BRIGHTNESS_SOURCE_EC)
+ return -ENODEV;
+
+ /*
+ * Identify this backlight device as a firmware device so that it can
+ * be prioritized over any exposed GPU-driven raw device(s).
+ */
+ props.type = BACKLIGHT_FIRMWARE;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET_MAX_LEVEL,
+ &props.max_brightness);
+ if (ret)
+ return ret;
+
+ ret = wmi_brightness_notify(wdev, WMI_BRIGHTNESS_METHOD_LEVEL,
+ WMI_BRIGHTNESS_MODE_GET, &props.brightness);
+ if (ret)
+ return ret;
+
+ bdev = devm_backlight_device_register(&wdev->dev,
+ "nvidia_wmi_ec_backlight",
+ &wdev->dev, wdev,
+ &nvidia_wmi_ec_backlight_ops,
+ &props);
+ return PTR_ERR_OR_ZERO(bdev);
+}
+
+#define WMI_BRIGHTNESS_GUID "603E9613-EF25-4338-A3D0-C46177516DB7"
+
+static const struct wmi_device_id nvidia_wmi_ec_backlight_id_table[] = {
+ { .guid_string = WMI_BRIGHTNESS_GUID },
+ { }
+};
+MODULE_DEVICE_TABLE(wmi, nvidia_wmi_ec_backlight_id_table);
+
+static struct wmi_driver nvidia_wmi_ec_backlight_driver = {
+ .driver = {
+ .name = "nvidia-wmi-ec-backlight",
+ },
+ .probe = nvidia_wmi_ec_backlight_probe,
+ .id_table = nvidia_wmi_ec_backlight_id_table,
+};
+module_wmi_driver(nvidia_wmi_ec_backlight_driver);
+
+MODULE_AUTHOR("Daniel Dadap <ddadap@nvidia.com>");
+MODULE_DESCRIPTION("NVIDIA WMI EC Backlight driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index d4f444401496..37850d07987d 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -470,7 +470,7 @@ static ssize_t numbatt_show(struct device *dev, struct device_attribute *attr,
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_NUM_BATTERIES]);
}
static ssize_t lcdtype_show(struct device *dev, struct device_attribute *attr,
@@ -482,7 +482,7 @@ static ssize_t lcdtype_show(struct device *dev, struct device_attribute *attr,
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_LCD_TYPE]);
}
static ssize_t mute_show(struct device *dev, struct device_attribute *attr,
@@ -494,7 +494,7 @@ static ssize_t mute_show(struct device *dev, struct device_attribute *attr,
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_MUTE]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_MUTE]);
}
static ssize_t mute_store(struct device *dev, struct device_attribute *attr,
@@ -524,7 +524,7 @@ static ssize_t sticky_key_show(struct device *dev, struct device_attribute *attr
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sticky_key);
+ return sysfs_emit(buf, "%u\n", pcc->sticky_key);
}
static ssize_t sticky_key_store(struct device *dev, struct device_attribute *attr,
@@ -566,7 +566,7 @@ static ssize_t eco_mode_show(struct device *dev, struct device_attribute *attr,
result = -EIO;
break;
}
- return snprintf(buf, PAGE_SIZE, "%u\n", result);
+ return sysfs_emit(buf, "%u\n", result);
}
static ssize_t eco_mode_store(struct device *dev, struct device_attribute *attr,
@@ -625,7 +625,7 @@ static ssize_t ac_brightness_show(struct device *dev, struct device_attribute *a
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_AC_CUR_BRIGHT]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_AC_CUR_BRIGHT]);
}
static ssize_t ac_brightness_store(struct device *dev, struct device_attribute *attr,
@@ -655,7 +655,7 @@ static ssize_t dc_brightness_show(struct device *dev, struct device_attribute *a
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_DC_CUR_BRIGHT]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_DC_CUR_BRIGHT]);
}
static ssize_t dc_brightness_store(struct device *dev, struct device_attribute *attr,
@@ -685,7 +685,7 @@ static ssize_t current_brightness_show(struct device *dev, struct device_attribu
if (!acpi_pcc_retrieve_biosdata(pcc))
return -EIO;
- return snprintf(buf, PAGE_SIZE, "%u\n", pcc->sinf[SINF_CUR_BRIGHT]);
+ return sysfs_emit(buf, "%u\n", pcc->sinf[SINF_CUR_BRIGHT]);
}
static ssize_t current_brightness_store(struct device *dev, struct device_attribute *attr,
@@ -710,7 +710,7 @@ static ssize_t current_brightness_store(struct device *dev, struct device_attrib
static ssize_t cdpower_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", get_optd_power_state());
+ return sysfs_emit(buf, "%d\n", get_optd_power_state());
}
static ssize_t cdpower_store(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 704813374922..d8d0c0bed5e9 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -964,7 +964,7 @@ static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *a
if (item->validate)
value = item->validate(SNC_VALIDATE_OUT, value);
- return snprintf(buffer, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buffer, "%d\n", value);
}
static ssize_t sony_nc_sysfs_store(struct device *dev,
@@ -1811,9 +1811,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
- return count;
+ return sysfs_emit(buffer, "%d\n", kbdbl_ctl->mode);
}
static int __sony_nc_kbd_backlight_timeout_set(u8 value)
@@ -1855,9 +1853,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
- return count;
+ return sysfs_emit(buffer, "%d\n", kbdbl_ctl->timeout);
}
static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
@@ -2051,21 +2047,18 @@ static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
break;
}
- return snprintf(buffer, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buffer, "%d\n", status);
}
static ssize_t sony_nc_battery_care_health_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
unsigned int health;
if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
return -EIO;
- count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
-
- return count;
+ return sysfs_emit(buffer, "%d\n", health & 0xff);
}
static int sony_nc_battery_care_setup(struct platform_device *pd,
@@ -2215,15 +2208,12 @@ static ssize_t sony_nc_thermal_mode_store(struct device *dev,
static ssize_t sony_nc_thermal_mode_show(struct device *dev,
struct device_attribute *attr, char *buffer)
{
- ssize_t count = 0;
int mode = sony_nc_thermal_mode_get();
if (mode < 0)
return mode;
- count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
-
- return count;
+ return sysfs_emit(buffer, "%s\n", snc_thermal_profiles[mode]);
}
static int sony_nc_thermal_setup(struct platform_device *pd)
@@ -2361,7 +2351,7 @@ static ssize_t sony_nc_lid_resume_show(struct device *dev,
while (pos < LID_RESUME_MAX) {
if (&lid_ctl->attrs[pos].attr == &attr->attr)
- return snprintf(buffer, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buffer, "%d\n",
(lid_ctl->status >> pos) & 0x01);
pos++;
}
@@ -2493,7 +2483,7 @@ static ssize_t sony_nc_gfx_switch_status_show(struct device *dev,
if (pos < 0)
return pos;
- return snprintf(buffer, PAGE_SIZE, "%s\n",
+ return sysfs_emit(buffer, "%s\n",
pos == SPEED ? "speed" :
pos == STAMINA ? "stamina" :
pos == AUTO ? "auto" : "unknown");
@@ -2568,7 +2558,7 @@ static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
if (sony_call_snc_handle(0x0131, 0x0100, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
}
static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
@@ -2642,7 +2632,7 @@ static ssize_t sony_nc_lowbatt_show(struct device *dev,
if (sony_call_snc_handle(0x0121, 0x0200, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result & 1);
+ return sysfs_emit(buffer, "%d\n", result & 1);
}
static int sony_nc_lowbatt_setup(struct platform_device *pd)
@@ -2708,7 +2698,7 @@ static ssize_t sony_nc_hsfan_show(struct device *dev,
if (sony_call_snc_handle(0x0149, 0x0100, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
}
static ssize_t sony_nc_fanspeed_show(struct device *dev,
@@ -2719,7 +2709,7 @@ static ssize_t sony_nc_fanspeed_show(struct device *dev,
if (sony_call_snc_handle(0x0149, 0x0300, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0xff);
+ return sysfs_emit(buffer, "%d\n", result & 0xff);
}
static int sony_nc_fanspeed_setup(struct platform_device *pd)
@@ -2815,7 +2805,7 @@ static ssize_t sony_nc_usb_charge_show(struct device *dev,
if (sony_call_snc_handle(0x0155, 0x0000, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
+ return sysfs_emit(buffer, "%d\n", result & 0x01);
}
static int sony_nc_usb_charge_setup(struct platform_device *pd)
@@ -2870,7 +2860,7 @@ static ssize_t sony_nc_panelid_show(struct device *dev,
if (sony_call_snc_handle(0x011D, 0x0000, &result))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", result);
+ return sysfs_emit(buffer, "%d\n", result);
}
static int sony_nc_panelid_setup(struct platform_device *pd)
@@ -2998,7 +2988,7 @@ static ssize_t sony_nc_touchpad_show(struct device *dev,
if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
return -EINVAL;
- return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
+ return sysfs_emit(buffer, "%d\n", !(result & 0x01));
}
static int sony_nc_touchpad_setup(struct platform_device *pd,
@@ -3915,7 +3905,7 @@ static ssize_t sony_pic_wwanpower_show(struct device *dev,
{
ssize_t count;
mutex_lock(&spic_dev.lock);
- count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.wwan_power);
+ count = sysfs_emit(buffer, "%d\n", spic_dev.wwan_power);
mutex_unlock(&spic_dev.lock);
return count;
}
@@ -3954,7 +3944,7 @@ static ssize_t sony_pic_bluetoothpower_show(struct device *dev,
{
ssize_t count = 0;
mutex_lock(&spic_dev.lock);
- count = snprintf(buffer, PAGE_SIZE, "%d\n", spic_dev.bluetooth_power);
+ count = sysfs_emit(buffer, "%d\n", spic_dev.bluetooth_power);
mutex_unlock(&spic_dev.lock);
return count;
}
@@ -3996,7 +3986,7 @@ static ssize_t sony_pic_fanspeed_show(struct device *dev,
if (sony_pic_get_fanspeed(&value))
return -EIO;
- return snprintf(buffer, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buffer, "%d\n", value);
}
#define SPIC_ATTR(_name, _mode) \
diff --git a/drivers/platform/x86/system76_acpi.c b/drivers/platform/x86/system76_acpi.c
index c14fd22ba196..8b292ee95a14 100644
--- a/drivers/platform/x86/system76_acpi.c
+++ b/drivers/platform/x86/system76_acpi.c
@@ -10,13 +10,20 @@
*/
#include <linux/acpi.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
#include <linux/init.h>
+#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/pci_ids.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
+#include <acpi/battery.h>
+
struct system76_data {
struct acpi_device *acpi_dev;
struct led_classdev ap_led;
@@ -24,6 +31,10 @@ struct system76_data {
enum led_brightness kb_brightness;
enum led_brightness kb_toggle_brightness;
int kb_color;
+ struct device *therm;
+ union acpi_object *nfan;
+ union acpi_object *ntmp;
+ struct input_dev *input;
};
static const struct acpi_device_id device_ids[] = {
@@ -63,9 +74,57 @@ static int system76_get(struct system76_data *data, char *method)
handle = acpi_device_handle(data->acpi_dev);
status = acpi_evaluate_integer(handle, method, NULL, &ret);
if (ACPI_SUCCESS(status))
- return (int)ret;
- else
- return -1;
+ return ret;
+ return -ENODEV;
+}
+
+// Get a System76 ACPI device value by name with index
+static int system76_get_index(struct system76_data *data, char *method, int index)
+{
+ union acpi_object obj;
+ struct acpi_object_list obj_list;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = 0;
+
+ obj.type = ACPI_TYPE_INTEGER;
+ obj.integer.value = index;
+ obj_list.count = 1;
+ obj_list.pointer = &obj;
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_integer(handle, method, &obj_list, &ret);
+ if (ACPI_SUCCESS(status))
+ return ret;
+ return -ENODEV;
+}
+
+// Get a System76 ACPI device object by name
+static int system76_get_object(struct system76_data *data, char *method, union acpi_object **obj)
+{
+ acpi_handle handle;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ handle = acpi_device_handle(data->acpi_dev);
+ status = acpi_evaluate_object(handle, method, NULL, &buf);
+ if (ACPI_SUCCESS(status)) {
+ *obj = buf.pointer;
+ return 0;
+ }
+
+ return -ENODEV;
+}
+
+// Get a name from a System76 ACPI device object
+static char *system76_name(union acpi_object *obj, int index)
+{
+ if (obj && obj->type == ACPI_TYPE_PACKAGE && index < obj->package.count) {
+ if (obj->package.elements[index].type == ACPI_TYPE_STRING)
+ return obj->package.elements[index].string.pointer;
+ }
+
+ return NULL;
}
// Set a System76 ACPI device value by name
@@ -88,6 +147,154 @@ static int system76_set(struct system76_data *data, char *method, int value)
return -1;
}
+#define BATTERY_THRESHOLD_INVALID 0xFF
+
+enum {
+ THRESHOLD_START,
+ THRESHOLD_END,
+};
+
+static ssize_t battery_get_threshold(int which, char *buf)
+{
+ struct acpi_object_list input;
+ union acpi_object param;
+ acpi_handle handle;
+ acpi_status status;
+ unsigned long long ret = BATTERY_THRESHOLD_INVALID;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ input.count = 1;
+ input.pointer = &param;
+ // Start/stop selection
+ param.type = ACPI_TYPE_INTEGER;
+ param.integer.value = which;
+
+ status = acpi_evaluate_integer(handle, "GBCT", &input, &ret);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ if (ret == BATTERY_THRESHOLD_INVALID)
+ return -EINVAL;
+
+ return sysfs_emit(buf, "%d\n", (int)ret);
+}
+
+static ssize_t battery_set_threshold(int which, const char *buf, size_t count)
+{
+ struct acpi_object_list input;
+ union acpi_object params[2];
+ acpi_handle handle;
+ acpi_status status;
+ unsigned int value;
+ int ret;
+
+ handle = ec_get_handle();
+ if (!handle)
+ return -ENODEV;
+
+ ret = kstrtouint(buf, 10, &value);
+ if (ret)
+ return ret;
+
+ if (value > 100)
+ return -EINVAL;
+
+ input.count = 2;
+ input.pointer = params;
+ // Start/stop selection
+ params[0].type = ACPI_TYPE_INTEGER;
+ params[0].integer.value = which;
+ // Threshold value
+ params[1].type = ACPI_TYPE_INTEGER;
+ params[1].integer.value = value;
+
+ status = acpi_evaluate_object(handle, "SBCT", &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ return count;
+}
+
+static ssize_t charge_control_start_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return battery_get_threshold(THRESHOLD_START, buf);
+}
+
+static ssize_t charge_control_start_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return battery_set_threshold(THRESHOLD_START, buf, count);
+}
+
+static DEVICE_ATTR_RW(charge_control_start_threshold);
+
+static ssize_t charge_control_end_threshold_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return battery_get_threshold(THRESHOLD_END, buf);
+}
+
+static ssize_t charge_control_end_threshold_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ return battery_set_threshold(THRESHOLD_END, buf, count);
+}
+
+static DEVICE_ATTR_RW(charge_control_end_threshold);
+
+static struct attribute *system76_battery_attrs[] = {
+ &dev_attr_charge_control_start_threshold.attr,
+ &dev_attr_charge_control_end_threshold.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(system76_battery);
+
+static int system76_battery_add(struct power_supply *battery)
+{
+ // System76 EC only supports 1 battery
+ if (strcmp(battery->desc->name, "BAT0") != 0)
+ return -ENODEV;
+
+ if (device_add_groups(&battery->dev, system76_battery_groups))
+ return -ENODEV;
+
+ return 0;
+}
+
+static int system76_battery_remove(struct power_supply *battery)
+{
+ device_remove_groups(&battery->dev, system76_battery_groups);
+ return 0;
+}
+
+static struct acpi_battery_hook system76_battery_hook = {
+ .add_battery = system76_battery_add,
+ .remove_battery = system76_battery_remove,
+ .name = "System76 Battery Extension",
+};
+
+static void system76_battery_init(void)
+{
+ acpi_handle handle;
+
+ handle = ec_get_handle();
+ if (handle && acpi_has_method(handle, "GBCT"))
+ battery_hook_register(&system76_battery_hook);
+}
+
+static void system76_battery_exit(void)
+{
+ acpi_handle handle;
+
+ handle = ec_get_handle();
+ if (handle && acpi_has_method(handle, "GBCT"))
+ battery_hook_unregister(&system76_battery_hook);
+}
+
// Get the airplane mode LED brightness
static enum led_brightness ap_led_get(struct led_classdev *led)
{
@@ -141,7 +348,7 @@ static ssize_t kb_led_color_show(
led = (struct led_classdev *)dev->driver_data;
data = container_of(led, struct system76_data, kb_led);
- return sprintf(buf, "%06X\n", data->kb_color);
+ return sysfs_emit(buf, "%06X\n", data->kb_color);
}
// Set the keyboard LED color
@@ -169,7 +376,7 @@ static ssize_t kb_led_color_store(
return size;
}
-static const struct device_attribute kb_led_color_dev_attr = {
+static struct device_attribute dev_attr_kb_led_color = {
.attr = {
.name = "color",
.mode = 0644,
@@ -178,6 +385,13 @@ static const struct device_attribute kb_led_color_dev_attr = {
.store = kb_led_color_store,
};
+static struct attribute *system76_kb_led_color_attrs[] = {
+ &dev_attr_kb_led_color.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(system76_kb_led_color);
+
// Notify that the keyboard LED was changed by hardware
static void kb_led_notify(struct system76_data *data)
{
@@ -270,6 +484,155 @@ static void kb_led_hotkey_color(struct system76_data *data)
kb_led_notify(data);
}
+static umode_t thermal_is_visible(const void *drvdata, enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ const struct system76_data *data = drvdata;
+
+ switch (type) {
+ case hwmon_fan:
+ case hwmon_pwm:
+ if (system76_name(data->nfan, channel))
+ return 0444;
+ break;
+
+ case hwmon_temp:
+ if (system76_name(data->ntmp, channel))
+ return 0444;
+ break;
+
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static int thermal_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, long *val)
+{
+ struct system76_data *data = dev_get_drvdata(dev);
+ int raw;
+
+ switch (type) {
+ case hwmon_fan:
+ if (attr == hwmon_fan_input) {
+ raw = system76_get_index(data, "GFAN", channel);
+ if (raw < 0)
+ return raw;
+ *val = (raw >> 8) & 0xFFFF;
+ return 0;
+ }
+ break;
+
+ case hwmon_pwm:
+ if (attr == hwmon_pwm_input) {
+ raw = system76_get_index(data, "GFAN", channel);
+ if (raw < 0)
+ return raw;
+ *val = raw & 0xFF;
+ return 0;
+ }
+ break;
+
+ case hwmon_temp:
+ if (attr == hwmon_temp_input) {
+ raw = system76_get_index(data, "GTMP", channel);
+ if (raw < 0)
+ return raw;
+ *val = raw * 1000;
+ return 0;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static int thermal_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ struct system76_data *data = dev_get_drvdata(dev);
+
+ switch (type) {
+ case hwmon_fan:
+ if (attr == hwmon_fan_label) {
+ *str = system76_name(data->nfan, channel);
+ if (*str)
+ return 0;
+ }
+ break;
+
+ case hwmon_temp:
+ if (attr == hwmon_temp_label) {
+ *str = system76_name(data->ntmp, channel);
+ if (*str)
+ return 0;
+ }
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops thermal_ops = {
+ .is_visible = thermal_is_visible,
+ .read = thermal_read,
+ .read_string = thermal_read_string,
+};
+
+// Allocate up to 8 fans and temperatures
+static const struct hwmon_channel_info *thermal_channel_info[] = {
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ NULL
+};
+
+static const struct hwmon_chip_info thermal_chip_info = {
+ .ops = &thermal_ops,
+ .info = thermal_channel_info,
+};
+
+static void input_key(struct system76_data *data, unsigned int code)
+{
+ input_report_key(data->input, code, 1);
+ input_sync(data->input);
+
+ input_report_key(data->input, code, 0);
+ input_sync(data->input);
+}
+
// Handle ACPI notification
static void system76_notify(struct acpi_device *acpi_dev, u32 event)
{
@@ -292,6 +655,9 @@ static void system76_notify(struct acpi_device *acpi_dev, u32 event)
case 0x84:
kb_led_hotkey_color(data);
break;
+ case 0x85:
+ input_key(data, KEY_SCREENLOCK);
+ break;
}
}
@@ -326,6 +692,7 @@ static int system76_add(struct acpi_device *acpi_dev)
data->kb_led.brightness_set_blocking = kb_led_set;
if (acpi_has_method(acpi_device_handle(data->acpi_dev), "SKBC")) {
data->kb_led.max_brightness = 255;
+ data->kb_led.groups = system76_kb_led_color_groups;
data->kb_toggle_brightness = 72;
data->kb_color = 0xffffff;
system76_set(data, "SKBC", data->kb_color);
@@ -337,16 +704,42 @@ static int system76_add(struct acpi_device *acpi_dev)
if (err)
return err;
- if (data->kb_color >= 0) {
- err = device_create_file(
- data->kb_led.dev,
- &kb_led_color_dev_attr
- );
- if (err)
- return err;
- }
+ data->input = devm_input_allocate_device(&acpi_dev->dev);
+ if (!data->input)
+ return -ENOMEM;
+
+ data->input->name = "System76 ACPI Hotkeys";
+ data->input->phys = "system76_acpi/input0";
+ data->input->id.bustype = BUS_HOST;
+ data->input->dev.parent = &acpi_dev->dev;
+ input_set_capability(data->input, EV_KEY, KEY_SCREENLOCK);
+
+ err = input_register_device(data->input);
+ if (err)
+ goto error;
+
+ err = system76_get_object(data, "NFAN", &data->nfan);
+ if (err)
+ goto error;
+
+ err = system76_get_object(data, "NTMP", &data->ntmp);
+ if (err)
+ goto error;
+
+ data->therm = devm_hwmon_device_register_with_info(&acpi_dev->dev,
+ "system76_acpi", data, &thermal_chip_info, NULL);
+ err = PTR_ERR_OR_ZERO(data->therm);
+ if (err)
+ goto error;
+
+ system76_battery_init();
return 0;
+
+error:
+ kfree(data->ntmp);
+ kfree(data->nfan);
+ return err;
}
// Remove a System76 ACPI device
@@ -355,13 +748,15 @@ static int system76_remove(struct acpi_device *acpi_dev)
struct system76_data *data;
data = acpi_driver_data(acpi_dev);
- if (data->kb_color >= 0)
- device_remove_file(data->kb_led.dev, &kb_led_color_dev_attr);
- devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
+ system76_battery_exit();
+ devm_led_classdev_unregister(&acpi_dev->dev, &data->ap_led);
devm_led_classdev_unregister(&acpi_dev->dev, &data->kb_led);
+ kfree(data->nfan);
+ kfree(data->ntmp);
+
system76_get(data, "FINI");
return 0;
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 50ff04c84650..9c632df734bb 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -1001,79 +1001,6 @@ static struct platform_driver tpacpi_hwmon_pdriver = {
* sysfs support helpers
*/
-struct attribute_set {
- unsigned int members, max_members;
- struct attribute_group group;
-};
-
-struct attribute_set_obj {
- struct attribute_set s;
- struct attribute *a;
-} __attribute__((packed));
-
-static struct attribute_set *create_attr_set(unsigned int max_members,
- const char *name)
-{
- struct attribute_set_obj *sobj;
-
- if (max_members == 0)
- return NULL;
-
- /* Allocates space for implicit NULL at the end too */
- sobj = kzalloc(sizeof(struct attribute_set_obj) +
- max_members * sizeof(struct attribute *),
- GFP_KERNEL);
- if (!sobj)
- return NULL;
- sobj->s.max_members = max_members;
- sobj->s.group.attrs = &sobj->a;
- sobj->s.group.name = name;
-
- return &sobj->s;
-}
-
-#define destroy_attr_set(_set) \
- kfree(_set)
-
-/* not multi-threaded safe, use it in a single thread per set */
-static int add_to_attr_set(struct attribute_set *s, struct attribute *attr)
-{
- if (!s || !attr)
- return -EINVAL;
-
- if (s->members >= s->max_members)
- return -ENOMEM;
-
- s->group.attrs[s->members] = attr;
- s->members++;
-
- return 0;
-}
-
-static int add_many_to_attr_set(struct attribute_set *s,
- struct attribute **attr,
- unsigned int count)
-{
- int i, res;
-
- for (i = 0; i < count; i++) {
- res = add_to_attr_set(s, attr[i]);
- if (res)
- return res;
- }
-
- return 0;
-}
-
-static void delete_attr_set(struct attribute_set *s, struct kobject *kobj)
-{
- sysfs_remove_group(kobj, &s->group);
- destroy_attr_set(s);
-}
-
-#define register_attr_set_with_sysfs(_attr_set, _kobj) \
- sysfs_create_group(_kobj, &_attr_set->group)
-
static int parse_strtoul(const char *buf,
unsigned long max, unsigned long *value)
{
@@ -1348,7 +1275,7 @@ static ssize_t tpacpi_rfk_sysfs_enable_show(const enum tpacpi_rfk_id id,
return status;
}
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
(status == TPACPI_RFK_RADIO_ON) ? 1 : 0);
}
@@ -1441,14 +1368,14 @@ static int tpacpi_rfk_procfs_write(const enum tpacpi_rfk_id id, char *buf)
/* interface_version --------------------------------------------------- */
static ssize_t interface_version_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", TPACPI_SYSFS_VERSION);
+ return sysfs_emit(buf, "0x%08x\n", TPACPI_SYSFS_VERSION);
}
static DRIVER_ATTR_RO(interface_version);
/* debug_level --------------------------------------------------------- */
static ssize_t debug_level_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%04x\n", dbg_level);
+ return sysfs_emit(buf, "0x%04x\n", dbg_level);
}
static ssize_t debug_level_store(struct device_driver *drv, const char *buf,
@@ -1468,7 +1395,7 @@ static DRIVER_ATTR_RW(debug_level);
/* version ------------------------------------------------------------- */
static ssize_t version_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%s v%s\n",
+ return sysfs_emit(buf, "%s v%s\n",
TPACPI_DESC, TPACPI_VERSION);
}
static DRIVER_ATTR_RO(version);
@@ -1480,7 +1407,7 @@ static DRIVER_ATTR_RO(version);
/* wlsw_emulstate ------------------------------------------------------ */
static ssize_t wlsw_emulstate_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wlsw_emulstate);
+ return sysfs_emit(buf, "%d\n", !!tpacpi_wlsw_emulstate);
}
static ssize_t wlsw_emulstate_store(struct device_driver *drv, const char *buf,
@@ -1503,7 +1430,7 @@ static DRIVER_ATTR_RW(wlsw_emulstate);
/* bluetooth_emulstate ------------------------------------------------- */
static ssize_t bluetooth_emulstate_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_bluetooth_emulstate);
+ return sysfs_emit(buf, "%d\n", !!tpacpi_bluetooth_emulstate);
}
static ssize_t bluetooth_emulstate_store(struct device_driver *drv,
@@ -1523,7 +1450,7 @@ static DRIVER_ATTR_RW(bluetooth_emulstate);
/* wwan_emulstate ------------------------------------------------- */
static ssize_t wwan_emulstate_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_wwan_emulstate);
+ return sysfs_emit(buf, "%d\n", !!tpacpi_wwan_emulstate);
}
static ssize_t wwan_emulstate_store(struct device_driver *drv, const char *buf,
@@ -1543,7 +1470,7 @@ static DRIVER_ATTR_RW(wwan_emulstate);
/* uwb_emulstate ------------------------------------------------- */
static ssize_t uwb_emulstate_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", !!tpacpi_uwb_emulstate);
+ return sysfs_emit(buf, "%d\n", !!tpacpi_uwb_emulstate);
}
static ssize_t uwb_emulstate_store(struct device_driver *drv, const char *buf,
@@ -2042,8 +1969,6 @@ static u32 hotkey_acpi_mask; /* events enabled in firmware */
static u16 *hotkey_keycode_map;
-static struct attribute_set *hotkey_dev_attributes;
-
static void tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_driver_event(const unsigned int scancode);
static void hotkey_poll_setup(const bool may_warn);
@@ -2753,7 +2678,7 @@ static ssize_t hotkey_enable_show(struct device *dev,
if (res)
return res;
- return snprintf(buf, PAGE_SIZE, "%d\n", status);
+ return sysfs_emit(buf, "%d\n", status);
}
static ssize_t hotkey_enable_store(struct device *dev,
@@ -2781,7 +2706,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
+ return sysfs_emit(buf, "0x%08x\n", hotkey_user_mask);
}
static ssize_t hotkey_mask_store(struct device *dev,
@@ -2829,7 +2754,7 @@ static ssize_t hotkey_bios_mask_show(struct device *dev,
{
printk_deprecated_attribute("hotkey_bios_mask",
"This attribute is useless.");
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_orig_mask);
+ return sysfs_emit(buf, "0x%08x\n", hotkey_orig_mask);
}
static DEVICE_ATTR_RO(hotkey_bios_mask);
@@ -2839,7 +2764,7 @@ static ssize_t hotkey_all_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ return sysfs_emit(buf, "0x%08x\n",
hotkey_all_mask | hotkey_source_mask);
}
@@ -2850,7 +2775,7 @@ static ssize_t hotkey_adaptive_all_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ return sysfs_emit(buf, "0x%08x\n",
hotkey_adaptive_all_mask | hotkey_source_mask);
}
@@ -2861,7 +2786,7 @@ static ssize_t hotkey_recommended_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n",
+ return sysfs_emit(buf, "0x%08x\n",
(hotkey_all_mask | hotkey_source_mask)
& ~hotkey_reserved_mask);
}
@@ -2875,7 +2800,7 @@ static ssize_t hotkey_source_mask_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_source_mask);
+ return sysfs_emit(buf, "0x%08x\n", hotkey_source_mask);
}
static ssize_t hotkey_source_mask_store(struct device *dev,
@@ -2926,7 +2851,7 @@ static ssize_t hotkey_poll_freq_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_poll_freq);
+ return sysfs_emit(buf, "%d\n", hotkey_poll_freq);
}
static ssize_t hotkey_poll_freq_store(struct device *dev,
@@ -2968,7 +2893,7 @@ static ssize_t hotkey_radio_sw_show(struct device *dev,
/* Opportunistic update */
tpacpi_rfk_update_hwblock_state((res == TPACPI_RFK_RADIO_OFF));
- return snprintf(buf, PAGE_SIZE, "%d\n",
+ return sysfs_emit(buf, "%d\n",
(res == TPACPI_RFK_RADIO_OFF) ? 0 : 1);
}
@@ -2991,7 +2916,7 @@ static ssize_t hotkey_tablet_mode_show(struct device *dev,
if (res < 0)
return res;
- return snprintf(buf, PAGE_SIZE, "%d\n", !!s);
+ return sysfs_emit(buf, "%d\n", !!s);
}
static DEVICE_ATTR_RO(hotkey_tablet_mode);
@@ -3008,7 +2933,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
+ return sysfs_emit(buf, "%d\n", hotkey_wakeup_reason);
}
static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
@@ -3024,7 +2949,7 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
+ return sysfs_emit(buf, "%d\n", hotkey_autosleep_ack);
}
static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
@@ -3059,7 +2984,7 @@ static ssize_t adaptive_kbd_mode_show(struct device *dev,
if (current_mode < 0)
return current_mode;
- return snprintf(buf, PAGE_SIZE, "%d\n", current_mode);
+ return sysfs_emit(buf, "%d\n", current_mode);
}
static ssize_t adaptive_kbd_mode_store(struct device *dev,
@@ -3089,7 +3014,7 @@ static const struct attribute_group adaptive_kbd_attr_group = {
/* --------------------------------------------------------------------- */
-static struct attribute *hotkey_attributes[] __initdata = {
+static struct attribute *hotkey_attributes[] = {
&dev_attr_hotkey_enable.attr,
&dev_attr_hotkey_bios_enabled.attr,
&dev_attr_hotkey_bios_mask.attr,
@@ -3103,6 +3028,26 @@ static struct attribute *hotkey_attributes[] __initdata = {
&dev_attr_hotkey_source_mask.attr,
&dev_attr_hotkey_poll_freq.attr,
#endif
+ NULL
+};
+
+static umode_t hotkey_attr_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_hotkey_tablet_mode.attr) {
+ if (!tp_features.hotkey_tablet)
+ return 0;
+ } else if (attr == &dev_attr_hotkey_radio_sw.attr) {
+ if (!tp_features.hotkey_wlsw)
+ return 0;
+ }
+
+ return attr->mode;
+}
+
+static const struct attribute_group hotkey_attr_group = {
+ .is_visible = hotkey_attr_is_visible,
+ .attrs = hotkey_attributes,
};
/*
@@ -3161,9 +3106,7 @@ static void hotkey_exit(void)
hotkey_poll_stop_sync();
mutex_unlock(&hotkey_mutex);
#endif
-
- if (hotkey_dev_attributes)
- delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
"restoring original HKEY status and mask\n");
@@ -3249,11 +3192,6 @@ static int hotkey_init_tablet_mode(void)
pr_info("Tablet mode switch found (type: %s), currently in %s mode\n",
type, in_tablet_mode ? "tablet" : "laptop");
- res = add_to_attr_set(hotkey_dev_attributes,
- &dev_attr_hotkey_tablet_mode.attr);
- if (res)
- return -1;
-
return in_tablet_mode;
}
@@ -3515,19 +3453,6 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
tpacpi_disable_brightness_delay();
- /* MUST have enough space for all attributes to be added to
- * hotkey_dev_attributes */
- hotkey_dev_attributes = create_attr_set(
- ARRAY_SIZE(hotkey_attributes) + 2,
- NULL);
- if (!hotkey_dev_attributes)
- return -ENOMEM;
- res = add_many_to_attr_set(hotkey_dev_attributes,
- hotkey_attributes,
- ARRAY_SIZE(hotkey_attributes));
- if (res)
- goto err_exit;
-
/* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
for HKEY interface version 0x100 */
@@ -3636,18 +3561,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
pr_info("radio switch found; radios are %s\n",
enabled(status, 0));
}
- if (tp_features.hotkey_wlsw)
- res = add_to_attr_set(hotkey_dev_attributes,
- &dev_attr_hotkey_radio_sw.attr);
-
- res = hotkey_init_tablet_mode();
- if (res < 0)
- goto err_exit;
- tabletsw_state = res;
-
- res = register_attr_set_with_sysfs(hotkey_dev_attributes,
- &tpacpi_pdev->dev.kobj);
+ tabletsw_state = hotkey_init_tablet_mode();
+ res = sysfs_create_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
if (res)
goto err_exit;
@@ -3746,11 +3662,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
return 0;
err_exit:
- delete_attr_set(hotkey_dev_attributes, &tpacpi_pdev->dev.kobj);
- sysfs_remove_group(&tpacpi_pdev->dev.kobj,
- &adaptive_kbd_attr_group);
-
- hotkey_dev_attributes = NULL;
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &hotkey_attr_group);
+ sysfs_remove_group(&tpacpi_pdev->dev.kobj, &adaptive_kbd_attr_group);
return (res < 0) ? res : 1;
}
@@ -6421,7 +6334,7 @@ static ssize_t thermal_temp_input_show(struct device *dev,
if (value == TPACPI_THERMAL_SENSOR_NA)
return -ENXIO;
- return snprintf(buf, PAGE_SIZE, "%d\n", value);
+ return sysfs_emit(buf, "%d\n", value);
}
#define THERMAL_SENSOR_ATTR_TEMP(_idxA, _idxB) \
@@ -8654,7 +8567,7 @@ static ssize_t fan_pwm1_enable_show(struct device *dev,
} else
mode = 1;
- return snprintf(buf, PAGE_SIZE, "%d\n", mode);
+ return sysfs_emit(buf, "%d\n", mode);
}
static ssize_t fan_pwm1_enable_store(struct device *dev,
@@ -8720,7 +8633,7 @@ static ssize_t fan_pwm1_show(struct device *dev,
if (status > 7)
status = 7;
- return snprintf(buf, PAGE_SIZE, "%u\n", (status * 255) / 7);
+ return sysfs_emit(buf, "%u\n", (status * 255) / 7);
}
static ssize_t fan_pwm1_store(struct device *dev,
@@ -8773,7 +8686,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
if (res < 0)
return res;
- return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+ return sysfs_emit(buf, "%u\n", speed);
}
static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
@@ -8790,7 +8703,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
if (res < 0)
return res;
- return snprintf(buf, PAGE_SIZE, "%u\n", speed);
+ return sysfs_emit(buf, "%u\n", speed);
}
static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
@@ -8798,7 +8711,7 @@ static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
static ssize_t fan_watchdog_show(struct device_driver *drv, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%u\n", fan_watchdog_maxinterval);
+ return sysfs_emit(buf, "%u\n", fan_watchdog_maxinterval);
}
static ssize_t fan_watchdog_store(struct device_driver *drv, const char *buf,
@@ -9145,7 +9058,7 @@ static int fan_write_cmd_level(const char *cmd, int *rc)
if (strlencmp(cmd, "level auto") == 0)
level = TP_EC_FAN_AUTO;
- else if ((strlencmp(cmd, "level disengaged") == 0) |
+ else if ((strlencmp(cmd, "level disengaged") == 0) ||
(strlencmp(cmd, "level full-speed") == 0))
level = TP_EC_FAN_FULLSPEED;
else if (sscanf(cmd, "level %d", &level) != 1)
diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c
index 033f797861d8..fa8812039b82 100644
--- a/drivers/platform/x86/touchscreen_dmi.c
+++ b/drivers/platform/x86/touchscreen_dmi.c
@@ -938,6 +938,23 @@ static const struct ts_dmi_data trekstor_surftab_wintron70_data = {
.properties = trekstor_surftab_wintron70_props,
};
+static const struct property_entry viglen_connect_10_props[] = {
+ PROPERTY_ENTRY_U32("touchscreen-size-x", 1890),
+ PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-x", 6),
+ PROPERTY_ENTRY_U32("touchscreen-fuzz-y", 6),
+ PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"),
+ PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-viglen-connect-10.fw"),
+ PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+ PROPERTY_ENTRY_BOOL("silead,home-button"),
+ { }
+};
+
+static const struct ts_dmi_data viglen_connect_10_data = {
+ .acpi_name = "MSSL1680:00",
+ .properties = viglen_connect_10_props,
+};
+
static const struct property_entry vinga_twizzle_j116_props[] = {
PROPERTY_ENTRY_U32("touchscreen-size-x", 1920),
PROPERTY_ENTRY_U32("touchscreen-size-y", 1280),
@@ -1522,6 +1539,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
},
},
{
+ /* Viglen Connect 10 */
+ .driver_data = (void *)&viglen_connect_10_data,
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Viglen Ltd."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Connect 10'' Tablet PC"),
+ },
+ },
+ {
/* Vinga Twizzle J116 */
.driver_data = (void *)&vinga_twizzle_j116_data,
.matches = {
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index a76313006bdc..c34341f4da76 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -17,6 +17,8 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/build_bug.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -25,6 +27,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/uuid.h>
@@ -39,7 +42,7 @@ MODULE_LICENSE("GPL");
static LIST_HEAD(wmi_block_list);
struct guid_block {
- char guid[16];
+ guid_t guid;
union {
char object_id[2];
struct {
@@ -49,7 +52,10 @@ struct guid_block {
};
u8 instance_count;
u8 flags;
-};
+} __packed;
+static_assert(sizeof(typeof_member(struct guid_block, guid)) == 16);
+static_assert(sizeof(struct guid_block) == 20);
+static_assert(__alignof__(struct guid_block) == 1);
struct wmi_block {
struct wmi_device dev;
@@ -70,10 +76,10 @@ struct wmi_block {
* If the GUID data block is marked as expensive, we must enable and
* explicitly disable data collection.
*/
-#define ACPI_WMI_EXPENSIVE 0x1
-#define ACPI_WMI_METHOD 0x2 /* GUID is a method */
-#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
-#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
+#define ACPI_WMI_EXPENSIVE BIT(0)
+#define ACPI_WMI_METHOD BIT(1) /* GUID is a method */
+#define ACPI_WMI_STRING BIT(2) /* GUID takes & returns a string */
+#define ACPI_WMI_EVENT BIT(3) /* GUID is an event */
static bool debug_event;
module_param(debug_event, bool, 0444);
@@ -91,7 +97,7 @@ static int acpi_wmi_probe(struct platform_device *device);
static const struct acpi_device_id wmi_device_ids[] = {
{"PNP0C14", 0},
{"pnp0c14", 0},
- {"", 0},
+ { }
};
MODULE_DEVICE_TABLE(acpi, wmi_device_ids);
@@ -108,43 +114,44 @@ static struct platform_driver acpi_wmi_driver = {
* GUID parsing functions
*/
-static bool find_guid(const char *guid_string, struct wmi_block **out)
+static acpi_status find_guid(const char *guid_string, struct wmi_block **out)
{
guid_t guid_input;
struct wmi_block *wblock;
- struct guid_block *block;
+
+ if (!guid_string)
+ return AE_BAD_PARAMETER;
if (guid_parse(guid_string, &guid_input))
- return false;
+ return AE_BAD_PARAMETER;
list_for_each_entry(wblock, &wmi_block_list, list) {
- block = &wblock->gblock;
-
- if (memcmp(block->guid, &guid_input, 16) == 0) {
+ if (guid_equal(&wblock->gblock.guid, &guid_input)) {
if (out)
*out = wblock;
- return true;
+
+ return AE_OK;
}
}
- return false;
+
+ return AE_NOT_FOUND;
}
static const void *find_guid_context(struct wmi_block *wblock,
- struct wmi_driver *wdriver)
+ struct wmi_driver *wdriver)
{
const struct wmi_device_id *id;
- guid_t guid_input;
- if (wblock == NULL || wdriver == NULL)
- return NULL;
- if (wdriver->id_table == NULL)
+ id = wdriver->id_table;
+ if (!id)
return NULL;
- id = wdriver->id_table;
while (*id->guid_string) {
+ guid_t guid_input;
+
if (guid_parse(id->guid_string, &guid_input))
continue;
- if (!memcmp(wblock->gblock.guid, &guid_input, 16))
+ if (guid_equal(&wblock->gblock.guid, &guid_input))
return id->context;
id++;
}
@@ -175,9 +182,9 @@ static int get_subobj_info(acpi_handle handle, const char *pathname,
return 0;
}
-static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
+static acpi_status wmi_method_enable(struct wmi_block *wblock, bool enable)
{
- struct guid_block *block = NULL;
+ struct guid_block *block;
char method[5];
acpi_status status;
acpi_handle handle;
@@ -187,11 +194,50 @@ static acpi_status wmi_method_enable(struct wmi_block *wblock, int enable)
snprintf(method, 5, "WE%02X", block->notify_id);
status = acpi_execute_simple_method(handle, method, enable);
+ if (status == AE_NOT_FOUND)
+ return AE_OK;
- if (status != AE_OK && status != AE_NOT_FOUND)
- return status;
+ return status;
+}
+
+#define WMI_ACPI_METHOD_NAME_SIZE 5
+
+static inline void get_acpi_method_name(const struct wmi_block *wblock,
+ const char method,
+ char buffer[static WMI_ACPI_METHOD_NAME_SIZE])
+{
+ static_assert(ARRAY_SIZE(wblock->gblock.object_id) == 2);
+ static_assert(WMI_ACPI_METHOD_NAME_SIZE >= 5);
+
+ buffer[0] = 'W';
+ buffer[1] = method;
+ buffer[2] = wblock->gblock.object_id[0];
+ buffer[3] = wblock->gblock.object_id[1];
+ buffer[4] = '\0';
+}
+
+static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock)
+{
+ if (wblock->gblock.flags & ACPI_WMI_STRING)
+ return ACPI_TYPE_STRING;
else
- return AE_OK;
+ return ACPI_TYPE_BUFFER;
+}
+
+static acpi_status get_event_data(const struct wmi_block *wblock, struct acpi_buffer *out)
+{
+ union acpi_object param = {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = wblock->gblock.notify_id,
+ }
+ };
+ struct acpi_object_list input = {
+ .count = 1,
+ .pointer = &param,
+ };
+
+ return acpi_evaluate_object(wblock->acpi_device->handle, "_WED", &input, out);
}
/*
@@ -226,13 +272,16 @@ EXPORT_SYMBOL_GPL(set_required_buffer_size);
*
* Call an ACPI-WMI method
*/
-acpi_status wmi_evaluate_method(const char *guid_string, u8 instance,
-u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+acpi_status wmi_evaluate_method(const char *guid_string, u8 instance, u32 method_id,
+ const struct acpi_buffer *in, struct acpi_buffer *out)
{
struct wmi_block *wblock = NULL;
+ acpi_status status;
+
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
return wmidev_evaluate_method(&wblock->dev, instance, method_id,
in, out);
}
@@ -248,16 +297,15 @@ EXPORT_SYMBOL_GPL(wmi_evaluate_method);
*
* Call an ACPI-WMI method
*/
-acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
- u32 method_id, const struct acpi_buffer *in, struct acpi_buffer *out)
+acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id,
+ const struct acpi_buffer *in, struct acpi_buffer *out)
{
- struct guid_block *block = NULL;
- struct wmi_block *wblock = NULL;
+ struct guid_block *block;
+ struct wmi_block *wblock;
acpi_handle handle;
- acpi_status status;
struct acpi_object_list input;
union acpi_object params[3];
- char method[5] = "WM";
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
wblock = container_of(wdev, struct wmi_block, dev);
block = &wblock->gblock;
@@ -279,33 +327,27 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance,
if (in) {
input.count = 3;
- if (block->flags & ACPI_WMI_STRING) {
- params[2].type = ACPI_TYPE_STRING;
- } else {
- params[2].type = ACPI_TYPE_BUFFER;
- }
+ params[2].type = get_param_acpi_type(wblock);
params[2].buffer.length = in->length;
params[2].buffer.pointer = in->pointer;
}
- strncat(method, block->object_id, 2);
+ get_acpi_method_name(wblock, 'M', method);
- status = acpi_evaluate_object(handle, method, &input, out);
-
- return status;
+ return acpi_evaluate_object(handle, method, &input, out);
}
EXPORT_SYMBOL_GPL(wmidev_evaluate_method);
static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
struct acpi_buffer *out)
{
- struct guid_block *block = NULL;
+ struct guid_block *block;
acpi_handle handle;
acpi_status status, wc_status = AE_ERROR;
struct acpi_object_list input;
union acpi_object wq_params[1];
- char method[5];
- char wc_method[5] = "WC";
+ char wc_method[WMI_ACPI_METHOD_NAME_SIZE];
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
if (!out)
return AE_BAD_PARAMETER;
@@ -333,7 +375,7 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
* enable collection.
*/
if (block->flags & ACPI_WMI_EXPENSIVE) {
- strncat(wc_method, block->object_id, 2);
+ get_acpi_method_name(wblock, 'C', wc_method);
/*
* Some GUIDs break the specification by declaring themselves
@@ -343,9 +385,7 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
wc_status = acpi_execute_simple_method(handle, wc_method, 1);
}
- strcpy(method, "WQ");
- strncat(method, block->object_id, 2);
-
+ get_acpi_method_name(wblock, 'Q', method);
status = acpi_evaluate_object(handle, method, &input, out);
/*
@@ -353,7 +393,14 @@ static acpi_status __query_block(struct wmi_block *wblock, u8 instance,
* the WQxx method failed - we should disable collection anyway.
*/
if ((block->flags & ACPI_WMI_EXPENSIVE) && ACPI_SUCCESS(wc_status)) {
- status = acpi_execute_simple_method(handle, wc_method, 0);
+ /*
+ * Ignore whether this WCxx call succeeds or not since
+ * the previously executed WQxx method call might have
+ * succeeded, and returning the failing status code
+ * of this call would throw away the result of the WQxx
+ * call, potentially leaking memory.
+ */
+ acpi_execute_simple_method(handle, wc_method, 0);
}
return status;
@@ -371,12 +418,11 @@ acpi_status wmi_query_block(const char *guid_string, u8 instance,
struct acpi_buffer *out)
{
struct wmi_block *wblock;
+ acpi_status status;
- if (!guid_string)
- return AE_BAD_PARAMETER;
-
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
return __query_block(wblock, instance, out);
}
@@ -390,7 +436,7 @@ union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance)
if (ACPI_FAILURE(__query_block(wblock, instance, &out)))
return NULL;
- return (union acpi_object *)out.pointer;
+ return out.pointer;
}
EXPORT_SYMBOL_GPL(wmidev_block_query);
@@ -405,18 +451,20 @@ EXPORT_SYMBOL_GPL(wmidev_block_query);
acpi_status wmi_set_block(const char *guid_string, u8 instance,
const struct acpi_buffer *in)
{
- struct guid_block *block = NULL;
struct wmi_block *wblock = NULL;
+ struct guid_block *block;
acpi_handle handle;
struct acpi_object_list input;
union acpi_object params[2];
- char method[5] = "WS";
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
+ acpi_status status;
- if (!guid_string || !in)
+ if (!in)
return AE_BAD_DATA;
- if (!find_guid(guid_string, &wblock))
- return AE_ERROR;
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
+ return status;
block = &wblock->gblock;
handle = wblock->acpi_device->handle;
@@ -432,16 +480,11 @@ acpi_status wmi_set_block(const char *guid_string, u8 instance,
input.pointer = params;
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
-
- if (block->flags & ACPI_WMI_STRING) {
- params[1].type = ACPI_TYPE_STRING;
- } else {
- params[1].type = ACPI_TYPE_BUFFER;
- }
+ params[1].type = get_param_acpi_type(wblock);
params[1].buffer.length = in->length;
params[1].buffer.pointer = in->pointer;
- strncat(method, block->object_id, 2);
+ get_acpi_method_name(wblock, 'S', method);
return acpi_evaluate_object(handle, method, &input, NULL);
}
@@ -449,7 +492,7 @@ EXPORT_SYMBOL_GPL(wmi_set_block);
static void wmi_dump_wdg(const struct guid_block *g)
{
- pr_info("%pUL:\n", g->guid);
+ pr_info("%pUL:\n", &g->guid);
if (g->flags & ACPI_WMI_EVENT)
pr_info("\tnotify_id: 0x%02X\n", g->notify_id);
else
@@ -482,15 +525,14 @@ static void wmi_notify_debug(u32 value, void *context)
return;
}
- obj = (union acpi_object *)response.pointer;
-
+ obj = response.pointer;
if (!obj)
return;
- pr_info("DEBUG Event ");
- switch(obj->type) {
+ pr_info("DEBUG: event 0x%02X ", value);
+ switch (obj->type) {
case ACPI_TYPE_BUFFER:
- pr_cont("BUFFER_TYPE - length %d\n", obj->buffer.length);
+ pr_cont("BUFFER_TYPE - length %u\n", obj->buffer.length);
break;
case ACPI_TYPE_STRING:
pr_cont("STRING_TYPE - %s\n", obj->string.pointer);
@@ -499,7 +541,7 @@ static void wmi_notify_debug(u32 value, void *context)
pr_cont("INTEGER_TYPE - %llu\n", obj->integer.value);
break;
case ACPI_TYPE_PACKAGE:
- pr_cont("PACKAGE_TYPE - %d elements\n", obj->package.count);
+ pr_cont("PACKAGE_TYPE - %u elements\n", obj->package.count);
break;
default:
pr_cont("object type 0x%X\n", obj->type);
@@ -516,7 +558,8 @@ static void wmi_notify_debug(u32 value, void *context)
* Register a handler for events sent to the ACPI-WMI mapper device.
*/
acpi_status wmi_install_notify_handler(const char *guid,
-wmi_notify_handler handler, void *data)
+ wmi_notify_handler handler,
+ void *data)
{
struct wmi_block *block;
acpi_status status = AE_NOT_EXIST;
@@ -531,7 +574,7 @@ wmi_notify_handler handler, void *data)
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
- if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
if (block->handler &&
block->handler != wmi_notify_debug)
return AE_ALREADY_ACQUIRED;
@@ -539,7 +582,7 @@ wmi_notify_handler handler, void *data)
block->handler = handler;
block->handler_data = data;
- wmi_status = wmi_method_enable(block, 1);
+ wmi_status = wmi_method_enable(block, true);
if ((wmi_status != AE_OK) ||
((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
status = wmi_status;
@@ -551,7 +594,7 @@ wmi_notify_handler handler, void *data)
EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
/**
- * wmi_uninstall_notify_handler - Unregister handler for WMI events
+ * wmi_remove_notify_handler - Unregister handler for WMI events
* @guid: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba
*
* Unregister handler for events sent to the ACPI-WMI mapper device.
@@ -571,7 +614,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
list_for_each_entry(block, &wmi_block_list, list) {
acpi_status wmi_status;
- if (memcmp(block->gblock.guid, &guid_input, 16) == 0) {
+ if (guid_equal(&block->gblock.guid, &guid_input)) {
if (!block->handler ||
block->handler == wmi_notify_debug)
return AE_NULL_ENTRY;
@@ -580,7 +623,7 @@ acpi_status wmi_remove_notify_handler(const char *guid)
block->handler = wmi_notify_debug;
status = AE_OK;
} else {
- wmi_status = wmi_method_enable(block, 0);
+ wmi_status = wmi_method_enable(block, false);
block->handler = NULL;
block->handler_data = NULL;
if ((wmi_status != AE_OK) ||
@@ -605,23 +648,13 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
*/
acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
{
- struct acpi_object_list input;
- union acpi_object params[1];
- struct guid_block *gblock;
struct wmi_block *wblock;
- input.count = 1;
- input.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = event;
-
list_for_each_entry(wblock, &wmi_block_list, list) {
- gblock = &wblock->gblock;
+ struct guid_block *gblock = &wblock->gblock;
- if ((gblock->flags & ACPI_WMI_EVENT) &&
- (gblock->notify_id == event))
- return acpi_evaluate_object(wblock->acpi_device->handle,
- "_WED", &input, out);
+ if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
+ return get_event_data(wblock, out);
}
return AE_NOT_FOUND;
@@ -636,7 +669,7 @@ EXPORT_SYMBOL_GPL(wmi_get_event_data);
*/
bool wmi_has_guid(const char *guid_string)
{
- return find_guid(guid_string, NULL);
+ return ACPI_SUCCESS(find_guid(guid_string, NULL));
}
EXPORT_SYMBOL_GPL(wmi_has_guid);
@@ -651,8 +684,10 @@ EXPORT_SYMBOL_GPL(wmi_has_guid);
char *wmi_get_acpi_device_uid(const char *guid_string)
{
struct wmi_block *wblock = NULL;
+ acpi_status status;
- if (!find_guid(guid_string, &wblock))
+ status = find_guid(guid_string, &wblock);
+ if (ACPI_FAILURE(status))
return NULL;
return acpi_device_uid(wblock->acpi_device);
@@ -669,6 +704,11 @@ static struct wmi_device *dev_to_wdev(struct device *dev)
return container_of(dev, struct wmi_device, dev);
}
+static inline struct wmi_driver *drv_to_wdrv(struct device_driver *drv)
+{
+ return container_of(drv, struct wmi_driver, driver);
+}
+
/*
* sysfs interface
*/
@@ -677,7 +717,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "wmi:%pUL\n", wblock->gblock.guid);
+ return sysfs_emit(buf, "wmi:%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(modalias);
@@ -686,7 +726,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%pUL\n", wblock->gblock.guid);
+ return sysfs_emit(buf, "%pUL\n", &wblock->gblock.guid);
}
static DEVICE_ATTR_RO(guid);
@@ -695,7 +735,7 @@ static ssize_t instance_count_show(struct device *dev,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%d\n", (int)wblock->gblock.instance_count);
+ return sysfs_emit(buf, "%d\n", (int)wblock->gblock.instance_count);
}
static DEVICE_ATTR_RO(instance_count);
@@ -704,8 +744,8 @@ static ssize_t expensive_show(struct device *dev,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%d\n",
- (wblock->gblock.flags & ACPI_WMI_EXPENSIVE) != 0);
+ return sysfs_emit(buf, "%d\n",
+ (wblock->gblock.flags & ACPI_WMI_EXPENSIVE) != 0);
}
static DEVICE_ATTR_RO(expensive);
@@ -714,7 +754,7 @@ static struct attribute *wmi_attrs[] = {
&dev_attr_guid.attr,
&dev_attr_instance_count.attr,
&dev_attr_expensive.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(wmi);
@@ -723,13 +763,13 @@ static ssize_t notify_id_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%02X\n", (unsigned int)wblock->gblock.notify_id);
+ return sysfs_emit(buf, "%02X\n", (unsigned int)wblock->gblock.notify_id);
}
static DEVICE_ATTR_RO(notify_id);
static struct attribute *wmi_event_attrs[] = {
&dev_attr_notify_id.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(wmi_event);
@@ -738,8 +778,8 @@ static ssize_t object_id_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_block *wblock = dev_to_wblock(dev);
- return sprintf(buf, "%c%c\n", wblock->gblock.object_id[0],
- wblock->gblock.object_id[1]);
+ return sysfs_emit(buf, "%c%c\n", wblock->gblock.object_id[0],
+ wblock->gblock.object_id[1]);
}
static DEVICE_ATTR_RO(object_id);
@@ -748,20 +788,20 @@ static ssize_t setable_show(struct device *dev, struct device_attribute *attr,
{
struct wmi_device *wdev = dev_to_wdev(dev);
- return sprintf(buf, "%d\n", (int)wdev->setable);
+ return sysfs_emit(buf, "%d\n", (int)wdev->setable);
}
static DEVICE_ATTR_RO(setable);
static struct attribute *wmi_data_attrs[] = {
&dev_attr_object_id.attr,
&dev_attr_setable.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(wmi_data);
static struct attribute *wmi_method_attrs[] = {
&dev_attr_object_id.attr,
- NULL,
+ NULL
};
ATTRIBUTE_GROUPS(wmi_method);
@@ -769,10 +809,10 @@ static int wmi_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct wmi_block *wblock = dev_to_wblock(dev);
- if (add_uevent_var(env, "MODALIAS=wmi:%pUL", wblock->gblock.guid))
+ if (add_uevent_var(env, "MODALIAS=wmi:%pUL", &wblock->gblock.guid))
return -ENOMEM;
- if (add_uevent_var(env, "WMI_GUID=%pUL", wblock->gblock.guid))
+ if (add_uevent_var(env, "WMI_GUID=%pUL", &wblock->gblock.guid))
return -ENOMEM;
return 0;
@@ -787,8 +827,7 @@ static void wmi_dev_release(struct device *dev)
static int wmi_dev_match(struct device *dev, struct device_driver *driver)
{
- struct wmi_driver *wmi_driver =
- container_of(driver, struct wmi_driver, driver);
+ struct wmi_driver *wmi_driver = drv_to_wdrv(driver);
struct wmi_block *wblock = dev_to_wblock(dev);
const struct wmi_device_id *id = wmi_driver->id_table;
@@ -800,7 +839,7 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
if (WARN_ON(guid_parse(id->guid_string, &driver_guid)))
continue;
- if (!memcmp(&driver_guid, wblock->gblock.guid, 16))
+ if (guid_equal(&driver_guid, &wblock->gblock.guid))
return 1;
id++;
@@ -811,8 +850,8 @@ static int wmi_dev_match(struct device *dev, struct device_driver *driver)
static int wmi_char_open(struct inode *inode, struct file *filp)
{
const char *driver_name = filp->f_path.dentry->d_iname;
- struct wmi_block *wblock = NULL;
- struct wmi_block *next = NULL;
+ struct wmi_block *wblock;
+ struct wmi_block *next;
list_for_each_entry_safe(wblock, next, &wmi_block_list, list) {
if (!wblock->dev.dev.driver)
@@ -830,7 +869,7 @@ static int wmi_char_open(struct inode *inode, struct file *filp)
}
static ssize_t wmi_char_read(struct file *filp, char __user *buffer,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct wmi_block *wblock = filp->private_data;
@@ -844,8 +883,8 @@ static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
struct wmi_ioctl_buffer __user *input =
(struct wmi_ioctl_buffer __user *) arg;
struct wmi_block *wblock = filp->private_data;
- struct wmi_ioctl_buffer *buf = NULL;
- struct wmi_driver *wdriver = NULL;
+ struct wmi_ioctl_buffer *buf;
+ struct wmi_driver *wdriver;
int ret;
if (_IOC_TYPE(cmd) != WMI_IOC)
@@ -885,8 +924,7 @@ static long wmi_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
/* let the driver do any filtering and do the call */
- wdriver = container_of(wblock->dev.dev.driver,
- struct wmi_driver, driver);
+ wdriver = drv_to_wdrv(wblock->dev.dev.driver);
if (!try_module_get(wdriver->driver.owner)) {
ret = -EBUSY;
goto out_ioctl;
@@ -919,12 +957,11 @@ static const struct file_operations wmi_fops = {
static int wmi_dev_probe(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
- struct wmi_driver *wdriver =
- container_of(dev->driver, struct wmi_driver, driver);
+ struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
int ret = 0;
char *buf;
- if (ACPI_FAILURE(wmi_method_enable(wblock, 1)))
+ if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
dev_warn(dev, "failed to enable device -- probing anyway\n");
if (wdriver->probe) {
@@ -975,7 +1012,7 @@ probe_misc_failure:
probe_string_failure:
kfree(wblock->handler_data);
probe_failure:
- if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "failed to disable device\n");
return ret;
}
@@ -983,8 +1020,7 @@ probe_failure:
static void wmi_dev_remove(struct device *dev)
{
struct wmi_block *wblock = dev_to_wblock(dev);
- struct wmi_driver *wdriver =
- container_of(dev->driver, struct wmi_driver, driver);
+ struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
if (wdriver->filter_callback) {
misc_deregister(&wblock->char_dev);
@@ -995,7 +1031,7 @@ static void wmi_dev_remove(struct device *dev)
if (wdriver->remove)
wdriver->remove(dev_to_wdev(dev));
- if (ACPI_FAILURE(wmi_method_enable(wblock, 0)))
+ if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
dev_warn(dev, "failed to disable device\n");
}
@@ -1031,20 +1067,19 @@ static const struct device_type wmi_type_data = {
};
static int wmi_create_device(struct device *wmi_bus_dev,
- const struct guid_block *gblock,
struct wmi_block *wblock,
struct acpi_device *device)
{
struct acpi_device_info *info;
- char method[5];
+ char method[WMI_ACPI_METHOD_NAME_SIZE];
int result;
- if (gblock->flags & ACPI_WMI_EVENT) {
+ if (wblock->gblock.flags & ACPI_WMI_EVENT) {
wblock->dev.dev.type = &wmi_type_event;
goto out_init;
}
- if (gblock->flags & ACPI_WMI_METHOD) {
+ if (wblock->gblock.flags & ACPI_WMI_METHOD) {
wblock->dev.dev.type = &wmi_type_method;
mutex_init(&wblock->char_mutex);
goto out_init;
@@ -1055,8 +1090,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
* required per the WMI documentation. If it is not present,
* we ignore this data block.
*/
- strcpy(method, "WQ");
- strncat(method, wblock->gblock.object_id, 2);
+ get_acpi_method_name(wblock, 'Q', method);
result = get_subobj_info(device->handle, method, &info);
if (result) {
@@ -1083,8 +1117,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
kfree(info);
- strcpy(method, "WS");
- strncat(method, wblock->gblock.object_id, 2);
+ get_acpi_method_name(wblock, 'S', method);
result = get_subobj_info(device->handle, method, NULL);
if (result == 0)
@@ -1094,7 +1127,7 @@ static int wmi_create_device(struct device *wmi_bus_dev,
wblock->dev.dev.bus = &wmi_bus_type;
wblock->dev.dev.parent = wmi_bus_dev;
- dev_set_name(&wblock->dev.dev, "%pUL", gblock->guid);
+ dev_set_name(&wblock->dev.dev, "%pUL", &wblock->gblock.guid);
device_initialize(&wblock->dev.dev);
@@ -1114,12 +1147,12 @@ static void wmi_free_devices(struct acpi_device *device)
}
}
-static bool guid_already_parsed(struct acpi_device *device, const u8 *guid)
+static bool guid_already_parsed(struct acpi_device *device, const guid_t *guid)
{
struct wmi_block *wblock;
list_for_each_entry(wblock, &wmi_block_list, list) {
- if (memcmp(wblock->gblock.guid, guid, 16) == 0) {
+ if (guid_equal(&wblock->gblock.guid, guid)) {
/*
* Because we historically didn't track the relationship
* between GUIDs and ACPI nodes, we don't know whether
@@ -1152,7 +1185,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
if (ACPI_FAILURE(status))
return -ENXIO;
- obj = (union acpi_object *) out.pointer;
+ obj = out.pointer;
if (!obj)
return -ENXIO;
@@ -1174,10 +1207,10 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
* case yet, so for now, we'll just ignore the duplicate
* for device creation.
*/
- if (guid_already_parsed(device, gblock[i].guid))
+ if (guid_already_parsed(device, &gblock[i].guid))
continue;
- wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL);
+ wblock = kzalloc(sizeof(*wblock), GFP_KERNEL);
if (!wblock) {
retval = -ENOMEM;
break;
@@ -1186,7 +1219,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
wblock->acpi_device = device;
wblock->gblock = gblock[i];
- retval = wmi_create_device(wmi_bus_dev, &gblock[i], wblock, device);
+ retval = wmi_create_device(wmi_bus_dev, wblock, device);
if (retval) {
kfree(wblock);
continue;
@@ -1196,7 +1229,7 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
if (debug_event) {
wblock->handler = wmi_notify_debug;
- wmi_method_enable(wblock, 1);
+ wmi_method_enable(wblock, true);
}
}
@@ -1211,9 +1244,9 @@ static int parse_wdg(struct device *wmi_bus_dev, struct acpi_device *device)
retval = device_add(&wblock->dev.dev);
if (retval) {
dev_err(wmi_bus_dev, "failed to register %pUL\n",
- wblock->gblock.guid);
+ &wblock->gblock.guid);
if (debug_event)
- wmi_method_enable(wblock, 0);
+ wmi_method_enable(wblock, false);
list_del(&wblock->list);
put_device(&wblock->dev.dev);
}
@@ -1230,8 +1263,8 @@ out_free_pointer:
*/
static acpi_status
acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
- u32 bits, u64 *value,
- void *handler_context, void *region_context)
+ u32 bits, u64 *value,
+ void *handler_context, void *region_context)
{
int result = 0, i = 0;
u8 temp = 0;
@@ -1268,17 +1301,15 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
void *context)
{
- struct guid_block *block;
struct wmi_block *wblock;
bool found_it = false;
list_for_each_entry(wblock, &wmi_block_list, list) {
- block = &wblock->gblock;
+ struct guid_block *block = &wblock->gblock;
if (wblock->acpi_device->handle == handle &&
(block->flags & ACPI_WMI_EVENT) &&
- (block->notify_id == event))
- {
+ (block->notify_id == event)) {
found_it = true;
break;
}
@@ -1289,31 +1320,18 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
/* If a driver is bound, then notify the driver. */
if (wblock->dev.dev.driver) {
- struct wmi_driver *driver;
- struct acpi_object_list input;
- union acpi_object params[1];
+ struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
acpi_status status;
- driver = container_of(wblock->dev.dev.driver,
- struct wmi_driver, driver);
-
- input.count = 1;
- input.pointer = params;
- params[0].type = ACPI_TYPE_INTEGER;
- params[0].integer.value = event;
-
- status = acpi_evaluate_object(wblock->acpi_device->handle,
- "_WED", &input, &evdata);
+ status = get_event_data(wblock, &evdata);
if (ACPI_FAILURE(status)) {
- dev_warn(&wblock->dev.dev,
- "failed to get event data\n");
+ dev_warn(&wblock->dev.dev, "failed to get event data\n");
return;
}
if (driver->notify)
- driver->notify(&wblock->dev,
- (union acpi_object *)evdata.pointer);
+ driver->notify(&wblock->dev, evdata.pointer);
kfree(evdata.pointer);
} else if (wblock->handler) {
@@ -1322,25 +1340,24 @@ static void acpi_wmi_notify_handler(acpi_handle handle, u32 event,
}
if (debug_event)
- pr_info("DEBUG Event GUID: %pUL\n", wblock->gblock.guid);
+ pr_info("DEBUG: GUID %pUL event 0x%02X\n", &wblock->gblock.guid, event);
acpi_bus_generate_netlink_event(
wblock->acpi_device->pnp.device_class,
dev_name(&wblock->dev.dev),
event, 0);
-
}
static int acpi_wmi_remove(struct platform_device *device)
{
struct acpi_device *acpi_device = ACPI_COMPANION(&device->dev);
- acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler);
acpi_remove_address_space_handler(acpi_device->handle,
ACPI_ADR_SPACE_EC, &acpi_wmi_ec_space_handler);
wmi_free_devices(acpi_device);
- device_unregister((struct device *)dev_get_drvdata(&device->dev));
+ device_unregister(dev_get_drvdata(&device->dev));
return 0;
}
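The notify handler rework in this file now forwards the evaluated _WED payload straight to the bound driver's ->notify() callback. A hedged sketch of a minimal consumer built on the usual struct wmi_driver layout; the GUID and all names below are placeholders, not taken from the tree:

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/wmi.h>

#define EXAMPLE_WMI_GUID "01234567-89AB-CDEF-0123-456789ABCDEF"	/* placeholder */

static void example_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
{
	/* obj is the _WED payload; its type and contents are firmware specific. */
	if (obj && obj->type == ACPI_TYPE_INTEGER)
		dev_info(&wdev->dev, "event data: 0x%llx\n", obj->integer.value);
}

static const struct wmi_device_id example_wmi_id_table[] = {
	{ EXAMPLE_WMI_GUID, NULL },
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static struct wmi_driver example_wmi_driver = {
	.driver = { .name = "example-wmi" },
	.id_table = example_wmi_id_table,
	.notify = example_wmi_notify,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");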
@@ -1368,7 +1385,7 @@ static int acpi_wmi_probe(struct platform_device *device)
}
status = acpi_install_notify_handler(acpi_device->handle,
- ACPI_DEVICE_NOTIFY,
+ ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler,
NULL);
if (ACPI_FAILURE(status)) {
@@ -1397,7 +1414,7 @@ err_remove_busdev:
device_unregister(wmi_bus_dev);
err_remove_notify_handler:
- acpi_remove_notify_handler(acpi_device->handle, ACPI_DEVICE_NOTIFY,
+ acpi_remove_notify_handler(acpi_device->handle, ACPI_ALL_NOTIFY,
acpi_wmi_notify_handler);
err_remove_ec_handler:
diff --git a/drivers/pnp/system.c b/drivers/pnp/system.c
index 6950503741eb..835113b2cb04 100644
--- a/drivers/pnp/system.c
+++ b/drivers/pnp/system.c
@@ -106,7 +106,7 @@ static int __init pnp_system_init(void)
return pnp_register_driver(&system_pnp_driver);
}
-/**
+/*
* Reserve motherboard resources after PCI claim BARs,
* but before PCI assign resources for uninitialized PCI devices
*/
diff --git a/drivers/powercap/dtpm.c b/drivers/powercap/dtpm.c
index c2185ec5f887..b9fac786246a 100644
--- a/drivers/powercap/dtpm.c
+++ b/drivers/powercap/dtpm.c
@@ -116,8 +116,6 @@ static void __dtpm_sub_power(struct dtpm *dtpm)
parent->power_limit -= dtpm->power_limit;
parent = parent->parent;
}
-
- __dtpm_rebalance_weight(root);
}
static void __dtpm_add_power(struct dtpm *dtpm)
@@ -130,45 +128,45 @@ static void __dtpm_add_power(struct dtpm *dtpm)
parent->power_limit += dtpm->power_limit;
parent = parent->parent;
}
+}
+
+static int __dtpm_update_power(struct dtpm *dtpm)
+{
+ int ret;
+
+ __dtpm_sub_power(dtpm);
+
+ ret = dtpm->ops->update_power_uw(dtpm);
+ if (ret)
+ pr_err("Failed to update power for '%s': %d\n",
+ dtpm->zone.name, ret);
- __dtpm_rebalance_weight(root);
+ if (!test_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags))
+ dtpm->power_limit = dtpm->power_max;
+
+ __dtpm_add_power(dtpm);
+
+ if (root)
+ __dtpm_rebalance_weight(root);
+
+ return ret;
}
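To make the ordering in __dtpm_update_power() concrete, here is a small standalone (userspace) walk-through of how the sub/add pair keeps every ancestor's aggregated limit equal to the sum of its children while one node's budget is re-read; the numbers are illustrative only:

#include <stdio.h>

struct node {
	struct node *parent;
	unsigned long long power_limit;
};

/* Same pattern as above: remove the old contribution, recompute the node,
 * then add the new contribution back, so parents always hold the sum. */
static void update_limit(struct node *n, unsigned long long new_limit)
{
	for (struct node *p = n->parent; p; p = p->parent)
		p->power_limit -= n->power_limit;	/* __dtpm_sub_power() */
	n->power_limit = new_limit;			/* ops->update_power_uw() outcome */
	for (struct node *p = n->parent; p; p = p->parent)
		p->power_limit += n->power_limit;	/* __dtpm_add_power() */
}

int main(void)
{
	struct node root = { NULL, 5000000 };	/* 5 W aggregated at the root */
	struct node cpu  = { &root, 2000000 };	/* 2 W of that comes from "cpu" */

	update_limit(&cpu, 1500000);		/* e.g. a CPU went offline */
	printf("root=%llu cpu=%llu\n", root.power_limit, cpu.power_limit);
	/* prints root=4500000 cpu=1500000 */
	return 0;
}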
/**
* dtpm_update_power - Update the power on the dtpm
* @dtpm: a pointer to a dtpm structure to update
- * @power_min: a u64 representing the new power_min value
- * @power_max: a u64 representing the new power_max value
*
* Function to update the power values of the dtpm node specified in
* parameter. These new values will be propagated to the tree.
*
* Return: zero on success, -EINVAL if the values are inconsistent
*/
-int dtpm_update_power(struct dtpm *dtpm, u64 power_min, u64 power_max)
+int dtpm_update_power(struct dtpm *dtpm)
{
- int ret = 0;
+ int ret;
mutex_lock(&dtpm_lock);
-
- if (power_min == dtpm->power_min && power_max == dtpm->power_max)
- goto unlock;
-
- if (power_max < power_min) {
- ret = -EINVAL;
- goto unlock;
- }
-
- __dtpm_sub_power(dtpm);
-
- dtpm->power_min = power_min;
- dtpm->power_max = power_max;
- if (!test_bit(DTPM_POWER_LIMIT_FLAG, &dtpm->flags))
- dtpm->power_limit = power_max;
-
- __dtpm_add_power(dtpm);
-
-unlock:
+ ret = __dtpm_update_power(dtpm);
mutex_unlock(&dtpm_lock);
return ret;
@@ -359,24 +357,18 @@ static struct powercap_zone_ops zone_ops = {
};
/**
- * dtpm_alloc - Allocate and initialize a dtpm struct
- * @name: a string specifying the name of the node
- *
- * Return: a struct dtpm pointer, NULL in case of error
+ * dtpm_init - Allocate and initialize a dtpm struct
+ * @dtpm: The dtpm struct pointer to be initialized
+ * @ops: The dtpm device specific ops, NULL for a virtual node
*/
-struct dtpm *dtpm_alloc(struct dtpm_ops *ops)
+void dtpm_init(struct dtpm *dtpm, struct dtpm_ops *ops)
{
- struct dtpm *dtpm;
-
- dtpm = kzalloc(sizeof(*dtpm), GFP_KERNEL);
if (dtpm) {
INIT_LIST_HEAD(&dtpm->children);
INIT_LIST_HEAD(&dtpm->sibling);
dtpm->weight = 1024;
dtpm->ops = ops;
}
-
- return dtpm;
}
/**
@@ -436,6 +428,7 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
if (dtpm->ops && !(dtpm->ops->set_power_uw &&
dtpm->ops->get_power_uw &&
+ dtpm->ops->update_power_uw &&
dtpm->ops->release))
return -EINVAL;
@@ -455,7 +448,10 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
root = dtpm;
}
- __dtpm_add_power(dtpm);
+ if (dtpm->ops && !dtpm->ops->update_power_uw(dtpm)) {
+ __dtpm_add_power(dtpm);
+ dtpm->power_limit = dtpm->power_max;
+ }
pr_info("Registered dtpm node '%s' / %llu-%llu uW, \n",
dtpm->zone.name, dtpm->power_min, dtpm->power_max);
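With dtpm_alloc() gone and update_power_uw now required by dtpm_register(), a backend embeds struct dtpm in its own state and initializes it with dtpm_init(). A sketch of that pattern, assuming the callback signatures match the ones used in this file; every name and power number below is illustrative:

#include <linux/dtpm.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct example_dev {
	struct dtpm dtpm;		/* embedded, no separate allocation */
	/* ... device specific state ... */
};

static struct example_dev *to_example(struct dtpm *dtpm)
{
	return container_of(dtpm, struct example_dev, dtpm);
}

static u64 example_set_power_uw(struct dtpm *dtpm, u64 limit) { return limit; }
static u64 example_get_power_uw(struct dtpm *dtpm) { return 0; }

static int example_update_power_uw(struct dtpm *dtpm)
{
	dtpm->power_min = 100000;	/* 0.1 W, made-up numbers */
	dtpm->power_max = 2000000;	/* 2 W */
	return 0;
}

static void example_release(struct dtpm *dtpm)
{
	kfree(to_example(dtpm));
}

static struct dtpm_ops example_ops = {
	.set_power_uw	 = example_set_power_uw,
	.get_power_uw	 = example_get_power_uw,
	.update_power_uw = example_update_power_uw,	/* now mandatory */
	.release	 = example_release,
};

static int example_register(void)
{
	struct example_dev *ed = kzalloc(sizeof(*ed), GFP_KERNEL);

	if (!ed)
		return -ENOMEM;
	dtpm_init(&ed->dtpm, &example_ops);
	return dtpm_register("example", &ed->dtpm, NULL);
}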
@@ -465,9 +461,9 @@ int dtpm_register(const char *name, struct dtpm *dtpm, struct dtpm *parent)
return 0;
}
-static int __init dtpm_init(void)
+static int __init init_dtpm(void)
{
- struct dtpm_descr **dtpm_descr;
+ struct dtpm_descr *dtpm_descr;
pct = powercap_register_control_type(NULL, "dtpm", NULL);
if (IS_ERR(pct)) {
@@ -476,8 +472,8 @@ static int __init dtpm_init(void)
}
for_each_dtpm_table(dtpm_descr)
- (*dtpm_descr)->init(*dtpm_descr);
+ dtpm_descr->init();
return 0;
}
-late_initcall(dtpm_init);
+late_initcall(init_dtpm);
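init_dtpm() now iterates a descriptor table instead of hard-wiring dtpm_register_cpu(); DTPM_DECLARE() (used at the end of dtpm_cpu.c below) presumably drops an {init} record into a dedicated linker section that for_each_dtpm_table() walks. A rough, assumption-labelled sketch of that linker-table pattern with made-up names:

/* Illustrative only: how a registration table in a named section works. */
struct example_dtpm_descr {
	int (*init)(void);
};

#define EXAMPLE_DTPM_DECLARE(name, init_fn)				\
	static struct example_dtpm_descr __example_dtpm_##name		\
	__attribute__((used, section("__dtpm_table"))) = { .init = init_fn }

/* Bounds assumed to be provided by the linker script: */
extern struct example_dtpm_descr __dtpm_table[], __dtpm_table_end[];

#define example_for_each_dtpm(d)					\
	for ((d) = __dtpm_table; (d) < __dtpm_table_end; (d)++)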
diff --git a/drivers/powercap/dtpm_cpu.c b/drivers/powercap/dtpm_cpu.c
index 51c366938acd..44faa3a74db6 100644
--- a/drivers/powercap/dtpm_cpu.c
+++ b/drivers/powercap/dtpm_cpu.c
@@ -14,6 +14,8 @@
* The CPU hotplug is supported and the power numbers will be updated
* if a CPU is hot plugged / unplugged.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/cpuhotplug.h>
@@ -23,66 +25,29 @@
#include <linux/slab.h>
#include <linux/units.h>
-static struct dtpm *__parent;
-
-static DEFINE_PER_CPU(struct dtpm *, dtpm_per_cpu);
-
struct dtpm_cpu {
+ struct dtpm dtpm;
struct freq_qos_request qos_req;
int cpu;
};
-/*
- * When a new CPU is inserted at hotplug or boot time, add the power
- * contribution and update the dtpm tree.
- */
-static int power_add(struct dtpm *dtpm, struct em_perf_domain *em)
-{
- u64 power_min, power_max;
-
- power_min = em->table[0].power;
- power_min *= MICROWATT_PER_MILLIWATT;
- power_min += dtpm->power_min;
+static DEFINE_PER_CPU(struct dtpm_cpu *, dtpm_per_cpu);
- power_max = em->table[em->nr_perf_states - 1].power;
- power_max *= MICROWATT_PER_MILLIWATT;
- power_max += dtpm->power_max;
-
- return dtpm_update_power(dtpm, power_min, power_max);
-}
-
-/*
- * When a CPU is unplugged, remove its power contribution from the
- * dtpm tree.
- */
-static int power_sub(struct dtpm *dtpm, struct em_perf_domain *em)
+static struct dtpm_cpu *to_dtpm_cpu(struct dtpm *dtpm)
{
- u64 power_min, power_max;
-
- power_min = em->table[0].power;
- power_min *= MICROWATT_PER_MILLIWATT;
- power_min = dtpm->power_min - power_min;
-
- power_max = em->table[em->nr_perf_states - 1].power;
- power_max *= MICROWATT_PER_MILLIWATT;
- power_max = dtpm->power_max - power_max;
-
- return dtpm_update_power(dtpm, power_min, power_max);
+ return container_of(dtpm, struct dtpm_cpu, dtpm);
}
static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
{
- struct dtpm_cpu *dtpm_cpu = dtpm->private;
- struct em_perf_domain *pd;
+ struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
+ struct em_perf_domain *pd = em_cpu_get(dtpm_cpu->cpu);
struct cpumask cpus;
unsigned long freq;
u64 power;
int i, nr_cpus;
- pd = em_cpu_get(dtpm_cpu->cpu);
-
cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
-
nr_cpus = cpumask_weight(&cpus);
for (i = 0; i < pd->nr_perf_states; i++) {
@@ -103,34 +68,88 @@ static u64 set_pd_power_limit(struct dtpm *dtpm, u64 power_limit)
return power_limit;
}
+static u64 scale_pd_power_uw(struct cpumask *pd_mask, u64 power)
+{
+ unsigned long max = 0, sum_util = 0;
+ int cpu;
+
+ for_each_cpu_and(cpu, pd_mask, cpu_online_mask) {
+
+ /*
+ * The capacity is the same for all CPUs belonging to
+ * the same perf domain, so a single call to
+ * arch_scale_cpu_capacity() is enough. However, we
+ * need the CPU parameter to be initialized by the
+ * loop, so the call ends up in this block.
+ *
+ * We could initialize 'max' with a cpumask_first() call
+ * before the loop, but the extra bitmap computation is not
+ * worth it: arch_scale_cpu_capacity() simply returns a
+ * value, so the repeated call is optimized away by the
+ * compiler anyway.
+ */
+ max = arch_scale_cpu_capacity(cpu);
+ sum_util += sched_cpu_util(cpu, max);
+ }
+
+ /*
+ * In the improbable case where all the CPUs of the perf
+ * domain are offline, 'max' will be zero and will lead to an
+ * illegal operation with a zero division.
+ */
+ return max ? (power * ((sum_util << 10) / max)) >> 10 : 0;
+}
+
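The final expression in scale_pd_power_uw() scales the per-state power by the ratio of aggregate utilization to CPU capacity using a Q10 fixed-point shift rather than floating point. A standalone numeric check of that expression (userspace, illustrative numbers):

#include <stdio.h>

/* Same fixed-point scaling as above: power * (sum_util / max), in Q10. */
static unsigned long long scale(unsigned long long power,
				unsigned long sum_util, unsigned long max)
{
	return max ? (power * ((sum_util << 10) / max)) >> 10 : 0;
}

int main(void)
{
	/* Say the perf state is worth 2,000,000 uW, the per-CPU capacity is
	 * 1024, and two CPUs report 300 + 212 = 512 aggregate utilization. */
	printf("%llu\n", scale(2000000, 512, 1024));	/* prints 1000000 */
	return 0;
}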
static u64 get_pd_power_uw(struct dtpm *dtpm)
{
- struct dtpm_cpu *dtpm_cpu = dtpm->private;
+ struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
struct em_perf_domain *pd;
- struct cpumask cpus;
+ struct cpumask *pd_mask;
unsigned long freq;
- int i, nr_cpus;
+ int i;
pd = em_cpu_get(dtpm_cpu->cpu);
+
+ pd_mask = em_span_cpus(pd);
+
freq = cpufreq_quick_get(dtpm_cpu->cpu);
- cpumask_and(&cpus, cpu_online_mask, to_cpumask(pd->cpus));
- nr_cpus = cpumask_weight(&cpus);
for (i = 0; i < pd->nr_perf_states; i++) {
if (pd->table[i].frequency < freq)
continue;
- return pd->table[i].power *
- MICROWATT_PER_MILLIWATT * nr_cpus;
+ return scale_pd_power_uw(pd_mask, pd->table[i].power *
+ MICROWATT_PER_MILLIWATT);
}
return 0;
}
+static int update_pd_power_uw(struct dtpm *dtpm)
+{
+ struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
+ struct em_perf_domain *em = em_cpu_get(dtpm_cpu->cpu);
+ struct cpumask cpus;
+ int nr_cpus;
+
+ cpumask_and(&cpus, cpu_online_mask, to_cpumask(em->cpus));
+ nr_cpus = cpumask_weight(&cpus);
+
+ dtpm->power_min = em->table[0].power;
+ dtpm->power_min *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_min *= nr_cpus;
+
+ dtpm->power_max = em->table[em->nr_perf_states - 1].power;
+ dtpm->power_max *= MICROWATT_PER_MILLIWATT;
+ dtpm->power_max *= nr_cpus;
+
+ return 0;
+}
+
static void pd_release(struct dtpm *dtpm)
{
- struct dtpm_cpu *dtpm_cpu = dtpm->private;
+ struct dtpm_cpu *dtpm_cpu = to_dtpm_cpu(dtpm);
if (freq_qos_request_active(&dtpm_cpu->qos_req))
freq_qos_remove_request(&dtpm_cpu->qos_req);
@@ -139,44 +158,28 @@ static void pd_release(struct dtpm *dtpm)
}
static struct dtpm_ops dtpm_ops = {
- .set_power_uw = set_pd_power_limit,
- .get_power_uw = get_pd_power_uw,
- .release = pd_release,
+ .set_power_uw = set_pd_power_limit,
+ .get_power_uw = get_pd_power_uw,
+ .update_power_uw = update_pd_power_uw,
+ .release = pd_release,
};
static int cpuhp_dtpm_cpu_offline(unsigned int cpu)
{
- struct cpufreq_policy *policy;
struct em_perf_domain *pd;
- struct dtpm *dtpm;
-
- policy = cpufreq_cpu_get(cpu);
-
- if (!policy)
- return 0;
+ struct dtpm_cpu *dtpm_cpu;
pd = em_cpu_get(cpu);
if (!pd)
return -EINVAL;
- dtpm = per_cpu(dtpm_per_cpu, cpu);
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
- power_sub(dtpm, pd);
-
- if (cpumask_weight(policy->cpus) != 1)
- return 0;
-
- for_each_cpu(cpu, policy->related_cpus)
- per_cpu(dtpm_per_cpu, cpu) = NULL;
-
- dtpm_unregister(dtpm);
-
- return 0;
+ return dtpm_update_power(&dtpm_cpu->dtpm);
}
static int cpuhp_dtpm_cpu_online(unsigned int cpu)
{
- struct dtpm *dtpm;
struct dtpm_cpu *dtpm_cpu;
struct cpufreq_policy *policy;
struct em_perf_domain *pd;
@@ -184,7 +187,6 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
int ret = -ENOMEM;
policy = cpufreq_cpu_get(cpu);
-
if (!policy)
return 0;
@@ -192,66 +194,82 @@ static int cpuhp_dtpm_cpu_online(unsigned int cpu)
if (!pd)
return -EINVAL;
- dtpm = per_cpu(dtpm_per_cpu, cpu);
- if (dtpm)
- return power_add(dtpm, pd);
-
- dtpm = dtpm_alloc(&dtpm_ops);
- if (!dtpm)
- return -EINVAL;
+ dtpm_cpu = per_cpu(dtpm_per_cpu, cpu);
+ if (dtpm_cpu)
+ return dtpm_update_power(&dtpm_cpu->dtpm);
dtpm_cpu = kzalloc(sizeof(*dtpm_cpu), GFP_KERNEL);
if (!dtpm_cpu)
- goto out_kfree_dtpm;
+ return -ENOMEM;
- dtpm->private = dtpm_cpu;
+ dtpm_init(&dtpm_cpu->dtpm, &dtpm_ops);
dtpm_cpu->cpu = cpu;
for_each_cpu(cpu, policy->related_cpus)
- per_cpu(dtpm_per_cpu, cpu) = dtpm;
+ per_cpu(dtpm_per_cpu, cpu) = dtpm_cpu;
- sprintf(name, "cpu%d", dtpm_cpu->cpu);
+ snprintf(name, sizeof(name), "cpu%d-cpufreq", dtpm_cpu->cpu);
- ret = dtpm_register(name, dtpm, __parent);
+ ret = dtpm_register(name, &dtpm_cpu->dtpm, NULL);
if (ret)
goto out_kfree_dtpm_cpu;
- ret = power_add(dtpm, pd);
- if (ret)
- goto out_dtpm_unregister;
-
ret = freq_qos_add_request(&policy->constraints,
&dtpm_cpu->qos_req, FREQ_QOS_MAX,
pd->table[pd->nr_perf_states - 1].frequency);
if (ret)
- goto out_power_sub;
+ goto out_dtpm_unregister;
return 0;
-out_power_sub:
- power_sub(dtpm, pd);
-
out_dtpm_unregister:
- dtpm_unregister(dtpm);
+ dtpm_unregister(&dtpm_cpu->dtpm);
dtpm_cpu = NULL;
- dtpm = NULL;
out_kfree_dtpm_cpu:
for_each_cpu(cpu, policy->related_cpus)
per_cpu(dtpm_per_cpu, cpu) = NULL;
kfree(dtpm_cpu);
-out_kfree_dtpm:
- kfree(dtpm);
return ret;
}
-int dtpm_register_cpu(struct dtpm *parent)
+static int __init dtpm_cpu_init(void)
{
- __parent = parent;
+ int ret;
+
+ /*
+ * The callbacks at CPU hotplug time call dtpm_update_power(),
+ * which in turn calls update_pd_power_uw().
+ *
+ * The function update_pd_power_uw() uses the online mask to
+ * figure out the power consumption limits.
+ *
+ * At CPUHP_AP_ONLINE_DYN, the CPU is present in the CPU
+ * online mask when the cpuhp_dtpm_cpu_online function is
+ * called, but the CPU is still in the online mask for the
+ * tear down callback. So the power can not be updated when
+ * the CPU is unplugged.
+ *
+ * At CPUHP_AP_DTPM_CPU_DEAD, the situation is the opposite as
+ * above. The CPU online mask is not up to date when the CPU
+ * is plugged in.
+ *
+ * For this reason, we need to call the online and offline
+ * callbacks at different moments when the CPU online mask is
+ * consistent with the power numbers we want to update.
+ */
+ ret = cpuhp_setup_state(CPUHP_AP_DTPM_CPU_DEAD, "dtpm_cpu:offline",
+ NULL, cpuhp_dtpm_cpu_offline);
+ if (ret < 0)
+ return ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dtpm_cpu:online",
+ cpuhp_dtpm_cpu_online, NULL);
+ if (ret < 0)
+ return ret;
- return cpuhp_setup_state(CPUHP_AP_DTPM_CPU_ONLINE,
- "dtpm_cpu:online",
- cpuhp_dtpm_cpu_online,
- cpuhp_dtpm_cpu_offline);
+ return 0;
}
+
+DTPM_DECLARE(dtpm_cpu, dtpm_cpu_init);
diff --git a/drivers/ptp/idt8a340_reg.h b/drivers/ptp/idt8a340_reg.h
deleted file mode 100644
index ac524cf0f31f..000000000000
--- a/drivers/ptp/idt8a340_reg.h
+++ /dev/null
@@ -1,720 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/* idt8a340_reg.h
- *
- * Originally generated by regen.tcl on Thu Feb 14 19:23:44 PST 2019
- * https://github.com/richardcochran/regen
- *
- * Hand modified to include some HW registers.
- * Based on 4.8.0, SCSR rev C commit a03c7ae5
- */
-#ifndef HAVE_IDT8A340_REG
-#define HAVE_IDT8A340_REG
-
-#define PAGE_ADDR_BASE 0x0000
-#define PAGE_ADDR 0x00fc
-
-#define HW_REVISION 0x8180
-#define REV_ID 0x007a
-
-#define HW_DPLL_0 (0x8a00)
-#define HW_DPLL_1 (0x8b00)
-#define HW_DPLL_2 (0x8c00)
-#define HW_DPLL_3 (0x8d00)
-#define HW_DPLL_4 (0x8e00)
-#define HW_DPLL_5 (0x8f00)
-#define HW_DPLL_6 (0x9000)
-#define HW_DPLL_7 (0x9100)
-
-#define HW_DPLL_TOD_SW_TRIG_ADDR__0 (0x080)
-#define HW_DPLL_TOD_CTRL_1 (0x089)
-#define HW_DPLL_TOD_CTRL_2 (0x08A)
-#define HW_DPLL_TOD_OVR__0 (0x098)
-#define HW_DPLL_TOD_OUT_0__0 (0x0B0)
-
-#define HW_Q0_Q1_CH_SYNC_CTRL_0 (0xa740)
-#define HW_Q0_Q1_CH_SYNC_CTRL_1 (0xa741)
-#define HW_Q2_Q3_CH_SYNC_CTRL_0 (0xa742)
-#define HW_Q2_Q3_CH_SYNC_CTRL_1 (0xa743)
-#define HW_Q4_Q5_CH_SYNC_CTRL_0 (0xa744)
-#define HW_Q4_Q5_CH_SYNC_CTRL_1 (0xa745)
-#define HW_Q6_Q7_CH_SYNC_CTRL_0 (0xa746)
-#define HW_Q6_Q7_CH_SYNC_CTRL_1 (0xa747)
-#define HW_Q8_CH_SYNC_CTRL_0 (0xa748)
-#define HW_Q8_CH_SYNC_CTRL_1 (0xa749)
-#define HW_Q9_CH_SYNC_CTRL_0 (0xa74a)
-#define HW_Q9_CH_SYNC_CTRL_1 (0xa74b)
-#define HW_Q10_CH_SYNC_CTRL_0 (0xa74c)
-#define HW_Q10_CH_SYNC_CTRL_1 (0xa74d)
-#define HW_Q11_CH_SYNC_CTRL_0 (0xa74e)
-#define HW_Q11_CH_SYNC_CTRL_1 (0xa74f)
-
-#define SYNC_SOURCE_DPLL0_TOD_PPS 0x14
-#define SYNC_SOURCE_DPLL1_TOD_PPS 0x15
-#define SYNC_SOURCE_DPLL2_TOD_PPS 0x16
-#define SYNC_SOURCE_DPLL3_TOD_PPS 0x17
-
-#define SYNCTRL1_MASTER_SYNC_RST BIT(7)
-#define SYNCTRL1_MASTER_SYNC_TRIG BIT(5)
-#define SYNCTRL1_TOD_SYNC_TRIG BIT(4)
-#define SYNCTRL1_FBDIV_FRAME_SYNC_TRIG BIT(3)
-#define SYNCTRL1_FBDIV_SYNC_TRIG BIT(2)
-#define SYNCTRL1_Q1_DIV_SYNC_TRIG BIT(1)
-#define SYNCTRL1_Q0_DIV_SYNC_TRIG BIT(0)
-
-#define HW_Q8_CTRL_SPARE (0xa7d4)
-#define HW_Q11_CTRL_SPARE (0xa7ec)
-
-/**
- * Select FOD5 as sync_trigger for Q8 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q8 divider.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD5 as driver for clock and sync for Q8 divider.
- * Enable fanout buffer for FOD5.
- *
- * Unused when FOD4 is driving Q8 divider (normal operation).
- */
-#define Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-/**
- * Select FOD6 as sync_trigger for Q11 divider.
- * Transition from logic zero to one
- * sets trigger to sync Q11 divider.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_SYNC_TRIG BIT(1)
-
-/**
- * Enable FOD6 as driver for clock and sync for Q11 divider.
- * Enable fanout buffer for FOD6.
- *
- * Unused when FOD7 is driving Q11 divider (normal operation).
- */
-#define Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK (BIT(0) | BIT(2))
-
-#define RESET_CTRL 0xc000
-#define SM_RESET 0x0012
-#define SM_RESET_CMD 0x5A
-
-#define GENERAL_STATUS 0xc014
-#define BOOT_STATUS 0x0000
-#define HW_REV_ID 0x000A
-#define BOND_ID 0x000B
-#define HW_CSR_ID 0x000C
-#define HW_IRQ_ID 0x000E
-
-#define MAJ_REL 0x0010
-#define MIN_REL 0x0011
-#define HOTFIX_REL 0x0012
-
-#define PIPELINE_ID 0x0014
-#define BUILD_ID 0x0018
-
-#define JTAG_DEVICE_ID 0x001c
-#define PRODUCT_ID 0x001e
-
-#define OTP_SCSR_CONFIG_SELECT 0x0022
-
-#define STATUS 0xc03c
-#define DPLL_SYS_STATUS 0x0020
-#define DPLL_SYS_APLL_STATUS 0x0021
-#define USER_GPIO0_TO_7_STATUS 0x008a
-#define USER_GPIO8_TO_15_STATUS 0x008b
-
-#define GPIO_USER_CONTROL 0xc160
-#define GPIO0_TO_7_OUT 0x0000
-#define GPIO8_TO_15_OUT 0x0001
-
-#define STICKY_STATUS_CLEAR 0xc164
-
-#define GPIO_TOD_NOTIFICATION_CLEAR 0xc16c
-
-#define ALERT_CFG 0xc188
-
-#define SYS_DPLL_XO 0xc194
-
-#define SYS_APLL 0xc19c
-
-#define INPUT_0 0xc1b0
-
-#define INPUT_1 0xc1c0
-
-#define INPUT_2 0xc1d0
-
-#define INPUT_3 0xc200
-
-#define INPUT_4 0xc210
-
-#define INPUT_5 0xc220
-
-#define INPUT_6 0xc230
-
-#define INPUT_7 0xc240
-
-#define INPUT_8 0xc250
-
-#define INPUT_9 0xc260
-
-#define INPUT_10 0xc280
-
-#define INPUT_11 0xc290
-
-#define INPUT_12 0xc2a0
-
-#define INPUT_13 0xc2b0
-
-#define INPUT_14 0xc2c0
-
-#define INPUT_15 0xc2d0
-
-#define REF_MON_0 0xc2e0
-
-#define REF_MON_1 0xc2ec
-
-#define REF_MON_2 0xc300
-
-#define REF_MON_3 0xc30c
-
-#define REF_MON_4 0xc318
-
-#define REF_MON_5 0xc324
-
-#define REF_MON_6 0xc330
-
-#define REF_MON_7 0xc33c
-
-#define REF_MON_8 0xc348
-
-#define REF_MON_9 0xc354
-
-#define REF_MON_10 0xc360
-
-#define REF_MON_11 0xc36c
-
-#define REF_MON_12 0xc380
-
-#define REF_MON_13 0xc38c
-
-#define REF_MON_14 0xc398
-
-#define REF_MON_15 0xc3a4
-
-#define DPLL_0 0xc3b0
-#define DPLL_CTRL_REG_0 0x0002
-#define DPLL_CTRL_REG_1 0x0003
-#define DPLL_CTRL_REG_2 0x0004
-#define DPLL_TOD_SYNC_CFG 0x0031
-#define DPLL_COMBO_SLAVE_CFG_0 0x0032
-#define DPLL_COMBO_SLAVE_CFG_1 0x0033
-#define DPLL_SLAVE_REF_CFG 0x0034
-#define DPLL_REF_MODE 0x0035
-#define DPLL_PHASE_MEASUREMENT_CFG 0x0036
-#define DPLL_MODE 0x0037
-
-#define DPLL_1 0xc400
-
-#define DPLL_2 0xc438
-
-#define DPLL_3 0xc480
-
-#define DPLL_4 0xc4b8
-
-#define DPLL_5 0xc500
-
-#define DPLL_6 0xc538
-
-#define DPLL_7 0xc580
-
-#define SYS_DPLL 0xc5b8
-
-#define DPLL_CTRL_0 0xc600
-#define DPLL_CTRL_DPLL_MANU_REF_CFG 0x0001
-#define DPLL_CTRL_COMBO_MASTER_CFG 0x003a
-
-#define DPLL_CTRL_1 0xc63c
-
-#define DPLL_CTRL_2 0xc680
-
-#define DPLL_CTRL_3 0xc6bc
-
-#define DPLL_CTRL_4 0xc700
-
-#define DPLL_CTRL_5 0xc73c
-
-#define DPLL_CTRL_6 0xc780
-
-#define DPLL_CTRL_7 0xc7bc
-
-#define SYS_DPLL_CTRL 0xc800
-
-#define DPLL_PHASE_0 0xc818
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_PHASE 0x0000
-
-#define DPLL_PHASE_1 0xc81c
-
-#define DPLL_PHASE_2 0xc820
-
-#define DPLL_PHASE_3 0xc824
-
-#define DPLL_PHASE_4 0xc828
-
-#define DPLL_PHASE_5 0xc82c
-
-#define DPLL_PHASE_6 0xc830
-
-#define DPLL_PHASE_7 0xc834
-
-#define DPLL_FREQ_0 0xc838
-
-/* Signed 42-bit FFO in units of 2^(-53) */
-#define DPLL_WR_FREQ 0x0000
-
-#define DPLL_FREQ_1 0xc840
-
-#define DPLL_FREQ_2 0xc848
-
-#define DPLL_FREQ_3 0xc850
-
-#define DPLL_FREQ_4 0xc858
-
-#define DPLL_FREQ_5 0xc860
-
-#define DPLL_FREQ_6 0xc868
-
-#define DPLL_FREQ_7 0xc870
-
-#define DPLL_PHASE_PULL_IN_0 0xc880
-#define PULL_IN_OFFSET 0x0000 /* Signed 32 bit */
-#define PULL_IN_SLOPE_LIMIT 0x0004 /* Unsigned 24 bit */
-#define PULL_IN_CTRL 0x0007
-
-#define DPLL_PHASE_PULL_IN_1 0xc888
-
-#define DPLL_PHASE_PULL_IN_2 0xc890
-
-#define DPLL_PHASE_PULL_IN_3 0xc898
-
-#define DPLL_PHASE_PULL_IN_4 0xc8a0
-
-#define DPLL_PHASE_PULL_IN_5 0xc8a8
-
-#define DPLL_PHASE_PULL_IN_6 0xc8b0
-
-#define DPLL_PHASE_PULL_IN_7 0xc8b8
-
-#define GPIO_CFG 0xc8c0
-#define GPIO_CFG_GBL 0x0000
-
-#define GPIO_0 0xc8c2
-#define GPIO_DCO_INC_DEC 0x0000
-#define GPIO_OUT_CTRL_0 0x0001
-#define GPIO_OUT_CTRL_1 0x0002
-#define GPIO_TOD_TRIG 0x0003
-#define GPIO_DPLL_INDICATOR 0x0004
-#define GPIO_LOS_INDICATOR 0x0005
-#define GPIO_REF_INPUT_DSQ_0 0x0006
-#define GPIO_REF_INPUT_DSQ_1 0x0007
-#define GPIO_REF_INPUT_DSQ_2 0x0008
-#define GPIO_REF_INPUT_DSQ_3 0x0009
-#define GPIO_MAN_CLK_SEL_0 0x000a
-#define GPIO_MAN_CLK_SEL_1 0x000b
-#define GPIO_MAN_CLK_SEL_2 0x000c
-#define GPIO_SLAVE 0x000d
-#define GPIO_ALERT_OUT_CFG 0x000e
-#define GPIO_TOD_NOTIFICATION_CFG 0x000f
-#define GPIO_CTRL 0x0010
-
-#define GPIO_1 0xc8d4
-
-#define GPIO_2 0xc8e6
-
-#define GPIO_3 0xc900
-
-#define GPIO_4 0xc912
-
-#define GPIO_5 0xc924
-
-#define GPIO_6 0xc936
-
-#define GPIO_7 0xc948
-
-#define GPIO_8 0xc95a
-
-#define GPIO_9 0xc980
-
-#define GPIO_10 0xc992
-
-#define GPIO_11 0xc9a4
-
-#define GPIO_12 0xc9b6
-
-#define GPIO_13 0xc9c8
-
-#define GPIO_14 0xc9da
-
-#define GPIO_15 0xca00
-
-#define OUT_DIV_MUX 0xca12
-
-#define OUTPUT_0 0xca14
-/* FOD frequency output divider value */
-#define OUT_DIV 0x0000
-#define OUT_DUTY_CYCLE_HIGH 0x0004
-#define OUT_CTRL_0 0x0008
-#define OUT_CTRL_1 0x0009
-/* Phase adjustment in FOD cycles */
-#define OUT_PHASE_ADJ 0x000c
-
-#define OUTPUT_1 0xca24
-
-#define OUTPUT_2 0xca34
-
-#define OUTPUT_3 0xca44
-
-#define OUTPUT_4 0xca54
-
-#define OUTPUT_5 0xca64
-
-#define OUTPUT_6 0xca80
-
-#define OUTPUT_7 0xca90
-
-#define OUTPUT_8 0xcaa0
-
-#define OUTPUT_9 0xcab0
-
-#define OUTPUT_10 0xcac0
-
-#define OUTPUT_11 0xcad0
-
-#define SERIAL 0xcae0
-
-#define PWM_ENCODER_0 0xcb00
-
-#define PWM_ENCODER_1 0xcb08
-
-#define PWM_ENCODER_2 0xcb10
-
-#define PWM_ENCODER_3 0xcb18
-
-#define PWM_ENCODER_4 0xcb20
-
-#define PWM_ENCODER_5 0xcb28
-
-#define PWM_ENCODER_6 0xcb30
-
-#define PWM_ENCODER_7 0xcb38
-
-#define PWM_DECODER_0 0xcb40
-
-#define PWM_DECODER_1 0xcb48
-
-#define PWM_DECODER_2 0xcb50
-
-#define PWM_DECODER_3 0xcb58
-
-#define PWM_DECODER_4 0xcb60
-
-#define PWM_DECODER_5 0xcb68
-
-#define PWM_DECODER_6 0xcb70
-
-#define PWM_DECODER_7 0xcb80
-
-#define PWM_DECODER_8 0xcb88
-
-#define PWM_DECODER_9 0xcb90
-
-#define PWM_DECODER_10 0xcb98
-
-#define PWM_DECODER_11 0xcba0
-
-#define PWM_DECODER_12 0xcba8
-
-#define PWM_DECODER_13 0xcbb0
-
-#define PWM_DECODER_14 0xcbb8
-
-#define PWM_DECODER_15 0xcbc0
-
-#define PWM_USER_DATA 0xcbc8
-
-#define TOD_0 0xcbcc
-
-/* Enable TOD counter, output channel sync and even-PPS mode */
-#define TOD_CFG 0x0000
-
-#define TOD_1 0xcbce
-
-#define TOD_2 0xcbd0
-
-#define TOD_3 0xcbd2
-
-
-#define TOD_WRITE_0 0xcc00
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_WRITE 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_WRITE_COUNTER 0x000c
-/* TOD write trigger configuration */
-#define TOD_WRITE_SELECT_CFG_0 0x000d
-/* TOD write trigger selection */
-#define TOD_WRITE_CMD 0x000f
-
-#define TOD_WRITE_1 0xcc10
-
-#define TOD_WRITE_2 0xcc20
-
-#define TOD_WRITE_3 0xcc30
-
-#define TOD_READ_PRIMARY_0 0xcc40
-/* 8-bit subns, 32-bit ns, 48-bit seconds */
-#define TOD_READ_PRIMARY 0x0000
-/* Counter increments after TOD write is completed */
-#define TOD_READ_PRIMARY_COUNTER 0x000b
-/* Read trigger configuration */
-#define TOD_READ_PRIMARY_SEL_CFG_0 0x000c
-/* Read trigger selection */
-#define TOD_READ_PRIMARY_CMD 0x000e
-
-#define TOD_READ_PRIMARY_1 0xcc50
-
-#define TOD_READ_PRIMARY_2 0xcc60
-
-#define TOD_READ_PRIMARY_3 0xcc80
-
-#define TOD_READ_SECONDARY_0 0xcc90
-
-#define TOD_READ_SECONDARY_1 0xcca0
-
-#define TOD_READ_SECONDARY_2 0xccb0
-
-#define TOD_READ_SECONDARY_3 0xccc0
-
-#define OUTPUT_TDC_CFG 0xccd0
-
-#define OUTPUT_TDC_0 0xcd00
-
-#define OUTPUT_TDC_1 0xcd08
-
-#define OUTPUT_TDC_2 0xcd10
-
-#define OUTPUT_TDC_3 0xcd18
-
-#define INPUT_TDC 0xcd20
-
-#define SCRATCH 0xcf50
-
-#define EEPROM 0xcf68
-
-#define OTP 0xcf70
-
-#define BYTE 0xcf80
-
-/* Bit definitions for the MAJ_REL register */
-#define MAJOR_SHIFT (1)
-#define MAJOR_MASK (0x7f)
-#define PR_BUILD BIT(0)
-
-/* Bit definitions for the USER_GPIO0_TO_7_STATUS register */
-#define GPIO0_LEVEL BIT(0)
-#define GPIO1_LEVEL BIT(1)
-#define GPIO2_LEVEL BIT(2)
-#define GPIO3_LEVEL BIT(3)
-#define GPIO4_LEVEL BIT(4)
-#define GPIO5_LEVEL BIT(5)
-#define GPIO6_LEVEL BIT(6)
-#define GPIO7_LEVEL BIT(7)
-
-/* Bit definitions for the USER_GPIO8_TO_15_STATUS register */
-#define GPIO8_LEVEL BIT(0)
-#define GPIO9_LEVEL BIT(1)
-#define GPIO10_LEVEL BIT(2)
-#define GPIO11_LEVEL BIT(3)
-#define GPIO12_LEVEL BIT(4)
-#define GPIO13_LEVEL BIT(5)
-#define GPIO14_LEVEL BIT(6)
-#define GPIO15_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO0_TO_7_OUT register */
-#define GPIO0_DRIVE_LEVEL BIT(0)
-#define GPIO1_DRIVE_LEVEL BIT(1)
-#define GPIO2_DRIVE_LEVEL BIT(2)
-#define GPIO3_DRIVE_LEVEL BIT(3)
-#define GPIO4_DRIVE_LEVEL BIT(4)
-#define GPIO5_DRIVE_LEVEL BIT(5)
-#define GPIO6_DRIVE_LEVEL BIT(6)
-#define GPIO7_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the GPIO8_TO_15_OUT register */
-#define GPIO8_DRIVE_LEVEL BIT(0)
-#define GPIO9_DRIVE_LEVEL BIT(1)
-#define GPIO10_DRIVE_LEVEL BIT(2)
-#define GPIO11_DRIVE_LEVEL BIT(3)
-#define GPIO12_DRIVE_LEVEL BIT(4)
-#define GPIO13_DRIVE_LEVEL BIT(5)
-#define GPIO14_DRIVE_LEVEL BIT(6)
-#define GPIO15_DRIVE_LEVEL BIT(7)
-
-/* Bit definitions for the DPLL_TOD_SYNC_CFG register */
-#define TOD_SYNC_SOURCE_SHIFT (1)
-#define TOD_SYNC_SOURCE_MASK (0x3)
-#define TOD_SYNC_EN BIT(0)
-
-/* Bit definitions for the DPLL_MODE register */
-#define WRITE_TIMER_MODE BIT(6)
-#define PLL_MODE_SHIFT (3)
-#define PLL_MODE_MASK (0x7)
-#define STATE_MODE_SHIFT (0)
-#define STATE_MODE_MASK (0x7)
-
-/* Bit definitions for the GPIO_CFG_GBL register */
-#define SUPPLY_MODE_SHIFT (0)
-#define SUPPLY_MODE_MASK (0x3)
-
-/* Bit definitions for the GPIO_DCO_INC_DEC register */
-#define INCDEC_DPLL_INDEX_SHIFT (0)
-#define INCDEC_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_0 register */
-#define CTRL_OUT_0 BIT(0)
-#define CTRL_OUT_1 BIT(1)
-#define CTRL_OUT_2 BIT(2)
-#define CTRL_OUT_3 BIT(3)
-#define CTRL_OUT_4 BIT(4)
-#define CTRL_OUT_5 BIT(5)
-#define CTRL_OUT_6 BIT(6)
-#define CTRL_OUT_7 BIT(7)
-
-/* Bit definitions for the GPIO_OUT_CTRL_1 register */
-#define CTRL_OUT_8 BIT(0)
-#define CTRL_OUT_9 BIT(1)
-#define CTRL_OUT_10 BIT(2)
-#define CTRL_OUT_11 BIT(3)
-#define CTRL_OUT_12 BIT(4)
-#define CTRL_OUT_13 BIT(5)
-#define CTRL_OUT_14 BIT(6)
-#define CTRL_OUT_15 BIT(7)
-
-/* Bit definitions for the GPIO_TOD_TRIG register */
-#define TOD_TRIG_0 BIT(0)
-#define TOD_TRIG_1 BIT(1)
-#define TOD_TRIG_2 BIT(2)
-#define TOD_TRIG_3 BIT(3)
-
-/* Bit definitions for the GPIO_DPLL_INDICATOR register */
-#define IND_DPLL_INDEX_SHIFT (0)
-#define IND_DPLL_INDEX_MASK (0x7)
-
-/* Bit definitions for the GPIO_LOS_INDICATOR register */
-#define REFMON_INDEX_SHIFT (0)
-#define REFMON_INDEX_MASK (0xf)
-/* Active level of LOS indicator, 0=low 1=high */
-#define ACTIVE_LEVEL BIT(4)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_0 register */
-#define DSQ_INP_0 BIT(0)
-#define DSQ_INP_1 BIT(1)
-#define DSQ_INP_2 BIT(2)
-#define DSQ_INP_3 BIT(3)
-#define DSQ_INP_4 BIT(4)
-#define DSQ_INP_5 BIT(5)
-#define DSQ_INP_6 BIT(6)
-#define DSQ_INP_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_1 register */
-#define DSQ_INP_8 BIT(0)
-#define DSQ_INP_9 BIT(1)
-#define DSQ_INP_10 BIT(2)
-#define DSQ_INP_11 BIT(3)
-#define DSQ_INP_12 BIT(4)
-#define DSQ_INP_13 BIT(5)
-#define DSQ_INP_14 BIT(6)
-#define DSQ_INP_15 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_2 register */
-#define DSQ_DPLL_0 BIT(0)
-#define DSQ_DPLL_1 BIT(1)
-#define DSQ_DPLL_2 BIT(2)
-#define DSQ_DPLL_3 BIT(3)
-#define DSQ_DPLL_4 BIT(4)
-#define DSQ_DPLL_5 BIT(5)
-#define DSQ_DPLL_6 BIT(6)
-#define DSQ_DPLL_7 BIT(7)
-
-/* Bit definitions for the GPIO_REF_INPUT_DSQ_3 register */
-#define DSQ_DPLL_SYS BIT(0)
-#define GPIO_DSQ_LEVEL BIT(1)
-
-/* Bit definitions for the GPIO_TOD_NOTIFICATION_CFG register */
-#define DPLL_TOD_SHIFT (0)
-#define DPLL_TOD_MASK (0x3)
-#define TOD_READ_SECONDARY BIT(2)
-#define GPIO_ASSERT_LEVEL BIT(3)
-
-/* Bit definitions for the GPIO_CTRL register */
-#define GPIO_FUNCTION_EN BIT(0)
-#define GPIO_CMOS_OD_MODE BIT(1)
-#define GPIO_CONTROL_DIR BIT(2)
-#define GPIO_PU_PD_MODE BIT(3)
-#define GPIO_FUNCTION_SHIFT (4)
-#define GPIO_FUNCTION_MASK (0xf)
-
-/* Bit definitions for the OUT_CTRL_1 register */
-#define OUT_SYNC_DISABLE BIT(7)
-#define SQUELCH_VALUE BIT(6)
-#define SQUELCH_DISABLE BIT(5)
-#define PAD_VDDO_SHIFT (2)
-#define PAD_VDDO_MASK (0x7)
-#define PAD_CMOSDRV_SHIFT (0)
-#define PAD_CMOSDRV_MASK (0x3)
-
-/* Bit definitions for the TOD_CFG register */
-#define TOD_EVEN_PPS_MODE BIT(2)
-#define TOD_OUT_SYNC_ENABLE BIT(1)
-#define TOD_ENABLE BIT(0)
-
-/* Bit definitions for the TOD_WRITE_SELECT_CFG_0 register */
-#define WR_PWM_DECODER_INDEX_SHIFT (4)
-#define WR_PWM_DECODER_INDEX_MASK (0xf)
-#define WR_REF_INDEX_SHIFT (0)
-#define WR_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_WRITE_CMD register */
-#define TOD_WRITE_SELECTION_SHIFT (0)
-#define TOD_WRITE_SELECTION_MASK (0xf)
-/* 4.8.7 */
-#define TOD_WRITE_TYPE_SHIFT (4)
-#define TOD_WRITE_TYPE_MASK (0x3)
-
-/* Bit definitions for the TOD_READ_PRIMARY_SEL_CFG_0 register */
-#define RD_PWM_DECODER_INDEX_SHIFT (4)
-#define RD_PWM_DECODER_INDEX_MASK (0xf)
-#define RD_REF_INDEX_SHIFT (0)
-#define RD_REF_INDEX_MASK (0xf)
-
-/* Bit definitions for the TOD_READ_PRIMARY_CMD register */
-#define TOD_READ_TRIGGER_MODE BIT(4)
-#define TOD_READ_TRIGGER_SHIFT (0)
-#define TOD_READ_TRIGGER_MASK (0xf)
-
-/* Bit definitions for the DPLL_CTRL_COMBO_MASTER_CFG register */
-#define COMBO_MASTER_HOLD BIT(0)
-
-/* Bit definitions for DPLL_SYS_STATUS register */
-#define DPLL_SYS_STATE_MASK (0xf)
-
-/* Bit definitions for SYS_APLL_STATUS register */
-#define SYS_APLL_LOSS_LOCK_LIVE_MASK BIT(0)
-#define SYS_APLL_LOSS_LOCK_LIVE_LOCKED 0
-#define SYS_APLL_LOSS_LOCK_LIVE_UNLOCKED 1
-
-#endif
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index 4dfc52e06704..0e4bc8b9329d 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -170,6 +170,7 @@ static void ptp_clock_release(struct device *dev)
struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev);
ptp_cleanup_pin_groups(ptp);
+ kfree(ptp->vclock_index);
mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
@@ -283,15 +284,20 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
/* Create a posix clock and link it to the device. */
err = posix_clock_register(&ptp->clock, &ptp->dev);
if (err) {
+ if (ptp->pps_source)
+ pps_unregister_source(ptp->pps_source);
+
+ if (ptp->kworker)
+ kthread_destroy_worker(ptp->kworker);
+
+ put_device(&ptp->dev);
+
pr_err("failed to create posix clock\n");
- goto no_clock;
+ return ERR_PTR(err);
}
return ptp;
-no_clock:
- if (ptp->pps_source)
- pps_unregister_source(ptp->pps_source);
no_pps:
ptp_cleanup_pin_groups(ptp);
no_pin_groups:
@@ -321,8 +327,6 @@ int ptp_clock_unregister(struct ptp_clock *ptp)
ptp->defunct = 1;
wake_up_interruptible(&ptp->tsev_wq);
- kfree(ptp->vclock_index);
-
if (ptp->kworker) {
kthread_cancel_delayed_work_sync(&ptp->aux_work);
kthread_destroy_worker(ptp->kworker);
diff --git a/drivers/ptp/ptp_clockmatrix.c b/drivers/ptp/ptp_clockmatrix.c
index fa636951169e..6bc5791a7ec5 100644
--- a/drivers/ptp/ptp_clockmatrix.c
+++ b/drivers/ptp/ptp_clockmatrix.c
@@ -6,7 +6,7 @@
* Copyright (C) 2019 Integrated Device Technology, Inc., a Renesas Company.
*/
#include <linux/firmware.h>
-#include <linux/i2c.h>
+#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>
#include <linux/delay.h>
@@ -14,6 +14,10 @@
#include <linux/kernel.h>
#include <linux/timekeeping.h>
#include <linux/string.h>
+#include <linux/of.h>
+#include <linux/mfd/rsmu.h>
+#include <linux/mfd/idt8a340_reg.h>
+#include <asm/unaligned.h>
#include "ptp_private.h"
#include "ptp_clockmatrix.h"
@@ -32,16 +36,43 @@ static char *firmware;
module_param(firmware, charp, 0);
#define SETTIME_CORRECTION (0)
+#define EXTTS_PERIOD_MS (95)
-static int contains_full_configuration(const struct firmware *fw)
+static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm);
+
+static inline int idtcm_read(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_read(idtcm->regmap, module + regaddr, buf, count);
+}
+
+static inline int idtcm_write(struct idtcm *idtcm,
+ u16 module,
+ u16 regaddr,
+ u8 *buf,
+ u16 count)
+{
+ return regmap_bulk_write(idtcm->regmap, module + regaddr, buf, count);
+}
+
+static int contains_full_configuration(struct idtcm *idtcm,
+ const struct firmware *fw)
{
- s32 full_count = FULL_FW_CFG_BYTES - FULL_FW_CFG_SKIPPED_BYTES;
struct idtcm_fwrc *rec = (struct idtcm_fwrc *)fw->data;
+ u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
+ s32 full_count;
s32 count = 0;
u16 regaddr;
u8 loaddr;
s32 len;
+ /* 4 bytes skipped every 0x80 */
+ full_count = (scratch - GPIO_USER_CONTROL) -
+ ((scratch >> 7) - (GPIO_USER_CONTROL >> 7)) * 4;
+
/* If the firmware contains 'full configuration' SM_RESET can be used
* to ensure proper configuration.
*
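The full_count arithmetic above counts the writable configuration bytes between GPIO_USER_CONTROL and the firmware-dependent SCRATCH offset, subtracting the 4 read-only bytes at the end of every 0x80-byte page. A standalone check of the same expression, using the 0xc160/0xcf50 values from the register header deleted earlier (the mfd header used after this change may place SCRATCH elsewhere):

#include <stdio.h>

static int full_cfg_bytes(unsigned int gpio_user_control, unsigned int scratch)
{
	/* 4 bytes are skipped at the end of every 0x80-byte page. */
	return (scratch - gpio_user_control) -
	       ((scratch >> 7) - (gpio_user_control >> 7)) * 4;
}

int main(void)
{
	printf("%d\n", full_cfg_bytes(0xc160, 0xcf50));	/* prints 3456 */
	return 0;
}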
@@ -57,7 +88,7 @@ static int contains_full_configuration(const struct firmware *fw)
rec++;
/* Top (status registers) and bottom are read-only */
- if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
@@ -152,132 +183,17 @@ static int idtcm_strverscmp(const char *version1, const char *version2)
return 0;
}
-static int idtcm_xfer_read(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- struct i2c_msg msg[2];
- int cnt;
-
- msg[0].addr = client->addr;
- msg[0].flags = 0;
- msg[0].len = 1;
- msg[0].buf = &regaddr;
-
- msg[1].addr = client->addr;
- msg[1].flags = I2C_M_RD;
- msg[1].len = count;
- msg[1].buf = buf;
-
- cnt = i2c_transfer(client->adapter, msg, 2);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_transfer failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- } else if (cnt != 2) {
- dev_err(&client->dev,
- "i2c_transfer sent only %d of %d messages", cnt, 2);
- return -EIO;
- }
-
- return 0;
-}
-
-static int idtcm_xfer_write(struct idtcm *idtcm,
- u8 regaddr,
- u8 *buf,
- u16 count)
-{
- struct i2c_client *client = idtcm->client;
- /* we add 1 byte for device register */
- u8 msg[IDTCM_MAX_WRITE_COUNT + 1];
- int cnt;
-
- if (count > IDTCM_MAX_WRITE_COUNT)
- return -EINVAL;
-
- msg[0] = regaddr;
- memcpy(&msg[1], buf, count);
-
- cnt = i2c_master_send(client, msg, count + 1);
-
- if (cnt < 0) {
- dev_err(&client->dev,
- "i2c_master_send failed at %d in %s, at addr: %04x!",
- __LINE__, __func__, regaddr);
- return cnt;
- }
-
- return 0;
-}
-
-static int idtcm_page_offset(struct idtcm *idtcm, u8 val)
-{
- u8 buf[4];
- int err;
-
- if (idtcm->page_offset == val)
- return 0;
-
- buf[0] = 0x0;
- buf[1] = val;
- buf[2] = 0x10;
- buf[3] = 0x20;
-
- err = idtcm_xfer_write(idtcm, PAGE_ADDR, buf, sizeof(buf));
- if (err) {
- idtcm->page_offset = 0xff;
- dev_err(&idtcm->client->dev, "failed to set page offset");
- } else {
- idtcm->page_offset = val;
- }
-
- return err;
-}
-
-static int _idtcm_rdwr(struct idtcm *idtcm,
- u16 regaddr,
- u8 *buf,
- u16 count,
- bool write)
+static enum fw_version idtcm_fw_version(const char *version)
{
- u8 hi;
- u8 lo;
- int err;
+ enum fw_version ver = V_DEFAULT;
- hi = (regaddr >> 8) & 0xff;
- lo = regaddr & 0xff;
-
- err = idtcm_page_offset(idtcm, hi);
- if (err)
- return err;
-
- if (write)
- return idtcm_xfer_write(idtcm, lo, buf, count);
-
- return idtcm_xfer_read(idtcm, lo, buf, count);
-}
+ if (idtcm_strverscmp(version, "4.8.7") >= 0)
+ ver = V487;
-static int idtcm_read(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, false);
-}
+ if (idtcm_strverscmp(version, "5.2.0") >= 0)
+ ver = V520;
-static int idtcm_write(struct idtcm *idtcm,
- u16 module,
- u16 regaddr,
- u8 *buf,
- u16 count)
-{
- return _idtcm_rdwr(idtcm, module + regaddr, buf, count, true);
+ return ver;
}
static int clear_boot_status(struct idtcm *idtcm)
@@ -318,11 +234,82 @@ static int wait_for_boot_status_ready(struct idtcm *idtcm)
} while (i);
- dev_warn(&idtcm->client->dev, "%s timed out", __func__);
+ dev_warn(idtcm->dev, "%s timed out", __func__);
return -EBUSY;
}
+static int _idtcm_set_scsr_read_trig(struct idtcm_channel *channel,
+ enum scsr_read_trig_sel trig, u8 ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
+ u8 val;
+ int err;
+
+ if (trig == SCSR_TOD_READ_TRIG_SEL_REFCLK) {
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(WR_REF_INDEX_MASK << WR_REF_INDEX_SHIFT);
+ val |= (ref << WR_REF_INDEX_SHIFT);
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ TOD_READ_PRIMARY_SEL_CFG_0, &val, sizeof(val));
+ if (err)
+ return err;
+ }
+
+ err = idtcm_read(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ if (err)
+ return err;
+
+ val &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
+ val |= (trig << TOD_READ_TRIGGER_SHIFT);
+ val &= ~TOD_READ_TRIGGER_MODE; /* single shot */
+
+ err = idtcm_write(idtcm, channel->tod_read_primary,
+ tod_read_cmd, &val, sizeof(val));
+ return err;
+}
+
+static int idtcm_enable_extts(struct idtcm_channel *channel, u8 todn, u8 ref,
+ bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 old_mask = idtcm->extts_mask;
+ u8 mask = 1 << todn;
+ int err = 0;
+
+ if (todn >= MAX_TOD)
+ return -EINVAL;
+
+ if (enable) {
+ if (ref > 0xF) /* E_REF_CLK15 */
+ return -EINVAL;
+ if (idtcm->extts_mask & mask)
+ return 0;
+ err = _idtcm_set_scsr_read_trig(&idtcm->channel[todn],
+ SCSR_TOD_READ_TRIG_SEL_REFCLK,
+ ref);
+ if (err == 0) {
+ idtcm->extts_mask |= mask;
+ idtcm->event_channel[todn] = channel;
+ idtcm->channel[todn].refn = ref;
+ }
+ } else
+ idtcm->extts_mask &= ~mask;
+
+ if (old_mask == 0 && idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+
+ return err;
+}
+
static int read_sys_apll_status(struct idtcm *idtcm, u8 *status)
{
return idtcm_read(idtcm, STATUS, DPLL_SYS_APLL_STATUS, status,
@@ -359,7 +346,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
} else if (dpll == DPLL_STATE_FREERUN ||
dpll == DPLL_STATE_HOLDOVER ||
dpll == DPLL_STATE_OPEN_LOOP) {
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"No wait state: DPLL_SYS_STATE %d", dpll);
return -EPERM;
}
@@ -367,7 +354,7 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
msleep(LOCK_POLL_INTERVAL_MS);
} while (time_is_after_jiffies(timeout));
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"%d ms lock timeout: SYS APLL Loss Lock %d SYS DPLL state %d",
LOCK_TIMEOUT_MS, apll, dpll);
@@ -377,50 +364,36 @@ static int wait_for_sys_apll_dpll_lock(struct idtcm *idtcm)
static void wait_for_chip_ready(struct idtcm *idtcm)
{
if (wait_for_boot_status_ready(idtcm))
- dev_warn(&idtcm->client->dev, "BOOT_STATUS != 0xA0");
+ dev_warn(idtcm->dev, "BOOT_STATUS != 0xA0");
if (wait_for_sys_apll_dpll_lock(idtcm))
- dev_warn(&idtcm->client->dev,
+ dev_warn(idtcm->dev,
"Continuing while SYS APLL/DPLL is not locked");
}
static int _idtcm_gettime(struct idtcm_channel *channel,
- struct timespec64 *ts)
+ struct timespec64 *ts, u8 timeout)
{
struct idtcm *idtcm = channel->idtcm;
+ u16 tod_read_cmd = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_READ_PRIMARY_CMD);
u8 buf[TOD_BYTE_COUNT];
- u8 timeout = 10;
u8 trigger;
int err;
- err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
- if (err)
- return err;
-
- trigger &= ~(TOD_READ_TRIGGER_MASK << TOD_READ_TRIGGER_SHIFT);
- trigger |= (1 << TOD_READ_TRIGGER_SHIFT);
- trigger &= ~TOD_READ_TRIGGER_MODE; /* single shot */
-
- err = idtcm_write(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger, sizeof(trigger));
- if (err)
- return err;
-
/* wait trigger to be 0 */
- while (trigger & TOD_READ_TRIGGER_MASK) {
+ do {
+ if (timeout-- == 0)
+ return -EIO;
+
if (idtcm->calculate_overhead_flag)
idtcm->start_time = ktime_get_raw();
err = idtcm_read(idtcm, channel->tod_read_primary,
- TOD_READ_PRIMARY_CMD, &trigger,
+ tod_read_cmd, &trigger,
sizeof(trigger));
if (err)
return err;
-
- if (--timeout == 0)
- return -EIO;
- }
+ } while (trigger & TOD_READ_TRIGGER_MASK);
err = idtcm_read(idtcm, channel->tod_read_primary,
TOD_READ_PRIMARY, buf, sizeof(buf));
@@ -432,6 +405,79 @@ static int _idtcm_gettime(struct idtcm_channel *channel,
return err;
}
+static int idtcm_extts_check_channel(struct idtcm *idtcm, u8 todn)
+{
+ struct idtcm_channel *ptp_channel, *extts_channel;
+ struct ptp_clock_event event;
+ struct timespec64 ts;
+ u32 dco_delay = 0;
+ int err;
+
+ extts_channel = &idtcm->channel[todn];
+ ptp_channel = idtcm->event_channel[todn];
+ if (extts_channel == ptp_channel)
+ dco_delay = ptp_channel->dco_delay;
+
+ err = _idtcm_gettime(extts_channel, &ts, 1);
+ if (err == 0) {
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = todn;
+ event.timestamp = timespec64_to_ns(&ts) - dco_delay;
+ ptp_clock_event(ptp_channel->ptp_clock, &event);
+ }
+ return err;
+}
+
+static u8 idtcm_enable_extts_mask(struct idtcm_channel *channel,
+ u8 extts_mask, bool enable)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int i, err;
+
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+ u8 refn = idtcm->channel[i].refn;
+
+ if (extts_mask & mask) {
+ /* check extts before disabling it */
+ if (enable == false) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger happened so we won't re-enable it */
+ if (err == 0)
+ extts_mask &= ~mask;
+ }
+ (void)idtcm_enable_extts(channel, i, refn, enable);
+ }
+ }
+
+ return extts_mask;
+}
+
+static int _idtcm_gettime_immediate(struct idtcm_channel *channel,
+ struct timespec64 *ts)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 extts_mask = 0;
+ int err;
+
+ /* Disable extts */
+ if (idtcm->extts_mask) {
+ extts_mask = idtcm_enable_extts_mask(channel, idtcm->extts_mask,
+ false);
+ }
+
+ err = _idtcm_set_scsr_read_trig(channel,
+ SCSR_TOD_READ_TRIG_SEL_IMMEDIATE, 0);
+ if (err == 0)
+ err = _idtcm_gettime(channel, ts, 10);
+
+ /* Re-enable extts */
+ if (extts_mask)
+ idtcm_enable_extts_mask(channel, extts_mask, true);
+
+ return err;
+}
+
static int _sync_pll_output(struct idtcm *idtcm,
u8 pll,
u8 sync_src,
@@ -559,35 +605,10 @@ static int _sync_pll_output(struct idtcm *idtcm,
return err;
}
-static int sync_source_dpll_tod_pps(u16 tod_addr, u8 *sync_src)
-{
- int err = 0;
-
- switch (tod_addr) {
- case TOD_0:
- *sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
- break;
- case TOD_1:
- *sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
- break;
- case TOD_2:
- *sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
- break;
- case TOD_3:
- *sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
- break;
- default:
- err = -EINVAL;
- }
-
- return err;
-}
-
static int idtcm_sync_pps_output(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
u8 pll;
- u8 sync_src;
u8 qn;
u8 qn_plus_1;
int err = 0;
@@ -596,10 +617,6 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
u8 temp;
u16 output_mask = channel->output_mask;
- err = sync_source_dpll_tod_pps(channel->tod_n, &sync_src);
- if (err)
- return err;
-
err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE,
&temp, sizeof(temp));
if (err)
@@ -655,8 +672,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
}
if (qn != 0 || qn_plus_1 != 0)
- err = _sync_pll_output(idtcm, pll, sync_src, qn,
- qn_plus_1);
+ err = _sync_pll_output(idtcm, pll, channel->sync_src,
+ qn, qn_plus_1);
if (err)
return err;
@@ -666,8 +683,8 @@ static int idtcm_sync_pps_output(struct idtcm_channel *channel)
}
static int _idtcm_set_dpll_hw_tod(struct idtcm_channel *channel,
- struct timespec64 const *ts,
- enum hw_tod_write_trig_sel wr_trig)
+ struct timespec64 const *ts,
+ enum hw_tod_write_trig_sel wr_trig)
{
struct idtcm *idtcm = channel->idtcm;
u8 buf[TOD_BYTE_COUNT];
@@ -784,7 +801,7 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
break;
if (++count > 20) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for the write counter");
return -EIO;
}
@@ -793,46 +810,46 @@ static int _idtcm_set_dpll_scsr_tod(struct idtcm_channel *channel,
return 0;
}
-static int get_output_base_addr(u8 outn)
+static int get_output_base_addr(enum fw_version ver, u8 outn)
{
int base;
switch (outn) {
case 0:
- base = OUTPUT_0;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_0);
break;
case 1:
- base = OUTPUT_1;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_1);
break;
case 2:
- base = OUTPUT_2;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_2);
break;
case 3:
- base = OUTPUT_3;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_3);
break;
case 4:
- base = OUTPUT_4;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_4);
break;
case 5:
- base = OUTPUT_5;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_5);
break;
case 6:
- base = OUTPUT_6;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_6);
break;
case 7:
- base = OUTPUT_7;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_7);
break;
case 8:
- base = OUTPUT_8;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_8);
break;
case 9:
- base = OUTPUT_9;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_9);
break;
case 10:
- base = OUTPUT_10;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_10);
break;
case 11:
- base = OUTPUT_11;
+ base = IDTCM_FW_REG(ver, V520, OUTPUT_11);
break;
default:
base = -EINVAL;
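IDTCM_FW_REG() is used throughout these hunks to pick a register offset based on the detected firmware generation. Its exact definition lives in ptp_clockmatrix.h; a plausible reading, shown here only as an assumption, is a token-pasting selector of the form below, so that with fw_ver >= V520 the V520-suffixed constant from the register header is used instead of the baseline one:

/* Assumed shape of the selector macro, illustrative, not copied from the tree:
 * use the baseline register unless the firmware is at least VER, in which
 * case use the VER-suffixed variant (e.g. OUTPUT_0 vs. OUTPUT_0_V520).
 */
#define EXAMPLE_FW_REG(FW, VER, REG)	(((FW) < (VER)) ? (REG) : (REG##_##VER))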
@@ -849,7 +866,7 @@ static int _idtcm_settime_deprecated(struct idtcm_channel *channel,
err = _idtcm_set_dpll_hw_tod(channel, ts, HW_TOD_WR_TRIG_SEL_MSB);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s: Set HW ToD failed", __func__);
return err;
}
@@ -929,9 +946,9 @@ static int idtcm_start_phase_pull_in(struct idtcm_channel *channel)
return err;
}
-static int idtcm_do_phase_pull_in(struct idtcm_channel *channel,
- s32 offset_ns,
- u32 max_ffo_ppb)
+static int do_phase_pull_in_fw(struct idtcm_channel *channel,
+ s32 offset_ns,
+ u32 max_ffo_ppb)
{
int err;
@@ -1000,7 +1017,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
s64 now;
if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED) {
- err = idtcm_do_phase_pull_in(channel, delta, 0);
+ err = channel->do_phase_pull_in(channel, delta, 0);
} else {
idtcm->calculate_overhead_flag = 1;
@@ -1008,7 +1025,7 @@ static int _idtcm_adjtime_deprecated(struct idtcm_channel *channel, s64 delta)
if (err)
return err;
- err = _idtcm_gettime(channel, &ts);
+ err = _idtcm_gettime_immediate(channel, &ts);
if (err)
return err;
@@ -1032,7 +1049,9 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
clear_boot_status(idtcm);
- err = idtcm_write(idtcm, RESET_CTRL, SM_RESET, &byte, sizeof(byte));
+ err = idtcm_write(idtcm, RESET_CTRL,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, SM_RESET),
+ &byte, sizeof(byte));
if (!err) {
for (i = 0; i < 30; i++) {
@@ -1040,14 +1059,14 @@ static int idtcm_state_machine_reset(struct idtcm *idtcm)
read_boot_status(idtcm, &status);
if (status == 0xA0) {
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"SM_RESET completed in %d ms", i * 100);
break;
}
}
if (!status)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Timed out waiting for CM_RESET to complete");
}
@@ -1144,12 +1163,12 @@ static int set_pll_output_mask(struct idtcm *idtcm, u16 addr, u8 val)
static int set_tod_ptp_pll(struct idtcm *idtcm, u8 index, u8 pll)
{
if (index >= MAX_TOD) {
- dev_err(&idtcm->client->dev, "ToD%d not supported", index);
+ dev_err(idtcm->dev, "ToD%d not supported", index);
return -EINVAL;
}
if (pll >= MAX_PLL) {
- dev_err(&idtcm->client->dev, "Pll%d not supported", pll);
+ dev_err(idtcm->dev, "Pll%d not supported", pll);
return -EINVAL;
}
@@ -1167,7 +1186,7 @@ static int check_and_set_masks(struct idtcm *idtcm,
switch (regaddr) {
case TOD_MASK_ADDR:
if ((val & 0xf0) || !(val & 0x0f)) {
- dev_err(&idtcm->client->dev, "Invalid TOD mask 0x%02x", val);
+ dev_err(idtcm->dev, "Invalid TOD mask 0x%02x", val);
err = -EINVAL;
} else {
idtcm->tod_mask = val;
@@ -1198,13 +1217,13 @@ static void display_pll_and_masks(struct idtcm *idtcm)
u8 i;
u8 mask;
- dev_dbg(&idtcm->client->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
+ dev_dbg(idtcm->dev, "tod_mask = 0x%02x", idtcm->tod_mask);
for (i = 0; i < MAX_TOD; i++) {
mask = 1 << i;
if (mask & idtcm->tod_mask)
- dev_dbg(&idtcm->client->dev,
+ dev_dbg(idtcm->dev,
"TOD%d pll = %d output_mask = 0x%04x",
i, idtcm->channel[i].pll,
idtcm->channel[i].output_mask);
@@ -1214,6 +1233,7 @@ static void display_pll_and_masks(struct idtcm *idtcm)
static int idtcm_load_firmware(struct idtcm *idtcm,
struct device *dev)
{
+ u16 scratch = IDTCM_FW_REG(idtcm->fw_ver, V520, SCRATCH);
char fname[128] = FW_FILENAME;
const struct firmware *fw;
struct idtcm_fwrc *rec;
@@ -1226,25 +1246,25 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
if (firmware) /* module parameter */
snprintf(fname, sizeof(fname), "%s", firmware);
- dev_dbg(&idtcm->client->dev, "requesting firmware '%s'", fname);
+ dev_info(idtcm->dev, "requesting firmware '%s'", fname);
err = request_firmware(&fw, fname, dev);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
- dev_dbg(&idtcm->client->dev, "firmware size %zu bytes", fw->size);
+ dev_dbg(idtcm->dev, "firmware size %zu bytes", fw->size);
rec = (struct idtcm_fwrc *) fw->data;
- if (contains_full_configuration(fw))
+ if (contains_full_configuration(idtcm, fw))
idtcm_state_machine_reset(idtcm);
for (len = fw->size; len > 0; len -= sizeof(*rec)) {
if (rec->reserved) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"bad firmware, reserved field non-zero");
err = -EINVAL;
} else {
@@ -1263,7 +1283,7 @@ static int idtcm_load_firmware(struct idtcm *idtcm,
err = 0;
/* Top (status registers) and bottom are read-only */
- if (regaddr < GPIO_USER_CONTROL || regaddr >= SCRATCH)
+ if (regaddr < GPIO_USER_CONTROL || regaddr >= scratch)
continue;
/* Page size 128, last 4 bytes of page skipped */
@@ -1292,10 +1312,10 @@ static int idtcm_output_enable(struct idtcm_channel *channel,
int err;
u8 val;
- base = get_output_base_addr(outn);
+ base = get_output_base_addr(idtcm->fw_ver, outn);
if (!(base > 0)) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"%s - Unsupported out%d", __func__, outn);
return base;
}
@@ -1337,8 +1357,8 @@ static int idtcm_output_mask_enable(struct idtcm_channel *channel,
}
static int idtcm_perout_enable(struct idtcm_channel *channel,
- bool enable,
- struct ptp_perout_request *perout)
+ struct ptp_perout_request *perout,
+ bool enable)
{
struct idtcm *idtcm = channel->idtcm;
unsigned int flags = perout->flags;
@@ -1351,7 +1371,7 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
err = idtcm_output_enable(channel, enable, perout->index);
if (err) {
- dev_err(&idtcm->client->dev, "Unable to set output enable");
+ dev_err(idtcm->dev, "Unable to set output enable");
return err;
}
@@ -1360,53 +1380,331 @@ static int idtcm_perout_enable(struct idtcm_channel *channel,
}
static int idtcm_get_pll_mode(struct idtcm_channel *channel,
- enum pll_mode *pll_mode)
+ enum pll_mode *mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
- err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_read(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
- *pll_mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
+ *mode = (dpll_mode >> PLL_MODE_SHIFT) & PLL_MODE_MASK;
return 0;
}
static int idtcm_set_pll_mode(struct idtcm_channel *channel,
- enum pll_mode pll_mode)
+ enum pll_mode mode)
{
struct idtcm *idtcm = channel->idtcm;
int err;
u8 dpll_mode;
- err = idtcm_read(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_read(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
if (err)
return err;
dpll_mode &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
- dpll_mode |= (pll_mode << PLL_MODE_SHIFT);
-
- channel->pll_mode = pll_mode;
+ dpll_mode |= (mode << PLL_MODE_SHIFT);
- err = idtcm_write(idtcm, channel->dpll_n, DPLL_MODE,
+ err = idtcm_write(idtcm, channel->dpll_n,
+ IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_MODE),
&dpll_mode, sizeof(dpll_mode));
+ return err;
+}
+
+static int idtcm_get_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference *ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 dpll_manu_ref_cfg;
+ int err;
+
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
if (err)
return err;
+ dpll_manu_ref_cfg &= (MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+ *ref = dpll_manu_ref_cfg >> MANUAL_REFERENCE_SHIFT;
+
return 0;
}
+static int idtcm_set_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ u8 dpll_manu_ref_cfg;
+ int err;
+
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+ if (err)
+ return err;
+
+ dpll_manu_ref_cfg &= ~(MANUAL_REFERENCE_MASK << MANUAL_REFERENCE_SHIFT);
+
+ dpll_manu_ref_cfg |= (ref << MANUAL_REFERENCE_SHIFT);
+
+ err = idtcm_write(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_MANU_REF_CFG,
+ &dpll_manu_ref_cfg, sizeof(dpll_manu_ref_cfg));
+
+ return err;
+}
+
+static int configure_dpll_mode_write_frequency(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set pll mode to write frequency");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+ return err;
+}
+
+static int configure_dpll_mode_write_phase(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set pll mode to write phase");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+ return err;
+}
+
+static int configure_manual_reference_write_frequency(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_FREQUENCY);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set manual reference to write frequency");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+
+ return err;
+}
+
+static int configure_manual_reference_write_phase(struct idtcm_channel *channel)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ err = idtcm_set_manual_reference(channel, MANU_REF_WRITE_PHASE);
+
+ if (err)
+ dev_err(idtcm->dev, "Failed to set manual reference to write phase");
+ else
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+
+ return err;
+}
+
+static int idtcm_stop_phase_pull_in(struct idtcm_channel *channel)
+{
+ int err;
+
+ err = _idtcm_adjfine(channel, channel->current_freq_scaled_ppm);
+ if (err)
+ return err;
+
+ channel->phase_pull_in = false;
+
+ return 0;
+}
+
+static long idtcm_work_handler(struct ptp_clock_info *ptp)
+{
+ struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+
+ mutex_lock(idtcm->lock);
+
+ (void)idtcm_stop_phase_pull_in(channel);
+
+ mutex_unlock(idtcm->lock);
+
+ /* Return a negative value here to not reschedule */
+ return -1;
+}
+
+static s32 phase_pull_in_scaled_ppm(s32 current_ppm, s32 phase_pull_in_ppb)
+{
+ /* ppb = scaled_ppm * 125 / 2^13 */
+ /* scaled_ppm = ppb * 2^13 / 125 */
+
+ s64 max_scaled_ppm = div_s64((s64)PHASE_PULL_IN_MAX_PPB << 13, 125);
+ s64 scaled_ppm = div_s64((s64)phase_pull_in_ppb << 13, 125);
+
+ current_ppm += scaled_ppm;
+
+ if (current_ppm > max_scaled_ppm)
+ current_ppm = max_scaled_ppm;
+ else if (current_ppm < -max_scaled_ppm)
+ current_ppm = -max_scaled_ppm;
+
+ return current_ppm;
+}
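A quick numeric check of the conversion noted above (illustrative only, not part of the patch): scaled ppm is ppm with a 16-bit fractional binary part, which is where the 125 / 2^13 factor comes from.

#include <stdio.h>

int main(void)
{
	long long ppb = 144000;                    /* PHASE_PULL_IN_MAX_PPB */
	long long scaled_ppm = (ppb << 13) / 125;  /* scaled_ppm = ppb * 2^13 / 125 */

	/* 144000 ppb == 144 ppm == 144 * 65536 == 9437184 scaled ppm */
	printf("%lld\n", scaled_ppm);
	return 0;
}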
+
+static int do_phase_pull_in_sw(struct idtcm_channel *channel,
+ s32 delta_ns,
+ u32 max_ffo_ppb)
+{
+ s32 current_ppm = channel->current_freq_scaled_ppm;
+ u32 duration_ms = MSEC_PER_SEC;
+ s32 delta_ppm;
+ s32 ppb;
+ int err;
+
+ /* If the ToD correction is less than PHASE_PULL_IN_MIN_THRESHOLD_NS,
+ * skip. The error introduced by the ToD adjustment procedure would
+ * be bigger than the required ToD correction
+ */
+ if (abs(delta_ns) < PHASE_PULL_IN_MIN_THRESHOLD_NS)
+ return 0;
+
+ if (max_ffo_ppb == 0)
+ max_ffo_ppb = PHASE_PULL_IN_MAX_PPB;
+
+ /* For most cases, keep phase pull-in duration 1 second */
+ ppb = delta_ns;
+ while (abs(ppb) > max_ffo_ppb) {
+ duration_ms *= 2;
+ ppb /= 2;
+ }
+
+ delta_ppm = phase_pull_in_scaled_ppm(current_ppm, ppb);
+
+ err = _idtcm_adjfine(channel, delta_ppm);
+
+ if (err)
+ return err;
+
+ /* schedule the worker to cancel phase pull-in */
+ ptp_schedule_worker(channel->ptp_clock,
+ msecs_to_jiffies(duration_ms) - 1);
+
+ channel->phase_pull_in = true;
+
+ return 0;
+}
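A small sketch (not part of the patch) of how the loop above trades pull-in duration for frequency offset once the requested correction exceeds the ppb cap.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	long ppb = 500000;               /* delta_ns: a 500 us ToD error */
	long max_ffo_ppb = 144000;       /* PHASE_PULL_IN_MAX_PPB (default cap) */
	unsigned int duration_ms = 1000; /* MSEC_PER_SEC */

	while (labs(ppb) > max_ffo_ppb) {
		duration_ms *= 2;        /* stretch the pull-in window... */
		ppb /= 2;                /* ...so the frequency offset halves */
	}

	/* prints "slew 125000 ppb for 4000 ms" */
	printf("slew %ld ppb for %u ms\n", ppb, duration_ms);
	return 0;
}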
+
+static int initialize_operating_mode_with_manual_reference(struct idtcm_channel *channel,
+ enum manual_reference ref)
+{
+ struct idtcm *idtcm = channel->idtcm;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+ channel->configure_write_frequency = configure_manual_reference_write_frequency;
+ channel->configure_write_phase = configure_manual_reference_write_phase;
+ channel->do_phase_pull_in = do_phase_pull_in_sw;
+
+ switch (ref) {
+ case MANU_REF_WRITE_PHASE:
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+ break;
+ case MANU_REF_WRITE_FREQUENCY:
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+ break;
+ default:
+ dev_warn(idtcm->dev,
+ "Unsupported MANUAL_REFERENCE: 0x%02x", ref);
+ }
+
+ return 0;
+}
+
+static int initialize_operating_mode_with_pll_mode(struct idtcm_channel *channel,
+ enum pll_mode mode)
+{
+ struct idtcm *idtcm = channel->idtcm;
+ int err = 0;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+ channel->configure_write_frequency = configure_dpll_mode_write_frequency;
+ channel->configure_write_phase = configure_dpll_mode_write_phase;
+ channel->do_phase_pull_in = do_phase_pull_in_fw;
+
+ switch (mode) {
+ case PLL_MODE_WRITE_PHASE:
+ channel->mode = PTP_PLL_MODE_WRITE_PHASE;
+ break;
+ case PLL_MODE_WRITE_FREQUENCY:
+ channel->mode = PTP_PLL_MODE_WRITE_FREQUENCY;
+ break;
+ default:
+ dev_err(idtcm->dev,
+ "Unsupported PLL_MODE: 0x%02x", mode);
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int initialize_dco_operating_mode(struct idtcm_channel *channel)
+{
+ enum manual_reference ref = MANU_REF_XO_DPLL;
+ enum pll_mode mode = PLL_MODE_DISABLED;
+ struct idtcm *idtcm = channel->idtcm;
+ int err;
+
+ channel->mode = PTP_PLL_MODE_UNSUPPORTED;
+
+ err = idtcm_get_pll_mode(channel, &mode);
+ if (err) {
+ dev_err(idtcm->dev, "Unable to read pll mode!");
+ return err;
+ }
+
+ if (mode == PLL_MODE_PLL) {
+ err = idtcm_get_manual_reference(channel, &ref);
+ if (err) {
+ dev_err(idtcm->dev, "Unable to read manual reference!");
+ return err;
+ }
+ err = initialize_operating_mode_with_manual_reference(channel, ref);
+ } else {
+ err = initialize_operating_mode_with_pll_mode(channel, mode);
+ }
+
+ if (channel->mode == PTP_PLL_MODE_WRITE_PHASE)
+ channel->configure_write_frequency(channel);
+
+ return err;
+}
+
/* PTP Hardware Clock interface */
-/*
+/**
* Maximum absolute value for write phase offset in picoseconds
*
+ * @channel: channel
+ * @delta_ns: delta in nanoseconds
+ *
* Destination signed register is 32-bit register in resolution of 50ps
*
* 0x7fffffff * 50 = 2147483647 * 50 = 107374182350
@@ -1420,8 +1718,8 @@ static int _idtcm_adjphase(struct idtcm_channel *channel, s32 delta_ns)
s32 phase_50ps;
s64 offset_ps;
- if (channel->pll_mode != PLL_MODE_WRITE_PHASE) {
- err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_PHASE);
+ if (channel->mode != PTP_PLL_MODE_WRITE_PHASE) {
+ err = channel->configure_write_phase(channel);
if (err)
return err;
}
@@ -1459,8 +1757,8 @@ static int _idtcm_adjfine(struct idtcm_channel *channel, long scaled_ppm)
u8 buf[6] = {0};
s64 fcw;
- if (channel->pll_mode != PLL_MODE_WRITE_FREQUENCY) {
- err = idtcm_set_pll_mode(channel, PLL_MODE_WRITE_FREQUENCY);
+ if (channel->mode != PTP_PLL_MODE_WRITE_FREQUENCY) {
+ err = channel->configure_write_frequency(channel);
if (err)
return err;
}
@@ -1501,15 +1799,14 @@ static int idtcm_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
+ err = _idtcm_gettime_immediate(channel, ts);
+ mutex_unlock(idtcm->lock);
- err = _idtcm_gettime(channel, ts);
if (err)
- dev_err(&idtcm->client->dev, "Failed at line %d in %s!",
+ dev_err(idtcm->dev, "Failed at line %d in %s!",
__LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1520,15 +1817,14 @@ static int idtcm_settime_deprecated(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime_deprecated(channel, ts);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1539,15 +1835,14 @@ static int idtcm_settime(struct ptp_clock_info *ptp,
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_settime(channel, ts, SCSR_TOD_WR_TYPE_SEL_ABSOLUTE);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1557,15 +1852,14 @@ static int idtcm_adjtime_deprecated(struct ptp_clock_info *ptp, s64 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjtime_deprecated(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1577,31 +1871,30 @@ static int idtcm_adjtime(struct ptp_clock_info *ptp, s64 delta)
enum scsr_tod_write_type_sel type;
int err;
- if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
- err = idtcm_do_phase_pull_in(channel, delta, 0);
- if (err)
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- }
+ if (channel->phase_pull_in == true)
+ return 0;
- if (delta >= 0) {
- ts = ns_to_timespec64(delta);
- type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ mutex_lock(idtcm->lock);
+
+ if (abs(delta) < PHASE_PULL_IN_THRESHOLD_NS) {
+ err = channel->do_phase_pull_in(channel, delta, 0);
} else {
- ts = ns_to_timespec64(-delta);
- type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ if (delta >= 0) {
+ ts = ns_to_timespec64(delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS;
+ } else {
+ ts = ns_to_timespec64(-delta);
+ type = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS;
+ }
+ err = _idtcm_settime(channel, &ts, type);
}
- mutex_lock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
- err = _idtcm_settime(channel, &ts, type);
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1611,15 +1904,14 @@ static int idtcm_adjphase(struct ptp_clock_info *ptp, s32 delta)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
-
+ mutex_lock(idtcm->lock);
err = _idtcm_adjphase(channel, delta);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
- mutex_unlock(&idtcm->reg_lock);
-
return err;
}
@@ -1629,14 +1921,21 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
struct idtcm *idtcm = channel->idtcm;
int err;
- mutex_lock(&idtcm->reg_lock);
+ if (channel->phase_pull_in == true)
+ return 0;
+ if (scaled_ppm == channel->current_freq_scaled_ppm)
+ return 0;
+
+ mutex_lock(idtcm->lock);
err = _idtcm_adjfine(channel, scaled_ppm);
+ mutex_unlock(idtcm->lock);
+
if (err)
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
-
- mutex_unlock(&idtcm->reg_lock);
+ else
+ channel->current_freq_scaled_ppm = scaled_ppm;
return err;
}
@@ -1644,249 +1943,36 @@ static int idtcm_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
static int idtcm_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on)
{
- int err;
struct idtcm_channel *channel = container_of(ptp, struct idtcm_channel, caps);
+ struct idtcm *idtcm = channel->idtcm;
+ int err = -EOPNOTSUPP;
+
+ mutex_lock(idtcm->lock);
switch (rq->type) {
case PTP_CLK_REQ_PEROUT:
- if (!on) {
- err = idtcm_perout_enable(channel, false, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!",
- __LINE__, __func__);
- return err;
- }
-
+ if (!on)
+ err = idtcm_perout_enable(channel, &rq->perout, false);
/* Only accept a 1-PPS aligned to the second. */
- if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
- rq->perout.period.nsec)
- return -ERANGE;
-
- err = idtcm_perout_enable(channel, true, &rq->perout);
- if (err)
- dev_err(&channel->idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- default:
- break;
- }
-
- return -EOPNOTSUPP;
-}
-
-static int _enable_pll_tod_sync(struct idtcm *idtcm,
- u8 pll,
- u8 sync_src,
- u8 qn,
- u8 qn_plus_1)
-{
- int err;
- u8 val;
- u16 dpll;
- u16 out0 = 0, out1 = 0;
-
- if (qn == 0 && qn_plus_1 == 0)
- return 0;
-
- switch (pll) {
- case 0:
- dpll = DPLL_0;
- if (qn)
- out0 = OUTPUT_0;
- if (qn_plus_1)
- out1 = OUTPUT_1;
+ else if (rq->perout.start.nsec || rq->perout.period.sec != 1 ||
+ rq->perout.period.nsec)
+ err = -ERANGE;
+ else
+ err = idtcm_perout_enable(channel, &rq->perout, true);
break;
- case 1:
- dpll = DPLL_1;
- if (qn)
- out0 = OUTPUT_2;
- if (qn_plus_1)
- out1 = OUTPUT_3;
- break;
- case 2:
- dpll = DPLL_2;
- if (qn)
- out0 = OUTPUT_4;
- if (qn_plus_1)
- out1 = OUTPUT_5;
- break;
- case 3:
- dpll = DPLL_3;
- if (qn)
- out0 = OUTPUT_6;
- if (qn_plus_1)
- out1 = OUTPUT_7;
- break;
- case 4:
- dpll = DPLL_4;
- if (qn)
- out0 = OUTPUT_8;
- break;
- case 5:
- dpll = DPLL_5;
- if (qn)
- out0 = OUTPUT_9;
- if (qn_plus_1)
- out1 = OUTPUT_8;
- break;
- case 6:
- dpll = DPLL_6;
- if (qn)
- out0 = OUTPUT_10;
- if (qn_plus_1)
- out1 = OUTPUT_11;
- break;
- case 7:
- dpll = DPLL_7;
- if (qn)
- out0 = OUTPUT_11;
+ case PTP_CLK_REQ_EXTTS:
+ err = idtcm_enable_extts(channel, rq->extts.index,
+ rq->extts.rsv[0], on);
break;
default:
- return -EINVAL;
- }
-
- /*
- * Enable OUTPUT OUT_SYNC.
- */
- if (out0) {
- err = idtcm_read(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~OUT_SYNC_DISABLE;
-
- err = idtcm_write(idtcm, out0, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
- }
-
- if (out1) {
- err = idtcm_read(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~OUT_SYNC_DISABLE;
-
- err = idtcm_write(idtcm, out1, OUT_CTRL_1, &val, sizeof(val));
- if (err)
- return err;
- }
-
- /* enable dpll sync tod pps, must be set before dpll_mode */
- err = idtcm_read(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
- if (err)
- return err;
-
- val &= ~(TOD_SYNC_SOURCE_MASK << TOD_SYNC_SOURCE_SHIFT);
- val |= (sync_src << TOD_SYNC_SOURCE_SHIFT);
- val |= TOD_SYNC_EN;
-
- return idtcm_write(idtcm, dpll, DPLL_TOD_SYNC_CFG, &val, sizeof(val));
-}
-
-static int idtcm_enable_tod_sync(struct idtcm_channel *channel)
-{
- struct idtcm *idtcm = channel->idtcm;
- u8 pll;
- u8 sync_src;
- u8 qn;
- u8 qn_plus_1;
- u8 cfg;
- int err = 0;
- u16 output_mask = channel->output_mask;
- u8 out8_mux = 0;
- u8 out11_mux = 0;
- u8 temp;
-
- /*
- * set tod_out_sync_enable to 0.
- */
- err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
- if (err)
- return err;
-
- cfg &= ~TOD_OUT_SYNC_ENABLE;
-
- err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
- if (err)
- return err;
-
- switch (channel->tod_n) {
- case TOD_0:
- sync_src = 0;
- break;
- case TOD_1:
- sync_src = 1;
- break;
- case TOD_2:
- sync_src = 2;
- break;
- case TOD_3:
- sync_src = 3;
break;
- default:
- return -EINVAL;
}
- err = idtcm_read(idtcm, 0, HW_Q8_CTRL_SPARE, &temp, sizeof(temp));
- if (err)
- return err;
-
- if ((temp & Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
- Q9_TO_Q8_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
- out8_mux = 1;
+ mutex_unlock(idtcm->lock);
- err = idtcm_read(idtcm, 0, HW_Q11_CTRL_SPARE, &temp, sizeof(temp));
if (err)
- return err;
-
- if ((temp & Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK) ==
- Q10_TO_Q11_FANOUT_AND_CLOCK_SYNC_ENABLE_MASK)
- out11_mux = 1;
-
- for (pll = 0; pll < 8; pll++) {
- qn = 0;
- qn_plus_1 = 0;
-
- if (pll < 4) {
- /* First 4 pll has 2 outputs */
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- } else if (pll == 4) {
- if (out8_mux == 0) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- } else if (pll == 5) {
- if (out8_mux) {
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- } else if (pll == 6) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- if (out11_mux) {
- qn_plus_1 = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- } else if (pll == 7) {
- if (out11_mux == 0) {
- qn = output_mask & 0x1;
- output_mask = output_mask >> 1;
- }
- }
-
- if (qn != 0 || qn_plus_1 != 0)
- err = _enable_pll_tod_sync(idtcm, pll, sync_src, qn,
- qn_plus_1);
- if (err)
- return err;
- }
+ dev_err(channel->idtcm->dev,
+ "Failed in %s with err %d!", __func__, err);
return err;
}
@@ -1895,23 +1981,31 @@ static int idtcm_enable_tod(struct idtcm_channel *channel)
{
struct idtcm *idtcm = channel->idtcm;
struct timespec64 ts = {0, 0};
+ u16 tod_cfg = IDTCM_FW_REG(idtcm->fw_ver, V520, TOD_CFG);
u8 cfg;
int err;
+ /* STEELAI-366 - Temporary workaround for ts2phc compatibility */
+ if (0) {
+ err = idtcm_output_mask_enable(channel, false);
+ if (err)
+ return err;
+ }
+
/*
* Start the TOD clock ticking.
*/
- err = idtcm_read(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ err = idtcm_read(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
cfg |= TOD_ENABLE;
- err = idtcm_write(idtcm, channel->tod_n, TOD_CFG, &cfg, sizeof(cfg));
+ err = idtcm_write(idtcm, channel->tod_n, tod_cfg, &cfg, sizeof(cfg));
if (err)
return err;
- if (idtcm->deprecated)
+ if (idtcm->fw_ver < V487)
return _idtcm_settime_deprecated(channel, &ts);
else
return _idtcm_settime(channel, &ts,
@@ -1939,12 +2033,9 @@ static void idtcm_set_version_info(struct idtcm *idtcm)
snprintf(idtcm->version, sizeof(idtcm->version), "%u.%u.%u",
major, minor, hotfix);
- if (idtcm_strverscmp(idtcm->version, "4.8.7") >= 0)
- idtcm->deprecated = 0;
- else
- idtcm->deprecated = 1;
+ idtcm->fw_ver = idtcm_fw_version(idtcm->version);
- dev_info(&idtcm->client->dev,
+ dev_info(idtcm->dev,
"%d.%d.%d, Id: 0x%04x HW Rev: %d OTP Config Select: %d",
major, minor, hotfix,
product_id, hw_rev_id, config_select);
@@ -1954,28 +2045,33 @@ static const struct ptp_clock_info idtcm_caps = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime,
.enable = &idtcm_enable,
+ .do_aux_work = &idtcm_work_handler,
};
static const struct ptp_clock_info idtcm_caps_deprecated = {
.owner = THIS_MODULE,
.max_adj = 244000,
.n_per_out = 12,
+ .n_ext_ts = MAX_TOD,
.adjphase = &idtcm_adjphase,
.adjfine = &idtcm_adjfine,
.adjtime = &idtcm_adjtime_deprecated,
.gettime64 = &idtcm_gettime,
.settime64 = &idtcm_settime_deprecated,
.enable = &idtcm_enable,
+ .do_aux_work = &idtcm_work_handler,
};
static int configure_channel_pll(struct idtcm_channel *channel)
{
+ struct idtcm *idtcm = channel->idtcm;
int err = 0;
switch (channel->pll) {
@@ -1997,7 +2093,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 2:
channel->dpll_freq = DPLL_FREQ_2;
- channel->dpll_n = DPLL_2;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_2);
channel->hw_dpll_n = HW_DPLL_2;
channel->dpll_phase = DPLL_PHASE_2;
channel->dpll_ctrl_n = DPLL_CTRL_2;
@@ -2013,7 +2109,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 4:
channel->dpll_freq = DPLL_FREQ_4;
- channel->dpll_n = DPLL_4;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_4);
channel->hw_dpll_n = HW_DPLL_4;
channel->dpll_phase = DPLL_PHASE_4;
channel->dpll_ctrl_n = DPLL_CTRL_4;
@@ -2029,7 +2125,7 @@ static int configure_channel_pll(struct idtcm_channel *channel)
break;
case 6:
channel->dpll_freq = DPLL_FREQ_6;
- channel->dpll_n = DPLL_6;
+ channel->dpll_n = IDTCM_FW_REG(idtcm->fw_ver, V520, DPLL_6);
channel->hw_dpll_n = HW_DPLL_6;
channel->dpll_phase = DPLL_PHASE_6;
channel->dpll_ctrl_n = DPLL_CTRL_6;
@@ -2050,50 +2146,104 @@ static int configure_channel_pll(struct idtcm_channel *channel)
return err;
}
-static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+/*
+ * Compensate for the PTP DCO input-to-output delay.
+ * This delay is 18 FOD cycles.
+ */
+static u32 idtcm_get_dco_delay(struct idtcm_channel *channel)
{
- struct idtcm_channel *channel;
+ struct idtcm *idtcm = channel->idtcm;
+ u8 mbuf[8] = {0};
+ u8 nbuf[2] = {0};
+ u32 fodFreq;
int err;
+ u64 m;
+ u16 n;
- if (!(index < MAX_TOD))
- return -EINVAL;
-
- channel = &idtcm->channel[index];
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ, mbuf, 6);
+ if (err)
+ return 0;
- /* Set pll addresses */
- err = configure_channel_pll(channel);
+ err = idtcm_read(idtcm, channel->dpll_ctrl_n,
+ DPLL_CTRL_DPLL_FOD_FREQ + 6, nbuf, 2);
if (err)
- return err;
+ return 0;
+
+ m = get_unaligned_le64(mbuf);
+ n = get_unaligned_le16(nbuf);
+
+ if (n == 0)
+ n = 1;
+
+ fodFreq = (u32)div_u64(m, n);
+ if (fodFreq >= 500000000)
+ return 18 * (u32)div_u64(NSEC_PER_SEC, fodFreq);
+
+ return 0;
+}
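For reference, a sketch (not part of the patch) of the arithmetic above: the compensation is 18 FOD periods, and it only applies when the FOD runs at 500 MHz or faster.

#include <stdio.h>

int main(void)
{
	unsigned long long fod_freq = 1000000000ULL; /* Hz, i.e. m/n from the FOD registers */
	unsigned int dco_delay = 0;

	if (fod_freq >= 500000000ULL)
		dco_delay = 18 * (unsigned int)(1000000000ULL / fod_freq);

	printf("%u ns\n", dco_delay); /* 18 ns at 1 GHz; a 500 MHz FOD gives 36 ns */
	return 0;
}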
+
+static int configure_channel_tod(struct idtcm_channel *channel, u32 index)
+{
+ enum fw_version fw_ver = channel->idtcm->fw_ver;
/* Set tod addresses */
switch (index) {
case 0:
- channel->tod_read_primary = TOD_READ_PRIMARY_0;
- channel->tod_write = TOD_WRITE_0;
- channel->tod_n = TOD_0;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_0);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_0);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_0);
+ channel->sync_src = SYNC_SOURCE_DPLL0_TOD_PPS;
break;
case 1:
- channel->tod_read_primary = TOD_READ_PRIMARY_1;
- channel->tod_write = TOD_WRITE_1;
- channel->tod_n = TOD_1;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_1);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_1);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_1);
+ channel->sync_src = SYNC_SOURCE_DPLL1_TOD_PPS;
break;
case 2:
- channel->tod_read_primary = TOD_READ_PRIMARY_2;
- channel->tod_write = TOD_WRITE_2;
- channel->tod_n = TOD_2;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_2);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_2);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_2);
+ channel->sync_src = SYNC_SOURCE_DPLL2_TOD_PPS;
break;
case 3:
- channel->tod_read_primary = TOD_READ_PRIMARY_3;
- channel->tod_write = TOD_WRITE_3;
- channel->tod_n = TOD_3;
+ channel->tod_read_primary = IDTCM_FW_REG(fw_ver, V520, TOD_READ_PRIMARY_3);
+ channel->tod_write = IDTCM_FW_REG(fw_ver, V520, TOD_WRITE_3);
+ channel->tod_n = IDTCM_FW_REG(fw_ver, V520, TOD_3);
+ channel->sync_src = SYNC_SOURCE_DPLL3_TOD_PPS;
break;
default:
return -EINVAL;
}
+ return 0;
+}
+
+static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_TOD))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+
channel->idtcm = idtcm;
+ channel->current_freq_scaled_ppm = 0;
+
+ /* Set pll addresses */
+ err = configure_channel_pll(channel);
+ if (err)
+ return err;
- if (idtcm->deprecated)
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
+ if (idtcm->fw_ver < V487)
channel->caps = idtcm_caps_deprecated;
else
channel->caps = idtcm_caps;
@@ -2101,30 +2251,19 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
snprintf(channel->caps.name, sizeof(channel->caps.name),
"IDT CM TOD%u", index);
- if (!idtcm->deprecated) {
- err = idtcm_enable_tod_sync(channel);
- if (err) {
- dev_err(&idtcm->client->dev,
- "Failed at line %d in %s!", __LINE__, __func__);
- return err;
- }
- }
-
- /* Sync pll mode with hardware */
- err = idtcm_get_pll_mode(channel, &channel->pll_mode);
- if (err) {
- dev_err(&idtcm->client->dev,
- "Error: %s - Unable to read pll mode", __func__);
+ err = initialize_dco_operating_mode(channel);
+ if (err)
return err;
- }
err = idtcm_enable_tod(channel);
if (err) {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"Failed at line %d in %s!", __LINE__, __func__);
return err;
}
+ channel->dco_delay = idtcm_get_dco_delay(channel);
+
channel->ptp_clock = ptp_clock_register(&channel->caps, NULL);
if (IS_ERR(channel->ptp_clock)) {
@@ -2136,12 +2275,59 @@ static int idtcm_enable_channel(struct idtcm *idtcm, u32 index)
if (!channel->ptp_clock)
return -ENOTSUPP;
- dev_info(&idtcm->client->dev, "PLL%d registered as ptp%d",
+ dev_info(idtcm->dev, "PLL%d registered as ptp%d",
index, channel->ptp_clock->index);
return 0;
}
+static int idtcm_enable_extts_channel(struct idtcm *idtcm, u32 index)
+{
+ struct idtcm_channel *channel;
+ int err;
+
+ if (!(index < MAX_TOD))
+ return -EINVAL;
+
+ channel = &idtcm->channel[index];
+ channel->idtcm = idtcm;
+
+ /* Set tod addresses */
+ err = configure_channel_tod(channel, index);
+ if (err)
+ return err;
+
+ channel->idtcm = idtcm;
+
+ return 0;
+}
+
+static void idtcm_extts_check(struct work_struct *work)
+{
+ struct idtcm *idtcm = container_of(work, struct idtcm, extts_work.work);
+ int err, i;
+
+ if (idtcm->extts_mask == 0)
+ return;
+
+ mutex_lock(idtcm->lock);
+ for (i = 0; i < MAX_TOD; i++) {
+ u8 mask = 1 << i;
+
+ if (idtcm->extts_mask & mask) {
+ err = idtcm_extts_check_channel(idtcm, i);
+ /* trigger clears itself, so clear the mask */
+ if (err == 0)
+ idtcm->extts_mask &= ~mask;
+ }
+ }
+
+ if (idtcm->extts_mask)
+ schedule_delayed_work(&idtcm->extts_work,
+ msecs_to_jiffies(EXTTS_PERIOD_MS));
+ mutex_unlock(idtcm->lock);
+}
+
static void ptp_clock_unregister_all(struct idtcm *idtcm)
{
u8 i;
@@ -2149,7 +2335,6 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
for (i = 0; i < MAX_TOD; i++) {
channel = &idtcm->channel[i];
-
if (channel->ptp_clock)
ptp_clock_unregister(channel->ptp_clock);
}
@@ -2158,6 +2343,7 @@ static void ptp_clock_unregister_all(struct idtcm *idtcm)
static void set_default_masks(struct idtcm *idtcm)
{
idtcm->tod_mask = DEFAULT_TOD_MASK;
+ idtcm->extts_mask = 0;
idtcm->channel[0].pll = DEFAULT_TOD0_PTP_PLL;
idtcm->channel[1].pll = DEFAULT_TOD1_PTP_PLL;
@@ -2170,158 +2356,86 @@ static void set_default_masks(struct idtcm *idtcm)
idtcm->channel[3].output_mask = DEFAULT_OUTPUT_MASK_PLL3;
}
-static int idtcm_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int idtcm_probe(struct platform_device *pdev)
{
+ struct rsmu_ddata *ddata = dev_get_drvdata(pdev->dev.parent);
struct idtcm *idtcm;
int err;
u8 i;
- /* Unused for now */
- (void)id;
-
- idtcm = devm_kzalloc(&client->dev, sizeof(struct idtcm), GFP_KERNEL);
+ idtcm = devm_kzalloc(&pdev->dev, sizeof(struct idtcm), GFP_KERNEL);
if (!idtcm)
return -ENOMEM;
- idtcm->client = client;
- idtcm->page_offset = 0xff;
+ idtcm->dev = &pdev->dev;
+ idtcm->mfd = pdev->dev.parent;
+ idtcm->lock = &ddata->lock;
+ idtcm->regmap = ddata->regmap;
idtcm->calculate_overhead_flag = 0;
+ INIT_DELAYED_WORK(&idtcm->extts_work, idtcm_extts_check);
+
set_default_masks(idtcm);
- mutex_init(&idtcm->reg_lock);
- mutex_lock(&idtcm->reg_lock);
+ mutex_lock(idtcm->lock);
idtcm_set_version_info(idtcm);
- err = idtcm_load_firmware(idtcm, &client->dev);
+ err = idtcm_load_firmware(idtcm, &pdev->dev);
+
if (err)
- dev_warn(&idtcm->client->dev, "loading firmware failed with %d", err);
+ dev_warn(idtcm->dev, "loading firmware failed with %d", err);
wait_for_chip_ready(idtcm);
if (idtcm->tod_mask) {
for (i = 0; i < MAX_TOD; i++) {
- if (idtcm->tod_mask & (1 << i)) {
+ if (idtcm->tod_mask & (1 << i))
err = idtcm_enable_channel(idtcm, i);
- if (err) {
- dev_err(&idtcm->client->dev,
- "idtcm_enable_channel %d failed!", i);
- break;
- }
+ else
+ err = idtcm_enable_extts_channel(idtcm, i);
+ if (err) {
+ dev_err(idtcm->dev,
+ "idtcm_enable_channel %d failed!", i);
+ break;
}
}
} else {
- dev_err(&idtcm->client->dev,
+ dev_err(idtcm->dev,
"no PLLs flagged as PHCs, nothing to do");
err = -ENODEV;
}
- mutex_unlock(&idtcm->reg_lock);
+ mutex_unlock(idtcm->lock);
if (err) {
ptp_clock_unregister_all(idtcm);
return err;
}
- i2c_set_clientdata(client, idtcm);
+ platform_set_drvdata(pdev, idtcm);
return 0;
}
-static int idtcm_remove(struct i2c_client *client)
+static int idtcm_remove(struct platform_device *pdev)
{
- struct idtcm *idtcm = i2c_get_clientdata(client);
+ struct idtcm *idtcm = platform_get_drvdata(pdev);
ptp_clock_unregister_all(idtcm);
- mutex_destroy(&idtcm->reg_lock);
+ cancel_delayed_work_sync(&idtcm->extts_work);
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id idtcm_dt_id[] = {
- { .compatible = "idt,8a34000" },
- { .compatible = "idt,8a34001" },
- { .compatible = "idt,8a34002" },
- { .compatible = "idt,8a34003" },
- { .compatible = "idt,8a34004" },
- { .compatible = "idt,8a34005" },
- { .compatible = "idt,8a34006" },
- { .compatible = "idt,8a34007" },
- { .compatible = "idt,8a34008" },
- { .compatible = "idt,8a34009" },
- { .compatible = "idt,8a34010" },
- { .compatible = "idt,8a34011" },
- { .compatible = "idt,8a34012" },
- { .compatible = "idt,8a34013" },
- { .compatible = "idt,8a34014" },
- { .compatible = "idt,8a34015" },
- { .compatible = "idt,8a34016" },
- { .compatible = "idt,8a34017" },
- { .compatible = "idt,8a34018" },
- { .compatible = "idt,8a34019" },
- { .compatible = "idt,8a34040" },
- { .compatible = "idt,8a34041" },
- { .compatible = "idt,8a34042" },
- { .compatible = "idt,8a34043" },
- { .compatible = "idt,8a34044" },
- { .compatible = "idt,8a34045" },
- { .compatible = "idt,8a34046" },
- { .compatible = "idt,8a34047" },
- { .compatible = "idt,8a34048" },
- { .compatible = "idt,8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(of, idtcm_dt_id);
-#endif
-
-static const struct i2c_device_id idtcm_i2c_id[] = {
- { "8a34000" },
- { "8a34001" },
- { "8a34002" },
- { "8a34003" },
- { "8a34004" },
- { "8a34005" },
- { "8a34006" },
- { "8a34007" },
- { "8a34008" },
- { "8a34009" },
- { "8a34010" },
- { "8a34011" },
- { "8a34012" },
- { "8a34013" },
- { "8a34014" },
- { "8a34015" },
- { "8a34016" },
- { "8a34017" },
- { "8a34018" },
- { "8a34019" },
- { "8a34040" },
- { "8a34041" },
- { "8a34042" },
- { "8a34043" },
- { "8a34044" },
- { "8a34045" },
- { "8a34046" },
- { "8a34047" },
- { "8a34048" },
- { "8a34049" },
- {},
-};
-MODULE_DEVICE_TABLE(i2c, idtcm_i2c_id);
-
-static struct i2c_driver idtcm_driver = {
+static struct platform_driver idtcm_driver = {
.driver = {
- .of_match_table = of_match_ptr(idtcm_dt_id),
- .name = "idtcm",
+ .name = "8a3400x-phc",
},
- .probe = idtcm_probe,
- .remove = idtcm_remove,
- .id_table = idtcm_i2c_id,
+ .probe = idtcm_probe,
+ .remove = idtcm_remove,
};
-module_i2c_driver(idtcm_driver);
+module_platform_driver(idtcm_driver);
diff --git a/drivers/ptp/ptp_clockmatrix.h b/drivers/ptp/ptp_clockmatrix.h
index fb323271063e..0f3059ae1fff 100644
--- a/drivers/ptp/ptp_clockmatrix.h
+++ b/drivers/ptp/ptp_clockmatrix.h
@@ -9,8 +9,8 @@
#define PTP_IDTCLOCKMATRIX_H
#include <linux/ktime.h>
-
-#include "idt8a340_reg.h"
+#include <linux/mfd/idt8a340_reg.h>
+#include <linux/regmap.h>
#define FW_FILENAME "idtcm.bin"
#define MAX_TOD (4)
@@ -44,7 +44,6 @@
#define DEFAULT_TOD2_PTP_PLL (2)
#define DEFAULT_TOD3_PTP_PLL (3)
-#define POST_SM_RESET_DELAY_MS (3000)
#define PHASE_PULL_IN_THRESHOLD_NS_DEPRECATED (150000)
#define PHASE_PULL_IN_THRESHOLD_NS (15000)
#define TOD_WRITE_OVERHEAD_COUNT_MAX (2)
@@ -57,66 +56,26 @@
#define IDTCM_MAX_WRITE_COUNT (512)
-#define FULL_FW_CFG_BYTES (SCRATCH - GPIO_USER_CONTROL)
-#define FULL_FW_CFG_SKIPPED_BYTES (((SCRATCH >> 7) \
- - (GPIO_USER_CONTROL >> 7)) \
- * 4) /* 4 bytes skipped every 0x80 */
-
-/* Values of DPLL_N.DPLL_MODE.PLL_MODE */
-enum pll_mode {
- PLL_MODE_MIN = 0,
- PLL_MODE_NORMAL = PLL_MODE_MIN,
- PLL_MODE_WRITE_PHASE = 1,
- PLL_MODE_WRITE_FREQUENCY = 2,
- PLL_MODE_GPIO_INC_DEC = 3,
- PLL_MODE_SYNTHESIS = 4,
- PLL_MODE_PHASE_MEASUREMENT = 5,
- PLL_MODE_DISABLED = 6,
- PLL_MODE_MAX = PLL_MODE_DISABLED,
-};
-
-enum hw_tod_write_trig_sel {
- HW_TOD_WR_TRIG_SEL_MIN = 0,
- HW_TOD_WR_TRIG_SEL_MSB = HW_TOD_WR_TRIG_SEL_MIN,
- HW_TOD_WR_TRIG_SEL_RESERVED = 1,
- HW_TOD_WR_TRIG_SEL_TOD_PPS = 2,
- HW_TOD_WR_TRIG_SEL_IRIGB_PPS = 3,
- HW_TOD_WR_TRIG_SEL_PWM_PPS = 4,
- HW_TOD_WR_TRIG_SEL_GPIO = 5,
- HW_TOD_WR_TRIG_SEL_FOD_SYNC = 6,
- WR_TRIG_SEL_MAX = HW_TOD_WR_TRIG_SEL_FOD_SYNC,
-};
-
-/* 4.8.7 only */
-enum scsr_tod_write_trig_sel {
- SCSR_TOD_WR_TRIG_SEL_DISABLE = 0,
- SCSR_TOD_WR_TRIG_SEL_IMMEDIATE = 1,
- SCSR_TOD_WR_TRIG_SEL_REFCLK = 2,
- SCSR_TOD_WR_TRIG_SEL_PWMPPS = 3,
- SCSR_TOD_WR_TRIG_SEL_TODPPS = 4,
- SCSR_TOD_WR_TRIG_SEL_SYNCFOD = 5,
- SCSR_TOD_WR_TRIG_SEL_GPIO = 6,
- SCSR_TOD_WR_TRIG_SEL_MAX = SCSR_TOD_WR_TRIG_SEL_GPIO,
-};
+#define PHASE_PULL_IN_MAX_PPB (144000)
+#define PHASE_PULL_IN_MIN_THRESHOLD_NS (2)
-/* 4.8.7 only */
-enum scsr_tod_write_type_sel {
- SCSR_TOD_WR_TYPE_SEL_ABSOLUTE = 0,
- SCSR_TOD_WR_TYPE_SEL_DELTA_PLUS = 1,
- SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS = 2,
- SCSR_TOD_WR_TYPE_SEL_MAX = SCSR_TOD_WR_TYPE_SEL_DELTA_MINUS,
+/*
+ * Return the register address appropriate for the passed-in firmware version
+ */
+#define IDTCM_FW_REG(FW, VER, REG) (((FW) < (VER)) ? (REG) : (REG##_##VER))
+enum fw_version {
+ V_DEFAULT = 0,
+ V487 = 1,
+ V520 = 2,
};
-/* Values STATUS.DPLL_SYS_STATUS.DPLL_SYS_STATE */
-enum dpll_state {
- DPLL_STATE_MIN = 0,
- DPLL_STATE_FREERUN = DPLL_STATE_MIN,
- DPLL_STATE_LOCKACQ = 1,
- DPLL_STATE_LOCKREC = 2,
- DPLL_STATE_LOCKED = 3,
- DPLL_STATE_HOLDOVER = 4,
- DPLL_STATE_OPEN_LOOP = 5,
- DPLL_STATE_MAX = DPLL_STATE_OPEN_LOOP,
+/* PTP PLL Mode */
+enum ptp_pll_mode {
+ PTP_PLL_MODE_MIN = 0,
+ PTP_PLL_MODE_WRITE_FREQUENCY = PTP_PLL_MODE_MIN,
+ PTP_PLL_MODE_WRITE_PHASE,
+ PTP_PLL_MODE_UNSUPPORTED,
+ PTP_PLL_MODE_MAX = PTP_PLL_MODE_UNSUPPORTED,
};
struct idtcm;
@@ -134,26 +93,40 @@ struct idtcm_channel {
u16 tod_write;
u16 tod_n;
u16 hw_dpll_n;
- enum pll_mode pll_mode;
+ u8 sync_src;
+ enum ptp_pll_mode mode;
+ int (*configure_write_frequency)(struct idtcm_channel *channel);
+ int (*configure_write_phase)(struct idtcm_channel *channel);
+ int (*do_phase_pull_in)(struct idtcm_channel *channel,
+ s32 offset_ns, u32 max_ffo_ppb);
+ s32 current_freq_scaled_ppm;
+ bool phase_pull_in;
+ u32 dco_delay;
+ /* last input trigger for extts */
+ u8 refn;
u8 pll;
u16 output_mask;
};
struct idtcm {
struct idtcm_channel channel[MAX_TOD];
- struct i2c_client *client;
- u8 page_offset;
+ struct device *dev;
u8 tod_mask;
char version[16];
- u8 deprecated;
-
+ enum fw_version fw_ver;
+ /* Polls for external time stamps */
+ u8 extts_mask;
+ struct delayed_work extts_work;
+ /* Remember the ptp channel to report extts */
+ struct idtcm_channel *event_channel[MAX_TOD];
+ /* Mutex to protect operations from being interrupted */
+ struct mutex *lock;
+ struct device *mfd;
+ struct regmap *regmap;
/* Overhead calculation for adjtime */
u8 calculate_overhead_flag;
s64 tod_write_overhead_ns;
ktime_t start_time;
-
- /* Protects I2C read/modify/write registers from concurrent access */
- struct mutex reg_lock;
};
struct idtcm_fwrc {
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index d0096cd7096a..4991054a2135 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -31,10 +31,10 @@ int kvm_arch_ptp_init(void)
ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
KVM_CLOCK_PAIRING_WALLCLOCK);
- if (ret == -KVM_ENOSYS || ret == -KVM_EOPNOTSUPP)
+ if (ret == -KVM_ENOSYS)
return -ENODEV;
- return 0;
+ return ret;
}
int kvm_arch_ptp_get_clock(struct timespec64 *ts)
diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index caf9b37c5eb1..34f943c8c9fd 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -4,6 +4,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/serial_8250.h>
@@ -72,7 +73,7 @@ struct tod_reg {
u32 status;
u32 uart_polarity;
u32 version;
- u32 correction_sec;
+ u32 adj_sec;
u32 __pad0[3];
u32 uart_baud;
u32 __pad1[3];
@@ -124,6 +125,55 @@ struct img_reg {
u32 version;
};
+struct gpio_reg {
+ u32 gpio1;
+ u32 __pad0;
+ u32 gpio2;
+ u32 __pad1;
+};
+
+struct irig_master_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+ u32 mode_ctrl;
+};
+
+#define IRIG_M_CTRL_ENABLE BIT(0)
+
+struct irig_slave_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+ u32 mode_ctrl;
+};
+
+#define IRIG_S_CTRL_ENABLE BIT(0)
+
+struct dcf_master_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+};
+
+#define DCF_M_CTRL_ENABLE BIT(0)
+
+struct dcf_slave_reg {
+ u32 ctrl;
+ u32 status;
+ u32 __pad0;
+ u32 version;
+ u32 adj_sec;
+};
+
+#define DCF_S_CTRL_ENABLE BIT(0)
+
struct ptp_ocp_flash_info {
const char *name;
int pci_offset;
@@ -131,11 +181,17 @@ struct ptp_ocp_flash_info {
void *data;
};
-struct ptp_ocp_ext_info {
+struct ptp_ocp_i2c_info {
const char *name;
+ unsigned long fixed_rate;
+ size_t data_size;
+ void *data;
+};
+
+struct ptp_ocp_ext_info {
int index;
irqreturn_t (*irq_fcn)(int irq, void *priv);
- int (*enable)(void *priv, bool enable);
+ int (*enable)(void *priv, u32 req, bool enable);
};
struct ptp_ocp_ext_src {
@@ -153,9 +209,17 @@ struct ptp_ocp {
struct tod_reg __iomem *tod;
struct pps_reg __iomem *pps_to_ext;
struct pps_reg __iomem *pps_to_clk;
+ struct gpio_reg __iomem *pps_select;
+ struct gpio_reg __iomem *sma;
+ struct irig_master_reg __iomem *irig_out;
+ struct irig_slave_reg __iomem *irig_in;
+ struct dcf_master_reg __iomem *dcf_out;
+ struct dcf_slave_reg __iomem *dcf_in;
+ struct tod_reg __iomem *nmea_out;
struct ptp_ocp_ext_src *pps;
struct ptp_ocp_ext_src *ts0;
struct ptp_ocp_ext_src *ts1;
+ struct ptp_ocp_ext_src *ts2;
struct img_reg __iomem *image;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
@@ -163,16 +227,25 @@ struct ptp_ocp {
struct platform_device *spi_flash;
struct clk_hw *i2c_clk;
struct timer_list watchdog;
+ struct dentry *debug_root;
time64_t gnss_lost;
int id;
int n_irqs;
int gnss_port;
+ int gnss2_port;
int mac_port; /* miniature atomic clock */
+ int nmea_port;
u8 serial[6];
- int flash_start;
bool has_serial;
+ u32 pps_req_map;
+ int flash_start;
+ u32 utc_tai_offset;
+ u32 ts_window_adjust;
};
+#define OCP_REQ_TIMESTAMP BIT(0)
+#define OCP_REQ_PPS BIT(1)
+
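These two request bits exist because one timestamper serves both PTP_CLK_REQ_PPS and EXTTS index 3. A sketch (not part of the patch) of the intended bookkeeping: the hardware is only toggled when the request map moves between zero and nonzero, as ptp_ocp_ts_enable() does further down.

#include <stdio.h>

#define REQ_TIMESTAMP (1u << 0)   /* mirrors OCP_REQ_TIMESTAMP */
#define REQ_PPS       (1u << 1)   /* mirrors OCP_REQ_PPS */

int main(void)
{
	unsigned int map = 0, old;

	old = map; map |= REQ_TIMESTAMP;           /* EXTTS user arrives */
	printf("toggle hw: %d\n", !old != !map);   /* 1: enable the timestamper */

	old = map; map |= REQ_PPS;                 /* PPS user arrives */
	printf("toggle hw: %d\n", !old != !map);   /* 0: already enabled */

	old = map; map &= ~REQ_TIMESTAMP;          /* EXTTS user leaves */
	printf("toggle hw: %d\n", !old != !map);   /* 0: PPS still needs it */
	return 0;
}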
struct ocp_resource {
unsigned long offset;
int size;
@@ -180,6 +253,7 @@ struct ocp_resource {
int (*setup)(struct ptp_ocp *bp, struct ocp_resource *r);
void *extra;
unsigned long bp_offset;
+ const char * const name;
};
static int ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r);
@@ -189,7 +263,7 @@ static int ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r);
static int ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r);
static irqreturn_t ptp_ocp_ts_irq(int irq, void *priv);
-static int ptp_ocp_ts_enable(void *priv, bool enable);
+static int ptp_ocp_ts_enable(void *priv, u32 req, bool enable);
#define bp_assign_entry(bp, res, val) ({ \
uintptr_t addr = (uintptr_t)(bp) + (res)->bp_offset; \
@@ -197,7 +271,7 @@ static int ptp_ocp_ts_enable(void *priv, bool enable);
})
#define OCP_RES_LOCATION(member) \
- .bp_offset = offsetof(struct ptp_ocp, member)
+ .name = #member, .bp_offset = offsetof(struct ptp_ocp, member)
#define OCP_MEM_RESOURCE(member) \
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_mem
@@ -215,16 +289,17 @@ static int ptp_ocp_ts_enable(void *priv, bool enable);
OCP_RES_LOCATION(member), .setup = ptp_ocp_register_ext
/* This is the MSI vector mapping used.
- * 0: N/C
+ * 0: TS3 (and PPS)
* 1: TS0
* 2: TS1
- * 3: GPS
- * 4: GPS2 (n/c)
+ * 3: GNSS
+ * 4: GNSS2
* 5: MAC
- * 6: SPI IMU (inertial measurement unit)
- * 7: I2C oscillator
- * 8: HWICAP
+ * 6: TS2
+ * 7: I2C controller
+ * 8: HWICAP (not used)
* 9: SPI Flash
+ * 10: NMEA
*/
static struct ocp_resource ocp_fb_resource[] = {
@@ -236,7 +311,7 @@ static struct ocp_resource ocp_fb_resource[] = {
OCP_EXT_RESOURCE(ts0),
.offset = 0x01010000, .size = 0x10000, .irq_vec = 1,
.extra = &(struct ptp_ocp_ext_info) {
- .name = "ts0", .index = 0,
+ .index = 0,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
@@ -245,7 +320,25 @@ static struct ocp_resource ocp_fb_resource[] = {
OCP_EXT_RESOURCE(ts1),
.offset = 0x01020000, .size = 0x10000, .irq_vec = 2,
.extra = &(struct ptp_ocp_ext_info) {
- .name = "ts1", .index = 1,
+ .index = 1,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(ts2),
+ .offset = 0x01060000, .size = 0x10000, .irq_vec = 6,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 2,
+ .irq_fcn = ptp_ocp_ts_irq,
+ .enable = ptp_ocp_ts_enable,
+ },
+ },
+ {
+ OCP_EXT_RESOURCE(pps),
+ .offset = 0x010C0000, .size = 0x10000, .irq_vec = 0,
+ .extra = &(struct ptp_ocp_ext_info) {
+ .index = 3,
.irq_fcn = ptp_ocp_ts_irq,
.enable = ptp_ocp_ts_enable,
},
@@ -263,22 +356,62 @@ static struct ocp_resource ocp_fb_resource[] = {
.offset = 0x01050000, .size = 0x10000,
},
{
+ OCP_MEM_RESOURCE(irig_in),
+ .offset = 0x01070000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(irig_out),
+ .offset = 0x01080000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(dcf_in),
+ .offset = 0x01090000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(dcf_out),
+ .offset = 0x010A0000, .size = 0x10000,
+ },
+ {
+ OCP_MEM_RESOURCE(nmea_out),
+ .offset = 0x010B0000, .size = 0x10000,
+ },
+ {
OCP_MEM_RESOURCE(image),
.offset = 0x00020000, .size = 0x1000,
},
{
+ OCP_MEM_RESOURCE(pps_select),
+ .offset = 0x00130000, .size = 0x1000,
+ },
+ {
+ OCP_MEM_RESOURCE(sma),
+ .offset = 0x00140000, .size = 0x1000,
+ },
+ {
OCP_I2C_RESOURCE(i2c_ctrl),
.offset = 0x00150000, .size = 0x10000, .irq_vec = 7,
+ .extra = &(struct ptp_ocp_i2c_info) {
+ .name = "xiic-i2c",
+ .fixed_rate = 50000000,
+ },
},
{
OCP_SERIAL_RESOURCE(gnss_port),
.offset = 0x00160000 + 0x1000, .irq_vec = 3,
},
{
+ OCP_SERIAL_RESOURCE(gnss2_port),
+ .offset = 0x00170000 + 0x1000, .irq_vec = 4,
+ },
+ {
OCP_SERIAL_RESOURCE(mac_port),
.offset = 0x00180000 + 0x1000, .irq_vec = 5,
},
{
+ OCP_SERIAL_RESOURCE(nmea_port),
+ .offset = 0x00190000 + 0x1000, .irq_vec = 10,
+ },
+ {
OCP_SPI_RESOURCE(spi_flash),
.offset = 0x00310000, .size = 0x10000, .irq_vec = 9,
.extra = &(struct ptp_ocp_flash_info) {
@@ -309,10 +442,12 @@ MODULE_DEVICE_TABLE(pci, ptp_ocp_pcidev_id);
static DEFINE_MUTEX(ptp_ocp_lock);
static DEFINE_IDR(ptp_ocp_idr);
-static struct {
+struct ocp_selector {
const char *name;
int value;
-} ptp_ocp_clock[] = {
+};
+
+static struct ocp_selector ptp_ocp_clock[] = {
{ .name = "NONE", .value = 0 },
{ .name = "TOD", .value = 1 },
{ .name = "IRIG", .value = 2 },
@@ -322,33 +457,71 @@ static struct {
{ .name = "DCF", .value = 6 },
{ .name = "REGS", .value = 0xfe },
{ .name = "EXT", .value = 0xff },
+ { }
+};
+
+static struct ocp_selector ptp_ocp_sma_in[] = {
+ { .name = "10Mhz", .value = 0x00 },
+ { .name = "PPS1", .value = 0x01 },
+ { .name = "PPS2", .value = 0x02 },
+ { .name = "TS1", .value = 0x04 },
+ { .name = "TS2", .value = 0x08 },
+ { .name = "IRIG", .value = 0x10 },
+ { .name = "DCF", .value = 0x20 },
+ { }
+};
+
+static struct ocp_selector ptp_ocp_sma_out[] = {
+ { .name = "10Mhz", .value = 0x00 },
+ { .name = "PHC", .value = 0x01 },
+ { .name = "MAC", .value = 0x02 },
+ { .name = "GNSS", .value = 0x04 },
+ { .name = "GNSS2", .value = 0x08 },
+ { .name = "IRIG", .value = 0x10 },
+ { .name = "DCF", .value = 0x20 },
+ { }
};
static const char *
-ptp_ocp_clock_name_from_val(int val)
+ptp_ocp_select_name_from_val(struct ocp_selector *tbl, int val)
{
int i;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++)
- if (ptp_ocp_clock[i].value == val)
- return ptp_ocp_clock[i].name;
+ for (i = 0; tbl[i].name; i++)
+ if (tbl[i].value == val)
+ return tbl[i].name;
return NULL;
}
static int
-ptp_ocp_clock_val_from_name(const char *name)
+ptp_ocp_select_val_from_name(struct ocp_selector *tbl, const char *name)
{
- const char *clk;
+ const char *select;
int i;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
- clk = ptp_ocp_clock[i].name;
- if (!strncasecmp(name, clk, strlen(clk)))
- return ptp_ocp_clock[i].value;
+ for (i = 0; tbl[i].name; i++) {
+ select = tbl[i].name;
+ if (!strncasecmp(name, select, strlen(select)))
+ return tbl[i].value;
}
return -EINVAL;
}
+static ssize_t
+ptp_ocp_select_table_show(struct ocp_selector *tbl, char *buf)
+{
+ ssize_t count;
+ int i;
+
+ count = 0;
+ for (i = 0; tbl[i].name; i++)
+ count += sysfs_emit_at(buf, count, "%s ", tbl[i].name);
+ if (count)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+ return count;
+}
+
static int
__ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
struct ptp_system_timestamp *sts)
@@ -356,10 +529,9 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
u32 ctrl, time_sec, time_ns;
int i;
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_READ_TIME_REQ;
-
ptp_read_system_prets(sts);
+
+ ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
for (i = 0; i < 100; i++) {
@@ -369,6 +541,12 @@ __ptp_ocp_gettime_locked(struct ptp_ocp *bp, struct timespec64 *ts,
}
ptp_read_system_postts(sts);
+ if (sts && bp->ts_window_adjust) {
+ s64 ns = timespec64_to_ns(&sts->post_ts);
+
+ sts->post_ts = ns_to_timespec64(ns - bp->ts_window_adjust);
+ }
+
time_ns = ioread32(&bp->reg->time_ns);
time_sec = ioread32(&bp->reg->time_sec);
@@ -408,8 +586,7 @@ __ptp_ocp_settime_locked(struct ptp_ocp *bp, const struct timespec64 *ts)
iowrite32(time_ns, &bp->reg->adjust_ns);
iowrite32(time_sec, &bp->reg->adjust_sec);
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ADJUST_TIME;
+ ctrl = OCP_CTRL_ADJUST_TIME | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
@@ -422,9 +599,6 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
unsigned long flags;
- if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
- return 0;
-
spin_lock_irqsave(&bp->lock, flags);
__ptp_ocp_settime_locked(bp, ts);
spin_unlock_irqrestore(&bp->lock, flags);
@@ -432,26 +606,39 @@ ptp_ocp_settime(struct ptp_clock_info *ptp_info, const struct timespec64 *ts)
return 0;
}
+static void
+__ptp_ocp_adjtime_locked(struct ptp_ocp *bp, u64 adj_val)
+{
+ u32 select, ctrl;
+
+ select = ioread32(&bp->reg->select);
+ iowrite32(OCP_SELECT_CLK_REG, &bp->reg->select);
+
+ iowrite32(adj_val, &bp->reg->offset_ns);
+ iowrite32(adj_val & 0x7f, &bp->reg->offset_window_ns);
+
+ ctrl = OCP_CTRL_ADJUST_OFFSET | OCP_CTRL_ENABLE;
+ iowrite32(ctrl, &bp->reg->ctrl);
+
+ /* restore clock selection */
+ iowrite32(select >> 16, &bp->reg->select);
+}
+
static int
ptp_ocp_adjtime(struct ptp_clock_info *ptp_info, s64 delta_ns)
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
- struct timespec64 ts;
unsigned long flags;
- int err;
+ u32 adj_ns, sign;
- if (ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC)
- return 0;
+ sign = delta_ns < 0 ? BIT(31) : 0;
+ adj_ns = sign ? -delta_ns : delta_ns;
spin_lock_irqsave(&bp->lock, flags);
- err = __ptp_ocp_gettime_locked(bp, &ts, NULL);
- if (likely(!err)) {
- timespec64_add_ns(&ts, delta_ns);
- __ptp_ocp_settime_locked(bp, &ts);
- }
+ __ptp_ocp_adjtime_locked(bp, sign | adj_ns);
spin_unlock_irqrestore(&bp->lock, flags);
- return err;
+ return 0;
}
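A sketch (not part of the patch) of the offset encoding handed to the FPGA above: sign-magnitude, with bit 31 carrying the sign and bits 30:0 the magnitude in nanoseconds.

#include <stdio.h>

int main(void)
{
	long long delta_ns = -250;
	unsigned int sign = delta_ns < 0 ? (1u << 31) : 0;
	unsigned int adj_ns = sign ? (unsigned int)-delta_ns : (unsigned int)delta_ns;

	printf("0x%08x\n", sign | adj_ns); /* 0x800000fa: step back 250 ns */
	return 0;
}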
static int
@@ -464,7 +651,7 @@ ptp_ocp_null_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm)
}
static int
-ptp_ocp_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
+ptp_ocp_null_adjphase(struct ptp_clock_info *ptp_info, s32 phase_ns)
{
return -EOPNOTSUPP;
}
@@ -475,10 +662,12 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
{
struct ptp_ocp *bp = container_of(ptp_info, struct ptp_ocp, ptp_info);
struct ptp_ocp_ext_src *ext = NULL;
+ u32 req;
int err;
switch (rq->type) {
case PTP_CLK_REQ_EXTTS:
+ req = OCP_REQ_TIMESTAMP;
switch (rq->extts.index) {
case 0:
ext = bp->ts0;
@@ -486,18 +675,33 @@ ptp_ocp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq,
case 1:
ext = bp->ts1;
break;
+ case 2:
+ ext = bp->ts2;
+ break;
+ case 3:
+ ext = bp->pps;
+ break;
}
break;
case PTP_CLK_REQ_PPS:
+ req = OCP_REQ_PPS;
ext = bp->pps;
break;
+ case PTP_CLK_REQ_PEROUT:
+ if (on &&
+ (rq->perout.period.sec != 1 || rq->perout.period.nsec != 0))
+ return -EINVAL;
+ /* This is a request for 1PPS on an output SMA.
+ * Allow, but assume manual configuration.
+ */
+ return 0;
default:
return -EOPNOTSUPP;
}
err = -ENXIO;
if (ext)
- err = ext->info->enable(ext, on);
+ err = ext->info->enable(ext, req, on);
return err;
}
@@ -510,10 +714,11 @@ static const struct ptp_clock_info ptp_ocp_clock_info = {
.settime64 = ptp_ocp_settime,
.adjtime = ptp_ocp_adjtime,
.adjfine = ptp_ocp_null_adjfine,
- .adjphase = ptp_ocp_adjphase,
+ .adjphase = ptp_ocp_null_adjphase,
.enable = ptp_ocp_enable,
.pps = true,
- .n_ext_ts = 2,
+ .n_ext_ts = 4,
+ .n_per_out = 1,
};
static void
@@ -526,8 +731,7 @@ __ptp_ocp_clear_drift_locked(struct ptp_ocp *bp)
iowrite32(0, &bp->reg->drift_ns);
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ADJUST_DRIFT;
+ ctrl = OCP_CTRL_ADJUST_DRIFT | OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* restore clock selection */
@@ -559,6 +763,28 @@ ptp_ocp_watchdog(struct timer_list *t)
mod_timer(&bp->watchdog, jiffies + HZ);
}
+static void
+ptp_ocp_estimate_pci_timing(struct ptp_ocp *bp)
+{
+ ktime_t start, end;
+ ktime_t delay;
+ u32 ctrl;
+
+ ctrl = ioread32(&bp->reg->ctrl);
+ ctrl = OCP_CTRL_READ_TIME_REQ | OCP_CTRL_ENABLE;
+
+ iowrite32(ctrl, &bp->reg->ctrl);
+
+ start = ktime_get_ns();
+
+ ctrl = ioread32(&bp->reg->ctrl);
+
+ end = ktime_get_ns();
+
+ delay = end - start;
+ bp->ts_window_adjust = (delay >> 5) * 3;
+}
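A rough worked example (not part of the patch; the 3/32 factor is simply the shift-and-multiply above): ts_window_adjust is a fraction of one register write-plus-read round trip, which gettimex() later subtracts from post_ts to tighten the reported timestamp window.

#include <stdio.h>

int main(void)
{
	unsigned long long delay_ns = 640; /* measured iowrite32 + ioread32 round trip */
	unsigned long long adjust = (delay_ns >> 5) * 3;

	printf("%llu ns\n", adjust); /* 60 ns shaved off post_ts */
	return 0;
}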
+
static int
ptp_ocp_init_clock(struct ptp_ocp *bp)
{
@@ -566,9 +792,7 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
bool sync;
u32 ctrl;
- /* make sure clock is enabled */
- ctrl = ioread32(&bp->reg->ctrl);
- ctrl |= OCP_CTRL_ENABLE;
+ ctrl = OCP_CTRL_ENABLE;
iowrite32(ctrl, &bp->reg->ctrl);
/* NO DRIFT Correction */
@@ -587,23 +811,58 @@ ptp_ocp_init_clock(struct ptp_ocp *bp)
return -ENODEV;
}
+ ptp_ocp_estimate_pci_timing(bp);
+
sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
if (!sync) {
- ktime_get_real_ts64(&ts);
+ ktime_get_clocktai_ts64(&ts);
ptp_ocp_settime(&bp->ptp_info, &ts);
}
- if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
- dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
- ts.tv_sec, ts.tv_nsec,
- sync ? "in-sync" : "UNSYNCED");
- timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
- mod_timer(&bp->watchdog, jiffies + HZ);
+ /* If there is a clock supervisor, then enable the watchdog */
+ if (bp->pps_to_clk) {
+ timer_setup(&bp->watchdog, ptp_ocp_watchdog, 0);
+ mod_timer(&bp->watchdog, jiffies + HZ);
+ }
return 0;
}
static void
+ptp_ocp_utc_distribute(struct ptp_ocp *bp, u32 val)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ bp->utc_tai_offset = val;
+
+ if (bp->irig_out)
+ iowrite32(val, &bp->irig_out->adj_sec);
+ if (bp->dcf_out)
+ iowrite32(val, &bp->dcf_out->adj_sec);
+ if (bp->nmea_out)
+ iowrite32(val, &bp->nmea_out->adj_sec);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_tod_init(struct ptp_ocp *bp)
+{
+ u32 ctrl, reg;
+
+ ctrl = ioread32(&bp->tod->ctrl);
+ ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
+ ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
+ iowrite32(ctrl, &bp->tod->ctrl);
+
+ reg = ioread32(&bp->tod->utc_status);
+ if (reg & TOD_STATUS_UTC_VALID)
+ ptp_ocp_utc_distribute(bp, reg & TOD_STATUS_UTC_MASK);
+}
+
+static void
ptp_ocp_tod_info(struct ptp_ocp *bp)
{
static const char * const proto_name[] = {
@@ -621,11 +880,6 @@ ptp_ocp_tod_info(struct ptp_ocp *bp)
version >> 24, (version >> 16) & 0xff, version & 0xffff);
ctrl = ioread32(&bp->tod->ctrl);
- ctrl |= TOD_CTRL_PROTOCOL | TOD_CTRL_ENABLE;
- ctrl &= ~(TOD_CTRL_DISABLE_FMT_A | TOD_CTRL_DISABLE_FMT_B);
- iowrite32(ctrl, &bp->tod->ctrl);
-
- ctrl = ioread32(&bp->tod->ctrl);
idx = ctrl & TOD_CTRL_PROTOCOL ? 4 : 0;
idx += (ctrl >> 16) & 3;
dev_info(&bp->pdev->dev, "control: %x\n", ctrl);
@@ -639,7 +893,7 @@ ptp_ocp_tod_info(struct ptp_ocp *bp)
reg = ioread32(&bp->tod->status);
dev_info(&bp->pdev->dev, "status: %x\n", reg);
- reg = ioread32(&bp->tod->correction_sec);
+ reg = ioread32(&bp->tod->adj_sec);
dev_info(&bp->pdev->dev, "correction: %d\n", reg);
reg = ioread32(&bp->tod->utc_status);
@@ -695,6 +949,9 @@ ptp_ocp_get_serial_number(struct ptp_ocp *bp)
struct device *dev;
int err;
+ if (!bp->i2c_ctrl)
+ return;
+
dev = device_find_child(&bp->i2c_ctrl->dev, NULL, ptp_ocp_firstchild);
if (!dev) {
dev_err(&bp->pdev->dev, "Can't find I2C adapter\n");
@@ -720,21 +977,6 @@ out:
put_device(dev);
}
-static void
-ptp_ocp_info(struct ptp_ocp *bp)
-{
- u32 version, select;
-
- version = ioread32(&bp->reg->version);
- select = ioread32(&bp->reg->select);
- dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
- version >> 24, (version >> 16) & 0xff, version & 0xffff,
- ptp_ocp_clock_name_from_val(select >> 16),
- ptp_clock_index(bp->ptp));
-
- ptp_ocp_tod_info(bp);
-}
-
static struct device *
ptp_ocp_find_flash(struct ptp_ocp *bp)
{
@@ -910,18 +1152,6 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
unsigned long start;
int id;
- /* XXX hack to work around old FPGA */
- if (bp->n_irqs < 10) {
- dev_err(&bp->pdev->dev, "FPGA does not have SPI devices\n");
- return 0;
- }
-
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "spi device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
@@ -944,41 +1174,41 @@ ptp_ocp_register_spi(struct ptp_ocp *bp, struct ocp_resource *r)
static struct platform_device *
ptp_ocp_i2c_bus(struct pci_dev *pdev, struct ocp_resource *r, int id)
{
+ struct ptp_ocp_i2c_info *info;
struct resource res[2];
unsigned long start;
+ info = r->extra;
start = pci_resource_start(pdev, 0) + r->offset;
ptp_ocp_set_mem_resource(&res[0], start, r->size);
ptp_ocp_set_irq_resource(&res[1], pci_irq_vector(pdev, r->irq_vec));
- return platform_device_register_resndata(&pdev->dev, "xiic-i2c",
- id, res, 2, NULL, 0);
+ return platform_device_register_resndata(&pdev->dev, info->name,
+ id, res, 2,
+ info->data, info->data_size);
}
static int
ptp_ocp_register_i2c(struct ptp_ocp *bp, struct ocp_resource *r)
{
struct pci_dev *pdev = bp->pdev;
+ struct ptp_ocp_i2c_info *info;
struct platform_device *p;
struct clk_hw *clk;
char buf[32];
int id;
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "i2c device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
+ info = r->extra;
id = pci_dev_id(bp->pdev);
sprintf(buf, "AXI.%d", id);
- clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0, 50000000);
+ clk = clk_hw_register_fixed_rate(&pdev->dev, buf, NULL, 0,
+ info->fixed_rate);
if (IS_ERR(clk))
return PTR_ERR(clk);
bp->i2c_clk = clk;
- sprintf(buf, "xiic-i2c.%d", id);
+ sprintf(buf, "%s.%d", info->name, id);
devm_clk_hw_register_clkdev(&pdev->dev, clk, NULL, buf);
p = ptp_ocp_i2c_bus(bp->pdev, r, id);
if (IS_ERR(p))
@@ -997,26 +1227,51 @@ ptp_ocp_ts_irq(int irq, void *priv)
struct ptp_clock_event ev;
u32 sec, nsec;
+ if (ext == ext->bp->pps) {
+ if (ext->bp->pps_req_map & OCP_REQ_PPS) {
+ ev.type = PTP_CLOCK_PPS;
+ ptp_clock_event(ext->bp->ptp, &ev);
+ }
+
+ if ((ext->bp->pps_req_map & ~OCP_REQ_PPS) == 0)
+ goto out;
+ }
+
/* XXX should fix API - this converts s/ns -> ts -> s/ns */
sec = ioread32(&reg->time_sec);
nsec = ioread32(&reg->time_ns);
ev.type = PTP_CLOCK_EXTTS;
ev.index = ext->info->index;
- ev.timestamp = sec * 1000000000ULL + nsec;
+ ev.timestamp = sec * NSEC_PER_SEC + nsec;
ptp_clock_event(ext->bp->ptp, &ev);
+out:
iowrite32(1, &reg->intr); /* write 1 to ack */
return IRQ_HANDLED;
}
static int
-ptp_ocp_ts_enable(void *priv, bool enable)
+ptp_ocp_ts_enable(void *priv, u32 req, bool enable)
{
struct ptp_ocp_ext_src *ext = priv;
struct ts_reg __iomem *reg = ext->mem;
+ struct ptp_ocp *bp = ext->bp;
+
+ if (ext == bp->pps) {
+ u32 old_map = bp->pps_req_map;
+
+ if (enable)
+ bp->pps_req_map |= req;
+ else
+ bp->pps_req_map &= ~req;
+
+ /* if no state change, just return */
+ if ((!!old_map ^ !!bp->pps_req_map) == 0)
+ return 0;
+ }
if (enable) {
iowrite32(1, &reg->enable);
@@ -1033,7 +1288,7 @@ ptp_ocp_ts_enable(void *priv, bool enable)
static void
ptp_ocp_unregister_ext(struct ptp_ocp_ext_src *ext)
{
- ext->info->enable(ext, false);
+ ext->info->enable(ext, ~0, false);
pci_free_irq(ext->bp->pdev, ext->irq_vec, ext);
kfree(ext);
}
@@ -1059,7 +1314,7 @@ ptp_ocp_register_ext(struct ptp_ocp *bp, struct ocp_resource *r)
ext->irq_vec = r->irq_vec;
err = pci_request_irq(pdev, r->irq_vec, ext->info->irq_fcn, NULL,
- ext, "ocp%d.%s", bp->id, ext->info->name);
+ ext, "ocp%d.%s", bp->id, r->name);
if (err) {
dev_err(&pdev->dev, "Could not get irq %d\n", r->irq_vec);
goto out;
@@ -1101,12 +1356,6 @@ ptp_ocp_register_serial(struct ptp_ocp *bp, struct ocp_resource *r)
{
int port;
- if (r->irq_vec > bp->n_irqs) {
- dev_err(&bp->pdev->dev, "serial device irq %d out of range\n",
- r->irq_vec);
- return 0;
- }
-
port = ptp_ocp_serial_line(bp, r);
if (port < 0)
return port;
@@ -1130,15 +1379,40 @@ ptp_ocp_register_mem(struct ptp_ocp *bp, struct ocp_resource *r)
return 0;
}
+static void
+ptp_ocp_nmea_out_init(struct ptp_ocp *bp)
+{
+ if (!bp->nmea_out)
+ return;
+
+ iowrite32(0, &bp->nmea_out->ctrl); /* disable */
+ iowrite32(7, &bp->nmea_out->uart_baud); /* 115200 */
+ iowrite32(1, &bp->nmea_out->ctrl); /* enable */
+}
+
/* FB specific board initializers; last "resource" registered. */
static int
ptp_ocp_fb_board_init(struct ptp_ocp *bp, struct ocp_resource *r)
{
bp->flash_start = 1024 * 4096;
+ ptp_ocp_tod_init(bp);
+ ptp_ocp_nmea_out_init(bp);
+
return ptp_ocp_init_clock(bp);
}
+static bool
+ptp_ocp_allow_irq(struct ptp_ocp *bp, struct ocp_resource *r)
+{
+ bool allow = !r->irq_vec || r->irq_vec < bp->n_irqs;
+
+ if (!allow)
+ dev_err(&bp->pdev->dev, "irq %d out of range, skipping %s\n",
+ r->irq_vec, r->name);
+ return allow;
+}
+
static int
ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
{
@@ -1147,13 +1421,373 @@ ptp_ocp_register_resources(struct ptp_ocp *bp, kernel_ulong_t driver_data)
table = (struct ocp_resource *)driver_data;
for (r = table; r->setup; r++) {
+ if (!ptp_ocp_allow_irq(bp, r))
+ continue;
err = r->setup(bp, r);
- if (err)
+ if (err) {
+ dev_err(&bp->pdev->dev,
+ "Could not register %s: err %d\n",
+ r->name, err);
break;
+ }
}
return err;
}
+static void
+ptp_ocp_enable_fpga(u32 __iomem *reg, u32 bit, bool enable)
+{
+ u32 ctrl;
+ bool on;
+
+ ctrl = ioread32(reg);
+ on = ctrl & bit;
+ if (on ^ enable) {
+ ctrl &= ~bit;
+ ctrl |= enable ? bit : 0;
+ iowrite32(ctrl, reg);
+ }
+}
+
+static void
+ptp_ocp_irig_out(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->irig_out->ctrl,
+ IRIG_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_irig_in(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->irig_in->ctrl,
+ IRIG_S_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_out(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->dcf_out->ctrl,
+ DCF_M_CTRL_ENABLE, enable);
+}
+
+static void
+ptp_ocp_dcf_in(struct ptp_ocp *bp, bool enable)
+{
+ return ptp_ocp_enable_fpga(&bp->dcf_in->ctrl,
+ DCF_S_CTRL_ENABLE, enable);
+}
+
+static void
+__handle_signal_outputs(struct ptp_ocp *bp, u32 val)
+{
+ ptp_ocp_irig_out(bp, val & 0x00100010);
+ ptp_ocp_dcf_out(bp, val & 0x00200020);
+}
+
+static void
+__handle_signal_inputs(struct ptp_ocp *bp, u32 val)
+{
+ ptp_ocp_irig_in(bp, val & 0x00100010);
+ ptp_ocp_dcf_in(bp, val & 0x00200020);
+}
+
+/*
+ * ANT0 == gps (in)
+ * ANT1 == sma1 (in)
+ * ANT2 == sma2 (in)
+ * ANT3 == sma3 (out)
+ * ANT4 == sma4 (out)
+ */
+
+enum ptp_ocp_sma_mode {
+ SMA_MODE_IN,
+ SMA_MODE_OUT,
+};
+
+static struct ptp_ocp_sma_connector {
+ enum ptp_ocp_sma_mode mode;
+ bool fixed_mode;
+ u16 default_out_idx;
+} ptp_ocp_sma_map[4] = {
+ {
+ .mode = SMA_MODE_IN,
+ .fixed_mode = true,
+ },
+ {
+ .mode = SMA_MODE_IN,
+ .fixed_mode = true,
+ },
+ {
+ .mode = SMA_MODE_OUT,
+ .fixed_mode = true,
+ .default_out_idx = 0, /* 10Mhz */
+ },
+ {
+ .mode = SMA_MODE_OUT,
+ .fixed_mode = true,
+ .default_out_idx = 1, /* PHC */
+ },
+};
+
+static ssize_t
+ptp_ocp_show_output(u32 val, char *buf, int default_idx)
+{
+ const char *name;
+ ssize_t count;
+
+ count = sysfs_emit(buf, "OUT: ");
+ name = ptp_ocp_select_name_from_val(ptp_ocp_sma_out, val);
+ if (!name)
+ name = ptp_ocp_sma_out[default_idx].name;
+ count += sysfs_emit_at(buf, count, "%s\n", name);
+ return count;
+}
+
+static ssize_t
+ptp_ocp_show_inputs(u32 val, char *buf, const char *zero_in)
+{
+ const char *name;
+ ssize_t count;
+ int i;
+
+ count = sysfs_emit(buf, "IN: ");
+ for (i = 0; i < ARRAY_SIZE(ptp_ocp_sma_in); i++) {
+ if (val & ptp_ocp_sma_in[i].value) {
+ name = ptp_ocp_sma_in[i].name;
+ count += sysfs_emit_at(buf, count, "%s ", name);
+ }
+ }
+ if (!val && zero_in)
+ count += sysfs_emit_at(buf, count, "%s ", zero_in);
+ if (count)
+ count--;
+ count += sysfs_emit_at(buf, count, "\n");
+ return count;
+}
+
+static int
+sma_parse_inputs(const char *buf, enum ptp_ocp_sma_mode *mode)
+{
+ struct ocp_selector *tbl[] = { ptp_ocp_sma_in, ptp_ocp_sma_out };
+ int idx, count, dir;
+ char **argv;
+ int ret;
+
+ argv = argv_split(GFP_KERNEL, buf, &count);
+ if (!argv)
+ return -ENOMEM;
+
+ ret = -EINVAL;
+ if (!count)
+ goto out;
+
+ idx = 0;
+ dir = *mode == SMA_MODE_IN ? 0 : 1;
+ if (!strcasecmp("IN:", argv[idx])) {
+ dir = 0;
+ idx++;
+ }
+ if (!strcasecmp("OUT:", argv[0])) {
+ dir = 1;
+ idx++;
+ }
+ *mode = dir == 0 ? SMA_MODE_IN : SMA_MODE_OUT;
+
+ ret = 0;
+ for (; idx < count; idx++)
+ ret |= ptp_ocp_select_val_from_name(tbl[dir], argv[idx]);
+ if (ret < 0)
+ ret = -EINVAL;
+
+out:
+ argv_free(argv);
+ return ret;
+}
+
+static ssize_t
+ptp_ocp_sma_show(struct ptp_ocp *bp, int sma_nr, u32 val, char *buf,
+ const char *zero_in)
+{
+ struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+
+ if (sma->mode == SMA_MODE_IN)
+ return ptp_ocp_show_inputs(val, buf, zero_in);
+
+ return ptp_ocp_show_output(val, buf, sma->default_out_idx);
+}
+
+static ssize_t
+sma1_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->sma->gpio1) & 0x3f;
+ return ptp_ocp_sma_show(bp, 1, val, buf, ptp_ocp_sma_in[0].name);
+}
+
+static ssize_t
+sma2_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = (ioread32(&bp->sma->gpio1) >> 16) & 0x3f;
+ return ptp_ocp_sma_show(bp, 2, val, buf, NULL);
+}
+
+static ssize_t
+sma3_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->sma->gpio2) & 0x3f;
+ return ptp_ocp_sma_show(bp, 3, val, buf, NULL);
+}
+
+static ssize_t
+sma4_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = (ioread32(&bp->sma->gpio2) >> 16) & 0x3f;
+ return ptp_ocp_sma_show(bp, 4, val, buf, NULL);
+}
+
+static void
+ptp_ocp_sma_store_output(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+ unsigned long flags;
+ u32 gpio, mask;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ gpio = ioread32(&bp->sma->gpio2);
+ gpio = (gpio & mask) | (val << shift);
+
+ __handle_signal_outputs(bp, gpio);
+
+ iowrite32(gpio, &bp->sma->gpio2);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static void
+ptp_ocp_sma_store_inputs(struct ptp_ocp *bp, u32 val, u32 shift)
+{
+ unsigned long flags;
+ u32 gpio, mask;
+
+ mask = 0xffff << (16 - shift);
+
+ spin_lock_irqsave(&bp->lock, flags);
+
+ gpio = ioread32(&bp->sma->gpio1);
+ gpio = (gpio & mask) | (val << shift);
+
+ __handle_signal_inputs(bp, gpio);
+
+ iowrite32(gpio, &bp->sma->gpio1);
+
+ spin_unlock_irqrestore(&bp->lock, flags);
+}
+
+static ssize_t
+ptp_ocp_sma_store(struct ptp_ocp *bp, const char *buf, int sma_nr, u32 shift)
+{
+ struct ptp_ocp_sma_connector *sma = &ptp_ocp_sma_map[sma_nr - 1];
+ enum ptp_ocp_sma_mode mode;
+ int val;
+
+ mode = sma->mode;
+ val = sma_parse_inputs(buf, &mode);
+ if (val < 0)
+ return val;
+
+ if (mode != sma->mode && sma->fixed_mode)
+ return -EOPNOTSUPP;
+
+ if (mode != sma->mode) {
+ pr_err("Mode changes not supported yet.\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (sma->mode == SMA_MODE_IN)
+ ptp_ocp_sma_store_inputs(bp, val, shift);
+ else
+ ptp_ocp_sma_store_output(bp, val, shift);
+
+ return 0;
+}
+
+static ssize_t
+sma1_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 1, 0);
+ return err ? err : count;
+}
+
+static ssize_t
+sma2_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 2, 16);
+ return err ? err : count;
+}
+
+static ssize_t
+sma3_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 3, 0);
+ return err ? err : count;
+}
+
+static ssize_t
+sma4_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+
+ err = ptp_ocp_sma_store(bp, buf, 4, 16);
+ return err ? err : count;
+}
+static DEVICE_ATTR_RW(sma1);
+static DEVICE_ATTR_RW(sma2);
+static DEVICE_ATTR_RW(sma3);
+static DEVICE_ATTR_RW(sma4);
+
+static ssize_t
+available_sma_inputs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ptp_ocp_select_table_show(ptp_ocp_sma_in, buf);
+}
+static DEVICE_ATTR_RO(available_sma_inputs);
+
+static ssize_t
+available_sma_outputs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return ptp_ocp_select_table_show(ptp_ocp_sma_out, buf);
+}
+static DEVICE_ATTR_RO(available_sma_outputs);
+
static ssize_t
serialnum_show(struct device *dev, struct device_attribute *attr, char *buf)
{
@@ -1182,6 +1816,102 @@ gnss_sync_show(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR_RO(gnss_sync);
static ssize_t
+utc_tai_offset_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", bp->utc_tai_offset);
+}
+
+static ssize_t
+utc_tai_offset_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = kstrtou32(buf, 0, &val);
+ if (err)
+ return err;
+
+ ptp_ocp_utc_distribute(bp, val);
+
+ return count;
+}
+static DEVICE_ATTR_RW(utc_tai_offset);
+
+static ssize_t
+ts_window_adjust_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", bp->ts_window_adjust);
+}
+
+static ssize_t
+ts_window_adjust_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ int err;
+ u32 val;
+
+ err = kstrtou32(buf, 0, &val);
+ if (err)
+ return err;
+
+ bp->ts_window_adjust = val;
+
+ return count;
+}
+static DEVICE_ATTR_RW(ts_window_adjust);
+
+static ssize_t
+irig_b_mode_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ u32 val;
+
+ val = ioread32(&bp->irig_out->ctrl);
+ val = (val >> 16) & 0x07;
+ return sysfs_emit(buf, "%d\n", val);
+}
+
+static ssize_t
+irig_b_mode_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ptp_ocp *bp = dev_get_drvdata(dev);
+ unsigned long flags;
+ int err;
+ u32 reg;
+ u8 val;
+
+ err = kstrtou8(buf, 0, &val);
+ if (err)
+ return err;
+ if (val > 7)
+ return -EINVAL;
+
+ reg = ((val & 0x7) << 16);
+
+ spin_lock_irqsave(&bp->lock, flags);
+ iowrite32(0, &bp->irig_out->ctrl); /* disable */
+ iowrite32(reg, &bp->irig_out->ctrl); /* change mode */
+ iowrite32(reg | IRIG_M_CTRL_ENABLE, &bp->irig_out->ctrl);
+ spin_unlock_irqrestore(&bp->lock, flags);
+
+ return count;
+}
+static DEVICE_ATTR_RW(irig_b_mode);
+
+static ssize_t
clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct ptp_ocp *bp = dev_get_drvdata(dev);
@@ -1189,7 +1919,7 @@ clock_source_show(struct device *dev, struct device_attribute *attr, char *buf)
u32 select;
select = ioread32(&bp->reg->select);
- p = ptp_ocp_clock_name_from_val(select >> 16);
+ p = ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16);
return sysfs_emit(buf, "%s\n", p);
}
@@ -1202,7 +1932,7 @@ clock_source_store(struct device *dev, struct device_attribute *attr,
unsigned long flags;
int val;
- val = ptp_ocp_clock_val_from_name(buf);
+ val = ptp_ocp_select_val_from_name(ptp_ocp_clock, buf);
if (val < 0)
return val;
@@ -1218,19 +1948,7 @@ static ssize_t
available_clock_sources_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- const char *clk;
- ssize_t count;
- int i;
-
- count = 0;
- for (i = 0; i < ARRAY_SIZE(ptp_ocp_clock); i++) {
- clk = ptp_ocp_clock[i].name;
- count += sysfs_emit_at(buf, count, "%s ", clk);
- }
- if (count)
- count--;
- count += sysfs_emit_at(buf, count, "\n");
- return count;
+ return ptp_ocp_select_table_show(ptp_ocp_clock, buf);
}
static DEVICE_ATTR_RO(available_clock_sources);
@@ -1239,10 +1957,258 @@ static struct attribute *timecard_attrs[] = {
&dev_attr_gnss_sync.attr,
&dev_attr_clock_source.attr,
&dev_attr_available_clock_sources.attr,
+ &dev_attr_sma1.attr,
+ &dev_attr_sma2.attr,
+ &dev_attr_sma3.attr,
+ &dev_attr_sma4.attr,
+ &dev_attr_available_sma_inputs.attr,
+ &dev_attr_available_sma_outputs.attr,
+ &dev_attr_irig_b_mode.attr,
+ &dev_attr_utc_tai_offset.attr,
+ &dev_attr_ts_window_adjust.attr,
NULL,
};
ATTRIBUTE_GROUPS(timecard);
+static const char *
+gpio_map(u32 gpio, u32 bit, const char *pri, const char *sec, const char *def)
+{
+ const char *ans;
+
+ if (gpio & (1 << bit))
+ ans = pri;
+ else if (gpio & (1 << (bit + 16)))
+ ans = sec;
+ else
+ ans = def;
+ return ans;
+}
+
+static void
+gpio_multi_map(char *buf, u32 gpio, u32 bit,
+ const char *pri, const char *sec, const char *def)
+{
+ char *ans = buf;
+
+ strcpy(ans, def);
+ if (gpio & (1 << bit))
+ ans += sprintf(ans, "%s ", pri);
+ if (gpio & (1 << (bit + 16)))
+ ans += sprintf(ans, "%s ", sec);
+}
+
+static int
+ptp_ocp_summary_show(struct seq_file *s, void *data)
+{
+ struct device *dev = s->private;
+ struct ptp_system_timestamp sts;
+ u32 sma_in, sma_out, ctrl, val;
+ struct ts_reg __iomem *ts_reg;
+ struct timespec64 ts;
+ struct ptp_ocp *bp;
+ const char *src;
+ bool on, map;
+ char *buf;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ bp = dev_get_drvdata(dev);
+ sma_in = ioread32(&bp->sma->gpio1);
+ sma_out = ioread32(&bp->sma->gpio2);
+
+ seq_printf(s, "%7s: /dev/ptp%d\n", "PTP", ptp_clock_index(bp->ptp));
+
+ sma1_show(dev, NULL, buf);
+ seq_printf(s, " sma1: %s", buf);
+
+ sma2_show(dev, NULL, buf);
+ seq_printf(s, " sma2: %s", buf);
+
+ sma3_show(dev, NULL, buf);
+ seq_printf(s, " sma3: %s", buf);
+
+ sma4_show(dev, NULL, buf);
+ seq_printf(s, " sma4: %s", buf);
+
+ if (bp->ts0) {
+ ts_reg = bp->ts0->mem;
+ on = ioread32(&ts_reg->enable);
+ src = "GNSS";
+ seq_printf(s, "%7s: %s, src: %s\n", "TS0",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->ts1) {
+ ts_reg = bp->ts1->mem;
+ on = ioread32(&ts_reg->enable);
+ src = gpio_map(sma_in, 2, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, src: %s\n", "TS1",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->ts2) {
+ ts_reg = bp->ts2->mem;
+ on = ioread32(&ts_reg->enable);
+ src = gpio_map(sma_in, 3, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, src: %s\n", "TS2",
+ on ? " ON" : "OFF", src);
+ }
+
+ if (bp->pps) {
+ ts_reg = bp->pps->mem;
+ src = "PHC";
+ on = ioread32(&ts_reg->enable);
+ map = !!(bp->pps_req_map & OCP_REQ_TIMESTAMP);
+ seq_printf(s, "%7s: %s, src: %s\n", "TS3",
+ on && map ? " ON" : "OFF", src);
+
+ map = !!(bp->pps_req_map & OCP_REQ_PPS);
+ seq_printf(s, "%7s: %s, src: %s\n", "PPS",
+ on && map ? " ON" : "OFF", src);
+ }
+
+ if (bp->irig_out) {
+ ctrl = ioread32(&bp->irig_out->ctrl);
+ on = ctrl & IRIG_M_CTRL_ENABLE;
+ val = ioread32(&bp->irig_out->status);
+ gpio_multi_map(buf, sma_out, 4, "sma3", "sma4", "----");
+ seq_printf(s, "%7s: %s, error: %d, mode %d, out: %s\n", "IRIG",
+ on ? " ON" : "OFF", val, (ctrl >> 16), buf);
+ }
+
+ if (bp->irig_in) {
+ on = ioread32(&bp->irig_in->ctrl) & IRIG_S_CTRL_ENABLE;
+ val = ioread32(&bp->irig_in->status);
+ src = gpio_map(sma_in, 4, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, error: %d, src: %s\n", "IRIG in",
+ on ? " ON" : "OFF", val, src);
+ }
+
+ if (bp->dcf_out) {
+ on = ioread32(&bp->dcf_out->ctrl) & DCF_M_CTRL_ENABLE;
+ val = ioread32(&bp->dcf_out->status);
+ gpio_multi_map(buf, sma_out, 5, "sma3", "sma4", "----");
+ seq_printf(s, "%7s: %s, error: %d, out: %s\n", "DCF",
+ on ? " ON" : "OFF", val, buf);
+ }
+
+ if (bp->dcf_in) {
+ on = ioread32(&bp->dcf_in->ctrl) & DCF_S_CTRL_ENABLE;
+ val = ioread32(&bp->dcf_in->status);
+ src = gpio_map(sma_in, 5, "sma1", "sma2", "----");
+ seq_printf(s, "%7s: %s, error: %d, src: %s\n", "DCF in",
+ on ? " ON" : "OFF", val, src);
+ }
+
+ if (bp->nmea_out) {
+ on = ioread32(&bp->nmea_out->ctrl) & 1;
+ val = ioread32(&bp->nmea_out->status);
+ seq_printf(s, "%7s: %s, error: %d\n", "NMEA",
+ on ? " ON" : "OFF", val);
+ }
+
+ /* compute src for PPS1, used below. */
+ if (bp->pps_select) {
+ val = ioread32(&bp->pps_select->gpio1);
+ if (val & 0x01)
+ src = gpio_map(sma_in, 0, "sma1", "sma2", "----");
+ else if (val & 0x02)
+ src = "MAC";
+ else if (val & 0x04)
+ src = "GNSS";
+ else
+ src = "----";
+ } else {
+ src = "?";
+ }
+
+ /* assumes automatic switchover/selection */
+ val = ioread32(&bp->reg->select);
+ switch (val >> 16) {
+ case 0:
+ sprintf(buf, "----");
+ break;
+ case 2:
+ sprintf(buf, "IRIG");
+ break;
+ case 3:
+ sprintf(buf, "%s via PPS1", src);
+ break;
+ case 6:
+ sprintf(buf, "DCF");
+ break;
+ default:
+ strcpy(buf, "unknown");
+ break;
+ }
+ val = ioread32(&bp->reg->status);
+ seq_printf(s, "%7s: %s, state: %s\n", "PHC src", buf,
+ val & OCP_STATUS_IN_SYNC ? "sync" : "unsynced");
+
+ /* reuses PPS1 src from earlier */
+ seq_printf(s, "MAC PPS1 src: %s\n", src);
+
+ src = gpio_map(sma_in, 1, "sma1", "sma2", "GNSS2");
+ seq_printf(s, "MAC PPS2 src: %s\n", src);
+
+ if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, &sts)) {
+ struct timespec64 sys_ts;
+ s64 pre_ns, post_ns, ns;
+
+ pre_ns = timespec64_to_ns(&sts.pre_ts);
+ post_ns = timespec64_to_ns(&sts.post_ts);
+ ns = (pre_ns + post_ns) / 2;
+ ns += (s64)bp->utc_tai_offset * NSEC_PER_SEC;
+ sys_ts = ns_to_timespec64(ns);
+
+ seq_printf(s, "%7s: %lld.%ld == %ptT TAI\n", "PHC",
+ ts.tv_sec, ts.tv_nsec, &ts);
+ seq_printf(s, "%7s: %lld.%ld == %ptT UTC offset %d\n", "SYS",
+ sys_ts.tv_sec, sys_ts.tv_nsec, &sys_ts,
+ bp->utc_tai_offset);
+ seq_printf(s, "%7s: PHC:SYS offset: %lld window: %lld\n", "",
+ timespec64_to_ns(&ts) - ns,
+ post_ns - pre_ns);
+ }
+
+ free_page((unsigned long)buf);
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ptp_ocp_summary);
+
+static struct dentry *ptp_ocp_debugfs_root;
+
+static void
+ptp_ocp_debugfs_add_device(struct ptp_ocp *bp)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir(dev_name(&bp->dev), ptp_ocp_debugfs_root);
+ bp->debug_root = d;
+ debugfs_create_file("summary", 0444, bp->debug_root,
+ &bp->dev, &ptp_ocp_summary_fops);
+}
+
+static void
+ptp_ocp_debugfs_remove_device(struct ptp_ocp *bp)
+{
+ debugfs_remove_recursive(bp->debug_root);
+}
+
+static void
+ptp_ocp_debugfs_init(void)
+{
+ ptp_ocp_debugfs_root = debugfs_create_dir("timecard", NULL);
+}
+
+static void
+ptp_ocp_debugfs_fini(void)
+{
+ debugfs_remove_recursive(ptp_ocp_debugfs_root);
+}
+
static void
ptp_ocp_dev_release(struct device *dev)
{
@@ -1270,7 +2236,9 @@ ptp_ocp_device_init(struct ptp_ocp *bp, struct pci_dev *pdev)
bp->ptp_info = ptp_ocp_clock_info;
spin_lock_init(&bp->lock);
bp->gnss_port = -1;
+ bp->gnss2_port = -1;
bp->mac_port = -1;
+ bp->nmea_port = -1;
bp->pdev = pdev;
device_initialize(&bp->dev);
@@ -1332,10 +2300,18 @@ ptp_ocp_complete(struct ptp_ocp *bp)
sprintf(buf, "ttyS%d", bp->gnss_port);
ptp_ocp_link_child(bp, buf, "ttyGNSS");
}
+ if (bp->gnss2_port != -1) {
+ sprintf(buf, "ttyS%d", bp->gnss2_port);
+ ptp_ocp_link_child(bp, buf, "ttyGNSS2");
+ }
if (bp->mac_port != -1) {
sprintf(buf, "ttyS%d", bp->mac_port);
ptp_ocp_link_child(bp, buf, "ttyMAC");
}
+ if (bp->nmea_port != -1) {
+ sprintf(buf, "ttyS%d", bp->nmea_port);
+ ptp_ocp_link_child(bp, buf, "ttyNMEA");
+ }
sprintf(buf, "ptp%d", ptp_clock_index(bp->ptp));
ptp_ocp_link_child(bp, buf, "ptp");
@@ -1346,13 +2322,53 @@ ptp_ocp_complete(struct ptp_ocp *bp)
if (device_add_groups(&bp->dev, timecard_groups))
pr_err("device add groups failed\n");
+ ptp_ocp_debugfs_add_device(bp);
+
return 0;
}
static void
-ptp_ocp_resource_summary(struct ptp_ocp *bp)
+ptp_ocp_phc_info(struct ptp_ocp *bp)
{
+ struct timespec64 ts;
+ u32 version, select;
+ bool sync;
+
+ version = ioread32(&bp->reg->version);
+ select = ioread32(&bp->reg->select);
+ dev_info(&bp->pdev->dev, "Version %d.%d.%d, clock %s, device ptp%d\n",
+ version >> 24, (version >> 16) & 0xff, version & 0xffff,
+ ptp_ocp_select_name_from_val(ptp_ocp_clock, select >> 16),
+ ptp_clock_index(bp->ptp));
+
+ sync = ioread32(&bp->reg->status) & OCP_STATUS_IN_SYNC;
+ if (!ptp_ocp_gettimex(&bp->ptp_info, &ts, NULL))
+ dev_info(&bp->pdev->dev, "Time: %lld.%ld, %s\n",
+ ts.tv_sec, ts.tv_nsec,
+ sync ? "in-sync" : "UNSYNCED");
+}
+
+static void
+ptp_ocp_serial_info(struct device *dev, const char *name, int port, int baud)
+{
+ if (port != -1)
+ dev_info(dev, "%5s: /dev/ttyS%-2d @ %6d\n", name, port, baud);
+}
+
+static void
+ptp_ocp_info(struct ptp_ocp *bp)
+{
+ static int nmea_baud[] = {
+ 1200, 2400, 4800, 9600, 19200, 38400,
+ 57600, 115200, 230400, 460800, 921600,
+ 1000000, 2000000
+ };
struct device *dev = &bp->pdev->dev;
+ u32 reg;
+
+ ptp_ocp_phc_info(bp);
+ if (bp->tod)
+ ptp_ocp_tod_info(bp);
if (bp->image) {
u32 ver = ioread32(&bp->image->version);
@@ -1365,10 +2381,17 @@ ptp_ocp_resource_summary(struct ptp_ocp *bp)
dev_info(dev, "golden image, version %d\n",
ver >> 16);
}
- if (bp->gnss_port != -1)
- dev_info(dev, "GNSS @ /dev/ttyS%d 115200\n", bp->gnss_port);
- if (bp->mac_port != -1)
- dev_info(dev, "MAC @ /dev/ttyS%d 57600\n", bp->mac_port);
+ ptp_ocp_serial_info(dev, "GNSS", bp->gnss_port, 115200);
+ ptp_ocp_serial_info(dev, "GNSS2", bp->gnss2_port, 115200);
+ ptp_ocp_serial_info(dev, "MAC", bp->mac_port, 57600);
+ if (bp->nmea_out && bp->nmea_port != -1) {
+ int baud = -1;
+
+ reg = ioread32(&bp->nmea_out->uart_baud);
+ if (reg < ARRAY_SIZE(nmea_baud))
+ baud = nmea_baud[reg];
+ ptp_ocp_serial_info(dev, "NMEA", bp->nmea_port, baud);
+ }
}
static void
@@ -1386,6 +2409,7 @@ ptp_ocp_detach_sysfs(struct ptp_ocp *bp)
static void
ptp_ocp_detach(struct ptp_ocp *bp)
{
+ ptp_ocp_debugfs_remove_device(bp);
ptp_ocp_detach_sysfs(bp);
if (timer_pending(&bp->watchdog))
del_timer_sync(&bp->watchdog);
@@ -1393,12 +2417,18 @@ ptp_ocp_detach(struct ptp_ocp *bp)
ptp_ocp_unregister_ext(bp->ts0);
if (bp->ts1)
ptp_ocp_unregister_ext(bp->ts1);
+ if (bp->ts2)
+ ptp_ocp_unregister_ext(bp->ts2);
if (bp->pps)
ptp_ocp_unregister_ext(bp->pps);
if (bp->gnss_port != -1)
serial8250_unregister_port(bp->gnss_port);
+ if (bp->gnss2_port != -1)
+ serial8250_unregister_port(bp->gnss2_port);
if (bp->mac_port != -1)
serial8250_unregister_port(bp->mac_port);
+ if (bp->nmea_port != -1)
+ serial8250_unregister_port(bp->nmea_port);
if (bp->spi_flash)
platform_device_unregister(bp->spi_flash);
if (bp->i2c_ctrl)
@@ -1425,10 +2455,6 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENOMEM;
}
- err = devlink_register(devlink);
- if (err)
- goto out_free;
-
err = pci_enable_device(pdev);
if (err) {
dev_err(&pdev->dev, "pci_enable_device\n");
@@ -1445,7 +2471,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
* allow this - if not all of the IRQ's are returned, skip the
* extra devices and just register the clock.
*/
- err = pci_alloc_irq_vectors(pdev, 1, 10, PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ err = pci_alloc_irq_vectors(pdev, 1, 11, PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (err < 0) {
dev_err(&pdev->dev, "alloc_irq_vectors err: %d\n", err);
goto out;
@@ -1470,8 +2496,7 @@ ptp_ocp_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
ptp_ocp_info(bp);
- ptp_ocp_resource_summary(bp);
-
+ devlink_register(devlink);
return 0;
out:
@@ -1480,10 +2505,7 @@ out:
out_disable:
pci_disable_device(pdev);
out_unregister:
- devlink_unregister(devlink);
-out_free:
devlink_free(devlink);
-
return err;
}
@@ -1493,11 +2515,11 @@ ptp_ocp_remove(struct pci_dev *pdev)
struct ptp_ocp *bp = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(bp);
+ devlink_unregister(devlink);
ptp_ocp_detach(bp);
pci_set_drvdata(pdev, NULL);
pci_disable_device(pdev);
- devlink_unregister(devlink);
devlink_free(devlink);
}
@@ -1554,6 +2576,8 @@ ptp_ocp_init(void)
const char *what;
int err;
+ ptp_ocp_debugfs_init();
+
what = "timecard class";
err = class_register(&timecard_class);
if (err)
@@ -1576,6 +2600,7 @@ out_register:
out_notifier:
class_unregister(&timecard_class);
out:
+ ptp_ocp_debugfs_fini();
pr_err(KBUILD_MODNAME ": failed to register %s: %d\n", what, err);
return err;
}
@@ -1586,6 +2611,7 @@ ptp_ocp_fini(void)
bus_unregister_notifier(&i2c_bus_type, &ptp_ocp_i2c_notifier);
pci_unregister_driver(&ptp_ocp_driver);
class_unregister(&timecard_class);
+ ptp_ocp_debugfs_fini();
}
module_init(ptp_ocp_init);
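
The ptp_ocp changes above add per-connector sysfs attributes (sma1..sma4, available_sma_inputs, available_sma_outputs) alongside the existing serialnum, gnss_sync and clock_source files, plus utc_tai_offset, ts_window_adjust and irig_b_mode, and a debugfs "summary" view. As an illustration only, not part of the patch, here is a minimal standalone userspace sketch that dumps those attributes; it assumes the device's sysfs directory is /sys/class/timecard/ocp0, inferred from the "timecard" class and "ocp%d" naming used by the driver.

/* Illustrative only; the /sys/class/timecard/ocp0 path is an assumption. */
#include <stdio.h>

static void show(const char *dir, const char *attr)
{
	char path[256], line[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	if (fgets(line, sizeof(line), f))
		printf("%-24s %s", attr, line);   /* attribute values end in '\n' */
	fclose(f);
}

int main(void)
{
	const char *dir = "/sys/class/timecard/ocp0";   /* assumed device directory */
	const char *attrs[] = {
		"serialnum", "gnss_sync", "clock_source",
		"available_clock_sources", "sma1", "sma2", "sma3", "sma4",
		"available_sma_inputs", "available_sma_outputs",
		"irig_b_mode", "utc_tai_offset", "ts_window_adjust",
	};
	size_t i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
		show(dir, attrs[i]);
	return 0;
}

Writing follows the same convention parsed by sma_parse_inputs() above: an optional "IN:" or "OUT:" prefix followed by tokens listed in available_sma_inputs or available_sma_outputs, e.g. echo "IN: ..." > sma1 with a token taken from the available_sma_inputs file.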
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 4fd13b06231f..6be9b1c8a615 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -609,12 +609,12 @@ config REGULATOR_MAX8952
modes ranging from 0.77V to 1.40V by 0.01V steps.
config REGULATOR_MAX8973
- tristate "Maxim MAX8973 voltage regulator "
+ tristate "Maxim MAX8973A voltage regulator"
depends on I2C
depends on THERMAL && THERMAL_OF
select REGMAP_I2C
help
- The MAXIM MAX8973 high-efficiency. three phase, DC-DC step-down
+ The MAXIM MAX8973A high-efficiency, three-phase DC-DC step-down
switching regulator delivers up to 9A of output current. Each
phase operates at a 2MHz fixed frequency with a 120 deg shift
from the adjacent phase, allowing the use of small magnetic component.
@@ -1181,7 +1181,7 @@ config REGULATOR_STPMIC1
config REGULATOR_TI_ABB
tristate "TI Adaptive Body Bias on-chip LDO"
- depends on ARCH_OMAP
+ depends on ARCH_OMAP || COMPILE_TEST
help
Select this option to support Texas Instruments' on-chip Adaptive Body
Bias (ABB) LDO regulators. It is recommended that this option be
@@ -1339,15 +1339,6 @@ config REGULATOR_TPS65912
help
This driver supports TPS65912 voltage regulator chip.
-config REGULATOR_TPS80031
- tristate "TI TPS80031/TPS80032 power regulator driver"
- depends on MFD_TPS80031
- help
- TPS80031/ TPS80032 Fully Integrated Power Management with Power
- Path and Battery Charger. It has 5 configurable step-down
- converters, 11 general purpose LDOs, VBUS generator and digital
- output to control regulators.
-
config REGULATOR_TWL4030
tristate "TI TWL4030/TWL5030/TWL6030/TPS659x0 PMIC"
depends on TWL4030_CORE
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 9e382b50a5ef..b07d2a22df0b 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -158,7 +158,6 @@ obj-$(CONFIG_REGULATOR_TPS6524X) += tps6524x-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o
obj-$(CONFIG_REGULATOR_TPS65912) += tps65912-regulator.o
-obj-$(CONFIG_REGULATOR_TPS80031) += tps80031-regulator.o
obj-$(CONFIG_REGULATOR_TPS65132) += tps65132-regulator.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o
obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o
diff --git a/drivers/regulator/bd71815-regulator.c b/drivers/regulator/bd71815-regulator.c
index 16edd9062ca9..acaa6607898e 100644
--- a/drivers/regulator/bd71815-regulator.c
+++ b/drivers/regulator/bd71815-regulator.c
@@ -461,9 +461,9 @@ static const struct regulator_ops bd7181x_led_regulator_ops = {
.min_uV = (min), \
.uV_step = (step), \
.vsel_reg = (vsel), \
- .vsel_mask = 0x3f, \
+ .vsel_mask = BD71815_VOLT_MASK, \
.enable_reg = (ereg), \
- .enable_mask = 0x04, \
+ .enable_mask = BD71815_BUCK_RUN_ON, \
.ramp_reg = (ereg), \
.ramp_mask = BD71815_BUCK_RAMPRATE_MASK, \
.ramp_delay_table = bd7181x_ramp_table, \
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index ca6caba8a191..86aa4141efa9 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -145,7 +145,7 @@ static inline int regulator_lock_nested(struct regulator_dev *rdev,
mutex_lock(&regulator_nesting_mutex);
- if (ww_ctx || !ww_mutex_trylock(&rdev->mutex)) {
+ if (!ww_mutex_trylock(&rdev->mutex, ww_ctx)) {
if (rdev->mutex_owner == current)
rdev->ref_cnt++;
else
@@ -1151,9 +1151,10 @@ static int machine_constraints_voltage(struct regulator_dev *rdev,
}
if (current_uV < 0) {
- rdev_err(rdev,
- "failed to get the current voltage: %pe\n",
- ERR_PTR(current_uV));
+ if (current_uV != -EPROBE_DEFER)
+ rdev_err(rdev,
+ "failed to get the current voltage: %pe\n",
+ ERR_PTR(current_uV));
return current_uV;
}
@@ -1570,7 +1571,7 @@ static int set_supply(struct regulator_dev *rdev,
{
int err;
- rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
+ rdev_dbg(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
if (!try_module_get(supply_rdev->owner))
return -ENODEV;
@@ -4248,6 +4249,9 @@ int regulator_sync_voltage(struct regulator *regulator)
struct regulator_voltage *voltage = &regulator->voltage[PM_SUSPEND_ON];
int ret, min_uV, max_uV;
+ if (!regulator_ops_is_valid(rdev, REGULATOR_CHANGE_VOLTAGE))
+ return 0;
+
regulator_lock(rdev);
if (!rdev->desc->ops->set_voltage &&
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index d8059f596391..24e586f93855 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -45,7 +45,8 @@ static int dummy_regulator_probe(struct platform_device *pdev)
config.dev = &pdev->dev;
config.init_data = &dummy_initdata;
- dummy_regulator_rdev = regulator_register(&dummy_desc, &config);
+ dummy_regulator_rdev = devm_regulator_register(&pdev->dev, &dummy_desc,
+ &config);
if (IS_ERR(dummy_regulator_rdev)) {
ret = PTR_ERR(dummy_regulator_rdev);
pr_err("Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/lp872x.c b/drivers/regulator/lp872x.c
index e84be29533f4..35d826fe9def 100644
--- a/drivers/regulator/lp872x.c
+++ b/drivers/regulator/lp872x.c
@@ -10,13 +10,12 @@
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/err.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/delay.h>
#include <linux/regulator/lp872x.h>
#include <linux/regulator/driver.h>
#include <linux/platform_device.h>
#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
/* Registers : LP8720/8725 shared */
@@ -104,7 +103,7 @@ struct lp872x {
enum lp872x_id chipid;
struct lp872x_platform_data *pdata;
int num_regulators;
- enum lp872x_dvs_state dvs_pin;
+ enum gpiod_flags dvs_pin;
};
/* LP8720/LP8725 shared voltage table for LDOs */
@@ -250,12 +249,12 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev)
}
static void lp872x_set_dvs(struct lp872x *lp, enum lp872x_dvs_sel dvs_sel,
- int gpio)
+ struct gpio_desc *gpio)
{
- enum lp872x_dvs_state state;
+ enum gpiod_flags state;
- state = dvs_sel == SEL_V1 ? DVS_HIGH : DVS_LOW;
- gpio_set_value(gpio, state);
+ state = dvs_sel == SEL_V1 ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ gpiod_set_value(gpio, state);
lp->dvs_pin = state;
}
@@ -270,7 +269,7 @@ static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
switch (buck) {
case LP8720_ID_BUCK:
if (val & LP8720_EXT_DVS_M) {
- addr = (lp->dvs_pin == DVS_HIGH) ?
+ addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
LP8720_BUCK_VOUT1 : LP8720_BUCK_VOUT2;
} else {
if (lp872x_read_byte(lp, LP8720_ENABLE, &val))
@@ -284,7 +283,7 @@ static u8 lp872x_select_buck_vout_addr(struct lp872x *lp,
if (val & LP8725_DVS1_M)
addr = LP8725_BUCK1_VOUT1;
else
- addr = (lp->dvs_pin == DVS_HIGH) ?
+ addr = (lp->dvs_pin == GPIOD_OUT_HIGH) ?
LP8725_BUCK1_VOUT1 : LP8725_BUCK1_VOUT2;
break;
case LP8725_ID_BUCK2:
@@ -321,7 +320,7 @@ static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev,
u8 addr, mask = LP872X_VOUT_M;
struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
- if (dvs && gpio_is_valid(dvs->gpio))
+ if (dvs && dvs->gpio)
lp872x_set_dvs(lp, dvs->vsel, dvs->gpio);
addr = lp872x_select_buck_vout_addr(lp, buck);
@@ -675,24 +674,23 @@ static const struct regulator_desc lp8725_regulator_desc[] = {
static int lp872x_init_dvs(struct lp872x *lp)
{
- int ret, gpio;
struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL;
- enum lp872x_dvs_state pinstate;
+ enum gpiod_flags pinstate;
u8 mask[] = { LP8720_EXT_DVS_M, LP8725_DVS1_M | LP8725_DVS2_M };
u8 default_dvs_mode[] = { LP8720_DEFAULT_DVS, LP8725_DEFAULT_DVS };
if (!dvs)
goto set_default_dvs_mode;
- gpio = dvs->gpio;
- if (!gpio_is_valid(gpio))
+ if (!dvs->gpio)
goto set_default_dvs_mode;
pinstate = dvs->init_state;
- ret = devm_gpio_request_one(lp->dev, gpio, pinstate, "LP872X DVS");
- if (ret) {
- dev_err(lp->dev, "gpio request err: %d\n", ret);
- return ret;
+ dvs->gpio = devm_gpiod_get_optional(lp->dev, "ti,dvs", pinstate);
+
+ if (IS_ERR(dvs->gpio)) {
+ dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(dvs->gpio));
+ return PTR_ERR(dvs->gpio);
}
lp->dvs_pin = pinstate;
@@ -706,20 +704,17 @@ set_default_dvs_mode:
static int lp872x_hw_enable(struct lp872x *lp)
{
- int ret, gpio;
-
if (!lp->pdata)
return -EINVAL;
- gpio = lp->pdata->enable_gpio;
- if (!gpio_is_valid(gpio))
+ if (!lp->pdata->enable_gpio)
return 0;
/* Always set enable GPIO high. */
- ret = devm_gpio_request_one(lp->dev, gpio, GPIOF_OUT_INIT_HIGH, "LP872X EN");
- if (ret) {
- dev_err(lp->dev, "gpio request err: %d\n", ret);
- return ret;
+ lp->pdata->enable_gpio = devm_gpiod_get_optional(lp->dev, "enable", GPIOD_OUT_HIGH);
+ if (IS_ERR(lp->pdata->enable_gpio)) {
+ dev_err(lp->dev, "gpio request err: %ld\n", PTR_ERR(lp->pdata->enable_gpio));
+ return PTR_ERR(lp->pdata->enable_gpio);
}
/* Each chip has a different enable delay. */
@@ -844,12 +839,9 @@ static struct lp872x_platform_data
if (!pdata->dvs)
return ERR_PTR(-ENOMEM);
- pdata->dvs->gpio = of_get_named_gpio(np, "ti,dvs-gpio", 0);
of_property_read_u8(np, "ti,dvs-vsel", (u8 *)&pdata->dvs->vsel);
of_property_read_u8(np, "ti,dvs-state", &dvs_state);
- pdata->dvs->init_state = dvs_state ? DVS_HIGH : DVS_LOW;
-
- pdata->enable_gpio = of_get_named_gpio(np, "enable-gpios", 0);
+ pdata->dvs->init_state = dvs_state ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
if (of_get_child_count(np) == 0)
goto out;
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 8da8f9b6c4fd..80b65cb87cef 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -1,7 +1,7 @@
/*
- * max8973-regulator.c -- Maxim max8973
+ * max8973-regulator.c -- Maxim max8973A
*
- * Regulator driver for MAXIM 8973 DC-DC step-down switching regulator.
+ * Regulator driver for MAXIM 8973A DC-DC step-down switching regulator.
*
* Copyright (c) 2012, NVIDIA Corporation.
*
diff --git a/drivers/regulator/pwm-regulator.c b/drivers/regulator/pwm-regulator.c
index 7629476d94ae..b9eeaff1c661 100644
--- a/drivers/regulator/pwm-regulator.c
+++ b/drivers/regulator/pwm-regulator.c
@@ -352,15 +352,9 @@ static int pwm_regulator_probe(struct platform_device *pdev)
config.init_data = init_data;
drvdata->pwm = devm_pwm_get(&pdev->dev, NULL);
- if (IS_ERR(drvdata->pwm)) {
- ret = PTR_ERR(drvdata->pwm);
- if (ret == -EPROBE_DEFER)
- dev_dbg(&pdev->dev,
- "Failed to get PWM, deferring probe\n");
- else
- dev_err(&pdev->dev, "Failed to get PWM: %d\n", ret);
- return ret;
- }
+ if (IS_ERR(drvdata->pwm))
+ return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->pwm),
+ "Failed to get PWM\n");
if (init_data->constraints.boot_on || init_data->constraints.always_on)
gpio_flags = GPIOD_OUT_HIGH;
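
The pwm-regulator hunk above is one instance of the dev_err_probe() conversion pattern: instead of open-coding an -EPROBE_DEFER check around dev_err()/dev_dbg(), the error is returned through a helper that logs loudly for real failures but stays quiet (and records the deferral reason) when the resource is not ready yet. A minimal kernel-style sketch of the pattern follows; the "foo" driver names are made up and the fragment is only meaningful inside a kernel tree.

/* Hypothetical probe fragment illustrating dev_err_probe(); "foo" is not a real driver. */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int foo_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm;

	pwm = devm_pwm_get(&pdev->dev, NULL);
	if (IS_ERR(pwm))
		/*
		 * Returns the error code after logging it with dev_err();
		 * for -EPROBE_DEFER it logs at debug level and records the
		 * deferral reason instead, so deferred probes stay quiet.
		 */
		return dev_err_probe(&pdev->dev, PTR_ERR(pwm),
				     "Failed to get PWM\n");

	return 0;
}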
diff --git a/drivers/regulator/qcom-rpmh-regulator.c b/drivers/regulator/qcom-rpmh-regulator.c
index 7f458d510483..12425f667c00 100644
--- a/drivers/regulator/qcom-rpmh-regulator.c
+++ b/drivers/regulator/qcom-rpmh-regulator.c
@@ -1047,6 +1047,34 @@ static const struct rpmh_vreg_init_data pm6150l_vreg_data[] = {
{}
};
+static const struct rpmh_vreg_init_data pm6350_vreg_data[] = {
+ RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, NULL),
+ RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps510, NULL),
+ /* smps3 - smps5 not configured */
+ RPMH_VREG("ldo1", "ldo%s1", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo2", "ldo%s2", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo3", "ldo%s3", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo4", "ldo%s4", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo5", "ldo%s5", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo6", "ldo%s6", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo7", "ldo%s7", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo8", "ldo%s8", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo9", "ldo%s9", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo10", "ldo%s10", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo11", "ldo%s11", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo12", "ldo%s12", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo13", "ldo%s13", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo14", "ldo%s14", &pmic5_pldo, NULL),
+ RPMH_VREG("ldo15", "ldo%s15", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo16", "ldo%s16", &pmic5_nldo, NULL),
+ /* ldo17 not configured */
+ RPMH_VREG("ldo18", "ldo%s18", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo19", "ldo%s19", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo20", "ldo%s20", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo21", "ldo%s21", &pmic5_nldo, NULL),
+ RPMH_VREG("ldo22", "ldo%s22", &pmic5_nldo, NULL),
+};
+
static const struct rpmh_vreg_init_data pmx55_vreg_data[] = {
RPMH_VREG("smps1", "smp%s1", &pmic5_ftsmps510, "vdd-s1"),
RPMH_VREG("smps2", "smp%s2", &pmic5_hfsmps510, "vdd-s2"),
@@ -1202,6 +1230,10 @@ static const struct of_device_id __maybe_unused rpmh_regulator_match_table[] = {
.data = pm6150l_vreg_data,
},
{
+ .compatible = "qcom,pm6350-rpmh-regulators",
+ .data = pm6350_vreg_data,
+ },
+ {
.compatible = "qcom,pmc8180-rpmh-regulators",
.data = pm8150_vreg_data,
},
diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c
index 198fcc6551f6..8bac024dde8b 100644
--- a/drivers/regulator/qcom_smd-regulator.c
+++ b/drivers/regulator/qcom_smd-regulator.c
@@ -738,6 +738,24 @@ static const struct regulator_desc mp5496_ldoa2 = {
.ops = &rpm_mp5496_ops,
};
+static const struct regulator_desc pm2250_lvftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(320000, 0, 269, 4000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 270,
+ .ops = &rpm_smps_ldo_ops,
+};
+
+static const struct regulator_desc pm2250_ftsmps = {
+ .linear_ranges = (struct linear_range[]) {
+ REGULATOR_LINEAR_RANGE(640000, 0, 269, 8000),
+ },
+ .n_linear_ranges = 1,
+ .n_voltages = 270,
+ .ops = &rpm_smps_ldo_ops,
+};
+
struct rpm_regulator_data {
const char *name;
u32 type;
@@ -1170,6 +1188,36 @@ static const struct rpm_regulator_data rpm_pms405_regulators[] = {
{}
};
+static const struct rpm_regulator_data rpm_pm2250_regulators[] = {
+ { "s1", QCOM_SMD_RPM_SMPA, 1, &pm2250_lvftsmps, "vdd_s1" },
+ { "s2", QCOM_SMD_RPM_SMPA, 2, &pm2250_lvftsmps, "vdd_s2" },
+ { "s3", QCOM_SMD_RPM_SMPA, 3, &pm2250_lvftsmps, "vdd_s3" },
+ { "s4", QCOM_SMD_RPM_SMPA, 4, &pm2250_ftsmps, "vdd_s4" },
+ { "l1", QCOM_SMD_RPM_LDOA, 1, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l2", QCOM_SMD_RPM_LDOA, 2, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l3", QCOM_SMD_RPM_LDOA, 3, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l4", QCOM_SMD_RPM_LDOA, 4, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l5", QCOM_SMD_RPM_LDOA, 5, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l6", QCOM_SMD_RPM_LDOA, 6, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l7", QCOM_SMD_RPM_LDOA, 7, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l8", QCOM_SMD_RPM_LDOA, 8, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l9", QCOM_SMD_RPM_LDOA, 9, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l10", QCOM_SMD_RPM_LDOA, 10, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l11", QCOM_SMD_RPM_LDOA, 11, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l12", QCOM_SMD_RPM_LDOA, 12, &pm660_nldo660, "vdd_l1_l2_l3_l5_l6_l7_l8_l9_l10_l11_l12" },
+ { "l13", QCOM_SMD_RPM_LDOA, 13, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l14", QCOM_SMD_RPM_LDOA, 14, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l15", QCOM_SMD_RPM_LDOA, 15, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l16", QCOM_SMD_RPM_LDOA, 16, &pm660_ht_lvpldo, "vdd_l13_l14_l15_l16" },
+ { "l17", QCOM_SMD_RPM_LDOA, 17, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l18", QCOM_SMD_RPM_LDOA, 18, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l19", QCOM_SMD_RPM_LDOA, 19, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l20", QCOM_SMD_RPM_LDOA, 20, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l21", QCOM_SMD_RPM_LDOA, 21, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ { "l22", QCOM_SMD_RPM_LDOA, 22, &pm660_pldo660, "vdd_l4_l17_l18_l19_l20_l21_l22" },
+ {}
+};
+
static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-mp5496-regulators", .data = &rpm_mp5496_regulators },
{ .compatible = "qcom,rpm-pm8841-regulators", .data = &rpm_pm8841_regulators },
@@ -1186,6 +1234,7 @@ static const struct of_device_id rpm_of_match[] = {
{ .compatible = "qcom,rpm-pmi8994-regulators", .data = &rpm_pmi8994_regulators },
{ .compatible = "qcom,rpm-pmi8998-regulators", .data = &rpm_pmi8998_regulators },
{ .compatible = "qcom,rpm-pms405-regulators", .data = &rpm_pms405_regulators },
+ { .compatible = "qcom,rpm-pm2250-regulators", .data = &rpm_pm2250_regulators },
{}
};
MODULE_DEVICE_TABLE(of, rpm_of_match);
diff --git a/drivers/regulator/rtq6752-regulator.c b/drivers/regulator/rtq6752-regulator.c
index 609d3fcf4923..dfe45fb67353 100644
--- a/drivers/regulator/rtq6752-regulator.c
+++ b/drivers/regulator/rtq6752-regulator.c
@@ -54,14 +54,14 @@ static int rtq6752_set_vdd_enable(struct regulator_dev *rdev)
int rid = rdev_get_id(rdev), ret;
mutex_lock(&priv->lock);
- if (priv->enable_gpio) {
- gpiod_set_value(priv->enable_gpio, 1);
+ if (!priv->enable_flag) {
+ if (priv->enable_gpio) {
+ gpiod_set_value(priv->enable_gpio, 1);
- usleep_range(RTQ6752_I2CRDY_TIMEUS,
- RTQ6752_I2CRDY_TIMEUS + 100);
- }
+ usleep_range(RTQ6752_I2CRDY_TIMEUS,
+ RTQ6752_I2CRDY_TIMEUS + 100);
+ }
- if (!priv->enable_flag) {
regcache_cache_only(priv->regmap, false);
ret = regcache_sync(priv->regmap);
if (ret) {
@@ -91,11 +91,11 @@ static int rtq6752_set_vdd_disable(struct regulator_dev *rdev)
if (!priv->enable_flag) {
regcache_cache_only(priv->regmap, true);
regcache_mark_dirty(priv->regmap);
- }
- if (priv->enable_gpio)
- gpiod_set_value(priv->enable_gpio, 0);
+ if (priv->enable_gpio)
+ gpiod_set_value(priv->enable_gpio, 0);
+ }
mutex_unlock(&priv->lock);
return 0;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 7c111bbdc2af..35269f998210 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -850,18 +850,15 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
/* DS4 GPIO */
gpio_direction_output(pdata->buck_ds[2], 0x0);
- if (pdata->buck2_gpiodvs || pdata->buck3_gpiodvs ||
- pdata->buck4_gpiodvs) {
- regmap_update_bits(s5m8767->iodev->regmap_pmic,
- S5M8767_REG_BUCK2CTRL, 1 << 1,
- (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1));
- regmap_update_bits(s5m8767->iodev->regmap_pmic,
- S5M8767_REG_BUCK3CTRL, 1 << 1,
- (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1));
- regmap_update_bits(s5m8767->iodev->regmap_pmic,
- S5M8767_REG_BUCK4CTRL, 1 << 1,
- (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1));
- }
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK2CTRL, 1 << 1,
+ (pdata->buck2_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK3CTRL, 1 << 1,
+ (pdata->buck3_gpiodvs) ? (1 << 1) : (0 << 1));
+ regmap_update_bits(s5m8767->iodev->regmap_pmic,
+ S5M8767_REG_BUCK4CTRL, 1 << 1,
+ (pdata->buck4_gpiodvs) ? (1 << 1) : (0 << 1));
/* Initialize GPIO DVS registers */
for (i = 0; i < 8; i++) {
diff --git a/drivers/regulator/sy7636a-regulator.c b/drivers/regulator/sy7636a-regulator.c
index 8360b3947ead..22fddf868e4c 100644
--- a/drivers/regulator/sy7636a-regulator.c
+++ b/drivers/regulator/sy7636a-regulator.c
@@ -70,7 +70,7 @@ static const struct regulator_desc desc = {
static int sy7636a_regulator_probe(struct platform_device *pdev)
{
- struct regmap *regmap = dev_get_drvdata(pdev->dev.parent);
+ struct regmap *regmap = dev_get_regmap(pdev->dev.parent, NULL);
struct regulator_config config = { };
struct regulator_dev *rdev;
struct gpio_desc *gdp;
diff --git a/drivers/regulator/ti-abb-regulator.c b/drivers/regulator/ti-abb-regulator.c
index 9f0a4d50cead..2931a0b89bff 100644
--- a/drivers/regulator/ti-abb-regulator.c
+++ b/drivers/regulator/ti-abb-regulator.c
@@ -725,9 +725,7 @@ static int ti_abb_probe(struct platform_device *pdev)
/* Map ABB resources */
if (abb->regs->setup_off || abb->regs->control_off) {
- pname = "base-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->base = devm_ioremap_resource(dev, res);
+ abb->base = devm_platform_ioremap_resource_byname(pdev, "base-address");
if (IS_ERR(abb->base))
return PTR_ERR(abb->base);
@@ -735,35 +733,18 @@ static int ti_abb_probe(struct platform_device *pdev)
abb->control_reg = abb->base + abb->regs->control_off;
} else {
- pname = "control-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->control_reg = devm_ioremap_resource(dev, res);
+ abb->control_reg = devm_platform_ioremap_resource_byname(pdev, "control-address");
if (IS_ERR(abb->control_reg))
return PTR_ERR(abb->control_reg);
- pname = "setup-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- abb->setup_reg = devm_ioremap_resource(dev, res);
+ abb->setup_reg = devm_platform_ioremap_resource_byname(pdev, "setup-address");
if (IS_ERR(abb->setup_reg))
return PTR_ERR(abb->setup_reg);
}
- pname = "int-address";
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, pname);
- if (!res) {
- dev_err(dev, "Missing '%s' IO resource\n", pname);
- return -ENODEV;
- }
- /*
- * We may have shared interrupt register offsets which are
- * write-1-to-clear between domains ensuring exclusivity.
- */
- abb->int_base = devm_ioremap(dev, res->start,
- resource_size(res));
- if (!abb->int_base) {
- dev_err(dev, "Unable to map '%s'\n", pname);
- return -ENOMEM;
- }
+ abb->int_base = devm_platform_ioremap_resource_byname(pdev, "int-address");
+ if (IS_ERR(abb->int_base))
+ return PTR_ERR(abb->int_base);
/* Map Optional resources */
pname = "efuse-address";
diff --git a/drivers/regulator/tps62360-regulator.c b/drivers/regulator/tps62360-regulator.c
index 315cd5daf480..574958690ace 100644
--- a/drivers/regulator/tps62360-regulator.c
+++ b/drivers/regulator/tps62360-regulator.c
@@ -28,13 +28,12 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_device.h>
-#include <linux/of_gpio.h>
#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/tps62360.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/regmap.h>
@@ -65,8 +64,8 @@ struct tps62360_chip {
struct regulator_desc desc;
struct regulator_dev *rdev;
struct regmap *regmap;
- int vsel0_gpio;
- int vsel1_gpio;
+ struct gpio_desc *vsel0_gpio;
+ struct gpio_desc *vsel1_gpio;
u8 voltage_reg_mask;
bool en_internal_pulldn;
bool en_discharge;
@@ -165,8 +164,8 @@ static int tps62360_dcdc_set_voltage_sel(struct regulator_dev *dev,
/* Select proper VSET register vio gpios */
if (tps->valid_gpios) {
- gpio_set_value_cansleep(tps->vsel0_gpio, new_vset_id & 0x1);
- gpio_set_value_cansleep(tps->vsel1_gpio,
+ gpiod_set_value_cansleep(tps->vsel0_gpio, new_vset_id & 0x1);
+ gpiod_set_value_cansleep(tps->vsel1_gpio,
(new_vset_id >> 1) & 0x1);
}
return 0;
@@ -310,9 +309,6 @@ static struct tps62360_regulator_platform_data *
return NULL;
}
- pdata->vsel0_gpio = of_get_named_gpio(np, "vsel0-gpio", 0);
- pdata->vsel1_gpio = of_get_named_gpio(np, "vsel1-gpio", 0);
-
if (of_find_property(np, "ti,vsel0-state-high", NULL))
pdata->vsel0_def_state = 1;
@@ -349,6 +345,7 @@ static int tps62360_probe(struct i2c_client *client,
int ret;
int i;
int chip_id;
+ int gpio_flags;
pdata = dev_get_platdata(&client->dev);
@@ -390,8 +387,6 @@ static int tps62360_probe(struct i2c_client *client,
tps->en_discharge = pdata->en_discharge;
tps->en_internal_pulldn = pdata->en_internal_pulldn;
- tps->vsel0_gpio = pdata->vsel0_gpio;
- tps->vsel1_gpio = pdata->vsel1_gpio;
tps->dev = &client->dev;
switch (chip_id) {
@@ -426,29 +421,27 @@ static int tps62360_probe(struct i2c_client *client,
tps->lru_index[0] = tps->curr_vset_id;
tps->valid_gpios = false;
- if (gpio_is_valid(tps->vsel0_gpio) && gpio_is_valid(tps->vsel1_gpio)) {
- int gpio_flags;
- gpio_flags = (pdata->vsel0_def_state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&client->dev, tps->vsel0_gpio,
- gpio_flags, "tps62360-vsel0");
- if (ret) {
- dev_err(&client->dev,
- "%s(): Could not obtain vsel0 GPIO %d: %d\n",
- __func__, tps->vsel0_gpio, ret);
- return ret;
- }
+ gpio_flags = (pdata->vsel0_def_state) ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ tps->vsel0_gpio = devm_gpiod_get_optional(&client->dev, "vsel0", gpio_flags);
+ if (IS_ERR(tps->vsel0_gpio)) {
+ dev_err(&client->dev,
+ "%s(): Could not obtain vsel0 GPIO: %ld\n",
+ __func__, PTR_ERR(tps->vsel0_gpio));
+ return PTR_ERR(tps->vsel0_gpio);
+ }
- gpio_flags = (pdata->vsel1_def_state) ?
- GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW;
- ret = devm_gpio_request_one(&client->dev, tps->vsel1_gpio,
- gpio_flags, "tps62360-vsel1");
- if (ret) {
- dev_err(&client->dev,
- "%s(): Could not obtain vsel1 GPIO %d: %d\n",
- __func__, tps->vsel1_gpio, ret);
- return ret;
- }
+ gpio_flags = (pdata->vsel1_def_state) ?
+ GPIOD_OUT_HIGH : GPIOD_OUT_LOW;
+ tps->vsel1_gpio = devm_gpiod_get_optional(&client->dev, "vsel1", gpio_flags);
+ if (IS_ERR(tps->vsel1_gpio)) {
+ dev_err(&client->dev,
+ "%s(): Could not obtain vsel1 GPIO: %ld\n",
+ __func__, PTR_ERR(tps->vsel1_gpio));
+ return PTR_ERR(tps->vsel1_gpio);
+ }
+
+ if (tps->vsel0_gpio != NULL && tps->vsel1_gpio != NULL) {
tps->valid_gpios = true;
/*
diff --git a/drivers/regulator/tps80031-regulator.c b/drivers/regulator/tps80031-regulator.c
deleted file mode 100644
index a29e65230132..000000000000
--- a/drivers/regulator/tps80031-regulator.c
+++ /dev/null
@@ -1,753 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-//
-// tps80031-regulator.c -- TI TPS80031 regulator driver.
-//
-// Regulator driver for TI TPS80031/TPS80032 Fully Integrated Power
-// Management with Power Path and Battery Charger.
-//
-// Copyright (c) 2012, NVIDIA Corporation.
-//
-// Author: Laxman Dewangan <ldewangan@nvidia.com>
-
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/mfd/tps80031.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/slab.h>
-
-/* Flags for DCDC Voltage reading */
-#define DCDC_OFFSET_EN BIT(0)
-#define DCDC_EXTENDED_EN BIT(1)
-#define TRACK_MODE_ENABLE BIT(2)
-
-#define SMPS_MULTOFFSET_VIO BIT(1)
-#define SMPS_MULTOFFSET_SMPS1 BIT(3)
-#define SMPS_MULTOFFSET_SMPS2 BIT(4)
-#define SMPS_MULTOFFSET_SMPS3 BIT(6)
-#define SMPS_MULTOFFSET_SMPS4 BIT(0)
-
-#define SMPS_CMD_MASK 0xC0
-#define SMPS_VSEL_MASK 0x3F
-#define LDO_VSEL_MASK 0x1F
-#define LDO_TRACK_VSEL_MASK 0x3F
-
-#define MISC2_LDOUSB_IN_VSYS BIT(4)
-#define MISC2_LDOUSB_IN_PMID BIT(3)
-#define MISC2_LDOUSB_IN_MASK 0x18
-
-#define MISC2_LDO3_SEL_VIB_VAL BIT(0)
-#define MISC2_LDO3_SEL_VIB_MASK 0x1
-
-#define BOOST_HW_PWR_EN BIT(5)
-#define BOOST_HW_PWR_EN_MASK BIT(5)
-
-#define OPA_MODE_EN BIT(6)
-#define OPA_MODE_EN_MASK BIT(6)
-
-#define USB_VBUS_CTRL_SET 0x04
-#define USB_VBUS_CTRL_CLR 0x05
-#define VBUS_DISCHRG 0x20
-
-struct tps80031_regulator_info {
- /* Regulator register address.*/
- u8 trans_reg;
- u8 state_reg;
- u8 force_reg;
- u8 volt_reg;
- u8 volt_id;
-
- /*Power request bits */
- int preq_bit;
-
- /* used by regulator core */
- struct regulator_desc desc;
-
-};
-
-struct tps80031_regulator {
- struct device *dev;
- struct tps80031_regulator_info *rinfo;
-
- u8 device_flags;
- unsigned int config_flags;
- unsigned int ext_ctrl_flag;
-};
-
-static inline struct device *to_tps80031_dev(struct regulator_dev *rdev)
-{
- return rdev_get_dev(rdev)->parent->parent;
-}
-
-static int tps80031_reg_is_enabled(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- u8 reg_val;
- int ret;
-
- if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
- return true;
-
- ret = tps80031_read(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
- &reg_val);
- if (ret < 0) {
- dev_err(&rdev->dev, "Reg 0x%02x read failed, err = %d\n",
- ri->rinfo->state_reg, ret);
- return ret;
- }
- return (reg_val & TPS80031_STATE_MASK) == TPS80031_STATE_ON;
-}
-
-static int tps80031_reg_enable(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
-
- if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
- return 0;
-
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
- TPS80031_STATE_ON, TPS80031_STATE_MASK);
- if (ret < 0) {
- dev_err(&rdev->dev, "Reg 0x%02x update failed, err = %d\n",
- ri->rinfo->state_reg, ret);
- return ret;
- }
- return ret;
-}
-
-static int tps80031_reg_disable(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
-
- if (ri->ext_ctrl_flag & TPS80031_EXT_PWR_REQ)
- return 0;
-
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->state_reg,
- TPS80031_STATE_OFF, TPS80031_STATE_MASK);
- if (ret < 0)
- dev_err(&rdev->dev, "Reg 0x%02x update failed, err = %d\n",
- ri->rinfo->state_reg, ret);
- return ret;
-}
-
-/* DCDC voltages for the selector of 58 to 63 */
-static const int tps80031_dcdc_voltages[4][5] = {
- { 1350, 1500, 1800, 1900, 2100},
- { 1350, 1500, 1800, 1900, 2100},
- { 2084, 2315, 2778, 2932, 3241},
- { 4167, 2315, 2778, 2932, 3241},
-};
-
-static int tps80031_dcdc_list_voltage(struct regulator_dev *rdev, unsigned sel)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- int volt_index = ri->device_flags & 0x3;
-
- if (sel == 0)
- return 0;
- else if (sel < 58)
- return regulator_list_voltage_linear(rdev, sel - 1);
- else
- return tps80031_dcdc_voltages[volt_index][sel - 58] * 1000;
-}
-
-static int tps80031_dcdc_set_voltage_sel(struct regulator_dev *rdev,
- unsigned vsel)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
- u8 reg_val;
-
- if (ri->rinfo->force_reg) {
- ret = tps80031_read(parent, ri->rinfo->volt_id,
- ri->rinfo->force_reg, &reg_val);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- ri->rinfo->force_reg, ret);
- return ret;
- }
- if (!(reg_val & SMPS_CMD_MASK)) {
- ret = tps80031_update(parent, ri->rinfo->volt_id,
- ri->rinfo->force_reg, vsel, SMPS_VSEL_MASK);
- if (ret < 0)
- dev_err(ri->dev,
- "reg 0x%02x update failed, e = %d\n",
- ri->rinfo->force_reg, ret);
- return ret;
- }
- }
- ret = tps80031_update(parent, ri->rinfo->volt_id,
- ri->rinfo->volt_reg, vsel, SMPS_VSEL_MASK);
- if (ret < 0)
- dev_err(ri->dev, "reg 0x%02x update failed, e = %d\n",
- ri->rinfo->volt_reg, ret);
- return ret;
-}
-
-static int tps80031_dcdc_get_voltage_sel(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- uint8_t vsel = 0;
- int ret;
-
- if (ri->rinfo->force_reg) {
- ret = tps80031_read(parent, ri->rinfo->volt_id,
- ri->rinfo->force_reg, &vsel);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- ri->rinfo->force_reg, ret);
- return ret;
- }
-
- if (!(vsel & SMPS_CMD_MASK))
- return vsel & SMPS_VSEL_MASK;
- }
- ret = tps80031_read(parent, ri->rinfo->volt_id,
- ri->rinfo->volt_reg, &vsel);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- ri->rinfo->volt_reg, ret);
- return ret;
- }
- return vsel & SMPS_VSEL_MASK;
-}
-
-static int tps80031_ldo_list_voltage(struct regulator_dev *rdev,
- unsigned int sel)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
-
- /* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
- if ((ri->rinfo->desc.id == TPS80031_REGULATOR_LDO2) &&
- (ri->device_flags & TRACK_MODE_ENABLE)) {
- unsigned nvsel = (sel) & 0x1F;
- if (((tps80031_get_chip_info(parent) == TPS80031) ||
- ((tps80031_get_chip_info(parent) == TPS80032) &&
- (tps80031_get_pmu_version(parent) == 0x0))) &&
- ((nvsel == 0x0) || (nvsel >= 0x19 && nvsel <= 0x1F))) {
- dev_err(ri->dev,
- "Invalid sel %d in track mode LDO2\n",
- nvsel);
- return -EINVAL;
- }
- }
-
- return regulator_list_voltage_linear(rdev, sel);
-}
-
-static int tps80031_ldo_map_voltage(struct regulator_dev *rdev,
- int min_uV, int max_uV)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
-
- /* Check for valid setting for TPS80031 or TPS80032-ES1.0 */
- if ((ri->rinfo->desc.id == TPS80031_REGULATOR_LDO2) &&
- (ri->device_flags & TRACK_MODE_ENABLE)) {
- if (((tps80031_get_chip_info(parent) == TPS80031) ||
- ((tps80031_get_chip_info(parent) == TPS80032) &&
- (tps80031_get_pmu_version(parent) == 0x0)))) {
- return regulator_map_voltage_iterate(rdev, min_uV,
- max_uV);
- }
- }
-
- return regulator_map_voltage_linear(rdev, min_uV, max_uV);
-}
-
-static int tps80031_vbus_is_enabled(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
- uint8_t ctrl1 = 0;
- uint8_t ctrl3 = 0;
-
- ret = tps80031_read(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL1, &ctrl1);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL1, ret);
- return ret;
- }
- ret = tps80031_read(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL3, &ctrl3);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL3, ret);
- return ret;
- }
- if ((ctrl1 & OPA_MODE_EN) && (ctrl3 & BOOST_HW_PWR_EN))
- return 1;
- return ret;
-}
-
-static int tps80031_vbus_enable(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
-
- ret = tps80031_set_bits(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL1, OPA_MODE_EN);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL1, ret);
- return ret;
- }
-
- ret = tps80031_set_bits(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL3, BOOST_HW_PWR_EN);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x read failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL3, ret);
- return ret;
- }
- return ret;
-}
-
-static int tps80031_vbus_disable(struct regulator_dev *rdev)
-{
- struct tps80031_regulator *ri = rdev_get_drvdata(rdev);
- struct device *parent = to_tps80031_dev(rdev);
- int ret;
-
- if (ri->config_flags & TPS80031_VBUS_DISCHRG_EN_PDN) {
- ret = tps80031_write(parent, TPS80031_SLAVE_ID2,
- USB_VBUS_CTRL_SET, VBUS_DISCHRG);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x write failed, e = %d\n",
- USB_VBUS_CTRL_SET, ret);
- return ret;
- }
- }
-
- ret = tps80031_clr_bits(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL1, OPA_MODE_EN);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x clearbit failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL1, ret);
- return ret;
- }
-
- ret = tps80031_clr_bits(parent, TPS80031_SLAVE_ID2,
- TPS80031_CHARGERUSB_CTRL3, BOOST_HW_PWR_EN);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x clearbit failed, e = %d\n",
- TPS80031_CHARGERUSB_CTRL3, ret);
- return ret;
- }
-
- mdelay(DIV_ROUND_UP(ri->rinfo->desc.enable_time, 1000));
- if (ri->config_flags & TPS80031_VBUS_DISCHRG_EN_PDN) {
- ret = tps80031_write(parent, TPS80031_SLAVE_ID2,
- USB_VBUS_CTRL_CLR, VBUS_DISCHRG);
- if (ret < 0) {
- dev_err(ri->dev, "reg 0x%02x write failed, e = %d\n",
- USB_VBUS_CTRL_CLR, ret);
- return ret;
- }
- }
- return ret;
-}
-
-static const struct regulator_ops tps80031_dcdc_ops = {
- .list_voltage = tps80031_dcdc_list_voltage,
- .set_voltage_sel = tps80031_dcdc_set_voltage_sel,
- .get_voltage_sel = tps80031_dcdc_get_voltage_sel,
- .enable = tps80031_reg_enable,
- .disable = tps80031_reg_disable,
- .is_enabled = tps80031_reg_is_enabled,
-};
-
-static const struct regulator_ops tps80031_ldo_ops = {
- .list_voltage = tps80031_ldo_list_voltage,
- .map_voltage = tps80031_ldo_map_voltage,
- .set_voltage_sel = regulator_set_voltage_sel_regmap,
- .get_voltage_sel = regulator_get_voltage_sel_regmap,
- .enable = tps80031_reg_enable,
- .disable = tps80031_reg_disable,
- .is_enabled = tps80031_reg_is_enabled,
-};
-
-static const struct regulator_ops tps80031_vbus_sw_ops = {
- .list_voltage = regulator_list_voltage_linear,
- .enable = tps80031_vbus_enable,
- .disable = tps80031_vbus_disable,
- .is_enabled = tps80031_vbus_is_enabled,
-};
-
-static const struct regulator_ops tps80031_vbus_hw_ops = {
- .list_voltage = regulator_list_voltage_linear,
-};
-
-static const struct regulator_ops tps80031_ext_reg_ops = {
- .list_voltage = regulator_list_voltage_linear,
- .enable = tps80031_reg_enable,
- .disable = tps80031_reg_disable,
- .is_enabled = tps80031_reg_is_enabled,
-};
-
-/* Non-existing default definitions for some registers */
-#define TPS80031_SMPS3_CFG_FORCE 0
-#define TPS80031_SMPS4_CFG_FORCE 0
-
-#define TPS80031_VBUS_CFG_TRANS 0
-#define TPS80031_VBUS_CFG_STATE 0
-
-#define TPS80031_REG_SMPS(_id, _volt_id, _pbit) \
-{ \
- .trans_reg = TPS80031_##_id##_CFG_TRANS, \
- .state_reg = TPS80031_##_id##_CFG_STATE, \
- .force_reg = TPS80031_##_id##_CFG_FORCE, \
- .volt_reg = TPS80031_##_id##_CFG_VOLTAGE, \
- .volt_id = TPS80031_SLAVE_##_volt_id, \
- .preq_bit = _pbit, \
- .desc = { \
- .name = "tps80031_"#_id, \
- .id = TPS80031_REGULATOR_##_id, \
- .n_voltages = 63, \
- .ops = &tps80031_dcdc_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .enable_time = 500, \
- }, \
-}
-
-#define TPS80031_REG_LDO(_id, _preq_bit) \
-{ \
- .trans_reg = TPS80031_##_id##_CFG_TRANS, \
- .state_reg = TPS80031_##_id##_CFG_STATE, \
- .volt_reg = TPS80031_##_id##_CFG_VOLTAGE, \
- .volt_id = TPS80031_SLAVE_ID1, \
- .preq_bit = _preq_bit, \
- .desc = { \
- .owner = THIS_MODULE, \
- .name = "tps80031_"#_id, \
- .id = TPS80031_REGULATOR_##_id, \
- .ops = &tps80031_ldo_ops, \
- .type = REGULATOR_VOLTAGE, \
- .min_uV = 1000000, \
- .uV_step = 100000, \
- .linear_min_sel = 1, \
- .n_voltages = 25, \
- .vsel_reg = TPS80031_##_id##_CFG_VOLTAGE, \
- .vsel_mask = LDO_VSEL_MASK, \
- .enable_time = 500, \
- }, \
-}
-
-#define TPS80031_REG_FIXED(_id, max_mV, _ops, _delay, _pbit) \
-{ \
- .trans_reg = TPS80031_##_id##_CFG_TRANS, \
- .state_reg = TPS80031_##_id##_CFG_STATE, \
- .volt_id = TPS80031_SLAVE_ID1, \
- .preq_bit = _pbit, \
- .desc = { \
- .name = "tps80031_"#_id, \
- .id = TPS80031_REGULATOR_##_id, \
- .min_uV = max_mV * 1000, \
- .n_voltages = 1, \
- .ops = &_ops, \
- .type = REGULATOR_VOLTAGE, \
- .owner = THIS_MODULE, \
- .enable_time = _delay, \
- }, \
-}
-
-static struct tps80031_regulator_info tps80031_rinfo[TPS80031_REGULATOR_MAX] = {
- TPS80031_REG_SMPS(VIO, ID0, 4),
- TPS80031_REG_SMPS(SMPS1, ID0, 0),
- TPS80031_REG_SMPS(SMPS2, ID0, 1),
- TPS80031_REG_SMPS(SMPS3, ID1, 2),
- TPS80031_REG_SMPS(SMPS4, ID1, 3),
- TPS80031_REG_LDO(VANA, -1),
- TPS80031_REG_LDO(LDO1, 8),
- TPS80031_REG_LDO(LDO2, 9),
- TPS80031_REG_LDO(LDO3, 10),
- TPS80031_REG_LDO(LDO4, 11),
- TPS80031_REG_LDO(LDO5, 12),
- TPS80031_REG_LDO(LDO6, 13),
- TPS80031_REG_LDO(LDO7, 14),
- TPS80031_REG_LDO(LDOLN, 15),
- TPS80031_REG_LDO(LDOUSB, 5),
- TPS80031_REG_FIXED(VBUS, 5000, tps80031_vbus_hw_ops, 100000, -1),
- TPS80031_REG_FIXED(REGEN1, 3300, tps80031_ext_reg_ops, 0, 16),
- TPS80031_REG_FIXED(REGEN2, 3300, tps80031_ext_reg_ops, 0, 17),
- TPS80031_REG_FIXED(SYSEN, 3300, tps80031_ext_reg_ops, 0, 18),
-};
-
-static int tps80031_power_req_config(struct device *parent,
- struct tps80031_regulator *ri,
- struct tps80031_regulator_platform_data *tps80031_pdata)
-{
- int ret = 0;
-
- if (ri->rinfo->preq_bit < 0)
- goto skip_pwr_req_config;
-
- ret = tps80031_ext_power_req_config(parent, ri->ext_ctrl_flag,
- ri->rinfo->preq_bit, ri->rinfo->state_reg,
- ri->rinfo->trans_reg);
- if (ret < 0) {
- dev_err(ri->dev, "ext powerreq config failed, err = %d\n", ret);
- return ret;
- }
-
-skip_pwr_req_config:
- if (tps80031_pdata->ext_ctrl_flag & TPS80031_PWR_ON_ON_SLEEP) {
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
- ri->rinfo->trans_reg, TPS80031_TRANS_SLEEP_ON,
- TPS80031_TRANS_SLEEP_MASK);
- if (ret < 0) {
- dev_err(ri->dev, "Reg 0x%02x update failed, e %d\n",
- ri->rinfo->trans_reg, ret);
- return ret;
- }
- }
- return ret;
-}
-
-static int tps80031_regulator_config(struct device *parent,
- struct tps80031_regulator *ri,
- struct tps80031_regulator_platform_data *tps80031_pdata)
-{
- int ret = 0;
-
- switch (ri->rinfo->desc.id) {
- case TPS80031_REGULATOR_LDOUSB:
- if (ri->config_flags & (TPS80031_USBLDO_INPUT_VSYS |
- TPS80031_USBLDO_INPUT_PMID)) {
- unsigned val;
-
- if (ri->config_flags & TPS80031_USBLDO_INPUT_VSYS)
- val = MISC2_LDOUSB_IN_VSYS;
- else
- val = MISC2_LDOUSB_IN_PMID;
-
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
- TPS80031_MISC2, val,
- MISC2_LDOUSB_IN_MASK);
- if (ret < 0) {
- dev_err(ri->dev,
- "LDOUSB config failed, e= %d\n", ret);
- return ret;
- }
- }
- break;
-
- case TPS80031_REGULATOR_LDO3:
- if (ri->config_flags & TPS80031_LDO3_OUTPUT_VIB) {
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1,
- TPS80031_MISC2, MISC2_LDO3_SEL_VIB_VAL,
- MISC2_LDO3_SEL_VIB_MASK);
- if (ret < 0) {
- dev_err(ri->dev,
- "LDO3 config failed, e = %d\n", ret);
- return ret;
- }
- }
- break;
-
- case TPS80031_REGULATOR_VBUS:
- /* Provide SW control Ops if VBUS is SW control */
- if (!(ri->config_flags & TPS80031_VBUS_SW_ONLY))
- ri->rinfo->desc.ops = &tps80031_vbus_sw_ops;
- break;
- default:
- break;
- }
-
- /* Configure Active state to ON, SLEEP to OFF and OFF_state to OFF */
- ret = tps80031_update(parent, TPS80031_SLAVE_ID1, ri->rinfo->trans_reg,
- TPS80031_TRANS_ACTIVE_ON | TPS80031_TRANS_SLEEP_OFF |
- TPS80031_TRANS_OFF_OFF, TPS80031_TRANS_ACTIVE_MASK |
- TPS80031_TRANS_SLEEP_MASK | TPS80031_TRANS_OFF_MASK);
- if (ret < 0) {
- dev_err(ri->dev, "trans reg update failed, e %d\n", ret);
- return ret;
- }
-
- return ret;
-}
-
-static int check_smps_mode_mult(struct device *parent,
- struct tps80031_regulator *ri)
-{
- int mult_offset;
- int ret;
- u8 smps_offset;
- u8 smps_mult;
-
- ret = tps80031_read(parent, TPS80031_SLAVE_ID1,
- TPS80031_SMPS_OFFSET, &smps_offset);
- if (ret < 0) {
- dev_err(parent, "Error in reading smps offset register\n");
- return ret;
- }
-
- ret = tps80031_read(parent, TPS80031_SLAVE_ID1,
- TPS80031_SMPS_MULT, &smps_mult);
- if (ret < 0) {
- dev_err(parent, "Error in reading smps mult register\n");
- return ret;
- }
-
- switch (ri->rinfo->desc.id) {
- case TPS80031_REGULATOR_VIO:
- mult_offset = SMPS_MULTOFFSET_VIO;
- break;
- case TPS80031_REGULATOR_SMPS1:
- mult_offset = SMPS_MULTOFFSET_SMPS1;
- break;
- case TPS80031_REGULATOR_SMPS2:
- mult_offset = SMPS_MULTOFFSET_SMPS2;
- break;
- case TPS80031_REGULATOR_SMPS3:
- mult_offset = SMPS_MULTOFFSET_SMPS3;
- break;
- case TPS80031_REGULATOR_SMPS4:
- mult_offset = SMPS_MULTOFFSET_SMPS4;
- break;
- case TPS80031_REGULATOR_LDO2:
- ri->device_flags = smps_mult & BIT(5) ? TRACK_MODE_ENABLE : 0;
- /* In TRACK mode, LDO2 varies from 600mV to 1300mV */
- if (ri->device_flags & TRACK_MODE_ENABLE) {
- ri->rinfo->desc.min_uV = 600000;
- ri->rinfo->desc.uV_step = 12500;
- ri->rinfo->desc.n_voltages = 57;
- ri->rinfo->desc.vsel_mask = LDO_TRACK_VSEL_MASK;
- }
- return 0;
- default:
- return 0;
- }
-
- ri->device_flags = (smps_offset & mult_offset) ? DCDC_OFFSET_EN : 0;
- ri->device_flags |= (smps_mult & mult_offset) ? DCDC_EXTENDED_EN : 0;
- switch (ri->device_flags) {
- case 0:
- ri->rinfo->desc.min_uV = 607700;
- ri->rinfo->desc.uV_step = 12660;
- break;
- case DCDC_OFFSET_EN:
- ri->rinfo->desc.min_uV = 700000;
- ri->rinfo->desc.uV_step = 12500;
- break;
- case DCDC_EXTENDED_EN:
- ri->rinfo->desc.min_uV = 1852000;
- ri->rinfo->desc.uV_step = 38600;
- break;
- case DCDC_OFFSET_EN | DCDC_EXTENDED_EN:
- ri->rinfo->desc.min_uV = 2161000;
- ri->rinfo->desc.uV_step = 38600;
- break;
- }
- return 0;
-}
-
-static int tps80031_regulator_probe(struct platform_device *pdev)
-{
- struct tps80031_platform_data *pdata;
- struct tps80031_regulator_platform_data *tps_pdata;
- struct tps80031_regulator *ri;
- struct tps80031_regulator *pmic;
- struct regulator_dev *rdev;
- struct regulator_config config = { };
- struct tps80031 *tps80031_mfd = dev_get_drvdata(pdev->dev.parent);
- int ret;
- int num;
-
- pdata = dev_get_platdata(pdev->dev.parent);
-
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data\n");
- return -EINVAL;
- }
-
- pmic = devm_kcalloc(&pdev->dev,
- TPS80031_REGULATOR_MAX, sizeof(*pmic), GFP_KERNEL);
- if (!pmic)
- return -ENOMEM;
-
- for (num = 0; num < TPS80031_REGULATOR_MAX; ++num) {
- tps_pdata = pdata->regulator_pdata[num];
- ri = &pmic[num];
- ri->rinfo = &tps80031_rinfo[num];
- ri->dev = &pdev->dev;
-
- check_smps_mode_mult(pdev->dev.parent, ri);
- config.dev = &pdev->dev;
- config.init_data = NULL;
- config.driver_data = ri;
- config.regmap = tps80031_mfd->regmap[ri->rinfo->volt_id];
-
- if (tps_pdata) {
- config.init_data = tps_pdata->reg_init_data;
- ri->config_flags = tps_pdata->config_flags;
- ri->ext_ctrl_flag = tps_pdata->ext_ctrl_flag;
- ret = tps80031_regulator_config(pdev->dev.parent,
- ri, tps_pdata);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "regulator config failed, e %d\n", ret);
- return ret;
- }
-
- ret = tps80031_power_req_config(pdev->dev.parent,
- ri, tps_pdata);
- if (ret < 0) {
- dev_err(&pdev->dev,
- "pwr_req config failed, err %d\n", ret);
- return ret;
- }
- }
- rdev = devm_regulator_register(&pdev->dev, &ri->rinfo->desc,
- &config);
- if (IS_ERR(rdev)) {
- dev_err(&pdev->dev,
- "register regulator failed %s\n",
- ri->rinfo->desc.name);
- return PTR_ERR(rdev);
- }
- }
-
- platform_set_drvdata(pdev, pmic);
- return 0;
-}
-
-static struct platform_driver tps80031_regulator_driver = {
- .driver = {
- .name = "tps80031-pmic",
- },
- .probe = tps80031_regulator_probe,
-};
-
-static int __init tps80031_regulator_init(void)
-{
- return platform_driver_register(&tps80031_regulator_driver);
-}
-subsys_initcall(tps80031_regulator_init);
-
-static void __exit tps80031_regulator_exit(void)
-{
- platform_driver_unregister(&tps80031_regulator_driver);
-}
-module_exit(tps80031_regulator_exit);
-
-MODULE_ALIAS("platform:tps80031-regulator");
-MODULE_DESCRIPTION("Regulator Driver for TI TPS80031/TPS80032 PMIC");
-MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/regulator/uniphier-regulator.c b/drivers/regulator/uniphier-regulator.c
index e75b0973e325..39a68b01fc38 100644
--- a/drivers/regulator/uniphier-regulator.c
+++ b/drivers/regulator/uniphier-regulator.c
@@ -199,6 +199,10 @@ static const struct of_device_id uniphier_regulator_match[] = {
.compatible = "socionext,uniphier-pxs3-usb3-regulator",
.data = &uniphier_pxs2_usb3_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-usb3-regulator",
+ .data = &uniphier_pxs2_usb3_data,
+ },
{ /* Sentinel */ },
};
MODULE_DEVICE_TABLE(of, uniphier_regulator_match);
diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c
index 6d5ae25d08d1..c4213f096fe5 100644
--- a/drivers/regulator/vqmmc-ipq4019-regulator.c
+++ b/drivers/regulator/vqmmc-ipq4019-regulator.c
@@ -48,7 +48,6 @@ static int ipq4019_regulator_probe(struct platform_device *pdev)
struct regulator_init_data *init_data;
struct regulator_config cfg = {};
struct regulator_dev *rdev;
- struct resource *res;
struct regmap *rmap;
void __iomem *base;
@@ -57,8 +56,7 @@ static int ipq4019_regulator_probe(struct platform_device *pdev)
if (!init_data)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index be799a5abf8a..85024eb1d2ea 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -58,7 +58,7 @@ config RESET_BRCMSTB
a SUN_TOP_CTRL_SW_INIT style controller.
config RESET_BRCMSTB_RESCAL
- bool "Broadcom STB RESCAL reset controller"
+ tristate "Broadcom STB RESCAL reset controller"
depends on HAS_IOMEM
depends on ARCH_BRCMSTB || COMPILE_TEST
default ARCH_BRCMSTB
@@ -116,7 +116,7 @@ config RESET_LPC18XX
config RESET_MCHP_SPARX5
bool "Microchip Sparx5 reset driver"
- depends on ARCH_SPARX5 || COMPILE_TEST
+ depends on ARCH_SPARX5 || SOC_LAN966 || COMPILE_TEST
default y if SPARX5_SWITCH
select MFD_SYSCON
help
@@ -147,8 +147,8 @@ config RESET_OXNAS
bool
config RESET_PISTACHIO
- bool "Pistachio Reset Driver" if COMPILE_TEST
- default MACH_PISTACHIO
+ bool "Pistachio Reset Driver"
+ depends on MIPS || COMPILE_TEST
help
This enables the reset driver for ImgTec Pistachio SoCs.
diff --git a/drivers/reset/reset-brcmstb-rescal.c b/drivers/reset/reset-brcmstb-rescal.c
index b6f074d6a65f..433fa0c40e47 100644
--- a/drivers/reset/reset-brcmstb-rescal.c
+++ b/drivers/reset/reset-brcmstb-rescal.c
@@ -38,7 +38,7 @@ static int brcm_rescal_reset_set(struct reset_controller_dev *rcdev,
}
ret = readl_poll_timeout(base + BRCM_RESCAL_STATUS, reg,
- !(reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
+ (reg & BRCM_RESCAL_STATUS_BIT), 100, 1000);
if (ret) {
dev_err(data->dev, "time out on SATA/PCIe rescal\n");
return ret;
diff --git a/drivers/reset/reset-microchip-sparx5.c b/drivers/reset/reset-microchip-sparx5.c
index f01e7db8e83b..00b612a0effa 100644
--- a/drivers/reset/reset-microchip-sparx5.c
+++ b/drivers/reset/reset-microchip-sparx5.c
@@ -13,15 +13,18 @@
#include <linux/regmap.h>
#include <linux/reset-controller.h>
-#define PROTECT_REG 0x84
-#define PROTECT_BIT BIT(10)
-#define SOFT_RESET_REG 0x00
-#define SOFT_RESET_BIT BIT(1)
+struct reset_props {
+ u32 protect_reg;
+ u32 protect_bit;
+ u32 reset_reg;
+ u32 reset_bit;
+};
struct mchp_reset_context {
struct regmap *cpu_ctrl;
struct regmap *gcb_ctrl;
struct reset_controller_dev rcdev;
+ const struct reset_props *props;
};
static struct regmap_config sparx5_reset_regmap_config = {
@@ -38,14 +41,16 @@ static int sparx5_switch_reset(struct reset_controller_dev *rcdev,
u32 val;
/* Make sure the core is PROTECTED from reset */
- regmap_update_bits(ctx->cpu_ctrl, PROTECT_REG, PROTECT_BIT, PROTECT_BIT);
+ regmap_update_bits(ctx->cpu_ctrl, ctx->props->protect_reg,
+ ctx->props->protect_bit, ctx->props->protect_bit);
/* Start soft reset */
- regmap_write(ctx->gcb_ctrl, SOFT_RESET_REG, SOFT_RESET_BIT);
+ regmap_write(ctx->gcb_ctrl, ctx->props->reset_reg,
+ ctx->props->reset_bit);
/* Wait for soft reset done */
- return regmap_read_poll_timeout(ctx->gcb_ctrl, SOFT_RESET_REG, val,
- (val & SOFT_RESET_BIT) == 0,
+ return regmap_read_poll_timeout(ctx->gcb_ctrl, ctx->props->reset_reg, val,
+ (val & ctx->props->reset_bit) == 0,
1, 100);
}
@@ -115,13 +120,32 @@ static int mchp_sparx5_reset_probe(struct platform_device *pdev)
ctx->rcdev.nr_resets = 1;
ctx->rcdev.ops = &sparx5_reset_ops;
ctx->rcdev.of_node = dn;
+ ctx->props = device_get_match_data(&pdev->dev);
return devm_reset_controller_register(&pdev->dev, &ctx->rcdev);
}
+static const struct reset_props reset_props_sparx5 = {
+ .protect_reg = 0x84,
+ .protect_bit = BIT(10),
+ .reset_reg = 0x0,
+ .reset_bit = BIT(1),
+};
+
+static const struct reset_props reset_props_lan966x = {
+ .protect_reg = 0x88,
+ .protect_bit = BIT(5),
+ .reset_reg = 0x0,
+ .reset_bit = BIT(1),
+};
+
static const struct of_device_id mchp_sparx5_reset_of_match[] = {
{
.compatible = "microchip,sparx5-switch-reset",
+ .data = &reset_props_sparx5,
+ }, {
+ .compatible = "microchip,lan966x-switch-reset",
+ .data = &reset_props_lan966x,
},
{ }
};
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c
index 2a72f861f798..8c6492e5693c 100644
--- a/drivers/reset/reset-socfpga.c
+++ b/drivers/reset/reset-socfpga.c
@@ -92,3 +92,29 @@ void __init socfpga_reset_init(void)
for_each_matching_node(np, socfpga_early_reset_dt_ids)
a10_reset_init(np);
}
+
+/*
+ * The early driver is problematic, because it doesn't register
+ * itself as a driver. This causes certain device links to prevent
+ * consumer devices from probing. The hacky solution is to register
+ * an empty driver, whose only job is to attach itself to the reset
+ * manager and call probe.
+ */
+static const struct of_device_id socfpga_reset_dt_ids[] = {
+ { .compatible = "altr,rst-mgr", },
+ { /* sentinel */ },
+};
+
+static int reset_simple_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+
+static struct platform_driver reset_socfpga_driver = {
+ .probe = reset_simple_probe,
+ .driver = {
+ .name = "socfpga-reset",
+ .of_match_table = socfpga_reset_dt_ids,
+ },
+};
+builtin_platform_driver(reset_socfpga_driver);
diff --git a/drivers/reset/reset-uniphier-glue.c b/drivers/reset/reset-uniphier-glue.c
index 027990b79f61..908c1d5bc41e 100644
--- a/drivers/reset/reset-uniphier-glue.c
+++ b/drivers/reset/reset-uniphier-glue.c
@@ -156,6 +156,10 @@ static const struct of_device_id uniphier_glue_reset_match[] = {
.data = &uniphier_pxs2_data,
},
{
+ .compatible = "socionext,uniphier-nx1-usb3-reset",
+ .data = &uniphier_pxs2_data,
+ },
+ {
.compatible = "socionext,uniphier-pro4-ahci-reset",
.data = &uniphier_pro4_data,
},
diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c
index 5f75783f9397..ff7580f38056 100644
--- a/drivers/reset/reset-uniphier.c
+++ b/drivers/reset/reset-uniphier.c
@@ -136,6 +136,21 @@ static const struct uniphier_reset_data uniphier_pxs3_sys_reset_data[] = {
UNIPHIER_RESETX(28, 0x200c, 7), /* SATA0 */
UNIPHIER_RESETX(29, 0x200c, 8), /* SATA1 */
UNIPHIER_RESETX(30, 0x200c, 21), /* SATA-PHY */
+ UNIPHIER_RESETX(40, 0x2008, 0), /* AIO */
+ UNIPHIER_RESETX(42, 0x2010, 2), /* EXIV */
+ UNIPHIER_RESET_END,
+};
+
+static const struct uniphier_reset_data uniphier_nx1_sys_reset_data[] = {
+ UNIPHIER_RESETX(4, 0x2008, 8), /* eMMC */
+ UNIPHIER_RESETX(6, 0x200c, 0), /* Ether */
+ UNIPHIER_RESETX(12, 0x200c, 16), /* USB30 link */
+ UNIPHIER_RESETX(16, 0x200c, 24), /* USB30-PHY0 */
+ UNIPHIER_RESETX(17, 0x200c, 25), /* USB30-PHY1 */
+ UNIPHIER_RESETX(18, 0x200c, 26), /* USB30-PHY2 */
+ UNIPHIER_RESETX(24, 0x200c, 8), /* PCIe */
+ UNIPHIER_RESETX(52, 0x2010, 0), /* VOC */
+ UNIPHIER_RESETX(58, 0x2010, 8), /* HDMI-Tx */
UNIPHIER_RESET_END,
};
@@ -400,6 +415,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-reset",
.data = uniphier_pxs3_sys_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-reset",
+ .data = uniphier_nx1_sys_reset_data,
+ },
/* Media I/O reset, SD reset */
{
.compatible = "socionext,uniphier-ld4-mio-reset",
@@ -437,6 +456,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-sd-reset",
.data = uniphier_pro5_sd_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-sd-reset",
+ .data = uniphier_pro5_sd_reset_data,
+ },
/* Peripheral reset */
{
.compatible = "socionext,uniphier-ld4-peri-reset",
@@ -470,6 +493,10 @@ static const struct of_device_id uniphier_reset_match[] = {
.compatible = "socionext,uniphier-pxs3-peri-reset",
.data = uniphier_pro4_peri_reset_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-peri-reset",
+ .data = uniphier_pro4_peri_reset_data,
+ },
/* Analog signal amplifiers reset */
{
.compatible = "socionext,uniphier-ld11-adamv-reset",
diff --git a/drivers/reset/tegra/reset-bpmp.c b/drivers/reset/tegra/reset-bpmp.c
index 24d3395964cc..4c5bba52b105 100644
--- a/drivers/reset/tegra/reset-bpmp.c
+++ b/drivers/reset/tegra/reset-bpmp.c
@@ -20,6 +20,7 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
struct tegra_bpmp *bpmp = to_tegra_bpmp(rstc);
struct mrq_reset_request request;
struct tegra_bpmp_message msg;
+ int err;
memset(&request, 0, sizeof(request));
request.cmd = command;
@@ -30,7 +31,13 @@ static int tegra_bpmp_reset_common(struct reset_controller_dev *rstc,
msg.tx.data = &request;
msg.tx.size = sizeof(request);
- return tegra_bpmp_transfer(bpmp, &msg);
+ err = tegra_bpmp_transfer(bpmp, &msg);
+ if (err)
+ return err;
+ if (msg.rx.ret)
+ return -EINVAL;
+
+ return 0;
}
static int tegra_bpmp_reset_module(struct reset_controller_dev *rstc,
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index e1bc5214494e..7208eeb8459a 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1404,16 +1404,10 @@ config RTC_DRV_OMAP
This driver can also be built as a module, if so, module
will be called rtc-omap.
-config HAVE_S3C_RTC
- bool
- help
- This will include RTC support for Samsung SoCs. If
- you want to include RTC support for any machine, kindly
- select this in the respective mach-XXXX/Kconfig file.
-
config RTC_DRV_S3C
tristate "Samsung S3C series SoC RTC"
- depends on ARCH_S3C64XX || HAVE_S3C_RTC || COMPILE_TEST
+ depends on ARCH_EXYNOS || ARCH_S3C64XX || ARCH_S3C24XX || ARCH_S5PV210 || \
+ COMPILE_TEST
help
RTC (Realtime Clock) driver for the clock inbuilt into the
Samsung S3C24XX series of SoCs. This can provide periodic
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e34c6cc61983..8e87a31e329d 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2077,12 +2077,15 @@ static void __dasd_device_check_path_events(struct dasd_device *device)
if (device->stopped & ~(DASD_STOPPED_DC_WAIT))
return;
+
+ dasd_path_clear_all_verify(device);
+ dasd_path_clear_all_fcsec(device);
+
rc = device->discipline->pe_handler(device, tbvpm, fcsecpm);
if (rc) {
+ dasd_path_add_tbvpm(device, tbvpm);
+ dasd_path_add_fcsecpm(device, fcsecpm);
dasd_device_set_timer(device, 50);
- } else {
- dasd_path_clear_all_verify(device);
- dasd_path_clear_all_fcsec(device);
}
};
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 4691a3c35d72..299001ad9a32 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -201,7 +201,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
struct ccw1 *ccw;
struct dasd_ccw_req *dctl_cqr;
- dctl_cqr = dasd_alloc_erp_request((char *) &erp->magic, 1,
+ dctl_cqr = dasd_alloc_erp_request(erp->magic, 1,
sizeof(struct DCTL_data),
device);
if (IS_ERR(dctl_cqr)) {
@@ -1652,7 +1652,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
}
/* Build new ERP request including DE/LO */
- erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ erp = dasd_alloc_erp_request(cqr->magic,
2 + 1,/* DE/LO + TIC */
sizeof(struct DE_eckd_data) +
sizeof(struct LO_eckd_data), device);
@@ -2388,7 +2388,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
}
/* allocate additional request block */
- erp = dasd_alloc_erp_request((char *) &cqr->magic,
+ erp = dasd_alloc_erp_request(cqr->magic,
cplength, datasize, device);
if (IS_ERR(erp)) {
if (cqr->retries <= 0) {
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 460e0f1cca53..8410a25a65c1 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -560,8 +560,8 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
return -EINVAL;
}
pfxdata->format = format;
- pfxdata->base_address = basepriv->ned->unit_addr;
- pfxdata->base_lss = basepriv->ned->ID;
+ pfxdata->base_address = basepriv->conf.ned->unit_addr;
+ pfxdata->base_lss = basepriv->conf.ned->ID;
pfxdata->validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -736,32 +736,30 @@ dasd_eckd_cdl_reclen(int recid)
return LABEL_SIZE;
}
/* create unique id from private structure. */
-static void create_uid(struct dasd_eckd_private *private)
+static void create_uid(struct dasd_conf *conf, struct dasd_uid *uid)
{
int count;
- struct dasd_uid *uid;
- uid = &private->uid;
memset(uid, 0, sizeof(struct dasd_uid));
- memcpy(uid->vendor, private->ned->HDA_manufacturer,
+ memcpy(uid->vendor, conf->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
- memcpy(uid->serial, &private->ned->serial,
+ memcpy(uid->serial, &conf->ned->serial,
sizeof(uid->serial) - 1);
EBCASC(uid->serial, sizeof(uid->serial) - 1);
- uid->ssid = private->gneq->subsystemID;
- uid->real_unit_addr = private->ned->unit_addr;
- if (private->sneq) {
- uid->type = private->sneq->sua_flags;
+ uid->ssid = conf->gneq->subsystemID;
+ uid->real_unit_addr = conf->ned->unit_addr;
+ if (conf->sneq) {
+ uid->type = conf->sneq->sua_flags;
if (uid->type == UA_BASE_PAV_ALIAS)
- uid->base_unit_addr = private->sneq->base_unit_addr;
+ uid->base_unit_addr = conf->sneq->base_unit_addr;
} else {
uid->type = UA_BASE_DEVICE;
}
- if (private->vdsneq) {
+ if (conf->vdsneq) {
for (count = 0; count < 16; count++) {
sprintf(uid->vduit+2*count, "%02x",
- private->vdsneq->uit[count]);
+ conf->vdsneq->uit[count]);
}
}
}
@@ -776,10 +774,10 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
if (!private)
return -ENODEV;
- if (!private->ned || !private->gneq)
+ if (!private->conf.ned || !private->conf.gneq)
return -ENODEV;
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- create_uid(private);
+ create_uid(&private->conf, &private->uid);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -803,14 +801,15 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
* return 0 for match
*/
static int dasd_eckd_compare_path_uid(struct dasd_device *device,
- struct dasd_eckd_private *private)
+ struct dasd_conf *path_conf)
{
struct dasd_uid device_uid;
+ struct dasd_uid path_uid;
- create_uid(private);
+ create_uid(path_conf, &path_uid);
dasd_eckd_get_uid(device, &device_uid);
- return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+ return memcmp(&device_uid, &path_uid, sizeof(struct dasd_uid));
}
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
@@ -946,34 +945,34 @@ out_error:
return ret;
}
-static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
+static int dasd_eckd_identify_conf_parts(struct dasd_conf *conf)
{
struct dasd_sneq *sneq;
int i, count;
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
- count = private->conf_len / sizeof(struct dasd_sneq);
- sneq = (struct dasd_sneq *)private->conf_data;
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
+ count = conf->len / sizeof(struct dasd_sneq);
+ sneq = (struct dasd_sneq *)conf->data;
for (i = 0; i < count; ++i) {
if (sneq->flags.identifier == 1 && sneq->format == 1)
- private->sneq = sneq;
+ conf->sneq = sneq;
else if (sneq->flags.identifier == 1 && sneq->format == 4)
- private->vdsneq = (struct vd_sneq *)sneq;
+ conf->vdsneq = (struct vd_sneq *)sneq;
else if (sneq->flags.identifier == 2)
- private->gneq = (struct dasd_gneq *)sneq;
+ conf->gneq = (struct dasd_gneq *)sneq;
else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
- private->ned = (struct dasd_ned *)sneq;
+ conf->ned = (struct dasd_ned *)sneq;
sneq++;
}
- if (!private->ned || !private->gneq) {
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
+ if (!conf->ned || !conf->gneq) {
+ conf->ned = NULL;
+ conf->sneq = NULL;
+ conf->vdsneq = NULL;
+ conf->gneq = NULL;
return -EINVAL;
}
return 0;
@@ -1016,9 +1015,9 @@ static void dasd_eckd_store_conf_data(struct dasd_device *device,
* with the new one if this points to the same data
*/
cdp = device->path[chp].conf_data;
- if (private->conf_data == cdp) {
- private->conf_data = (void *)conf_data;
- dasd_eckd_identify_conf_parts(private);
+ if (private->conf.data == cdp) {
+ private->conf.data = (void *)conf_data;
+ dasd_eckd_identify_conf_parts(&private->conf);
}
ccw_device_get_schid(device->cdev, &sch_id);
device->path[chp].conf_data = conf_data;
@@ -1036,8 +1035,8 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device)
struct dasd_eckd_private *private = device->private;
int i;
- private->conf_data = NULL;
- private->conf_len = 0;
+ private->conf.data = NULL;
+ private->conf.len = 0;
for (i = 0; i < 8; i++) {
kfree(device->path[i].conf_data);
device->path[i].conf_data = NULL;
@@ -1071,15 +1070,55 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
+ char *print_uid)
+{
+ struct dasd_uid uid;
+
+ create_uid(conf, &uid);
+ if (strlen(uid.vduit) > 0)
+ snprintf(print_uid, sizeof(*print_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr, uid.vduit);
+ else
+ snprintf(print_uid, sizeof(*print_uid),
+ "%s.%s.%04x.%02x",
+ uid.vendor, uid.serial, uid.ssid,
+ uid.real_unit_addr);
+}
+
+static int dasd_eckd_check_cabling(struct dasd_device *device,
+ void *conf_data, __u8 lpm)
+{
+ struct dasd_eckd_private *private = device->private;
+ char print_path_uid[60], print_device_uid[60];
+ struct dasd_conf path_conf;
+
+ path_conf.data = conf_data;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf))
+ return 1;
+
+ if (dasd_eckd_compare_path_uid(device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_path_uid);
+ dasd_eckd_get_uid_string(&private->conf, print_device_uid);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to the same device, path %02X leads to device %s instead of %s\n",
+ lpm, print_path_uid, print_device_uid);
+ return 1;
+ }
+
+ return 0;
+}
+
static int dasd_eckd_read_conf(struct dasd_device *device)
{
void *conf_data;
int conf_len, conf_data_saved;
int rc, path_err, pos;
__u8 lpm, opm;
- struct dasd_eckd_private *private, path_private;
- struct dasd_uid *uid;
- char print_path_uid[60], print_device_uid[60];
+ struct dasd_eckd_private *private;
private = device->private;
opm = ccw_device_get_path_mask(device->cdev);
@@ -1109,11 +1148,11 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
if (!conf_data_saved) {
/* initially clear previously stored conf_data */
dasd_eckd_clear_conf_data(device);
- private->conf_data = conf_data;
- private->conf_len = conf_len;
- if (dasd_eckd_identify_conf_parts(private)) {
- private->conf_data = NULL;
- private->conf_len = 0;
+ private->conf.data = conf_data;
+ private->conf.len = conf_len;
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
+ private->conf.data = NULL;
+ private->conf.len = 0;
kfree(conf_data);
continue;
}
@@ -1123,59 +1162,11 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
*/
dasd_eckd_generate_uid(device);
conf_data_saved++;
- } else {
- path_private.conf_data = conf_data;
- path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
- if (dasd_eckd_identify_conf_parts(
- &path_private)) {
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
- kfree(conf_data);
- continue;
- }
- if (dasd_eckd_compare_path_uid(
- device, &path_private)) {
- uid = &path_private.uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_path_uid,
- sizeof(print_path_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_path_uid,
- sizeof(print_path_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
- uid = &private->uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_device_uid,
- sizeof(print_device_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_device_uid,
- sizeof(print_device_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
- dev_err(&device->cdev->dev,
- "Not all channel paths lead to "
- "the same device, path %02X leads to "
- "device %s instead of %s\n", lpm,
- print_path_uid, print_device_uid);
- path_err = -EINVAL;
- dasd_path_add_cablepm(device, lpm);
- continue;
- }
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
+ } else if (dasd_eckd_check_cabling(device, conf_data, lpm)) {
+ dasd_path_add_cablepm(device, lpm);
+ path_err = -EINVAL;
+ kfree(conf_data);
+ continue;
}
pos = pathmask_to_pos(lpm);
@@ -1197,8 +1188,6 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
}
}
- dasd_eckd_read_fc_security(device);
-
return path_err;
}
@@ -1213,7 +1202,7 @@ static u32 get_fcx_max_data(struct dasd_device *device)
return 0;
/* is transport mode supported? */
fcx_in_css = css_general_characteristics.fcx;
- fcx_in_gneq = private->gneq->reserved2[7] & 0x04;
+ fcx_in_gneq = private->conf.gneq->reserved2[7] & 0x04;
fcx_in_features = private->features.feature[40] & 0x80;
tpm = fcx_in_css && fcx_in_gneq && fcx_in_features;
@@ -1282,9 +1271,9 @@ static int rebuild_device_uid(struct dasd_device *device,
"returned error %d", rc);
break;
}
- memcpy(private->conf_data, data->rcd_buffer,
+ memcpy(private->conf.data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
- if (dasd_eckd_identify_conf_parts(private)) {
+ if (dasd_eckd_identify_conf_parts(&private->conf)) {
rc = -ENODEV;
} else /* first valid path is enough */
break;
@@ -1299,11 +1288,10 @@ static int rebuild_device_uid(struct dasd_device *device,
static void dasd_eckd_path_available_action(struct dasd_device *device,
struct pe_handler_work_data *data)
{
- struct dasd_eckd_private path_private;
- struct dasd_uid *uid;
__u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm, hpfpm, cablepm;
struct dasd_conf_data *conf_data;
+ struct dasd_conf path_conf;
unsigned long flags;
char print_uid[60];
int rc, pos;
@@ -1367,11 +1355,11 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
*/
memcpy(&path_rcd_buf, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
- path_private.conf_data = (void *) &path_rcd_buf;
- path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
- if (dasd_eckd_identify_conf_parts(&path_private)) {
- path_private.conf_data = NULL;
- path_private.conf_len = 0;
+ path_conf.data = (void *)&path_rcd_buf;
+ path_conf.len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_conf)) {
+ path_conf.data = NULL;
+ path_conf.len = 0;
continue;
}
@@ -1382,7 +1370,7 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
* the first working path UID will be used as device UID
*/
if (dasd_path_get_opm(device) &&
- dasd_eckd_compare_path_uid(device, &path_private)) {
+ dasd_eckd_compare_path_uid(device, &path_conf)) {
/*
* the comparison was not successful
* rebuild the device UID with at least one
@@ -1396,20 +1384,8 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
*/
if (rebuild_device_uid(device, data) ||
dasd_eckd_compare_path_uid(
- device, &path_private)) {
- uid = &path_private.uid;
- if (strlen(uid->vduit) > 0)
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x.%s",
- uid->vendor, uid->serial,
- uid->ssid, uid->real_unit_addr,
- uid->vduit);
- else
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x",
- uid->vendor, uid->serial,
- uid->ssid,
- uid->real_unit_addr);
+ device, &path_conf)) {
+ dasd_eckd_get_uid_string(&path_conf, print_uid);
dev_err(&device->cdev->dev,
"The newly added channel path %02X "
"will not be used because it leads "
@@ -1427,6 +1403,14 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
if (conf_data) {
memcpy(conf_data, data->rcd_buffer,
DASD_ECKD_RCD_DATA_SIZE);
+ } else {
+ /*
+ * path is operational, but the path config data could not
+ * be stored due to a low-memory condition;
+ * add it to the error path mask and schedule a path
+ * verification later so that it can be added again
+ */
+ epm |= lpm;
}
pos = pathmask_to_pos(lpm);
dasd_eckd_store_conf_data(device, conf_data, pos);
@@ -1447,7 +1431,10 @@ static void dasd_eckd_path_available_action(struct dasd_device *device,
}
dasd_path_add_nppm(device, npm);
dasd_path_add_ppm(device, ppm);
- dasd_path_add_tbvpm(device, epm);
+ if (epm) {
+ dasd_path_add_tbvpm(device, epm);
+ dasd_device_set_timer(device, 50);
+ }
dasd_path_add_cablepm(device, cablepm);
dasd_path_add_nohpfpm(device, hpfpm);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
@@ -1625,8 +1612,8 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
prssdp = cqr->data;
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_VSQ; /* Volume Storage Query */
- prssdp->lss = private->ned->ID;
- prssdp->volume = private->ned->unit_addr;
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
@@ -2085,11 +2072,11 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->path_thrhld = DASD_ECKD_PATH_THRHLD;
device->path_interval = DASD_ECKD_PATH_INTERVAL;
- if (private->gneq) {
+ if (private->conf.gneq) {
value = 1;
- for (i = 0; i < private->gneq->timeout.value; i++)
+ for (i = 0; i < private->conf.gneq->timeout.value; i++)
value = 10 * value;
- value = value * private->gneq->timeout.number;
+ value = value * private->conf.gneq->timeout.number;
/* do not accept useless values */
if (value != 0 && value <= DASD_EXPIRES_MAX)
device->default_expires = value;
@@ -2121,6 +2108,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
if (rc)
goto out_err3;
+ dasd_eckd_read_fc_security(device);
dasd_path_create_kobjects(device);
/* Read Feature Codes */
@@ -2195,10 +2183,10 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device)
return;
dasd_alias_disconnect_device_from_lcu(device);
- private->ned = NULL;
- private->sneq = NULL;
- private->vdsneq = NULL;
- private->gneq = NULL;
+ private->conf.ned = NULL;
+ private->conf.sneq = NULL;
+ private->conf.vdsneq = NULL;
+ private->conf.gneq = NULL;
dasd_eckd_clear_conf_data(device);
dasd_path_remove_kobjects(device);
}
@@ -3750,8 +3738,8 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
* subset.
*/
ras_data->op_flags.guarantee_init = !!(features->feature[56] & 0x01);
- ras_data->lss = private->ned->ID;
- ras_data->dev_addr = private->ned->unit_addr;
+ ras_data->lss = private->conf.ned->ID;
+ ras_data->dev_addr = private->conf.ned->unit_addr;
ras_data->nr_exts = nr_exts;
if (by_extent) {
@@ -4293,8 +4281,8 @@ static int prepare_itcw(struct itcw *itcw,
memset(&pfxdata, 0, sizeof(pfxdata));
pfxdata.format = 1; /* PFX with LRE */
- pfxdata.base_address = basepriv->ned->unit_addr;
- pfxdata.base_lss = basepriv->ned->ID;
+ pfxdata.base_address = basepriv->conf.ned->unit_addr;
+ pfxdata.base_lss = basepriv->conf.ned->ID;
pfxdata.validity.define_extent = 1;
/* private uid is kept up to date, conf_data may be outdated */
@@ -4963,9 +4951,9 @@ dasd_eckd_fill_info(struct dasd_device * device,
info->characteristics_size = sizeof(private->rdc_data);
memcpy(info->characteristics, &private->rdc_data,
sizeof(private->rdc_data));
- info->confdata_size = min((unsigned long)private->conf_len,
- sizeof(info->configuration_data));
- memcpy(info->configuration_data, private->conf_data,
+ info->confdata_size = min_t(unsigned long, private->conf.len,
+ sizeof(info->configuration_data));
+ memcpy(info->configuration_data, private->conf.data,
info->confdata_size);
return 0;
}
@@ -5808,6 +5796,8 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
if (rc)
goto out_err;
+ dasd_eckd_read_fc_security(device);
+
rc = dasd_eckd_generate_uid(device);
if (rc)
goto out_err;
@@ -5820,15 +5810,7 @@ static int dasd_eckd_reload_device(struct dasd_device *device)
dasd_eckd_get_uid(device, &uid);
if (old_base != uid.base_unit_addr) {
- if (strlen(uid.vduit) > 0)
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x.%s", uid.vendor, uid.serial,
- uid.ssid, uid.base_unit_addr, uid.vduit);
- else
- snprintf(print_uid, sizeof(print_uid),
- "%s.%s.%04x.%02x", uid.vendor, uid.serial,
- uid.ssid, uid.base_unit_addr);
-
+ dasd_eckd_get_uid_string(&private->conf, print_uid);
dev_info(&device->cdev->dev,
"An Alias device was reassigned to a new base device "
"with UID: %s\n", print_uid);
@@ -5966,8 +5948,8 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
prssdp->order = PSF_ORDER_PRSSD;
prssdp->suborder = PSF_SUBORDER_QHA; /* query host access */
/* LSS and Volume that will be queried */
- prssdp->lss = private->ned->ID;
- prssdp->volume = private->ned->unit_addr;
+ prssdp->lss = private->conf.ned->ID;
+ prssdp->volume = private->conf.ned->unit_addr;
/* all other bytes of prssdp must be zero */
ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_eckd.h b/drivers/s390/block/dasd_eckd.h
index 65e4630ad2ae..a91b265441cc 100644
--- a/drivers/s390/block/dasd_eckd.h
+++ b/drivers/s390/block/dasd_eckd.h
@@ -658,16 +658,19 @@ struct dasd_conf_data {
struct dasd_gneq gneq;
} __packed;
-struct dasd_eckd_private {
- struct dasd_eckd_characteristics rdc_data;
- u8 *conf_data;
- int conf_len;
-
+struct dasd_conf {
+ u8 *data;
+ int len;
/* pointers to specific parts in the conf_data */
struct dasd_ned *ned;
struct dasd_sneq *sneq;
struct vd_sneq *vdsneq;
struct dasd_gneq *gneq;
+};
+
+struct dasd_eckd_private {
+ struct dasd_eckd_characteristics rdc_data;
+ struct dasd_conf conf;
struct eckd_count count_area[5];
int init_cqr_status;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index ba4fa372d02d..c07e6e713518 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -24,7 +24,7 @@
#include "dasd_int.h"
struct dasd_ccw_req *
-dasd_alloc_erp_request(char *magic, int cplength, int datasize,
+dasd_alloc_erp_request(unsigned int magic, int cplength, int datasize,
struct dasd_device * device)
{
unsigned long flags;
@@ -33,8 +33,8 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
int size;
/* Sanity checks */
- BUG_ON( magic == NULL || datasize > PAGE_SIZE ||
- (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
+ BUG_ON(datasize > PAGE_SIZE ||
+ (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
@@ -62,7 +62,7 @@ dasd_alloc_erp_request(char *magic, int cplength, int datasize,
cqr->data = data;
memset(cqr->data, 0, datasize);
}
- strncpy((char *) &cqr->magic, magic, 4);
+ cqr->magic = magic;
ASCEBC((char *) &cqr->magic, 4);
set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
dasd_get_device(device);
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index fa966e0db6ca..3a6f3af240fa 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -14,6 +14,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
+#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 155428bfed8a..8b458010f88a 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -887,7 +887,7 @@ void dasd_proc_exit(void);
/* externals in dasd_erp.c */
struct dasd_ccw_req *dasd_default_erp_action(struct dasd_ccw_req *);
struct dasd_ccw_req *dasd_default_erp_postaction(struct dasd_ccw_req *);
-struct dasd_ccw_req *dasd_alloc_erp_request(char *, int, int,
+struct dasd_ccw_req *dasd_alloc_erp_request(unsigned int, int, int,
struct dasd_device *);
void dasd_free_erp_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_log_sense(struct dasd_ccw_req *, struct irb *);
@@ -1305,6 +1305,15 @@ static inline void dasd_path_add_ppm(struct dasd_device *device, __u8 pm)
dasd_path_preferred(device, chp);
}
+static inline void dasd_path_add_fcsecpm(struct dasd_device *device, __u8 pm)
+{
+ int chp;
+
+ for (chp = 0; chp < 8; chp++)
+ if (pm & (0x80 >> chp))
+ dasd_path_fcsec(device, chp);
+}
+
/*
* set functions for path masks
* the existing path mask will be replaced by the given path mask
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 468cbeb539ff..95349f95758c 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -650,8 +650,8 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
/**
* dasd_biodasdinfo() - fill out the dasd information structure
- * @disk [in]: pointer to gendisk structure that references a DASD
- * @info [out]: pointer to the dasd_information2_t structure
+ * @disk: [in] pointer to gendisk structure that references a DASD
+ * @info: [out] pointer to the dasd_information2_t structure
*
* Provide access to DASD specific information.
* The gendisk structure is checked if it belongs to the DASD driver by
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 5be3d1c39a78..59e513d34b0f 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -30,7 +30,7 @@
static int dcssblk_open(struct block_device *bdev, fmode_t mode);
static void dcssblk_release(struct gendisk *disk, fmode_t mode);
-static blk_qc_t dcssblk_submit_bio(struct bio *bio);
+static void dcssblk_submit_bio(struct bio *bio);
static long dcssblk_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
long nr_pages, void **kaddr, pfn_t *pfn);
@@ -854,7 +854,7 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
up_write(&dcssblk_devices_sem);
}
-static blk_qc_t
+static void
dcssblk_submit_bio(struct bio *bio)
{
struct dcssblk_dev_info *dev_info;
@@ -907,10 +907,9 @@ dcssblk_submit_bio(struct bio *bio)
bytes_done += bvec.bv_len;
}
bio_endio(bio);
- return BLK_QC_T_NONE;
+ return;
fail:
bio_io_error(bio);
- return BLK_QC_T_NONE;
}
static long
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 20efafe47897..efbb5e5eca05 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -24,19 +24,6 @@
#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
static struct kmem_cache *qdio_q_cache;
-static struct kmem_cache *qdio_aob_cache;
-
-struct qaob *qdio_allocate_aob(void)
-{
- return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
-}
-EXPORT_SYMBOL_GPL(qdio_allocate_aob);
-
-void qdio_release_aob(struct qaob *aob)
-{
- kmem_cache_free(qdio_aob_cache, aob);
-}
-EXPORT_SYMBOL_GPL(qdio_release_aob);
/**
* qdio_free_buffers() - free qdio buffers
@@ -447,39 +434,22 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr)
int __init qdio_setup_init(void)
{
- int rc;
-
qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
256, 0, NULL);
if (!qdio_q_cache)
return -ENOMEM;
- qdio_aob_cache = kmem_cache_create("qdio_aob",
- sizeof(struct qaob),
- sizeof(struct qaob),
- 0,
- NULL);
- if (!qdio_aob_cache) {
- rc = -ENOMEM;
- goto free_qdio_q_cache;
- }
-
/* Check for OSA/FCP thin interrupts (bit 67). */
DBF_EVENT("thinint:%1d",
(css_general_characteristics.aif_osa) ? 1 : 0);
/* Check for QEBSM support in general (bit 58). */
DBF_EVENT("cssQEBSM:%1d", css_general_characteristics.qebsm);
- rc = 0;
-out:
- return rc;
-free_qdio_q_cache:
- kmem_cache_destroy(qdio_q_cache);
- goto out;
+
+ return 0;
}
void qdio_setup_exit(void)
{
- kmem_cache_destroy(qdio_aob_cache);
kmem_cache_destroy(qdio_q_cache);
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index 76099bcb765b..040742777095 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -137,77 +137,107 @@ static void vfio_ccw_sch_irq(struct subchannel *sch)
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}
-static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
+static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
{
- if (private->crw_region)
- kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
- if (private->schib_region)
- kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
- if (private->cmd_region)
- kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
- if (private->io_region)
- kmem_cache_free(vfio_ccw_io_region, private->io_region);
-}
-
-static int vfio_ccw_sch_probe(struct subchannel *sch)
-{
- struct pmcw *pmcw = &sch->schib.pmcw;
struct vfio_ccw_private *private;
- int ret = -ENOMEM;
- if (pmcw->qf) {
- dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
- dev_name(&sch->dev));
- return -ENODEV;
- }
-
- private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
+ private = kzalloc(sizeof(*private), GFP_KERNEL);
if (!private)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
+
+ private->sch = sch;
+ mutex_init(&private->io_mutex);
+ private->state = VFIO_CCW_STATE_NOT_OPER;
+ INIT_LIST_HEAD(&private->crw);
+ INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
+ INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
+ atomic_set(&private->avail, 1);
private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
GFP_KERNEL);
if (!private->cp.guest_cp)
- goto out_free;
+ goto out_free_private;
private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
GFP_KERNEL | GFP_DMA);
if (!private->io_region)
- goto out_free;
+ goto out_free_cp;
private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
GFP_KERNEL | GFP_DMA);
if (!private->cmd_region)
- goto out_free;
+ goto out_free_io;
private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
GFP_KERNEL | GFP_DMA);
if (!private->schib_region)
- goto out_free;
+ goto out_free_cmd;
private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
GFP_KERNEL | GFP_DMA);
if (!private->crw_region)
- goto out_free;
+ goto out_free_schib;
+ return private;
+
+out_free_schib:
+ kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+out_free_cmd:
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+out_free_io:
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+out_free_cp:
+ kfree(private->cp.guest_cp);
+out_free_private:
+ mutex_destroy(&private->io_mutex);
+ kfree(private);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void vfio_ccw_free_private(struct vfio_ccw_private *private)
+{
+ struct vfio_ccw_crw *crw, *temp;
+
+ list_for_each_entry_safe(crw, temp, &private->crw, next) {
+ list_del(&crw->next);
+ kfree(crw);
+ }
+
+ kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
+ kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
+ kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
+ kmem_cache_free(vfio_ccw_io_region, private->io_region);
+ kfree(private->cp.guest_cp);
+ mutex_destroy(&private->io_mutex);
+ kfree(private);
+}
+
+static int vfio_ccw_sch_probe(struct subchannel *sch)
+{
+ struct pmcw *pmcw = &sch->schib.pmcw;
+ struct vfio_ccw_private *private;
+ int ret = -ENOMEM;
+
+ if (pmcw->qf) {
+ dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
+ dev_name(&sch->dev));
+ return -ENODEV;
+ }
+
+ private = vfio_ccw_alloc_private(sch);
+ if (IS_ERR(private))
+ return PTR_ERR(private);
- private->sch = sch;
dev_set_drvdata(&sch->dev, private);
- mutex_init(&private->io_mutex);
spin_lock_irq(sch->lock);
- private->state = VFIO_CCW_STATE_NOT_OPER;
sch->isc = VFIO_CCW_ISC;
ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
spin_unlock_irq(sch->lock);
if (ret)
goto out_free;
- INIT_LIST_HEAD(&private->crw);
- INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
- INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
- atomic_set(&private->avail, 1);
private->state = VFIO_CCW_STATE_STANDBY;
ret = vfio_ccw_mdev_reg(sch);
@@ -228,31 +258,20 @@ out_disable:
cio_disable_subchannel(sch);
out_free:
dev_set_drvdata(&sch->dev, NULL);
- vfio_ccw_free_regions(private);
- kfree(private->cp.guest_cp);
- kfree(private);
+ vfio_ccw_free_private(private);
return ret;
}
static void vfio_ccw_sch_remove(struct subchannel *sch)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
- struct vfio_ccw_crw *crw, *temp;
vfio_ccw_sch_quiesce(sch);
-
- list_for_each_entry_safe(crw, temp, &private->crw, next) {
- list_del(&crw->next);
- kfree(crw);
- }
-
vfio_ccw_mdev_unreg(sch);
dev_set_drvdata(&sch->dev, NULL);
- vfio_ccw_free_regions(private);
- kfree(private->cp.guest_cp);
- kfree(private);
+ vfio_ccw_free_private(private);
VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
sch->schid.cssid, sch->schid.ssid,
@@ -449,7 +468,7 @@ static int __init vfio_ccw_sch_init(void)
vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
if (!vfio_ccw_work_q) {
ret = -ENOMEM;
- goto out_err;
+ goto out_regions;
}
vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
@@ -458,7 +477,7 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_io_region), NULL);
if (!vfio_ccw_io_region) {
ret = -ENOMEM;
- goto out_err;
+ goto out_regions;
}
vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
@@ -467,7 +486,7 @@ static int __init vfio_ccw_sch_init(void)
sizeof(struct ccw_cmd_region), NULL);
if (!vfio_ccw_cmd_region) {
ret = -ENOMEM;
- goto out_err;
+ goto out_regions;
}
vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
@@ -477,7 +496,7 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_schib_region) {
ret = -ENOMEM;
- goto out_err;
+ goto out_regions;
}
vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
@@ -487,19 +506,25 @@ static int __init vfio_ccw_sch_init(void)
if (!vfio_ccw_crw_region) {
ret = -ENOMEM;
- goto out_err;
+ goto out_regions;
}
+ ret = mdev_register_driver(&vfio_ccw_mdev_driver);
+ if (ret)
+ goto out_regions;
+
isc_register(VFIO_CCW_ISC);
ret = css_driver_register(&vfio_ccw_sch_driver);
if (ret) {
isc_unregister(VFIO_CCW_ISC);
- goto out_err;
+ goto out_driver;
}
return ret;
-out_err:
+out_driver:
+ mdev_unregister_driver(&vfio_ccw_mdev_driver);
+out_regions:
vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
vfio_ccw_debug_exit();
@@ -509,6 +534,7 @@ out_err:
static void __exit vfio_ccw_sch_exit(void)
{
css_driver_unregister(&vfio_ccw_sch_driver);
+ mdev_unregister_driver(&vfio_ccw_mdev_driver);
isc_unregister(VFIO_CCW_ISC);
vfio_ccw_destroy_regions();
destroy_workqueue(vfio_ccw_work_q);
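
The probe rework above concentrates every allocation for the private structure in vfio_ccw_alloc_private() and unwinds failures through per-step labels released in reverse order, instead of one catch-all out_free. For reference, a minimal sketch of that unwind idiom follows; struct foo, foo_alloc() and region_cache are hypothetical names used only for illustration, not part of the vfio-ccw code.

    #include <linux/slab.h>
    #include <linux/err.h>

    /* Hypothetical structure with two allocations that must be unwound. */
    struct foo {
    	void *a;
    	void *b;
    };

    static struct foo *foo_alloc(struct kmem_cache *region_cache)
    {
    	struct foo *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

    	if (!foo)
    		return ERR_PTR(-ENOMEM);

    	foo->a = kzalloc(64, GFP_KERNEL);
    	if (!foo->a)
    		goto out_free_foo;

    	foo->b = kmem_cache_zalloc(region_cache, GFP_KERNEL);
    	if (!foo->b)
    		goto out_free_a;

    	return foo;

    out_free_a:		/* release in the reverse order of allocation */
    	kfree(foo->a);
    out_free_foo:
    	kfree(foo);
    	return ERR_PTR(-ENOMEM);
    }
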
diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
index 7f540ad0b568..d8589afac272 100644
--- a/drivers/s390/cio/vfio_ccw_ops.c
+++ b/drivers/s390/cio/vfio_ccw_ops.c
@@ -17,13 +17,13 @@
#include "vfio_ccw_private.h"
-static int vfio_ccw_mdev_reset(struct mdev_device *mdev)
+static const struct vfio_device_ops vfio_ccw_dev_ops;
+
+static int vfio_ccw_mdev_reset(struct vfio_ccw_private *private)
{
- struct vfio_ccw_private *private;
struct subchannel *sch;
int ret;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
sch = private->sch;
/*
* TODO:
@@ -61,7 +61,7 @@ static int vfio_ccw_mdev_notifier(struct notifier_block *nb,
if (!cp_iova_pinned(&private->cp, unmap->iova))
return NOTIFY_OK;
- if (vfio_ccw_mdev_reset(private->mdev))
+ if (vfio_ccw_mdev_reset(private))
return NOTIFY_BAD;
cp_free(&private->cp);
@@ -113,10 +113,10 @@ static struct attribute_group *mdev_type_groups[] = {
NULL,
};
-static int vfio_ccw_mdev_create(struct mdev_device *mdev)
+static int vfio_ccw_mdev_probe(struct mdev_device *mdev)
{
- struct vfio_ccw_private *private =
- dev_get_drvdata(mdev_parent_dev(mdev));
+ struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
+ int ret;
if (private->state == VFIO_CCW_STATE_NOT_OPER)
return -ENODEV;
@@ -124,6 +124,10 @@ static int vfio_ccw_mdev_create(struct mdev_device *mdev)
if (atomic_dec_if_positive(&private->avail) < 0)
return -EPERM;
+ memset(&private->vdev, 0, sizeof(private->vdev));
+ vfio_init_group_dev(&private->vdev, &mdev->dev,
+ &vfio_ccw_dev_ops);
+
private->mdev = mdev;
private->state = VFIO_CCW_STATE_IDLE;
@@ -132,19 +136,31 @@ static int vfio_ccw_mdev_create(struct mdev_device *mdev)
private->sch->schid.ssid,
private->sch->schid.sch_no);
+ ret = vfio_register_emulated_iommu_dev(&private->vdev);
+ if (ret)
+ goto err_atomic;
+ dev_set_drvdata(&mdev->dev, private);
return 0;
+
+err_atomic:
+ vfio_uninit_group_dev(&private->vdev);
+ atomic_inc(&private->avail);
+ private->mdev = NULL;
+ private->state = VFIO_CCW_STATE_IDLE;
+ return ret;
}
-static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
+static void vfio_ccw_mdev_remove(struct mdev_device *mdev)
{
- struct vfio_ccw_private *private =
- dev_get_drvdata(mdev_parent_dev(mdev));
+ struct vfio_ccw_private *private = dev_get_drvdata(mdev->dev.parent);
VFIO_CCW_MSG_EVENT(2, "mdev %pUl, sch %x.%x.%04x: remove\n",
mdev_uuid(mdev), private->sch->schid.cssid,
private->sch->schid.ssid,
private->sch->schid.sch_no);
+ vfio_unregister_group_dev(&private->vdev);
+
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
if (!vfio_ccw_sch_quiesce(private->sch))
@@ -152,23 +168,22 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
/* The state will be NOT_OPER on error. */
}
+ vfio_uninit_group_dev(&private->vdev);
cp_free(&private->cp);
private->mdev = NULL;
atomic_inc(&private->avail);
-
- return 0;
}
-static int vfio_ccw_mdev_open_device(struct mdev_device *mdev)
+static int vfio_ccw_mdev_open_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
- dev_get_drvdata(mdev_parent_dev(mdev));
+ container_of(vdev, struct vfio_ccw_private, vdev);
unsigned long events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
int ret;
private->nb.notifier_call = vfio_ccw_mdev_notifier;
- ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ ret = vfio_register_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
&events, &private->nb);
if (ret)
return ret;
@@ -189,27 +204,26 @@ static int vfio_ccw_mdev_open_device(struct mdev_device *mdev)
out_unregister:
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
+ vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY,
&private->nb);
return ret;
}
-static void vfio_ccw_mdev_close_device(struct mdev_device *mdev)
+static void vfio_ccw_mdev_close_device(struct vfio_device *vdev)
{
struct vfio_ccw_private *private =
- dev_get_drvdata(mdev_parent_dev(mdev));
+ container_of(vdev, struct vfio_ccw_private, vdev);
if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
(private->state != VFIO_CCW_STATE_STANDBY)) {
- if (!vfio_ccw_mdev_reset(mdev))
+ if (!vfio_ccw_mdev_reset(private))
private->state = VFIO_CCW_STATE_STANDBY;
/* The state will be NOT_OPER on error. */
}
cp_free(&private->cp);
vfio_ccw_unregister_dev_regions(private);
- vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &private->nb);
+ vfio_unregister_notifier(vdev->dev, VFIO_IOMMU_NOTIFY, &private->nb);
}
static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
@@ -233,15 +247,14 @@ static ssize_t vfio_ccw_mdev_read_io_region(struct vfio_ccw_private *private,
return ret;
}
-static ssize_t vfio_ccw_mdev_read(struct mdev_device *mdev,
+static ssize_t vfio_ccw_mdev_read(struct vfio_device *vdev,
char __user *buf,
size_t count,
loff_t *ppos)
{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
- struct vfio_ccw_private *private;
-
- private = dev_get_drvdata(mdev_parent_dev(mdev));
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
@@ -286,15 +299,14 @@ out_unlock:
return ret;
}
-static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
+static ssize_t vfio_ccw_mdev_write(struct vfio_device *vdev,
const char __user *buf,
size_t count,
loff_t *ppos)
{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
unsigned int index = VFIO_CCW_OFFSET_TO_INDEX(*ppos);
- struct vfio_ccw_private *private;
-
- private = dev_get_drvdata(mdev_parent_dev(mdev));
if (index >= VFIO_CCW_NUM_REGIONS + private->num_regions)
return -EINVAL;
@@ -311,12 +323,9 @@ static ssize_t vfio_ccw_mdev_write(struct mdev_device *mdev,
return -EINVAL;
}
-static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
- struct mdev_device *mdev)
+static int vfio_ccw_mdev_get_device_info(struct vfio_ccw_private *private,
+ struct vfio_device_info *info)
{
- struct vfio_ccw_private *private;
-
- private = dev_get_drvdata(mdev_parent_dev(mdev));
info->flags = VFIO_DEVICE_FLAGS_CCW | VFIO_DEVICE_FLAGS_RESET;
info->num_regions = VFIO_CCW_NUM_REGIONS + private->num_regions;
info->num_irqs = VFIO_CCW_NUM_IRQS;
@@ -324,14 +333,12 @@ static int vfio_ccw_mdev_get_device_info(struct vfio_device_info *info,
return 0;
}
-static int vfio_ccw_mdev_get_region_info(struct vfio_region_info *info,
- struct mdev_device *mdev,
+static int vfio_ccw_mdev_get_region_info(struct vfio_ccw_private *private,
+ struct vfio_region_info *info,
unsigned long arg)
{
- struct vfio_ccw_private *private;
int i;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
switch (info->index) {
case VFIO_CCW_CONFIG_REGION_INDEX:
info->offset = 0;
@@ -406,19 +413,16 @@ static int vfio_ccw_mdev_get_irq_info(struct vfio_irq_info *info)
return 0;
}
-static int vfio_ccw_mdev_set_irqs(struct mdev_device *mdev,
+static int vfio_ccw_mdev_set_irqs(struct vfio_ccw_private *private,
uint32_t flags,
uint32_t index,
void __user *data)
{
- struct vfio_ccw_private *private;
struct eventfd_ctx **ctx;
if (!(flags & VFIO_IRQ_SET_ACTION_TRIGGER))
return -EINVAL;
- private = dev_get_drvdata(mdev_parent_dev(mdev));
-
switch (index) {
case VFIO_CCW_IO_IRQ_INDEX:
ctx = &private->io_trigger;
@@ -520,10 +524,12 @@ void vfio_ccw_unregister_dev_regions(struct vfio_ccw_private *private)
private->region = NULL;
}
-static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
+static ssize_t vfio_ccw_mdev_ioctl(struct vfio_device *vdev,
unsigned int cmd,
unsigned long arg)
{
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
int ret = 0;
unsigned long minsz;
@@ -540,7 +546,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_device_info(&info, mdev);
+ ret = vfio_ccw_mdev_get_device_info(private, &info);
if (ret)
return ret;
@@ -558,7 +564,7 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
if (info.argsz < minsz)
return -EINVAL;
- ret = vfio_ccw_mdev_get_region_info(&info, mdev, arg);
+ ret = vfio_ccw_mdev_get_region_info(private, &info, arg);
if (ret)
return ret;
@@ -603,47 +609,59 @@ static ssize_t vfio_ccw_mdev_ioctl(struct mdev_device *mdev,
return ret;
data = (void __user *)(arg + minsz);
- return vfio_ccw_mdev_set_irqs(mdev, hdr.flags, hdr.index, data);
+ return vfio_ccw_mdev_set_irqs(private, hdr.flags, hdr.index,
+ data);
}
case VFIO_DEVICE_RESET:
- return vfio_ccw_mdev_reset(mdev);
+ return vfio_ccw_mdev_reset(private);
default:
return -ENOTTY;
}
}
/* Request removal of the device */
-static void vfio_ccw_mdev_request(struct mdev_device *mdev, unsigned int count)
+static void vfio_ccw_mdev_request(struct vfio_device *vdev, unsigned int count)
{
- struct vfio_ccw_private *private = dev_get_drvdata(mdev_parent_dev(mdev));
-
- if (!private)
- return;
+ struct vfio_ccw_private *private =
+ container_of(vdev, struct vfio_ccw_private, vdev);
+ struct device *dev = vdev->dev;
if (private->req_trigger) {
if (!(count % 10))
- dev_notice_ratelimited(mdev_dev(private->mdev),
+ dev_notice_ratelimited(dev,
"Relaying device request to user (#%u)\n",
count);
eventfd_signal(private->req_trigger, 1);
} else if (count == 0) {
- dev_notice(mdev_dev(private->mdev),
+ dev_notice(dev,
"No device request channel registered, blocked until released by user\n");
}
}
+static const struct vfio_device_ops vfio_ccw_dev_ops = {
+ .open_device = vfio_ccw_mdev_open_device,
+ .close_device = vfio_ccw_mdev_close_device,
+ .read = vfio_ccw_mdev_read,
+ .write = vfio_ccw_mdev_write,
+ .ioctl = vfio_ccw_mdev_ioctl,
+ .request = vfio_ccw_mdev_request,
+};
+
+struct mdev_driver vfio_ccw_mdev_driver = {
+ .driver = {
+ .name = "vfio_ccw_mdev",
+ .owner = THIS_MODULE,
+ .mod_name = KBUILD_MODNAME,
+ },
+ .probe = vfio_ccw_mdev_probe,
+ .remove = vfio_ccw_mdev_remove,
+};
+
static const struct mdev_parent_ops vfio_ccw_mdev_ops = {
.owner = THIS_MODULE,
+ .device_driver = &vfio_ccw_mdev_driver,
.supported_type_groups = mdev_type_groups,
- .create = vfio_ccw_mdev_create,
- .remove = vfio_ccw_mdev_remove,
- .open_device = vfio_ccw_mdev_open_device,
- .close_device = vfio_ccw_mdev_close_device,
- .read = vfio_ccw_mdev_read,
- .write = vfio_ccw_mdev_write,
- .ioctl = vfio_ccw_mdev_ioctl,
- .request = vfio_ccw_mdev_request,
};
int vfio_ccw_mdev_reg(struct subchannel *sch)
diff --git a/drivers/s390/cio/vfio_ccw_private.h b/drivers/s390/cio/vfio_ccw_private.h
index b2c762eb42b9..7272eb788612 100644
--- a/drivers/s390/cio/vfio_ccw_private.h
+++ b/drivers/s390/cio/vfio_ccw_private.h
@@ -17,6 +17,7 @@
#include <linux/eventfd.h>
#include <linux/workqueue.h>
#include <linux/vfio_ccw.h>
+#include <linux/vfio.h>
#include <asm/crw.h>
#include <asm/debug.h>
@@ -67,6 +68,7 @@ struct vfio_ccw_crw {
/**
* struct vfio_ccw_private
+ * @vdev: Embedded VFIO device
* @sch: pointer to the subchannel
* @state: internal state of the device
* @completion: synchronization helper of the I/O completion
@@ -90,6 +92,7 @@ struct vfio_ccw_crw {
* @crw_work: work for deferral process of CRW handling
*/
struct vfio_ccw_private {
+ struct vfio_device vdev;
struct subchannel *sch;
int state;
struct completion *completion;
@@ -121,6 +124,8 @@ extern void vfio_ccw_mdev_unreg(struct subchannel *sch);
extern int vfio_ccw_sch_quiesce(struct subchannel *sch);
+extern struct mdev_driver vfio_ccw_mdev_driver;
+
/*
* States of the device statemachine.
*/
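
With struct vfio_device embedded in struct vfio_ccw_private, the converted vfio_device_ops callbacks recover the driver-private data with container_of() on the vdev member instead of dev_get_drvdata(mdev_parent_dev(mdev)). A minimal sketch of that pattern is shown below; struct my_device and my_open() are hypothetical names used only to illustrate the idiom.

    #include <linux/kernel.h>
    #include <linux/vfio.h>

    /* Hypothetical driver-private structure with an embedded vfio_device. */
    struct my_device {
    	struct vfio_device vdev;	/* embedded, not a pointer */
    	int state;
    };

    /* Would be wired into a struct vfio_device_ops .open_device slot. */
    static int my_open(struct vfio_device *vdev)
    {
    	/* recover the driver-private structure from the embedded member */
    	struct my_device *mydev = container_of(vdev, struct my_device, vdev);

    	mydev->state = 1;
    	return 0;
    }
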
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 623d5269a52c..2341425f6967 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -351,7 +351,7 @@ static int vfio_ap_mdev_probe(struct mdev_device *mdev)
list_add(&matrix_mdev->node, &matrix_dev->mdev_list);
mutex_unlock(&matrix_dev->lock);
- ret = vfio_register_group_dev(&matrix_mdev->vdev);
+ ret = vfio_register_emulated_iommu_dev(&matrix_mdev->vdev);
if (ret)
goto err_list;
dev_set_drvdata(&mdev->dev, matrix_mdev);
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 06281a0a0552..de2423c72b02 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -182,7 +182,7 @@ static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
-/**
+/*
* Check return code of a preceding ccw_device call, halt_IO etc...
*
* ch : The channel, the error belongs to.
@@ -223,7 +223,7 @@ void ctcm_purge_skb_queue(struct sk_buff_head *q)
}
}
-/**
+/*
* NOP action for statemachines
*/
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
@@ -234,7 +234,7 @@ static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
* Actions for channel - statemachines.
*/
-/**
+/*
* Normal data has been sent. Free the corresponding
* skb (it's in io_queue), reset dev->tbusy and
* revert to idle state.
@@ -322,7 +322,7 @@ static void chx_txdone(fsm_instance *fi, int event, void *arg)
ctcm_clear_busy_do(dev);
}
-/**
+/*
* Initial data is sent.
* Notify device statemachine that we are up and
* running.
@@ -344,7 +344,7 @@ void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}
-/**
+/*
* Got normal data, check for sanity, queue it up, allocate new buffer
* trigger bottom half, and initiate next read.
*
@@ -421,7 +421,7 @@ static void chx_rx(fsm_instance *fi, int event, void *arg)
ctcm_ccw_check_rc(ch, rc, "normal RX");
}
-/**
+/*
* Initialize connection by sending a __u16 of value 0.
*
* fi An instance of a channel statemachine.
@@ -497,7 +497,7 @@ static void chx_firstio(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
@@ -538,7 +538,7 @@ static void chx_rxidle(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Set channel into extended mode.
*
* fi An instance of a channel statemachine.
@@ -578,7 +578,7 @@ static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
ch->retry = 0;
}
-/**
+/*
* Setup channel.
*
* fi An instance of a channel statemachine.
@@ -641,7 +641,7 @@ static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Shutdown a channel.
*
* fi An instance of a channel statemachine.
@@ -682,7 +682,7 @@ static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Cleanup helper for chx_fail and chx_stopped
* clean up the channel's queue and notify the interface statemachine.
*
@@ -728,7 +728,7 @@ static void ctcm_chx_cleanup(fsm_instance *fi, int state,
}
}
-/**
+/*
* A channel has successfully been halted.
* Clean up its queue and notify the interface statemachine.
*
@@ -741,7 +741,7 @@ static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}
-/**
+/*
* A stop command from device statemachine arrived and we are in
* not operational mode. Set state to stopped.
*
@@ -754,7 +754,7 @@ static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CTC_STATE_STOPPED);
}
-/**
+/*
* A machine check for no path, not operational status or gone device has
* happened.
* Cleanup queue and notify interface statemachine.
@@ -768,7 +768,7 @@ static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}
-/**
+/*
* Handle error during setup of channel.
*
* fi An instance of a channel statemachine.
@@ -817,7 +817,7 @@ static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Restart a channel after an error.
*
* fi An instance of a channel statemachine.
@@ -858,7 +858,7 @@ static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle error during RX initial handshake (exchange of
* 0-length block header)
*
@@ -893,7 +893,7 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Notify device statemachine if we gave up initialization
* of RX channel.
*
@@ -914,7 +914,7 @@ static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}
-/**
+/*
* Handle RX Unit check remote reset (remote disconnected)
*
* fi An instance of a channel statemachine.
@@ -946,7 +946,7 @@ static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
ccw_device_halt(ch2->cdev, 0);
}
-/**
+/*
* Handle error during TX channel initialization.
*
* fi An instance of a channel statemachine.
@@ -978,7 +978,7 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle TX timeout by retrying operation.
*
* fi An instance of a channel statemachine.
@@ -1050,7 +1050,7 @@ done:
return;
}
-/**
+/*
* Handle fatal errors during an I/O command.
*
* fi An instance of a channel statemachine.
@@ -1198,7 +1198,7 @@ int ch_fsm_len = ARRAY_SIZE(ch_fsm);
* Actions for mpc channel statemachine.
*/
-/**
+/*
* Normal data has been sent. Free the corresponding
* skb (it's in io_queue), reset dev->tbusy and
* revert to idle state.
@@ -1361,7 +1361,7 @@ done:
return;
}
-/**
+/*
* Got normal data, check for sanity, queue it up, allocate new buffer
* trigger bottom half, and initiate next read.
*
@@ -1464,7 +1464,7 @@ again:
}
-/**
+/*
* Initialize connection by sending a __u16 of value 0.
*
* fi An instance of a channel statemachine.
@@ -1516,7 +1516,7 @@ done:
return;
}
-/**
+/*
* Got initial data, check it. If OK,
* notify device statemachine that we are up and
* running.
@@ -2043,7 +2043,7 @@ int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
* Actions for interface - statemachine.
*/
-/**
+/*
* Startup channels by sending CTC_EVENT_START to each channel.
*
* fi An instance of an interface statemachine.
@@ -2068,7 +2068,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Shutdown channels by sending CTC_EVENT_STOP to each channel.
*
* fi An instance of an interface statemachine.
@@ -2122,7 +2122,7 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg)
DEV_EVENT_START, dev);
}
-/**
+/*
* Called from channel statemachine
* when a channel is up and running.
*
@@ -2183,7 +2183,7 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Called from device statemachine
* when a channel has been shutdown.
*
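
The comment changes in this file only demote /** openers to /* where the text is not in kernel-doc format, since kernel-doc reserves /** for structured comments with a name line and @-tagged parameters. A short sketch of the two styles follows; ctcm_example() is a hypothetical function used only to show the format.

    /* Plain block comment: free-form text that kernel-doc ignores. */

    struct channel;

    /**
     * ctcm_example() - one-line summary that kernel-doc parses
     * @ch:   channel the operation applies to
     * @flag: behaviour modifier
     *
     * Return: 0 on success, negative errno otherwise.
     */
    int ctcm_example(struct channel *ch, int flag);
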
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index fd705429708e..5ea7eeb07002 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -55,7 +55,7 @@
/* Some common global variables */
-/**
+/*
* The root device for ctcm group devices
*/
static struct device *ctcm_root_dev;
@@ -65,7 +65,7 @@ static struct device *ctcm_root_dev;
*/
struct channel *channels;
-/**
+/*
* Unpack a just received skb and hand it over to
* upper layers.
*
@@ -180,7 +180,7 @@ void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
}
}
-/**
+/*
* Release a specific channel in the channel list.
*
* ch Pointer to channel struct to be released.
@@ -192,7 +192,7 @@ static void channel_free(struct channel *ch)
fsm_newstate(ch->fsm, CTC_STATE_IDLE);
}
-/**
+/*
* Remove a specific channel in the channel list.
*
* ch Pointer to channel struct to be released.
@@ -240,7 +240,7 @@ static void channel_remove(struct channel *ch)
chid, ok ? "OK" : "failed");
}
-/**
+/*
* Get a specific channel from the channel list.
*
* type Type of channel we are interested in.
@@ -300,7 +300,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb)
}
-/**
+/*
* Check sense of a unit check.
*
* ch The channel, the sense code belongs to.
@@ -414,7 +414,7 @@ int ctcm_ch_alloc_buffer(struct channel *ch)
* Interface API for upper network layers
*/
-/**
+/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
@@ -432,7 +432,7 @@ int ctcm_open(struct net_device *dev)
return 0;
}
-/**
+/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
@@ -451,7 +451,7 @@ int ctcm_close(struct net_device *dev)
}
-/**
+/*
* Transmit a packet.
* This is a helper function for ctcm_tx().
*
@@ -822,7 +822,7 @@ done:
return rc;
}
-/**
+/*
* Start transmission of a packet.
* Called from generic network device layer.
*
@@ -975,7 +975,7 @@ done:
}
-/**
+/*
* Sets MTU of an interface.
*
* dev Pointer to interface struct.
@@ -1007,7 +1007,7 @@ static int ctcm_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-/**
+/*
* Returns interface statistics of a device.
*
* dev Pointer to interface struct.
@@ -1144,7 +1144,7 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
return dev;
}
-/**
+/*
* Main IRQ handler.
*
* cdev The ccw_device the interrupt is for.
@@ -1257,7 +1257,7 @@ static const struct device_type ctcm_devtype = {
.groups = ctcm_attr_groups,
};
-/**
+/*
* Add ctcm specific attributes.
* Add ctcm private data.
*
@@ -1293,7 +1293,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
return 0;
}
-/**
+/*
* Add a new channel to the list of channels.
* Keeps the channel list sorted.
*
@@ -1343,7 +1343,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
snprintf(ch->id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev->dev));
ch->type = type;
- /**
+ /*
* "static" ccws are used in the following way:
*
* ccw[0..2] (Channel program for generic I/O):
@@ -1471,7 +1471,7 @@ static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
return type;
}
-/**
+/*
*
* Setup an interface.
*
@@ -1595,7 +1595,7 @@ out_err_result:
return result;
}
-/**
+/*
* Shutdown an interface.
*
* cgdev Device to be shut down.
@@ -1738,7 +1738,7 @@ static void print_banner(void)
pr_info("CTCM driver initialized\n");
}
-/**
+/*
* Initialize module.
* This is called just after the module is loaded.
*
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index f0436f555c62..88abfb5e8045 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1016,7 +1016,7 @@ done:
CTCM_PR_DEBUG("exit %s: ch=0x%p id=%s\n", __func__, ch, ch->id);
}
-/**
+/*
* Unpack a just received skb and hand it over to
* upper layers.
* special MPC version of unpack_skb.
@@ -1211,7 +1211,7 @@ done:
__func__, dev->name, ch, ch->id);
}
-/**
+/*
* tasklet helper for mpc's skb unpacking.
*
* ch The channel to work on.
@@ -1320,7 +1320,7 @@ struct mpc_group *ctcmpc_init_mpc_group(struct ctcm_priv *priv)
* CTCM_PROTO_MPC only
*/
-/**
+/*
* NOP action for statemachines
*/
static void mpc_action_nop(fsm_instance *fi, int event, void *arg)
@@ -1426,7 +1426,7 @@ static void mpc_action_go_inop(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Handle mpc group action timeout.
* MPC Group Station FSM action
* CTCM_PROTO_MPC only
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index eb07862bd36a..98c4864932d2 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
-/**
+/*
* A generic FSM based on fsm used in isdn4linux
*
*/
diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
index 26cc943d2034..5f7e28de8b15 100644
--- a/drivers/s390/net/ism_drv.c
+++ b/drivers/s390/net/ism_drv.c
@@ -555,7 +555,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret)
goto err_disable;
- ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
if (ret)
goto err_resource;
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 440219bcaa2b..2a6479740600 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -40,18 +40,18 @@
#error Cannot compile lcs.c without some net devices switched on.
#endif
-/**
+/*
* initialization string for output
*/
static char version[] __initdata = "LCS driver";
-/**
+/*
* the root device for lcs group devices
*/
static struct device *lcs_root_dev;
-/**
+/*
* Some prototypes.
*/
static void lcs_tasklet(unsigned long);
@@ -62,14 +62,14 @@ static int lcs_send_delipm(struct lcs_card *, struct lcs_ipm_list *);
#endif /* CONFIG_IP_MULTICAST */
static int lcs_recovery(void *ptr);
-/**
+/*
* Debug Facility Stuff
*/
static char debug_buffer[255];
static debug_info_t *lcs_dbf_setup;
static debug_info_t *lcs_dbf_trace;
-/**
+/*
* LCS Debug Facility functions
*/
static void
@@ -96,7 +96,7 @@ lcs_register_debug_facility(void)
return 0;
}
-/**
+/*
* Allocate io buffers.
*/
static int
@@ -123,7 +123,7 @@ lcs_alloc_channel(struct lcs_channel *channel)
return 0;
}
-/**
+/*
* Free io buffers.
*/
static void
@@ -151,7 +151,7 @@ lcs_cleanup_channel(struct lcs_channel *channel)
lcs_free_channel(channel);
}
-/**
+/*
* LCS free memory for card and channels.
*/
static void
@@ -162,7 +162,7 @@ lcs_free_card(struct lcs_card *card)
kfree(card);
}
-/**
+/*
* LCS alloc memory for card and channels
*/
static struct lcs_card *
@@ -402,7 +402,7 @@ lcs_do_start_thread(struct lcs_card *card, unsigned long thread)
return rc;
}
-/**
+/*
* Initialize channels,card and state machines.
*/
static void
@@ -451,7 +451,8 @@ static void lcs_clear_multicast_list(struct lcs_card *card)
spin_unlock_irqrestore(&card->ipm_lock, flags);
#endif
}
-/**
+
+/*
* Cleanup channels,card and state machines.
*/
static void
@@ -468,7 +469,7 @@ lcs_cleanup_card(struct lcs_card *card)
lcs_cleanup_channel(&card->read);
}
-/**
+/*
* Start channel.
*/
static int
@@ -517,7 +518,7 @@ lcs_clear_channel(struct lcs_channel *channel)
}
-/**
+/*
* Stop channel.
*/
static int
@@ -545,7 +546,7 @@ lcs_stop_channel(struct lcs_channel *channel)
return 0;
}
-/**
+/*
* start read and write channel
*/
static int
@@ -565,7 +566,7 @@ lcs_start_channels(struct lcs_card *card)
return rc;
}
-/**
+/*
* stop read and write channel
*/
static int
@@ -577,7 +578,7 @@ lcs_stop_channels(struct lcs_card *card)
return 0;
}
-/**
+/*
* Get empty buffer.
*/
static struct lcs_buffer *
@@ -610,7 +611,7 @@ lcs_get_buffer(struct lcs_channel *channel)
return buffer;
}
-/**
+/*
* Resume channel program if the channel is suspended.
*/
static int
@@ -636,7 +637,7 @@ __lcs_resume_channel(struct lcs_channel *channel)
}
-/**
+/*
* Make a buffer ready for processing.
*/
static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index)
@@ -678,7 +679,7 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
return rc;
}
-/**
+/*
* Mark the buffer as processed. Take care of the suspend bit
* of the previous buffer. This function is called from
* interrupt context, so the lock must not be taken.
@@ -712,7 +713,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
return __lcs_resume_channel(channel);
}
-/**
+/*
* Put a processed buffer back to state empty.
*/
static void
@@ -728,7 +729,7 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer)
spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags);
}
-/**
+/*
* Get buffer for a lan command.
*/
static struct lcs_buffer *
@@ -785,7 +786,7 @@ lcs_alloc_reply(struct lcs_cmd *cmd)
return reply;
}
-/**
+/*
* Notifier function for lancmd replies. Called from read irq.
*/
static void
@@ -813,7 +814,7 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd)
spin_unlock(&card->lock);
}
-/**
+/*
* Emit buffer of a lan command.
*/
static void
@@ -877,7 +878,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
return rc ? -EIO : 0;
}
-/**
+/*
* LCS startup command
*/
static int
@@ -895,7 +896,7 @@ lcs_send_startup(struct lcs_card *card, __u8 initiator)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* LCS shutdown command
*/
static int
@@ -912,7 +913,7 @@ lcs_send_shutdown(struct lcs_card *card)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* LCS lanstat command
*/
static void
@@ -939,7 +940,7 @@ lcs_send_lanstat(struct lcs_card *card)
return lcs_send_lancmd(card, buffer, __lcs_lanstat_cb);
}
-/**
+/*
* send stoplan command
*/
static int
@@ -958,7 +959,7 @@ lcs_send_stoplan(struct lcs_card *card, __u8 initiator)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* send startlan command
*/
static void
@@ -986,7 +987,7 @@ lcs_send_startlan(struct lcs_card *card, __u8 initiator)
}
#ifdef CONFIG_IP_MULTICAST
-/**
+/*
* send setipm command (Multicast)
*/
static int
@@ -1010,7 +1011,7 @@ lcs_send_setipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* send delipm command (Multicast)
*/
static int
@@ -1034,7 +1035,7 @@ lcs_send_delipm(struct lcs_card *card,struct lcs_ipm_list *ipm_list)
return lcs_send_lancmd(card, buffer, NULL);
}
-/**
+/*
* check if multicast is supported by LCS
*/
static void
@@ -1074,7 +1075,7 @@ lcs_check_multicast_support(struct lcs_card *card)
return -EOPNOTSUPP;
}
-/**
+/*
* set or del multicast address on LCS card
*/
static void
@@ -1129,7 +1130,7 @@ list_modified:
spin_unlock_irqrestore(&card->ipm_lock, flags);
}
-/**
+/*
* get mac address for the relevant Multicast address
*/
static void
@@ -1139,7 +1140,7 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev)
ip_eth_mc_map(ipm, mac);
}
-/**
+/*
* function called by net device to handle multicast address relevant things
*/
static void lcs_remove_mc_addresses(struct lcs_card *card,
@@ -1260,7 +1261,7 @@ out:
}
#endif /* CONFIG_IP_MULTICAST */
-/**
+/*
* function called by net device to
* handle multicast address relevant things
*/
@@ -1355,7 +1356,7 @@ lcs_schedule_recovery(struct lcs_card *card)
schedule_work(&card->kernel_thread_starter);
}
-/**
+/*
* IRQ Handler for LCS channels
*/
static void
@@ -1439,7 +1440,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
tasklet_schedule(&channel->irq_tasklet);
}
-/**
+/*
* Tasklet for IRQ handler
*/
static void
@@ -1476,7 +1477,7 @@ lcs_tasklet(unsigned long data)
wake_up(&channel->wait_q);
}
-/**
+/*
* Finish current tx buffer and make it ready for transmit.
*/
static void
@@ -1490,7 +1491,7 @@ __lcs_emit_txbuffer(struct lcs_card *card)
card->tx_emitted++;
}
-/**
+/*
* Callback for finished tx buffers.
*/
static void
@@ -1515,7 +1516,7 @@ lcs_txbuffer_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
spin_unlock(&card->lock);
}
-/**
+/*
* Packet transmit function called by network stack
*/
static int
@@ -1593,7 +1594,7 @@ lcs_start_xmit(struct sk_buff *skb, struct net_device *dev)
return rc;
}
-/**
+/*
* send startlan and lanstat command to make LCS device ready
*/
static int
@@ -1648,7 +1649,7 @@ lcs_startlan(struct lcs_card *card)
return rc;
}
-/**
+/*
* LCS detect function
* setup channels and make them I/O ready
*/
@@ -1680,7 +1681,7 @@ lcs_detect(struct lcs_card *card)
return rc;
}
-/**
+/*
* LCS Stop card
*/
static int
@@ -1705,7 +1706,7 @@ lcs_stopcard(struct lcs_card *card)
return rc;
}
-/**
+/*
* Kernel Thread helper functions for LGW initiated commands
*/
static void
@@ -1721,7 +1722,7 @@ lcs_start_kernel_thread(struct work_struct *work)
#endif
}
-/**
+/*
* Process control frames.
*/
static void
@@ -1748,7 +1749,7 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd)
lcs_notify_lancmd_waiters(card, cmd);
}
-/**
+/*
* Unpack network packet.
*/
static void
@@ -1779,7 +1780,7 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len)
netif_rx(skb);
}
-/**
+/*
* LCS main routine to get packets and lancmd replies from the buffers
*/
static void
@@ -1829,7 +1830,7 @@ lcs_get_frames_cb(struct lcs_channel *channel, struct lcs_buffer *buffer)
lcs_ready_buffer(&card->read, buffer);
}
-/**
+/*
* get network statistics for ifconfig and other user programs
*/
static struct net_device_stats *
@@ -1842,7 +1843,7 @@ lcs_getstats(struct net_device *dev)
return &card->stats;
}
-/**
+/*
* stop lcs device
* This function will be called by user doing ifconfig xxx down
*/
@@ -1866,7 +1867,7 @@ lcs_stop_device(struct net_device *dev)
return rc;
}
-/**
+/*
* start lcs device and make it runnable
* This function will be called by user doing ifconfig xxx up
*/
@@ -1892,7 +1893,7 @@ lcs_open_device(struct net_device *dev)
return rc;
}
-/**
+/*
* show function for portno called by cat or similar things
*/
static ssize_t
@@ -1908,7 +1909,7 @@ lcs_portno_show (struct device *dev, struct device_attribute *attr, char *buf)
return sprintf(buf, "%d\n", card->portno);
}
-/**
+/*
* store the value which is piped to file portno
*/
static ssize_t
@@ -2033,7 +2034,7 @@ static const struct device_type lcs_devtype = {
.groups = lcs_attr_groups,
};
-/**
+/*
* lcs_probe_device is called on establishing a new ccwgroup_device.
*/
static int
@@ -2077,7 +2078,7 @@ lcs_register_netdev(struct ccwgroup_device *ccwgdev)
return register_netdev(card->dev);
}
-/**
+/*
* lcs_new_device will be called by setting the group device online.
*/
static const struct net_device_ops lcs_netdev_ops = {
@@ -2161,7 +2162,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
card->dev->ml_priv = card;
card->dev->netdev_ops = &lcs_netdev_ops;
card->dev->dev_port = card->portno;
- memcpy(card->dev->dev_addr, card->mac, LCS_MAC_LENGTH);
+ eth_hw_addr_set(card->dev, card->mac);
#ifdef CONFIG_IP_MULTICAST
if (!lcs_check_multicast_support(card))
card->dev->netdev_ops = &lcs_mc_netdev_ops;
@@ -2199,7 +2200,7 @@ out_err:
return -ENODEV;
}
-/**
+/*
* lcs_shutdown_device, called when setting the group device offline.
*/
static int
@@ -2240,7 +2241,7 @@ lcs_shutdown_device(struct ccwgroup_device *ccwgdev)
return __lcs_shutdown_device(ccwgdev, 0);
}
-/**
+/*
* drive lcs recovery after startup and startlan initiated by Lan Gateway
*/
static int
@@ -2271,7 +2272,7 @@ lcs_recovery(void *ptr)
return 0;
}
-/**
+/*
* lcs_remove_device, free buffers and card
*/
static void
@@ -2315,7 +2316,7 @@ static struct ccw_driver lcs_ccw_driver = {
.int_class = IRQIO_LCS,
};
-/**
+/*
* LCS ccwgroup driver registration
*/
static struct ccwgroup_driver lcs_group_driver = {
@@ -2351,7 +2352,7 @@ static const struct attribute_group *lcs_drv_attr_groups[] = {
NULL,
};
-/**
+/*
* LCS Module/Kernel initialization function
*/
static int
@@ -2389,7 +2390,7 @@ out_err:
}
-/**
+/*
* LCS module cleanup function
*/
static void
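
The memcpy() into card->dev->dev_addr above is replaced by eth_hw_addr_set(), which copies an ETH_ALEN-byte address through the core helper now that drivers are expected to treat dev->dev_addr as read-only. A minimal sketch, assuming a hypothetical helper my_set_mac():

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>

    /* Hypothetical helper: install a queried MAC address on a net_device. */
    static void my_set_mac(struct net_device *dev, const u8 *mac)
    {
    	/* dev->dev_addr must no longer be written directly by drivers */
    	eth_hw_addr_set(dev, mac);	/* copies ETH_ALEN bytes */
    }
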
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 5a0c2f07a3a2..981e7b1c6b96 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -58,7 +58,7 @@ MODULE_AUTHOR
("(C) 2001 IBM Corporation by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
-/**
+/*
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
@@ -107,7 +107,7 @@ DECLARE_PER_CPU(char[256], iucv_dbf_txt_buf);
debug_sprintf_event(iucv_dbf_trace, level, text ); \
} while (0)
-/**
+/*
* some more debug stuff
*/
#define PRINTK_HEADER " iucv: " /* for debugging */
@@ -118,7 +118,7 @@ static struct device_driver netiucv_driver = {
.bus = &iucv_bus,
};
-/**
+/*
* Per connection profiling data
*/
struct connection_profile {
@@ -133,7 +133,7 @@ struct connection_profile {
unsigned long tx_max_pending;
};
-/**
+/*
* Representation of one iucv connection
*/
struct iucv_connection {
@@ -154,13 +154,13 @@ struct iucv_connection {
char userdata[17];
};
-/**
+/*
* Linked list of all connection structs.
*/
static LIST_HEAD(iucv_connection_list);
static DEFINE_RWLOCK(iucv_connection_rwlock);
-/**
+/*
* Representation of event-data for the
* connection state machine.
*/
@@ -169,7 +169,7 @@ struct iucv_event {
void *data;
};
-/**
+/*
* Private part of the network device structure
*/
struct netiucv_priv {
@@ -180,7 +180,7 @@ struct netiucv_priv {
struct device *dev;
};
-/**
+/*
* Link level header for a packet.
*/
struct ll_header {
@@ -195,7 +195,7 @@ struct ll_header {
#define NETIUCV_QUEUELEN_DEFAULT 50
#define NETIUCV_TIMEOUT_5SEC 5000
-/**
+/*
* Compatibility macros for busy handling
* of network devices.
*/
@@ -223,7 +223,7 @@ static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
-/**
+/*
* Convert an iucv userId to its printable
* form (strip whitespace at end).
*
@@ -262,7 +262,7 @@ static char *netiucv_printuser(struct iucv_connection *conn)
return netiucv_printname(conn->userid, 8);
}
-/**
+/*
* States of the interface statemachine.
*/
enum dev_states {
@@ -270,7 +270,7 @@ enum dev_states {
DEV_STATE_STARTWAIT,
DEV_STATE_STOPWAIT,
DEV_STATE_RUNNING,
- /**
+ /*
* MUST always be the last element!!
*/
NR_DEV_STATES
@@ -283,7 +283,7 @@ static const char *dev_state_names[] = {
"Running",
};
-/**
+/*
* Events of the interface statemachine.
*/
enum dev_events {
@@ -291,7 +291,7 @@ enum dev_events {
DEV_EVENT_STOP,
DEV_EVENT_CONUP,
DEV_EVENT_CONDOWN,
- /**
+ /*
* MUST always be the last element!!
*/
NR_DEV_EVENTS
@@ -304,11 +304,11 @@ static const char *dev_event_names[] = {
"Connection down",
};
-/**
+/*
* Events of the connection statemachine
*/
enum conn_events {
- /**
+ /*
* Events, representing callbacks from
* lowlevel iucv layer)
*/
@@ -320,23 +320,23 @@ enum conn_events {
CONN_EVENT_RX,
CONN_EVENT_TXDONE,
- /**
+ /*
* Events, representing errors return codes from
* calls to lowlevel iucv layer
*/
- /**
+ /*
* Event, representing timer expiry.
*/
CONN_EVENT_TIMER,
- /**
+ /*
* Events, representing commands from upper levels.
*/
CONN_EVENT_START,
CONN_EVENT_STOP,
- /**
+ /*
* MUST always be the last element!!
*/
NR_CONN_EVENTS,
@@ -357,55 +357,55 @@ static const char *conn_event_names[] = {
"Stop",
};
-/**
+/*
* States of the connection statemachine.
*/
enum conn_states {
- /**
+ /*
* Connection not assigned to any device,
* initial state, invalid
*/
CONN_STATE_INVALID,
- /**
+ /*
* Userid assigned but not operating
*/
CONN_STATE_STOPPED,
- /**
+ /*
* Connection registered,
* no connection request sent yet,
* no connection request received
*/
CONN_STATE_STARTWAIT,
- /**
+ /*
* Connection registered and connection request sent,
* no acknowledge and no connection request received yet.
*/
CONN_STATE_SETUPWAIT,
- /**
+ /*
* Connection up and running idle
*/
CONN_STATE_IDLE,
- /**
+ /*
* Data sent, awaiting CONN_EVENT_TXDONE
*/
CONN_STATE_TX,
- /**
+ /*
* Error during registration.
*/
CONN_STATE_REGERR,
- /**
+ /*
* Error during registration.
*/
CONN_STATE_CONNERR,
- /**
+ /*
* MUST always be the last element!!
*/
NR_CONN_STATES,
@@ -424,7 +424,7 @@ static const char *conn_state_names[] = {
};
-/**
+/*
* Debug Facility Stuff
*/
static debug_info_t *iucv_dbf_setup = NULL;
@@ -556,7 +556,7 @@ static void netiucv_callback_connres(struct iucv_path *path, u8 *ipuser)
fsm_event(conn->fsm, CONN_EVENT_CONN_RES, conn);
}
-/**
+/*
* NOP action for statemachines
*/
static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
@@ -567,7 +567,7 @@ static void netiucv_action_nop(fsm_instance *fi, int event, void *arg)
* Actions of the connection statemachine
*/
-/**
+/*
* netiucv_unpack_skb
* @conn: The connection where this skb has been received.
* @pskb: The received skb.
@@ -993,7 +993,7 @@ static const int CONN_FSM_LEN = sizeof(conn_fsm) / sizeof(fsm_node);
* Actions for interface - statemachine.
*/
-/**
+/*
* dev_action_start
* @fi: An instance of an interface statemachine.
* @event: The event, just happened.
@@ -1012,7 +1012,7 @@ static void dev_action_start(fsm_instance *fi, int event, void *arg)
fsm_event(privptr->conn->fsm, CONN_EVENT_START, privptr->conn);
}
-/**
+/*
* Shutdown connection by sending CONN_EVENT_STOP to it.
*
* @param fi An instance of an interface statemachine.
@@ -1034,7 +1034,7 @@ dev_action_stop(fsm_instance *fi, int event, void *arg)
fsm_event(privptr->conn->fsm, CONN_EVENT_STOP, &ev);
}
-/**
+/*
* Called from connection statemachine
* when a connection is up and running.
*
@@ -1067,7 +1067,7 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
}
}
-/**
+/*
* Called from connection statemachine
* when a connection has been shutdown.
*
@@ -1107,7 +1107,7 @@ static const fsm_node dev_fsm[] = {
static const int DEV_FSM_LEN = sizeof(dev_fsm) / sizeof(fsm_node);
-/**
+/*
* Transmit a packet.
* This is a helper function for netiucv_tx().
*
@@ -1144,7 +1144,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
spin_unlock_irqrestore(&conn->collect_lock, saveflags);
} else {
struct sk_buff *nskb = skb;
- /**
+ /*
* Copy the skb to a newly allocated skb in lowmem only if the
* data is located above 2G in memory or tailroom is < 2.
*/
@@ -1164,7 +1164,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
}
copied = 1;
}
- /**
+ /*
* skb now is below 2G and has enough room. Add headers.
*/
header.next = nskb->len + NETIUCV_HDRLEN;
@@ -1194,7 +1194,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
if (copied)
dev_kfree_skb(nskb);
else {
- /**
+ /*
* Remove our headers. They get added
* again on retransmit.
*/
@@ -1217,7 +1217,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
* Interface API for upper network layers
*/
-/**
+/*
* Open an interface.
* Called from generic network layer when ifconfig up is run.
*
@@ -1233,7 +1233,7 @@ static int netiucv_open(struct net_device *dev)
return 0;
}
-/**
+/*
* Close an interface.
* Called from generic network layer when ifconfig down is run.
*
@@ -1249,7 +1249,7 @@ static int netiucv_close(struct net_device *dev)
return 0;
}
-/**
+/*
* Start transmission of a packet.
* Called from generic network device layer.
*
@@ -1266,7 +1266,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
int rc;
IUCV_DBF_TEXT(trace, 4, __func__);
- /**
+ /*
* Some sanity checks ...
*/
if (skb == NULL) {
@@ -1282,7 +1282,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
- /**
+ /*
* If connection is not running, try to restart it
* and throw away packet.
*/
@@ -1304,7 +1304,7 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev)
return rc ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
-/**
+/*
* netiucv_stats
* @dev: Pointer to interface struct.
*
@@ -1745,7 +1745,7 @@ static void netiucv_unregister_device(struct device *dev)
device_unregister(dev);
}
-/**
+/*
* Allocate and initialize a new connection structure.
* Add it to the list of netiucv connections;
*/
@@ -1802,7 +1802,7 @@ out:
return NULL;
}
-/**
+/*
* Release a connection structure and remove it from the
* list of netiucv connections.
*/
@@ -1826,7 +1826,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
kfree_skb(conn->tx_buff);
}
-/**
+/*
* Release everything of a net device.
*/
static void netiucv_free_netdevice(struct net_device *dev)
@@ -1848,7 +1848,7 @@ static void netiucv_free_netdevice(struct net_device *dev)
}
}
-/**
+/*
* Initialize a net device. (Called from kernel in alloc_netdev())
*/
static const struct net_device_ops netiucv_netdev_ops = {
@@ -1873,7 +1873,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
dev->netdev_ops = &netiucv_netdev_ops;
}
-/**
+/*
* Allocate and initialize everything of a net device.
*/
static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index a5aa0bdc61d6..20dca4c0384a 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -545,7 +545,6 @@ static inline bool qeth_out_queue_is_empty(struct qeth_qdio_out_q *queue)
struct qeth_qdio_info {
atomic_t state;
/* input */
- int no_in_queues;
struct qeth_qdio_q *in_q;
struct qeth_qdio_q *c_q;
struct qeth_qdio_buffer_pool in_buf_pool;
@@ -771,8 +770,6 @@ struct qeth_discipline {
void (*remove) (struct ccwgroup_device *);
int (*set_online)(struct qeth_card *card, bool carrier_ok);
void (*set_offline)(struct qeth_card *card);
- int (*do_ioctl)(struct net_device *dev, struct ifreq *rq,
- void __user *data, int cmd);
int (*control_event_handler)(struct qeth_card *card,
struct qeth_ipa_cmd *cmd);
};
@@ -1087,6 +1084,7 @@ int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq,
void __user *data, int cmd);
+__printf(3, 4)
void qeth_dbf_longtext(debug_info_t *id, int level, char *text, ...);
int qeth_configure_cq(struct qeth_card *, enum qeth_cq);
int qeth_hw_trap(struct qeth_card *, enum qeth_diags_trap_action);
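
The new __printf(3, 4) annotation marks the third argument of qeth_dbf_longtext() as a printf-style format string with the variadic arguments starting at position four, so the compiler can flag mismatched format specifiers in callers at build time. A small sketch of the attribute on a hypothetical my_log() declaration:

    #include <linux/compiler.h>

    /* Hypothetical logger: argument 2 is the format, arguments start at 3. */
    __printf(2, 3)
    void my_log(int level, const char *fmt, ...);
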
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e9807d2996a9..26c55f67289f 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -59,6 +59,7 @@ EXPORT_SYMBOL_GPL(qeth_dbf);
static struct kmem_cache *qeth_core_header_cache;
static struct kmem_cache *qeth_qdio_outbuf_cache;
+static struct kmem_cache *qeth_qaob_cache;
static struct device *qeth_core_root_dev;
static struct dentry *qeth_debugfs_root;
@@ -354,8 +355,8 @@ static int qeth_cq_init(struct qeth_card *card)
qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
QDIO_MAX_BUFFERS_PER_Q);
card->qdio.c_q->next_buf_to_init = 127;
- rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
- card->qdio.no_in_queues - 1, 0, 127, NULL);
+ rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 1, 0, 127,
+ NULL);
if (rc) {
QETH_CARD_TEXT_(card, 2, "1err%d", rc);
goto out;
@@ -375,21 +376,16 @@ static int qeth_alloc_cq(struct qeth_card *card)
dev_err(&card->gdev->dev, "Failed to create completion queue\n");
return -ENOMEM;
}
-
- card->qdio.no_in_queues = 2;
} else {
QETH_CARD_TEXT(card, 2, "nocq");
card->qdio.c_q = NULL;
- card->qdio.no_in_queues = 1;
}
- QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
return 0;
}
static void qeth_free_cq(struct qeth_card *card)
{
if (card->qdio.c_q) {
- --card->qdio.no_in_queues;
qeth_free_qdio_queue(card->qdio.c_q);
card->qdio.c_q = NULL;
}
@@ -1338,7 +1334,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
static void qeth_free_out_buf(struct qeth_qdio_out_buffer *buf)
{
if (buf->aob)
- qdio_release_aob(buf->aob);
+ kmem_cache_free(qeth_qaob_cache, buf->aob);
kmem_cache_free(qeth_qdio_outbuf_cache, buf);
}
@@ -1458,7 +1454,6 @@ static void qeth_init_qdio_info(struct qeth_card *card)
card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
/* inbound */
- card->qdio.no_in_queues = 1;
card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
if (IS_IQD(card))
card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
@@ -1930,9 +1925,9 @@ static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
* @card: qeth_card structure pointer
* @iob: qeth_cmd_buffer pointer
* @reply_cb: callback function pointer
- * @cb_card: pointer to the qeth_card structure
- * @cb_reply: pointer to the qeth_reply structure
- * @cb_cmd: pointer to the original iob for non-IPA
+ * cb_card: pointer to the qeth_card structure
+ * cb_reply: pointer to the qeth_reply structure
+ * cb_cmd: pointer to the original iob for non-IPA
* commands, or to the qeth_ipa_cmd structure
* for the IPA commands.
* @reply_param: private pointer passed to the callback
@@ -2629,7 +2624,7 @@ static void qeth_free_qdio_queues(struct qeth_card *card)
qeth_free_cq(card);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (card->qdio.in_q->bufs[j].rx_skb)
- dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
+ consume_skb(card->qdio.in_q->bufs[j].rx_skb);
}
qeth_free_qdio_queue(card->qdio.in_q);
card->qdio.in_q = NULL;
@@ -3039,7 +3034,7 @@ static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
return (cmd->hdr.return_code) ? -EIO : 0;
}
-/**
+/*
* qeth_send_ipa_cmd() - send an IPA command
*
* See qeth_send_control_data() for explanation of the arguments.
@@ -3554,7 +3549,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
!qeth_iqd_is_mcast_queue(card, queue) &&
count == 1) {
if (!buf->aob)
- buf->aob = qdio_allocate_aob();
+ buf->aob = kmem_cache_zalloc(qeth_qaob_cache,
+ GFP_ATOMIC);
if (buf->aob) {
struct qeth_qaob_priv1 *priv;
@@ -3780,7 +3776,7 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
qeth_schedule_recovery(card);
}
-/**
+/*
* Note: Function assumes that we have 4 outbound queues.
*/
int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
@@ -3877,12 +3873,14 @@ static unsigned int qeth_count_elements(struct sk_buff *skb,
/**
* qeth_add_hw_header() - add a HW header to an skb.
+ * @queue: TX queue that the skb will be placed on.
* @skb: skb that the HW header should be added to.
* @hdr: double pointer to a qeth_hdr. When returning with >= 0,
* it contains a valid pointer to a qeth_hdr.
* @hdr_len: length of the HW header.
* @proto_len: length of protocol headers that need to be in same page as the
* HW header.
+ * @elements: returns the required number of buffer elements for this skb.
*
* Returns the pushed length. If the header can't be pushed on
* (eg. because it would cross a page boundary), it is allocated from
@@ -4375,7 +4373,7 @@ static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
!(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
return -EADDRNOTAVAIL;
- ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
+ eth_hw_addr_set(card->dev, adp_cmd->data.change_addr.addr);
return 0;
}
@@ -5046,7 +5044,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "badmac");
QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
} else {
- ether_addr_copy(card->dev->dev_addr, response->mac);
+ eth_hw_addr_set(card->dev, response->mac);
}
out:
@@ -5139,6 +5137,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
struct qdio_buffer **in_sbal_ptrs[QETH_MAX_IN_QUEUES];
struct qeth_qib_parms *qib_parms = NULL;
struct qdio_initialize init_data;
+ unsigned int no_input_qs = 1;
unsigned int i;
int rc = 0;
@@ -5153,8 +5152,10 @@ static int qeth_qdio_establish(struct qeth_card *card)
}
in_sbal_ptrs[0] = card->qdio.in_q->qdio_bufs;
- if (card->options.cq == QETH_CQ_ENABLED)
+ if (card->options.cq == QETH_CQ_ENABLED) {
in_sbal_ptrs[1] = card->qdio.c_q->qdio_bufs;
+ no_input_qs++;
+ }
for (i = 0; i < card->qdio.no_out_queues; i++)
out_sbal_ptrs[i] = card->qdio.out_qs[i]->qdio_bufs;
@@ -5164,7 +5165,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
QDIO_QETH_QFMT;
init_data.qib_param_field_format = 0;
init_data.qib_param_field = (void *)qib_parms;
- init_data.no_input_qs = card->qdio.no_in_queues;
+ init_data.no_input_qs = no_input_qs;
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = qeth_qdio_input_handler;
init_data.output_handler = qeth_qdio_output_handler;
@@ -5604,7 +5605,7 @@ static void qeth_receive_skb(struct qeth_card *card, struct sk_buff *skb,
if (uses_frags)
napi_free_frags(napi);
else
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
return;
}
@@ -5795,7 +5796,7 @@ walk_packet:
if (uses_frags)
napi_free_frags(napi);
else
- dev_kfree_skb_any(skb);
+ kfree_skb(skb);
QETH_CARD_STAT_INC(card,
rx_length_errors);
}
@@ -6600,10 +6601,7 @@ int qeth_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *d
rc = qeth_query_oat_command(card, data);
break;
default:
- if (card->discipline->do_ioctl)
- rc = card->discipline->do_ioctl(dev, rq, data, cmd);
- else
- rc = -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
}
if (rc)
QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
@@ -7177,6 +7175,16 @@ static int __init qeth_core_init(void)
rc = -ENOMEM;
goto cqslab_err;
}
+
+ qeth_qaob_cache = kmem_cache_create("qeth_qaob",
+ sizeof(struct qaob),
+ sizeof(struct qaob),
+ 0, NULL);
+ if (!qeth_qaob_cache) {
+ rc = -ENOMEM;
+ goto qaob_err;
+ }
+
rc = ccw_driver_register(&qeth_ccw_driver);
if (rc)
goto ccw_err;
@@ -7189,6 +7197,8 @@ static int __init qeth_core_init(void)
ccwgroup_err:
ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
+ kmem_cache_destroy(qeth_qaob_cache);
+qaob_err:
kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
kmem_cache_destroy(qeth_core_header_cache);
@@ -7207,6 +7217,7 @@ static void __exit qeth_core_exit(void)
qeth_clear_dbf_list();
ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
ccw_driver_unregister(&qeth_ccw_driver);
+ kmem_cache_destroy(qeth_qaob_cache);
kmem_cache_destroy(qeth_qdio_outbuf_cache);
kmem_cache_destroy(qeth_core_header_cache);
root_device_unregister(qeth_core_root_dev);
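
qeth now allocates struct qaob from its own slab cache instead of qdio_allocate_aob()/qdio_release_aob(); the cache is created with the structure size doubling as the alignment, presumably so that every object stays naturally aligned as the hardware expects. A minimal sketch of such a size-aligned cache, using the hypothetical names my_aob and my_aob_cache:

    #include <linux/slab.h>
    #include <linux/types.h>
    #include <linux/errno.h>

    /* Hypothetical 256-byte object that must not be split across boundaries. */
    struct my_aob {
    	u8 data[256];
    };

    static struct kmem_cache *my_aob_cache;

    static int my_cache_init(void)
    {
    	/* align == object size keeps every object naturally aligned */
    	my_aob_cache = kmem_cache_create("my_aob", sizeof(struct my_aob),
    					 sizeof(struct my_aob), 0, NULL);
    	return my_aob_cache ? 0 : -ENOMEM;
    }

    static void my_cache_exit(void)
    {
    	kmem_cache_destroy(my_aob_cache);
    }
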
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index dc6c00768d91..0347fc184786 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -71,7 +71,7 @@ static int qeth_l2_send_setdelmac_cb(struct qeth_card *card,
return qeth_l2_setdelmac_makerc(card, cmd->hdr.return_code);
}
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+static int qeth_l2_send_setdelmac(struct qeth_card *card, const __u8 *mac,
enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
@@ -88,7 +88,7 @@ static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
return qeth_send_ipa_cmd(card, iob, qeth_l2_send_setdelmac_cb, NULL);
}
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+static int qeth_l2_send_setmac(struct qeth_card *card, const __u8 *mac)
{
int rc;
@@ -121,11 +121,11 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Wmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc == -EADDRINUSE)
- QETH_DBF_MESSAGE(2, "MAC already registered on device %x\n",
- CARD_DEVID(card));
+ QETH_DBF_MESSAGE(2, "MAC address %012llx is already registered on device %x\n",
+ ether_addr_to_u64(mac), CARD_DEVID(card));
else if (rc)
- QETH_DBF_MESSAGE(2, "Failed to register MAC on device %x: %d\n",
- CARD_DEVID(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to register MAC address %012llx on device %x: %d\n",
+ ether_addr_to_u64(mac), CARD_DEVID(card), rc);
return rc;
}
@@ -138,8 +138,8 @@ static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
QETH_CARD_TEXT(card, 2, "L2Rmac");
rc = qeth_l2_send_setdelmac(card, mac, cmd);
if (rc)
- QETH_DBF_MESSAGE(2, "Failed to delete MAC on device %u: %d\n",
- CARD_DEVID(card), rc);
+ QETH_DBF_MESSAGE(2, "Failed to delete MAC address %012llx on device %x: %d\n",
+ ether_addr_to_u64(mac), CARD_DEVID(card), rc);
return rc;
}
@@ -377,7 +377,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
if (rc)
return rc;
ether_addr_copy(old_addr, dev->dev_addr);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
if (card->info.dev_addr_is_registered)
qeth_l2_remove_mac(card, old_addr);
@@ -661,13 +661,13 @@ static void qeth_l2_dev2br_fdb_notify(struct qeth_card *card, u8 code,
card->dev, &info.info, NULL);
QETH_CARD_TEXT(card, 4, "andelmac");
QETH_CARD_TEXT_(card, 4,
- "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ "mc%012llx", ether_addr_to_u64(ntfy_mac));
} else {
call_switchdev_notifiers(SWITCHDEV_FDB_ADD_TO_BRIDGE,
card->dev, &info.info, NULL);
QETH_CARD_TEXT(card, 4, "anaddmac");
QETH_CARD_TEXT_(card, 4,
- "mc%012lx", ether_addr_to_u64(ntfy_mac));
+ "mc%012llx", ether_addr_to_u64(ntfy_mac));
}
}
@@ -765,8 +765,8 @@ static void qeth_l2_br2dev_worker(struct work_struct *work)
int err = 0;
kfree(br2dev_event_work);
- QETH_CARD_TEXT_(card, 4, "b2dw%04x", event);
- QETH_CARD_TEXT_(card, 4, "ma%012lx", ether_addr_to_u64(addr));
+ QETH_CARD_TEXT_(card, 4, "b2dw%04lx", event);
+ QETH_CARD_TEXT_(card, 4, "ma%012llx", ether_addr_to_u64(addr));
rcu_read_lock();
/* Verify preconditions are still valid: */
@@ -795,7 +795,7 @@ static void qeth_l2_br2dev_worker(struct work_struct *work)
if (err) {
QETH_CARD_TEXT(card, 2, "b2derris");
QETH_CARD_TEXT_(card, 2,
- "err%02x%03d", event,
+ "err%02lx%03d", event,
lowerdev->ifindex);
}
}
@@ -813,7 +813,7 @@ static void qeth_l2_br2dev_worker(struct work_struct *work)
break;
}
if (err)
- QETH_CARD_TEXT_(card, 2, "b2derr%02x", event);
+ QETH_CARD_TEXT_(card, 2, "b2derr%02lx", event);
}
unlock:
@@ -878,7 +878,7 @@ static int qeth_l2_switchdev_event(struct notifier_block *unused,
while (lowerdev) {
if (qeth_l2_must_learn(lowerdev, dstdev)) {
card = lowerdev->ml_priv;
- QETH_CARD_TEXT_(card, 4, "b2dqw%03x", event);
+ QETH_CARD_TEXT_(card, 4, "b2dqw%03lx", event);
rc = qeth_l2_br2dev_queue_work(brdev, lowerdev,
dstdev, event,
fdb_info->addr);
@@ -2430,7 +2430,6 @@ const struct qeth_discipline qeth_l2_discipline = {
.remove = qeth_l2_remove_device,
.set_online = qeth_l2_set_online,
.set_offline = qeth_l2_set_offline,
- .do_ioctl = NULL,
.control_event_handler = qeth_l2_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l2_discipline);
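A side note on the format-string fixes in the qeth_l2 hunks above: ether_addr_to_u64() folds a 6-byte MAC address into a u64, so the matching printk specifier is %012llx rather than %012lx. A minimal sketch (the helper name example_log_mac and its context are illustrative, not driver code):

	#include <linux/etherdevice.h>
	#include <linux/printk.h>

	static void example_log_mac(const u8 *mac)
	{
		/* ether_addr_to_u64() returns u64: use the ll length modifier,
		 * zero-padded to 12 hex digits (6 bytes). */
		pr_debug("mac %012llx\n", ether_addr_to_u64(mac));
	}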
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 6fd3e288f059..48a886f7af62 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -492,7 +492,7 @@ int qeth_l3_setrouting_v6(struct qeth_card *card)
* IP address takeover related functions
*/
-/**
+/*
* qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs.
*
* Caller must hold ip_lock.
@@ -913,8 +913,7 @@ static int qeth_l3_iqd_read_initial_mac_cb(struct qeth_card *card,
if (!is_valid_ether_addr(cmd->data.create_destroy_addr.mac_addr))
return -EADDRNOTAVAIL;
- ether_addr_copy(card->dev->dev_addr,
- cmd->data.create_destroy_addr.mac_addr);
+ eth_hw_addr_set(card->dev, cmd->data.create_destroy_addr.mac_addr);
return 0;
}
@@ -1512,7 +1511,8 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card)
return rc;
}
-static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd)
+static int qeth_l3_ndo_siocdevprivate(struct net_device *dev, struct ifreq *rq,
+ void __user *data, int cmd)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_arp_cache_entry arp_entry;
@@ -1553,7 +1553,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, void __use
rc = qeth_l3_arp_flush_cache(card);
break;
default:
- rc = -EOPNOTSUPP;
+ rc = qeth_siocdevprivate(dev, rq, data, cmd);
}
return rc;
}
@@ -1842,7 +1842,7 @@ static const struct net_device_ops qeth_l3_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
- .ndo_siocdevprivate = qeth_siocdevprivate,
+ .ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -1858,7 +1858,7 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_set_rx_mode = qeth_l3_set_rx_mode,
.ndo_eth_ioctl = qeth_do_ioctl,
- .ndo_siocdevprivate = qeth_siocdevprivate,
+ .ndo_siocdevprivate = qeth_l3_ndo_siocdevprivate,
.ndo_fix_features = qeth_fix_features,
.ndo_set_features = qeth_set_features,
.ndo_tx_timeout = qeth_tx_timeout,
@@ -2072,7 +2072,6 @@ const struct qeth_discipline qeth_l3_discipline = {
.remove = qeth_l3_remove_device,
.set_online = qeth_l3_set_online,
.set_offline = qeth_l3_set_offline,
- .do_ioctl = qeth_l3_do_ioctl,
.control_event_handler = qeth_l3_control_event,
};
EXPORT_SYMBOL_GPL(qeth_l3_discipline);
diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c
index 46815e65f7a4..5def83c88f13 100644
--- a/drivers/scsi/aic94xx/aic94xx_sds.c
+++ b/drivers/scsi/aic94xx/aic94xx_sds.c
@@ -517,8 +517,10 @@ struct asd_ms_conn_map {
u8 num_nodes;
u8 usage_model_id;
u32 _resvd;
- struct asd_ms_conn_desc conn_desc[0];
- struct asd_ms_node_desc node_desc[];
+ union {
+ DECLARE_FLEX_ARRAY(struct asd_ms_conn_desc, conn_desc);
+ DECLARE_FLEX_ARRAY(struct asd_ms_node_desc, node_desc);
+ };
} __attribute__ ((packed));
struct asd_ctrla_phy_entry {
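The aic94xx change relies on DECLARE_FLEX_ARRAY() because a bare flexible array member ("type name[];") is not permitted directly inside a union; the macro wraps it so two trailing layouts can legally overlap. A generic sketch of the pattern (the struct below is illustrative, not the driver's layout):

	#include <linux/stddef.h>
	#include <linux/types.h>

	struct example_trailer {
		u8 num_entries;
		union {
			/* two views of the same trailing bytes */
			DECLARE_FLEX_ARRAY(__le32, words);
			DECLARE_FLEX_ARRAY(u8, bytes);
		};
	} __attribute__ ((packed));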
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 3ab669dc806f..27884f3106ab 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -3,6 +3,7 @@
* Copyright (c) 2017 Hisilicon Limited.
*/
+#include <linux/sched/clock.h>
#include "hisi_sas.h"
#define DRV_NAME "hisi_sas_v3_hw"
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
index 3f6f14f0cafb..24b72ee4246f 100644
--- a/drivers/scsi/hosts.c
+++ b/drivers/scsi/hosts.c
@@ -220,7 +220,8 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
goto fail;
}
- shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+ /* Use min_t(int, ...) in case shost->can_queue exceeds SHRT_MAX */
+ shost->cmd_per_lun = min_t(int, shost->cmd_per_lun,
shost->can_queue);
error = scsi_init_sense_cache(shost);
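For context on the min_t() change above: both arguments are cast to the named type before the comparison, so with a 16-bit type a large can_queue wraps negative and the clamp goes the wrong way. Illustration with made-up values:

	#include <linux/minmax.h>

	int can_queue = 65000;			/* > SHRT_MAX (32767) */
	short bad = min_t(short, 3, can_queue);	/* 65000 truncates to -536; "min" comes out as -536 */
	int good = min_t(int, 3, can_queue);	/* 3, as intended */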
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 1f1586ad48fe..01f79991bf4a 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -1696,6 +1696,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
spin_lock_irqsave(&evt->queue->l_lock, flags);
list_add_tail(&evt->queue_list, &evt->queue->sent);
+ atomic_set(&evt->active, 1);
mb();
@@ -1710,6 +1711,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
be64_to_cpu(crq_as_u64[1]));
if (rc) {
+ atomic_set(&evt->active, 0);
list_del(&evt->queue_list);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
del_timer(&evt->timer);
@@ -1737,7 +1739,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->done(evt);
} else {
- atomic_set(&evt->active, 1);
spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 50df7dd9cb91..ea8e01f49cba 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1055,8 +1055,9 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd,
return SCSI_MLQUEUE_HOST_BUSY;
/* Set up the actual SRP IU */
+ BUILD_BUG_ON(sizeof(evt_struct->iu.srp) != SRP_MAX_IU_LEN);
+ memset(&evt_struct->iu.srp, 0x00, sizeof(evt_struct->iu.srp));
srp_cmd = &evt_struct->iu.srp.cmd;
- memset(srp_cmd, 0x00, SRP_MAX_IU_LEN);
srp_cmd->opcode = SRP_CMD;
memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb));
int_to_scsilun(lun, &srp_cmd->lun);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index befeb7c34290..337e6ed24821 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -22,6 +22,7 @@
*******************************************************************/
#include <scsi/scsi_host.h>
+#include <linux/hashtable.h>
#include <linux/ktime.h>
#include <linux/workqueue.h>
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index 2197988333fe..3cae8803383b 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -3736,7 +3736,7 @@ mpi3mr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->max_lun = -1;
shost->unique_id = mrioc->id;
- shost->max_channel = 1;
+ shost->max_channel = 0;
shost->max_id = 0xFFFFFFFF;
if (prot_mask >= 0)
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index d383d4a03436..ad1b6c2b37a7 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -5065,9 +5065,12 @@ _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if (scmd->prot_flags & SCSI_PROT_GUARD_CHECK)
eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
- if (scmd->prot_flags & SCSI_PROT_REF_CHECK) {
- eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
- MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+ if (scmd->prot_flags & SCSI_PROT_REF_CHECK)
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG;
+
+ if (scmd->prot_flags & SCSI_PROT_REF_INCREMENT) {
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG;
+
mpi_request->CDB.EEDP32.PrimaryReferenceTag =
cpu_to_be32(scsi_prot_ref_tag(scmd));
}
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
index 747af96dd15c..e8bc8d9e4583 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.c
@@ -22,9 +22,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
u32 task_retry_id,
u8 fcp_cmd_payload[32])
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
@@ -115,9 +115,9 @@ int init_initiator_midpath_unsolicited_fcoe_task(
struct scsi_sgl_task_params *rx_sgl_task_params,
u8 fw_to_place_fc_header)
{
- struct e4_fcoe_task_context *ctx = task_params->context;
+ struct fcoe_task_context *ctx = task_params->context;
const u8 val_byte = ctx->ystorm_ag_context.byte0;
- struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
+ struct ustorm_fcoe_task_ag_ctx *u_ag_ctx;
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
struct mstorm_fcoe_task_st_ctx *m_st_ctx;
diff --git a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
index 1ee31a5f063b..7125e484bf93 100644
--- a/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
+++ b/drivers/scsi/qedf/drv_fcoe_fw_funcs.h
@@ -10,7 +10,7 @@
struct fcoe_task_params {
/* Output parameter [set/filled by the HSI function] */
- struct e4_fcoe_task_context *context;
+ struct fcoe_task_context *context;
/* Output parameter [set/filled by the HSI function] */
struct fcoe_wqe *sqe;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index ba94413fe2ea..631a15969d21 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -141,7 +141,7 @@ struct qedf_ioreq {
struct completion tm_done;
struct completion abts_done;
struct completion cleanup_done;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct fcoe_task_params *task_params;
struct scsi_sgl_task_params *sgl_task_params;
int idx;
@@ -503,7 +503,7 @@ extern void qedf_cmd_timer_set(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
unsigned int timer_msec);
extern int qedf_init_mp_req(struct qedf_ioreq *io_req);
extern void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe);
extern u16 qedf_get_sqe_idx(struct qedf_rport *fcport);
extern void qedf_ring_doorbell(struct qedf_rport *fcport);
extern void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 625e58ccb8c8..1ff5bc314fc0 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -16,7 +16,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
struct qedf_ioreq *els_req;
struct qedf_mp_req *mp_req;
struct fc_frame_header *fc_hdr;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
int rc = 0;
uint32_t did, sid;
uint16_t xid;
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 3404782988d5..b649f835d436 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -584,7 +584,7 @@ static void qedf_build_fcp_cmnd(struct qedf_ioreq *io_req,
}
static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
- struct qedf_ioreq *io_req, struct e4_fcoe_task_context *task_ctx,
+ struct qedf_ioreq *io_req, struct fcoe_task_context *task_ctx,
struct fcoe_wqe *sqe)
{
enum fcoe_task_type task_type;
@@ -602,7 +602,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
/* Note init_initiator_rw_fcoe_task memsets the task context */
io_req->task = task_ctx;
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(io_req->task_params, 0, sizeof(struct fcoe_task_params));
memset(io_req->sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
@@ -674,7 +674,7 @@ static void qedf_init_task(struct qedf_rport *fcport, struct fc_lport *lport,
}
void qedf_init_mp_task(struct qedf_ioreq *io_req,
- struct e4_fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
+ struct fcoe_task_context *task_ctx, struct fcoe_wqe *sqe)
{
struct qedf_mp_req *mp_req = &(io_req->mp_req);
struct qedf_rport *fcport = io_req->fcport;
@@ -692,7 +692,7 @@ void qedf_init_mp_task(struct qedf_ioreq *io_req,
memset(&tx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
memset(&rx_sgl_task_params, 0, sizeof(struct scsi_sgl_task_params));
- memset(task_ctx, 0, sizeof(struct e4_fcoe_task_context));
+ memset(task_ctx, 0, sizeof(struct fcoe_task_context));
memset(&task_fc_hdr, 0, sizeof(struct fcoe_tx_mid_path_params));
/* Setup the task from io_req for easy reference */
@@ -850,7 +850,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
struct Scsi_Host *host = sc_cmd->device->host;
struct fc_lport *lport = shost_priv(host);
struct qedf_ctx *qedf = lport_priv(lport);
- struct e4_fcoe_task_context *task_ctx;
+ struct fcoe_task_context *task_ctx;
u16 xid;
struct fcoe_wqe *sqe;
u16 sqe_idx;
@@ -2293,7 +2293,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd,
uint8_t tm_flags)
{
struct qedf_ioreq *io_req;
- struct e4_fcoe_task_context *task;
+ struct fcoe_task_context *task;
struct qedf_ctx *qedf = fcport->qedf;
struct fc_lport *lport = qedf->lport;
int rc = 0;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 42d0d941dba5..0da32fd3302e 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2170,7 +2170,7 @@ static bool qedf_fp_has_work(struct qedf_fastpath *fp)
struct qedf_ctx *qedf = fp->qedf;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
/* Get the pointer to the global CQ this completion is on */
@@ -2197,7 +2197,7 @@ static bool qedf_process_completions(struct qedf_fastpath *fp)
{
struct qedf_ctx *qedf = fp->qedf;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct global_queue *que;
u16 prod_idx;
struct fcoe_cqe *cqe;
@@ -2688,12 +2688,12 @@ void qedf_fp_io_handler(struct work_struct *work)
static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedf->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL);
+ sizeof(struct status_block), &sb_phys, GFP_KERNEL);
if (!sb_virt) {
QEDF_ERR(&qedf->dbg_ctx,
@@ -3416,7 +3416,9 @@ retry_probe:
qedf->devlink = qed_ops->common->devlink_register(qedf->cdev);
if (IS_ERR(qedf->devlink)) {
QEDF_ERR(&qedf->dbg_ctx, "Cannot register devlink\n");
+ rc = PTR_ERR(qedf->devlink);
qedf->devlink = NULL;
+ goto err2;
}
}
diff --git a/drivers/scsi/qedi/qedi_debugfs.c b/drivers/scsi/qedi/qedi_debugfs.c
index 42f5afb60055..8deb2001dc2f 100644
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -136,7 +136,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
{
struct qedi_fastpath *fp = NULL;
struct qed_sb_info *sb_info = NULL;
- struct status_block_e4 *sb = NULL;
+ struct status_block *sb = NULL;
struct global_queue *que = NULL;
int id;
u16 prod_idx;
@@ -152,7 +152,7 @@ qedi_gbl_ctx_show(struct seq_file *s, void *unused)
sb_info = fp->sb_info;
sb = sb_info->sb_virt;
prod_idx = (sb->pi_array[QEDI_PROTO_CQ_PROD_IDX] &
- STATUS_BLOCK_E4_PROD_INDEX_MASK);
+ STATUS_BLOCK_PROD_INDEX_MASK);
seq_printf(s, "SB PROD IDX: %d\n", prod_idx);
que = qedi->global_queues[fp->sb_id];
seq_printf(s, "DRV CONS IDX: %d\n", que->cq_cons_idx);
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index d01cd829ef97..84a4204a2cb4 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -85,7 +85,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_text_rsp *resp_hdr_ptr;
struct iscsi_text_response_hdr *cqe_text_response;
struct qedi_cmd *cmd;
@@ -261,7 +261,7 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
{
struct iscsi_conn *conn = qedi_conn->cls_conn->dd_data;
struct iscsi_session *session = conn->session;
- struct e4_iscsi_task_context *task_ctx;
+ struct iscsi_task_context *task_ctx;
struct iscsi_login_rsp *resp_hdr_ptr;
struct iscsi_login_response_hdr *cqe_login_response;
struct qedi_cmd *cmd;
@@ -970,7 +970,7 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_login_req *login_hdr;
struct scsi_sge *resp_sge = NULL;
@@ -990,9 +990,9 @@ int qedi_send_iscsi_login(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1073,7 +1073,7 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_logout *logout_hdr = NULL;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct qedi_cmd *qedi_cmd;
@@ -1091,9 +1091,9 @@ int qedi_send_iscsi_logout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1434,7 +1434,7 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
struct iscsi_tmf_request_hdr tmf_pdu_header;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_tm *tmf_hdr;
struct qedi_cmd *qedi_cmd;
struct qedi_cmd *cmd;
@@ -1454,9 +1454,9 @@ static int send_iscsi_tmf(struct qedi_conn *qedi_conn, struct iscsi_task *mtask,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1548,7 +1548,7 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params tx_sgl_task_params;
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct qedi_ctx *qedi = qedi_conn->qedi;
struct iscsi_text *text_hdr;
struct scsi_sge *req_sge = NULL;
@@ -1570,9 +1570,9 @@ int qedi_send_iscsi_text(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1649,7 +1649,7 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
struct scsi_sgl_task_params rx_sgl_task_params;
struct iscsi_task_params task_params;
struct qedi_ctx *qedi = qedi_conn->qedi;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_nopout *nopout_hdr;
struct scsi_sge *resp_sge = NULL;
struct qedi_cmd *qedi_cmd;
@@ -1669,9 +1669,9 @@ int qedi_send_iscsi_nopout(struct qedi_conn *qedi_conn,
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
qedi_cmd->task_id = tid;
@@ -1991,7 +1991,7 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
struct iscsi_task_params task_params;
struct iscsi_conn_params conn_params;
struct scsi_initiator_cmd_params cmd_params;
- struct e4_iscsi_task_context *fw_task_ctx;
+ struct iscsi_task_context *fw_task_ctx;
struct iscsi_cls_conn *cls_conn;
struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr;
enum iscsi_task_type task_type = MAX_ISCSI_TASK_TYPE;
@@ -2014,9 +2014,9 @@ int qedi_iscsi_send_ioreq(struct iscsi_task *task)
return -ENOMEM;
fw_task_ctx =
- (struct e4_iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
+ (struct iscsi_task_context *)qedi_get_task_mem(&qedi->tasks,
tid);
- memset(fw_task_ctx, 0, sizeof(struct e4_iscsi_task_context));
+ memset(fw_task_ctx, 0, sizeof(struct iscsi_task_context));
cmd->task_id = tid;
diff --git a/drivers/scsi/qedi/qedi_fw_api.c b/drivers/scsi/qedi/qedi_fw_api.c
index 52772904ef5d..642556a1ce1c 100644
--- a/drivers/scsi/qedi/qedi_fw_api.c
+++ b/drivers/scsi/qedi/qedi_fw_api.c
@@ -202,7 +202,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
struct data_hdr *pdu_header,
enum iscsi_task_type task_type)
{
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
u32 val;
u16 index;
u8 val_byte;
@@ -224,7 +224,7 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
cpu_to_le16(task_params->conn_icid);
SET_FIELD(context->ustorm_ag_context.flags1,
- E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
+ USTORM_ISCSI_TASK_AG_CTX_R2T2RECV, 1);
context->ustorm_st_context.task_type = task_type;
context->ustorm_st_context.cq_rss_number = task_params->cq_rss_number;
@@ -254,7 +254,7 @@ void init_initiator_rw_cdb_ystorm_context(struct ystorm_iscsi_task_st_ctx *ystc,
static
void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
- struct e4_ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
+ struct ustorm_iscsi_task_ag_ctx *ustorm_ag_cxt,
u32 remaining_recv_len, u32 expected_data_transfer_len,
u8 num_sges, bool tx_dif_conn_err_en)
{
@@ -266,12 +266,12 @@ void init_ustorm_task_contexts(struct ustorm_iscsi_task_st_ctx *ustorm_st_cxt,
ustorm_st_cxt->exp_data_transfer_len = val;
SET_FIELD(ustorm_st_cxt->reg1.reg1_map, ISCSI_REG1_NUM_SGES, num_sges);
SET_FIELD(ustorm_ag_cxt->flags2,
- E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
+ USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN,
tx_dif_conn_err_en ? 1 : 0);
}
static
-void set_rw_exp_data_acked_and_cont_len(struct e4_iscsi_task_context *context,
+void set_rw_exp_data_acked_and_cont_len(struct iscsi_task_context *context,
struct iscsi_conn_params *conn_params,
enum iscsi_task_type task_type,
u32 task_size,
@@ -470,7 +470,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
}
}
-static void set_local_completion_context(struct e4_iscsi_task_context *context)
+static void set_local_completion_context(struct iscsi_task_context *context)
{
SET_FIELD(context->ystorm_st_context.state.flags,
YSTORM_ISCSI_TASK_STATE_LOCAL_COMP, 1);
@@ -487,7 +487,7 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
struct scsi_dif_task_params *dif_task_params)
{
u32 exp_data_transfer_len = conn_params->max_burst_length;
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
bool slow_io = false;
u32 task_size, val;
u8 num_sges = 0;
@@ -615,7 +615,7 @@ int init_initiator_login_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -657,7 +657,7 @@ int init_initiator_nop_out_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_sgl_task_params,
struct scsi_sgl_task_params *rx_sgl_task_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -703,7 +703,7 @@ int init_initiator_logout_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
@@ -758,7 +758,7 @@ int init_initiator_text_request_task(struct iscsi_task_params *task_params,
struct scsi_sgl_task_params *tx_params,
struct scsi_sgl_task_params *rx_params)
{
- struct e4_iscsi_task_context *cxt;
+ struct iscsi_task_context *cxt;
cxt = task_params->context;
diff --git a/drivers/scsi/qedi/qedi_fw_iscsi.h b/drivers/scsi/qedi/qedi_fw_iscsi.h
index 10f19f0af0a3..df2d471a7b51 100644
--- a/drivers/scsi/qedi/qedi_fw_iscsi.h
+++ b/drivers/scsi/qedi/qedi_fw_iscsi.h
@@ -10,7 +10,7 @@
#include "qedi_fw_scsi.h"
struct iscsi_task_params {
- struct e4_iscsi_task_context *context;
+ struct iscsi_task_context *context;
struct iscsi_wqe *sqe;
u32 tx_io_size;
u32 rx_io_size;
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index a31c5de74754..a282860da0aa 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -182,7 +182,7 @@ struct qedi_cmd {
struct scsi_cmnd *scsi_cmd;
struct scatterlist *sg;
struct qedi_io_bdt io_tbl;
- struct e4_iscsi_task_context request;
+ struct iscsi_task_context request;
unsigned char *sense_buffer;
dma_addr_t sense_buffer_dma;
u16 task_id;
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index e6dc0b495a82..1dec814d8788 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -351,12 +351,12 @@ static int qedi_init_uio(struct qedi_ctx *qedi)
static int qedi_alloc_and_init_sb(struct qedi_ctx *qedi,
struct qed_sb_info *sb_info, u16 sb_id)
{
- struct status_block_e4 *sb_virt;
+ struct status_block *sb_virt;
dma_addr_t sb_phys;
int ret;
sb_virt = dma_alloc_coherent(&qedi->pdev->dev,
- sizeof(struct status_block_e4), &sb_phys,
+ sizeof(struct status_block), &sb_phys,
GFP_KERNEL);
if (!sb_virt) {
QEDI_ERR(&qedi->dbg_ctx,
@@ -865,7 +865,8 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)
qedi->pf_params.iscsi_pf_params.num_uhq_pages_in_ring = num_sq_pages;
qedi->pf_params.iscsi_pf_params.num_queues = qedi->num_queues;
qedi->pf_params.iscsi_pf_params.debug_mode = qedi_fw_debug;
- qedi->pf_params.iscsi_pf_params.two_msl_timer = 4000;
+ qedi->pf_params.iscsi_pf_params.two_msl_timer = QED_TWO_MSL_TIMER_DFLT;
+ qedi->pf_params.iscsi_pf_params.tx_sws_timer = QED_TX_SWS_TIMER_DFLT;
qedi->pf_params.iscsi_pf_params.max_fin_rt = 2;
for (log_page_size = 0 ; log_page_size < 32 ; log_page_size++) {
@@ -1259,7 +1260,7 @@ static bool qedi_process_completions(struct qedi_fastpath *fp)
{
struct qedi_ctx *qedi = fp->qedi;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
struct qedi_percpu_s *p = NULL;
struct global_queue *que;
u16 prod_idx;
@@ -1315,7 +1316,7 @@ static bool qedi_fp_has_work(struct qedi_fastpath *fp)
struct qedi_ctx *qedi = fp->qedi;
struct global_queue *que;
struct qed_sb_info *sb_info = fp->sb_info;
- struct status_block_e4 *sb = sb_info->sb_virt;
+ struct status_block *sb = sb_info->sb_virt;
u16 prod_idx;
barrier();
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 4b5d28d89d69..655cf5de604b 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -431,7 +431,7 @@ done_unmap_sg:
goto done_free_fcport;
done_free_fcport:
- if (bsg_request->msgcode == FC_BSG_RPT_ELS)
+ if (bsg_request->msgcode != FC_BSG_RPT_ELS)
qla2x00_free_fcport(fcport);
done:
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 1c5da2dbd6f9..253055cf9daf 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -8,6 +8,8 @@
#include <linux/delay.h>
#include <linux/nvme.h>
#include <linux/nvme-fc.h>
+#include <linux/blk-mq-pci.h>
+#include <linux/blk-mq.h>
static struct nvme_fc_port_template qla_nvme_fc_transport;
@@ -642,6 +644,18 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
}
+static void qla_nvme_map_queues(struct nvme_fc_local_port *lport,
+ struct blk_mq_queue_map *map)
+{
+ struct scsi_qla_host *vha = lport->private;
+ int rc;
+
+ rc = blk_mq_pci_map_queues(map, vha->hw->pdev, vha->irq_offset);
+ if (rc)
+ ql_log(ql_log_warn, vha, 0x21de,
+ "pci map queue failed 0x%x", rc);
+}
+
static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
{
struct scsi_qla_host *vha = lport->private;
@@ -676,6 +690,7 @@ static struct nvme_fc_port_template qla_nvme_fc_transport = {
.ls_abort = qla_nvme_ls_abort,
.fcp_io = qla_nvme_post_cmd,
.fcp_abort = qla_nvme_fcp_abort,
+ .map_queues = qla_nvme_map_queues,
.max_hw_queues = 8,
.max_sgl_segments = 1024,
.max_dif_sgl_segments = 64,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d2e40aaba734..836fedcea241 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4157,7 +4157,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ql_dbg_pci(ql_dbg_init, ha->pdev,
0xe0ee, "%s: failed alloc dsd\n",
__func__);
- return 1;
+ return -ENOMEM;
}
ha->dif_bundle_kallocs++;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b3478ed9b12e..7d8242c120fc 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -3319,8 +3319,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
"RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
vha->flags.online, qla2x00_reset_active(vha),
cmd->reset_count, qpair->chip_reset);
- spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
- return 0;
+ goto out_unmap_unlock;
}
/* Does F/W have an IOCBs for this request */
@@ -3445,10 +3444,6 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
prm.sg = NULL;
prm.req_cnt = 1;
- /* Calculate number of entries and segments required */
- if (qlt_pci_map_calc_cnt(&prm) != 0)
- return -EAGAIN;
-
if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) ||
(cmd->sess && cmd->sess->deleted)) {
/*
@@ -3466,6 +3461,10 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
return 0;
}
+ /* Calculate number of entries and segments required */
+ if (qlt_pci_map_calc_cnt(&prm) != 0)
+ return -EAGAIN;
+
spin_lock_irqsave(qpair->qp_lock_ptr, flags);
/* Does F/W have an IOCBs for this request */
res = qlt_check_reserve_free_req(qpair, prm.req_cnt);
@@ -3870,9 +3869,6 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
BUG_ON(cmd->cmd_in_wq);
- if (cmd->sg_mapped)
- qlt_unmap_sg(cmd->vha, cmd);
-
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 031569c496e5..69a590546bf9 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -366,13 +366,13 @@ struct qla4_work_evt {
struct {
enum iscsi_host_event_code code;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} aen;
struct {
uint32_t status;
uint32_t pid;
uint32_t data_size;
- uint8_t data[0];
+ uint8_t data[];
} ping;
} u;
};
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index b241f9e3885c..291ecc33b1fe 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -553,8 +553,10 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
- module_put(sdev->host->hostt->module);
+ struct module *mod = sdev->host->hostt->module;
+
put_device(&sdev->sdev_gendev);
+ module_put(mod);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_bsg.c b/drivers/scsi/scsi_bsg.c
index 81c3853a2a80..081b84bb7985 100644
--- a/drivers/scsi/scsi_bsg.c
+++ b/drivers/scsi/scsi_bsg.c
@@ -25,8 +25,8 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
return -EOPNOTSUPP;
}
- rq = blk_get_request(q, hdr->dout_xfer_len ?
- REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(q, hdr->dout_xfer_len ?
+ REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
rq->timeout = timeout;
@@ -95,7 +95,7 @@ static int scsi_bsg_sg_io_fn(struct request_queue *q, struct sg_io_v4 *hdr,
out_free_cmd:
scsi_req_free_cmd(scsi_req(rq));
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 66f507469a31..40b473eea357 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5384,7 +5384,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
{
bool new_sd_dp;
bool inject = false;
- bool hipri = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_HIPRI;
+ bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
int k, num_in_q, qdepth;
unsigned long iflags;
u64 ns_from_boot = 0;
@@ -5471,7 +5471,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_host_max_queue)
sd_dp->hc_idx = get_tag(cmnd);
- if (hipri)
+ if (polled)
ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */
@@ -5531,7 +5531,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -5562,7 +5562,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending)))
sd_dp->aborted = true;
- if (hipri) {
+ if (polled) {
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
spin_lock_irqsave(&sqp->qc_lock, iflags);
if (!sd_dp->init_poll) {
@@ -7331,7 +7331,7 @@ static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
if (kt_from_boot < sd_dp->cmpl_ts)
continue;
- } else /* ignoring non REQ_HIPRI requests */
+ } else /* ignoring non REQ_POLLED requests */
continue;
devip = (struct sdebug_dev_info *)scp->device->hostdata;
if (likely(devip))
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index b6c86cce57bf..36870b41c888 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1979,7 +1979,7 @@ maybe_retry:
static void eh_lock_door_done(struct request *req, blk_status_t status)
{
- blk_put_request(req);
+ blk_mq_free_request(req);
}
/**
@@ -1998,7 +1998,7 @@ static void scsi_eh_lock_door(struct scsi_device *sdev)
struct request *req;
struct scsi_request *rq;
- req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
+ req = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
return;
rq = scsi_req(req);
diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c
index 6ff2207bd45a..34412eac4566 100644
--- a/drivers/scsi/scsi_ioctl.c
+++ b/drivers/scsi/scsi_ioctl.c
@@ -438,7 +438,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
at_head = 1;
ret = -ENOMEM;
- rq = blk_get_request(sdev->request_queue, writing ?
+ rq = scsi_alloc_request(sdev->request_queue, writing ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -490,7 +490,7 @@ static int sg_io(struct scsi_device *sdev, struct gendisk *disk,
out_free_cdb:
scsi_req_free_cmd(req);
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
@@ -561,7 +561,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
}
- rq = blk_get_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(q, in_len ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto error_free_buffer;
@@ -634,7 +634,7 @@ static int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk,
}
error:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
error_free_buffer:
kfree(buffer);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 572673873ddf..9c2b99e12ce3 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -21,6 +21,7 @@
#include <linux/hardirq.h>
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
+#include <linux/blk-integrity.h>
#include <linux/ratelimit.h>
#include <asm/unaligned.h>
@@ -215,7 +216,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
struct scsi_request *rq;
int ret;
- req = blk_get_request(sdev->request_queue,
+ req = scsi_alloc_request(sdev->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN,
rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
@@ -259,7 +260,7 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
scsi_normalize_sense(rq->sense, rq->sense_len, sshdr);
ret = rq->result;
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
return ret;
}
@@ -1078,9 +1079,6 @@ EXPORT_SYMBOL(scsi_alloc_sgtables);
* This function initializes the members of struct scsi_cmnd that must be
* initialized before request processing starts and that won't be
* reinitialized if a SCSI command is requeued.
- *
- * Called from inside blk_get_request() for pass-through requests and from
- * inside scsi_init_command() for filesystem requests.
*/
static void scsi_initialize_rq(struct request *rq)
{
@@ -1097,6 +1095,18 @@ static void scsi_initialize_rq(struct request *rq)
cmd->retries = 0;
}
+struct request *scsi_alloc_request(struct request_queue *q,
+ unsigned int op, blk_mq_req_flags_t flags)
+{
+ struct request *rq;
+
+ rq = blk_mq_alloc_request(q, op, flags);
+ if (!IS_ERR(rq))
+ scsi_initialize_rq(rq);
+ return rq;
+}
+EXPORT_SYMBOL_GPL(scsi_alloc_request);
+
/*
* Only called when the request isn't completed by SCSI, and not freed by
* SCSI
@@ -1783,7 +1793,7 @@ static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq,
}
-static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx)
+static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
struct Scsi_Host *shost = hctx->driver_data;
@@ -1863,7 +1873,6 @@ static const struct blk_mq_ops scsi_mq_ops_no_commit = {
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
@@ -1893,7 +1902,6 @@ static const struct blk_mq_ops scsi_mq_ops = {
#endif
.init_request = scsi_mq_init_request,
.exit_request = scsi_mq_exit_request,
- .initialize_rq_fn = scsi_initialize_rq,
.cleanup_rq = scsi_cleanup_rq,
.busy = scsi_mq_lld_busy,
.map_queues = scsi_map_queues,
@@ -1959,6 +1967,14 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q)
return sdev;
}
+/*
+ * pktcdvd should have been integrated into the SCSI layers, but for historical
+ * reasons like the old IDE driver it isn't. This export allows it to safely
+ * probe if a given device is a SCSI one and only attach to that.
+ */
+#ifdef CONFIG_CDROM_PKTCDVD_MODULE
+EXPORT_SYMBOL_GPL(scsi_device_from_queue);
+#endif
/**
* scsi_block_requests - Utility function used by low-level drivers to prevent
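To summarize the conversion running through the SCSI hunks above: former blk_get_request()/blk_put_request() callers now use scsi_alloc_request(), which wraps blk_mq_alloc_request() and initializes the embedded SCSI command, paired with blk_mq_free_request(). A hedged sketch of the resulting pass-through flow, with error handling and data mapping trimmed:

	struct request *rq;

	rq = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	rq->timeout = 30 * HZ;		/* illustrative timeout */
	/* ... fill in scsi_req(rq)->cmd / cmd_len, map data, execute ... */

	blk_mq_free_request(rq);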
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index fe22191522a3..2808c0cb5711 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -280,7 +280,6 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
sdev->request_queue = q;
q->queuedata = sdev;
__scsi_init_queue(sdev->host, q);
- blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
WARN_ON_ONCE(!blk_get_queue(q));
depth = sdev->host->cmd_per_lun ?: 1;
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 86793259e541..a35841b34bfd 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -449,9 +449,12 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL;
struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL;
unsigned long flags;
+ struct module *mod;
sdev = container_of(work, struct scsi_device, ew.work);
+ mod = sdev->host->hostt->module;
+
scsi_dh_release_device(sdev);
parent = sdev->sdev_gendev.parent;
@@ -502,11 +505,17 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
if (parent)
put_device(parent);
+ module_put(mod);
}
static void scsi_device_dev_release(struct device *dev)
{
struct scsi_device *sdp = to_scsi_device(dev);
+
+ /* Set module pointer as NULL in case of module unloading */
+ if (!try_module_get(sdp->host->hostt->module))
+ sdp->host->hostt->module = NULL;
+
execute_in_process_context(scsi_device_dev_release_usercontext,
&sdp->ew);
}
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 922e4c7bd88e..78343d3f9385 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2930,8 +2930,6 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
session->recovery_tmo = value;
break;
default:
- err = transport->set_param(conn, ev->u.set_param.param,
- data, ev->u.set_param.len);
if ((conn->state == ISCSI_CONN_BOUND) ||
(conn->state == ISCSI_CONN_UP)) {
err = transport->set_param(conn, ev->u.set_param.param,
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 523bf2fdc253..252e43d7c73f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -48,6 +48,7 @@
#include <linux/blkpg.h>
#include <linux/blk-pm.h>
#include <linux/delay.h>
+#include <linux/major.h>
#include <linux/mutex.h>
#include <linux/string_helpers.h>
#include <linux/async.h>
@@ -1756,6 +1757,44 @@ static void sd_rescan(struct device *dev)
sd_revalidate_disk(sdkp->disk);
}
+static int sd_get_unique_id(struct gendisk *disk, u8 id[16],
+ enum blk_unique_id type)
+{
+ struct scsi_device *sdev = scsi_disk(disk)->device;
+ const struct scsi_vpd *vpd;
+ const unsigned char *d;
+ int ret = -ENXIO, len;
+
+ rcu_read_lock();
+ vpd = rcu_dereference(sdev->vpd_pg83);
+ if (!vpd)
+ goto out_unlock;
+
+ ret = -EINVAL;
+ for (d = vpd->data + 4; d < vpd->data + vpd->len; d += d[3] + 4) {
+ /* we only care about designators with LU association */
+ if (((d[1] >> 4) & 0x3) != 0x00)
+ continue;
+ if ((d[1] & 0xf) != type)
+ continue;
+
+ /*
+ * Only exit early if a 16-byte descriptor was found. Otherwise
+ * keep looking as one with more entropy might still show up.
+ */
+ len = d[3];
+ if (len != 8 && len != 12 && len != 16)
+ continue;
+ ret = len;
+ memcpy(id, d + 4, len);
+ if (len == 16)
+ break;
+ }
+out_unlock:
+ rcu_read_unlock();
+ return ret;
+}
+
static char sd_pr_type(enum pr_type type)
{
switch (type) {
@@ -1860,6 +1899,7 @@ static const struct block_device_operations sd_fops = {
.check_events = sd_check_events,
.unlock_native_capacity = sd_unlock_native_capacity,
.report_zones = sd_zbc_report_zones,
+ .get_unique_id = sd_get_unique_id,
.pr_ops = &sd_pr_ops,
};
@@ -3087,6 +3127,86 @@ static void sd_read_security(struct scsi_disk *sdkp, unsigned char *buffer)
sdkp->security = 1;
}
+static inline sector_t sd64_to_sectors(struct scsi_disk *sdkp, u8 *buf)
+{
+ return logical_to_sectors(sdkp->device, get_unaligned_be64(buf));
+}
+
+/**
+ * sd_read_cpr - Query concurrent positioning ranges
+ * @sdkp: disk to query
+ */
+static void sd_read_cpr(struct scsi_disk *sdkp)
+{
+ struct blk_independent_access_ranges *iars = NULL;
+ unsigned char *buffer = NULL;
+ unsigned int nr_cpr = 0;
+ int i, vpd_len, buf_len = SD_BUF_SIZE;
+ u8 *desc;
+
+ /*
+ * We need to have the capacity set first for the block layer to be
+ * able to check the ranges.
+ */
+ if (sdkp->first_scan)
+ return;
+
+ if (!sdkp->capacity)
+ goto out;
+
+ /*
+ * Concurrent Positioning Ranges VPD: there can be at most 256 ranges,
+ * leading to a maximum page size of 64 + 256*32 bytes.
+ */
+ buf_len = 64 + 256*32;
+ buffer = kmalloc(buf_len, GFP_KERNEL);
+ if (!buffer || scsi_get_vpd_page(sdkp->device, 0xb9, buffer, buf_len))
+ goto out;
+
+ /* We must have at least a 64B header and one 32B range descriptor */
+ vpd_len = get_unaligned_be16(&buffer[2]) + 3;
+ if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Ranges VPD page\n");
+ goto out;
+ }
+
+ nr_cpr = (vpd_len - 64) / 32;
+ if (nr_cpr == 1) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ iars = disk_alloc_independent_access_ranges(sdkp->disk, nr_cpr);
+ if (!iars) {
+ nr_cpr = 0;
+ goto out;
+ }
+
+ desc = &buffer[64];
+ for (i = 0; i < nr_cpr; i++, desc += 32) {
+ if (desc[0] != i) {
+ sd_printk(KERN_ERR, sdkp,
+ "Invalid Concurrent Positioning Range number\n");
+ nr_cpr = 0;
+ break;
+ }
+
+ iars->ia_range[i].sector = sd64_to_sectors(sdkp, desc + 8);
+ iars->ia_range[i].nr_sectors = sd64_to_sectors(sdkp, desc + 16);
+ }
+
+out:
+ disk_set_independent_access_ranges(sdkp->disk, iars);
+ if (nr_cpr && sdkp->nr_actuators != nr_cpr) {
+ sd_printk(KERN_NOTICE, sdkp,
+ "%u concurrent positioning ranges\n", nr_cpr);
+ sdkp->nr_actuators = nr_cpr;
+ }
+
+ kfree(buffer);
+}
+
/*
* Determine the device's preferred I/O size for reads and writes
* unless the reported value is unreasonably small, large, not a
@@ -3202,6 +3322,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
sd_read_app_tag_own(sdkp, buffer);
sd_read_write_same(sdkp, buffer);
sd_read_security(sdkp, buffer);
+ sd_read_cpr(sdkp);
}
/*
@@ -3683,7 +3804,12 @@ static int sd_resume(struct device *dev)
static int sd_resume_runtime(struct device *dev)
{
struct scsi_disk *sdkp = dev_get_drvdata(dev);
- struct scsi_device *sdp = sdkp->device;
+ struct scsi_device *sdp;
+
+ if (!sdkp) /* E.g.: runtime resume at the start of sd_probe() */
+ return 0;
+
+ sdp = sdkp->device;
if (sdp->ignore_media_change) {
/* clear the device's sense data */
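On the sd_get_unique_id() addition above: the Device Identification VPD page (0x83) is a 4-byte page header followed by designation descriptors, each a 4-byte descriptor header plus d[3] payload bytes, which is what the d += d[3] + 4 stride walks. A standalone sketch of the same walk (buffer handling simplified, not the driver code):

	const unsigned char *d;

	/* designators start after the 4-byte page header */
	for (d = vpd->data + 4; d + 4 <= vpd->data + vpd->len; d += d[3] + 4) {
		u8 assoc = (d[1] >> 4) & 0x3;	/* 0 == addressed logical unit */
		u8 type = d[1] & 0xf;		/* designator type (NAA, EUI-64, ...) */
		u8 len = d[3];			/* payload length */

		/* payload is d[4] .. d[4 + len - 1] */
	}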
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index b59136c4125b..2e5932bde43d 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -106,6 +106,7 @@ struct scsi_disk {
u8 protection_type;/* Data Integrity Field */
u8 provisioning_mode;
u8 zeroing_mode;
+ u8 nr_actuators; /* Number of actuators */
unsigned ATO : 1; /* state of disk ATO bit */
unsigned cache_override : 1; /* temp override of WCE,RCD */
unsigned WCE : 1; /* state of disk WCE bit */
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 4cadb26070a8..349950616adc 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -6,7 +6,7 @@
* Written by: Martin K. Petersen <martin.petersen@oracle.com>
*/
-#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/t10-pi.h>
#include <scsi/scsi.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 8f05248920e8..141099ab9092 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -31,6 +31,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/errno.h>
#include <linux/mtio.h>
#include <linux/ioctl.h>
+#include <linux/major.h>
#include <linux/slab.h>
#include <linux/fcntl.h>
#include <linux/init.h>
@@ -814,7 +815,7 @@ sg_common_write(Sg_fd * sfp, Sg_request * srp,
if (atomic_read(&sdp->detaching)) {
if (srp->bio) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
+ blk_mq_free_request(srp->rq);
srp->rq = NULL;
}
@@ -1389,7 +1390,7 @@ sg_rq_end_io(struct request *rq, blk_status_t status)
*/
srp->rq = NULL;
scsi_req_free_cmd(scsi_req(rq));
- blk_put_request(rq);
+ blk_mq_free_request(rq);
write_lock_irqsave(&sfp->rq_list_lock, iflags);
if (unlikely(srp->orphan)) {
@@ -1717,13 +1718,13 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
*
* With scsi-mq enabled, there are a fixed number of preallocated
* requests equal in number to shost->can_queue. If all of the
- * preallocated requests are already in use, then blk_get_request()
+ * preallocated requests are already in use, then scsi_alloc_request()
* will sleep until an active command completes, freeing up a request.
* Although waiting in an asynchronous interface is less than ideal, we
* do not want to use BLK_MQ_REQ_NOWAIT here because userspace might
* not expect an EWOULDBLOCK from this condition.
*/
- rq = blk_get_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
+ rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(rq)) {
kfree(long_cmdp);
@@ -1829,7 +1830,7 @@ sg_finish_rem_req(Sg_request *srp)
if (srp->rq) {
scsi_req_free_cmd(scsi_req(srp->rq));
- blk_put_request(srp->rq);
+ blk_mq_free_request(srp->rq);
}
if (srp->res_used)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 8b17b35283aa..3009b986d1d7 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -44,6 +44,7 @@
#include <linux/cdrom.h>
#include <linux/interrupt.h>
#include <linux/init.h>
+#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/blk-pm.h>
#include <linux/mutex.h>
@@ -966,7 +967,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
struct bio *bio;
int ret;
- rq = blk_get_request(disk->queue, REQ_OP_DRV_IN, 0);
+ rq = scsi_alloc_request(disk->queue, REQ_OP_DRV_IN, 0);
if (IS_ERR(rq))
return PTR_ERR(rq);
req = scsi_req(rq);
@@ -1002,7 +1003,7 @@ static int sr_read_cdda_bpc(struct cdrom_device_info *cdi, void __user *ubuf,
if (blk_rq_unmap_user(bio))
ret = -EFAULT;
out_put_request:
- blk_put_request(rq);
+ blk_mq_free_request(rq);
return ret;
}
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index ae8636d3780b..c2d5608f6b1a 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -32,6 +32,7 @@ static const char *verstr = "20160209";
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/mtio.h>
+#include <linux/major.h>
#include <linux/cdrom.h>
#include <linux/ioctl.h>
#include <linux/fcntl.h>
@@ -529,7 +530,7 @@ static void st_scsi_execute_end(struct request *req, blk_status_t status)
complete(SRpnt->waiting);
blk_rq_unmap_user(tmp);
- blk_put_request(req);
+ blk_mq_free_request(req);
}
static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
@@ -542,7 +543,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
int err = 0;
struct scsi_tape *STp = SRpnt->stp;
- req = blk_get_request(SRpnt->stp->device->request_queue,
+ req = scsi_alloc_request(SRpnt->stp->device->request_queue,
data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req))
@@ -556,7 +557,7 @@ static int st_scsi_execute(struct st_request *SRpnt, const unsigned char *cmd,
err = blk_rq_map_user(req->q, req, mdata, NULL, bufflen,
GFP_KERNEL);
if (err) {
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index ebbbc1299c62..9eb1b88a29dd 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1285,11 +1285,15 @@ static void storvsc_on_channel_callback(void *context)
foreach_vmbus_pkt(desc, channel) {
struct vstor_packet *packet = hv_pkt_data(desc);
struct storvsc_cmd_request *request = NULL;
+ u32 pktlen = hv_pkt_datalen(desc);
u64 rqst_id = desc->trans_id;
+ u32 minlen = rqst_id ? sizeof(struct vstor_packet) -
+ stor_device->vmscsi_size_delta : sizeof(enum vstor_packet_operation);
- if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
- stor_device->vmscsi_size_delta) {
- dev_err(&device->device, "Invalid packet len\n");
+ if (pktlen < minlen) {
+ dev_err(&device->device,
+ "Invalid pkt: id=%llu, len=%u, minlen=%u\n",
+ rqst_id, pktlen, minlen);
continue;
}
@@ -1302,13 +1306,23 @@ static void storvsc_on_channel_callback(void *context)
if (rqst_id == 0) {
/*
* storvsc_on_receive() looks at the vstor_packet in the message
- * from the ring buffer. If the operation in the vstor_packet is
- * COMPLETE_IO, then we call storvsc_on_io_completion(), and
- * dereference the guest memory address. Make sure we don't call
- * storvsc_on_io_completion() with a guest memory address that is
- * zero if Hyper-V were to construct and send such a bogus packet.
+ * from the ring buffer.
+ *
+ * - If the operation in the vstor_packet is COMPLETE_IO, then
+ * we call storvsc_on_io_completion(), and dereference the
+ * guest memory address. Make sure we don't call
+ * storvsc_on_io_completion() with a guest memory address
+ * that is zero if Hyper-V were to construct and send such
+ * a bogus packet.
+ *
+ * - If the operation in the vstor_packet is FCHBA_DATA, then
+ * we call cache_wwn(), and access the data payload area of
+ * the packet (wwn_packet); however, there is no guarantee
+ * that the packet is big enough to contain such area.
+ * Future-proof the code by rejecting such a bogus packet.
*/
- if (packet->operation == VSTOR_OPERATION_COMPLETE_IO) {
+ if (packet->operation == VSTOR_OPERATION_COMPLETE_IO ||
+ packet->operation == VSTOR_OPERATION_FCHBA_DATA) {
dev_err(&device->device, "Invalid packet with ID of 0\n");
continue;
}
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index a14dd8ce56d4..bb2dd79a1bcd 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -642,9 +642,9 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
}
/* setting for three timeout values for traffic class #0 */
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_FC0PROTTIMEOUTVAL), 8064);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_TC0REPLAYTIMEOUTVAL), 28224);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DL_AFC0REQTIMEOUTVAL), 20160);
return 0;
out:
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index d70cdcd35e43..67402baf6fae 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -48,11 +48,12 @@ out:
return err;
}
-static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
+static int ufshcd_crypto_keyslot_program(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm);
+ struct ufs_hba *hba =
+ container_of(profile, struct ufs_hba, crypto_profile);
const union ufs_crypto_cap_entry *ccap_array = hba->crypto_cap_array;
const struct ufs_crypto_alg_entry *alg =
&ufs_crypto_algs[key->crypto_cfg.crypto_mode];
@@ -105,11 +106,12 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
return ufshcd_program_key(hba, &cfg, slot);
}
-static int ufshcd_crypto_keyslot_evict(struct blk_keyslot_manager *ksm,
+static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key,
unsigned int slot)
{
- struct ufs_hba *hba = container_of(ksm, struct ufs_hba, ksm);
+ struct ufs_hba *hba =
+ container_of(profile, struct ufs_hba, crypto_profile);
return ufshcd_clear_keyslot(hba, slot);
}
@@ -120,11 +122,11 @@ bool ufshcd_crypto_enable(struct ufs_hba *hba)
return false;
/* Reset might clear all keys, so reprogram all the keys. */
- blk_ksm_reprogram_all_keys(&hba->ksm);
+ blk_crypto_reprogram_all_keys(&hba->crypto_profile);
return true;
}
-static const struct blk_ksm_ll_ops ufshcd_ksm_ops = {
+static const struct blk_crypto_ll_ops ufshcd_crypto_ops = {
.keyslot_program = ufshcd_crypto_keyslot_program,
.keyslot_evict = ufshcd_crypto_keyslot_evict,
};
@@ -179,15 +181,16 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
}
/* The actual number of configurations supported is (CFGC+1) */
- err = devm_blk_ksm_init(hba->dev, &hba->ksm,
- hba->crypto_capabilities.config_count + 1);
+ err = devm_blk_crypto_profile_init(
+ hba->dev, &hba->crypto_profile,
+ hba->crypto_capabilities.config_count + 1);
if (err)
goto out;
- hba->ksm.ksm_ll_ops = ufshcd_ksm_ops;
+ hba->crypto_profile.ll_ops = ufshcd_crypto_ops;
/* UFS only supports 8 bytes for any DUN */
- hba->ksm.max_dun_bytes_supported = 8;
- hba->ksm.dev = hba->dev;
+ hba->crypto_profile.max_dun_bytes_supported = 8;
+ hba->crypto_profile.dev = hba->dev;
/*
* Cache all the UFS crypto capabilities and advertise the supported
@@ -202,7 +205,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
blk_mode_num = ufshcd_find_blk_crypto_mode(
hba->crypto_cap_array[cap_idx]);
if (blk_mode_num != BLK_ENCRYPTION_MODE_INVALID)
- hba->ksm.crypto_modes_supported[blk_mode_num] |=
+ hba->crypto_profile.modes_supported[blk_mode_num] |=
hba->crypto_cap_array[cap_idx].sdus_mask * 512;
}
@@ -230,9 +233,8 @@ void ufshcd_init_crypto(struct ufs_hba *hba)
ufshcd_clear_keyslot(hba, slot);
}
-void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q)
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q)
{
if (hba->caps & UFSHCD_CAP_CRYPTO)
- blk_ksm_register(&hba->ksm, q);
+ blk_crypto_register(&hba->crypto_profile, q);
}
diff --git a/drivers/scsi/ufs/ufshcd-crypto.h b/drivers/scsi/ufs/ufshcd-crypto.h
index 78a58e788dff..e18c01276873 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.h
+++ b/drivers/scsi/ufs/ufshcd-crypto.h
@@ -18,7 +18,7 @@ static inline void ufshcd_prepare_lrbp_crypto(struct request *rq,
return;
}
- lrbp->crypto_key_slot = blk_ksm_get_slot_idx(rq->crypt_keyslot);
+ lrbp->crypto_key_slot = blk_crypto_keyslot_index(rq->crypt_keyslot);
lrbp->data_unit_num = rq->crypt_ctx->bc_dun[0];
}
@@ -40,8 +40,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba);
void ufshcd_init_crypto(struct ufs_hba *hba);
-void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q);
+void ufshcd_crypto_register(struct ufs_hba *hba, struct request_queue *q);
#else /* CONFIG_SCSI_UFS_CRYPTO */
@@ -64,8 +63,8 @@ static inline int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
static inline void ufshcd_init_crypto(struct ufs_hba *hba) { }
-static inline void ufshcd_crypto_setup_rq_keyslot_manager(struct ufs_hba *hba,
- struct request_queue *q) { }
+static inline void ufshcd_crypto_register(struct ufs_hba *hba,
+ struct request_queue *q) { }
#endif /* CONFIG_SCSI_UFS_CRYPTO */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 149c1aa09103..51424557810d 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -370,20 +370,6 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
- /*
- * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
- * address registers must be restored because the restore kernel can
- * have used different addresses.
- */
- ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
- REG_UTP_TRANSFER_REQ_LIST_BASE_H);
- ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_L);
- ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
- REG_UTP_TASK_REQ_LIST_BASE_H);
-
if (ufshcd_is_link_hibern8(hba)) {
int ret = ufshcd_uic_hibern8_exit(hba);
@@ -463,6 +449,18 @@ static struct ufs_hba_variant_ops ufs_intel_lkf_hba_vops = {
.device_reset = ufs_intel_device_reset,
};
+#ifdef CONFIG_PM_SLEEP
+static int ufshcd_pci_restore(struct device *dev)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ /* Force a full reset and restore */
+ ufshcd_set_link_off(hba);
+
+ return ufshcd_system_resume(dev);
+}
+#endif
+
/**
* ufshcd_pci_shutdown - main function to put the controller in reset state
* @pdev: pointer to PCI device handle
@@ -546,9 +544,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
static const struct dev_pm_ops ufshcd_pci_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
#ifdef CONFIG_PM_SLEEP
+ .suspend = ufshcd_system_suspend,
+ .resume = ufshcd_system_resume,
+ .freeze = ufshcd_system_suspend,
+ .thaw = ufshcd_system_resume,
+ .poweroff = ufshcd_system_suspend,
+ .restore = ufshcd_pci_restore,
.prepare = ufshcd_suspend_prepare,
.complete = ufshcd_resume_complete,
#endif
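
Dropping SET_SYSTEM_SLEEP_PM_OPS here is deliberate: it lets hibernation restore take a different path from an ordinary resume, since the restore kernel may have laid out the descriptors differently, so ufshcd_pci_restore() forces the link off and triggers a full reset first. The general shape for a driver that needs a dedicated .restore callback, as a hedged sketch with placeholder callbacks:

#include <linux/pm.h>

/* Illustrative callbacks only; my_suspend/my_resume/my_restore are placeholders. */
static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev)  { return 0; }

static int my_restore(struct device *dev)
{
	/* force a full re-initialisation instead of the normal resume fast path */
	return my_resume(dev);
}

static const struct dev_pm_ops my_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend  = my_suspend,
	.resume   = my_resume,
	.freeze   = my_suspend,
	.thaw     = my_resume,
	.poweroff = my_suspend,
	.restore  = my_restore,	/* only hibernation restore takes this path */
#endif
};
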
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 95be7ecdfe10..db1bc8655d46 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -2737,12 +2737,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->req_abort_skip = false;
- err = ufshpb_prep(hba, lrbp);
- if (err == -EAGAIN) {
- lrbp->cmd = NULL;
- ufshcd_release(hba);
- goto out;
- }
+ ufshpb_prep(hba, lrbp);
ufshcd_comp_scsi_upiu(hba, lrbp);
@@ -2925,7 +2920,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
* Even though we use wait_event() which sleeps indefinitely,
* the maximum wait time is bounded by SCSI request timeout.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
@@ -2952,7 +2947,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
@@ -4986,7 +4981,7 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
else if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
- ufshcd_crypto_setup_rq_keyslot_manager(hba, q);
+ ufshcd_crypto_register(hba, q);
return 0;
}
@@ -6517,9 +6512,9 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
int task_tag, err;
/*
- * blk_get_request() is used here only to get a free tag.
+ * blk_mq_alloc_request() is used here only to get a free tag.
*/
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req))
return PTR_ERR(req);
@@ -6575,7 +6570,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_release(hba);
- blk_put_request(req);
+ blk_mq_free_request(req);
return err;
}
@@ -6660,7 +6655,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
down_read(&hba->clk_scaling_lock);
- req = blk_get_request(q, REQ_OP_DRV_OUT, 0);
+ req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out_unlock;
@@ -6741,7 +6736,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
(struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_unlock:
up_read(&hba->clk_scaling_lock);
return err;
@@ -7912,7 +7907,7 @@ static void ufshcd_request_sense_done(struct request *rq, blk_status_t error)
if (error != BLK_STS_OK)
pr_err("%s: REQUEST SENSE failed (%d)\n", __func__, error);
kfree(rq->end_io_data);
- blk_put_request(rq);
+ blk_mq_free_request(rq);
}
static int
@@ -7932,7 +7927,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
if (!buffer)
return -ENOMEM;
- req = blk_get_request(sdev->request_queue, REQ_OP_DRV_IN,
+ req = blk_mq_alloc_request(sdev->request_queue, REQ_OP_DRV_IN,
/*flags=*/BLK_MQ_REQ_PM);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
@@ -7957,7 +7952,7 @@ ufshcd_request_sense_async(struct ufs_hba *hba, struct scsi_device *sdev)
return 0;
out_put:
- blk_put_request(req);
+ blk_mq_free_request(req);
out_free:
kfree(buffer);
return ret;
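
The remaining ufshcd.c hunks are mechanical: blk_get_request()/blk_put_request() were dropped from the block layer in favour of calling blk_mq_alloc_request()/blk_mq_free_request() directly, with identical semantics and the same BLK_MQ_REQ_* flags. The pattern, sketched for a hypothetical driver that only wants to reserve a tag:

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hedged sketch: allocate a driver-private (passthrough) request just to
 * reserve a tag, then free it; q is assumed to be a valid request_queue. */
static int my_reserve_tag(struct request_queue *q)
{
	struct request *req;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_NOWAIT);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* ... use req->tag while the request is held ... */

	blk_mq_free_request(req);
	return 0;
}
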
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 41f6e06f9185..62bdc412d38a 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -32,7 +32,7 @@
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include <linux/devfreq.h>
-#include <linux/keyslot-manager.h>
+#include <linux/blk-crypto-profile.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -766,7 +766,7 @@ struct ufs_hba_monitor {
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
* @crypto_cfg_register: Start of the crypto cfg array
- * @ksm: the keyslot manager tied to this hba
+ * @crypto_profile: the crypto profile of this hba (if applicable)
*/
struct ufs_hba {
void __iomem *mmio_base;
@@ -911,7 +911,7 @@ struct ufs_hba {
union ufs_crypto_capabilities crypto_capabilities;
union ufs_crypto_cap_entry *crypto_cap_array;
u32 crypto_cfg_register;
- struct blk_keyslot_manager ksm;
+ struct blk_crypto_profile crypto_profile;
#endif
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_root;
diff --git a/drivers/scsi/ufs/ufshpb.c b/drivers/scsi/ufs/ufshpb.c
index 589af5f6b940..182bcbf60f8c 100644
--- a/drivers/scsi/ufs/ufshpb.c
+++ b/drivers/scsi/ufs/ufshpb.c
@@ -84,16 +84,6 @@ static bool ufshpb_is_supported_chunk(struct ufshpb_lu *hpb, int transfer_len)
return transfer_len <= hpb->pre_req_max_tr_len;
}
-/*
- * In this driver, WRITE_BUFFER CMD support 36KB (len=9) ~ 1MB (len=256) as
- * default. It is possible to change range of transfer_len through sysfs.
- */
-static inline bool ufshpb_is_required_wb(struct ufshpb_lu *hpb, int len)
-{
- return len > hpb->pre_req_min_tr_len &&
- len <= hpb->pre_req_max_tr_len;
-}
-
static bool ufshpb_is_general_lun(int lun)
{
return lun < UFS_UPIU_MAX_UNIT_NUM_ID;
@@ -334,7 +324,7 @@ ufshpb_get_pos_from_lpn(struct ufshpb_lu *hpb, unsigned long lpn, int *rgn_idx,
static void
ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
- __be64 ppn, u8 transfer_len, int read_id)
+ __be64 ppn, u8 transfer_len)
{
unsigned char *cdb = lrbp->cmd->cmnd;
__be64 ppn_tmp = ppn;
@@ -346,256 +336,11 @@ ufshpb_set_hpb_read_to_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
/* ppn value is stored as big-endian in the host memory */
memcpy(&cdb[6], &ppn_tmp, sizeof(__be64));
cdb[14] = transfer_len;
- cdb[15] = read_id;
+ cdb[15] = 0;
lrbp->cmd->cmd_len = UFS_CDB_SIZE;
}
-static inline void ufshpb_set_write_buf_cmd(unsigned char *cdb,
- unsigned long lpn, unsigned int len,
- int read_id)
-{
- cdb[0] = UFSHPB_WRITE_BUFFER;
- cdb[1] = UFSHPB_WRITE_BUFFER_PREFETCH_ID;
-
- put_unaligned_be32(lpn, &cdb[2]);
- cdb[6] = read_id;
- put_unaligned_be16(len * HPB_ENTRY_SIZE, &cdb[7]);
-
- cdb[9] = 0x00; /* Control = 0x00 */
-}
-
-static struct ufshpb_req *ufshpb_get_pre_req(struct ufshpb_lu *hpb)
-{
- struct ufshpb_req *pre_req;
-
- if (hpb->num_inflight_pre_req >= hpb->throttle_pre_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev,
- "pre_req throttle. inflight %d throttle %d",
- hpb->num_inflight_pre_req, hpb->throttle_pre_req);
- return NULL;
- }
-
- pre_req = list_first_entry_or_null(&hpb->lh_pre_req_free,
- struct ufshpb_req, list_req);
- if (!pre_req) {
- dev_info(&hpb->sdev_ufs_lu->sdev_dev, "There is no pre_req");
- return NULL;
- }
-
- list_del_init(&pre_req->list_req);
- hpb->num_inflight_pre_req++;
-
- return pre_req;
-}
-
-static inline void ufshpb_put_pre_req(struct ufshpb_lu *hpb,
- struct ufshpb_req *pre_req)
-{
- pre_req->req = NULL;
- bio_reset(pre_req->bio);
- list_add_tail(&pre_req->list_req, &hpb->lh_pre_req_free);
- hpb->num_inflight_pre_req--;
-}
-
-static void ufshpb_pre_req_compl_fn(struct request *req, blk_status_t error)
-{
- struct ufshpb_req *pre_req = (struct ufshpb_req *)req->end_io_data;
- struct ufshpb_lu *hpb = pre_req->hpb;
- unsigned long flags;
-
- if (error) {
- struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
- struct scsi_sense_hdr sshdr;
-
- dev_err(&hpb->sdev_ufs_lu->sdev_dev, "block status %d", error);
- scsi_command_normalize_sense(cmd, &sshdr);
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "code %x sense_key %x asc %x ascq %x",
- sshdr.response_code,
- sshdr.sense_key, sshdr.asc, sshdr.ascq);
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "byte4 %x byte5 %x byte6 %x additional_len %x",
- sshdr.byte4, sshdr.byte5,
- sshdr.byte6, sshdr.additional_length);
- }
-
- blk_mq_free_request(req);
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_put_pre_req(pre_req->hpb, pre_req);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-}
-
-static int ufshpb_prep_entry(struct ufshpb_req *pre_req, struct page *page)
-{
- struct ufshpb_lu *hpb = pre_req->hpb;
- struct ufshpb_region *rgn;
- struct ufshpb_subregion *srgn;
- __be64 *addr;
- int offset = 0;
- int copied;
- unsigned long lpn = pre_req->wb.lpn;
- int rgn_idx, srgn_idx, srgn_offset;
- unsigned long flags;
-
- addr = page_address(page);
- ufshpb_get_pos_from_lpn(hpb, lpn, &rgn_idx, &srgn_idx, &srgn_offset);
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
-
-next_offset:
- rgn = hpb->rgn_tbl + rgn_idx;
- srgn = rgn->srgn_tbl + srgn_idx;
-
- if (!ufshpb_is_valid_srgn(rgn, srgn))
- goto mctx_error;
-
- if (!srgn->mctx)
- goto mctx_error;
-
- copied = ufshpb_fill_ppn_from_page(hpb, srgn->mctx, srgn_offset,
- pre_req->wb.len - offset,
- &addr[offset]);
-
- if (copied < 0)
- goto mctx_error;
-
- offset += copied;
- srgn_offset += copied;
-
- if (srgn_offset == hpb->entries_per_srgn) {
- srgn_offset = 0;
-
- if (++srgn_idx == hpb->srgns_per_rgn) {
- srgn_idx = 0;
- rgn_idx++;
- }
- }
-
- if (offset < pre_req->wb.len)
- goto next_offset;
-
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return 0;
-mctx_error:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- return -ENOMEM;
-}
-
-static int ufshpb_pre_req_add_bio_page(struct ufshpb_lu *hpb,
- struct request_queue *q,
- struct ufshpb_req *pre_req)
-{
- struct page *page = pre_req->wb.m_page;
- struct bio *bio = pre_req->bio;
- int entries_bytes, ret;
-
- if (!page)
- return -ENOMEM;
-
- if (ufshpb_prep_entry(pre_req, page))
- return -ENOMEM;
-
- entries_bytes = pre_req->wb.len * sizeof(__be64);
-
- ret = bio_add_pc_page(q, bio, page, entries_bytes, 0);
- if (ret != entries_bytes) {
- dev_err(&hpb->sdev_ufs_lu->sdev_dev,
- "bio_add_pc_page fail: %d", ret);
- return -ENOMEM;
- }
- return 0;
-}
-
-static inline int ufshpb_get_read_id(struct ufshpb_lu *hpb)
-{
- if (++hpb->cur_read_id >= MAX_HPB_READ_ID)
- hpb->cur_read_id = 1;
- return hpb->cur_read_id;
-}
-
-static int ufshpb_execute_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
- struct ufshpb_req *pre_req, int read_id)
-{
- struct scsi_device *sdev = cmd->device;
- struct request_queue *q = sdev->request_queue;
- struct request *req;
- struct scsi_request *rq;
- struct bio *bio = pre_req->bio;
-
- pre_req->hpb = hpb;
- pre_req->wb.lpn = sectors_to_logical(cmd->device,
- blk_rq_pos(scsi_cmd_to_rq(cmd)));
- pre_req->wb.len = sectors_to_logical(cmd->device,
- blk_rq_sectors(scsi_cmd_to_rq(cmd)));
- if (ufshpb_pre_req_add_bio_page(hpb, q, pre_req))
- return -ENOMEM;
-
- req = pre_req->req;
-
- /* 1. request setup */
- blk_rq_append_bio(req, bio);
- req->rq_disk = NULL;
- req->end_io_data = (void *)pre_req;
- req->end_io = ufshpb_pre_req_compl_fn;
-
- /* 2. scsi_request setup */
- rq = scsi_req(req);
- rq->retries = 1;
-
- ufshpb_set_write_buf_cmd(rq->cmd, pre_req->wb.lpn, pre_req->wb.len,
- read_id);
- rq->cmd_len = scsi_command_size(rq->cmd);
-
- if (blk_insert_cloned_request(q, req) != BLK_STS_OK)
- return -EAGAIN;
-
- hpb->stats.pre_req_cnt++;
-
- return 0;
-}
-
-static int ufshpb_issue_pre_req(struct ufshpb_lu *hpb, struct scsi_cmnd *cmd,
- int *read_id)
-{
- struct ufshpb_req *pre_req;
- struct request *req = NULL;
- unsigned long flags;
- int _read_id;
- int ret = 0;
-
- req = blk_get_request(cmd->device->request_queue,
- REQ_OP_DRV_OUT | REQ_SYNC, BLK_MQ_REQ_NOWAIT);
- if (IS_ERR(req))
- return -EAGAIN;
-
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- pre_req = ufshpb_get_pre_req(hpb);
- if (!pre_req) {
- ret = -EAGAIN;
- goto unlock_out;
- }
- _read_id = ufshpb_get_read_id(hpb);
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
-
- pre_req->req = req;
-
- ret = ufshpb_execute_pre_req(hpb, cmd, pre_req, _read_id);
- if (ret)
- goto free_pre_req;
-
- *read_id = _read_id;
-
- return ret;
-free_pre_req:
- spin_lock_irqsave(&hpb->rgn_state_lock, flags);
- ufshpb_put_pre_req(hpb, pre_req);
-unlock_out:
- spin_unlock_irqrestore(&hpb->rgn_state_lock, flags);
- blk_put_request(req);
- return ret;
-}
-
/*
* This function will set up HPB read command using host-side L2P map data.
*/
@@ -609,7 +354,6 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
__be64 ppn;
unsigned long flags;
int transfer_len, rgn_idx, srgn_idx, srgn_offset;
- int read_id = 0;
int err = 0;
hpb = ufshpb_get_hpb_data(cmd->device);
@@ -685,24 +429,8 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
dev_err(hba->dev, "get ppn failed. err %d\n", err);
return err;
}
- if (!ufshpb_is_legacy(hba) &&
- ufshpb_is_required_wb(hpb, transfer_len)) {
- err = ufshpb_issue_pre_req(hpb, cmd, &read_id);
- if (err) {
- unsigned long timeout;
-
- timeout = cmd->jiffies_at_alloc + msecs_to_jiffies(
- hpb->params.requeue_timeout_ms);
-
- if (time_before(jiffies, timeout))
- return -EAGAIN;
-
- hpb->stats.miss_cnt++;
- return 0;
- }
- }
- ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len, read_id);
+ ufshpb_set_hpb_read_to_upiu(hba, lrbp, ppn, transfer_len);
hpb->stats.hit_cnt++;
return 0;
@@ -721,7 +449,7 @@ static struct ufshpb_req *ufshpb_get_req(struct ufshpb_lu *hpb,
return NULL;
retry:
- req = blk_get_request(hpb->sdev_ufs_lu->request_queue, dir,
+ req = blk_mq_alloc_request(hpb->sdev_ufs_lu->request_queue, dir,
BLK_MQ_REQ_NOWAIT);
if (!atomic && (PTR_ERR(req) == -EWOULDBLOCK) && (--retries > 0)) {
@@ -745,7 +473,7 @@ free_rq:
static void ufshpb_put_req(struct ufshpb_lu *hpb, struct ufshpb_req *rq)
{
- blk_put_request(rq->req);
+ blk_mq_free_request(rq->req);
kmem_cache_free(hpb->map_req_cache, rq);
}
@@ -1841,16 +1569,11 @@ static void ufshpb_lu_parameter_init(struct ufs_hba *hba,
u32 entries_per_rgn;
u64 rgn_mem_size, tmp;
- /* for pre_req */
- hpb->pre_req_min_tr_len = hpb_dev_info->max_hpb_single_cmd + 1;
-
if (ufshpb_is_legacy(hba))
hpb->pre_req_max_tr_len = HPB_LEGACY_CHUNK_HIGH;
else
hpb->pre_req_max_tr_len = HPB_MULTI_CHUNK_HIGH;
- hpb->cur_read_id = 0;
-
hpb->lu_pinned_start = hpb_lu_info->pinned_start;
hpb->lu_pinned_end = hpb_lu_info->num_pinned ?
(hpb_lu_info->pinned_start + hpb_lu_info->num_pinned - 1)
diff --git a/drivers/scsi/ufs/ufshpb.h b/drivers/scsi/ufs/ufshpb.h
index a79e07398970..f15d8fdbce2e 100644
--- a/drivers/scsi/ufs/ufshpb.h
+++ b/drivers/scsi/ufs/ufshpb.h
@@ -241,8 +241,6 @@ struct ufshpb_lu {
spinlock_t param_lock;
struct list_head lh_pre_req_free;
- int cur_read_id;
- int pre_req_min_tr_len;
int pre_req_max_tr_len;
/* cached L2P map management worker */
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 07d0250f17c3..b11b57355914 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -22,6 +22,7 @@
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
@@ -977,6 +978,7 @@ static unsigned int features[] = {
static struct virtio_driver virtio_scsi_driver = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
+ .suppress_used_validation = true,
.driver.name = KBUILD_MODNAME,
.driver.owner = THIS_MODULE,
.id_table = id_table,
diff --git a/drivers/soc/amlogic/meson-canvas.c b/drivers/soc/amlogic/meson-canvas.c
index d0329ad170d1..383b0cfc584e 100644
--- a/drivers/soc/amlogic/meson-canvas.c
+++ b/drivers/soc/amlogic/meson-canvas.c
@@ -168,7 +168,6 @@ EXPORT_SYMBOL_GPL(meson_canvas_free);
static int meson_canvas_probe(struct platform_device *pdev)
{
- struct resource *res;
struct meson_canvas *canvas;
struct device *dev = &pdev->dev;
@@ -176,8 +175,7 @@ static int meson_canvas_probe(struct platform_device *pdev)
if (!canvas)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- canvas->reg_base = devm_ioremap_resource(dev, res);
+ canvas->reg_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(canvas->reg_base))
return PTR_ERR(canvas->reg_base);
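
This conversion, and the identical ones further down (meson-clk-measure, bcm-pmb, bcm63xx-power, guts, rcpm), folds the platform_get_resource() + devm_ioremap_resource() pair into the single devm_platform_ioremap_resource() helper, which performs the same lookup and error handling internally. Roughly, as a sketch for the usual single-MEM-region case:

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *map_first_mem_region(struct platform_device *pdev)
{
	/* old style:
	 *   struct resource *res =
	 *           platform_get_resource(pdev, IORESOURCE_MEM, 0);
	 *   return devm_ioremap_resource(&pdev->dev, res);
	 */
	return devm_platform_ioremap_resource(pdev, 0);
}
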
diff --git a/drivers/soc/amlogic/meson-clk-measure.c b/drivers/soc/amlogic/meson-clk-measure.c
index 6dd190270123..3f3039600357 100644
--- a/drivers/soc/amlogic/meson-clk-measure.c
+++ b/drivers/soc/amlogic/meson-clk-measure.c
@@ -606,7 +606,6 @@ static int meson_msr_probe(struct platform_device *pdev)
{
const struct meson_msr_id *match_data;
struct meson_msr *priv;
- struct resource *res;
struct dentry *root, *clks;
void __iomem *base;
int i;
@@ -624,8 +623,7 @@ static int meson_msr_probe(struct platform_device *pdev)
memcpy(priv->msr_table, match_data, sizeof(priv->msr_table));
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/amlogic/meson-gx-socinfo.c b/drivers/soc/amlogic/meson-gx-socinfo.c
index 6f54bd832c8b..165f7548401b 100644
--- a/drivers/soc/amlogic/meson-gx-socinfo.c
+++ b/drivers/soc/amlogic/meson-gx-socinfo.c
@@ -65,6 +65,7 @@ static const struct meson_gx_package_id {
{ "A113X", 0x25, 0x37, 0xff },
{ "A113D", 0x25, 0x22, 0xff },
{ "S905D2", 0x28, 0x10, 0xf0 },
+ { "S905Y2", 0x28, 0x30, 0xf0 },
{ "S905X2", 0x28, 0x40, 0xf0 },
{ "A311D", 0x29, 0x10, 0xf0 },
{ "S922X", 0x29, 0x40, 0xf0 },
diff --git a/drivers/soc/aspeed/Kconfig b/drivers/soc/aspeed/Kconfig
index 243ca196e6ad..f579ee0b5afa 100644
--- a/drivers/soc/aspeed/Kconfig
+++ b/drivers/soc/aspeed/Kconfig
@@ -24,6 +24,16 @@ config ASPEED_LPC_SNOOP
allows the BMC to listen on and save the data written by
the host to an arbitrary LPC I/O port.
+config ASPEED_UART_ROUTING
+ tristate "ASPEED uart routing control"
+ select REGMAP
+ select MFD_SYSCON
+ default ARCH_ASPEED
+ help
+ Provides a driver to control the UART routing paths, allowing
+ users to perform runtime configuration of the RX muxes among
+ the UART controllers and I/O pins.
+
config ASPEED_P2A_CTRL
tristate "ASPEED P2A (VGA MMIO to BMC) bridge control"
select REGMAP
diff --git a/drivers/soc/aspeed/Makefile b/drivers/soc/aspeed/Makefile
index fcab7192e1a4..b35d74592964 100644
--- a/drivers/soc/aspeed/Makefile
+++ b/drivers/soc/aspeed/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
-obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
-obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
-obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
-obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
+obj-$(CONFIG_ASPEED_LPC_CTRL) += aspeed-lpc-ctrl.o
+obj-$(CONFIG_ASPEED_LPC_SNOOP) += aspeed-lpc-snoop.o
+obj-$(CONFIG_ASPEED_UART_ROUTING) += aspeed-uart-routing.o
+obj-$(CONFIG_ASPEED_P2A_CTRL) += aspeed-p2a-ctrl.o
+obj-$(CONFIG_ASPEED_SOCINFO) += aspeed-socinfo.o
diff --git a/drivers/soc/aspeed/aspeed-uart-routing.c b/drivers/soc/aspeed/aspeed-uart-routing.c
new file mode 100644
index 000000000000..ef8b24fd1851
--- /dev/null
+++ b/drivers/soc/aspeed/aspeed-uart-routing.c
@@ -0,0 +1,603 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (c) 2018 Google LLC
+ * Copyright (c) 2021 Aspeed Technology Inc.
+ */
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+
+/* register offsets */
+#define HICR9 0x98
+#define HICRA 0x9c
+
+/* attributes options */
+#define UART_ROUTING_IO1 "io1"
+#define UART_ROUTING_IO2 "io2"
+#define UART_ROUTING_IO3 "io3"
+#define UART_ROUTING_IO4 "io4"
+#define UART_ROUTING_IO5 "io5"
+#define UART_ROUTING_IO6 "io6"
+#define UART_ROUTING_IO10 "io10"
+#define UART_ROUTING_UART1 "uart1"
+#define UART_ROUTING_UART2 "uart2"
+#define UART_ROUTING_UART3 "uart3"
+#define UART_ROUTING_UART4 "uart4"
+#define UART_ROUTING_UART5 "uart5"
+#define UART_ROUTING_UART6 "uart6"
+#define UART_ROUTING_UART10 "uart10"
+#define UART_ROUTING_RES "reserved"
+
+struct aspeed_uart_routing {
+ struct regmap *map;
+ struct attribute_group const *attr_grp;
+};
+
+struct aspeed_uart_routing_selector {
+ struct device_attribute dev_attr;
+ uint8_t reg;
+ uint8_t mask;
+ uint8_t shift;
+ const char *const options[];
+};
+
+#define to_routing_selector(_dev_attr) \
+ container_of(_dev_attr, struct aspeed_uart_routing_selector, dev_attr)
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+#define ROUTING_ATTR(_name) { \
+ .attr = {.name = _name, \
+ .mode = VERIFY_OCTAL_PERMISSIONS(0644) }, \
+ .show = aspeed_uart_routing_show, \
+ .store = aspeed_uart_routing_store, \
+}
+
+/* routing selector for AST25xx */
+static struct aspeed_uart_routing_selector ast2500_io6_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO6),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO5,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART5),
+ .reg = HICRA,
+ .shift = 28,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO5,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io5_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO5),
+ .reg = HICRA,
+ .shift = 12,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2500_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART5,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO6,
+ NULL,
+ },
+};
+
+static struct attribute *ast2500_uart_routing_attrs[] = {
+ &ast2500_io6_sel.dev_attr.attr,
+ &ast2500_uart5_sel.dev_attr.attr,
+ &ast2500_uart4_sel.dev_attr.attr,
+ &ast2500_uart3_sel.dev_attr.attr,
+ &ast2500_uart2_sel.dev_attr.attr,
+ &ast2500_uart1_sel.dev_attr.attr,
+ &ast2500_io5_sel.dev_attr.attr,
+ &ast2500_io4_sel.dev_attr.attr,
+ &ast2500_io3_sel.dev_attr.attr,
+ &ast2500_io2_sel.dev_attr.attr,
+ &ast2500_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2500_uart_routing_attr_group = {
+ .attrs = ast2500_uart_routing_attrs,
+};
+
+/* routing selector for AST26xx */
+static struct aspeed_uart_routing_selector ast2600_uart10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART10),
+ .reg = HICR9,
+ .shift = 12,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_IO10,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io10_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO10),
+ .reg = HICR9,
+ .shift = 8,
+ .mask = 0xf,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_RES,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_RES,
+ UART_ROUTING_UART10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART4),
+ .reg = HICRA,
+ .shift = 25,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART3),
+ .reg = HICRA,
+ .shift = 22,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART2),
+ .reg = HICRA,
+ .shift = 19,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO1,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_uart1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_UART1),
+ .reg = HICRA,
+ .shift = 16,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io4_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO4),
+ .reg = HICRA,
+ .shift = 9,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io3_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO3),
+ .reg = HICRA,
+ .shift = 6,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_IO1,
+ UART_ROUTING_IO2,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io2_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO2),
+ .reg = HICRA,
+ .shift = 3,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_UART1,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct aspeed_uart_routing_selector ast2600_io1_sel = {
+ .dev_attr = ROUTING_ATTR(UART_ROUTING_IO1),
+ .reg = HICRA,
+ .shift = 0,
+ .mask = 0x7,
+ .options = {
+ UART_ROUTING_UART1,
+ UART_ROUTING_UART2,
+ UART_ROUTING_UART3,
+ UART_ROUTING_UART4,
+ UART_ROUTING_UART10,
+ UART_ROUTING_IO3,
+ UART_ROUTING_IO4,
+ UART_ROUTING_IO10,
+ NULL,
+ },
+};
+
+static struct attribute *ast2600_uart_routing_attrs[] = {
+ &ast2600_uart10_sel.dev_attr.attr,
+ &ast2600_io10_sel.dev_attr.attr,
+ &ast2600_uart4_sel.dev_attr.attr,
+ &ast2600_uart3_sel.dev_attr.attr,
+ &ast2600_uart2_sel.dev_attr.attr,
+ &ast2600_uart1_sel.dev_attr.attr,
+ &ast2600_io4_sel.dev_attr.attr,
+ &ast2600_io3_sel.dev_attr.attr,
+ &ast2600_io2_sel.dev_attr.attr,
+ &ast2600_io1_sel.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ast2600_uart_routing_attr_group = {
+ .attrs = ast2600_uart_routing_attrs,
+};
+
+static ssize_t aspeed_uart_routing_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val, pos, len;
+
+ regmap_read(uart_routing->map, sel->reg, &val);
+ val = (val >> sel->shift) & sel->mask;
+
+ len = 0;
+ for (pos = 0; sel->options[pos] != NULL; ++pos) {
+ if (pos == val)
+ len += sysfs_emit_at(buf, len, "[%s] ", sel->options[pos]);
+ else
+ len += sysfs_emit_at(buf, len, "%s ", sel->options[pos]);
+ }
+
+ if (val >= pos)
+ len += sysfs_emit_at(buf, len, "[unknown(%d)]", val);
+
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+static ssize_t aspeed_uart_routing_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct aspeed_uart_routing *uart_routing = dev_get_drvdata(dev);
+ struct aspeed_uart_routing_selector *sel = to_routing_selector(attr);
+ int val;
+
+ val = match_string(sel->options, -1, buf);
+ if (val < 0) {
+ dev_err(dev, "invalid value \"%s\"\n", buf);
+ return -EINVAL;
+ }
+
+ regmap_update_bits(uart_routing->map, sel->reg,
+ (sel->mask << sel->shift),
+ (val & sel->mask) << sel->shift);
+
+ return count;
+}
+
+static int aspeed_uart_routing_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing;
+
+ uart_routing = devm_kzalloc(&pdev->dev, sizeof(*uart_routing), GFP_KERNEL);
+ if (!uart_routing)
+ return -ENOMEM;
+
+ uart_routing->map = syscon_node_to_regmap(dev->parent->of_node);
+ if (IS_ERR(uart_routing->map)) {
+ dev_err(dev, "cannot get regmap\n");
+ return PTR_ERR(uart_routing->map);
+ }
+
+ uart_routing->attr_grp = of_device_get_match_data(dev);
+
+ rc = sysfs_create_group(&dev->kobj, uart_routing->attr_grp);
+ if (rc < 0)
+ return rc;
+
+ dev_set_drvdata(dev, uart_routing);
+
+ dev_info(dev, "module loaded\n");
+
+ return 0;
+}
+
+static int aspeed_uart_routing_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct aspeed_uart_routing *uart_routing = platform_get_drvdata(pdev);
+
+ sysfs_remove_group(&dev->kobj, uart_routing->attr_grp);
+
+ return 0;
+}
+
+static const struct of_device_id aspeed_uart_routing_table[] = {
+ { .compatible = "aspeed,ast2400-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2500-uart-routing",
+ .data = &ast2500_uart_routing_attr_group },
+ { .compatible = "aspeed,ast2600-uart-routing",
+ .data = &ast2600_uart_routing_attr_group },
+ { },
+};
+
+static struct platform_driver aspeed_uart_routing_driver = {
+ .driver = {
+ .name = "aspeed-uart-routing",
+ .of_match_table = aspeed_uart_routing_table,
+ },
+ .probe = aspeed_uart_routing_probe,
+ .remove = aspeed_uart_routing_remove,
+};
+
+module_platform_driver(aspeed_uart_routing_driver);
+
+MODULE_AUTHOR("Oskar Senft <osk@google.com>");
+MODULE_AUTHOR("Chia-Wei Wang <chiawei_wang@aspeedtech.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Driver to configure Aspeed UART routing");
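
Each selector in the new driver becomes one read/write sysfs attribute: show() lists every option and brackets the active one, while store() accepts any listed option name via match_string() (an exact comparison, so the value should be written without a trailing newline). A hedged userspace example in C follows; the sysfs path is a guess and depends on the platform device name.

#include <stdio.h>

/* Hypothetical path: the real directory is named after the device-tree node. */
#define UART1_SEL "/sys/bus/platform/devices/1e789098.uart-routing/uart1"

int main(void)
{
	char buf[256];
	FILE *f = fopen(UART1_SEL, "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("current routing: %s", buf);	/* e.g. "[io1] io2 io3 ..." */
	fclose(f);

	f = fopen(UART1_SEL, "w");
	if (!f)
		return 1;
	fputs("io3", f);	/* no trailing newline: match_string() is exact */
	fclose(f);
	return 0;
}
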
diff --git a/drivers/soc/bcm/bcm63xx/bcm-pmb.c b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
index 774465c119be..7bbe46ea5f94 100644
--- a/drivers/soc/bcm/bcm63xx/bcm-pmb.c
+++ b/drivers/soc/bcm/bcm63xx/bcm-pmb.c
@@ -276,7 +276,6 @@ static int bcm_pmb_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
const struct bcm_pmb_pd_data *table;
const struct bcm_pmb_pd_data *e;
- struct resource *res;
struct bcm_pmb *pmb;
int max_id;
int err;
@@ -287,8 +286,7 @@ static int bcm_pmb_probe(struct platform_device *pdev)
pmb->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pmb->base = devm_ioremap_resource(&pdev->dev, res);
+ pmb->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pmb->base))
return PTR_ERR(pmb->base);
diff --git a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
index 515fe182dc34..aa72e13d5d0e 100644
--- a/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
+++ b/drivers/soc/bcm/bcm63xx/bcm63xx-power.c
@@ -91,7 +91,6 @@ static int bcm63xx_power_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
- struct resource *res;
const struct bcm63xx_power_data *entry, *table;
struct bcm63xx_power *power;
unsigned int ndom;
@@ -102,8 +101,7 @@ static int bcm63xx_power_probe(struct platform_device *pdev)
if (!power)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- power->base = devm_ioremap_resource(&pdev->dev, res);
+ power->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(power->base))
return PTR_ERR(power->base);
diff --git a/drivers/soc/bcm/brcmstb/biuctrl.c b/drivers/soc/bcm/brcmstb/biuctrl.c
index 7f8dc302ae6e..2c975d79fe8e 100644
--- a/drivers/soc/bcm/brcmstb/biuctrl.c
+++ b/drivers/soc/bcm/brcmstb/biuctrl.c
@@ -136,6 +136,8 @@ static int __init mcp_write_pairing_set(void)
static const u32 a72_b53_mach_compat[] = {
0x7211,
+ 0x72113,
+ 0x72116,
0x7216,
0x72164,
0x72165,
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index 4df32bc4c7a6..07d52cafbb31 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select DIMLIB
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index e13fd3ac1939..2fbcb78cdaaf 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -46,6 +46,9 @@ struct dpio_rsp_get_attr {
__le64 qbman_portal_ci_addr;
/* cmd word 3 */
__le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
};
struct dpio_stashing_dest {
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 7f397b4ad878..dd948889eeab 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -162,6 +162,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
goto err_get_attr;
}
desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
if (err) {
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 7351f3030550..db0d3910fee4 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@ -28,6 +29,14 @@ struct dpaa2_io {
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@ -100,6 +109,17 @@ struct dpaa2_io *dpaa2_io_service_select(int cpu)
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@ -114,6 +134,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@ -127,7 +148,15 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+ /* Compute the length in ns of 256 QBMAN clock cycles, since the
+ * interrupt timeout period register is specified in QBMAN clock
+ * cycles in increments of 256.
+ */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
@@ -138,6 +167,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@ -155,6 +185,12 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@ -194,6 +230,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@ -462,7 +500,7 @@ int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@ -779,3 +817,82 @@ int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
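
The new exports are meant to be driven from a consumer's datapath (dpaa2-eth being the obvious candidate): the poll loop accumulates dequeued frames and bytes and hands them to dpaa2_io_update_net_dim(), which is a no-op unless adaptive coalescing has been switched on. A hedged sketch of that consumer-side usage, with made-up bookkeeping:

#include <linux/types.h>
#include <soc/fsl/dpaa2-io.h>

/* Sketch only: rx_frames/rx_bytes are whatever the consumer accumulated while
 * dequeuing in its NAPI poll; dpio is that channel's dpaa2_io object. */
static void my_poll_done(struct dpaa2_io *dpio, u64 rx_frames, u64 rx_bytes)
{
	dpaa2_io_update_net_dim(dpio, rx_frames, rx_bytes);
}

static void my_enable_adaptive_coalescing(struct dpaa2_io *dpio)
{
	dpaa2_io_set_irq_coalescing(dpio, 0);	/* known static starting point */
	dpaa2_io_set_adaptive_coalescing(dpio, 1);
}
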
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index af74c597a675..8ed606ffaac5 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -162,6 +162,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
attr->qbman_portal_ci_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
return 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index da06f7258098..7fda44f0d7f4 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -59,6 +59,7 @@ int dpio_disable(struct fsl_mc_io *mc_io,
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
* @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
*/
struct dpio_attr {
int id;
@@ -68,6 +69,7 @@ struct dpio_attr {
enum dpio_channel_mode channel_mode;
u8 num_priorities;
u32 qbman_version;
+ u32 clk;
};
int dpio_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index f13da4d7d1c5..e46bcf78ed59 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -29,6 +29,7 @@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@ -38,6 +39,7 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@ -355,6 +357,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an IRQ timeout period of 0 us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@ -688,9 +693,9 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@ -770,9 +775,9 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@ -1796,3 +1801,56 @@ u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
return le32_to_cpu(a->fill);
}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+ /* Convert irq_holdoff value from usecs to 256 QBMAN clock cycles
+ * increments. This depends on the QBMAN internal frequency.
+ */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
+
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
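
To make the unit conversion concrete with made-up numbers: assuming a 700 MHz QBMAN clock (the real frequency arrives via the new clk attribute), qman_256_cycles_per_ns = 256000 / 700 = 365, i.e. 256 clock cycles last roughly 365 ns. A requested irq_holdoff of 100 us then programs ITPR with 100 * 1000 / 365 = 273 increments, and the largest holdoff accepted before the -EINVAL path is 365 * 4096 / 1000, about 1495 us. The 700 MHz figure is purely illustrative, not a claim about any particular SoC.
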
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index c7c2225b7d91..b23883dd2725 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -24,6 +24,8 @@ struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
@@ -156,6 +158,11 @@ struct qbman_swp {
} eqcr;
spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
};
/* Function pointers */
@@ -648,4 +655,10 @@ static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
return qbman_swp_dqrr_next_ptr(s);
}
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c
index d5e9a5f2c087..072473a16f4d 100644
--- a/drivers/soc/fsl/guts.c
+++ b/drivers/soc/fsl/guts.c
@@ -140,7 +140,6 @@ static int fsl_guts_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct device *dev = &pdev->dev;
- struct resource *res;
const struct fsl_soc_die_attr *soc_die;
const char *machine;
u32 svr;
@@ -152,8 +151,7 @@ static int fsl_guts_probe(struct platform_device *pdev)
guts->little_endian = of_property_read_bool(np, "little-endian");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- guts->regs = devm_ioremap_resource(dev, res);
+ guts->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(guts->regs))
return PTR_ERR(guts->regs);
diff --git a/drivers/soc/fsl/rcpm.c b/drivers/soc/fsl/rcpm.c
index 90d3f4060b0c..3d0cae30c769 100644
--- a/drivers/soc/fsl/rcpm.c
+++ b/drivers/soc/fsl/rcpm.c
@@ -146,7 +146,6 @@ static const struct dev_pm_ops rcpm_pm_ops = {
static int rcpm_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *r;
struct rcpm *rcpm;
int ret;
@@ -154,11 +153,7 @@ static int rcpm_probe(struct platform_device *pdev)
if (!rcpm)
return -ENOMEM;
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r)
- return -ENODEV;
-
- rcpm->ippdexpcr_base = devm_ioremap_resource(&pdev->dev, r);
+ rcpm->ippdexpcr_base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rcpm->ippdexpcr_base)) {
ret = PTR_ERR(rcpm->ippdexpcr_base);
return ret;
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 05812f8ae734..a840494e849a 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -6,6 +6,7 @@ config IMX_GPCV2_PM_DOMAINS
depends on ARCH_MXC || (COMPILE_TEST && OF)
depends on PM
select PM_GENERIC_DOMAINS
+ select REGMAP_MMIO
default y if SOC_IMX7D
config SOC_IMX8M
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 078dc918f4f3..8a707077914c 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,3 +5,4 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
+obj-$(CONFIG_SOC_IMX8M) += imx8m-blk-ctrl.o
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 34a9ac1f2b9b..b8d52d8d29db 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -192,7 +192,7 @@ struct imx_pgc_domain {
struct clk_bulk_data *clks;
int num_clks;
- unsigned int pgc;
+ unsigned long pgc;
const struct {
u32 pxx;
@@ -202,6 +202,7 @@ struct imx_pgc_domain {
} bits;
const int voltage;
+ const bool keep_clocks;
struct device *dev;
};
@@ -220,7 +221,7 @@ to_imx_pgc_domain(struct generic_pm_domain *genpd)
static int imx_pgc_power_up(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
+ u32 reg_val, pgc;
int ret;
ret = pm_runtime_get_sync(domain->dev);
@@ -244,6 +245,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
goto out_regulator_disable;
}
+ reset_control_assert(domain->reset);
+
if (domain->bits.pxx) {
/* request the domain to power up */
regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
@@ -262,12 +265,12 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
/* disable power control */
- regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR);
+ for_each_set_bit(pgc, &domain->pgc, 32) {
+ regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(pgc),
+ GPC_PGC_CTRL_PCR);
+ }
}
- reset_control_assert(domain->reset);
-
/* delay for reset to propagate */
udelay(5);
@@ -293,7 +296,8 @@ static int imx_pgc_power_up(struct generic_pm_domain *genpd)
}
/* Disable reset clocks for all devices in the domain */
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+ if (!domain->keep_clocks)
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
return 0;
@@ -311,14 +315,16 @@ out_put_pm:
static int imx_pgc_power_down(struct generic_pm_domain *genpd)
{
struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
+ u32 reg_val, pgc;
int ret;
/* Enable reset clocks for all devices in the domain */
- ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
- if (ret) {
- dev_err(domain->dev, "failed to enable reset clocks\n");
- return ret;
+ if (!domain->keep_clocks) {
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable reset clocks\n");
+ return ret;
+ }
}
/* request the ADB400 to power down */
@@ -338,8 +344,10 @@ static int imx_pgc_power_down(struct generic_pm_domain *genpd)
if (domain->bits.pxx) {
/* enable power control */
- regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ for_each_set_bit(pgc, &domain->pgc, 32) {
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
+ }
/* request the domain to power down */
regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
@@ -389,7 +397,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_MIPI_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
- .pgc = IMX7_PGC_MIPI,
+ .pgc = BIT(IMX7_PGC_MIPI),
},
[IMX7_POWER_DOMAIN_PCIE_PHY] = {
@@ -401,7 +409,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_PCIE_PHY_A_CORE_DOMAIN,
},
.voltage = 1000000,
- .pgc = IMX7_PGC_PCIE,
+ .pgc = BIT(IMX7_PGC_PCIE),
},
[IMX7_POWER_DOMAIN_USB_HSIC_PHY] = {
@@ -413,7 +421,7 @@ static const struct imx_pgc_domain imx7_pgc_domains[] = {
.map = IMX7_USB_HSIC_PHY_A_CORE_DOMAIN,
},
.voltage = 1200000,
- .pgc = IMX7_PGC_USB_HSIC,
+ .pgc = BIT(IMX7_PGC_USB_HSIC),
},
};
@@ -448,7 +456,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_SW_Pxx_REQ,
.map = IMX8M_MIPI_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI,
+ .pgc = BIT(IMX8M_PGC_MIPI),
},
[IMX8M_POWER_DOMAIN_PCIE1] = {
@@ -459,7 +467,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_PCIE1_SW_Pxx_REQ,
.map = IMX8M_PCIE1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_PCIE1,
+ .pgc = BIT(IMX8M_PGC_PCIE1),
},
[IMX8M_POWER_DOMAIN_USB_OTG1] = {
@@ -470,7 +478,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_OTG1_SW_Pxx_REQ,
.map = IMX8M_OTG1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_OTG1,
+ .pgc = BIT(IMX8M_PGC_OTG1),
},
[IMX8M_POWER_DOMAIN_USB_OTG2] = {
@@ -481,7 +489,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_OTG2_SW_Pxx_REQ,
.map = IMX8M_OTG2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_OTG2,
+ .pgc = BIT(IMX8M_PGC_OTG2),
},
[IMX8M_POWER_DOMAIN_DDR1] = {
@@ -492,7 +500,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_DDR1_SW_Pxx_REQ,
.map = IMX8M_DDR2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_DDR1,
+ .pgc = BIT(IMX8M_PGC_DDR1),
},
[IMX8M_POWER_DOMAIN_GPU] = {
@@ -505,7 +513,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_GPU_HSK_PWRDNREQN,
.hskack = IMX8M_GPU_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_GPU,
+ .pgc = BIT(IMX8M_PGC_GPU),
},
[IMX8M_POWER_DOMAIN_VPU] = {
@@ -518,7 +526,8 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_VPU_HSK_PWRDNREQN,
.hskack = IMX8M_VPU_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_VPU,
+ .pgc = BIT(IMX8M_PGC_VPU),
+ .keep_clocks = true,
},
[IMX8M_POWER_DOMAIN_DISP] = {
@@ -531,7 +540,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.hskreq = IMX8M_DISP_HSK_PWRDNREQN,
.hskack = IMX8M_DISP_HSK_PWRDNACKN,
},
- .pgc = IMX8M_PGC_DISP,
+ .pgc = BIT(IMX8M_PGC_DISP),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI1] = {
@@ -542,7 +551,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_CSI1_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI1_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI_CSI1,
+ .pgc = BIT(IMX8M_PGC_MIPI_CSI1),
},
[IMX8M_POWER_DOMAIN_MIPI_CSI2] = {
@@ -553,7 +562,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_MIPI_CSI2_SW_Pxx_REQ,
.map = IMX8M_MIPI_CSI2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_MIPI_CSI2,
+ .pgc = BIT(IMX8M_PGC_MIPI_CSI2),
},
[IMX8M_POWER_DOMAIN_PCIE2] = {
@@ -564,7 +573,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.pxx = IMX8M_PCIE2_SW_Pxx_REQ,
.map = IMX8M_PCIE2_A53_DOMAIN,
},
- .pgc = IMX8M_PGC_PCIE2,
+ .pgc = BIT(IMX8M_PGC_PCIE2),
},
};
@@ -617,6 +626,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MM_HSIO_HSK_PWRDNACKN,
},
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_PCIE] = {
@@ -627,7 +637,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_PCIE_SW_Pxx_REQ,
.map = IMX8MM_PCIE_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_PCIE,
+ .pgc = BIT(IMX8MM_PGC_PCIE),
},
[IMX8MM_POWER_DOMAIN_OTG1] = {
@@ -638,7 +648,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_OTG1_SW_Pxx_REQ,
.map = IMX8MM_OTG1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_OTG1,
+ .pgc = BIT(IMX8MM_PGC_OTG1),
},
[IMX8MM_POWER_DOMAIN_OTG2] = {
@@ -649,7 +659,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_OTG2_SW_Pxx_REQ,
.map = IMX8MM_OTG2_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_OTG2,
+ .pgc = BIT(IMX8MM_PGC_OTG2),
},
[IMX8MM_POWER_DOMAIN_GPUMIX] = {
@@ -662,7 +672,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_GPUMIX,
+ .pgc = BIT(IMX8MM_PGC_GPUMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_GPU] = {
@@ -675,7 +686,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_GPU_HSK_PWRDNREQN,
.hskack = IMX8MM_GPU_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_GPU2D,
+ .pgc = BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D),
},
[IMX8MM_POWER_DOMAIN_VPUMIX] = {
@@ -688,7 +699,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_VPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_VPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_VPUMIX,
+ .pgc = BIT(IMX8MM_PGC_VPUMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_VPUG1] = {
@@ -699,7 +711,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUG1_SW_Pxx_REQ,
.map = IMX8MM_VPUG1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUG1,
+ .pgc = BIT(IMX8MM_PGC_VPUG1),
},
[IMX8MM_POWER_DOMAIN_VPUG2] = {
@@ -710,7 +722,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUG2_SW_Pxx_REQ,
.map = IMX8MM_VPUG2_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUG2,
+ .pgc = BIT(IMX8MM_PGC_VPUG2),
},
[IMX8MM_POWER_DOMAIN_VPUH1] = {
@@ -721,7 +733,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_VPUH1_SW_Pxx_REQ,
.map = IMX8MM_VPUH1_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_VPUH1,
+ .pgc = BIT(IMX8MM_PGC_VPUH1),
},
[IMX8MM_POWER_DOMAIN_DISPMIX] = {
@@ -734,7 +746,8 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.hskreq = IMX8MM_DISPMIX_HSK_PWRDNREQN,
.hskack = IMX8MM_DISPMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MM_PGC_DISPMIX,
+ .pgc = BIT(IMX8MM_PGC_DISPMIX),
+ .keep_clocks = true,
},
[IMX8MM_POWER_DOMAIN_MIPI] = {
@@ -745,7 +758,7 @@ static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
.pxx = IMX8MM_MIPI_SW_Pxx_REQ,
.map = IMX8MM_MIPI_A53_DOMAIN,
},
- .pgc = IMX8MM_PGC_MIPI,
+ .pgc = BIT(IMX8MM_PGC_MIPI),
},
};
@@ -802,6 +815,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.hskreq = IMX8MN_HSIO_HSK_PWRDNREQN,
.hskack = IMX8MN_HSIO_HSK_PWRDNACKN,
},
+ .keep_clocks = true,
},
[IMX8MN_POWER_DOMAIN_OTG1] = {
@@ -812,7 +826,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.pxx = IMX8MN_OTG1_SW_Pxx_REQ,
.map = IMX8MN_OTG1_A53_DOMAIN,
},
- .pgc = IMX8MN_PGC_OTG1,
+ .pgc = BIT(IMX8MN_PGC_OTG1),
},
[IMX8MN_POWER_DOMAIN_GPUMIX] = {
@@ -825,7 +839,7 @@ static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
.hskreq = IMX8MN_GPUMIX_HSK_PWRDNREQN,
.hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
},
- .pgc = IMX8MN_PGC_GPUMIX,
+ .pgc = BIT(IMX8MN_PGC_GPUMIX),
},
};
@@ -894,6 +908,10 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
goto out_domain_unmap;
}
+ if (IS_ENABLED(CONFIG_LOCKDEP) &&
+ of_property_read_bool(domain->dev->of_node, "power-domains"))
+ lockdep_set_subclass(&domain->genpd.mlock, 1);
+
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
@@ -930,6 +948,36 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int imx_pgc_domain_suspend(struct device *dev)
+{
+ int ret;
+
+ /*
+ * This may look strange, but is done so the generic PM_SLEEP code
+ * can power down our domain and more importantly power it up again
+ * after resume, without tripping over our usage of runtime PM to
+ * power up/down the nested domains.
+ */
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx_pgc_domain_resume(struct device *dev)
+{
+ return pm_runtime_put(dev);
+}
+#endif
+
+static const struct dev_pm_ops imx_pgc_domain_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx_pgc_domain_suspend, imx_pgc_domain_resume)
+};
+
static const struct platform_device_id imx_pgc_domain_id[] = {
{ "imx-pgc-domain", },
{ },
@@ -938,6 +986,7 @@ static const struct platform_device_id imx_pgc_domain_id[] = {
static struct platform_driver imx_pgc_domain_driver = {
.driver = {
.name = "imx-pgc",
+ .pm = &imx_pgc_domain_pm_ops,
},
.probe = imx_pgc_domain_probe,
.remove = imx_pgc_domain_remove,
@@ -986,6 +1035,9 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
struct imx_pgc_domain *domain;
u32 domain_index;
+ if (!of_device_is_available(np))
+ continue;
+
ret = of_property_read_u32(np, "reg", &domain_index);
if (ret) {
dev_err(dev, "Failed to read 'reg' property\n");
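
With pgc now an unsigned long bitmask, a single power domain can gate several PGC instances at once (for example the combined GPU2D/GPU3D domain on i.MX8MM). A minimal sketch of the iteration pattern, assuming the GPC_PGC_CTRL()/GPC_PGC_CTRL_PCR macros from gpcv2.c and an illustrative helper name:

#include <linux/bitops.h>
#include <linux/regmap.h>

/* Illustrative helper: clear the power control bit for every PGC in the mask */
static void example_clear_pcr(struct regmap *regmap, unsigned long pgc_mask)
{
	unsigned int pgc;

	for_each_set_bit(pgc, &pgc_mask, 32)
		regmap_clear_bits(regmap, GPC_PGC_CTRL(pgc), GPC_PGC_CTRL_PCR);
}

/* e.g. example_clear_pcr(regmap, BIT(IMX8MM_PGC_GPU2D) | BIT(IMX8MM_PGC_GPU3D)); */
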
diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c
new file mode 100644
index 000000000000..519b3651d1d9
--- /dev/null
+++ b/drivers/soc/imx/imx8m-blk-ctrl.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2021 Pengutronix, Lucas Stach <kernel@pengutronix.de>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/power/imx8mm-power.h>
+
+#define BLK_SFT_RSTN 0x0
+#define BLK_CLK_EN 0x4
+
+struct imx8m_blk_ctrl_domain;
+
+struct imx8m_blk_ctrl {
+ struct device *dev;
+ struct notifier_block power_nb;
+ struct device *bus_power_dev;
+ struct regmap *regmap;
+ struct imx8m_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+};
+
+struct imx8m_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ const char *gpc_name;
+ u32 rst_mask;
+ u32 clk_mask;
+};
+
+#define DOMAIN_MAX_CLKS 3
+
+struct imx8m_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx8m_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct device *power_dev;
+ struct imx8m_blk_ctrl *bc;
+};
+
+struct imx8m_blk_ctrl_data {
+ int max_reg;
+ notifier_fn_t power_notifier_fn;
+ const struct imx8m_blk_ctrl_domain_data *domains;
+ int num_domains;
+};
+
+static inline struct imx8m_blk_ctrl_domain *
+to_imx8m_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx8m_blk_ctrl_domain, genpd);
+}
+
+static int imx8m_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
+ const struct imx8m_blk_ctrl_domain_data *data = domain->data;
+ struct imx8m_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ /* make sure bus domain is awake */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ dev_err(bc->dev, "failed to power up bus domain\n");
+ return ret;
+ }
+
+ /* put devices into reset */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ /* enable upstream and blk-ctrl clocks to allow reset to propagate */
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ goto bus_put;
+ }
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* power up upstream GPC domain */
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ dev_err(bc->dev, "failed to power up peripheral domain\n");
+ goto clk_disable;
+ }
+
+ /* wait for reset to propagate */
+ udelay(5);
+
+ /* release reset */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ /* disable upstream clocks */
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ return 0;
+
+clk_disable:
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+bus_put:
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx8m_blk_ctrl_domain *domain = to_imx8m_blk_ctrl_domain(genpd);
+ const struct imx8m_blk_ctrl_domain_data *data = domain->data;
+ struct imx8m_blk_ctrl *bc = domain->bc;
+
+ /* put devices into reset and disable clocks */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* power down upstream GPC domain */
+ pm_runtime_put(domain->power_dev);
+
+ /* allow bus domain to suspend */
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+imx8m_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
+{
+ struct genpd_onecell_data *onecell_data = data;
+ unsigned int index = args->args[0];
+
+ if (args->args_count != 1 ||
+ index >= onecell_data->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return onecell_data->domains[index];
+}
+
+static struct lock_class_key blk_ctrl_genpd_lock_class;
+
+static int imx8m_blk_ctrl_probe(struct platform_device *pdev)
+{
+ const struct imx8m_blk_ctrl_data *bc_data;
+ struct device *dev = &pdev->dev;
+ struct imx8m_blk_ctrl *bc;
+ void __iomem *base;
+ int i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ bc_data = of_device_get_match_data(dev);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ regmap_config.max_register = bc_data->max_reg;
+ bc->regmap = devm_regmap_init_mmio(dev, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct imx8m_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains)
+ return -ENOMEM;
+
+ bc->onecell_data.num_domains = bc_data->num_domains;
+ bc->onecell_data.xlate = imx8m_blk_ctrl_xlate;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains)
+ return -ENOMEM;
+
+ bc->bus_power_dev = genpd_dev_pm_attach_by_name(dev, "bus");
+ if (IS_ERR(bc->bus_power_dev))
+ return dev_err_probe(dev, PTR_ERR(bc->bus_power_dev),
+ "failed to attach power domain\n");
+
+ for (i = 0; i < bc_data->num_domains; i++) {
+ const struct imx8m_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->power_dev =
+ dev_pm_domain_attach_by_name(dev, data->gpc_name);
+ if (IS_ERR(domain->power_dev)) {
+ dev_err_probe(dev, PTR_ERR(domain->power_dev),
+ "failed to attach power domain\n");
+ ret = PTR_ERR(domain->power_dev);
+ goto cleanup_pds;
+ }
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx8m_blk_ctrl_power_on;
+ domain->genpd.power_off = imx8m_blk_ctrl_power_off;
+ domain->bc = bc;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ dev_pm_domain_detach(domain->power_dev, true);
+ goto cleanup_pds;
+ }
+
+ /*
+ * We use runtime PM to trigger power on/off of the upstream GPC
+ * domain, as a strict hierarchical parent/child power domain
+ * setup doesn't allow us to meet the sequencing requirements.
+ * This means we have nested locking of genpd locks, without the
+ * nesting being visible at the genpd level, so we need a
+ * separate lock class to make lockdep aware of the fact that
+		 * these are separate domain locks that can be nested without a
+ * self-deadlock.
+ */
+ lockdep_set_class(&domain->genpd.mlock,
+ &blk_ctrl_genpd_lock_class);
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ bc->power_nb.notifier_call = bc_data->power_notifier_fn;
+ ret = dev_pm_genpd_add_notifier(bc->bus_power_dev, &bc->power_nb);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power notifier\n");
+ goto cleanup_provider;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_provider:
+ of_genpd_del_provider(dev->of_node);
+cleanup_pds:
+ for (i--; i >= 0; i--) {
+ pm_genpd_remove(&bc->domains[i].genpd);
+ dev_pm_domain_detach(bc->domains[i].power_dev, true);
+ }
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ dev_pm_domain_detach(domain->power_dev, true);
+ }
+
+ dev_pm_genpd_remove_notifier(bc->bus_power_dev);
+
+ dev_pm_domain_detach(bc->bus_power_dev, true);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int imx8m_blk_ctrl_suspend(struct device *dev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
+ int ret, i;
+
+ /*
+ * This may look strange, but is done so the generic PM_SLEEP code
+ * can power down our domains and more importantly power them up again
+ * after resume, without tripping over our usage of runtime PM to
+ * control the upstream GPC domains. Things happen in the right order
+ * in the system suspend/resume paths due to the device parent/child
+ * hierarchy.
+ */
+ ret = pm_runtime_get_sync(bc->bus_power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->bus_power_dev);
+ return ret;
+ }
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx8m_blk_ctrl_domain *domain = &bc->domains[i];
+
+ ret = pm_runtime_get_sync(domain->power_dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(domain->power_dev);
+ goto out_fail;
+ }
+ }
+
+ return 0;
+
+out_fail:
+ for (i--; i >= 0; i--)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return ret;
+}
+
+static int imx8m_blk_ctrl_resume(struct device *dev)
+{
+ struct imx8m_blk_ctrl *bc = dev_get_drvdata(dev);
+ int i;
+
+ for (i = 0; i < bc->onecell_data.num_domains; i++)
+ pm_runtime_put(bc->domains[i].power_dev);
+
+ pm_runtime_put(bc->bus_power_dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops imx8m_blk_ctrl_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(imx8m_blk_ctrl_suspend, imx8m_blk_ctrl_resume)
+};
+
+static int imx8mm_vpu_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /*
+ * The ADB in the VPUMIX domain has no separate reset and clock
+ * enable bits, but is ungated together with the VPU clocks. To
+ * allow the handshake with the GPC to progress we put the VPUs
+ * in reset and ungate the clocks.
+ */
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, BIT(0) | BIT(1) | BIT(2));
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(0) | BIT(1) | BIT(2));
+
+ if (action == GENPD_NOTIFY_ON) {
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ udelay(5);
+
+ /* set "fuse" bits to enable the VPUs */
+ regmap_set_bits(bc->regmap, 0x8, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0xc, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0x10, 0xffffffff);
+ regmap_set_bits(bc->regmap, 0x14, 0xffffffff);
+ }
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mm_vpu_blk_ctl_domain_data[] = {
+ [IMX8MM_VPUBLK_PD_G1] = {
+ .name = "vpublk-g1",
+ .clk_names = (const char *[]){ "g1", },
+ .num_clks = 1,
+ .gpc_name = "g1",
+ .rst_mask = BIT(1),
+ .clk_mask = BIT(1),
+ },
+ [IMX8MM_VPUBLK_PD_G2] = {
+ .name = "vpublk-g2",
+ .clk_names = (const char *[]){ "g2", },
+ .num_clks = 1,
+ .gpc_name = "g2",
+ .rst_mask = BIT(0),
+ .clk_mask = BIT(0),
+ },
+ [IMX8MM_VPUBLK_PD_H1] = {
+ .name = "vpublk-h1",
+ .clk_names = (const char *[]){ "h1", },
+ .num_clks = 1,
+ .gpc_name = "h1",
+ .rst_mask = BIT(2),
+ .clk_mask = BIT(2),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mm_vpu_blk_ctl_dev_data = {
+ .max_reg = 0x18,
+ .power_notifier_fn = imx8mm_vpu_power_notifier,
+ .domains = imx8mm_vpu_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mm_vpu_blk_ctl_domain_data),
+};
+
+static int imx8mm_disp_power_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct imx8m_blk_ctrl *bc = container_of(nb, struct imx8m_blk_ctrl,
+ power_nb);
+
+ if (action != GENPD_NOTIFY_ON && action != GENPD_NOTIFY_PRE_OFF)
+ return NOTIFY_OK;
+
+ /* Enable bus clock and deassert bus reset */
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, BIT(12));
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, BIT(6));
+
+ /*
+ * On power up we have no software backchannel to the GPC to
+ * wait for the ADB handshake to happen, so we just delay for a
+ * bit. On power down the GPC driver waits for the handshake.
+ */
+ if (action == GENPD_NOTIFY_ON)
+ udelay(5);
+
+ return NOTIFY_OK;
+}
+
+static const struct imx8m_blk_ctrl_domain_data imx8mm_disp_blk_ctl_domain_data[] = {
+ [IMX8MM_DISPBLK_PD_CSI_BRIDGE] = {
+ .name = "dispblk-csi-bridge",
+ .clk_names = (const char *[]){ "csi-bridge-axi", "csi-bridge-apb",
+ "csi-bridge-core", },
+ .num_clks = 3,
+ .gpc_name = "csi-bridge",
+ .rst_mask = BIT(0) | BIT(1) | BIT(2),
+ .clk_mask = BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5),
+ },
+ [IMX8MM_DISPBLK_PD_LCDIF] = {
+ .name = "dispblk-lcdif",
+ .clk_names = (const char *[]){ "lcdif-axi", "lcdif-apb", "lcdif-pix", },
+ .num_clks = 3,
+ .gpc_name = "lcdif",
+ .clk_mask = BIT(6) | BIT(7),
+ },
+ [IMX8MM_DISPBLK_PD_MIPI_DSI] = {
+ .name = "dispblk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi-pclk", "dsi-ref", },
+ .num_clks = 2,
+ .gpc_name = "mipi-dsi",
+ .rst_mask = BIT(5),
+ .clk_mask = BIT(8) | BIT(9),
+ },
+ [IMX8MM_DISPBLK_PD_MIPI_CSI] = {
+ .name = "dispblk-mipi-csi",
+ .clk_names = (const char *[]){ "csi-aclk", "csi-pclk" },
+ .num_clks = 2,
+ .gpc_name = "mipi-csi",
+ .rst_mask = BIT(3) | BIT(4),
+ .clk_mask = BIT(10) | BIT(11),
+ },
+};
+
+static const struct imx8m_blk_ctrl_data imx8mm_disp_blk_ctl_dev_data = {
+ .max_reg = 0x2c,
+ .power_notifier_fn = imx8mm_disp_power_notifier,
+ .domains = imx8mm_disp_blk_ctl_domain_data,
+ .num_domains = ARRAY_SIZE(imx8mm_disp_blk_ctl_domain_data),
+};
+
+static const struct of_device_id imx8m_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx8mm-vpu-blk-ctrl",
+ .data = &imx8mm_vpu_blk_ctl_dev_data
+ }, {
+ .compatible = "fsl,imx8mm-disp-blk-ctrl",
+ .data = &imx8mm_disp_blk_ctl_dev_data
+	}, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx8m_blk_ctrl_of_match);
+
+static struct platform_driver imx8m_blk_ctrl_driver = {
+ .probe = imx8m_blk_ctrl_probe,
+ .remove = imx8m_blk_ctrl_remove,
+ .driver = {
+ .name = "imx8m-blk-ctrl",
+ .pm = &imx8m_blk_ctrl_pm_ops,
+ .of_match_table = imx8m_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx8m_blk_ctrl_driver);
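
From the consumer side the blk-ctrl domains behave like any other genpd provider: a peripheral that references one of them (for example vpublk-g1) only has to use runtime PM, and the power_on/power_off sequencing above (bus domain, reset, clocks, upstream GPC domain) runs behind those calls. A minimal sketch with an illustrative client function:

#include <linux/pm_runtime.h>

/*
 * Illustrative consumer: the genpd core calls imx8m_blk_ctrl_power_on/off
 * around these runtime PM transitions.
 */
static int example_client_start(struct device *dev)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... program the peripheral while its domain is powered ... */

	pm_runtime_put(dev);
	return 0;
}
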
diff --git a/drivers/soc/mediatek/mt8192-mmsys.h b/drivers/soc/mediatek/mt8192-mmsys.h
new file mode 100644
index 000000000000..6f0a57044a7b
--- /dev/null
+++ b/drivers/soc/mediatek/mt8192-mmsys.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __SOC_MEDIATEK_MT8192_MMSYS_H
+#define __SOC_MEDIATEK_MT8192_MMSYS_H
+
+#define MT8192_MMSYS_OVL_MOUT_EN 0xf04
+#define MT8192_DISP_OVL1_2L_MOUT_EN 0xf08
+#define MT8192_DISP_OVL0_2L_MOUT_EN 0xf18
+#define MT8192_DISP_OVL0_MOUT_EN 0xf1c
+#define MT8192_DISP_RDMA0_SEL_IN 0xf2c
+#define MT8192_DISP_RDMA0_SOUT_SEL 0xf30
+#define MT8192_DISP_CCORR0_SOUT_SEL 0xf34
+#define MT8192_DISP_AAL0_SEL_IN 0xf38
+#define MT8192_DISP_DITHER0_MOUT_EN 0xf3c
+#define MT8192_DISP_DSI0_SEL_IN 0xf40
+#define MT8192_DISP_OVL2_2L_MOUT_EN 0xf4c
+
+#define MT8192_DISP_OVL0_GO_BLEND BIT(0)
+#define MT8192_DITHER0_MOUT_IN_DSI0 BIT(0)
+#define MT8192_OVL0_MOUT_EN_DISP_RDMA0 BIT(0)
+#define MT8192_OVL2_2L_MOUT_EN_RDMA4 BIT(0)
+#define MT8192_DISP_OVL0_GO_BG BIT(1)
+#define MT8192_DISP_OVL0_2L_GO_BLEND BIT(2)
+#define MT8192_DISP_OVL0_2L_GO_BG BIT(3)
+#define MT8192_OVL1_2L_MOUT_EN_RDMA1 BIT(4)
+#define MT8192_OVL0_MOUT_EN_OVL0_2L BIT(4)
+#define MT8192_RDMA0_SEL_IN_OVL0_2L 0x3
+#define MT8192_RDMA0_SOUT_COLOR0 0x1
+#define MT8192_CCORR0_SOUT_AAL0 0x1
+#define MT8192_AAL0_SEL_IN_CCORR0 0x1
+#define MT8192_DSI0_SEL_IN_DITHER0 0x1
+
+static const struct mtk_mmsys_routes mmsys_mt8192_routing_table[] = {
+ {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_OVL0_2L_MOUT_EN, MT8192_OVL0_MOUT_EN_DISP_RDMA0,
+ MT8192_OVL0_MOUT_EN_DISP_RDMA0
+ }, {
+ DDP_COMPONENT_OVL_2L2, DDP_COMPONENT_RDMA4,
+ MT8192_DISP_OVL2_2L_MOUT_EN, MT8192_OVL2_2L_MOUT_EN_RDMA4,
+ MT8192_OVL2_2L_MOUT_EN_RDMA4
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+ MT8192_DISP_DITHER0_MOUT_EN, MT8192_DITHER0_MOUT_IN_DSI0,
+ MT8192_DITHER0_MOUT_IN_DSI0
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_DISP_RDMA0_SEL_IN, MT8192_RDMA0_SEL_IN_OVL0_2L,
+ MT8192_RDMA0_SEL_IN_OVL0_2L
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_AAL0_SEL_IN, MT8192_AAL0_SEL_IN_CCORR0,
+ MT8192_AAL0_SEL_IN_CCORR0
+ }, {
+ DDP_COMPONENT_DITHER, DDP_COMPONENT_DSI0,
+		MT8192_DISP_DSI0_SEL_IN, MT8192_DSI0_SEL_IN_DITHER0,
+		MT8192_DSI0_SEL_IN_DITHER0
+ }, {
+ DDP_COMPONENT_RDMA0, DDP_COMPONENT_COLOR0,
+ MT8192_DISP_RDMA0_SOUT_SEL, MT8192_RDMA0_SOUT_COLOR0,
+ MT8192_RDMA0_SOUT_COLOR0
+ }, {
+ DDP_COMPONENT_CCORR, DDP_COMPONENT_AAL0,
+ MT8192_DISP_CCORR0_SOUT_SEL, MT8192_CCORR0_SOUT_AAL0,
+ MT8192_CCORR0_SOUT_AAL0
+ }, {
+ DDP_COMPONENT_OVL0, DDP_COMPONENT_OVL_2L0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_GO_BG,
+ MT8192_DISP_OVL0_GO_BG
+ }, {
+ DDP_COMPONENT_OVL_2L0, DDP_COMPONENT_RDMA0,
+ MT8192_MMSYS_OVL_MOUT_EN, MT8192_DISP_OVL0_2L_GO_BLEND,
+ MT8192_DISP_OVL0_2L_GO_BLEND
+ }
+};
+
+#endif /* __SOC_MEDIATEK_MT8192_MMSYS_H */
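
Each routing entry pairs a source and sink DDP component with the register, mask and value that select that path; mtk_mmsys_ddp_connect() applies the matching entries when the display pipeline is assembled. A rough sketch of how one entry is applied, assuming the addr/mask/val field names declared in mtk-mmsys.h:

#include <linux/io.h>
#include "mtk-mmsys.h"

/* Illustrative only: apply one routing entry (read-modify-write under mask) */
static void example_apply_route(void __iomem *regs,
				const struct mtk_mmsys_routes *r)
{
	u32 reg = readl_relaxed(regs + r->addr);

	reg &= ~r->mask;
	reg |= r->val;
	writel_relaxed(reg, regs + r->addr);
}
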
diff --git a/drivers/soc/mediatek/mtk-mmsys.c b/drivers/soc/mediatek/mtk-mmsys.c
index a78e88f27b62..1e448f1ffefb 100644
--- a/drivers/soc/mediatek/mtk-mmsys.c
+++ b/drivers/soc/mediatek/mtk-mmsys.c
@@ -4,15 +4,18 @@
* Author: James Liao <jamesjj.liao@mediatek.com>
*/
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
#include <linux/soc/mediatek/mtk-mmsys.h>
#include "mtk-mmsys.h"
#include "mt8167-mmsys.h"
#include "mt8183-mmsys.h"
+#include "mt8192-mmsys.h"
#include "mt8365-mmsys.h"
static const struct mtk_mmsys_driver_data mt2701_mmsys_driver_data = {
@@ -53,6 +56,12 @@ static const struct mtk_mmsys_driver_data mt8183_mmsys_driver_data = {
.num_routes = ARRAY_SIZE(mmsys_mt8183_routing_table),
};
+static const struct mtk_mmsys_driver_data mt8192_mmsys_driver_data = {
+ .clk_driver = "clk-mt8192-mm",
+ .routes = mmsys_mt8192_routing_table,
+ .num_routes = ARRAY_SIZE(mmsys_mt8192_routing_table),
+};
+
static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
.clk_driver = "clk-mt8365-mm",
.routes = mt8365_mmsys_routing_table,
@@ -62,6 +71,8 @@ static const struct mtk_mmsys_driver_data mt8365_mmsys_driver_data = {
struct mtk_mmsys {
void __iomem *regs;
const struct mtk_mmsys_driver_data *data;
+ spinlock_t lock; /* protects mmsys_sw_rst_b reg */
+ struct reset_controller_dev rcdev;
};
void mtk_mmsys_ddp_connect(struct device *dev,
@@ -101,6 +112,58 @@ void mtk_mmsys_ddp_disconnect(struct device *dev,
}
EXPORT_SYMBOL_GPL(mtk_mmsys_ddp_disconnect);
+static int mtk_mmsys_reset_update(struct reset_controller_dev *rcdev, unsigned long id,
+ bool assert)
+{
+ struct mtk_mmsys *mmsys = container_of(rcdev, struct mtk_mmsys, rcdev);
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&mmsys->lock, flags);
+
+ reg = readl_relaxed(mmsys->regs + MMSYS_SW0_RST_B);
+
+ if (assert)
+ reg &= ~BIT(id);
+ else
+ reg |= BIT(id);
+
+ writel_relaxed(reg, mmsys->regs + MMSYS_SW0_RST_B);
+
+ spin_unlock_irqrestore(&mmsys->lock, flags);
+
+ return 0;
+}
+
+static int mtk_mmsys_reset_assert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, true);
+}
+
+static int mtk_mmsys_reset_deassert(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ return mtk_mmsys_reset_update(rcdev, id, false);
+}
+
+static int mtk_mmsys_reset(struct reset_controller_dev *rcdev, unsigned long id)
+{
+ int ret;
+
+ ret = mtk_mmsys_reset_assert(rcdev, id);
+ if (ret)
+ return ret;
+
+ usleep_range(1000, 1100);
+
+ return mtk_mmsys_reset_deassert(rcdev, id);
+}
+
+static const struct reset_control_ops mtk_mmsys_reset_ops = {
+ .assert = mtk_mmsys_reset_assert,
+ .deassert = mtk_mmsys_reset_deassert,
+ .reset = mtk_mmsys_reset,
+};
+
static int mtk_mmsys_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -120,6 +183,18 @@ static int mtk_mmsys_probe(struct platform_device *pdev)
return ret;
}
+ spin_lock_init(&mmsys->lock);
+
+ mmsys->rcdev.owner = THIS_MODULE;
+ mmsys->rcdev.nr_resets = 32;
+ mmsys->rcdev.ops = &mtk_mmsys_reset_ops;
+ mmsys->rcdev.of_node = pdev->dev.of_node;
+ ret = devm_reset_controller_register(&pdev->dev, &mmsys->rcdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't register mmsys reset controller: %d\n", ret);
+ return ret;
+ }
+
mmsys->data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mmsys);
@@ -168,6 +243,10 @@ static const struct of_device_id of_match_mtk_mmsys[] = {
.data = &mt8183_mmsys_driver_data,
},
{
+ .compatible = "mediatek,mt8192-mmsys",
+ .data = &mt8192_mmsys_driver_data,
+ },
+ {
.compatible = "mediatek,mt8365-mmsys",
.data = &mt8365_mmsys_driver_data,
},
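
A display component that references one of the new mmsys reset lines through a phandle can drive it with the generic reset API; mtk_mmsys_reset() then asserts the matching MMSYS_SW0_RST_B bit for roughly 1 ms before deasserting it. A minimal consumer sketch (function name illustrative):

#include <linux/reset.h>

static int example_reset_component(struct device *dev)
{
	struct reset_control *rst;

	rst = devm_reset_control_get_exclusive(dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	/* assert, ~1ms delay, deassert, handled by mtk_mmsys_reset_ops */
	return reset_control_reset(rst);
}
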
diff --git a/drivers/soc/mediatek/mtk-mmsys.h b/drivers/soc/mediatek/mtk-mmsys.h
index 9e2b81bd38db..8b0ed05117ea 100644
--- a/drivers/soc/mediatek/mtk-mmsys.h
+++ b/drivers/soc/mediatek/mtk-mmsys.h
@@ -78,6 +78,8 @@
#define DSI_SEL_IN_RDMA 0x1
#define DSI_SEL_IN_MASK 0x1
+#define MMSYS_SW0_RST_B 0x140
+
struct mtk_mmsys_routes {
u32 from_comp;
u32 to_comp;
diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c
index 2e4bcc300576..2ca55bb5a8be 100644
--- a/drivers/soc/mediatek/mtk-mutex.c
+++ b/drivers/soc/mediatek/mtk-mutex.c
@@ -39,6 +39,18 @@
#define MT8167_MUTEX_MOD_DISP_DITHER 15
#define MT8167_MUTEX_MOD_DISP_UFOE 16
+#define MT8192_MUTEX_MOD_DISP_OVL0 0
+#define MT8192_MUTEX_MOD_DISP_OVL0_2L 1
+#define MT8192_MUTEX_MOD_DISP_RDMA0 2
+#define MT8192_MUTEX_MOD_DISP_COLOR0 4
+#define MT8192_MUTEX_MOD_DISP_CCORR0 5
+#define MT8192_MUTEX_MOD_DISP_AAL0 6
+#define MT8192_MUTEX_MOD_DISP_GAMMA0 7
+#define MT8192_MUTEX_MOD_DISP_POSTMASK0 8
+#define MT8192_MUTEX_MOD_DISP_DITHER0 9
+#define MT8192_MUTEX_MOD_DISP_OVL2_2L 16
+#define MT8192_MUTEX_MOD_DISP_RDMA4 17
+
#define MT8183_MUTEX_MOD_DISP_RDMA0 0
#define MT8183_MUTEX_MOD_DISP_RDMA1 1
#define MT8183_MUTEX_MOD_DISP_OVL0 9
@@ -214,6 +226,20 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = {
[DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0,
};
+static const unsigned int mt8192_mutex_mod[DDP_COMPONENT_ID_MAX] = {
+ [DDP_COMPONENT_AAL0] = MT8192_MUTEX_MOD_DISP_AAL0,
+ [DDP_COMPONENT_CCORR] = MT8192_MUTEX_MOD_DISP_CCORR0,
+ [DDP_COMPONENT_COLOR0] = MT8192_MUTEX_MOD_DISP_COLOR0,
+ [DDP_COMPONENT_DITHER] = MT8192_MUTEX_MOD_DISP_DITHER0,
+ [DDP_COMPONENT_GAMMA] = MT8192_MUTEX_MOD_DISP_GAMMA0,
+ [DDP_COMPONENT_POSTMASK0] = MT8192_MUTEX_MOD_DISP_POSTMASK0,
+ [DDP_COMPONENT_OVL0] = MT8192_MUTEX_MOD_DISP_OVL0,
+ [DDP_COMPONENT_OVL_2L0] = MT8192_MUTEX_MOD_DISP_OVL0_2L,
+ [DDP_COMPONENT_OVL_2L2] = MT8192_MUTEX_MOD_DISP_OVL2_2L,
+ [DDP_COMPONENT_RDMA0] = MT8192_MUTEX_MOD_DISP_RDMA0,
+ [DDP_COMPONENT_RDMA4] = MT8192_MUTEX_MOD_DISP_RDMA4,
+};
+
static const unsigned int mt2712_mutex_sof[MUTEX_SOF_DSI3 + 1] = {
[MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE,
[MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0,
@@ -275,6 +301,13 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = {
.no_clk = true,
};
+static const struct mtk_mutex_data mt8192_mutex_driver_data = {
+ .mutex_mod = mt8192_mutex_mod,
+ .mutex_sof = mt8183_mutex_sof,
+ .mutex_mod_reg = MT8183_MUTEX0_MOD0,
+ .mutex_sof_reg = MT8183_MUTEX0_SOF0,
+};
+
struct mtk_mutex *mtk_mutex_get(struct device *dev)
{
struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev);
@@ -507,6 +540,8 @@ static const struct of_device_id mutex_driver_dt_match[] = {
.data = &mt8173_mutex_driver_data},
{ .compatible = "mediatek,mt8183-disp-mutex",
.data = &mt8183_mutex_driver_data},
+ { .compatible = "mediatek,mt8192-disp-mutex",
+ .data = &mt8192_mutex_driver_data},
{},
};
MODULE_DEVICE_TABLE(of, mutex_driver_dt_match);
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 79b568f82a1c..e718b8735444 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -190,6 +190,25 @@ config QCOM_SOCINFO
Say yes here to support the Qualcomm socinfo driver, providing
information about the SoC to user space.
+config QCOM_SPM
+ tristate "Qualcomm Subsystem Power Manager (SPM)"
+ depends on ARCH_QCOM || COMPILE_TEST
+ select QCOM_SCM
+ help
+	  Enable support for the Qualcomm Subsystem Power Manager, which is
+	  used to manage the cores and L2 low power modes and, where
+	  supported, to configure the internal Adaptive Voltage Scaler
+	  parameters.
+
+config QCOM_STATS
+ tristate "Qualcomm Technologies, Inc. (QTI) Sleep stats driver"
+ depends on (ARCH_QCOM && DEBUG_FS) || COMPILE_TEST
+ depends on QCOM_SMEM
+ help
+	  Qualcomm Technologies, Inc. (QTI) sleep stats driver that reads
+	  the shared memory exported by the remote processor, which holds
+	  statistics about the various SoC-level low power modes, and
+	  exposes them through a debugfs interface.
+
config QCOM_WCNSS_CTRL
tristate "Qualcomm WCNSS control driver"
depends on ARCH_QCOM || COMPILE_TEST
@@ -199,7 +218,7 @@ config QCOM_WCNSS_CTRL
firmware to a newly booted WCNSS chip.
config QCOM_APR
- tristate "Qualcomm APR Bus (Asynchronous Packet Router)"
+ tristate "Qualcomm APR/GPR Bus (Asynchronous/Generic Packet Router)"
depends on ARCH_QCOM || COMPILE_TEST
depends on RPMSG
depends on NET
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index ad675a6593d0..70d5de69fd7b 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -20,6 +20,8 @@ obj-$(CONFIG_QCOM_SMEM_STATE) += smem_state.o
obj-$(CONFIG_QCOM_SMP2P) += smp2p.o
obj-$(CONFIG_QCOM_SMSM) += smsm.o
obj-$(CONFIG_QCOM_SOCINFO) += socinfo.o
+obj-$(CONFIG_QCOM_SPM) += spm.o
+obj-$(CONFIG_QCOM_STATS) += qcom_stats.o
obj-$(CONFIG_QCOM_WCNSS_CTRL) += wcnss_ctrl.o
obj-$(CONFIG_QCOM_APR) += apr.o
obj-$(CONFIG_QCOM_LLCC) += llcc-qcom.o
diff --git a/drivers/soc/qcom/apr.c b/drivers/soc/qcom/apr.c
index 475a57b435b2..82ca12c9328a 100644
--- a/drivers/soc/qcom/apr.c
+++ b/drivers/soc/qcom/apr.c
@@ -15,13 +15,23 @@
#include <linux/rpmsg.h>
#include <linux/of.h>
-struct apr {
+enum {
+ PR_TYPE_APR = 0,
+ PR_TYPE_GPR,
+};
+
+/* Arbitrary dynamic port range that does not collide with static modules */
+#define GPR_DYNAMIC_PORT_START 0x10000000
+#define GPR_DYNAMIC_PORT_END 0x20000000
+
+struct packet_router {
struct rpmsg_endpoint *ch;
struct device *dev;
spinlock_t svcs_lock;
spinlock_t rx_lock;
struct idr svcs_idr;
int dest_domain_id;
+ int type;
struct pdr_handle *pdr;
struct workqueue_struct *rxwq;
struct work_struct rx_work;
@@ -44,26 +54,103 @@ struct apr_rx_buf {
*/
int apr_send_pkt(struct apr_device *adev, struct apr_pkt *pkt)
{
- struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
struct apr_hdr *hdr;
unsigned long flags;
int ret;
- spin_lock_irqsave(&adev->lock, flags);
+ spin_lock_irqsave(&adev->svc.lock, flags);
hdr = &pkt->hdr;
hdr->src_domain = APR_DOMAIN_APPS;
- hdr->src_svc = adev->svc_id;
+ hdr->src_svc = adev->svc.id;
hdr->dest_domain = adev->domain_id;
- hdr->dest_svc = adev->svc_id;
+ hdr->dest_svc = adev->svc.id;
ret = rpmsg_trysend(apr->ch, pkt, hdr->pkt_size);
- spin_unlock_irqrestore(&adev->lock, flags);
+ spin_unlock_irqrestore(&adev->svc.lock, flags);
return ret ? ret : hdr->pkt_size;
}
EXPORT_SYMBOL_GPL(apr_send_pkt);
+void gpr_free_port(gpr_port_t *port)
+{
+ struct packet_router *gpr = port->pr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ idr_remove(&gpr->svcs_idr, port->id);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ kfree(port);
+}
+EXPORT_SYMBOL_GPL(gpr_free_port);
+
+gpr_port_t *gpr_alloc_port(struct apr_device *gdev, struct device *dev,
+ gpr_port_cb cb, void *priv)
+{
+ struct packet_router *pr = dev_get_drvdata(gdev->dev.parent);
+ gpr_port_t *port;
+ struct pkt_router_svc *svc;
+ int id;
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return ERR_PTR(-ENOMEM);
+
+ svc = port;
+ svc->callback = cb;
+ svc->pr = pr;
+ svc->priv = priv;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ spin_lock(&pr->svcs_lock);
+ id = idr_alloc_cyclic(&pr->svcs_idr, svc, GPR_DYNAMIC_PORT_START,
+ GPR_DYNAMIC_PORT_END, GFP_ATOMIC);
+ if (id < 0) {
+ dev_err(dev, "Unable to allocate dynamic GPR src port\n");
+ kfree(port);
+ spin_unlock(&pr->svcs_lock);
+ return ERR_PTR(id);
+ }
+
+ svc->id = id;
+ spin_unlock(&pr->svcs_lock);
+
+ return port;
+}
+EXPORT_SYMBOL_GPL(gpr_alloc_port);
+
+static int pkt_router_send_svc_pkt(struct pkt_router_svc *svc, struct gpr_pkt *pkt)
+{
+ struct packet_router *pr = svc->pr;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ int ret;
+
+ hdr = &pkt->hdr;
+
+ spin_lock_irqsave(&svc->lock, flags);
+ ret = rpmsg_trysend(pr->ch, pkt, hdr->pkt_size);
+ spin_unlock_irqrestore(&svc->lock, flags);
+
+ return ret ? ret : hdr->pkt_size;
+}
+
+int gpr_send_pkt(struct apr_device *gdev, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(&gdev->svc, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_pkt);
+
+int gpr_send_port_pkt(gpr_port_t *port, struct gpr_pkt *pkt)
+{
+ return pkt_router_send_svc_pkt(port, pkt);
+}
+EXPORT_SYMBOL_GPL(gpr_send_port_pkt);
+
static void apr_dev_release(struct device *dev)
{
struct apr_device *adev = to_apr_device(dev);
@@ -74,7 +161,7 @@ static void apr_dev_release(struct device *dev)
static int apr_callback(struct rpmsg_device *rpdev, void *buf,
int len, void *priv, u32 addr)
{
- struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
struct apr_rx_buf *abuf;
unsigned long flags;
@@ -100,11 +187,11 @@ static int apr_callback(struct rpmsg_device *rpdev, void *buf,
return 0;
}
-
-static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
+static int apr_do_rx_callback(struct packet_router *apr, struct apr_rx_buf *abuf)
{
uint16_t hdr_size, msg_type, ver, svc_id;
- struct apr_device *svc = NULL;
+ struct pkt_router_svc *svc;
+	struct apr_device *adev = NULL;
struct apr_driver *adrv = NULL;
struct apr_resp_pkt resp;
struct apr_hdr *hdr;
@@ -145,12 +232,15 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
svc_id = hdr->dest_svc;
spin_lock_irqsave(&apr->svcs_lock, flags);
svc = idr_find(&apr->svcs_idr, svc_id);
- if (svc && svc->dev.driver)
- adrv = to_apr_driver(svc->dev.driver);
+ if (svc && svc->dev->driver) {
+ adev = svc_to_apr_device(svc);
+ adrv = to_apr_driver(adev->dev.driver);
+ }
spin_unlock_irqrestore(&apr->svcs_lock, flags);
- if (!adrv) {
- dev_err(apr->dev, "APR: service is not registered\n");
+ if (!adrv || !adev) {
+ dev_err(apr->dev, "APR: service is not registered (%d)\n",
+ svc_id);
return -EINVAL;
}
@@ -164,20 +254,82 @@ static int apr_do_rx_callback(struct apr *apr, struct apr_rx_buf *abuf)
if (resp.payload_size > 0)
resp.payload = buf + hdr_size;
- adrv->callback(svc, &resp);
+ adrv->callback(adev, &resp);
+
+ return 0;
+}
+
+static int gpr_do_rx_callback(struct packet_router *gpr, struct apr_rx_buf *abuf)
+{
+ uint16_t hdr_size, ver;
+ struct pkt_router_svc *svc = NULL;
+ struct gpr_resp_pkt resp;
+ struct gpr_hdr *hdr;
+ unsigned long flags;
+ void *buf = abuf->buf;
+ int len = abuf->len;
+
+ hdr = buf;
+ ver = hdr->version;
+ if (ver > GPR_PKT_VER + 1)
+ return -EINVAL;
+
+ hdr_size = hdr->hdr_size;
+ if (hdr_size < GPR_PKT_HEADER_WORD_SIZE) {
+ dev_err(gpr->dev, "GPR: Wrong hdr size:%d\n", hdr_size);
+ return -EINVAL;
+ }
+
+ if (hdr->pkt_size < GPR_PKT_HEADER_BYTE_SIZE || hdr->pkt_size != len) {
+ dev_err(gpr->dev, "GPR: Wrong packet size\n");
+ return -EINVAL;
+ }
+
+ resp.hdr = *hdr;
+ resp.payload_size = hdr->pkt_size - (hdr_size * 4);
+
+ /*
+	 * NOTE: hdr_size is not the same as GPR_HDR_SIZE, as the remote can
+	 * include optional headers in gpr_hdr which should be ignored
+ */
+ if (resp.payload_size > 0)
+ resp.payload = buf + (hdr_size * 4);
+
+ spin_lock_irqsave(&gpr->svcs_lock, flags);
+ svc = idr_find(&gpr->svcs_idr, hdr->dest_port);
+ spin_unlock_irqrestore(&gpr->svcs_lock, flags);
+
+ if (!svc) {
+ dev_err(gpr->dev, "GPR: Port(%x) is not registered\n",
+ hdr->dest_port);
+ return -EINVAL;
+ }
+
+ if (svc->callback)
+ svc->callback(&resp, svc->priv, 0);
return 0;
}
static void apr_rxwq(struct work_struct *work)
{
- struct apr *apr = container_of(work, struct apr, rx_work);
+ struct packet_router *apr = container_of(work, struct packet_router, rx_work);
struct apr_rx_buf *abuf, *b;
unsigned long flags;
if (!list_empty(&apr->rx_list)) {
list_for_each_entry_safe(abuf, b, &apr->rx_list, node) {
- apr_do_rx_callback(apr, abuf);
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ apr_do_rx_callback(apr, abuf);
+ break;
+ case PR_TYPE_GPR:
+ gpr_do_rx_callback(apr, abuf);
+ break;
+ default:
+ break;
+ }
spin_lock_irqsave(&apr->rx_lock, flags);
list_del(&abuf->node);
spin_unlock_irqrestore(&apr->rx_lock, flags);
@@ -201,7 +353,7 @@ static int apr_device_match(struct device *dev, struct device_driver *drv)
while (id->domain_id != 0 || id->svc_id != 0) {
if (id->domain_id == adev->domain_id &&
- id->svc_id == adev->svc_id)
+ id->svc_id == adev->svc.id)
return 1;
id++;
}
@@ -213,22 +365,27 @@ static int apr_device_probe(struct device *dev)
{
struct apr_device *adev = to_apr_device(dev);
struct apr_driver *adrv = to_apr_driver(dev->driver);
+ int ret;
- return adrv->probe(adev);
+ ret = adrv->probe(adev);
+ if (!ret)
+ adev->svc.callback = adrv->gpr_callback;
+
+ return ret;
}
static void apr_device_remove(struct device *dev)
{
struct apr_device *adev = to_apr_device(dev);
struct apr_driver *adrv;
- struct apr *apr = dev_get_drvdata(adev->dev.parent);
+ struct packet_router *apr = dev_get_drvdata(adev->dev.parent);
if (dev->driver) {
adrv = to_apr_driver(dev->driver);
if (adrv->remove)
adrv->remove(adev);
spin_lock(&apr->svcs_lock);
- idr_remove(&apr->svcs_idr, adev->svc_id);
+ idr_remove(&apr->svcs_idr, adev->svc.id);
spin_unlock(&apr->svcs_lock);
}
}
@@ -255,28 +412,43 @@ struct bus_type aprbus = {
EXPORT_SYMBOL_GPL(aprbus);
static int apr_add_device(struct device *dev, struct device_node *np,
- const struct apr_device_id *id)
+ u32 svc_id, u32 domain_id)
{
- struct apr *apr = dev_get_drvdata(dev);
+ struct packet_router *apr = dev_get_drvdata(dev);
struct apr_device *adev = NULL;
+ struct pkt_router_svc *svc;
int ret;
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
return -ENOMEM;
- spin_lock_init(&adev->lock);
+ adev->svc_id = svc_id;
+ svc = &adev->svc;
+
+ svc->id = svc_id;
+ svc->pr = apr;
+ svc->priv = adev;
+ svc->dev = dev;
+ spin_lock_init(&svc->lock);
+
+ adev->domain_id = domain_id;
- adev->svc_id = id->svc_id;
- adev->domain_id = id->domain_id;
- adev->version = id->svc_version;
if (np)
snprintf(adev->name, APR_NAME_SIZE, "%pOFn", np);
- else
- strscpy(adev->name, id->name, APR_NAME_SIZE);
- dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
- id->domain_id, id->svc_id);
+ switch (apr->type) {
+ case PR_TYPE_APR:
+ dev_set_name(&adev->dev, "aprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ case PR_TYPE_GPR:
+ dev_set_name(&adev->dev, "gprsvc:%s:%x:%x", adev->name,
+ domain_id, svc_id);
+ break;
+ default:
+ break;
+ }
adev->dev.bus = &aprbus;
adev->dev.parent = dev;
@@ -285,14 +457,13 @@ static int apr_add_device(struct device *dev, struct device_node *np,
adev->dev.driver = NULL;
spin_lock(&apr->svcs_lock);
- idr_alloc(&apr->svcs_idr, adev, id->svc_id,
- id->svc_id + 1, GFP_ATOMIC);
+ idr_alloc(&apr->svcs_idr, svc, svc_id, svc_id + 1, GFP_ATOMIC);
spin_unlock(&apr->svcs_lock);
of_property_read_string_index(np, "qcom,protection-domain",
1, &adev->service_path);
- dev_info(dev, "Adding APR dev: %s\n", dev_name(&adev->dev));
+ dev_info(dev, "Adding APR/GPR dev: %s\n", dev_name(&adev->dev));
ret = device_register(&adev->dev);
if (ret) {
@@ -306,7 +477,7 @@ static int apr_add_device(struct device *dev, struct device_node *np,
static int of_apr_add_pd_lookups(struct device *dev)
{
const char *service_name, *service_path;
- struct apr *apr = dev_get_drvdata(dev);
+ struct packet_router *apr = dev_get_drvdata(dev);
struct device_node *node;
struct pdr_service *pds;
int ret;
@@ -321,12 +492,14 @@ static int of_apr_add_pd_lookups(struct device *dev)
1, &service_path);
if (ret < 0) {
dev_err(dev, "pdr service path missing: %d\n", ret);
+ of_node_put(node);
return ret;
}
pds = pdr_add_lookup(apr->pdr, service_name, service_path);
if (IS_ERR(pds) && PTR_ERR(pds) != -EALREADY) {
dev_err(dev, "pdr add lookup failed: %ld\n", PTR_ERR(pds));
+ of_node_put(node);
return PTR_ERR(pds);
}
}
@@ -336,13 +509,14 @@ static int of_apr_add_pd_lookups(struct device *dev)
static void of_register_apr_devices(struct device *dev, const char *svc_path)
{
- struct apr *apr = dev_get_drvdata(dev);
+ struct packet_router *apr = dev_get_drvdata(dev);
struct device_node *node;
const char *service_path;
int ret;
for_each_child_of_node(dev->of_node, node) {
- struct apr_device_id id = { {0} };
+ u32 svc_id;
+ u32 domain_id;
/*
* This function is called with svc_path NULL during
@@ -372,13 +546,13 @@ static void of_register_apr_devices(struct device *dev, const char *svc_path)
continue;
}
- if (of_property_read_u32(node, "reg", &id.svc_id))
+ if (of_property_read_u32(node, "reg", &svc_id))
continue;
- id.domain_id = apr->dest_domain_id;
+ domain_id = apr->dest_domain_id;
- if (apr_add_device(dev, node, &id))
- dev_err(dev, "Failed to add apr %d svc\n", id.svc_id);
+ if (apr_add_device(dev, node, svc_id, domain_id))
+ dev_err(dev, "Failed to add apr %d svc\n", svc_id);
}
}
@@ -398,7 +572,7 @@ static int apr_remove_device(struct device *dev, void *svc_path)
static void apr_pd_status(int state, char *svc_path, void *priv)
{
- struct apr *apr = (struct apr *)priv;
+ struct packet_router *apr = (struct packet_router *)priv;
switch (state) {
case SERVREG_SERVICE_STATE_UP:
@@ -413,16 +587,26 @@ static void apr_pd_status(int state, char *svc_path, void *priv)
static int apr_probe(struct rpmsg_device *rpdev)
{
struct device *dev = &rpdev->dev;
- struct apr *apr;
+ struct packet_router *apr;
int ret;
apr = devm_kzalloc(dev, sizeof(*apr), GFP_KERNEL);
if (!apr)
return -ENOMEM;
- ret = of_property_read_u32(dev->of_node, "qcom,apr-domain", &apr->dest_domain_id);
+ ret = of_property_read_u32(dev->of_node, "qcom,domain", &apr->dest_domain_id);
+
+ if (of_device_is_compatible(dev->of_node, "qcom,gpr")) {
+ apr->type = PR_TYPE_GPR;
+ } else {
+ if (ret) /* try deprecated apr-domain property */
+ ret = of_property_read_u32(dev->of_node, "qcom,apr-domain",
+ &apr->dest_domain_id);
+ apr->type = PR_TYPE_APR;
+ }
+
if (ret) {
- dev_err(dev, "APR Domain ID not specified in DT\n");
+ dev_err(dev, "Domain ID not specified in DT\n");
return ret;
}
@@ -465,7 +649,7 @@ destroy_wq:
static void apr_remove(struct rpmsg_device *rpdev)
{
- struct apr *apr = dev_get_drvdata(&rpdev->dev);
+ struct packet_router *apr = dev_get_drvdata(&rpdev->dev);
pdr_handle_release(apr->pdr);
device_for_each_child(&rpdev->dev, NULL, apr_remove_device);
@@ -502,20 +686,21 @@ void apr_driver_unregister(struct apr_driver *drv)
}
EXPORT_SYMBOL_GPL(apr_driver_unregister);
-static const struct of_device_id apr_of_match[] = {
+static const struct of_device_id pkt_router_of_match[] = {
{ .compatible = "qcom,apr"},
{ .compatible = "qcom,apr-v2"},
+ { .compatible = "qcom,gpr"},
{}
};
-MODULE_DEVICE_TABLE(of, apr_of_match);
+MODULE_DEVICE_TABLE(of, pkt_router_of_match);
-static struct rpmsg_driver apr_driver = {
+static struct rpmsg_driver packet_router_driver = {
.probe = apr_probe,
.remove = apr_remove,
.callback = apr_callback,
.drv = {
.name = "qcom,apr",
- .of_match_table = apr_of_match,
+ .of_match_table = pkt_router_of_match,
},
};
@@ -525,7 +710,7 @@ static int __init apr_init(void)
ret = bus_register(&aprbus);
if (!ret)
- ret = register_rpmsg_driver(&apr_driver);
+ ret = register_rpmsg_driver(&packet_router_driver);
else
bus_unregister(&aprbus);
@@ -535,7 +720,7 @@ static int __init apr_init(void)
static void __exit apr_exit(void)
{
bus_unregister(&aprbus);
- unregister_rpmsg_driver(&apr_driver);
+ unregister_rpmsg_driver(&packet_router_driver);
}
subsys_initcall(apr_init);
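
GPR clients obtain a dynamic source port and send packets through it; replies are dispatched by gpr_do_rx_callback() to the callback registered with the port. A rough sketch of such a client (the callback signature and type declarations are assumed to match linux/soc/qcom/apr.h, so treat the details as illustrative):

#include <linux/soc/qcom/apr.h>

static int example_gpr_cb(struct gpr_resp_pkt *resp, void *priv, int op)
{
	/* inspect resp->hdr and resp->payload here */
	return 0;
}

static int example_send(struct apr_device *gdev, struct gpr_pkt *pkt)
{
	gpr_port_t *port;
	int ret;

	port = gpr_alloc_port(gdev, &gdev->dev, example_gpr_cb, NULL);
	if (IS_ERR(port))
		return PTR_ERR(port);

	ret = gpr_send_port_pkt(port, pkt);	/* pkt_size on success, -errno on failure */

	gpr_free_port(port);
	return ret < 0 ? ret : 0;
}
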
diff --git a/drivers/soc/qcom/cpr.c b/drivers/soc/qcom/cpr.c
index 4ce8e816154f..1d818a8ba208 100644
--- a/drivers/soc/qcom/cpr.c
+++ b/drivers/soc/qcom/cpr.c
@@ -1614,7 +1614,6 @@ static void cpr_debugfs_init(struct cpr_drv *drv)
static int cpr_probe(struct platform_device *pdev)
{
- struct resource *res;
struct device *dev = &pdev->dev;
struct cpr_drv *drv;
int irq, ret;
@@ -1648,8 +1647,7 @@ static int cpr_probe(struct platform_device *pdev)
if (IS_ERR(drv->tcsr))
return PTR_ERR(drv->tcsr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- drv->base = devm_ioremap_resource(dev, res);
+ drv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(drv->base))
return PTR_ERR(drv->base);
diff --git a/drivers/soc/qcom/llcc-qcom.c b/drivers/soc/qcom/llcc-qcom.c
index 15a36dcab990..6bf2f1d1f2c5 100644
--- a/drivers/soc/qcom/llcc-qcom.c
+++ b/drivers/soc/qcom/llcc-qcom.c
@@ -115,7 +115,7 @@ static const struct llcc_slice_config sc7280_data[] = {
{ LLCC_CMPT, 10, 768, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_GPUHTW, 11, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_GPU, 12, 512, 1, 0, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
- { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 1, 0},
+ { LLCC_MMUHWT, 13, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 0, 1, 0},
{ LLCC_MDMPNG, 21, 768, 0, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_WLHW, 24, 256, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
{ LLCC_MODPE, 29, 64, 1, 1, 0x3f, 0x0, 0, 0, 0, 1, 0, 0},
@@ -142,6 +142,16 @@ static const struct llcc_slice_config sdm845_data[] = {
{ LLCC_AUDHW, 22, 1024, 1, 1, 0xffc, 0x2, 0, 0, 1, 1, 0 },
};
+static const struct llcc_slice_config sm6350_data[] = {
+ { LLCC_CPUSS, 1, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 1 },
+ { LLCC_MDM, 8, 512, 2, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPUHTW, 11, 256, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_GPU, 12, 512, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MDMPNG, 21, 768, 0, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_NPU, 23, 768, 1, 0, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+ { LLCC_MODPE, 29, 64, 1, 1, 0xFFF, 0x0, 0, 0, 0, 0, 1, 0 },
+};
+
static const struct llcc_slice_config sm8150_data[] = {
{ LLCC_CPUSS, 1, 3072, 1, 1, 0xFFF, 0x0, 0, 0, 0, 1, 1 },
{ LLCC_VIDSC0, 2, 512, 2, 1, 0xFFF, 0x0, 0, 0, 0, 1, 0 },
@@ -203,6 +213,11 @@ static const struct qcom_llcc_config sdm845_cfg = {
.need_llcc_cfg = false,
};
+static const struct qcom_llcc_config sm6350_cfg = {
+ .sct_data = sm6350_data,
+ .size = ARRAY_SIZE(sm6350_data),
+};
+
static const struct qcom_llcc_config sm8150_cfg = {
.sct_data = sm8150_data,
.size = ARRAY_SIZE(sm8150_data),
@@ -626,6 +641,7 @@ static const struct of_device_id qcom_llcc_of_match[] = {
{ .compatible = "qcom,sc7180-llcc", .data = &sc7180_cfg },
{ .compatible = "qcom,sc7280-llcc", .data = &sc7280_cfg },
{ .compatible = "qcom,sdm845-llcc", .data = &sdm845_cfg },
+ { .compatible = "qcom,sm6350-llcc", .data = &sm6350_cfg },
{ .compatible = "qcom,sm8150-llcc", .data = &sm8150_cfg },
{ .compatible = "qcom,sm8250-llcc", .data = &sm8250_cfg },
{ }
diff --git a/drivers/soc/qcom/ocmem.c b/drivers/soc/qcom/ocmem.c
index f1875dc31ae2..d2dacbbaafbd 100644
--- a/drivers/soc/qcom/ocmem.c
+++ b/drivers/soc/qcom/ocmem.c
@@ -300,7 +300,6 @@ static int ocmem_dev_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
unsigned long reg, region_size;
int i, j, ret, num_banks;
- struct resource *res;
struct ocmem *ocmem;
if (!qcom_scm_is_available())
@@ -321,8 +320,7 @@ static int ocmem_dev_probe(struct platform_device *pdev)
return ret;
}
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ctrl");
- ocmem->mmio = devm_ioremap_resource(&pdev->dev, res);
+ ocmem->mmio = devm_platform_ioremap_resource_byname(pdev, "ctrl");
if (IS_ERR(ocmem->mmio)) {
dev_err(&pdev->dev, "Failed to ioremap ocmem_ctrl resource\n");
return PTR_ERR(ocmem->mmio);
diff --git a/drivers/soc/qcom/pdr_interface.c b/drivers/soc/qcom/pdr_interface.c
index 915d5bc3d46e..fc580a3c4336 100644
--- a/drivers/soc/qcom/pdr_interface.c
+++ b/drivers/soc/qcom/pdr_interface.c
@@ -131,7 +131,7 @@ static int pdr_register_listener(struct pdr_handle *pdr,
return ret;
req.enable = enable;
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_REGISTER_LISTENER_REQ,
@@ -257,7 +257,7 @@ static int pdr_send_indack_msg(struct pdr_handle *pdr, struct pdr_service *pds,
return ret;
req.transaction_id = tid;
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
ret = qmi_send_request(&pdr->notifier_hdl, &pds->addr,
&txn, SERVREG_SET_ACK_REQ,
@@ -406,7 +406,7 @@ static int pdr_locate_service(struct pdr_handle *pdr, struct pdr_service *pds)
return -ENOMEM;
/* Prepare req message */
- strcpy(req.service_name, pds->service_name);
+ strscpy(req.service_name, pds->service_name, sizeof(req.service_name));
req.domain_offset_valid = true;
req.domain_offset = 0;
@@ -531,8 +531,8 @@ struct pdr_service *pdr_add_lookup(struct pdr_handle *pdr,
return ERR_PTR(-ENOMEM);
pds->service = SERVREG_NOTIFIER_SERVICE;
- strcpy(pds->service_name, service_name);
- strcpy(pds->service_path, service_path);
+ strscpy(pds->service_name, service_name, sizeof(pds->service_name));
+ strscpy(pds->service_path, service_path, sizeof(pds->service_path));
pds->need_locator_lookup = true;
mutex_lock(&pdr->list_lock);
@@ -587,7 +587,7 @@ int pdr_restart_pd(struct pdr_handle *pdr, struct pdr_service *pds)
break;
/* Prepare req message */
- strcpy(req.service_path, pds->service_path);
+ strscpy(req.service_path, pds->service_path, sizeof(req.service_path));
addr = pds->addr;
break;
}
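
Unlike strcpy(), strscpy() never writes past the destination buffer: it copies at most size - 1 bytes, always NUL-terminates, and returns -E2BIG when the source had to be truncated. A tiny sketch of the pattern (helper name illustrative):

#include <linux/string.h>

static bool example_copy_path(char *dst, size_t dst_size, const char *src)
{
	/* a negative return (-E2BIG) means src did not fit in dst */
	return strscpy(dst, src, dst_size) >= 0;
}
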
diff --git a/drivers/soc/qcom/qcom-geni-se.c b/drivers/soc/qcom/qcom-geni-se.c
index 7d649d2cf31e..28a8c0dda66c 100644
--- a/drivers/soc/qcom/qcom-geni-se.c
+++ b/drivers/soc/qcom/qcom-geni-se.c
@@ -871,7 +871,6 @@ EXPORT_SYMBOL(geni_icc_disable);
static int geni_se_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
- struct resource *res;
struct geni_wrapper *wrapper;
int ret;
@@ -880,8 +879,7 @@ static int geni_se_probe(struct platform_device *pdev)
return -ENOMEM;
wrapper->dev = dev;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- wrapper->base = devm_ioremap_resource(dev, res);
+ wrapper->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(wrapper->base))
return PTR_ERR(wrapper->base);
diff --git a/drivers/soc/qcom/qcom_aoss.c b/drivers/soc/qcom/qcom_aoss.c
index 536c3e4114fb..34acf58bbb0d 100644
--- a/drivers/soc/qcom/qcom_aoss.c
+++ b/drivers/soc/qcom/qcom_aoss.c
@@ -2,16 +2,16 @@
/*
* Copyright (c) 2019, Linaro Ltd
*/
-#include <dt-bindings/power/qcom-aoss-qmp.h>
#include <linux/clk-provider.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
+#include <linux/of_platform.h>
#include <linux/platform_device.h>
-#include <linux/pm_domain.h>
#include <linux/thermal.h>
#include <linux/slab.h>
+#include <linux/soc/qcom/qcom_aoss.h>
#define QMP_DESC_MAGIC 0x0
#define QMP_DESC_VERSION 0x4
@@ -64,7 +64,6 @@ struct qmp_cooling_device {
* @event: wait_queue for synchronization with the IRQ
* @tx_lock: provides synchronization between multiple callers of qmp_send()
* @qdss_clk: QDSS clock hw struct
- * @pd_data: genpd data
* @cooling_devs: thermal cooling devices
*/
struct qmp {
@@ -82,17 +81,9 @@ struct qmp {
struct mutex tx_lock;
struct clk_hw qdss_clk;
- struct genpd_onecell_data pd_data;
struct qmp_cooling_device *cooling_devs;
};
-struct qmp_pd {
- struct qmp *qmp;
- struct generic_pm_domain pd;
-};
-
-#define to_qmp_pd_resource(res) container_of(res, struct qmp_pd, pd)
-
static void qmp_kick(struct qmp *qmp)
{
mbox_send_message(qmp->mbox_chan, NULL);
@@ -223,11 +214,14 @@ static bool qmp_message_empty(struct qmp *qmp)
*
* Return: 0 on success, negative errno on failure
*/
-static int qmp_send(struct qmp *qmp, const void *data, size_t len)
+int qmp_send(struct qmp *qmp, const void *data, size_t len)
{
long time_left;
int ret;
+ if (WARN_ON(IS_ERR_OR_NULL(qmp) || !data))
+ return -EINVAL;
+
if (WARN_ON(len + sizeof(u32) > qmp->size))
return -EINVAL;
@@ -261,6 +255,7 @@ static int qmp_send(struct qmp *qmp, const void *data, size_t len)
return ret;
}
+EXPORT_SYMBOL(qmp_send);
static int qmp_qdss_clk_prepare(struct clk_hw *hw)
{
@@ -314,95 +309,6 @@ static void qmp_qdss_clk_remove(struct qmp *qmp)
clk_hw_unregister(&qmp->qdss_clk);
}
-static int qmp_pd_power_toggle(struct qmp_pd *res, bool enable)
-{
- char buf[QMP_MSG_LEN] = {};
-
- snprintf(buf, sizeof(buf),
- "{class: image, res: load_state, name: %s, val: %s}",
- res->pd.name, enable ? "on" : "off");
- return qmp_send(res->qmp, buf, sizeof(buf));
-}
-
-static int qmp_pd_power_on(struct generic_pm_domain *domain)
-{
- return qmp_pd_power_toggle(to_qmp_pd_resource(domain), true);
-}
-
-static int qmp_pd_power_off(struct generic_pm_domain *domain)
-{
- return qmp_pd_power_toggle(to_qmp_pd_resource(domain), false);
-}
-
-static const char * const sdm845_resources[] = {
- [AOSS_QMP_LS_CDSP] = "cdsp",
- [AOSS_QMP_LS_LPASS] = "adsp",
- [AOSS_QMP_LS_MODEM] = "modem",
- [AOSS_QMP_LS_SLPI] = "slpi",
- [AOSS_QMP_LS_SPSS] = "spss",
- [AOSS_QMP_LS_VENUS] = "venus",
-};
-
-static int qmp_pd_add(struct qmp *qmp)
-{
- struct genpd_onecell_data *data = &qmp->pd_data;
- struct device *dev = qmp->dev;
- struct qmp_pd *res;
- size_t num = ARRAY_SIZE(sdm845_resources);
- int ret;
- int i;
-
- res = devm_kcalloc(dev, num, sizeof(*res), GFP_KERNEL);
- if (!res)
- return -ENOMEM;
-
- data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
- GFP_KERNEL);
- if (!data->domains)
- return -ENOMEM;
-
- for (i = 0; i < num; i++) {
- res[i].qmp = qmp;
- res[i].pd.name = sdm845_resources[i];
- res[i].pd.power_on = qmp_pd_power_on;
- res[i].pd.power_off = qmp_pd_power_off;
-
- ret = pm_genpd_init(&res[i].pd, NULL, true);
- if (ret < 0) {
- dev_err(dev, "failed to init genpd\n");
- goto unroll_genpds;
- }
-
- data->domains[i] = &res[i].pd;
- }
-
- data->num_domains = i;
-
- ret = of_genpd_add_provider_onecell(dev->of_node, data);
- if (ret < 0)
- goto unroll_genpds;
-
- return 0;
-
-unroll_genpds:
- for (i--; i >= 0; i--)
- pm_genpd_remove(data->domains[i]);
-
- return ret;
-}
-
-static void qmp_pd_remove(struct qmp *qmp)
-{
- struct genpd_onecell_data *data = &qmp->pd_data;
- struct device *dev = qmp->dev;
- int i;
-
- of_genpd_del_provider(dev->of_node);
-
- for (i = 0; i < data->num_domains; i++)
- pm_genpd_remove(data->domains[i]);
-}
-
static int qmp_cdev_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
@@ -519,9 +425,53 @@ static void qmp_cooling_devices_remove(struct qmp *qmp)
thermal_cooling_device_unregister(qmp->cooling_devs[i].cdev);
}
+/**
+ * qmp_get() - get a qmp handle from a device
+ * @dev: client device pointer
+ *
+ * Return: handle to qmp device on success, ERR_PTR() on failure
+ */
+struct qmp *qmp_get(struct device *dev)
+{
+ struct platform_device *pdev;
+ struct device_node *np;
+ struct qmp *qmp;
+
+ if (!dev || !dev->of_node)
+ return ERR_PTR(-EINVAL);
+
+ np = of_parse_phandle(dev->of_node, "qcom,qmp", 0);
+ if (!np)
+ return ERR_PTR(-ENODEV);
+
+ pdev = of_find_device_by_node(np);
+ of_node_put(np);
+ if (!pdev)
+ return ERR_PTR(-EINVAL);
+
+ qmp = platform_get_drvdata(pdev);
+
+ return qmp ? qmp : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL(qmp_get);
+
+/**
+ * qmp_put() - release a qmp handle
+ * @qmp: qmp handle obtained from qmp_get()
+ */
+void qmp_put(struct qmp *qmp)
+{
+ /*
+ * Match get_device() inside of_find_device_by_node() in
+ * qmp_get()
+ */
+ if (!IS_ERR_OR_NULL(qmp))
+ put_device(qmp->dev);
+}
+EXPORT_SYMBOL(qmp_put);
+
static int qmp_probe(struct platform_device *pdev)
{
- struct resource *res;
struct qmp *qmp;
int irq;
int ret;
@@ -534,8 +484,7 @@ static int qmp_probe(struct platform_device *pdev)
init_waitqueue_head(&qmp->event);
mutex_init(&qmp->tx_lock);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- qmp->msgram = devm_ioremap_resource(&pdev->dev, res);
+ qmp->msgram = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(qmp->msgram))
return PTR_ERR(qmp->msgram);
@@ -563,10 +512,6 @@ static int qmp_probe(struct platform_device *pdev)
if (ret)
goto err_close_qmp;
- ret = qmp_pd_add(qmp);
- if (ret)
- goto err_remove_qdss_clk;
-
ret = qmp_cooling_devices_register(qmp);
if (ret)
dev_err(&pdev->dev, "failed to register aoss cooling devices\n");
@@ -575,8 +520,6 @@ static int qmp_probe(struct platform_device *pdev)
return 0;
-err_remove_qdss_clk:
- qmp_qdss_clk_remove(qmp);
err_close_qmp:
qmp_close(qmp);
err_free_mbox:
@@ -590,7 +533,6 @@ static int qmp_remove(struct platform_device *pdev)
struct qmp *qmp = platform_get_drvdata(pdev);
qmp_qdss_clk_remove(qmp);
- qmp_pd_remove(qmp);
qmp_cooling_devices_remove(qmp);
qmp_close(qmp);
@@ -615,6 +557,7 @@ static struct platform_driver qmp_driver = {
.driver = {
.name = "qcom_aoss_qmp",
.of_match_table = qmp_dt_match,
+ .suppress_bind_attrs = true,
},
.probe = qmp_probe,
.remove = qmp_remove,
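With qmp_send() now exported and the new qmp_get()/qmp_put() pair in place, a client driver can reach the AOSS message RAM directly instead of relying on the removed genpd provider. A hedged sketch of such a client, assuming the client's device-tree node carries a "qcom,qmp" phandle; the buffer size and the "adsp" resource name are illustrative, and the message format simply reuses the load_state string the removed power-domain code sent:

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/kernel.h>
	#include <linux/soc/qcom/qcom_aoss.h>

	static int example_set_load_state(struct device *dev, bool up)
	{
		char buf[96] = {};	/* illustrative; qmp_send() checks len against the msgram size */
		struct qmp *qmp;
		int ret;

		qmp = qmp_get(dev);	/* resolves the "qcom,qmp" phandle */
		if (IS_ERR(qmp))
			return PTR_ERR(qmp);

		snprintf(buf, sizeof(buf),
			 "{class: image, res: load_state, name: %s, val: %s}",
			 "adsp", up ? "on" : "off");

		ret = qmp_send(qmp, buf, sizeof(buf));

		qmp_put(qmp);
		return ret;
	}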
diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
index 304afc223a58..290bdefbf28a 100644
--- a/drivers/soc/qcom/qcom_gsbi.c
+++ b/drivers/soc/qcom/qcom_gsbi.c
@@ -127,7 +127,6 @@ static int gsbi_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
struct device_node *tcsr_node;
const struct of_device_id *match;
- struct resource *res;
void __iomem *base;
struct gsbi_info *gsbi;
int i, ret;
@@ -139,8 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
if (!gsbi)
return -ENOMEM;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/qcom/qcom_stats.c b/drivers/soc/qcom/qcom_stats.c
new file mode 100644
index 000000000000..131d24caabf8
--- /dev/null
+++ b/drivers/soc/qcom/qcom_stats.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+
+#include <linux/soc/qcom/smem.h>
+#include <clocksource/arm_arch_timer.h>
+
+#define RPM_DYNAMIC_ADDR 0x14
+#define RPM_DYNAMIC_ADDR_MASK 0xFFFF
+
+#define STAT_TYPE_OFFSET 0x0
+#define COUNT_OFFSET 0x4
+#define LAST_ENTERED_AT_OFFSET 0x8
+#define LAST_EXITED_AT_OFFSET 0x10
+#define ACCUMULATED_OFFSET 0x18
+#define CLIENT_VOTES_OFFSET 0x20
+
+struct subsystem_data {
+ const char *name;
+ u32 smem_item;
+ u32 pid;
+};
+
+static const struct subsystem_data subsystems[] = {
+ { "modem", 605, 1 },
+ { "wpss", 605, 13 },
+ { "adsp", 606, 2 },
+ { "cdsp", 607, 5 },
+ { "slpi", 608, 3 },
+ { "gpu", 609, 0 },
+ { "display", 610, 0 },
+ { "adsp_island", 613, 2 },
+ { "slpi_island", 613, 3 },
+};
+
+struct stats_config {
+ size_t stats_offset;
+ size_t num_records;
+ bool appended_stats_avail;
+ bool dynamic_offset;
+ bool subsystem_stats_in_smem;
+};
+
+struct stats_data {
+ bool appended_stats_avail;
+ void __iomem *base;
+};
+
+struct sleep_stats {
+ u32 stat_type;
+ u32 count;
+ u64 last_entered_at;
+ u64 last_exited_at;
+ u64 accumulated;
+};
+
+struct appended_stats {
+ u32 client_votes;
+ u32 reserved[3];
+};
+
+static void qcom_print_stats(struct seq_file *s, const struct sleep_stats *stat)
+{
+ u64 accumulated = stat->accumulated;
+ /*
+ * If a subsystem is in sleep when reading the sleep stats, adjust
+ * the accumulated sleep duration to show the actual sleep time.
+ */
+ if (stat->last_entered_at > stat->last_exited_at)
+ accumulated += arch_timer_read_counter() - stat->last_entered_at;
+
+ seq_printf(s, "Count: %u\n", stat->count);
+ seq_printf(s, "Last Entered At: %llu\n", stat->last_entered_at);
+ seq_printf(s, "Last Exited At: %llu\n", stat->last_exited_at);
+ seq_printf(s, "Accumulated Duration: %llu\n", accumulated);
+}
+
+static int qcom_subsystem_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct subsystem_data *subsystem = s->private;
+ struct sleep_stats *stat;
+
+ /* Items are allocated lazily, so look up the pointer each time */
+ stat = qcom_smem_get(subsystem->pid, subsystem->smem_item, NULL);
+ if (IS_ERR(stat))
+ return -EIO;
+
+ qcom_print_stats(s, stat);
+
+ return 0;
+}
+
+static int qcom_soc_sleep_stats_show(struct seq_file *s, void *unused)
+{
+ struct stats_data *d = s->private;
+ void __iomem *reg = d->base;
+ struct sleep_stats stat;
+
+ memcpy_fromio(&stat, reg, sizeof(stat));
+ qcom_print_stats(s, &stat);
+
+ if (d->appended_stats_avail) {
+ struct appended_stats votes;
+
+ memcpy_fromio(&votes, reg + CLIENT_VOTES_OFFSET, sizeof(votes));
+ seq_printf(s, "Client Votes: %#x\n", votes.client_votes);
+ }
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(qcom_soc_sleep_stats);
+DEFINE_SHOW_ATTRIBUTE(qcom_subsystem_sleep_stats);
+
+static void qcom_create_soc_sleep_stat_files(struct dentry *root, void __iomem *reg,
+ struct stats_data *d,
+ const struct stats_config *config)
+{
+ char stat_type[sizeof(u32) + 1] = {0};
+ size_t stats_offset = config->stats_offset;
+ u32 offset = 0, type;
+ int i, j;
+
+ /*
+ * On RPM targets, the stats offset location is dynamic and changes from
+ * target to target, and sometimes from build to build for the same target.
+ *
+ * In such cases the dynamic address is present at offset 0x14 from the base
+ * address given in the devicetree. The lower 16 bits indicate the stats_offset.
+ */
+ if (config->dynamic_offset) {
+ stats_offset = readl(reg + RPM_DYNAMIC_ADDR);
+ stats_offset &= RPM_DYNAMIC_ADDR_MASK;
+ }
+
+ for (i = 0; i < config->num_records; i++) {
+ d[i].base = reg + offset + stats_offset;
+
+ /*
+ * Read the low power mode name and create a debugfs file for it.
+ * The names read are typically the following
+ * (they may change depending on the low power modes supported).
+ * For rpmh-sleep-stats: "aosd", "cxsd" and "ddr".
+ * For rpm-sleep-stats: "vmin" and "vlow".
+ */
+ type = readl(d[i].base);
+ for (j = 0; j < sizeof(u32); j++) {
+ stat_type[j] = type & 0xff;
+ type = type >> 8;
+ }
+ strim(stat_type);
+ debugfs_create_file(stat_type, 0400, root, &d[i],
+ &qcom_soc_sleep_stats_fops);
+
+ offset += sizeof(struct sleep_stats);
+ if (d[i].appended_stats_avail)
+ offset += sizeof(struct appended_stats);
+ }
+}
+
+static void qcom_create_subsystem_stat_files(struct dentry *root,
+ const struct stats_config *config)
+{
+ const struct sleep_stats *stat;
+ int i;
+
+ if (!config->subsystem_stats_in_smem)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(subsystems); i++) {
+ stat = qcom_smem_get(subsystems[i].pid, subsystems[i].smem_item, NULL);
+ if (IS_ERR(stat))
+ continue;
+
+ debugfs_create_file(subsystems[i].name, 0400, root, (void *)&subsystems[i],
+ &qcom_subsystem_sleep_stats_fops);
+ }
+}
+
+static int qcom_stats_probe(struct platform_device *pdev)
+{
+ void __iomem *reg;
+ struct dentry *root;
+ const struct stats_config *config;
+ struct stats_data *d;
+ int i;
+
+ config = device_get_match_data(&pdev->dev);
+ if (!config)
+ return -ENODEV;
+
+ reg = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
+ if (IS_ERR(reg))
+ return -ENOMEM;
+
+ d = devm_kcalloc(&pdev->dev, config->num_records,
+ sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ for (i = 0; i < config->num_records; i++)
+ d[i].appended_stats_avail = config->appended_stats_avail;
+
+ root = debugfs_create_dir("qcom_stats", NULL);
+
+ qcom_create_subsystem_stat_files(root, config);
+ qcom_create_soc_sleep_stat_files(root, reg, d, config);
+
+ platform_set_drvdata(pdev, root);
+
+ return 0;
+}
+
+static int qcom_stats_remove(struct platform_device *pdev)
+{
+ struct dentry *root = platform_get_drvdata(pdev);
+
+ debugfs_remove_recursive(root);
+
+ return 0;
+}
+
+static const struct stats_config rpm_data = {
+ .stats_offset = 0,
+ .num_records = 2,
+ .appended_stats_avail = true,
+ .dynamic_offset = true,
+ .subsystem_stats_in_smem = false,
+};
+
+static const struct stats_config rpmh_data = {
+ .stats_offset = 0x48,
+ .num_records = 3,
+ .appended_stats_avail = false,
+ .dynamic_offset = false,
+ .subsystem_stats_in_smem = true,
+};
+
+static const struct of_device_id qcom_stats_table[] = {
+ { .compatible = "qcom,rpm-stats", .data = &rpm_data },
+ { .compatible = "qcom,rpmh-stats", .data = &rpmh_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_stats_table);
+
+static struct platform_driver qcom_stats = {
+ .probe = qcom_stats_probe,
+ .remove = qcom_stats_remove,
+ .driver = {
+ .name = "qcom_stats",
+ .of_match_table = qcom_stats_table,
+ },
+};
+
+static int __init qcom_stats_init(void)
+{
+ return platform_driver_register(&qcom_stats);
+}
+late_initcall(qcom_stats_init);
+
+static void __exit qcom_stats_exit(void)
+{
+ platform_driver_unregister(&qcom_stats);
+}
+module_exit(qcom_stats_exit)
+
+MODULE_DESCRIPTION("Qualcomm Technologies, Inc. (QTI) Stats driver");
+MODULE_LICENSE("GPL v2");
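The new qcom_stats driver is debugfs-only, so its user-visible interface is a qcom_stats directory containing one file per low power mode (e.g. "aosd", "cxsd" and "ddr" on RPMh targets, "vmin" and "vlow" on RPM targets) plus one file per subsystem found in SMEM. Each file prints the fields emitted by qcom_print_stats(); a read might look like the following, with purely illustrative values:

	Count: 42
	Last Entered At: 123456789
	Last Exited At: 123459876
	Accumulated Duration: 987654

On RPM targets, where appended stats are available, an additional "Client Votes: 0x..." line follows.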
diff --git a/drivers/soc/qcom/rpmh-rsc.c b/drivers/soc/qcom/rpmh-rsc.c
index e749a2b285d8..3a12a482f6b2 100644
--- a/drivers/soc/qcom/rpmh-rsc.c
+++ b/drivers/soc/qcom/rpmh-rsc.c
@@ -910,7 +910,6 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
{
struct device_node *dn = pdev->dev.of_node;
struct rsc_drv *drv;
- struct resource *res;
char drv_id[10] = {0};
int ret, irq;
u32 solver_config;
@@ -941,8 +940,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
drv->name = dev_name(&pdev->dev);
snprintf(drv_id, ARRAY_SIZE(drv_id), "drv-%d", drv->id);
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, drv_id);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource_byname(pdev, drv_id);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/qcom/rpmhpd.c b/drivers/soc/qcom/rpmhpd.c
index fa209b479ab3..1118345d8824 100644
--- a/drivers/soc/qcom/rpmhpd.c
+++ b/drivers/soc/qcom/rpmhpd.c
@@ -30,6 +30,7 @@
* @active_only: True if it represents an Active only peer
* @corner: current corner
* @active_corner: current active corner
+ * @enable_corner: lowest non-zero corner
* @level: An array of level (vlvl) to corner (hlvl) mappings
* derived from cmd-db
* @level_count: Number of levels supported by the power domain. max
@@ -47,6 +48,7 @@ struct rpmhpd {
const bool active_only;
unsigned int corner;
unsigned int active_corner;
+ unsigned int enable_corner;
u32 level[RPMH_ARC_MAX_LEVELS];
size_t level_count;
bool enabled;
@@ -147,6 +149,21 @@ static const struct rpmhpd_desc sdx55_desc = {
.num_pds = ARRAY_SIZE(sdx55_rpmhpds),
};
+/* SM6350 RPMH powerdomains */
+static struct rpmhpd *sm6350_rpmhpds[] = {
+ [SM6350_CX] = &sdm845_cx,
+ [SM6350_GFX] = &sdm845_gfx,
+ [SM6350_LCX] = &sdm845_lcx,
+ [SM6350_LMX] = &sdm845_lmx,
+ [SM6350_MSS] = &sdm845_mss,
+ [SM6350_MX] = &sdm845_mx,
+};
+
+static const struct rpmhpd_desc sm6350_desc = {
+ .rpmhpds = sm6350_rpmhpds,
+ .num_pds = ARRAY_SIZE(sm6350_rpmhpds),
+};
+
/* SM8150 RPMH powerdomains */
static struct rpmhpd sm8150_mmcx_ao;
@@ -204,7 +221,7 @@ static const struct rpmhpd_desc sm8250_desc = {
static struct rpmhpd sm8350_mxc_ao;
static struct rpmhpd sm8350_mxc = {
.pd = { .name = "mxc", },
- .peer = &sm8150_mmcx_ao,
+ .peer = &sm8350_mxc_ao,
.res_name = "mxc.lvl",
};
@@ -297,6 +314,7 @@ static const struct of_device_id rpmhpd_match_table[] = {
{ .compatible = "qcom,sc8180x-rpmhpd", .data = &sc8180x_desc },
{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
{ .compatible = "qcom,sdx55-rpmhpd", .data = &sdx55_desc},
+ { .compatible = "qcom,sm6350-rpmhpd", .data = &sm6350_desc },
{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
{ .compatible = "qcom,sm8350-rpmhpd", .data = &sm8350_desc },
@@ -385,13 +403,13 @@ static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
static int rpmhpd_power_on(struct generic_pm_domain *domain)
{
struct rpmhpd *pd = domain_to_rpmhpd(domain);
- int ret = 0;
+ unsigned int corner;
+ int ret;
mutex_lock(&rpmhpd_lock);
- if (pd->corner)
- ret = rpmhpd_aggregate_corner(pd, pd->corner);
-
+ corner = max(pd->corner, pd->enable_corner);
+ ret = rpmhpd_aggregate_corner(pd, corner);
if (!ret)
pd->enabled = true;
@@ -436,6 +454,10 @@ static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
i--;
if (pd->enabled) {
+ /* Ensure that the domain isn't turned off */
+ if (i < pd->enable_corner)
+ i = pd->enable_corner;
+
ret = rpmhpd_aggregate_corner(pd, i);
if (ret)
goto out;
@@ -472,6 +494,10 @@ static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
for (i = 0; i < rpmhpd->level_count; i++) {
rpmhpd->level[i] = buf[i];
+ /* Remember the first corner with non-zero level */
+ if (!rpmhpd->level[rpmhpd->enable_corner] && rpmhpd->level[i])
+ rpmhpd->enable_corner = i;
+
/*
* The AUX data may be zero padded. These 0 valued entries at
* the end of the map must be ignored.
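To make the enable_corner change concrete: on a hypothetical domain whose cmd-db level map begins {0, 48, 64, ...}, corner 0 maps to level 0, so enable_corner settles on corner 1. rpmhpd_power_on() then votes for max(pd->corner, pd->enable_corner), and rpmhpd_set_performance_state() clamps any request below enable_corner back up to it, so enabling the domain can no longer collapse into the "off" level when no performance state has been requested yet. The level values here are illustrative, not taken from any particular cmd-db.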
diff --git a/drivers/soc/qcom/rpmpd.c b/drivers/soc/qcom/rpmpd.c
index dbf494e92574..4f69fb9b2e0e 100644
--- a/drivers/soc/qcom/rpmpd.c
+++ b/drivers/soc/qcom/rpmpd.c
@@ -185,6 +185,29 @@ static const struct rpmpd_desc msm8916_desc = {
.max_state = MAX_CORNER_RPMPD_STATE,
};
+/* msm8953 RPM Power Domains */
+DEFINE_RPMPD_PAIR(msm8953, vddmd, vddmd_ao, SMPA, LEVEL, 1);
+DEFINE_RPMPD_PAIR(msm8953, vddcx, vddcx_ao, SMPA, LEVEL, 2);
+DEFINE_RPMPD_PAIR(msm8953, vddmx, vddmx_ao, SMPA, LEVEL, 7);
+
+DEFINE_RPMPD_VFL(msm8953, vddcx_vfl, SMPA, 2);
+
+static struct rpmpd *msm8953_rpmpds[] = {
+ [MSM8953_VDDMD] = &msm8953_vddmd,
+ [MSM8953_VDDMD_AO] = &msm8953_vddmd_ao,
+ [MSM8953_VDDCX] = &msm8953_vddcx,
+ [MSM8953_VDDCX_AO] = &msm8953_vddcx_ao,
+ [MSM8953_VDDCX_VFL] = &msm8953_vddcx_vfl,
+ [MSM8953_VDDMX] = &msm8953_vddmx,
+ [MSM8953_VDDMX_AO] = &msm8953_vddmx_ao,
+};
+
+static const struct rpmpd_desc msm8953_desc = {
+ .rpmpds = msm8953_rpmpds,
+ .num_pds = ARRAY_SIZE(msm8953_rpmpds),
+ .max_state = RPM_SMD_LEVEL_TURBO,
+};
+
/* msm8976 RPM Power Domains */
DEFINE_RPMPD_PAIR(msm8976, vddcx, vddcx_ao, SMPA, LEVEL, 2);
DEFINE_RPMPD_PAIR(msm8976, vddmx, vddmx_ao, SMPA, LEVEL, 6);
@@ -377,6 +400,7 @@ static const struct of_device_id rpmpd_match_table[] = {
{ .compatible = "qcom,mdm9607-rpmpd", .data = &mdm9607_desc },
{ .compatible = "qcom,msm8916-rpmpd", .data = &msm8916_desc },
{ .compatible = "qcom,msm8939-rpmpd", .data = &msm8939_desc },
+ { .compatible = "qcom,msm8953-rpmpd", .data = &msm8953_desc },
{ .compatible = "qcom,msm8976-rpmpd", .data = &msm8976_desc },
{ .compatible = "qcom,msm8994-rpmpd", .data = &msm8994_desc },
{ .compatible = "qcom,msm8996-rpmpd", .data = &msm8996_desc },
diff --git a/drivers/soc/qcom/smd-rpm.c b/drivers/soc/qcom/smd-rpm.c
index dfdd4f20f5fd..30dda1af63c8 100644
--- a/drivers/soc/qcom/smd-rpm.c
+++ b/drivers/soc/qcom/smd-rpm.c
@@ -236,6 +236,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-msm8226" },
{ .compatible = "qcom,rpm-msm8916" },
{ .compatible = "qcom,rpm-msm8936" },
+ { .compatible = "qcom,rpm-msm8953" },
{ .compatible = "qcom,rpm-msm8974" },
{ .compatible = "qcom,rpm-msm8976" },
{ .compatible = "qcom,rpm-msm8994" },
@@ -244,6 +245,7 @@ static const struct of_device_id qcom_smd_rpm_of_match[] = {
{ .compatible = "qcom,rpm-sdm660" },
{ .compatible = "qcom,rpm-sm6115" },
{ .compatible = "qcom,rpm-sm6125" },
+ { .compatible = "qcom,rpm-qcm2290" },
{ .compatible = "qcom,rpm-qcs404" },
{}
};
diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c
index 4fb5aeeb0843..c7e519bfdc8a 100644
--- a/drivers/soc/qcom/smem.c
+++ b/drivers/soc/qcom/smem.c
@@ -9,6 +9,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
@@ -240,7 +241,7 @@ static const u8 SMEM_INFO_MAGIC[] = { 0x53, 0x49, 0x49, 0x49 }; /* SIII */
* @size: size of the memory region
*/
struct smem_region {
- u32 aux_base;
+ phys_addr_t aux_base;
void __iomem *virt_base;
size_t size;
};
@@ -499,7 +500,7 @@ static void *qcom_smem_get_global(struct qcom_smem *smem,
for (i = 0; i < smem->num_regions; i++) {
region = &smem->regions[i];
- if (region->aux_base == aux_base || !aux_base) {
+ if ((u32)region->aux_base == aux_base || !aux_base) {
if (size != NULL)
*size = le32_to_cpu(entry->size);
return region->virt_base + le32_to_cpu(entry->offset);
@@ -664,7 +665,7 @@ phys_addr_t qcom_smem_virt_to_phys(void *p)
if (p < region->virt_base + region->size) {
u64 offset = p - region->virt_base;
- return (phys_addr_t)region->aux_base + offset;
+ return region->aux_base + offset;
}
}
@@ -863,12 +864,12 @@ qcom_smem_enumerate_partitions(struct qcom_smem *smem, u16 local_host)
return 0;
}
-static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
- const char *name, int i)
+static int qcom_smem_resolve_mem(struct qcom_smem *smem, const char *name,
+ struct smem_region *region)
{
+ struct device *dev = smem->dev;
struct device_node *np;
struct resource r;
- resource_size_t size;
int ret;
np = of_parse_phandle(dev->of_node, name, 0);
@@ -881,13 +882,9 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
of_node_put(np);
if (ret)
return ret;
- size = resource_size(&r);
- smem->regions[i].virt_base = devm_ioremap_wc(dev, r.start, size);
- if (!smem->regions[i].virt_base)
- return -ENOMEM;
- smem->regions[i].aux_base = (u32)r.start;
- smem->regions[i].size = size;
+ region->aux_base = r.start;
+ region->size = resource_size(&r);
return 0;
}
@@ -895,12 +892,14 @@ static int qcom_smem_map_memory(struct qcom_smem *smem, struct device *dev,
static int qcom_smem_probe(struct platform_device *pdev)
{
struct smem_header *header;
+ struct reserved_mem *rmem;
struct qcom_smem *smem;
size_t array_size;
int num_regions;
int hwlock_id;
u32 version;
int ret;
+ int i;
num_regions = 1;
if (of_find_property(pdev->dev.of_node, "qcom,rpm-msg-ram", NULL))
@@ -914,13 +913,35 @@ static int qcom_smem_probe(struct platform_device *pdev)
smem->dev = &pdev->dev;
smem->num_regions = num_regions;
- ret = qcom_smem_map_memory(smem, &pdev->dev, "memory-region", 0);
- if (ret)
- return ret;
+ rmem = of_reserved_mem_lookup(pdev->dev.of_node);
+ if (rmem) {
+ smem->regions[0].aux_base = rmem->base;
+ smem->regions[0].size = rmem->size;
+ } else {
+ /*
+ * Fall back to the memory-region reference if we're not a
+ * reserved-memory node.
+ */
+ ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]);
+ if (ret)
+ return ret;
+ }
- if (num_regions > 1 && (ret = qcom_smem_map_memory(smem, &pdev->dev,
- "qcom,rpm-msg-ram", 1)))
- return ret;
+ if (num_regions > 1) {
+ ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_regions; i++) {
+ smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev,
+ smem->regions[i].aux_base,
+ smem->regions[i].size);
+ if (!smem->regions[i].virt_base) {
+ dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base);
+ return -ENOMEM;
+ }
+ }
header = smem->regions[0].virt_base;
if (le32_to_cpu(header->initialized) != 1 ||
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 2df488333be9..4a157240f419 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -14,6 +14,7 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
@@ -40,8 +41,11 @@
#define SMP2P_MAX_ENTRY_NAME 16
#define SMP2P_FEATURE_SSR_ACK 0x1
+#define SMP2P_FLAGS_RESTART_DONE_BIT 0
+#define SMP2P_FLAGS_RESTART_ACK_BIT 1
#define SMP2P_MAGIC 0x504d5324
+#define SMP2P_ALL_FEATURES SMP2P_FEATURE_SSR_ACK
/**
* struct smp2p_smem_item - in memory communication structure
@@ -135,6 +139,10 @@ struct qcom_smp2p {
unsigned valid_entries;
+ bool ssr_ack_enabled;
+ bool ssr_ack;
+ bool negotiation_done;
+
unsigned local_pid;
unsigned remote_pid;
@@ -162,22 +170,53 @@ static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
}
}
-/**
- * qcom_smp2p_intr() - interrupt handler for incoming notifications
- * @irq: unused
- * @data: smp2p driver context
- *
- * Handle notifications from the remote side to handle newly allocated entries
- * or any changes to the state bits of existing entries.
- */
-static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *in = smp2p->in;
+ bool restart;
+
+ if (!smp2p->ssr_ack_enabled)
+ return false;
+
+ restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);
+
+ return restart != smp2p->ssr_ack;
+}
+
+static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ u32 val;
+
+ smp2p->ssr_ack = !smp2p->ssr_ack;
+
+ val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ if (smp2p->ssr_ack)
+ val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
+ out->flags = val;
+
+ qcom_smp2p_kick(smp2p);
+}
+
+static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
+{
+ struct smp2p_smem_item *out = smp2p->out;
+ struct smp2p_smem_item *in = smp2p->in;
+
+ if (in->version == out->version) {
+ out->features &= in->features;
+
+ if (out->features & SMP2P_FEATURE_SSR_ACK)
+ smp2p->ssr_ack_enabled = true;
+
+ smp2p->negotiation_done = true;
+ }
+}
+
+static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
struct smp2p_smem_item *in;
struct smp2p_entry *entry;
- struct qcom_smp2p *smp2p = data;
- unsigned smem_id = smp2p->smem_items[SMP2P_INBOUND];
- unsigned pid = smp2p->remote_pid;
- size_t size;
int irq_pin;
u32 status;
char buf[SMP2P_MAX_ENTRY_NAME];
@@ -186,18 +225,6 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
in = smp2p->in;
- /* Acquire smem item, if not already found */
- if (!in) {
- in = qcom_smem_get(pid, smem_id, &size);
- if (IS_ERR(in)) {
- dev_err(smp2p->dev,
- "Unable to acquire remote smp2p item\n");
- return IRQ_HANDLED;
- }
-
- smp2p->in = in;
- }
-
/* Match newly created entries */
for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
list_for_each_entry(entry, &smp2p->inbound, node) {
@@ -236,7 +263,51 @@ static irqreturn_t qcom_smp2p_intr(int irq, void *data)
}
}
}
+}
+
+/**
+ * qcom_smp2p_intr() - interrupt handler for incoming notifications
+ * @irq: unused
+ * @data: smp2p driver context
+ *
+ * Handle notifications from the remote side to handle newly allocated entries
+ * or any changes to the state bits of existing entries.
+ */
+static irqreturn_t qcom_smp2p_intr(int irq, void *data)
+{
+ struct smp2p_smem_item *in;
+ struct qcom_smp2p *smp2p = data;
+ unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
+ unsigned int pid = smp2p->remote_pid;
+ bool ack_restart;
+ size_t size;
+
+ in = smp2p->in;
+
+ /* Acquire smem item, if not already found */
+ if (!in) {
+ in = qcom_smem_get(pid, smem_id, &size);
+ if (IS_ERR(in)) {
+ dev_err(smp2p->dev,
+ "Unable to acquire remote smp2p item\n");
+ goto out;
+ }
+
+ smp2p->in = in;
+ }
+
+ if (!smp2p->negotiation_done)
+ qcom_smp2p_negotiate(smp2p);
+
+ if (smp2p->negotiation_done) {
+ ack_restart = qcom_smp2p_check_ssr(smp2p);
+ qcom_smp2p_notify_in(smp2p);
+
+ if (ack_restart)
+ qcom_smp2p_do_ssr_ack(smp2p);
+ }
+out:
return IRQ_HANDLED;
}
@@ -392,6 +463,7 @@ static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
out->remote_pid = smp2p->remote_pid;
out->total_entries = SMP2P_MAX_ENTRY;
out->valid_entries = 0;
+ out->features = SMP2P_ALL_FEATURES;
/*
* Make sure the rest of the header is written before we validate the
@@ -501,6 +573,7 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
if (!entry) {
ret = -ENOMEM;
+ of_node_put(node);
goto unwind_interfaces;
}
@@ -508,19 +581,25 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
spin_lock_init(&entry->lock);
ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
if (of_property_read_bool(node, "interrupt-controller")) {
ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
list_add(&entry->node, &smp2p->inbound);
} else {
ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
- if (ret < 0)
+ if (ret < 0) {
+ of_node_put(node);
goto unwind_interfaces;
+ }
list_add(&entry->node, &smp2p->outbound);
}
@@ -538,9 +617,26 @@ static int qcom_smp2p_probe(struct platform_device *pdev)
goto unwind_interfaces;
}
+ /*
+ * Treat the smp2p interrupt as a wakeup source, but keep it disabled by
+ * default. User space can decide to enable it depending on its use case.
+ * For example, if a remoteproc crashes and the device wants to handle it
+ * immediately (e.g. to not miss phone calls), it can enable the wakeup
+ * source from user space, while other devices which do not have a proper
+ * autosleep feature may want to handle it together with other wakeup
+ * events (e.g. the power button) instead of waking up immediately.
+ */
+ device_set_wakeup_capable(&pdev->dev, true);
+
+ ret = dev_pm_set_wake_irq(&pdev->dev, irq);
+ if (ret)
+ goto set_wake_irq_fail;
return 0;
+set_wake_irq_fail:
+ dev_pm_clear_wake_irq(&pdev->dev);
+
unwind_interfaces:
list_for_each_entry(entry, &smp2p->inbound, node)
irq_domain_remove(entry->domain);
@@ -565,6 +661,8 @@ static int qcom_smp2p_remove(struct platform_device *pdev)
struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
struct smp2p_entry *entry;
+ dev_pm_clear_wake_irq(&pdev->dev);
+
list_for_each_entry(entry, &smp2p->inbound, node)
irq_domain_remove(entry->domain);
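Because the device is only marked wakeup-capable and the wake IRQ stays disarmed by default, user space opts in through the standard PM sysfs attribute: writing "enabled" to the smp2p platform device's power/wakeup file arms the interrupt as a wakeup source, and writing "disabled" restores the default. The exact sysfs path depends on the platform device name, so treat it as an example rather than a fixed interface.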
diff --git a/drivers/soc/qcom/socinfo.c b/drivers/soc/qcom/socinfo.c
index 52e581167115..9a0eb59405e8 100644
--- a/drivers/soc/qcom/socinfo.c
+++ b/drivers/soc/qcom/socinfo.c
@@ -87,8 +87,8 @@ static const char *const pmic_models[] = {
[15] = "PM8901",
[16] = "PM8950/PM8027",
[17] = "PMI8950/ISL9519",
- [18] = "PM8921",
- [19] = "PM8018",
+ [18] = "PMK8001/PM8921",
+ [19] = "PMI8996/PM8018",
[20] = "PM8998/PM8015",
[21] = "PMI8998/PM8014",
[22] = "PM8821",
@@ -102,6 +102,8 @@ static const char *const pmic_models[] = {
[32] = "PM8150B",
[33] = "PMK8002",
[36] = "PM8009",
+ [38] = "PM8150C",
+ [41] = "SMB2351",
};
#endif /* CONFIG_DEBUG_FS */
@@ -281,19 +283,31 @@ static const struct soc_id soc_id[] = {
{ 319, "APQ8098" },
{ 321, "SDM845" },
{ 322, "MDM9206" },
+ { 323, "IPQ8074" },
{ 324, "SDA660" },
{ 325, "SDM658" },
{ 326, "SDA658" },
{ 327, "SDA630" },
{ 338, "SDM450" },
{ 341, "SDA845" },
+ { 342, "IPQ8072" },
+ { 343, "IPQ8076" },
+ { 344, "IPQ8078" },
{ 345, "SDM636" },
{ 346, "SDA636" },
{ 349, "SDM632" },
{ 350, "SDA632" },
{ 351, "SDA450" },
{ 356, "SM8250" },
+ { 375, "IPQ8070" },
+ { 376, "IPQ8071" },
+ { 389, "IPQ8072A" },
+ { 390, "IPQ8074A" },
+ { 391, "IPQ8076A" },
+ { 392, "IPQ8078A" },
{ 394, "SM6125" },
+ { 395, "IPQ8070A" },
+ { 396, "IPQ8071A" },
{ 402, "IPQ6018" },
{ 403, "IPQ6028" },
{ 421, "IPQ6000" },
diff --git a/drivers/soc/qcom/spm.c b/drivers/soc/qcom/spm.c
new file mode 100644
index 000000000000..f831420b7fd4
--- /dev/null
+++ b/drivers/soc/qcom/spm.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2015, Linaro Ltd.
+ *
+ * SAW power controller driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <soc/qcom/spm.h>
+
+#define SPM_CTL_INDEX 0x7f
+#define SPM_CTL_INDEX_SHIFT 4
+#define SPM_CTL_EN BIT(0)
+
+enum spm_reg {
+ SPM_REG_CFG,
+ SPM_REG_SPM_CTL,
+ SPM_REG_DLY,
+ SPM_REG_PMIC_DLY,
+ SPM_REG_PMIC_DATA_0,
+ SPM_REG_PMIC_DATA_1,
+ SPM_REG_VCTL,
+ SPM_REG_SEQ_ENTRY,
+ SPM_REG_SPM_STS,
+ SPM_REG_PMIC_STS,
+ SPM_REG_AVS_CTL,
+ SPM_REG_AVS_LIMIT,
+ SPM_REG_NR,
+};
+
+static const u16 spm_reg_offset_v4_1[SPM_REG_NR] = {
+ [SPM_REG_AVS_CTL] = 0x904,
+ [SPM_REG_AVS_LIMIT] = 0x908,
+};
+
+static const struct spm_reg_data spm_reg_660_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_660_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x101c031,
+ .avs_limit = 0x4580458,
+};
+
+static const struct spm_reg_data spm_reg_8998_gold_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4700470,
+};
+
+static const struct spm_reg_data spm_reg_8998_silver_l2 = {
+ .reg_offset = spm_reg_offset_v4_1,
+ .avs_ctl = 0x1010031,
+ .avs_limit = 0x4200420,
+};
+
+static const u16 spm_reg_offset_v3_0[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x400,
+};
+
+/* SPM register data for 8916 */
+static const struct spm_reg_data spm_reg_8916_cpu = {
+ .reg_offset = spm_reg_offset_v3_0,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v2_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x30,
+ [SPM_REG_DLY] = 0x34,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8974, 8084 */
+static const struct spm_reg_data spm_reg_8974_8084_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x1,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x03, 0x0B, 0x0F, 0x00, 0x20, 0x80, 0x10, 0xE8, 0x5B, 0x03,
+ 0x3B, 0xE8, 0x5B, 0x82, 0x10, 0x0B, 0x30, 0x06, 0x26, 0x30,
+ 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 3,
+};
+
+/* SPM register data for 8226 */
+static const struct spm_reg_data spm_reg_8226_cpu = {
+ .reg_offset = spm_reg_offset_v2_1,
+ .spm_cfg = 0x0,
+ .spm_dly = 0x3C102800,
+ .seq = { 0x60, 0x03, 0x60, 0x0B, 0x0F, 0x20, 0x10, 0x80, 0x30, 0x90,
+ 0x5B, 0x60, 0x03, 0x60, 0x3B, 0x76, 0x76, 0x0B, 0x94, 0x5B,
+ 0x80, 0x10, 0x26, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 5,
+};
+
+static const u16 spm_reg_offset_v1_1[SPM_REG_NR] = {
+ [SPM_REG_CFG] = 0x08,
+ [SPM_REG_SPM_CTL] = 0x20,
+ [SPM_REG_PMIC_DLY] = 0x24,
+ [SPM_REG_PMIC_DATA_0] = 0x28,
+ [SPM_REG_PMIC_DATA_1] = 0x2C,
+ [SPM_REG_SEQ_ENTRY] = 0x80,
+};
+
+/* SPM register data for 8064 */
+static const struct spm_reg_data spm_reg_8064_cpu = {
+ .reg_offset = spm_reg_offset_v1_1,
+ .spm_cfg = 0x1F,
+ .pmic_dly = 0x02020004,
+ .pmic_data[0] = 0x0084009C,
+ .pmic_data[1] = 0x00A4001C,
+ .seq = { 0x03, 0x0F, 0x00, 0x24, 0x54, 0x10, 0x09, 0x03, 0x01,
+ 0x10, 0x54, 0x30, 0x0C, 0x24, 0x30, 0x0F },
+ .start_index[PM_SLEEP_MODE_STBY] = 0,
+ .start_index[PM_SLEEP_MODE_SPC] = 2,
+};
+
+static inline void spm_register_write(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ if (drv->reg_data->reg_offset[reg])
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+}
+
+/* Ensure a guaranteed write before returning */
+static inline void spm_register_write_sync(struct spm_driver_data *drv,
+ enum spm_reg reg, u32 val)
+{
+ u32 ret;
+
+ if (!drv->reg_data->reg_offset[reg])
+ return;
+
+ do {
+ writel_relaxed(val, drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ ret = readl_relaxed(drv->reg_base +
+ drv->reg_data->reg_offset[reg]);
+ if (ret == val)
+ break;
+ cpu_relax();
+ } while (1);
+}
+
+static inline u32 spm_register_read(struct spm_driver_data *drv,
+ enum spm_reg reg)
+{
+ return readl_relaxed(drv->reg_base + drv->reg_data->reg_offset[reg]);
+}
+
+void spm_set_low_power_mode(struct spm_driver_data *drv,
+ enum pm_sleep_mode mode)
+{
+ u32 start_index;
+ u32 ctl_val;
+
+ start_index = drv->reg_data->start_index[mode];
+
+ ctl_val = spm_register_read(drv, SPM_REG_SPM_CTL);
+ ctl_val &= ~(SPM_CTL_INDEX << SPM_CTL_INDEX_SHIFT);
+ ctl_val |= start_index << SPM_CTL_INDEX_SHIFT;
+ ctl_val |= SPM_CTL_EN;
+ spm_register_write_sync(drv, SPM_REG_SPM_CTL, ctl_val);
+}
+
+static const struct of_device_id spm_match_table[] = {
+ { .compatible = "qcom,sdm660-gold-saw2-v4.1-l2",
+ .data = &spm_reg_660_gold_l2 },
+ { .compatible = "qcom,sdm660-silver-saw2-v4.1-l2",
+ .data = &spm_reg_660_silver_l2 },
+ { .compatible = "qcom,msm8226-saw2-v2.1-cpu",
+ .data = &spm_reg_8226_cpu },
+ { .compatible = "qcom,msm8916-saw2-v3.0-cpu",
+ .data = &spm_reg_8916_cpu },
+ { .compatible = "qcom,msm8974-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,msm8998-gold-saw2-v4.1-l2",
+ .data = &spm_reg_8998_gold_l2 },
+ { .compatible = "qcom,msm8998-silver-saw2-v4.1-l2",
+ .data = &spm_reg_8998_silver_l2 },
+ { .compatible = "qcom,apq8084-saw2-v2.1-cpu",
+ .data = &spm_reg_8974_8084_cpu },
+ { .compatible = "qcom,apq8064-saw2-v1.1-cpu",
+ .data = &spm_reg_8064_cpu },
+ { },
+};
+MODULE_DEVICE_TABLE(of, spm_match_table);
+
+static int spm_dev_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match_id;
+ struct spm_driver_data *drv;
+ struct resource *res;
+ void __iomem *addr;
+
+ drv = devm_kzalloc(&pdev->dev, sizeof(*drv), GFP_KERNEL);
+ if (!drv)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(drv->reg_base))
+ return PTR_ERR(drv->reg_base);
+
+ match_id = of_match_node(spm_match_table, pdev->dev.of_node);
+ if (!match_id)
+ return -ENODEV;
+
+ drv->reg_data = match_id->data;
+ platform_set_drvdata(pdev, drv);
+
+ /* Write the SPM sequences first.. */
+ addr = drv->reg_base + drv->reg_data->reg_offset[SPM_REG_SEQ_ENTRY];
+ __iowrite32_copy(addr, drv->reg_data->seq,
+ ARRAY_SIZE(drv->reg_data->seq) / 4);
+
+ /*
+ * ..and then the control registers.
+ * On some SoCs, if the control registers are written first and the
+ * CPU was held in reset, the reset signal could trigger the SPM state
+ * machine before the sequences are completely written.
+ */
+ spm_register_write(drv, SPM_REG_AVS_CTL, drv->reg_data->avs_ctl);
+ spm_register_write(drv, SPM_REG_AVS_LIMIT, drv->reg_data->avs_limit);
+ spm_register_write(drv, SPM_REG_CFG, drv->reg_data->spm_cfg);
+ spm_register_write(drv, SPM_REG_DLY, drv->reg_data->spm_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DLY, drv->reg_data->pmic_dly);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_0,
+ drv->reg_data->pmic_data[0]);
+ spm_register_write(drv, SPM_REG_PMIC_DATA_1,
+ drv->reg_data->pmic_data[1]);
+
+ /* Set up Standby as the default low power mode */
+ if (drv->reg_data->reg_offset[SPM_REG_SPM_CTL])
+ spm_set_low_power_mode(drv, PM_SLEEP_MODE_STBY);
+
+ return 0;
+}
+
+static struct platform_driver spm_driver = {
+ .probe = spm_dev_probe,
+ .driver = {
+ .name = "qcom_spm",
+ .of_match_table = spm_match_table,
+ },
+};
+
+static int __init qcom_spm_init(void)
+{
+ return platform_driver_register(&spm_driver);
+}
+arch_initcall(qcom_spm_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 07e0ecd64319..ce16ef5c939c 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -186,6 +186,7 @@ config ARCH_R8A77995
select SYSC_R8A77995
help
This enables support for the Renesas R-Car D3 SoC.
+ This includes different gradings like R-Car D3e.
config ARCH_R8A77990
bool "ARM64 Platform support for R-Car E3"
@@ -193,6 +194,7 @@ config ARCH_R8A77990
select SYSC_R8A77990
help
This enables support for the Renesas R-Car E3 SoC.
+ This includes different gradings like R-Car E3e.
config ARCH_R8A77950
bool "ARM64 Platform support for R-Car H3 ES1.x"
@@ -208,7 +210,7 @@ config ARCH_R8A77951
help
This enables support for the Renesas R-Car H3 SoC (revisions 2.0 and
later).
- This includes different gradings like R-Car H3e-2G.
+ This includes different gradings like R-Car H3e, H3e-2G, and H3Ne.
config ARCH_R8A77965
bool "ARM64 Platform support for R-Car M3-N"
@@ -216,6 +218,7 @@ config ARCH_R8A77965
select SYSC_R8A77965
help
This enables support for the Renesas R-Car M3-N SoC.
+ This includes different gradings like R-Car M3Ne and M3Ne-2G.
config ARCH_R8A77960
bool "ARM64 Platform support for R-Car M3-W"
@@ -230,7 +233,7 @@ config ARCH_R8A77961
select SYSC_R8A77961
help
This enables support for the Renesas R-Car M3-W+ SoC.
- This includes different gradings like R-Car M3e-2G.
+ This includes different gradings like R-Car M3e and M3e-2G.
config ARCH_R8A77980
bool "ARM64 Platform support for R-Car V3H"
diff --git a/drivers/soc/renesas/renesas-soc.c b/drivers/soc/renesas/renesas-soc.c
index dab9f5a0aad0..7961b0be1850 100644
--- a/drivers/soc/renesas/renesas-soc.c
+++ b/drivers/soc/renesas/renesas-soc.c
@@ -285,17 +285,22 @@ static const struct of_device_id renesas_socs[] __initconst = {
{ .compatible = "renesas,r8a7795", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77951
+ { .compatible = "renesas,r8a779m0", .data = &soc_rcar_h3 },
{ .compatible = "renesas,r8a779m1", .data = &soc_rcar_h3 },
+ { .compatible = "renesas,r8a779m8", .data = &soc_rcar_h3 },
#endif
#ifdef CONFIG_ARCH_R8A77960
{ .compatible = "renesas,r8a7796", .data = &soc_rcar_m3_w },
#endif
#ifdef CONFIG_ARCH_R8A77961
{ .compatible = "renesas,r8a77961", .data = &soc_rcar_m3_w },
+ { .compatible = "renesas,r8a779m2", .data = &soc_rcar_m3_w },
{ .compatible = "renesas,r8a779m3", .data = &soc_rcar_m3_w },
#endif
#ifdef CONFIG_ARCH_R8A77965
{ .compatible = "renesas,r8a77965", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m4", .data = &soc_rcar_m3_n },
+ { .compatible = "renesas,r8a779m5", .data = &soc_rcar_m3_n },
#endif
#ifdef CONFIG_ARCH_R8A77970
{ .compatible = "renesas,r8a77970", .data = &soc_rcar_v3m },
@@ -305,9 +310,11 @@ static const struct of_device_id renesas_socs[] __initconst = {
#endif
#ifdef CONFIG_ARCH_R8A77990
{ .compatible = "renesas,r8a77990", .data = &soc_rcar_e3 },
+ { .compatible = "renesas,r8a779m6", .data = &soc_rcar_e3 },
#endif
#ifdef CONFIG_ARCH_R8A77995
{ .compatible = "renesas,r8a77995", .data = &soc_rcar_d3 },
+ { .compatible = "renesas,r8a779m7", .data = &soc_rcar_d3 },
#endif
#ifdef CONFIG_ARCH_R8A779A0
{ .compatible = "renesas,r8a779a0", .data = &soc_rcar_v3u },
diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig
index 5745d7e5908e..e2cedef1e8d1 100644
--- a/drivers/soc/samsung/Kconfig
+++ b/drivers/soc/samsung/Kconfig
@@ -13,18 +13,21 @@ config EXYNOS_ASV_ARM
depends on EXYNOS_CHIPID
config EXYNOS_CHIPID
- bool "Exynos ChipID controller and ASV driver" if COMPILE_TEST
+ tristate "Exynos ChipID controller and ASV driver"
depends on ARCH_EXYNOS || COMPILE_TEST
+ default ARCH_EXYNOS
select EXYNOS_ASV_ARM if ARM && ARCH_EXYNOS
select MFD_SYSCON
select SOC_BUS
help
Support for Samsung Exynos SoC ChipID and Adaptive Supply Voltage.
+ This driver can also be built as module (exynos_chipid).
config EXYNOS_PMU
bool "Exynos PMU controller driver" if COMPILE_TEST
depends on ARCH_EXYNOS || ((ARM || ARM64) && COMPILE_TEST)
select EXYNOS_PMU_ARM_DRIVERS if ARM && ARCH_EXYNOS
+ select MFD_CORE
# There is no need to enable these drivers for ARMv8
config EXYNOS_PMU_ARM_DRIVERS
diff --git a/drivers/soc/samsung/Makefile b/drivers/soc/samsung/Makefile
index 0c523a8de4eb..2ae4bea804cf 100644
--- a/drivers/soc/samsung/Makefile
+++ b/drivers/soc/samsung/Makefile
@@ -1,8 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_EXYNOS_ASV_ARM) += exynos5422-asv.o
+obj-$(CONFIG_EXYNOS_CHIPID) += exynos_chipid.o
+exynos_chipid-y += exynos-chipid.o exynos-asv.o
-obj-$(CONFIG_EXYNOS_CHIPID) += exynos-chipid.o exynos-asv.o
obj-$(CONFIG_EXYNOS_PMU) += exynos-pmu.o
obj-$(CONFIG_EXYNOS_PMU_ARM_DRIVERS) += exynos3250-pmu.o exynos4-pmu.o \
diff --git a/drivers/soc/samsung/exynos-chipid.c b/drivers/soc/samsung/exynos-chipid.c
index 5c1d0f97f766..a28053ec7e6a 100644
--- a/drivers/soc/samsung/exynos-chipid.c
+++ b/drivers/soc/samsung/exynos-chipid.c
@@ -15,7 +15,9 @@
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/mfd/syscon.h>
+#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
@@ -24,6 +26,17 @@
#include "exynos-asv.h"
+struct exynos_chipid_variant {
+ unsigned int rev_reg; /* revision register offset */
+ unsigned int main_rev_shift; /* main revision offset in rev_reg */
+ unsigned int sub_rev_shift; /* sub revision offset in rev_reg */
+};
+
+struct exynos_chipid_info {
+ u32 product_id;
+ u32 revision;
+};
+
static const struct exynos_soc_id {
const char *name;
unsigned int id;
@@ -42,6 +55,8 @@ static const struct exynos_soc_id {
{ "EXYNOS5440", 0xE5440000 },
{ "EXYNOS5800", 0xE5422000 },
{ "EXYNOS7420", 0xE7420000 },
+ { "EXYNOS850", 0xE3830000 },
+ { "EXYNOSAUTOV9", 0xAAA80000 },
};
static const char *product_id_to_soc_id(unsigned int product_id)
@@ -49,31 +64,57 @@ static const char *product_id_to_soc_id(unsigned int product_id)
int i;
for (i = 0; i < ARRAY_SIZE(soc_ids); i++)
- if ((product_id & EXYNOS_MASK) == soc_ids[i].id)
+ if (product_id == soc_ids[i].id)
return soc_ids[i].name;
return NULL;
}
+static int exynos_chipid_get_chipid_info(struct regmap *regmap,
+ const struct exynos_chipid_variant *data,
+ struct exynos_chipid_info *soc_info)
+{
+ int ret;
+ unsigned int val, main_rev, sub_rev;
+
+ ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &val);
+ if (ret < 0)
+ return ret;
+ soc_info->product_id = val & EXYNOS_MASK;
+
+ if (data->rev_reg != EXYNOS_CHIPID_REG_PRO_ID) {
+ ret = regmap_read(regmap, data->rev_reg, &val);
+ if (ret < 0)
+ return ret;
+ }
+ main_rev = (val >> data->main_rev_shift) & EXYNOS_REV_PART_MASK;
+ sub_rev = (val >> data->sub_rev_shift) & EXYNOS_REV_PART_MASK;
+ soc_info->revision = (main_rev << EXYNOS_REV_PART_SHIFT) | sub_rev;
+
+ return 0;
+}
+
static int exynos_chipid_probe(struct platform_device *pdev)
{
+ const struct exynos_chipid_variant *drv_data;
+ struct exynos_chipid_info soc_info;
struct soc_device_attribute *soc_dev_attr;
struct soc_device *soc_dev;
struct device_node *root;
struct regmap *regmap;
- u32 product_id;
- u32 revision;
int ret;
+ drv_data = of_device_get_match_data(&pdev->dev);
+ if (!drv_data)
+ return -EINVAL;
+
regmap = device_node_to_regmap(pdev->dev.of_node);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- ret = regmap_read(regmap, EXYNOS_CHIPID_REG_PRO_ID, &product_id);
+ ret = exynos_chipid_get_chipid_info(regmap, drv_data, &soc_info);
if (ret < 0)
return ret;
- revision = product_id & EXYNOS_REV_MASK;
-
soc_dev_attr = devm_kzalloc(&pdev->dev, sizeof(*soc_dev_attr),
GFP_KERNEL);
if (!soc_dev_attr)
@@ -86,8 +127,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
of_node_put(root);
soc_dev_attr->revision = devm_kasprintf(&pdev->dev, GFP_KERNEL,
- "%x", revision);
- soc_dev_attr->soc_id = product_id_to_soc_id(product_id);
+ "%x", soc_info.revision);
+ soc_dev_attr->soc_id = product_id_to_soc_id(soc_info.product_id);
if (!soc_dev_attr->soc_id) {
pr_err("Unknown SoC\n");
return -ENODEV;
@@ -104,9 +145,8 @@ static int exynos_chipid_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, soc_dev);
- dev_info(soc_device_to_device(soc_dev),
- "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
- soc_dev_attr->soc_id, product_id, revision);
+ dev_info(&pdev->dev, "Exynos: CPU[%s] PRO_ID[0x%x] REV[0x%x] Detected\n",
+ soc_dev_attr->soc_id, soc_info.product_id, soc_info.revision);
return 0;
@@ -125,10 +165,29 @@ static int exynos_chipid_remove(struct platform_device *pdev)
return 0;
}
+static const struct exynos_chipid_variant exynos4210_chipid_drv_data = {
+ .rev_reg = 0x0,
+ .main_rev_shift = 4,
+ .sub_rev_shift = 0,
+};
+
+static const struct exynos_chipid_variant exynos850_chipid_drv_data = {
+ .rev_reg = 0x10,
+ .main_rev_shift = 20,
+ .sub_rev_shift = 16,
+};
+
static const struct of_device_id exynos_chipid_of_device_ids[] = {
- { .compatible = "samsung,exynos4210-chipid" },
- {}
+ {
+ .compatible = "samsung,exynos4210-chipid",
+ .data = &exynos4210_chipid_drv_data,
+ }, {
+ .compatible = "samsung,exynos850-chipid",
+ .data = &exynos850_chipid_drv_data,
+ },
+ { }
};
+MODULE_DEVICE_TABLE(of, exynos_chipid_of_device_ids);
static struct platform_driver exynos_chipid_driver = {
.driver = {
@@ -138,4 +197,11 @@ static struct platform_driver exynos_chipid_driver = {
.probe = exynos_chipid_probe,
.remove = exynos_chipid_remove,
};
-builtin_platform_driver(exynos_chipid_driver);
+module_platform_driver(exynos_chipid_driver);
+
+MODULE_DESCRIPTION("Samsung Exynos ChipID controller and ASV driver");
+MODULE_AUTHOR("Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>");
+MODULE_AUTHOR("Krzysztof Kozlowski <krzysztof.kozlowski@canonical.com>");
+MODULE_AUTHOR("Pankaj Dubey <pankaj.dubey@samsung.com>");
+MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/samsung/exynos5422-asv.c b/drivers/soc/samsung/exynos5422-asv.c
index ca409a976e34..475ae5276529 100644
--- a/drivers/soc/samsung/exynos5422-asv.c
+++ b/drivers/soc/samsung/exynos5422-asv.c
@@ -503,3 +503,4 @@ int exynos5422_asv_init(struct exynos_asv *asv)
return 0;
}
+EXPORT_SYMBOL_GPL(exynos5422_asv_init);
diff --git a/drivers/soc/samsung/pm_domains.c b/drivers/soc/samsung/pm_domains.c
index 5ec0c13f0aaf..d07f3c9d6903 100644
--- a/drivers/soc/samsung/pm_domains.c
+++ b/drivers/soc/samsung/pm_domains.c
@@ -28,7 +28,6 @@ struct exynos_pm_domain_config {
*/
struct exynos_pm_domain {
void __iomem *base;
- bool is_off;
struct generic_pm_domain pd;
u32 local_pwr_cfg;
};
diff --git a/drivers/soc/sunxi/sunxi_sram.c b/drivers/soc/sunxi/sunxi_sram.c
index 42833e33a96c..a8f3876963a0 100644
--- a/drivers/soc/sunxi/sunxi_sram.c
+++ b/drivers/soc/sunxi/sunxi_sram.c
@@ -331,7 +331,6 @@ static struct regmap_config sunxi_sram_emac_clock_regmap = {
static int sunxi_sram_probe(struct platform_device *pdev)
{
- struct resource *res;
struct dentry *d;
struct regmap *emac_clock;
const struct sunxi_sramc_variant *variant;
@@ -342,8 +341,7 @@ static int sunxi_sram_probe(struct platform_device *pdev)
if (!variant)
return -EINVAL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
diff --git a/drivers/soc/tegra/Makefile b/drivers/soc/tegra/Makefile
index 9c809c1814bd..054e862b63d8 100644
--- a/drivers/soc/tegra/Makefile
+++ b/drivers/soc/tegra/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_SOC_TEGRA_PMC) += pmc.o
obj-$(CONFIG_SOC_TEGRA_POWERGATE_BPMP) += powergate-bpmp.o
obj-$(CONFIG_SOC_TEGRA20_VOLTAGE_COUPLER) += regulators-tegra20.o
obj-$(CONFIG_SOC_TEGRA30_VOLTAGE_COUPLER) += regulators-tegra30.o
+obj-$(CONFIG_ARCH_TEGRA_186_SOC) += ari-tegra186.o
diff --git a/drivers/soc/tegra/ari-tegra186.c b/drivers/soc/tegra/ari-tegra186.c
new file mode 100644
index 000000000000..02577853ec49
--- /dev/null
+++ b/drivers/soc/tegra/ari-tegra186.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/panic_notifier.h>
+
+#define SMC_SIP_INVOKE_MCE 0xc2ffff00
+#define MCE_SMC_READ_MCA 12
+
+#define MCA_ARI_CMD_RD_SERR 1
+
+#define MCA_ARI_RW_SUBIDX_STAT 1
+#define SERR_STATUS_VAL BIT_ULL(63)
+
+#define MCA_ARI_RW_SUBIDX_ADDR 2
+#define MCA_ARI_RW_SUBIDX_MSC1 3
+#define MCA_ARI_RW_SUBIDX_MSC2 4
+
+static const char * const bank_names[] = {
+ "SYS:DPMU", "ROC:IOB", "ROC:MCB", "ROC:CCE", "ROC:CQX", "ROC:CTU",
+};
+
+static void read_uncore_mca(u8 cmd, u8 idx, u8 subidx, u8 inst, u64 *data)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(SMC_SIP_INVOKE_MCE | MCE_SMC_READ_MCA,
+ ((u64)inst << 24) | ((u64)idx << 16) |
+ ((u64)subidx << 8) | ((u64)cmd << 0),
+ 0, 0, 0, 0, 0, 0, &res);
+
+ *data = res.a2;
+}
+
+static int tegra186_ari_panic_handler(struct notifier_block *nb,
+ unsigned long code, void *unused)
+{
+ u64 status;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bank_names); i++) {
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i, MCA_ARI_RW_SUBIDX_STAT,
+ 0, &status);
+
+ if (status & SERR_STATUS_VAL) {
+ u64 addr, misc1, misc2;
+
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_ADDR, 0, &addr);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC1, 0, &misc1);
+ read_uncore_mca(MCA_ARI_CMD_RD_SERR, i,
+ MCA_ARI_RW_SUBIDX_MSC2, 0, &misc2);
+
+ pr_crit("Machine Check Error in %s\n"
+ " status=0x%llx addr=0x%llx\n"
+ " msc1=0x%llx msc2=0x%llx\n",
+ bank_names[i], status, addr, misc1, misc2);
+ }
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block tegra186_ari_panic_nb = {
+ .notifier_call = tegra186_ari_panic_handler,
+};
+
+static int __init tegra186_ari_init(void)
+{
+ if (of_machine_is_compatible("nvidia,tegra186"))
+ atomic_notifier_chain_register(&panic_notifier_list, &tegra186_ari_panic_nb);
+
+ return 0;
+}
+early_initcall(tegra186_ari_init);
diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c
index 50091c4ec948..575d6d5b4294 100644
--- a/drivers/soc/tegra/pmc.c
+++ b/drivers/soc/tegra/pmc.c
@@ -360,6 +360,7 @@ struct tegra_pmc_soc {
unsigned int num_pmc_clks;
bool has_blink_output;
bool has_usb_sleepwalk;
+ bool supports_core_domain;
};
/**
@@ -782,7 +783,7 @@ static int tegra_powergate_power_up(struct tegra_powergate *pg,
err = reset_control_deassert(pg->reset);
if (err)
- goto powergate_off;
+ goto disable_clks;
usleep_range(10, 20);
@@ -2815,8 +2816,7 @@ static int tegra_pmc_probe(struct platform_device *pdev)
return err;
/* take over the memory region from the early initialization */
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- base = devm_ioremap_resource(&pdev->dev, res);
+ base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -3041,6 +3041,7 @@ static void tegra20_pmc_setup_irq_polarity(struct tegra_pmc *pmc,
}
static const struct tegra_pmc_soc tegra20_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra20_powergates),
.powergates = tegra20_powergates,
.num_cpu_powergates = 0,
@@ -3065,7 +3066,7 @@ static const struct tegra_pmc_soc tegra20_pmc_soc = {
.pmc_clks_data = NULL,
.num_pmc_clks = 0,
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra30_powergates[] = {
@@ -3101,6 +3102,7 @@ static const char * const tegra30_reset_sources[] = {
};
static const struct tegra_pmc_soc tegra30_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra30_powergates),
.powergates = tegra30_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra30_cpu_powergates),
@@ -3125,7 +3127,7 @@ static const struct tegra_pmc_soc tegra30_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra114_powergates[] = {
@@ -3157,6 +3159,7 @@ static const u8 tegra114_cpu_powergates[] = {
};
static const struct tegra_pmc_soc tegra114_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra114_powergates),
.powergates = tegra114_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra114_cpu_powergates),
@@ -3181,7 +3184,7 @@ static const struct tegra_pmc_soc tegra114_pmc_soc = {
.pmc_clks_data = tegra_pmc_clks_data,
.num_pmc_clks = ARRAY_SIZE(tegra_pmc_clks_data),
.has_blink_output = true,
- .has_usb_sleepwalk = false,
+ .has_usb_sleepwalk = true,
};
static const char * const tegra124_powergates[] = {
@@ -3273,6 +3276,7 @@ static const struct pinctrl_pin_desc tegra124_pin_descs[] = {
};
static const struct tegra_pmc_soc tegra124_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra124_powergates),
.powergates = tegra124_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra124_cpu_powergates),
@@ -3398,6 +3402,7 @@ static const struct tegra_wake_event tegra210_wake_events[] = {
};
static const struct tegra_pmc_soc tegra210_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = ARRAY_SIZE(tegra210_powergates),
.powergates = tegra210_powergates,
.num_cpu_powergates = ARRAY_SIZE(tegra210_cpu_powergates),
@@ -3555,6 +3560,7 @@ static const struct tegra_wake_event tegra186_wake_events[] = {
};
static const struct tegra_pmc_soc tegra186_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3689,6 +3695,7 @@ static const struct tegra_wake_event tegra194_wake_events[] = {
};
static const struct tegra_pmc_soc tegra194_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3757,6 +3764,7 @@ static const char * const tegra234_reset_sources[] = {
};
static const struct tegra_pmc_soc tegra234_pmc_soc = {
+ .supports_core_domain = false,
.num_powergates = 0,
.powergates = NULL,
.num_cpu_powergates = 0,
@@ -3804,6 +3812,14 @@ static void tegra_pmc_sync_state(struct device *dev)
int err;
/*
+ * Newer device-trees have power domains, but we need to prepare all
+ * device drivers with runtime PM and OPP support first, otherwise
+ * state syncing is unsafe.
+ */
+ if (!pmc->soc->supports_core_domain)
+ return;
+
+ /*
* Older device-trees don't have core PD, and thus, there are
* no dependencies that will block the state syncing. We shouldn't
* mark the domain as synced in this case.
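For context on the guard added above: a sync_state callback runs once all consumers of the device have probed, and is registered through struct device_driver. A minimal sketch of the usual wiring follows; the driver name and surrounding fields are illustrative and not taken from this patch.

#include <linux/platform_device.h>

static struct platform_driver tegra_pmc_driver = {
	.driver = {
		.name = "tegra-pmc",			/* illustrative name */
		.sync_state = tegra_pmc_sync_state,	/* invoked after all consumers probe */
	},
	.probe = tegra_pmc_probe,
};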
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 83e352b0c8f9..596705d24400 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -228,6 +228,18 @@ config SPI_CADENCE_QUADSPI
device with a Cadence QSPI controller and want to access the
Flash as an MTD device.
+config SPI_CADENCE_XSPI
+ tristate "Cadence XSPI controller"
+ depends on (OF || COMPILE_TEST) && HAS_IOMEM
+ depends on SPI_MEM
+ help
+ Enable support for the Cadence XSPI Flash controller.
+
+ Cadence XSPI is a specialized controller for connecting an SPI
+	  Flash over an up to 8-bit wide bus. Enable this option if you have a
+ device with a Cadence XSPI controller and want to access the
+ Flash as an MTD device.
+
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
depends on ARCH_CLPS711X || COMPILE_TEST
@@ -406,6 +418,15 @@ config SPI_IMX
help
This enables support for the Freescale i.MX SPI controllers.
+config SPI_INGENIC
+ tristate "Ingenic JZ47xx SoCs SPI controller"
+ depends on MACH_INGENIC || COMPILE_TEST
+ help
+ This enables support for the Ingenic JZ47xx SoCs SPI controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called spi-ingenic.
+
config SPI_JCORE
tristate "J-Core SPI Master"
depends on OF && (SUPERH || COMPILE_TEST)
@@ -738,10 +759,11 @@ config SPI_S3C24XX_FIQ
TX and RX data paths.
config SPI_S3C64XX
- tristate "Samsung S3C64XX series type SPI"
+ tristate "Samsung S3C64XX/Exynos SoC series type SPI"
depends on (PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST)
help
- SPI driver for Samsung S3C64XX and newer SoCs.
+ SPI driver for Samsung S3C64XX, S5Pv210 and Exynos SoCs.
+	  Choose Y/M here only if you build for such a Samsung SoC.
config SPI_SC18IS602
tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 699db95c8441..dd7393a6046f 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += spi-cadence-quadspi.o
+obj-$(CONFIG_SPI_CADENCE_XSPI) += spi-cadence-xspi.o
obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
@@ -59,6 +60,7 @@ obj-$(CONFIG_SPI_HISI_KUNPENG) += spi-hisi-kunpeng.o
obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_INGENIC) += spi-ingenic.o
obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o
obj-$(CONFIG_SPI_JCORE) += spi-jcore.o
obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index 95d4fa32c299..92d9610df1fd 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -310,7 +310,7 @@ static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
return mode;
ifr |= atmel_qspi_modes[mode].config;
- if (op->dummy.buswidth && op->dummy.nbytes)
+ if (op->dummy.nbytes)
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
/*
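The dummy-cycle conversion touched above follows the spi-mem convention: dummy.nbytes bytes are clocked out over dummy.buswidth lines, so the cycle count is nbytes * 8 / buswidth. A tiny worked sketch with illustrative values:

	/* Worked example: 3 dummy bytes on a quad (4-bit) bus */
	unsigned int nbytes = 3, buswidth = 4;
	unsigned int dummy_cycles = nbytes * 8 / buswidth;	/* 24 bits / 4 lines = 6 cycles */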
diff --git a/drivers/spi/spi-altera-dfl.c b/drivers/spi/spi-altera-dfl.c
index 44fc9ee13fc7..ca40923258af 100644
--- a/drivers/spi/spi-altera-dfl.c
+++ b/drivers/spi/spi-altera-dfl.c
@@ -134,7 +134,7 @@ static int dfl_spi_altera_probe(struct dfl_device *dfl_dev)
if (!master)
return -ENOMEM;
- master->bus_num = dfl_dev->id;
+ master->bus_num = -1;
hw = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-altera-platform.c b/drivers/spi/spi-altera-platform.c
index f7a7c14e3679..65147aae82a1 100644
--- a/drivers/spi/spi-altera-platform.c
+++ b/drivers/spi/spi-altera-platform.c
@@ -48,7 +48,7 @@ static int altera_spi_probe(struct platform_device *pdev)
return err;
/* setup the master state. */
- master->bus_num = pdev->id;
+ master->bus_num = -1;
if (pdata) {
if (pdata->num_chipselect > ALTERA_SPI_MAX_CS) {
diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c
index 3cf76096a76d..4b3ac7aceaf6 100644
--- a/drivers/spi/spi-amd.c
+++ b/drivers/spi/spi-amd.c
@@ -38,126 +38,102 @@ struct amd_spi {
void __iomem *io_remap_addr;
unsigned long io_base_addr;
u32 rom_addr;
- u8 chip_select;
};
-static inline u8 amd_spi_readreg8(struct spi_master *master, int idx)
+static inline u8 amd_spi_readreg8(struct amd_spi *amd_spi, int idx)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
-
return ioread8((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
-static inline void amd_spi_writereg8(struct spi_master *master, int idx,
- u8 val)
+static inline void amd_spi_writereg8(struct amd_spi *amd_spi, int idx, u8 val)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
-
iowrite8(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
}
-static inline void amd_spi_setclear_reg8(struct spi_master *master, int idx,
- u8 set, u8 clear)
+static void amd_spi_setclear_reg8(struct amd_spi *amd_spi, int idx, u8 set, u8 clear)
{
- u8 tmp = amd_spi_readreg8(master, idx);
+ u8 tmp = amd_spi_readreg8(amd_spi, idx);
tmp = (tmp & ~clear) | set;
- amd_spi_writereg8(master, idx, tmp);
+ amd_spi_writereg8(amd_spi, idx, tmp);
}
-static inline u32 amd_spi_readreg32(struct spi_master *master, int idx)
+static inline u32 amd_spi_readreg32(struct amd_spi *amd_spi, int idx)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
-
return ioread32((u8 __iomem *)amd_spi->io_remap_addr + idx);
}
-static inline void amd_spi_writereg32(struct spi_master *master, int idx,
- u32 val)
+static inline void amd_spi_writereg32(struct amd_spi *amd_spi, int idx, u32 val)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
-
iowrite32(val, ((u8 __iomem *)amd_spi->io_remap_addr + idx));
}
-static inline void amd_spi_setclear_reg32(struct spi_master *master, int idx,
- u32 set, u32 clear)
+static inline void amd_spi_setclear_reg32(struct amd_spi *amd_spi, int idx, u32 set, u32 clear)
{
- u32 tmp = amd_spi_readreg32(master, idx);
+ u32 tmp = amd_spi_readreg32(amd_spi, idx);
tmp = (tmp & ~clear) | set;
- amd_spi_writereg32(master, idx, tmp);
+ amd_spi_writereg32(amd_spi, idx, tmp);
}
-static void amd_spi_select_chip(struct spi_master *master)
+static void amd_spi_select_chip(struct amd_spi *amd_spi, u8 cs)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
- u8 chip_select = amd_spi->chip_select;
-
- amd_spi_setclear_reg8(master, AMD_SPI_ALT_CS_REG, chip_select,
- AMD_SPI_ALT_CS_MASK);
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_ALT_CS_REG, cs, AMD_SPI_ALT_CS_MASK);
}
-static void amd_spi_clear_fifo_ptr(struct spi_master *master)
+static void amd_spi_clear_fifo_ptr(struct amd_spi *amd_spi)
{
- amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR,
- AMD_SPI_FIFO_CLEAR);
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_FIFO_CLEAR, AMD_SPI_FIFO_CLEAR);
}
-static void amd_spi_set_opcode(struct spi_master *master, u8 cmd_opcode)
+static void amd_spi_set_opcode(struct amd_spi *amd_spi, u8 cmd_opcode)
{
- amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, cmd_opcode,
- AMD_SPI_OPCODE_MASK);
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, cmd_opcode, AMD_SPI_OPCODE_MASK);
}
-static inline void amd_spi_set_rx_count(struct spi_master *master,
- u8 rx_count)
+static inline void amd_spi_set_rx_count(struct amd_spi *amd_spi, u8 rx_count)
{
- amd_spi_setclear_reg8(master, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_RX_COUNT_REG, rx_count, 0xff);
}
-static inline void amd_spi_set_tx_count(struct spi_master *master,
- u8 tx_count)
+static inline void amd_spi_set_tx_count(struct amd_spi *amd_spi, u8 tx_count)
{
- amd_spi_setclear_reg8(master, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
+ amd_spi_setclear_reg8(amd_spi, AMD_SPI_TX_COUNT_REG, tx_count, 0xff);
}
-static inline int amd_spi_busy_wait(struct amd_spi *amd_spi)
+static int amd_spi_busy_wait(struct amd_spi *amd_spi)
{
- bool spi_busy;
int timeout = 100000;
/* poll for SPI bus to become idle */
- spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
- AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
- while (spi_busy) {
+ while (amd_spi_readreg32(amd_spi, AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) {
usleep_range(10, 20);
if (timeout-- < 0)
return -ETIMEDOUT;
-
- spi_busy = (ioread32((u8 __iomem *)amd_spi->io_remap_addr +
- AMD_SPI_CTRL0_REG) & AMD_SPI_BUSY) == AMD_SPI_BUSY;
}
return 0;
}
-static void amd_spi_execute_opcode(struct spi_master *master)
+static int amd_spi_execute_opcode(struct amd_spi *amd_spi)
{
- struct amd_spi *amd_spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = amd_spi_busy_wait(amd_spi);
+ if (ret)
+ return ret;
/* Set ExecuteOpCode bit in the CTRL0 register */
- amd_spi_setclear_reg32(master, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD,
- AMD_SPI_EXEC_CMD);
+ amd_spi_setclear_reg32(amd_spi, AMD_SPI_CTRL0_REG, AMD_SPI_EXEC_CMD, AMD_SPI_EXEC_CMD);
- amd_spi_busy_wait(amd_spi);
+ return 0;
}
static int amd_spi_master_setup(struct spi_device *spi)
{
- struct spi_master *master = spi->master;
+ struct amd_spi *amd_spi = spi_master_get_devdata(spi->master);
- amd_spi_clear_fifo_ptr(master);
+ amd_spi_clear_fifo_ptr(amd_spi);
return 0;
}
@@ -185,19 +161,18 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
tx_len = xfer->len - 1;
cmd_opcode = *(u8 *)xfer->tx_buf;
buf++;
- amd_spi_set_opcode(master, cmd_opcode);
+ amd_spi_set_opcode(amd_spi, cmd_opcode);
/* Write data into the FIFO. */
for (i = 0; i < tx_len; i++) {
- iowrite8(buf[i],
- ((u8 __iomem *)amd_spi->io_remap_addr +
+ iowrite8(buf[i], ((u8 __iomem *)amd_spi->io_remap_addr +
AMD_SPI_FIFO_BASE + i));
}
- amd_spi_set_tx_count(master, tx_len);
- amd_spi_clear_fifo_ptr(master);
+ amd_spi_set_tx_count(amd_spi, tx_len);
+ amd_spi_clear_fifo_ptr(amd_spi);
/* Execute command */
- amd_spi_execute_opcode(master);
+ amd_spi_execute_opcode(amd_spi);
}
if (m_cmd & AMD_SPI_XFER_RX) {
/*
@@ -206,15 +181,14 @@ static inline int amd_spi_fifo_xfer(struct amd_spi *amd_spi,
*/
rx_len = xfer->len;
buf = (u8 *)xfer->rx_buf;
- amd_spi_set_rx_count(master, rx_len);
- amd_spi_clear_fifo_ptr(master);
+ amd_spi_set_rx_count(amd_spi, rx_len);
+ amd_spi_clear_fifo_ptr(amd_spi);
/* Execute command */
- amd_spi_execute_opcode(master);
+ amd_spi_execute_opcode(amd_spi);
+ amd_spi_busy_wait(amd_spi);
/* Read data from FIFO to receive buffer */
for (i = 0; i < rx_len; i++)
- buf[i] = amd_spi_readreg8(master,
- AMD_SPI_FIFO_BASE +
- tx_len + i);
+ buf[i] = amd_spi_readreg8(amd_spi, AMD_SPI_FIFO_BASE + tx_len + i);
}
}
@@ -233,8 +207,7 @@ static int amd_spi_master_transfer(struct spi_master *master,
struct amd_spi *amd_spi = spi_master_get_devdata(master);
struct spi_device *spi = msg->spi;
- amd_spi->chip_select = spi->chip_select;
- amd_spi_select_chip(master);
+ amd_spi_select_chip(amd_spi, spi->chip_select);
/*
* Extract spi_transfers from the spi message and
diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c
index 8c8352625d23..9cd738682aab 100644
--- a/drivers/spi/spi-at91-usart.c
+++ b/drivers/spi/spi-at91-usart.c
@@ -14,7 +14,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_platform.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
@@ -482,29 +482,12 @@ static void at91_usart_spi_init(struct at91_usart_spi *aus)
static int at91_usart_gpio_setup(struct platform_device *pdev)
{
- struct device_node *np = pdev->dev.parent->of_node;
- int i;
- int ret;
- int nb;
-
- if (!np)
- return -EINVAL;
-
- nb = of_gpio_named_count(np, "cs-gpios");
- for (i = 0; i < nb; i++) {
- int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+ struct gpio_descs *cs_gpios;
- if (cs_gpio < 0)
- return cs_gpio;
+ cs_gpios = devm_gpiod_get_array_optional(&pdev->dev, "cs", GPIOD_OUT_LOW);
- if (gpio_is_valid(cs_gpio)) {
- ret = devm_gpio_request_one(&pdev->dev, cs_gpio,
- GPIOF_DIR_OUT,
- dev_name(&pdev->dev));
- if (ret)
- return ret;
- }
- }
+ if (IS_ERR(cs_gpios))
+ return PTR_ERR(cs_gpios);
return 0;
}
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c
index 3043677ba222..f3de3305d0f5 100644
--- a/drivers/spi/spi-bcm-qspi.c
+++ b/drivers/spi/spi-bcm-qspi.c
@@ -83,6 +83,9 @@
/* MSPI register offsets */
#define MSPI_SPCR0_LSB 0x000
#define MSPI_SPCR0_MSB 0x004
+#define MSPI_SPCR0_MSB_CPHA BIT(0)
+#define MSPI_SPCR0_MSB_CPOL BIT(1)
+#define MSPI_SPCR0_MSB_BITS_SHIFT 0x2
#define MSPI_SPCR1_LSB 0x008
#define MSPI_SPCR1_MSB 0x00c
#define MSPI_NEWQP 0x010
@@ -100,8 +103,10 @@
#define MSPI_MASTER_BIT BIT(7)
#define MSPI_NUM_CDRAM 16
+#define MSPI_CDRAM_OUTP BIT(8)
#define MSPI_CDRAM_CONT_BIT BIT(7)
#define MSPI_CDRAM_BITSE_BIT BIT(6)
+#define MSPI_CDRAM_DT_BIT BIT(5)
#define MSPI_CDRAM_PCS 0xf
#define MSPI_SPCR2_SPE BIT(6)
@@ -114,6 +119,14 @@
~(BIT(10) | BIT(11)))
#define MSPI_SPCR3_SYSCLKSEL_108 (MSPI_SPCR3_SYSCLKSEL_MASK & \
BIT(11))
+#define MSPI_SPCR3_TXRXDAM_MASK GENMASK(4, 2)
+#define MSPI_SPCR3_DAM_8BYTE 0
+#define MSPI_SPCR3_DAM_16BYTE (BIT(2) | BIT(4))
+#define MSPI_SPCR3_DAM_32BYTE (BIT(3) | BIT(5))
+#define MSPI_SPCR3_HALFDUPLEX BIT(6)
+#define MSPI_SPCR3_HDOUTTYPE BIT(7)
+#define MSPI_SPCR3_DATA_REG_SZ BIT(8)
+#define MSPI_SPCR3_CPHARX BIT(9)
#define MSPI_MSPI_STATUS_SPIF BIT(0)
@@ -153,6 +166,14 @@
#define TRANS_STATUS_BREAK_DESELECT (TRANS_STATUS_BREAK_EOM | \
TRANS_STATUS_BREAK_CS_CHANGE)
+/*
+ * Used to write and read data to/from TXRAM and RXRAM in the
+ * correct byte order when they are accessed as 32-bit registers
+ */
+#define swap4bytes(__val) \
+ ((((__val) >> 24) & 0x000000FF) | (((__val) >> 8) & 0x0000FF00) | \
+ (((__val) << 8) & 0x00FF0000) | (((__val) << 24) & 0xFF000000))
+
struct bcm_qspi_parms {
u32 speed_hz;
u8 mode;
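swap4bytes(), added above, is an open-coded 32-bit byte swap so that TXRAM/RXRAM words land in bus order; it produces the same result as the kernel's swab32(). A minimal sketch with an illustrative value:

	#include <linux/swab.h>

	u32 val = 0x11223344;
	u32 swapped = swap4bytes(val);	/* 0x44332211 */
	/* swab32(val) yields the same result */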
@@ -261,7 +282,7 @@ static inline bool bcm_qspi_has_sysclk_108(struct bcm_qspi *qspi)
static inline int bcm_qspi_spbr_min(struct bcm_qspi *qspi)
{
if (bcm_qspi_has_fastbr(qspi))
- return 1;
+ return (bcm_qspi_has_sysclk_108(qspi) ? 4 : 1);
else
return 8;
}
@@ -395,7 +416,8 @@ static int bcm_qspi_bspi_set_flex_mode(struct bcm_qspi *qspi,
if (addrlen == BSPI_ADDRLEN_4BYTES)
bpp = BSPI_BPP_ADDR_SELECT_MASK;
- bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
+ if (op->dummy.nbytes)
+ bpp |= (op->dummy.nbytes * 8) / op->dummy.buswidth;
switch (width) {
case SPI_NBITS_SINGLE:
@@ -570,23 +592,23 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
{
u32 spcr, spbr = 0;
- if (xp->speed_hz)
- spbr = qspi->base_clk / (2 * xp->speed_hz);
-
- spcr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
- bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spcr);
-
if (!qspi->mspi_maj_rev)
/* legacy controller */
spcr = MSPI_MASTER_BIT;
else
spcr = 0;
- /* for 16 bit the data should be zero */
- if (xp->bits_per_word != 16)
- spcr |= xp->bits_per_word << 2;
- spcr |= xp->mode & 3;
+ /*
+ * Bits per transfer. BITS determines the number of data bits
+ * transferred if the command control bit (BITSE of a
+ * CDRAM Register) is equal to 1.
+ * If CDRAM BITSE is equal to 0, 8 data bits are transferred
+	 * regardless.
+ */
+ if (xp->bits_per_word != 16 && xp->bits_per_word != 64)
+ spcr |= xp->bits_per_word << MSPI_SPCR0_MSB_BITS_SHIFT;
+ spcr |= xp->mode & (MSPI_SPCR0_MSB_CPHA | MSPI_SPCR0_MSB_CPOL);
bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_MSB, spcr);
if (bcm_qspi_has_fastbr(qspi)) {
@@ -595,17 +617,44 @@ static void bcm_qspi_hw_set_parms(struct bcm_qspi *qspi,
/* enable fastbr */
spcr |= MSPI_SPCR3_FASTBR;
+ if (xp->mode & SPI_3WIRE)
+ spcr |= MSPI_SPCR3_HALFDUPLEX | MSPI_SPCR3_HDOUTTYPE;
+
if (bcm_qspi_has_sysclk_108(qspi)) {
/* SYSCLK_108 */
spcr |= MSPI_SPCR3_SYSCLKSEL_108;
qspi->base_clk = MSPI_BASE_FREQ * 4;
- /* Change spbr as we changed sysclk */
- bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, 4);
}
+ if (xp->bits_per_word > 16) {
+ /* data_reg_size 1 (64bit) */
+ spcr |= MSPI_SPCR3_DATA_REG_SZ;
+ /* TxRx RAM data access mode 2 for 32B and set fastdt */
+ spcr |= MSPI_SPCR3_DAM_32BYTE | MSPI_SPCR3_FASTDT;
+ /*
+ * Set length of delay after transfer
+ * DTL from 0(256) to 1
+ */
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR1_LSB, 1);
+ } else {
+ /* data_reg_size[8] = 0 */
+ spcr &= ~(MSPI_SPCR3_DATA_REG_SZ);
+
+ /*
+ * TxRx RAM access mode 8B
+ * and disable fastdt
+ */
+ spcr &= ~(MSPI_SPCR3_DAM_32BYTE);
+ }
bcm_qspi_write(qspi, MSPI, MSPI_SPCR3, spcr);
}
+ if (xp->speed_hz)
+ spbr = qspi->base_clk / (2 * xp->speed_hz);
+
+ spbr = clamp_val(spbr, bcm_qspi_spbr_min(qspi), QSPI_SPBR_MAX);
+ bcm_qspi_write(qspi, MSPI, MSPI_SPCR0_LSB, spbr);
+
qspi->last_parms = *xp;
}
@@ -626,7 +675,7 @@ static int bcm_qspi_setup(struct spi_device *spi)
{
struct bcm_qspi_parms *xp;
- if (spi->bits_per_word > 16)
+ if (spi->bits_per_word > 64)
return -EINVAL;
xp = spi_get_ctldata(spi);
@@ -665,8 +714,12 @@ static int update_qspi_trans_byte_count(struct bcm_qspi *qspi,
/* count the last transferred bytes */
if (qt->trans->bits_per_word <= 8)
qt->byte++;
- else
+ else if (qt->trans->bits_per_word <= 16)
qt->byte += 2;
+ else if (qt->trans->bits_per_word <= 32)
+ qt->byte += 4;
+ else if (qt->trans->bits_per_word <= 64)
+ qt->byte += 8;
if (qt->byte >= qt->trans->len) {
/* we're at the end of the spi_transfer */
@@ -709,6 +762,33 @@ static inline u16 read_rxram_slot_u16(struct bcm_qspi *qspi, int slot)
((bcm_qspi_read(qspi, MSPI, msb_offset) & 0xff) << 8);
}
+static inline u32 read_rxram_slot_u32(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 offset = reg_offset + (slot << 3);
+ u32 val;
+
+ val = bcm_qspi_read(qspi, MSPI, offset);
+ val = swap4bytes(val);
+
+ return val;
+}
+
+static inline u64 read_rxram_slot_u64(struct bcm_qspi *qspi, int slot)
+{
+ u32 reg_offset = MSPI_RXRAM;
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 msb, lsb;
+
+ msb = bcm_qspi_read(qspi, MSPI, msb_offset);
+ msb = swap4bytes(msb);
+ lsb = bcm_qspi_read(qspi, MSPI, lsb_offset);
+ lsb = swap4bytes(lsb);
+
+ return ((u64)msb << 32 | lsb);
+}
+
static void read_from_hw(struct bcm_qspi *qspi, int slots)
{
struct qspi_trans tp;
@@ -732,7 +812,7 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
buf[tp.byte] = read_rxram_slot_u8(qspi, slot);
dev_dbg(&qspi->pdev->dev, "RD %02x\n",
buf ? buf[tp.byte] : 0x0);
- } else {
+ } else if (tp.trans->bits_per_word <= 16) {
u16 *buf = tp.trans->rx_buf;
if (buf)
@@ -740,6 +820,25 @@ static void read_from_hw(struct bcm_qspi *qspi, int slots)
slot);
dev_dbg(&qspi->pdev->dev, "RD %04x\n",
buf ? buf[tp.byte / 2] : 0x0);
+ } else if (tp.trans->bits_per_word <= 32) {
+ u32 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 4] = read_rxram_slot_u32(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %08x\n",
+ buf ? buf[tp.byte / 4] : 0x0);
+
+ } else if (tp.trans->bits_per_word <= 64) {
+ u64 *buf = tp.trans->rx_buf;
+
+ if (buf)
+ buf[tp.byte / 8] = read_rxram_slot_u64(qspi,
+ slot);
+ dev_dbg(&qspi->pdev->dev, "RD %llx\n",
+ buf ? buf[tp.byte / 8] : 0x0);
+
+
}
update_qspi_trans_byte_count(qspi, &tp,
@@ -769,6 +868,28 @@ static inline void write_txram_slot_u16(struct bcm_qspi *qspi, int slot,
bcm_qspi_write(qspi, MSPI, lsb_offset, (val & 0xff));
}
+static inline void write_txram_slot_u32(struct bcm_qspi *qspi, int slot,
+ u32 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(val));
+}
+
+static inline void write_txram_slot_u64(struct bcm_qspi *qspi, int slot,
+ u64 val)
+{
+ u32 reg_offset = MSPI_TXRAM;
+ u32 msb_offset = reg_offset + (slot << 3);
+ u32 lsb_offset = reg_offset + (slot << 3) + 0x4;
+ u32 msb = upper_32_bits(val);
+ u32 lsb = lower_32_bits(val);
+
+ bcm_qspi_write(qspi, MSPI, msb_offset, swap4bytes(msb));
+ bcm_qspi_write(qspi, MSPI, lsb_offset, swap4bytes(lsb));
+}
+
static inline u32 read_cdram_slot(struct bcm_qspi *qspi, int slot)
{
return bcm_qspi_read(qspi, MSPI, MSPI_CDRAM + (slot << 2));
@@ -792,20 +913,43 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
/* Run until end of transfer or reached the max data */
while (!tstatus && slot < MSPI_NUM_CDRAM) {
+ mspi_cdram = MSPI_CDRAM_CONT_BIT;
if (tp.trans->bits_per_word <= 8) {
const u8 *buf = tp.trans->tx_buf;
u8 val = buf ? buf[tp.byte] : 0x00;
write_txram_slot_u8(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %02x\n", val);
- } else {
+ } else if (tp.trans->bits_per_word <= 16) {
const u16 *buf = tp.trans->tx_buf;
u16 val = buf ? buf[tp.byte / 2] : 0x0000;
write_txram_slot_u16(qspi, slot, val);
dev_dbg(&qspi->pdev->dev, "WR %04x\n", val);
+ } else if (tp.trans->bits_per_word <= 32) {
+ const u32 *buf = tp.trans->tx_buf;
+ u32 val = buf ? buf[tp.byte/4] : 0x0;
+
+ write_txram_slot_u32(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %08x\n", val);
+ } else if (tp.trans->bits_per_word <= 64) {
+ const u64 *buf = tp.trans->tx_buf;
+ u64 val = (buf ? buf[tp.byte/8] : 0x0);
+
+ /* use the length of delay from SPCR1_LSB */
+ if (bcm_qspi_has_fastbr(qspi))
+ mspi_cdram |= MSPI_CDRAM_DT_BIT;
+
+ write_txram_slot_u64(qspi, slot, val);
+ dev_dbg(&qspi->pdev->dev, "WR %llx\n", val);
}
- mspi_cdram = MSPI_CDRAM_CONT_BIT;
+
+ mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
+ MSPI_CDRAM_BITSE_BIT);
+
+			/* set 3-wire half-duplex mode, data from master to slave */
+ if ((spi->mode & SPI_3WIRE) && tp.trans->tx_buf)
+ mspi_cdram |= MSPI_CDRAM_OUTP;
if (has_bspi(qspi))
mspi_cdram &= ~1;
@@ -813,9 +957,6 @@ static int write_to_hw(struct bcm_qspi *qspi, struct spi_device *spi)
mspi_cdram |= (~(1 << spi->chip_select) &
MSPI_CDRAM_PCS);
- mspi_cdram |= ((tp.trans->bits_per_word <= 8) ? 0 :
- MSPI_CDRAM_BITSE_BIT);
-
write_cdram_slot(qspi, slot, mspi_cdram);
tstatus = update_qspi_trans_byte_count(qspi, &tp,
@@ -1350,7 +1491,8 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi->master = master;
master->bus_num = -1;
- master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_RX_DUAL | SPI_RX_QUAD |
+ SPI_3WIRE;
master->setup = bcm_qspi_setup;
master->transfer_one = bcm_qspi_transfer_one;
master->mem_ops = &bcm_qspi_mem_ops;
@@ -1460,7 +1602,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
&qspi->dev_ids[val]);
if (ret < 0) {
dev_err(&pdev->dev, "IRQ %s not found\n", name);
- goto qspi_probe_err;
+ goto qspi_unprepare_err;
}
qspi->dev_ids[val].dev = qspi;
@@ -1475,7 +1617,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
if (!num_ints) {
dev_err(&pdev->dev, "no IRQs registered, cannot init driver\n");
ret = -EINVAL;
- goto qspi_probe_err;
+ goto qspi_unprepare_err;
}
bcm_qspi_hw_init(qspi);
@@ -1499,6 +1641,7 @@ int bcm_qspi_probe(struct platform_device *pdev,
qspi_reg_err:
bcm_qspi_hw_uninit(qspi);
+qspi_unprepare_err:
clk_disable_unprepare(qspi->clk);
qspi_probe_err:
kfree(qspi->dev_ids);
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 101cc71bffa7..8b3d268ac63c 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -13,6 +13,7 @@
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@@ -35,6 +36,7 @@
/* Quirks */
#define CQSPI_NEEDS_WR_DELAY BIT(0)
#define CQSPI_DISABLE_DAC_MODE BIT(1)
+#define CQSPI_SUPPORT_EXTERNAL_DMA BIT(2)
/* Capabilities */
#define CQSPI_SUPPORTS_OCTAL BIT(0)
@@ -82,11 +84,16 @@ struct cqspi_st {
u32 wr_delay;
bool use_direct_mode;
struct cqspi_flash_pdata f_pdata[CQSPI_MAX_CHIPSELECT];
+ bool use_dma_read;
+ u32 pd_dev_id;
};
struct cqspi_driver_platdata {
u32 hwcaps_mask;
u8 quirks;
+ int (*indirect_read_dma)(struct cqspi_flash_pdata *f_pdata,
+ u_char *rxbuf, loff_t from_addr, size_t n_rx);
+ u32 (*get_dma_status)(struct cqspi_st *cqspi);
};
/* Operation timeout value */
@@ -217,6 +224,8 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_INDIRECTWRSTARTADDR 0x78
#define CQSPI_REG_INDIRECTWRBYTES 0x7C
+#define CQSPI_REG_INDTRIG_ADDRRANGE 0x80
+
#define CQSPI_REG_CMDADDRESS 0x94
#define CQSPI_REG_CMDREADDATALOWER 0xA0
#define CQSPI_REG_CMDREADDATAUPPER 0xA4
@@ -231,6 +240,23 @@ struct cqspi_driver_platdata {
#define CQSPI_REG_OP_EXT_WRITE_LSB 16
#define CQSPI_REG_OP_EXT_STIG_LSB 0
+#define CQSPI_REG_VERSAL_DMA_SRC_ADDR 0x1000
+
+#define CQSPI_REG_VERSAL_DMA_DST_ADDR 0x1800
+#define CQSPI_REG_VERSAL_DMA_DST_SIZE 0x1804
+
+#define CQSPI_REG_VERSAL_DMA_DST_CTRL 0x180C
+
+#define CQSPI_REG_VERSAL_DMA_DST_I_STS 0x1814
+#define CQSPI_REG_VERSAL_DMA_DST_I_EN 0x1818
+#define CQSPI_REG_VERSAL_DMA_DST_I_DIS 0x181C
+#define CQSPI_REG_VERSAL_DMA_DST_DONE_MASK BIT(1)
+
+#define CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB 0x1828
+
+#define CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL 0xF43FFA00
+#define CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL 0x6
+
/* Interrupt status bits */
#define CQSPI_REG_IRQ_MODE_ERR BIT(0)
#define CQSPI_REG_IRQ_UNDERFLOW BIT(1)
@@ -250,6 +276,9 @@ struct cqspi_driver_platdata {
CQSPI_REG_IRQ_UNDERFLOW)
#define CQSPI_IRQ_STATUS_MASK 0x1FFFF
+#define CQSPI_DMA_UNALIGN 0x3
+
+#define CQSPI_REG_VERSAL_DMA_VAL 0x602
static int cqspi_wait_for_bit(void __iomem *reg, const u32 mask, bool clr)
{
@@ -275,10 +304,26 @@ static u32 cqspi_get_rd_sram_level(struct cqspi_st *cqspi)
return reg & CQSPI_REG_SDRAMLEVEL_RD_MASK;
}
+static u32 cqspi_get_versal_dma_status(struct cqspi_st *cqspi)
+{
+ u32 dma_status;
+
+ dma_status = readl(cqspi->iobase +
+ CQSPI_REG_VERSAL_DMA_DST_I_STS);
+ writel(dma_status, cqspi->iobase +
+ CQSPI_REG_VERSAL_DMA_DST_I_STS);
+
+ return dma_status & CQSPI_REG_VERSAL_DMA_DST_DONE_MASK;
+}
+
static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
{
struct cqspi_st *cqspi = dev;
unsigned int irq_status;
+ struct device *device = &cqspi->pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
+
+ ddata = of_device_get_match_data(device);
/* Read interrupt status */
irq_status = readl(cqspi->iobase + CQSPI_REG_IRQSTATUS);
@@ -286,6 +331,13 @@ static irqreturn_t cqspi_irq_handler(int this_irq, void *dev)
/* Clear interrupt */
writel(irq_status, cqspi->iobase + CQSPI_REG_IRQSTATUS);
+ if (cqspi->use_dma_read && ddata && ddata->get_dma_status) {
+ if (ddata->get_dma_status(cqspi)) {
+ complete(&cqspi->transfer_complete);
+ return IRQ_HANDLED;
+ }
+ }
+
irq_status &= CQSPI_IRQ_MASK_RD | CQSPI_IRQ_MASK_WR;
if (irq_status)
@@ -781,6 +833,131 @@ failrd:
return ret;
}
+static int cqspi_versal_indirect_read_dma(struct cqspi_flash_pdata *f_pdata,
+ u_char *rxbuf, loff_t from_addr,
+ size_t n_rx)
+{
+ struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ void __iomem *reg_base = cqspi->iobase;
+ u32 reg, bytes_to_dma;
+ loff_t addr = from_addr;
+ void *buf = rxbuf;
+ dma_addr_t dma_addr;
+ u8 bytes_rem;
+ int ret = 0;
+
+ bytes_rem = n_rx % 4;
+ bytes_to_dma = (n_rx - bytes_rem);
+
+ if (!bytes_to_dma)
+ goto nondmard;
+
+ ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_DMA);
+ if (ret)
+ return ret;
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ dma_addr = dma_map_single(dev, rxbuf, bytes_to_dma, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_err(dev, "dma mapping failed\n");
+ return -ENOMEM;
+ }
+
+ writel(from_addr, reg_base + CQSPI_REG_INDIRECTRDSTARTADDR);
+ writel(bytes_to_dma, reg_base + CQSPI_REG_INDIRECTRDBYTES);
+ writel(CQSPI_REG_VERSAL_ADDRRANGE_WIDTH_VAL,
+ reg_base + CQSPI_REG_INDTRIG_ADDRRANGE);
+
+ /* Clear all interrupts. */
+ writel(CQSPI_IRQ_STATUS_MASK, reg_base + CQSPI_REG_IRQSTATUS);
+
+ /* Enable DMA done interrupt */
+ writel(CQSPI_REG_VERSAL_DMA_DST_DONE_MASK,
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_I_EN);
+
+ /* Default DMA periph configuration */
+ writel(CQSPI_REG_VERSAL_DMA_VAL, reg_base + CQSPI_REG_DMA);
+
+ /* Configure DMA Dst address */
+ writel(lower_32_bits(dma_addr),
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR);
+ writel(upper_32_bits(dma_addr),
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_ADDR_MSB);
+
+ /* Configure DMA Src address */
+ writel(cqspi->trigger_address, reg_base +
+ CQSPI_REG_VERSAL_DMA_SRC_ADDR);
+
+ /* Set DMA destination size */
+ writel(bytes_to_dma, reg_base + CQSPI_REG_VERSAL_DMA_DST_SIZE);
+
+ /* Set DMA destination control */
+ writel(CQSPI_REG_VERSAL_DMA_DST_CTRL_VAL,
+ reg_base + CQSPI_REG_VERSAL_DMA_DST_CTRL);
+
+ writel(CQSPI_REG_INDIRECTRD_START_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ reinit_completion(&cqspi->transfer_complete);
+
+ if (!wait_for_completion_timeout(&cqspi->transfer_complete,
+ msecs_to_jiffies(CQSPI_READ_TIMEOUT_MS))) {
+ ret = -ETIMEDOUT;
+ goto failrd;
+ }
+
+ /* Disable DMA interrupt */
+ writel(0x0, cqspi->iobase + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
+
+ /* Clear indirect completion status */
+ writel(CQSPI_REG_INDIRECTRD_DONE_MASK,
+ cqspi->iobase + CQSPI_REG_INDIRECTRD);
+ dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ ret = zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id,
+ PM_OSPI_MUX_SEL_LINEAR);
+ if (ret)
+ return ret;
+
+nondmard:
+ if (bytes_rem) {
+ addr += bytes_to_dma;
+ buf += bytes_to_dma;
+ ret = cqspi_indirect_read_execute(f_pdata, buf, addr,
+ bytes_rem);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+
+failrd:
+ /* Disable DMA interrupt */
+ writel(0x0, reg_base + CQSPI_REG_VERSAL_DMA_DST_I_DIS);
+
+ /* Cancel the indirect read */
+ writel(CQSPI_REG_INDIRECTWR_CANCEL_MASK,
+ reg_base + CQSPI_REG_INDIRECTRD);
+
+ dma_unmap_single(dev, dma_addr, bytes_to_dma, DMA_FROM_DEVICE);
+
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg &= ~CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+
+ zynqmp_pm_ospi_mux_select(cqspi->pd_dev_id, PM_OSPI_MUX_SEL_LINEAR);
+
+ return ret;
+}
+
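The Versal read path above DMAs only the 32-bit-aligned portion of the request and finishes any tail bytes through the regular indirect-read path. A small worked sketch of the split, using an illustrative length:

	/* Illustrative: a 4099-byte read request */
	size_t n_rx = 4099;
	u8 bytes_rem = n_rx % 4;		/* 3 tail bytes */
	u32 bytes_to_dma = n_rx - bytes_rem;	/* 4096 bytes moved by the DMA */

	/* the 3 remaining bytes are then fetched via cqspi_indirect_read_execute() */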
static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
@@ -1180,11 +1357,15 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
const struct spi_mem_op *op)
{
struct cqspi_st *cqspi = f_pdata->cqspi;
+ struct device *dev = &cqspi->pdev->dev;
+ const struct cqspi_driver_platdata *ddata;
loff_t from = op->addr.val;
size_t len = op->data.nbytes;
u_char *buf = op->data.buf.in;
+ u64 dma_align = (u64)(uintptr_t)buf;
int ret;
+ ddata = of_device_get_match_data(dev);
ret = cqspi_set_protocol(f_pdata, op);
if (ret)
return ret;
@@ -1196,6 +1377,10 @@ static ssize_t cqspi_read(struct cqspi_flash_pdata *f_pdata,
if (cqspi->use_direct_mode && ((from + len) <= cqspi->ahb_size))
return cqspi_direct_read_execute(f_pdata, buf, from, len);
+ if (cqspi->use_dma_read && ddata && ddata->indirect_read_dma &&
+ virt_addr_valid(buf) && ((dma_align & CQSPI_DMA_UNALIGN) == 0))
+ return ddata->indirect_read_dma(f_pdata, buf, from, len);
+
return cqspi_indirect_read_execute(f_pdata, buf, from, len);
}
@@ -1299,6 +1484,7 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
{
struct device *dev = &cqspi->pdev->dev;
struct device_node *np = dev->of_node;
+ u32 id[2];
cqspi->is_decoded_cs = of_property_read_bool(np, "cdns,is-decoded-cs");
@@ -1323,6 +1509,10 @@ static int cqspi_of_get_pdata(struct cqspi_st *cqspi)
cqspi->rclk_en = of_property_read_bool(np, "cdns,rclk-en");
+ if (!of_property_read_u32_array(np, "power-domains", id,
+ ARRAY_SIZE(id)))
+ cqspi->pd_dev_id = id[1];
+
return 0;
}
@@ -1359,6 +1549,13 @@ static void cqspi_controller_init(struct cqspi_st *cqspi)
writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
}
+ /* Enable DMA interface */
+ if (cqspi->use_dma_read) {
+ reg = readl(cqspi->iobase + CQSPI_REG_CONFIG);
+ reg |= CQSPI_REG_CONFIG_DMA_MASK;
+ writel(reg, cqspi->iobase + CQSPI_REG_CONFIG);
+ }
+
cqspi_controller_enable(cqspi, 1);
}
@@ -1548,6 +1745,12 @@ static int cqspi_probe(struct platform_device *pdev)
master->mode_bits |= SPI_RX_OCTAL | SPI_TX_OCTAL;
if (!(ddata->quirks & CQSPI_DISABLE_DAC_MODE))
cqspi->use_direct_mode = true;
+ if (ddata->quirks & CQSPI_SUPPORT_EXTERNAL_DMA)
+ cqspi->use_dma_read = true;
+
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "xlnx,versal-ospi-1.0"))
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
}
ret = devm_request_irq(dev, irq, cqspi_irq_handler, 0,
@@ -1656,6 +1859,13 @@ static const struct cqspi_driver_platdata intel_lgm_qspi = {
.quirks = CQSPI_DISABLE_DAC_MODE,
};
+static const struct cqspi_driver_platdata versal_ospi = {
+ .hwcaps_mask = CQSPI_SUPPORTS_OCTAL,
+ .quirks = CQSPI_DISABLE_DAC_MODE | CQSPI_SUPPORT_EXTERNAL_DMA,
+ .indirect_read_dma = cqspi_versal_indirect_read_dma,
+ .get_dma_status = cqspi_get_versal_dma_status,
+};
+
static const struct of_device_id cqspi_dt_ids[] = {
{
.compatible = "cdns,qspi-nor",
@@ -1673,6 +1883,10 @@ static const struct of_device_id cqspi_dt_ids[] = {
.compatible = "intel,lgm-qspi",
.data = &intel_lgm_qspi,
},
+ {
+ .compatible = "xlnx,versal-ospi-1.0",
+ .data = (void *)&versal_ospi,
+ },
{ /* end of table */ }
};
diff --git a/drivers/spi/spi-cadence-xspi.c b/drivers/spi/spi-cadence-xspi.c
new file mode 100644
index 000000000000..4bc1b93fc276
--- /dev/null
+++ b/drivers/spi/spi-cadence-xspi.c
@@ -0,0 +1,642 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Cadence XSPI flash controller driver
+// Copyright (C) 2020-21 Cadence
+
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+#include <linux/bitfield.h>
+#include <linux/limits.h>
+#include <linux/log2.h>
+
+#define CDNS_XSPI_MAGIC_NUM_VALUE 0x6522
+#define CDNS_XSPI_MAX_BANKS 8
+#define CDNS_XSPI_NAME "cadence-xspi"
+
+/*
+ * Note: below are additional auxiliary registers to
+ * configure XSPI controller pin-strap settings
+ */
+
+/* PHY DQ timing register */
+#define CDNS_XSPI_CCP_PHY_DQ_TIMING 0x0000
+
+/* PHY DQS timing register */
+#define CDNS_XSPI_CCP_PHY_DQS_TIMING 0x0004
+
+/* PHY gate loopback control register */
+#define CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL 0x0008
+
+/* PHY DLL slave control register */
+#define CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL 0x0010
+
+/* DLL PHY control register */
+#define CDNS_XSPI_DLL_PHY_CTRL 0x1034
+
+/* Command registers */
+#define CDNS_XSPI_CMD_REG_0 0x0000
+#define CDNS_XSPI_CMD_REG_1 0x0004
+#define CDNS_XSPI_CMD_REG_2 0x0008
+#define CDNS_XSPI_CMD_REG_3 0x000C
+#define CDNS_XSPI_CMD_REG_4 0x0010
+#define CDNS_XSPI_CMD_REG_5 0x0014
+
+/* Command status registers */
+#define CDNS_XSPI_CMD_STATUS_REG 0x0044
+
+/* Controller status register */
+#define CDNS_XSPI_CTRL_STATUS_REG 0x0100
+#define CDNS_XSPI_INIT_COMPLETED BIT(16)
+#define CDNS_XSPI_INIT_LEGACY BIT(9)
+#define CDNS_XSPI_INIT_FAIL BIT(8)
+#define CDNS_XSPI_CTRL_BUSY BIT(7)
+
+/* Controller interrupt status register */
+#define CDNS_XSPI_INTR_STATUS_REG 0x0110
+#define CDNS_XSPI_STIG_DONE BIT(23)
+#define CDNS_XSPI_SDMA_ERROR BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER BIT(21)
+#define CDNS_XSPI_CMD_IGNRD_EN BIT(20)
+#define CDNS_XSPI_DDMA_TERR_EN BIT(18)
+#define CDNS_XSPI_CDMA_TREE_EN BIT(17)
+#define CDNS_XSPI_CTRL_IDLE_EN BIT(16)
+
+#define CDNS_XSPI_TRD_COMP_INTR_STATUS 0x0120
+#define CDNS_XSPI_TRD_ERR_INTR_STATUS 0x0130
+#define CDNS_XSPI_TRD_ERR_INTR_EN 0x0134
+
+/* Controller interrupt enable register */
+#define CDNS_XSPI_INTR_ENABLE_REG 0x0114
+#define CDNS_XSPI_INTR_EN BIT(31)
+#define CDNS_XSPI_STIG_DONE_EN BIT(23)
+#define CDNS_XSPI_SDMA_ERROR_EN BIT(22)
+#define CDNS_XSPI_SDMA_TRIGGER_EN BIT(21)
+
+#define CDNS_XSPI_INTR_MASK (CDNS_XSPI_INTR_EN | \
+ CDNS_XSPI_STIG_DONE_EN | \
+ CDNS_XSPI_SDMA_ERROR_EN | \
+ CDNS_XSPI_SDMA_TRIGGER_EN)
+
+/* Controller config register */
+#define CDNS_XSPI_CTRL_CONFIG_REG 0x0230
+#define CDNS_XSPI_CTRL_WORK_MODE GENMASK(6, 5)
+
+#define CDNS_XSPI_WORK_MODE_DIRECT 0
+#define CDNS_XSPI_WORK_MODE_STIG 1
+#define CDNS_XSPI_WORK_MODE_ACMD 3
+
+/* SDMA trigger transaction registers */
+#define CDNS_XSPI_SDMA_SIZE_REG 0x0240
+#define CDNS_XSPI_SDMA_TRD_INFO_REG 0x0244
+#define CDNS_XSPI_SDMA_DIR BIT(8)
+
+/* Controller features register */
+#define CDNS_XSPI_CTRL_FEATURES_REG 0x0F04
+#define CDNS_XSPI_NUM_BANKS GENMASK(25, 24)
+#define CDNS_XSPI_DMA_DATA_WIDTH BIT(21)
+#define CDNS_XSPI_NUM_THREADS GENMASK(3, 0)
+
+/* Controller version register */
+#define CDNS_XSPI_CTRL_VERSION_REG 0x0F00
+#define CDNS_XSPI_MAGIC_NUM GENMASK(31, 16)
+#define CDNS_XSPI_CTRL_REV GENMASK(7, 0)
+
+/* STIG Profile 1.0 instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_INSTR_TYPE GENMASK(6, 0)
+#define CDNS_XSPI_CMD_P1_R1_ADDR0 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R2_ADDR1 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R2_ADDR2 GENMASK(15, 8)
+#define CDNS_XSPI_CMD_P1_R2_ADDR3 GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R2_ADDR4 GENMASK(31, 24)
+#define CDNS_XSPI_CMD_P1_R3_ADDR5 GENMASK(7, 0)
+#define CDNS_XSPI_CMD_P1_R3_CMD GENMASK(23, 16)
+#define CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES GENMASK(30, 28)
+#define CDNS_XSPI_CMD_P1_R4_ADDR_IOS GENMASK(1, 0)
+#define CDNS_XSPI_CMD_P1_R4_CMD_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_P1_R4_BANK GENMASK(14, 12)
+
+/* STIG data sequence instruction fields (split into registers) */
+#define CDNS_XSPI_CMD_DSEQ_R2_DCNT_L GENMASK(31, 16)
+#define CDNS_XSPI_CMD_DSEQ_R3_DCNT_H GENMASK(15, 0)
+#define CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY GENMASK(25, 20)
+#define CDNS_XSPI_CMD_DSEQ_R4_BANK GENMASK(14, 12)
+#define CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS GENMASK(9, 8)
+#define CDNS_XSPI_CMD_DSEQ_R4_DIR BIT(4)
+
+/* STIG command status fields */
+#define CDNS_XSPI_CMD_STATUS_COMPLETED BIT(15)
+#define CDNS_XSPI_CMD_STATUS_FAILED BIT(14)
+#define CDNS_XSPI_CMD_STATUS_DQS_ERROR BIT(3)
+#define CDNS_XSPI_CMD_STATUS_CRC_ERROR BIT(2)
+#define CDNS_XSPI_CMD_STATUS_BUS_ERROR BIT(1)
+#define CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR BIT(0)
+
+#define CDNS_XSPI_STIG_DONE_FLAG BIT(0)
+#define CDNS_XSPI_TRD_STATUS 0x0104
+
+/* Helper macros for filling command registers */
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, (data_phase) ? \
+ CDNS_XSPI_STIG_INSTR_TYPE_1 : CDNS_XSPI_STIG_INSTR_TYPE_0) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R1_ADDR0, (op)->addr.val & 0xff))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR1, ((op)->addr.val >> 8) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR2, ((op)->addr.val >> 16) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR3, ((op)->addr.val >> 24) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R2_ADDR4, ((op)->addr.val >> 32) & 0xFF))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_ADDR5, ((op)->addr.val >> 40) & 0xFF) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, (op)->cmd.opcode) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R3_NUM_ADDR_BYTES, (op)->addr.nbytes))
+
+#define CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_ADDR_IOS, ilog2((op)->addr.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_CMD_IOS, ilog2((op)->cmd.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_P1_R4_BANK, chipsel))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_INSTR_TYPE, CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op) \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R2_DCNT_L, (op)->data.nbytes & 0xFFFF)
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_DCNT_H, \
+ ((op)->data.nbytes >> 16) & 0xffff) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R3_NUM_OF_DUMMY, (op)->dummy.nbytes * 8))
+
+#define CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op, chipsel) ( \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_BANK, chipsel) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DATA_IOS, \
+ ilog2((op)->data.buswidth)) | \
+ FIELD_PREP(CDNS_XSPI_CMD_DSEQ_R4_DIR, \
+ ((op)->data.dir == SPI_MEM_DATA_IN) ? \
+ CDNS_XSPI_STIG_CMD_DIR_READ : CDNS_XSPI_STIG_CMD_DIR_WRITE))
+
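The CDNS_XSPI_CMD_FLD_* helpers above are built from the generic bitfield macros in <linux/bitfield.h>; a minimal sketch of that pattern, using one of the fields defined here (the opcode value is illustrative):

	#include <linux/bitfield.h>

	u32 reg3 = FIELD_PREP(CDNS_XSPI_CMD_P1_R3_CMD, 0x9f);	/* pack opcode into bits 23:16 */
	u8 opcode = FIELD_GET(CDNS_XSPI_CMD_P1_R3_CMD, reg3);	/* and extract it again */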
+enum cdns_xspi_stig_instr_type {
+ CDNS_XSPI_STIG_INSTR_TYPE_0,
+ CDNS_XSPI_STIG_INSTR_TYPE_1,
+ CDNS_XSPI_STIG_INSTR_TYPE_DATA_SEQ = 127,
+};
+
+enum cdns_xspi_sdma_dir {
+ CDNS_XSPI_SDMA_DIR_READ,
+ CDNS_XSPI_SDMA_DIR_WRITE,
+};
+
+enum cdns_xspi_stig_cmd_dir {
+ CDNS_XSPI_STIG_CMD_DIR_READ,
+ CDNS_XSPI_STIG_CMD_DIR_WRITE,
+};
+
+struct cdns_xspi_dev {
+ struct platform_device *pdev;
+ struct device *dev;
+
+ void __iomem *iobase;
+ void __iomem *auxbase;
+ void __iomem *sdmabase;
+
+ int irq;
+ int cur_cs;
+ unsigned int sdmasize;
+
+ struct completion cmd_complete;
+ struct completion auto_cmd_complete;
+ struct completion sdma_complete;
+ bool sdma_error;
+
+ void *in_buffer;
+ const void *out_buffer;
+
+ u8 hw_num_banks;
+};
+
+static int cdns_xspi_wait_for_controller_idle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_stat;
+
+ return readl_relaxed_poll_timeout(cdns_xspi->iobase +
+ CDNS_XSPI_CTRL_STATUS_REG,
+ ctrl_stat,
+ ((ctrl_stat &
+ CDNS_XSPI_CTRL_BUSY) == 0),
+ 100, 1000);
+}
+
+static void cdns_xspi_trigger_command(struct cdns_xspi_dev *cdns_xspi,
+ u32 cmd_regs[6])
+{
+ writel(cmd_regs[5], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_5);
+ writel(cmd_regs[4], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_4);
+ writel(cmd_regs[3], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_3);
+ writel(cmd_regs[2], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_2);
+ writel(cmd_regs[1], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_1);
+ writel(cmd_regs[0], cdns_xspi->iobase + CDNS_XSPI_CMD_REG_0);
+}
+
+static int cdns_xspi_check_command_status(struct cdns_xspi_dev *cdns_xspi)
+{
+ int ret = 0;
+ u32 cmd_status = readl(cdns_xspi->iobase + CDNS_XSPI_CMD_STATUS_REG);
+
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_COMPLETED) {
+ if ((cmd_status & CDNS_XSPI_CMD_STATUS_FAILED) != 0) {
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_DQS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect DQS pulses detected\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_CRC_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "CRC error received\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_BUS_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Error resp on system DMA interface\n");
+ ret = -EPROTO;
+ }
+ if (cmd_status & CDNS_XSPI_CMD_STATUS_INV_SEQ_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Invalid command sequence detected\n");
+ ret = -EPROTO;
+ }
+ }
+ } else {
+ dev_err(cdns_xspi->dev, "Fatal err - command not completed\n");
+ ret = -EPROTO;
+ }
+
+ return ret;
+}
+
+static void cdns_xspi_set_interrupts(struct cdns_xspi_dev *cdns_xspi,
+ bool enabled)
+{
+ u32 intr_enable;
+
+ intr_enable = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+ if (enabled)
+ intr_enable |= CDNS_XSPI_INTR_MASK;
+ else
+ intr_enable &= ~CDNS_XSPI_INTR_MASK;
+ writel(intr_enable, cdns_xspi->iobase + CDNS_XSPI_INTR_ENABLE_REG);
+}
+
+static int cdns_xspi_controller_init(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 ctrl_ver;
+ u32 ctrl_features;
+ u16 hw_magic_num;
+
+ ctrl_ver = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_VERSION_REG);
+ hw_magic_num = FIELD_GET(CDNS_XSPI_MAGIC_NUM, ctrl_ver);
+ if (hw_magic_num != CDNS_XSPI_MAGIC_NUM_VALUE) {
+ dev_err(cdns_xspi->dev,
+ "Incorrect XSPI magic number: %x, expected: %x\n",
+ hw_magic_num, CDNS_XSPI_MAGIC_NUM_VALUE);
+ return -EIO;
+ }
+
+ ctrl_features = readl(cdns_xspi->iobase + CDNS_XSPI_CTRL_FEATURES_REG);
+ cdns_xspi->hw_num_banks = FIELD_GET(CDNS_XSPI_NUM_BANKS, ctrl_features);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+
+ return 0;
+}
+
+static void cdns_xspi_sdma_handle(struct cdns_xspi_dev *cdns_xspi)
+{
+ u32 sdma_size, sdma_trd_info;
+ u8 sdma_dir;
+
+ sdma_size = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_SIZE_REG);
+ sdma_trd_info = readl(cdns_xspi->iobase + CDNS_XSPI_SDMA_TRD_INFO_REG);
+ sdma_dir = FIELD_GET(CDNS_XSPI_SDMA_DIR, sdma_trd_info);
+
+ switch (sdma_dir) {
+ case CDNS_XSPI_SDMA_DIR_READ:
+ ioread8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->in_buffer, sdma_size);
+ break;
+
+ case CDNS_XSPI_SDMA_DIR_WRITE:
+ iowrite8_rep(cdns_xspi->sdmabase,
+ cdns_xspi->out_buffer, sdma_size);
+ break;
+ }
+}
+
+static int cdns_xspi_send_stig_command(struct cdns_xspi_dev *cdns_xspi,
+ const struct spi_mem_op *op,
+ bool data_phase)
+{
+ u32 cmd_regs[6];
+ u32 cmd_status;
+ int ret;
+
+ ret = cdns_xspi_wait_for_controller_idle(cdns_xspi);
+ if (ret < 0)
+ return -EIO;
+
+ writel(FIELD_PREP(CDNS_XSPI_CTRL_WORK_MODE, CDNS_XSPI_WORK_MODE_STIG),
+ cdns_xspi->iobase + CDNS_XSPI_CTRL_CONFIG_REG);
+
+ cdns_xspi_set_interrupts(cdns_xspi, true);
+ cdns_xspi->sdma_error = false;
+
+ memset(cmd_regs, 0, sizeof(cmd_regs));
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_1(op, data_phase);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_P1_INSTR_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
+ if (data_phase) {
+ cmd_regs[0] = CDNS_XSPI_STIG_DONE_FLAG;
+ cmd_regs[1] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_1(op);
+ cmd_regs[2] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_2(op);
+ cmd_regs[3] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_3(op);
+ cmd_regs[4] = CDNS_XSPI_CMD_FLD_DSEQ_CMD_4(op,
+ cdns_xspi->cur_cs);
+
+ cdns_xspi->in_buffer = op->data.buf.in;
+ cdns_xspi->out_buffer = op->data.buf.out;
+
+ cdns_xspi_trigger_command(cdns_xspi, cmd_regs);
+
+ wait_for_completion(&cdns_xspi->sdma_complete);
+ if (cdns_xspi->sdma_error) {
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+ return -EIO;
+ }
+ cdns_xspi_sdma_handle(cdns_xspi);
+ }
+
+ wait_for_completion(&cdns_xspi->cmd_complete);
+ cdns_xspi_set_interrupts(cdns_xspi, false);
+
+ cmd_status = cdns_xspi_check_command_status(cdns_xspi);
+ if (cmd_status)
+ return -EPROTO;
+
+ return 0;
+}
+
+static int cdns_xspi_mem_op(struct cdns_xspi_dev *cdns_xspi,
+ struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ enum spi_mem_data_dir dir = op->data.dir;
+
+ if (cdns_xspi->cur_cs != mem->spi->chip_select)
+ cdns_xspi->cur_cs = mem->spi->chip_select;
+
+ return cdns_xspi_send_stig_command(cdns_xspi, op,
+ (dir != SPI_MEM_NO_DATA));
+}
+
+static int cdns_xspi_mem_op_execute(struct spi_mem *mem,
+ const struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+ int ret = 0;
+
+ ret = cdns_xspi_mem_op(cdns_xspi, mem, op);
+
+ return ret;
+}
+
+static int cdns_xspi_adjust_mem_op_size(struct spi_mem *mem, struct spi_mem_op *op)
+{
+ struct cdns_xspi_dev *cdns_xspi =
+ spi_master_get_devdata(mem->spi->master);
+
+ op->data.nbytes = clamp_val(op->data.nbytes, 0, cdns_xspi->sdmasize);
+
+ return 0;
+}
+
+static const struct spi_controller_mem_ops cadence_xspi_mem_ops = {
+ .exec_op = cdns_xspi_mem_op_execute,
+ .adjust_op_size = cdns_xspi_adjust_mem_op_size,
+};
+
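cdns_xspi_adjust_mem_op_size() caps a single operation at the SDMA window size; the caller (e.g. the flash layer, via spi_mem_adjust_op_size()) then loops over smaller operations until the full request is done. A short sketch with illustrative sizes:

	/* Illustrative: a 4 KiB read against a 256-byte SDMA window */
	unsigned int sdmasize = 256;
	unsigned int requested = 4096;
	unsigned int per_op = clamp_val(requested, 0, sdmasize);	/* 256 bytes per operation */
	/* the upper layer repeats the op until all 4096 bytes are transferred */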
+static irqreturn_t cdns_xspi_irq_handler(int this_irq, void *dev)
+{
+ struct cdns_xspi_dev *cdns_xspi = dev;
+ u32 irq_status;
+ irqreturn_t result = IRQ_NONE;
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+ writel(irq_status, cdns_xspi->iobase + CDNS_XSPI_INTR_STATUS_REG);
+
+ if (irq_status &
+ (CDNS_XSPI_SDMA_ERROR | CDNS_XSPI_SDMA_TRIGGER |
+ CDNS_XSPI_STIG_DONE)) {
+ if (irq_status & CDNS_XSPI_SDMA_ERROR) {
+ dev_err(cdns_xspi->dev,
+ "Slave DMA transaction error\n");
+ cdns_xspi->sdma_error = true;
+ complete(&cdns_xspi->sdma_complete);
+ }
+
+ if (irq_status & CDNS_XSPI_SDMA_TRIGGER)
+ complete(&cdns_xspi->sdma_complete);
+
+ if (irq_status & CDNS_XSPI_STIG_DONE)
+ complete(&cdns_xspi->cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ irq_status = readl(cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+ if (irq_status) {
+ writel(irq_status,
+ cdns_xspi->iobase + CDNS_XSPI_TRD_COMP_INTR_STATUS);
+
+ complete(&cdns_xspi->auto_cmd_complete);
+
+ result = IRQ_HANDLED;
+ }
+
+ return result;
+}
+
+static int cdns_xspi_of_get_plat_data(struct platform_device *pdev)
+{
+ struct device_node *node_prop = pdev->dev.of_node;
+ struct device_node *node_child;
+ unsigned int cs;
+
+ for_each_child_of_node(node_prop, node_child) {
+ if (!of_device_is_available(node_child))
+ continue;
+
+ if (of_property_read_u32(node_child, "reg", &cs)) {
+ dev_err(&pdev->dev, "Couldn't get memory chip select\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ } else if (cs >= CDNS_XSPI_MAX_BANKS) {
+ dev_err(&pdev->dev, "reg (cs) parameter value too large\n");
+ of_node_put(node_child);
+ return -ENXIO;
+ }
+ }
+
+ return 0;
+}
+
+static void cdns_xspi_print_phy_config(struct cdns_xspi_dev *cdns_xspi)
+{
+ struct device *dev = cdns_xspi->dev;
+
+ dev_info(dev, "PHY configuration\n");
+ dev_info(dev, " * xspi_dll_phy_ctrl: %08x\n",
+ readl(cdns_xspi->iobase + CDNS_XSPI_DLL_PHY_CTRL));
+ dev_info(dev, " * phy_dq_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQ_TIMING));
+ dev_info(dev, " * phy_dqs_timing: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DQS_TIMING));
+ dev_info(dev, " * phy_gate_loopback_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_GATE_LPBCK_CTRL));
+ dev_info(dev, " * phy_dll_slave_ctrl: %08x\n",
+ readl(cdns_xspi->auxbase + CDNS_XSPI_CCP_PHY_DLL_SLAVE_CTRL));
+}
+
+static int cdns_xspi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct spi_master *master = NULL;
+ struct cdns_xspi_dev *cdns_xspi = NULL;
+ struct resource *res;
+ int ret;
+
+ master = devm_spi_alloc_master(dev, sizeof(*cdns_xspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_3WIRE | SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_OCTAL | SPI_RX_OCTAL |
+ SPI_MODE_0 | SPI_MODE_3;
+
+ master->mem_ops = &cadence_xspi_mem_ops;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = -1;
+
+ platform_set_drvdata(pdev, master);
+
+ cdns_xspi = spi_master_get_devdata(master);
+ cdns_xspi->pdev = pdev;
+ cdns_xspi->dev = &pdev->dev;
+ cdns_xspi->cur_cs = 0;
+
+ init_completion(&cdns_xspi->cmd_complete);
+ init_completion(&cdns_xspi->auto_cmd_complete);
+ init_completion(&cdns_xspi->sdma_complete);
+
+ ret = cdns_xspi_of_get_plat_data(pdev);
+ if (ret)
+ return -ENODEV;
+
+ cdns_xspi->iobase = devm_platform_ioremap_resource_byname(pdev, "io");
+ if (IS_ERR(cdns_xspi->iobase)) {
+ dev_err(dev, "Failed to remap controller base address\n");
+ return PTR_ERR(cdns_xspi->iobase);
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sdma");
+ cdns_xspi->sdmabase = devm_ioremap_resource(dev, res);
+ if (IS_ERR(cdns_xspi->sdmabase)) {
+ dev_err(dev, "Failed to remap SDMA address\n");
+ return PTR_ERR(cdns_xspi->sdmabase);
+ }
+ cdns_xspi->sdmasize = resource_size(res);
+
+ cdns_xspi->auxbase = devm_platform_ioremap_resource_byname(pdev, "aux");
+ if (IS_ERR(cdns_xspi->auxbase)) {
+ dev_err(dev, "Failed to remap AUX address\n");
+ return PTR_ERR(cdns_xspi->auxbase);
+ }
+
+ cdns_xspi->irq = platform_get_irq(pdev, 0);
+ if (cdns_xspi->irq < 0) {
+ dev_err(dev, "Failed to get IRQ\n");
+ return -ENXIO;
+ }
+
+ ret = devm_request_irq(dev, cdns_xspi->irq, cdns_xspi_irq_handler,
+ IRQF_SHARED, pdev->name, cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to request IRQ: %d\n", cdns_xspi->irq);
+ return ret;
+ }
+
+ cdns_xspi_print_phy_config(cdns_xspi);
+
+ ret = cdns_xspi_controller_init(cdns_xspi);
+ if (ret) {
+ dev_err(dev, "Failed to initialize controller\n");
+ return ret;
+ }
+
+ master->num_chipselect = 1 << cdns_xspi->hw_num_banks;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "Failed to register SPI master\n");
+ return ret;
+ }
+
+ dev_info(dev, "Successfully registered SPI master\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdns_xspi_of_match[] = {
+ {
+ .compatible = "cdns,xspi-nor",
+ },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, cdns_xspi_of_match);
+#else
+#define cdns_xspi_of_match NULL
+#endif /* CONFIG_OF */
+
+static struct platform_driver cdns_xspi_platform_driver = {
+ .probe = cdns_xspi_probe,
+ .remove = NULL,
+ .driver = {
+ .name = CDNS_XSPI_NAME,
+ .of_match_table = cdns_xspi_of_match,
+ },
+};
+
+module_platform_driver(cdns_xspi_platform_driver);
+
+MODULE_DESCRIPTION("Cadence XSPI Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" CDNS_XSPI_NAME);
+MODULE_AUTHOR("Konrad Kociolek <konrad@cadence.com>");
+MODULE_AUTHOR("Jayshri Pawar <jpawar@cadence.com>");
+MODULE_AUTHOR("Parshuram Thombare <pthombar@cadence.com>");
diff --git a/drivers/spi/spi-fsi.c b/drivers/spi/spi-fsi.c
index 829770b8ec74..b6c7467f0b59 100644
--- a/drivers/spi/spi-fsi.c
+++ b/drivers/spi/spi-fsi.c
@@ -67,9 +67,14 @@
SPI_FSI_STATUS_RDR_OVERRUN)
#define SPI_FSI_PORT_CTRL 0x9
+struct fsi2spi {
+ struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
+ struct mutex lock; /* lock access to the device */
+};
+
struct fsi_spi {
struct device *dev; /* SPI controller device */
- struct fsi_device *fsi; /* FSI2SPI CFAM engine device */
+ struct fsi2spi *bridge; /* FSI2SPI device */
u32 base;
};
@@ -104,7 +109,7 @@ static int fsi_spi_check_status(struct fsi_spi *ctx)
u32 sts;
__be32 sts_be;
- rc = fsi_device_read(ctx->fsi, FSI2SPI_STATUS, &sts_be,
+ rc = fsi_device_read(ctx->bridge->fsi, FSI2SPI_STATUS, &sts_be,
sizeof(sts_be));
if (rc)
return rc;
@@ -120,73 +125,91 @@ static int fsi_spi_check_status(struct fsi_spi *ctx)
static int fsi_spi_read_reg(struct fsi_spi *ctx, u32 offset, u64 *value)
{
- int rc;
+ int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
+ struct fsi2spi *bridge = ctx->bridge;
*value = 0ULL;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
- cmd_be = cpu_to_be32(cmd);
- rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ rc = mutex_lock_interruptible(&bridge->lock);
if (rc)
return rc;
+ cmd_be = cpu_to_be32(cmd);
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
+ sizeof(cmd_be));
+ if (rc)
+ goto unlock;
+
rc = fsi_spi_check_status(ctx);
if (rc)
- return rc;
+ goto unlock;
- rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
- return rc;
+ goto unlock;
*value |= (u64)be32_to_cpu(data_be) << 32;
- rc = fsi_device_read(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ rc = fsi_device_read(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
- return rc;
+ goto unlock;
*value |= (u64)be32_to_cpu(data_be);
dev_dbg(ctx->dev, "Read %02x[%016llx].\n", offset, *value);
- return 0;
+unlock:
+ mutex_unlock(&bridge->lock);
+ return rc;
}
static int fsi_spi_write_reg(struct fsi_spi *ctx, u32 offset, u64 value)
{
- int rc;
+ int rc = 0;
__be32 cmd_be;
__be32 data_be;
u32 cmd = offset + ctx->base;
+ struct fsi2spi *bridge = ctx->bridge;
if (cmd & FSI2SPI_CMD_WRITE)
return -EINVAL;
+ rc = mutex_lock_interruptible(&bridge->lock);
+ if (rc)
+ return rc;
+
dev_dbg(ctx->dev, "Write %02x[%016llx].\n", offset, value);
data_be = cpu_to_be32(upper_32_bits(value));
- rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA0, &data_be,
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA0, &data_be,
sizeof(data_be));
if (rc)
- return rc;
+ goto unlock;
data_be = cpu_to_be32(lower_32_bits(value));
- rc = fsi_device_write(ctx->fsi, FSI2SPI_DATA1, &data_be,
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_DATA1, &data_be,
sizeof(data_be));
if (rc)
- return rc;
+ goto unlock;
cmd_be = cpu_to_be32(cmd | FSI2SPI_CMD_WRITE);
- rc = fsi_device_write(ctx->fsi, FSI2SPI_CMD, &cmd_be, sizeof(cmd_be));
+ rc = fsi_device_write(bridge->fsi, FSI2SPI_CMD, &cmd_be,
+ sizeof(cmd_be));
if (rc)
- return rc;
+ goto unlock;
- return fsi_spi_check_status(ctx);
+ rc = fsi_spi_check_status(ctx);
+
+unlock:
+ mutex_unlock(&bridge->lock);
+ return rc;
}
static int fsi_spi_data_in(u64 in, u8 *rx, int len)
@@ -234,6 +257,26 @@ static int fsi_spi_reset(struct fsi_spi *ctx)
return fsi_spi_write_reg(ctx, SPI_FSI_STATUS, 0ULL);
}
+static int fsi_spi_status(struct fsi_spi *ctx, u64 *status, const char *dir)
+{
+ int rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS, status);
+
+ if (rc)
+ return rc;
+
+ if (*status & SPI_FSI_STATUS_ANY_ERROR) {
+ dev_err(ctx->dev, "%s error: %016llx\n", dir, *status);
+
+ rc = fsi_spi_reset(ctx);
+ if (rc)
+ return rc;
+
+ return -EREMOTEIO;
+ }
+
+ return 0;
+}
+
static void fsi_spi_sequence_add(struct fsi_spi_sequence *seq, u8 val)
{
/*
@@ -273,18 +316,9 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
return rc;
do {
- rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
- &status);
+ rc = fsi_spi_status(ctx, &status, "TX");
if (rc)
return rc;
-
- if (status & SPI_FSI_STATUS_ANY_ERROR) {
- rc = fsi_spi_reset(ctx);
- if (rc)
- return rc;
-
- return -EREMOTEIO;
- }
} while (status & SPI_FSI_STATUS_TDR_FULL);
sent += nb;
@@ -296,18 +330,9 @@ static int fsi_spi_transfer_data(struct fsi_spi *ctx,
while (transfer->len > recv) {
do {
- rc = fsi_spi_read_reg(ctx, SPI_FSI_STATUS,
- &status);
+ rc = fsi_spi_status(ctx, &status, "RX");
if (rc)
return rc;
-
- if (status & SPI_FSI_STATUS_ANY_ERROR) {
- rc = fsi_spi_reset(ctx);
- if (rc)
- return rc;
-
- return -EREMOTEIO;
- }
} while (!(status & SPI_FSI_STATUS_RDR_FULL));
rc = fsi_spi_read_reg(ctx, SPI_FSI_DATA_RX, &in);
@@ -348,8 +373,12 @@ static int fsi_spi_transfer_init(struct fsi_spi *ctx)
if (status & (SPI_FSI_STATUS_ANY_ERROR |
SPI_FSI_STATUS_TDR_FULL |
SPI_FSI_STATUS_RDR_FULL)) {
- if (reset)
+ if (reset) {
+ dev_err(ctx->dev,
+ "Initialization error: %08llx\n",
+ status);
return -EIO;
+ }
rc = fsi_spi_reset(ctx);
if (rc)
@@ -388,7 +417,7 @@ static int fsi_spi_transfer_one_message(struct spi_controller *ctlr,
struct spi_transfer *transfer;
struct fsi_spi *ctx = spi_controller_get_devdata(ctlr);
- rc = fsi_spi_check_mux(ctx->fsi, ctx->dev);
+ rc = fsi_spi_check_mux(ctx->bridge->fsi, ctx->dev);
if (rc)
goto error;
@@ -478,12 +507,20 @@ static int fsi_spi_probe(struct device *dev)
int rc;
struct device_node *np;
int num_controllers_registered = 0;
+ struct fsi2spi *bridge;
struct fsi_device *fsi = to_fsi_dev(dev);
rc = fsi_spi_check_mux(fsi, dev);
if (rc)
return -ENODEV;
+ bridge = devm_kzalloc(dev, sizeof(*bridge), GFP_KERNEL);
+ if (!bridge)
+ return -ENOMEM;
+
+ bridge->fsi = fsi;
+ mutex_init(&bridge->lock);
+
for_each_available_child_of_node(dev->of_node, np) {
u32 base;
struct fsi_spi *ctx;
@@ -506,7 +543,7 @@ static int fsi_spi_probe(struct device *dev)
ctx = spi_controller_get_devdata(ctlr);
ctx->dev = &ctlr->dev;
- ctx->fsi = fsi;
+ ctx->bridge = bridge;
ctx->base = base + SPI_FSI_BASE;
rc = devm_spi_register_controller(dev, ctlr);
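The fsi2spi bridge introduced above exists so that all SPI controllers sharing one FSI2SPI engine serialize their CMD/DATA register traffic behind a single mutex. A minimal sketch of that access pattern, assuming the same struct fsi2spi layout as the patch (the read helper name is hypothetical):

#include <linux/fsi.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct fsi2spi {
	struct fsi_device *fsi;		/* FSI2SPI CFAM engine device */
	struct mutex lock;		/* serializes CMD/DATA register access */
};

/* Read one 32-bit FSI2SPI register while holding the bridge lock. */
static int fsi2spi_read32(struct fsi2spi *bridge, u32 reg, u32 *val)
{
	__be32 data_be;
	int rc;

	rc = mutex_lock_interruptible(&bridge->lock);
	if (rc)
		return rc;

	rc = fsi_device_read(bridge->fsi, reg, &data_be, sizeof(data_be));
	if (!rc)
		*val = be32_to_cpu(data_be);

	mutex_unlock(&bridge->lock);
	return rc;
}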
diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 2f51421e2a71..27a446faf143 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -2,6 +2,9 @@
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma/qcom-gpi-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
@@ -63,6 +66,15 @@
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
+#define GSI_LOOPBACK_EN BIT(0)
+#define GSI_CS_TOGGLE BIT(3)
+#define GSI_CPHA BIT(4)
+#define GSI_CPOL BIT(5)
+
+#define MAX_TX_SG 3
+#define NUM_SPI_XFER 8
+#define SPI_XFER_TIMEOUT_MS 250
+
struct spi_geni_master {
struct geni_se se;
struct device *dev;
@@ -84,6 +96,9 @@ struct spi_geni_master {
int irq;
bool cs_flag;
bool abort_failed;
+ struct dma_chan *tx;
+ struct dma_chan *rx;
+ int cur_xfer_mode;
};
static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -330,34 +345,197 @@ static int setup_fifo_params(struct spi_device *spi_slv,
return geni_spi_set_clock_and_bw(mas, spi_slv->max_speed_hz);
}
+static void
+spi_gsi_callback_result(void *cb, const struct dmaengine_result *result)
+{
+ struct spi_master *spi = cb;
+
+ if (result->result != DMA_TRANS_NOERROR) {
+ dev_err(&spi->dev, "DMA txn failed: %d\n", result->result);
+ return;
+ }
+
+ if (!result->residue) {
+ dev_dbg(&spi->dev, "DMA txn completed\n");
+ spi_finalize_current_transfer(spi);
+ } else {
+ dev_err(&spi->dev, "DMA xfer has pending: %d\n", result->residue);
+ }
+}
+
+static int setup_gsi_xfer(struct spi_transfer *xfer, struct spi_geni_master *mas,
+ struct spi_device *spi_slv, struct spi_master *spi)
+{
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_CTRL_ACK;
+ struct dma_slave_config config = {};
+ struct gpi_spi_config peripheral = {};
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+ int ret;
+
+ config.peripheral_config = &peripheral;
+ config.peripheral_size = sizeof(peripheral);
+ peripheral.set_config = true;
+
+ if (xfer->bits_per_word != mas->cur_bits_per_word ||
+ xfer->speed_hz != mas->cur_speed_hz) {
+ mas->cur_bits_per_word = xfer->bits_per_word;
+ mas->cur_speed_hz = xfer->speed_hz;
+ }
+
+ if (xfer->tx_buf && xfer->rx_buf) {
+ peripheral.cmd = SPI_DUPLEX;
+ } else if (xfer->tx_buf) {
+ peripheral.cmd = SPI_TX;
+ peripheral.rx_len = 0;
+ } else if (xfer->rx_buf) {
+ peripheral.cmd = SPI_RX;
+ if (!(mas->cur_bits_per_word % MIN_WORD_LEN)) {
+ peripheral.rx_len = ((xfer->len << 3) / mas->cur_bits_per_word);
+ } else {
+ int bytes_per_word = (mas->cur_bits_per_word / BITS_PER_BYTE) + 1;
+
+ peripheral.rx_len = (xfer->len / bytes_per_word);
+ }
+ }
+
+ peripheral.loopback_en = !!(spi_slv->mode & SPI_LOOP);
+ peripheral.clock_pol_high = !!(spi_slv->mode & SPI_CPOL);
+ peripheral.data_pol_high = !!(spi_slv->mode & SPI_CPHA);
+ peripheral.cs = spi_slv->chip_select;
+ peripheral.pack_en = true;
+ peripheral.word_len = xfer->bits_per_word - MIN_WORD_LEN;
+
+ ret = get_spi_clk_cfg(mas->cur_speed_hz, mas,
+ &peripheral.clk_src, &peripheral.clk_div);
+ if (ret) {
+ dev_err(mas->dev, "Err in get_spi_clk_cfg() :%d\n", ret);
+ return ret;
+ }
+
+ if (!xfer->cs_change) {
+ if (!list_is_last(&xfer->transfer_list, &spi->cur_msg->transfers))
+ peripheral.fragmentation = FRAGMENTATION;
+ }
+
+ if (peripheral.cmd & SPI_RX) {
+ dmaengine_slave_config(mas->rx, &config);
+ rx_desc = dmaengine_prep_slave_sg(mas->rx, xfer->rx_sg.sgl, xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM, flags);
+ if (!rx_desc) {
+ dev_err(mas->dev, "Err setting up rx desc\n");
+ return -EIO;
+ }
+ }
+
+ /*
+ * Always prepare the TX descriptor: even for RX-only transfers or a
+ * NULL tx_buf, the GSI spec requires TX to be prepared.
+ */
+ dmaengine_slave_config(mas->tx, &config);
+ tx_desc = dmaengine_prep_slave_sg(mas->tx, xfer->tx_sg.sgl, xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV, flags);
+ if (!tx_desc) {
+ dev_err(mas->dev, "Err setting up tx desc\n");
+ return -EIO;
+ }
+
+ tx_desc->callback_result = spi_gsi_callback_result;
+ tx_desc->callback_param = spi;
+
+ if (peripheral.cmd & SPI_RX)
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+
+ if (peripheral.cmd & SPI_RX)
+ dma_async_issue_pending(mas->rx);
+
+ dma_async_issue_pending(mas->tx);
+ return 1;
+}
+
+static bool geni_can_dma(struct spi_controller *ctlr,
+ struct spi_device *slv, struct spi_transfer *xfer)
+{
+ struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
+
+ /* check if dma is supported */
+ return mas->cur_xfer_mode != GENI_SE_FIFO;
+}
+
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
- int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
+ int ret;
- if (spi_geni_is_abort_still_pending(mas))
- return -EBUSY;
+ switch (mas->cur_xfer_mode) {
+ case GENI_SE_FIFO:
+ if (spi_geni_is_abort_still_pending(mas))
+ return -EBUSY;
+ ret = setup_fifo_params(spi_msg->spi, spi);
+ if (ret)
+ dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ return ret;
- ret = setup_fifo_params(spi_msg->spi, spi);
- if (ret)
- dev_err(mas->dev, "Couldn't select mode %d\n", ret);
+ case GENI_GPI_DMA:
+ /* nothing to do for GPI DMA */
+ return 0;
+ }
+
+ dev_err(mas->dev, "Mode not supported %d", mas->cur_xfer_mode);
+ return -EINVAL;
+}
+
+static int spi_geni_grab_gpi_chan(struct spi_geni_master *mas)
+{
+ int ret;
+
+ mas->tx = dma_request_chan(mas->dev, "tx");
+ ret = dev_err_probe(mas->dev, IS_ERR(mas->tx), "Failed to get tx DMA ch\n");
+ if (ret < 0)
+ goto err_tx;
+
+ mas->rx = dma_request_chan(mas->dev, "rx");
+ ret = dev_err_probe(mas->dev, IS_ERR(mas->rx), "Failed to get rx DMA ch\n");
+ if (ret < 0)
+ goto err_rx;
+
+ return 0;
+
+err_rx:
+ dma_release_channel(mas->tx);
+ mas->tx = NULL;
+err_tx:
+ mas->rx = NULL;
return ret;
}
+static void spi_geni_release_dma_chan(struct spi_geni_master *mas)
+{
+ if (mas->rx) {
+ dma_release_channel(mas->rx);
+ mas->rx = NULL;
+ }
+
+ if (mas->tx) {
+ dma_release_channel(mas->tx);
+ mas->tx = NULL;
+ }
+}
+
static int spi_geni_init(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
- u32 spi_tx_cfg;
+ u32 spi_tx_cfg, fifo_disable;
+ int ret = -ENXIO;
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
- pm_runtime_put(mas->dev);
- return -ENXIO;
+ goto out_pm;
}
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
@@ -380,15 +558,38 @@ static int spi_geni_init(struct spi_geni_master *mas)
else
mas->oversampling = 1;
- geni_se_select_mode(se, GENI_SE_FIFO);
+ fifo_disable = readl(se->base + GENI_IF_DISABLE_RO) & FIFO_IF_DISABLE;
+ switch (fifo_disable) {
+ case 1:
+ ret = spi_geni_grab_gpi_chan(mas);
+ if (!ret) { /* success case */
+ mas->cur_xfer_mode = GENI_GPI_DMA;
+ geni_se_select_mode(se, GENI_GPI_DMA);
+ dev_dbg(mas->dev, "Using GPI DMA mode for SPI\n");
+ break;
+ }
+ /*
+ * If we failed to get a DMA channel we can still run in FIFO
+ * mode, so fall through.
+ */
+ dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
+ fallthrough;
+
+ case 0:
+ mas->cur_xfer_mode = GENI_SE_FIFO;
+ geni_se_select_mode(se, GENI_SE_FIFO);
+ ret = 0;
+ break;
+ }
/* We always control CS manually */
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
spi_tx_cfg &= ~CS_TOGGLE;
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
+out_pm:
pm_runtime_put(mas->dev);
- return 0;
+ return ret;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
@@ -569,8 +770,11 @@ static int spi_geni_transfer_one(struct spi_master *spi,
if (!xfer->len)
return 0;
- setup_fifo_xfer(xfer, mas, slv->mode, spi);
- return 1;
+ if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+ setup_fifo_xfer(xfer, mas, slv->mode, spi);
+ return 1;
+ }
+ return setup_gsi_xfer(xfer, mas, slv, spi);
}
static irqreturn_t geni_spi_isr(int irq, void *data)
@@ -665,6 +869,13 @@ static int spi_geni_probe(struct platform_device *pdev)
if (irq < 0)
return irq;
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret)
+ return dev_err_probe(dev, ret, "could not set DMA mask\n");
+ }
+
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -704,9 +915,10 @@ static int spi_geni_probe(struct platform_device *pdev)
spi->max_speed_hz = 50000000;
spi->prepare_message = spi_geni_prepare_message;
spi->transfer_one = spi_geni_transfer_one;
+ spi->can_dma = geni_can_dma;
+ spi->dma_map_dev = dev->parent;
spi->auto_runtime_pm = true;
spi->handle_err = handle_fifo_timeout;
- spi->set_cs = spi_geni_set_cs;
spi->use_gpio_descriptors = true;
init_completion(&mas->cs_done);
@@ -732,9 +944,17 @@ static int spi_geni_probe(struct platform_device *pdev)
if (ret)
goto spi_geni_probe_runtime_disable;
+ /*
+ * Install set_cs only for FIFO mode; in DMA (GSI) mode the GSI
+ * hardware drives chip select from the parameters passed in the
+ * TRE.
+ */
+ if (mas->cur_xfer_mode == GENI_SE_FIFO)
+ spi->set_cs = spi_geni_set_cs;
+
ret = request_irq(mas->irq, geni_spi_isr, 0, dev_name(dev), spi);
if (ret)
- goto spi_geni_probe_runtime_disable;
+ goto spi_geni_release_dma;
ret = spi_register_master(spi);
if (ret)
@@ -743,6 +963,8 @@ static int spi_geni_probe(struct platform_device *pdev)
return 0;
spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
+spi_geni_release_dma:
+ spi_geni_release_dma_chan(mas);
spi_geni_probe_runtime_disable:
pm_runtime_disable(dev);
return ret;
@@ -756,6 +978,8 @@ static int spi_geni_remove(struct platform_device *pdev)
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(spi);
+ spi_geni_release_dma_chan(mas);
+
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
return 0;
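setup_gsi_xfer() above always prepares a TX descriptor and hangs the completion callback off it, while an RX descriptor is optional; RX is submitted and issued first so it is armed before TX starts clocking data. A stripped-down sketch of that ordering, with a hypothetical helper name and assuming the descriptors were prepared with dmaengine_prep_slave_sg() as in the patch:

#include <linux/dmaengine.h>

static int example_issue_gsi(struct dma_chan *tx, struct dma_chan *rx,
			     struct dma_async_tx_descriptor *tx_desc,
			     struct dma_async_tx_descriptor *rx_desc)
{
	/* RX (if any) goes first so it is armed before TX clocks data out */
	if (rx_desc)
		dmaengine_submit(rx_desc);
	dmaengine_submit(tx_desc);

	if (rx_desc)
		dma_async_issue_pending(rx);
	dma_async_issue_pending(tx);

	return 1;	/* tell the SPI core the transfer is in flight */
}

Returning 1 from transfer_one() is the SPI core's convention for "transfer started, will complete asynchronously", which is why the TX callback ends in spi_finalize_current_transfer().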
diff --git a/drivers/spi/spi-ingenic.c b/drivers/spi/spi-ingenic.c
new file mode 100644
index 000000000000..03077a7e11c8
--- /dev/null
+++ b/drivers/spi/spi-ingenic.c
@@ -0,0 +1,482 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SPI bus driver for the Ingenic JZ47xx SoCs
+ * Copyright (c) 2017-2021 Artur Rojek <contact@artur-rojek.eu>
+ * Copyright (c) 2017-2021 Paul Cercueil <paul@crapouillou.net>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+
+#define REG_SSIDR 0x0
+#define REG_SSICR0 0x4
+#define REG_SSICR1 0x8
+#define REG_SSISR 0xc
+#define REG_SSIGR 0x18
+
+#define REG_SSICR0_TENDIAN_LSB BIT(19)
+#define REG_SSICR0_RENDIAN_LSB BIT(17)
+#define REG_SSICR0_SSIE BIT(15)
+#define REG_SSICR0_LOOP BIT(10)
+#define REG_SSICR0_EACLRUN BIT(7)
+#define REG_SSICR0_FSEL BIT(6)
+#define REG_SSICR0_TFLUSH BIT(2)
+#define REG_SSICR0_RFLUSH BIT(1)
+
+#define REG_SSICR1_FRMHL_MASK (BIT(31) | BIT(30))
+#define REG_SSICR1_FRMHL BIT(30)
+#define REG_SSICR1_LFST BIT(25)
+#define REG_SSICR1_UNFIN BIT(23)
+#define REG_SSICR1_PHA BIT(1)
+#define REG_SSICR1_POL BIT(0)
+
+#define REG_SSISR_END BIT(7)
+#define REG_SSISR_BUSY BIT(6)
+#define REG_SSISR_TFF BIT(5)
+#define REG_SSISR_RFE BIT(4)
+#define REG_SSISR_RFHF BIT(2)
+#define REG_SSISR_UNDR BIT(1)
+#define REG_SSISR_OVER BIT(0)
+
+#define SPI_INGENIC_FIFO_SIZE 128u
+
+struct jz_soc_info {
+ u32 bits_per_word_mask;
+ struct reg_field flen_field;
+ bool has_trendian;
+};
+
+struct ingenic_spi {
+ const struct jz_soc_info *soc_info;
+ struct clk *clk;
+ struct resource *mem_res;
+
+ struct regmap *map;
+ struct regmap_field *flen_field;
+};
+
+static int spi_ingenic_wait(struct ingenic_spi *priv,
+ unsigned long mask,
+ bool condition)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(priv->map, REG_SSISR, val,
+ !!(val & mask) == condition,
+ 100, 10000);
+}
+
+static void spi_ingenic_set_cs(struct spi_device *spi, bool disable)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(spi->controller);
+
+ if (disable) {
+ regmap_clear_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
+ regmap_clear_bits(priv->map, REG_SSISR,
+ REG_SSISR_UNDR | REG_SSISR_OVER);
+
+ spi_ingenic_wait(priv, REG_SSISR_END, true);
+ } else {
+ regmap_set_bits(priv->map, REG_SSICR1, REG_SSICR1_UNFIN);
+ }
+
+ regmap_set_bits(priv->map, REG_SSICR0,
+ REG_SSICR0_RFLUSH | REG_SSICR0_TFLUSH);
+}
+
+static void spi_ingenic_prepare_transfer(struct ingenic_spi *priv,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ unsigned long clk_hz = clk_get_rate(priv->clk);
+ u32 cdiv, speed_hz = xfer->speed_hz ?: spi->max_speed_hz,
+ bits_per_word = xfer->bits_per_word ?: spi->bits_per_word;
+
+ cdiv = clk_hz / (speed_hz * 2);
+ cdiv = clamp(cdiv, 1u, 0x100u) - 1;
+
+ regmap_write(priv->map, REG_SSIGR, cdiv);
+
+ regmap_field_write(priv->flen_field, bits_per_word - 2);
+}
+
+static void spi_ingenic_finalize_transfer(void *controller)
+{
+ spi_finalize_current_transfer(controller);
+}
+
+static struct dma_async_tx_descriptor *
+spi_ingenic_prepare_dma(struct spi_controller *ctlr, struct dma_chan *chan,
+ struct sg_table *sg, enum dma_transfer_direction dir,
+ unsigned int bits)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ struct dma_slave_config cfg = {
+ .direction = dir,
+ .src_addr = priv->mem_res->start + REG_SSIDR,
+ .dst_addr = priv->mem_res->start + REG_SSIDR,
+ };
+ struct dma_async_tx_descriptor *desc;
+ dma_cookie_t cookie;
+ int ret;
+
+ if (bits > 16) {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ cfg.src_maxburst = cfg.dst_maxburst = 4;
+ } else if (bits > 8) {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ cfg.src_maxburst = cfg.dst_maxburst = 2;
+ } else {
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ cfg.src_maxburst = cfg.dst_maxburst = 1;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret)
+ return ERR_PTR(ret);
+
+ desc = dmaengine_prep_slave_sg(chan, sg->sgl, sg->nents, dir,
+ DMA_PREP_INTERRUPT);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ if (dir == DMA_DEV_TO_MEM) {
+ desc->callback = spi_ingenic_finalize_transfer;
+ desc->callback_param = ctlr;
+ }
+
+ cookie = dmaengine_submit(desc);
+
+ ret = dma_submit_error(cookie);
+ if (ret) {
+ dmaengine_desc_free(desc);
+ return ERR_PTR(ret);
+ }
+
+ return desc;
+}
+
+static int spi_ingenic_dma_tx(struct spi_controller *ctlr,
+ struct spi_transfer *xfer, unsigned int bits)
+{
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+
+ rx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_rx,
+ &xfer->rx_sg, DMA_DEV_TO_MEM, bits);
+ if (IS_ERR(rx_desc))
+ return PTR_ERR(rx_desc);
+
+ tx_desc = spi_ingenic_prepare_dma(ctlr, ctlr->dma_tx,
+ &xfer->tx_sg, DMA_MEM_TO_DEV, bits);
+ if (IS_ERR(tx_desc)) {
+ dmaengine_terminate_async(ctlr->dma_rx);
+ dmaengine_desc_free(rx_desc);
+ return PTR_ERR(tx_desc);
+ }
+
+ dma_async_issue_pending(ctlr->dma_rx);
+ dma_async_issue_pending(ctlr->dma_tx);
+
+ return 1;
+}
+
+#define SPI_INGENIC_TX(x) \
+static int spi_ingenic_tx##x(struct ingenic_spi *priv, \
+ struct spi_transfer *xfer) \
+{ \
+ unsigned int count = xfer->len / (x / 8); \
+ unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE); \
+ const u##x *tx_buf = xfer->tx_buf; \
+ u##x *rx_buf = xfer->rx_buf; \
+ unsigned int i, val; \
+ int err; \
+ \
+ /* Fill up the TX fifo */ \
+ for (i = 0; i < prefill; i++) { \
+ val = tx_buf ? tx_buf[i] : 0; \
+ \
+ regmap_write(priv->map, REG_SSIDR, val); \
+ } \
+ \
+ for (i = 0; i < count; i++) { \
+ err = spi_ingenic_wait(priv, REG_SSISR_RFE, false); \
+ if (err) \
+ return err; \
+ \
+ regmap_read(priv->map, REG_SSIDR, &val); \
+ if (rx_buf) \
+ rx_buf[i] = val; \
+ \
+ if (i < count - prefill) { \
+ val = tx_buf ? tx_buf[i + prefill] : 0; \
+ \
+ regmap_write(priv->map, REG_SSIDR, val); \
+ } \
+ } \
+ \
+ return 0; \
+}
+SPI_INGENIC_TX(8)
+SPI_INGENIC_TX(16)
+SPI_INGENIC_TX(32)
+#undef SPI_INGENIC_TX
+
+static int spi_ingenic_transfer_one(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ unsigned int bits = xfer->bits_per_word ?: spi->bits_per_word;
+ bool can_dma = ctlr->can_dma && ctlr->can_dma(ctlr, spi, xfer);
+
+ spi_ingenic_prepare_transfer(priv, spi, xfer);
+
+ if (ctlr->cur_msg_mapped && can_dma)
+ return spi_ingenic_dma_tx(ctlr, xfer, bits);
+
+ if (bits > 16)
+ return spi_ingenic_tx32(priv, xfer);
+
+ if (bits > 8)
+ return spi_ingenic_tx16(priv, xfer);
+
+ return spi_ingenic_tx8(priv, xfer);
+}
+
+static int spi_ingenic_prepare_message(struct spi_controller *ctlr,
+ struct spi_message *message)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ struct spi_device *spi = message->spi;
+ unsigned int cs = REG_SSICR1_FRMHL << spi->chip_select;
+ unsigned int ssicr0_mask = REG_SSICR0_LOOP | REG_SSICR0_FSEL;
+ unsigned int ssicr1_mask = REG_SSICR1_PHA | REG_SSICR1_POL | cs;
+ unsigned int ssicr0 = 0, ssicr1 = 0;
+
+ if (priv->soc_info->has_trendian) {
+ ssicr0_mask |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ ssicr0 |= REG_SSICR0_RENDIAN_LSB | REG_SSICR0_TENDIAN_LSB;
+ } else {
+ ssicr1_mask |= REG_SSICR1_LFST;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ ssicr1 |= REG_SSICR1_LFST;
+ }
+
+ if (spi->mode & SPI_LOOP)
+ ssicr0 |= REG_SSICR0_LOOP;
+ if (spi->chip_select)
+ ssicr0 |= REG_SSICR0_FSEL;
+
+ if (spi->mode & SPI_CPHA)
+ ssicr1 |= REG_SSICR1_PHA;
+ if (spi->mode & SPI_CPOL)
+ ssicr1 |= REG_SSICR1_POL;
+ if (spi->mode & SPI_CS_HIGH)
+ ssicr1 |= cs;
+
+ regmap_update_bits(priv->map, REG_SSICR0, ssicr0_mask, ssicr0);
+ regmap_update_bits(priv->map, REG_SSICR1, ssicr1_mask, ssicr1);
+
+ return 0;
+}
+
+static int spi_ingenic_prepare_hardware(struct spi_controller *ctlr)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
+
+ regmap_write(priv->map, REG_SSICR0, REG_SSICR0_EACLRUN);
+ regmap_write(priv->map, REG_SSICR1, 0);
+ regmap_write(priv->map, REG_SSISR, 0);
+ regmap_set_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
+
+ return 0;
+}
+
+static int spi_ingenic_unprepare_hardware(struct spi_controller *ctlr)
+{
+ struct ingenic_spi *priv = spi_controller_get_devdata(ctlr);
+
+ regmap_clear_bits(priv->map, REG_SSICR0, REG_SSICR0_SSIE);
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static bool spi_ingenic_can_dma(struct spi_controller *ctlr,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_caps caps;
+ int ret;
+
+ ret = dma_get_slave_caps(ctlr->dma_tx, &caps);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to get slave caps: %d\n", ret);
+ return false;
+ }
+
+ return !caps.max_sg_burst ||
+ xfer->len <= caps.max_sg_burst * SPI_INGENIC_FIFO_SIZE;
+}
+
+static int spi_ingenic_request_dma(struct spi_controller *ctlr,
+ struct device *dev)
+{
+ ctlr->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!ctlr->dma_tx)
+ return -ENODEV;
+
+ ctlr->dma_rx = dma_request_slave_channel(dev, "rx");
+
+ if (!ctlr->dma_rx)
+ return -ENODEV;
+
+ ctlr->can_dma = spi_ingenic_can_dma;
+
+ return 0;
+}
+
+static void spi_ingenic_release_dma(void *data)
+{
+ struct spi_controller *ctlr = data;
+
+ if (ctlr->dma_tx)
+ dma_release_channel(ctlr->dma_tx);
+ if (ctlr->dma_rx)
+ dma_release_channel(ctlr->dma_rx);
+}
+
+static const struct regmap_config spi_ingenic_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_SSIGR,
+};
+
+static int spi_ingenic_probe(struct platform_device *pdev)
+{
+ const struct jz_soc_info *pdata;
+ struct device *dev = &pdev->dev;
+ struct spi_controller *ctlr;
+ struct ingenic_spi *priv;
+ void __iomem *base;
+ int ret;
+
+ pdata = of_device_get_match_data(dev);
+ if (!pdata) {
+ dev_err(dev, "Missing platform data.\n");
+ return -EINVAL;
+ }
+
+ ctlr = devm_spi_alloc_master(dev, sizeof(*priv));
+ if (!ctlr) {
+ dev_err(dev, "Unable to allocate SPI controller.\n");
+ return -ENOMEM;
+ }
+
+ priv = spi_controller_get_devdata(ctlr);
+ priv->soc_info = pdata;
+
+ priv->clk = devm_clk_get(dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ return dev_err_probe(dev, PTR_ERR(priv->clk),
+ "Unable to get clock.\n");
+ }
+
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, &priv->mem_res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ priv->map = devm_regmap_init_mmio(dev, base, &spi_ingenic_regmap_config);
+ if (IS_ERR(priv->map))
+ return PTR_ERR(priv->map);
+
+ priv->flen_field = devm_regmap_field_alloc(dev, priv->map,
+ pdata->flen_field);
+ if (IS_ERR(priv->flen_field))
+ return PTR_ERR(priv->flen_field);
+
+ platform_set_drvdata(pdev, ctlr);
+
+ ctlr->prepare_transfer_hardware = spi_ingenic_prepare_hardware;
+ ctlr->unprepare_transfer_hardware = spi_ingenic_unprepare_hardware;
+ ctlr->prepare_message = spi_ingenic_prepare_message;
+ ctlr->set_cs = spi_ingenic_set_cs;
+ ctlr->transfer_one = spi_ingenic_transfer_one;
+ ctlr->mode_bits = SPI_MODE_3 | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH;
+ ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
+ ctlr->max_dma_len = SPI_INGENIC_FIFO_SIZE;
+ ctlr->bits_per_word_mask = pdata->bits_per_word_mask;
+ ctlr->min_speed_hz = 7200;
+ ctlr->max_speed_hz = 54000000;
+ ctlr->num_chipselect = 2;
+ ctlr->dev.of_node = pdev->dev.of_node;
+
+ if (spi_ingenic_request_dma(ctlr, dev))
+ dev_warn(dev, "DMA not available.\n");
+
+ ret = devm_add_action_or_reset(dev, spi_ingenic_release_dma, ctlr);
+ if (ret) {
+ dev_err(dev, "Unable to add action.\n");
+ return ret;
+ }
+
+ ret = devm_spi_register_controller(dev, ctlr);
+ if (ret)
+ dev_err(dev, "Unable to register SPI controller.\n");
+
+ return ret;
+}
+
+static const struct jz_soc_info jz4750_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 17),
+ .flen_field = REG_FIELD(REG_SSICR1, 4, 7),
+ .has_trendian = false,
+};
+
+static const struct jz_soc_info jz4780_soc_info = {
+ .bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 32),
+ .flen_field = REG_FIELD(REG_SSICR1, 3, 7),
+ .has_trendian = true,
+};
+
+static const struct of_device_id spi_ingenic_of_match[] = {
+ { .compatible = "ingenic,jz4750-spi", .data = &jz4750_soc_info },
+ { .compatible = "ingenic,jz4780-spi", .data = &jz4780_soc_info },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_ingenic_of_match);
+
+static struct platform_driver spi_ingenic_driver = {
+ .driver = {
+ .name = "spi-ingenic",
+ .of_match_table = spi_ingenic_of_match,
+ },
+ .probe = spi_ingenic_probe,
+};
+
+module_platform_driver(spi_ingenic_driver);
+MODULE_DESCRIPTION("SPI bus driver for the Ingenic JZ47xx SoCs");
+MODULE_AUTHOR("Artur Rojek <contact@artur-rojek.eu>");
+MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
+MODULE_LICENSE("GPL");
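SPI_INGENIC_TX() above stamps out one PIO routine per word width, all following the same prefill-then-lockstep pattern: fill the 128-entry FIFO up front, then read one word back for every word still outstanding, topping the FIFO up as long as data remains. Expanded by hand for 8-bit words (illustrative only, not part of the patch):

static int spi_ingenic_tx8_expanded(struct ingenic_spi *priv,
				    struct spi_transfer *xfer)
{
	unsigned int count = xfer->len;		/* one byte per word */
	unsigned int prefill = min(count, SPI_INGENIC_FIFO_SIZE);
	const u8 *tx_buf = xfer->tx_buf;
	u8 *rx_buf = xfer->rx_buf;
	unsigned int i, val;
	int err;

	/* Fill the TX FIFO up front so the controller never starves. */
	for (i = 0; i < prefill; i++)
		regmap_write(priv->map, REG_SSIDR, tx_buf ? tx_buf[i] : 0);

	for (i = 0; i < count; i++) {
		/* Wait until the RX FIFO is no longer empty. */
		err = spi_ingenic_wait(priv, REG_SSISR_RFE, false);
		if (err)
			return err;

		regmap_read(priv->map, REG_SSIDR, &val);
		if (rx_buf)
			rx_buf[i] = val;

		/* Refill TX while words remain beyond the initial prefill. */
		if (i < count - prefill)
			regmap_write(priv->map, REG_SSIDR,
				     tx_buf ? tx_buf[i + prefill] : 0);
	}

	return 0;
}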
diff --git a/drivers/spi/spi-mtk-nor.c b/drivers/spi/spi-mtk-nor.c
index 41e7b341d261..5c93730615f8 100644
--- a/drivers/spi/spi-mtk-nor.c
+++ b/drivers/spi/spi-mtk-nor.c
@@ -160,7 +160,7 @@ static bool mtk_nor_match_read(const struct spi_mem_op *op)
{
int dummy = 0;
- if (op->dummy.buswidth)
+ if (op->dummy.nbytes)
dummy = op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth;
if ((op->data.buswidth == 2) || (op->data.buswidth == 4)) {
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e8de3cbbfb2a..565cd4c48d7b 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -769,6 +769,7 @@ static int orion_spi_probe(struct platform_device *pdev)
dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (!dir_acc->vaddr) {
status = -ENOMEM;
+ of_node_put(np);
goto out_rel_axi_clk;
}
dir_acc->size = PAGE_SIZE;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index feebda66f56e..e4484ace584e 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1716,12 +1716,13 @@ static int verify_controller_parameters(struct pl022 *pl022,
return -EINVAL;
}
} else {
- if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
dev_err(&pl022->adev->dev,
"Microwire half duplex mode requested,"
" but this is only available in the"
" ST version of PL022\n");
- return -EINVAL;
+ return -EINVAL;
+ }
}
}
return 0;
diff --git a/drivers/spi/spi-rpc-if.c b/drivers/spi/spi-rpc-if.c
index c53138ce0030..83796a4ead34 100644
--- a/drivers/spi/spi-rpc-if.c
+++ b/drivers/spi/spi-rpc-if.c
@@ -139,7 +139,9 @@ static int rpcif_spi_probe(struct platform_device *pdev)
return -ENOMEM;
rpc = spi_controller_get_devdata(ctlr);
- rpcif_sw_init(rpc, parent);
+ error = rpcif_sw_init(rpc, parent);
+ if (error)
+ return error;
platform_set_drvdata(pdev, ctlr);
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index d16ed88802d3..41761f0d892a 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1427,4 +1427,3 @@ module_platform_driver(rspi_driver);
MODULE_DESCRIPTION("Renesas RSPI bus driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
-MODULE_ALIAS("platform:rspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index f88d9acd20d9..d0012b30410c 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -1426,4 +1426,3 @@ module_platform_driver(sh_msiof_spi_drv);
MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c
index 27f35aa2d746..514337c86d2c 100644
--- a/drivers/spi/spi-stm32-qspi.c
+++ b/drivers/spi/spi-stm32-qspi.c
@@ -397,7 +397,7 @@ static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
}
- if (op->dummy.buswidth && op->dummy.nbytes)
+ if (op->dummy.nbytes)
ccr |= FIELD_PREP(CCR_DCYC_MASK,
op->dummy.nbytes * 8 / op->dummy.buswidth);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 713292b0c71e..e8204e155484 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1124,7 +1124,7 @@ exit_free_irq:
exit_pm_put:
pm_runtime_put(&pdev->dev);
exit_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
tegra_slink_deinit_dma_param(tspi, false);
exit_rx_dma_free:
@@ -1143,7 +1143,7 @@ static int tegra_slink_remove(struct platform_device *pdev)
free_irq(tspi->irq, tspi);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
@@ -1194,7 +1194,7 @@ static int __maybe_unused tegra_slink_runtime_suspend(struct device *dev)
return 0;
}
-static int tegra_slink_runtime_resume(struct device *dev)
+static int __maybe_unused tegra_slink_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct tegra_slink_data *tspi = spi_master_get_devdata(master);
diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c
index 2354ca1e3858..c0f9a75b44b5 100644
--- a/drivers/spi/spi-tegra210-quad.c
+++ b/drivers/spi/spi-tegra210-quad.c
@@ -1318,7 +1318,7 @@ static int tegra_qspi_probe(struct platform_device *pdev)
exit_free_irq:
free_irq(qspi_irq, tqspi);
exit_pm_disable:
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
return ret;
}
@@ -1330,7 +1330,7 @@ static int tegra_qspi_remove(struct platform_device *pdev)
spi_unregister_master(master);
free_irq(tqspi->irq, tqspi);
- pm_runtime_disable(&pdev->dev);
+ pm_runtime_force_suspend(&pdev->dev);
tegra_qspi_deinit_dma(tqspi);
return 0;
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
index 60dc69a39ace..f8ad0709d015 100644
--- a/drivers/spi/spi-tle62x0.c
+++ b/drivers/spi/spi-tle62x0.c
@@ -141,7 +141,7 @@ static ssize_t tle62x0_gpio_show(struct device *dev,
value = (st->gpio_state >> gpio_num) & 1;
mutex_unlock(&st->lock);
- return snprintf(buf, PAGE_SIZE, "%d", value);
+ return sysfs_emit(buf, "%d", value);
}
static ssize_t tle62x0_gpio_store(struct device *dev,
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 926b68aa45d3..b23e675953e1 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -285,9 +285,9 @@ static const struct attribute_group *spi_master_groups[] = {
NULL,
};
-void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
- struct spi_transfer *xfer,
- struct spi_controller *ctlr)
+static void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
+ struct spi_transfer *xfer,
+ struct spi_controller *ctlr)
{
unsigned long flags;
int l2len = min(fls(xfer->len), SPI_STATISTICS_HISTO_SIZE) - 1;
@@ -310,7 +310,6 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
spin_unlock_irqrestore(&stats->lock, flags);
}
-EXPORT_SYMBOL_GPL(spi_statistics_add_transfer_stats);
/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
* and the sysfs version makes coldplug work too.
@@ -451,6 +450,47 @@ int __spi_register_driver(struct module *owner, struct spi_driver *sdrv)
{
sdrv->driver.owner = owner;
sdrv->driver.bus = &spi_bus_type;
+
+ /*
+ * For Really Good Reasons we use spi: modaliases not of:
+ * modaliases for DT so module autoloading won't work if we
+ * don't have a spi_device_id as well as a compatible string.
+ */
+ if (sdrv->driver.of_match_table) {
+ const struct of_device_id *of_id;
+
+ for (of_id = sdrv->driver.of_match_table; of_id->compatible[0];
+ of_id++) {
+ const char *of_name;
+
+ /* Strip off any vendor prefix */
+ of_name = strnchr(of_id->compatible,
+ sizeof(of_id->compatible), ',');
+ if (of_name)
+ of_name++;
+ else
+ of_name = of_id->compatible;
+
+ if (sdrv->id_table) {
+ const struct spi_device_id *spi_id;
+
+ for (spi_id = sdrv->id_table; spi_id->name[0];
+ spi_id++)
+ if (strcmp(spi_id->name, of_name) == 0)
+ break;
+
+ if (spi_id->name[0])
+ continue;
+ } else {
+ if (strcmp(sdrv->driver.name, of_name) == 0)
+ continue;
+ }
+
+ pr_warn("SPI driver %s has no spi_device_id for %s\n",
+ sdrv->driver.name, of_id->compatible);
+ }
+ }
+
return driver_register(&sdrv->driver);
}
EXPORT_SYMBOL_GPL(__spi_register_driver);
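The new loop in __spi_register_driver() warns when a DT compatible has no matching spi_device_id, because SPI autoloads modules from "spi:" modaliases rather than "of:" ones and autoloading breaks without the id table entry. A hypothetical driver skeleton that satisfies the check (all names here are made up):

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static const struct spi_device_id foo_spi_ids[] = {
	{ "foo-chip" },			/* compatible minus the vendor prefix */
	{ }
};
MODULE_DEVICE_TABLE(spi, foo_spi_ids);

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-chip" },
	{ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct spi_device *spi)
{
	return 0;
}

static struct spi_driver foo_driver = {
	.driver = {
		.name		= "foo-chip",
		.of_match_table	= foo_of_match,
	},
	.probe		= foo_probe,
	.id_table	= foo_spi_ids,
};
module_spi_driver(foo_driver);

MODULE_LICENSE("GPL");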
@@ -495,7 +535,7 @@ static DEFINE_MUTEX(board_lock);
*
* Return: a pointer to the new device, or NULL.
*/
-struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
+static struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
{
struct spi_device *spi;
@@ -520,7 +560,6 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
device_initialize(&spi->dev);
return spi;
}
-EXPORT_SYMBOL_GPL(spi_alloc_device);
static void spi_dev_set_name(struct spi_device *spi)
{
@@ -558,6 +597,11 @@ static int __spi_add_device(struct spi_device *spi)
struct device *dev = ctlr->dev.parent;
int status;
+ /*
+ * We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration.
+ */
status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
if (status) {
dev_err(dev, "chipselect %d already in use\n",
@@ -610,7 +654,7 @@ static int __spi_add_device(struct spi_device *spi)
*
* Return: 0 on success; negative errno on failure
*/
-int spi_add_device(struct spi_device *spi)
+static int spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
@@ -626,16 +670,11 @@ int spi_add_device(struct spi_device *spi)
/* Set the bus ID string */
spi_dev_set_name(spi);
- /* We need to make sure there's no other device with this
- * chipselect **BEFORE** we call setup(), else we'll trash
- * its configuration. Lock against concurrent add() calls.
- */
mutex_lock(&ctlr->add_lock);
status = __spi_add_device(spi);
mutex_unlock(&ctlr->add_lock);
return status;
}
-EXPORT_SYMBOL_GPL(spi_add_device);
static int spi_add_device_locked(struct spi_device *spi)
{
@@ -810,6 +849,87 @@ int spi_register_board_info(struct spi_board_info const *info, unsigned n)
/*-------------------------------------------------------------------------*/
+/* Core methods for SPI resource management */
+
+/**
+ * spi_res_alloc - allocate a spi resource that is life-cycle managed
+ * during the processing of a spi_message while using
+ * spi_transfer_one
+ * @spi: the spi device for which we allocate memory
+ * @release: the release code to execute for this resource
+ * @size: size to alloc and return
+ * @gfp: GFP allocation flags
+ *
+ * Return: the pointer to the allocated data
+ *
+ * This may get enhanced in the future to allocate from a memory pool
+ * of the @spi_device or @spi_controller to avoid repeated allocations.
+ */
+static void *spi_res_alloc(struct spi_device *spi, spi_res_release_t release,
+ size_t size, gfp_t gfp)
+{
+ struct spi_res *sres;
+
+ sres = kzalloc(sizeof(*sres) + size, gfp);
+ if (!sres)
+ return NULL;
+
+ INIT_LIST_HEAD(&sres->entry);
+ sres->release = release;
+
+ return sres->data;
+}
+
+/**
+ * spi_res_free - free an spi resource
+ * @res: pointer to the custom data of a resource
+ *
+ */
+static void spi_res_free(void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ if (!res)
+ return;
+
+ WARN_ON(!list_empty(&sres->entry));
+ kfree(sres);
+}
+
+/**
+ * spi_res_add - add a spi_res to the spi_message
+ * @message: the spi message
+ * @res: the spi_resource
+ */
+static void spi_res_add(struct spi_message *message, void *res)
+{
+ struct spi_res *sres = container_of(res, struct spi_res, data);
+
+ WARN_ON(!list_empty(&sres->entry));
+ list_add_tail(&sres->entry, &message->resources);
+}
+
+/**
+ * spi_res_release - release all spi resources for this message
+ * @ctlr: the @spi_controller
+ * @message: the @spi_message
+ */
+static void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
+{
+ struct spi_res *res, *tmp;
+
+ list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
+ if (res->release)
+ res->release(ctlr, message, res->data);
+
+ list_del(&res->entry);
+
+ kfree(res);
+ }
+}
+
+/*-------------------------------------------------------------------------*/
+
static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
{
bool activate = enable;
@@ -3027,127 +3147,6 @@ int spi_controller_resume(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_controller_resume);
-static int __spi_controller_match(struct device *dev, const void *data)
-{
- struct spi_controller *ctlr;
- const u16 *bus_num = data;
-
- ctlr = container_of(dev, struct spi_controller, dev);
- return ctlr->bus_num == *bus_num;
-}
-
-/**
- * spi_busnum_to_master - look up master associated with bus_num
- * @bus_num: the master's bus number
- * Context: can sleep
- *
- * This call may be used with devices that are registered after
- * arch init time. It returns a refcounted pointer to the relevant
- * spi_controller (which the caller must release), or NULL if there is
- * no such master registered.
- *
- * Return: the SPI master structure on success, else NULL.
- */
-struct spi_controller *spi_busnum_to_master(u16 bus_num)
-{
- struct device *dev;
- struct spi_controller *ctlr = NULL;
-
- dev = class_find_device(&spi_master_class, NULL, &bus_num,
- __spi_controller_match);
- if (dev)
- ctlr = container_of(dev, struct spi_controller, dev);
- /* reference got in class_find_device */
- return ctlr;
-}
-EXPORT_SYMBOL_GPL(spi_busnum_to_master);
-
-/*-------------------------------------------------------------------------*/
-
-/* Core methods for SPI resource management */
-
-/**
- * spi_res_alloc - allocate a spi resource that is life-cycle managed
- * during the processing of a spi_message while using
- * spi_transfer_one
- * @spi: the spi device for which we allocate memory
- * @release: the release code to execute for this resource
- * @size: size to alloc and return
- * @gfp: GFP allocation flags
- *
- * Return: the pointer to the allocated data
- *
- * This may get enhanced in the future to allocate from a memory pool
- * of the @spi_device or @spi_controller to avoid repeated allocations.
- */
-void *spi_res_alloc(struct spi_device *spi,
- spi_res_release_t release,
- size_t size, gfp_t gfp)
-{
- struct spi_res *sres;
-
- sres = kzalloc(sizeof(*sres) + size, gfp);
- if (!sres)
- return NULL;
-
- INIT_LIST_HEAD(&sres->entry);
- sres->release = release;
-
- return sres->data;
-}
-EXPORT_SYMBOL_GPL(spi_res_alloc);
-
-/**
- * spi_res_free - free an spi resource
- * @res: pointer to the custom data of a resource
- *
- */
-void spi_res_free(void *res)
-{
- struct spi_res *sres = container_of(res, struct spi_res, data);
-
- if (!res)
- return;
-
- WARN_ON(!list_empty(&sres->entry));
- kfree(sres);
-}
-EXPORT_SYMBOL_GPL(spi_res_free);
-
-/**
- * spi_res_add - add a spi_res to the spi_message
- * @message: the spi message
- * @res: the spi_resource
- */
-void spi_res_add(struct spi_message *message, void *res)
-{
- struct spi_res *sres = container_of(res, struct spi_res, data);
-
- WARN_ON(!list_empty(&sres->entry));
- list_add_tail(&sres->entry, &message->resources);
-}
-EXPORT_SYMBOL_GPL(spi_res_add);
-
-/**
- * spi_res_release - release all spi resources for this message
- * @ctlr: the @spi_controller
- * @message: the @spi_message
- */
-void spi_res_release(struct spi_controller *ctlr, struct spi_message *message)
-{
- struct spi_res *res, *tmp;
-
- list_for_each_entry_safe_reverse(res, tmp, &message->resources, entry) {
- if (res->release)
- res->release(ctlr, message, res->data);
-
- list_del(&res->entry);
-
- kfree(res);
- }
-}
-EXPORT_SYMBOL_GPL(spi_res_release);
-
/*-------------------------------------------------------------------------*/
/* Core methods for spi_message alterations */
@@ -3186,7 +3185,7 @@ static void __spi_replace_transfers_release(struct spi_controller *ctlr,
* Returns: pointer to @spi_replaced_transfers,
* PTR_ERR(...) in case of errors.
*/
-struct spi_replaced_transfers *spi_replace_transfers(
+static struct spi_replaced_transfers *spi_replace_transfers(
struct spi_message *msg,
struct spi_transfer *xfer_first,
size_t remove,
@@ -3278,7 +3277,6 @@ struct spi_replaced_transfers *spi_replace_transfers(
return rxfer;
}
-EXPORT_SYMBOL_GPL(spi_replace_transfers);
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
@@ -3828,7 +3826,7 @@ EXPORT_SYMBOL_GPL(spi_async);
*
* Return: zero on success, else a negative error code.
*/
-int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
int ret;
@@ -3847,7 +3845,6 @@ int spi_async_locked(struct spi_device *spi, struct spi_message *message)
return ret;
}
-EXPORT_SYMBOL_GPL(spi_async_locked);
/*-------------------------------------------------------------------------*/
@@ -4105,18 +4102,15 @@ EXPORT_SYMBOL_GPL(spi_write_then_read);
/*-------------------------------------------------------------------------*/
-#if IS_ENABLED(CONFIG_OF)
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* must call put_device() when done with returned spi_device device */
-struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
{
struct device *dev = bus_find_device_by_of_node(&spi_bus_type, node);
return dev ? to_spi_device(dev) : NULL;
}
-EXPORT_SYMBOL_GPL(of_find_spi_device_by_node);
-#endif /* IS_ENABLED(CONFIG_OF) */
-#if IS_ENABLED(CONFIG_OF_DYNAMIC)
/* the spi controllers are not using spi_bus, so we find it with another way */
static struct spi_controller *of_find_spi_controller_by_node(struct device_node *node)
{
diff --git a/drivers/staging/axis-fifo/axis-fifo.c b/drivers/staging/axis-fifo/axis-fifo.c
index b23eabb863d1..632f140dddbc 100644
--- a/drivers/staging/axis-fifo/axis-fifo.c
+++ b/drivers/staging/axis-fifo/axis-fifo.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
+#include <linux/miscdevice.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
@@ -102,9 +103,6 @@
* globals
* ----------------------------
*/
-
-static struct class *axis_fifo_driver_class; /* char device class */
-
static int read_timeout = 1000; /* ms to wait before read() times out */
static int write_timeout = 1000; /* ms to wait before write() times out */
@@ -140,9 +138,7 @@ struct axis_fifo {
unsigned int read_flags; /* read file flags */
struct device *dt_device; /* device created from the device tree */
- struct device *device; /* device associated with char_device */
- dev_t devt; /* our char device number */
- struct cdev char_device; /* our char device */
+ struct miscdevice miscdev;
};
/* ----------------------------
@@ -319,6 +315,11 @@ static const struct attribute_group axis_fifo_attrs_group = {
.attrs = axis_fifo_attrs,
};
+static const struct attribute_group *axis_fifo_attrs_groups[] = {
+ &axis_fifo_attrs_group,
+ NULL,
+};
+
/* ----------------------------
* implementation
* ----------------------------
@@ -684,8 +685,8 @@ static irqreturn_t axis_fifo_irq(int irq, void *dw)
static int axis_fifo_open(struct inode *inod, struct file *f)
{
- struct axis_fifo *fifo = (struct axis_fifo *)container_of(inod->i_cdev,
- struct axis_fifo, char_device);
+ struct axis_fifo *fifo = container_of(f->private_data,
+ struct axis_fifo, miscdev);
f->private_data = fifo;
if (((f->f_flags & O_ACCMODE) == O_WRONLY) ||
@@ -812,9 +813,7 @@ static int axis_fifo_probe(struct platform_device *pdev)
struct resource *r_mem; /* IO mem resources */
struct device *dev = &pdev->dev; /* OS device (from device tree) */
struct axis_fifo *fifo = NULL;
-
- char device_name[32];
-
+ char *device_name;
int rc = 0; /* error return value */
/* ----------------------------
@@ -822,8 +821,12 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
+ device_name = devm_kzalloc(dev, 32, GFP_KERNEL);
+ if (!device_name)
+ return -ENOMEM;
+
/* allocate device wrapper memory */
- fifo = devm_kmalloc(dev, sizeof(*fifo), GFP_KERNEL);
+ fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
if (!fifo)
return -ENOMEM;
@@ -859,9 +862,7 @@ static int axis_fifo_probe(struct platform_device *pdev)
dev_dbg(fifo->dt_device, "remapped memory to 0x%p\n", fifo->base_addr);
/* create unique device name */
- snprintf(device_name, sizeof(device_name), "%s_%pa",
- DRIVER_NAME, &r_mem->start);
-
+ snprintf(device_name, 32, "%s_%pa", DRIVER_NAME, &r_mem->start);
dev_dbg(fifo->dt_device, "device name [%s]\n", device_name);
/* ----------------------------
@@ -904,51 +905,21 @@ static int axis_fifo_probe(struct platform_device *pdev)
* ----------------------------
*/
- /* allocate device number */
- rc = alloc_chrdev_region(&fifo->devt, 0, 1, DRIVER_NAME);
+ /* create character device */
+ fifo->miscdev.fops = &fops;
+ fifo->miscdev.minor = MISC_DYNAMIC_MINOR;
+ fifo->miscdev.name = device_name;
+ fifo->miscdev.groups = axis_fifo_attrs_groups;
+ fifo->miscdev.parent = dev;
+ rc = misc_register(&fifo->miscdev);
if (rc < 0)
goto err_initial;
- dev_dbg(fifo->dt_device, "allocated device number major %i minor %i\n",
- MAJOR(fifo->devt), MINOR(fifo->devt));
-
- /* create driver file */
- fifo->device = device_create(axis_fifo_driver_class, NULL, fifo->devt,
- NULL, device_name);
- if (IS_ERR(fifo->device)) {
- dev_err(fifo->dt_device,
- "couldn't create driver file\n");
- rc = PTR_ERR(fifo->device);
- goto err_chrdev_region;
- }
- dev_set_drvdata(fifo->device, fifo);
-
- /* create character device */
- cdev_init(&fifo->char_device, &fops);
- rc = cdev_add(&fifo->char_device, fifo->devt, 1);
- if (rc < 0) {
- dev_err(fifo->dt_device, "couldn't create character device\n");
- goto err_dev;
- }
-
- /* create sysfs entries */
- rc = devm_device_add_group(fifo->device, &axis_fifo_attrs_group);
- if (rc < 0) {
- dev_err(fifo->dt_device, "couldn't register sysfs group\n");
- goto err_cdev;
- }
- dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i, major=%i, minor=%i\n",
- &r_mem->start, &fifo->base_addr, fifo->irq,
- MAJOR(fifo->devt), MINOR(fifo->devt));
+ dev_info(fifo->dt_device, "axis-fifo created at %pa mapped to 0x%pa, irq=%i\n",
+ &r_mem->start, &fifo->base_addr, fifo->irq);
return 0;
-err_cdev:
- cdev_del(&fifo->char_device);
-err_dev:
- device_destroy(axis_fifo_driver_class, fifo->devt);
-err_chrdev_region:
- unregister_chrdev_region(fifo->devt, 1);
err_initial:
dev_set_drvdata(dev, NULL);
return rc;
@@ -959,10 +930,7 @@ static int axis_fifo_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct axis_fifo *fifo = dev_get_drvdata(dev);
- cdev_del(&fifo->char_device);
- dev_set_drvdata(fifo->device, NULL);
- device_destroy(axis_fifo_driver_class, fifo->devt);
- unregister_chrdev_region(fifo->devt, 1);
+ misc_deregister(&fifo->miscdev);
dev_set_drvdata(dev, NULL);
return 0;
@@ -987,9 +955,6 @@ static int __init axis_fifo_init(void)
{
pr_info("axis-fifo driver loaded with parameters read_timeout = %i, write_timeout = %i\n",
read_timeout, write_timeout);
- axis_fifo_driver_class = class_create(THIS_MODULE, DRIVER_NAME);
- if (IS_ERR(axis_fifo_driver_class))
- return PTR_ERR(axis_fifo_driver_class);
return platform_driver_register(&axis_fifo_driver);
}
@@ -998,7 +963,6 @@ module_init(axis_fifo_init);
static void __exit axis_fifo_exit(void)
{
platform_driver_unregister(&axis_fifo_driver);
- class_destroy(axis_fifo_driver_class);
}
module_exit(axis_fifo_exit);
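The axis-fifo conversion above leans on a property of the misc core: before calling the driver's open(), misc_open() stores the struct miscdevice pointer in file->private_data, so the driver can recover its per-device state with container_of() and drop its own class/cdev/devt bookkeeping. A minimal sketch of that pattern with made-up names:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

struct foo_dev {
	struct miscdevice miscdev;
	/* per-device driver state lives here */
};

static int foo_open(struct inode *inode, struct file *f)
{
	struct foo_dev *foo = container_of(f->private_data,
					   struct foo_dev, miscdev);

	f->private_data = foo;	/* hand the driver state to read/write/etc. */
	return 0;
}

static const struct file_operations foo_fops = {
	.owner	= THIS_MODULE,
	.open	= foo_open,
};

static int foo_register(struct foo_dev *foo, const char *name)
{
	foo->miscdev.minor = MISC_DYNAMIC_MINOR;
	foo->miscdev.name = name;
	foo->miscdev.fops = &foo_fops;

	return misc_register(&foo->miscdev);
}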
diff --git a/drivers/staging/fbtft/fbtft-core.c b/drivers/staging/fbtft/fbtft-core.c
index ed992ca605eb..ecb5f75f6dd5 100644
--- a/drivers/staging/fbtft/fbtft-core.c
+++ b/drivers/staging/fbtft/fbtft-core.c
@@ -1038,7 +1038,8 @@ int fbtft_init_display(struct fbtft_par *par)
i++;
/* make debug message */
- for (j = 0; par->init_sequence[i + 1 + j] >= 0; j++);
+ for (j = 0; par->init_sequence[i + 1 + j] >= 0; j++)
+ ;
fbtft_par_dbg(DEBUG_INIT_DISPLAY, par,
"init: write(0x%02X) %*ph\n",
@@ -1318,23 +1319,17 @@ EXPORT_SYMBOL(fbtft_probe_common);
* @info: Framebuffer
*
* Unregisters and releases the framebuffer
- *
- * Return: 0 if successful, negative if error
*/
-int fbtft_remove_common(struct device *dev, struct fb_info *info)
+void fbtft_remove_common(struct device *dev, struct fb_info *info)
{
struct fbtft_par *par;
- if (!info)
- return -EINVAL;
par = info->par;
if (par)
fbtft_par_dbg(DEBUG_DRIVER_INIT_FUNCTIONS, par,
"%s()\n", __func__);
fbtft_unregister_framebuffer(info);
fbtft_framebuffer_release(info);
-
- return 0;
}
EXPORT_SYMBOL(fbtft_remove_common);
diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h
index 76f8c090a837..6869f3603b0e 100644
--- a/drivers/staging/fbtft/fbtft.h
+++ b/drivers/staging/fbtft/fbtft.h
@@ -252,7 +252,7 @@ void fbtft_unregister_backlight(struct fbtft_par *par);
int fbtft_init_display(struct fbtft_par *par);
int fbtft_probe_common(struct fbtft_display *display, struct spi_device *sdev,
struct platform_device *pdev);
-int fbtft_remove_common(struct device *dev, struct fb_info *info);
+void fbtft_remove_common(struct device *dev, struct fb_info *info);
/* fbtft-io.c */
int fbtft_write_spi(struct fbtft_par *par, void *buf, size_t len);
@@ -283,7 +283,8 @@ static int fbtft_driver_remove_spi(struct spi_device *spi) \
{ \
struct fb_info *info = spi_get_drvdata(spi); \
\
- return fbtft_remove_common(&spi->dev, info); \
+ fbtft_remove_common(&spi->dev, info); \
+ return 0; \
} \
\
static int fbtft_driver_probe_pdev(struct platform_device *pdev) \
@@ -295,7 +296,8 @@ static int fbtft_driver_remove_pdev(struct platform_device *pdev) \
{ \
struct fb_info *info = platform_get_drvdata(pdev); \
\
- return fbtft_remove_common(&pdev->dev, info); \
+ fbtft_remove_common(&pdev->dev, info); \
+ return 0; \
} \
\
static const struct of_device_id dt_ids[] = { \
diff --git a/drivers/staging/fieldbus/anybuss/host.c b/drivers/staging/fieldbus/anybuss/host.c
index 8a75f6642c78..a344410e48fe 100644
--- a/drivers/staging/fieldbus/anybuss/host.c
+++ b/drivers/staging/fieldbus/anybuss/host.c
@@ -1318,11 +1318,11 @@ anybuss_host_common_probe(struct device *dev,
}
/*
* startup sequence:
- * perform dummy IND_AB read to prevent false 'init done' irq
+ * a) perform dummy IND_AB read to prevent false 'init done' irq
* (already done by test_dpram() above)
- * release reset
- * wait for first interrupt
- * interrupt came in: ready to go !
+ * b) release reset
+ * c) wait for first interrupt
+ * d) interrupt came in: ready to go !
*/
reset_deassert(cd);
if (!wait_for_completion_timeout(&cd->card_boot, TIMEOUT)) {
diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c
index e390c924ec1c..493ed4821515 100644
--- a/drivers/staging/gdm724x/gdm_lte.c
+++ b/drivers/staging/gdm724x/gdm_lte.c
@@ -867,6 +867,7 @@ int register_lte_device(struct phy_dev *phy_dev,
struct nic *nic;
struct net_device *net;
char pdn_dev_name[16];
+ u8 addr[ETH_ALEN];
int ret = 0;
u8 index;
@@ -893,11 +894,12 @@ int register_lte_device(struct phy_dev *phy_dev,
nic->phy_dev = phy_dev;
nic->nic_id = index;
- form_mac_address(net->dev_addr,
+ form_mac_address(addr,
nic->src_mac_addr,
nic->dest_mac_addr,
mac_address,
index);
+ eth_hw_addr_set(net, addr);
SET_NETDEV_DEV(net, dev);
SET_NETDEV_DEVTYPE(net, &wwan_type);
diff --git a/drivers/staging/ks7010/Kconfig b/drivers/staging/ks7010/Kconfig
index 0987fdc2f70d..8ea6c0928679 100644
--- a/drivers/staging/ks7010/Kconfig
+++ b/drivers/staging/ks7010/Kconfig
@@ -5,6 +5,9 @@ config KS7010
select WIRELESS_EXT
select WEXT_PRIV
select FW_LOADER
+ select CRYPTO
+ select CRYPTO_HASH
+ select CRYPTO_MICHAEL_MIC
help
This is a driver for KeyStream KS7010 based SDIO WIFI cards. It is
found on at least later Spectec SDW-821 (FCC-ID "S2Y-WLAN-11G-K" only,
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index eaa70893224a..1c63d595313d 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -541,7 +541,7 @@ void hostif_mib_get_confirm(struct ks_wlan_private *priv)
hostif_sme_enqueue(priv, SME_GET_MAC_ADDRESS);
ether_addr_copy(priv->eth_addr, priv->rxp);
priv->mac_address_valid = true;
- ether_addr_copy(dev->dev_addr, priv->eth_addr);
+ eth_hw_addr_set(dev, priv->eth_addr);
netdev_info(dev, "MAC ADDRESS = %pM\n", priv->eth_addr);
break;
case DOT11_PRODUCT_VERSION:
diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c
index 631ad769c3d5..7e8d37c169f0 100644
--- a/drivers/staging/ks7010/ks_wlan_net.c
+++ b/drivers/staging/ks7010/ks_wlan_net.c
@@ -2490,7 +2490,7 @@ int ks_wlan_set_mac_address(struct net_device *dev, void *addr)
if (netif_running(dev))
return -EBUSY;
- memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, mac_addr->sa_data);
ether_addr_copy(priv->eth_addr, mac_addr->sa_data);
priv->mac_address_valid = false;
@@ -2625,7 +2625,7 @@ int ks_wlan_net_start(struct net_device *dev)
/* dummy address set */
ether_addr_copy(priv->eth_addr, dummy_addr);
- ether_addr_copy(dev->dev_addr, priv->eth_addr);
+ eth_hw_addr_set(dev, priv->eth_addr);
/* The ks_wlan-specific entries in the device structure. */
dev->netdev_ops = &ks_wlan_netdev_ops;
diff --git a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
index 362ed44b4eff..e046489cd253 100644
--- a/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
+++ b/drivers/staging/media/atomisp/i2c/atomisp-lm3554.c
@@ -835,7 +835,6 @@ static int lm3554_probe(struct i2c_client *client)
int err = 0;
struct lm3554 *flash;
unsigned int i;
- int ret;
flash = kzalloc(sizeof(*flash), GFP_KERNEL);
if (!flash)
@@ -844,7 +843,7 @@ static int lm3554_probe(struct i2c_client *client)
flash->pdata = lm3554_platform_data_func(client);
if (IS_ERR(flash->pdata)) {
err = PTR_ERR(flash->pdata);
- goto fail1;
+ goto free_flash;
}
v4l2_i2c_subdev_init(&flash->sd, client, &lm3554_ops);
@@ -852,12 +851,12 @@ static int lm3554_probe(struct i2c_client *client)
flash->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
flash->mode = ATOMISP_FLASH_MODE_OFF;
flash->timeout = LM3554_MAX_TIMEOUT / LM3554_TIMEOUT_STEPSIZE - 1;
- ret =
+ err =
v4l2_ctrl_handler_init(&flash->ctrl_handler,
ARRAY_SIZE(lm3554_controls));
- if (ret) {
+ if (err) {
dev_err(&client->dev, "error initialize a ctrl_handler.\n");
- goto fail3;
+ goto unregister_subdev;
}
for (i = 0; i < ARRAY_SIZE(lm3554_controls); i++)
@@ -866,14 +865,15 @@ static int lm3554_probe(struct i2c_client *client)
if (flash->ctrl_handler.error) {
dev_err(&client->dev, "ctrl_handler error.\n");
- goto fail3;
+ err = flash->ctrl_handler.error;
+ goto free_handler;
}
flash->sd.ctrl_handler = &flash->ctrl_handler;
err = media_entity_pads_init(&flash->sd.entity, 0, NULL);
if (err) {
dev_err(&client->dev, "error initialize a media entity.\n");
- goto fail2;
+ goto free_handler;
}
flash->sd.entity.function = MEDIA_ENT_F_FLASH;
@@ -884,16 +884,27 @@ static int lm3554_probe(struct i2c_client *client)
err = lm3554_gpio_init(client);
if (err) {
- dev_err(&client->dev, "gpio request/direction_output fail");
- goto fail3;
+ dev_err(&client->dev, "gpio request/direction_output fail.\n");
+ goto cleanup_media;
+ }
+
+ err = atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
+ if (err) {
+ dev_err(&client->dev, "fail to register atomisp i2c module.\n");
+ goto uninit_gpio;
}
- return atomisp_register_i2c_module(&flash->sd, NULL, LED_FLASH);
-fail3:
+
+ return 0;
+
+uninit_gpio:
+ lm3554_gpio_uninit(client);
+cleanup_media:
media_entity_cleanup(&flash->sd.entity);
+free_handler:
v4l2_ctrl_handler_free(&flash->ctrl_handler);
-fail2:
+unregister_subdev:
v4l2_device_unregister_subdev(&flash->sd);
-fail1:
+free_flash:
kfree(flash);
return err;
diff --git a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
index 5e3670c4fc29..6c95f57a52e9 100644
--- a/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
+++ b/drivers/staging/media/atomisp/i2c/ov5693/atomisp-ov5693.c
@@ -1714,6 +1714,8 @@ static int ov5693_detect(struct i2c_client *client)
}
ret = ov5693_read_reg(client, OV5693_8BIT,
OV5693_SC_CMMN_CHIP_ID_L, &low);
+ if (ret)
+ return ret;
id = ((((u16)high) << 8) | (u16)low);
if (id != OV5693_ID) {
diff --git a/drivers/staging/media/atomisp/pci/atomisp_csi2.c b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
index 11b6b1216473..4a9268bac8a9 100644
--- a/drivers/staging/media/atomisp/pci/atomisp_csi2.c
+++ b/drivers/staging/media/atomisp/pci/atomisp_csi2.c
@@ -22,13 +22,11 @@
#include "atomisp_internal.h"
#include "atomisp-regs.h"
-static struct v4l2_mbus_framefmt *__csi2_get_format(struct
- atomisp_mipi_csi2_device
- * csi2,
- struct v4l2_subdev_state *sd_state,
- enum
- v4l2_subdev_format_whence
- which, unsigned int pad)
+static struct
+v4l2_mbus_framefmt *__csi2_get_format(struct atomisp_mipi_csi2_device *csi2,
+ struct v4l2_subdev_state *sd_state,
+ enum v4l2_subdev_format_whence which,
+ unsigned int pad)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return v4l2_subdev_get_try_format(&csi2->subdev, sd_state,
@@ -43,7 +41,7 @@ static struct v4l2_mbus_framefmt *__csi2_get_format(struct
* @fh : V4L2 subdev file handle
* @code : pointer to v4l2_subdev_pad_mbus_code_enum structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_mbus_code_enum *code)
@@ -69,7 +67,7 @@ static int csi2_enum_mbus_code(struct v4l2_subdev *sd,
* @pad: pad num
* @fmt: pointer to v4l2 format structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_get_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -104,12 +102,12 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
else
actual_ffmt->code = atomisp_in_fmt_conv[0].code;
- actual_ffmt->width = clamp_t(
- u32, ffmt->width, ATOM_ISP_MIN_WIDTH,
- ATOM_ISP_MAX_WIDTH);
- actual_ffmt->height = clamp_t(
- u32, ffmt->height, ATOM_ISP_MIN_HEIGHT,
- ATOM_ISP_MAX_HEIGHT);
+ actual_ffmt->width = clamp_t(u32, ffmt->width,
+ ATOM_ISP_MIN_WIDTH,
+ ATOM_ISP_MAX_WIDTH);
+ actual_ffmt->height = clamp_t(u32, ffmt->height,
+ ATOM_ISP_MIN_HEIGHT,
+ ATOM_ISP_MAX_HEIGHT);
tmp_ffmt = *ffmt = *actual_ffmt;
@@ -132,7 +130,7 @@ int atomisp_csi2_set_ffmt(struct v4l2_subdev *sd,
* @pad: pad num
* @fmt: pointer to v4l2 format structure
* return -EINVAL or zero on success
-*/
+ */
static int csi2_set_format(struct v4l2_subdev *sd,
struct v4l2_subdev_state *sd_state,
struct v4l2_subdev_format *fmt)
@@ -147,7 +145,7 @@ static int csi2_set_format(struct v4l2_subdev *sd,
* @enable: Enable/disable stream (1/0)
*
* Return 0 on success or a negative error code otherwise.
-*/
+ */
static int csi2_set_stream(struct v4l2_subdev *sd, int enable)
{
return 0;
@@ -184,7 +182,7 @@ static const struct v4l2_subdev_ops csi2_ops = {
* @remote : Pointer to remote pad array
* @flags : Link flags
* return -EINVAL or zero on success
-*/
+ */
static int csi2_link_setup(struct media_entity *entity,
const struct media_pad *local,
const struct media_pad *remote, u32 flags)
@@ -222,10 +220,10 @@ static const struct media_entity_operations csi2_media_ops = {
};
/*
-* ispcsi2_init_entities - Initialize subdev and media entity.
-* @csi2: Pointer to ispcsi2 structure.
-* return -ENOMEM or zero on success
-*/
+ * ispcsi2_init_entities - Initialize subdev and media entity.
+ * @csi2: Pointer to ispcsi2 structure.
+ * return -ENOMEM or zero on success
+ */
static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
int port)
{
@@ -249,9 +247,8 @@ static int mipi_csi2_init_entities(struct atomisp_mipi_csi2_device *csi2,
if (ret < 0)
return ret;
- csi2->formats[CSI2_PAD_SINK].code =
- csi2->formats[CSI2_PAD_SOURCE].code =
- atomisp_in_fmt_conv[0].code;
+ csi2->formats[CSI2_PAD_SINK].code = atomisp_in_fmt_conv[0].code;
+ csi2->formats[CSI2_PAD_SOURCE].code = atomisp_in_fmt_conv[0].code;
return 0;
}
@@ -379,21 +376,22 @@ static void atomisp_csi2_configure_isp2401(struct atomisp_sub_device *asd)
(isp->inputs[asd->input_curr].camera->ctrl_handler, &ctrl) == 0)
mipi_freq = ctrl.value;
- clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen,
- mipi_freq, TERMEN_DEFAULT);
- clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle,
- mipi_freq, SETTLE_DEFAULT);
- dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen,
- mipi_freq, TERMEN_DEFAULT);
- dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle,
- mipi_freq, SETTLE_DEFAULT);
+ clk_termen = atomisp_csi2_configure_calc(coeff_clk_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ clk_settle = atomisp_csi2_configure_calc(coeff_clk_settle, mipi_freq,
+ SETTLE_DEFAULT);
+ dat_termen = atomisp_csi2_configure_calc(coeff_dat_termen, mipi_freq,
+ TERMEN_DEFAULT);
+ dat_settle = atomisp_csi2_configure_calc(coeff_dat_settle, mipi_freq,
+ SETTLE_DEFAULT);
+
for (n = 0; n < csi2_port_lanes[port] + 1; n++) {
hrt_address base = csi2_port_base[port] + csi2_lane_base[n];
atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_TERMEN,
- n == 0 ? clk_termen : dat_termen);
+ n == 0 ? clk_termen : dat_termen);
atomisp_css2_hw_store_32(base + CSI2_REG_RX_CSI_DLY_CNT_SETTLE,
- n == 0 ? clk_settle : dat_settle);
+ n == 0 ? clk_settle : dat_settle);
}
}
@@ -405,7 +403,7 @@ void atomisp_csi2_configure(struct atomisp_sub_device *asd)
/*
* atomisp_mipi_csi2_cleanup - Routine for module driver cleanup
-*/
+ */
void atomisp_mipi_csi2_cleanup(struct atomisp_device *isp)
{
}
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 20e508158871..fb82b9297a2b 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -179,7 +179,7 @@ err_cancel_job:
hantro_job_finish_no_pm(ctx->dev, ctx, VB2_BUF_STATE_ERROR);
}
-static struct v4l2_m2m_ops vpu_m2m_ops = {
+static const struct v4l2_m2m_ops vpu_m2m_ops = {
.device_run = device_run,
};
@@ -263,9 +263,6 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl)
if (sps->bit_depth_luma_minus8 != 0)
/* Only 8-bit is supported */
return -EINVAL;
- if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED)
- /* No scaling support */
- return -EINVAL;
}
return 0;
}
@@ -450,6 +447,11 @@ static const struct hantro_ctrl controls[] = {
}, {
.codec = HANTRO_HEVC_DECODER,
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
+ },
+ }, {
+ .codec = HANTRO_HEVC_DECODER,
+ .cfg = {
.id = V4L2_CID_HANTRO_HEVC_SLICE_HEADER_SKIP,
.name = "Hantro HEVC slice header skip bytes",
.type = V4L2_CTRL_TYPE_INTEGER,
@@ -978,7 +980,7 @@ static int hantro_probe(struct platform_device *pdev)
vpu->mdev.dev = vpu->dev;
strscpy(vpu->mdev.model, DRIVER_NAME, sizeof(vpu->mdev.model));
strscpy(vpu->mdev.bus_info, "platform: " DRIVER_NAME,
- sizeof(vpu->mdev.model));
+ sizeof(vpu->mdev.bus_info));
media_device_init(&vpu->mdev);
vpu->mdev.ops = &hantro_m2m_media_ops;
vpu->v4l2_dev.mdev = &vpu->mdev;
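/*
 * The bus_info change above fixes a copy-and-paste hazard: the bound
 * passed to strscpy() named a different field (mdev.model) than the
 * destination (mdev.bus_info), silently truncating the bus string. A
 * standalone sketch of the same mistake and its fix; the struct, sizes
 * and strings are illustrative, and snprintf() stands in for the
 * kernel-only strscpy():
 */
#include <stdio.h>

struct media_ids {
	char model[16];
	char bus_info[32];
};

int main(void)
{
	struct media_ids m;

	/* Buggy: bound taken from sizeof(m.model) although bus_info is written. */
	snprintf(m.bus_info, sizeof(m.model), "platform: hantro-vpu");
	printf("truncated: %s\n", m.bus_info);

	/* Fixed: the bound always names the destination buffer. */
	snprintf(m.bus_info, sizeof(m.bus_info), "platform: hantro-vpu");
	printf("full:      %s\n", m.bus_info);

	return 0;
}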
diff --git a/drivers/staging/media/hantro/hantro_g1_h264_dec.c b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
index 236ce24ca00c..f49dbfb8a843 100644
--- a/drivers/staging/media/hantro/hantro_g1_h264_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_h264_dec.c
@@ -29,7 +29,7 @@ static void set_params(struct hantro_ctx *ctx, struct vb2_v4l2_buffer *src_buf)
u32 reg;
/* Decoder control register 0. */
- reg = G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0x0);
+ reg = G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (sps->flags & V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD)
reg |= G1_REG_DEC_CTRL0_SEQ_MBAFF_E;
if (sps->profile_idc > 66) {
diff --git a/drivers/staging/media/hantro/hantro_g1_regs.h b/drivers/staging/media/hantro/hantro_g1_regs.h
index c1756e3d5391..c623b3b0be18 100644
--- a/drivers/staging/media/hantro/hantro_g1_regs.h
+++ b/drivers/staging/media/hantro/hantro_g1_regs.h
@@ -68,6 +68,8 @@
#define G1_REG_DEC_CTRL0_PICORD_COUNT_E BIT(9)
#define G1_REG_DEC_CTRL0_DEC_AHB_HLOCK_E BIT(8)
#define G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(x) (((x) & 0xff) << 0)
+/* Set the AXI ID to 0xff so the hardware auto-generates IDs and avoids possible conflicts */

+#define G1_REG_DEC_CTRL0_DEC_AXI_AUTO G1_REG_DEC_CTRL0_DEC_AXI_WR_ID(0xff)
#define G1_REG_DEC_CTRL1 0x010
#define G1_REG_DEC_CTRL1_PIC_MB_WIDTH(x) (((x) & 0x1ff) << 23)
#define G1_REG_DEC_CTRL1_MB_WIDTH_OFF(x) (((x) & 0xf) << 19)
diff --git a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
index 6180b23e7d94..851eb67f19f5 100644
--- a/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
+++ b/drivers/staging/media/hantro/hantro_g1_vp8_dec.c
@@ -463,7 +463,8 @@ int hantro_g1_vp8_dec_run(struct hantro_ctx *ctx)
G1_REG_CONFIG_DEC_MAX_BURST(16);
vdpu_write_relaxed(vpu, reg, G1_REG_CONFIG);
- reg = G1_REG_DEC_CTRL0_DEC_MODE(10);
+ reg = G1_REG_DEC_CTRL0_DEC_MODE(10) |
+ G1_REG_DEC_CTRL0_DEC_AXI_AUTO;
if (!V4L2_VP8_FRAME_IS_KEY_FRAME(hdr))
reg |= G1_REG_DEC_CTRL0_PIC_INTER_E;
if (!(hdr->flags & V4L2_VP8_FRAME_FLAG_MB_NO_SKIP_COEFF))
diff --git a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
index 340efb57fd18..76a921163b9a 100644
--- a/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
+++ b/drivers/staging/media/hantro/hantro_g2_hevc_dec.c
@@ -516,6 +516,56 @@ static void set_buffers(struct hantro_ctx *ctx)
hantro_write_addr(vpu, G2_TILE_BSD, ctx->hevc_dec.tile_bsd.dma);
}
+static void prepare_scaling_list_buffer(struct hantro_ctx *ctx)
+{
+ struct hantro_dev *vpu = ctx->dev;
+ const struct hantro_hevc_dec_ctrls *ctrls = &ctx->hevc_dec.ctrls;
+ const struct v4l2_ctrl_hevc_scaling_matrix *sc = ctrls->scaling;
+ const struct v4l2_ctrl_hevc_sps *sps = ctrls->sps;
+ u8 *p = ((u8 *)ctx->hevc_dec.scaling_lists.cpu);
+ unsigned int scaling_list_enabled;
+ unsigned int i, j, k;
+
+ scaling_list_enabled = !!(sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED);
+ hantro_reg_write(vpu, &g2_scaling_list_e, scaling_list_enabled);
+
+ if (!scaling_list_enabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_16x16); i++)
+ *p++ = sc->scaling_list_dc_coef_16x16[i];
+
+ for (i = 0; i < ARRAY_SIZE(sc->scaling_list_dc_coef_32x32); i++)
+ *p++ = sc->scaling_list_dc_coef_32x32[i];
+
+ /* 128-bit boundary */
+ p += 8;
+
+ /* write scaling lists column by column */
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++)
+ for (k = 0; k < 4; k++)
+ *p++ = sc->scaling_list_4x4[i][4 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_8x8[i][8 * k + j];
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_16x16[i][8 * k + j];
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k++)
+ *p++ = sc->scaling_list_32x32[i][8 * k + j];
+
+ hantro_write_addr(vpu, HEVC_SCALING_LIST, ctx->hevc_dec.scaling_lists.dma);
+}
+
static void hantro_g2_check_idle(struct hantro_dev *vpu)
{
int i;
@@ -556,6 +606,8 @@ int hantro_g2_hevc_dec_run(struct hantro_ctx *ctx)
set_buffers(ctx);
prepare_tile_info_buffer(ctx);
+ prepare_scaling_list_buffer(ctx);
+
hantro_end_prepare_run(ctx);
hantro_reg_write(vpu, &g2_mode, HEVC_DEC_MODE);
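/*
 * prepare_scaling_list_buffer() above writes the HEVC scaling lists to
 * the G2 core column by column, while the V4L2 control stores them
 * row-major (index = 8 * row + col). A standalone sketch of that
 * transpose for a single 8x8 list; the real buffer layout (DC
 * coefficients, 128-bit padding, multiple list sizes) is omitted:
 */
#include <stdint.h>
#include <stdio.h>

static void write_8x8_column_major(const uint8_t list[64], uint8_t *out)
{
	unsigned int col, row;

	for (col = 0; col < 8; col++)		/* "j" in the driver loop */
		for (row = 0; row < 8; row++)	/* "k" in the driver loop */
			*out++ = list[8 * row + col];
}

int main(void)
{
	uint8_t list[64], out[64];
	unsigned int i;

	for (i = 0; i < 64; i++)
		list[i] = i;	/* row-major: value equals 8 * row + col */

	write_8x8_column_major(list, out);

	for (i = 0; i < 8; i++)	/* first column comes out first: 0 8 16 ... 56 */
		printf("%u ", out[i]);
	printf("\n");
	return 0;
}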
diff --git a/drivers/staging/media/hantro/hantro_hevc.c b/drivers/staging/media/hantro/hantro_hevc.c
index 5347f5a41c2a..ee03123e7704 100644
--- a/drivers/staging/media/hantro/hantro_hevc.c
+++ b/drivers/staging/media/hantro/hantro_hevc.c
@@ -20,6 +20,8 @@
/* tile border coefficients of filter */
#define VERT_SAO_RAM_SIZE 48 /* bytes per pixel */
+#define SCALING_LIST_SIZE (16 * 64)
+
#define MAX_TILE_COLS 20
#define MAX_TILE_ROWS 22
@@ -256,6 +258,11 @@ int hantro_hevc_dec_prepare_run(struct hantro_ctx *ctx)
if (WARN_ON(!ctrls->decode_params))
return -EINVAL;
+ ctrls->scaling =
+ hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
+ if (WARN_ON(!ctrls->scaling))
+ return -EINVAL;
+
ctrls->sps =
hantro_get_ctrl(ctx, V4L2_CID_MPEG_VIDEO_HEVC_SPS);
if (WARN_ON(!ctrls->sps))
@@ -284,6 +291,12 @@ void hantro_hevc_dec_exit(struct hantro_ctx *ctx)
hevc_dec->tile_sizes.dma);
hevc_dec->tile_sizes.cpu = NULL;
+ if (hevc_dec->scaling_lists.cpu)
+ dma_free_coherent(vpu->dev, hevc_dec->scaling_lists.size,
+ hevc_dec->scaling_lists.cpu,
+ hevc_dec->scaling_lists.dma);
+ hevc_dec->scaling_lists.cpu = NULL;
+
if (hevc_dec->tile_filter.cpu)
dma_free_coherent(vpu->dev, hevc_dec->tile_filter.size,
hevc_dec->tile_filter.cpu,
@@ -327,6 +340,14 @@ int hantro_hevc_dec_init(struct hantro_ctx *ctx)
hevc_dec->tile_sizes.size = size;
+ hevc_dec->scaling_lists.cpu = dma_alloc_coherent(vpu->dev, SCALING_LIST_SIZE,
+ &hevc_dec->scaling_lists.dma,
+ GFP_KERNEL);
+ if (!hevc_dec->scaling_lists.cpu)
+ return -ENOMEM;
+
+ hevc_dec->scaling_lists.size = SCALING_LIST_SIZE;
+
hantro_hevc_ref_init(ctx);
return 0;
diff --git a/drivers/staging/media/hantro/hantro_hw.h b/drivers/staging/media/hantro/hantro_hw.h
index df7b5e3a57b9..267a6d33a47b 100644
--- a/drivers/staging/media/hantro/hantro_hw.h
+++ b/drivers/staging/media/hantro/hantro_hw.h
@@ -104,6 +104,7 @@ struct hantro_h264_dec_hw_ctx {
/**
* struct hantro_hevc_dec_ctrls
* @decode_params: Decode params
+ * @scaling: Scaling matrix
* @sps: SPS info
* @pps: PPS info
* @hevc_hdr_skip_length: the number of data (in bits) to skip in the
@@ -112,6 +113,7 @@ struct hantro_h264_dec_hw_ctx {
*/
struct hantro_hevc_dec_ctrls {
const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
const struct v4l2_ctrl_hevc_sps *sps;
const struct v4l2_ctrl_hevc_pps *pps;
u32 hevc_hdr_skip_length;
@@ -124,6 +126,7 @@ struct hantro_hevc_dec_ctrls {
* @tile_sao: Tile SAO buffer
* @tile_bsd: Tile BSD control buffer
* @ref_bufs: Internal reference buffers
+ * @scaling_lists: Scaling lists buffer
* @ref_bufs_poc: Internal reference buffers picture order count
* @ref_bufs_used: Bitfield of used reference buffers
* @ctrls: V4L2 controls attached to a run
@@ -135,6 +138,7 @@ struct hantro_hevc_dec_hw_ctx {
struct hantro_aux_buf tile_sao;
struct hantro_aux_buf tile_bsd;
struct hantro_aux_buf ref_bufs[NUM_REF_PICTURES];
+ struct hantro_aux_buf scaling_lists;
int ref_bufs_poc[NUM_REF_PICTURES];
u32 ref_bufs_used;
struct hantro_hevc_dec_ctrls ctrls;
diff --git a/drivers/staging/media/imx/TODO b/drivers/staging/media/imx/TODO
index 2384f4c6b09d..06c94f20ecf8 100644
--- a/drivers/staging/media/imx/TODO
+++ b/drivers/staging/media/imx/TODO
@@ -27,8 +27,3 @@
- i.MX7: all of the above, since it uses the imx media core
- i.MX7: use Frame Interval Monitor
-
-- i.MX7: runtime testing with parallel sensor, links setup and streaming
-
-- i.MX7: runtime testing with different formats, for the time only 10-bit bayer
- is tested
diff --git a/drivers/staging/media/imx/imx-media-csi.c b/drivers/staging/media/imx/imx-media-csi.c
index bb1305c9daaf..1caa100be33d 100644
--- a/drivers/staging/media/imx/imx-media-csi.c
+++ b/drivers/staging/media/imx/imx-media-csi.c
@@ -139,6 +139,8 @@ static inline bool is_parallel_16bit_bus(struct v4l2_fwnode_endpoint *ep)
* Check for conditions that require the IPU to handle the
* data internally as generic data, aka passthrough mode:
* - raw bayer media bus formats, or
+ * - note: BT.656 and BT.1120 (8/10-bit YUV422) data can always be
+ *   processed on-the-fly and never requires passthrough, or
* - the CSI is receiving from a 16-bit parallel bus, or
* - the CSI is receiving from an 8-bit parallel bus and the incoming
* media bus format is other than UYVY8_2X8/YUYV8_2X8.
@@ -147,6 +149,9 @@ static inline bool requires_passthrough(struct v4l2_fwnode_endpoint *ep,
struct v4l2_mbus_framefmt *infmt,
const struct imx_media_pixfmt *incc)
{
+ if (ep->bus_type == V4L2_MBUS_BT656) // including BT.1120
+ return 0;
+
return incc->bayer || is_parallel_16bit_bus(ep) ||
(is_parallel_bus(ep) &&
infmt->code != MEDIA_BUS_FMT_UYVY8_2X8 &&
@@ -1924,7 +1929,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
unsigned int port;
int ret;
- v4l2_async_notifier_init(&priv->notifier);
+ v4l2_async_nf_init(&priv->notifier);
/* get this CSI's port id */
ret = fwnode_property_read_u32(dev_fwnode(priv->dev), "reg", &port);
@@ -1935,8 +1940,8 @@ static int imx_csi_async_register(struct csi_priv *priv)
port, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &priv->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -1950,8 +1955,7 @@ static int imx_csi_async_register(struct csi_priv *priv)
priv->notifier.ops = &csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&priv->sd,
- &priv->notifier);
+ ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret)
return ret;
@@ -2040,8 +2044,8 @@ static int imx_csi_probe(struct platform_device *pdev)
return 0;
cleanup:
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
free:
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
@@ -2055,8 +2059,8 @@ static int imx_csi_remove(struct platform_device *pdev)
v4l2_ctrl_handler_free(&priv->ctrl_hdlr);
mutex_destroy(&priv->lock);
- v4l2_async_notifier_unregister(&priv->notifier);
- v4l2_async_notifier_cleanup(&priv->notifier);
+ v4l2_async_nf_unregister(&priv->notifier);
+ v4l2_async_nf_cleanup(&priv->notifier);
v4l2_async_unregister_subdev(sd);
media_entity_cleanup(&sd->entity);
@@ -2082,4 +2086,3 @@ module_platform_driver(imx_csi_driver);
MODULE_DESCRIPTION("i.MX CSI subdev driver");
MODULE_AUTHOR("Steve Longerbeam <steve_longerbeam@mentor.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:imx-ipuv3-csi");
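/*
 * The notifier hunks in this file (and in the imx files that follow) are
 * a mechanical rename of the v4l2 async notifier API, v4l2_async_notifier_*
 * becoming v4l2_async_nf_*. Condensed sketch of the call order the CSI
 * drivers use after the rename; error paths are trimmed and "priv",
 * "ep" and "csi_notify_ops" stand for the driver state shown in the
 * hunks above, so this is a fragment rather than a complete function:
 */
v4l2_async_nf_init(&priv->notifier);

asd = v4l2_async_nf_add_fwnode_remote(&priv->notifier, ep,
				      struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd))
	return PTR_ERR(asd);

priv->notifier.ops = &csi_notify_ops;
ret = v4l2_async_subdev_nf_register(&priv->sd, &priv->notifier);
if (ret)
	return ret;

/* ... and on remove or error unwind, always unregister before cleanup: */
v4l2_async_nf_unregister(&priv->notifier);
v4l2_async_nf_cleanup(&priv->notifier);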
diff --git a/drivers/staging/media/imx/imx-media-dev-common.c b/drivers/staging/media/imx/imx-media-dev-common.c
index d186179388d0..80b69a9a752c 100644
--- a/drivers/staging/media/imx/imx-media-dev-common.c
+++ b/drivers/staging/media/imx/imx-media-dev-common.c
@@ -367,6 +367,8 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev,
imxmd->v4l2_dev.notify = imx_media_notify;
strscpy(imxmd->v4l2_dev.name, "imx-media",
sizeof(imxmd->v4l2_dev.name));
+ snprintf(imxmd->md.bus_info, sizeof(imxmd->md.bus_info),
+ "platform:%s", dev_name(imxmd->md.dev));
media_device_init(&imxmd->md);
@@ -379,7 +381,7 @@ struct imx_media_dev *imx_media_dev_init(struct device *dev,
INIT_LIST_HEAD(&imxmd->vdev_list);
- v4l2_async_notifier_init(&imxmd->notifier);
+ v4l2_async_nf_init(&imxmd->notifier);
return imxmd;
@@ -403,11 +405,10 @@ int imx_media_dev_notifier_register(struct imx_media_dev *imxmd,
/* prepare the async subdev notifier and register it */
imxmd->notifier.ops = ops ? ops : &imx_media_notifier_ops;
- ret = v4l2_async_notifier_register(&imxmd->v4l2_dev,
- &imxmd->notifier);
+ ret = v4l2_async_nf_register(&imxmd->v4l2_dev, &imxmd->notifier);
if (ret) {
v4l2_err(&imxmd->v4l2_dev,
- "v4l2_async_notifier_register failed with %d\n", ret);
+ "v4l2_async_nf_register failed with %d\n", ret);
return ret;
}
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c
index 338b8bd0bb07..f85462214e22 100644
--- a/drivers/staging/media/imx/imx-media-dev.c
+++ b/drivers/staging/media/imx/imx-media-dev.c
@@ -94,7 +94,7 @@ static int imx_media_probe(struct platform_device *pdev)
return 0;
cleanup:
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
@@ -113,9 +113,9 @@ static int imx_media_remove(struct platform_device *pdev)
imxmd->m2m_vdev = NULL;
}
- v4l2_async_notifier_unregister(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
imx_media_unregister_ipu_internal_subdevs(imxmd);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
diff --git a/drivers/staging/media/imx/imx-media-of.c b/drivers/staging/media/imx/imx-media-of.c
index b677cf0e0c84..59f1eb7b62bc 100644
--- a/drivers/staging/media/imx/imx-media-of.c
+++ b/drivers/staging/media/imx/imx-media-of.c
@@ -29,9 +29,9 @@ int imx_media_of_add_csi(struct imx_media_dev *imxmd,
}
/* add CSI fwnode to async notifier */
- asd = v4l2_async_notifier_add_fwnode_subdev(&imxmd->notifier,
- of_fwnode_handle(csi_np),
- struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode(&imxmd->notifier,
+ of_fwnode_handle(csi_np),
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
if (ret == -EEXIST)
diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
index 9de0ebd439dc..a0941fc2907b 100644
--- a/drivers/staging/media/imx/imx6-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
@@ -647,7 +647,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi2->notifier);
+ v4l2_async_nf_init(&csi2->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi2->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -663,8 +663,8 @@ static int csi2_async_register(struct csi2_dev *csi2)
dev_dbg(csi2->dev, "data lanes: %d\n", vep.bus.mipi_csi2.num_data_lanes);
dev_dbg(csi2->dev, "flags: 0x%08x\n", vep.bus.mipi_csi2.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi2->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi2->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
if (IS_ERR(asd))
@@ -672,8 +672,7 @@ static int csi2_async_register(struct csi2_dev *csi2)
csi2->notifier.ops = &csi2_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&csi2->sd,
- &csi2->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi2->sd, &csi2->notifier);
if (ret)
return ret;
@@ -768,8 +767,8 @@ static int csi2_probe(struct platform_device *pdev)
return 0;
clean_notifier:
- v4l2_async_notifier_unregister(&csi2->notifier);
- v4l2_async_notifier_cleanup(&csi2->notifier);
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
clk_disable_unprepare(csi2->dphy_clk);
pllref_off:
clk_disable_unprepare(csi2->pllref_clk);
@@ -783,8 +782,8 @@ static int csi2_remove(struct platform_device *pdev)
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csi2_dev *csi2 = sd_to_dev(sd);
- v4l2_async_notifier_unregister(&csi2->notifier);
- v4l2_async_notifier_cleanup(&csi2->notifier);
+ v4l2_async_nf_unregister(&csi2->notifier);
+ v4l2_async_nf_cleanup(&csi2->notifier);
v4l2_async_unregister_subdev(sd);
clk_disable_unprepare(csi2->dphy_clk);
clk_disable_unprepare(csi2->pllref_clk);
diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c
index 127183732912..2288dadb2683 100644
--- a/drivers/staging/media/imx/imx7-media-csi.c
+++ b/drivers/staging/media/imx/imx7-media-csi.c
@@ -1099,13 +1099,13 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
struct fwnode_handle *ep;
int ret;
- v4l2_async_notifier_init(&csi->notifier);
+ v4l2_async_nf_init(&csi->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(csi->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
if (ep) {
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &csi->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&csi->notifier, ep,
+ struct v4l2_async_subdev);
fwnode_handle_put(ep);
@@ -1119,7 +1119,7 @@ static int imx7_csi_async_register(struct imx7_csi *csi)
csi->notifier.ops = &imx7_csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&csi->sd, &csi->notifier);
+ ret = v4l2_async_subdev_nf_register(&csi->sd, &csi->notifier);
if (ret)
return ret;
@@ -1210,12 +1210,12 @@ static int imx7_csi_probe(struct platform_device *pdev)
return 0;
subdev_notifier_cleanup:
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
cleanup:
- v4l2_async_notifier_unregister(&imxmd->notifier);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_unregister(&imxmd->md);
media_device_cleanup(&imxmd->md);
@@ -1232,15 +1232,15 @@ static int imx7_csi_remove(struct platform_device *pdev)
struct imx7_csi *csi = v4l2_get_subdevdata(sd);
struct imx_media_dev *imxmd = csi->imxmd;
- v4l2_async_notifier_unregister(&imxmd->notifier);
- v4l2_async_notifier_cleanup(&imxmd->notifier);
+ v4l2_async_nf_unregister(&imxmd->notifier);
+ v4l2_async_nf_cleanup(&imxmd->notifier);
media_device_unregister(&imxmd->md);
v4l2_device_unregister(&imxmd->v4l2_dev);
media_device_cleanup(&imxmd->md);
- v4l2_async_notifier_unregister(&csi->notifier);
- v4l2_async_notifier_cleanup(&csi->notifier);
+ v4l2_async_nf_unregister(&csi->notifier);
+ v4l2_async_nf_cleanup(&csi->notifier);
v4l2_async_unregister_subdev(sd);
mutex_destroy(&csi->lock);
diff --git a/drivers/staging/media/imx/imx7-mipi-csis.c b/drivers/staging/media/imx/imx7-mipi-csis.c
index 41e33535de55..2b73fa55c938 100644
--- a/drivers/staging/media/imx/imx7-mipi-csis.c
+++ b/drivers/staging/media/imx/imx7-mipi-csis.c
@@ -1162,7 +1162,7 @@ static int mipi_csis_async_register(struct csi_state *state)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&state->notifier);
+ v4l2_async_nf_init(&state->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -1187,8 +1187,8 @@ static int mipi_csis_async_register(struct csi_state *state)
dev_dbg(state->dev, "data lanes: %d\n", state->bus.num_data_lanes);
dev_dbg(state->dev, "flags: 0x%08x\n", state->bus.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(
- &state->notifier, ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -1198,7 +1198,7 @@ static int mipi_csis_async_register(struct csi_state *state)
state->notifier.ops = &mipi_csis_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&state->sd, &state->notifier);
+ ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
if (ret)
return ret;
@@ -1429,8 +1429,8 @@ unregister_all:
mipi_csis_debugfs_exit(state);
cleanup:
media_entity_cleanup(&state->sd.entity);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
disable_clock:
mipi_csis_clk_disable(state);
@@ -1445,8 +1445,8 @@ static int mipi_csis_remove(struct platform_device *pdev)
struct csi_state *state = mipi_sd_to_csis_state(sd);
mipi_csis_debugfs_exit(state);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/staging/media/imx/imx8mq-mipi-csi2.c b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
index a6f562009b9a..7adbdd14daa9 100644
--- a/drivers/staging/media/imx/imx8mq-mipi-csi2.c
+++ b/drivers/staging/media/imx/imx8mq-mipi-csi2.c
@@ -643,7 +643,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
unsigned int i;
int ret;
- v4l2_async_notifier_init(&state->notifier);
+ v4l2_async_nf_init(&state->notifier);
ep = fwnode_graph_get_endpoint_by_id(dev_fwnode(state->dev), 0, 0,
FWNODE_GRAPH_ENDPOINT_NEXT);
@@ -669,8 +669,8 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->bus.num_data_lanes,
state->bus.flags);
- asd = v4l2_async_notifier_add_fwnode_remote_subdev(&state->notifier,
- ep, struct v4l2_async_subdev);
+ asd = v4l2_async_nf_add_fwnode_remote(&state->notifier, ep,
+ struct v4l2_async_subdev);
if (IS_ERR(asd)) {
ret = PTR_ERR(asd);
goto err_parse;
@@ -680,7 +680,7 @@ static int imx8mq_mipi_csi_async_register(struct csi_state *state)
state->notifier.ops = &imx8mq_mipi_csi_notify_ops;
- ret = v4l2_async_subdev_notifier_register(&state->sd, &state->notifier);
+ ret = v4l2_async_subdev_nf_register(&state->sd, &state->notifier);
if (ret)
return ret;
@@ -937,8 +937,8 @@ cleanup:
imx8mq_mipi_csi_pm_suspend(&pdev->dev, true);
media_entity_cleanup(&state->sd.entity);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
icc:
imx8mq_mipi_csi_release_icc(pdev);
@@ -953,8 +953,8 @@ static int imx8mq_mipi_csi_remove(struct platform_device *pdev)
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csi_state *state = mipi_sd_to_csi2_state(sd);
- v4l2_async_notifier_unregister(&state->notifier);
- v4l2_async_notifier_cleanup(&state->notifier);
+ v4l2_async_nf_unregister(&state->notifier);
+ v4l2_async_nf_cleanup(&state->notifier);
v4l2_async_unregister_subdev(&state->sd);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
index fa3d6ee5adf2..585f55981c86 100644
--- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
+++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h
@@ -234,7 +234,9 @@ struct ipu3_uapi_ae_ccm {
* struct ipu3_uapi_ae_config - AE config
*
* @grid_cfg: config for auto exposure statistics grid. See struct
- * &ipu3_uapi_ae_grid_config
+ * &ipu3_uapi_ae_grid_config. The Imgu does not output auto exposure
+ * statistics, so the user can ignore this configuration and use the
+ * RGB table in the auto-whitebalance statistics instead.
* @weights: &IPU3_UAPI_AE_WEIGHTS is based on 32x24 blocks in the grid.
* Each grid cell has a corresponding value in weights LUT called
* grid value, global histogram is updated based on grid value and
@@ -534,6 +536,9 @@ struct ipu3_uapi_ff_status {
*
* @awb_raw_buffer: auto white balance meta data &ipu3_uapi_awb_raw_buffer
* @ae_raw_buffer: auto exposure raw data &ipu3_uapi_ae_raw_buffer_aligned
+ * The current Imgu does not output auto exposure statistics to
+ * ae_raw_buffer; users such as a 3A algorithm can use the RGB
+ * table in &ipu3_uapi_awb_raw_buffer for auto exposure instead.
* @af_raw_buffer: &ipu3_uapi_af_raw_buffer for auto focus meta data
* @awb_fr_raw_buffer: value as specified by &ipu3_uapi_awb_fr_raw_buffer
* @stats_4a_config: 4a statistics config as defined by &ipu3_uapi_4a_config.
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.c b/drivers/staging/media/ipu3/ipu3-css-fw.c
index 45aff76198e2..981693eed815 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.c
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.c
@@ -124,12 +124,11 @@ int imgu_css_fw_init(struct imgu_css *css)
/* Check and display fw header info */
css->fwp = (struct imgu_fw_header *)css->fw->data;
- if (css->fw->size < sizeof(struct imgu_fw_header *) ||
+ if (css->fw->size < struct_size(css->fwp, binary_header, 1) ||
css->fwp->file_header.h_size != sizeof(struct imgu_fw_bi_file_h))
goto bad_fw;
- if (sizeof(struct imgu_fw_bi_file_h) +
- css->fwp->file_header.binary_nr * sizeof(struct imgu_fw_info) >
- css->fw->size)
+ if (struct_size(css->fwp, binary_header,
+ css->fwp->file_header.binary_nr) > css->fw->size)
goto bad_fw;
dev_info(dev, "loaded firmware version %.64s, %u binaries, %zu bytes\n",
diff --git a/drivers/staging/media/ipu3/ipu3-css-fw.h b/drivers/staging/media/ipu3/ipu3-css-fw.h
index 3c078f15a295..c0bc57fd678a 100644
--- a/drivers/staging/media/ipu3/ipu3-css-fw.h
+++ b/drivers/staging/media/ipu3/ipu3-css-fw.h
@@ -171,7 +171,7 @@ struct imgu_fw_bi_file_h {
struct imgu_fw_header {
struct imgu_fw_bi_file_h file_header;
- struct imgu_fw_info binary_header[1]; /* binary_nr items */
+ struct imgu_fw_info binary_header[]; /* binary_nr items */
};
/******************* Firmware functions *******************/
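/*
 * The size check in imgu_css_fw_init() above now treats binary_header as
 * a flexible array and bounds the whole blob with struct_size(). A
 * standalone sketch of the same bound (the struct layout here is
 * illustrative); the kernel's struct_size() helper from
 * <linux/overflow.h> computes the same value but also saturates if the
 * multiplication would overflow:
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fw_binary_info {
	uint32_t type;
	uint32_t blob_size;
};

struct fw_header {
	uint32_t h_size;
	uint32_t binary_nr;
	struct fw_binary_info binary_header[];	/* binary_nr items */
};

static int fw_size_ok(size_t fw_size, uint32_t binary_nr)
{
	size_t need = offsetof(struct fw_header, binary_header) +
		      (size_t)binary_nr * sizeof(struct fw_binary_info);

	return fw_size >= need;
}

int main(void)
{
	printf("%d\n", fw_size_ok(1024, 4));	/* 1: 8 + 4 * 8 = 40 fits   */
	printf("%d\n", fw_size_ok(16, 4));	/* 0: headers would overrun */
	return 0;
}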
diff --git a/drivers/staging/media/ipu3/ipu3-css.c b/drivers/staging/media/ipu3/ipu3-css.c
index 608dcacf12b2..8c70497d744c 100644
--- a/drivers/staging/media/ipu3/ipu3-css.c
+++ b/drivers/staging/media/ipu3/ipu3-css.c
@@ -5,6 +5,7 @@
#include <linux/iopoll.h>
#include <linux/slab.h>
+#include "ipu3.h"
#include "ipu3-css.h"
#include "ipu3-css-fw.h"
#include "ipu3-css-params.h"
@@ -53,7 +54,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_NV12,
.osys_format = IMGU_ABI_OSYS_FORMAT_NV12,
.osys_tiling = IMGU_ABI_OSYS_TILING_NONE,
- .bytesperpixel_num = 1 * IPU3_CSS_FORMAT_BPP_DEN,
.chroma_decim = 4,
.width_align = IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_OUT | IPU3_CSS_FORMAT_FL_VF,
@@ -64,7 +64,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_BGGR,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -73,7 +72,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GBRG,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -82,7 +80,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_GRBG,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
}, {
@@ -91,7 +88,6 @@ static const struct imgu_css_format imgu_css_formats[] = {
.frame_format = IMGU_ABI_FRAME_FORMAT_RAW_PACKED,
.bayer_order = IMGU_ABI_BAYER_ORDER_RGGB,
.bit_depth = 10,
- .bytesperpixel_num = 64,
.width_align = 2 * IPU3_UAPI_ISP_VEC_ELEMS,
.flags = IPU3_CSS_FORMAT_FL_IN,
},
@@ -150,17 +146,8 @@ static int imgu_css_queue_init(struct imgu_css_queue *queue,
f->height = ALIGN(clamp_t(u32, f->height,
IPU3_CSS_MIN_RES, IPU3_CSS_MAX_H), 2);
queue->width_pad = ALIGN(f->width, queue->css_fmt->width_align);
- if (queue->css_fmt->frame_format != IMGU_ABI_FRAME_FORMAT_RAW_PACKED)
- f->plane_fmt[0].bytesperline = DIV_ROUND_UP(queue->width_pad *
- queue->css_fmt->bytesperpixel_num,
- IPU3_CSS_FORMAT_BPP_DEN);
- else
- /* For packed raw, alignment for bpl is by 50 to the width */
- f->plane_fmt[0].bytesperline =
- DIV_ROUND_UP(f->width,
- IPU3_CSS_FORMAT_BPP_DEN) *
- queue->css_fmt->bytesperpixel_num;
-
+ f->plane_fmt[0].bytesperline =
+ imgu_bytesperline(f->width, queue->css_fmt->frame_format);
sizeimage = f->height * f->plane_fmt[0].bytesperline;
if (queue->css_fmt->chroma_decim)
sizeimage += 2 * sizeimage / queue->css_fmt->chroma_decim;
diff --git a/drivers/staging/media/ipu3/ipu3-css.h b/drivers/staging/media/ipu3/ipu3-css.h
index 6108a068b228..ab64e9521203 100644
--- a/drivers/staging/media/ipu3/ipu3-css.h
+++ b/drivers/staging/media/ipu3/ipu3-css.h
@@ -82,7 +82,6 @@ struct imgu_css_format {
enum imgu_abi_bayer_order bayer_order;
enum imgu_abi_osys_format osys_format;
enum imgu_abi_osys_tiling osys_tiling;
- u32 bytesperpixel_num; /* Bytes per pixel in first plane * 50 */
u8 bit_depth; /* Effective bits per pixel */
u8 chroma_decim; /* Chroma plane decimation, 0=no chroma plane */
u8 width_align; /* Alignment requirement for width_pad */
diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c
index 38a240764509..0473457b4e64 100644
--- a/drivers/staging/media/ipu3/ipu3-v4l2.c
+++ b/drivers/staging/media/ipu3/ipu3-v4l2.c
@@ -592,11 +592,12 @@ static const struct imgu_fmt *find_format(struct v4l2_format *f, u32 type)
static int imgu_vidioc_querycap(struct file *file, void *fh,
struct v4l2_capability *cap)
{
- struct imgu_video_device *node = file_to_intel_imgu_node(file);
+ struct imgu_device *imgu = video_drvdata(file);
strscpy(cap->driver, IMGU_NAME, sizeof(cap->driver));
strscpy(cap->card, IMGU_NAME, sizeof(cap->card));
- snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s", node->name);
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
+ pci_name(imgu->pci_dev));
return 0;
}
@@ -696,7 +697,7 @@ static int imgu_fmt(struct imgu_device *imgu, unsigned int pipe, int node,
/* CSS expects some format on OUT queue */
if (i != IPU3_CSS_QUEUE_OUT &&
- !imgu_pipe->nodes[inode].enabled) {
+ !imgu_pipe->nodes[inode].enabled && !try) {
fmts[i] = NULL;
continue;
}
@@ -864,7 +865,7 @@ static int imgu_vidioc_g_meta_fmt(struct file *file, void *fh,
/******************** function pointers ********************/
-static struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
+static const struct v4l2_subdev_internal_ops imgu_subdev_internal_ops = {
.open = imgu_subdev_open,
};
@@ -1136,7 +1137,9 @@ static int imgu_v4l2_node_setup(struct imgu_device *imgu, unsigned int pipe,
def_pix_fmt.height = def_bus_fmt.height;
def_pix_fmt.field = def_bus_fmt.field;
def_pix_fmt.num_planes = 1;
- def_pix_fmt.plane_fmt[0].bytesperline = def_pix_fmt.width * 2;
+ def_pix_fmt.plane_fmt[0].bytesperline =
+ imgu_bytesperline(def_pix_fmt.width,
+ IMGU_ABI_FRAME_FORMAT_RAW_PACKED);
def_pix_fmt.plane_fmt[0].sizeimage =
def_pix_fmt.height * def_pix_fmt.plane_fmt[0].bytesperline;
def_pix_fmt.flags = 0;
diff --git a/drivers/staging/media/ipu3/ipu3.h b/drivers/staging/media/ipu3/ipu3.h
index eb46b527dd23..d2ad0a95c5aa 100644
--- a/drivers/staging/media/ipu3/ipu3.h
+++ b/drivers/staging/media/ipu3/ipu3.h
@@ -164,4 +164,16 @@ void imgu_v4l2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state);
int imgu_s_stream(struct imgu_device *imgu, int enable);
+static inline u32 imgu_bytesperline(const unsigned int width,
+ enum imgu_abi_frame_format frame_format)
+{
+ if (frame_format == IMGU_ABI_FRAME_FORMAT_NV12)
+ return ALIGN(width, IPU3_UAPI_ISP_VEC_ELEMS);
+ /*
+ * 64 bytes for every 50 pixels, the line length
+ * in bytes is multiple of 64 (line end alignment).
+ */
+ return DIV_ROUND_UP(width, 50) * 64;
+}
+
#endif
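/*
 * Worked example for the RAW_PACKED branch of imgu_bytesperline() above:
 * packed 10-bit raw uses 64 bytes for every 50 pixels, and each line is
 * rounded up to that 64-byte granule. The width below is illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int width = 2048;
	unsigned int bpl = ((width + 49) / 50) * 64;	/* DIV_ROUND_UP(width, 50) * 64 */

	printf("%u\n", bpl);	/* ceil(2048 / 50) = 41 -> 41 * 64 = 2624 bytes per line */
	return 0;
}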
diff --git a/drivers/staging/media/meson/vdec/esparser.h b/drivers/staging/media/meson/vdec/esparser.h
index ff51fe7fda66..9351e62c70e6 100644
--- a/drivers/staging/media/meson/vdec/esparser.h
+++ b/drivers/staging/media/meson/vdec/esparser.h
@@ -17,13 +17,17 @@ int esparser_power_up(struct amvdec_session *sess);
/**
* esparser_queue_eos() - write End Of Stream sequence to the ESPARSER
*
- * @core vdec core struct
+ * @core: vdec core struct
+ * @data: EOS sequence
+ * @len: length of EOS sequence
*/
int esparser_queue_eos(struct amvdec_core *core, const u8 *data, u32 len);
/**
* esparser_queue_all_src() - work handler that writes as many src buffers
* as possible to the ESPARSER
+ *
+ * @work: work struct
*/
void esparser_queue_all_src(struct work_struct *work);
diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c
index e51d69c4729d..8549d95be0f2 100644
--- a/drivers/staging/media/meson/vdec/vdec.c
+++ b/drivers/staging/media/meson/vdec/vdec.c
@@ -994,7 +994,6 @@ static int vdec_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct video_device *vdev;
struct amvdec_core *core;
- struct resource *r;
const struct of_device_id *of_id;
int irq;
int ret;
@@ -1006,13 +1005,11 @@ static int vdec_probe(struct platform_device *pdev)
core->dev = dev;
platform_set_drvdata(pdev, core);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dos");
- core->dos_base = devm_ioremap_resource(dev, r);
+ core->dos_base = devm_platform_ioremap_resource_byname(pdev, "dos");
if (IS_ERR(core->dos_base))
return PTR_ERR(core->dos_base);
- r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "esparser");
- core->esparser_base = devm_ioremap_resource(dev, r);
+ core->esparser_base = devm_platform_ioremap_resource_byname(pdev, "esparser");
if (IS_ERR(core->esparser_base))
return PTR_ERR(core->esparser_base);
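/*
 * The probe change above folds the get-resource-then-ioremap pair into
 * devm_platform_ioremap_resource_byname(). The helper is essentially the
 * following composition of the two calls it replaces (sketch, not the
 * kernel source), so the error-pointer behaviour is unchanged:
 */
void __iomem *devm_platform_ioremap_resource_byname(struct platform_device *pdev,
						    const char *name)
{
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}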
diff --git a/drivers/staging/media/meson/vdec/vdec.h b/drivers/staging/media/meson/vdec/vdec.h
index f95445ac0658..0906b8fb5cc6 100644
--- a/drivers/staging/media/meson/vdec/vdec.h
+++ b/drivers/staging/media/meson/vdec/vdec.h
@@ -60,10 +60,12 @@ struct amvdec_session;
* @dos_clk: DOS clock
* @vdec_1_clk: VDEC_1 clock
* @vdec_hevc_clk: VDEC_HEVC clock
+ * @vdec_hevcf_clk: VDEC_HEVCF clock
* @esparser_reset: RESET for the PARSER
- * @vdec_dec: video device for the decoder
+ * @vdev_dec: video device for the decoder
* @v4l2_dev: v4l2 device
* @cur_sess: current decoding session
+ * @lock: video device lock
*/
struct amvdec_core {
void __iomem *dos_base;
@@ -88,7 +90,7 @@ struct amvdec_core {
struct v4l2_device v4l2_dev;
struct amvdec_session *cur_sess;
- struct mutex lock; /* video device lock */
+ struct mutex lock;
};
/**
@@ -120,6 +122,7 @@ struct amvdec_ops {
* @recycle: optional call to tell the codec to recycle a dst buffer. Must go
* in pair with @can_recycle
* @drain: optional call if the codec has a custom way of draining
+ * @resume: optional call to resume after a resolution change
* @eos_sequence: optional call to get an end sequence to send to esparser
* for flush. Mutually exclusive with @drain.
* @isr: mandatory call when the ISR triggers
@@ -185,6 +188,7 @@ enum amvdec_status {
* @m2m_ctx: v4l2 m2m context
* @ctrl_handler: V4L2 control handler
* @ctrl_min_buf_capture: V4L2 control V4L2_CID_MIN_BUFFERS_FOR_CAPTURE
+ * @lock: cap & out queues lock
* @fmt_out: vdec pixel format for the OUTPUT queue
* @pixfmt_cap: V4L2 pixel format for the CAPTURE queue
* @src_buffer_size: size in bytes of the OUTPUT buffers' only plane
@@ -200,9 +204,12 @@ enum amvdec_status {
* @streamon_cap: stream on flag for capture queue
* @streamon_out: stream on flag for output queue
* @sequence_cap: capture sequence counter
+ * @sequence_out: output sequence counter
* @should_stop: flag set if userspace signaled EOS via command
* or empty buffer
* @keyframe_found: flag set once a keyframe has been parsed
+ * @num_dst_bufs: number of destination buffers
+ * @changed_format: flag set when the format has changed
* @canvas_alloc: array of all the canvas IDs allocated
* @canvas_num: number of canvas IDs allocated
* @vififo_vaddr: virtual address for the VIFIFO
@@ -214,6 +221,9 @@ enum amvdec_status {
* @timestamps: chronological list of src timestamps
* @ts_spinlock: spinlock for the timestamps list
* @last_irq_jiffies: tracks last time the vdec triggered an IRQ
+ * @last_offset: tracks last offset of vififo
+ * @wrap_count: number of times the vififo wrapped around
+ * @fw_idx_to_vb2_idx: firmware buffer index to vb2 buffer index
* @status: current decoding status
* @priv: codec private data
*/
@@ -225,7 +235,7 @@ struct amvdec_session {
struct v4l2_m2m_ctx *m2m_ctx;
struct v4l2_ctrl_handler ctrl_handler;
struct v4l2_ctrl *ctrl_min_buf_capture;
- struct mutex lock; /* cap & out queues lock */
+ struct mutex lock;
const struct amvdec_format *fmt_out;
u32 pixfmt_cap;
diff --git a/drivers/staging/media/meson/vdec/vdec_helpers.h b/drivers/staging/media/meson/vdec/vdec_helpers.h
index cfaed52ab526..88137d15aa3a 100644
--- a/drivers/staging/media/meson/vdec/vdec_helpers.h
+++ b/drivers/staging/media/meson/vdec/vdec_helpers.h
@@ -52,8 +52,9 @@ void amvdec_dst_buf_done_offset(struct amvdec_session *sess,
*
* @sess: current session
* @ts: timestamp to add
+ * @tc: timecode to add
* @offset: offset in the VIFIFO where the associated packet was written
- * @flags the vb2_v4l2_buffer flags
+ * @flags: the vb2_v4l2_buffer flags
*/
void amvdec_add_ts(struct amvdec_session *sess, u64 ts,
struct v4l2_timecode tc, u32 offset, u32 flags);
diff --git a/drivers/staging/media/rkvdec/rkvdec-h264.c b/drivers/staging/media/rkvdec/rkvdec-h264.c
index 76e97cbe2512..951e19231da2 100644
--- a/drivers/staging/media/rkvdec/rkvdec-h264.c
+++ b/drivers/staging/media/rkvdec/rkvdec-h264.c
@@ -1015,8 +1015,9 @@ static int rkvdec_h264_adjust_fmt(struct rkvdec_ctx *ctx,
struct v4l2_pix_format_mplane *fmt = &f->fmt.pix_mp;
fmt->num_planes = 1;
- fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height *
- RKVDEC_H264_MAX_DEPTH_IN_BYTES;
+ if (!fmt->plane_fmt[0].sizeimage)
+ fmt->plane_fmt[0].sizeimage = fmt->width * fmt->height *
+ RKVDEC_H264_MAX_DEPTH_IN_BYTES;
return 0;
}
diff --git a/drivers/staging/media/rkvdec/rkvdec.c b/drivers/staging/media/rkvdec/rkvdec.c
index 7131156c1f2c..4fd4a2907da7 100644
--- a/drivers/staging/media/rkvdec/rkvdec.c
+++ b/drivers/staging/media/rkvdec/rkvdec.c
@@ -280,31 +280,20 @@ static int rkvdec_try_output_fmt(struct file *file, void *priv,
return 0;
}
-static int rkvdec_s_fmt(struct file *file, void *priv,
- struct v4l2_format *f,
- int (*try_fmt)(struct file *, void *,
- struct v4l2_format *))
+static int rkvdec_s_capture_fmt(struct file *file, void *priv,
+ struct v4l2_format *f)
{
struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
struct vb2_queue *vq;
+ int ret;
- if (!try_fmt)
- return -EINVAL;
-
- vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type);
+ /* Change not allowed if queue is busy */
+ vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx,
+ V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
if (vb2_is_busy(vq))
return -EBUSY;
- return try_fmt(file, priv, f);
-}
-
-static int rkvdec_s_capture_fmt(struct file *file, void *priv,
- struct v4l2_format *f)
-{
- struct rkvdec_ctx *ctx = fh_to_rkvdec_ctx(priv);
- int ret;
-
- ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_capture_fmt);
+ ret = rkvdec_try_capture_fmt(file, priv, f);
if (ret)
return ret;
@@ -319,10 +308,21 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
const struct rkvdec_coded_fmt_desc *desc;
struct v4l2_format *cap_fmt;
- struct vb2_queue *peer_vq;
+ struct vb2_queue *peer_vq, *vq;
int ret;
/*
+ * To support dynamic resolution change, the decoder admits a new
+ * resolution as long as the pixelformat stays the same. This is not
+ * allowed while the queue is streaming.
+ */
+ vq = v4l2_m2m_get_vq(m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
+ if (vb2_is_streaming(vq) ||
+ (vb2_is_busy(vq) &&
+ f->fmt.pix_mp.pixelformat != ctx->coded_fmt.fmt.pix_mp.pixelformat))
+ return -EBUSY;
+
+ /*
* Since format change on the OUTPUT queue will reset the CAPTURE
* queue, we can't allow doing so when the CAPTURE queue has buffers
* allocated.
@@ -331,7 +331,7 @@ static int rkvdec_s_output_fmt(struct file *file, void *priv,
if (vb2_is_busy(peer_vq))
return -EBUSY;
- ret = rkvdec_s_fmt(file, priv, f, rkvdec_try_output_fmt);
+ ret = rkvdec_try_output_fmt(file, priv, f);
if (ret)
return ret;
@@ -967,7 +967,6 @@ static const char * const rkvdec_clk_names[] = {
static int rkvdec_probe(struct platform_device *pdev)
{
struct rkvdec_dev *rkvdec;
- struct resource *res;
unsigned int i;
int ret, irq;
@@ -999,8 +998,7 @@ static int rkvdec_probe(struct platform_device *pdev)
*/
clk_set_rate(rkvdec->clocks[0].clk, 500 * 1000 * 1000);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rkvdec->regs = devm_ioremap_resource(&pdev->dev, res);
+ rkvdec->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(rkvdec->regs))
return PTR_ERR(rkvdec->regs);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.c b/drivers/staging/media/sunxi/cedrus/cedrus.c
index c0d005dafc6c..c76fc97d97a0 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.c
@@ -28,6 +28,50 @@
#include "cedrus_dec.h"
#include "cedrus_hw.h"
+static int cedrus_try_ctrl(struct v4l2_ctrl *ctrl)
+{
+ if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) {
+ const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps;
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ } else if (ctrl->id == V4L2_CID_MPEG_VIDEO_HEVC_SPS) {
+ const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps;
+ struct cedrus_ctx *ctx = container_of(ctrl->handler, struct cedrus_ctx, hdl);
+
+ if (sps->chroma_format_idc != 1)
+ /* Only 4:2:0 is supported */
+ return -EINVAL;
+
+ if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8)
+ /* Luma and chroma bit depth mismatch */
+ return -EINVAL;
+
+ if (ctx->dev->capabilities & CEDRUS_CAPABILITY_H265_10_DEC) {
+ if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2)
+ /* Only 8-bit and 10-bit are supported */
+ return -EINVAL;
+ } else {
+ if (sps->bit_depth_luma_minus8 != 0)
+ /* Only 8-bit is supported */
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct v4l2_ctrl_ops cedrus_ctrl_ops = {
+ .try_ctrl = cedrus_try_ctrl,
+};
+
static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
@@ -62,6 +106,7 @@ static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
.id = V4L2_CID_STATELESS_H264_SPS,
+ .ops = &cedrus_ctrl_ops,
},
.codec = CEDRUS_CODEC_H264,
},
@@ -120,6 +165,7 @@ static const struct cedrus_control cedrus_controls[] = {
{
.cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_SPS,
+ .ops = &cedrus_ctrl_ops,
},
.codec = CEDRUS_CODEC_H265,
},
@@ -137,6 +183,12 @@ static const struct cedrus_control cedrus_controls[] = {
},
{
.cfg = {
+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX,
+ },
+ .codec = CEDRUS_CODEC_H265,
+ },
+ {
+ .cfg = {
.id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE,
.max = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
.def = V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
@@ -207,6 +259,7 @@ static int cedrus_init_ctrls(struct cedrus_dev *dev, struct cedrus_ctx *ctx)
v4l2_ctrl_handler_free(hdl);
kfree(ctx->ctrls);
+ ctx->ctrls = NULL;
return hdl->error;
}
@@ -282,7 +335,7 @@ static int cedrus_open(struct file *file)
ret = PTR_ERR(ctx->fh.m2m_ctx);
goto err_ctrls;
}
- ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12;
+ ctx->dst_fmt.pixelformat = V4L2_PIX_FMT_NV12_32L32;
cedrus_prepare_format(&ctx->dst_fmt);
ctx->src_fmt.pixelformat = V4L2_PIX_FMT_MPEG2_SLICE;
/*
@@ -550,6 +603,7 @@ static const struct cedrus_variant sun50i_h6_cedrus_variant = {
CEDRUS_CAPABILITY_MPEG2_DEC |
CEDRUS_CAPABILITY_H264_DEC |
CEDRUS_CAPABILITY_H265_DEC |
+ CEDRUS_CAPABILITY_H265_10_DEC |
CEDRUS_CAPABILITY_VP8_DEC,
.mod_rate = 600000000,
};
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus.h b/drivers/staging/media/sunxi/cedrus/cedrus.h
index 88afba17b78b..c345f2984041 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus.h
@@ -32,6 +32,7 @@
#define CEDRUS_CAPABILITY_H264_DEC BIT(2)
#define CEDRUS_CAPABILITY_MPEG2_DEC BIT(3)
#define CEDRUS_CAPABILITY_VP8_DEC BIT(4)
+#define CEDRUS_CAPABILITY_H265_10_DEC BIT(5)
enum cedrus_codec {
CEDRUS_CODEC_MPEG2,
@@ -78,6 +79,7 @@ struct cedrus_h265_run {
const struct v4l2_ctrl_hevc_pps *pps;
const struct v4l2_ctrl_hevc_slice_params *slice_params;
const struct v4l2_ctrl_hevc_decode_params *decode_params;
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling_matrix;
};
struct cedrus_vp8_run {
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
index 40e8c4123f76..a16c1422558f 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_dec.c
@@ -72,6 +72,8 @@ void cedrus_device_run(void *priv)
V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS);
run.h265.decode_params = cedrus_find_control_data(ctx,
V4L2_CID_MPEG_VIDEO_HEVC_DECODE_PARAMS);
+ run.h265.scaling_matrix = cedrus_find_control_data(ctx,
+ V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX);
break;
case V4L2_PIX_FMT_VP8_FRAME:
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
index de7442d4834d..b4173a8926d6 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c
@@ -520,6 +520,11 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
unsigned int mv_col_size;
int ret;
+ /*
+ * NOTE: All buffers allocated here are only used by HW, so we
+ * can add the DMA_ATTR_NO_KERNEL_MAPPING flag when allocating them.
+ */
+
/* Formula for picture buffer size is taken from CedarX source. */
if (ctx->src_fmt.width > 2048)
@@ -538,23 +543,23 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.pic_info_buf_size = pic_info_size;
ctx->codec.h264.pic_info_buf =
- dma_alloc_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- &ctx->codec.h264.pic_info_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ &ctx->codec.h264.pic_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.pic_info_buf)
return -ENOMEM;
/*
* That buffer is supposed to be 16kiB in size, and be aligned
- * on 16kiB as well. However, dma_alloc_coherent provides the
- * guarantee that we'll have a CPU and DMA address aligned on
- * the smallest page order that is greater to the requested
- * size, so we don't have to overallocate.
+ * on 16kiB as well. However, dma_alloc_attrs provides the
+ * guarantee that we'll have a DMA address aligned on the
+ * smallest page order that is greater than or equal to the requested
+ * so we don't have to overallocate.
*/
ctx->codec.h264.neighbor_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- &ctx->codec.h264.neighbor_info_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h264.neighbor_info_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.neighbor_info_buf) {
ret = -ENOMEM;
goto err_pic_buf;
@@ -582,10 +587,11 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
mv_col_size = field_size * 2 * CEDRUS_H264_FRAME_NUM;
ctx->codec.h264.mv_col_buf_size = mv_col_size;
- ctx->codec.h264.mv_col_buf = dma_alloc_coherent(dev->dev,
- ctx->codec.h264.mv_col_buf_size,
- &ctx->codec.h264.mv_col_buf_dma,
- GFP_KERNEL);
+ ctx->codec.h264.mv_col_buf =
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.mv_col_buf_size,
+ &ctx->codec.h264.mv_col_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.mv_col_buf) {
ret = -ENOMEM;
goto err_neighbor_buf;
@@ -600,10 +606,10 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.deblk_buf_size =
ALIGN(ctx->src_fmt.width, 32) * 12;
ctx->codec.h264.deblk_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h264.deblk_buf_size,
- &ctx->codec.h264.deblk_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.deblk_buf_size,
+ &ctx->codec.h264.deblk_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.deblk_buf) {
ret = -ENOMEM;
goto err_mv_col_buf;
@@ -616,10 +622,10 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
ctx->codec.h264.intra_pred_buf_size =
ALIGN(ctx->src_fmt.width, 64) * 5 * 2;
ctx->codec.h264.intra_pred_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h264.intra_pred_buf_size,
- &ctx->codec.h264.intra_pred_buf_dma,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h264.intra_pred_buf_size,
+ &ctx->codec.h264.intra_pred_buf_dma,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h264.intra_pred_buf) {
ret = -ENOMEM;
goto err_deblk_buf;
@@ -629,24 +635,28 @@ static int cedrus_h264_start(struct cedrus_ctx *ctx)
return 0;
err_deblk_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
- ctx->codec.h264.deblk_buf,
- ctx->codec.h264.deblk_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_mv_col_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
- ctx->codec.h264.mv_col_buf,
- ctx->codec.h264.mv_col_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_neighbor_buf:
- dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h264.neighbor_info_buf,
- ctx->codec.h264.neighbor_info_buf_dma);
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
err_pic_buf:
- dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- ctx->codec.h264.pic_info_buf,
- ctx->codec.h264.pic_info_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
return ret;
}
@@ -654,23 +664,28 @@ static void cedrus_h264_stop(struct cedrus_ctx *ctx)
{
struct cedrus_dev *dev = ctx->dev;
- dma_free_coherent(dev->dev, ctx->codec.h264.mv_col_buf_size,
- ctx->codec.h264.mv_col_buf,
- ctx->codec.h264.mv_col_buf_dma);
- dma_free_coherent(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h264.neighbor_info_buf,
- ctx->codec.h264.neighbor_info_buf_dma);
- dma_free_coherent(dev->dev, ctx->codec.h264.pic_info_buf_size,
- ctx->codec.h264.pic_info_buf,
- ctx->codec.h264.pic_info_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.mv_col_buf_size,
+ ctx->codec.h264.mv_col_buf,
+ ctx->codec.h264.mv_col_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_attrs(dev->dev, CEDRUS_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h264.neighbor_info_buf,
+ ctx->codec.h264.neighbor_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
+ dma_free_attrs(dev->dev, ctx->codec.h264.pic_info_buf_size,
+ ctx->codec.h264.pic_info_buf,
+ ctx->codec.h264.pic_info_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.deblk_buf_size)
- dma_free_coherent(dev->dev, ctx->codec.h264.deblk_buf_size,
- ctx->codec.h264.deblk_buf,
- ctx->codec.h264.deblk_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.deblk_buf_size,
+ ctx->codec.h264.deblk_buf,
+ ctx->codec.h264.deblk_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
if (ctx->codec.h264.intra_pred_buf_size)
- dma_free_coherent(dev->dev, ctx->codec.h264.intra_pred_buf_size,
- ctx->codec.h264.intra_pred_buf,
- ctx->codec.h264.intra_pred_buf_dma);
+ dma_free_attrs(dev->dev, ctx->codec.h264.intra_pred_buf_size,
+ ctx->codec.h264.intra_pred_buf,
+ ctx->codec.h264.intra_pred_buf_dma,
+ DMA_ATTR_NO_KERNEL_MAPPING);
}
static void cedrus_h264_trigger(struct cedrus_ctx *ctx)
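Note on the API used in this conversion: dma_alloc_attrs()/dma_free_attrs() take the same arguments as the coherent helpers plus an attribute mask, and with DMA_ATTR_NO_KERNEL_MAPPING the returned pointer is an opaque cookie that the CPU must not dereference. A minimal, stand-alone sketch of the pairing (the helper names and buffer are illustrative, not taken from the driver):

	#include <linux/dma-mapping.h>

	/* Allocate a device-only buffer without creating a kernel mapping. */
	static void *example_alloc(struct device *dev, size_t size, dma_addr_t *dma)
	{
		return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
				       DMA_ATTR_NO_KERNEL_MAPPING);
	}

	/* Free it with the same attribute; the cookie is only valid for this. */
	static void example_free(struct device *dev, size_t size, void *buf,
				 dma_addr_t dma)
	{
		dma_free_attrs(dev, size, buf, dma, DMA_ATTR_NO_KERNEL_MAPPING);
	}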
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
index ef0311a16d01..8829a7bab07e 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_h265.c
@@ -238,6 +238,69 @@ static void cedrus_h265_skip_bits(struct cedrus_dev *dev, int num)
}
}
+static void cedrus_h265_write_scaling_list(struct cedrus_ctx *ctx,
+ struct cedrus_run *run)
+{
+ const struct v4l2_ctrl_hevc_scaling_matrix *scaling;
+ struct cedrus_dev *dev = ctx->dev;
+ u32 i, j, k, val;
+
+ scaling = run->h265.scaling_matrix;
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF0,
+ (scaling->scaling_list_dc_coef_32x32[1] << 24) |
+ (scaling->scaling_list_dc_coef_32x32[0] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[1] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[0] << 0));
+
+ cedrus_write(dev, VE_DEC_H265_SCALING_LIST_DC_COEF1,
+ (scaling->scaling_list_dc_coef_16x16[5] << 24) |
+ (scaling->scaling_list_dc_coef_16x16[4] << 16) |
+ (scaling->scaling_list_dc_coef_16x16[3] << 8) |
+ (scaling->scaling_list_dc_coef_16x16[2] << 0));
+
+ cedrus_h265_sram_write_offset(dev, VE_DEC_H265_SRAM_OFFSET_SCALING_LISTS);
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_8x8[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_8x8[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_8x8[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_32x32[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_32x32[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_32x32[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 8; j++)
+ for (k = 0; k < 8; k += 4) {
+ val = ((u32)scaling->scaling_list_16x16[i][j + (k + 3) * 8] << 24) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 2) * 8] << 16) |
+ ((u32)scaling->scaling_list_16x16[i][j + (k + 1) * 8] << 8) |
+ scaling->scaling_list_16x16[i][j + k * 8];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 4; j++) {
+ val = ((u32)scaling->scaling_list_4x4[i][j + 12] << 24) |
+ ((u32)scaling->scaling_list_4x4[i][j + 8] << 16) |
+ ((u32)scaling->scaling_list_4x4[i][j + 4] << 8) |
+ scaling->scaling_list_4x4[i][j];
+ cedrus_write(dev, VE_DEC_H265_SRAM_DATA, val);
+ }
+}
+
static void cedrus_h265_setup(struct cedrus_ctx *ctx,
struct cedrus_run *run)
{
@@ -287,11 +350,12 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
ctx->codec.h265.mv_col_buf_size = num_buffers *
ctx->codec.h265.mv_col_buf_unit_size;
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
ctx->codec.h265.mv_col_buf =
- dma_alloc_coherent(dev->dev,
- ctx->codec.h265.mv_col_buf_size,
- &ctx->codec.h265.mv_col_buf_addr,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev,
+ ctx->codec.h265.mv_col_buf_size,
+ &ctx->codec.h265.mv_col_buf_addr,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.mv_col_buf) {
ctx->codec.h265.mv_col_buf_size = 0;
// TODO: Abort the process here.
@@ -527,7 +591,12 @@ static void cedrus_h265_setup(struct cedrus_ctx *ctx,
/* Scaling list. */
- reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED) {
+ cedrus_h265_write_scaling_list(ctx, run);
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_FLAG_ENABLED;
+ } else {
+ reg = VE_DEC_H265_SCALING_LIST_CTRL0_DEFAULT;
+ }
cedrus_write(dev, VE_DEC_H265_SCALING_LIST_CTRL0, reg);
/* Neighbor information address. */
@@ -599,10 +668,11 @@ static int cedrus_h265_start(struct cedrus_ctx *ctx)
/* The buffer size is calculated at setup time. */
ctx->codec.h265.mv_col_buf_size = 0;
+ /* Buffer is never accessed by CPU, so we can skip kernel mapping. */
ctx->codec.h265.neighbor_info_buf =
- dma_alloc_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
- &ctx->codec.h265.neighbor_info_buf_addr,
- GFP_KERNEL);
+ dma_alloc_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ &ctx->codec.h265.neighbor_info_buf_addr,
+ GFP_KERNEL, DMA_ATTR_NO_KERNEL_MAPPING);
if (!ctx->codec.h265.neighbor_info_buf)
return -ENOMEM;
@@ -614,16 +684,18 @@ static void cedrus_h265_stop(struct cedrus_ctx *ctx)
struct cedrus_dev *dev = ctx->dev;
if (ctx->codec.h265.mv_col_buf_size > 0) {
- dma_free_coherent(dev->dev, ctx->codec.h265.mv_col_buf_size,
- ctx->codec.h265.mv_col_buf,
- ctx->codec.h265.mv_col_buf_addr);
+ dma_free_attrs(dev->dev, ctx->codec.h265.mv_col_buf_size,
+ ctx->codec.h265.mv_col_buf,
+ ctx->codec.h265.mv_col_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
ctx->codec.h265.mv_col_buf_size = 0;
}
- dma_free_coherent(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
- ctx->codec.h265.neighbor_info_buf,
- ctx->codec.h265.neighbor_info_buf_addr);
+ dma_free_attrs(dev->dev, CEDRUS_H265_NEIGHBOR_INFO_BUF_SIZE,
+ ctx->codec.h265.neighbor_info_buf,
+ ctx->codec.h265.neighbor_info_buf_addr,
+ DMA_ATTR_NO_KERNEL_MAPPING);
}
static void cedrus_h265_trigger(struct cedrus_ctx *ctx)
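The scaling-list writes introduced above pack four 8-bit coefficients into each 32-bit word written to VE_DEC_H265_SRAM_DATA, low byte first; the driver walks the 8x8/16x16/32x32 matrices with a stride of 8, so the packed bytes are not adjacent in the source arrays. A hedged illustration of just the byte ordering (the helper name is hypothetical, not part of the driver):

	#include <linux/types.h>

	/* Combine four 8-bit scaling coefficients into the 32-bit layout used
	 * for VE_DEC_H265_SRAM_DATA: c0 ends up in the low byte. */
	static u32 example_pack_coefs(u8 c0, u8 c1, u8 c2, u8 c3)
	{
		return ((u32)c3 << 24) | ((u32)c2 << 16) | ((u32)c1 << 8) | c0;
	}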
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
index e2f2ff609c7e..2d7663726467 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_hw.c
@@ -99,7 +99,7 @@ void cedrus_dst_format_set(struct cedrus_dev *dev,
cedrus_write(dev, VE_PRIMARY_FB_LINE_STRIDE, reg);
break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ case V4L2_PIX_FMT_NV12_32L32:
default:
reg = VE_PRIMARY_OUT_FMT_TILED_32_NV12;
cedrus_write(dev, VE_PRIMARY_OUT_FMT, reg);
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
index 92ace87c1c7d..bdb062ad8682 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_regs.h
@@ -494,6 +494,8 @@
#define VE_DEC_H265_ENTRY_POINT_OFFSET_ADDR (VE_ENGINE_DEC_H265 + 0x64)
#define VE_DEC_H265_TILE_START_CTB (VE_ENGINE_DEC_H265 + 0x68)
#define VE_DEC_H265_TILE_END_CTB (VE_ENGINE_DEC_H265 + 0x6c)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF0 (VE_ENGINE_DEC_H265 + 0x78)
+#define VE_DEC_H265_SCALING_LIST_DC_COEF1 (VE_ENGINE_DEC_H265 + 0x7c)
#define VE_DEC_H265_LOW_ADDR (VE_ENGINE_DEC_H265 + 0x80)
diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_video.c b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
index 825af5fd35e0..33726175d980 100644
--- a/drivers/staging/media/sunxi/cedrus/cedrus_video.c
+++ b/drivers/staging/media/sunxi/cedrus/cedrus_video.c
@@ -56,7 +56,7 @@ static struct cedrus_format cedrus_formats[] = {
.capabilities = CEDRUS_CAPABILITY_VP8_DEC,
},
{
- .pixelformat = V4L2_PIX_FMT_SUNXI_TILED_NV12,
+ .pixelformat = V4L2_PIX_FMT_NV12_32L32,
.directions = CEDRUS_DECODE_DST,
},
{
@@ -124,7 +124,7 @@ void cedrus_prepare_format(struct v4l2_pix_format *pix_fmt)
sizeimage = max_t(u32, SZ_1K, sizeimage);
break;
- case V4L2_PIX_FMT_SUNXI_TILED_NV12:
+ case V4L2_PIX_FMT_NV12_32L32:
/* 32-aligned stride. */
bytesperline = ALIGN(width, 32);
@@ -568,9 +568,9 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP | VB2_DMABUF;
+ src_vq->dma_attrs = DMA_ATTR_NO_KERNEL_MAPPING;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct cedrus_buffer);
- src_vq->min_buffers_needed = 1;
src_vq->ops = &cedrus_qops;
src_vq->mem_ops = &vb2_dma_contig_memops;
src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
@@ -587,7 +587,6 @@ int cedrus_queue_init(void *priv, struct vb2_queue *src_vq,
dst_vq->io_modes = VB2_MMAP | VB2_DMABUF;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct cedrus_buffer);
- dst_vq->min_buffers_needed = 1;
dst_vq->ops = &cedrus_qops;
dst_vq->mem_ops = &vb2_dma_contig_memops;
dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
diff --git a/drivers/staging/media/tegra-video/vi.c b/drivers/staging/media/tegra-video/vi.c
index d321790b07d9..69d9787d5338 100644
--- a/drivers/staging/media/tegra-video/vi.c
+++ b/drivers/staging/media/tegra-video/vi.c
@@ -1272,7 +1272,7 @@ static int tegra_channel_init(struct tegra_vi_channel *chan)
}
if (!IS_ENABLED(CONFIG_VIDEO_TEGRA_TPG))
- v4l2_async_notifier_init(&chan->notifier);
+ v4l2_async_nf_init(&chan->notifier);
return 0;
@@ -1811,8 +1811,8 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
continue;
}
- tvge = v4l2_async_notifier_add_fwnode_subdev(&chan->notifier, remote,
- struct tegra_vi_graph_entity);
+ tvge = v4l2_async_nf_add_fwnode(&chan->notifier, remote,
+ struct tegra_vi_graph_entity);
if (IS_ERR(tvge)) {
ret = PTR_ERR(tvge);
dev_err(vi->dev,
@@ -1834,7 +1834,7 @@ static int tegra_vi_graph_parse_one(struct tegra_vi_channel *chan,
cleanup:
dev_err(vi->dev, "failed parsing the graph: %d\n", ret);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
of_node_put(node);
return ret;
}
@@ -1868,13 +1868,12 @@ static int tegra_vi_graph_init(struct tegra_vi *vi)
continue;
chan->notifier.ops = &tegra_vi_async_ops;
- ret = v4l2_async_notifier_register(&vid->v4l2_dev,
- &chan->notifier);
+ ret = v4l2_async_nf_register(&vid->v4l2_dev, &chan->notifier);
if (ret < 0) {
dev_err(vi->dev,
"failed to register channel %d notifier: %d\n",
chan->portnos[0], ret);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
}
}
@@ -1887,8 +1886,8 @@ static void tegra_vi_graph_cleanup(struct tegra_vi *vi)
list_for_each_entry(chan, &vi->vi_chans, list) {
vb2_video_unregister_device(&chan->video);
- v4l2_async_notifier_unregister(&chan->notifier);
- v4l2_async_notifier_cleanup(&chan->notifier);
+ v4l2_async_nf_unregister(&chan->notifier);
+ v4l2_async_nf_cleanup(&chan->notifier);
}
}
diff --git a/drivers/staging/most/dim2/Makefile b/drivers/staging/most/dim2/Makefile
index 861adacf6c72..5f9612af3fa3 100644
--- a/drivers/staging/most/dim2/Makefile
+++ b/drivers/staging/most/dim2/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MOST_DIM2) += most_dim2.o
-most_dim2-objs := dim2.o hal.o sysfs.o
+most_dim2-objs := dim2.o hal.o
diff --git a/drivers/staging/most/dim2/dim2.c b/drivers/staging/most/dim2/dim2.c
index 093ef9a2b291..bd102329d8c8 100644
--- a/drivers/staging/most/dim2/dim2.c
+++ b/drivers/staging/most/dim2/dim2.c
@@ -108,6 +108,7 @@ struct dim2_hdm {
struct dim2_platform_data {
int (*enable)(struct platform_device *pdev);
void (*disable)(struct platform_device *pdev);
+ u8 fcnt;
};
#define iface_to_hdm(iface) container_of(iface, struct dim2_hdm, most_iface)
@@ -117,7 +118,8 @@ struct dim2_platform_data {
(((p)[1] == 0x18) && ((p)[2] == 0x05) && ((p)[3] == 0x0C) && \
((p)[13] == 0x3C) && ((p)[14] == 0x00) && ((p)[15] == 0x0A))
-bool dim2_sysfs_get_state_cb(void)
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
bool state;
unsigned long flags;
@@ -126,9 +128,18 @@ bool dim2_sysfs_get_state_cb(void)
state = dim_get_lock_state();
spin_unlock_irqrestore(&dim_lock, flags);
- return state;
+ return sysfs_emit(buf, "%s\n", state ? "locked" : "");
}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *dim2_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(dim2);
+
/**
* dimcb_on_error - callback from HAL to report miscommunication between
* HDM and HAL
@@ -716,6 +727,23 @@ static int get_dim2_clk_speed(const char *clock_speed, u8 *val)
return -EINVAL;
}
+static void dim2_release(struct device *d)
+{
+ struct dim2_hdm *dev = container_of(d, struct dim2_hdm, dev);
+ unsigned long flags;
+
+ kthread_stop(dev->netinfo_task);
+
+ spin_lock_irqsave(&dim_lock, flags);
+ dim_shutdown();
+ spin_unlock_irqrestore(&dim_lock, flags);
+
+ if (dev->disable_platform)
+ dev->disable_platform(to_platform_device(d->parent));
+
+ kfree(dev);
+}
+
/*
* dim2_probe - dim2 probe handler
* @pdev: platform device structure
@@ -732,11 +760,12 @@ static int dim2_probe(struct platform_device *pdev)
struct resource *res;
int ret, i;
u8 hal_ret;
+ u8 dev_fcnt = fcnt;
int irq;
enum { MLB_INT_IDX, AHB0_INT_IDX };
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
@@ -748,30 +777,38 @@ static int dim2_probe(struct platform_device *pdev)
"microchip,clock-speed", &clock_speed);
if (ret) {
dev_err(&pdev->dev, "missing dt property clock-speed\n");
- return ret;
+ goto err_free_dev;
}
ret = get_dim2_clk_speed(clock_speed, &dev->clk_speed);
if (ret) {
dev_err(&pdev->dev, "bad dt property clock-speed\n");
- return ret;
+ goto err_free_dev;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
dev->io_base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dev->io_base))
- return PTR_ERR(dev->io_base);
+ if (IS_ERR(dev->io_base)) {
+ ret = PTR_ERR(dev->io_base);
+ goto err_free_dev;
+ }
of_id = of_match_node(dim2_of_match, pdev->dev.of_node);
pdata = of_id->data;
- ret = pdata && pdata->enable ? pdata->enable(pdev) : 0;
- if (ret)
- return ret;
-
- dev->disable_platform = pdata ? pdata->disable : NULL;
+ if (pdata) {
+ if (pdata->enable) {
+ ret = pdata->enable(pdev);
+ if (ret)
+ goto err_free_dev;
+ }
+ dev->disable_platform = pdata->disable;
+ if (pdata->fcnt)
+ dev_fcnt = pdata->fcnt;
+ }
- dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n", fcnt);
- hal_ret = dim_startup(dev->io_base, dev->clk_speed, fcnt);
+ dev_info(&pdev->dev, "sync: num of frames per sub-buffer: %u\n",
+ dev_fcnt);
+ hal_ret = dim_startup(dev->io_base, dev->clk_speed, dev_fcnt);
if (hal_ret != DIM_NO_ERROR) {
dev_err(&pdev->dev, "dim_startup failed: %d\n", hal_ret);
ret = -ENODEV;
@@ -857,32 +894,19 @@ static int dim2_probe(struct platform_device *pdev)
dev->most_iface.request_netinfo = request_netinfo;
dev->most_iface.driver_dev = &pdev->dev;
dev->most_iface.dev = &dev->dev;
- dev->dev.init_name = "dim2_state";
+ dev->dev.init_name = dev->name;
dev->dev.parent = &pdev->dev;
+ dev->dev.release = dim2_release;
- ret = most_register_interface(&dev->most_iface);
- if (ret) {
- dev_err(&pdev->dev, "failed to register MOST interface\n");
- goto err_stop_thread;
- }
-
- ret = dim2_sysfs_probe(&dev->dev);
- if (ret) {
- dev_err(&pdev->dev, "failed to create sysfs attribute\n");
- goto err_unreg_iface;
- }
+ return most_register_interface(&dev->most_iface);
- return 0;
-
-err_unreg_iface:
- most_deregister_interface(&dev->most_iface);
-err_stop_thread:
- kthread_stop(dev->netinfo_task);
err_shutdown_dim:
dim_shutdown();
err_disable_platform:
if (dev->disable_platform)
dev->disable_platform(pdev);
+err_free_dev:
+ kfree(dev);
return ret;
}
@@ -896,18 +920,8 @@ err_disable_platform:
static int dim2_remove(struct platform_device *pdev)
{
struct dim2_hdm *dev = platform_get_drvdata(pdev);
- unsigned long flags;
- dim2_sysfs_destroy(&dev->dev);
most_deregister_interface(&dev->most_iface);
- kthread_stop(dev->netinfo_task);
-
- spin_lock_irqsave(&dim_lock, flags);
- dim_shutdown();
- spin_unlock_irqrestore(&dim_lock, flags);
-
- if (dev->disable_platform)
- dev->disable_platform(pdev);
return 0;
}
@@ -1047,9 +1061,19 @@ static void rcar_m3_disable(struct platform_device *pdev)
enum dim2_platforms { FSL_MX6, RCAR_H2, RCAR_M3 };
static struct dim2_platform_data plat_data[] = {
- [FSL_MX6] = { .enable = fsl_mx6_enable, .disable = fsl_mx6_disable },
- [RCAR_H2] = { .enable = rcar_h2_enable, .disable = rcar_h2_disable },
- [RCAR_M3] = { .enable = rcar_m3_enable, .disable = rcar_m3_disable },
+ [FSL_MX6] = {
+ .enable = fsl_mx6_enable,
+ .disable = fsl_mx6_disable,
+ },
+ [RCAR_H2] = {
+ .enable = rcar_h2_enable,
+ .disable = rcar_h2_disable,
+ },
+ [RCAR_M3] = {
+ .enable = rcar_m3_enable,
+ .disable = rcar_m3_disable,
+ .fcnt = 3,
+ },
};
static const struct of_device_id dim2_of_match[] = {
@@ -1082,6 +1106,7 @@ static struct platform_driver dim2_driver = {
.driver = {
.name = "hdm_dim2",
.of_match_table = dim2_of_match,
+ .dev_groups = dim2_groups,
},
};
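The dim2 rework above drops the separate sysfs.c in favour of the driver-core attribute-group helpers: ATTRIBUTE_GROUPS() generates dim2_groups[], which the platform driver exposes through .dev_groups so the core creates and removes the files itself. A minimal sketch of that pattern with made-up names (not the driver's):

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sysfs_emit(buf, "%s\n", "ok");
	}
	static DEVICE_ATTR_RO(example);

	static struct attribute *example_attrs[] = {
		&dev_attr_example.attr,
		NULL,
	};
	/* Generates example_groups[], which can be assigned to
	 * some_driver.driver.dev_groups. */
	ATTRIBUTE_GROUPS(example);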
diff --git a/drivers/staging/most/dim2/sysfs.c b/drivers/staging/most/dim2/sysfs.c
deleted file mode 100644
index c85b2cdcdca3..000000000000
--- a/drivers/staging/most/dim2/sysfs.c
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * sysfs.c - MediaLB sysfs information
- *
- * Copyright (C) 2015, Microchip Technology Germany II GmbH & Co. KG
- */
-
-/* Author: Andrey Shvetsov <andrey.shvetsov@k2l.de> */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include "sysfs.h"
-#include <linux/device.h>
-
-static ssize_t state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- bool state = dim2_sysfs_get_state_cb();
-
- return sprintf(buf, "%s\n", state ? "locked" : "");
-}
-
-static DEVICE_ATTR_RO(state);
-
-static struct attribute *dev_attrs[] = {
- &dev_attr_state.attr,
- NULL,
-};
-
-static struct attribute_group dev_attr_group = {
- .attrs = dev_attrs,
-};
-
-static const struct attribute_group *dev_attr_groups[] = {
- &dev_attr_group,
- NULL,
-};
-
-int dim2_sysfs_probe(struct device *dev)
-{
- dev->groups = dev_attr_groups;
- return device_register(dev);
-}
-
-void dim2_sysfs_destroy(struct device *dev)
-{
- device_unregister(dev);
-}
diff --git a/drivers/staging/most/dim2/sysfs.h b/drivers/staging/most/dim2/sysfs.h
index 24277a17cff3..09115cf4ed00 100644
--- a/drivers/staging/most/dim2/sysfs.h
+++ b/drivers/staging/most/dim2/sysfs.h
@@ -16,15 +16,4 @@ struct medialb_bus {
struct kobject kobj_group;
};
-struct device;
-
-int dim2_sysfs_probe(struct device *dev);
-void dim2_sysfs_destroy(struct device *dev);
-
-/*
- * callback,
- * must deliver MediaLB state as true if locked or false if unlocked
- */
-bool dim2_sysfs_get_state_cb(void);
-
#endif /* DIM2_SYSFS_H */
diff --git a/drivers/staging/most/net/net.c b/drivers/staging/most/net/net.c
index a5fd14246046..47039f0d262f 100644
--- a/drivers/staging/most/net/net.c
+++ b/drivers/staging/most/net/net.c
@@ -564,7 +564,7 @@ static void on_netinfo(struct most_interface *iface,
if (m && is_valid_ether_addr(m)) {
if (!is_valid_ether_addr(dev->dev_addr)) {
netdev_info(dev, "set mac %pM\n", m);
- ether_addr_copy(dev->dev_addr, m);
+ eth_hw_addr_set(dev, m);
netif_dormant_off(dev);
} else if (!ether_addr_equal(dev->dev_addr, m)) {
netdev_warn(dev, "reject mac %pM\n", m);
diff --git a/drivers/staging/mt7621-dma/hsdma-mt7621.c b/drivers/staging/mt7621-dma/hsdma-mt7621.c
index b0ed935de7ac..1424d01d434b 100644
--- a/drivers/staging/mt7621-dma/hsdma-mt7621.c
+++ b/drivers/staging/mt7621-dma/hsdma-mt7621.c
@@ -162,8 +162,7 @@ struct mtk_hsdam_engine {
struct mtk_hsdma_chan chan[1];
};
-static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(
- struct mtk_hsdma_chan *chan)
+static inline struct mtk_hsdam_engine *mtk_hsdma_chan_get_dev(struct mtk_hsdma_chan *chan)
{
return container_of(chan->vchan.chan.device, struct mtk_hsdam_engine,
ddev);
@@ -174,8 +173,7 @@ static inline struct mtk_hsdma_chan *to_mtk_hsdma_chan(struct dma_chan *c)
return container_of(c, struct mtk_hsdma_chan, vchan.chan);
}
-static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(
- struct virt_dma_desc *vdesc)
+static inline struct mtk_hsdma_desc *to_mtk_hsdma_desc(struct virt_dma_desc *vdesc)
{
return container_of(vdesc, struct mtk_hsdma_desc, vdesc);
}
diff --git a/drivers/staging/mt7621-dts/gbpc1.dts b/drivers/staging/mt7621-dts/gbpc1.dts
index b65d71686814..e38a083811e5 100644
--- a/drivers/staging/mt7621-dts/gbpc1.dts
+++ b/drivers/staging/mt7621-dts/gbpc1.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/dts-v1/;
#include "mt7621.dtsi"
@@ -18,7 +19,7 @@
bootargs = "console=ttyS0,57600";
};
- palmbus: palmbus@1E000000 {
+ palmbus: palmbus@1e000000 {
i2c@900 {
status = "okay";
};
diff --git a/drivers/staging/mt7621-dts/gbpc2.dts b/drivers/staging/mt7621-dts/gbpc2.dts
index 52760e7351f6..6fe603c7711d 100644
--- a/drivers/staging/mt7621-dts/gbpc2.dts
+++ b/drivers/staging/mt7621-dts/gbpc2.dts
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/dts-v1/;
#include "gbpc1.dts"
diff --git a/drivers/staging/mt7621-dts/mt7621.dtsi b/drivers/staging/mt7621-dts/mt7621.dtsi
index eeabe9c0f4fb..6d158e4f4b8c 100644
--- a/drivers/staging/mt7621-dts/mt7621.dtsi
+++ b/drivers/staging/mt7621-dts/mt7621.dtsi
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
#include <dt-bindings/interrupt-controller/mips-gic.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/clock/mt7621-clk.h>
@@ -8,12 +9,19 @@
compatible = "mediatek,mt7621-soc";
cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
cpu@0 {
+ device_type = "cpu";
compatible = "mips,mips1004Kc";
+ reg = <0>;
};
cpu@1 {
+ device_type = "cpu";
compatible = "mips,mips1004Kc";
+ reg = <1>;
};
};
@@ -47,10 +55,10 @@
regulator-always-on;
};
- palmbus: palmbus@1E000000 {
+ palmbus: palmbus@1e000000 {
compatible = "palmbus";
- reg = <0x1E000000 0x100000>;
- ranges = <0x0 0x1E000000 0x0FFFFF>;
+ reg = <0x1e000000 0x100000>;
+ ranges = <0x0 0x1e000000 0x0fffff>;
#address-cells = <1>;
#size-cells = <1>;
@@ -100,43 +108,11 @@
pinctrl-0 = <&i2c_pins>;
};
- i2s: i2s@a00 {
- compatible = "mediatek,mt7621-i2s";
- reg = <0xa00 0x100>;
-
- clocks = <&sysc MT7621_CLK_I2S>;
- clock-names = "i2s";
- resets = <&rstctrl 17>;
- reset-names = "i2s";
-
- interrupt-parent = <&gic>;
- interrupts = <GIC_SHARED 16 IRQ_TYPE_LEVEL_HIGH>;
-
- txdma-req = <2>;
- rxdma-req = <3>;
-
- dmas = <&gdma 4>,
- <&gdma 6>;
- dma-names = "tx", "rx";
-
- status = "disabled";
- };
-
memc: syscon@5000 {
compatible = "mediatek,mt7621-memc", "syscon";
reg = <0x5000 0x1000>;
};
- cpc: cpc@1fbf0000 {
- compatible = "mediatek,mt7621-cpc";
- reg = <0x1fbf0000 0x8000>;
- };
-
- mc: mc@1fbf8000 {
- compatible = "mediatek,mt7621-mc";
- reg = <0x1fbf8000 0x8000>;
- };
-
uartlite: uartlite@c00 {
compatible = "ns16550a";
reg = <0xc00 0x100>;
@@ -181,7 +157,7 @@
reset-names = "dma";
interrupt-parent = <&gic>;
- interrupts = <0 13 4>;
+ interrupts = <0 13 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
#dma-channels = <16>;
@@ -200,7 +176,7 @@
reset-names = "hsdma";
interrupt-parent = <&gic>;
- interrupts = <0 11 4>;
+ interrupts = <0 11 IRQ_TYPE_LEVEL_HIGH>;
#dma-cells = <1>;
#dma-channels = <1>;
@@ -301,11 +277,11 @@
#reset-cells = <1>;
};
- sdhci: sdhci@1E130000 {
+ sdhci: sdhci@1e130000 {
status = "disabled";
compatible = "mediatek,mt7620-mmc";
- reg = <0x1E130000 0x4000>;
+ reg = <0x1e130000 0x4000>;
bus-width = <4>;
max-frequency = <48000000>;
@@ -327,7 +303,7 @@
interrupts = <GIC_SHARED 20 IRQ_TYPE_LEVEL_HIGH>;
};
- xhci: xhci@1E1C0000 {
+ xhci: xhci@1e1c0000 {
status = "okay";
compatible = "mediatek,mt8173-xhci";
@@ -358,18 +334,14 @@
};
};
- nand: nand@1e003000 {
- status = "disabled";
-
- compatible = "mediatek,mt7621-nand";
- bank-width = <2>;
- reg = <0x1e003000 0x800
- 0x1e003800 0x800>;
- #address-cells = <1>;
- #size-cells = <1>;
+ cpc: cpc@1fbf0000 {
+ compatible = "mti,mips-cpc";
+ reg = <0x1fbf0000 0x8000>;
+ };
- clocks = <&sysc MT7621_CLK_NAND>;
- clock-names = "nand";
+ cdmm: cdmm@1fbf8000 {
+ compatible = "mti,mips-cdmm";
+ reg = <0x1fbf8000 0x8000>;
};
ethernet: ethernet@1e100000 {
diff --git a/drivers/staging/mt7621-pci/pci-mt7621.c b/drivers/staging/mt7621-pci/pci-mt7621.c
index 6acfc94a16e7..503cb1fca2e0 100644
--- a/drivers/staging/mt7621-pci/pci-mt7621.c
+++ b/drivers/staging/mt7621-pci/pci-mt7621.c
@@ -482,7 +482,7 @@ static int mt7621_pcie_enable_ports(struct pci_host_bridge *host)
/* Setup MEMWIN and IOWIN */
pcie_write(pcie, 0xffffffff, RALINK_PCI_MEMBASE);
- pcie_write(pcie, entry->res->start, RALINK_PCI_IOBASE);
+ pcie_write(pcie, entry->res->start - entry->offset, RALINK_PCI_IOBASE);
list_for_each_entry(port, &pcie->ports, list) {
if (port->enabled) {
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 5d24c1b6663b..f662739137b5 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -350,7 +350,7 @@ static int cvm_oct_set_mac_filter(struct net_device *dev)
(cvmx_helper_interface_get_mode(interface) !=
CVMX_HELPER_INTERFACE_MODE_SPI)) {
int i;
- u8 *ptr = dev->dev_addr;
+ const u8 *ptr = dev->dev_addr;
u64 mac = 0;
int index = INDEX(priv->port);
@@ -409,7 +409,7 @@ int cvm_oct_common_init(struct net_device *dev)
struct octeon_ethernet *priv = netdev_priv(dev);
int ret;
- ret = of_get_mac_address(priv->of_node, dev->dev_addr);
+ ret = of_get_ethdev_address(priv->of_node, dev);
if (ret)
eth_hw_addr_random(dev);
diff --git a/drivers/staging/pi433/pi433_if.c b/drivers/staging/pi433/pi433_if.c
index c8d0c63fdd1d..29bd37669059 100644
--- a/drivers/staging/pi433/pi433_if.c
+++ b/drivers/staging/pi433/pi433_if.c
@@ -649,7 +649,7 @@ pi433_tx_thread(void *data)
/* clear fifo, set fifo threshold, set payload length */
retval = rf69_set_mode(spi, standby); /* this clears the fifo */
if (retval < 0)
- return retval;
+ goto abort;
if (device->rx_active && !rx_interrupted) {
/*
@@ -661,33 +661,33 @@ pi433_tx_thread(void *data)
retval = rf69_set_fifo_threshold(spi, FIFO_THRESHOLD);
if (retval < 0)
- return retval;
+ goto abort;
if (tx_cfg.enable_length_byte == OPTION_ON) {
retval = rf69_set_payload_length(spi, size * tx_cfg.repetitions);
if (retval < 0)
- return retval;
+ goto abort;
} else {
retval = rf69_set_payload_length(spi, 0);
if (retval < 0)
- return retval;
+ goto abort;
}
/* configure the rf chip */
retval = rf69_set_tx_cfg(device, &tx_cfg);
if (retval < 0)
- return retval;
+ goto abort;
/* enable fifo level interrupt */
retval = rf69_set_dio_mapping(spi, DIO1, DIO_FIFO_LEVEL);
if (retval < 0)
- return retval;
+ goto abort;
device->irq_state[DIO1] = DIO_FIFO_LEVEL;
irq_set_irq_type(device->irq_num[DIO1], IRQ_TYPE_EDGE_FALLING);
/* enable packet sent interrupt */
retval = rf69_set_dio_mapping(spi, DIO0, DIO_PACKET_SENT);
if (retval < 0)
- return retval;
+ goto abort;
device->irq_state[DIO0] = DIO_PACKET_SENT;
irq_set_irq_type(device->irq_num[DIO0], IRQ_TYPE_EDGE_RISING);
enable_irq(device->irq_num[DIO0]); /* was disabled by rx active check */
@@ -695,7 +695,7 @@ pi433_tx_thread(void *data)
/* enable transmission */
retval = rf69_set_mode(spi, transmit);
if (retval < 0)
- return retval;
+ goto abort;
/* transfer this msg (and repetitions) to chip fifo */
device->free_in_fifo = FIFO_SIZE;
@@ -742,7 +742,7 @@ pi433_tx_thread(void *data)
dev_dbg(device->dev, "thread: Packet sent. Set mode to stby.");
retval = rf69_set_mode(spi, standby);
if (retval < 0)
- return retval;
+ goto abort;
/* everything sent? */
if (kfifo_is_empty(&device->tx_fifo)) {
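The pi433 hunks above route every failure in the TX thread to a single abort label instead of returning straight out of the kthread; the label itself sits outside the quoted context, so the sketch below only shows the general single-exit shape, assuming shared state is restored there (all names are illustrative):

	static int example_step(void)
	{
		return 0;	/* stand-in for an rf69_* call that may fail */
	}

	static void example_restore_state(void)
	{
		/* stand-in for re-enabling the receiver / interrupts */
	}

	static int example_tx_iteration(void)
	{
		int retval;

		retval = example_step();
		if (retval < 0)
			goto abort;

		retval = example_step();
		if (retval < 0)
			goto abort;

		return 0;

	abort:
		example_restore_state();
		return retval;
	}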
diff --git a/drivers/staging/pi433/pi433_if.h b/drivers/staging/pi433/pi433_if.h
index d5c1521192c1..855f0bebdc1c 100644
--- a/drivers/staging/pi433/pi433_if.h
+++ b/drivers/staging/pi433/pi433_if.h
@@ -5,14 +5,13 @@
* userspace interface for pi433 radio module
*
* Pi433 is a 433MHz radio module for the Raspberry Pi.
- * It is based on the HopeRf Module RFM69CW. Therefore inside of this
- * driver, you'll find an abstraction of the rf69 chip.
+ * It is based on the HopeRf Module RFM69CW. Therefore, inside of this
+ * driver you'll find an abstraction of the rf69 chip.
*
- * If needed, this driver could be extended, to also support other
- * devices, basing on HopeRfs rf69.
+ * If needed, this driver could also be extended to support other
+ * devices based on HopeRf rf69 as well as HopeRf modules with a similar
+ * interface such as RFM69HCW, RFM12, RFM95 and so on.
*
- * The driver can also be extended, to support other modules of
- * HopeRf with a similar interace - e. g. RFM69HCW, RFM12, RFM95, ...
* Copyright (C) 2016 Wolf-Entwicklungen
* Marcus Wolf <linux@wolf-entwicklungen.de>
*/
@@ -33,8 +32,8 @@ enum option_on_off {
/* IOCTL structs and commands */
/**
- * struct pi433_tx_config
- * describes the configuration of the radio module for sending
+ * struct pi433_tx_cfg
+ * describes the configuration of the radio module for sending data
* @frequency:
* @bit_rate:
* @modulation:
@@ -46,7 +45,7 @@ enum option_on_off {
* @repetitions:
*
* ATTENTION:
- * If the contents of 'pi433_tx_config' ever change
+ * If the contents of 'pi433_tx_cfg' ever change
* incompatibly, then the ioctl number (see define below) must change.
*
* NOTE: struct layout is the same in 64bit and 32bit userspace.
@@ -81,8 +80,8 @@ struct pi433_tx_cfg {
};
/**
- * struct pi433_rx_config
- * describes the configuration of the radio module for sending
+ * struct pi433_rx_cfg
+ * describes the configuration of the radio module for receiving data
* @frequency:
* @bit_rate:
* @modulation:
@@ -94,7 +93,7 @@ struct pi433_tx_cfg {
* @repetitions:
*
* ATTENTION:
- * If the contents of 'pi433_rx_config' ever change
+ * If the contents of 'pi433_rx_cfg' ever change
* incompatibly, then the ioctl number (see define below) must change
*
* NOTE: struct layout is the same in 64bit and 32bit userspace.
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 8fcdf89da8aa..9873bb2a9ee4 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -321,8 +321,8 @@ int qlge_get_mac_addr_reg(struct qlge_adapter *qdev, u32 type, u16 index,
/* Set up a MAC, multicast or VLAN address for the
* inbound frame matching.
*/
-static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, u8 *addr, u32 type,
- u16 index)
+static int qlge_set_mac_addr_reg(struct qlge_adapter *qdev, const u8 *addr,
+ u32 type, u16 index)
{
u32 offset = 0;
int status = 0;
@@ -441,7 +441,7 @@ static int qlge_set_mac_addr(struct qlge_adapter *qdev, int set)
status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = qlge_set_mac_addr_reg(qdev, (u8 *)addr,
+ status = qlge_set_mac_addr_reg(qdev, (const u8 *)addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
qlge_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -724,9 +724,7 @@ static int qlge_get_8000_flash_params(struct qlge_adapter *qdev)
goto exit;
}
- memcpy(qdev->ndev->dev_addr,
- mac_addr,
- qdev->ndev->addr_len);
+ eth_hw_addr_set(qdev->ndev, mac_addr);
exit:
qlge_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -774,9 +772,7 @@ static int qlge_get_8012_flash_params(struct qlge_adapter *qdev)
goto exit;
}
- memcpy(qdev->ndev->dev_addr,
- qdev->flash.flash_params_8012.mac_addr,
- qdev->ndev->addr_len);
+ eth_hw_addr_set(qdev->ndev, qdev->flash.flash_params_8012.mac_addr);
exit:
qlge_sem_unlock(qdev, SEM_FLASH_MASK);
@@ -4214,14 +4210,14 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, addr->sa_data);
/* Update local copy of current mac address. */
memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
status = qlge_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
return status;
- status = qlge_set_mac_addr_reg(qdev, (u8 *)ndev->dev_addr,
+ status = qlge_set_mac_addr_reg(qdev, (const u8 *)ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC,
qdev->func * MAX_CQ);
if (status)
@@ -4614,14 +4610,9 @@ static int qlge_probe(struct pci_dev *pdev,
goto netdev_free;
}
- err = devlink_register(devlink);
- if (err)
- goto netdev_free;
-
err = qlge_health_create_reporters(qdev);
-
if (err)
- goto devlink_unregister;
+ goto netdev_free;
/* Start up the timer to trigger EEH if
* the bus goes dead
@@ -4632,10 +4623,9 @@ static int qlge_probe(struct pci_dev *pdev,
qlge_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
cards_found++;
+ devlink_register(devlink);
return 0;
-devlink_unregister:
- devlink_unregister(devlink);
netdev_free:
free_netdev(ndev);
devlink_free:
@@ -4660,13 +4650,13 @@ static void qlge_remove(struct pci_dev *pdev)
struct net_device *ndev = qdev->ndev;
struct devlink *devlink = priv_to_devlink(qdev);
+ devlink_unregister(devlink);
del_timer_sync(&qdev->timer);
qlge_cancel_all_work_sync(qdev);
unregister_netdev(ndev);
qlge_release_all(pdev);
pci_disable_device(pdev);
devlink_health_reporter_destroy(qdev->reporter);
- devlink_unregister(devlink);
devlink_free(devlink);
free_netdev(ndev);
}
diff --git a/drivers/staging/qlge/qlge_mpi.c b/drivers/staging/qlge/qlge_mpi.c
index 2630ebf50341..96a4de6d2b34 100644
--- a/drivers/staging/qlge/qlge_mpi.c
+++ b/drivers/staging/qlge/qlge_mpi.c
@@ -862,7 +862,7 @@ int qlge_mb_wol_set_magic(struct qlge_adapter *qdev, u32 enable_wol)
struct mbox_params mbc;
struct mbox_params *mbcp = &mbc;
int status;
- u8 *addr = qdev->ndev->dev_addr;
+ const u8 *addr = qdev->ndev->dev_addr;
memset(mbcp, 0, sizeof(struct mbox_params));
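Two patterns recur in the qlge (and other networking) hunks above: MAC addresses are now installed with eth_hw_addr_set() because net_device::dev_addr is const, and devlink_register() moves to the end of probe while devlink_unregister() moves to the front of remove, presumably so the instance only becomes visible once the device is fully set up. A minimal usage sketch of the address helper (the MAC value is made up):

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	static void example_set_mac(struct net_device *ndev)
	{
		/* dev_addr is read-only for drivers; this helper is the writer. */
		static const u8 mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

		eth_hw_addr_set(ndev, mac);
	}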
diff --git a/drivers/staging/r8188eu/Kconfig b/drivers/staging/r8188eu/Kconfig
index dc1719d3f2e4..f5fe423530f0 100644
--- a/drivers/staging/r8188eu/Kconfig
+++ b/drivers/staging/r8188eu/Kconfig
@@ -14,13 +14,3 @@ config R8188EU
sources for version v4.1.4_6773.20130222, and contains modifications for
newer kernel features. If built as a module, it will be called r8188eu.
-if R8188EU
-
-config 88EU_AP_MODE
- bool "Realtek RTL8188EU AP mode"
- help
- This option enables Access Point mode. Unless you know that your system
- will never be used as an AP, or the target system has limited memory,
- "Y" should be selected.
-
-endif
diff --git a/drivers/staging/r8188eu/Makefile b/drivers/staging/r8188eu/Makefile
index aebaf29990fd..62933b0f29b5 100644
--- a/drivers/staging/r8188eu/Makefile
+++ b/drivers/staging/r8188eu/Makefile
@@ -1,105 +1,58 @@
-SHELL := /bin/bash
-EXTRA_CFLAGS += $(USER_EXTRA_CFLAGS)
-EXTRA_CFLAGS += -O1
-ccflags-y += -D__CHECK_ENDIAN__
-
-CONFIG_BT_COEXIST = n
-CONFIG_WOWLAN = n
-
-OUTSRC_FILES := \
- hal/HalHWImg8188E_MAC.o \
- hal/HalHWImg8188E_BB.o \
- hal/HalHWImg8188E_RF.o \
- hal/HalPhyRf_8188e.o \
- hal/HalPwrSeqCmd.o \
- hal/Hal8188EPwrSeq.o \
- hal/Hal8188ERateAdaptive.o\
- hal/hal_intf.o \
- hal/hal_com.o \
- hal/odm.o \
- hal/odm_debug.o \
- hal/odm_interface.o \
- hal/odm_HWConfig.o \
- hal/odm_RegConfig8188E.o\
- hal/odm_RTL8188E.o \
- hal/rtl8188e_cmd.o \
- hal/rtl8188e_dm.o \
- hal/rtl8188e_hal_init.o \
- hal/rtl8188e_mp.o \
- hal/rtl8188e_phycfg.o \
- hal/rtl8188e_rf6052.o \
- hal/rtl8188e_rxdesc.o \
- hal/rtl8188e_sreset.o \
- hal/rtl8188e_xmit.o \
- hal/rtl8188eu_led.o \
- hal/rtl8188eu_recv.o \
- hal/rtl8188eu_xmit.o \
- hal/usb_halinit.o \
- hal/usb_ops_linux.o
-
-RTL871X = rtl8188e
-
-HCI_NAME = usb
-
-_OS_INTFS_FILES := \
- os_dep/ioctl_linux.o \
- os_dep/mlme_linux.o \
- os_dep/os_intfs.o \
- os_dep/osdep_service.o \
- os_dep/recv_linux.o \
- os_dep/usb_intf.o \
- os_dep/usb_ops_linux.o \
- os_dep/xmit_linux.o
-
-_HAL_INTFS_FILES += $(OUTSRC_FILES)
-
-ifeq ($(CONFIG_BT_COEXIST), y)
-EXTRA_CFLAGS += -DCONFIG_BT_COEXIST
-endif
-
-ifeq ($(CONFIG_WOWLAN), y)
-EXTRA_CFLAGS += -DCONFIG_WOWLAN
-endif
-
-SUBARCH := $(shell uname -m | sed -e "s/i.86/i386/; s/ppc.*/powerpc/; s/armv.l/arm/; s/aarch64/arm64/;")
-
-ARCH ?= $(SUBARCH)
-CROSS_COMPILE ?=
-KVER ?= $(if $(KERNELRELEASE),$(KERNELRELEASE),$(shell uname -r))
-KSRC ?= $(if $(KERNEL_SRC),$(KERNEL_SRC),/lib/modules/$(KVER)/build)
-MODDESTDIR := /lib/modules/$(KVER)/kernel/drivers/net/wireless
-INSTALL_PREFIX :=
-
-rtk_core := \
- core/rtw_ap.o \
- core/rtw_br_ext.o \
- core/rtw_cmd.o \
- core/rtw_debug.o \
- core/rtw_efuse.o \
- core/rtw_ieee80211.o \
- core/rtw_io.o \
- core/rtw_ioctl_set.o \
- core/rtw_iol.o \
- core/rtw_led.o \
- core/rtw_mlme.o \
- core/rtw_mlme_ext.o \
- core/rtw_mp.o \
- core/rtw_mp_ioctl.o \
- core/rtw_pwrctrl.o \
- core/rtw_p2p.o \
- core/rtw_recv.o \
- core/rtw_rf.o \
- core/rtw_security.o \
- core/rtw_sreset.o \
- core/rtw_sta_mgt.o \
- core/rtw_wlan_util.o \
+r8188eu-y = \
+ hal/HalHWImg8188E_MAC.o \
+ hal/HalHWImg8188E_BB.o \
+ hal/HalHWImg8188E_RF.o \
+ hal/HalPhyRf_8188e.o \
+ hal/HalPwrSeqCmd.o \
+ hal/Hal8188EPwrSeq.o \
+ hal/Hal8188ERateAdaptive.o \
+ hal/hal_intf.o \
+ hal/hal_com.o \
+ hal/odm.o \
+ hal/odm_debug.o \
+ hal/odm_interface.o \
+ hal/odm_HWConfig.o \
+ hal/odm_RegConfig8188E.o \
+ hal/odm_RTL8188E.o \
+ hal/rtl8188e_cmd.o \
+ hal/rtl8188e_dm.o \
+ hal/rtl8188e_hal_init.o \
+ hal/rtl8188e_phycfg.o \
+ hal/rtl8188e_rf6052.o \
+ hal/rtl8188e_rxdesc.o \
+ hal/rtl8188e_sreset.o \
+ hal/rtl8188e_xmit.o \
+ hal/rtl8188eu_led.o \
+ hal/rtl8188eu_recv.o \
+ hal/rtl8188eu_xmit.o \
+ hal/usb_halinit.o \
+ hal/usb_ops_linux.o \
+ os_dep/ioctl_linux.o \
+ os_dep/mlme_linux.o \
+ os_dep/os_intfs.o \
+ os_dep/osdep_service.o \
+ os_dep/recv_linux.o \
+ os_dep/usb_intf.o \
+ os_dep/usb_ops_linux.o \
+ os_dep/xmit_linux.o \
+ core/rtw_ap.o \
+ core/rtw_br_ext.o \
+ core/rtw_cmd.o \
+ core/rtw_efuse.o \
+ core/rtw_ieee80211.o \
+ core/rtw_ioctl_set.o \
+ core/rtw_iol.o \
+ core/rtw_led.o \
+ core/rtw_mlme.o \
+ core/rtw_mlme_ext.o \
+ core/rtw_pwrctrl.o \
+ core/rtw_p2p.o \
+ core/rtw_recv.o \
+ core/rtw_rf.o \
+ core/rtw_security.o \
+ core/rtw_sta_mgt.o \
+ core/rtw_wlan_util.o \
core/rtw_xmit.o
-r8188eu-y += $(rtk_core)
-
-r8188eu-y += $(_HAL_INTFS_FILES)
-
-r8188eu-y += $(_OS_INTFS_FILES)
-
obj-$(CONFIG_R8188EU) := r8188eu.o
diff --git a/drivers/staging/r8188eu/core/rtw_ap.c b/drivers/staging/r8188eu/core/rtw_ap.c
index 1c07ad28b242..c78feeb9c862 100644
--- a/drivers/staging/r8188eu/core/rtw_ap.c
+++ b/drivers/staging/r8188eu/core/rtw_ap.c
@@ -7,8 +7,7 @@
#include "../include/drv_types.h"
#include "../include/wifi.h"
#include "../include/ieee80211.h"
-
-#ifdef CONFIG_88EU_AP_MODE
+#include "../include/rtl8188e_cmd.h"
void init_mlme_ap_info(struct adapter *padapter)
{
@@ -19,7 +18,7 @@ void init_mlme_ap_info(struct adapter *padapter)
spin_lock_init(&pmlmepriv->bcn_update_lock);
/* for ACL */
- _rtw_init_queue(&pacl_list->acl_node_q);
+ rtw_init_queue(&pacl_list->acl_node_q);
start_ap_mode(padapter);
}
@@ -343,7 +342,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
}
/* n mode ra_bitmap */
if (psta_ht->ht_option) {
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
if (rf_type == RF_2T2R)
limit = 16;/* 2R */
else
@@ -393,7 +392,7 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta, u8 rssi_level)
/* bitmap[28:31]= Rate Adaptive id */
/* arg[0:4] = macid */
/* arg[5] = Short GI */
- rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, rssi_level);
+ rtl8188e_Add_RateATid(padapter, tx_ra_bitmap, arg, rssi_level);
if (shortGIrate)
init_rate |= BIT(6);
@@ -453,7 +452,7 @@ void update_bmc_sta(struct adapter *padapter)
init_rate = get_highest_rate_idx(tx_ra_bitmap & 0x0fffffff) & 0x3f;
/* ap mode */
- rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
{
u8 arg = 0;
@@ -467,7 +466,7 @@ void update_bmc_sta(struct adapter *padapter)
/* bitmap[28:31]= Rate Adaptive id */
/* arg[0:4] = macid */
/* arg[5] = Short GI */
- rtw_hal_add_ra_tid(padapter, tx_ra_bitmap, arg, 0);
+ rtl8188e_Add_RateATid(padapter, tx_ra_bitmap, arg, 0);
}
/* set ra_id, init_rate */
psta->raid = raid;
@@ -505,7 +504,7 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
DBG_88E("%s\n", __func__);
/* ap mode */
- rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->ieee8021x_blocked = true;
@@ -558,550 +557,6 @@ void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta)
spin_unlock_bh(&psta->lock);
}
-static void update_hw_ht_param(struct adapter *padapter)
-{
- unsigned char max_AMPDU_len;
- unsigned char min_MPDU_spacing;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- DBG_88E("%s\n", __func__);
-
- /* handle A-MPDU parameter field */
- /*
- AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
- AMPDU_para [4:2]:Min MPDU Start Spacing
- */
- max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;
-
- min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
-
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
-
- /* */
- /* Config SM Power Save setting */
- /* */
- pmlmeinfo->SM_PS = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & 0x0C) >> 2;
- if (pmlmeinfo->SM_PS == WLAN_HT_CAP_SM_PS_STATIC)
- DBG_88E("%s(): WLAN_HT_CAP_SM_PS_STATIC\n", __func__);
-}
-
-static void start_bss_network(struct adapter *padapter, u8 *pbuf)
-{
- u8 *p;
- u8 val8, cur_channel, cur_bwmode, cur_ch_offset;
- u16 bcn_interval;
- u32 acparm;
- int ie_len;
- struct registry_priv *pregpriv = &padapter->registrypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *pnetwork_mlmeext = &pmlmeinfo->network;
- struct HT_info_element *pht_info = NULL;
-#ifdef CONFIG_88EU_P2P
- struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
-
- bcn_interval = (u16)pnetwork->Configuration.BeaconPeriod;
- cur_channel = pnetwork->Configuration.DSConfig;
- cur_bwmode = HT_CHANNEL_WIDTH_20;
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- /* check if there is wps ie, */
- /* if there is wpsie in beacon, the hostapd will update beacon twice when stating hostapd, */
- /* and at first time the security ie (RSN/WPA IE) will not include in beacon. */
- if (!rtw_get_wps_ie(pnetwork->IEs + _FIXED_IE_LENGTH_, pnetwork->IELength - _FIXED_IE_LENGTH_, NULL, NULL))
- pmlmeext->bstart_bss = true;
-
- /* todo: update wmm, ht cap */
- if (pmlmepriv->qospriv.qos_option)
- pmlmeinfo->WMM_enable = true;
- if (pmlmepriv->htpriv.ht_option) {
- pmlmeinfo->WMM_enable = true;
- pmlmeinfo->HT_enable = true;
-
- update_hw_ht_param(padapter);
- }
-
- if (pmlmepriv->cur_network.join_res != true) { /* setting only at first time */
- /* WEP Key will be set before this function, do not clear CAM. */
- if ((psecuritypriv->dot11PrivacyAlgrthm != _WEP40_) &&
- (psecuritypriv->dot11PrivacyAlgrthm != _WEP104_))
- flush_all_cam_entry(padapter); /* clear CAM */
- }
-
- /* set MSR to AP_Mode */
- Set_MSR(padapter, _HW_STATE_AP_);
-
- /* Set BSSID REG */
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pnetwork->MacAddress);
-
- /* Set EDCA param reg */
- acparm = 0x002F3217; /* VO */
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acparm));
- acparm = 0x005E4317; /* VI */
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acparm));
- acparm = 0x005ea42b;
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acparm));
- acparm = 0x0000A444; /* BK */
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acparm));
-
- /* Set Security */
- val8 = (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
- rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
-
- /* Beacon Control related register */
- rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&bcn_interval));
-
- UpdateBrateTbl(padapter, pnetwork->SupportedRates);
- rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, pnetwork->SupportedRates);
-
- if (!pmlmepriv->cur_network.join_res) { /* setting only at first time */
- /* turn on all dynamic functions */
- Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true);
- }
- /* set channel, bwmode */
- p = rtw_get_ie((pnetwork->IEs + sizeof(struct ndis_802_11_fixed_ie)), _HT_ADD_INFO_IE_, &ie_len, (pnetwork->IELength - sizeof(struct ndis_802_11_fixed_ie)));
- if (p && ie_len) {
- pht_info = (struct HT_info_element *)(p + 2);
-
- if ((pregpriv->cbw40_enable) && (pht_info->infos[0] & BIT(2))) {
- /* switch to the 40M Hz mode */
- cur_bwmode = HT_CHANNEL_WIDTH_40;
- switch (pht_info->infos[0] & 0x3) {
- case 1:
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
- break;
- case 3:
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
- break;
- default:
- cur_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- break;
- }
- }
- }
- /* TODO: need to judge the phy parameters on concurrent mode for single phy */
- set_channel_bwmode(padapter, cur_channel, cur_ch_offset, cur_bwmode);
-
- DBG_88E("CH =%d, BW =%d, offset =%d\n", cur_channel, cur_bwmode, cur_ch_offset);
-
- /* */
- pmlmeext->cur_channel = cur_channel;
- pmlmeext->cur_bwmode = cur_bwmode;
- pmlmeext->cur_ch_offset = cur_ch_offset;
- pmlmeext->cur_wireless_mode = pmlmepriv->cur_network.network_type;
-
- /* update cur_wireless_mode */
- update_wireless_mode(padapter);
-
- /* udpate capability after cur_wireless_mode updated */
- update_capinfo(padapter, rtw_get_capability((struct wlan_bssid_ex *)pnetwork));
-
- /* let pnetwork_mlmeext == pnetwork_mlme. */
- memcpy(pnetwork_mlmeext, pnetwork, pnetwork->Length);
-
-#ifdef CONFIG_88EU_P2P
- memcpy(pwdinfo->p2p_group_ssid, pnetwork->Ssid.Ssid, pnetwork->Ssid.SsidLength);
- pwdinfo->p2p_group_ssid_len = pnetwork->Ssid.SsidLength;
-#endif /* CONFIG_88EU_P2P */
-
- if (pmlmeext->bstart_bss) {
- update_beacon(padapter, _TIM_IE_, NULL, false);
-
- /* issue beacon frame */
- if (send_beacon(padapter) == _FAIL)
- DBG_88E("issue_beacon, fail!\n");
- }
-
- /* update bc/mc sta_info */
- update_bmc_sta(padapter);
-}
-
-int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
-{
- int ret = _SUCCESS;
- u8 *p;
- u8 *pHT_caps_ie = NULL;
- u8 *pHT_info_ie = NULL;
- struct sta_info *psta = NULL;
- u16 cap, ht_cap = false;
- uint ie_len = 0;
- int group_cipher, pairwise_cipher;
- u8 channel, network_type, supportRate[NDIS_802_11_LENGTH_RATES_EX];
- int supportRateNum = 0;
- u8 OUI1[] = {0x00, 0x50, 0xf2, 0x01};
- u8 WMM_PARA_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x01, 0x01};
- struct registry_priv *pregistrypriv = &padapter->registrypriv;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *pbss_network = (struct wlan_bssid_ex *)&pmlmepriv->cur_network.network;
- u8 *ie = pbss_network->IEs;
-
- /* SSID */
- /* Supported rates */
- /* DS Params */
- /* WLAN_EID_COUNTRY */
- /* ERP Information element */
- /* Extended supported rates */
- /* WPA/WPA2 */
- /* Wi-Fi Wireless Multimedia Extensions */
- /* ht_capab, ht_oper */
- /* WPS IE */
-
- DBG_88E("%s, len =%d\n", __func__, len);
-
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) != true)
- return _FAIL;
-
- if (len > MAX_IE_SZ)
- return _FAIL;
-
- pbss_network->IELength = len;
-
- memset(ie, 0, MAX_IE_SZ);
-
- memcpy(ie, pbuf, pbss_network->IELength);
-
- if (pbss_network->InfrastructureMode != Ndis802_11APMode)
- return _FAIL;
-
- pbss_network->Rssi = 0;
-
- memcpy(pbss_network->MacAddress, myid(&padapter->eeprompriv), ETH_ALEN);
-
- /* beacon interval */
- p = rtw_get_beacon_interval_from_ie(ie);/* 8: TimeStamp, 2: Beacon Interval 2:Capability */
- pbss_network->Configuration.BeaconPeriod = get_unaligned_le16(p);
-
- /* capability */
- cap = get_unaligned_le16(ie);
-
- /* SSID */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SSID_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
- memset(&pbss_network->Ssid, 0, sizeof(struct ndis_802_11_ssid));
- memcpy(pbss_network->Ssid.Ssid, (p + 2), ie_len);
- pbss_network->Ssid.SsidLength = ie_len;
- }
-
- /* channel */
- channel = 0;
- pbss_network->Configuration.Length = 0;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _DSSET_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0)
- channel = *(p + 2);
-
- pbss_network->Configuration.DSConfig = channel;
-
- memset(supportRate, 0, NDIS_802_11_LENGTH_RATES_EX);
- /* get supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _SUPPORTEDRATES_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p) {
- memcpy(supportRate, p + 2, ie_len);
- supportRateNum = ie_len;
- }
-
- /* get ext_supported rates */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _EXT_SUPPORTEDRATES_IE_, &ie_len, pbss_network->IELength - _BEACON_IE_OFFSET_);
- if (p) {
- memcpy(supportRate + supportRateNum, p + 2, ie_len);
- supportRateNum += ie_len;
- }
-
- network_type = rtw_check_network_type(supportRate, supportRateNum, channel);
-
- rtw_set_supported_rate(pbss_network->SupportedRates, network_type);
-
- /* parsing ERP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _ERPINFO_IE_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0)
- ERP_IE_handler(padapter, (struct ndis_802_11_var_ie *)p);
-
- /* update privacy/security */
- if (cap & BIT(4))
- pbss_network->Privacy = 1;
- else
- pbss_network->Privacy = 0;
-
- psecuritypriv->wpa_psk = 0;
-
- /* wpa2 */
- group_cipher = 0;
- pairwise_cipher = 0;
- psecuritypriv->wpa2_group_cipher = _NO_PRIVACY_;
- psecuritypriv->wpa2_pairwise_cipher = _NO_PRIVACY_;
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _RSN_IE_2_, &ie_len, (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
- if (rtw_parse_wpa2_ie(p, ie_len + 2, &group_cipher, &pairwise_cipher, NULL) == _SUCCESS) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
- psecuritypriv->wpa_psk |= BIT(1);
-
- psecuritypriv->wpa2_group_cipher = group_cipher;
- psecuritypriv->wpa2_pairwise_cipher = pairwise_cipher;
- }
- }
- /* wpa */
- ie_len = 0;
- group_cipher = 0;
- pairwise_cipher = 0;
- psecuritypriv->wpa_group_cipher = _NO_PRIVACY_;
- psecuritypriv->wpa_pairwise_cipher = _NO_PRIVACY_;
- for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
- p = rtw_get_ie(p, _SSN_IE_1_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && (!memcmp(p + 2, OUI1, 4))) {
- if (rtw_parse_wpa_ie(p, ie_len + 2, &group_cipher,
- &pairwise_cipher, NULL) == _SUCCESS) {
- psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_8021X;
-
- psecuritypriv->dot8021xalg = 1;/* psk, todo:802.1x */
-
- psecuritypriv->wpa_psk |= BIT(0);
-
- psecuritypriv->wpa_group_cipher = group_cipher;
- psecuritypriv->wpa_pairwise_cipher = pairwise_cipher;
- }
- break;
- }
- if (!p || ie_len == 0)
- break;
- }
-
- /* wmm */
- ie_len = 0;
- pmlmepriv->qospriv.qos_option = 0;
- if (pregistrypriv->wmm_enable) {
- for (p = ie + _BEACON_IE_OFFSET_;; p += (ie_len + 2)) {
- p = rtw_get_ie(p, _VENDOR_SPECIFIC_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_ - (ie_len + 2)));
- if ((p) && !memcmp(p + 2, WMM_PARA_IE, 6)) {
- pmlmepriv->qospriv.qos_option = 1;
-
- *(p + 8) |= BIT(7);/* QoS Info, support U-APSD */
-
- /* disable all ACM bits since the WMM admission control is not supported */
- *(p + 10) &= ~BIT(4); /* BE */
- *(p + 14) &= ~BIT(4); /* BK */
- *(p + 18) &= ~BIT(4); /* VI */
- *(p + 22) &= ~BIT(4); /* VO */
- break;
- }
-
- if (!p || ie_len == 0)
- break;
- }
- }
- /* parsing HT_CAP_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_CAPABILITY_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0) {
- u8 rf_type;
- struct ieee80211_ht_cap *pht_cap = (struct ieee80211_ht_cap *)(p + 2);
-
- pHT_caps_ie = p;
- ht_cap = true;
- network_type |= WIRELESS_11_24N;
-
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
- if ((psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_CCMP) ||
- (psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_CCMP))
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & (0x07 << 2));
- else
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_DENSITY & 0x00);
-
- /* set Max Rx AMPDU size to 64K */
- pht_cap->ampdu_params_info |= (IEEE80211_HT_AMPDU_PARM_FACTOR & 0x03);
-
- if (rf_type == RF_1T1R) {
- pht_cap->mcs.rx_mask[0] = 0xff;
- pht_cap->mcs.rx_mask[1] = 0x0;
- }
- memcpy(&pmlmepriv->htpriv.ht_cap, p + 2, ie_len);
- }
-
- /* parsing HT_INFO_IE */
- p = rtw_get_ie(ie + _BEACON_IE_OFFSET_, _HT_ADD_INFO_IE_, &ie_len,
- (pbss_network->IELength - _BEACON_IE_OFFSET_));
- if (p && ie_len > 0)
- pHT_info_ie = p;
- switch (network_type) {
- case WIRELESS_11B:
- pbss_network->NetworkTypeInUse = Ndis802_11DS;
- break;
- case WIRELESS_11G:
- case WIRELESS_11BG:
- case WIRELESS_11G_24N:
- case WIRELESS_11BG_24N:
- pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
- break;
- default:
- pbss_network->NetworkTypeInUse = Ndis802_11OFDM24;
- break;
- }
-
- pmlmepriv->cur_network.network_type = network_type;
-
- pmlmepriv->htpriv.ht_option = false;
-
- if ((psecuritypriv->wpa2_pairwise_cipher & WPA_CIPHER_TKIP) ||
- (psecuritypriv->wpa_pairwise_cipher & WPA_CIPHER_TKIP)) {
- /* todo: */
- /* ht_cap = false; */
- }
-
- /* ht_cap */
- if (pregistrypriv->ht_enable && ht_cap) {
- pmlmepriv->htpriv.ht_option = true;
- pmlmepriv->qospriv.qos_option = 1;
-
- if (pregistrypriv->ampdu_enable == 1)
- pmlmepriv->htpriv.ampdu_enable = true;
- HT_caps_handler(padapter, (struct ndis_802_11_var_ie *)pHT_caps_ie);
-
- HT_info_handler(padapter, (struct ndis_802_11_var_ie *)pHT_info_ie);
- }
-
- pbss_network->Length = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pbss_network);
-
- /* issue beacon to start bss network */
- start_bss_network(padapter, (u8 *)pbss_network);
-
- /* alloc sta_info for ap itself */
- psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->MacAddress);
- if (!psta) {
- psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->MacAddress);
- if (!psta)
- return _FAIL;
- }
-
- /* fix bug of flush_cam_entry at STOP AP mode */
- psta->state |= WIFI_AP_STATE;
- rtw_indicate_connect(padapter);
- pmlmepriv->cur_network.join_res = true;/* for check if already set beacon */
- return ret;
-}
-
-void rtw_set_macaddr_acl(struct adapter *padapter, int mode)
-{
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
-
- DBG_88E("%s, mode =%d\n", __func__, mode);
-
- pacl_list->mode = mode;
-}
-
-int rtw_acl_add_sta(struct adapter *padapter, u8 *addr)
-{
- struct list_head *plist, *phead;
- u8 added = false;
- int i, ret = 0;
- struct rtw_wlan_acl_node *paclnode;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct __queue *pacl_node_q = &pacl_list->acl_node_q;
-
- DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
-
- if ((NUM_ACL - 1) < pacl_list->num)
- return -1;
-
- spin_lock_bh(&pacl_node_q->lock);
-
- phead = get_list_head(pacl_node_q);
- plist = phead->next;
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
- if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
- if (paclnode->valid) {
- added = true;
- DBG_88E("%s, sta has been added\n", __func__);
- break;
- }
- }
- }
-
- spin_unlock_bh(&pacl_node_q->lock);
-
- if (added)
- return ret;
-
- spin_lock_bh(&pacl_node_q->lock);
-
- for (i = 0; i < NUM_ACL; i++) {
- paclnode = &pacl_list->aclnode[i];
-
- if (!paclnode->valid) {
- INIT_LIST_HEAD(&paclnode->list);
-
- memcpy(paclnode->addr, addr, ETH_ALEN);
-
- paclnode->valid = true;
-
- list_add_tail(&paclnode->list, get_list_head(pacl_node_q));
-
- pacl_list->num++;
-
- break;
- }
- }
-
- DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
-
- spin_unlock_bh(&pacl_node_q->lock);
-
- return ret;
-}
-
-int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr)
-{
- struct list_head *plist, *phead;
- int ret = 0;
- struct rtw_wlan_acl_node *paclnode;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct wlan_acl_pool *pacl_list = &pstapriv->acl_list;
- struct __queue *pacl_node_q = &pacl_list->acl_node_q;
-
- DBG_88E("%s(acl_num =%d) =%pM\n", __func__, pacl_list->num, (addr));
-
- spin_lock_bh(&pacl_node_q->lock);
-
- phead = get_list_head(pacl_node_q);
- plist = phead->next;
-
- while (phead != plist) {
- paclnode = container_of(plist, struct rtw_wlan_acl_node, list);
- plist = plist->next;
-
- if (!memcmp(paclnode->addr, addr, ETH_ALEN)) {
- if (paclnode->valid) {
- paclnode->valid = false;
-
- list_del_init(&paclnode->list);
-
- pacl_list->num--;
- }
- }
- }
-
- spin_unlock_bh(&pacl_node_q->lock);
-
- DBG_88E("%s, acl_num =%d\n", __func__, pacl_list->num);
- return ret;
-}
-
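The two helpers removed above (rtw_acl_add_sta/rtw_acl_remove_sta) keep the MAC ACL in a spinlock-protected linked list walked with open-coded while (phead != plist) loops and container_of(). Purely as a point of comparison, here is a minimal sketch of the same add/remove idiom written with the standard <linux/list.h> iterators; the struct and function names are invented for the sketch and are not the r8188eu API.

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct acl_node {
	struct list_head list;
	u8 addr[ETH_ALEN];
};

struct acl_pool {
	struct list_head head;	/* INIT_LIST_HEAD() at setup time */
	spinlock_t lock;	/* spin_lock_init() at setup time */
	int num;
};

/* caller holds pool->lock */
static bool acl_contains(struct acl_pool *pool, const u8 *addr)
{
	struct acl_node *node;

	list_for_each_entry(node, &pool->head, list)
		if (ether_addr_equal(node->addr, addr))
			return true;
	return false;
}

static int acl_add(struct acl_pool *pool, const u8 *addr)
{
	struct acl_node *node;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	ether_addr_copy(node->addr, addr);

	spin_lock_bh(&pool->lock);
	if (acl_contains(pool, addr)) {		/* already listed */
		spin_unlock_bh(&pool->lock);
		kfree(node);
		return 0;
	}
	list_add_tail(&node->list, &pool->head);
	pool->num++;
	spin_unlock_bh(&pool->lock);
	return 0;
}

static void acl_remove(struct acl_pool *pool, const u8 *addr)
{
	struct acl_node *node, *tmp;

	spin_lock_bh(&pool->lock);
	list_for_each_entry_safe(node, tmp, &pool->head, list) {
		if (ether_addr_equal(node->addr, addr)) {
			list_del(&node->list);
			pool->num--;
			kfree(node);
			break;
		}
	}
	spin_unlock_bh(&pool->lock);
}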
static void update_bcn_fixed_ie(struct adapter *padapter)
{
DBG_88E("%s\n", __func__);
@@ -1289,7 +744,7 @@ void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)
/*
op_mode
-Set to 0 (HT pure) under the followign conditions
+Set to 0 (HT pure) under the following conditions
- all STAs in the BSS are 20/40 MHz HT in 20/40 MHz BSS or
- all STAs in the BSS are 20 MHz HT in 20 MHz BSS
Set to 1 (HT non-member protection) if there may be non-HT STAs
@@ -1633,41 +1088,6 @@ u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
return beacon_updated;
}
-int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset)
-{
- struct list_head *phead, *plist;
- int ret = 0;
- struct sta_info *psta = NULL;
- struct sta_priv *pstapriv = &padapter->stapriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
- return ret;
-
- DBG_88E(FUNC_NDEV_FMT" with ch:%u, offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev), new_ch, ch_offset);
-
- spin_lock_bh(&pstapriv->asoc_list_lock);
- phead = &pstapriv->asoc_list;
- plist = phead->next;
-
- /* for each sta in asoc_queue */
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, asoc_list);
- plist = plist->next;
-
- issue_action_spct_ch_switch(padapter, psta->hwaddr, new_ch, ch_offset);
- psta->expire_to = ((pstapriv->expire_to * 2) > 5) ? 5 : (pstapriv->expire_to * 2);
- }
- spin_unlock_bh(&pstapriv->asoc_list_lock);
-
- issue_action_spct_ch_switch(padapter, bc_addr, new_ch, ch_offset);
-
- return ret;
-}
-
int rtw_sta_flush(struct adapter *padapter)
{
struct list_head *phead, *plist;
@@ -1736,15 +1156,6 @@ void sta_info_update(struct adapter *padapter, struct sta_info *psta)
update_sta_info_apmode(padapter, psta);
}
-/* called >= TSR LEVEL for USB or SDIO Interface*/
-void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta)
-{
- if (psta->state & _FW_LINKED) {
- /* add ratid */
- add_RATid(padapter, psta, 0);/* DM_RATR_STA_INIT */
- }
-}
-
void start_ap_mode(struct adapter *padapter)
{
int i;
@@ -1846,5 +1257,3 @@ void stop_ap_mode(struct adapter *padapter)
rtw_free_mlme_priv_ie_data(pmlmepriv);
}
-
-#endif /* CONFIG_88EU_AP_MODE */
diff --git a/drivers/staging/r8188eu/core/rtw_br_ext.c b/drivers/staging/r8188eu/core/rtw_br_ext.c
index 62a672243696..bcd0f9dd64b1 100644
--- a/drivers/staging/r8188eu/core/rtw_br_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_br_ext.c
@@ -695,8 +695,7 @@ void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb)
}
}
-void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
- unsigned char *ipAddr)
+void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr)
{
unsigned char networkAddr[MAX_NETWORK_ADDR_LEN];
struct nat25_network_db_entry *db;
diff --git a/drivers/staging/r8188eu/core/rtw_cmd.c b/drivers/staging/r8188eu/core/rtw_cmd.c
index ce73ac7cf973..5d5f25364b2f 100644
--- a/drivers/staging/r8188eu/core/rtw_cmd.c
+++ b/drivers/staging/r8188eu/core/rtw_cmd.c
@@ -9,6 +9,8 @@
#include "../include/mlme_osdep.h"
#include "../include/rtw_br_ext.h"
#include "../include/rtw_mlme_ext.h"
+#include "../include/rtl8188e_dm.h"
+#include "../include/rtl8188e_sreset.h"
/*
Caller and the rtw_cmd_thread can protect cmd_q by spin_lock.
@@ -19,11 +21,12 @@ static int _rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
{
int res = _SUCCESS;
- sema_init(&pcmdpriv->cmd_queue_sema, 0);
+ init_completion(&pcmdpriv->enqueue_cmd);
/* sema_init(&(pcmdpriv->cmd_done_sema), 0); */
- sema_init(&pcmdpriv->terminate_cmdthread_sema, 0);
+ init_completion(&pcmdpriv->start_cmd_thread);
+ init_completion(&pcmdpriv->stop_cmd_thread);
- _rtw_init_queue(&pcmdpriv->cmd_queue);
+ rtw_init_queue(&pcmdpriv->cmd_queue);
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
@@ -167,16 +170,6 @@ static int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */
- /* To decide allow or not */
- if ((pcmdpriv->padapter->pwrctrlpriv.bHWPwrPindetect) &&
- (!pcmdpriv->padapter->registrypriv.usbss_enable)) {
- if (cmd_obj->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) {
- struct drvextra_cmd_parm *pdrvextra_cmd_parm = (struct drvextra_cmd_parm *)cmd_obj->parmbuf;
- if (pdrvextra_cmd_parm->ec_id == POWER_SAVING_CTRL_WK_CID)
- bAllow = true;
- }
- }
-
if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
bAllow = true;
@@ -205,7 +198,7 @@ u32 rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj);
if (res == _SUCCESS)
- up(&pcmdpriv->cmd_queue_sema);
+ complete(&pcmdpriv->enqueue_cmd);
exit:
@@ -221,14 +214,6 @@ struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv)
return cmd_obj;
}
-void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv)
-{
-
- pcmdpriv->cmd_done_cnt++;
- /* up(&(pcmdpriv->cmd_done_sema)); */
-
-}
-
void rtw_free_cmd_obj(struct cmd_obj *pcmd)
{
@@ -259,23 +244,14 @@ int rtw_cmd_thread(void *context)
struct adapter *padapter = (struct adapter *)context;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- thread_enter("RTW_CMD_THREAD");
-
pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->cmdthd_running = true;
- up(&pcmdpriv->terminate_cmdthread_sema);
+ complete(&pcmdpriv->start_cmd_thread);
while (1) {
- if (_rtw_down_sema(&pcmdpriv->cmd_queue_sema) == _FAIL)
- break;
+ wait_for_completion(&pcmdpriv->enqueue_cmd);
- if (padapter->bDriverStopped ||
- padapter->bSurpriseRemoved) {
- DBG_88E("%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
- __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__);
- break;
- }
_next:
if (padapter->bDriverStopped ||
padapter->bSurpriseRemoved) {
@@ -345,43 +321,11 @@ post_process:
rtw_free_cmd_obj(pcmd);
} while (1);
- up(&pcmdpriv->terminate_cmdthread_sema);
+ complete(&pcmdpriv->stop_cmd_thread);
thread_exit();
}
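The hunks above replace the driver's counting semaphores (sema_init/up/_rtw_down_sema) with completions. For reference, a minimal, self-contained sketch of that completion-based hand-off between a producer and a command thread, using the standard <linux/completion.h> API; the structure and thread below are illustrative stand-ins, not the driver's own types.

#include <linux/completion.h>
#include <linux/kthread.h>

struct cmd_ctx {
	struct completion enqueue;	/* signalled when work is queued   */
	struct completion stopped;	/* signalled when the thread exits */
	bool stop;
};

static int cmd_thread(void *arg)
{
	struct cmd_ctx *ctx = arg;

	while (!ctx->stop) {
		/* sleeps until a producer calls complete(&ctx->enqueue) */
		wait_for_completion(&ctx->enqueue);
		/* ... dequeue and handle pending commands here ... */
	}
	complete(&ctx->stopped);
	return 0;
}

static void cmd_ctx_init(struct cmd_ctx *ctx)
{
	init_completion(&ctx->enqueue);
	init_completion(&ctx->stopped);
	ctx->stop = false;
	kthread_run(cmd_thread, ctx, "cmd_thread");
}

static void cmd_enqueue(struct cmd_ctx *ctx)
{
	/* ... add the command to a lock-protected queue first ... */
	complete(&ctx->enqueue);	/* wake the consumer */
}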
-u8 rtw_setstandby_cmd(struct adapter *padapter, uint action)
-{
- struct cmd_obj *ph2c;
- struct usb_suspend_parm *psetusbsuspend;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 ret = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- ret = _FAIL;
- goto exit;
- }
-
- psetusbsuspend = kzalloc(sizeof(struct usb_suspend_parm), GFP_ATOMIC);
- if (!psetusbsuspend) {
- kfree(ph2c);
- ret = _FAIL;
- goto exit;
- }
-
- psetusbsuspend->action = action;
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetusbsuspend, GEN_CMD_CODE(_SetUsbSuspend));
-
- ret = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return ret;
-}
-
/*
rtw_sitesurvey_cmd(~)
### NOTE:#### (!!!!)
@@ -491,228 +435,12 @@ exit:
return res;
}
-u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset)
-{
- struct cmd_obj *ph2c;
- struct setbasicrate_parm *pssetbasicratepara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- pssetbasicratepara = kzalloc(sizeof(struct setbasicrate_parm), GFP_ATOMIC);
-
- if (!pssetbasicratepara) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pssetbasicratepara, _SetBasicRate_CMD_);
-
- memcpy(pssetbasicratepara->basicrates, rateset, NumRates);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-/*
-unsigned char rtw_setphy_cmd(unsigned char *adapter)
-
-1. be called only after rtw_update_registrypriv_dev_network(~) or mp testing program
-2. for AdHoc/Ap mode or mp mode?
-
-*/
-u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch)
-{
- struct cmd_obj *ph2c;
- struct setphy_parm *psetphypara;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- psetphypara = kzalloc(sizeof(struct setphy_parm), GFP_ATOMIC);
-
- if (!psetphypara) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetphypara, _SetPhy_CMD_);
-
- psetphypara->modem = modem;
- psetphypara->rfchannel = ch;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_setbbreg_cmd(struct adapter *padapter, u8 offset, u8 val)
-{
- struct cmd_obj *ph2c;
- struct writeBB_parm *pwritebbparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- pwritebbparm = kzalloc(sizeof(struct writeBB_parm), GFP_ATOMIC);
-
- if (!pwritebbparm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pwritebbparm, GEN_CMD_CODE(_SetBBReg));
-
- pwritebbparm->offset = offset;
- pwritebbparm->value = val;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_getbbreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
-{
- struct cmd_obj *ph2c;
- struct readBB_parm *prdbbparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- prdbbparm = kzalloc(sizeof(struct readBB_parm), GFP_ATOMIC);
-
- if (!prdbbparm) {
- kfree(ph2c);
- return _FAIL;
- }
-
- INIT_LIST_HEAD(&ph2c->list);
- ph2c->cmdcode = GEN_CMD_CODE(_GetBBReg);
- ph2c->parmbuf = (unsigned char *)prdbbparm;
- ph2c->cmdsz = sizeof(struct readBB_parm);
- ph2c->rsp = pval;
- ph2c->rspsz = sizeof(struct readBB_rsp);
-
- prdbbparm->offset = offset;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_setrfreg_cmd(struct adapter *padapter, u8 offset, u32 val)
-{
- struct cmd_obj *ph2c;
- struct writeRF_parm *pwriterfparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- pwriterfparm = kzalloc(sizeof(struct writeRF_parm), GFP_ATOMIC);
-
- if (!pwriterfparm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, pwriterfparm, GEN_CMD_CODE(_SetRFReg));
-
- pwriterfparm->offset = offset;
- pwriterfparm->value = val;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_getrfreg_cmd(struct adapter *padapter, u8 offset, u8 *pval)
-{
- struct cmd_obj *ph2c;
- struct readRF_parm *prdrfparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- prdrfparm = kzalloc(sizeof(struct readRF_parm), GFP_ATOMIC);
- if (!prdrfparm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- INIT_LIST_HEAD(&ph2c->list);
- ph2c->cmdcode = GEN_CMD_CODE(_GetRFReg);
- ph2c->parmbuf = (unsigned char *)prdrfparm;
- ph2c->cmdsz = sizeof(struct readRF_parm);
- ph2c->rsp = pval;
- ph2c->rspsz = sizeof(struct readRF_rsp);
-
- prdrfparm->offset = offset;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
-}
-
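Each of the rtw_set*/rtw_get* command helpers deleted above repeats the same shape: allocate a cmd_obj, allocate its parameter block, free both on any failure, and hand ownership to rtw_enqueue_cmd() on success. A generic sketch of that error-handling shape, with placeholder names (demo_*) and the assumption that the queue takes ownership of what it is given:

#include <linux/slab.h>

struct demo_cmd {
	void *parm;
	int code;
};

static int demo_enqueue(struct demo_cmd *cmd)
{
	/* stub: a real queue would keep cmd; free here to keep the sketch leak-free */
	kfree(cmd->parm);
	kfree(cmd);
	return 0;
}

static int demo_issue_cmd(int code, size_t parm_sz)
{
	struct demo_cmd *cmd;
	void *parm;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	parm = kzalloc(parm_sz, GFP_KERNEL);
	if (!parm) {
		kfree(cmd);		/* free everything allocated so far */
		return -ENOMEM;
	}

	cmd->code = code;
	cmd->parm = parm;

	return demo_enqueue(cmd);	/* queue owns cmd and parm from here on */
}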
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
kfree(pcmd->parmbuf);
kfree(pcmd);
-
- if (padapter->registrypriv.mp_mode == 1)
- padapter->mppriv.workparam.bcompleted = true;
-
-}
-
-void rtw_readtssi_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
-{
-
-
- kfree(pcmd->parmbuf);
- kfree(pcmd);
-
- if (padapter->registrypriv.mp_mode == 1)
- padapter->mppriv.workparam.bcompleted = true;
-
}
u8 rtw_createbss_cmd(struct adapter *padapter)
@@ -743,32 +471,6 @@ exit:
return res;
}
-u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss, unsigned int sz)
-{
- struct cmd_obj *pcmd;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmd) {
- res = _FAIL;
- goto exit;
- }
-
- INIT_LIST_HEAD(&pcmd->list);
- pcmd->cmdcode = GEN_CMD_CODE(_CreateBss);
- pcmd->parmbuf = pbss;
- pcmd->cmdsz = sz;
- pcmd->rsp = NULL;
- pcmd->rspsz = 0;
-
- res = rtw_enqueue_cmd(pcmdpriv, pcmd);
-
-exit:
-
- return res;
-}
-
u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
{
u8 res = _SUCCESS;
@@ -1073,115 +775,6 @@ exit:
return res;
}
-u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table)
-{
- struct cmd_obj *ph2c;
- struct setratable_parm *psetrttblparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- psetrttblparm = kzalloc(sizeof(struct setratable_parm), GFP_KERNEL);
-
- if (!psetrttblparm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable));
-
- memcpy(psetrttblparm, prate_table, sizeof(struct setratable_parm));
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval)
-{
- struct cmd_obj *ph2c;
- struct getratable_parm *pgetrttblparm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
- pgetrttblparm = kzalloc(sizeof(struct getratable_parm), GFP_KERNEL);
-
- if (!pgetrttblparm) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
-/* init_h2fwcmd_w_parm_no_rsp(ph2c, psetrttblparm, GEN_CMD_CODE(_SetRaTable)); */
-
- INIT_LIST_HEAD(&ph2c->list);
- ph2c->cmdcode = GEN_CMD_CODE(_GetRaTable);
- ph2c->parmbuf = (unsigned char *)pgetrttblparm;
- ph2c->cmdsz = sizeof(struct getratable_parm);
- ph2c->rsp = (u8 *)pval;
- ph2c->rspsz = sizeof(struct getratable_rsp);
-
- pgetrttblparm->rsvd = 0x0;
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-exit:
-
- return res;
-}
-
-u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr)
-{
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
- struct cmd_obj *ph2c;
- struct set_assocsta_parm *psetassocsta_para;
- struct set_stakey_rsp *psetassocsta_rsp = NULL;
-
- u8 res = _SUCCESS;
-
- ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!ph2c) {
- res = _FAIL;
- goto exit;
- }
-
- psetassocsta_para = kzalloc(sizeof(struct set_assocsta_parm), GFP_ATOMIC);
- if (!psetassocsta_para) {
- kfree(ph2c);
- res = _FAIL;
- goto exit;
- }
-
- psetassocsta_rsp = kzalloc(sizeof(struct set_assocsta_rsp), GFP_ATOMIC);
- if (!psetassocsta_rsp) {
- kfree(ph2c);
- kfree(psetassocsta_para);
- return _FAIL;
- }
-
- init_h2fwcmd_w_parm_no_rsp(ph2c, psetassocsta_para, _SetAssocSta_CMD_);
- ph2c->rsp = (u8 *)psetassocsta_rsp;
- ph2c->rspsz = sizeof(struct set_assocsta_rsp);
-
- memcpy(psetassocsta_para->addr, mac_addr, ETH_ALEN);
-
- res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
-exit:
-
- return res;
- }
-
u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
@@ -1250,57 +843,6 @@ exit:
return res;
}
-u8 rtw_set_ch_cmd(struct adapter *padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue)
-{
- struct cmd_obj *pcmdobj;
- struct set_ch_parm *set_ch_parm;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- DBG_88E(FUNC_NDEV_FMT" ch:%u, bw:%u, ch_offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev), ch, bw, ch_offset);
-
- /* check input parameter */
-
- /* prepare cmd parameter */
- set_ch_parm = kzalloc(sizeof(*set_ch_parm), GFP_ATOMIC);
- if (!set_ch_parm) {
- res = _FAIL;
- goto exit;
- }
- set_ch_parm->ch = ch;
- set_ch_parm->bw = bw;
- set_ch_parm->ch_offset = ch_offset;
-
- if (enqueue) {
- /* need enqueue, prepare cmd_obj and enqueue */
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmdobj) {
- kfree(set_ch_parm);
- res = _FAIL;
- goto exit;
- }
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, set_ch_parm, GEN_CMD_CODE(_SetChannel));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
- } else {
- /* no need to enqueue, do the cmd hdl directly and free cmd parameter */
- if (H2C_SUCCESS != set_ch_hdl(padapter, (u8 *)set_ch_parm))
- res = _FAIL;
-
- kfree(set_ch_parm);
- }
-
- /* do something based on res... */
-
-exit:
-
- DBG_88E(FUNC_NDEV_FMT" res:%u\n", FUNC_NDEV_ARG(padapter->pnetdev), res);
-
- return res;
-}
-
u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
{
struct cmd_obj *pcmdobj;
@@ -1352,74 +894,6 @@ exit:
return res;
}
-u8 rtw_led_blink_cmd(struct adapter *padapter, struct LED_871x *pLed)
-{
- struct cmd_obj *pcmdobj;
- struct LedBlink_param *ledBlink_param;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmdobj) {
- res = _FAIL;
- goto exit;
- }
-
- ledBlink_param = kzalloc(sizeof(struct LedBlink_param), GFP_ATOMIC);
- if (!ledBlink_param) {
- kfree(pcmdobj);
- res = _FAIL;
- goto exit;
- }
-
- ledBlink_param->pLed = pLed;
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, ledBlink_param, GEN_CMD_CODE(_LedBlink));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
-
-exit:
-
- return res;
-}
-
-u8 rtw_set_csa_cmd(struct adapter *padapter, u8 new_ch_no)
-{
- struct cmd_obj *pcmdobj;
- struct SetChannelSwitch_param *setChannelSwitch_param;
- struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
-
- u8 res = _SUCCESS;
-
- pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (!pcmdobj) {
- res = _FAIL;
- goto exit;
- }
-
- setChannelSwitch_param = kzalloc(sizeof(struct SetChannelSwitch_param),
- GFP_ATOMIC);
- if (!setChannelSwitch_param) {
- kfree(pcmdobj);
- res = _FAIL;
- goto exit;
- }
-
- setChannelSwitch_param->new_ch_no = new_ch_no;
-
- init_h2fwcmd_w_parm_no_rsp(pcmdobj, setChannelSwitch_param, GEN_CMD_CODE(_SetChannelSwitch));
- res = rtw_enqueue_cmd(pcmdpriv, pcmdobj);
-
-exit:
-
- return res;
-}
-
-u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option)
-{
- return _SUCCESS;
-}
-
static void traffic_status_watchdog(struct adapter *padapter)
{
u8 bEnterPS;
@@ -1486,17 +960,15 @@ static void dynamic_chk_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
padapter = (struct adapter *)pbuf;
pmlmepriv = &padapter->mlmepriv;
-#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
expire_timeout_chk(padapter);
-#endif
- rtw_hal_sreset_xmit_status_check(padapter);
+ rtl8188e_sreset_xmit_status_check(padapter);
linked_status_chk(padapter);
traffic_status_watchdog(padapter);
- rtw_hal_dm_watchdog(padapter);
+ rtl8188e_HalDmWatchDog(padapter);
}
static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
@@ -1523,12 +995,12 @@ static void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
mstatus = 1;/* connect */
/* Reset LPS Setting */
padapter->pwrctrlpriv.LpsIdleCount = 0;
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
break;
case LPS_CTRL_DISCONNECT:
mstatus = 0;/* disconnect */
LPS_Leave(padapter);
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus));
break;
case LPS_CTRL_SPECIAL_PACKET:
/* DBG_88E("LPS_CTRL_SPECIAL_PACKET\n"); */
@@ -1588,7 +1060,7 @@ exit:
static void rpt_timer_setting_wk_hdl(struct adapter *padapter, u16 min_time)
{
- rtw_hal_set_hwreg(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
+ SetHwReg8188EU(padapter, HW_VAR_RPT_TIMER_SETTING, (u8 *)(&min_time));
}
u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
@@ -1625,7 +1097,7 @@ exit:
static void antenna_select_wk_hdl(struct adapter *padapter, u8 antenna)
{
- rtw_hal_set_hwreg(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
+ SetHwReg8188EU(padapter, HW_VAR_ANTENNA_DIVERSITY_SELECT, (u8 *)(&antenna));
}
u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
@@ -1636,7 +1108,7 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
u8 support_ant_div;
u8 res = _SUCCESS;
- rtw_hal_get_def_var(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &support_ant_div);
if (!support_ant_div)
return res;
@@ -1669,12 +1141,6 @@ exit:
return res;
}
-static void power_saving_wk_hdl(struct adapter *padapter, u8 *pbuf, int sz)
-{
- rtw_ps_processor(padapter);
-}
-
-#ifdef CONFIG_88EU_P2P
u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType)
{
struct cmd_obj *ph2c;
@@ -1711,7 +1177,6 @@ exit:
return res;
}
-#endif /* CONFIG_88EU_P2P */
u8 rtw_ps_cmd(struct adapter *padapter)
{
@@ -1745,8 +1210,6 @@ exit:
return res;
}
-#ifdef CONFIG_88EU_AP_MODE
-
static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
{
int cnt = 0;
@@ -1763,7 +1226,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
/* while ((rtw_read32(padapter, 0x414)&0x00ffff00)!= 0) */
/* while ((rtw_read32(padapter, 0x414)&0x0000ff00)!= 0) */
- rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
while (!val) {
msleep(100);
@@ -1773,7 +1236,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
if (cnt > 10)
break;
- rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
+ GetHwReg8188EU(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &val);
}
if (cnt <= 10) {
@@ -1817,7 +1280,6 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
exit:
return res;
}
-#endif
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
{
@@ -1852,29 +1314,12 @@ exit:
return res;
}
-static s32 c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
+static void c2h_evt_hdl(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt, c2h_id_filter filter)
{
- s32 ret = _FAIL;
u8 buf[16];
- if (!c2h_evt) {
- /* No c2h event in cmd_obj, read c2h event before handling*/
- if (c2h_evt_read(adapter, buf) == _SUCCESS) {
- c2h_evt = (struct c2h_evt_hdr *)buf;
-
- if (filter && !filter(c2h_evt->id))
- goto exit;
-
- ret = rtw_hal_c2h_handler(adapter, c2h_evt);
- }
- } else {
- if (filter && !filter(c2h_evt->id))
- goto exit;
-
- ret = rtw_hal_c2h_handler(adapter, c2h_evt);
- }
-exit:
- return ret;
+ if (!c2h_evt)
+ c2h_evt_read(adapter, buf);
}
static void c2h_wk_callback(struct work_struct *work)
@@ -1882,7 +1327,6 @@ static void c2h_wk_callback(struct work_struct *work)
struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk);
struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv);
struct c2h_evt_hdr *c2h_evt;
- c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter);
evtpriv->c2h_wk_alive = true;
@@ -1912,16 +1356,8 @@ static void c2h_wk_callback(struct work_struct *work)
continue;
}
- if (ccx_id_filter(c2h_evt->id)) {
- /* Handle CCX report here */
- rtw_hal_c2h_handler(adapter, c2h_evt);
- kfree(c2h_evt);
- } else {
-#ifdef CONFIG_88EU_P2P
- /* Enqueue into cmd_thread for others */
- rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
-#endif
- }
+ /* Enqueue into cmd_thread for others */
+ rtw_c2h_wk_cmd(adapter, (u8 *)c2h_evt);
}
evtpriv->c2h_wk_alive = false;
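c2h_wk_callback() above is a standard workqueue callback: container_of() recovers the private structure that embeds the work_struct, and a producer kicks it with schedule_work(). A stripped-down sketch of that pattern, with invented names:

#include <linux/workqueue.h>

struct evt_ctx {
	struct work_struct c2h_wk;
	bool alive;
};

static void c2h_wk_fn(struct work_struct *work)
{
	/* recover the embedding structure from the work item */
	struct evt_ctx *ctx = container_of(work, struct evt_ctx, c2h_wk);

	ctx->alive = true;
	/* ... drain the event queue here ... */
	ctx->alive = false;
}

static void evt_ctx_init(struct evt_ctx *ctx)
{
	INIT_WORK(&ctx->c2h_wk, c2h_wk_fn);
}

/* a producer then calls schedule_work(&ctx->c2h_wk); */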
@@ -1941,7 +1377,7 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
dynamic_chk_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
break;
case POWER_SAVING_CTRL_WK_CID:
- power_saving_wk_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type_size);
+ rtw_ps_processor(padapter);
break;
case LPS_CTRL_WK_CID:
lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type_size);
@@ -1952,7 +1388,6 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
case ANT_SELECT_WK_CID:
antenna_select_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
-#ifdef CONFIG_88EU_P2P
case P2P_PS_WK_CID:
p2p_ps_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
@@ -1961,12 +1396,9 @@ u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf)
/* I used the type_size as the type command */
p2p_protocol_wk_hdl(padapter, pdrvextra_cmd->type_size);
break;
-#endif
-#ifdef CONFIG_88EU_AP_MODE
case CHECK_HIQ_WK_CID:
rtw_chk_hi_queue_hdl(padapter);
break;
-#endif /* CONFIG_88EU_AP_MODE */
case C2H_WK_CID:
c2h_evt_hdl(padapter, (struct c2h_evt_hdr *)pdrvextra_cmd->pbuf, NULL);
break;
diff --git a/drivers/staging/r8188eu/core/rtw_debug.c b/drivers/staging/r8188eu/core/rtw_debug.c
deleted file mode 100644
index 2ee64cef73f7..000000000000
--- a/drivers/staging/r8188eu/core/rtw_debug.c
+++ /dev/null
@@ -1,904 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#define _RTW_DEBUG_C_
-
-#include "../include/rtw_debug.h"
-#include "../include/drv_types.h"
-
-int proc_get_drv_version(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- int len = 0;
-
- len += snprintf(page + len, count - len, "%s\n", DRIVERVERSION);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_write_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- *eof = 1;
- return 0;
-}
-
-int proc_set_write_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 addr, val, len;
-
- if (count < 3) {
- DBG_88E("argument size is less than 3\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x %x %x", &addr, &val, &len);
-
- if (num != 3) {
- DBG_88E("invalid write_reg parameter!\n");
- return count;
- }
- switch (len) {
- case 1:
- rtw_write8(padapter, addr, (u8)val);
- break;
- case 2:
- rtw_write16(padapter, addr, (u16)val);
- break;
- case 4:
- rtw_write32(padapter, addr, val);
- break;
- default:
- DBG_88E("error write length =%d", len);
- break;
- }
- }
- return count;
-}
-
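The deleted proc_set_write_reg() copies sizeof(tmp) bytes from user space regardless of count and never NUL-terminates the buffer before sscanf(). A more defensive sketch of the same parse, bounded by both the buffer size and the user count (function name and dispatch are placeholders):

#include <linux/kernel.h>
#include <linux/minmax.h>
#include <linux/uaccess.h>

static ssize_t parse_write_reg(const char __user *buffer, size_t count)
{
	char tmp[32];
	u32 addr, val, len;
	size_t n = min(count, sizeof(tmp) - 1);

	if (!buffer || copy_from_user(tmp, buffer, n))
		return -EFAULT;
	tmp[n] = '\0';			/* make the buffer a valid string */

	if (sscanf(tmp, "%x %x %x", &addr, &val, &len) != 3)
		return -EINVAL;

	/* ... dispatch on len (1/2/4) and write the register ... */
	return count;
}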
-static u32 proc_get_read_addr = 0xeeeeeeee;
-static u32 proc_get_read_len = 0x4;
-
-int proc_get_read_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- int len = 0;
-
- if (proc_get_read_addr == 0xeeeeeeee) {
- *eof = 1;
- return len;
- }
-
- switch (proc_get_read_len) {
- case 1:
- len += snprintf(page + len, count - len, "rtw_read8(0x%x)=0x%x\n", proc_get_read_addr, rtw_read8(padapter, proc_get_read_addr));
- break;
- case 2:
- len += snprintf(page + len, count - len, "rtw_read16(0x%x)=0x%x\n", proc_get_read_addr, rtw_read16(padapter, proc_get_read_addr));
- break;
- case 4:
- len += snprintf(page + len, count - len, "rtw_read32(0x%x)=0x%x\n", proc_get_read_addr, rtw_read32(padapter, proc_get_read_addr));
- break;
- default:
- len += snprintf(page + len, count - len, "error read length=%d\n", proc_get_read_len);
- break;
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_set_read_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- char tmp[16];
- u32 addr, len;
-
- if (count < 2) {
- DBG_88E("argument size is less than 2\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x %x", &addr, &len);
-
- if (num != 2) {
- DBG_88E("invalid read_reg parameter!\n");
- return count;
- }
-
- proc_get_read_addr = addr;
-
- proc_get_read_len = len;
- }
-
- return count;
-}
-
-int proc_get_fwstate(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "fwstate=0x%x\n", get_fwstate(pmlmepriv));
-
- *eof = 1;
- return len;
-}
-
-int proc_get_sec_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "auth_alg=0x%x, enc_alg=0x%x, auth_type=0x%x, enc_type=0x%x\n",
- psecuritypriv->dot11AuthAlgrthm, psecuritypriv->dot11PrivacyAlgrthm,
- psecuritypriv->ndisauthtype, psecuritypriv->ndisencryptstatus);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mlmext_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "pmlmeinfo->state=0x%x\n", pmlmeinfo->state);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_qos_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
-
- len += snprintf(page + len, count - len, "qos_option=%d\n", pmlmepriv->qospriv.qos_option);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_ht_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- int len = 0;
- len += snprintf(page + len, count - len, "ht_option=%d\n", pmlmepriv->htpriv.ht_option);
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int len = 0;
-
- len += snprintf(page + len, count - len, "cur_ch=%d, cur_bw=%d, cur_ch_offet=%d\n",
- pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
- *eof = 1;
- return len;
-}
-
-int proc_get_ap_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct sta_info *psta;
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct wlan_network *cur_network = &pmlmepriv->cur_network;
- struct sta_priv *pstapriv = &padapter->stapriv;
- int len = 0;
-
- psta = rtw_get_stainfo(pstapriv, cur_network->network.MacAddress);
- if (psta) {
- int i;
- struct recv_reorder_ctrl *preorder_ctrl;
-
- len += snprintf(page + len, count - len, "SSID=%s\n", cur_network->network.Ssid.Ssid);
- len += snprintf(page + len, count - len, "sta's macaddr:%pM\n", psta->hwaddr);
- len += snprintf(page + len, count - len, "cur_channel=%d, cur_bwmode=%d, cur_ch_offset=%d\n", pmlmeext->cur_channel, pmlmeext->cur_bwmode, pmlmeext->cur_ch_offset);
- len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
- len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
- len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
- len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
- len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
- len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
-
- for (i = 0; i < 16; i++) {
- preorder_ctrl = &psta->recvreorder_ctrl[i];
- if (preorder_ctrl->enable)
- len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", i, preorder_ctrl->indicate_seq);
- }
- } else {
- len += snprintf(page + len, count - len, "can't get sta's macaddr, cur_network's macaddr: %pM\n", cur_network->network.MacAddress);
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_adapter_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
-
- len += snprintf(page + len, count - len, "bSurpriseRemoved=%d, bDriverStopped=%d\n",
- padapter->bSurpriseRemoved, padapter->bDriverStopped);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_trx_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct recv_priv *precvpriv = &padapter->recvpriv;
- int len = 0;
-
- len += snprintf(page + len, count - len, "free_xmitbuf_cnt=%d, free_xmitframe_cnt=%d, free_ext_xmitbuf_cnt=%d, free_recvframe_cnt=%d\n",
- pxmitpriv->free_xmitbuf_cnt, pxmitpriv->free_xmitframe_cnt, pxmitpriv->free_xmit_extbuf_cnt, precvpriv->free_recvframe_cnt);
- len += snprintf(page + len, count - len, "rx_urb_pending_cn=%d\n", precvpriv->rx_pending_cnt);
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
-
- for (i = 0x0; i < 0x300; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
- memset(page, 0, count);
- for (i = 0x300; i < 0x600; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_mac_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= MAC REG =======\n");
-
- for (i = 0x600; i < 0x800; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0x800; i < 0xB00; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0xB00; i < 0xE00; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_bb_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1;
-
- len += snprintf(page + len, count - len, "\n======= BB REG =======\n");
- for (i = 0xE00; i < 0x1000; i += 4) {
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x", i);
- len += snprintf(page + len, count - len, " 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 1;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0; i < 0xC0; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 1;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0xC0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 2;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0; i < 0xC0; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
-
- *eof = 1;
- return len;
-}
-
-int proc_get_rf_reg_dump4(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
- int i, j = 1, path;
- u32 value;
-
- len += snprintf(page + len, count - len, "\n======= RF REG =======\n");
- path = 2;
- len += snprintf(page + len, count - len, "\nRF_Path(%x)\n", path);
- for (i = 0xC0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- len += snprintf(page + len, count - len, "0x%02x ", i);
- len += snprintf(page + len, count - len, " 0x%08x ", value);
- if ((j++) % 4 == 0)
- len += snprintf(page + len, count - len, "\n");
- }
- *eof = 1;
- return len;
-}
-
-int proc_get_rx_signal(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- int len = 0;
-
- len = snprintf(page + len, count,
- "rssi:%d\n"
- "rxpwdb:%d\n"
- "signal_strength:%u\n"
- "signal_qual:%u\n"
- "noise:%u\n",
- padapter->recvpriv.rssi,
- padapter->recvpriv.rxpwdb,
- padapter->recvpriv.signal_strength,
- padapter->recvpriv.signal_qual,
- padapter->recvpriv.noise
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_rx_signal(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 is_signal_dbg;
- s32 signal_strength;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%u %u", &is_signal_dbg, &signal_strength);
- is_signal_dbg = is_signal_dbg == 0 ? 0 : 1;
- if (is_signal_dbg && num != 2)
- return count;
-
- signal_strength = signal_strength > 100 ? 100 : signal_strength;
- signal_strength = signal_strength < 0 ? 0 : signal_strength;
-
- padapter->recvpriv.is_signal_dbg = is_signal_dbg;
- padapter->recvpriv.signal_strength_dbg = signal_strength;
-
- if (is_signal_dbg)
- DBG_88E("set %s %u\n", "DBG_SIGNAL_STRENGTH", signal_strength);
- else
- DBG_88E("set %s\n", "HW_SIGNAL_STRENGTH");
- }
- return count;
-}
-
-int proc_get_ht_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->ht_enable
- );
- *eof = 1;
- return len;
-}
-
-int proc_set_ht_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->ht_enable = mode;
- pr_info("ht_enable=%d\n", pregpriv->ht_enable);
- }
- }
-
- return count;
-}
-
-int proc_get_cbw40_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->cbw40_enable
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->cbw40_enable = mode;
- pr_info("cbw40_enable=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_ampdu_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->ampdu_enable
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- s32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->ampdu_enable = mode;
- pr_info("ampdu_enable=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_two_path_rssi(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
-
- int len = 0;
-
- if (padapter)
- len += snprintf(page + len, count - len,
- "%d %d\n",
- padapter->recvpriv.RxRssi[0],
- padapter->recvpriv.RxRssi[1]
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_get_rx_stbc(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
-
- int len = 0;
-
- if (pregpriv)
- len += snprintf(page + len, count - len,
- "%d\n",
- pregpriv->rx_stbc
- );
-
- *eof = 1;
- return len;
-}
-
-int proc_set_rx_stbc(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct registry_priv *pregpriv = &padapter->registrypriv;
- char tmp[32];
- u32 mode = 0;
-
- if (count < 1)
- return -EFAULT;
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- if (pregpriv) {
- pregpriv->rx_stbc = mode;
- printk("rx_stbc=%d\n", mode);
- }
- }
- return count;
-}
-
-int proc_get_rssi_disp(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- *eof = 1;
- return 0;
-}
-
-int proc_set_rssi_disp(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
-{
- struct net_device *dev = (struct net_device *)data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- char tmp[32];
- u32 enable = 0;
-
- if (count < 1) {
- DBG_88E("argument size is less than 1\n");
- return -EFAULT;
- }
-
- if (buffer && !copy_from_user(tmp, buffer, sizeof(tmp))) {
- int num = sscanf(tmp, "%x", &enable);
-
- if (num != 1) {
- DBG_88E("invalid set_rssi_disp parameter!\n");
- return count;
- }
-
- if (enable) {
- DBG_88E("Turn On Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = enable;
- } else {
- DBG_88E("Turn Off Rx RSSI Display Function\n");
- padapter->bRxRSSIDisplay = 0;
- }
- }
- return count;
-}
-
-#ifdef CONFIG_88EU_AP_MODE
-
-int proc_get_all_sta_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct sta_info *psta;
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct sta_priv *pstapriv = &padapter->stapriv;
- int i, j;
- struct list_head *plist, *phead;
- struct recv_reorder_ctrl *preorder_ctrl;
- int len = 0;
-
- len += snprintf(page + len, count - len, "sta_dz_bitmap=0x%x, tim_bitmap=0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
-
- spin_lock_bh(&pstapriv->sta_hash_lock);
-
- for (i = 0; i < NUM_STA; i++) {
- phead = &pstapriv->sta_hash[i];
- plist = phead->next;
-
- while (phead != plist) {
- psta = container_of(plist, struct sta_info, hash_list);
-
- plist = plist->next;
-
- len += snprintf(page + len, count - len, "sta's macaddr: %pM\n", psta->hwaddr);
- len += snprintf(page + len, count - len, "rtsen=%d, cts2slef=%d\n", psta->rtsen, psta->cts2self);
- len += snprintf(page + len, count - len, "state=0x%x, aid=%d, macid=%d, raid=%d\n", psta->state, psta->aid, psta->mac_id, psta->raid);
- len += snprintf(page + len, count - len, "qos_en=%d, ht_en=%d, init_rate=%d\n", psta->qos_option, psta->htpriv.ht_option, psta->init_rate);
- len += snprintf(page + len, count - len, "bwmode=%d, ch_offset=%d, sgi=%d\n", psta->htpriv.bwmode, psta->htpriv.ch_offset, psta->htpriv.sgi);
- len += snprintf(page + len, count - len, "ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
- len += snprintf(page + len, count - len, "agg_enable_bitmap=%x, candidate_tid_bitmap=%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
- len += snprintf(page + len, count - len, "sleepq_len=%d\n", psta->sleepq_len);
- len += snprintf(page + len, count - len, "capability=0x%x\n", psta->capability);
- len += snprintf(page + len, count - len, "flags=0x%x\n", psta->flags);
- len += snprintf(page + len, count - len, "wpa_psk=0x%x\n", psta->wpa_psk);
- len += snprintf(page + len, count - len, "wpa2_group_cipher=0x%x\n", psta->wpa2_group_cipher);
- len += snprintf(page + len, count - len, "wpa2_pairwise_cipher=0x%x\n", psta->wpa2_pairwise_cipher);
- len += snprintf(page + len, count - len, "qos_info=0x%x\n", psta->qos_info);
- len += snprintf(page + len, count - len, "dot118021XPrivacy=0x%x\n", psta->dot118021XPrivacy);
-
- for (j = 0; j < 16; j++) {
- preorder_ctrl = &psta->recvreorder_ctrl[j];
- if (preorder_ctrl->enable)
- len += snprintf(page + len, count - len, "tid=%d, indicate_seq=%d\n", j, preorder_ctrl->indicate_seq);
- }
- }
- }
- spin_unlock_bh(&pstapriv->sta_hash_lock);
-
- *eof = 1;
- return len;
-}
-#endif
-
-int proc_get_best_channel(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data)
-{
- struct net_device *dev = data;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- int len = 0;
- u32 i, best_channel_24G = 1, index_24G = 0;
-
- for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
- if (pmlmeext->channel_set[i].ChannelNum == 1)
- index_24G = i;
- }
-
- for (i = 0; pmlmeext->channel_set[i].ChannelNum != 0; i++) {
- /* 2.4G */
- if (pmlmeext->channel_set[i].ChannelNum == 6) {
- if (pmlmeext->channel_set[i].rx_count < pmlmeext->channel_set[index_24G].rx_count) {
- index_24G = i;
- best_channel_24G = pmlmeext->channel_set[i].ChannelNum;
- }
- }
-
- /* debug */
- len += snprintf(page + len, count - len, "The rx cnt of channel %3d = %d\n",
- pmlmeext->channel_set[i].ChannelNum, pmlmeext->channel_set[i].rx_count);
- }
-
- len += snprintf(page + len, count - len, "best_channel_24G = %d\n", best_channel_24G);
-
- *eof = 1;
- return len;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_efuse.c b/drivers/staging/r8188eu/core/rtw_efuse.c
index c1c70648f5bc..03c8431b2ed3 100644
--- a/drivers/staging/r8188eu/core/rtw_efuse.c
+++ b/drivers/staging/r8188eu/core/rtw_efuse.c
@@ -25,13 +25,10 @@ u8 fakeBTEfuseInitMap[EFUSE_BT_MAX_MAP_LEN] = {0};
u8 fakeBTEfuseModifiedMap[EFUSE_BT_MAX_MAP_LEN] = {0};
/*------------------------Define local variable------------------------------*/
-/* */
#define REG_EFUSE_CTRL 0x0030
#define EFUSE_CTRL REG_EFUSE_CTRL /* E-Fuse Control. */
-/* */
-static bool Efuse_Read1ByteFromFakeContent(struct adapter *pAdapter,
- u16 Offset,
- u8 *Value)
+
+static bool Efuse_Read1ByteFromFakeContent(u16 Offset, u8 *Value)
{
if (Offset >= EFUSE_MAX_HW_SIZE)
return false;
@@ -58,62 +55,6 @@ Efuse_Write1ByteToFakeContent(
return true;
}
-/*-----------------------------------------------------------------------------
- * Function: Efuse_PowerSwitch
- *
- * Overview: When we want to enable write operation, we should change to
- * pwr on state. When we stop write, we should switch to 500k mode
- * and disable LDO 2.5V.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/17/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void
-Efuse_PowerSwitch(
- struct adapter *pAdapter,
- u8 write,
- u8 PwrState)
-{
- pAdapter->HalFunc.EfusePowerSwitch(pAdapter, write, PwrState);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: efuse_GetCurrentSize
- *
- * Overview: Get current efuse size!!!
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/16/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-u16
-Efuse_GetCurrentSize(
- struct adapter *pAdapter,
- u8 efuseType,
- bool pseudo)
-{
- u16 ret = 0;
-
- ret = pAdapter->HalFunc.EfuseGetCurrentSize(pAdapter, efuseType, pseudo);
-
- return ret;
-}
-
/* 11/16/2008 MH Add description. Get current efuse area enabled word!!. */
u8
Efuse_CalculateWordCnts(u8 word_en)
@@ -153,7 +94,7 @@ ReadEFuseByte(
u16 retry;
if (pseudo) {
- Efuse_Read1ByteFromFakeContent(Adapter, _offset, pbuf);
+ Efuse_Read1ByteFromFakeContent(_offset, pbuf);
return;
}
@@ -184,93 +125,6 @@ ReadEFuseByte(
*pbuf = (u8)(value32 & 0xff);
}
-/* */
-/* Description: */
-/* 1. Execute E-Fuse read byte operation according as map offset and */
-/* save to E-Fuse table. */
-/* 2. Referred from SD1 Richard. */
-/* */
-/* Assumption: */
-/* 1. Boot from E-Fuse and successfully auto-load. */
-/* 2. PASSIVE_LEVEL (USB interface) */
-/* */
-/* Created by Roger, 2008.10.21. */
-/* */
-/* 2008/12/12 MH 1. Reorganize code flow and reserve bytes. and add description. */
-/* 2. Add efuse utilization collect. */
-/* 2008/12/22 MH Read Efuse must check if we write section 1 data again!!! Sec1 */
-/* write addr must be after sec5. */
-/* */
-
-static void efuse_ReadEFuse(struct adapter *Adapter, u8 efuseType, u16 _offset, u16 _size_byte, u8 *pbuf, bool pseudo)
-{
- Adapter->HalFunc.ReadEFuse(Adapter, efuseType, _offset, _size_byte, pbuf, pseudo);
-}
-
-void EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool pseudo
- )
-{
- pAdapter->HalFunc.EFUSEGetEfuseDefinition(pAdapter, efuseType, type, pOut, pseudo);
-}
-
-/*-----------------------------------------------------------------------------
- * Function: EFUSE_Read1Byte
- *
- * Overview: Copy from WMAC for EFUSE read 1 byte.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 09/23/2008 MHC Copy from WMAC.
- *
- *---------------------------------------------------------------------------*/
-u8 EFUSE_Read1Byte(struct adapter *Adapter, u16 Address)
-{
- u8 data;
- u8 Bytetemp = {0x00};
- u8 temp = {0x00};
- u32 k = 0;
- u16 contentLen = 0;
-
- EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&contentLen, false);
-
- if (Address < contentLen) { /* E-fuse 512Byte */
- /* Write E-fuse Register address bit0~7 */
- temp = Address & 0xFF;
- rtw_write8(Adapter, EFUSE_CTRL + 1, temp);
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 2);
- /* Write E-fuse Register address bit8~9 */
- temp = ((Address >> 8) & 0x03) | (Bytetemp & 0xFC);
- rtw_write8(Adapter, EFUSE_CTRL + 2, temp);
-
- /* Write 0x30[31]= 0 */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
- temp = Bytetemp & 0x7F;
- rtw_write8(Adapter, EFUSE_CTRL + 3, temp);
-
- /* Wait Write-ready (0x30[31]= 1) */
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
- while (!(Bytetemp & 0x80)) {
- Bytetemp = rtw_read8(Adapter, EFUSE_CTRL + 3);
- k++;
- if (k == 1000) {
- k = 0;
- break;
- }
- }
- data = rtw_read8(Adapter, EFUSE_CTRL);
- return data;
- } else {
- return 0xFF;
- }
-
-} /* EFUSE_Read1Byte */
-
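The deleted EFUSE_Read1Byte() polls a ready bit with a bare retry counter (k up to 1000). A bounded-poll sketch of the same idea, with the register accessor passed in as a callback and a short sleep between attempts (all names invented):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

static int demo_poll_ready(u8 (*read8)(u16 addr), u16 status_addr, u8 ready_bit)
{
	unsigned int retry;

	for (retry = 0; retry < 1000; retry++) {
		if (read8(status_addr) & ready_bit)
			return 0;
		usleep_range(10, 20);	/* back off instead of spinning */
	}
	return -ETIMEDOUT;
}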
/* 11/16/2008 MH Read one byte from real Efuse. */
u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
{
@@ -278,7 +132,7 @@ u8 efuse_OneByteRead(struct adapter *pAdapter, u16 addr, u8 *data, bool pseudo)
u8 result;
if (pseudo) {
- result = Efuse_Read1ByteFromFakeContent(pAdapter, addr, data);
+ result = Efuse_Read1ByteFromFakeContent(addr, data);
return result;
}
/* -----------------e-fuse reg ctrl --------------------------------- */
@@ -333,33 +187,6 @@ u8 efuse_OneByteWrite(struct adapter *pAdapter, u16 addr, u8 data, bool pseudo)
return result;
}
-int Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool pseudo)
-{
- int ret = 0;
-
- ret = pAdapter->HalFunc.Efuse_PgPacketRead(pAdapter, offset, data, pseudo);
-
- return ret;
-}
-
-int Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
-{
- int ret;
-
- ret = pAdapter->HalFunc.Efuse_PgPacketWrite(pAdapter, offset, word_en, data, pseudo);
-
- return ret;
-}
-
-static int Efuse_PgPacketWrite_BT(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool pseudo)
-{
- int ret;
-
- ret = pAdapter->HalFunc.Efuse_PgPacketWrite_BT(pAdapter, offset, word_en, data, pseudo);
-
- return ret;
-}
-
/*-----------------------------------------------------------------------------
* Function: efuse_WordEnableDataRead
*
@@ -397,368 +224,6 @@ void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata)
}
}
-u8 Efuse_WordEnableDataWrite(struct adapter *pAdapter, u16 efuse_addr, u8 word_en, u8 *data, bool pseudo)
-{
- u8 ret = 0;
-
- ret = pAdapter->HalFunc.Efuse_WordEnableDataWrite(pAdapter, efuse_addr, word_en, data, pseudo);
-
- return ret;
-}
-
-static u8 efuse_read8(struct adapter *padapter, u16 address, u8 *value)
-{
- return efuse_OneByteRead(padapter, address, value, false);
-}
-
-static u8 efuse_write8(struct adapter *padapter, u16 address, u8 *value)
-{
- return efuse_OneByteWrite(padapter, address, *value, false);
-}
-
-/*
- * read/write raw efuse data
- */
-u8 rtw_efuse_access(struct adapter *padapter, u8 write, u16 start_addr, u16 cnts, u8 *data)
-{
- int i = 0;
- u16 real_content_len = 0, max_available_size = 0;
- u8 res = _FAIL;
- u8 (*rw8)(struct adapter *, u16, u8*);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&real_content_len, false);
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
-
- if (start_addr > real_content_len)
- return _FAIL;
-
- if (write) {
- if ((start_addr + cnts) > max_available_size)
- return _FAIL;
- rw8 = &efuse_write8;
- } else {
- rw8 = &efuse_read8;
- }
-
- Efuse_PowerSwitch(padapter, write, true);
-
- /* e-fuse one byte read / write */
- for (i = 0; i < cnts; i++) {
- if (start_addr >= real_content_len) {
- res = _FAIL;
- break;
- }
-
- res = rw8(padapter, start_addr++, data++);
- if (_FAIL == res)
- break;
- }
-
- Efuse_PowerSwitch(padapter, write, false);
-
- return res;
-}
-/* */
-u16 efuse_GetMaxSize(struct adapter *padapter)
-{
- u16 max_size;
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_size, false);
- return max_size;
-}
-/* */
-u8 efuse_GetCurrentSize(struct adapter *padapter, u16 *size)
-{
- Efuse_PowerSwitch(padapter, false, true);
- *size = Efuse_GetCurrentSize(padapter, EFUSE_WIFI, false);
- Efuse_PowerSwitch(padapter, false, false);
-
- return _SUCCESS;
-}
-/* */
-u8 rtw_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
-{
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- Efuse_PowerSwitch(padapter, false, true);
-
- efuse_ReadEFuse(padapter, EFUSE_WIFI, addr, cnts, data, false);
-
- Efuse_PowerSwitch(padapter, false, false);
-
- return _SUCCESS;
-}
-
-u8 rtw_BT_efuse_map_read(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
-{
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- Efuse_PowerSwitch(padapter, false, true);
-
- efuse_ReadEFuse(padapter, EFUSE_BT, addr, cnts, data, false);
-
- Efuse_PowerSwitch(padapter, false, false);
-
- return _SUCCESS;
-}
-/* */
-u8 rtw_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
-{
- u8 offset, word_en;
- u8 *map;
- u8 newdata[PGPKT_DATA_SIZE + 1];
- s32 i, idx;
- u8 ret = _SUCCESS;
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- map = kzalloc(mapLen, GFP_KERNEL);
- if (!map)
- return _FAIL;
-
- ret = rtw_efuse_map_read(padapter, 0, mapLen, map);
- if (ret == _FAIL)
- goto exit;
-
- Efuse_PowerSwitch(padapter, true, true);
-
- offset = (addr >> 3);
- word_en = 0xF;
- memset(newdata, 0xFF, PGPKT_DATA_SIZE + 1);
- i = addr & 0x7; /* index of one package */
- idx = 0; /* data index */
-
- if (i & 0x1) {
- /* odd start */
- if (data[idx] != map[addr + idx]) {
- word_en &= ~BIT(i >> 1);
- newdata[i - 1] = map[addr + idx - 1];
- newdata[i] = data[idx];
- }
- i++;
- idx++;
- }
- do {
- for (; i < PGPKT_DATA_SIZE; i += 2) {
- if (cnts == idx)
- break;
- if ((cnts - idx) == 1) {
- if (data[idx] != map[addr + idx]) {
- word_en &= ~BIT(i >> 1);
- newdata[i] = data[idx];
- newdata[i + 1] = map[addr + idx + 1];
- }
- idx++;
- break;
- } else {
- if ((data[idx] != map[addr + idx]) ||
- (data[idx + 1] != map[addr + idx + 1])) {
- word_en &= ~BIT(i >> 1);
- newdata[i] = data[idx];
- newdata[i + 1] = data[idx + 1];
- }
- idx += 2;
- }
- if (idx == cnts)
- break;
- }
-
- if (word_en != 0xF) {
- ret = Efuse_PgPacketWrite(padapter, offset, word_en, newdata, false);
- DBG_88E("offset=%x\n", offset);
- DBG_88E("word_en=%x\n", word_en);
-
- for (i = 0; i < PGPKT_DATA_SIZE; i++)
- DBG_88E("data=%x \t", newdata[i]);
- if (ret == _FAIL)
- break;
- }
-
- if (idx == cnts)
- break;
-
- offset++;
- i = 0;
- word_en = 0xF;
- memset(newdata, 0xFF, PGPKT_DATA_SIZE);
- } while (1);
-
- Efuse_PowerSwitch(padapter, true, false);
-exit:
- kfree(map);
- return ret;
-}
-
-/* */
-u8 rtw_BT_efuse_map_write(struct adapter *padapter, u16 addr, u16 cnts, u8 *data)
-{
- u8 offset, word_en;
- u8 *map;
- u8 newdata[PGPKT_DATA_SIZE + 1];
- s32 i, idx;
- u8 ret = _SUCCESS;
- u16 mapLen = 0;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, false);
-
- if ((addr + cnts) > mapLen)
- return _FAIL;
-
- map = kzalloc(mapLen, GFP_KERNEL);
- if (!map)
- return _FAIL;
-
- ret = rtw_BT_efuse_map_read(padapter, 0, mapLen, map);
- if (ret == _FAIL)
- goto exit;
-
- Efuse_PowerSwitch(padapter, true, true);
-
- offset = (addr >> 3);
- word_en = 0xF;
- memset(newdata, 0xFF, PGPKT_DATA_SIZE + 1);
- i = addr & 0x7; /* index of one package */
- idx = 0; /* data index */
-
- if (i & 0x1) {
- /* odd start */
- if (data[idx] != map[addr + idx]) {
- word_en &= ~BIT(i >> 1);
- newdata[i - 1] = map[addr + idx - 1];
- newdata[i] = data[idx];
- }
- i++;
- idx++;
- }
- do {
- for (; i < PGPKT_DATA_SIZE; i += 2) {
- if (cnts == idx)
- break;
- if ((cnts - idx) == 1) {
- if (data[idx] != map[addr + idx]) {
- word_en &= ~BIT(i >> 1);
- newdata[i] = data[idx];
- newdata[i + 1] = map[addr + idx + 1];
- }
- idx++;
- break;
- } else {
- if ((data[idx] != map[addr + idx]) ||
- (data[idx + 1] != map[addr + idx + 1])) {
- word_en &= ~BIT(i >> 1);
- newdata[i] = data[idx];
- newdata[i + 1] = data[idx + 1];
- }
- idx += 2;
- }
- if (idx == cnts)
- break;
- }
-
- if (word_en != 0xF) {
- DBG_88E("%s: offset=%#X\n", __func__, offset);
- DBG_88E("%s: word_en=%#X\n", __func__, word_en);
- DBG_88E("%s: data=", __func__);
- for (i = 0; i < PGPKT_DATA_SIZE; i++)
- DBG_88E("0x%02X ", newdata[i]);
- DBG_88E("\n");
-
- ret = Efuse_PgPacketWrite_BT(padapter, offset, word_en, newdata, false);
- if (ret == _FAIL)
- break;
- }
-
- if (idx == cnts)
- break;
-
- offset++;
- i = 0;
- word_en = 0xF;
- memset(newdata, 0xFF, PGPKT_DATA_SIZE);
- } while (1);
-
- Efuse_PowerSwitch(padapter, true, false);
-
-exit:
-
- kfree(map);
-
- return ret;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: efuse_ShadowRead1Byte
- * efuse_ShadowRead2Byte
- * efuse_ShadowRead4Byte
- *
- * Overview: Read from efuse init map by one/two/four bytes !!!!!
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/12/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-static void
-efuse_ShadowRead1Byte(
- struct adapter *pAdapter,
- u16 Offset,
- u8 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
-
-} /* EFUSE_ShadowRead1Byte */
-
-/* Read Two Bytes */
-static void
-efuse_ShadowRead2Byte(
- struct adapter *pAdapter,
- u16 Offset,
- u16 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
-
-} /* EFUSE_ShadowRead2Byte */
-
-/* Read Four Bytes */
-static void
-efuse_ShadowRead4Byte(
- struct adapter *pAdapter,
- u16 Offset,
- u32 *Value)
-{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
-
- *Value = pEEPROM->efuse_eeprom_data[Offset];
- *Value |= pEEPROM->efuse_eeprom_data[Offset + 1] << 8;
- *Value |= pEEPROM->efuse_eeprom_data[Offset + 2] << 16;
- *Value |= pEEPROM->efuse_eeprom_data[Offset + 3] << 24;
-
-} /* efuse_ShadowRead4Byte */
-
/*-----------------------------------------------------------------------------
* Function: Efuse_ReadAllMap
*
@@ -779,13 +244,13 @@ static void Efuse_ReadAllMap(struct adapter *pAdapter, u8 efuseType, u8 *Efuse,
{
u16 mapLen = 0;
- Efuse_PowerSwitch(pAdapter, false, true);
+ rtl8188e_EfusePowerSwitch(pAdapter, false, true);
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
- efuse_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
+ rtl8188e_ReadEFuse(pAdapter, efuseType, 0, mapLen, Efuse, pseudo);
- Efuse_PowerSwitch(pAdapter, false, false);
+ rtl8188e_EfusePowerSwitch(pAdapter, false, false);
}
/*-----------------------------------------------------------------------------
@@ -809,40 +274,13 @@ void EFUSE_ShadowMapUpdate(
u8 efuseType,
bool pseudo)
{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(pAdapter);
+ struct eeprom_priv *pEEPROM = &pAdapter->eeprompriv;
u16 mapLen = 0;
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_MAP_LEN, (void *)&mapLen, pseudo);
if (pEEPROM->bautoload_fail_flag)
memset(pEEPROM->efuse_eeprom_data, 0xFF, mapLen);
else
Efuse_ReadAllMap(pAdapter, efuseType, pEEPROM->efuse_eeprom_data, pseudo);
} /* EFUSE_ShadowMapUpdate */
-
-/*-----------------------------------------------------------------------------
- * Function: EFUSE_ShadowRead
- *
- * Overview: Read from efuse init map !!!!!
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 11/12/2008 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void EFUSE_ShadowRead(struct adapter *pAdapter, u8 Type, u16 Offset, u32 *Value)
-{
- if (Type == 1)
- efuse_ShadowRead1Byte(pAdapter, Offset, (u8 *)Value);
- else if (Type == 2)
- efuse_ShadowRead2Byte(pAdapter, Offset, (u16 *)Value);
- else if (Type == 4)
- efuse_ShadowRead4Byte(pAdapter, Offset, (u32 *)Value);
-
-} /* EFUSE_ShadowRead */
diff --git a/drivers/staging/r8188eu/core/rtw_ieee80211.c b/drivers/staging/r8188eu/core/rtw_ieee80211.c
index b3a74198596a..343c2f9a4ce8 100644
--- a/drivers/staging/r8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/r8188eu/core/rtw_ieee80211.c
@@ -140,59 +140,6 @@ u8 *rtw_set_ie
return pbuf + len + 2;
}
-inline u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
- u8 new_ch, u8 ch_switch_cnt)
-{
- u8 ie_data[3];
-
- ie_data[0] = ch_switch_mode;
- ie_data[1] = new_ch;
- ie_data[2] = ch_switch_cnt;
- return rtw_set_ie(buf, WLAN_EID_CHANNEL_SWITCH, 3, ie_data, buf_len);
-}
-
-inline u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset)
-{
- if (ch_offset == SCN)
- return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- else if (ch_offset == SCA)
- return HAL_PRIME_CHNL_OFFSET_UPPER;
- else if (ch_offset == SCB)
- return HAL_PRIME_CHNL_OFFSET_LOWER;
-
- return HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-}
-
-inline u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset)
-{
- if (ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
- return SCN;
- else if (ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
- return SCB;
- else if (ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
- return SCA;
-
- return SCN;
-}
-
-inline u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len, u8 secondary_ch_offset)
-{
- return rtw_set_ie(buf, WLAN_EID_SECONDARY_CHANNEL_OFFSET, 1, &secondary_ch_offset, buf_len);
-}
-
-inline u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
- u8 flags, u16 reason, u16 precedence)
-{
- u8 ie_data[6];
-
- ie_data[0] = ttl;
- ie_data[1] = flags;
- *(u16 *)(ie_data + 2) = cpu_to_le16(reason);
- *(u16 *)(ie_data + 4) = cpu_to_le16(precedence);
-
- return rtw_set_ie(buf, 0x118, 6, ie_data, buf_len);
-}
-
/*----------------------------------------------------------------------------
index: the information element id index, limit is the limit for search
-----------------------------------------------------------------------------*/
@@ -225,96 +172,6 @@ u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit)
return NULL;
}
-/**
- * rtw_get_ie_ex - Search specific IE from a series of IEs
- * @in_ie: Address of IEs to search
- * @in_len: Length limit from in_ie
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- * @ie: If not NULL and the specific IE is found, the IE will be copied to the buf starting from the specific IE
- * @ielen: If not NULL and the specific IE is found, will set to the length of the entire IE
- *
- * Returns: The address of the specific IE found, or NULL
- */
-u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, uint *ielen)
-{
- uint cnt;
- u8 *target_ie = NULL;
-
- if (ielen)
- *ielen = 0;
-
- if (!in_ie || in_len <= 0)
- return target_ie;
-
- cnt = 0;
-
- while (cnt < in_len) {
- if (eid == in_ie[cnt] && (!oui || !memcmp(&in_ie[cnt + 2], oui, oui_len))) {
- target_ie = &in_ie[cnt];
-
- if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt + 1] + 2);
-
- if (ielen)
- *ielen = in_ie[cnt + 1] + 2;
-
- break;
- } else {
- cnt += in_ie[cnt + 1] + 2; /* goto next */
- }
- }
- return target_ie;
-}
-
-/**
- * rtw_ies_remove_ie - Find matching IEs and remove
- * @ies: Address of IEs to search
- * @ies_len: Pointer of length of ies, will update to new length
- * @offset: The offset to start the search
- * @eid: Element ID to match
- * @oui: OUI to match
- * @oui_len: OUI length
- *
- * Returns: _SUCCESS: ies is updated, _FAIL: not updated
- */
-int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset, u8 eid, u8 *oui, u8 oui_len)
-{
- int ret = _FAIL;
- u8 *target_ie;
- u32 target_ielen;
- u8 *start;
- uint search_len;
-
- if (!ies || !ies_len || *ies_len <= offset)
- goto exit;
-
- start = ies + offset;
- search_len = *ies_len - offset;
-
- while (1) {
- target_ie = rtw_get_ie_ex(start, search_len, eid, oui, oui_len, NULL, &target_ielen);
- if (target_ie && target_ielen) {
- u8 buf[MAX_IE_SZ] = {0};
- u8 *remain_ies = target_ie + target_ielen;
- uint remain_len = search_len - (remain_ies - start);
-
- memcpy(buf, remain_ies, remain_len);
- memcpy(target_ie, buf, remain_len);
- *ies_len = *ies_len - target_ielen;
- ret = _SUCCESS;
-
- start = target_ie;
- search_len = remain_len;
- } else {
- break;
- }
- }
-exit:
- return ret;
-}
-
void rtw_set_supported_rate(u8 *SupportedRates, uint mode)
{
@@ -1021,97 +878,24 @@ u8 key_2char2num(u8 hch, u8 lch)
void rtw_macaddr_cfg(u8 *mac_addr)
{
u8 mac[ETH_ALEN];
+
if (!mac_addr)
return;
- if (rtw_initmac) { /* Users specify the mac address */
- int jj, kk;
-
- for (jj = 0, kk = 0; jj < ETH_ALEN; jj++, kk += 3)
- mac[jj] = key_2char2num(rtw_initmac[kk], rtw_initmac[kk + 1]);
- memcpy(mac_addr, mac, ETH_ALEN);
- } else { /* Use the mac address stored in the Efuse */
- memcpy(mac, mac_addr, ETH_ALEN);
+ if (rtw_initmac && mac_pton(rtw_initmac, mac)) {
+ /* Users specify the mac address */
+ ether_addr_copy(mac_addr, mac);
+ } else {
+ /* Use the mac address stored in the Efuse */
+ ether_addr_copy(mac, mac_addr);
}
- if (((mac[0] == 0xff) && (mac[1] == 0xff) && (mac[2] == 0xff) &&
- (mac[3] == 0xff) && (mac[4] == 0xff) && (mac[5] == 0xff)) ||
- ((mac[0] == 0x0) && (mac[1] == 0x0) && (mac[2] == 0x0) &&
- (mac[3] == 0x0) && (mac[4] == 0x0) && (mac[5] == 0x0))) {
- mac[0] = 0x00;
- mac[1] = 0xe0;
- mac[2] = 0x4c;
- mac[3] = 0x87;
- mac[4] = 0x00;
- mac[5] = 0x00;
- /* use default mac address */
- memcpy(mac_addr, mac, ETH_ALEN);
- DBG_88E("MAC Address from efuse error, assign default one !!!\n");
+ if (is_broadcast_ether_addr(mac) || is_zero_ether_addr(mac)) {
+ eth_random_addr(mac_addr);
+ DBG_88E("MAC Address from efuse error, assign random one !!!\n");
}
- DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", (mac_addr));
-}
-
-void dump_ies(u8 *buf, u32 buf_len)
-{
- u8 *pos = (u8 *)buf;
- u8 id, len;
-
- while (pos - buf <= buf_len) {
- id = *pos;
- len = *(pos + 1);
-
- DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
- #ifdef CONFIG_88EU_P2P
- dump_p2p_ie(pos, len);
- #endif
- dump_wps_ie(pos, len);
-
- pos += (2 + len);
- }
-}
-
-void dump_wps_ie(u8 *ie, u32 ie_len)
-{
- u8 *pos = (u8 *)ie;
- u16 id;
- u16 len;
- u8 *wps_ie;
- uint wps_ielen;
-
- wps_ie = rtw_get_wps_ie(ie, ie_len, NULL, &wps_ielen);
- if (wps_ie != ie || wps_ielen == 0)
- return;
-
- pos += 6;
- while (pos - ie < ie_len) {
- id = RTW_GET_BE16(pos);
- len = RTW_GET_BE16(pos + 2);
- DBG_88E("%s ID:0x%04x, LEN:%u\n", __func__, id, len);
- pos += (4 + len);
- }
-}
-
-#ifdef CONFIG_88EU_P2P
-void dump_p2p_ie(u8 *ie, u32 ie_len)
-{
- u8 *pos = (u8 *)ie;
- u8 id;
- u16 len;
- u8 *p2p_ie;
- uint p2p_ielen;
-
- p2p_ie = rtw_get_p2p_ie(ie, ie_len, NULL, &p2p_ielen);
- if (p2p_ie != ie || p2p_ielen == 0)
- return;
-
- pos += 6;
- while (pos - ie < ie_len) {
- id = *pos;
- len = get_unaligned_le16(pos + 1);
- DBG_88E("%s ID:%u, LEN:%u\n", __func__, id, len);
- pos += (3 + len);
- }
+ DBG_88E("rtw_macaddr_cfg MAC Address = %pM\n", mac_addr);
}
/**
@@ -1294,52 +1078,6 @@ void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex, u8 attr_id)
}
}
-#endif /* CONFIG_88EU_P2P */
-
-/* Baron adds to avoid FreeBSD warning */
-int ieee80211_is_empty_essid(const char *essid, int essid_len)
-{
- /* Single white space is for Linksys APs */
- if (essid_len == 1 && essid[0] == ' ')
- return 1;
-
- /* Otherwise, if the entire essid is 0, we assume it is hidden */
- while (essid_len) {
- essid_len--;
- if (essid[essid_len] != '\0')
- return 0;
- }
-
- return 1;
-}
-
-int ieee80211_get_hdrlen(u16 fc)
-{
- int hdrlen = 24;
-
- switch (WLAN_FC_GET_TYPE(fc)) {
- case RTW_IEEE80211_FTYPE_DATA:
- if (fc & RTW_IEEE80211_STYPE_QOS_DATA)
- hdrlen += 2;
- if ((fc & RTW_IEEE80211_FCTL_FROMDS) && (fc & RTW_IEEE80211_FCTL_TODS))
- hdrlen += 6; /* Addr4 */
- break;
- case RTW_IEEE80211_FTYPE_CTL:
- switch (WLAN_FC_GET_STYPE(fc)) {
- case RTW_IEEE80211_STYPE_CTS:
- case RTW_IEEE80211_STYPE_ACK:
- hdrlen = 10;
- break;
- default:
- hdrlen = 16;
- break;
- }
- break;
- }
-
- return hdrlen;
-}
-
static int rtw_get_cipher_info(struct wlan_network *pnetwork)
{
u32 wpa_ielen;
@@ -1482,58 +1220,3 @@ u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40, unsign
}
return max_rate;
}
-
-int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category, u8 *action)
-{
- const u8 *frame_body = frame + sizeof(struct rtw_ieee80211_hdr_3addr);
- u16 fc;
- u8 c, a = 0;
-
- fc = le16_to_cpu(((struct rtw_ieee80211_hdr_3addr *)frame)->frame_ctl);
-
- if ((fc & (RTW_IEEE80211_FCTL_FTYPE | RTW_IEEE80211_FCTL_STYPE)) !=
- (RTW_IEEE80211_FTYPE_MGMT | RTW_IEEE80211_STYPE_ACTION))
- return false;
-
- c = frame_body[0];
-
- switch (c) {
- case RTW_WLAN_CATEGORY_P2P: /* vendor-specific */
- break;
- default:
- a = frame_body[1];
- }
-
- if (category)
- *category = c;
- if (action)
- *action = a;
-
- return true;
-}
-
-static const char *_action_public_str[] = {
- "ACT_PUB_BSSCOEXIST",
- "ACT_PUB_DSE_ENABLE",
- "ACT_PUB_DSE_DEENABLE",
- "ACT_PUB_DSE_REG_LOCATION",
- "ACT_PUB_EXT_CHL_SWITCH",
- "ACT_PUB_DSE_MSR_REQ",
- "ACT_PUB_DSE_MSR_RPRT",
- "ACT_PUB_MP",
- "ACT_PUB_DSE_PWR_CONSTRAINT",
- "ACT_PUB_VENDOR",
- "ACT_PUB_GAS_INITIAL_REQ",
- "ACT_PUB_GAS_INITIAL_RSP",
- "ACT_PUB_GAS_COMEBACK_REQ",
- "ACT_PUB_GAS_COMEBACK_RSP",
- "ACT_PUB_TDLS_DISCOVERY_RSP",
- "ACT_PUB_LOCATION_TRACK",
- "ACT_PUB_RSVD",
-};
-
-const char *action_public_str(u8 action)
-{
- action = (action >= ACT_PUBLIC_MAX) ? ACT_PUBLIC_MAX : action;
- return _action_public_str[action];
-}
diff --git a/drivers/staging/r8188eu/core/rtw_io.c b/drivers/staging/r8188eu/core/rtw_io.c
deleted file mode 100644
index cde0205816b1..000000000000
--- a/drivers/staging/r8188eu/core/rtw_io.c
+++ /dev/null
@@ -1,299 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-/*
-
-The purpose of rtw_io.c
-
-a. provides the API
-
-b. provides the protocol engine
-
-c. provides the software interface between caller and the hardware interface
-
-Compiler Flag Option:
-
-USB:
- a. USE_ASYNC_IRP: Both sync/async operations are provided.
-
-Only sync read/rtw_write_mem operations are provided.
-
-jackson@realtek.com.tw
-
-*/
-
-#define _RTW_IO_C_
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/rtw_io.h"
-#include "../include/osdep_intf.h"
-#include "../include/usb_ops.h"
-
-#define rtw_le16_to_cpu(val) le16_to_cpu(val)
-#define rtw_le32_to_cpu(val) le32_to_cpu(val)
-#define rtw_cpu_to_le16(val) cpu_to_le16(val)
-#define rtw_cpu_to_le32(val) cpu_to_le32(val)
-
-u8 _rtw_read8(struct adapter *adapter, u32 addr)
-{
- u8 r_val;
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
-
-
- _read8 = pintfhdl->io_ops._read8;
- r_val = _read8(pintfhdl, addr);
-
- return r_val;
-}
-
-u16 _rtw_read16(struct adapter *adapter, u32 addr)
-{
- u16 r_val;
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
-
- _read16 = pintfhdl->io_ops._read16;
-
- r_val = _read16(pintfhdl, addr);
-
- return r_val;
-}
-
-u32 _rtw_read32(struct adapter *adapter, u32 addr)
-{
- u32 r_val;
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
-
- _read32 = pintfhdl->io_ops._read32;
-
- r_val = _read32(pintfhdl, addr);
-
- return r_val;
-}
-
-int _rtw_write8(struct adapter *adapter, u32 addr, u8 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
- int ret;
-
- _write8 = pintfhdl->io_ops._write8;
-
- ret = _write8(pintfhdl, addr, val);
-
-
- return RTW_STATUS_CODE(ret);
-}
-
-int _rtw_write16(struct adapter *adapter, u32 addr, u16 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
- int ret;
-
- _write16 = pintfhdl->io_ops._write16;
-
- ret = _write16(pintfhdl, addr, val);
-
-
- return RTW_STATUS_CODE(ret);
-}
-int _rtw_write32(struct adapter *adapter, u32 addr, u32 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
- int ret;
-
- _write32 = pintfhdl->io_ops._write32;
-
- ret = _write32(pintfhdl, addr, val);
-
-
- return RTW_STATUS_CODE(ret);
-}
-
-int _rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = (struct intf_hdl *)(&pio_priv->intf);
- int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata);
- int ret;
-
- _writeN = pintfhdl->io_ops._writeN;
-
- ret = _writeN(pintfhdl, addr, length, pdata);
-
-
- return RTW_STATUS_CODE(ret);
-}
-int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
- int ret;
-
- _write8_async = pintfhdl->io_ops._write8_async;
-
- ret = _write8_async(pintfhdl, addr, val);
-
-
- return RTW_STATUS_CODE(ret);
-}
-
-int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
- int ret;
-
- _write16_async = pintfhdl->io_ops._write16_async;
- ret = _write16_async(pintfhdl, addr, val);
-
- return RTW_STATUS_CODE(ret);
-}
-
-int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val)
-{
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
- int ret;
-
- _write32_async = pintfhdl->io_ops._write32_async;
- ret = _write32_async(pintfhdl, addr, val);
-
- return RTW_STATUS_CODE(ret);
-}
-
-void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
-{
- void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
-
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return;
- _read_mem = pintfhdl->io_ops._read_mem;
- _read_mem(pintfhdl, addr, cnt, pmem);
-
-}
-
-void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
-{
- void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
-
-
-
- _write_mem = pintfhdl->io_ops._write_mem;
-
- _write_mem(pintfhdl, addr, cnt, pmem);
-
-
-}
-
-void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
-{
- u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
-
-
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- return;
-
- _read_port = pintfhdl->io_ops._read_port;
-
- _read_port(pintfhdl, addr, cnt, pmem);
-
-
-}
-
-void _rtw_read_port_cancel(struct adapter *adapter)
-{
- void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
-
- _read_port_cancel = pintfhdl->io_ops._read_port_cancel;
-
- if (_read_port_cancel)
- _read_port_cancel(pintfhdl);
-}
-
-u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem)
-{
- u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
- u32 ret = _SUCCESS;
-
-
-
- _write_port = pintfhdl->io_ops._write_port;
-
- ret = _write_port(pintfhdl, addr, cnt, pmem);
-
-
-
- return ret;
-}
-
-u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem, int timeout_ms)
-{
- int ret = _SUCCESS;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)pmem;
- struct submit_ctx sctx;
-
- rtw_sctx_init(&sctx, timeout_ms);
- pxmitbuf->sctx = &sctx;
-
- ret = _rtw_write_port(adapter, addr, cnt, pmem);
-
- if (ret == _SUCCESS)
- ret = rtw_sctx_wait(&sctx);
-
- return ret;
-}
-
-void _rtw_write_port_cancel(struct adapter *adapter)
-{
- void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
- struct io_priv *pio_priv = &adapter->iopriv;
- struct intf_hdl *pintfhdl = &pio_priv->intf;
-
- _write_port_cancel = pintfhdl->io_ops._write_port_cancel;
-
- if (_write_port_cancel)
- _write_port_cancel(pintfhdl);
-}
-
-int rtw_init_io_priv(struct adapter *padapter, void (*set_intf_ops)(struct _io_ops *pops))
-{
- struct io_priv *piopriv = &padapter->iopriv;
- struct intf_hdl *pintf = &piopriv->intf;
-
- if (!set_intf_ops)
- return _FAIL;
-
- piopriv->padapter = padapter;
- pintf->padapter = padapter;
- pintf->pintf_dev = adapter_to_dvobj(padapter);
-
- set_intf_ops(&pintf->io_ops);
-
- return _SUCCESS;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_ioctl_set.c b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
index c187de78b4ac..411b06e135be 100644
--- a/drivers/staging/r8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/r8188eu/core/rtw_ioctl_set.c
@@ -13,29 +13,6 @@
extern void indicate_wx_scan_complete_event(struct adapter *padapter);
-u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid)
-{
- u8 i;
- u8 ret = true;
-
- if (ssid->SsidLength > 32) {
- ret = false;
- goto exit;
- }
-
- for (i = 0; i < ssid->SsidLength; i++) {
- /* wifi, printable ascii code must be supported */
- if (!((ssid->Ssid[i] >= 0x20) && (ssid->Ssid[i] <= 0x7e))) {
- ret = false;
- break;
- }
- }
-
-exit:
-
- return ret;
-}
-
u8 rtw_do_join(struct adapter *padapter)
{
struct list_head *plist, *phead;
@@ -74,7 +51,7 @@ u8 rtw_do_join(struct adapter *padapter)
ret = _FAIL;
}
- goto exit;
+ return ret;
} else {
int select_ret;
@@ -101,10 +78,9 @@ u8 rtw_do_join(struct adapter *padapter)
rtw_generate_random_ibss(pibss);
- if (rtw_createbss_cmd(padapter) != _SUCCESS) {
- ret = false;
- goto exit;
- }
+ if (rtw_createbss_cmd(padapter) != _SUCCESS)
+ return false;
+
pmlmepriv->to_join = false;
} else {
/* can't associate ; reset under-linking */
@@ -125,8 +101,6 @@ u8 rtw_do_join(struct adapter *padapter)
}
}
-exit:
-
return ret;
}
@@ -312,9 +286,7 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
/* change to other mode from Ndis802_11APMode */
cur_network->join_res = -1;
-#ifdef CONFIG_88EU_AP_MODE
stop_ap_mode(padapter);
-#endif
}
if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
@@ -343,9 +315,6 @@ u8 rtw_set_802_11_infrastructure_mode(struct adapter *padapter,
break;
case Ndis802_11APMode:
set_fwstate(pmlmepriv, WIFI_AP_STATE);
-#ifdef CONFIG_88EU_AP_MODE
- start_ap_mode(padapter);
-#endif
break;
case Ndis802_11AutoUnknown:
case Ndis802_11InfrastructureMax:
@@ -472,298 +441,6 @@ exit:
return ret;
}
-u8 rtw_set_802_11_remove_wep(struct adapter *padapter, u32 keyindex)
-{
- u8 ret = _SUCCESS;
-
- if (keyindex >= 0x80000000 || !padapter) {
- ret = false;
- goto exit;
- } else {
- int res;
- struct security_priv *psecuritypriv = &padapter->securitypriv;
- if (keyindex < 4) {
- memset(&psecuritypriv->dot11DefKey[keyindex], 0, 16);
- res = rtw_set_key(padapter, psecuritypriv, keyindex, 0);
- psecuritypriv->dot11DefKeylen[keyindex] = 0;
- if (res == _FAIL)
- ret = _FAIL;
- } else {
- ret = _FAIL;
- }
- }
-exit:
-
- return ret;
-}
-
-u8 rtw_set_802_11_add_key(struct adapter *padapter, struct ndis_802_11_key *key)
-{
- uint encryptionalgo;
- u8 *pbssid;
- struct sta_info *stainfo;
- u8 bgroup = false;
- u8 bgrouptkey = false;/* can be removed later */
- u8 ret = _SUCCESS;
-
- if (((key->KeyIndex & 0x80000000) == 0) && ((key->KeyIndex & 0x40000000) > 0)) {
- /* It is invalid to clear bit 31 and set bit 30. If the miniport driver encounters this combination, */
- /* it must fail the request and return NDIS_STATUS_INVALID_DATA. */
- ret = _FAIL;
- goto exit;
- }
-
- if (key->KeyIndex & 0x40000000) {
- /* Pairwise key */
-
- pbssid = get_bssid(&padapter->mlmepriv);
- stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
-
- if (stainfo && padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
- encryptionalgo = stainfo->dot118021XPrivacy;
- else
- encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
-
- if (key->KeyIndex & 0x000000FF) {
- /* The key index is specified in the lower 8 bits by values of zero to 255. */
- /* The key index should be set to zero for a Pairwise key, and the driver should fail with */
- /* NDIS_STATUS_INVALID_DATA if the lower 8 bits is not zero */
- ret = _FAIL;
- goto exit;
- }
-
- /* check BSSID */
- if (is_broadcast_ether_addr(key->BSSID)) {
- ret = false;
- goto exit;
- }
-
- /* Check key length for TKIP. */
- if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
- ret = _FAIL;
- goto exit;
- }
-
- /* Check key length for AES. */
- if ((encryptionalgo == _AES_) && (key->KeyLength != 16)) {
- /* For our supplicant, EAPPkt9x.vxd, cannot differentiate TKIP and AES case. */
- if (key->KeyLength == 32) {
- key->KeyLength = 16;
- } else {
- ret = _FAIL;
- goto exit;
- }
- }
-
- /* Check key length for WEP. For NDTEST, 2005.01.27, by rcnjko. */
- if ((encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_) &&
- (key->KeyLength != 5 && key->KeyLength != 13)) {
- ret = _FAIL;
- goto exit;
- }
-
- bgroup = false;
- } else {
- /* Group key - KeyIndex(BIT(30) == 0) */
- /* when add wep key through add key and didn't assigned encryption type before */
- if ((padapter->securitypriv.ndisauthtype <= 3) &&
- (padapter->securitypriv.dot118021XGrpPrivacy == 0)) {
- switch (key->KeyLength) {
- case 5:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP40_;
- break;
- case 13:
- padapter->securitypriv.dot11PrivacyAlgrthm = _WEP104_;
- break;
- default:
- padapter->securitypriv.dot11PrivacyAlgrthm = _NO_PRIVACY_;
- break;
- }
-
- encryptionalgo = padapter->securitypriv.dot11PrivacyAlgrthm;
- } else {
- encryptionalgo = padapter->securitypriv.dot118021XGrpPrivacy;
- }
-
- if (check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE) && !is_broadcast_ether_addr(key->BSSID)) {
- ret = _FAIL;
- goto exit;
- }
-
- /* Check key length for TKIP */
- if ((encryptionalgo == _TKIP_) && (key->KeyLength != 32)) {
- ret = _FAIL;
- goto exit;
- } else if (encryptionalgo == _AES_ && (key->KeyLength != 16 && key->KeyLength != 32)) {
- /* Check key length for AES */
- /* For NDTEST, we allow keylen = 32 in this case. 2005.01.27, by rcnjko. */
- ret = _FAIL;
- goto exit;
- }
-
- /* Change the key length for EAPPkt9x.vxd. Added by Annie, 2005-11-03. */
- if ((encryptionalgo == _AES_) && (key->KeyLength == 32))
- key->KeyLength = 16;
-
- if (key->KeyIndex & 0x8000000) {/* error ??? 0x8000_0000 */
- bgrouptkey = true;
- }
-
- if ((check_fwstate(&padapter->mlmepriv, WIFI_ADHOC_STATE)) &&
- (check_fwstate(&padapter->mlmepriv, _FW_LINKED)))
- bgrouptkey = true;
- bgroup = true;
- }
-
- /* If WEP encryption algorithm, just call rtw_set_802_11_add_wep(). */
- if ((padapter->securitypriv.dot11AuthAlgrthm != dot11AuthAlgrthm_8021X) &&
- (encryptionalgo == _WEP40_ || encryptionalgo == _WEP104_)) {
- u32 keyindex;
- u32 len = FIELD_OFFSET(struct ndis_802_11_key, KeyMaterial) + key->KeyLength;
- struct ndis_802_11_wep *wep = &padapter->securitypriv.ndiswep;
-
- wep->Length = len;
- keyindex = key->KeyIndex & 0x7fffffff;
- wep->KeyIndex = keyindex;
- wep->KeyLength = key->KeyLength;
-
- memcpy(wep->KeyMaterial, key->KeyMaterial, key->KeyLength);
- memcpy(&padapter->securitypriv.dot11DefKey[keyindex].skey[0], key->KeyMaterial, key->KeyLength);
-
- padapter->securitypriv.dot11DefKeylen[keyindex] = key->KeyLength;
- padapter->securitypriv.dot11PrivacyKeyIndex = keyindex;
-
- ret = rtw_set_802_11_add_wep(padapter, wep);
- goto exit;
- }
- if (key->KeyIndex & 0x20000000) {
- /* SetRSC */
- if (bgroup) {
- unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
- memcpy(&padapter->securitypriv.dot11Grprxpn, &keysrc, 8);
- } else {
- unsigned long long keysrc = key->KeyRSC & 0x00FFFFFFFFFFFFULL;
- memcpy(&padapter->securitypriv.dot11Grptxpn, &keysrc, 8);
- }
- }
-
- /* Indicate this key idx is used for TX */
- /* Save the key in KeyMaterial */
- if (bgroup) { /* Group transmit key */
- int res;
-
- if (bgrouptkey)
- padapter->securitypriv.dot118021XGrpKeyid = (u8)key->KeyIndex;
- if ((key->KeyIndex & 0x3) == 0) {
- ret = _FAIL;
- goto exit;
- }
- memset(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
- memset(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
- memset(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], 0, 16);
-
- if ((key->KeyIndex & 0x10000000)) {
- memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
- memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
- } else {
- memcpy(&padapter->securitypriv.dot118021XGrptxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 24, 8);
- memcpy(&padapter->securitypriv.dot118021XGrprxmickey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial + 16, 8);
- }
-
- /* set group key by index */
- memcpy(&padapter->securitypriv.dot118021XGrpKey[(u8)((key->KeyIndex) & 0x03)], key->KeyMaterial, key->KeyLength);
-
- key->KeyIndex = key->KeyIndex & 0x03;
-
- padapter->securitypriv.binstallGrpkey = true;
-
- padapter->securitypriv.bcheck_grpkey = false;
-
- res = rtw_set_key(padapter, &padapter->securitypriv, key->KeyIndex, 1);
-
- if (res == _FAIL)
- ret = _FAIL;
-
- goto exit;
-
- } else { /* Pairwise Key */
- u8 res;
-
- pbssid = get_bssid(&padapter->mlmepriv);
- stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
-
- if (stainfo) {
- memset(&stainfo->dot118021x_UncstKey, 0, 16);/* clear keybuffer */
-
- memcpy(&stainfo->dot118021x_UncstKey, key->KeyMaterial, 16);
-
- if (encryptionalgo == _TKIP_) {
- padapter->securitypriv.busetkipkey = false;
-
- /* _set_timer(&padapter->securitypriv.tkip_timer, 50); */
-
- /* if TKIP, save the Receive/Transmit MIC key in KeyMaterial[128-255] */
- if ((key->KeyIndex & 0x10000000)) {
- memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 16, 8);
- memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 24, 8);
-
- } else {
- memcpy(&stainfo->dot11tkiptxmickey, key->KeyMaterial + 24, 8);
- memcpy(&stainfo->dot11tkiprxmickey, key->KeyMaterial + 16, 8);
- }
- }
-
- /* Set key to CAM through H2C command */
- if (bgrouptkey) /* never go to here */
- res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, false);
- else
- res = rtw_setstakey_cmd(padapter, (unsigned char *)stainfo, true);
- if (!res)
- ret = _FAIL;
- }
- }
-exit:
-
- return ret;
-}
-
-u8 rtw_set_802_11_remove_key(struct adapter *padapter, struct ndis_802_11_remove_key *key)
-{
- u8 *pbssid;
- struct sta_info *stainfo;
- u8 bgroup = (key->KeyIndex & 0x4000000) > 0 ? false : true;
- u8 keyIndex = (u8)key->KeyIndex & 0x03;
- u8 ret = _SUCCESS;
-
- if ((key->KeyIndex & 0xbffffffc) > 0) {
- ret = _FAIL;
- goto exit;
- }
-
- if (bgroup) {
- /* clear group key by index */
-
- memset(&padapter->securitypriv.dot118021XGrpKey[keyIndex], 0, 16);
-
- /* \todo Send a H2C Command to Firmware for removing this Key in CAM Entry. */
- } else {
- pbssid = get_bssid(&padapter->mlmepriv);
- stainfo = rtw_get_stainfo(&padapter->stapriv, pbssid);
- if (stainfo) {
- /* clear key by BSSID */
- memset(&stainfo->dot118021x_UncstKey, 0, 16);
-
- /* \todo Send a H2C Command to Firmware for disable this Key in CAM Entry. */
- } else {
- ret = _FAIL;
- goto exit;
- }
- }
-exit:
-
- return ret;
-}
-
/*
* rtw_get_cur_max_rate -
* @adapter: pointer to struct adapter structure
@@ -786,11 +463,6 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
u16 mcs_rate = 0;
u32 ht_ielen = 0;
- if (adapter->registrypriv.mp_mode == 1) {
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- return 0;
- }
-
if ((!check_fwstate(pmlmepriv, _FW_LINKED)) &&
(!check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
return 0;
@@ -808,7 +480,7 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
short_GI_20 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_20) ? 1 : 0;
short_GI_40 = (le16_to_cpu(pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info) & IEEE80211_HT_CAP_SGI_40) ? 1 : 0;
- rtw_hal_get_hwreg(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(adapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
max_rate = rtw_mcs_rate(
rf_type,
bw_40MHz & (pregistrypriv->cbw40_enable),
@@ -830,62 +502,3 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
return max_rate;
}
-
-/*
-* rtw_set_scan_mode -
-* @adapter: pointer to struct adapter structure
-* @scan_mode:
-*
-* Return _SUCCESS or _FAIL
-*/
-int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode)
-{
- if (scan_mode != SCAN_ACTIVE && scan_mode != SCAN_PASSIVE)
- return _FAIL;
-
- adapter->mlmepriv.scan_mode = scan_mode;
-
- return _SUCCESS;
-}
-
-/*
-* rtw_set_channel_plan -
-* @adapter: pointer to struct adapter structure
-* @channel_plan:
-*
-* Return _SUCCESS or _FAIL
-*/
-int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan)
-{
- /* handle by cmd_thread to sync with scan operation */
- return rtw_set_chplan_cmd(adapter, channel_plan, 1);
-}
-
-/*
-* rtw_set_country -
-* @adapter: pointer to struct adapter structure
-* @country_code: string of country code
-*
-* Return _SUCCESS or _FAIL
-*/
-int rtw_set_country(struct adapter *adapter, const char *country_code)
-{
- int channel_plan = RT_CHANNEL_DOMAIN_GLOBAL_DOAMIN_2G;
-
- DBG_88E("%s country_code:%s\n", __func__, country_code);
-
- /* TODO: should have a table to match country code and RT_CHANNEL_DOMAIN */
- /* TODO: should consider 2-character and 3-character country code */
- if (0 == strcmp(country_code, "US"))
- channel_plan = RT_CHANNEL_DOMAIN_FCC;
- else if (0 == strcmp(country_code, "EU"))
- channel_plan = RT_CHANNEL_DOMAIN_ETSI;
- else if (0 == strcmp(country_code, "JP"))
- channel_plan = RT_CHANNEL_DOMAIN_MKK;
- else if (0 == strcmp(country_code, "CN"))
- channel_plan = RT_CHANNEL_DOMAIN_CHINA;
- else
- DBG_88E("%s unknown country_code:%s\n", __func__, country_code);
-
- return rtw_set_channel_plan(adapter, channel_plan);
-}
diff --git a/drivers/staging/r8188eu/core/rtw_iol.c b/drivers/staging/r8188eu/core/rtw_iol.c
index 5c1b19679cad..7e78b47c1284 100644
--- a/drivers/staging/r8188eu/core/rtw_iol.c
+++ b/drivers/staging/r8188eu/core/rtw_iol.c
@@ -13,15 +13,14 @@ struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter)
xmit_frame = rtw_alloc_xmitframe(pxmitpriv);
if (!xmit_frame) {
DBG_88E("%s rtw_alloc_xmitframe return null\n", __func__);
- goto exit;
+ return NULL;
}
xmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
if (!xmitbuf) {
DBG_88E("%s rtw_alloc_xmitbuf return null\n", __func__);
rtw_free_xmitframe(pxmitpriv, xmit_frame);
- xmit_frame = NULL;
- goto exit;
+ return NULL;
}
xmit_frame->frame_tag = MGNT_FRAMETAG;
@@ -35,7 +34,7 @@ struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter)
pattrib->subtype = WIFI_BEACON;
pattrib->pktlen = 0;
pattrib->last_txcmdsz = 0;
-exit:
+
return xmit_frame;
}
@@ -72,16 +71,6 @@ bool rtw_IOL_applied(struct adapter *adapter)
return false;
}
-int rtw_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
-{
- return rtw_hal_iol_cmd(adapter, xmit_frame, max_wating_ms, bndy_cnt);
-}
-
-int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary)
-{
- return _SUCCESS;
-}
-
int _rtw_IOL_append_WB_cmd(struct xmit_frame *xmit_frame, u16 addr, u8 value, u8 mask)
{
struct ioreg_cfg cmd = {8, IOREG_CMD_WB_REG, 0x0, 0x0, 0x0};
@@ -173,20 +162,3 @@ u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame)
}
return is_cmd_bndy;
}
-
-void rtw_IOL_cmd_buf_dump(struct adapter *Adapter, int buf_len, u8 *pbuf)
-{
- int i;
- int j = 1;
-
- pr_info("###### %s ######\n", __func__);
- for (i = 0; i < buf_len; i++) {
- printk("%02x-", *(pbuf + i));
-
- if (j % 32 == 0)
- printk("\n");
- j++;
- }
- printk("\n");
- pr_info("=============ioreg_cmd len=%d===============\n", buf_len);
-}
diff --git a/drivers/staging/r8188eu/core/rtw_led.c b/drivers/staging/r8188eu/core/rtw_led.c
index b33e34cce12e..0e3453639a8b 100644
--- a/drivers/staging/r8188eu/core/rtw_led.c
+++ b/drivers/staging/r8188eu/core/rtw_led.c
@@ -4,11 +4,6 @@
#include "../include/drv_types.h"
#include "../include/rtw_led.h"
-/* */
-/* Description: */
-/* Callback function of LED BlinkTimer, */
-/* it just schedules to corresponding BlinkWorkItem/led_blink_hdl */
-/* */
void BlinkTimerCallback(struct timer_list *t)
{
struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
@@ -20,21 +15,12 @@ void BlinkTimerCallback(struct timer_list *t)
_set_workitem(&pLed->BlinkWorkItem);
}
-/* */
-/* Description: */
-/* Callback function of LED BlinkWorkItem. */
-/* We dispatch the actual LED blink action according to LedStrategy. */
-/* */
void BlinkWorkItemCallback(struct work_struct *work)
{
struct LED_871x *pLed = container_of(work, struct LED_871x, BlinkWorkItem);
BlinkHandler(pLed);
}
-/* */
-/* Description: */
-/* Reset status of LED_871x object. */
-/* */
void ResetLedStatus(struct LED_871x *pLed)
{
pLed->CurrLedState = RTW_LED_OFF; /* Current LED state. */
@@ -52,8 +38,6 @@ void ResetLedStatus(struct LED_871x *pLed)
pLed->bLedScanBlinkInProgress = false;
}
-/*Description: */
-/* Initialize an LED_871x object. */
void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_871x LedPin)
{
pLed->padapter = padapter;
@@ -65,10 +49,6 @@ void InitLed871x(struct adapter *padapter, struct LED_871x *pLed, enum LED_PIN_8
_init_workitem(&pLed->BlinkWorkItem, BlinkWorkItemCallback, pLed);
}
-/* */
-/* Description: */
-/* DeInitialize an LED_871x object. */
-/* */
void DeInitLed871x(struct LED_871x *pLed)
{
_cancel_workitem_sync(&pLed->BlinkWorkItem);
@@ -76,84 +56,6 @@ void DeInitLed871x(struct LED_871x *pLed)
ResetLedStatus(pLed);
}
-/* */
-/* Description: */
-/* Implementation of LED blinking behavior. */
-/* It toggle off LED and schedule corresponding timer if necessary. */
-/* */
-
-static void SwLedBlink(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
-
- /* Determine if we shall change LED state again. */
- pLed->BlinkTimes--;
- switch (pLed->CurrLedState) {
- case LED_BLINK_NORMAL:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_StartToBlink:
- if (check_fwstate(pmlmepriv, _FW_LINKED) && check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- bStopBlinking = true;
- if (check_fwstate(pmlmepriv, _FW_LINKED) &&
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) ||
- check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)))
- bStopBlinking = true;
- else if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- case LED_BLINK_WPS:
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- break;
- default:
- bStopBlinking = true;
- break;
- }
-
- if (bStopBlinking) {
- if (check_fwstate(pmlmepriv, _FW_LINKED) && !pLed->bLedOn) {
- SwLedOn(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED) && pLed->bLedOn) {
- SwLedOff(padapter, pLed);
- }
- pLed->BlinkTimes = 0;
- pLed->bLedBlinkInProgress = false;
- } else {
- /* Assign LED state to toggle. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- /* Schedule a timer to toggle LED state. */
- switch (pLed->CurrLedState) {
- case LED_BLINK_NORMAL:
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- break;
- case LED_BLINK_SLOWLY:
- case LED_BLINK_StartToBlink:
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- break;
- case LED_BLINK_WPS:
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LONG_INTERVAL);
- break;
- default:
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- break;
- }
- }
-}
-
static void SwLedBlink1(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
@@ -240,7 +142,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
pLed->BlinkingLedState = RTW_LED_ON;
_set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
}
- pLed->BlinkTimes = 0;
pLed->bLedBlinkInProgress = false;
} else {
if (pLed->bLedOn)
@@ -283,421 +184,6 @@ static void SwLedBlink1(struct LED_871x *pLed)
}
}
-static void SwLedBlink2(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
-
- switch (pLed->CurrLedState) {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn(padapter, pLed);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff(padapter, pLed);
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn(padapter, pLed);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- SwLedOff(padapter, pLed);
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- }
- break;
- default:
- break;
- }
-}
-
-static void SwLedBlink3(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- SwLedOn(padapter, pLed);
- } else {
- if (pLed->CurrLedState != LED_BLINK_WPS_STOP)
- SwLedOff(padapter, pLed);
- }
-
- switch (pLed->CurrLedState) {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- SwLedOn(padapter, pLed);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff(padapter, pLed);
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- SwLedOn(padapter, pLed);
- } else if (!check_fwstate(pmlmepriv, _FW_LINKED)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedOn)
- SwLedOff(padapter, pLed);
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_WPS:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- break;
- case LED_BLINK_WPS_STOP: /* WPS success */
- if (pLed->BlinkingLedState == RTW_LED_ON) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
- bStopBlinking = false;
- } else {
- bStopBlinking = true;
- }
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- SwLedOn(padapter, pLed);
- }
- pLed->bLedWPSBlinkInProgress = false;
- }
- break;
- default:
- break;
- }
-}
-
-static void SwLedBlink4(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct LED_871x *pLed1 = &ledpriv->SwLed1;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
-
- if (!pLed1->bLedWPSBlinkInProgress && pLed1->BlinkingLedState == LED_UNKNOWN) {
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
- SwLedOff(padapter, pLed1);
- }
-
- switch (pLed->CurrLedState) {
- case LED_BLINK_SLOWLY:
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
- break;
- case LED_BLINK_StartToBlink:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- }
- break;
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = false;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = false;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
- }
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
- }
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_WPS:
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- }
- break;
- case LED_BLINK_WPS_STOP: /* WPS authentication fail */
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- break;
- case LED_BLINK_WPS_STOP_OVERLAP: /* WPS session overlap */
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0) {
- if (pLed->bLedOn)
- pLed->BlinkTimes = 1;
- else
- bStopBlinking = true;
- }
-
- if (bStopBlinking) {
- pLed->BlinkTimes = 10;
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_LINK_INTERVAL_ALPHA);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
-
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- }
- break;
- default:
- break;
- }
-}
-
-static void SwLedBlink5(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
- u8 bStopBlinking = false;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
-
- switch (pLed->CurrLedState) {
- case LED_BLINK_SCAN:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
-
- pLed->bLedScanBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- }
- break;
- case LED_BLINK_TXRX:
- pLed->BlinkTimes--;
- if (pLed->BlinkTimes == 0)
- bStopBlinking = true;
-
- if (bStopBlinking) {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedOn)
- SwLedOff(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (!pLed->bLedOn)
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
-
- pLed->bLedBlinkInProgress = false;
- } else {
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on && padapter->pwrctrlpriv.rfoff_reason > RF_CHANGE_BY_PS) {
- SwLedOff(padapter, pLed);
- } else {
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- }
- break;
-
- default:
- break;
- }
-}
-
-static void SwLedBlink6(struct LED_871x *pLed)
-{
- struct adapter *padapter = pLed->padapter;
-
- /* Change LED according to BlinkingLedState specified. */
- if (pLed->BlinkingLedState == RTW_LED_ON)
- SwLedOn(padapter, pLed);
- else
- SwLedOff(padapter, pLed);
-}
-
- /* ALPHA, added by chiyoko, 20090106 */
static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
struct led_priv *ledpriv = &padapter->ledpriv;
@@ -902,662 +388,14 @@ static void SwLedControlMode1(struct adapter *padapter, enum LED_CTL_MODE LedAct
}
}
- /* Arcadyan/Sitecom , added by chiyoko, 20090216 */
-static void SwLedControlMode2(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct LED_871x *pLed = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_LINK:
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_STOP_WPS:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_STOP_WPS_FAIL:
- pLed->bLedWPSBlinkInProgress = false;
- if (padapter->pwrctrlpriv.rf_pwrstate != rf_on) {
- SwLedOff(padapter, pLed);
- } else {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- default:
- break;
- }
-}
-
- /* COREGA, added by chiyoko, 20090316 */
- static void SwLedControlMode3(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct LED_871x *pLed = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_SITE_SURVEY:
- if (pmlmepriv->LinkDetectInfo.bBusyTraffic) {
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if ((!pLed->bLedBlinkInProgress) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_LINK:
- if (IS_LED_WPS_BLINKING(pLed))
- return;
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
-
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (!pLed->bLedWPSBlinkInProgress) {
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_STOP_WPS:
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- } else {
- pLed->bLedWPSBlinkInProgress = true;
- }
-
- pLed->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_WPS_SUCESS_INTERVAL_ALPHA);
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_STOP_WPS_FAIL:
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- case LED_CTL_START_TO_LINK:
- case LED_CTL_NO_LINK:
- if (!IS_LED_BLINKING(pLed)) {
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, 0);
- }
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- default:
- break;
- }
-}
-
- /* Edimax-Belkin, added by chiyoko, 20090413 */
-static void SwLedControlMode4(struct adapter *padapter, enum LED_CTL_MODE LedAction)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct LED_871x *pLed = &ledpriv->SwLed0;
- struct LED_871x *pLed1 = &ledpriv->SwLed1;
-
- switch (LedAction) {
- case LED_CTL_START_TO_LINK:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- _cancel_timer_ex(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- _set_timer(&pLed->BlinkTimer, 0);
- }
-
- if (!pLed->bLedStartToLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
-
- pLed->bLedStartToLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_StartToBlink;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- }
- }
- break;
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- /* LED1 settings */
- if (LedAction == LED_CTL_LINK) {
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- _cancel_timer_ex(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- _set_timer(&pLed->BlinkTimer, 0);
- }
- }
-
- if (!pLed->bLedNoLinkBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_SITE_SURVEY:
- if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (IS_LED_WPS_BLINKING(pLed))
- return;
-
- if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (!pLed->bLedBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN || IS_LED_WPS_BLINKING(pLed))
- return;
- if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_START_WPS: /* wait until xinpin finish */
- case LED_CTL_START_WPS_BOTTON:
- if (pLed1->bLedWPSBlinkInProgress) {
- pLed1->bLedWPSBlinkInProgress = false;
- _cancel_timer_ex(&pLed1->BlinkTimer);
-
- pLed1->BlinkingLedState = RTW_LED_OFF;
- pLed1->CurrLedState = RTW_LED_OFF;
-
- if (pLed1->bLedOn)
- _set_timer(&pLed->BlinkTimer, 0);
- }
-
- if (!pLed->bLedWPSBlinkInProgress) {
- if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- pLed->bLedWPSBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_WPS;
- if (pLed->bLedOn) {
- pLed->BlinkingLedState = RTW_LED_OFF;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SLOWLY_INTERVAL);
- } else {
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- }
- }
- break;
- case LED_CTL_STOP_WPS: /* WPS connect success */
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
-
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
-
- break;
- case LED_CTL_STOP_WPS_FAIL: /* WPS authentication fail */
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- _cancel_timer_ex(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
- pLed1->CurrLedState = LED_BLINK_WPS_STOP;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- break;
- case LED_CTL_STOP_WPS_FAIL_OVERLAP: /* WPS session overlap */
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- pLed->bLedNoLinkBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SLOWLY;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NO_LINK_INTERVAL_ALPHA);
-
- /* LED1 settings */
- if (pLed1->bLedWPSBlinkInProgress)
- _cancel_timer_ex(&pLed1->BlinkTimer);
- else
- pLed1->bLedWPSBlinkInProgress = true;
- pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
- pLed1->BlinkTimes = 10;
- if (pLed1->bLedOn)
- pLed1->BlinkingLedState = RTW_LED_OFF;
- else
- pLed1->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_NORMAL_INTERVAL);
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedNoLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedNoLinkBlinkInProgress = false;
- }
- if (pLed->bLedLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedLinkBlinkInProgress = false;
- }
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- if (pLed->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedWPSBlinkInProgress = false;
- }
- if (pLed->bLedScanBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedScanBlinkInProgress = false;
- }
- if (pLed->bLedStartToLinkBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedStartToLinkBlinkInProgress = false;
- }
- if (pLed1->bLedWPSBlinkInProgress) {
- _cancel_timer_ex(&pLed1->BlinkTimer);
- pLed1->bLedWPSBlinkInProgress = false;
- }
- pLed1->BlinkingLedState = LED_UNKNOWN;
- SwLedOff(padapter, pLed);
- SwLedOff(padapter, pLed1);
- break;
- default:
- break;
- }
-}
-
- /* Sercomm-Belkin, added by chiyoko, 20090415 */
-static void
-SwLedControlMode5(
- struct adapter *padapter,
- enum LED_CTL_MODE LedAction
-)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct LED_871x *pLed = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_POWER_ON:
- case LED_CTL_NO_LINK:
- case LED_CTL_LINK: /* solid blue */
- pLed->CurrLedState = RTW_LED_ON;
- pLed->BlinkingLedState = RTW_LED_ON;
-
- _set_timer(&pLed->BlinkTimer, 0);
- break;
- case LED_CTL_SITE_SURVEY:
- if ((pmlmepriv->LinkDetectInfo.bBusyTraffic) && (check_fwstate(pmlmepriv, _FW_LINKED))) {
- } else if (!pLed->bLedScanBlinkInProgress) {
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- pLed->bLedScanBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_SCAN;
- pLed->BlinkTimes = 24;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_SCAN_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_TX:
- case LED_CTL_RX:
- if (!pLed->bLedBlinkInProgress) {
- if (pLed->CurrLedState == LED_BLINK_SCAN)
- return;
- pLed->bLedBlinkInProgress = true;
- pLed->CurrLedState = LED_BLINK_TXRX;
- pLed->BlinkTimes = 2;
- if (pLed->bLedOn)
- pLed->BlinkingLedState = RTW_LED_OFF;
- else
- pLed->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed->BlinkTimer, LED_BLINK_FASTER_INTERVAL_ALPHA);
- }
- break;
- case LED_CTL_POWER_OFF:
- pLed->CurrLedState = RTW_LED_OFF;
- pLed->BlinkingLedState = RTW_LED_OFF;
-
- if (pLed->bLedBlinkInProgress) {
- _cancel_timer_ex(&pLed->BlinkTimer);
- pLed->bLedBlinkInProgress = false;
- }
- SwLedOff(padapter, pLed);
- break;
- default:
- break;
- }
-}
-
- /* WNC-Corega, added by chiyoko, 20090902 */
-static void
-SwLedControlMode6(
- struct adapter *padapter,
- enum LED_CTL_MODE LedAction
-)
-{
- struct led_priv *ledpriv = &padapter->ledpriv;
- struct LED_871x *pLed0 = &ledpriv->SwLed0;
-
- switch (LedAction) {
- case LED_CTL_POWER_ON:
- case LED_CTL_LINK:
- case LED_CTL_NO_LINK:
- _cancel_timer_ex(&pLed0->BlinkTimer);
- pLed0->CurrLedState = RTW_LED_ON;
- pLed0->BlinkingLedState = RTW_LED_ON;
- _set_timer(&pLed0->BlinkTimer, 0);
- break;
- case LED_CTL_POWER_OFF:
- SwLedOff(padapter, pLed0);
- break;
- default:
- break;
- }
-}
-
-/* */
-/* Description: */
-/* Handler function of LED Blinking. */
-/* We dispatch acture LED blink action according to LedStrategy. */
-/* */
void BlinkHandler(struct LED_871x *pLed)
{
struct adapter *padapter = pLed->padapter;
- struct led_priv *ledpriv = &padapter->ledpriv;
if ((padapter->bSurpriseRemoved) || (padapter->bDriverStopped))
return;
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- SwLedBlink(pLed);
- break;
- case SW_LED_MODE1:
- SwLedBlink1(pLed);
- break;
- case SW_LED_MODE2:
- SwLedBlink2(pLed);
- break;
- case SW_LED_MODE3:
- SwLedBlink3(pLed);
- break;
- case SW_LED_MODE4:
- SwLedBlink4(pLed);
- break;
- case SW_LED_MODE5:
- SwLedBlink5(pLed);
- break;
- case SW_LED_MODE6:
- SwLedBlink6(pLed);
- break;
- default:
- break;
- }
+ SwLedBlink1(pLed);
}
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
@@ -1585,28 +423,5 @@ void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
LedAction == LED_CTL_POWER_ON))
return;
- switch (ledpriv->LedStrategy) {
- case SW_LED_MODE0:
- break;
- case SW_LED_MODE1:
- SwLedControlMode1(padapter, LedAction);
- break;
- case SW_LED_MODE2:
- SwLedControlMode2(padapter, LedAction);
- break;
- case SW_LED_MODE3:
- SwLedControlMode3(padapter, LedAction);
- break;
- case SW_LED_MODE4:
- SwLedControlMode4(padapter, LedAction);
- break;
- case SW_LED_MODE5:
- SwLedControlMode5(padapter, LedAction);
- break;
- case SW_LED_MODE6:
- SwLedControlMode6(padapter, LedAction);
- break;
- default:
- break;
- }
+ SwLedControlMode1(padapter, LedAction);
}
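
For reference, a minimal sketch of how the two LED entry points read once this patch is applied. It is grounded in the '+' lines above; only the unchanged early-return checks are elided. Every LedStrategy now funnels into the single ALPHA-style mode.

/* Post-patch shape of the LED entry points (sketch, not a further change). */
void BlinkHandler(struct LED_871x *pLed)
{
	struct adapter *padapter = pLed->padapter;

	if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
		return;

	SwLedBlink1(pLed);
}

void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction)
{
	/* ... early-return checks kept from the hunk above ... */
	SwLedControlMode1(padapter, LedAction);
}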
diff --git a/drivers/staging/r8188eu/core/rtw_mlme.c b/drivers/staging/r8188eu/core/rtw_mlme.c
index 1115ff5d865a..8d14aff32f61 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme.c
@@ -14,6 +14,7 @@
#include "../include/wlan_bssdef.h"
#include "../include/rtw_ioctl_set.h"
#include "../include/usb_osintf.h"
+#include "../include/rtl8188e_dm.h"
extern unsigned char MCS_rate_2R[16];
extern unsigned char MCS_rate_1R[16];
@@ -48,8 +49,8 @@ int _rtw_init_mlme_priv(struct adapter *padapter)
pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
spin_lock_init(&pmlmepriv->lock);
- _rtw_init_queue(&pmlmepriv->free_bss_pool);
- _rtw_init_queue(&pmlmepriv->scanned_queue);
+ rtw_init_queue(&pmlmepriv->free_bss_pool);
+ rtw_init_queue(&pmlmepriv->scanned_queue);
set_scanned_network_val(pmlmepriv, 0);
@@ -88,7 +89,6 @@ static void rtw_mfree_mlme_priv_lock(struct mlme_priv *pmlmepriv)
{
}
-#if defined(CONFIG_88EU_AP_MODE)
static void rtw_free_mlme_ie_data(u8 **ppie, u32 *plen)
{
kfree(*ppie);
@@ -111,11 +111,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
rtw_free_mlme_ie_data(&pmlmepriv->p2p_go_probe_resp_ie, &pmlmepriv->p2p_go_probe_resp_ie_len);
rtw_free_mlme_ie_data(&pmlmepriv->p2p_assoc_req_ie, &pmlmepriv->p2p_assoc_req_ie_len);
}
-#else
-void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv)
-{
-}
-#endif
void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
{
@@ -130,42 +125,6 @@ void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv)
}
-int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork)
-{
-
- if (!pnetwork)
- goto exit;
-
- spin_lock_bh(&queue->lock);
-
- list_add_tail(&pnetwork->list, &queue->queue);
-
- spin_unlock_bh(&queue->lock);
-
-exit:
-
- return _SUCCESS;
-}
-
-struct wlan_network *_rtw_dequeue_network(struct __queue *queue)
-{
- struct wlan_network *pnetwork;
-
- spin_lock_bh(&queue->lock);
-
- if (list_empty(&queue->queue)) {
- pnetwork = NULL;
- } else {
- pnetwork = container_of((&queue->queue)->next, struct wlan_network, list);
-
- list_del_init(&pnetwork->list);
- }
-
- spin_unlock_bh(&queue->lock);
-
- return pnetwork;
-}
-
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv)/* _queue *free_queue) */
{
struct wlan_network *pnetwork;
@@ -331,11 +290,6 @@ u16 rtw_get_capability(struct wlan_bssid_ex *bss)
return le16_to_cpu(val);
}
-u8 *rtw_get_timestampe_from_ie(u8 *ie)
-{
- return ie + 0;
-}
-
u8 *rtw_get_beacon_interval_from_ie(u8 *ie)
{
return ie + 8;
@@ -469,7 +423,7 @@ void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
u8 sq_final;
long rssi_final;
- rtw_hal_antdiv_rssi_compared(padapter, dst, src); /* this will update src.Rssi, need consider again */
+ AntDivCompare8188E(padapter, dst, src); /* this will update src.Rssi, need consider again */
/* The rule below is 1/5 for sample value, 4/5 for history value */
if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && is_same_network(&padapter->mlmepriv.cur_network.network, src)) {
@@ -550,7 +504,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* If there are no more slots, expire the oldest */
pnetwork = oldest;
- rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
memcpy(&pnetwork->network, target, get_wlan_bssid_ex_sz(target));
/* variable initialize */
pnetwork->fixed = false;
@@ -573,7 +527,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
bssid_ex_sz = get_wlan_bssid_ex_sz(target);
target->Length = bssid_ex_sz;
- rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
+ GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &target->PhyInfo.Optimum_antenna);
memcpy(&pnetwork->network, target, bssid_ex_sz);
pnetwork->last_scanned = jiffies;
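
A pattern repeated throughout this file: accesses that used to go through the rtw_hal_get_def_var()/rtw_hal_set_hwreg() indirection now call the 8188EU-specific helpers directly. A minimal before/after sketch, with names taken from the hunks above:

	u8 cur_ant;

	/* before: rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant); */
	GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant);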
@@ -611,9 +565,7 @@ static void rtw_add_network(struct adapter *adapter,
struct wlan_bssid_ex *pnetwork)
{
-#if defined(CONFIG_88EU_P2P)
rtw_wlan_bssid_ex_remove_p2p_attr(pnetwork, P2P_ATTR_GROUP_INFO);
-#endif
update_current_network(adapter, pnetwork);
rtw_update_scanned_network(adapter, pnetwork);
@@ -945,29 +897,6 @@ inline void rtw_indicate_scan_done(struct adapter *padapter, bool aborted)
rtw_os_indicate_scan_done(padapter, aborted);
}
-void rtw_scan_abort(struct adapter *adapter)
-{
- u32 start;
- struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
- struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
-
- start = jiffies;
- pmlmeext->scan_abort = true;
- while (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) &&
- rtw_get_passing_time_ms(start) <= 200) {
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- break;
- DBG_88E(FUNC_NDEV_FMT"fw_state=_FW_UNDER_SURVEY!\n", FUNC_NDEV_ARG(adapter->pnetdev));
- msleep(20);
- }
- if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY)) {
- if (!adapter->bDriverStopped && !adapter->bSurpriseRemoved)
- DBG_88E(FUNC_NDEV_FMT"waiting for scan_abort time out!\n", FUNC_NDEV_ARG(adapter->pnetdev));
- rtw_indicate_scan_done(adapter, true);
- }
- pmlmeext->scan_abort = false;
-}
-
static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, struct wlan_network *pnetwork)
{
int i;
@@ -984,7 +913,7 @@ static struct sta_info *rtw_joinbss_update_stainfo(struct adapter *padapter, str
psta->aid = pnetwork->join_res;
psta->mac_id = 0;
/* sta mode */
- rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, true);
/* security related */
if (padapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X) {
padapter->securitypriv.binstallGrpkey = false;
@@ -1200,24 +1129,20 @@ void rtw_joinbss_event_callback(struct adapter *adapter, u8 *pbuf)
static u8 search_max_mac_id(struct adapter *padapter)
{
u8 mac_id;
-#if defined(CONFIG_88EU_AP_MODE)
u8 aid;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct sta_priv *pstapriv = &padapter->stapriv;
-#endif
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
-#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
for (aid = (pstapriv->max_num_sta); aid > 0; aid--) {
if (pstapriv->sta_aid[aid - 1])
break;
}
mac_id = aid + 1;
- } else
-#endif
- {/* adhoc id = 31~2 */
+ } else {
+ /* adhoc id = 31~2 */
for (mac_id = (NUM_STA - 1); mac_id >= IBSS_START_MAC_ID; mac_id--) {
if (pmlmeinfo->FW_sta_info[mac_id].status == 1)
break;
@@ -1237,11 +1162,10 @@ void rtw_sta_media_status_rpt(struct adapter *adapter, struct sta_info *psta,
return;
macid = search_max_mac_id(adapter);
- rtw_hal_set_hwreg(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
+ SetHwReg8188EU(adapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&macid);
/* MACID|OPMODE:1 connect */
media_status_rpt = (u16)((psta->mac_id << 8) | mstatus);
- rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT,
- (u8 *)&media_status_rpt);
+ SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status_rpt);
}
void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
@@ -1255,14 +1179,12 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
if (!rtw_access_ctrl(adapter, pstassoc->macaddr))
return;
-#if defined(CONFIG_88EU_AP_MODE)
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta)
rtw_indicate_sta_assoc_event(adapter, psta);
return;
}
-#endif
/* for AD-HOC mode */
psta = rtw_get_stainfo(&adapter->stapriv, pstassoc->macaddr);
if (psta)
@@ -1276,7 +1198,7 @@ void rtw_stassoc_event_callback(struct adapter *adapter, u8 *pbuf)
psta->mac_id = (uint)pstassoc->cam_id;
DBG_88E("%s\n", __func__);
/* for ad-hoc mode */
- rtw_hal_set_odm_var(adapter, HAL_ODM_STA_INFO, psta, true);
+ rtl8188e_SetHalODMVar(adapter, HAL_ODM_STA_INFO, psta, true);
rtw_sta_media_status_rpt(adapter, psta, 1);
if (adapter->securitypriv.dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psta->dot118021XPrivacy = adapter->securitypriv.dot11PrivacyAlgrthm;
@@ -1323,7 +1245,7 @@ void rtw_stadel_event_callback(struct adapter *adapter, u8 *pbuf)
u16 media_status;
media_status = (mac_id << 8) | 0; /* MACID|OPMODE:0 means disconnect */
/* for STA, AP, ADHOC mode, report disconnect stauts to FW */
- rtw_hal_set_hwreg(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ SetHwReg8188EU(adapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -1486,10 +1408,8 @@ void rtw_dynamic_check_timer_handlder(struct adapter *adapter)
rtw_dynamic_chk_wk_cmd(adapter);
if (pregistrypriv->wifi_spec == 1) {
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
-#endif
{
/* auto site survey */
rtw_auto_scan_handler(adapter);
@@ -1618,10 +1538,10 @@ int rtw_select_and_join_from_scanned_queue(struct mlme_priv *pmlmepriv)
rtw_free_assoc_resources(adapter, 0);
}
- rtw_hal_get_def_var(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &(supp_ant_div));
+ GetHalDefVar8188EUsb(adapter, HAL_DEF_IS_SUPPORT_ANT_DIV, &supp_ant_div);
if (supp_ant_div) {
u8 cur_ant;
- rtw_hal_get_def_var(adapter, HAL_DEF_CURRENT_ANTENNA, &(cur_ant));
+ GetHalDefVar8188EUsb(adapter, HAL_DEF_CURRENT_ANTENNA, &cur_ant);
DBG_88E("#### Opt_Ant_(%s), cur_Ant(%s)\n",
(2 == candidate->network.PhyInfo.Optimum_antenna) ? "A" : "B",
(2 == cur_ant) ? "A" : "B"
@@ -1690,8 +1610,6 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
goto exit;
}
- memset(psetkeyparm, 0, sizeof(struct setkey_parm));
-
if (psecuritypriv->dot11AuthAlgrthm == dot11AuthAlgrthm_8021X)
psetkeyparm->algorithm = (unsigned char)psecuritypriv->dot118021XGrpPrivacy;
else
@@ -1722,6 +1640,8 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
psetkeyparm->grpkey = 1;
break;
default:
+ kfree(psetkeyparm);
+ kfree(pcmd);
res = _FAIL;
goto exit;
}
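
The two kfree() calls added in the default: branch plug a leak: rtw_set_key() allocates pcmd and psetkeyparm before validating the algorithm, so the invalid-algorithm exit must release both. The sketch below shows the equivalent goto-unwind idiom often used for this; it is an illustration only, not this driver's code, and the case labels are illustrative. It assumes <linux/slab.h> for kzalloc()/kfree().

/* Hedged sketch: free everything allocated so far when validation fails. */
static int setkey_sketch(int algorithm)
{
	struct setkey_parm *parm;
	struct cmd_obj *pcmd;
	int res = 0;

	pcmd = kzalloc(sizeof(*pcmd), GFP_KERNEL);
	if (!pcmd)
		return -ENOMEM;
	parm = kzalloc(sizeof(*parm), GFP_KERNEL);
	if (!parm) {
		res = -ENOMEM;
		goto free_cmd;
	}

	switch (algorithm) {
	case _WEP40_:
	case _WEP104_:
	case _TKIP_:
	case _AES_:
		break;
	default:
		res = -EINVAL;
		goto free_parm;	/* mirrors the kfree() pair added above */
	}

	/* ... build and enqueue the command as rtw_set_key() does ... */
	return 0;

free_parm:
	kfree(parm);
free_cmd:
	kfree(pcmd);
	return res;
}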
@@ -1959,10 +1879,10 @@ void rtw_joinbss_reset(struct adapter *padapter)
threshold = 1;
else
threshold = 0;
- rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
} else {
threshold = 1;
- rtw_hal_set_hwreg(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
+ SetHwReg8188EU(padapter, HW_VAR_RXDMA_AGG_PG_TH, (u8 *)(&threshold));
}
}
@@ -2002,15 +1922,15 @@ unsigned int rtw_restructure_ht_ie(struct adapter *padapter, u8 *in_ie, u8 *out_
IEEE80211_HT_CAP_TX_STBC |
IEEE80211_HT_CAP_DSSSCCK40);
- rtw_hal_get_def_var(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
- rtw_hal_get_def_var(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_RX_PACKET_OFFSET, &rx_packet_offset);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_MAX_RECVBUF_SZ, &max_recvbuf_sz);
/*
AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
AMPDU_para [4:2]:Min MPDU Start Spacing
*/
- rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ GetHalDefVar8188EUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
ht_capie.ampdu_params_info = (max_rx_ampdu_factor & 0x03);
if (padapter->securitypriv.dot11PrivacyAlgrthm == _AES_)
@@ -2081,7 +2001,7 @@ void rtw_update_ht_cap(struct adapter *padapter, u8 *pie, uint ie_len)
int i;
u8 rf_type;
- padapter->HalFunc.GetHwRegHandler(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
for (i = 0; i < 16; i++) {
diff --git a/drivers/staging/r8188eu/core/rtw_mlme_ext.c b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
index 5a472a4954b0..55c3d4a6faeb 100644
--- a/drivers/staging/r8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/r8188eu/core/rtw_mlme_ext.c
@@ -10,6 +10,9 @@
#include "../include/wlan_bssdef.h"
#include "../include/mlme_osdep.h"
#include "../include/recv_osdep.h"
+#include "../include/rtl8188e_sreset.h"
+#include "../include/rtl8188e_xmit.h"
+#include "../include/rtl8188e_dm.h"
static struct mlme_handler mlme_sta_tbl[] = {
{WIFI_ASSOCREQ, "OnAssocReq", &OnAssocReq},
@@ -362,9 +365,7 @@ int init_mlme_ext_priv(struct adapter *padapter)
init_mlme_ext_timer(padapter);
-#ifdef CONFIG_88EU_AP_MODE
init_mlme_ap_info(padapter);
-#endif
pmlmeext->max_chan_nums = init_channel_set(padapter, pmlmepriv->ChannelPlan, pmlmeext->channel_set);
init_channel_list(padapter, pmlmeext->channel_set, pmlmeext->max_chan_nums, &pmlmeext->channel_list);
@@ -393,13 +394,12 @@ void free_mlme_ext_priv(struct mlme_ext_priv *pmlmeext)
static void _mgt_dispatcher(struct adapter *padapter, struct mlme_handler *ptable, struct recv_frame *precv_frame)
{
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->rx_data;
if (ptable->func) {
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
return;
ptable->func(padapter, precv_frame);
}
@@ -409,10 +409,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
{
int index;
struct mlme_handler *ptable;
-#ifdef CONFIG_88EU_AP_MODE
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-#endif /* CONFIG_88EU_AP_MODE */
- u8 bc_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
u8 *pframe = precv_frame->rx_data;
struct sta_info *psta = rtw_get_stainfo(&padapter->stapriv, GetAddr2Ptr(pframe));
@@ -421,7 +418,7 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
/* receive the frames that ra(a1) is my address or ra(a1) is bc address. */
if (memcmp(GetAddr1Ptr(pframe), myid(&padapter->eeprompriv), ETH_ALEN) &&
- memcmp(GetAddr1Ptr(pframe), bc_addr, ETH_ALEN))
+ !is_broadcast_ether_addr(GetAddr1Ptr(pframe)))
return;
ptable = mlme_sta_tbl;
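
The two hunks above drop the locally declared bc_addr[] arrays in favour of is_broadcast_ether_addr() from <linux/etherdevice.h>. A minimal sketch of the accept-or-drop test; the helper name ra_is_for_us is hypothetical and only isolates the idiom:

#include <linux/etherdevice.h>

/* A management frame is ours when RA (addr1) is our own MAC or broadcast. */
static bool ra_is_for_us(const u8 *ra, const u8 *own_addr)
{
	return !memcmp(ra, own_addr, ETH_ALEN) ||
	       is_broadcast_ether_addr(ra);
}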
@@ -443,7 +440,6 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
psta->RxMgmtFrameSeqNum = precv_frame->attrib.seq_num;
}
-#ifdef CONFIG_88EU_AP_MODE
switch (GetFrameSubType(pframe)) {
case WIFI_AUTH:
if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
@@ -453,17 +449,8 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
fallthrough;
case WIFI_ASSOCREQ:
case WIFI_REASSOCREQ:
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_PROBEREQ:
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
- _mgt_dispatcher(padapter, ptable, precv_frame);
- else
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_BEACON:
- _mgt_dispatcher(padapter, ptable, precv_frame);
- break;
case WIFI_ACTION:
_mgt_dispatcher(padapter, ptable, precv_frame);
break;
@@ -473,12 +460,8 @@ void mgt_dispatcher(struct adapter *padapter, struct recv_frame *precv_frame)
rtw_hostapd_mlme_rx(padapter, precv_frame);
break;
}
-#else
- _mgt_dispatcher(padapter, ptable, precv_frame);
-#endif
}
-#ifdef CONFIG_88EU_P2P
static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
{
bool response = true;
@@ -492,7 +475,6 @@ static u32 p2p_listen_state_process(struct adapter *padapter, unsigned char *da)
return _SUCCESS;
}
-#endif /* CONFIG_88EU_P2P */
/****************************************************************************
@@ -512,7 +494,6 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
uint len = precv_frame->len;
u8 is_valid_p2p_probereq = false;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 wifi_test_chk_rate = 1;
@@ -547,7 +528,6 @@ unsigned int OnProbeReq(struct adapter *padapter, struct recv_frame *precv_frame
}
_continue:
-#endif /* CONFIG_88EU_P2P */
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
return _SUCCESS;
@@ -581,12 +561,9 @@ _issue_probersp:
unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame)
{
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 *pframe = precv_frame->rx_data;
-#endif
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_TX_PROVISION_DIS_REQ)) {
if (pwdinfo->tx_prov_disc_info.benable) {
if (!memcmp(pwdinfo->tx_prov_disc_info.peerIFAddr, GetAddr2Ptr(pframe), ETH_ALEN)) {
@@ -621,8 +598,6 @@ unsigned int OnProbeRsp(struct adapter *padapter, struct recv_frame *precv_frame
}
}
}
-#endif
-
if (pmlmeext->sitesurvey_res.state == SCAN_PROCESS) {
report_survey_event(padapter, precv_frame);
return _SUCCESS;
@@ -723,7 +698,6 @@ _END_ONBEACON_:
unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_AP_MODE
unsigned int auth_mode, ie_len;
u16 seq;
unsigned char *sa, *p;
@@ -867,9 +841,7 @@ unsigned int OnAuth(struct adapter *padapter, struct recv_frame *precv_frame)
/* Now, we are going to issue_auth... */
pstat->auth_seq = seq + 1;
-#ifdef CONFIG_88EU_AP_MODE
issue_auth(padapter, pstat, (unsigned short)(_STATS_SUCCESSFUL_));
-#endif
if (pstat->state & WIFI_FW_AUTH_SUCCESS)
pstat->auth_seq = 0;
@@ -886,11 +858,7 @@ auth_fail:
pstat->auth_seq = 2;
memcpy(pstat->hwaddr, sa, 6);
-#ifdef CONFIG_88EU_AP_MODE
issue_auth(padapter, pstat, (unsigned short)status);
-#endif
-
-#endif
return _FAIL;
}
@@ -971,7 +939,6 @@ authclnt_fail:
unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_AP_MODE
u16 capab_info;
struct rtw_ieee802_11_elems elems;
struct sta_info *pstat;
@@ -990,12 +957,10 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
uint pkt_len = precv_frame->len;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 p2p_status_code = P2P_STATUS_SUCCESS;
u8 *p2pie;
u32 p2pielen = 0;
-#endif /* CONFIG_88EU_P2P */
if ((pmlmeinfo->state & 0x03) != WIFI_FW_AP_STATE)
return _FAIL;
@@ -1319,7 +1284,6 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
if (status != _STATS_SUCCESSFUL_)
goto OnAssocReqFail;
-#ifdef CONFIG_88EU_P2P
pstat->is_p2p_device = false;
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
p2pie = rtw_get_p2p_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, pkt_len - WLAN_HDR_A3_LEN - ie_offset, NULL, &p2pielen);
@@ -1334,7 +1298,6 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
}
}
pstat->p2p_status_code = p2p_status_code;
-#endif /* CONFIG_88EU_P2P */
/* TODO: identify_proprietary_vendor_ie(); */
/* Realtek proprietary IE */
@@ -1385,7 +1348,6 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
/* now the station is qualified to join our BSS... */
if (pstat && (pstat->state & WIFI_FW_ASSOC_SUCCESS) && (_STATS_SUCCESSFUL_ == status)) {
-#ifdef CONFIG_88EU_AP_MODE
/* 1 bss_cap_update & sta_info_update */
bss_cap_update_on_sta_join(padapter, pstat);
sta_info_update(padapter, pstat);
@@ -1402,30 +1364,23 @@ unsigned int OnAssocReq(struct adapter *padapter, struct recv_frame *precv_frame
/* 3-(1) report sta add event */
report_add_sta_event(padapter, pstat->hwaddr, pstat->aid);
-#endif
}
return _SUCCESS;
asoc_class2_error:
-#ifdef CONFIG_88EU_AP_MODE
issue_deauth(padapter, (void *)GetAddr2Ptr(pframe), status);
-#endif
return _FAIL;
OnAssocReqFail:
-#ifdef CONFIG_88EU_AP_MODE
pstat->aid = 0;
if (frame_type == WIFI_ASSOCREQ)
issue_asocrsp(padapter, status, pstat, WIFI_ASSOCRSP);
else
issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
-#endif
-
-#endif /* CONFIG_88EU_AP_MODE */
return _FAIL;
}
@@ -1527,26 +1482,21 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 *pframe = precv_frame->rx_data;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
/* check A3 */
if (!(!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
return _SUCCESS;
-#ifdef CONFIG_88EU_P2P
if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
_cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
_set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
}
-#endif /* CONFIG_88EU_P2P */
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
DBG_88E("%s Reason code(%d)\n", __func__, reason);
-#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1570,9 +1520,7 @@ unsigned int OnDeAuth(struct adapter *padapter, struct recv_frame *precv_frame)
}
return _SUCCESS;
- } else
-#endif
- {
+ } else {
int ignore_received_deauth = 0;
/* Before sending the auth frame to start the STA/GC mode connection with AP/GO,
@@ -1607,26 +1555,21 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u8 *pframe = precv_frame->rx_data;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
/* check A3 */
if (!(!memcmp(GetAddr3Ptr(pframe), get_my_bssid(&pmlmeinfo->network), ETH_ALEN)))
return _SUCCESS;
-#ifdef CONFIG_88EU_P2P
if (pwdinfo->rx_invitereq_info.scan_op_ch_only) {
_cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
_set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
}
-#endif /* CONFIG_88EU_P2P */
reason = le16_to_cpu(*(__le16 *)(pframe + WLAN_HDR_A3_LEN));
DBG_88E("%s Reason code(%d)\n", __func__, reason);
-#ifdef CONFIG_88EU_AP_MODE
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
@@ -1650,9 +1593,7 @@ unsigned int OnDisassoc(struct adapter *padapter, struct recv_frame *precv_frame
}
return _SUCCESS;
- } else
-#endif
- {
+ } else {
DBG_88E_LEVEL(_drv_always_, "ap recv disassoc reason code(%d) sta:%pM\n",
reason, GetAddr3Ptr(pframe));
@@ -1794,8 +1735,6 @@ unsigned int OnAction_back(struct adapter *padapter, struct recv_frame *precv_fr
return _SUCCESS;
}
-#ifdef CONFIG_88EU_P2P
-
static int get_reg_classes_full_count(struct p2p_channels *channel_list)
{
int cnt = 0;
@@ -3472,7 +3411,6 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
unsigned char *mac;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 wpsie[255] = { 0x00 }, p2pie[255] = { 0x00 };
u16 wpsielen = 0, p2pielen = 0;
@@ -3506,8 +3444,8 @@ static int _issue_probereq_p2p(struct adapter *padapter, u8 *da, int wait_ack)
memcpy(pwlanhdr->addr3, pwdinfo->p2p_peer_interface_addr, ETH_ALEN);
} else {
/* broadcast probe request frame */
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
+ eth_broadcast_addr(pwlanhdr->addr3);
}
}
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
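
On the transmit side the same header provides eth_broadcast_addr(), which fills an address buffer with ff:ff:ff:ff:ff:ff and replaces the open-coded bc_addr copy. A minimal sketch:

#include <linux/etherdevice.h>

	u8 addr1[ETH_ALEN];

	eth_broadcast_addr(addr1);	/* was: memcpy(addr1, bc_addr, ETH_ALEN) */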
@@ -3751,45 +3689,6 @@ inline void issue_probereq_p2p(struct adapter *adapter, u8 *da)
_issue_probereq_p2p(adapter, da, false);
}
-int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt, int wait_ms)
-{
- int ret;
- int i = 0;
- u32 start = jiffies;
-
- do {
- ret = _issue_probereq_p2p(adapter, da, wait_ms > 0);
-
- i++;
-
- if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
- break;
-
- if (i < try_cnt && wait_ms > 0 && ret == _FAIL)
- msleep(wait_ms);
- } while ((i < try_cnt) && ((ret == _FAIL) || (wait_ms == 0)));
-
- if (ret != _FAIL) {
- ret = _SUCCESS;
- goto exit;
- }
-
- if (try_cnt && wait_ms) {
- if (da)
- DBG_88E(FUNC_ADPT_FMT" to %pM, ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(adapter), da, rtw_get_oper_ch(adapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
- else
- DBG_88E(FUNC_ADPT_FMT", ch:%u%s, %d/%d in %u ms\n",
- FUNC_ADPT_ARG(adapter), rtw_get_oper_ch(adapter),
- ret == _SUCCESS ? ", acked" : "", i, try_cnt, rtw_get_passing_time_ms(start));
- }
-exit:
- return ret;
-}
-
-#endif /* CONFIG_88EU_P2P */
-
static s32 rtw_action_public_decache(struct recv_frame *recv_frame, s32 token)
{
struct adapter *adapter = recv_frame->adapter;
@@ -3827,7 +3726,6 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
u8 *pframe = precv_frame->rx_data;
u8 *frame_body;
u8 dialogToken = 0;
-#ifdef CONFIG_88EU_P2P
struct adapter *padapter = precv_frame->adapter;
uint len = precv_frame->len;
u8 *p2p_ie;
@@ -3835,7 +3733,6 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 result = P2P_STATUS_SUCCESS;
u8 empty_addr[ETH_ALEN] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
-#endif /* CONFIG_88EU_P2P */
frame_body = (unsigned char *)(pframe + sizeof(struct rtw_ieee80211_hdr_3addr));
@@ -3844,7 +3741,6 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
if (rtw_action_public_decache(precv_frame, dialogToken) == _FAIL)
return _FAIL;
-#ifdef CONFIG_88EU_P2P
_cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
/* Do nothing if the driver doesn't enable the P2P function. */
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE))
@@ -4107,7 +4003,6 @@ static unsigned int on_action_public_p2p(struct recv_frame *precv_frame)
_set_timer(&pwdinfo->restore_p2p_state_timer, P2P_PROVISION_TIMEOUT);
break;
}
-#endif /* CONFIG_88EU_P2P */
return _SUCCESS;
}
@@ -4184,7 +4079,6 @@ unsigned int OnAction_wmm(struct adapter *padapter, struct recv_frame *precv_fra
unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_P2P
u8 *frame_body;
u8 category, OUI_Subtype;
u8 *pframe = precv_frame->rx_data;
@@ -4222,7 +4116,6 @@ unsigned int OnAction_p2p(struct adapter *padapter, struct recv_frame *precv_fra
default:
break;
}
-#endif /* CONFIG_88EU_P2P */
return _SUCCESS;
}
@@ -4327,7 +4220,7 @@ void dump_mgntframe(struct adapter *padapter, struct xmit_frame *pmgntframe)
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return;
- rtw_hal_mgnt_xmit(padapter, pmgntframe);
+ rtl8188eu_mgnt_xmit(padapter, pmgntframe);
}
s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntframe, int timeout_ms)
@@ -4342,7 +4235,7 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter, struct xmit_frame *pmgntfr
rtw_sctx_init(&sctx, timeout_ms);
pxmitbuf->sctx = &sctx;
- ret = rtw_hal_mgnt_xmit(padapter, pmgntframe);
+ ret = rtl8188eu_mgnt_xmit(padapter, pmgntframe);
if (ret == _SUCCESS)
ret = rtw_sctx_wait(&sctx);
@@ -4359,16 +4252,16 @@ s32 dump_mgntframe_and_wait_ack(struct adapter *padapter, struct xmit_frame *pmg
if (padapter->bSurpriseRemoved || padapter->bDriverStopped)
return -1;
- _enter_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ mutex_lock(&pxmitpriv->ack_tx_mutex);
pxmitpriv->ack_tx = true;
pmgntframe->ack_report = 1;
- if (rtw_hal_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
+ if (rtl8188eu_mgnt_xmit(padapter, pmgntframe) == _SUCCESS) {
ret = rtw_ack_tx_wait(pxmitpriv, timeout_ms);
}
pxmitpriv->ack_tx = false;
- _exit_critical_mutex(&pxmitpriv->ack_tx_mutex, NULL);
+ mutex_unlock(&pxmitpriv->ack_tx_mutex);
return ret;
}
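
Here the driver's _enter_critical_mutex()/_exit_critical_mutex() wrappers give way to the plain kernel mutex API from <linux/mutex.h>. A minimal sketch of the resulting critical section, with field names following the hunk above:

#include <linux/mutex.h>

	mutex_lock(&pxmitpriv->ack_tx_mutex);
	pxmitpriv->ack_tx = true;
	/* ... send the frame and wait for the ack report ... */
	pxmitpriv->ack_tx = false;
	mutex_unlock(&pxmitpriv->ack_tx_mutex);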
@@ -4419,19 +4312,14 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe) {
DBG_88E("%s, alloc mgnt frame fail\n", __func__);
return;
}
-#if defined(CONFIG_88EU_AP_MODE)
spin_lock_bh(&pmlmepriv->bcn_update_lock);
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
/* update attribute */
pattrib = &pmgntframe->attrib;
@@ -4446,7 +4334,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
fctrl = &pwlanhdr->frame_ctl;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
@@ -4458,7 +4346,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
-#ifdef CONFIG_88EU_P2P
/* for P2P : Primary Device Type & Device Name */
u32 wpsielen = 0, insert_len = 0;
u8 *wpsie = NULL;
@@ -4523,9 +4410,7 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
memcpy(pframe, premainder_ie, remainder_ielen);
pframe += remainder_ielen;
pattrib->pktlen += remainder_ielen;
- } else
-#endif /* CONFIG_88EU_P2P */
- {
+ } else {
int len_diff;
memcpy(pframe, cur_network->IEs, cur_network->IELength);
len_diff = update_hidden_ssid(
@@ -4551,7 +4436,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
_clr_fwstate_(pmlmepriv, WIFI_UNDER_WPS);
}
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO)) {
u32 len;
len = build_beacon_p2p_ie(pwdinfo, pframe);
@@ -4559,7 +4443,6 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
pframe += len;
pattrib->pktlen += len;
}
-#endif /* CONFIG_88EU_P2P */
goto _issue_bcn;
}
@@ -4611,11 +4494,9 @@ void issue_beacon(struct adapter *padapter, int timeout_ms)
/* todo:HT for adhoc */
_issue_bcn:
-#if defined(CONFIG_88EU_AP_MODE)
pmlmepriv->update_bcn = false;
spin_unlock_bh(&pmlmepriv->bcn_update_lock);
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
if ((pattrib->pktlen + TXDESC_SIZE) > 512) {
DBG_88E("beacon frame too large\n");
@@ -4640,18 +4521,14 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
__le16 *fctrl;
unsigned char *mac, *bssid;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
-#if defined(CONFIG_88EU_AP_MODE)
u8 *pwps_ie;
uint wps_ielen;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
unsigned int rate_len;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe) {
@@ -4688,7 +4565,6 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
if (cur_network->IELength > MAX_IE_SZ)
return;
-#if defined(CONFIG_88EU_AP_MODE)
if ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE) {
pwps_ie = rtw_get_wps_ie(cur_network->IEs + _FIXED_IE_LENGTH_, cur_network->IELength - _FIXED_IE_LENGTH_, NULL, &wps_ielen);
@@ -4724,9 +4600,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pframe += cur_network->IELength;
pattrib->pktlen += cur_network->IELength;
}
- } else
-#endif
- {
+ } else {
/* timestamp will be inserted by hardware */
pframe += 8;
pattrib->pktlen += 8;
@@ -4775,7 +4649,6 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
/* todo:HT for adhoc */
}
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && is_valid_p2p_probereq) {
u32 len;
len = build_probe_resp_p2p_ie(pwdinfo, pframe);
@@ -4783,7 +4656,6 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
pframe += len;
pattrib->pktlen += len;
}
-#endif /* CONFIG_88EU_P2P */
pattrib->last_txcmdsz = pattrib->pktlen;
@@ -4804,7 +4676,6 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
int bssrate_len = 0;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -4830,8 +4701,8 @@ static int _issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *ps
memcpy(pwlanhdr->addr3, da, ETH_ALEN);
} else {
/* broadcast probe request frame */
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
- memcpy(pwlanhdr->addr3, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
+ eth_broadcast_addr(pwlanhdr->addr3);
}
memcpy(pwlanhdr->addr2, mac, ETH_ALEN);
@@ -4931,9 +4802,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
__le16 *fctrl;
unsigned int val32;
u16 val16;
-#ifdef CONFIG_88EU_AP_MODE
__le16 le_val16;
-#endif
int use_shared_key = 0;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
@@ -4963,8 +4832,6 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
if (psta) {/* for AP mode */
-#ifdef CONFIG_88EU_AP_MODE
-
memcpy(pwlanhdr->addr1, psta->hwaddr, ETH_ALEN);
memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, myid(&padapter->eeprompriv), ETH_ALEN);
@@ -4997,7 +4864,6 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
/* added challenging text... */
if ((psta->auth_seq == 2) && (psta->state & WIFI_FW_AUTH_STATE) && (use_shared_key == 1))
pframe = rtw_set_ie(pframe, _CHLGETXT_IE_, 128, psta->chg_txt, &pattrib->pktlen);
-#endif
} else {
__le32 le_tmp32;
__le16 le_tmp16;
@@ -5049,14 +4915,13 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
pattrib->last_txcmdsz = pattrib->pktlen;
- rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
+ rtw_wep_encrypt(padapter, pmgntframe);
DBG_88E("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
}
void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_info *pstat, int pkt_type)
{
-#ifdef CONFIG_88EU_AP_MODE
struct xmit_frame *pmgntframe;
struct rtw_ieee80211_hdr *pwlanhdr;
struct pkt_attrib *pattrib;
@@ -5070,9 +4935,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
struct wlan_bssid_ex *pnetwork = &pmlmeinfo->network;
u8 *ie = pnetwork->IEs;
__le16 lestatus, leval;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
DBG_88E("%s\n", __func__);
@@ -5175,7 +5038,6 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
pattrib->pktlen += pmlmepriv->wps_assoc_resp_ie_len;
}
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_role(pwdinfo, P2P_ROLE_GO) && (pstat->is_p2p_device)) {
u32 len;
@@ -5184,10 +5046,8 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
pframe += len;
pattrib->pktlen += len;
}
-#endif /* CONFIG_88EU_P2P */
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
-#endif
}
void issue_assocreq(struct adapter *padapter)
@@ -5208,11 +5068,9 @@ void issue_assocreq(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
int bssrate_len = 0, sta_bssrate_len = 0;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
u8 p2pie[255] = { 0x00 };
u16 p2pielen = 0;
-#endif /* CONFIG_88EU_P2P */
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -5327,7 +5185,7 @@ void issue_assocreq(struct adapter *padapter)
/* todo: disable SM power save mode */
pmlmeinfo->HT_caps.u.HT_cap_element.HT_caps_info |= cpu_to_le16(0x000c);
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
switch (rf_type) {
case RF_1T1R:
if (pregpriv->rx_stbc)
@@ -5378,8 +5236,6 @@ void issue_assocreq(struct adapter *padapter)
if (pmlmeinfo->assoc_AP_vendor == HT_IOT_PEER_REALTEK)
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, 6, REALTEK_96B_IE, &pattrib->pktlen);
-#ifdef CONFIG_88EU_P2P
-
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE) && !rtw_p2p_chk_state(pwdinfo, P2P_STATE_IDLE)) {
/* Should add the P2P IE in the association request frame. */
/* P2P OUI */
@@ -5507,8 +5363,6 @@ void issue_assocreq(struct adapter *padapter)
pframe = rtw_set_ie(pframe, _VENDOR_SPECIFIC_IE_, p2pielen, (unsigned char *)p2pie, &pattrib->pktlen);
}
-#endif /* CONFIG_88EU_P2P */
-
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
@@ -5678,9 +5532,6 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da, u16
else if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE)
SetToDs(fctrl);
- if (pattrib->mdata)
- SetMData(fctrl);
-
qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
SetPriority(qc, tid);
@@ -5770,16 +5621,12 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da, unsigned s
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
int ret = _FAIL;
__le16 le_tmp;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
-#ifdef CONFIG_88EU_P2P
if (!(rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) && (pwdinfo->rx_invitereq_info.scan_op_ch_only)) {
_cancel_timer_ex(&pwdinfo->reset_ch_sitesurvey);
_set_timer(&pwdinfo->reset_ch_sitesurvey, 10);
}
-#endif /* CONFIG_88EU_P2P */
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
if (!pmgntframe)
@@ -5869,65 +5716,6 @@ exit:
return ret;
}
-void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch, u8 ch_offset)
-{
- struct xmit_frame *pmgntframe;
- struct pkt_attrib *pattrib;
- unsigned char *pframe;
- struct rtw_ieee80211_hdr *pwlanhdr;
- __le16 *fctrl;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-
- DBG_88E(FUNC_NDEV_FMT" ra =%pM, ch:%u, offset:%u\n",
- FUNC_NDEV_ARG(padapter->pnetdev), ra, new_ch, ch_offset);
-
- pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (!pmgntframe)
- return;
-
- /* update attribute */
- pattrib = &pmgntframe->attrib;
- update_mgntframe_attrib(padapter, pattrib);
-
- memset(pmgntframe->buf_addr, 0, WLANHDR_OFFSET + TXDESC_OFFSET);
-
- pframe = (u8 *)(pmgntframe->buf_addr) + TXDESC_OFFSET;
- pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
-
- fctrl = &pwlanhdr->frame_ctl;
- *(fctrl) = 0;
-
- memcpy(pwlanhdr->addr1, ra, ETH_ALEN); /* RA */
- memcpy(pwlanhdr->addr2, myid(&padapter->eeprompriv), ETH_ALEN); /* TA */
- memcpy(pwlanhdr->addr3, ra, ETH_ALEN); /* DA = RA */
-
- SetSeqNum(pwlanhdr, pmlmeext->mgnt_seq);
- pmlmeext->mgnt_seq++;
- SetFrameSubType(pframe, WIFI_ACTION);
-
- pframe += sizeof(struct rtw_ieee80211_hdr_3addr);
- pattrib->pktlen = sizeof(struct rtw_ieee80211_hdr_3addr);
-
- /* category, action */
- {
- u8 category, action;
- category = RTW_WLAN_CATEGORY_SPECTRUM_MGMT;
- action = RTW_WLAN_ACTION_SPCT_CHL_SWITCH;
-
- pframe = rtw_set_fixed_ie(pframe, 1, &category, &pattrib->pktlen);
- pframe = rtw_set_fixed_ie(pframe, 1, &action, &pattrib->pktlen);
- }
-
- pframe = rtw_set_ie_ch_switch(pframe, &pattrib->pktlen, 0, new_ch, 0);
- pframe = rtw_set_ie_secondary_ch_offset(pframe, &pattrib->pktlen,
- hal_ch_offset_to_secondary_ch_offset(ch_offset));
-
- pattrib->last_txcmdsz = pattrib->pktlen;
-
- dump_mgntframe(padapter, pmgntframe);
-}
-
void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned char action, unsigned short status)
{
u8 category = RTW_WLAN_CATEGORY_BACK;
@@ -6016,7 +5804,7 @@ void issue_action_BA(struct adapter *padapter, unsigned char *raddr, unsigned ch
pframe = rtw_set_fixed_ie(pframe, 1, &pmlmeinfo->ADDBA_req.dialog_token, &pattrib->pktlen);
pframe = rtw_set_fixed_ie(pframe, 2, (unsigned char *)&status, &pattrib->pktlen);
BA_para_set = le16_to_cpu(pmlmeinfo->ADDBA_req.BA_para_set) & 0x3f;
- rtw_hal_get_def_var(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
+ GetHalDefVar8188EUsb(padapter, HW_VAR_MAX_RX_AMPDU_FACTOR, &max_rx_ampdu_factor);
switch (max_rx_ampdu_factor) {
case MAX_AMPDU_FACTOR_64K:
BA_para_set |= 0x1000; /* 64 buffer size */
@@ -6240,13 +6028,13 @@ unsigned int send_beacon(struct adapter *padapter)
u32 start = jiffies;
- rtw_hal_set_hwreg(padapter, HW_VAR_BCN_VALID, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_BCN_VALID, NULL);
do {
issue_beacon(padapter, 100);
issue++;
do {
yield();
- rtw_hal_get_hwreg(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
+ GetHwReg8188EU(padapter, HW_VAR_BCN_VALID, (u8 *)(&bxmitok));
poll++;
} while ((poll % 10) != 0 && !bxmitok && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
} while (!bxmitok && issue < 100 && !padapter->bSurpriseRemoved && !padapter->bDriverStopped);
@@ -6278,8 +6066,6 @@ void site_survey(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 initialgain = 0;
-
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
@@ -6299,9 +6085,7 @@ void site_survey(struct adapter *padapter)
ScanType = pmlmeext->channel_set[ch_set_idx].ScanType;
else
ScanType = SCAN_ACTIVE;
- } else
-#endif /* CONFIG_88EU_P2P */
- {
+ } else {
struct rtw_ieee80211_channel *ch;
if (pmlmeext->sitesurvey_res.channel_idx < pmlmeext->sitesurvey_res.ch_num) {
ch = &pmlmeext->sitesurvey_res.ch[pmlmeext->sitesurvey_res.channel_idx];
@@ -6311,25 +6095,18 @@ void site_survey(struct adapter *padapter)
}
if (survey_channel != 0) {
- /* PAUSE 4-AC Queue when site_survey */
- /* rtw_hal_get_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
- /* val8 |= 0x0f; */
- /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
if (pmlmeext->sitesurvey_res.channel_idx == 0)
set_channel_bwmode(padapter, survey_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
else
SelectChannel(padapter, survey_channel);
if (ScanType == SCAN_ACTIVE) { /* obey the channel plan setting... */
- #ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) ||
rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
issue_probereq_p2p(padapter, NULL);
issue_probereq_p2p(padapter, NULL);
issue_probereq_p2p(padapter, NULL);
- } else
- #endif /* CONFIG_88EU_P2P */
- {
+ } else {
int i;
for (i = 0; i < RTW_SSID_SCAN_AMOUNT; i++) {
if (pmlmeext->sitesurvey_res.ssid[i].SsidLength) {
@@ -6352,8 +6129,6 @@ void site_survey(struct adapter *padapter)
set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
} else {
/* channel number is 0 or this channel is not valid. */
-
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH)) {
if ((pwdinfo->rx_invitereq_info.scan_op_ch_only) || (pwdinfo->p2p_info.scan_op_ch_only)) {
/* Set the find_phase_state_exchange_cnt to P2P_FINDPHASE_EX_CNT. */
@@ -6369,51 +6144,41 @@ void site_survey(struct adapter *padapter)
pmlmeext->sitesurvey_res.state = SCAN_DISABLE;
initialgain = 0xff; /* restore RX GAIN */
- rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_FUNC_DIG|DYNAMIC_FUNC_HP|DYNAMIC_FUNC_SS, true); */
_set_timer(&pwdinfo->find_phase_timer, (u32)((u32)(pwdinfo->listen_dwell) * 100));
- } else
-#endif /* CONFIG_88EU_P2P */
- {
+ } else {
/* 20100721:Interrupt scan operation here. */
/* For SW antenna diversity before link, it needs to switch to another antenna and scan again. */
/* It compares the scan results and selects the better one for the connection. */
- if (rtw_hal_antdiv_before_linked(padapter)) {
+ if (AntDivBeforeLink8188E(padapter)) {
pmlmeext->sitesurvey_res.bss_cnt = 0;
pmlmeext->sitesurvey_res.channel_idx = -1;
pmlmeext->chan_scan_time = SURVEY_TO / 2;
set_survey_timer(pmlmeext, pmlmeext->chan_scan_time);
return;
}
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_SCAN) || rtw_p2p_chk_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH))
rtw_p2p_set_state(pwdinfo, rtw_p2p_pre_state(pwdinfo));
rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_NONE);
-#endif /* CONFIG_88EU_P2P */
pmlmeext->sitesurvey_res.state = SCAN_COMPLETE;
/* switch back to the original channel */
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_LISTEN))
set_channel_bwmode(padapter, pwdinfo->listen_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
else
set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
-#endif /* CONFIG_88EU_P2P */
-
- /* flush 4-AC Queue after site_survey */
- /* val8 = 0; */
- /* rtw_hal_set_hwreg(padapter, HW_VAR_TXPAUSE, (u8 *)(&val8)); */
/* config MSR */
Set_MSR(padapter, (pmlmeinfo->state & 0x3));
initialgain = 0xff; /* restore RX GAIN */
- rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
/* turn on dynamic functions */
Restore_DM_Func_Flag(padapter);
/* Switch_DM_Func(padapter, DYNAMIC_ALL_FUNC_ENABLE, true); */
@@ -6422,7 +6187,7 @@ void site_survey(struct adapter *padapter)
issue_nulldata(padapter, NULL, 0, 3, 500);
val8 = 0; /* survey done */
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
report_surveydone_event(padapter);
@@ -6487,7 +6252,7 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame, st
bssid->Rssi = precv_frame->attrib.phy_info.recvpower; /* in dBM.raw data */
bssid->PhyInfo.SignalQuality = precv_frame->attrib.phy_info.SignalQuality;/* in percentage */
bssid->PhyInfo.SignalStrength = precv_frame->attrib.phy_info.SignalStrength;/* in percentage */
- rtw_hal_get_def_var(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_CURRENT_ANTENNA, &bssid->PhyInfo.Optimum_antenna);
/* checking SSID */
p = rtw_get_ie(bssid->IEs + ie_offset, _SSID_IE_, &len, bssid->IELength - ie_offset);
@@ -6615,7 +6380,7 @@ void start_create_ibss(struct adapter *padapter)
update_capinfo(padapter, caps);
if (caps & cap_IBSS) {/* adhoc master */
val8 = 0xcf;
- rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
/* switch channel */
/* SelectChannel(padapter, pmlmeext->cur_channel, HAL_PRIME_CHNL_OFFSET_DONT_CARE); */
@@ -6632,9 +6397,9 @@ void start_create_ibss(struct adapter *padapter)
report_join_res(padapter, -1);
pmlmeinfo->state = WIFI_FW_NULL_STATE;
} else {
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
+ SetHwReg8188EU(padapter, HW_VAR_BSSID, padapter->registrypriv.dev_network.MacAddress);
join_type = 0;
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
report_join_res(padapter, 1);
pmlmeinfo->state |= WIFI_FW_ASSOC_SUCCESS;
@@ -6671,7 +6436,7 @@ void start_clnt_join(struct adapter *padapter)
val8 = (pmlmeinfo->auth_algo == dot11AuthAlgrthm_8021X) ? 0xcc : 0xcf;
- rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
/* switch channel */
set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
@@ -6688,7 +6453,7 @@ void start_clnt_join(struct adapter *padapter)
Set_MSR(padapter, WIFI_FW_ADHOC_STATE);
val8 = 0xcf;
- rtw_hal_set_hwreg(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_SEC_CFG, (u8 *)(&val8));
/* switch channel */
set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode);
@@ -7225,8 +6990,8 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
if (join_res < 0) {
join_type = 1;
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+ SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7250,10 +7015,10 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* update IOT-related issues */
update_IOT_info(padapter);
- rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
+ SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, cur_network->SupportedRates);
/* BCN interval */
- rtw_hal_set_hwreg(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
+ SetHwReg8188EU(padapter, HW_VAR_BEACON_INTERVAL, (u8 *)(&pmlmeinfo->bcn_interval));
/* update capability */
update_capinfo(padapter, pmlmeinfo->capability);
@@ -7274,13 +7039,13 @@ void mlmeext_joinbss_event_callback(struct adapter *padapter, int join_res)
/* set per sta rate after updating HT cap. */
set_sta_rate(padapter, psta);
- rtw_hal_set_hwreg(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
+ SetHwReg8188EU(padapter, HW_VAR_TX_RPT_MAX_MACID, (u8 *)&psta->mac_id);
media_status = (psta->mac_id << 8) | 1; /* MACID|OPMODE: 1 means connect */
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
+ SetHwReg8188EU(padapter, HW_VAR_H2C_MEDIA_STATUS_RPT, (u8 *)&media_status);
}
join_type = 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
if ((pmlmeinfo->state & 0x03) == WIFI_FW_STATION_STATE) {
/* correcting TSF */
@@ -7318,7 +7083,7 @@ void mlmeext_sta_add_event_callback(struct adapter *padapter, struct sta_info *p
}
join_type = 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
}
pmlmeinfo->FW_sta_info[psta->mac_id].psta = psta;
@@ -7336,8 +7101,8 @@ void mlmeext_sta_del_event_callback(struct adapter *padapter)
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if (is_client_associated_to_ap(padapter) || is_IBSS_empty(padapter)) {
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+ SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7378,9 +7143,9 @@ void _linked_rx_signal_strehgth_display(struct adapter *padapter)
else if ((pmlmeinfo->state & 0x03) == _HW_STATE_AP_)
mac_id = 2;
- rtw_hal_get_def_var(padapter, HW_DEF_RA_INFO_DUMP, &mac_id);
+ GetHalDefVar8188EUsb(padapter, HW_DEF_RA_INFO_DUMP, &mac_id);
- rtw_hal_get_def_var(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_UNDERCORATEDSMOOTHEDPWDB, &UndecoratedSmoothedPWDB);
DBG_88E("UndecoratedSmoothedPWDB:%d\n", UndecoratedSmoothedPWDB);
}
@@ -7412,7 +7177,7 @@ void linked_status_chk(struct adapter *padapter)
if (padapter->bRxRSSIDisplay)
_linked_rx_signal_strehgth_display(padapter);
- rtw_hal_sreset_linked_status_check(padapter);
+ rtl8188e_sreset_linked_status_check(padapter);
if (is_client_associated_to_ap(padapter)) {
/* linked infrastructure client mode */
@@ -7424,9 +7189,7 @@ void linked_status_chk(struct adapter *padapter)
psta = rtw_get_stainfo(pstapriv, pmlmeinfo->network.MacAddress);
if (psta) {
bool is_p2p_enable = false;
- #ifdef CONFIG_88EU_P2P
is_p2p_enable = !rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE);
- #endif
if (!chk_ap_is_alive(padapter, psta))
rx_chk = _FAIL;
@@ -7524,9 +7287,7 @@ void survey_timer_hdl(struct adapter *padapter)
struct sitesurvey_parm *psurveyPara;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif
/* issue rtw_sitesurvey_cmd */
if (pmlmeext->sitesurvey_res.state > SCAN_START) {
@@ -7534,7 +7295,6 @@ void survey_timer_hdl(struct adapter *padapter)
pmlmeext->sitesurvey_res.channel_idx++;
if (pmlmeext->scan_abort) {
- #ifdef CONFIG_88EU_P2P
if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE)) {
rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_MAX);
pmlmeext->sitesurvey_res.channel_idx = 3;
@@ -7542,9 +7302,7 @@ void survey_timer_hdl(struct adapter *padapter)
, pmlmeext->sitesurvey_res.channel_idx
, pwdinfo->find_phase_state_exchange_cnt
);
- } else
- #endif
- {
+ } else {
pmlmeext->sitesurvey_res.channel_idx = pmlmeext->sitesurvey_res.ch_num;
DBG_88E("%s idx:%d\n", __func__
, pmlmeext->sitesurvey_res.channel_idx
@@ -7647,8 +7405,7 @@ u8 setopmode_hdl(struct adapter *padapter, u8 *pbuf)
type = _HW_STATE_NOLINK_;
}
- rtw_hal_set_hwreg(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
- /* Set_NETYPE0_MSR(padapter, type); */
+ SetHwReg8188EU(padapter, HW_VAR_SET_OPMODE, (u8 *)(&type));
return H2C_SUCCESS;
}
@@ -7662,13 +7419,10 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
/* u32 initialgain; */
if (pparm->network.InfrastructureMode == Ndis802_11APMode) {
-#ifdef CONFIG_88EU_AP_MODE
-
if (pmlmeinfo->state == WIFI_FW_AP_STATE) {
/* todo: */
return H2C_SUCCESS;
}
-#endif
}
/* below is for ad-hoc master */
@@ -7689,10 +7443,6 @@ u8 createbss_hdl(struct adapter *padapter, u8 *pbuf)
Save_DM_Func_Flag(padapter);
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
- /* config the initial gain under linking, need to write the BB registers */
- /* initialgain = 0x1E; */
- /* rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain)); */
-
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7739,7 +7489,7 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* set MSR to nolink -> infra. mode */
Set_MSR(padapter, _HW_STATE_STATION_);
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
}
rtw_antenna_select_cmd(padapter, pparm->network.PhyInfo.Optimum_antenna, false);
@@ -7814,9 +7564,9 @@ u8 join_cmd_hdl(struct adapter *padapter, u8 *pbuf)
/* configure the initial gain while linking; the BB registers need to be written */
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
+ SetHwReg8188EU(padapter, HW_VAR_BSSID, pmlmeinfo->network.MacAddress);
join_type = 0;
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_JOIN, (u8 *)(&join_type));
/* cancel link timer */
_cancel_timer_ex(&pmlmeext->link_timer);
@@ -7837,8 +7587,8 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
if (is_client_associated_to_ap(padapter))
issue_deauth_ex(padapter, pnetwork->MacAddress, WLAN_REASON_DEAUTH_LEAVING, param->deauth_timeout_ms / 100, 100);
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_DISCONNECT, NULL);
- rtw_hal_set_hwreg(padapter, HW_VAR_BSSID, null_addr);
+ SetHwReg8188EU(padapter, HW_VAR_MLME_DISCONNECT, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_BSSID, null_addr);
/* restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -7846,7 +7596,7 @@ u8 disconnect_hdl(struct adapter *padapter, unsigned char *pbuf)
if (((pmlmeinfo->state & 0x03) == WIFI_FW_ADHOC_STATE) || ((pmlmeinfo->state & 0x03) == WIFI_FW_AP_STATE)) {
/* Stop BCN */
val8 = 0;
- rtw_hal_set_hwreg(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_BCN_FUNC, (u8 *)(&val8));
}
/* set MSR to no link state -> infra. mode */
@@ -7919,14 +7669,10 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
u8 val8;
u32 initialgain;
u32 i;
-
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif
if (pmlmeext->sitesurvey_res.state == SCAN_DISABLE) {
/* for first time sitesurvey_cmd */
- rtw_hal_set_hwreg(padapter, HW_VAR_CHECK_TXBUF, NULL);
pmlmeext->sitesurvey_res.state = SCAN_START;
pmlmeext->sitesurvey_res.bss_cnt = 0;
@@ -7969,22 +7715,18 @@ u8 sitesurvey_cmd_hdl(struct adapter *padapter, u8 *pbuf)
Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
/* configure the initial gain while scanning; the BB registers need to be written */
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
initialgain = 0x1E;
else
initialgain = 0x28;
-#else /* CONFIG_88EU_P2P */
- initialgain = 0x1E;
-#endif /* CONFIG_88EU_P2P */
- rtw_hal_set_hwreg(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
+ SetHwReg8188EU(padapter, HW_VAR_INITIAL_GAIN, (u8 *)(&initialgain));
/* set MSR to no link state */
Set_MSR(padapter, _HW_STATE_NOLINK_);
val8 = 1; /* under site survey */
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
pmlmeext->sitesurvey_res.state = SCAN_PROCESS;
}
@@ -8208,9 +7950,8 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
if (send_beacon(padapter) == _FAIL) {
DBG_88E("issue_beacon, fail!\n");
return H2C_PARAMETERS_ERROR;
- }
-#ifdef CONFIG_88EU_AP_MODE
- else { /* tx bc/mc frames after update TIM */
+ } else {
+ /* tx bc/mc frames after updating the TIM */
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
@@ -8246,14 +7987,13 @@ u8 tx_beacon_hdl(struct adapter *padapter, unsigned char *pbuf)
pxmitframe->attrib.qsel = 0x11;/* HIQ */
spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe))
+ if (rtl8188eu_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
spin_unlock_bh(&psta_bmc->sleep_q.lock);
}
}
-#endif
return H2C_SUCCESS;
}
diff --git a/drivers/staging/r8188eu/core/rtw_mp.c b/drivers/staging/r8188eu/core/rtw_mp.c
deleted file mode 100644
index dabdd0406f30..000000000000
--- a/drivers/staging/r8188eu/core/rtw_mp.c
+++ /dev/null
@@ -1,935 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_MP_C_
-
-#include "../include/drv_types.h"
-#include "../include/odm_precomp.h"
-#include "../include/rtl8188e_hal.h"
-
-u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask)
-{
- return rtw_hal_read_bbreg(padapter, addr, bitmask);
-}
-
-void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val)
-{
- rtw_hal_write_bbreg(padapter, addr, bitmask, val);
-}
-
-u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask)
-{
- return rtw_hal_read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask);
-}
-
-void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val)
-{
- rtw_hal_write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bitmask, val);
-}
-
-u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr)
-{
- return _read_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask);
-}
-
-void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val)
-{
- _write_rfreg(padapter, (enum rf_radio_path)rfpath, addr, bRFRegOffsetMask, val);
-}
-
-static void _init_mp_priv_(struct mp_priv *pmp_priv)
-{
- struct wlan_bssid_ex *pnetwork;
-
- memset(pmp_priv, 0, sizeof(struct mp_priv));
-
- pmp_priv->mode = MP_OFF;
-
- pmp_priv->channel = 1;
- pmp_priv->bandwidth = HT_CHANNEL_WIDTH_20;
- pmp_priv->prime_channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
- pmp_priv->rateidx = MPT_RATE_1M;
- pmp_priv->txpoweridx = 0x2A;
-
- pmp_priv->antenna_tx = ANTENNA_A;
- pmp_priv->antenna_rx = ANTENNA_AB;
-
- pmp_priv->check_mp_pkt = 0;
-
- pmp_priv->tx_pktcount = 0;
-
- pmp_priv->rx_pktcount = 0;
- pmp_priv->rx_crcerrpktcount = 0;
-
- pmp_priv->network_macaddr[0] = 0x00;
- pmp_priv->network_macaddr[1] = 0xE0;
- pmp_priv->network_macaddr[2] = 0x4C;
- pmp_priv->network_macaddr[3] = 0x87;
- pmp_priv->network_macaddr[4] = 0x66;
- pmp_priv->network_macaddr[5] = 0x55;
-
- pnetwork = &pmp_priv->mp_network.network;
- memcpy(pnetwork->MacAddress, pmp_priv->network_macaddr, ETH_ALEN);
-
- pnetwork->Ssid.SsidLength = 8;
- memcpy(pnetwork->Ssid.Ssid, "mp_871x", pnetwork->Ssid.SsidLength);
-}
-
-static void mp_init_xmit_attrib(struct mp_tx *pmptx, struct adapter *padapter)
-{
- struct pkt_attrib *pattrib;
- struct tx_desc *desc;
-
- /* init xmitframe attribute */
- pattrib = &pmptx->attrib;
- memset(pattrib, 0, sizeof(struct pkt_attrib));
- desc = &pmptx->desc;
- memset(desc, 0, TXDESC_SIZE);
-
- pattrib->ether_type = 0x8712;
- memset(pattrib->dst, 0xFF, ETH_ALEN);
- pattrib->ack_policy = 0;
- pattrib->hdrlen = WLAN_HDR_A3_LEN;
- pattrib->subtype = WIFI_DATA;
- pattrib->priority = 0;
- pattrib->qsel = pattrib->priority;
- pattrib->nr_frags = 1;
- pattrib->encrypt = 0;
- pattrib->bswenc = false;
- pattrib->qos_en = false;
-}
-
-s32 init_mp_priv(struct adapter *padapter)
-{
- struct mp_priv *pmppriv = &padapter->mppriv;
-
- _init_mp_priv_(pmppriv);
- pmppriv->papdater = padapter;
-
- pmppriv->tx.stop = 1;
- mp_init_xmit_attrib(&pmppriv->tx, padapter);
-
- switch (padapter->registrypriv.rf_config) {
- case RF_1T1R:
- pmppriv->antenna_tx = ANTENNA_A;
- pmppriv->antenna_rx = ANTENNA_A;
- break;
- case RF_1T2R:
- default:
- pmppriv->antenna_tx = ANTENNA_A;
- pmppriv->antenna_rx = ANTENNA_AB;
- break;
- case RF_2T2R:
- case RF_2T2R_GREEN:
- pmppriv->antenna_tx = ANTENNA_AB;
- pmppriv->antenna_rx = ANTENNA_AB;
- break;
- case RF_2T4R:
- pmppriv->antenna_tx = ANTENNA_AB;
- pmppriv->antenna_rx = ANTENNA_ABCD;
- break;
- }
-
- return _SUCCESS;
-}
-
-void free_mp_priv(struct mp_priv *pmp_priv)
-{
- kfree(pmp_priv->pallocated_mp_xmitframe_buf);
- pmp_priv->pallocated_mp_xmitframe_buf = NULL;
- pmp_priv->pmp_xmtframe_buf = NULL;
-}
-
-#define PHY_IQCalibrate(a, b) PHY_IQCalibrate_8188E(a, b)
-#define PHY_LCCalibrate(a) PHY_LCCalibrate_8188E(a)
-#define PHY_SetRFPathSwitch(a, b) PHY_SetRFPathSwitch_8188E(a, b)
-
-s32 MPT_InitializeAdapter(struct adapter *pAdapter, u8 Channel)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- s32 rtStatus = _SUCCESS;
- struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
- struct mlme_priv *pmlmepriv = &pAdapter->mlmepriv;
-
- /* HW Initialization for 8190 MPT. */
- /* SW Initialization for 8190 MP. */
- pMptCtx->bMptDrvUnload = false;
- pMptCtx->bMassProdTest = false;
- pMptCtx->bMptIndexEven = true; /* default gain index is -6.0db */
- pMptCtx->h2cReqNum = 0x0;
- /* Init mpt event. */
- /* init for BT MP */
-
- pMptCtx->bMptWorkItemInProgress = false;
- pMptCtx->CurrMptAct = NULL;
- /* */
-
- /* Don't accept any packets */
- rtw_write32(pAdapter, REG_RCR, 0);
-
- PHY_IQCalibrate(pAdapter, false);
- dm_CheckTXPowerTracking(&pHalData->odmpriv); /* trigger thermal meter */
- PHY_LCCalibrate(pAdapter);
-
- pMptCtx->backup0xc50 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XAAGCCore1, bMaskByte0);
- pMptCtx->backup0xc58 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_XBAGCCore1, bMaskByte0);
- pMptCtx->backup0xc30 = (u8)PHY_QueryBBReg(pAdapter, rOFDM0_RxDetector1, bMaskByte0);
- pMptCtx->backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
- pMptCtx->backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
-
- /* set ant to wifi side in mp mode */
- rtw_write16(pAdapter, 0x870, 0x300);
- rtw_write16(pAdapter, 0x860, 0x110);
-
- if (pAdapter->registrypriv.mp_mode == 1)
- pmlmepriv->fw_state = WIFI_MP_STATE;
-
- return rtStatus;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: MPT_DeInitAdapter()
- *
- * Overview: Extra DeInitialization for Mass Production Test.
- *
- * Input: struct adapter * pAdapter
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 05/08/2007 MHC Create Version 0.
- * 05/18/2007 MHC Add normal driver MPHalt code.
- *
- *---------------------------------------------------------------------------*/
-void MPT_DeInitAdapter(struct adapter *pAdapter)
-{
- struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
-
- pMptCtx->bMptDrvUnload = true;
-}
-
-static u8 mpt_ProStartTest(struct adapter *padapter)
-{
- struct mpt_context *pMptCtx = &padapter->mppriv.MptCtx;
-
- pMptCtx->bMassProdTest = true;
- pMptCtx->bStartContTx = false;
- pMptCtx->bCckContTx = false;
- pMptCtx->bOfdmContTx = false;
- pMptCtx->bSingleCarrier = false;
- pMptCtx->bCarrierSuppression = false;
- pMptCtx->bSingleTone = false;
-
- return _SUCCESS;
-}
-
-/*
- * General use
- */
-s32 SetPowerTracking(struct adapter *padapter, u8 enable)
-{
- Hal_SetPowerTracking(padapter, enable);
- return 0;
-}
-
-void GetPowerTracking(struct adapter *padapter, u8 *enable)
-{
- Hal_GetPowerTracking(padapter, enable);
-}
-
-static void disable_dm(struct adapter *padapter)
-{
- u8 v8;
-
- /* 3 1. disable firmware dynamic mechanism */
- /* disable Power Training, Rate Adaptive */
- v8 = rtw_read8(padapter, REG_BCN_CTRL);
- v8 &= ~EN_BCN_FUNCTION;
- rtw_write8(padapter, REG_BCN_CTRL, v8);
-
- /* 3 2. disable driver dynamic mechanism */
- /* disable Dynamic Initial Gain */
- /* disable High Power */
- /* disable Power Tracking */
- Switch_DM_Func(padapter, DYNAMIC_FUNC_DISABLE, false);
-
- /* enable APK, LCK and IQK but disable power tracking */
- Switch_DM_Func(padapter, DYNAMIC_RF_CALIBRATION, true);
-}
-
-/* This function initializes the DUT to the MP test mode */
-s32 mp_start_test(struct adapter *padapter)
-{
- struct wlan_bssid_ex bssid;
- struct sta_info *psta;
- u32 length;
- u8 val8;
- s32 res = _SUCCESS;
- struct mp_priv *pmppriv = &padapter->mppriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
-
- padapter->registrypriv.mp_mode = 1;
- pmppriv->bSetTxPower = 0; /* for manually set tx power */
-
- /* 3 disable dynamic mechanism */
- disable_dm(padapter);
-
- /* 3 0. update mp_priv */
-
- if (padapter->registrypriv.rf_config == RF_819X_MAX_TYPE) {
- switch (GET_RF_TYPE(padapter)) {
- case RF_1T1R:
- pmppriv->antenna_tx = ANTENNA_A;
- pmppriv->antenna_rx = ANTENNA_A;
- break;
- case RF_1T2R:
- default:
- pmppriv->antenna_tx = ANTENNA_A;
- pmppriv->antenna_rx = ANTENNA_AB;
- break;
- case RF_2T2R:
- case RF_2T2R_GREEN:
- pmppriv->antenna_tx = ANTENNA_AB;
- pmppriv->antenna_rx = ANTENNA_AB;
- break;
- case RF_2T4R:
- pmppriv->antenna_tx = ANTENNA_AB;
- pmppriv->antenna_rx = ANTENNA_ABCD;
- break;
- }
- }
-
- mpt_ProStartTest(padapter);
-
- /* 3 1. initialize a new struct wlan_bssid_ex */
-/* memset(&bssid, 0, sizeof(struct wlan_bssid_ex)); */
- memcpy(bssid.MacAddress, pmppriv->network_macaddr, ETH_ALEN);
- bssid.Ssid.SsidLength = strlen("mp_pseudo_adhoc");
- memcpy(bssid.Ssid.Ssid, (u8 *)"mp_pseudo_adhoc", bssid.Ssid.SsidLength);
- bssid.InfrastructureMode = Ndis802_11IBSS;
- bssid.NetworkTypeInUse = Ndis802_11DS;
- bssid.IELength = 0;
-
- length = get_wlan_bssid_ex_sz(&bssid);
- if (length % 4)
- bssid.Length = ((length >> 2) + 1) << 2; /* round up to multiple of 4 bytes. */
- else
- bssid.Length = length;
-
- spin_lock_bh(&pmlmepriv->lock);
-
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- goto end_of_mp_start_test;
-
- /* init mp_start_test status */
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- rtw_disassoc_cmd(padapter, 500, true);
- rtw_indicate_disconnect(padapter);
- rtw_free_assoc_resources(padapter, 1);
- }
- pmppriv->prev_fw_state = get_fwstate(pmlmepriv);
- if (padapter->registrypriv.mp_mode == 1)
- pmlmepriv->fw_state = WIFI_MP_STATE;
- set_fwstate(pmlmepriv, _FW_UNDER_LINKING);
-
- /* 3 2. create a new psta for mp driver */
- /* clear psta in the cur_network, if any */
- psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
- if (psta)
- rtw_free_stainfo(padapter, psta);
-
- psta = rtw_alloc_stainfo(&padapter->stapriv, bssid.MacAddress);
- if (!psta) {
- pmlmepriv->fw_state = pmppriv->prev_fw_state;
- res = _FAIL;
- goto end_of_mp_start_test;
- }
-
- /* 3 3. join pseudo AdHoc */
- tgt_network->join_res = 1;
- tgt_network->aid = 1;
- psta->aid = 1;
- memcpy(&tgt_network->network, &bssid, length);
-
- rtw_indicate_connect(padapter);
- _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
-
-end_of_mp_start_test:
-
- spin_unlock_bh(&pmlmepriv->lock);
-
- if (res == _SUCCESS) {
- /* set MSR to WIFI_FW_ADHOC_STATE */
- val8 = rtw_read8(padapter, MSR) & 0xFC; /* 0x0102 */
- val8 |= WIFI_FW_ADHOC_STATE;
- rtw_write8(padapter, MSR, val8); /* Link in ad hoc network */
- }
- return res;
-}
-/* */
-/* This function changes the DUT from the MP test mode back into normal mode */
-void mp_stop_test(struct adapter *padapter)
-{
- struct mp_priv *pmppriv = &padapter->mppriv;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct sta_info *psta;
-
- if (pmppriv->mode == MP_ON) {
- pmppriv->bSetTxPower = 0;
- spin_lock_bh(&pmlmepriv->lock);
- if (!check_fwstate(pmlmepriv, WIFI_MP_STATE))
- goto end_of_mp_stop_test;
-
- /* 3 1. disconnect pseudo AdHoc */
- rtw_indicate_disconnect(padapter);
-
- /* 3 2. clear psta used in mp test mode. */
- psta = rtw_get_stainfo(&padapter->stapriv, tgt_network->network.MacAddress);
- if (psta)
- rtw_free_stainfo(padapter, psta);
-
- /* 3 3. return to normal state (default:station mode) */
- pmlmepriv->fw_state = pmppriv->prev_fw_state; /* WIFI_STATION_STATE; */
-
- /* flush the cur_network */
- memset(tgt_network, 0, sizeof(struct wlan_network));
-
- _clr_fwstate_(pmlmepriv, WIFI_MP_STATE);
-
-end_of_mp_stop_test:
-
- spin_unlock_bh(&pmlmepriv->lock);
- }
-}
-
-/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
-/*
- * SetChannel
- * Description
- * Use the H2C command to change the channel. This not only modifies the
- * RF register; other related settings also need to be updated.
- */
-void SetChannel(struct adapter *pAdapter)
-{
- Hal_SetChannel(pAdapter);
-}
-
-/*
- * Notice
- * Switching the bandwidth may change the center frequency (channel)
- */
-void SetBandwidth(struct adapter *pAdapter)
-{
- Hal_SetBandwidth(pAdapter);
-}
-
-void SetAntenna(struct adapter *pAdapter)
-{
- Hal_SetAntenna(pAdapter);
-}
-
-void SetAntennaPathPower(struct adapter *pAdapter)
-{
- Hal_SetAntennaPathPower(pAdapter);
-}
-
-void SetTxPower(struct adapter *pAdapter)
-{
- Hal_SetTxPower(pAdapter);
- }
-
-void SetDataRate(struct adapter *pAdapter)
-{
- Hal_SetDataRate(pAdapter);
-}
-
-void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter, bool bMain)
-{
- PHY_SetRFPathSwitch(pAdapter, bMain);
-}
-
-s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
-{
- return Hal_SetThermalMeter(pAdapter, target_ther);
-}
-
-void GetThermalMeter(struct adapter *pAdapter, u8 *value)
-{
- Hal_GetThermalMeter(pAdapter, value);
-}
-
-void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
-{
- PhySetTxPowerLevel(pAdapter);
- Hal_SetSingleCarrierTx(pAdapter, bStart);
-}
-
-void SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
-{
- PhySetTxPowerLevel(pAdapter);
- Hal_SetSingleToneTx(pAdapter, bStart);
-}
-
-void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
-{
- PhySetTxPowerLevel(pAdapter);
- Hal_SetCarrierSuppressionTx(pAdapter, bStart);
-}
-
-void SetContinuousTx(struct adapter *pAdapter, u8 bStart)
-{
- PhySetTxPowerLevel(pAdapter);
- Hal_SetContinuousTx(pAdapter, bStart);
-}
-
-void PhySetTxPowerLevel(struct adapter *pAdapter)
-{
- struct mp_priv *pmp_priv = &pAdapter->mppriv;
-
- if (pmp_priv->bSetTxPower == 0) /* power index was not manually set */
- PHY_SetTxPowerLevel8188E(pAdapter, pmp_priv->channel);
-}
-
-/* */
-static void dump_mpframe(struct adapter *padapter, struct xmit_frame *pmpframe)
-{
- rtw_hal_mgnt_xmit(padapter, pmpframe);
-}
-
-static struct xmit_frame *alloc_mp_xmitframe(struct xmit_priv *pxmitpriv)
-{
- struct xmit_frame *pmpframe;
- struct xmit_buf *pxmitbuf;
-
- pmpframe = rtw_alloc_xmitframe(pxmitpriv);
- if (!pmpframe)
- return NULL;
-
- pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
- if (!pxmitbuf) {
- rtw_free_xmitframe(pxmitpriv, pmpframe);
- return NULL;
- }
-
- pmpframe->frame_tag = MP_FRAMETAG;
-
- pmpframe->pxmitbuf = pxmitbuf;
-
- pmpframe->buf_addr = pxmitbuf->pbuf;
-
- pxmitbuf->priv_data = pmpframe;
-
- return pmpframe;
-}
-
-static int mp_xmit_packet_thread(void *context)
-{
- struct xmit_frame *pxmitframe;
- struct mp_tx *pmptx;
- struct mp_priv *pmp_priv;
- struct xmit_priv *pxmitpriv;
- struct adapter *padapter;
-
- pmp_priv = (struct mp_priv *)context;
- pmptx = &pmp_priv->tx;
- padapter = pmp_priv->papdater;
- pxmitpriv = &padapter->xmitpriv;
-
- thread_enter("RTW_MP_THREAD");
-
- /* DBG_88E("%s:pkTx Start\n", __func__); */
- while (1) {
- pxmitframe = alloc_mp_xmitframe(pxmitpriv);
- if (!pxmitframe) {
- if (pmptx->stop ||
- padapter->bSurpriseRemoved ||
- padapter->bDriverStopped) {
- goto exit;
- } else {
- msleep(1);
- continue;
- }
- }
-
- memcpy((u8 *)(pxmitframe->buf_addr + TXDESC_OFFSET), pmptx->buf, pmptx->write_size);
- memcpy(&pxmitframe->attrib, &pmptx->attrib, sizeof(struct pkt_attrib));
-
- dump_mpframe(padapter, pxmitframe);
-
- pmptx->sended++;
- pmp_priv->tx_pktcount++;
-
- if (pmptx->stop ||
- padapter->bSurpriseRemoved ||
- padapter->bDriverStopped)
- goto exit;
- if ((pmptx->count != 0) &&
- (pmptx->count == pmptx->sended))
- goto exit;
-
- flush_signals_thread();
- }
-
-exit:
- kfree(pmptx->pallocated_buf);
- pmptx->pallocated_buf = NULL;
- pmptx->stop = 1;
-
- thread_exit();
-}
-
-void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc)
-{
- struct mp_priv *pmp_priv = &padapter->mppriv;
- memcpy(ptxdesc, &pmp_priv->tx.desc, TXDESC_SIZE);
-}
-
-void SetPacketTx(struct adapter *padapter)
-{
- u8 *ptr, *pkt_start, *pkt_end;
- u32 pkt_size;
- struct tx_desc *desc;
- struct rtw_ieee80211_hdr *hdr;
- u8 payload;
- bool bmcast;
- struct pkt_attrib *pattrib;
- struct mp_priv *pmp_priv;
-
- pmp_priv = &padapter->mppriv;
- if (pmp_priv->tx.stop)
- return;
- pmp_priv->tx.sended = 0;
- pmp_priv->tx.stop = 0;
- pmp_priv->tx_pktcount = 0;
-
- /* 3 1. update_attrib() */
- pattrib = &pmp_priv->tx.attrib;
- memcpy(pattrib->src, padapter->eeprompriv.mac_addr, ETH_ALEN);
- memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
- memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
- bmcast = is_multicast_ether_addr(pattrib->ra);
- if (bmcast) {
- pattrib->mac_id = 1;
- pattrib->psta = rtw_get_bcmc_stainfo(padapter);
- } else {
- pattrib->mac_id = 0;
- pattrib->psta = rtw_get_stainfo(&padapter->stapriv, get_bssid(&padapter->mlmepriv));
- }
-
- pattrib->last_txcmdsz = pattrib->hdrlen + pattrib->pktlen;
-
- /* 3 2. allocate xmit buffer */
- pkt_size = pattrib->last_txcmdsz;
-
- kfree(pmp_priv->tx.pallocated_buf);
- pmp_priv->tx.write_size = pkt_size;
- pmp_priv->tx.buf_size = pkt_size + XMITBUF_ALIGN_SZ;
- pmp_priv->tx.pallocated_buf = kzalloc(pmp_priv->tx.buf_size, GFP_KERNEL);
- if (!pmp_priv->tx.pallocated_buf) {
- DBG_88E("%s: malloc(%d) fail!!\n", __func__, pmp_priv->tx.buf_size);
- return;
- }
- pmp_priv->tx.buf = (u8 *)N_BYTE_ALIGMENT((size_t)(pmp_priv->tx.pallocated_buf), XMITBUF_ALIGN_SZ);
- ptr = pmp_priv->tx.buf;
-
- desc = &pmp_priv->tx.desc;
- memset(desc, 0, TXDESC_SIZE);
- pkt_start = ptr;
- pkt_end = pkt_start + pkt_size;
-
- /* 3 3. init TX descriptor */
- /* offset 0 */
- desc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
- desc->txdw0 |= cpu_to_le32(pkt_size & 0x0000FFFF); /* packet size */
- desc->txdw0 |= cpu_to_le32(((TXDESC_SIZE + OFFSET_SZ) << OFFSET_SHT) & 0x00FF0000); /* 32 bytes for TX Desc */
- if (bmcast)
- desc->txdw0 |= cpu_to_le32(BMC); /* broadcast packet */
-
- desc->txdw1 |= cpu_to_le32((0x01 << 26) & 0xff000000);
- /* offset 4 */
- desc->txdw1 |= cpu_to_le32((pattrib->mac_id) & 0x3F); /* CAM_ID(MAC_ID) */
- desc->txdw1 |= cpu_to_le32((pattrib->qsel << QSEL_SHT) & 0x00001F00); /* Queue Select, TID */
-
- desc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000); /* Rate Adaptive ID */
- /* offset 8 */
- /* offset 12 */
-
- desc->txdw3 |= cpu_to_le32((pattrib->seqnum << 16) & 0x0fff0000);
-
- /* offset 16 */
- desc->txdw4 |= cpu_to_le32(HW_SSN);
- desc->txdw4 |= cpu_to_le32(USERATE);
- desc->txdw4 |= cpu_to_le32(DISDATAFB);
-
- if (pmp_priv->preamble) {
- if (pmp_priv->rateidx <= MPT_RATE_54M)
- desc->txdw4 |= cpu_to_le32(DATA_SHORT); /* CCK Short Preamble */
- }
- if (pmp_priv->bandwidth == HT_CHANNEL_WIDTH_40)
- desc->txdw4 |= cpu_to_le32(DATA_BW);
-
- /* offset 20 */
- desc->txdw5 |= cpu_to_le32(pmp_priv->rateidx & 0x0000001F);
-
- if (pmp_priv->preamble) {
- if (pmp_priv->rateidx > MPT_RATE_54M)
- desc->txdw5 |= cpu_to_le32(SGI); /* MCS Short Guard Interval */
- }
- desc->txdw5 |= cpu_to_le32(RTY_LMT_EN); /* retry limit enable */
- desc->txdw5 |= cpu_to_le32(0x00180000); /* DATA/RTS Rate Fallback Limit */
-
- /* 3 4. make wlan header, make_wlanhdr() */
- hdr = (struct rtw_ieee80211_hdr *)pkt_start;
- SetFrameSubType(&hdr->frame_ctl, pattrib->subtype);
- memcpy(hdr->addr1, pattrib->dst, ETH_ALEN); /* DA */
- memcpy(hdr->addr2, pattrib->src, ETH_ALEN); /* SA */
- memcpy(hdr->addr3, get_bssid(&padapter->mlmepriv), ETH_ALEN); /* RA, BSSID */
-
- /* 3 5. make payload */
- ptr = pkt_start + pattrib->hdrlen;
-
- switch (pmp_priv->tx.payload) {
- case 0:
- payload = 0x00;
- break;
- case 1:
- payload = 0x5a;
- break;
- case 2:
- payload = 0xa5;
- break;
- case 3:
- payload = 0xff;
- break;
- default:
- payload = 0x00;
- break;
- }
-
- memset(ptr, payload, pkt_end - ptr);
-
- /* 3 6. start thread */
- pmp_priv->tx.PktTxThread = kthread_run(mp_xmit_packet_thread, pmp_priv, "RTW_MP_THREAD");
- if (IS_ERR(pmp_priv->tx.PktTxThread))
- DBG_88E("Create PktTx Thread Fail !!!!!\n");
-}
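The two deleted routines above form the MP continuous-transmit path: the caller fills in padapter->mppriv.tx (frame attributes, payload pattern, frame count), SetPacketTx() builds the 802.11 header plus TX descriptor and spawns the RTW_MP_THREAD kthread, and mp_xmit_packet_thread() keeps cloning the prepared buffer until tx.stop is raised or tx.count frames have been sent. A minimal sketch of how a caller might drive it, using the field names from the deleted structures; the helper functions themselves are hypothetical and not part of the driver:

/* Hypothetical MP TX helpers; a sketch only, field names from the deleted code. */
static void mp_tx_start_sketch(struct adapter *padapter, const u8 *dst,
			       u32 pktlen, u32 count, u8 payload)
{
	struct mp_priv *pmp_priv = &padapter->mppriv;

	memcpy(pmp_priv->tx.attrib.dst, dst, ETH_ALEN);	/* DA of the test frames */
	pmp_priv->tx.attrib.pktlen = pktlen;		/* payload length */
	pmp_priv->tx.count = count;			/* 0 = run until stopped */
	pmp_priv->tx.payload = payload;			/* 0:0x00 1:0x5a 2:0xa5 3:0xff */

	SetPacketTx(padapter);	/* builds header/descriptor, spawns RTW_MP_THREAD */
}

static void mp_tx_stop_sketch(struct adapter *padapter)
{
	padapter->mppriv.tx.stop = 1;	/* mp_xmit_packet_thread() checks this and exits */
}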
-
-void SetPacketRx(struct adapter *pAdapter, u8 bStartRx)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
-
- if (bStartRx) {
- /* Accept CRC error and destination address */
- pHalData->ReceiveConfig = AAP | APM | AM | AB | APP_ICV |
- AMF | ADF | APP_FCS | HTC_LOC_CTRL |
- APP_MIC | APP_PHYSTS;
-
- pHalData->ReceiveConfig |= (RCR_ACRC32 | RCR_AAP);
-
- rtw_write32(pAdapter, REG_RCR, pHalData->ReceiveConfig);
-
- /* Accept all data frames */
- rtw_write16(pAdapter, REG_RXFLTMAP2, 0xFFFF);
- } else {
- rtw_write32(pAdapter, REG_RCR, 0);
- }
-}
-
-void ResetPhyRxPktCount(struct adapter *pAdapter)
-{
- u32 i, phyrx_set = 0;
-
- for (i = 0; i <= 0xF; i++) {
- phyrx_set = 0;
- phyrx_set |= _RXERR_RPT_SEL(i); /* select */
- phyrx_set |= RXERR_RPT_RST; /* set counter to zero */
- rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
- }
-}
-
-static u32 GetPhyRxPktCounts(struct adapter *pAdapter, u32 selbit)
-{
- /* selection */
- u32 phyrx_set = 0, count = 0;
-
- phyrx_set = _RXERR_RPT_SEL(selbit & 0xF);
- rtw_write32(pAdapter, REG_RXERR_RPT, phyrx_set);
-
- /* Read packet count */
- count = rtw_read32(pAdapter, REG_RXERR_RPT) & RXERR_COUNTER_MASK;
-
- return count;
-}
-
-u32 GetPhyRxPktReceived(struct adapter *pAdapter)
-{
- u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
-
- OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_OK);
- CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_OK);
- HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_OK);
-
- return OFDM_cnt + CCK_cnt + HT_cnt;
-}
-
-u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter)
-{
- u32 OFDM_cnt = 0, CCK_cnt = 0, HT_cnt = 0;
-
- OFDM_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_OFDM_MPDU_FAIL);
- CCK_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_CCK_MPDU_FAIL);
- HT_cnt = GetPhyRxPktCounts(pAdapter, RXERR_TYPE_HT_MPDU_FAIL);
-
- return OFDM_cnt + CCK_cnt + HT_cnt;
-}
-
-/* reg 0x808[9:0]: FFT data x */
-/* reg 0x808[22]: 0 --> 1 to get 1 FFT data y */
-/* reg 0x8B4[15:0]: FFT data y report */
-static u32 rtw_GetPSDData(struct adapter *pAdapter, u32 point)
-{
- int psd_val;
-
- psd_val = rtw_read32(pAdapter, 0x808);
- psd_val &= 0xFFBFFC00;
- psd_val |= point;
-
- rtw_write32(pAdapter, 0x808, psd_val);
- mdelay(1);
- psd_val |= 0x00400000;
-
- rtw_write32(pAdapter, 0x808, psd_val);
- mdelay(1);
- psd_val = rtw_read32(pAdapter, 0x8B4);
-
- psd_val &= 0x0000FFFF;
-
- return psd_val;
-}
-
-/*
- *pts start_point_min stop_point_max
- * 128 64 64 + 128 = 192
- * 256 128 128 + 256 = 384
- * 512 256 256 + 512 = 768
- * 1024 512 512 + 1024 = 1536
- */
-u32 mp_query_psd(struct adapter *pAdapter, u8 *data)
-{
- u32 i, psd_pts = 0, psd_start = 0, psd_stop = 0;
- u32 psd_data = 0;
-
- if (!netif_running(pAdapter->pnetdev))
- return 0;
-
- if (!check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE))
- return 0;
-
- if (strlen(data) == 0) { /* default value */
- psd_pts = 128;
- psd_start = 64;
- psd_stop = 128;
- } else {
- sscanf(data, "pts =%d, start =%d, stop =%d", &psd_pts, &psd_start, &psd_stop);
- }
-
- memset(data, '\0', sizeof(*data));
-
- i = psd_start;
- while (i < psd_stop) {
- if (i >= psd_pts) {
- psd_data = rtw_GetPSDData(pAdapter, i - psd_pts);
- } else {
- psd_data = rtw_GetPSDData(pAdapter, i);
- }
- sprintf(data + strlen(data), "%x ", psd_data);
- i++;
- }
-
- msleep(100);
- return strlen(data) + 1;
-}
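The comment table above maps directly onto the sweep loop in mp_query_psd(): sweep indices below psd_pts select that FFT point directly, while indices at or above psd_pts wrap around by psd_pts, which is why stop_point_max equals start_point_min + pts. A small illustration of the index mapping only (no hardware access), assuming the 128-point row of the table:

/* Sketch: which FFT point each sweep index selects for the 128-point case. */
static void mp_psd_index_demo(void)
{
	u32 psd_pts = 128, psd_start = 64, psd_stop = 192;	/* row "128" above */
	u32 i;

	for (i = psd_start; i < psd_stop; i++) {
		u32 fft_point = (i >= psd_pts) ? i - psd_pts : i;
		/* i = 64..127 selects points 64..127, i = 128..191 wraps to 0..63 */
		pr_info("sweep index %u -> FFT point %u\n", i, fft_point);
	}
}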
-
-void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv)
-{
- int i, res;
- struct adapter *padapter = pxmitpriv->adapter;
- struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
- u32 max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- u32 num_xmit_extbuf = NR_XMIT_EXTBUFF;
-
- if (padapter->registrypriv.mp_mode == 0) {
- max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- num_xmit_extbuf = NR_XMIT_EXTBUFF;
- } else {
- max_xmit_extbuf_size = 6000;
- num_xmit_extbuf = 8;
- }
-
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
- for (i = 0; i < num_xmit_extbuf; i++) {
- rtw_os_xmit_resource_free(padapter, pxmitbuf, (max_xmit_extbuf_size + XMITBUF_ALIGN_SZ));
-
- pxmitbuf++;
- }
-
- vfree(pxmitpriv->pallocated_xmit_extbuf);
-
- if (padapter->registrypriv.mp_mode == 0) {
- max_xmit_extbuf_size = 6000;
- num_xmit_extbuf = 8;
- } else {
- max_xmit_extbuf_size = MAX_XMIT_EXTBUF_SZ;
- num_xmit_extbuf = NR_XMIT_EXTBUFF;
- }
-
- /* Init xmit extension buff */
- _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
-
- pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
-
- if (!pxmitpriv->pallocated_xmit_extbuf) {
- res = _FAIL;
- goto exit;
- }
-
- pxmitpriv->pxmit_extbuf = (u8 *)N_BYTE_ALIGMENT((size_t)(pxmitpriv->pallocated_xmit_extbuf), 4);
-
- pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmit_extbuf;
-
- for (i = 0; i < num_xmit_extbuf; i++) {
- INIT_LIST_HEAD(&pxmitbuf->list);
-
- pxmitbuf->priv_data = NULL;
- pxmitbuf->padapter = padapter;
- pxmitbuf->ext_tag = true;
-
- res = rtw_os_xmit_resource_alloc(padapter, pxmitbuf, max_xmit_extbuf_size + XMITBUF_ALIGN_SZ);
- if (res == _FAIL) {
- res = _FAIL;
- goto exit;
- }
-
- list_add_tail(&pxmitbuf->list, &pxmitpriv->free_xmit_extbuf_queue.queue);
- pxmitbuf++;
- }
-
- pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf;
-
-exit:
- ;
-}
-
-void Hal_ProSetCrystalCap(struct adapter *pAdapter, u32 CrystalCapVal)
-{
- CrystalCapVal = CrystalCapVal & 0x3F;
-
- /* write 0x24[16:11] = 0x24[22:17] = CrystalCap */
- PHY_SetBBReg(pAdapter, REG_AFE_XTAL_CTRL, 0x7FF800,
- (CrystalCapVal | (CrystalCapVal << 6)));
-}
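Hal_ProSetCrystalCap() duplicates the 6-bit crystal-cap value so that a single masked write lands it in both 0x24[16:11] and 0x24[22:17]; the mask 0x7FF800 covers bits 22..11. A worked sketch of the bit arithmetic, assuming (as the comment above implies) that PHY_SetBBReg() shifts the supplied data up to the lowest set bit of the mask:

/* Sketch: raw bits that the masked write above ends up setting in reg 0x24. */
static u32 crystal_cap_raw_bits(u32 cap)
{
	cap &= 0x3F;				/* 6-bit crystal cap value */
	/* cap -> bits 16:11, (cap << 6) -> bits 22:17 once shifted under the mask */
	return (cap | (cap << 6)) << 11;	/* fits exactly under mask 0x7FF800 */
}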
diff --git a/drivers/staging/r8188eu/core/rtw_mp_ioctl.c b/drivers/staging/r8188eu/core/rtw_mp_ioctl.c
deleted file mode 100644
index c85f8e467337..000000000000
--- a/drivers/staging/r8188eu/core/rtw_mp_ioctl.c
+++ /dev/null
@@ -1,1170 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTW_MP_IOCTL_C_
-
-#include "../include/osdep_service.h"
-#include "../include/drv_types.h"
-#include "../include/mlme_osdep.h"
-#include "../include/rtw_mp_ioctl.h"
-
-/* rtl8188eu_oid_rtl_seg_81_85 section start **************** */
-int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
-
- if (poid_par_priv->type_of_oid == SET_OID) {
- Adapter->registrypriv.wireless_mode = *(u8 *)poid_par_priv->information_buf;
- } else if (poid_par_priv->type_of_oid == QUERY_OID) {
- *(u8 *)poid_par_priv->information_buf = Adapter->registrypriv.wireless_mode;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- } else {
- status = NDIS_STATUS_NOT_ACCEPTED;
- }
-
- return status;
-}
-/* rtl8188eu_oid_rtl_seg_81_87_80 section start **************** */
-int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct bb_reg_param *pbbreg;
- u16 offset;
- u32 value;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
-
- pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
-
- offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
- if (offset < BB_REG_BASE_ADDR)
- offset |= BB_REG_BASE_ADDR;
-
- value = pbbreg->value;
-
- _irqlevel_changed_(&oldirql, LOWER);
- write_bbreg(Adapter, offset, 0xFFFFFFFF, value);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct bb_reg_param *pbbreg;
- u16 offset;
- u32 value;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(struct bb_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
-
- pbbreg = (struct bb_reg_param *)(poid_par_priv->information_buf);
-
- offset = (u16)(pbbreg->offset) & 0xFFF; /* offset: 0x800~0xfff */
- if (offset < BB_REG_BASE_ADDR)
- offset |= BB_REG_BASE_ADDR;
-
- _irqlevel_changed_(&oldirql, LOWER);
- value = read_bbreg(Adapter, offset, 0xFFFFFFFF);
- _irqlevel_changed_(&oldirql, RAISE);
-
- pbbreg->value = value;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct rf_reg_param *pbbreg;
- u8 path;
- u8 offset;
- u32 value;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
-
- pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
-
- if (pbbreg->path >= RF_PATH_MAX)
- return NDIS_STATUS_NOT_ACCEPTED;
- if (pbbreg->offset > 0xFF)
- return NDIS_STATUS_NOT_ACCEPTED;
- if (pbbreg->value > 0xFFFFF)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- path = (u8)pbbreg->path;
- offset = (u8)pbbreg->offset;
- value = pbbreg->value;
-
- _irqlevel_changed_(&oldirql, LOWER);
- write_rfreg(Adapter, path, offset, value);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct rf_reg_param *pbbreg;
- u8 path;
- u8 offset;
- u32 value;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
- int status = NDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(struct rf_reg_param))
- return NDIS_STATUS_INVALID_LENGTH;
-
- pbbreg = (struct rf_reg_param *)(poid_par_priv->information_buf);
-
- if (pbbreg->path >= RF_PATH_MAX)
- return NDIS_STATUS_NOT_ACCEPTED;
- if (pbbreg->offset > 0xFF)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- path = (u8)pbbreg->path;
- offset = (u8)pbbreg->offset;
-
- _irqlevel_changed_(&oldirql, LOWER);
- value = read_rfreg(Adapter, path, offset);
- _irqlevel_changed_(&oldirql, RAISE);
-
- pbbreg->value = value;
-
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
-
- return status;
-}
-/* rtl8188eu_oid_rtl_seg_81_87_00 section end**************** */
-/* */
-
-/* rtl8188eu_oid_rtl_seg_81_80_00 section start **************** */
-/* */
-int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 ratevalue;/* 4 */
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- ratevalue = *((u32 *)poid_par_priv->information_buf);/* 4 */
- if (ratevalue >= MPT_RATE_LAST)
- return NDIS_STATUS_INVALID_DATA;
-
- Adapter->mppriv.rateidx = ratevalue;
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetDataRate(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 mode;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (Adapter->registrypriv.mp_mode == 0)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- /* IQCalibrateBcut(Adapter); */
-
- mode = *((u32 *)poid_par_priv->information_buf);
- Adapter->mppriv.mode = mode;/* 1 for loopback */
-
- if (mp_start_test(Adapter) == _FAIL) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- goto exit;
- }
-
-exit:
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
- mp_stop_test(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 Channel;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- if (poid_par_priv->type_of_oid == QUERY_OID) {
- *((u32 *)poid_par_priv->information_buf) = Adapter->mppriv.channel;
- return NDIS_STATUS_SUCCESS;
- }
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- Channel = *((u32 *)poid_par_priv->information_buf);
- if (Channel > 14)
- return NDIS_STATUS_NOT_ACCEPTED;
- Adapter->mppriv.channel = Channel;
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetChannel(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv)
-{
- u16 bandwidth;
- u16 channel_offset;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- bandwidth = *((u32 *)poid_par_priv->information_buf);/* 4 */
- channel_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
-
- if (bandwidth != HT_CHANNEL_WIDTH_40)
- bandwidth = HT_CHANNEL_WIDTH_20;
- padapter->mppriv.bandwidth = (u8)bandwidth;
- padapter->mppriv.prime_channel_offset = (u8)channel_offset;
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetBandwidth(padapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 antenna;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- if (poid_par_priv->type_of_oid == SET_OID) {
- antenna = *(u32 *)poid_par_priv->information_buf;
-
- Adapter->mppriv.antenna_tx = (u16)((antenna & 0xFFFF0000) >> 16);
- Adapter->mppriv.antenna_rx = (u16)(antenna & 0x0000FFFF);
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetAntenna(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
- } else {
- antenna = (Adapter->mppriv.antenna_tx << 16) | Adapter->mppriv.antenna_rx;
- *(u32 *)poid_par_priv->information_buf = antenna;
- }
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 tx_pwr_idx;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- tx_pwr_idx = *((u32 *)poid_par_priv->information_buf);
- if (tx_pwr_idx > MAX_TX_PWR_INDEX_N_MODE)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- Adapter->mppriv.txpoweridx = (u8)tx_pwr_idx;
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetTxPower(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-/* */
-/* rtl8188eu_oid_rtl_seg_81_80_20 section start **************** */
-/* */
-int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
-
- if (poid_par_priv->information_buf_len == sizeof(u32)) {
- *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.tx_pktcount;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- } else {
- status = NDIS_STATUS_INVALID_LENGTH;
- }
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
- if (poid_par_priv->information_buf_len == sizeof(u32)) {
- *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_pktcount;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- } else {
- status = NDIS_STATUS_INVALID_LENGTH;
- }
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
- if (poid_par_priv->information_buf_len == sizeof(u32)) {
- *(u32 *)poid_par_priv->information_buf = Adapter->mppriv.rx_crcerrpktcount;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- } else {
- status = NDIS_STATUS_INVALID_LENGTH;
- }
-
- return status;
-}
-/* */
-
-int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
-
- Adapter->mppriv.tx_pktcount = 0;
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
-
- if (poid_par_priv->information_buf_len == sizeof(u32)) {
- Adapter->mppriv.rx_pktcount = 0;
- Adapter->mppriv.rx_crcerrpktcount = 0;
- } else {
- status = NDIS_STATUS_INVALID_LENGTH;
- }
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
-
- _irqlevel_changed_(&oldirql, LOWER);
- ResetPhyRxPktCount(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- _irqlevel_changed_(&oldirql, LOWER);
- *(u32 *)poid_par_priv->information_buf = GetPhyRxPktReceived(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len != sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- _irqlevel_changed_(&oldirql, LOWER);
- *(u32 *)poid_par_priv->information_buf = GetPhyRxPktCRC32Error(Adapter);
- _irqlevel_changed_(&oldirql, RAISE);
-
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
-
- return status;
-}
-/* rtl8188eu_oid_rtl_seg_81_80_20 section end **************** */
-int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 bStartTest;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- bStartTest = *((u32 *)poid_par_priv->information_buf);
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetContinuousTx(Adapter, (u8)bStartTest);
- if (bStartTest) {
- struct mp_priv *pmp_priv = &Adapter->mppriv;
- if (pmp_priv->tx.stop == 0) {
- pmp_priv->tx.stop = 1;
- DBG_88E("%s: pkt tx is running...\n", __func__);
- msleep(5);
- }
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = 1;
- SetPacketTx(Adapter);
- }
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 bStartTest;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- bStartTest = *((u32 *)poid_par_priv->information_buf);
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetSingleCarrierTx(Adapter, (u8)bStartTest);
- if (bStartTest) {
- struct mp_priv *pmp_priv = &Adapter->mppriv;
- if (pmp_priv->tx.stop == 0) {
- pmp_priv->tx.stop = 1;
- DBG_88E("%s: pkt tx is running...\n", __func__);
- msleep(5);
- }
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = 1;
- SetPacketTx(Adapter);
- }
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 bStartTest;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- bStartTest = *((u32 *)poid_par_priv->information_buf);
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetCarrierSuppressionTx(Adapter, (u8)bStartTest);
- if (bStartTest) {
- struct mp_priv *pmp_priv = &Adapter->mppriv;
- if (pmp_priv->tx.stop == 0) {
- pmp_priv->tx.stop = 1;
- DBG_88E("%s: pkt tx is running...\n", __func__);
- msleep(5);
- }
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = 1;
- SetPacketTx(Adapter);
- }
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv)
-{
- u32 bStartTest;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- bStartTest = *((u32 *)poid_par_priv->information_buf);
-
- _irqlevel_changed_(&oldirql, LOWER);
- SetSingleToneTx(Adapter, (u8)bStartTest);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
- int status = NDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
- rtw_hal_set_hwreg(Adapter, HW_VAR_TRIGGER_GPIO_0, NULL);
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* rtl8188eu_oid_rtl_seg_81_80_00 section end **************** */
-/* */
-int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct mp_rw_reg *RegRWStruct;
- u32 offset, width;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
- offset = RegRWStruct->offset;
- width = RegRWStruct->width;
-
- if (offset > 0xFFF)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- switch (width) {
- case 1:
- RegRWStruct->value = rtw_read8(Adapter, offset);
- break;
- case 2:
- RegRWStruct->value = rtw_read16(Adapter, offset);
- break;
- default:
- width = 4;
- RegRWStruct->value = rtw_read32(Adapter, offset);
- break;
- }
-
- _irqlevel_changed_(&oldirql, RAISE);
-
- *poid_par_priv->bytes_rw = width;
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct mp_rw_reg *RegRWStruct;
- u32 offset, value;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *padapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- RegRWStruct = (struct mp_rw_reg *)poid_par_priv->information_buf;
- offset = RegRWStruct->offset;
- value = RegRWStruct->value;
-
- if (offset > 0xFFF)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- switch (RegRWStruct->width) {
- case 1:
- if (value > 0xFF) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- break;
- }
- rtw_write8(padapter, offset, (u8)value);
- break;
- case 2:
- if (value > 0xFFFF) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- break;
- }
- rtw_write16(padapter, offset, (u16)value);
- break;
- case 4:
- rtw_write32(padapter, offset, value);
- break;
- default:
- status = NDIS_STATUS_NOT_ACCEPTED;
- break;
- }
-
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-/* */
-int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-/* */
-int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_wr_attrib_mem_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- int status = NDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- if (rtw_setdatarate_cmd(Adapter, poid_par_priv->information_buf) != _SUCCESS)
- status = NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- u8 thermal = 0;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- _irqlevel_changed_(&oldirql, LOWER);
- GetThermalMeter(Adapter, &thermal);
- _irqlevel_changed_(&oldirql, RAISE);
-
- *(u32 *)poid_par_priv->information_buf = (u32)thermal;
- *poid_par_priv->bytes_rw = sizeof(u32);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
-
- _irqlevel_changed_(&oldirql, LOWER);
- if (poid_par_priv->type_of_oid == SET_OID) {
- u8 enable;
-
- enable = *(u8 *)poid_par_priv->information_buf;
-
- SetPowerTracking(Adapter, enable);
- } else {
- GetPowerTracking(Adapter, (u8 *)poid_par_priv->information_buf);
- }
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-/* */
-int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-/* rtl8188eu_oid_rtl_seg_87_12_00 section start **************** */
-int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
-{
- return NDIS_STATUS_SUCCESS;
-}
-/* */
-int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct efuse_access_struct *pefuse;
- u8 *data;
- u16 addr = 0, cnts = 0, max_available_size = 0;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(struct efuse_access_struct))
- return NDIS_STATUS_INVALID_LENGTH;
-
- pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
- addr = pefuse->start_addr;
- cnts = pefuse->cnts;
- data = pefuse->data;
-
- EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
-
- if ((addr + cnts) > max_available_size)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
- if (rtw_efuse_access(Adapter, false, addr, cnts, data) == _FAIL)
- status = NDIS_STATUS_FAILURE;
- else
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct efuse_access_struct *pefuse;
- u8 *data;
- u16 addr = 0, cnts = 0, max_available_size = 0;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- pefuse = (struct efuse_access_struct *)poid_par_priv->information_buf;
- addr = pefuse->start_addr;
- cnts = pefuse->cnts;
- data = pefuse->data;
-
- EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
-
- if ((addr + cnts) > max_available_size)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- _irqlevel_changed_(&oldirql, LOWER);
- if (rtw_efuse_access(Adapter, true, addr, cnts, data) == _FAIL)
- status = NDIS_STATUS_FAILURE;
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct pgpkt *ppgpkt;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- *poid_par_priv->bytes_rw = 0;
-
- if (poid_par_priv->information_buf_len < sizeof(struct pgpkt *))
- return NDIS_STATUS_INVALID_LENGTH;
-
- ppgpkt = (struct pgpkt *)poid_par_priv->information_buf;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- if (poid_par_priv->type_of_oid == QUERY_OID) {
- Efuse_PowerSwitch(Adapter, false, true);
- if (Efuse_PgPacketRead(Adapter, ppgpkt->offset, ppgpkt->data, false))
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- else
- status = NDIS_STATUS_FAILURE;
- Efuse_PowerSwitch(Adapter, false, false);
- } else {
- Efuse_PowerSwitch(Adapter, true, true);
- if (Efuse_PgPacketWrite(Adapter, ppgpkt->offset, ppgpkt->word_en, ppgpkt->data, false))
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- else
- status = NDIS_STATUS_FAILURE;
- Efuse_PowerSwitch(Adapter, true, false);
- }
-
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv)
-{
- u16 size;
- u8 ret;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- _irqlevel_changed_(&oldirql, LOWER);
- ret = efuse_GetCurrentSize(Adapter, &size);
- _irqlevel_changed_(&oldirql, RAISE);
- if (ret == _SUCCESS) {
- *(u32 *)poid_par_priv->information_buf = size;
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
- } else {
- status = NDIS_STATUS_FAILURE;
- }
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
-
- if (poid_par_priv->type_of_oid != QUERY_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u32))
- return NDIS_STATUS_INVALID_LENGTH;
-
- *(u32 *)poid_par_priv->information_buf = efuse_GetMaxSize(Adapter);
- *poid_par_priv->bytes_rw = poid_par_priv->information_buf_len;
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status;
-
- if (poid_par_priv->type_of_oid == QUERY_OID)
- status = rtl8188eu_oid_rt_pro_read_efuse_hdl(poid_par_priv);
- else
- status = rtl8188eu_oid_rt_pro_write_efuse_hdl(poid_par_priv);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv)
-{
- u8 *data;
- int status = NDIS_STATUS_SUCCESS;
- struct adapter *Adapter = (struct adapter *)(poid_par_priv->adapter_context);
- u16 maplen = 0;
-
- EFUSE_GetEfuseDefinition(Adapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&maplen, false);
-
- *poid_par_priv->bytes_rw = 0;
-
- if (poid_par_priv->information_buf_len < maplen)
- return NDIS_STATUS_INVALID_LENGTH;
-
- data = (u8 *)poid_par_priv->information_buf;
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- if (poid_par_priv->type_of_oid == QUERY_OID) {
- if (rtw_efuse_map_read(Adapter, 0, maplen, data) == _SUCCESS)
- *poid_par_priv->bytes_rw = maplen;
- else
- status = NDIS_STATUS_FAILURE;
- } else {
- /* SET_OID */
- if (rtw_efuse_map_write(Adapter, 0, maplen, data) == _SUCCESS)
- *poid_par_priv->bytes_rw = maplen;
- else
- status = NDIS_STATUS_FAILURE;
- }
-
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-
-int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
- return status;
-}
-
-int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID)
- return NDIS_STATUS_NOT_ACCEPTED;
-
- if (poid_par_priv->information_buf_len < sizeof(u8))
- return NDIS_STATUS_INVALID_LENGTH;
-
- return status;
-}
-
-int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
-
-int rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv)
-{
- struct mp_xmit_parm *pparm;
- struct adapter *padapter;
- struct mp_priv *pmp_priv;
- struct pkt_attrib *pattrib;
-
- pparm = (struct mp_xmit_parm *)poid_par_priv->information_buf;
- padapter = (struct adapter *)poid_par_priv->adapter_context;
- pmp_priv = &padapter->mppriv;
-
- if (poid_par_priv->type_of_oid == QUERY_OID) {
- pparm->enable = !pmp_priv->tx.stop;
- pparm->count = pmp_priv->tx.sended;
- } else {
- if (pparm->enable == 0) {
- pmp_priv->tx.stop = 1;
- } else if (pmp_priv->tx.stop == 1) {
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = pparm->count;
- pmp_priv->tx.payload = pparm->payload_type;
- pattrib = &pmp_priv->tx.attrib;
- pattrib->pktlen = pparm->length;
- memcpy(pattrib->dst, pparm->da, ETH_ALEN);
- SetPacketTx(padapter);
- } else {
- return NDIS_STATUS_FAILURE;
- }
- }
-
- return NDIS_STATUS_SUCCESS;
-}
-
-/* */
-int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv)
-{
- int status = NDIS_STATUS_SUCCESS;
-
- if (poid_par_priv->type_of_oid != SET_OID) {
- status = NDIS_STATUS_NOT_ACCEPTED;
- return status;
- }
-
- _irqlevel_changed_(&oldirql, LOWER);
-
- /* CALL the power_down function */
- _irqlevel_changed_(&oldirql, RAISE);
-
- return status;
-}
-/* */
-int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv)
-{
- return 0;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_p2p.c b/drivers/staging/r8188eu/core/rtw_p2p.c
index e2b6cf2386e0..b265b5e46851 100644
--- a/drivers/staging/r8188eu/core/rtw_p2p.c
+++ b/drivers/staging/r8188eu/core/rtw_p2p.c
@@ -7,8 +7,6 @@
#include "../include/rtw_p2p.h"
#include "../include/wifi.h"
-#ifdef CONFIG_88EU_P2P
-
static int rtw_p2p_is_channel_list_ok(u8 desired_ch, u8 *ch_list, u8 ch_cnt)
{
int found = 0, i = 0;
@@ -735,13 +733,6 @@ u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf, u8 status
return len;
}
-u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf)
-{
- u32 len = 0;
-
- return len;
-}
-
u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pframe, uint len)
{
u8 *p;
@@ -1494,7 +1485,7 @@ static void pre_tx_invitereq_handler(struct adapter *padapter)
u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->invitereq_info.peer_ch, HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1506,7 +1497,7 @@ static void pre_tx_provdisc_handler(struct adapter *padapter)
u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->tx_prov_disc_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1518,7 +1509,7 @@ static void pre_tx_negoreq_handler(struct adapter *padapter)
u8 val8 = 1;
set_channel_bwmode(padapter, pwdinfo->nego_req_info.peer_channel_num[0], HAL_PRIME_CHNL_OFFSET_DONT_CARE, HT_CHANNEL_WIDTH_20);
- rtw_hal_set_hwreg(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
+ SetHwReg8188EU(padapter, HW_VAR_MLME_SITESURVEY, (u8 *)(&val8));
issue_probereq_p2p(padapter, NULL);
_set_timer(&pwdinfo->pre_tx_scan_timer, P2P_TX_PRESCAN_TIMEOUT);
@@ -1643,7 +1634,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_DISABLE:
pwdinfo->p2p_ps_state = p2p_ps_state;
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
pwdinfo->noa_index = 0;
pwdinfo->ctwindow = 0;
@@ -1653,7 +1644,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (padapter->pwrctrlpriv.bFwCurrentInPSMode) {
if (pwrpriv->smart_ps == 0) {
pwrpriv->smart_ps = 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
}
}
break;
@@ -1665,10 +1656,10 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
if (pwrpriv->smart_ps != 0) {
pwrpriv->smart_ps = 0;
DBG_88E("%s(): Enter CTW, change SmartPS\n", __func__);
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&padapter->pwrctrlpriv.pwr_mode));
}
}
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
}
break;
case P2P_PS_SCAN:
@@ -1676,7 +1667,7 @@ void p2p_ps_wk_hdl(struct adapter *padapter, u8 p2p_ps_state)
case P2P_PS_ALLSTASLEEP:
if (pwdinfo->p2p_ps_mode > P2P_PS_NONE) {
pwdinfo->p2p_ps_state = p2p_ps_state;
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_P2P_PS_OFFLOAD, (u8 *)(&p2p_ps_state));
}
break;
default:
@@ -1832,7 +1823,6 @@ void rtw_init_wifidirect_timers(struct adapter *padapter)
void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr, u8 *iface_addr)
{
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
/*init device&interface address */
@@ -1840,7 +1830,6 @@ void rtw_init_wifidirect_addrs(struct adapter *padapter, u8 *dev_addr, u8 *iface
memcpy(pwdinfo->device_addr, dev_addr, ETH_ALEN);
if (iface_addr)
memcpy(pwdinfo->interface_addr, iface_addr, ETH_ALEN);
-#endif
}
void init_wifidirect_info(struct adapter *padapter, enum P2P_ROLE role)
@@ -1955,7 +1944,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
/* Enable P2P function */
init_wifidirect_info(padapter, role);
- rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, true);
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, true);
} else if (role == P2P_ROLE_DISABLE) {
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = _FAIL;
@@ -1974,7 +1963,7 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
memset(&pwdinfo->rx_prov_disc_info, 0x00, sizeof(struct rx_provdisc_req_info));
}
- rtw_hal_set_odm_var(padapter, HAL_ODM_P2P_STATE, NULL, false);
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_P2P_STATE, NULL, false);
/* Restore to initial setting. */
update_tx_basic_rate(padapter, padapter->registrypriv.wireless_mode);
@@ -1983,15 +1972,3 @@ int rtw_p2p_enable(struct adapter *padapter, enum P2P_ROLE role)
exit:
return ret;
}
-
-#else
-u8 p2p_ps_wk_cmd(struct adapter *padapter, u8 p2p_ps_state, u8 enqueue)
-{
- return _FAIL;
-}
-
-void process_p2p_ps_ie(struct adapter *padapter, u8 *IEs, u32 IELength)
-{
-}
-
-#endif /* CONFIG_88EU_P2P */
diff --git a/drivers/staging/r8188eu/core/rtw_pwrctrl.c b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
index c3897b29121c..5d595cf2a47e 100644
--- a/drivers/staging/r8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/r8188eu/core/rtw_pwrctrl.c
@@ -13,9 +13,6 @@ void ips_enter(struct adapter *padapter)
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct xmit_priv *pxmit_priv = &padapter->xmitpriv;
- if (padapter->registrypriv.mp_mode == 1)
- return;
-
if (pxmit_priv->free_xmitbuf_cnt != NR_XMITBUFF ||
pxmit_priv->free_xmit_extbuf_cnt != NR_XMIT_EXTBUFF) {
DBG_88E_LEVEL(_drv_info_, "There are some pkts to transmit\n");
@@ -24,7 +21,7 @@ void ips_enter(struct adapter *padapter)
return;
}
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->lock);
pwrpriv->bips_processing = true;
@@ -45,7 +42,7 @@ void ips_enter(struct adapter *padapter)
}
pwrpriv->bips_processing = false;
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->lock);
}
int ips_leave(struct adapter *padapter)
@@ -56,7 +53,7 @@ int ips_leave(struct adapter *padapter)
int result = _SUCCESS;
int keyid;
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->lock);
if ((pwrpriv->rf_pwrstate == rf_off) && (!pwrpriv->bips_processing)) {
pwrpriv->bips_processing = true;
@@ -90,7 +87,7 @@ int ips_leave(struct adapter *padapter)
pwrpriv->bpower_saving = false;
}
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->lock);
return result;
}
@@ -99,10 +96,7 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
{
struct adapter *buddy = adapter->pbuddy_adapter;
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &adapter->wdinfo;
-#endif
-
bool ret = false;
if (adapter->pwrctrlpriv.ips_deny_time >= jiffies)
@@ -113,29 +107,19 @@ static bool rtw_pwr_unassociated_idle(struct adapter *adapter)
check_fwstate(pmlmepriv, WIFI_UNDER_WPS) ||
check_fwstate(pmlmepriv, WIFI_AP_STATE) ||
check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE) ||
-#if defined(CONFIG_88EU_P2P)
!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
-#else
- 0)
-#endif
goto exit;
/* consider buddy, if exist */
if (buddy) {
struct mlme_priv *b_pmlmepriv = &buddy->mlmepriv;
- #ifdef CONFIG_88EU_P2P
struct wifidirect_info *b_pwdinfo = &buddy->wdinfo;
- #endif
if (check_fwstate(b_pmlmepriv, WIFI_ASOC_STATE | WIFI_SITE_MONITOR) ||
check_fwstate(b_pmlmepriv, WIFI_UNDER_LINKING | WIFI_UNDER_WPS) ||
check_fwstate(b_pmlmepriv, WIFI_AP_STATE) ||
check_fwstate(b_pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE) ||
-#if defined(CONFIG_88EU_P2P)
!rtw_p2p_chk_state(b_pwdinfo, P2P_STATE_NONE))
-#else
- 0)
-#endif
goto exit;
}
ret = true;
@@ -148,32 +132,12 @@ void rtw_ps_processor(struct adapter *padapter)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- enum rt_rf_power_state rfpwrstate;
pwrpriv->ps_processing = true;
if (pwrpriv->bips_processing)
goto exit;
- if (padapter->pwrctrlpriv.bHWPwrPindetect) {
- rfpwrstate = RfOnOffDetect(padapter);
- DBG_88E("@@@@- #2 %s==> rfstate:%s\n", __func__, (rfpwrstate == rf_on) ? "rf_on" : "rf_off");
-
- if (rfpwrstate != pwrpriv->rf_pwrstate) {
- if (rfpwrstate == rf_off) {
- pwrpriv->change_rfpwrstate = rf_off;
- pwrpriv->brfoffbyhw = true;
- padapter->bCardDisableWOHSM = true;
- rtw_hw_suspend(padapter);
- } else {
- pwrpriv->change_rfpwrstate = rf_on;
- rtw_hw_resume(padapter);
- }
- DBG_88E("current rf_pwrstate(%s)\n", (pwrpriv->rf_pwrstate == rf_off) ? "rf_off" : "rf_on");
- }
- pwrpriv->pwr_state_check_cnts++;
- }
-
if (pwrpriv->ips_mode_req == IPS_NONE)
goto exit;
@@ -199,51 +163,6 @@ static void pwr_state_check_handler(struct timer_list *t)
rtw_ps_cmd(padapter);
}
-/*
- *
- * Parameters
- * padapter
- * pslv power state level, only could be PS_STATE_S0 ~ PS_STATE_S4
- *
- */
-void rtw_set_rpwm(struct adapter *padapter, u8 pslv)
-{
- u8 rpwm;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-
- pslv = PS_STATE(pslv);
-
- if (pwrpriv->btcoex_rfon) {
- if (pslv < PS_STATE_S4)
- pslv = PS_STATE_S3;
- }
-
- if (pwrpriv->rpwm == pslv)
- return;
-
- if ((padapter->bSurpriseRemoved) ||
- (!padapter->hw_init_completed)) {
- pwrpriv->cpwm = PS_STATE_S4;
-
- return;
- }
-
- if (padapter->bDriverStopped) {
- if (pslv < PS_STATE_S2)
- return;
- }
-
- rpwm = pslv | pwrpriv->tog;
-
- pwrpriv->rpwm = pslv;
-
- rtw_hal_set_hwreg(padapter, HW_VAR_SET_RPWM, (u8 *)(&rpwm));
-
- pwrpriv->tog += 0x80;
- pwrpriv->cpwm = pslv;
-
-}
-
static u8 PS_RDY_CHECK(struct adapter *padapter)
{
u32 curr_time, delta_time;
@@ -274,9 +193,7 @@ static u8 PS_RDY_CHECK(struct adapter *padapter)
void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_ant_mode)
{
struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
if (ps_mode > PM_Card_Disable)
return;
@@ -292,31 +209,24 @@ void rtw_set_ps_mode(struct adapter *padapter, u8 ps_mode, u8 smart_ps, u8 bcn_a
/* if (pwrpriv->pwr_mode == PS_MODE_ACTIVE) */
if (ps_mode == PS_MODE_ACTIVE) {
-#ifdef CONFIG_88EU_P2P
if (pwdinfo->opp_ps == 0) {
DBG_88E("rtw_set_ps_mode: Leave 802.11 power save\n");
pwrpriv->pwr_mode = ps_mode;
- rtw_set_rpwm(padapter, PS_STATE_S4);
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
pwrpriv->bFwCurrentInPSMode = false;
}
} else {
-#endif /* CONFIG_88EU_P2P */
if (PS_RDY_CHECK(padapter)) {
DBG_88E("%s: Enter 802.11 power save\n", __func__);
pwrpriv->bFwCurrentInPSMode = true;
pwrpriv->pwr_mode = ps_mode;
pwrpriv->smart_ps = smart_ps;
pwrpriv->bcn_ant_mode = bcn_ant_mode;
- rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
+ SetHwReg8188EU(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode));
-#ifdef CONFIG_88EU_P2P
/* Set CTWindow after LPS */
if (pwdinfo->opp_ps == 1)
p2p_ps_wk_cmd(padapter, P2P_PS_ENABLE, 0);
-#endif /* CONFIG_88EU_P2P */
-
- rtw_set_rpwm(padapter, PS_STATE_S2);
}
}
@@ -336,7 +246,7 @@ s32 LPS_RF_ON_check(struct adapter *padapter, u32 delay_ms)
start_time = jiffies;
while (1) {
- rtw_hal_get_hwreg(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
+ GetHwReg8188EU(padapter, HW_VAR_FWLPS_RF_ON, &bAwake);
if (bAwake)
break;
@@ -427,7 +337,7 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
{
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
- _init_pwrlock(&pwrctrlpriv->lock);
+ mutex_init(&pwrctrlpriv->lock);
pwrctrlpriv->rf_pwrstate = rf_on;
pwrctrlpriv->ips_enter_cnts = 0;
pwrctrlpriv->ips_leave_cnts = 0;
@@ -443,50 +353,18 @@ void rtw_init_pwrctrl_priv(struct adapter *padapter)
pwrctrlpriv->bkeepfwalive = false;
pwrctrlpriv->LpsIdleCount = 0;
- if (padapter->registrypriv.mp_mode == 1)
- pwrctrlpriv->power_mgnt = PS_MODE_ACTIVE;
- else
- pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
+ pwrctrlpriv->power_mgnt = padapter->registrypriv.power_mgnt;/* PS_MODE_MIN; */
pwrctrlpriv->bLeisurePs = (PS_MODE_ACTIVE != pwrctrlpriv->power_mgnt) ? true : false;
pwrctrlpriv->bFwCurrentInPSMode = false;
- pwrctrlpriv->rpwm = 0;
- pwrctrlpriv->cpwm = PS_STATE_S4;
-
pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
pwrctrlpriv->smart_ps = padapter->registrypriv.smart_ps;
pwrctrlpriv->bcn_ant_mode = 0;
- pwrctrlpriv->tog = 0x80;
-
- pwrctrlpriv->btcoex_rfon = false;
-
timer_setup(&pwrctrlpriv->pwr_state_check_timer, pwr_state_check_handler, 0);
}
-void rtw_free_pwrctrl_priv(struct adapter *adapter)
-{
- struct pwrctrl_priv *pwrctrlpriv = &adapter->pwrctrlpriv;
-
- _free_pwrlock(&pwrctrlpriv->lock);
-
-}
-
-u8 rtw_interface_ps_func(struct adapter *padapter, enum hal_intf_ps_func efunc_id, u8 *val)
-{
- u8 bResult = true;
- rtw_hal_intf_ps_func(padapter, efunc_id, val);
-
- return bResult;
-}
-
-inline void rtw_set_ips_deny(struct adapter *padapter, u32 ms)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- pwrpriv->ips_deny_time = jiffies + rtw_ms_to_systime(ms);
-}
-
/*
* rtw_pwr_wakeup - Wake the NIC up from: 1)IPS. 2)USB autosuspend
* @adapter: pointer to struct adapter structure
diff --git a/drivers/staging/r8188eu/core/rtw_recv.c b/drivers/staging/r8188eu/core/rtw_recv.c
index e082edfbaad8..51a13262a226 100644
--- a/drivers/staging/r8188eu/core/rtw_recv.c
+++ b/drivers/staging/r8188eu/core/rtw_recv.c
@@ -9,6 +9,7 @@
#include "../include/mlme_osdep.h"
#include "../include/usb_ops.h"
#include "../include/wifi.h"
+#include "../include/rtl8188e_recv.h"
static u8 SNAP_ETH_TYPE_IPX[2] = {0x81, 0x37};
static u8 SNAP_ETH_TYPE_APPLETALK_AARP[2] = {0x80, 0xf3};
@@ -31,7 +32,7 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
spin_lock_init(&psta_recvpriv->lock);
- _rtw_init_queue(&psta_recvpriv->defrag_q);
+ rtw_init_queue(&psta_recvpriv->defrag_q);
}
@@ -45,9 +46,9 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
spin_lock_init(&precvpriv->lock);
- _rtw_init_queue(&precvpriv->free_recv_queue);
- _rtw_init_queue(&precvpriv->recv_pending_queue);
- _rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+ rtw_init_queue(&precvpriv->free_recv_queue);
+ rtw_init_queue(&precvpriv->recv_pending_queue);
+ rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
precvpriv->adapter = padapter;
@@ -82,7 +83,7 @@ int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
sema_init(&precvpriv->allrxreturnevt, 0);
- res = rtw_hal_init_recv_priv(padapter);
+ res = rtl8188eu_init_recv_priv(padapter);
timer_setup(&precvpriv->signal_stat_timer, rtw_signal_stat_timer_hdl, 0);
precvpriv->signal_stat_sampling_interval = 1000; /* ms */
@@ -103,7 +104,7 @@ void _rtw_free_recv_priv(struct recv_priv *precvpriv)
vfree(precvpriv->pallocated_frame_buf);
- rtw_hal_free_recv_priv(padapter);
+ rtl8188eu_free_recv_priv(padapter);
}
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
@@ -147,14 +148,6 @@ struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue)
return precvframe;
}
-void rtw_init_recvframe(struct recv_frame *precvframe, struct recv_priv *precvpriv)
-{
- /* Perry: This can be removed */
- INIT_LIST_HEAD(&precvframe->list);
-
- precvframe->len = 0;
-}
-
int rtw_free_recvframe(struct recv_frame *precvframe, struct __queue *pfree_recv_queue)
{
struct adapter *padapter;
@@ -257,56 +250,6 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
return cnt;
}
-int rtw_enqueue_recvbuf_to_head(struct recv_buf *precvbuf, struct __queue *queue)
-{
- spin_lock_bh(&queue->lock);
-
- list_del_init(&precvbuf->list);
- list_add(&precvbuf->list, get_list_head(queue));
-
- spin_unlock_bh(&queue->lock);
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
-
- list_del_init(&precvbuf->list);
-
- list_add_tail(&precvbuf->list, get_list_head(queue));
- spin_unlock_irqrestore(&queue->lock, flags);
- return _SUCCESS;
-}
-
-struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue)
-{
- struct recv_buf *precvbuf;
- struct list_head *plist, *phead;
- unsigned long flags;
-
- spin_lock_irqsave(&queue->lock, flags);
-
- if (list_empty(&queue->queue)) {
- precvbuf = NULL;
- } else {
- phead = get_list_head(queue);
-
- plist = phead->next;
-
- precvbuf = container_of(plist, struct recv_buf, list);
-
- list_del_init(&precvbuf->list);
- }
-
- spin_unlock_irqrestore(&queue->lock, flags);
-
- return precvbuf;
-}
-
static int recvframe_chkmic(struct adapter *adapter, struct recv_frame *precvframe)
{
int i, res = _SUCCESS;
@@ -418,13 +361,13 @@ static struct recv_frame *decryptor(struct adapter *padapter, struct recv_frame
switch (prxattrib->encrypt) {
case _WEP40_:
case _WEP104_:
- rtw_wep_decrypt(padapter, (u8 *)precv_frame);
+ rtw_wep_decrypt(padapter, precv_frame);
break;
case _TKIP_:
- res = rtw_tkip_decrypt(padapter, (u8 *)precv_frame);
+ res = rtw_tkip_decrypt(padapter, precv_frame);
break;
case _AES_:
- res = rtw_aes_decrypt(padapter, (u8 *)precv_frame);
+ res = rtw_aes_decrypt(padapter, precv_frame);
break;
default:
break;
@@ -523,7 +466,6 @@ static int recv_decache(struct recv_frame *precv_frame, u8 bretry, struct stainf
void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame);
void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_AP_MODE
unsigned char pwrbit;
u8 *ptr = precv_frame->rx_data;
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
@@ -543,13 +485,10 @@ void process_pwrbit_data(struct adapter *padapter, struct recv_frame *precv_fram
wakeup_sta_to_xmit(padapter, psta);
}
}
-
-#endif
}
static void process_wmmps_data(struct adapter *padapter, struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_AP_MODE
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *psta = NULL;
@@ -598,8 +537,6 @@ static void process_wmmps_data(struct adapter *padapter, struct recv_frame *prec
}
}
}
-
-#endif
}
static void count_rx_stats(struct adapter *padapter, struct recv_frame *prframe, struct sta_info *sta)
@@ -710,14 +647,8 @@ int sta2sta_data_frame(struct adapter *adapter, struct recv_frame *precv_frame,
else
*psta = rtw_get_stainfo(pstapriv, sta_addr); /* get ap_info */
- if (!*psta) {
- if (adapter->registrypriv.mp_mode == 1) {
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE))
- adapter->mppriv.rx_pktloss++;
- }
- ret = _FAIL;
+ if (!*psta)
goto exit;
- }
exit:
@@ -883,7 +814,6 @@ exit:
static int validate_recv_ctrl_frame(struct adapter *padapter,
struct recv_frame *precv_frame)
{
-#ifdef CONFIG_88EU_AP_MODE
struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
struct sta_priv *pstapriv = &padapter->stapriv;
u8 *pframe = precv_frame->rx_data;
@@ -966,8 +896,6 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
pxmitframe->attrib.triggered = 1;
- rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
-
if (psta->sleepq_len == 0) {
pstapriv->tim_bitmap &= ~BIT(psta->aid);
@@ -998,8 +926,6 @@ static int validate_recv_ctrl_frame(struct adapter *padapter,
}
}
-#endif
-
return _FAIL;
}
@@ -1189,7 +1115,7 @@ static int validate_recv_frame(struct adapter *adapter, struct recv_frame *precv
pattrib->order = GetOrder(ptr);
/* Dump rx packets */
- rtw_hal_get_def_var(adapter, HAL_DEF_DBG_DUMP_RXPKT, &(bDumpRxPkt));
+ GetHalDefVar8188EUsb(adapter, HAL_DEF_DBG_DUMP_RXPKT, &bDumpRxPkt);
if (bDumpRxPkt == 1) {/* dump all rx packets */
int i;
DBG_88E("#############################\n");
@@ -1860,22 +1786,7 @@ static int process_recv_indicatepkts(struct adapter *padapter, struct recv_frame
static int recv_func_prehandle(struct adapter *padapter, struct recv_frame *rframe)
{
int ret = _SUCCESS;
- struct rx_pkt_attrib *pattrib = &rframe->attrib;
struct __queue *pfree_recv_queue = &padapter->recvpriv.free_recv_queue;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (padapter->registrypriv.mp_mode == 1) {
- if (pattrib->crc_err == 1)
- padapter->mppriv.rx_crcerrpktcount++;
- else
- padapter->mppriv.rx_pktcount++;
-
- if (!check_fwstate(pmlmepriv, WIFI_MP_LPBK_STATE)) {
- ret = _FAIL;
- rtw_free_recvframe(rframe, pfree_recv_queue);/* free this recv_frame */
- goto exit;
- }
- }
/* check the frame crtl field and decache */
ret = validate_recv_frame(padapter, rframe);
@@ -1998,9 +1909,6 @@ s32 rtw_recv_entry(struct recv_frame *precvframe)
_recv_entry_drop:
- if (padapter->registrypriv.mp_mode == 1)
- padapter->mppriv.rx_pktloss = precvpriv->rx_drop;
-
return ret;
}
diff --git a/drivers/staging/r8188eu/core/rtw_rf.c b/drivers/staging/r8188eu/core/rtw_rf.c
index 321546c40446..2ec56012516e 100644
--- a/drivers/staging/r8188eu/core/rtw_rf.c
+++ b/drivers/staging/r8188eu/core/rtw_rf.c
@@ -53,20 +53,3 @@ u32 rtw_ch2freq(u32 channel)
return freq;
}
-
-u32 rtw_freq2ch(u32 freq)
-{
- u8 i;
- u32 ch = 0;
-
- for (i = 0; i < ch_freq_map_num; i++) {
- if (freq == ch_freq_map[i].frequency) {
- ch = ch_freq_map[i].channel;
- break;
- }
- }
- if (i == ch_freq_map_num)
- ch = 1;
-
- return ch;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_security.c b/drivers/staging/r8188eu/core/rtw_security.c
index 5aa893ab46e9..db35f326bbb1 100644
--- a/drivers/staging/r8188eu/core/rtw_security.c
+++ b/drivers/staging/r8188eu/core/rtw_security.c
@@ -10,80 +10,15 @@
/* WEP related ===== */
-#define CRC32_POLY 0x04c11db7
-
-struct arc4context {
- u32 x;
- u32 y;
- u8 state[256];
-};
-
-static void arcfour_init(struct arc4context *parc4ctx, u8 *key, u32 key_len)
-{
- u32 t, u;
- u32 keyindex;
- u32 stateindex;
- u8 *state;
- u32 counter;
-
- state = parc4ctx->state;
- parc4ctx->x = 0;
- parc4ctx->y = 0;
- for (counter = 0; counter < 256; counter++)
- state[counter] = (u8)counter;
- keyindex = 0;
- stateindex = 0;
- for (counter = 0; counter < 256; counter++) {
- t = state[counter];
- stateindex = (stateindex + key[keyindex] + t) & 0xff;
- u = state[stateindex];
- state[stateindex] = (u8)t;
- state[counter] = (u8)u;
- if (++keyindex >= key_len)
- keyindex = 0;
- }
-
-}
-
-static u32 arcfour_byte(struct arc4context *parc4ctx)
-{
- u32 x;
- u32 y;
- u32 sx, sy;
- u8 *state;
-
- state = parc4ctx->state;
- x = (parc4ctx->x + 1) & 0xff;
- sx = state[x];
- y = (sx + parc4ctx->y) & 0xff;
- sy = state[y];
- parc4ctx->x = x;
- parc4ctx->y = y;
- state[y] = (u8)sx;
- state[x] = (u8)sy;
-
- return state[(sx + sy) & 0xff];
-}
-
-static void arcfour_encrypt(struct arc4context *parc4ctx, u8 *dest, u8 *src, u32 len)
-{
- u32 i;
-
- for (i = 0; i < len; i++)
- dest[i] = src[i] ^ (unsigned char)arcfour_byte(parc4ctx);
-
-}
-
/*
Need to consider the fragment situation
*/
-void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
+void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{ /* exclude ICV */
union {
__le32 f0;
u8 f1[4];
} crc;
- struct arc4context mycontext;
int curfragnum, length;
u32 keylength;
@@ -91,17 +26,16 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 *pframe, *payload, *iv; /* wepkey */
u8 wepkey[16];
u8 hw_hdr_offset = 0;
- struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
- if (!((struct xmit_frame *)pxmitframe)->buf_addr)
+ if (!pxmitframe->buf_addr)
return;
- hw_hdr_offset = TXDESC_SIZE +
- (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
-
- pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+ hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
/* start to encrypt each fragment */
if ((pattrib->encrypt == _WEP40_) || (pattrib->encrypt == _WEP104_)) {
@@ -118,15 +52,15 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc.f1, 4);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc.f1, 4);
} else {
length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc.f1, 4);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
pframe = (u8 *)RND4((size_t)(pframe));
@@ -136,18 +70,18 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
}
-void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
+void rtw_wep_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
{
/* exclude ICV */
- struct arc4context mycontext;
int length;
u32 keylength;
u8 *pframe, *payload, *iv, wepkey[16];
u8 keyindex;
- struct rx_pkt_attrib *prxattrib = &(((struct recv_frame *)precvframe)->attrib);
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
- pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
+ pframe = precvframe->rx_data;
/* start to decrypt recvframe */
if ((prxattrib->encrypt == _WEP40_) || (prxattrib->encrypt == _WEP104_)) {
@@ -156,13 +90,13 @@ void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe)
keylength = psecuritypriv->dot11DefKeylen[keyindex];
memcpy(&wepkey[0], iv, 3);
memcpy(&wepkey[3], &psecuritypriv->dot11DefKey[keyindex].skey[0], keylength);
- length = ((struct recv_frame *)precvframe)->len - prxattrib->hdrlen - prxattrib->iv_len;
+ length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
/* decrypt payload include icv */
- arcfour_init(&mycontext, wepkey, 3 + keylength);
- arcfour_encrypt(&mycontext, payload, payload, length);
+ arc4_setkey(ctx, wepkey, 3 + keylength);
+ arc4_crypt(ctx, payload, payload, length);
}
}
@@ -502,7 +436,7 @@ static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
}
/* The hlen isn't include the IV */
-u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
+u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{ /* exclude ICV */
u16 pnl;
u32 pnh;
@@ -513,23 +447,23 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 f1[4];
} crc;
u8 hw_hdr_offset = 0;
- struct arc4context mycontext;
int curfragnum, length;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
+ struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
u32 res = _SUCCESS;
- if (!((struct xmit_frame *)pxmitframe)->buf_addr)
+ if (!pxmitframe->buf_addr)
return _FAIL;
- hw_hdr_offset = TXDESC_SIZE +
- (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
- pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+ hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
+
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _TKIP_) {
if (pattrib->psta)
@@ -558,16 +492,16 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
length = pattrib->last_txcmdsz - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc.f1, 4);
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc.f1, 4);
} else {
length = pxmitpriv->frag_len - pattrib->hdrlen - pattrib->iv_len - pattrib->icv_len;
crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
- arcfour_encrypt(&mycontext, payload + length, crc.f1, 4);
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
+ arc4_crypt(ctx, payload + length, crc.f1, 4);
pframe += pxmitpriv->frag_len;
pframe = (u8 *)RND4((size_t)(pframe));
@@ -582,7 +516,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
}
/* The hlen isn't include the IV */
-u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
+u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
{ /* exclude ICV */
u16 pnl;
u32 pnh;
@@ -592,17 +526,17 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
__le32 f0;
u8 f1[4];
} crc;
- struct arc4context mycontext;
int length;
u8 *pframe, *payload, *iv, *prwskey;
union pn48 dot11txpn;
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
+ struct arc4_ctx *ctx = &psecuritypriv->recv_arc4_ctx;
u32 res = _SUCCESS;
- pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
+ pframe = precvframe->rx_data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
@@ -621,7 +555,7 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
iv = pframe + prxattrib->hdrlen;
payload = pframe + prxattrib->iv_len + prxattrib->hdrlen;
- length = ((struct recv_frame *)precvframe)->len - prxattrib->hdrlen - prxattrib->iv_len;
+ length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
GET_TKIP_PN(iv, dot11txpn);
@@ -633,8 +567,8 @@ u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
/* 4 decrypt payload include icv */
- arcfour_init(&mycontext, rc4key, 16);
- arcfour_encrypt(&mycontext, payload, payload, length);
+ arc4_setkey(ctx, rc4key, 16);
+ arc4_crypt(ctx, payload, payload, length);
crc.f0 = cpu_to_le32(~crc32_le(~0, payload, length));
@@ -1154,7 +1088,7 @@ static int aes_cipher(u8 *key, uint hdrlen, u8 *pframe, uint plen)
return _SUCCESS;
}
-u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
+u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe)
{ /* exclude ICV */
/*static*/
@@ -1165,20 +1099,18 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
u8 *pframe, *prwskey; /* *payload,*iv */
u8 hw_hdr_offset = 0;
struct sta_info *stainfo;
- struct pkt_attrib *pattrib = &((struct xmit_frame *)pxmitframe)->attrib;
+ struct pkt_attrib *pattrib = &pxmitframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
/* uint offset = 0; */
u32 res = _SUCCESS;
- if (!((struct xmit_frame *)pxmitframe)->buf_addr)
+ if (!pxmitframe->buf_addr)
return _FAIL;
- hw_hdr_offset = TXDESC_SIZE +
- (((struct xmit_frame *)pxmitframe)->pkt_offset * PACKET_OFFSET_SZ);
-
- pframe = ((struct xmit_frame *)pxmitframe)->buf_addr + hw_hdr_offset;
+ hw_hdr_offset = TXDESC_SIZE + pxmitframe->pkt_offset * PACKET_OFFSET_SZ;
+ pframe = pxmitframe->buf_addr + hw_hdr_offset;
/* 4 start to encrypt each fragment */
if (pattrib->encrypt == _AES_) {
@@ -1406,17 +1338,18 @@ static int aes_decipher(u8 *key, uint hdrlen,
return res;
}
-u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
+u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe)
{ /* exclude ICV */
/* Intermediate Buffers */
int length;
u8 *pframe, *prwskey; /* *payload,*iv */
struct sta_info *stainfo;
- struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
struct security_priv *psecuritypriv = &padapter->securitypriv;
u32 res = _SUCCESS;
- pframe = (unsigned char *)((struct recv_frame *)precvframe)->rx_data;
+ pframe = precvframe->rx_data;
+
/* 4 start to encrypt each fragment */
if (prxattrib->encrypt == _AES_) {
stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
@@ -1438,7 +1371,7 @@ u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe)
} else {
prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
- length = ((struct recv_frame *)precvframe)->len - prxattrib->hdrlen - prxattrib->iv_len;
+ length = precvframe->len - prxattrib->hdrlen - prxattrib->iv_len;
res = aes_decipher(prwskey, prxattrib->hdrlen, pframe, length);
} else {
res = _FAIL;
@@ -1622,35 +1555,3 @@ const u8 rcons[] = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1B, 0x36
/* for 128-bit blocks, Rijndael never uses more than 10 rcon values */
};
-
-/**
- * Expand the cipher key into the encryption key schedule.
- *
- * @return the number of rounds for the given cipher key size.
- */
-#define ROUND(i, d, s) \
-do { \
- d##0 = TE0(s##0) ^ TE1(s##1) ^ TE2(s##2) ^ TE3(s##3) ^ rk[4 * i]; \
- d##1 = TE0(s##1) ^ TE1(s##2) ^ TE2(s##3) ^ TE3(s##0) ^ rk[4 * i + 1]; \
- d##2 = TE0(s##2) ^ TE1(s##3) ^ TE2(s##0) ^ TE3(s##1) ^ rk[4 * i + 2]; \
- d##3 = TE0(s##3) ^ TE1(s##0) ^ TE2(s##1) ^ TE3(s##2) ^ rk[4 * i + 3]; \
-} while (0);
-
-/**
- * omac1_aes_128 - One-Key CBC MAC (OMAC1) hash with AES-128 (aka AES-CMAC)
- * @key: 128-bit key for the hash operation
- * @data: Data buffer for which a MAC is determined
- * @data_len: Length of data buffer in bytes
- * @mac: Buffer for MAC (128 bits, i.e., 16 bytes)
- * Returns: 0 on success, -1 on failure
- *
- * This is a mode for using block cipher (AES in this case) for authentication.
- * OMAC1 was standardized with the name CMAC by NIST in a Special Publication
- * (SP) 800-38B.
- */
-void rtw_use_tkipkey_handler(void *FunctionContext)
-{
- struct adapter *padapter = (struct adapter *)FunctionContext;
-
- padapter->securitypriv.busetkipkey = true;
-}
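For reference, the rtw_security.c hunks above replace the driver's private arcfour_init()/arcfour_encrypt() helpers with the kernel's shared ARC4 library (CONFIG_CRYPTO_LIB_ARC4). A minimal, hedged sketch of that library API follows; the buffer and key names are illustrative, not the driver's own:

#include <crypto/arc4.h>
#include <linux/types.h>

/* Example only: schedule a 128-bit per-packet RC4 key and encrypt or
 * decrypt a payload in place, as the WEP/TKIP paths now do via lib arc4.
 */
static void example_arc4_crypt(u8 *payload, unsigned int length, const u8 rc4key[16])
{
	struct arc4_ctx ctx;

	arc4_setkey(&ctx, rc4key, 16);
	arc4_crypt(&ctx, payload, payload, length);
}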
diff --git a/drivers/staging/r8188eu/core/rtw_sreset.c b/drivers/staging/r8188eu/core/rtw_sreset.c
deleted file mode 100644
index c831033d20a9..000000000000
--- a/drivers/staging/r8188eu/core/rtw_sreset.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#include "../include/rtw_sreset.h"
-
-void sreset_init_value(struct adapter *padapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- _rtw_mutex_init(&psrtpriv->silentreset_mutex);
- psrtpriv->silent_reset_inprogress = false;
- psrtpriv->wifi_error_status = WIFI_STATUS_SUCCESS;
- psrtpriv->last_tx_time = 0;
- psrtpriv->last_tx_complete_time = 0;
-}
-void sreset_reset_value(struct adapter *padapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- psrtpriv->silent_reset_inprogress = false;
- psrtpriv->wifi_error_status = WIFI_STATUS_SUCCESS;
- psrtpriv->last_tx_time = 0;
- psrtpriv->last_tx_complete_time = 0;
-}
-
-u8 sreset_get_wifi_status(struct adapter *padapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- u8 status = WIFI_STATUS_SUCCESS;
- u32 val32 = 0;
-
- if (psrtpriv->silent_reset_inprogress)
- return status;
- val32 = rtw_read32(padapter, REG_TXDMA_STATUS);
- if (val32 == 0xeaeaeaea) {
- psrtpriv->wifi_error_status = WIFI_IF_NOT_EXIST;
- } else if (val32 != 0) {
- DBG_88E("txdmastatu(%x)\n", val32);
- psrtpriv->wifi_error_status = WIFI_MAC_TXDMA_ERROR;
- }
-
- if (WIFI_STATUS_SUCCESS != psrtpriv->wifi_error_status) {
- DBG_88E("==>%s error_status(0x%x)\n", __func__, psrtpriv->wifi_error_status);
- status = (psrtpriv->wifi_error_status & (~(USB_READ_PORT_FAIL | USB_WRITE_PORT_FAIL)));
- }
- DBG_88E("==> %s wifi_status(0x%x)\n", __func__, status);
-
- /* status restore */
- psrtpriv->wifi_error_status = WIFI_STATUS_SUCCESS;
-
- return status;
-}
-
-void sreset_set_wifi_error_status(struct adapter *padapter, u32 status)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- pHalData->srestpriv.wifi_error_status = status;
-}
diff --git a/drivers/staging/r8188eu/core/rtw_sta_mgt.c b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
index f6dffed53a60..a3d4d5d8a785 100644
--- a/drivers/staging/r8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/r8188eu/core/rtw_sta_mgt.c
@@ -18,14 +18,12 @@ static void _rtw_init_stainfo(struct sta_info *psta)
spin_lock_init(&psta->lock);
INIT_LIST_HEAD(&psta->list);
INIT_LIST_HEAD(&psta->hash_list);
- _rtw_init_queue(&psta->sleep_q);
+ rtw_init_queue(&psta->sleep_q);
psta->sleepq_len = 0;
_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
_rtw_init_sta_recv_priv(&psta->sta_recvpriv);
-#ifdef CONFIG_88EU_AP_MODE
-
INIT_LIST_HEAD(&psta->asoc_list);
INIT_LIST_HEAD(&psta->auth_list);
@@ -38,21 +36,16 @@ static void _rtw_init_stainfo(struct sta_info *psta)
psta->bpairwise_key_installed = false;
-#ifdef CONFIG_88EU_AP_MODE
psta->nonerp_set = 0;
psta->no_short_slot_time_set = 0;
psta->no_short_preamble_set = 0;
psta->no_ht_gf_set = 0;
psta->no_ht_set = 0;
psta->ht_20mhz_set = 0;
-#endif
psta->under_exist_checking = 0;
psta->keep_alive_trycnt = 0;
-
-#endif /* CONFIG_88EU_AP_MODE */
-
}
u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
@@ -68,13 +61,13 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
((size_t)(pstapriv->pallocated_stainfo_buf) & 3);
- _rtw_init_queue(&pstapriv->free_sta_queue);
+ rtw_init_queue(&pstapriv->free_sta_queue);
spin_lock_init(&pstapriv->sta_hash_lock);
pstapriv->asoc_sta_count = 0;
- _rtw_init_queue(&pstapriv->sleep_q);
- _rtw_init_queue(&pstapriv->wakeup_q);
+ rtw_init_queue(&pstapriv->sleep_q);
+ rtw_init_queue(&pstapriv->wakeup_q);
psta = (struct sta_info *)(pstapriv->pstainfo_buf);
@@ -88,8 +81,6 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
psta++;
}
-#ifdef CONFIG_88EU_AP_MODE
-
pstapriv->sta_dz_bitmap = 0;
pstapriv->tim_bitmap = 0;
@@ -104,7 +95,6 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
pstapriv->assoc_to = 3;
pstapriv->expire_to = 3; /* 3*2 = 6 sec */
pstapriv->max_num_sta = NUM_STA;
-#endif
return _SUCCESS;
}
@@ -155,9 +145,8 @@ u32 _rtw_free_sta_priv(struct sta_priv *pstapriv)
spin_unlock_bh(&pstapriv->sta_hash_lock);
/*===============================*/
- if (pstapriv->pallocated_stainfo_buf)
- vfree(pstapriv->pallocated_stainfo_buf);
- }
+ vfree(pstapriv->pallocated_stainfo_buf);
+ }
return _SUCCESS;
}
@@ -222,7 +211,7 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
preorder_ctrl->wend_b = 0xffff;
preorder_ctrl->wsize_b = 64;/* 64; */
- _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+ rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
rtw_init_recv_timer(preorder_ctrl);
}
@@ -321,9 +310,7 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
}
if (!(psta->state & WIFI_AP_STATE))
- rtw_hal_set_odm_var(padapter, HAL_ODM_STA_INFO, psta, false);
-
-#ifdef CONFIG_88EU_AP_MODE
+ rtl8188e_SetHalODMVar(padapter, HAL_ODM_STA_INFO, psta, false);
spin_lock_bh(&pstapriv->auth_list_lock);
if (!list_empty(&psta->auth_list)) {
@@ -354,8 +341,6 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
psta->under_exist_checking = 0;
-#endif /* CONFIG_88EU_AP_MODE */
-
spin_lock_bh(&pfree_sta_queue->lock);
list_add_tail(&psta->list, get_list_head(pfree_sta_queue));
spin_unlock_bh(&pfree_sta_queue->lock);
@@ -471,7 +456,6 @@ struct sta_info *rtw_get_bcmc_stainfo(struct adapter *padapter)
u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
{
u8 res = true;
-#ifdef CONFIG_88EU_AP_MODE
struct list_head *plist, *phead;
struct rtw_wlan_acl_node *paclnode;
u8 match = false;
@@ -502,7 +486,5 @@ u8 rtw_access_ctrl(struct adapter *padapter, u8 *mac_addr)
else
res = true;
-#endif
-
return res;
}
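Note on the _rtw_free_sta_priv() hunk above: vfree(), like kfree(), is a no-op when passed NULL, which is why the surrounding NULL check could be dropped. A hedged, self-contained illustration (names are made up):

#include <linux/vmalloc.h>
#include <linux/types.h>

static void example_free(u8 *pallocated_stainfo_buf)
{
	/* Safe even when the earlier vzalloc() failed and the pointer is NULL. */
	vfree(pallocated_stainfo_buf);
}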
diff --git a/drivers/staging/r8188eu/core/rtw_wlan_util.c b/drivers/staging/r8188eu/core/rtw_wlan_util.c
index a3a5e1c64c4a..6d4e21a16783 100644
--- a/drivers/staging/r8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/r8188eu/core/rtw_wlan_util.c
@@ -266,27 +266,27 @@ void Save_DM_Func_Flag(struct adapter *padapter)
{
u8 saveflag = true;
- rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
}
void Restore_DM_Func_Flag(struct adapter *padapter)
{
u8 saveflag = false;
- rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&saveflag));
}
void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
{
if (enable)
- rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
else
- rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
+ SetHwReg8188EU(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
}
static void Set_NETYPE0_MSR(struct adapter *padapter, u8 type)
{
- rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
+ SetHwReg8188EU(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
}
void Set_MSR(struct adapter *padapter, u8 type)
@@ -304,21 +304,11 @@ inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
adapter->mlmeextpriv.oper_channel = ch;
}
-inline u8 rtw_get_oper_bw(struct adapter *adapter)
-{
- return adapter->mlmeextpriv.oper_bwmode;
-}
-
inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
{
adapter->mlmeextpriv.oper_bwmode = bw;
}
-inline u8 rtw_get_oper_choffset(struct adapter *adapter)
-{
- return adapter->mlmeextpriv.oper_ch_offset;
-}
-
inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
{
adapter->mlmeextpriv.oper_ch_offset = offset;
@@ -328,7 +318,7 @@ void SelectChannel(struct adapter *padapter, unsigned char channel)
{
/* saved channel info */
rtw_set_oper_ch(padapter, channel);
- rtw_hal_set_chan(padapter, channel);
+ PHY_SwChnl8188E(padapter, channel);
}
void SetBWMode(struct adapter *padapter, unsigned short bwmode,
@@ -338,7 +328,7 @@ void SetBWMode(struct adapter *padapter, unsigned short bwmode,
rtw_set_oper_bw(padapter, bwmode);
rtw_set_oper_choffset(padapter, channel_offset);
- rtw_hal_set_bwmode(padapter, (enum ht_channel_width)bwmode, channel_offset);
+ PHY_SetBWMode8188E(padapter, (enum ht_channel_width)bwmode, channel_offset);
}
void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
@@ -369,20 +359,10 @@ void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigne
rtw_set_oper_bw(padapter, bwmode);
rtw_set_oper_choffset(padapter, channel_offset);
- rtw_hal_set_chan(padapter, center_ch); /* set center channel */
+ PHY_SwChnl8188E(padapter, center_ch); /* set center channel */
SetBWMode(padapter, bwmode, channel_offset);
}
-int get_bsstype(unsigned short capability)
-{
- if (capability & BIT(0))
- return WIFI_FW_AP_STATE;
- else if (capability & BIT(1))
- return WIFI_FW_ADHOC_STATE;
- else
- return 0;
-}
-
__inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
{
return pnetwork->MacAddress;
@@ -447,14 +427,9 @@ unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
return bcn_interval << 2;
}
-void CAM_empty_entry(struct adapter *Adapter, u8 ucIndex)
-{
- rtw_hal_set_hwreg(Adapter, HW_VAR_CAM_EMPTY_ENTRY, (u8 *)(&ucIndex));
-}
-
void invalidate_cam_all(struct adapter *padapter)
{
- rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
}
void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
@@ -482,7 +457,7 @@ void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
cam_val[0] = val;
cam_val[1] = addr + (unsigned int)j;
- rtw_hal_set_hwreg(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
+ SetHwReg8188EU(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
}
}
@@ -517,7 +492,7 @@ void flush_all_cam_entry(struct adapter *padapter)
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_CAM_INVALID_ALL, NULL);
memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
}
@@ -578,21 +553,21 @@ void WMMOnAssocRsp(struct adapter *padapter)
switch (ACI) {
case 0x0:
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
+ SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
acm_mask |= (ACM ? BIT(1) : 0);
edca[XMIT_BE_QUEUE] = acParm;
break;
case 0x1:
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
+ SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
edca[XMIT_BK_QUEUE] = acParm;
break;
case 0x2:
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
+ SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
acm_mask |= (ACM ? BIT(2) : 0);
edca[XMIT_VI_QUEUE] = acParm;
break;
case 0x3:
- rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
+ SetHwReg8188EU(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
acm_mask |= (ACM ? BIT(3) : 0);
edca[XMIT_VO_QUEUE] = acParm;
break;
@@ -602,14 +577,14 @@ void WMMOnAssocRsp(struct adapter *padapter)
}
if (padapter->registrypriv.acm_method == 1)
- rtw_hal_set_hwreg(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
+ SetHwReg8188EU(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
else
padapter->mlmepriv.acm_mask = acm_mask;
inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;
if (pregpriv->wifi_spec == 1) {
- u32 j, tmp, change_inx = false;
+ u32 j, change_inx = false;
/* entry indx: 0->vo, 1->vi, 2->be, 3->bk. */
for (i = 0; i < 4; i++) {
@@ -624,13 +599,8 @@ void WMMOnAssocRsp(struct adapter *padapter)
}
if (change_inx) {
- tmp = edca[i];
- edca[i] = edca[j];
- edca[j] = tmp;
-
- tmp = inx[i];
- inx[i] = inx[j];
- inx[j] = tmp;
+ swap(edca[i], edca[j]);
+ swap(inx[i], inx[j]);
change_inx = false;
}
@@ -760,7 +730,7 @@ void HT_caps_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
}
}
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
/* update the MCS rates */
for (i = 0; i < 16; i++) {
@@ -817,9 +787,9 @@ void HTOnAssocRsp(struct adapter *padapter)
min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
+ SetHwReg8188EU(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));
- rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
+ SetHwReg8188EU(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
}
void ERP_IE_handler(struct adapter *padapter, struct ndis_802_11_var_ie *pIE)
@@ -1123,72 +1093,6 @@ unsigned int is_ap_in_tkip(struct adapter *padapter)
}
}
-unsigned int should_forbid_n_rate(struct adapter *padapter)
-{
- u32 i;
- struct ndis_802_11_var_ie *pIE;
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- struct wlan_bssid_ex *cur_network = &pmlmepriv->cur_network.network;
-
- if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < cur_network->IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(cur_network->IEs + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if (!memcmp(pIE->data, RTW_WPA_OUI, 4) &&
- ((!memcmp((pIE->data + 12), WPA_CIPHER_SUITE_CCMP, 4)) ||
- (!memcmp((pIE->data + 16), WPA_CIPHER_SUITE_CCMP, 4))))
- return false;
- break;
- case _RSN_IE_2_:
- if ((!memcmp((pIE->data + 8), RSN_CIPHER_SUITE_CCMP, 4)) ||
- (!memcmp((pIE->data + 12), RSN_CIPHER_SUITE_CCMP, 4)))
- return false;
- break;
- default:
- break;
- }
-
- i += (pIE->Length + 2);
- }
-
- return true;
- } else {
- return false;
- }
-}
-
-unsigned int is_ap_in_wep(struct adapter *padapter)
-{
- u32 i;
- struct ndis_802_11_var_ie *pIE;
- struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
-
- if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) {
- for (i = sizeof(struct ndis_802_11_fixed_ie); i < pmlmeinfo->network.IELength;) {
- pIE = (struct ndis_802_11_var_ie *)(pmlmeinfo->network.IEs + i);
-
- switch (pIE->ElementID) {
- case _VENDOR_SPECIFIC_IE_:
- if (!memcmp(pIE->data, RTW_WPA_OUI, 4))
- return false;
- break;
- case _RSN_IE_2_:
- return false;
- default:
- break;
- }
- i += (pIE->Length + 2);
- }
- return true;
- } else {
- return false;
- }
-}
-
int wifirate2_ratetbl_inx(unsigned char rate)
{
int inx = 0;
@@ -1324,7 +1228,6 @@ void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
{
unsigned char supported_rates[NDIS_802_11_LENGTH_RATES_EX];
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
/* Added by Albert 2011/03/22 */
@@ -1332,7 +1235,6 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
/* So, the Tx packet shouldn't use the CCK rate */
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE))
return;
-#endif /* CONFIG_88EU_P2P */
memset(supported_rates, 0, NDIS_802_11_LENGTH_RATES_EX);
if ((wirelessmode & WIRELESS_11B) && (wirelessmode == WIRELESS_11B))
@@ -1347,7 +1249,7 @@ void update_tx_basic_rate(struct adapter *padapter, u8 wirelessmode)
else
update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB);
- rtw_hal_set_hwreg(padapter, HW_VAR_BASIC_RATE, supported_rates);
+ SetHwReg8188EU(padapter, HW_VAR_BASIC_RATE, supported_rates);
}
unsigned char check_assoc_AP(u8 *pframe, uint len)
@@ -1369,7 +1271,6 @@ unsigned char check_assoc_AP(u8 *pframe, uint len)
DBG_88E("link to Artheros AP\n");
return HT_IOT_PEER_ATHEROS;
} else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
- (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
(!memcmp(pIE->data, BROADCOM_OUI2, 3))) {
DBG_88E("link to Broadcom AP\n");
return HT_IOT_PEER_BROADCOM;
@@ -1465,13 +1366,13 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */
ShortPreamble = true;
pmlmeinfo->preamble_mode = PREAMBLE_SHORT;
- rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
}
} else { /* Long Preamble */
if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */
ShortPreamble = false;
pmlmeinfo->preamble_mode = PREAMBLE_LONG;
- rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
+ SetHwReg8188EU(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble);
}
}
@@ -1493,7 +1394,7 @@ void update_capinfo(struct adapter *Adapter, u16 updateCap)
}
}
- rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
+ SetHwReg8188EU(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime);
}
void update_wireless_mode(struct adapter *padapter)
@@ -1529,7 +1430,7 @@ void update_wireless_mode(struct adapter *padapter)
SIFS_Timer = 0x0a0a0808;/* 0x0808 -> for CCK, 0x0a0a -> for OFDM */
/* change this value if having IOT issues. */
- padapter->HalFunc.SetHwRegHandler(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
+ SetHwReg8188EU(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer);
if (pmlmeext->cur_wireless_mode & WIRELESS_11B)
update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB);
@@ -1611,12 +1512,12 @@ void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
{
- rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_CORRECT_TSF, NULL);
}
void beacon_timing_control(struct adapter *padapter)
{
- rtw_hal_bcn_related_reg_setting(padapter);
+ SetBeaconRelatedRegisters8188EUsb(padapter);
}
static struct adapter *pbuddy_padapter;
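The WMMOnAssocRsp() hunk above swaps EDCA parameter entries with the kernel's swap() macro instead of an open-coded temporary. A hedged sketch of the same step (array contents and indices are illustrative; depending on kernel version the macro is reached via linux/kernel.h or linux/minmax.h):

#include <linux/kernel.h>	/* pulls in swap() */
#include <linux/types.h>

static void example_sort_step(u32 edca[4], u32 inx[4], int i, int j)
{
	/* Equivalent to: tmp = edca[i]; edca[i] = edca[j]; edca[j] = tmp; and the same for inx. */
	swap(edca[i], edca[j]);
	swap(inx[i], inx[j]);
}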
diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c
index 46fe62c7c32c..0c033a077bf9 100644
--- a/drivers/staging/r8188eu/core/rtw_xmit.c
+++ b/drivers/staging/r8188eu/core/rtw_xmit.c
@@ -9,6 +9,7 @@
#include "../include/osdep_intf.h"
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
+#include "../include/rtl8188e_xmit.h"
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
@@ -17,7 +18,7 @@ static void _init_txservq(struct tx_servq *ptxservq)
{
INIT_LIST_HEAD(&ptxservq->tx_pending);
- _rtw_init_queue(&ptxservq->sta_pending);
+ rtw_init_queue(&ptxservq->sta_pending);
ptxservq->qcnt = 0;
}
@@ -48,22 +49,21 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
/* We don't need to memset padapter->XXX to zero, because adapter is allocated by vzalloc(). */
spin_lock_init(&pxmitpriv->lock);
- sema_init(&pxmitpriv->xmit_sema, 0);
sema_init(&pxmitpriv->terminate_xmitthread_sema, 0);
/*
- Please insert all the queue initializaiton using _rtw_init_queue below
+	Please insert all the queue initialization using rtw_init_queue below
*/
pxmitpriv->adapter = padapter;
- _rtw_init_queue(&pxmitpriv->be_pending);
- _rtw_init_queue(&pxmitpriv->bk_pending);
- _rtw_init_queue(&pxmitpriv->vi_pending);
- _rtw_init_queue(&pxmitpriv->vo_pending);
- _rtw_init_queue(&pxmitpriv->bm_pending);
+ rtw_init_queue(&pxmitpriv->be_pending);
+ rtw_init_queue(&pxmitpriv->bk_pending);
+ rtw_init_queue(&pxmitpriv->vi_pending);
+ rtw_init_queue(&pxmitpriv->vo_pending);
+ rtw_init_queue(&pxmitpriv->bm_pending);
- _rtw_init_queue(&pxmitpriv->free_xmit_queue);
+ rtw_init_queue(&pxmitpriv->free_xmit_queue);
/*
Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
@@ -105,8 +105,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
/* init xmit_buf */
- _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
- _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+ rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
+ rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
@@ -147,7 +147,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
/* Init xmit extension buff */
- _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+ rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
pxmitpriv->pallocated_xmit_extbuf = vzalloc(num_xmit_extbuf * sizeof(struct xmit_buf) + 4);
@@ -196,10 +196,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->voq_cnt = 0;
pxmitpriv->ack_tx = false;
- _rtw_mutex_init(&pxmitpriv->ack_tx_mutex);
+ mutex_init(&pxmitpriv->ack_tx_mutex);
rtw_sctx_init(&pxmitpriv->ack_tx_ops, 0);
- rtw_hal_init_xmit_priv(padapter);
+ rtl8188eu_init_xmit_priv(padapter);
exit:
@@ -243,7 +243,7 @@ void _rtw_free_xmit_priv(struct xmit_priv *pxmitpriv)
rtw_free_hwxmits(padapter);
- _rtw_mutex_free(&pxmitpriv->ack_tx_mutex);
+ mutex_destroy(&pxmitpriv->ack_tx_mutex);
}
static void update_attrib_vcs_info(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -576,8 +576,6 @@ static s32 update_attrib(struct adapter *padapter, struct sk_buff *pkt, struct p
else
pattrib->bswenc = false;
- rtw_set_tx_chksum_offload(pkt, pattrib);
-
update_attrib_phy_info(pattrib, psta);
exit:
@@ -683,13 +681,13 @@ static s32 xmitframe_swencrypt(struct adapter *padapter, struct xmit_frame *pxmi
switch (pattrib->encrypt) {
case _WEP40_:
case _WEP104_:
- rtw_wep_encrypt(padapter, (u8 *)pxmitframe);
+ rtw_wep_encrypt(padapter, pxmitframe);
break;
case _TKIP_:
- rtw_tkip_encrypt(padapter, (u8 *)pxmitframe);
+ rtw_tkip_encrypt(padapter, pxmitframe);
break;
case _AES_:
- rtw_aes_encrypt(padapter, (u8 *)pxmitframe);
+ rtw_aes_encrypt(padapter, pxmitframe);
break;
default:
break;
@@ -863,24 +861,6 @@ s32 rtw_txframes_sta_ac_pending(struct adapter *padapter, struct pkt_attrib *pat
}
/*
- * Calculate wlan 802.11 packet MAX size from pkt_attrib
- * This function doesn't consider fragment case
- */
-u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib)
-{
- u32 len = 0;
-
- len = pattrib->hdrlen + pattrib->iv_len; /* WLAN Header and IV */
- len += SNAP_SIZE + sizeof(u16); /* LLC */
- len += pattrib->pktlen;
- if (pattrib->encrypt == _TKIP_)
- len += 8; /* MIC */
- len += ((pattrib->bswenc) ? pattrib->icv_len : 0); /* ICV */
-
- return len;
-}
-
-/*
This sub-routine will perform all the following:
@@ -1572,31 +1552,31 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
rcu_read_unlock();
spin_lock_bh(&padapter->br_ext_lock);
if (!(skb->data[0] & 1) && br_port &&
- memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
- *((__be16 *)(skb->data + MACADDRLEN * 2)) != __constant_htons(ETH_P_8021Q) &&
- *((__be16 *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP) &&
- !memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN) && padapter->scdb_entry) {
- memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+ memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) &&
+ *((__be16 *)(skb->data + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q) &&
+ *((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP) &&
+ !memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN) && padapter->scdb_entry) {
+ memcpy(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN);
padapter->scdb_entry->ageing_timer = jiffies;
spin_unlock_bh(&padapter->br_ext_lock);
} else {
- if (*((__be16 *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_8021Q)) {
+ if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_8021Q)) {
is_vlan_tag = 1;
- vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
+ vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
+ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
skb_pull(skb, 4);
}
- if (!memcmp(skb->data + MACADDRLEN, padapter->br_mac, MACADDRLEN) &&
- (*((__be16 *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)))
+ if (!memcmp(skb->data + ETH_ALEN, padapter->br_mac, ETH_ALEN) &&
+ (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)))
memcpy(padapter->br_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
- if (*((__be16 *)(skb->data + MACADDRLEN * 2)) == __constant_htons(ETH_P_IP)) {
- if (memcmp(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN)) {
+ if (*((__be16 *)(skb->data + ETH_ALEN * 2)) == __constant_htons(ETH_P_IP)) {
+ if (memcmp(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN)) {
padapter->scdb_entry = (struct nat25_network_db_entry *)scdb_findEntry(padapter,
- skb->data + MACADDRLEN, skb->data + WLAN_ETHHDR_LEN + 12);
+ skb->data + WLAN_ETHHDR_LEN + 12);
if (padapter->scdb_entry) {
- memcpy(padapter->scdb_mac, skb->data + MACADDRLEN, MACADDRLEN);
+ memcpy(padapter->scdb_mac, skb->data + ETH_ALEN, ETH_ALEN);
memcpy(padapter->scdb_ip, skb->data + WLAN_ETHHDR_LEN + 12, 4);
padapter->scdb_entry->ageing_timer = jiffies;
do_nat25 = 0;
@@ -1606,7 +1586,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
padapter->scdb_entry->ageing_timer = jiffies;
do_nat25 = 0;
} else {
- memset(padapter->scdb_mac, 0, MACADDRLEN);
+ memset(padapter->scdb_mac, 0, ETH_ALEN);
memset(padapter->scdb_ip, 0, 4);
}
}
@@ -1620,8 +1600,8 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
skb_push(skb, 4);
for (i = 0; i < 6; i++)
*((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
- *((__be16 *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
- *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
+ *((__be16 *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
}
newskb = skb_copy(skb, GFP_ATOMIC);
@@ -1633,9 +1613,9 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
*pskb = skb = newskb;
if (is_vlan_tag) {
- vlan_hdr = *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2));
+ vlan_hdr = *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2));
for (i = 0; i < 6; i++)
- *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + MACADDRLEN * 2 - 2 - i * 2));
+ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2 - i * 2)) = *((unsigned short *)(skb->data + ETH_ALEN * 2 - 2 - i * 2));
skb_pull(skb, 4);
}
}
@@ -1659,7 +1639,7 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
}
}
- memcpy(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN);
+ memcpy(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN);
dhcp_flag_bcast(padapter, skb);
@@ -1667,13 +1647,13 @@ static int rtw_br_client_tx(struct adapter *padapter, struct sk_buff **pskb)
skb_push(skb, 4);
for (i = 0; i < 6; i++)
*((unsigned short *)(skb->data + i * 2)) = *((unsigned short *)(skb->data + 4 + i * 2));
- *((__be16 *)(skb->data + MACADDRLEN * 2)) = __constant_htons(ETH_P_8021Q);
- *((unsigned short *)(skb->data + MACADDRLEN * 2 + 2)) = vlan_hdr;
+ *((__be16 *)(skb->data + ETH_ALEN * 2)) = __constant_htons(ETH_P_8021Q);
+ *((unsigned short *)(skb->data + ETH_ALEN * 2 + 2)) = vlan_hdr;
}
}
/* check if SA is equal to our MAC */
- if (memcmp(skb->data + MACADDRLEN, GET_MY_HWADDR(padapter), MACADDRLEN)) {
+ if (memcmp(skb->data + ETH_ALEN, GET_MY_HWADDR(padapter), ETH_ALEN)) {
DEBUG_ERR("TX DROP: untransformed frame SA:%02X%02X%02X%02X%02X%02X!\n",
skb->data[6], skb->data[7], skb->data[8], skb->data[9], skb->data[10], skb->data[11]);
return -1;
@@ -1773,23 +1753,19 @@ s32 rtw_xmit(struct adapter *padapter, struct sk_buff **ppkt)
do_queue_select(padapter, &pxmitframe->attrib);
-#ifdef CONFIG_88EU_AP_MODE
spin_lock_bh(&pxmitpriv->lock);
if (xmitframe_enqueue_for_sleeping_sta(padapter, pxmitframe)) {
spin_unlock_bh(&pxmitpriv->lock);
return 1;
}
spin_unlock_bh(&pxmitpriv->lock);
-#endif
- if (!rtw_hal_xmit(padapter, pxmitframe))
+ if (!rtl8188eu_hal_xmit(padapter, pxmitframe))
return 1;
return 0;
}
-#if defined(CONFIG_88EU_AP_MODE)
-
int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe)
{
int ret = false;
@@ -2018,7 +1994,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
spin_unlock_bh(&psta->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe))
+ if (rtl8188eu_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta->sleep_q.lock);
}
@@ -2068,7 +2044,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
pxmitframe->attrib.triggered = 1;
spin_unlock_bh(&psta_bmc->sleep_q.lock);
- if (rtw_hal_xmit(padapter, pxmitframe))
+ if (rtl8188eu_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
spin_lock_bh(&psta_bmc->sleep_q.lock);
}
@@ -2142,7 +2118,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
pxmitframe->attrib.triggered = 1;
- if (rtw_hal_xmit(padapter, pxmitframe))
+ if (rtl8188eu_hal_xmit(padapter, pxmitframe))
rtw_os_xmit_complete(padapter, pxmitframe);
if ((psta->sleepq_ac_len == 0) && (!psta->has_legacy_ac) && (wmmps_ac)) {
@@ -2156,8 +2132,6 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
spin_unlock_bh(&psta->sleep_q.lock);
}
-#endif
-
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms)
{
sctx->timeout_ms = timeout_ms;
@@ -2213,11 +2187,6 @@ void rtw_sctx_done_err(struct submit_ctx **sctx, int status)
}
}
-void rtw_sctx_done(struct submit_ctx **sctx)
-{
- rtw_sctx_done_err(sctx, RTW_SCTX_DONE_SUCCESS);
-}
-
int rtw_ack_tx_wait(struct xmit_priv *pxmitpriv, u32 timeout_ms)
{
struct submit_ctx *pack_tx_ops = &pxmitpriv->ack_tx_ops;
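Two generic conversions run through the rtw_xmit.c hunks above: the driver-private MACADDRLEN constant becomes the standard ETH_ALEN, and the _rtw_mutex_init()/_rtw_mutex_free() wrappers become plain mutex_init()/mutex_destroy(). A hedged sketch with illustrative structure and field names:

#include <linux/etherdevice.h>	/* ETH_ALEN (6), ether_addr_copy() */
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_priv {
	struct mutex ack_tx_mutex;
	u8 br_mac[ETH_ALEN];
};

static void example_init(struct example_priv *p, const u8 *hwaddr)
{
	mutex_init(&p->ack_tx_mutex);
	memcpy(p->br_mac, hwaddr, ETH_ALEN);	/* ether_addr_copy() also works when both sides are u16-aligned */
}

static void example_fini(struct example_priv *p)
{
	mutex_destroy(&p->ack_tx_mutex);
}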
diff --git a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
index d873672feb27..2d351f831289 100644
--- a/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/r8188eu/hal/Hal8188ERateAdaptive.c
@@ -471,16 +471,6 @@ odm_RATxRPTTimerSetting(
}
}
-void
-ODM_RASupport_Init(
- struct odm_dm_struct *dm_odm
- )
-{
- /* 2012/02/14 MH Be noticed, the init must be after IC type is recognized!!!!! */
- if (dm_odm->SupportICType == ODM_RTL8188E)
- dm_odm->RaSupport88E = true;
-}
-
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
{
struct odm_ra_info *pRaInfo = &dm_odm->RAInfo[macid];
@@ -548,7 +538,7 @@ int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm)
u8 ODM_RA_GetShortGI_8188E(struct odm_dm_struct *dm_odm, u8 macid)
{
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
return 0;
return dm_odm->RAInfo[macid].RateSGI;
}
@@ -557,7 +547,7 @@ u8 ODM_RA_GetDecisionRate_8188E(struct odm_dm_struct *dm_odm, u8 macid)
{
u8 DecisionRate = 0;
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
return 0;
DecisionRate = (dm_odm->RAInfo[macid].DecisionRate);
return DecisionRate;
@@ -567,7 +557,7 @@ u8 ODM_RA_GetHwPwrStatus_8188E(struct odm_dm_struct *dm_odm, u8 macid)
{
u8 PTStage = 5;
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
return 0;
PTStage = (dm_odm->RAInfo[macid].PTStage);
return PTStage;
@@ -577,7 +567,7 @@ void ODM_RA_UpdateRateInfo_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rate
{
struct odm_ra_info *pRaInfo = NULL;
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
return;
pRaInfo = &dm_odm->RAInfo[macid];
@@ -591,7 +581,7 @@ void ODM_RA_SetRSSI_8188E(struct odm_dm_struct *dm_odm, u8 macid, u8 Rssi)
{
struct odm_ra_info *pRaInfo = NULL;
- if ((NULL == dm_odm) || (macid >= ASSOCIATE_ENTRY_NUM))
+ if ((NULL == dm_odm) || (macid >= ODM_ASSOCIATE_ENTRY_NUM))
return;
pRaInfo = &dm_odm->RAInfo[macid];
@@ -615,7 +605,7 @@ void ODM_RA_TxRPT2Handle_8188E(struct odm_dm_struct *dm_odm, u8 *TxRPT_Buf, u16
pBuffer = TxRPT_Buf;
do {
- if (MacId >= ASSOCIATE_ENTRY_NUM)
+ if (MacId >= ODM_ASSOCIATE_ENTRY_NUM)
valid = 0;
else if (MacId >= 32)
valid = (1 << (MacId - 32)) & macid_entry1;
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
index 55aa20a30342..f6e4243e0c7b 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_BB.c
@@ -13,7 +13,6 @@
static bool CheckCondition(const u32 condition, const u32 hex)
{
- u32 _board = (hex & 0x000000FF);
u32 _interface = (hex & 0x0000FF00) >> 8;
u32 _platform = (hex & 0x00FF0000) >> 16;
u32 cond = condition;
@@ -21,10 +20,6 @@ static bool CheckCondition(const u32 condition, const u32 hex)
if (condition == 0xCDCDCDCD)
return true;
- cond = condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
cond = condition & 0x0000FF00;
cond = cond >> 8;
if ((_interface & cond) == 0 && cond != 0x07)
@@ -176,9 +171,6 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u8 platform = dm_odm->SupportPlatform;
- u8 interfaceValue = dm_odm->SupportInterface;
- u8 board = dm_odm->BoardType;
u32 arraylen = sizeof(array_agc_tab_1t_8188e) / sizeof(u32);
u32 *array = array_agc_tab_1t_8188e;
bool biol = false;
@@ -187,9 +179,8 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
u8 bndy_cnt = 1;
enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
- hex += board;
- hex += interfaceValue << 8;
- hex += platform << 16;
+ hex += ODM_ITRF_USB << 8;
+ hex += ODM_CE << 16;
hex += 0xFF000000;
biol = rtw_IOL_applied(adapter);
@@ -246,7 +237,7 @@ enum HAL_STATUS ODM_ReadAndConfig_AGC_TAB_1T_8188E(struct odm_dm_struct *dm_odm)
}
}
if (biol) {
- if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
printk("~~~ %s IOL_exec_cmds Failed !!!\n", __func__);
rst = HAL_STATUS_FAILURE;
}
@@ -456,9 +447,6 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex = 0;
u32 i = 0;
- u8 platform = dm_odm->SupportPlatform;
- u8 interfaceValue = dm_odm->SupportInterface;
- u8 board = dm_odm->BoardType;
u32 arraylen = sizeof(array_phy_reg_1t_8188e) / sizeof(u32);
u32 *array = array_phy_reg_1t_8188e;
bool biol = false;
@@ -466,9 +454,8 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
- hex += board;
- hex += interfaceValue << 8;
- hex += platform << 16;
+ hex += ODM_ITRF_USB << 8;
+ hex += ODM_CE << 16;
hex += 0xFF000000;
biol = rtw_IOL_applied(adapter);
@@ -557,7 +544,7 @@ enum HAL_STATUS ODM_ReadAndConfig_PHY_REG_1T_8188E(struct odm_dm_struct *dm_odm)
}
}
if (biol) {
- if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
}
@@ -665,14 +652,11 @@ void ODM_ReadAndConfig_PHY_REG_PG_8188E(struct odm_dm_struct *dm_odm)
{
u32 hex;
u32 i = 0;
- u8 platform = dm_odm->SupportPlatform;
- u8 interfaceValue = dm_odm->SupportInterface;
- u8 board = dm_odm->BoardType;
u32 arraylen = sizeof(array_phy_reg_pg_8188e) / sizeof(u32);
u32 *array = array_phy_reg_pg_8188e;
- hex = board + (interfaceValue << 8);
- hex += (platform << 16) + 0xFF000000;
+ hex = ODM_ITRF_USB << 8;
+ hex += (ODM_CE << 16) + 0xFF000000;
for (i = 0; i < arraylen; i += 3) {
u32 v1 = array[i];
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
index 0ff2609c26bb..b4c55863d3fb 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_MAC.c
@@ -133,9 +133,6 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
u32 hex = 0;
u32 i;
- u8 platform = dm_odm->SupportPlatform;
- u8 interface_val = dm_odm->SupportInterface;
- u8 board = dm_odm->BoardType;
u32 array_len = sizeof(array_MAC_REG_8188E) / sizeof(u32);
u32 *array = array_MAC_REG_8188E;
bool biol = false;
@@ -144,9 +141,8 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
struct xmit_frame *pxmit_frame = NULL;
u8 bndy_cnt = 1;
enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
- hex += board;
- hex += interface_val << 8;
- hex += platform << 16;
+ hex += ODM_ITRF_USB << 8;
+ hex += ODM_CE << 16;
hex += 0xFF000000;
biol = rtw_IOL_applied(adapt);
@@ -204,7 +200,7 @@ enum HAL_STATUS ODM_ReadAndConfig_MAC_REG_8188E(struct odm_dm_struct *dm_odm)
}
}
if (biol) {
- if (!rtw_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ if (!rtl8188e_IOL_exec_cmds_sync(dm_odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
pr_info("~~~ MAC IOL_exec_cmds Failed !!!\n");
rst = HAL_STATUS_FAILURE;
}
diff --git a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
index 55e4b4a877a4..5e0a96200078 100644
--- a/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
+++ b/drivers/staging/r8188eu/hal/HalHWImg8188E_RF.c
@@ -6,7 +6,6 @@
static bool CheckCondition(const u32 Condition, const u32 Hex)
{
- u32 _board = (Hex & 0x000000FF);
u32 _interface = (Hex & 0x0000FF00) >> 8;
u32 _platform = (Hex & 0x00FF0000) >> 16;
u32 cond = Condition;
@@ -14,10 +13,6 @@ static bool CheckCondition(const u32 Condition, const u32 Hex)
if (Condition == 0xCDCDCDCD)
return true;
- cond = Condition & 0x000000FF;
- if ((_board == cond) && cond != 0x00)
- return false;
-
cond = Condition & 0x0000FF00;
cond = cond >> 8;
if ((_interface & cond) == 0 && cond != 0x07)
@@ -144,9 +139,6 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
u32 hex = 0;
u32 i = 0;
- u8 platform = pDM_Odm->SupportPlatform;
- u8 interfaceValue = pDM_Odm->SupportInterface;
- u8 board = pDM_Odm->BoardType;
u32 ArrayLen = sizeof(Array_RadioA_1T_8188E) / sizeof(u32);
u32 *Array = Array_RadioA_1T_8188E;
bool biol = false;
@@ -155,9 +147,8 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
u8 bndy_cnt = 1;
enum HAL_STATUS rst = HAL_STATUS_SUCCESS;
- hex += board;
- hex += interfaceValue << 8;
- hex += platform << 16;
+ hex += ODM_ITRF_USB << 8;
+ hex += ODM_CE << 16;
hex += 0xFF000000;
biol = rtw_IOL_applied(Adapter);
@@ -241,7 +232,7 @@ enum HAL_STATUS ODM_ReadAndConfig_RadioA_1T_8188E(struct odm_dm_struct *pDM_Odm)
}
}
if (biol) {
- if (!rtw_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
+ if (!rtl8188e_IOL_exec_cmds_sync(pDM_Odm->Adapter, pxmit_frame, 1000, bndy_cnt)) {
rst = HAL_STATUS_FAILURE;
pr_info("~~~ IOL Config %s Failed !!!\n", __func__);
}
diff --git a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
index 356885e27edd..60d4ba275196 100644
--- a/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
+++ b/drivers/staging/r8188eu/hal/HalPhyRf_8188e.c
@@ -110,7 +110,6 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
bool is2t = false;
u8 OFDM_min_index = 6, rf; /* OFDM BB Swing should be less than +3.0dB, which is required by Arthur */
- u8 Indexforchannel = 0/*GetRightChnlPlaceforIQK(pHalData->CurrentChannel)*/;
s8 OFDM_index_mapping[2][index_mapping_NUM_88E] = {
{0, 0, 2, 3, 4, 4, /* 2.4G, decrease power */
5, 6, 7, 7, 8, 9,
@@ -280,8 +279,8 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
/* Adujst OFDM Ant_A according to IQK result */
ele_D = (OFDMSwingTable[(u8)OFDM_index[0]] & 0xFFC00000) >> 22;
- X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][0];
- Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][1];
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][0];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][1];
/* Revse TX power table. */
dm_odm->BbSwingIdxOfdm = (u8)OFDM_index[0];
@@ -315,10 +314,10 @@ odm_TXPowerTrackingCallback_ThermalMeter_8188E(
ele_D = (OFDMSwingTable[(u8)OFDM_index[1]] & 0xFFC00000) >> 22;
/* new element A = element D x X */
- X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][4];
- Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[Indexforchannel].Value[0][5];
+ X = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][4];
+ Y = dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][5];
- if ((X != 0) && (*dm_odm->pBandType == ODM_BAND_2_4G)) {
+ if (X != 0) {
if ((X & 0x00000200) != 0) /* consider minus */
X = X | 0xFFFFFC00;
ele_A = ((X * ele_D) >> 8) & 0x000003FF;
@@ -584,68 +583,12 @@ static void patha_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u
}
}
-static void pathb_fill_iqk(struct adapter *adapt, bool iqkok, s32 result[][8], u8 final_candidate, bool txonly)
-{
- u32 Oldval_1, X, TX1_A, reg;
- s32 Y, TX1_C;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- if (final_candidate == 0xFF) {
- return;
- } else if (iqkok) {
- Oldval_1 = (ODM_GetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, bMaskDWord) >> 22) & 0x3FF;
-
- X = result[final_candidate][4];
- if ((X & 0x00000200) != 0)
- X = X | 0xFFFFFC00;
- TX1_A = (X * Oldval_1) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x3FF, TX1_A);
-
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(27), ((X * Oldval_1 >> 7) & 0x1));
-
- Y = result[final_candidate][5];
- if ((Y & 0x00000200) != 0)
- Y = Y | 0xFFFFFC00;
-
- TX1_C = (Y * Oldval_1) >> 8;
- ODM_SetBBReg(dm_odm, rOFDM0_XDTxAFE, 0xF0000000, ((TX1_C & 0x3C0) >> 6));
- ODM_SetBBReg(dm_odm, rOFDM0_XBTxIQImbalance, 0x003F0000, (TX1_C & 0x3F));
-
- ODM_SetBBReg(dm_odm, rOFDM0_ECCAThreshold, BIT(25), ((Y * Oldval_1 >> 7) & 0x1));
-
- if (txonly)
- return;
-
- reg = result[final_candidate][6];
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0x3FF, reg);
-
- reg = result[final_candidate][7] & 0x3F;
- ODM_SetBBReg(dm_odm, rOFDM0_XBRxIQImbalance, 0xFC00, reg);
-
- reg = (result[final_candidate][7] >> 6) & 0xF;
- ODM_SetBBReg(dm_odm, rOFDM0_AGCRSSITable, 0x0000F000, reg);
- }
-}
-
-/* */
-/* 2011/07/26 MH Add an API for testing IQK fail case. */
-/* */
-/* MP Already declare in odm.c */
-static bool ODM_CheckPowerStatus(struct adapter *Adapter)
-{
- return true;
-}
-
void _PHY_SaveADDARegisters(struct adapter *adapt, u32 *ADDAReg, u32 *ADDABackup, u32 RegisterNum)
{
u32 i;
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- if (!ODM_CheckPowerStatus(adapt))
- return;
-
for (i = 0; i < RegisterNum; i++) {
ADDABackup[i] = ODM_GetBBReg(dm_odm, ADDAReg[i], bMaskDWord);
}
@@ -772,23 +715,11 @@ static bool phy_SimularityCompare_8188E(
)
{
u32 i, j, diff, sim_bitmap, bound = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */
bool result = true;
- bool is2t;
s32 tmp1 = 0, tmp2 = 0;
- if ((dm_odm->RFType == ODM_2T2R) || (dm_odm->RFType == ODM_2T3R) || (dm_odm->RFType == ODM_2T4R))
- is2t = true;
- else
- is2t = false;
-
- if (is2t)
- bound = 8;
- else
- bound = 4;
-
+ bound = 4;
sim_bitmap = 0;
for (i = 0; i < bound; i++) {
@@ -881,12 +812,7 @@ static void phy_IQCalibrate_8188E(struct adapter *adapt, s32 result[][8], u8 t,
rFPGA0_XAB_RFInterfaceSW, rFPGA0_XA_RFInterfaceOE,
rFPGA0_XB_RFInterfaceOE, rFPGA0_RFMOD
};
-
- u32 retryCount = 9;
- if (*dm_odm->mp_mode == 1)
- retryCount = 9;
- else
- retryCount = 2;
+ u32 retryCount = 2;
/* Note: IQ calibration must be performed after loading */
/* PHY_REG.txt , and radio_a, radio_b.txt */
@@ -1065,11 +991,10 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- struct mpt_context *pMptCtx = &adapt->mppriv.MptCtx;
s32 result[4][8]; /* last is final result */
u8 i, final_candidate;
- bool pathaok, pathbok;
- s32 RegE94, RegE9C, RegEA4, RegEB4, RegEBC, RegEC4;
+ bool pathaok;
+ s32 RegE94, RegE9C, RegEA4, RegEB4, RegEBC;
bool is12simular, is13simular, is23simular;
bool singletone = false, carrier_sup = false;
u32 IQK_BB_REG_92C[IQK_BB_REG_NUM] = {
@@ -1078,20 +1003,10 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
rOFDM0_XATxIQImbalance, rOFDM0_XBTxIQImbalance,
rOFDM0_XCTxAFE, rOFDM0_XDTxAFE,
rOFDM0_RxIQExtAnta};
- bool is2t;
-
- is2t = (dm_odm->RFType == ODM_2T2R) ? true : false;
- if (!ODM_CheckPowerStatus(adapt))
- return;
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
return;
- if (*dm_odm->mp_mode == 1) {
- singletone = pMptCtx->bSingleTone;
- carrier_sup = pMptCtx->bCarrierSuppression;
- }
-
/* 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) */
if (singletone || carrier_sup)
return;
@@ -1112,13 +1027,12 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
}
final_candidate = 0xff;
pathaok = false;
- pathbok = false;
is12simular = false;
is23simular = false;
is13simular = false;
for (i = 0; i < 3; i++) {
- phy_IQCalibrate_8188E(adapt, result, i, is2t);
+ phy_IQCalibrate_8188E(adapt, result, i, false);
if (i == 1) {
is12simular = phy_SimularityCompare_8188E(adapt, result, 0, 1);
@@ -1150,7 +1064,6 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
RegEA4 = result[i][2];
RegEB4 = result[i][4];
RegEBC = result[i][5];
- RegEC4 = result[i][6];
}
if (final_candidate != 0xff) {
@@ -1163,9 +1076,7 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
dm_odm->RFCalibrateInfo.RegE9C = RegE9C;
dm_odm->RFCalibrateInfo.RegEB4 = RegEB4;
dm_odm->RFCalibrateInfo.RegEBC = RegEBC;
- RegEC4 = result[final_candidate][6];
pathaok = true;
- pathbok = true;
} else {
dm_odm->RFCalibrateInfo.RegE94 = 0x100;
dm_odm->RFCalibrateInfo.RegEB4 = 0x100; /* X default value */
@@ -1174,17 +1085,13 @@ void PHY_IQCalibrate_8188E(struct adapter *adapt, bool recovery)
}
if (RegE94 != 0)
patha_fill_iqk(adapt, pathaok, result, final_candidate, (RegEA4 == 0));
- if (is2t) {
- if (RegEB4 != 0)
- pathb_fill_iqk(adapt, pathbok, result, final_candidate, (RegEC4 == 0));
- }
/* To Fix BSOD when final_candidate is 0xff */
/* by sherry 20120321 */
if (final_candidate < 4) {
for (i = 0; i < IQK_Matrix_REG_NUM; i++)
- dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[0].Value[0][i] = result[final_candidate][i];
- dm_odm->RFCalibrateInfo.IQKMatrixRegSetting[0].bIQKDone = true;
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.Value[0][i] = result[final_candidate][i];
+ dm_odm->RFCalibrateInfo.IQKMatrixRegSetting.bIQKDone = true;
}
_PHY_SaveADDARegisters(adapt, IQK_BB_REG_92C, dm_odm->RFCalibrateInfo.IQK_BB_backup_recover, 9);
@@ -1196,12 +1103,7 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
u32 timeout = 2000, timecount = 0;
struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
- struct mpt_context *pMptCtx = &adapt->mppriv.MptCtx;
- if (*dm_odm->mp_mode == 1) {
- singletone = pMptCtx->bSingleTone;
- carrier_sup = pMptCtx->bCarrierSuppression;
- }
if (!(dm_odm->SupportAbility & ODM_RF_CALIBRATION))
return;
/* 20120213<Kordan> Turn on when continuous Tx to pass lab testing. (required by Edlu) */
@@ -1213,52 +1115,5 @@ void PHY_LCCalibrate_8188E(struct adapter *adapt)
timecount += 50;
}
- dm_odm->RFCalibrateInfo.bLCKInProgress = true;
-
- if (dm_odm->RFType == ODM_2T2R) {
- phy_LCCalibrate_8188E(adapt, true);
- } else {
- /* For 88C 1T1R */
- phy_LCCalibrate_8188E(adapt, false);
- }
-
- dm_odm->RFCalibrateInfo.bLCKInProgress = false;
-}
-
-static void phy_setrfpathswitch_8188e(struct adapter *adapt, bool main, bool is2t)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- if (!adapt->hw_init_completed) {
- u8 u1btmp;
- u1btmp = ODM_Read1Byte(dm_odm, REG_LEDCFG2) | BIT(7);
- ODM_Write1Byte(dm_odm, REG_LEDCFG2, u1btmp);
- ODM_SetBBReg(dm_odm, rFPGA0_XAB_RFParameter, BIT(13), 0x01);
- }
-
- if (is2t) { /* 92C */
- if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT(5) | BIT(6), 0x1); /* 92C_Path_A */
- else
- ODM_SetBBReg(dm_odm, rFPGA0_XB_RFInterfaceOE, BIT(5) | BIT(6), 0x2); /* BT */
- } else { /* 88C */
- if (main)
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT(8) | BIT(9), 0x2); /* Main */
- else
- ODM_SetBBReg(dm_odm, rFPGA0_XA_RFInterfaceOE, BIT(8) | BIT(9), 0x1); /* Aux */
- }
-}
-
-void PHY_SetRFPathSwitch_8188E(struct adapter *adapt, bool main)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(adapt);
- struct odm_dm_struct *dm_odm = &pHalData->odmpriv;
-
- if (dm_odm->RFType == ODM_2T2R) {
- phy_setrfpathswitch_8188e(adapt, main, true);
- } else {
- /* For 88C 1T1R */
- phy_setrfpathswitch_8188e(adapt, main, false);
- }
+ phy_LCCalibrate_8188E(adapt, false);
}
diff --git a/drivers/staging/r8188eu/hal/hal_com.c b/drivers/staging/r8188eu/hal/hal_com.c
index f09d4d49b159..ba5d027d765f 100644
--- a/drivers/staging/r8188eu/hal/hal_com.c
+++ b/drivers/staging/r8188eu/hal/hal_com.c
@@ -15,18 +15,7 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
uint cnt = 0;
char buf[128];
- if (IS_81XXC(chip_vers)) {
- cnt += sprintf((buf + cnt), "Chip Version Info: %s_",
- IS_92C_SERIAL(chip_vers) ?
- "CHIP_8192C" : "CHIP_8188C");
- } else if (IS_92D(chip_vers)) {
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8192D_");
- } else if (IS_8723_SERIES(chip_vers)) {
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8723A_");
- } else if (IS_8188E(chip_vers)) {
- cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
- }
-
+ cnt += sprintf((buf + cnt), "Chip Version Info: CHIP_8188E_");
cnt += sprintf((buf + cnt), "%s_", IS_NORMAL_CHIP(chip_vers) ?
"Normal_Chip" : "Test_Chip");
cnt += sprintf((buf + cnt), "%s_", IS_CHIP_VENDOR_TSMC(chip_vers) ?
@@ -45,15 +34,7 @@ void dump_chip_info(struct HAL_VERSION chip_vers)
cnt += sprintf((buf + cnt), "UNKNOWN_CUT(%d)_",
chip_vers.CUTVersion);
- if (IS_1T1R(chip_vers))
- cnt += sprintf((buf + cnt), "1T1R_");
- else if (IS_1T2R(chip_vers))
- cnt += sprintf((buf + cnt), "1T2R_");
- else if (IS_2T2R(chip_vers))
- cnt += sprintf((buf + cnt), "2T2R_");
- else
- cnt += sprintf((buf + cnt), "UNKNOWN_RFTYPE(%d)_",
- chip_vers.RFType);
+ cnt += sprintf((buf + cnt), "1T1R_");
cnt += sprintf((buf + cnt), "RomVer(%d)\n", chip_vers.ROMVer);
@@ -300,8 +281,7 @@ bool Hal_MappingOutPipe(struct adapter *adapter, u8 numoutpipe)
void hal_init_macaddr(struct adapter *adapter)
{
- rtw_hal_set_hwreg(adapter, HW_VAR_MAC_ADDR,
- adapter->eeprompriv.mac_addr);
+ SetHwReg8188EU(adapter, HW_VAR_MAC_ADDR, adapter->eeprompriv.mac_addr);
}
/*
diff --git a/drivers/staging/r8188eu/hal/hal_intf.c b/drivers/staging/r8188eu/hal/hal_intf.c
index f27eba72d646..fee3a598b59d 100644
--- a/drivers/staging/r8188eu/hal/hal_intf.c
+++ b/drivers/staging/r8188eu/hal/hal_intf.c
@@ -6,83 +6,19 @@
#include "../include/drv_types.h"
#include "../include/hal_intf.h"
-void rtw_hal_chip_configure(struct adapter *adapt)
-{
- if (adapt->HalFunc.intf_chip_configure)
- adapt->HalFunc.intf_chip_configure(adapt);
-}
-
-void rtw_hal_read_chip_info(struct adapter *adapt)
-{
- if (adapt->HalFunc.read_adapter_info)
- adapt->HalFunc.read_adapter_info(adapt);
-}
-
-void rtw_hal_read_chip_version(struct adapter *adapt)
-{
- if (adapt->HalFunc.read_chip_version)
- adapt->HalFunc.read_chip_version(adapt);
-}
-
-void rtw_hal_def_value_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_default_value)
- adapt->HalFunc.init_default_value(adapt);
-}
-
-void rtw_hal_free_data(struct adapter *adapt)
-{
- if (adapt->HalFunc.free_hal_data)
- adapt->HalFunc.free_hal_data(adapt);
-}
-
-void rtw_hal_dm_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.dm_init)
- adapt->HalFunc.dm_init(adapt);
-}
-
-void rtw_hal_dm_deinit(struct adapter *adapt)
-{
- /* cancel dm timer */
- if (adapt->HalFunc.dm_deinit)
- adapt->HalFunc.dm_deinit(adapt);
-}
-
-void rtw_hal_sw_led_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.InitSwLeds)
- adapt->HalFunc.InitSwLeds(adapt);
-}
-
-void rtw_hal_sw_led_deinit(struct adapter *adapt)
-{
- if (adapt->HalFunc.DeInitSwLeds)
- adapt->HalFunc.DeInitSwLeds(adapt);
-}
-
-u32 rtw_hal_power_on(struct adapter *adapt)
-{
- if (adapt->HalFunc.hal_power_on)
- return adapt->HalFunc.hal_power_on(adapt);
- return _FAIL;
-}
-
uint rtw_hal_init(struct adapter *adapt)
{
uint status = _SUCCESS;
adapt->hw_init_completed = false;
- status = adapt->HalFunc.hal_init(adapt);
+ status = rtl8188eu_hal_init(adapt);
if (status == _SUCCESS) {
adapt->hw_init_completed = true;
if (adapt->registrypriv.notch_filter == 1)
- rtw_hal_notch_filter(adapt, 1);
-
- rtw_hal_reset_security_engine(adapt);
+ hal_notch_filter_8188e(adapt, 1);
} else {
adapt->hw_init_completed = false;
DBG_88E("rtw_hal_init: hal__init fail\n");
@@ -95,7 +31,7 @@ uint rtw_hal_deinit(struct adapter *adapt)
{
uint status = _SUCCESS;
- status = adapt->HalFunc.hal_deinit(adapt);
+ status = rtl8188eu_hal_deinit(adapt);
if (status == _SUCCESS)
adapt->hw_init_completed = false;
@@ -105,337 +41,18 @@ uint rtw_hal_deinit(struct adapter *adapt)
return status;
}
-void rtw_hal_set_hwreg(struct adapter *adapt, u8 variable, u8 *val)
-{
- if (adapt->HalFunc.SetHwRegHandler)
- adapt->HalFunc.SetHwRegHandler(adapt, variable, val);
-}
-
-void rtw_hal_get_hwreg(struct adapter *adapt, u8 variable, u8 *val)
-{
- if (adapt->HalFunc.GetHwRegHandler)
- adapt->HalFunc.GetHwRegHandler(adapt, variable, val);
-}
-
-u8 rtw_hal_set_def_var(struct adapter *adapt, enum hal_def_variable var,
- void *val)
-{
- if (adapt->HalFunc.SetHalDefVarHandler)
- return adapt->HalFunc.SetHalDefVarHandler(adapt, var, val);
- return _FAIL;
-}
-
-u8 rtw_hal_get_def_var(struct adapter *adapt,
- enum hal_def_variable var, void *val)
-{
- if (adapt->HalFunc.GetHalDefVarHandler)
- return adapt->HalFunc.GetHalDefVarHandler(adapt, var, val);
- return _FAIL;
-}
-
-void rtw_hal_set_odm_var(struct adapter *adapt,
- enum hal_odm_variable var, void *val1,
- bool set)
-{
- if (adapt->HalFunc.SetHalODMVarHandler)
- adapt->HalFunc.SetHalODMVarHandler(adapt, var,
- val1, set);
-}
-
-void rtw_hal_get_odm_var(struct adapter *adapt,
- enum hal_odm_variable var, void *val1,
- bool set)
-{
- if (adapt->HalFunc.GetHalODMVarHandler)
- adapt->HalFunc.GetHalODMVarHandler(adapt, var,
- val1, set);
-}
-
-void rtw_hal_enable_interrupt(struct adapter *adapt)
-{
- if (adapt->HalFunc.enable_interrupt)
- adapt->HalFunc.enable_interrupt(adapt);
- else
- DBG_88E("%s: HalFunc.enable_interrupt is NULL!\n", __func__);
-}
-
-void rtw_hal_disable_interrupt(struct adapter *adapt)
-{
- if (adapt->HalFunc.disable_interrupt)
- adapt->HalFunc.disable_interrupt(adapt);
- else
- DBG_88E("%s: HalFunc.disable_interrupt is NULL!\n", __func__);
-}
-
-u32 rtw_hal_inirp_init(struct adapter *adapt)
-{
- u32 rst = _FAIL;
-
- if (adapt->HalFunc.inirp_init)
- rst = adapt->HalFunc.inirp_init(adapt);
- else
- DBG_88E(" %s HalFunc.inirp_init is NULL!!!\n", __func__);
- return rst;
-}
-
-u32 rtw_hal_inirp_deinit(struct adapter *adapt)
-{
- if (adapt->HalFunc.inirp_deinit)
- return adapt->HalFunc.inirp_deinit(adapt);
-
- return _FAIL;
-}
-
-u8 rtw_hal_intf_ps_func(struct adapter *adapt,
- enum hal_intf_ps_func efunc_id, u8 *val)
-{
- if (adapt->HalFunc.interface_ps_func)
- return adapt->HalFunc.interface_ps_func(adapt, efunc_id,
- val);
- return _FAIL;
-}
-
-s32 rtw_hal_xmitframe_enqueue(struct adapter *padapter,
- struct xmit_frame *pxmitframe)
-{
- if (padapter->HalFunc.hal_xmitframe_enqueue)
- return padapter->HalFunc.hal_xmitframe_enqueue(padapter, pxmitframe);
- return false;
-}
-
-s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
-{
- if (adapt->HalFunc.hal_xmit)
- return adapt->HalFunc.hal_xmit(adapt, pxmitframe);
-
- return false;
-}
-
-s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
-{
- s32 ret = _FAIL;
- if (adapt->HalFunc.mgnt_xmit)
- ret = adapt->HalFunc.mgnt_xmit(adapt, pmgntframe);
- return ret;
-}
-
-s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_xmit_priv)
- return adapt->HalFunc.init_xmit_priv(adapt);
- return _FAIL;
-}
-
-s32 rtw_hal_init_recv_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.init_recv_priv)
- return adapt->HalFunc.init_recv_priv(adapt);
-
- return _FAIL;
-}
-
-void rtw_hal_free_recv_priv(struct adapter *adapt)
-{
- if (adapt->HalFunc.free_recv_priv)
- adapt->HalFunc.free_recv_priv(adapt);
-}
-
void rtw_hal_update_ra_mask(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
struct mlme_priv *pmlmepriv = &adapt->mlmepriv;
if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
-#ifdef CONFIG_88EU_AP_MODE
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &adapt->stapriv;
if (mac_id >= 2)
psta = pstapriv->sta_aid[(mac_id - 1) - 1];
if (psta)
add_RATid(adapt, psta, 0);/* todo: based on rssi_level*/
-#endif
} else {
- if (adapt->HalFunc.UpdateRAMaskHandler)
- adapt->HalFunc.UpdateRAMaskHandler(adapt, mac_id,
- rssi_level);
+ UpdateHalRAMask8188EUsb(adapt, mac_id, rssi_level);
}
}
-
-void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg,
- u8 rssi_level)
-{
- if (adapt->HalFunc.Add_RateATid)
- adapt->HalFunc.Add_RateATid(adapt, bitmap, arg,
- rssi_level);
-}
-
-/* Start specific interface thread */
-void rtw_hal_start_thread(struct adapter *adapt)
-{
- if (adapt->HalFunc.run_thread)
- adapt->HalFunc.run_thread(adapt);
-}
-
-/* Stop specific interface thread */
-void rtw_hal_stop_thread(struct adapter *adapt)
-{
- if (adapt->HalFunc.cancel_thread)
- adapt->HalFunc.cancel_thread(adapt);
-}
-
-u32 rtw_hal_read_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask)
-{
- u32 data = 0;
-
- if (adapt->HalFunc.read_bbreg)
- data = adapt->HalFunc.read_bbreg(adapt, regaddr, bitmask);
- return data;
-}
-
-void rtw_hal_write_bbreg(struct adapter *adapt, u32 regaddr, u32 bitmask,
- u32 data)
-{
- if (adapt->HalFunc.write_bbreg)
- adapt->HalFunc.write_bbreg(adapt, regaddr, bitmask, data);
-}
-
-u32 rtw_hal_read_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
- u32 regaddr, u32 bitmask)
-{
- u32 data = 0;
-
- if (adapt->HalFunc.read_rfreg)
- data = adapt->HalFunc.read_rfreg(adapt, rfpath, regaddr,
- bitmask);
- return data;
-}
-
-void rtw_hal_write_rfreg(struct adapter *adapt, enum rf_radio_path rfpath,
- u32 regaddr, u32 bitmask, u32 data)
-{
- if (adapt->HalFunc.write_rfreg)
- adapt->HalFunc.write_rfreg(adapt, rfpath, regaddr,
- bitmask, data);
-}
-
-s32 rtw_hal_interrupt_handler(struct adapter *adapt)
-{
- if (adapt->HalFunc.interrupt_handler)
- return adapt->HalFunc.interrupt_handler(adapt);
- return _FAIL;
-}
-
-void rtw_hal_set_bwmode(struct adapter *adapt,
- enum ht_channel_width bandwidth, u8 offset)
-{
- if (adapt->HalFunc.set_bwmode_handler)
- adapt->HalFunc.set_bwmode_handler(adapt, bandwidth,
- offset);
-}
-
-void rtw_hal_set_chan(struct adapter *adapt, u8 channel)
-{
- if (adapt->HalFunc.set_channel_handler)
- adapt->HalFunc.set_channel_handler(adapt, channel);
-}
-
-void rtw_hal_dm_watchdog(struct adapter *adapt)
-{
- if (adapt->HalFunc.hal_dm_watchdog)
- adapt->HalFunc.hal_dm_watchdog(adapt);
-}
-
-void rtw_hal_bcn_related_reg_setting(struct adapter *adapt)
-{
- if (adapt->HalFunc.SetBeaconRelatedRegistersHandler)
- adapt->HalFunc.SetBeaconRelatedRegistersHandler(adapt);
-}
-
-u8 rtw_hal_antdiv_before_linked(struct adapter *adapt)
-{
- if (adapt->HalFunc.AntDivBeforeLinkHandler)
- return adapt->HalFunc.AntDivBeforeLinkHandler(adapt);
- return false;
-}
-
-void rtw_hal_antdiv_rssi_compared(struct adapter *adapt,
- struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src)
-{
- if (adapt->HalFunc.AntDivCompareHandler)
- adapt->HalFunc.AntDivCompareHandler(adapt, dst, src);
-}
-
-void rtw_hal_sreset_init(struct adapter *adapt)
-{
- if (adapt->HalFunc.sreset_init_value)
- adapt->HalFunc.sreset_init_value(adapt);
-}
-
-void rtw_hal_sreset_reset(struct adapter *adapt)
-{
- if (adapt->HalFunc.silentreset)
- adapt->HalFunc.silentreset(adapt);
-}
-
-void rtw_hal_sreset_reset_value(struct adapter *adapt)
-{
- if (adapt->HalFunc.sreset_reset_value)
- adapt->HalFunc.sreset_reset_value(adapt);
-}
-
-void rtw_hal_sreset_xmit_status_check(struct adapter *adapt)
-{
- if (adapt->HalFunc.sreset_xmit_status_check)
- adapt->HalFunc.sreset_xmit_status_check(adapt);
-}
-
-void rtw_hal_sreset_linked_status_check(struct adapter *adapt)
-{
- if (adapt->HalFunc.sreset_linked_status_check)
- adapt->HalFunc.sreset_linked_status_check(adapt);
-}
-
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *adapt)
-{
- u8 status = 0;
-
- if (adapt->HalFunc.sreset_get_wifi_status)
- status = adapt->HalFunc.sreset_get_wifi_status(adapt);
- return status;
-}
-
-int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
- u32 max_wating_ms, u32 bndy_cnt)
-{
- if (adapter->HalFunc.IOL_exec_cmds_sync)
- return adapter->HalFunc.IOL_exec_cmds_sync(adapter, xmit_frame,
- max_wating_ms,
- bndy_cnt);
- return _FAIL;
-}
-
-void rtw_hal_notch_filter(struct adapter *adapter, bool enable)
-{
- if (adapter->HalFunc.hal_notch_filter)
- adapter->HalFunc.hal_notch_filter(adapter, enable);
-}
-
-void rtw_hal_reset_security_engine(struct adapter *adapter)
-{
- if (adapter->HalFunc.hal_reset_security_engine)
- adapter->HalFunc.hal_reset_security_engine(adapter);
-}
-
-s32 rtw_hal_c2h_handler(struct adapter *adapter, struct c2h_evt_hdr *c2h_evt)
-{
- s32 ret = _FAIL;
-
- if (adapter->HalFunc.c2h_handler)
- ret = adapter->HalFunc.c2h_handler(adapter, c2h_evt);
- return ret;
-}
-
-c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter)
-{
- return adapter->HalFunc.c2h_id_filter_ccx;
-}
diff --git a/drivers/staging/r8188eu/hal/odm.c b/drivers/staging/r8188eu/hal/odm.c
index ed94f64d878d..21f115194df8 100644
--- a/drivers/staging/r8188eu/hal/odm.c
+++ b/drivers/staging/r8188eu/hal/odm.c
@@ -5,17 +5,6 @@
#include "../include/odm_precomp.h"
-static const u16 dB_Invert_Table[8][12] = {
- {1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4},
- {4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16},
- {18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63},
- {71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251},
- {282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000},
- {1122, 1259, 1413, 1585, 1778, 1995, 2239, 2512, 2818, 3162, 3548, 3981},
- {4467, 5012, 5623, 6310, 7079, 7943, 8913, 10000, 11220, 12589, 14125, 15849},
- {17783, 19953, 22387, 25119, 28184, 31623, 35481, 39811, 44668, 50119, 56234, 65535}
-};
-
/* avoid warning in FreeBSD ==> TODO: modify */
static u32 EDCAParam[HT_IOT_PEER_MAX][3] = {
/* UL DL */
@@ -163,22 +152,15 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
odm_DIGInit(pDM_Odm);
odm_RateAdaptiveMaskInit(pDM_Odm);
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
- odm_DynamicBBPowerSavingInit(pDM_Odm);
- odm_DynamicTxPowerInit(pDM_Odm);
- odm_TXPowerTrackingInit(pDM_Odm);
- ODM_EdcaTurboInit(pDM_Odm);
- ODM_RAInfo_Init_all(pDM_Odm);
- if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
- (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
- odm_InitHybridAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivInit(pDM_Odm);
- }
+ odm_PrimaryCCA_Init(pDM_Odm); /* Gary */
+ odm_DynamicBBPowerSavingInit(pDM_Odm);
+ odm_TXPowerTrackingInit(pDM_Odm);
+ ODM_EdcaTurboInit(pDM_Odm);
+ ODM_RAInfo_Init_all(pDM_Odm);
+ if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
+ (pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
+ odm_InitHybridAntDiv(pDM_Odm);
}
/* 2011/09/20 MH This is the entry point for all teams to execute HW outsourced DM. */
@@ -187,22 +169,11 @@ void ODM_DMInit(struct odm_dm_struct *pDM_Odm)
void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
{
/* 2012.05.03 Luke: For all IC series */
- odm_GlobalAdapterCheck();
odm_CommonInfoSelfUpdate(pDM_Odm);
odm_FalseAlarmCounterStatistics(pDM_Odm);
odm_RSSIMonitorCheck(pDM_Odm);
- /* For CE Platform(SPRD or Tablet) */
- /* 8723A or 8189ES platform */
- /* NeilChen--2012--08--24-- */
- /* Fix Leave LPS issue */
- if ((pDM_Odm->Adapter->pwrctrlpriv.pwr_mode != PS_MODE_ACTIVE) &&/* in LPS mode */
- ((pDM_Odm->SupportICType & (ODM_RTL8723A)) ||
- (pDM_Odm->SupportICType & (ODM_RTL8188E) &&
- ((pDM_Odm->SupportInterface == ODM_ITRF_SDIO)))))
- odm_DIGbyRSSI_LPS(pDM_Odm);
- else
- odm_DIG(pDM_Odm);
+ odm_DIG(pDM_Odm);
odm_CCKPacketDetectionThresh(pDM_Odm);
if (*pDM_Odm->pbPowerSaving)
@@ -210,22 +181,13 @@ void ODM_DMWatchdog(struct odm_dm_struct *pDM_Odm)
odm_RefreshRateAdaptiveMask(pDM_Odm);
- odm_DynamicBBPowerSaving(pDM_Odm);
if ((pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV) ||
(pDM_Odm->AntDivType == CGCS_RX_HW_ANTDIV) ||
(pDM_Odm->AntDivType == CG_TRX_SMART_ANTDIV))
odm_HwAntDiv(pDM_Odm);
- else if (pDM_Odm->AntDivType == CGCS_RX_SW_ANTDIV)
- odm_SwAntDivChkAntSwitch(pDM_Odm, SWAW_STEP_PEAK);
-
- if (pDM_Odm->SupportICType & ODM_IC_11AC_SERIES) {
- ;
- } else if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- ODM_TXPowerTrackingCheck(pDM_Odm);
- odm_EdcaTurboCheck(pDM_Odm);
- odm_DynamicTxPower(pDM_Odm);
- }
- odm_dtc(pDM_Odm);
+
+ ODM_TXPowerTrackingCheck(pDM_Odm);
+ odm_EdcaTurboCheck(pDM_Odm);
}
/* Init /.. Fixed HW value. Only init time. */
@@ -237,54 +199,12 @@ void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn
case ODM_CMNINFO_ABILITY:
pDM_Odm->SupportAbility = (u32)Value;
break;
- case ODM_CMNINFO_PLATFORM:
- pDM_Odm->SupportPlatform = (u8)Value;
- break;
- case ODM_CMNINFO_INTERFACE:
- pDM_Odm->SupportInterface = (u8)Value;
- break;
case ODM_CMNINFO_MP_TEST_CHIP:
pDM_Odm->bIsMPChip = (u8)Value;
break;
- case ODM_CMNINFO_IC_TYPE:
- pDM_Odm->SupportICType = Value;
- break;
- case ODM_CMNINFO_CUT_VER:
- pDM_Odm->CutVersion = (u8)Value;
- break;
- case ODM_CMNINFO_FAB_VER:
- pDM_Odm->FabVersion = (u8)Value;
- break;
- case ODM_CMNINFO_RF_TYPE:
- pDM_Odm->RFType = (u8)Value;
- break;
case ODM_CMNINFO_RF_ANTENNA_TYPE:
pDM_Odm->AntDivType = (u8)Value;
break;
- case ODM_CMNINFO_BOARD_TYPE:
- pDM_Odm->BoardType = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_LNA:
- pDM_Odm->ExtLNA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_PA:
- pDM_Odm->ExtPA = (u8)Value;
- break;
- case ODM_CMNINFO_EXT_TRSW:
- pDM_Odm->ExtTRSW = (u8)Value;
- break;
- case ODM_CMNINFO_PATCH_ID:
- pDM_Odm->PatchID = (u8)Value;
- break;
- case ODM_CMNINFO_BINHCT_TEST:
- pDM_Odm->bInHctTest = (bool)Value;
- break;
- case ODM_CMNINFO_BWIFI_TEST:
- pDM_Odm->bWIFITest = (bool)Value;
- break;
- case ODM_CMNINFO_SMART_CONCURRENT:
- pDM_Odm->bDualMacSmartConcurrent = (bool)Value;
- break;
/* To remove the compiler warning, must add an empty default statement to handle the other values. */
default:
/* do nothing */
@@ -305,9 +225,6 @@ void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn
/* */
switch (CmnInfo) {
/* Dynamic call by reference pointer. */
- case ODM_CMNINFO_MAC_PHY_MODE:
- pDM_Odm->pMacPhyMode = (u8 *)pValue;
- break;
case ODM_CMNINFO_TX_UNI:
pDM_Odm->pNumTxBytesUnicast = (u64 *)pValue;
break;
@@ -317,9 +234,6 @@ void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn
case ODM_CMNINFO_WM_MODE:
pDM_Odm->pWirelessMode = (u8 *)pValue;
break;
- case ODM_CMNINFO_BAND:
- pDM_Odm->pBandType = (u8 *)pValue;
- break;
case ODM_CMNINFO_SEC_CHNL_OFFSET:
pDM_Odm->pSecChOffset = (u8 *)pValue;
break;
@@ -332,57 +246,15 @@ void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def Cmn
case ODM_CMNINFO_CHNL:
pDM_Odm->pChannel = (u8 *)pValue;
break;
- case ODM_CMNINFO_DMSP_GET_VALUE:
- pDM_Odm->pbGetValueFromOtherMac = (bool *)pValue;
- break;
- case ODM_CMNINFO_BUDDY_ADAPTOR:
- pDM_Odm->pBuddyAdapter = (struct adapter **)pValue;
- break;
- case ODM_CMNINFO_DMSP_IS_MASTER:
- pDM_Odm->pbMasterOfDMSP = (bool *)pValue;
- break;
case ODM_CMNINFO_SCAN:
pDM_Odm->pbScanInProcess = (bool *)pValue;
break;
case ODM_CMNINFO_POWER_SAVING:
pDM_Odm->pbPowerSaving = (bool *)pValue;
break;
- case ODM_CMNINFO_ONE_PATH_CCA:
- pDM_Odm->pOnePathCCA = (u8 *)pValue;
- break;
- case ODM_CMNINFO_DRV_STOP:
- pDM_Odm->pbDriverStopped = (bool *)pValue;
- break;
- case ODM_CMNINFO_PNP_IN:
- pDM_Odm->pbDriverIsGoingToPnpSetPowerSleep = (bool *)pValue;
- break;
- case ODM_CMNINFO_INIT_ON:
- pDM_Odm->pinit_adpt_in_progress = (bool *)pValue;
- break;
- case ODM_CMNINFO_ANT_TEST:
- pDM_Odm->pAntennaTest = (u8 *)pValue;
- break;
case ODM_CMNINFO_NET_CLOSED:
pDM_Odm->pbNet_closed = (bool *)pValue;
break;
- case ODM_CMNINFO_MP_MODE:
- pDM_Odm->mp_mode = (u8 *)pValue;
- break;
- /* To remove the compiler warning, must add an empty default statement to handle the other values. */
- default:
- /* do nothing */
- break;
- }
-}
-
-void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm, enum odm_common_info_def CmnInfo, u16 Index, void *pValue)
-{
- /* Hook call by reference pointer. */
- switch (CmnInfo) {
- /* Dynamic call by reference pointer. */
- case ODM_CMNINFO_STA_STATUS:
- pDM_Odm->pODM_StaInfo[Index] = (struct sta_info *)pValue;
- break;
/* To remove the compiler warning, must add an empty default statement to handle the other values. */
default:
/* do nothing */
@@ -400,9 +272,6 @@ void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
case ODM_CMNINFO_ABILITY:
pDM_Odm->SupportAbility = (u32)Value;
break;
- case ODM_CMNINFO_RF_TYPE:
- pDM_Odm->RFType = (u8)Value;
- break;
case ODM_CMNINFO_WIFI_DIRECT:
pDM_Odm->bWIFI_Direct = (bool)Value;
break;
@@ -415,12 +284,6 @@ void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value)
case ODM_CMNINFO_RSSI_MIN:
pDM_Odm->RSSI_Min = (u8)Value;
break;
- case ODM_CMNINFO_RA_THRESHOLD_HIGH:
- pDM_Odm->RateAdaptive.HighRSSIThresh = (u8)Value;
- break;
- case ODM_CMNINFO_RA_THRESHOLD_LOW:
- pDM_Odm->RateAdaptive.LowRSSIThresh = (u8)Value;
- break;
}
}
@@ -428,10 +291,6 @@ void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm)
{
pDM_Odm->bCckHighPower = (bool)ODM_GetBBReg(pDM_Odm, 0x824, BIT(9));
pDM_Odm->RFPathRxEnable = (u8)ODM_GetBBReg(pDM_Odm, 0xc04, 0x0F);
- if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
- pDM_Odm->AntDivType = CG_TRX_HW_ANTDIV;
- if (pDM_Odm->SupportICType & (ODM_RTL8723A))
- pDM_Odm->AntDivType = CGCS_RX_SW_ANTDIV;
}
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
@@ -460,118 +319,27 @@ void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm)
pDM_Odm->bOneEntryOnly = false;
}
-static int getIGIForDiff(int value_IGI)
-{
- #define ONERCCA_LOW_TH 0x30
- #define ONERCCA_LOW_DIFF 8
-
- if (value_IGI < ONERCCA_LOW_TH) {
- if ((ONERCCA_LOW_TH - value_IGI) < ONERCCA_LOW_DIFF)
- return ONERCCA_LOW_TH;
- else
- return value_IGI + ONERCCA_LOW_DIFF;
- } else {
- return value_IGI;
- }
-}
-
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
if (pDM_DigTable->CurIGValue != CurrentIGI) {
- if (pDM_Odm->SupportPlatform & (ODM_CE | ODM_MP)) {
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- } else if (pDM_Odm->SupportPlatform & (ODM_AP | ODM_ADSL)) {
- switch (*pDM_Odm->pOnePathCCA) {
- case ODM_CCA_2R:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- case ODM_CCA_1R_A:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- break;
- case ODM_CCA_1R_B:
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm), getIGIForDiff(CurrentIGI));
- if (pDM_Odm->SupportICType != ODM_RTL8188E)
- ODM_SetBBReg(pDM_Odm, ODM_REG(IGI_B, pDM_Odm), ODM_BIT(IGI, pDM_Odm), CurrentIGI);
- break;
- }
- }
- /* pDM_DigTable->PreIGValue = pDM_DigTable->CurIGValue; */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N, CurrentIGI);
pDM_DigTable->CurIGValue = CurrentIGI;
}
-/* Add by Neil Chen to enable edcca to MP Platform */
-}
-
-/* Need LPS mode for CE platform --2012--08--24--- */
-/* 8723AS/8189ES */
-void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *pAdapter = pDM_Odm->Adapter;
- struct false_alarm_stats *pFalseAlmCnt = &pDM_Odm->FalseAlmCnt;
-
- u8 RSSI_Lower = DM_DIG_MIN_NIC; /* 0x1E or 0x1C */
- u8 bFwCurrentInPSMode = false;
- u8 CurrentIGI = pDM_Odm->RSSI_Min;
-
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8188E)))
- return;
-
- CurrentIGI = CurrentIGI + RSSI_OFFSET_DIG;
- bFwCurrentInPSMode = pAdapter->pwrctrlpriv.bFwCurrentInPSMode;
-
- /* Using FW PS mode to make IGI */
- if (bFwCurrentInPSMode) {
- /* Adjust by FA in LPS MODE */
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_LPS)
- CurrentIGI = CurrentIGI + 2;
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_LPS)
- CurrentIGI = CurrentIGI + 1;
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_LPS)
- CurrentIGI = CurrentIGI - 1;
- } else {
- CurrentIGI = RSSI_Lower;
- }
-
- /* Lower bound checking */
-
- /* RSSI Lower bound check */
- if ((pDM_Odm->RSSI_Min - 10) > DM_DIG_MIN_NIC)
- RSSI_Lower = (pDM_Odm->RSSI_Min - 10);
- else
- RSSI_Lower = DM_DIG_MIN_NIC;
-
- /* Upper and Lower Bound checking */
- if (CurrentIGI > DM_DIG_MAX_NIC)
- CurrentIGI = DM_DIG_MAX_NIC;
- else if (CurrentIGI < RSSI_Lower)
- CurrentIGI = RSSI_Lower;
-
- ODM_Write_DIG(pDM_Odm, CurrentIGI);/* ODM_Write_DIG(pDM_Odm, pDM_DigTable->CurIGValue); */
}
void odm_DIGInit(struct odm_dm_struct *pDM_Odm)
{
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
- pDM_DigTable->CurIGValue = (u8)ODM_GetBBReg(pDM_Odm, ODM_REG(IGI_A, pDM_Odm), ODM_BIT(IGI, pDM_Odm));
+ pDM_DigTable->CurIGValue = (u8)ODM_GetBBReg(pDM_Odm, ODM_REG_IGI_A_11N, ODM_BIT_IGI_11N);
pDM_DigTable->RssiLowThresh = DM_DIG_THRESH_LOW;
pDM_DigTable->RssiHighThresh = DM_DIG_THRESH_HIGH;
pDM_DigTable->FALowThresh = DM_false_ALARM_THRESH_LOW;
pDM_DigTable->FAHighThresh = DM_false_ALARM_THRESH_HIGH;
- if (pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) {
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- } else {
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
- }
+ pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
+ pDM_DigTable->rx_gain_range_min = DM_DIG_MIN_NIC;
pDM_DigTable->BackoffVal = DM_DIG_BACKOFF_DEFAULT;
pDM_DigTable->BackoffVal_range_max = DM_DIG_BACKOFF_MAX;
pDM_DigTable->BackoffVal_range_min = DM_DIG_BACKOFF_MIN;
@@ -609,86 +377,38 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
if (!pDM_Odm->bDMInitialGainEnable)
return;
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (*pDM_Odm->pMacPhyMode == ODM_DMSP) {
- if (*pDM_Odm->pbMasterOfDMSP) {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_1;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_1);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_1);
- }
- } else {
- DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
- FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
- FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
- }
+ DIG_Dynamic_MIN = pDM_DigTable->DIG_Dynamic_MIN_0;
+ FirstConnect = (pDM_Odm->bLinked) && (!pDM_DigTable->bMediaConnect_0);
+ FirstDisConnect = (!pDM_Odm->bLinked) && (pDM_DigTable->bMediaConnect_0);
/* 1 Boundary Decision */
- if ((pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8723A)) &&
- ((pDM_Odm->BoardType == ODM_BOARD_HIGHPWR) || pDM_Odm->ExtLNA)) {
- if (pDM_Odm->SupportPlatform & (ODM_AP | ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP_HP;
- dm_dig_min = DM_DIG_MIN_AP_HP;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC_HP;
- dm_dig_min = DM_DIG_MIN_NIC_HP;
- }
- DIG_MaxOfMin = DM_DIG_MAX_AP_HP;
- } else {
- if (pDM_Odm->SupportPlatform & (ODM_AP | ODM_ADSL)) {
- dm_dig_max = DM_DIG_MAX_AP;
- dm_dig_min = DM_DIG_MIN_AP;
- DIG_MaxOfMin = dm_dig_max;
- } else {
- dm_dig_max = DM_DIG_MAX_NIC;
- dm_dig_min = DM_DIG_MIN_NIC;
- DIG_MaxOfMin = DM_DIG_MAX_AP;
- }
- }
+ dm_dig_max = DM_DIG_MAX_NIC;
+ dm_dig_min = DM_DIG_MIN_NIC;
+ DIG_MaxOfMin = DM_DIG_MAX_AP;
+
if (pDM_Odm->bLinked) {
- /* 2 8723A Series, offset need to be 10 */
- if (pDM_Odm->SupportICType == (ODM_RTL8723A)) {
- /* 2 Upper Bound */
- if ((pDM_Odm->RSSI_Min + 10) > DM_DIG_MAX_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MAX_NIC;
- else if ((pDM_Odm->RSSI_Min + 10) < DM_DIG_MIN_NIC)
- pDM_DigTable->rx_gain_range_max = DM_DIG_MIN_NIC;
+ /* 2 8723A Series, offset need to be 10 */
+ /* 2 Modify DIG upper bound */
+ if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
+ pDM_DigTable->rx_gain_range_max = dm_dig_max;
+ else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
+ pDM_DigTable->rx_gain_range_max = dm_dig_min;
+ else
+ pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
+ /* 2 Modify DIG lower bound */
+ if (pDM_Odm->bOneEntryOnly) {
+ if (pDM_Odm->RSSI_Min < dm_dig_min)
+ DIG_Dynamic_MIN = dm_dig_min;
+ else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
+ DIG_Dynamic_MIN = DIG_MaxOfMin;
else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 10;
- /* 2 If BT is Concurrent, need to set Lower Bound */
- DIG_Dynamic_MIN = DM_DIG_MIN_NIC;
+ DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
+ } else if (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV) {
+ /* 1 Lower Bound for 88E AntDiv */
+ if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV)
+ DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
} else {
- /* 2 Modify DIG upper bound */
- if ((pDM_Odm->RSSI_Min + 20) > dm_dig_max)
- pDM_DigTable->rx_gain_range_max = dm_dig_max;
- else if ((pDM_Odm->RSSI_Min + 20) < dm_dig_min)
- pDM_DigTable->rx_gain_range_max = dm_dig_min;
- else
- pDM_DigTable->rx_gain_range_max = pDM_Odm->RSSI_Min + 20;
- /* 2 Modify DIG lower bound */
- if (pDM_Odm->bOneEntryOnly) {
- if (pDM_Odm->RSSI_Min < dm_dig_min)
- DIG_Dynamic_MIN = dm_dig_min;
- else if (pDM_Odm->RSSI_Min > DIG_MaxOfMin)
- DIG_Dynamic_MIN = DIG_MaxOfMin;
- else
- DIG_Dynamic_MIN = pDM_Odm->RSSI_Min;
- } else if ((pDM_Odm->SupportICType == ODM_RTL8188E) &&
- (pDM_Odm->SupportAbility & ODM_BB_ANT_DIV)) {
- /* 1 Lower Bound for 88E AntDiv */
- if (pDM_Odm->AntDivType == CG_TRX_HW_ANTDIV)
- DIG_Dynamic_MIN = (u8)pDM_DigTable->AntDiv_RSSI_max;
- } else {
- DIG_Dynamic_MIN = dm_dig_min;
- }
+ DIG_Dynamic_MIN = dm_dig_min;
}
} else {
pDM_DigTable->rx_gain_range_max = dm_dig_max;
@@ -736,21 +456,12 @@ void odm_DIG(struct odm_dm_struct *pDM_Odm)
if (FirstConnect) {
CurrentIGI = pDM_Odm->RSSI_Min;
} else {
- if (pDM_Odm->SupportICType == ODM_RTL8192D) {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2_92D)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1_92D)
- CurrentIGI = CurrentIGI + 1; /* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0_92D)
- CurrentIGI = CurrentIGI - 1;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- } else {
- if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
- CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
- else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
- CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
- else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
- CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
- }
+ if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH2)
+ CurrentIGI = CurrentIGI + 4;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+2; */
+ else if (pFalseAlmCnt->Cnt_all > DM_DIG_FA_TH1)
+ CurrentIGI = CurrentIGI + 2;/* pDM_DigTable->CurIGValue = pDM_DigTable->PreIGValue+1; */
+ else if (pFalseAlmCnt->Cnt_all < DM_DIG_FA_TH0)
+ CurrentIGI = CurrentIGI - 2;/* pDM_DigTable->CurIGValue =pDM_DigTable->PreIGValue-1; */
}
} else {
if (FirstDisConnect) {
@@ -790,85 +501,51 @@ void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm)
if (!(pDM_Odm->SupportAbility & ODM_BB_FA_CNT))
return;
- if (pDM_Odm->SupportICType & ODM_IC_11N_SERIES) {
- /* hold ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
- FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_Parity_Fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_Crc8_fail = ((ret_value & 0xffff0000) >> 16);
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
- FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
-
- FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
- FalseAlmCnt->Cnt_BW_USC = ((ret_value & 0xffff0000) >> 16);
- }
-
- /* hold cck counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
- FalseAlmCnt->Cnt_Cck_fail = ret_value;
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
- FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
-
- ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
- FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
-
- FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
- FalseAlmCnt->Cnt_SB_Search_fail +
- FalseAlmCnt->Cnt_Parity_Fail +
- FalseAlmCnt->Cnt_Rate_Illegal +
- FalseAlmCnt->Cnt_Crc8_fail +
- FalseAlmCnt->Cnt_Mcs_fail +
- FalseAlmCnt->Cnt_Cck_fail);
-
- FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
-
- if (pDM_Odm->SupportICType >= ODM_RTL8723A) {
- /* reset false alarm counter registers */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT(31), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTC_11N, BIT(31), 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(27), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(27), 0);
- /* update ofdm counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 0); /* update page C counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 0); /* update page D counter */
-
- /* reset CCK CCA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(13) | BIT(12), 2);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(15) | BIT(14), 2);
- }
- } else { /* FOR ODM_IC_11AC_SERIES */
- /* read OFDM FA counter */
- FalseAlmCnt->Cnt_Ofdm_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_Cck_fail = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_11AC, bMaskLWord);
- FalseAlmCnt->Cnt_all = FalseAlmCnt->Cnt_Ofdm_fail + FalseAlmCnt->Cnt_Cck_fail;
-
- /* reset OFDM FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 1);
- ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RST_11AC, BIT(17), 0);
- /* reset CCK FA counter */
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT(15), 0);
- ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11AC, BIT(15), 1);
- }
+ /* hold ofdm counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_HOLDC_11N, BIT(31), 1); /* hold page C counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_OFDM_FA_RSTD_11N, BIT(31), 1); /* hold page D counter */
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE1_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Fast_Fsync = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_SB_Search_fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE2_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_OFDM_CCA = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_Parity_Fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE3_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Rate_Illegal = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_Crc8_fail = ((ret_value & 0xffff0000) >> 16);
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_OFDM_FA_TYPE4_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_Mcs_fail = (ret_value & 0xffff);
+
+ FalseAlmCnt->Cnt_Ofdm_fail = FalseAlmCnt->Cnt_Parity_Fail + FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail + FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Fast_Fsync + FalseAlmCnt->Cnt_SB_Search_fail;
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_SC_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_BW_LSC = (ret_value & 0xffff);
+ FalseAlmCnt->Cnt_BW_USC = ((ret_value & 0xffff0000) >> 16);
+
+ /* hold cck counter */
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(12), 1);
+ ODM_SetBBReg(pDM_Odm, ODM_REG_CCK_FA_RST_11N, BIT(14), 1);
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_LSB_11N, bMaskByte0);
+ FalseAlmCnt->Cnt_Cck_fail = ret_value;
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_FA_MSB_11N, bMaskByte3);
+ FalseAlmCnt->Cnt_Cck_fail += (ret_value & 0xff) << 8;
+
+ ret_value = ODM_GetBBReg(pDM_Odm, ODM_REG_CCK_CCA_CNT_11N, bMaskDWord);
+ FalseAlmCnt->Cnt_CCK_CCA = ((ret_value & 0xFF) << 8) | ((ret_value & 0xFF00) >> 8);
+
+ FalseAlmCnt->Cnt_all = (FalseAlmCnt->Cnt_Fast_Fsync +
+ FalseAlmCnt->Cnt_SB_Search_fail +
+ FalseAlmCnt->Cnt_Parity_Fail +
+ FalseAlmCnt->Cnt_Rate_Illegal +
+ FalseAlmCnt->Cnt_Crc8_fail +
+ FalseAlmCnt->Cnt_Mcs_fail +
+ FalseAlmCnt->Cnt_Cck_fail);
+
+ FalseAlmCnt->Cnt_CCA_all = FalseAlmCnt->Cnt_OFDM_CCA + FalseAlmCnt->Cnt_CCK_CCA;
}
/* 3============================================================ */
@@ -882,8 +559,6 @@ void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm)
if (!(pDM_Odm->SupportAbility & (ODM_BB_CCK_PD | ODM_BB_FA_CNT)))
return;
- if (pDM_Odm->ExtLNA)
- return;
if (pDM_Odm->bLinked) {
if (pDM_Odm->RSSI_Min > 25) {
CurCCK_CCAThres = 0xcd;
@@ -909,7 +584,7 @@ void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres)
struct rtw_dig *pDM_DigTable = &pDM_Odm->DM_DigTable;
if (pDM_DigTable->CurCCK_CCAThres != CurCCK_CCAThres) /* modify by Guo.Mingzhi 2012-01-03 */
- ODM_Write1Byte(pDM_Odm, ODM_REG(CCK_CCA, pDM_Odm), CurCCK_CCAThres);
+ ODM_Write1Byte(pDM_Odm, ODM_REG_CCK_CCA_11N, CurCCK_CCAThres);
pDM_DigTable->PreCCK_CCAThres = pDM_DigTable->CurCCK_CCAThres;
pDM_DigTable->CurCCK_CCAThres = CurCCK_CCAThres;
}
@@ -929,69 +604,12 @@ void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm)
pDM_PSTable->initialize = 0;
}
-void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm)
-{
- if ((pDM_Odm->SupportICType != ODM_RTL8192C) && (pDM_Odm->SupportICType != ODM_RTL8723A))
- return;
- if (!(pDM_Odm->SupportAbility & ODM_BB_PWR_SAVE))
- return;
- if (!(pDM_Odm->SupportPlatform & (ODM_MP | ODM_CE)))
- return;
-
- /* 1 2.Power Saving for 92C */
- if ((pDM_Odm->SupportICType == ODM_RTL8192C) && (pDM_Odm->RFType == ODM_2T2R)) {
- odm_1R_CCA(pDM_Odm);
- } else {
- /* 20100628 Joseph: Turn off BB power save for 88CE because it makes throughput unstable. */
- /* 20100831 Joseph: Turn ON BB power save again after modifying AGC delay from 900ns to 600ns. */
- /* 1 3.Power Saving for 88C */
- ODM_RF_Saving(pDM_Odm, false);
- }
-}
-
-void odm_1R_CCA(struct odm_dm_struct *pDM_Odm)
-{
- struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
-
- if (pDM_Odm->RSSI_Min != 0xFF) {
- if (pDM_PSTable->pre_cca_state == CCA_2R) {
- if (pDM_Odm->RSSI_Min >= 35)
- pDM_PSTable->cur_cca_state = CCA_1R;
- else
- pDM_PSTable->cur_cca_state = CCA_2R;
- } else {
- if (pDM_Odm->RSSI_Min <= 30)
- pDM_PSTable->cur_cca_state = CCA_2R;
- else
- pDM_PSTable->cur_cca_state = CCA_1R;
- }
- } else {
- pDM_PSTable->cur_cca_state = CCA_MAX;
- }
-
- if (pDM_PSTable->pre_cca_state != pDM_PSTable->cur_cca_state) {
- if (pDM_PSTable->cur_cca_state == CCA_1R) {
- if (pDM_Odm->RFType == ODM_2T2R)
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x13);
- else
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x23);
- } else {
- ODM_SetBBReg(pDM_Odm, 0xc04, bMaskByte0, 0x33);
- }
- pDM_PSTable->pre_cca_state = pDM_PSTable->cur_cca_state;
- }
-}
-
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
{
struct rtl_ps *pDM_PSTable = &pDM_Odm->DM_PSTable;
u8 Rssi_Up_bound = 30;
u8 Rssi_Low_bound = 25;
- if (pDM_Odm->PatchID == 40) { /* RT_CID_819x_FUNAI_TV */
- Rssi_Up_bound = 50;
- Rssi_Low_bound = 45;
- }
if (pDM_PSTable->initialize == 0) {
pDM_PSTable->reg_874 = (ODM_GetBBReg(pDM_Odm, 0x874, bMaskDWord) & 0x1CC000) >> 14;
pDM_PSTable->reg_c70 = (ODM_GetBBReg(pDM_Odm, 0xc70, bMaskDWord) & BIT(3)) >> 3;
@@ -1022,10 +640,6 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
if (pDM_PSTable->pre_rf_state != pDM_PSTable->cur_rf_state) {
if (pDM_PSTable->cur_rf_state == RF_Save) {
- /* <tynli_note> 8723 RSSI report will be wrong. Set 0x874[5]=1 when enter BB power saving mode. */
- /* Suggested by SD3 Yu-Nan. 2011.01.20. */
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874, BIT(5), 0x1); /* Reg874[5]=1b'1 */
ODM_SetBBReg(pDM_Odm, 0x874, 0x1C0000, 0x2); /* Reg874[20:18]=3'b010 */
ODM_SetBBReg(pDM_Odm, 0xc70, BIT(3), 0); /* RegC70[3]=1'b0 */
ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, 0x63); /* Reg85C[31:24]=0x63 */
@@ -1039,9 +653,6 @@ void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal)
ODM_SetBBReg(pDM_Odm, 0x85c, 0xFF000000, pDM_PSTable->reg_85c);
ODM_SetBBReg(pDM_Odm, 0xa74, 0xF000, pDM_PSTable->reg_a74);
ODM_SetBBReg(pDM_Odm, 0x818, BIT(28), 0x0);
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A)
- ODM_SetBBReg(pDM_Odm, 0x874, BIT(5), 0x0); /* Reg874[5]=1b'0 */
}
pDM_PSTable->pre_rf_state = pDM_PSTable->cur_rf_state;
}
@@ -1058,12 +669,6 @@ void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm)
{
struct odm_rate_adapt *pOdmRA = &pDM_Odm->RateAdaptive;
- pOdmRA->Type = DM_Type_ByDriver;
- if (pOdmRA->Type == DM_Type_ByDriver)
- pDM_Odm->bUseRAMask = true;
- else
- pDM_Odm->bUseRAMask = false;
-
pOdmRA->RATRState = DM_RATR_STA_INIT;
pOdmRA->HighRSSIThresh = 50;
pOdmRA->LowRSSIThresh = 20;
@@ -1097,36 +702,20 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
rate_bitmap = 0x00000ff5;
break;
case (ODM_WM_B | ODM_WM_G | ODM_WM_N24G):
- if (pDM_Odm->RFType == ODM_1T2R || pDM_Odm->RFType == ODM_1T1R) {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x000f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x000ff000;
- } else {
- if (*pDM_Odm->pBandWidth == ODM_BW40M)
- rate_bitmap = 0x000ff015;
- else
- rate_bitmap = 0x000ff005;
- }
+ if (rssi_level == DM_RATR_STA_HIGH) {
+ rate_bitmap = 0x000f0000;
+ } else if (rssi_level == DM_RATR_STA_MIDDLE) {
+ rate_bitmap = 0x000ff000;
} else {
- if (rssi_level == DM_RATR_STA_HIGH) {
- rate_bitmap = 0x0f8f0000;
- } else if (rssi_level == DM_RATR_STA_MIDDLE) {
- rate_bitmap = 0x0f8ff000;
- } else {
- if (*pDM_Odm->pBandWidth == ODM_BW40M)
- rate_bitmap = 0x0f8ff015;
- else
- rate_bitmap = 0x0f8ff005;
- }
+ if (*pDM_Odm->pBandWidth == ODM_BW40M)
+ rate_bitmap = 0x000ff015;
+ else
+ rate_bitmap = 0x000ff005;
}
break;
default:
/* case WIRELESS_11_24N: */
- if (pDM_Odm->RFType == RF_1T2R)
- rate_bitmap = 0x000fffff;
- else
- rate_bitmap = 0x0fffffff;
+ rate_bitmap = 0x0fffffff;
break;
}
@@ -1151,40 +740,13 @@ u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid, u32 ra_mask, u
*---------------------------------------------------------------------------*/
void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm)
{
- if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
- return;
- /* */
- /* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handles to operate */
- /* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
- /* HW dynamic mechanisms. */
- /* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RefreshRateAdaptiveMaskMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RefreshRateAdaptiveMaskCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- odm_RefreshRateAdaptiveMaskAPADSL(pDM_Odm);
- break;
- }
-}
-
-void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
-{
u8 i;
struct adapter *pAdapter = pDM_Odm->Adapter;
- if (pAdapter->bDriverStopped)
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RA_MASK))
return;
- if (!pDM_Odm->bUseRAMask)
+ if (pAdapter->bDriverStopped)
return;
for (i = 0; i < ODM_ASSOCIATE_ENTRY_NUM; i++) {
@@ -1196,10 +758,6 @@ void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm)
}
}
-void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm)
-{
-}
-
/* Return Value: bool */
/* - true: RATRState is changed. */
bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate, u8 *pRATRState)
@@ -1244,97 +802,9 @@ bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI, bool bForceUpdate
}
/* 3============================================================ */
-/* 3 Dynamic Tx Power */
-/* 3============================================================ */
-
-void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
- pdmpriv->bDynamicTxPowerEnable = false;
- pdmpriv->LastDTPLvl = TxHighPwrLevel_Normal;
- pdmpriv->DynamicTxHighPowerLvl = TxHighPwrLevel_Normal;
-}
-
-void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm)
-{
- /* For AP/ADSL use struct rtl8192cd_priv * */
- /* For CE/NIC use struct adapter * */
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
- return;
-
- /* 2012/01/12 MH According to Luke's suggestion, only high power will support the feature. */
- if (!pDM_Odm->ExtPA)
- return;
-
- /* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handles to operate */
- /* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
- /* HW dynamic mechanisms. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- case ODM_CE:
- odm_DynamicTxPowerNIC(pDM_Odm);
- break;
- case ODM_AP:
- odm_DynamicTxPowerAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
-}
-
-void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm)
-{
- if (!(pDM_Odm->SupportAbility & ODM_BB_DYNAMIC_TXPWR))
- return;
-
- if (pDM_Odm->SupportICType == ODM_RTL8188E) {
- /* ??? */
- /* This part need to be redefined. */
- }
-}
-
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-/* 3============================================================ */
/* 3 RSSI Monitor */
/* 3============================================================ */
-void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
-{
- if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
- return;
-
- /* */
- /* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handles to operate */
- /* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
- /* HW dynamic mechanisms. */
- /* */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_RSSIMonitorCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_RSSIMonitorCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_RSSIMonitorCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- /* odm_DIGAP(pDM_Odm); */
- break;
- }
-
-} /* odm_RSSIMonitorCheck */
-
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
static void FindMinimumRSSI(struct adapter *pAdapter)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
@@ -1345,13 +815,11 @@ static void FindMinimumRSSI(struct adapter *pAdapter)
if (!check_fwstate(pmlmepriv, _FW_LINKED) &&
pdmpriv->EntryMinUndecoratedSmoothedPWDB == 0)
pdmpriv->MinUndecoratedPWDBForDM = 0;
- if (check_fwstate(pmlmepriv, _FW_LINKED)) /* Default port */
- pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
- else /* associated entry pwdb */
- pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
+
+ pdmpriv->MinUndecoratedPWDBForDM = pdmpriv->EntryMinUndecoratedSmoothedPWDB;
}
-void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
+void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm)
{
struct adapter *Adapter = pDM_Odm->Adapter;
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
@@ -1361,7 +829,9 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
u8 sta_cnt = 0;
 u32 PWDB_rssi[NUM_STA] = {0};/* [0~15]:MACID, [16~31]:PWDB_rssi */
struct sta_info *psta;
- u8 bcast_addr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+
+ if (!(pDM_Odm->SupportAbility & ODM_BB_RSSI_MONITOR))
+ return;
if (!check_fwstate(&Adapter->mlmepriv, _FW_LINKED))
return;
@@ -1370,7 +840,7 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
psta = pDM_Odm->pODM_StaInfo[i];
if (IS_STA_VALID(psta) &&
(psta->state & WIFI_ASOC_STATE) &&
- memcmp(psta->hwaddr, bcast_addr, ETH_ALEN) &&
+ !is_broadcast_ether_addr(psta->hwaddr) &&
memcmp(psta->hwaddr, myid(&Adapter->eeprompriv), ETH_ALEN)) {
if (psta->rssi_stat.UndecoratedSmoothedPWDB < tmpEntryMinPWDB)
tmpEntryMinPWDB = psta->rssi_stat.UndecoratedSmoothedPWDB;
@@ -1407,27 +877,6 @@ void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm)
ODM_CmnInfoUpdate(&pHalData->odmpriv, ODM_CMNINFO_RSSI_MIN, pdmpriv->MinUndecoratedPWDBForDM);
}
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- timer_setup(&pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer, odm_SwAntDivChkAntSwitchCallback, 0);
-}
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_CancelTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-}
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm)
-{
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->DM_SWAT_Table.SwAntennaSwitchTimer);
-
- ODM_ReleaseTimer(pDM_Odm, &pDM_Odm->FastAntTrainingTimer);
-}
-
/* 3============================================================ */
/* 3 Tx Power Tracking */
/* 3============================================================ */
@@ -1442,8 +891,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
pDM_Odm->RFCalibrateInfo.TXPowercount = 0;
pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
- if (*pDM_Odm->mp_mode != 1)
- pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
MSG_88E("pDM_Odm TxPowerTrackControl = %d\n", pDM_Odm->RFCalibrateInfo.TxPowerTrackControl);
pDM_Odm->RFCalibrateInfo.TxPowerTrackControl = true;
@@ -1451,26 +898,6 @@ void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm)
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm)
{
- /* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handles to operate */
- /* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
- /* HW dynamic mechanisms. */
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- odm_TXPowerTrackingCheckMP(pDM_Odm);
- break;
- case ODM_CE:
- odm_TXPowerTrackingCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- odm_TXPowerTrackingCheckAP(pDM_Odm);
- break;
- case ODM_ADSL:
- break;
- }
-}
-
-void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
-{
struct adapter *Adapter = pDM_Odm->Adapter;
if (!(pDM_Odm->SupportAbility & ODM_RF_TX_PWR_TRACK))
@@ -1487,79 +914,12 @@ void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm)
}
}
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-/* antenna mapping info */
-/* 1: right-side antenna */
-/* 2/0: left-side antenna */
-/* PDM_SWAT_Table->CCK_Ant1_Cnt /OFDM_Ant1_Cnt: for right-side antenna: Ant:1 RxDefaultAnt1 */
-/* PDM_SWAT_Table->CCK_Ant2_Cnt /OFDM_Ant2_Cnt: for left-side antenna: Ant:0 RxDefaultAnt2 */
-/* We select left antenna as default antenna in initial process, modify it as needed */
-/* */
-
-/* 3============================================================ */
-/* 3 SW Antenna Diversity */
-/* 3============================================================ */
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID, struct odm_phy_status_info *pPhyInfo)
-{
-}
-
-void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step)
-{
-}
-
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm)
-{
-}
-
-void odm_SwAntDivChkAntSwitchCallback(struct timer_list *t)
-{
-}
-
-/* 3============================================================ */
-/* 3 SW Antenna Diversity */
-/* 3============================================================ */
-
void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm)
{
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
- if (pDM_Odm->SupportICType & (ODM_RTL8192C | ODM_RTL8192D))
- ;
- else if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversityInit_88E(pDM_Odm);
-}
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId, u32 PWDBAll, bool isCCKrate)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- if (pDM_SWAT_Table->antsel == 1) {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant1_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant1_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant1_Sum[MacId] += PWDBAll;
- }
- } else {
- if (isCCKrate) {
- pDM_SWAT_Table->CCK_Ant2_Cnt[MacId]++;
- } else {
- pDM_SWAT_Table->OFDM_Ant2_Cnt[MacId]++;
- pDM_SWAT_Table->RSSI_Ant2_Sum[MacId] += PWDBAll;
- }
- }
+ ODM_AntennaDiversityInit_88E(pDM_Odm);
}
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
@@ -1567,8 +927,7 @@ void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm)
if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
return;
- if (pDM_Odm->SupportICType == ODM_RTL8188E)
- ODM_AntennaDiversity_88E(pDM_Odm);
+ ODM_AntennaDiversity_88E(pDM_Odm);
}
/* EDCA Turbo */
@@ -1583,26 +942,6 @@ void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm)
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm)
{
- /* 2011/09/29 MH In the first stage of HW integration, we provide 4 different handles to operate */
- /* at the same time. In stage 2/3, we need to provide a universal interface and merge all */
- /* HW dynamic mechanisms. */
- if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
- return;
-
- switch (pDM_Odm->SupportPlatform) {
- case ODM_MP:
- break;
- case ODM_CE:
- odm_EdcaTurboCheckCE(pDM_Odm);
- break;
- case ODM_AP:
- case ODM_ADSL:
- break;
- }
-} /* odm_CheckEdcaTurbo */
-
-void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
-{
struct adapter *Adapter = pDM_Odm->Adapter;
u32 trafficIndex;
u32 edca_param;
@@ -1616,6 +955,9 @@ void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm)
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ if (!(pDM_Odm->SupportAbility & ODM_MAC_EDCA_TURBO))
+ return;
+
if (pregpriv->wifi_spec == 1)
goto dm_CheckEdcaTurbo_EXIT;
@@ -1674,295 +1016,3 @@ dm_CheckEdcaTurbo_EXIT:
pxmitpriv->last_tx_bytes = pxmitpriv->tx_bytes;
precvpriv->last_rx_bytes = precvpriv->rx_bytes;
}
-
-/* needed for the ODM CE platform */
-/* moved here for use by the ANT detection mechanism */
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point, u8 initial_gain_psd)
-{
- u32 psd_report;
-
- /* Set DCO frequency index, offset=(40MHz/SamplePts)*point */
- ODM_SetBBReg(pDM_Odm, 0x808, 0x3FF, point);
-
- /* Start PSD calculation, Reg808[22]=0->1 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT(22), 1);
- /* Need to wait for HW PSD report */
- ODM_StallExecution(30);
- ODM_SetBBReg(pDM_Odm, 0x808, BIT(22), 0);
- /* Read PSD report, Reg8B4[15:0] */
- psd_report = ODM_GetBBReg(pDM_Odm, 0x8B4, bMaskDWord) & 0x0000FFFF;
-
- psd_report = (u32)(ConvertTo_dB(psd_report)) + (u32)(initial_gain_psd - 0x1c);
-
- return psd_report;
-}
-
-u32 ConvertTo_dB(u32 Value)
-{
- u8 i;
- u8 j;
- u32 dB;
-
- Value = Value & 0xFFFF;
- for (i = 0; i < 8; i++) {
- if (Value <= dB_Invert_Table[i][11])
- break;
- }
-
- if (i >= 8)
- return 96; /* maximum 96 dB */
-
- for (j = 0; j < 12; j++) {
- if (Value <= dB_Invert_Table[i][j])
- break;
- }
-
- dB = i * 12 + j + 1;
-
- return dB;
-}
-
-/* 2011/09/22 MH Add for 92D global spin lock utilization. */
-void odm_GlobalAdapterCheck(void)
-{
-} /* odm_GlobalAdapterCheck */
-
-/* Description: */
-/* Set Single/Dual Antenna default setting for products that do not do detection in advance. */
-/* Added by Joseph, 2012.03.22 */
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
-
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
-}
-
-/* 2 8723A ANT DETECT */
-
-static void odm_PHY_SaveAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegisterNum)
-{
- u32 i;
-
- /* RTPRINT(FINIT, INIT_IQK, ("Save ADDA parameters.\n")); */
- for (i = 0; i < RegisterNum; i++)
- AFEBackup[i] = ODM_GetBBReg(pDM_Odm, AFEReg[i], bMaskDWord);
-}
-
-static void odm_PHY_ReloadAFERegisters(struct odm_dm_struct *pDM_Odm, u32 *AFEReg, u32 *AFEBackup, u32 RegiesterNum)
-{
- u32 i;
-
- for (i = 0; i < RegiesterNum; i++)
- ODM_SetBBReg(pDM_Odm, AFEReg[i], bMaskDWord, AFEBackup[i]);
-}
-
-/* 2 8723A ANT DETECT */
-/* Description: */
-/* Implement IQK single tone for RF DPK loopback and BB PSD scanning. */
-/* This function is cooperated with BB team Neil. */
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode)
-{
- struct sw_ant_switch *pDM_SWAT_Table = &pDM_Odm->DM_SWAT_Table;
- u32 CurrentChannel, RfLoopReg;
- u8 n;
- u32 Reg88c, Regc08, Reg874, Regc50;
- u8 initial_gain = 0x5a;
- u32 PSD_report_tmp;
- u32 AntA_report = 0x0, AntB_report = 0x0, AntO_report = 0x0;
- bool bResult = true;
- u32 AFE_Backup[16];
- u32 AFE_REG_8723A[16] = {
- rRx_Wait_CCA, rTx_CCK_RFON,
- rTx_CCK_BBON, rTx_OFDM_RFON,
- rTx_OFDM_BBON, rTx_To_Rx,
- rTx_To_Tx, rRx_CCK,
- rRx_OFDM, rRx_Wait_RIFS,
- rRx_TO_Rx, rStandby,
- rSleep, rPMPD_ANAEN,
- rFPGA0_XCD_SwitchControl, rBlue_Tooth};
-
- if (!(pDM_Odm->SupportICType & (ODM_RTL8723A | ODM_RTL8192C)))
- return bResult;
-
- if (!(pDM_Odm->SupportAbility & ODM_BB_ANT_DIV))
- return bResult;
-
- if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- /* Which path in ADC/DAC is turned on for PSD: both I/Q */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT(10) | BIT(11), 0x3);
- /* Averaged number: 8 */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT(12) | BIT(13), 0x1);
- /* pts = 128; */
- ODM_SetBBReg(pDM_Odm, 0x808, BIT(14) | BIT(15), 0x0);
- }
-
- /* 1 Backup Current RF/BB Settings */
-
- CurrentChannel = ODM_GetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask);
- RfLoopReg = ODM_GetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, ODM_DPDT, Antenna_A); /* change to Antenna A */
- /* Step 1: USE IQK to transmitter single tone */
-
- ODM_StallExecution(10);
-
- /* Store A Path Register 88c, c08, 874, c50 */
- Reg88c = ODM_GetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord);
- Regc08 = ODM_GetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord);
- Reg874 = ODM_GetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord);
- Regc50 = ODM_GetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord);
-
- /* Store AFE Registers */
- odm_PHY_SaveAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- /* Set PSD 128 pts */
- ODM_SetBBReg(pDM_Odm, rFPGA0_PSDFunction, BIT(14) | BIT(15), 0x0); /* 128 pts */
-
- /* To SET CH1 to do */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, ODM_CHANNEL, bRFRegOffsetMask, 0x01); /* Channel 1 */
-
- /* AFE all on step */
- ODM_SetBBReg(pDM_Odm, rRx_Wait_CCA, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_CCK_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_RFON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_OFDM_BBON, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rTx_To_Tx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_CCK, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_OFDM, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_Wait_RIFS, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rRx_TO_Rx, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rStandby, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rSleep, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rPMPD_ANAEN, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_SwitchControl, bMaskDWord, 0x6FDB25A4);
- ODM_SetBBReg(pDM_Odm, rBlue_Tooth, bMaskDWord, 0x6FDB25A4);
-
- /* 3 wire Disable */
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, 0xCCF000C0);
-
- /* BB IQK Setting */
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, 0x000800E4);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, 0x22208000);
-
- /* IQK setting tone@ 4.34Mhz */
- ODM_SetBBReg(pDM_Odm, rTx_IQK_Tone_A, bMaskDWord, 0x10008C1C);
- ODM_SetBBReg(pDM_Odm, rTx_IQK, bMaskDWord, 0x01007c00);
-
- /* Page B init */
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x00080000);
- ODM_SetBBReg(pDM_Odm, rConfig_AntA, bMaskDWord, 0x0f600000);
- ODM_SetBBReg(pDM_Odm, rRx_IQK, bMaskDWord, 0x01004800);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_Tone_A, bMaskDWord, 0x10008c1f);
- ODM_SetBBReg(pDM_Odm, rTx_IQK_PI_A, bMaskDWord, 0x82150008);
- ODM_SetBBReg(pDM_Odm, rRx_IQK_PI_A, bMaskDWord, 0x28150008);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Rsp, bMaskDWord, 0x001028d0);
-
- /* RF loop Setting */
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x0, 0xFFFFF, 0x50008);
-
- /* IQK Single tone start */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x80800000);
- ODM_SetBBReg(pDM_Odm, rIQK_AGC_Pts, bMaskDWord, 0xf8000000);
- ODM_StallExecution(1000);
- PSD_report_tmp = 0x0;
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntA_report)
- AntA_report = PSD_report_tmp;
- }
-
- PSD_report_tmp = 0x0;
-
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B); /* change to Antenna B */
- ODM_StallExecution(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntB_report)
- AntB_report = PSD_report_tmp;
- }
-
- /* change to open case */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, 0); /* change to Ant A and B all open case */
- ODM_StallExecution(10);
-
- for (n = 0; n < 2; n++) {
- PSD_report_tmp = GetPSDData(pDM_Odm, 14, initial_gain);
- if (PSD_report_tmp > AntO_report)
- AntO_report = PSD_report_tmp;
- }
-
- /* Close IQK Single Tone function */
- ODM_SetBBReg(pDM_Odm, rFPGA0_IQK, bMaskDWord, 0x00000000);
- PSD_report_tmp = 0x0;
-
- /* 1 Return to antenna A */
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- ODM_SetBBReg(pDM_Odm, rFPGA0_AnalogParameter4, bMaskDWord, Reg88c);
- ODM_SetBBReg(pDM_Odm, rOFDM0_TRMuxPar, bMaskDWord, Regc08);
- ODM_SetBBReg(pDM_Odm, rFPGA0_XCD_RFInterfaceSW, bMaskDWord, Reg874);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, 0x7F, 0x40);
- ODM_SetBBReg(pDM_Odm, rOFDM0_XAAGCCore1, bMaskDWord, Regc50);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, RF_CHNLBW, bRFRegOffsetMask, CurrentChannel);
- ODM_SetRFReg(pDM_Odm, RF_PATH_A, 0x00, bRFRegOffsetMask, RfLoopReg);
-
- /* Reload AFE Registers */
- odm_PHY_ReloadAFERegisters(pDM_Odm, AFE_REG_8723A, AFE_Backup, 16);
-
- if (pDM_Odm->SupportICType == ODM_RTL8723A) {
- /* 2 Test Ant B based on Ant A is ON */
- if (mode == ANTTESTB) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report + 1))
- pDM_SWAT_Table->ANTB_ON = false;
- else
- pDM_SWAT_Table->ANTB_ON = true;
- } else {
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- } else if (mode == ANTTESTALL) {
- /* 2 Test Ant A and B based on DPDT Open */
- if ((AntO_report >= 100) & (AntO_report < 118)) {
- if (AntA_report > (AntO_report + 1))
- pDM_SWAT_Table->ANTA_ON = false;
- else
- pDM_SWAT_Table->ANTA_ON = true;
-
- if (AntB_report > (AntO_report + 2))
- pDM_SWAT_Table->ANTB_ON = false;
- else
- pDM_SWAT_Table->ANTB_ON = true;
- }
- }
- } else if (pDM_Odm->SupportICType == ODM_RTL8192C) {
- if (AntA_report >= 100) {
- if (AntB_report > (AntA_report + 2)) {
- pDM_SWAT_Table->ANTA_ON = false;
- pDM_SWAT_Table->ANTB_ON = true;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_B);
- } else if (AntA_report > (AntB_report + 2)) {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = false;
- ODM_SetBBReg(pDM_Odm, rFPGA0_XA_RFInterfaceOE, 0x300, Antenna_A);
- } else {
- pDM_SWAT_Table->ANTA_ON = true;
- pDM_SWAT_Table->ANTB_ON = true;
- }
- } else {
- pDM_SWAT_Table->ANTA_ON = true; /* Set Antenna A on as default */
- pDM_SWAT_Table->ANTB_ON = false; /* Set Antenna B off as default */
- bResult = false;
- }
- }
- return bResult;
-}
-
-/* Justin: According to the current RSSI, adjust Response Frame TX power, 2012/11/05 */
-void odm_dtc(struct odm_dm_struct *pDM_Odm)
-{
-}
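
For readers who only see the deletion above: the antenna-detection block being removed keys its verdict off the peak PSD reports collected on channel 14. A minimal, self-contained sketch of the ANTTESTB rule it implemented follows; the struct and function names are illustrative stand-ins, not driver API.

#include <stdbool.h>

/* Hypothetical mirror of the deleted ANTTESTB branch: antenna B is
 * considered present unless its PSD peak exceeds antenna A's by more
 * than 1 (in the driver's PSD units), and the result is only trusted
 * when antenna A itself reports a sane level (>= 100).
 */
struct ant_test_result {
	bool antb_on;
	bool valid;
};

static struct ant_test_result ant_test_b(unsigned int ant_a_report,
					 unsigned int ant_b_report)
{
	struct ant_test_result res = { .antb_on = false, .valid = true };

	if (ant_a_report >= 100)
		res.antb_on = !(ant_b_report > ant_a_report + 1);
	else
		res.valid = false;	/* keep antenna B off by default */

	return res;
}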
diff --git a/drivers/staging/r8188eu/hal/odm_HWConfig.c b/drivers/staging/r8188eu/hal/odm_HWConfig.c
index ada22a526fee..3125886e6731 100644
--- a/drivers/staging/r8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/r8188eu/hal/odm_HWConfig.c
@@ -6,7 +6,6 @@
#define READ_AND_CONFIG READ_AND_CONFIG_MP
#define READ_AND_CONFIG_MP(ic, txt) (ODM_ReadAndConfig##txt##ic(dm_odm))
-#define READ_AND_CONFIG_TC(ic, txt) (ODM_ReadAndConfig_TC##txt##ic(dm_odm))
static u8 odm_QueryRxPwrPercentage(s8 AntPower)
{
@@ -18,63 +17,28 @@ static u8 odm_QueryRxPwrPercentage(s8 AntPower)
return 100 + AntPower;
}
-/* 2012/01/12 MH MOve some signal strength smooth method to MP HAL layer. */
-/* IF other SW team do not support the feature, remove this section.?? */
-static s32 odm_sig_patch_lenove(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
-static s32 odm_sig_patch_netcore(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- return 0;
-}
-
-static s32 odm_SignalScaleMapping_92CSeries(struct odm_dm_struct *dm_odm, s32 CurrSig)
+static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
{
s32 RetSig = 0;
- if ((dm_odm->SupportInterface == ODM_ITRF_USB) ||
- (dm_odm->SupportInterface == ODM_ITRF_SDIO)) {
- if (CurrSig >= 51 && CurrSig <= 100)
- RetSig = 100;
- else if (CurrSig >= 41 && CurrSig <= 50)
- RetSig = 80 + ((CurrSig - 40) * 2);
- else if (CurrSig >= 31 && CurrSig <= 40)
- RetSig = 66 + (CurrSig - 30);
- else if (CurrSig >= 21 && CurrSig <= 30)
- RetSig = 54 + (CurrSig - 20);
- else if (CurrSig >= 10 && CurrSig <= 20)
- RetSig = 42 + (((CurrSig - 10) * 2) / 3);
- else if (CurrSig >= 5 && CurrSig <= 9)
- RetSig = 22 + (((CurrSig - 5) * 3) / 2);
- else if (CurrSig >= 1 && CurrSig <= 4)
- RetSig = 6 + (((CurrSig - 1) * 3) / 2);
- else
- RetSig = CurrSig;
- }
- return RetSig;
-}
-
-static s32 odm_SignalScaleMapping(struct odm_dm_struct *dm_odm, s32 CurrSig)
-{
- if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface != ODM_ITRF_PCIE) && /* USB & SDIO */
- (dm_odm->PatchID == 10))
- return odm_sig_patch_netcore(dm_odm, CurrSig);
- else if ((dm_odm->SupportPlatform == ODM_MP) &&
- (dm_odm->SupportInterface == ODM_ITRF_PCIE) &&
- (dm_odm->PatchID == 19))
- return odm_sig_patch_lenove(dm_odm, CurrSig);
+ if (CurrSig >= 51 && CurrSig <= 100)
+ RetSig = 100;
+ else if (CurrSig >= 41 && CurrSig <= 50)
+ RetSig = 80 + ((CurrSig - 40) * 2);
+ else if (CurrSig >= 31 && CurrSig <= 40)
+ RetSig = 66 + (CurrSig - 30);
+ else if (CurrSig >= 21 && CurrSig <= 30)
+ RetSig = 54 + (CurrSig - 20);
+ else if (CurrSig >= 10 && CurrSig <= 20)
+ RetSig = 42 + (((CurrSig - 10) * 2) / 3);
+ else if (CurrSig >= 5 && CurrSig <= 9)
+ RetSig = 22 + (((CurrSig - 5) * 3) / 2);
+ else if (CurrSig >= 1 && CurrSig <= 4)
+ RetSig = 6 + (((CurrSig - 1) * 3) / 2);
else
- return odm_SignalScaleMapping_92CSeries(dm_odm, CurrSig);
-}
+ RetSig = CurrSig;
-/* pMgntInfo->CustomerID == RT_CID_819x_Lenovo */
-static u8 odm_SQ_process_patch_RT_CID_819x_Lenovo(struct odm_dm_struct *dm_odm,
- u8 isCCKrate, u8 PWDB_ALL, u8 path, u8 RSSI)
-{
- return 0;
+ return RetSig;
}
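
The consolidated mapping above is piecewise linear in the reported signal; a few hand-computed points, for orientation:

/* Worked examples for odm_SignalScaleMapping() (hand-checked against the
 * branches above, integer arithmetic):
 *   CurrSig = 45  ->  80 + (45 - 40) * 2        = 90
 *   CurrSig = 25  ->  54 + (25 - 20)            = 59
 *   CurrSig = 15  ->  42 + ((15 - 10) * 2) / 3  = 45
 */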
static u8 odm_evm_db_to_percentage(s8 value)
@@ -89,15 +53,14 @@ static u8 odm_evm_db_to_percentage(s8 value)
}
static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
- struct odm_phy_status_info *pPhyInfo,
+ struct phy_info *pPhyInfo,
u8 *pPhyStatus,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt)
{
- struct sw_ant_switch *pDM_SWAT_Table = &dm_odm->DM_SWAT_Table;
u8 i, Max_spatial_stream;
s8 rx_pwr[4], rx_pwr_all = 0;
- u8 EVM, PWDB_ALL = 0, PWDB_ALL_BT;
+ u8 EVM, PWDB_ALL = 0;
u8 RSSI, total_rssi = 0;
u8 isCCKrate = 0;
u8 rf_rx_num = 0;
@@ -112,7 +75,6 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->RxMIMOSignalQuality[RF_PATH_B] = -1;
if (isCCKrate) {
- u8 report;
u8 cck_agc_rpt;
dm_odm->PhyDbgInfo.NumQryPhyStatusCCK++;
@@ -126,125 +88,60 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
/* 2011.11.28 LukeLee: 88E use different LNA & VGA gain table */
/* The RSSI formula should be modified according to the gain table */
/* In 88E, cck_highpwr is always set to 1 */
- if (dm_odm->SupportICType & (ODM_RTL8188E | ODM_RTL8812)) {
- LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
- VGA_idx = (cck_agc_rpt & 0x1F);
- switch (LNA_idx) {
- case 7:
- if (VGA_idx <= 27)
- rx_pwr_all = -100 + 2 * (27 - VGA_idx); /* VGA_idx = 27~2 */
- else
- rx_pwr_all = -100;
- break;
- case 6:
- rx_pwr_all = -48 + 2 * (2 - VGA_idx); /* VGA_idx = 2~0 */
- break;
- case 5:
- rx_pwr_all = -42 + 2 * (7 - VGA_idx); /* VGA_idx = 7~5 */
- break;
- case 4:
- rx_pwr_all = -36 + 2 * (7 - VGA_idx); /* VGA_idx = 7~4 */
- break;
- case 3:
- rx_pwr_all = -24 + 2 * (7 - VGA_idx); /* VGA_idx = 7~0 */
- break;
- case 2:
- if (cck_highpwr)
- rx_pwr_all = -12 + 2 * (5 - VGA_idx); /* VGA_idx = 5~0 */
- else
- rx_pwr_all = -6 + 2 * (5 - VGA_idx);
- break;
- case 1:
- rx_pwr_all = 8 - 2 * VGA_idx;
- break;
- case 0:
- rx_pwr_all = 14 - 2 * VGA_idx;
- break;
- default:
- break;
- }
- rx_pwr_all += 6;
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- if (!cck_highpwr) {
- if (PWDB_ALL >= 80)
- PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
- else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
- PWDB_ALL += 3;
- if (PWDB_ALL > 100)
- PWDB_ALL = 100;
- }
- } else {
- if (!cck_highpwr) {
- report = (cck_agc_rpt & 0xc0) >> 6;
- switch (report) {
- /* 03312009 modified by cosa */
- /* Modify the RF RNA gain value to -40, -20, -2, 14 by Jenyu's suggestion */
- /* Note: different RF with the different RNA gain. */
- case 0x3:
- rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
- break;
- case 0x2:
- rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
- break;
- case 0x1:
- rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
- break;
- case 0x0:
- rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
- break;
- }
- } else {
- report = (cck_agc_rpt & 0x60) >> 5;
- switch (report) {
- case 0x3:
- rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x2:
- rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x1:
- rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- case 0x0:
- rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
- break;
- }
- }
-
- PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
-
- /* Modification for ext-LNA board */
- if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((cck_agc_rpt >> 7) == 0) {
- PWDB_ALL = (PWDB_ALL > 94) ? 100 : (PWDB_ALL + 6);
- } else {
- if (PWDB_ALL > 38)
- PWDB_ALL -= 16;
- else
- PWDB_ALL = (PWDB_ALL <= 16) ? (PWDB_ALL >> 2) : (PWDB_ALL - 12);
- }
-
- /* CCK modification */
- if (PWDB_ALL > 25 && PWDB_ALL <= 60)
- PWDB_ALL += 6;
- } else {/* Modification for int-LNA board */
- if (PWDB_ALL > 99)
- PWDB_ALL -= 8;
- else if (PWDB_ALL > 50 && PWDB_ALL <= 68)
- PWDB_ALL += 4;
- }
+ LNA_idx = ((cck_agc_rpt & 0xE0) >> 5);
+ VGA_idx = (cck_agc_rpt & 0x1F);
+ switch (LNA_idx) {
+ case 7:
+ if (VGA_idx <= 27)
+ rx_pwr_all = -100 + 2 * (27 - VGA_idx); /* VGA_idx = 27~2 */
+ else
+ rx_pwr_all = -100;
+ break;
+ case 6:
+ rx_pwr_all = -48 + 2 * (2 - VGA_idx); /* VGA_idx = 2~0 */
+ break;
+ case 5:
+ rx_pwr_all = -42 + 2 * (7 - VGA_idx); /* VGA_idx = 7~5 */
+ break;
+ case 4:
+ rx_pwr_all = -36 + 2 * (7 - VGA_idx); /* VGA_idx = 7~4 */
+ break;
+ case 3:
+ rx_pwr_all = -24 + 2 * (7 - VGA_idx); /* VGA_idx = 7~0 */
+ break;
+ case 2:
+ if (cck_highpwr)
+ rx_pwr_all = -12 + 2 * (5 - VGA_idx); /* VGA_idx = 5~0 */
+ else
+ rx_pwr_all = -6 + 2 * (5 - VGA_idx);
+ break;
+ case 1:
+ rx_pwr_all = 8 - 2 * VGA_idx;
+ break;
+ case 0:
+ rx_pwr_all = 14 - 2 * VGA_idx;
+ break;
+ default:
+ break;
+ }
+ rx_pwr_all += 6;
+ PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
+ if (!cck_highpwr) {
+ if (PWDB_ALL >= 80)
+ PWDB_ALL = ((PWDB_ALL - 80) << 1) + ((PWDB_ALL - 80) >> 1) + 80;
+ else if ((PWDB_ALL <= 78) && (PWDB_ALL >= 20))
+ PWDB_ALL += 3;
+ if (PWDB_ALL > 100)
+ PWDB_ALL = 100;
}
pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->BTRxRSSIPercentage = PWDB_ALL;
- pPhyInfo->RecvSignalPower = rx_pwr_all;
+ pPhyInfo->recvpower = rx_pwr_all;
/* (3) Get Signal Quality (EVM) */
if (pPktinfo->bPacketMatchBSSID) {
u8 SQ, SQ_rpt;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- SQ = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, 0, 0);
- } else if (pPhyInfo->RxPWDBAll > 40 && !dm_odm->bInHctTest) {
+ if (pPhyInfo->RxPWDBAll > 40) {
SQ = 100;
} else {
SQ_rpt = pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all;
@@ -280,62 +177,37 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
RSSI = odm_QueryRxPwrPercentage(rx_pwr[i]);
total_rssi += RSSI;
- /* Modification for ext-LNA board */
- if (dm_odm->BoardType == ODM_BOARD_HIGHPWR) {
- if ((pPhyStaRpt->path_agc[i].trsw) == 1)
- RSSI = (RSSI > 94) ? 100 : (RSSI + 6);
- else
- RSSI = (RSSI <= 16) ? (RSSI >> 3) : (RSSI - 16);
-
- if ((RSSI <= 34) && (RSSI >= 4))
- RSSI -= 4;
- }
-
pPhyInfo->RxMIMOSignalStrength[i] = (u8)RSSI;
/* Get Rx snr value in DB */
pPhyInfo->RxSNR[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
dm_odm->PhyDbgInfo.RxSNRdB[i] = (s32)(pPhyStaRpt->path_rxsnr[i] / 2);
-
- /* Record Signal Strength for next packet */
- if (pPktinfo->bPacketMatchBSSID) {
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- if (i == RF_PATH_A)
- pPhyInfo->SignalQuality = odm_SQ_process_patch_RT_CID_819x_Lenovo(dm_odm, isCCKrate, PWDB_ALL, i, RSSI);
- }
- }
}
/* (2) PWDB, Average PWDB calculated by hardware (for rate adaptive) */
rx_pwr_all = (((pPhyStaRpt->cck_sig_qual_ofdm_pwdb_all) >> 1) & 0x7f) - 110;
PWDB_ALL = odm_QueryRxPwrPercentage(rx_pwr_all);
- PWDB_ALL_BT = PWDB_ALL;
pPhyInfo->RxPWDBAll = PWDB_ALL;
- pPhyInfo->BTRxRSSIPercentage = PWDB_ALL_BT;
pPhyInfo->RxPower = rx_pwr_all;
- pPhyInfo->RecvSignalPower = rx_pwr_all;
+ pPhyInfo->recvpower = rx_pwr_all;
- if ((dm_odm->SupportPlatform == ODM_MP) && (dm_odm->PatchID == 19)) {
- /* do nothing */
- } else {
- /* (3)EVM of HT rate */
- if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
- Max_spatial_stream = 2; /* both spatial stream make sense */
- else
- Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
-
- for (i = 0; i < Max_spatial_stream; i++) {
- /* Do not use a shift operation like "rx_evmX >>= 1" here: the compiler of the free build environment */
- /* fills the most significant bit with zero when shifting, which can turn a negative value */
- /* into a positive one, so the dBm value (which is supposed to be negative) would no longer be correct. */
- EVM = odm_evm_db_to_percentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
-
- if (pPktinfo->bPacketMatchBSSID) {
- if (i == RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
- pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
- pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
- }
+ /* (3)EVM of HT rate */
+ if (pPktinfo->Rate >= DESC92C_RATEMCS8 && pPktinfo->Rate <= DESC92C_RATEMCS15)
+ Max_spatial_stream = 2; /* both spatial stream make sense */
+ else
+ Max_spatial_stream = 1; /* only spatial stream 1 makes sense */
+
+ for (i = 0; i < Max_spatial_stream; i++) {
+ /* Do not use a shift operation like "rx_evmX >>= 1" here: the compiler of the free build environment */
+ /* fills the most significant bit with zero when shifting, which can turn a negative value */
+ /* into a positive one, so the dBm value (which is supposed to be negative) would no longer be correct. */
+ EVM = odm_evm_db_to_percentage((pPhyStaRpt->stream_rxevm[i])); /* dbm */
+
+ if (pPktinfo->bPacketMatchBSSID) {
+ if (i == RF_PATH_A) /* Fill value in RFD, Get the first spatial stream only */
+ pPhyInfo->SignalQuality = (u8)(EVM & 0xff);
+ pPhyInfo->RxMIMOSignalQuality[i] = (u8)(EVM & 0xff);
}
}
}
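
The comment retained above warns against right-shifting the signed EVM report. A standalone illustration (assuming the report is a two's-complement s8, as in the status descriptor) of one way the shift goes wrong while plain division does not:

#include <stdio.h>

int main(void)
{
	signed char rx_evm = -40;	/* e.g. a raw EVM report in dBm */

	/* Treating the byte as unsigned before shifting fills the MSB
	 * with zero, so the "halved" value comes out large and positive...
	 */
	unsigned char wrong = (unsigned char)rx_evm >> 1;	/* 108 */

	/* ...while plain signed division keeps the sign intact. */
	signed char right = rx_evm / 2;				/* -20 */

	printf("shifted=%d divided=%d\n", wrong, right);
	return 0;
}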
@@ -348,20 +220,14 @@ static void odm_RxPhyStatus92CSeries_Parsing(struct odm_dm_struct *dm_odm,
pPhyInfo->SignalStrength = (u8)(odm_SignalScaleMapping(dm_odm, total_rssi /= rf_rx_num));
}
- /* For 92C/92D HW (Hybrid) Antenna Diversity */
- pDM_SWAT_Table->antsel = pPhyStaRpt->ant_sel;
/* For 88E HW Antenna Diversity */
dm_odm->DM_FatTable.antsel_rx_keep_0 = pPhyStaRpt->ant_sel;
dm_odm->DM_FatTable.antsel_rx_keep_1 = pPhyStaRpt->ant_sel_b;
dm_odm->DM_FatTable.antsel_rx_keep_2 = pPhyStaRpt->antsel_rx_keep_2;
}
-void odm_Init_RSSIForDM(struct odm_dm_struct *dm_odm)
-{
-}
-
static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
- struct odm_phy_status_info *pPhyInfo,
+ struct phy_info *pPhyInfo,
struct odm_per_pkt_info *pPktinfo)
{
s32 UndecoratedSmoothedPWDB, UndecoratedSmoothedCCK;
@@ -371,6 +237,8 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
u32 OFDM_pkt = 0;
u32 Weighting = 0;
struct sta_info *pEntry;
+ u8 antsel_tr_mux;
+ struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
if (pPktinfo->StationID == 0xFF)
return;
@@ -383,28 +251,24 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
isCCKrate = ((pPktinfo->Rate >= DESC92C_RATE1M) && (pPktinfo->Rate <= DESC92C_RATE11M)) ? true : false;
/* Smart Antenna Debug Message------------------ */
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- u8 antsel_tr_mux;
- struct fast_ant_train *pDM_FatTable = &dm_odm->DM_FatTable;
-
- if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
- if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
- if (pPktinfo->bPacketToSelf) {
- antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
- (pDM_FatTable->antsel_rx_keep_1 << 1) |
- pDM_FatTable->antsel_rx_keep_0;
- pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
- pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
- }
- }
- } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ if (dm_odm->AntDivType == CG_TRX_SMART_ANTDIV) {
+ if (pDM_FatTable->FAT_State == FAT_TRAINING_STATE) {
+ if (pPktinfo->bPacketToSelf) {
antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
- (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
- ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ (pDM_FatTable->antsel_rx_keep_1 << 1) |
+ pDM_FatTable->antsel_rx_keep_0;
+ pDM_FatTable->antSumRSSI[antsel_tr_mux] += pPhyInfo->RxPWDBAll;
+ pDM_FatTable->antRSSIcnt[antsel_tr_mux]++;
}
}
+ } else if ((dm_odm->AntDivType == CG_TRX_HW_ANTDIV) || (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)) {
+ if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon) {
+ antsel_tr_mux = (pDM_FatTable->antsel_rx_keep_2 << 2) |
+ (pDM_FatTable->antsel_rx_keep_1 << 1) | pDM_FatTable->antsel_rx_keep_0;
+ ODM_AntselStatistics_88E(dm_odm, antsel_tr_mux, pPktinfo->StationID, pPhyInfo->RxPWDBAll);
+ }
}
+
/* Smart Antenna Debug Message------------------ */
UndecoratedSmoothedCCK = pEntry->rssi_stat.UndecoratedSmoothedCCK;
@@ -498,47 +362,24 @@ static void odm_Process_RSSIForDM(struct odm_dm_struct *dm_odm,
}
/* Endianness before calling this API */
-static void ODM_PhyStatusQuery_92CSeries(struct odm_dm_struct *dm_odm,
- struct odm_phy_status_info *pPhyInfo,
- u8 *pPhyStatus,
- struct odm_per_pkt_info *pPktinfo,
- struct adapter *adapt)
+void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
+ struct phy_info *pPhyInfo,
+ u8 *pPhyStatus,
+ struct odm_per_pkt_info *pPktinfo,
+ struct adapter *adapt)
{
odm_RxPhyStatus92CSeries_Parsing(dm_odm, pPhyInfo, pPhyStatus,
pPktinfo, adapt);
- if (dm_odm->RSSI_test) {
- /* Select the packets to do RSSI checking for antenna switching. */
- if (pPktinfo->bPacketToSelf || pPktinfo->bPacketBeacon)
- ODM_SwAntDivChkPerPktRssi(dm_odm, pPktinfo->StationID, pPhyInfo);
- } else {
+ if (!dm_odm->RSSI_test)
odm_Process_RSSIForDM(dm_odm, pPhyInfo, pPktinfo);
- }
-}
-
-void ODM_PhyStatusQuery(struct odm_dm_struct *dm_odm,
- struct odm_phy_status_info *pPhyInfo,
- u8 *pPhyStatus, struct odm_per_pkt_info *pPktinfo,
- struct adapter *adapt)
-{
- ODM_PhyStatusQuery_92CSeries(dm_odm, pPhyInfo, pPhyStatus, pPktinfo, adapt);
-}
-
-/* For future use. */
-void ODM_MacStatusQuery(struct odm_dm_struct *dm_odm, u8 *mac_stat,
- u8 macid, bool pkt_match_bssid,
- bool pkttoself, bool pkt_beacon)
-{
- /* 2011/10/19 Driver team will handle in the future. */
}
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
enum rf_radio_path content,
enum rf_radio_path rfpath)
{
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (rfpath == RF_PATH_A)
- READ_AND_CONFIG(8188E, _RadioA_1T_);
- }
+ if (rfpath == RF_PATH_A)
+ READ_AND_CONFIG(8188E, _RadioA_1T_);
return HAL_STATUS_SUCCESS;
}
@@ -546,22 +387,20 @@ enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *dm_odm,
enum HAL_STATUS ODM_ConfigBBWithHeaderFile(struct odm_dm_struct *dm_odm,
enum odm_bb_config_type config_tp)
{
- if (dm_odm->SupportICType == ODM_RTL8188E) {
- if (config_tp == CONFIG_BB_PHY_REG) {
- READ_AND_CONFIG(8188E, _PHY_REG_1T_);
- } else if (config_tp == CONFIG_BB_AGC_TAB) {
- READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
- } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
- READ_AND_CONFIG(8188E, _PHY_REG_PG_);
- }
+ if (config_tp == CONFIG_BB_PHY_REG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_1T_);
+ } else if (config_tp == CONFIG_BB_AGC_TAB) {
+ READ_AND_CONFIG(8188E, _AGC_TAB_1T_);
+ } else if (config_tp == CONFIG_BB_PHY_REG_PG) {
+ READ_AND_CONFIG(8188E, _PHY_REG_PG_);
}
+
return HAL_STATUS_SUCCESS;
}
enum HAL_STATUS ODM_ConfigMACWithHeaderFile(struct odm_dm_struct *dm_odm)
{
u8 result = HAL_STATUS_SUCCESS;
- if (dm_odm->SupportICType == ODM_RTL8188E)
- result = READ_AND_CONFIG(8188E, _MAC_REG_);
+ result = READ_AND_CONFIG(8188E, _MAC_REG_);
return result;
}
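
For the CCK branch kept above: the 88E reports one AGC byte, and the driver turns its LNA index (bits 7..5) and VGA index (bits 4..0) into a dBm estimate before the +6 dB offset and the percentage conversion. Below is a minimal, illustrative reimplementation of that table with a worked example; this helper is a sketch, not part of the driver.

/* Example: cck_agc_rpt = 0xE5 gives lna = 7, vga = 5,
 * so rx_pwr = -100 + 2 * (27 - 5) = -56 dBm (before the +6 dB offset
 * the driver adds afterwards).
 */
int cck_agc_to_dbm(unsigned char cck_agc_rpt)
{
	unsigned char lna = (cck_agc_rpt & 0xE0) >> 5;
	unsigned char vga = cck_agc_rpt & 0x1F;

	switch (lna) {
	case 7:
		return vga <= 27 ? -100 + 2 * (27 - vga) : -100;
	case 6:
		return -48 + 2 * (2 - vga);
	case 5:
		return -42 + 2 * (7 - vga);
	case 4:
		return -36 + 2 * (7 - vga);
	case 3:
		return -24 + 2 * (7 - vga);
	case 2:
		return -12 + 2 * (5 - vga);	/* high-power case */
	case 1:
		return 8 - 2 * vga;
	case 0:
		return 14 - 2 * vga;
	default:
		return -100;
	}
}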
diff --git a/drivers/staging/r8188eu/hal/odm_RTL8188E.c b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
index c64a291f9966..e7a765f375d6 100644
--- a/drivers/staging/r8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RTL8188E.c
@@ -3,26 +3,10 @@
#include "../include/odm_precomp.h"
-void ODM_DIG_LowerBound_88E(struct odm_dm_struct *dm_odm)
-{
- struct rtw_dig *pDM_DigTable = &dm_odm->DM_DigTable;
-
- if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
- pDM_DigTable->rx_gain_range_min = (u8)pDM_DigTable->AntDiv_RSSI_max;
- /* If only one Entry connected */
-}
-
static void odm_RX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
u32 value32;
- if (*dm_odm->mp_mode == 1) {
- dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_LNA_SWITCH_11N, BIT(31), 1); /* 1:CG, 0:CS */
- return;
- }
-
/* MAC Setting */
value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
@@ -44,13 +28,6 @@ static void odm_TRX_HWAntDivInit(struct odm_dm_struct *dm_odm)
{
u32 value32;
- if (*dm_odm->mp_mode == 1) {
- dm_odm->AntDivType = CGCS_RX_SW_ANTDIV;
- ODM_SetBBReg(dm_odm, ODM_REG_IGI_A_11N, BIT(7), 0); /* disable HW AntDiv */
- ODM_SetBBReg(dm_odm, ODM_REG_RX_ANT_CTRL_11N, BIT(5) | BIT(4) | BIT(3), 0); /* Default RX (0/1) */
- return;
- }
-
/* MAC Setting */
value32 = ODM_GetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord);
ODM_SetMACReg(dm_odm, ODM_REG_ANTSEL_PIN_11N, bMaskDWord, value32 | (BIT(23) | BIT(25))); /* Reg4C[25]=1, Reg4C[23]=1 for pin output */
@@ -83,9 +60,6 @@ static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
u32 AntCombination = 2;
- if (*dm_odm->mp_mode == 1)
- return;
-
for (i = 0; i < 6; i++) {
dm_fat_tbl->Bssid[i] = 0;
dm_fat_tbl->antSumRSSI[i] = 0;
@@ -155,9 +129,6 @@ static void odm_FastAntTrainingInit(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *dm_odm)
{
- if (dm_odm->SupportICType != ODM_RTL8188E)
- return;
-
if (dm_odm->AntDivType == CGCS_RX_HW_ANTDIV)
odm_RX_HWAntDivInit(dm_odm);
else if (dm_odm->AntDivType == CG_TRX_HW_ANTDIV)
@@ -296,7 +267,7 @@ static void odm_HWAntDiv(struct odm_dm_struct *dm_odm)
void ODM_AntennaDiversity_88E(struct odm_dm_struct *dm_odm)
{
struct fast_ant_train *dm_fat_tbl = &dm_odm->DM_FatTable;
- if ((dm_odm->SupportICType != ODM_RTL8188E) || (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV)))
+ if (!(dm_odm->SupportAbility & ODM_BB_ANT_DIV))
return;
if (!dm_odm->bLinked) {
if (dm_fat_tbl->bBecomeLinked) {
diff --git a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
index 1bc3b49cd67f..5f6f0ae5196e 100644
--- a/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
+++ b/drivers/staging/r8188eu/hal/odm_RegConfig8188E.c
@@ -34,14 +34,6 @@ void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data
odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_A, Addr | maskforPhySet);
}
-void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data)
-{
- u32 content = 0x1001; /* RF_Content: radiob_txt */
- u32 maskforPhySet = (u32)(content & 0xE000);
-
- odm_ConfigRFReg_8188E(pDM_Odm, Addr, Data, RF_PATH_B, Addr | maskforPhySet);
-}
-
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data)
{
ODM_Write1Byte(pDM_Odm, Addr, Data);
diff --git a/drivers/staging/r8188eu/hal/odm_interface.c b/drivers/staging/r8188eu/hal/odm_interface.c
index 5a01495d74bc..7ddba39a0f4b 100644
--- a/drivers/staging/r8188eu/hal/odm_interface.c
+++ b/drivers/staging/r8188eu/hal/odm_interface.c
@@ -10,12 +10,6 @@ u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
return rtw_read8(Adapter, RegAddr);
}
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
-{
- struct adapter *Adapter = pDM_Odm->Adapter;
- return rtw_read16(Adapter, RegAddr);
-}
-
u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr)
{
struct adapter *Adapter = pDM_Odm->Adapter;
@@ -77,64 +71,12 @@ u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath, u32
}
/* ODM Memory relative API. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length)
-{
- *pPtr = vzalloc(length);
-}
-
-/* length could be ignored, used to detect memory leakage. */
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length)
-{
- vfree(pPtr);
-}
-
s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2, u32 length)
{
return !memcmp(pBuf1, pBuf2, length);
}
-/* ODM MISC relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm, enum RT_SPINLOCK_TYPE type)
-{
-}
-
-/* Work item relative API. FOr MP driver only~! */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID)
-{
-}
-
-void ODM_StartWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_StopWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_FreeWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem)
-{
-}
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem)
-{
-}
-
/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay)
-{
- udelay(usDelay);
-}
-
void ODM_delay_ms(u32 ms)
{
mdelay(ms);
@@ -149,30 +91,3 @@ void ODM_sleep_ms(u32 ms)
{
msleep(ms);
}
-
-void ODM_sleep_us(u32 us)
-{
- rtw_usleep_os(us);
-}
-
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer, u32 msDelay)
-{
- _set_timer(pTimer, msDelay); /* ms */
-}
-
-void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
- _cancel_timer_ex(pTimer);
-}
-
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer)
-{
-}
-
-/* ODM FW relative API. */
-u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
- u32 *pElementID, u32 *pCmdLen,
- u8 **pCmbBuffer, u8 *CmdStartSeq)
-{
- return true;
-}
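
The wrappers deleted above were one-line forwards to the kernel delay primitives, which callers can use directly, as the surviving helpers in this file already do. A minimal sketch (the values are arbitrary examples):

#include <linux/delay.h>

static void example_delays(void)
{
	udelay(10);	/* short busy-wait, safe in atomic context */
	mdelay(1);	/* longer busy-wait (what ODM_delay_ms() forwards to) */
	msleep(20);	/* sleeping delay (what ODM_sleep_ms() forwards to) */
}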
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
index 7d50d64cf34d..e44bcde92cc3 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_cmd.c
@@ -53,19 +53,14 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
u8 cmd_idx, ext_cmd_len;
u32 h2c_cmd = 0;
u32 h2c_cmd_ex = 0;
- s32 ret = _FAIL;
if (!adapt->bFWReady) {
DBG_88E("FillH2CCmd_88E(): return H2C cmd because fw is not ready\n");
- return ret;
+ return _FAIL;
}
- if (!pCmdBuffer)
- goto exit;
- if (CmdLen > RTL88E_MAX_CMD_LEN)
- goto exit;
- if (adapt->bSurpriseRemoved)
- goto exit;
+ if (!pCmdBuffer || CmdLen > RTL88E_MAX_CMD_LEN || adapt->bSurpriseRemoved)
+ return _FAIL;
/* pay attention to if race condition happened in H2C cmd setting. */
do {
@@ -73,7 +68,7 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
if (!_is_fw_read_cmd_down(adapt, h2c_box_num)) {
DBG_88E(" fw read cmd failed...\n");
- goto exit;
+ return _FAIL;
}
*(u8 *)(&h2c_cmd) = ElementID;
@@ -102,26 +97,7 @@ static s32 FillH2CCmd_88E(struct adapter *adapt, u8 ElementID, u32 CmdLen, u8 *p
} while ((!bcmd_down) && (retry_cnts--));
- ret = _SUCCESS;
-
-exit:
-
- return ret;
-}
-
-u8 rtl8188e_set_rssi_cmd(struct adapter *adapt, u8 *param)
-{
- u8 res = _SUCCESS;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
-
- if (haldata->fw_ractrl) {
- ;
- } else {
- DBG_88E("==>%s fw dont support RA\n", __func__);
- res = _FAIL;
- }
-
- return res;
+ return _SUCCESS;
}
u8 rtl8188e_set_raid_cmd(struct adapter *adapt, u32 mask)
@@ -241,14 +217,13 @@ static void ConstructBeacon(struct adapter *adapt, u8 *pframe, u32 *pLength)
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
- u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
pwlanhdr = (struct rtw_ieee80211_hdr *)pframe;
fctrl = &pwlanhdr->frame_ctl;
*(fctrl) = 0;
- memcpy(pwlanhdr->addr1, bc_addr, ETH_ALEN);
+ eth_broadcast_addr(pwlanhdr->addr1);
memcpy(pwlanhdr->addr2, myid(&adapt->eeprompriv), ETH_ALEN);
memcpy(pwlanhdr->addr3, get_my_bssid(cur_network), ETH_ALEN);
@@ -561,7 +536,7 @@ static void SetFwRsvdPagePkt(struct adapter *adapt, bool bDLFinished)
pattrib->pktlen = pattrib->last_txcmdsz;
memcpy(pmgntframe->buf_addr, ReservedPagePacket, TotalPacketLen);
- rtw_hal_mgnt_xmit(adapt, pmgntframe);
+ rtl8188eu_mgnt_xmit(adapt, pmgntframe);
DBG_88E("%s: Set RSVD page location to Fw\n", __func__);
FillH2CCmd_88E(adapt, H2C_COM_RSVD_PAGE, sizeof(RsvdPageLoc), (u8 *)&RsvdPageLoc);
@@ -608,7 +583,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
haldata->RegFwHwTxQCtrl &= (~BIT(6));
/* Clear beacon valid check bit. */
- rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
DLBcnCount = 0;
poll = 0;
do {
@@ -619,7 +594,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
yield();
/* mdelay(10); */
/* check rsvd page download OK. */
- rtw_hal_get_hwreg(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
+ GetHwReg8188EU(adapt, HW_VAR_BCN_VALID, (u8 *)(&bcn_valid));
poll++;
} while (!bcn_valid && (poll % 10) != 0 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
} while (!bcn_valid && DLBcnCount <= 100 && !adapt->bSurpriseRemoved && !adapt->bDriverStopped);
@@ -653,7 +628,7 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
/* Update RSVD page location H2C to Fw. */
if (bcn_valid) {
- rtw_hal_set_hwreg(adapt, HW_VAR_BCN_VALID, NULL);
+ SetHwReg8188EU(adapt, HW_VAR_BCN_VALID, NULL);
DBG_88E("Set RSVD page location to Fw.\n");
}
@@ -667,7 +642,6 @@ void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *adapt, u8 mstatus)
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
{
-#ifdef CONFIG_88EU_P2P
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct wifidirect_info *pwdinfo = &adapt->wdinfo;
struct P2P_PS_Offload_t *p2p_ps_offload = &haldata->p2p_ps_offload;
@@ -732,6 +706,4 @@ void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state)
}
FillH2CCmd_88E(adapt, H2C_PS_P2P_OFFLOAD, 1, (u8 *)p2p_ps_offload);
-#endif
-
}
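
One change above replaces a hand-rolled broadcast MAC copy with the etherdevice helper. As a usage note, eth_broadcast_addr() fills the six address bytes with 0xff, so it stands in for the old local bc_addr[] table plus memcpy(); a minimal sketch, not driver code:

#include <linux/etherdevice.h>

static void fill_broadcast(u8 dst[ETH_ALEN])
{
	eth_broadcast_addr(dst);	/* dst becomes ff:ff:ff:ff:ff:ff */
}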
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_dm.c b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
index 78552303c990..5d76f6ea91c4 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_dm.c
@@ -8,10 +8,6 @@
#include "../include/drv_types.h"
#include "../include/rtl8188e_hal.h"
-static void dm_CheckStatistics(struct adapter *Adapter)
-{
-}
-
/* Initialize GPIO setting registers */
static void dm_InitGPIOSetting(struct adapter *Adapter)
{
@@ -31,40 +27,14 @@ static void Init_ODM_ComInfo_88E(struct adapter *Adapter)
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
- u8 cut_ver, fab_ver;
/* Init Value */
memset(dm_odm, 0, sizeof(*dm_odm));
dm_odm->Adapter = Adapter;
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PLATFORM, ODM_CE);
-
- if (Adapter->interface_type == RTW_GSPI)
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, ODM_ITRF_SDIO);
- else
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_INTERFACE, Adapter->interface_type);/* RTL871X_HCI_TYPE */
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_IC_TYPE, ODM_RTL8188E);
-
- fab_ver = ODM_TSMC;
- cut_ver = ODM_CUT_A;
-
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_FAB_VER, fab_ver);
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_CUT_VER, cut_ver);
-
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_MP_TEST_CHIP, IS_NORMAL_CHIP(hal_data->VersionID));
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_PATCH_ID, hal_data->CustomerID);
- ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_BWIFI_TEST, Adapter->registrypriv.wifi_spec);
-
- if (hal_data->rf_type == RF_1T1R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T1R);
- else if (hal_data->rf_type == RF_2T2R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_2T2R);
- else if (hal_data->rf_type == RF_1T2R)
- ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_RF_TYPE, ODM_1T2R);
-
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
@@ -96,11 +66,6 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
if (hal_data->AntDivCfg)
pdmpriv->InitODMFlag |= ODM_BB_ANT_DIV;
- if (Adapter->registrypriv.mp_mode == 1) {
- pdmpriv->InitODMFlag = ODM_RF_CALIBRATION |
- ODM_RF_TX_PWR_TRACK;
- }
-
ODM_CmnInfoUpdate(dm_odm, ODM_CMNINFO_ABILITY, pdmpriv->InitODMFlag);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_TX_UNI, &Adapter->xmitpriv.tx_bytes);
@@ -111,24 +76,20 @@ static void Update_ODM_ComInfo_88E(struct adapter *Adapter)
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_BW, &hal_data->CurrentChannelBW);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_CHNL, &hal_data->CurrentChannel);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_NET_CLOSED, &Adapter->net_closed);
- ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_MP_MODE, &Adapter->registrypriv.mp_mode);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_SCAN, &pmlmepriv->bScanInProcess);
ODM_CmnInfoHook(dm_odm, ODM_CMNINFO_POWER_SAVING, &pwrctrlpriv->bpower_saving);
ODM_CmnInfoInit(dm_odm, ODM_CMNINFO_RF_ANTENNA_TYPE, hal_data->TRxAntDivType);
for (i = 0; i < NUM_STA; i++)
- ODM_CmnInfoPtrArrayHook(dm_odm, ODM_CMNINFO_STA_STATUS, i, NULL);
+ dm_odm->pODM_StaInfo[i] = NULL;
}
void rtl8188e_InitHalDm(struct adapter *Adapter)
{
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &hal_data->dmpriv;
struct odm_dm_struct *dm_odm = &hal_data->odmpriv;
dm_InitGPIOSetting(Adapter);
- pdmpriv->DM_Type = DM_Type_ByDriver;
- pdmpriv->DMFlag = DYNAMIC_FUNC_DISABLE;
Update_ODM_ComInfo_88E(Adapter);
ODM_DMInit(dm_odm);
Adapter->fix_rate = 0xFF;
@@ -136,49 +97,25 @@ void rtl8188e_InitHalDm(struct adapter *Adapter)
void rtl8188e_HalDmWatchDog(struct adapter *Adapter)
{
- bool fw_cur_in_ps = false;
- bool fw_ps_awake = true;
- u8 hw_init_completed = false;
+ u8 hw_init_completed = Adapter->hw_init_completed;
struct hal_data_8188e *hal_data = GET_HAL_DATA(Adapter);
-
-
- hw_init_completed = Adapter->hw_init_completed;
+ struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
+ u8 bLinked = false;
if (!hw_init_completed)
return;
- fw_cur_in_ps = Adapter->pwrctrlpriv.bFwCurrentInPSMode;
- rtw_hal_get_hwreg(Adapter, HW_VAR_FWLPS_RF_ON, (u8 *)(&fw_ps_awake));
-
- /* Fw is under p2p powersaving mode, driver should stop dynamic mechanism. */
- /* modifed by thomas. 2011.06.11. */
- if (Adapter->wdinfo.p2p_ps_mode)
- fw_ps_awake = false;
-
- if (hw_init_completed && ((!fw_cur_in_ps) && fw_ps_awake)) {
- /* Calculate Tx/Rx statistics. */
- dm_CheckStatistics(Adapter);
-
-
+ if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
+ (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
+ if (Adapter->stapriv.asoc_sta_count > 2)
+ bLinked = true;
+ } else {/* Station mode */
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
+ bLinked = true;
}
- /* ODM */
- if (hw_init_completed) {
- struct mlme_priv *pmlmepriv = &Adapter->mlmepriv;
- u8 bLinked = false;
-
- if ((check_fwstate(pmlmepriv, WIFI_AP_STATE)) ||
- (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE | WIFI_ADHOC_MASTER_STATE))) {
- if (Adapter->stapriv.asoc_sta_count > 2)
- bLinked = true;
- } else {/* Station mode */
- if (check_fwstate(pmlmepriv, _FW_LINKED))
- bLinked = true;
- }
-
- ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
- ODM_DMWatchdog(&hal_data->odmpriv);
- }
+ ODM_CmnInfoUpdate(&hal_data->odmpriv, ODM_CMNINFO_LINK, bLinked);
+ ODM_DMWatchdog(&hal_data->odmpriv);
}
void rtl8188e_init_dm_priv(struct adapter *Adapter)
@@ -190,10 +127,6 @@ void rtl8188e_init_dm_priv(struct adapter *Adapter)
Init_ODM_ComInfo_88E(Adapter);
}
-void rtl8188e_deinit_dm_priv(struct adapter *Adapter)
-{
-}
-
/* Add new function to reset the state of antenna diversity before link. */
/* Compare RSSI for deciding antenna */
void AntDivCompare8188E(struct adapter *Adapter, struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src)
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
index 14758361960c..8c00f2dd67da 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_hal_init.c
@@ -306,7 +306,7 @@ static s32 iol_ioconfig(struct adapter *padapter, u8 iocfg_bndy)
return rst;
}
-static int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
+int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter, struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt)
{
struct pkt_attrib *pattrib = &xmit_frame->attrib;
u8 i;
@@ -336,38 +336,6 @@ exit:
return ret;
}
-void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int data_len)
-{
- u32 fifo_data, reg_140;
- u32 addr, rstatus, loop = 0;
- u16 data_cnts = (data_len / 8) + 1;
- u8 *pbuf = vzalloc(data_len + 10);
- DBG_88E("###### %s ######\n", __func__);
-
- rtw_write8(Adapter, REG_PKT_BUFF_ACCESS_CTRL, TXPKT_BUF_SELECT);
- if (pbuf) {
- for (addr = 0; addr < data_cnts; addr++) {
- rtw_write32(Adapter, 0x140, addr);
- rtw_usleep_os(2);
- loop = 0;
- do {
- rstatus = (reg_140 = rtw_read32(Adapter, REG_PKTBUF_DBG_CTRL) & BIT(24));
- if (rstatus) {
- fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_L);
- memcpy(pbuf + (addr * 8), &fifo_data, 4);
-
- fifo_data = rtw_read32(Adapter, REG_PKTBUF_DBG_DATA_H);
- memcpy(pbuf + (addr * 8 + 4), &fifo_data, 4);
- }
- rtw_usleep_os(2);
- } while (!rstatus && (loop++ < 10));
- }
- rtw_IOL_cmd_buf_dump(Adapter, data_len, pbuf);
- vfree(pbuf);
- }
- DBG_88E("###### %s ######\n", __func__);
-}
-
static void _FWDownloadEnable(struct adapter *padapter, bool enable)
{
u8 tmp;
@@ -669,12 +637,10 @@ void rtl8188e_InitializeFirmwareVars(struct adapter *padapter)
pHalData->LastHMEBoxNum = 0;
}
-static void rtl8188e_free_hal_data(struct adapter *padapter)
+void rtl8188e_free_hal_data(struct adapter *padapter)
{
-
kfree(padapter->HalData);
padapter->HalData = NULL;
-
}
/* */
@@ -707,11 +673,7 @@ hal_EfusePgPacketWriteData(
struct pgpkt *pTargetPkt,
bool bPseudoTest);
-static void
-hal_EfusePowerSwitch_RTL8188E(
- struct adapter *pAdapter,
- u8 bWrite,
- u8 PwrState)
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState)
{
u8 tempval;
u16 tmpV16;
@@ -757,15 +719,6 @@ hal_EfusePowerSwitch_RTL8188E(
}
}
-static void
-rtl8188e_EfusePowerSwitch(
- struct adapter *pAdapter,
- u8 bWrite,
- u8 PwrState)
-{
- hal_EfusePowerSwitch_RTL8188E(pAdapter, bWrite, PwrState);
-}
-
static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
u16 _offset,
u16 _size_byte,
@@ -892,7 +845,7 @@ static void Hal_EfuseReadEFuse88E(struct adapter *Adapter,
pbuf[i] = efuseTbl[_offset + i];
/* 5. Calculate Efuse utilization. */
- rtw_hal_set_hwreg(Adapter, HW_VAR_EFUSE_BYTES, (u8 *)&eFuse_Addr);
+ SetHwReg8188EU(Adapter, HW_VAR_EFUSE_BYTES, (u8 *)&eFuse_Addr);
exit:
kfree(efuseTbl);
@@ -904,7 +857,7 @@ static void ReadEFuseByIC(struct adapter *Adapter, u8 efuseType, u16 _offset, u1
if (!bPseudoTest) {
int ret = _FAIL;
if (rtw_IOL_applied(Adapter)) {
- rtw_hal_power_on(Adapter);
+ rtl8188eu_InitPowerOn(Adapter);
iol_mode_enable(Adapter, 1);
ret = iol_read_efuse(Adapter, 0, _offset, _size_byte, pbuf);
@@ -925,9 +878,9 @@ static void ReadEFuse_Pseudo(struct adapter *Adapter, u8 efuseType, u16 _offset,
Hal_EfuseReadEFuse88E(Adapter, _offset, _size_byte, pbuf, bPseudoTest);
}
-static void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
- u16 _offset, u16 _size_byte, u8 *pbuf,
- bool bPseudoTest)
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf,
+ bool bPseudoTest)
{
if (bPseudoTest)
ReadEFuse_Pseudo(Adapter, efuseType, _offset, _size_byte, pbuf, bPseudoTest);
@@ -1060,7 +1013,7 @@ static void Hal_EFUSEGetEfuseDefinition_Pseudo88E(struct adapter *pAdapter, u8 e
}
}
-static void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
+void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType, u8 type, void *pOut, bool bPseudoTest)
{
if (bPseudoTest)
Hal_EFUSEGetEfuseDefinition_Pseudo88E(pAdapter, efuseType, type, pOut);
@@ -1143,31 +1096,28 @@ static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoT
{
int bContinual = true;
u16 efuse_addr = 0;
- u8 hoffset = 0, hworden = 0;
+ u8 hworden = 0;
u8 efuse_data, word_cnts = 0;
if (bPseudoTest)
efuse_addr = (u16)(fakeEfuseUsedBytes);
else
- rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+ GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
while (bContinual &&
efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest) &&
AVAILABLE_EFUSE_ADDR(efuse_addr)) {
if (efuse_data != 0xFF) {
if ((efuse_data & 0x1F) == 0x0F) { /* extended header */
- hoffset = efuse_data;
efuse_addr++;
efuse_OneByteRead(pAdapter, efuse_addr, &efuse_data, bPseudoTest);
if ((efuse_data & 0x0F) == 0x0F) {
efuse_addr++;
continue;
} else {
- hoffset = ((hoffset & 0xE0) >> 5) | ((efuse_data & 0xF0) >> 1);
hworden = efuse_data & 0x0F;
}
} else {
- hoffset = (efuse_data >> 4) & 0x0F;
hworden = efuse_data & 0x0F;
}
word_cnts = Efuse_CalculateWordCnts(hworden);
@@ -1181,7 +1131,7 @@ static u16 hal_EfuseGetCurrentSize_8188e(struct adapter *pAdapter, bool bPseudoT
if (bPseudoTest)
fakeEfuseUsedBytes = efuse_addr;
else
- rtw_hal_set_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
+ SetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&efuse_addr);
return efuse_addr;
}
@@ -1194,7 +1144,7 @@ static u16 Hal_EfuseGetCurrentSize_Pseudo(struct adapter *pAdapter, bool bPseudo
return ret;
}
-static u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
+u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest)
{
u16 ret = 0;
@@ -1218,7 +1168,7 @@ static int hal_EfusePgPacketRead_8188e(struct adapter *pAdapter, u8 offset, u8 *
u8 max_section = 0;
u8 tmp_header = 0;
- EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAX_SECTION, (void *)&max_section, bPseudoTest);
if (!data)
return false;
@@ -1307,7 +1257,7 @@ static int Hal_EfusePgPacketRead_Pseudo(struct adapter *pAdapter, u8 offset, u8
return ret;
}
-static int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
+int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest)
{
int ret;
@@ -1326,17 +1276,17 @@ static bool hal_EfuseFixHeaderProcess(struct adapter *pAdapter, u8 efuseType, st
memset((void *)originaldata, 0xff, 8);
- if (Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
+ if (rtl8188e_Efuse_PgPacketRead(pAdapter, pFixPkt->offset, originaldata, bPseudoTest)) {
/* check if data exist */
- badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pFixPkt->word_en, originaldata, bPseudoTest);
+ badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pFixPkt->word_en, originaldata, bPseudoTest);
if (badworden != 0xf) { /* write fail */
- PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
+ PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pFixPkt->offset, badworden, originaldata, bPseudoTest);
if (!PgWriteSuccess)
return false;
else
- efuse_addr = Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest);
+ efuse_addr = rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest);
} else {
efuse_addr = efuse_addr + (pFixPkt->word_cnts * 2) + 1;
}
@@ -1354,7 +1304,7 @@ static bool hal_EfusePgPacketWrite2ByteHeader(struct adapter *pAdapter, u8 efuse
u8 pg_header = 0, tmp_header = 0, pg_header_temp = 0;
u8 repeatcnt = 0;
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
while (efuse_addr < efuse_max_available_len) {
pg_header = ((pTargetPkt->offset & 0x07) << 5) | 0x0F;
@@ -1451,17 +1401,16 @@ static bool hal_EfusePgPacketWrite1ByteHeader(struct adapter *pAdapter, u8 efuse
static bool hal_EfusePgPacketWriteData(struct adapter *pAdapter, u8 efuseType, u16 *pAddr, struct pgpkt *pTargetPkt, bool bPseudoTest)
{
u16 efuse_addr = *pAddr;
- u8 badworden = 0;
+ u8 badworden;
u32 PgWriteSuccess = 0;
- badworden = 0x0f;
- badworden = Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
+ badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, efuse_addr + 1, pTargetPkt->word_en, pTargetPkt->data, bPseudoTest);
if (badworden == 0x0F) {
/* write ok */
return true;
} else {
/* reorganize other pg packet */
- PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+ PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
if (!PgWriteSuccess)
return false;
else
@@ -1534,14 +1483,14 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
u16 startAddr = 0, efuse_max_available_len = 0, efuse_max = 0;
struct pgpkt curPkt;
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
- EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_AVAILABLE_EFUSE_BYTES_BANK, (void *)&efuse_max_available_len, bPseudoTest);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, efuseType, TYPE_EFUSE_REAL_CONTENT_LEN, (void *)&efuse_max, bPseudoTest);
if (efuseType == EFUSE_WIFI) {
if (bPseudoTest) {
startAddr = (u16)(fakeEfuseUsedBytes % EFUSE_REAL_CONTENT_LEN);
} else {
- rtw_hal_get_hwreg(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
+ GetHwReg8188EU(pAdapter, HW_VAR_EFUSE_BYTES, (u8 *)&startAddr);
startAddr %= EFUSE_REAL_CONTENT_LEN;
}
} else {
@@ -1582,12 +1531,12 @@ static bool hal_EfusePartialWriteCheck(struct adapter *pAdapter, u8 efuseType, u
(!hal_EfuseCheckIfDatafollowed(pAdapter, curPkt.word_cnts, startAddr + 1, bPseudoTest)) &&
wordEnMatched(pTargetPkt, &curPkt, &matched_wden)) {
/* Here to write partial data */
- badworden = Efuse_WordEnableDataWrite(pAdapter, startAddr + 1, matched_wden, pTargetPkt->data, bPseudoTest);
+ badworden = rtl8188e_Efuse_WordEnableDataWrite(pAdapter, startAddr + 1, matched_wden, pTargetPkt->data, bPseudoTest);
if (badworden != 0x0F) {
u32 PgWriteSuccess = 0;
/* if write fail on some words, write these bad words again */
- PgWriteSuccess = Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
+ PgWriteSuccess = rtl8188e_Efuse_PgPacketWrite(pAdapter, pTargetPkt->offset, badworden, pTargetPkt->data, bPseudoTest);
if (!PgWriteSuccess) {
bRet = false; /* write fail, return */
@@ -1623,9 +1572,9 @@ hal_EfusePgCheckAvailableAddr(
u16 efuse_max_available_len = 0;
/* Change to check TYPE_EFUSE_MAP_LEN , because 8188E raw 256, logic map over 256. */
- EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
+ rtl8188e_EFUSE_GetEfuseDefinition(pAdapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&efuse_max_available_len, false);
- if (Efuse_GetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
+ if (rtl8188e_EfuseGetCurrentSize(pAdapter, efuseType, bPseudoTest) >= efuse_max_available_len)
return false;
return true;
}
@@ -1678,7 +1627,7 @@ static int Hal_EfusePgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_e
return ret;
}
-static int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
+int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest)
{
int ret;
@@ -1689,7 +1638,7 @@ static int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8
return ret;
}
-static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
+void rtl8188e_read_chip_version(struct adapter *padapter)
{
u32 value32;
struct HAL_VERSION ChipVersion;
@@ -1698,49 +1647,23 @@ static struct HAL_VERSION ReadChipVersion8188E(struct adapter *padapter)
pHalData = GET_HAL_DATA(padapter);
value32 = rtw_read32(padapter, REG_SYS_CFG);
- ChipVersion.ICType = CHIP_8188E;
ChipVersion.ChipType = ((value32 & RTL_ID) ? TEST_CHIP : NORMAL_CHIP);
ChipVersion.RFType = RF_TYPE_1T1R;
ChipVersion.VendorType = ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : CHIP_VENDOR_TSMC);
ChipVersion.CUTVersion = (value32 & CHIP_VER_RTL_MASK) >> CHIP_VER_RTL_SHIFT; /* IC version (CUT) */
-
- /* For regulator mode. by tynli. 2011.01.14 */
- pHalData->RegulatorMode = ((value32 & TRP_BT_EN) ? RT_LDO_REGULATOR : RT_SWITCHING_REGULATOR);
-
ChipVersion.ROMVer = 0; /* ROM code version. */
- pHalData->MultiFunc = RT_MULTI_FUNC_NONE;
dump_chip_info(ChipVersion);
pHalData->VersionID = ChipVersion;
- if (IS_1T2R(ChipVersion)) {
- pHalData->rf_type = RF_1T2R;
- pHalData->NumTotalRFPath = 2;
- } else if (IS_2T2R(ChipVersion)) {
- pHalData->rf_type = RF_2T2R;
- pHalData->NumTotalRFPath = 2;
- } else {
- pHalData->rf_type = RF_1T1R;
- pHalData->NumTotalRFPath = 1;
- }
+ pHalData->rf_type = RF_1T1R;
MSG_88E("RF_Type is %x!!\n", pHalData->rf_type);
-
- return ChipVersion;
}
-static void rtl8188e_read_chip_version(struct adapter *padapter)
-{
- ReadChipVersion8188E(padapter);
-}
-
-static void rtl8188e_GetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
-{
-}
-
-static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
+void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable eVariable, void *pValue1, bool bSet)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
struct odm_dm_struct *podmpriv = &pHalData->odmpriv;
@@ -1748,13 +1671,14 @@ static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable
case HAL_ODM_STA_INFO:
{
struct sta_info *psta = (struct sta_info *)pValue1;
+
if (bSet) {
DBG_88E("### Set STA_(%d) info\n", psta->mac_id);
- ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, psta);
+ podmpriv->pODM_StaInfo[psta->mac_id] = psta;
ODM_RAInfo_Init(podmpriv, psta->mac_id);
} else {
DBG_88E("### Clean STA_(%d) info\n", psta->mac_id);
- ODM_CmnInfoPtrArrayHook(podmpriv, ODM_CMNINFO_STA_STATUS, psta->mac_id, NULL);
+ podmpriv->pODM_StaInfo[psta->mac_id] = NULL;
}
}
break;
@@ -1769,20 +1693,7 @@ static void rtl8188e_SetHalODMVar(struct adapter *Adapter, enum hal_odm_variable
}
}
-void rtl8188e_clone_haldata(struct adapter *dst_adapter, struct adapter *src_adapter)
-{
- memcpy(dst_adapter->HalData, src_adapter->HalData, dst_adapter->hal_data_sz);
-}
-
-void rtl8188e_start_thread(struct adapter *padapter)
-{
-}
-
-void rtl8188e_stop_thread(struct adapter *padapter)
-{
-}
-
-static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
+void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
{
if (enable) {
DBG_88E("Enable notch filter\n");
@@ -1792,54 +1703,6 @@ static void hal_notch_filter_8188e(struct adapter *adapter, bool enable)
rtw_write8(adapter, rOFDM0_RxDSP + 1, rtw_read8(adapter, rOFDM0_RxDSP + 1) & ~BIT(1));
}
}
-void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc)
-{
- pHalFunc->free_hal_data = &rtl8188e_free_hal_data;
-
- pHalFunc->dm_init = &rtl8188e_init_dm_priv;
- pHalFunc->dm_deinit = &rtl8188e_deinit_dm_priv;
-
- pHalFunc->read_chip_version = &rtl8188e_read_chip_version;
-
- pHalFunc->set_bwmode_handler = &PHY_SetBWMode8188E;
- pHalFunc->set_channel_handler = &PHY_SwChnl8188E;
-
- pHalFunc->hal_dm_watchdog = &rtl8188e_HalDmWatchDog;
-
- pHalFunc->Add_RateATid = &rtl8188e_Add_RateATid;
- pHalFunc->run_thread = &rtl8188e_start_thread;
- pHalFunc->cancel_thread = &rtl8188e_stop_thread;
-
- pHalFunc->AntDivBeforeLinkHandler = &AntDivBeforeLink8188E;
- pHalFunc->AntDivCompareHandler = &AntDivCompare8188E;
- pHalFunc->read_bbreg = &rtl8188e_PHY_QueryBBReg;
- pHalFunc->write_bbreg = &rtl8188e_PHY_SetBBReg;
- pHalFunc->read_rfreg = &rtl8188e_PHY_QueryRFReg;
- pHalFunc->write_rfreg = &rtl8188e_PHY_SetRFReg;
-
- /* Efuse related function */
- pHalFunc->EfusePowerSwitch = &rtl8188e_EfusePowerSwitch;
- pHalFunc->ReadEFuse = &rtl8188e_ReadEFuse;
- pHalFunc->EFUSEGetEfuseDefinition = &rtl8188e_EFUSE_GetEfuseDefinition;
- pHalFunc->EfuseGetCurrentSize = &rtl8188e_EfuseGetCurrentSize;
- pHalFunc->Efuse_PgPacketRead = &rtl8188e_Efuse_PgPacketRead;
- pHalFunc->Efuse_PgPacketWrite = &rtl8188e_Efuse_PgPacketWrite;
- pHalFunc->Efuse_WordEnableDataWrite = &rtl8188e_Efuse_WordEnableDataWrite;
-
- pHalFunc->sreset_init_value = &sreset_init_value;
- pHalFunc->sreset_reset_value = &sreset_reset_value;
- pHalFunc->silentreset = &rtl8188e_silentreset_for_specific_platform;
- pHalFunc->sreset_xmit_status_check = &rtl8188e_sreset_xmit_status_check;
- pHalFunc->sreset_linked_status_check = &rtl8188e_sreset_linked_status_check;
- pHalFunc->sreset_get_wifi_status = &sreset_get_wifi_status;
-
- pHalFunc->GetHalODMVarHandler = &rtl8188e_GetHalODMVar;
- pHalFunc->SetHalODMVarHandler = &rtl8188e_SetHalODMVar;
-
- pHalFunc->IOL_exec_cmds_sync = &rtl8188e_IOL_exec_cmds_sync;
-
- pHalFunc->hal_notch_filter = &hal_notch_filter_8188e;
-}
u8 GetEEPROMSize8188E(struct adapter *padapter)
{
@@ -1926,7 +1789,7 @@ s32 InitLLTTable(struct adapter *padapter, u8 txpktbuf_bndy)
void
Hal_InitPGData88E(struct adapter *padapter)
{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
if (!pEEPROM->bautoload_fail_flag) { /* autoload OK. */
if (!is_boot_from_eeprom(padapter)) {
@@ -1946,7 +1809,7 @@ Hal_EfuseParseIDCode88E(
u8 *hwinfo
)
{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
+ struct eeprom_priv *pEEPROM = &padapter->eeprompriv;
u16 EEPROMId;
/* Check 0x8129 again for making sure autoload status!! */
@@ -2093,8 +1956,8 @@ void Hal_ReadPowerSavingMode88E(struct adapter *padapter, u8 *hwinfo, bool AutoL
/* if hw supported, 8051 (SIE) will generate WeakUP signal(D+/D- toggle) when autoresume */
padapter->pwrctrlpriv.bSupportRemoteWakeup = (hwinfo[EEPROM_USB_OPTIONAL_FUNCTION0] & BIT(1)) ? true : false;
- DBG_88E("%s...bHWPwrPindetect(%x)-bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
- padapter->pwrctrlpriv.bHWPwrPindetect, padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
+ DBG_88E("%s...bHWPowerdown(%x) , bSupportRemoteWakeup(%x)\n", __func__,
+ padapter->pwrctrlpriv.bHWPowerdown, padapter->pwrctrlpriv.bSupportRemoteWakeup);
DBG_88E("### PS params => power_mgnt(%x), usbss_enable(%x) ###\n", padapter->registrypriv.power_mgnt, padapter->registrypriv.usbss_enable);
}
@@ -2104,7 +1967,8 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
struct txpowerinfo24g pwrInfo24G;
- u8 rfPath, ch, group;
+ u8 rfPath = 0;
+ u8 ch, group;
u8 TxCount;
Hal_ReadPowerValueFromPROM_8188E(&pwrInfo24G, PROMContent, AutoLoadFail);
@@ -2112,31 +1976,29 @@ void Hal_ReadTxPowerInfo88E(struct adapter *padapter, u8 *PROMContent, bool Auto
if (!AutoLoadFail)
pHalData->bTXPowerDataReadFromEEPORM = true;
- for (rfPath = 0; rfPath < pHalData->NumTotalRFPath; rfPath++) {
- for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
- hal_get_chnl_group_88e(ch, &group);
+ for (ch = 0; ch < CHANNEL_MAX_NUMBER; ch++) {
+ hal_get_chnl_group_88e(ch, &group);
- pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
- if (ch == 14)
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
- else
- pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
+ pHalData->Index24G_CCK_Base[rfPath][ch] = pwrInfo24G.IndexCCK_Base[rfPath][group];
+ if (ch == 14)
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][4];
+ else
+ pHalData->Index24G_BW40_Base[rfPath][ch] = pwrInfo24G.IndexBW40_Base[rfPath][group];
- DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
- DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]);
- DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]);
- }
- for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
- pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
- pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
- pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
- pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
- DBG_88E("======= TxCount %d =======\n", TxCount);
- DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
- DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
- DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
- }
+ DBG_88E("======= Path %d, Channel %d =======\n", rfPath, ch);
+ DBG_88E("Index24G_CCK_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_CCK_Base[rfPath][ch]);
+ DBG_88E("Index24G_BW40_Base[%d][%d] = 0x%x\n", rfPath, ch, pHalData->Index24G_BW40_Base[rfPath][ch]);
+ }
+ for (TxCount = 0; TxCount < MAX_TX_COUNT; TxCount++) {
+ pHalData->CCK_24G_Diff[rfPath][TxCount] = pwrInfo24G.CCK_Diff[rfPath][TxCount];
+ pHalData->OFDM_24G_Diff[rfPath][TxCount] = pwrInfo24G.OFDM_Diff[rfPath][TxCount];
+ pHalData->BW20_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW20_Diff[rfPath][TxCount];
+ pHalData->BW40_24G_Diff[rfPath][TxCount] = pwrInfo24G.BW40_Diff[rfPath][TxCount];
+ DBG_88E("======= TxCount %d =======\n", TxCount);
+ DBG_88E("CCK_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->CCK_24G_Diff[rfPath][TxCount]);
+ DBG_88E("OFDM_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->OFDM_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW20_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW20_24G_Diff[rfPath][TxCount]);
+ DBG_88E("BW40_24G_Diff[%d][%d] = %d\n", rfPath, TxCount, pHalData->BW40_24G_Diff[rfPath][TxCount]);
}
/* 2010/10/19 MH Add Regulator recognize for CU. */
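As a reading aid for the table fill above: each entry pairs an EEPROM-derived base index with a signed per-rate-section diff, which getTxPowerIndex88E later combines into one transmit power index. A minimal standalone sketch of that combination, not taken from the driver; the 0x3f ceiling is an assumption of the sketch.

/* Illustration only: merge a base index (e.g. Index24G_CCK_Base) with a
 * signed diff (e.g. CCK_24G_Diff) and clamp to a 6-bit register field.
 * The 0x3f ceiling is assumed, not read from this file. */
static u8 example_power_index(u8 base, s8 diff)
{
	int idx = base + diff;

	if (idx < 0)
		idx = 0;
	if (idx > 0x3f)
		idx = 0x3f;
	return (u8)idx;
}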
@@ -2240,7 +2102,6 @@ void Hal_ReadAntennaDiversity88E(struct adapter *pAdapter, u8 *PROMContent, bool
pHalData->AntDivCfg = 1; /* 0xC1[3] is ignored. */
} else {
pHalData->AntDivCfg = 0;
- pHalData->TRxAntDivType = pHalData->TRxAntDivType; /* The value in the driver setting of device manager. */
}
DBG_88E("EEPROM : AntDivCfg = %x, TRxAntDivType = %x\n", pHalData->AntDivCfg, pHalData->TRxAntDivType);
}
@@ -2261,44 +2122,3 @@ void Hal_ReadThermalMeter_88E(struct adapter *Adapter, u8 *PROMContent, bool Aut
}
DBG_88E("ThermalMeter = 0x%x\n", pHalData->EEPROMThermalMeter);
}
-
-void Hal_InitChannelPlan(struct adapter *padapter)
-{
-}
-
-bool HalDetectPwrDownMode88E(struct adapter *Adapter)
-{
- u8 tmpvalue = 0;
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct pwrctrl_priv *pwrctrlpriv = &Adapter->pwrctrlpriv;
-
- EFUSE_ShadowRead(Adapter, 1, EEPROM_RF_FEATURE_OPTION_88E, (u32 *)&tmpvalue);
-
- /* 2010/08/25 MH INF priority > PDN Efuse value. */
- if (tmpvalue & BIT(4) && pwrctrlpriv->reg_pdnmode)
- pHalData->pwrdown = true;
- else
- pHalData->pwrdown = false;
-
- DBG_88E("HalDetectPwrDownMode(): PDN =%d\n", pHalData->pwrdown);
-
- return pHalData->pwrdown;
-} /* HalDetectPwrDownMode */
-
-/* This function is used only for 92C to set REG_BCN_CTRL(0x550) register. */
-/* We just reserve the value of the register in variable pHalData->RegBcnCtrlVal and then operate */
-/* the value of the register via atomic operation. */
-/* This prevents a race condition when setting this register. */
-/* The value of pHalData->RegBcnCtrlVal is initialized in HwConfigureRTL8192CE() function. */
-
-void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits)
-{
- struct hal_data_8188e *pHalData;
-
- pHalData = GET_HAL_DATA(padapter);
-
- pHalData->RegBcnCtrlVal |= SetBits;
- pHalData->RegBcnCtrlVal &= ~ClearBits;
-
- rtw_write8(padapter, REG_BCN_CTRL, (u8)pHalData->RegBcnCtrlVal);
-}
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_mp.c b/drivers/staging/r8188eu/hal/rtl8188e_mp.c
deleted file mode 100644
index fc13db705511..000000000000
--- a/drivers/staging/r8188eu/hal/rtl8188e_mp.c
+++ /dev/null
@@ -1,798 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#define _RTL8188E_MP_C_
-
-#include "../include/drv_types.h"
-#include "../include/rtw_mp.h"
-#include "../include/rtl8188e_hal.h"
-#include "../include/rtl8188e_dm.h"
-
-s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct odm_dm_struct *pDM_Odm = &pHalData->odmpriv;
-
- if (!netif_running(padapter->pnetdev))
- return _FAIL;
-
- if (!check_fwstate(&padapter->mlmepriv, WIFI_MP_STATE))
- return _FAIL;
-
- if (enable)
- pDM_Odm->RFCalibrateInfo.bTXPowerTracking = true;
- else
- pDM_Odm->RFCalibrateInfo.bTXPowerTrackingInit = false;
-
- return _SUCCESS;
-}
-
-void Hal_GetPowerTracking(struct adapter *padapter, u8 *enable)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct odm_dm_struct *pDM_Odm = &pHalData->odmpriv;
-
- *enable = pDM_Odm->RFCalibrateInfo.TxPowerTrackControl;
-}
-
-/*-----------------------------------------------------------------------------
- * Function: mpt_SwitchRfSetting
- *
- * Overview: Change RF Setting when we switch channel/rate/BW for MP.
- *
- * Input: struct adapter * pAdapter
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 01/08/2009 MHC Suggestion from SD3 Willis for 92S series.
- * 01/09/2009 MHC Add CCK modification for 40MHZ. Suggestion from SD3.
- *
- *---------------------------------------------------------------------------*/
-void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter)
-{
- struct mp_priv *pmp = &pAdapter->mppriv;
-
- /* <20120525, Kordan> Dynamic mechanism for APK, asked by Dennis. */
- pmp->MptCtx.backup0x52_RF_A = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0);
- pmp->MptCtx.backup0x52_RF_B = (u8)PHY_QueryRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0);
- PHY_SetRFReg(pAdapter, RF_PATH_A, RF_0x52, 0x000F0, 0xD);
- PHY_SetRFReg(pAdapter, RF_PATH_B, RF_0x52, 0x000F0, 0xD);
-}
-/*---------------------------hal\rtl8192c\MPT_Phy.c---------------------------*/
-
-/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
-void Hal_MPT_CCKTxPowerAdjust(struct adapter *Adapter, bool bInCH14)
-{
- u32 TempVal = 0, TempVal2 = 0, TempVal3 = 0;
- u32 CurrCCKSwingVal = 0, CCKSwingIndex = 12;
- u8 i;
-
- /* get current cck swing value and check 0xa22 & 0xa23 later to match the table. */
- CurrCCKSwingVal = read_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord);
-
- if (!bInCH14) {
- /* Readback the current bb cck swing value and compare with the table to */
- /* get the current swing index */
- for (i = 0; i < CCK_TABLE_SIZE; i++) {
- if (((CurrCCKSwingVal & 0xff) == (u32)CCKSwingTable_Ch1_Ch13[i][0]) &&
- (((CurrCCKSwingVal & 0xff00) >> 8) == (u32)CCKSwingTable_Ch1_Ch13[i][1])) {
- CCKSwingIndex = i;
- break;
- }
- }
-
- /* Write 0xa22 0xa23 */
- TempVal = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][0] +
- (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][1] << 8);
-
- /* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
- TempVal2 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][2] +
- (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][3] << 8) +
- (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][4] << 16) +
- (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][5] << 24);
-
- /* Write 0xa28 0xa29 */
- TempVal3 = 0;
- TempVal3 = CCKSwingTable_Ch1_Ch13[CCKSwingIndex][6] +
- (CCKSwingTable_Ch1_Ch13[CCKSwingIndex][7] << 8);
- } else {
- for (i = 0; i < CCK_TABLE_SIZE; i++) {
- if (((CurrCCKSwingVal & 0xff) == (u32)CCKSwingTable_Ch14[i][0]) &&
- (((CurrCCKSwingVal & 0xff00) >> 8) == (u32)CCKSwingTable_Ch14[i][1])) {
- CCKSwingIndex = i;
- break;
- }
- }
-
- /* Write 0xa22 0xa23 */
- TempVal = CCKSwingTable_Ch14[CCKSwingIndex][0] +
- (CCKSwingTable_Ch14[CCKSwingIndex][1] << 8);
-
- /* Write 0xa24 ~ 0xa27 */
- TempVal2 = 0;
- TempVal2 = CCKSwingTable_Ch14[CCKSwingIndex][2] +
- (CCKSwingTable_Ch14[CCKSwingIndex][3] << 8) +
- (CCKSwingTable_Ch14[CCKSwingIndex][4] << 16) +
- (CCKSwingTable_Ch14[CCKSwingIndex][5] << 24);
-
- /* Write 0xa28 0xa29 */
- TempVal3 = 0;
- TempVal3 = CCKSwingTable_Ch14[CCKSwingIndex][6] +
- (CCKSwingTable_Ch14[CCKSwingIndex][7] << 8);
- }
-
- write_bbreg(Adapter, rCCK0_TxFilter1, bMaskHWord, TempVal);
- write_bbreg(Adapter, rCCK0_TxFilter2, bMaskDWord, TempVal2);
- write_bbreg(Adapter, rCCK0_DebugPort, bMaskLWord, TempVal3);
-}
-
-void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- struct mpt_context *pMptCtx = &pAdapter->mppriv.MptCtx;
- struct odm_dm_struct *pDM_Odm = &pHalData->odmpriv;
- s32 TempCCk;
- u8 CCK_index, CCK_index_old = 0;
- u8 Action = 0; /* 0: no action, 1: even->odd, 2:odd->even */
- s32 i = 0;
-
- if (!IS_92C_SERIAL(pHalData->VersionID))
- return;
- if (beven && !pMptCtx->bMptIndexEven) {
- /* odd->even */
- Action = 2;
- pMptCtx->bMptIndexEven = true;
- } else if (!beven && pMptCtx->bMptIndexEven) {
- /* even->odd */
- Action = 1;
- pMptCtx->bMptIndexEven = false;
- }
-
- if (Action != 0) {
- /* Query CCK default setting From 0xa24 */
- TempCCk = read_bbreg(pAdapter, rCCK0_TxFilter2, bMaskDWord) & bMaskCCK;
- for (i = 0; i < CCK_TABLE_SIZE; i++) {
- if (pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch14[i][2], 4)) {
- CCK_index_old = (u8)i;
- break;
- }
- } else {
- if (!memcmp((void *)&TempCCk, (void *)&CCKSwingTable_Ch1_Ch13[i][2], 4)) {
- CCK_index_old = (u8)i;
- break;
- }
- }
- }
-
- if (Action == 1)
- CCK_index = CCK_index_old - 1;
- else
- CCK_index = CCK_index_old + 1;
-
- /* Adjust CCK according to gain index */
- if (!pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch1_Ch13[CCK_index][0]);
- rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch1_Ch13[CCK_index][1]);
- rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch1_Ch13[CCK_index][2]);
- rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch1_Ch13[CCK_index][3]);
- rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch1_Ch13[CCK_index][4]);
- rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch1_Ch13[CCK_index][5]);
- rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch1_Ch13[CCK_index][6]);
- rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch1_Ch13[CCK_index][7]);
- } else {
- rtw_write8(pAdapter, 0xa22, CCKSwingTable_Ch14[CCK_index][0]);
- rtw_write8(pAdapter, 0xa23, CCKSwingTable_Ch14[CCK_index][1]);
- rtw_write8(pAdapter, 0xa24, CCKSwingTable_Ch14[CCK_index][2]);
- rtw_write8(pAdapter, 0xa25, CCKSwingTable_Ch14[CCK_index][3]);
- rtw_write8(pAdapter, 0xa26, CCKSwingTable_Ch14[CCK_index][4]);
- rtw_write8(pAdapter, 0xa27, CCKSwingTable_Ch14[CCK_index][5]);
- rtw_write8(pAdapter, 0xa28, CCKSwingTable_Ch14[CCK_index][6]);
- rtw_write8(pAdapter, 0xa29, CCKSwingTable_Ch14[CCK_index][7]);
- }
- }
-}
-/*---------------------------hal\rtl8192c\MPT_HelperFunc.c---------------------------*/
-
-/*
- * SetChannel
- * Description
- * Use H2C command to change channel,
- * not only modify rf register, but also other setting need to be done.
- */
-void Hal_SetChannel(struct adapter *pAdapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- struct mp_priv *pmp = &pAdapter->mppriv;
- struct odm_dm_struct *pDM_Odm = &pHalData->odmpriv;
- u8 eRFPath;
- u8 channel = pmp->channel;
-
- /* set RF channel register */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++)
- _write_rfreg(pAdapter, eRFPath, ODM_CHANNEL, 0x3FF, channel);
- Hal_mpt_SwitchRfSetting(pAdapter);
-
- SelectChannel(pAdapter, channel);
-
- if (pHalData->CurrentChannel == 14 && !pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- pDM_Odm->RFCalibrateInfo.bCCKinCH14 = true;
- Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
- } else if (pHalData->CurrentChannel != 14 && pDM_Odm->RFCalibrateInfo.bCCKinCH14) {
- pDM_Odm->RFCalibrateInfo.bCCKinCH14 = false;
- Hal_MPT_CCKTxPowerAdjust(pAdapter, pDM_Odm->RFCalibrateInfo.bCCKinCH14);
- }
-}
-
-/*
- * Notice
- * Switching bandwidth may change the center frequency (channel)
- */
-void Hal_SetBandwidth(struct adapter *pAdapter)
-{
- struct mp_priv *pmp = &pAdapter->mppriv;
-
- SetBWMode(pAdapter, pmp->bandwidth, pmp->prime_channel_offset);
- Hal_mpt_SwitchRfSetting(pAdapter);
-}
-
-void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 *TxPower)
-{
- u32 tmpval = 0;
-
- /* rf-A cck tx power */
- write_bbreg(pAdapter, rTxAGC_A_CCK1_Mcs32, bMaskByte1, TxPower[RF_PATH_A]);
- tmpval = (TxPower[RF_PATH_A] << 16) | (TxPower[RF_PATH_A] << 8) | TxPower[RF_PATH_A];
- write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
-
- /* rf-B cck tx power */
- write_bbreg(pAdapter, rTxAGC_B_CCK11_A_CCK2_11, bMaskByte0, TxPower[RF_PATH_B]);
- tmpval = (TxPower[RF_PATH_B] << 16) | (TxPower[RF_PATH_B] << 8) | TxPower[RF_PATH_B];
- write_bbreg(pAdapter, rTxAGC_B_CCK1_55_Mcs32, 0xffffff00, tmpval);
-}
-
-void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 *TxPower)
-{
- u32 TxAGC = 0;
- u8 tmpval = 0;
-
- /* HT Tx-rf(A) */
- tmpval = TxPower[RF_PATH_A];
- TxAGC = (tmpval << 24) | (tmpval << 16) | (tmpval << 8) | tmpval;
-
- write_bbreg(pAdapter, rTxAGC_A_Rate18_06, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_A_Rate54_24, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_A_Mcs03_Mcs00, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_A_Mcs07_Mcs04, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_A_Mcs11_Mcs08, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_A_Mcs15_Mcs12, bMaskDWord, TxAGC);
-
- /* HT Tx-rf(B) */
- tmpval = TxPower[RF_PATH_B];
- TxAGC = (tmpval << 24) | (tmpval << 16) | (tmpval << 8) | tmpval;
-
- write_bbreg(pAdapter, rTxAGC_B_Rate18_06, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_B_Rate54_24, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_B_Mcs03_Mcs00, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_B_Mcs07_Mcs04, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_B_Mcs11_Mcs08, bMaskDWord, TxAGC);
- write_bbreg(pAdapter, rTxAGC_B_Mcs15_Mcs12, bMaskDWord, TxAGC);
-}
-
-void Hal_SetAntennaPathPower(struct adapter *pAdapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- u8 TxPowerLevel[RF_PATH_MAX];
- u8 rfPath;
-
- TxPowerLevel[RF_PATH_A] = pAdapter->mppriv.txpoweridx;
- TxPowerLevel[RF_PATH_B] = pAdapter->mppriv.txpoweridx_b;
-
- switch (pAdapter->mppriv.antenna_tx) {
- case ANTENNA_A:
- default:
- rfPath = RF_PATH_A;
- break;
- case ANTENNA_B:
- rfPath = RF_PATH_B;
- break;
- case ANTENNA_C:
- rfPath = RF_PATH_C;
- break;
- }
-
- switch (pHalData->rf_chip) {
- case RF_8225:
- case RF_8256:
- case RF_6052:
- Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
- if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
- Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath] % 2 == 0);
- Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
- break;
- default:
- break;
- }
-}
-
-void Hal_SetTxPower(struct adapter *pAdapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- u8 TxPower = pAdapter->mppriv.txpoweridx;
- u8 TxPowerLevel[RF_PATH_MAX];
- u8 rf, rfPath;
-
- for (rf = 0; rf < RF_PATH_MAX; rf++)
- TxPowerLevel[rf] = TxPower;
-
- switch (pAdapter->mppriv.antenna_tx) {
- case ANTENNA_A:
- default:
- rfPath = RF_PATH_A;
- break;
- case ANTENNA_B:
- rfPath = RF_PATH_B;
- break;
- case ANTENNA_C:
- rfPath = RF_PATH_C;
- break;
- }
-
- switch (pHalData->rf_chip) {
- /* 2008/09/12 MH Test only !! We enable the TX power tracking for MP!!!!! */
- /* We should call normal driver API later!! */
- case RF_8225:
- case RF_8256:
- case RF_6052:
- Hal_SetCCKTxPower(pAdapter, TxPowerLevel);
- if (pAdapter->mppriv.rateidx < MPT_RATE_6M) /* CCK rate */
- Hal_MPT_CCKTxPowerAdjustbyIndex(pAdapter, TxPowerLevel[rfPath] % 2 == 0);
- Hal_SetOFDMTxPower(pAdapter, TxPowerLevel);
- break;
- default:
- break;
- }
-}
-
-void Hal_SetDataRate(struct adapter *pAdapter)
-{
- Hal_mpt_SwitchRfSetting(pAdapter);
-}
-
-void Hal_SetAntenna(struct adapter *pAdapter)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
-
- struct ant_sel_ofdm *p_ofdm_tx; /* OFDM Tx register */
- struct ant_sel_cck *p_cck_txrx;
- u8 r_rx_antenna_ofdm = 0, r_ant_select_cck_val = 0;
- u8 chgTx = 0, chgRx = 0;
- u32 r_ant_select_ofdm_val = 0, r_ofdm_tx_en_val = 0;
-
- p_ofdm_tx = (struct ant_sel_ofdm *)&r_ant_select_ofdm_val;
- p_cck_txrx = (struct ant_sel_cck *)&r_ant_select_cck_val;
-
- p_ofdm_tx->r_ant_ht1 = 0x1;
- p_ofdm_tx->r_ant_ht2 = 0x2; /* Second TX RF path is A */
- p_ofdm_tx->r_ant_non_ht = 0x3; /* 0x1+0x2=0x3 */
-
- switch (pAdapter->mppriv.antenna_tx) {
- case ANTENNA_A:
- p_ofdm_tx->r_tx_antenna = 0x1;
- r_ofdm_tx_en_val = 0x1;
- p_ofdm_tx->r_ant_l = 0x1;
- p_ofdm_tx->r_ant_ht_s1 = 0x1;
- p_ofdm_tx->r_ant_non_ht_s1 = 0x1;
- p_cck_txrx->r_ccktx_enable = 0x8;
- chgTx = 1;
-
- /* From SD3 Willis suggestion !!! Set RF A=TX and B as standby */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 1);
- r_ofdm_tx_en_val = 0x3;
-
- /* Power save */
-
- /* We need to close RFB by SW control */
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(26), 1);
- PHY_SetBBReg(pAdapter, rFPGA0_XB_RFInterfaceOE, BIT(10), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(1), 1);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(17), 0);
- }
- break;
- case ANTENNA_B:
- p_ofdm_tx->r_tx_antenna = 0x2;
- r_ofdm_tx_en_val = 0x2;
- p_ofdm_tx->r_ant_l = 0x2;
- p_ofdm_tx->r_ant_ht_s1 = 0x2;
- p_ofdm_tx->r_ant_non_ht_s1 = 0x2;
- p_cck_txrx->r_ccktx_enable = 0x4;
- chgTx = 1;
- /* From SD3 Willis suggestion !!! Set RF A as standby */
- PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 1);
- PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
-
- /* Power save */
- /* cosa r_ant_select_ofdm_val = 0x22222222; */
-
- /* 2008/10/31 MH From SD3 Willi's suggestion. We must read RF 1T table. */
- /* 2009/01/08 MH From Sd3 Willis. We need to close RFA by SW control */
- if (pHalData->rf_type == RF_2T2R || pHalData->rf_type == RF_1T2R) {
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(10), 1);
- PHY_SetBBReg(pAdapter, rFPGA0_XA_RFInterfaceOE, BIT(10), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(1), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(17), 1);
- }
- break;
- case ANTENNA_AB: /* For 8192S */
- p_ofdm_tx->r_tx_antenna = 0x3;
- r_ofdm_tx_en_val = 0x3;
- p_ofdm_tx->r_ant_l = 0x3;
- p_ofdm_tx->r_ant_ht_s1 = 0x3;
- p_ofdm_tx->r_ant_non_ht_s1 = 0x3;
- p_cck_txrx->r_ccktx_enable = 0xC;
- chgTx = 1;
-
- /* From SD3 Willis suggestion !!! Set RF B as standby */
- PHY_SetBBReg(pAdapter, rFPGA0_XA_HSSIParameter2, 0xe, 2);
- PHY_SetBBReg(pAdapter, rFPGA0_XB_HSSIParameter2, 0xe, 2);
-
- /* Disable Power save */
- /* cosa r_ant_select_ofdm_val = 0x3321333; */
- /* 2009/01/08 MH From Sd3 Willis. We need to enable RFA/B by SW control */
- if (pHalData->rf_type == RF_2T2R) {
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(10), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFInterfaceSW, BIT(26), 0);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(1), 1);
- PHY_SetBBReg(pAdapter, rFPGA0_XAB_RFParameter, BIT(17), 1);
- }
- break;
- default:
- break;
- }
-
- /* r_rx_antenna_ofdm, bit0=A, bit1=B, bit2=C, bit3=D */
- /* r_cckrx_enable : CCK default, 0=A, 1=B, 2=C, 3=D */
- /* r_cckrx_enable_2 : CCK option, 0=A, 1=B, 2=C, 3=D */
- switch (pAdapter->mppriv.antenna_rx) {
- case ANTENNA_A:
- r_rx_antenna_ofdm = 0x1; /* A */
- p_cck_txrx->r_cckrx_enable = 0x0; /* default: A */
- p_cck_txrx->r_cckrx_enable_2 = 0x0; /* option: A */
- chgRx = 1;
- break;
- case ANTENNA_B:
- r_rx_antenna_ofdm = 0x2; /* B */
- p_cck_txrx->r_cckrx_enable = 0x1; /* default: B */
- p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option: B */
- chgRx = 1;
- break;
- case ANTENNA_AB:
- r_rx_antenna_ofdm = 0x3; /* AB */
- p_cck_txrx->r_cckrx_enable = 0x0; /* default:A */
- p_cck_txrx->r_cckrx_enable_2 = 0x1; /* option:B */
- chgRx = 1;
- break;
- default:
- break;
- }
-
- if (chgTx && chgRx) {
- switch (pHalData->rf_chip) {
- case RF_8225:
- case RF_8256:
- case RF_6052:
- /* r_ant_sel_cck_val = r_ant_select_cck_val; */
- PHY_SetBBReg(pAdapter, rFPGA1_TxInfo, 0x7fffffff, r_ant_select_ofdm_val); /* OFDM Tx */
- PHY_SetBBReg(pAdapter, rFPGA0_TxInfo, 0x0000000f, r_ofdm_tx_en_val); /* OFDM Tx */
- PHY_SetBBReg(pAdapter, rOFDM0_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
- PHY_SetBBReg(pAdapter, rOFDM1_TRxPathEnable, 0x0000000f, r_rx_antenna_ofdm); /* OFDM Rx */
- PHY_SetBBReg(pAdapter, rCCK0_AFESetting, bMaskByte3, r_ant_select_cck_val); /* CCK TxRx */
-
- break;
- default:
- break;
- }
- }
-}
-
-s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
-
- if (!netif_running(pAdapter->pnetdev))
- return _FAIL;
-
- if (!check_fwstate(&pAdapter->mlmepriv, WIFI_MP_STATE))
- return _FAIL;
-
- target_ther &= 0xff;
- if (target_ther < 0x07)
- target_ther = 0x07;
- else if (target_ther > 0x1d)
- target_ther = 0x1d;
-
- pHalData->EEPROMThermalMeter = target_ther;
-
- return _SUCCESS;
-}
-
-void Hal_TriggerRFThermalMeter(struct adapter *pAdapter)
-{
- _write_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, BIT(17) | BIT(16), 0x03);
-}
-
-u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter)
-{
- u32 ThermalValue = 0;
-
- ThermalValue = _read_rfreg(pAdapter, RF_PATH_A, RF_T_METER_88E, 0xfc00);
- return (u8)ThermalValue;
-}
-
-void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value)
-{
- Hal_TriggerRFThermalMeter(pAdapter);
- msleep(1000);
- *value = Hal_ReadRFThermalMeter(pAdapter);
-}
-
-void Hal_SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart)
-{
- pAdapter->mppriv.MptCtx.bSingleCarrier = bStart;
- if (bStart) {
- /* Start Single Carrier. */
- /* 1. if OFDM block on? */
- if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
- write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
-
- /* 2. set CCK test mode off, set to CCK normal mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
- /* 3. turn on scramble setting */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
- /* 4. Turn On Single Carrier Tx and turn off the other test modes. */
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bEnable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- /* for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
- } else {
- /* Stop Single Carrier. */
- /* Turn off all test modes. */
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- msleep(10);
-
- /* BB Reset */
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
-
- /* Stop for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
- }
-}
-
-void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(pAdapter);
- bool is92C = IS_92C_SERIAL(pHalData->VersionID);
-
- u8 rfPath;
- u32 reg58 = 0x0;
- switch (pAdapter->mppriv.antenna_tx) {
- case ANTENNA_A:
- default:
- rfPath = RF_PATH_A;
- break;
- case ANTENNA_B:
- rfPath = RF_PATH_B;
- break;
- case ANTENNA_C:
- rfPath = RF_PATH_C;
- break;
- }
-
- pAdapter->mppriv.MptCtx.bSingleTone = bStart;
- if (bStart) {
- /* Start Single Tone. */
- /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- reg58 += 2;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
- PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x0);
- PHY_SetBBReg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x0);
-
- if (is92C) {
- _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT(19), 0x01);
- rtw_usleep_os(100);
- if (rfPath == RF_PATH_A)
- write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x10000); /* PAD all on. */
- else if (rfPath == RF_PATH_B)
- write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x10000); /* PAD all on. */
- write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
- } else {
- write_rfreg(pAdapter, rfPath, 0x21, 0xd4000);
- rtw_usleep_os(100);
- write_rfreg(pAdapter, rfPath, 0x00, 0x2001f); /* PAD all on. */
- rtw_usleep_os(100);
- }
-
- /* for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
-
- } else {
- /* Stop Single Tone. */
- /* <20120326, Kordan> To amplify the power of tone for Xtal calibration. (asked by Edlu) */
- /* <20120326, Kordan> Only in single tone mode. (asked by Edlu) */
- if (IS_HARDWARE_TYPE_8188E(pAdapter)) {
- reg58 = PHY_QueryRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask);
- reg58 &= 0xFFFFFFF0;
- PHY_SetRFReg(pAdapter, RF_PATH_A, LNA_Low_Gain_3, bRFRegOffsetMask, reg58);
- }
- write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, 0x1);
- write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, 0x1);
- if (is92C) {
- _write_rfreg(pAdapter, RF_PATH_A, 0x21, BIT(19), 0x00);
- rtw_usleep_os(100);
- write_rfreg(pAdapter, RF_PATH_A, 0x00, 0x32d75); /* PAD all on. */
- write_rfreg(pAdapter, RF_PATH_B, 0x00, 0x32d75); /* PAD all on. */
- rtw_usleep_os(100);
- } else {
- write_rfreg(pAdapter, rfPath, 0x21, 0x54000);
- rtw_usleep_os(100);
- write_rfreg(pAdapter, rfPath, 0x00, 0x30000); /* PAD all on. */
- rtw_usleep_os(100);
- }
-
- /* Stop for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
- }
-}
-
-void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart)
-{
- pAdapter->mppriv.MptCtx.bCarrierSuppression = bStart;
- if (bStart) {
- /* Start Carrier Suppression. */
- if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
- /* 1. if CCK block on? */
- if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
- write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
-
- /* Turn Off All Test Mode */
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
-
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x0); /* turn off scramble setting */
-
- /* Set CCK Tx Test Rate */
- write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, 0x0); /* Set FTxRate to 1Mbps */
- }
-
- /* for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
- } else {
- /* Stop Carrier Suppression. */
- if (pAdapter->mppriv.rateidx <= MPT_RATE_11M) {
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, 0x1); /* turn on scramble setting */
-
- /* BB Reset */
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
- }
-
- /* Stop for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
- }
-}
-
-void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart)
-{
- u32 cckrate;
-
- if (bStart) {
- /* 1. if CCK block on? */
- if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn))
- write_bbreg(pAdapter, rFPGA0_RFMOD, bCCKEn, bEnable);/* set CCK block on */
-
- /* Turn Off All Test Mode */
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- /* Set CCK Tx Test Rate */
- cckrate = pAdapter->mppriv.rateidx;
- write_bbreg(pAdapter, rCCK0_System, bCCKTxRate, cckrate);
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x2); /* transmit mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
-
- /* for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
- } else {
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, 0x0); /* normal mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable); /* turn on scramble setting */
-
- /* BB Reset */
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
-
- /* Stop for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
- }
-
- pAdapter->mppriv.MptCtx.bCckContTx = bStart;
- pAdapter->mppriv.MptCtx.bOfdmContTx = false;
-} /* mpt_StartCckContTx */
-
-void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart)
-{
- if (bStart) {
- /* 1. if OFDM block on? */
- if (!read_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn))
- write_bbreg(pAdapter, rFPGA0_RFMOD, bOFDMEn, bEnable);/* set OFDM block on */
-
- /* 2. set CCK test mode off, set to CCK normal mode */
- write_bbreg(pAdapter, rCCK0_System, bCCKBBMode, bDisable);
-
- /* 3. turn on scramble setting */
- write_bbreg(pAdapter, rCCK0_System, bCCKScramble, bEnable);
- /* 4. Turn On Continue Tx and turn off the other test modes. */
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bEnable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
-
- /* for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000500);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000500);
-
- } else {
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMContinueTx, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleCarrier, bDisable);
- write_bbreg(pAdapter, rOFDM1_LSTF, bOFDMSingleTone, bDisable);
- /* Delay 10 ms */
- msleep(10);
- /* BB Reset */
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x0);
- write_bbreg(pAdapter, rPMAC_Reset, bBBResetB, 0x1);
-
- /* Stop for dynamic set Power index. */
- write_bbreg(pAdapter, rFPGA0_XA_HSSIParameter1, bMaskDWord, 0x01000100);
- write_bbreg(pAdapter, rFPGA0_XB_HSSIParameter1, bMaskDWord, 0x01000100);
- }
-
- pAdapter->mppriv.MptCtx.bCckContTx = false;
- pAdapter->mppriv.MptCtx.bOfdmContTx = bStart;
-} /* mpt_StartOfdmContTx */
-
-void Hal_SetContinuousTx(struct adapter *pAdapter, u8 bStart)
-{
- pAdapter->mppriv.MptCtx.bStartContTx = bStart;
- if (pAdapter->mppriv.rateidx <= MPT_RATE_11M)
- Hal_SetCCKContinuousTx(pAdapter, bStart);
- else if ((pAdapter->mppriv.rateidx >= MPT_RATE_6M) &&
- (pAdapter->mppriv.rateidx <= MPT_RATE_MCS15))
- Hal_SetOFDMContinuousTx(pAdapter, bStart);
-}
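A pattern that recurs in the deleted MP code above (Hal_SetCCKTxPower, Hal_SetOFDMTxPower) is replicating one per-path power byte into all four bytes of a 32-bit TxAGC value before writing it to the rTxAGC_* baseband registers. A minimal standalone sketch; the helper name is made up for illustration.

/* Illustration only: expand an 8-bit power index into every byte of a
 * 32-bit TxAGC word, the form written with write_bbreg() above. */
static u32 example_replicate_txagc(u8 power_idx)
{
	u32 v = power_idx;

	return (v << 24) | (v << 16) | (v << 8) | v;
}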
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
index 30a9dca8f453..bb0cda0c16a0 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_phycfg.c
@@ -532,7 +532,7 @@ void storePwrIndexDiffRateOffset(struct adapter *Adapter, u32 RegAddr, u32 BitMa
static int phy_BB8188E_Config_ParaFile(struct adapter *Adapter)
{
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(Adapter);
+ struct eeprom_priv *pEEPROM = &Adapter->eeprompriv;
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
int rtStatus = _SUCCESS;
@@ -609,166 +609,6 @@ int PHY_RFConfig8188E(struct adapter *Adapter)
return rtStatus;
}
-/*-----------------------------------------------------------------------------
- * Function: PHY_ConfigRFWithParaFile()
- *
- * Overview: This function reads RF parameters from a general file format and does the RF 3-wire programming
- *
- * Input: struct adapter *Adapter
- * ps8 pFileName
- * enum rf_radio_path eRFPath
- *
- * Output: NONE
- *
- * Return: RT_STATUS_SUCCESS: configuration file exist
- *
- * Note: Delay may be required for RF configuration
- *---------------------------------------------------------------------------*/
-int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *Adapter, u8 *pFileName, enum rf_radio_path eRFPath)
-{
- return _SUCCESS;
-}
-
-void
-rtl8192c_PHY_GetHWRegOriginalValue(
- struct adapter *Adapter
- )
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
-
- /* read rx initial gain */
- pHalData->DefaultInitialGain[0] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XAAGCCore1, bMaskByte0);
- pHalData->DefaultInitialGain[1] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XBAGCCore1, bMaskByte0);
- pHalData->DefaultInitialGain[2] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XCAGCCore1, bMaskByte0);
- pHalData->DefaultInitialGain[3] = (u8)PHY_QueryBBReg(Adapter, rOFDM0_XDAGCCore1, bMaskByte0);
-
- /* read framesync */
- pHalData->framesync = (u8)PHY_QueryBBReg(Adapter, rOFDM0_RxDetector3, bMaskByte0);
- pHalData->framesyncC34 = PHY_QueryBBReg(Adapter, rOFDM0_RxDetector2, bMaskDWord);
-}
-
-/* */
-/* Description: */
-/* Map dBm into Tx power index according to */
-/* current HW model, for example, RF and PA, and */
-/* current wireless mode. */
-/* By Bruce, 2008-01-29. */
-/* */
-static u8 phy_DbmToTxPwrIdx(struct adapter *Adapter, enum wireless_mode WirelessMode, int PowerInDbm)
-{
- u8 TxPwrIdx = 0;
- int Offset = 0;
-
- /* */
- /* Tested by MP, we found that CCK Index 0 equals to 8dbm, OFDM legacy equals to */
- /* 3dbm, and OFDM HT equals to 0dbm respectively. */
- /* Note: */
- /* The mapping may be different by different NICs. Do not use this formula for what needs accurate result. */
- /* By Bruce, 2008-01-29. */
- /* */
- switch (WirelessMode) {
- case WIRELESS_MODE_B:
- Offset = -7;
- break;
-
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- default:
- Offset = -8;
- break;
- }
-
- if ((PowerInDbm - Offset) > 0)
- TxPwrIdx = (u8)((PowerInDbm - Offset) * 2);
- else
- TxPwrIdx = 0;
-
- /* Tx Power Index is too large. */
- if (TxPwrIdx > MAX_TXPWR_IDX_NMODE_92S)
- TxPwrIdx = MAX_TXPWR_IDX_NMODE_92S;
-
- return TxPwrIdx;
-}
-
-/* */
-/* Description: */
-/* Map Tx power index into dBm according to */
-/* current HW model, for example, RF and PA, and */
-/* current wireless mode. */
-/* By Bruce, 2008-01-29. */
-/* */
-static int phy_TxPwrIdxToDbm(struct adapter *Adapter, enum wireless_mode WirelessMode, u8 TxPwrIdx)
-{
- int Offset = 0;
- int PwrOutDbm = 0;
-
- /* */
- /* Tested by MP, we found that CCK Index 0 equals to -7dbm, OFDM legacy equals to -8dbm. */
- /* Note: */
- /* The mapping may be different by different NICs. Do not use this formula for what needs accurate result. */
- /* By Bruce, 2008-01-29. */
- /* */
- switch (WirelessMode) {
- case WIRELESS_MODE_B:
- Offset = -7;
- break;
- case WIRELESS_MODE_G:
- case WIRELESS_MODE_N_24G:
- default:
- Offset = -8;
- break;
- }
-
- PwrOutDbm = TxPwrIdx / 2 + Offset; /* Discard the decimal part. */
-
- return PwrOutDbm;
-}
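A worked example of the two mappings removed above, using the offsets given in their comments (-7 for CCK/802.11b, -8 for OFDM and HT): 16 dBm in OFDM maps to index (16 - (-8)) * 2 = 48, and index 48 maps back to 48 / 2 + (-8) = 16 dBm; a CCK index of 30 reads back as 30 / 2 + (-7) = 8 dBm. The factor of two reflects the half-dB step of the index, and the index-to-dBm direction truncates the fractional half dB exactly as the integer division above does.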
-
-/*-----------------------------------------------------------------------------
- * Function: GetTxPowerLevel8190()
- *
- * Overview: This function is exported to the "common" module
- *
- * Input: struct adapter *Adapter
- * psByte Power Level
- *
- * Output: NONE
- *
- * Return: NONE
- *
- *---------------------------------------------------------------------------*/
-void PHY_GetTxPowerLevel8188E(struct adapter *Adapter, u32 *powerlevel)
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- u8 TxPwrLevel = 0;
- int TxPwrDbm;
-
- /* */
- /* Because the Tx power indexes are different, we report the maximum of them to */
- /* meet the CCX TPC request. By Bruce, 2008-01-31. */
- /* */
-
- /* CCK */
- TxPwrLevel = pHalData->CurrentCckTxPwrIdx;
- TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_B, TxPwrLevel);
-
- /* Legacy OFDM */
- TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx + pHalData->LegacyHTTxPowerDiff;
-
- /* Compare with Legacy OFDM Tx power. */
- if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel) > TxPwrDbm)
- TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_G, TxPwrLevel);
-
- /* HT OFDM */
- TxPwrLevel = pHalData->CurrentOfdm24GTxPwrIdx;
-
- /* Compare with HT OFDM Tx power. */
- if (phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel) > TxPwrDbm)
- TxPwrDbm = phy_TxPwrIdxToDbm(Adapter, WIRELESS_MODE_N_24G, TxPwrLevel);
-
- *powerlevel = TxPwrDbm;
-}
-
static void getTxPowerIndex88E(struct adapter *Adapter, u8 channel, u8 *cckPowerLevel,
u8 *ofdmPowerLevel, u8 *BW20PowerLevel,
u8 *BW40PowerLevel)
@@ -892,51 +732,6 @@ PHY_SetTxPowerLevel8188E(
rtl8188e_PHY_RF6052SetOFDMTxPower(Adapter, &ofdmPowerLevel[0], &BW20PowerLevel[0], &BW40PowerLevel[0], channel);
}
-/* */
-/* Description: */
-/* Update transmit power level of all channel supported. */
-/* */
-/* TODO: */
-/* A mode. */
-/* By Bruce, 2008-02-04. */
-/* */
-bool
-PHY_UpdateTxPowerDbm8188E(
- struct adapter *Adapter,
- int powerInDbm
- )
-{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- u8 idx;
- u8 rf_path;
-
- /* TODO: A mode Tx power. */
- u8 CckTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_B, powerInDbm);
- u8 OfdmTxPwrIdx = phy_DbmToTxPwrIdx(Adapter, WIRELESS_MODE_N_24G, powerInDbm);
-
- if (OfdmTxPwrIdx - pHalData->LegacyHTTxPowerDiff > 0)
- OfdmTxPwrIdx -= pHalData->LegacyHTTxPowerDiff;
- else
- OfdmTxPwrIdx = 0;
-
- for (idx = 0; idx < 14; idx++) {
- for (rf_path = 0; rf_path < 2; rf_path++) {
- pHalData->TxPwrLevelCck[rf_path][idx] = CckTxPwrIdx;
- pHalData->TxPwrLevelHT40_1S[rf_path][idx] =
- pHalData->TxPwrLevelHT40_2S[rf_path][idx] = OfdmTxPwrIdx;
- }
- }
- return true;
-}
-
-void
-PHY_ScanOperationBackup8188E(
- struct adapter *Adapter,
- u8 Operation
- )
-{
-}
-
/*-----------------------------------------------------------------------------
* Function: PHY_SetBWModeCallback8192C()
*
@@ -1068,7 +863,7 @@ void PHY_SetBWMode8188E(struct adapter *Adapter, enum ht_channel_width Bandwidth
static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
{
- u8 eRFPath;
+ u8 eRFPath = 0;
u32 param1, param2;
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
@@ -1081,10 +876,8 @@ static void _PHY_SwChnl8192C(struct adapter *Adapter, u8 channel)
/* s2. RF dependent command - CmdID_RF_WriteReg, param1=RF_CHNLBW, param2=channel */
param1 = RF_CHNLBW;
param2 = channel;
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
- pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
- PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
- }
+ pHalData->RfRegChnlVal[eRFPath] = ((pHalData->RfRegChnlVal[eRFPath] & 0xfffffc00) | param2);
+ PHY_SetRFReg(Adapter, (enum rf_radio_path)eRFPath, param1, bRFRegOffsetMask, pHalData->RfRegChnlVal[eRFPath]);
}
void PHY_SwChnl8188E(struct adapter *Adapter, u8 channel)
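The _PHY_SwChnl8192C hunk above keeps only the low ten bits of the cached RF_CHNLBW value for the channel number. A minimal standalone sketch of that mask-and-merge; the helper name is hypothetical.

/* Illustration only: the channel number occupies the low 10 bits of the
 * cached RF_CHNLBW register value, so a channel switch is a read-modify-
 * write that preserves the upper bits, as in the hunk above. */
static u32 example_merge_channel(u32 cached_rf_chnlbw, u8 channel)
{
	return (cached_rf_chnlbw & 0xfffffc00) | channel;
}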
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
index ad0782259654..946a1b97d96f 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rf6052.c
@@ -29,49 +29,6 @@
#include "../include/drv_types.h"
#include "../include/rtl8188e_hal.h"
-/*---------------------------Define Local Constant---------------------------*/
-/* Define local structure for debug!!!!! */
-struct rf_shadow {
- /* Shadow register value */
- u32 Value;
- /* Compare or not flag */
- u8 Compare;
- /* Record If it had ever modified unpredicted */
- u8 ErrorOrNot;
- /* Recorver Flag */
- u8 Recorver;
- /* */
- u8 Driver_Write;
-};
-
-/*---------------------------Define Local Constant---------------------------*/
-
-/*------------------------Define global variable-----------------------------*/
-
-/*------------------------Define local variable------------------------------*/
-
-/*-----------------------------------------------------------------------------
- * Function: RF_ChangeTxPath
- *
- * Overview: For RL6052, we must change some RF settings for 1T or 2T.
- *
- * Input: u16 DataRate 0x80-8f, 0x90-9f
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 09/25/2008 MHC Create Version 0.
- * Firmware will support the utility later.
- *
- *---------------------------------------------------------------------------*/
-void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate)
-{
-/* We do not support gain table change inACUT now !!!! Delete later !!! */
-} /* RF_ChangeTxPath */
-
/*-----------------------------------------------------------------------------
* Function: PHY_RF6052SetBandwidth()
*
@@ -128,7 +85,6 @@ rtl8188e_PHY_RF6052SetCckTxPower(
u8 *pPowerlevel)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
u32 TxAGC[2] = {0, 0}, tmpval = 0, pwrtrac_value;
bool TurboScanOff = false;
@@ -155,34 +111,19 @@ rtl8188e_PHY_RF6052SetCckTxPower(
}
}
} else {
- /* Driver dynamic Tx power shall not affect Tx power.
- * It shall be determined by power training mechanism.
- * Currently, we cannot fully disable driver dynamic
- * tx power mechanism because it is referenced by BT
- * coexist mechanism.
- * In the future, two mechanism shall be separated from
- * each other and maintained independently. */
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1) {
- TxAGC[RF_PATH_A] = 0x10101010;
- TxAGC[RF_PATH_B] = 0x10101010;
- } else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2) {
- TxAGC[RF_PATH_A] = 0x00000000;
- TxAGC[RF_PATH_B] = 0x00000000;
- } else {
- for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
- TxAGC[idx1] =
- pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
- (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
- }
- if (pHalData->EEPROMRegulatory == 0) {
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][7] << 8);
- TxAGC[RF_PATH_A] += tmpval;
-
- tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
- (pHalData->MCSTxPowerLevelOriginalOffset[0][15] << 24);
- TxAGC[RF_PATH_B] += tmpval;
- }
+ for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
+ TxAGC[idx1] =
+ pPowerlevel[idx1] | (pPowerlevel[idx1] << 8) |
+ (pPowerlevel[idx1] << 16) | (pPowerlevel[idx1] << 24);
+ }
+ if (pHalData->EEPROMRegulatory == 0) {
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][6]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][7] << 8);
+ TxAGC[RF_PATH_A] += tmpval;
+
+ tmpval = (pHalData->MCSTxPowerLevelOriginalOffset[0][14]) +
+ (pHalData->MCSTxPowerLevelOriginalOffset[0][15] << 24);
+ TxAGC[RF_PATH_B] += tmpval;
}
}
for (idx1 = RF_PATH_A; idx1 <= RF_PATH_B; idx1++) {
@@ -227,7 +168,7 @@ static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
u32 powerBase0, powerBase1;
- u8 i, powerlevel[2];
+ u8 i;
for (i = 0; i < 2; i++) {
powerBase0 = pPowerLevelOFDM[i];
@@ -235,23 +176,21 @@ static void getpowerbase88e(struct adapter *Adapter, u8 *pPowerLevelOFDM,
powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0;
*(OfdmBase + i) = powerBase0;
}
- for (i = 0; i < pHalData->NumTotalRFPath; i++) {
- /* Check HT20 to HT40 diff */
- if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
- powerlevel[i] = pPowerLevelBW20[i];
- else
- powerlevel[i] = pPowerLevelBW40[i];
- powerBase1 = powerlevel[i];
- powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
- *(MCSBase + i) = powerBase1;
- }
+
+ /* Check HT20 to HT40 diff */
+ if (pHalData->CurrentChannelBW == HT_CHANNEL_WIDTH_20)
+ powerBase1 = pPowerLevelBW20[0];
+ else
+ powerBase1 = pPowerLevelBW40[0];
+ powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
+ *MCSBase = powerBase1;
}
+
static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
u8 index, u32 *powerBase0, u32 *powerBase1,
u32 *pOutWriteVal)
{
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
- struct dm_priv *pdmpriv = &pHalData->dmpriv;
u8 i, chnlGroup = 0, pwr_diff_limit[4], customer_pwr_limit;
s8 pwr_diff = 0;
u32 writeVal, customer_limit, rf;
@@ -327,19 +266,7 @@ static void get_rx_power_val_by_reg(struct adapter *Adapter, u8 Channel,
((index < 2) ? powerBase0[rf] : powerBase1[rf]);
break;
}
-/* 20100427 Joseph: Driver dynamic Tx power shall not affect Tx power. It shall be determined by power training mechanism. */
-/* Currently, we cannot fully disable driver dynamic tx power mechanism because it is referenced by BT coexist mechanism. */
-/* In the future, two mechanism shall be separated from each other and maintained independently. Thanks for Lanhsin's reminder. */
- /* 92d do not need this */
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level1)
- writeVal = 0x14141414;
- else if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_Level2)
- writeVal = 0x00000000;
-
- /* 20100628 Joseph: High power mode for BT-Coexist mechanism. */
- /* This mechanism is only applied when Driver-Highpower-Mechanism is OFF. */
- if (pdmpriv->DynamicTxHighPowerLvl == TxHighPwrLevel_BT1)
- writeVal = writeVal - 0x06060606;
+
*(pOutWriteVal + rf) = writeVal;
}
}
@@ -458,70 +385,41 @@ static int phy_RF6052_Config_ParaFile(struct adapter *Adapter)
struct bb_reg_def *pPhyReg;
struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
u32 u4RegValue = 0;
- u8 eRFPath;
+ u8 eRFPath = 0;
int rtStatus = _SUCCESS;
- /* 3----------------------------------------------------------------- */
- /* 3 <2> Initialize RF */
- /* 3----------------------------------------------------------------- */
- for (eRFPath = 0; eRFPath < pHalData->NumTotalRFPath; eRFPath++) {
- pPhyReg = &pHalData->PHYRegDef[eRFPath];
-
- /*----Store original RFENV control type----*/
- switch (eRFPath) {
- case RF_PATH_A:
- case RF_PATH_C:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
- break;
- case RF_PATH_B:
- case RF_PATH_D:
- u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16);
- break;
- }
- /*----Set RF_ENV enable----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Set RF_ENV output high----*/
- PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
- udelay(1);/* PlatformStallExecution(1); */
-
- /* Set bit number of Address and Data for RF register */
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
- udelay(1);/* PlatformStallExecution(1); */
-
- /*----Initialize RF fom connfiguration file----*/
- switch (eRFPath) {
- case RF_PATH_A:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
- rtStatus = _FAIL;
- break;
- case RF_PATH_B:
- if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
- rtStatus = _FAIL;
- break;
- case RF_PATH_C:
- break;
- case RF_PATH_D:
- break;
- }
- /*----Restore RFENV control type----*/;
- switch (eRFPath) {
- case RF_PATH_A:
- case RF_PATH_C:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
- break;
- case RF_PATH_B:
- case RF_PATH_D:
- PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV << 16, u4RegValue);
- break;
- }
- if (rtStatus != _SUCCESS)
- goto phy_RF6052_Config_ParaFile_Fail;
- }
+ /* Initialize RF */
+
+ pPhyReg = &pHalData->PHYRegDef[eRFPath];
+
+ /*----Store original RFENV control type----*/
+ u4RegValue = PHY_QueryBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV);
+
+ /*----Set RF_ENV enable----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfe, bRFSI_RFENV << 16, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Set RF_ENV output high----*/
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfo, bRFSI_RFENV, 0x1);
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /* Set bit number of Address and Data for RF register */
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); /* Set 1 to 4 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ PHY_SetBBReg(Adapter, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); /* Set 0 to 12 bits for 8255 */
+ udelay(1);/* PlatformStallExecution(1); */
+
+ /*----Initialize RF fom connfiguration file----*/
+ if (HAL_STATUS_FAILURE == ODM_ConfigRFWithHeaderFile(&pHalData->odmpriv, (enum rf_radio_path)eRFPath, (enum rf_radio_path)eRFPath))
+ rtStatus = _FAIL;
+
+ /*----Restore RFENV control type----*/;
+ PHY_SetBBReg(Adapter, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue);
+
+ if (rtStatus != _SUCCESS)
+ goto phy_RF6052_Config_ParaFile_Fail;
+
return rtStatus;
phy_RF6052_Config_ParaFile_Fail:
@@ -530,19 +428,9 @@ phy_RF6052_Config_ParaFile_Fail:
int PHY_RF6052_Config8188E(struct adapter *Adapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(Adapter);
int rtStatus = _SUCCESS;
/* */
- /* Initialize general global value */
- /* */
- /* TODO: Extend RF_PATH_C and RF_PATH_D in the future */
- if (pHalData->rf_type == RF_1T1R)
- pHalData->NumTotalRFPath = 1;
- else
- pHalData->NumTotalRFPath = 2;
-
- /* */
/* Config BB and RF */
/* */
rtStatus = phy_RF6052_Config_ParaFile(Adapter);
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
index 244286789b6d..053d9549873d 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_rxdesc.c
@@ -127,7 +127,7 @@ void update_recvframe_phyinfo_88e(struct recv_frame *precvframe, struct phy_stat
struct adapter *padapter = precvframe->adapter;
struct rx_pkt_attrib *pattrib = &precvframe->attrib;
struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct odm_phy_status_info *pPHYInfo = (struct odm_phy_status_info *)(&pattrib->phy_info);
+ struct phy_info *pPHYInfo = &pattrib->phy_info;
u8 *wlanhdr;
struct odm_per_pkt_info pkt_info;
u8 *sa = NULL;
diff --git a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c b/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
index 16fa249e35d3..7b3ac6e306ce 100644
--- a/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
+++ b/drivers/staging/r8188eu/hal/rtl8188e_sreset.c
@@ -6,43 +6,16 @@
#include "../include/rtl8188e_sreset.h"
#include "../include/rtl8188e_hal.h"
-void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter)
-{
-}
-
void rtl8188e_sreset_xmit_status_check(struct adapter *padapter)
{
- struct hal_data_8188e *pHalData = GET_HAL_DATA(padapter);
- struct sreset_priv *psrtpriv = &pHalData->srestpriv;
-
- unsigned long current_time;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- unsigned int diff_time;
u32 txdma_status;
txdma_status = rtw_read32(padapter, REG_TXDMA_STATUS);
if (txdma_status != 0x00) {
DBG_88E("%s REG_TXDMA_STATUS:0x%08x\n", __func__, txdma_status);
rtw_write32(padapter, REG_TXDMA_STATUS, txdma_status);
- rtl8188e_silentreset_for_specific_platform(padapter);
}
/* total xmit irp = 4 */
- current_time = jiffies;
- if (0 == pxmitpriv->free_xmitbuf_cnt) {
- diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_time);
-
- if (diff_time > 2000) {
- if (psrtpriv->last_tx_complete_time == 0) {
- psrtpriv->last_tx_complete_time = current_time;
- } else {
- diff_time = jiffies_to_msecs(current_time - psrtpriv->last_tx_complete_time);
- if (diff_time > 4000) {
- DBG_88E("%s tx hang\n", __func__);
- rtl8188e_silentreset_for_specific_platform(padapter);
- }
- }
- }
- }
}
void rtl8188e_sreset_linked_status_check(struct adapter *padapter)
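The xmit-status check removed above timed transmit stalls by subtracting saved jiffies timestamps and converting the difference to milliseconds. A minimal sketch of that idiom; the parameter and threshold names are placeholders, not the driver's.

#include <linux/types.h>
#include <linux/jiffies.h>

/* Illustration only: report a stall when more than limit_ms has elapsed
 * since the last recorded transmit time. */
static bool example_tx_stalled(unsigned long last_tx_time, unsigned int limit_ms)
{
	return jiffies_to_msecs(jiffies - last_tx_time) > limit_ms;
}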
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
index 2da7bde80cc0..8031ac9f9d43 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_recv.c
@@ -12,7 +12,7 @@
#include "../include/rtl8188e_hal.h"
-void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *precvbuf)
+void rtl8188eu_init_recvbuf(struct recv_buf *precvbuf)
{
precvbuf->transfer_len = 0;
@@ -39,7 +39,7 @@ int rtl8188eu_init_recv_priv(struct adapter *padapter)
(unsigned long)padapter);
/* init recv_buf */
- _rtw_init_queue(&precvpriv->free_recv_buf_queue);
+ rtw_init_queue(&precvpriv->free_recv_buf_queue);
precvpriv->pallocated_recv_buf = kzalloc(NR_RECVBUFF * sizeof(struct recv_buf) + 4,
GFP_KERNEL);
diff --git a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
index 17be67ac5fae..b7feb4d8c8aa 100644
--- a/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/r8188eu/hal/rtl8188eu_xmit.c
@@ -19,15 +19,6 @@ s32 rtl8188eu_init_xmit_priv(struct adapter *adapt)
return _SUCCESS;
}
-static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
-{
- u8 set_tx_desc_offset;
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- set_tx_desc_offset = (((sz + TXDESC_SIZE) % haldata->UsbBulkOutSize) == 0) ? 1 : 0;
-
- return set_tx_desc_offset;
-}
-
static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
{
u16 *usptr = (u16 *)ptxdesc;
@@ -168,13 +159,6 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
- if (adapt->registrypriv.mp_mode == 0) {
- if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
- ptxdesc = (struct tx_desc *)(pmem + PACKET_OFFSET_SZ);
- pull = 1;
- }
- }
-
memset(ptxdesc, 0, sizeof(struct tx_desc));
/* 4 offset 0 */
@@ -188,13 +172,6 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
if (is_multicast_ether_addr(pattrib->ra))
ptxdesc->txdw0 |= cpu_to_le32(BMC);
- if (adapt->registrypriv.mp_mode == 0) {
- if (!bagg_pkt) {
- if ((pull) && (pxmitframe->pkt_offset > 0))
- pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
- }
- }
-
/* pkt_offset, unit:8 bytes padding */
if (pxmitframe->pkt_offset > 0)
ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);
@@ -289,9 +266,6 @@ static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bag
ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
} else if ((pxmitframe->frame_tag & 0x0f) == TXAGG_FRAMETAG) {
DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
- } else if (((pxmitframe->frame_tag & 0x0f) == MP_FRAMETAG) &&
- (adapt->registrypriv.mp_mode == 1)) {
- fill_txdesc_for_mp(adapt, ptxdesc);
} else {
DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);
@@ -437,30 +411,26 @@ s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitp
}
/* 3 1. pick up first frame */
- do {
- rtw_free_xmitframe(pxmitpriv, pxmitframe);
-
- pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
- if (!pxmitframe) {
- /* no more xmit frame, release xmit buffer */
- rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
- return false;
- }
+ rtw_free_xmitframe(pxmitpriv, pxmitframe);
- pxmitframe->pxmitbuf = pxmitbuf;
- pxmitframe->buf_addr = pxmitbuf->pbuf;
- pxmitbuf->priv_data = pxmitframe;
+ pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
+ if (!pxmitframe) {
+ /* no more xmit frame, release xmit buffer */
+ rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
+ return false;
+ }
- pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
- pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */
+ pxmitframe->pxmitbuf = pxmitbuf;
+ pxmitframe->buf_addr = pxmitbuf->pbuf;
+ pxmitbuf->priv_data = pxmitframe;
- rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
+ pxmitframe->agg_num = 1; /* alloc xmitframe should assign to 1. */
+ pxmitframe->pkt_offset = 1; /* first frame of aggregation, reserve offset */
- /* always return ndis_packet after rtw_xmitframe_coalesce */
- rtw_os_xmit_complete(adapt, pxmitframe);
+ rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);
- break;
- } while (1);
+ /* always return ndis_packet after rtw_xmitframe_coalesce */
+ rtw_os_xmit_complete(adapt, pxmitframe);
/* 3 2. aggregate same priority and same DA(AP or STA) frames */
pfirstframe = pxmitframe;
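update_txdesc above packs pkt_offset into bits 30:26 of the little-endian descriptor word txdw1 with a shift, a mask and cpu_to_le32(). A minimal standalone sketch of that packing; the function name is hypothetical.

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustration only: set a 5-bit field at bit 26 of a little-endian
 * descriptor word, mirroring the pkt_offset handling above. */
static void example_set_pkt_offset(__le32 *txdw1, u32 pkt_offset)
{
	*txdw1 |= cpu_to_le32((pkt_offset << 26) & 0x7c000000);
}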
diff --git a/drivers/staging/r8188eu/hal/usb_halinit.c b/drivers/staging/r8188eu/hal/usb_halinit.c
index 5cdabf43d4fd..ef1ae95d7db0 100644
--- a/drivers/staging/r8188eu/hal/usb_halinit.c
+++ b/drivers/staging/r8188eu/hal/usb_halinit.c
@@ -60,7 +60,7 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
return result;
}
-static void rtl8188eu_interface_configure(struct adapter *adapt)
+void rtl8188eu_interface_configure(struct adapter *adapt)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(adapt);
@@ -85,7 +85,7 @@ static void rtl8188eu_interface_configure(struct adapter *adapt)
pdvobjpriv->RtNumInPipes, pdvobjpriv->RtNumOutPipes);
}
-static u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
+u32 rtl8188eu_InitPowerOn(struct adapter *adapt)
{
u16 value16;
/* HW Power on sequence */
@@ -119,18 +119,15 @@ static void _InitInterrupt(struct adapter *Adapter)
{
u32 imr, imr_ex;
u8 usb_opt;
- struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
/* HISR write one to clear */
rtw_write32(Adapter, REG_HISR_88E, 0xFFFFFFFF);
/* HIMR - */
imr = IMR_PSTIMEOUT_88E | IMR_TBDER_88E | IMR_CPWM_88E | IMR_CPWM2_88E;
rtw_write32(Adapter, REG_HIMR_88E, imr);
- haldata->IntrMask[0] = imr;
imr_ex = IMR_TXERR_88E | IMR_RXERR_88E | IMR_TXFOVW_88E | IMR_RXFOVW_88E;
rtw_write32(Adapter, REG_HIMRE_88E, imr_ex);
- haldata->IntrMask[1] = imr_ex;
/* REG_USB_SPECIAL_OPTION - BIT(4) */
/* 0; Use interrupt endpoint to upload interrupt pkt */
@@ -403,22 +400,6 @@ static void _InitEDCA(struct adapter *Adapter)
rtw_write32(Adapter, REG_EDCA_VO_PARAM, 0x002FA226);
}
-static void _InitBeaconMaxError(struct adapter *Adapter, bool InfraMode)
-{
-}
-
-static void _InitHWLed(struct adapter *Adapter)
-{
- struct led_priv *pledpriv = &Adapter->ledpriv;
-
- if (pledpriv->LedStrategy != HW_LED)
- return;
-
-/* HW led control */
-/* to do .... */
-/* must consider cases of antenna diversity/ commbo card/solo card/mini card */
-}
-
static void _InitRDGSetting(struct adapter *Adapter)
{
rtw_write8(Adapter, REG_RD_CTRL, 0xFF);
@@ -426,12 +407,6 @@ static void _InitRDGSetting(struct adapter *Adapter)
rtw_write8(Adapter, REG_RD_RESP_PKT_TH, 0x05);
}
-static void _InitRxSetting(struct adapter *Adapter)
-{
- rtw_write32(Adapter, REG_MACID, 0x87654321);
- rtw_write32(Adapter, 0x0700, 0x87654321);
-}
-
static void _InitRetryFunction(struct adapter *Adapter)
{
u8 value8;
@@ -546,26 +521,6 @@ usb_AggSettingRxUpdate(
/* TODO: */
break;
}
-
- switch (PBP_128) {
- case PBP_128:
- haldata->HwRxPageSize = 128;
- break;
- case PBP_64:
- haldata->HwRxPageSize = 64;
- break;
- case PBP_256:
- haldata->HwRxPageSize = 256;
- break;
- case PBP_512:
- haldata->HwRxPageSize = 512;
- break;
- case PBP_1024:
- haldata->HwRxPageSize = 1024;
- break;
- default:
- break;
- }
} /* usb_AggSettingRxUpdate */
static void InitUsbAggregationSetting(struct adapter *Adapter)
@@ -601,8 +556,6 @@ static void _InitBeaconParameters(struct adapter *Adapter)
 /* because the test chip does not do contention before sending a beacon. by tynli. 2009.11.03 */
rtw_write16(Adapter, REG_BCNTCFG, 0x660F);
- haldata->RegBcnCtrlVal = rtw_read8(Adapter, REG_BCN_CTRL);
- haldata->RegTxPause = rtw_read8(Adapter, REG_TXPAUSE);
haldata->RegFwHwTxQCtrl = rtw_read8(Adapter, REG_FWHW_TXQ_CTRL + 2);
haldata->RegReg542 = rtw_read8(Adapter, REG_TBTT_PROHIBIT + 2);
haldata->RegCR_1 = rtw_read8(Adapter, REG_CR + 1);
@@ -646,40 +599,7 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
DBG_88E("%s,Cur_ant:(%x)%s\n", __func__, haldata->CurAntenna, (haldata->CurAntenna == Antenna_A) ? "Antenna_A" : "Antenna_B");
}
-/*-----------------------------------------------------------------------------
- * Function: HwSuspendModeEnable92Cu()
- *
- * Overview: HW suspend mode switch.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 08/23/2010 MHC HW suspend mode switch test..
- *---------------------------------------------------------------------------*/
-enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
-{
- u8 val8;
- enum rt_rf_power_state rfpowerstate = rf_off;
-
- if (adapt->pwrctrlpriv.bHWPowerdown) {
- val8 = rtw_read8(adapt, REG_HSISR);
- DBG_88E("pwrdown, 0x5c(BIT(7))=%02x\n", val8);
- rfpowerstate = (val8 & BIT(7)) ? rf_off : rf_on;
- } else { /* rf on/off */
- rtw_write8(adapt, REG_MAC_PINMUX_CFG, rtw_read8(adapt, REG_MAC_PINMUX_CFG) & ~(BIT(3)));
- val8 = rtw_read8(adapt, REG_GPIO_IO_SEL);
- DBG_88E("GPIO_IN=%02x\n", val8);
- rfpowerstate = (val8 & BIT(3)) ? rf_on : rf_off;
- }
- return rfpowerstate;
-} /* HalDetectPwrDownMode */
-
-static u32 rtl8188eu_hal_init(struct adapter *Adapter)
+u32 rtl8188eu_hal_init(struct adapter *Adapter)
{
u8 value8 = 0;
u16 value16;
@@ -742,22 +662,16 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
_InitTxBufferBoundary(Adapter, 0);
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_DOWNLOAD_FW);
- if (Adapter->registrypriv.mp_mode == 1) {
- _InitRxSetting(Adapter);
+ status = rtl8188e_FirmwareDownload(Adapter);
+
+ if (status != _SUCCESS) {
+ DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
haldata->fw_ractrl = false;
+ return status;
} else {
- status = rtl8188e_FirmwareDownload(Adapter);
-
- if (status != _SUCCESS) {
- DBG_88E("%s: Download Firmware failed!!\n", __func__);
- Adapter->bFWReady = false;
- haldata->fw_ractrl = false;
- return status;
- } else {
- Adapter->bFWReady = true;
- haldata->fw_ractrl = false;
- }
+ Adapter->bFWReady = true;
+ haldata->fw_ractrl = false;
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -819,7 +733,6 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
InitUsbAggregationSetting(Adapter);
_InitOperationMode(Adapter);/* todo */
_InitBeaconParameters(Adapter);
- _InitBeaconMaxError(Adapter, true);
/* */
/* Init CR MACTXEN, MACRXEN after setting RxFF boundary REG_TRXFF_BNDY to patch */
@@ -847,8 +760,6 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
rtw_write16(Adapter, REG_PKT_VO_VI_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
rtw_write16(Adapter, REG_PKT_BE_BK_LIFE_TIME, 0x0400); /* unit: 256us. 256ms */
- _InitHWLed(Adapter);
-
/* Keep RfRegChnlVal for later use. */
haldata->RfRegChnlVal[0] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)0, RF_CHNLBW, bRFRegOffsetMask);
haldata->RfRegChnlVal[1] = PHY_QueryRFReg(Adapter, (enum rf_radio_path)1, RF_CHNLBW, bRFRegOffsetMask);
@@ -887,48 +798,43 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_HAL_DM);
rtl8188e_InitHalDm(Adapter);
- if (Adapter->registrypriv.mp_mode == 1) {
- Adapter->mppriv.channel = haldata->CurrentChannel;
- MPT_InitializeAdapter(Adapter, Adapter->mppriv.channel);
- } else {
- /* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
- /* and then decide to enable RF or not.!!!??? For Selective suspend mode. We may not */
- /* call initstruct adapter. May cause some problem?? */
- /* Fix the bug that Hw/Sw radio off before S3/S4, the RF off action will not be executed */
- /* in MgntActSet_RF_State() after wake up, because the value of haldata->eRFPowerState */
- /* is the same as eRfOff, we should change it to eRfOn after we config RF parameters. */
- /* Added by tynli. 2010.03.30. */
- pwrctrlpriv->rf_pwrstate = rf_on;
-
- /* enable Tx report. */
- rtw_write8(Adapter, REG_FWHW_TXQ_CTRL + 1, 0x0F);
-
- /* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
- rtw_write8(Adapter, REG_EARLY_MODE_CONTROL + 3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
-
- /* tynli_test_tx_report. */
- rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
-
- /* enable tx DMA to drop the redundate data of packet */
- rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
-
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
- /* 2010/08/26 MH Merge from 8192CE. */
- if (pwrctrlpriv->rf_pwrstate == rf_on) {
- if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- PHY_IQCalibrate_8188E(Adapter, true);
- } else {
- PHY_IQCalibrate_8188E(Adapter, false);
- haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
- }
+ /* 2010/08/11 MH Merge from 8192SE for Minicard init. We need to confirm current radio status */
+ /* and then decide whether to enable RF. For Selective suspend mode we may not */
+ /* call the struct adapter init path, which may cause some problems. */
+ /* Fix the bug that Hw/Sw radio off before S3/S4, the RF off action will not be executed */
+ /* in MgntActSet_RF_State() after wake up, because the value of haldata->eRFPowerState */
+ /* is the same as eRfOff, we should change it to eRfOn after we config RF parameters. */
+ /* Added by tynli. 2010.03.30. */
+ pwrctrlpriv->rf_pwrstate = rf_on;
+
+ /* enable Tx report. */
+ rtw_write8(Adapter, REG_FWHW_TXQ_CTRL + 1, 0x0F);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
+ /* Suggested by SD1 pisa. Added by tynli. 2011.10.21. */
+ rtw_write8(Adapter, REG_EARLY_MODE_CONTROL + 3, 0x01);/* Pretx_en, for WEP/TKIP SEC */
- ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+ /* tynli_test_tx_report. */
+ rtw_write16(Adapter, REG_TX_RPT_TIME, 0x3DF0);
- HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
- PHY_LCCalibrate_8188E(Adapter);
+ /* enable tx DMA to drop the redundant data of the packet */
+ rtw_write16(Adapter, REG_TXDMA_OFFSET_CHK, (rtw_read16(Adapter, REG_TXDMA_OFFSET_CHK) | DROP_DATA_EN));
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
+ /* 2010/08/26 MH Merge from 8192CE. */
+ if (pwrctrlpriv->rf_pwrstate == rf_on) {
+ if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
+ PHY_IQCalibrate_8188E(Adapter, true);
+ } else {
+ PHY_IQCalibrate_8188E(Adapter, false);
+ haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
}
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_PW_TRACK);
+
+ ODM_TXPowerTrackingCheck(&haldata->odmpriv);
+
+ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_LCK);
+ PHY_LCCalibrate_8188E(Adapter);
}
/* HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_INIT_PABIAS); */
@@ -1013,17 +919,8 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
haldata->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
}
-static void rtl8192cu_hw_power_down(struct adapter *adapt)
-{
- /* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */
- /* Then enable power down control bit of register 0x04 BIT(4) and BIT(15) as 1. */
- /* Enable register area 0x0-0xc. */
- rtw_write8(adapt, REG_RSV_CTRL, 0x0);
- rtw_write16(adapt, REG_APS_FSMCO, 0x8812);
-}
-
-static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
+u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
DBG_88E("==> %s\n", __func__);
@@ -1034,29 +931,20 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
DBG_88E("bkeepfwalive(%x)\n", Adapter->pwrctrlpriv.bkeepfwalive);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
_ps_close_RF(Adapter);
- if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
- rtl8192cu_hw_power_down(Adapter);
} else {
if (Adapter->hw_init_completed) {
CardDisableRTL8188EU(Adapter);
-
- if ((Adapter->pwrctrlpriv.bHWPwrPindetect) && (Adapter->pwrctrlpriv.bHWPowerdown))
- rtl8192cu_hw_power_down(Adapter);
}
}
return _SUCCESS;
}
-static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
+unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
uint status;
- struct intf_hdl *pintfhdl = &Adapter->iopriv.intf;
struct recv_priv *precvpriv = &Adapter->recvpriv;
- u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem);
-
- _read_port = pintfhdl->io_ops._read_port;
status = _SUCCESS;
@@ -1065,7 +953,7 @@ static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
/* issue Rx irp to receive data */
precvbuf = (struct recv_buf *)precvpriv->precv_buf;
for (i = 0; i < NR_RECVBUFF; i++) {
- if (!_read_port(pintfhdl, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf)) {
+ if (!rtw_read_port(Adapter, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf)) {
status = _FAIL;
goto exit;
}
@@ -1078,13 +966,6 @@ exit:
return status;
}
-static unsigned int rtl8188eu_inirp_deinit(struct adapter *Adapter)
-{
- rtw_read_port_cancel(Adapter);
-
- return _SUCCESS;
-}
-
/* */
/* */
/* EEPROM/EFUSE Content Parsing */
@@ -1096,7 +977,6 @@ static void _ReadLEDSetting(struct adapter *Adapter, u8 *PROMContent, bool Autol
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
pledpriv->bRegUseLed = true;
- pledpriv->LedStrategy = SW_LED_MODE1;
haldata->bLedOpenDrain = true;/* Support Open-drain arrangement for controlling the LED. */
}
@@ -1129,7 +1009,7 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
{
u16 i;
u8 sMacAddr[6] = {0x00, 0xE0, 0x4C, 0x81, 0x88, 0x02};
- struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+ struct eeprom_priv *eeprom = &adapt->eeprompriv;
if (AutoLoadFail) {
for (i = 0; i < 6; i++)
@@ -1140,16 +1020,12 @@ static void Hal_EfuseParseMACAddr_8188EU(struct adapter *adapt, u8 *hwinfo, bool
}
}
-static void Hal_CustomizeByCustomerID_8188EU(struct adapter *adapt)
-{
-}
-
static void
readAdapterInfo_8188EU(
struct adapter *adapt
)
{
- struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(adapt);
+ struct eeprom_priv *eeprom = &adapt->eeprompriv;
/* parse the eeprom/efuse content */
Hal_EfuseParseIDCode88E(adapt, eeprom->efuse_eeprom_data);
@@ -1166,12 +1042,6 @@ readAdapterInfo_8188EU(
Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
- /* */
- /* The following part initialize some vars by PG info. */
- /* */
- Hal_InitChannelPlan(adapt);
- Hal_CustomizeByCustomerID_8188EU(adapt);
-
_ReadLEDSetting(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
}
@@ -1179,7 +1049,7 @@ static void _ReadPROMContent(
struct adapter *Adapter
)
{
- struct eeprom_priv *eeprom = GET_EEPROM_EFUSE_PRIV(Adapter);
+ struct eeprom_priv *eeprom = &Adapter->eeprompriv;
u8 eeValue;
/* check system boot selection */
@@ -1203,19 +1073,13 @@ static void _ReadRFType(struct adapter *Adapter)
static int _ReadAdapterInfo8188EU(struct adapter *Adapter)
{
- u32 start = jiffies;
-
- MSG_88E("====> %s\n", __func__);
-
_ReadRFType(Adapter);/* rf_chip -> _InitRFType() */
_ReadPROMContent(Adapter);
- MSG_88E("<==== %s in %d ms\n", __func__, rtw_get_passing_time_ms(start));
-
return _SUCCESS;
}
-static void ReadAdapterInfo8188EU(struct adapter *Adapter)
+void ReadAdapterInfo8188EU(struct adapter *Adapter)
{
 /* Read EEPROM size before calling any EEPROM function */
Adapter->EepromAddressSize = GetEEPROMSize8188E(Adapter);
@@ -1223,11 +1087,6 @@ static void ReadAdapterInfo8188EU(struct adapter *Adapter)
_ReadAdapterInfo8188EU(Adapter);
}
-#define GPIO_DEBUG_PORT_NUM 0
-static void rtl8192cu_trigger_gpio_0(struct adapter *adapt)
-{
-}
-
static void ResumeTxBeacon(struct adapter *adapt)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
@@ -1349,7 +1208,7 @@ static void hw_var_set_bcn_func(struct adapter *Adapter, u8 variable, u8 *val)
rtw_write8(Adapter, bcn_ctrl_reg, rtw_read8(Adapter, bcn_ctrl_reg) & (~(EN_BCN_FUNCTION | EN_TXBCN_RPT)));
}
-static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct dm_priv *pdmpriv = &haldata->dmpriv;
@@ -1532,7 +1391,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- RetryLimit = (haldata->CustomerID == RT_CID_CCX) ? 7 : 48;
+ RetryLimit = 48;
else /* Ad-hoc Mode */
RetryLimit = 0x7;
} else if (type == 1) {
@@ -1610,7 +1469,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
break;
case HW_VAR_DM_FUNC_SET:
if (*((u32 *)val) == DYNAMIC_ALL_FUNC_ENABLE) {
- pdmpriv->DMFlag = pdmpriv->InitDMFlag;
podmpriv->SupportAbility = pdmpriv->InitODMFlag;
} else {
podmpriv->SupportAbility |= *((u32 *)val);
@@ -1757,15 +1615,13 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
rtw_write8(Adapter, REG_RXDMA_AGG_PG_TH, threshold);
}
break;
- case HW_VAR_SET_RPWM:
- break;
case HW_VAR_H2C_FW_PWRMODE:
{
u8 psmode = (*(u8 *)val);
 /* Force leaving RF low power mode for 1T1R to prevent a conflicting setting in the Fw power */
/* saving sequence. 2010.06.07. Added by tynli. Suggested by SD3 yschang. */
- if ((psmode != PS_MODE_ACTIVE) && (!IS_92C_SERIAL(haldata->VersionID)))
+ if (psmode != PS_MODE_ACTIVE)
ODM_RF_Saving(podmpriv, true);
rtl8188e_set_FwPwrMode_cmd(Adapter, psmode);
}
@@ -1776,14 +1632,12 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
rtl8188e_set_FwJoinBssReport_cmd(Adapter, mstatus);
}
break;
-#ifdef CONFIG_88EU_P2P
case HW_VAR_H2C_FW_P2P_PS_OFFLOAD:
{
u8 p2p_ps_state = (*(u8 *)val);
rtl8188e_set_p2p_ps_offload_cmd(Adapter, p2p_ps_state);
}
break;
-#endif
case HW_VAR_INITIAL_GAIN:
{
struct rtw_dig *pDigTable = &podmpriv->DM_DigTable;
@@ -1797,9 +1651,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_TRIGGER_GPIO_0:
- rtl8192cu_trigger_gpio_0(Adapter);
- break;
case HW_VAR_RPT_TIMER_SETTING:
{
u16 min_rpt_time = (*(u16 *)val);
@@ -1850,8 +1701,6 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
}
break;
- case HW_VAR_CHECK_TXBUF:
- break;
case HW_VAR_APFM_ON_MAC:
haldata->bMacPwrCtrlOn = *val;
DBG_88E("%s: bMacPwrCtrlOn=%d\n", __func__, haldata->bMacPwrCtrlOn);
@@ -1876,7 +1725,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
-static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
+void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
struct odm_dm_struct *podmpriv = &haldata->odmpriv;
@@ -1934,16 +1783,8 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
-/* */
-/* Description: */
-/* Query setting of specified variable. */
-/* */
-static u8
-GetHalDefVar8188EUsb(
- struct adapter *Adapter,
- enum hal_def_variable eVariable,
- void *pValue
- )
+/* Query setting of specified variable. */
+u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
u8 bResult = _SUCCESS;
@@ -2027,11 +1868,8 @@ GetHalDefVar8188EUsb(
return bResult;
}
-/* */
-/* Description: */
-/* Change default setting of specified variable. */
-/* */
-static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
+/* Change default setting of specified variable. */
+u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
u8 bResult = _SUCCESS;
@@ -2079,7 +1917,7 @@ static u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eV
return bResult;
}
-static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
+void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level)
{
u8 init_rate = 0;
u8 networkType, raid;
@@ -2162,7 +2000,7 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
psta->init_rate = init_rate;
}
-static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
+void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
{
u32 value32;
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
@@ -2196,7 +2034,7 @@ static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
rtw_write8(adapt, bcn_ctrl_reg, rtw_read8(adapt, bcn_ctrl_reg) | BIT(1));
}
-static void rtl8188eu_init_default_value(struct adapter *adapt)
+void rtl8188eu_init_default_value(struct adapter *adapt)
{
struct hal_data_8188e *haldata;
struct pwrctrl_priv *pwrctrlpriv;
@@ -2220,52 +2058,10 @@ static void rtl8188eu_init_default_value(struct adapter *adapt)
haldata->odmpriv.RFCalibrateInfo.ThermalValue_HP[i] = 0;
}
-static u8 rtl8188eu_ps_func(struct adapter *Adapter, enum hal_intf_ps_func efunc_id, u8 *val)
-{
- u8 bResult = true;
- return bResult;
-}
-
-void rtl8188eu_set_hal_ops(struct adapter *adapt)
+void rtl8188eu_alloc_haldata(struct adapter *adapt)
{
- struct hal_ops *halfunc = &adapt->HalFunc;
-
adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
if (!adapt->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
adapt->hal_data_sz = sizeof(struct hal_data_8188e);
-
- halfunc->hal_power_on = rtl8188eu_InitPowerOn;
- halfunc->hal_init = &rtl8188eu_hal_init;
- halfunc->hal_deinit = &rtl8188eu_hal_deinit;
-
- halfunc->inirp_init = &rtl8188eu_inirp_init;
- halfunc->inirp_deinit = &rtl8188eu_inirp_deinit;
-
- halfunc->init_xmit_priv = &rtl8188eu_init_xmit_priv;
-
- halfunc->init_recv_priv = &rtl8188eu_init_recv_priv;
- halfunc->free_recv_priv = &rtl8188eu_free_recv_priv;
- halfunc->InitSwLeds = &rtl8188eu_InitSwLeds;
- halfunc->DeInitSwLeds = &rtl8188eu_DeInitSwLeds;
-
- halfunc->init_default_value = &rtl8188eu_init_default_value;
- halfunc->intf_chip_configure = &rtl8188eu_interface_configure;
- halfunc->read_adapter_info = &ReadAdapterInfo8188EU;
-
- halfunc->SetHwRegHandler = &SetHwReg8188EU;
- halfunc->GetHwRegHandler = &GetHwReg8188EU;
- halfunc->GetHalDefVarHandler = &GetHalDefVar8188EUsb;
- halfunc->SetHalDefVarHandler = &SetHalDefVar8188EUsb;
-
- halfunc->UpdateRAMaskHandler = &UpdateHalRAMask8188EUsb;
- halfunc->SetBeaconRelatedRegistersHandler = &SetBeaconRelatedRegisters8188EUsb;
-
- halfunc->hal_xmit = &rtl8188eu_hal_xmit;
- halfunc->mgnt_xmit = &rtl8188eu_mgnt_xmit;
-
- halfunc->interface_ps_func = &rtl8188eu_ps_func;
-
- rtl8188e_set_hal_ops(halfunc);
-
}
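
The usb_halinit.c hunks above stop routing HAL entry points through the struct hal_ops table: rtl8188eu_InitPowerOn(), rtl8188eu_hal_init(), SetHwReg8188EU() and friends lose their static qualifier and are called by name, and rtl8188eu_set_hal_ops() shrinks to rtl8188eu_alloc_haldata(). A minimal sketch of the resulting call-site pattern, in C; the helper name bring_up_8188eu() and its use of HW_VAR_MEDIA_STATUS are illustrative only, not part of the diff:

/* Sketch only: direct calls replacing the old adapt->HalFunc.* dispatch.
 * Assumes the driver's drv_types.h / hal_intf.h / rtl8188e_hal.h are included.
 */
static u32 bring_up_8188eu(struct adapter *adapt)
{
	u8 media_status = 1;	/* value handed to the HW_VAR_MEDIA_STATUS handler */

	if (rtl8188eu_InitPowerOn(adapt) != _SUCCESS)
		return _FAIL;
	if (rtl8188eu_hal_init(adapt) != _SUCCESS)
		return _FAIL;

	/* was: adapt->HalFunc.SetHwRegHandler(adapt, HW_VAR_MEDIA_STATUS, &media_status); */
	SetHwReg8188EU(adapt, HW_VAR_MEDIA_STATUS, &media_status);

	return _SUCCESS;
}
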
diff --git a/drivers/staging/r8188eu/hal/usb_ops_linux.c b/drivers/staging/r8188eu/hal/usb_ops_linux.c
index 0cf69033c529..e4a9350376bf 100644
--- a/drivers/staging/r8188eu/hal/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/hal/usb_ops_linux.c
@@ -8,159 +8,179 @@
#include "../include/recv_osdep.h"
#include "../include/rtl8188e_hal.h"
-static int usbctrl_vendorreq(struct intf_hdl *pintfhdl, u16 value, void *pdata, u16 len, u8 requesttype)
+static int usb_read(struct intf_hdl *intf, u16 value, void *data, u8 size)
{
- struct adapter *adapt = pintfhdl->padapter;
- struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
+ struct adapter *adapt = intf->padapter;
+ struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
struct usb_device *udev = dvobjpriv->pusbdev;
- unsigned int pipe;
- int status = 0;
- u8 *pIo_buf;
- int vendorreq_times = 0;
-
- if ((adapt->bSurpriseRemoved) || (adapt->pwrctrlpriv.pnp_bstop_trx)) {
- status = -EPERM;
- goto exit;
+ int status;
+ u8 io_buf[4];
+
+ if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ return -EPERM;
+
+ status = usb_control_msg_recv(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
+ REALTEK_USB_VENQT_READ, value,
+ REALTEK_USB_VENQT_CMD_IDX, io_buf,
+ size, RTW_USB_CONTROL_MSG_TIMEOUT,
+ GFP_KERNEL);
+
+ if (status == -ESHUTDOWN ||
+ status == -ENODEV ||
+ status == -ENOENT) {
+ /*
+ * device or controller has been disabled due to
+ * some problem that could not be worked around,
+ * device or bus doesn't exist, endpoint does not
+ * exist or is not enabled.
+ */
+ adapt->bSurpriseRemoved = true;
+ return status;
}
- if (len > MAX_VENDOR_REQ_CMD_SIZE) {
- DBG_88E("[%s] Buffer len error ,vendor request failed\n", __func__);
- status = -EINVAL;
- goto exit;
+ if (status < 0) {
+ if (rtw_inc_and_chk_continual_urb_error(dvobjpriv))
+ adapt->bSurpriseRemoved = true;
+
+ return status;
}
- _enter_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
+ rtw_reset_continual_urb_error(dvobjpriv);
+ memcpy(data, io_buf, size);
- /* Acquire IO memory for vendorreq */
- pIo_buf = dvobjpriv->usb_vendor_req_buf;
+ return status;
+}
- if (!pIo_buf) {
- DBG_88E("[%s] pIo_buf == NULL\n", __func__);
- status = -ENOMEM;
- goto release_mutex;
+static int usb_write(struct intf_hdl *intf, u16 value, void *data, u8 size)
+{
+ struct adapter *adapt = intf->padapter;
+ struct dvobj_priv *dvobjpriv = adapter_to_dvobj(adapt);
+ struct usb_device *udev = dvobjpriv->pusbdev;
+ int status;
+ u8 io_buf[VENDOR_CMD_MAX_DATA_LEN];
+
+ if (adapt->bSurpriseRemoved || adapt->pwrctrlpriv.pnp_bstop_trx)
+ return -EPERM;
+
+ memcpy(io_buf, data, size);
+ status = usb_control_msg_send(udev, 0, REALTEK_USB_VENQT_CMD_REQ,
+ REALTEK_USB_VENQT_WRITE, value,
+ REALTEK_USB_VENQT_CMD_IDX, io_buf,
+ size, RTW_USB_CONTROL_MSG_TIMEOUT,
+ GFP_KERNEL);
+
+ if (status == -ESHUTDOWN ||
+ status == -ENODEV ||
+ status == -ENOENT) {
+ /*
+ * device or controller has been disabled due to
+ * some problem that could not be worked around,
+ * device or bus doesn't exist, endpoint does not
+ * exist or is not enabled.
+ */
+ adapt->bSurpriseRemoved = true;
+ return status;
}
- if (requesttype == REALTEK_USB_VENQT_READ)
- pipe = usb_rcvctrlpipe(udev, 0);/* read_in */
- else
- pipe = usb_sndctrlpipe(udev, 0);/* write_out */
-
- while (++vendorreq_times <= MAX_USBCTRL_VENDORREQ_TIMES) {
- if (requesttype == REALTEK_USB_VENQT_READ)
- memset(pIo_buf, 0, len);
- else
- memcpy(pIo_buf, pdata, len);
-
- status = usb_control_msg(udev, pipe, REALTEK_USB_VENQT_CMD_REQ,
- requesttype, value, REALTEK_USB_VENQT_CMD_IDX,
- pIo_buf, len, RTW_USB_CONTROL_MSG_TIMEOUT);
-
- if (status == len) { /* Success this control transfer. */
- rtw_reset_continual_urb_error(dvobjpriv);
- if (requesttype == REALTEK_USB_VENQT_READ)
- memcpy(pdata, pIo_buf, len);
- } else { /* error cases */
- DBG_88E("reg 0x%x, usb %s %u fail, status:%d value=0x%x, vendorreq_times:%d\n",
- value, (requesttype == REALTEK_USB_VENQT_READ) ? "read" : "write",
- len, status, *(u32 *)pdata, vendorreq_times);
-
- if (status < 0) {
- if (status == (-ESHUTDOWN) || status == -ENODEV) {
- adapt->bSurpriseRemoved = true;
- } else {
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- haldata->srestpriv.wifi_error_status = USB_VEN_REQ_CMD_FAIL;
- }
- } else { /* status != len && status >= 0 */
- if (status > 0) {
- if (requesttype == REALTEK_USB_VENQT_READ) {
- /* For Control read transfer, we have to copy the read data from pIo_buf to pdata. */
- memcpy(pdata, pIo_buf, len);
- }
- }
- }
+ if (status < 0) {
+ if (rtw_inc_and_chk_continual_urb_error(dvobjpriv))
+ adapt->bSurpriseRemoved = true;
- if (rtw_inc_and_chk_continual_urb_error(dvobjpriv)) {
- adapt->bSurpriseRemoved = true;
- break;
- }
+ return status;
+ }
- }
+ rtw_reset_continual_urb_error(dvobjpriv);
- /* firmware download is checksumed, don't retry */
- if ((value >= FW_8188E_START_ADDRESS && value <= FW_8188E_END_ADDRESS) || status == len)
- break;
- }
-release_mutex:
- _exit_critical_mutex(&dvobjpriv->usb_vendor_req_mutex, NULL);
-exit:
return status;
}
-static u8 usb_read8(struct intf_hdl *pintfhdl, u32 addr)
+u8 rtw_read8(struct adapter *adapter, u32 addr)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
u8 data;
- usbctrl_vendorreq(pintfhdl, wvalue, &data, 1, REALTEK_USB_VENQT_READ);
+ usb_read(intf, value, &data, 1);
return data;
}
-static u16 usb_read16(struct intf_hdl *pintfhdl, u32 addr)
+u16 rtw_read16(struct adapter *adapter, u32 addr)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
- __le32 data;
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
+ __le16 data;
- usbctrl_vendorreq(pintfhdl, wvalue, &data, 2, REALTEK_USB_VENQT_READ);
+ usb_read(intf, value, &data, 2);
- return (u16)(le32_to_cpu(data) & 0xffff);
+ return le16_to_cpu(data);
}
-static u32 usb_read32(struct intf_hdl *pintfhdl, u32 addr)
+u32 rtw_read32(struct adapter *adapter, u32 addr)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
__le32 data;
- usbctrl_vendorreq(pintfhdl, wvalue, &data, 4, REALTEK_USB_VENQT_READ);
+ usb_read(intf, value, &data, 4);
return le32_to_cpu(data);
}
-static int usb_write8(struct intf_hdl *pintfhdl, u32 addr, u8 val)
+int rtw_write8(struct adapter *adapter, u32 addr, u8 val)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
+ int ret;
- return usbctrl_vendorreq(pintfhdl, wvalue, &val, 1, REALTEK_USB_VENQT_WRITE);
+ ret = usb_write(intf, value, &val, 1);
+
+ return RTW_STATUS_CODE(ret);
}
-static int usb_write16(struct intf_hdl *pintfhdl, u32 addr, u16 val)
+int rtw_write16(struct adapter *adapter, u32 addr, u16 val)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
- __le32 data = cpu_to_le32(val & 0x0000ffff);
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
+ __le16 data = cpu_to_le16(val);
+ int ret;
+
+ ret = usb_write(intf, value, &data, 2);
- return usbctrl_vendorreq(pintfhdl, wvalue, &data, 2, REALTEK_USB_VENQT_WRITE);
+ return RTW_STATUS_CODE(ret);
}
-static int usb_write32(struct intf_hdl *pintfhdl, u32 addr, u32 val)
+int rtw_write32(struct adapter *adapter, u32 addr, u32 val)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
__le32 data = cpu_to_le32(val);
+ int ret;
- return usbctrl_vendorreq(pintfhdl, wvalue, &data, 4, REALTEK_USB_VENQT_WRITE);
+ ret = usb_write(intf, value, &data, 4);
+
+ return RTW_STATUS_CODE(ret);
}
-static int usb_writeN(struct intf_hdl *pintfhdl, u32 addr, u32 length, u8 *pdata)
+int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *data)
{
- u16 wvalue = (u16)(addr & 0x0000ffff);
- u8 buf[VENDOR_CMD_MAX_DATA_LEN] = {0};
+ struct io_priv *io_priv = &adapter->iopriv;
+ struct intf_hdl *intf = &io_priv->intf;
+ u16 value = addr & 0xffff;
+ int ret;
if (length > VENDOR_CMD_MAX_DATA_LEN)
- return -EINVAL;
+ return _FAIL;
- memcpy(buf, pdata, length);
+ ret = usb_write(intf, value, data, length);
- return usbctrl_vendorreq(pintfhdl, wvalue, buf, (length & 0xffff), REALTEK_USB_VENQT_WRITE);
+ return RTW_STATUS_CODE(ret);
}
static void interrupt_handler_8188eu(struct adapter *adapt, u16 pkt_len, u8 *pbuf)
@@ -415,10 +435,6 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
break;
case -EPROTO:
case -EOVERFLOW:
- {
- struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
- haldata->srestpriv.wifi_error_status = USB_READ_PORT_FAIL;
- }
precvbuf->reuse = true;
rtw_read_port(adapt, precvpriv->ff_hwaddr, 0, (unsigned char *)precvbuf);
break;
@@ -431,11 +447,10 @@ static void usb_read_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
-static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
+u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *rmem)
{
struct urb *purb = NULL;
struct recv_buf *precvbuf = (struct recv_buf *)rmem;
- struct adapter *adapter = pintfhdl->padapter;
struct dvobj_priv *pdvobj = adapter_to_dvobj(adapter);
struct recv_priv *precvpriv = &adapter->recvpriv;
struct usb_device *pusbd = pdvobj->pusbdev;
@@ -458,7 +473,7 @@ static u32 usb_read_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
precvbuf->reuse = true;
}
- rtl8188eu_init_recvbuf(adapter, precvbuf);
+ rtl8188eu_init_recvbuf(precvbuf);
/* re-assign for linux based on skb */
if (!precvbuf->reuse || !precvbuf->pskb) {
@@ -533,30 +548,3 @@ void rtl8188eu_xmit_tasklet(unsigned long priv)
break;
}
}
-
-void rtl8188eu_set_intf_ops(struct _io_ops *pops)
-{
-
- memset((u8 *)pops, 0, sizeof(struct _io_ops));
- pops->_read8 = &usb_read8;
- pops->_read16 = &usb_read16;
- pops->_read32 = &usb_read32;
- pops->_read_mem = &usb_read_mem;
- pops->_read_port = &usb_read_port;
- pops->_write8 = &usb_write8;
- pops->_write16 = &usb_write16;
- pops->_write32 = &usb_write32;
- pops->_writeN = &usb_writeN;
- pops->_write_mem = &usb_write_mem;
- pops->_write_port = &usb_write_port;
- pops->_read_port_cancel = &usb_read_port_cancel;
- pops->_write_port_cancel = &usb_write_port_cancel;
-
-}
-
-void rtl8188eu_set_hw_type(struct adapter *adapt)
-{
- adapt->chip_type = RTL8188E;
- adapt->HardwareType = HARDWARE_TYPE_RTL8188EU;
- DBG_88E("CHIP TYPE: RTL8188E\n");
-}
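
With usbctrl_vendorreq() and the _io_ops table gone, register access in this driver goes straight through rtw_read8/16/32() and rtw_write8/16/32(), each a single usb_control_msg_recv()/usb_control_msg_send() vendor request on endpoint 0 with surprise-removal detection. A minimal read-modify-write sketch against the new accessors, in C; the helper name set_reg_bit_8188eu() is hypothetical:

/* Sketch only: read-modify-write through the new rtw_read8()/rtw_write8().
 * rtw_write8() returns a status code via RTW_STATUS_CODE(), not the raw
 * usb_control_msg_send() errno.
 */
static int set_reg_bit_8188eu(struct adapter *adapter, u32 addr, u8 mask)
{
	u8 val;

	val = rtw_read8(adapter, addr);	/* 1-byte vendor-request read */
	val |= mask;

	return rtw_write8(adapter, addr, val);
}

The pattern matches existing call sites in the diff, e.g. the BIT(1) update of bcn_ctrl_reg in SetBeaconRelatedRegisters8188EUsb().
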
diff --git a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
index 4370ec2fa981..6f901ce607e8 100644
--- a/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/r8188eu/include/Hal8188EPhyCfg.h
@@ -21,23 +21,6 @@
/*--------------------------Define Parameters-------------------------------*/
/*------------------------------Define structure----------------------------*/
-enum sw_chnl_cmd_id {
- CmdID_End,
- CmdID_SetTxPowerLevel,
- CmdID_BBRegWrite10,
- CmdID_WritePortUlong,
- CmdID_WritePortUshort,
- CmdID_WritePortUchar,
- CmdID_RF_WriteReg,
-};
-
-/* 1. Switch channel related */
-struct sw_chnl_cmd {
- enum sw_chnl_cmd_id CmdID;
- u32 Para1;
- u32 Para2;
- u32 msDelay;
-};
enum hw90_block {
HW90_BLOCK_MAC = 0,
@@ -65,24 +48,6 @@ enum rf_radio_path {
* total three groups */
#define CHANNEL_GROUP_MAX_88E 6
-enum wireless_mode {
- WIRELESS_MODE_UNKNOWN = 0x00,
- WIRELESS_MODE_B = BIT(0),
- WIRELESS_MODE_G = BIT(1),
- WIRELESS_MODE_AUTO = BIT(5),
- WIRELESS_MODE_N_24G = BIT(3),
-};
-
-enum phy_rate_tx_offset_area {
- RA_OFFSET_LEGACY_OFDM1,
- RA_OFFSET_LEGACY_OFDM2,
- RA_OFFSET_HT_OFDM1,
- RA_OFFSET_HT_OFDM2,
- RA_OFFSET_HT_OFDM3,
- RA_OFFSET_HT_OFDM4,
- RA_OFFSET_HT_CCK,
-};
-
/* BB/RF related */
enum RF_TYPE_8190P {
RF_TYPE_MIN, /* 0 */
@@ -141,24 +106,6 @@ struct bb_reg_def {
* Path A and B */
};
-struct ant_sel_ofdm {
- u32 r_tx_antenna:4;
- u32 r_ant_l:4;
- u32 r_ant_non_ht:4;
- u32 r_ant_ht1:4;
- u32 r_ant_ht2:4;
- u32 r_ant_ht_s1:4;
- u32 r_ant_non_ht_s1:4;
- u32 OFDM_TXSC:2;
- u32 reserved:2;
-};
-
-struct ant_sel_cck {
- u8 r_cckrx_enable_2:2;
- u8 r_cckrx_enable:2;
- u8 r_ccktx_enable:4;
-};
-
/*------------------------------Define structure----------------------------*/
/*------------------------Export global variable----------------------------*/
@@ -185,21 +132,8 @@ int PHY_MACConfig8188E(struct adapter *adapter);
int PHY_BBConfig8188E(struct adapter *adapter);
int PHY_RFConfig8188E(struct adapter *adapter);
-/* RF config */
-int rtl8188e_PHY_ConfigRFWithParaFile(struct adapter *adapter, u8 *filename,
- enum rf_radio_path rfpath);
-int rtl8188e_PHY_ConfigRFWithHeaderFile(struct adapter *adapter,
- enum rf_radio_path rfpath);
-
-/* Read initi reg value for tx power setting. */
-void rtl8192c_PHY_GetHWRegOriginalValue(struct adapter *adapter);
-
/* BB TX Power R/W */
-void PHY_GetTxPowerLevel8188E(struct adapter *adapter, u32 *powerlevel);
void PHY_SetTxPowerLevel8188E(struct adapter *adapter, u8 channel);
-bool PHY_UpdateTxPowerDbm8188E(struct adapter *adapter, int power);
-
-void PHY_ScanOperationBackup8188E(struct adapter *Adapter, u8 Operation);
/* Switch bandwidth for 8192S */
void PHY_SetBWMode8188E(struct adapter *adapter,
@@ -207,17 +141,6 @@ void PHY_SetBWMode8188E(struct adapter *adapter,
 /* channel switch related function */
void PHY_SwChnl8188E(struct adapter *adapter, u8 channel);
-/* Call after initialization */
-void ChkFwCmdIoDone(struct adapter *adapter);
-
-/* BB/MAC/RF other monitor API */
-void PHY_SetRFPathSwitch_8188E(struct adapter *adapter, bool main);
-
-void PHY_SwitchEphyParameter(struct adapter *adapter);
-
-void PHY_EnableHostClkReq(struct adapter *adapter);
-
-bool SetAntennaConfig92C(struct adapter *adapter, u8 defaultant);
void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
u32 mask, u32 data);
@@ -232,18 +155,4 @@ void storePwrIndexDiffRateOffset(struct adapter *adapter, u32 regaddr,
#define PHY_SetRFReg(adapt, rfpath, regaddr, bitmask, data) \
rtl8188e_PHY_SetRFReg((adapt), (rfpath), (regaddr), (bitmask), (data))
-#define PHY_SetMacReg PHY_SetBBReg
-
-#define SIC_HW_SUPPORT 0
-
-#define SIC_MAX_POLL_CNT 5
-
-#define SIC_CMD_READY 0
-#define SIC_CMD_WRITE 1
-#define SIC_CMD_READ 2
-
-#define SIC_CMD_REG 0x1EB /* 1byte */
-#define SIC_ADDR_REG 0x1E8 /* 1b9~1ba, 2 bytes */
-#define SIC_DATA_REG 0x1EC /* 1bc~1bf */
-
#endif /* __INC_HAL8192CPHYCFG_H */
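
The Hal8188EPhyCfg.h hunks above trim prototypes and the SIC_* defines while keeping the PHY register accessors that the rest of this diff still uses (for example the RF_CHNLBW reads in rtl8188eu_hal_init()). A minimal sketch of that surviving interface, in C; the helper name restore_rf_chnlbw() is hypothetical:

/* Sketch only: write back a previously saved RF channel/bandwidth value
 * through the PHY_SetRFReg() macro kept by the header cleanup above.
 */
static void restore_rf_chnlbw(struct adapter *adapt, u32 saved_chnlbw)
{
	PHY_SetRFReg(adapt, (enum rf_radio_path)0, RF_CHNLBW,
		     bRFRegOffsetMask, saved_chnlbw);
}
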
diff --git a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
index d5ced507a648..20d73ca781e8 100644
--- a/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
+++ b/drivers/staging/r8188eu/include/Hal8188ERateAdaptive.h
@@ -37,8 +37,6 @@
LE_BITS_TO_1BYTE(__paddr + 6, 0, 8)
/* End rate adaptive define */
-void ODM_RASupport_Init(struct odm_dm_struct *dm_odm);
-
int ODM_RAInfo_Init_all(struct odm_dm_struct *dm_odm);
int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 MacID);
diff --git a/drivers/staging/r8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/r8188eu/include/HalHWImg8188E_FW.h
deleted file mode 100644
index 5ddcd283097b..000000000000
--- a/drivers/staging/r8188eu/include/HalHWImg8188E_FW.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __INC_FW_8188E_HW_IMG_H
-#define __INC_FW_8188E_HW_IMG_H
-
-/******************************************************************************
-* FW_AP.TXT
-******************************************************************************/
-/******************************************************************************
-* FW_WoWLAN.TXT
-******************************************************************************/
-#define ArrayLength_8188E_FW_WoWLAN 15764
-extern const u8 Array_8188E_FW_WoWLAN[ArrayLength_8188E_FW_WoWLAN];
-
-#endif
diff --git a/drivers/staging/r8188eu/include/HalVerDef.h b/drivers/staging/r8188eu/include/HalVerDef.h
index a0f5bf52e75a..796a44a1e697 100644
--- a/drivers/staging/r8188eu/include/HalVerDef.h
+++ b/drivers/staging/r8188eu/include/HalVerDef.h
@@ -3,20 +3,6 @@
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
-enum HAL_IC_TYPE {
- CHIP_8192S = 0,
- CHIP_8188C = 1,
- CHIP_8192C = 2,
- CHIP_8192D = 3,
- CHIP_8723A = 4,
- CHIP_8188E = 5,
- CHIP_8881A = 6,
- CHIP_8812A = 7,
- CHIP_8821A = 8,
- CHIP_8723B = 9,
- CHIP_8192E = 10,
-};
-
enum HAL_CHIP_TYPE {
TEST_CHIP = 0,
NORMAL_CHIP = 1,
@@ -50,7 +36,6 @@ enum HAL_RF_TYPE {
};
struct HAL_VERSION {
- enum HAL_IC_TYPE ICType;
enum HAL_CHIP_TYPE ChipType;
enum HAL_CUT_VERSION CUTVersion;
enum HAL_VENDOR VendorType;
@@ -59,9 +44,7 @@ struct HAL_VERSION {
};
/* Get element */
-#define GET_CVID_IC_TYPE(version) (((version).ICType))
#define GET_CVID_CHIP_TYPE(version) (((version).ChipType))
-#define GET_CVID_RF_TYPE(version) (((version).RFType))
#define GET_CVID_MANUFACTUER(version) (((version).VendorType))
#define GET_CVID_CUT_VERSION(version) (((version).CUTVersion))
#define GET_CVID_ROM_VERSION(version) (((version).ROMVer) & ROM_VERSION_MASK)
@@ -69,17 +52,6 @@ struct HAL_VERSION {
/* Common Macro. -- */
/* HAL_VERSION VersionID */
-/* HAL_IC_TYPE_E */
-#define IS_81XXC(version) \
- (((GET_CVID_IC_TYPE(version) == CHIP_8192C) || \
- (GET_CVID_IC_TYPE(version) == CHIP_8188C)) ? true : false)
-#define IS_8723_SERIES(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8723A) ? true : false)
-#define IS_92D(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8192D) ? true : false)
-#define IS_8188E(version) \
- ((GET_CVID_IC_TYPE(version) == CHIP_8188E) ? true : false)
-
/* HAL_CHIP_TYPE_E */
#define IS_TEST_CHIP(version) \
((GET_CVID_CHIP_TYPE(version) == TEST_CHIP) ? true : false)
@@ -104,46 +76,4 @@ struct HAL_VERSION {
#define IS_CHIP_VENDOR_UMC(version) \
((GET_CVID_MANUFACTUER(version) == CHIP_VENDOR_UMC) ? true : false)
-/* HAL_RF_TYPE_E */
-#define IS_1T1R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T1R) ? true : false)
-#define IS_1T2R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_1T2R) ? true : false)
-#define IS_2T2R(version) \
- ((GET_CVID_RF_TYPE(version) == RF_TYPE_2T2R) ? true : false)
-
-/* Chip version Macro. -- */
-#define IS_81XXC_TEST_CHIP(version) \
- ((IS_81XXC(version) && (!IS_NORMAL_CHIP(version))) ? true : false)
-
-#define IS_92C_SERIAL(version) \
- ((IS_81XXC(version) && IS_2T2R(version)) ? true : false)
-#define IS_81xxC_VENDOR_UMC_A_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_A_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_B_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_B_CUT(version) ? true : false) : false) : false)
-#define IS_81xxC_VENDOR_UMC_C_CUT(version) \
- (IS_81XXC(version) ? (IS_CHIP_VENDOR_UMC(version) ? \
- (IS_C_CUT(version) ? true : false) : false) : false)
-
-#define IS_NORMAL_CHIP92D(version) \
- ((IS_92D(version)) ? \
- ((GET_CVID_CHIP_TYPE(version) == NORMAL_CHIP) ? true : false) : false)
-
-#define IS_92D_SINGLEPHY(version) \
- ((IS_92D(version)) ? (IS_2T2R(version) ? true : false) : false)
-#define IS_92D_C_CUT(version) \
- ((IS_92D(version)) ? (IS_C_CUT(version) ? true : false) : false)
-#define IS_92D_D_CUT(version) \
- ((IS_92D(version)) ? (IS_D_CUT(version) ? true : false) : false)
-#define IS_92D_E_CUT(version) \
- ((IS_92D(version)) ? (IS_E_CUT(version) ? true : false) : false)
-
-#define IS_8723A_A_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_A_CUT(version) ? true : false) : false)
-#define IS_8723A_B_CUT(version) \
- ((IS_8723_SERIES(version)) ? (IS_B_CUT(version) ? true : false) : false)
-
#endif
diff --git a/drivers/staging/r8188eu/include/drv_types.h b/drivers/staging/r8188eu/include/drv_types.h
index 04f4224c11de..3e4928320f17 100644
--- a/drivers/staging/r8188eu/include/drv_types.h
+++ b/drivers/staging/r8188eu/include/drv_types.h
@@ -11,8 +11,6 @@
#define __DRV_TYPES_H__
#define DRV_NAME "r8188eu"
-#define CONFIG_88EU_AP_MODE 1
-#define CONFIG_88EU_P2P 1
#include "osdep_service.h"
#include "wlan_bssdef.h"
@@ -35,24 +33,10 @@
#include "rtw_mlme_ext.h"
#include "rtw_p2p.h"
#include "rtw_ap.h"
-#include "rtw_mp.h"
#include "rtw_br_ext.h"
#define DRIVERVERSION "v4.1.4_6773.20130222"
-#define SPEC_DEV_ID_NONE BIT(0)
-#define SPEC_DEV_ID_DISABLE_HT BIT(1)
-#define SPEC_DEV_ID_ENABLE_PS BIT(2)
-#define SPEC_DEV_ID_RF_CONFIG_1T1R BIT(3)
-#define SPEC_DEV_ID_RF_CONFIG_2T2R BIT(4)
-#define SPEC_DEV_ID_ASSIGN_IFNAME BIT(5)
-
-struct specific_device_id {
- u32 flags;
- u16 idVendor;
- u16 idProduct;
-};
-
struct registry_priv {
u8 chip_version;
u8 rfintfs;
@@ -78,7 +62,6 @@ struct registry_priv {
u8 short_retry_lmt;
u16 busy_thresh;
u8 ack_policy;
- u8 mp_mode;
u8 software_encrypt;
u8 software_decrypt;
u8 acm_method;
@@ -172,9 +155,6 @@ struct dvobj_priv {
struct semaphore usb_suspend_sema;
struct mutex usb_vendor_req_mutex;
- u8 *usb_alloc_vendor_req_buf;
- u8 *usb_vendor_req_buf;
-
struct usb_interface *pusbintf;
struct usb_device *pusbdev;
@@ -212,9 +192,6 @@ struct adapter {
* replace module. */
int pid[3];/* process id from UI, 0:wps, 1:hostapd, 2:dhcpcd */
int bDongle;/* build-in module or external dongle */
- u16 chip_type;
- u16 HardwareType;
- u16 interface_type;/* USB,SDIO,SPI,PCI */
struct dvobj_priv *dvobj;
struct mlme_priv mlmepriv;
@@ -230,17 +207,11 @@ struct adapter {
struct pwrctrl_priv pwrctrlpriv;
struct eeprom_priv eeprompriv;
struct led_priv ledpriv;
- struct mp_priv mppriv;
-
-#ifdef CONFIG_88EU_AP_MODE
struct hostapd_priv *phostapdpriv;
-#endif
-
struct wifidirect_info wdinfo;
void *HalData;
u32 hal_data_sz;
- struct hal_ops HalFunc;
s32 bDriverStopped;
s32 bSurpriseRemoved;
@@ -286,11 +257,9 @@ struct adapter {
 /* The driver will show the desired channel number
* when this flag is 1. */
u8 bNotifyChannelChange;
-#ifdef CONFIG_88EU_P2P
/* The driver will show the current P2P status when the
* upper application reads it. */
u8 bShowGetP2PState;
-#endif
struct adapter *pbuddy_adapter;
struct mutex *hw_init_mutex;
@@ -298,11 +267,11 @@ struct adapter {
spinlock_t br_ext_lock;
struct nat25_network_db_entry *nethash[NAT25_HASH_SIZE];
int pppoe_connection_in_progress;
- unsigned char pppoe_addr[MACADDRLEN];
- unsigned char scdb_mac[MACADDRLEN];
+ unsigned char pppoe_addr[ETH_ALEN];
+ unsigned char scdb_mac[ETH_ALEN];
unsigned char scdb_ip[4];
struct nat25_network_db_entry *scdb_entry;
- unsigned char br_mac[MACADDRLEN];
+ unsigned char br_mac[ETH_ALEN];
unsigned char br_ip[4];
struct br_ext_info ethBrExtInfo;
diff --git a/drivers/staging/r8188eu/include/hal_intf.h b/drivers/staging/r8188eu/include/hal_intf.h
index fa252540e596..d777ad9071e2 100644
--- a/drivers/staging/r8188eu/include/hal_intf.h
+++ b/drivers/staging/r8188eu/include/hal_intf.h
@@ -8,23 +8,6 @@
#include "drv_types.h"
#include "Hal8188EPhyCfg.h"
-enum RTL871X_HCI_TYPE {
- RTW_PCIE = BIT(0),
- RTW_USB = BIT(1),
- RTW_SDIO = BIT(2),
- RTW_GSPI = BIT(3),
-};
-
-enum _CHIP_TYPE {
- NULL_CHIP_TYPE,
- RTL8712_8188S_8191S_8192S,
- RTL8188C_8192C,
- RTL8192D,
- RTL8723A,
- RTL8188E,
- MAX_CHIP_TYPE
-};
-
enum hw_variables {
HW_VAR_MEDIA_STATUS,
HW_VAR_MEDIA_STATUS1,
@@ -63,7 +46,6 @@ enum hw_variables {
HW_VAR_AMPDU_MIN_SPACE,
HW_VAR_AMPDU_FACTOR,
HW_VAR_RXDMA_AGG_PG_TH,
- HW_VAR_SET_RPWM,
HW_VAR_H2C_FW_PWRMODE,
HW_VAR_H2C_FW_JOINBSSRPT,
HW_VAR_FWLPS_RF_ON,
@@ -73,7 +55,6 @@ enum hw_variables {
HW_VAR_TDLS_RS_RCR,
HW_VAR_TDLS_DONE_CH_SEN,
HW_VAR_INITIAL_GAIN,
- HW_VAR_TRIGGER_GPIO_0,
HW_VAR_BT_SET_COEXIST,
HW_VAR_BT_ISSUE_DELBA,
HW_VAR_CURRENT_ANTENNA,
@@ -85,7 +66,6 @@ enum hw_variables {
HW_VAR_EFUSE_BT_USAGE,
HW_VAR_EFUSE_BT_BYTES,
HW_VAR_FIFO_CLEARN_UP,
- HW_VAR_CHECK_TXBUF,
HW_VAR_APFM_ON_MAC, /* Auto FSM to Turn On, include clock, isolation,
* power control for MAC only */
/* The valid upper nav range for the HW updating, if the true value is
@@ -121,290 +101,62 @@ enum hal_odm_variable {
HAL_ODM_WIFI_DISPLAY_STATE,
};
-enum hal_intf_ps_func {
- HAL_USB_SELECT_SUSPEND,
- HAL_MAX_ID,
-};
-
typedef s32 (*c2h_id_filter)(u8 id);
-struct hal_ops {
- u32 (*hal_power_on)(struct adapter *padapter);
- u32 (*hal_init)(struct adapter *padapter);
- u32 (*hal_deinit)(struct adapter *padapter);
-
- void (*free_hal_data)(struct adapter *padapter);
-
- u32 (*inirp_init)(struct adapter *padapter);
- u32 (*inirp_deinit)(struct adapter *padapter);
-
- s32 (*init_xmit_priv)(struct adapter *padapter);
-
- s32 (*init_recv_priv)(struct adapter *padapter);
- void (*free_recv_priv)(struct adapter *padapter);
-
- void (*InitSwLeds)(struct adapter *padapter);
- void (*DeInitSwLeds)(struct adapter *padapter);
-
- void (*dm_init)(struct adapter *padapter);
- void (*dm_deinit)(struct adapter *padapter);
- void (*read_chip_version)(struct adapter *padapter);
-
- void (*init_default_value)(struct adapter *padapter);
-
- void (*intf_chip_configure)(struct adapter *padapter);
-
- void (*read_adapter_info)(struct adapter *padapter);
-
- void (*enable_interrupt)(struct adapter *padapter);
- void (*disable_interrupt)(struct adapter *padapter);
- s32 (*interrupt_handler)(struct adapter *padapter);
-
- void (*set_bwmode_handler)(struct adapter *padapter,
- enum ht_channel_width Bandwidth,
- u8 Offset);
- void (*set_channel_handler)(struct adapter *padapter, u8 channel);
-
- void (*hal_dm_watchdog)(struct adapter *padapter);
-
- void (*SetHwRegHandler)(struct adapter *padapter, u8 variable,
- u8 *val);
- void (*GetHwRegHandler)(struct adapter *padapter, u8 variable,
- u8 *val);
-
- u8 (*GetHalDefVarHandler)(struct adapter *padapter,
- enum hal_def_variable eVariable,
- void *pValue);
- u8 (*SetHalDefVarHandler)(struct adapter *padapter,
- enum hal_def_variable eVariable,
- void *pValue);
-
- void (*GetHalODMVarHandler)(struct adapter *padapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet);
- void (*SetHalODMVarHandler)(struct adapter *padapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet);
-
- void (*UpdateRAMaskHandler)(struct adapter *padapter,
- u32 mac_id, u8 rssi_level);
- void (*SetBeaconRelatedRegistersHandler)(struct adapter *padapter);
-
- void (*Add_RateATid)(struct adapter *adapter, u32 bitmap, u8 arg,
- u8 rssi_level);
- void (*run_thread)(struct adapter *adapter);
- void (*cancel_thread)(struct adapter *adapter);
-
- u8 (*AntDivBeforeLinkHandler)(struct adapter *adapter);
- void (*AntDivCompareHandler)(struct adapter *adapter,
- struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src);
- u8 (*interface_ps_func)(struct adapter *padapter,
- enum hal_intf_ps_func efunc_id, u8 *val);
-
- s32 (*hal_xmit)(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
- s32 (*mgnt_xmit)(struct adapter *padapter,
- struct xmit_frame *pmgntframe);
- s32 (*hal_xmitframe_enqueue)(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
-
- u32 (*read_bbreg)(struct adapter *padapter, u32 RegAddr,
- u32 BitMask);
- void (*write_bbreg)(struct adapter *padapter, u32 RegAddr,
- u32 BitMask, u32 Data);
- u32 (*read_rfreg)(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask);
- void (*write_rfreg)(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
-
- void (*EfusePowerSwitch)(struct adapter *padapter, u8 bWrite,
- u8 PwrState);
- void (*ReadEFuse)(struct adapter *padapter, u8 efuseType, u16 _offset,
- u16 _size_byte, u8 *pbuf, bool bPseudoTest);
- void (*EFUSEGetEfuseDefinition)(struct adapter *padapter, u8 efuseType,
- u8 type, void *pOut, bool bPseudoTest);
- u16 (*EfuseGetCurrentSize)(struct adapter *padapter, u8 efuseType,
- bool bPseudoTest);
- int (*Efuse_PgPacketRead)(struct adapter *adapter, u8 offset,
- u8 *data, bool bPseudoTest);
- int (*Efuse_PgPacketWrite)(struct adapter *padapter, u8 offset,
- u8 word_en, u8 *data, bool bPseudoTest);
- u8 (*Efuse_WordEnableDataWrite)(struct adapter *padapter,
- u16 efuse_addr, u8 word_en,
- u8 *data, bool bPseudoTest);
- bool (*Efuse_PgPacketWrite_BT)(struct adapter *padapter, u8 offset,
- u8 word_en, u8 *data, bool test);
-
- void (*sreset_init_value)(struct adapter *padapter);
- void (*sreset_reset_value)(struct adapter *padapter);
- void (*silentreset)(struct adapter *padapter);
- void (*sreset_xmit_status_check)(struct adapter *padapter);
- void (*sreset_linked_status_check) (struct adapter *padapter);
- u8 (*sreset_get_wifi_status)(struct adapter *padapter);
-
- int (*IOL_exec_cmds_sync)(struct adapter *padapter,
- struct xmit_frame *frame, u32 max_wait,
- u32 bndy_cnt);
-
- void (*hal_notch_filter)(struct adapter *adapter, bool enable);
- void (*hal_reset_security_engine)(struct adapter *adapter);
- s32 (*c2h_handler)(struct adapter *padapter,
- struct c2h_evt_hdr *c2h_evt);
- c2h_id_filter c2h_id_filter_ccx;
-};
-
-enum rt_eeprom_type {
- EEPROM_93C46,
- EEPROM_93C56,
- EEPROM_BOOT_EFUSE,
-};
-
#define RF_CHANGE_BY_INIT 0
#define RF_CHANGE_BY_IPS BIT(28)
#define RF_CHANGE_BY_PS BIT(29)
#define RF_CHANGE_BY_HW BIT(30)
#define RF_CHANGE_BY_SW BIT(31)
-enum hardware_type {
- HARDWARE_TYPE_RTL8180,
- HARDWARE_TYPE_RTL8185,
- HARDWARE_TYPE_RTL8187,
- HARDWARE_TYPE_RTL8188,
- HARDWARE_TYPE_RTL8190P,
- HARDWARE_TYPE_RTL8192E,
- HARDWARE_TYPE_RTL819xU,
- HARDWARE_TYPE_RTL8192SE,
- HARDWARE_TYPE_RTL8192SU,
- HARDWARE_TYPE_RTL8192CE,
- HARDWARE_TYPE_RTL8192CU,
- HARDWARE_TYPE_RTL8192DE,
- HARDWARE_TYPE_RTL8192DU,
- HARDWARE_TYPE_RTL8723AE,
- HARDWARE_TYPE_RTL8723AU,
- HARDWARE_TYPE_RTL8723AS,
- HARDWARE_TYPE_RTL8188EE,
- HARDWARE_TYPE_RTL8188EU,
- HARDWARE_TYPE_RTL8188ES,
- HARDWARE_TYPE_MAX,
-};
-
-/* RTL8188E Series */
-#define IS_HARDWARE_TYPE_8188EE(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EE)
-#define IS_HARDWARE_TYPE_8188EU(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188EU)
-#define IS_HARDWARE_TYPE_8188ES(_Adapter) \
-(((struct adapter *)_Adapter)->HardwareType == HARDWARE_TYPE_RTL8188ES)
-#define IS_HARDWARE_TYPE_8188E(_Adapter) \
-(IS_HARDWARE_TYPE_8188EE(_Adapter) || IS_HARDWARE_TYPE_8188EU(_Adapter) || \
- IS_HARDWARE_TYPE_8188ES(_Adapter))
-
-#define GET_EEPROM_EFUSE_PRIV(adapter) (&adapter->eeprompriv)
-
#define is_boot_from_eeprom(adapter) (adapter->eeprompriv.EepromOrEfuse)
-void rtl8188eu_set_hal_ops(struct adapter *padapter);
-void rtw_hal_def_value_init(struct adapter *padapter);
-
-void rtw_hal_free_data(struct adapter *padapter);
+void rtl8188eu_alloc_haldata(struct adapter *adapt);
-void rtw_hal_dm_init(struct adapter *padapter);
-void rtw_hal_dm_deinit(struct adapter *padapter);
-void rtw_hal_sw_led_init(struct adapter *padapter);
-void rtw_hal_sw_led_deinit(struct adapter *padapter);
+void rtl8188eu_interface_configure(struct adapter *adapt);
+void ReadAdapterInfo8188EU(struct adapter *Adapter);
+void rtl8188eu_init_default_value(struct adapter *adapt);
+void rtl8188e_SetHalODMVar(struct adapter *Adapter,
+ enum hal_odm_variable eVariable, void *pValue1, bool bSet);
+u32 rtl8188eu_InitPowerOn(struct adapter *adapt);
+void rtl8188e_free_hal_data(struct adapter *padapter);
+void rtl8188e_EfusePowerSwitch(struct adapter *pAdapter, u8 bWrite, u8 PwrState);
+void rtl8188e_ReadEFuse(struct adapter *Adapter, u8 efuseType,
+ u16 _offset, u16 _size_byte, u8 *pbuf,
+ bool bPseudoTest);
+void rtl8188e_EFUSE_GetEfuseDefinition(struct adapter *pAdapter, u8 efuseType,
+ u8 type, void *pOut, bool bPseudoTest);
+u16 rtl8188e_EfuseGetCurrentSize(struct adapter *pAdapter, u8 efuseType, bool bPseudoTest);
+int rtl8188e_Efuse_PgPacketRead(struct adapter *pAdapter, u8 offset, u8 *data, bool bPseudoTest);
+int rtl8188e_Efuse_PgPacketWrite(struct adapter *pAdapter, u8 offset, u8 word_en, u8 *data, bool bPseudoTest);
-u32 rtw_hal_power_on(struct adapter *padapter);
-uint rtw_hal_init(struct adapter *padapter);
-uint rtw_hal_deinit(struct adapter *padapter);
-void rtw_hal_stop(struct adapter *padapter);
-void rtw_hal_set_hwreg(struct adapter *padapter, u8 variable, u8 *val);
-void rtw_hal_get_hwreg(struct adapter *padapter, u8 variable, u8 *val);
+void hal_notch_filter_8188e(struct adapter *adapter, bool enable);
-void rtw_hal_chip_configure(struct adapter *padapter);
-void rtw_hal_read_chip_info(struct adapter *padapter);
-void rtw_hal_read_chip_version(struct adapter *padapter);
+void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt);
+void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_level);
-u8 rtw_hal_set_def_var(struct adapter *padapter,
- enum hal_def_variable eVariable, void *pValue);
-u8 rtw_hal_get_def_var(struct adapter *padapter,
- enum hal_def_variable eVariable, void *pValue);
+int rtl8188e_IOL_exec_cmds_sync(struct adapter *adapter,
+ struct xmit_frame *xmit_frame, u32 max_wating_ms, u32 bndy_cnt);
-void rtw_hal_set_odm_var(struct adapter *padapter,
- enum hal_odm_variable eVariable, void *pValue1,
- bool bSet);
-void rtw_hal_get_odm_var(struct adapter *padapter,
- enum hal_odm_variable eVariable,
- void *pValue1, bool bSet);
+u8 SetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
+u8 GetHalDefVar8188EUsb(struct adapter *Adapter, enum hal_def_variable eVariable, void *pValue);
-void rtw_hal_enable_interrupt(struct adapter *padapter);
-void rtw_hal_disable_interrupt(struct adapter *padapter);
+unsigned int rtl8188eu_inirp_init(struct adapter *Adapter);
-u32 rtw_hal_inirp_init(struct adapter *padapter);
-u32 rtw_hal_inirp_deinit(struct adapter *padapter);
+void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
+void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val);
-u8 rtw_hal_intf_ps_func(struct adapter *padapter,
- enum hal_intf_ps_func efunc_id, u8 *val);
-s32 rtw_hal_xmitframe_enqueue(struct adapter *padapter,
- struct xmit_frame *pxmitframe);
-
-s32 rtw_hal_xmit(struct adapter *padapter, struct xmit_frame *pxmitframe);
-s32 rtw_hal_mgnt_xmit(struct adapter *padapter,
- struct xmit_frame *pmgntframe);
-
-s32 rtw_hal_init_xmit_priv(struct adapter *padapter);
+uint rtw_hal_init(struct adapter *padapter);
+uint rtw_hal_deinit(struct adapter *padapter);
+void rtw_hal_stop(struct adapter *padapter);
-s32 rtw_hal_init_recv_priv(struct adapter *padapter);
-void rtw_hal_free_recv_priv(struct adapter *padapter);
+u32 rtl8188eu_hal_init(struct adapter *Adapter);
+u32 rtl8188eu_hal_deinit(struct adapter *Adapter);
void rtw_hal_update_ra_mask(struct adapter *padapter, u32 mac_id, u8 level);
-void rtw_hal_add_ra_tid(struct adapter *adapt, u32 bitmap, u8 arg, u8 level);
void rtw_hal_clone_data(struct adapter *dst_adapt,
struct adapter *src_adapt);
-void rtw_hal_start_thread(struct adapter *padapter);
-void rtw_hal_stop_thread(struct adapter *padapter);
-
-void rtw_hal_bcn_related_reg_setting(struct adapter *padapter);
-
-u32 rtw_hal_read_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask);
-void rtw_hal_write_bbreg(struct adapter *padapter, u32 RegAddr, u32 BitMask,
- u32 Data);
-u32 rtw_hal_read_rfreg(struct adapter *padapter, enum rf_radio_path eRFPath,
- u32 RegAddr, u32 BitMask);
-void rtw_hal_write_rfreg(struct adapter *padapter,
- enum rf_radio_path eRFPath, u32 RegAddr,
- u32 BitMask, u32 Data);
-
-s32 rtw_hal_interrupt_handler(struct adapter *padapter);
-
-void rtw_hal_set_bwmode(struct adapter *padapter,
- enum ht_channel_width Bandwidth, u8 Offset);
-void rtw_hal_set_chan(struct adapter *padapter, u8 channel);
-void rtw_hal_dm_watchdog(struct adapter *padapter);
-
-u8 rtw_hal_antdiv_before_linked(struct adapter *padapter);
-void rtw_hal_antdiv_rssi_compared(struct adapter *padapter,
- struct wlan_bssid_ex *dst,
- struct wlan_bssid_ex *src);
-
-void rtw_hal_sreset_init(struct adapter *padapter);
-void rtw_hal_sreset_reset(struct adapter *padapter);
-void rtw_hal_sreset_reset_value(struct adapter *padapter);
-void rtw_hal_sreset_xmit_status_check(struct adapter *padapter);
-void rtw_hal_sreset_linked_status_check(struct adapter *padapter);
-u8 rtw_hal_sreset_get_wifi_status(struct adapter *padapter);
-
-int rtw_hal_iol_cmd(struct adapter *adapter, struct xmit_frame *xmit_frame,
- u32 max_wating_ms, u32 bndy_cnt);
-
-void rtw_hal_notch_filter(struct adapter *adapter, bool enable);
-void rtw_hal_reset_security_engine(struct adapter *adapter);
-s32 rtw_hal_c2h_handler(struct adapter *adapter,
- struct c2h_evt_hdr *c2h_evt);
-c2h_id_filter rtw_hal_c2h_id_filter_ccx(struct adapter *adapter);
void indicate_wx_scan_complete_event(struct adapter *padapter);
u8 rtw_do_join(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/ieee80211.h b/drivers/staging/r8188eu/include/ieee80211.h
index bc5b030e9c40..6c8206bd5466 100644
--- a/drivers/staging/r8188eu/include/ieee80211.h
+++ b/drivers/staging/r8188eu/include/ieee80211.h
@@ -15,38 +15,8 @@
#define ETH_TYPE_LEN 2
#define PAYLOAD_TYPE_LEN 1
-#ifdef CONFIG_88EU_AP_MODE
-
#define RTL_IOCTL_HOSTAPD (SIOCIWFIRSTPRIV + 28)
-/* RTL871X_IOCTL_HOSTAPD ioctl() cmd: */
-enum {
- RTL871X_HOSTAPD_FLUSH = 1,
- RTL871X_HOSTAPD_ADD_STA = 2,
- RTL871X_HOSTAPD_REMOVE_STA = 3,
- RTL871X_HOSTAPD_GET_INFO_STA = 4,
- /* REMOVED: PRISM2_HOSTAPD_RESET_TXEXC_STA = 5, */
- RTL871X_HOSTAPD_GET_WPAIE_STA = 5,
- RTL871X_SET_ENCRYPTION = 6,
- RTL871X_GET_ENCRYPTION = 7,
- RTL871X_HOSTAPD_SET_FLAGS_STA = 8,
- RTL871X_HOSTAPD_GET_RID = 9,
- RTL871X_HOSTAPD_SET_RID = 10,
- RTL871X_HOSTAPD_SET_ASSOC_AP_ADDR = 11,
- RTL871X_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- RTL871X_HOSTAPD_MLME = 13,
- RTL871X_HOSTAPD_SCAN_REQ = 14,
- RTL871X_HOSTAPD_STA_CLEAR_STATS = 15,
- RTL871X_HOSTAPD_SET_BEACON = 16,
- RTL871X_HOSTAPD_SET_WPS_BEACON = 17,
- RTL871X_HOSTAPD_SET_WPS_PROBE_RESP = 18,
- RTL871X_HOSTAPD_SET_WPS_ASSOC_RESP = 19,
- RTL871X_HOSTAPD_SET_HIDDEN_SSID = 20,
- RTL871X_HOSTAPD_SET_MACADDR_ACL = 21,
- RTL871X_HOSTAPD_ACL_ADD_STA = 22,
- RTL871X_HOSTAPD_ACL_REMOVE_STA = 23,
-};
-
/* STA flags */
#define WLAN_STA_AUTH BIT(0)
#define WLAN_STA_ASSOC BIT(1)
@@ -64,8 +34,6 @@ enum {
#define WLAN_STA_MAYBE_WPS BIT(13)
#define WLAN_STA_NONERP BIT(31)
-#endif
-
#define IEEE_CMD_SET_WPA_PARAM 1
#define IEEE_CMD_SET_WPA_IE 2
#define IEEE_CMD_SET_ENCRYPTION 3
@@ -185,7 +153,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct {
int command;
@@ -198,9 +166,8 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
-#ifdef CONFIG_88EU_AP_MODE
struct {
u16 aid;
u16 capability;
@@ -210,14 +177,11 @@ struct ieee_param {
} add_sta;
struct {
u8 reserved[2];/* for set max_num_sta */
- u8 buf[0];
+ u8 buf[];
} bcn_ie;
-#endif
-
} u;
};
-#ifdef CONFIG_88EU_AP_MODE
struct ieee_param_ex {
u32 cmd;
u8 sta_addr[ETH_ALEN];
@@ -239,7 +203,6 @@ struct sta_data {
u64 tx_bytes;
u64 tx_drops;
};
-#endif
#define IEEE80211_DATA_LEN 2304
/* Maximum size for the MA-UNITDATA primitive, 802.11 standard section
@@ -925,10 +888,6 @@ struct tx_pending {
#define IEEE_G (1<<2)
#define IEEE_MODE_MASK (IEEE_A|IEEE_B|IEEE_G)
-/* Baron move to ieee80211.c */
-int ieee80211_is_empty_essid(const char *essid, int essid_len);
-int ieee80211_get_hdrlen(u16 fc);
-
/* Action category code */
enum rtw_ieee80211_category {
RTW_WLAN_CATEGORY_SPECTRUM_MGMT = 0,
@@ -1130,26 +1089,7 @@ enum parse_res rtw_ieee802_11_parse_elems(u8 *start, uint len,
u8 *rtw_set_fixed_ie(unsigned char *pbuf, unsigned int len,
unsigned char *source, unsigned int *frlen);
u8 *rtw_set_ie(u8 *pbuf, int index, uint len, u8 *source, uint *frlen);
-
-enum secondary_ch_offset {
- SCN = 0, /* no secondary channel */
- SCA = 1, /* secondary channel above */
- SCB = 3, /* secondary channel below */
-};
-u8 secondary_ch_offset_to_hal_ch_offset(u8 ch_offset);
-u8 hal_ch_offset_to_secondary_ch_offset(u8 ch_offset);
-u8 *rtw_set_ie_ch_switch(u8 *buf, u32 *buf_len, u8 ch_switch_mode,
- u8 new_ch, u8 ch_switch_cnt);
-u8 *rtw_set_ie_secondary_ch_offset(u8 *buf, u32 *buf_len,
- u8 secondary_ch_offset);
-u8 *rtw_set_ie_mesh_ch_switch_parm(u8 *buf, u32 *buf_len, u8 ttl,
- u8 flags, u16 reason, u16 precedence);
-
u8 *rtw_get_ie(u8 *pbuf, int index, int *len, int limit);
-u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui,
- u8 oui_len, u8 *ie, uint *ielen);
-int rtw_ies_remove_ie(u8 *ies, uint *ies_len, uint offset,
- u8 eid, u8 *oui, u8 oui_len);
void rtw_set_supported_rate(u8 *SupportedRates, uint mode);
@@ -1183,11 +1123,6 @@ u8 *rtw_get_wps_attr_content(u8 *wps_ie, uint wps_ielen, u16 target_attr_id,
for (ie = (void *)buf; (((u8 *)ie) - ((u8 *)buf) + 1) < buf_len; \
ie = (void *)(((u8 *)ie) + *(((u8 *)ie)+1) + 2))
-void dump_ies(u8 *buf, u32 buf_len);
-void dump_wps_ie(u8 *ie, u32 ie_len);
-
-#ifdef CONFIG_88EU_P2P
-void dump_p2p_ie(u8 *ie, u32 ie_len);
u8 *rtw_get_p2p_ie(u8 *in_ie, int in_len, u8 *p2p_ie, uint *p2p_ielen);
u8 *rtw_get_p2p_attr(u8 *p2p_ie, uint p2p_ielen, u8 target_attr_id,
u8 *buf_attr, u32 *len_attr);
@@ -1197,8 +1132,6 @@ u32 rtw_set_p2p_attr_content(u8 *pbuf, u8 attr_id, u16 attr_len,
u8 *pdata_attr);
void rtw_wlan_bssid_ex_remove_p2p_attr(struct wlan_bssid_ex *bss_ex,
u8 attr_id);
-#endif
-
uint rtw_get_rateset_len(u8 *rateset);
struct registry_priv;
@@ -1219,8 +1152,4 @@ void rtw_macaddr_cfg(u8 *mac_addr);
u16 rtw_mcs_rate(u8 rf_type, u8 bw_40MHz, u8 short_GI_20, u8 short_GI_40,
unsigned char *MCS_rate);
-int rtw_action_frame_parse(const u8 *frame, u32 frame_len, u8 *category,
- u8 *action);
-const char *action_public_str(u8 action);
-
#endif /* IEEE80211_H */
diff --git a/drivers/staging/r8188eu/include/ioctl_cfg80211.h b/drivers/staging/r8188eu/include/ioctl_cfg80211.h
index e22481050ef8..738f645f9bbc 100644
--- a/drivers/staging/r8188eu/include/ioctl_cfg80211.h
+++ b/drivers/staging/r8188eu/include/ioctl_cfg80211.h
@@ -61,13 +61,11 @@ void rtw_cfg80211_indicate_disconnect(struct adapter *padapter);
void rtw_cfg80211_indicate_scan_done(struct rtw_wdev_priv *pwdev_priv,
bool aborted);
-#ifdef CONFIG_88EU_AP_MODE
void rtw_cfg80211_indicate_sta_assoc(struct adapter *padapter,
u8 *pmgmt_frame, uint frame_len);
void rtw_cfg80211_indicate_sta_disassoc(struct adapter *padapter,
unsigned char *da,
unsigned short reason);
-#endif /* CONFIG_88EU_AP_MODE */
void rtw_cfg80211_issue_p2p_provision_request(struct adapter *padapter,
const u8 *buf, size_t len);
diff --git a/drivers/staging/r8188eu/include/mp_custom_oid.h b/drivers/staging/r8188eu/include/mp_custom_oid.h
deleted file mode 100644
index 7bcb857c795d..000000000000
--- a/drivers/staging/r8188eu/include/mp_custom_oid.h
+++ /dev/null
@@ -1,333 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __CUSTOM_OID_H
-#define __CUSTOM_OID_H
-
-/* by Owen */
-/* 0xFF818000 - 0xFF81802F RTL8180 Mass Production Kit */
-/* 0xFF818500 - 0xFF81850F RTL8185 Setup Utility */
-/* 0xFF818580 - 0xFF81858F RTL8185 Phy Status Utility */
-
-/* */
-
-/* by Owen for Production Kit */
-/* For Production Kit with Agilent Equipments */
-/* in order to make our custom oids hopefully somewhat unique */
-/* we will use 0xFF (indicating implementation specific OID) */
-/* 81(first byte of non zero Realtek unique identifier) */
-/* 80 (second byte of non zero Realtek unique identifier) */
-/* XX (the custom OID number - providing 255 possible custom oids) */
-
-#define OID_RT_PRO_RESET_DUT 0xFF818000
-#define OID_RT_PRO_SET_DATA_RATE 0xFF818001
-#define OID_RT_PRO_START_TEST 0xFF818002
-#define OID_RT_PRO_STOP_TEST 0xFF818003
-#define OID_RT_PRO_SET_PREAMBLE 0xFF818004
-#define OID_RT_PRO_SET_SCRAMBLER 0xFF818005
-#define OID_RT_PRO_SET_FILTER_BB 0xFF818006
-#define OID_RT_PRO_SET_MANUAL_DIVERSITY_BB 0xFF818007
-#define OID_RT_PRO_SET_CHANNEL_DIRECT_CALL 0xFF818008
-#define OID_RT_PRO_SET_SLEEP_MODE_DIRECT_CALL 0xFF818009
-#define OID_RT_PRO_SET_WAKE_MODE_DIRECT_CALL 0xFF81800A
-
-#define OID_RT_PRO_SET_TX_ANTENNA_BB 0xFF81800D
-#define OID_RT_PRO_SET_ANTENNA_BB 0xFF81800E
-#define OID_RT_PRO_SET_CR_SCRAMBLER 0xFF81800F
-#define OID_RT_PRO_SET_CR_NEW_FILTER 0xFF818010
-#define OID_RT_PRO_SET_TX_POWER_CONTROL 0xFF818011
-#define OID_RT_PRO_SET_CR_TX_CONFIG 0xFF818012
-#define OID_RT_PRO_GET_TX_POWER_CONTROL 0xFF818013
-#define OID_RT_PRO_GET_CR_SIGNAL_QUALITY 0xFF818014
-#define OID_RT_PRO_SET_CR_SETPOINT 0xFF818015
-#define OID_RT_PRO_SET_INTEGRATOR 0xFF818016
-#define OID_RT_PRO_SET_SIGNAL_QUALITY 0xFF818017
-#define OID_RT_PRO_GET_INTEGRATOR 0xFF818018
-#define OID_RT_PRO_GET_SIGNAL_QUALITY 0xFF818019
-#define OID_RT_PRO_QUERY_EEPROM_TYPE 0xFF81801A
-#define OID_RT_PRO_WRITE_MAC_ADDRESS 0xFF81801B
-#define OID_RT_PRO_READ_MAC_ADDRESS 0xFF81801C
-#define OID_RT_PRO_WRITE_CIS_DATA 0xFF81801D
-#define OID_RT_PRO_READ_CIS_DATA 0xFF81801E
-#define OID_RT_PRO_WRITE_POWER_CONTROL 0xFF81801F
-#define OID_RT_PRO_READ_POWER_CONTROL 0xFF818020
-#define OID_RT_PRO_WRITE_EEPROM 0xFF818021
-#define OID_RT_PRO_READ_EEPROM 0xFF818022
-#define OID_RT_PRO_RESET_TX_PACKET_SENT 0xFF818023
-#define OID_RT_PRO_QUERY_TX_PACKET_SENT 0xFF818024
-#define OID_RT_PRO_RESET_RX_PACKET_RECEIVED 0xFF818025
-#define OID_RT_PRO_QUERY_RX_PACKET_RECEIVED 0xFF818026
-#define OID_RT_PRO_QUERY_RX_PACKET_CRC32_ERROR 0xFF818027
-#define OID_RT_PRO_QUERY_CURRENT_ADDRESS 0xFF818028
-#define OID_RT_PRO_QUERY_PERMANENT_ADDRESS 0xFF818029
-#define OID_RT_PRO_SET_PHILIPS_RF_PARAMETERS 0xFF81802A
-#define OID_RT_PRO_RECEIVE_PACKET 0xFF81802C
-/* added by Owen on 04/08/03 for Cameo's request */
-#define OID_RT_PRO_WRITE_EEPROM_BYTE 0xFF81802D
-#define OID_RT_PRO_READ_EEPROM_BYTE 0xFF81802E
-#define OID_RT_PRO_SET_MODULATION 0xFF81802F
-/* */
-
-/* Sean */
-#define OID_RT_DRIVER_OPTION 0xFF818080
-#define OID_RT_RF_OFF 0xFF818081
-#define OID_RT_AUTH_STATUS 0xFF818082
-
-/* */
-#define OID_RT_PRO_SET_CONTINUOUS_TX 0xFF81800B
-#define OID_RT_PRO_SET_SINGLE_CARRIER_TX 0xFF81800C
-#define OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX 0xFF81802B
-#define OID_RT_PRO_SET_SINGLE_TONE_TX 0xFF818043
-/* */
-
-/* by Owen for RTL8185 Phy Status Report Utility */
-#define OID_RT_UTILITY_false_ALARM_COUNTERS 0xFF818580
-#define OID_RT_UTILITY_SELECT_DEBUG_MODE 0xFF818581
-#define OID_RT_UTILITY_SELECT_SUBCARRIER_NUMBER 0xFF818582
-#define OID_RT_UTILITY_GET_RSSI_STATUS 0xFF818583
-#define OID_RT_UTILITY_GET_FRAME_DETECTION_STATUS 0xFF818584
-#define OID_RT_UTILITY_GET_AGC_AND_FREQUENCY_OFFSET_ESTIMATION_STATUS \
- 0xFF818585
-#define OID_RT_UTILITY_GET_CHANNEL_ESTIMATION_STATUS 0xFF818586
-/* */
-
-/* by Owen on 03/09/19-03/09/22 for RTL8185 */
-#define OID_RT_WIRELESS_MODE 0xFF818500
-#define OID_RT_SUPPORTED_RATES 0xFF818501
-#define OID_RT_DESIRED_RATES 0xFF818502
-#define OID_RT_WIRELESS_MODE_STARTING_ADHOC 0xFF818503
-/* */
-
-#define OID_RT_GET_CONNECT_STATE 0xFF030001
-#define OID_RT_RESCAN 0xFF030002
-#define OID_RT_SET_KEY_LENGTH 0xFF030003
-#define OID_RT_SET_DEFAULT_KEY_ID 0xFF030004
-
-#define OID_RT_SET_CHANNEL 0xFF010182
-#define OID_RT_SET_SNIFFER_MODE 0xFF010183
-#define OID_RT_GET_SIGNAL_QUALITY 0xFF010184
-#define OID_RT_GET_SMALL_PACKET_CRC 0xFF010185
-#define OID_RT_GET_MIDDLE_PACKET_CRC 0xFF010186
-#define OID_RT_GET_LARGE_PACKET_CRC 0xFF010187
-#define OID_RT_GET_TX_RETRY 0xFF010188
-#define OID_RT_GET_RX_RETRY 0xFF010189
-#define OID_RT_PRO_SET_FW_DIG_STATE 0xFF01018A/* S */
-#define OID_RT_PRO_SET_FW_RA_STATE 0xFF01018B/* S */
-
-#define OID_RT_GET_RX_TOTAL_PACKET 0xFF010190
-#define OID_RT_GET_TX_BEACON_OK 0xFF010191
-#define OID_RT_GET_TX_BEACON_ERR 0xFF010192
-#define OID_RT_GET_RX_ICV_ERR 0xFF010193
-#define OID_RT_SET_ENCRYPTION_ALGORITHM 0xFF010194
-#define OID_RT_SET_NO_AUTO_RESCAN 0xFF010195
-#define OID_RT_GET_PREAMBLE_MODE 0xFF010196
-#define OID_RT_GET_DRIVER_UP_DELTA_TIME 0xFF010197
-#define OID_RT_GET_AP_IP 0xFF010198
-#define OID_RT_GET_CHANNELPLAN 0xFF010199
-#define OID_RT_SET_PREAMBLE_MODE 0xFF01019A
-#define OID_RT_SET_BCN_INTVL 0xFF01019B
-#define OID_RT_GET_RF_VENDER 0xFF01019C
-#define OID_RT_DEDICATE_PROBE 0xFF01019D
-#define OID_RT_PRO_RX_FILTER_PATTERN 0xFF01019E
-
-#define OID_RT_GET_DCST_CURRENT_THRESHOLD 0xFF01019F
-
-#define OID_RT_GET_CCA_ERR 0xFF0101A0
-#define OID_RT_GET_CCA_UPGRADE_THRESHOLD 0xFF0101A1
-#define OID_RT_GET_CCA_FALLBACK_THRESHOLD 0xFF0101A2
-
-#define OID_RT_GET_CCA_UPGRADE_EVALUATE_TIMES 0xFF0101A3
-#define OID_RT_GET_CCA_FALLBACK_EVALUATE_TIMES 0xFF0101A4
-
-/* by Owen on 03/31/03 for Cameo's request */
-#define OID_RT_SET_RATE_ADAPTIVE 0xFF0101A5
-/* */
-#define OID_RT_GET_DCST_EVALUATE_PERIOD 0xFF0101A5
-#define OID_RT_GET_DCST_TIME_UNIT_INDEX 0xFF0101A6
-#define OID_RT_GET_TOTAL_TX_BYTES 0xFF0101A7
-#define OID_RT_GET_TOTAL_RX_BYTES 0xFF0101A8
-#define OID_RT_CURRENT_TX_POWER_LEVEL 0xFF0101A9
-#define OID_RT_GET_ENC_KEY_MISMATCH_COUNT 0xFF0101AA
-#define OID_RT_GET_ENC_KEY_MATCH_COUNT 0xFF0101AB
-#define OID_RT_GET_CHANNEL 0xFF0101AC
-
-#define OID_RT_SET_CHANNELPLAN 0xFF0101AD
-#define OID_RT_GET_HARDWARE_RADIO_OFF 0xFF0101AE
-#define OID_RT_CHANNELPLAN_BY_COUNTRY 0xFF0101AF
-#define OID_RT_SCAN_AVAILABLE_BSSID 0xFF0101B0
-#define OID_RT_GET_HARDWARE_VERSION 0xFF0101B1
-#define OID_RT_GET_IS_ROAMING 0xFF0101B2
-#define OID_RT_GET_IS_PRIVACY 0xFF0101B3
-#define OID_RT_GET_KEY_MISMATCH 0xFF0101B4
-#define OID_RT_SET_RSSI_ROAM_TRAFFIC_TH 0xFF0101B5
-#define OID_RT_SET_RSSI_ROAM_SIGNAL_TH 0xFF0101B6
-#define OID_RT_RESET_LOG 0xFF0101B7
-#define OID_RT_GET_LOG 0xFF0101B8
-#define OID_RT_SET_INDICATE_HIDDEN_AP 0xFF0101B9
-#define OID_RT_GET_HEADER_FAIL 0xFF0101BA
-#define OID_RT_SUPPORTED_WIRELESS_MODE 0xFF0101BB
-#define OID_RT_GET_CHANNEL_LIST 0xFF0101BC
-#define OID_RT_GET_SCAN_IN_PROGRESS 0xFF0101BD
-#define OID_RT_GET_TX_INFO 0xFF0101BE
-#define OID_RT_RF_READ_WRITE_OFFSET 0xFF0101BF
-#define OID_RT_RF_READ_WRITE 0xFF0101C0
-
-/* For Netgear request. 2005.01.13, by rcnjko. */
-#define OID_RT_FORCED_DATA_RATE 0xFF0101C1
-#define OID_RT_WIRELESS_MODE_FOR_SCAN_LIST 0xFF0101C2
-/* For Netgear request. 2005.02.17, by rcnjko. */
-#define OID_RT_GET_BSS_WIRELESS_MODE 0xFF0101C3
-/* For AZ project. 2005.06.27, by rcnjko. */
-#define OID_RT_SCAN_WITH_MAGIC_PACKET 0xFF0101C4
-
-/* Vincent 8185MP */
-#define OID_RT_PRO_RX_FILTER 0xFF0111C0
-
-#define OID_CE_USB_WRITE_REGISTRY 0xFF0111C1
-#define OID_CE_USB_READ_REGISTRY 0xFF0111C2
-
-#define OID_RT_PRO_SET_INITIAL_GA 0xFF0111C3
-#define OID_RT_PRO_SET_BB_RF_STANDBY_MODE 0xFF0111C4
-#define OID_RT_PRO_SET_BB_RF_SHUTDOWN_MODE 0xFF0111C5
-#define OID_RT_PRO_SET_TX_CHARGE_PUMP 0xFF0111C6
-#define OID_RT_PRO_SET_RX_CHARGE_PUMP 0xFF0111C7
-#define OID_RT_PRO_RF_WRITE_REGISTRY 0xFF0111C8
-#define OID_RT_PRO_RF_READ_REGISTRY 0xFF0111C9
-#define OID_RT_PRO_QUERY_RF_TYPE 0xFF0111CA
-
-/* AP OID */
-#define OID_RT_AP_GET_ASSOCIATED_STATION_LIST 0xFF010300
-#define OID_RT_AP_GET_CURRENT_TIME_STAMP 0xFF010301
-#define OID_RT_AP_SWITCH_INTO_AP_MODE 0xFF010302
-#define OID_RT_AP_SET_DTIM_PERIOD 0xFF010303
-/* Determine if driver supports AP mode. */
-#define OID_RT_AP_SUPPORTED 0xFF010304
-/* Set WPA-PSK passphrase into authenticator. */
-#define OID_RT_AP_SET_PASSPHRASE 0xFF010305
-
-/* 8187MP. 2004.09.06, by rcnjko. */
-#define OID_RT_PRO8187_WI_POLL 0xFF818780
-#define OID_RT_PRO_WRITE_BB_REG 0xFF818781
-#define OID_RT_PRO_READ_BB_REG 0xFF818782
-#define OID_RT_PRO_WRITE_RF_REG 0xFF818783
-#define OID_RT_PRO_READ_RF_REG 0xFF818784
-
-/* Meeting House. added by Annie, 2005-07-20. */
-#define OID_RT_MH_VENDER_ID 0xFFEDC100
-
-/* 8711 MP OID added 20051230. */
-#define OID_RT_PRO8711_JOIN_BSS 0xFF871100/* S */
-
-#define OID_RT_PRO_READ_REGISTER 0xFF871101 /* Q */
-#define OID_RT_PRO_WRITE_REGISTER 0xFF871102 /* S */
-
-#define OID_RT_PRO_BURST_READ_REGISTER 0xFF871103 /* Q */
-#define OID_RT_PRO_BURST_WRITE_REGISTER 0xFF871104 /* S */
-
-#define OID_RT_PRO_WRITE_TXCMD 0xFF871105 /* S */
-
-#define OID_RT_PRO_READ16_EEPROM 0xFF871106 /* Q */
-#define OID_RT_PRO_WRITE16_EEPROM 0xFF871107 /* S */
-
-#define OID_RT_PRO_H2C_SET_COMMAND 0xFF871108 /* S */
-#define OID_RT_PRO_H2C_QUERY_RESULT 0xFF871109 /* Q */
-
-#define OID_RT_PRO8711_WI_POLL 0xFF87110A /* Q */
-#define OID_RT_PRO8711_PKT_LOSS 0xFF87110B /* Q */
-#define OID_RT_RD_ATTRIB_MEM 0xFF87110C/* Q */
-#define OID_RT_WR_ATTRIB_MEM 0xFF87110D/* S */
-
-/* Method 2 for H2C/C2H */
-#define OID_RT_PRO_H2C_CMD_MODE 0xFF871110 /* S */
-#define OID_RT_PRO_H2C_CMD_RSP_MODE 0xFF871111 /* Q */
-#define OID_RT_PRO_H2C_CMD_EVENT_MODE 0xFF871112 /* S */
-#define OID_RT_PRO_WAIT_C2H_EVENT 0xFF871113 /* Q */
-#define OID_RT_PRO_RW_ACCESS_PROTOCOL_TEST 0xFF871114/* Q */
-
-#define OID_RT_PRO_SCSI_ACCESS_TEST 0xFF871115 /* Q, S */
-
-#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_OUT 0xFF871116 /* S */
-#define OID_RT_PRO_SCSI_TCPIPOFFLOAD_IN 0xFF871117 /* Q,S */
-#define OID_RT_RRO_RX_PKT_VIA_IOCTRL 0xFF871118 /* Q */
-#define OID_RT_RRO_RX_PKTARRAY_VIA_IOCTRL 0xFF871119 /* Q */
-
-#define OID_RT_RPO_SET_PWRMGT_TEST 0xFF87111A /* S */
-#define OID_RT_PRO_QRY_PWRMGT_TEST 0XFF87111B /* Q */
-#define OID_RT_RPO_ASYNC_RWIO_TEST 0xFF87111C /* S */
-#define OID_RT_RPO_ASYNC_RWIO_POLL 0xFF87111D /* Q */
-#define OID_RT_PRO_SET_RF_INTFS 0xFF87111E /* S */
-#define OID_RT_POLL_RX_STATUS 0xFF87111F /* Q */
-
-#define OID_RT_PRO_CFG_DEBUG_MESSAGE 0xFF871120 /* Q,S */
-#define OID_RT_PRO_SET_DATA_RATE_EX 0xFF871121/* S */
-#define OID_RT_PRO_SET_BASIC_RATE 0xFF871122/* S */
-#define OID_RT_PRO_READ_TSSI 0xFF871123/* S */
-#define OID_RT_PRO_SET_POWER_TRACKING 0xFF871124/* S */
-
-#define OID_RT_PRO_QRY_PWRSTATE 0xFF871150 /* Q */
-#define OID_RT_PRO_SET_PWRSTATE 0xFF871151 /* S */
-
-/* Method 2 , using workitem */
-#define OID_RT_SET_READ_REG 0xFF871181 /* S */
-#define OID_RT_SET_WRITE_REG 0xFF871182 /* S */
-#define OID_RT_SET_BURST_READ_REG 0xFF871183 /* S */
-#define OID_RT_SET_BURST_WRITE_REG 0xFF871184 /* S */
-#define OID_RT_SET_WRITE_TXCMD 0xFF871185 /* S */
-#define OID_RT_SET_READ16_EEPROM 0xFF871186 /* S */
-#define OID_RT_SET_WRITE16_EEPROM 0xFF871187 /* S */
-#define OID_RT_QRY_POLL_WKITEM 0xFF871188 /* Q */
-
-/* For SDIO INTERFACE only */
-#define OID_RT_PRO_SYNCPAGERW_SRAM 0xFF8711A0 /* Q, S */
-#define OID_RT_PRO_871X_DRV_EXT 0xFF8711A1
-
-/* For USB INTERFACE only */
-#define OID_RT_PRO_USB_VENDOR_REQ 0xFF8711B0 /* Q, S */
-#define OID_RT_PRO_SCSI_AUTO_TEST 0xFF8711B1 /* S */
-#define OID_RT_PRO_USB_MAC_AC_FIFO_WRITE 0xFF8711B2 /* S */
-#define OID_RT_PRO_USB_MAC_RX_FIFO_READ 0xFF8711B3 /* Q */
-#define OID_RT_PRO_USB_MAC_RX_FIFO_POLLING 0xFF8711B4 /* Q */
-
-#define OID_RT_PRO_H2C_SET_RATE_TABLE 0xFF8711FB /* S */
-#define OID_RT_PRO_H2C_GET_RATE_TABLE 0xFF8711FC /* S */
-#define OID_RT_PRO_H2C_C2H_LBK_TEST 0xFF8711FE
-
-#define OID_RT_PRO_ENCRYPTION_CTRL 0xFF871200 /* Q, S */
-#define OID_RT_PRO_ADD_STA_INFO 0xFF871201 /* S */
-#define OID_RT_PRO_DELE_STA_INFO 0xFF871202 /* S */
-#define OID_RT_PRO_QUERY_DR_VARIABLE 0xFF871203 /* Q */
-
-#define OID_RT_PRO_RX_PACKET_TYPE 0xFF871204 /* Q, S */
-
-#define OID_RT_PRO_READ_EFUSE 0xFF871205 /* Q */
-#define OID_RT_PRO_WRITE_EFUSE 0xFF871206 /* S */
-#define OID_RT_PRO_RW_EFUSE_PGPKT 0xFF871207 /* Q, S */
-#define OID_RT_GET_EFUSE_CURRENT_SIZE 0xFF871208 /* Q */
-
-#define OID_RT_SET_BANDWIDTH 0xFF871209 /* S */
-#define OID_RT_SET_CRYSTAL_CAP 0xFF87120A /* S */
-
-#define OID_RT_SET_RX_PACKET_TYPE 0xFF87120B /* S */
-
-#define OID_RT_GET_EFUSE_MAX_SIZE 0xFF87120C /* Q */
-
-#define OID_RT_PRO_SET_TX_AGC_OFFSET 0xFF87120D /* S */
-
-#define OID_RT_PRO_SET_PKT_TEST_MODE 0xFF87120E /* S */
-
-#define OID_RT_PRO_FOR_EVM_TEST_SETTING 0xFF87120F /* S */
-
-#define OID_RT_PRO_GET_THERMAL_METER 0xFF871210 /* Q */
-
-#define OID_RT_RESET_PHY_RX_PACKET_COUNT 0xFF871211 /* S */
-#define OID_RT_GET_PHY_RX_PACKET_RECEIVED 0xFF871212 /* Q */
-#define OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR 0xFF871213 /* Q */
-
-#define OID_RT_SET_POWER_DOWN 0xFF871214 /* S */
-
-#define OID_RT_GET_POWER_MODE 0xFF871215 /* Q */
-
-#define OID_RT_PRO_EFUSE 0xFF871216 /* Q, S */
-#define OID_RT_PRO_EFUSE_MAP 0xFF871217 /* Q, S */
-
-#endif /* ifndef __CUSTOM_OID_H */
diff --git a/drivers/staging/r8188eu/include/odm.h b/drivers/staging/r8188eu/include/odm.h
index d9041ee576bb..f08655208b32 100644
--- a/drivers/staging/r8188eu/include/odm.h
+++ b/drivers/staging/r8188eu/include/odm.h
@@ -4,75 +4,6 @@
#ifndef __HALDMOUTSRC_H__
#define __HALDMOUTSRC_H__
-/* Definition */
-/* Define all team support ability. */
-
-/* Define for all teams. Please Define the constant in your precomp header. */
-
-/* define DM_ODM_SUPPORT_AP 0 */
-/* define DM_ODM_SUPPORT_ADSL 0 */
-/* define DM_ODM_SUPPORT_CE 0 */
-/* define DM_ODM_SUPPORT_MP 1 */
-
-/* Define ODM SW team support flag. */
-
-/* Antenna Switch Relative Definition. */
-
-/* Add new function SwAntDivCheck8192C(). */
-/* This is the main function of Antenna diversity function before link. */
-/* Mainly, it just retains last scan result and scan again. */
-/* After that, it compares the scan result to see which one gets better
- * RSSI. It selects antenna with better receiving power and returns better
- * scan result. */
-
-#define TP_MODE 0
-#define RSSI_MODE 1
-#define TRAFFIC_LOW 0
-#define TRAFFIC_HIGH 1
-
-/* 3 Tx Power Tracking */
-/* 3============================================================ */
-#define DPK_DELTA_MAPPING_NUM 13
-#define index_mapping_HP_NUM 15
-
-/* */
-/* 3 PSD Handler */
-/* 3============================================================ */
-
-#define AFH_PSD 1 /* 0:normal PSD scan, 1: only do 20 pts PSD */
-#define MODE_40M 0 /* 0:20M, 1:40M */
-#define PSD_TH2 3
-#define PSD_CHM 20 /* Minimum channel number for BT AFH */
-#define SIR_STEP_SIZE 3
-#define Smooth_Size_1 5
-#define Smooth_TH_1 3
-#define Smooth_Size_2 10
-#define Smooth_TH_2 4
-#define Smooth_Size_3 20
-#define Smooth_TH_3 4
-#define Smooth_Step_Size 5
-#define Adaptive_SIR 1
-#define PSD_RESCAN 4
-#define PSD_SCAN_INTERVAL 700 /* ms */
-
-/* 8723A High Power IGI Setting */
-#define DM_DIG_HIGH_PWR_IGI_LOWER_BOUND 0x22
-#define DM_DIG_Gmode_HIGH_PWR_IGI_LOWER_BOUND 0x28
-#define DM_DIG_HIGH_PWR_THRESHOLD 0x3a
-
-/* LPS define */
-#define DM_DIG_FA_TH0_LPS 4 /* 4 in lps */
-#define DM_DIG_FA_TH1_LPS 15 /* 15 lps */
-#define DM_DIG_FA_TH2_LPS 30 /* 30 lps */
-#define RSSI_OFFSET_DIG 0x05;
-
-/* ANT Test */
-#define ANTTESTALL 0x00 /* Ant A or B will be Testing */
-#define ANTTESTA 0x01 /* Ant A will be Testing */
-#define ANTTESTB 0x02 /* Ant B will be testing */
-
-/* structure and define */
-
/* Add for AP/ADSLpseudo DM structuer requirement. */
/* We need to remove to other position??? */
struct rtl8192cd_priv {
@@ -178,23 +109,7 @@ struct rx_hpc {
struct timer_list PSDTimer;
};
-#define ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
-#define ODM_ASSOCIATE_ENTRY_NUM ASSOCIATE_ENTRY_NUM
-
-/* This indicates two different steps. */
-/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to
- * the signal on the air. */
-/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
- * SWAW_STEP_PEAK with original RSSI to determine if it is necessary to
- * switch antenna. */
-
-#define SWAW_STEP_PEAK 0
-#define SWAW_STEP_DETERMINE 1
-
-#define TP_MODE 0
-#define RSSI_MODE 1
-#define TRAFFIC_LOW 0
-#define TRAFFIC_HIGH 1
+#define ODM_ASSOCIATE_ENTRY_NUM 32 /* Max size of AsocEntry[]. */
struct sw_ant_switch {
u8 try_flag;
@@ -210,8 +125,6 @@ struct sw_ant_switch {
/* Before link Antenna Switch check */
u8 SWAS_NoLink_State;
u32 SWAS_NoLink_BK_Reg860;
- bool ANTA_ON; /* To indicate Ant A is or not */
- bool ANTB_ON; /* To indicate Ant B is on or not */
s32 RSSI_sum_A;
s32 RSSI_sum_B;
@@ -225,16 +138,8 @@ struct sw_ant_switch {
u64 RXByteCnt_B;
u8 TrafficLoad;
struct timer_list SwAntennaSwitchTimer;
- /* Hybrid Antenna Diversity */
- u32 CCK_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
- u32 CCK_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
- u32 OFDM_Ant1_Cnt[ASSOCIATE_ENTRY_NUM];
- u32 OFDM_Ant2_Cnt[ASSOCIATE_ENTRY_NUM];
- u32 RSSI_Ant1_Sum[ASSOCIATE_ENTRY_NUM];
- u32 RSSI_Ant2_Sum[ASSOCIATE_ENTRY_NUM];
- u8 TxAnt[ASSOCIATE_ENTRY_NUM];
+ u8 TxAnt[ODM_ASSOCIATE_ENTRY_NUM];
u8 TargetSTA;
- u8 antsel;
u8 RxIdleAnt;
};
@@ -245,7 +150,6 @@ struct edca_turbo {
};
struct odm_rate_adapt {
- u8 Type; /* DM_Type_ByFW/DM_Type_ByDriver */
u8 HighRSSIThresh; /* if RSSI > HighRSSIThresh => RATRState is DM_RATR_STA_HIGH */
u8 LowRSSIThresh; /* if RSSI <= LowRSSIThresh => RATRState is DM_RATR_STA_LOW */
u8 RATRState; /* Current RSSI level, DM_RATR_STA_HIGH/DM_RATR_STA_MIDDLE/DM_RATR_STA_LOW */
@@ -254,33 +158,11 @@ struct odm_rate_adapt {
#define IQK_MAC_REG_NUM 4
#define IQK_ADDA_REG_NUM 16
-#define IQK_BB_REG_NUM_MAX 10
#define IQK_BB_REG_NUM 9
#define HP_THERMAL_NUM 8
#define AVG_THERMAL_NUM 8
#define IQK_Matrix_REG_NUM 8
-#define IQK_Matrix_Settings_NUM 1+24+21
-
-#define DM_Type_ByFWi 0
-#define DM_Type_ByDriver 1
-
-/* Declare for common info */
-
-struct odm_phy_status_info {
- u8 RxPWDBAll;
- u8 SignalQuality; /* in 0-100 index. */
- u8 RxMIMOSignalQuality[MAX_PATH_NUM_92CS]; /* EVM */
- u8 RxMIMOSignalStrength[MAX_PATH_NUM_92CS];/* in 0~100 index */
- s8 RxPower; /* in dBm Translate from PWdB */
- s8 RecvSignalPower;/* Real power in dBm for this packet, no
- * beautification and aggregation. Keep this raw
- * info to be used for the other procedures. */
- u8 BTRxRSSIPercentage;
- u8 SignalStrength; /* in 0-100 index. */
- u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
- u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
-};
struct odm_phy_dbg_info {
/* ODM Write,debug info */
@@ -342,48 +224,23 @@ enum odm_common_info_def {
/* Fixed value: */
/* HOOK BEFORE REG INIT----------- */
- ODM_CMNINFO_PLATFORM = 0,
ODM_CMNINFO_ABILITY, /* ODM_ABILITY_E */
- ODM_CMNINFO_INTERFACE, /* ODM_INTERFACE_E */
ODM_CMNINFO_MP_TEST_CHIP,
- ODM_CMNINFO_IC_TYPE, /* ODM_IC_TYPE_E */
- ODM_CMNINFO_CUT_VER, /* ODM_CUT_VERSION_E */
- ODM_CMNINFO_FAB_VER, /* ODM_FAB_E */
- ODM_CMNINFO_RF_TYPE, /* RF_PATH_E or ODM_RF_TYPE_E? */
- ODM_CMNINFO_BOARD_TYPE, /* ODM_BOARD_TYPE_E */
- ODM_CMNINFO_EXT_LNA, /* true */
- ODM_CMNINFO_EXT_PA,
- ODM_CMNINFO_EXT_TRSW,
- ODM_CMNINFO_PATCH_ID, /* CUSTOMER ID */
- ODM_CMNINFO_BINHCT_TEST,
- ODM_CMNINFO_BWIFI_TEST,
- ODM_CMNINFO_SMART_CONCURRENT,
/* HOOK BEFORE REG INIT----------- */
/* Dynamic value: */
/* POINTER REFERENCE----------- */
- ODM_CMNINFO_MAC_PHY_MODE, /* ODM_MAC_PHY_MODE_E */
ODM_CMNINFO_TX_UNI,
ODM_CMNINFO_RX_UNI,
ODM_CMNINFO_WM_MODE, /* ODM_WIRELESS_MODE_E */
- ODM_CMNINFO_BAND, /* ODM_BAND_TYPE_E */
ODM_CMNINFO_SEC_CHNL_OFFSET, /* ODM_SEC_CHNL_OFFSET_E */
ODM_CMNINFO_SEC_MODE, /* ODM_SECURITY_E */
ODM_CMNINFO_BW, /* ODM_BW_E */
ODM_CMNINFO_CHNL,
- ODM_CMNINFO_DMSP_GET_VALUE,
- ODM_CMNINFO_BUDDY_ADAPTOR,
- ODM_CMNINFO_DMSP_IS_MASTER,
ODM_CMNINFO_SCAN,
ODM_CMNINFO_POWER_SAVING,
- ODM_CMNINFO_ONE_PATH_CCA, /* ODM_CCA_PATH_E */
- ODM_CMNINFO_DRV_STOP,
- ODM_CMNINFO_PNP_IN,
- ODM_CMNINFO_INIT_ON,
- ODM_CMNINFO_ANT_TEST,
ODM_CMNINFO_NET_CLOSED,
- ODM_CMNINFO_MP_MODE,
/* POINTER REFERENCE----------- */
/* CALL BY VALUE------------- */
@@ -391,21 +248,8 @@ enum odm_common_info_def {
ODM_CMNINFO_WIFI_DISPLAY,
ODM_CMNINFO_LINK,
ODM_CMNINFO_RSSI_MIN,
- ODM_CMNINFO_RA_THRESHOLD_HIGH, /* u8 */
- ODM_CMNINFO_RA_THRESHOLD_LOW, /* u8 */
ODM_CMNINFO_RF_ANTENNA_TYPE, /* u8 */
- ODM_CMNINFO_BT_DISABLED,
- ODM_CMNINFO_BT_OPERATION,
- ODM_CMNINFO_BT_DIG,
- ODM_CMNINFO_BT_BUSY, /* Check Bt is using or not */
- ODM_CMNINFO_BT_DISABLE_EDCA,
/* CALL BY VALUE-------------*/
-
- /* Dynamic ptr array hook itms. */
- ODM_CMNINFO_STA_STATUS,
- ODM_CMNINFO_PHY_STATUS,
- ODM_CMNINFO_MAC_STATUS,
- ODM_CMNINFO_MAX,
};
/* 2011/10/20 MH Define ODM support ability. ODM_CMNINFO_ABILITY */
@@ -436,85 +280,7 @@ enum odm_ability_def {
ODM_RF_CALIBRATION = BIT(26),
};
-/* ODM_CMNINFO_INTERFACE */
-enum odm_interface_def {
- ODM_ITRF_PCIE = 0x1,
- ODM_ITRF_USB = 0x2,
- ODM_ITRF_SDIO = 0x4,
- ODM_ITRF_ALL = 0x7,
-};
-
-/* ODM_CMNINFO_IC_TYPE */
-enum odm_ic_type {
- ODM_RTL8192S = BIT(0),
- ODM_RTL8192C = BIT(1),
- ODM_RTL8192D = BIT(2),
- ODM_RTL8723A = BIT(3),
- ODM_RTL8188E = BIT(4),
- ODM_RTL8812 = BIT(5),
- ODM_RTL8821 = BIT(6),
-};
-
-#define ODM_IC_11N_SERIES \
- (ODM_RTL8192S | ODM_RTL8192C | ODM_RTL8192D | \
- ODM_RTL8723A | ODM_RTL8188E)
-#define ODM_IC_11AC_SERIES (ODM_RTL8812)
-
-/* ODM_CMNINFO_CUT_VER */
-enum odm_cut_version {
- ODM_CUT_A = 1,
- ODM_CUT_B = 2,
- ODM_CUT_C = 3,
- ODM_CUT_D = 4,
- ODM_CUT_E = 5,
- ODM_CUT_F = 6,
- ODM_CUT_TEST = 7,
-};
-
-/* ODM_CMNINFO_FAB_VER */
-enum odm_fab_Version {
- ODM_TSMC = 0,
- ODM_UMC = 1,
-};
-
-/* ODM_CMNINFO_RF_TYPE */
-/* For example 1T2R (A+AB = BIT(0)|BIT(4)|BIT(5)) */
-enum odm_rf_path {
- ODM_RF_TX_A = BIT(0),
- ODM_RF_TX_B = BIT(1),
- ODM_RF_TX_C = BIT(2),
- ODM_RF_TX_D = BIT(3),
- ODM_RF_RX_A = BIT(4),
- ODM_RF_RX_B = BIT(5),
- ODM_RF_RX_C = BIT(6),
- ODM_RF_RX_D = BIT(7),
-};
-
-enum odm_rf_type {
- ODM_1T1R = 0,
- ODM_1T2R = 1,
- ODM_2T2R = 2,
- ODM_2T3R = 3,
- ODM_2T4R = 4,
- ODM_3T3R = 5,
- ODM_3T4R = 6,
- ODM_4T4R = 7,
-};
-
-/* ODM Dynamic common info value definition */
-
-enum odm_mac_phy_mode {
- ODM_SMSP = 0,
- ODM_DMSP = 1,
- ODM_DMDP = 2,
-};
-
-enum odm_bt_coexist {
- ODM_BT_BUSY = 1,
- ODM_BT_ON = 2,
- ODM_BT_OFF = 3,
- ODM_BT_NONE = 4,
-};
+# define ODM_ITRF_USB 0x2
/* ODM_CMNINFO_OP_MODE */
enum odm_operation_mode {
@@ -538,52 +304,12 @@ enum odm_wireless_mode {
ODM_WM_AUTO = BIT(5),
};
-/* ODM_CMNINFO_BAND */
-enum odm_band_type {
- ODM_BAND_2_4G = BIT(0),
-};
-
-/* ODM_CMNINFO_SEC_CHNL_OFFSET */
-enum odm_sec_chnl_offset {
- ODM_DONT_CARE = 0,
- ODM_BELOW = 1,
- ODM_ABOVE = 2
-};
-
-/* ODM_CMNINFO_SEC_MODE */
-enum odm_security {
- ODM_SEC_OPEN = 0,
- ODM_SEC_WEP40 = 1,
- ODM_SEC_TKIP = 2,
- ODM_SEC_RESERVE = 3,
- ODM_SEC_AESCCMP = 4,
- ODM_SEC_WEP104 = 5,
- ODM_WEP_WPA_MIXED = 6, /* WEP + WPA */
- ODM_SEC_SMS4 = 7,
-};
-
/* ODM_CMNINFO_BW */
enum odm_bw {
ODM_BW20M = 0,
ODM_BW40M = 1,
};
-/* ODM_CMNINFO_BOARD_TYPE */
-enum odm_board_type {
- ODM_BOARD_NORMAL = 0,
- ODM_BOARD_HIGHPWR = 1,
- ODM_BOARD_MINICARD = 2,
- ODM_BOARD_SLIM = 3,
- ODM_BOARD_COMBO = 4,
-};
-
-/* ODM_CMNINFO_ONE_PATH_CCA */
-enum odm_cca_path {
- ODM_CCA_2R = 0,
- ODM_CCA_1R_A = 1,
- ODM_CCA_1R_B = 2,
-};
-
struct odm_ra_info {
u8 RateID;
u32 RateMask;
@@ -664,7 +390,7 @@ struct odm_rf_cal {
u8 ThermalValue_HP[HP_THERMAL_NUM];
u8 ThermalValue_HP_index;
- struct ijk_matrix_regs_set IQKMatrixRegSetting[IQK_Matrix_Settings_NUM];
+ struct ijk_matrix_regs_set IQKMatrixRegSetting;
u8 Delta_IQK;
u8 Delta_LCK;
@@ -680,7 +406,6 @@ struct odm_rf_cal {
u32 Reg864;
bool bIQKInitialized;
- bool bLCKInProgress;
bool bAntennaDetected;
u32 ADDA_backup[IQK_ADDA_REG_NUM];
u32 IQK_MAC_backup[IQK_MAC_REG_NUM];
@@ -730,7 +455,6 @@ enum ant_div_type {
CGCS_RX_HW_ANTDIV = 0x02,
FIXED_HW_ANTDIV = 0x03,
CG_TRX_SMART_ANTDIV = 0x04,
- CGCS_RX_SW_ANTDIV = 0x05,
};
/* Copy from SD4 defined structure. We use to support PHY DM integration. */
@@ -752,34 +476,9 @@ struct odm_dm_struct {
/* 1 COMMON INFORMATION */
/* Init Value */
/* HOOK BEFORE REG INIT----------- */
- /* ODM Platform info AP/ADSL/CE/MP = 1/2/3/4 */
- u8 SupportPlatform;
/* ODM Support Ability DIG/RATR/TX_PWR_TRACK/ ... = 1/2/3/... */
u32 SupportAbility;
- /* ODM PCIE/USB/SDIO/GSPI = 0/1/2/3 */
- u8 SupportInterface;
- /* ODM composite or independent. Bit oriented/ 92C+92D+ .... or any
- * other type = 1/2/3/... */
- u32 SupportICType;
- /* Cut Version TestChip/A-cut/B-cut... = 0/1/2/3/... */
- u8 CutVersion;
- /* Fab Version TSMC/UMC = 0/1 */
- u8 FabVersion;
- /* RF Type 4T4R/3T3R/2T2R/1T2R/1T1R/... */
- u8 RFType;
- /* Board Type Normal/HighPower/MiniCard/SLIM/Combo/. = 0/1/2/3/4/. */
- u8 BoardType;
- /* with external LNA NO/Yes = 0/1 */
- u8 ExtLNA;
- /* with external PA NO/Yes = 0/1 */
- u8 ExtPA;
- /* with external TRSW NO/Yes = 0/1 */
- u8 ExtTRSW;
- u8 PatchID; /* Customer ID */
- bool bInHctTest;
- bool bWIFITest;
-
- bool bDualMacSmartConcurrent;
+
u32 BK_SupportAbility;
u8 AntDivType;
/* HOOK BEFORE REG INIT----------- */
@@ -791,16 +490,12 @@ struct odm_dm_struct {
bool bool_temp;
struct adapter *adapter_temp;
- /* MAC PHY Mode SMSP/DMSP/DMDP = 0/1/2 */
- u8 *pMacPhyMode;
/* TX Unicast byte count */
u64 *pNumTxBytesUnicast;
/* RX Unicast byte count */
u64 *pNumRxBytesUnicast;
/* Wireless mode B/G/A/N = BIT(0)/BIT(1)/BIT(2)/BIT(3) */
u8 *pWirelessMode; /* ODM_WIRELESS_MODE_E */
- /* Frequence band 2.4G/5G = 0/1 */
- u8 *pBandType;
/* Secondary channel offset don't_care/below/above = 0/1/2 */
u8 *pSecChOffset;
/* Security mode Open/WEP/AES/TKIP = 0/1/2/3 */
@@ -850,13 +545,6 @@ struct odm_dm_struct {
struct odm_ra_info RAInfo[ODM_ASSOCIATE_ENTRY_NUM]; /* Use MacID as
* array index. STA MacID=0,
* VWiFi Client MacID={1, ODM_ASSOCIATE_ENTRY_NUM-1} */
- /* */
- /* 2012/02/14 MH Add to share 88E ra with other SW team. */
- /* We need to colelct all support abilit to a proper area. */
- /* */
- bool RaSupport88E;
-
- /* Define ........... */
/* Latest packet phy info (ODM write) */
struct odm_phy_dbg_info PhyDbgInfo;
@@ -895,9 +583,6 @@ struct odm_dm_struct {
bool bPSDinProcess;
bool bDMInitialGainEnable;
- /* for rate adaptive, in fact, 88c/92c fw will handle this */
- u8 bUseRAMask;
-
struct odm_rate_adapt RateAdaptive;
struct odm_rf_cal RFCalibrateInfo;
@@ -911,7 +596,6 @@ struct odm_dm_struct {
u8 BbSwingIdxCckCurrent;
u8 BbSwingIdxCckBase;
bool BbSwingFlagCck;
- u8 *mp_mode;
/* ODM system resource. */
/* ODM relative time. */
@@ -921,13 +605,6 @@ struct odm_dm_struct {
struct timer_list FastAntTrainingTimer;
}; /* DM_Dynamic_Mechanism_Structure */
-enum ODM_RF_CONTENT {
- odm_radioa_txt = 0x1000,
- odm_radiob_txt = 0x1001,
- odm_radioc_txt = 0x1002,
- odm_radiod_txt = 0x1003
-};
-
enum odm_bb_config_type {
CONFIG_BB_PHY_REG,
CONFIG_BB_AGC_TAB,
@@ -935,38 +612,9 @@ enum odm_bb_config_type {
CONFIG_BB_PHY_REG_PG,
};
-/* Status code */
-enum rt_status {
- RT_STATUS_SUCCESS,
- RT_STATUS_FAILURE,
- RT_STATUS_PENDING,
- RT_STATUS_RESOURCE,
- RT_STATUS_INVALID_CONTEXT,
- RT_STATUS_INVALID_PARAMETER,
- RT_STATUS_NOT_SUPPORT,
- RT_STATUS_OS_API_FAILED,
-};
-
-/* 3=========================================================== */
-/* 3 DIG */
-/* 3=========================================================== */
-
-enum dm_dig_op {
- RT_TYPE_THRESH_HIGH = 0,
- RT_TYPE_THRESH_LOW = 1,
- RT_TYPE_BACKOFF = 2,
- RT_TYPE_RX_GAIN_MIN = 3,
- RT_TYPE_RX_GAIN_MAX = 4,
- RT_TYPE_ENABLE = 5,
- RT_TYPE_DISABLE = 6,
- DIG_OP_TYPE_MAX
-};
-
#define DM_DIG_THRESH_HIGH 40
#define DM_DIG_THRESH_LOW 35
-#define DM_SCAN_RSSI_TH 0x14 /* scan return issue for LC */
-
#define DM_false_ALARM_THRESH_LOW 400
#define DM_false_ALARM_THRESH_HIGH 1000
@@ -974,65 +622,18 @@ enum dm_dig_op {
#define DM_DIG_MIN_NIC 0x1e /* 0x22/0x1c */
#define DM_DIG_MAX_AP 0x32
-#define DM_DIG_MIN_AP 0x20
-
-#define DM_DIG_MAX_NIC_HP 0x46
-#define DM_DIG_MIN_NIC_HP 0x2e
-
-#define DM_DIG_MAX_AP_HP 0x42
-#define DM_DIG_MIN_AP_HP 0x30
/* vivi 92c&92d has different definition, 20110504 */
/* this is for 92c */
#define DM_DIG_FA_TH0 0x200/* 0x20 */
#define DM_DIG_FA_TH1 0x300/* 0x100 */
#define DM_DIG_FA_TH2 0x400/* 0x200 */
-/* this is for 92d */
-#define DM_DIG_FA_TH0_92D 0x100
-#define DM_DIG_FA_TH1_92D 0x400
-#define DM_DIG_FA_TH2_92D 0x600
#define DM_DIG_BACKOFF_MAX 12
#define DM_DIG_BACKOFF_MIN -4
#define DM_DIG_BACKOFF_DEFAULT 10
/* 3=========================================================== */
-/* 3 AGC RX High Power Mode */
-/* 3=========================================================== */
-#define LNA_Low_Gain_1 0x64
-#define LNA_Low_Gain_2 0x5A
-#define LNA_Low_Gain_3 0x58
-
-#define FA_RXHP_TH1 5000
-#define FA_RXHP_TH2 1500
-#define FA_RXHP_TH3 800
-#define FA_RXHP_TH4 600
-#define FA_RXHP_TH5 500
-
-/* 3=========================================================== */
-/* 3 EDCA */
-/* 3=========================================================== */
-
-/* 3=========================================================== */
-/* 3 Dynamic Tx Power */
-/* 3=========================================================== */
-/* Dynamic Tx Power Control Threshold */
-#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
-#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
-#define TX_POWER_NEAR_FIELD_THRESH_AP 0x3F
-
-#define TxHighPwrLevel_Normal 0
-#define TxHighPwrLevel_Level1 1
-#define TxHighPwrLevel_Level2 2
-#define TxHighPwrLevel_BT1 3
-#define TxHighPwrLevel_BT2 4
-#define TxHighPwrLevel_15 5
-#define TxHighPwrLevel_35 6
-#define TxHighPwrLevel_50 7
-#define TxHighPwrLevel_70 8
-#define TxHighPwrLevel_100 9
-
-/* 3=========================================================== */
/* 3 Rate Adaptive */
/* 3=========================================================== */
#define DM_RATR_STA_INIT 0
@@ -1065,11 +666,7 @@ enum dm_swas {
Antenna_MAX = 3,
};
-/* Maximal number of antenna detection mechanism needs to perform. */
-#define MAX_ANTENNA_DETECTION_CNT 10
-
/* Extern Global Variables. */
-#define OFDM_TABLE_SIZE_92C 37
#define OFDM_TABLE_SIZE_92D 43
#define CCK_TABLE_SIZE 33
@@ -1079,44 +676,19 @@ extern u8 CCKSwingTable_Ch14 [CCK_TABLE_SIZE][8];
/* check Sta pointer valid or not */
#define IS_STA_VALID(pSta) (pSta)
-/* 20100514 Joseph: Add definition for antenna switching test after link. */
-/* This indicates two different the steps. */
-/* In SWAW_STEP_PEAK, driver needs to switch antenna and listen to the
- * signal on the air. */
-/* In SWAW_STEP_DETERMINE, driver just compares the signal captured in
- * SWAW_STEP_PEAK */
-/* with original RSSI to determine if it is necessary to switch antenna. */
-#define SWAW_STEP_PEAK 0
-#define SWAW_STEP_DETERMINE 1
void ODM_Write_DIG(struct odm_dm_struct *pDM_Odm, u8 CurrentIGI);
void ODM_Write_CCK_CCA_Thres(struct odm_dm_struct *pDM_Odm, u8 CurCCK_CCAThres);
void ODM_SetAntenna(struct odm_dm_struct *pDM_Odm, u8 Antenna);
-#define dm_RF_Saving ODM_RF_Saving
void ODM_RF_Saving(struct odm_dm_struct *pDM_Odm, u8 bForceInNormal);
-#define SwAntDivRestAfterLink ODM_SwAntDivRestAfterLink
-void ODM_SwAntDivRestAfterLink(struct odm_dm_struct *pDM_Odm);
-
-#define dm_CheckTXPowerTracking ODM_TXPowerTrackingCheck
void ODM_TXPowerTrackingCheck(struct odm_dm_struct *pDM_Odm);
bool ODM_RAStateCheck(struct odm_dm_struct *pDM_Odm, s32 RSSI,
bool bForceUpdate, u8 *pRATRState);
-#define dm_SWAW_RSSI_Check ODM_SwAntDivChkPerPktRssi
-void ODM_SwAntDivChkPerPktRssi(struct odm_dm_struct *pDM_Odm, u8 StationID,
- struct odm_phy_status_info *pPhyInfo);
-
-u32 ConvertTo_dB(u32 Value);
-
-u32 GetPSDData(struct odm_dm_struct *pDM_Odm, unsigned int point,
- u8 initial_gain_psd);
-
-void odm_DIGbyRSSI_LPS(struct odm_dm_struct *pDM_Odm);
-
u32 ODM_Get_Rate_Bitmap(struct odm_dm_struct *pDM_Odm, u32 macid,
u32 ra_mask, u8 rssi_level);
@@ -1130,25 +702,6 @@ void ODM_CmnInfoInit(struct odm_dm_struct *pDM_Odm,
void ODM_CmnInfoHook(struct odm_dm_struct *pDM_Odm,
enum odm_common_info_def CmnInfo, void *pValue);
-void ODM_CmnInfoPtrArrayHook(struct odm_dm_struct *pDM_Odm,
- enum odm_common_info_def CmnInfo,
- u16 Index, void *pValue);
-
void ODM_CmnInfoUpdate(struct odm_dm_struct *pDM_Odm, u32 CmnInfo, u64 Value);
-void ODM_InitAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_CancelAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_ReleaseAllTimers(struct odm_dm_struct *pDM_Odm);
-
-void ODM_AntselStatistics_88C(struct odm_dm_struct *pDM_Odm, u8 MacId,
- u32 PWDBAll, bool isCCKrate);
-
-void ODM_SingleDualAntennaDefaultSetting(struct odm_dm_struct *pDM_Odm);
-
-bool ODM_SingleDualAntennaDetection(struct odm_dm_struct *pDM_Odm, u8 mode);
-
-void odm_dtc(struct odm_dm_struct *pDM_Odm);
-
#endif
diff --git a/drivers/staging/r8188eu/include/odm_HWConfig.h b/drivers/staging/r8188eu/include/odm_HWConfig.h
index 9b2ab3bcf992..3ed265e00ac1 100644
--- a/drivers/staging/r8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/r8188eu/include/odm_HWConfig.h
@@ -89,21 +89,12 @@ struct phy_status_rpt {
#endif
};
-void odm_Init_RSSIForDM(struct odm_dm_struct *pDM_Odm);
-
void ODM_PhyStatusQuery(struct odm_dm_struct *pDM_Odm,
- struct odm_phy_status_info *pPhyInfo,
+ struct phy_info *pPhyInfo,
u8 *pPhyStatus,
struct odm_per_pkt_info *pPktinfo,
struct adapter *adapt);
-void ODM_MacStatusQuery(struct odm_dm_struct *pDM_Odm,
- u8 *pMacStatus,
- u8 MacID,
- bool bPacketMatchBSSID,
- bool bPacketToSelf,
- bool bPacketBeacon);
-
enum HAL_STATUS ODM_ConfigRFWithHeaderFile(struct odm_dm_struct *pDM_Odm,
enum rf_radio_path Content,
enum rf_radio_path eRFPath);
diff --git a/drivers/staging/r8188eu/include/odm_RTL8188E.h b/drivers/staging/r8188eu/include/odm_RTL8188E.h
index 00d2678532f8..96e50c9224aa 100644
--- a/drivers/staging/r8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RTL8188E.h
@@ -11,8 +11,6 @@
#define MAIN_ANT_CGCS_RX 0
#define AUX_ANT_CGCS_RX 1
-void ODM_DIG_LowerBound_88E(struct odm_dm_struct *pDM_Odm);
-
void ODM_AntennaDiversityInit_88E(struct odm_dm_struct *pDM_Odm);
void ODM_AntennaDiversity_88E(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
index 86b5b2d24210..634454bffdb6 100644
--- a/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
+++ b/drivers/staging/r8188eu/include/odm_RegConfig8188E.h
@@ -10,9 +10,6 @@ void odm_ConfigRFReg_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u32 Data,
void odm_ConfigRF_RadioA_8188E(struct odm_dm_struct *pDM_Odm,
u32 Addr, u32 Data);
-void odm_ConfigRF_RadioB_8188E(struct odm_dm_struct *pDM_Odm,
- u32 Addr, u32 Data);
-
void odm_ConfigMAC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr, u8 Data);
void odm_ConfigBB_AGC_8188E(struct odm_dm_struct *pDM_Odm, u32 Addr,
diff --git a/drivers/staging/r8188eu/include/odm_RegDefine11AC.h b/drivers/staging/r8188eu/include/odm_RegDefine11AC.h
deleted file mode 100644
index bba7511cf244..000000000000
--- a/drivers/staging/r8188eu/include/odm_RegDefine11AC.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef __ODM_REGDEFINE11AC_H__
-#define __ODM_REGDEFINE11AC_H__
-
-/* 2 RF REG LIST */
-
-/* 2 BB REG LIST */
-/* PAGE 8 */
-/* PAGE 9 */
-#define ODM_REG_OFDM_FA_RST_11AC 0x9A4
-/* PAGE A */
-#define ODM_REG_CCK_CCA_11AC 0xA0A
-#define ODM_REG_CCK_FA_RST_11AC 0xA2C
-#define ODM_REG_CCK_FA_11AC 0xA5C
-/* PAGE C */
-#define ODM_REG_IGI_A_11AC 0xC50
-/* PAGE E */
-#define ODM_REG_IGI_B_11AC 0xE50
-/* PAGE F */
-#define ODM_REG_OFDM_FA_11AC 0xF48
-
-/* 2 MAC REG LIST */
-
-/* DIG Related */
-#define ODM_BIT_IGI_11AC 0xFFFFFFFF
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_RegDefine11N.h b/drivers/staging/r8188eu/include/odm_RegDefine11N.h
index 5d1d73490c1c..82a602b39cc7 100644
--- a/drivers/staging/r8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/r8188eu/include/odm_RegDefine11N.h
@@ -4,56 +4,20 @@
#ifndef __ODM_REGDEFINE11N_H__
#define __ODM_REGDEFINE11N_H__
-/* 2 RF REG LIST */
-#define ODM_REG_RF_MODE_11N 0x00
-#define ODM_REG_RF_0B_11N 0x0B
-#define ODM_REG_CHNBW_11N 0x18
-#define ODM_REG_T_METER_11N 0x24
-#define ODM_REG_RF_25_11N 0x25
-#define ODM_REG_RF_26_11N 0x26
-#define ODM_REG_RF_27_11N 0x27
-#define ODM_REG_RF_2B_11N 0x2B
-#define ODM_REG_RF_2C_11N 0x2C
-#define ODM_REG_RXRF_A3_11N 0x3C
-#define ODM_REG_T_METER_92D_11N 0x42
-#define ODM_REG_T_METER_88E_11N 0x42
-
/* 2 BB REG LIST */
/* PAGE 8 */
-#define ODM_REG_BB_CTRL_11N 0x800
-#define ODM_REG_RF_PIN_11N 0x804
-#define ODM_REG_PSD_CTRL_11N 0x808
#define ODM_REG_TX_ANT_CTRL_11N 0x80C
-#define ODM_REG_BB_PWR_SAV5_11N 0x818
-#define ODM_REG_CCK_RPT_FORMAT_11N 0x824
#define ODM_REG_RX_DEFUALT_A_11N 0x858
-#define ODM_REG_RX_DEFUALT_B_11N 0x85A
-#define ODM_REG_BB_PWR_SAV3_11N 0x85C
#define ODM_REG_ANTSEL_CTRL_11N 0x860
#define ODM_REG_RX_ANT_CTRL_11N 0x864
-#define ODM_REG_PIN_CTRL_11N 0x870
-#define ODM_REG_BB_PWR_SAV1_11N 0x874
-#define ODM_REG_ANTSEL_PATH_11N 0x878
-#define ODM_REG_BB_3WIRE_11N 0x88C
-#define ODM_REG_SC_CNT_11N 0x8C4
-#define ODM_REG_PSD_DATA_11N 0x8B4
+#define ODM_REG_PIN_CTRL_11N 0x870
+#define ODM_REG_SC_CNT_11N 0x8C4
/* PAGE 9 */
#define ODM_REG_ANT_MAPPING1_11N 0x914
-#define ODM_REG_ANT_MAPPING2_11N 0x918
/* PAGE A */
-#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
-#define ODM_REG_CCK_CCA_11N 0xA0A
-#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
-#define ODM_REG_CCK_ANTDIV_PARA3_11N 0xA10
-#define ODM_REG_CCK_ANTDIV_PARA4_11N 0xA14
-#define ODM_REG_CCK_FILTER_PARA1_11N 0xA22
-#define ODM_REG_CCK_FILTER_PARA2_11N 0xA23
-#define ODM_REG_CCK_FILTER_PARA3_11N 0xA24
-#define ODM_REG_CCK_FILTER_PARA4_11N 0xA25
-#define ODM_REG_CCK_FILTER_PARA5_11N 0xA26
-#define ODM_REG_CCK_FILTER_PARA6_11N 0xA27
-#define ODM_REG_CCK_FILTER_PARA7_11N 0xA28
-#define ODM_REG_CCK_FILTER_PARA8_11N 0xA29
+#define ODM_REG_CCK_ANTDIV_PARA1_11N 0xA00
+#define ODM_REG_CCK_CCA_11N 0xA0A
+#define ODM_REG_CCK_ANTDIV_PARA2_11N 0xA0C
#define ODM_REG_CCK_FA_RST_11N 0xA2C
#define ODM_REG_CCK_FA_MSB_11N 0xA58
#define ODM_REG_CCK_FA_LSB_11N 0xA5C
@@ -61,28 +25,10 @@
#define ODM_REG_BB_PWR_SAV4_11N 0xA74
/* PAGE B */
#define ODM_REG_LNA_SWITCH_11N 0xB2C
-#define ODM_REG_PATH_SWITCH_11N 0xB30
-#define ODM_REG_RSSI_CTRL_11N 0xB38
-#define ODM_REG_CONFIG_ANTA_11N 0xB68
-#define ODM_REG_RSSI_BT_11N 0xB9C
/* PAGE C */
#define ODM_REG_OFDM_FA_HOLDC_11N 0xC00
-#define ODM_REG_RX_PATH_11N 0xC04
-#define ODM_REG_TRMUX_11N 0xC08
#define ODM_REG_OFDM_FA_RSTC_11N 0xC0C
-#define ODM_REG_RXIQI_MATRIX_11N 0xC14
-#define ODM_REG_TXIQK_MATRIX_LSB1_11N 0xC4C
-#define ODM_REG_IGI_A_11N 0xC50
-#define ODM_REG_ANTDIV_PARA2_11N 0xC54
-#define ODM_REG_IGI_B_11N 0xC58
-#define ODM_REG_ANTDIV_PARA3_11N 0xC5C
-#define ODM_REG_BB_PWR_SAV2_11N 0xC70
-#define ODM_REG_RX_OFF_11N 0xC7C
-#define ODM_REG_TXIQK_MATRIXA_11N 0xC80
-#define ODM_REG_TXIQK_MATRIXB_11N 0xC88
-#define ODM_REG_TXIQK_MATRIXA_LSB2_11N 0xC94
-#define ODM_REG_TXIQK_MATRIXB_LSB2_11N 0xC9C
-#define ODM_REG_RXIQK_MATRIX_LSB_11N 0xCA0
+#define ODM_REG_IGI_A_11N 0xC50
#define ODM_REG_ANTDIV_PARA1_11N 0xCA4
#define ODM_REG_OFDM_FA_TYPE1_11N 0xCF0
/* PAGE D */
@@ -90,54 +36,12 @@
#define ODM_REG_OFDM_FA_TYPE2_11N 0xDA0
#define ODM_REG_OFDM_FA_TYPE3_11N 0xDA4
#define ODM_REG_OFDM_FA_TYPE4_11N 0xDA8
-/* PAGE E */
-#define ODM_REG_TXAGC_A_6_18_11N 0xE00
-#define ODM_REG_TXAGC_A_24_54_11N 0xE04
-#define ODM_REG_TXAGC_A_1_MCS32_11N 0xE08
-#define ODM_REG_TXAGC_A_MCS0_3_11N 0xE10
-#define ODM_REG_TXAGC_A_MCS4_7_11N 0xE14
-#define ODM_REG_TXAGC_A_MCS8_11_11N 0xE18
-#define ODM_REG_TXAGC_A_MCS12_15_11N 0xE1C
-#define ODM_REG_FPGA0_IQK_11N 0xE28
-#define ODM_REG_TXIQK_TONE_A_11N 0xE30
-#define ODM_REG_RXIQK_TONE_A_11N 0xE34
-#define ODM_REG_TXIQK_PI_A_11N 0xE38
-#define ODM_REG_RXIQK_PI_A_11N 0xE3C
-#define ODM_REG_TXIQK_11N 0xE40
-#define ODM_REG_RXIQK_11N 0xE44
-#define ODM_REG_IQK_AGC_PTS_11N 0xE48
-#define ODM_REG_IQK_AGC_RSP_11N 0xE4C
-#define ODM_REG_BLUETOOTH_11N 0xE6C
-#define ODM_REG_RX_WAIT_CCA_11N 0xE70
-#define ODM_REG_TX_CCK_RFON_11N 0xE74
-#define ODM_REG_TX_CCK_BBON_11N 0xE78
-#define ODM_REG_OFDM_RFON_11N 0xE7C
-#define ODM_REG_OFDM_BBON_11N 0xE80
-#define ODM_REG_TX2RX_11N 0xE84
-#define ODM_REG_TX2TX_11N 0xE88
-#define ODM_REG_RX_CCK_11N 0xE8C
-#define ODM_REG_RX_OFDM_11N 0xED0
-#define ODM_REG_RX_WAIT_RIFS_11N 0xED4
-#define ODM_REG_RX2RX_11N 0xED8
-#define ODM_REG_STANDBY_11N 0xEDC
-#define ODM_REG_SLEEP_11N 0xEE0
-#define ODM_REG_PMPD_ANAEN_11N 0xEEC
/* 2 MAC REG LIST */
-#define ODM_REG_BB_RST_11N 0x02
#define ODM_REG_ANTSEL_PIN_11N 0x4C
-#define ODM_REG_EARLY_MODE_11N 0x4D0
-#define ODM_REG_RSSI_MONITOR_11N 0x4FE
-#define ODM_REG_EDCA_VO_11N 0x500
-#define ODM_REG_EDCA_VI_11N 0x504
-#define ODM_REG_EDCA_BE_11N 0x508
-#define ODM_REG_EDCA_BK_11N 0x50C
-#define ODM_REG_TXPAUSE_11N 0x522
-#define ODM_REG_RESP_TX_11N 0x6D8
-#define ODM_REG_ANT_TRAIN_PARA1_11N 0x7b0
-#define ODM_REG_ANT_TRAIN_PARA2_11N 0x7b4
+#define ODM_REG_RESP_TX_11N 0x6D8
/* DIG Related */
-#define ODM_BIT_IGI_11N 0x0000007F
+#define ODM_BIT_IGI_11N 0x0000007F
#endif
diff --git a/drivers/staging/r8188eu/include/odm_interface.h b/drivers/staging/r8188eu/include/odm_interface.h
index 6b589413d56c..17a315d19a50 100644
--- a/drivers/staging/r8188eu/include/odm_interface.h
+++ b/drivers/staging/r8188eu/include/odm_interface.h
@@ -4,47 +4,6 @@
#ifndef __ODM_INTERFACE_H__
#define __ODM_INTERFACE_H__
-/* */
-/* =========== Constant/Structure/Enum/... Define */
-/* */
-
-/* */
-/* =========== Macro Define */
-/* */
-
-#define _reg_all(_name) ODM_##_name
-#define _reg_ic(_name, _ic) ODM_##_name##_ic
-#define _bit_all(_name) BIT_##_name
-#define _bit_ic(_name, _ic) BIT_##_name##_ic
-
-/* _cat: implemented by Token-Pasting Operator. */
-
-/*===================================
-
-#define ODM_REG_DIG_11N 0xC50
-#define ODM_REG_DIG_11AC 0xDDD
-
-ODM_REG(DIG,_pDM_Odm)
-=====================================*/
-
-#define _reg_11N(_name) ODM_REG_##_name##_11N
-#define _reg_11AC(_name) ODM_REG_##_name##_11AC
-#define _bit_11N(_name) ODM_BIT_##_name##_11N
-#define _bit_11AC(_name) ODM_BIT_##_name##_11AC
-
-#define _cat(_name, _ic_type, _func) \
- ( \
- ((_ic_type) & ODM_IC_11N_SERIES) ? _func##_11N(_name) : \
- _func##_11AC(_name) \
- )
-
-/* _name: name of register or bit. */
-/* Example: "ODM_REG(R_A_AGC_CORE1, pDM_Odm)" */
-/* gets "ODM_R_A_AGC_CORE1" or "ODM_R_A_AGC_CORE1_8192C",
- * depends on SupportICType. */
-#define ODM_REG(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _reg)
-#define ODM_BIT(_name, _pDM_Odm) _cat(_name, _pDM_Odm->SupportICType, _bit)
-
enum odm_h2c_cmd {
ODM_H2C_RSSI_REPORT = 0,
ODM_H2C_PSD_RESULT= 1,
@@ -62,8 +21,6 @@ typedef void (*RT_WORKITEM_CALL_BACK)(void *pContext);
u8 ODM_Read1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-u16 ODM_Read2Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
-
u32 ODM_Read4Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr);
void ODM_Write1Byte(struct odm_dm_struct *pDM_Odm, u32 RegAddr, u8 Data);
@@ -89,59 +46,14 @@ u32 ODM_GetRFReg(struct odm_dm_struct *pDM_Odm, enum rf_radio_path eRFPath,
u32 RegAddr, u32 BitMask);
/* Memory Relative Function. */
-void ODM_AllocateMemory(struct odm_dm_struct *pDM_Odm, void **pPtr, u32 length);
-void ODM_FreeMemory(struct odm_dm_struct *pDM_Odm, void *pPtr, u32 length);
-
s32 ODM_CompareMemory(struct odm_dm_struct *pDM_Odm, void *pBuf1, void *pBuf2,
u32 length);
-/* ODM MISC-spin lock relative API. */
-void ODM_AcquireSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-void ODM_ReleaseSpinLock(struct odm_dm_struct *pDM_Odm,
- enum RT_SPINLOCK_TYPE type);
-
-/* ODM MISC-workitem relative API. */
-void ODM_InitializeWorkItem(struct odm_dm_struct *pDM_Odm, void *pRtWorkItem,
- RT_WORKITEM_CALL_BACK RtWorkItemCallback,
- void *pContext, const char *szID);
-
-void ODM_StartWorkItem(void *pRtWorkItem);
-
-void ODM_StopWorkItem(void *pRtWorkItem);
-
-void ODM_FreeWorkItem(void *pRtWorkItem);
-
-void ODM_ScheduleWorkItem(void *pRtWorkItem);
-
-void ODM_IsWorkItemScheduled(void *pRtWorkItem);
-
/* ODM Timer relative API. */
-void ODM_StallExecution(u32 usDelay);
-
void ODM_delay_ms(u32 ms);
void ODM_delay_us(u32 us);
void ODM_sleep_ms(u32 ms);
-void ODM_sleep_us(u32 us);
-
-void ODM_SetTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer,
- u32 msDelay);
-
-void ODM_InitializeTimer(struct odm_dm_struct *pDM_Odm,
- struct timer_list *pTimer, void *CallBackFunc,
- void *pContext, const char *szID);
-
-void ODM_CancelTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-
-void ODM_ReleaseTimer(struct odm_dm_struct *pDM_Odm, struct timer_list *pTimer);
-
-/* ODM FW relative API. */
-u32 ODM_FillH2CCmd(u8 *pH2CBuffer, u32 H2CBufferLen, u32 CmdNum,
- u32 *pElementID, u32 *pCmdLen, u8 **pCmbBuffer,
- u8 *CmdStartSeq);
-
#endif /* __ODM_INTERFACE_H__ */
diff --git a/drivers/staging/r8188eu/include/odm_precomp.h b/drivers/staging/r8188eu/include/odm_precomp.h
index a1d6d674bda6..22299f167af8 100644
--- a/drivers/staging/r8188eu/include/odm_precomp.h
+++ b/drivers/staging/r8188eu/include/odm_precomp.h
@@ -18,7 +18,6 @@
#include "odm.h"
#include "odm_HWConfig.h"
-#include "odm_RegDefine11AC.h"
#include "odm_RegDefine11N.h"
#include "HalPhyRf_8188e.h"/* for IQK,LCK,Power-tracking */
@@ -26,7 +25,6 @@
#include "rtl8188e_hal.h"
#include "odm_interface.h"
-#include "odm_reg.h"
#include "HalHWImg8188E_MAC.h"
#include "HalHWImg8188E_RF.h"
@@ -38,38 +36,18 @@
void odm_DIGInit(struct odm_dm_struct *pDM_Odm);
void odm_RateAdaptiveMaskInit(struct odm_dm_struct *pDM_Odm);
void odm_DynamicBBPowerSavingInit(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPowerInit(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingInit(struct odm_dm_struct *pDM_Odm);
void ODM_EdcaTurboInit(struct odm_dm_struct *pDM_Odm);
void odm_SwAntDivInit_NIC(struct odm_dm_struct *pDM_Odm);
-void odm_GlobalAdapterCheck(void);
void odm_CommonInfoSelfUpdate(struct odm_dm_struct *pDM_Odm);
void odm_FalseAlarmCounterStatistics(struct odm_dm_struct *pDM_Odm);
void odm_DIG(struct odm_dm_struct *pDM_Odm);
void odm_CCKPacketDetectionThresh(struct odm_dm_struct *pDM_Odm);
-void odm_RefreshRateAdaptiveMaskMP(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicBBPowerSaving(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivChkAntSwitch(struct odm_dm_struct *pDM_Odm, u8 Step);
void odm_EdcaTurboCheck(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPower(struct odm_dm_struct *pDM_Odm);
void odm_CommonInfoSelfInit(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivInit(struct odm_dm_struct *pDM_Odm);
void odm_RSSIMonitorCheck(struct odm_dm_struct *pDM_Odm);
void odm_RefreshRateAdaptiveMask(struct odm_dm_struct *pDM_Odm);
-void odm_1R_CCA(struct odm_dm_struct *pDM_Odm);
-void odm_RefreshRateAdaptiveMaskCE(struct odm_dm_struct *pDM_Odm);
-void odm_RefreshRateAdaptiveMaskAPADSL(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPowerNIC(struct odm_dm_struct *pDM_Odm);
-void odm_DynamicTxPowerAP(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckMP(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_RSSIMonitorCheckAP(struct odm_dm_struct *pDM_Odm);
void odm_TXPowerTrackingThermalMeterInit(struct odm_dm_struct *pDM_Odm);
-void odm_EdcaTurboCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckCE(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckMP(struct odm_dm_struct *pDM_Odm);
-void odm_TXPowerTrackingCheckAP(struct odm_dm_struct *pDM_Odm);
-void odm_SwAntDivChkAntSwitchCallback(struct timer_list *t);
void odm_InitHybridAntDiv(struct odm_dm_struct *pDM_Odm);
void odm_HwAntDiv(struct odm_dm_struct *pDM_Odm);
diff --git a/drivers/staging/r8188eu/include/odm_reg.h b/drivers/staging/r8188eu/include/odm_reg.h
deleted file mode 100644
index 78d7e904947c..000000000000
--- a/drivers/staging/r8188eu/include/odm_reg.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. i*/
-
-#ifndef __HAL_ODM_REG_H__
-#define __HAL_ODM_REG_H__
-
-/* Register Definition */
-
-/* MAC REG */
-#define ODM_BB_RESET 0x002
-#define ODM_DUMMY 0x4fe
-#define ODM_EDCA_VO_PARAM 0x500
-#define ODM_EDCA_VI_PARAM 0x504
-#define ODM_EDCA_BE_PARAM 0x508
-#define ODM_EDCA_BK_PARAM 0x50C
-#define ODM_TXPAUSE 0x522
-
-/* BB REG */
-#define ODM_FPGA_PHY0_PAGE8 0x800
-#define ODM_PSD_SETTING 0x808
-#define ODM_AFE_SETTING 0x818
-#define ODM_TXAGC_B_6_18 0x830
-#define ODM_TXAGC_B_24_54 0x834
-#define ODM_TXAGC_B_MCS32_5 0x838
-#define ODM_TXAGC_B_MCS0_MCS3 0x83c
-#define ODM_TXAGC_B_MCS4_MCS7 0x848
-#define ODM_TXAGC_B_MCS8_MCS11 0x84c
-#define ODM_ANALOG_REGISTER 0x85c
-#define ODM_RF_INTERFACE_OUTPUT 0x860
-#define ODM_TXAGC_B_MCS12_MCS15 0x868
-#define ODM_TXAGC_B_11_A_2_11 0x86c
-#define ODM_AD_DA_LSB_MASK 0x874
-#define ODM_ENABLE_3_WIRE 0x88c
-#define ODM_PSD_REPORT 0x8b4
-#define ODM_R_ANT_SELECT 0x90c
-#define ODM_CCK_ANT_SELECT 0xa07
-#define ODM_CCK_PD_THRESH 0xa0a
-#define ODM_CCK_RF_REG1 0xa11
-#define ODM_CCK_MATCH_FILTER 0xa20
-#define ODM_CCK_RAKE_MAC 0xa2e
-#define ODM_CCK_CNT_RESET 0xa2d
-#define ODM_CCK_TX_DIVERSITY 0xa2f
-#define ODM_CCK_FA_CNT_MSB 0xa5b
-#define ODM_CCK_FA_CNT_LSB 0xa5c
-#define ODM_CCK_NEW_FUNCTION 0xa75
-#define ODM_OFDM_PHY0_PAGE_C 0xc00
-#define ODM_OFDM_RX_ANT 0xc04
-#define ODM_R_A_RXIQI 0xc14
-#define ODM_R_A_AGC_CORE1 0xc50
-#define ODM_R_A_AGC_CORE2 0xc54
-#define ODM_R_B_AGC_CORE1 0xc58
-#define ODM_R_AGC_PAR 0xc70
-#define ODM_R_HTSTF_AGC_PAR 0xc7c
-#define ODM_TX_PWR_TRAINING_A 0xc90
-#define ODM_TX_PWR_TRAINING_B 0xc98
-#define ODM_OFDM_FA_CNT1 0xcf0
-#define ODM_OFDM_PHY0_PAGE_D 0xd00
-#define ODM_OFDM_FA_CNT2 0xda0
-#define ODM_OFDM_FA_CNT3 0xda4
-#define ODM_OFDM_FA_CNT4 0xda8
-#define ODM_TXAGC_A_6_18 0xe00
-#define ODM_TXAGC_A_24_54 0xe04
-#define ODM_TXAGC_A_1_MCS32 0xe08
-#define ODM_TXAGC_A_MCS0_MCS3 0xe10
-#define ODM_TXAGC_A_MCS4_MCS7 0xe14
-#define ODM_TXAGC_A_MCS8_MCS11 0xe18
-#define ODM_TXAGC_A_MCS12_MCS15 0xe1c
-
-/* RF REG */
-#define ODM_GAIN_SETTING 0x00
-#define ODM_CHANNEL 0x18
-
-/* Ant Detect Reg */
-#define ODM_DPDT 0x300
-
-/* PSD Init */
-#define ODM_PSDREG 0x808
-
-/* 92D Path Div */
-#define PATHDIV_REG 0xB30
-#define PATHDIV_TRI 0xBA0
-
-/* */
-/* Bitmap Definition */
-/* */
-
-#define BIT_FA_RESET BIT(0)
-
-#endif
diff --git a/drivers/staging/r8188eu/include/odm_types.h b/drivers/staging/r8188eu/include/odm_types.h
index 6f4a4bd37ec1..08ba7a418ba8 100644
--- a/drivers/staging/r8188eu/include/odm_types.h
+++ b/drivers/staging/r8188eu/include/odm_types.h
@@ -4,31 +4,13 @@
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
-/* */
-/* Define Different SW team support */
-/* */
-#define ODM_AP 0x01 /* BIT(0) */
-#define ODM_ADSL 0x02 /* BIT(1) */
#define ODM_CE 0x04 /* BIT(2) */
-#define ODM_MP 0x08 /* BIT(3) */
-
-#define RT_PCI_INTERFACE 1
-#define RT_USB_INTERFACE 2
-#define RT_SDIO_INTERFACE 3
enum HAL_STATUS {
HAL_STATUS_SUCCESS,
HAL_STATUS_FAILURE,
};
-enum RT_SPINLOCK_TYPE {
- RT_TEMP = 1,
-};
-
-#include "basic_types.h"
-
-#define DEV_BUS_TYPE RT_USB_INTERFACE
-
#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \
le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24))
#define SET_TX_DESC_ANTSEL_B_88E(__ptxdesc, __value) \
@@ -36,10 +18,4 @@ enum RT_SPINLOCK_TYPE {
#define SET_TX_DESC_ANTSEL_C_88E(__ptxdesc, __value) \
le32p_replace_bits((__le32 *)(__ptxdesc + 28), __value, BIT(29))
-/* define useless flag to avoid compile warning */
-#define USE_WORKITEM 0
-#define FOR_BRAZIL_PRETEST 0
-#define BT_30_SUPPORT 0
-#define FPGA_TWO_MAC_VERIFICATION 0
-
#endif /* __ODM_TYPES_H__ */
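
For reference, the surviving SET_TX_DESC_ANTSEL_*_88E helpers patch single antenna-select bits into little-endian TX descriptor words with le32p_replace_bits() from <linux/bitfield.h>. A minimal sketch follows; the descriptor buffer and helper function are hypothetical and only illustrate the bit-replace pattern, they are not part of the driver.

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define SET_TX_DESC_ANTSEL_A_88E(__ptxdesc, __value) \
	le32p_replace_bits((__le32 *)(__ptxdesc + 8), __value, BIT(24))

/* Hypothetical helper: set/clear the antenna-A select bit in word 2
 * (byte offset 8) of a little-endian TX descriptor without touching
 * the other bits of that word. */
static void example_set_antsel_a(u8 *txdesc, bool ant_a)
{
	SET_TX_DESC_ANTSEL_A_88E(txdesc, ant_a);
}
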
diff --git a/drivers/staging/r8188eu/include/osdep_intf.h b/drivers/staging/r8188eu/include/osdep_intf.h
index 3ea60feee2db..0d7009269aab 100644
--- a/drivers/staging/r8188eu/include/osdep_intf.h
+++ b/drivers/staging/r8188eu/include/osdep_intf.h
@@ -34,7 +34,6 @@ The protection mechanism is through the pending queue.
struct urb *piorw_urb;
u8 io_irp_cnt;
u8 bio_irp_pending;
- struct semaphore io_retevt;
struct timer_list io_timer;
u8 bio_irp_timeout;
u8 bio_timer_cancel;
@@ -51,14 +50,10 @@ void rtw_cancel_all_timer(struct adapter *padapter);
int rtw_init_netdev_name(struct net_device *pnetdev, const char *ifname);
struct net_device *rtw_init_netdev(struct adapter *padapter);
u16 rtw_recv_select_queue(struct sk_buff *skb);
-void rtw_proc_init_one(struct net_device *dev);
-void rtw_proc_remove_one(struct net_device *dev);
void rtw_ips_dev_unload(struct adapter *padapter);
int rtw_ips_pwr_up(struct adapter *padapter);
void rtw_ips_pwr_down(struct adapter *padapter);
-int rtw_hw_suspend(struct adapter *padapter);
-int rtw_hw_resume(struct adapter *padapter);
#endif /* _OSDEP_INTF_H_ */
diff --git a/drivers/staging/r8188eu/include/osdep_service.h b/drivers/staging/r8188eu/include/osdep_service.h
index 029aa4e92c9b..efab3a97eb46 100644
--- a/drivers/staging/r8188eu/include/osdep_service.h
+++ b/drivers/staging/r8188eu/include/osdep_service.h
@@ -56,19 +56,6 @@ static inline struct list_head *get_list_head(struct __queue *queue)
return (&(queue->queue));
}
-static inline int _enter_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
-{
- int ret;
-
- ret = mutex_lock_interruptible(pmutex);
- return ret;
-}
-
-static inline void _exit_critical_mutex(struct mutex *pmutex, unsigned long *pirqL)
-{
- mutex_unlock(pmutex);
-}
-
static inline void rtw_list_delete(struct list_head *plist)
{
list_del_init(plist);
@@ -154,11 +141,11 @@ extern unsigned char RSN_TKIP_CIPHER[4];
void *rtw_malloc2d(int h, int w, int size);
-u32 _rtw_down_sema(struct semaphore *sema);
-void _rtw_mutex_init(struct mutex *pmutex);
-void _rtw_mutex_free(struct mutex *pmutex);
-
-void _rtw_init_queue(struct __queue *pqueue);
+#define rtw_init_queue(q) \
+ do { \
+ INIT_LIST_HEAD(&((q)->queue)); \
+ spin_lock_init(&((q)->lock)); \
+ } while (0)
u32 rtw_systime_to_ms(u32 systime);
u32 rtw_ms_to_systime(u32 ms);
@@ -166,32 +153,17 @@ s32 rtw_get_passing_time_ms(u32 start);
void rtw_usleep_os(int us);
-u32 rtw_atoi(u8 *s);
-
static inline unsigned char _cancel_timer_ex(struct timer_list *ptimer)
{
return del_timer_sync(ptimer);
}
-static __inline void thread_enter(char *name)
-{
-#ifdef daemonize
- daemonize("%s", name);
-#endif
- allow_signal(SIGTERM);
-}
-
static inline void flush_signals_thread(void)
{
if (signal_pending (current))
flush_signals(current);
}
-static inline int res_to_status(int res)
-{
- return res;
-}
-
#define _RND(sz, r) ((((sz)+((r)-1))/(r))*(r))
#define RND4(x) (((x >> 2) + (((x & 3) == 0) ? 0: 1)) << 2)
@@ -302,12 +274,10 @@ struct rtw_cbuf {
u32 write;
u32 read;
u32 size;
- void *bufs[0];
+ void *bufs[];
};
-bool rtw_cbuf_full(struct rtw_cbuf *cbuf);
bool rtw_cbuf_empty(struct rtw_cbuf *cbuf);
-bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf);
void *rtw_cbuf_pop(struct rtw_cbuf *cbuf);
struct rtw_cbuf *rtw_cbuf_alloc(u32 size);
int wifirate2_ratetbl_inx(unsigned char rate);
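
For reference, the new rtw_init_queue() macro simply does INIT_LIST_HEAD() plus spin_lock_init() inline, and bufs[0] becoming a C99 flexible array member means the circular buffer can be sized with struct_size() instead of zero-length-array arithmetic. A minimal allocation sketch, assuming only the fields shown in the struct above; the helper name is hypothetical and is not the driver's rtw_cbuf_alloc():

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct rtw_cbuf {
	u32 write;
	u32 read;
	u32 size;
	void *bufs[];	/* flexible array member, formerly bufs[0] */
};

/* Hypothetical helper: overflow-checked allocation of header + slots. */
static struct rtw_cbuf *example_cbuf_alloc(u32 size)
{
	struct rtw_cbuf *cbuf = kzalloc(struct_size(cbuf, bufs, size),
					GFP_KERNEL);

	if (cbuf)
		cbuf->size = size;
	return cbuf;
}
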
diff --git a/drivers/staging/r8188eu/include/recv_osdep.h b/drivers/staging/r8188eu/include/recv_osdep.h
index 72ddf515071e..e87467022b39 100644
--- a/drivers/staging/r8188eu/include/recv_osdep.h
+++ b/drivers/staging/r8188eu/include/recv_osdep.h
@@ -27,8 +27,6 @@ void rtw_os_recv_resource_free(struct recv_priv *precvpriv);
int rtw_os_recvbuf_resource_alloc(struct adapter *adapt, struct recv_buf *buf);
int rtw_os_recvbuf_resource_free(struct adapter *adapt, struct recv_buf *buf);
-void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf);
-
void rtw_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl);
int _netdev_open(struct net_device *pnetdev);
int netdev_open(struct net_device *pnetdev);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_cmd.h b/drivers/staging/r8188eu/include/rtl8188e_cmd.h
index 6fbf9a47430b..1e01c1662f9a 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_cmd.h
@@ -27,15 +27,6 @@ enum RTL8188E_H2C_CMD_ID {
/* Class DM */
H2C_DM_MACID_CFG = 0x40,
H2C_DM_TXBF = 0x41,
-
- /* Class BT */
- H2C_BT_COEX_MASK = 0x60,
- H2C_BT_COEX_GPIO_MODE = 0x61,
- H2C_BT_DAC_SWING_VAL = 0x62,
- H2C_BT_PSD_RST = 0x63,
-
- /* Class */
- H2C_RESET_TSF = 0xc0,
};
struct cmd_msg_parm {
@@ -44,10 +35,6 @@ struct cmd_msg_parm {
u8 buf[6];
};
-enum {
- PWRS
-};
-
struct setpwrmode_parm {
u8 Mode;/* 0:Active,1:LPS,2:WMMPS */
u8 SmartPS_RLBM;/* LPS= 0:PS_Poll,1:PS_Poll,2:NullData,WMM= 0:PS_Poll,1:NullData */
@@ -91,14 +78,11 @@ struct P2P_PS_CTWPeriod_t {
/* host message to firmware cmd */
void rtl8188e_set_FwPwrMode_cmd(struct adapter *padapter, u8 Mode);
void rtl8188e_set_FwJoinBssReport_cmd(struct adapter *padapter, u8 mstatus);
-u8 rtl8188e_set_rssi_cmd(struct adapter *padapter, u8 *param);
u8 rtl8188e_set_raid_cmd(struct adapter *padapter, u32 mask);
void rtl8188e_Add_RateATid(struct adapter *padapter, u32 bitmap, u8 arg,
u8 rssi_level);
-#ifdef CONFIG_88EU_P2P
void rtl8188e_set_p2p_ps_offload_cmd(struct adapter *adapt, u8 p2p_ps_state);
-#endif /* CONFIG_88EU_P2P */
void CheckFwRsvdPageContent(struct adapter *adapt);
void rtl8188e_set_FwMediaStatus_cmd(struct adapter *adapt, __le16 mstatus_rpt);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_dm.h b/drivers/staging/r8188eu/include/rtl8188e_dm.h
index 3ead20b321a9..208bea050f6f 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_dm.h
@@ -15,28 +15,15 @@ enum{
#define HP_THERMAL_NUM 8
/* duplicate code,will move to ODM ######### */
struct dm_priv {
- u8 DM_Type;
- u8 DMFlag;
- u8 InitDMFlag;
u32 InitODMFlag;
/* Upper and Lower Signal threshold for Rate Adaptive*/
- int UndecoratedSmoothedPWDB;
- int UndecoratedSmoothedCCK;
int EntryMinUndecoratedSmoothedPWDB;
int EntryMaxUndecoratedSmoothedPWDB;
int MinUndecoratedPWDBForDM;
- int LastMinUndecoratedPWDBForDM;
-
- /* for High Power */
- u8 bDynamicTxPowerEnable;
- u8 LastDTPLvl;
- u8 DynamicTxHighPowerLvl;/* Tx Power Control for Near/Far Range */
- u8 PowerIndex_backup[6];
};
void rtl8188e_init_dm_priv(struct adapter *adapt);
-void rtl8188e_deinit_dm_priv(struct adapter *adapt);
void rtl8188e_InitHalDm(struct adapter *adapt);
void rtl8188e_HalDmWatchDog(struct adapter *adapt);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_hal.h b/drivers/staging/r8188eu/include/rtl8188e_hal.h
index 3939bf053de1..d7db1dfc39d0 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_hal.h
@@ -19,29 +19,10 @@
#include "odm_precomp.h"
-/* Fw Array */
-#define Rtl8188E_FwImageArray Rtl8188EFwImgArray
-#define Rtl8188E_FWImgArrayLength Rtl8188EFWImgArrayLength
-
-#define RTL8188E_FW_UMC_IMG "rtl8188E\\rtl8188efw.bin"
-#define RTL8188E_PHY_REG "rtl8188E\\PHY_REG_1T.txt"
-#define RTL8188E_PHY_RADIO_A "rtl8188E\\radio_a_1T.txt"
-#define RTL8188E_PHY_RADIO_B "rtl8188E\\radio_b_1T.txt"
-#define RTL8188E_AGC_TAB "rtl8188E\\AGC_TAB_1T.txt"
-#define RTL8188E_PHY_MACREG "rtl8188E\\MAC_REG.txt"
-#define RTL8188E_PHY_REG_PG "rtl8188E\\PHY_REG_PG.txt"
-#define RTL8188E_PHY_REG_MP "rtl8188E\\PHY_REG_MP.txt"
-
/* RTL8188E Power Configuration CMDs for USB/SDIO interfaces */
#define Rtl8188E_NIC_PWR_ON_FLOW rtl8188E_power_on_flow
-#define Rtl8188E_NIC_RF_OFF_FLOW rtl8188E_radio_off_flow
#define Rtl8188E_NIC_DISABLE_FLOW rtl8188E_card_disable_flow
-#define Rtl8188E_NIC_ENABLE_FLOW rtl8188E_card_enable_flow
-#define Rtl8188E_NIC_SUSPEND_FLOW rtl8188E_suspend_flow
-#define Rtl8188E_NIC_RESUME_FLOW rtl8188E_resume_flow
-#define Rtl8188E_NIC_PDN_FLOW rtl8188E_hwpdn_flow
#define Rtl8188E_NIC_LPS_ENTER_FLOW rtl8188E_enter_lps_flow
-#define Rtl8188E_NIC_LPS_LEAVE_FLOW rtl8188E_leave_lps_flow
#define DRVINFO_SZ 4 /* unit is 8bytes */
#define PageNum_128(_Len) (u32)(((_Len)>>7) + ((_Len) & 0x7F ? 1 : 0))
@@ -49,7 +30,6 @@
/* download firmware related data structure */
#define FW_8188E_SIZE 0x4000 /* 16384,16k */
#define FW_8188E_START_ADDRESS 0x1000
-#define FW_8188E_END_ADDRESS 0x1FFF /* 0x5FFF */
#define MAX_PAGE_SIZE 4096 /* @ page : 4k bytes */
@@ -108,11 +88,6 @@ enum usb_rx_agg_mode {
0x2400 /* 9k for 88E nornal chip , MaxRxBuff=10k-max(TxReportSize(64*8),
* WOLPattern(16*24)) */
-#define MAX_TX_REPORT_BUFFER_SIZE 0x0400 /* 1k */
-
-/* BK, BE, VI, VO, HCCA, MANAGEMENT, COMMAND, HIGH, BEACON. */
-#define MAX_TX_QUEUE 9
-
#define TX_SELE_HQ BIT(0) /* High Queue */
#define TX_SELE_LQ BIT(1) /* Low Queue */
#define TX_SELE_NQ BIT(2) /* Normal Queue */
@@ -134,11 +109,6 @@ enum usb_rx_agg_mode {
#define WMM_NORMAL_TX_PAGE_BOUNDARY_88E \
(WMM_NORMAL_TX_TOTAL_PAGE_NUMBER + 1) /* 0xA9 */
-/* Chip specific */
-#define CHIP_BONDING_IDENTIFIER(_value) (((_value)>>22)&0x3)
-#define CHIP_BONDING_92C_1T2R 0x1
-#define CHIP_BONDING_88C_USB_MCARD 0x2
-#define CHIP_BONDING_88C_USB_HP 0x1
#include "HalVerDef.h"
#include "hal_com.h"
@@ -168,29 +138,11 @@ struct txpowerinfo24g {
};
#define EFUSE_REAL_CONTENT_LEN 512
-#define EFUSE_MAX_SECTION 16
-#define EFUSE_IC_ID_OFFSET 506 /* For some inferior IC purpose*/
#define AVAILABLE_EFUSE_ADDR(addr) (addr < EFUSE_REAL_CONTENT_LEN)
-/* To prevent out of boundary programming case, */
-/* leave 1byte and program full section */
-/* 9bytes + 1byt + 5bytes and pre 1byte. */
-/* For worst case: */
-/* | 1byte|----8bytes----|1byte|--5bytes--| */
-/* | | Reserved(14bytes) | */
-
-/* PG data exclude header, dummy 6 bytes frome CP test and reserved 1byte. */
-#define EFUSE_OOB_PROTECT_BYTES 15
-
-#define HWSET_MAX_SIZE_88E 512
#define EFUSE_REAL_CONTENT_LEN_88E 256
#define EFUSE_MAP_LEN_88E 512
-#define EFUSE_MAP_LEN EFUSE_MAP_LEN_88E
#define EFUSE_MAX_SECTION_88E 64
-#define EFUSE_MAX_WORD_UNIT_88E 4
-#define EFUSE_IC_ID_OFFSET_88E 506
-#define AVAILABLE_EFUSE_ADDR_88E(addr) \
- (addr < EFUSE_REAL_CONTENT_LEN_88E)
/* To prevent out of boundary programming case, leave 1byte and program
* full section */
/* 9bytes + 1byt + 5bytes and pre 1byte. */
@@ -198,35 +150,11 @@ struct txpowerinfo24g {
/* | 2byte|----8bytes----|1byte|--7bytes--| 92D */
/* PG data exclude header, dummy 7 bytes frome CP test and reserved 1byte. */
#define EFUSE_OOB_PROTECT_BYTES_88E 18
-#define EFUSE_PROTECT_BYTES_BANK_88E 16
-
-/* EFUSE for BT definition */
-#define EFUSE_BT_REAL_CONTENT_LEN 1536 /* 512*3 */
-#define EFUSE_BT_MAP_LEN 1024 /* 1k bytes */
-#define EFUSE_BT_MAX_SECTION 128 /* 1024/8 */
#define EFUSE_PROTECT_BYTES_BANK 16
-/* For RTL8723 WiFi/BT/GPS multi-function configuration. */
-enum rt_multi_func {
- RT_MULTI_FUNC_NONE = 0x00,
- RT_MULTI_FUNC_WIFI = 0x01,
- RT_MULTI_FUNC_BT = 0x02,
- RT_MULTI_FUNC_GPS = 0x04,
-};
-
-/* For RTL8723 regulator mode. */
-enum rt_regulator_mode {
- RT_SWITCHING_REGULATOR = 0,
- RT_LDO_REGULATOR = 1,
-};
-
struct hal_data_8188e {
struct HAL_VERSION VersionID;
- enum rt_multi_func MultiFunc; /* For multi-function consideration. */
- enum rt_regulator_mode RegulatorMode; /* switching regulator or LDO */
- u16 CustomerID;
-
u16 FirmwareVersion;
u16 FirmwareVersionRev;
u16 FirmwareSubVersion;
@@ -234,7 +162,6 @@ struct hal_data_8188e {
u8 PGMaxGroup;
/* current WIFI_PHY values */
u32 ReceiveConfig;
- enum wireless_mode CurrentWirelessMode;
enum ht_channel_width CurrentChannelBW;
u8 CurrentChannel;
u8 nCur40MhzPrimeSC;/* Control channel sub-carrier */
@@ -244,7 +171,6 @@ struct hal_data_8188e {
/* rf_ctrl */
u8 rf_chip;
u8 rf_type;
- u8 NumTotalRFPath;
u8 BoardType;
@@ -305,7 +231,6 @@ struct hal_data_8188e {
u8 CrystalCap;
u32 AntennaTxPath; /* Antenna path Tx */
u32 AntennaRxPath; /* Antenna path Rx */
- u8 BluetoothCoexist;
u8 ExternalPA;
u8 bLedOpenDrain; /* Open-drain support for controlling the LED.*/
@@ -325,16 +250,12 @@ struct hal_data_8188e {
u8 LastHMEBoxNum;
u8 fw_ractrl;
- u8 RegTxPause;
- /* Beacon function related global variable. */
- u32 RegBcnCtrlVal;
u8 RegFwHwTxQCtrl;
u8 RegReg542;
u8 RegCR_1;
struct dm_priv dmpriv;
struct odm_dm_struct odmpriv;
- struct sreset_priv srestpriv;
u8 CurAntenna;
u8 AntDivCfg;
@@ -364,9 +285,7 @@ struct hal_data_8188e {
u16 EfuseUsedBytes;
-#ifdef CONFIG_88EU_P2P
struct P2P_PS_Offload_t p2p_ps_offload;
-#endif
/* Auto FSM to Turn On, include clock, isolation, power control
* for MAC only */
@@ -376,11 +295,9 @@ struct hal_data_8188e {
/* Interrupt relatd register information. */
u32 IntArray[3];/* HISR0,HISR1,HSISR */
- u32 IntrMask[3];
u8 C2hArray[16];
u8 UsbTxAggMode;
u8 UsbTxAggDescNum;
- u16 HwRxPageSize; /* Hardware setting */
u32 MaxUsbRxAggBlock;
enum usb_rx_agg_mode UsbRxAggMode;
@@ -394,12 +311,6 @@ struct hal_data_8188e {
#define GET_HAL_DATA(__pAdapter) \
((struct hal_data_8188e *)((__pAdapter)->HalData))
-#define GET_RF_TYPE(priv) (GET_HAL_DATA(priv)->rf_type)
-
-#define INCLUDE_MULTI_FUNC_BT(_Adapter) \
- (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_BT)
-#define INCLUDE_MULTI_FUNC_GPS(_Adapter) \
- (GET_HAL_DATA(_Adapter)->MultiFunc & RT_MULTI_FUNC_GPS)
/* rtl8188e_hal_init.c */
s32 rtl8188e_FirmwareDownload(struct adapter *padapter);
@@ -432,19 +343,8 @@ void Hal_EfuseParseBoardType88E(struct adapter *pAdapter, u8 *hwinfo,
void Hal_ReadPowerSavingMode88E(struct adapter *pAdapter, u8 *hwinfo,
bool AutoLoadFail);
-bool HalDetectPwrDownMode88E(struct adapter *Adapter);
-
-void Hal_InitChannelPlan(struct adapter *padapter);
-void rtl8188e_set_hal_ops(struct hal_ops *pHalFunc);
-
-/* register */
-void SetBcnCtrlReg(struct adapter *padapter, u8 SetBits, u8 ClearBits);
-
-void rtl8188e_clone_haldata(struct adapter *dst, struct adapter *src);
-void rtl8188e_start_thread(struct adapter *padapter);
-void rtl8188e_stop_thread(struct adapter *padapter);
+void rtl8188e_read_chip_version(struct adapter *padapter);
-void rtw_IOL_cmd_tx_pkt_buf_dump(struct adapter *Adapter, int len);
s32 rtl8188e_iol_efuse_patch(struct adapter *padapter);
void rtw_cancel_all_timer(struct adapter *padapter);
void _ps_open_RF(struct adapter *adapt);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_led.h b/drivers/staging/r8188eu/include/rtl8188e_led.h
index b00954198764..02cdc970bb17 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_led.h
@@ -12,7 +12,5 @@
/* */
void rtl8188eu_InitSwLeds(struct adapter *padapter);
void rtl8188eu_DeInitSwLeds(struct adapter *padapter);
-void SwLedOn(struct adapter *padapter, struct LED_871x *pLed);
-void SwLedOff(struct adapter *padapter, struct LED_871x *pLed);
#endif
diff --git a/drivers/staging/r8188eu/include/rtl8188e_recv.h b/drivers/staging/r8188eu/include/rtl8188e_recv.h
index a91daf84d6c3..2ab395ef579b 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_recv.h
@@ -40,7 +40,7 @@ enum rx_packet_type {
};
#define INTERRUPT_MSG_FORMAT_LEN 60
-void rtl8188eu_init_recvbuf(struct adapter *padapter, struct recv_buf *buf);
+void rtl8188eu_init_recvbuf(struct recv_buf *buf);
s32 rtl8188eu_init_recv_priv(struct adapter *padapter);
void rtl8188eu_free_recv_priv(struct adapter * padapter);
void rtl8188eu_recv_hdl(struct adapter * padapter, struct recv_buf *precvbuf);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_rf.h b/drivers/staging/r8188eu/include/rtl8188e_rf.h
index da6b7f8212a3..04556496baad 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_rf.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_rf.h
@@ -9,7 +9,6 @@
#define RF6052_MAX_PATH 2
int PHY_RF6052_Config8188E(struct adapter *Adapter);
-void rtl8188e_RF_ChangeTxPath(struct adapter *Adapter, u16 DataRate);
void rtl8188e_PHY_RF6052SetBandwidth(struct adapter *Adapter,
enum ht_channel_width Bandwidth);
void rtl8188e_PHY_RF6052SetCckTxPower(struct adapter *Adapter, u8 *level);
diff --git a/drivers/staging/r8188eu/include/rtl8188e_spec.h b/drivers/staging/r8188eu/include/rtl8188e_spec.h
index 1c96f7b81245..01aeaa4ac605 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_spec.h
@@ -893,11 +893,7 @@ Current IOREG MAP
#define _PSRX(x) (x)
#define _PSTX(x) ((x) << 4)
-#define PBP_64 0x0
#define PBP_128 0x1
-#define PBP_256 0x2
-#define PBP_512 0x3
-#define PBP_1024 0x4
/* 2 TX/RXDMA */
#define RXDMA_ARBBW_EN BIT(0)
diff --git a/drivers/staging/r8188eu/include/rtl8188e_sreset.h b/drivers/staging/r8188eu/include/rtl8188e_sreset.h
index 880c5792d5dd..bb8b0048fbf9 100644
--- a/drivers/staging/r8188eu/include/rtl8188e_sreset.h
+++ b/drivers/staging/r8188eu/include/rtl8188e_sreset.h
@@ -6,9 +6,7 @@
#include "osdep_service.h"
#include "drv_types.h"
-#include "rtw_sreset.h"
-void rtl8188e_silentreset_for_specific_platform(struct adapter *padapter);
void rtl8188e_sreset_xmit_status_check(struct adapter *padapter);
void rtl8188e_sreset_linked_status_check(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_ap.h b/drivers/staging/r8188eu/include/rtw_ap.h
index 2eb556968509..724229fe84aa 100644
--- a/drivers/staging/r8188eu/include/rtw_ap.h
+++ b/drivers/staging/r8188eu/include/rtw_ap.h
@@ -7,8 +7,6 @@
#include "osdep_service.h"
#include "drv_types.h"
-#ifdef CONFIG_88EU_AP_MODE
-
/* external function */
void rtw_indicate_sta_assoc_event(struct adapter *padapter,
struct sta_info *psta);
@@ -22,26 +20,17 @@ void add_RATid(struct adapter *padapter, struct sta_info *psta,
u8 rssi_level);
void expire_timeout_chk(struct adapter *padapter);
void update_sta_info_apmode(struct adapter *padapter, struct sta_info *psta);
-int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len);
void rtw_ap_restore_network(struct adapter *padapter);
-void rtw_set_macaddr_acl(struct adapter *padapter, int mode);
-int rtw_acl_add_sta(struct adapter *padapter, u8 *addr);
-int rtw_acl_remove_sta(struct adapter *padapter, u8 *addr);
-#ifdef CONFIG_88EU_AP_MODE
void associated_clients_update(struct adapter *padapter, u8 updated);
void bss_cap_update_on_sta_join(struct adapter *padapter, struct sta_info *psta);
u8 bss_cap_update_on_sta_leave(struct adapter *padapter, struct sta_info *psta);
void sta_info_update(struct adapter *padapter, struct sta_info *psta);
-void ap_sta_info_defer_update(struct adapter *padapter, struct sta_info *psta);
u8 ap_free_sta(struct adapter *padapter, struct sta_info *psta,
bool active, u16 reason);
int rtw_sta_flush(struct adapter *padapter);
-int rtw_ap_inform_ch_switch(struct adapter *padapter, u8 new_ch, u8 ch_offset);
void start_ap_mode(struct adapter *padapter);
void stop_ap_mode(struct adapter *padapter);
-#endif
-#endif /* end of CONFIG_88EU_AP_MODE */
void update_bmc_sta(struct adapter *padapter);
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_br_ext.h b/drivers/staging/r8188eu/include/rtw_br_ext.h
index 69905d30c191..17a6154e760a 100644
--- a/drivers/staging/r8188eu/include/rtw_br_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_br_ext.h
@@ -4,7 +4,6 @@
#ifndef _RTW_BR_EXT_H_
#define _RTW_BR_EXT_H_
-#define MACADDRLEN 6
#define _DEBUG_ERR DBG_88E
#define _DEBUG_INFO DBG_88E
#define DEBUG_WARN DBG_88E
@@ -40,7 +39,7 @@ struct br_ext_info {
unsigned int macclone_enable;
unsigned int dhcp_bcst_disable;
int addPPPoETag; /* 1: Add PPPoE relay-SID, 0: disable */
- unsigned char nat25_dmzMac[MACADDRLEN];
+ unsigned char nat25_dmzMac[ETH_ALEN];
unsigned int nat25sc_disable;
};
diff --git a/drivers/staging/r8188eu/include/rtw_cmd.h b/drivers/staging/r8188eu/include/rtw_cmd.h
index c14d9052b997..47c3c80cc24a 100644
--- a/drivers/staging/r8188eu/include/rtw_cmd.h
+++ b/drivers/staging/r8188eu/include/rtw_cmd.h
@@ -33,8 +33,9 @@ struct cmd_obj {
};
struct cmd_priv {
- struct semaphore cmd_queue_sema;
- struct semaphore terminate_cmdthread_sema;
+ struct completion enqueue_cmd;
+ struct completion start_cmd_thread;
+ struct completion stop_cmd_thread;
struct __queue cmd_queue;
u8 cmd_seq;
u8 *cmd_buf; /* shall be non-paged, and 4 bytes aligned */
@@ -89,11 +90,8 @@ void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv);
u32 rtw_init_evt_priv(struct evt_priv *pevtpriv);
void rtw_free_evt_priv(struct evt_priv *pevtpriv);
-void rtw_cmd_clr_isr(struct cmd_priv *pcmdpriv);
void rtw_evt_notify_isr(struct evt_priv *pevtpriv);
-#ifdef CONFIG_88EU_P2P
u8 p2p_protocol_wk_cmd(struct adapter *padapter, int intCmdType);
-#endif /* CONFIG_88EU_P2P */
enum rtw_drvextra_cmd_id {
NONE_WK_CID,
@@ -747,29 +745,17 @@ Result:
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-u8 rtw_setassocsta_cmd(struct adapter *padapter, u8 *mac_addr);
-u8 rtw_setstandby_cmd(struct adapter *padapter, uint action);
u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
int ssid_num, struct rtw_ieee80211_channel *ch,
int ch_num);
u8 rtw_createbss_cmd(struct adapter *padapter);
-u8 rtw_createbss_cmd_ex(struct adapter *padapter, unsigned char *pbss,
- unsigned int sz);
-u8 rtw_setphy_cmd(struct adapter *padapter, u8 modem, u8 ch);
u8 rtw_setstakey_cmd(struct adapter *padapter, u8 *psta, u8 unicast_key);
u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue);
u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network* pnetwork);
u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue);
u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infra networktype);
u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbasicrate_cmd(struct adapter *padapter, u8 *rateset);
-u8 rtw_setbbreg_cmd(struct adapter * padapter, u8 offset, u8 val);
-u8 rtw_setrfreg_cmd(struct adapter * padapter, u8 offset, u32 val);
-u8 rtw_getbbreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
-u8 rtw_getrfreg_cmd(struct adapter * padapter, u8 offset, u8 * pval);
u8 rtw_setrfintfs_cmd(struct adapter *padapter, u8 mode);
-u8 rtw_setrttbl_cmd(struct adapter *padapter, struct setratable_parm *prate_table);
-u8 rtw_getrttbl_cmd(struct adapter *padapter, struct getratable_rsp *pval);
u8 rtw_gettssi_cmd(struct adapter *padapter, u8 offset,u8 *pval);
u8 rtw_setfwdig_cmd(struct adapter*padapter, u8 type);
@@ -785,15 +771,9 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter*padapter, u16 minRptTime);
u8 rtw_antenna_select_cmd(struct adapter*padapter, u8 antenna,u8 enqueue);
u8 rtw_ps_cmd(struct adapter*padapter);
-#ifdef CONFIG_88EU_AP_MODE
u8 rtw_chk_hi_queue_cmd(struct adapter*padapter);
-#endif
-u8 rtw_set_ch_cmd(struct adapter*padapter, u8 ch, u8 bw, u8 ch_offset, u8 enqueue);
u8 rtw_set_chplan_cmd(struct adapter*padapter, u8 chplan, u8 enqueue);
-u8 rtw_led_blink_cmd(struct adapter*padapter, struct LED_871x * pLed);
-u8 rtw_set_csa_cmd(struct adapter*padapter, u8 new_ch_no);
-u8 rtw_tdls_cmd(struct adapter *padapter, u8 *addr, u8 option);
u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt);
@@ -804,7 +784,6 @@ void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd);
void rtw_createbss_cmd_callback(struct adapter *adapt, struct cmd_obj *pcmd);
void rtw_getbbrfreg_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
-void rtw_readtssi_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setstaKey_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cmd);
void rtw_setassocsta_cmdrsp_callback(struct adapter *adapt, struct cmd_obj *cm);
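
For reference, a minimal sketch of the completion-based handshake that replaces the old cmd_queue_sema/terminate_cmdthread_sema semaphores in struct cmd_priv: the enqueuing side signals enqueue_cmd and the command thread blocks on it. The function names below are hypothetical and only illustrate the pattern.

#include <linux/completion.h>

static struct completion enqueue_cmd;

static void example_cmd_priv_init(void)
{
	init_completion(&enqueue_cmd);
}

static void example_enqueue_cmd(void)
{
	/* ...add the cmd_obj to cmd_queue under its spinlock... */
	complete(&enqueue_cmd);		/* wake the command thread */
}

static int example_cmd_thread(void *unused)
{
	for (;;) {
		/* Returns -ERESTARTSYS if the thread is signalled. */
		if (wait_for_completion_interruptible(&enqueue_cmd))
			break;
		/* ...drain cmd_queue and run the command handlers... */
	}
	return 0;
}
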
diff --git a/drivers/staging/r8188eu/include/rtw_debug.h b/drivers/staging/r8188eu/include/rtw_debug.h
index 3c3bf2a4f30e..0a77e3e73a45 100644
--- a/drivers/staging/r8188eu/include/rtw_debug.h
+++ b/drivers/staging/r8188eu/include/rtw_debug.h
@@ -72,160 +72,4 @@ extern u32 GlobalDebugLevel;
pr_info(DRIVER_PREFIX __VA_ARGS__); \
} while (0)
-int proc_get_drv_version(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_write_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_write_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-int proc_get_read_reg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_read_reg(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_fwstate(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_sec_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_mlmext_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_qos_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_ht_option(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_rf_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-int proc_get_ap_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_adapter_state(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_trx_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_mac_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_bb_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump1(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump2(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump3(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rf_reg_dump4(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-#ifdef CONFIG_88EU_AP_MODE
-
-int proc_get_all_sta_info(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-#endif
-
-int proc_get_best_channel(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rx_signal(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rx_signal(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_ht_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_ht_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_cbw40_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_cbw40_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_ampdu_enable(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_ampdu_enable(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_rx_stbc(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rx_stbc(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-int proc_get_two_path_rssi(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_get_rssi_disp(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_rssi_disp(struct file *file, const char __user *buffer,
- unsigned long count, void *data);
-
-#ifdef CONFIG_BT_COEXIST
-int proc_get_btcoex_dbg(char *page, char **start,
- off_t offset, int count,
- int *eof, void *data);
-
-int proc_set_btcoex_dbg(struct file *file, const char *buffer,
- signed long count, void *data);
-
-#endif /* CONFIG_BT_COEXIST */
-
#endif /* __RTW_DEBUG_H__ */
diff --git a/drivers/staging/r8188eu/include/rtw_eeprom.h b/drivers/staging/r8188eu/include/rtw_eeprom.h
index 9f8a9c070339..e517239bd75e 100644
--- a/drivers/staging/r8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/r8188eu/include/rtw_eeprom.h
@@ -40,66 +40,11 @@
#define EEPROM_CID_CLEVO 0x13
#define EEPROM_CID_WHQL 0xFE
-/* Customer ID, note that: */
-/* This variable is initiailzed through EEPROM or registry, */
-/* however, its definition may be different with that in EEPROM for */
-/* EEPROM size consideration. So, we have to perform proper translation
- * between them. */
-/* Besides, CustomerID of registry has precedence of that of EEPROM. */
-/* defined below. 060703, by rcnjko. */
-enum RT_CUSTOMER_ID {
- RT_CID_DEFAULT = 0,
- RT_CID_8187_ALPHA0 = 1,
- RT_CID_8187_SERCOMM_PS = 2,
- RT_CID_8187_HW_LED = 3,
- RT_CID_8187_NETGEAR = 4,
- RT_CID_WHQL = 5,
- RT_CID_819x_CAMEO = 6,
- RT_CID_819x_RUNTOP = 7,
- RT_CID_819x_Senao = 8,
- RT_CID_TOSHIBA = 9, /* Merge by Jacken, 2008/01/31. */
- RT_CID_819x_Netcore = 10,
- RT_CID_Nettronix = 11,
- RT_CID_DLINK = 12,
- RT_CID_PRONET = 13,
- RT_CID_COREGA = 14,
- RT_CID_CHINA_MOBILE = 15,
- RT_CID_819x_ALPHA = 16,
- RT_CID_819x_Sitecom = 17,
- RT_CID_CCX = 18, /* It's set under CCX logo test and isn't demanded
- * for CCX functions, but for test behavior like retry
- * limit and tx report. By Bruce, 2009-02-17. */
- RT_CID_819x_Lenovo = 19,
- RT_CID_819x_QMI = 20,
- RT_CID_819x_Edimax_Belkin = 21,
- RT_CID_819x_Sercomm_Belkin = 22,
- RT_CID_819x_CAMEO1 = 23,
- RT_CID_819x_MSI = 24,
- RT_CID_819x_Acer = 25,
- RT_CID_819x_AzWave_ASUS = 26,
- RT_CID_819x_AzWave = 27, /* For AzWave in PCIe,i
- * The ID is AzWave use and not only Asus */
- RT_CID_819x_HP = 28,
- RT_CID_819x_WNC_COREGA = 29,
- RT_CID_819x_Arcadyan_Belkin = 30,
- RT_CID_819x_SAMSUNG = 31,
- RT_CID_819x_CLEVO = 32,
- RT_CID_819x_DELL = 33,
- RT_CID_819x_PRONETS = 34,
- RT_CID_819x_Edimax_ASUS = 35,
- RT_CID_819x_CAMEO_NETGEAR = 36,
- RT_CID_PLANEX = 37,
- RT_CID_CC_C = 38,
- RT_CID_819x_Xavi = 39,
- RT_CID_819x_FUNAI_TV = 40,
- RT_CID_819x_ALPHA_WD=41,
-};
-
struct eeprom_priv {
u8 bautoload_fail_flag;
u8 bloadfile_fail_flag;
u8 bloadmac_fail_flag;
- u8 mac_addr[6]; /* PermanentAddress */
+ u8 mac_addr[ETH_ALEN] __aligned(2); /* PermanentAddress */
u16 channel_plan;
u8 EepromOrEfuse;
u8 efuse_eeprom_data[HWSET_MAX_SIZE_512] __aligned(4);
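
For reference, mac_addr is now declared with ETH_ALEN and __aligned(2) (and nat25_dmzMac likewise switches to ETH_ALEN), which satisfies the 16-bit alignment contract of ether_addr_copy(). A minimal sketch with a hypothetical struct and helper:

#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/types.h>

struct example_eeprom {
	/* 2-byte aligned so ether_addr_copy() may copy it as u16 words */
	u8 mac_addr[ETH_ALEN] __aligned(2);
};

/* Hypothetical helper: dst must also be 2-byte aligned and ETH_ALEN long. */
static void example_copy_mac(u8 *dst, const struct example_eeprom *eeprom)
{
	ether_addr_copy(dst, eeprom->mac_addr);
}
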
diff --git a/drivers/staging/r8188eu/include/rtw_efuse.h b/drivers/staging/r8188eu/include/rtw_efuse.h
index b3ff46db2091..2e19b7be1075 100644
--- a/drivers/staging/r8188eu/include/rtw_efuse.h
+++ b/drivers/staging/r8188eu/include/rtw_efuse.h
@@ -101,34 +101,13 @@ extern u8 fakeBTEfuseInitMap[];
extern u8 fakeBTEfuseModifiedMap[];
/*------------------------Export global variable----------------------------*/
-u8 efuse_GetCurrentSize(struct adapter *adapter, u16 *size);
-u16 efuse_GetMaxSize(struct adapter *adapter);
-u8 rtw_efuse_access(struct adapter *adapter, u8 read, u16 start_addr,
- u16 cnts, u8 *data);
-u8 rtw_efuse_map_read(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
-u8 rtw_efuse_map_write(struct adapter *adapter, u16 addr, u16 cnts, u8 *data);
-u8 rtw_BT_efuse_map_read(struct adapter *adapter, u16 addr,
- u16 cnts, u8 *data);
-u8 rtw_BT_efuse_map_write(struct adapter *adapter, u16 addr,
- u16 cnts, u8 *data);
-u16 Efuse_GetCurrentSize(struct adapter *adapter, u8 efusetype, bool test);
u8 Efuse_CalculateWordCnts(u8 word_en);
void ReadEFuseByte(struct adapter *adapter, u16 _offset, u8 *pbuf, bool test);
-void EFUSE_GetEfuseDefinition(struct adapter *adapt, u8 type, u8 type1,
- void *out, bool bPseudoTest);
u8 efuse_OneByteRead(struct adapter *adapter, u16 addr, u8 *data, bool test);
u8 efuse_OneByteWrite(struct adapter *adapter, u16 addr, u8 data, bool test);
-void Efuse_PowerSwitch(struct adapter *adapt,u8 bWrite,u8 PwrState);
-int Efuse_PgPacketRead(struct adapter *adapt, u8 offset, u8 *data, bool test);
-int Efuse_PgPacketWrite(struct adapter *adapter, u8 offset, u8 word, u8 *data,
- bool test);
void efuse_WordEnableDataRead(u8 word_en, u8 *sourdata, u8 *targetdata);
-u8 Efuse_WordEnableDataWrite(struct adapter *adapter, u16 efuse_addr,
- u8 word_en, u8 *data, bool test);
-u8 EFUSE_Read1Byte(struct adapter *adapter, u16 address);
void EFUSE_ShadowMapUpdate(struct adapter *adapter, u8 efusetype, bool test);
-void EFUSE_ShadowRead(struct adapter *adapt, u8 type, u16 offset, u32 *val);
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_io.h b/drivers/staging/r8188eu/include/rtw_io.h
index 4b41c7b03972..c6a078210eeb 100644
--- a/drivers/staging/r8188eu/include/rtw_io.h
+++ b/drivers/staging/r8188eu/include/rtw_io.h
@@ -84,30 +84,6 @@ struct intf_priv;
struct intf_hdl;
struct io_queue;
-struct _io_ops {
- u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr);
- u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr);
- u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr);
- int (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
- int (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
- int (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
- int (*_writeN)(struct intf_hdl *pintfhdl, u32 addr, u32 length,
- u8 *pdata);
- int (*_write8_async)(struct intf_hdl *pintfhdl, u32 addr, u8 val);
- int (*_write16_async)(struct intf_hdl *pintfhdl, u32 addr, u16 val);
- int (*_write32_async)(struct intf_hdl *pintfhdl, u32 addr, u32 val);
- void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
- void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
- u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
- u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
- u8 *pmem);
- void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
- void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
-};
-
struct io_req {
struct list_head list;
u32 addr;
@@ -125,7 +101,6 @@ struct io_req {
struct intf_hdl {
struct adapter *padapter;
struct dvobj_priv *pintf_dev;
- struct _io_ops io_ops;
};
struct reg_protocol_rd {
@@ -245,58 +220,21 @@ void unregister_intf_hdl(struct intf_hdl *pintfhdl);
void _rtw_attrib_read(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
void _rtw_attrib_write(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u8 _rtw_read8(struct adapter *adapter, u32 addr);
-u16 _rtw_read16(struct adapter *adapter, u32 addr);
-u32 _rtw_read32(struct adapter *adapter, u32 addr);
+u8 rtw_read8(struct adapter *adapter, u32 addr);
+u16 rtw_read16(struct adapter *adapter, u32 addr);
+u32 rtw_read32(struct adapter *adapter, u32 addr);
void _rtw_read_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void _rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-void _rtw_read_port_cancel(struct adapter *adapter);
+u32 rtw_read_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void rtw_read_port_cancel(struct adapter *adapter);
-int _rtw_write8(struct adapter *adapter, u32 addr, u8 val);
-int _rtw_write16(struct adapter *adapter, u32 addr, u16 val);
-int _rtw_write32(struct adapter *adapter, u32 addr, u32 val);
-int _rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
-
-int _rtw_write8_async(struct adapter *adapter, u32 addr, u8 val);
-int _rtw_write16_async(struct adapter *adapter, u32 addr, u16 val);
-int _rtw_write32_async(struct adapter *adapter, u32 addr, u32 val);
+int rtw_write8(struct adapter *adapter, u32 addr, u8 val);
+int rtw_write16(struct adapter *adapter, u32 addr, u16 val);
+int rtw_write32(struct adapter *adapter, u32 addr, u32 val);
+int rtw_writeN(struct adapter *adapter, u32 addr, u32 length, u8 *pdata);
void _rtw_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u32 _rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-u32 _rtw_write_port_and_wait(struct adapter *adapter, u32 addr, u32 cnt,
- u8 *pmem, int timeout_ms);
-void _rtw_write_port_cancel(struct adapter *adapter);
-
-#define rtw_read8(adapter, addr) _rtw_read8((adapter), (addr))
-#define rtw_read16(adapter, addr) _rtw_read16((adapter), (addr))
-#define rtw_read32(adapter, addr) _rtw_read32((adapter), (addr))
-#define rtw_read_mem(adapter, addr, cnt, mem) \
- _rtw_read_mem((adapter), (addr), (cnt), (mem))
-#define rtw_read_port(adapter, addr, cnt, mem) \
- _rtw_read_port((adapter), (addr), (cnt), (mem))
-#define rtw_read_port_cancel(adapter) _rtw_read_port_cancel((adapter))
-
-#define rtw_write8(adapter, addr, val) \
- _rtw_write8((adapter), (addr), (val))
-#define rtw_write16(adapter, addr, val) \
- _rtw_write16((adapter), (addr), (val))
-#define rtw_write32(adapter, addr, val) \
- _rtw_write32((adapter), (addr), (val))
-#define rtw_writeN(adapter, addr, length, data) \
- _rtw_writeN((adapter), (addr), (length), (data))
-#define rtw_write8_async(adapter, addr, val) \
- _rtw_write8_async((adapter), (addr), (val))
-#define rtw_write16_async(adapter, addr, val) \
- _rtw_write16_async((adapter), (addr), (val))
-#define rtw_write32_async(adapter, addr, val) \
- _rtw_write32_async((adapter), (addr), (val))
-#define rtw_write_mem(adapter, addr, cnt, mem) \
- _rtw_write_mem((adapter), (addr), (cnt), (mem))
-#define rtw_write_port(adapter, addr, cnt, mem) \
- _rtw_write_port((adapter), (addr), (cnt), (mem))
-#define rtw_write_port_and_wait(adapter, addr, cnt, mem, timeout_ms) \
- _rtw_write_port_and_wait((adapter), (addr), (cnt), (mem), (timeout_ms))
-#define rtw_write_port_cancel(adapter) _rtw_write_port_cancel((adapter))
+u32 rtw_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
+void rtw_write_port_cancel(struct adapter *adapter);
void rtw_write_scsi(struct adapter *adapter, u32 cnt, u8 *pmem);
@@ -340,9 +278,6 @@ void async_write32(struct adapter *adapter, u32 addr, u32 val,
void async_write_mem(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
void async_write_port(struct adapter *adapter, u32 addr, u32 cnt, u8 *pmem);
-int rtw_init_io_priv(struct adapter *padapter,
- void (*set_intf_ops)(struct _io_ops *pops));
-
uint alloc_io_queue(struct adapter *adapter);
void free_io_queue(struct adapter *adapter);
void async_bus_io(struct io_queue *pio_q);
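
For reference, with the _io_ops vtable and the _rtw_read8()/rtw_read8() wrapper macros removed, callers invoke the USB I/O accessors directly through the prototypes kept above. A minimal read-modify-write sketch; the helper itself is hypothetical:

#include <linux/types.h>

struct adapter;

u8 rtw_read8(struct adapter *adapter, u32 addr);
int rtw_write8(struct adapter *adapter, u32 addr, u8 val);

/* Hypothetical helper: set a bit in an 8-bit MAC register. */
static int example_set_reg8_bit(struct adapter *adapter, u32 addr, u8 bit)
{
	u8 val = rtw_read8(adapter, addr);

	return rtw_write8(adapter, addr, val | bit);
}
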
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/r8188eu/include/rtw_ioctl_rtl.h
deleted file mode 100644
index 6d3d1ef923f6..000000000000
--- a/drivers/staging/r8188eu/include/rtw_ioctl_rtl.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_IOCTL_RTL_H_
-#define _RTW_IOCTL_RTL_H_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-/* oid_rtl_seg_01_01 ************** */
-int oid_rt_get_signal_quality_hdl(struct oid_par_priv *poid_par_priv);/* 84 */
-int oid_rt_get_small_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_middle_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_large_packet_crc_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_retry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_rx_retry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_rx_total_packet_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_beacon_ok_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_tx_beacon_err_hdl(struct oid_par_priv *poid_par_priv);
-
-int oid_rt_pro_set_fw_dig_state_hdl(struct oid_par_priv *poid_par_priv);/* 8a */
-int oid_rt_pro_set_fw_ra_state_hdl(struct oid_par_priv *poid_par_priv); /* 8b */
-
-int oid_rt_get_rx_icv_err_hdl(struct oid_par_priv *poid_par_priv);/* 93 */
-int oid_rt_set_encryption_algorithm_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_ap_ip_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channelplan_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_channelplan_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_preamble_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_bcn_intvl_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_dedicate_probe_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_total_tx_bytes_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_total_rx_bytes_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_current_tx_power_level_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_enc_key_mismatch_count_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_enc_key_match_count_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channel_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_hardware_radio_off_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_key_mismatch_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_supported_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_channel_list_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_scan_in_progress_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_forced_data_rate_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_wireless_mode_for_scan_list_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_get_bss_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_scan_with_magic_packet_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_01_03 section start ************** */
-int oid_rt_ap_get_associated_station_list_hdl(struct oid_par_priv *priv);
-int oid_rt_ap_switch_into_ap_mode_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_ap_supported_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_ap_set_passphrase_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_01_11 */
-int oid_rt_pro_rf_write_registry_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_pro_rf_read_registry_hdl(struct oid_par_priv *poid_par_priv);
-
-/* oid_rtl_seg_03_00 section start ************** */
-int oid_rt_get_connect_state_hdl(struct oid_par_priv *poid_par_priv);
-int oid_rt_set_default_key_id_hdl(struct oid_par_priv *poid_par_priv);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_ioctl_set.h b/drivers/staging/r8188eu/include/rtw_ioctl_set.h
index 6216b8ab3a79..7365079c704f 100644
--- a/drivers/staging/r8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/r8188eu/include/rtw_ioctl_set.h
@@ -8,7 +8,6 @@
typedef u8 NDIS_802_11_PMKID_VALUE[16];
-u8 rtw_set_802_11_add_key(struct adapter *adapt, struct ndis_802_11_key *key);
u8 rtw_set_802_11_authentication_mode(struct adapter *adapt,
enum ndis_802_11_auth_mode authmode);
u8 rtw_set_802_11_bssid(struct adapter*adapter, u8 *bssid);
@@ -19,15 +18,8 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter*adapter,
int ssid_max_num);
u8 rtw_set_802_11_infrastructure_mode(struct adapter *adapter,
enum ndis_802_11_network_infra type);
-u8 rtw_set_802_11_remove_wep(struct adapter *adapter, u32 keyindex);
u8 rtw_set_802_11_ssid(struct adapter *adapt, struct ndis_802_11_ssid *ssid);
-u8 rtw_set_802_11_remove_key(struct adapter *adapt,
- struct ndis_802_11_remove_key *key);
-u8 rtw_validate_ssid(struct ndis_802_11_ssid *ssid);
u16 rtw_get_cur_max_rate(struct adapter *adapter);
-int rtw_set_scan_mode(struct adapter *adapter, enum rt_scan_type scan_mode);
-int rtw_set_channel_plan(struct adapter *adapter, u8 channel_plan);
-int rtw_set_country(struct adapter *adapter, const char *country_code);
int rtw_change_ifname(struct adapter *padapter, const char *ifname);
#endif
diff --git a/drivers/staging/r8188eu/include/rtw_iol.h b/drivers/staging/r8188eu/include/rtw_iol.h
index 471f9ca092a8..fb88ebc1dabb 100644
--- a/drivers/staging/r8188eu/include/rtw_iol.h
+++ b/drivers/staging/r8188eu/include/rtw_iol.h
@@ -33,10 +33,6 @@ enum ioreg_cmd {
struct xmit_frame *rtw_IOL_accquire_xmit_frame(struct adapter *adapter);
int rtw_IOL_append_cmds(struct xmit_frame *xmit_frame, u8 *IOL_cmds,
u32 cmd_len);
-int rtw_IOL_append_LLT_cmd(struct xmit_frame *xmit_frame, u8 page_boundary);
-int rtw_IOL_exec_cmds_sync(struct adapter *adapter,
- struct xmit_frame *xmit_frame, u32 max_wating_ms,
- u32 bndy_cnt);
bool rtw_IOL_applied(struct adapter *adapter);
int rtw_IOL_append_DELAY_US_cmd(struct xmit_frame *xmit_frame, u16 us);
int rtw_IOL_append_DELAY_MS_cmd(struct xmit_frame *xmit_frame, u16 ms);
@@ -63,6 +59,5 @@ int _rtw_IOL_append_WRF_cmd(struct xmit_frame *xmit_frame, u8 rf_path,
_rtw_IOL_append_WRF_cmd((xmit_frame),(rf_path), (addr), (value), (mask))
u8 rtw_IOL_cmd_boundary_handle(struct xmit_frame *pxmit_frame);
-void rtw_IOL_cmd_buf_dump(struct adapter *Adapter,int buf_len,u8 *pbuf);
#endif /* __RTW_IOL_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_led.h b/drivers/staging/r8188eu/include/rtw_led.h
index f0965aa5b470..c035fe267635 100644
--- a/drivers/staging/r8188eu/include/rtw_led.h
+++ b/drivers/staging/r8188eu/include/rtw_led.h
@@ -107,8 +107,6 @@ struct LED_871x {
struct timer_list BlinkTimer; /* Timer object for led blinking. */
- u8 bSWLedCtrl;
-
/* ALPHA, added by chiyoko, 20090106 */
u8 bLedNoLinkBlinkInProgress;
u8 bLedLinkBlinkInProgress;
@@ -127,30 +125,12 @@ struct LED_871x {
(((struct LED_871x *)_LED_871x)->bLedWPSBlinkInProgress || \
((struct LED_871x *)_LED_871x)->bLedScanBlinkInProgress)
-/* LED customization. */
-
-enum LED_STRATEGY_871x {
- SW_LED_MODE0 = 0, /* SW control 1 LED via GPIO0. It is default option.*/
- SW_LED_MODE1= 1, /* 2 LEDs, through LED0 and LED1. For ALPHA. */
- SW_LED_MODE2 = 2, /* SW control 1 LED via GPIO0, customized for AzWave
- * 8187 minicard. */
- SW_LED_MODE3 = 3, /* SW control 1 LED via GPIO0, customized for Sercomm
- * Printer Server case. */
- SW_LED_MODE4 = 4, /* for Edimax / Belkin */
- SW_LED_MODE5 = 5, /* for Sercomm / Belkin */
- SW_LED_MODE6 = 6, /* for 88CU minicard, porting from ce SW_LED_MODE7 */
- HW_LED = 50, /* HW control 2 LEDs, LED0 and LED1 (there are 4
- * different control modes, see MAC.CONFIG1 for details.)*/
- LED_ST_NONE = 99,
-};
-
void LedControl8188eu(struct adapter *padapter, enum LED_CTL_MODE LedAction);
struct led_priv{
/* add for led control */
struct LED_871x SwLed0;
struct LED_871x SwLed1;
- enum LED_STRATEGY_871x LedStrategy;
u8 bRegUseLed;
void (*LedControlHandler)(struct adapter *padapter,
enum LED_CTL_MODE LedAction);
diff --git a/drivers/staging/r8188eu/include/rtw_mlme.h b/drivers/staging/r8188eu/include/rtw_mlme.h
index bbb41471472d..e8d51f495702 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme.h
@@ -379,7 +379,6 @@ struct mlme_priv {
u8 *assoc_rsp;
u32 assoc_rsp_len;
-#if defined (CONFIG_88EU_AP_MODE)
/* Number of associated Non-ERP stations (i.e., stations using 802.11b
* in 802.11g BSS) */
int num_sta_non_erp;
@@ -428,18 +427,14 @@ struct mlme_priv {
u32 p2p_assoc_req_ie_len;
spinlock_t bcn_update_lock;
u8 update_bcn;
-#endif /* if defined (CONFIG_88EU_AP_MODE) */
};
-#ifdef CONFIG_88EU_AP_MODE
-
struct hostapd_priv {
struct adapter *padapter;
};
int hostapd_mode_init(struct adapter *padapter);
void hostapd_mode_unload(struct adapter *padapter);
-#endif
extern unsigned char WPA_TKIP_CIPHER[4];
extern unsigned char RSN_TKIP_CIPHER[4];
@@ -562,7 +557,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue);
void rtw_indicate_disconnect(struct adapter *adapter);
void rtw_indicate_connect(struct adapter *adapter);
void rtw_indicate_scan_done( struct adapter *padapter, bool aborted);
-void rtw_scan_abort(struct adapter *adapter);
int rtw_restruct_sec_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie,
uint in_len);
@@ -589,10 +583,6 @@ void rtw_free_mlme_priv_ie_data(struct mlme_priv *pmlmepriv);
void _rtw_free_mlme_priv(struct mlme_priv *pmlmepriv);
-int _rtw_enqueue_network(struct __queue *queue, struct wlan_network *pnetwork);
-
-struct wlan_network *_rtw_dequeue_network(struct __queue *queue);
-
struct wlan_network *_rtw_alloc_network(struct mlme_priv *pmlmepriv);
void _rtw_free_network(struct mlme_priv *pmlmepriv,
@@ -607,7 +597,6 @@ void _rtw_free_network_queue(struct adapter *padapter, u8 isfreeall);
int rtw_if_up(struct adapter *padapter);
u8 *rtw_get_capability_from_ie(u8 *ie);
-u8 *rtw_get_timestampe_from_ie(u8 *ie);
u8 *rtw_get_beacon_interval_from_ie(u8 *ie);
void rtw_joinbss_reset(struct adapter *padapter);
diff --git a/drivers/staging/r8188eu/include/rtw_mlme_ext.h b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
index d2f4d3ce7b90..5b307ad3afa5 100644
--- a/drivers/staging/r8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/r8188eu/include/rtw_mlme_ext.h
@@ -416,9 +416,7 @@ struct mlme_ext_priv {
u64 TSFValue;
-#ifdef CONFIG_88EU_AP_MODE
unsigned char bstart_bss;
-#endif
u8 update_channel_plan_by_ap_done;
/* recv_decache check for Action_public frame */
u8 action_public_dialog_token;
@@ -447,9 +445,7 @@ void Set_MSR(struct adapter *padapter, u8 type);
u8 rtw_get_oper_ch(struct adapter *adapter);
void rtw_set_oper_ch(struct adapter *adapter, u8 ch);
-u8 rtw_get_oper_bw(struct adapter *adapter);
void rtw_set_oper_bw(struct adapter *adapter, u8 bw);
-u8 rtw_get_oper_choffset(struct adapter *adapter);
void rtw_set_oper_choffset(struct adapter *adapter, u8 offset);
void set_channel_bwmode(struct adapter *padapter, unsigned char channel,
@@ -464,7 +460,6 @@ void write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key);
void clear_cam_entry(struct adapter *padapter, u8 entry);
void invalidate_cam_all(struct adapter *padapter);
-void CAM_empty_entry(struct adapter * Adapter, u8 ucIndex);
int allocate_fw_sta_entry(struct adapter *padapter);
void flush_all_cam_entry(struct adapter *padapter);
@@ -475,7 +470,6 @@ u8 collect_bss_info(struct adapter *padapter, struct recv_frame *precv_frame,
void update_network(struct wlan_bssid_ex *dst, struct wlan_bssid_ex *src,
struct adapter *adapter, bool update_ie);
-int get_bsstype(unsigned short capability);
u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork);
u16 get_beacon_interval(struct wlan_bssid_ex *bss);
@@ -520,8 +514,6 @@ unsigned int receive_disconnect(struct adapter *padapter,
unsigned char get_highest_rate_idx(u32 mask);
int support_short_GI(struct adapter *padapter, struct HT_caps_element *caps);
unsigned int is_ap_in_tkip(struct adapter *padapter);
-unsigned int is_ap_in_wep(struct adapter *padapter);
-unsigned int should_forbid_n_rate(struct adapter *padapter);
void report_join_res(struct adapter *padapter, int res);
void report_survey_event(struct adapter *padapter, struct recv_frame *precv_frame);
@@ -544,18 +536,14 @@ s32 dump_mgntframe_and_wait(struct adapter *padapter,
s32 dump_mgntframe_and_wait_ack(struct adapter *padapter,
struct xmit_frame *pmgntframe);
-#ifdef CONFIG_88EU_P2P
void issue_probersp_p2p(struct adapter *padapter, unsigned char *da);
void issue_p2p_provision_request(struct adapter *padapter, u8 *pssid,
u8 ussidlen, u8 *pdev_raddr);
void issue_p2p_GO_request(struct adapter *padapter, u8 *raddr);
void issue_probereq_p2p(struct adapter *padapter, u8 *da);
-int issue_probereq_p2p_ex(struct adapter *adapter, u8 *da, int try_cnt,
- int wait_ms);
void issue_p2p_invitation_response(struct adapter *padapter, u8 *raddr,
u8 dialogToken, u8 success);
void issue_p2p_invitation_request(struct adapter *padapter, u8* raddr);
-#endif /* CONFIG_88EU_P2P */
void issue_beacon(struct adapter *padapter, int timeout_ms);
void issue_probersp(struct adapter *padapter, unsigned char *da,
u8 is_valid_p2p_probereq);
@@ -576,8 +564,6 @@ int issue_deauth(struct adapter *padapter, unsigned char *da,
unsigned short reason);
int issue_deauth_ex(struct adapter *padapter, u8 *da, unsigned short reason,
int try_cnt, int wait_ms);
-void issue_action_spct_ch_switch(struct adapter *padapter, u8 *ra, u8 new_ch,
- u8 ch_offset);
void issue_action_BA(struct adapter *padapter, unsigned char *raddr,
unsigned char action, unsigned short status);
unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr);
diff --git a/drivers/staging/r8188eu/include/rtw_mp.h b/drivers/staging/r8188eu/include/rtw_mp.h
deleted file mode 100644
index 3a259d991348..000000000000
--- a/drivers/staging/r8188eu/include/rtw_mp.h
+++ /dev/null
@@ -1,474 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_MP_H_
-#define _RTW_MP_H_
-
-/* 00 - Success */
-/* 11 - Error */
-#define STATUS_SUCCESS (0x00000000L)
-#define STATUS_PENDING (0x00000103L)
-
-#define STATUS_UNSUCCESSFUL (0xC0000001L)
-#define STATUS_INSUFFICIENT_RESOURCES (0xC000009AL)
-#define STATUS_NOT_SUPPORTED (0xC00000BBL)
-
-#define NDIS_STATUS_SUCCESS ((int)STATUS_SUCCESS)
-#define NDIS_STATUS_PENDING ((int)STATUS_PENDING)
-#define NDIS_STATUS_NOT_RECOGNIZED ((int)0x00010001L)
-#define NDIS_STATUS_NOT_COPIED ((int)0x00010002L)
-#define NDIS_STATUS_NOT_ACCEPTED ((int)0x00010003L)
-#define NDIS_STATUS_CALL_ACTIVE ((int)0x00010007L)
-
-#define NDIS_STATUS_FAILURE ((int)STATUS_UNSUCCESSFUL)
-#define NDIS_STATUS_RESOURCES ((int)STATUS_INSUFFICIENT_RESOURCES)
-#define NDIS_STATUS_CLOSING ((int)0xC0010002L)
-#define NDIS_STATUS_BAD_VERSION ((int)0xC0010004L)
-#define NDIS_STATUS_BAD_CHARACTERISTICS ((int)0xC0010005L)
-#define NDIS_STATUS_ADAPTER_NOT_FOUND ((int)0xC0010006L)
-#define NDIS_STATUS_OPEN_FAILED ((int)0xC0010007L)
-#define NDIS_STATUS_DEVICE_FAILED ((int)0xC0010008L)
-#define NDIS_STATUS_MULTICAST_FULL ((int)0xC0010009L)
-#define NDIS_STATUS_MULTICAST_EXISTS ((int)0xC001000AL)
-#define NDIS_STATUS_MULTICAST_NOT_FOUND ((int)0xC001000BL)
-#define NDIS_STATUS_REQUEST_ABORTED ((int)0xC001000CL)
-#define NDIS_STATUS_RESET_IN_PROGRESS ((int)0xC001000DL)
-#define NDIS_STATUS_CLOSING_INDICATING ((int)0xC001000EL)
-#define NDIS_STATUS_NOT_SUPPORTED ((int)STATUS_NOT_SUPPORTED)
-#define NDIS_STATUS_INVALID_PACKET ((int)0xC001000FL)
-#define NDIS_STATUS_OPEN_LIST_FULL ((int)0xC0010010L)
-#define NDIS_STATUS_ADAPTER_NOT_READY ((int)0xC0010011L)
-#define NDIS_STATUS_ADAPTER_NOT_OPEN ((int)0xC0010012L)
-#define NDIS_STATUS_NOT_INDICATING ((int)0xC0010013L)
-#define NDIS_STATUS_INVALID_LENGTH ((int)0xC0010014L)
-#define NDIS_STATUS_INVALID_DATA ((int)0xC0010015L)
-#define NDIS_STATUS_BUFFER_TOO_SHORT ((int)0xC0010016L)
-#define NDIS_STATUS_INVALID_OID ((int)0xC0010017L)
-#define NDIS_STATUS_ADAPTER_REMOVED ((int)0xC0010018L)
-#define NDIS_STATUS_UNSUPPORTED_MEDIA ((int)0xC0010019L)
-#define NDIS_STATUS_GROUP_ADDRESS_IN_USE ((int)0xC001001AL)
-#define NDIS_STATUS_FILE_NOT_FOUND ((int)0xC001001BL)
-#define NDIS_STATUS_ERROR_READING_FILE ((int)0xC001001CL)
-#define NDIS_STATUS_ALREADY_MAPPED ((int)0xC001001DL)
-#define NDIS_STATUS_RESOURCE_CONFLICT ((int)0xC001001EL)
-#define NDIS_STATUS_NO_CABLE ((int)0xC001001FL)
-
-#define NDIS_STATUS_INVALID_SAP ((int)0xC0010020L)
-#define NDIS_STATUS_SAP_IN_USE ((int)0xC0010021L)
-#define NDIS_STATUS_INVALID_ADDRESS ((int)0xC0010022L)
-#define NDIS_STATUS_VC_NOT_ACTIVATED ((int)0xC0010023L)
-#define NDIS_STATUS_DEST_OUT_OF_ORDER ((int)0xC0010024L) /*cause 27*/
-#define NDIS_STATUS_VC_NOT_AVAILABLE ((int)0xC0010025L) /*cause 35,45 */
-#define NDIS_STATUS_CELLRATE_NOT_AVAILABLE ((int)0xC0010026L) /*cause 37*/
-#define NDIS_STATUS_INCOMPATABLE_QOS ((int)0xC0010027L) /*cause 49*/
-#define NDIS_STATUS_AAL_PARAMS_UNSUPPORTED ((int)0xC0010028L) /*cause 93*/
-#define NDIS_STATUS_NO_ROUTE_TO_DESTINATION ((int)0xC0010029L) /*cause 3 */
-
-enum antenna_path {
- ANTENNA_NONE = 0x00,
- ANTENNA_D,
- ANTENNA_C,
- ANTENNA_CD,
- ANTENNA_B,
- ANTENNA_BD,
- ANTENNA_BC,
- ANTENNA_BCD,
- ANTENNA_A,
- ANTENNA_AD,
- ANTENNA_AC,
- ANTENNA_ACD,
- ANTENNA_AB,
- ANTENNA_ABD,
- ANTENNA_ABC,
- ANTENNA_ABCD
-};
-
-#define MAX_MP_XMITBUF_SZ 2048
-#define NR_MP_XMITFRAME 8
-
-struct mp_xmit_frame {
- struct list_head list;
- struct pkt_attrib attrib;
- struct sk_buff *pkt;
- int frame_tag;
- struct adapter *padapter;
- struct urb *pxmit_urb[8];
- /* insert urb, irp, and irpcnt info below... */
- u8 *mem_addr;
- u32 sz[8];
- u8 bpending[8];
- int ac_tag[8];
- int last[8];
- uint irpcnt;
- uint fragcnt;
- uint mem[(MAX_MP_XMITBUF_SZ >> 2)];
-};
-
-struct mp_wiparam {
- u32 bcompleted;
- u32 act_type;
- u32 io_offset;
- u32 io_value;
-};
-
-typedef void(*wi_act_func)(void *padapter);
-
-struct mp_tx {
- u8 stop;
- u32 count, sended;
- u8 payload;
- struct pkt_attrib attrib;
- struct tx_desc desc;
- u8 *pallocated_buf;
- u8 *buf;
- u32 buf_size, write_size;
- void *PktTxThread;
-};
-
-#include "Hal8188EPhyCfg.h"
-
-#define MP_MAX_LINES 1000
-#define MP_MAX_LINES_BYTES 256
-
-typedef void (*MPT_WORK_ITEM_HANDLER)(void *Adapter);
-
-struct mpt_context {
- /* Indicate if we have started Mass Production Test. */
- bool bMassProdTest;
-
- /* Indicate if the driver is unloading or unloaded. */
- bool bMptDrvUnload;
-
- struct semaphore MPh2c_Sema;
- struct timer_list MPh2c_timeout_timer;
-/* Event used to sync H2c for BT control */
-
- bool MptH2cRspEvent;
- bool MptBtC2hEvent;
- bool bMPh2c_timeout;
-
- /* 8190 PCI does not support NDIS_WORK_ITEM. */
- /* Work Item for Mass Production Test. */
- /* Event used to sync the case unloading driver and MptWorkItem
- * is still in progress. */
- /* Indicate a MptWorkItem is scheduled and not yet finished. */
- bool bMptWorkItemInProgress;
- /* An instance which implements function and context of MptWorkItem. */
- MPT_WORK_ITEM_HANDLER CurrMptAct;
-
- /* 1=Start, 0=Stop from UI. */
- u32 MptTestStart;
- /* _TEST_MODE, defined in MPT_Req2.h */
- u32 MptTestItem;
- /* Variable needed in each implementation of CurrMptAct. */
- u32 MptActType; /* Type of action performed in CurrMptAct. */
- /* The Offset of IO operation is depend of MptActType. */
- u32 MptIoOffset;
- /* The Value of IO operation is depend of MptActType. */
- u32 MptIoValue;
- /* The RfPath of IO operation is depend of MptActType. */
- u32 MptRfPath;
-
- enum wireless_mode MptWirelessModeToSw; /* Wireless mode to switch. */
- u8 MptChannelToSw; /* Channel to switch. */
- u8 MptInitGainToSet; /* Initial gain to set. */
- u32 MptBandWidth; /* bandwidth to switch. */
- u32 MptRateIndex; /* rate index. */
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpCckTxPower;
- /* Register value kept for Single Carrier Tx test. */
- u8 btMpOfdmTxPower;
- /* For MP Tx Power index */
- u8 TxPwrLevel[2]; /* rf-A, rf-B */
-
- /* Content of RCR Regsiter for Mass Production Test. */
- u32 MptRCR;
- /* true if we only receive packets with specific pattern. */
- bool bMptFilterPattern;
- /* Rx OK count, statistics used in Mass Production Test. */
- u32 MptRxOkCnt;
- /* Rx CRC32 error count, statistics used in Mass Production Test. */
- u32 MptRxCrcErrCnt;
-
- bool bCckContTx; /* true if we are in CCK Continuous Tx test. */
- bool bOfdmContTx; /* true if we are in OFDM Continuous Tx test. */
- bool bStartContTx; /* true if we have start Continuous Tx test. */
- /* true if we are in Single Carrier Tx test. */
- bool bSingleCarrier;
- /* true if we are in Carrier Suppression Tx Test. */
- bool bCarrierSuppression;
- /* true if we are in Single Tone Tx test. */
- bool bSingleTone;
-
- /* ACK counter asked by K.Y.. */
- bool bMptEnableAckCounter;
- u32 MptAckCounter;
-
- u8 APK_bound[2]; /* for APK path A/path B */
- bool bMptIndexEven;
-
- u8 backup0xc50;
- u8 backup0xc58;
- u8 backup0xc30;
- u8 backup0x52_RF_A;
- u8 backup0x52_RF_B;
-
- u8 h2cReqNum;
- u8 c2hBuf[20];
-
- u8 btInBuf[100];
- u32 mptOutLen;
- u8 mptOutBuf[100];
-};
-
-enum {
- WRITE_REG = 1,
- READ_REG,
- WRITE_RF,
- READ_RF,
- MP_START,
- MP_STOP,
- MP_RATE,
- MP_CHANNEL,
- MP_BANDWIDTH,
- MP_TXPOWER,
- MP_ANT_TX,
- MP_ANT_RX,
- MP_CTX,
- MP_QUERY,
- MP_ARX,
- MP_PSD,
- MP_PWRTRK,
- MP_THER,
- MP_IOCTL,
- EFUSE_GET,
- EFUSE_SET,
- MP_RESET_STATS,
- MP_DUMP,
- MP_PHYPARA,
- MP_SetRFPathSwh,
- MP_QueryDrvStats,
- MP_SetBT,
- CTA_TEST,
- MP_NULL,
-};
-
-struct mp_priv {
- struct adapter *papdater;
-
- /* Testing Flag */
- /* 0 for normal type packet, 1 for loopback packet (16bytes TXCMD) */
- u32 mode;
-
- u32 prev_fw_state;
-
- /* OID cmd handler */
- struct mp_wiparam workparam;
-
- /* Tx Section */
- u8 TID;
- u32 tx_pktcount;
- struct mp_tx tx;
-
- /* Rx Section */
- u32 rx_pktcount;
- u32 rx_crcerrpktcount;
- u32 rx_pktloss;
-
- struct recv_stat rxstat;
-
- /* RF/BB relative */
- u8 channel;
- u8 bandwidth;
- u8 prime_channel_offset;
- u8 txpoweridx;
- u8 txpoweridx_b;
- u8 rateidx;
- u32 preamble;
- u32 CrystalCap;
-
- u16 antenna_tx;
- u16 antenna_rx;
-
- u8 check_mp_pkt;
-
- u8 bSetTxPower;
-
- struct wlan_network mp_network;
- unsigned char network_macaddr[ETH_ALEN];
-
- u8 *pallocated_mp_xmitframe_buf;
- u8 *pmp_xmtframe_buf;
- struct __queue free_mp_xmitqueue;
- u32 free_mp_xmitframe_cnt;
-
- struct mpt_context MptCtx;
-};
-
-struct iocmd_struct {
- u8 cmdclass;
- u16 value;
- u8 index;
-};
-
-struct rf_reg_param {
- u32 path;
- u32 offset;
- u32 value;
-};
-
-struct bb_reg_param {
- u32 offset;
- u32 value;
-};
-/* */
-
-#define LOWER true
-#define RAISE false
-
-/* Hardware Registers */
-#define BB_REG_BASE_ADDR 0x800
-
-/* MP variables */
-enum mp_mode_{
- MP_OFF,
- MP_ON,
- MP_ERR,
- MP_CONTINUOUS_TX,
- MP_SINGLE_CARRIER_TX,
- MP_CARRIER_SUPPRISSION_TX,
- MP_SINGLE_TONE_TX,
- MP_PACKET_TX,
- MP_PACKET_RX
-};
-
-extern u8 mpdatarate[NumRates];
-
-/* MP set force data rate base on the definition. */
-enum mpt_rate_index {
- /* CCK rate. */
- MPT_RATE_1M, /* 0 */
- MPT_RATE_2M,
- MPT_RATE_55M,
- MPT_RATE_11M, /* 3 */
-
- /* OFDM rate. */
- MPT_RATE_6M, /* 4 */
- MPT_RATE_9M,
- MPT_RATE_12M,
- MPT_RATE_18M,
- MPT_RATE_24M,
- MPT_RATE_36M,
- MPT_RATE_48M,
- MPT_RATE_54M, /* 11 */
-
- /* HT rate. */
- MPT_RATE_MCS0, /* 12 */
- MPT_RATE_MCS1,
- MPT_RATE_MCS2,
- MPT_RATE_MCS3,
- MPT_RATE_MCS4,
- MPT_RATE_MCS5,
- MPT_RATE_MCS6,
- MPT_RATE_MCS7, /* 19 */
- MPT_RATE_MCS8,
- MPT_RATE_MCS9,
- MPT_RATE_MCS10,
- MPT_RATE_MCS11,
- MPT_RATE_MCS12,
- MPT_RATE_MCS13,
- MPT_RATE_MCS14,
- MPT_RATE_MCS15, /* 27 */
- MPT_RATE_LAST
-};
-
-#define MAX_TX_PWR_INDEX_N_MODE 64 /* 0x3F */
-
-enum power_mode {
- POWER_LOW = 0,
- POWER_NORMAL
-};
-
-#define RX_PKT_BROADCAST 1
-#define RX_PKT_DEST_ADDR 2
-#define RX_PKT_PHY_MATCH 3
-
-enum encry_ctrl_state {
- HW_CONTROL, /* hw encryption& decryption */
- SW_CONTROL, /* sw encryption& decryption */
- HW_ENCRY_SW_DECRY, /* hw encryption & sw decryption */
- SW_ENCRY_HW_DECRY /* sw encryption & hw decryption */
-};
-
-s32 init_mp_priv(struct adapter *padapter);
-void free_mp_priv(struct mp_priv *pmp_priv);
-s32 MPT_InitializeAdapter(struct adapter *padapter, u8 Channel);
-void MPT_DeInitAdapter(struct adapter *padapter);
-s32 mp_start_test(struct adapter *padapter);
-void mp_stop_test(struct adapter *padapter);
-
-u32 _read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask);
-void _write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 bitmask, u32 val);
-
-u32 read_bbreg(struct adapter *padapter, u32 addr, u32 bitmask);
-void write_bbreg(struct adapter *padapter, u32 addr, u32 bitmask, u32 val);
-u32 read_rfreg(struct adapter *padapter, u8 rfpath, u32 addr);
-void write_rfreg(struct adapter *padapter, u8 rfpath, u32 addr, u32 val);
-
-void SetChannel(struct adapter *pAdapter);
-void SetBandwidth(struct adapter *pAdapter);
-void SetTxPower(struct adapter *pAdapter);
-void SetAntennaPathPower(struct adapter *pAdapter);
-void SetDataRate(struct adapter *pAdapter);
-
-void SetAntenna(struct adapter *pAdapter);
-
-s32 SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
-void GetThermalMeter(struct adapter *pAdapter, u8 *value);
-
-void SetContinuousTx(struct adapter *pAdapter, u8 bStart);
-void SetSingleCarrierTx(struct adapter *pAdapter, u8 bStart);
-void SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
-void SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
-void PhySetTxPowerLevel(struct adapter *pAdapter);
-
-void fill_txdesc_for_mp(struct adapter *padapter, struct tx_desc *ptxdesc);
-void SetPacketTx(struct adapter *padapter);
-void SetPacketRx(struct adapter *pAdapter, u8 bStartRx);
-
-void ResetPhyRxPktCount(struct adapter *pAdapter);
-u32 GetPhyRxPktReceived(struct adapter *pAdapter);
-u32 GetPhyRxPktCRC32Error(struct adapter *pAdapter);
-
-s32 SetPowerTracking(struct adapter *padapter, u8 enable);
-void GetPowerTracking(struct adapter *padapter, u8 *enable);
-u32 mp_query_psd(struct adapter *pAdapter, u8 *data);
-void Hal_SetAntenna(struct adapter *pAdapter);
-void Hal_SetBandwidth(struct adapter *pAdapter);
-void Hal_SetTxPower(struct adapter *pAdapter);
-void Hal_SetCarrierSuppressionTx(struct adapter *pAdapter, u8 bStart);
-void Hal_SetSingleToneTx(struct adapter *pAdapter, u8 bStart);
-void Hal_SetSingleCarrierTx (struct adapter *pAdapter, u8 bStart);
-void Hal_SetContinuousTx (struct adapter *pAdapter, u8 bStart);
-void Hal_SetBandwidth(struct adapter *pAdapter);
-void Hal_SetDataRate(struct adapter *pAdapter);
-void Hal_SetChannel(struct adapter *pAdapter);
-void Hal_SetAntennaPathPower(struct adapter *pAdapter);
-s32 Hal_SetThermalMeter(struct adapter *pAdapter, u8 target_ther);
-s32 Hal_SetPowerTracking(struct adapter *padapter, u8 enable);
-void Hal_GetPowerTracking(struct adapter *padapter, u8 * enable);
-void Hal_GetThermalMeter(struct adapter *pAdapter, u8 *value);
-void Hal_mpt_SwitchRfSetting(struct adapter *pAdapter);
-void Hal_MPT_CCKTxPowerAdjust(struct adapter * Adapter, bool bInCH14);
-void Hal_MPT_CCKTxPowerAdjustbyIndex(struct adapter *pAdapter, bool beven);
-void Hal_SetCCKTxPower(struct adapter *pAdapter, u8 * TxPower);
-void Hal_SetOFDMTxPower(struct adapter *pAdapter, u8 * TxPower);
-void Hal_TriggerRFThermalMeter(struct adapter *pAdapter);
-u8 Hal_ReadRFThermalMeter(struct adapter *pAdapter);
-void Hal_SetCCKContinuousTx(struct adapter *pAdapter, u8 bStart);
-void Hal_SetOFDMContinuousTx(struct adapter *pAdapter, u8 bStart);
-void Hal_ProSetCrystalCap (struct adapter *pAdapter , u32 CrystalCapVal);
-void _rtw_mp_xmit_priv(struct xmit_priv *pxmitpriv);
-void MP_PHY_SetRFPathSwitch(struct adapter *pAdapter ,bool bMain);
-
-#endif /* _RTW_MP_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_mp_ioctl.h b/drivers/staging/r8188eu/include/rtw_mp_ioctl.h
deleted file mode 100644
index cf99f39a582e..000000000000
--- a/drivers/staging/r8188eu/include/rtw_mp_ioctl.h
+++ /dev/null
@@ -1,242 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-#ifndef _RTW_MP_IOCTL_H_
-#define _RTW_MP_IOCTL_H_
-
-#include "drv_types.h"
-#include "mp_custom_oid.h"
-#include "rtw_ioctl.h"
-#include "rtw_ioctl_rtl.h"
-#include "rtw_efuse.h"
-#include "rtw_mp.h"
-
-struct cfg_dbg_msg_struct {
- u32 DebugLevel;
- u32 DebugComponent_H32;
- u32 DebugComponent_L32;
-};
-
-struct mp_rw_reg {
- u32 offset;
- u32 width;
- u32 value;
-};
-
-struct efuse_access_struct {
- u16 start_addr;
- u16 cnts;
- u8 data[0];
-};
-
-struct burst_rw_reg {
- u32 offset;
- u32 len;
- u8 Data[256];
-};
-
-struct usb_vendor_req {
- u8 bRequest;
- u16 wValue;
- u16 wIndex;
- u16 wLength;
- u8 u8Dir;/* 0:OUT, 1:IN */
- u8 u8InData;
-};
-
-struct dr_variable_struct {
- u8 offset;
- u32 variable;
-};
-
-#define _irqlevel_changed_(a, b)
-
-int rtl8188eu_oid_rt_pro_set_data_rate_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_start_test_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_stop_test_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_antenna_bb_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_tx_power_control_hdl(struct oid_par_priv *poid_par_priv);
-
-int rtl8188eu_oid_rt_pro_query_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_query_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_query_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
-int rtl8188eu_oid_rt_pro_reset_tx_packet_sent_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_reset_rx_packet_received_hdl(struct oid_par_priv *par_priv);
-int rtl8188eu_oid_rt_pro_set_modulation_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_continuous_tx_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl(struct oid_par_priv *par_priv);
-int rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl(struct oid_par_priv *poid_par_priv);
-
-/* rtl8188eu_oid_rtl_seg_81_87 */
-int rtl8188eu_oid_rt_pro_write_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read_bb_reg_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_write_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read_rf_reg_hdl(struct oid_par_priv *poid_par_priv);
-
-int rtl8188eu_oid_rt_wireless_mode_hdl(struct oid_par_priv *poid_par_priv);
-
-/* rtl8188eu_oid_rtl_seg_87_11_00 */
-int rtl8188eu_oid_rt_pro8711_join_bss_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read_register_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_write_register_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_burst_read_register_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_burst_write_register_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_write_txcmd_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_write16_eeprom_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro8711_wi_poll_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro8711_pkt_loss_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_rd_attrib_mem_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_wr_attrib_mem_hdl (struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_rf_intfs_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_poll_rx_status_hdl(struct oid_par_priv *poid_par_priv);
-/* rtl8188eu_oid_rtl_seg_87_11_20 */
-int rtl8188eu_oid_rt_pro_cfg_debug_message_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_data_rate_ex_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_basic_rate_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read_tssi_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_power_tracking_hdl(struct oid_par_priv *poid_par_priv);
-/* rtl8188eu_oid_rtl_seg_87_11_50 */
-int rtl8188eu_oid_rt_pro_qry_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_pwrstate_hdl(struct oid_par_priv *poid_par_priv);
-/* rtl8188eu_oid_rtl_seg_87_11_F0 */
-int rtl8188eu_oid_rt_pro_h2c_set_rate_table_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_h2c_get_rate_table_hdl(struct oid_par_priv *poid_par_priv);
-
-/* rtl8188eu_oid_rtl_seg_87_12_00 */
-int rtl8188eu_oid_rt_pro_encryption_ctrl_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_add_sta_info_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_dele_sta_info_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_query_dr_variable_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_read_efuse_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_write_efuse_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_rw_efuse_pgpkt_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_efuse_current_size_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_efuse_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_efuse_map_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_set_bandwidth_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_set_crystal_cap_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_set_rx_packet_type_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_efuse_max_size_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_tx_agc_offset_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_set_pkt_test_mode_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_thermal_meter_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl(struct oid_par_priv *par_priv);
-int rtl8188eu_oid_rt_set_power_down_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_get_power_mode_hdl(struct oid_par_priv *poid_par_priv);
-int rtl8188eu_oid_rt_pro_trigger_gpio_hdl(struct oid_par_priv *poid_par_priv);
-
-struct rwreg_param {
- u32 offset;
- u32 width;
- u32 value;
-};
-
-struct bbreg_param {
- u32 offset;
- u32 phymask;
- u32 value;
-};
-
-struct txpower_param {
- u32 pwr_index;
-};
-
-struct datarate_param {
- u32 rate_index;
-};
-
-struct rfintfs_parm {
- u32 rfintfs;
-};
-
-struct mp_xmit_parm {
- u8 enable;
- u32 count;
- u16 length;
- u8 payload_type;
- u8 da[ETH_ALEN];
-};
-
-struct mp_xmit_packet {
- u32 len;
- u32 mem[MAX_MP_XMITBUF_SZ >> 2];
-};
-
-struct psmode_param {
- u32 ps_mode;
- u32 smart_ps;
-};
-
-/* for OID_RT_PRO_READ16_EEPROM & OID_RT_PRO_WRITE16_EEPROM */
-struct eeprom_rw_param {
- u32 offset;
- u16 value;
-};
-
-struct mp_ioctl_handler {
- u32 paramsize;
- s32 (*handler)(struct oid_par_priv* poid_par_priv);
- u32 oid;
-};
-
-struct mp_ioctl_param{
- u32 subcode;
- u32 len;
- u8 data[0];
-};
-
-#define GEN_MP_IOCTL_SUBCODE(code) _MP_IOCTL_ ## code ## _CMD_
-
-enum RTL871X_MP_IOCTL_SUBCODE {
- GEN_MP_IOCTL_SUBCODE(MP_START), /*0*/
- GEN_MP_IOCTL_SUBCODE(MP_STOP),
- GEN_MP_IOCTL_SUBCODE(READ_REG),
- GEN_MP_IOCTL_SUBCODE(WRITE_REG),
- GEN_MP_IOCTL_SUBCODE(READ_BB_REG),
- GEN_MP_IOCTL_SUBCODE(WRITE_BB_REG), /*5*/
- GEN_MP_IOCTL_SUBCODE(READ_RF_REG),
- GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG),
- GEN_MP_IOCTL_SUBCODE(SET_CHANNEL),
- GEN_MP_IOCTL_SUBCODE(SET_TXPOWER),
- GEN_MP_IOCTL_SUBCODE(SET_DATARATE), /*10*/
- GEN_MP_IOCTL_SUBCODE(SET_BANDWIDTH),
- GEN_MP_IOCTL_SUBCODE(SET_ANTENNA),
- GEN_MP_IOCTL_SUBCODE(CNTU_TX),
- GEN_MP_IOCTL_SUBCODE(SC_TX),
- GEN_MP_IOCTL_SUBCODE(CS_TX), /*15*/
- GEN_MP_IOCTL_SUBCODE(ST_TX),
- GEN_MP_IOCTL_SUBCODE(IOCTL_XMIT_PACKET),
- GEN_MP_IOCTL_SUBCODE(SET_RX_PKT_TYPE),
- GEN_MP_IOCTL_SUBCODE(RESET_PHY_RX_PKT_CNT),
- GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_RECV), /*20*/
- GEN_MP_IOCTL_SUBCODE(GET_PHY_RX_PKT_ERROR),
- GEN_MP_IOCTL_SUBCODE(READ16_EEPROM),
- GEN_MP_IOCTL_SUBCODE(WRITE16_EEPROM),
- GEN_MP_IOCTL_SUBCODE(EFUSE),
- GEN_MP_IOCTL_SUBCODE(EFUSE_MAP), /*25*/
- GEN_MP_IOCTL_SUBCODE(GET_EFUSE_MAX_SIZE),
- GEN_MP_IOCTL_SUBCODE(GET_EFUSE_CURRENT_SIZE),
- GEN_MP_IOCTL_SUBCODE(GET_THERMAL_METER),
- GEN_MP_IOCTL_SUBCODE(SET_PTM),
- GEN_MP_IOCTL_SUBCODE(SET_POWER_DOWN), /*30*/
- GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO),
- GEN_MP_IOCTL_SUBCODE(SET_DM_BT), /*35*/
- GEN_MP_IOCTL_SUBCODE(DEL_BA), /*36*/
- GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS), /*37*/
- MAX_MP_IOCTL_SUBCODE,
-};
-
-s32 rtl8188eu_mp_ioctl_xmit_packet_hdl(struct oid_par_priv *poid_par_priv);
-
-#define GEN_HANDLER(sz, hdl, oid) {sz, hdl, oid},
-
-#define EXT_MP_IOCTL_HANDLER(sz, subcode, oid) \
- {sz, rtl8188eu_mp_ioctl_##subcode##_hdl, oid},
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/r8188eu/include/rtw_mp_phy_regdef.h
deleted file mode 100644
index c2be770a5f5d..000000000000
--- a/drivers/staging/r8188eu/include/rtw_mp_phy_regdef.h
+++ /dev/null
@@ -1,1063 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-
-/*****************************************************************************
- *
- * Module: __RTW_MP_PHY_REGDEF_H_
- *
- *
- * Note: 1. Define PMAC/BB register map
- * 2. Define RF register map
- * 3. PMAC/BB register bit mask.
- * 4. RF reg bit mask.
- * 5. Other BB/RF relative definition.
- *
- *
- * Export: Constants, macro, functions(API), global variables(None).
- *
- * Abbrev:
- *
- * History:
- * Data Who Remark
- * 08/07/2007 MHC 1. Porting from 9x series PHYCFG.h.
- * 2. Reorganize code architecture.
- * 09/25/2008 MH 1. Add RL6052 register definition
- *
- *****************************************************************************/
-#ifndef __RTW_MP_PHY_REGDEF_H_
-#define __RTW_MP_PHY_REGDEF_H_
-
-/*--------------------------Define Parameters-------------------------------*/
-
-/* */
-/* 8192S Regsiter offset definition */
-/* */
-
-/* */
-/* BB-PHY register PMAC 0x100 PHY 0x800 - 0xEFF */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 2. 0x800/0x900/0xA00/0xC00/0xD00/0xE00 */
-/* 3. RF register 0x00-2E */
-/* 4. Bit Mask for BB/RF register */
-/* 5. Other definition for BB/RF R/W */
-/* */
-
-/* */
-/* 1. PMAC duplicate register due to connection: RF_Mode, TRxRN, NumOf L-STF */
-/* 1. Page1(0x100) */
-/* */
-#define rPMAC_Reset 0x100
-#define rPMAC_TxStart 0x104
-#define rPMAC_TxLegacySIG 0x108
-#define rPMAC_TxHTSIG1 0x10c
-#define rPMAC_TxHTSIG2 0x110
-#define rPMAC_PHYDebug 0x114
-#define rPMAC_TxPacketNum 0x118
-#define rPMAC_TxIdle 0x11c
-#define rPMAC_TxMACHeader0 0x120
-#define rPMAC_TxMACHeader1 0x124
-#define rPMAC_TxMACHeader2 0x128
-#define rPMAC_TxMACHeader3 0x12c
-#define rPMAC_TxMACHeader4 0x130
-#define rPMAC_TxMACHeader5 0x134
-#define rPMAC_TxDataType 0x138
-#define rPMAC_TxRandomSeed 0x13c
-#define rPMAC_CCKPLCPPreamble 0x140
-#define rPMAC_CCKPLCPHeader 0x144
-#define rPMAC_CCKCRC16 0x148
-#define rPMAC_OFDMRxCRC32OK 0x170
-#define rPMAC_OFDMRxCRC32Er 0x174
-#define rPMAC_OFDMRxParityEr 0x178
-#define rPMAC_OFDMRxCRC8Er 0x17c
-#define rPMAC_CCKCRxRC16Er 0x180
-#define rPMAC_CCKCRxRC32Er 0x184
-#define rPMAC_CCKCRxRC32OK 0x188
-#define rPMAC_TxStatus 0x18c
-
-/* */
-/* 2. Page2(0x200) */
-/* */
-/* The following two definition are only used for USB interface. */
-/* define RF_BB_CMD_ADDR 0x02c0 RF/BB read/write command address. */
-/* define RF_BB_CMD_DATA 0x02c4 RF/BB read/write command data. */
-
-/* */
-/* 3. Page8(0x800) */
-/* */
-#define rFPGA0_RFMOD 0x800 /* RF mode & CCK TxSC RF BW Setting?? */
-
-#define rFPGA0_TxInfo 0x804 /* Status report?? */
-#define rFPGA0_PSDFunction 0x808
-
-#define rFPGA0_TxGainStage 0x80c /* Set TX PWR init gain? */
-
-#define rFPGA0_RFTiming1 0x810 /* Useless now */
-#define rFPGA0_RFTiming2 0x814
-/* define rFPGA0_XC_RFTiming 0x818 */
-/* define rFPGA0_XD_RFTiming 0x81c */
-
-#define rFPGA0_XA_HSSIParameter1 0x820 /* RF 3 wire register */
-#define rFPGA0_XA_HSSIParameter2 0x824
-#define rFPGA0_XB_HSSIParameter1 0x828
-#define rFPGA0_XB_HSSIParameter2 0x82c
-#define rFPGA0_XC_HSSIParameter1 0x830
-#define rFPGA0_XC_HSSIParameter2 0x834
-#define rFPGA0_XD_HSSIParameter1 0x838
-#define rFPGA0_XD_HSSIParameter2 0x83c
-#define rFPGA0_XA_LSSIParameter 0x840
-#define rFPGA0_XB_LSSIParameter 0x844
-#define rFPGA0_XC_LSSIParameter 0x848
-#define rFPGA0_XD_LSSIParameter 0x84c
-
-#define rFPGA0_RFWakeUpParameter 0x850 /* Useless now */
-#define rFPGA0_RFSleepUpParameter 0x854
-
-#define rFPGA0_XAB_SwitchControl 0x858 /* RF Channel switch */
-#define rFPGA0_XCD_SwitchControl 0x85c
-
-#define rFPGA0_XA_RFInterfaceOE 0x860 /* RF Channel switch */
-#define rFPGA0_XB_RFInterfaceOE 0x864
-#define rFPGA0_XC_RFInterfaceOE 0x868
-#define rFPGA0_XD_RFInterfaceOE 0x86c
-
-#define rFPGA0_XAB_RFInterfaceSW 0x870 /* RF Interface Software Control */
-#define rFPGA0_XCD_RFInterfaceSW 0x874
-
-#define rFPGA0_XAB_RFParameter 0x878 /* RF Parameter */
-#define rFPGA0_XCD_RFParameter 0x87c
-
-#define rFPGA0_AnalogParameter1 0x880 /* Crystal cap setting RF-R/W protection for parameter4?? */
-#define rFPGA0_AnalogParameter2 0x884
-#define rFPGA0_AnalogParameter3 0x888 /* Useless now */
-#define rFPGA0_AnalogParameter4 0x88c
-
-#define rFPGA0_XA_LSSIReadBack 0x8a0 /* Tranceiver LSSI Readback */
-#define rFPGA0_XB_LSSIReadBack 0x8a4
-#define rFPGA0_XC_LSSIReadBack 0x8a8
-#define rFPGA0_XD_LSSIReadBack 0x8ac
-
-#define rFPGA0_PSDReport 0x8b4 /* Useless now */
-#define rFPGA0_XAB_RFInterfaceRB 0x8e0 /* Useless now RF Interface Readback Value */
-#define rFPGA0_XCD_RFInterfaceRB 0x8e4 /* Useless now */
-
-/* */
-/* 4. Page9(0x900) */
-/* */
-#define rFPGA1_RFMOD 0x900 /* RF mode & OFDM TxSC RF BW Setting?? */
-
-#define rFPGA1_TxBlock 0x904 /* Useless now */
-#define rFPGA1_DebugSelect 0x908 /* Useless now */
-#define rFPGA1_TxInfo 0x90c /* Useless now Status report?? */
-
-/* */
-/* 5. PageA(0xA00) */
-/* */
-/* Set Control channel to upper or lower. These settings are required only for 40MHz */
-#define rCCK0_System 0xa00
-
-#define rCCK0_AFESetting 0xa04 /* Disable init gain now Select RX path by RSSI */
-#define rCCK0_CCA 0xa08 /* Disable init gain now Init gain */
-
-#define rCCK0_RxAGC1 0xa0c /* AGC default value, saturation level Antenna Diversity, RX AGC, LNA Threshold, RX LNA Threshold useless now. Not the same as 90 series */
-#define rCCK0_RxAGC2 0xa10 /* AGC & DAGC */
-
-#define rCCK0_RxHP 0xa14
-
-#define rCCK0_DSPParameter1 0xa18 /* Timing recovery & Channel estimation threshold */
-#define rCCK0_DSPParameter2 0xa1c /* SQ threshold */
-
-#define rCCK0_TxFilter1 0xa20
-#define rCCK0_TxFilter2 0xa24
-#define rCCK0_DebugPort 0xa28 /* debug port and Tx filter3 */
-#define rCCK0_FalseAlarmReport 0xa2c /* 0xa2d useless now 0xa30-a4f channel report */
-#define rCCK0_TRSSIReport 0xa50
-#define rCCK0_RxReport 0xa54 /* 0xa57 */
-#define rCCK0_FACounterLower 0xa5c /* 0xa5b */
-#define rCCK0_FACounterUpper 0xa58 /* 0xa5c */
-
-/* */
-/* 6. PageC(0xC00) */
-/* */
-#define rOFDM0_LSTF 0xc00
-
-#define rOFDM0_TRxPathEnable 0xc04
-#define rOFDM0_TRMuxPar 0xc08
-#define rOFDM0_TRSWIsolation 0xc0c
-
-#define rOFDM0_XARxAFE 0xc10 /* RxIQ DC offset, Rx digital filter, DC notch filter */
-#define rOFDM0_XARxIQImbalance 0xc14 /* RxIQ imblance matrix */
-#define rOFDM0_XBRxAFE 0xc18
-#define rOFDM0_XBRxIQImbalance 0xc1c
-#define rOFDM0_XCRxAFE 0xc20
-#define rOFDM0_XCRxIQImbalance 0xc24
-#define rOFDM0_XDRxAFE 0xc28
-#define rOFDM0_XDRxIQImbalance 0xc2c
-
-#define rOFDM0_RxDetector1 0xc30 /* PD,BW & SBD DM tune init gain */
-#define rOFDM0_RxDetector2 0xc34 /* SBD & Fame Sync. */
-#define rOFDM0_RxDetector3 0xc38 /* Frame Sync. */
-#define rOFDM0_RxDetector4 0xc3c /* PD, SBD, Frame Sync & Short-GI */
-
-#define rOFDM0_RxDSP 0xc40 /* Rx Sync Path */
-#define rOFDM0_CFOandDAGC 0xc44 /* CFO & DAGC */
-#define rOFDM0_CCADropThreshold 0xc48 /* CCA Drop threshold */
-#define rOFDM0_ECCAThreshold 0xc4c /* energy CCA */
-
-#define rOFDM0_XAAGCCore1 0xc50 /* DIG */
-#define rOFDM0_XAAGCCore2 0xc54
-#define rOFDM0_XBAGCCore1 0xc58
-#define rOFDM0_XBAGCCore2 0xc5c
-#define rOFDM0_XCAGCCore1 0xc60
-#define rOFDM0_XCAGCCore2 0xc64
-#define rOFDM0_XDAGCCore1 0xc68
-#define rOFDM0_XDAGCCore2 0xc6c
-
-#define rOFDM0_AGCParameter1 0xc70
-#define rOFDM0_AGCParameter2 0xc74
-#define rOFDM0_AGCRSSITable 0xc78
-#define rOFDM0_HTSTFAGC 0xc7c
-
-#define rOFDM0_XATxIQImbalance 0xc80 /* TX PWR TRACK and DIG */
-#define rOFDM0_XATxAFE 0xc84
-#define rOFDM0_XBTxIQImbalance 0xc88
-#define rOFDM0_XBTxAFE 0xc8c
-#define rOFDM0_XCTxIQImbalance 0xc90
-#define rOFDM0_XCTxAFE 0xc94
-#define rOFDM0_XDTxIQImbalance 0xc98
-#define rOFDM0_XDTxAFE 0xc9c
-#define rOFDM0_RxIQExtAnta 0xca0
-
-#define rOFDM0_RxHPParameter 0xce0
-#define rOFDM0_TxPseudoNoiseWgt 0xce4
-#define rOFDM0_FrameSync 0xcf0
-#define rOFDM0_DFSReport 0xcf4
-#define rOFDM0_TxCoeff1 0xca4
-#define rOFDM0_TxCoeff2 0xca8
-#define rOFDM0_TxCoeff3 0xcac
-#define rOFDM0_TxCoeff4 0xcb0
-#define rOFDM0_TxCoeff5 0xcb4
-#define rOFDM0_TxCoeff6 0xcb8
-
-/* 7. PageD(0xD00) */
-#define rOFDM1_LSTF 0xd00
-#define rOFDM1_TRxPathEnable 0xd04
-
-#define rOFDM1_CFO 0xd08 /* No setting now */
-#define rOFDM1_CSI1 0xd10
-#define rOFDM1_SBD 0xd14
-#define rOFDM1_CSI2 0xd18
-#define rOFDM1_CFOTracking 0xd2c
-#define rOFDM1_TRxMesaure1 0xd34
-#define rOFDM1_IntfDet 0xd3c
-#define rOFDM1_PseudoNoiseStateAB 0xd50
-#define rOFDM1_PseudoNoiseStateCD 0xd54
-#define rOFDM1_RxPseudoNoiseWgt 0xd58
-
-#define rOFDM_PHYCounter1 0xda0 /* cca, parity fail */
-#define rOFDM_PHYCounter2 0xda4 /* rate illegal, crc8 fail */
-#define rOFDM_PHYCounter3 0xda8 /* MCS not support */
-
-#define rOFDM_ShortCFOAB 0xdac /* No setting now */
-#define rOFDM_ShortCFOCD 0xdb0
-#define rOFDM_LongCFOAB 0xdb4
-#define rOFDM_LongCFOCD 0xdb8
-#define rOFDM_TailCFOAB 0xdbc
-#define rOFDM_TailCFOCD 0xdc0
-#define rOFDM_PWMeasure1 0xdc4
-#define rOFDM_PWMeasure2 0xdc8
-#define rOFDM_BWReport 0xdcc
-#define rOFDM_AGCReport 0xdd0
-#define rOFDM_RxSNR 0xdd4
-#define rOFDM_RxEVMCSI 0xdd8
-#define rOFDM_SIGReport 0xddc
-
-/* */
-/* 8. PageE(0xE00) */
-/* */
-#define rTxAGC_Rate18_06 0xe00
-#define rTxAGC_Rate54_24 0xe04
-#define rTxAGC_CCK_Mcs32 0xe08
-#define rTxAGC_Mcs03_Mcs00 0xe10
-#define rTxAGC_Mcs07_Mcs04 0xe14
-#define rTxAGC_Mcs11_Mcs08 0xe18
-#define rTxAGC_Mcs15_Mcs12 0xe1c
-
-/* Analog- control in RX_WAIT_CCA : REG: EE0 [Analog- Power & Control Register] */
-#define rRx_Wait_CCCA 0xe70
-#define rAnapar_Ctrl_BB 0xee0
-
-/* */
-/* 7. RF Register 0x00-0x2E (RF 8256) */
-/* RF-0222D 0x00-3F */
-/* */
-/* Zebra1 */
-#define RTL92SE_FPGA_VERIFY 0
-#define rZebra1_HSSIEnable 0x0 /* Useless now */
-#define rZebra1_TRxEnable1 0x1
-#define rZebra1_TRxEnable2 0x2
-#define rZebra1_AGC 0x4
-#define rZebra1_ChargePump 0x5
-/* if (RTL92SE_FPGA_VERIFY == 1) */
-#define rZebra1_Channel 0x7 /* RF channel switch */
-/* else */
-
-/* endif */
-#define rZebra1_TxGain 0x8 /* Useless now */
-#define rZebra1_TxLPF 0x9
-#define rZebra1_RxLPF 0xb
-#define rZebra1_RxHPFCorner 0xc
-
-/* Zebra4 */
-#define rGlobalCtrl 0 /* Useless now */
-#define rRTL8256_TxLPF 19
-#define rRTL8256_RxLPF 11
-
-/* RTL8258 */
-#define rRTL8258_TxLPF 0x11 /* Useless now */
-#define rRTL8258_RxLPF 0x13
-#define rRTL8258_RSSILPF 0xa
-
-/* */
-/* RL6052 Register definition */
-#define RF_AC 0x00 /* */
-
-#define RF_IQADJ_G1 0x01 /* */
-#define RF_IQADJ_G2 0x02 /* */
-#define RF_POW_TRSW 0x05 /* */
-
-#define RF_GAIN_RX 0x06 /* */
-#define RF_GAIN_TX 0x07 /* */
-
-#define RF_TXM_IDAC 0x08 /* */
-#define RF_BS_IQGEN 0x0F /* */
-
-#define RF_MODE1 0x10 /* */
-#define RF_MODE2 0x11 /* */
-
-#define RF_RX_AGC_HP 0x12 /* */
-#define RF_TX_AGC 0x13 /* */
-#define RF_BIAS 0x14 /* */
-#define RF_IPA 0x15 /* */
-#define RF_TXBIAS 0x16 /* */
-#define RF_POW_ABILITY 0x17 /* */
-#define RF_MODE_AG 0x18 /* */
-#define rRfChannel 0x18 /* RF channel and BW switch */
-#define RF_CHNLBW 0x18 /* RF channel and BW switch */
-#define RF_TOP 0x19 /* */
-
-#define RF_RX_G1 0x1A /* */
-#define RF_RX_G2 0x1B /* */
-
-#define RF_RX_BB2 0x1C /* */
-#define RF_RX_BB1 0x1D /* */
-
-#define RF_RCK1 0x1E /* */
-#define RF_RCK2 0x1F /* */
-
-#define RF_TX_G1 0x20 /* */
-#define RF_TX_G2 0x21 /* */
-#define RF_TX_G3 0x22 /* */
-
-#define RF_TX_BB1 0x23 /* */
-
-#define RF_T_METER 0x24 /* */
-
-#define RF_SYN_G1 0x25 /* RF TX Power control */
-#define RF_SYN_G2 0x26 /* RF TX Power control */
-#define RF_SYN_G3 0x27 /* RF TX Power control */
-#define RF_SYN_G4 0x28 /* RF TX Power control */
-#define RF_SYN_G5 0x29 /* RF TX Power control */
-#define RF_SYN_G6 0x2A /* RF TX Power control */
-#define RF_SYN_G7 0x2B /* RF TX Power control */
-#define RF_SYN_G8 0x2C /* RF TX Power control */
-
-#define RF_RCK_OS 0x30 /* RF TX PA control */
-#define RF_TXPA_G1 0x31 /* RF TX PA control */
-#define RF_TXPA_G2 0x32 /* RF TX PA control */
-#define RF_TXPA_G3 0x33 /* RF TX PA control */
-
-/* */
-/* Bit Mask */
-/* */
-/* 1. Page1(0x100) */
-#define bBBResetB 0x100 /* Useless now? */
-#define bGlobalResetB 0x200
-#define bOFDMTxStart 0x4
-#define bCCKTxStart 0x8
-#define bCRC32Debug 0x100
-#define bPMACLoopback 0x10
-#define bTxLSIG 0xffffff
-#define bOFDMTxRate 0xf
-#define bOFDMTxReserved 0x10
-#define bOFDMTxLength 0x1ffe0
-#define bOFDMTxParity 0x20000
-#define bTxHTSIG1 0xffffff
-#define bTxHTMCSRate 0x7f
-#define bTxHTBW 0x80
-#define bTxHTLength 0xffff00
-#define bTxHTSIG2 0xffffff
-#define bTxHTSmoothing 0x1
-#define bTxHTSounding 0x2
-#define bTxHTReserved 0x4
-#define bTxHTAggreation 0x8
-#define bTxHTSTBC 0x30
-#define bTxHTAdvanceCoding 0x40
-#define bTxHTShortGI 0x80
-#define bTxHTNumberHT_LTF 0x300
-#define bTxHTCRC8 0x3fc00
-#define bCounterReset 0x10000
-#define bNumOfOFDMTx 0xffff
-#define bNumOfCCKTx 0xffff0000
-#define bTxIdleInterval 0xffff
-#define bOFDMService 0xffff0000
-#define bTxMACHeader 0xffffffff
-#define bTxDataInit 0xff
-#define bTxHTMode 0x100
-#define bTxDataType 0x30000
-#define bTxRandomSeed 0xffffffff
-#define bCCKTxPreamble 0x1
-#define bCCKTxSFD 0xffff0000
-#define bCCKTxSIG 0xff
-#define bCCKTxService 0xff00
-#define bCCKLengthExt 0x8000
-#define bCCKTxLength 0xffff0000
-#define bCCKTxCRC16 0xffff
-#define bCCKTxStatus 0x1
-#define bOFDMTxStatus 0x2
-
-#define IS_BB_REG_OFFSET_92S(_Offset) ((_Offset >= 0x800) && (_Offset <= 0xfff))
-
-/* 2. Page8(0x800) */
-#define bRFMOD 0x1 /* Reg 0x800 rFPGA0_RFMOD */
-#define bJapanMode 0x2
-#define bCCKTxSC 0x30
-#define bCCKEn 0x1000000
-#define bOFDMEn 0x2000000
-
-#define bOFDMRxADCPhase 0x10000 /* Useless now */
-#define bOFDMTxDACPhase 0x40000
-#define bXATxAGC 0x3f
-
-#define bXBTxAGC 0xf00 /* Reg 80c rFPGA0_TxGainStage */
-#define bXCTxAGC 0xf000
-#define bXDTxAGC 0xf0000
-
-#define bPAStart 0xf0000000 /* Useless now */
-#define bTRStart 0x00f00000
-#define bRFStart 0x0000f000
-#define bBBStart 0x000000f0
-#define bBBCCKStart 0x0000000f
-#define bPAEnd 0xf /* Reg0x814 */
-#define bTREnd 0x0f000000
-#define bRFEnd 0x000f0000
-#define bCCAMask 0x000000f0 /* T2R */
-#define bR2RCCAMask 0x00000f00
-#define bHSSI_R2TDelay 0xf8000000
-#define bHSSI_T2RDelay 0xf80000
-#define bContTxHSSI 0x400 /* chane gain at continue Tx */
-#define bIGFromCCK 0x200
-#define bAGCAddress 0x3f
-#define bRxHPTx 0x7000
-#define bRxHPT2R 0x38000
-#define bRxHPCCKIni 0xc0000
-#define bAGCTxCode 0xc00000
-#define bAGCRxCode 0x300000
-
-#define b3WireDataLength 0x800 /* Reg 0x820~84f rFPGA0_XA_HSSIParameter1 */
-#define b3WireAddressLength 0x400
-
-#define b3WireRFPowerDown 0x1 /* Useless now */
-/* define bHWSISelect 0x8 */
-#define b5GPAPEPolarity 0x40000000
-#define b2GPAPEPolarity 0x80000000
-#define bRFSW_TxDefaultAnt 0x3
-#define bRFSW_TxOptionAnt 0x30
-#define bRFSW_RxDefaultAnt 0x300
-#define bRFSW_RxOptionAnt 0x3000
-#define bRFSI_3WireData 0x1
-#define bRFSI_3WireClock 0x2
-#define bRFSI_3WireLoad 0x4
-#define bRFSI_3WireRW 0x8
-#define bRFSI_3Wire 0xf
-
-#define bRFSI_RFENV 0x10 /* Reg 0x870 rFPGA0_XAB_RFInterfaceSW */
-
-#define bRFSI_TRSW 0x20 /* Useless now */
-#define bRFSI_TRSWB 0x40
-#define bRFSI_ANTSW 0x100
-#define bRFSI_ANTSWB 0x200
-#define bRFSI_PAPE 0x400
-#define bRFSI_PAPE5G 0x800
-#define bBandSelect 0x1
-#define bHTSIG2_GI 0x80
-#define bHTSIG2_Smoothing 0x01
-#define bHTSIG2_Sounding 0x02
-#define bHTSIG2_Aggreaton 0x08
-#define bHTSIG2_STBC 0x30
-#define bHTSIG2_AdvCoding 0x40
-#define bHTSIG2_NumOfHTLTF 0x300
-#define bHTSIG2_CRC8 0x3fc
-#define bHTSIG1_MCS 0x7f
-#define bHTSIG1_BandWidth 0x80
-#define bHTSIG1_HTLength 0xffff
-#define bLSIG_Rate 0xf
-#define bLSIG_Reserved 0x10
-#define bLSIG_Length 0x1fffe
-#define bLSIG_Parity 0x20
-#define bCCKRxPhase 0x4
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bLSSIReadAddress 0x3f000000 /* LSSI "Read" Address
- Reg 0x824 rFPGA0_XA_HSSIParameter2 */
-#else
-#define bLSSIReadAddress 0x7f800000 /* T65 RF */
-#endif
-#define bLSSIReadEdge 0x80000000 /* LSSI "Read" edge signal */
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bLSSIReadBackData 0xfff /* Reg 0x8a0
- rFPGA0_XA_LSSIReadBack */
-#else
-#define bLSSIReadBackData 0xfffff /* T65 RF */
-#endif
-#define bLSSIReadOKFlag 0x1000 /* Useless now */
-#define bCCKSampleRate 0x8 /* 0: 44MHz, 1:88MHz */
-#define bRegulator0Standby 0x1
-#define bRegulatorPLLStandby 0x2
-#define bRegulator1Standby 0x4
-#define bPLLPowerUp 0x8
-#define bDPLLPowerUp 0x10
-#define bDA10PowerUp 0x20
-#define bAD7PowerUp 0x200
-#define bDA6PowerUp 0x2000
-#define bXtalPowerUp 0x4000
-#define b40MDClkPowerUP 0x8000
-#define bDA6DebugMode 0x20000
-#define bDA6Swing 0x380000
-
-#define bADClkPhase 0x4000000 /* Reg 0x880
- rFPGA0_AnalogParameter1 20/40 CCK support switch 40/80 BB MHZ */
-
-#define b80MClkDelay 0x18000000 /* Useless */
-#define bAFEWatchDogEnable 0x20000000
-
-#define bXtalCap01 0xc0000000 /* Reg 0x884
- rFPGA0_AnalogParameter2 Crystal cap */
-#define bXtalCap23 0x3
-#define bXtalCap92x 0x0f000000
-#define bXtalCap 0x0f000000
-
-#define bIntDifClkEnable 0x400 /* Useless */
-#define bExtSigClkEnable 0x800
-#define bBandgapMbiasPowerUp 0x10000
-#define bAD11SHGain 0xc0000
-#define bAD11InputRange 0x700000
-#define bAD11OPCurrent 0x3800000
-#define bIPathLoopback 0x4000000
-#define bQPathLoopback 0x8000000
-#define bAFELoopback 0x10000000
-#define bDA10Swing 0x7e0
-#define bDA10Reverse 0x800
-#define bDAClkSource 0x1000
-#define bAD7InputRange 0x6000
-#define bAD7Gain 0x38000
-#define bAD7OutputCMMode 0x40000
-#define bAD7InputCMMode 0x380000
-#define bAD7Current 0xc00000
-#define bRegulatorAdjust 0x7000000
-#define bAD11PowerUpAtTx 0x1
-#define bDA10PSAtTx 0x10
-#define bAD11PowerUpAtRx 0x100
-#define bDA10PSAtRx 0x1000
-#define bCCKRxAGCFormat 0x200
-#define bPSDFFTSamplepPoint 0xc000
-#define bPSDAverageNum 0x3000
-#define bIQPathControl 0xc00
-#define bPSDFreq 0x3ff
-#define bPSDAntennaPath 0x30
-#define bPSDIQSwitch 0x40
-#define bPSDRxTrigger 0x400000
-#define bPSDTxTrigger 0x80000000
-#define bPSDSineToneScale 0x7f000000
-#define bPSDReport 0xffff
-
-/* 3. Page9(0x900) */
-#define bOFDMTxSC 0x30000000 /* Useless */
-#define bCCKTxOn 0x1
-#define bOFDMTxOn 0x2
-#define bDebugPage 0xfff /* reset debug page and HWord,
- * LWord */
-#define bDebugItem 0xff /* reset debug page and LWord */
-#define bAntL 0x10
-#define bAntNonHT 0x100
-#define bAntHT1 0x1000
-#define bAntHT2 0x10000
-#define bAntHT1S1 0x100000
-#define bAntNonHTS1 0x1000000
-
-/* 4. PageA(0xA00) */
-#define bCCKBBMode 0x3 /* Useless */
-#define bCCKTxPowerSaving 0x80
-#define bCCKRxPowerSaving 0x40
-
-#define bCCKSideBand 0x10 /* Reg 0xa00 rCCK0 20/40 sw */
-
-#define bCCKScramble 0x8 /* Useless */
-#define bCCKAntDiversity 0x8000
-#define bCCKCarrierRecovery 0x4000
-#define bCCKTxRate 0x3000
-#define bCCKDCCancel 0x0800
-#define bCCKISICancel 0x0400
-#define bCCKMatchFilter 0x0200
-#define bCCKEqualizer 0x0100
-#define bCCKPreambleDetect 0x800000
-#define bCCKFastFalseCCA 0x400000
-#define bCCKChEstStart 0x300000
-#define bCCKCCACount 0x080000
-#define bCCKcs_lim 0x070000
-#define bCCKBistMode 0x80000000
-#define bCCKCCAMask 0x40000000
-#define bCCKTxDACPhase 0x4
-#define bCCKRxADCPhase 0x20000000 /* r_rx_clk */
-#define bCCKr_cp_mode0 0x0100
-#define bCCKTxDCOffset 0xf0
-#define bCCKRxDCOffset 0xf
-#define bCCKCCAMode 0xc000
-#define bCCKFalseCS_lim 0x3f00
-#define bCCKCS_ratio 0xc00000
-#define bCCKCorgBit_sel 0x300000
-#define bCCKPD_lim 0x0f0000
-#define bCCKNewCCA 0x80000000
-#define bCCKRxHPofIG 0x8000
-#define bCCKRxIG 0x7f00
-#define bCCKLNAPolarity 0x800000
-#define bCCKRx1stGain 0x7f0000
-#define bCCKRFExtend 0x20000000 /* CCK Rx init gain polar */
-#define bCCKRxAGCSatLevel 0x1f000000
-#define bCCKRxAGCSatCount 0xe0
-#define bCCKRxRFSettle 0x1f /* AGCsamp_dly */
-#define bCCKFixedRxAGC 0x8000
-#define bCCKAntennaPolarity 0x2000
-#define bCCKTxFilterType 0x0c00
-#define bCCKRxAGCReportType 0x0300
-#define bCCKRxDAGCEn 0x80000000
-#define bCCKRxDAGCPeriod 0x20000000
-#define bCCKRxDAGCSatLevel 0x1f000000
-#define bCCKTimingRecovery 0x800000
-#define bCCKTxC0 0x3f0000
-#define bCCKTxC1 0x3f000000
-#define bCCKTxC2 0x3f
-#define bCCKTxC3 0x3f00
-#define bCCKTxC4 0x3f0000
-#define bCCKTxC5 0x3f000000
-#define bCCKTxC6 0x3f
-#define bCCKTxC7 0x3f00
-#define bCCKDebugPort 0xff0000
-#define bCCKDACDebug 0x0f000000
-#define bCCKFalseAlarmEnable 0x8000
-#define bCCKFalseAlarmRead 0x4000
-#define bCCKTRSSI 0x7f
-#define bCCKRxAGCReport 0xfe
-#define bCCKRxReport_AntSel 0x80000000
-#define bCCKRxReport_MFOff 0x40000000
-#define bCCKRxRxReport_SQLoss 0x20000000
-#define bCCKRxReport_Pktloss 0x10000000
-#define bCCKRxReport_Lockedbit 0x08000000
-#define bCCKRxReport_RateError 0x04000000
-#define bCCKRxReport_RxRate 0x03000000
-#define bCCKRxFACounterLower 0xff
-#define bCCKRxFACounterUpper 0xff000000
-#define bCCKRxHPAGCStart 0xe000
-#define bCCKRxHPAGCFinal 0x1c00
-#define bCCKRxFalseAlarmEnable 0x8000
-#define bCCKFACounterFreeze 0x4000
-#define bCCKTxPathSel 0x10000000
-#define bCCKDefaultRxPath 0xc000000
-#define bCCKOptionRxPath 0x3000000
-
-/* 5. PageC(0xC00) */
-#define bNumOfSTF 0x3 /* Useless */
-#define bShift_L 0xc0
-#define bGI_TH 0xc
-#define bRxPathA 0x1
-#define bRxPathB 0x2
-#define bRxPathC 0x4
-#define bRxPathD 0x8
-#define bTxPathA 0x1
-#define bTxPathB 0x2
-#define bTxPathC 0x4
-#define bTxPathD 0x8
-#define bTRSSIFreq 0x200
-#define bADCBackoff 0x3000
-#define bDFIRBackoff 0xc000
-#define bTRSSILatchPhase 0x10000
-#define bRxIDCOffset 0xff
-#define bRxQDCOffset 0xff00
-#define bRxDFIRMode 0x1800000
-#define bRxDCNFType 0xe000000
-#define bRXIQImb_A 0x3ff
-#define bRXIQImb_B 0xfc00
-#define bRXIQImb_C 0x3f0000
-#define bRXIQImb_D 0xffc00000
-#define bDC_dc_Notch 0x60000
-#define bRxNBINotch 0x1f000000
-#define bPD_TH 0xf
-#define bPD_TH_Opt2 0xc000
-#define bPWED_TH 0x700
-#define bIfMF_Win_L 0x800
-#define bPD_Option 0x1000
-#define bMF_Win_L 0xe000
-#define bBW_Search_L 0x30000
-#define bwin_enh_L 0xc0000
-#define bBW_TH 0x700000
-#define bED_TH2 0x3800000
-#define bBW_option 0x4000000
-#define bRatio_TH 0x18000000
-#define bWindow_L 0xe0000000
-#define bSBD_Option 0x1
-#define bFrame_TH 0x1c
-#define bFS_Option 0x60
-#define bDC_Slope_check 0x80
-#define bFGuard_Counter_DC_L 0xe00
-#define bFrame_Weight_Short 0x7000
-#define bSub_Tune 0xe00000
-#define bFrame_DC_Length 0xe000000
-#define bSBD_start_offset 0x30000000
-#define bFrame_TH_2 0x7
-#define bFrame_GI2_TH 0x38
-#define bGI2_Sync_en 0x40
-#define bSarch_Short_Early 0x300
-#define bSarch_Short_Late 0xc00
-#define bSarch_GI2_Late 0x70000
-#define bCFOAntSum 0x1
-#define bCFOAcc 0x2
-#define bCFOStartOffset 0xc
-#define bCFOLookBack 0x70
-#define bCFOSumWeight 0x80
-#define bDAGCEnable 0x10000
-#define bTXIQImb_A 0x3ff
-#define bTXIQImb_B 0xfc00
-#define bTXIQImb_C 0x3f0000
-#define bTXIQImb_D 0xffc00000
-#define bTxIDCOffset 0xff
-#define bTxQDCOffset 0xff00
-#define bTxDFIRMode 0x10000
-#define bTxPesudoNoiseOn 0x4000000
-#define bTxPesudoNoise_A 0xff
-#define bTxPesudoNoise_B 0xff00
-#define bTxPesudoNoise_C 0xff0000
-#define bTxPesudoNoise_D 0xff000000
-#define bCCADropOption 0x20000
-#define bCCADropThres 0xfff00000
-#define bEDCCA_H 0xf
-#define bEDCCA_L 0xf0
-#define bLambda_ED 0x300
-#define bRxInitialGain 0x7f
-#define bRxAntDivEn 0x80
-#define bRxAGCAddressForLNA 0x7f00
-#define bRxHighPowerFlow 0x8000
-#define bRxAGCFreezeThres 0xc0000
-#define bRxFreezeStep_AGC1 0x300000
-#define bRxFreezeStep_AGC2 0xc00000
-#define bRxFreezeStep_AGC3 0x3000000
-#define bRxFreezeStep_AGC0 0xc000000
-#define bRxRssi_Cmp_En 0x10000000
-#define bRxQuickAGCEn 0x20000000
-#define bRxAGCFreezeThresMode 0x40000000
-#define bRxOverFlowCheckType 0x80000000
-#define bRxAGCShift 0x7f
-#define bTRSW_Tri_Only 0x80
-#define bPowerThres 0x300
-#define bRxAGCEn 0x1
-#define bRxAGCTogetherEn 0x2
-#define bRxAGCMin 0x4
-#define bRxHP_Ini 0x7
-#define bRxHP_TRLNA 0x70
-#define bRxHP_RSSI 0x700
-#define bRxHP_BBP1 0x7000
-#define bRxHP_BBP2 0x70000
-#define bRxHP_BBP3 0x700000
-#define bRSSI_H 0x7f0000 /* thresh for hi power */
-#define bRSSI_Gen 0x7f000000 /* thresh for ant div */
-#define bRxSettle_TRSW 0x7
-#define bRxSettle_LNA 0x38
-#define bRxSettle_RSSI 0x1c0
-#define bRxSettle_BBP 0xe00
-#define bRxSettle_RxHP 0x7000
-#define bRxSettle_AntSW_RSSI 0x38000
-#define bRxSettle_AntSW 0xc0000
-#define bRxProcessTime_DAGC 0x300000
-#define bRxSettle_HSSI 0x400000
-#define bRxProcessTime_BBPPW 0x800000
-#define bRxAntennaPowerShift 0x3000000
-#define bRSSITableSelect 0xc000000
-#define bRxHP_Final 0x7000000
-#define bRxHTSettle_BBP 0x7
-#define bRxHTSettle_HSSI 0x8
-#define bRxHTSettle_RxHP 0x70
-#define bRxHTSettle_BBPPW 0x80
-#define bRxHTSettle_Idle 0x300
-#define bRxHTSettle_Reserved 0x1c00
-#define bRxHTRxHPEn 0x8000
-#define bRxHTAGCFreezeThres 0x30000
-#define bRxHTAGCTogetherEn 0x40000
-#define bRxHTAGCMin 0x80000
-#define bRxHTAGCEn 0x100000
-#define bRxHTDAGCEn 0x200000
-#define bRxHTRxHP_BBP 0x1c00000
-#define bRxHTRxHP_Final 0xe0000000
-#define bRxPWRatioTH 0x3
-#define bRxPWRatioEn 0x4
-#define bRxMFHold 0x3800
-#define bRxPD_Delay_TH1 0x38
-#define bRxPD_Delay_TH2 0x1c0
-#define bRxPD_DC_COUNT_MAX 0x600
-/* define bRxMF_Hold 0x3800 */
-#define bRxPD_Delay_TH 0x8000
-#define bRxProcess_Delay 0xf0000
-#define bRxSearchrange_GI2_Early 0x700000
-#define bRxFrame_Guard_Counter_L 0x3800000
-#define bRxSGI_Guard_L 0xc000000
-#define bRxSGI_Search_L 0x30000000
-#define bRxSGI_TH 0xc0000000
-#define bDFSCnt0 0xff
-#define bDFSCnt1 0xff00
-#define bDFSFlag 0xf0000
-#define bMFWeightSum 0x300000
-#define bMinIdxTH 0x7f000000
-#define bDAFormat 0x40000
-#define bTxChEmuEnable 0x01000000
-#define bTRSWIsolation_A 0x7f
-#define bTRSWIsolation_B 0x7f00
-#define bTRSWIsolation_C 0x7f0000
-#define bTRSWIsolation_D 0x7f000000
-#define bExtLNAGain 0x7c00
-
-/* 6. PageE(0xE00) */
-#define bSTBCEn 0x4 /* Useless */
-#define bAntennaMapping 0x10
-#define bNss 0x20
-#define bCFOAntSumD 0x200
-#define bPHYCounterReset 0x8000000
-#define bCFOReportGet 0x4000000
-#define bOFDMContinueTx 0x10000000
-#define bOFDMSingleCarrier 0x20000000
-#define bOFDMSingleTone 0x40000000
-/* define bRxPath1 0x01 */
-/* define bRxPath2 0x02 */
-/* define bRxPath3 0x04 */
-/* define bRxPath4 0x08 */
-/* define bTxPath1 0x10 */
-/* define bTxPath2 0x20 */
-#define bHTDetect 0x100
-#define bCFOEn 0x10000
-#define bCFOValue 0xfff00000
-#define bSigTone_Re 0x3f
-#define bSigTone_Im 0x7f00
-#define bCounter_CCA 0xffff
-#define bCounter_ParityFail 0xffff0000
-#define bCounter_RateIllegal 0xffff
-#define bCounter_CRC8Fail 0xffff0000
-#define bCounter_MCSNoSupport 0xffff
-#define bCounter_FastSync 0xffff
-#define bShortCFO 0xfff
-#define bShortCFOTLength 12 /* total */
-#define bShortCFOFLength 11 /* fraction */
-#define bLongCFO 0x7ff
-#define bLongCFOTLength 11
-#define bLongCFOFLength 11
-#define bTailCFO 0x1fff
-#define bTailCFOTLength 13
-#define bTailCFOFLength 12
-#define bmax_en_pwdB 0xffff
-#define bCC_power_dB 0xffff0000
-#define bnoise_pwdB 0xffff
-#define bPowerMeasTLength 10
-#define bPowerMeasFLength 3
-#define bRx_HT_BW 0x1
-#define bRxSC 0x6
-#define bRx_HT 0x8
-#define bNB_intf_det_on 0x1
-#define bIntf_win_len_cfg 0x30
-#define bNB_Intf_TH_cfg 0x1c0
-#define bRFGain 0x3f
-#define bTableSel 0x40
-#define bTRSW 0x80
-#define bRxSNR_A 0xff
-#define bRxSNR_B 0xff00
-#define bRxSNR_C 0xff0000
-#define bRxSNR_D 0xff000000
-#define bSNREVMTLength 8
-#define bSNREVMFLength 1
-#define bCSI1st 0xff
-#define bCSI2nd 0xff00
-#define bRxEVM1st 0xff0000
-#define bRxEVM2nd 0xff000000
-#define bSIGEVM 0xff
-#define bPWDB 0xff00
-#define bSGIEN 0x10000
-
-#define bSFactorQAM1 0xf /* Useless */
-#define bSFactorQAM2 0xf0
-#define bSFactorQAM3 0xf00
-#define bSFactorQAM4 0xf000
-#define bSFactorQAM5 0xf0000
-#define bSFactorQAM6 0xf0000
-#define bSFactorQAM7 0xf00000
-#define bSFactorQAM8 0xf000000
-#define bSFactorQAM9 0xf0000000
-#define bCSIScheme 0x100000
-
-#define bNoiseLvlTopSet 0x3 /* Useless */
-#define bChSmooth 0x4
-#define bChSmoothCfg1 0x38
-#define bChSmoothCfg2 0x1c0
-#define bChSmoothCfg3 0xe00
-#define bChSmoothCfg4 0x7000
-#define bMRCMode 0x800000
-#define bTHEVMCfg 0x7000000
-
-#define bLoopFitType 0x1 /* Useless */
-#define bUpdCFO 0x40
-#define bUpdCFOOffData 0x80
-#define bAdvUpdCFO 0x100
-#define bAdvTimeCtrl 0x800
-#define bUpdClko 0x1000
-#define bFC 0x6000
-#define bTrackingMode 0x8000
-#define bPhCmpEnable 0x10000
-#define bUpdClkoLTF 0x20000
-#define bComChCFO 0x40000
-#define bCSIEstiMode 0x80000
-#define bAdvUpdEqz 0x100000
-#define bUChCfg 0x7000000
-#define bUpdEqz 0x8000000
-
-#define bTxAGCRate18_06 0x7f7f7f7f /* Useless */
-#define bTxAGCRate54_24 0x7f7f7f7f
-#define bTxAGCRateMCS32 0x7f
-#define bTxAGCRateCCK 0x7f00
-#define bTxAGCRateMCS3_MCS0 0x7f7f7f7f
-#define bTxAGCRateMCS7_MCS4 0x7f7f7f7f
-#define bTxAGCRateMCS11_MCS8 0x7f7f7f7f
-#define bTxAGCRateMCS15_MCS12 0x7f7f7f7f
-
-/* Rx Pseduo noise */
-#define bRxPesudoNoiseOn 0x20000000 /* Useless */
-#define bRxPesudoNoise_A 0xff
-#define bRxPesudoNoise_B 0xff00
-#define bRxPesudoNoise_C 0xff0000
-#define bRxPesudoNoise_D 0xff000000
-#define bPesudoNoiseState_A 0xffff
-#define bPesudoNoiseState_B 0xffff0000
-#define bPesudoNoiseState_C 0xffff
-#define bPesudoNoiseState_D 0xffff0000
-
-/* 7. RF Register */
-/* Zebra1 */
-#define bZebra1_HSSIEnable 0x8 /* Useless */
-#define bZebra1_TRxControl 0xc00
-#define bZebra1_TRxGainSetting 0x07f
-#define bZebra1_RxCorner 0xc00
-#define bZebra1_TxChargePump 0x38
-#define bZebra1_RxChargePump 0x7
-#define bZebra1_ChannelNum 0xf80
-#define bZebra1_TxLPFBW 0x400
-#define bZebra1_RxLPFBW 0x600
-
-/* Zebra4 */
-#define bRTL8256RegModeCtrl1 0x100 /* Useless */
-#define bRTL8256RegModeCtrl0 0x40
-#define bRTL8256_TxLPFBW 0x18
-#define bRTL8256_RxLPFBW 0x600
-
-/* RTL8258 */
-#define bRTL8258_TxLPFBW 0xc /* Useless */
-#define bRTL8258_RxLPFBW 0xc00
-#define bRTL8258_RSSILPFBW 0xc0
-
-/* */
-/* Other Definition */
-/* */
-
-/* byte endable for sb_write */
-#define bByte0 0x1 /* Useless */
-#define bByte1 0x2
-#define bByte2 0x4
-#define bByte3 0x8
-#define bWord0 0x3
-#define bWord1 0xc
-#define bDWord 0xf
-
-/* for PutRegsetting & GetRegSetting BitMask */
-#define bMaskByte0 0xff /* Reg 0xc50 rOFDM0_XAAGCCore~0xC6f */
-#define bMaskByte1 0xff00
-#define bMaskByte2 0xff0000
-#define bMaskByte3 0xff000000
-#define bMaskHWord 0xffff0000
-#define bMaskLWord 0x0000ffff
-#define bMaskDWord 0xffffffff
-#define bMaskH4Bits 0xf0000000
-#define bMaskOFDM_D 0xffc00000
-#define bMaskCCK 0x3f3f3f3f
-#define bMask12Bits 0xfff
-
-/* for PutRFRegsetting & GetRFRegSetting BitMask */
-#if (RTL92SE_FPGA_VERIFY == 1)
-#define bRFRegOffsetMask 0xfff
-#else
-#define bRFRegOffsetMask 0xfffff
-#endif
-#define bEnable 0x1 /* Useless */
-#define bDisabl 0x0
-
-#define LeftAntenna 0x0 /* Useless */
-#define RightAntenna 0x1
-
-#define tCheckTxStatus 500 /* 500ms Useless */
-#define tUpdateRxCounter 100 /* 100ms */
-
-#define rateCCK 0 /* Useless */
-#define rateOFDM 1
-#define rateHT 2
-
-/* define Register-End */
-#define bPMAC_End 0x1ff /* Useless */
-#define bFPGAPHY0_End 0x8ff
-#define bFPGAPHY1_End 0x9ff
-#define bCCKPHY0_End 0xaff
-#define bOFDMPHY0_End 0xcff
-#define bOFDMPHY1_End 0xdff
-
-/* define max debug item in each debug page */
-/* define bMaxItem_FPGA_PHY0 0x9 */
-/* define bMaxItem_FPGA_PHY1 0x3 */
-/* define bMaxItem_PHY_11B 0x16 */
-/* define bMaxItem_OFDM_PHY0 0x29 */
-/* define bMaxItem_OFDM_PHY1 0x0 */
-
-#define bPMACControl 0x0 /* Useless */
-#define bWMACControl 0x1
-#define bWNICControl 0x2
-
-#define RCR_AAP BIT(0) /* accept all physical address */
-#define RCR_APM BIT(1) /* accept physical match */
-#define RCR_AM BIT(2) /* accept multicast */
-#define RCR_AB BIT(3) /* accept broadcast */
-#define RCR_ACRC32 BIT(5) /* accept error packet */
-#define RCR_9356SEL BIT(6)
-#define RCR_AICV BIT(12) /* Accept ICV error packet */
-#define RCR_RXFTH0 (BIT(13)|BIT(14)|BIT(15)) /* Rx FIFO threshold */
-#define RCR_ADF BIT(18) /* Accept Data(frame type) frame */
-#define RCR_ACF BIT(19) /* Accept control frame */
-#define RCR_AMF BIT(20) /* Accept management frame */
-#define RCR_ADD3 BIT(21)
-#define RCR_APWRMGT BIT(22) /* Accept power management packet */
-#define RCR_CBSSID BIT(23) /* Accept BSSID match packet */
-#define RCR_ENMARP BIT(28) /* enable mac auto reset phy */
-#define RCR_EnCS1 BIT(29) /* enable carrier sense method 1 */
-#define RCR_EnCS2 BIT(30) /* enable carrier sense method 2 */
-#define RCR_OnlyErlPkt BIT(31) /* Rx Early mode is performed for
- * packet size greater than 1536 */
-
-/*--------------------------Define Parameters-------------------------------*/
-
-#endif /* __INC_HAL8192SPHYREG_H */
diff --git a/drivers/staging/r8188eu/include/rtw_p2p.h b/drivers/staging/r8188eu/include/rtw_p2p.h
index 92b9bfe3ea0b..b91322a1fe10 100644
--- a/drivers/staging/r8188eu/include/rtw_p2p.h
+++ b/drivers/staging/r8188eu/include/rtw_p2p.h
@@ -13,7 +13,6 @@ u32 build_prov_disc_request_p2p_ie(struct wifidirect_info *pwdinfo,
u8 *pdev_raddr);
u32 build_assoc_resp_p2p_ie(struct wifidirect_info *pwdinfo,
u8 *pbuf, u8 status_code);
-u32 build_deauth_p2p_ie(struct wifidirect_info *pwdinfo, u8 *pbuf);
u32 process_probe_req_p2p_ie(struct wifidirect_info *pwdinfo,
u8 *pframe, uint len);
u32 process_assoc_req_p2p_ie(struct wifidirect_info *pwdinfo,
diff --git a/drivers/staging/r8188eu/include/rtw_pwrctrl.h b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
index 543f928e8089..b19ef796ab54 100644
--- a/drivers/staging/r8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/r8188eu/include/rtw_pwrctrl.h
@@ -7,18 +7,6 @@
#include "osdep_service.h"
#include "drv_types.h"
-#define FW_PWR0 0
-#define FW_PWR1 1
-#define FW_PWR2 2
-#define FW_PWR3 3
-#define HW_PWR0 7
-#define HW_PWR1 6
-#define HW_PWR2 2
-#define HW_PWR3 0
-#define HW_PWR4 8
-
-#define FW_PWRMSK 0x7
-
#define XMIT_ALIVE BIT(0)
#define RECV_ALIVE BIT(1)
#define CMD_ALIVE BIT(2)
@@ -39,73 +27,8 @@ enum power_mgnt {
PS_MODE_NUM
};
-/*
- BIT[2:0] = HW state
- BIT[3] = Protocol PS state, 0: register active state,
- 1: register sleep state
- BIT[4] = sub-state
-*/
-
-#define PS_DPS BIT(0)
-#define PS_LCLK (PS_DPS)
-#define PS_RF_OFF BIT(1)
-#define PS_ALL_ON BIT(2)
-#define PS_ST_ACTIVE BIT(3)
-
-#define PS_ISR_ENABLE BIT(4)
-#define PS_IMR_ENABLE BIT(5)
-#define PS_ACK BIT(6)
-#define PS_TOGGLE BIT(7)
-
-#define PS_STATE_MASK (0x0F)
-#define PS_STATE_HW_MASK (0x07)
-#define PS_SEQ_MASK (0xc0)
-
-#define PS_STATE(x) (PS_STATE_MASK & (x))
-#define PS_STATE_HW(x) (PS_STATE_HW_MASK & (x))
-#define PS_SEQ(x) (PS_SEQ_MASK & (x))
-
-#define PS_STATE_S0 (PS_DPS)
-#define PS_STATE_S1 (PS_LCLK)
-#define PS_STATE_S2 (PS_RF_OFF)
-#define PS_STATE_S3 (PS_ALL_ON)
-#define PS_STATE_S4 ((PS_ST_ACTIVE) | (PS_ALL_ON))
-
-#define PS_IS_RF_ON(x) ((x) & (PS_ALL_ON))
-#define PS_IS_ACTIVE(x) ((x) & (PS_ST_ACTIVE))
-#define CLR_PS_STATE(x) ((x) = ((x) & (0xF0)))
-
-struct reportpwrstate_parm {
- unsigned char mode;
- unsigned char state; /* the CPWM value */
- unsigned short rsvd;
-};
-
-static inline void _init_pwrlock(struct semaphore *plock)
-{
- sema_init(plock, 1);
-}
-
-static inline void _free_pwrlock(struct semaphore *plock)
-{
-}
-
-static inline void _enter_pwrlock(struct semaphore *plock)
-{
- _rtw_down_sema(plock);
-}
-
-static inline void _exit_pwrlock(struct semaphore *plock)
-{
- up(plock);
-}
-
#define LPS_DELAY_TIME 1*HZ /* 1 sec */
-#define EXE_PWR_NONE 0x01
-#define EXE_PWR_IPS 0x02
-#define EXE_PWR_LPS 0x04
-
/* RF state. */
enum rt_rf_power_state {
rf_on, /* RF is on after RFSleep or RFOff */
@@ -115,34 +38,6 @@ enum rt_rf_power_state {
rf_max
};
-/* RF Off Level for IPS or HW/SW radio off */
-#define RT_RF_OFF_LEVL_ASPM BIT(0) /* PCI ASPM */
-#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /* PCI clock request */
-#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /* PCI D3 mode */
-#define RT_RF_OFF_LEVL_HALT_NIC BIT(3) /* NIC halt, re-init hw param*/
-#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /* FW free, re-download the FW*/
-#define RT_RF_OFF_LEVL_FW_32K BIT(5) /* FW in 32k */
-#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6) /* Always enable ASPM and Clock
- * Req in initialization. */
-#define RT_RF_LPS_DISALBE_2R BIT(30) /* When LPS is on, disable 2R
- * if no packet is RX or TX. */
-#define RT_RF_LPS_LEVEL_ASPM BIT(31) /* LPS with ASPM */
-
-#define RT_IN_PS_LEVEL(ppsc, _PS_FLAG) \
- ((ppsc->cur_ps_level & _PS_FLAG) ? true : false)
-#define RT_CLEAR_PS_LEVEL(ppsc, _PS_FLAG) \
- (ppsc->cur_ps_level &= (~(_PS_FLAG)))
-#define RT_SET_PS_LEVEL(ppsc, _PS_FLAG) \
- (ppsc->cur_ps_level |= _PS_FLAG)
-
-enum _PS_BBRegBackup_ {
- PSBBREG_RF0 = 0,
- PSBBREG_RF1,
- PSBBREG_RF2,
- PSBBREG_AFE0,
- PSBBREG_TOTALCNT
-};
-
enum { /* for ips_mode */
IPS_NONE = 0,
IPS_NORMAL,
@@ -150,12 +45,7 @@ enum { /* for ips_mode */
};
struct pwrctrl_priv {
- struct semaphore lock;
- volatile u8 rpwm; /* requested power state for fw */
- volatile u8 cpwm; /* fw current power state. updated when
- * 1. read from HCPWM 2. driver lowers power level */
- volatile u8 tog; /* toggling */
- volatile u8 cpwm_tog; /* toggling */
+ struct mutex lock; /* Mutex used to protect struct pwrctrl_priv */
u8 pwr_mode;
u8 smart_ps;
@@ -165,7 +55,6 @@ struct pwrctrl_priv {
struct work_struct cpwm_event;
u8 bpower_saving;
- u8 b_hw_radio_off;
u8 reg_rfoff;
u8 reg_pdnmode; /* powerdown mode */
u32 rfoff_reason;
@@ -188,16 +77,11 @@ struct pwrctrl_priv {
u8 power_mgnt;
u8 bFwCurrentInPSMode;
u32 DelayLPSLastTimeStamp;
- u8 btcoex_rfon;
s32 pnp_current_pwr_state;
u8 pnp_bstop_trx;
u8 bInternalAutoSuspend;
u8 bInSuspend;
-#ifdef CONFIG_BT_COEXIST
- u8 bAutoResume;
- u8 autopm_cnt;
-#endif
u8 bSupportRemoteWakeup;
struct timer_list pwr_state_check_timer;
int pwr_state_check_interval;
@@ -210,10 +94,7 @@ struct pwrctrl_priv {
u8 wepkeymask;
u8 bHWPowerdown;/* if support hw power down */
- u8 bHWPwrPindetect;
u8 bkeepfwalive;
- u8 brfoffbyhw;
- unsigned long PS_BBRegBackup[PSBBREG_TOTALCNT];
};
#define rtw_get_ips_mode_req(pwrctrlpriv) \
@@ -234,32 +115,23 @@ struct pwrctrl_priv {
(pwrctrl)->pwr_state_check_interval)
void rtw_init_pwrctrl_priv(struct adapter *adapter);
-void rtw_free_pwrctrl_priv(struct adapter *adapter);
void rtw_set_ps_mode(struct adapter *adapter, u8 ps_mode, u8 smart_ps,
u8 bcn_ant_mode);
-void rtw_set_rpwm(struct adapter *adapter, u8 val8);
void LeaveAllPowerSaveMode(struct adapter *adapter);
void ips_enter(struct adapter *padapter);
int ips_leave(struct adapter *padapter);
void rtw_ps_processor(struct adapter *padapter);
-enum rt_rf_power_state RfOnOffDetect(struct adapter *iadapter);
-
s32 LPS_RF_ON_check(struct adapter *adapter, u32 delay_ms);
void LPS_Enter(struct adapter *adapter);
void LPS_Leave(struct adapter *adapter);
-u8 rtw_interface_ps_func(struct adapter *adapter,
- enum hal_intf_ps_func efunc_id, u8 *val);
-void rtw_set_ips_deny(struct adapter *adapter, u32 ms);
int _rtw_pwr_wakeup(struct adapter *adapter, u32 ips_defer_ms,
const char *caller);
#define rtw_pwr_wakeup(adapter) \
_rtw_pwr_wakeup(adapter, RTW_PWR_STATE_CHK_INTERVAL, __func__)
-#define rtw_pwr_wakeup_ex(adapter, ips_deffer_ms) \
- _rtw_pwr_wakeup(adapter, ips_deffer_ms, __func__)
int rtw_pm_set_ips(struct adapter *adapter, u8 mode);
int rtw_pm_set_lps(struct adapter *adapter, u8 mode);
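The rtw_pwrctrl.h hunks above drop the semaphore-based _init_pwrlock()/_enter_pwrlock()/_exit_pwrlock() wrappers and replace the lock in struct pwrctrl_priv with a plain struct mutex. A minimal sketch of the resulting locking pattern, assuming a simplified pwrctrl_priv and a hypothetical update_pwr_mode() caller (neither is the driver's actual code):

#include <linux/mutex.h>
#include <linux/types.h>

struct pwrctrl_priv {
	struct mutex lock;	/* protects the fields below */
	u8 pwr_mode;
};

static void pwrctrl_init(struct pwrctrl_priv *pwrctrl)
{
	mutex_init(&pwrctrl->lock);		/* replaces sema_init(plock, 1) */
}

static void update_pwr_mode(struct pwrctrl_priv *pwrctrl, u8 mode)
{
	mutex_lock(&pwrctrl->lock);		/* was _enter_pwrlock() / _rtw_down_sema() */
	pwrctrl->pwr_mode = mode;
	mutex_unlock(&pwrctrl->lock);		/* was _exit_pwrlock() / up() */
}

Unlike a semaphore used as a binary lock, a mutex has a defined owner and is covered by lockdep, which is why staging cleanups generally prefer it for plain mutual exclusion.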
diff --git a/drivers/staging/r8188eu/include/rtw_recv.h b/drivers/staging/r8188eu/include/rtw_recv.h
index d1d1ca0e56d6..1e28ec731547 100644
--- a/drivers/staging/r8188eu/include/rtw_recv.h
+++ b/drivers/staging/r8188eu/include/rtw_recv.h
@@ -78,7 +78,6 @@ struct phy_info {
/* Real power in dBm for this packet, no beautification and aggregation.
* Keep this raw info to be used for the other procedures. */
s8 recvpower;
- u8 BTRxRSSIPercentage;
u8 SignalStrength; /* in 0-100 index. */
u8 RxPwr[MAX_PATH_NUM_92CS];/* per-path's pwdb */
u8 RxSNR[MAX_PATH_NUM_92CS];/* per-path's SNR */
@@ -274,8 +273,6 @@ struct recv_frame {
struct recv_frame *_rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
struct recv_frame *rtw_alloc_recvframe(struct __queue *pfree_recv_queue);
-void rtw_init_recvframe(struct recv_frame *precvframe,
- struct recv_priv *precvpriv);
int rtw_free_recvframe(struct recv_frame *precvframe,
struct __queue *pfree_recv_queue);
#define rtw_dequeue_recvframe(queue) rtw_alloc_recvframe(queue)
@@ -284,9 +281,6 @@ int rtw_enqueue_recvframe(struct recv_frame *precvframe, struct __queue *queue);
void rtw_free_recvframe_queue(struct __queue *pframequeue,
struct __queue *pfree_recv_queue);
u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter);
-int rtw_enqueue_recvbuf_to_head(struct recv_buf *buf, struct __queue *queue);
-int rtw_enqueue_recvbuf(struct recv_buf *precvbuf, struct __queue *queue);
-struct recv_buf *rtw_dequeue_recvbuf(struct __queue *queue);
void rtw_reordering_ctrl_timeout_handler(void *pcontext);
diff --git a/drivers/staging/r8188eu/include/rtw_rf.h b/drivers/staging/r8188eu/include/rtw_rf.h
index 48129da9c93f..7ec252fec054 100644
--- a/drivers/staging/r8188eu/include/rtw_rf.h
+++ b/drivers/staging/r8188eu/include/rtw_rf.h
@@ -69,17 +69,6 @@ enum _REG_PREAMBLE_MODE {
PREAMBLE_SHORT = 3,
};
-enum _RTL8712_RF_MIMO_CONFIG_ {
- RTL8712_RFCONFIG_1T = 0x10,
- RTL8712_RFCONFIG_2T = 0x20,
- RTL8712_RFCONFIG_1R = 0x01,
- RTL8712_RFCONFIG_2R = 0x02,
- RTL8712_RFCONFIG_1T1R = 0x11,
- RTL8712_RFCONFIG_1T2R = 0x12,
- RTL8712_RFCONFIG_TURBO = 0x92,
- RTL8712_RFCONFIG_2T2R = 0x22
-};
-
enum rf90_radio_path {
RF90_PATH_A = 0, /* Radio Path A */
RF90_PATH_B = 1, /* Radio Path B */
@@ -121,6 +110,5 @@ enum rt_rf_type_def {
};
u32 rtw_ch2freq(u32 ch);
-u32 rtw_freq2ch(u32 freq);
#endif /* _RTL8711_RF_H_ */
diff --git a/drivers/staging/r8188eu/include/rtw_security.h b/drivers/staging/r8188eu/include/rtw_security.h
index ec6ecdb7bc98..9231201f1009 100644
--- a/drivers/staging/r8188eu/include/rtw_security.h
+++ b/drivers/staging/r8188eu/include/rtw_security.h
@@ -6,6 +6,7 @@
#include "osdep_service.h"
#include "drv_types.h"
+#include <crypto/arc4.h>
#define _NO_PRIVACY_ 0x0
#define _WEP40_ 0x1
@@ -106,7 +107,10 @@ struct security_priv {
union Keytype dot118021XGrprxmickey[4];
union pn48 dot11Grptxpn; /* PN48 used for Grp Key xmit.*/
union pn48 dot11Grprxpn; /* PN48 used for Grp Key recv.*/
-#ifdef CONFIG_88EU_AP_MODE
+
+ struct arc4_ctx xmit_arc4_ctx;
+ struct arc4_ctx recv_arc4_ctx;
+
/* extend security capabilities for AP_MODE */
unsigned int dot8021xalg;/* 0:disable, 1:psk, 2:802.1x */
unsigned int wpa_psk;/* 0:disable, bit(0): WPA, bit(1):WPA2 */
@@ -114,7 +118,6 @@ struct security_priv {
unsigned int wpa2_group_cipher;
unsigned int wpa_pairwise_cipher;
unsigned int wpa2_pairwise_cipher;
-#endif
u8 wps_ie[MAX_WPS_IE_LEN];/* added in assoc req */
int wps_ie_len;
u8 binstallGrpkey;
@@ -330,12 +333,11 @@ void rtw_secmicappend(struct mic_data *pmicdata, u8 *src, u32 nBytes);
void rtw_secgetmic(struct mic_data *pmicdata, u8 *dst);
void rtw_seccalctkipmic(u8 *key, u8 *header, u8 *data, u32 data_len,
u8 *Miccode, u8 priority);
-u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe);
-u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe);
-void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe);
-u32 rtw_aes_decrypt(struct adapter *padapter, u8 *precvframe);
-u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe);
-void rtw_wep_decrypt(struct adapter *padapter, u8 *precvframe);
-void rtw_use_tkipkey_handler(void *FunctionContext);
+u32 rtw_aes_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
+u32 rtw_tkip_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
+void rtw_wep_encrypt(struct adapter *padapter, struct xmit_frame *pxmitframe);
+u32 rtw_aes_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
+u32 rtw_tkip_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
+void rtw_wep_decrypt(struct adapter *padapter, struct recv_frame *precvframe);
#endif /* __RTL871X_SECURITY_H_ */
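Two things change in rtw_security.h above: the cipher routines now take typed struct xmit_frame / struct recv_frame pointers instead of raw u8 buffers, and struct security_priv gains per-direction RC4 state (xmit_arc4_ctx / recv_arc4_ctx) from the kernel's <crypto/arc4.h> library. A hedged sketch of how such a context is typically used to generate a WEP-style RC4 keystream; the helper name and key layout are illustrative, not the driver's actual implementation:

#include <crypto/arc4.h>
#include <linux/string.h>
#include <linux/types.h>

/* RC4-encrypt a payload in place, seeded with IV || key as WEP does */
static void wep_rc4_crypt(struct arc4_ctx *ctx, const u8 *iv, const u8 *key,
			  unsigned int keylen, u8 *payload, unsigned int len)
{
	u8 seed[3 + 13];			/* 24-bit IV plus up to a WEP-104 key */

	memcpy(seed, iv, 3);
	memcpy(seed + 3, key, keylen);

	arc4_setkey(ctx, seed, 3 + keylen);	/* load the RC4 state */
	arc4_crypt(ctx, payload, payload, len);	/* XOR the keystream over the data */
}

Keeping one context per direction, as the added fields do, lets the transmit and receive paths run independently without sharing RC4 state.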
diff --git a/drivers/staging/r8188eu/include/rtw_sreset.h b/drivers/staging/r8188eu/include/rtw_sreset.h
deleted file mode 100644
index 4e97997c305b..000000000000
--- a/drivers/staging/r8188eu/include/rtw_sreset.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
-/* Copyright(c) 2007 - 2012 Realtek Corporation. */
-
-#ifndef _RTW_SRESET_C_
-#define _RTW_SRESET_C_
-
-#include "osdep_service.h"
-#include "drv_types.h"
-
-struct sreset_priv {
- struct mutex silentreset_mutex;
- u8 silent_reset_inprogress;
- u8 wifi_error_status;
- unsigned long last_tx_time;
- unsigned long last_tx_complete_time;
-};
-
-#include "rtl8188e_hal.h"
-
-#define WIFI_STATUS_SUCCESS 0
-#define USB_VEN_REQ_CMD_FAIL BIT(0)
-#define USB_READ_PORT_FAIL BIT(1)
-#define USB_WRITE_PORT_FAIL BIT(2)
-#define WIFI_MAC_TXDMA_ERROR BIT(3)
-#define WIFI_TX_HANG BIT(4)
-#define WIFI_RX_HANG BIT(5)
-#define WIFI_IF_NOT_EXIST BIT(6)
-
-void sreset_init_value(struct adapter *padapter);
-void sreset_reset_value(struct adapter *padapter);
-u8 sreset_get_wifi_status(struct adapter *padapter);
-void sreset_set_wifi_error_status(struct adapter *padapter, u32 status);
-
-#endif
diff --git a/drivers/staging/r8188eu/include/rtw_xmit.h b/drivers/staging/r8188eu/include/rtw_xmit.h
index 5f6e2402e5c4..b2df1480d66b 100644
--- a/drivers/staging/r8188eu/include/rtw_xmit.h
+++ b/drivers/staging/r8188eu/include/rtw_xmit.h
@@ -185,7 +185,6 @@ enum {
void rtw_sctx_init(struct submit_ctx *sctx, int timeout_ms);
int rtw_sctx_wait(struct submit_ctx *sctx);
void rtw_sctx_done_err(struct submit_ctx **sctx, int status);
-void rtw_sctx_done(struct submit_ctx **sctx);
struct xmit_buf {
struct list_head list;
@@ -257,7 +256,6 @@ struct agg_pkt_info {
struct xmit_priv {
spinlock_t lock;
- struct semaphore xmit_sema;
struct semaphore terminate_xmitthread_sema;
struct __queue be_pending;
struct __queue bk_pending;
@@ -333,8 +331,6 @@ struct xmit_frame *rtw_dequeue_xframe(struct xmit_priv *pxmitpriv,
s32 rtw_xmit_classifier(struct adapter *padapter,
struct xmit_frame *pxmitframe);
-u32 rtw_calculate_wlan_pkt_size_by_attribue(struct pkt_attrib *pattrib);
-#define rtw_wlan_pkt_size(f) rtw_calculate_wlan_pkt_size_by_attribue(&f->attrib)
s32 rtw_xmitframe_coalesce(struct adapter *padapter, struct sk_buff *pkt,
struct xmit_frame *pxmitframe);
s32 _rtw_init_hw_txqueue(struct hw_txqueue *phw_txqueue, u8 ac_tag);
@@ -349,12 +345,10 @@ void rtw_alloc_hwxmits(struct adapter *padapter);
void rtw_free_hwxmits(struct adapter *padapter);
s32 rtw_xmit(struct adapter *padapter, struct sk_buff **pkt);
-#if defined(CONFIG_88EU_AP_MODE)
int xmitframe_enqueue_for_sleeping_sta(struct adapter *padapter, struct xmit_frame *pxmitframe);
void stop_sta_xmit(struct adapter *padapter, struct sta_info *psta);
void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta);
void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *psta);
-#endif
u8 qos_acm(u8 acm_mask, u8 priority);
u32 rtw_get_ff_hwaddr(struct xmit_frame *pxmitframe);
diff --git a/drivers/staging/r8188eu/include/sta_info.h b/drivers/staging/r8188eu/include/sta_info.h
index 8ff99fc6381d..24b1254310b2 100644
--- a/drivers/staging/r8188eu/include/sta_info.h
+++ b/drivers/staging/r8188eu/include/sta_info.h
@@ -124,7 +124,6 @@ struct sta_info {
/* sta_info: (AP & STA) CAP/INFO */
struct list_head asoc_list;
-#ifdef CONFIG_88EU_AP_MODE
struct list_head auth_list;
unsigned int expire_to;
@@ -164,9 +163,7 @@ struct sta_info {
u8 has_legacy_ac;
unsigned int sleepq_ac_len;
-#endif /* CONFIG_88EU_AP_MODE */
-#ifdef CONFIG_88EU_P2P
/* p2p priv data */
u8 is_p2p_device;
u8 p2p_status_code;
@@ -180,7 +177,6 @@ struct sta_info {
u8 secdev_types_list[32];/* 32/8 == 4; */
u16 dev_name_len;
u8 dev_name[32];
-#endif /* CONFIG_88EU_P2P */
u8 under_exist_checking;
u8 keep_alive_trycnt;
@@ -306,7 +302,6 @@ struct sta_priv {
spinlock_t asoc_list_lock;
struct list_head asoc_list;
-#ifdef CONFIG_88EU_AP_MODE
struct list_head auth_list;
spinlock_t auth_list_lock;
u8 asoc_list_cnt;
@@ -330,8 +325,6 @@ struct sta_priv {
u16 max_num_sta;
struct wlan_acl_pool acl_list;
-#endif
-
};
static inline u32 wifi_mac_hash(u8 *mac)
diff --git a/drivers/staging/r8188eu/include/usb_ops.h b/drivers/staging/r8188eu/include/usb_ops.h
index c53cc54b6b87..0a1155bbc7b8 100644
--- a/drivers/staging/r8188eu/include/usb_ops.h
+++ b/drivers/staging/r8188eu/include/usb_ops.h
@@ -19,11 +19,6 @@
#include "usb_ops_linux.h"
-void rtl8188eu_set_hw_type(struct adapter *padapter);
-#define hal_set_hw_type rtl8188eu_set_hw_type
-void rtl8188eu_set_intf_ops(struct _io_ops *pops);
-#define usb_set_intf_ops rtl8188eu_set_intf_ops
-
/*
* Increase and check if the continual_urb_error of this @param dvobjprivei
* is larger than MAX_CONTINUAL_URB_ERR
diff --git a/drivers/staging/r8188eu/include/usb_ops_linux.h b/drivers/staging/r8188eu/include/usb_ops_linux.h
index c357a3b1560e..641f059ffaf7 100644
--- a/drivers/staging/r8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/r8188eu/include/usb_ops_linux.h
@@ -28,12 +28,4 @@
unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr);
-void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem);
-void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
-
-void usb_read_port_cancel(struct intf_hdl *pintfhdl);
-
-u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem);
-void usb_write_port_cancel(struct intf_hdl *pintfhdl);
-
#endif
diff --git a/drivers/staging/r8188eu/include/usb_osintf.h b/drivers/staging/r8188eu/include/usb_osintf.h
index d1a1f739309c..624298b4bd0b 100644
--- a/drivers/staging/r8188eu/include/usb_osintf.h
+++ b/drivers/staging/r8188eu/include/usb_osintf.h
@@ -19,11 +19,8 @@ u8 usbvendorrequest(struct dvobj_priv *pdvobjpriv, enum bt_usb_request brequest,
int pm_netdev_open(struct net_device *pnetdev, u8 bnormal);
void netdev_br_init(struct net_device *netdev);
void dhcp_flag_bcast(struct adapter *priv, struct sk_buff *skb);
-void *scdb_findEntry(struct adapter *priv, unsigned char *macAddr,
- unsigned char *ipAddr);
+void *scdb_findEntry(struct adapter *priv, unsigned char *ipAddr);
void nat25_db_expire(struct adapter *priv);
int nat25_db_handle(struct adapter *priv, struct sk_buff *skb, int method);
-int rtw_resume_process(struct adapter *padapter);
-
#endif
diff --git a/drivers/staging/r8188eu/include/wifi.h b/drivers/staging/r8188eu/include/wifi.h
index 0b3fd94cea18..193a557f0f47 100644
--- a/drivers/staging/r8188eu/include/wifi.h
+++ b/drivers/staging/r8188eu/include/wifi.h
@@ -114,35 +114,6 @@ enum WIFI_REASON_CODE {
_RSON_TDLS_TEAR_UN_RSN_ = 26,
};
-/* Reason codes (IEEE 802.11-2007, 7.3.1.7, Table 7-22)
-
-#define WLAN_REASON_UNSPECIFIED 1
-#define WLAN_REASON_PREV_AUTH_NOT_VALID 2
-#define WLAN_REASON_DEAUTH_LEAVING 3
-#define WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY 4
-#define WLAN_REASON_DISASSOC_AP_BUSY 5
-#define WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA 6
-#define WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA 7
-#define WLAN_REASON_DISASSOC_STA_HAS_LEFT 8
-#define WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH 9 */
-/* IEEE 802.11h */
-#define WLAN_REASON_PWR_CAPABILITY_NOT_VALID 10
-#define WLAN_REASON_SUPPORTED_CHANNEL_NOT_VALID 11
-
-/* IEEE 802.11i
-#define WLAN_REASON_INVALID_IE 13
-#define WLAN_REASON_MICHAEL_MIC_FAILURE 14
-#define WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT 15
-#define WLAN_REASON_GROUP_KEY_UPDATE_TIMEOUT 16
-#define WLAN_REASON_IE_IN_4WAY_DIFFERS 17
-#define WLAN_REASON_GROUP_CIPHER_NOT_VALID 18
-#define WLAN_REASON_PAIRWISE_CIPHER_NOT_VALID 19
-#define WLAN_REASON_AKMP_NOT_VALID 20
-#define WLAN_REASON_UNSUPPORTED_RSN_IE_VERSION 21
-#define WLAN_REASON_INVALID_RSN_IE_CAPAB 22
-#define WLAN_REASON_IEEE_802_1X_AUTH_FAILED 23
-#define WLAN_REASON_CIPHER_SUITE_REJECTED 24 */
-
enum WIFI_STATUS_CODE {
_STATS_SUCCESSFUL_ = 0,
_STATS_FAILURE_ = 1,
@@ -157,19 +128,6 @@ enum WIFI_STATUS_CODE {
_STATS_RATE_FAIL_ = 18,
};
-/* Status codes (IEEE 802.11-2007, 7.3.1.9, Table 7-23)
-#define WLAN_STATUS_SUCCESS 0
-#define WLAN_STATUS_UNSPECIFIED_FAILURE 1
-#define WLAN_STATUS_CAPS_UNSUPPORTED 10
-#define WLAN_STATUS_REASSOC_NO_ASSOC 11
-#define WLAN_STATUS_ASSOC_DENIED_UNSPEC 12
-#define WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG 13
-#define WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION 14
-#define WLAN_STATUS_CHALLENGE_FAIL 15
-#define WLAN_STATUS_AUTH_TIMEOUT 16
-#define WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA 17
-#define WLAN_STATUS_ASSOC_DENIED_RATES 18 */
-
/* entended */
/* IEEE 802.11b */
#define WLAN_STATUS_ASSOC_DENIED_NOSHORT 19
@@ -554,16 +512,6 @@ static inline int IsFrameTypeCtrl(unsigned char *pframe)
#define _IEEE8021X_MGT_ 1 /* WPA */
#define _IEEE8021X_PSK_ 2 /* WPA with pre-shared key */
-/*
-#define _NO_PRIVACY_ 0
-#define _WEP_40_PRIVACY_ 1
-#define _TKIP_PRIVACY_ 2
-#define _WRAP_PRIVACY_ 3
-#define _CCMP_PRIVACY_ 4
-#define _WEP_104_PRIVACY_ 5
-#define _WEP_WPA_MIXED_PRIVACY_ 6 WEP + WPA
-*/
-
/*-----------------------------------------------------------------------------
Below is the definition for WMM
------------------------------------------------------------------------------*/
diff --git a/drivers/staging/r8188eu/include/xmit_osdep.h b/drivers/staging/r8188eu/include/xmit_osdep.h
index 191c36361b63..3e778dff0ed8 100644
--- a/drivers/staging/r8188eu/include/xmit_osdep.h
+++ b/drivers/staging/r8188eu/include/xmit_osdep.h
@@ -37,8 +37,6 @@ int rtw_os_xmit_resource_alloc(struct adapter *padapter,
void rtw_os_xmit_resource_free(struct adapter *padapter,
struct xmit_buf *pxmitbuf, u32 free_sz);
-void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib);
-
uint rtw_remainder_len(struct pkt_file *pfile);
void _rtw_open_pktfile(struct sk_buff *pkt, struct pkt_file *pfile);
uint _rtw_pktfile_read(struct pkt_file *pfile, u8 *rmem, uint rlen);
diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
index 1fd375076001..52d42e576443 100644
--- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c
@@ -12,11 +12,10 @@
#include "../include/rtw_mlme_ext.h"
#include "../include/rtw_ioctl.h"
#include "../include/rtw_ioctl_set.h"
-#include "../include/rtw_mp_ioctl.h"
#include "../include/usb_ops.h"
#include "../include/rtl8188e_hal.h"
+#include "../include/rtl8188e_led.h"
-#include "../include/rtw_mp.h"
#include "../include/rtw_iol.h"
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV + 30)
@@ -38,48 +37,6 @@
#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
#define WEXT_CSCAN_TYPE_SECTION 'T'
-static struct mp_ioctl_handler mp_ioctl_hdl[] = {
-/*0*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_start_test_hdl, OID_RT_PRO_START_TEST)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_stop_test_hdl, OID_RT_PRO_STOP_TEST)
-
- GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_read_register_hdl, OID_RT_PRO_READ_REGISTER)
- GEN_HANDLER(sizeof(struct rwreg_param), rtl8188eu_oid_rt_pro_write_register_hdl, OID_RT_PRO_WRITE_REGISTER)
- GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_read_bb_reg_hdl, OID_RT_PRO_READ_BB_REG)
-/*5*/ GEN_HANDLER(sizeof(struct bb_reg_param), rtl8188eu_oid_rt_pro_write_bb_reg_hdl, OID_RT_PRO_WRITE_BB_REG)
- GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_read_rf_reg_hdl, OID_RT_PRO_RF_READ_REGISTRY)
- GEN_HANDLER(sizeof(struct rf_reg_param), rtl8188eu_oid_rt_pro_write_rf_reg_hdl, OID_RT_PRO_RF_WRITE_REGISTRY)
-
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_channel_direct_call_hdl, OID_RT_PRO_SET_CHANNEL_DIRECT_CALL)
- GEN_HANDLER(sizeof(struct txpower_param), rtl8188eu_oid_rt_pro_set_tx_power_control_hdl, OID_RT_PRO_SET_TX_POWER_CONTROL)
-/*10*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_data_rate_hdl, OID_RT_PRO_SET_DATA_RATE)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_bandwidth_hdl, OID_RT_SET_BANDWIDTH)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_antenna_bb_hdl, OID_RT_PRO_SET_ANTENNA_BB)
-
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_continuous_tx_hdl, OID_RT_PRO_SET_CONTINUOUS_TX)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_carrier_tx_hdl, OID_RT_PRO_SET_SINGLE_CARRIER_TX)
-/*15*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_carrier_suppression_tx_hdl, OID_RT_PRO_SET_CARRIER_SUPPRESSION_TX)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_pro_set_single_tone_tx_hdl, OID_RT_PRO_SET_SINGLE_TONE_TX)
-
- EXT_MP_IOCTL_HANDLER(0, xmit_packet, 0)
-
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_set_rx_packet_type_hdl, OID_RT_SET_RX_PACKET_TYPE)
- GEN_HANDLER(0, rtl8188eu_oid_rt_reset_phy_rx_packet_count_hdl, OID_RT_RESET_PHY_RX_PACKET_COUNT)
-/*20*/ GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_received_hdl, OID_RT_GET_PHY_RX_PACKET_RECEIVED)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_phy_rx_packet_crc32_error_hdl, OID_RT_GET_PHY_RX_PACKET_CRC32_ERROR)
-
- GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
- GEN_HANDLER(sizeof(struct eeprom_rw_param), NULL, 0)
- GEN_HANDLER(sizeof(struct efuse_access_struct), rtl8188eu_oid_rt_pro_efuse_hdl, OID_RT_PRO_EFUSE)
-/*25*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_efuse_map_hdl, OID_RT_PRO_EFUSE_MAP)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_max_size_hdl, OID_RT_GET_EFUSE_MAX_SIZE)
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_efuse_current_size_hdl, OID_RT_GET_EFUSE_CURRENT_SIZE)
-
- GEN_HANDLER(sizeof(u32), rtl8188eu_oid_rt_get_thermal_meter_hdl, OID_RT_PRO_GET_THERMAL_METER)
- GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_pro_set_power_tracking_hdl, OID_RT_PRO_SET_POWER_TRACKING)
-/*30*/ GEN_HANDLER(sizeof(u8), rtl8188eu_oid_rt_set_power_down_hdl, OID_RT_SET_POWER_DOWN)
-/*31*/ GEN_HANDLER(0, rtl8188eu_oid_rt_pro_trigger_gpio_hdl, 0)
-};
-
static u32 rtw_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000,
48000000, 54000000};
@@ -137,7 +94,6 @@ static char *translate_scan(struct adapter *padapter,
u8 bw_40MHz = 0, short_GI = 0;
u16 mcs_rate = 0;
u8 ss, sq;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
@@ -165,7 +121,6 @@ static char *translate_scan(struct adapter *padapter,
if (!blnGotP2PIE)
return start;
}
-#endif /* CONFIG_88EU_P2P */
/* AP MAC address */
iwe.cmd = SIOCGIWAP;
@@ -424,9 +379,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct security_priv *psecuritypriv = &padapter->securitypriv;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
param->u.crypt.err = 0;
param->u.crypt.alg[IEEE_CRYPT_ALG_NAME_LEN - 1] = '\0';
@@ -466,11 +419,10 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial);
- pwep = kmalloc(wep_total_len, GFP_KERNEL);
+ pwep = kzalloc(wep_total_len, GFP_KERNEL);
if (!pwep)
goto exit;
- memset(pwep, 0, wep_total_len);
pwep->KeyLength = wep_key_len;
pwep->Length = wep_total_len;
if (wep_key_len == 13) {
@@ -539,10 +491,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
padapter->securitypriv.dot118021XGrpKeyid = param->u.crypt.idx;
rtw_set_key(padapter, &padapter->securitypriv, param->u.crypt.idx, 1);
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_PROVISIONING_ING))
rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_DONE);
-#endif /* CONFIG_88EU_P2P */
}
}
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
@@ -572,9 +522,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
u8 *buf = NULL;
int group_cipher = 0, pairwise_cipher = 0;
int ret = 0;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
if (ielen > MAX_WPA_IE_LEN || !pie) {
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
@@ -585,14 +533,12 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
}
if (ielen) {
- buf = kzalloc(ielen, GFP_KERNEL);
+ buf = kmemdup(pie, ielen, GFP_KERNEL);
if (!buf) {
ret = -ENOMEM;
goto exit;
}
- memcpy(buf, pie, ielen);
-
/* dump */
{
int i;
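The wpa_set_encryption() and rtw_set_wpa_ie() hunks above apply two standard allocation cleanups: kmalloc() followed by memset(..., 0, ...) becomes kzalloc(), and kzalloc() followed by memcpy() from an existing buffer becomes kmemdup(). A minimal sketch of the kmemdup() pattern (the helper is illustrative):

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

/* duplicate an information element into kernel memory in one call */
static u8 *dup_wpa_ie(const u8 *pie, unsigned int ielen)
{
	/* kmemdup() = kmalloc() + memcpy(); returns NULL on allocation failure */
	return kmemdup(pie, ielen, GFP_KERNEL);
}

Besides being shorter, the combined helpers remove the chance of the memset()/memcpy() being forgotten or sized inconsistently with the allocation.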
@@ -679,10 +625,8 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
memcpy(padapter->securitypriv.wps_ie, &buf[cnt], padapter->securitypriv.wps_ie_len);
set_fwstate(&padapter->mlmepriv, WIFI_UNDER_WPS);
-#ifdef CONFIG_88EU_P2P
if (rtw_p2p_chk_state(pwdinfo, P2P_STATE_GONEGO_OK))
rtw_p2p_set_state(pwdinfo, P2P_STATE_PROVISIONING_ING);
-#endif /* CONFIG_88EU_P2P */
cnt += buf[cnt + 1] + 2;
break;
} else {
@@ -1151,16 +1095,8 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct ndis_802_11_ssid ssid[RTW_SSID_SCAN_AMOUNT];
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
- if (padapter->registrypriv.mp_mode == 1) {
- if (check_fwstate(pmlmepriv, WIFI_MP_STATE)) {
- ret = -1;
- goto exit;
- }
- }
if (_FAIL == rtw_pwr_wakeup(padapter)) {
ret = -1;
goto exit;
@@ -1199,14 +1135,12 @@ static int rtw_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
/* the pmlmepriv->scan_interval is always equal to 3. */
/* So, the wpa_supplicant won't find out the WPS SoftAP. */
-#ifdef CONFIG_88EU_P2P
if (pwdinfo->p2p_state != P2P_STATE_NONE) {
rtw_p2p_set_pre_state(pwdinfo, rtw_p2p_state(pwdinfo));
rtw_p2p_set_state(pwdinfo, P2P_STATE_FIND_PHASE_SEARCH);
rtw_p2p_findphase_ex_set(pwdinfo, P2P_FINDPHASE_EX_FULL);
rtw_free_network_queue(padapter, true);
}
-#endif /* CONFIG_88EU_P2P */
memset(ssid, 0, sizeof(struct ndis_802_11_ssid) * RTW_SSID_SCAN_AMOUNT);
@@ -1302,16 +1236,8 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
u32 cnt = 0;
u32 wait_for_surveydone;
int wait_status;
-#ifdef CONFIG_88EU_P2P
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
-#endif /* CONFIG_88EU_P2P */
- if (padapter->pwrctrlpriv.brfoffbyhw && padapter->bDriverStopped) {
- ret = -EINVAL;
- goto exit;
- }
-
-#ifdef CONFIG_88EU_P2P
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
/* P2P is enabled */
wait_for_surveydone = 200;
@@ -1319,11 +1245,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
/* P2P is disabled */
wait_for_surveydone = 100;
}
-#else
- {
- wait_for_surveydone = 100;
- }
-#endif /* CONFIG_88EU_P2P */
wait_status = _FW_UNDER_SURVEY | _FW_UNDER_LINKING;
@@ -1359,8 +1280,6 @@ static int rtw_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
wrqu->data.length = ev - extra;
wrqu->data.flags = 0;
-exit:
-
return ret;
}
@@ -1978,7 +1897,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
struct ieee_param *param = NULL;
struct iw_point *pencoding = &wrqu->encoding;
struct iw_encode_ext *pext = (struct iw_encode_ext *)extra;
- int ret = 0;
+ int ret = -1;
param_len = sizeof(struct ieee_param) + pext->key_len;
param = kzalloc(param_len, GFP_KERNEL);
@@ -2004,10 +1923,10 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
alg_name = "CCMP";
break;
default:
- return -1;
+ goto out;
}
- strncpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
+ strlcpy((char *)param->u.crypt.alg, alg_name, IEEE_CRYPT_ALG_NAME_LEN);
if (pext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
param->u.crypt.set_tx = 1;
@@ -2031,6 +1950,7 @@ static int rtw_wx_set_enc_ext(struct net_device *dev,
ret = wpa_set_encryption(dev, param, param_len);
+out:
kfree(param);
return ret;
}
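The rtw_wx_set_enc_ext() hunks above make two small robustness fixes: the unknown-algorithm case now jumps to a common out: label so the earlier kzalloc()'d param buffer is always freed (the old return -1 leaked it), and the bounded string copy switches from strncpy(), which does not guarantee NUL termination, to strlcpy(), which does. A sketch of the resulting shape, with simplified names:

#include <linux/slab.h>
#include <linux/string.h>

static int set_alg(const char *alg_name, size_t buf_len)
{
	char *param;
	int ret = -1;				/* assume failure until proven otherwise */

	param = kzalloc(buf_len, GFP_KERNEL);
	if (!param)
		return -ENOMEM;

	if (!alg_name)				/* unknown algorithm: no early return */
		goto out;

	strlcpy(param, alg_name, buf_len);	/* always NUL-terminates within buf_len */
	ret = 0;
out:
	kfree(param);				/* a single exit frees param on every path */
	return ret;
}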
@@ -2064,14 +1984,9 @@ static int rtw_wx_read32(struct net_device *dev,
padapter = (struct adapter *)rtw_netdev_priv(dev);
p = &wrqu->data;
len = p->length;
- ptmp = kmalloc(len, GFP_KERNEL);
- if (!ptmp)
- return -ENOMEM;
-
- if (copy_from_user(ptmp, p->pointer, len)) {
- kfree(ptmp);
- return -EFAULT;
- }
+ ptmp = memdup_user(p->pointer, len);
+ if (IS_ERR(ptmp))
+ return PTR_ERR(ptmp);
bytes = 0;
addr = 0;
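rtw_wx_read32() above replaces the open-coded kmalloc() + copy_from_user() + error-unwind sequence with memdup_user(), which allocates a kernel buffer, copies the userspace data into it, and returns an ERR_PTR() on failure. A minimal sketch of the calling convention (the wrapper function is illustrative):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static int read_user_args(const void __user *uptr, size_t len)
{
	u8 *ptmp;

	ptmp = memdup_user(uptr, len);	/* kmalloc() + copy_from_user() in one call */
	if (IS_ERR(ptmp))
		return PTR_ERR(ptmp);	/* already -ENOMEM or -EFAULT as appropriate */

	/* ... parse ptmp[0..len - 1] ... */

	kfree(ptmp);
	return 0;
}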
@@ -2145,7 +2060,7 @@ static int rtw_wx_read_rf(struct net_device *dev,
path = *(u32 *)extra;
addr = *((u32 *)extra + 1);
- data32 = rtw_hal_read_rfreg(padapter, path, addr, 0xFFFFF);
+ data32 = rtl8188e_PHY_QueryRFReg(padapter, path, addr, 0xFFFFF);
/*
* IMPORTANT!!
* Only when wireless private ioctl is at odd order,
@@ -2166,7 +2081,7 @@ static int rtw_wx_write_rf(struct net_device *dev,
path = *(u32 *)extra;
addr = *((u32 *)extra + 1);
data32 = *((u32 *)extra + 2);
- rtw_hal_write_rfreg(padapter, path, addr, 0xFFFFF, data32);
+ rtl8188e_PHY_SetRFReg(padapter, path, addr, 0xFFFFF, data32);
return 0;
}
@@ -2230,183 +2145,6 @@ static int rtw_drvext_hdl(struct net_device *dev, struct iw_request_info *info,
return 0;
}
-static void rtw_dbg_mode_hdl(struct adapter *padapter, u32 id, u8 *pdata, u32 len)
-{
- struct mp_rw_reg *RegRWStruct;
- struct rf_reg_param *prfreg;
- u8 path;
- u8 offset;
- u32 value;
-
- DBG_88E("%s\n", __func__);
-
- switch (id) {
- case GEN_MP_IOCTL_SUBCODE(MP_START):
- DBG_88E("871x_driver is only for normal mode, can't enter mp mode\n");
- break;
- case GEN_MP_IOCTL_SUBCODE(READ_REG):
- RegRWStruct = (struct mp_rw_reg *)pdata;
- switch (RegRWStruct->width) {
- case 1:
- RegRWStruct->value = rtw_read8(padapter, RegRWStruct->offset);
- break;
- case 2:
- RegRWStruct->value = rtw_read16(padapter, RegRWStruct->offset);
- break;
- case 4:
- RegRWStruct->value = rtw_read32(padapter, RegRWStruct->offset);
- break;
- default:
- break;
- }
-
- break;
- case GEN_MP_IOCTL_SUBCODE(WRITE_REG):
- RegRWStruct = (struct mp_rw_reg *)pdata;
- switch (RegRWStruct->width) {
- case 1:
- rtw_write8(padapter, RegRWStruct->offset, (u8)RegRWStruct->value);
- break;
- case 2:
- rtw_write16(padapter, RegRWStruct->offset, (u16)RegRWStruct->value);
- break;
- case 4:
- rtw_write32(padapter, RegRWStruct->offset, (u32)RegRWStruct->value);
- break;
- default:
- break;
- }
-
- break;
- case GEN_MP_IOCTL_SUBCODE(READ_RF_REG):
-
- prfreg = (struct rf_reg_param *)pdata;
-
- path = (u8)prfreg->path;
- offset = (u8)prfreg->offset;
-
- value = rtw_hal_read_rfreg(padapter, path, offset, 0xffffffff);
-
- prfreg->value = value;
-
- break;
- case GEN_MP_IOCTL_SUBCODE(WRITE_RF_REG):
-
- prfreg = (struct rf_reg_param *)pdata;
-
- path = (u8)prfreg->path;
- offset = (u8)prfreg->offset;
- value = prfreg->value;
-
- rtw_hal_write_rfreg(padapter, path, offset, 0xffffffff, value);
-
- break;
- case GEN_MP_IOCTL_SUBCODE(TRIGGER_GPIO):
- DBG_88E("==> trigger gpio 0\n");
- rtw_hal_set_hwreg(padapter, HW_VAR_TRIGGER_GPIO_0, NULL);
- break;
- case GEN_MP_IOCTL_SUBCODE(GET_WIFI_STATUS):
- *pdata = rtw_hal_sreset_get_wifi_status(padapter);
- break;
- default:
- break;
- }
-}
-
-static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- u32 BytesRead, BytesWritten, BytesNeeded;
- struct oid_par_priv oid_par;
- struct mp_ioctl_handler *phandler;
- struct mp_ioctl_param *poidparam;
- uint status = 0;
- u16 len;
- u8 *pparmbuf = NULL, bset;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- struct iw_point *p = &wrqu->data;
-
- if ((!p->length) || (!p->pointer)) {
- ret = -EINVAL;
- goto _rtw_mp_ioctl_hdl_exit;
- }
- pparmbuf = NULL;
- bset = (u8)(p->flags & 0xFFFF);
- len = p->length;
- pparmbuf = kmalloc(len, GFP_KERNEL);
- if (!pparmbuf) {
- ret = -ENOMEM;
- goto _rtw_mp_ioctl_hdl_exit;
- }
-
- if (copy_from_user(pparmbuf, p->pointer, len)) {
- ret = -EFAULT;
- goto _rtw_mp_ioctl_hdl_exit;
- }
-
- poidparam = (struct mp_ioctl_param *)pparmbuf;
-
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
- ret = -EINVAL;
- goto _rtw_mp_ioctl_hdl_exit;
- }
-
- if (padapter->registrypriv.mp_mode == 1) {
- phandler = mp_ioctl_hdl + poidparam->subcode;
-
- if ((phandler->paramsize != 0) && (poidparam->len < phandler->paramsize)) {
- ret = -EINVAL;
- goto _rtw_mp_ioctl_hdl_exit;
- }
-
- if (phandler->handler) {
- oid_par.adapter_context = padapter;
- oid_par.oid = phandler->oid;
- oid_par.information_buf = poidparam->data;
- oid_par.information_buf_len = poidparam->len;
- oid_par.dbg = 0;
-
- BytesWritten = 0;
- BytesNeeded = 0;
-
- if (bset) {
- oid_par.bytes_rw = &BytesRead;
- oid_par.bytes_needed = &BytesNeeded;
- oid_par.type_of_oid = SET_OID;
- } else {
- oid_par.bytes_rw = &BytesWritten;
- oid_par.bytes_needed = &BytesNeeded;
- oid_par.type_of_oid = QUERY_OID;
- }
-
- status = phandler->handler(&oid_par);
- } else {
- DBG_88E("rtw_mp_ioctl_hdl(): err!, subcode =%d, oid =%d, handler =%p\n",
- poidparam->subcode, phandler->oid, phandler->handler);
- ret = -EFAULT;
- goto _rtw_mp_ioctl_hdl_exit;
- }
- } else {
- rtw_dbg_mode_hdl(padapter, poidparam->subcode, poidparam->data, poidparam->len);
- }
-
- if (bset == 0x00) {/* query info */
- if (copy_to_user(p->pointer, pparmbuf, len))
- ret = -EFAULT;
- }
-
- if (status) {
- ret = -EFAULT;
- goto _rtw_mp_ioctl_hdl_exit;
- }
-
-_rtw_mp_ioctl_hdl_exit:
-
- kfree(pparmbuf);
- return ret;
-}
-
static int rtw_get_ap_info(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -2559,7 +2297,6 @@ exit:
return ret;
}
-#ifdef CONFIG_88EU_P2P
static int rtw_wext_p2p_enable(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -3664,15 +3401,12 @@ static int rtw_p2p_got_wpsinfo(struct net_device *dev,
return ret;
}
-#endif /* CONFIG_88EU_P2P */
-
static int rtw_p2p_set(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret = 0;
-#ifdef CONFIG_88EU_P2P
DBG_88E("[%s] extra = %s\n", __func__, extra);
if (!memcmp(extra, "enable =", 7)) {
rtw_wext_p2p_enable(dev, info, wrqu, &extra[7]);
@@ -3719,7 +3453,6 @@ static int rtw_p2p_set(struct net_device *dev,
wrqu->data.length -= 11;
rtw_p2p_set_persistent(dev, info, wrqu, &extra[11]);
}
-#endif /* CONFIG_88EU_P2P */
return ret;
}
@@ -3730,7 +3463,6 @@ static int rtw_p2p_get(struct net_device *dev,
{
int ret = 0;
-#ifdef CONFIG_88EU_P2P
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
if (padapter->bShowGetP2PState)
@@ -3754,7 +3486,6 @@ static int rtw_p2p_get(struct net_device *dev,
} else if (!memcmp(wrqu->data.pointer, "op_ch", 5)) {
rtw_p2p_get_op_ch(dev, info, wrqu, extra);
}
-#endif /* CONFIG_88EU_P2P */
return ret;
}
@@ -3764,7 +3495,6 @@ static int rtw_p2p_get2(struct net_device *dev,
{
int ret = 0;
-#ifdef CONFIG_88EU_P2P
DBG_88E("[%s] extra = %s\n", __func__, (char *)wrqu->data.pointer);
if (!memcmp(extra, "wpsCM =", 6)) {
wrqu->data.length -= 6;
@@ -3783,34 +3513,6 @@ static int rtw_p2p_get2(struct net_device *dev,
rtw_p2p_get_invitation_procedure(dev, info, wrqu, &extra[8]);
}
-#endif /* CONFIG_88EU_P2P */
-
- return ret;
-}
-
-static int rtw_cta_test_start(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- int ret = 0;
- struct adapter *padapter = (struct adapter *)rtw_netdev_priv(dev);
- DBG_88E("%s %s\n", __func__, extra);
- if (!strcmp(extra, "1"))
- padapter->in_cta_test = 1;
- else
- padapter->in_cta_test = 0;
-
- if (padapter->in_cta_test) {
- u32 v = rtw_read32(padapter, REG_RCR);
- v &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);/* RCR_ADF */
- rtw_write32(padapter, REG_RCR, v);
- DBG_88E("enable RCR_ADF\n");
- } else {
- u32 v = rtw_read32(padapter, REG_RCR);
- v |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;/* RCR_ADF */
- rtw_write32(padapter, REG_RCR, v);
- DBG_88E("disable RCR_ADF\n");
- }
return ret;
}
@@ -3847,7 +3549,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
if (!memcmp(rereg_priv->old_ifname, "disable%d", 9)) {
padapter->ledpriv.bRegUseLed = rereg_priv->old_bRegUseLed;
- rtw_hal_sw_led_init(padapter);
+ rtl8188eu_InitSwLeds(padapter);
rtw_ips_mode_req(&padapter->pwrctrlpriv, rereg_priv->old_ips_mode);
}
@@ -3863,7 +3565,7 @@ static int rtw_rereg_nd_name(struct net_device *dev,
rtw_led_control(padapter, LED_CTL_POWER_OFF);
rereg_priv->old_bRegUseLed = padapter->ledpriv.bRegUseLed;
padapter->ledpriv.bRegUseLed = false;
- rtw_hal_sw_led_deinit(padapter);
+ rtl8188eu_DeInitSwLeds(padapter);
/* the interface is being "disabled", we can do deeper IPS */
rereg_priv->old_ips_mode = rtw_get_ips_mode_req(&padapter->pwrctrlpriv);
@@ -3912,7 +3614,7 @@ static void rf_reg_dump(struct adapter *padapter)
int i, j = 1, path;
u32 value;
u8 rf_type, path_nums = 0;
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
+ GetHwReg8188EU(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
pr_info("\n ======= RF REG =======\n");
if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
@@ -3923,7 +3625,7 @@ static void rf_reg_dump(struct adapter *padapter)
for (path = 0; path < path_nums; path++) {
pr_info("\nRF_Path(%x)\n", path);
for (i = 0; i < 0x100; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
+ value = rtl8188e_PHY_QueryRFReg(padapter, path, i, 0xffffffff);
if (j % 4 == 1)
pr_info("0x%02x ", i);
pr_info(" 0x%08x ", value);
@@ -3991,18 +3693,18 @@ static int rtw_dbg_port(struct net_device *dev,
}
break;
case 0x72:/* read_bb */
- DBG_88E("read_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ DBG_88E("read_bbreg(0x%x) = 0x%x\n", arg, rtl8188e_PHY_QueryBBReg(padapter, arg, 0xffffffff));
break;
case 0x73:/* write_bb */
- rtw_hal_write_bbreg(padapter, arg, 0xffffffff, extra_arg);
- DBG_88E("write_bbreg(0x%x) = 0x%x\n", arg, rtw_hal_read_bbreg(padapter, arg, 0xffffffff));
+ rtl8188e_PHY_SetBBReg(padapter, arg, 0xffffffff, extra_arg);
+ DBG_88E("write_bbreg(0x%x) = 0x%x\n", arg, rtl8188e_PHY_QueryBBReg(padapter, arg, 0xffffffff));
break;
case 0x74:/* read_rf */
- DBG_88E("read RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ DBG_88E("read RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtl8188e_PHY_QueryRFReg(padapter, minor_cmd, arg, 0xffffffff));
break;
case 0x75:/* write_rf */
- rtw_hal_write_rfreg(padapter, minor_cmd, arg, 0xffffffff, extra_arg);
- DBG_88E("write RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtw_hal_read_rfreg(padapter, minor_cmd, arg, 0xffffffff));
+ rtl8188e_PHY_SetRFReg(padapter, minor_cmd, arg, 0xffffffff, extra_arg);
+ DBG_88E("write RF_reg path(0x%02x), offset(0x%x), value(0x%08x)\n", minor_cmd, arg, rtl8188e_PHY_QueryRFReg(padapter, minor_cmd, arg, 0xffffffff));
break;
case 0x76:
@@ -4022,7 +3724,6 @@ static int rtw_dbg_port(struct net_device *dev,
switch (minor_cmd) {
case 0x04: /* LLT table initialization test */
{
- u8 page_boundary = 0xf9;
struct xmit_frame *xmit_frame;
xmit_frame = rtw_IOL_accquire_xmit_frame(padapter);
@@ -4031,9 +3732,7 @@ static int rtw_dbg_port(struct net_device *dev,
break;
}
- rtw_IOL_append_LLT_cmd(xmit_frame, page_boundary);
-
- if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 500, 0))
+ if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 500, 0) != _SUCCESS)
ret = -EPERM;
}
break;
@@ -4057,7 +3756,7 @@ static int rtw_dbg_port(struct net_device *dev,
rtw_IOL_append_WB_cmd(xmit_frame, reg, 0x08, 0xff);
rtw_IOL_append_DELAY_MS_cmd(xmit_frame, blink_delay_ms);
}
- if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms * blink_num * 2) + 200, 0))
+ if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, (blink_delay_ms * blink_num * 2) + 200, 0) != _SUCCESS)
ret = -EPERM;
}
break;
@@ -4079,7 +3778,7 @@ static int rtw_dbg_port(struct net_device *dev,
for (i = 0; i < write_num; i++)
rtw_IOL_append_WB_cmd(xmit_frame, reg, i + start_value, 0xFF);
- if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
final = rtw_read8(padapter, reg);
@@ -4108,7 +3807,7 @@ static int rtw_dbg_port(struct net_device *dev,
for (i = 0; i < write_num; i++)
rtw_IOL_append_WW_cmd(xmit_frame, reg, i + start_value, 0xFFFF);
- if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
final = rtw_read16(padapter, reg);
@@ -4136,7 +3835,7 @@ static int rtw_dbg_port(struct net_device *dev,
for (i = 0; i < write_num; i++)
rtw_IOL_append_WD_cmd(xmit_frame, reg, i + start_value, 0xFFFFFFFF);
- if (_SUCCESS != rtw_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0))
+ if (rtl8188e_IOL_exec_cmds_sync(padapter, xmit_frame, 5000, 0) != _SUCCESS)
ret = -EPERM;
final = rtw_read32(padapter, reg);
@@ -4222,11 +3921,11 @@ static int rtw_dbg_port(struct net_device *dev,
case 0x06:
{
u32 ODMFlag;
- rtw_hal_get_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ GetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
DBG_88E("(B)DMFlag = 0x%x, arg = 0x%x\n", ODMFlag, arg);
ODMFlag = (u32)(0x0f & arg);
DBG_88E("(A)DMFlag = 0x%x\n", ODMFlag);
- rtw_hal_set_hwreg(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
+ SetHwReg8188EU(padapter, HW_VAR_DM_FLAG, (u8 *)(&ODMFlag));
}
break;
case 0x07:
@@ -4249,9 +3948,7 @@ static int rtw_dbg_port(struct net_device *dev,
struct list_head *plist, *phead;
struct recv_reorder_ctrl *preorder_ctrl;
-#ifdef CONFIG_88EU_AP_MODE
DBG_88E("sta_dz_bitmap = 0x%x, tim_bitmap = 0x%x\n", pstapriv->sta_dz_bitmap, pstapriv->tim_bitmap);
-#endif
spin_lock_bh(&pstapriv->sta_hash_lock);
for (i = 0; i < NUM_STA; i++) {
@@ -4272,14 +3969,12 @@ static int rtw_dbg_port(struct net_device *dev,
DBG_88E("ampdu_enable = %d\n", psta->htpriv.ampdu_enable);
DBG_88E("agg_enable_bitmap =%x, candidate_tid_bitmap =%x\n", psta->htpriv.agg_enable_bitmap, psta->htpriv.candidate_tid_bitmap);
-#ifdef CONFIG_88EU_AP_MODE
DBG_88E("capability = 0x%x\n", psta->capability);
DBG_88E("flags = 0x%x\n", psta->flags);
DBG_88E("wpa_psk = 0x%x\n", psta->wpa_psk);
DBG_88E("wpa2_group_cipher = 0x%x\n", psta->wpa2_group_cipher);
DBG_88E("wpa2_pairwise_cipher = 0x%x\n", psta->wpa2_pairwise_cipher);
DBG_88E("qos_info = 0x%x\n", psta->qos_info);
-#endif
DBG_88E("dot118021XPrivacy = 0x%x\n", psta->dot118021XPrivacy);
for (j = 0; j < 16; j++) {
@@ -4296,16 +3991,10 @@ static int rtw_dbg_port(struct net_device *dev,
case 0x0c:/* dump rx/tx packet */
if (arg == 0) {
DBG_88E("dump rx packet (%d)\n", extra_arg);
- rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_RXPKT, &(extra_arg));
+ SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_RXPKT, &extra_arg);
} else if (arg == 1) {
DBG_88E("dump tx packet (%d)\n", extra_arg);
- rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DUMP_TXPKT, &(extra_arg));
- }
- break;
- case 0x0f:
- if (extra_arg == 0) {
- DBG_88E("###### silent reset test.......#####\n");
- rtw_hal_sreset_reset(padapter);
+ SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DUMP_TXPKT, &extra_arg);
}
break;
case 0x15:
@@ -4326,11 +4015,10 @@ static int rtw_dbg_port(struct net_device *dev,
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, bit(0):enable 2.4g, bit(1):enable 5g, 0x3: enable both 2.4g and 5g */
/* default is set to enable 2.4GHZ for IOT issue with bufflao's AP at 5GHZ */
- if (pregpriv &&
- (extra_arg == 0 ||
- extra_arg == 1 ||
- extra_arg == 2 ||
- extra_arg == 3)) {
+ if (extra_arg == 0 ||
+ extra_arg == 1 ||
+ extra_arg == 2 ||
+ extra_arg == 3) {
pregpriv->rx_stbc = extra_arg;
DBG_88E("set rx_stbc =%d\n", pregpriv->rx_stbc);
} else {
@@ -4342,7 +4030,7 @@ static int rtw_dbg_port(struct net_device *dev,
{
struct registry_priv *pregpriv = &padapter->registrypriv;
/* 0: disable, 0x1:enable (but wifi_spec should be 0), 0x2: force enable (don't care wifi_spec) */
- if (pregpriv && extra_arg >= 0 && extra_arg < 3) {
+ if (extra_arg >= 0 && extra_arg < 3) {
pregpriv->ampdu_enable = extra_arg;
DBG_88E("set ampdu_enable =%d\n", pregpriv->ampdu_enable);
} else {
@@ -4361,10 +4049,8 @@ static int rtw_dbg_port(struct net_device *dev,
padapter->bNotifyChannelChange = extra_arg;
break;
case 0x24:
-#ifdef CONFIG_88EU_P2P
DBG_88E("turn %s the bShowGetP2PState Variable\n", (extra_arg == 1) ? "on" : "off");
padapter->bShowGetP2PState = extra_arg;
-#endif /* CONFIG_88EU_P2P */
break;
case 0xaa:
if (extra_arg > 0x13)
@@ -4385,7 +4071,7 @@ static int rtw_dbg_port(struct net_device *dev,
u32 odm_flag;
if (0xf == extra_arg) {
- rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
DBG_88E("extra_arg = 0 - disable all dynamic func\n");
DBG_88E("extra_arg = 1 - disable DIG- BIT(0)\n");
@@ -4400,8 +4086,8 @@ static int rtw_dbg_port(struct net_device *dev,
extra_arg = 2 - disable tx power tracking
extra_arg = 3 - turn on all dynamic func
*/
- rtw_hal_set_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &(extra_arg));
- rtw_hal_get_def_var(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
+ SetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &extra_arg);
+ GetHalDefVar8188EUsb(padapter, HAL_DEF_DBG_DM_FUNC, &odm_flag);
DBG_88E(" === DMFlag(0x%08x) ===\n", odm_flag);
}
}
@@ -4492,12 +4178,11 @@ static int rtw_wx_set_priv(struct net_device *dev,
kfree(pmlmepriv->wps_probe_req_ie);
pmlmepriv->wps_probe_req_ie = NULL;
- pmlmepriv->wps_probe_req_ie = kmalloc(cp_sz, GFP_KERNEL);
+ pmlmepriv->wps_probe_req_ie = kmemdup(probereq_wpsie, cp_sz, GFP_KERNEL);
if (!pmlmepriv->wps_probe_req_ie) {
ret = -EINVAL;
goto FREE_EXT;
}
- memcpy(pmlmepriv->wps_probe_req_ie, probereq_wpsie, cp_sz);
pmlmepriv->wps_probe_req_ie_len = cp_sz;
}
goto FREE_EXT;
@@ -4539,1817 +4224,8 @@ static int rtw_pm_set(struct net_device *dev,
return ret;
}
-static int rtw_mp_efuse_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
- struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
- struct efuse_hal *pEfuseHal;
- struct iw_point *wrqu;
-
- u8 *PROMContent = pEEPROM->efuse_eeprom_data;
- u8 ips_mode = 0, lps_mode = 0;
- struct pwrctrl_priv *pwrctrlpriv;
- u8 *data = NULL;
- u8 *rawdata = NULL;
- char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
- u16 i = 0, j = 0, mapLen = 0, addr = 0, cnts = 0;
- u16 max_available_size = 0, raw_cursize = 0, raw_maxsize = 0;
- int err;
- u8 org_fw_iol = padapter->registrypriv.fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
-
- wrqu = (struct iw_point *)wdata;
- pwrctrlpriv = &padapter->pwrctrlpriv;
- pEfuseHal = &haldata->EfuseHal;
-
- err = 0;
- data = kzalloc(EFUSE_BT_MAX_MAP_LEN, GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
- rawdata = kzalloc(EFUSE_BT_MAX_MAP_LEN, GFP_KERNEL);
- if (!rawdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- if (copy_from_user(extra, wrqu->pointer, wrqu->length)) {
- err = -EFAULT;
- goto exit;
- }
- lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
- rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
-
- ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
- rtw_pm_set_ips(padapter, IPS_NONE);
-
- pch = extra;
- DBG_88E("%s: in =%s\n", __func__, extra);
-
- i = 0;
- /* mac 16 "00e04c871200" rmap, 00, 2 */
- while ((token = strsep(&pch, ",")) != NULL) {
- if (i > 2)
- break;
- tmp[i] = token;
- i++;
- }
- padapter->registrypriv.fw_iol = 0;/* 0:Disable, 1:enable, 2:by usb speed */
-
- if (strcmp(tmp[0], "status") == 0) {
- sprintf(extra, "Load File efuse =%s, Load File MAC =%s", (pEEPROM->bloadfile_fail_flag ? "FAIL" : "OK"), (pEEPROM->bloadmac_fail_flag ? "FAIL" : "OK"));
-
- goto exit;
- } else if (strcmp(tmp[0], "filemap") == 0) {
- mapLen = EFUSE_MAP_SIZE;
-
- sprintf(extra, "\n");
- for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
- sprintf(extra + strlen(extra), "0x%02x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", PROMContent[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", PROMContent[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "realmap") == 0) {
- mapLen = EFUSE_MAP_SIZE;
- if (rtw_efuse_map_read(padapter, 0, mapLen, pEfuseHal->fakeEfuseInitMap) == _FAIL) {
- DBG_88E("%s: read realmap Fail!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- sprintf(extra, "\n");
- for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
- sprintf(extra + strlen(extra), "0x%02x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "rmap") == 0) {
- if (!tmp[1] || !tmp[2]) {
- DBG_88E("%s: rmap Fail!! Parameters error!\n", __func__);
- err = -EINVAL;
- goto exit;
- }
-
- /* rmap addr cnts */
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- DBG_88E("%s: addr =%x\n", __func__, addr);
-
- cnts = simple_strtoul(tmp[2], &ptmp, 10);
- if (cnts == 0) {
- DBG_88E("%s: rmap Fail!! cnts error!\n", __func__);
- err = -EINVAL;
- goto exit;
- }
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EINVAL;
- goto exit;
- }
-
- if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- *extra = 0;
- for (i = 0; i < cnts; i++)
- sprintf(extra + strlen(extra), "0x%02X ", data[i]);
- } else if (strcmp(tmp[0], "realraw") == 0) {
- addr = 0;
- mapLen = EFUSE_MAX_SIZE;
- if (rtw_efuse_access(padapter, false, addr, mapLen, rawdata) == _FAIL) {
- DBG_88E("%s: rtw_efuse_access Fail!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- sprintf(extra, "\n");
- for (i = 0; i < mapLen; i++) {
- sprintf(extra + strlen(extra), "%02X", rawdata[i]);
-
- if ((i & 0xF) == 0xF)
- sprintf(extra + strlen(extra), "\n");
- else if ((i & 0x7) == 0x7)
- sprintf(extra + strlen(extra), "\t");
- else
- sprintf(extra + strlen(extra), " ");
- }
- } else if (strcmp(tmp[0], "mac") == 0) {
- cnts = 6;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_read error!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- *extra = 0;
- for (i = 0; i < cnts; i++) {
- sprintf(extra + strlen(extra), "%02X", data[i]);
- if (i != (cnts - 1))
- sprintf(extra + strlen(extra), ":");
- }
- } else if (strcmp(tmp[0], "vidpid") == 0) {
- cnts = 4;
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%02x)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
- if (rtw_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
- DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- *extra = 0;
- for (i = 0; i < cnts; i++) {
- sprintf(extra + strlen(extra), "0x%02X", data[i]);
- if (i != (cnts - 1))
- sprintf(extra + strlen(extra), ",");
- }
- } else if (strcmp(tmp[0], "ableraw") == 0) {
- efuse_GetCurrentSize(padapter, &raw_cursize);
- raw_maxsize = efuse_GetMaxSize(padapter);
- sprintf(extra, "[available raw size] = %d bytes", raw_maxsize - raw_cursize);
- } else if (strcmp(tmp[0], "btfmap") == 0) {
- mapLen = EFUSE_BT_MAX_MAP_LEN;
- if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
- DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- sprintf(extra, "\n");
- for (i = 0; i < 512; i += 16) {
- /* set 512 because the iwpriv's extra size have limit 0x7FF */
- sprintf(extra + strlen(extra), "0x%03x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->BTEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->BTEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "btbmap") == 0) {
- mapLen = EFUSE_BT_MAX_MAP_LEN;
- if (rtw_BT_efuse_map_read(padapter, 0, mapLen, pEfuseHal->BTEfuseInitMap) == _FAIL) {
- DBG_88E("%s: rtw_BT_efuse_map_read Fail!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- sprintf(extra, "\n");
- for (i = 512; i < 1024; i += 16) {
- sprintf(extra + strlen(extra), "0x%03x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->BTEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->BTEfuseInitMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "btrmap") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- /* rmap addr cnts */
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
-
- cnts = simple_strtoul(tmp[2], &ptmp, 10);
- if (cnts == 0) {
- DBG_88E("%s: btrmap Fail!! cnts error!\n", __func__);
- err = -EINVAL;
- goto exit;
- }
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_BT_efuse_map_read(padapter, addr, cnts, data) == _FAIL) {
- DBG_88E("%s: rtw_BT_efuse_map_read error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
-
- *extra = 0;
- for (i = 0; i < cnts; i++)
- sprintf(extra + strlen(extra), " 0x%02X ", data[i]);
- } else if (strcmp(tmp[0], "btffake") == 0) {
- sprintf(extra, "\n");
- for (i = 0; i < 512; i += 16) {
- sprintf(extra + strlen(extra), "0x%03x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeBTEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeBTEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "btbfake") == 0) {
- sprintf(extra, "\n");
- for (i = 512; i < 1024; i += 16) {
- sprintf(extra + strlen(extra), "0x%03x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeBTEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeBTEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else if (strcmp(tmp[0], "wlrfkmap") == 0) {
- sprintf(extra, "\n");
- for (i = 0; i < EFUSE_MAP_SIZE; i += 16) {
- sprintf(extra + strlen(extra), "0x%02x\t", i);
- for (j = 0; j < 8; j++)
- sprintf(extra + strlen(extra), "%02X ", pEfuseHal->fakeEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\t");
- for (; j < 16; j++)
- sprintf(extra + strlen(extra), " %02X", pEfuseHal->fakeEfuseModifiedMap[i + j]);
- sprintf(extra + strlen(extra), "\n");
- }
- } else {
- sprintf(extra, "Command not found!");
- }
-
-exit:
- kfree(data);
- kfree(rawdata);
- if (!err)
- wrqu->length = strlen(extra);
-
- rtw_pm_set_ips(padapter, ips_mode);
- rtw_pm_set_lps(padapter, lps_mode);
- padapter->registrypriv.fw_iol = org_fw_iol;/* 0:Disable, 1:enable, 2:by usb speed */
- return err;
-}
-
-static int rtw_mp_efuse_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- struct adapter *padapter;
- struct pwrctrl_priv *pwrctrlpriv;
- struct hal_data_8188e *haldata;
- struct efuse_hal *pEfuseHal;
-
- u8 ips_mode = 0, lps_mode = 0;
- u32 i, jj, kk;
- u8 *setdata = NULL;
- u8 *ShadowMapBT = NULL;
- u8 *ShadowMapWiFi = NULL;
- u8 *setrawdata = NULL;
- char *pch, *ptmp, *token, *tmp[3] = {NULL, NULL, NULL};
- u16 addr = 0, cnts = 0, max_available_size = 0;
- int err;
-
- padapter = rtw_netdev_priv(dev);
- pwrctrlpriv = &padapter->pwrctrlpriv;
- haldata = GET_HAL_DATA(padapter);
- pEfuseHal = &haldata->EfuseHal;
- err = 0;
- setdata = kzalloc(1024, GFP_KERNEL);
- if (!setdata) {
- err = -ENOMEM;
- goto exit;
- }
- ShadowMapBT = kmalloc(EFUSE_BT_MAX_MAP_LEN, GFP_KERNEL);
- if (!ShadowMapBT) {
- err = -ENOMEM;
- goto exit;
- }
- ShadowMapWiFi = kmalloc(EFUSE_MAP_SIZE, GFP_KERNEL);
- if (!ShadowMapWiFi) {
- err = -ENOMEM;
- goto exit;
- }
- setrawdata = kmalloc(EFUSE_MAX_SIZE, GFP_KERNEL);
- if (!setrawdata) {
- err = -ENOMEM;
- goto exit;
- }
-
- lps_mode = pwrctrlpriv->power_mgnt;/* keep org value */
- rtw_pm_set_lps(padapter, PS_MODE_ACTIVE);
-
- ips_mode = pwrctrlpriv->ips_mode;/* keep org value */
- rtw_pm_set_ips(padapter, IPS_NONE);
-
- pch = extra;
- DBG_88E("%s: in =%s\n", __func__, extra);
-
- i = 0;
- while ((token = strsep(&pch, ",")) != NULL) {
- if (i > 2)
- break;
- tmp[i] = token;
- i++;
- }
-
- /* tmp[0],[1],[2] */
- /* wmap, addr, 00e04c871200 */
- if (strcmp(tmp[0], "wmap") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- addr &= 0xFFF;
-
- cnts = strlen(tmp[2]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: map data =%s\n", __func__, tmp[2]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
- /* Change to check TYPE_EFUSE_MAP_LEN, because 8188E raw 256, logic map over 256. */
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "wraw") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- addr &= 0xFFF;
-
- cnts = strlen(tmp[2]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: raw data =%s\n", __func__, tmp[2]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- setrawdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
-
- if (rtw_efuse_access(padapter, true, addr, cnts, setrawdata) == _FAIL) {
- DBG_88E("%s: rtw_efuse_access error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "mac") == 0) {
- if (!tmp[1]) {
- err = -EINVAL;
- goto exit;
- }
-
- /* mac, 00e04c871200 */
- addr = EEPROM_MAC_ADDR_88EU;
- cnts = strlen(tmp[1]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
- if (cnts > 6) {
- DBG_88E("%s: error data for mac addr =\"%s\"\n", __func__, tmp[1]);
- err = -EFAULT;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: MAC address =%s\n", __func__, tmp[1]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
- /* Change to check TYPE_EFUSE_MAP_LEN, because 8188E raw 256, logic map over 256. */
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_EFUSE_MAP_LEN, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "vidpid") == 0) {
- if (!tmp[1]) {
- err = -EINVAL;
- goto exit;
- }
-
- /* pidvid, da0b7881 */
- addr = EEPROM_VID_88EE;
- cnts = strlen(tmp[1]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: VID/PID =%s\n", __func__, tmp[1]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- setdata[jj] = key_2char2num(tmp[1][kk], tmp[1][kk + 1]);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_write error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "btwmap") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- addr &= 0xFFF;
-
- cnts = strlen(tmp[2]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: BT data =%s\n", __func__, tmp[2]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- setdata[jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if ((addr + cnts) > max_available_size) {
- DBG_88E("%s: addr(0x%X)+cnts(%d) parameter error!\n", __func__, addr, cnts);
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_BT_efuse_map_write(padapter, addr, cnts, setdata) == _FAIL) {
- DBG_88E("%s: rtw_BT_efuse_map_write error!!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "btwfake") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- addr &= 0xFFF;
-
- cnts = strlen(tmp[2]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: BT tmp data =%s\n", __func__, tmp[2]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- pEfuseHal->fakeBTEfuseModifiedMap[addr + jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
- } else if (strcmp(tmp[0], "btdumpfake") == 0) {
- if (rtw_BT_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _SUCCESS) {
- DBG_88E("%s: BT read all map success\n", __func__);
- } else {
- DBG_88E("%s: BT read all map Fail!\n", __func__);
- err = -EFAULT;
- }
- } else if (strcmp(tmp[0], "wldumpfake") == 0) {
- if (rtw_efuse_map_read(padapter, 0, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _SUCCESS) {
- DBG_88E("%s: BT read all map success\n", __func__);
- } else {
- DBG_88E("%s: BT read all map Fail\n", __func__);
- err = -EFAULT;
- }
- } else if (strcmp(tmp[0], "btfk2map") == 0) {
- memcpy(pEfuseHal->BTEfuseModifiedMap, pEfuseHal->fakeBTEfuseModifiedMap, EFUSE_BT_MAX_MAP_LEN);
-
- EFUSE_GetEfuseDefinition(padapter, EFUSE_BT, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if (max_available_size < 1) {
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_BT_efuse_map_write(padapter, 0x00, EFUSE_BT_MAX_MAP_LEN, pEfuseHal->fakeBTEfuseModifiedMap) == _FAIL) {
- DBG_88E("%s: rtw_BT_efuse_map_write error!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "wlfk2map") == 0) {
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if (max_available_size < 1) {
- err = -EFAULT;
- goto exit;
- }
-
- if (rtw_efuse_map_write(padapter, 0x00, EFUSE_MAX_MAP_LEN, pEfuseHal->fakeEfuseModifiedMap) == _FAIL) {
- DBG_88E("%s: rtw_efuse_map_write error!\n", __func__);
- err = -EFAULT;
- goto exit;
- }
- } else if (strcmp(tmp[0], "wlwfake") == 0) {
- if (!tmp[1] || !tmp[2]) {
- err = -EINVAL;
- goto exit;
- }
-
- addr = simple_strtoul(tmp[1], &ptmp, 16);
- addr &= 0xFFF;
-
- cnts = strlen(tmp[2]);
- if (cnts % 2) {
- err = -EINVAL;
- goto exit;
- }
- cnts /= 2;
- if (cnts == 0) {
- err = -EINVAL;
- goto exit;
- }
-
- DBG_88E("%s: addr = 0x%X\n", __func__, addr);
- DBG_88E("%s: cnts =%d\n", __func__, cnts);
- DBG_88E("%s: map tmp data =%s\n", __func__, tmp[2]);
-
- for (jj = 0, kk = 0; jj < cnts; jj++, kk += 2)
- pEfuseHal->fakeEfuseModifiedMap[addr + jj] = key_2char2num(tmp[2][kk], tmp[2][kk + 1]);
- }
-
-exit:
- kfree(setdata);
- kfree(ShadowMapBT);
- kfree(ShadowMapWiFi);
- kfree(setrawdata);
-
- rtw_pm_set_ips(padapter, ips_mode);
- rtw_pm_set_lps(padapter, lps_mode);
-
- return err;
-}
-
-/*
- * Input Format: %s,%d,%d
- * %s is width, could be
- * "b" for 1 byte
- * "w" for WORD (2 bytes)
- * "dw" for DWORD (4 bytes)
- * 1st %d is address(offset)
- * 2st %d is data to write
- */
-static int rtw_mp_write_reg(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- char *pch, *pnext, *ptmp;
- char *width_str;
- char width;
- u32 addr, data;
- int ret;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- pch = extra;
- pnext = strpbrk(pch, ",.-");
- if (!pnext)
- return -EINVAL;
- *pnext = 0;
- width_str = pch;
-
- pch = pnext + 1;
- pnext = strpbrk(pch, ",.-");
- if (!pnext)
- return -EINVAL;
- *pnext = 0;
- addr = simple_strtoul(pch, &ptmp, 16);
- if (addr > 0x3FFF)
- return -EINVAL;
-
- pch = pnext + 1;
- if ((pch - extra) >= wrqu->length)
- return -EINVAL;
- data = simple_strtoul(pch, &ptmp, 16);
-
- ret = 0;
- width = width_str[0];
- switch (width) {
- case 'b':
- /* 1 byte */
- if (data > 0xFF) {
- ret = -EINVAL;
- break;
- }
- rtw_write8(padapter, addr, data);
- break;
- case 'w':
- /* 2 bytes */
- if (data > 0xFFFF) {
- ret = -EINVAL;
- break;
- }
- rtw_write16(padapter, addr, data);
- break;
- case 'd':
- /* 4 bytes */
- rtw_write32(padapter, addr, data);
- break;
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-/*
- * Input Format: %s,%d
- * %s is width, could be
- * "b" for 1 byte
- * "w" for WORD (2 bytes)
- * "dw" for DWORD (4 bytes)
- * %d is address(offset)
- *
- * Return:
- * %d for data readed
- */
-static int rtw_mp_read_reg(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- char *pch, *pnext, *ptmp;
- char *width_str;
- char width;
- char data[20], tmp[20];
- u32 addr;
- u32 ret, i = 0, j = 0, strtout = 0;
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- memset(data, 0, 20);
- memset(tmp, 0, 20);
- memset(extra, 0, wrqu->length);
-
- pch = input;
- pnext = strpbrk(pch, ",.-");
- if (!pnext) {
- kfree(input);
- return -EINVAL;
- }
- *pnext = 0;
- width_str = pch;
-
- pch = pnext + 1;
- if ((pch - input) >= wrqu->length) {
- kfree(input);
- return -EINVAL;
- }
- kfree(input);
- addr = simple_strtoul(pch, &ptmp, 16);
- if (addr > 0x3FFF)
- return -EINVAL;
-
- ret = 0;
- width = width_str[0];
- switch (width) {
- case 'b':
- /* 1 byte */
- sprintf(extra, "%d\n", rtw_read8(padapter, addr));
- wrqu->length = strlen(extra);
- break;
- case 'w':
- /* 2 bytes */
- sprintf(data, "%04x\n", rtw_read16(padapter, addr));
- for (i = 0; i <= strlen(data); i++) {
- if (i % 2 == 0) {
- tmp[j] = ' ';
- j++;
- }
- if (data[i] != '\0')
- tmp[j] = data[i];
- j++;
- }
- pch = tmp;
- DBG_88E("pch =%s", pch);
-
- while (*pch != '\0') {
- pnext = strpbrk(pch, " ");
- if (!pnext)
- break;
-
- pnext++;
- if (*pnext != '\0') {
- strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra + strlen(extra), " %d", strtout);
- } else {
- break;
- }
- pch = pnext;
- }
- wrqu->length = 6;
- break;
- case 'd':
- /* 4 bytes */
- sprintf(data, "%08x", rtw_read32(padapter, addr));
- /* add read data format blank */
- for (i = 0; i <= strlen(data); i++) {
- if (i % 2 == 0) {
- tmp[j] = ' ';
- j++;
- }
- if (data[i] != '\0')
- tmp[j] = data[i];
-
- j++;
- }
- pch = tmp;
- DBG_88E("pch =%s", pch);
-
- while (*pch != '\0') {
- pnext = strpbrk(pch, " ");
- if (!pnext)
- break;
- pnext++;
- if (*pnext != '\0') {
- strtout = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra + strlen(extra), " %d", strtout);
- } else {
- break;
- }
- pch = pnext;
- }
- wrqu->length = strlen(extra);
- break;
- default:
- wrqu->length = 0;
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-/*
- * Input Format: %d,%x,%x
- * %d is RF path, should be smaller than RF_PATH_MAX
- * 1st %x is address(offset)
- * 2st %x is data to write
- */
- static int rtw_mp_write_rf(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 path, addr, data;
- int ret;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- ret = sscanf(extra, "%d,%x,%x", &path, &addr, &data);
- if (ret < 3)
- return -EINVAL;
-
- if (path >= RF_PATH_MAX)
- return -EINVAL;
- if (addr > 0xFF)
- return -EINVAL;
- if (data > 0xFFFFF)
- return -EINVAL;
-
- memset(extra, 0, wrqu->length);
-
- write_rfreg(padapter, path, addr, data);
-
- sprintf(extra, "write_rf completed\n");
- wrqu->length = strlen(extra);
-
- return 0;
-}
-
-/*
- * Input Format: %d,%x
- * %d is RF path, should be smaller than RF_PATH_MAX
- * %x is address(offset)
- *
- * Return:
- * %d for data readed
- */
-static int rtw_mp_read_rf(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- char *pch, *pnext, *ptmp;
- char data[20], tmp[20];
- u32 path, addr;
- u32 ret, i = 0, j = 0, strtou = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- ret = sscanf(input, "%d,%x", &path, &addr);
- kfree(input);
- if (ret < 2)
- return -EINVAL;
-
- if (path >= RF_PATH_MAX)
- return -EINVAL;
- if (addr > 0xFF)
- return -EINVAL;
-
- memset(extra, 0, wrqu->length);
-
- sprintf(data, "%08x", read_rfreg(padapter, path, addr));
- /* add read data format blank */
- for (i = 0; i <= strlen(data); i++) {
- if (i % 2 == 0) {
- tmp[j] = ' ';
- j++;
- }
- tmp[j] = data[i];
- j++;
- }
- pch = tmp;
- DBG_88E("pch =%s", pch);
-
- while (*pch != '\0') {
- pnext = strpbrk(pch, " ");
- pnext++;
- if (*pnext != '\0') {
- strtou = simple_strtoul(pnext, &ptmp, 16);
- sprintf(extra + strlen(extra), " %d", strtou);
- } else {
- break;
- }
- pch = pnext;
- }
- wrqu->length = strlen(extra);
- return 0;
-}
-
-static int rtw_mp_start(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (padapter->registrypriv.mp_mode == 0) {
- padapter->registrypriv.mp_mode = 1;
-
- rtw_pm_set_ips(padapter, IPS_NONE);
- LeaveAllPowerSaveMode(padapter);
-
- MPT_InitializeAdapter(padapter, 1);
- }
- if (padapter->registrypriv.mp_mode == 0)
- return -EPERM;
- if (padapter->mppriv.mode == MP_OFF) {
- if (mp_start_test(padapter) == _FAIL)
- return -EPERM;
- padapter->mppriv.mode = MP_ON;
- }
- return 0;
-}
-
-static int rtw_mp_stop(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (padapter->registrypriv.mp_mode == 1) {
- MPT_DeInitAdapter(padapter);
- padapter->registrypriv.mp_mode = 0;
- }
-
- if (padapter->mppriv.mode != MP_OFF) {
- mp_stop_test(padapter);
- padapter->mppriv.mode = MP_OFF;
- }
-
- return 0;
-}
-
extern int wifirate2_ratetbl_inx(unsigned char rate);
-static int rtw_mp_rate(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 rate = MPT_RATE_1M;
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- rate = rtw_atoi(input);
- sprintf(extra, "Set data rate to %d", rate);
- kfree(input);
- if (rate <= 0x7f)
- rate = wifirate2_ratetbl_inx((u8)rate);
- else
- rate = (rate - 0x80 + MPT_RATE_MCS0);
-
- if (rate >= MPT_RATE_LAST)
- return -EINVAL;
-
- padapter->mppriv.rateidx = rate;
- Hal_SetDataRate(padapter);
-
- wrqu->length = strlen(extra) + 1;
- return 0;
-}
-
-static int rtw_mp_channel(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- u32 channel = 1;
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- channel = rtw_atoi(input);
- sprintf(extra, "Change channel %d to channel %d", padapter->mppriv.channel, channel);
-
- padapter->mppriv.channel = channel;
- Hal_SetChannel(padapter);
-
- wrqu->length = strlen(extra) + 1;
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_bandwidth(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 bandwidth = 0, sg = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- sscanf(extra, "40M =%d, shortGI =%d", &bandwidth, &sg);
-
- if (bandwidth != HT_CHANNEL_WIDTH_40)
- bandwidth = HT_CHANNEL_WIDTH_20;
-
- padapter->mppriv.bandwidth = (u8)bandwidth;
- padapter->mppriv.preamble = sg;
-
- SetBandwidth(padapter);
-
- return 0;
-}
-
-static int rtw_mp_txpower(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 idx_a = 0, idx_b = 0;
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- sscanf(input, "patha =%d, pathb =%d", &idx_a, &idx_b);
-
- sprintf(extra, "Set power level path_A:%d path_B:%d", idx_a, idx_b);
- padapter->mppriv.txpoweridx = (u8)idx_a;
- padapter->mppriv.txpoweridx_b = (u8)idx_b;
- padapter->mppriv.bSetTxPower = 1;
- Hal_SetAntennaPathPower(padapter);
-
- wrqu->length = strlen(extra) + 1;
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_ant_tx(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u8 i;
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- u16 antenna = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
-
- sprintf(extra, "switch Tx antenna to %s", input);
-
- for (i = 0; i < strlen(input); i++) {
- switch (input[i]) {
- case 'a':
- antenna |= ANTENNA_A;
- break;
- case 'b':
- antenna |= ANTENNA_B;
- break;
- }
- }
- padapter->mppriv.antenna_tx = antenna;
-
- Hal_SetAntenna(padapter);
-
- wrqu->length = strlen(extra) + 1;
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_ant_rx(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u8 i;
- u16 antenna = 0;
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- memset(extra, 0, wrqu->length);
-
- sprintf(extra, "switch Rx antenna to %s", input);
-
- for (i = 0; i < strlen(input); i++) {
- switch (input[i]) {
- case 'a':
- antenna |= ANTENNA_A;
- break;
- case 'b':
- antenna |= ANTENNA_B;
- break;
- }
- }
-
- padapter->mppriv.antenna_rx = antenna;
- Hal_SetAntenna(padapter);
- wrqu->length = strlen(extra);
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_ctx(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 pkTx = 1, countPkTx = 1, cotuTx = 1, CarrSprTx = 1, scTx = 1, sgleTx = 1, stop = 1;
- u32 bStartTest = 1;
- u32 count = 0;
- struct mp_priv *pmp_priv;
- struct pkt_attrib *pattrib;
-
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- pmp_priv = &padapter->mppriv;
-
- if (copy_from_user(extra, wrqu->pointer, wrqu->length))
- return -EFAULT;
-
- DBG_88E("%s: in =%s\n", __func__, extra);
-
- countPkTx = strncmp(extra, "count =", 5); /* strncmp true is 0 */
- cotuTx = strncmp(extra, "background", 20);
- CarrSprTx = strncmp(extra, "background, cs", 20);
- scTx = strncmp(extra, "background, sc", 20);
- sgleTx = strncmp(extra, "background, stone", 20);
- pkTx = strncmp(extra, "background, pkt", 20);
- stop = strncmp(extra, "stop", 4);
- sscanf(extra, "count =%d, pkt", &count);
-
- memset(extra, '\0', sizeof(*extra));
-
- if (stop == 0) {
- bStartTest = 0; /* To set Stop */
- pmp_priv->tx.stop = 1;
- sprintf(extra, "Stop continuous Tx");
- } else {
- bStartTest = 1;
- if (pmp_priv->mode != MP_ON) {
- if (pmp_priv->tx.stop != 1) {
- DBG_88E("%s: MP_MODE != ON %d\n", __func__, pmp_priv->mode);
- return -EFAULT;
- }
- }
- }
-
- if (pkTx == 0 || countPkTx == 0)
- pmp_priv->mode = MP_PACKET_TX;
- if (sgleTx == 0)
- pmp_priv->mode = MP_SINGLE_TONE_TX;
- if (cotuTx == 0)
- pmp_priv->mode = MP_CONTINUOUS_TX;
- if (CarrSprTx == 0)
- pmp_priv->mode = MP_CARRIER_SUPPRISSION_TX;
- if (scTx == 0)
- pmp_priv->mode = MP_SINGLE_CARRIER_TX;
-
- switch (pmp_priv->mode) {
- case MP_PACKET_TX:
- if (bStartTest == 0) {
- pmp_priv->tx.stop = 1;
- pmp_priv->mode = MP_ON;
- sprintf(extra, "Stop continuous Tx");
- } else if (pmp_priv->tx.stop == 1) {
- sprintf(extra, "Start continuous DA = ffffffffffff len = 1500 count =%u,\n", count);
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = count;
- pmp_priv->tx.payload = 2;
- pattrib = &pmp_priv->tx.attrib;
- pattrib->pktlen = 1500;
- memset(pattrib->dst, 0xFF, ETH_ALEN);
- SetPacketTx(padapter);
- } else {
- return -EFAULT;
- }
- wrqu->length = strlen(extra);
- return 0;
- case MP_SINGLE_TONE_TX:
- if (bStartTest != 0)
- sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
- Hal_SetSingleToneTx(padapter, (u8)bStartTest);
- break;
- case MP_CONTINUOUS_TX:
- if (bStartTest != 0)
- sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
- Hal_SetContinuousTx(padapter, (u8)bStartTest);
- break;
- case MP_CARRIER_SUPPRISSION_TX:
- if (bStartTest != 0) {
- if (pmp_priv->rateidx <= MPT_RATE_11M) {
- sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
- Hal_SetCarrierSuppressionTx(padapter, (u8)bStartTest);
- } else {
- sprintf(extra, "Specify carrier suppression but not CCK rate");
- }
- }
- break;
- case MP_SINGLE_CARRIER_TX:
- if (bStartTest != 0)
- sprintf(extra, "Start continuous DA = ffffffffffff len = 1500\n infinite = yes.");
- Hal_SetSingleCarrierTx(padapter, (u8)bStartTest);
- break;
- default:
- sprintf(extra, "Error! Continuous-Tx is not on-going.");
- return -EFAULT;
- }
-
- if (bStartTest == 1 && pmp_priv->mode != MP_ON) {
- struct mp_priv *pmp_priv = &padapter->mppriv;
- if (pmp_priv->tx.stop == 0) {
- pmp_priv->tx.stop = 1;
- msleep(5);
- }
- pmp_priv->tx.stop = 0;
- pmp_priv->tx.count = 1;
- SetPacketTx(padapter);
- } else {
- pmp_priv->mode = MP_ON;
- }
-
- wrqu->length = strlen(extra);
- return 0;
-}
-
-static int rtw_mp_arx(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u8 bStartRx = 0, bStopRx = 0, bQueryPhy;
- u32 cckok = 0, cckcrc = 0, ofdmok = 0, ofdmcrc = 0, htok = 0, htcrc = 0, OFDM_FA = 0, CCK_FA = 0;
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!input)
- return -ENOMEM;
-
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- DBG_88E("%s: %s\n", __func__, input);
-
- bStartRx = (strncmp(input, "start", 5) == 0) ? 1 : 0; /* strncmp true is 0 */
- bStopRx = (strncmp(input, "stop", 5) == 0) ? 1 : 0; /* strncmp true is 0 */
- bQueryPhy = (strncmp(input, "phy", 3) == 0) ? 1 : 0; /* strncmp true is 0 */
-
- if (bStartRx) {
- sprintf(extra, "start");
- SetPacketRx(padapter, bStartRx);
- } else if (bStopRx) {
- SetPacketRx(padapter, 0);
- sprintf(extra, "Received packet OK:%d CRC error:%d", padapter->mppriv.rx_pktcount, padapter->mppriv.rx_crcerrpktcount);
- } else if (bQueryPhy) {
- /*
- OFDM FA
- RegCF0[15:0]
- RegCF2[31:16]
- RegDA0[31:16]
- RegDA4[15:0]
- RegDA4[31:16]
- RegDA8[15:0]
- CCK FA
- (RegA5B<<8) | RegA5C
- */
- cckok = read_bbreg(padapter, 0xf88, 0xffffffff);
- cckcrc = read_bbreg(padapter, 0xf84, 0xffffffff);
- ofdmok = read_bbreg(padapter, 0xf94, 0x0000FFFF);
- ofdmcrc = read_bbreg(padapter, 0xf94, 0xFFFF0000);
- htok = read_bbreg(padapter, 0xf90, 0x0000FFFF);
- htcrc = read_bbreg(padapter, 0xf90, 0xFFFF0000);
-
- OFDM_FA = read_bbreg(padapter, 0xcf0, 0x0000FFFF);
- OFDM_FA = read_bbreg(padapter, 0xcf2, 0xFFFF0000);
- OFDM_FA = read_bbreg(padapter, 0xda0, 0xFFFF0000);
- OFDM_FA = read_bbreg(padapter, 0xda4, 0x0000FFFF);
- OFDM_FA = read_bbreg(padapter, 0xda4, 0xFFFF0000);
- OFDM_FA = read_bbreg(padapter, 0xda8, 0x0000FFFF);
- CCK_FA = (rtw_read8(padapter, 0xa5b) << 8) | (rtw_read8(padapter, 0xa5c));
-
- sprintf(extra, "Phy Received packet OK:%d CRC error:%d FA Counter: %d", cckok + ofdmok + htok, cckcrc + ofdmcrc + htcrc, OFDM_FA + CCK_FA);
- }
- wrqu->length = strlen(extra) + 1;
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_trx_query(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 txok, txfail, rxok, rxfail;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- txok = padapter->mppriv.tx.sended;
- txfail = 0;
- rxok = padapter->mppriv.rx_pktcount;
- rxfail = padapter->mppriv.rx_crcerrpktcount;
-
- memset(extra, '\0', 128);
-
- sprintf(extra, "Tx OK:%d, Tx Fail:%d, Rx OK:%d, CRC error:%d ", txok, txfail, rxok, rxfail);
-
- wrqu->length = strlen(extra) + 1;
-
- return 0;
-}
-
-static int rtw_mp_pwrtrk(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u8 enable;
- u32 thermal;
- s32 ret;
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
- memset(extra, 0, wrqu->length);
-
- enable = 1;
- if (wrqu->length > 1) {/* not empty string */
- if (strncmp(input, "stop", 4) == 0) {
- enable = 0;
- sprintf(extra, "mp tx power tracking stop");
- } else if (sscanf(input, "ther =%d", &thermal)) {
- ret = Hal_SetThermalMeter(padapter, (u8)thermal);
- if (ret == _FAIL)
- return -EPERM;
- sprintf(extra, "mp tx power tracking start, target value =%d ok ", thermal);
- } else {
- kfree(input);
- return -EINVAL;
- }
- }
-
- kfree(input);
- ret = Hal_SetPowerTracking(padapter, enable);
- if (ret == _FAIL)
- return -EPERM;
-
- wrqu->length = strlen(extra);
- return 0;
-}
-
-static int rtw_mp_psd(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
-
- strcpy(extra, input);
-
- wrqu->length = mp_query_psd(padapter, extra);
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_thermal(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u8 val;
- u16 bwrite = 1;
- u16 addr = EEPROM_THERMAL_METER_88E;
-
- u16 cnt = 1;
- u16 max_available_size = 0;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (copy_from_user(extra, wrqu->pointer, wrqu->length))
- return -EFAULT;
-
- bwrite = strncmp(extra, "write", 6); /* strncmp true is 0 */
-
- Hal_GetThermalMeter(padapter, &val);
-
- if (bwrite == 0) {
- EFUSE_GetEfuseDefinition(padapter, EFUSE_WIFI, TYPE_AVAILABLE_EFUSE_BYTES_TOTAL, (void *)&max_available_size, false);
- if (2 > max_available_size) {
- DBG_88E("no available efuse!\n");
- return -EFAULT;
- }
- if (rtw_efuse_map_write(padapter, addr, cnt, &val) == _FAIL) {
- DBG_88E("rtw_efuse_map_write error\n");
- return -EFAULT;
- } else {
- sprintf(extra, " efuse write ok :%d", val);
- }
- } else {
- sprintf(extra, "%d", val);
- }
- wrqu->length = strlen(extra);
-
- return 0;
-}
-
-static int rtw_mp_reset_stats(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- struct mp_priv *pmp_priv;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- pmp_priv = &padapter->mppriv;
-
- pmp_priv->tx.sended = 0;
- pmp_priv->tx_pktcount = 0;
- pmp_priv->rx_pktcount = 0;
- pmp_priv->rx_crcerrpktcount = 0;
-
- /* reset phy counter */
- write_bbreg(padapter, 0xf14, BIT(16), 0x1);
- msleep(10);
- write_bbreg(padapter, 0xf14, BIT(16), 0x0);
-
- return 0;
-}
-
-static int rtw_mp_dump(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- u32 value;
- u8 rf_type, path_nums = 0;
- u32 i, j = 1, path;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (strncmp(extra, "all", 4) == 0) {
- DBG_88E("\n ======= MAC REG =======\n");
- for (i = 0x0; i < 0x300; i += 4) {
- if (j % 4 == 1)
- DBG_88E("0x%02x", i);
- DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- DBG_88E("\n");
- }
- for (i = 0x400; i < 0x1000; i += 4) {
- if (j % 4 == 1)
- DBG_88E("0x%02x", i);
- DBG_88E(" 0x%08x ", rtw_read32(padapter, i));
- if ((j++) % 4 == 0)
- DBG_88E("\n");
- }
-
- j = 1;
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
-
- DBG_88E("\n ======= RF REG =======\n");
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type))
- path_nums = 1;
- else
- path_nums = 2;
-
- for (path = 0; path < path_nums; path++) {
- for (i = 0; i < 0x34; i++) {
- value = rtw_hal_read_rfreg(padapter, path, i, 0xffffffff);
- if (j % 4 == 1)
- DBG_88E("0x%02x ", i);
- DBG_88E(" 0x%08x ", value);
- if ((j++) % 4 == 0)
- DBG_88E("\n");
- }
- }
- }
- return 0;
-}
-
-static int rtw_mp_phypara(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
-{
- char *input = kmalloc(wrqu->length, GFP_KERNEL);
- u32 valxcap;
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->pointer, wrqu->length)) {
- kfree(input);
- return -EFAULT;
- }
-
- DBG_88E("%s:iwpriv in =%s\n", __func__, input);
-
- sscanf(input, "xcap =%d", &valxcap);
-
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_SetRFPath(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
- u8 bMain = 1, bTurnoff = 1;
-
- if (!input)
- return -ENOMEM;
- if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length))
- return -EFAULT;
- DBG_88E("%s:iwpriv in =%s\n", __func__, input);
-
- bMain = strncmp(input, "1", 2); /* strncmp true is 0 */
- bTurnoff = strncmp(input, "0", 3); /* strncmp true is 0 */
-
- if (bMain == 0) {
- MP_PHY_SetRFPathSwitch(padapter, true);
- DBG_88E("%s:PHY_SetRFPathSwitch = true\n", __func__);
- } else if (bTurnoff == 0) {
- MP_PHY_SetRFPathSwitch(padapter, false);
- DBG_88E("%s:PHY_SetRFPathSwitch = false\n", __func__);
- }
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_QueryDrv(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wrqu, char *extra)
-{
- struct adapter *padapter = rtw_netdev_priv(dev);
- char *input = kmalloc(wrqu->data.length, GFP_KERNEL);
- u8 qAutoLoad = 1;
- struct eeprom_priv *pEEPROM = GET_EEPROM_EFUSE_PRIV(padapter);
-
- if (!input)
- return -ENOMEM;
-
- if (copy_from_user(input, wrqu->data.pointer, wrqu->data.length))
- return -EFAULT;
- DBG_88E("%s:iwpriv in =%s\n", __func__, input);
-
- qAutoLoad = strncmp(input, "autoload", 8); /* strncmp true is 0 */
-
- if (qAutoLoad == 0) {
- DBG_88E("%s:qAutoLoad\n", __func__);
-
- if (pEEPROM->bautoload_fail_flag)
- sprintf(extra, "fail");
- else
- sprintf(extra, "ok");
- }
- wrqu->data.length = strlen(extra) + 1;
- kfree(input);
- return 0;
-}
-
-static int rtw_mp_set(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- struct iw_point *wrqu = (struct iw_point *)wdata;
- u32 subcmd = wrqu->flags;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!padapter)
- return -ENETDOWN;
-
- if (!extra) {
- wrqu->length = 0;
- return -EIO;
- }
-
- switch (subcmd) {
- case MP_START:
- DBG_88E("set case mp_start\n");
- rtw_mp_start(dev, info, wrqu, extra);
- break;
- case MP_STOP:
- DBG_88E("set case mp_stop\n");
- rtw_mp_stop(dev, info, wrqu, extra);
- break;
- case MP_BANDWIDTH:
- DBG_88E("set case mp_bandwidth\n");
- rtw_mp_bandwidth(dev, info, wrqu, extra);
- break;
- case MP_RESET_STATS:
- DBG_88E("set case MP_RESET_STATS\n");
- rtw_mp_reset_stats(dev, info, wrqu, extra);
- break;
- case MP_SetRFPathSwh:
- DBG_88E("set MP_SetRFPathSwitch\n");
- rtw_mp_SetRFPath(dev, info, wdata, extra);
- break;
- case CTA_TEST:
- DBG_88E("set CTA_TEST\n");
- rtw_cta_test_start(dev, info, wdata, extra);
- break;
- }
-
- return 0;
-}
-
-static int rtw_mp_get(struct net_device *dev,
- struct iw_request_info *info,
- union iwreq_data *wdata, char *extra)
-{
- struct iw_point *wrqu = (struct iw_point *)wdata;
- u32 subcmd = wrqu->flags;
- struct adapter *padapter = rtw_netdev_priv(dev);
-
- if (!padapter)
- return -ENETDOWN;
- if (!extra) {
- wrqu->length = 0;
- return -EIO;
- }
-
- switch (subcmd) {
- case WRITE_REG:
- rtw_mp_write_reg(dev, info, wrqu, extra);
- break;
- case WRITE_RF:
- rtw_mp_write_rf(dev, info, wrqu, extra);
- break;
- case MP_PHYPARA:
- DBG_88E("mp_get MP_PHYPARA\n");
- rtw_mp_phypara(dev, info, wrqu, extra);
- break;
- case MP_CHANNEL:
- DBG_88E("set case mp_channel\n");
- rtw_mp_channel(dev, info, wrqu, extra);
- break;
- case READ_REG:
- DBG_88E("mp_get READ_REG\n");
- rtw_mp_read_reg(dev, info, wrqu, extra);
- break;
- case READ_RF:
- DBG_88E("mp_get READ_RF\n");
- rtw_mp_read_rf(dev, info, wrqu, extra);
- break;
- case MP_RATE:
- DBG_88E("set case mp_rate\n");
- rtw_mp_rate(dev, info, wrqu, extra);
- break;
- case MP_TXPOWER:
- DBG_88E("set case MP_TXPOWER\n");
- rtw_mp_txpower(dev, info, wrqu, extra);
- break;
- case MP_ANT_TX:
- DBG_88E("set case MP_ANT_TX\n");
- rtw_mp_ant_tx(dev, info, wrqu, extra);
- break;
- case MP_ANT_RX:
- DBG_88E("set case MP_ANT_RX\n");
- rtw_mp_ant_rx(dev, info, wrqu, extra);
- break;
- case MP_QUERY:
- rtw_mp_trx_query(dev, info, wrqu, extra);
- break;
- case MP_CTX:
- DBG_88E("set case MP_CTX\n");
- rtw_mp_ctx(dev, info, wrqu, extra);
- break;
- case MP_ARX:
- DBG_88E("set case MP_ARX\n");
- rtw_mp_arx(dev, info, wrqu, extra);
- break;
- case EFUSE_GET:
- DBG_88E("efuse get EFUSE_GET\n");
- rtw_mp_efuse_get(dev, info, wdata, extra);
- break;
- case MP_DUMP:
- DBG_88E("set case MP_DUMP\n");
- rtw_mp_dump(dev, info, wrqu, extra);
- break;
- case MP_PSD:
- DBG_88E("set case MP_PSD\n");
- rtw_mp_psd(dev, info, wrqu, extra);
- break;
- case MP_THER:
- DBG_88E("set case MP_THER\n");
- rtw_mp_thermal(dev, info, wrqu, extra);
- break;
- case MP_QueryDrvStats:
- DBG_88E("mp_get MP_QueryDrvStats\n");
- rtw_mp_QueryDrv(dev, info, wdata, extra);
- break;
- case MP_PWRTRK:
- DBG_88E("set case MP_PWRTRK\n");
- rtw_mp_pwrtrk(dev, info, wrqu, extra);
- break;
- case EFUSE_SET:
- DBG_88E("set case efuse set\n");
- rtw_mp_efuse_set(dev, info, wdata, extra);
- break;
- }
-
- msleep(10); /* delay 5ms for sending pkt before exit adb shell operation */
- return 0;
-}
-
static int rtw_tdls(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -6530,51 +4406,18 @@ static const struct iw_priv_args rtw_private_args[] = {
{SIOCIWFIRSTPRIV + 0x18, IW_PRIV_TYPE_CHAR | IFNAMSIZ, 0, "rereg_nd_name"},
- {SIOCIWFIRSTPRIV + 0x1A, IW_PRIV_TYPE_CHAR | 1024, 0, "efuse_set"},
- {SIOCIWFIRSTPRIV + 0x1B, IW_PRIV_TYPE_CHAR | 128, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
{SIOCIWFIRSTPRIV + 0x1D, IW_PRIV_TYPE_CHAR | 40, IW_PRIV_TYPE_CHAR | 0x7FF, "test"
},
{SIOCIWFIRSTPRIV + 0x0E, IW_PRIV_TYPE_CHAR | 1024, 0, ""}, /* set */
{SIOCIWFIRSTPRIV + 0x0F, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, ""},/* get */
-/* --- sub-ioctls definitions --- */
-
- {MP_START, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_start"}, /* set */
- {MP_PHYPARA, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_phypara"},/* get */
- {MP_STOP, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_stop"}, /* set */
- {MP_CHANNEL, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_channel"},/* get */
- {MP_BANDWIDTH, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_bandwidth"}, /* set */
- {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},/* get */
- {MP_RESET_STATS, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_reset_stats"},
- {MP_QUERY, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_query"}, /* get */
- {READ_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_reg"},
- {MP_RATE, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_rate"},
- {READ_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "read_rf"},
- {MP_PSD, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_psd"},
- {MP_DUMP, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_dump"},
- {MP_TXPOWER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_txpower"},
- {MP_ANT_TX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_tx"},
- {MP_ANT_RX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ant_rx"},
- {WRITE_REG, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_reg"},
- {WRITE_RF, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "write_rf"},
- {MP_CTX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ctx"},
- {MP_ARX, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_arx"},
- {MP_THER, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_ther"},
- {EFUSE_SET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_set"},
- {EFUSE_GET, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "efuse_get"},
- {MP_PWRTRK, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_pwrtrk"},
- {MP_QueryDrvStats, IW_PRIV_TYPE_CHAR | 1024, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_MASK, "mp_drvquery"},
- {MP_IOCTL, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_ioctl"}, /* mp_ioctl */
- {MP_SetRFPathSwh, IW_PRIV_TYPE_CHAR | 1024, 0, "mp_setrfpath"},
- {CTA_TEST, IW_PRIV_TYPE_CHAR | 1024, 0, "cta_test"},
};
static iw_handler rtw_private_handler[] = {
rtw_wx_write32, /* 0x00 */
rtw_wx_read32, /* 0x01 */
rtw_drvext_hdl, /* 0x02 */
-rtw_mp_ioctl_hdl, /* 0x03 */
-
+NULL, /* 0x03 */
/* for MM DTV platform */
rtw_get_ap_info, /* 0x04 */
@@ -6591,9 +4434,9 @@ rtw_mp_ioctl_hdl, /* 0x03 */
rtw_dbg_port, /* 0x0B */
rtw_wx_write_rf, /* 0x0C */
rtw_wx_read_rf, /* 0x0D */
 - rtw_mp_set, /* 0x0E */
 - rtw_mp_get, /* 0x0F */
 + NULL, /* 0x0E */
 + NULL, /* 0x0F */
rtw_p2p_set, /* 0x10 */
rtw_p2p_get, /* 0x11 */
rtw_p2p_get2, /* 0x12 */
@@ -6607,8 +4450,8 @@ rtw_mp_ioctl_hdl, /* 0x03 */
rtw_rereg_nd_name, /* 0x18 */
rtw_wx_priv_null, /* 0x19 */
- rtw_mp_efuse_set, /* 0x1A */
- rtw_mp_efuse_get, /* 0x1B */
+ NULL, /* 0x1A */
+ NULL, /* 0x1B */
NULL, /* 0x1C is reserved for hostapd */
rtw_test, /* 0x1D */
};
diff --git a/drivers/staging/r8188eu/os_dep/mlme_linux.c b/drivers/staging/r8188eu/os_dep/mlme_linux.c
index e3ee9dc7ab90..a9b6ffdbf31a 100644
--- a/drivers/staging/r8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/r8188eu/os_dep/mlme_linux.c
@@ -25,8 +25,6 @@ static void _dynamic_check_timer_handlder(struct timer_list *t)
{
struct adapter *adapter = from_timer(adapter, t, mlmepriv.dynamic_chk_timer);
- if (adapter->registrypriv.mp_mode == 1)
- return;
rtw_dynamic_check_timer_handlder(adapter);
_set_timer(&adapter->mlmepriv.dynamic_chk_timer, 2000);
}
@@ -165,8 +163,6 @@ void init_mlme_ext_timer(struct adapter *padapter)
timer_setup(&pmlmeext->link_timer, _link_timer_hdl, 0);
}
-#ifdef CONFIG_88EU_AP_MODE
-
void rtw_indicate_sta_assoc_event(struct adapter *padapter, struct sta_info *psta)
{
union iwreq_data wrqu;
@@ -212,5 +208,3 @@ void rtw_indicate_sta_disassoc_event(struct adapter *padapter, struct sta_info *
wireless_send_event(padapter->pnetdev, IWEVEXPIRED, &wrqu, NULL);
}
-
-#endif
diff --git a/drivers/staging/r8188eu/os_dep/os_intfs.c b/drivers/staging/r8188eu/os_dep/os_intfs.c
index 8d0158f4a45d..10059240bf54 100644
--- a/drivers/staging/r8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/r8188eu/os_dep/os_intfs.c
@@ -9,9 +9,10 @@
#include "../include/recv_osdep.h"
#include "../include/hal_intf.h"
#include "../include/rtw_ioctl.h"
-
#include "../include/usb_osintf.h"
#include "../include/rtw_br_ext.h"
+#include "../include/rtl8188e_led.h"
+#include "../include/rtl8188e_dm.h"
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Realtek Wireless Lan Driver");
@@ -51,8 +52,6 @@ static int rtw_short_retry_lmt = 7;
static int rtw_busy_thresh = 40;
static int rtw_ack_policy = NORMAL_ACK;
-static int rtw_mp_mode;
-
static int rtw_software_encrypt;
static int rtw_software_decrypt;
@@ -85,7 +84,6 @@ static int rtw_AcceptAddbaReq = true;/* 0:Reject AP's Add BA req, 1:Accept AP's
static int rtw_antdiv_cfg = 2; /* 0:OFF , 1:ON, 2:decide by Efuse config */
static int rtw_antdiv_type; /* 0:decide by efuse 1: for 88EE, 1Tx and 1RxCG are diversity.(2 Ant with SPDT), 2: for 88EE, 1Tx and 2Rx are diversity.(2 Ant, Tx and RxCG are both on aux port, RxCS is on main port), 3: for 88EE, 1Tx and 1RxCG are fixed.(1Ant, Tx and RxCG are both on aux port) */
-static int rtw_enusbss;/* 0:disable, 1:enable */
static int rtw_hwpdn_mode = 2;/* 0:disable, 1:enable, 2: by EFUSE config */
@@ -114,7 +112,6 @@ module_param(rtw_rfintfs, int, 0644);
module_param(rtw_lbkmode, int, 0644);
module_param(rtw_network_mode, int, 0644);
module_param(rtw_channel, int, 0644);
-module_param(rtw_mp_mode, int, 0644);
module_param(rtw_wmm_enable, int, 0644);
module_param(rtw_vrtl_carrier_sense, int, 0644);
module_param(rtw_vcs_type, int, 0644);
@@ -133,7 +130,6 @@ module_param(rtw_low_power, int, 0644);
module_param(rtw_wifi_spec, int, 0644);
module_param(rtw_antdiv_cfg, int, 0644);
module_param(rtw_antdiv_type, int, 0644);
-module_param(rtw_enusbss, int, 0644);
module_param(rtw_hwpdn_mode, int, 0644);
module_param(rtw_hwpwrp_detect, int, 0644);
module_param(rtw_hw_wps_pbc, int, 0644);
@@ -157,348 +153,6 @@ MODULE_PARM_DESC(rtw_notch_filter, "0:Disable, 1:Enable, 2:Enable only for P2P")
module_param_named(debug, rtw_debug, int, 0444);
MODULE_PARM_DESC(debug, "Set debug level (1-9) (default 1)");
-/* dummy routines */
-void rtw_proc_remove_one(struct net_device *dev)
-{
-}
-
-void rtw_proc_init_one(struct net_device *dev)
-{
-}
-
-#if 0 /* TODO: Convert these to /sys */
-void rtw_proc_init_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir_dev = NULL;
- struct proc_dir_entry *entry = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- u8 rf_type;
-
- if (!rtw_proc) {
- memcpy(rtw_proc_name, DRV_NAME, sizeof(DRV_NAME));
-
- rtw_proc = create_proc_entry(rtw_proc_name, S_IFDIR, init_net.proc_net);
- if (!rtw_proc) {
- DBG_88E(KERN_ERR "Unable to create rtw_proc directory\n");
- return;
- }
-
- entry = create_proc_read_entry("ver_info", S_IFREG | S_IRUGO, rtw_proc, proc_get_drv_version, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- }
-
- if (!padapter->dir_dev) {
- padapter->dir_dev = create_proc_entry(dev->name,
- S_IFDIR | S_IRUGO | S_IXUGO,
- rtw_proc);
- dir_dev = padapter->dir_dev;
- if (!dir_dev) {
- if (rtw_proc_cnt == 0) {
- if (rtw_proc) {
- remove_proc_entry(rtw_proc_name, init_net.proc_net);
- rtw_proc = NULL;
- }
- }
-
- pr_info("Unable to create dir_dev directory\n");
- return;
- }
- } else {
- return;
- }
-
- rtw_proc_cnt++;
-
- entry = create_proc_read_entry("write_reg", S_IFREG | S_IRUGO,
- dir_dev, proc_get_write_reg, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_write_reg;
-
- entry = create_proc_read_entry("read_reg", S_IFREG | S_IRUGO,
- dir_dev, proc_get_read_reg, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_read_reg;
-
- entry = create_proc_read_entry("fwstate", S_IFREG | S_IRUGO,
- dir_dev, proc_get_fwstate, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("sec_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_sec_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mlmext_state", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mlmext_state, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("qos_option", S_IFREG | S_IRUGO,
- dir_dev, proc_get_qos_option, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("ht_option", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ht_option, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("ap_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ap_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("adapter_state", S_IFREG | S_IRUGO,
- dir_dev, proc_getstruct adapter_state, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("trx_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_trx_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("mac_reg_dump3", S_IFREG | S_IRUGO,
- dir_dev, proc_get_mac_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("bb_reg_dump3", S_IFREG | S_IRUGO,
- dir_dev, proc_get_bb_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump1", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump1, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump2", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump2, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
- entry = create_proc_read_entry("rf_reg_dump3", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump3, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rf_reg_dump4", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rf_reg_dump4, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- }
-
-#ifdef CONFIG_88EU_AP_MODE
-
- entry = create_proc_read_entry("all_sta_info", S_IFREG | S_IRUGO,
- dir_dev, proc_get_all_sta_info, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-#endif
-
- entry = create_proc_read_entry("best_channel", S_IFREG | S_IRUGO,
- dir_dev, proc_get_best_channel, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
-
- entry = create_proc_read_entry("rx_signal", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rx_signal, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rx_signal;
- entry = create_proc_read_entry("ht_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ht_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_ht_enable;
-
- entry = create_proc_read_entry("cbw40_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_cbw40_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_cbw40_enable;
-
- entry = create_proc_read_entry("ampdu_enable", S_IFREG | S_IRUGO,
- dir_dev, proc_get_ampdu_enable, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_ampdu_enable;
-
- entry = create_proc_read_entry("rx_stbc", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rx_stbc, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rx_stbc;
-
- entry = create_proc_read_entry("path_rssi", S_IFREG | S_IRUGO,
- dir_dev, proc_get_two_path_rssi, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry = create_proc_read_entry("rssi_disp", S_IFREG | S_IRUGO,
- dir_dev, proc_get_rssi_disp, dev);
- if (!entry) {
- pr_info("Unable to create_proc_read_entry!\n");
- return;
- }
- entry->write_proc = proc_set_rssi_disp;
-}
-
-void rtw_proc_remove_one(struct net_device *dev)
-{
- struct proc_dir_entry *dir_dev = NULL;
- struct adapter *padapter = rtw_netdev_priv(dev);
- u8 rf_type;
-
- dir_dev = padapter->dir_dev;
- padapter->dir_dev = NULL;
-
- if (dir_dev) {
- remove_proc_entry("write_reg", dir_dev);
- remove_proc_entry("read_reg", dir_dev);
- remove_proc_entry("fwstate", dir_dev);
- remove_proc_entry("sec_info", dir_dev);
- remove_proc_entry("mlmext_state", dir_dev);
- remove_proc_entry("qos_option", dir_dev);
- remove_proc_entry("ht_option", dir_dev);
- remove_proc_entry("rf_info", dir_dev);
- remove_proc_entry("ap_info", dir_dev);
- remove_proc_entry("adapter_state", dir_dev);
- remove_proc_entry("trx_info", dir_dev);
- remove_proc_entry("mac_reg_dump1", dir_dev);
- remove_proc_entry("mac_reg_dump2", dir_dev);
- remove_proc_entry("mac_reg_dump3", dir_dev);
- remove_proc_entry("bb_reg_dump1", dir_dev);
- remove_proc_entry("bb_reg_dump2", dir_dev);
- remove_proc_entry("bb_reg_dump3", dir_dev);
- remove_proc_entry("rf_reg_dump1", dir_dev);
- remove_proc_entry("rf_reg_dump2", dir_dev);
- rtw_hal_get_hwreg(padapter, HW_VAR_RF_TYPE, (u8 *)(&rf_type));
- if ((RF_1T2R == rf_type) || (RF_1T1R == rf_type)) {
- remove_proc_entry("rf_reg_dump3", dir_dev);
- remove_proc_entry("rf_reg_dump4", dir_dev);
- }
-#ifdef CONFIG_88EU_AP_MODE
- remove_proc_entry("all_sta_info", dir_dev);
-#endif
-
- remove_proc_entry("best_channel", dir_dev);
- remove_proc_entry("rx_signal", dir_dev);
- remove_proc_entry("cbw40_enable", dir_dev);
- remove_proc_entry("ht_enable", dir_dev);
- remove_proc_entry("ampdu_enable", dir_dev);
- remove_proc_entry("rx_stbc", dir_dev);
- remove_proc_entry("path_rssi", dir_dev);
- remove_proc_entry("rssi_disp", dir_dev);
- remove_proc_entry(dev->name, rtw_proc);
- dir_dev = NULL;
- } else {
- return;
- }
- rtw_proc_cnt--;
-
- if (rtw_proc_cnt == 0) {
- if (rtw_proc) {
- remove_proc_entry("ver_info", rtw_proc);
-
- remove_proc_entry(rtw_proc_name, init_net.proc_net);
- rtw_proc = NULL;
- }
- }
-}
-#endif
-
static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
{
struct registry_priv *registry_par = &padapter->registrypriv;
@@ -530,7 +184,6 @@ static uint loadparam(struct adapter *padapter, struct net_device *pnetdev)
registry_par->short_retry_lmt = (u8)rtw_short_retry_lmt;
registry_par->busy_thresh = (u16)rtw_busy_thresh;
registry_par->ack_policy = (u8)rtw_ack_policy;
- registry_par->mp_mode = (u8)rtw_mp_mode;
registry_par->software_encrypt = (u8)rtw_software_encrypt;
registry_par->software_decrypt = (u8)rtw_software_decrypt;
registry_par->acm_method = (u8)rtw_acm_method;
@@ -731,20 +384,19 @@ u32 rtw_start_drv_threads(struct adapter *padapter)
if (IS_ERR(padapter->cmdThread))
_status = _FAIL;
else
- _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema); /* wait for cmd_thread to run */
+ /* wait for rtw_cmd_thread() to start running */
+ wait_for_completion(&padapter->cmdpriv.start_cmd_thread);
- rtw_hal_start_thread(padapter);
return _status;
}
void rtw_stop_drv_threads(struct adapter *padapter)
{
/* Below is to termindate rtw_cmd_thread & event_thread... */
- up(&padapter->cmdpriv.cmd_queue_sema);
+ complete(&padapter->cmdpriv.enqueue_cmd);
if (padapter->cmdThread)
- _rtw_down_sema(&padapter->cmdpriv.terminate_cmdthread_sema);
-
- rtw_hal_stop_thread(padapter);
+ /* wait for rtw_cmd_thread() to stop running */
+ wait_for_completion(&padapter->cmdpriv.stop_cmd_thread);
}
static u8 rtw_init_default_value(struct adapter *padapter)
@@ -784,16 +436,14 @@ static u8 rtw_init_default_value(struct adapter *padapter)
rtw_update_registrypriv_dev_network(padapter);
/* hal_priv */
- rtw_hal_def_value_init(padapter);
+ rtl8188eu_init_default_value(padapter);
/* misc. */
padapter->bReadPortCancel = false;
padapter->bWritePortCancel = false;
padapter->bRxRSSIDisplay = 0;
padapter->bNotifyChannelChange = 0;
-#ifdef CONFIG_88EU_P2P
padapter->bShowGetP2PState = 1;
-#endif
return _SUCCESS;
}
@@ -803,7 +453,7 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;
/* hal_priv */
- rtw_hal_def_value_init(padapter);
+ rtl8188eu_init_default_value(padapter);
padapter->bReadPortCancel = false;
padapter->bWritePortCancel = false;
padapter->bRxRSSIDisplay = 0;
@@ -816,7 +466,6 @@ u8 rtw_reset_drv_sw(struct adapter *padapter)
_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY | _FW_UNDER_LINKING);
- rtw_hal_sreset_reset_value(padapter);
pwrctrlpriv->pwr_state_check_cnts = 0;
/* mlmeextpriv */
@@ -848,11 +497,9 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
goto exit;
}
-#ifdef CONFIG_88EU_P2P
rtw_init_wifidirect_timers(padapter);
init_wifidirect_info(padapter, P2P_ROLE_DISABLE);
reset_global_wifidirect_info(padapter);
-#endif /* CONFIG_88EU_P2P */
if (init_mlme_ext_priv(padapter) == _FAIL) {
ret8 = _FAIL;
@@ -883,15 +530,10 @@ u8 rtw_init_drv_sw(struct adapter *padapter)
rtw_init_pwrctrl_priv(padapter);
- if (init_mp_priv(padapter) == _FAIL)
- DBG_88E("%s: initialize MP private data Fail!\n", __func__);
-
ret8 = rtw_init_default_value(padapter);
- rtw_hal_dm_init(padapter);
- rtw_hal_sw_led_init(padapter);
-
- rtw_hal_sreset_init(padapter);
+ rtl8188e_init_dm_priv(padapter);
+ rtl8188eu_InitSwLeds(padapter);
spin_lock_init(&padapter->br_ext_lock);
@@ -908,13 +550,11 @@ void rtw_cancel_all_timer(struct adapter *padapter)
_cancel_timer_ex(&padapter->mlmepriv.dynamic_chk_timer);
/* cancel sw led timer */
- rtw_hal_sw_led_deinit(padapter);
+ rtl8188eu_DeInitSwLeds(padapter);
_cancel_timer_ex(&padapter->pwrctrlpriv.pwr_state_check_timer);
_cancel_timer_ex(&padapter->recvpriv.signal_stat_timer);
- /* cancel dm timer */
- rtw_hal_dm_deinit(padapter);
}
u8 rtw_free_drv_sw(struct adapter *padapter)
@@ -922,7 +562,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
/* we can call rtw_p2p_enable here, but: */
/* 1. rtw_p2p_enable may have IO operation */
/* 2. rtw_p2p_enable is bundled with wext interface */
- #ifdef CONFIG_88EU_P2P
{
struct wifidirect_info *pwdinfo = &padapter->wdinfo;
if (!rtw_p2p_chk_state(pwdinfo, P2P_STATE_NONE)) {
@@ -932,7 +571,6 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
rtw_p2p_set_state(pwdinfo, P2P_STATE_NONE);
}
}
- #endif
free_mlme_ext_priv(&padapter->mlmeextpriv);
@@ -947,9 +585,7 @@ u8 rtw_free_drv_sw(struct adapter *padapter)
_rtw_free_recv_priv(&padapter->recvpriv);
- rtw_free_pwrctrl_priv(padapter);
-
- rtw_hal_free_data(padapter);
+ rtl8188e_free_hal_data(padapter);
/* free the old_pnetdev */
if (padapter->rereg_nd_name_priv.old_pnetdev) {
@@ -1025,7 +661,6 @@ int _netdev_open(struct net_device *pnetdev)
}
if (padapter->intf_start)
padapter->intf_start(padapter);
- rtw_proc_init_one(pnetdev);
rtw_led_control(padapter, LED_CTL_NO_LINK);
@@ -1062,9 +697,9 @@ int netdev_open(struct net_device *pnetdev)
int ret;
struct adapter *padapter = (struct adapter *)rtw_netdev_priv(pnetdev);
- _enter_critical_mutex(padapter->hw_init_mutex, NULL);
+ mutex_lock(padapter->hw_init_mutex);
ret = _netdev_open(pnetdev);
- _exit_critical_mutex(padapter->hw_init_mutex, NULL);
+ mutex_unlock(padapter->hw_init_mutex);
return ret;
}
@@ -1130,7 +765,7 @@ void rtw_ips_dev_unload(struct adapter *padapter)
{
DBG_88E("====> %s...\n", __func__);
- rtw_hal_set_hwreg(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
+ SetHwReg8188EU(padapter, HW_VAR_FIFO_CLEARN_UP, NULL);
if (padapter->intf_stop)
padapter->intf_stop(padapter);
@@ -1187,9 +822,7 @@ int netdev_close(struct net_device *pnetdev)
nat25_db_cleanup(padapter);
-#ifdef CONFIG_88EU_P2P
rtw_p2p_enable(padapter, P2P_ROLE_DISABLE);
-#endif /* CONFIG_88EU_P2P */
kfree(dvobj->firmware.szFwBuffer);
dvobj->firmware.szFwBuffer = NULL;
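The thread-shutdown hunk near the top of this file swaps the old interruptible-semaphore handshake for a struct completion that the command thread signals when it exits. A minimal sketch of that pattern, using hypothetical names (worker_ctx, worker_thread, worker_stop) rather than the driver's own fields:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/types.h>

struct worker_ctx {                      /* illustrative only; init_completion() both fields before kthread_run() */
	bool stop_req;
	struct completion wake;          /* kicked when there is work or a stop request */
	struct completion done;          /* signalled by the thread right before it returns */
};

static int worker_thread(void *arg)
{
	struct worker_ctx *ctx = arg;

	while (!ctx->stop_req) {
		wait_for_completion(&ctx->wake);
		/* ... drain the command queue ... */
	}
	complete(&ctx->done);            /* unblock whoever is tearing us down */
	return 0;
}

static void worker_stop(struct worker_ctx *ctx, struct task_struct *task)
{
	if (!task)
		return;
	ctx->stop_req = true;
	complete(&ctx->wake);            /* wake the thread so it can observe stop_req */
	wait_for_completion(&ctx->done); /* wait until the thread has really left its loop */
}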
diff --git a/drivers/staging/r8188eu/os_dep/osdep_service.c b/drivers/staging/r8188eu/os_dep/osdep_service.c
index 95ac6086370b..59bdd0abea7e 100644
--- a/drivers/staging/r8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/r8188eu/os_dep/osdep_service.c
@@ -19,23 +19,6 @@ inline int RTW_STATUS_CODE(int error_code)
return _FAIL;
}
-u32 rtw_atoi(u8 *s)
-{
- int num = 0, flag = 0;
- int i;
- for (i = 0; i <= strlen(s); i++) {
- if (s[i] >= '0' && s[i] <= '9')
- num = num * 10 + s[i] - '0';
- else if (s[0] == '-' && i == 0)
- flag = 1;
- else
- break;
- }
- if (flag == 1)
- num = num * -1;
- return num;
-}
-
void *rtw_malloc2d(int h, int w, int size)
{
int j;
@@ -59,30 +42,6 @@ Otherwise, there will be racing condition.
Caller must check if the list is empty before calling rtw_list_delete
*/
-u32 _rtw_down_sema(struct semaphore *sema)
-{
- if (down_interruptible(sema))
- return _FAIL;
- else
- return _SUCCESS;
-}
-
-void _rtw_mutex_init(struct mutex *pmutex)
-{
- mutex_init(pmutex);
-}
-
-void _rtw_mutex_free(struct mutex *pmutex)
-{
- mutex_destroy(pmutex);
-}
-
-void _rtw_init_queue(struct __queue *pqueue)
-{
- INIT_LIST_HEAD(&pqueue->queue);
- spin_lock_init(&pqueue->lock);
-}
-
inline u32 rtw_systime_to_ms(u32 systime)
{
return systime * 1000 / HZ;
@@ -107,8 +66,6 @@ void rtw_usleep_os(int us)
msleep((us / 1000) + 1);
}
-#define RTW_SUSPEND_LOCK_NAME "rtw_wifi"
-
static const struct device_type wlan_type = {
.name = "wlan",
};
@@ -198,8 +155,6 @@ int rtw_change_ifname(struct adapter *padapter, const char *ifname)
else
unregister_netdevice(cur_pnetdev);
- rtw_proc_remove_one(cur_pnetdev);
-
rereg_priv->old_pnetdev = cur_pnetdev;
pnetdev = rtw_init_netdev(padapter);
@@ -212,7 +167,7 @@ int rtw_change_ifname(struct adapter *padapter, const char *ifname)
rtw_init_netdev_name(pnetdev, ifname);
- memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
if (!rtnl_is_locked())
ret = register_netdev(pnetdev);
@@ -221,7 +176,6 @@ int rtw_change_ifname(struct adapter *padapter, const char *ifname)
if (ret != 0)
goto error;
- rtw_proc_init_one(pnetdev);
return 0;
error:
return -1;
@@ -259,17 +213,6 @@ keep_ori:
}
/**
- * rtw_cbuf_full - test if cbuf is full
- * @cbuf: pointer of struct rtw_cbuf
- *
- * Returns: true if cbuf is full
- */
-inline bool rtw_cbuf_full(struct rtw_cbuf *cbuf)
-{
- return (cbuf->write == cbuf->read - 1) ? true : false;
-}
-
-/**
* rtw_cbuf_empty - test if cbuf is empty
* @cbuf: pointer of struct rtw_cbuf
*
@@ -281,27 +224,6 @@ inline bool rtw_cbuf_empty(struct rtw_cbuf *cbuf)
}
/**
- * rtw_cbuf_push - push a pointer into cbuf
- * @cbuf: pointer of struct rtw_cbuf
- * @buf: pointer to push in
- *
- * Lock free operation, be careful of the use scheme
- * Returns: true push success
- */
-bool rtw_cbuf_push(struct rtw_cbuf *cbuf, void *buf)
-{
- if (rtw_cbuf_full(cbuf))
- return _FAIL;
-
- if (0)
- DBG_88E("%s on %u\n", __func__, cbuf->write);
- cbuf->bufs[cbuf->write] = buf;
- cbuf->write = (cbuf->write + 1) % cbuf->size;
-
- return _SUCCESS;
-}
-
-/**
* rtw_cbuf_pop - pop a pointer from cbuf
* @cbuf: pointer of struct rtw_cbuf
*
@@ -332,7 +254,7 @@ struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
struct rtw_cbuf *cbuf;
- cbuf = kmalloc(sizeof(*cbuf) + sizeof(void *) * size, GFP_KERNEL);
+ cbuf = kmalloc(struct_size(cbuf, bufs, size), GFP_KERNEL);
if (cbuf) {
cbuf->write = 0;
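The allocation change just above replaces the open-coded sizeof(*cbuf) + sizeof(void *) * size with struct_size(), which computes the same size but saturates instead of wrapping if the multiplication overflows. A generic sketch of the idiom, with an illustrative struct rather than the driver's rtw_cbuf:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct ptr_ring {                        /* illustrative type with a trailing flexible array */
	u32 write;
	u32 read;
	u32 size;
	void *bufs[];
};

static struct ptr_ring *ptr_ring_alloc(u32 nr_slots)
{
	struct ptr_ring *ring;

	/*
	 * struct_size(ring, bufs, nr_slots) ==
	 *	sizeof(*ring) + nr_slots * sizeof(ring->bufs[0]), with overflow checking.
	 */
	ring = kzalloc(struct_size(ring, bufs, nr_slots), GFP_KERNEL);
	if (!ring)
		return NULL;
	ring->size = nr_slots;
	return ring;
}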
diff --git a/drivers/staging/r8188eu/os_dep/recv_linux.c b/drivers/staging/r8188eu/os_dep/recv_linux.c
index 917a63e3e94c..5a7fb94e21c1 100644
--- a/drivers/staging/r8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/r8188eu/os_dep/recv_linux.c
@@ -186,20 +186,6 @@ _recv_indicatepkt_drop:
return _FAIL;
}
-void rtw_os_read_port(struct adapter *padapter, struct recv_buf *precvbuf)
-{
- struct recv_priv *precvpriv = &padapter->recvpriv;
-
- precvbuf->ref_cnt--;
- /* free skb in recv_buf */
- dev_kfree_skb_any(precvbuf->pskb);
- precvbuf->pskb = NULL;
- precvbuf->reuse = false;
- if (!precvbuf->irp_pending)
- rtw_read_port(padapter, precvpriv->ff_hwaddr, 0,
- (unsigned char *)precvbuf);
-}
-
static void _rtw_reordering_ctrl_timeout_handler(struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl;
diff --git a/drivers/staging/r8188eu/os_dep/usb_intf.c b/drivers/staging/r8188eu/os_dep/usb_intf.c
index bb85ab77fd26..5a35d9fe3fc9 100644
--- a/drivers/staging/r8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/r8188eu/os_dep/usb_intf.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2011 Realtek Corporation. */
-#define _HCI_INTF_C_
-
#include <linux/usb.h>
#include "../include/osdep_service.h"
#include "../include/drv_types.h"
@@ -14,6 +12,7 @@
#include "../include/usb_ops.h"
#include "../include/usb_osintf.h"
#include "../include/rtw_ioctl.h"
+#include "../include/rtl8188e_hal.h"
int ui_pid[3] = {0, 0, 0};
@@ -50,10 +49,6 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
MODULE_DEVICE_TABLE(usb, rtw_usb_id_tbl);
-static struct specific_device_id specific_device_id_tbl[] = {
- {} /* empty table for now */
-};
-
struct rtw_usb_drv {
struct usb_driver usbdrv;
int drv_registered;
@@ -72,73 +67,9 @@ static struct rtw_usb_drv rtl8188e_usb_drv = {
static struct rtw_usb_drv *usb_drv = &rtl8188e_usb_drv;
-static inline int RT_usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
-{
- return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN;
-}
-
-static inline int RT_usb_endpoint_dir_out(const struct usb_endpoint_descriptor *epd)
-{
- return (epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT;
-}
-
-static inline int RT_usb_endpoint_xfer_int(const struct usb_endpoint_descriptor *epd)
-{
- return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_INT;
-}
-
-static inline int RT_usb_endpoint_xfer_bulk(const struct usb_endpoint_descriptor *epd)
-{
- return (epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK;
-}
-
-static inline int RT_usb_endpoint_is_bulk_in(const struct usb_endpoint_descriptor *epd)
-{
- return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_in(epd);
-}
-
-static inline int RT_usb_endpoint_is_bulk_out(const struct usb_endpoint_descriptor *epd)
-{
- return RT_usb_endpoint_xfer_bulk(epd) && RT_usb_endpoint_dir_out(epd);
-}
-
-static inline int usb_endpoint_is_int(const struct usb_endpoint_descriptor *epd)
-{
- return RT_usb_endpoint_xfer_int(epd) && RT_usb_endpoint_dir_in(epd);
-}
-
-static inline int RT_usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
-{
- return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
-}
-
-static u8 rtw_init_intf_priv(struct dvobj_priv *dvobj)
-{
- u8 rst = _SUCCESS;
-
- _rtw_mutex_init(&dvobj->usb_vendor_req_mutex);
-
- dvobj->usb_alloc_vendor_req_buf = kzalloc(MAX_USB_IO_CTL_SIZE, GFP_KERNEL);
- if (!dvobj->usb_alloc_vendor_req_buf) {
- DBG_88E("alloc usb_vendor_req_buf failed... /n");
- rst = _FAIL;
- goto exit;
- }
- dvobj->usb_vendor_req_buf = (u8 *)N_BYTE_ALIGMENT((size_t)(dvobj->usb_alloc_vendor_req_buf), ALIGNMENT_UNIT);
-exit:
- return rst;
-}
-
-static void rtw_deinit_intf_priv(struct dvobj_priv *dvobj)
-{
- kfree(dvobj->usb_alloc_vendor_req_buf);
- _rtw_mutex_free(&dvobj->usb_vendor_req_mutex);
-}
-
static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
{
int i;
- int status = _FAIL;
struct dvobj_priv *pdvobjpriv;
struct usb_host_config *phost_conf;
struct usb_config_descriptor *pconf_desc;
@@ -197,23 +128,12 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
DBG_88E("NON USB_SPEED_HIGH\n");
}
- if (rtw_init_intf_priv(pdvobjpriv) == _FAIL)
- goto free_dvobj;
-
/* 3 misc */
sema_init(&pdvobjpriv->usb_suspend_sema, 0);
rtw_reset_continual_urb_error(pdvobjpriv);
usb_get_dev(pusbd);
- status = _SUCCESS;
-
-free_dvobj:
- if (status != _SUCCESS && pdvobjpriv) {
- usb_set_intfdata(usb_intf, NULL);
- kfree(pdvobjpriv);
- pdvobjpriv = NULL;
- }
exit:
return pdvobjpriv;
}
@@ -239,7 +159,6 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
usb_reset_device(interface_to_usbdev(usb_intf));
}
}
- rtw_deinit_intf_priv(dvobj);
kfree(dvobj);
}
@@ -247,21 +166,15 @@ static void usb_dvobj_deinit(struct usb_interface *usb_intf)
}
-static void chip_by_usb_id(struct adapter *padapter)
-{
- padapter->chip_type = NULL_CHIP_TYPE;
- hal_set_hw_type(padapter);
-}
-
static void usb_intf_start(struct adapter *padapter)
{
- rtw_hal_inirp_init(padapter);
+ rtl8188eu_inirp_init(padapter);
}
static void usb_intf_stop(struct adapter *padapter)
{
/* cancel in irp */
- rtw_hal_inirp_deinit(padapter);
+ rtw_read_port_cancel(padapter);
/* cancel out irp */
rtw_write_port_cancel(padapter);
@@ -295,130 +208,6 @@ static void rtw_dev_unload(struct adapter *padapter)
DBG_88E("<=== rtw_dev_unload\n");
}
-static void process_spec_devid(const struct usb_device_id *pdid)
-{
- u16 vid, pid;
- u32 flags;
- int i;
- int num = sizeof(specific_device_id_tbl) /
- sizeof(struct specific_device_id);
-
- for (i = 0; i < num; i++) {
- vid = specific_device_id_tbl[i].idVendor;
- pid = specific_device_id_tbl[i].idProduct;
- flags = specific_device_id_tbl[i].flags;
-
- if ((pdid->idVendor == vid) && (pdid->idProduct == pid) &&
- (flags & SPEC_DEV_ID_DISABLE_HT)) {
- rtw_ht_enable = 0;
- rtw_cbw40_enable = 0;
- rtw_ampdu_enable = 0;
- }
- }
-}
-
-int rtw_hw_suspend(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- struct net_device *pnetdev = padapter->pnetdev;
-
-
- if (!padapter)
- goto error_exit;
- if ((!padapter->bup) || (padapter->bDriverStopped) ||
- (padapter->bSurpriseRemoved)) {
- DBG_88E("padapter->bup=%d bDriverStopped=%d bSurpriseRemoved = %d\n",
- padapter->bup, padapter->bDriverStopped,
- padapter->bSurpriseRemoved);
- goto error_exit;
- }
-
- LeaveAllPowerSaveMode(padapter);
-
- DBG_88E("==> rtw_hw_suspend\n");
- _enter_pwrlock(&pwrpriv->lock);
- pwrpriv->bips_processing = true;
- /* s1. */
- if (pnetdev) {
- netif_carrier_off(pnetdev);
- rtw_netif_stop_queue(pnetdev);
- }
-
- /* s2. */
- rtw_disassoc_cmd(padapter, 500, false);
-
- /* s2-2. indicate disconnect to os */
- {
- struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
-
- if (check_fwstate(pmlmepriv, _FW_LINKED)) {
- _clr_fwstate_(pmlmepriv, _FW_LINKED);
-
- rtw_led_control(padapter, LED_CTL_NO_LINK);
-
- rtw_os_indicate_disconnect(padapter);
-
- /* donnot enqueue cmd */
- rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_DISCONNECT, 0);
- }
- }
- /* s2-3. */
- rtw_free_assoc_resources(padapter, 1);
-
- /* s2-4. */
- rtw_free_network_queue(padapter, true);
- rtw_ips_dev_unload(padapter);
- pwrpriv->rf_pwrstate = rf_off;
- pwrpriv->bips_processing = false;
-
- _exit_pwrlock(&pwrpriv->lock);
- return 0;
-
-error_exit:
- DBG_88E("%s, failed\n", __func__);
- return -1;
-}
-
-int rtw_hw_resume(struct adapter *padapter)
-{
- struct pwrctrl_priv *pwrpriv;
- struct net_device *pnetdev = padapter->pnetdev;
-
- if (!padapter)
- goto error_exit;
- pwrpriv = &padapter->pwrctrlpriv;
- DBG_88E("==> rtw_hw_resume\n");
- _enter_pwrlock(&pwrpriv->lock);
- pwrpriv->bips_processing = true;
- rtw_reset_drv_sw(padapter);
-
- if (pm_netdev_open(pnetdev, false) != 0) {
- _exit_pwrlock(&pwrpriv->lock);
- goto error_exit;
- }
-
- netif_device_attach(pnetdev);
- netif_carrier_on(pnetdev);
-
- if (!netif_queue_stopped(pnetdev))
- netif_start_queue(pnetdev);
- else
- netif_wake_queue(pnetdev);
-
- pwrpriv->bkeepfwalive = false;
- pwrpriv->brfoffbyhw = false;
-
- pwrpriv->rf_pwrstate = rf_on;
- pwrpriv->bips_processing = false;
-
- _exit_pwrlock(&pwrpriv->lock);
-
- return 0;
-error_exit:
- DBG_88E("%s, Open net dev failed\n", __func__);
- return -1;
-}
-
static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
{
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
@@ -445,7 +234,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_cancel_all_timer(padapter);
LeaveAllPowerSaveMode(padapter);
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->lock);
/* s1. */
if (pnetdev) {
netif_carrier_off(pnetdev);
@@ -474,7 +263,7 @@ static int rtw_suspend(struct usb_interface *pusb_intf, pm_message_t message)
rtw_free_network_queue(padapter, true);
rtw_dev_unload(padapter);
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->lock);
if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY))
rtw_indicate_scan_done(padapter, 1);
@@ -493,18 +282,6 @@ static int rtw_resume(struct usb_interface *pusb_intf)
{
struct dvobj_priv *dvobj = usb_get_intfdata(pusb_intf);
struct adapter *padapter = dvobj->if1;
- struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;
- int ret = 0;
-
- if (pwrpriv->bInternalAutoSuspend)
- ret = rtw_resume_process(padapter);
- else
- ret = rtw_resume_process(padapter);
- return ret;
-}
-
-int rtw_resume_process(struct adapter *padapter)
-{
struct net_device *pnetdev;
struct pwrctrl_priv *pwrpriv = NULL;
int ret = -1;
@@ -512,26 +289,24 @@ int rtw_resume_process(struct adapter *padapter)
DBG_88E("==> %s (%s:%d)\n", __func__, current->comm, current->pid);
- if (padapter) {
- pnetdev = padapter->pnetdev;
- pwrpriv = &padapter->pwrctrlpriv;
- } else {
- goto exit;
- }
+ pnetdev = padapter->pnetdev;
+ pwrpriv = &padapter->pwrctrlpriv;
- _enter_pwrlock(&pwrpriv->lock);
+ mutex_lock(&pwrpriv->lock);
rtw_reset_drv_sw(padapter);
if (pwrpriv)
pwrpriv->bkeepfwalive = false;
DBG_88E("bkeepfwalive(%x)\n", pwrpriv->bkeepfwalive);
- if (pm_netdev_open(pnetdev, true) != 0)
+ if (pm_netdev_open(pnetdev, true) != 0) {
+ mutex_unlock(&pwrpriv->lock);
goto exit;
+ }
netif_device_attach(pnetdev);
netif_carrier_on(pnetdev);
- _exit_pwrlock(&pwrpriv->lock);
+ mutex_unlock(&pwrpriv->lock);
if (padapter->pid[1] != 0) {
DBG_88E("pid[1]:%d\n", padapter->pid[1]);
@@ -565,6 +340,8 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
struct adapter *padapter = NULL;
struct net_device *pnetdev = NULL;
int status = _FAIL;
+ struct io_priv *piopriv;
+ struct intf_hdl *pintf;
padapter = vzalloc(sizeof(*padapter));
if (!padapter)
@@ -576,10 +353,6 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
padapter->hw_init_mutex = &usb_drv->hw_init_mutex;
- /* step 1-1., decide the chip_type via vid/pid */
- padapter->interface_type = RTW_USB;
- chip_by_usb_id(padapter);
-
if (rtw_handle_dualmac(padapter, 1) != _SUCCESS)
goto free_adapter;
@@ -589,23 +362,27 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
SET_NETDEV_DEV(pnetdev, dvobj_to_dev(dvobj));
padapter = rtw_netdev_priv(pnetdev);
- /* step 2. hook HalFunc, allocate HalData */
- rtl8188eu_set_hal_ops(padapter);
+ /* step 2. allocate HalData */
+ rtl8188eu_alloc_haldata(padapter);
padapter->intf_start = &usb_intf_start;
padapter->intf_stop = &usb_intf_stop;
/* step init_io_priv */
- rtw_init_io_priv(padapter, usb_set_intf_ops);
+ piopriv = &padapter->iopriv;
+ pintf = &piopriv->intf;
+ piopriv->padapter = padapter;
+ pintf->padapter = padapter;
+ pintf->pintf_dev = adapter_to_dvobj(padapter);
/* step read_chip_version */
- rtw_hal_read_chip_version(padapter);
+ rtl8188e_read_chip_version(padapter);
/* step usb endpoint mapping */
- rtw_hal_chip_configure(padapter);
+ rtl8188eu_interface_configure(padapter);
/* step read efuse/eeprom data and get mac_addr */
- rtw_hal_read_chip_info(padapter);
+ ReadAdapterInfo8188EU(padapter);
/* step 5. */
if (rtw_init_drv_sw(padapter) == _FAIL)
@@ -629,11 +406,9 @@ static struct adapter *rtw_usb_if1_init(struct dvobj_priv *dvobj,
/* alloc dev name after read efuse. */
rtw_init_netdev_name(pnetdev, padapter->registrypriv.ifname);
rtw_macaddr_cfg(padapter->eeprompriv.mac_addr);
-#ifdef CONFIG_88EU_P2P
rtw_init_wifidirect_addrs(padapter, padapter->eeprompriv.mac_addr,
padapter->eeprompriv.mac_addr);
-#endif
- memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
DBG_88E("MAC Address from pnetdev->dev_addr = %pM\n",
pnetdev->dev_addr);
@@ -676,15 +451,12 @@ static void rtw_usb_if1_deinit(struct adapter *if1)
if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_disassoc_cmd(if1, 0, false);
-#ifdef CONFIG_88EU_AP_MODE
free_mlme_ap_info(if1);
-#endif
if (if1->DriverState != DRIVER_DISAPPEAR) {
if (pnetdev) {
/* will call netdev_close() */
unregister_netdev(pnetdev);
- rtw_proc_remove_one(pnetdev);
}
}
rtw_cancel_all_timer(if1);
@@ -703,9 +475,6 @@ static int rtw_drv_init(struct usb_interface *pusb_intf, const struct usb_device
struct adapter *if1 = NULL;
struct dvobj_priv *dvobj;
- /* step 0. */
- process_spec_devid(pdid);
-
/* Initialize dvobj_priv */
dvobj = usb_dvobj_init(pusb_intf);
if (!dvobj)
@@ -760,7 +529,7 @@ static int __init rtw_drv_entry(void)
{
DBG_88E(DRV_NAME " driver version=%s\n", DRIVERVERSION);
- _rtw_mutex_init(&usb_drv->hw_init_mutex);
+ mutex_init(&usb_drv->hw_init_mutex);
usb_drv->drv_registered = true;
return usb_register(&usb_drv->usbdrv);
@@ -773,7 +542,7 @@ static void __exit rtw_drv_halt(void)
usb_drv->drv_registered = false;
usb_deregister(&usb_drv->usbdrv);
- _rtw_mutex_free(&usb_drv->hw_init_mutex);
+ mutex_destroy(&usb_drv->hw_init_mutex);
DBG_88E("-rtw_drv_halt\n");
}
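A recurring detail in the resume hunk above is that pwrpriv->lock, now a plain mutex, must be released on the pm_netdev_open() failure branch before jumping to the exit label. The general shape of that error path, with placeholder names (pwr_lock, reinit_hw) standing in for the driver's own:

#include <linux/mutex.h>
#include <linux/netdevice.h>

static DEFINE_MUTEX(pwr_lock);           /* stand-in for the driver's power-control mutex */

static int reinit_hw(struct net_device *ndev)
{
	return 0;                        /* placeholder for re-initialising the device */
}

static int resume_like_path(struct net_device *ndev)
{
	int ret = -1;

	mutex_lock(&pwr_lock);
	if (reinit_hw(ndev)) {
		mutex_unlock(&pwr_lock); /* do not leave the error path with the lock held */
		goto out;
	}
	netif_device_attach(ndev);
	netif_carrier_on(ndev);
	mutex_unlock(&pwr_lock);
	ret = 0;
out:
	return ret;
}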
diff --git a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
index 62dd4a131534..ef2ea68ae873 100644
--- a/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/r8188eu/os_dep/usb_ops_linux.c
@@ -5,7 +5,7 @@
#include "../include/drv_types.h"
#include "../include/usb_ops_linux.h"
-#include "../include/rtw_sreset.h"
+#include "../include/rtl8188e_recv.h"
unsigned int ffaddr2pipehdl(struct dvobj_priv *pdvobj, u32 addr)
{
@@ -31,20 +31,10 @@ struct zero_bulkout_context {
void *padapter;
};
-void usb_read_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *rmem)
-{
-}
-
-void usb_write_mem(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
-{
-}
-
-void usb_read_port_cancel(struct intf_hdl *pintfhdl)
+void rtw_read_port_cancel(struct adapter *padapter)
{
int i;
- struct recv_buf *precvbuf;
- struct adapter *padapter = pintfhdl->padapter;
- precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
+ struct recv_buf *precvbuf = (struct recv_buf *)padapter->recvpriv.precv_buf;
DBG_88E("%s\n", __func__);
@@ -63,7 +53,6 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
struct xmit_buf *pxmitbuf = (struct xmit_buf *)purb->context;
struct adapter *padapter = pxmitbuf->padapter;
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- struct hal_data_8188e *haldata;
switch (pxmitbuf->flags) {
case VO_QUEUE_INX:
@@ -79,9 +68,7 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
pxmitpriv->bkq_cnt--;
break;
case HIGH_QUEUE_INX:
-#ifdef CONFIG_88EU_AP_MODE
rtw_chk_hi_queue_cmd(padapter);
-#endif
break;
default:
break;
@@ -99,9 +86,7 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
if (purb->status) {
DBG_88E("###=> urb_write_port_complete status(%d)\n", purb->status);
- if ((purb->status == -EPIPE) || (purb->status == -EPROTO)) {
- sreset_set_wifi_error_status(padapter, USB_WRITE_PORT_FAIL);
- } else if (purb->status == -EINPROGRESS) {
+ if (purb->status == -EINPROGRESS) {
goto check_completion;
} else if (purb->status == -ENOENT) {
DBG_88E("%s: -ENOENT\n", __func__);
@@ -112,7 +97,7 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
} else if (purb->status == -ESHUTDOWN) {
padapter->bDriverStopped = true;
goto check_completion;
- } else {
+ } else if ((purb->status != -EPIPE) && (purb->status != -EPROTO)) {
padapter->bSurpriseRemoved = true;
DBG_88E("bSurpriseRemoved = true\n");
@@ -120,9 +105,6 @@ static void usb_write_port_complete(struct urb *purb, struct pt_regs *regs)
}
}
- haldata = GET_HAL_DATA(padapter);
- haldata->srestpriv.last_tx_complete_time = jiffies;
-
check_completion:
rtw_sctx_done_err(&pxmitbuf->sctx,
purb->status ? RTW_SCTX_DONE_WRITE_PORT_ERR :
@@ -134,14 +116,13 @@ check_completion:
}
-u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
+u32 rtw_write_port(struct adapter *padapter, u32 addr, u32 cnt, u8 *wmem)
{
unsigned long irqL;
unsigned int pipe;
int status;
u32 ret = _FAIL;
struct urb *purb = NULL;
- struct adapter *padapter = (struct adapter *)pintfhdl->padapter;
struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)wmem;
@@ -195,11 +176,7 @@ u32 usb_write_port(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *wmem)
pxmitbuf);/* context is pxmitbuf */
status = usb_submit_urb(purb, GFP_ATOMIC);
- if (!status) {
- struct hal_data_8188e *haldata = GET_HAL_DATA(padapter);
-
- haldata->srestpriv.last_tx_time = jiffies;
- } else {
+ if (status) {
rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_WRITE_PORT_ERR);
DBG_88E("usb_write_port, status =%d\n", status);
@@ -224,10 +201,9 @@ exit:
return ret;
}
-void usb_write_port_cancel(struct intf_hdl *pintfhdl)
+void rtw_write_port_cancel(struct adapter *padapter)
{
int i, j;
- struct adapter *padapter = pintfhdl->padapter;
struct xmit_buf *pxmitbuf = (struct xmit_buf *)padapter->xmitpriv.pxmitbuf;
DBG_88E("%s\n", __func__);
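With the sreset hooks gone, the write-completion handler above now treats -EPIPE/-EPROTO as transient errors rather than grounds for marking the adapter surprise-removed. For context, a generic URB completion handler typically sorts the status codes along these lines (a hedged sketch, not this driver's handler):

#include <linux/errno.h>
#include <linux/usb.h>

static void my_tx_complete(struct urb *urb)
{
	switch (urb->status) {
	case 0:
		break;                   /* transfer completed normally */
	case -ENOENT:
	case -ECONNRESET:
	case -ESHUTDOWN:
		return;                  /* unlinked or device going away: stop here */
	case -EPIPE:
	case -EPROTO:
		break;                   /* stall/protocol noise: report it, but not an unplug */
	default:
		break;                   /* other errors: driver-specific policy */
	}
	/* ... release the transmit buffer and kick the next pending frame ... */
}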
diff --git a/drivers/staging/r8188eu/os_dep/xmit_linux.c b/drivers/staging/r8188eu/os_dep/xmit_linux.c
index 565ac5be7db3..088c294f2586 100644
--- a/drivers/staging/r8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/r8188eu/os_dep/xmit_linux.c
@@ -65,10 +65,6 @@ int rtw_endofpktfile(struct pkt_file *pfile)
return false;
}
-void rtw_set_tx_chksum_offload(struct sk_buff *pkt, struct pkt_attrib *pattrib)
-{
-}
-
int rtw_os_xmit_resource_alloc(struct adapter *padapter, struct xmit_buf *pxmitbuf, u32 alloc_sz)
{
int i;
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 358b629d2cc6..7f9dee42a04d 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -360,13 +360,16 @@ static void _rtl92e_read_eeprom_info(struct net_device *dev)
priv->eeprom_CustomerID);
if (!priv->AutoloadFailFlag) {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < 6; i += 2) {
usValue = rtl92e_eeprom_read(dev,
(EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1);
- *(u16 *)(&dev->dev_addr[i]) = usValue;
+ *(u16 *)(&addr[i]) = usValue;
}
+ eth_hw_addr_set(dev, addr);
} else {
- ether_addr_copy(dev->dev_addr, bMac_Tmp_Addr);
+ eth_hw_addr_set(dev, bMac_Tmp_Addr);
}
RT_TRACE(COMP_INIT, "Permanent Address = %pM\n",
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index f75a12543781..d7630f02a910 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
@@ -184,7 +184,7 @@ void rtl92e_cam_restore(struct net_device *dev)
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
- (u8 *)dev->dev_addr, 0,
+ (const u8 *)dev->dev_addr, 0,
(u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
@@ -197,7 +197,7 @@ void rtl92e_cam_restore(struct net_device *dev)
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_set_key(dev, 4, 0,
priv->rtllib->pairwise_key_type,
- (u8 *)dev->dev_addr, 0,
+ (const u8 *)dev->dev_addr, 0,
(u32 *)(&priv->rtllib->swcamtable[4].key_buf[0]));
} else {
rtl92e_set_key(dev, 4, 0,
diff --git a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index a7dd1578b2c6..d2e9df60e9ba 100644
--- a/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -2235,7 +2235,7 @@ static int _rtl92e_set_mac_adr(struct net_device *dev, void *mac)
mutex_lock(&priv->wx_mutex);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
schedule_work(&priv->reset_wq);
mutex_unlock(&priv->wx_mutex);
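The rtl8192e hunks above all follow the same pattern: because net_device::dev_addr is const now, the MAC is assembled in a local buffer (or taken from the caller) and installed with eth_hw_addr_set() instead of being written in place. A hedged sketch of the EEPROM variant, where read_eeprom_word() is a made-up helper and the low-byte-first layout is an assumption of this sketch:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static u16 read_eeprom_word(struct net_device *dev, int word)
{
	return 0;                        /* placeholder: real code reads the adapter's EEPROM */
}

static void set_mac_from_eeprom(struct net_device *dev)
{
	u8 addr[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i += 2) {
		u16 v = read_eeprom_word(dev, i / 2);

		addr[i]     = v & 0xff;  /* assumes EEPROM words store the low byte first */
		addr[i + 1] = v >> 8;
	}
	eth_hw_addr_set(dev, addr);      /* copies ETH_ALEN bytes into dev->dev_addr */
}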
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 7dfe7a055876..97afea4c3511 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -10,8 +10,7 @@
#include "rtllib.h"
#include "rtl819x_BA.h"
-static void ActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA,
- u16 Time)
+static void ActivateBAEntry(struct ba_record *pBA, u16 Time)
{
pBA->b_valid = true;
if (Time != 0)
@@ -288,7 +287,7 @@ int rtllib_rx_ADDBAReq(struct rtllib_device *ieee, struct sk_buff *skb)
else
pBA->ba_param_set.field.buffer_size = 32;
- ActivateBAEntry(ieee, pBA, 0);
+ ActivateBAEntry(pBA, 0);
rtllib_send_ADDBARsp(ieee, dst, pBA, ADDBA_STATUS_SUCCESS);
return 0;
@@ -390,7 +389,7 @@ int rtllib_rx_ADDBARsp(struct rtllib_device *ieee, struct sk_buff *skb)
pAdmittedBA->ba_start_seq_ctrl = pPendingBA->ba_start_seq_ctrl;
pAdmittedBA->ba_param_set = *pBaParamSet;
DeActivateBAEntry(ieee, pAdmittedBA);
- ActivateBAEntry(ieee, pAdmittedBA, *pBaTimeoutVal);
+ ActivateBAEntry(pAdmittedBA, *pBaTimeoutVal);
} else {
pTS->bAddBaReqDelayed = true;
pTS->bDisable_AddBa = true;
@@ -490,7 +489,7 @@ void TsInitAddBA(struct rtllib_device *ieee, struct tx_ts_record *pTS,
pBA->ba_timeout_value = 0;
pBA->ba_start_seq_ctrl.field.seq_num = (pTS->TxCurSeq + 3) % 4096;
- ActivateBAEntry(ieee, pBA, BA_SETUP_TIMEOUT);
+ ActivateBAEntry(pBA, BA_SETUP_TIMEOUT);
rtllib_send_ADDBAReq(ieee, pTS->TsCommonInfo.Addr, pBA);
}
diff --git a/drivers/staging/rtl8192u/r8192U.h b/drivers/staging/rtl8192u/r8192U.h
index 4013107cd93a..14ca00a2789b 100644
--- a/drivers/staging/rtl8192u/r8192U.h
+++ b/drivers/staging/rtl8192u/r8192U.h
@@ -1114,6 +1114,7 @@ void rtl8192_set_rxconf(struct net_device *dev);
void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate);
void EnableHWSecurityConfig8192(struct net_device *dev);
-void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType, u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
+void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
+ const u8 *MacAddr, u8 DefaultKey, u32 *KeyContent);
#endif
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index b6698656fc01..726d7ad9408b 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -96,15 +96,12 @@ MODULE_DESCRIPTION("Linux driver for Realtek RTL8192 USB WiFi cards");
static char *ifname = "wlan%d";
static int hwwep = 1; /* default use hw. set 0 to use software security */
-static int channels = 0x3fff;
module_param(ifname, charp, 0644);
module_param(hwwep, int, 0644);
-module_param(channels, int, 0644);
MODULE_PARM_DESC(ifname, " Net interface name, wlan%d=default");
MODULE_PARM_DESC(hwwep, " Try to use hardware security support. ");
-MODULE_PARM_DESC(channels, " Channel bitmask for specific locales. NYI");
static int rtl8192_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
@@ -229,7 +226,7 @@ int write_nic_byte_E(struct net_device *dev, int indx, u8 data)
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
- indx | 0xfe00, 0, usbdata, 1, HZ / 2);
+ indx | 0xfe00, 0, usbdata, 1, 500);
kfree(usbdata);
if (status < 0) {
@@ -251,7 +248,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx | 0xfe00, 0, usbdata, 1, HZ / 2);
+ indx | 0xfe00, 0, usbdata, 1, 500);
*data = *usbdata;
kfree(usbdata);
@@ -279,7 +276,7 @@ int write_nic_byte(struct net_device *dev, int indx, u8 data)
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 1, HZ / 2);
+ usbdata, 1, 500);
kfree(usbdata);
if (status < 0) {
@@ -305,7 +302,7 @@ int write_nic_word(struct net_device *dev, int indx, u16 data)
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 2, HZ / 2);
+ usbdata, 2, 500);
kfree(usbdata);
if (status < 0) {
@@ -331,7 +328,7 @@ int write_nic_dword(struct net_device *dev, int indx, u32 data)
status = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
RTL8187_REQ_SET_REGS, RTL8187_REQT_WRITE,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 4, HZ / 2);
+ usbdata, 4, 500);
kfree(usbdata);
if (status < 0) {
@@ -355,7 +352,7 @@ int read_nic_byte(struct net_device *dev, int indx, u8 *data)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 1, HZ / 2);
+ usbdata, 1, 500);
*data = *usbdata;
kfree(usbdata);
@@ -380,7 +377,7 @@ int read_nic_word(struct net_device *dev, int indx, u16 *data)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 2, HZ / 2);
+ usbdata, 2, 500);
*data = *usbdata;
kfree(usbdata);
@@ -404,7 +401,7 @@ static int read_nic_word_E(struct net_device *dev, int indx, u16 *data)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
- indx | 0xfe00, 0, usbdata, 2, HZ / 2);
+ indx | 0xfe00, 0, usbdata, 2, 500);
*data = *usbdata;
kfree(usbdata);
@@ -430,7 +427,7 @@ int read_nic_dword(struct net_device *dev, int indx, u32 *data)
status = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
RTL8187_REQ_GET_REGS, RTL8187_REQT_READ,
(indx & 0xff) | 0xff00, (indx >> 8) & 0x0f,
- usbdata, 4, HZ / 2);
+ usbdata, 4, 500);
*data = *usbdata;
kfree(usbdata);
@@ -2303,14 +2300,17 @@ static int rtl8192_read_eeprom_info(struct net_device *dev)
/* set channelplan from eeprom */
priv->ChannelPlan = priv->eeprom_ChannelPlan;
if (bLoad_From_EEPOM) {
+ u8 addr[ETH_ALEN];
+
for (i = 0; i < 6; i += 2) {
ret = eprom_read(dev, (u16)((EEPROM_NODE_ADDRESS_BYTE_0 + i) >> 1));
if (ret < 0)
return ret;
- *(u16 *)(&dev->dev_addr[i]) = (u16)ret;
+ *(u16 *)(&addr[i]) = (u16)ret;
}
+ eth_hw_addr_set(dev, addr);
} else {
- memcpy(dev->dev_addr, bMac_Tmp_Addr, 6);
+ eth_hw_addr_set(dev, bMac_Tmp_Addr);
/* should I set IDR0 here? */
}
RT_TRACE(COMP_EPROM, "MAC addr:%pM\n", dev->dev_addr);
@@ -3048,14 +3048,14 @@ static void CamRestoreAllEntry(struct net_device *dev)
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- (u8 *)dev->dev_addr, 0, NULL);
+ (const u8 *)dev->dev_addr, 0, NULL);
else
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
MacAddr, 0, NULL);
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
- (u8 *)dev->dev_addr, 0, NULL);
+ (const u8 *)dev->dev_addr, 0, NULL);
else
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
MacAddr, 0, NULL);
@@ -3457,7 +3457,7 @@ static int r8192_set_mac_adr(struct net_device *dev, void *mac)
mutex_lock(&priv->wx_mutex);
- ether_addr_copy(dev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(dev, addr->sa_data);
schedule_work(&priv->reset_wq);
mutex_unlock(&priv->wx_mutex);
@@ -4871,7 +4871,7 @@ void EnableHWSecurityConfig8192(struct net_device *dev)
}
void setKey(struct net_device *dev, u8 entryno, u8 keyindex, u16 keytype,
- u8 *macaddr, u8 defaultkey, u32 *keycontent)
+ const u8 *macaddr, u8 defaultkey, u32 *keycontent)
{
u32 target_command = 0;
u32 target_content = 0;
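The HZ/2 → 500 conversions above rely on usb_control_msg() taking its timeout in milliseconds, not jiffies, so HZ/2 only meant 500 ms on HZ=1000 kernels. A hedged sketch of a register read with an explicit millisecond timeout; the bRequest and request-type values below are placeholders, not this hardware's:

#include <linux/slab.h>
#include <linux/usb.h>

#define MY_REG_TIMEOUT_MS 500            /* fixed timeout, independent of CONFIG_HZ */

static int read_reg_byte(struct usb_device *udev, u16 index, u8 *val)
{
	u8 *buf;
	int ret;

	buf = kmalloc(1, GFP_KERNEL);    /* DMA-safe bounce buffer, never stack memory */
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      0x05 /* placeholder bRequest */,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 1, MY_REG_TIMEOUT_MS);
	if (ret >= 0)
		*val = *buf;
	kfree(buf);
	return ret < 0 ? ret : 0;
}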
diff --git a/drivers/staging/rtl8712/ieee80211.h b/drivers/staging/rtl8712/ieee80211.h
index 61eff7c5746b..65ceaca9b51e 100644
--- a/drivers/staging/rtl8712/ieee80211.h
+++ b/drivers/staging/rtl8712/ieee80211.h
@@ -78,7 +78,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct {
int command;
@@ -91,7 +91,7 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
} u;
};
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 9502f6aa5306..d15d52c0d1a7 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -166,7 +166,7 @@ static int r871x_net_set_mac_address(struct net_device *pnetdev, void *p)
struct sockaddr *addr = p;
if (!padapter->bup)
- ether_addr_copy(pnetdev->dev_addr, addr->sa_data);
+ eth_hw_addr_set(pnetdev, addr->sa_data);
return 0;
}
@@ -381,14 +381,15 @@ static int netdev_open(struct net_device *pnetdev)
goto netdev_open_error;
if (!r8712_initmac) {
/* Use the mac address stored in the Efuse */
- memcpy(pnetdev->dev_addr,
- padapter->eeprompriv.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(pnetdev,
+ padapter->eeprompriv.mac_addr);
} else {
/* We have to inform f/w to use user-supplied MAC
* address.
*/
msleep(200);
- r8712_setMacAddr_cmd(padapter, (u8 *)pnetdev->dev_addr);
+ r8712_setMacAddr_cmd(padapter,
+ (const u8 *)pnetdev->dev_addr);
/*
* The "myid" function will get the wifi mac address
* from eeprompriv structure instead of netdev
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 75716f59044d..acda930722b2 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -554,7 +554,7 @@ void r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
r8712_enqueue_cmd(pcmdpriv, ph2c);
}
-void r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
+void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr)
{
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct cmd_obj *ph2c;
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.h b/drivers/staging/rtl8712/rtl871x_cmd.h
index bf6f0c6a86e5..ddd69c4ae208 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.h
+++ b/drivers/staging/rtl8712/rtl871x_cmd.h
@@ -718,7 +718,7 @@ struct DisconnectCtrlEx_param {
#define H2C_CMD_OVERFLOW 0x06
#define H2C_RESERVED 0x07
-void r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr);
+void r8712_setMacAddr_cmd(struct _adapter *padapter, const u8 *mac_addr);
u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
struct ndis_802_11_ssid *pssid);
int r8712_createbss_cmd(struct _adapter *padapter);
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.h b/drivers/staging/rtl8712/rtl871x_xmit.h
index 2e6afc7bb0a1..cdcbc87a3cad 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.h
+++ b/drivers/staging/rtl8712/rtl871x_xmit.h
@@ -182,11 +182,11 @@ struct sta_xmit_priv {
};
struct hw_txqueue {
- /*volatile*/ sint head;
- /*volatile*/ sint tail;
- /*volatile*/ sint free_sz; /*in units of 64 bytes*/
- /*volatile*/ sint free_cmdsz;
- /*volatile*/ sint txsz[8];
+ sint head;
+ sint tail;
+ sint free_sz; /* in units of 64 bytes */
+ sint free_cmdsz;
+ sint txsz[8];
uint ff_hwaddr;
uint cmd_hwaddr;
sint ac_tag;
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 505ebeb643dc..ee4c61f85a07 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -563,7 +563,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
dev_info(&udev->dev,
"r8712u: MAC Address from efuse = %pM\n", mac);
}
- ether_addr_copy(pnetdev->dev_addr, mac);
+ eth_hw_addr_set(pnetdev, mac);
}
/* step 6. Load the firmware asynchronously */
if (rtl871x_load_fw(padapter))
@@ -595,12 +595,12 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
/* never exit with a firmware callback pending */
wait_for_completion(&padapter->rtl8712_fw_ready);
+ if (pnetdev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(pnetdev); /* will call netdev_close() */
usb_set_intfdata(pusb_intf, NULL);
release_firmware(padapter->fw);
if (drvpriv.drv_registered)
padapter->surprise_removed = true;
- if (pnetdev->reg_state != NETREG_UNINITIALIZED)
- unregister_netdev(pnetdev); /* will call netdev_close() */
r8712_flush_rwctrl_works(padapter);
r8712_flush_led_works(padapter);
udelay(1);
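The reordering above moves unregister_netdev() ahead of clearing the interface data and releasing the firmware, so netdev_close() still sees valid driver state. The general shape of such a disconnect path, with a placeholder private struct and names:

#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/usb.h>

struct my_priv {                         /* placeholder private data, not the driver's */
	struct net_device *ndev;
	const struct firmware *fw;
};

static void my_usb_disconnect(struct usb_interface *intf)
{
	struct my_priv *priv = usb_get_intfdata(intf);

	/* unregister first: netdev_close() may still need the state torn down below */
	if (priv->ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(priv->ndev);

	usb_set_intfdata(intf, NULL);
	release_firmware(priv->fw);
	/* ... free priv and the remaining resources ... */
}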
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 655497cead12..f984a5ab2c6f 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -494,7 +494,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
memcpy(pIo_buf, pdata, len);
}
status = usb_control_msg(udev, pipe, request, reqtype, value, index,
- pIo_buf, len, HZ / 2);
+ pIo_buf, len, 500);
if (status > 0) { /* Success this control transfer. */
if (requesttype == 0x01) {
/* For Control read transfer, we have to copy the read
diff --git a/drivers/staging/rtl8723bs/Kconfig b/drivers/staging/rtl8723bs/Kconfig
index f40b3021fe8a..f23e29b679fb 100644
--- a/drivers/staging/rtl8723bs/Kconfig
+++ b/drivers/staging/rtl8723bs/Kconfig
@@ -4,6 +4,7 @@ config RTL8723BS
depends on WLAN && MMC && CFG80211
depends on m
select CFG80211_WEXT
+ select CRYPTO
select CRYPTO_LIB_ARC4
help
This option enables support for RTL8723BS SDIO drivers, such as
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 6064dd6a76b4..5478188be991 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -18,7 +18,8 @@ void init_mlme_ap_info(struct adapter *padapter)
spin_lock_init(&pmlmepriv->bcn_update_lock);
/* for ACL */
- _rtw_init_queue(&pacl_list->acl_node_q);
+ INIT_LIST_HEAD(&pacl_list->acl_node_q.queue);
+ spin_lock_init(&pacl_list->acl_node_q.lock);
/* pmlmeext->bstart_bss = false; */
@@ -891,7 +892,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
&ie_len,
(pbss_network->ie_length - _BEACON_IE_OFFSET_)
);
- if (p != NULL) {
+ if (p) {
memcpy(supportRate, p + 2, ie_len);
supportRateNum = ie_len;
}
@@ -903,7 +904,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
&ie_len,
pbss_network->ie_length - _BEACON_IE_OFFSET_
);
- if (p != NULL) {
+ if (p) {
memcpy(supportRate + supportRateNum, p + 2, ie_len);
supportRateNum += ie_len;
}
@@ -991,7 +992,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
break;
}
- if ((p == NULL) || (ie_len == 0))
+ if (!p || ie_len == 0)
break;
}
@@ -1021,7 +1022,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
break;
}
- if ((p == NULL) || (ie_len == 0))
+ if (!p || ie_len == 0)
break;
}
}
@@ -1145,7 +1146,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
psta = rtw_get_stainfo(&padapter->stapriv, pbss_network->mac_address);
if (!psta) {
psta = rtw_alloc_stainfo(&padapter->stapriv, pbss_network->mac_address);
- if (psta == NULL)
+ if (!psta)
return _FAIL;
}
@@ -1275,7 +1276,7 @@ u8 rtw_ap_set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
}
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1311,12 +1312,12 @@ static int rtw_ap_set_key(
int res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
psetkeyparm = rtw_zmalloc(sizeof(struct setkey_parm));
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
@@ -1474,11 +1475,11 @@ static void update_bcn_wps_ie(struct adapter *padapter)
&wps_ielen
);
- if (pwps_ie == NULL || wps_ielen == 0)
+ if (!pwps_ie || wps_ielen == 0)
return;
pwps_ie_src = pmlmepriv->wps_beacon_ie;
- if (pwps_ie_src == NULL)
+ if (!pwps_ie_src)
return;
wps_offset = (uint)(pwps_ie - ie);
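Where init_mlme_ap_info() used the _rtw_init_queue() wrapper, the queue is now initialised with the two kernel primitives the wrapper hid. Generic shape of that open-coding, with struct job_queue as a stand-in for the driver's struct __queue:

#include <linux/list.h>
#include <linux/spinlock.h>

struct job_queue {                       /* placeholder for struct __queue */
	struct list_head queue;
	spinlock_t lock;
};

static void job_queue_init(struct job_queue *q)
{
	INIT_LIST_HEAD(&q->queue);       /* start with an empty doubly-linked list */
	spin_lock_init(&q->lock);        /* lock protecting enqueue/dequeue */
}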
diff --git a/drivers/staging/rtl8723bs/core/rtw_cmd.c b/drivers/staging/rtl8723bs/core/rtw_cmd.c
index d494c06dab96..639459d52261 100644
--- a/drivers/staging/rtl8723bs/core/rtw_cmd.c
+++ b/drivers/staging/rtl8723bs/core/rtw_cmd.c
@@ -166,7 +166,8 @@ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv)
init_completion(&pcmdpriv->cmd_queue_comp);
init_completion(&pcmdpriv->terminate_cmdthread_comp);
- _rtw_init_queue(&(pcmdpriv->cmd_queue));
+ INIT_LIST_HEAD(&pcmdpriv->cmd_queue.queue);
+ spin_lock_init(&pcmdpriv->cmd_queue.lock);
/* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */
@@ -255,7 +256,7 @@ int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj)
{
unsigned long irqL;
- if (obj == NULL)
+ if (!obj)
goto exit;
/* spin_lock_bh(&queue->lock); */
@@ -277,10 +278,10 @@ struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue)
/* spin_lock_bh(&(queue->lock)); */
spin_lock_irqsave(&queue->lock, irqL);
- if (list_empty(&(queue->queue)))
+ if (list_empty(&queue->queue))
obj = NULL;
else {
- obj = container_of(get_next(&(queue->queue)), struct cmd_obj, list);
+ obj = container_of(get_next(&queue->queue), struct cmd_obj, list);
list_del_init(&obj->list);
}
@@ -308,22 +309,19 @@ int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan))
bAllow = true;
- if ((pcmdpriv->padapter->hw_init_completed == false && bAllow == false)
- || atomic_read(&(pcmdpriv->cmdthd_running)) == false /* com_thread not running */
- )
+ if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) ||
+ !atomic_read(&pcmdpriv->cmdthd_running)) /* com_thread not running */
return _FAIL;
return _SUCCESS;
}
-
-
int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj)
{
int res = _FAIL;
struct adapter *padapter = pcmdpriv->padapter;
- if (cmd_obj == NULL)
+ if (!cmd_obj)
goto exit;
cmd_obj->padapter = padapter;
@@ -367,11 +365,10 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd)
kfree(pcmd);
}
-
void rtw_stop_cmd_thread(struct adapter *adapter)
{
if (adapter->cmdThread &&
- atomic_read(&(adapter->cmdpriv.cmdthd_running)) == true &&
+ atomic_read(&adapter->cmdpriv.cmdthd_running) &&
adapter->cmdpriv.stop_req == 0) {
adapter->cmdpriv.stop_req = 1;
complete(&adapter->cmdpriv.cmd_queue_comp);
@@ -387,7 +384,7 @@ int rtw_cmd_thread(void *context)
u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf);
void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd);
struct adapter *padapter = context;
- struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);
+ struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct drvextra_cmd_parm *extra_parm = NULL;
thread_enter("RTW_CMD_THREAD");
@@ -395,7 +392,7 @@ int rtw_cmd_thread(void *context)
pcmdbuf = pcmdpriv->cmd_buf;
pcmdpriv->stop_req = 0;
- atomic_set(&(pcmdpriv->cmdthd_running), true);
+ atomic_set(&pcmdpriv->cmdthd_running, true);
complete(&pcmdpriv->terminate_cmdthread_comp);
while (1) {
@@ -406,7 +403,7 @@ int rtw_cmd_thread(void *context)
break;
}
- if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) {
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
netdev_dbg(padapter->pnetdev,
"%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
__func__, padapter->bDriverStopped,
@@ -429,7 +426,7 @@ int rtw_cmd_thread(void *context)
continue;
_next:
- if ((padapter->bDriverStopped == true) || (padapter->bSurpriseRemoved == true)) {
+ if (padapter->bDriverStopped || padapter->bSurpriseRemoved) {
netdev_dbg(padapter->pnetdev,
"%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n",
__func__, padapter->bDriverStopped,
@@ -471,7 +468,7 @@ _next:
post_process:
- if (mutex_lock_interruptible(&(pcmd->padapter->cmdpriv.sctx_mutex)) == 0) {
+ if (mutex_lock_interruptible(&pcmd->padapter->cmdpriv.sctx_mutex) == 0) {
if (pcmd->sctx) {
netdev_dbg(padapter->pnetdev,
FUNC_ADPT_FMT " pcmd->sctx\n",
@@ -482,13 +479,13 @@ post_process:
else
rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR);
}
- mutex_unlock(&(pcmd->padapter->cmdpriv.sctx_mutex));
+ mutex_unlock(&pcmd->padapter->cmdpriv.sctx_mutex);
}
/* call callback function for post-processed */
if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) {
pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL) {
+ if (!pcmd_callback) {
rtw_free_cmd_obj(pcmd);
} else {
/* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */
@@ -497,17 +494,14 @@ post_process:
} else {
rtw_free_cmd_obj(pcmd);
}
-
flush_signals_thread();
-
goto _next;
-
}
/* free all cmd_obj resources */
do {
pcmd = rtw_dequeue_cmd(pcmdpriv);
- if (pcmd == NULL) {
+ if (!pcmd) {
rtw_unregister_cmd_alive(padapter);
break;
}
@@ -522,7 +516,7 @@ post_process:
} while (1);
complete(&pcmdpriv->terminate_cmdthread_comp);
- atomic_set(&(pcmdpriv->cmdthd_running), false);
+ atomic_set(&pcmdpriv->cmdthd_running, false);
thread_exit();
}
@@ -542,15 +536,15 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ if (check_fwstate(pmlmepriv, _FW_LINKED))
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -591,7 +585,6 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
if (res == _SUCCESS) {
-
pmlmepriv->scan_start_time = jiffies;
_set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT);
} else {
@@ -608,13 +601,13 @@ u8 rtw_setdatarate_cmd(struct adapter *padapter, u8 *rateset)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pbsetdataratepara = rtw_zmalloc(sizeof(struct setdatarate_parm));
- if (pbsetdataratepara == NULL) {
+ if (!pbsetdataratepara) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -644,7 +637,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -677,7 +670,7 @@ int rtw_startbss_cmd(struct adapter *padapter, int flags)
} else {
/* need enqueue, prepare cmd_obj and enqueue */
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -724,12 +717,12 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
struct ht_priv *phtpriv = &pmlmepriv->htpriv;
enum ndis_802_11_network_infrastructure ndis_network_mode = pnetwork->network.infrastructure_mode;
struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 tmp_len;
u8 *ptmp = NULL;
pcmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -752,7 +745,6 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
case Ndis802_11AutoUnknown:
case Ndis802_11InfrastructureMax:
break;
-
}
}
@@ -775,7 +767,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
/* If not, we have to copy the connecting AP's MAC address to it so that */
/* the driver just has the bssid information for PMKIDList searching. */
- if (pmlmepriv->assoc_by_bssid == false)
+ if (!pmlmepriv->assoc_by_bssid)
memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.mac_address[0], ETH_ALEN);
psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length);
@@ -841,7 +833,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* prepare cmd parameter */
param = rtw_zmalloc(sizeof(*param));
- if (param == NULL) {
+ if (!param) {
res = _FAIL;
goto exit;
}
@@ -850,7 +842,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
cmdobj = rtw_zmalloc(sizeof(*cmdobj));
- if (cmdobj == NULL) {
+ if (!cmdobj) {
res = _FAIL;
kfree(param);
goto exit;
@@ -878,7 +870,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrast
psetop = rtw_zmalloc(sizeof(struct setopmode_parm));
- if (psetop == NULL) {
+ if (!psetop) {
res = _FAIL;
goto exit;
}
@@ -886,7 +878,7 @@ u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrast
if (enqueue) {
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
kfree(psetop);
res = _FAIL;
goto exit;
@@ -914,7 +906,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
u8 res = _SUCCESS;
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
res = _FAIL;
goto exit;
}
@@ -922,11 +914,11 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
- psetstakey_para->algorithm = (unsigned char) psecuritypriv->dot11PrivacyAlgrthm;
+ psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm;
else
GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false);
- if (unicast_key == true)
+ if (unicast_key)
memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16);
else
memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16);
@@ -936,14 +928,14 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
if (enqueue) {
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
kfree(psetstakey_para);
res = _FAIL;
goto exit;
}
psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
res = _FAIL;
@@ -951,7 +943,7 @@ u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
} else {
@@ -981,20 +973,20 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueu
}
} else {
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm));
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp));
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
res = _FAIL;
@@ -1002,7 +994,7 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueu
}
init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_);
- ph2c->rsp = (u8 *) psetstakey_rsp;
+ ph2c->rsp = (u8 *)psetstakey_rsp;
ph2c->rspsz = sizeof(struct set_stakey_rsp);
memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN);
@@ -1010,9 +1002,7 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueu
psetstakey_para->algorithm = _NO_PRIVACY_;
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
}
-
exit:
return res;
}
@@ -1026,13 +1016,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
paddbareq_parm = rtw_zmalloc(sizeof(struct addBaReq_parm));
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1058,13 +1048,13 @@ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1080,7 +1070,6 @@ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter)
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
exit:
return res;
}
@@ -1093,13 +1082,13 @@ u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1112,10 +1101,8 @@ u8 rtw_free_assoc_resources_cmd(struct adapter *padapter)
init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra));
-
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
exit:
return res;
}
@@ -1129,13 +1116,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
/* only primary padapter does this cmd */
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1150,7 +1137,6 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
/* rtw_enqueue_cmd(pcmdpriv, ph2c); */
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
-
exit:
return res;
}
@@ -1164,7 +1150,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
u8 res = _SUCCESS;
/* check if allow software config */
- if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter) == true) {
+ if (swconfig && rtw_hal_is_disable_sw_channel_plan(padapter)) {
res = _FAIL;
goto exit;
}
@@ -1177,7 +1163,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
/* prepare cmd parameter */
setChannelPlan_param = rtw_zmalloc(sizeof(struct SetChannelPlan_param));
- if (setChannelPlan_param == NULL) {
+ if (!setChannelPlan_param) {
res = _FAIL;
goto exit;
}
@@ -1186,7 +1172,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue, u8 swconf
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
pcmdobj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmdobj == NULL) {
+ if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
@@ -1243,14 +1229,14 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false;
u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false;
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
collect_traffic_statistics(padapter);
/* */
/* Determine if our traffic is busy now */
/* */
- if ((check_fwstate(pmlmepriv, _FW_LINKED) == true)
+ if ((check_fwstate(pmlmepriv, _FW_LINKED))
/*&& !MgntInitAdapterInProgress(pMgntInfo)*/) {
/* if we raise bBusyTraffic in last watchdog, using lower threshold. */
if (pmlmepriv->LinkDetectInfo.bBusyTraffic)
@@ -1282,7 +1268,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
(pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
bEnterPS = false;
- if (bBusyTraffic == true) {
+ if (bBusyTraffic) {
if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount <= 4)
pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 4;
@@ -1315,7 +1301,7 @@ u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer)
struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
int n_assoc_iface = 0;
- if (check_fwstate(&(dvobj->padapters->mlmepriv), WIFI_ASOC_STATE))
+ if (check_fwstate(&dvobj->padapters->mlmepriv, WIFI_ASOC_STATE))
n_assoc_iface++;
if (!from_timer && n_assoc_iface == 0)
@@ -1340,21 +1326,18 @@ static void dynamic_chk_wk_hdl(struct adapter *padapter)
{
struct mlme_priv *pmlmepriv;
- pmlmepriv = &(padapter->mlmepriv);
+ pmlmepriv = &padapter->mlmepriv;
- if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)
+ if (check_fwstate(pmlmepriv, WIFI_AP_STATE))
expire_timeout_chk(padapter);
/* for debug purpose */
_linked_info_dump(padapter);
-
-
/* if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING|_FW_UNDER_SURVEY) ==false) */
{
linked_status_chk(padapter);
traffic_status_watchdog(padapter, 0);
}
-
rtw_hal_dm_watchdog(padapter);
/* check_hw_pbc(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type); */
@@ -1374,11 +1357,11 @@ void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type);
void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
{
struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter);
- struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
+ struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 mstatus;
- if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)
- || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) {
+ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) ||
+ check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
return;
}
@@ -1386,7 +1369,7 @@ void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type)
case LPS_CTRL_SCAN:
hal_btcoex_ScanNotify(padapter, true);
- if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
+ if (check_fwstate(pmlmepriv, _FW_LINKED)) {
/* connect */
LPS_Leave(padapter, "LPS_CTRL_SCAN");
}
@@ -1436,13 +1419,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
if (enqueue) {
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1478,13 +1461,13 @@ u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1500,9 +1483,7 @@ u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter)
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-
return res;
-
}
static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
@@ -1512,7 +1493,7 @@ static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim)
if (dtim <= 0 || dtim > 16)
return;
- if (hal_btcoex_IsBtControlLps(padapter) == true)
+ if (hal_btcoex_IsBtControlLps(padapter))
return;
mutex_lock(&pwrpriv->lock);
@@ -1542,15 +1523,14 @@ u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1577,15 +1557,14 @@ u8 rtw_ps_cmd(struct adapter *padapter)
struct drvextra_cmd_parm *pdrvextra_cmd_parm;
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
u8 res = _SUCCESS;
-
ppscmd = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ppscmd == NULL) {
+ if (!ppscmd) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ppscmd);
res = _FAIL;
goto exit;
@@ -1618,7 +1597,7 @@ static void rtw_chk_hi_queue_hdl(struct adapter *padapter)
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
- while (false == empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) {
+ while (!empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) {
msleep(100);
rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty);
}
@@ -1651,13 +1630,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1673,9 +1652,7 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
res = rtw_enqueue_cmd(pcmdpriv, ph2c);
exit:
-
return res;
-
}
struct btinfo {
@@ -1745,13 +1722,13 @@ u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1780,13 +1757,13 @@ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1840,7 +1817,7 @@ static void c2h_wk_callback(struct work_struct *work)
continue;
}
- if (ccx_id_filter(c2h_evt) == true) {
+ if (ccx_id_filter(c2h_evt)) {
/* Handle CCX report here */
rtw_hal_c2h_handler(adapter, c2h_evt);
kfree(c2h_evt);
@@ -1959,9 +1936,9 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
struct wlan_network *pwlan = NULL;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf;
- struct wlan_network *tgt_network = &(pmlmepriv->cur_network);
+ struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- if (pcmd->parmbuf == NULL)
+ if (!pcmd->parmbuf)
goto exit;
if (pcmd->res != H2C_SUCCESS)
@@ -1983,20 +1960,20 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
rtw_indicate_connect(padapter);
} else {
pwlan = rtw_alloc_network(pmlmepriv);
- spin_lock_bh(&(pmlmepriv->scanned_queue.lock));
- if (pwlan == NULL) {
+ spin_lock_bh(&pmlmepriv->scanned_queue.lock);
+ if (!pwlan) {
pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue);
- if (pwlan == NULL) {
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ if (!pwlan) {
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
goto createbss_cmd_fail;
}
pwlan->last_scanned = jiffies;
} else {
- list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue);
+ list_add_tail(&pwlan->list, &pmlmepriv->scanned_queue.queue);
}
pnetwork->length = get_wlan_bssid_ex_sz(pnetwork);
- memcpy(&(pwlan->network), pnetwork, pnetwork->length);
+ memcpy(&pwlan->network, pnetwork, pnetwork->length);
/* pwlan->fixed = true; */
/* list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); */
@@ -2009,7 +1986,7 @@ void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd)
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
/* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */
}
@@ -2021,13 +1998,10 @@ exit:
rtw_free_cmd_obj(pcmd);
}
-
-
void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd)
{
-
struct sta_priv *pstapriv = &padapter->stapriv;
- struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *) (pcmd->rsp);
+ struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr);
if (!psta)
@@ -2042,7 +2016,7 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
struct sta_priv *pstapriv = &padapter->stapriv;
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf);
- struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *) (pcmd->rsp);
+ struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp);
struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr);
if (!psta)
@@ -2053,7 +2027,7 @@ void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *
spin_lock_bh(&pmlmepriv->lock);
- if ((check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) && (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true))
+ if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_UNDER_LINKING))
_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
set_fwstate(pmlmepriv, _FW_LINKED);
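
Most of the rtw_cmd.c and rtw_ioctl_set.c hunks above are checkpatch-style cleanups that drop explicit comparisons against NULL and true. A minimal, self-contained sketch of the preferred spelling; the function and parameter names here are illustrative, not the driver's:

#include <linux/types.h>

/* Illustrative only: test pointers and booleans directly. */
static int enqueue_cmd_sketch(void *cmd_obj, bool fw_linked)
{
	if (!cmd_obj)		/* preferred over: if (cmd_obj == NULL) */
		return -1;

	if (fw_linked)		/* preferred over: if (fw_linked == true) */
		return 0;

	return 1;
}
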
diff --git a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
index 5cfde7176617..8c11daff2d59 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ioctl_set.c
@@ -370,7 +370,7 @@ u8 rtw_set_802_11_bssid_list_scan(struct adapter *padapter, struct ndis_802_11_s
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
u8 res = true;
- if (padapter == NULL) {
+ if (!padapter) {
res = false;
goto exit;
}
@@ -481,7 +481,7 @@ u16 rtw_get_cur_max_rate(struct adapter *adapter)
return 0;
psta = rtw_get_stainfo(&adapter->stapriv, get_bssid(pmlmepriv));
- if (psta == NULL)
+ if (!psta)
return 0;
short_GI = query_ra_short_GI(psta);
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index ab6a24d70cc9..9202223ebc0c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -27,8 +27,10 @@ int rtw_init_mlme_priv(struct adapter *padapter)
pmlmepriv->scan_mode = SCAN_ACTIVE;/* 1: active, 0: pasive. Maybe someday we should rename this varable to "active_mode" (Jeff) */
spin_lock_init(&pmlmepriv->lock);
- _rtw_init_queue(&pmlmepriv->free_bss_pool);
- _rtw_init_queue(&pmlmepriv->scanned_queue);
+ INIT_LIST_HEAD(&pmlmepriv->free_bss_pool.queue);
+ spin_lock_init(&pmlmepriv->free_bss_pool.lock);
+ INIT_LIST_HEAD(&pmlmepriv->scanned_queue.queue);
+ spin_lock_init(&pmlmepriv->scanned_queue.lock);
set_scanned_network_val(pmlmepriv, 0);
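
The hunk above open-codes the driver's _rtw_init_queue() helper as the two standard calls it wrapped; the same substitution repeats in the rtw_recv.c, rtw_sta_mgt.c, rtw_xmit.c and rtl8723bs_recv.c hunks further down, and the helper itself is removed from osdep_service.c. A minimal sketch of the pattern, assuming a queue with the same two members as the driver's struct __queue:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch of the driver's struct __queue: a list head plus the lock guarding it. */
struct queue_sketch {
	struct list_head queue;
	spinlock_t lock;
};

static inline void queue_sketch_init(struct queue_sketch *q)
{
	INIT_LIST_HEAD(&q->queue);	/* empty circular list */
	spin_lock_init(&q->lock);	/* lock protecting that list */
}
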
@@ -439,7 +441,7 @@ struct wlan_network *rtw_get_oldest_wlan_network(struct __queue *scanned_queue)
pwlan = list_entry(plist, struct wlan_network, list);
if (!pwlan->fixed) {
- if (oldest == NULL || time_after(oldest->last_scanned, pwlan->last_scanned))
+ if (!oldest || time_after(oldest->last_scanned, pwlan->last_scanned))
oldest = pwlan;
}
}
@@ -542,7 +544,7 @@ void rtw_update_scanned_network(struct adapter *adapter, struct wlan_bssid_ex *t
/* TODO: don't select network in the same ess as oldest if it's new enough*/
}
- if (oldest == NULL || time_after(oldest->last_scanned, pnetwork->last_scanned))
+ if (!oldest || time_after(oldest->last_scanned, pnetwork->last_scanned))
oldest = pnetwork;
}
@@ -897,7 +899,6 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
{
struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
struct wlan_network *tgt_network = &pmlmepriv->cur_network;
- struct sta_priv *pstapriv = &adapter->stapriv;
struct dvobj_priv *psdpriv = adapter->dvobj;
struct debug_priv *pdbgpriv = &psdpriv->drv_dbg;
@@ -905,11 +906,7 @@ void rtw_free_assoc_resources(struct adapter *adapter, int lock_scanned_queue)
struct sta_info *psta;
psta = rtw_get_stainfo(&adapter->stapriv, tgt_network->network.mac_address);
- spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(adapter, psta);
-
- spin_unlock_bh(&(pstapriv->sta_hash_lock));
-
}
if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) {
@@ -1239,16 +1236,13 @@ void rtw_joinbss_event_prehandle(struct adapter *adapter, u8 *pbuf)
rtw_indicate_connect(adapter);
}
+ spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
+
/* s5. Cancel assoc_timer */
del_timer_sync(&pmlmepriv->assoc_timer);
-
} else {
spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
- goto ignore_joinbss_callback;
}
-
- spin_unlock_bh(&(pmlmepriv->scanned_queue.lock));
-
} else if (pnetwork->join_res == -4) {
rtw_reset_securitypriv(adapter);
_set_timer(&pmlmepriv->assoc_timer, 1);
@@ -1820,7 +1814,7 @@ static int rtw_check_join_candidate(struct mlme_priv *mlme
goto exit;
}
- if (*candidate == NULL || (*candidate)->network.rssi < competitor->network.rssi) {
+ if (!*candidate || (*candidate)->network.rssi < competitor->network.rssi) {
*candidate = competitor;
updated = true;
}
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
index 375d2a742dd2..0f82f5031c43 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme_ext.c
@@ -742,11 +742,11 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
}
pstat = rtw_get_stainfo(pstapriv, sa);
- if (pstat == NULL) {
+ if (!pstat) {
/* allocate a new one */
pstat = rtw_alloc_stainfo(pstapriv, sa);
- if (pstat == NULL) {
+ if (!pstat) {
status = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA;
goto auth_fail;
}
@@ -808,13 +808,12 @@ unsigned int OnAuth(struct adapter *padapter, union recv_frame *precv_frame)
pstat->state &= ~WIFI_FW_AUTH_NULL;
pstat->state |= WIFI_FW_AUTH_STATE;
pstat->authalg = algorithm;
- pstat->auth_seq = 2;
} else if (seq == 3) {
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + 4 + _AUTH_IE_OFFSET_, WLAN_EID_CHALLENGE, (int *)&ie_len,
len - WLAN_HDR_A3_LEN - _AUTH_IE_OFFSET_ - 4);
- if ((p == NULL) || (ie_len <= 0)) {
+ if (!p || ie_len <= 0) {
status = WLAN_STATUS_CHALLENGE_FAIL;
goto auth_fail;
}
@@ -1034,7 +1033,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
/* check if the supported rate is ok */
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_SUPP_RATES, &ie_len, pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p == NULL) {
+ if (!p) {
/* use our own rate set as statoin used */
/* memcpy(supportRate, AP_BSSRATE, AP_BSSRATE_LEN); */
/* supportRateNum = AP_BSSRATE_LEN; */
@@ -1047,7 +1046,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
p = rtw_get_ie(pframe + WLAN_HDR_A3_LEN + ie_offset, WLAN_EID_EXT_SUPP_RATES, &ie_len,
pkt_len - WLAN_HDR_A3_LEN - ie_offset);
- if (p != NULL) {
+ if (p) {
if (supportRateNum <= sizeof(supportRate)) {
memcpy(supportRate+supportRateNum, p+2, ie_len);
@@ -1294,7 +1293,7 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
/* get a unique AID */
if (pstat->aid == 0) {
for (pstat->aid = 1; pstat->aid <= NUM_STA; pstat->aid++)
- if (pstapriv->sta_aid[pstat->aid - 1] == NULL)
+ if (!pstapriv->sta_aid[pstat->aid - 1])
break;
/* if (pstat->aid > NUM_STA) { */
@@ -1344,12 +1343,8 @@ unsigned int OnAssocReq(struct adapter *padapter, union recv_frame *precv_frame)
issue_asocrsp(padapter, status, pstat, WIFI_REASSOCRSP);
spin_lock_bh(&pstat->lock);
- if (pstat->passoc_req) {
- kfree(pstat->passoc_req);
- pstat->passoc_req = NULL;
- pstat->assoc_req_len = 0;
- }
-
+ kfree(pstat->passoc_req);
+ pstat->assoc_req_len = 0;
pstat->passoc_req = rtw_zmalloc(pkt_len);
if (pstat->passoc_req) {
memcpy(pstat->passoc_req, pframe, pkt_len);
@@ -1489,9 +1484,7 @@ unsigned int OnDeAuth(struct adapter *padapter, union recv_frame *precv_frame)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- /* spin_lock_bh(&(pstapriv->sta_hash_lock)); */
/* rtw_free_stainfo(padapter, psta); */
- /* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */
netdev_dbg(padapter->pnetdev,
"ap recv deauth reason code(%d) sta:%pM\n", reason,
@@ -1565,9 +1558,7 @@ unsigned int OnDisassoc(struct adapter *padapter, union recv_frame *precv_frame)
struct sta_info *psta;
struct sta_priv *pstapriv = &padapter->stapriv;
- /* spin_lock_bh(&(pstapriv->sta_hash_lock)); */
/* rtw_free_stainfo(padapter, psta); */
- /* spin_unlock_bh(&(pstapriv->sta_hash_lock)); */
netdev_dbg(padapter->pnetdev,
"ap recv disassoc reason code(%d) sta:%pM\n",
@@ -1944,7 +1935,7 @@ static struct xmit_frame *_alloc_mgtxmitframe(struct xmit_priv *pxmitpriv, bool
goto exit;
pxmitbuf = rtw_alloc_xmitbuf_ext(pxmitpriv);
- if (pxmitbuf == NULL) {
+ if (!pxmitbuf) {
rtw_free_xmitframe(pxmitpriv, pmgntframe);
pmgntframe = NULL;
goto exit;
@@ -2297,7 +2288,7 @@ void issue_probersp(struct adapter *padapter, unsigned char *da, u8 is_valid_p2p
struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
unsigned int rate_len;
- if (da == NULL)
+ if (!da)
return;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
@@ -2621,7 +2612,7 @@ void issue_auth(struct adapter *padapter, struct sta_info *psta, unsigned short
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
return;
/* update attribute */
@@ -2752,7 +2743,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
__le16 lestatus, le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
return;
/* update attribute */
@@ -2840,7 +2831,7 @@ void issue_asocrsp(struct adapter *padapter, unsigned short status, struct sta_i
break;
}
- if ((pbuf == NULL) || (ie_len == 0)) {
+ if (!pbuf || ie_len == 0) {
break;
}
}
@@ -2884,7 +2875,7 @@ void issue_assocreq(struct adapter *padapter)
u8 vs_ie_length = 0;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
goto exit;
/* update attribute */
@@ -3061,7 +3052,7 @@ static int _issue_nulldata(struct adapter *padapter, unsigned char *da,
pmlmeinfo = &(pmlmeext->mlmext_info);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
goto exit;
/* update attribute */
@@ -3200,7 +3191,7 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL)
+ if (!pmgntframe)
goto exit;
/* update attribute */
@@ -3226,9 +3217,6 @@ static int _issue_qos_nulldata(struct adapter *padapter, unsigned char *da,
else if ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE)
SetToDs(fctrl);
- if (pattrib->mdata)
- SetMData(fctrl);
-
qc = (unsigned short *)(pframe + pattrib->hdrlen - 2);
SetPriority(qc, tid);
@@ -3313,7 +3301,7 @@ static int _issue_deauth(struct adapter *padapter, unsigned char *da,
__le16 le_tmp;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL) {
+ if (!pmgntframe) {
goto exit;
}
@@ -3639,7 +3627,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
action = ACT_PUBLIC_BSSCOEXIST;
pmgntframe = alloc_mgtxmitframe(pxmitpriv);
- if (pmgntframe == NULL) {
+ if (!pmgntframe) {
return;
}
@@ -3706,7 +3694,7 @@ static void issue_action_BSSCoexistPacket(struct adapter *padapter)
pbss_network = (struct wlan_bssid_ex *)&pnetwork->network;
p = rtw_get_ie(pbss_network->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, pbss_network->ie_length - _FIXED_IE_LENGTH_);
- if ((p == NULL) || (len == 0)) {/* non-HT */
+ if (!p || len == 0) {/* non-HT */
if (pbss_network->configuration.ds_config <= 0)
continue;
@@ -3769,7 +3757,7 @@ unsigned int send_delba(struct adapter *padapter, u8 initiator, u8 *addr)
return _SUCCESS;
psta = rtw_get_stainfo(pstapriv, addr);
- if (psta == NULL)
+ if (!psta)
return _SUCCESS;
if (initiator == 0) {/* recipient */
@@ -4641,13 +4629,13 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd_obj == NULL) {
+ if (!pcmd_obj) {
return;
}
cmdsz = (sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4693,12 +4681,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = rtw_zmalloc(sizeof(struct cmd_obj));
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = (sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header));
pevtcmd = rtw_zmalloc(cmdsz);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -5147,12 +5135,12 @@ void survey_timer_hdl(struct timer_list *t)
}
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
goto exit_survey_timer_hdl;
}
psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm));
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
}
@@ -5781,7 +5769,7 @@ u8 chk_bmc_sleepq_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
@@ -5805,13 +5793,13 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
int len_diff = 0;
ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
ptxBeacon_parm = rtw_zmalloc(sizeof(struct Tx_Beacon_param));
- if (ptxBeacon_parm == NULL) {
+ if (!ptxBeacon_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -5871,7 +5859,7 @@ u8 mlme_evt_hdl(struct adapter *padapter, unsigned char *pbuf)
void (*event_callback)(struct adapter *dev, u8 *pbuf);
struct evt_priv *pevt_priv = &(padapter->evtpriv);
- if (pbuf == NULL)
+ if (!pbuf)
goto _abort_event_;
peventbuf = (uint *)pbuf;
@@ -5919,7 +5907,6 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
struct sta_info *psta_bmc;
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct sta_priv *pstapriv = &padapter->stapriv;
/* for BC/MC Frames */
@@ -5930,8 +5917,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
if ((pstapriv->tim_bitmap&BIT(0)) && (psta_bmc->sleepq_len > 0)) {
msleep(10);/* 10ms, ATIM(HIQ) Windows */
- /* spin_lock_bh(&psta_bmc->sleep_q.lock); */
- spin_lock_bh(&pxmitpriv->lock);
+ spin_lock_bh(&psta_bmc->sleep_q.lock);
xmitframe_phead = get_list_head(&psta_bmc->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -5954,8 +5940,7 @@ u8 chk_bmc_sleepq_hdl(struct adapter *padapter, unsigned char *pbuf)
rtw_hal_xmitframe_enqueue(padapter, pxmitframe);
}
- /* spin_unlock_bh(&psta_bmc->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
+ spin_unlock_bh(&psta_bmc->sleep_q.lock);
/* check hi queue and bmc_sleepq */
rtw_chk_hi_queue_cmd(padapter);
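
The chk_bmc_sleepq_hdl() hunk above, like the rtw_recv.c and rtw_xmit.c hunks below, moves the sleep-queue walks from the global pxmitpriv->lock to the per-station sleep_q.lock. A minimal sketch of draining a list under its own lock; the frame type and the handling step are placeholders, not the driver's:

#include <linux/list.h>
#include <linux/spinlock.h>

struct frame_sketch {
	struct list_head list;
};

/* Drain every entry queued on @head while holding only the queue's own @lock. */
static void drain_sleep_queue(struct list_head *head, spinlock_t *lock)
{
	struct list_head *pos, *tmp;
	struct frame_sketch *f;

	spin_lock_bh(lock);
	list_for_each_safe(pos, tmp, head) {
		f = list_entry(pos, struct frame_sketch, list);
		list_del_init(&f->list);	/* unlink before handing the frame on */
		/* ... pass f to the transmit path ... */
	}
	spin_unlock_bh(lock);
}
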
diff --git a/drivers/staging/rtl8723bs/core/rtw_recv.c b/drivers/staging/rtl8723bs/core/rtw_recv.c
index 5b0a596eefb7..41bfca549c64 100644
--- a/drivers/staging/rtl8723bs/core/rtw_recv.c
+++ b/drivers/staging/rtl8723bs/core/rtw_recv.c
@@ -25,7 +25,8 @@ void _rtw_init_sta_recv_priv(struct sta_recv_priv *psta_recvpriv)
/* for (i = 0; i<MAX_RX_NUMBLKS; i++) */
/* _rtw_init_queue(&psta_recvpriv->blk_strms[i]); */
- _rtw_init_queue(&psta_recvpriv->defrag_q);
+ INIT_LIST_HEAD(&psta_recvpriv->defrag_q.queue);
+ spin_lock_init(&psta_recvpriv->defrag_q.lock);
}
signed int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *padapter)
@@ -36,9 +37,12 @@ signed int _rtw_init_recv_priv(struct recv_priv *precvpriv, struct adapter *pada
spin_lock_init(&precvpriv->lock);
- _rtw_init_queue(&precvpriv->free_recv_queue);
- _rtw_init_queue(&precvpriv->recv_pending_queue);
- _rtw_init_queue(&precvpriv->uc_swdec_pending_queue);
+ INIT_LIST_HEAD(&precvpriv->free_recv_queue.queue);
+ spin_lock_init(&precvpriv->free_recv_queue.lock);
+ INIT_LIST_HEAD(&precvpriv->recv_pending_queue.queue);
+ spin_lock_init(&precvpriv->recv_pending_queue.lock);
+ INIT_LIST_HEAD(&precvpriv->uc_swdec_pending_queue.queue);
+ spin_lock_init(&precvpriv->uc_swdec_pending_queue.lock);
precvpriv->adapter = padapter;
@@ -953,10 +957,8 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
if ((psta->state&WIFI_SLEEP_STATE) && (pstapriv->sta_dz_bitmap&BIT(psta->aid))) {
struct list_head *xmitframe_plist, *xmitframe_phead;
struct xmit_frame *pxmitframe = NULL;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- /* spin_lock_bh(&psta->sleep_q.lock); */
- spin_lock_bh(&pxmitpriv->lock);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
xmitframe_plist = get_next(xmitframe_phead);
@@ -987,12 +989,10 @@ static signed int validate_recv_ctrl_frame(struct adapter *padapter, union recv_
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
}
- /* spin_unlock_bh(&psta->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
+ spin_unlock_bh(&psta->sleep_q.lock);
} else {
- /* spin_unlock_bh(&psta->sleep_q.lock); */
- spin_unlock_bh(&pxmitpriv->lock);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (pstapriv->tim_bitmap&BIT(psta->aid)) {
if (psta->sleepq_len == 0) {
diff --git a/drivers/staging/rtl8723bs/core/rtw_security.c b/drivers/staging/rtl8723bs/core/rtw_security.c
index b050bf62e3b9..ac731415f733 100644
--- a/drivers/staging/rtl8723bs/core/rtw_security.c
+++ b/drivers/staging/rtl8723bs/core/rtw_security.c
@@ -51,7 +51,7 @@ void rtw_wep_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
- if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ if (!((struct xmit_frame *)pxmitframe)->buf_addr)
return;
hw_hdr_offset = TXDESC_OFFSET;
@@ -476,7 +476,7 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
struct arc4_ctx *ctx = &psecuritypriv->xmit_arc4_ctx;
u32 res = _SUCCESS;
- if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ if (!((struct xmit_frame *)pxmitframe)->buf_addr)
return _FAIL;
hw_hdr_offset = TXDESC_OFFSET;
@@ -1043,7 +1043,7 @@ u32 rtw_aes_encrypt(struct adapter *padapter, u8 *pxmitframe)
u32 res = _SUCCESS;
- if (((struct xmit_frame *)pxmitframe)->buf_addr == NULL)
+ if (!((struct xmit_frame *)pxmitframe)->buf_addr)
return _FAIL;
hw_hdr_offset = TXDESC_OFFSET;
diff --git a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
index 67ca219f95bf..0c9ea1520fd0 100644
--- a/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8723bs/core/rtw_sta_mgt.c
@@ -19,7 +19,8 @@ void _rtw_init_stainfo(struct sta_info *psta)
/* INIT_LIST_HEAD(&psta->sleep_list); */
/* INIT_LIST_HEAD(&psta->wakeup_list); */
- _rtw_init_queue(&psta->sleep_q);
+ INIT_LIST_HEAD(&psta->sleep_q.queue);
+ spin_lock_init(&psta->sleep_q.lock);
psta->sleepq_len = 0;
_rtw_init_sta_xmit_priv(&psta->sta_xmitpriv);
@@ -62,14 +63,17 @@ u32 _rtw_init_sta_priv(struct sta_priv *pstapriv)
pstapriv->pstainfo_buf = pstapriv->pallocated_stainfo_buf + 4 -
((SIZE_PTR)(pstapriv->pallocated_stainfo_buf) & 3);
- _rtw_init_queue(&pstapriv->free_sta_queue);
+ INIT_LIST_HEAD(&pstapriv->free_sta_queue.queue);
+ spin_lock_init(&pstapriv->free_sta_queue.lock);
spin_lock_init(&pstapriv->sta_hash_lock);
/* _rtw_init_queue(&pstapriv->asoc_q); */
pstapriv->asoc_sta_count = 0;
- _rtw_init_queue(&pstapriv->sleep_q);
- _rtw_init_queue(&pstapriv->wakeup_q);
+ INIT_LIST_HEAD(&pstapriv->sleep_q.queue);
+ spin_lock_init(&pstapriv->sleep_q.lock);
+ INIT_LIST_HEAD(&pstapriv->wakeup_q.queue);
+ spin_lock_init(&pstapriv->wakeup_q.lock);
psta = (struct sta_info *)(pstapriv->pstainfo_buf);
@@ -242,7 +246,8 @@ struct sta_info *rtw_alloc_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
/* preorder_ctrl->wsize_b = (NR_RECVBUFF-2); */
preorder_ctrl->wsize_b = 64;/* 64; */
- _rtw_init_queue(&preorder_ctrl->pending_recvframe_queue);
+ INIT_LIST_HEAD(&preorder_ctrl->pending_recvframe_queue.queue);
+ spin_lock_init(&preorder_ctrl->pending_recvframe_queue.lock);
rtw_init_recv_timer(preorder_ctrl);
}
@@ -263,7 +268,6 @@ exit:
return psta;
}
-/* using pstapriv->sta_hash_lock to protect */
u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
{
int i;
@@ -289,51 +293,55 @@ u32 rtw_free_stainfo(struct adapter *padapter, struct sta_info *psta)
/* list_del_init(&psta->wakeup_list); */
- spin_lock_bh(&pxmitpriv->lock);
-
+ spin_lock_bh(&psta->sleep_q.lock);
rtw_free_xmitframe_queue(pxmitpriv, &psta->sleep_q);
psta->sleepq_len = 0;
+ spin_unlock_bh(&psta->sleep_q.lock);
+
+ spin_lock_bh(&pxmitpriv->lock);
/* vo */
- /* spin_lock_bh(&(pxmitpriv->vo_pending.lock)); */
+ spin_lock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vo_q.sta_pending);
list_del_init(&(pstaxmitpriv->vo_q.tx_pending));
phwxmit = pxmitpriv->hwxmits;
phwxmit->accnt -= pstaxmitpriv->vo_q.qcnt;
pstaxmitpriv->vo_q.qcnt = 0;
- /* spin_unlock_bh(&(pxmitpriv->vo_pending.lock)); */
+ spin_unlock_bh(&pstaxmitpriv->vo_q.sta_pending.lock);
/* vi */
- /* spin_lock_bh(&(pxmitpriv->vi_pending.lock)); */
+ spin_lock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->vi_q.sta_pending);
list_del_init(&(pstaxmitpriv->vi_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+1;
phwxmit->accnt -= pstaxmitpriv->vi_q.qcnt;
pstaxmitpriv->vi_q.qcnt = 0;
- /* spin_unlock_bh(&(pxmitpriv->vi_pending.lock)); */
+ spin_unlock_bh(&pstaxmitpriv->vi_q.sta_pending.lock);
/* be */
- /* spin_lock_bh(&(pxmitpriv->be_pending.lock)); */
+ spin_lock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->be_q.sta_pending);
list_del_init(&(pstaxmitpriv->be_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+2;
phwxmit->accnt -= pstaxmitpriv->be_q.qcnt;
pstaxmitpriv->be_q.qcnt = 0;
- /* spin_unlock_bh(&(pxmitpriv->be_pending.lock)); */
+ spin_unlock_bh(&pstaxmitpriv->be_q.sta_pending.lock);
/* bk */
- /* spin_lock_bh(&(pxmitpriv->bk_pending.lock)); */
+ spin_lock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
rtw_free_xmitframe_queue(pxmitpriv, &pstaxmitpriv->bk_q.sta_pending);
list_del_init(&(pstaxmitpriv->bk_q.tx_pending));
phwxmit = pxmitpriv->hwxmits+3;
phwxmit->accnt -= pstaxmitpriv->bk_q.qcnt;
pstaxmitpriv->bk_q.qcnt = 0;
- /* spin_unlock_bh(&(pxmitpriv->bk_pending.lock)); */
+ spin_unlock_bh(&pstaxmitpriv->bk_q.sta_pending.lock);
spin_unlock_bh(&pxmitpriv->lock);
+ spin_lock_bh(&pstapriv->sta_hash_lock);
list_del_init(&psta->hash_list);
pstapriv->asoc_sta_count--;
+ spin_unlock_bh(&pstapriv->sta_hash_lock);
/* re-init sta_info; 20061114 will be init in alloc_stainfo */
/* _rtw_init_sta_xmit_priv(&psta->sta_xmitpriv); */
@@ -428,6 +436,7 @@ void rtw_free_all_stainfo(struct adapter *padapter)
struct sta_info *psta = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
struct sta_info *pbcmc_stainfo = rtw_get_bcmc_stainfo(padapter);
+ LIST_HEAD(stainfo_free_list);
if (pstapriv->asoc_sta_count == 1)
return;
@@ -440,11 +449,16 @@ void rtw_free_all_stainfo(struct adapter *padapter)
psta = list_entry(plist, struct sta_info, hash_list);
if (pbcmc_stainfo != psta)
- rtw_free_stainfo(padapter, psta);
+ list_move(&psta->hash_list, &stainfo_free_list);
}
}
spin_unlock_bh(&pstapriv->sta_hash_lock);
+
+ list_for_each_safe(plist, tmp, &stainfo_free_list) {
+ psta = list_entry(plist, struct sta_info, hash_list);
+ rtw_free_stainfo(padapter, psta);
+ }
}
/* any station allocated can be searched by hash list */
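
The rtw_free_all_stainfo() hunk above avoids calling rtw_free_stainfo(), which now takes sta_hash_lock itself, while the hash lock is still held: entries are moved to an on-stack list under the lock and freed only after it is dropped. A minimal sketch of that collect-then-free pattern with placeholder types:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct node_sketch {
	struct list_head hash_list;
};

static void free_all_nodes(struct list_head *hash_head, spinlock_t *hash_lock)
{
	struct list_head *pos, *tmp;
	struct node_sketch *n;
	LIST_HEAD(free_list);			/* on-stack, already-initialised list head */

	spin_lock_bh(hash_lock);
	list_for_each_safe(pos, tmp, hash_head) {
		n = list_entry(pos, struct node_sketch, hash_list);
		list_move(&n->hash_list, &free_list);	/* unlink from hash, park for later */
	}
	spin_unlock_bh(hash_lock);

	/* Free outside the lock, so the teardown path may take other locks. */
	list_for_each_safe(pos, tmp, &free_list) {
		n = list_entry(pos, struct node_sketch, hash_list);
		list_del(&n->hash_list);
		kfree(n);			/* stands in for rtw_free_stainfo() */
	}
}
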
diff --git a/drivers/staging/rtl8723bs/core/rtw_xmit.c b/drivers/staging/rtl8723bs/core/rtw_xmit.c
index 79e4d7df1ef5..46054d6a1fb5 100644
--- a/drivers/staging/rtl8723bs/core/rtw_xmit.c
+++ b/drivers/staging/rtl8723bs/core/rtw_xmit.c
@@ -13,7 +13,8 @@ static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
static void _init_txservq(struct tx_servq *ptxservq)
{
INIT_LIST_HEAD(&ptxservq->tx_pending);
- _rtw_init_queue(&ptxservq->sta_pending);
+ INIT_LIST_HEAD(&ptxservq->sta_pending.queue);
+ spin_lock_init(&ptxservq->sta_pending.lock);
ptxservq->qcnt = 0;
}
@@ -49,13 +50,19 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->adapter = padapter;
- _rtw_init_queue(&pxmitpriv->be_pending);
- _rtw_init_queue(&pxmitpriv->bk_pending);
- _rtw_init_queue(&pxmitpriv->vi_pending);
- _rtw_init_queue(&pxmitpriv->vo_pending);
- _rtw_init_queue(&pxmitpriv->bm_pending);
+ INIT_LIST_HEAD(&pxmitpriv->be_pending.queue);
+ spin_lock_init(&pxmitpriv->be_pending.lock);
+ INIT_LIST_HEAD(&pxmitpriv->bk_pending.queue);
+ spin_lock_init(&pxmitpriv->bk_pending.lock);
+ INIT_LIST_HEAD(&pxmitpriv->vi_pending.queue);
+ spin_lock_init(&pxmitpriv->vi_pending.lock);
+ INIT_LIST_HEAD(&pxmitpriv->vo_pending.queue);
+ spin_lock_init(&pxmitpriv->vo_pending.lock);
+ INIT_LIST_HEAD(&pxmitpriv->bm_pending.queue);
+ spin_lock_init(&pxmitpriv->bm_pending.lock);
- _rtw_init_queue(&pxmitpriv->free_xmit_queue);
+ INIT_LIST_HEAD(&pxmitpriv->free_xmit_queue.queue);
+ spin_lock_init(&pxmitpriv->free_xmit_queue.lock);
/*
* Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
@@ -96,8 +103,10 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
/* init xmit_buf */
- _rtw_init_queue(&pxmitpriv->free_xmitbuf_queue);
- _rtw_init_queue(&pxmitpriv->pending_xmitbuf_queue);
+ INIT_LIST_HEAD(&pxmitpriv->free_xmitbuf_queue.queue);
+ spin_lock_init(&pxmitpriv->free_xmitbuf_queue.lock);
+ INIT_LIST_HEAD(&pxmitpriv->pending_xmitbuf_queue.queue);
+ spin_lock_init(&pxmitpriv->pending_xmitbuf_queue.lock);
pxmitpriv->pallocated_xmitbuf = vzalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4);
@@ -145,7 +154,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
/* init xframe_ext queue, the same count as extbuf */
- _rtw_init_queue(&pxmitpriv->free_xframe_ext_queue);
+ INIT_LIST_HEAD(&pxmitpriv->free_xframe_ext_queue.queue);
+ spin_lock_init(&pxmitpriv->free_xframe_ext_queue.lock);
pxmitpriv->xframe_ext_alloc_addr = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_frame) + 4);
@@ -178,7 +188,8 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter)
pxmitpriv->free_xframe_ext_cnt = NR_XMIT_EXTBUFF;
/* Init xmit extension buff */
- _rtw_init_queue(&pxmitpriv->free_xmit_extbuf_queue);
+ INIT_LIST_HEAD(&pxmitpriv->free_xmit_extbuf_queue.queue);
+ spin_lock_init(&pxmitpriv->free_xmit_extbuf_queue.lock);
pxmitpriv->pallocated_xmit_extbuf = vzalloc(NR_XMIT_EXTBUFF * sizeof(struct xmit_buf) + 4);
@@ -1723,15 +1734,12 @@ void rtw_free_xmitframe_queue(struct xmit_priv *pxmitpriv, struct __queue *pfram
struct list_head *plist, *phead, *tmp;
struct xmit_frame *pxmitframe;
- spin_lock_bh(&pframequeue->lock);
-
phead = get_list_head(pframequeue);
list_for_each_safe(plist, tmp, phead) {
pxmitframe = list_entry(plist, struct xmit_frame, list);
rtw_free_xmitframe(pxmitpriv, pxmitframe);
}
- spin_unlock_bh(&pframequeue->lock);
}
s32 rtw_xmitframe_enqueue(struct adapter *padapter, struct xmit_frame *pxmitframe)
@@ -1786,6 +1794,7 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
struct sta_info *psta;
struct tx_servq *ptxservq;
struct pkt_attrib *pattrib = &pxmitframe->attrib;
+ struct xmit_priv *xmit_priv = &padapter->xmitpriv;
struct hw_xmit *phwxmits = padapter->xmitpriv.hwxmits;
signed int res = _SUCCESS;
@@ -1803,12 +1812,14 @@ s32 rtw_xmit_classifier(struct adapter *padapter, struct xmit_frame *pxmitframe)
ptxservq = rtw_get_sta_pending(padapter, psta, pattrib->priority, (u8 *)(&ac_index));
+ spin_lock_bh(&xmit_priv->lock);
if (list_empty(&ptxservq->tx_pending))
list_add_tail(&ptxservq->tx_pending, get_list_head(phwxmits[ac_index].sta_queue));
list_add_tail(&pxmitframe->list, get_list_head(&ptxservq->sta_pending));
ptxservq->qcnt++;
phwxmits[ac_index].accnt++;
+ spin_unlock_bh(&xmit_priv->lock);
exit:
@@ -2191,11 +2202,10 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
psta_bmc = rtw_get_bcmc_stainfo(padapter);
- spin_lock_bh(&pxmitpriv->lock);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2296,7 +2306,7 @@ void wakeup_sta_to_xmit(struct adapter *padapter, struct sta_info *psta)
_exit:
- spin_unlock_bh(&pxmitpriv->lock);
+ spin_unlock_bh(&psta->sleep_q.lock);
if (update_mask)
update_beacon(padapter, WLAN_EID_TIM, NULL, true);
@@ -2308,9 +2318,8 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
struct list_head *xmitframe_plist, *xmitframe_phead, *tmp;
struct xmit_frame *pxmitframe = NULL;
struct sta_priv *pstapriv = &padapter->stapriv;
- struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
- spin_lock_bh(&pxmitpriv->lock);
+ spin_lock_bh(&psta->sleep_q.lock);
xmitframe_phead = get_list_head(&psta->sleep_q);
list_for_each_safe(xmitframe_plist, tmp, xmitframe_phead) {
@@ -2363,7 +2372,7 @@ void xmit_delivery_enabled_frames(struct adapter *padapter, struct sta_info *pst
}
}
- spin_unlock_bh(&pxmitpriv->lock);
+ spin_unlock_bh(&psta->sleep_q.lock);
}
void enqueue_pending_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
diff --git a/drivers/staging/rtl8723bs/hal/odm_DIG.c b/drivers/staging/rtl8723bs/hal/odm_DIG.c
index beda7b8a7c6a..7e92c373cddb 100644
--- a/drivers/staging/rtl8723bs/hal/odm_DIG.c
+++ b/drivers/staging/rtl8723bs/hal/odm_DIG.c
@@ -544,7 +544,7 @@ void odm_DIG(void *pDM_VOID)
/* 1 Adjust initial gain by false alarm */
if (pDM_Odm->bLinked && bPerformance) {
- if (bFirstTpTarget || (FirstConnect && bPerformance)) {
+ if (bFirstTpTarget || FirstConnect) {
pDM_DigTable->LargeFAHit = 0;
if (pDM_Odm->RSSI_Min < DIG_MaxOfMin) {
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
index cce3e7e80953..f1fc077ed29c 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723b_hal_init.c
@@ -2746,19 +2746,7 @@ void rtl8723b_update_txdesc(struct xmit_frame *pxmitframe, u8 *pbuf)
struct tx_desc *pdesc;
rtl8723b_fill_default_txdesc(pxmitframe, pbuf);
-
pdesc = (struct tx_desc *)pbuf;
- pdesc->txdw0 = pdesc->txdw0;
- pdesc->txdw1 = pdesc->txdw1;
- pdesc->txdw2 = pdesc->txdw2;
- pdesc->txdw3 = pdesc->txdw3;
- pdesc->txdw4 = pdesc->txdw4;
- pdesc->txdw5 = pdesc->txdw5;
- pdesc->txdw6 = pdesc->txdw6;
- pdesc->txdw7 = pdesc->txdw7;
- pdesc->txdw8 = pdesc->txdw8;
- pdesc->txdw9 = pdesc->txdw9;
-
rtl8723b_cal_txdesc_chksum(pdesc);
}
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
index 418016930728..c0a1a6fbeb91 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_recv.c
@@ -378,8 +378,10 @@ s32 rtl8723bs_init_recv_priv(struct adapter *padapter)
precvpriv = &padapter->recvpriv;
/* 3 1. init recv buffer */
- _rtw_init_queue(&precvpriv->free_recv_buf_queue);
- _rtw_init_queue(&precvpriv->recv_buf_pending_queue);
+ INIT_LIST_HEAD(&precvpriv->free_recv_buf_queue.queue);
+ spin_lock_init(&precvpriv->free_recv_buf_queue.lock);
+ INIT_LIST_HEAD(&precvpriv->recv_buf_pending_queue.queue);
+ spin_lock_init(&precvpriv->recv_buf_pending_queue.lock);
n = NR_RECVBUFF * sizeof(struct recv_buf) + 4;
precvpriv->pallocated_recv_buf = rtw_zmalloc(n);
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index 156d6aba18ca..5f5c4719b586 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -507,9 +507,7 @@ s32 rtl8723bs_hal_xmit(
rtw_issue_addbareq_cmd(padapter, pxmitframe);
}
- spin_lock_bh(&pxmitpriv->lock);
err = rtw_xmitframe_enqueue(padapter, pxmitframe);
- spin_unlock_bh(&pxmitpriv->lock);
if (err != _SUCCESS) {
rtw_free_xmitframe(pxmitpriv, pxmitframe);
diff --git a/drivers/staging/rtl8723bs/include/ieee80211.h b/drivers/staging/rtl8723bs/include/ieee80211.h
index d6236f5b069d..c11d7e2d2347 100644
--- a/drivers/staging/rtl8723bs/include/ieee80211.h
+++ b/drivers/staging/rtl8723bs/include/ieee80211.h
@@ -172,7 +172,7 @@ struct ieee_param {
struct {
u32 len;
u8 reserved[32];
- u8 data[0];
+ u8 data[];
} wpa_ie;
struct{
int command;
@@ -185,7 +185,7 @@ struct ieee_param {
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
- u8 key[0];
+ u8 key[];
} crypt;
struct {
u16 aid;
@@ -196,7 +196,7 @@ struct ieee_param {
} add_sta;
struct {
u8 reserved[2];/* for set max_num_sta */
- u8 buf[0];
+ u8 buf[];
} bcn_ie;
} u;
};
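
The ieee80211.h hunks replace GNU zero-length arrays with C99 flexible array members; the same change appears in osdep_service.h just below. A sketch of the resulting layout, mirroring the wpa_ie case above:

#include <linux/types.h>

/*
 * A flexible array member must be the last field and occupies no storage of
 * its own; the real bounds come from the allocation carrying the struct.
 */
struct wpa_ie_sketch {
	u32 len;
	u8 reserved[32];
	u8 data[];	/* formerly "u8 data[0]", the old GNU zero-length idiom */
};

Flexible members keep sizeof() accurate and let the compiler's array-bounds checking apply to the trailing data, which is why the zero-length form is being phased out.
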
diff --git a/drivers/staging/rtl8723bs/include/osdep_service.h b/drivers/staging/rtl8723bs/include/osdep_service.h
index bde415db4114..cf96b5f7a776 100644
--- a/drivers/staging/rtl8723bs/include/osdep_service.h
+++ b/drivers/staging/rtl8723bs/include/osdep_service.h
@@ -113,7 +113,7 @@ struct rtw_cbuf {
u32 write;
u32 read;
u32 size;
- void *bufs[0];
+ void *bufs[];
};
bool rtw_cbuf_full(struct rtw_cbuf *cbuf);
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
index 499ac3a77512..0868f56e2979 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c
@@ -47,7 +47,7 @@ static const u32 rtw_cipher_suites[] = {
* Moreover wowlan has to be enabled via a the nl80211_set_wowlan callback.
* (from user space, e.g. iw phy0 wowlan enable)
*/
-static const struct wiphy_wowlan_support wowlan_stub = {
+static __maybe_unused const struct wiphy_wowlan_support wowlan_stub = {
.flags = WIPHY_WOWLAN_ANY,
.n_patterns = 0,
.pattern_max_len = 0,
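
Marking wowlan_stub as __maybe_unused silences the unused-const-variable warning in configurations where nothing references it, without removing the definition. A one-line sketch of the attribute on a placeholder table:

#include <linux/compiler.h>

/* Illustrative only: kept even if no current config references it. */
static __maybe_unused const int tuning_sketch[] = { 1, 2, 4, 8 };
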
@@ -391,7 +391,7 @@ void rtw_cfg80211_ibss_indicate_connect(struct adapter *padapter)
}
else
{
- if (scanned == NULL) {
+ if (!scanned) {
rtw_warn_on(1);
return;
}
@@ -432,7 +432,7 @@ void rtw_cfg80211_indicate_connect(struct adapter *padapter)
struct wlan_bssid_ex *pnetwork = &(padapter->mlmeextpriv.mlmext_info.network);
struct wlan_network *scanned = pmlmepriv->cur_network_scanned;
- if (scanned == NULL) {
+ if (!scanned) {
rtw_warn_on(1);
goto check_bss;
}
@@ -551,10 +551,10 @@ static int rtw_cfg80211_ap_set_encryption(struct net_device *dev, struct ieee_pa
goto exit;
}
- if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL))
+ if (strcmp(param->u.crypt.alg, "none") == 0 && !psta)
goto exit;
- if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL))
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && !psta)
{
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
@@ -907,7 +907,7 @@ static int rtw_cfg80211_set_encryption(struct net_device *dev, struct ieee_param
}
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta == NULL)
+ if (!pbcmc_sta)
{
/* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
}
@@ -947,7 +947,7 @@ static int cfg80211_rtw_add_key(struct wiphy *wiphy, struct net_device *ndev,
param_len = sizeof(struct ieee_param) + params->key_len;
param = rtw_malloc(param_len);
- if (param == NULL)
+ if (!param)
return -1;
memset(param, 0, param_len);
@@ -1098,7 +1098,7 @@ static int cfg80211_rtw_get_station(struct wiphy *wiphy,
}
psta = rtw_get_stainfo(pstapriv, (u8 *)mac);
- if (psta == NULL) {
+ if (!psta) {
ret = -ENOENT;
goto exit;
}
@@ -1327,7 +1327,7 @@ static int cfg80211_rtw_scan(struct wiphy *wiphy
struct rtw_wdev_priv *pwdev_priv;
struct mlme_priv *pmlmepriv;
- if (ndev == NULL) {
+ if (!ndev) {
ret = -EINVAL;
goto exit;
}
@@ -1571,7 +1571,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
u8 *pwpa, *pwpa2;
u8 null_addr[] = {0, 0, 0, 0, 0, 0};
- if (pie == NULL || !ielen) {
+ if (!pie || !ielen) {
/* Treat this as normal case, but need to clear WIFI_UNDER_WPS */
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
goto exit;
@@ -1583,7 +1583,7 @@ static int rtw_cfg80211_set_wpa_ie(struct adapter *padapter, u8 *pie, size_t iel
}
buf = rtw_zmalloc(ielen);
- if (buf == NULL) {
+ if (!buf) {
ret = -ENOMEM;
goto exit;
}
@@ -1873,7 +1873,7 @@ static int cfg80211_rtw_connect(struct wiphy *wiphy, struct net_device *ndev,
wep_key_len = wep_key_len <= 5 ? 5 : 13;
wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material);
pwep = rtw_malloc(wep_total_len);
- if (pwep == NULL) {
+ if (!pwep) {
ret = -ENOMEM;
goto exit;
}
@@ -2708,7 +2708,7 @@ static int cfg80211_rtw_mgmt_tx(struct wiphy *wiphy,
struct adapter *padapter;
struct rtw_wdev_priv *pwdev_priv;
- if (ndev == NULL) {
+ if (!ndev) {
ret = -EINVAL;
goto exit;
}
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index 9d4a233a861e..ece97e37ac91 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -15,20 +15,6 @@
#define RTL_IOCTL_WPA_SUPPLICANT (SIOCIWFIRSTPRIV+30)
-#define SCAN_ITEM_SIZE 768
-#define MAX_CUSTOM_LEN 64
-#define RATE_COUNT 4
-
-/* combo scan */
-#define WEXT_CSCAN_HEADER "CSCAN S\x01\x00\x00S\x00"
-#define WEXT_CSCAN_HEADER_SIZE 12
-#define WEXT_CSCAN_SSID_SECTION 'S'
-#define WEXT_CSCAN_CHANNEL_SECTION 'C'
-#define WEXT_CSCAN_ACTV_DWELL_SECTION 'A'
-#define WEXT_CSCAN_PASV_DWELL_SECTION 'P'
-#define WEXT_CSCAN_HOME_DWELL_SECTION 'H'
-#define WEXT_CSCAN_TYPE_SECTION 'T'
-
static int wpa_set_auth_algs(struct net_device *dev, u32 value)
{
struct adapter *padapter = rtw_netdev_priv(dev);
@@ -153,7 +139,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (check_fwstate(pmlmepriv, WIFI_STATION_STATE | WIFI_MP_STATE) == true) { /* sta mode */
psta = rtw_get_stainfo(pstapriv, get_bssid(pmlmepriv));
- if (psta == NULL) {
+ if (!psta) {
/* DEBUG_ERR(("Set wpa_set_encryption: Obtain Sta_info fail\n")); */
} else {
/* Jeff: don't disable ieee8021x_blocked while clearing key */
@@ -206,7 +192,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
}
pbcmc_sta = rtw_get_bcmc_stainfo(padapter);
- if (pbcmc_sta == NULL) {
+ if (!pbcmc_sta) {
/* DEBUG_ERR(("Set OID_802_11_ADD_KEY: bcmc stainfo is null\n")); */
} else {
/* Jeff: don't disable ieee8021x_blocked while clearing key */
@@ -236,9 +222,9 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
int ret = 0;
u8 null_addr[] = {0, 0, 0, 0, 0, 0};
- if ((ielen > MAX_WPA_IE_LEN) || (pie == NULL)) {
+ if (ielen > MAX_WPA_IE_LEN || !pie) {
_clr_fwstate_(&padapter->mlmepriv, WIFI_UNDER_WPS);
- if (pie == NULL)
+ if (!pie)
return ret;
else
return -EINVAL;
@@ -246,7 +232,7 @@ static int rtw_set_wpa_ie(struct adapter *padapter, char *pie, unsigned short ie
if (ielen) {
buf = rtw_zmalloc(ielen);
- if (buf == NULL) {
+ if (!buf) {
ret = -ENOMEM;
goto exit;
}
@@ -491,7 +477,7 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
return -EINVAL;
param = rtw_malloc(p->length);
- if (param == NULL)
+ if (!param)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
@@ -571,7 +557,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
goto exit;
}
- if (strcmp(param->u.crypt.alg, "none") == 0 && (psta == NULL)) {
+ if (strcmp(param->u.crypt.alg, "none") == 0 && !psta) {
/* todo:clear default encryption keys */
psecuritypriv->dot11AuthAlgrthm = dot11AuthAlgrthm_Open;
@@ -583,7 +569,7 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param,
}
- if (strcmp(param->u.crypt.alg, "WEP") == 0 && (psta == NULL)) {
+ if (strcmp(param->u.crypt.alg, "WEP") == 0 && !psta) {
wep_key_idx = param->u.crypt.idx;
wep_key_len = param->u.crypt.key_len;
@@ -835,9 +821,7 @@ static int rtw_add_sta(struct net_device *dev, struct ieee_param *param)
psta = rtw_get_stainfo(pstapriv, param->sta_addr);
if (psta)
{
- spin_lock_bh(&(pstapriv->sta_hash_lock));
rtw_free_stainfo(padapter, psta);
- spin_unlock_bh(&(pstapriv->sta_hash_lock));
psta = NULL;
}
@@ -1229,7 +1213,7 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
return -EINVAL;
param = rtw_malloc(p->length);
- if (param == NULL)
+ if (!param)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
diff --git a/drivers/staging/rtl8723bs/os_dep/os_intfs.c b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
index f78bf174de8e..05482341eefe 100644
--- a/drivers/staging/rtl8723bs/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8723bs/os_dep/os_intfs.c
@@ -283,7 +283,7 @@ static int rtw_net_set_mac_address(struct net_device *pnetdev, void *p)
if (!padapter->bup) {
/* addr->sa_data[4], addr->sa_data[5]); */
memcpy(padapter->eeprompriv.mac_addr, addr->sa_data, ETH_ALEN);
- /* memcpy(pnetdev->dev_addr, addr->sa_data, ETH_ALEN); */
+ /* eth_hw_addr_set(pnetdev, addr->sa_data); */
/* padapter->bset_hwaddr = true; */
}
@@ -488,7 +488,7 @@ void rtw_unregister_netdevs(struct dvobj_priv *dvobj)
padapter = dvobj->padapters;
- if (padapter == NULL)
+ if (!padapter)
return;
pnetdev = padapter->pnetdev;
@@ -594,7 +594,7 @@ struct dvobj_priv *devobj_init(void)
struct dvobj_priv *pdvobj = NULL;
pdvobj = rtw_zmalloc(sizeof(*pdvobj));
- if (pdvobj == NULL)
+ if (!pdvobj)
return NULL;
mutex_init(&pdvobj->hw_init_mutex);
@@ -789,7 +789,7 @@ static int _rtw_drv_register_netdev(struct adapter *padapter, char *name)
if (rtw_init_netdev_name(pnetdev, name))
return _FAIL;
- memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
/* Tell the network stack we exist */
if (register_netdev(pnetdev) != 0) {
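
The os_intfs.c and osdep_service.c hunks switch from memcpy() into pnetdev->dev_addr to the eth_hw_addr_set() helper, required now that the networking core treats dev_addr as read-only for drivers. A minimal sketch with an illustrative wrapper name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Illustrative only: copy a MAC address into a net_device the supported way. */
static void set_mac_sketch(struct net_device *ndev, const u8 mac[ETH_ALEN])
{
	eth_hw_addr_set(ndev, mac);	/* replaces memcpy(ndev->dev_addr, mac, ETH_ALEN) */
}
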
diff --git a/drivers/staging/rtl8723bs/os_dep/osdep_service.c b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
index c58555a4012f..4fbfa75c05d7 100644
--- a/drivers/staging/rtl8723bs/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8723bs/os_dep/osdep_service.c
@@ -49,13 +49,6 @@ inline int _rtw_netif_rx(struct net_device *ndev, struct sk_buff *skb)
return netif_rx(skb);
}
-void _rtw_init_queue(struct __queue *pqueue)
-{
- INIT_LIST_HEAD(&(pqueue->queue));
-
- spin_lock_init(&(pqueue->lock));
-}
-
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv)
{
struct net_device *pnetdev;
@@ -149,7 +142,7 @@ int rtw_change_ifname(struct adapter *padapter, const char *ifname)
rtw_init_netdev_name(pnetdev, ifname);
- memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN);
+ eth_hw_addr_set(pnetdev, padapter->eeprompriv.mac_addr);
if (!rtnl_is_locked())
ret = register_netdev(pnetdev);
@@ -281,7 +274,7 @@ struct rtw_cbuf *rtw_cbuf_alloc(u32 size)
{
struct rtw_cbuf *cbuf;
- cbuf = rtw_malloc(sizeof(*cbuf) + sizeof(void *) * size);
+ cbuf = rtw_malloc(struct_size(cbuf, bufs, size));
if (cbuf) {
cbuf->write = cbuf->read = 0;
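
rtw_cbuf_alloc() now sizes its allocation with struct_size(), which pairs with the bufs[] flexible array in osdep_service.h above and saturates instead of wrapping if the multiplication overflows. A minimal sketch using the same three-counter layout as the driver's struct rtw_cbuf:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Sketch of the driver's struct rtw_cbuf shown earlier. */
struct cbuf_sketch {
	u32 write;
	u32 read;
	u32 size;
	void *bufs[];
};

static struct cbuf_sketch *cbuf_sketch_alloc(u32 size)
{
	struct cbuf_sketch *c;

	/* struct_size(c, bufs, size) == sizeof(*c) + size * sizeof(c->bufs[0]), overflow-checked. */
	c = kmalloc(struct_size(c, bufs, size), GFP_KERNEL);
	if (c) {
		c->write = 0;
		c->read = 0;
		c->size = size;
	}
	return c;
}
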
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index c6ad34a7fa33..2a6fab5c117a 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -165,7 +165,7 @@ static int ms_write_bytes(struct rtsx_chip *chip,
struct ms_info *ms_card = &chip->ms_card;
int retval, i;
- if (!data || (data_len < cnt))
+ if (!data || data_len < cnt)
return STATUS_ERROR;
rtsx_init_cmd(chip);
@@ -290,7 +290,7 @@ static int ms_read_bytes(struct rtsx_chip *chip,
for (i = 0; i < data_len; i++)
data[i] = ptr[i];
- if ((tpc == PRO_READ_SHORT_DATA) && (data_len == 8)) {
+ if (tpc == PRO_READ_SHORT_DATA && data_len == 8) {
dev_dbg(rtsx_dev(chip), "Read format progress:\n");
print_hex_dump_bytes(KBUILD_MODNAME ": ", DUMP_PREFIX_NONE, ptr,
cnt);
@@ -964,13 +964,13 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
i++;
} while (i < 1024);
- if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
+ if (buf[0] != 0xa5 && buf[1] != 0xc3) {
/* Signature code is wrong */
kfree(buf);
return STATUS_FAIL;
}
- if ((buf[4] < 1) || (buf[4] > 12)) {
+ if (buf[4] < 1 || buf[4] > 12) {
kfree(buf);
return STATUS_FAIL;
}
@@ -979,8 +979,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
int cur_addr_off = 16 + i * 12;
#ifdef SUPPORT_MSXC
- if ((buf[cur_addr_off + 8] == 0x10) ||
- (buf[cur_addr_off + 8] == 0x13)) {
+ if (buf[cur_addr_off + 8] == 0x10 ||
+ buf[cur_addr_off + 8] == 0x13) {
#else
if (buf[cur_addr_off + 8] == 0x10) {
#endif
@@ -1109,8 +1109,8 @@ static int ms_read_attribute_info(struct rtsx_chip *chip)
#endif
if (device_type != 0x00) {
- if ((device_type == 0x01) || (device_type == 0x02) ||
- (device_type == 0x03)) {
+ if (device_type == 0x01 || device_type == 0x02 ||
+ device_type == 0x03) {
chip->card_wp |= MS_CARD;
} else {
return STATUS_FAIL;
@@ -1336,7 +1336,7 @@ static int ms_write_extra_data(struct rtsx_chip *chip, u16 block_addr,
int retval, i;
u8 val, data[16];
- if (!buf || (buf_len < MS_EXTRA_SIZE))
+ if (!buf || buf_len < MS_EXTRA_SIZE)
return STATUS_FAIL;
retval = ms_set_rw_reg_addr(chip, OVERWRITE_FLAG, MS_EXTRA_SIZE,
@@ -1574,7 +1574,7 @@ ERASE_RTY:
static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
{
- if (!extra || (extra_len < MS_EXTRA_SIZE))
+ if (!extra || extra_len < MS_EXTRA_SIZE)
return;
memset(extra, 0xFF, MS_EXTRA_SIZE);
@@ -2008,7 +2008,7 @@ RE_SEARCH:
goto RE_SEARCH;
}
- if ((ptr[14] == 1) || (ptr[14] == 3))
+ if (ptr[14] == 1 || ptr[14] == 3)
chip->card_wp |= MS_CARD;
/* BLOCK_SIZE_0, BLOCK_SIZE_1 */
@@ -2378,8 +2378,8 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
continue;
}
- if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no + 1])) {
+ if (log_blk < ms_start_idx[seg_no] ||
+ log_blk >= ms_start_idx[seg_no + 1]) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -2662,7 +2662,7 @@ static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
return retval;
if (ms_card->seq_mode) {
- if ((ms_card->pre_dir != srb->sc_data_direction) ||
+ if (ms_card->pre_dir != srb->sc_data_direction ||
((ms_card->pre_sec_addr + ms_card->pre_sec_cnt) !=
start_sector) ||
(mode_2k && (ms_card->seq_mode & MODE_512_SEQ)) ||
@@ -3294,8 +3294,8 @@ static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk,
return STATUS_FAIL;
}
- if ((page_addr == (end_page - 1)) ||
- (page_addr == ms_card->page_off)) {
+ if (page_addr == (end_page - 1) ||
+ page_addr == ms_card->page_off) {
if (!(val & INT_REG_CED)) {
ms_set_err_code(chip,
MS_FLASH_WRITE_ERROR);
@@ -3434,8 +3434,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef MS_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ delay_write->logblock == log_blk &&
+ start_page > delay_write->pageoff) {
delay_write->delay_write_flag = 0;
retval = ms_copy_page(chip,
delay_write->old_phyblock,
@@ -3467,7 +3467,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
(chip, seg_no,
log_blk - ms_start_idx[seg_no]);
new_blk = ms_get_unused_block(chip, seg_no);
- if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) {
+ if (old_blk == 0xFFFF || new_blk == 0xFFFF) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
@@ -4179,7 +4179,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i * 512,
512, 0, DMA_TO_DEVICE, 3000);
- if ((retval < 0) || check_ms_err(chip)) {
+ if (retval < 0 || check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
@@ -4200,7 +4200,7 @@ int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#else
retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA,
2, WAIT_INT, 0, 0, buf + 4, 1024);
- if ((retval != STATUS_SUCCESS) || check_ms_err(chip)) {
+ if (retval != STATUS_SUCCESS || check_ms_err(chip)) {
rtsx_clear_ms_error(chip);
if (ms_card->mg_auth == 0) {
if ((buf[5] & 0xC0) != 0)
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 898add4d1fc8..41d13becec5c 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -558,7 +558,7 @@ static irqreturn_t rtsx_interrupt(int irq, void *dev_id)
complete(dev->done);
} else if (status & DATA_DONE_INT) {
dev->trans_result = TRANS_NOT_READY;
- if (dev->done && (dev->trans_state == STATE_TRANS_SG))
+ if (dev->done && dev->trans_state == STATE_TRANS_SG)
complete(dev->done);
}
}
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 294f381518fa..326b04756f62 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -165,7 +165,7 @@ void do_reset_sd_card(struct rtsx_chip *chip)
chip->card_fail &= ~SD_CARD;
chip->rw_card[chip->card2lun[SD_CARD]] = sd_rw;
} else {
- if (chip->sd_io || (chip->sd_reset_counter >= MAX_RESET_CNT)) {
+ if (chip->sd_io || chip->sd_reset_counter >= MAX_RESET_CNT) {
clear_bit(SD_NR, &chip->need_reset);
chip->sd_reset_counter = 0;
chip->sd_show_cnt = 0;
@@ -636,7 +636,7 @@ int switch_ssc_clock(struct rtsx_chip *chip, int clk)
dev_dbg(rtsx_dev(chip), "Switch SSC clock to %dMHz (cur_clk = %d)\n",
clk, chip->cur_clk);
- if ((clk <= 2) || (n > max_n))
+ if (clk <= 2 || n > max_n)
return STATUS_FAIL;
mcu_cnt = (u8)(125 / clk + 3);
@@ -886,7 +886,7 @@ int card_power_on(struct rtsx_chip *chip, u8 card)
int retval;
u8 mask, val1, val2;
- if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && card == MS_CARD) {
mask = MS_POWER_MASK;
val1 = MS_PARTIAL_POWER_ON;
val2 = MS_POWER_ON;
@@ -920,7 +920,7 @@ int card_power_off(struct rtsx_chip *chip, u8 card)
int retval;
u8 mask, val;
- if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && (card == MS_CARD)) {
+ if (CHECK_LUN_MODE(chip, SD_MS_2LUN) && card == MS_CARD) {
mask = MS_POWER_MASK;
val = MS_POWER_OFF;
} else {
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index ee9ddc4eb94d..6375032918d4 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -325,7 +325,7 @@ static int rtsx_enable_pcie_intr(struct rtsx_chip *chip)
return STATUS_FAIL;
}
- if (chip->driver_first_load && (chip->ic_version < IC_VER_C))
+ if (chip->driver_first_load && chip->ic_version < IC_VER_C)
rtsx_calibration(chip);
return STATUS_SUCCESS;
@@ -496,7 +496,7 @@ int rtsx_reset_chip(struct rtsx_chip *chip)
chip->int_reg);
if (chip->int_reg & SD_EXIST) {
#ifdef HW_AUTO_SWITCH_SD_BUS
- if (CHECK_PID(chip, 0x5208) && (chip->ic_version < IC_VER_C))
+ if (CHECK_PID(chip, 0x5208) && chip->ic_version < IC_VER_C)
retval = rtsx_pre_handle_sdio_old(chip);
else
retval = rtsx_pre_handle_sdio_new(chip);
@@ -563,7 +563,7 @@ nextcard:
return retval;
}
- if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
+ if (CHECK_PID(chip, 0x5208) && chip->ic_version >= IC_VER_D) {
retval = rtsx_write_register(chip, PETXCFG, 0x1C, 0x14);
if (retval)
return retval;
@@ -606,7 +606,7 @@ static inline int valid_sd_speed_prior(u32 sd_speed_prior)
for (i = 0; i < 4; i++) {
u8 tmp = (u8)(sd_speed_prior >> (i * 8));
- if ((tmp < 0x01) || (tmp > 0x04)) {
+ if (tmp < 0x01 || tmp > 0x04) {
valid_para = false;
break;
}
@@ -808,10 +808,10 @@ int rtsx_init_chip(struct rtsx_chip *chip)
dev_dbg(rtsx_dev(chip), "sd_current_prior = 0x%08x\n",
chip->sd_current_prior);
- if ((chip->sd_ddr_tx_phase > 31) || (chip->sd_ddr_tx_phase < 0))
+ if (chip->sd_ddr_tx_phase > 31 || chip->sd_ddr_tx_phase < 0)
chip->sd_ddr_tx_phase = 0;
- if ((chip->mmc_ddr_tx_phase > 31) || (chip->mmc_ddr_tx_phase < 0))
+ if (chip->mmc_ddr_tx_phase > 31 || chip->mmc_ddr_tx_phase < 0)
chip->mmc_ddr_tx_phase = 0;
retval = rtsx_write_register(chip, FPDCTL, SSC_POWER_DOWN, 0);
@@ -1840,7 +1840,7 @@ int rtsx_pre_handle_interrupt(struct rtsx_chip *chip)
chip->int_reg = rtsx_readl(chip, RTSX_BIPR);
if (((chip->int_reg & int_enable) == 0) ||
- (chip->int_reg == 0xFFFFFFFF))
+ chip->int_reg == 0xFFFFFFFF)
return STATUS_FAIL;
status = chip->int_reg &= (int_enable | 0x7FFFFF);
@@ -1939,7 +1939,7 @@ void rtsx_do_before_power_down(struct rtsx_chip *chip, int pm_stat)
}
#endif
- if (CHECK_PID(chip, 0x5208) && (chip->ic_version >= IC_VER_D)) {
+ if (CHECK_PID(chip, 0x5208) && chip->ic_version >= IC_VER_D) {
/* u_force_clkreq_0 */
rtsx_write_register(chip, PETXCFG, 0x08, 0x08);
}
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 11d9d9155eef..08bd768ad34d 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -500,12 +500,12 @@ static int inquiry(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_ERROR;
#ifdef SUPPORT_MAGIC_GATE
- if ((chip->mspro_formatter_enable) &&
+ if (chip->mspro_formatter_enable &&
(chip->lun2card[lun] & MS_CARD))
#else
if (chip->mspro_formatter_enable)
#endif
- if (!card || (card == MS_CARD))
+ if (!card || card == MS_CARD)
pro_formatter_flag = true;
if (pro_formatter_flag) {
@@ -754,7 +754,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
data_size = 8;
#ifdef SUPPORT_MAGIC_GATE
if ((chip->lun2card[lun] & MS_CARD)) {
- if (!card || (card == MS_CARD)) {
+ if (!card || card == MS_CARD) {
data_size = 108;
if (chip->mspro_formatter_enable)
pro_formatter_flag = true;
@@ -775,11 +775,11 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
page_code = srb->cmnd[2] & 0x3f;
- if ((page_code == 0x3F) || (page_code == 0x1C) ||
- (page_code == 0x00) ||
- (pro_formatter_flag && (page_code == 0x20))) {
+ if (page_code == 0x3F || page_code == 0x1C ||
+ page_code == 0x00 ||
+ (pro_formatter_flag && page_code == 0x20)) {
if (srb->cmnd[0] == MODE_SENSE) {
- if ((page_code == 0x3F) || (page_code == 0x20)) {
+ if (page_code == 0x3F || page_code == 0x20) {
ms_mode_sense(chip, srb->cmnd[0],
lun, buf, data_size);
} else {
@@ -794,7 +794,7 @@ static int mode_sense(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[3] = 0x00;
}
} else {
- if ((page_code == 0x3F) || (page_code == 0x20)) {
+ if (page_code == 0x3F || page_code == 0x20) {
ms_mode_sense(chip, srb->cmnd[0],
lun, buf, data_size);
} else {
@@ -879,7 +879,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
#endif
- if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) {
start_sec = ((u32)srb->cmnd[2] << 24) |
((u32)srb->cmnd[3] << 16) |
((u32)srb->cmnd[4] << 8) | ((u32)srb->cmnd[5]);
@@ -906,7 +906,7 @@ static int read_write(struct scsi_cmnd *srb, struct rtsx_chip *chip)
* In this situation, start_sec + sec_cnt will overflow, so we
* need to judge start_sec at first
*/
- if ((start_sec > get_card_size(chip, lun)) ||
+ if (start_sec > get_card_size(chip, lun) ||
((start_sec + sec_cnt) > get_card_size(chip, lun))) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_LBA_OVER_RANGE);
return TRANSPORT_FAILED;
@@ -993,9 +993,9 @@ static int read_format_capacity(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[i++] = 0;
/* Capacity List Length */
- if ((buf_len > 12) && chip->mspro_formatter_enable &&
+ if (buf_len > 12 && chip->mspro_formatter_enable &&
(chip->lun2card[lun] & MS_CARD) &&
- (!card || (card == MS_CARD))) {
+ (!card || card == MS_CARD)) {
buf[i++] = 0x10;
desc_cnt = 2;
} else {
@@ -1569,7 +1569,7 @@ static int get_dev_status(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
status[8] = 0;
if (CHECK_LUN_MODE(chip, SD_MS_2LUN) &&
- (chip->lun2card[lun] == MS_CARD)) {
+ chip->lun2card[lun] == MS_CARD) {
oc_now_mask = MS_OC_NOW;
oc_ever_mask = MS_OC_EVER;
} else {
@@ -2544,7 +2544,7 @@ static int get_card_bus_width(struct scsi_cmnd *srb, struct rtsx_chip *chip)
}
card = get_lun_card(chip, lun);
- if ((card == SD_CARD) || (card == MS_CARD)) {
+ if (card == SD_CARD || card == MS_CARD) {
bus_width = chip->card_bus_width[lun];
} else {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR);
@@ -2685,7 +2685,7 @@ void led_shine(struct scsi_cmnd *srb, struct rtsx_chip *chip)
unsigned int lun = SCSI_LUN(srb);
u16 sec_cnt;
- if ((srb->cmnd[0] == READ_10) || (srb->cmnd[0] == WRITE_10)) {
+ if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10) {
sec_cnt = ((u16)(srb->cmnd[7]) << 8) | srb->cmnd[8];
} else if ((srb->cmnd[0] == READ_6) || (srb->cmnd[0] == WRITE_6)) {
sec_cnt = srb->cmnd[4];
@@ -2716,9 +2716,9 @@ static int ms_format_cmnd(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((srb->cmnd[3] != 0x4D) || (srb->cmnd[4] != 0x47) ||
- (srb->cmnd[5] != 0x66) || (srb->cmnd[6] != 0x6D) ||
- (srb->cmnd[7] != 0x74)) {
+ if (srb->cmnd[3] != 0x4D || srb->cmnd[4] != 0x47 ||
+ srb->cmnd[5] != 0x66 || srb->cmnd[6] != 0x6D ||
+ srb->cmnd[7] != 0x74) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
@@ -2786,16 +2786,16 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((srb->cmnd[2] != 0xB0) || (srb->cmnd[4] != 0x4D) ||
- (srb->cmnd[5] != 0x53) || (srb->cmnd[6] != 0x49) ||
- (srb->cmnd[7] != 0x44)) {
+ if (srb->cmnd[2] != 0xB0 || srb->cmnd[4] != 0x4D ||
+ srb->cmnd[5] != 0x53 || srb->cmnd[6] != 0x49 ||
+ srb->cmnd[7] != 0x44) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
dev_info_id = srb->cmnd[3];
- if ((CHK_MSXC(ms_card) && (dev_info_id == 0x10)) ||
- (!CHK_MSXC(ms_card) && (dev_info_id == 0x13)) ||
+ if ((CHK_MSXC(ms_card) && dev_info_id == 0x10) ||
+ (!CHK_MSXC(ms_card) && dev_info_id == 0x13) ||
!CHK_MSPRO(ms_card)) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
@@ -2846,7 +2846,7 @@ static int get_ms_information(struct scsi_cmnd *srb, struct rtsx_chip *chip)
buf[i++] = data_len; /* Data length LSB */
/* Valid Bit */
buf[i++] = 0x80;
- if ((dev_info_id == 0x10) || (dev_info_id == 0x13)) {
+ if (dev_info_id == 0x10 || dev_info_id == 0x13) {
/* System Information */
memcpy(buf + i, ms_card->raw_sys_info, 96);
} else {
@@ -2978,8 +2978,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_GET_LOC_EKB:
if ((scsi_bufflen(srb) == 0x41C) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x1C)) {
+ srb->cmnd[8] == 0x04 &&
+ srb->cmnd[9] == 0x1C) {
retval = mg_get_local_EKB(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -2993,8 +2993,8 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_CHG:
if ((scsi_bufflen(srb) == 0x24) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x24)) {
+ srb->cmnd[8] == 0x00 &&
+ srb->cmnd[9] == 0x24) {
retval = mg_get_rsp_chg(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3009,12 +3009,12 @@ static int mg_report_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_GET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ srb->cmnd[8] == 0x04 &&
+ srb->cmnd[9] == 0x04 &&
+ srb->cmnd[2] == 0x00 &&
+ srb->cmnd[3] == 0x00 &&
+ srb->cmnd[4] == 0x00 &&
+ srb->cmnd[5] < 32) {
retval = mg_get_ICV(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3081,8 +3081,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
switch (key_format) {
case KF_SET_LEAF_ID:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ srb->cmnd[8] == 0x00 &&
+ srb->cmnd[9] == 0x0C) {
retval = mg_set_leaf_id(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3096,8 +3096,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_CHG_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ srb->cmnd[8] == 0x00 &&
+ srb->cmnd[9] == 0x0C) {
retval = mg_chg(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3111,8 +3111,8 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_RSP_HOST:
if ((scsi_bufflen(srb) == 0x0C) &&
- (srb->cmnd[8] == 0x00) &&
- (srb->cmnd[9] == 0x0C)) {
+ srb->cmnd[8] == 0x00 &&
+ srb->cmnd[9] == 0x0C) {
retval = mg_rsp(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3127,12 +3127,12 @@ static int mg_send_key(struct scsi_cmnd *srb, struct rtsx_chip *chip)
case KF_SET_ICV:
ms_card->mg_entry_num = srb->cmnd[5];
if ((scsi_bufflen(srb) == 0x404) &&
- (srb->cmnd[8] == 0x04) &&
- (srb->cmnd[9] == 0x04) &&
- (srb->cmnd[2] == 0x00) &&
- (srb->cmnd[3] == 0x00) &&
- (srb->cmnd[4] == 0x00) &&
- (srb->cmnd[5] < 32)) {
+ srb->cmnd[8] == 0x04 &&
+ srb->cmnd[9] == 0x04 &&
+ srb->cmnd[2] == 0x00 &&
+ srb->cmnd[3] == 0x00 &&
+ srb->cmnd[4] == 0x00 &&
+ srb->cmnd[5] < 32) {
retval = mg_set_ICV(srb, chip);
if (retval != STATUS_SUCCESS)
return TRANSPORT_FAILED;
@@ -3168,10 +3168,10 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
/* Block all SCSI command except for
* REQUEST_SENSE and rs_ppstatus
*/
- if (!((srb->cmnd[0] == VENDOR_CMND) &&
- (srb->cmnd[1] == SCSI_APP_CMD) &&
- (srb->cmnd[2] == GET_DEV_STATUS)) &&
- (srb->cmnd[0] != REQUEST_SENSE)) {
+ if (!(srb->cmnd[0] == VENDOR_CMND &&
+ srb->cmnd[1] == SCSI_APP_CMD &&
+ srb->cmnd[2] == GET_DEV_STATUS) &&
+ srb->cmnd[0] != REQUEST_SENSE) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR,
0x02, 0, 0x04, 0x04, 0, 0);
@@ -3181,9 +3181,9 @@ int rtsx_scsi_handler(struct scsi_cmnd *srb, struct rtsx_chip *chip)
#endif
if ((get_lun_card(chip, lun) == MS_CARD) &&
- (ms_card->format_status == FORMAT_IN_PROGRESS)) {
- if ((srb->cmnd[0] != REQUEST_SENSE) &&
- (srb->cmnd[0] != INQUIRY)) {
+ ms_card->format_status == FORMAT_IN_PROGRESS) {
+ if (srb->cmnd[0] != REQUEST_SENSE &&
+ srb->cmnd[0] != INQUIRY) {
/* Logical Unit Not Ready Format in Progress */
set_sense_data(chip, lun, CUR_ERR, 0x02, 0, 0x04, 0x04,
0, (u16)(ms_card->progress));
diff --git a/drivers/staging/rts5208/rtsx_transport.c b/drivers/staging/rts5208/rtsx_transport.c
index 909a3e663ef6..805dc18fac0a 100644
--- a/drivers/staging/rts5208/rtsx_transport.c
+++ b/drivers/staging/rts5208/rtsx_transport.c
@@ -326,7 +326,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
struct scatterlist *sg_ptr;
u32 val = TRIG_DMA;
- if (!sg || (num_sg <= 0) || !offset || !index)
+ if (!sg || num_sg <= 0 || !offset || !index)
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
@@ -489,7 +489,7 @@ static int rtsx_transfer_sglist_adma(struct rtsx_chip *chip, u8 card,
long timeleft;
struct scatterlist *sg_ptr;
- if (!sg || (num_sg <= 0))
+ if (!sg || num_sg <= 0)
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
@@ -635,7 +635,7 @@ static int rtsx_transfer_buf(struct rtsx_chip *chip, u8 card, void *buf,
u32 val = BIT(31);
long timeleft;
- if (!buf || (len <= 0))
+ if (!buf || len <= 0)
return -EIO;
if (dma_dir == DMA_TO_DEVICE)
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 25c31496757e..d1fafd530c80 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -218,9 +218,9 @@ RTY_SEND_CMD:
}
}
- if ((rsp_type == SD_RSP_TYPE_R1) || (rsp_type == SD_RSP_TYPE_R1b)) {
- if ((cmd_idx != SEND_RELATIVE_ADDR) &&
- (cmd_idx != SEND_IF_COND)) {
+ if (rsp_type == SD_RSP_TYPE_R1 || rsp_type == SD_RSP_TYPE_R1b) {
+ if (cmd_idx != SEND_RELATIVE_ADDR &&
+ cmd_idx != SEND_IF_COND) {
if (cmd_idx != STOP_TRANSMISSION) {
if (ptr[1] & 0x80)
return STATUS_FAIL;
@@ -462,7 +462,7 @@ static int sd_check_csd(struct rtsx_chip *chip, char check_wp)
if (CHK_MMC_SECTOR_MODE(sd_card)) {
sd_card->capacity = 0;
} else {
- if ((!CHK_SD_HCXC(sd_card)) || (csd_ver == 0)) {
+ if ((!CHK_SD_HCXC(sd_card)) || csd_ver == 0) {
u8 blk_size, c_size_mult;
u16 c_size;
@@ -1077,7 +1077,7 @@ static int sd_query_switch_result(struct rtsx_chip *chip, u8 func_group,
}
/* Check 'Busy Status' */
- if ((buf[DATA_STRUCTURE_VER_OFFSET] == 0x01) &&
+ if (buf[DATA_STRUCTURE_VER_OFFSET] == 0x01 &&
((buf[check_busy_offset] & switch_busy) == switch_busy)) {
return STATUS_FAIL;
}
@@ -1148,7 +1148,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
dev_dbg(rtsx_dev(chip), "Maximum current consumption: %dmA\n",
cc);
- if ((cc == 0) || (cc > 800))
+ if (cc == 0 || cc > 800)
return STATUS_FAIL;
retval = sd_query_switch_result(chip, func_group,
@@ -1156,7 +1156,7 @@ static int sd_check_switch_mode(struct rtsx_chip *chip, u8 mode, u8 func_group,
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- if ((cc > 400) || (func_to_switch > CURRENT_LIMIT_400)) {
+ if (cc > 400 || func_to_switch > CURRENT_LIMIT_400) {
retval = rtsx_write_register(chip, OCPPARA2,
SD_OCP_THD_MASK,
chip->sd_800mA_ocp_thd);
@@ -1292,7 +1292,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
#ifdef SUPPORT_SD_LOCK
if ((sd_card->sd_lock_status & SD_SDR_RST) &&
- (func_to_switch == DDR50_SUPPORT) &&
+ func_to_switch == DDR50_SUPPORT &&
(sd_card->func_group1_mask & SDR50_SUPPORT_MASK)) {
func_to_switch = SDR50_SUPPORT;
dev_dbg(rtsx_dev(chip), "Using SDR50 instead of DDR50 for SD Lock\n");
@@ -1335,7 +1335,7 @@ static int sd_switch_function(struct rtsx_chip *chip, u8 bus_width)
return STATUS_FAIL;
}
- if (!func_to_switch || (func_to_switch == HS_SUPPORT)) {
+ if (!func_to_switch || func_to_switch == HS_SUPPORT) {
/* Do not try to switch current limit if the card doesn't
* support UHS mode or we don't want it to support UHS mode
*/
@@ -1664,8 +1664,8 @@ static u8 sd_search_final_phase(struct rtsx_chip *chip, u32 phase_map,
path[idx].mid = path[idx].start + path[idx].len / 2;
}
- if ((path[0].start == 0) &&
- (path[cont_path_cnt - 1].end == MAX_PHASE)) {
+ if (path[0].start == 0 &&
+ path[cont_path_cnt - 1].end == MAX_PHASE) {
path[0].start = path[cont_path_cnt - 1].start - MAX_PHASE - 1;
path[0].len += path[cont_path_cnt - 1].len;
path[0].mid = path[0].start + path[0].len / 2;
@@ -1811,7 +1811,7 @@ static int sd_ddr_pre_tuning_tx(struct rtsx_chip *chip)
retval = sd_send_cmd_get_rsp(chip, SEND_STATUS,
sd_card->sd_addr, SD_RSP_TYPE_R1,
NULL, 0);
- if ((retval == STATUS_SUCCESS) ||
+ if (retval == STATUS_SUCCESS ||
!sd_check_err_code(chip, SD_RSP_TIMEOUT))
phase_map |= 1 << i;
}
@@ -2269,7 +2269,7 @@ static int sd_check_wp_state(struct rtsx_chip *chip)
sd_card_type = ((u16)buf[2] << 8) | buf[3];
dev_dbg(rtsx_dev(chip), "sd_card_type = 0x%04x\n", sd_card_type);
- if ((sd_card_type == 0x0001) || (sd_card_type == 0x0002)) {
+ if (sd_card_type == 0x0001 || sd_card_type == 0x0002) {
/* ROM card or OTP */
chip->card_wp |= SD_CARD;
}
@@ -2361,7 +2361,7 @@ RTY_SD_RST:
retval = sd_send_cmd_get_rsp(chip, SEND_IF_COND, 0x000001AA,
SD_RSP_TYPE_R7, rsp, 5);
if (retval == STATUS_SUCCESS) {
- if ((rsp[4] == 0xAA) && ((rsp[3] & 0x0f) == 0x01)) {
+ if (rsp[4] == 0xAA && ((rsp[3] & 0x0f) == 0x01)) {
hi_cap_flow = true;
voltage = SUPPORT_VOLTAGE | 0x40000000;
}
@@ -2713,7 +2713,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
if (width == MMC_8BIT_BUS) {
dev_dbg(rtsx_dev(chip), "BUSTEST_R [8bits]: 0x%02x 0x%02x\n",
ptr[0], ptr[1]);
- if ((ptr[0] == 0xAA) && (ptr[1] == 0x55)) {
+ if (ptr[0] == 0xAA && ptr[1] == 0x55) {
u8 rsp[5];
u32 arg;
@@ -2724,7 +2724,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
SD_RSP_TYPE_R1b, rsp, 5);
- if ((retval == STATUS_SUCCESS) &&
+ if (retval == STATUS_SUCCESS &&
!(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
@@ -2741,7 +2741,7 @@ static int mmc_test_switch_bus(struct rtsx_chip *chip, u8 width)
retval = sd_send_cmd_get_rsp(chip, SWITCH, arg,
SD_RSP_TYPE_R1b, rsp, 5);
- if ((retval == STATUS_SUCCESS) &&
+ if (retval == STATUS_SUCCESS &&
!(rsp[4] & MMC_SWITCH_ERR))
return SWITCH_SUCCESS;
}
@@ -2830,7 +2830,7 @@ static int mmc_switch_timing_bus(struct rtsx_chip *chip, bool switch_ddr)
retval = sd_send_cmd_get_rsp(chip, SWITCH, 0x03B90100,
SD_RSP_TYPE_R1b, rsp, 5);
- if ((retval != STATUS_SUCCESS) || (rsp[4] & MMC_SWITCH_ERR))
+ if (retval != STATUS_SUCCESS || (rsp[4] & MMC_SWITCH_ERR))
CLR_MMC_HS(sd_card);
}
@@ -2989,7 +2989,7 @@ MMC_UNLOCK_ENTRY:
}
}
- if (CHK_MMC_SECTOR_MODE(sd_card) && (sd_card->capacity == 0))
+ if (CHK_MMC_SECTOR_MODE(sd_card) && sd_card->capacity == 0)
return STATUS_FAIL;
if (switch_ddr && CHK_MMC_DDR52(sd_card)) {
@@ -3333,11 +3333,11 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
}
if (sd_card->seq_mode &&
- ((sd_card->pre_dir != srb->sc_data_direction) ||
+ (sd_card->pre_dir != srb->sc_data_direction ||
((sd_card->pre_sec_addr + sd_card->pre_sec_cnt) !=
start_sector))) {
- if ((sd_card->pre_sec_cnt < 0x80) &&
- (sd_card->pre_dir == DMA_FROM_DEVICE) &&
+ if (sd_card->pre_sec_cnt < 0x80 &&
+ sd_card->pre_dir == DMA_FROM_DEVICE &&
!CHK_SD30_SPEED(sd_card) &&
!CHK_SD_HS(sd_card) &&
!CHK_MMC_HS(sd_card)) {
@@ -3361,7 +3361,7 @@ int sd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector,
goto RW_FAIL;
}
- if ((sd_card->pre_sec_cnt < 0x80) &&
+ if (sd_card->pre_sec_cnt < 0x80 &&
!CHK_SD30_SPEED(sd_card) &&
!CHK_SD_HS(sd_card) &&
!CHK_MMC_HS(sd_card)) {
@@ -3666,9 +3666,9 @@ RTY_SEND_CMD:
}
}
- if ((cmd_idx == SELECT_CARD) || (cmd_idx == APP_CMD) ||
- (cmd_idx == SEND_STATUS) || (cmd_idx == STOP_TRANSMISSION)) {
- if ((cmd_idx != STOP_TRANSMISSION) && !special_check) {
+ if (cmd_idx == SELECT_CARD || cmd_idx == APP_CMD ||
+ cmd_idx == SEND_STATUS || cmd_idx == STOP_TRANSMISSION) {
+ if (cmd_idx != STOP_TRANSMISSION && !special_check) {
if (ptr[1] & 0x80)
return STATUS_FAIL;
}
@@ -3772,10 +3772,10 @@ int sd_pass_thru_mode(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ if (srb->cmnd[2] != 0x53 || srb->cmnd[3] != 0x44 ||
+ srb->cmnd[4] != 0x20 || srb->cmnd[5] != 0x43 ||
+ srb->cmnd[6] != 0x61 || srb->cmnd[7] != 0x72 ||
+ srb->cmnd[8] != 0x64) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
@@ -4596,10 +4596,10 @@ int sd_hw_rst(struct scsi_cmnd *srb, struct rtsx_chip *chip)
return TRANSPORT_FAILED;
}
- if ((srb->cmnd[2] != 0x53) || (srb->cmnd[3] != 0x44) ||
- (srb->cmnd[4] != 0x20) || (srb->cmnd[5] != 0x43) ||
- (srb->cmnd[6] != 0x61) || (srb->cmnd[7] != 0x72) ||
- (srb->cmnd[8] != 0x64)) {
+ if (srb->cmnd[2] != 0x53 || srb->cmnd[3] != 0x44 ||
+ srb->cmnd[4] != 0x20 || srb->cmnd[5] != 0x43 ||
+ srb->cmnd[6] != 0x61 || srb->cmnd[7] != 0x72 ||
+ srb->cmnd[8] != 0x64) {
set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD);
return TRANSPORT_FAILED;
}
diff --git a/drivers/staging/rts5208/xd.c b/drivers/staging/rts5208/xd.c
index a305e15dfb9c..42cab93982c0 100644
--- a/drivers/staging/rts5208/xd.c
+++ b/drivers/staging/rts5208/xd.c
@@ -177,7 +177,7 @@ static int xd_read_data_from_ppb(struct rtsx_chip *chip, int offset,
{
int retval, i;
- if (!buf || (buf_len < 0))
+ if (!buf || buf_len < 0)
return STATUS_FAIL;
rtsx_init_cmd(chip);
@@ -203,7 +203,7 @@ static int xd_read_cis(struct rtsx_chip *chip, u32 page_addr, u8 *buf,
int retval;
u8 reg;
- if (!buf || (buf_len < 10))
+ if (!buf || buf_len < 10)
return STATUS_FAIL;
rtsx_init_cmd(chip);
@@ -713,7 +713,7 @@ static int reset_xd(struct rtsx_chip *chip)
}
/* Check CIS data */
- if ((redunt[BLOCK_STATUS] == XD_GBLK) &&
+ if (redunt[BLOCK_STATUS] == XD_GBLK &&
(redunt[PARITY] & XD_BA1_ALL0)) {
u8 buf[10];
@@ -723,12 +723,12 @@ static int reset_xd(struct rtsx_chip *chip)
if (retval != STATUS_SUCCESS)
return STATUS_FAIL;
- if ((buf[0] == 0x01) && (buf[1] == 0x03) &&
- (buf[2] == 0xD9) &&
- (buf[3] == 0x01) && (buf[4] == 0xFF) &&
- (buf[5] == 0x18) && (buf[6] == 0x02) &&
- (buf[7] == 0xDF) && (buf[8] == 0x01) &&
- (buf[9] == 0x20)) {
+ if (buf[0] == 0x01 && buf[1] == 0x03 &&
+ buf[2] == 0xD9 &&
+ buf[3] == 0x01 && buf[4] == 0xFF &&
+ buf[5] == 0x18 && buf[6] == 0x02 &&
+ buf[7] == 0xDF && buf[8] == 0x01 &&
+ buf[9] == 0x20) {
xd_card->cis_block = (u16)i;
}
}
@@ -847,8 +847,8 @@ static void xd_set_unused_block(struct rtsx_chip *chip, u32 phy_blk)
return;
}
- if ((zone->set_index >= XD_FREE_TABLE_CNT) ||
- (zone->set_index < 0)) {
+ if (zone->set_index >= XD_FREE_TABLE_CNT ||
+ zone->set_index < 0) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Set unused block fail, invalid set_index\n");
return;
@@ -876,13 +876,13 @@ static u32 xd_get_unused_block(struct rtsx_chip *chip, int zone_no)
}
zone = &xd_card->zone[zone_no];
- if ((zone->unused_blk_cnt == 0) ||
- (zone->set_index == zone->get_index)) {
+ if (zone->unused_blk_cnt == 0 ||
+ zone->set_index == zone->get_index) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Get unused block fail, no unused block available\n");
return BLK_NOT_FOUND;
}
- if ((zone->get_index >= XD_FREE_TABLE_CNT) || (zone->get_index < 0)) {
+ if (zone->get_index >= XD_FREE_TABLE_CNT || zone->get_index < 0) {
free_zone(zone);
dev_dbg(rtsx_dev(chip), "Get unused block fail, invalid get_index\n");
return BLK_NOT_FOUND;
@@ -1109,7 +1109,7 @@ static int xd_copy_page(struct rtsx_chip *chip, u32 old_blk, u32 new_blk,
if (start_page > end_page)
return STATUS_FAIL;
- if ((old_blk == BLK_NOT_FOUND) || (new_blk == BLK_NOT_FOUND))
+ if (old_blk == BLK_NOT_FOUND || new_blk == BLK_NOT_FOUND)
return STATUS_FAIL;
old_page = (old_blk << xd_card->block_shift) + start_page;
@@ -1375,16 +1375,16 @@ static int xd_build_l2p_tbl(struct rtsx_chip *chip, int zone_no)
}
cur_fst_page_logoff = xd_load_log_block_addr(redunt);
- if ((cur_fst_page_logoff == 0xFFFF) ||
- (cur_fst_page_logoff > max_logoff)) {
+ if (cur_fst_page_logoff == 0xFFFF ||
+ cur_fst_page_logoff > max_logoff) {
retval = xd_erase_block(chip, i);
if (retval == STATUS_SUCCESS)
xd_set_unused_block(chip, i);
continue;
}
- if ((zone_no == 0) && (cur_fst_page_logoff == 0) &&
- (redunt[PAGE_STATUS] != XD_GPG))
+ if (zone_no == 0 && cur_fst_page_logoff == 0 &&
+ redunt[PAGE_STATUS] != XD_GPG)
XD_SET_MBR_FAIL(xd_card);
if (zone->l2p_table[cur_fst_page_logoff] == 0xFFFF) {
@@ -1874,8 +1874,8 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
if (srb->sc_data_direction == DMA_TO_DEVICE) {
#ifdef XD_DELAY_WRITE
if (delay_write->delay_write_flag &&
- (delay_write->logblock == log_blk) &&
- (start_page > delay_write->pageoff)) {
+ delay_write->logblock == log_blk &&
+ start_page > delay_write->pageoff) {
delay_write->delay_write_flag = 0;
if (delay_write->old_phyblock != BLK_NOT_FOUND) {
retval = xd_copy_page(chip,
@@ -1907,8 +1907,8 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
#endif
old_blk = xd_get_l2p_tbl(chip, zone_no, log_off);
new_blk = xd_get_unused_block(chip, zone_no);
- if ((old_blk == BLK_NOT_FOUND) ||
- (new_blk == BLK_NOT_FOUND)) {
+ if (old_blk == BLK_NOT_FOUND ||
+ new_blk == BLK_NOT_FOUND) {
set_sense_type(chip, lun,
SENSE_TYPE_MEDIA_WRITE_ERR);
return STATUS_FAIL;
@@ -2034,7 +2034,7 @@ int xd_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip,
start_page = 0;
}
- if ((srb->sc_data_direction == DMA_TO_DEVICE) &&
+ if (srb->sc_data_direction == DMA_TO_DEVICE &&
(end_page != (xd_card->page_off + 1))) {
#ifdef XD_DELAY_WRITE
delay_write->delay_write_flag = 1;
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index a3bc568c660d..62cd9b783732 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1782,6 +1782,7 @@ static int visornic_probe(struct visor_device *dev)
struct net_device *netdev = NULL;
int err;
int channel_offset = 0;
+ u8 addr[ETH_ALEN];
u64 features;
netdev = alloc_etherdev(sizeof(struct visornic_devdata));
@@ -1798,14 +1799,14 @@ static int visornic_probe(struct visor_device *dev)
/* Get MAC address from channel and read it into the device. */
netdev->addr_len = ETH_ALEN;
channel_offset = offsetof(struct visor_io_channel, vnic.macaddr);
- err = visorbus_read_channel(dev, channel_offset, netdev->dev_addr,
- ETH_ALEN);
+ err = visorbus_read_channel(dev, channel_offset, addr, ETH_ALEN);
if (err < 0) {
dev_err(&dev->device,
"%s failed to get mac addr from chan (%d)\n",
__func__, err);
goto cleanup_netdev;
}
+ eth_hw_addr_set(netdev, addr);
devdata = devdata_initialize(netdev_priv(netdev), dev);
if (!devdata) {
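The visornic hunk follows the pattern required since netdev->dev_addr became const: read the MAC into a local buffer and install it with eth_hw_addr_set() instead of writing into the device address directly. A minimal sketch of that pattern, assuming a hypothetical read_mac_from_hw() helper in place of visorbus_read_channel():

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Hypothetical stand-in for the bus read; fills buf with ETH_ALEN bytes. */
static int read_mac_from_hw(u8 *buf)
{
        eth_random_addr(buf);   /* placeholder for a real hardware read */
        return 0;
}

static int install_mac(struct net_device *netdev)
{
        u8 addr[ETH_ALEN];
        int err;

        /* Read into a stack buffer: dev_addr is const and must not be
         * written directly any more.
         */
        err = read_mac_from_hw(addr);
        if (err < 0)
                return err;

        /* Copy the buffer into the device through the accessor. */
        eth_hw_addr_set(netdev, addr);
        return 0;
}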
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 967f10b9582a..c650a32bcedf 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -68,6 +68,11 @@ struct vchiq_state g_state;
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
+struct vchiq_drvdata {
+ const unsigned int cache_line_size;
+ struct rpi_firmware *fw;
+};
+
static struct vchiq_drvdata bcm2835_drvdata = {
.cache_line_size = 32,
};
@@ -76,6 +81,40 @@ static struct vchiq_drvdata bcm2836_drvdata = {
.cache_line_size = 64,
};
+struct vchiq_arm_state {
+ /* Keepalive-related data */
+ struct task_struct *ka_thread;
+ struct completion ka_evt;
+ atomic_t ka_use_count;
+ atomic_t ka_use_ack_count;
+ atomic_t ka_release_count;
+
+ rwlock_t susp_res_lock;
+
+ struct vchiq_state *state;
+
+ /*
+ * Global use count for videocore.
+ * This is equal to the sum of the use counts for all services. When
+ * this hits zero the videocore suspend procedure will be initiated.
+ */
+ int videocore_use_count;
+
+ /*
+ * Use count to track requests from videocore peer.
+ * This use count is not associated with a service, so needs to be
+ * tracked separately with the state.
+ */
+ int peer_use_count;
+
+ /*
+ * Flag to indicate that the first vchiq connect has made it through.
+ * This means that both sides should be fully ready, and we should
+ * be able to suspend after this point.
+ */
+ int first_connect;
+};
+
struct vchiq_2835_state {
int inited;
struct vchiq_arm_state arm_state;
@@ -115,7 +154,7 @@ static DEFINE_SEMAPHORE(g_free_fragments_mutex);
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
- unsigned int size, enum vchiq_bulk_dir dir);
+ unsigned int size, enum vchiq_bulk_dir dir);
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
@@ -251,11 +290,8 @@ create_pagelist(char *buf, char __user *ubuf,
}
/* do not try and release vmalloc pages */
} else {
- actual_pages = pin_user_pages_fast(
- (unsigned long)ubuf & PAGE_MASK,
- num_pages,
- type == PAGELIST_READ,
- pages);
+ actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
+ type == PAGELIST_READ, pages);
if (actual_pages != num_pages) {
vchiq_log_info(vchiq_arm_log_level,
@@ -325,9 +361,9 @@ create_pagelist(char *buf, char __user *ubuf,
/* Partial cache lines (fragments) require special measures */
if ((type == PAGELIST_READ) &&
- ((pagelist->offset & (g_cache_line_size - 1)) ||
- ((pagelist->offset + pagelist->length) &
- (g_cache_line_size - 1)))) {
+ ((pagelist->offset & (g_cache_line_size - 1)) ||
+ ((pagelist->offset + pagelist->length) &
+ (g_cache_line_size - 1)))) {
char *fragments;
if (down_interruptible(&g_free_fragments_sema)) {
@@ -340,7 +376,7 @@ create_pagelist(char *buf, char __user *ubuf,
down(&g_free_fragments_mutex);
fragments = g_free_fragments;
WARN_ON(!fragments);
- g_free_fragments = *(char **) g_free_fragments;
+ g_free_fragments = *(char **)g_free_fragments;
up(&g_free_fragments_mutex);
pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
(fragments - g_fragments_base) / g_fragments_size;
@@ -391,7 +427,7 @@ free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
kunmap(pages[0]);
}
if ((actual >= 0) && (head_bytes < actual) &&
- (tail_bytes != 0)) {
+ (tail_bytes != 0)) {
memcpy((char *)kmap(pages[num_pages - 1]) +
((pagelist->offset + actual) &
(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
@@ -469,8 +505,8 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
g_free_fragments = g_fragments_base;
for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
- *(char **)&g_fragments_base[i*g_fragments_size] =
- &g_fragments_base[(i + 1)*g_fragments_size];
+ *(char **)&g_fragments_base[i * g_fragments_size] =
+ &g_fragments_base[(i + 1) * g_fragments_size];
}
*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
@@ -504,15 +540,31 @@ int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
}
g_dev = dev;
- vchiq_log_info(vchiq_arm_log_level,
- "vchiq_init - done (slots %pK, phys %pad)",
- vchiq_slot_zero, &slot_phys);
+ vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
+ vchiq_slot_zero, &slot_phys);
vchiq_call_connected_callbacks();
return 0;
}
+static void
+vchiq_arm_init_state(struct vchiq_state *state,
+ struct vchiq_arm_state *arm_state)
+{
+ if (arm_state) {
+ rwlock_init(&arm_state->susp_res_lock);
+
+ init_completion(&arm_state->ka_evt);
+ atomic_set(&arm_state->ka_use_count, 0);
+ atomic_set(&arm_state->ka_use_ack_count, 0);
+ atomic_set(&arm_state->ka_release_count, 0);
+
+ arm_state->state = state;
+ arm_state->first_connect = 0;
+ }
+}
+
int
vchiq_platform_init_state(struct vchiq_state *state)
{
@@ -593,8 +645,7 @@ int vchiq_dump_platform_state(void *dump_context)
char buf[80];
int len;
- len = snprintf(buf, sizeof(buf),
- " Platform: 2835 (VC master)");
+ len = snprintf(buf, sizeof(buf), " Platform: 2835 (VC master)");
return vchiq_dump(dump_context, buf, len + 1);
}
@@ -617,20 +668,18 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
usleep_range(500, 600);
}
if (i == VCHIQ_INIT_RETRIES) {
- vchiq_log_error(vchiq_core_log_level,
- "%s: videocore not initialized\n", __func__);
+ vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
ret = -ENOTCONN;
goto failed;
} else if (i > 0) {
vchiq_log_warning(vchiq_core_log_level,
- "%s: videocore initialized after %d retries\n",
- __func__, i);
+ "%s: videocore initialized after %d retries\n", __func__, i);
}
instance = kzalloc(sizeof(*instance), GFP_KERNEL);
if (!instance) {
vchiq_log_error(vchiq_core_log_level,
- "%s: error allocating vchiq instance\n", __func__);
+ "%s: error allocating vchiq instance\n", __func__);
ret = -ENOMEM;
goto failed;
}
@@ -645,8 +694,7 @@ int vchiq_initialise(struct vchiq_instance **instance_out)
ret = 0;
failed:
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, ret);
+ vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
return ret;
}
@@ -659,9 +707,8 @@ void free_bulk_waiter(struct vchiq_instance *instance)
list_for_each_entry_safe(waiter, next,
&instance->bulk_waiter_list, list) {
list_del(&waiter->list);
- vchiq_log_info(vchiq_arm_log_level,
- "bulk_waiter - cleaned up %pK for pid %d",
- waiter, waiter->pid);
+ vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
+ waiter, waiter->pid);
kfree(waiter);
}
}
@@ -679,8 +726,7 @@ enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
free_bulk_waiter(instance);
kfree(instance);
@@ -700,8 +746,7 @@ enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
struct vchiq_state *state = instance->state;
if (mutex_lock_killable(&state->mutex)) {
- vchiq_log_trace(vchiq_core_log_level,
- "%s: call to mutex_lock failed", __func__);
+ vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
status = VCHIQ_RETRY;
goto failed;
}
@@ -713,8 +758,7 @@ enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
mutex_unlock(&state->mutex);
failed:
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -736,12 +780,7 @@ vchiq_add_service(struct vchiq_instance *instance,
? VCHIQ_SRVSTATE_LISTENING
: VCHIQ_SRVSTATE_HIDDEN;
- service = vchiq_add_service_internal(
- state,
- params,
- srvstate,
- instance,
- NULL);
+ service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
if (service) {
*phandle = service->handle;
@@ -750,8 +789,7 @@ vchiq_add_service(struct vchiq_instance *instance,
status = VCHIQ_ERROR;
}
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -770,11 +808,7 @@ vchiq_open_service(struct vchiq_instance *instance,
if (!vchiq_is_connected(instance))
goto failed;
- service = vchiq_add_service_internal(state,
- params,
- VCHIQ_SRVSTATE_OPENING,
- instance,
- NULL);
+ service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
if (service) {
*phandle = service->handle;
@@ -786,8 +820,7 @@ vchiq_open_service(struct vchiq_instance *instance,
}
failed:
- vchiq_log_trace(vchiq_core_log_level,
- "%s(%p): returning %d", __func__, instance, status);
+ vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
return status;
}
@@ -809,8 +842,8 @@ vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
VCHIQ_BULK_TRANSMIT);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle,
- (void *)data, size, VCHIQ_BULK_TRANSMIT);
+ status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ VCHIQ_BULK_TRANSMIT);
break;
default:
return VCHIQ_ERROR;
@@ -846,8 +879,8 @@ enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
mode, VCHIQ_BULK_RECEIVE);
break;
case VCHIQ_BULK_MODE_BLOCKING:
- status = vchiq_blocking_bulk_transfer(handle,
- (void *)data, size, VCHIQ_BULK_RECEIVE);
+ status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
+ VCHIQ_BULK_RECEIVE);
break;
default:
return VCHIQ_ERROR;
@@ -902,8 +935,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
if (bulk) {
/* This thread has an outstanding bulk transfer. */
/* FIXME: why compare a dma address to a pointer? */
- if ((bulk->data != (dma_addr_t)(uintptr_t)data) ||
- (bulk->size != size)) {
+ if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
/*
* This is not a retry of the previous one.
* Cancel the signal when the transfer completes.
@@ -916,8 +948,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
} else {
waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
if (!waiter) {
- vchiq_log_error(vchiq_core_log_level,
- "%s - out of memory", __func__);
+ vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
return VCHIQ_ERROR;
}
}
@@ -925,8 +956,7 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
status = vchiq_bulk_transfer(handle, data, NULL, size,
&waiter->bulk_waiter,
VCHIQ_BULK_MODE_BLOCKING, dir);
- if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
- !waiter->bulk_waiter.bulk) {
+ if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
if (bulk) {
@@ -941,9 +971,8 @@ vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
mutex_lock(&instance->bulk_waiter_list_mutex);
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
- vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %pK for pid %d",
- waiter, current->pid);
+ vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
+ current->pid);
}
return status;
@@ -957,23 +986,19 @@ add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
struct vchiq_completion_data_kernel *completion;
int insert;
- DEBUG_INITIALISE(g_state.local)
+ DEBUG_INITIALISE(g_state.local);
insert = instance->completion_insert;
while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
/* Out of space - wait for the client */
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- vchiq_log_trace(vchiq_arm_log_level,
- "%s - completion queue full", __func__);
+ vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
- if (wait_for_completion_interruptible(
- &instance->remove_event)) {
- vchiq_log_info(vchiq_arm_log_level,
- "service_callback interrupted");
+ if (wait_for_completion_interruptible(&instance->remove_event)) {
+ vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
return VCHIQ_RETRY;
} else if (instance->closing) {
- vchiq_log_info(vchiq_arm_log_level,
- "service_callback closing");
+ vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
return VCHIQ_SUCCESS;
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1029,7 +1054,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
struct vchiq_instance *instance;
bool skip_completion = false;
- DEBUG_INITIALISE(g_state.local)
+ DEBUG_INITIALISE(g_state.local);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
@@ -1044,11 +1069,10 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
return VCHIQ_SUCCESS;
vchiq_log_trace(vchiq_arm_log_level,
- "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
- __func__, (unsigned long)user_service,
- service->localport, user_service->userdata,
- reason, (unsigned long)header,
- (unsigned long)instance, (unsigned long)bulk_userdata);
+ "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
+ __func__, (unsigned long)user_service, service->localport,
+ user_service->userdata, reason, (unsigned long)header,
+ (unsigned long)instance, (unsigned long)bulk_userdata);
if (header && user_service->is_vchi) {
spin_lock(&msg_queue_spinlock);
@@ -1057,8 +1081,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
- vchiq_log_trace(vchiq_arm_log_level,
- "service_callback - msg queue full");
+ vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
/*
* If there is no MESSAGE_AVAILABLE in the completion
* queue, add one
@@ -1068,10 +1091,10 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
enum vchiq_status status;
vchiq_log_info(vchiq_arm_log_level,
- "Inserting extra MESSAGE_AVAILABLE");
+ "Inserting extra MESSAGE_AVAILABLE");
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- status = add_completion(instance, reason,
- NULL, user_service, bulk_userdata);
+ status = add_completion(instance, reason, NULL, user_service,
+ bulk_userdata);
if (status != VCHIQ_SUCCESS) {
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
return status;
@@ -1079,15 +1102,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header,
}
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
- if (wait_for_completion_interruptible(
- &user_service->remove_event)) {
- vchiq_log_info(vchiq_arm_log_level,
- "%s interrupted", __func__);
+ if (wait_for_completion_interruptible(&user_service->remove_event)) {
+ vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
return VCHIQ_RETRY;
} else if (instance->closing) {
- vchiq_log_info(vchiq_arm_log_level,
- "%s closing", __func__);
+ vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
DEBUG_TRACE(SERVICE_CALLBACK_LINE);
return VCHIQ_ERROR;
}
@@ -1238,12 +1258,10 @@ int vchiq_dump_platform_service_state(void *dump_context,
len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
- if ((service->base.callback == service_callback) &&
- user_service->is_vchi) {
- len += scnprintf(buf + len, sizeof(buf) - len,
- ", %d/%d messages",
- user_service->msg_insert - user_service->msg_remove,
- MSG_QUEUE_SIZE);
+ if ((service->base.callback == service_callback) && user_service->is_vchi) {
+ len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
+ user_service->msg_insert - user_service->msg_remove,
+ MSG_QUEUE_SIZE);
if (user_service->dequeue_pending)
len += scnprintf(buf + len, sizeof(buf) - len,
@@ -1256,7 +1274,6 @@ int vchiq_dump_platform_service_state(void *dump_context,
struct vchiq_state *
vchiq_get_state(void)
{
-
if (!g_state.remote)
pr_err("%s: g_state.remote == NULL\n", __func__);
else if (g_state.remote->initialised != 1)
@@ -1276,8 +1293,7 @@ vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
struct vchiq_header *header,
unsigned int service_user, void *bulk_user)
{
- vchiq_log_error(vchiq_susp_log_level,
- "%s callback reason %d", __func__, reason);
+ vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
return 0;
}
@@ -1301,22 +1317,22 @@ vchiq_keepalive_thread_func(void *v)
ret = vchiq_initialise(&instance);
if (ret) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_initialise failed %d", __func__, ret);
+ vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
+ ret);
goto exit;
}
status = vchiq_connect(instance);
if (status != VCHIQ_SUCCESS) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_connect failed %d", __func__, status);
+ vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
+ status);
goto shutdown;
}
status = vchiq_add_service(instance, &params, &ka_handle);
if (status != VCHIQ_SUCCESS) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_open_service failed %d", __func__, status);
+ vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
+ status);
goto shutdown;
}
@@ -1324,8 +1340,7 @@ vchiq_keepalive_thread_func(void *v)
long rc = 0, uc = 0;
if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
- vchiq_log_error(vchiq_susp_log_level,
- "%s interrupted", __func__);
+ vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
flush_signals(current);
continue;
}
@@ -1346,16 +1361,15 @@ vchiq_keepalive_thread_func(void *v)
status = vchiq_use_service(ka_handle);
if (status != VCHIQ_SUCCESS) {
vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_use_service error %d",
- __func__, status);
+ "%s vchiq_use_service error %d", __func__, status);
}
}
while (rc--) {
status = vchiq_release_service(ka_handle);
if (status != VCHIQ_SUCCESS) {
vchiq_log_error(vchiq_susp_log_level,
- "%s vchiq_release_service error %d",
- __func__, status);
+ "%s vchiq_release_service error %d", __func__,
+ status);
}
}
}
@@ -1366,24 +1380,6 @@ exit:
return 0;
}
-void
-vchiq_arm_init_state(struct vchiq_state *state,
- struct vchiq_arm_state *arm_state)
-{
- if (arm_state) {
- rwlock_init(&arm_state->susp_res_lock);
-
- init_completion(&arm_state->ka_evt);
- atomic_set(&arm_state->ka_use_count, 0);
- atomic_set(&arm_state->ka_use_ack_count, 0);
- atomic_set(&arm_state->ka_release_count, 0);
-
- arm_state->state = state;
- arm_state->first_connect = 0;
-
- }
-}
-
int
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type)
@@ -1417,9 +1413,8 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
local_uc = ++arm_state->videocore_use_count;
++(*entity_uc);
- vchiq_log_trace(vchiq_susp_log_level,
- "%s %s count %d, state count %d",
- __func__, entity, *entity_uc, local_uc);
+ vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
+ *entity_uc, local_uc);
write_unlock_bh(&arm_state->susp_res_lock);
@@ -1433,8 +1428,7 @@ vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
if (status == VCHIQ_SUCCESS)
ack_cnt--;
else
- atomic_add(ack_cnt,
- &arm_state->ka_use_ack_count);
+ atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
}
}
@@ -1477,10 +1471,8 @@ vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
--arm_state->videocore_use_count;
--(*entity_uc);
- vchiq_log_trace(vchiq_susp_log_level,
- "%s %s count %d, state count %d",
- __func__, entity, *entity_uc,
- arm_state->videocore_use_count);
+ vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
+ *entity_uc, arm_state->videocore_use_count);
unlock:
write_unlock_bh(&arm_state->susp_res_lock);
@@ -1575,8 +1567,7 @@ vchiq_use_service(unsigned int handle)
struct vchiq_service *service = find_service_by_handle(handle);
if (service) {
- ret = vchiq_use_internal(service->state, service,
- USE_TYPE_SERVICE);
+ ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
vchiq_service_put(service);
}
return ret;
@@ -1661,22 +1652,18 @@ vchiq_dump_service_use_state(struct vchiq_state *state)
read_unlock_bh(&arm_state->susp_res_lock);
if (only_nonzero)
- vchiq_log_warning(vchiq_susp_log_level, "Too many active "
- "services (%d). Only dumping up to first %d services "
- "with non-zero use-count", active_services, found);
+ vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
+ active_services, found);
for (i = 0; i < found; i++) {
- vchiq_log_warning(vchiq_susp_log_level,
- "----- %c%c%c%c:%d service count %d %s",
- VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
- service_data[i].clientid,
- service_data[i].use_count,
- service_data[i].use_count ? nz : "");
+ vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
+ VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
+ service_data[i].clientid, service_data[i].use_count,
+ service_data[i].use_count ? nz : "");
}
- vchiq_log_warning(vchiq_susp_log_level,
- "----- VCHIQ use count count %d", peer_count);
- vchiq_log_warning(vchiq_susp_log_level,
- "--- Overall vchiq instance use count %d", vc_use_count);
+ vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count count %d", peer_count);
+ vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
+ vc_use_count);
kfree(service_data);
}
@@ -1699,10 +1686,9 @@ vchiq_check_service(struct vchiq_service *service)
if (ret == VCHIQ_ERROR) {
vchiq_log_error(vchiq_susp_log_level,
- "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->client_id, service->service_use_count,
- arm_state->videocore_use_count);
+ "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
+ service->service_use_count, arm_state->videocore_use_count);
vchiq_dump_service_use_state(service->state);
}
out:
@@ -1717,7 +1703,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state,
char threadname[16];
vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
- get_conn_state_name(oldstate), get_conn_state_name(newstate));
+ get_conn_state_name(oldstate), get_conn_state_name(newstate));
if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
return;
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
index e8e39a154c74..2aa46b119a46 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.h
@@ -25,45 +25,6 @@ enum USE_TYPE_E {
USE_TYPE_VCHIQ
};
-struct vchiq_arm_state {
- /* Keepalive-related data */
- struct task_struct *ka_thread;
- struct completion ka_evt;
- atomic_t ka_use_count;
- atomic_t ka_use_ack_count;
- atomic_t ka_release_count;
-
- rwlock_t susp_res_lock;
-
- struct vchiq_state *state;
-
- /*
- * Global use count for videocore.
- * This is equal to the sum of the use counts for all services. When
- * this hits zero the videocore suspend procedure will be initiated.
- */
- int videocore_use_count;
-
- /*
- * Use count to track requests from videocore peer.
- * This use count is not associated with a service, so needs to be
- * tracked separately with the state.
- */
- int peer_use_count;
-
- /*
- * Flag to indicate that the first vchiq connect has made it through.
- * This means that both sides should be fully ready, and we should
- * be able to suspend after this point.
- */
- int first_connect;
-};
-
-struct vchiq_drvdata {
- const unsigned int cache_line_size;
- struct rpi_firmware *fw;
-};
-
struct user_service {
struct vchiq_service *service;
void __user *userdata;
@@ -121,18 +82,9 @@ extern int vchiq_susp_log_level;
extern spinlock_t msg_queue_spinlock;
extern struct vchiq_state g_state;
-int vchiq_platform_init(struct platform_device *pdev,
- struct vchiq_state *state);
-
extern struct vchiq_state *
vchiq_get_state(void);
-extern void
-vchiq_arm_init_state(struct vchiq_state *state,
- struct vchiq_arm_state *arm_state);
-
-extern void
-vchiq_check_suspend(struct vchiq_state *state);
enum vchiq_status
vchiq_use_service(unsigned int handle);
@@ -148,10 +100,6 @@ vchiq_dump_platform_use_state(struct vchiq_state *state);
extern void
vchiq_dump_service_use_state(struct vchiq_state *state);
-extern struct vchiq_arm_state*
-vchiq_platform_get_arm_state(struct vchiq_state *state);
-
-
extern enum vchiq_status
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
enum USE_TYPE_E use_type);
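The vchiq_arm.c/vchiq_arm.h pair above is a scoping cleanup: struct vchiq_drvdata, struct vchiq_arm_state and vchiq_arm_init_state() have a single user, so they move out of the header into vchiq_arm.c and the init helper becomes static. A generic sketch of the pattern with hypothetical names, not the driver's real types:

/* Previously exported from foo.h; now private to the one file using it. */
struct foo_state {
        int use_count;
        int first_connect;
};

/* static: no prototype needed in any header. */
static void foo_init_state(struct foo_state *st)
{
        st->use_count = 0;
        st->first_connect = 0;
}

static struct foo_state g_foo_state;

void foo_platform_init(void)
{
        foo_init_state(&g_foo_state);
}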
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
index 0ee96d1d0481..bdb0ab617d8b 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.c
@@ -10,16 +10,15 @@
static int g_connected;
static int g_num_deferred_callbacks;
-static VCHIQ_CONNECTED_CALLBACK_T g_deferred_callback[MAX_CALLBACKS];
+static void (*g_deferred_callback[MAX_CALLBACKS])(void);
static int g_once_init;
static DEFINE_MUTEX(g_connected_mutex);
/* Function to initialize our lock */
static void connected_init(void)
{
- if (!g_once_init) {
+ if (!g_once_init)
g_once_init = 1;
- }
}
/*
@@ -28,23 +27,22 @@ static void connected_init(void)
* be made immediately, otherwise it will be deferred until
* vchiq_call_connected_callbacks is called.
*/
-void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback)
+void vchiq_add_connected_callback(void (*callback)(void))
{
connected_init();
if (mutex_lock_killable(&g_connected_mutex))
return;
- if (g_connected)
+ if (g_connected) {
/* We're already connected. Call the callback immediately. */
-
callback();
- else {
- if (g_num_deferred_callbacks >= MAX_CALLBACKS)
+ } else {
+ if (g_num_deferred_callbacks >= MAX_CALLBACKS) {
vchiq_log_error(vchiq_core_log_level,
- "There already %d callback registered - please increase MAX_CALLBACKS",
- g_num_deferred_callbacks);
- else {
+ "There already %d callback registered - please increase MAX_CALLBACKS",
+ g_num_deferred_callbacks);
+ } else {
g_deferred_callback[g_num_deferred_callbacks] =
callback;
g_num_deferred_callbacks++;
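Besides the typedef removal, the vchiq_connected.c hunk applies the coding-style rule that once any branch of an if/else needs braces, every branch gets them. A tiny illustrative sketch with hypothetical helpers:

static void run_now(void)
{
}

static void queue_for_later(void)
{
}

void dispatch(int ready)
{
        /* Before:
         *      if (ready)
         *              run_now();
         *      else {
         *              queue_for_later();
         *              ...
         *      }
         * After: brace both branches once either one needs braces.
         */
        if (ready) {
                run_now();
        } else {
                queue_for_later();
        }
}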
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
index 95c18670e839..4caf5e30099d 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_connected.h
@@ -4,9 +4,7 @@
#ifndef VCHIQ_CONNECTED_H
#define VCHIQ_CONNECTED_H
-typedef void (*VCHIQ_CONNECTED_CALLBACK_T)(void);
-
-void vchiq_add_connected_callback(VCHIQ_CONNECTED_CALLBACK_T callback);
+void vchiq_add_connected_callback(void (*callback)(void));
void vchiq_call_connected_callbacks(void);
#endif /* VCHIQ_CONNECTED_H */
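The vchiq_connected.h/.c pair also drops the VCHIQ_CONNECTED_CALLBACK_T typedef in favour of a spelled-out function-pointer type, keeping the signature visible at every point of use. A stand-alone sketch of the same transformation, with hypothetical names rather than the vchiq API:

#define MAX_CALLBACKS 10

/* Before:
 *      typedef void (*CONNECTED_CB_T)(void);
 *      static CONNECTED_CB_T deferred[MAX_CALLBACKS];
 *      void add_connected_callback(CONNECTED_CB_T cb);
 */

/* After: the pointer type is written out where it is declared and used. */
static void (*deferred[MAX_CALLBACKS])(void);
static int num_deferred;

void add_connected_callback(void (*cb)(void))
{
        if (num_deferred < MAX_CALLBACKS)
                deferred[num_deferred++] = cb;
}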
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
index 9429b8a642fb..7fe20d4b7ba2 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
@@ -228,9 +228,9 @@ static inline void
vchiq_set_service_state(struct vchiq_service *service, int newstate)
{
vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate],
- srvstate_names[newstate]);
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate],
+ srvstate_names[newstate]);
service->srvstate = newstate;
}
@@ -257,7 +257,6 @@ find_service_by_handle(unsigned int handle)
struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport)
{
-
if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
struct vchiq_service *service;
@@ -277,8 +276,7 @@ find_service_by_port(struct vchiq_state *state, int localport)
}
struct vchiq_service *
-find_service_for_instance(struct vchiq_instance *instance,
- unsigned int handle)
+find_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
struct vchiq_service *service;
@@ -299,8 +297,7 @@ find_service_for_instance(struct vchiq_instance *instance,
}
struct vchiq_service *
-find_closed_service_for_instance(struct vchiq_instance *instance,
- unsigned int handle)
+find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle)
{
struct vchiq_service *service;
@@ -467,14 +464,13 @@ make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
enum vchiq_status status;
vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
- service->state->id, service->localport, reason_names[reason],
- header, bulk_userdata);
- status = service->base.callback(reason, header, service->handle,
- bulk_userdata);
+ service->state->id, service->localport, reason_names[reason],
+ header, bulk_userdata);
+ status = service->base.callback(reason, header, service->handle, bulk_userdata);
if (status == VCHIQ_ERROR) {
vchiq_log_warning(vchiq_core_log_level,
- "%d: ignoring ERROR from callback to service %x",
- service->state->id, service->handle);
+ "%d: ignoring ERROR from callback to service %x",
+ service->state->id, service->handle);
status = VCHIQ_SUCCESS;
}
@@ -489,9 +485,8 @@ vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
{
enum vchiq_connstate oldstate = state->conn_state;
- vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
- conn_state_names[oldstate],
- conn_state_names[newstate]);
+ vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id, conn_state_names[oldstate],
+ conn_state_names[newstate]);
state->conn_state = newstate;
vchiq_platform_conn_state_changed(state, oldstate, newstate);
}
@@ -694,8 +689,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
remote_event_signal(&state->remote->trigger);
if (!is_blocking ||
- (wait_for_completion_interruptible(
- &state->slot_available_event)))
+ (wait_for_completion_interruptible(&state->slot_available_event)))
return NULL; /* No space available */
}
@@ -705,8 +699,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
return NULL;
}
- slot_index = local->slot_queue[
- SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
+ slot_index = local->slot_queue[SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
state->tx_data =
(char *)SLOT_DATA_FROM_INDEX(state, slot_index);
}
@@ -718,7 +711,7 @@ reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
}
static void
-process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
+process_free_data_message(struct vchiq_state *state, u32 *service_found,
struct vchiq_header *header)
{
int msgid = header->msgid;
@@ -740,11 +733,9 @@ process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
complete(&quota->quota_event);
} else if (count == 0) {
vchiq_log_error(vchiq_core_log_level,
- "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port,
- quota->message_use_count,
- header, msgid, header->msgid,
- header->size);
+ "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
+ port, quota->message_use_count, header, msgid, header->msgid,
+ header->size);
WARN(1, "invalid message use count\n");
}
if (!BITSET_IS_SET(service_found, port)) {
@@ -763,17 +754,12 @@ process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
* it has dropped below its quota
*/
complete(&quota->quota_event);
- vchiq_log_trace(vchiq_core_log_level,
- "%d: pfq:%d %x@%pK - slot_use->%d",
- state->id, port,
- header->size, header,
- count - 1);
+ vchiq_log_trace(vchiq_core_log_level, "%d: pfq:%d %x@%pK - slot_use->%d",
+ state->id, port, header->size, header, count - 1);
} else {
vchiq_log_error(vchiq_core_log_level,
"service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
- port, count, header,
- msgid, header->msgid,
- header->size);
+ port, count, header, msgid, header->msgid, header->size);
WARN(1, "bad slot use count\n");
}
}
@@ -781,7 +767,7 @@ process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
/* Called by the recycle thread. */
static void
-process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
+process_free_queue(struct vchiq_state *state, u32 *service_found,
size_t length)
{
struct vchiq_shared_state *local = state->local;
@@ -815,8 +801,8 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
rmb();
vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
- state->id, slot_index, data,
- local->slot_queue_recycle, slot_queue_available);
+ state->id, slot_index, data, local->slot_queue_recycle,
+ slot_queue_available);
/* Initialise the bitmask for services which have used this slot */
memset(service_found, 0, length);
@@ -837,9 +823,8 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
- pos, header, msgid, header->msgid,
- header->size);
+ "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -868,18 +853,15 @@ process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
}
static ssize_t
-memcpy_copy_callback(
- void *context, void *dest,
- size_t offset, size_t maxsize)
+memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
{
memcpy(dest + offset, context + offset, maxsize);
return maxsize;
}
static ssize_t
-copy_message_data(
- ssize_t (*copy_callback)(void *context, void *dest,
- size_t offset, size_t maxsize),
+copy_message_data(ssize_t (*copy_callback)(void *context, void *dest, size_t offset,
+ size_t maxsize),
void *context,
void *dest,
size_t size)
@@ -960,29 +942,26 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
* Ensure this service doesn't use more than its quota of
* messages or slots
*/
- tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
- state->local_tx_pos + stride - 1);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
/*
* Ensure data messages don't use more than their quota of
* slots
*/
while ((tx_end_index != state->previous_data_index) &&
- (state->data_use_count == state->data_quota)) {
+ (state->data_use_count == state->data_quota)) {
VCHIQ_STATS_INC(state, data_stalls);
spin_unlock(&quota_spinlock);
mutex_unlock(&state->slot_mutex);
- if (wait_for_completion_interruptible(
- &state->data_quota_event))
+ if (wait_for_completion_interruptible(&state->data_quota_event))
return VCHIQ_RETRY;
mutex_lock(&state->slot_mutex);
spin_lock(&quota_spinlock);
- tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
- state->local_tx_pos + stride - 1);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
if ((tx_end_index == state->previous_data_index) ||
- (state->data_use_count < state->data_quota)) {
+ (state->data_use_count < state->data_quota)) {
/* Pass the signal on to other waiters */
complete(&state->data_quota_event);
break;
@@ -990,19 +969,16 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
}
while ((quota->message_use_count == quota->message_quota) ||
- ((tx_end_index != quota->previous_tx_index) &&
+ ((tx_end_index != quota->previous_tx_index) &&
(quota->slot_use_count == quota->slot_quota))) {
spin_unlock(&quota_spinlock);
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
- state->id, service->localport,
- msg_type_str(type), size,
- quota->message_use_count,
- quota->slot_use_count);
+ "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
+ state->id, service->localport, msg_type_str(type), size,
+ quota->message_use_count, quota->slot_use_count);
VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
mutex_unlock(&state->slot_mutex);
- if (wait_for_completion_interruptible(
- &quota->quota_event))
+ if (wait_for_completion_interruptible(&quota->quota_event))
return VCHIQ_RETRY;
if (service->closing)
return VCHIQ_ERROR;
@@ -1014,8 +990,7 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
return VCHIQ_ERROR;
}
spin_lock(&quota_spinlock);
- tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
- state->local_tx_pos + stride - 1);
+ tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos + stride - 1);
}
spin_unlock(&quota_spinlock);
@@ -1040,11 +1015,9 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
int tx_end_index;
int slot_use_count;
- vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%pK,%zx (%d->%d)",
- state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- header, size, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
QMFLAGS_NO_MUTEX_UNLOCK));
@@ -1097,19 +1070,16 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
if (slot_use_count)
vchiq_log_trace(vchiq_core_log_level,
- "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
- state->id, service->localport,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
- slot_use_count, header);
+ "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)", state->id,
+ service->localport, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
+ size, slot_use_count, header);
VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
} else {
- vchiq_log_info(vchiq_core_log_level,
- "%d: qm %s@%pK,%zx (%d->%d)", state->id,
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- header, size, VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid));
+ vchiq_log_info(vchiq_core_log_level, "%d: qm %s@%pK,%zx (%d->%d)", state->id,
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), header, size,
+ VCHIQ_MSG_SRCPORT(msgid), VCHIQ_MSG_DSTPORT(msgid));
if (size != 0) {
/*
* It is assumed for now that this code path
@@ -1138,13 +1108,10 @@ queue_message(struct vchiq_state *state, struct vchiq_service *service,
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_info(SRVTRACE_LEVEL(service),
- "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- VCHIQ_MSG_TYPE(msgid),
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid),
- size);
+ "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
}
/* Make sure the new header is visible to the peer. */
@@ -1194,9 +1161,8 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
int oldmsgid = header->msgid;
if (oldmsgid != VCHIQ_MSGID_PADDING)
- vchiq_log_error(vchiq_core_log_level,
- "%d: qms - msgid %x, not PADDING",
- state->id, oldmsgid);
+ vchiq_log_error(vchiq_core_log_level, "%d: qms - msgid %x, not PADDING",
+ state->id, oldmsgid);
}
vchiq_log_info(vchiq_sync_log_level,
@@ -1241,13 +1207,10 @@ queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_trace(vchiq_sync_log_level,
- "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
- msg_type_str(VCHIQ_MSG_TYPE(msgid)),
- VCHIQ_MSG_TYPE(msgid),
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- VCHIQ_MSG_SRCPORT(msgid),
- VCHIQ_MSG_DSTPORT(msgid),
- size);
+ "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
+ msg_type_str(VCHIQ_MSG_TYPE(msgid)), VCHIQ_MSG_TYPE(msgid),
+ VCHIQ_FOURCC_AS_4CHARS(svc_fourcc), VCHIQ_MSG_SRCPORT(msgid),
+ VCHIQ_MSG_DSTPORT(msgid), size);
}
remote_event_signal(&state->remote->sync_trigger);
@@ -1273,8 +1236,7 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
if (header) {
int msgid = header->msgid;
- if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
- (service && service->closing)) {
+ if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) || (service && service->closing)) {
mutex_unlock(&state->recycle_mutex);
return;
}
@@ -1301,10 +1263,9 @@ release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
VCHIQ_SLOT_QUEUE_MASK] =
SLOT_INDEX_FROM_INFO(state, slot_info);
state->remote->slot_queue_recycle = slot_queue_recycle + 1;
- vchiq_log_info(vchiq_core_log_level,
- "%d: %s %d - recycle->%x", state->id, __func__,
- SLOT_INDEX_FROM_INFO(state, slot_info),
- state->remote->slot_queue_recycle);
+ vchiq_log_info(vchiq_core_log_level, "%d: %s %d - recycle->%x", state->id, __func__,
+ SLOT_INDEX_FROM_INFO(state, slot_info),
+ state->remote->slot_queue_recycle);
/*
* A write barrier is necessary, but remote_event_signal
@@ -1339,11 +1300,9 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
{
enum vchiq_status status = VCHIQ_SUCCESS;
- vchiq_log_trace(vchiq_core_log_level,
- "%d: nb:%d %cx - p=%x rn=%x r=%x",
- service->state->id, service->localport,
- (queue == &service->bulk_tx) ? 't' : 'r',
- queue->process, queue->remote_notify, queue->remove);
+ vchiq_log_trace(vchiq_core_log_level, "%d: nb:%d %cx - p=%x rn=%x r=%x", service->state->id,
+ service->localport, (queue == &service->bulk_tx) ? 't' : 'r',
+ queue->process, queue->remote_notify, queue->remove);
queue->remote_notify = queue->process;
@@ -1358,21 +1317,16 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
if (bulk->data && service->instance) {
if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
- VCHIQ_SERVICE_STATS_INC(service,
- bulk_tx_count);
- VCHIQ_SERVICE_STATS_ADD(service,
- bulk_tx_bytes,
- bulk->actual);
+ VCHIQ_SERVICE_STATS_INC(service, bulk_tx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, bulk_tx_bytes,
+ bulk->actual);
} else {
- VCHIQ_SERVICE_STATS_INC(service,
- bulk_rx_count);
- VCHIQ_SERVICE_STATS_ADD(service,
- bulk_rx_bytes,
- bulk->actual);
+ VCHIQ_SERVICE_STATS_INC(service, bulk_rx_count);
+ VCHIQ_SERVICE_STATS_ADD(service, bulk_rx_bytes,
+ bulk->actual);
}
} else {
- VCHIQ_SERVICE_STATS_INC(service,
- bulk_aborted_count);
+ VCHIQ_SERVICE_STATS_INC(service, bulk_aborted_count);
}
if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
struct bulk_waiter *waiter;
@@ -1387,8 +1341,8 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
enum vchiq_reason reason =
get_bulk_reason(bulk);
- status = make_service_callback(service,
- reason, NULL, bulk->userdata);
+ status = make_service_callback(service, reason, NULL,
+ bulk->userdata);
if (status == VCHIQ_RETRY)
break;
}
@@ -1401,9 +1355,8 @@ notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
status = VCHIQ_SUCCESS;
if (status == VCHIQ_RETRY)
- request_poll(service->state, service,
- (queue == &service->bulk_tx) ?
- VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
+ request_poll(service->state, service, (queue == &service->bulk_tx) ?
+ VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
return status;
}
@@ -1444,15 +1397,10 @@ poll_services_of_group(struct vchiq_state *state, int group)
VCHIQ_SUCCESS)
request_poll(state, service, VCHIQ_POLL_REMOVE);
} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
- vchiq_log_info(vchiq_core_log_level,
- "%d: ps - terminate %d<->%d",
- state->id, service->localport,
- service->remoteport);
- if (vchiq_close_service_internal(
- service, NO_CLOSE_RECVD) !=
- VCHIQ_SUCCESS)
- request_poll(state, service,
- VCHIQ_POLL_TERMINATE);
+ vchiq_log_info(vchiq_core_log_level, "%d: ps - terminate %d<->%d",
+ state->id, service->localport, service->remoteport);
+ if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) != VCHIQ_SUCCESS)
+ request_poll(state, service, VCHIQ_POLL_TERMINATE);
}
if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
notify_bulks(service, &service->bulk_tx, RETRY_POLL);
@@ -1479,18 +1427,16 @@ abort_outstanding_bulks(struct vchiq_service *service,
{
int is_tx = (queue == &service->bulk_tx);
- vchiq_log_trace(vchiq_core_log_level,
- "%d: aob:%d %cx - li=%x ri=%x p=%x",
- service->state->id, service->localport, is_tx ? 't' : 'r',
- queue->local_insert, queue->remote_insert, queue->process);
+ vchiq_log_trace(vchiq_core_log_level, "%d: aob:%d %cx - li=%x ri=%x p=%x",
+ service->state->id, service->localport, is_tx ? 't' : 'r',
+ queue->local_insert, queue->remote_insert, queue->process);
WARN_ON((int)(queue->local_insert - queue->process) < 0);
WARN_ON((int)(queue->remote_insert - queue->process) < 0);
while ((queue->process != queue->local_insert) ||
- (queue->process != queue->remote_insert)) {
- struct vchiq_bulk *bulk =
- &queue->bulks[BULK_INDEX(queue->process)];
+ (queue->process != queue->remote_insert)) {
+ struct vchiq_bulk *bulk = &queue->bulks[BULK_INDEX(queue->process)];
if (queue->process == queue->remote_insert) {
/* fabricate a matching dummy bulk */
@@ -1503,12 +1449,10 @@ abort_outstanding_bulks(struct vchiq_service *service,
vchiq_complete_bulk(bulk);
vchiq_log_info(SRVTRACE_LEVEL(service),
- "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
- is_tx ? "Send Bulk to" : "Recv Bulk from",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->remoteport,
- bulk->size,
- bulk->remote_size);
+ "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
+ is_tx ? "Send Bulk to" : "Recv Bulk from",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->remoteport, bulk->size, bulk->remote_size);
} else {
/* fabricate a matching dummy bulk */
bulk->data = 0;
@@ -1541,10 +1485,8 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
payload = (struct vchiq_open_payload *)header->data;
fourcc = payload->fourcc;
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
- state->id, header, localport,
- VCHIQ_FOURCC_AS_4CHARS(fourcc));
+ vchiq_log_info(vchiq_core_log_level, "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
+ state->id, header, localport, VCHIQ_FOURCC_AS_4CHARS(fourcc));
service = get_listening_service(state, fourcc);
if (!service)
@@ -1554,17 +1496,12 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
version = payload->version;
version_min = payload->version_min;
- if ((service->version < version_min) ||
- (version < service->version_min)) {
+ if ((service->version < version_min) || (version < service->version_min)) {
/* Version mismatch */
vchiq_loud_error_header();
- vchiq_loud_error("%d: service %d (%c%c%c%c) "
- "version mismatch - local (%d, min %d)"
- " vs. remote (%d, min %d)",
- state->id, service->localport,
- VCHIQ_FOURCC_AS_4CHARS(fourcc),
- service->version, service->version_min,
- version, version_min);
+ vchiq_loud_error("%d: service %d (%c%c%c%c) version mismatch - local (%d, min %d) vs. remote (%d, min %d)",
+ state->id, service->localport, VCHIQ_FOURCC_AS_4CHARS(fourcc),
+ service->version, service->version_min, version, version_min);
vchiq_loud_error_footer();
vchiq_service_put(service);
service = NULL;
@@ -1584,25 +1521,18 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
/* Acknowledge the OPEN */
if (service->sync) {
- if (queue_message_sync(state, NULL, openack_id,
- memcpy_copy_callback,
- &ack_payload,
- sizeof(ack_payload),
- 0) == VCHIQ_RETRY)
+ if (queue_message_sync(state, NULL, openack_id, memcpy_copy_callback,
+ &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
goto bail_not_ready;
} else {
- if (queue_message(state, NULL, openack_id,
- memcpy_copy_callback,
- &ack_payload,
- sizeof(ack_payload),
- 0) == VCHIQ_RETRY)
+ if (queue_message(state, NULL, openack_id, memcpy_copy_callback,
+ &ack_payload, sizeof(ack_payload), 0) == VCHIQ_RETRY)
goto bail_not_ready;
}
/* The service is now open */
- vchiq_set_service_state(service,
- service->sync ? VCHIQ_SRVSTATE_OPENSYNC
- : VCHIQ_SRVSTATE_OPEN);
+ vchiq_set_service_state(service, service->sync ? VCHIQ_SRVSTATE_OPENSYNC
+ : VCHIQ_SRVSTATE_OPEN);
}
/* Success - the message has been dealt with */
@@ -1612,7 +1542,7 @@ parse_open(struct vchiq_state *state, struct vchiq_header *header)
fail_open:
/* No available service, or an invalid request - send a CLOSE */
if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
- NULL, NULL, 0, 0) == VCHIQ_RETRY)
+ NULL, NULL, 0, 0) == VCHIQ_RETRY)
goto bail_not_ready;
return 1;
@@ -1642,7 +1572,7 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
unsigned int localport, remoteport;
int msgid, size, type, ret = -EINVAL;
- DEBUG_INITIALISE(state->local)
+ DEBUG_INITIALISE(state->local);
DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
msgid = header->msgid;
@@ -1676,22 +1606,19 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
*/
if (service)
vchiq_service_put(service);
- service = get_connected_service(state,
- remoteport);
+ service = get_connected_service(state, remoteport);
if (service)
vchiq_log_warning(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) - found connected service %d",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- service->localport);
+ "%d: prs %s@%pK (%d->%d) - found connected service %d",
+ state->id, msg_type_str(type), header,
+ remoteport, localport, service->localport);
}
if (!service) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- localport);
+ "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, localport);
goto skip_message;
}
break;
@@ -1706,21 +1633,18 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_info(SRVTRACE_LEVEL(service),
- "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
- msg_type_str(type), type,
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- remoteport, localport, size);
+ "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
+ msg_type_str(type), type, VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
if (size > 0)
- vchiq_log_dump_mem("Rcvd", 0, header->data,
- min(16, size));
+ vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
}
if (((unsigned long)header & VCHIQ_SLOT_MASK) +
calc_stride(size) > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "header %pK (msgid %x) - size %x too big for slot",
- header, (unsigned int)msgid,
- (unsigned int)size);
+ "header %pK (msgid %x) - size %x too big for slot",
+ header, (unsigned int)msgid, (unsigned int)size);
WARN(1, "oversized for slot\n");
}
@@ -1737,66 +1661,55 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ vchiq_log_info(vchiq_core_log_level, "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_OPEN);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPEN);
complete(&service->remove_event);
} else {
- vchiq_log_error(vchiq_core_log_level,
- "OPENACK received in state %s",
- srvstate_names[service->srvstate]);
+ vchiq_log_error(vchiq_core_log_level, "OPENACK received in state %s",
+ srvstate_names[service->srvstate]);
}
break;
case VCHIQ_MSG_CLOSE:
WARN_ON(size); /* There should be no data */
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs CLOSE@%pK (%d->%d)",
- state->id, header, remoteport, localport);
+ vchiq_log_info(vchiq_core_log_level, "%d: prs CLOSE@%pK (%d->%d)",
+ state->id, header, remoteport, localport);
mark_service_closing_internal(service, 1);
- if (vchiq_close_service_internal(service,
- CLOSE_RECVD) == VCHIQ_RETRY)
+ if (vchiq_close_service_internal(service, CLOSE_RECVD) == VCHIQ_RETRY)
goto bail_not_ready;
- vchiq_log_info(vchiq_core_log_level,
- "Close Service %c%c%c%c s:%u d:%d",
- VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
- service->localport,
- service->remoteport);
+ vchiq_log_info(vchiq_core_log_level, "Close Service %c%c%c%c s:%u d:%d",
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
+ service->localport, service->remoteport);
break;
case VCHIQ_MSG_DATA:
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
+ vchiq_log_info(vchiq_core_log_level, "%d: prs DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
claim_slot(state->rx_info);
DEBUG_TRACE(PARSE_LINE);
- if (make_service_callback(service,
- VCHIQ_MESSAGE_AVAILABLE, header,
- NULL) == VCHIQ_RETRY) {
+ if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY) {
DEBUG_TRACE(PARSE_LINE);
goto bail_not_ready;
}
VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
- VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
- size);
+ VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes, size);
} else {
VCHIQ_STATS_INC(state, error_count);
}
break;
case VCHIQ_MSG_CONNECT:
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs CONNECT@%pK", state->id, header);
+ vchiq_log_info(vchiq_core_log_level, "%d: prs CONNECT@%pK", state->id, header);
state->version_common = ((struct vchiq_slot_zero *)
state->slot_data)->version;
complete(&state->connect);
@@ -1828,11 +1741,10 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
if ((int)(queue->remote_insert -
queue->local_insert) >= 0) {
vchiq_log_error(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- queue->remote_insert,
- queue->local_insert);
+ "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
+ state->id, msg_type_str(type), header, remoteport,
+ localport, queue->remote_insert,
+ queue->local_insert);
mutex_unlock(&service->bulk_mutex);
break;
}
@@ -1845,24 +1757,18 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
goto bail_not_ready;
}
- bulk = &queue->bulks[
- BULK_INDEX(queue->remote_insert)];
+ bulk = &queue->bulks[BULK_INDEX(queue->remote_insert)];
bulk->actual = *(int *)header->data;
queue->remote_insert++;
- vchiq_log_info(vchiq_core_log_level,
- "%d: prs %s@%pK (%d->%d) %x@%pad",
- state->id, msg_type_str(type),
- header, remoteport, localport,
- bulk->actual, &bulk->data);
+ vchiq_log_info(vchiq_core_log_level, "%d: prs %s@%pK (%d->%d) %x@%pad",
+ state->id, msg_type_str(type), header, remoteport, localport,
+ bulk->actual, &bulk->data);
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs:%d %cx li=%x ri=%x p=%x",
- state->id, localport,
- (type == VCHIQ_MSG_BULK_RX_DONE) ?
- 'r' : 't',
- queue->local_insert,
- queue->remote_insert, queue->process);
+ vchiq_log_trace(vchiq_core_log_level, "%d: prs:%d %cx li=%x ri=%x p=%x",
+ state->id, localport,
+ (type == VCHIQ_MSG_BULK_RX_DONE) ? 'r' : 't',
+ queue->local_insert, queue->remote_insert, queue->process);
DEBUG_TRACE(PARSE_LINE);
WARN_ON(queue->process == queue->local_insert);
@@ -1875,35 +1781,30 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
}
break;
case VCHIQ_MSG_PADDING:
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PADDING@%pK,%x",
- state->id, header, size);
+ vchiq_log_trace(vchiq_core_log_level, "%d: prs PADDING@%pK,%x",
+ state->id, header, size);
break;
case VCHIQ_MSG_PAUSE:
/* If initiated, signal the application thread */
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs PAUSE@%pK,%x",
- state->id, header, size);
+ vchiq_log_trace(vchiq_core_log_level, "%d: prs PAUSE@%pK,%x",
+ state->id, header, size);
if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
- vchiq_log_error(vchiq_core_log_level,
- "%d: PAUSE received in state PAUSED",
- state->id);
+ vchiq_log_error(vchiq_core_log_level, "%d: PAUSE received in state PAUSED",
+ state->id);
break;
}
if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
/* Send a PAUSE in response */
- if (queue_message(state, NULL, MAKE_PAUSE,
- NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
- == VCHIQ_RETRY)
+ if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
+ QMFLAGS_NO_MUTEX_UNLOCK) == VCHIQ_RETRY)
goto bail_not_ready;
}
/* At this point slot_mutex is held */
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
break;
case VCHIQ_MSG_RESUME:
- vchiq_log_trace(vchiq_core_log_level,
- "%d: prs RESUME@%pK,%x",
- state->id, header, size);
+ vchiq_log_trace(vchiq_core_log_level, "%d: prs RESUME@%pK,%x",
+ state->id, header, size);
/* Release the slot mutex */
mutex_unlock(&state->slot_mutex);
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
@@ -1919,9 +1820,8 @@ parse_message(struct vchiq_state *state, struct vchiq_header *header)
break;
default:
- vchiq_log_error(vchiq_core_log_level,
- "%d: prs invalid msgid %x@%pK,%x",
- state->id, msgid, header, size);
+ vchiq_log_error(vchiq_core_log_level, "%d: prs invalid msgid %x@%pK,%x",
+ state->id, msgid, header, size);
WARN(1, "invalid message\n");
break;
}
@@ -1943,7 +1843,7 @@ parse_rx_slots(struct vchiq_state *state)
struct vchiq_shared_state *remote = state->remote;
int tx_pos;
- DEBUG_INITIALISE(state->local)
+ DEBUG_INITIALISE(state->local);
tx_pos = remote->tx_pos;
@@ -2031,8 +1931,7 @@ handle_poll(struct vchiq_state *state)
* since the PAUSE should have flushed
* through outstanding messages.
*/
- vchiq_log_error(vchiq_core_log_level,
- "Failed to send RESUME message");
+ vchiq_log_error(vchiq_core_log_level, "Failed to send RESUME message");
}
break;
default:
@@ -2049,7 +1948,7 @@ slot_handler_func(void *v)
struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
- DEBUG_INITIALISE(local)
+ DEBUG_INITIALISE(local);
while (1) {
DEBUG_COUNT(SLOT_HANDLER_COUNT);
@@ -2082,7 +1981,7 @@ recycle_func(void *v)
{
struct vchiq_state *state = v;
struct vchiq_shared_state *local = state->local;
- BITSET_T *found;
+ u32 *found;
size_t length;
length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
@@ -2130,9 +2029,9 @@ sync_func(void *v)
if (!service) {
vchiq_log_error(vchiq_sync_log_level,
- "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
- state->id, msg_type_str(type),
- header, remoteport, localport, localport);
+ "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
+ state->id, msg_type_str(type), header,
+ remoteport, localport, localport);
release_message_sync(state, header);
continue;
}
@@ -2144,13 +2043,11 @@ sync_func(void *v)
? service->base.fourcc
: VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
vchiq_log_trace(vchiq_sync_log_level,
- "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
- msg_type_str(type),
- VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
- remoteport, localport, size);
+ "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
+ msg_type_str(type), VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
+ remoteport, localport, size);
if (size > 0)
- vchiq_log_dump_mem("Rcvd", 0, header->data,
- min(16, size));
+ vchiq_log_dump_mem("Rcvd", 0, header->data, min(16, size));
}
switch (type) {
@@ -2161,14 +2058,12 @@ sync_func(void *v)
header->data;
service->peer_version = payload->version;
}
- vchiq_log_info(vchiq_sync_log_level,
- "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
- state->id, header, size, remoteport, localport,
- service->peer_version);
+ vchiq_log_info(vchiq_sync_log_level, "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
+ state->id, header, size, remoteport, localport,
+ service->peer_version);
if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
service->remoteport = remoteport;
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_OPENSYNC);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_OPENSYNC);
service->sync = 1;
complete(&service->remove_event);
}
@@ -2176,25 +2071,22 @@ sync_func(void *v)
break;
case VCHIQ_MSG_DATA:
- vchiq_log_trace(vchiq_sync_log_level,
- "%d: sf DATA@%pK,%x (%d->%d)",
- state->id, header, size, remoteport, localport);
+ vchiq_log_trace(vchiq_sync_log_level, "%d: sf DATA@%pK,%x (%d->%d)",
+ state->id, header, size, remoteport, localport);
if ((service->remoteport == remoteport) &&
(service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
- if (make_service_callback(service,
- VCHIQ_MESSAGE_AVAILABLE, header,
- NULL) == VCHIQ_RETRY)
+ if (make_service_callback(service, VCHIQ_MESSAGE_AVAILABLE, header,
+ NULL) == VCHIQ_RETRY)
vchiq_log_error(vchiq_sync_log_level,
- "synchronous callback to service %d returns VCHIQ_RETRY",
- localport);
+ "synchronous callback to service %d returns VCHIQ_RETRY",
+ localport);
}
break;
default:
- vchiq_log_error(vchiq_sync_log_level,
- "%d: sf unexpected msgid %x@%pK,%x",
- state->id, msgid, header, size);
+ vchiq_log_error(vchiq_sync_log_level, "%d: sf unexpected msgid %x@%pK,%x",
+ state->id, msgid, header, size);
release_message_sync(state, header);
break;
}
@@ -2237,9 +2129,8 @@ vchiq_init_slots(void *mem_base, int mem_size)
num_slots -= first_data_slot;
if (num_slots < 4) {
- vchiq_log_error(vchiq_core_log_level,
- "%s - insufficient memory %x bytes",
- __func__, mem_size);
+ vchiq_log_error(vchiq_core_log_level, "%s - insufficient memory %x bytes",
+ __func__, mem_size);
return NULL;
}
@@ -2330,7 +2221,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
state->default_slot_quota = state->slot_queue_available / 2;
state->default_message_quota =
min((unsigned short)(state->default_slot_quota * 256),
- (unsigned short)~0);
+ (unsigned short)~0);
state->previous_data_index = -1;
state->data_use_count = 0;
@@ -2359,9 +2250,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
* bring up slot handler thread
*/
snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
- state->slot_handler_thread = kthread_create(&slot_handler_func,
- (void *)state,
- threadname);
+ state->slot_handler_thread = kthread_create(&slot_handler_func, (void *)state, threadname);
if (IS_ERR(state->slot_handler_thread)) {
vchiq_loud_error_header();
@@ -2372,9 +2261,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
set_user_nice(state->slot_handler_thread, -19);
snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
- state->recycle_thread = kthread_create(&recycle_func,
- (void *)state,
- threadname);
+ state->recycle_thread = kthread_create(&recycle_func, (void *)state, threadname);
if (IS_ERR(state->recycle_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
@@ -2385,9 +2272,7 @@ vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
set_user_nice(state->recycle_thread, -19);
snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
- state->sync_thread = kthread_create(&sync_func,
- (void *)state,
- threadname);
+ state->sync_thread = kthread_create(&sync_func, (void *)state, threadname);
if (IS_ERR(state->sync_thread)) {
vchiq_loud_error_header();
vchiq_loud_error("couldn't create thread %s", threadname);
@@ -2474,7 +2359,7 @@ struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params_kernel *params,
int srvstate, struct vchiq_instance *instance,
- vchiq_userdata_term userdata_term)
+ void (*userdata_term)(void *userdata))
{
struct vchiq_service *service;
struct vchiq_service __rcu **pservice = NULL;
@@ -2603,12 +2488,9 @@ vchiq_add_service_internal(struct vchiq_state *state,
/* Bring this service online */
vchiq_set_service_state(service, srvstate);
- vchiq_log_info(vchiq_core_msg_log_level,
- "%s Service %c%c%c%c SrcPort:%d",
- (srvstate == VCHIQ_SRVSTATE_OPENING)
- ? "Open" : "Add",
- VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
- service->localport);
+ vchiq_log_info(vchiq_core_msg_log_level, "%s Service %c%c%c%c SrcPort:%d",
+ (srvstate == VCHIQ_SRVSTATE_OPENING) ? "Open" : "Add",
+ VCHIQ_FOURCC_AS_4CHARS(params->fourcc), service->localport);
/* Don't unlock the service - leave it with a ref_count of 1. */
@@ -2703,19 +2585,15 @@ release_service_messages(struct vchiq_service *service)
int msgid = header->msgid;
int port = VCHIQ_MSG_DSTPORT(msgid);
- if ((port == service->localport) &&
- (msgid & VCHIQ_MSGID_CLAIMED)) {
- vchiq_log_info(vchiq_core_log_level,
- " fsi - hdr %pK", header);
- release_slot(state, slot_info, header,
- NULL);
+ if ((port == service->localport) && (msgid & VCHIQ_MSGID_CLAIMED)) {
+ vchiq_log_info(vchiq_core_log_level, " fsi - hdr %pK", header);
+ release_slot(state, slot_info, header, NULL);
}
pos += calc_stride(header->size);
if (pos > VCHIQ_SLOT_SIZE) {
vchiq_log_error(vchiq_core_log_level,
- "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
- pos, header, msgid,
- header->msgid, header->size);
+ "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
+ pos, header, msgid, header->msgid, header->size);
WARN(1, "invalid slot position\n");
}
}
@@ -2769,15 +2647,13 @@ close_service_complete(struct vchiq_service *service, int failstate)
case VCHIQ_SRVSTATE_LISTENING:
break;
default:
- vchiq_log_error(vchiq_core_log_level,
- "%s(%x) called in state %s", __func__,
- service->handle, srvstate_names[service->srvstate]);
+ vchiq_log_error(vchiq_core_log_level, "%s(%x) called in state %s", __func__,
+ service->handle, srvstate_names[service->srvstate]);
WARN(1, "%s in unexpected state\n", __func__);
return VCHIQ_ERROR;
}
- status = make_service_callback(service,
- VCHIQ_SERVICE_CLOSED, NULL, NULL);
+ status = make_service_callback(service, VCHIQ_SERVICE_CLOSED, NULL, NULL);
if (status != VCHIQ_RETRY) {
int uc = service->service_use_count;
@@ -2818,9 +2694,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
int close_id = MAKE_CLOSE(service->localport,
VCHIQ_MSG_DSTPORT(service->remoteport));
- vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
- service->state->id, service->localport, close_recvd,
- srvstate_names[service->srvstate]);
+ vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)", service->state->id,
+ service->localport, close_recvd, srvstate_names[service->srvstate]);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_CLOSED:
@@ -2828,9 +2703,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_LISTENING:
case VCHIQ_SRVSTATE_CLOSEWAIT:
if (close_recvd) {
- vchiq_log_error(vchiq_core_log_level,
- "%s(1) called in state %s",
- __func__, srvstate_names[service->srvstate]);
+ vchiq_log_error(vchiq_core_log_level, "%s(1) called in state %s",
+ __func__, srvstate_names[service->srvstate]);
} else if (is_server) {
if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
status = VCHIQ_ERROR;
@@ -2839,8 +2713,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
service->remoteport = VCHIQ_PORT_FREE;
if (service->srvstate ==
VCHIQ_SRVSTATE_CLOSEWAIT)
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_LISTENING);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
}
complete(&service->remove_event);
} else {
@@ -2850,13 +2723,11 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
case VCHIQ_SRVSTATE_OPENING:
if (close_recvd) {
/* The open was rejected - tell the user */
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_CLOSEWAIT);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSEWAIT);
complete(&service->remove_event);
} else {
/* Shutdown mid-open - let the other side know */
- status = queue_message(state, service, close_id,
- NULL, NULL, 0, 0);
+ status = queue_message(state, service, close_id, NULL, NULL, 0, 0);
}
break;
@@ -2872,8 +2743,8 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
release_service_messages(service);
if (status == VCHIQ_SUCCESS)
- status = queue_message(state, service, close_id,
- NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
+ status = queue_message(state, service, close_id, NULL,
+ NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
if (status != VCHIQ_SUCCESS) {
if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
@@ -2897,8 +2768,7 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
if (service->sync)
mutex_unlock(&state->sync_mutex);
- status = close_service_complete(service,
- VCHIQ_SRVSTATE_CLOSERECVD);
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
break;
case VCHIQ_SRVSTATE_CLOSESENT:
@@ -2912,23 +2782,19 @@ vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
}
if (status == VCHIQ_SUCCESS)
- status = close_service_complete(service,
- VCHIQ_SRVSTATE_CLOSERECVD);
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
break;
case VCHIQ_SRVSTATE_CLOSERECVD:
if (!close_recvd && is_server)
/* Force into LISTENING mode */
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_LISTENING);
- status = close_service_complete(service,
- VCHIQ_SRVSTATE_CLOSERECVD);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
+ status = close_service_complete(service, VCHIQ_SRVSTATE_CLOSERECVD);
break;
default:
- vchiq_log_error(vchiq_core_log_level,
- "%s(%d) called in state %s", __func__,
- close_recvd, srvstate_names[service->srvstate]);
+ vchiq_log_error(vchiq_core_log_level, "%s(%d) called in state %s", __func__,
+ close_recvd, srvstate_names[service->srvstate]);
break;
}
@@ -2941,8 +2807,8 @@ vchiq_terminate_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
- state->id, service->localport, service->remoteport);
+ vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)", state->id,
+ service->localport, service->remoteport);
mark_service_closing(service);
@@ -2956,8 +2822,7 @@ vchiq_free_service_internal(struct vchiq_service *service)
{
struct vchiq_state *state = service->state;
- vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
- state->id, service->localport);
+ vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)", state->id, service->localport);
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPENING:
@@ -2967,10 +2832,8 @@ vchiq_free_service_internal(struct vchiq_service *service)
case VCHIQ_SRVSTATE_CLOSEWAIT:
break;
default:
- vchiq_log_error(vchiq_core_log_level,
- "%d: fsi - (%d) in state %s",
- state->id, service->localport,
- srvstate_names[service->srvstate]);
+ vchiq_log_error(vchiq_core_log_level, "%d: fsi - (%d) in state %s", state->id,
+ service->localport, srvstate_names[service->srvstate]);
return;
}
@@ -2990,17 +2853,15 @@ vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instanc
/* Find all services registered to this client and enable them. */
i = 0;
- while ((service = next_service_by_instance(state, instance,
- &i)) != NULL) {
+ while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
- vchiq_set_service_state(service,
- VCHIQ_SRVSTATE_LISTENING);
+ vchiq_set_service_state(service, VCHIQ_SRVSTATE_LISTENING);
vchiq_service_put(service);
}
if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
- if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL,
- 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
+ if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL, 0,
+ QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
return VCHIQ_RETRY;
vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
@@ -3025,8 +2886,7 @@ vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instan
/* Find all services registered to this client and remove them. */
i = 0;
- while ((service = next_service_by_instance(state, instance,
- &i)) != NULL) {
+ while ((service = next_service_by_instance(state, instance, &i)) != NULL) {
(void)vchiq_remove_service(service->handle);
vchiq_service_put(service);
}
@@ -3042,9 +2902,8 @@ vchiq_close_service(unsigned int handle)
if (!service)
return VCHIQ_ERROR;
- vchiq_log_info(vchiq_core_log_level,
- "%d: close_service:%d",
- service->state->id, service->localport);
+ vchiq_log_info(vchiq_core_log_level, "%d: close_service:%d",
+ service->state->id, service->localport);
if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
(service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
@@ -3075,9 +2934,9 @@ vchiq_close_service(unsigned int handle)
break;
vchiq_log_warning(vchiq_core_log_level,
- "%d: close_service:%d - waiting in state %s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate]);
+ "%d: close_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
}
if ((status == VCHIQ_SUCCESS) &&
@@ -3101,9 +2960,8 @@ vchiq_remove_service(unsigned int handle)
if (!service)
return VCHIQ_ERROR;
- vchiq_log_info(vchiq_core_log_level,
- "%d: remove_service:%d",
- service->state->id, service->localport);
+ vchiq_log_info(vchiq_core_log_level, "%d: remove_service:%d",
+ service->state->id, service->localport);
if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
vchiq_service_put(service);
@@ -3137,9 +2995,9 @@ vchiq_remove_service(unsigned int handle)
break;
vchiq_log_warning(vchiq_core_log_level,
- "%d: remove_service:%d - waiting in state %s",
- service->state->id, service->localport,
- srvstate_names[service->srvstate]);
+ "%d: remove_service:%d - waiting in state %s",
+ service->state->id, service->localport,
+ srvstate_names[service->srvstate]);
}
if ((status == VCHIQ_SUCCESS) &&
@@ -3159,11 +3017,9 @@ vchiq_remove_service(unsigned int handle)
* When called in blocking mode, the userdata field points to a bulk_waiter
* structure.
*/
-enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
- void *offset, void __user *uoffset,
- int size, void *userdata,
- enum vchiq_bulk_mode mode,
- enum vchiq_bulk_dir dir)
+enum vchiq_status vchiq_bulk_transfer(unsigned int handle, void *offset, void __user *uoffset,
+ int size, void *userdata, enum vchiq_bulk_mode mode,
+ enum vchiq_bulk_dir dir)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_bulk_queue *queue;
@@ -3220,8 +3076,7 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
do {
mutex_unlock(&service->bulk_mutex);
- if (wait_for_completion_interruptible(
- &service->bulk_remove_event)) {
+ if (wait_for_completion_interruptible(&service->bulk_remove_event)) {
status = VCHIQ_RETRY;
goto error_exit;
}
@@ -3246,10 +3101,9 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
wmb();
- vchiq_log_info(vchiq_core_log_level,
- "%d: bt (%d->%d) %cx %x@%pad %pK",
- state->id, service->localport, service->remoteport, dir_char,
- size, &bulk->data, userdata);
+ vchiq_log_info(vchiq_core_log_level, "%d: bt (%d->%d) %cx %x@%pad %pK",
+ state->id, service->localport, service->remoteport,
+ dir_char, size, &bulk->data, userdata);
/*
* The slot mutex must be held when the service is being closed, so
@@ -3284,11 +3138,9 @@ enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
mutex_unlock(&state->slot_mutex);
mutex_unlock(&service->bulk_mutex);
- vchiq_log_trace(vchiq_core_log_level,
- "%d: bt:%d %cx li=%x ri=%x p=%x",
- state->id,
- service->localport, dir_char,
- queue->local_insert, queue->remote_insert, queue->process);
+ vchiq_log_trace(vchiq_core_log_level, "%d: bt:%d %cx li=%x ri=%x p=%x",
+ state->id, service->localport, dir_char, queue->local_insert,
+ queue->remote_insert, queue->process);
waiting:
vchiq_service_put(service);
@@ -3338,7 +3190,6 @@ vchiq_queue_message(unsigned int handle,
if (!size) {
VCHIQ_SERVICE_STATS_INC(service, error_count);
goto error_exit;
-
}
if (size > VCHIQ_MAX_MSG_SIZE) {
@@ -3351,11 +3202,11 @@ vchiq_queue_message(unsigned int handle,
switch (service->srvstate) {
case VCHIQ_SRVSTATE_OPEN:
status = queue_message(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size, 1);
break;
case VCHIQ_SRVSTATE_OPENSYNC:
status = queue_message_sync(service->state, service, data_id,
- copy_callback, context, size, 1);
+ copy_callback, context, size, 1);
break;
default:
status = VCHIQ_ERROR;
@@ -3470,8 +3321,7 @@ void vchiq_get_config(struct vchiq_config *config)
}
int
-vchiq_set_service_option(unsigned int handle,
- enum vchiq_service_option option, int value)
+vchiq_set_service_option(unsigned int handle, enum vchiq_service_option option, int value)
{
struct vchiq_service *service = find_service_by_handle(handle);
struct vchiq_service_quota *quota;
@@ -3565,16 +3415,14 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
int len;
int err;
- len = scnprintf(buf, sizeof(buf),
- " %s: slots %d-%d tx_pos=%x recycle=%x",
- label, shared->slot_first, shared->slot_last,
- shared->tx_pos, shared->slot_queue_recycle);
+ len = scnprintf(buf, sizeof(buf), " %s: slots %d-%d tx_pos=%x recycle=%x",
+ label, shared->slot_first, shared->slot_last,
+ shared->tx_pos, shared->slot_queue_recycle);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
- len = scnprintf(buf, sizeof(buf),
- " Slots claimed:");
+ len = scnprintf(buf, sizeof(buf), " Slots claimed:");
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
@@ -3583,9 +3431,8 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
struct vchiq_slot_info slot_info =
*SLOT_INFO_FROM_INDEX(state, i);
if (slot_info.use_count != slot_info.release_count) {
- len = scnprintf(buf, sizeof(buf),
- " %d: %d/%d", i, slot_info.use_count,
- slot_info.release_count);
+ len = scnprintf(buf, sizeof(buf), " %d: %d/%d", i, slot_info.use_count,
+ slot_info.release_count);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
@@ -3594,7 +3441,7 @@ vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
- debug_names[i], shared->debug[i], shared->debug[i]);
+ debug_names[i], shared->debug[i], shared->debug[i]);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
@@ -3610,45 +3457,43 @@ int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
int err;
len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
- conn_state_names[state->conn_state]);
+ conn_state_names[state->conn_state]);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
- len = scnprintf(buf, sizeof(buf),
- " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
- state->local->tx_pos,
- state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
- state->rx_pos,
- state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
+ len = scnprintf(buf, sizeof(buf), " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
+ state->local->tx_pos,
+ state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
+ state->rx_pos,
+ state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
- len = scnprintf(buf, sizeof(buf),
- " Version: %d (min %d)",
- VCHIQ_VERSION, VCHIQ_VERSION_MIN);
+ len = scnprintf(buf, sizeof(buf), " Version: %d (min %d)",
+ VCHIQ_VERSION, VCHIQ_VERSION_MIN);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
if (VCHIQ_ENABLE_STATS) {
len = scnprintf(buf, sizeof(buf),
- " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
- state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
- state->stats.error_count);
+ " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
+ state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
+ state->stats.error_count);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
}
len = scnprintf(buf, sizeof(buf),
- " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
- ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
+ " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
+ ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
state->local_tx_pos) / VCHIQ_SLOT_SIZE,
- state->data_quota - state->data_use_count,
- state->local->slot_queue_recycle - state->slot_queue_available,
- state->stats.slot_stalls, state->stats.data_stalls);
+ state->data_quota - state->data_use_count,
+ state->local->slot_queue_recycle - state->slot_queue_available,
+ state->stats.slot_stalls, state->stats.data_stalls);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
@@ -3712,21 +3557,17 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
"%u", service->remoteport);
if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
- scnprintf(remoteport + len2,
- sizeof(remoteport) - len2,
- " (client %x)", service->client_id);
+ scnprintf(remoteport + len2, sizeof(remoteport) - len2,
+ " (client %x)", service->client_id);
} else {
strscpy(remoteport, "n/a", sizeof(remoteport));
}
len += scnprintf(buf + len, sizeof(buf) - len,
- " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
- VCHIQ_FOURCC_AS_4CHARS(fourcc),
- remoteport,
- quota->message_use_count,
- quota->message_quota,
- quota->slot_use_count,
- quota->slot_quota);
+ " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
+ VCHIQ_FOURCC_AS_4CHARS(fourcc), remoteport,
+ quota->message_use_count, quota->message_quota,
+ quota->slot_use_count, quota->slot_quota);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
@@ -3739,13 +3580,13 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
service->bulk_rx.remote_insert;
len = scnprintf(buf, sizeof(buf),
- " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
- tx_pending,
- tx_pending ? service->bulk_tx.bulks[
- BULK_INDEX(service->bulk_tx.remove)].size : 0,
- rx_pending,
- rx_pending ? service->bulk_rx.bulks[
- BULK_INDEX(service->bulk_rx.remove)].size : 0);
+ " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
+ tx_pending,
+ tx_pending ?
+ service->bulk_tx.bulks[BULK_INDEX(service->bulk_tx.remove)].size :
+ 0, rx_pending, rx_pending ?
+ service->bulk_rx.bulks[BULK_INDEX(service->bulk_rx.remove)].size :
+ 0);
if (VCHIQ_ENABLE_STATS) {
err = vchiq_dump(dump_context, buf, len + 1);
@@ -3753,32 +3594,27 @@ int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
return err;
len = scnprintf(buf, sizeof(buf),
- " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
- service->stats.ctrl_tx_count,
- service->stats.ctrl_tx_bytes,
- service->stats.ctrl_rx_count,
- service->stats.ctrl_rx_bytes);
+ " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
+ service->stats.ctrl_tx_count, service->stats.ctrl_tx_bytes,
+ service->stats.ctrl_rx_count, service->stats.ctrl_rx_bytes);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
len = scnprintf(buf, sizeof(buf),
- " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
- service->stats.bulk_tx_count,
- service->stats.bulk_tx_bytes,
- service->stats.bulk_rx_count,
- service->stats.bulk_rx_bytes);
+ " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
+ service->stats.bulk_tx_count, service->stats.bulk_tx_bytes,
+ service->stats.bulk_rx_count, service->stats.bulk_rx_bytes);
err = vchiq_dump(dump_context, buf, len + 1);
if (err)
return err;
len = scnprintf(buf, sizeof(buf),
- " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
- service->stats.quota_stalls,
- service->stats.slot_stalls,
- service->stats.bulk_stalls,
- service->stats.bulk_aborted_count,
- service->stats.error_count);
+ " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
+ service->stats.quota_stalls, service->stats.slot_stalls,
+ service->stats.bulk_stalls,
+ service->stats.bulk_aborted_count,
+ service->stats.error_count);
}
}
@@ -3795,9 +3631,9 @@ void
vchiq_loud_error_header(void)
{
vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
+ "============================================================================");
vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
+ "============================================================================");
vchiq_log_error(vchiq_core_log_level, "=====");
}
@@ -3806,9 +3642,9 @@ vchiq_loud_error_footer(void)
{
vchiq_log_error(vchiq_core_log_level, "=====");
vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
+ "============================================================================");
vchiq_log_error(vchiq_core_log_level,
- "============================================================================");
+ "============================================================================");
}
enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
@@ -3828,8 +3664,7 @@ enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
NULL, NULL, 0, 0);
}
-void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
- size_t num_bytes)
+void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes)
{
const u8 *mem = void_mem;
size_t offset;
@@ -3858,11 +3693,9 @@ void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
*s++ = '\0';
if (label && (*label != '\0'))
- vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%s: %08x: %s", label, addr, line_buf);
+ vchiq_log_trace(VCHIQ_LOG_TRACE, "%s: %08x: %s", label, addr, line_buf);
else
- vchiq_log_trace(VCHIQ_LOG_TRACE,
- "%08x: %s", addr, line_buf);
+ vchiq_log_trace(VCHIQ_LOG_TRACE, "%08x: %s", addr, line_buf);
addr += 16;
mem += 16;
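
Several hunks in this file switch the service_found bitmap from BITSET_T to bare u32 words while keeping the BITSET_* accessors. An illustrative userspace version of that bitmap follows; BITSET_SIZE and BITSET_WORD match the header diff below, while BITSET_BIT and BITSET_SET are assumed to follow the same pattern:

#include <stdint.h>
#include <string.h>

#define VCHIQ_MAX_SERVICES   4096               /* assumed size */
#define BITSET_SIZE(b)       (((b) + 31) >> 5)
#define BITSET_WORD(b)       ((b) >> 5)
#define BITSET_BIT(b)        (1u << ((b) & 31))
#define BITSET_IS_SET(bs, b) ((bs)[BITSET_WORD(b)] & BITSET_BIT(b))
#define BITSET_SET(bs, b)    ((bs)[BITSET_WORD(b)] |= BITSET_BIT(b))

static uint32_t service_found[BITSET_SIZE(VCHIQ_MAX_SERVICES)];

static void mark_service_used(unsigned int port)
{
	/* Mirrors the BITSET_IS_SET() guard in process_free_data_message(). */
	if (!BITSET_IS_SET(service_found, port))
		BITSET_SET(service_found, port);
}

static void reset_service_map(void)
{
	/* Matches memset(service_found, 0, length) before each recycled slot. */
	memset(service_found, 0, sizeof(service_found));
}
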
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
index 957fea1f574f..53a98949b294 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h
@@ -14,7 +14,6 @@
#include "vchiq_cfg.h"
-
/* Do this so that we can test-build the code on non-rpi systems */
#if IS_ENABLED(CONFIG_RASPBERRYPI_FIRMWARE)
@@ -75,9 +74,7 @@
((fourcc) >> 8) & 0xff, \
(fourcc) & 0xff
-typedef uint32_t BITSET_T;
-
-static_assert((sizeof(BITSET_T) * 8) == 32);
+static_assert((sizeof(u32) * 8) == 32);
#define BITSET_SIZE(b) ((b + 31) >> 5)
#define BITSET_WORD(b) (b >> 5)
@@ -105,7 +102,7 @@ enum {
#if VCHIQ_ENABLE_DEBUG
-#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug;
+#define DEBUG_INITIALISE(local) int *debug_ptr = (local)->debug
#define DEBUG_TRACE(d) \
do { debug_ptr[DEBUG_ ## d] = __LINE__; dsb(sy); } while (0)
#define DEBUG_VALUE(d, v) \
@@ -152,8 +149,6 @@ enum vchiq_bulk_dir {
VCHIQ_BULK_RECEIVE
};
-typedef void (*vchiq_userdata_term)(void *userdata);
-
struct vchiq_bulk {
short mode;
short dir;
@@ -198,7 +193,7 @@ struct vchiq_service {
struct kref ref_count;
struct rcu_head rcu;
int srvstate;
- vchiq_userdata_term userdata_term;
+ void (*userdata_term)(void *userdata);
unsigned int localport;
unsigned int remoteport;
int public_fourcc;
@@ -234,10 +229,10 @@ struct vchiq_service {
int bulk_tx_count;
int bulk_rx_count;
int bulk_aborted_count;
- uint64_t ctrl_tx_bytes;
- uint64_t ctrl_rx_bytes;
- uint64_t bulk_tx_bytes;
- uint64_t bulk_rx_bytes;
+ u64 ctrl_tx_bytes;
+ u64 ctrl_rx_bytes;
+ u64 bulk_tx_bytes;
+ u64 bulk_rx_bytes;
} stats;
int msg_queue_read;
@@ -262,7 +257,6 @@ struct vchiq_service_quota {
};
struct vchiq_shared_state {
-
/* A non-zero value here indicates that the content is valid. */
int initialised;
@@ -451,7 +445,6 @@ struct vchiq_config {
short version_min; /* The minimum compatible version of VCHIQ */
};
-
extern spinlock_t bulk_waiter_spinlock;
extern int vchiq_core_log_level;
@@ -476,7 +469,7 @@ struct vchiq_service *
vchiq_add_service_internal(struct vchiq_state *state,
const struct vchiq_service_params_kernel *params,
int srvstate, struct vchiq_instance *instance,
- vchiq_userdata_term userdata_term);
+ void (*userdata_term)(void *userdata));
extern enum vchiq_status
vchiq_open_service_internal(struct vchiq_service *service, int client_id);
@@ -536,12 +529,10 @@ extern struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport);
extern struct vchiq_service *
-find_service_for_instance(struct vchiq_instance *instance,
- unsigned int handle);
+find_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
extern struct vchiq_service *
-find_closed_service_for_instance(struct vchiq_instance *instance,
- unsigned int handle);
+find_closed_service_for_instance(struct vchiq_instance *instance, unsigned int handle);
extern struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
@@ -566,81 +557,53 @@ vchiq_queue_message(unsigned int handle,
void *context,
size_t size);
-/*
- * The following functions are called from vchiq_core, and external
- * implementations must be provided.
- */
-
-extern int
-vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
- void __user *uoffset, int size, int dir);
+int vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, void __user *uoffset,
+ int size, int dir);
-extern void
-vchiq_complete_bulk(struct vchiq_bulk *bulk);
+void vchiq_complete_bulk(struct vchiq_bulk *bulk);
-extern void
-remote_event_signal(struct remote_event *event);
+void remote_event_signal(struct remote_event *event);
-extern int
-vchiq_dump(void *dump_context, const char *str, int len);
+int vchiq_dump(void *dump_context, const char *str, int len);
-extern int
-vchiq_dump_platform_state(void *dump_context);
+int vchiq_dump_platform_state(void *dump_context);
-extern int
-vchiq_dump_platform_instances(void *dump_context);
+int vchiq_dump_platform_instances(void *dump_context);
-extern int
-vchiq_dump_platform_service_state(void *dump_context,
- struct vchiq_service *service);
+int vchiq_dump_platform_service_state(void *dump_context, struct vchiq_service *service);
-extern int
-vchiq_use_service_internal(struct vchiq_service *service);
+int vchiq_use_service_internal(struct vchiq_service *service);
-extern int
-vchiq_release_service_internal(struct vchiq_service *service);
+int vchiq_release_service_internal(struct vchiq_service *service);
-extern void
-vchiq_on_remote_use(struct vchiq_state *state);
+void vchiq_on_remote_use(struct vchiq_state *state);
-extern void
-vchiq_on_remote_release(struct vchiq_state *state);
+void vchiq_on_remote_release(struct vchiq_state *state);
-extern int
-vchiq_platform_init_state(struct vchiq_state *state);
+int vchiq_platform_init_state(struct vchiq_state *state);
-extern enum vchiq_status
-vchiq_check_service(struct vchiq_service *service);
+enum vchiq_status vchiq_check_service(struct vchiq_service *service);
-extern void
-vchiq_on_remote_use_active(struct vchiq_state *state);
+void vchiq_on_remote_use_active(struct vchiq_state *state);
-extern enum vchiq_status
-vchiq_send_remote_use(struct vchiq_state *state);
+enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state);
-extern enum vchiq_status
-vchiq_send_remote_use_active(struct vchiq_state *state);
+enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state);
-extern void
-vchiq_platform_conn_state_changed(struct vchiq_state *state,
- enum vchiq_connstate oldstate,
+void vchiq_platform_conn_state_changed(struct vchiq_state *state,
+ enum vchiq_connstate oldstate,
enum vchiq_connstate newstate);
-extern void
-vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
+void vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate);
-extern void
-vchiq_log_dump_mem(const char *label, uint32_t addr, const void *voidMem,
- size_t numBytes);
+void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem, size_t num_bytes);
-extern enum vchiq_status vchiq_remove_service(unsigned int service);
+enum vchiq_status vchiq_remove_service(unsigned int service);
-extern int vchiq_get_client_id(unsigned int service);
+int vchiq_get_client_id(unsigned int service);
-extern void vchiq_get_config(struct vchiq_config *config);
+void vchiq_get_config(struct vchiq_config *config);
-extern int
-vchiq_set_service_option(unsigned int service, enum vchiq_service_option option,
- int value);
+int vchiq_set_service_option(unsigned int service, enum vchiq_service_option option, int value);
#endif
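The vchiq_core.h hunk above drops the BITSET_T typedef in favour of plain u32 words but keeps the BITSET_SIZE()/BITSET_WORD() arithmetic, which packs one flag bit per entry into an array of 32-bit words. A small user-space sketch of how those macros are used; the set/test helpers and the extra parentheses are illustrative additions, not code from the driver:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BITSET_SIZE(b)	(((b) + 31) >> 5)	/* 32-bit words needed for b bits */
#define BITSET_WORD(b)	((b) >> 5)		/* index of the word holding bit b */
#define BITSET_MASK(b)	(1u << ((b) & 31))	/* mask for bit b within its word */

static void bitset_set(uint32_t *set, unsigned int b)
{
	set[BITSET_WORD(b)] |= BITSET_MASK(b);
}

static int bitset_test(const uint32_t *set, unsigned int b)
{
	return (set[BITSET_WORD(b)] & BITSET_MASK(b)) != 0;
}

int main(void)
{
	uint32_t ports[BITSET_SIZE(100)] = { 0 };	/* 100 bits fit in 4 words */

	bitset_set(ports, 37);
	assert(bitset_test(ports, 37) && !bitset_test(ports, 38));
	printf("words allocated: %zu\n", sizeof(ports) / sizeof(ports[0]));
	return 0;
}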
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
index bf1a88c9d1ee..2325844b0880 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_dev.c
@@ -9,18 +9,13 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/miscdevice.h>
#include "vchiq_core.h"
#include "vchiq_ioctl.h"
#include "vchiq_arm.h"
#include "vchiq_debugfs.h"
-#define DEVICE_NAME "vchiq"
-
-static struct cdev vchiq_cdev;
-static dev_t vchiq_devid;
-static struct class *vchiq_class;
-
static const char *const ioctl_names[] = {
"CONNECT",
"SHUTDOWN",
@@ -53,8 +48,8 @@ user_service_free(void *userdata)
static void close_delivered(struct user_service *user_service)
{
vchiq_log_info(vchiq_arm_log_level,
- "%s(handle=%x)",
- __func__, user_service->service->handle);
+ "%s(handle=%x)",
+ __func__, user_service->service->handle);
if (user_service->close_pending) {
/* Allow the underlying service to be culled */
@@ -95,8 +90,8 @@ static ssize_t vchiq_ioc_copy_element_data(void *context, void *dest,
maxsize - total_bytes_copied);
if (copy_from_user(dest + total_bytes_copied,
- cc->element->data + cc->element_offset,
- bytes_this_round))
+ cc->element->data + cc->element_offset,
+ bytes_this_round))
return -EFAULT;
cc->element_offset += bytes_this_round;
@@ -215,7 +210,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
struct vchiq_header *header;
int ret;
- DEBUG_INITIALISE(g_state.local)
+ DEBUG_INITIALISE(g_state.local);
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
service = find_service_for_instance(instance, args->handle);
if (!service)
@@ -240,10 +235,9 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
do {
spin_unlock(&msg_queue_spinlock);
DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
- if (wait_for_completion_interruptible(
- &user_service->insert_event)) {
+ if (wait_for_completion_interruptible(&user_service->insert_event)) {
vchiq_log_info(vchiq_arm_log_level,
- "DEQUEUE_MESSAGE interrupted");
+ "DEQUEUE_MESSAGE interrupted");
ret = -EINTR;
break;
}
@@ -271,8 +265,7 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
ret = -ENOTCONN;
} else if (header->size <= args->bufsize) {
/* Copy to user space if msgbuf is not NULL */
- if (!args->buf || (copy_to_user(args->buf,
- header->data, header->size) == 0)) {
+ if (!args->buf || (copy_to_user(args->buf, header->data, header->size) == 0)) {
ret = header->size;
vchiq_release_message(service->handle, header);
} else {
@@ -280,8 +273,8 @@ static int vchiq_ioc_dequeue_message(struct vchiq_instance *instance,
}
} else {
vchiq_log_error(vchiq_arm_log_level,
- "header %pK: bufsize %x < size %x",
- header, args->bufsize, header->size);
+ "header %pK: bufsize %x < size %x",
+ header, args->bufsize, header->size);
WARN(1, "invalid size\n");
ret = -EMSGSIZE;
}
@@ -328,14 +321,12 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
mutex_unlock(&instance->bulk_waiter_list_mutex);
if (!found) {
vchiq_log_error(vchiq_arm_log_level,
- "no bulk_waiter found for pid %d",
- current->pid);
+ "no bulk_waiter found for pid %d", current->pid);
ret = -ESRCH;
goto out;
}
vchiq_log_info(vchiq_arm_log_level,
- "found bulk_waiter %pK for pid %d", waiter,
- current->pid);
+ "found bulk_waiter %pK for pid %d", waiter, current->pid);
userdata = &waiter->bulk_waiter;
} else {
userdata = args->userdata;
@@ -350,7 +341,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
}
if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
- !waiter->bulk_waiter.bulk) {
+ !waiter->bulk_waiter.bulk) {
if (waiter->bulk_waiter.bulk) {
/* Cancel the signal when the transfer completes. */
spin_lock(&bulk_waiter_spinlock);
@@ -367,8 +358,7 @@ static int vchiq_irq_queue_bulk_tx_rx(struct vchiq_instance *instance,
list_add(&waiter->list, &instance->bulk_waiter_list);
mutex_unlock(&instance->bulk_waiter_list_mutex);
vchiq_log_info(vchiq_arm_log_level,
- "saved bulk_waiter %pK for pid %d",
- waiter, current->pid);
+ "saved bulk_waiter %pK for pid %d", waiter, current->pid);
ret = put_user(mode_waiting, mode);
}
@@ -449,29 +439,26 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
int remove;
int ret;
- DEBUG_INITIALISE(g_state.local)
+ DEBUG_INITIALISE(g_state.local);
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- if (!instance->connected) {
+ if (!instance->connected)
return -ENOTCONN;
- }
mutex_lock(&instance->completion_mutex);
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
- while ((instance->completion_remove == instance->completion_insert)
- && !instance->closing) {
+ while ((instance->completion_remove == instance->completion_insert) && !instance->closing) {
int rc;
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
mutex_unlock(&instance->completion_mutex);
- rc = wait_for_completion_interruptible(
- &instance->insert_event);
+ rc = wait_for_completion_interruptible(&instance->insert_event);
mutex_lock(&instance->completion_mutex);
if (rc) {
DEBUG_TRACE(AWAIT_COMPLETION_LINE);
vchiq_log_info(vchiq_arm_log_level,
- "AWAIT_COMPLETION interrupted");
+ "AWAIT_COMPLETION interrupted");
ret = -EINTR;
goto out;
}
@@ -491,8 +478,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
if (remove == instance->completion_insert)
break;
- completion = &instance->completions[
- remove & (MAX_COMPLETIONS - 1)];
+ completion = &instance->completions[remove & (MAX_COMPLETIONS - 1)];
/*
* A read memory barrier is needed to stop
@@ -518,9 +504,9 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
/* This must be a VCHIQ-style service */
if (args->msgbufsize < msglen) {
vchiq_log_error(vchiq_arm_log_level,
- "header %pK: msgbufsize %x < msglen %x",
- header, args->msgbufsize, msglen);
- WARN(1, "invalid message size\n");
+ "header %pK: msgbufsize %x < msglen %x",
+ header, args->msgbufsize, msglen);
+ WARN(1, "invalid message size\n");
if (ret == 0)
ret = -EMSGSIZE;
break;
@@ -531,7 +517,7 @@ static int vchiq_ioc_await_completion(struct vchiq_instance *instance,
/* Get the pointer from user space */
msgbufcount--;
if (vchiq_get_user_ptr(&msgbuf, args->msgbufs,
- msgbufcount)) {
+ msgbufcount)) {
if (ret == 0)
ret = -EFAULT;
break;
@@ -599,11 +585,9 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
int i, rc;
vchiq_log_trace(vchiq_arm_log_level,
- "%s - instance %pK, cmd %s, arg %lx",
- __func__, instance,
- ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
- (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
- ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
+ "%s - instance %pK, cmd %s, arg %lx", __func__, instance,
+ ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) && (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
switch (cmd) {
case VCHIQ_IOC_SHUTDOWN:
@@ -613,7 +597,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
/* Remove all services */
i = 0;
while ((service = next_service_by_instance(instance->state,
- instance, &i))) {
+ instance, &i))) {
status = vchiq_remove_service(service->handle);
vchiq_service_put(service);
if (status != VCHIQ_SUCCESS)
@@ -637,8 +621,8 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = mutex_lock_killable(&instance->state->mutex);
if (rc) {
vchiq_log_error(vchiq_arm_log_level,
- "vchiq: connect: could not lock mutex for state %d: %d",
- instance->state->id, rc);
+ "vchiq: connect: could not lock mutex for state %d: %d",
+ instance->state->id, rc);
ret = -EINTR;
break;
}
@@ -649,7 +633,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
instance->connected = 1;
else
vchiq_log_error(vchiq_arm_log_level,
- "vchiq: could not connect: %d", status);
+ "vchiq: could not connect: %d", status);
break;
case VCHIQ_IOC_CREATE_SERVICE: {
@@ -703,8 +687,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
* CLOSE_DELIVERED ioctl, signalling close_event.
*/
if (user_service->close_pending &&
- wait_for_completion_interruptible(
- &user_service->close_event))
+ wait_for_completion_interruptible(&user_service->close_event))
status = VCHIQ_RETRY;
break;
}
@@ -720,14 +703,12 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
vchiq_release_service_internal(service);
if (ret) {
vchiq_log_error(vchiq_susp_log_level,
- "%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
- __func__,
- (cmd == VCHIQ_IOC_USE_SERVICE) ?
+ "%s: cmd %s returned error %ld for service %c%c%c%c:%03d",
+ __func__, (cmd == VCHIQ_IOC_USE_SERVICE) ?
"VCHIQ_IOC_USE_SERVICE" :
"VCHIQ_IOC_RELEASE_SERVICE",
ret,
- VCHIQ_FOURCC_AS_4CHARS(
- service->base.fourcc),
+ VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
service->client_id);
}
} else {
@@ -751,7 +732,7 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct vchiq_element elements[MAX_ELEMENTS];
if (copy_from_user(elements, args.elements,
- args.count * sizeof(struct vchiq_element)) == 0)
+ args.count * sizeof(struct vchiq_element)) == 0)
ret = vchiq_ioc_queue_message(args.handle, elements,
args.count);
else
@@ -889,23 +870,16 @@ vchiq_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
ret = -EINTR;
}
- if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
- (ret != -EWOULDBLOCK))
+ if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) && (ret != -EWOULDBLOCK))
vchiq_log_info(vchiq_arm_log_level,
- " ioctl instance %pK, cmd %s -> status %d, %ld",
- instance,
- (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
- ioctl_names[_IOC_NR(cmd)] :
- "<invalid>",
- status, ret);
+ " ioctl instance %pK, cmd %s -> status %d, %ld",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
else
vchiq_log_trace(vchiq_arm_log_level,
- " ioctl instance %pK, cmd %s -> status %d, %ld",
- instance,
- (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
- ioctl_names[_IOC_NR(cmd)] :
- "<invalid>",
- status, ret);
+ " ioctl instance %pK, cmd %s -> status %d, %ld",
+ instance, (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
+ ioctl_names[_IOC_NR(cmd)] : "<invalid>", status, ret);
return ret;
}
@@ -1303,8 +1277,8 @@ static int vchiq_release(struct inode *inode, struct file *file)
struct vchiq_completion_data_kernel *completion;
struct vchiq_service *service;
- completion = &instance->completions[
- instance->completion_remove & (MAX_COMPLETIONS - 1)];
+ completion = &instance->completions[instance->completion_remove
+ & (MAX_COMPLETIONS - 1)];
service = completion->service_userdata;
if (completion->reason == VCHIQ_SERVICE_CLOSED) {
struct user_service *user_service =
@@ -1364,6 +1338,13 @@ vchiq_fops = {
.read = vchiq_read
};
+static struct miscdevice vchiq_miscdev = {
+ .fops = &vchiq_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "vchiq",
+
+};
+
/**
* vchiq_register_chrdev - Register the char driver for vchiq
* and create the necessary class and
@@ -1374,57 +1355,9 @@ vchiq_fops = {
*/
int vchiq_register_chrdev(struct device *parent)
{
- struct device *vchiq_dev;
- int ret;
+ vchiq_miscdev.parent = parent;
- vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
- if (IS_ERR(vchiq_class)) {
- pr_err("Failed to create vchiq class\n");
- ret = PTR_ERR(vchiq_class);
- goto error_exit;
- }
-
- ret = alloc_chrdev_region(&vchiq_devid, 0, 1, DEVICE_NAME);
- if (ret) {
- pr_err("vchiq: Failed to allocate vchiq's chrdev region\n");
- goto alloc_region_error;
- }
-
- cdev_init(&vchiq_cdev, &vchiq_fops);
- vchiq_cdev.owner = THIS_MODULE;
- ret = cdev_add(&vchiq_cdev, vchiq_devid, 1);
- if (ret) {
- vchiq_log_error(vchiq_arm_log_level,
- "Unable to register vchiq char device");
- goto cdev_add_error;
- }
-
- vchiq_dev = device_create(vchiq_class, parent, vchiq_devid, NULL,
- DEVICE_NAME);
- if (IS_ERR(vchiq_dev)) {
- vchiq_log_error(vchiq_arm_log_level,
- "Failed to create vchiq char device node");
- ret = PTR_ERR(vchiq_dev);
- goto device_create_error;
- }
-
- vchiq_log_info(vchiq_arm_log_level,
- "vchiq char dev initialised successfully - device %d.%d",
- MAJOR(vchiq_devid), MINOR(vchiq_devid));
-
- return 0;
-
-device_create_error:
- cdev_del(&vchiq_cdev);
-
-cdev_add_error:
- unregister_chrdev_region(vchiq_devid, 1);
-
-alloc_region_error:
- class_destroy(vchiq_class);
-
-error_exit:
- return ret;
+ return misc_register(&vchiq_miscdev);
}
/**
@@ -1433,8 +1366,5 @@ error_exit:
*/
void vchiq_deregister_chrdev(void)
{
- device_destroy(vchiq_class, vchiq_devid);
- cdev_del(&vchiq_cdev);
- unregister_chrdev_region(vchiq_devid, 1);
- class_destroy(vchiq_class);
+ misc_deregister(&vchiq_miscdev);
}
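The vchiq_dev.c changes above replace the hand-rolled cdev/class/device_create sequence, and its four-stage error unwinding, with a single misc device. A minimal, self-contained sketch of that registration pattern; the "demo" name and the empty file_operations are placeholders, not vchiq's:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
};

/* One structure replaces cdev_init/cdev_add, class_create and device_create. */
static struct miscdevice demo_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,	/* let the misc core pick a minor */
	.name  = "demo",		/* node appears as /dev/demo */
	.fops  = &demo_fops,
};

static int __init demo_init(void)
{
	/* Creates the character device and its device node in one call. */
	return misc_register(&demo_miscdev);
}

static void __exit demo_exit(void)
{
	misc_deregister(&demo_miscdev);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");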
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index f73f3fad3e05..84fa6ea3e2e6 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -1691,7 +1691,7 @@ static const unsigned short awc_frame_time[MAX_RATE] = {
*
* Parameters:
* In:
- * by_preamble_type - Preamble Type
+ * preamble_type - Preamble Type
* by_pkt_type - PK_TYPE_11A, PK_TYPE_11B, PK_TYPE_11GB, PK_TYPE_11GA
* cb_frame_length - Baseband Type
* tx_rate - Tx Rate
@@ -1700,7 +1700,7 @@ static const unsigned short awc_frame_time[MAX_RATE] = {
* Return Value: FrameTime
*
*/
-unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+unsigned int bb_get_frame_time(unsigned char preamble_type,
unsigned char by_pkt_type,
unsigned int cb_frame_length,
unsigned short tx_rate)
@@ -1717,7 +1717,7 @@ unsigned int bb_get_frame_time(unsigned char by_preamble_type,
rate = (unsigned int)awc_frame_time[rate_idx];
if (rate_idx <= 3) { /* CCK mode */
- if (by_preamble_type == 1) /* Short */
+ if (preamble_type == PREAMBLE_SHORT)
preamble = 96;
else
preamble = 192;
@@ -1764,7 +1764,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
u32 count = 0;
u32 tmp;
int ext_bit;
- u8 preamble_type = priv->byPreambleType;
+ u8 preamble_type = priv->preamble_type;
bit_count = frame_length * 8;
ext_bit = false;
@@ -1779,7 +1779,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
case RATE_2M:
count = bit_count / 2;
- if (preamble_type == 1)
+ if (preamble_type == PREAMBLE_SHORT)
phy->signal = 0x09;
else
phy->signal = 0x01;
@@ -1792,7 +1792,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
if (tmp != bit_count)
count++;
- if (preamble_type == 1)
+ if (preamble_type == PREAMBLE_SHORT)
phy->signal = 0x0a;
else
phy->signal = 0x02;
@@ -1809,7 +1809,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
ext_bit = true;
}
- if (preamble_type == 1)
+ if (preamble_type == PREAMBLE_SHORT)
phy->signal = 0x0b;
else
phy->signal = 0x03;
@@ -1905,7 +1905,7 @@ void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char *pby_data)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned char by_value;
@@ -1948,7 +1948,7 @@ bool bb_read_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
bool bb_write_embedded(struct vnt_private *priv, unsigned char by_bb_addr,
unsigned char by_data)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned char by_value;
@@ -1992,9 +1992,9 @@ bool bb_vt3253_init(struct vnt_private *priv)
{
bool result = true;
int ii;
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
unsigned char by_rf_type = priv->byRFType;
- unsigned char by_local_id = priv->byLocalID;
+ unsigned char by_local_id = priv->local_id;
if (by_rf_type == RF_RFMD2959) {
if (by_local_id <= REV_ID_VT3253_A1) {
@@ -2021,10 +2021,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x0A;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -70;
- priv->ldBmThreshold[1] = -50;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -70;
+ priv->dbm_threshold[1] = -50;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
} else if ((by_rf_type == RF_AIROHA) || (by_rf_type == RF_AL2230S)) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_AIROHA2230; ii++)
result &= bb_write_embedded(priv,
@@ -2039,10 +2039,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x10;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -70;
- priv->ldBmThreshold[1] = -48;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -70;
+ priv->dbm_threshold[1] = -48;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
} else if (by_rf_type == RF_UW2451) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
result &= bb_write_embedded(priv,
@@ -2061,10 +2061,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x0A;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -60;
- priv->ldBmThreshold[1] = -50;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -60;
+ priv->dbm_threshold[1] = -50;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
} else if (by_rf_type == RF_UW2452) {
for (ii = 0; ii < CB_VT3253B0_INIT_FOR_UW2451; ii++)
result &= bb_write_embedded(priv,
@@ -2107,10 +2107,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x0A;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -60;
- priv->ldBmThreshold[1] = -50;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -60;
+ priv->dbm_threshold[1] = -50;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
/* }} RobertYu */
} else if (by_rf_type == RF_VT3226) {
@@ -2127,10 +2127,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x10;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -70;
- priv->ldBmThreshold[1] = -48;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -70;
+ priv->dbm_threshold[1] = -48;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
/* Fix VT3226 DFC system timing issue */
MACvSetRFLE_LatchBase(iobase);
/* {{ RobertYu: 20050104 */
@@ -2161,10 +2161,10 @@ bool bb_vt3253_init(struct vnt_private *priv)
priv->abyBBVGA[1] = 0x10;
priv->abyBBVGA[2] = 0x0;
priv->abyBBVGA[3] = 0x0;
- priv->ldBmThreshold[0] = -70;
- priv->ldBmThreshold[1] = -48;
- priv->ldBmThreshold[2] = 0;
- priv->ldBmThreshold[3] = 0;
+ priv->dbm_threshold[0] = -70;
+ priv->dbm_threshold[1] = -48;
+ priv->dbm_threshold[2] = 0;
+ priv->dbm_threshold[3] = 0;
/* }} RobertYu */
} else {
/* No VGA Table now */
@@ -2200,7 +2200,7 @@ bb_set_short_slot_time(struct vnt_private *priv)
bb_read_embedded(priv, 0x0A, &by_bb_rx_conf); /* CR10 */
- if (priv->bShortSlotTime)
+ if (priv->short_slot_time)
by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
by_bb_rx_conf |= 0x20; /* 0010 0000 */
@@ -2223,7 +2223,7 @@ void bb_set_vga_gain_offset(struct vnt_private *priv, unsigned char by_data)
/* patch for 3253B0 Baseband with Cardbus module */
if (by_data == priv->abyBBVGA[0])
by_bb_rx_conf |= 0x20; /* 0010 0000 */
- else if (priv->bShortSlotTime)
+ else if (priv->short_slot_time)
by_bb_rx_conf &= 0xDF; /* 1101 1111 */
else
by_bb_rx_conf |= 0x20; /* 0010 0000 */
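In bb_get_frame_time() above, CCK rates prepend either a 96 µs short or a 192 µs long preamble plus PLCP header and then count the microseconds the payload needs at the chosen rate. A rough user-space sketch of that airtime estimate; the PREAMBLE_* values, the rate-times-ten encoding and the rounding are simplifying assumptions, not the driver's exact tables:

#include <stdio.h>

#define PREAMBLE_SHORT	1
#define PREAMBLE_LONG	0

/* Approximate CCK/DSSS frame airtime in microseconds. */
static unsigned int cck_frame_time_us(int preamble_type,
				      unsigned int frame_len_bytes,
				      unsigned int rate_x10)	/* 10, 20, 55, 110 */
{
	unsigned int preamble_us = (preamble_type == PREAMBLE_SHORT) ? 96 : 192;
	unsigned int bits = frame_len_bytes * 8;

	/* ceil(bits / (rate_x10 / 10)) written as integer arithmetic */
	return preamble_us + (bits * 10 + rate_x10 - 1) / rate_x10;
}

int main(void)
{
	printf("1500 bytes @ 11 Mb/s, short preamble: ~%u us\n",
	       cck_frame_time_us(PREAMBLE_SHORT, 1500, 110));
	return 0;
}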
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 0a30afaa7cc3..15b2802ed727 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -44,7 +44,7 @@
#define TOP_RATE_2M 0x00200000
#define TOP_RATE_1M 0x00100000
-unsigned int bb_get_frame_time(unsigned char by_preamble_type,
+unsigned int bb_get_frame_time(unsigned char preamble_type,
unsigned char by_pkt_type,
unsigned int cb_frame_length,
unsigned short w_rate);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 3ef3a6e0e6e1..26e08fec6e6a 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -56,7 +56,7 @@ static const unsigned short cwRXBCNTSFOff[MAX_RATE] = {
/*--------------------- Static Functions --------------------------*/
-static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
+static void s_vCalculateOFDMRParameter(unsigned char rate, u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime);
@@ -75,12 +75,12 @@ static void s_vCalculateOFDMRParameter(unsigned char byRate, u8 bb_type,
*
* Return Value: none
*/
-static void s_vCalculateOFDMRParameter(unsigned char byRate,
+static void s_vCalculateOFDMRParameter(unsigned char rate,
u8 bb_type,
unsigned char *pbyTxRate,
unsigned char *pbyRsvTime)
{
- switch (byRate) {
+ switch (rate) {
case RATE_6M:
if (bb_type == BB_TYPE_11A) { /* 5GHZ */
*pbyTxRate = 0x9B;
@@ -190,7 +190,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
if (bb_type == BB_TYPE_11A) {
if (priv->byRFType == RF_AIROHA7230) {
/* AL7230 use single PAPE and connect to PAPE_2.4G */
- MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
+ MACvSetBBType(priv->port_offset, BB_TYPE_11G);
priv->abyBBVGA[0] = 0x20;
priv->abyBBVGA[2] = 0x10;
priv->abyBBVGA[3] = 0x10;
@@ -199,7 +199,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
bb_write_embedded(priv, 0xE7, priv->abyBBVGA[0]);
} else if (priv->byRFType == RF_UW2452) {
- MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
+ MACvSetBBType(priv->port_offset, BB_TYPE_11A);
priv->abyBBVGA[0] = 0x18;
bb_read_embedded(priv, 0xE7, &byData);
if (byData == 0x14) {
@@ -207,7 +207,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
bb_write_embedded(priv, 0xE1, 0x57);
}
} else {
- MACvSetBBType(priv->PortOffset, BB_TYPE_11A);
+ MACvSetBBType(priv->port_offset, BB_TYPE_11A);
}
bb_write_embedded(priv, 0x88, 0x03);
bySlot = C_SLOT_SHORT;
@@ -215,7 +215,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
byDIFS = C_SIFS_A + 2 * C_SLOT_SHORT;
byCWMaxMin = 0xA4;
} else if (bb_type == BB_TYPE_11B) {
- MACvSetBBType(priv->PortOffset, BB_TYPE_11B);
+ MACvSetBBType(priv->port_offset, BB_TYPE_11B);
if (priv->byRFType == RF_AIROHA7230) {
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
@@ -238,7 +238,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
byDIFS = C_SIFS_BG + 2 * C_SLOT_LONG;
byCWMaxMin = 0xA5;
} else { /* PK_TYPE_11GA & PK_TYPE_11GB */
- MACvSetBBType(priv->PortOffset, BB_TYPE_11G);
+ MACvSetBBType(priv->port_offset, BB_TYPE_11G);
if (priv->byRFType == RF_AIROHA7230) {
priv->abyBBVGA[0] = 0x1C;
priv->abyBBVGA[2] = 0x00;
@@ -258,7 +258,7 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
bb_write_embedded(priv, 0x88, 0x08);
bySIFS = C_SIFS_BG;
- if (priv->bShortSlotTime) {
+ if (priv->short_slot_time) {
bySlot = C_SLOT_SHORT;
byDIFS = C_SIFS_BG + 2 * C_SLOT_SHORT;
} else {
@@ -292,25 +292,25 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
if (priv->bySIFS != bySIFS) {
priv->bySIFS = bySIFS;
- VNSvOutPortB(priv->PortOffset + MAC_REG_SIFS, priv->bySIFS);
+ VNSvOutPortB(priv->port_offset + MAC_REG_SIFS, priv->bySIFS);
}
if (priv->byDIFS != byDIFS) {
priv->byDIFS = byDIFS;
- VNSvOutPortB(priv->PortOffset + MAC_REG_DIFS, priv->byDIFS);
+ VNSvOutPortB(priv->port_offset + MAC_REG_DIFS, priv->byDIFS);
}
if (priv->byEIFS != C_EIFS) {
priv->byEIFS = C_EIFS;
- VNSvOutPortB(priv->PortOffset + MAC_REG_EIFS, priv->byEIFS);
+ VNSvOutPortB(priv->port_offset + MAC_REG_EIFS, priv->byEIFS);
}
if (priv->bySlot != bySlot) {
priv->bySlot = bySlot;
- VNSvOutPortB(priv->PortOffset + MAC_REG_SLOT, priv->bySlot);
+ VNSvOutPortB(priv->port_offset + MAC_REG_SLOT, priv->bySlot);
bb_set_short_slot_time(priv);
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0,
+ VNSvOutPortB(priv->port_offset + MAC_REG_CWMAXMIN0,
priv->byCWMaxMin);
}
@@ -348,11 +348,11 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST,
+ VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST,
(u32)qwTSFOffset);
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4,
+ VNSvOutPortD(priv->port_offset + MAC_REG_TSFOFST + 4,
(u32)(qwTSFOffset >> 32));
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL,
+ MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL,
TFTCTL_TSFSYNCEN);
}
return true;
@@ -381,13 +381,13 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
qwNextTBTT = CARDqGetNextTBTT(qwNextTBTT, wBeaconInterval);
/* set HW beacon interval */
- VNSvOutPortW(priv->PortOffset + MAC_REG_BI, wBeaconInterval);
+ VNSvOutPortW(priv->port_offset + MAC_REG_BI, wBeaconInterval);
priv->wBeaconInterval = wBeaconInterval;
/* Set NextTBTT */
- VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4,
+ VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
+ VNSvOutPortD(priv->port_offset + MAC_REG_NEXTTBTT + 4,
(u32)(qwNextTBTT >> 32));
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
}
@@ -409,29 +409,29 @@ void CARDbRadioPowerOff(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ MACvWordRegBitsOn(priv->port_offset, MAC_REG_SOFTPWRCTL,
SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
SOFTPWRCTL_SWPE2);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ MACvWordRegBitsOff(priv->port_offset, MAC_REG_SOFTPWRCTL,
SOFTPWRCTL_SWPE3);
break;
}
- MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_RXON);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_RXON);
- bb_set_deep_sleep(priv, priv->byLocalID);
+ bb_set_deep_sleep(priv, priv->local_id);
priv->bRadioOff = true;
pr_debug("chester power off\n");
- MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0,
+ MACvRegBitsOn(priv->port_offset, MAC_REG_GPIOCTL0,
LED_ACTSET); /* LED issue */
}
@@ -467,7 +467,7 @@ void CARDvSafeResetTx(struct vnt_private *priv)
MACvSetCurrTXDescAddr(TYPE_AC0DMA, priv, priv->td1_pool_dma);
/* set MAC Beacon TX pointer */
- MACvSetCurrBCNTxDescAddr(priv->PortOffset,
+ MACvSetCurrBCNTxDescAddr(priv->port_offset,
(priv->tx_beacon_dma));
}
@@ -509,8 +509,8 @@ void CARDvSafeResetRx(struct vnt_private *priv)
}
/* set perPkt mode */
- MACvRx0PerPktMode(priv->PortOffset);
- MACvRx1PerPktMode(priv->PortOffset);
+ MACvRx0PerPktMode(priv->port_offset);
+ MACvRx1PerPktMode(priv->port_offset);
/* set MAC RD pointer */
MACvSetCurrRx0DescAddr(priv, priv->rd0_pool_dma);
@@ -599,7 +599,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
spin_lock_irqsave(&priv->lock, flags);
/* Set to Page1 */
- MACvSelectPage1(priv->PortOffset);
+ MACvSelectPage1(priv->port_offset);
/* RSPINF_b_1 */
vnt_get_phy_field(priv, 14,
@@ -609,7 +609,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
/* swap over to get correct write order */
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->PortOffset + MAC_REG_RSPINF_B_1, phy.field_write);
+ VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_1, phy.field_write);
/* RSPINF_b_2 */
vnt_get_phy_field(priv, 14,
@@ -618,7 +618,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->PortOffset + MAC_REG_RSPINF_B_2, phy.field_write);
+ VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_2, phy.field_write);
/* RSPINF_b_5 */
vnt_get_phy_field(priv, 14,
@@ -627,7 +627,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->PortOffset + MAC_REG_RSPINF_B_5, phy.field_write);
+ VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_5, phy.field_write);
/* RSPINF_b_11 */
vnt_get_phy_field(priv, 14,
@@ -636,42 +636,42 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
swap(phy.swap[0], phy.swap[1]);
- VNSvOutPortD(priv->PortOffset + MAC_REG_RSPINF_B_11, phy.field_write);
+ VNSvOutPortD(priv->port_offset + MAC_REG_RSPINF_B_11, phy.field_write);
/* RSPINF_a_6 */
s_vCalculateOFDMRParameter(RATE_6M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_6,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_9,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_12,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_18,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_24,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
@@ -679,7 +679,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_36,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
@@ -687,7 +687,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_48,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
@@ -695,7 +695,7 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_54,
MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv,
@@ -703,10 +703,10 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72,
+ VNSvOutPortW(priv->port_offset + MAC_REG_RSPINF_A_72,
MAKEWORD(byTxRate, byRsvTime));
/* Set to Page0 */
- MACvSelectPage0(priv->PortOffset);
+ MACvSelectPage0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -796,7 +796,7 @@ u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2)
*/
bool CARDbGetCurrentTSF(struct vnt_private *priv, u64 *pqwCurrTSF)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned char byData;
@@ -857,7 +857,7 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
void CARDvSetFirstNextTBTT(struct vnt_private *priv,
unsigned short wBeaconInterval)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
u64 qwNextTBTT = 0;
CARDbGetCurrentTSF(priv, &qwNextTBTT); /* Get Local TSF counter */
@@ -886,7 +886,7 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv,
void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
unsigned short wBeaconInterval)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
qwTSF = CARDqGetNextTBTT(qwTSF, wBeaconInterval);
/* Set NextTBTT */
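CARDbUpdateTSF() and CARDbSetBeaconPeriod() above program 64-bit TSF values through a pair of 32-bit registers, low doubleword first and the high doubleword at offset +4. A tiny user-space sketch of that split; the regs[] array stands in for the memory-mapped register pair:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for two consecutive 32-bit hardware registers. */
static uint32_t regs[2];

static void write_u64_as_two_u32(uint64_t val)
{
	regs[0] = (uint32_t)val;		/* low dword, e.g. MAC_REG_TSFOFST */
	regs[1] = (uint32_t)(val >> 32);	/* high dword at the +4 offset */
}

int main(void)
{
	write_u64_as_two_u32(0x0123456789abcdefULL);
	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", regs[0], regs[1]);
	return 0;
}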
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index 52b6538a201a..b550a1a0844e 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -173,7 +173,7 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
}
/* clear NAV */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_MACCR, MACCR_CLRNAV);
/* TX_PE will reserve 3 us for MAX2829 A mode only,
* it is for better TX throughput
@@ -193,20 +193,20 @@ bool set_channel(struct vnt_private *priv, struct ieee80211_channel *ch)
bb_software_reset(priv);
- if (priv->byLocalID > REV_ID_VT3253_B1) {
+ if (priv->local_id > REV_ID_VT3253_B1) {
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
/* set HW default power register */
- MACvSelectPage1(priv->PortOffset);
+ MACvSelectPage1(priv->port_offset);
RFbSetPower(priv, RATE_1M, priv->byCurrentCh);
- VNSvOutPortB(priv->PortOffset + MAC_REG_PWRCCK,
+ VNSvOutPortB(priv->port_offset + MAC_REG_PWRCCK,
priv->byCurPwr);
RFbSetPower(priv, RATE_6M, priv->byCurrentCh);
- VNSvOutPortB(priv->PortOffset + MAC_REG_PWROFDM,
+ VNSvOutPortB(priv->port_offset + MAC_REG_PWROFDM,
priv->byCurPwr);
- MACvSelectPage0(priv->PortOffset);
+ MACvSelectPage0(priv->port_offset);
spin_unlock_irqrestore(&priv->lock, flags);
}
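set_channel() above flips the MAC to register page 1, writes the per-channel CCK and OFDM power values, and flips back to page 0, all under spin_lock_irqsave() so no other path sees the wrong page. A skeletal kernel-style sketch of that banked-register pattern; the page-select offset and helper name are hypothetical, not the VT6655 layout:

#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define REG_PAGE_SELECT	0x17	/* hypothetical page-select register offset */

/*
 * Banked registers decode differently depending on the selected page, so a
 * page flip plus write must form one critical section.
 */
static void write_banked_reg(void __iomem *io, spinlock_t *lock,
			     unsigned int reg, u8 val)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	iowrite8(1, io + REG_PAGE_SELECT);	/* switch to page 1 */
	iowrite8(val, io + reg);		/* program the banked register */
	iowrite8(0, io + REG_PAGE_SELECT);	/* restore page 0 for other users */
	spin_unlock_irqrestore(lock, flags);
}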
diff --git a/drivers/staging/vt6655/device.h b/drivers/staging/vt6655/device.h
index 2af769174e33..4706bde1ec1d 100644
--- a/drivers/staging/vt6655/device.h
+++ b/drivers/staging/vt6655/device.h
@@ -124,7 +124,7 @@ struct vnt_private {
unsigned char *tx1_bufs;
unsigned char *tx_beacon_bufs;
- void __iomem *PortOffset;
+ void __iomem *port_offset;
u32 memaddr;
u32 ioaddr;
@@ -154,7 +154,7 @@ struct vnt_private {
u32 rx_bytes;
/* Version control */
- unsigned char byLocalID;
+ unsigned char local_id;
unsigned char byRFType;
unsigned char byMaxPwrLevel;
@@ -203,7 +203,7 @@ struct vnt_private {
unsigned char byMinChannel;
unsigned char byMaxChannel;
- unsigned char byPreambleType;
+ unsigned char preamble_type;
unsigned char byShortPreamble;
unsigned short wCurrentRate;
@@ -215,7 +215,7 @@ struct vnt_private {
bool bEncryptionEnable;
bool bLongHeader;
- bool bShortSlotTime;
+ bool short_slot_time;
bool bProtectMode;
bool bNonERPPresent;
bool bBarkerPreambleMd;
@@ -253,7 +253,7 @@ struct vnt_private {
unsigned char byBBVGANew;
unsigned char byBBVGACurrent;
unsigned char abyBBVGA[BB_VGA_LEVEL];
- long ldBmThreshold[BB_VGA_LEVEL];
+ long dbm_threshold[BB_VGA_LEVEL];
unsigned char byBBPreEDRSSI;
unsigned char byBBPreEDIndex;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index d40c2ac14928..212d2a287b2c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -177,11 +177,11 @@ device_set_options(struct vnt_private *priv)
priv->byPacketType = priv->byBBType;
priv->byAutoFBCtrl = AUTO_FB_0;
priv->bUpdateBBVGA = true;
- priv->byPreambleType = 0;
+ priv->preamble_type = 0;
pr_debug(" byShortRetryLimit= %d\n", (int)priv->byShortRetryLimit);
pr_debug(" byLongRetryLimit= %d\n", (int)priv->byLongRetryLimit);
- pr_debug(" byPreambleType= %d\n", (int)priv->byPreambleType);
+ pr_debug(" preamble_type= %d\n", (int)priv->preamble_type);
pr_debug(" byShortPreamble= %d\n", (int)priv->byShortPreamble);
pr_debug(" byBBType= %d\n", (int)priv->byBBType);
}
@@ -219,11 +219,11 @@ static void device_init_registers(struct vnt_private *priv)
MACvInitialize(priv);
/* Get Local ID */
- VNSvInPortB(priv->PortOffset + MAC_REG_LOCALID, &priv->byLocalID);
+ VNSvInPortB(priv->port_offset + MAC_REG_LOCALID, &priv->local_id);
spin_lock_irqsave(&priv->lock, flags);
- SROMvReadAllContents(priv->PortOffset, priv->abyEEPROM);
+ SROMvReadAllContents(priv->port_offset, priv->abyEEPROM);
spin_unlock_irqrestore(&priv->lock, flags);
@@ -232,7 +232,7 @@ static void device_init_registers(struct vnt_private *priv)
priv->byMaxChannel = CB_MAX_CHANNEL;
/* Get Antena */
- byValue = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_ANTENNA);
+ byValue = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_ANTENNA);
if (byValue & EEP_ANTINV)
priv->bTxRxAntInv = true;
else
@@ -292,20 +292,20 @@ static void device_init_registers(struct vnt_private *priv)
/* Get Desire Power Value */
priv->byCurPwr = 0xFF;
- priv->byCCKPwr = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_PWR_CCK);
- priv->byOFDMPwrG = SROMbyReadEmbedded(priv->PortOffset,
+ priv->byCCKPwr = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_PWR_CCK);
+ priv->byOFDMPwrG = SROMbyReadEmbedded(priv->port_offset,
EEP_OFS_PWR_OFDMG);
/* Load power Table */
for (ii = 0; ii < CB_MAX_CHANNEL_24G; ii++) {
priv->abyCCKPwrTbl[ii + 1] =
- SROMbyReadEmbedded(priv->PortOffset,
+ SROMbyReadEmbedded(priv->port_offset,
(unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
if (priv->abyCCKPwrTbl[ii + 1] == 0)
priv->abyCCKPwrTbl[ii + 1] = priv->byCCKPwr;
priv->abyOFDMPwrTbl[ii + 1] =
- SROMbyReadEmbedded(priv->PortOffset,
+ SROMbyReadEmbedded(priv->port_offset,
(unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
if (priv->abyOFDMPwrTbl[ii + 1] == 0)
priv->abyOFDMPwrTbl[ii + 1] = priv->byOFDMPwrG;
@@ -323,25 +323,25 @@ static void device_init_registers(struct vnt_private *priv)
/* Load OFDM A Power Table */
for (ii = 0; ii < CB_MAX_CHANNEL_5G; ii++) {
priv->abyOFDMPwrTbl[ii + CB_MAX_CHANNEL_24G + 1] =
- SROMbyReadEmbedded(priv->PortOffset,
+ SROMbyReadEmbedded(priv->port_offset,
(unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
priv->abyOFDMDefaultPwr[ii + CB_MAX_CHANNEL_24G + 1] =
- SROMbyReadEmbedded(priv->PortOffset,
+ SROMbyReadEmbedded(priv->port_offset,
(unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
}
- if (priv->byLocalID > REV_ID_VT3253_B1) {
- MACvSelectPage1(priv->PortOffset);
+ if (priv->local_id > REV_ID_VT3253_B1) {
+ MACvSelectPage1(priv->port_offset);
- VNSvOutPortB(priv->PortOffset + MAC_REG_MSRCTL + 1,
+ VNSvOutPortB(priv->port_offset + MAC_REG_MSRCTL + 1,
(MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
- MACvSelectPage0(priv->PortOffset);
+ MACvSelectPage0(priv->port_offset);
}
/* use relative tx timeout and 802.11i D4 */
- MACvWordRegBitsOn(priv->PortOffset,
+ MACvWordRegBitsOn(priv->port_offset,
MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
/* set performance parameter by registry */
@@ -349,9 +349,9 @@ static void device_init_registers(struct vnt_private *priv)
MACvSetLongRetryLimit(priv, priv->byLongRetryLimit);
/* reset TSF counter */
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
/* enable TSF counter */
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
/* initialize BBP registers */
bb_vt3253_init(priv);
@@ -371,13 +371,13 @@ static void device_init_registers(struct vnt_private *priv)
priv->bRadioOff = false;
- priv->byRadioCtl = SROMbyReadEmbedded(priv->PortOffset,
+ priv->byRadioCtl = SROMbyReadEmbedded(priv->port_offset,
EEP_OFS_RADIOCTL);
priv->bHWRadioOff = false;
if (priv->byRadioCtl & EEP_RADIOCTL_ENABLE) {
/* Get GPIO */
- MACvGPIOIn(priv->PortOffset, &priv->byGPIO);
+ MACvGPIOIn(priv->port_offset, &priv->byGPIO);
if (((priv->byGPIO & GPIO0_DATA) &&
!(priv->byRadioCtl & EEP_RADIOCTL_INV)) ||
@@ -390,7 +390,7 @@ static void device_init_registers(struct vnt_private *priv)
CARDbRadioPowerOff(priv);
/* get Permanent network address */
- SROMvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
+ SROMvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
pr_debug("Network address = %pM\n", priv->abyCurrentNetAddr);
/* reset Tx pointer */
@@ -398,22 +398,22 @@ static void device_init_registers(struct vnt_private *priv)
/* reset Rx pointer */
CARDvSafeResetTx(priv);
- if (priv->byLocalID <= REV_ID_VT3253_A1)
- MACvRegBitsOn(priv->PortOffset, MAC_REG_RCR, RCR_WPAERR);
+ if (priv->local_id <= REV_ID_VT3253_A1)
+ MACvRegBitsOn(priv->port_offset, MAC_REG_RCR, RCR_WPAERR);
/* Turn On Rx DMA */
- MACvReceive0(priv->PortOffset);
- MACvReceive1(priv->PortOffset);
+ MACvReceive0(priv->port_offset);
+ MACvReceive1(priv->port_offset);
/* start the adapter */
- MACvStart(priv->PortOffset);
+ MACvStart(priv->port_offset);
}
static void device_print_info(struct vnt_private *priv)
{
dev_info(&priv->pcid->dev, "MAC=%pM IO=0x%lx Mem=0x%lx IRQ=%d\n",
priv->abyCurrentNetAddr, (unsigned long)priv->ioaddr,
- (unsigned long)priv->PortOffset, priv->pcid->irq);
+ (unsigned long)priv->port_offset, priv->pcid->irq);
}
static void device_free_info(struct vnt_private *priv)
@@ -424,8 +424,8 @@ static void device_free_info(struct vnt_private *priv)
if (priv->mac_hw)
ieee80211_unregister_hw(priv->hw);
- if (priv->PortOffset)
- iounmap(priv->PortOffset);
+ if (priv->port_offset)
+ iounmap(priv->port_offset);
if (priv->pcid)
pci_release_regions(priv->pcid);
@@ -986,7 +986,7 @@ static void vnt_check_bb_vga(struct vnt_private *priv)
RFvRSSITodBm(priv, (u8)priv->uCurrRSSI, &dbm);
for (i = 0; i < BB_VGA_LEVEL; i++) {
- if (dbm < priv->ldBmThreshold[i]) {
+ if (dbm < priv->dbm_threshold[i]) {
priv->byBBVGANew = priv->abyBBVGA[i];
break;
}
@@ -1029,7 +1029,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
u32 isr;
unsigned long flags;
- MACvReadISR(priv->PortOffset, &isr);
+ MACvReadISR(priv->port_offset, &isr);
if (isr == 0)
return;
@@ -1042,7 +1042,7 @@ static void vnt_interrupt_process(struct vnt_private *priv)
spin_lock_irqsave(&priv->lock, flags);
/* Read low level stats */
- MACvReadMIBCounter(priv->PortOffset, &mib_counter);
+ MACvReadMIBCounter(priv->port_offset, &mib_counter);
low_stats->dot11RTSSuccessCount += mib_counter & 0xff;
low_stats->dot11RTSFailureCount += (mib_counter >> 8) & 0xff;
@@ -1056,12 +1056,12 @@ static void vnt_interrupt_process(struct vnt_private *priv)
* update ISR counter
*/
while (isr && priv->vif) {
- MACvWriteISR(priv->PortOffset, isr);
+ MACvWriteISR(priv->port_offset, isr);
if (isr & ISR_FETALERR) {
pr_debug(" ISR_FETALERR\n");
- VNSvOutPortB(priv->PortOffset + MAC_REG_SOFTPWRCTL, 0);
- VNSvOutPortW(priv->PortOffset +
+ VNSvOutPortB(priv->port_offset + MAC_REG_SOFTPWRCTL, 0);
+ VNSvOutPortW(priv->port_offset +
MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
device_error(priv, isr);
}
@@ -1116,10 +1116,10 @@ static void vnt_interrupt_process(struct vnt_private *priv)
ieee80211_queue_stopped(priv->hw, 0))
ieee80211_wake_queues(priv->hw);
- MACvReadISR(priv->PortOffset, &isr);
+ MACvReadISR(priv->port_offset, &isr);
- MACvReceive0(priv->PortOffset);
- MACvReceive1(priv->PortOffset);
+ MACvReceive0(priv->port_offset);
+ MACvReceive1(priv->port_offset);
if (max_count > priv->opts.int_works)
break;
@@ -1136,7 +1136,7 @@ static void vnt_interrupt_work(struct work_struct *work)
if (priv->vif)
vnt_interrupt_process(priv);
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+ MACvIntEnable(priv->port_offset, IMR_MASK_VALUE);
}
static irqreturn_t vnt_interrupt(int irq, void *arg)
@@ -1145,7 +1145,7 @@ static irqreturn_t vnt_interrupt(int irq, void *arg)
schedule_work(&priv->interrupt_work);
- MACvIntDisable(priv->PortOffset);
+ MACvIntDisable(priv->port_offset);
return IRQ_HANDLED;
}
@@ -1201,9 +1201,9 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
wmb(); /* second memory barrier */
if (head_td->td_info->flags & TD_FLAGS_NETIF_SKB)
- MACvTransmitAC0(priv->PortOffset);
+ MACvTransmitAC0(priv->port_offset);
else
- MACvTransmit0(priv->PortOffset);
+ MACvTransmit0(priv->port_offset);
priv->iTDUsed[dma_idx]++;
@@ -1255,7 +1255,7 @@ static int vnt_start(struct ieee80211_hw *hw)
device_init_registers(priv);
dev_dbg(&priv->pcid->dev, "call MACvIntEnable\n");
- MACvIntEnable(priv->PortOffset, IMR_MASK_VALUE);
+ MACvIntEnable(priv->port_offset, IMR_MASK_VALUE);
ieee80211_wake_queues(hw);
@@ -1305,15 +1305,15 @@ static int vnt_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
case NL80211_IFTYPE_STATION:
break;
case NL80211_IFTYPE_ADHOC:
- MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
- MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
break;
case NL80211_IFTYPE_AP:
- MACvRegBitsOff(priv->PortOffset, MAC_REG_RCR, RCR_UNICAST);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_RCR, RCR_UNICAST);
- MACvRegBitsOn(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
break;
default:
@@ -1334,16 +1334,16 @@ static void vnt_remove_interface(struct ieee80211_hw *hw,
case NL80211_IFTYPE_STATION:
break;
case NL80211_IFTYPE_ADHOC:
- MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- MACvRegBitsOff(priv->PortOffset,
+ MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->port_offset,
MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_ADHOC);
break;
case NL80211_IFTYPE_AP:
- MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
- MACvRegBitsOff(priv->PortOffset,
+ MACvRegBitsOff(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->port_offset,
MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
- MACvRegBitsOff(priv->PortOffset, MAC_REG_HOSTCR, HOSTCR_AP);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_HOSTCR, HOSTCR_AP);
break;
default:
break;
@@ -1407,7 +1407,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
- MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
+ MACvWriteBSSIDAddress(priv->port_offset, (u8 *)conf->bssid);
spin_unlock_irqrestore(&priv->lock, flags);
}
@@ -1423,26 +1423,26 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (conf->use_short_preamble) {
- MACvEnableBarkerPreambleMd(priv->PortOffset);
- priv->byPreambleType = true;
+ MACvEnableBarkerPreambleMd(priv->port_offset);
+ priv->preamble_type = true;
} else {
- MACvDisableBarkerPreambleMd(priv->PortOffset);
- priv->byPreambleType = false;
+ MACvDisableBarkerPreambleMd(priv->port_offset);
+ priv->preamble_type = false;
}
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (conf->use_cts_prot)
- MACvEnableProtectMD(priv->PortOffset);
+ MACvEnableProtectMD(priv->port_offset);
else
- MACvDisableProtectMD(priv->PortOffset);
+ MACvDisableProtectMD(priv->port_offset);
}
if (changed & BSS_CHANGED_ERP_SLOT) {
if (conf->use_short_slot)
- priv->bShortSlotTime = true;
+ priv->short_slot_time = true;
else
- priv->bShortSlotTime = false;
+ priv->short_slot_time = false;
CARDbSetPhyParameter(priv, priv->byBBType);
bb_set_vga_gain_offset(priv, priv->abyBBVGA[0]);
@@ -1459,10 +1459,10 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (conf->enable_beacon) {
vnt_beacon_enable(priv, vif, conf);
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+ MACvRegBitsOn(priv->port_offset, MAC_REG_TCR,
TCR_AUTOBCNTX);
} else {
- MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+ MACvRegBitsOff(priv->port_offset, MAC_REG_TCR,
TCR_AUTOBCNTX);
}
}
@@ -1477,9 +1477,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
} else {
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
TFTCTL_TSFCNTRST);
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL,
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL,
TFTCTL_TSFCNTREN);
}
}
@@ -1513,7 +1513,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
*total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC;
- VNSvInPortB(priv->PortOffset + MAC_REG_RCR, &rx_mode);
+ VNSvInPortB(priv->port_offset + MAC_REG_RCR, &rx_mode);
dev_dbg(&priv->pcid->dev, "rx mode in = %x\n", rx_mode);
@@ -1524,24 +1524,24 @@ static void vnt_configure(struct ieee80211_hw *hw,
spin_lock_irqsave(&priv->lock, flags);
if (priv->mc_list_count > 2) {
- MACvSelectPage1(priv->PortOffset);
+ MACvSelectPage1(priv->port_offset);
- VNSvOutPortD(priv->PortOffset +
+ VNSvOutPortD(priv->port_offset +
MAC_REG_MAR0, 0xffffffff);
- VNSvOutPortD(priv->PortOffset +
+ VNSvOutPortD(priv->port_offset +
MAC_REG_MAR0 + 4, 0xffffffff);
- MACvSelectPage0(priv->PortOffset);
+ MACvSelectPage0(priv->port_offset);
} else {
- MACvSelectPage1(priv->PortOffset);
+ MACvSelectPage1(priv->port_offset);
- VNSvOutPortD(priv->PortOffset +
+ VNSvOutPortD(priv->port_offset +
MAC_REG_MAR0, (u32)multicast);
- VNSvOutPortD(priv->PortOffset +
+ VNSvOutPortD(priv->port_offset +
MAC_REG_MAR0 + 4,
(u32)(multicast >> 32));
- MACvSelectPage0(priv->PortOffset);
+ MACvSelectPage0(priv->port_offset);
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1561,7 +1561,7 @@ static void vnt_configure(struct ieee80211_hw *hw,
rx_mode |= RCR_BSSID;
}
- VNSvOutPortB(priv->PortOffset + MAC_REG_RCR, rx_mode);
+ VNSvOutPortB(priv->port_offset + MAC_REG_RCR, rx_mode);
dev_dbg(&priv->pcid->dev, "rx mode out= %x\n", rx_mode);
}
@@ -1621,7 +1621,7 @@ static void vnt_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
struct vnt_private *priv = hw->priv;
/* reset TSF counter */
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
}
static const struct ieee80211_ops vnt_mac_ops = {
@@ -1698,9 +1698,9 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
priv->memaddr = pci_resource_start(pcid, 0);
priv->ioaddr = pci_resource_start(pcid, 1);
- priv->PortOffset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
+ priv->port_offset = ioremap(priv->memaddr & PCI_BASE_ADDRESS_MEM_MASK,
256);
- if (!priv->PortOffset) {
+ if (!priv->port_offset) {
dev_err(&pcid->dev, ": Failed to IO remapping ..\n");
device_free_info(priv);
return -ENODEV;
@@ -1729,10 +1729,10 @@ vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
}
/* initial to reload eeprom */
MACvInitialize(priv);
- MACvReadEtherAddress(priv->PortOffset, priv->abyCurrentNetAddr);
+ MACvReadEtherAddress(priv->port_offset, priv->abyCurrentNetAddr);
/* Get RFType */
- priv->byRFType = SROMbyReadEmbedded(priv->PortOffset, EEP_OFS_RFTYPE);
+ priv->byRFType = SROMbyReadEmbedded(priv->port_offset, EEP_OFS_RFTYPE);
priv->byRFType &= RF_MASK;
dev_dbg(&pcid->dev, "RF Type = %x\n", priv->byRFType);
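device_main.c above keeps the split interrupt handling: the hard IRQ handler only schedules a work item and masks the device (MACvIntDisable), and vnt_interrupt_work() does the heavy processing before unmasking again (MACvIntEnable). A skeletal sketch of that defer-to-workqueue pattern; the demo_* names are placeholders and INIT_WORK() is assumed to have run at probe time:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct demo_priv {
	struct work_struct irq_work;	/* INIT_WORK(&priv->irq_work, demo_irq_work) at probe */
};

static void demo_int_disable(struct demo_priv *priv) { /* mask device interrupts */ }
static void demo_int_enable(struct demo_priv *priv)  { /* unmask device interrupts */ }

static void demo_irq_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, irq_work);

	/* Heavy interrupt processing runs here, in process context. */

	demo_int_enable(priv);		/* re-arm the device once done */
}

static irqreturn_t demo_irq(int irq, void *arg)
{
	struct demo_priv *priv = arg;

	schedule_work(&priv->irq_work);	/* defer the real work */
	demo_int_disable(priv);		/* keep the line quiet until then */
	return IRQ_HANDLED;
}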
diff --git a/drivers/staging/vt6655/dpc.c b/drivers/staging/vt6655/dpc.c
index 2d06cecc0307..a7d1d35de5d4 100644
--- a/drivers/staging/vt6655/dpc.c
+++ b/drivers/staging/vt6655/dpc.c
@@ -100,7 +100,7 @@ static bool vnt_rx_data(struct vnt_private *priv, struct sk_buff *skb,
rx_status.rate_idx = rate_idx;
if (ieee80211_has_protected(fc)) {
- if (priv->byLocalID > REV_ID_VT3253_A1)
+ if (priv->local_id > REV_ID_VT3253_A1)
rx_status.flag |= RX_FLAG_DECRYPTED;
/* Drop packet */
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 20881cf2f394..f843966a3ea4 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -81,7 +81,7 @@ static int vnt_set_keymode(struct ieee80211_hw *hw, u8 *mac_addr,
}
MACvSetKeyEntry(priv, key_mode, entry, key_inx,
- bssid, (u32 *)key->key, priv->byLocalID);
+ bssid, (u32 *)key->key, priv->local_id);
return 0;
}
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 9721c2234bf2..80cced7dfda8 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -57,7 +57,7 @@
bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
unsigned char byTestBits)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
return !(ioread8(io_base + byRegOfs) & byTestBits);
}
@@ -77,7 +77,7 @@ bool MACbIsRegBitsOff(struct vnt_private *priv, unsigned char byRegOfs,
*/
bool MACbIsIntDisable(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
if (ioread32(io_base + MAC_REG_IMR))
return false;
@@ -102,7 +102,7 @@ bool MACbIsIntDisable(struct vnt_private *priv)
void MACvSetShortRetryLimit(struct vnt_private *priv,
unsigned char byRetryLimit)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
/* set SRT */
iowrite8(byRetryLimit, io_base + MAC_REG_SRT);
}
@@ -124,7 +124,7 @@ void MACvSetShortRetryLimit(struct vnt_private *priv,
void MACvSetLongRetryLimit(struct vnt_private *priv,
unsigned char byRetryLimit)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
/* set LRT */
iowrite8(byRetryLimit, io_base + MAC_REG_LRT);
}
@@ -145,7 +145,7 @@ void MACvSetLongRetryLimit(struct vnt_private *priv,
*/
void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
byLoopbackMode <<= 6;
/* set TCR */
@@ -168,7 +168,7 @@ void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode)
*/
void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
/* read page0 register */
memcpy_fromio(cxt_buf, io_base, MAC_MAX_CONTEXT_SIZE_PAGE0);
@@ -198,7 +198,7 @@ void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf)
*/
void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
MACvSelectPage1(io_base);
/* restore page1 */
@@ -249,7 +249,7 @@ void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf)
*/
bool MACbSoftwareReset(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
/* turn on HOSTCR_SOFTRST, just write 0x01 to reset */
@@ -312,7 +312,7 @@ bool MACbSafeSoftwareReset(struct vnt_private *priv)
*/
bool MACbSafeRxOff(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
/* turn off wow temp for turn off Rx safely */
@@ -366,7 +366,7 @@ bool MACbSafeRxOff(struct vnt_private *priv)
*/
bool MACbSafeTxOff(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
/* Clear TX DMA */
@@ -422,7 +422,7 @@ bool MACbSafeTxOff(struct vnt_private *priv)
*/
bool MACbSafeStop(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
MACvRegBitsOff(io_base, MAC_REG_TCR, TCR_AUTOBCNTX);
@@ -457,7 +457,7 @@ bool MACbSafeStop(struct vnt_private *priv)
*/
bool MACbShutdown(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
/* disable MAC IMR */
MACvIntDisable(io_base);
MACvSetLoopbackMode(priv, MAC_LB_INTERNAL);
@@ -485,7 +485,7 @@ bool MACbShutdown(struct vnt_private *priv)
*/
void MACvInitialize(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
/* clear sticky bits */
MACvClearStckDS(io_base);
/* disable force PME-enable */
@@ -517,7 +517,7 @@ void MACvInitialize(struct vnt_private *priv)
*/
void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
unsigned char org_dma_ctl;
@@ -551,7 +551,7 @@ void MACvSetCurrRx0DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
*/
void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
unsigned char org_dma_ctl;
@@ -586,7 +586,7 @@ void MACvSetCurrRx1DescAddr(struct vnt_private *priv, u32 curr_desc_addr)
void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
u32 curr_desc_addr)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
unsigned char org_dma_ctl;
@@ -622,7 +622,7 @@ void MACvSetCurrTx0DescAddrEx(struct vnt_private *priv,
void MACvSetCurrAC0DescAddrEx(struct vnt_private *priv,
u32 curr_desc_addr)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short ww;
unsigned char org_dma_ctl;
@@ -666,7 +666,7 @@ void MACvSetCurrTXDescAddr(int iTxType, struct vnt_private *priv,
*/
void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned char byValue;
unsigned int uu, ii;
@@ -703,7 +703,7 @@ void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
unsigned int uDelayTime)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
iowrite8(0, io_base + MAC_REG_TMCTL1);
iowrite32(uDelayTime, io_base + MAC_REG_TMDATA1);
@@ -713,7 +713,7 @@ void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
void MACvSetMISCFifo(struct vnt_private *priv, unsigned short offset,
u32 data)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
if (offset > 273)
return;
@@ -724,7 +724,7 @@ void MACvSetMISCFifo(struct vnt_private *priv, unsigned short offset,
bool MACbPSWakeup(struct vnt_private *priv)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned int ww;
/* Read PSCTL */
if (MACbIsRegBitsOff(priv, MAC_REG_PSCTL, PSCTL_PS))
@@ -763,14 +763,14 @@ bool MACbPSWakeup(struct vnt_private *priv)
void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
unsigned int uEntryIdx, unsigned int uKeyIdx,
unsigned char *pbyAddr, u32 *pdwKey,
- unsigned char byLocalID)
+ unsigned char local_id)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short offset;
u32 data;
int ii;
- if (byLocalID <= 1)
+ if (local_id <= 1)
return;
offset = MISCFIFO_KEYETRY0;
@@ -830,7 +830,7 @@ void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
*/
void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx)
{
- void __iomem *io_base = priv->PortOffset;
+ void __iomem *io_base = priv->port_offset;
unsigned short offset;
offset = MISCFIFO_KEYETRY0;
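The mac.c hunks above all share one shape: each helper now takes the private structure and derives the __iomem base locally from the renamed port_offset field. A minimal sketch of that pattern, with an illustrative struct containing only the field used here:

#include <linux/io.h>
#include <linux/types.h>

struct example_priv {
	void __iomem *port_offset;	/* MMIO base, formerly "PortOffset" */
};

static void example_set_short_retry_limit(struct example_priv *priv, u8 limit,
					  unsigned long reg_srt)
{
	void __iomem *io_base = priv->port_offset;

	iowrite8(limit, io_base + reg_srt);
}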
diff --git a/drivers/staging/vt6655/mac.h b/drivers/staging/vt6655/mac.h
index 9797eddaea01..550dc4da80a9 100644
--- a/drivers/staging/vt6655/mac.h
+++ b/drivers/staging/vt6655/mac.h
@@ -886,8 +886,8 @@ void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
void MACvSetLoopbackMode(struct vnt_private *priv, unsigned char byLoopbackMode);
-void MACvSaveContext(struct vnt_private *priv, unsigned char *pbyCxtBuf);
-void MACvRestoreContext(struct vnt_private *priv, unsigned char *pbyCxtBuf);
+void MACvSaveContext(struct vnt_private *priv, unsigned char *cxt_buf);
+void MACvRestoreContext(struct vnt_private *priv, unsigned char *cxt_buf);
bool MACbSoftwareReset(struct vnt_private *priv);
bool MACbSafeSoftwareReset(struct vnt_private *priv);
@@ -921,7 +921,7 @@ bool MACbPSWakeup(struct vnt_private *priv);
void MACvSetKeyEntry(struct vnt_private *priv, unsigned short wKeyCtl,
unsigned int uEntryIdx, unsigned int uKeyIdx,
unsigned char *pbyAddr, u32 *pdwKey,
- unsigned char byLocalID);
+ unsigned char local_id);
void MACvDisableKeyEntry(struct vnt_private *priv, unsigned int uEntryIdx);
#endif /* __MAC_H__ */
diff --git a/drivers/staging/vt6655/power.c b/drivers/staging/vt6655/power.c
index aac021e983d1..06066fa56dd5 100644
--- a/drivers/staging/vt6655/power.c
+++ b/drivers/staging/vt6655/power.c
@@ -52,30 +52,30 @@ void PSvEnablePowerSaving(struct vnt_private *priv,
u16 wAID = priv->current_aid | BIT(14) | BIT(15);
/* set period of power up before TBTT */
- VNSvOutPortW(priv->PortOffset + MAC_REG_PWBT, C_PWBT);
+ VNSvOutPortW(priv->port_offset + MAC_REG_PWBT, C_PWBT);
if (priv->op_mode != NL80211_IFTYPE_ADHOC) {
/* set AID */
- VNSvOutPortW(priv->PortOffset + MAC_REG_AIDATIM, wAID);
+ VNSvOutPortW(priv->port_offset + MAC_REG_AIDATIM, wAID);
}
/* Set AutoSleep */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* Set HWUTSF */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
if (wListenInterval >= 2) {
/* clear always listen beacon */
- MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
/* first time set listen next beacon */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_LNBCN);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_LNBCN);
} else {
/* always listen beacon */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
}
/* enable power saving hw function */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_PSEN);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_PSEN);
priv->bEnablePSMode = true;
priv->bPWBitOn = true;
@@ -98,13 +98,13 @@ void PSvDisablePowerSaving(struct vnt_private *priv)
MACbPSWakeup(priv);
/* clear AutoSleep */
- MACvRegBitsOff(priv->PortOffset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_PSCFG, PSCFG_AUTOSLEEP);
/* clear HWUTSF */
- MACvRegBitsOff(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
+ MACvRegBitsOff(priv->port_offset, MAC_REG_TFTCTL, TFTCTL_HWUTSF);
/* set always listen beacon */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_PSCTL, PSCTL_ALBCN);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_PSCTL, PSCTL_ALBCN);
priv->bEnablePSMode = false;
@@ -135,7 +135,7 @@ bool PSbIsNextTBTTWakeUp(struct vnt_private *priv)
if (priv->wake_up_count == 1) {
/* Turn on wake up to listen next beacon */
- MACvRegBitsOn(priv->PortOffset,
+ MACvRegBitsOn(priv->port_offset,
MAC_REG_PSCTL, PSCTL_LNBCN);
wake_up = true;
}
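In the first power.c hunk the value written to MAC_REG_AIDATIM is priv->current_aid with BIT(14) and BIT(15) set, matching the 802.11 convention that an AID carried in the Duration/ID field has its two most significant bits set to 1. A small standalone sketch of the resulting value; the AID itself is illustrative:

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	uint16_t aid = 5;			/* association ID from the AP */
	uint16_t aidatim = aid | BIT(14) | BIT(15);

	printf("MAC_REG_AIDATIM value: 0x%04x\n", aidatim);	/* 0xc005 */
	return 0;
}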
diff --git a/drivers/staging/vt6655/rf.c b/drivers/staging/vt6655/rf.c
index 0dae593c6944..bc4abe77db7b 100644
--- a/drivers/staging/vt6655/rf.c
+++ b/drivers/staging/vt6655/rf.c
@@ -33,7 +33,7 @@
#define SWITCH_CHANNEL_DELAY_AL7230 200 /* us */
#define AL7230_PWR_IDX_LEN 64
-static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
+static const unsigned long al2230_init_table[CB_AL2230_INIT_SEQ] = {
0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
0x01A00200 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
@@ -51,7 +51,7 @@ static const unsigned long dwAL2230InitTable[CB_AL2230_INIT_SEQ] = {
0x00580F00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW
};
-static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
+static const unsigned long al2230_channel_table0[CB_MAX_CHANNEL] = {
0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
0x03F79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
0x03E79000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
@@ -68,7 +68,7 @@ static const unsigned long dwAL2230ChannelTable0[CB_MAX_CHANNEL] = {
0x03E7C000 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
-static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
+static const unsigned long al2230_channel_table1[CB_MAX_CHANNEL] = {
0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
0x0B333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
0x03333100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
@@ -85,7 +85,7 @@ static const unsigned long dwAL2230ChannelTable1[CB_MAX_CHANNEL] = {
0x06666100 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 14, Tf = 2412M */
};
-static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
+static unsigned long al2230_power_table[AL2230_PWR_IDX_LEN] = {
0x04040900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
0x04041900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
0x04042900 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW,
@@ -155,7 +155,7 @@ static unsigned long dwAL2230PowerTable[AL2230_PWR_IDX_LEN] = {
/* 40MHz reference frequency
* Need to Pull PLLON(PE3) low when writing channel registers through 3-wire.
*/
-static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
+static const unsigned long al7230_init_table[CB_AL7230_INIT_SEQ] = {
0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel1 // Need modify for 11a */
0x841FF200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11a: 451FE2 */
@@ -176,7 +176,7 @@ static const unsigned long dwAL7230InitTable[CB_AL7230_INIT_SEQ] = {
0x1ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11a: 12BACF */
};
-static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
+static const unsigned long al7230_init_table_a_mode[CB_AL7230_INIT_SEQ] = {
0x0FF52000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
0x00000100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Channel184 // Need modify for 11b/g */
0x451FE200 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* Need modify for 11b/g */
@@ -195,7 +195,7 @@ static const unsigned long dwAL7230InitTableAMode[CB_AL7230_INIT_SEQ] = {
0x12BACF00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* Need modify for 11b/g */
};
-static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
+static const unsigned long al7230_channel_table0[CB_MAX_CHANNEL] = {
0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
0x00379000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
@@ -262,7 +262,7 @@ static const unsigned long dwAL7230ChannelTable0[CB_MAX_CHANNEL] = {
0x0FF61000 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
-static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
+static const unsigned long al7230_channel_table1[CB_MAX_CHANNEL] = {
0x13333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
0x1B333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
0x03333100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
@@ -327,7 +327,7 @@ static const unsigned long dwAL7230ChannelTable1[CB_MAX_CHANNEL] = {
0x02AAA100 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW /* channel = 165, Tf = 5825MHz (56) */
};
-static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
+static const unsigned long al7230_channel_table2[CB_MAX_CHANNEL] = {
0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 1, Tf = 2412MHz */
0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 2, Tf = 2417MHz */
0x7FD78400 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW, /* channel = 3, Tf = 2422MHz */
@@ -406,7 +406,7 @@ static const unsigned long dwAL7230ChannelTable2[CB_MAX_CHANNEL] = {
*/
static bool s_bAL7230Init(struct vnt_private *priv)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
int ii;
bool ret;
@@ -420,7 +420,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
bb_power_save_mode_off(priv); /* RobertYu:20050106, have DC value for Calibration */
for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[ii]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[ii]);
/* PLL On */
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
@@ -434,7 +434,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, (0x3ABA8F00 + (BY_AL7230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
/* TXDCOC:disable, RCK:disable */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[CB_AL7230_INIT_SEQ - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[CB_AL7230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -455,7 +455,7 @@ static bool s_bAL7230Init(struct vnt_private *priv)
*/
static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
bool ret;
ret = true;
@@ -463,9 +463,9 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
/* PLLON Off */
MACvWordRegBitsOff(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable0[byChannel - 1]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable1[byChannel - 1]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230ChannelTable2[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_channel_table0[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_channel_table1[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_channel_table2[byChannel - 1]);
/* PLLOn On */
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
@@ -494,7 +494,7 @@ static bool s_bAL7230SelectChannel(struct vnt_private *priv, unsigned char byCha
*/
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
unsigned short ww;
unsigned long dwValue;
@@ -527,7 +527,7 @@ bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData)
*/
static bool RFbAL2230Init(struct vnt_private *priv)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
int ii;
bool ret;
@@ -545,7 +545,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
IFRFbWriteEmbedded(priv, (0x07168700 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- ret &= IFRFbWriteEmbedded(priv, dwAL2230InitTable[ii]);
+ ret &= IFRFbWriteEmbedded(priv, al2230_init_table[ii]);
MACvTimer0MicroSDelay(priv, 30); /* delay 30 us */
/* PLL On */
@@ -557,7 +557,7 @@ static bool RFbAL2230Init(struct vnt_private *priv)
ret &= IFRFbWriteEmbedded(priv, (0x00780f00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW));
MACvTimer0MicroSDelay(priv, 30);/* 30us */
ret &= IFRFbWriteEmbedded(priv,
- dwAL2230InitTable[CB_AL2230_INIT_SEQ - 1]);
+ al2230_init_table[CB_AL2230_INIT_SEQ - 1]);
MACvWordRegBitsOn(iobase, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE3 |
SOFTPWRCTL_SWPE2 |
@@ -572,13 +572,13 @@ static bool RFbAL2230Init(struct vnt_private *priv)
static bool RFbAL2230SelectChannel(struct vnt_private *priv, unsigned char byChannel)
{
- void __iomem *iobase = priv->PortOffset;
+ void __iomem *iobase = priv->port_offset;
bool ret;
ret = true;
- ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable0[byChannel - 1]);
- ret &= IFRFbWriteEmbedded(priv, dwAL2230ChannelTable1[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al2230_channel_table0[byChannel - 1]);
+ ret &= IFRFbWriteEmbedded(priv, al2230_channel_table1[byChannel - 1]);
/* Set Channel[7] = 0 to tell H/W channel is changing now. */
VNSvOutPortB(iobase + MAC_REG_CHANNEL, (byChannel & 0x7F));
@@ -670,63 +670,63 @@ bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType,
* Parameters:
* In:
* iobase - I/O base address
- * uChannel - channel number
+ * channel - channel number
* bySleepCnt - SleepProgSyn count
*
* Return Value: None.
*
*/
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
- u16 uChannel)
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type,
+ u16 channel)
{
- void __iomem *iobase = priv->PortOffset;
- int ii;
- unsigned char byInitCount = 0;
- unsigned char bySleepCount = 0;
+ void __iomem *iobase = priv->port_offset;
+ int i;
+ unsigned char init_count = 0;
+ unsigned char sleep_count = 0;
VNSvOutPortW(iobase + MAC_REG_MISCFFNDEX, 0);
- switch (byRFType) {
+ switch (rf_type) {
case RF_AIROHA:
case RF_AL2230S:
- if (uChannel > CB_MAX_CHANNEL_24G)
+ if (channel > CB_MAX_CHANNEL_24G)
return false;
/* Init Reg + Channel Reg (2) */
- byInitCount = CB_AL2230_INIT_SEQ + 2;
- bySleepCount = 0;
- if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
+ init_count = CB_AL2230_INIT_SEQ + 2;
+ sleep_count = 0;
+ if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
return false;
- for (ii = 0; ii < CB_AL2230_INIT_SEQ; ii++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230InitTable[ii]);
+ for (i = 0; i < CB_AL2230_INIT_SEQ; i++)
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_init_table[i]);
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable0[uChannel - 1]);
- ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL2230ChannelTable1[uChannel - 1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table0[channel - 1]);
+ i++;
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al2230_channel_table1[channel - 1]);
break;
/* Need to check, PLLON need to be low for channel setting */
case RF_AIROHA7230:
/* Init Reg + Channel Reg (3) */
- byInitCount = CB_AL7230_INIT_SEQ + 3;
- bySleepCount = 0;
- if (byInitCount > (MISCFIFO_SYNDATASIZE - bySleepCount))
+ init_count = CB_AL7230_INIT_SEQ + 3;
+ sleep_count = 0;
+ if (init_count > (MISCFIFO_SYNDATASIZE - sleep_count))
return false;
- if (uChannel <= CB_MAX_CHANNEL_24G) {
- for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTable[ii]);
+ if (channel <= CB_MAX_CHANNEL_24G) {
+ for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table[i]);
} else {
- for (ii = 0; ii < CB_AL7230_INIT_SEQ; ii++)
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230InitTableAMode[ii]);
+ for (i = 0; i < CB_AL7230_INIT_SEQ; i++)
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_init_table_a_mode[i]);
}
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable0[uChannel - 1]);
- ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable1[uChannel - 1]);
- ii++;
- MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + ii), dwAL7230ChannelTable2[uChannel - 1]);
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table0[channel - 1]);
+ i++;
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table1[channel - 1]);
+ i++;
+ MACvSetMISCFifo(priv, (unsigned short)(MISCFIFO_SYNDATA_IDX + i), al7230_channel_table2[channel - 1]);
break;
case RF_NOTHING:
@@ -736,7 +736,7 @@ bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType,
return false;
}
- MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(bySleepCount, byInitCount));
+ MACvSetMISCFifo(priv, MISCFIFO_SYNINFO_IDX, (unsigned long)MAKEWORD(sleep_count, init_count));
return true;
}
@@ -834,7 +834,7 @@ bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
switch (priv->byRFType) {
case RF_AIROHA:
- ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+ ret &= IFRFbWriteEmbedded(priv, al2230_power_table[byPwr]);
if (rate <= RATE_11M)
ret &= IFRFbWriteEmbedded(priv, 0x0001B400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
else
@@ -843,7 +843,7 @@ bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
break;
case RF_AL2230S:
- ret &= IFRFbWriteEmbedded(priv, dwAL2230PowerTable[byPwr]);
+ ret &= IFRFbWriteEmbedded(priv, al2230_power_table[byPwr]);
if (rate <= RATE_11M) {
ret &= IFRFbWriteEmbedded(priv, 0x040C1400 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
ret &= IFRFbWriteEmbedded(priv, 0x00299B00 + (BY_AL2230_REG_LEN << 3) + IFREGCTL_REGW);
@@ -923,22 +923,22 @@ bool RFbAL7230SelectChannelPostProcess(struct vnt_private *priv,
*/
if ((byOldChannel <= CB_MAX_CHANNEL_24G) && (byNewChannel > CB_MAX_CHANNEL_24G)) {
/* Change from 2.4G to 5G [Reg] */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[2]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[3]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[5]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[7]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[10]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[12]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTableAMode[15]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[2]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[3]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[5]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[7]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[10]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[12]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table_a_mode[15]);
} else if ((byOldChannel > CB_MAX_CHANNEL_24G) && (byNewChannel <= CB_MAX_CHANNEL_24G)) {
/* Change from 5G to 2.4G [Reg] */
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[2]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[3]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[5]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[7]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[10]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[12]);
- ret &= IFRFbWriteEmbedded(priv, dwAL7230InitTable[15]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[2]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[3]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[5]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[7]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[10]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[12]);
+ ret &= IFRFbWriteEmbedded(priv, al7230_init_table[15]);
}
return ret;
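RFvWriteWakeProgSyn() above finishes by writing MAKEWORD(sleep_count, init_count) into the MISC FIFO sync-info slot. A standalone sketch of that word, assuming the driver's MAKEWORD() packs its first argument into the low byte and its second into the high byte; the counts are illustrative:

#include <stdint.h>
#include <stdio.h>

#define MAKEWORD(lo, hi) \
	((uint16_t)(((uint8_t)(lo)) | ((uint16_t)((uint8_t)(hi)) << 8)))

int main(void)
{
	uint8_t sleep_count = 0;
	uint8_t init_count = 15 + 2;	/* init sequence (15, illustrative) + 2 channel words */

	printf("SYNINFO word: 0x%04x\n", MAKEWORD(sleep_count, init_count));
	return 0;
}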
diff --git a/drivers/staging/vt6655/rf.h b/drivers/staging/vt6655/rf.h
index d499aed45c9f..0939937d47a8 100644
--- a/drivers/staging/vt6655/rf.h
+++ b/drivers/staging/vt6655/rf.h
@@ -60,7 +60,7 @@
bool IFRFbWriteEmbedded(struct vnt_private *priv, unsigned long dwData);
bool RFbSelectChannel(struct vnt_private *priv, unsigned char byRFType, u16 byChannel);
bool RFbInit(struct vnt_private *priv);
-bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char byRFType, u16 uChannel);
+bool RFvWriteWakeProgSyn(struct vnt_private *priv, unsigned char rf_type, u16 channel);
bool RFbSetPower(struct vnt_private *priv, unsigned int rate, u16 uCH);
bool RFbRawSetPower(struct vnt_private *priv, unsigned char byPwr,
unsigned int rate);
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 5395c3a3e35a..0de801b666da 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -142,7 +142,7 @@ s_uFillDataHead(
static __le16 vnt_time_stamp_off(struct vnt_private *priv, u16 rate)
{
- return cpu_to_le16(wTimeStampOff[priv->byPreambleType % 2]
+ return cpu_to_le16(wTimeStampOff[priv->preamble_type % 2]
[rate % MAX_RATE]);
}
@@ -163,7 +163,7 @@ s_uGetTxRsvTime(
{
unsigned int uDataTime, uAckTime;
- uDataTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, cbFrameLength, wRate);
+ uDataTime = bb_get_frame_time(pDevice->preamble_type, byPktType, cbFrameLength, wRate);
if (!bNeedAck)
return uDataTime;
@@ -172,7 +172,7 @@ s_uGetTxRsvTime(
* CCK mode - 11b
* OFDM mode - 11g 2.4G & 11a 5G
*/
- uAckTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14,
+ uAckTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14,
byPktType == PK_TYPE_11B ?
pDevice->byTopCCKBasicRate :
pDevice->byTopOFDMBasicRate);
@@ -200,22 +200,22 @@ static __le16 get_rtscts_time(struct vnt_private *priv,
unsigned int ack_time = 0;
unsigned int data_time = 0;
- data_time = bb_get_frame_time(priv->byPreambleType, pkt_type, frame_length, current_rate);
+ data_time = bb_get_frame_time(priv->preamble_type, pkt_type, frame_length, current_rate);
if (rts_rsvtype == 0) { /* RTSTxRrvTime_bb */
- rts_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 20, priv->byTopCCKBasicRate);
- ack_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopCCKBasicRate);
+ rts_time = bb_get_frame_time(priv->preamble_type, pkt_type, 20, priv->byTopCCKBasicRate);
+ ack_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopCCKBasicRate);
cts_time = ack_time;
} else if (rts_rsvtype == 1) { /* RTSTxRrvTime_ba, only in 2.4GHZ */
- rts_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 20, priv->byTopCCKBasicRate);
- cts_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopCCKBasicRate);
- ack_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopOFDMBasicRate);
+ rts_time = bb_get_frame_time(priv->preamble_type, pkt_type, 20, priv->byTopCCKBasicRate);
+ cts_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopCCKBasicRate);
+ ack_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopOFDMBasicRate);
} else if (rts_rsvtype == 2) { /* RTSTxRrvTime_aa */
- rts_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 20, priv->byTopOFDMBasicRate);
- ack_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopOFDMBasicRate);
+ rts_time = bb_get_frame_time(priv->preamble_type, pkt_type, 20, priv->byTopOFDMBasicRate);
+ ack_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopOFDMBasicRate);
cts_time = ack_time;
} else if (rts_rsvtype == 3) { /* CTSTxRrvTime_ba, only in 2.4GHZ */
- cts_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopCCKBasicRate);
- ack_time = bb_get_frame_time(priv->byPreambleType, pkt_type, 14, priv->byTopOFDMBasicRate);
+ cts_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopCCKBasicRate);
+ ack_time = bb_get_frame_time(priv->preamble_type, pkt_type, 14, priv->byTopOFDMBasicRate);
rrv_time = cts_time + ack_time + data_time + 2 * priv->uSIFS;
return cpu_to_le16((u16)rrv_time);
}
@@ -255,7 +255,7 @@ s_uGetDataDuration(
switch (byDurType) {
case DATADUR_B: /* DATADUR_B */
if (bNeedAck) {
- uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ uAckTime = bb_get_frame_time(pDevice->preamble_type,
byPktType, 14,
pDevice->byTopCCKBasicRate);
}
@@ -273,7 +273,7 @@ s_uGetDataDuration(
case DATADUR_A: /* DATADUR_A */
if (bNeedAck) {
- uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ uAckTime = bb_get_frame_time(pDevice->preamble_type,
byPktType, 14,
pDevice->byTopOFDMBasicRate);
}
@@ -292,7 +292,7 @@ s_uGetDataDuration(
case DATADUR_A_F0: /* DATADUR_A_F0 */
case DATADUR_A_F1: /* DATADUR_A_F1 */
if (bNeedAck) {
- uAckTime = bb_get_frame_time(pDevice->byPreambleType,
+ uAckTime = bb_get_frame_time(pDevice->preamble_type,
byPktType, 14,
pDevice->byTopOFDMBasicRate);
}
@@ -344,17 +344,17 @@ s_uGetRTSCTSDuration(
switch (byDurType) {
case RTSDUR_BB: /* RTSDuration_bb */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_BA: /* RTSDuration_ba */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
case RTSDUR_AA: /* RTSDuration_aa */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wRate, bNeedAck);
break;
@@ -363,7 +363,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F0: /* RTSDuration_ba_f0 */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -372,7 +372,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F0: /* RTSDuration_aa_f0 */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE0][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -381,7 +381,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_BA_F1: /* RTSDuration_ba_f1 */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopCCKBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopCCKBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -390,7 +390,7 @@ s_uGetRTSCTSDuration(
break;
case RTSDUR_AA_F1: /* RTSDuration_aa_f1 */
- uCTSTime = bb_get_frame_time(pDevice->byPreambleType, byPktType, 14, pDevice->byTopOFDMBasicRate);
+ uCTSTime = bb_get_frame_time(pDevice->preamble_type, byPktType, 14, pDevice->byTopOFDMBasicRate);
if ((byFBOption == AUTO_FB_0) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
uDurTime = uCTSTime + 2 * pDevice->uSIFS + s_uGetTxRsvTime(pDevice, byPktType, cbFrameLength, wFB_Opt0[FB_RATE1][wRate - RATE_18M], bNeedAck);
else if ((byFBOption == AUTO_FB_1) && (wRate >= RATE_18M) && (wRate <= RATE_54M))
@@ -1012,7 +1012,7 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
cbFrameSize += info->control.hw_key->icv_len;
- if (pDevice->byLocalID > REV_ID_VT3253_A1) {
+ if (pDevice->local_id > REV_ID_VT3253_A1) {
/* MAC Header should be padding 0 to DW alignment. */
uPadding = 4 - (ieee80211_get_hdrlen_from_skb(skb) % 4);
uPadding %= 4;
@@ -1289,9 +1289,9 @@ int vnt_generate_fifo_header(struct vnt_private *priv, u32 dma_idx,
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_LRETRY);
if (tx_rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
- priv->byPreambleType = PREAMBLE_SHORT;
+ priv->preamble_type = PREAMBLE_SHORT;
else
- priv->byPreambleType = PREAMBLE_LONG;
+ priv->preamble_type = PREAMBLE_LONG;
if (tx_rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)
tx_buffer_head->fifo_ctl |= cpu_to_le16(FIFOCTL_RTS);
@@ -1422,13 +1422,13 @@ static int vnt_beacon_xmit(struct vnt_private *priv,
priv->wBCNBufLen = sizeof(*short_head) + skb->len;
- MACvSetCurrBCNTxDescAddr(priv->PortOffset, priv->tx_beacon_dma);
+ MACvSetCurrBCNTxDescAddr(priv->port_offset, priv->tx_beacon_dma);
- MACvSetCurrBCNLength(priv->PortOffset, priv->wBCNBufLen);
+ MACvSetCurrBCNLength(priv->port_offset, priv->wBCNBufLen);
/* Set auto Transmit on */
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOn(priv->port_offset, MAC_REG_TCR, TCR_AUTOBCNTX);
/* Poll Transmit the adapter */
- MACvTransmitBCN(priv->PortOffset);
+ MACvTransmitBCN(priv->port_offset);
return 0;
}
@@ -1452,9 +1452,9 @@ int vnt_beacon_make(struct vnt_private *priv, struct ieee80211_vif *vif)
int vnt_beacon_enable(struct vnt_private *priv, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf)
{
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
- VNSvOutPortB(priv->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
+ VNSvOutPortB(priv->port_offset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
CARDvSetFirstNextTBTT(priv, conf->beacon_int);
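vnt_time_stamp_off() in the first rxtx.c hunk selects a row by preamble type and a column by rate index. A standalone sketch of that two-dimensional lookup; the table contents below are placeholders, not the driver's real offsets:

#include <stdint.h>
#include <stdio.h>

#define MAX_RATE 12

/* Placeholder values; the real table lives in the driver. */
static const uint16_t time_stamp_off[2][MAX_RATE] = {
	{ 384, 288, 226, 209, 54, 43, 37, 31, 28, 25, 24, 23 },	/* long preamble */
	{ 384, 192, 130, 113, 54, 43, 37, 31, 28, 25, 24, 23 },	/* short preamble */
};

int main(void)
{
	unsigned int preamble_type = 1;		/* 1 = short, 0 = long */
	unsigned int rate = 3;

	printf("timestamp offset: %u\n",
	       time_stamp_off[preamble_type % 2][rate % MAX_RATE]);
	return 0;
}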
diff --git a/drivers/staging/wfx/bh.c b/drivers/staging/wfx/bh.c
index ed53d0b45592..a0f9d1b53019 100644
--- a/drivers/staging/wfx/bh.c
+++ b/drivers/staging/wfx/bh.c
@@ -32,18 +32,20 @@ static void device_wakeup(struct wfx_dev *wdev)
}
for (;;) {
gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1);
- // completion.h does not provide any function to wait
- // completion without consume it (a kind of
- // wait_for_completion_done_timeout()). So we have to emulate
- // it.
+ /* completion.h does not provide any function to wait
+ * for a completion without consuming it (a kind of
+ * wait_for_completion_done_timeout()). So we have to
+ * emulate it.
+ */
if (wait_for_completion_timeout(&wdev->hif.ctrl_ready,
msecs_to_jiffies(2))) {
complete(&wdev->hif.ctrl_ready);
return;
} else if (max_retry-- > 0) {
- // Older firmwares have a race in sleep/wake-up process.
- // Redo the process is sufficient to unfreeze the
- // chip.
+ /* Older firmware versions have a race in the sleep/wake-up
+ * process. Redoing the process is sufficient to unfreeze
+ * the chip.
+ */
dev_err(wdev->dev, "timeout while wake up chip\n");
gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0);
usleep_range(2000, 2500);
@@ -72,9 +74,9 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
int piggyback = 0;
WARN(read_len > round_down(0xFFF, 2) * sizeof(u16),
- "%s: request exceed WFx capability", __func__);
+ "%s: request exceed the chip capability", __func__);
- // Add 2 to take into account piggyback size
+ /* Add 2 to take into account piggyback size */
alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2);
skb = dev_alloc_skb(alloc_len);
if (!skb)
@@ -119,7 +121,7 @@ static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf)
}
skb_put(skb, le16_to_cpu(hif->len));
- // wfx_handle_rx takes care on SKB livetime
+ /* wfx_handle_rx takes care of the SKB lifetime */
wfx_handle_rx(wdev, skb);
if (!wdev->hif.tx_buffers_used)
wake_up(&wdev->hif.tx_buffers_empty);
@@ -148,7 +150,7 @@ static int bh_work_rx(struct wfx_dev *wdev, int max_msg, int *num_cnf)
ctrl_reg = 0;
if (!(ctrl_reg & CTRL_NEXT_LEN_MASK))
return i;
- // ctrl_reg units are 16bits words
+ /* ctrl_reg units are 16-bit words */
len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2;
piggyback = rx_helper(wdev, len, num_cnf);
if (piggyback < 0)
@@ -181,7 +183,7 @@ static void tx_helper(struct wfx_dev *wdev, struct hif_msg *hif)
data = hif;
WARN(len > wdev->hw_caps.size_inp_ch_buf,
- "%s: request exceed WFx capability: %zu > %d\n", __func__,
+ "%s: request exceed the chip capability: %zu > %d\n", __func__,
len, wdev->hw_caps.size_inp_ch_buf);
len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len);
ret = wfx_data_write(wdev, data, len);
@@ -263,9 +265,7 @@ static void bh_work(struct work_struct *work)
wdev->hif.tx_buffers_used, release_chip);
}
-/*
- * An IRQ from chip did occur
- */
+/* An IRQ from the chip occurred */
void wfx_bh_request_rx(struct wfx_dev *wdev)
{
u32 cur, prev;
@@ -283,16 +283,13 @@ void wfx_bh_request_rx(struct wfx_dev *wdev)
prev, cur);
}
-/*
- * Driver want to send data
- */
+/* The driver wants to send data */
void wfx_bh_request_tx(struct wfx_dev *wdev)
{
queue_work(system_highpri_wq, &wdev->hif.bh);
}
-/*
- * If IRQ is not available, this function allow to manually poll the control
+/* If IRQ is not available, this function allows manually polling the control
* register and simulating an IRQ when an event happens.
*
* Note that the device has a bug: if an IRQ is raised while the host reads the control
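A minimal sketch of the "wait without consuming" emulation described in the device_wakeup() hunk above: wait_for_completion_timeout() consumes the completion on success, so the token is immediately put back with complete() for the real consumer.

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/types.h>

static bool example_peek_completion(struct completion *c, unsigned int ms)
{
	if (wait_for_completion_timeout(c, msecs_to_jiffies(ms))) {
		complete(c);	/* re-arm for the real consumer */
		return true;
	}
	return false;
}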
diff --git a/drivers/staging/wfx/bh.h b/drivers/staging/wfx/bh.h
index 78c49329e22a..6c121ce4dd3f 100644
--- a/drivers/staging/wfx/bh.h
+++ b/drivers/staging/wfx/bh.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Interrupt bottom half.
+ * Interrupt bottom half (BH).
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -30,4 +30,4 @@ void wfx_bh_request_rx(struct wfx_dev *wdev);
void wfx_bh_request_tx(struct wfx_dev *wdev);
void wfx_bh_poll_irq(struct wfx_dev *wdev);
-#endif /* WFX_BH_H */
+#endif
diff --git a/drivers/staging/wfx/bus_sdio.c b/drivers/staging/wfx/bus_sdio.c
index e06d7e1ebe9c..a670176ba06f 100644
--- a/drivers/staging/wfx/bus_sdio.c
+++ b/drivers/staging/wfx/bus_sdio.c
@@ -67,7 +67,7 @@ static int wfx_sdio_copy_to_io(void *priv, unsigned int reg_id,
/* Use queue mode buffers */
if (reg_id == WFX_REG_IN_OUT_QUEUE)
sdio_addr |= bus->buf_id_tx << 7;
- // FIXME: discards 'const' qualifier for src
+ /* FIXME: discards 'const' qualifier for src */
ret = sdio_memcpy_toio(bus->func, sdio_addr, (void *)src, count);
if (!ret && reg_id == WFX_REG_IN_OUT_QUEUE)
bus->buf_id_tx = (bus->buf_id_tx + 1) % 32;
@@ -120,19 +120,22 @@ static int wfx_sdio_irq_subscribe(void *priv)
return ret;
}
+ flags = irq_get_trigger_type(bus->of_irq);
+ if (!flags)
+ flags = IRQF_TRIGGER_HIGH;
+ flags |= IRQF_ONESHOT;
+ ret = devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
+ wfx_sdio_irq_handler_ext, flags,
+ "wfx", bus);
+ if (ret)
+ return ret;
sdio_claim_host(bus->func);
cccr = sdio_f0_readb(bus->func, SDIO_CCCR_IENx, NULL);
cccr |= BIT(0);
cccr |= BIT(bus->func->num);
sdio_f0_writeb(bus->func, cccr, SDIO_CCCR_IENx, NULL);
sdio_release_host(bus->func);
- flags = irq_get_trigger_type(bus->of_irq);
- if (!flags)
- flags = IRQF_TRIGGER_HIGH;
- flags |= IRQF_ONESHOT;
- return devm_request_threaded_irq(&bus->func->dev, bus->of_irq, NULL,
- wfx_sdio_irq_handler_ext, flags,
- "wfx", bus);
+ return 0;
}
static int wfx_sdio_irq_unsubscribe(void *priv)
@@ -198,7 +201,7 @@ static int wfx_sdio_probe(struct sdio_func *func,
} else {
dev_warn(&func->dev,
"device is not declared in DT, features will be limited\n");
- // FIXME: ignore VID/PID and only rely on device tree
+ /* FIXME: ignore VID/PID and only rely on device tree */
// return -ENODEV;
}
@@ -210,7 +213,7 @@ static int wfx_sdio_probe(struct sdio_func *func,
sdio_claim_host(func);
ret = sdio_enable_func(func);
- // Block of 64 bytes is more efficient than 512B for frame sizes < 4k
+ /* Block of 64 bytes is more efficient than 512B for frame sizes < 4k */
sdio_set_block_size(func, 64);
sdio_release_host(func);
if (ret)
@@ -251,7 +254,7 @@ static void wfx_sdio_remove(struct sdio_func *func)
#define SDIO_DEVICE_ID_SILABS_WF200 0x1000
static const struct sdio_device_id wfx_sdio_ids[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_SILABS, SDIO_DEVICE_ID_SILABS_WF200) },
- // FIXME: ignore VID/PID and only rely on device tree
+ /* FIXME: ignore VID/PID and only rely on device tree */
// { SDIO_DEVICE(SDIO_ANY_ID, SDIO_ANY_ID) },
{ },
};
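The wfx_sdio_irq_subscribe() hunk above moves the IRQ request ahead of the CCCR write, so a handler is installed before the card may raise interrupts. A sketch of that ordering with illustrative names; the trigger type and IRQF_ONESHOT mirror the hunk:

#include <linux/bits.h>
#include <linux/interrupt.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_irq_subscribe(struct sdio_func *func, int irq,
				      irq_handler_t thread_fn, void *cookie)
{
	u8 cccr;
	int ret;

	/* 1. Install the threaded handler first. */
	ret = devm_request_threaded_irq(&func->dev, irq, NULL, thread_fn,
					IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
					"example", cookie);
	if (ret)
		return ret;

	/* 2. Only then let the card generate interrupts. */
	sdio_claim_host(func);
	cccr = sdio_f0_readb(func, SDIO_CCCR_IENx, NULL);
	cccr |= BIT(0) | BIT(func->num);	/* master enable + this function */
	sdio_f0_writeb(func, cccr, SDIO_CCCR_IENx, NULL);
	sdio_release_host(func);
	return 0;
}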
diff --git a/drivers/staging/wfx/bus_spi.c b/drivers/staging/wfx/bus_spi.c
index a99125d1a30d..55ffcd7c42e2 100644
--- a/drivers/staging/wfx/bus_spi.c
+++ b/drivers/staging/wfx/bus_spi.c
@@ -38,10 +38,9 @@ struct wfx_spi_priv {
bool need_swab;
};
-/*
- * WFx chip read data 16bits at time and place them directly into (little
- * endian) CPU register. So, chip expect byte order like "B1 B0 B3 B2" (while
- * LE is "B0 B1 B2 B3" and BE is "B3 B2 B1 B0")
+/* The chip reads 16 bits of data at a time and places them directly into a
+ * (little endian) CPU register. So the chip expects the byte order to be
+ * "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is "B3 B2 B1 B0")
*
* A little endian host with bits_per_word == 16 should do the right job
* natively. The code below to support big endian host and commonly used SPI
@@ -86,7 +85,7 @@ static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
{
struct wfx_spi_priv *bus = priv;
u16 regaddr = (addr << 12) | (count / 2);
- // FIXME: use a bounce buffer
+ /* FIXME: use a bounce buffer */
u16 *src16 = (void *)src;
int ret, i;
struct spi_message m;
@@ -104,8 +103,9 @@ static int wfx_spi_copy_to_io(void *priv, unsigned int addr,
cpu_to_le16s(&regaddr);
- // Register address and CONFIG content always use 16bit big endian
- // ("BADC" order)
+ /* Register address and CONFIG content always use 16bit big endian
+ * ("BADC" order)
+ */
if (bus->need_swab)
swab16s(&regaddr);
if (bus->need_swab && addr == WFX_REG_CONFIG)
@@ -163,7 +163,8 @@ static int wfx_spi_irq_unsubscribe(void *priv)
static size_t wfx_spi_align_size(void *priv, size_t size)
{
- // Most of SPI controllers avoid DMA if buffer size is not 32bit aligned
+ /* Most SPI controllers avoid DMA if the buffer size is not 32-bit aligned
+ */
return ALIGN(size, 4);
}
@@ -187,7 +188,7 @@ static int wfx_spi_probe(struct spi_device *func)
ret = spi_setup(func);
if (ret)
return ret;
- // Trace below is also displayed by spi_setup() if compiled with DEBUG
+ /* Trace below is also displayed by spi_setup() if compiled with DEBUG */
dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n",
func->chip_select, func->mode, func->bits_per_word,
func->max_speed_hz);
@@ -239,8 +240,7 @@ static int wfx_spi_remove(struct spi_device *func)
return 0;
}
-/*
- * For dynamic driver binding, kernel does not use OF to match driver. It only
+/* For dynamic driver binding, the kernel does not use OF to match the driver.
* It only uses modalias, and modalias is a copy of the 'compatible' DT node
* with the vendor prefix stripped.
*/
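A small standalone sketch of the "B1 B0 B3 B2" ordering described in the bus_spi.c comment above: swap each pair of bytes, which is what the driver achieves with swab16s() on big-endian hosts.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void to_badc_order(uint8_t *buf, size_t len)
{
	size_t i;

	/* B0 B1 B2 B3 ... -> B1 B0 B3 B2 ... */
	for (i = 0; i + 1 < len; i += 2) {
		uint8_t tmp = buf[i];

		buf[i] = buf[i + 1];
		buf[i + 1] = tmp;
	}
}

int main(void)
{
	uint8_t bytes[4] = { 0xB0, 0xB1, 0xB2, 0xB3 };

	to_badc_order(bytes, sizeof(bytes));
	printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}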
diff --git a/drivers/staging/wfx/data_rx.c b/drivers/staging/wfx/data_rx.c
index 385f2d42a0e2..bfc3961b7b89 100644
--- a/drivers/staging/wfx/data_rx.c
+++ b/drivers/staging/wfx/data_rx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Datapath implementation.
+ * Data receiving implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -76,8 +76,9 @@ void wfx_rx_cb(struct wfx_vif *wvif,
if (arg->encryp)
hdr->flag |= RX_FLAG_DECRYPTED;
- // Block ack negotiation is offloaded by the firmware. However,
- // re-ordering must be done by the mac80211.
+ /* Block ack negotiation is offloaded by the firmware. However,
+ * re-ordering must be done by mac80211.
+ */
if (ieee80211_is_action(frame->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_BACK &&
skb->len > IEEE80211_MIN_ACTION_SIZE) {
diff --git a/drivers/staging/wfx/data_rx.h b/drivers/staging/wfx/data_rx.h
index 4c0da37f2084..84d0e3c0507b 100644
--- a/drivers/staging/wfx/data_rx.h
+++ b/drivers/staging/wfx/data_rx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Datapath implementation.
+ * Data receiving implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -15,4 +15,4 @@ struct hif_ind_rx;
void wfx_rx_cb(struct wfx_vif *wvif,
const struct hif_ind_rx *arg, struct sk_buff *skb);
-#endif /* WFX_DATA_RX_H */
+#endif
diff --git a/drivers/staging/wfx/data_tx.c b/drivers/staging/wfx/data_tx.c
index 77fb104efdec..052a19161dc5 100644
--- a/drivers/staging/wfx/data_tx.c
+++ b/drivers/staging/wfx/data_tx.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Datapath implementation.
+ * Data transmitting implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -31,8 +31,9 @@ static int wfx_get_hw_rate(struct wfx_dev *wdev,
}
return rate->idx + 14;
}
- // WFx only support 2GHz, else band information should be retrieved
- // from ieee80211_tx_info
+ /* The device only supports 2GHz; otherwise the band information
+ * should be retrieved from ieee80211_tx_info
+ */
band = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
if (rate->idx >= band->n_bitrates) {
WARN(1, "wrong rate->idx value: %d", rate->idx);
@@ -57,7 +58,7 @@ static void wfx_tx_policy_build(struct wfx_vif *wvif, struct tx_policy *policy,
break;
WARN_ON(rates[i].count > 15);
rateid = wfx_get_hw_rate(wdev, &rates[i]);
- // Pack two values in each byte of policy->rates
+ /* Pack two values in each byte of policy->rates */
count = rates[i].count;
if (rateid % 2)
count <<= 4;
@@ -108,6 +109,7 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
int idx;
struct tx_policy_cache *cache = &wvif->tx_policy_cache;
struct tx_policy wanted;
+ struct tx_policy *entry;
wfx_tx_policy_build(wvif, &wanted, rates);
@@ -121,11 +123,10 @@ static int wfx_tx_policy_get(struct wfx_vif *wvif,
if (idx >= 0) {
*renew = false;
} else {
- struct tx_policy *entry;
- *renew = true;
- /* If policy is not found create a new one
- * using the oldest entry in "free" list
+ /* If the policy is not found, create a new one using the oldest
+ * entry in the "free" list
*/
+ *renew = true;
entry = list_entry(cache->free.prev, struct tx_policy, link);
memcpy(entry->rates, wanted.rates, sizeof(entry->rates));
entry->uploaded = false;
@@ -238,7 +239,7 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
int i;
bool finished;
- // Firmware is not able to mix rates with different flags
+ /* Firmware is not able to mix rates with different flags */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
rates[i].flags |= IEEE80211_TX_RC_SHORT_GI;
@@ -248,7 +249,7 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
rates[i].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
}
- // Sort rates and remove duplicates
+ /* Sort rates and remove duplicates */
do {
finished = true;
for (i = 0; i < IEEE80211_TX_MAX_RATES - 1; i++) {
@@ -268,32 +269,31 @@ static void wfx_tx_fixup_rates(struct ieee80211_tx_rate *rates)
}
}
} while (!finished);
- // Ensure that MCS0 or 1Mbps is present at the end of the retry list
+ /* Ensure that MCS0 or 1Mbps is present at the end of the retry list */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
if (rates[i].idx == 0)
break;
if (rates[i].idx == -1) {
rates[i].idx = 0;
- rates[i].count = 8; // == hw->max_rate_tries
+ rates[i].count = 8; /* == hw->max_rate_tries */
rates[i].flags = rates[i - 1].flags &
IEEE80211_TX_RC_MCS;
break;
}
}
- // All retries use long GI
+ /* All retries use long GI */
for (i = 1; i < IEEE80211_TX_MAX_RATES; i++)
rates[i].flags &= ~IEEE80211_TX_RC_SHORT_GI;
}
-static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
- struct ieee80211_tx_info *tx_info)
+static u8 wfx_tx_get_retry_policy_id(struct wfx_vif *wvif,
+ struct ieee80211_tx_info *tx_info)
{
bool tx_policy_renew = false;
- u8 rate_id;
+ u8 ret;
- rate_id = wfx_tx_policy_get(wvif,
- tx_info->driver_rates, &tx_policy_renew);
- if (rate_id == HIF_TX_RETRY_POLICY_INVALID)
+ ret = wfx_tx_policy_get(wvif, tx_info->driver_rates, &tx_policy_renew);
+ if (ret == HIF_TX_RETRY_POLICY_INVALID)
dev_warn(wvif->wdev->dev, "unable to get a valid Tx policy");
if (tx_policy_renew) {
@@ -301,7 +301,7 @@ static u8 wfx_tx_get_rate_id(struct wfx_vif *wvif,
if (!schedule_work(&wvif->tx_policy_upload_work))
wfx_tx_unlock(wvif->wdev);
}
- return rate_id;
+ return ret;
}
static int wfx_tx_get_frame_format(struct ieee80211_tx_info *tx_info)
@@ -343,13 +343,13 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
WARN(queue_id >= IEEE80211_NUM_ACS, "unsupported queue_id");
wfx_tx_fixup_rates(tx_info->driver_rates);
- // From now tx_info->control is unusable
+ /* From now tx_info->control is unusable */
memset(tx_info->rate_driver_data, 0, sizeof(struct wfx_tx_priv));
- // Fill tx_priv
+ /* Fill tx_priv */
tx_priv = (struct wfx_tx_priv *)tx_info->rate_driver_data;
tx_priv->icv_size = wfx_tx_get_icv_len(hw_key);
- // Fill hif_msg
+ /* Fill hif_msg */
WARN(skb_headroom(skb) < wmsg_len, "not enough space in skb");
WARN(offset & 1, "attempt to transmit an unaligned frame");
skb_put(skb, tx_priv->icv_size);
@@ -367,27 +367,28 @@ static int wfx_tx_inner(struct wfx_vif *wvif, struct ieee80211_sta *sta,
return -EIO;
}
- // Fill tx request
+ /* Fill tx request */
req = (struct hif_req_tx *)hif_msg->body;
- // packet_id just need to be unique on device. 32bits are more than
- // necessary for that task, so we tae advantage of it to add some extra
- // data for debug.
+ /* packet_id just needs to be unique on the device. 32 bits are more
+ * than necessary for that task, so we take advantage of it to add some
+ * extra data for debug.
+ */
req->packet_id = atomic_add_return(1, &wvif->wdev->packet_id) & 0xFFFF;
req->packet_id |= IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)) << 16;
req->packet_id |= queue_id << 28;
req->fc_offset = offset;
- if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
- req->after_dtim = 1;
- req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
- // Queue index are inverted between firmware and Linux
+ /* Queue indices are inverted between firmware and Linux */
req->queue_id = 3 - queue_id;
- req->retry_policy_index = wfx_tx_get_rate_id(wvif, tx_info);
+ req->peer_sta_id = wfx_tx_get_link_id(wvif, sta, hdr);
+ req->retry_policy_index = wfx_tx_get_retry_policy_id(wvif, tx_info);
req->frame_format = wfx_tx_get_frame_format(tx_info);
if (tx_info->driver_rates[0].flags & IEEE80211_TX_RC_SHORT_GI)
req->short_gi = 1;
+ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
+ req->after_dtim = 1;
- // Auxiliary operations
+ /* Auxiliary operations */
wfx_tx_queues_put(wvif, skb);
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM)
schedule_work(&wvif->update_tim_work);
@@ -409,15 +410,16 @@ void wfx_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
compiletime_assert(sizeof(struct wfx_tx_priv) <= driver_data_room,
"struct tx_priv is too large");
WARN(skb->next || skb->prev, "skb is already member of a list");
- // control.vif can be NULL for injected frames
+ /* control.vif can be NULL for injected frames */
if (tx_info->control.vif)
wvif = (struct wfx_vif *)tx_info->control.vif->drv_priv;
else
wvif = wvif_iterate(wdev, NULL);
if (WARN_ON(!wvif))
goto drop;
- // Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any
- // BlockAck session management frame. The check below exist just in case.
+ /* Because of TX_AMPDU_SETUP_IN_HW, mac80211 does not try to send any
+ * BlockAck session management frame. The check below exists just in case.
+ */
if (ieee80211_is_action_back(hdr)) {
dev_info(wdev->dev, "drop BA action\n");
goto drop;
@@ -458,7 +460,7 @@ static void wfx_tx_fill_rates(struct wfx_dev *wdev,
tx_count = arg->ack_failures;
if (!arg->status || arg->ack_failures)
- tx_count += 1; // Also report success
+ tx_count += 1; /* Also report success */
for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
rate = &tx_info->status.rates[i];
if (rate->idx < 0)
@@ -506,14 +508,15 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
if (!wvif)
return;
- // Note that wfx_pending_get_pkt_us_delay() get data from tx_info
+ /* Note that wfx_pending_get_pkt_us_delay() gets data from tx_info */
_trace_tx_stats(arg, skb, wfx_pending_get_pkt_us_delay(wdev, skb));
wfx_tx_fill_rates(wdev, tx_info, arg);
skb_trim(skb, skb->len - tx_priv->icv_size);
- // From now, you can touch to tx_info->status, but do not touch to
- // tx_priv anymore
- // FIXME: use ieee80211_tx_info_clear_status()
+ /* From now on, you can touch tx_info->status, but do not touch
+ * tx_priv anymore
+ */
+ /* FIXME: use ieee80211_tx_info_clear_status() */
memset(tx_info->rate_driver_data, 0, sizeof(tx_info->rate_driver_data));
memset(tx_info->pad, 0, sizeof(tx_info->pad));
@@ -528,7 +531,7 @@ void wfx_tx_confirm_cb(struct wfx_dev *wdev, const struct hif_cnf_tx *arg)
} else if (arg->status == HIF_STATUS_TX_FAIL_REQUEUE) {
WARN(!arg->requeue, "incoherent status and result_flags");
if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- wvif->after_dtim_tx_allowed = false; // DTIM period elapsed
+ wvif->after_dtim_tx_allowed = false; /* DTIM period elapsed */
schedule_work(&wvif->update_tim_work);
}
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
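Aside: the packet_id built in the data_tx.c hunk above packs a 16-bit counter, the 12-bit IEEE 802.11 sequence number and the queue index into one 32-bit word. A minimal sketch, not part of this patch and with hypothetical helper names, of how such a value could be decoded while debugging:

    /* Illustrative only -- layout as described above: bits 0..15 hold the
     * per-device counter, bits 16..27 the 802.11 sequence number and
     * bits 28..31 the queue index.
     */
    static inline u16 dbg_packet_id_counter(u32 packet_id)
    {
            return packet_id & 0xFFFF;
    }

    static inline u16 dbg_packet_id_seqno(u32 packet_id)
    {
            return (packet_id >> 16) & 0x0FFF;
    }

    static inline u8 dbg_packet_id_queue(u32 packet_id)
    {
            return packet_id >> 28;
    }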
diff --git a/drivers/staging/wfx/data_tx.h b/drivers/staging/wfx/data_tx.h
index 401363d6b563..15590a8faefe 100644
--- a/drivers/staging/wfx/data_tx.h
+++ b/drivers/staging/wfx/data_tx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Datapath implementation.
+ * Data transmission implementation.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -27,7 +27,7 @@ struct tx_policy {
struct tx_policy_cache {
struct tx_policy cache[HIF_TX_RETRY_POLICY_MAX];
- // FIXME: use a trees and drop hash from tx_policy
+ /* FIXME: use a tree and drop hash from tx_policy */
struct list_head used;
struct list_head free;
spinlock_t lock;
@@ -65,4 +65,4 @@ static inline struct hif_req_tx *wfx_skb_txreq(struct sk_buff *skb)
return req;
}
-#endif /* WFX_DATA_TX_H */
+#endif
diff --git a/drivers/staging/wfx/debug.c b/drivers/staging/wfx/debug.c
index eedada78c25f..9f93268a3202 100644
--- a/drivers/staging/wfx/debug.c
+++ b/drivers/staging/wfx/debug.c
@@ -82,33 +82,37 @@ static int wfx_counters_show(struct seq_file *seq, void *v)
le32_to_cpu(counters[0].count_##name), \
le32_to_cpu(counters[1].count_##name))
- PUT_COUNTER(tx_packets);
- PUT_COUNTER(tx_multicast_frames);
+ PUT_COUNTER(tx_frames);
+ PUT_COUNTER(tx_frames_multicast);
PUT_COUNTER(tx_frames_success);
- PUT_COUNTER(tx_frame_failures);
PUT_COUNTER(tx_frames_retried);
PUT_COUNTER(tx_frames_multi_retried);
+ PUT_COUNTER(tx_frames_failed);
+ PUT_COUNTER(ack_failed);
PUT_COUNTER(rts_success);
- PUT_COUNTER(rts_failures);
- PUT_COUNTER(ack_failures);
+ PUT_COUNTER(rts_failed);
- PUT_COUNTER(rx_packets);
+ PUT_COUNTER(rx_frames);
+ PUT_COUNTER(rx_frames_multicast);
PUT_COUNTER(rx_frames_success);
- PUT_COUNTER(rx_packet_errors);
- PUT_COUNTER(plcp_errors);
- PUT_COUNTER(fcs_errors);
- PUT_COUNTER(rx_decryption_failures);
- PUT_COUNTER(rx_mic_failures);
- PUT_COUNTER(rx_no_key_failures);
- PUT_COUNTER(rx_frame_duplicates);
- PUT_COUNTER(rx_multicast_frames);
- PUT_COUNTER(rx_cmacicv_errors);
- PUT_COUNTER(rx_cmac_replays);
- PUT_COUNTER(rx_mgmt_ccmp_replays);
-
- PUT_COUNTER(rx_beacon);
- PUT_COUNTER(miss_beacon);
+ PUT_COUNTER(rx_frames_failed);
+ PUT_COUNTER(drop_plcp);
+ PUT_COUNTER(drop_fcs);
+ PUT_COUNTER(drop_no_key);
+ PUT_COUNTER(drop_decryption);
+ PUT_COUNTER(drop_tkip_mic);
+ PUT_COUNTER(drop_bip_mic);
+ PUT_COUNTER(drop_cmac_icv);
+ PUT_COUNTER(drop_cmac_replay);
+ PUT_COUNTER(drop_ccmp_replay);
+ PUT_COUNTER(drop_duplicate);
+
+ PUT_COUNTER(rx_bcn_miss);
+ PUT_COUNTER(rx_bcn_success);
+ PUT_COUNTER(rx_bcn_dtim);
+ PUT_COUNTER(rx_bcn_dtim_aid0_clr);
+ PUT_COUNTER(rx_bcn_dtim_aid0_set);
#undef PUT_COUNTER
@@ -252,9 +256,10 @@ static ssize_t wfx_send_hif_msg_write(struct file *file,
if (count < sizeof(struct hif_msg))
return -EINVAL;
- // wfx_cmd_send() checks that reply buffer is wide enough, but does not
- // return precise length read. User have to know how many bytes should
- // be read. Filling reply buffer with a memory pattern may help user.
+ /* wfx_cmd_send() checks that the reply buffer is wide enough, but does not
+ * return the precise length read. The user has to know how many bytes should
+ * be read. Filling the reply buffer with a memory pattern may help the user.
+ */
memset(context->reply, 0xFF, sizeof(context->reply));
request = memdup_user(user_buf, count);
if (IS_ERR(request))
@@ -284,8 +289,9 @@ static ssize_t wfx_send_hif_msg_read(struct file *file, char __user *user_buf,
return ret;
if (context->ret < 0)
return context->ret;
- // Be careful, write() is waiting for a full message while read()
- // only returns a payload
+ /* Be careful: write() expects a full message while read()
+ * only returns the payload
+ */
if (copy_to_user(user_buf, context->reply, count))
return -EFAULT;
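Aside: given the write()/read() contract described in the debug.c hunk above (write() takes a complete HIF message, read() returns only the payload, and untouched reply bytes keep the 0xFF fill), a user-space test could look roughly like the sketch below. The debugfs path is an assumption, not something stated by this patch.

    /* Hypothetical user-space sketch: send one HIF request, read the reply */
    #include <fcntl.h>
    #include <stddef.h>
    #include <unistd.h>

    static int send_one_hif_msg(const void *msg, size_t msg_len,
                                void *reply, size_t reply_len)
    {
            int fd = open("/sys/kernel/debug/ieee80211/phy0/wfx/send_hif_msg",
                          O_RDWR);

            if (fd < 0)
                    return -1;
            /* write() expects a full HIF message (header + body) */
            if (write(fd, msg, msg_len) != (ssize_t)msg_len)
                    goto err;
            /* read() only returns the payload; unread bytes stay 0xFF */
            if (read(fd, reply, reply_len) < 0)
                    goto err;
            close(fd);
            return 0;
    err:
            close(fd);
            return -1;
    }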
diff --git a/drivers/staging/wfx/debug.h b/drivers/staging/wfx/debug.h
index 6f2f84d64c9e..4b9c49a9fffb 100644
--- a/drivers/staging/wfx/debug.h
+++ b/drivers/staging/wfx/debug.h
@@ -16,4 +16,4 @@ const char *get_hif_name(unsigned long id);
const char *get_mib_name(unsigned long id);
const char *get_reg_name(unsigned long id);
-#endif /* WFX_DEBUG_H */
+#endif
diff --git a/drivers/staging/wfx/fwio.c b/drivers/staging/wfx/fwio.c
index 1b8aec02d169..98a9391b2bee 100644
--- a/drivers/staging/wfx/fwio.c
+++ b/drivers/staging/wfx/fwio.c
@@ -14,11 +14,11 @@
#include "wfx.h"
#include "hwio.h"
-// Addresses below are in SRAM area
+/* Addresses below are in SRAM area */
#define WFX_DNLD_FIFO 0x09004000
#define DNLD_BLOCK_SIZE 0x0400
-#define DNLD_FIFO_SIZE 0x8000 // (32 * DNLD_BLOCK_SIZE)
-// Download Control Area (DCA)
+#define DNLD_FIFO_SIZE 0x8000 /* (32 * DNLD_BLOCK_SIZE) */
+/* Download Control Area (DCA) */
#define WFX_DCA_IMAGE_SIZE 0x0900C000
#define WFX_DCA_PUT 0x0900C004
#define WFX_DCA_GET 0x0900C008
@@ -58,8 +58,8 @@
#define ERR_ECC_PUB_KEY 0x11
#define ERR_MAC_KEY 0x18
-#define DCA_TIMEOUT 50 // milliseconds
-#define WAKEUP_TIMEOUT 200 // milliseconds
+#define DCA_TIMEOUT 50 /* milliseconds */
+#define WAKEUP_TIMEOUT 200 /* milliseconds */
static const char * const fwio_errors[] = {
[ERR_INVALID_SEC_TYPE] = "Invalid section type or wrong encryption",
@@ -69,8 +69,7 @@ static const char * const fwio_errors[] = {
[ERR_MAC_KEY] = "MAC key not initialized",
};
-/*
- * request_firmware() allocate data using vmalloc(). It is not compatible with
+/* request_firmware() allocates data using vmalloc(). It is not compatible with
* underlying hardware that use DMA. Function below detect this case and
* allocate a bounce buffer if necessary.
*
@@ -125,7 +124,7 @@ static int get_firmware(struct wfx_dev *wdev, u32 keyset_chip,
data = (*fw)->data;
if (memcmp(data, "KEYSET", 6) != 0) {
- // Legacy firmware format
+ /* Legacy firmware format */
*file_offset = 0;
keyset_file = 0x90;
} else {
@@ -207,8 +206,9 @@ static int upload_firmware(struct wfx_dev *wdev, const u8 *data, size_t len)
if (ret < 0)
return ret;
- // WFx seems to not support writing 0 in this register during
- // first loop
+ /* The device does not seem to support writing 0 in this register
+ * during the first loop
+ */
offs += DNLD_BLOCK_SIZE;
ret = sram_reg_write(wdev, WFX_DCA_PUT, offs);
if (ret < 0)
@@ -265,7 +265,7 @@ static int load_firmware_secure(struct wfx_dev *wdev)
if (ret)
goto error;
- sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); // Fifo init
+ sram_reg_write(wdev, WFX_DNLD_FIFO, 0xFFFFFFFF); /* Fifo init */
sram_write_dma_safe(wdev, WFX_DCA_FW_VERSION, "\x01\x00\x00\x00",
FW_VERSION_SIZE);
sram_write_dma_safe(wdev, WFX_DCA_FW_SIGNATURE, fw->data + fw_offset,
@@ -289,7 +289,7 @@ static int load_firmware_secure(struct wfx_dev *wdev)
sram_reg_write(wdev, WFX_DCA_HOST_STATUS, HOST_UPLOAD_COMPLETE);
ret = wait_ncp_status(wdev, NCP_AUTH_OK);
- // Legacy ROM support
+ /* Legacy ROM support */
if (ret < 0)
ret = wait_ncp_status(wdev, NCP_PUB_KEY_RDY);
if (ret < 0)
@@ -334,7 +334,7 @@ int wfx_init_device(struct wfx_dev *wdev)
{
int ret;
int hw_revision, hw_type;
- int wakeup_timeout = 50; // ms
+ int wakeup_timeout = 50; /* ms */
ktime_t now, start;
u32 reg;
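Aside: the vmalloc()-versus-DMA note in the fwio.c hunk above is the usual bounce-buffer pattern. A minimal sketch of the idea, not the driver's actual implementation and with a hypothetical helper name:

    /* Copy vmalloc()'ed firmware data into a kmalloc()'ed, DMA-able buffer
     * before handing it to the bus driver; kmalloc()'ed data passes through.
     */
    static void *dma_safe_copy(const void *data, size_t len)
    {
            void *buf;

            if (!is_vmalloc_addr(data))
                    return (void *)data;    /* already DMA-safe */
            buf = kmalloc(len, GFP_KERNEL);
            if (buf)
                    memcpy(buf, data, len);
            return buf;
    }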
diff --git a/drivers/staging/wfx/fwio.h b/drivers/staging/wfx/fwio.h
index 6028f92503fe..eeea61210eca 100644
--- a/drivers/staging/wfx/fwio.h
+++ b/drivers/staging/wfx/fwio.h
@@ -12,4 +12,4 @@ struct wfx_dev;
int wfx_init_device(struct wfx_dev *wdev);
-#endif /* WFX_FWIO_H */
+#endif
diff --git a/drivers/staging/wfx/hif_api_cmd.h b/drivers/staging/wfx/hif_api_cmd.h
index 58c9bb036011..b0aa13b23a51 100644
--- a/drivers/staging/wfx/hif_api_cmd.h
+++ b/drivers/staging/wfx/hif_api_cmd.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: Apache-2.0 */
/*
- * WFx hardware interface definitions
+ * WF200 hardware interface definitions
*
* Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
@@ -134,7 +134,7 @@ struct hif_req_start_scan_alt {
u8 num_of_channels;
__le32 min_channel_time;
__le32 max_channel_time;
- __le32 tx_power_level; // signed value
+ __le32 tx_power_level; /* signed value */
struct hif_ssid_def ssid_def[HIF_API_MAX_NB_SSIDS];
u8 channel_list[];
} __packed;
@@ -174,8 +174,9 @@ enum hif_frame_format {
};
struct hif_req_tx {
- // packet_id is not interpreted by the device, so it is not necessary to
- // declare it little endian
+ /* packet_id is not interpreted by the device, so it is not necessary to
+ * declare it little endian
+ */
u32 packet_id;
u8 max_tx_rate;
u8 queue_id:2;
@@ -211,8 +212,9 @@ enum hif_qos_ackplcy {
struct hif_cnf_tx {
__le32 status;
- // packet_id is copied from struct hif_req_tx without been interpreted
- // by the device, so it is not necessary to declare it little endian
+ /* packet_id is copied from struct hif_req_tx without being interpreted
+ * by the device, so it is not necessary to declare it little endian
+ */
u32 packet_id;
u8 txed_rate;
u8 ack_failures;
diff --git a/drivers/staging/wfx/hif_api_general.h b/drivers/staging/wfx/hif_api_general.h
index 24188945718d..5f74f829b7df 100644
--- a/drivers/staging/wfx/hif_api_general.h
+++ b/drivers/staging/wfx/hif_api_general.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: Apache-2.0 */
/*
- * WFx hardware interface definitions
+ * WF200 hardware interface definitions
*
* Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
@@ -113,16 +113,11 @@ enum hif_api_rate_index {
API_RATE_NUM_ENTRIES = 22
};
-enum hif_fw_type {
- HIF_FW_TYPE_ETF = 0x0,
- HIF_FW_TYPE_WFM = 0x1,
- HIF_FW_TYPE_WSM = 0x2
-};
-
struct hif_ind_startup {
- // As the others, this struct is interpreted as little endian by the
- // device. However, this struct is also used by the driver. We prefer to
- // declare it in native order and doing byte swap on reception.
+ /* Like the others, this struct is interpreted as little endian by the
+ * device. However, this struct is also used by the driver. We prefer to
+ * declare it in native byte order and do the byte swap on reception.
+ */
__le32 status;
u16 hardware_id;
u8 opn[14];
@@ -199,9 +194,9 @@ struct hif_rx_stats {
__le32 throughput;
__le32 nb_rx_by_rate[API_RATE_NUM_ENTRIES];
__le16 per[API_RATE_NUM_ENTRIES];
- __le16 snr[API_RATE_NUM_ENTRIES]; // signed value
- __le16 rssi[API_RATE_NUM_ENTRIES]; // signed value
- __le16 cfo[API_RATE_NUM_ENTRIES]; // signed value
+ __le16 snr[API_RATE_NUM_ENTRIES]; /* signed value */
+ __le16 rssi[API_RATE_NUM_ENTRIES]; /* signed value */
+ __le16 cfo[API_RATE_NUM_ENTRIES]; /* signed value */
__le32 date;
__le32 pwr_clk_freq;
u8 is_ext_pwr_clk;
@@ -211,8 +206,8 @@ struct hif_rx_stats {
struct hif_tx_power_loop_info {
__le16 tx_gain_dig;
__le16 tx_gain_pa;
- __le16 target_pout; // signed value
- __le16 p_estimation; // signed value
+ __le16 target_pout; /* signed value */
+ __le16 p_estimation; /* signed value */
__le16 vpdet;
u8 measurement_index;
u8 reserved;
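Aside: the comment on struct hif_ind_startup above says the struct is declared in native byte order and swapped on reception. A minimal sketch of that swap, assuming only the hardware_id field shown above and a hypothetical helper name:

    /* Swap the native-order fields of struct hif_ind_startup in place,
     * right after the message has been received from the device.
     */
    static void hif_ind_startup_to_cpu(struct hif_ind_startup *ind)
    {
            le16_to_cpus(&ind->hardware_id);
    }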
diff --git a/drivers/staging/wfx/hif_api_mib.h b/drivers/staging/wfx/hif_api_mib.h
index ace924720ce6..da534f244757 100644
--- a/drivers/staging/wfx/hif_api_mib.h
+++ b/drivers/staging/wfx/hif_api_mib.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: Apache-2.0 */
/*
- * WFx hardware interface definitions
+ * WF200 hardware interface definitions
*
* Copyright (c) 2018-2020, Silicon Laboratories Inc.
*/
@@ -133,58 +133,61 @@ struct hif_mib_bcn_filter_enable {
} __packed;
struct hif_mib_extended_count_table {
- __le32 count_plcp_errors;
- __le32 count_fcs_errors;
- __le32 count_tx_packets;
- __le32 count_rx_packets;
- __le32 count_rx_packet_errors;
- __le32 count_rx_decryption_failures;
- __le32 count_rx_mic_failures;
- __le32 count_rx_no_key_failures;
- __le32 count_tx_multicast_frames;
+ __le32 count_drop_plcp;
+ __le32 count_drop_fcs;
+ __le32 count_tx_frames;
+ __le32 count_rx_frames;
+ __le32 count_rx_frames_failed;
+ __le32 count_drop_decryption;
+ __le32 count_drop_tkip_mic;
+ __le32 count_drop_no_key;
+ __le32 count_tx_frames_multicast;
__le32 count_tx_frames_success;
- __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_failed;
__le32 count_tx_frames_retried;
__le32 count_tx_frames_multi_retried;
- __le32 count_rx_frame_duplicates;
+ __le32 count_drop_duplicate;
__le32 count_rts_success;
- __le32 count_rts_failures;
- __le32 count_ack_failures;
- __le32 count_rx_multicast_frames;
+ __le32 count_rts_failed;
+ __le32 count_ack_failed;
+ __le32 count_rx_frames_multicast;
__le32 count_rx_frames_success;
- __le32 count_rx_cmacicv_errors;
- __le32 count_rx_cmac_replays;
- __le32 count_rx_mgmt_ccmp_replays;
- __le32 count_rx_bipmic_errors;
- __le32 count_rx_beacon;
- __le32 count_miss_beacon;
- __le32 reserved[15];
+ __le32 count_drop_cmac_icv;
+ __le32 count_drop_cmac_replay;
+ __le32 count_drop_ccmp_replay;
+ __le32 count_drop_bip_mic;
+ __le32 count_rx_bcn_success;
+ __le32 count_rx_bcn_miss;
+ __le32 count_rx_bcn_dtim;
+ __le32 count_rx_bcn_dtim_aid0_clr;
+ __le32 count_rx_bcn_dtim_aid0_set;
+ __le32 reserved[12];
} __packed;
struct hif_mib_count_table {
- __le32 count_plcp_errors;
- __le32 count_fcs_errors;
- __le32 count_tx_packets;
- __le32 count_rx_packets;
- __le32 count_rx_packet_errors;
- __le32 count_rx_decryption_failures;
- __le32 count_rx_mic_failures;
- __le32 count_rx_no_key_failures;
- __le32 count_tx_multicast_frames;
+ __le32 count_drop_plcp;
+ __le32 count_drop_fcs;
+ __le32 count_tx_frames;
+ __le32 count_rx_frames;
+ __le32 count_rx_frames_failed;
+ __le32 count_drop_decryption;
+ __le32 count_drop_tkip_mic;
+ __le32 count_drop_no_key;
+ __le32 count_tx_frames_multicast;
__le32 count_tx_frames_success;
- __le32 count_tx_frame_failures;
+ __le32 count_tx_frames_failed;
__le32 count_tx_frames_retried;
__le32 count_tx_frames_multi_retried;
- __le32 count_rx_frame_duplicates;
+ __le32 count_drop_duplicate;
__le32 count_rts_success;
- __le32 count_rts_failures;
- __le32 count_ack_failures;
- __le32 count_rx_multicast_frames;
+ __le32 count_rts_failed;
+ __le32 count_ack_failed;
+ __le32 count_rx_frames_multicast;
__le32 count_rx_frames_success;
- __le32 count_rx_cmacicv_errors;
- __le32 count_rx_cmac_replays;
- __le32 count_rx_mgmt_ccmp_replays;
- __le32 count_rx_bipmic_errors;
+ __le32 count_drop_cmac_icv;
+ __le32 count_drop_cmac_replay;
+ __le32 count_drop_ccmp_replay;
+ __le32 count_drop_bip_mic;
} __packed;
struct hif_mib_mac_address {
@@ -206,7 +209,7 @@ struct hif_mib_slot_time {
} __packed;
struct hif_mib_current_tx_power_level {
- __le32 power_level; // signed value
+ __le32 power_level; /* signed value */
} __packed;
struct hif_mib_non_erp_protection {
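Aside: the counter renames above keep struct hif_mib_count_table a field-for-field prefix of struct hif_mib_extended_count_table (everything up to count_drop_bip_mic). That layout invariant is what lets the driver read the legacy table into the extended one on old firmware; it could be guarded by a compile-time check along these lines (sketch, not part of the patch):

    BUILD_BUG_ON(offsetof(struct hif_mib_count_table, count_drop_bip_mic) !=
                 offsetof(struct hif_mib_extended_count_table, count_drop_bip_mic));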
diff --git a/drivers/staging/wfx/hif_rx.c b/drivers/staging/wfx/hif_rx.c
index 9fca7f26372a..6963b54d5593 100644
--- a/drivers/staging/wfx/hif_rx.c
+++ b/drivers/staging/wfx/hif_rx.c
@@ -1,7 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Implementation of chip-to-host event (aka indications) of WFxxx Split Mac
- * (WSM) API.
+ * Handling of the chip-to-host events (aka indications) of the hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -20,10 +19,10 @@
static int hif_generic_confirm(struct wfx_dev *wdev,
const struct hif_msg *hif, const void *buf)
{
- // All confirm messages start with status
+ /* All confirm messages start with status */
int status = le32_to_cpup((__le32 *)buf);
int cmd = hif->id;
- int len = le16_to_cpu(hif->len) - 4; // drop header
+ int len = le16_to_cpu(hif->len) - 4; /* drop header */
WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error");
@@ -175,13 +174,14 @@ static int hif_scan_complete_indication(struct wfx_dev *wdev,
const void *buf)
{
struct wfx_vif *wvif = wdev_to_wvif(wdev, hif->interface);
+ const struct hif_ind_scan_cmpl *body = buf;
if (!wvif) {
dev_warn(wdev->dev, "%s: received event for non-existent vif\n", __func__);
return -EIO;
}
- wfx_scan_complete(wvif);
+ wfx_scan_complete(wvif, body->num_channels_completed);
return 0;
}
@@ -244,7 +244,7 @@ static int hif_generic_indication(struct wfx_dev *wdev,
return 0;
case HIF_GENERIC_INDICATION_TYPE_RX_STATS:
mutex_lock(&wdev->rx_stats_lock);
- // Older firmware send a generic indication beside RxStats
+ /* Older firmware sends a generic indication along with RxStats */
if (!wfx_api_older_than(wdev, 1, 4))
dev_info(wdev->dev, "Rx test ongoing. Temperature: %d degrees C\n",
body->data.rx_stats.current_temp);
@@ -297,7 +297,7 @@ static const struct {
"bus clock is too slow (<1kHz)" },
{ HIF_ERROR_HIF_RX_DATA_TOO_LARGE,
"HIF message too large" },
- // Following errors only exists in old firmware versions:
+ /* The following errors only exist in old firmware versions: */
{ HIF_ERROR_HIF_TX_QUEUE_FULL,
"HIF messages queue is full" },
{ HIF_ERROR_HIF_BUS,
@@ -374,7 +374,7 @@ static const struct {
{ HIF_IND_ID_GENERIC, hif_generic_indication },
{ HIF_IND_ID_ERROR, hif_error_indication },
{ HIF_IND_ID_EXCEPTION, hif_exception_indication },
- // FIXME: allocate skb_p from hif_receive_indication and make it generic
+ /* FIXME: allocate skb_p from hif_receive_indication and make it generic */
//{ HIF_IND_ID_RX, hif_receive_indication },
};
@@ -385,12 +385,13 @@ void wfx_handle_rx(struct wfx_dev *wdev, struct sk_buff *skb)
int hif_id = hif->id;
if (hif_id == HIF_IND_ID_RX) {
- // hif_receive_indication take care of skb lifetime
+ /* hif_receive_indication takes care of the skb lifetime */
hif_receive_indication(wdev, hif, hif->body, skb);
return;
}
- // Note: mutex_is_lock cause an implicit memory barrier that protect
- // buf_send
+ /* Note: mutex_is_locked() causes an implicit memory barrier that protects
+ * buf_send
+ */
if (mutex_is_locked(&wdev->hif_cmd.lock) &&
wdev->hif_cmd.buf_send &&
wdev->hif_cmd.buf_send->id == hif_id) {
diff --git a/drivers/staging/wfx/hif_rx.h b/drivers/staging/wfx/hif_rx.h
index f07c10c8c6bd..96543b81fa77 100644
--- a/drivers/staging/wfx/hif_rx.h
+++ b/drivers/staging/wfx/hif_rx.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Implementation of chip-to-host event (aka indications) of WFxxx Split Mac
- * (WSM) API.
+ * Handling of the chip-to-host events (aka indications) of the hardware API.
*
* Copyright (c) 2017-2019, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
diff --git a/drivers/staging/wfx/hif_tx.c b/drivers/staging/wfx/hif_tx.c
index 63b437261eb7..2fd8bbd36e25 100644
--- a/drivers/staging/wfx/hif_tx.c
+++ b/drivers/staging/wfx/hif_tx.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
- * Split Mac (WSM) API.
+ * Implementation of the host-to-chip commands (aka request/confirmation) of the
+ * hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -28,7 +28,7 @@ static void wfx_fill_header(struct hif_msg *hif, int if_id,
if (if_id == -1)
if_id = 2;
- WARN(cmd > 0x3f, "invalid WSM command %#.2x", cmd);
+ WARN(cmd > 0x3f, "invalid hardware command %#.2x", cmd);
WARN(size > 0xFFF, "requested buffer is too large: %zu bytes", size);
WARN(if_id > 0x3, "invalid interface ID %d", if_id);
@@ -55,15 +55,16 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
int vif = request->interface;
int ret;
- // Do not wait for any reply if chip is frozen
+ /* Do not wait for any reply if chip is frozen */
if (wdev->chip_frozen)
return -ETIMEDOUT;
mutex_lock(&wdev->hif_cmd.lock);
WARN(wdev->hif_cmd.buf_send, "data locking error");
- // Note: call to complete() below has an implicit memory barrier that
- // hopefully protect buf_send
+ /* Note: the call to complete() below has an implicit memory barrier that
+ * hopefully protects buf_send
+ */
wdev->hif_cmd.buf_send = request;
wdev->hif_cmd.buf_recv = reply;
wdev->hif_cmd.len_recv = reply_len;
@@ -72,8 +73,9 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
wfx_bh_request_tx(wdev);
if (no_reply) {
- // Chip won't reply. Give enough time to the wq to send the
- // buffer.
+ /* Chip won't reply. Give enough time to the wq to send the
+ * buffer.
+ */
msleep(100);
wdev->hif_cmd.buf_send = NULL;
mutex_unlock(&wdev->hif_cmd.lock);
@@ -108,19 +110,18 @@ int wfx_cmd_send(struct wfx_dev *wdev, struct hif_msg *request,
mib_sep = "/";
}
if (ret < 0)
- dev_err(wdev->dev,
- "WSM request %s%s%s (%#.2x) on vif %d returned error %d\n",
+ dev_err(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned error %d\n",
get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
if (ret > 0)
- dev_warn(wdev->dev,
- "WSM request %s%s%s (%#.2x) on vif %d returned status %d\n",
+ dev_warn(wdev->dev, "hardware request %s%s%s (%#.2x) on vif %d returned status %d\n",
get_hif_name(cmd), mib_sep, mib_name, cmd, vif, ret);
return ret;
}
-// This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any
-// request anymore. Obviously, only call this function during device unregister.
+/* This function is special. After HIF_REQ_ID_SHUT_DOWN, chip won't reply to any
+ * request anymore. Obviously, only call this function during device unregister.
+ */
int hif_shutdown(struct wfx_dev *wdev)
{
int ret;
@@ -227,14 +228,13 @@ int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
}
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
- int chan_start_idx, int chan_num, int *timeout)
+ int chan_start_idx, int chan_num)
{
int ret, i;
struct hif_msg *hif;
size_t buf_len =
sizeof(struct hif_req_start_scan_alt) + chan_num * sizeof(u8);
struct hif_req_start_scan_alt *body = wfx_alloc_hif(buf_len, &hif);
- int tmo_chan_fg, tmo_chan_bg, tmo;
WARN(chan_num > HIF_API_MAX_NB_CHANNELS, "invalid params");
WARN(req->n_ssids > HIF_API_MAX_NB_SSIDS, "invalid params");
@@ -269,12 +269,6 @@ int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req,
body->num_of_probe_requests = 2;
body->probe_delay = 100;
}
- tmo_chan_bg = le32_to_cpu(body->max_channel_time) * USEC_PER_TU;
- tmo_chan_fg = 512 * USEC_PER_TU + body->probe_delay;
- tmo_chan_fg *= body->num_of_probe_requests;
- tmo = chan_num * max(tmo_chan_bg, tmo_chan_fg) + 512 * USEC_PER_TU;
- if (timeout)
- *timeout = usecs_to_jiffies(tmo);
wfx_fill_header(hif, wvif->id, HIF_REQ_ID_START_SCAN, buf_len);
ret = wfx_cmd_send(wvif->wdev, hif, NULL, 0, false);
@@ -286,7 +280,7 @@ int hif_stop_scan(struct wfx_vif *wvif)
{
int ret;
struct hif_msg *hif;
- // body associated to HIF_REQ_ID_STOP_SCAN is empty
+ /* The body associated with HIF_REQ_ID_STOP_SCAN is empty */
wfx_alloc_hif(0, &hif);
if (!hif)
@@ -308,16 +302,11 @@ int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
WARN_ON(!conf->basic_rates);
WARN_ON(sizeof(body->ssid) < ssidlen);
WARN(!conf->ibss_joined && !ssidlen, "joining an unknown BSS");
- if (WARN_ON(!channel))
- return -EINVAL;
if (!hif)
return -ENOMEM;
body->infrastructure_bss_mode = !conf->ibss_joined;
body->short_preamble = conf->use_short_preamble;
- if (channel->flags & IEEE80211_CHAN_NO_IR)
- body->probe_for_join = 0;
- else
- body->probe_for_join = 1;
+ body->probe_for_join = !(channel->flags & IEEE80211_CHAN_NO_IR);
body->channel_number = channel->hw_value;
body->beacon_interval = cpu_to_le32(conf->beacon_int);
body->basic_rate_set =
@@ -355,16 +344,17 @@ int hif_add_key(struct wfx_dev *wdev, const struct hif_req_add_key *arg)
{
int ret;
struct hif_msg *hif;
- // FIXME: only send necessary bits
+ /* FIXME: only send necessary bits */
struct hif_req_add_key *body = wfx_alloc_hif(sizeof(*body), &hif);
if (!hif)
return -ENOMEM;
- // FIXME: swap bytes as necessary in body
+ /* FIXME: swap bytes as necessary in body */
memcpy(body, arg, sizeof(*body));
if (wfx_api_older_than(wdev, 1, 5))
- // Legacy firmwares expect that add_key to be sent on right
- // interface.
+ /* Legacy firmware expects add_key to be sent on the right
+ * interface.
+ */
wfx_fill_header(hif, arg->int_id, HIF_REQ_ID_ADD_KEY,
sizeof(*body));
else
@@ -408,7 +398,7 @@ int hif_set_edca_queue_params(struct wfx_vif *wvif, u16 queue,
body->cw_max = cpu_to_le16(arg->cw_max);
body->tx_op_limit = cpu_to_le16(arg->txop * USEC_PER_TXOP);
body->queue_id = 3 - queue;
- // API 2.0 has changed queue IDs values
+ /* API 2.0 has changed the queue ID values */
if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BE)
body->queue_id = HIF_QUEUE_ID_BACKGROUND;
if (wfx_api_older_than(wvif->wdev, 2, 0) && queue == IEEE80211_AC_BK)
@@ -433,7 +423,7 @@ int hif_set_pm(struct wfx_vif *wvif, bool ps, int dynamic_ps_timeout)
return -ENOMEM;
if (ps) {
body->enter_psm = 1;
- // Firmware does not support more than 128ms
+ /* Firmware does not support more than 128ms */
body->fast_psm_idle_period = min(dynamic_ps_timeout * 2, 255);
if (body->fast_psm_idle_period)
body->fast_psm = 1;
diff --git a/drivers/staging/wfx/hif_tx.h b/drivers/staging/wfx/hif_tx.h
index 3521c545ae6b..e57eabdcfa77 100644
--- a/drivers/staging/wfx/hif_tx.h
+++ b/drivers/staging/wfx/hif_tx.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Implementation of host-to-chip commands (aka request/confirmation) of WFxxx
- * Split Mac (WSM) API.
+ * Implementation of the host-to-chip commands (aka request/confirmation) of the
+ * hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -40,7 +40,7 @@ int hif_read_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
int hif_write_mib(struct wfx_dev *wdev, int vif_id, u16 mib_id,
void *buf, size_t buf_size);
int hif_scan(struct wfx_vif *wvif, struct cfg80211_scan_request *req80211,
- int chan_start, int chan_num, int *timeout);
+ int chan_start, int chan_num);
int hif_stop_scan(struct wfx_vif *wvif);
int hif_join(struct wfx_vif *wvif, const struct ieee80211_bss_conf *conf,
struct ieee80211_channel *channel, const u8 *ssid, int ssidlen);
diff --git a/drivers/staging/wfx/hif_tx_mib.c b/drivers/staging/wfx/hif_tx_mib.c
index 1926cf1b62be..97e961e6bcf6 100644
--- a/drivers/staging/wfx/hif_tx_mib.c
+++ b/drivers/staging/wfx/hif_tx_mib.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
+ * Implementation of the host-to-chip MIBs of the hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -68,25 +68,25 @@ int hif_get_counters_table(struct wfx_dev *wdev, int vif_id,
struct hif_mib_extended_count_table *arg)
{
if (wfx_api_older_than(wdev, 1, 3)) {
- // extended_count_table is wider than count_table
+ /* extended_count_table is wider than count_table */
memset(arg, 0xFF, sizeof(*arg));
return hif_read_mib(wdev, vif_id, HIF_MIB_ID_COUNTERS_TABLE,
arg, sizeof(struct hif_mib_count_table));
} else {
return hif_read_mib(wdev, vif_id,
HIF_MIB_ID_EXTENDED_COUNTERS_TABLE, arg,
- sizeof(struct hif_mib_extended_count_table));
+ sizeof(struct hif_mib_extended_count_table));
}
}
int hif_set_macaddr(struct wfx_vif *wvif, u8 *mac)
{
- struct hif_mib_mac_address msg = { };
+ struct hif_mib_mac_address arg = { };
if (mac)
- ether_addr_copy(msg.mac_addr, mac);
+ ether_addr_copy(arg.mac_addr, mac);
return hif_write_mib(wvif->wdev, wvif->id, HIF_MIB_ID_DOT11_MAC_ADDRESS,
- &msg, sizeof(msg));
+ &arg, sizeof(arg));
}
int hif_set_rx_filter(struct wfx_vif *wvif,
@@ -246,7 +246,7 @@ int hif_set_arp_ipv4_filter(struct wfx_vif *wvif, int idx, __be32 *addr)
};
if (addr) {
- // Caution: type of addr is __be32
+ /* Caution: type of addr is __be32 */
memcpy(arg.ipv4_address, addr, sizeof(arg.ipv4_address));
arg.arp_enable = HIF_ARP_NS_FILTERING_ENABLE;
}
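Aside: because hif_get_counters_table() above pre-fills the extended table with 0xFF before falling back to the narrower legacy table on old firmware, any counter the firmware cannot report is left at 0xFFFFFFFF. A caller could skip those with a check like this (sketch only, hypothetical helper name):

    /* A counter still equal to the 0xFF fill pattern was not reported */
    static bool wfx_counter_is_reported(__le32 counter)
    {
            return le32_to_cpu(counter) != 0xFFFFFFFF;
    }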
diff --git a/drivers/staging/wfx/hif_tx_mib.h b/drivers/staging/wfx/hif_tx_mib.h
index 812b3ba0f00e..2a3b84868ee4 100644
--- a/drivers/staging/wfx/hif_tx_mib.h
+++ b/drivers/staging/wfx/hif_tx_mib.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Implementation of host-to-chip MIBs of WFxxx Split Mac (WSM) API.
+ * Implementation of the host-to-chip MIBs of the hardware API.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
diff --git a/drivers/staging/wfx/hwio.c b/drivers/staging/wfx/hwio.c
index 36fbc5b5d64c..30eb888830d2 100644
--- a/drivers/staging/wfx/hwio.c
+++ b/drivers/staging/wfx/hwio.c
@@ -31,7 +31,7 @@ static int read32(struct wfx_dev *wdev, int reg, u32 *val)
int ret;
__le32 *tmp = kmalloc(sizeof(u32), GFP_KERNEL);
- *val = ~0; // Never return undefined value
+ *val = ~0; /* Never return undefined value */
if (!tmp)
return -ENOMEM;
ret = wdev->hwbus_ops->copy_from_io(wdev->hwbus_priv, reg, tmp,
@@ -153,7 +153,7 @@ static int indirect_read(struct wfx_dev *wdev, int reg, u32 addr,
err:
if (ret < 0)
- memset(buf, 0xFF, len); // Never return undefined value
+ memset(buf, 0xFF, len); /* Never return undefined value */
return ret;
}
@@ -335,7 +335,7 @@ int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val)
{
int ret;
- *val = ~0; // Never return undefined value
+ *val = ~0; /* Never return undefined value */
ret = write32_locked(wdev, WFX_REG_SET_GEN_R_W, IGPR_RW | index << 24);
if (ret)
return ret;
diff --git a/drivers/staging/wfx/hwio.h b/drivers/staging/wfx/hwio.h
index 0b8e4f7157df..ff09575dd1af 100644
--- a/drivers/staging/wfx/hwio.h
+++ b/drivers/staging/wfx/hwio.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Low-level API.
+ * Low-level I/O functions.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -27,30 +27,30 @@ int sram_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
int ahb_reg_read(struct wfx_dev *wdev, u32 addr, u32 *val);
int ahb_reg_write(struct wfx_dev *wdev, u32 addr, u32 val);
-#define CFG_ERR_SPI_FRAME 0x00000001 // only with SPI
-#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 // only with SDIO
+#define CFG_ERR_SPI_FRAME 0x00000001 /* only with SPI */
+#define CFG_ERR_SDIO_BUF_MISMATCH 0x00000001 /* only with SDIO */
#define CFG_ERR_BUF_UNDERRUN 0x00000002
#define CFG_ERR_DATA_IN_TOO_LARGE 0x00000004
#define CFG_ERR_HOST_NO_OUT_QUEUE 0x00000008
#define CFG_ERR_BUF_OVERRUN 0x00000010
#define CFG_ERR_DATA_OUT_TOO_LARGE 0x00000020
#define CFG_ERR_HOST_NO_IN_QUEUE 0x00000040
-#define CFG_ERR_HOST_CRC_MISS 0x00000080 // only with SDIO
-#define CFG_SPI_IGNORE_CS 0x00000080 // only with SPI
-#define CFG_BYTE_ORDER_MASK 0x00000300 // only writable with SPI
+#define CFG_ERR_HOST_CRC_MISS 0x00000080 /* only with SDIO */
+#define CFG_SPI_IGNORE_CS 0x00000080 /* only with SPI */
+#define CFG_BYTE_ORDER_MASK 0x00000300 /* only writable with SPI */
#define CFG_BYTE_ORDER_BADC 0x00000000
#define CFG_BYTE_ORDER_DCBA 0x00000100
-#define CFG_BYTE_ORDER_ABCD 0x00000200 // SDIO always use this value
+#define CFG_BYTE_ORDER_ABCD 0x00000200 /* SDIO always use this value */
#define CFG_DIRECT_ACCESS_MODE 0x00000400
#define CFG_PREFETCH_AHB 0x00000800
#define CFG_DISABLE_CPU_CLK 0x00001000
#define CFG_PREFETCH_SRAM 0x00002000
#define CFG_CPU_RESET 0x00004000
-#define CFG_SDIO_DISABLE_IRQ 0x00008000 // only with SDIO
+#define CFG_SDIO_DISABLE_IRQ 0x00008000 /* only with SDIO */
#define CFG_IRQ_ENABLE_DATA 0x00010000
#define CFG_IRQ_ENABLE_WRDY 0x00020000
#define CFG_CLK_RISE_EDGE 0x00040000
-#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 // only with SDIO
+#define CFG_SDIO_DISABLE_CRC_CHK 0x00080000 /* only with SDIO */
#define CFG_RESERVED 0x00F00000
#define CFG_DEVICE_ID_MAJOR 0x07000000
#define CFG_DEVICE_ID_RESERVED 0x78000000
@@ -72,4 +72,4 @@ int control_reg_write_bits(struct wfx_dev *wdev, u32 mask, u32 val);
int igpr_reg_read(struct wfx_dev *wdev, int index, u32 *val);
int igpr_reg_write(struct wfx_dev *wdev, int index, u32 val);
-#endif /* WFX_HWIO_H */
+#endif
diff --git a/drivers/staging/wfx/key.c b/drivers/staging/wfx/key.c
index 2ab82bed4c1b..65134a174683 100644
--- a/drivers/staging/wfx/key.c
+++ b/drivers/staging/wfx/key.c
@@ -31,7 +31,7 @@ static void wfx_free_key(struct wfx_dev *wdev, int idx)
}
static u8 fill_wep_pair(struct hif_wep_pairwise_key *msg,
- struct ieee80211_key_conf *key, u8 *peer_addr)
+ struct ieee80211_key_conf *key, u8 *peer_addr)
{
WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
msg->key_length = key->keylen;
@@ -41,7 +41,7 @@ static u8 fill_wep_pair(struct hif_wep_pairwise_key *msg,
}
static u8 fill_wep_group(struct hif_wep_group_key *msg,
- struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key)
{
WARN(key->keylen > sizeof(msg->key_data), "inconsistent data");
msg->key_id = key->keyidx;
@@ -51,7 +51,7 @@ static u8 fill_wep_group(struct hif_wep_group_key *msg,
}
static u8 fill_tkip_pair(struct hif_tkip_pairwise_key *msg,
- struct ieee80211_key_conf *key, u8 *peer_addr)
+ struct ieee80211_key_conf *key, u8 *peer_addr)
{
u8 *keybuf = key->key;
@@ -68,9 +68,9 @@ static u8 fill_tkip_pair(struct hif_tkip_pairwise_key *msg,
}
static u8 fill_tkip_group(struct hif_tkip_group_key *msg,
- struct ieee80211_key_conf *key,
- struct ieee80211_key_seq *seq,
- enum nl80211_iftype iftype)
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq,
+ enum nl80211_iftype iftype)
{
u8 *keybuf = key->key;
@@ -84,16 +84,16 @@ static u8 fill_tkip_group(struct hif_tkip_group_key *msg,
memcpy(msg->tkip_key_data, keybuf, sizeof(msg->tkip_key_data));
keybuf += sizeof(msg->tkip_key_data);
if (iftype == NL80211_IFTYPE_AP)
- // Use Tx MIC Key
+ /* Use Tx MIC Key */
memcpy(msg->rx_mic_key, keybuf + 0, sizeof(msg->rx_mic_key));
else
- // Use Rx MIC Key
+ /* Use Rx MIC Key */
memcpy(msg->rx_mic_key, keybuf + 8, sizeof(msg->rx_mic_key));
return HIF_KEY_TYPE_TKIP_GROUP;
}
static u8 fill_ccmp_pair(struct hif_aes_pairwise_key *msg,
- struct ieee80211_key_conf *key, u8 *peer_addr)
+ struct ieee80211_key_conf *key, u8 *peer_addr)
{
WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
ether_addr_copy(msg->peer_address, peer_addr);
@@ -102,8 +102,8 @@ static u8 fill_ccmp_pair(struct hif_aes_pairwise_key *msg,
}
static u8 fill_ccmp_group(struct hif_aes_group_key *msg,
- struct ieee80211_key_conf *key,
- struct ieee80211_key_seq *seq)
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
{
WARN(key->keylen != sizeof(msg->aes_key_data), "inconsistent data");
memcpy(msg->aes_key_data, key->key, key->keylen);
@@ -114,7 +114,7 @@ static u8 fill_ccmp_group(struct hif_aes_group_key *msg,
}
static u8 fill_sms4_pair(struct hif_wapi_pairwise_key *msg,
- struct ieee80211_key_conf *key, u8 *peer_addr)
+ struct ieee80211_key_conf *key, u8 *peer_addr)
{
u8 *keybuf = key->key;
@@ -129,7 +129,7 @@ static u8 fill_sms4_pair(struct hif_wapi_pairwise_key *msg,
}
static u8 fill_sms4_group(struct hif_wapi_group_key *msg,
- struct ieee80211_key_conf *key)
+ struct ieee80211_key_conf *key)
{
u8 *keybuf = key->key;
@@ -143,8 +143,8 @@ static u8 fill_sms4_group(struct hif_wapi_group_key *msg,
}
static u8 fill_aes_cmac_group(struct hif_igtk_group_key *msg,
- struct ieee80211_key_conf *key,
- struct ieee80211_key_seq *seq)
+ struct ieee80211_key_conf *key,
+ struct ieee80211_key_seq *seq)
{
WARN(key->keylen != sizeof(msg->igtk_key_data), "inconsistent data");
memcpy(msg->igtk_key_data, key->key, key->keylen);
diff --git a/drivers/staging/wfx/key.h b/drivers/staging/wfx/key.h
index 70a44d0ca35e..2d135eff7af2 100644
--- a/drivers/staging/wfx/key.h
+++ b/drivers/staging/wfx/key.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Implementation of mac80211 API.
+ * Key management related functions.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -17,4 +17,4 @@ int wfx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key);
-#endif /* WFX_STA_H */
+#endif
diff --git a/drivers/staging/wfx/main.c b/drivers/staging/wfx/main.c
index 4b9fdf99981b..858d778cc589 100644
--- a/drivers/staging/wfx/main.c
+++ b/drivers/staging/wfx/main.c
@@ -35,7 +35,7 @@
#define WFX_PDS_MAX_SIZE 1500
-MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WFx");
+MODULE_DESCRIPTION("Silicon Labs 802.11 Wireless LAN driver for WF200");
MODULE_AUTHOR("Jérôme Pouiller <jerome.pouiller@silabs.com>");
MODULE_LICENSE("GPL");
@@ -92,7 +92,7 @@ static const struct ieee80211_supported_band wfx_band_2ghz = {
.bitrates = wfx_rates,
.n_bitrates = ARRAY_SIZE(wfx_rates),
.ht_cap = {
- // Receive caps
+ /* Receive caps */
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_MAX_AMSDU |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
@@ -100,7 +100,7 @@ static const struct ieee80211_supported_band wfx_band_2ghz = {
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE,
.mcs = {
- .rx_mask = { 0xFF }, // MCS0 to MCS7
+ .rx_mask = { 0xFF }, /* MCS0 to MCS7 */
.rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
@@ -163,7 +163,20 @@ bool wfx_api_older_than(struct wfx_dev *wdev, int major, int minor)
return false;
}
-/* NOTE: wfx_send_pds() destroy buf */
+/* The device needs data about the antenna configuration. This information is
+ * provided by PDS (Platform Data Set, this is the wording used in the WF200
+ * documentation) files. For hardware integrators, the full process to create
+ * PDS files is described here:
+ * https://github.com/SiliconLabs/wfx-firmware/blob/master/PDS/README.md
+ *
+ * So this function aims to send the PDS to the device. However, the PDS file is
+ * often bigger than the Rx buffers of the chip, so it has to be sent in multiple
+ * parts.
+ *
+ * In addition, the PDS data cannot be split anywhere. The PDS files contain tree
+ * structures. Braces are used to enter/leave a level of the tree (in a JSON
+ * fashion). PDS files can only be split between root nodes.
+ */
int wfx_send_pds(struct wfx_dev *wdev, u8 *buf, size_t len)
{
int ret;
@@ -220,7 +233,7 @@ static int wfx_send_pdata_pds(struct wfx_dev *wdev)
ret = request_firmware(&pds, wdev->pdata.file_pds, wdev->dev);
if (ret) {
- dev_err(wdev->dev, "can't load PDS file %s\n",
+ dev_err(wdev->dev, "can't load antenna parameters (PDS file %s). The device may be unstable.\n",
wdev->pdata.file_pds);
goto err1;
}
@@ -294,7 +307,7 @@ struct wfx_dev *wfx_init_common(struct device *dev,
hw->wiphy->n_iface_combinations = ARRAY_SIZE(wfx_iface_combinations);
hw->wiphy->iface_combinations = wfx_iface_combinations;
hw->wiphy->bands[NL80211_BAND_2GHZ] = devm_kmalloc(dev, sizeof(wfx_band_2ghz), GFP_KERNEL);
- // FIXME: also copy wfx_rates and wfx_2ghz_chantable
+ /* FIXME: also copy wfx_rates and wfx_2ghz_chantable */
memcpy(hw->wiphy->bands[NL80211_BAND_2GHZ], &wfx_band_2ghz,
sizeof(wfx_band_2ghz));
@@ -336,8 +349,9 @@ int wfx_probe(struct wfx_dev *wdev)
int err;
struct gpio_desc *gpio_saved;
- // During first part of boot, gpio_wakeup cannot yet been used. So
- // prevent bh() to touch it.
+ /* During the first part of the boot, gpio_wakeup cannot yet be used. So
+ * prevent bh() from touching it.
+ */
gpio_saved = wdev->pdata.gpio_wakeup;
wdev->pdata.gpio_wakeup = NULL;
wdev->poll_irq = true;
@@ -360,7 +374,7 @@ int wfx_probe(struct wfx_dev *wdev)
goto err0;
}
- // FIXME: fill wiphy::hw_version
+ /* FIXME: fill wiphy::hw_version */
dev_info(wdev->dev, "started firmware %d.%d.%d \"%s\" (API: %d.%d, keyset: %02X, caps: 0x%.8X)\n",
wdev->hw_caps.firmware_major, wdev->hw_caps.firmware_minor,
wdev->hw_caps.firmware_build, wdev->hw_caps.firmware_label,
@@ -396,7 +410,7 @@ int wfx_probe(struct wfx_dev *wdev)
dev_dbg(wdev->dev, "sending configuration file %s\n",
wdev->pdata.file_pds);
err = wfx_send_pdata_pds(wdev);
- if (err < 0)
+ if (err < 0 && err != -ENOENT)
goto err0;
wdev->poll_irq = false;
@@ -440,6 +454,9 @@ int wfx_probe(struct wfx_dev *wdev)
wdev->hw->wiphy->n_addresses = ARRAY_SIZE(wdev->addresses);
wdev->hw->wiphy->addresses = wdev->addresses;
+ if (!wfx_api_older_than(wdev, 3, 8))
+ wdev->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
err = ieee80211_register_hw(wdev->hw);
if (err)
goto err1;
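Aside: the wfx_send_pds() comment above states that the PDS can only be split between root nodes, that is, where the brace depth returns to zero. A rough sketch of that chunking rule, not the driver's actual code and with a hypothetical callback:

    /* Cut a PDS buffer at the end of each root node, never inside a nested
     * brace level, and hand each chunk to the given callback.
     */
    static int pds_for_each_root_node(const char *buf, size_t len,
                                      int (*send)(const char *chunk, size_t chunk_len))
    {
            size_t start = 0, i;
            int depth = 0, ret;

            for (i = 0; i < len; i++) {
                    if (buf[i] == '{')
                            depth++;
                    if (buf[i] == '}' && --depth == 0) {
                            ret = send(buf + start, i - start + 1);
                            if (ret)
                                    return ret;
                            start = i + 1;
                    }
            }
            return depth ? -EINVAL : 0;
    }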
diff --git a/drivers/staging/wfx/main.h b/drivers/staging/wfx/main.h
index a0db322383a3..115abd2d4378 100644
--- a/drivers/staging/wfx/main.h
+++ b/drivers/staging/wfx/main.h
@@ -23,8 +23,7 @@ struct wfx_platform_data {
const char *file_fw;
const char *file_pds;
struct gpio_desc *gpio_wakeup;
- /*
- * if true HIF D_out is sampled on the rising edge of the clock
+ /* if true HIF D_out is sampled on the rising edge of the clock
* (intended to be used in 50Mhz SDIO)
*/
bool use_rising_clk;
diff --git a/drivers/staging/wfx/queue.c b/drivers/staging/wfx/queue.c
index 31c37f69c295..7a3ba3c38925 100644
--- a/drivers/staging/wfx/queue.c
+++ b/drivers/staging/wfx/queue.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * O(1) TX queue with built-in allocator.
+ * Queue between the tx operation and the bh workqueue.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -32,7 +32,7 @@ void wfx_tx_flush(struct wfx_dev *wdev)
{
int ret;
- // Do not wait for any reply if chip is frozen
+ /* Do not wait for any reply if chip is frozen */
if (wdev->chip_frozen)
return;
@@ -45,7 +45,7 @@ void wfx_tx_flush(struct wfx_dev *wdev)
dev_warn(wdev->dev, "cannot flush tx buffers (%d still busy)\n",
wdev->hif.tx_buffers_used);
wfx_pending_dump_old_frames(wdev, 3000);
- // FIXME: drop pending frames here
+ /* FIXME: drop pending frames here */
wdev->chip_frozen = true;
}
mutex_unlock(&wdev->hif_cmd.lock);
@@ -60,9 +60,10 @@ void wfx_tx_lock_flush(struct wfx_dev *wdev)
void wfx_tx_queues_init(struct wfx_vif *wvif)
{
- // The device is in charge to respect the details of the QoS parameters.
- // The driver just ensure that it roughtly respect the priorities to
- // avoid any shortage.
+ /* The device is in charge of respecting the details of the QoS parameters.
+ * The driver just ensures that it roughly respects the priorities to
+ * avoid any shortage.
+ */
const int priorities[IEEE80211_NUM_ACS] = { 1, 2, 64, 128 };
int i;
@@ -73,22 +74,22 @@ void wfx_tx_queues_init(struct wfx_vif *wvif)
}
}
+bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
+{
+ return skb_queue_empty_lockless(&queue->normal) &&
+ skb_queue_empty_lockless(&queue->cab);
+}
+
void wfx_tx_queues_check_empty(struct wfx_vif *wvif)
{
int i;
for (i = 0; i < IEEE80211_NUM_ACS; ++i) {
WARN_ON(atomic_read(&wvif->tx_queue[i].pending_frames));
- WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].normal));
- WARN_ON(!skb_queue_empty_lockless(&wvif->tx_queue[i].cab));
+ WARN_ON(!wfx_tx_queue_empty(wvif, &wvif->tx_queue[i]));
}
}
-bool wfx_tx_queue_empty(struct wfx_vif *wvif, struct wfx_queue *queue)
-{
- return skb_queue_empty(&queue->normal) && skb_queue_empty(&queue->cab);
-}
-
static void __wfx_tx_queue_drop(struct wfx_vif *wvif,
struct sk_buff_head *skb_queue,
struct sk_buff_head *dropped)
@@ -217,8 +218,9 @@ bool wfx_tx_queues_has_cab(struct wfx_vif *wvif)
if (wvif->vif->type != NL80211_IFTYPE_AP)
return false;
for (i = 0; i < IEEE80211_NUM_ACS; ++i)
- // Note: since only AP can have mcast frames in queue and only
- // one vif can be AP, all queued frames has same interface id
+ /* Note: since only the AP can have mcast frames in queue and only
+ * one vif can be AP, all queued frames have the same interface id
+ */
if (!skb_queue_empty_lockless(&wvif->tx_queue[i].cab))
return true;
return false;
@@ -237,7 +239,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
struct hif_msg *hif;
struct sk_buff *skb;
- // sort the queues
+ /* sort the queues */
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
for (i = 0; i < IEEE80211_NUM_ACS; i++) {
@@ -259,9 +261,10 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
skb = skb_dequeue(&queues[i]->cab);
if (!skb)
continue;
- // Note: since only AP can have mcast frames in queue
- // and only one vif can be AP, all queued frames has
- // same interface id
+ /* Note: since only the AP can have mcast frames in queue
+ * and only one vif can be AP, all queued frames have the
+ * same interface id
+ */
hif = (struct hif_msg *)skb->data;
WARN_ON(hif->interface != wvif->id);
WARN_ON(queues[i] !=
@@ -270,7 +273,7 @@ static struct sk_buff *wfx_tx_queues_get_skb(struct wfx_dev *wdev)
trace_queues_stats(wdev, queues[i]);
return skb;
}
- // No more multicast to sent
+ /* No more multicast to send */
wvif->after_dtim_tx_allowed = false;
schedule_work(&wvif->update_tim_work);
}
diff --git a/drivers/staging/wfx/queue.h b/drivers/staging/wfx/queue.h
index 80ba19455ef3..edd0d018b198 100644
--- a/drivers/staging/wfx/queue.h
+++ b/drivers/staging/wfx/queue.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * O(1) TX queue with built-in allocator.
+ * Queue between the tx operation and the bh workqueue.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -16,7 +16,7 @@ struct wfx_vif;
struct wfx_queue {
struct sk_buff_head normal;
- struct sk_buff_head cab; // Content After (DTIM) Beacon
+ struct sk_buff_head cab; /* Content After (DTIM) Beacon */
atomic_t pending_frames;
int priority;
};
@@ -42,4 +42,4 @@ unsigned int wfx_pending_get_pkt_us_delay(struct wfx_dev *wdev,
struct sk_buff *skb);
void wfx_pending_dump_old_frames(struct wfx_dev *wdev, unsigned int limit_ms);
-#endif /* WFX_QUEUE_H */
+#endif
diff --git a/drivers/staging/wfx/scan.c b/drivers/staging/wfx/scan.c
index fb47c7cddf2f..668ef2c60837 100644
--- a/drivers/staging/wfx/scan.c
+++ b/drivers/staging/wfx/scan.c
@@ -41,7 +41,7 @@ static int update_probe_tmpl(struct wfx_vif *wvif,
static int send_scan_req(struct wfx_vif *wvif,
struct cfg80211_scan_request *req, int start_idx)
{
- int i, ret, timeout;
+ int i, ret;
struct ieee80211_channel *ch_start, *ch_cur;
for (i = start_idx; i < req->n_channels; i++) {
@@ -56,29 +56,36 @@ static int send_scan_req(struct wfx_vif *wvif,
wfx_tx_lock_flush(wvif->wdev);
wvif->scan_abort = false;
reinit_completion(&wvif->scan_complete);
- ret = hif_scan(wvif, req, start_idx, i - start_idx, &timeout);
+ ret = hif_scan(wvif, req, start_idx, i - start_idx);
if (ret) {
wfx_tx_unlock(wvif->wdev);
return -EIO;
}
- ret = wait_for_completion_timeout(&wvif->scan_complete, timeout);
- if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
- hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
- wfx_tx_unlock(wvif->wdev);
+ ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
if (!ret) {
- dev_notice(wvif->wdev->dev, "scan timeout\n");
hif_stop_scan(wvif);
- return -ETIMEDOUT;
+ ret = wait_for_completion_timeout(&wvif->scan_complete, 1 * HZ);
+ dev_dbg(wvif->wdev->dev, "scan timeout (%d channels done)\n",
+ wvif->scan_nb_chan_done);
}
- if (wvif->scan_abort) {
+ if (!ret) {
+ dev_err(wvif->wdev->dev, "scan didn't stop\n");
+ ret = -ETIMEDOUT;
+ } else if (wvif->scan_abort) {
dev_notice(wvif->wdev->dev, "scan abort\n");
- return -ECONNABORTED;
+ ret = -ECONNABORTED;
+ } else if (wvif->scan_nb_chan_done > i - start_idx) {
+ ret = -EIO;
+ } else {
+ ret = wvif->scan_nb_chan_done;
}
- return i - start_idx;
+ if (req->channels[start_idx]->max_power != wvif->vif->bss_conf.txpower)
+ hif_set_output_power(wvif, wvif->vif->bss_conf.txpower);
+ wfx_tx_unlock(wvif->wdev);
+ return ret;
}
-/*
- * It is not really necessary to run scan request asynchronously. However,
+/* It is not really necessary to run the scan request asynchronously. However,
* there is a bug in "iw scan" when ieee80211_scan_completed() is called before
* wfx_hw_scan() return
*/
@@ -86,7 +93,7 @@ void wfx_hw_scan_work(struct work_struct *work)
{
struct wfx_vif *wvif = container_of(work, struct wfx_vif, scan_work);
struct ieee80211_scan_request *hw_req = wvif->scan_req;
- int chan_cur, ret;
+ int chan_cur, ret, err;
mutex_lock(&wvif->wdev->conf_mutex);
mutex_lock(&wvif->scan_lock);
@@ -97,11 +104,20 @@ void wfx_hw_scan_work(struct work_struct *work)
}
update_probe_tmpl(wvif, &hw_req->req);
chan_cur = 0;
+ err = 0;
do {
ret = send_scan_req(wvif, &hw_req->req, chan_cur);
- if (ret > 0)
+ if (ret > 0) {
chan_cur += ret;
- } while (ret > 0 && chan_cur < hw_req->req.n_channels);
+ err = 0;
+ }
+ if (!ret)
+ err++;
+ if (err > 2) {
+ dev_err(wvif->wdev->dev, "scan has not been able to start\n");
+ ret = -ETIMEDOUT;
+ }
+ } while (ret >= 0 && chan_cur < hw_req->req.n_channels);
mutex_unlock(&wvif->scan_lock);
mutex_unlock(&wvif->wdev->conf_mutex);
__ieee80211_scan_completed_compat(wvif->wdev->hw, ret < 0);
@@ -126,7 +142,8 @@ void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
hif_stop_scan(wvif);
}
-void wfx_scan_complete(struct wfx_vif *wvif)
+void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done)
{
+ wvif->scan_nb_chan_done = nb_chan_done;
complete(&wvif->scan_complete);
}
diff --git a/drivers/staging/wfx/scan.h b/drivers/staging/wfx/scan.h
index c7496a766478..78e3b984f375 100644
--- a/drivers/staging/wfx/scan.h
+++ b/drivers/staging/wfx/scan.h
@@ -17,6 +17,6 @@ void wfx_hw_scan_work(struct work_struct *work);
int wfx_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_scan_request *req);
void wfx_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
-void wfx_scan_complete(struct wfx_vif *wvif);
+void wfx_scan_complete(struct wfx_vif *wvif, int nb_chan_done);
-#endif /* WFX_SCAN_H */
+#endif
diff --git a/drivers/staging/wfx/sta.c b/drivers/staging/wfx/sta.c
index cb7e8abdf43c..23c0425e3929 100644
--- a/drivers/staging/wfx/sta.c
+++ b/drivers/staging/wfx/sta.c
@@ -24,7 +24,7 @@ u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates)
{
int i;
u32 ret = 0;
- // WFx only support 2GHz
+ /* The device only supports 2GHz */
struct ieee80211_supported_band *sband = wdev->hw->wiphy->bands[NL80211_BAND_2GHZ];
for (i = 0; i < sband->n_bitrates; i++) {
@@ -51,11 +51,11 @@ void wfx_cooling_timeout_work(struct work_struct *work)
void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd)
{
if (cmd == STA_NOTIFY_AWAKE) {
- // Device recover normal temperature
+ /* Device has recovered its normal temperature */
if (cancel_delayed_work(&wdev->cooling_timeout_work))
wfx_tx_unlock(wdev);
} else {
- // Device is too hot
+ /* Device is too hot */
schedule_delayed_work(&wdev->cooling_timeout_work, 10 * HZ);
wfx_tx_lock(wdev);
}
@@ -80,13 +80,18 @@ static void wfx_filter_beacon(struct wfx_vif *wvif, bool filter_beacon)
.has_changed = 1,
.no_longer = 1,
.has_appeared = 1,
+ }, {
+ .ie_id = WLAN_EID_CHANNEL_SWITCH,
+ .has_changed = 1,
+ .no_longer = 1,
+ .has_appeared = 1,
}
};
if (!filter_beacon) {
hif_beacon_filter_control(wvif, 0, 1);
} else {
- hif_set_beacon_filter_table(wvif, 3, filter_ies);
+ hif_set_beacon_filter_table(wvif, ARRAY_SIZE(filter_ies), filter_ies);
hif_beacon_filter_control(wvif, HIF_BEACON_FILTER_ENABLE, 0);
}
}
@@ -98,13 +103,14 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
struct wfx_dev *wdev = hw->priv;
bool filter_bssid, filter_prbreq, filter_beacon;
- // Notes:
- // - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
- // - PS-Poll (FIF_PSPOLL) are never filtered
- // - RTS, CTS and Ack (FIF_CONTROL) are always filtered
- // - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
- // - Firmware does (yet) allow to forward unicast traffic sent to
- // other stations (aka. promiscuous mode)
+ /* Notes:
+ * - Probe responses (FIF_BCN_PRBRESP_PROMISC) are never filtered
+ * - PS-Poll (FIF_PSPOLL) are never filtered
+ * - RTS, CTS and Ack (FIF_CONTROL) are always filtered
+ * - Broken frames (FIF_FCSFAIL and FIF_PLCPFAIL) are always filtered
+ * - Firmware does not (yet) allow forwarding unicast traffic sent to
+ * other stations (aka. promiscuous mode)
+ */
*total_flags &= FIF_BCN_PRBRESP_PROMISC | FIF_ALLMULTI | FIF_OTHER_BSS |
FIF_PROBE_REQ | FIF_PSPOLL;
@@ -112,8 +118,9 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
mutex_lock(&wvif->scan_lock);
- // Note: FIF_BCN_PRBRESP_PROMISC covers probe response and
- // beacons from other BSS
+ /* Note: FIF_BCN_PRBRESP_PROMISC covers probe response and
+ * beacons from other BSS
+ */
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
filter_beacon = false;
else
@@ -125,7 +132,7 @@ void wfx_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
else
filter_bssid = true;
- // In AP mode, chip can reply to probe request itself
+ /* In AP mode, the chip can reply to probe requests itself */
if (*total_flags & FIF_PROBE_REQ &&
wvif->vif->type == NL80211_IFTYPE_AP) {
dev_dbg(wdev->dev, "do not forward probe request in AP mode\n");
@@ -154,10 +161,18 @@ static int wfx_get_ps_timeout(struct wfx_vif *wvif, bool *enable_ps)
chan0 = wdev_to_wvif(wvif->wdev, 0)->vif->bss_conf.chandef.chan;
if (wdev_to_wvif(wvif->wdev, 1))
chan1 = wdev_to_wvif(wvif->wdev, 1)->vif->bss_conf.chandef.chan;
- if (chan0 && chan1 && chan0->hw_value != chan1->hw_value &&
- wvif->vif->type != NL80211_IFTYPE_AP) {
- // It is necessary to enable powersave if channels
- // are different.
+ if (chan0 && chan1 && wvif->vif->type != NL80211_IFTYPE_AP) {
+ if (chan0->hw_value == chan1->hw_value) {
+ /* It is useless to enable PS if channels are the same. */
+ if (enable_ps)
+ *enable_ps = false;
+ if (wvif->vif->bss_conf.assoc && wvif->vif->bss_conf.ps)
+ dev_info(wvif->wdev->dev, "ignoring requested PS mode");
+ return -1;
+ }
+ /* It is necessary to enable PS if channels
+ * are different.
+ */
if (enable_ps)
*enable_ps = true;
if (wvif->wdev->force_ps_timeout > -1)
@@ -229,8 +244,6 @@ int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
return 0;
}
-/* WSM callbacks */
-
void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi)
{
/* RSSI: signed Q8.0, RCPI: unsigned Q7.1
@@ -294,7 +307,7 @@ int wfx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
if (vif->type == NL80211_IFTYPE_STATION)
hif_set_mfp(wvif, sta->mfp, sta->mfp);
- // In station mode, the firmware interprets new link-id as a TDLS peer.
+ /* In station mode, the firmware interprets a new link-id as a TDLS peer */
if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls)
return 0;
sta_priv->link_id = ffz(wvif->link_id_map);
@@ -312,10 +325,10 @@ int wfx_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct wfx_vif *wvif = (struct wfx_vif *)vif->drv_priv;
struct wfx_sta_priv *sta_priv = (struct wfx_sta_priv *)&sta->drv_priv;
- // See note in wfx_sta_add()
+ /* See note in wfx_sta_add() */
if (!sta_priv->link_id)
return 0;
- // FIXME add a mutex?
+ /* FIXME add a mutex? */
hif_map_link(wvif, true, sta->addr, sta_priv->link_id, false);
wvif->link_id_map &= ~BIT(sta_priv->link_id);
return 0;
@@ -410,7 +423,7 @@ static void wfx_join(struct wfx_vif *wvif)
return;
}
- rcu_read_lock(); // protect ssidie
+ rcu_read_lock(); /* protect ssidie */
if (bss)
ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID);
if (ssidie) {
@@ -446,7 +459,7 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
int ampdu_density = 0;
bool greenfield = false;
- rcu_read_lock(); // protect sta
+ rcu_read_lock(); /* protect sta */
if (info->bssid && !info->ibss_joined)
sta = ieee80211_find_sta(wvif->vif, info->bssid);
if (sta && sta->ht_cap.ht_supported)
@@ -460,8 +473,9 @@ static void wfx_join_finalize(struct wfx_vif *wvif,
hif_set_association_mode(wvif, ampdu_density, greenfield,
info->use_short_preamble);
hif_keep_alive_period(wvif, 0);
- // beacon_loss_count is defined to 7 in net/mac80211/mlme.c. Let's use
- // the same value.
+ /* beacon_loss_count is defined as 7 in net/mac80211/mlme.c. Let's use
+ * the same value.
+ */
hif_set_bss_params(wvif, info->aid, 7);
hif_set_beacon_wakeup_period(wvif, 1, 1);
wfx_update_pm(wvif);
@@ -485,10 +499,11 @@ void wfx_leave_ibss(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
static void wfx_enable_beacon(struct wfx_vif *wvif, bool enable)
{
- // Driver has Content After DTIM Beacon in queue. Driver is waiting for
- // a signal from the firmware. Since we are going to stop to send
- // beacons, this signal will never happens. See also
- // wfx_suspend_resume_mc()
+ /* The driver has Content After DTIM Beacon frames in queue and is waiting
+ * for a signal from the firmware. Since we are going to stop sending
+ * beacons, this signal will never happen. See also
+ * wfx_suspend_resume_mc()
+ */
if (!enable && wfx_tx_queues_has_cab(wvif)) {
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
@@ -528,8 +543,9 @@ void wfx_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
__func__);
hif_set_beacon_wakeup_period(wvif, info->dtim_period,
info->dtim_period);
- // We temporary forwarded beacon for join process. It is now no
- // more necessary.
+ /* We temporarily forwarded beacons for the join process. It is no
+ * longer necessary.
+ */
wfx_filter_beacon(wvif, true);
}
@@ -588,9 +604,7 @@ static int wfx_update_tim(struct wfx_vif *wvif)
tim_ptr = skb->data + tim_offset;
if (tim_offset && tim_length >= 6) {
- /* Ignore DTIM count from mac80211:
- * firmware handles DTIM internally.
- */
+ /* Firmware handles DTIM counter internally */
tim_ptr[2] = 0;
/* Set/reset aid0 bit */
@@ -629,10 +643,22 @@ int wfx_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set)
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd notify_cmd)
{
+ struct wfx_vif *wvif_it;
+
if (notify_cmd != STA_NOTIFY_AWAKE)
return;
- WARN(!wfx_tx_queues_has_cab(wvif), "incorrect sequence");
- WARN(wvif->after_dtim_tx_allowed, "incorrect sequence");
+
+ /* Device won't be able to honor CAB if a scan is in progress on any
+ * interface. Prefer to skip this DTIM and wait for the next one.
+ */
+ wvif_it = NULL;
+ while ((wvif_it = wvif_iterate(wvif->wdev, wvif_it)) != NULL)
+ if (mutex_is_locked(&wvif_it->scan_lock))
+ return;
+
+ if (!wfx_tx_queues_has_cab(wvif) || wvif->after_dtim_tx_allowed)
+ dev_warn(wvif->wdev->dev, "incorrect sequence (%d CAB in queue)",
+ wfx_tx_queues_has_cab(wvif));
wvif->after_dtim_tx_allowed = true;
wfx_bh_request_tx(wvif->wdev);
}
@@ -641,15 +667,15 @@ int wfx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
- // Aggregation is implemented fully in firmware
+ /* Aggregation is implemented fully in firmware */
switch (params->action) {
case IEEE80211_AMPDU_RX_START:
case IEEE80211_AMPDU_RX_STOP:
- // Just acknowledge it to enable frame re-ordering
+ /* Just acknowledge it to enable frame re-ordering */
return 0;
default:
- // Leave the firmware doing its business for tx aggregation
- return -ENOTSUPP;
+ /* Leave the firmware doing its business for tx aggregation */
+ return -EOPNOTSUPP;
}
}
@@ -720,11 +746,11 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
return -EOPNOTSUPP;
}
- // FIXME: prefer use of container_of() to get vif
+ /* FIXME: prefer use of container_of() to get vif */
wvif->vif = vif;
wvif->wdev = wdev;
- wvif->link_id_map = 1; // link-id 0 is reserved for multicast
+ wvif->link_id_map = 1; /* link-id 0 is reserved for multicast */
INIT_WORK(&wvif->update_tim_work, wfx_update_tim_work);
INIT_DELAYED_WORK(&wvif->beacon_loss_work, wfx_beacon_loss_work);
@@ -754,7 +780,7 @@ int wfx_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- // Combo mode does not support Block Acks. We can re-enable them
+ /* Combo mode does not support Block Acks. We can re-enable them */
if (wvif_count(wdev) == 1)
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
@@ -786,7 +812,7 @@ void wfx_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
wvif = NULL;
while ((wvif = wvif_iterate(wdev, wvif)) != NULL) {
- // Combo mode does not support Block Acks. We can re-enable them
+ /* Combo mode does not support Block Acks. We can re-enable them */
if (wvif_count(wdev) == 1)
hif_set_block_ack_policy(wvif, 0xFF, 0xFF);
else
diff --git a/drivers/staging/wfx/sta.h b/drivers/staging/wfx/sta.h
index d7b5df5ea4e6..4d7e38be4235 100644
--- a/drivers/staging/wfx/sta.h
+++ b/drivers/staging/wfx/sta.h
@@ -18,7 +18,7 @@ struct wfx_sta_priv {
int vif_id;
};
-// mac80211 interface
+/* mac80211 interface */
int wfx_start(struct ieee80211_hw *hw);
void wfx_stop(struct ieee80211_hw *hw);
int wfx_config(struct ieee80211_hw *hw, u32 changed);
@@ -59,15 +59,15 @@ void wfx_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_chanctx_conf *conf);
-// WSM Callbacks
+/* Hardware API Callbacks */
void wfx_cooling_timeout_work(struct work_struct *work);
void wfx_suspend_hot_dev(struct wfx_dev *wdev, enum sta_notify_cmd cmd);
void wfx_suspend_resume_mc(struct wfx_vif *wvif, enum sta_notify_cmd cmd);
void wfx_event_report_rssi(struct wfx_vif *wvif, u8 raw_rcpi_rssi);
int wfx_update_pm(struct wfx_vif *wvif);
-// Other Helpers
+/* Other Helpers */
void wfx_reset(struct wfx_vif *wvif);
u32 wfx_rate_mask_to_hw(struct wfx_dev *wdev, u32 rates);
-#endif /* WFX_STA_H */
+#endif
diff --git a/drivers/staging/wfx/traces.h b/drivers/staging/wfx/traces.h
index e34c7a538c65..e90dc73c4b01 100644
--- a/drivers/staging/wfx/traces.h
+++ b/drivers/staging/wfx/traces.h
@@ -378,7 +378,7 @@ TRACE_EVENT(tx_stats,
__array(int, tx_count, 4)
),
TP_fast_assign(
- // Keep sync with wfx_rates definition in main.c
+ /* Keep in sync with the wfx_rates definition in main.c */
static const int hw_rate[] = { 0, 1, 2, 3, 6, 7, 8, 9,
10, 11, 12, 13 };
const struct ieee80211_tx_info *tx_info =
diff --git a/drivers/staging/wfx/wfx.h b/drivers/staging/wfx/wfx.h
index 94898680ccde..f8df59ad1639 100644
--- a/drivers/staging/wfx/wfx.h
+++ b/drivers/staging/wfx/wfx.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Common private data for Silicon Labs WFx chips.
+ * Common private data.
*
* Copyright (c) 2017-2020, Silicon Laboratories, Inc.
* Copyright (c) 2010, ST-Ericsson
@@ -22,7 +22,7 @@
#include "queue.h"
#include "hif_tx.h"
-#define USEC_PER_TXOP 32 // see struct ieee80211_tx_queue_params
+#define USEC_PER_TXOP 32 /* see struct ieee80211_tx_queue_params */
#define USEC_PER_TU 1024
struct hwbus_ops;
@@ -85,6 +85,7 @@ struct wfx_vif {
struct mutex scan_lock;
struct work_struct scan_work;
struct completion scan_complete;
+ int scan_nb_chan_done;
bool scan_abort;
struct ieee80211_scan_request *scan_req;
@@ -98,12 +99,9 @@ static inline struct wfx_vif *wdev_to_wvif(struct wfx_dev *wdev, int vif_id)
return NULL;
}
vif_id = array_index_nospec(vif_id, ARRAY_SIZE(wdev->vif));
- if (!wdev->vif[vif_id]) {
- dev_dbg(wdev->dev, "requesting non-allocated vif: %d\n",
- vif_id);
+ if (!wdev->vif[vif_id])
return NULL;
- }
- return (struct wfx_vif *) wdev->vif[vif_id]->drv_priv;
+ return (struct wfx_vif *)wdev->vif[vif_id]->drv_priv;
}
static inline struct wfx_vif *wvif_iterate(struct wfx_dev *wdev,
@@ -163,4 +161,4 @@ static inline int memzcmp(void *src, unsigned int size)
return memcmp(buf, buf + 1, size - 1);
}
-#endif /* WFX_H */
+#endif
diff --git a/drivers/staging/wlan-ng/hfa384x.h b/drivers/staging/wlan-ng/hfa384x.h
index 75ed8bc4bbc1..98c154a8d8c1 100644
--- a/drivers/staging/wlan-ng/hfa384x.h
+++ b/drivers/staging/wlan-ng/hfa384x.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* hfa384x.h
+/*
*
* Defines the constants and data structures for the hfa384x
*
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 8c8524679ba3..938e11a1a0b6 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/prism2/driver/hfa384x_usb.c
+/*
*
* Functions that talk to the USB variant of the Intersil hfa384x MAC
*
@@ -3778,18 +3778,18 @@ static void hfa384x_usb_throttlefn(struct timer_list *t)
spin_lock_irqsave(&hw->ctlxq.lock, flags);
- /*
- * We need to check BOTH the RX and the TX throttle controls,
- * so we use the bitwise OR instead of the logical OR.
- */
pr_debug("flags=0x%lx\n", hw->usb_flags);
- if (!hw->wlandev->hwremoved &&
- ((test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
- !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags)) |
- (test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
- !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags))
- )) {
- schedule_work(&hw->usb_work);
+ if (!hw->wlandev->hwremoved) {
+ bool rx_throttle = test_and_clear_bit(THROTTLE_RX, &hw->usb_flags) &&
+ !test_and_set_bit(WORK_RX_RESUME, &hw->usb_flags);
+ bool tx_throttle = test_and_clear_bit(THROTTLE_TX, &hw->usb_flags) &&
+ !test_and_set_bit(WORK_TX_RESUME, &hw->usb_flags);
+ /*
+ * We need to check BOTH the RX and the TX throttle controls,
+ * so we use the bitwise OR instead of the logical OR.
+ */
+ if (rx_throttle | tx_throttle)
+ schedule_work(&hw->usb_work);
}
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
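Why the bitwise OR matters in the pre-rework single expression: a logical || would short-circuit and never evaluate the TX-side test_and_clear_bit() once the RX side matched, leaving the TX throttle flag set. A minimal, self-contained userspace sketch of that difference (stand-in helper names, not driver code):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the side-effecting test_and_clear_bit()/test_and_set_bit()
 * sequences in the driver: each call consumes a pending throttle flag.
 */
static bool consume_rx_throttle(void) { puts("rx flag consumed"); return true; }
static bool consume_tx_throttle(void) { puts("tx flag consumed"); return true; }

int main(void)
{
	/* Logical OR short-circuits: the TX flag is never consumed here. */
	if (consume_rx_throttle() || consume_tx_throttle())
		puts("schedule work (TX throttle flag left pending)");

	/* Bitwise OR evaluates both operands, so both flags are consumed. */
	if (consume_rx_throttle() | consume_tx_throttle())
		puts("schedule work (both throttle flags handled)");

	return 0;
}

In the reworked hunk above, both helpers are already evaluated as separate statements before the test, so the remaining | there mainly preserves the documented intent.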
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 59b25ca50d15..cd271b1da69f 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/p80211/p80211conv.c
+/*
*
* Ether/802.11 conversions and packet buffer routines
*
diff --git a/drivers/staging/wlan-ng/p80211conv.h b/drivers/staging/wlan-ng/p80211conv.h
index 63c423507fe8..dfb762bce84d 100644
--- a/drivers/staging/wlan-ng/p80211conv.h
+++ b/drivers/staging/wlan-ng/p80211conv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211conv.h
+/*
*
* Ether/802.11 conversions and packet buffer routines
*
diff --git a/drivers/staging/wlan-ng/p80211hdr.h b/drivers/staging/wlan-ng/p80211hdr.h
index 5871a55e4a61..93195a4c5b01 100644
--- a/drivers/staging/wlan-ng/p80211hdr.h
+++ b/drivers/staging/wlan-ng/p80211hdr.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211hdr.h
+/*
*
* Macros, types, and functions for handling 802.11 MAC headers
*
diff --git a/drivers/staging/wlan-ng/p80211ioctl.h b/drivers/staging/wlan-ng/p80211ioctl.h
index 77e8d2913b76..b50ce11147dd 100644
--- a/drivers/staging/wlan-ng/p80211ioctl.h
+++ b/drivers/staging/wlan-ng/p80211ioctl.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211ioctl.h
+/*
*
* Declares constants and types for the p80211 ioctls
*
diff --git a/drivers/staging/wlan-ng/p80211mgmt.h b/drivers/staging/wlan-ng/p80211mgmt.h
index 1457a6def5a2..1ef30d3f3159 100644
--- a/drivers/staging/wlan-ng/p80211mgmt.h
+++ b/drivers/staging/wlan-ng/p80211mgmt.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211mgmt.h
+/*
*
* Macros, types, and functions to handle 802.11 mgmt frames
*
diff --git a/drivers/staging/wlan-ng/p80211msg.h b/drivers/staging/wlan-ng/p80211msg.h
index 114066526df4..f68d8b7d5ad8 100644
--- a/drivers/staging/wlan-ng/p80211msg.h
+++ b/drivers/staging/wlan-ng/p80211msg.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211msg.h
+/*
*
* Macros, constants, types, and funcs for req and ind messages
*
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 2a3f9385ab3f..255500448ad3 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/p80211/p80211knetdev.c
+/*
*
* Linux Kernel net device interface
*
@@ -616,7 +616,7 @@ static int p80211knetdev_set_mac_address(struct net_device *dev, void *addr)
result = -EADDRNOTAVAIL;
} else {
/* everything's ok, change the addr in netdev */
- memcpy(dev->dev_addr, new_addr->sa_data, dev->addr_len);
+ eth_hw_addr_set(dev, new_addr->sa_data);
}
return result;
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 25e5116b1590..5654dc54ae91 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211netdev.h
+/*
*
* WLAN net device structure and functions
*
diff --git a/drivers/staging/wlan-ng/p80211req.c b/drivers/staging/wlan-ng/p80211req.c
index 9f5c1267d829..809cf3d480e9 100644
--- a/drivers/staging/wlan-ng/p80211req.c
+++ b/drivers/staging/wlan-ng/p80211req.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/p80211/p80211req.c
+/*
*
* Request/Indication/MacMgmt interface handling functions
*
diff --git a/drivers/staging/wlan-ng/p80211req.h b/drivers/staging/wlan-ng/p80211req.h
index c04053f3b02b..bc45cd5f91e4 100644
--- a/drivers/staging/wlan-ng/p80211req.h
+++ b/drivers/staging/wlan-ng/p80211req.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* p80211req.h
+/*
*
* Request handling functions
*
diff --git a/drivers/staging/wlan-ng/p80211types.h b/drivers/staging/wlan-ng/p80211types.h
index 3dcdd022da61..6486612a8f31 100644
--- a/drivers/staging/wlan-ng/p80211types.h
+++ b/drivers/staging/wlan-ng/p80211types.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
/*
- * p80211types.h
+ *
*
* Macros, constants, types, and funcs for p80211 data types
*
diff --git a/drivers/staging/wlan-ng/p80211wep.c b/drivers/staging/wlan-ng/p80211wep.c
index 51d917c8cdc8..3ff7ee7011df 100644
--- a/drivers/staging/wlan-ng/p80211wep.c
+++ b/drivers/staging/wlan-ng/p80211wep.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/p80211/p80211wep.c
+/*
*
* WEP encode/decode for P80211.
*
diff --git a/drivers/staging/wlan-ng/prism2mgmt.c b/drivers/staging/wlan-ng/prism2mgmt.c
index 1bd36dc2b7ff..9030a8939a9b 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.c
+++ b/drivers/staging/wlan-ng/prism2mgmt.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/prism2/driver/prism2mgmt.c
+/*
*
* Management request handler functions.
*
diff --git a/drivers/staging/wlan-ng/prism2mgmt.h b/drivers/staging/wlan-ng/prism2mgmt.h
index 17bc1ee0d498..7132cec2d7eb 100644
--- a/drivers/staging/wlan-ng/prism2mgmt.h
+++ b/drivers/staging/wlan-ng/prism2mgmt.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1) */
-/* prism2mgmt.h
+/*
*
* Declares the mgmt command handler functions
*
diff --git a/drivers/staging/wlan-ng/prism2mib.c b/drivers/staging/wlan-ng/prism2mib.c
index d14f032a7ed6..24ba10d6bd0b 100644
--- a/drivers/staging/wlan-ng/prism2mib.c
+++ b/drivers/staging/wlan-ng/prism2mib.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/prism2/driver/prism2mib.c
+/*
*
* Management request for mibset/mibget
*
diff --git a/drivers/staging/wlan-ng/prism2sta.c b/drivers/staging/wlan-ng/prism2sta.c
index f67b7405156a..daa7cc4e897c 100644
--- a/drivers/staging/wlan-ng/prism2sta.c
+++ b/drivers/staging/wlan-ng/prism2sta.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: (GPL-2.0 OR MPL-1.1)
-/* src/prism2/driver/prism2sta.c
+/*
*
* Implements the station functionality for prism2
*
@@ -585,6 +585,7 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
struct hfa384x *hw = wlandev->priv;
u16 temp;
u8 snum[HFA384x_RID_NICSERIALNUMBER_LEN];
+ u8 addr[ETH_ALEN];
/* Collect version and compatibility info */
/* Some are critical, some are not */
@@ -855,11 +856,12 @@ static int prism2sta_getcardinfo(struct wlandevice *wlandev)
/* Collect the MAC address */
result = hfa384x_drvr_getconfig(hw, HFA384x_RID_CNFOWNMACADDR,
- wlandev->netdev->dev_addr, ETH_ALEN);
+ addr, ETH_ALEN);
if (result != 0) {
netdev_err(wlandev->netdev, "Failed to retrieve mac address\n");
goto failed;
}
+ eth_hw_addr_set(wlandev->netdev, addr);
/* short preamble is always implemented */
wlandev->nsdcaps |= P80211_NSDCAP_SHORT_PREAMBLE;
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 4b08dc1da4f9..dc0749b8eff7 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -34,14 +34,13 @@ static const struct usb_device_id usb_prism_tbl[] = {
PRISM_DEV(0x04f1, 0x3009, "JVC MP-XP7250 Builtin USB WLAN Adapter"),
PRISM_DEV(0x0846, 0x4110, "NetGear MA111"),
PRISM_DEV(0x03f3, 0x0020, "Adaptec AWN-8020 USB WLAN Adapter"),
- PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 Wireless USB Adapter"),
+ PRISM_DEV(0x2821, 0x3300, "ASUS-WL140 / Hawking HighDB Wireless USB Adapter"),
PRISM_DEV(0x2001, 0x3700, "DWL-122 Wireless USB Adapter"),
PRISM_DEV(0x2001, 0x3702, "DWL-120 Rev F Wireless USB Adapter"),
PRISM_DEV(0x50c2, 0x4013, "Averatec USB WLAN Adapter"),
PRISM_DEV(0x2c02, 0x14ea, "Planex GW-US11H WLAN USB Adapter"),
PRISM_DEV(0x124a, 0x168b, "Airvast PRISM3 WLAN USB Adapter"),
PRISM_DEV(0x083a, 0x3503, "T-Sinus 111 USB WLAN Adapter"),
- PRISM_DEV(0x2821, 0x3300, "Hawking HighDB USB Adapter"),
PRISM_DEV(0x0411, 0x0044, "Melco WLI-USB-KB11 11Mbps WLAN Adapter"),
PRISM_DEV(0x1668, 0x6106, "ROPEX FreeLan 802.11b USB Adapter"),
PRISM_DEV(0x124a, 0x4017, "Pheenet WL-503IA 802.11b USB Adapter"),
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index ef4a8e189fba..8190b840065f 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -20,6 +20,7 @@
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -244,7 +245,7 @@ struct target_core_file_cmd {
struct bio_vec bvecs[];
};
-static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
+static void cmd_rw_aio_complete(struct kiocb *iocb, long ret)
{
struct target_core_file_cmd *cmd;
@@ -302,7 +303,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
ret = call_read_iter(file, &aio_cmd->iocb, &iter);
if (ret != -EIOCBQUEUED)
- cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);
+ cmd_rw_aio_complete(&aio_cmd->iocb, ret);
return 0;
}
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4069a1edcfa3..b1ef041cacd8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -16,12 +16,14 @@
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
+#include <linux/blk-integrity.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
+#include <linux/scatterlist.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>
@@ -230,9 +232,9 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
struct block_device *bd,
struct request_queue *q)
{
- unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
- bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
+ unsigned long long blocks_long =
+ div_u64(bdev_nr_bytes(bd), block_size) - 1;
if (block_size == dev->dev_attrib.block_size)
return blocks_long;
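The "- 1" above comes from READ CAPACITY semantics: the value reported is the address of the last logical block, not the block count. A small standalone sketch with made-up device geometry (not driver code):

#include <stdio.h>

int main(void)
{
	unsigned long long dev_bytes = 1ULL << 30;	/* hypothetical 1 GiB device */
	unsigned int block_size = 512;			/* logical block size */

	/* Block count is bytes / block_size; READ CAPACITY reports the
	 * address of the last block, hence the "- 1" in the driver.
	 */
	unsigned long long last_lba = dev_bytes / block_size - 1;

	printf("last LBA = %llu\n", last_lba);		/* prints 2097151 */
	return 0;
}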
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 75ef52f008ff..7fa57fb57bf2 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -980,11 +980,10 @@ pscsi_execute_cmd(struct se_cmd *cmd)
memcpy(pt->pscsi_cdb, cmd->t_task_cdb,
scsi_command_size(cmd->t_task_cdb));
- req = blk_get_request(pdv->pdv_sd->request_queue,
+ req = scsi_alloc_request(pdv->pdv_sd->request_queue,
cmd->data_direction == DMA_TO_DEVICE ?
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
if (IS_ERR(req)) {
- pr_err("PSCSI: blk_get_request() failed\n");
ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto fail;
}
@@ -1012,7 +1011,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
return 0;
fail_put_request:
- blk_put_request(req);
+ blk_mq_free_request(req);
fail:
kfree(pt);
return ret;
@@ -1067,7 +1066,7 @@ static void pscsi_req_done(struct request *req, blk_status_t status)
break;
}
- blk_put_request(req);
+ blk_mq_free_request(req);
kfree(pt);
}
diff --git a/drivers/tee/optee/Makefile b/drivers/tee/optee/Makefile
index 3aa33ea9e6a6..66b8a17f14c4 100644
--- a/drivers/tee/optee/Makefile
+++ b/drivers/tee/optee/Makefile
@@ -4,8 +4,9 @@ optee-objs += core.o
optee-objs += call.o
optee-objs += rpc.o
optee-objs += supp.o
-optee-objs += shm_pool.o
optee-objs += device.o
+optee-objs += smc_abi.o
+optee-objs += ffa_abi.o
# for tracing framework to find optee_trace.h
-CFLAGS_call.o := -I$(src)
+CFLAGS_smc_abi.o := -I$(src)
diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c
index 945f03da0223..b25cc1fac945 100644
--- a/drivers/tee/optee/call.c
+++ b/drivers/tee/optee/call.c
@@ -1,29 +1,18 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
-#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
-#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include "optee_private.h"
-#include "optee_smc.h"
-#define CREATE_TRACE_POINTS
-#include "optee_trace.h"
-struct optee_call_waiter {
- struct list_head list_node;
- struct completion c;
-};
-
-static void optee_cq_wait_init(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
/*
* We're preparing to make a call to secure world. In case we can't
@@ -47,8 +36,8 @@ static void optee_cq_wait_init(struct optee_call_queue *cq,
mutex_unlock(&cq->mutex);
}
-static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
wait_for_completion(&w->c);
@@ -74,8 +63,8 @@ static void optee_cq_complete_one(struct optee_call_queue *cq)
}
}
-static void optee_cq_wait_final(struct optee_call_queue *cq,
- struct optee_call_waiter *w)
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w)
{
/*
* We're done with the call to secure world. The thread in secure
@@ -115,97 +104,35 @@ static struct optee_session *find_session(struct optee_context_data *ctxdata,
return NULL;
}
-/**
- * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
- * @ctx: calling context
- * @parg: physical address of message to pass to secure world
- *
- * Does and SMC to OP-TEE in secure world and handles eventual resulting
- * Remote Procedure Calls (RPC) from OP-TEE.
- *
- * Returns return code from secure world, 0 is OK
- */
-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg)
{
struct optee *optee = tee_get_drvdata(ctx->teedev);
- struct optee_call_waiter w;
- struct optee_rpc_param param = { };
- struct optee_call_ctx call_ctx = { };
- u32 ret;
-
- param.a0 = OPTEE_SMC_CALL_WITH_ARG;
- reg_pair_from_64(&param.a1, &param.a2, parg);
- /* Initialize waiter */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- struct arm_smccc_res res;
-
- trace_optee_invoke_fn_begin(&param);
- optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
- param.a4, param.a5, param.a6, param.a7,
- &res);
- trace_optee_invoke_fn_end(&param, &res);
-
- if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
- /*
- * Out of threads in secure world, wait for a thread
- * become available.
- */
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
- cond_resched();
- param.a0 = res.a0;
- param.a1 = res.a1;
- param.a2 = res.a2;
- param.a3 = res.a3;
- optee_handle_rpc(ctx, &param, &call_ctx);
- } else {
- ret = res.a0;
- break;
- }
- }
+ size_t sz = OPTEE_MSG_GET_ARG_SIZE(num_params);
+ struct tee_shm *shm;
+ struct optee_msg_arg *ma;
- optee_rpc_finalize_call(&call_ctx);
/*
- * We're done with our thread in secure world, if there's any
- * thread waiters wake up one.
+ * rpc_arg_count is set to the number of allocated parameters in
+ * the RPC argument struct if a second MSG arg struct is expected.
+ * The second arg struct will then be used for RPC.
*/
- optee_cq_wait_final(&optee->call_queue, &w);
-
- return ret;
-}
+ if (optee->rpc_arg_count)
+ sz += OPTEE_MSG_GET_ARG_SIZE(optee->rpc_arg_count);
-static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
- struct optee_msg_arg **msg_arg,
- phys_addr_t *msg_parg)
-{
- int rc;
- struct tee_shm *shm;
- struct optee_msg_arg *ma;
-
- shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
if (IS_ERR(shm))
return shm;
ma = tee_shm_get_va(shm, 0);
if (IS_ERR(ma)) {
- rc = PTR_ERR(ma);
- goto out;
+ tee_shm_free(shm);
+ return (void *)ma;
}
- rc = tee_shm_get_pa(shm, 0, msg_parg);
- if (rc)
- goto out;
-
memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
ma->num_params = num_params;
*msg_arg = ma;
-out:
- if (rc) {
- tee_shm_free(shm);
- return ERR_PTR(rc);
- }
return shm;
}
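How the allocation size in optee_get_msg_arg() above grows when a second argument struct is reserved for RPC: a minimal standalone sketch. The struct layouts and the GET_ARG_SIZE-style macro below are illustrative stand-ins, not the real definitions from optee_msg.h.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-ins; the real structs live in optee_msg.h. */
struct demo_msg_param { unsigned long long u[4]; };
struct demo_msg_arg { unsigned int hdr[8]; struct demo_msg_param params[]; };

#define DEMO_GET_ARG_SIZE(num_params) \
	(sizeof(struct demo_msg_arg) + (num_params) * sizeof(struct demo_msg_param))

int main(void)
{
	size_t num_params = 2;		/* caller's parameters */
	size_t rpc_arg_count = 4;	/* nonzero: reserve a second arg struct */
	size_t sz = DEMO_GET_ARG_SIZE(num_params);

	if (rpc_arg_count)
		sz += DEMO_GET_ARG_SIZE(rpc_arg_count);

	printf("shared buffer size: %zu bytes\n", sz);
	return 0;
}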
@@ -214,16 +141,16 @@ int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
int rc;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess = NULL;
uuid_t client_uuid;
/* +2 for the meta parameters added below */
- shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, arg->num_params + 2, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
@@ -247,7 +174,8 @@ int optee_open_session(struct tee_context *ctx,
goto out;
export_uuid(msg_arg->params[1].u.octets, &client_uuid);
- rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
+ rc = optee->ops->to_msg_param(optee, msg_arg->params + 2,
+ arg->num_params, param);
if (rc)
goto out;
@@ -257,7 +185,7 @@ int optee_open_session(struct tee_context *ctx,
goto out;
}
- if (optee_do_call_with_arg(ctx, msg_parg)) {
+ if (optee->ops->do_call_with_arg(ctx, shm)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -272,7 +200,8 @@ int optee_open_session(struct tee_context *ctx,
kfree(sess);
}
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params + 2)) {
arg->ret = TEEC_ERROR_COMMUNICATION;
arg->ret_origin = TEEC_ORIGIN_COMMS;
/* Close session again to avoid leakage */
@@ -288,12 +217,28 @@ out:
return rc;
}
-int optee_close_session(struct tee_context *ctx, u32 session)
+int optee_close_session_helper(struct tee_context *ctx, u32 session)
{
- struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
+
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
+ if (IS_ERR(shm))
+ return PTR_ERR(shm);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
+ msg_arg->session = session;
+ optee->ops->do_call_with_arg(ctx, shm);
+
+ tee_shm_free(shm);
+
+ return 0;
+}
+
+int optee_close_session(struct tee_context *ctx, u32 session)
+{
+ struct optee_context_data *ctxdata = ctx->data;
struct optee_session *sess;
/* Check that the session is valid and remove it from the list */
@@ -306,25 +251,16 @@ int optee_close_session(struct tee_context *ctx, u32 session)
return -EINVAL;
kfree(sess);
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
- if (IS_ERR(shm))
- return PTR_ERR(shm);
-
- msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
- msg_arg->session = session;
- optee_do_call_with_arg(ctx, msg_parg);
-
- tee_shm_free(shm);
- return 0;
+ return optee_close_session_helper(ctx, session);
}
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess;
int rc;
@@ -335,7 +271,7 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
if (!sess)
return -EINVAL;
- shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, arg->num_params, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
@@ -343,16 +279,18 @@ int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
msg_arg->session = arg->session;
msg_arg->cancel_id = arg->cancel_id;
- rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
+ rc = optee->ops->to_msg_param(optee, msg_arg->params, arg->num_params,
+ param);
if (rc)
goto out;
- if (optee_do_call_with_arg(ctx, msg_parg)) {
+ if (optee->ops->do_call_with_arg(ctx, shm)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
- if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
+ if (optee->ops->from_msg_param(optee, param, arg->num_params,
+ msg_arg->params)) {
msg_arg->ret = TEEC_ERROR_COMMUNICATION;
msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
}
@@ -366,10 +304,10 @@ out:
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct optee_context_data *ctxdata = ctx->data;
struct tee_shm *shm;
struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
struct optee_session *sess;
/* Check that the session is valid */
@@ -379,195 +317,19 @@ int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
if (!sess)
return -EINVAL;
- shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
+ shm = optee_get_msg_arg(ctx, 0, &msg_arg);
if (IS_ERR(shm))
return PTR_ERR(shm);
msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
msg_arg->session = session;
msg_arg->cancel_id = cancel_id;
- optee_do_call_with_arg(ctx, msg_parg);
+ optee->ops->do_call_with_arg(ctx, shm);
tee_shm_free(shm);
return 0;
}
-/**
- * optee_enable_shm_cache() - Enables caching of some shared memory allocation
- * in OP-TEE
- * @optee: main service struct
- */
-void optee_enable_shm_cache(struct optee *optee)
-{
- struct optee_call_waiter w;
-
- /* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- struct arm_smccc_res res;
-
- optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
- 0, &res);
- if (res.a0 == OPTEE_SMC_RETURN_OK)
- break;
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- }
- optee_cq_wait_final(&optee->call_queue, &w);
-}
-
-/**
- * __optee_disable_shm_cache() - Disables caching of some shared memory
- * allocation in OP-TEE
- * @optee: main service struct
- * @is_mapped: true if the cached shared memory addresses were mapped by this
- * kernel, are safe to dereference, and should be freed
- */
-static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
-{
- struct optee_call_waiter w;
-
- /* We need to retry until secure world isn't busy. */
- optee_cq_wait_init(&optee->call_queue, &w);
- while (true) {
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_disable_shm_cache_result result;
- } res;
-
- optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
- 0, &res.smccc);
- if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
- break; /* All shm's freed */
- if (res.result.status == OPTEE_SMC_RETURN_OK) {
- struct tee_shm *shm;
-
- /*
- * Shared memory references that were not mapped by
- * this kernel must be ignored to prevent a crash.
- */
- if (!is_mapped)
- continue;
-
- shm = reg_pair_to_ptr(res.result.shm_upper32,
- res.result.shm_lower32);
- tee_shm_free(shm);
- } else {
- optee_cq_wait_for_completion(&optee->call_queue, &w);
- }
- }
- optee_cq_wait_final(&optee->call_queue, &w);
-}
-
-/**
- * optee_disable_shm_cache() - Disables caching of mapped shared memory
- * allocations in OP-TEE
- * @optee: main service struct
- */
-void optee_disable_shm_cache(struct optee *optee)
-{
- return __optee_disable_shm_cache(optee, true);
-}
-
-/**
- * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
- * allocations in OP-TEE which are not
- * currently mapped
- * @optee: main service struct
- */
-void optee_disable_unmapped_shm_cache(struct optee *optee)
-{
- return __optee_disable_shm_cache(optee, false);
-}
-
-#define PAGELIST_ENTRIES_PER_PAGE \
- ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
-
-/**
- * optee_fill_pages_list() - write list of user pages to given shared
- * buffer.
- *
- * @dst: page-aligned buffer where list of pages will be stored
- * @pages: array of pages that represents shared buffer
- * @num_pages: number of entries in @pages
- * @page_offset: offset of user buffer from page start
- *
- * @dst should be big enough to hold list of user page addresses and
- * links to the next pages of buffer
- */
-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
- size_t page_offset)
-{
- int n = 0;
- phys_addr_t optee_page;
- /*
- * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
- * for details.
- */
- struct {
- u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
- u64 next_page_data;
- } *pages_data;
-
- /*
- * Currently OP-TEE uses 4k page size and it does not looks
- * like this will change in the future. On other hand, there are
- * no know ARM architectures with page size < 4k.
- * Thus the next built assert looks redundant. But the following
- * code heavily relies on this assumption, so it is better be
- * safe than sorry.
- */
- BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
-
- pages_data = (void *)dst;
- /*
- * If linux page is bigger than 4k, and user buffer offset is
- * larger than 4k/8k/12k/etc this will skip first 4k pages,
- * because they bear no value data for OP-TEE.
- */
- optee_page = page_to_phys(*pages) +
- round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
-
- while (true) {
- pages_data->pages_list[n++] = optee_page;
-
- if (n == PAGELIST_ENTRIES_PER_PAGE) {
- pages_data->next_page_data =
- virt_to_phys(pages_data + 1);
- pages_data++;
- n = 0;
- }
-
- optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
- if (!(optee_page & ~PAGE_MASK)) {
- if (!--num_pages)
- break;
- pages++;
- optee_page = page_to_phys(*pages);
- }
- }
-}
-
-/*
- * The final entry in each pagelist page is a pointer to the next
- * pagelist page.
- */
-static size_t get_pages_list_size(size_t num_entries)
-{
- int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
-
- return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
-}
-
-u64 *optee_allocate_pages_list(size_t num_entries)
-{
- return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
-}
-
-void optee_free_pages_list(void *list, size_t num_entries)
-{
- free_pages_exact(list, get_pages_list_size(num_entries));
-}
-
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
@@ -591,7 +353,7 @@ static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
return -EINVAL;
}
-static int check_mem_type(unsigned long start, size_t num_pages)
+int optee_check_mem_type(unsigned long start, size_t num_pages)
{
struct mm_struct *mm = current->mm;
int rc;
@@ -610,94 +372,3 @@ static int check_mem_type(unsigned long start, size_t num_pages)
return rc;
}
-
-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start)
-{
- struct tee_shm *shm_arg = NULL;
- struct optee_msg_arg *msg_arg;
- u64 *pages_list;
- phys_addr_t msg_parg;
- int rc;
-
- if (!num_pages)
- return -EINVAL;
-
- rc = check_mem_type(start, num_pages);
- if (rc)
- return rc;
-
- pages_list = optee_allocate_pages_list(num_pages);
- if (!pages_list)
- return -ENOMEM;
-
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
- if (IS_ERR(shm_arg)) {
- rc = PTR_ERR(shm_arg);
- goto out;
- }
-
- optee_fill_pages_list(pages_list, pages, num_pages,
- tee_shm_get_page_offset(shm));
-
- msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
- msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
- OPTEE_MSG_ATTR_NONCONTIG;
- msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
- msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
- /*
- * In the least bits of msg_arg->params->u.tmem.buf_ptr we
- * store buffer offset from 4k page, as described in OP-TEE ABI.
- */
- msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
- (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
-
- if (optee_do_call_with_arg(ctx, msg_parg) ||
- msg_arg->ret != TEEC_SUCCESS)
- rc = -EINVAL;
-
- tee_shm_free(shm_arg);
-out:
- optee_free_pages_list(pages_list, num_pages);
- return rc;
-}
-
-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
-{
- struct tee_shm *shm_arg;
- struct optee_msg_arg *msg_arg;
- phys_addr_t msg_parg;
- int rc = 0;
-
- shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
- if (IS_ERR(shm_arg))
- return PTR_ERR(shm_arg);
-
- msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
-
- msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
- msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
-
- if (optee_do_call_with_arg(ctx, msg_parg) ||
- msg_arg->ret != TEEC_SUCCESS)
- rc = -EINVAL;
- tee_shm_free(shm_arg);
- return rc;
-}
-
-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start)
-{
- /*
- * We don't want to register supplicant memory in OP-TEE.
- * Instead information about it will be passed in RPC code.
- */
- return check_mem_type(start, num_pages);
-}
-
-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
-{
- return 0;
-}
diff --git a/drivers/tee/optee/core.c b/drivers/tee/optee/core.c
index 5363ebebfc35..ab2edfcc6c70 100644
--- a/drivers/tee/optee/core.c
+++ b/drivers/tee/optee/core.c
@@ -1,215 +1,71 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/arm-smccc.h>
#include <linux/crash_dump.h>
#include <linux/errno.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_platform.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
-#include <linux/uaccess.h>
#include <linux/workqueue.h>
#include "optee_private.h"
-#include "optee_smc.h"
-#include "shm_pool.h"
-#define DRIVER_NAME "optee"
-
-#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
-
-/**
- * optee_from_msg_param() - convert from OPTEE_MSG parameters to
- * struct tee_param
- * @params: subsystem internal parameter representation
- * @num_params: number of elements in the parameter arrays
- * @msg_params: OPTEE_MSG parameters
- * Returns 0 on success or <0 on failure
- */
-int optee_from_msg_param(struct tee_param *params, size_t num_params,
- const struct optee_msg_param *msg_params)
+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start))
{
- int rc;
- size_t n;
- struct tee_shm *shm;
- phys_addr_t pa;
-
- for (n = 0; n < num_params; n++) {
- struct tee_param *p = params + n;
- const struct optee_msg_param *mp = msg_params + n;
- u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
-
- switch (attr) {
- case OPTEE_MSG_ATTR_TYPE_NONE:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
- memset(&p->u, 0, sizeof(p->u));
- break;
- case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
- case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
- p->u.value.a = mp->u.value.a;
- p->u.value.b = mp->u.value.b;
- p->u.value.c = mp->u.value.c;
- break;
- case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
- case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
- p->u.memref.size = mp->u.tmem.size;
- shm = (struct tee_shm *)(unsigned long)
- mp->u.tmem.shm_ref;
- if (!shm) {
- p->u.memref.shm_offs = 0;
- p->u.memref.shm = NULL;
- break;
- }
- rc = tee_shm_get_pa(shm, 0, &pa);
- if (rc)
- return rc;
- p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
- p->u.memref.shm = shm;
- break;
- case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
- case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
- case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
- p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
- attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
- p->u.memref.size = mp->u.rmem.size;
- shm = (struct tee_shm *)(unsigned long)
- mp->u.rmem.shm_ref;
-
- if (!shm) {
- p->u.memref.shm_offs = 0;
- p->u.memref.shm = NULL;
- break;
- }
- p->u.memref.shm_offs = mp->u.rmem.offs;
- p->u.memref.shm = shm;
+ unsigned int order = get_order(size);
+ struct page *page;
+ int rc = 0;
- break;
+ page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!page)
+ return -ENOMEM;
- default:
- return -EINVAL;
- }
- }
- return 0;
-}
+ shm->kaddr = page_address(page);
+ shm->paddr = page_to_phys(page);
+ shm->size = PAGE_SIZE << order;
-static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
- const struct tee_param *p)
-{
- int rc;
- phys_addr_t pa;
+ if (shm_register) {
+ unsigned int nr_pages = 1 << order, i;
+ struct page **pages;
- mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+ pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
+ if (!pages) {
+ rc = -ENOMEM;
+ goto err;
+ }
- mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.tmem.size = p->u.memref.size;
+ for (i = 0; i < nr_pages; i++) {
+ pages[i] = page;
+ page++;
+ }
- if (!p->u.memref.shm) {
- mp->u.tmem.buf_ptr = 0;
- return 0;
+ shm->flags |= TEE_SHM_REGISTER;
+ rc = shm_register(shm->ctx, shm, pages, nr_pages,
+ (unsigned long)shm->kaddr);
+ kfree(pages);
+ if (rc)
+ goto err;
}
- rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
- if (rc)
- return rc;
-
- mp->u.tmem.buf_ptr = pa;
- mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
- OPTEE_MSG_ATTR_CACHE_SHIFT;
-
return 0;
-}
-
-static int to_msg_param_reg_mem(struct optee_msg_param *mp,
- const struct tee_param *p)
-{
- mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
- mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
- mp->u.rmem.size = p->u.memref.size;
- mp->u.rmem.offs = p->u.memref.shm_offs;
- return 0;
-}
-
-/**
- * optee_to_msg_param() - convert from struct tee_params to OPTEE_MSG parameters
- * @msg_params: OPTEE_MSG parameters
- * @num_params: number of elements in the parameter arrays
- * @params: subsystem itnernal parameter representation
- * Returns 0 on success or <0 on failure
- */
-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
- const struct tee_param *params)
-{
- int rc;
- size_t n;
-
- for (n = 0; n < num_params; n++) {
- const struct tee_param *p = params + n;
- struct optee_msg_param *mp = msg_params + n;
-
- switch (p->attr) {
- case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
- mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
- memset(&mp->u, 0, sizeof(mp->u));
- break;
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
- mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
- TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
- mp->u.value.a = p->u.value.a;
- mp->u.value.b = p->u.value.b;
- mp->u.value.c = p->u.value.c;
- break;
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
- case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
- if (tee_shm_is_registered(p->u.memref.shm))
- rc = to_msg_param_reg_mem(mp, p);
- else
- rc = to_msg_param_tmp_mem(mp, p);
- if (rc)
- return rc;
- break;
- default:
- return -EINVAL;
- }
- }
- return 0;
-}
-
-static void optee_get_version(struct tee_device *teedev,
- struct tee_ioctl_version_data *vers)
-{
- struct tee_ioctl_version_data v = {
- .impl_id = TEE_IMPL_ID_OPTEE,
- .impl_caps = TEE_OPTEE_CAP_TZ,
- .gen_caps = TEE_GEN_CAP_GP,
- };
- struct optee *optee = tee_get_drvdata(teedev);
-
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- v.gen_caps |= TEE_GEN_CAP_REG_MEM;
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
- v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
- *vers = v;
+err:
+ __free_pages(page, order);
+ return rc;
}
static void optee_bus_scan(struct work_struct *work)
@@ -217,7 +73,7 @@ static void optee_bus_scan(struct work_struct *work)
WARN_ON(optee_enumerate_devices(PTA_CMD_GET_DEVICES_SUPP));
}
-static int optee_open(struct tee_context *ctx)
+int optee_open(struct tee_context *ctx, bool cap_memref_null)
{
struct optee_context_data *ctxdata;
struct tee_device *teedev = ctx->teedev;
@@ -255,347 +111,55 @@ static int optee_open(struct tee_context *ctx)
mutex_init(&ctxdata->mutex);
INIT_LIST_HEAD(&ctxdata->sess_list);
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
- ctx->cap_memref_null = true;
- else
- ctx->cap_memref_null = false;
-
+ ctx->cap_memref_null = cap_memref_null;
ctx->data = ctxdata;
return 0;
}
-static void optee_release(struct tee_context *ctx)
+static void optee_release_helper(struct tee_context *ctx,
+ int (*close_session)(struct tee_context *ctx,
+ u32 session))
{
struct optee_context_data *ctxdata = ctx->data;
- struct tee_device *teedev = ctx->teedev;
- struct optee *optee = tee_get_drvdata(teedev);
- struct tee_shm *shm;
- struct optee_msg_arg *arg = NULL;
- phys_addr_t parg;
struct optee_session *sess;
struct optee_session *sess_tmp;
if (!ctxdata)
return;
- shm = tee_shm_alloc(ctx, sizeof(struct optee_msg_arg),
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
- if (!IS_ERR(shm)) {
- arg = tee_shm_get_va(shm, 0);
- /*
- * If va2pa fails for some reason, we can't call into
- * secure world, only free the memory. Secure OS will leak
- * sessions and finally refuse more sessions, but we will
- * at least let normal world reclaim its memory.
- */
- if (!IS_ERR(arg))
- if (tee_shm_va2pa(shm, arg, &parg))
- arg = NULL; /* prevent usage of parg below */
- }
-
list_for_each_entry_safe(sess, sess_tmp, &ctxdata->sess_list,
list_node) {
list_del(&sess->list_node);
- if (!IS_ERR_OR_NULL(arg)) {
- memset(arg, 0, sizeof(*arg));
- arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
- arg->session = sess->session_id;
- optee_do_call_with_arg(ctx, parg);
- }
+ close_session(ctx, sess->session_id);
kfree(sess);
}
kfree(ctxdata);
-
- if (!IS_ERR(shm))
- tee_shm_free(shm);
-
ctx->data = NULL;
-
- if (teedev == optee->supp_teedev) {
- if (optee->scan_bus_wq) {
- destroy_workqueue(optee->scan_bus_wq);
- optee->scan_bus_wq = NULL;
- }
- optee_supp_release(&optee->supp);
- }
-}
-
-static const struct tee_driver_ops optee_ops = {
- .get_version = optee_get_version,
- .open = optee_open,
- .release = optee_release,
- .open_session = optee_open_session,
- .close_session = optee_close_session,
- .invoke_func = optee_invoke_func,
- .cancel_req = optee_cancel_req,
- .shm_register = optee_shm_register,
- .shm_unregister = optee_shm_unregister,
-};
-
-static const struct tee_desc optee_desc = {
- .name = DRIVER_NAME "-clnt",
- .ops = &optee_ops,
- .owner = THIS_MODULE,
-};
-
-static const struct tee_driver_ops optee_supp_ops = {
- .get_version = optee_get_version,
- .open = optee_open,
- .release = optee_release,
- .supp_recv = optee_supp_recv,
- .supp_send = optee_supp_send,
- .shm_register = optee_shm_register_supp,
- .shm_unregister = optee_shm_unregister_supp,
-};
-
-static const struct tee_desc optee_supp_desc = {
- .name = DRIVER_NAME "-supp",
- .ops = &optee_supp_ops,
- .owner = THIS_MODULE,
- .flags = TEE_DESC_PRIVILEGED,
-};
-
-static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
-{
- struct arm_smccc_res res;
-
- invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
-
- if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
- res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
- return true;
- return false;
-}
-
-static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_call_get_os_revision_result result;
- } res = {
- .result = {
- .build_id = 0
- }
- };
-
- invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
- &res.smccc);
-
- if (res.result.build_id)
- pr_info("revision %lu.%lu (%08lx)", res.result.major,
- res.result.minor, res.result.build_id);
- else
- pr_info("revision %lu.%lu", res.result.major, res.result.minor);
-}
-
-static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_calls_revision_result result;
- } res;
-
- invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
-
- if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
- (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
- return true;
- return false;
-}
-
-static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
- u32 *sec_caps)
-{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_exchange_capabilities_result result;
- } res;
- u32 a1 = 0;
-
- /*
- * TODO This isn't enough to tell if it's UP system (from kernel
- * point of view) or not, is_smp() returns the the information
- * needed, but can't be called directly from here.
- */
- if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
- a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
-
- invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
- &res.smccc);
-
- if (res.result.status != OPTEE_SMC_RETURN_OK)
- return false;
-
- *sec_caps = res.result.capabilities;
- return true;
-}
-
-static struct tee_shm_pool *optee_config_dyn_shm(void)
-{
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc))
- return rc;
- priv_mgr = rc;
-
- rc = optee_shm_pool_alloc_pages();
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- return rc;
- }
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc)) {
- tee_shm_pool_mgr_destroy(priv_mgr);
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
- }
-
- return rc;
}
-static struct tee_shm_pool *
-optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+void optee_release(struct tee_context *ctx)
{
- union {
- struct arm_smccc_res smccc;
- struct optee_smc_get_shm_config_result result;
- } res;
- unsigned long vaddr;
- phys_addr_t paddr;
- size_t size;
- phys_addr_t begin;
- phys_addr_t end;
- void *va;
- struct tee_shm_pool_mgr *priv_mgr;
- struct tee_shm_pool_mgr *dmabuf_mgr;
- void *rc;
- const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
-
- invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
- if (res.result.status != OPTEE_SMC_RETURN_OK) {
- pr_err("static shm service not available\n");
- return ERR_PTR(-ENOENT);
- }
-
- if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
- pr_err("only normal cached shared memory supported\n");
- return ERR_PTR(-EINVAL);
- }
-
- begin = roundup(res.result.start, PAGE_SIZE);
- end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
- paddr = begin;
- size = end - begin;
-
- if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
- pr_err("too small shared memory area\n");
- return ERR_PTR(-EINVAL);
- }
-
- va = memremap(paddr, size, MEMREMAP_WB);
- if (!va) {
- pr_err("shared memory ioremap failed\n");
- return ERR_PTR(-EINVAL);
- }
- vaddr = (unsigned long)va;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
- 3 /* 8 bytes aligned */);
- if (IS_ERR(rc))
- goto err_memunmap;
- priv_mgr = rc;
-
- vaddr += sz;
- paddr += sz;
- size -= sz;
-
- rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
- if (IS_ERR(rc))
- goto err_free_priv_mgr;
- dmabuf_mgr = rc;
-
- rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
- if (IS_ERR(rc))
- goto err_free_dmabuf_mgr;
-
- *memremaped_shm = va;
-
- return rc;
-
-err_free_dmabuf_mgr:
- tee_shm_pool_mgr_destroy(dmabuf_mgr);
-err_free_priv_mgr:
- tee_shm_pool_mgr_destroy(priv_mgr);
-err_memunmap:
- memunmap(va);
- return rc;
+ optee_release_helper(ctx, optee_close_session_helper);
}
-/* Simple wrapper functions to be able to use a function pointer */
-static void optee_smccc_smc(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5,
- unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res)
+void optee_release_supp(struct tee_context *ctx)
{
- arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
-}
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
-static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
- unsigned long a2, unsigned long a3,
- unsigned long a4, unsigned long a5,
- unsigned long a6, unsigned long a7,
- struct arm_smccc_res *res)
-{
- arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
-}
-
-static optee_invoke_fn *get_invoke_func(struct device *dev)
-{
- const char *method;
-
- pr_info("probing for conduit method.\n");
-
- if (device_property_read_string(dev, "method", &method)) {
- pr_warn("missing \"method\" property\n");
- return ERR_PTR(-ENXIO);
+ optee_release_helper(ctx, optee_close_session_helper);
+ if (optee->scan_bus_wq) {
+ destroy_workqueue(optee->scan_bus_wq);
+ optee->scan_bus_wq = NULL;
}
-
- if (!strcmp("hvc", method))
- return optee_smccc_hvc;
- else if (!strcmp("smc", method))
- return optee_smccc_smc;
-
- pr_warn("invalid \"method\" property: %s\n", method);
- return ERR_PTR(-EINVAL);
+ optee_supp_release(&optee->supp);
}
-/* optee_remove - Device Removal Routine
- * @pdev: platform device information struct
- *
- * optee_remove is called by platform subsystem to alert the driver
- * that it should release the device
- */
-
-static int optee_remove(struct platform_device *pdev)
+void optee_remove_common(struct optee *optee)
{
- struct optee *optee = platform_get_drvdata(pdev);
-
/* Unregister OP-TEE specific client devices on TEE bus */
optee_unregister_devices();
/*
- * Ask OP-TEE to free all cached shared memory objects to decrease
- * reference counters and also avoid wild pointers in secure world
- * into the old shared memory range.
- */
- optee_disable_shm_cache(optee);
-
- /*
* The two devices have to be unregistered before we can free the
* other resources.
*/
@@ -603,39 +167,16 @@ static int optee_remove(struct platform_device *pdev)
tee_device_unregister(optee->teedev);
tee_shm_pool_free(optee->pool);
- if (optee->memremaped_shm)
- memunmap(optee->memremaped_shm);
optee_wait_queue_exit(&optee->wait_queue);
optee_supp_uninit(&optee->supp);
mutex_destroy(&optee->call_queue.mutex);
-
- kfree(optee);
-
- return 0;
}
-/* optee_shutdown - Device Removal Routine
- * @pdev: platform device information struct
- *
- * platform_shutdown is called by the platform subsystem to alert
- * the driver that a shutdown, reboot, or kexec is happening and
- * device must be disabled.
- */
-static void optee_shutdown(struct platform_device *pdev)
-{
- optee_disable_shm_cache(platform_get_drvdata(pdev));
-}
+static int smc_abi_rc;
+static int ffa_abi_rc;
-static int optee_probe(struct platform_device *pdev)
+static int optee_core_init(void)
{
- optee_invoke_fn *invoke_fn;
- struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
- struct optee *optee = NULL;
- void *memremaped_shm = NULL;
- struct tee_device *teedev;
- u32 sec_caps;
- int rc;
-
/*
* The kernel may have crashed at the same time that all available
* secure world threads were suspended and we cannot reschedule the
@@ -646,138 +187,24 @@ static int optee_probe(struct platform_device *pdev)
if (is_kdump_kernel())
return -ENODEV;
- invoke_fn = get_invoke_func(&pdev->dev);
- if (IS_ERR(invoke_fn))
- return PTR_ERR(invoke_fn);
-
- if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
- pr_warn("api uid mismatch\n");
- return -EINVAL;
- }
-
- optee_msg_get_os_revision(invoke_fn);
-
- if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
- pr_warn("api revision mismatch\n");
- return -EINVAL;
- }
-
- if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
- pr_warn("capabilities mismatch\n");
- return -EINVAL;
- }
-
- /*
- * Try to use dynamic shared memory if possible
- */
- if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- pool = optee_config_dyn_shm();
-
- /*
- * If dynamic shared memory is not available or failed - try static one
- */
- if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
- pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
-
- if (IS_ERR(pool))
- return PTR_ERR(pool);
-
- optee = kzalloc(sizeof(*optee), GFP_KERNEL);
- if (!optee) {
- rc = -ENOMEM;
- goto err;
- }
-
- optee->invoke_fn = invoke_fn;
- optee->sec_caps = sec_caps;
-
- teedev = tee_device_alloc(&optee_desc, NULL, pool, optee);
- if (IS_ERR(teedev)) {
- rc = PTR_ERR(teedev);
- goto err;
- }
- optee->teedev = teedev;
-
- teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
- if (IS_ERR(teedev)) {
- rc = PTR_ERR(teedev);
- goto err;
- }
- optee->supp_teedev = teedev;
-
- rc = tee_device_register(optee->teedev);
- if (rc)
- goto err;
-
- rc = tee_device_register(optee->supp_teedev);
- if (rc)
- goto err;
-
- mutex_init(&optee->call_queue.mutex);
- INIT_LIST_HEAD(&optee->call_queue.waiters);
- optee_wait_queue_init(&optee->wait_queue);
- optee_supp_init(&optee->supp);
- optee->memremaped_shm = memremaped_shm;
- optee->pool = pool;
-
- /*
- * Ensure that there are no pre-existing shm objects before enabling
- * the shm cache so that there's no chance of receiving an invalid
- * address during shutdown. This could occur, for example, if we're
- * kexec booting from an older kernel that did not properly cleanup the
- * shm cache.
- */
- optee_disable_unmapped_shm_cache(optee);
-
- optee_enable_shm_cache(optee);
+ smc_abi_rc = optee_smc_abi_register();
+ ffa_abi_rc = optee_ffa_abi_register();
- if (optee->sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
- pr_info("dynamic shared memory is enabled\n");
-
- platform_set_drvdata(pdev, optee);
-
- rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
- if (rc) {
- optee_remove(pdev);
- return rc;
- }
-
- pr_info("initialized driver\n");
+ /* If both failed, there's no point in this module */
+ if (smc_abi_rc && ffa_abi_rc)
+ return smc_abi_rc;
return 0;
-err:
- if (optee) {
- /*
- * tee_device_unregister() is safe to call even if the
- * devices hasn't been registered with
- * tee_device_register() yet.
- */
- tee_device_unregister(optee->supp_teedev);
- tee_device_unregister(optee->teedev);
- kfree(optee);
- }
- if (pool)
- tee_shm_pool_free(pool);
- if (memremaped_shm)
- memunmap(memremaped_shm);
- return rc;
}
+module_init(optee_core_init);
-static const struct of_device_id optee_dt_match[] = {
- { .compatible = "linaro,optee-tz" },
- {},
-};
-MODULE_DEVICE_TABLE(of, optee_dt_match);
-
-static struct platform_driver optee_driver = {
- .probe = optee_probe,
- .remove = optee_remove,
- .shutdown = optee_shutdown,
- .driver = {
- .name = "optee",
- .of_match_table = optee_dt_match,
- },
-};
-module_platform_driver(optee_driver);
+static void optee_core_exit(void)
+{
+ if (!smc_abi_rc)
+ optee_smc_abi_unregister();
+ if (!ffa_abi_rc)
+ optee_ffa_abi_unregister();
+}
+module_exit(optee_core_exit);
MODULE_AUTHOR("Linaro");
MODULE_DESCRIPTION("OP-TEE driver");
diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c
new file mode 100644
index 000000000000..45424824e0f9
--- /dev/null
+++ b/drivers/tee/optee/ffa_abi.c
@@ -0,0 +1,911 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2021, Linaro Limited
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm_ffa.h>
+#include <linux/errno.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include "optee_private.h"
+#include "optee_ffa.h"
+#include "optee_rpc_cmd.h"
+
+/*
+ * This file implements the FF-A ABI used when communicating with secure world
+ * OP-TEE OS via FF-A.
+ * This file is divided into the following sections:
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ * 3. Low level support functions to register shared memory in secure world
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ * 5. Do a normal scheduled call into secure world
+ * 6. Driver initialization.
+ */
+
+/*
+ * 1. Maintain a hash table for lookup of a global FF-A memory handle
+ *
+ * FF-A assigns a global memory handle for each piece of shared memory.
+ * This handle is then used when communicating with secure world.
+ *
+ * Main functions are optee_shm_add_ffa_handle() and optee_shm_rem_ffa_handle()
+ */
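+
+/*
+ * struct shm_rhash - maps a tee_shm to its FF-A global memory handle
+ * @shm: shared memory object registered with secure world
+ * @global_id: FF-A global memory handle of @shm, used as the hash key
+ * @linkage: rhashtable linkage
+ */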
+struct shm_rhash {
+ struct tee_shm *shm;
+ u64 global_id;
+ struct rhash_head linkage;
+};
+
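+/*
+ * Frees remaining hash table entries when the table is destroyed from
+ * optee_ffa_remove() via rhashtable_free_and_destroy().
+ */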
+static void rh_free_fn(void *ptr, void *arg)
+{
+ kfree(ptr);
+}
+
+static const struct rhashtable_params shm_rhash_params = {
+ .head_offset = offsetof(struct shm_rhash, linkage),
+ .key_len = sizeof(u64),
+ .key_offset = offsetof(struct shm_rhash, global_id),
+ .automatic_shrinking = true,
+};
+
+static struct tee_shm *optee_shm_from_ffa_handle(struct optee *optee,
+ u64 global_id)
+{
+ struct tee_shm *shm = NULL;
+ struct shm_rhash *r;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ shm = r->shm;
+ mutex_unlock(&optee->ffa.mutex);
+
+ return shm;
+}
+
+static int optee_shm_add_ffa_handle(struct optee *optee, struct tee_shm *shm,
+ u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc;
+
+ r = kmalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return -ENOMEM;
+ r->shm = shm;
+ r->global_id = global_id;
+
+ mutex_lock(&optee->ffa.mutex);
+ rc = rhashtable_lookup_insert_fast(&optee->ffa.global_ids, &r->linkage,
+ shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (rc)
+ kfree(r);
+
+ return rc;
+}
+
+static int optee_shm_rem_ffa_handle(struct optee *optee, u64 global_id)
+{
+ struct shm_rhash *r;
+ int rc = -ENOENT;
+
+ mutex_lock(&optee->ffa.mutex);
+ r = rhashtable_lookup_fast(&optee->ffa.global_ids, &global_id,
+ shm_rhash_params);
+ if (r)
+ rc = rhashtable_remove_fast(&optee->ffa.global_ids,
+ &r->linkage, shm_rhash_params);
+ mutex_unlock(&optee->ffa.mutex);
+
+ if (!rc)
+ kfree(r);
+
+ return rc;
+}
+
+/*
+ * 2. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_ffa_from_msg_param() and optee_ffa_to_msg_param() are the main
+ * functions.
+ */
+
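+/*
+ * Converts an OPTEE_MSG fmem parameter to a tee_param memref by looking up
+ * the tee_shm matching the FF-A global handle; an unknown or invalid handle
+ * leaves the shm reference NULL.
+ */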
+static void from_msg_param_ffa_mem(struct optee *optee, struct tee_param *p,
+ u32 attr, const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm = NULL;
+ u64 offs_high = 0;
+ u64 offs_low = 0;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_FMEM_INPUT;
+ p->u.memref.size = mp->u.fmem.size;
+
+ if (mp->u.fmem.global_id != OPTEE_MSG_FMEM_INVALID_GLOBAL_ID)
+ shm = optee_shm_from_ffa_handle(optee, mp->u.fmem.global_id);
+ p->u.memref.shm = shm;
+
+ if (shm) {
+ offs_low = mp->u.fmem.offs_low;
+ offs_high = mp->u.fmem.offs_high;
+ }
+ p->u.memref.shm_offs = offs_low | offs_high << 32;
+}
+
+/**
+ * optee_ffa_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ *
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_from_msg_param(struct optee *optee,
+ struct tee_param *params, size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_FMEM_INOUT:
+ from_msg_param_ffa_mem(optee, p, attr, mp);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
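+/*
+ * Converts a tee_param memref to an OPTEE_MSG fmem parameter, identifying
+ * the buffer by its FF-A global handle (sec_world_id); fails if the shared
+ * memory offset cannot be represented in the fmem offset fields.
+ */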
+static int to_msg_param_ffa_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ struct tee_shm *shm = p->u.memref.shm;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_FMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ if (shm) {
+ u64 shm_offs = p->u.memref.shm_offs;
+
+ mp->u.fmem.internal_offs = shm->offset;
+
+ mp->u.fmem.offs_low = shm_offs;
+ mp->u.fmem.offs_high = shm_offs >> 32;
+ /* Check that the entire offset could be stored. */
+ if (mp->u.fmem.offs_high != shm_offs >> 32)
+ return -EINVAL;
+
+ mp->u.fmem.global_id = shm->sec_world_id;
+ } else {
+ memset(&mp->u, 0, sizeof(mp->u));
+ mp->u.fmem.global_id = OPTEE_MSG_FMEM_INVALID_GLOBAL_ID;
+ }
+ mp->u.fmem.size = p->u.memref.size;
+
+ return 0;
+}
+
+/**
+ * optee_ffa_to_msg_param() - convert from struct tee_params to OPTEE_MSG
+ * parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_ffa_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params,
+ const struct tee_param *params)
+{
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (to_msg_param_ffa_mem(mp, p))
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * 3. Low level support functions to register shared memory in secure world
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
+ */
+
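+/*
+ * Shares the pages backing @shm with secure world through the FF-A
+ * memory_share operation and records the returned global handle so that
+ * later messages can refer to the buffer by that handle.
+ */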
+static int optee_ffa_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct ffa_mem_region_attributes mem_attr = {
+ .receiver = ffa_dev->vm_id,
+ .attrs = FFA_MEM_RW,
+ };
+ struct ffa_mem_ops_args args = {
+ .use_txbuf = true,
+ .attrs = &mem_attr,
+ .nattrs = 1,
+ };
+ struct sg_table sgt;
+ int rc;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ rc = sg_alloc_table_from_pages(&sgt, pages, num_pages, 0,
+ num_pages * PAGE_SIZE, GFP_KERNEL);
+ if (rc)
+ return rc;
+ args.sg = sgt.sgl;
+ rc = ffa_ops->memory_share(ffa_dev, &args);
+ sg_free_table(&sgt);
+ if (rc)
+ return rc;
+
+ rc = optee_shm_add_ffa_handle(optee, shm, args.g_handle);
+ if (rc) {
+ ffa_ops->memory_reclaim(args.g_handle, 0);
+ return rc;
+ }
+
+ shm->sec_world_id = args.g_handle;
+
+ return 0;
+}
+
+static int optee_ffa_shm_unregister(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ u64 global_handle = shm->sec_world_id;
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_UNREGISTER_SHM,
+ .data1 = (u32)global_handle,
+ .data2 = (u32)(global_handle >> 32)
+ };
+ int rc;
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ shm->sec_world_id = 0;
+
+ rc = ffa_ops->sync_send_receive(ffa_dev, &data);
+ if (rc)
+ pr_err("Unregister SHM id 0x%llx rc %d\n", global_handle, rc);
+
+ rc = ffa_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ return rc;
+}
+
+static int optee_ffa_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ u64 global_handle = shm->sec_world_id;
+ int rc;
+
+ /*
+ * We're skipping the OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM call
+ * since this is OP-TEE freeing via RPC, so it has already retired
+ * this ID.
+ */
+
+ optee_shm_rem_ffa_handle(optee, global_handle);
+ rc = ffa_ops->memory_reclaim(global_handle, 0);
+ if (rc)
+ pr_err("mem_reclaim: 0x%llx %d", global_handle, rc);
+
+ shm->sec_world_id = 0;
+
+ return rc;
+}
+
+/*
+ * 4. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool.
+ * The main function is optee_ffa_shm_pool_alloc_pages().
+ */
+
+static int pool_ffa_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ return optee_pool_op_alloc_helper(poolm, shm, size,
+ optee_ffa_shm_register);
+}
+
+static void pool_ffa_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ optee_ffa_shm_unregister(shm->ctx, shm);
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_ffa_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ffa_ops = {
+ .alloc = pool_ffa_op_alloc,
+ .free = pool_ffa_op_free,
+ .destroy_poolmgr = pool_ffa_op_destroy_poolmgr,
+};
+
+/**
+ * optee_ffa_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used with OP-TEE over FF-A. In this case command buffers
+ * and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool_mgr *optee_ffa_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ffa_ops;
+
+ return mgr;
+}
+
+/*
+ * 5. Do a normal scheduled call into secure world
+ *
+ * The function optee_ffa_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call the secure world may request
+ * help from normal world using RPCs, Remote Procedure Calls. This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling of
+ * the current task.
+ */
+
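+/*
+ * RPC from secure world asking normal world to allocate shared memory,
+ * either via tee-supplicant (APPL) or from the kernel (KERNEL). The new
+ * buffer is described back to secure world as an fmem parameter carrying
+ * its FF-A global handle.
+ */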
+static void handle_ffa_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, arg->params[0].u.value.b);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, arg->params[0].u.value.b,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ arg->params[0] = (struct optee_msg_param){
+ .attr = OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT,
+ .u.fmem.size = tee_shm_get_size(shm),
+ .u.fmem.global_id = shm->sec_world_id,
+ .u.fmem.internal_offs = shm->offset,
+ };
+
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_ffa_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee *optee,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT)
+ goto err_bad_param;
+
+ shm = optee_shm_from_ffa_handle(optee, arg->params[0].u.value.b);
+ if (!shm)
+ goto err_bad_param;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ goto err_bad_param;
+ }
+ arg->ret = TEEC_SUCCESS;
+ return;
+
+err_bad_param:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+}
+
+static void handle_ffa_rpc_func_cmd(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ handle_ffa_rpc_func_cmd_shm_alloc(ctx, arg);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_ffa_rpc_func_cmd_shm_free(ctx, optee, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+static void optee_handle_ffa_rpc(struct tee_context *ctx, u32 cmd,
+ struct optee_msg_arg *arg)
+{
+ switch (cmd) {
+ case OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD:
+ handle_ffa_rpc_func_cmd(ctx, arg);
+ break;
+ case OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT:
+ /* Interrupt delivered by now */
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n", cmd);
+ break;
+ }
+}
+
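+/*
+ * Sends the direct request and loops until the yielding call completes:
+ * on TEEC_ERROR_BUSY the original request is retried once a secure world
+ * thread becomes available, and any RPC returned in between is handled
+ * before resuming with OPTEE_FFA_YIELDING_CALL_RESUME.
+ */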
+static int optee_ffa_yielding_call(struct tee_context *ctx,
+ struct ffa_send_direct_data *data,
+ struct optee_msg_arg *rpc_arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ const struct ffa_dev_ops *ffa_ops = optee->ffa.ffa_ops;
+ struct ffa_device *ffa_dev = optee->ffa.ffa_dev;
+ struct optee_call_waiter w;
+ u32 cmd = data->data0;
+ u32 w4 = data->data1;
+ u32 w5 = data->data2;
+ u32 w6 = data->data3;
+ int rc;
+
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ rc = ffa_ops->sync_send_receive(ffa_dev, data);
+ if (rc)
+ goto done;
+
+ switch ((int)data->data0) {
+ case TEEC_SUCCESS:
+ break;
+ case TEEC_ERROR_BUSY:
+ if (cmd == OPTEE_FFA_YIELDING_CALL_RESUME) {
+ rc = -EIO;
+ goto done;
+ }
+
+ /*
+ * Out of threads in secure world, wait for a thread
+ * to become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ data->data0 = cmd;
+ data->data1 = w4;
+ data->data2 = w5;
+ data->data3 = w6;
+ continue;
+ default:
+ rc = -EIO;
+ goto done;
+ }
+
+ if (data->data1 == OPTEE_FFA_YIELDING_CALL_RETURN_DONE)
+ goto done;
+
+ /*
+ * OP-TEE has returned with a RPC request.
+ *
+ * Note that data->data4 (passed in register w7) is already
+ * filled in by ffa_ops->sync_send_receive() returning
+ * above.
+ */
+ cond_resched();
+ optee_handle_ffa_rpc(ctx, data->data1, rpc_arg);
+ cmd = OPTEE_FFA_YIELDING_CALL_RESUME;
+ data->data0 = cmd;
+ data->data1 = 0;
+ data->data2 = 0;
+ data->data3 = 0;
+ }
+done:
+ /*
+ * We're done with our thread in secure world; if there are any
+ * thread waiters, wake up one.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/**
+ * optee_ffa_do_call_with_arg() - Do an FF-A call to enter OP-TEE in secure world
+ * @ctx: calling context
+ * @shm: shared memory holding the message to pass to secure world
+ *
+ * Does an FF-A call to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns the return code from FF-A, 0 on success.
+ */
+static int optee_ffa_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ struct ffa_send_direct_data data = {
+ .data0 = OPTEE_FFA_YIELDING_CALL_WITH_ARG,
+ .data1 = (u32)shm->sec_world_id,
+ .data2 = (u32)(shm->sec_world_id >> 32),
+ .data3 = shm->offset,
+ };
+ struct optee_msg_arg *arg = tee_shm_get_va(shm, 0);
+ unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params);
+ struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs);
+
+ return optee_ffa_yielding_call(ctx, &data, rpc_arg);
+}
+
+/*
+ * 6. Driver initialization
+ *
+ * During driver initialization, the OP-TEE Secure Partition is probed
+ * to find out which features it supports so the driver can be initialized
+ * with a matching configuration.
+ */
+
+static bool optee_ffa_api_is_compatible(struct ffa_device *ffa_dev,
+ const struct ffa_dev_ops *ops)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_GET_API_VERSION };
+ int rc;
+
+ ops->mode_32bit_set(ffa_dev);
+
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data0 != OPTEE_FFA_VERSION_MAJOR ||
+ data.data1 < OPTEE_FFA_VERSION_MINOR) {
+ pr_err("Incompatible OP-TEE API version %lu.%lu",
+ data.data0, data.data1);
+ return false;
+ }
+
+ data = (struct ffa_send_direct_data){ OPTEE_FFA_GET_OS_VERSION };
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d\n", rc);
+ return false;
+ }
+ if (data.data2)
+ pr_info("revision %lu.%lu (%08lx)",
+ data.data0, data.data1, data.data2);
+ else
+ pr_info("revision %lu.%lu", data.data0, data.data1);
+
+ return true;
+}
+
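+/*
+ * Asks secure world how many RPC parameters it expects in the second
+ * struct optee_msg_arg used for RPC (see OPTEE_FFA_EXCHANGE_CAPABILITIES
+ * in optee_ffa.h).
+ */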
+static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev,
+ const struct ffa_dev_ops *ops,
+ unsigned int *rpc_arg_count)
+{
+ struct ffa_send_direct_data data = { OPTEE_FFA_EXCHANGE_CAPABILITIES };
+ int rc;
+
+ rc = ops->sync_send_receive(ffa_dev, &data);
+ if (rc) {
+ pr_err("Unexpected error %d", rc);
+ return false;
+ }
+ if (data.data0) {
+ pr_err("Unexpected exchange error %lu", data.data0);
+ return false;
+ }
+
+ *rpc_arg_count = (u8)data.data1;
+
+ return true;
+}
+
+static struct tee_shm_pool *optee_ffa_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_ffa_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
+static void optee_ffa_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP | TEE_GEN_CAP_REG_MEM |
+ TEE_GEN_CAP_MEMREF_NULL,
+ };
+
+ *vers = v;
+}
+
+static int optee_ffa_open(struct tee_context *ctx)
+{
+ return optee_open(ctx, true);
+}
+
+static const struct tee_driver_ops optee_ffa_clnt_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_ffa_shm_register,
+ .shm_unregister = optee_ffa_shm_unregister,
+};
+
+static const struct tee_desc optee_ffa_clnt_desc = {
+ .name = DRIVER_NAME "-ffa-clnt",
+ .ops = &optee_ffa_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_ffa_supp_ops = {
+ .get_version = optee_ffa_get_version,
+ .open = optee_ffa_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_ffa_shm_register, /* same as for clnt ops */
+ .shm_unregister = optee_ffa_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_ffa_supp_desc = {
+ .name = DRIVER_NAME "-ffa-supp",
+ .ops = &optee_ffa_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ffa_ops = {
+ .do_call_with_arg = optee_ffa_do_call_with_arg,
+ .to_msg_param = optee_ffa_to_msg_param,
+ .from_msg_param = optee_ffa_from_msg_param,
+};
+
+static void optee_ffa_remove(struct ffa_device *ffa_dev)
+{
+ struct optee *optee = ffa_dev->dev.driver_data;
+
+ optee_remove_common(optee);
+
+ mutex_destroy(&optee->ffa.mutex);
+ rhashtable_free_and_destroy(&optee->ffa.global_ids, rh_free_fn, NULL);
+
+ kfree(optee);
+}
+
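+/*
+ * Probe: check API and OS version compatibility, exchange capabilities,
+ * set up the dynamic shared memory pool and the client/supplicant tee
+ * devices, then enumerate the devices provided by OP-TEE.
+ */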
+static int optee_ffa_probe(struct ffa_device *ffa_dev)
+{
+ const struct ffa_dev_ops *ffa_ops;
+ unsigned int rpc_arg_count;
+ struct tee_device *teedev;
+ struct optee *optee;
+ int rc;
+
+ ffa_ops = ffa_dev_ops_get(ffa_dev);
+ if (!ffa_ops) {
+ pr_warn("failed \"method\" init: ffa\n");
+ return -ENOENT;
+ }
+
+ if (!optee_ffa_api_is_compatible(ffa_dev, ffa_ops))
+ return -EINVAL;
+
+ if (!optee_ffa_exchange_caps(ffa_dev, ffa_ops, &rpc_arg_count))
+ return -EINVAL;
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+ optee->pool = optee_ffa_config_dyn_shm();
+ if (IS_ERR(optee->pool)) {
+ rc = PTR_ERR(optee->pool);
+ optee->pool = NULL;
+ goto err;
+ }
+
+ optee->ops = &optee_ffa_ops;
+ optee->ffa.ffa_dev = ffa_dev;
+ optee->ffa.ffa_ops = ffa_ops;
+ optee->rpc_arg_count = rpc_arg_count;
+
+ teedev = tee_device_alloc(&optee_ffa_clnt_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_ffa_supp_desc, NULL, optee->pool,
+ optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ rc = rhashtable_init(&optee->ffa.global_ids, &shm_rhash_params);
+ if (rc)
+ goto err;
+ mutex_init(&optee->ffa.mutex);
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ ffa_dev_set_drvdata(ffa_dev, optee);
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc) {
+ optee_ffa_remove(ffa_dev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
+err:
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * device hasn't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ if (optee->pool)
+ tee_shm_pool_free(optee->pool);
+ kfree(optee);
+ return rc;
+}
+
+static const struct ffa_device_id optee_ffa_device_id[] = {
+ /* 486178e0-e7f8-11e3-bc5e0002a5d5c51b */
+ { UUID_INIT(0x486178e0, 0xe7f8, 0x11e3,
+ 0xbc, 0x5e, 0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b) },
+ {}
+};
+
+static struct ffa_driver optee_ffa_driver = {
+ .name = "optee",
+ .probe = optee_ffa_probe,
+ .remove = optee_ffa_remove,
+ .id_table = optee_ffa_device_id,
+};
+
+int optee_ffa_abi_register(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ return ffa_register(&optee_ffa_driver);
+ else
+ return -EOPNOTSUPP;
+}
+
+void optee_ffa_abi_unregister(void)
+{
+ if (IS_REACHABLE(CONFIG_ARM_FFA_TRANSPORT))
+ ffa_unregister(&optee_ffa_driver);
+}
diff --git a/drivers/tee/optee/optee_ffa.h b/drivers/tee/optee/optee_ffa.h
new file mode 100644
index 000000000000..ee3a03fc392c
--- /dev/null
+++ b/drivers/tee/optee/optee_ffa.h
@@ -0,0 +1,153 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright (c) 2019-2021, Linaro Limited
+ */
+
+/*
+ * This file is exported by OP-TEE and is kept in sync between secure world
+ * and normal world drivers. We're using ARM FF-A 1.0 specification.
+ */
+
+#ifndef __OPTEE_FFA_H
+#define __OPTEE_FFA_H
+
+#include <linux/arm_ffa.h>
+
+/*
+ * Normal world sends requests with FFA_MSG_SEND_DIRECT_REQ and
+ * responses are returned with FFA_MSG_SEND_DIRECT_RESP for normal
+ * messages.
+ *
+ * All requests with FFA_MSG_SEND_DIRECT_REQ and FFA_MSG_SEND_DIRECT_RESP
+ * are using the AArch32 SMC calling convention with register usage as
+ * defined in FF-A specification:
+ * w0: Function ID (0x8400006F or 0x84000070)
+ * w1: Source/Destination IDs
+ * w2: Reserved (MBZ)
+ * w3-w7: Implementation defined, free to be used below
+ */
+
+#define OPTEE_FFA_VERSION_MAJOR 1
+#define OPTEE_FFA_VERSION_MINOR 0
+
+#define OPTEE_FFA_BLOCKING_CALL(id) (id)
+#define OPTEE_FFA_YIELDING_CALL_BIT 31
+#define OPTEE_FFA_YIELDING_CALL(id) ((id) | BIT(OPTEE_FFA_YIELDING_CALL_BIT))
+
+/*
+ * Returns the API version implemented, currently follows the FF-A version.
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_API_VERSION
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: OPTEE_FFA_VERSION_MAJOR
+ * w4: OPTEE_FFA_VERSION_MINOR
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_GET_API_VERSION OPTEE_FFA_BLOCKING_CALL(0)
+
+/*
+ * Returns the revision of OP-TEE.
+ *
+ * Used by non-secure world to figure out which version of the Trusted OS
+ * is installed. Note that the returned revision is the revision of the
+ * Trusted OS, not of the API.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_GET_OS_VERSION
+ * w4-w7: Unused (MBZ)
+ *
+ * Return register usage:
+ * w3: CFG_OPTEE_REVISION_MAJOR
+ * w4: CFG_OPTEE_REVISION_MINOR
+ * w5: TEE_IMPL_GIT_SHA1 (or zero if not supported)
+ */
+#define OPTEE_FFA_GET_OS_VERSION OPTEE_FFA_BLOCKING_CALL(1)
+
+/*
+ * Exchange capabilities between normal world and secure world.
+ *
+ * Currently there are no defined capabilities. When features are added new
+ * capabilities may be added.
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_EXCHANGE_CAPABILITIES
+ * w4-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4: Bit[7:0]: Number of parameters needed for RPC to be supplied
+ * as the second MSG arg struct for
+ * OPTEE_FFA_YIELDING_CALL_WITH_ARG.
+ * Bit[31:8]: Reserved (MBZ)
+ * w5-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_EXCHANGE_CAPABILITIES OPTEE_FFA_BLOCKING_CALL(2)
+
+/*
+ * Unregister shared memory
+ *
+ * Call register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_UNREGISTER_SHM
+ * w4: Shared memory handle, lower bits
+ * w5: Shared memory handle, higher bits
+ * w6-w7: Not used (MBZ)
+ *
+ * Return register usage:
+ * w3: Error code, 0 on success
+ * w4-w7: Not used (MBZ)
+ */
+#define OPTEE_FFA_UNREGISTER_SHM OPTEE_FFA_BLOCKING_CALL(3)
+
+/*
+ * Call with struct optee_msg_arg as argument in the supplied shared memory
+ * with a zero internal offset and normal cached memory attributes.
+ * Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_WITH_ARG
+ * w4: Lower 32 bits of a 64-bit Shared memory handle
+ * w5: Upper 32 bits of a 64-bit Shared memory handle
+ * w6: Offset into shared memory pointing to a struct optee_msg_arg;
+ * right after the parameters of this struct (at offset
+ * OPTEE_MSG_GET_ARG_SIZE(num_params)) follows a second struct
+ * optee_msg_arg used for RPC, with reserved space for the number of
+ * RPC parameters as returned by OPTEE_FFA_EXCHANGE_CAPABILITIES.
+ * w7: Not used (MBZ)
+ * Resume from RPC. Register usage:
+ * w3: Service ID, OPTEE_FFA_YIELDING_CALL_RESUME
+ * w4-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Normal return (yielding call is completed). Register usage:
+ * w3: Error code, 0 on success
+ * w4: OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w7: Not used (MBZ)
+ *
+ * RPC interrupt return (RPC from secure world). Register usage:
+ * w3: Error code == 0
+ * w4: Any defined RPC code but OPTEE_FFA_YIELDING_CALL_RETURN_DONE
+ * w5-w6: Not used (MBZ)
+ * w7: Resume info
+ *
+ * Possible error codes in register w3:
+ * 0: Success
+ * FFA_DENIED: w4 isn't one of OPTEE_FFA_YIELDING_CALL_WITH_ARG or
+ * OPTEE_FFA_YIELDING_CALL_RESUME
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_WITH_ARG:
+ * FFA_BUSY: Number of OP-TEE OS threads exceeded,
+ * try again later
+ * FFA_DENIED: RPC shared memory object not found
+ * FFA_INVALID_PARAMETER: Bad shared memory handle or offset into the memory
+ *
+ * Possible error codes for OPTEE_FFA_YIELDING_CALL_RESUME
+ * FFA_INVALID_PARAMETER: Bad resume info
+ */
+#define OPTEE_FFA_YIELDING_CALL_WITH_ARG OPTEE_FFA_YIELDING_CALL(0)
+#define OPTEE_FFA_YIELDING_CALL_RESUME OPTEE_FFA_YIELDING_CALL(1)
+
+#define OPTEE_FFA_YIELDING_CALL_RETURN_DONE 0
+#define OPTEE_FFA_YIELDING_CALL_RETURN_RPC_CMD 1
+#define OPTEE_FFA_YIELDING_CALL_RETURN_INTERRUPT 2
+
+#endif /*__OPTEE_FFA_H*/
diff --git a/drivers/tee/optee/optee_msg.h b/drivers/tee/optee/optee_msg.h
index e3d72d09c484..2422e185d400 100644
--- a/drivers/tee/optee/optee_msg.h
+++ b/drivers/tee/optee/optee_msg.h
@@ -28,6 +28,9 @@
#define OPTEE_MSG_ATTR_TYPE_RMEM_INPUT 0x5
#define OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT 0x6
#define OPTEE_MSG_ATTR_TYPE_RMEM_INOUT 0x7
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INPUT OPTEE_MSG_ATTR_TYPE_RMEM_INPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_OUTPUT OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT
+#define OPTEE_MSG_ATTR_TYPE_FMEM_INOUT OPTEE_MSG_ATTR_TYPE_RMEM_INOUT
#define OPTEE_MSG_ATTR_TYPE_TMEM_INPUT 0x9
#define OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT 0xa
#define OPTEE_MSG_ATTR_TYPE_TMEM_INOUT 0xb
@@ -96,6 +99,8 @@
*/
#define OPTEE_MSG_NONCONTIG_PAGE_SIZE 4096
+#define OPTEE_MSG_FMEM_INVALID_GLOBAL_ID 0xffffffffffffffff
+
/**
* struct optee_msg_param_tmem - temporary memory reference parameter
* @buf_ptr: Address of the buffer
@@ -128,6 +133,23 @@ struct optee_msg_param_rmem {
};
/**
+ * struct optee_msg_param_fmem - FF-A memory reference parameter
+ * @offs_low: Lower bits of offset into shared memory reference
+ * @offs_high: Upper bits of offset into shared memory reference
+ * @internal_offs: Internal offset into the first page of shared memory
+ * reference
+ * @size: Size of the buffer
+ * @global_id: Global identifier of Shared memory
+ */
+struct optee_msg_param_fmem {
+ u32 offs_low;
+ u16 offs_high;
+ u16 internal_offs;
+ u64 size;
+ u64 global_id;
+};
+
+/**
* struct optee_msg_param_value - opaque value parameter
*
* Value parameters are passed unchecked between normal and secure world.
@@ -143,13 +165,15 @@ struct optee_msg_param_value {
* @attr: attributes
* @tmem: parameter by temporary memory reference
* @rmem: parameter by registered memory reference
+ * @fmem: parameter by ffa registered memory reference
* @value: parameter by opaque value
* @octets: parameter by octet string
*
* @attr & OPTEE_MSG_ATTR_TYPE_MASK indicates if tmem, rmem or value is used in
* the union. OPTEE_MSG_ATTR_TYPE_VALUE_* indicates value or octets,
* OPTEE_MSG_ATTR_TYPE_TMEM_* indicates @tmem and
- * OPTEE_MSG_ATTR_TYPE_RMEM_* indicates @rmem,
+ * OPTEE_MSG_ATTR_TYPE_RMEM_* or the alias OPTEE_MSG_ATTR_TYPE_FMEM_* indicates
+ * @rmem or @fmem depending on the conduit.
* OPTEE_MSG_ATTR_TYPE_NONE indicates that none of the members are used.
*/
struct optee_msg_param {
@@ -157,6 +181,7 @@ struct optee_msg_param {
union {
struct optee_msg_param_tmem tmem;
struct optee_msg_param_rmem rmem;
+ struct optee_msg_param_fmem fmem;
struct optee_msg_param_value value;
u8 octets[24];
} u;
diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h
index f6bb4a763ba9..6660e05298db 100644
--- a/drivers/tee/optee/optee_private.h
+++ b/drivers/tee/optee/optee_private.h
@@ -1,17 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#ifndef OPTEE_PRIVATE_H
#define OPTEE_PRIVATE_H
#include <linux/arm-smccc.h>
+#include <linux/rhashtable.h>
#include <linux/semaphore.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include "optee_msg.h"
+#define DRIVER_NAME "optee"
+
#define OPTEE_MAX_ARG_SIZE 1024
/* Some Global Platform error codes used in this driver */
@@ -20,6 +23,7 @@
#define TEEC_ERROR_NOT_SUPPORTED 0xFFFF000A
#define TEEC_ERROR_COMMUNICATION 0xFFFF000E
#define TEEC_ERROR_OUT_OF_MEMORY 0xFFFF000C
+#define TEEC_ERROR_BUSY 0xFFFF000D
#define TEEC_ERROR_SHORT_BUFFER 0xFFFF0010
#define TEEC_ORIGIN_COMMS 0x00000002
@@ -29,6 +33,11 @@ typedef void (optee_invoke_fn)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long,
struct arm_smccc_res *);
+struct optee_call_waiter {
+ struct list_head list_node;
+ struct completion c;
+};
+
struct optee_call_queue {
/* Serializes access to this struct */
struct mutex mutex;
@@ -66,19 +75,65 @@ struct optee_supp {
struct completion reqs_c;
};
+struct optee_smc {
+ optee_invoke_fn *invoke_fn;
+ void *memremaped_shm;
+ u32 sec_caps;
+};
+
+/**
+ * struct optee_ffa - FF-A communication struct
+ * @ffa_dev: FFA device, contains the destination id, the id of
+ * OP-TEE in secure world
+ * @ffa_ops: FFA operations
+ * @mutex: Serializes access to @global_ids
+ * @global_ids: FF-A shared memory global handle translation
+ */
+struct optee_ffa {
+ struct ffa_device *ffa_dev;
+ const struct ffa_dev_ops *ffa_ops;
+ /* Serializes access to @global_ids */
+ struct mutex mutex;
+ struct rhashtable global_ids;
+};
+
+struct optee;
+
+/**
+ * struct optee_ops - OP-TEE driver internal operations
+ * @do_call_with_arg: enters OP-TEE in secure world
+ * @to_msg_param: converts from struct tee_param to OPTEE_MSG parameters
+ * @from_msg_param: converts from OPTEE_MSG parameters to struct tee_param
+ *
+ * These OPs are only supposed to be used internally in the OP-TEE driver
+ * as a way of abstracting the different methods of entering OP-TEE in
+ * secure world.
+ */
+struct optee_ops {
+ int (*do_call_with_arg)(struct tee_context *ctx,
+ struct tee_shm *shm_arg);
+ int (*to_msg_param)(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params);
+ int (*from_msg_param)(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params);
+};
+
/**
* struct optee - main service struct
* @supp_teedev: supplicant device
+ * @ops: internal callbacks for different ways to reach secure
+ * world
* @teedev: client device
- * @invoke_fn: function to issue smc or hvc
+ * @smc: specific to SMC ABI
+ * @ffa: specific to FF-A ABI
* @call_queue: queue of threads waiting to call @invoke_fn
* @wait_queue: queue of threads from secure world waiting for a
* secure world sync object
* @supp: supplicant synchronization struct for RPC to supplicant
* @pool: shared memory pool
- * @memremaped_shm virtual address of memory in shared memory pool
- * @sec_caps: secure world capabilities defined by
- * OPTEE_SMC_SEC_CAP_* in optee_smc.h
+ * @rpc_arg_count: If > 0, the number of RPC parameters to make room for
* @scan_bus_done flag if device registation was already done.
* @scan_bus_wq workqueue to scan optee bus and register optee drivers
* @scan_bus_work workq to scan optee bus and register optee drivers
@@ -86,13 +141,16 @@ struct optee_supp {
struct optee {
struct tee_device *supp_teedev;
struct tee_device *teedev;
- optee_invoke_fn *invoke_fn;
+ const struct optee_ops *ops;
+ union {
+ struct optee_smc smc;
+ struct optee_ffa ffa;
+ };
struct optee_call_queue call_queue;
struct optee_wait_queue wait_queue;
struct optee_supp supp;
struct tee_shm_pool *pool;
- void *memremaped_shm;
- u32 sec_caps;
+ unsigned int rpc_arg_count;
bool scan_bus_done;
struct workqueue_struct *scan_bus_wq;
struct work_struct scan_bus_work;
@@ -127,10 +185,6 @@ struct optee_call_ctx {
size_t num_entries;
};
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
- struct optee_call_ctx *call_ctx);
-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx);
-
void optee_wait_queue_init(struct optee_wait_queue *wq);
void optee_wait_queue_exit(struct optee_wait_queue *wq);
@@ -148,43 +202,68 @@ int optee_supp_recv(struct tee_context *ctx, u32 *func, u32 *num_params,
int optee_supp_send(struct tee_context *ctx, u32 ret, u32 num_params,
struct tee_param *param);
-u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg);
int optee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param);
+int optee_close_session_helper(struct tee_context *ctx, u32 session);
int optee_close_session(struct tee_context *ctx, u32 session);
int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
struct tee_param *param);
int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session);
-void optee_enable_shm_cache(struct optee *optee);
-void optee_disable_shm_cache(struct optee *optee);
-void optee_disable_unmapped_shm_cache(struct optee *optee);
+#define PTA_CMD_GET_DEVICES 0x0
+#define PTA_CMD_GET_DEVICES_SUPP 0x1
+int optee_enumerate_devices(u32 func);
+void optee_unregister_devices(void);
-int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
-int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm);
+int optee_pool_op_alloc_helper(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size,
+ int (*shm_register)(struct tee_context *ctx,
+ struct tee_shm *shm,
+ struct page **pages,
+ size_t num_pages,
+ unsigned long start));
-int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
- struct page **pages, size_t num_pages,
- unsigned long start);
-int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm);
-int optee_from_msg_param(struct tee_param *params, size_t num_params,
- const struct optee_msg_param *msg_params);
-int optee_to_msg_param(struct optee_msg_param *msg_params, size_t num_params,
- const struct tee_param *params);
+void optee_remove_common(struct optee *optee);
+int optee_open(struct tee_context *ctx, bool cap_memref_null);
+void optee_release(struct tee_context *ctx);
+void optee_release_supp(struct tee_context *ctx);
-u64 *optee_allocate_pages_list(size_t num_entries);
-void optee_free_pages_list(void *array, size_t num_entries);
-void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
- size_t page_offset);
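+/*
+ * Value-parameter conversion helpers shared by the SMC and FF-A ABIs;
+ * memref parameters are converted by ABI specific code.
+ */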
+static inline void optee_from_msg_param_value(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_VALUE_INPUT;
+ p->u.value.a = mp->u.value.a;
+ p->u.value.b = mp->u.value.b;
+ p->u.value.c = mp->u.value.c;
+}
-#define PTA_CMD_GET_DEVICES 0x0
-#define PTA_CMD_GET_DEVICES_SUPP 0x1
-int optee_enumerate_devices(u32 func);
-void optee_unregister_devices(void);
+static inline void optee_to_msg_param_value(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT;
+ mp->u.value.a = p->u.value.a;
+ mp->u.value.b = p->u.value.b;
+ mp->u.value.c = p->u.value.c;
+}
+
+void optee_cq_wait_init(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_for_completion(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+void optee_cq_wait_final(struct optee_call_queue *cq,
+ struct optee_call_waiter *w);
+int optee_check_mem_type(unsigned long start, size_t num_pages);
+struct tee_shm *optee_get_msg_arg(struct tee_context *ctx, size_t num_params,
+ struct optee_msg_arg **msg_arg);
+
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz);
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm);
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg);
/*
* Small helpers
@@ -201,4 +280,10 @@ static inline void reg_pair_from_64(u32 *reg0, u32 *reg1, u64 val)
*reg1 = val;
}
+/* Registration of the ABIs */
+int optee_smc_abi_register(void);
+void optee_smc_abi_unregister(void);
+int optee_ffa_abi_register(void);
+void optee_ffa_abi_unregister(void);
+
#endif /*OPTEE_PRIVATE_H*/
diff --git a/drivers/tee/optee/rpc.c b/drivers/tee/optee/rpc.c
index efbaff7ad7e5..cd642e340eaf 100644
--- a/drivers/tee/optee/rpc.c
+++ b/drivers/tee/optee/rpc.c
@@ -1,17 +1,15 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2015-2016, Linaro Limited
+ * Copyright (c) 2015-2021, Linaro Limited
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
-#include <linux/device.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
-#include "optee_smc.h"
#include "optee_rpc_cmd.h"
struct wq_entry {
@@ -55,6 +53,7 @@ bad:
static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
struct optee_msg_arg *arg)
{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
struct tee_param *params;
struct i2c_adapter *adapter;
struct i2c_msg msg = { };
@@ -79,7 +78,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
return;
}
- if (optee_from_msg_param(params, arg->num_params, arg->params))
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params))
goto bad;
for (i = 0; i < arg->num_params; i++) {
@@ -122,7 +122,8 @@ static void handle_rpc_func_cmd_i2c_transfer(struct tee_context *ctx,
arg->ret = TEEC_ERROR_COMMUNICATION;
} else {
params[3].u.value.a = msg.len;
- if (optee_to_msg_param(arg->params, arg->num_params, params))
+ if (optee->ops->to_msg_param(optee, arg->params,
+ arg->num_params, params))
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
else
arg->ret = TEEC_SUCCESS;
@@ -234,7 +235,7 @@ bad:
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
}
-static void handle_rpc_supp_cmd(struct tee_context *ctx,
+static void handle_rpc_supp_cmd(struct tee_context *ctx, struct optee *optee,
struct optee_msg_arg *arg)
{
struct tee_param *params;
@@ -248,20 +249,22 @@ static void handle_rpc_supp_cmd(struct tee_context *ctx,
return;
}
- if (optee_from_msg_param(params, arg->num_params, arg->params)) {
+ if (optee->ops->from_msg_param(optee, params, arg->num_params,
+ arg->params)) {
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
goto out;
}
arg->ret = optee_supp_thrd_req(ctx, arg->cmd, arg->num_params, params);
- if (optee_to_msg_param(arg->params, arg->num_params, params))
+ if (optee->ops->to_msg_param(optee, arg->params, arg->num_params,
+ params))
arg->ret = TEEC_ERROR_BAD_PARAMETERS;
out:
kfree(params);
}
-static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
+struct tee_shm *optee_rpc_cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
{
u32 ret;
struct tee_param param;
@@ -284,103 +287,7 @@ static struct tee_shm *cmd_alloc_suppl(struct tee_context *ctx, size_t sz)
return shm;
}
-static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
- struct optee_msg_arg *arg,
- struct optee_call_ctx *call_ctx)
-{
- phys_addr_t pa;
- struct tee_shm *shm;
- size_t sz;
- size_t n;
-
- arg->ret_origin = TEEC_ORIGIN_COMMS;
-
- if (!arg->num_params ||
- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- for (n = 1; n < arg->num_params; n++) {
- if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
- }
-
- sz = arg->params[0].u.value.b;
- switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_SHM_TYPE_APPL:
- shm = cmd_alloc_suppl(ctx, sz);
- break;
- case OPTEE_RPC_SHM_TYPE_KERNEL:
- shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
- break;
- default:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- if (IS_ERR(shm)) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- return;
- }
-
- if (tee_shm_get_pa(shm, 0, &pa)) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- goto bad;
- }
-
- sz = tee_shm_get_size(shm);
-
- if (tee_shm_is_registered(shm)) {
- struct page **pages;
- u64 *pages_list;
- size_t page_num;
-
- pages = tee_shm_get_pages(shm, &page_num);
- if (!pages || !page_num) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
-
- pages_list = optee_allocate_pages_list(page_num);
- if (!pages_list) {
- arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
- goto bad;
- }
-
- call_ctx->pages_list = pages_list;
- call_ctx->num_entries = page_num;
-
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
- OPTEE_MSG_ATTR_NONCONTIG;
- /*
- * In the least bits of u.tmem.buf_ptr we store buffer offset
- * from 4k page, as described in OP-TEE ABI.
- */
- arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
- (tee_shm_get_page_offset(shm) &
- (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
- arg->params[0].u.tmem.size = tee_shm_get_size(shm);
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
-
- optee_fill_pages_list(pages_list, pages, page_num,
- tee_shm_get_page_offset(shm));
- } else {
- arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
- arg->params[0].u.tmem.buf_ptr = pa;
- arg->params[0].u.tmem.size = sz;
- arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
- }
-
- arg->ret = TEEC_SUCCESS;
- return;
-bad:
- tee_shm_free(shm);
-}
-
-static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
+void optee_rpc_cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
{
struct tee_param param;
@@ -405,60 +312,9 @@ static void cmd_free_suppl(struct tee_context *ctx, struct tee_shm *shm)
optee_supp_thrd_req(ctx, OPTEE_RPC_CMD_SHM_FREE, 1, &param);
}
-static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
- struct optee_msg_arg *arg)
-{
- struct tee_shm *shm;
-
- arg->ret_origin = TEEC_ORIGIN_COMMS;
-
- if (arg->num_params != 1 ||
- arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- return;
- }
-
- shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
- switch (arg->params[0].u.value.a) {
- case OPTEE_RPC_SHM_TYPE_APPL:
- cmd_free_suppl(ctx, shm);
- break;
- case OPTEE_RPC_SHM_TYPE_KERNEL:
- tee_shm_free(shm);
- break;
- default:
- arg->ret = TEEC_ERROR_BAD_PARAMETERS;
- }
- arg->ret = TEEC_SUCCESS;
-}
-
-static void free_pages_list(struct optee_call_ctx *call_ctx)
-{
- if (call_ctx->pages_list) {
- optee_free_pages_list(call_ctx->pages_list,
- call_ctx->num_entries);
- call_ctx->pages_list = NULL;
- call_ctx->num_entries = 0;
- }
-}
-
-void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
-{
- free_pages_list(call_ctx);
-}
-
-static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
- struct tee_shm *shm,
- struct optee_call_ctx *call_ctx)
+void optee_rpc_cmd(struct tee_context *ctx, struct optee *optee,
+ struct optee_msg_arg *arg)
{
- struct optee_msg_arg *arg;
-
- arg = tee_shm_get_va(shm, 0);
- if (IS_ERR(arg)) {
- pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
- return;
- }
-
switch (arg->cmd) {
case OPTEE_RPC_CMD_GET_TIME:
handle_rpc_func_cmd_get_time(arg);
@@ -469,73 +325,12 @@ static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
case OPTEE_RPC_CMD_SUSPEND:
handle_rpc_func_cmd_wait(arg);
break;
- case OPTEE_RPC_CMD_SHM_ALLOC:
- free_pages_list(call_ctx);
- handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
- break;
- case OPTEE_RPC_CMD_SHM_FREE:
- handle_rpc_func_cmd_shm_free(ctx, arg);
- break;
case OPTEE_RPC_CMD_I2C_TRANSFER:
handle_rpc_func_cmd_i2c_transfer(ctx, arg);
break;
default:
- handle_rpc_supp_cmd(ctx, arg);
+ handle_rpc_supp_cmd(ctx, optee, arg);
}
}
-/**
- * optee_handle_rpc() - handle RPC from secure world
- * @ctx: context doing the RPC
- * @param: value of registers for the RPC
- * @call_ctx: call context. Preserved during one OP-TEE invocation
- *
- * Result of RPC is written back into @param.
- */
-void optee_handle_rpc(struct tee_context *ctx, struct optee_rpc_param *param,
- struct optee_call_ctx *call_ctx)
-{
- struct tee_device *teedev = ctx->teedev;
- struct optee *optee = tee_get_drvdata(teedev);
- struct tee_shm *shm;
- phys_addr_t pa;
-
- switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
- case OPTEE_SMC_RPC_FUNC_ALLOC:
- shm = tee_shm_alloc(ctx, param->a1,
- TEE_SHM_MAPPED | TEE_SHM_PRIV);
- if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
- reg_pair_from_64(&param->a1, &param->a2, pa);
- reg_pair_from_64(&param->a4, &param->a5,
- (unsigned long)shm);
- } else {
- param->a1 = 0;
- param->a2 = 0;
- param->a4 = 0;
- param->a5 = 0;
- }
- break;
- case OPTEE_SMC_RPC_FUNC_FREE:
- shm = reg_pair_to_ptr(param->a1, param->a2);
- tee_shm_free(shm);
- break;
- case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
- /*
- * A foreign interrupt was raised while secure world was
- * executing, since they are handled in Linux a dummy RPC is
- * performed to let Linux take the interrupt through the normal
- * vector.
- */
- break;
- case OPTEE_SMC_RPC_FUNC_CMD:
- shm = reg_pair_to_ptr(param->a1, param->a2);
- handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
- break;
- default:
- pr_warn("Unknown RPC func 0x%x\n",
- (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
- break;
- }
- param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
-}
diff --git a/drivers/tee/optee/shm_pool.c b/drivers/tee/optee/shm_pool.c
deleted file mode 100644
index d167039af519..000000000000
--- a/drivers/tee/optee/shm_pool.c
+++ /dev/null
@@ -1,101 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2015, Linaro Limited
- * Copyright (c) 2017, EPAM Systems
- */
-#include <linux/device.h>
-#include <linux/dma-buf.h>
-#include <linux/genalloc.h>
-#include <linux/slab.h>
-#include <linux/tee_drv.h>
-#include "optee_private.h"
-#include "optee_smc.h"
-#include "shm_pool.h"
-
-static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm, size_t size)
-{
- unsigned int order = get_order(size);
- struct page *page;
- int rc = 0;
-
- page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
- if (!page)
- return -ENOMEM;
-
- shm->kaddr = page_address(page);
- shm->paddr = page_to_phys(page);
- shm->size = PAGE_SIZE << order;
-
- /*
- * Shared memory private to the OP-TEE driver doesn't need
- * to be registered with OP-TEE.
- */
- if (!(shm->flags & TEE_SHM_PRIV)) {
- unsigned int nr_pages = 1 << order, i;
- struct page **pages;
-
- pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
- if (!pages) {
- rc = -ENOMEM;
- goto err;
- }
-
- for (i = 0; i < nr_pages; i++) {
- pages[i] = page;
- page++;
- }
-
- shm->flags |= TEE_SHM_REGISTER;
- rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
- (unsigned long)shm->kaddr);
- kfree(pages);
- if (rc)
- goto err;
- }
-
- return 0;
-
-err:
- __free_pages(page, order);
- return rc;
-}
-
-static void pool_op_free(struct tee_shm_pool_mgr *poolm,
- struct tee_shm *shm)
-{
- if (!(shm->flags & TEE_SHM_PRIV))
- optee_shm_unregister(shm->ctx, shm);
-
- free_pages((unsigned long)shm->kaddr, get_order(shm->size));
- shm->kaddr = NULL;
-}
-
-static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
-{
- kfree(poolm);
-}
-
-static const struct tee_shm_pool_mgr_ops pool_ops = {
- .alloc = pool_op_alloc,
- .free = pool_op_free,
- .destroy_poolmgr = pool_op_destroy_poolmgr,
-};
-
-/**
- * optee_shm_pool_alloc_pages() - create page-based allocator pool
- *
- * This pool is used when OP-TEE supports dymanic SHM. In this case
- * command buffers and such are allocated from kernel's own memory.
- */
-struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
-{
- struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- if (!mgr)
- return ERR_PTR(-ENOMEM);
-
- mgr->ops = &pool_ops;
-
- return mgr;
-}
diff --git a/drivers/tee/optee/shm_pool.h b/drivers/tee/optee/shm_pool.h
deleted file mode 100644
index 28109d991c4b..000000000000
--- a/drivers/tee/optee/shm_pool.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2015, Linaro Limited
- * Copyright (c) 2016, EPAM Systems
- */
-
-#ifndef SHM_POOL_H
-#define SHM_POOL_H
-
-#include <linux/tee_drv.h>
-
-struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void);
-
-#endif
diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c
new file mode 100644
index 000000000000..6196d7c3888f
--- /dev/null
+++ b/drivers/tee/optee/smc_abi.c
@@ -0,0 +1,1362 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2015-2021, Linaro Limited
+ * Copyright (c) 2016, EPAM Systems
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/arm-smccc.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/tee_drv.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include "optee_private.h"
+#include "optee_smc.h"
+#include "optee_rpc_cmd.h"
+#define CREATE_TRACE_POINTS
+#include "optee_trace.h"
+
+/*
+ * This file implements the SMC ABI used when communicating with secure world
+ * OP-TEE OS via raw SMCs.
+ * This file is divided into the following sections:
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ * 2. Low level support functions to register shared memory in secure world
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ * 4. Do a normal scheduled call into secure world
+ * 5. Driver initialization.
+ */
+
+#define OPTEE_SHM_NUM_PRIV_PAGES CONFIG_OPTEE_SHM_NUM_PRIV_PAGES
+
+/*
+ * 1. Convert between struct tee_param and struct optee_msg_param
+ *
+ * optee_from_msg_param() and optee_to_msg_param() are the main
+ * functions.
+ */
+
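+/*
+ * Converts a temporary-memory OPTEE_MSG parameter (physical address based)
+ * to a tee_param memref and checks that the referenced range is covered by
+ * the shm object.
+ */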
+static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+ phys_addr_t pa;
+ int rc;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_TMEM_INPUT;
+ p->u.memref.size = mp->u.tmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.tmem.shm_ref;
+ if (!shm) {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(shm, 0, &pa);
+ if (rc)
+ return rc;
+
+ p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa;
+ p->u.memref.shm = shm;
+
+ /* Check that the memref is covered by the shm object */
+ if (p->u.memref.size) {
+ size_t o = p->u.memref.shm_offs +
+ p->u.memref.size - 1;
+
+ rc = tee_shm_get_pa(shm, o, NULL);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static void from_msg_param_reg_mem(struct tee_param *p, u32 attr,
+ const struct optee_msg_param *mp)
+{
+ struct tee_shm *shm;
+
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT +
+ attr - OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ p->u.memref.size = mp->u.rmem.size;
+ shm = (struct tee_shm *)(unsigned long)mp->u.rmem.shm_ref;
+
+ if (shm) {
+ p->u.memref.shm_offs = mp->u.rmem.offs;
+ p->u.memref.shm = shm;
+ } else {
+ p->u.memref.shm_offs = 0;
+ p->u.memref.shm = NULL;
+ }
+}
+
+/**
+ * optee_from_msg_param() - convert from OPTEE_MSG parameters to
+ * struct tee_param
+ * @optee: main service struct
+ * @params: subsystem internal parameter representation
+ * @num_params: number of elements in the parameter arrays
+ * @msg_params: OPTEE_MSG parameters
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_from_msg_param(struct optee *optee, struct tee_param *params,
+ size_t num_params,
+ const struct optee_msg_param *msg_params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ struct tee_param *p = params + n;
+ const struct optee_msg_param *mp = msg_params + n;
+ u32 attr = mp->attr & OPTEE_MSG_ATTR_TYPE_MASK;
+
+ switch (attr) {
+ case OPTEE_MSG_ATTR_TYPE_NONE:
+ p->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&p->u, 0, sizeof(p->u));
+ break;
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_VALUE_INOUT:
+ optee_from_msg_param_value(p, attr, mp);
+ break;
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_TMEM_INOUT:
+ rc = from_msg_param_tmp_mem(p, attr, mp);
+ if (rc)
+ return rc;
+ break;
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_OUTPUT:
+ case OPTEE_MSG_ATTR_TYPE_RMEM_INOUT:
+ from_msg_param_reg_mem(p, attr, mp);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int to_msg_param_tmp_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ int rc;
+ phys_addr_t pa;
+
+ mp->attr = OPTEE_MSG_ATTR_TYPE_TMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.tmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.tmem.size = p->u.memref.size;
+
+ if (!p->u.memref.shm) {
+ mp->u.tmem.buf_ptr = 0;
+ return 0;
+ }
+
+ rc = tee_shm_get_pa(p->u.memref.shm, p->u.memref.shm_offs, &pa);
+ if (rc)
+ return rc;
+
+ mp->u.tmem.buf_ptr = pa;
+ mp->attr |= OPTEE_MSG_ATTR_CACHE_PREDEFINED <<
+ OPTEE_MSG_ATTR_CACHE_SHIFT;
+
+ return 0;
+}
+
+static int to_msg_param_reg_mem(struct optee_msg_param *mp,
+ const struct tee_param *p)
+{
+ mp->attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT + p->attr -
+ TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT;
+
+ mp->u.rmem.shm_ref = (unsigned long)p->u.memref.shm;
+ mp->u.rmem.size = p->u.memref.size;
+ mp->u.rmem.offs = p->u.memref.shm_offs;
+ return 0;
+}
+
+/**
+ * optee_to_msg_param() - convert from struct tee_param to OPTEE_MSG parameters
+ * @optee: main service struct
+ * @msg_params: OPTEE_MSG parameters
+ * @num_params: number of elements in the parameter arrays
+ * @params: subsystem internal parameter representation
+ * Returns 0 on success or <0 on failure
+ */
+static int optee_to_msg_param(struct optee *optee,
+ struct optee_msg_param *msg_params,
+ size_t num_params, const struct tee_param *params)
+{
+ int rc;
+ size_t n;
+
+ for (n = 0; n < num_params; n++) {
+ const struct tee_param *p = params + n;
+ struct optee_msg_param *mp = msg_params + n;
+
+ switch (p->attr) {
+ case TEE_IOCTL_PARAM_ATTR_TYPE_NONE:
+ mp->attr = TEE_IOCTL_PARAM_ATTR_TYPE_NONE;
+ memset(&mp->u, 0, sizeof(mp->u));
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT:
+ optee_to_msg_param_value(mp, p);
+ break;
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT:
+ case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT:
+ if (tee_shm_is_registered(p->u.memref.shm))
+ rc = to_msg_param_reg_mem(mp, p);
+ else
+ rc = to_msg_param_tmp_mem(mp, p);
+ if (rc)
+ return rc;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * 2. Low level support functions to register shared memory in secure world
+ *
+ * Functions to enable/disable shared memory caching in secure world, that
+ * is, lazy freeing of previously allocated shared memory. Freeing is
+ * performed when a request has been completed.
+ *
+ * Functions to register and unregister shared memory both for normal
+ * clients and for tee-supplicant.
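+ *
+ * Disabling the cache drains it one object at a time: each call returns a
+ * cached shared memory object to be freed, until secure world reports
+ * OPTEE_SMC_RETURN_ENOTAVAIL.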
+ */
+
+/**
+ * optee_enable_shm_cache() - Enables caching of some shared memory allocations
+ * in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_enable_shm_cache(struct optee *optee)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == OPTEE_SMC_RETURN_OK)
+ break;
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * __optee_disable_shm_cache() - Disables caching of some shared memory
+ * allocation in OP-TEE
+ * @optee: main service struct
+ * @is_mapped: true if the cached shared memory addresses were mapped by this
+ * kernel, are safe to dereference, and should be freed
+ */
+static void __optee_disable_shm_cache(struct optee *optee, bool is_mapped)
+{
+ struct optee_call_waiter w;
+
+ /* We need to retry until secure world isn't busy. */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_disable_shm_cache_result result;
+ } res;
+
+ optee->smc.invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE,
+ 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
+ break; /* All shm's freed */
+ if (res.result.status == OPTEE_SMC_RETURN_OK) {
+ struct tee_shm *shm;
+
+ /*
+ * Shared memory references that were not mapped by
+ * this kernel must be ignored to prevent a crash.
+ */
+ if (!is_mapped)
+ continue;
+
+ shm = reg_pair_to_ptr(res.result.shm_upper32,
+ res.result.shm_lower32);
+ tee_shm_free(shm);
+ } else {
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ }
+ }
+ optee_cq_wait_final(&optee->call_queue, &w);
+}
+
+/**
+ * optee_disable_shm_cache() - Disables caching of mapped shared memory
+ * allocations in OP-TEE
+ * @optee: main service struct
+ */
+static void optee_disable_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, true);
+}
+
+/**
+ * optee_disable_unmapped_shm_cache() - Disables caching of shared memory
+ * allocations in OP-TEE which are not
+ * currently mapped
+ * @optee: main service struct
+ */
+static void optee_disable_unmapped_shm_cache(struct optee *optee)
+{
+ return __optee_disable_shm_cache(optee, false);
+}
+
+#define PAGELIST_ENTRIES_PER_PAGE \
+ ((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)
+
+/*
+ * The final entry in each pagelist page is a pointer to the next
+ * pagelist page.
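+ *
+ * For example, with the usual 4 KiB OPTEE_MSG_NONCONTIG_PAGE_SIZE each
+ * pagelist page holds 511 page addresses plus the link, so a buffer of
+ * 1000 pages needs two pagelist pages (8 KiB).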
+ */
+static size_t get_pages_list_size(size_t num_entries)
+{
+ int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);
+
+ return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+}
+
+static u64 *optee_allocate_pages_list(size_t num_entries)
+{
+ return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
+}
+
+static void optee_free_pages_list(void *list, size_t num_entries)
+{
+ free_pages_exact(list, get_pages_list_size(num_entries));
+}
+
+/**
+ * optee_fill_pages_list() - write list of user pages to given shared
+ * buffer.
+ *
+ * @dst: page-aligned buffer where list of pages will be stored
+ * @pages: array of pages that represents shared buffer
+ * @num_pages: number of entries in @pages
+ * @page_offset: offset of user buffer from page start
+ *
+ * @dst should be big enough to hold the list of user page addresses and
+ * links to the next pages of the buffer.
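+ *
+ * Note that the list entries are OPTEE_MSG_NONCONTIG_PAGE_SIZE-sized
+ * chunks, so a single Linux page contributes several entries when
+ * PAGE_SIZE is larger than the OP-TEE page size.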
+ */
+static void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
+ size_t page_offset)
+{
+ int n = 0;
+ phys_addr_t optee_page;
+ /*
+ * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
+ * for details.
+ */
+ struct {
+ u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
+ u64 next_page_data;
+ } *pages_data;
+
+ /*
+ * Currently OP-TEE uses a 4k page size and it does not look like
+ * this will change in the future. On the other hand, there are no
+ * known ARM architectures with a page size smaller than 4k.
+ * Thus the build assert below looks redundant, but the following
+ * code relies heavily on this assumption, so it is better to be
+ * safe than sorry.
+ */
+ BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ pages_data = (void *)dst;
+ /*
+ * If the Linux page is bigger than 4k and the user buffer offset is
+ * larger than 4k/8k/12k/etc, this skips the leading 4k chunks,
+ * because they carry no data of interest to OP-TEE.
+ */
+ optee_page = page_to_phys(*pages) +
+ round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);
+
+ while (true) {
+ pages_data->pages_list[n++] = optee_page;
+
+ if (n == PAGELIST_ENTRIES_PER_PAGE) {
+ pages_data->next_page_data =
+ virt_to_phys(pages_data + 1);
+ pages_data++;
+ n = 0;
+ }
+
+ optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
+ if (!(optee_page & ~PAGE_MASK)) {
+ if (!--num_pages)
+ break;
+ pages++;
+ optee_page = page_to_phys(*pages);
+ }
+ }
+}
+
+static int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ u64 *pages_list;
+ int rc;
+
+ if (!num_pages)
+ return -EINVAL;
+
+ rc = optee_check_mem_type(start, num_pages);
+ if (rc)
+ return rc;
+
+ pages_list = optee_allocate_pages_list(num_pages);
+ if (!pages_list)
+ return -ENOMEM;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg)) {
+ rc = PTR_ERR(shm_arg);
+ goto out;
+ }
+
+ optee_fill_pages_list(pages_list, pages, num_pages,
+ tee_shm_get_page_offset(shm));
+
+ msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
+ msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
+ msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
+ /*
+ * In the least significant bits of msg_arg->params->u.tmem.buf_ptr we
+ * store the buffer offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+
+ tee_shm_free(shm_arg);
+out:
+ optee_free_pages_list(pages_list, num_pages);
+ return rc;
+}
+
+static int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_msg_arg *msg_arg;
+ struct tee_shm *shm_arg;
+ int rc = 0;
+
+ shm_arg = optee_get_msg_arg(ctx, 1, &msg_arg);
+ if (IS_ERR(shm_arg))
+ return PTR_ERR(shm_arg);
+
+ msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
+
+ msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
+ msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;
+
+ if (optee->ops->do_call_with_arg(ctx, shm_arg) ||
+ msg_arg->ret != TEEC_SUCCESS)
+ rc = -EINVAL;
+ tee_shm_free(shm_arg);
+ return rc;
+}
+
+static int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
+ struct page **pages, size_t num_pages,
+ unsigned long start)
+{
+ /*
+ * We don't want to register supplicant memory in OP-TEE.
+ * Instead, information about it is passed in the RPC code path.
+ */
+ return optee_check_mem_type(start, num_pages);
+}
+
+static int optee_shm_unregister_supp(struct tee_context *ctx,
+ struct tee_shm *shm)
+{
+ return 0;
+}
+
+/*
+ * 3. Dynamic shared memory pool based on alloc_pages()
+ *
+ * Implements an OP-TEE specific shared memory pool which is used
+ * when dynamic shared memory is supported by secure world.
+ *
+ * The main function is optee_shm_pool_alloc_pages().
+ */
+
+static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm, size_t size)
+{
+ /*
+ * Shared memory private to the OP-TEE driver doesn't need
+ * to be registered with OP-TEE.
+ */
+ if (shm->flags & TEE_SHM_PRIV)
+ return optee_pool_op_alloc_helper(poolm, shm, size, NULL);
+
+ return optee_pool_op_alloc_helper(poolm, shm, size, optee_shm_register);
+}
+
+static void pool_op_free(struct tee_shm_pool_mgr *poolm,
+ struct tee_shm *shm)
+{
+ if (!(shm->flags & TEE_SHM_PRIV))
+ optee_shm_unregister(shm->ctx, shm);
+
+ free_pages((unsigned long)shm->kaddr, get_order(shm->size));
+ shm->kaddr = NULL;
+}
+
+static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
+{
+ kfree(poolm);
+}
+
+static const struct tee_shm_pool_mgr_ops pool_ops = {
+ .alloc = pool_op_alloc,
+ .free = pool_op_free,
+ .destroy_poolmgr = pool_op_destroy_poolmgr,
+};
+
+/**
+ * optee_shm_pool_alloc_pages() - create page-based allocator pool
+ *
+ * This pool is used when OP-TEE supports dynamic SHM. In this case
+ * command buffers and such are allocated from the kernel's own memory.
+ */
+static struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
+{
+ struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
+
+ if (!mgr)
+ return ERR_PTR(-ENOMEM);
+
+ mgr->ops = &pool_ops;
+
+ return mgr;
+}
+
+/*
+ * 4. Do a normal scheduled call into secure world
+ *
+ * The function optee_smc_do_call_with_arg() performs a normal scheduled
+ * call into secure world. During this call secure world may request help
+ * from normal world using RPCs (Remote Procedure Calls). This includes
+ * delivery of non-secure interrupts to, for instance, allow rescheduling
+ * of the current task.
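+ *
+ * A single invocation may therefore bounce between secure world and normal
+ * world several times before the final return code is produced.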
+ */
+
+static void handle_rpc_func_cmd_shm_free(struct tee_context *ctx,
+ struct optee_msg_arg *arg)
+{
+ struct tee_shm *shm;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (arg->num_params != 1 ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ shm = (struct tee_shm *)(unsigned long)arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ optee_rpc_cmd_free_suppl(ctx, shm);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ tee_shm_free(shm);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ arg->ret = TEEC_SUCCESS;
+}
+
+static void handle_rpc_func_cmd_shm_alloc(struct tee_context *ctx,
+ struct optee_msg_arg *arg,
+ struct optee_call_ctx *call_ctx)
+{
+ phys_addr_t pa;
+ struct tee_shm *shm;
+ size_t sz;
+ size_t n;
+
+ arg->ret_origin = TEEC_ORIGIN_COMMS;
+
+ if (!arg->num_params ||
+ arg->params[0].attr != OPTEE_MSG_ATTR_TYPE_VALUE_INPUT) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ for (n = 1; n < arg->num_params; n++) {
+ if (arg->params[n].attr != OPTEE_MSG_ATTR_TYPE_NONE) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+ }
+
+ sz = arg->params[0].u.value.b;
+ switch (arg->params[0].u.value.a) {
+ case OPTEE_RPC_SHM_TYPE_APPL:
+ shm = optee_rpc_cmd_alloc_suppl(ctx, sz);
+ break;
+ case OPTEE_RPC_SHM_TYPE_KERNEL:
+ shm = tee_shm_alloc(ctx, sz, TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ break;
+ default:
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ return;
+ }
+
+ if (IS_ERR(shm)) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ return;
+ }
+
+ if (tee_shm_get_pa(shm, 0, &pa)) {
+ arg->ret = TEEC_ERROR_BAD_PARAMETERS;
+ goto bad;
+ }
+
+ sz = tee_shm_get_size(shm);
+
+ if (tee_shm_is_registered(shm)) {
+ struct page **pages;
+ u64 *pages_list;
+ size_t page_num;
+
+ pages = tee_shm_get_pages(shm, &page_num);
+ if (!pages || !page_num) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
+ pages_list = optee_allocate_pages_list(page_num);
+ if (!pages_list) {
+ arg->ret = TEEC_ERROR_OUT_OF_MEMORY;
+ goto bad;
+ }
+
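+ /*
+ * Stash the list in the call context; it must stay alive while secure
+ * world uses it and is freed in optee_rpc_finalize_call() or before
+ * the context is reused for a new SHM_ALLOC request.
+ */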
+ call_ctx->pages_list = pages_list;
+ call_ctx->num_entries = page_num;
+
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
+ OPTEE_MSG_ATTR_NONCONTIG;
+ /*
+ * In the least significant bits of u.tmem.buf_ptr we store the buffer
+ * offset from the 4k page, as described in the OP-TEE ABI.
+ */
+ arg->params[0].u.tmem.buf_ptr = virt_to_phys(pages_list) |
+ (tee_shm_get_page_offset(shm) &
+ (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));
+ arg->params[0].u.tmem.size = tee_shm_get_size(shm);
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+
+ optee_fill_pages_list(pages_list, pages, page_num,
+ tee_shm_get_page_offset(shm));
+ } else {
+ arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT;
+ arg->params[0].u.tmem.buf_ptr = pa;
+ arg->params[0].u.tmem.size = sz;
+ arg->params[0].u.tmem.shm_ref = (unsigned long)shm;
+ }
+
+ arg->ret = TEEC_SUCCESS;
+ return;
+bad:
+ tee_shm_free(shm);
+}
+
+static void free_pages_list(struct optee_call_ctx *call_ctx)
+{
+ if (call_ctx->pages_list) {
+ optee_free_pages_list(call_ctx->pages_list,
+ call_ctx->num_entries);
+ call_ctx->pages_list = NULL;
+ call_ctx->num_entries = 0;
+ }
+}
+
+static void optee_rpc_finalize_call(struct optee_call_ctx *call_ctx)
+{
+ free_pages_list(call_ctx);
+}
+
+static void handle_rpc_func_cmd(struct tee_context *ctx, struct optee *optee,
+ struct tee_shm *shm,
+ struct optee_call_ctx *call_ctx)
+{
+ struct optee_msg_arg *arg;
+
+ arg = tee_shm_get_va(shm, 0);
+ if (IS_ERR(arg)) {
+ pr_err("%s: tee_shm_get_va %p failed\n", __func__, shm);
+ return;
+ }
+
+ switch (arg->cmd) {
+ case OPTEE_RPC_CMD_SHM_ALLOC:
+ free_pages_list(call_ctx);
+ handle_rpc_func_cmd_shm_alloc(ctx, arg, call_ctx);
+ break;
+ case OPTEE_RPC_CMD_SHM_FREE:
+ handle_rpc_func_cmd_shm_free(ctx, arg);
+ break;
+ default:
+ optee_rpc_cmd(ctx, optee, arg);
+ }
+}
+
+/**
+ * optee_handle_rpc() - handle RPC from secure world
+ * @ctx: context doing the RPC
+ * @param: value of registers for the RPC
+ * @call_ctx: call context. Preserved during one OP-TEE invocation
+ *
+ * Result of RPC is written back into @param.
+ */
+static void optee_handle_rpc(struct tee_context *ctx,
+ struct optee_rpc_param *param,
+ struct optee_call_ctx *call_ctx)
+{
+ struct tee_device *teedev = ctx->teedev;
+ struct optee *optee = tee_get_drvdata(teedev);
+ struct tee_shm *shm;
+ phys_addr_t pa;
+
+ switch (OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0)) {
+ case OPTEE_SMC_RPC_FUNC_ALLOC:
+ shm = tee_shm_alloc(ctx, param->a1,
+ TEE_SHM_MAPPED | TEE_SHM_PRIV);
+ if (!IS_ERR(shm) && !tee_shm_get_pa(shm, 0, &pa)) {
+ reg_pair_from_64(&param->a1, &param->a2, pa);
+ reg_pair_from_64(&param->a4, &param->a5,
+ (unsigned long)shm);
+ } else {
+ param->a1 = 0;
+ param->a2 = 0;
+ param->a4 = 0;
+ param->a5 = 0;
+ }
+ break;
+ case OPTEE_SMC_RPC_FUNC_FREE:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ tee_shm_free(shm);
+ break;
+ case OPTEE_SMC_RPC_FUNC_FOREIGN_INTR:
+ /*
+ * A foreign interrupt was raised while secure world was
+ * executing. Since such interrupts are handled in Linux, a dummy
+ * RPC is performed to let Linux take the interrupt through the
+ * normal vector.
+ */
+ break;
+ case OPTEE_SMC_RPC_FUNC_CMD:
+ shm = reg_pair_to_ptr(param->a1, param->a2);
+ handle_rpc_func_cmd(ctx, optee, shm, call_ctx);
+ break;
+ default:
+ pr_warn("Unknown RPC func 0x%x\n",
+ (u32)OPTEE_SMC_RETURN_GET_RPC_FUNC(param->a0));
+ break;
+ }
+
+ param->a0 = OPTEE_SMC_CALL_RETURN_FROM_RPC;
+}
+
+/**
+ * optee_smc_do_call_with_arg() - Do an SMC to OP-TEE in secure world
+ * @ctx: calling context
+ * @arg: shared memory holding the message to pass to secure world
+ *
+ * Does an SMC to OP-TEE in secure world and handles any resulting
+ * Remote Procedure Calls (RPC) from OP-TEE.
+ *
+ * Returns the return code from secure world; 0 is OK
+ */
+static int optee_smc_do_call_with_arg(struct tee_context *ctx,
+ struct tee_shm *arg)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ struct optee_call_waiter w;
+ struct optee_rpc_param param = { };
+ struct optee_call_ctx call_ctx = { };
+ phys_addr_t parg;
+ int rc;
+
+ rc = tee_shm_get_pa(arg, 0, &parg);
+ if (rc)
+ return rc;
+
+ param.a0 = OPTEE_SMC_CALL_WITH_ARG;
+ reg_pair_from_64(&param.a1, &param.a2, parg);
+ /* Initialize waiter */
+ optee_cq_wait_init(&optee->call_queue, &w);
+ while (true) {
+ struct arm_smccc_res res;
+
+ trace_optee_invoke_fn_begin(&param);
+ optee->smc.invoke_fn(param.a0, param.a1, param.a2, param.a3,
+ param.a4, param.a5, param.a6, param.a7,
+ &res);
+ trace_optee_invoke_fn_end(&param, &res);
+
+ if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
+ /*
+ * Out of threads in secure world, wait for a thread to
+ * become available.
+ */
+ optee_cq_wait_for_completion(&optee->call_queue, &w);
+ } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
+ cond_resched();
+ param.a0 = res.a0;
+ param.a1 = res.a1;
+ param.a2 = res.a2;
+ param.a3 = res.a3;
+ optee_handle_rpc(ctx, &param, &call_ctx);
+ } else {
+ rc = res.a0;
+ break;
+ }
+ }
+
+ optee_rpc_finalize_call(&call_ctx);
+ /*
+ * We're done with our thread in secure world; if there are any
+ * waiters, wake one up.
+ */
+ optee_cq_wait_final(&optee->call_queue, &w);
+
+ return rc;
+}
+
+/*
+ * 5. Driver initialization
+ *
+ * During driver initialization, secure world is probed to find out which
+ * features it supports so the driver can be initialized with a matching
+ * configuration. This involves, for instance, support for dynamic shared
+ * memory instead of a static memory carveout.
+ */
+
+static void optee_get_version(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers)
+{
+ struct tee_ioctl_version_data v = {
+ .impl_id = TEE_IMPL_ID_OPTEE,
+ .impl_caps = TEE_OPTEE_CAP_TZ,
+ .gen_caps = TEE_GEN_CAP_GP,
+ };
+ struct optee *optee = tee_get_drvdata(teedev);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ v.gen_caps |= TEE_GEN_CAP_REG_MEM;
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL)
+ v.gen_caps |= TEE_GEN_CAP_MEMREF_NULL;
+ *vers = v;
+}
+
+static int optee_smc_open(struct tee_context *ctx)
+{
+ struct optee *optee = tee_get_drvdata(ctx->teedev);
+ u32 sec_caps = optee->smc.sec_caps;
+
+ return optee_open(ctx, sec_caps & OPTEE_SMC_SEC_CAP_MEMREF_NULL);
+}
+
+static const struct tee_driver_ops optee_clnt_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release,
+ .open_session = optee_open_session,
+ .close_session = optee_close_session,
+ .invoke_func = optee_invoke_func,
+ .cancel_req = optee_cancel_req,
+ .shm_register = optee_shm_register,
+ .shm_unregister = optee_shm_unregister,
+};
+
+static const struct tee_desc optee_clnt_desc = {
+ .name = DRIVER_NAME "-clnt",
+ .ops = &optee_clnt_ops,
+ .owner = THIS_MODULE,
+};
+
+static const struct tee_driver_ops optee_supp_ops = {
+ .get_version = optee_get_version,
+ .open = optee_smc_open,
+ .release = optee_release_supp,
+ .supp_recv = optee_supp_recv,
+ .supp_send = optee_supp_send,
+ .shm_register = optee_shm_register_supp,
+ .shm_unregister = optee_shm_unregister_supp,
+};
+
+static const struct tee_desc optee_supp_desc = {
+ .name = DRIVER_NAME "-supp",
+ .ops = &optee_supp_ops,
+ .owner = THIS_MODULE,
+ .flags = TEE_DESC_PRIVILEGED,
+};
+
+static const struct optee_ops optee_ops = {
+ .do_call_with_arg = optee_smc_do_call_with_arg,
+ .to_msg_param = optee_to_msg_param,
+ .from_msg_param = optee_from_msg_param,
+};
+
+static bool optee_msg_api_uid_is_optee_api(optee_invoke_fn *invoke_fn)
+{
+ struct arm_smccc_res res;
+
+ invoke_fn(OPTEE_SMC_CALLS_UID, 0, 0, 0, 0, 0, 0, 0, &res);
+
+ if (res.a0 == OPTEE_MSG_UID_0 && res.a1 == OPTEE_MSG_UID_1 &&
+ res.a2 == OPTEE_MSG_UID_2 && res.a3 == OPTEE_MSG_UID_3)
+ return true;
+ return false;
+}
+
+static void optee_msg_get_os_revision(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_call_get_os_revision_result result;
+ } res = {
+ .result = {
+ .build_id = 0
+ }
+ };
+
+ invoke_fn(OPTEE_SMC_CALL_GET_OS_REVISION, 0, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.build_id)
+ pr_info("revision %lu.%lu (%08lx)", res.result.major,
+ res.result.minor, res.result.build_id);
+ else
+ pr_info("revision %lu.%lu", res.result.major, res.result.minor);
+}
+
+static bool optee_msg_api_revision_is_compatible(optee_invoke_fn *invoke_fn)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_calls_revision_result result;
+ } res;
+
+ invoke_fn(OPTEE_SMC_CALLS_REVISION, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+
+ if (res.result.major == OPTEE_MSG_REVISION_MAJOR &&
+ (int)res.result.minor >= OPTEE_MSG_REVISION_MINOR)
+ return true;
+ return false;
+}
+
+static bool optee_msg_exchange_capabilities(optee_invoke_fn *invoke_fn,
+ u32 *sec_caps)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_exchange_capabilities_result result;
+ } res;
+ u32 a1 = 0;
+
+ /*
+ * TODO This isn't enough to tell if it's a UP system (from the
+ * kernel's point of view) or not; is_smp() returns the information
+ * needed, but can't be called directly from here.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) || nr_cpu_ids == 1)
+ a1 |= OPTEE_SMC_NSEC_CAP_UNIPROCESSOR;
+
+ invoke_fn(OPTEE_SMC_EXCHANGE_CAPABILITIES, a1, 0, 0, 0, 0, 0, 0,
+ &res.smccc);
+
+ if (res.result.status != OPTEE_SMC_RETURN_OK)
+ return false;
+
+ *sec_caps = res.result.capabilities;
+ return true;
+}
+
+static struct tee_shm_pool *optee_config_dyn_shm(void)
+{
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc))
+ return rc;
+ priv_mgr = rc;
+
+ rc = optee_shm_pool_alloc_pages();
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ return rc;
+ }
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc)) {
+ tee_shm_pool_mgr_destroy(priv_mgr);
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+ }
+
+ return rc;
+}
+
+static struct tee_shm_pool *
+optee_config_shm_memremap(optee_invoke_fn *invoke_fn, void **memremaped_shm)
+{
+ union {
+ struct arm_smccc_res smccc;
+ struct optee_smc_get_shm_config_result result;
+ } res;
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+ phys_addr_t begin;
+ phys_addr_t end;
+ void *va;
+ struct tee_shm_pool_mgr *priv_mgr;
+ struct tee_shm_pool_mgr *dmabuf_mgr;
+ void *rc;
+ const int sz = OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE;
+
+ invoke_fn(OPTEE_SMC_GET_SHM_CONFIG, 0, 0, 0, 0, 0, 0, 0, &res.smccc);
+ if (res.result.status != OPTEE_SMC_RETURN_OK) {
+ pr_err("static shm service not available\n");
+ return ERR_PTR(-ENOENT);
+ }
+
+ if (res.result.settings != OPTEE_SMC_SHM_CACHED) {
+ pr_err("only normal cached shared memory supported\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ begin = roundup(res.result.start, PAGE_SIZE);
+ end = rounddown(res.result.start + res.result.size, PAGE_SIZE);
+ paddr = begin;
+ size = end - begin;
+
+ if (size < 2 * OPTEE_SHM_NUM_PRIV_PAGES * PAGE_SIZE) {
+ pr_err("too small shared memory area\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ va = memremap(paddr, size, MEMREMAP_WB);
+ if (!va) {
+ pr_err("shared memory ioremap failed\n");
+ return ERR_PTR(-EINVAL);
+ }
+ vaddr = (unsigned long)va;
+
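+ /*
+ * The first OPTEE_SHM_NUM_PRIV_PAGES pages of the carveout are reserved
+ * for driver-private allocations; the remainder backs the dma-buf pool
+ * handed out to clients.
+ */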
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, sz,
+ 3 /* 8 bytes aligned */);
+ if (IS_ERR(rc))
+ goto err_memunmap;
+ priv_mgr = rc;
+
+ vaddr += sz;
+ paddr += sz;
+ size -= sz;
+
+ rc = tee_shm_pool_mgr_alloc_res_mem(vaddr, paddr, size, PAGE_SHIFT);
+ if (IS_ERR(rc))
+ goto err_free_priv_mgr;
+ dmabuf_mgr = rc;
+
+ rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
+ if (IS_ERR(rc))
+ goto err_free_dmabuf_mgr;
+
+ *memremaped_shm = va;
+
+ return rc;
+
+err_free_dmabuf_mgr:
+ tee_shm_pool_mgr_destroy(dmabuf_mgr);
+err_free_priv_mgr:
+ tee_shm_pool_mgr_destroy(priv_mgr);
+err_memunmap:
+ memunmap(va);
+ return rc;
+}
+
+/* Simple wrapper functions to be able to use a function pointer */
+static void optee_smccc_smc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_smc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static void optee_smccc_hvc(unsigned long a0, unsigned long a1,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ unsigned long a6, unsigned long a7,
+ struct arm_smccc_res *res)
+{
+ arm_smccc_hvc(a0, a1, a2, a3, a4, a5, a6, a7, res);
+}
+
+static optee_invoke_fn *get_invoke_func(struct device *dev)
+{
+ const char *method;
+
+ pr_info("probing for conduit method.\n");
+
+ if (device_property_read_string(dev, "method", &method)) {
+ pr_warn("missing \"method\" property\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ if (!strcmp("hvc", method))
+ return optee_smccc_hvc;
+ else if (!strcmp("smc", method))
+ return optee_smccc_smc;
+
+ pr_warn("invalid \"method\" property: %s\n", method);
+ return ERR_PTR(-EINVAL);
+}
+
+/* optee_smc_remove - Device Removal Routine
+ * @pdev: platform device information struct
+ *
+ * optee_smc_remove is called by the platform subsystem to alert the driver
+ * that it should release the device.
+ */
+static int optee_smc_remove(struct platform_device *pdev)
+{
+ struct optee *optee = platform_get_drvdata(pdev);
+
+ /*
+ * Ask OP-TEE to free all cached shared memory objects to decrease
+ * reference counters and also avoid wild pointers in secure world
+ * into the old shared memory range.
+ */
+ optee_disable_shm_cache(optee);
+
+ optee_remove_common(optee);
+
+ if (optee->smc.memremaped_shm)
+ memunmap(optee->smc.memremaped_shm);
+
+ kfree(optee);
+
+ return 0;
+}
+
+/* optee_shutdown - Device Shutdown Routine
+ * @pdev: platform device information struct
+ *
+ * optee_shutdown is called by the platform subsystem to alert
+ * the driver that a shutdown, reboot, or kexec is happening and
+ * the device must be disabled.
+ */
+static void optee_shutdown(struct platform_device *pdev)
+{
+ optee_disable_shm_cache(platform_get_drvdata(pdev));
+}
+
+static int optee_probe(struct platform_device *pdev)
+{
+ optee_invoke_fn *invoke_fn;
+ struct tee_shm_pool *pool = ERR_PTR(-EINVAL);
+ struct optee *optee = NULL;
+ void *memremaped_shm = NULL;
+ struct tee_device *teedev;
+ u32 sec_caps;
+ int rc;
+
+ invoke_fn = get_invoke_func(&pdev->dev);
+ if (IS_ERR(invoke_fn))
+ return PTR_ERR(invoke_fn);
+
+ if (!optee_msg_api_uid_is_optee_api(invoke_fn)) {
+ pr_warn("api uid mismatch\n");
+ return -EINVAL;
+ }
+
+ optee_msg_get_os_revision(invoke_fn);
+
+ if (!optee_msg_api_revision_is_compatible(invoke_fn)) {
+ pr_warn("api revision mismatch\n");
+ return -EINVAL;
+ }
+
+ if (!optee_msg_exchange_capabilities(invoke_fn, &sec_caps)) {
+ pr_warn("capabilities mismatch\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Try to use dynamic shared memory if possible
+ */
+ if (sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pool = optee_config_dyn_shm();
+
+ /*
+ * If dynamic shared memory is not available or its setup failed, try the
+ * static pool instead
+ */
+ if (IS_ERR(pool) && (sec_caps & OPTEE_SMC_SEC_CAP_HAVE_RESERVED_SHM))
+ pool = optee_config_shm_memremap(invoke_fn, &memremaped_shm);
+
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ optee = kzalloc(sizeof(*optee), GFP_KERNEL);
+ if (!optee) {
+ rc = -ENOMEM;
+ goto err;
+ }
+
+ optee->ops = &optee_ops;
+ optee->smc.invoke_fn = invoke_fn;
+ optee->smc.sec_caps = sec_caps;
+
+ teedev = tee_device_alloc(&optee_clnt_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->teedev = teedev;
+
+ teedev = tee_device_alloc(&optee_supp_desc, NULL, pool, optee);
+ if (IS_ERR(teedev)) {
+ rc = PTR_ERR(teedev);
+ goto err;
+ }
+ optee->supp_teedev = teedev;
+
+ rc = tee_device_register(optee->teedev);
+ if (rc)
+ goto err;
+
+ rc = tee_device_register(optee->supp_teedev);
+ if (rc)
+ goto err;
+
+ mutex_init(&optee->call_queue.mutex);
+ INIT_LIST_HEAD(&optee->call_queue.waiters);
+ optee_wait_queue_init(&optee->wait_queue);
+ optee_supp_init(&optee->supp);
+ optee->smc.memremaped_shm = memremaped_shm;
+ optee->pool = pool;
+
+ /*
+ * Ensure that there are no pre-existing shm objects before enabling
+ * the shm cache so that there's no chance of receiving an invalid
+ * address during shutdown. This could occur, for example, if we're
+ * kexec booting from an older kernel that did not properly cleanup the
+ * shm cache.
+ */
+ optee_disable_unmapped_shm_cache(optee);
+
+ optee_enable_shm_cache(optee);
+
+ if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
+ pr_info("dynamic shared memory is enabled\n");
+
+ platform_set_drvdata(pdev, optee);
+
+ rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
+ if (rc) {
+ optee_smc_remove(pdev);
+ return rc;
+ }
+
+ pr_info("initialized driver\n");
+ return 0;
+err:
+ if (optee) {
+ /*
+ * tee_device_unregister() is safe to call even if the
+ * devices haven't been registered with
+ * tee_device_register() yet.
+ */
+ tee_device_unregister(optee->supp_teedev);
+ tee_device_unregister(optee->teedev);
+ kfree(optee);
+ }
+ if (pool)
+ tee_shm_pool_free(pool);
+ if (memremaped_shm)
+ memunmap(memremaped_shm);
+ return rc;
+}
+
+static const struct of_device_id optee_dt_match[] = {
+ { .compatible = "linaro,optee-tz" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, optee_dt_match);
+
+static struct platform_driver optee_driver = {
+ .probe = optee_probe,
+ .remove = optee_smc_remove,
+ .shutdown = optee_shutdown,
+ .driver = {
+ .name = "optee",
+ .of_match_table = optee_dt_match,
+ },
+};
+
+int optee_smc_abi_register(void)
+{
+ return platform_driver_register(&optee_driver);
+}
+
+void optee_smc_abi_unregister(void)
+{
+ platform_driver_unregister(&optee_driver);
+}
diff --git a/drivers/thermal/gov_user_space.c b/drivers/thermal/gov_user_space.c
index 82a7198bbe71..f4fe050e1cbc 100644
--- a/drivers/thermal/gov_user_space.c
+++ b/drivers/thermal/gov_user_space.c
@@ -15,6 +15,14 @@
#include "thermal_core.h"
+static int user_space_bind(struct thermal_zone_device *tz)
+{
+ pr_warn("Userspace governor deprecated: use thermal netlink " \
+ "notification instead\n");
+
+ return 0;
+}
+
/**
* notify_user_space - Notifies user space about thermal events
* @tz: thermal_zone_device
@@ -43,5 +51,6 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip)
static struct thermal_governor thermal_gov_user_space = {
.name = "user_space",
.throttle = notify_user_space,
+ .bind_to_tz = user_space_bind,
};
THERMAL_GOVERNOR_DECLARE(thermal_gov_user_space);
diff --git a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
index 19926beeb3b7..8502b7d8df89 100644
--- a/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3400_thermal.c
@@ -129,11 +129,10 @@ static ssize_t available_uuids_show(struct device *dev,
for (i = 0; i < INT3400_THERMAL_MAXIMUM_UUID; i++) {
if (priv->uuid_bitmap & (1 << i))
- if (PAGE_SIZE - length > 0)
- length += scnprintf(&buf[length],
- PAGE_SIZE - length,
- "%s\n",
- int3400_thermal_uuids[i]);
+ length += scnprintf(&buf[length],
+ PAGE_SIZE - length,
+ "%s\n",
+ int3400_thermal_uuids[i]);
}
return length;
diff --git a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
index acebc8ba94e2..217786fba185 100644
--- a/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
+++ b/drivers/thermal/intel/int340x_thermal/int3401_thermal.c
@@ -44,15 +44,21 @@ static int int3401_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int int3401_thermal_suspend(struct device *dev)
+{
+ return proc_thermal_suspend(dev);
+}
static int int3401_thermal_resume(struct device *dev)
{
return proc_thermal_resume(dev);
}
#else
+#define int3401_thermal_suspend NULL
#define int3401_thermal_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(int3401_proc_thermal_pm, NULL, int3401_thermal_resume);
+static SIMPLE_DEV_PM_OPS(int3401_proc_thermal_pm, int3401_thermal_suspend,
+ int3401_thermal_resume);
static struct platform_driver int3401_driver = {
.probe = int3401_add,
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
index fb64acfd5e07..a8d98f1bd6c6 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.c
@@ -68,8 +68,7 @@ static const struct attribute_group power_limit_attribute_group = {
.name = "power_limits"
};
-static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static int tcc_get_offset(void)
{
u64 val;
int err;
@@ -78,8 +77,20 @@ static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
if (err)
return err;
- val = (val >> 24) & 0x3f;
- return sprintf(buf, "%d\n", (int)val);
+ return (val >> 24) & 0x3f;
+}
+
+static ssize_t tcc_offset_degree_celsius_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int tcc;
+
+ tcc = tcc_get_offset();
+ if (tcc < 0)
+ return tcc;
+
+ return sprintf(buf, "%d\n", tcc);
}
static int tcc_offset_update(unsigned int tcc)
@@ -107,8 +118,6 @@ static int tcc_offset_update(unsigned int tcc)
return 0;
}
-static int tcc_offset_save = -1;
-
static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -131,8 +140,6 @@ static ssize_t tcc_offset_degree_celsius_store(struct device *dev,
if (err)
return err;
- tcc_offset_save = tcc;
-
return count;
}
@@ -345,6 +352,18 @@ void proc_thermal_remove(struct proc_thermal_device *proc_priv)
}
EXPORT_SYMBOL_GPL(proc_thermal_remove);
+static int tcc_offset_save = -1;
+
+int proc_thermal_suspend(struct device *dev)
+{
+ tcc_offset_save = tcc_get_offset();
+ if (tcc_offset_save < 0)
+ dev_warn(dev, "failed to save offset (%d)\n", tcc_offset_save);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(proc_thermal_suspend);
+
int proc_thermal_resume(struct device *dev)
{
struct proc_thermal_device *proc_dev;
@@ -352,6 +371,7 @@ int proc_thermal_resume(struct device *dev)
proc_dev = dev_get_drvdata(dev);
proc_thermal_read_ppcc(proc_dev);
+ /* Do not update if saving failed */
if (tcc_offset_save >= 0)
tcc_offset_update(tcc_offset_save);
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
index 5a1cfe4864f1..c1d8de6dc3d1 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device.h
@@ -83,6 +83,7 @@ void proc_thermal_mbox_remove(struct pci_dev *pdev);
int processor_thermal_send_mbox_cmd(struct pci_dev *pdev, u16 cmd_id, u32 cmd_data, u32 *cmd_resp);
int proc_thermal_add(struct device *dev, struct proc_thermal_device *priv);
void proc_thermal_remove(struct proc_thermal_device *proc_priv);
+int proc_thermal_suspend(struct device *dev);
int proc_thermal_resume(struct device *dev);
int proc_thermal_mmio_add(struct pci_dev *pdev,
struct proc_thermal_device *proc_priv,
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
index 11dd2e825f4f..b4bcd3fe9eb2 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci.c
@@ -314,6 +314,20 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int proc_thermal_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct proc_thermal_device *proc_priv;
+ struct proc_thermal_pci *pci_info;
+
+ proc_priv = pci_get_drvdata(pdev);
+ pci_info = proc_priv->priv_data;
+
+ if (!pci_info->no_legacy)
+ return proc_thermal_suspend(dev);
+
+ return 0;
+}
static int proc_thermal_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -335,10 +349,12 @@ static int proc_thermal_pci_resume(struct device *dev)
return 0;
}
#else
+#define proc_thermal_pci_suspend NULL
#define proc_thermal_pci_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, NULL, proc_thermal_pci_resume);
+static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
+ proc_thermal_pci_resume);
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, ADL_THERMAL, PROC_THERMAL_FEATURE_RAPL | PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_MBOX) },
diff --git a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
index f5fc1791b11e..4571a1a53b84 100644
--- a/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
+++ b/drivers/thermal/intel/int340x_thermal/processor_thermal_device_pci_legacy.c
@@ -107,15 +107,21 @@ static void proc_thermal_pci_remove(struct pci_dev *pdev)
}
#ifdef CONFIG_PM_SLEEP
+static int proc_thermal_pci_suspend(struct device *dev)
+{
+ return proc_thermal_suspend(dev);
+}
static int proc_thermal_pci_resume(struct device *dev)
{
return proc_thermal_resume(dev);
}
#else
+#define proc_thermal_pci_suspend NULL
#define proc_thermal_pci_resume NULL
#endif
-static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, NULL, proc_thermal_pci_resume);
+static SIMPLE_DEV_PM_OPS(proc_thermal_pci_pm, proc_thermal_pci_suspend,
+ proc_thermal_pci_resume);
static const struct pci_device_id proc_thermal_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, BDW_THERMAL, 0) },
diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c
index a5b58ea89cc6..9b68489a2356 100644
--- a/drivers/thermal/intel/intel_powerclamp.c
+++ b/drivers/thermal/intel/intel_powerclamp.c
@@ -705,10 +705,8 @@ static enum cpuhp_state hp_state;
static int __init powerclamp_init(void)
{
int retval;
- int bitmap_size;
- bitmap_size = BITS_TO_LONGS(num_possible_cpus()) * sizeof(long);
- cpu_clamping_mask = kzalloc(bitmap_size, GFP_KERNEL);
+ cpu_clamping_mask = bitmap_zalloc(num_possible_cpus(), GFP_KERNEL);
if (!cpu_clamping_mask)
return -ENOMEM;
@@ -753,7 +751,7 @@ exit_free_thread:
exit_unregister:
cpuhp_remove_state_nocalls(hp_state);
exit_free:
- kfree(cpu_clamping_mask);
+ bitmap_free(cpu_clamping_mask);
return retval;
}
module_init(powerclamp_init);
@@ -764,7 +762,7 @@ static void __exit powerclamp_exit(void)
cpuhp_remove_state_nocalls(hp_state);
free_percpu(worker_data);
thermal_cooling_device_unregister(cooling_dev);
- kfree(cpu_clamping_mask);
+ bitmap_free(cpu_clamping_mask);
cancel_delayed_work_sync(&poll_pkg_cstate_work);
debugfs_remove_recursive(debug_dir);
diff --git a/drivers/thermal/qcom/Kconfig b/drivers/thermal/qcom/Kconfig
index 7d942f71e532..bfd889422dd3 100644
--- a/drivers/thermal/qcom/Kconfig
+++ b/drivers/thermal/qcom/Kconfig
@@ -34,7 +34,7 @@ config QCOM_SPMI_TEMP_ALARM
config QCOM_LMH
tristate "Qualcomm Limits Management Hardware"
- depends on ARCH_QCOM
+ depends on ARCH_QCOM && QCOM_SCM
help
This enables initialization of Qualcomm limits management
hardware(LMh). LMh allows for hardware-enforced mitigation for cpus based on
diff --git a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
index 8494cc04aa21..824671cf494a 100644
--- a/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
+++ b/drivers/thermal/qcom/qcom-spmi-adc-tm5.c
@@ -82,6 +82,7 @@ struct adc_tm5_data {
const u32 full_scale_code_volt;
unsigned int *decimation;
unsigned int *hw_settle;
+ bool is_hc;
};
enum adc_tm5_cal_method {
@@ -146,6 +147,14 @@ static const struct adc_tm5_data adc_tm5_data_pmic = {
64000, 128000 },
};
+static const struct adc_tm5_data adc_tm_hc_data_pmic = {
+ .full_scale_code_volt = 0x70e4,
+ .decimation = (unsigned int []) { 256, 512, 1024 },
+ .hw_settle = (unsigned int []) { 0, 100, 200, 300, 400, 500, 600, 700,
+ 1000, 2000, 4000, 6000, 8000, 10000 },
+ .is_hc = true,
+};
+
static int adc_tm5_read(struct adc_tm5_chip *adc_tm, u16 offset, u8 *data, int len)
{
return regmap_bulk_read(adc_tm->regmap, adc_tm->base + offset, data, len);
@@ -375,6 +384,29 @@ static int adc_tm5_register_tzd(struct adc_tm5_chip *adc_tm)
return 0;
}
+static int adc_tm_hc_init(struct adc_tm5_chip *chip)
+{
+ unsigned int i;
+ u8 buf[2];
+ int ret;
+
+ for (i = 0; i < chip->nchannels; i++) {
+ if (chip->channels[i].channel >= ADC_TM5_NUM_CHANNELS) {
+ dev_err(chip->dev, "Invalid channel %d\n", chip->channels[i].channel);
+ return -EINVAL;
+ }
+ }
+
+ buf[0] = chip->decimation;
+ buf[1] = chip->avg_samples | ADC_TM5_FAST_AVG_EN;
+
+ ret = adc_tm5_write(chip, ADC_TM5_ADC_DIG_PARAM, buf, sizeof(buf));
+ if (ret)
+ dev_err(chip->dev, "block write failed: %d\n", ret);
+
+ return ret;
+}
+
static int adc_tm5_init(struct adc_tm5_chip *chip)
{
u8 buf[4], channels_available;
@@ -591,7 +623,10 @@ static int adc_tm5_probe(struct platform_device *pdev)
return ret;
}
- ret = adc_tm5_init(adc_tm);
+ if (adc_tm->data->is_hc)
+ ret = adc_tm_hc_init(adc_tm);
+ else
+ ret = adc_tm5_init(adc_tm);
if (ret) {
dev_err(dev, "adc-tm init failed\n");
return ret;
@@ -612,6 +647,10 @@ static const struct of_device_id adc_tm5_match_table[] = {
.compatible = "qcom,spmi-adc-tm5",
.data = &adc_tm5_data_pmic,
},
+ {
+ .compatible = "qcom,spmi-adc-tm-hc",
+ .data = &adc_tm_hc_data_pmic,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, adc_tm5_match_table);
diff --git a/drivers/thermal/qcom/tsens.c b/drivers/thermal/qcom/tsens.c
index b1162e566a70..99a8d9f3e03c 100644
--- a/drivers/thermal/qcom/tsens.c
+++ b/drivers/thermal/qcom/tsens.c
@@ -603,22 +603,21 @@ int get_temp_tsens_valid(const struct tsens_sensor *s, int *temp)
int ret;
/* VER_0 doesn't have VALID bit */
- if (tsens_version(priv) >= VER_0_1) {
- ret = regmap_field_read(priv->rf[valid_idx], &valid);
- if (ret)
- return ret;
- while (!valid) {
- /* Valid bit is 0 for 6 AHB clock cycles.
- * At 19.2MHz, 1 AHB clock is ~60ns.
- * We should enter this loop very, very rarely.
- */
- ndelay(400);
- ret = regmap_field_read(priv->rf[valid_idx], &valid);
- if (ret)
- return ret;
- }
- }
+ if (tsens_version(priv) == VER_0)
+ goto get_temp;
+
+ /* Valid bit is 0 for 6 AHB clock cycles.
+ * At 19.2MHz, 1 AHB clock is ~60ns.
+ * We should enter this loop very, very rarely.
+ * Wait at least 1 us, the minimum supported by the poll_timeout
+ * macro (the old value was 400 ns).
+ */
+ ret = regmap_field_read_poll_timeout(priv->rf[valid_idx], valid,
+ valid, 1, 20 * USEC_PER_MSEC);
+ if (ret)
+ return ret;
+get_temp:
/* Valid bit is set, OK to read the temperature */
*temp = tsens_hw_to_mC(s, temp_idx);
diff --git a/drivers/thermal/rcar_gen3_thermal.c b/drivers/thermal/rcar_gen3_thermal.c
index 85228d308dd3..43eb25b167bc 100644
--- a/drivers/thermal/rcar_gen3_thermal.c
+++ b/drivers/thermal/rcar_gen3_thermal.c
@@ -34,6 +34,10 @@
#define REG_GEN3_THCODE1 0x50
#define REG_GEN3_THCODE2 0x54
#define REG_GEN3_THCODE3 0x58
+#define REG_GEN3_PTAT1 0x5c
+#define REG_GEN3_PTAT2 0x60
+#define REG_GEN3_PTAT3 0x64
+#define REG_GEN3_THSCP 0x68
/* IRQ{STR,MSK,EN} bits */
#define IRQ_TEMP1 BIT(0)
@@ -55,6 +59,9 @@
#define THCTR_PONM BIT(6)
#define THCTR_THSST BIT(0)
+/* THSCP bits */
+#define THSCP_COR_PARA_VLD (BIT(15) | BIT(14))
+
#define CTEMP_MASK 0xFFF
#define MCELSIUS(temp) ((temp) * 1000)
@@ -62,15 +69,6 @@
#define TSC_MAX_NUM 5
-/* default THCODE values if FUSEs are missing */
-static const int thcodes[TSC_MAX_NUM][3] = {
- { 3397, 2800, 2221 },
- { 3393, 2795, 2216 },
- { 3389, 2805, 2237 },
- { 3415, 2694, 2195 },
- { 3356, 2724, 2244 },
-};
-
/* Structure for thermal temperature calculation */
struct equation_coefs {
int a1;
@@ -84,13 +82,14 @@ struct rcar_gen3_thermal_tsc {
struct thermal_zone_device *zone;
struct equation_coefs coef;
int tj_t;
- unsigned int id; /* thermal channel id */
+ int thcode[3];
};
struct rcar_gen3_thermal_priv {
struct rcar_gen3_thermal_tsc *tscs[TSC_MAX_NUM];
unsigned int num_tscs;
void (*thermal_init)(struct rcar_gen3_thermal_tsc *tsc);
+ int ptat[3];
};
static inline u32 rcar_gen3_thermal_read(struct rcar_gen3_thermal_tsc *tsc,
@@ -133,8 +132,8 @@ static inline void rcar_gen3_thermal_write(struct rcar_gen3_thermal_tsc *tsc,
/* no idea where these constants come from */
#define TJ_3 -41
-static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_tsc *tsc,
- int *ptat, const int *thcode,
+static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_priv *priv,
+ struct rcar_gen3_thermal_tsc *tsc,
int ths_tj_1)
{
/* TODO: Find documentation and document constant calculation formula */
@@ -143,16 +142,16 @@ static void rcar_gen3_thermal_calc_coefs(struct rcar_gen3_thermal_tsc *tsc,
* Division is not scaled in BSP and if scaled it might overflow
* the dividend (4095 * 4095 << 14 > INT_MAX) so keep it unscaled
*/
- tsc->tj_t = (FIXPT_INT((ptat[1] - ptat[2]) * (ths_tj_1 - TJ_3))
- / (ptat[0] - ptat[2])) + FIXPT_INT(TJ_3);
+ tsc->tj_t = (FIXPT_INT((priv->ptat[1] - priv->ptat[2]) * (ths_tj_1 - TJ_3))
+ / (priv->ptat[0] - priv->ptat[2])) + FIXPT_INT(TJ_3);
- tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[2]),
+ tsc->coef.a1 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[2]),
tsc->tj_t - FIXPT_INT(TJ_3));
- tsc->coef.b1 = FIXPT_INT(thcode[2]) - tsc->coef.a1 * TJ_3;
+ tsc->coef.b1 = FIXPT_INT(tsc->thcode[2]) - tsc->coef.a1 * TJ_3;
- tsc->coef.a2 = FIXPT_DIV(FIXPT_INT(thcode[1] - thcode[0]),
+ tsc->coef.a2 = FIXPT_DIV(FIXPT_INT(tsc->thcode[1] - tsc->thcode[0]),
tsc->tj_t - FIXPT_INT(ths_tj_1));
- tsc->coef.b2 = FIXPT_INT(thcode[0]) - tsc->coef.a2 * ths_tj_1;
+ tsc->coef.b2 = FIXPT_INT(tsc->thcode[0]) - tsc->coef.a2 * ths_tj_1;
}
static int rcar_gen3_thermal_round(int temp)
@@ -174,7 +173,7 @@ static int rcar_gen3_thermal_get_temp(void *devdata, int *temp)
/* Read register and convert to mili Celsius */
reg = rcar_gen3_thermal_read(tsc, REG_GEN3_TEMP) & CTEMP_MASK;
- if (reg <= thcodes[tsc->id][1])
+ if (reg <= tsc->thcode[1])
val = FIXPT_DIV(FIXPT_INT(reg) - tsc->coef.b1,
tsc->coef.a1);
else
@@ -253,6 +252,64 @@ static const struct soc_device_attribute r8a7795es1[] = {
{ /* sentinel */ }
};
+static bool rcar_gen3_thermal_read_fuses(struct rcar_gen3_thermal_priv *priv)
+{
+ unsigned int i;
+ u32 thscp;
+
+ /* If fuses are not set, fall back to pseudo values. */
+ thscp = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_THSCP);
+ if ((thscp & THSCP_COR_PARA_VLD) != THSCP_COR_PARA_VLD) {
+ /* Default THCODE values in case FUSEs are not set. */
+ static const int thcodes[TSC_MAX_NUM][3] = {
+ { 3397, 2800, 2221 },
+ { 3393, 2795, 2216 },
+ { 3389, 2805, 2237 },
+ { 3415, 2694, 2195 },
+ { 3356, 2724, 2244 },
+ };
+
+ priv->ptat[0] = 2631;
+ priv->ptat[1] = 1509;
+ priv->ptat[2] = 435;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+
+ tsc->thcode[0] = thcodes[i][0];
+ tsc->thcode[1] = thcodes[i][1];
+ tsc->thcode[2] = thcodes[i][2];
+ }
+
+ return false;
+ }
+
+ /*
+ * Set the pseudo calibration points with fused values.
+ * PTAT is shared between all TSCs but only fused for the first
+ * TSC while THCODEs are fused for each TSC.
+ */
+ priv->ptat[0] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT1) &
+ GEN3_FUSE_MASK;
+ priv->ptat[1] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT2) &
+ GEN3_FUSE_MASK;
+ priv->ptat[2] = rcar_gen3_thermal_read(priv->tscs[0], REG_GEN3_PTAT3) &
+ GEN3_FUSE_MASK;
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
+
+ tsc->thcode[0] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE1) &
+ GEN3_FUSE_MASK;
+ tsc->thcode[1] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE2) &
+ GEN3_FUSE_MASK;
+ tsc->thcode[2] = rcar_gen3_thermal_read(tsc, REG_GEN3_THCODE3) &
+ GEN3_FUSE_MASK;
+ }
+
+ return true;
+}
+
static void rcar_gen3_thermal_init_r8a7795es1(struct rcar_gen3_thermal_tsc *tsc)
{
rcar_gen3_thermal_write(tsc, REG_GEN3_CTSR, CTSR_THBGR);
@@ -401,10 +458,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
unsigned int i;
int ret;
- /* default values if FUSEs are missing */
- /* TODO: Read values from hardware on supported platforms */
- int ptat[3] = { 2631, 1509, 435 };
-
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
@@ -439,9 +492,17 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
ret = PTR_ERR(tsc->base);
goto error_unregister;
}
- tsc->id = i;
priv->tscs[i] = tsc;
+ }
+
+ priv->num_tscs = i;
+
+ if (!rcar_gen3_thermal_read_fuses(priv))
+ dev_info(dev, "No calibration values fused, fallback to driver values\n");
+
+ for (i = 0; i < priv->num_tscs; i++) {
+ struct rcar_gen3_thermal_tsc *tsc = priv->tscs[i];
zone = devm_thermal_zone_of_sensor_register(dev, i, tsc,
&rcar_gen3_tz_of_ops);
@@ -453,7 +514,7 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
tsc->zone = zone;
priv->thermal_init(tsc);
- rcar_gen3_thermal_calc_coefs(tsc, ptat, thcodes[i], *ths_tj_1);
+ rcar_gen3_thermal_calc_coefs(priv, tsc, *ths_tj_1);
tsc->zone->tzp->no_hwmon = false;
ret = thermal_add_hwmon_sysfs(tsc->zone);
@@ -471,8 +532,6 @@ static int rcar_gen3_thermal_probe(struct platform_device *pdev)
dev_info(dev, "TSC%u: Loaded %d trip points\n", i, ret);
}
- priv->num_tscs = i;
-
if (!priv->num_tscs) {
ret = -ENODEV;
goto error_unregister;
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 657d84b9963e..dc3a9c276a09 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -1383,7 +1383,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
if (IS_ERR(thermal->regs))
return PTR_ERR(thermal->regs);
- thermal->reset = devm_reset_control_get(&pdev->dev, "tsadc-apb");
+ thermal->reset = devm_reset_control_array_get(&pdev->dev, false, false);
if (IS_ERR(thermal->reset)) {
error = PTR_ERR(thermal->reset);
dev_err(&pdev->dev, "failed to get tsadc reset: %d\n", error);
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 51374f4e1cca..648829ab79ff 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -375,10 +375,12 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip)
if (tz->last_temperature != THERMAL_TEMP_INVALID) {
if (tz->last_temperature < trip_temp &&
tz->temperature >= trip_temp)
- thermal_notify_tz_trip_up(tz->id, trip);
+ thermal_notify_tz_trip_up(tz->id, trip,
+ tz->temperature);
if (tz->last_temperature >= trip_temp &&
tz->temperature < (trip_temp - hyst))
- thermal_notify_tz_trip_down(tz->id, trip);
+ thermal_notify_tz_trip_down(tz->id, trip,
+ tz->temperature);
}
if (type == THERMAL_TRIP_CRITICAL || type == THERMAL_TRIP_HOT)
@@ -887,7 +889,7 @@ __thermal_cooling_device_register(struct device_node *np,
{
struct thermal_cooling_device *cdev;
struct thermal_zone_device *pos = NULL;
- int ret;
+ int id, ret;
if (!ops || !ops->get_max_state || !ops->get_cur_state ||
!ops->set_cur_state)
@@ -901,6 +903,11 @@ __thermal_cooling_device_register(struct device_node *np,
if (ret < 0)
goto out_kfree_cdev;
cdev->id = ret;
+ id = ret;
+
+ ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
+ if (ret)
+ goto out_ida_remove;
cdev->type = kstrdup(type ? type : "", GFP_KERNEL);
if (!cdev->type) {
@@ -916,7 +923,6 @@ __thermal_cooling_device_register(struct device_node *np,
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
thermal_cooling_device_setup_sysfs(cdev);
- dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
ret = device_register(&cdev->device);
if (ret)
goto out_kfree_type;
@@ -941,8 +947,9 @@ __thermal_cooling_device_register(struct device_node *np,
out_kfree_type:
kfree(cdev->type);
put_device(&cdev->device);
+ cdev = NULL;
out_ida_remove:
- ida_simple_remove(&thermal_cdev_ida, cdev->id);
+ ida_simple_remove(&thermal_cdev_ida, id);
out_kfree_cdev:
kfree(cdev);
return ERR_PTR(ret);
@@ -1227,6 +1234,10 @@ thermal_zone_device_register(const char *type, int trips, int mask,
tz->id = id;
strlcpy(tz->type, type, sizeof(tz->type));
+ result = dev_set_name(&tz->device, "thermal_zone%d", tz->id);
+ if (result)
+ goto remove_id;
+
if (!ops->critical)
ops->critical = thermal_zone_device_critical;
@@ -1248,7 +1259,6 @@ thermal_zone_device_register(const char *type, int trips, int mask,
/* A new thermal zone needs to be updated anyway. */
atomic_set(&tz->need_update, 1);
- dev_set_name(&tz->device, "thermal_zone%d", tz->id);
result = device_register(&tz->device);
if (result)
goto release_device;
diff --git a/drivers/thermal/thermal_mmio.c b/drivers/thermal/thermal_mmio.c
index ded1dd0d4ef7..360b0dfdc3b0 100644
--- a/drivers/thermal/thermal_mmio.c
+++ b/drivers/thermal/thermal_mmio.c
@@ -34,7 +34,7 @@ static int thermal_mmio_get_temperature(void *private, int *temp)
return 0;
}
-static struct thermal_zone_of_device_ops thermal_mmio_ops = {
+static const struct thermal_zone_of_device_ops thermal_mmio_ops = {
.get_temp = thermal_mmio_get_temperature,
};
diff --git a/drivers/thermal/thermal_netlink.c b/drivers/thermal/thermal_netlink.c
index 1234dbe95895..a16dd4d5d710 100644
--- a/drivers/thermal/thermal_netlink.c
+++ b/drivers/thermal/thermal_netlink.c
@@ -121,7 +121,8 @@ static int thermal_genl_event_tz(struct param *p)
static int thermal_genl_event_tz_trip_up(struct param *p)
{
if (nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_ID, p->tz_id) ||
- nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id))
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TRIP_ID, p->trip_id) ||
+ nla_put_u32(p->msg, THERMAL_GENL_ATTR_TZ_TEMP, p->temp))
return -EMSGSIZE;
return 0;
@@ -285,16 +286,16 @@ int thermal_notify_tz_disable(int tz_id)
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_DISABLE, &p);
}
-int thermal_notify_tz_trip_down(int tz_id, int trip_id)
+int thermal_notify_tz_trip_down(int tz_id, int trip_id, int temp)
{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id };
+ struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_DOWN, &p);
}
-int thermal_notify_tz_trip_up(int tz_id, int trip_id)
+int thermal_notify_tz_trip_up(int tz_id, int trip_id, int temp)
{
- struct param p = { .tz_id = tz_id, .trip_id = trip_id };
+ struct param p = { .tz_id = tz_id, .trip_id = trip_id, .temp = temp };
return thermal_genl_send_event(THERMAL_GENL_EVENT_TZ_TRIP_UP, &p);
}
diff --git a/drivers/thermal/thermal_netlink.h b/drivers/thermal/thermal_netlink.h
index 828d1dddfa98..e554f76291f4 100644
--- a/drivers/thermal/thermal_netlink.h
+++ b/drivers/thermal/thermal_netlink.h
@@ -11,8 +11,8 @@ int thermal_notify_tz_create(int tz_id, const char *name);
int thermal_notify_tz_delete(int tz_id);
int thermal_notify_tz_enable(int tz_id);
int thermal_notify_tz_disable(int tz_id);
-int thermal_notify_tz_trip_down(int tz_id, int id);
-int thermal_notify_tz_trip_up(int tz_id, int id);
+int thermal_notify_tz_trip_down(int tz_id, int id, int temp);
+int thermal_notify_tz_trip_up(int tz_id, int id, int temp);
int thermal_notify_tz_trip_delete(int tz_id, int id);
int thermal_notify_tz_trip_add(int tz_id, int id, int type,
int temp, int hyst);
@@ -49,12 +49,12 @@ static inline int thermal_notify_tz_disable(int tz_id)
return 0;
}
-static inline int thermal_notify_tz_trip_down(int tz_id, int id)
+static inline int thermal_notify_tz_trip_down(int tz_id, int id, int temp)
{
return 0;
}
-static inline int thermal_notify_tz_trip_up(int tz_id, int id)
+static inline int thermal_notify_tz_trip_up(int tz_id, int id, int temp)
{
return 0;
}
diff --git a/drivers/thermal/thermal_sysfs.c b/drivers/thermal/thermal_sysfs.c
index 1c4aac8464a7..f154bada2906 100644
--- a/drivers/thermal/thermal_sysfs.c
+++ b/drivers/thermal/thermal_sysfs.c
@@ -610,6 +610,9 @@ cur_state_store(struct device *dev, struct device_attribute *attr,
unsigned long state;
int result;
+ dev_warn_once(&cdev->device,
+ "Setting cooling device state is deprecated\n");
+
if (sscanf(buf, "%ld\n", &state) != 1)
return -EINVAL;
diff --git a/drivers/thermal/uniphier_thermal.c b/drivers/thermal/uniphier_thermal.c
index bba2284412d3..4cae5561a2a3 100644
--- a/drivers/thermal/uniphier_thermal.c
+++ b/drivers/thermal/uniphier_thermal.c
@@ -358,6 +358,10 @@ static const struct of_device_id uniphier_tm_dt_ids[] = {
.compatible = "socionext,uniphier-pxs3-thermal",
.data = &uniphier_ld20_tm_data,
},
+ {
+ .compatible = "socionext,uniphier-nx1-thermal",
+ .data = &uniphier_ld20_tm_data,
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_tm_dt_ids);
diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c
index 0fb5e04191e2..4986edfbdf67 100644
--- a/drivers/thunderbolt/ctl.c
+++ b/drivers/thunderbolt/ctl.c
@@ -17,7 +17,7 @@
#define TB_CTL_RX_PKG_COUNT 10
-#define TB_CTL_RETRIES 1
+#define TB_CTL_RETRIES 4
/**
* struct tb_ctl - Thunderbolt control channel
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index d66ea4d616fd..eff32499610f 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -730,7 +730,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
/* Full buffer size except new line and null termination */
get_modalias(svc, buf, PAGE_SIZE - 2);
- return sprintf(buf, "%s\n", buf);
+ return strlen(strcat(buf, "\n"));
}
static DEVICE_ATTR_RO(modalias);
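The old return sprintf(buf, "%s\n", buf) passed the same buffer as both source and destination, which is undefined behaviour for sprintf(); appending in place and returning the new length avoids the aliasing. A standalone illustration of the replacement idiom (the modalias string below is made up):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[32] = "tbsvc:knetworkp00000001";	/* illustrative only */
	size_t len;

	/* sprintf(buf, "%s\n", buf) aliases src and dst; this does not. */
	len = strlen(strcat(buf, "\n"));

	printf("%zu bytes: %s", len, buf);
	return 0;
}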
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c911196ac893..8d0f07509ca7 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -296,7 +296,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
static void sysrq_handle_showstate(int key)
{
show_state();
- show_workqueue_state();
+ show_all_workqueues();
}
static const struct sysrq_key_op sysrq_showstate_op = {
.handler = sysrq_handle_showstate,
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index 652fe2547587..c08a6cfd119f 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -58,11 +58,11 @@ struct hv_uio_private_data {
atomic_t refcnt;
void *recv_buf;
- u32 recv_gpadl;
+ struct vmbus_gpadl recv_gpadl;
char recv_name[32]; /* "recv_4294967295" */
void *send_buf;
- u32 send_gpadl;
+ struct vmbus_gpadl send_gpadl;
char send_name[32];
};
@@ -179,15 +179,13 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
static void
hv_uio_cleanup(struct hv_device *dev, struct hv_uio_private_data *pdata)
{
- if (pdata->send_gpadl) {
- vmbus_teardown_gpadl(dev->channel, pdata->send_gpadl);
- pdata->send_gpadl = 0;
+ if (pdata->send_gpadl.gpadl_handle) {
+ vmbus_teardown_gpadl(dev->channel, &pdata->send_gpadl);
vfree(pdata->send_buf);
}
- if (pdata->recv_gpadl) {
- vmbus_teardown_gpadl(dev->channel, pdata->recv_gpadl);
- pdata->recv_gpadl = 0;
+ if (pdata->recv_gpadl.gpadl_handle) {
+ vmbus_teardown_gpadl(dev->channel, &pdata->recv_gpadl);
vfree(pdata->recv_buf);
}
}
@@ -303,7 +301,7 @@ hv_uio_probe(struct hv_device *dev,
/* put Global Physical Address Label in name */
snprintf(pdata->recv_name, sizeof(pdata->recv_name),
- "recv:%u", pdata->recv_gpadl);
+ "recv:%u", pdata->recv_gpadl.gpadl_handle);
pdata->info.mem[RECV_BUF_MAP].name = pdata->recv_name;
pdata->info.mem[RECV_BUF_MAP].addr
= (uintptr_t)pdata->recv_buf;
@@ -324,7 +322,7 @@ hv_uio_probe(struct hv_device *dev,
}
snprintf(pdata->send_name, sizeof(pdata->send_name),
- "send:%u", pdata->send_gpadl);
+ "send:%u", pdata->send_gpadl.gpadl_handle);
pdata->info.mem[SEND_BUF_MAP].name = pdata->send_name;
pdata->info.mem[SEND_BUF_MAP].addr
= (uintptr_t)pdata->send_buf;
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index 33ae03ac13a6..da17be1ef64e 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1015,9 +1015,11 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
int error = -ENOMEM;
int i, length;
unsigned int maxpacket, num_packets;
+ size_t size;
/* instance init */
- instance = kzalloc(sizeof(*instance) + sizeof(struct urb *) * (num_rcv_urbs + num_snd_urbs), GFP_KERNEL);
+ size = struct_size(instance, urbs, num_rcv_urbs + num_snd_urbs);
+ instance = kzalloc(size, GFP_KERNEL);
if (!instance)
return -ENOMEM;
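struct_size() replaces the open-coded sizeof(*instance) + n * sizeof(struct urb *) sum and saturates on overflow, so kzalloc() fails instead of handing back a short buffer. A sketch of the idiom with a hypothetical pkt_queue structure:

#include <linux/overflow.h>
#include <linux/slab.h>

struct urb;

struct pkt_queue {			/* hypothetical, not usbatm's layout */
	unsigned int	count;
	struct urb	*urbs[];	/* flexible array member */
};

static struct pkt_queue *pkt_queue_alloc(unsigned int n)
{
	struct pkt_queue *q;

	/* struct_size(q, urbs, n) == sizeof(*q) + n * sizeof(q->urbs[0]),
	 * clamped to SIZE_MAX if the multiplication would overflow.
	 */
	q = kzalloc(struct_size(q, urbs, n), GFP_KERNEL);
	if (q)
		q->count = n;
	return q;
}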
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 2b18f5088ae4..a56f06368d14 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -514,7 +514,7 @@ int hw_device_reset(struct ci_hdrc *ci)
return 0;
}
-static irqreturn_t ci_irq(int irq, void *data)
+static irqreturn_t ci_irq_handler(int irq, void *data)
{
struct ci_hdrc *ci = data;
irqreturn_t ret = IRQ_NONE;
@@ -567,6 +567,15 @@ static irqreturn_t ci_irq(int irq, void *data)
return ret;
}
+static void ci_irq(struct ci_hdrc *ci)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ci_irq_handler(ci->irq, ci);
+ local_irq_restore(flags);
+}
+
static int ci_cable_notifier(struct notifier_block *nb, unsigned long event,
void *ptr)
{
@@ -576,7 +585,7 @@ static int ci_cable_notifier(struct notifier_block *nb, unsigned long event,
cbl->connected = event;
cbl->changed = true;
- ci_irq(ci->irq, ci);
+ ci_irq(ci);
return NOTIFY_DONE;
}
@@ -617,7 +626,7 @@ static int ci_usb_role_switch_set(struct usb_role_switch *sw,
if (cable) {
cable->changed = true;
cable->connected = false;
- ci_irq(ci->irq, ci);
+ ci_irq(ci);
spin_unlock_irqrestore(&ci->lock, flags);
if (ci->wq && role != USB_ROLE_NONE)
flush_workqueue(ci->wq);
@@ -635,7 +644,7 @@ static int ci_usb_role_switch_set(struct usb_role_switch *sw,
if (cable) {
cable->changed = true;
cable->connected = true;
- ci_irq(ci->irq, ci);
+ ci_irq(ci);
}
spin_unlock_irqrestore(&ci->lock, flags);
pm_runtime_put_sync(ci->dev);
@@ -1174,7 +1183,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
- ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
+ ret = devm_request_irq(dev, ci->irq, ci_irq_handler, IRQF_SHARED,
ci->platdata->name, ci);
if (ret)
goto stop;
@@ -1295,11 +1304,11 @@ static void ci_extcon_wakeup_int(struct ci_hdrc *ci)
if (!IS_ERR(cable_id->edev) && ci->is_otg &&
(otgsc & OTGSC_IDIE) && (otgsc & OTGSC_IDIS))
- ci_irq(ci->irq, ci);
+ ci_irq(ci);
if (!IS_ERR(cable_vbus->edev) && ci->is_otg &&
(otgsc & OTGSC_BSVIE) && (otgsc & OTGSC_BSVIS))
- ci_irq(ci->irq, ci);
+ ci_irq(ci);
}
static int ci_controller_resume(struct device *dev)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 8834ca613721..f9ca5010f65b 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -49,6 +49,8 @@ ctrl_endpt_in_desc = {
.wMaxPacketSize = cpu_to_le16(CTRL_PAYLOAD_MAX),
};
+static int reprime_dtd(struct ci_hdrc *ci, struct ci_hw_ep *hwep,
+ struct td_node *node);
/**
* hw_ep_bit: calculates the bit number
* @num: endpoint number
@@ -599,6 +601,12 @@ static int _hardware_enqueue(struct ci_hw_ep *hwep, struct ci_hw_req *hwreq)
prevlastnode->ptr->next = cpu_to_le32(next);
wmb();
+
+ if (ci->rev == CI_REVISION_22) {
+ if (!hw_read(ci, OP_ENDPTSTAT, BIT(n)))
+ reprime_dtd(ci, hwep, prevlastnode);
+ }
+
if (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
goto done;
do {
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b2e2420ecae..b3ce7338cb6b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1859,7 +1859,6 @@ static const struct usb_device_id acm_ids[] = {
{ NOKIA_PCSUITE_ACM_INFO(0x0071), }, /* Nokia N82 */
{ NOKIA_PCSUITE_ACM_INFO(0x04F0), }, /* Nokia N95 & N95-3 NAM */
{ NOKIA_PCSUITE_ACM_INFO(0x0070), }, /* Nokia N95 8GB */
- { NOKIA_PCSUITE_ACM_INFO(0x00e9), }, /* Nokia 5320 XpressMusic */
{ NOKIA_PCSUITE_ACM_INFO(0x0099), }, /* Nokia 6210 Navigator, RM-367 */
{ NOKIA_PCSUITE_ACM_INFO(0x0128), }, /* Nokia 6210 Navigator, RM-419 */
{ NOKIA_PCSUITE_ACM_INFO(0x008f), }, /* Nokia 6220 Classic */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index fdf79bcf7eb0..7f2c83f299d3 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -911,7 +911,7 @@ static int wdm_wwan_port_tx(struct wwan_port *port, struct sk_buff *skb)
return rv;
}
-static struct wwan_port_ops wdm_wwan_port_ops = {
+static const struct wwan_port_ops wdm_wwan_port_ops = {
.start = wdm_wwan_port_start,
.stop = wdm_wwan_port_stop,
.tx = wdm_wwan_port_tx,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index b199eb65f378..16b1fd9dc60c 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -986,7 +986,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
__u8 cap_type;
int ret;
- bos = kzalloc(sizeof(struct usb_bos_descriptor), GFP_KERNEL);
+ bos = kzalloc(sizeof(*bos), GFP_KERNEL);
if (!bos)
return -ENOMEM;
@@ -1007,7 +1007,7 @@ int usb_get_bos_descriptor(struct usb_device *dev)
if (total_len < length)
return -EINVAL;
- dev->bos = kzalloc(sizeof(struct usb_host_bos), GFP_KERNEL);
+ dev->bos = kzalloc(sizeof(*dev->bos), GFP_KERNEL);
if (!dev->bos)
return -ENOMEM;
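The kzalloc(sizeof(*dev->bos), ...) form ties the allocation size to the pointer's own type, so a later type change cannot leave the two out of sync. The same shape with a hypothetical state structure:

struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);	/* not sizeof(struct foo_state) */
if (!st)
	return -ENOMEM;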
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 9618ba622a2d..fa66e6e58792 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -32,6 +32,7 @@
#include <linux/usb.h>
#include <linux/usbdevice_fs.h>
#include <linux/usb/hcd.h> /* for usbcore internals */
+#include <linux/usb/quirks.h>
#include <linux/cdev.h>
#include <linux/notifier.h>
#include <linux/security.h>
@@ -1102,14 +1103,55 @@ static int usbdev_release(struct inode *inode, struct file *file)
return 0;
}
+static void usbfs_blocking_completion(struct urb *urb)
+{
+ complete((struct completion *) urb->context);
+}
+
+/*
+ * Much like usb_start_wait_urb, but returns status separately from
+ * actual_length and uses a killable wait.
+ */
+static int usbfs_start_wait_urb(struct urb *urb, int timeout,
+ unsigned int *actlen)
+{
+ DECLARE_COMPLETION_ONSTACK(ctx);
+ unsigned long expire;
+ int rc;
+
+ urb->context = &ctx;
+ urb->complete = usbfs_blocking_completion;
+ *actlen = 0;
+ rc = usb_submit_urb(urb, GFP_KERNEL);
+ if (unlikely(rc))
+ return rc;
+
+ expire = (timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT);
+ rc = wait_for_completion_killable_timeout(&ctx, expire);
+ if (rc <= 0) {
+ usb_kill_urb(urb);
+ *actlen = urb->actual_length;
+ if (urb->status != -ENOENT)
+ ; /* Completed before it was killed */
+ else if (rc < 0)
+ return -EINTR;
+ else
+ return -ETIMEDOUT;
+ }
+ *actlen = urb->actual_length;
+ return urb->status;
+}
+
static int do_proc_control(struct usb_dev_state *ps,
struct usbdevfs_ctrltransfer *ctrl)
{
struct usb_device *dev = ps->dev;
unsigned int tmo;
unsigned char *tbuf;
- unsigned wLength;
+ unsigned int wLength, actlen;
int i, pipe, ret;
+ struct urb *urb = NULL;
+ struct usb_ctrlrequest *dr = NULL;
ret = check_ctrlrecip(ps, ctrl->bRequestType, ctrl->bRequest,
ctrl->wIndex);
@@ -1122,51 +1164,63 @@ static int do_proc_control(struct usb_dev_state *ps,
sizeof(struct usb_ctrlrequest));
if (ret)
return ret;
+
+ ret = -ENOMEM;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!tbuf) {
- ret = -ENOMEM;
+ if (!tbuf)
goto done;
- }
+ urb = usb_alloc_urb(0, GFP_NOIO);
+ if (!urb)
+ goto done;
+ dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO);
+ if (!dr)
+ goto done;
+
+ dr->bRequestType = ctrl->bRequestType;
+ dr->bRequest = ctrl->bRequest;
+ dr->wValue = cpu_to_le16(ctrl->wValue);
+ dr->wIndex = cpu_to_le16(ctrl->wIndex);
+ dr->wLength = cpu_to_le16(ctrl->wLength);
+
tmo = ctrl->timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl->bRequestType, ctrl->bRequest, ctrl->wValue,
ctrl->wIndex, ctrl->wLength);
- if ((ctrl->bRequestType & USB_DIR_IN) && ctrl->wLength) {
+
+ if ((ctrl->bRequestType & USB_DIR_IN) && wLength) {
pipe = usb_rcvctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT, NULL, 0);
+ usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf,
+ wLength, NULL, NULL);
+ snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
- i = usb_control_msg(dev, pipe, ctrl->bRequest,
- ctrl->bRequestType, ctrl->wValue, ctrl->wIndex,
- tbuf, ctrl->wLength, tmo);
+ i = usbfs_start_wait_urb(urb, tmo, &actlen);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
- tbuf, max(i, 0));
- if ((i > 0) && ctrl->wLength) {
- if (copy_to_user(ctrl->data, tbuf, i)) {
+ snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, tbuf, actlen);
+ if (!i && actlen) {
+ if (copy_to_user(ctrl->data, tbuf, actlen)) {
ret = -EFAULT;
- goto done;
+ goto recv_fault;
}
}
} else {
- if (ctrl->wLength) {
- if (copy_from_user(tbuf, ctrl->data, ctrl->wLength)) {
+ if (wLength) {
+ if (copy_from_user(tbuf, ctrl->data, wLength)) {
ret = -EFAULT;
goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl->wLength, tmo, SUBMIT,
- tbuf, ctrl->wLength);
+ usb_fill_control_urb(urb, dev, pipe, (unsigned char *) dr, tbuf,
+ wLength, NULL, NULL);
+ snoop_urb(dev, NULL, pipe, wLength, tmo, SUBMIT, tbuf, wLength);
usb_unlock_device(dev);
- i = usb_control_msg(dev, pipe, ctrl->bRequest,
- ctrl->bRequestType, ctrl->wValue, ctrl->wIndex,
- tbuf, ctrl->wLength, tmo);
+ i = usbfs_start_wait_urb(urb, tmo, &actlen);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
+ snoop_urb(dev, NULL, pipe, actlen, i, COMPLETE, NULL, 0);
}
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
@@ -1174,8 +1228,15 @@ static int do_proc_control(struct usb_dev_state *ps,
current->comm, ctrl->bRequestType, ctrl->bRequest,
ctrl->wLength, i);
}
- ret = i;
+ ret = (i < 0 ? i : actlen);
+
+ recv_fault:
+ /* Linger a bit, prior to the next control message. */
+ if (dev->quirks & USB_QUIRK_DELAY_CTRL_MSG)
+ msleep(200);
done:
+ kfree(dr);
+ usb_free_urb(urb);
free_page((unsigned long) tbuf);
usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
sizeof(struct usb_ctrlrequest));
@@ -1195,10 +1256,11 @@ static int do_proc_bulk(struct usb_dev_state *ps,
struct usbdevfs_bulktransfer *bulk)
{
struct usb_device *dev = ps->dev;
- unsigned int tmo, len1, pipe;
- int len2;
+ unsigned int tmo, len1, len2, pipe;
unsigned char *tbuf;
int i, ret;
+ struct urb *urb = NULL;
+ struct usb_host_endpoint *ep;
ret = findintfep(ps->dev, bulk->ep);
if (ret < 0)
@@ -1206,14 +1268,17 @@ static int do_proc_bulk(struct usb_dev_state *ps,
ret = checkintf(ps, ret);
if (ret)
return ret;
+
+ len1 = bulk->len;
+ if (len1 < 0 || len1 >= (INT_MAX - sizeof(struct urb)))
+ return -EINVAL;
+
if (bulk->ep & USB_DIR_IN)
pipe = usb_rcvbulkpipe(dev, bulk->ep & 0x7f);
else
pipe = usb_sndbulkpipe(dev, bulk->ep & 0x7f);
- if (!usb_maxpacket(dev, pipe, !(bulk->ep & USB_DIR_IN)))
- return -EINVAL;
- len1 = bulk->len;
- if (len1 >= (INT_MAX - sizeof(struct urb)))
+ ep = usb_pipe_endpoint(dev, pipe);
+ if (!ep || !usb_endpoint_maxp(&ep->desc))
return -EINVAL;
ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
if (ret)
@@ -1223,17 +1288,29 @@ static int do_proc_bulk(struct usb_dev_state *ps,
* len1 can be almost arbitrarily large. Don't WARN if it's
* too big, just fail the request.
*/
+ ret = -ENOMEM;
tbuf = kmalloc(len1, GFP_KERNEL | __GFP_NOWARN);
- if (!tbuf) {
- ret = -ENOMEM;
+ if (!tbuf)
+ goto done;
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
goto done;
+
+ if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT) {
+ pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+ usb_fill_int_urb(urb, dev, pipe, tbuf, len1,
+ NULL, NULL, ep->desc.bInterval);
+ } else {
+ usb_fill_bulk_urb(urb, dev, pipe, tbuf, len1, NULL, NULL);
}
+
tmo = bulk->timeout;
if (bulk->ep & 0x80) {
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
- i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
+ i = usbfs_start_wait_urb(urb, tmo, &len2);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
@@ -1253,12 +1330,13 @@ static int do_proc_bulk(struct usb_dev_state *ps,
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
usb_unlock_device(dev);
- i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
+ i = usbfs_start_wait_urb(urb, tmo, &len2);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
ret = (i < 0 ? i : len2);
done:
+ usb_free_urb(urb);
kfree(tbuf);
usbfs_decrease_memory_usage(len1 + sizeof(struct urb));
return ret;
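usbfs_start_wait_urb() is the reusable piece in the devio.c rework: submit asynchronously, sleep in a killable and time-limited wait, and cancel on early wake-up so the completion cannot fire into a stack frame that is gone. A condensed sketch of the pattern around a hypothetical my_op structure with my_op_submit()/my_op_cancel():

static void my_op_done(struct my_op *op)
{
	complete(op->done);			/* called from the async path */
}

static int submit_and_wait(struct my_op *op, unsigned int timeout_ms)
{
	DECLARE_COMPLETION_ONSTACK(done);
	unsigned long expire;
	long rc;
	int ret;

	op->done = &done;
	ret = my_op_submit(op);			/* completes via my_op_done() */
	if (ret)
		return ret;

	expire = timeout_ms ? msecs_to_jiffies(timeout_ms)
			    : MAX_SCHEDULE_TIMEOUT;
	rc = wait_for_completion_killable_timeout(&done, expire);
	if (rc > 0)
		return op->status;		/* finished normally */

	/* Fatal signal or timeout: cancel and let the handler drain.  The
	 * usbfs code additionally checks whether the URB had completed
	 * before the kill took effect and returns its status in that case.
	 */
	my_op_cancel(op);
	return rc < 0 ? -EINTR : -ETIMEDOUT;
}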
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 7ee6e4cc0d89..a3311e937847 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2732,14 +2732,14 @@ static int usb_hcd_request_irqs(struct usb_hcd *hcd,
hcd->irq = irqnum;
dev_info(hcd->self.controller, "irq %d, %s 0x%08llx\n", irqnum,
(hcd->driver->flags & HCD_MEMORY) ?
- "io mem" : "io base",
- (unsigned long long)hcd->rsrc_start);
+ "io mem" : "io port",
+ (unsigned long long)hcd->rsrc_start);
} else {
hcd->irq = 0;
if (hcd->rsrc_start)
dev_info(hcd->self.controller, "%s 0x%08llx\n",
(hcd->driver->flags & HCD_MEMORY) ?
- "io mem" : "io base",
+ "io mem" : "io port",
(unsigned long long)hcd->rsrc_start);
}
return 0;
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index cb9059a8444b..37185eb66ae4 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -238,11 +238,14 @@ enum dwc2_ep0_state {
/**
* struct dwc2_core_params - Parameters for configuring the core
*
- * @otg_cap: Specifies the OTG capabilities.
- * 0 - HNP and SRP capable
- * 1 - SRP Only capable
- * 2 - No HNP/SRP capable (always available)
- * Defaults to best available option (0, 1, then 2)
+ * @otg_caps: Specifies the OTG capabilities. OTG caps from the platform parameters,
+ * used to setup the:
+ * - HNP and SRP capable
+ * - SRP Only capable
+ * - No HNP/SRP capable (always available)
+ * Defaults to best available option
+ * - OTG revision number the device is compliant with, in binary-coded
+ * decimal (i.e. 2.0 is 0200H). (see struct usb_otg_caps)
* @host_dma: Specifies whether to use slave or DMA mode for accessing
* the data FIFOs. The driver will automatically detect the
* value for this parameter if none is specified.
@@ -453,11 +456,7 @@ enum dwc2_ep0_state {
* default described above.
*/
struct dwc2_core_params {
- u8 otg_cap;
-#define DWC2_CAP_PARAM_HNP_SRP_CAPABLE 0
-#define DWC2_CAP_PARAM_SRP_ONLY_CAPABLE 1
-#define DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE 2
-
+ struct usb_otg_caps otg_caps;
u8 phy_type;
#define DWC2_PHY_TYPE_PARAM_FS 0
#define DWC2_PHY_TYPE_PARAM_UTMI 1
diff --git a/drivers/usb/dwc2/debugfs.c b/drivers/usb/dwc2/debugfs.c
index f13eed4231e1..1d72ece9cfe4 100644
--- a/drivers/usb/dwc2/debugfs.c
+++ b/drivers/usb/dwc2/debugfs.c
@@ -670,7 +670,9 @@ static int params_show(struct seq_file *seq, void *v)
struct dwc2_core_params *p = &hsotg->params;
int i;
- print_param(seq, p, otg_cap);
+ print_param(seq, p, otg_caps.hnp_support);
+ print_param(seq, p, otg_caps.srp_support);
+ print_param(seq, p, otg_caps.otg_rev);
print_param(seq, p, dma_desc_enable);
print_param(seq, p, dma_desc_fs_enable);
print_param(seq, p, speed);
diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c
index 2d4176f5788e..aa6eb76f64dd 100644
--- a/drivers/usb/dwc2/drd.c
+++ b/drivers/usb/dwc2/drd.c
@@ -7,6 +7,7 @@
* Author(s): Amelie Delaunay <amelie.delaunay@st.com>
*/
+#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/platform_device.h>
#include <linux/usb/role.h>
@@ -25,9 +26,9 @@ static void dwc2_ovr_init(struct dwc2_hsotg *hsotg)
gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL);
dwc2_writel(hsotg, gotgctl, GOTGCTL);
- dwc2_force_mode(hsotg, false);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
+
+ dwc2_force_mode(hsotg, (hsotg->dr_mode == USB_DR_MODE_HOST));
}
static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
@@ -39,6 +40,7 @@ static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_ASESVLD)))
return -EALREADY;
+ gotgctl &= ~GOTGCTL_BVALOVAL;
if (valid)
gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL;
else
@@ -57,6 +59,7 @@ static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid)
(!valid && !(gotgctl & GOTGCTL_BSESVLD)))
return -EALREADY;
+ gotgctl &= ~GOTGCTL_AVALOVAL;
if (valid)
gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL;
else
@@ -86,6 +89,20 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
}
#endif
+ /*
+ * In case of USB_DR_MODE_PERIPHERAL, clock is disabled at the end of
+ * the probe and enabled on udc_start.
+ * If role-switch set is called before the udc_start, we need to enable
+ * the clock to read/write GOTGCTL and GUSBCFG registers to override
+ * mode and sessions. It is the case if cable is plugged at boot.
+ */
+ if (!hsotg->ll_hw_enabled && hsotg->clk) {
+ int ret = clk_prepare_enable(hsotg->clk);
+
+ if (ret)
+ return ret;
+ }
+
spin_lock_irqsave(&hsotg->lock, flags);
if (role == USB_ROLE_HOST) {
@@ -110,6 +127,9 @@ static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role)
/* This will raise a Connector ID Status Change Interrupt */
dwc2_force_mode(hsotg, role == USB_ROLE_HOST);
+ if (!hsotg->ll_hw_enabled && hsotg->clk)
+ clk_disable_unprepare(hsotg->clk);
+
dev_dbg(hsotg->dev, "%s-session valid\n",
role == USB_ROLE_NONE ? "No" :
role == USB_ROLE_HOST ? "A" : "B");
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 11d85a6e0b0d..4ab4a1d5062b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -4966,6 +4966,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &dwc2_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
+ hsotg->gadget.otg_caps = &hsotg->params.otg_caps;
hsotg->remote_wakeup_allowed = 0;
if (hsotg->params.lpm)
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index a215ec9e172e..13c779a28e94 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -138,19 +138,15 @@ static void dwc2_gusbcfg_init(struct dwc2_hsotg *hsotg)
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- if (hsotg->params.otg_cap ==
- DWC2_CAP_PARAM_HNP_SRP_CAPABLE)
+ if (hsotg->params.otg_caps.hnp_support &&
+ hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_HNPCAP;
- if (hsotg->params.otg_cap !=
- DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
- usbcfg |= GUSBCFG_SRPCAP;
- break;
+ fallthrough;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- if (hsotg->params.otg_cap !=
- DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE)
+ if (hsotg->params.otg_caps.srp_support)
usbcfg |= GUSBCFG_SRPCAP;
break;
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 59e119345994..d300ae3d9274 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
+#include <linux/usb/of.h>
#include "core.h"
@@ -53,7 +54,8 @@ static void dwc2_set_his_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 512;
p->host_nperio_tx_fifo_size = 512;
@@ -84,7 +86,8 @@ static void dwc2_set_rk_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
p->host_rx_fifo_size = 525;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 256;
@@ -97,7 +100,8 @@ static void dwc2_set_ltq_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = 2;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
p->host_rx_fifo_size = 288;
p->host_nperio_tx_fifo_size = 128;
p->host_perio_tx_fifo_size = 96;
@@ -111,7 +115,8 @@ static void dwc2_set_amlogic_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_HIGH;
p->host_rx_fifo_size = 512;
p->host_nperio_tx_fifo_size = 500;
@@ -144,7 +149,8 @@ static void dwc2_set_stm32f4x9_fsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
p->speed = DWC2_SPEED_PARAM_FULL;
p->host_rx_fifo_size = 128;
p->host_nperio_tx_fifo_size = 96;
@@ -168,7 +174,9 @@ static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->otg_caps.otg_rev = 0x200;
p->speed = DWC2_SPEED_PARAM_FULL;
p->host_rx_fifo_size = 128;
p->host_nperio_tx_fifo_size = 96;
@@ -188,7 +196,9 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
{
struct dwc2_core_params *p = &hsotg->params;
- p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ p->otg_caps.hnp_support = false;
+ p->otg_caps.srp_support = false;
+ p->otg_caps.otg_rev = 0x200;
p->activate_stm_id_vb_detection = !device_property_read_bool(hsotg->dev, "usb-role-switch");
p->host_rx_fifo_size = 440;
p->host_nperio_tx_fifo_size = 256;
@@ -241,23 +251,22 @@ MODULE_DEVICE_TABLE(acpi, dwc2_acpi_match);
static void dwc2_set_param_otg_cap(struct dwc2_hsotg *hsotg)
{
- u8 val;
-
switch (hsotg->hw_params.op_mode) {
case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- val = DWC2_CAP_PARAM_HNP_SRP_CAPABLE;
+ hsotg->params.otg_caps.hnp_support = true;
+ hsotg->params.otg_caps.srp_support = true;
break;
case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- val = DWC2_CAP_PARAM_SRP_ONLY_CAPABLE;
+ hsotg->params.otg_caps.hnp_support = false;
+ hsotg->params.otg_caps.srp_support = true;
break;
default:
- val = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE;
+ hsotg->params.otg_caps.hnp_support = false;
+ hsotg->params.otg_caps.srp_support = false;
break;
}
-
- hsotg->params.otg_cap = val;
}
static void dwc2_set_param_phy_type(struct dwc2_hsotg *hsotg)
@@ -463,6 +472,8 @@ static void dwc2_get_device_properties(struct dwc2_hsotg *hsotg)
&p->g_tx_fifo_size[1],
num);
}
+
+ of_usb_update_otg_caps(hsotg->dev->of_node, &p->otg_caps);
}
if (of_find_property(hsotg->dev->of_node, "disable-over-current", NULL))
@@ -473,29 +484,27 @@ static void dwc2_check_param_otg_cap(struct dwc2_hsotg *hsotg)
{
int valid = 1;
- switch (hsotg->params.otg_cap) {
- case DWC2_CAP_PARAM_HNP_SRP_CAPABLE:
+ if (hsotg->params.otg_caps.hnp_support && hsotg->params.otg_caps.srp_support) {
+ /* check HNP && SRP capable */
if (hsotg->hw_params.op_mode != GHWCFG2_OP_MODE_HNP_SRP_CAPABLE)
valid = 0;
- break;
- case DWC2_CAP_PARAM_SRP_ONLY_CAPABLE:
- switch (hsotg->hw_params.op_mode) {
- case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
- case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
- break;
- default:
- valid = 0;
- break;
+ } else if (!hsotg->params.otg_caps.hnp_support) {
+ /* check SRP only capable */
+ if (hsotg->params.otg_caps.srp_support) {
+ switch (hsotg->hw_params.op_mode) {
+ case GHWCFG2_OP_MODE_HNP_SRP_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_ONLY_CAPABLE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_DEVICE:
+ case GHWCFG2_OP_MODE_SRP_CAPABLE_HOST:
+ break;
+ default:
+ valid = 0;
+ break;
+ }
}
- break;
- case DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE:
- /* always valid */
- break;
- default:
+ /* else: NO HNP && NO SRP capable: always valid */
+ } else {
valid = 0;
- break;
}
if (!valid)
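The otg_cap enum becomes a struct usb_otg_caps, which is what of_usb_update_otg_caps() and the gadget core already consume. A sketch of how a platform default is then narrowed by firmware; foo_init_otg_caps() is hypothetical, and the exact device-tree property handling ("otg-rev", "hnp-disable", "srp-disable") lives in drivers/usb/common/common.c:

#include <linux/device.h>
#include <linux/usb/of.h>
#include <linux/usb/otg.h>

static void foo_init_otg_caps(struct device *dev, struct usb_otg_caps *caps)
{
	/* platform defaults first ... */
	caps->otg_rev = 0x200;		/* OTG 2.0, binary-coded decimal */
	caps->hnp_support = true;
	caps->srp_support = true;
	caps->adp_support = false;

	/* ... then let firmware disable what the board cannot do */
	of_usb_update_otg_caps(dev->of_node, caps);
}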
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 66b1454c4db2..c483f28b695d 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -66,12 +66,13 @@ config USB_DWC3_OMAP
Say 'Y' or 'M' here if you have one such device
config USB_DWC3_EXYNOS
- tristate "Samsung Exynos Platform"
+ tristate "Samsung Exynos SoC Platform"
depends on (ARCH_EXYNOS || COMPILE_TEST) && OF
default USB_DWC3
help
- Recent Exynos5 SoCs ship with one DesignWare Core USB3 IP inside,
- say 'Y' or 'M' if you have one such device.
+ Recent Samsung Exynos SoCs (Exynos5250, Exynos5410, Exynos542x,
+ Exynos5800, Exynos5433, Exynos7) ship with one DesignWare Core USB3
+ IP inside, say 'Y' or 'M' if you have one such device.
config USB_DWC3_PCI
tristate "PCIe-based Platforms"
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 0104a80b185e..643239d7d370 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -26,6 +26,7 @@
#include <linux/acpi.h>
#include <linux/pinctrl/consumer.h>
#include <linux/reset.h>
+#include <linux/bitfield.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -336,6 +337,29 @@ static void dwc3_frame_length_adjustment(struct dwc3 *dwc)
}
/**
+ * dwc3_ref_clk_period - Reference clock period configuration
+ * Default reference clock period depends on hardware
+ * configuration. For systems with reference clock that differs
+ * from the default, this will set clock period in DWC3_GUCTL
+ * register.
+ * @dwc: Pointer to our controller context structure
+ * @ref_clk_per: reference clock period in ns
+ */
+static void dwc3_ref_clk_period(struct dwc3 *dwc)
+{
+ u32 reg;
+
+ if (dwc->ref_clk_per == 0)
+ return;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUCTL);
+ reg &= ~DWC3_GUCTL_REFCLKPER_MASK;
+ reg |= FIELD_PREP(DWC3_GUCTL_REFCLKPER_MASK, dwc->ref_clk_per);
+ dwc3_writel(dwc->regs, DWC3_GUCTL, reg);
+}
+
+
+/**
* dwc3_free_one_event_buffer - Frees one event buffer
* @dwc: Pointer to our controller context structure
* @evt: Pointer to event buffer to be freed
@@ -1007,6 +1031,9 @@ static int dwc3_core_init(struct dwc3 *dwc)
/* Adjust Frame Length */
dwc3_frame_length_adjustment(dwc);
+ /* Adjust Reference Clock Period */
+ dwc3_ref_clk_period(dwc);
+
dwc3_set_incr_burst_type(dwc);
usb_phy_set_suspend(dwc->usb2_phy, 0);
@@ -1389,6 +1416,8 @@ static void dwc3_get_properties(struct dwc3 *dwc)
&dwc->hsphy_interface);
device_property_read_u32(dev, "snps,quirk-frame-length-adjustment",
&dwc->fladj);
+ device_property_read_u32(dev, "snps,ref-clock-period-ns",
+ &dwc->ref_clk_per);
dwc->dis_metastability_quirk = device_property_read_bool(dev,
"snps,dis_metastability_quirk");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 5612bfdf37da..620c8d3914d7 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -387,6 +387,10 @@
#define DWC3_GFLADJ_30MHZ_SDBND_SEL BIT(7)
#define DWC3_GFLADJ_30MHZ_MASK 0x3f
+/* Global User Control Register*/
+#define DWC3_GUCTL_REFCLKPER_MASK 0xffc00000
+#define DWC3_GUCTL_REFCLKPER_SEL 22
+
/* Global User Control Register 2 */
#define DWC3_GUCTL2_RST_ACTBITLATER BIT(14)
@@ -711,21 +715,22 @@ struct dwc3_ep {
u32 saved_state;
unsigned int flags;
-#define DWC3_EP_ENABLED BIT(0)
-#define DWC3_EP_STALL BIT(1)
-#define DWC3_EP_WEDGE BIT(2)
-#define DWC3_EP_TRANSFER_STARTED BIT(3)
-#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
-#define DWC3_EP_PENDING_REQUEST BIT(5)
-#define DWC3_EP_DELAY_START BIT(6)
+#define DWC3_EP_ENABLED BIT(0)
+#define DWC3_EP_STALL BIT(1)
+#define DWC3_EP_WEDGE BIT(2)
+#define DWC3_EP_TRANSFER_STARTED BIT(3)
+#define DWC3_EP_END_TRANSFER_PENDING BIT(4)
+#define DWC3_EP_PENDING_REQUEST BIT(5)
+#define DWC3_EP_DELAY_START BIT(6)
#define DWC3_EP_WAIT_TRANSFER_COMPLETE BIT(7)
#define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8)
#define DWC3_EP_FORCE_RESTART_STREAM BIT(9)
#define DWC3_EP_FIRST_STREAM_PRIMED BIT(10)
#define DWC3_EP_PENDING_CLEAR_STALL BIT(11)
+#define DWC3_EP_TXFIFO_RESIZED BIT(12)
/* This last one is specific to EP0 */
-#define DWC3_EP0_DIR_IN BIT(31)
+#define DWC3_EP0_DIR_IN BIT(31)
/*
* IMPORTANT: we *know* we have 256 TRBs in our @trb_pool, so we will
@@ -970,6 +975,7 @@ struct dwc3_scratchpad_array {
* @regs: base address for our registers
* @regs_size: address space size
* @fladj: frame length adjustment
+ * @ref_clk_per: reference clock period configuration
* @irq_gadget: peripheral controller's IRQ number
* @otg_irq: IRQ number for OTG IRQs
* @current_otg_role: current role of operation while using the OTG block
@@ -1027,6 +1033,7 @@ struct dwc3_scratchpad_array {
* @tx_fifo_resize_max_num: max number of fifos allocated during txfifo resize
* @hsphy_interface: "utmi" or "ulpi"
* @connected: true when we're connected to a host, false otherwise
+ * @softconnect: true when gadget connect is called, false when disconnect runs
* @delayed_status: true when gadget driver asks for delayed status
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
@@ -1149,6 +1156,7 @@ struct dwc3 {
struct power_supply *usb_psy;
u32 fladj;
+ u32 ref_clk_per;
u32 irq_gadget;
u32 otg_irq;
u32 current_otg_role;
@@ -1246,6 +1254,7 @@ struct dwc3 {
const char *hsphy_interface;
unsigned connected:1;
+ unsigned softconnect:1;
unsigned delayed_status:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4519d06c9ca2..23de2a5a40d6 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -702,6 +702,7 @@ void dwc3_gadget_clear_tx_fifos(struct dwc3 *dwc)
DWC31_GTXFIFOSIZ_TXFRAMNUM;
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num >> 1), size);
+ dep->flags &= ~DWC3_EP_TXFIFO_RESIZED;
}
dwc->num_ep_resized = 0;
}
@@ -747,6 +748,10 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
if (!usb_endpoint_dir_in(dep->endpoint.desc) || dep->number <= 1)
return 0;
+ /* bail if already resized */
+ if (dep->flags & DWC3_EP_TXFIFO_RESIZED)
+ return 0;
+
ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
if ((dep->endpoint.maxburst > 1 &&
@@ -807,6 +812,7 @@ static int dwc3_gadget_resize_tx_fifos(struct dwc3_ep *dep)
}
dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->number >> 1), fifo_size);
+ dep->flags |= DWC3_EP_TXFIFO_RESIZED;
dwc->num_ep_resized++;
return 0;
@@ -995,7 +1001,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->type = 0;
- dep->flags = 0;
+ dep->flags &= DWC3_EP_TXFIFO_RESIZED;
return 0;
}
@@ -1813,7 +1819,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
struct dwc3 *dwc = dep->dwc;
if (!dep->endpoint.desc || !dwc->pullups_connected || !dwc->connected) {
- dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n",
+ dev_dbg(dwc->dev, "%s: can't queue to disabled endpoint\n",
dep->name);
return -ESHUTDOWN;
}
@@ -2418,7 +2424,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
int ret;
is_on = !!is_on;
-
+ dwc->softconnect = is_on;
/*
* Per databook, when we want to stop the gadget, if a control transfer
* is still in process, complete it and get the core into setup phase.
@@ -4352,7 +4358,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc)
{
int ret;
- if (!dwc->gadget_driver)
+ if (!dwc->gadget_driver || !dwc->softconnect)
return 0;
ret = __dwc3_gadget_start(dwc);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 477e72a1d11e..36c611d1d8d0 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -73,6 +73,11 @@ static inline struct config_usb_cfg *to_config_usb_cfg(struct config_item *item)
group);
}
+static inline struct gadget_info *cfg_to_gadget_info(struct config_usb_cfg *cfg)
+{
+ return container_of(cfg->c.cdev, struct gadget_info, cdev);
+}
+
struct gadget_strings {
struct usb_gadget_strings stringtab_dev;
struct usb_string strings[USB_GADGET_FIRST_AVAIL_IDX];
@@ -413,8 +418,7 @@ static int config_usb_cfg_link(
struct config_item *usb_func_ci)
{
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
- struct usb_composite_dev *cdev = cfg->c.cdev;
- struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+ struct gadget_info *gi = cfg_to_gadget_info(cfg);
struct config_group *group = to_config_group(usb_func_ci);
struct usb_function_instance *fi = container_of(group,
@@ -464,8 +468,7 @@ static void config_usb_cfg_unlink(
struct config_item *usb_func_ci)
{
struct config_usb_cfg *cfg = to_config_usb_cfg(usb_cfg_ci);
- struct usb_composite_dev *cdev = cfg->c.cdev;
- struct gadget_info *gi = container_of(cdev, struct gadget_info, cdev);
+ struct gadget_info *gi = cfg_to_gadget_info(cfg);
struct config_group *group = to_config_group(usb_func_ci);
struct usb_function_instance *fi = container_of(group,
@@ -505,12 +508,15 @@ static struct configfs_item_operations gadget_config_item_ops = {
static ssize_t gadget_config_desc_MaxPower_show(struct config_item *item,
char *page)
{
- return sprintf(page, "%u\n", to_config_usb_cfg(item)->c.MaxPower);
+ struct config_usb_cfg *cfg = to_config_usb_cfg(item);
+
+ return sprintf(page, "%u\n", cfg->c.MaxPower);
}
static ssize_t gadget_config_desc_MaxPower_store(struct config_item *item,
const char *page, size_t len)
{
+ struct config_usb_cfg *cfg = to_config_usb_cfg(item);
u16 val;
int ret;
ret = kstrtou16(page, 0, &val);
@@ -518,20 +524,22 @@ static ssize_t gadget_config_desc_MaxPower_store(struct config_item *item,
return ret;
if (DIV_ROUND_UP(val, 8) > 0xff)
return -ERANGE;
- to_config_usb_cfg(item)->c.MaxPower = val;
+ cfg->c.MaxPower = val;
return len;
}
static ssize_t gadget_config_desc_bmAttributes_show(struct config_item *item,
char *page)
{
- return sprintf(page, "0x%02x\n",
- to_config_usb_cfg(item)->c.bmAttributes);
+ struct config_usb_cfg *cfg = to_config_usb_cfg(item);
+
+ return sprintf(page, "0x%02x\n", cfg->c.bmAttributes);
}
static ssize_t gadget_config_desc_bmAttributes_store(struct config_item *item,
const char *page, size_t len)
{
+ struct config_usb_cfg *cfg = to_config_usb_cfg(item);
u8 val;
int ret;
ret = kstrtou8(page, 0, &val);
@@ -542,7 +550,7 @@ static ssize_t gadget_config_desc_bmAttributes_store(struct config_item *item,
if (val & ~(USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER |
USB_CONFIG_ATT_WAKEUP))
return -EINVAL;
- to_config_usb_cfg(item)->c.bmAttributes = val;
+ cfg->c.bmAttributes = val;
return len;
}
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 1eb4fa2e623f..ed5a92c474e5 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_ep_autoconfig);
* This function can be used during function bind for endpoints obtained
* from usb_ep_autoconfig(). It unclaims endpoint claimed by
* usb_ep_autoconfig() to make it available for other functions. Endpoint
- * which was released is no longer invalid and shouldn't be used in
+ * which was released is no longer valid and shouldn't be used in
* context of function which released it.
*/
void usb_ep_autoconfig_release(struct usb_ep *ep)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 8260f38025b7..e20c19a0f106 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -831,7 +831,7 @@ static void ffs_user_copy_worker(struct work_struct *work)
kthread_unuse_mm(io_data->mm);
}
- io_data->kiocb->ki_complete(io_data->kiocb, ret, ret);
+ io_data->kiocb->ki_complete(io_data->kiocb, ret);
if (io_data->ffs->ffs_eventfd && !kiocb_has_eventfd)
eventfd_signal(io_data->ffs->ffs_eventfd, 1);
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 6ad669dde41c..752439690fda 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -588,7 +588,7 @@ static int sleep_thread(struct fsg_common *common, bool can_freeze,
static int do_read(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
- u32 lba;
+ u64 lba;
struct fsg_buffhd *bh;
int rc;
u32 amount_left;
@@ -603,7 +603,10 @@ static int do_read(struct fsg_common *common)
if (common->cmnd[0] == READ_6)
lba = get_unaligned_be24(&common->cmnd[1]);
else {
- lba = get_unaligned_be32(&common->cmnd[2]);
+ if (common->cmnd[0] == READ_16)
+ lba = get_unaligned_be64(&common->cmnd[2]);
+ else /* READ_10 or READ_12 */
+ lba = get_unaligned_be32(&common->cmnd[2]);
/*
* We allow DPO (Disable Page Out = don't save data in the
@@ -716,7 +719,7 @@ static int do_read(struct fsg_common *common)
static int do_write(struct fsg_common *common)
{
struct fsg_lun *curlun = common->curlun;
- u32 lba;
+ u64 lba;
struct fsg_buffhd *bh;
int get_some_more;
u32 amount_left_to_req, amount_left_to_write;
@@ -740,7 +743,10 @@ static int do_write(struct fsg_common *common)
if (common->cmnd[0] == WRITE_6)
lba = get_unaligned_be24(&common->cmnd[1]);
else {
- lba = get_unaligned_be32(&common->cmnd[2]);
+ if (common->cmnd[0] == WRITE_16)
+ lba = get_unaligned_be64(&common->cmnd[2]);
+ else /* WRITE_10 or WRITE_12 */
+ lba = get_unaligned_be32(&common->cmnd[2]);
/*
* We allow DPO (Disable Page Out = don't save data in the
@@ -1115,6 +1121,7 @@ static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
u32 lba = get_unaligned_be32(&common->cmnd[2]);
int pmi = common->cmnd[8];
u8 *buf = (u8 *)bh->buf;
+ u32 max_lba;
/* Check the PMI and LBA fields */
if (pmi > 1 || (pmi == 0 && lba != 0)) {
@@ -1122,12 +1129,37 @@ static int do_read_capacity(struct fsg_common *common, struct fsg_buffhd *bh)
return -EINVAL;
}
- put_unaligned_be32(curlun->num_sectors - 1, &buf[0]);
- /* Max logical block */
- put_unaligned_be32(curlun->blksize, &buf[4]);/* Block length */
+ if (curlun->num_sectors < 0x100000000ULL)
+ max_lba = curlun->num_sectors - 1;
+ else
+ max_lba = 0xffffffff;
+ put_unaligned_be32(max_lba, &buf[0]); /* Max logical block */
+ put_unaligned_be32(curlun->blksize, &buf[4]); /* Block length */
return 8;
}
+static int do_read_capacity_16(struct fsg_common *common, struct fsg_buffhd *bh)
+{
+ struct fsg_lun *curlun = common->curlun;
+ u64 lba = get_unaligned_be64(&common->cmnd[2]);
+ int pmi = common->cmnd[14];
+ u8 *buf = (u8 *)bh->buf;
+
+ /* Check the PMI and LBA fields */
+ if (pmi > 1 || (pmi == 0 && lba != 0)) {
+ curlun->sense_data = SS_INVALID_FIELD_IN_CDB;
+ return -EINVAL;
+ }
+
+ put_unaligned_be64(curlun->num_sectors - 1, &buf[0]);
+ /* Max logical block */
+ put_unaligned_be32(curlun->blksize, &buf[8]); /* Block length */
+
+ /* It is safe to keep other fields zeroed */
+ memset(&buf[12], 0, 32 - 12);
+ return 32;
+}
+
static int do_read_header(struct fsg_common *common, struct fsg_buffhd *bh)
{
struct fsg_lun *curlun = common->curlun;
@@ -1874,6 +1906,17 @@ static int do_scsi_command(struct fsg_common *common)
reply = do_read(common);
break;
+ case READ_16:
+ common->data_size_from_cmnd =
+ get_unaligned_be32(&common->cmnd[10]);
+ reply = check_command_size_in_blocks(common, 16,
+ DATA_DIR_TO_HOST,
+ (1<<1) | (0xff<<2) | (0xf<<10), 1,
+ "READ(16)");
+ if (reply == 0)
+ reply = do_read(common);
+ break;
+
case READ_CAPACITY:
common->data_size_from_cmnd = 8;
reply = check_command(common, 10, DATA_DIR_TO_HOST,
@@ -1926,6 +1969,25 @@ static int do_scsi_command(struct fsg_common *common)
reply = do_request_sense(common, bh);
break;
+ case SERVICE_ACTION_IN_16:
+ switch (common->cmnd[1] & 0x1f) {
+
+ case SAI_READ_CAPACITY_16:
+ common->data_size_from_cmnd =
+ get_unaligned_be32(&common->cmnd[10]);
+ reply = check_command(common, 16, DATA_DIR_TO_HOST,
+ (1<<1) | (0xff<<2) | (0xf<<10) |
+ (1<<14), 1,
+ "READ CAPACITY(16)");
+ if (reply == 0)
+ reply = do_read_capacity_16(common, bh);
+ break;
+
+ default:
+ goto unknown_cmnd;
+ }
+ break;
+
case START_STOP:
common->data_size_from_cmnd = 0;
reply = check_command(common, 6, DATA_DIR_NONE,
@@ -1997,6 +2059,17 @@ static int do_scsi_command(struct fsg_common *common)
reply = do_write(common);
break;
+ case WRITE_16:
+ common->data_size_from_cmnd =
+ get_unaligned_be32(&common->cmnd[10]);
+ reply = check_command_size_in_blocks(common, 16,
+ DATA_DIR_FROM_HOST,
+ (1<<1) | (0xff<<2) | (0xf<<10), 1,
+ "WRITE(16)");
+ if (reply == 0)
+ reply = do_write(common);
+ break;
+
/*
* Some mandatory commands that we recognize but don't implement.
* They don't mean much in this setting. It's left as an exercise
@@ -2269,6 +2342,16 @@ static void fsg_disable(struct usb_function *f)
{
struct fsg_dev *fsg = fsg_from_func(f);
+ /* Disable the endpoints */
+ if (fsg->bulk_in_enabled) {
+ usb_ep_disable(fsg->bulk_in);
+ fsg->bulk_in_enabled = 0;
+ }
+ if (fsg->bulk_out_enabled) {
+ usb_ep_disable(fsg->bulk_out);
+ fsg->bulk_out_enabled = 0;
+ }
+
__raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE, NULL);
}
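READ(16)/WRITE(16) carry a 64-bit LBA in CDB bytes 2..9 and a 32-bit block count in bytes 10..13, which is why lba widens to u64 and data_size_from_cmnd is taken from cmnd[10]. A small decoding sketch using the same unaligned big-endian accessors; parse_cdb16() is hypothetical:

#include <linux/types.h>
#include <asm/unaligned.h>

static void parse_cdb16(const u8 *cdb, u64 *lba, u32 *nblocks)
{
	/* byte 0 is the opcode: 0x88 for READ(16), 0x8a for WRITE(16) */
	*lba = get_unaligned_be64(&cdb[2]);		/* bytes 2..9   */
	*nblocks = get_unaligned_be32(&cdb[10]);	/* bytes 10..13 */
}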
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index 0b468f5d55bc..068ed8417e5a 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -267,6 +267,8 @@ static const struct net_device_ops pn_netdev_ops = {
static void pn_net_setup(struct net_device *dev)
{
+ const u8 addr = PN_MEDIA_USB;
+
dev->features = 0;
dev->type = ARPHRD_PHONET;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
@@ -274,8 +276,9 @@ static void pn_net_setup(struct net_device *dev)
dev->min_mtu = PHONET_MIN_MTU;
dev->max_mtu = PHONET_MAX_MTU;
dev->hard_header_len = 1;
- dev->dev_addr[0] = PN_MEDIA_USB;
dev->addr_len = 1;
+ dev_addr_set(dev, &addr);
+
dev->tx_queue_len = 1;
dev->netdev_ops = &pn_netdev_ops;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index 5b3502df4e13..03f50643fbba 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -1321,6 +1321,7 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f)
audio->params.c_fu.volume_res = audio_opts->c_volume_res;
}
audio->params.req_number = audio_opts->req_number;
+ audio->params.fb_max = FBACK_FAST_MAX;
if (FUOUT_EN(audio_opts) || FUIN_EN(audio_opts))
audio->notify = audio_notify;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index ef55b8bb5870..36fa6ef0581b 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include "u_audio.h"
+
#include "u_uac2.h"
/* UAC2 spec: 4.1 Audio Channel Cluster Descriptor */
@@ -674,8 +675,9 @@ static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- if (!is_playback && (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
- // Win10 requires max packet size + 1 frame
+ if (is_playback || (uac2_opts->c_sync == USB_ENDPOINT_SYNC_ASYNC)) {
+ // playback is always async, capture only when configured
+ // Win10 requires max packet size + 1 frame
srate = srate * (1000 + uac2_opts->fb_max) / 1000;
// updated srate is always bigger, therefore DIV_ROUND_UP always yields +1
max_size_bw = num_channels(chmask) * ssize *
@@ -760,15 +762,15 @@ static void setup_headers(struct f_uac2_opts *opts,
headers[i++] = USBDHDR(&out_clk_src_desc);
headers[i++] = USBDHDR(&usb_out_it_desc);
- if (FUOUT_EN(opts))
- headers[i++] = USBDHDR(out_feature_unit_desc);
- }
+ if (FUOUT_EN(opts))
+ headers[i++] = USBDHDR(out_feature_unit_desc);
+ }
if (EPIN_EN(opts)) {
headers[i++] = USBDHDR(&io_in_it_desc);
- if (FUIN_EN(opts))
- headers[i++] = USBDHDR(in_feature_unit_desc);
+ if (FUIN_EN(opts))
+ headers[i++] = USBDHDR(in_feature_unit_desc);
headers[i++] = USBDHDR(&usb_in_ot_desc);
}
@@ -776,10 +778,10 @@ static void setup_headers(struct f_uac2_opts *opts,
if (EPOUT_EN(opts))
headers[i++] = USBDHDR(&io_out_ot_desc);
- if (FUOUT_EN(opts) || FUIN_EN(opts))
- headers[i++] = USBDHDR(ep_int_desc);
+ if (FUOUT_EN(opts) || FUIN_EN(opts))
+ headers[i++] = USBDHDR(ep_int_desc);
- if (EPOUT_EN(opts)) {
+ if (EPOUT_EN(opts)) {
headers[i++] = USBDHDR(&std_as_out_if0_desc);
headers[i++] = USBDHDR(&std_as_out_if1_desc);
headers[i++] = USBDHDR(&as_out_hdr_desc);
@@ -1931,7 +1933,7 @@ static struct usb_function_instance *afunc_alloc_inst(void)
opts->c_volume_res = UAC2_DEF_RES_DB;
opts->req_number = UAC2_DEF_REQ_NUM;
- opts->fb_max = UAC2_DEF_FB_MAX;
+ opts->fb_max = FBACK_FAST_MAX;
return &opts->func_inst;
}
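FBACK_FAST_MAX = 5 means the host may drive the stream up to 5 per mil faster, and the endpoint must reserve packet space for that worst case: the rate is scaled by (1000 + fb_max) / 1000 before the per-interval rounding. A plain-C sketch of the budget calculation; max_packet_bytes() is hypothetical, and the driver's set_ep_max_packet_size() additionally clamps to the wMaxPacketSize limits:

static unsigned int max_packet_bytes(unsigned int srate_hz,
				     unsigned int channels,
				     unsigned int sample_bytes,
				     unsigned int fb_max_per_mil,
				     unsigned int intervals_per_sec)
{
	/* e.g. 48000 Hz * 1005 / 1000 = 48240 Hz worst case */
	unsigned int srate = srate_hz * (1000 + fb_max_per_mil) / 1000;

	/* DIV_ROUND_UP(srate, intervals_per_sec) samples per packet */
	unsigned int samples = (srate + intervals_per_sec - 1) / intervals_per_sec;

	return channels * sample_bytes * samples;
}

For 48 kHz, 16-bit stereo on a 125 us high-speed interval this gives 2 * 2 * 7 = 28 bytes instead of 24, which is the extra headroom the feedback mechanism needs.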
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 9d87c0fb8f92..71bb5e477dba 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -417,6 +417,7 @@ uvc_register_video(struct uvc_device *uvc)
int ret;
/* TODO reference counting. */
+ memset(&uvc->vdev, 0, sizeof(uvc->video));
uvc->vdev.v4l2_dev = &uvc->v4l2_dev;
uvc->vdev.v4l2_dev->dev = &cdev->gadget->dev;
uvc->vdev.fops = &uvc_v4l2_fops;
@@ -884,12 +885,13 @@ static void uvc_free(struct usb_function *f)
kfree(uvc);
}
-static void uvc_unbind(struct usb_configuration *c, struct usb_function *f)
+static void uvc_function_unbind(struct usb_configuration *c,
+ struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct uvc_device *uvc = to_uvc(f);
- uvcg_info(f, "%s\n", __func__);
+ uvcg_info(f, "%s()\n", __func__);
device_remove_file(&uvc->vdev.dev, &dev_attr_function_name);
video_unregister_device(&uvc->vdev);
@@ -943,7 +945,7 @@ static struct usb_function *uvc_alloc(struct usb_function_instance *fi)
/* Register the function. */
uvc->func.name = "uvc";
uvc->func.bind = uvc_function_bind;
- uvc->func.unbind = uvc_unbind;
+ uvc->func.unbind = uvc_function_unbind;
uvc->func.get_alt = uvc_function_get_alt;
uvc->func.set_alt = uvc_function_set_alt;
uvc->func.disable = uvc_function_disable;
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index ad16163b5ff8..c46400be5464 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -29,6 +29,7 @@
enum {
UAC_FBACK_CTRL,
+ UAC_P_PITCH_CTRL,
UAC_MUTE_CTRL,
UAC_VOLUME_CTRL,
};
@@ -74,13 +75,9 @@ struct snd_uac_chip {
struct snd_card *card;
struct snd_pcm *pcm;
- /* timekeeping for the playback endpoint */
- unsigned int p_interval;
- unsigned int p_residue;
-
/* pre-calculated values for playback iso completion */
- unsigned int p_pktsize;
- unsigned int p_pktsize_residue;
+ unsigned long long p_interval_mil;
+ unsigned long long p_residue_mil;
unsigned int p_framesize;
};
@@ -153,6 +150,11 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
struct snd_pcm_runtime *runtime;
struct uac_rtd_params *prm = req->context;
struct snd_uac_chip *uac = prm->uac;
+ struct g_audio *audio_dev = uac->audio_dev;
+ struct uac_params *params = &audio_dev->params;
+ unsigned int frames, p_pktsize;
+ unsigned long long pitched_rate_mil, p_pktsize_residue_mil,
+ residue_frames_mil, div_result;
/* i/f shutting down */
if (!prm->ep_enabled) {
@@ -192,19 +194,42 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
* If there is a residue from this division, add it to the
* residue accumulator.
*/
- req->length = uac->p_pktsize;
- uac->p_residue += uac->p_pktsize_residue;
+ pitched_rate_mil = (unsigned long long)
+ params->p_srate * prm->pitch;
+ div_result = pitched_rate_mil;
+ do_div(div_result, uac->p_interval_mil);
+ frames = (unsigned int) div_result;
+
+ pr_debug("p_srate %d, pitch %d, interval_mil %llu, frames %d\n",
+ params->p_srate, prm->pitch, uac->p_interval_mil, frames);
+
+ p_pktsize = min_t(unsigned int,
+ uac->p_framesize * frames,
+ ep->maxpacket);
+
+ if (p_pktsize < ep->maxpacket) {
+ residue_frames_mil = pitched_rate_mil - frames * uac->p_interval_mil;
+ p_pktsize_residue_mil = uac->p_framesize * residue_frames_mil;
+ } else
+ p_pktsize_residue_mil = 0;
+
+ req->length = p_pktsize;
+ uac->p_residue_mil += p_pktsize_residue_mil;
/*
- * Whenever there are more bytes in the accumulator than we
+ * Whenever there are more bytes in the accumulator p_residue_mil than we
* need to add one more sample frame, increase this packet's
* size and decrease the accumulator.
*/
- if (uac->p_residue / uac->p_interval >= uac->p_framesize) {
+ div_result = uac->p_residue_mil;
+ do_div(div_result, uac->p_interval_mil);
+ if ((unsigned int) div_result >= uac->p_framesize) {
req->length += uac->p_framesize;
- uac->p_residue -= uac->p_framesize *
- uac->p_interval;
+ uac->p_residue_mil -= uac->p_framesize *
+ uac->p_interval_mil;
+ pr_debug("increased req length to %d\n", req->length);
}
+ pr_debug("remains uac->p_residue_mil %llu\n", uac->p_residue_mil);
req->actual = req->length;
}
@@ -371,7 +396,7 @@ static int uac_pcm_open(struct snd_pcm_substream *substream)
c_srate = params->c_srate;
p_chmask = params->p_chmask;
c_chmask = params->c_chmask;
- uac->p_residue = 0;
+ uac->p_residue_mil = 0;
runtime->hw = uac_pcm_hardware;
@@ -566,12 +591,17 @@ int u_audio_start_playback(struct g_audio *audio_dev)
unsigned int factor;
const struct usb_endpoint_descriptor *ep_desc;
int req_len, i;
+ unsigned int p_interval, p_pktsize;
ep = audio_dev->in_ep;
prm = &uac->p_prm;
config_ep_by_speed(gadget, &audio_dev->func, ep);
ep_desc = ep->desc;
+ /*
+ * Always start with original frequency
+ */
+ prm->pitch = 1000000;
/* pre-calculate the playback endpoint's interval */
if (gadget->speed == USB_SPEED_FULL)
@@ -582,20 +612,15 @@ int u_audio_start_playback(struct g_audio *audio_dev)
/* pre-compute some values for iso_complete() */
uac->p_framesize = params->p_ssize *
num_channels(params->p_chmask);
- uac->p_interval = factor / (1 << (ep_desc->bInterval - 1));
- uac->p_pktsize = min_t(unsigned int,
+ p_interval = factor / (1 << (ep_desc->bInterval - 1));
+ uac->p_interval_mil = (unsigned long long) p_interval * 1000000;
+ p_pktsize = min_t(unsigned int,
uac->p_framesize *
- (params->p_srate / uac->p_interval),
+ (params->p_srate / p_interval),
ep->maxpacket);
- if (uac->p_pktsize < ep->maxpacket)
- uac->p_pktsize_residue = uac->p_framesize *
- (params->p_srate % uac->p_interval);
- else
- uac->p_pktsize_residue = 0;
-
- req_len = uac->p_pktsize;
- uac->p_residue = 0;
+ req_len = p_pktsize;
+ uac->p_residue_mil = 0;
prm->ep_enabled = true;
usb_ep_enable(ep);
@@ -925,6 +950,13 @@ static struct snd_kcontrol_new u_audio_controls[] = {
.get = u_audio_pitch_get,
.put = u_audio_pitch_put,
},
+ [UAC_P_PITCH_CTRL] {
+ .iface = SNDRV_CTL_ELEM_IFACE_PCM,
+ .name = "Playback Pitch 1000000",
+ .info = u_audio_pitch_info,
+ .get = u_audio_pitch_get,
+ .put = u_audio_pitch_put,
+ },
[UAC_MUTE_CTRL] {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later */
@@ -1062,6 +1094,22 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
goto snd_fail;
}
+ if (p_chmask) {
+ kctl = snd_ctl_new1(&u_audio_controls[UAC_P_PITCH_CTRL],
+ &uac->p_prm);
+ if (!kctl) {
+ err = -ENOMEM;
+ goto snd_fail;
+ }
+
+ kctl->id.device = pcm->device;
+ kctl->id.subdevice = 0;
+
+ err = snd_ctl_add(card, kctl);
+ if (err < 0)
+ goto snd_fail;
+ }
+
for (i = 0; i <= SNDRV_PCM_STREAM_LAST; i++) {
struct uac_rtd_params *prm;
struct uac_fu_params *fu;
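The playback sizing now works on rates scaled by 10^6: the per-packet frame count is (srate * pitch) / p_interval_mil via do_div(), and the fractional remainder accumulates in p_residue_mil until it is worth one whole frame. A userspace rendering of the same arithmetic, with plain 64-bit divides standing in for do_div(); the 44.1 kHz / 1 ms numbers are just an example:

#include <stdint.h>
#include <stdio.h>

/* At 44100 Hz with 1000 packets/s each packet carries 44 frames plus a
 * 0.1-frame remainder; every tenth packet the remainder reaches a whole
 * frame and one extra frame is sent, keeping the long-term rate exact.
 */
int main(void)
{
	const uint64_t srate = 44100, pitch = 1000000;		/* nominal pitch */
	const uint64_t interval_mil = 1000ULL * 1000000;	/* 1000 pkts/s, scaled */
	const unsigned int framesize = 4;			/* 16-bit stereo */
	uint64_t residue_mil = 0;

	for (int pkt = 0; pkt < 12; pkt++) {
		uint64_t rate_mil = srate * pitch;		/* frames/s, scaled */
		unsigned int frames = rate_mil / interval_mil;
		unsigned int len = framesize * frames;

		residue_mil += framesize * (rate_mil - (uint64_t)frames * interval_mil);
		if (residue_mil / interval_mil >= framesize) {
			len += framesize;			/* carry one extra frame */
			residue_mil -= (uint64_t)framesize * interval_mil;
		}
		printf("packet %2d: %3u bytes\n", pkt, len);
	}
	return 0;
}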
diff --git a/drivers/usb/gadget/function/u_audio.h b/drivers/usb/gadget/function/u_audio.h
index 001a79a46022..8dfdae1721cd 100644
--- a/drivers/usb/gadget/function/u_audio.h
+++ b/drivers/usb/gadget/function/u_audio.h
@@ -14,11 +14,17 @@
/*
* Same maximum frequency deviation on the slower side as in
* sound/usb/endpoint.c. Value is expressed in per-mil deviation.
- * The maximum deviation on the faster side will be provided as
- * parameter, as it impacts the endpoint required bandwidth.
*/
#define FBACK_SLOW_MAX 250
+/*
+ * Maximum frequency deviation on the faster side, default value for UAC1/2.
+ * Value is expressed in per-mil deviation.
+ * UAC2 provides the value as a parameter as it impacts the endpoint required
+ * bandwidth.
+ */
+#define FBACK_FAST_MAX 5
+
/* Feature Unit parameters */
struct uac_fu_params {
int id; /* Feature Unit ID */
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 85a3f6d4b5af..e0ad5aed6ac9 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -754,6 +754,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
struct eth_dev *dev;
struct net_device *net;
int status;
+ u8 addr[ETH_ALEN];
net = alloc_etherdev(sizeof *dev);
if (!net)
@@ -773,9 +774,10 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
dev->qmult = qmult;
snprintf(net->name, sizeof(net->name), "%s%%d", netname);
- if (get_ether_addr(dev_addr, net->dev_addr))
+ if (get_ether_addr(dev_addr, addr))
dev_warn(&g->dev,
"using random %s ethernet address\n", "self");
+ eth_hw_addr_set(net, addr);
if (get_ether_addr(host_addr, dev->host_mac))
dev_warn(&g->dev,
"using random %s ethernet address\n", "host");
diff --git a/drivers/usb/gadget/function/u_uac2.h b/drivers/usb/gadget/function/u_uac2.h
index a73b35774c44..e0c8e3513bfd 100644
--- a/drivers/usb/gadget/function/u_uac2.h
+++ b/drivers/usb/gadget/function/u_uac2.h
@@ -30,7 +30,6 @@
#define UAC2_DEF_RES_DB (1*256) /* 1 dB */
#define UAC2_DEF_REQ_NUM 2
-#define UAC2_DEF_FB_MAX 5
#define UAC2_DEF_INT_REQ_NUM 10
struct f_uac2_opts {
diff --git a/drivers/usb/gadget/function/uvc.h b/drivers/usb/gadget/function/uvc.h
index 255a61bd6a6a..c3607a32b986 100644
--- a/drivers/usb/gadget/function/uvc.h
+++ b/drivers/usb/gadget/function/uvc.h
@@ -68,6 +68,8 @@ extern unsigned int uvc_gadget_trace_param;
#define UVC_MAX_REQUEST_SIZE 64
#define UVC_MAX_EVENTS 4
+#define UVCG_REQUEST_HEADER_LEN 12
+
/* ------------------------------------------------------------------------
* Structures
*/
@@ -76,7 +78,7 @@ struct uvc_request {
u8 *req_buffer;
struct uvc_video *video;
struct sg_table sgt;
- u8 header[2];
+ u8 header[UVCG_REQUEST_HEADER_LEN];
};
struct uvc_video {
@@ -126,6 +128,7 @@ struct uvc_device {
enum uvc_state state;
struct usb_function func;
struct uvc_video video;
+ bool func_connected;
/* Descriptors */
struct {
@@ -156,6 +159,7 @@ static inline struct uvc_device *to_uvc(struct usb_function *f)
struct uvc_file_handle {
struct v4l2_fh vfh;
struct uvc_video *device;
+ bool is_uvc_app_handle;
};
#define to_uvc_file_handle(handle) \
diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c
index 7d00ad7c154c..d852ac9e47e7 100644
--- a/drivers/usb/gadget/function/uvc_queue.c
+++ b/drivers/usb/gadget/function/uvc_queue.c
@@ -142,7 +142,7 @@ int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2
queue->queue.mem_ops = &vb2_vmalloc_memops;
}
- queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC
+ queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY
| V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
queue->queue.dev = dev;
diff --git a/drivers/usb/gadget/function/uvc_v4l2.c b/drivers/usb/gadget/function/uvc_v4l2.c
index 4ca89eab6159..a2c78690c5c2 100644
--- a/drivers/usb/gadget/function/uvc_v4l2.c
+++ b/drivers/usb/gadget/function/uvc_v4l2.c
@@ -169,7 +169,8 @@ uvc_v4l2_qbuf(struct file *file, void *fh, struct v4l2_buffer *b)
if (ret < 0)
return ret;
- schedule_work(&video->pump);
+ if (uvc->state == UVC_STATE_STREAMING)
+ schedule_work(&video->pump);
return ret;
}
@@ -227,17 +228,55 @@ static int
uvc_v4l2_subscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
+ struct uvc_device *uvc = video_get_drvdata(fh->vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(fh);
+ int ret;
+
if (sub->type < UVC_EVENT_FIRST || sub->type > UVC_EVENT_LAST)
return -EINVAL;
- return v4l2_event_subscribe(fh, sub, 2, NULL);
+ if (sub->type == UVC_EVENT_SETUP && uvc->func_connected)
+ return -EBUSY;
+
+ ret = v4l2_event_subscribe(fh, sub, 2, NULL);
+ if (ret < 0)
+ return ret;
+
+ if (sub->type == UVC_EVENT_SETUP) {
+ uvc->func_connected = true;
+ handle->is_uvc_app_handle = true;
+ uvc_function_connect(uvc);
+ }
+
+ return 0;
+}
+
+static void uvc_v4l2_disable(struct uvc_device *uvc)
+{
+ uvc->func_connected = false;
+ uvc_function_disconnect(uvc);
+ uvcg_video_enable(&uvc->video, 0);
+ uvcg_free_buffers(&uvc->video.queue);
}
static int
uvc_v4l2_unsubscribe_event(struct v4l2_fh *fh,
const struct v4l2_event_subscription *sub)
{
- return v4l2_event_unsubscribe(fh, sub);
+ struct uvc_device *uvc = video_get_drvdata(fh->vdev);
+ struct uvc_file_handle *handle = to_uvc_file_handle(fh);
+ int ret;
+
+ ret = v4l2_event_unsubscribe(fh, sub);
+ if (ret < 0)
+ return ret;
+
+ if (sub->type == UVC_EVENT_SETUP && handle->is_uvc_app_handle) {
+ uvc_v4l2_disable(uvc);
+ handle->is_uvc_app_handle = false;
+ }
+
+ return 0;
}
static long
@@ -292,7 +331,6 @@ uvc_v4l2_open(struct file *file)
handle->device = &uvc->video;
file->private_data = &handle->vfh;
- uvc_function_connect(uvc);
return 0;
}
@@ -304,11 +342,9 @@ uvc_v4l2_release(struct file *file)
struct uvc_file_handle *handle = to_uvc_file_handle(file->private_data);
struct uvc_video *video = handle->device;
- uvc_function_disconnect(uvc);
-
mutex_lock(&video->mutex);
- uvcg_video_enable(video, 0);
- uvcg_free_buffers(&video->queue);
+ if (handle->is_uvc_app_handle)
+ uvc_v4l2_disable(uvc);
mutex_unlock(&video->mutex);
file->private_data = NULL;
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index b4a763e5f70e..7f59a0c47402 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -12,6 +12,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
+#include <asm/unaligned.h>
#include <media/v4l2-dev.h>
@@ -27,13 +28,41 @@ static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
u8 *data, int len)
{
- data[0] = UVCG_REQUEST_HEADER_LEN;
+ struct uvc_device *uvc = container_of(video, struct uvc_device, video);
+ struct usb_composite_dev *cdev = uvc->func.config->cdev;
+ struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
+ int pos = 2;
+
data[1] = UVC_STREAM_EOH | video->fid;
- if (buf->bytesused - video->queue.buf_used <= len - UVCG_REQUEST_HEADER_LEN)
+ if (video->queue.buf_used == 0 && ts.tv_sec) {
+ /* dwClockFrequency is 48 MHz */
+ u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
+
+ data[1] |= UVC_STREAM_PTS;
+ put_unaligned_le32(pts, &data[pos]);
+ pos += 4;
+ }
+
+ if (cdev->gadget->ops->get_frame) {
+ u32 sof, stc;
+
+ sof = usb_gadget_frame_number(cdev->gadget);
+ ktime_get_ts64(&ts);
+ stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;
+
+ data[1] |= UVC_STREAM_SCR;
+ put_unaligned_le32(stc, &data[pos]);
+ put_unaligned_le16(sof, &data[pos+4]);
+ pos += 6;
+ }
+
+ data[0] = pos;
+
+ if (buf->bytesused - video->queue.buf_used <= len - pos)
data[1] |= UVC_STREAM_EOF;
- return 2;
+ return pos;
}
static int
@@ -104,22 +133,22 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
unsigned int len = video->req_size;
unsigned int sg_left, part = 0;
unsigned int i;
- int ret;
+ int header_len;
sg = ureq->sgt.sgl;
sg_init_table(sg, ureq->sgt.nents);
/* Init the header. */
- ret = uvc_video_encode_header(video, buf, ureq->header,
+ header_len = uvc_video_encode_header(video, buf, ureq->header,
video->req_size);
- sg_set_buf(sg, ureq->header, UVCG_REQUEST_HEADER_LEN);
- len -= ret;
+ sg_set_buf(sg, ureq->header, header_len);
+ len -= header_len;
if (pending <= len)
len = pending;
req->length = (len == pending) ?
- len + UVCG_REQUEST_HEADER_LEN : video->req_size;
+ len + header_len : video->req_size;
/* Init the pending sgs with payload */
sg = sg_next(sg);
@@ -148,7 +177,7 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
req->num_sgs = i + 1;
req->length -= len;
- video->queue.buf_used += req->length - UVCG_REQUEST_HEADER_LEN;
+ video->queue.buf_used += req->length - header_len;
if (buf->bytesused == video->queue.buf_used || !buf->sg) {
video->queue.buf_used = 0;
@@ -199,9 +228,12 @@ static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
ret);
- /* Isochronous endpoints can't be halted. */
- if (usb_endpoint_xfer_bulk(video->ep->desc))
- usb_ep_set_halt(video->ep);
+ /* If the endpoint is disabled the descriptor may be NULL. */
+ if (video->ep->desc) {
+ /* Isochronous endpoints can't be halted. */
+ if (usb_endpoint_xfer_bulk(video->ep->desc))
+ usb_ep_set_halt(video->ep);
+ }
}
return ret;
@@ -213,6 +245,7 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
struct uvc_request *ureq = req->context;
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
+ struct uvc_device *uvc = video->uvc;
unsigned long flags;
switch (req->status) {
@@ -235,7 +268,8 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
- schedule_work(&video->pump);
+ if (uvc->state == UVC_STATE_STREAMING)
+ schedule_work(&video->pump);
}
static int
@@ -302,8 +336,8 @@ uvc_video_alloc_requests(struct uvc_video *video)
list_add_tail(&video->ureq[i].req->list, &video->req_free);
/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
sg_alloc_table(&video->ureq[i].sgt,
- DIV_ROUND_UP(req_size - 2, PAGE_SIZE) + 2,
- GFP_KERNEL);
+ DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
+ PAGE_SIZE) + 2, GFP_KERNEL);
}
video->req_size = req_size;
@@ -329,12 +363,12 @@ static void uvcg_video_pump(struct work_struct *work)
{
struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
- struct usb_request *req;
+ struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
int ret;
- while (1) {
+ while (video->ep->enabled) {
/* Retrieve the first available USB request, protected by the
* request lock.
*/
@@ -384,6 +418,9 @@ static void uvcg_video_pump(struct work_struct *work)
video->req_int_count++;
}
+ if (!req)
+ return;
+
spin_lock_irqsave(&video->req_lock, flags);
list_add_tail(&req->list, &video->req_free);
spin_unlock_irqrestore(&video->req_lock, flags);
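The uvc_video.c hunks above add PTS/SCR fields to the payload header, converting timestamps into ticks of a 48 MHz clock (dwClockFrequency). The following is an illustrative sketch only, not part of the patch: a standalone C example of the same microseconds-times-48 conversion, with local constant names chosen for the example.

    #include <stdint.h>
    #include <stdio.h>

    #define USEC_PER_SEC  1000000ULL
    #define NSEC_PER_USEC 1000ULL

    static uint32_t to_48mhz_ticks(uint64_t sec, uint64_t nsec)
    {
            /* microseconds first, then 48 ticks per microsecond; truncated to
             * 32 bits just as the put_unaligned_le32() in the header encoder */
            return (uint32_t)((sec * USEC_PER_SEC + nsec / NSEC_PER_USEC) * 48);
    }

    int main(void)
    {
            /* 1.5 ms after the clock's epoch -> 1500 us -> 72000 ticks */
            printf("%u\n", to_48mhz_ticks(0, 1500000));
            return 0;
    }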
diff --git a/drivers/usb/gadget/function/uvc_video.h b/drivers/usb/gadget/function/uvc_video.h
index 9bf19475f6f9..03adeefa343b 100644
--- a/drivers/usb/gadget/function/uvc_video.h
+++ b/drivers/usb/gadget/function/uvc_video.h
@@ -12,8 +12,6 @@
#ifndef __UVC_VIDEO_H__
#define __UVC_VIDEO_H__
-#define UVCG_REQUEST_HEADER_LEN 2
-
struct uvc_video;
int uvcg_video_enable(struct uvc_video *video, int enable);
diff --git a/drivers/usb/gadget/legacy/hid.c b/drivers/usb/gadget/legacy/hid.c
index 5b27d289443f..3912cc805f3a 100644
--- a/drivers/usb/gadget/legacy/hid.c
+++ b/drivers/usb/gadget/legacy/hid.c
@@ -99,8 +99,10 @@ static int do_config(struct usb_configuration *c)
list_for_each_entry(e, &hidg_func_list, node) {
e->f = usb_get_function(e->fi);
- if (IS_ERR(e->f))
+ if (IS_ERR(e->f)) {
+ status = PTR_ERR(e->f);
goto put;
+ }
status = usb_add_function(c, e->f);
if (status < 0) {
usb_put_function(e->f);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 539220d7f5b6..78be94750232 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -469,7 +469,7 @@ static void ep_user_copy_worker(struct work_struct *work)
ret = -EFAULT;
/* completing the iocb can drop the ctx and mm, don't touch mm after */
- iocb->ki_complete(iocb, ret, ret);
+ iocb->ki_complete(iocb, ret);
kfree(priv->buf);
kfree(priv->to_free);
@@ -496,11 +496,8 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
kfree(priv->to_free);
kfree(priv);
iocb->private = NULL;
- /* aio_complete() reports bytes-transferred _and_ faults */
-
iocb->ki_complete(iocb,
- req->actual ? req->actual : (long)req->status,
- req->status);
+ req->actual ? req->actual : (long)req->status);
} else {
/* ep_copy_to_user() won't report both; we hide some faults */
if (unlikely(0 != req->status))
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 8c614bb86c66..69394dc1cdfb 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -330,6 +330,7 @@ config USB_AMD5536UDC
config USB_FSL_QE
tristate "Freescale QE/CPM USB Device Controller"
depends on FSL_SOC && (QUICC_ENGINE || CPM)
+ depends on !64BIT || BROKEN
help
Some of Freescale PowerPC processors have a Full Speed
QE/CPM2 USB controller, which support device mode with 4
diff --git a/drivers/usb/gadget/udc/amd5536udc.h b/drivers/usb/gadget/udc/amd5536udc.h
index 3296f3fcee48..055436016503 100644
--- a/drivers/usb/gadget/udc/amd5536udc.h
+++ b/drivers/usb/gadget/udc/amd5536udc.h
@@ -572,7 +572,6 @@ struct udc {
struct extcon_specific_cable_nb extcon_nb;
struct notifier_block nb;
struct delayed_work drd_work;
- struct workqueue_struct *drd_wq;
u32 conn_type;
};
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 14fdf918ecfe..568534a0d17c 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -6,6 +6,8 @@
* Author: Felipe Balbi <balbi@ti.com>
*/
+#define pr_fmt(fmt) "UDC core: " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -89,7 +91,7 @@ EXPORT_SYMBOL_GPL(usb_ep_set_maxpacket_limit);
* configurable, with more generic names like "ep-a". (remember that for
* USB, "in" means "towards the USB host".)
*
- * This routine must be called in process context.
+ * This routine may be called in an atomic (interrupt) context.
*
* returns zero, or a negative error code.
*/
@@ -134,7 +136,7 @@ EXPORT_SYMBOL_GPL(usb_ep_enable);
* gadget drivers must call usb_ep_enable() again before queueing
* requests to the endpoint.
*
- * This routine must be called in process context.
+ * This routine may be called in an atomic (interrupt) context.
*
* returns zero, or a negative error code.
*/
@@ -1555,14 +1557,14 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver)
if (!driver->match_existing_only) {
list_add_tail(&driver->pending, &gadget_driver_pending_list);
- pr_info("udc-core: couldn't find an available UDC - added [%s] to list of pending drivers\n",
+ pr_info("couldn't find an available UDC - added [%s] to list of pending drivers\n",
driver->function);
ret = 0;
}
mutex_unlock(&udc_lock);
if (ret)
- pr_warn("udc-core: couldn't find an available UDC or it's busy\n");
+ pr_warn("couldn't find an available UDC or it's busy: %d\n", ret);
return ret;
found:
ret = udc_bind_to_driver(udc, driver);
diff --git a/drivers/usb/gadget/udc/goku_udc.c b/drivers/usb/gadget/udc/goku_udc.c
index 3e1267d38774..3757a772a55e 100644
--- a/drivers/usb/gadget/udc/goku_udc.c
+++ b/drivers/usb/gadget/udc/goku_udc.c
@@ -553,12 +553,12 @@ static int start_dma(struct goku_ep *ep, struct goku_request *req)
master &= ~MST_R_BITS;
if (unlikely(req->req.length == 0))
- master = MST_RD_ENA | MST_RD_EOPB;
+ master |= MST_RD_ENA | MST_RD_EOPB;
else if ((req->req.length % ep->ep.maxpacket) != 0
|| req->req.zero)
- master = MST_RD_ENA | MST_EOPB_ENA;
+ master |= MST_RD_ENA | MST_EOPB_ENA;
else
- master = MST_RD_ENA | MST_EOPB_DIS;
+ master |= MST_RD_ENA | MST_EOPB_DIS;
ep->dev->int_enable |= INT_MSTRDEND;
diff --git a/drivers/usb/gadget/udc/pxa25x_udc.c b/drivers/usb/gadget/udc/pxa25x_udc.c
index a09ec1d826b2..52cdfd8212d6 100644
--- a/drivers/usb/gadget/udc/pxa25x_udc.c
+++ b/drivers/usb/gadget/udc/pxa25x_udc.c
@@ -2325,7 +2325,7 @@ static int pxa25x_udc_probe(struct platform_device *pdev)
pr_info("%s: version %s\n", driver_name, DRIVER_VERSION);
/* insist on Intel/ARM/XScale */
- asm("mrc%? p15, 0, %0, c0, c0" : "=r" (chiprev));
+ asm("mrc p15, 0, %0, c0, c0" : "=r" (chiprev));
if ((chiprev & CP15R0_VENDOR_MASK) != CP15R0_XSCALE_VALUE) {
pr_err("%s: not XScale!\n", driver_name);
return -ENODEV;
diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c
index 99805d60a7ab..8bbb89c80348 100644
--- a/drivers/usb/gadget/udc/snps_udc_plat.c
+++ b/drivers/usb/gadget/udc/snps_udc_plat.c
@@ -243,11 +243,6 @@ static int udc_plat_remove(struct platform_device *pdev)
platform_set_drvdata(pdev, NULL);
- if (dev->drd_wq) {
- flush_workqueue(dev->drd_wq);
- destroy_workqueue(dev->drd_wq);
- }
-
phy_power_off(dev->udc_phy);
phy_exit(dev->udc_phy);
extcon_unregister_notifier(dev->edev, EXTCON_USB, &dev->nb);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index fb4ffedd6f0d..f5ca670776a3 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -11,6 +11,7 @@
* USB peripheral controller (at91_udc.c).
*/
+#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
@@ -171,6 +172,7 @@ struct xusb_ep {
* @addr: the usb device base address
* @lock: instance of spinlock
* @dma_enabled: flag indicating whether the dma is included in the system
+ * @clk: pointer to struct clk
* @read_fn: function pointer to read device registers
* @write_fn: function pointer to write to device registers
*/
@@ -188,6 +190,7 @@ struct xusb_udc {
void __iomem *addr;
spinlock_t lock;
bool dma_enabled;
+ struct clk *clk;
unsigned int (*read_fn)(void __iomem *);
void (*write_fn)(void __iomem *, u32, u32);
@@ -2092,6 +2095,27 @@ static int xudc_probe(struct platform_device *pdev)
udc->gadget.ep0 = &udc->ep[XUSB_EP_NUMBER_ZERO].ep_usb;
udc->gadget.name = driver_name;
+ udc->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
+ if (IS_ERR(udc->clk)) {
+ if (PTR_ERR(udc->clk) != -ENOENT) {
+ ret = PTR_ERR(udc->clk);
+ goto fail;
+ }
+
+ /*
+ * Clock framework support is optional; continue anyway
+ * if we don't find a matching clock.
+ */
+ dev_warn(&pdev->dev, "s_axi_aclk clock property is not found\n");
+ udc->clk = NULL;
+ }
+
+ ret = clk_prepare_enable(udc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable clock.\n");
+ return ret;
+ }
+
spin_lock_init(&udc->lock);
/* Check for IP endianness */
@@ -2147,6 +2171,7 @@ static int xudc_remove(struct platform_device *pdev)
struct xusb_udc *udc = platform_get_drvdata(pdev);
usb_del_gadget_udc(&udc->gadget);
+ clk_disable_unprepare(udc->clk);
return 0;
}
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index c4736d1d020c..d1d926f8f9c2 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -290,7 +290,8 @@ config USB_EHCI_EXYNOS
tristate "EHCI support for Samsung S5P/Exynos SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
- Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
+ Enable support for the Samsung S5Pv210 and Exynos SOC's on-chip EHCI
+ controller.
config USB_EHCI_MV
tristate "EHCI support for Marvell PXA/MMP USB controller"
@@ -563,7 +564,8 @@ config USB_OHCI_EXYNOS
tristate "OHCI support for Samsung S5P/Exynos SoC Series"
depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
help
- Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+ Enable support for the Samsung S5Pv210 and Exynos SOC's on-chip OHCI
+ controller.
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module (DEPRECATED)"
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index e893467d659c..05d41fd65f25 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -18,6 +18,8 @@
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
+#include <linux/usb/phy.h>
+#include <linux/usb/of.h>
#include "ehci.h"
@@ -25,6 +27,9 @@
static const char hcd_name[] = "ehci-atmel";
+#define EHCI_INSNREG(index) ((index) * 4 + 0x90)
+#define EHCI_INSNREG08_HSIC_EN BIT(2)
+
/* interface and function clocks */
#define hcd_to_atmel_ehci_priv(h) \
((struct atmel_ehci_priv *)hcd_to_ehci(h)->priv)
@@ -154,6 +159,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
goto fail_add_hcd;
device_wakeup_enable(hcd->self.controller);
+ if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_HSIC)
+ writel(EHCI_INSNREG08_HSIC_EN, hcd->regs + EHCI_INSNREG(8));
+
return retval;
fail_add_hcd:
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1776c05d0a48..3d82e0b853be 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -588,7 +588,7 @@ static int ehci_run (struct usb_hcd *hcd)
* hcc_params controls whether ehci->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* dma_pool consistent memory always uses segment zero.
- * streaming mappings for I/O buffers, like pci_map_single(),
+ * streaming mappings for I/O buffers, like dma_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
@@ -635,7 +635,16 @@ static int ehci_run (struct usb_hcd *hcd)
/* Wait until HC become operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
- rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
+ /* For Aspeed, STS_HALT also depends on ASS/PSS status.
+ * Check CMD_RUN instead.
+ */
+ if (ehci->is_aspeed)
+ rc = ehci_handshake(ehci, &ehci->regs->command, CMD_RUN,
+ 1, 100 * 1000);
+ else
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT,
+ 0, 100 * 1000);
up_write(&ehci_cf_port_reset_rwsem);
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index c4f6a2559a98..efe30e3be22f 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -745,12 +745,13 @@ int ehci_hub_control(
unsigned selector;
/*
- * Avoid underflow while calculating (wIndex & 0xff) - 1.
- * The compiler might deduce that wIndex can never be 0 and then
- * optimize away the tests for !wIndex below.
+ * Avoid out-of-bounds values while calculating the port index
+ * from wIndex. The compiler doesn't like pointers to invalid
+ * addresses, even if they are never used.
*/
- temp = wIndex & 0xff;
- temp -= (temp > 0);
+ temp = (wIndex - 1) & 0xff;
+ if (temp >= HCS_N_PORTS_MAX)
+ temp = 0;
status_reg = &ehci->regs->port_status[temp];
hostpc_reg = &ehci->regs->hostpc[temp];
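The ehci-hub.c hunk above replaces the underflow-avoiding decrement with an explicit clamp so the driver never forms a pointer past the port register arrays. The following is an illustrative sketch only, not part of the patch: a small C example of the same clamped wIndex-to-port-index calculation; the constant and test values are local to the example (the EHCI HCS_N_PORTS field allows at most 15 ports).

    #include <stdio.h>

    #define HCS_N_PORTS_MAX 15 /* example value, matching the 4-bit HCS_N_PORTS field */

    static unsigned int port_index(unsigned int wIndex)
    {
            unsigned int temp = (wIndex - 1) & 0xff;

            /* never index past the port arrays, even if the result is unused */
            if (temp >= HCS_N_PORTS_MAX)
                    temp = 0;
            return temp;
    }

    int main(void)
    {
            /* wIndex 0 and out-of-range values clamp to 0; wIndex 3 -> port 2 */
            printf("%u %u %u\n", port_index(0), port_index(3), port_index(300));
            return 0;
    }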
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index 21307d862af6..4c6c08b675b5 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -73,10 +73,9 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
if (!qh)
goto done;
qh->hw = (struct ehci_qh_hw *)
- dma_pool_alloc(ehci->qh_pool, flags, &dma);
+ dma_pool_zalloc(ehci->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
- memset(qh->hw, 0, sizeof *qh->hw);
qh->qh_dma = dma;
// INIT_LIST_HEAD (&qh->qh_list);
INIT_LIST_HEAD (&qh->qtd_list);
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
index 8fd27249ad25..fa46d217dd10 100644
--- a/drivers/usb/host/ehci-mv.c
+++ b/drivers/usb/host/ehci-mv.c
@@ -258,8 +258,6 @@ static int mv_ehci_remove(struct platform_device *pdev)
return 0;
}
-MODULE_ALIAS("mv-ehci");
-
static const struct platform_device_id ehci_id_table[] = {
{"pxa-u2oehci", 0},
{"pxa-sph", 0},
diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c
index c70f2d0b4aaf..c3dc906274d9 100644
--- a/drivers/usb/host/ehci-platform.c
+++ b/drivers/usb/host/ehci-platform.c
@@ -297,6 +297,12 @@ static int ehci_platform_probe(struct platform_device *dev)
"has-transaction-translator"))
hcd->has_tt = 1;
+ if (of_device_is_compatible(dev->dev.of_node,
+ "aspeed,ast2500-ehci") ||
+ of_device_is_compatible(dev->dev.of_node,
+ "aspeed,ast2600-ehci"))
+ ehci->is_aspeed = 1;
+
if (soc_device_match(quirk_poll_match))
priv->quirk_poll = true;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 80bb823aa9fe..fdd073cc053b 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -219,6 +219,7 @@ struct ehci_hcd { /* one per controller */
unsigned need_oc_pp_cycle:1; /* MPC834X port power */
unsigned imx28_write_fix:1; /* For Freescale i.MX28 */
unsigned spurious_oc:1;
+ unsigned is_aspeed:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 4b02ace09f3d..b590995a6b3e 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -1859,10 +1859,9 @@ static struct fotg210_qh *fotg210_qh_alloc(struct fotg210_hcd *fotg210,
if (!qh)
goto done;
qh->hw = (struct fotg210_qh_hw *)
- dma_pool_alloc(fotg210->qh_pool, flags, &dma);
+ dma_pool_zalloc(fotg210->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
- memset(qh->hw, 0, sizeof(*qh->hw));
qh->qh_dma = dma;
INIT_LIST_HEAD(&qh->qtd_list);
@@ -5023,7 +5022,7 @@ static int fotg210_run(struct usb_hcd *hcd)
* hcc_params controls whether fotg210->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* dma_pool consistent memory always uses segment zero.
- * streaming mappings for I/O buffers, like pci_map_single(),
+ * streaming mappings for I/O buffers, like dma_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
diff --git a/drivers/usb/host/max3421-hcd.c b/drivers/usb/host/max3421-hcd.c
index 59cc1bc7f12f..30de85a707fe 100644
--- a/drivers/usb/host/max3421-hcd.c
+++ b/drivers/usb/host/max3421-hcd.c
@@ -125,8 +125,6 @@ struct max3421_hcd {
struct task_struct *spi_thread;
- struct max3421_hcd *next;
-
enum max3421_rh_state rh_state;
/* lower 16 bits contain port status, upper 16 bits the change mask: */
u32 port_status;
@@ -174,8 +172,6 @@ struct max3421_ep {
u8 retransmit; /* packet needs retransmission */
};
-static struct max3421_hcd *max3421_hcd_list;
-
#define MAX3421_FIFO_SIZE 64
#define MAX3421_SPI_DIR_RD 0 /* read register from MAX3421 */
@@ -1882,9 +1878,8 @@ max3421_probe(struct spi_device *spi)
}
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
max3421_hcd = hcd_to_max3421(hcd);
- max3421_hcd->next = max3421_hcd_list;
- max3421_hcd_list = max3421_hcd;
INIT_LIST_HEAD(&max3421_hcd->ep_list);
+ spi_set_drvdata(spi, max3421_hcd);
max3421_hcd->tx = kmalloc(sizeof(*max3421_hcd->tx), GFP_KERNEL);
if (!max3421_hcd->tx)
@@ -1934,28 +1929,18 @@ error:
static int
max3421_remove(struct spi_device *spi)
{
- struct max3421_hcd *max3421_hcd = NULL, **prev;
- struct usb_hcd *hcd = NULL;
+ struct max3421_hcd *max3421_hcd;
+ struct usb_hcd *hcd;
unsigned long flags;
- for (prev = &max3421_hcd_list; *prev; prev = &(*prev)->next) {
- max3421_hcd = *prev;
- hcd = max3421_to_hcd(max3421_hcd);
- if (hcd->self.controller == &spi->dev)
- break;
- }
- if (!max3421_hcd) {
- dev_err(&spi->dev, "no MAX3421 HCD found for SPI device %p\n",
- spi);
- return -ENODEV;
- }
+ max3421_hcd = spi_get_drvdata(spi);
+ hcd = max3421_to_hcd(max3421_hcd);
usb_remove_hcd(hcd);
spin_lock_irqsave(&max3421_hcd->lock, flags);
kthread_stop(max3421_hcd->spi_thread);
- *prev = max3421_hcd->next;
spin_unlock_irqrestore(&max3421_hcd->lock, flags);
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 1f5e69314a17..666b1c665188 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -191,8 +191,7 @@ static int ohci_urb_enqueue (
}
/* allocate the private part of the URB */
- urb_priv = kzalloc (sizeof (urb_priv_t) + size * sizeof (struct td *),
- mem_flags);
+ urb_priv = kzalloc(struct_size(urb_priv, td, size), mem_flags);
if (!urb_priv)
return -ENOMEM;
INIT_LIST_HEAD (&urb_priv->pending);
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index f474f2f9c1e4..90cee192e96d 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -91,6 +91,9 @@ __acquires(ohci->lock)
update_done_list(ohci);
ohci_work(ohci);
+ /* All ED unlinks should be finished, no need for SOF interrupts */
+ ohci_writel(ohci, OHCI_INTR_SF, &ohci->regs->intrdisable);
+
/*
* Some controllers don't handle "global" suspend properly if
* there are unsuspended ports. For these controllers, put all
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 08ec2ab0d95a..3f3d62dc0674 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -199,7 +199,7 @@ static int ohci_hcd_tmio_drv_probe(struct platform_device *dev)
if (usb_disabled())
return -ENODEV;
- if (!cell)
+ if (!cell || !regs || !config || !sram)
return -EINVAL;
if (irq < 0)
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 4300326b3730..e82ff2a49672 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3131,7 +3131,7 @@ static int oxu_run(struct usb_hcd *hcd)
/* hcc_params controls whether oxu->regs->segment must (!!!)
* be used; it constrains QH/ITD/SITD and QTD locations.
* dma_pool consistent memory always uses segment zero.
- * streaming mappings for I/O buffers, like pci_map_single(),
+ * streaming mappings for I/O buffers, like dma_map_single(),
* can return segments above 4GB, if the device allows.
*
* NOTE: the dma mask is visible through dev->dma_mask, so
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 134f4789bd89..1edef7527c11 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -734,7 +734,7 @@ static void drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
if (!need_bw_sch(udev, ep))
return;
- xhci_err(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
+ xhci_dbg(xhci, "%s %s\n", __func__, decode_ep(ep, udev->speed));
hash_for_each_possible_safe(mtk->sch_ep_hash, sch_ep,
hn, hentry, (unsigned long)ep) {
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index c53f6f276d5c..58a0eae4f41b 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -602,7 +602,7 @@ static int xhci_mtk_probe(struct platform_device *pdev)
goto dealloc_usb2_hcd;
if (wakeup_irq > 0) {
- ret = dev_pm_set_dedicated_wake_irq(dev, wakeup_irq);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(dev, wakeup_irq);
if (ret) {
dev_err(dev, "set wakeup irq %d failed\n", wakeup_irq);
goto dealloc_usb3_hcd;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 2484a9d38ce2..1d8a4c089a85 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -65,6 +65,13 @@
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
#define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb
#define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 0x161a
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 0x161b
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 0x161d
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6
+#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7
+
#define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
#define PCI_DEVICE_ID_ASMEDIA_1142_XHCI 0x1242
@@ -317,6 +324,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4))
xhci->quirks |= XHCI_NO_SOFT_RETRY;
+ if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+ (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
+ pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
+ xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
+
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
"QUIRK: Resetting on resume");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index efbd317f2f25..988a8c02e7e2 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -99,10 +99,6 @@ struct iowarrior {
/* globals */
/*--------------*/
-/*
- * USB spec identifies 5 second timeouts.
- */
-#define GET_TIMEOUT 5
#define USB_REQ_GET_REPORT 0x01
//#if 0
static int usb_get_report(struct usb_device *dev,
@@ -114,7 +110,7 @@ static int usb_get_report(struct usb_device *dev,
USB_DIR_IN | USB_TYPE_CLASS |
USB_RECIP_INTERFACE, (type << 8) + id,
inter->desc.bInterfaceNumber, buf, size,
- GET_TIMEOUT*HZ);
+ USB_CTRL_GET_TIMEOUT);
}
//#endif
@@ -129,7 +125,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
USB_TYPE_CLASS | USB_RECIP_INTERFACE,
(type << 8) + id,
intf->cur_altsetting->desc.bInterfaceNumber, buf,
- size, HZ);
+ size, 1000);
}
/*---------------------*/
diff --git a/drivers/usb/mtu3/mtu3_plat.c b/drivers/usb/mtu3/mtu3_plat.c
index f13531022f4a..4309ed939178 100644
--- a/drivers/usb/mtu3/mtu3_plat.c
+++ b/drivers/usb/mtu3/mtu3_plat.c
@@ -337,7 +337,7 @@ static int mtu3_probe(struct platform_device *pdev)
goto comm_init_err;
if (ssusb->wakeup_irq > 0) {
- ret = dev_pm_set_dedicated_wake_irq(dev, ssusb->wakeup_irq);
+ ret = dev_pm_set_dedicated_wake_irq_reverse(dev, ssusb->wakeup_irq);
if (ret) {
dev_err(dev, "failed to set wakeup irq %d\n", ssusb->wakeup_irq);
goto comm_exit;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 8de143807c1a..4d61df6a9b5c 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -120,7 +120,7 @@ config USB_MUSB_MEDIATEK
tristate "MediaTek platforms"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on NOP_USB_XCEIV
- depends on GENERIC_PHY
+ select GENERIC_PHY
select USB_ROLE_SWITCH
comment "MUSB DMA mode"
diff --git a/drivers/usb/musb/mediatek.c b/drivers/usb/musb/mediatek.c
index 6b92d037d8fc..f5d97eb84cb5 100644
--- a/drivers/usb/musb/mediatek.c
+++ b/drivers/usb/musb/mediatek.c
@@ -185,6 +185,7 @@ static int mtk_otg_switch_init(struct mtk_glue *glue)
role_sx_desc.set = musb_usb_role_sx_set;
role_sx_desc.get = musb_usb_role_sx_get;
+ role_sx_desc.allow_userspace_control = true;
role_sx_desc.fwnode = dev_fwnode(glue->dev);
role_sx_desc.driver_data = glue;
glue->role_sw = usb_role_switch_register(glue->dev, &role_sx_desc);
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 98c0f4c1bffd..51274b87f46c 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1247,9 +1247,11 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
status = musb_queue_resume_work(musb,
musb_ep_restart_resume_work,
request);
- if (status < 0)
+ if (status < 0) {
dev_err(musb->controller, "%s resume work: %i\n",
__func__, status);
+ list_del(&request->list);
+ }
}
unlock:
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c
index f3f76f2ac63f..961c858fb349 100644
--- a/drivers/usb/musb/sunxi.c
+++ b/drivers/usb/musb/sunxi.c
@@ -440,6 +440,10 @@ static u8 sunxi_musb_readb(void __iomem *addr, u32 offset)
return 0xde;
return readb(addr + SUNXI_MUSB_CONFIGDATA);
+ case MUSB_ULPI_BUSCONTROL:
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have ULPI bus control register\n");
+ return 0;
/* Offset for these is fixed by sunxi_musb_busctl_offset() */
case SUNXI_MUSB_TXFUNCADDR:
case SUNXI_MUSB_TXHUBADDR:
@@ -494,6 +498,10 @@ static void sunxi_musb_writeb(void __iomem *addr, unsigned offset, u8 data)
return writeb(data, addr + SUNXI_MUSB_TXFIFOSZ);
case MUSB_RXFIFOSZ:
return writeb(data, addr + SUNXI_MUSB_RXFIFOSZ);
+ case MUSB_ULPI_BUSCONTROL:
+ dev_warn(sunxi_musb->controller->parent,
+ "sunxi-musb does not have ULPI bus control register\n");
+ return;
/* Offset for these is fixed by sunxi_musb_busctl_offset() */
case SUNXI_MUSB_TXFUNCADDR:
case SUNXI_MUSB_TXHUBADDR:
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index c968ecda42aa..7ed4cc348d99 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1104,6 +1104,11 @@ static int tusb_musb_init(struct musb *musb)
/* dma address for async dma */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ pr_debug("no async dma resource?\n");
+ ret = -ENODEV;
+ goto done;
+ }
musb->async = mem->start;
/* dma address for sync dma */
diff --git a/drivers/usb/phy/phy-tahvo.c b/drivers/usb/phy/phy-tahvo.c
index a3e043e3e4aa..f2d2cc586c5b 100644
--- a/drivers/usb/phy/phy-tahvo.c
+++ b/drivers/usb/phy/phy-tahvo.c
@@ -194,8 +194,6 @@ static int tahvo_usb_set_host(struct usb_otg *otg, struct usb_bus *host)
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
- dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, host);
-
mutex_lock(&tu->serialize);
if (host == NULL) {
@@ -224,8 +222,6 @@ static int tahvo_usb_set_peripheral(struct usb_otg *otg,
struct tahvo_usb *tu = container_of(otg->usb_phy, struct tahvo_usb,
phy);
- dev_dbg(&tu->pt_dev->dev, "%s %p\n", __func__, gadget);
-
mutex_lock(&tu->serialize);
if (!gadget) {
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index c0f432d509aa..68cd4b68e3a2 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -63,6 +63,10 @@
#define A_VBUS_VLD_WAKEUP_EN BIT(30)
#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define ID_INT_EN BIT(0)
+#define ID_CHG_DET BIT(1)
+#define VBUS_WAKEUP_INT_EN BIT(8)
+#define VBUS_WAKEUP_CHG_DET BIT(9)
#define VBUS_WAKEUP_STS BIT(10)
#define VBUS_WAKEUP_WAKEUP_EN BIT(30)
@@ -158,6 +162,10 @@
#define USB_USBMODE_HOST (3 << 0)
#define USB_USBMODE_DEVICE (2 << 0)
+#define PMC_USB_AO 0xf0
+#define VBUS_WAKEUP_PD_P0 BIT(2)
+#define ID_PD_P0 BIT(3)
+
static DEFINE_SPINLOCK(utmip_pad_lock);
static unsigned int utmip_pad_count;
@@ -533,13 +541,14 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
val &= ~USB_WAKE_ON_RESUME_EN;
writel_relaxed(val, base + USB_SUSP_CTRL);
- if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ if (phy->mode != USB_DR_MODE_HOST) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
writel_relaxed(val, base + USB_SUSP_CTRL);
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val &= ~VBUS_WAKEUP_WAKEUP_EN;
+ val &= ~(ID_CHG_DET | VBUS_WAKEUP_CHG_DET);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
@@ -687,9 +696,10 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
* Ask VBUS sensor to generate wake event once cable is
* connected.
*/
- if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ if (phy->mode != USB_DR_MODE_HOST) {
val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
val |= VBUS_WAKEUP_WAKEUP_EN;
+ val &= ~(ID_CHG_DET | VBUS_WAKEUP_CHG_DET);
writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
@@ -893,6 +903,7 @@ static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
if (WARN_ON(!phy->freq))
return;
+ usb_phy_set_wakeup(u_phy, false);
tegra_usb_phy_power_off(phy);
if (!phy->is_ulpi_phy)
@@ -904,26 +915,146 @@ static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
phy->freq = NULL;
}
+static irqreturn_t tegra_usb_phy_isr(int irq, void *data)
+{
+ u32 val, int_mask = ID_CHG_DET | VBUS_WAKEUP_CHG_DET;
+ struct tegra_usb_phy *phy = data;
+ void __iomem *base = phy->regs;
+
+ /*
+ * The PHY interrupt also wakes the USB controller driver since
+ * the interrupt is shared. We don't do anything in the PHY driver,
+ * so just clear the interrupt.
+ */
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ return val & int_mask ? IRQ_HANDLED : IRQ_NONE;
+}
+
static int tegra_usb_phy_set_wakeup(struct usb_phy *u_phy, bool enable)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+ void __iomem *base = phy->regs;
+ int ret = 0;
+ u32 val;
+
+ if (phy->wakeup_enabled && phy->mode != USB_DR_MODE_HOST &&
+ phy->irq > 0) {
+ disable_irq(phy->irq);
+
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val &= ~(ID_INT_EN | VBUS_WAKEUP_INT_EN);
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ enable_irq(phy->irq);
+
+ free_irq(phy->irq, phy);
+
+ phy->wakeup_enabled = false;
+ }
+
+ if (enable && phy->mode != USB_DR_MODE_HOST && phy->irq > 0) {
+ ret = request_irq(phy->irq, tegra_usb_phy_isr, IRQF_SHARED,
+ dev_name(phy->u_phy.dev), phy);
+ if (!ret) {
+ disable_irq(phy->irq);
+
+ /*
+ * The USB clock will be resumed once a wake event is
+ * generated. The ID-change event requires interrupts to be
+ * enabled, otherwise it won't be generated.
+ */
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val |= ID_INT_EN | VBUS_WAKEUP_INT_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ enable_irq(phy->irq);
+ } else {
+ dev_err(phy->u_phy.dev,
+ "Failed to request interrupt: %d", ret);
+ enable = false;
+ }
+ }
phy->wakeup_enabled = enable;
- return 0;
+ return ret;
}
static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+ int ret;
if (WARN_ON(!phy->freq))
return -EINVAL;
+ /*
+ * The PHY shares its IRQ with the CI driver, hence here we disable
+ * the interrupt either for both PHY and CI or for CI only. The
+ * interrupt needs to be disabled while the hardware is reprogrammed
+ * because the interrupt handler touches the programmed registers,
+ * which could otherwise race with the reprogramming.
+ */
+ if (phy->irq > 0)
+ disable_irq(phy->irq);
+
if (suspend)
- return tegra_usb_phy_power_off(phy);
+ ret = tegra_usb_phy_power_off(phy);
else
- return tegra_usb_phy_power_on(phy);
+ ret = tegra_usb_phy_power_on(phy);
+
+ if (phy->irq > 0)
+ enable_irq(phy->irq);
+
+ return ret;
+}
+
+static int tegra_usb_phy_configure_pmc(struct tegra_usb_phy *phy)
+{
+ int err, val = 0;
+
+ /* older device-trees don't have a PMC regmap */
+ if (!phy->pmc_regmap)
+ return 0;
+
+ /*
+ * Tegra20 has a different layout of the PMC USB register bits, and AO
+ * is enabled by default after system reset, so there is nothing to do
+ * here on Tegra20.
+ */
+ if (!phy->soc_config->requires_pmc_ao_power_up)
+ return 0;
+
+ /* enable VBUS wake-up detector */
+ if (phy->mode != USB_DR_MODE_HOST)
+ val |= VBUS_WAKEUP_PD_P0 << phy->instance * 4;
+
+ /* enable ID-pin ACC detector for OTG mode switching */
+ if (phy->mode == USB_DR_MODE_OTG)
+ val |= ID_PD_P0 << phy->instance * 4;
+
+ /* disable detectors to reset them */
+ err = regmap_set_bits(phy->pmc_regmap, PMC_USB_AO, val);
+ if (err) {
+ dev_err(phy->u_phy.dev, "Failed to disable PMC AO: %d\n", err);
+ return err;
+ }
+
+ usleep_range(10, 100);
+
+ /* enable detectors */
+ err = regmap_clear_bits(phy->pmc_regmap, PMC_USB_AO, val);
+ if (err) {
+ dev_err(phy->u_phy.dev, "Failed to enable PMC AO: %d\n", err);
+ return err;
+ }
+
+ /* detectors start to work after 10ms */
+ usleep_range(10000, 15000);
+
+ return 0;
}
static int tegra_usb_phy_init(struct usb_phy *u_phy)
@@ -967,6 +1098,10 @@ static int tegra_usb_phy_init(struct usb_phy *u_phy)
goto disable_vbus;
}
+ err = tegra_usb_phy_configure_pmc(phy);
+ if (err)
+ goto close_phy;
+
err = tegra_usb_phy_power_on(phy);
if (err)
goto close_phy;
@@ -1135,11 +1270,56 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
return 0;
}
+static void tegra_usb_phy_put_pmc_device(void *dev)
+{
+ put_device(dev);
+}
+
+static int tegra_usb_phy_parse_pmc(struct device *dev,
+ struct tegra_usb_phy *phy)
+{
+ struct platform_device *pmc_pdev;
+ struct of_phandle_args args;
+ int err;
+
+ err = of_parse_phandle_with_fixed_args(dev->of_node, "nvidia,pmc",
+ 1, 0, &args);
+ if (err) {
+ if (err != -ENOENT)
+ return err;
+
+ dev_warn_once(dev, "nvidia,pmc is missing, please update your device-tree\n");
+ return 0;
+ }
+
+ pmc_pdev = of_find_device_by_node(args.np);
+ of_node_put(args.np);
+ if (!pmc_pdev)
+ return -ENODEV;
+
+ err = devm_add_action_or_reset(dev, tegra_usb_phy_put_pmc_device,
+ &pmc_pdev->dev);
+ if (err)
+ return err;
+
+ if (!platform_get_drvdata(pmc_pdev))
+ return -EPROBE_DEFER;
+
+ phy->pmc_regmap = dev_get_regmap(&pmc_pdev->dev, "usb_sleepwalk");
+ if (!phy->pmc_regmap)
+ return -EINVAL;
+
+ phy->instance = args.args[0];
+
+ return 0;
+}
+
static const struct tegra_phy_soc_config tegra20_soc_config = {
.utmi_pll_config_in_car_module = false,
.has_hostpc = false,
.requires_usbmode_setup = false,
.requires_extra_tuning_parameters = false,
+ .requires_pmc_ao_power_up = false,
};
static const struct tegra_phy_soc_config tegra30_soc_config = {
@@ -1147,6 +1327,7 @@ static const struct tegra_phy_soc_config tegra30_soc_config = {
.has_hostpc = true,
.requires_usbmode_setup = true,
.requires_extra_tuning_parameters = true,
+ .requires_pmc_ao_power_up = true,
};
static const struct of_device_id tegra_usb_phy_id_table[] = {
@@ -1172,6 +1353,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -ENOMEM;
tegra_phy->soc_config = of_device_get_match_data(&pdev->dev);
+ tegra_phy->irq = platform_get_irq_optional(pdev, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1215,6 +1397,12 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return err;
}
+ err = tegra_usb_phy_parse_pmc(&pdev->dev, tegra_phy);
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "Failed to get PMC regmap\n");
+ return err;
+ }
+
phy_type = of_usb_get_phy_mode(np);
switch (phy_type) {
case USBPHY_INTERFACE_MODE_UTMI:
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2db917eab799..29f4b87a9e74 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -131,17 +131,11 @@ static int ch341_control_in(struct usb_device *dev,
dev_dbg(&dev->dev, "%s - (%02x,%04x,%04x,%u)\n", __func__,
request, value, index, bufsize);
- r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- value, index, buf, bufsize, DEFAULT_TIMEOUT);
- if (r < (int)bufsize) {
- if (r >= 0) {
- dev_err(&dev->dev,
- "short control message received (%d < %u)\n",
- r, bufsize);
- r = -EIO;
- }
-
+ r = usb_control_msg_recv(dev, 0, request,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ value, index, buf, bufsize, DEFAULT_TIMEOUT,
+ GFP_KERNEL);
+ if (r) {
dev_err(&dev->dev, "failed to receive control message: %d\n",
r);
return r;
@@ -287,24 +281,19 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
- char *buffer;
+ u8 buffer[2];
int r;
unsigned long flags;
- buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
r = ch341_control_in(dev, CH341_REQ_READ_REG, 0x0706, 0, buffer, size);
- if (r < 0)
- goto out;
+ if (r)
+ return r;
spin_lock_irqsave(&priv->lock, flags);
priv->msr = (~(*buffer)) & CH341_BITS_MODEM_STAT;
spin_unlock_irqrestore(&priv->lock, flags);
-out: kfree(buffer);
- return r;
+ return 0;
}
/* -------------------------------------------------------------------------- */
@@ -312,31 +301,28 @@ out: kfree(buffer);
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
const unsigned int size = 2;
- char *buffer;
+ u8 buffer[2];
int r;
- buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/* expect two bytes 0x27 0x00 */
r = ch341_control_in(dev, CH341_REQ_READ_VERSION, 0, 0, buffer, size);
- if (r < 0)
- goto out;
+ if (r)
+ return r;
dev_dbg(&dev->dev, "Chip version: 0x%02x\n", buffer[0]);
r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT, 0, 0);
if (r < 0)
- goto out;
+ return r;
r = ch341_set_baudrate_lcr(dev, priv, priv->baud_rate, priv->lcr);
if (r < 0)
- goto out;
+ return r;
r = ch341_set_handshake(dev, priv->mcr);
+ if (r < 0)
+ return r;
-out: kfree(buffer);
- return r;
+ return 0;
}
static int ch341_detect_quirks(struct usb_serial_port *port)
@@ -345,40 +331,27 @@ static int ch341_detect_quirks(struct usb_serial_port *port)
struct usb_device *udev = port->serial->dev;
const unsigned int size = 2;
unsigned long quirks = 0;
- char *buffer;
+ u8 buffer[2];
int r;
- buffer = kmalloc(size, GFP_KERNEL);
- if (!buffer)
- return -ENOMEM;
-
/*
* A subset of CH34x devices does not support all features. The
* prescaler is limited and there is no support for sending a RS232
* break condition. A read failure when trying to set up the latter is
* used to detect these devices.
*/
- r = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), CH341_REQ_READ_REG,
- USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
- CH341_REG_BREAK, 0, buffer, size, DEFAULT_TIMEOUT);
+ r = usb_control_msg_recv(udev, 0, CH341_REQ_READ_REG,
+ USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+ CH341_REG_BREAK, 0, &buffer, size,
+ DEFAULT_TIMEOUT, GFP_KERNEL);
if (r == -EPIPE) {
dev_info(&port->dev, "break control not supported, using simulated break\n");
quirks = CH341_QUIRK_LIMITED_PRESCALER | CH341_QUIRK_SIMULATE_BREAK;
r = 0;
- goto out;
- }
-
- if (r != size) {
- if (r >= 0)
- r = -EIO;
+ } else if (r) {
dev_err(&port->dev, "failed to read break control: %d\n", r);
- goto out;
}
- r = 0;
-out:
- kfree(buffer);
-
if (quirks) {
dev_dbg(&port->dev, "enabling quirk flags: 0x%02lx\n", quirks);
priv->quirks |= quirks;
@@ -647,23 +620,19 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
struct ch341_private *priv = usb_get_serial_port_data(port);
int r;
uint16_t reg_contents;
- uint8_t *break_reg;
+ uint8_t break_reg[2];
if (priv->quirks & CH341_QUIRK_SIMULATE_BREAK) {
ch341_simulate_break(tty, break_state);
return;
}
- break_reg = kmalloc(2, GFP_KERNEL);
- if (!break_reg)
- return;
-
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
ch341_break_reg, 0, break_reg, 2);
- if (r < 0) {
+ if (r) {
dev_err(&port->dev, "%s - USB control read error (%d)\n",
__func__, r);
- goto out;
+ return;
}
dev_dbg(&port->dev, "%s - initial ch341 break register contents - reg1: %x, reg2: %x\n",
__func__, break_reg[0], break_reg[1]);
@@ -684,8 +653,6 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
if (r < 0)
dev_err(&port->dev, "%s - USB control write error (%d)\n",
__func__, r);
-out:
- kfree(break_reg);
}
static int ch341_tiocmset(struct tty_struct *tty,
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 189279869a8b..7705328034ca 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -631,30 +631,20 @@ static int cp210x_read_reg_block(struct usb_serial_port *port, u8 req,
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- void *dmabuf;
int result;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
- if (!dmabuf)
- return -ENOMEM;
- result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- req, REQTYPE_INTERFACE_TO_HOST, 0,
- port_priv->bInterfaceNumber, dmabuf, bufsize,
- USB_CTRL_GET_TIMEOUT);
- if (result == bufsize) {
- memcpy(buf, dmabuf, bufsize);
- result = 0;
- } else {
+ result = usb_control_msg_recv(serial->dev, 0, req,
+ REQTYPE_INTERFACE_TO_HOST, 0,
+ port_priv->bInterfaceNumber, buf, bufsize,
+ USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
+ if (result) {
dev_err(&port->dev, "failed get req 0x%x size %d status: %d\n",
req, bufsize, result);
- if (result >= 0)
- result = -EIO;
+ return result;
}
- kfree(dmabuf);
-
- return result;
+ return 0;
}
/*
@@ -672,31 +662,19 @@ static int cp210x_read_u8_reg(struct usb_serial_port *port, u8 req, u8 *val)
static int cp210x_read_vendor_block(struct usb_serial *serial, u8 type, u16 val,
void *buf, int bufsize)
{
- void *dmabuf;
int result;
- dmabuf = kmalloc(bufsize, GFP_KERNEL);
- if (!dmabuf)
- return -ENOMEM;
-
- result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- CP210X_VENDOR_SPECIFIC, type, val,
- cp210x_interface_num(serial), dmabuf, bufsize,
- USB_CTRL_GET_TIMEOUT);
- if (result == bufsize) {
- memcpy(buf, dmabuf, bufsize);
- result = 0;
- } else {
+ result = usb_control_msg_recv(serial->dev, 0, CP210X_VENDOR_SPECIFIC,
+ type, val, cp210x_interface_num(serial), buf, bufsize,
+ USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
+ if (result) {
dev_err(&serial->interface->dev,
"failed to get vendor val 0x%04x size %d: %d\n", val,
bufsize, result);
- if (result >= 0)
- result = -EIO;
+ return result;
}
- kfree(dmabuf);
-
- return result;
+ return 0;
}
/*
@@ -730,21 +708,13 @@ static int cp210x_write_reg_block(struct usb_serial_port *port, u8 req,
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- void *dmabuf;
int result;
- dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
- if (!dmabuf)
- return -ENOMEM;
-
- result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- req, REQTYPE_HOST_TO_INTERFACE, 0,
- port_priv->bInterfaceNumber, dmabuf, bufsize,
- USB_CTRL_SET_TIMEOUT);
-
- kfree(dmabuf);
-
- if (result < 0) {
+ result = usb_control_msg_send(serial->dev, 0, req,
+ REQTYPE_HOST_TO_INTERFACE, 0,
+ port_priv->bInterfaceNumber, buf, bufsize,
+ USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
+ if (result) {
dev_err(&port->dev, "failed set req 0x%x size %d status: %d\n",
req, bufsize, result);
return result;
@@ -773,21 +743,12 @@ static int cp210x_write_u32_reg(struct usb_serial_port *port, u8 req, u32 val)
static int cp210x_write_vendor_block(struct usb_serial *serial, u8 type,
u16 val, void *buf, int bufsize)
{
- void *dmabuf;
int result;
- dmabuf = kmemdup(buf, bufsize, GFP_KERNEL);
- if (!dmabuf)
- return -ENOMEM;
-
- result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
- CP210X_VENDOR_SPECIFIC, type, val,
- cp210x_interface_num(serial), dmabuf, bufsize,
- USB_CTRL_SET_TIMEOUT);
-
- kfree(dmabuf);
-
- if (result < 0) {
+ result = usb_control_msg_send(serial->dev, 0, CP210X_VENDOR_SPECIFIC,
+ type, val, cp210x_interface_num(serial), buf, bufsize,
+ USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
+ if (result) {
dev_err(&serial->interface->dev,
"failed to set vendor val 0x%04x size %d: %d\n", val,
bufsize, result);
@@ -952,29 +913,21 @@ static int cp210x_get_tx_queue_byte_count(struct usb_serial_port *port,
{
struct usb_serial *serial = port->serial;
struct cp210x_port_private *port_priv = usb_get_serial_port_data(port);
- struct cp210x_comm_status *sts;
+ struct cp210x_comm_status sts;
int result;
- sts = kmalloc(sizeof(*sts), GFP_KERNEL);
- if (!sts)
- return -ENOMEM;
-
- result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- CP210X_GET_COMM_STATUS, REQTYPE_INTERFACE_TO_HOST,
- 0, port_priv->bInterfaceNumber, sts, sizeof(*sts),
- USB_CTRL_GET_TIMEOUT);
- if (result == sizeof(*sts)) {
- *count = le32_to_cpu(sts->ulAmountInOutQueue);
- result = 0;
- } else {
+ result = usb_control_msg_recv(serial->dev, 0, CP210X_GET_COMM_STATUS,
+ REQTYPE_INTERFACE_TO_HOST, 0,
+ port_priv->bInterfaceNumber, &sts, sizeof(sts),
+ USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
+ if (result) {
dev_err(&port->dev, "failed to get comm status: %d\n", result);
- if (result >= 0)
- result = -EIO;
+ return result;
}
- kfree(sts);
+ *count = le32_to_cpu(sts.ulAmountInOutQueue);
- return result;
+ return 0;
}
static bool cp210x_tx_empty(struct usb_serial_port *port)
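The ch341 and cp210x hunks above (and the f81232/ftdi_sio ones that follow) share one conversion pattern: usb_control_msg() returning a byte count and requiring a kmalloc'ed bounce buffer is replaced by usb_control_msg_recv()/usb_control_msg_send(), which bounce through their own DMA-able buffer, accept stack or embedded buffers, and return 0 on success or a negative errno (short reads are treated as errors). The following is an illustrative sketch only, not part of the patch, showing the general shape of such a helper; the request constant and function name are placeholders.

    #include <linux/usb.h>
    #include <linux/errno.h>

    #define EXAMPLE_REQ      0x95  /* placeholder vendor request */
    #define EXAMPLE_TIMEOUT  1000  /* ms */

    /* Read a two-byte vendor register into a caller-supplied buffer. */
    static int example_read_reg(struct usb_device *udev, u16 reg, u8 buf[2])
    {
            int ret;

            ret = usb_control_msg_recv(udev, 0, EXAMPLE_REQ,
                                       USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
                                       reg, 0, buf, 2, EXAMPLE_TIMEOUT, GFP_KERNEL);
            if (ret)
                    dev_err(&udev->dev, "reading reg 0x%04x failed: %d\n", reg, ret);

            return ret;
    }

Because the helper reports 0/-errno rather than a transfer length, callers drop the "r < size" checks and the kfree() cleanup paths, which is where most of the deleted lines in these hunks come from.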
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index a7a7af8d05bf..3ad1f515fb68 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -139,67 +139,46 @@ static int calc_baud_divisor(speed_t baudrate, speed_t clockrate)
static int f81232_get_register(struct usb_serial_port *port, u16 reg, u8 *val)
{
int status;
- u8 *tmp;
struct usb_device *dev = port->serial->dev;
- tmp = kmalloc(sizeof(*val), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- status = usb_control_msg(dev,
- usb_rcvctrlpipe(dev, 0),
- F81232_REGISTER_REQUEST,
- F81232_GET_REGISTER,
- reg,
- 0,
- tmp,
- sizeof(*val),
- USB_CTRL_GET_TIMEOUT);
- if (status != sizeof(*val)) {
+ status = usb_control_msg_recv(dev,
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_GET_REGISTER,
+ reg,
+ 0,
+ val,
+ sizeof(*val),
+ USB_CTRL_GET_TIMEOUT,
+ GFP_KERNEL);
+ if (status) {
dev_err(&port->dev, "%s failed status: %d\n", __func__, status);
-
- if (status < 0)
- status = usb_translate_errors(status);
- else
- status = -EIO;
- } else {
- status = 0;
- *val = *tmp;
+ status = usb_translate_errors(status);
}
- kfree(tmp);
return status;
}
static int f81232_set_register(struct usb_serial_port *port, u16 reg, u8 val)
{
int status;
- u8 *tmp;
struct usb_device *dev = port->serial->dev;
- tmp = kmalloc(sizeof(val), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- *tmp = val;
-
- status = usb_control_msg(dev,
- usb_sndctrlpipe(dev, 0),
- F81232_REGISTER_REQUEST,
- F81232_SET_REGISTER,
- reg,
- 0,
- tmp,
- sizeof(val),
- USB_CTRL_SET_TIMEOUT);
- if (status < 0) {
+ status = usb_control_msg_send(dev,
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_SET_REGISTER,
+ reg,
+ 0,
+ &val,
+ sizeof(val),
+ USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+ if (status) {
dev_err(&port->dev, "%s failed status: %d\n", __func__, status);
status = usb_translate_errors(status);
- } else {
- status = 0;
}
- kfree(tmp);
return status;
}
@@ -857,28 +836,22 @@ static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
struct usb_device *dev = interface_to_usbdev(intf);
int retry = F81534A_ACCESS_REG_RETRY;
int status;
- u8 *tmp;
-
- tmp = kmemdup(val, size, GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
while (retry--) {
- status = usb_control_msg(dev,
- usb_sndctrlpipe(dev, 0),
- F81232_REGISTER_REQUEST,
- F81232_SET_REGISTER,
- reg,
- 0,
- tmp,
- size,
- USB_CTRL_SET_TIMEOUT);
- if (status < 0) {
+ status = usb_control_msg_send(dev,
+ 0,
+ F81232_REGISTER_REQUEST,
+ F81232_SET_REGISTER,
+ reg,
+ 0,
+ val,
+ size,
+ USB_CTRL_SET_TIMEOUT,
+ GFP_KERNEL);
+ if (status) {
status = usb_translate_errors(status);
if (status == -EIO)
continue;
- } else {
- status = 0;
}
break;
@@ -889,7 +862,6 @@ static int f81534a_ctrl_set_register(struct usb_interface *intf, u16 reg,
reg, status);
}
- kfree(tmp);
return status;
}
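
The cp210x and f81232 hunks above (and the ftdi_sio, keyspan_pda and kl5kusb105 ones that follow) all apply the same conversion: raw usb_control_msg(), which needs a kmalloc'd bounce buffer and returns the number of bytes transferred, is replaced by usb_control_msg_recv()/usb_control_msg_send(), which bounce the data internally, accept stack variables, and return 0 on success or a negative errno (short transfers included). A minimal sketch of the resulting pattern, using hypothetical vendor requests rather than code from any of the drivers above:

#include <linux/usb.h>

#define VENDOR_READ_REG		0x01	/* hypothetical bRequest */
#define VENDOR_WRITE_REG	0x02	/* hypothetical bRequest */
#define VENDOR_TIMEOUT		1000	/* ms, made up for the sketch */

static int vendor_read_reg(struct usb_device *udev, u16 reg, u8 *val)
{
	/* Data may live on the stack; the helper bounces it internally
	 * and returns 0 on success or a negative errno, with short
	 * reads reported as errors.
	 */
	return usb_control_msg_recv(udev, 0, VENDOR_READ_REG,
				    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				    0, reg, val, sizeof(*val),
				    VENDOR_TIMEOUT, GFP_KERNEL);
}

static int vendor_write_reg(struct usb_device *udev, u16 reg, u8 val)
{
	return usb_control_msg_send(udev, 0, VENDOR_WRITE_REG,
				    USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
				    0, reg, &val, sizeof(val),
				    VENDOR_TIMEOUT, GFP_KERNEL);
}
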
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 99d19828dae6..4edebd14ef29 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1437,27 +1437,15 @@ static int _read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- unsigned char *buf;
+ u8 buf;
int rv;
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- rv = usb_control_msg(udev,
- usb_rcvctrlpipe(udev, 0),
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
- FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
- 0, priv->interface,
- buf, 1, WDR_TIMEOUT);
- if (rv < 1) {
- if (rv >= 0)
- rv = -EIO;
- } else {
- rv = buf[0];
- }
-
- kfree(buf);
+ rv = usb_control_msg_recv(udev, 0, FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
+ FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE, 0,
+ priv->interface, &buf, 1, WDR_TIMEOUT,
+ GFP_KERNEL);
+ if (rv == 0)
+ rv = buf;
return rv;
}
@@ -1852,32 +1840,21 @@ static int ftdi_read_cbus_pins(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_serial *serial = port->serial;
- unsigned char *buf;
+ u8 buf;
int result;
result = usb_autopm_get_interface(serial->interface);
if (result)
return result;
- buf = kmalloc(1, GFP_KERNEL);
- if (!buf) {
- usb_autopm_put_interface(serial->interface);
- return -ENOMEM;
- }
-
- result = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- FTDI_SIO_READ_PINS_REQUEST,
- FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
- priv->interface, buf, 1, WDR_TIMEOUT);
- if (result < 1) {
- if (result >= 0)
- result = -EIO;
- } else {
- result = buf[0];
- }
+ result = usb_control_msg_recv(serial->dev, 0,
+ FTDI_SIO_READ_PINS_REQUEST,
+ FTDI_SIO_READ_PINS_REQUEST_TYPE, 0,
+ priv->interface, &buf, 1, WDR_TIMEOUT,
+ GFP_KERNEL);
+ if (result == 0)
+ result = buf;
- kfree(buf);
usb_autopm_put_interface(serial->interface);
return result;
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 87b89c99d517..1cfcd805f286 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -2890,22 +2890,22 @@ static int keyspan_port_probe(struct usb_serial_port *port)
for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i) {
p_priv->in_buffer[i] = kzalloc(IN_BUFLEN, GFP_KERNEL);
if (!p_priv->in_buffer[i])
- goto err_in_buffer;
+ goto err_free_in_buffer;
}
for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i) {
p_priv->out_buffer[i] = kzalloc(OUT_BUFLEN, GFP_KERNEL);
if (!p_priv->out_buffer[i])
- goto err_out_buffer;
+ goto err_free_out_buffer;
}
p_priv->inack_buffer = kzalloc(INACK_BUFLEN, GFP_KERNEL);
if (!p_priv->inack_buffer)
- goto err_inack_buffer;
+ goto err_free_out_buffer;
p_priv->outcont_buffer = kzalloc(OUTCONT_BUFLEN, GFP_KERNEL);
if (!p_priv->outcont_buffer)
- goto err_outcont_buffer;
+ goto err_free_inack_buffer;
p_priv->device_details = d_details;
@@ -2951,15 +2951,14 @@ static int keyspan_port_probe(struct usb_serial_port *port)
return 0;
-err_outcont_buffer:
+err_free_inack_buffer:
kfree(p_priv->inack_buffer);
-err_inack_buffer:
+err_free_out_buffer:
for (i = 0; i < ARRAY_SIZE(p_priv->out_buffer); ++i)
kfree(p_priv->out_buffer[i]);
-err_out_buffer:
+err_free_in_buffer:
for (i = 0; i < ARRAY_SIZE(p_priv->in_buffer); ++i)
kfree(p_priv->in_buffer[i]);
-err_in_buffer:
kfree(p_priv);
return -ENOMEM;
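
The keyspan_port_probe() hunk above only renames the error labels so that each label describes the cleanup it performs rather than the step that failed. A small hypothetical sketch of that unwinding-ladder shape:

#include <linux/errno.h>
#include <linux/slab.h>

/* Hypothetical three-step allocation showing the "label names the cleanup
 * it performs" convention: each failure jumps to the label that frees
 * everything allocated so far, and the labels unwind in reverse order.
 */
static int example_alloc_buffers(void **a, void **b, void **c)
{
	*a = kzalloc(16, GFP_KERNEL);
	if (!*a)
		return -ENOMEM;

	*b = kzalloc(16, GFP_KERNEL);
	if (!*b)
		goto err_free_a;

	*c = kzalloc(16, GFP_KERNEL);
	if (!*c)
		goto err_free_b;

	return 0;

err_free_b:
	kfree(*b);
err_free_a:
	kfree(*a);
	return -ENOMEM;
}
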
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 39b0f5f344c2..3e7628becdcd 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -77,36 +77,27 @@ static int keyspan_pda_get_write_room(struct keyspan_pda_private *priv)
{
struct usb_serial_port *port = priv->port;
struct usb_serial *serial = port->serial;
- u8 *room;
+ u8 room;
int rc;
- room = kmalloc(1, GFP_KERNEL);
- if (!room)
- return -ENOMEM;
-
- rc = usb_control_msg(serial->dev,
- usb_rcvctrlpipe(serial->dev, 0),
- 6, /* write_room */
- USB_TYPE_VENDOR | USB_RECIP_INTERFACE
- | USB_DIR_IN,
- 0, /* value: 0 means "remaining room" */
- 0, /* index */
- room,
- 1,
- 2000);
- if (rc != 1) {
- if (rc >= 0)
- rc = -EIO;
+ rc = usb_control_msg_recv(serial->dev,
+ 0,
+ 6, /* write_room */
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 0, /* value: 0 means "remaining room" */
+ 0, /* index */
+ &room,
+ 1,
+ 2000,
+ GFP_KERNEL);
+ if (rc) {
dev_dbg(&port->dev, "roomquery failed: %d\n", rc);
- goto out_free;
+ return rc;
}
- dev_dbg(&port->dev, "roomquery says %d\n", *room);
- rc = *room;
-out_free:
- kfree(room);
+ dev_dbg(&port->dev, "roomquery says %d\n", room);
- return rc;
+ return room;
}
static void keyspan_pda_request_unthrottle(struct work_struct *work)
@@ -381,22 +372,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
unsigned char *value)
{
int rc;
- u8 *data;
-
- data = kmalloc(1, GFP_KERNEL);
- if (!data)
- return -ENOMEM;
-
- rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
- 3, /* get pins */
- USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
- 0, 0, data, 1, 2000);
- if (rc == 1)
- *value = *data;
- else if (rc >= 0)
- rc = -EIO;
+ u8 data;
+
+ rc = usb_control_msg_recv(serial->dev, 0,
+ 3, /* get pins */
+ USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_IN,
+ 0,
+ 0,
+ &data,
+ 1,
+ 2000,
+ GFP_KERNEL);
+ if (rc == 0)
+ *value = data;
- kfree(data);
return rc;
}
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index f1e9628a9907..edcc57bd9b5e 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -124,16 +124,18 @@ static int klsi_105_chg_port_settings(struct usb_serial_port *port,
{
int rc;
- rc = usb_control_msg(port->serial->dev,
- usb_sndctrlpipe(port->serial->dev, 0),
- KL5KUSB105A_SIO_SET_DATA,
- USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_INTERFACE,
- 0, /* value */
- 0, /* index */
- settings,
- sizeof(struct klsi_105_port_settings),
- KLSI_TIMEOUT);
- if (rc < 0)
+ rc = usb_control_msg_send(port->serial->dev,
+ 0,
+ KL5KUSB105A_SIO_SET_DATA,
+ USB_TYPE_VENDOR | USB_DIR_OUT |
+ USB_RECIP_INTERFACE,
+ 0, /* value */
+ 0, /* index */
+ settings,
+ sizeof(struct klsi_105_port_settings),
+ KLSI_TIMEOUT,
+ GFP_KERNEL);
+ if (rc)
dev_err(&port->dev,
"Change port settings failed (error = %d)\n", rc);
@@ -145,61 +147,37 @@ static int klsi_105_chg_port_settings(struct usb_serial_port *port,
return rc;
}
-/* translate a 16-bit status value from the device to linux's TIO bits */
-static unsigned long klsi_105_status2linestate(const __u16 status)
-{
- unsigned long res = 0;
-
- res = ((status & KL5KUSB105A_DSR) ? TIOCM_DSR : 0)
- | ((status & KL5KUSB105A_CTS) ? TIOCM_CTS : 0)
- ;
-
- return res;
-}
-
/*
* Read line control via vendor command and return result through
- * *line_state_p
+ * the state pointer.
*/
-/* It seems that the status buffer has always only 2 bytes length */
-#define KLSI_STATUSBUF_LEN 2
static int klsi_105_get_line_state(struct usb_serial_port *port,
- unsigned long *line_state_p)
+ unsigned long *state)
{
+ u16 status;
int rc;
- u8 *status_buf;
- __u16 status;
-
- status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
- if (!status_buf)
- return -ENOMEM;
- status_buf[0] = 0xff;
- status_buf[1] = 0xff;
- rc = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- KL5KUSB105A_SIO_POLL,
- USB_TYPE_VENDOR | USB_DIR_IN,
- 0, /* value */
- 0, /* index */
- status_buf, KLSI_STATUSBUF_LEN,
- 10000
- );
- if (rc != KLSI_STATUSBUF_LEN) {
+ rc = usb_control_msg_recv(port->serial->dev, 0,
+ KL5KUSB105A_SIO_POLL,
+ USB_TYPE_VENDOR | USB_DIR_IN,
+ 0, /* value */
+ 0, /* index */
+ &status, sizeof(status),
+ 10000,
+ GFP_KERNEL);
+ if (rc) {
dev_err(&port->dev, "reading line status failed: %d\n", rc);
- if (rc >= 0)
- rc = -EIO;
- } else {
- status = get_unaligned_le16(status_buf);
+ return rc;
+ }
- dev_dbg(&port->dev, "read status %02x %02x\n",
- status_buf[0], status_buf[1]);
+ le16_to_cpus(&status);
- *line_state_p = klsi_105_status2linestate(status);
- }
+ dev_dbg(&port->dev, "read status %04x\n", status);
- kfree(status_buf);
- return rc;
+ *state = ((status & KL5KUSB105A_DSR) ? TIOCM_DSR : 0) |
+ ((status & KL5KUSB105A_CTS) ? TIOCM_CTS : 0);
+
+ return 0;
}
@@ -245,7 +223,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
int retval = 0;
int rc;
unsigned long line_state;
- struct klsi_105_port_settings *cfg;
+ struct klsi_105_port_settings cfg;
unsigned long flags;
/* Do a defined restart:
@@ -255,27 +233,22 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
* Then read the modem line control and store values in
* priv->line_state.
*/
- cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
- if (!cfg)
- return -ENOMEM;
- cfg->pktlen = 5;
- cfg->baudrate = kl5kusb105a_sio_b9600;
- cfg->databits = kl5kusb105a_dtb_8;
- cfg->unknown1 = 0;
- cfg->unknown2 = 1;
- klsi_105_chg_port_settings(port, cfg);
+ cfg.pktlen = 5;
+ cfg.baudrate = kl5kusb105a_sio_b9600;
+ cfg.databits = kl5kusb105a_dtb_8;
+ cfg.unknown1 = 0;
+ cfg.unknown2 = 1;
+ klsi_105_chg_port_settings(port, &cfg);
spin_lock_irqsave(&priv->lock, flags);
- priv->cfg.pktlen = cfg->pktlen;
- priv->cfg.baudrate = cfg->baudrate;
- priv->cfg.databits = cfg->databits;
- priv->cfg.unknown1 = cfg->unknown1;
- priv->cfg.unknown2 = cfg->unknown2;
+ priv->cfg.pktlen = cfg.pktlen;
+ priv->cfg.baudrate = cfg.baudrate;
+ priv->cfg.databits = cfg.databits;
+ priv->cfg.unknown1 = cfg.unknown1;
+ priv->cfg.unknown2 = cfg.unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
- kfree(cfg);
-
/* READ_ON and urb submission */
rc = usb_serial_generic_open(tty, port);
if (rc)
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 090a78c948f2..24101bd7fcad 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -208,8 +208,8 @@ void usb_serial_put(struct usb_serial *serial)
*
* This is the first place a new tty gets used. Hence this is where we
* acquire references to the usb_serial structure and the driver module,
- * where we store a pointer to the port, and where we do an autoresume.
- * All these actions are reversed in serial_cleanup().
+ * where we store a pointer to the port. All these actions are reversed
+ * in serial_cleanup().
*/
static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
{
@@ -225,17 +225,13 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
serial = port->serial;
if (!try_module_get(serial->type->driver.owner))
- goto error_module_get;
-
- retval = usb_autopm_get_interface(serial->interface);
- if (retval)
- goto error_get_interface;
+ goto err_put_serial;
init_termios = (driver->termios[idx] == NULL);
retval = tty_standard_install(driver, tty);
if (retval)
- goto error_init_termios;
+ goto err_put_module;
mutex_unlock(&serial->disc_mutex);
@@ -247,11 +243,9 @@ static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
return retval;
- error_init_termios:
- usb_autopm_put_interface(serial->interface);
- error_get_interface:
+err_put_module:
module_put(serial->type->driver.owner);
- error_module_get:
+err_put_serial:
usb_serial_put(serial);
mutex_unlock(&serial->disc_mutex);
return retval;
@@ -265,10 +259,19 @@ static int serial_port_activate(struct tty_port *tport, struct tty_struct *tty)
int retval;
mutex_lock(&serial->disc_mutex);
- if (serial->disconnected)
+ if (serial->disconnected) {
retval = -ENODEV;
- else
- retval = port->serial->type->open(tty, port);
+ goto out_unlock;
+ }
+
+ retval = usb_autopm_get_interface(serial->interface);
+ if (retval)
+ goto out_unlock;
+
+ retval = port->serial->type->open(tty, port);
+ if (retval)
+ usb_autopm_put_interface(serial->interface);
+out_unlock:
mutex_unlock(&serial->disc_mutex);
if (retval < 0)
@@ -304,6 +307,8 @@ static void serial_port_shutdown(struct tty_port *tport)
if (drv->close)
drv->close(port);
+
+ usb_autopm_put_interface(port->serial->interface);
}
static void serial_hangup(struct tty_struct *tty)
@@ -352,8 +357,6 @@ static void serial_cleanup(struct tty_struct *tty)
serial = port->serial;
owner = serial->type->driver.owner;
- usb_autopm_put_interface(serial->interface);
-
usb_serial_put(serial);
module_put(owner);
}
@@ -1328,7 +1331,7 @@ static int __init usb_serial_init(void)
result = bus_register(&usb_serial_bus_type);
if (result) {
pr_err("%s - registering bus driver failed\n", __func__);
- goto exit_bus;
+ goto err_put_driver;
}
usb_serial_tty_driver->driver_name = "usbserial";
@@ -1346,25 +1349,23 @@ static int __init usb_serial_init(void)
result = tty_register_driver(usb_serial_tty_driver);
if (result) {
pr_err("%s - tty_register_driver failed\n", __func__);
- goto exit_reg_driver;
+ goto err_unregister_bus;
}
/* register the generic driver, if we should */
result = usb_serial_generic_register();
if (result < 0) {
pr_err("%s - registering generic driver failed\n", __func__);
- goto exit_generic;
+ goto err_unregister_driver;
}
return result;
-exit_generic:
+err_unregister_driver:
tty_unregister_driver(usb_serial_tty_driver);
-
-exit_reg_driver:
+err_unregister_bus:
bus_unregister(&usb_serial_bus_type);
-
-exit_bus:
+err_put_driver:
pr_err("%s - returning with error %d\n", __func__, result);
tty_driver_kref_put(usb_serial_tty_driver);
return result;
@@ -1509,13 +1510,13 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = usb_register(udriver);
if (rc)
- goto failed_usb_register;
+ goto err_free_driver;
for (sd = serial_drivers; *sd; ++sd) {
(*sd)->usb_driver = udriver;
rc = usb_serial_register(*sd);
if (rc)
- goto failed;
+ goto err_deregister_drivers;
}
/* Now set udriver's id_table and look for matches */
@@ -1523,11 +1524,11 @@ int usb_serial_register_drivers(struct usb_serial_driver *const serial_drivers[]
rc = driver_attach(&udriver->drvwrap.driver);
return 0;
- failed:
+err_deregister_drivers:
while (sd-- > serial_drivers)
usb_serial_deregister(*sd);
usb_deregister(udriver);
-failed_usb_register:
+err_free_driver:
kfree(udriver);
return rc;
}
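
The usb-serial.c changes move the usb_autopm_get_interface()/usb_autopm_put_interface() pair out of tty install/cleanup and into the tty_port activate and shutdown paths, so the runtime-PM reference is held only while the port is actually open and is dropped again if open() fails. A rough sketch of that shape using generic tty_port_operations callbacks (the structure and helpers here are illustrative, not the usb-serial ones):

#include <linux/tty.h>
#include <linux/usb.h>

struct my_port {				/* hypothetical per-port data */
	struct tty_port tport;
	struct usb_interface *intf;
};

static int my_hw_open(struct my_port *p);	/* hypothetical hardware open */
static void my_hw_close(struct my_port *p);	/* hypothetical hardware close */

static int my_port_activate(struct tty_port *tport, struct tty_struct *tty)
{
	struct my_port *p = container_of(tport, struct my_port, tport);
	int ret;

	ret = usb_autopm_get_interface(p->intf);	/* resume and take a PM ref */
	if (ret)
		return ret;

	ret = my_hw_open(p);
	if (ret)
		usb_autopm_put_interface(p->intf);	/* drop the ref on failure */

	return ret;
}

static void my_port_shutdown(struct tty_port *tport)
{
	struct my_port *p = container_of(tport, struct my_port, tport);

	my_hw_close(p);
	usb_autopm_put_interface(p->intf);		/* balance activate() */
}

static const struct tty_port_operations my_port_ops = {
	.activate = my_port_activate,
	.shutdown = my_port_shutdown,
};
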
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c6b3fcf90180..29191d33c0e3 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -407,6 +407,16 @@ UNUSUAL_DEV( 0x04b8, 0x0602, 0x0110, 0x0110,
USB_SC_SCSI, USB_PR_BULK, NULL, US_FL_SINGLE_LUN),
/*
+ * Reported by James Buren <braewoods+lkml@braewoods.net>
+ * Virtual ISOs cannot be remounted if ejected while the device is locked
+ * Disable locking to mimic Windows behavior that bypasses the issue
+ */
+UNUSUAL_DEV( 0x04c5, 0x2028, 0x0001, 0x0001,
+ "iODD",
+ "2531/2541",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NOT_LOCKABLE),
+
+/*
* Not sure who reported this originally but
* Pavel Machek <pavel@ucw.cz> reported that the extra US_FL_SINGLE_LUN
* flag be added */
diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig
index a0418f23b4aa..ab480f38523a 100644
--- a/drivers/usb/typec/Kconfig
+++ b/drivers/usb/typec/Kconfig
@@ -65,9 +65,9 @@ config TYPEC_HD3SS3220
config TYPEC_STUSB160X
tristate "STMicroelectronics STUSB160x Type-C controller driver"
- depends on I2C
- depends on REGMAP_I2C
depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
+ depends on I2C
+ select REGMAP_I2C
help
Say Y or M here if your system has STMicroelectronics STUSB160x
Type-C port controller.
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 60d375e9c3c7..1a6b5e872b0d 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -4,6 +4,7 @@ menu "USB Type-C Alternate Mode drivers"
config TYPEC_DP_ALTMODE
tristate "DisplayPort Alternate Mode driver"
+ depends on DRM
help
DisplayPort USB Type-C Alternate Mode allows DisplayPort
displays and adapters to be attached to the USB Type-C
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index b7f094435b00..c1d8c23baa39 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -11,8 +11,10 @@
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/module.h>
+#include <linux/property.h>
#include <linux/usb/pd_vdo.h>
#include <linux/usb/typec_dp.h>
+#include <drm/drm_connector.h>
#include "displayport.h"
#define DP_HEADER(_dp, ver, cmd) (VDO((_dp)->alt->svid, 1, ver, cmd) \
@@ -57,19 +59,28 @@ struct dp_altmode {
struct typec_displayport_data data;
enum dp_state state;
+ bool hpd;
struct mutex lock; /* device lock */
struct work_struct work;
struct typec_altmode *alt;
const struct typec_altmode *port;
+ struct fwnode_handle *connector_fwnode;
};
static int dp_altmode_notify(struct dp_altmode *dp)
{
- u8 state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
+ unsigned long conf;
+ u8 state;
+
+ if (dp->data.conf) {
+ state = get_count_order(DP_CONF_GET_PIN_ASSIGN(dp->data.conf));
+ conf = TYPEC_MODAL_STATE(state);
+ } else {
+ conf = TYPEC_STATE_USB;
+ }
- return typec_altmode_notify(dp->alt, TYPEC_MODAL_STATE(state),
- &dp->data);
+ return typec_altmode_notify(dp->alt, conf, &dp->data);
}
static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
@@ -118,6 +129,7 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
static int dp_altmode_status_update(struct dp_altmode *dp)
{
bool configured = !!DP_CONF_GET_PIN_ASSIGN(dp->data.conf);
+ bool hpd = !!(dp->data.status & DP_STATUS_HPD_STATE);
u8 con = DP_STATUS_CONNECTION(dp->data.status);
int ret = 0;
@@ -130,6 +142,11 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
ret = dp_altmode_configure(dp, con);
if (!ret)
dp->state = DP_STATE_CONFIGURE;
+ } else {
+ if (dp->hpd != hpd) {
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+ dp->hpd = hpd;
+ }
}
return ret;
@@ -137,21 +154,10 @@ static int dp_altmode_status_update(struct dp_altmode *dp)
static int dp_altmode_configured(struct dp_altmode *dp)
{
- int ret;
-
sysfs_notify(&dp->alt->dev.kobj, "displayport", "configuration");
-
- if (!dp->data.conf)
- return typec_altmode_notify(dp->alt, TYPEC_STATE_USB,
- &dp->data);
-
- ret = dp_altmode_notify(dp);
- if (ret)
- return ret;
-
sysfs_notify(&dp->alt->dev.kobj, "displayport", "pin_assignment");
- return 0;
+ return dp_altmode_notify(dp);
}
static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
@@ -172,13 +178,8 @@ static int dp_altmode_configure_vdm(struct dp_altmode *dp, u32 conf)
}
ret = typec_altmode_vdm(dp->alt, header, &conf, 2);
- if (ret) {
- if (DP_CONF_GET_PIN_ASSIGN(dp->data.conf))
- dp_altmode_notify(dp);
- else
- typec_altmode_notify(dp->alt, TYPEC_STATE_USB,
- &dp->data);
- }
+ if (ret)
+ dp_altmode_notify(dp);
return ret;
}
@@ -521,6 +522,7 @@ static const struct attribute_group dp_altmode_group = {
int dp_altmode_probe(struct typec_altmode *alt)
{
const struct typec_altmode *port = typec_altmode_get_partner(alt);
+ struct fwnode_handle *fwnode;
struct dp_altmode *dp;
int ret;
@@ -549,6 +551,11 @@ int dp_altmode_probe(struct typec_altmode *alt)
alt->desc = "DisplayPort";
alt->ops = &dp_altmode_ops;
+ fwnode = dev_fwnode(alt->dev.parent->parent); /* typec_port fwnode */
+ dp->connector_fwnode = fwnode_find_reference(fwnode, "displayport", 0);
+ if (IS_ERR(dp->connector_fwnode))
+ dp->connector_fwnode = NULL;
+
typec_altmode_set_drvdata(alt, dp);
dp->state = DP_STATE_ENTER;
@@ -564,6 +571,13 @@ void dp_altmode_remove(struct typec_altmode *alt)
sysfs_remove_group(&alt->dev.kobj, &dp_altmode_group);
cancel_work_sync(&dp->work);
+
+ if (dp->connector_fwnode) {
+ if (dp->hpd)
+ drm_connector_oob_hotplug_event(dp->connector_fwnode);
+
+ fwnode_handle_put(dp->connector_fwnode);
+ }
}
EXPORT_SYMBOL_GPL(dp_altmode_remove);
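
The DisplayPort alt mode driver now latches the HPD bit from the partner status VDO and forwards level changes to DRM via drm_connector_oob_hotplug_event(), locating the connector through a "displayport" fwnode reference on the Type-C port. A reduced sketch of that lookup and notification, assuming the same property name (error handling trimmed):

#include <linux/err.h>
#include <linux/property.h>
#include <drm/drm_connector.h>

/* Resolve the optional connector reference once, e.g. at probe time. */
static struct fwnode_handle *dp_lookup_connector(struct fwnode_handle *port_fwnode)
{
	struct fwnode_handle *conn;

	conn = fwnode_find_reference(port_fwnode, "displayport", 0);
	return IS_ERR(conn) ? NULL : conn;	/* treat the reference as optional */
}

/* Report HPD level changes out-of-band to DRM. */
static void dp_report_hpd(struct fwnode_handle *conn, bool *old_hpd, bool hpd)
{
	if (conn && *old_hpd != hpd) {
		drm_connector_oob_hotplug_event(conn);
		*old_hpd = hpd;
	}
}
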
diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c
index f633ec15b1a1..cd47c3597e19 100644
--- a/drivers/usb/typec/hd3ss3220.c
+++ b/drivers/usb/typec/hd3ss3220.c
@@ -125,11 +125,9 @@ static irqreturn_t hd3ss3220_irq(struct hd3ss3220 *hd3ss3220)
int err;
hd3ss3220_set_role(hd3ss3220);
- err = regmap_update_bits_base(hd3ss3220->regmap,
- HD3SS3220_REG_CN_STAT_CTRL,
- HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
- HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
- NULL, false, true);
+ err = regmap_write_bits(hd3ss3220->regmap, HD3SS3220_REG_CN_STAT_CTRL,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS,
+ HD3SS3220_REG_CN_STAT_CTRL_INT_STATUS);
if (err < 0)
return IRQ_NONE;
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index c15eec9cc460..35a1307349a2 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -258,7 +258,7 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
* When port has drp toggling enabled, ROLE_CONTROL would only have the initial
* terminations for the toggling and does not indicate the final cc
* terminations when ConnectionResult is 0 i.e. drp toggling stops and
- * the connection is resolbed. Infer port role from TCPC_CC_STATUS based on the
+ * the connection is resolved. Infer port role from TCPC_CC_STATUS based on the
* terminations seen. The port role is then used to set the cc terminations.
*/
if (reg & TCPC_ROLE_CTRL_DRP) {
diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c
index ea4cc0a6e40c..fb8ef12bbe9c 100644
--- a/drivers/usb/typec/tipd/core.c
+++ b/drivers/usb/typec/tipd/core.c
@@ -9,6 +9,7 @@
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/interrupt.h>
@@ -29,6 +30,7 @@
#define TPS_REG_INT_MASK2 0x17
#define TPS_REG_INT_CLEAR1 0x18
#define TPS_REG_INT_CLEAR2 0x19
+#define TPS_REG_SYSTEM_POWER_STATE 0x20
#define TPS_REG_STATUS 0x1a
#define TPS_REG_SYSTEM_CONF 0x28
#define TPS_REG_CTRL_CONF 0x29
@@ -117,13 +119,13 @@ tps6598x_block_read(struct tps6598x *tps, u8 reg, void *val, size_t len)
u8 data[TPS_MAX_LEN + 1];
int ret;
- if (WARN_ON(len + 1 > sizeof(data)))
+ if (len + 1 > sizeof(data))
return -EINVAL;
if (!tps->i2c_protocol)
return regmap_raw_read(tps->regmap, reg, val, len);
- ret = regmap_raw_read(tps->regmap, reg, data, sizeof(data));
+ ret = regmap_raw_read(tps->regmap, reg, data, len + 1);
if (ret)
return ret;
@@ -139,13 +141,21 @@ static int tps6598x_block_write(struct tps6598x *tps, u8 reg,
{
u8 data[TPS_MAX_LEN + 1];
+ if (len + 1 > sizeof(data))
+ return -EINVAL;
+
if (!tps->i2c_protocol)
return regmap_raw_write(tps->regmap, reg, val, len);
data[0] = len;
memcpy(&data[1], val, len);
- return regmap_raw_write(tps->regmap, reg, data, sizeof(data));
+ return regmap_raw_write(tps->regmap, reg, data, len + 1);
+}
+
+static inline int tps6598x_read8(struct tps6598x *tps, u8 reg, u8 *val)
+{
+ return tps6598x_block_read(tps, reg, val, sizeof(u8));
}
static inline int tps6598x_read16(struct tps6598x *tps, u8 reg, u16 *val)
@@ -401,13 +411,114 @@ static const struct typec_operations tps6598x_ops = {
.pr_set = tps6598x_pr_set,
};
+static bool tps6598x_read_status(struct tps6598x *tps, u32 *status)
+{
+ int ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_STATUS, status);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read status\n", __func__);
+ return false;
+ }
+ trace_tps6598x_status(*status);
+
+ return true;
+}
+
+static bool tps6598x_read_data_status(struct tps6598x *tps)
+{
+ u32 data_status;
+ int ret;
+
+ ret = tps6598x_read32(tps, TPS_REG_DATA_STATUS, &data_status);
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to read data status: %d\n", ret);
+ return false;
+ }
+ trace_tps6598x_data_status(data_status);
+
+ return true;
+}
+
+static bool tps6598x_read_power_status(struct tps6598x *tps)
+{
+ u16 pwr_status;
+ int ret;
+
+ ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
+ if (ret < 0) {
+ dev_err(tps->dev, "failed to read power status: %d\n", ret);
+ return false;
+ }
+ trace_tps6598x_power_status(pwr_status);
+
+ return true;
+}
+
+static void tps6598x_handle_plug_event(struct tps6598x *tps, u32 status)
+{
+ int ret;
+
+ if (status & TPS_STATUS_PLUG_PRESENT) {
+ ret = tps6598x_connect(tps, status);
+ if (ret)
+ dev_err(tps->dev, "failed to register partner\n");
+ } else {
+ tps6598x_disconnect(tps, status);
+ }
+}
+
+static irqreturn_t cd321x_interrupt(int irq, void *data)
+{
+ struct tps6598x *tps = data;
+ u64 event;
+ u32 status;
+ int ret;
+
+ mutex_lock(&tps->lock);
+
+ ret = tps6598x_read64(tps, TPS_REG_INT_EVENT1, &event);
+ if (ret) {
+ dev_err(tps->dev, "%s: failed to read events\n", __func__);
+ goto err_unlock;
+ }
+ trace_cd321x_irq(event);
+
+ if (!event)
+ goto err_unlock;
+
+ if (!tps6598x_read_status(tps, &status))
+ goto err_clear_ints;
+
+ if (event & APPLE_CD_REG_INT_POWER_STATUS_UPDATE)
+ if (!tps6598x_read_power_status(tps))
+ goto err_clear_ints;
+
+ if (event & APPLE_CD_REG_INT_DATA_STATUS_UPDATE)
+ if (!tps6598x_read_data_status(tps))
+ goto err_clear_ints;
+
+ /* Handle plug insert or removal */
+ if (event & APPLE_CD_REG_INT_PLUG_EVENT)
+ tps6598x_handle_plug_event(tps, status);
+
+err_clear_ints:
+ tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event);
+
+err_unlock:
+ mutex_unlock(&tps->lock);
+
+ if (event)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
+}
+
static irqreturn_t tps6598x_interrupt(int irq, void *data)
{
struct tps6598x *tps = data;
u64 event1;
u64 event2;
- u32 status, data_status;
- u16 pwr_status;
+ u32 status;
int ret;
mutex_lock(&tps->lock);
@@ -420,42 +531,23 @@ static irqreturn_t tps6598x_interrupt(int irq, void *data)
}
trace_tps6598x_irq(event1, event2);
- ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
- if (ret) {
- dev_err(tps->dev, "%s: failed to read status\n", __func__);
+ if (!(event1 | event2))
+ goto err_unlock;
+
+ if (!tps6598x_read_status(tps, &status))
goto err_clear_ints;
- }
- trace_tps6598x_status(status);
- if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE) {
- ret = tps6598x_read16(tps, TPS_REG_POWER_STATUS, &pwr_status);
- if (ret < 0) {
- dev_err(tps->dev, "failed to read power status: %d\n", ret);
+ if ((event1 | event2) & TPS_REG_INT_POWER_STATUS_UPDATE)
+ if (!tps6598x_read_power_status(tps))
goto err_clear_ints;
- }
- trace_tps6598x_power_status(pwr_status);
- }
- if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE) {
- ret = tps6598x_read32(tps, TPS_REG_DATA_STATUS, &data_status);
- if (ret < 0) {
- dev_err(tps->dev, "failed to read data status: %d\n", ret);
+ if ((event1 | event2) & TPS_REG_INT_DATA_STATUS_UPDATE)
+ if (!tps6598x_read_data_status(tps))
goto err_clear_ints;
- }
- trace_tps6598x_data_status(data_status);
- }
/* Handle plug insert or removal */
- if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT) {
- if (status & TPS_STATUS_PLUG_PRESENT) {
- ret = tps6598x_connect(tps, status);
- if (ret)
- dev_err(tps->dev,
- "failed to register partner\n");
- } else {
- tps6598x_disconnect(tps, status);
- }
- }
+ if ((event1 | event2) & TPS_REG_INT_PLUG_EVENT)
+ tps6598x_handle_plug_event(tps, status);
err_clear_ints:
tps6598x_write64(tps, TPS_REG_INT_CLEAR1, event1);
@@ -464,7 +556,9 @@ err_clear_ints:
err_unlock:
mutex_unlock(&tps->lock);
- return IRQ_HANDLED;
+ if (event1 | event2)
+ return IRQ_HANDLED;
+ return IRQ_NONE;
}
static int tps6598x_check_mode(struct tps6598x *tps)
@@ -547,6 +641,32 @@ static int tps6598x_psy_get_prop(struct power_supply *psy,
return ret;
}
+static int cd321x_switch_power_state(struct tps6598x *tps, u8 target_state)
+{
+ u8 state;
+ int ret;
+
+ ret = tps6598x_read8(tps, TPS_REG_SYSTEM_POWER_STATE, &state);
+ if (ret)
+ return ret;
+
+ if (state == target_state)
+ return 0;
+
+ ret = tps6598x_exec_cmd(tps, "SPSS", sizeof(u8), &target_state, 0, NULL);
+ if (ret)
+ return ret;
+
+ ret = tps6598x_read8(tps, TPS_REG_SYSTEM_POWER_STATE, &state);
+ if (ret)
+ return ret;
+
+ if (state != target_state)
+ return -EINVAL;
+
+ return 0;
+}
+
static int devm_tps6598_psy_register(struct tps6598x *tps)
{
struct power_supply_config psy_cfg = {};
@@ -578,6 +698,8 @@ static int devm_tps6598_psy_register(struct tps6598x *tps)
static int tps6598x_probe(struct i2c_client *client)
{
+ irq_handler_t irq_handler = tps6598x_interrupt;
+ struct device_node *np = client->dev.of_node;
struct typec_capability typec_cap = { };
struct tps6598x *tps;
struct fwnode_handle *fwnode;
@@ -604,9 +726,6 @@ static int tps6598x_probe(struct i2c_client *client)
/*
* Checking can the adapter handle SMBus protocol. If it can not, the
* driver needs to take care of block reads separately.
- *
- * FIXME: Testing with I2C_FUNC_I2C. regmap-i2c uses I2C protocol
- * unconditionally if the adapter has I2C_FUNC_I2C set.
*/
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
tps->i2c_protocol = true;
@@ -616,6 +735,31 @@ static int tps6598x_probe(struct i2c_client *client)
if (ret)
return ret;
+ if (np && of_device_is_compatible(np, "apple,cd321x")) {
+ /* Switch CD321X chips to the correct system power state */
+ ret = cd321x_switch_power_state(tps, TPS_SYSTEM_POWER_STATE_S0);
+ if (ret)
+ return ret;
+
+ /* CD321X chips have all interrupts masked initially */
+ ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
+ APPLE_CD_REG_INT_POWER_STATUS_UPDATE |
+ APPLE_CD_REG_INT_DATA_STATUS_UPDATE |
+ APPLE_CD_REG_INT_PLUG_EVENT);
+ if (ret)
+ return ret;
+
+ irq_handler = cd321x_interrupt;
+ } else {
+ /* Enable power status, data status and plug event interrupts */
+ ret = tps6598x_write64(tps, TPS_REG_INT_MASK1,
+ TPS_REG_INT_POWER_STATUS_UPDATE |
+ TPS_REG_INT_DATA_STATUS_UPDATE |
+ TPS_REG_INT_PLUG_EVENT);
+ if (ret)
+ return ret;
+ }
+
ret = tps6598x_read32(tps, TPS_REG_STATUS, &status);
if (ret < 0)
return ret;
@@ -695,7 +839,7 @@ static int tps6598x_probe(struct i2c_client *client)
}
ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- tps6598x_interrupt,
+ irq_handler,
IRQF_SHARED | IRQF_ONESHOT,
dev_name(&client->dev), tps);
if (ret) {
@@ -729,6 +873,7 @@ static int tps6598x_remove(struct i2c_client *client)
static const struct of_device_id tps6598x_of_match[] = {
{ .compatible = "ti,tps6598x", },
+ { .compatible = "apple,cd321x", },
{}
};
MODULE_DEVICE_TABLE(of, tps6598x_of_match);
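
tps6598x_block_read() and tps6598x_block_write() now reject lengths that would overflow the bounce buffer and transfer only len + 1 bytes, since in I2C (non-SMBus) mode the payload is prefixed with a length byte. A minimal sketch of the same guard, with hypothetical names:

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/types.h>

#define MY_MAX_LEN	64	/* hypothetical maximum payload */

/* Length-prefixed register write used when the bus cannot do SMBus block
 * transfers natively: data[0] carries the payload length.
 */
static int my_block_write(struct regmap *regmap, u8 reg,
			  const void *val, size_t len)
{
	u8 data[MY_MAX_LEN + 1];

	if (len + 1 > sizeof(data))	/* reject payloads that would overflow */
		return -EINVAL;

	data[0] = len;
	memcpy(&data[1], val, len);

	/* Transfer only the bytes that are actually valid. */
	return regmap_raw_write(regmap, reg, data, len + 1);
}
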
diff --git a/drivers/usb/typec/tipd/tps6598x.h b/drivers/usb/typec/tipd/tps6598x.h
index 003a577be216..3dae84c524fb 100644
--- a/drivers/usb/typec/tipd/tps6598x.h
+++ b/drivers/usb/typec/tipd/tps6598x.h
@@ -129,6 +129,18 @@
#define TPS_REG_INT_HARD_RESET BIT(1)
#define TPS_REG_INT_PD_SOFT_RESET BIT(0)
+/* Apple-specific TPS_REG_INT_* bits */
+#define APPLE_CD_REG_INT_DATA_STATUS_UPDATE BIT(10)
+#define APPLE_CD_REG_INT_POWER_STATUS_UPDATE BIT(9)
+#define APPLE_CD_REG_INT_STATUS_UPDATE BIT(8)
+#define APPLE_CD_REG_INT_PLUG_EVENT BIT(1)
+
+/* TPS_REG_SYSTEM_POWER_STATE states */
+#define TPS_SYSTEM_POWER_STATE_S0 0x00
+#define TPS_SYSTEM_POWER_STATE_S3 0x03
+#define TPS_SYSTEM_POWER_STATE_S4 0x04
+#define TPS_SYSTEM_POWER_STATE_S5 0x05
+
/* TPS_REG_POWER_STATUS bits */
#define TPS_POWER_STATUS_CONNECTION(x) TPS_FIELD_GET(BIT(0), (x))
#define TPS_POWER_STATUS_SOURCESINK(x) TPS_FIELD_GET(BIT(1), (x))
diff --git a/drivers/usb/typec/tipd/trace.h b/drivers/usb/typec/tipd/trace.h
index 5d09d6f78930..12cad1bde7cc 100644
--- a/drivers/usb/typec/tipd/trace.h
+++ b/drivers/usb/typec/tipd/trace.h
@@ -67,6 +67,13 @@
{ TPS_REG_INT_USER_VID_ALT_MODE_ATTN_VDM, "USER_VID_ALT_MODE_ATTN_VDM" }, \
{ TPS_REG_INT_USER_VID_ALT_MODE_OTHER_VDM, "USER_VID_ALT_MODE_OTHER_VDM" })
+#define show_cd321x_irq_flags(flags) \
+ __print_flags_u64(flags, "|", \
+ { APPLE_CD_REG_INT_PLUG_EVENT, "PLUG_EVENT" }, \
+ { APPLE_CD_REG_INT_POWER_STATUS_UPDATE, "POWER_STATUS_UPDATE" }, \
+ { APPLE_CD_REG_INT_DATA_STATUS_UPDATE, "DATA_STATUS_UPDATE" }, \
+ { APPLE_CD_REG_INT_STATUS_UPDATE, "STATUS_UPDATE" })
+
#define TPS6598X_STATUS_FLAGS_MASK (GENMASK(31, 0) ^ (TPS_STATUS_CONN_STATE_MASK | \
TPS_STATUS_PP_5V0_SWITCH_MASK | \
TPS_STATUS_PP_HV_SWITCH_MASK | \
@@ -207,6 +214,22 @@ TRACE_EVENT(tps6598x_irq,
show_irq_flags(__entry->event2))
);
+TRACE_EVENT(cd321x_irq,
+ TP_PROTO(u64 event),
+ TP_ARGS(event),
+
+ TP_STRUCT__entry(
+ __field(u64, event)
+ ),
+
+ TP_fast_assign(
+ __entry->event = event;
+ ),
+
+ TP_printk("event=%s",
+ show_cd321x_irq_flags(__entry->event))
+);
+
TRACE_EVENT(tps6598x_status,
TP_PROTO(u32 status),
TP_ARGS(status),
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index 5ef5bd0e87cf..6aa28384f77f 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -128,8 +128,10 @@ static int ucsi_exec_command(struct ucsi *ucsi, u64 cmd)
if (ret)
return ret;
- if (cci & UCSI_CCI_BUSY)
+ if (cci & UCSI_CCI_BUSY) {
+ ucsi->ops->async_write(ucsi, UCSI_CANCEL, NULL, 0);
return -EBUSY;
+ }
if (!(cci & UCSI_CCI_COMMAND_COMPLETE))
return -EIO;
@@ -189,6 +191,64 @@ int ucsi_resume(struct ucsi *ucsi)
EXPORT_SYMBOL_GPL(ucsi_resume);
/* -------------------------------------------------------------------------- */
+struct ucsi_work {
+ struct delayed_work work;
+ unsigned long delay;
+ unsigned int count;
+ struct ucsi_connector *con;
+ int (*cb)(struct ucsi_connector *);
+};
+
+static void ucsi_poll_worker(struct work_struct *work)
+{
+ struct ucsi_work *uwork = container_of(work, struct ucsi_work, work.work);
+ struct ucsi_connector *con = uwork->con;
+ int ret;
+
+ mutex_lock(&con->lock);
+
+ if (!con->partner) {
+ mutex_unlock(&con->lock);
+ kfree(uwork);
+ return;
+ }
+
+ ret = uwork->cb(con);
+
+ if (uwork->count-- && (ret == -EBUSY || ret == -ETIMEDOUT))
+ queue_delayed_work(con->wq, &uwork->work, uwork->delay);
+ else
+ kfree(uwork);
+
+ mutex_unlock(&con->lock);
+}
+
+static int ucsi_partner_task(struct ucsi_connector *con,
+ int (*cb)(struct ucsi_connector *),
+ int retries, unsigned long delay)
+{
+ struct ucsi_work *uwork;
+
+ if (!con->partner)
+ return 0;
+
+ uwork = kzalloc(sizeof(*uwork), GFP_KERNEL);
+ if (!uwork)
+ return -ENOMEM;
+
+ INIT_DELAYED_WORK(&uwork->work, ucsi_poll_worker);
+ uwork->count = retries;
+ uwork->delay = delay;
+ uwork->con = con;
+ uwork->cb = cb;
+
+ queue_delayed_work(con->wq, &uwork->work, delay);
+
+ return 0;
+}
+
+/* -------------------------------------------------------------------------- */
+
void ucsi_altmode_update_active(struct ucsi_connector *con)
{
const struct typec_altmode *altmode = NULL;
@@ -435,6 +495,8 @@ static int ucsi_register_altmodes(struct ucsi_connector *con, u8 recipient)
command |= UCSI_GET_ALTMODE_CONNECTOR_NUMBER(con->num);
command |= UCSI_GET_ALTMODE_OFFSET(i);
len = ucsi_send_command(con->ucsi, command, alt, sizeof(alt));
+ if (len == -EBUSY)
+ continue;
if (len <= 0)
return len;
@@ -509,7 +571,7 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
command |= UCSI_GET_PDOS_SRC_PDOS;
ret = ucsi_send_command(ucsi, command, pdos + offset,
num_pdos * sizeof(u32));
- if (ret < 0)
+ if (ret < 0 && ret != -ETIMEDOUT)
dev_err(ucsi->dev, "UCSI_GET_PDOS failed (%d)\n", ret);
if (ret == 0 && offset == 0)
dev_warn(ucsi->dev, "UCSI_GET_PDOS returned 0 bytes\n");
@@ -517,26 +579,49 @@ static int ucsi_get_pdos(struct ucsi_connector *con, int is_partner,
return ret;
}
-static void ucsi_get_src_pdos(struct ucsi_connector *con, int is_partner)
+static int ucsi_get_src_pdos(struct ucsi_connector *con)
{
int ret;
/* UCSI max payload means only getting at most 4 PDOs at a time */
ret = ucsi_get_pdos(con, 1, con->src_pdos, 0, UCSI_MAX_PDOS);
if (ret < 0)
- return;
+ return ret;
con->num_pdos = ret / sizeof(u32); /* number of bytes to 32-bit PDOs */
if (con->num_pdos < UCSI_MAX_PDOS)
- return;
+ return 0;
/* get the remaining PDOs, if any */
ret = ucsi_get_pdos(con, 1, con->src_pdos, UCSI_MAX_PDOS,
PDO_MAX_OBJECTS - UCSI_MAX_PDOS);
if (ret < 0)
- return;
+ return ret;
con->num_pdos += ret / sizeof(u32);
+
+ ucsi_port_psy_changed(con);
+
+ return 0;
+}
+
+static int ucsi_check_altmodes(struct ucsi_connector *con)
+{
+ int ret;
+
+ ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
+ if (ret && ret != -ETIMEDOUT)
+ dev_err(con->ucsi->dev,
+ "con%d: failed to register partner alt modes (%d)\n",
+ con->num, ret);
+
+ /* Ignoring the errors in this case. */
+ if (con->partner_altmode[0]) {
+ ucsi_altmode_update_active(con);
+ return 0;
+ }
+
+ return ret;
}
static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
@@ -545,7 +630,8 @@ static void ucsi_pwr_opmode_change(struct ucsi_connector *con)
case UCSI_CONSTAT_PWR_OPMODE_PD:
con->rdo = con->status.request_data_obj;
typec_set_pwr_opmode(con->port, TYPEC_PWR_MODE_PD);
- ucsi_get_src_pdos(con, 1);
+ ucsi_partner_task(con, ucsi_get_src_pdos, 30, 0);
+ ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
break;
case UCSI_CONSTAT_PWR_OPMODE_TYPEC1_5:
con->rdo = 0;
@@ -614,9 +700,6 @@ static void ucsi_partner_change(struct ucsi_connector *con)
enum usb_role u_role = USB_ROLE_NONE;
int ret;
- if (!con->partner)
- return;
-
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
@@ -633,10 +716,6 @@ static void ucsi_partner_change(struct ucsi_connector *con)
break;
}
- /* Complete pending data role swap */
- if (!completion_done(&con->complete))
- complete(&con->complete);
-
/* Only notify USB controller if partner supports USB data */
if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
u_role = USB_ROLE_NONE;
@@ -645,15 +724,31 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (ret)
dev_err(con->ucsi->dev, "con:%d: failed to set usb role:%d\n",
con->num, u_role);
+}
- /* Can't rely on Partner Flags field. Always checking the alt modes. */
- ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
- if (ret)
- dev_err(con->ucsi->dev,
- "con%d: failed to register partner alternate modes\n",
- con->num);
- else
- ucsi_altmode_update_active(con);
+static int ucsi_check_connection(struct ucsi_connector *con)
+{
+ u64 command;
+ int ret;
+
+ command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
+ ret = ucsi_send_command(con->ucsi, command, &con->status, sizeof(con->status));
+ if (ret < 0) {
+ dev_err(con->ucsi->dev, "GET_CONNECTOR_STATUS failed (%d)\n", ret);
+ return ret;
+ }
+
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) ==
+ UCSI_CONSTAT_PWR_OPMODE_PD)
+ ucsi_partner_task(con, ucsi_check_altmodes, 30, 0);
+ } else {
+ ucsi_partner_change(con);
+ ucsi_port_psy_changed(con);
+ ucsi_unregister_partner(con);
+ }
+
+ return 0;
}
static void ucsi_handle_connector_change(struct work_struct *work)
@@ -661,122 +756,24 @@ static void ucsi_handle_connector_change(struct work_struct *work)
struct ucsi_connector *con = container_of(work, struct ucsi_connector,
work);
struct ucsi *ucsi = con->ucsi;
- struct ucsi_connector_status pre_ack_status;
- struct ucsi_connector_status post_ack_status;
enum typec_role role;
- enum usb_role u_role = USB_ROLE_NONE;
- u16 inferred_changes;
- u16 changed_flags;
u64 command;
int ret;
mutex_lock(&con->lock);
- /*
- * Some/many PPMs have an issue where all fields in the change bitfield
- * are cleared when an ACK is send. This will causes any change
- * between GET_CONNECTOR_STATUS and ACK to be lost.
- *
- * We work around this by re-fetching the connector status afterwards.
- * We then infer any changes that we see have happened but that may not
- * be represented in the change bitfield.
- *
- * Also, even though we don't need to know the currently supported alt
- * modes, we run the GET_CAM_SUPPORTED command to ensure the PPM does
- * not get stuck in case it assumes we do.
- * Always do this, rather than relying on UCSI_CONSTAT_CAM_CHANGE to be
- * set in the change bitfield.
- *
- * We end up with the following actions:
- * 1. UCSI_GET_CONNECTOR_STATUS, store result, update unprocessed_changes
- * 2. UCSI_GET_CAM_SUPPORTED, discard result
- * 3. ACK connector change
- * 4. UCSI_GET_CONNECTOR_STATUS, store result
- * 5. Infere lost changes by comparing UCSI_GET_CONNECTOR_STATUS results
- * 6. If PPM reported a new change, then restart in order to ACK
- * 7. Process everything as usual.
- *
- * We may end up seeing a change twice, but we can only miss extremely
- * short transitional changes.
- */
-
- /* 1. First UCSI_GET_CONNECTOR_STATUS */
command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(ucsi, command, &pre_ack_status,
- sizeof(pre_ack_status));
- if (ret < 0) {
- dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
- __func__, ret);
- goto out_unlock;
- }
- con->unprocessed_changes |= pre_ack_status.change;
-
- /* 2. Run UCSI_GET_CAM_SUPPORTED and discard the result. */
- command = UCSI_GET_CAM_SUPPORTED;
- command |= UCSI_CONNECTOR_NUMBER(con->num);
- ucsi_send_command(con->ucsi, command, NULL, 0);
-
- /* 3. ACK connector change */
- ret = ucsi_acknowledge_connector_change(ucsi);
- clear_bit(EVENT_PENDING, &ucsi->flags);
- if (ret) {
- dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
- goto out_unlock;
- }
-
- /* 4. Second UCSI_GET_CONNECTOR_STATUS */
- command = UCSI_GET_CONNECTOR_STATUS | UCSI_CONNECTOR_NUMBER(con->num);
- ret = ucsi_send_command(ucsi, command, &post_ack_status,
- sizeof(post_ack_status));
+ ret = ucsi_send_command(ucsi, command, &con->status, sizeof(con->status));
if (ret < 0) {
dev_err(ucsi->dev, "%s: GET_CONNECTOR_STATUS failed (%d)\n",
__func__, ret);
goto out_unlock;
}
- /* 5. Inferre any missing changes */
- changed_flags = pre_ack_status.flags ^ post_ack_status.flags;
- inferred_changes = 0;
- if (UCSI_CONSTAT_PWR_OPMODE(changed_flags) != 0)
- inferred_changes |= UCSI_CONSTAT_POWER_OPMODE_CHANGE;
-
- if (changed_flags & UCSI_CONSTAT_CONNECTED)
- inferred_changes |= UCSI_CONSTAT_CONNECT_CHANGE;
-
- if (changed_flags & UCSI_CONSTAT_PWR_DIR)
- inferred_changes |= UCSI_CONSTAT_POWER_DIR_CHANGE;
-
- if (UCSI_CONSTAT_PARTNER_FLAGS(changed_flags) != 0)
- inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
-
- if (UCSI_CONSTAT_PARTNER_TYPE(changed_flags) != 0)
- inferred_changes |= UCSI_CONSTAT_PARTNER_CHANGE;
-
- /* Mask out anything that was correctly notified in the later call. */
- inferred_changes &= ~post_ack_status.change;
- if (inferred_changes)
- dev_dbg(ucsi->dev, "%s: Inferred changes that would have been lost: 0x%04x\n",
- __func__, inferred_changes);
-
- con->unprocessed_changes |= inferred_changes;
-
- /* 6. If PPM reported a new change, then restart in order to ACK */
- if (post_ack_status.change)
- goto out_unlock;
-
- /* 7. Continue as if nothing happened */
- con->status = post_ack_status;
- con->status.change = con->unprocessed_changes;
- con->unprocessed_changes = 0;
+ trace_ucsi_connector_change(con->num, &con->status);
role = !!(con->status.flags & UCSI_CONSTAT_PWR_DIR);
- if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
- con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE) {
- ucsi_pwr_opmode_change(con);
- ucsi_port_psy_changed(con);
- }
-
if (con->status.change & UCSI_CONSTAT_POWER_DIR_CHANGE) {
typec_set_pwr_role(con->port, role);
@@ -787,54 +784,39 @@ static void ucsi_handle_connector_change(struct work_struct *work)
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
typec_set_pwr_role(con->port, role);
+ ucsi_port_psy_changed(con);
+ ucsi_partner_change(con);
- switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
- case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
- u_role = USB_ROLE_HOST;
- fallthrough;
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
- typec_set_data_role(con->port, TYPEC_HOST);
- break;
- case UCSI_CONSTAT_PARTNER_TYPE_DFP:
- u_role = USB_ROLE_DEVICE;
- typec_set_data_role(con->port, TYPEC_DEVICE);
- break;
- default:
- break;
- }
-
- if (con->status.flags & UCSI_CONSTAT_CONNECTED)
+ if (con->status.flags & UCSI_CONSTAT_CONNECTED) {
ucsi_register_partner(con);
- else
+ ucsi_partner_task(con, ucsi_check_connection, 1, HZ);
+ } else {
ucsi_unregister_partner(con);
+ }
+ }
- ucsi_port_psy_changed(con);
+ if (con->status.change & UCSI_CONSTAT_POWER_OPMODE_CHANGE ||
+ con->status.change & UCSI_CONSTAT_POWER_LEVEL_CHANGE)
+ ucsi_pwr_opmode_change(con);
- /* Only notify USB controller if partner supports USB data */
- if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) &
- UCSI_CONSTAT_PARTNER_FLAG_USB))
- u_role = USB_ROLE_NONE;
+ if (con->partner && con->status.change & UCSI_CONSTAT_PARTNER_CHANGE) {
+ ucsi_partner_change(con);
- ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
- if (ret)
- dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
- con->num, u_role);
+ /* Complete pending data role swap */
+ if (!completion_done(&con->complete))
+ complete(&con->complete);
}
- if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
- ucsi_partner_change(con);
+ if (con->status.change & UCSI_CONSTAT_CAM_CHANGE)
+ ucsi_partner_task(con, ucsi_check_altmodes, 1, 0);
- trace_ucsi_connector_change(con->num, &con->status);
+ clear_bit(EVENT_PENDING, &con->ucsi->flags);
-out_unlock:
- if (test_and_clear_bit(EVENT_PENDING, &ucsi->flags)) {
- schedule_work(&con->work);
- mutex_unlock(&con->lock);
- return;
- }
+ ret = ucsi_acknowledge_connector_change(ucsi);
+ if (ret)
+ dev_err(ucsi->dev, "%s: ACK failed (%d)", __func__, ret);
- clear_bit(EVENT_PROCESSING, &ucsi->flags);
+out_unlock:
mutex_unlock(&con->lock);
}
@@ -852,9 +834,7 @@ void ucsi_connector_change(struct ucsi *ucsi, u8 num)
return;
}
- set_bit(EVENT_PENDING, &ucsi->flags);
-
- if (!test_and_set_bit(EVENT_PROCESSING, &ucsi->flags))
+ if (!test_and_set_bit(EVENT_PENDING, &ucsi->flags))
schedule_work(&con->work);
}
EXPORT_SYMBOL_GPL(ucsi_connector_change);
@@ -1041,8 +1021,18 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
enum typec_accessory *accessory = cap->accessory;
enum usb_role u_role = USB_ROLE_NONE;
u64 command;
+ char *name;
int ret;
+ name = kasprintf(GFP_KERNEL, "%s-con%d", dev_name(ucsi->dev), con->num);
+ if (!name)
+ return -ENOMEM;
+
+ con->wq = create_singlethread_workqueue(name);
+ kfree(name);
+ if (!con->wq)
+ return -ENOMEM;
+
INIT_WORK(&con->work, ucsi_handle_connector_change);
init_completion(&con->complete);
mutex_init(&con->lock);
@@ -1160,16 +1150,9 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ret = 0;
}
- if (con->partner) {
- ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
- if (ret) {
- dev_err(ucsi->dev,
- "con%d: failed to register alternate modes\n",
- con->num);
- ret = 0;
- } else {
- ucsi_altmode_update_active(con);
- }
+ if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == UCSI_CONSTAT_PWR_OPMODE_PD) {
+ ucsi_get_src_pdos(con);
+ ucsi_check_altmodes(con);
}
trace_ucsi_register_port(con->num, &con->status);
@@ -1178,6 +1161,12 @@ out:
fwnode_handle_put(cap->fwnode);
out_unlock:
mutex_unlock(&con->lock);
+
+ if (ret && con->wq) {
+ destroy_workqueue(con->wq);
+ con->wq = NULL;
+ }
+
return ret;
}
@@ -1248,6 +1237,8 @@ err_unregister:
ucsi_unregister_partner(con);
ucsi_unregister_altmodes(con, UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(con);
+ if (con->wq)
+ destroy_workqueue(con->wq);
typec_unregister_port(con->port);
con->port = NULL;
}
@@ -1370,6 +1361,8 @@ void ucsi_unregister(struct ucsi *ucsi)
ucsi_unregister_altmodes(&ucsi->connector[i],
UCSI_RECIPIENT_CON);
ucsi_unregister_port_psy(&ucsi->connector[i]);
+ if (ucsi->connector[i].wq)
+ destroy_workqueue(ucsi->connector[i].wq);
typec_unregister_port(ucsi->connector[i].port);
}
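
ucsi_partner_task() queues partner follow-up work (source PDOs, alt mode checks, connection re-checks) on a per-connector workqueue and lets the work item re-queue itself while the PPM keeps answering -EBUSY or -ETIMEDOUT, up to a retry budget. A generic sketch of that self-rescheduling pattern (names and the busy condition are placeholders, not the UCSI ones):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

/* Hypothetical retry wrapper around a callback that may return -EBUSY. */
struct retry_work {
	struct delayed_work work;
	unsigned long delay;
	unsigned int count;
	int (*cb)(void *ctx);
	void *ctx;
};

static void retry_worker(struct work_struct *work)
{
	struct retry_work *rw = container_of(work, struct retry_work, work.work);
	int ret = rw->cb(rw->ctx);

	/* Re-queue while the callback keeps reporting "busy" and retries remain. */
	if (rw->count-- && ret == -EBUSY)
		schedule_delayed_work(&rw->work, rw->delay);
	else
		kfree(rw);
}

static int retry_task(int (*cb)(void *), void *ctx, unsigned int retries,
		      unsigned long delay)
{
	struct retry_work *rw = kzalloc(sizeof(*rw), GFP_KERNEL);

	if (!rw)
		return -ENOMEM;

	INIT_DELAYED_WORK(&rw->work, retry_worker);
	rw->count = retries;
	rw->delay = delay;
	rw->cb = cb;
	rw->ctx = ctx;

	schedule_delayed_work(&rw->work, delay);
	return 0;
}
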
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index cee666790907..280f1e1bda2c 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -300,7 +300,6 @@ struct ucsi {
#define EVENT_PENDING 0
#define COMMAND_PENDING 1
#define ACK_PENDING 2
-#define EVENT_PROCESSING 3
};
#define UCSI_MAX_SVID 5
@@ -317,6 +316,7 @@ struct ucsi_connector {
struct mutex lock; /* port lock */
struct work_struct work;
struct completion complete;
+ struct workqueue_struct *wq;
struct typec_port *port;
struct typec_partner *partner;
@@ -326,7 +326,6 @@ struct ucsi_connector {
struct typec_capability typec_cap;
- u16 unprocessed_changes;
struct ucsi_connector_status status;
struct ucsi_connector_capability cap;
struct power_supply *psy;
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 04976435ad73..6771f05e32c2 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -78,7 +78,7 @@ static int ucsi_acpi_sync_write(struct ucsi *ucsi, unsigned int offset,
if (ret)
goto out_clear_bit;
- if (!wait_for_completion_timeout(&ua->complete, 60 * HZ))
+ if (!wait_for_completion_timeout(&ua->complete, HZ))
ret = -ETIMEDOUT;
out_clear_bit:
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 2dc58766273a..d87deee3e26e 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -363,7 +363,7 @@ static ssize_t skel_write(struct file *file, const char *user_buffer,
int retval = 0;
struct urb *urb = NULL;
char *buf = NULL;
- size_t writesize = min(count, (size_t)MAX_TRANSFER);
+ size_t writesize = min_t(size_t, count, MAX_TRANSFER);
dev = file->private_data;
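
min() requires both arguments to have the same type, which is why the old usb-skeleton code cast MAX_TRANSFER to size_t; min_t() evaluates both operands in the named type instead. A trivial illustration with a made-up limit:

#include <linux/minmax.h>
#include <linux/types.h>

static size_t clamp_write(size_t count)
{
	const int max_transfer = 4096;	/* hypothetical per-URB limit */

	/* min(count, max_transfer) would mix size_t and int;
	 * min_t() compares both as size_t.
	 */
	return min_t(size_t, count, max_transfer);
}
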
diff --git a/drivers/vdpa/Kconfig b/drivers/vdpa/Kconfig
index 3d91982d8371..50f45d037611 100644
--- a/drivers/vdpa/Kconfig
+++ b/drivers/vdpa/Kconfig
@@ -78,4 +78,12 @@ config VP_VDPA
help
This kernel module bridges virtio PCI device to vDPA bus.
+config ALIBABA_ENI_VDPA
+ tristate "vDPA driver for Alibaba ENI"
+ select VIRTIO_PCI_LIB_LEGACY
+ depends on PCI_MSI && X86
+ help
+ VDPA driver for Alibaba ENI (Elastic Network Interface), which is built upon
+ the virtio 0.9.5 specification.
+
endif # VDPA
diff --git a/drivers/vdpa/Makefile b/drivers/vdpa/Makefile
index f02ebed33f19..15665563a7f4 100644
--- a/drivers/vdpa/Makefile
+++ b/drivers/vdpa/Makefile
@@ -5,3 +5,4 @@ obj-$(CONFIG_VDPA_USER) += vdpa_user/
obj-$(CONFIG_IFCVF) += ifcvf/
obj-$(CONFIG_MLX5_VDPA) += mlx5/
obj-$(CONFIG_VP_VDPA) += virtio_pci/
+obj-$(CONFIG_ALIBABA_ENI_VDPA) += alibaba/
diff --git a/drivers/vdpa/alibaba/Makefile b/drivers/vdpa/alibaba/Makefile
new file mode 100644
index 000000000000..ef4aae69f87a
--- /dev/null
+++ b/drivers/vdpa/alibaba/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_ALIBABA_ENI_VDPA) += eni_vdpa.o
+
diff --git a/drivers/vdpa/alibaba/eni_vdpa.c b/drivers/vdpa/alibaba/eni_vdpa.c
new file mode 100644
index 000000000000..3f788794571a
--- /dev/null
+++ b/drivers/vdpa/alibaba/eni_vdpa.c
@@ -0,0 +1,553 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * vDPA bridge driver for Alibaba ENI(Elastic Network Interface)
+ *
+ * Copyright (c) 2021, Alibaba Inc. All rights reserved.
+ * Author: Wu Zongyong <wuzongyong@linux.alibaba.com>
+ *
+ */
+
+#include "linux/bits.h"
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/vdpa.h>
+#include <linux/virtio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_legacy.h>
+#include <uapi/linux/virtio_net.h>
+
+#define ENI_MSIX_NAME_SIZE 256
+
+#define ENI_ERR(pdev, fmt, ...) \
+ dev_err(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
+#define ENI_DBG(pdev, fmt, ...) \
+ dev_dbg(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
+#define ENI_INFO(pdev, fmt, ...) \
+ dev_info(&pdev->dev, "%s"fmt, "eni_vdpa: ", ##__VA_ARGS__)
+
+struct eni_vring {
+ void __iomem *notify;
+ char msix_name[ENI_MSIX_NAME_SIZE];
+ struct vdpa_callback cb;
+ int irq;
+};
+
+struct eni_vdpa {
+ struct vdpa_device vdpa;
+ struct virtio_pci_legacy_device ldev;
+ struct eni_vring *vring;
+ struct vdpa_callback config_cb;
+ char msix_name[ENI_MSIX_NAME_SIZE];
+ int config_irq;
+ int queues;
+ int vectors;
+};
+
+static struct eni_vdpa *vdpa_to_eni(struct vdpa_device *vdpa)
+{
+ return container_of(vdpa, struct eni_vdpa, vdpa);
+}
+
+static struct virtio_pci_legacy_device *vdpa_to_ldev(struct vdpa_device *vdpa)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+
+ return &eni_vdpa->ldev;
+}
+
+static u64 eni_vdpa_get_features(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+ u64 features = vp_legacy_get_features(ldev);
+
+ features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM);
+ features |= BIT_ULL(VIRTIO_F_ORDER_PLATFORM);
+
+ return features;
+}
+
+static int eni_vdpa_set_features(struct vdpa_device *vdpa, u64 features)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ if (!(features & BIT_ULL(VIRTIO_NET_F_MRG_RXBUF)) && features) {
+ ENI_ERR(ldev->pci_dev,
+ "VIRTIO_NET_F_MRG_RXBUF is not negotiated\n");
+ return -EINVAL;
+ }
+
+ vp_legacy_set_features(ldev, (u32)features);
+
+ return 0;
+}
+
+static u8 eni_vdpa_get_status(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_status(ldev);
+}
+
+static int eni_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+ int irq = eni_vdpa->vring[idx].irq;
+
+ if (irq == VIRTIO_MSI_NO_VECTOR)
+ return -EINVAL;
+
+ return irq;
+}
+
+static void eni_vdpa_free_irq(struct eni_vdpa *eni_vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ struct pci_dev *pdev = ldev->pci_dev;
+ int i;
+
+ for (i = 0; i < eni_vdpa->queues; i++) {
+ if (eni_vdpa->vring[i].irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_legacy_queue_vector(ldev, i, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, eni_vdpa->vring[i].irq,
+ &eni_vdpa->vring[i]);
+ eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ }
+ }
+
+ if (eni_vdpa->config_irq != VIRTIO_MSI_NO_VECTOR) {
+ vp_legacy_config_vector(ldev, VIRTIO_MSI_NO_VECTOR);
+ devm_free_irq(&pdev->dev, eni_vdpa->config_irq, eni_vdpa);
+ eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+ }
+
+ if (eni_vdpa->vectors) {
+ pci_free_irq_vectors(pdev);
+ eni_vdpa->vectors = 0;
+ }
+}
+
+static irqreturn_t eni_vdpa_vq_handler(int irq, void *arg)
+{
+ struct eni_vring *vring = arg;
+
+ if (vring->cb.callback)
+ return vring->cb.callback(vring->cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t eni_vdpa_config_handler(int irq, void *arg)
+{
+ struct eni_vdpa *eni_vdpa = arg;
+
+ if (eni_vdpa->config_cb.callback)
+ return eni_vdpa->config_cb.callback(eni_vdpa->config_cb.private);
+
+ return IRQ_HANDLED;
+}
+
+static int eni_vdpa_request_irq(struct eni_vdpa *eni_vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ struct pci_dev *pdev = ldev->pci_dev;
+ int i, ret, irq;
+ int queues = eni_vdpa->queues;
+ int vectors = queues + 1;
+
+ ret = pci_alloc_irq_vectors(pdev, vectors, vectors, PCI_IRQ_MSIX);
+ if (ret != vectors) {
+ ENI_ERR(pdev,
+ "failed to allocate irq vectors want %d but %d\n",
+ vectors, ret);
+ return ret;
+ }
+
+ eni_vdpa->vectors = vectors;
+
+ for (i = 0; i < queues; i++) {
+ snprintf(eni_vdpa->vring[i].msix_name, ENI_MSIX_NAME_SIZE,
+ "eni-vdpa[%s]-%d\n", pci_name(pdev), i);
+ irq = pci_irq_vector(pdev, i);
+ ret = devm_request_irq(&pdev->dev, irq,
+ eni_vdpa_vq_handler,
+ 0, eni_vdpa->vring[i].msix_name,
+ &eni_vdpa->vring[i]);
+ if (ret) {
+ ENI_ERR(pdev, "failed to request irq for vq %d\n", i);
+ goto err;
+ }
+ vp_legacy_queue_vector(ldev, i, i);
+ eni_vdpa->vring[i].irq = irq;
+ }
+
+ snprintf(eni_vdpa->msix_name, ENI_MSIX_NAME_SIZE, "eni-vdpa[%s]-config\n",
+ pci_name(pdev));
+ irq = pci_irq_vector(pdev, queues);
+ ret = devm_request_irq(&pdev->dev, irq, eni_vdpa_config_handler, 0,
+ eni_vdpa->msix_name, eni_vdpa);
+ if (ret) {
+ ENI_ERR(pdev, "failed to request irq for config vq %d\n", i);
+ goto err;
+ }
+ vp_legacy_config_vector(ldev, queues);
+ eni_vdpa->config_irq = irq;
+
+ return 0;
+err:
+ eni_vdpa_free_irq(eni_vdpa);
+ return ret;
+}
+
+static void eni_vdpa_set_status(struct vdpa_device *vdpa, u8 status)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ u8 s = eni_vdpa_get_status(vdpa);
+
+ if (status & VIRTIO_CONFIG_S_DRIVER_OK &&
+ !(s & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ eni_vdpa_request_irq(eni_vdpa);
+ }
+
+ vp_legacy_set_status(ldev, status);
+
+ if (!(status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ (s & VIRTIO_CONFIG_S_DRIVER_OK))
+ eni_vdpa_free_irq(eni_vdpa);
+}
+
+static int eni_vdpa_reset(struct vdpa_device *vdpa)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ u8 s = eni_vdpa_get_status(vdpa);
+
+ vp_legacy_set_status(ldev, 0);
+
+ if (s & VIRTIO_CONFIG_S_DRIVER_OK)
+ eni_vdpa_free_irq(eni_vdpa);
+
+ return 0;
+}
+
+static u16 eni_vdpa_get_vq_num_max(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_queue_size(ldev, 0);
+}
+
+static u16 eni_vdpa_get_vq_num_min(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_queue_size(ldev, 0);
+}
+
+static int eni_vdpa_get_vq_state(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_vq_state *state)
+{
+ return -EOPNOTSUPP;
+}
+
+static int eni_vdpa_set_vq_state(struct vdpa_device *vdpa, u16 qid,
+ const struct vdpa_vq_state *state)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+ const struct vdpa_vq_state_split *split = &state->split;
+
+ /* ENI is built upon the virtio-pci specification, which does not
+ * support setting the state of a virtqueue. But if the requested
+ * state happens to equal the device's initial state, let it pass.
+ */
+ if (!vp_legacy_get_queue_enable(ldev, qid)
+ && split->avail_index == 0)
+ return 0;
+
+ return -EOPNOTSUPP;
+}
+
+static void eni_vdpa_set_vq_cb(struct vdpa_device *vdpa, u16 qid,
+ struct vdpa_callback *cb)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+
+ eni_vdpa->vring[qid].cb = *cb;
+}
+
+static void eni_vdpa_set_vq_ready(struct vdpa_device *vdpa, u16 qid,
+ bool ready)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ /* ENI is a legacy virtio-pci device, and the specification has no
+ * way to disable an individual virtqueue. We can still effectively
+ * disable a virtqueue by setting its address to 0.
+ */
+ if (!ready)
+ vp_legacy_set_queue_address(ldev, qid, 0);
+}
+
+static bool eni_vdpa_get_vq_ready(struct vdpa_device *vdpa, u16 qid)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return vp_legacy_get_queue_enable(ldev, qid);
+}
+
+static void eni_vdpa_set_vq_num(struct vdpa_device *vdpa, u16 qid,
+ u32 num)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+ struct pci_dev *pdev = ldev->pci_dev;
+ u16 n = vp_legacy_get_queue_size(ldev, qid);
+
+ /* ENI is a legacy virtio-pci device, which does not allow the
+ * virtqueue size to be changed. Just report an error if someone
+ * tries to change it.
+ */
+ if (num != n)
+ ENI_ERR(pdev,
+ "not support to set vq %u fixed num %u to %u\n",
+ qid, n, num);
+}
+
+static int eni_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 qid,
+ u64 desc_area, u64 driver_area,
+ u64 device_area)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+ u32 pfn = desc_area >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
+
+ vp_legacy_set_queue_address(ldev, qid, pfn);
+
+ return 0;
+}
+
+static void eni_vdpa_kick_vq(struct vdpa_device *vdpa, u16 qid)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+
+ iowrite16(qid, eni_vdpa->vring[qid].notify);
+}
+
+static u32 eni_vdpa_get_device_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return ldev->id.device;
+}
+
+static u32 eni_vdpa_get_vendor_id(struct vdpa_device *vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = vdpa_to_ldev(vdpa);
+
+ return ldev->id.vendor;
+}
+
+static u32 eni_vdpa_get_vq_align(struct vdpa_device *vdpa)
+{
+ return VIRTIO_PCI_VRING_ALIGN;
+}
+
+static size_t eni_vdpa_get_config_size(struct vdpa_device *vdpa)
+{
+ return sizeof(struct virtio_net_config);
+}
+
+static void eni_vdpa_get_config(struct vdpa_device *vdpa,
+ unsigned int offset,
+ void *buf, unsigned int len)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ void __iomem *ioaddr = ldev->ioaddr +
+ VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
+ offset;
+ u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ *p++ = ioread8(ioaddr + i);
+}
+
+static void eni_vdpa_set_config(struct vdpa_device *vdpa,
+ unsigned int offset, const void *buf,
+ unsigned int len)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ void __iomem *ioaddr = ldev->ioaddr +
+ VIRTIO_PCI_CONFIG_OFF(eni_vdpa->vectors) +
+ offset;
+ const u8 *p = buf;
+ int i;
+
+ for (i = 0; i < len; i++)
+ iowrite8(*p++, ioaddr + i);
+}
+
+static void eni_vdpa_set_config_cb(struct vdpa_device *vdpa,
+ struct vdpa_callback *cb)
+{
+ struct eni_vdpa *eni_vdpa = vdpa_to_eni(vdpa);
+
+ eni_vdpa->config_cb = *cb;
+}
+
+static const struct vdpa_config_ops eni_vdpa_ops = {
+ .get_features = eni_vdpa_get_features,
+ .set_features = eni_vdpa_set_features,
+ .get_status = eni_vdpa_get_status,
+ .set_status = eni_vdpa_set_status,
+ .reset = eni_vdpa_reset,
+ .get_vq_num_max = eni_vdpa_get_vq_num_max,
+ .get_vq_num_min = eni_vdpa_get_vq_num_min,
+ .get_vq_state = eni_vdpa_get_vq_state,
+ .set_vq_state = eni_vdpa_set_vq_state,
+ .set_vq_cb = eni_vdpa_set_vq_cb,
+ .set_vq_ready = eni_vdpa_set_vq_ready,
+ .get_vq_ready = eni_vdpa_get_vq_ready,
+ .set_vq_num = eni_vdpa_set_vq_num,
+ .set_vq_address = eni_vdpa_set_vq_address,
+ .kick_vq = eni_vdpa_kick_vq,
+ .get_device_id = eni_vdpa_get_device_id,
+ .get_vendor_id = eni_vdpa_get_vendor_id,
+ .get_vq_align = eni_vdpa_get_vq_align,
+ .get_config_size = eni_vdpa_get_config_size,
+ .get_config = eni_vdpa_get_config,
+ .set_config = eni_vdpa_set_config,
+ .set_config_cb = eni_vdpa_set_config_cb,
+ .get_vq_irq = eni_vdpa_get_vq_irq,
+};
+
+static u16 eni_vdpa_get_num_queues(struct eni_vdpa *eni_vdpa)
+{
+ struct virtio_pci_legacy_device *ldev = &eni_vdpa->ldev;
+ u32 features = vp_legacy_get_features(ldev);
+ u16 num = 2;
+
+ if (features & BIT_ULL(VIRTIO_NET_F_MQ)) {
+ __virtio16 max_virtqueue_pairs;
+
+ eni_vdpa_get_config(&eni_vdpa->vdpa,
+ offsetof(struct virtio_net_config, max_virtqueue_pairs),
+ &max_virtqueue_pairs,
+ sizeof(max_virtqueue_pairs));
+ num = 2 * __virtio16_to_cpu(virtio_legacy_is_little_endian(),
+ max_virtqueue_pairs);
+ }
+
+ if (features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))
+ num += 1;
+
+ return num;
+}
+
+static void eni_vdpa_free_irq_vectors(void *data)
+{
+ pci_free_irq_vectors(data);
+}
+
+static int eni_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct device *dev = &pdev->dev;
+ struct eni_vdpa *eni_vdpa;
+ struct virtio_pci_legacy_device *ldev;
+ int ret, i;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ eni_vdpa = vdpa_alloc_device(struct eni_vdpa, vdpa,
+ dev, &eni_vdpa_ops, NULL, false);
+ if (IS_ERR(eni_vdpa)) {
+ ENI_ERR(pdev, "failed to allocate vDPA structure\n");
+ return PTR_ERR(eni_vdpa);
+ }
+
+ ldev = &eni_vdpa->ldev;
+ ldev->pci_dev = pdev;
+
+ ret = vp_legacy_probe(ldev);
+ if (ret) {
+ ENI_ERR(pdev, "failed to probe legacy PCI device\n");
+ goto err;
+ }
+
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, eni_vdpa);
+
+ eni_vdpa->vdpa.dma_dev = &pdev->dev;
+ eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa);
+
+ ret = devm_add_action_or_reset(dev, eni_vdpa_free_irq_vectors, pdev);
+ if (ret) {
+ ENI_ERR(pdev,
+ "failed for adding devres for freeing irq vectors\n");
+ goto err;
+ }
+
+ eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues,
+ sizeof(*eni_vdpa->vring),
+ GFP_KERNEL);
+ if (!eni_vdpa->vring) {
+ ret = -ENOMEM;
+ ENI_ERR(pdev, "failed to allocate virtqueues\n");
+ goto err;
+ }
+
+ for (i = 0; i < eni_vdpa->queues; i++) {
+ eni_vdpa->vring[i].irq = VIRTIO_MSI_NO_VECTOR;
+ eni_vdpa->vring[i].notify = ldev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
+ }
+ eni_vdpa->config_irq = VIRTIO_MSI_NO_VECTOR;
+
+ ret = vdpa_register_device(&eni_vdpa->vdpa, eni_vdpa->queues);
+ if (ret) {
+ ENI_ERR(pdev, "failed to register to vdpa bus\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ put_device(&eni_vdpa->vdpa.dev);
+ return ret;
+}
+
+static void eni_vdpa_remove(struct pci_dev *pdev)
+{
+ struct eni_vdpa *eni_vdpa = pci_get_drvdata(pdev);
+
+ vdpa_unregister_device(&eni_vdpa->vdpa);
+ vp_legacy_remove(&eni_vdpa->ldev);
+}
+
+static struct pci_device_id eni_pci_ids[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_TRANS_ID_NET,
+ PCI_SUBVENDOR_ID_REDHAT_QUMRANET,
+ VIRTIO_ID_NET) },
+ { 0 },
+};
+
+static struct pci_driver eni_vdpa_driver = {
+ .name = "alibaba-eni-vdpa",
+ .id_table = eni_pci_ids,
+ .probe = eni_vdpa_probe,
+ .remove = eni_vdpa_remove,
+};
+
+module_pci_driver(eni_vdpa_driver);
+
+MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
+MODULE_DESCRIPTION("Alibaba ENI vDPA driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/vdpa/ifcvf/ifcvf_main.c b/drivers/vdpa/ifcvf/ifcvf_main.c
index dcd648e1f7e7..6dc75ca70b37 100644
--- a/drivers/vdpa/ifcvf/ifcvf_main.c
+++ b/drivers/vdpa/ifcvf/ifcvf_main.c
@@ -499,7 +499,8 @@ static u32 get_dev_type(struct pci_dev *pdev)
return dev_type;
}
-static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+static int ifcvf_vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config)
{
struct ifcvf_vdpa_mgmt_dev *ifcvf_mgmt_dev;
struct ifcvf_adapter *adapter;
diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
index 01a848adf590..daaf7b503677 100644
--- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h
+++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h
@@ -15,7 +15,7 @@ struct mlx5_vdpa_direct_mr {
u64 start;
u64 end;
u32 perm;
- struct mlx5_core_mkey mr;
+ u32 mr;
struct sg_table sg_head;
int log_size;
int nsg;
@@ -25,7 +25,7 @@ struct mlx5_vdpa_direct_mr {
};
struct mlx5_vdpa_mr {
- struct mlx5_core_mkey mkey;
+ u32 mkey;
/* list of direct MRs descendants of this indirect mr */
struct list_head head;
@@ -63,7 +63,7 @@ struct mlx5_control_vq {
unsigned short head;
};
-struct mlx5_ctrl_wq_ent {
+struct mlx5_vdpa_wq_ent {
struct work_struct work;
struct mlx5_vdpa_dev *mvdev;
};
@@ -99,9 +99,9 @@ int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn);
void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn);
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev);
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev);
-int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int inlen);
-int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey);
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey);
int mlx5_vdpa_handle_set_map(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb,
bool *change_map);
int mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *iotlb);
diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c
index ff010c6d0cd3..a639b9208d41 100644
--- a/drivers/vdpa/mlx5/core/mr.c
+++ b/drivers/vdpa/mlx5/core/mr.c
@@ -88,7 +88,7 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
- mlx5_vdpa_destroy_mkey(mvdev, &mr->mr);
+ mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
@@ -162,7 +162,7 @@ again:
}
if (preve == dmr->start) {
- klm->key = cpu_to_be32(dmr->mr.key);
+ klm->key = cpu_to_be32(dmr->mr);
klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
preve = dmr->end;
} else {
@@ -217,7 +217,7 @@ static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr
static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
- mlx5_vdpa_destroy_mkey(mvdev, &mkey->mkey);
+ mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey);
}
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
@@ -449,7 +449,7 @@ static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
- mlx5_vdpa_destroy_mkey(mvdev, &mr->mkey);
+ mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}
static int dup_iotlb(struct mlx5_vdpa_dev *mvdev, struct vhost_iotlb *src)
diff --git a/drivers/vdpa/mlx5/core/resources.c b/drivers/vdpa/mlx5/core/resources.c
index 15e266d0e27a..9800f9bec225 100644
--- a/drivers/vdpa/mlx5/core/resources.c
+++ b/drivers/vdpa/mlx5/core/resources.c
@@ -198,12 +198,11 @@ void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}
-int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey, u32 *in,
+int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
int inlen)
{
u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
u32 mkey_index;
- void *mkc;
int err;
MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
@@ -213,22 +212,18 @@ int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mk
if (err)
return err;
- mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
- mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
- mkey->size = MLX5_GET64(mkc, mkc, len);
- mkey->key |= mlx5_idx_to_mkey(mkey_index);
- mkey->pd = MLX5_GET(mkc, mkc, pd);
+ *mkey |= mlx5_idx_to_mkey(mkey_index);
return 0;
}
-int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, struct mlx5_core_mkey *mkey)
+int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};
MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
- MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey->key));
+ MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}
diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c
index bd56de7484dc..63813fbb5f62 100644
--- a/drivers/vdpa/mlx5/net/mlx5_vnet.c
+++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c
@@ -6,6 +6,7 @@
#include <linux/vringh.h>
#include <uapi/linux/virtio_net.h>
#include <uapi/linux/virtio_ids.h>
+#include <uapi/linux/vdpa.h>
#include <linux/virtio_config.h>
#include <linux/auxiliary_bus.h>
#include <linux/mlx5/cq.h>
@@ -157,10 +158,12 @@ struct mlx5_vdpa_net {
struct mutex reslock;
struct mlx5_flow_table *rxft;
struct mlx5_fc *rx_counter;
- struct mlx5_flow_handle *rx_rule;
+ struct mlx5_flow_handle *rx_rule_ucast;
+ struct mlx5_flow_handle *rx_rule_mcast;
bool setup;
- u16 mtu;
u32 cur_num_vqs;
+ struct notifier_block nb;
+ struct vdpa_callback config_cb;
};
static void free_resources(struct mlx5_vdpa_net *ndev);
@@ -865,7 +868,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque
MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr);
MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr);
MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr);
- MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key);
+ MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey);
MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id);
MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size);
MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id);
@@ -1381,21 +1384,33 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
struct mlx5_flow_table_attr ft_attr = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_namespace *ns;
+ struct mlx5_flow_spec *spec;
+ void *headers_c;
+ void *headers_v;
+ u8 *dmac_c;
+ u8 *dmac_v;
int err;
- /* for now, one entry, match all, forward to tir */
- ft_attr.max_fte = 1;
- ft_attr.autogroup.max_num_groups = 1;
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ ft_attr.max_fte = 2;
+ ft_attr.autogroup.max_num_groups = 2;
ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS);
if (!ns) {
- mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n");
- return -EOPNOTSUPP;
+ mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n");
+ err = -EOPNOTSUPP;
+ goto err_ns;
}
ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
- if (IS_ERR(ndev->rxft))
- return PTR_ERR(ndev->rxft);
+ if (IS_ERR(ndev->rxft)) {
+ err = PTR_ERR(ndev->rxft);
+ goto err_ns;
+ }
ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false);
if (IS_ERR(ndev->rx_counter)) {
@@ -1403,37 +1418,64 @@ static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev)
goto err_fc;
}
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c, outer_headers.dmac_47_16);
+ memset(dmac_c, 0xff, ETH_ALEN);
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
+ dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v, outer_headers.dmac_47_16);
+ ether_addr_copy(dmac_v, ndev->config.mac);
+
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest[0].tir_num = ndev->res.tirn;
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(ndev->rx_counter);
- ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2);
- if (IS_ERR(ndev->rx_rule)) {
- err = PTR_ERR(ndev->rx_rule);
- ndev->rx_rule = NULL;
- goto err_rule;
+ ndev->rx_rule_ucast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 2);
+
+ if (IS_ERR(ndev->rx_rule_ucast)) {
+ err = PTR_ERR(ndev->rx_rule_ucast);
+ ndev->rx_rule_ucast = NULL;
+ goto err_rule_ucast;
}
+ memset(dmac_c, 0, ETH_ALEN);
+ memset(dmac_v, 0, ETH_ALEN);
+ dmac_c[0] = 1;
+ dmac_v[0] = 1;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+ ndev->rx_rule_mcast = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dest, 1);
+ if (IS_ERR(ndev->rx_rule_mcast)) {
+ err = PTR_ERR(ndev->rx_rule_mcast);
+ ndev->rx_rule_mcast = NULL;
+ goto err_rule_mcast;
+ }
+
+ kvfree(spec);
return 0;
-err_rule:
+err_rule_mcast:
+ mlx5_del_flow_rules(ndev->rx_rule_ucast);
+ ndev->rx_rule_ucast = NULL;
+err_rule_ucast:
mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
err_fc:
mlx5_destroy_flow_table(ndev->rxft);
+err_ns:
+ kvfree(spec);
return err;
}
static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev)
{
- if (!ndev->rx_rule)
+ if (!ndev->rx_rule_ucast)
return;
- mlx5_del_flow_rules(ndev->rx_rule);
+ mlx5_del_flow_rules(ndev->rx_rule_mcast);
+ ndev->rx_rule_mcast = NULL;
+ mlx5_del_flow_rules(ndev->rx_rule_ucast);
+ ndev->rx_rule_ucast = NULL;
mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter);
mlx5_destroy_flow_table(ndev->rxft);
-
- ndev->rx_rule = NULL;
}
static virtio_net_ctrl_ack handle_ctrl_mac(struct mlx5_vdpa_dev *mvdev, u8 cmd)
@@ -1557,14 +1599,14 @@ static void mlx5_cvq_kick_handler(struct work_struct *work)
{
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
struct virtio_net_ctrl_hdr ctrl;
- struct mlx5_ctrl_wq_ent *wqent;
+ struct mlx5_vdpa_wq_ent *wqent;
struct mlx5_vdpa_dev *mvdev;
struct mlx5_control_vq *cvq;
struct mlx5_vdpa_net *ndev;
size_t read, write;
int err;
- wqent = container_of(work, struct mlx5_ctrl_wq_ent, work);
+ wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
mvdev = wqent->mvdev;
ndev = to_mlx5_vdpa_ndev(mvdev);
cvq = &mvdev->cvq;
@@ -1616,7 +1658,7 @@ static void mlx5_vdpa_kick_vq(struct vdpa_device *vdev, u16 idx)
struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
struct mlx5_vdpa_virtqueue *mvq;
- struct mlx5_ctrl_wq_ent *wqent;
+ struct mlx5_vdpa_wq_ent *wqent;
if (!is_index_valid(mvdev, idx))
return;
@@ -1852,6 +1894,7 @@ static u64 mlx5_vdpa_get_features(struct vdpa_device *vdev)
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_VQ);
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR);
ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_MQ);
+ ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_NET_F_STATUS);
print_features(mvdev, ndev->mvdev.mlx_features, false);
return ndev->mvdev.mlx_features;
@@ -1942,16 +1985,16 @@ static int mlx5_vdpa_set_features(struct vdpa_device *vdev, u64 features)
return err;
ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features;
- ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu);
- ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
update_cvq_info(mvdev);
return err;
}
static void mlx5_vdpa_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb)
{
- /* not implemented */
- mlx5_vdpa_warn(to_mvdev(vdev), "set config callback not supported\n");
+ struct mlx5_vdpa_dev *mvdev = to_mvdev(vdev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+
+ ndev->config_cb = *cb;
}
#define MLX5_VDPA_MAX_VQ_ENTRIES 256
@@ -2192,7 +2235,6 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev)
clear_vqs_ready(ndev);
mlx5_vdpa_destroy_mr(&ndev->mvdev);
ndev->mvdev.status = 0;
- ndev->mvdev.mlx_features = 0;
memset(ndev->event_cbs, 0, sizeof(ndev->event_cbs));
ndev->mvdev.actual_features = 0;
++mvdev->generation;
@@ -2404,7 +2446,84 @@ struct mlx5_vdpa_mgmtdev {
struct mlx5_vdpa_net *ndev;
};
-static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
+static u8 query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport)
+{
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)] = {};
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)] = {};
+ int err;
+
+ MLX5_SET(query_vport_state_in, in, opcode, MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+ MLX5_SET(query_vport_state_in, in, vport_number, vport);
+ if (vport)
+ MLX5_SET(query_vport_state_in, in, other_vport, 1);
+
+ err = mlx5_cmd_exec_inout(mdev, query_vport_state, in, out);
+ if (err)
+ return 0;
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+
+static bool get_link_state(struct mlx5_vdpa_dev *mvdev)
+{
+ if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) ==
+ VPORT_STATE_UP)
+ return true;
+
+ return false;
+}
+
+static void update_carrier(struct work_struct *work)
+{
+ struct mlx5_vdpa_wq_ent *wqent;
+ struct mlx5_vdpa_dev *mvdev;
+ struct mlx5_vdpa_net *ndev;
+
+ wqent = container_of(work, struct mlx5_vdpa_wq_ent, work);
+ mvdev = wqent->mvdev;
+ ndev = to_mlx5_vdpa_ndev(mvdev);
+ if (get_link_state(mvdev))
+ ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ else
+ ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
+
+ if (ndev->config_cb.callback)
+ ndev->config_cb.callback(ndev->config_cb.private);
+
+ kfree(wqent);
+}
+
+static int event_handler(struct notifier_block *nb, unsigned long event, void *param)
+{
+ struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb);
+ struct mlx5_eqe *eqe = param;
+ int ret = NOTIFY_DONE;
+ struct mlx5_vdpa_wq_ent *wqent;
+
+ if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
+ switch (eqe->sub_type) {
+ case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
+ case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
+ wqent = kzalloc(sizeof(*wqent), GFP_ATOMIC);
+ if (!wqent)
+ return NOTIFY_DONE;
+
+ wqent->mvdev = &ndev->mvdev;
+ INIT_WORK(&wqent->work, update_carrier);
+ queue_work(ndev->mvdev.wq, &wqent->work);
+ ret = NOTIFY_OK;
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+ return ret;
+ }
+ return ret;
+}
+
+static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name,
+ const struct vdpa_dev_set_config *add_config)
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct virtio_net_config *config;
@@ -2413,6 +2532,7 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
struct mlx5_vdpa_net *ndev;
struct mlx5_core_dev *mdev;
u32 max_vqs;
+ u16 mtu;
int err;
if (mgtdev->ndev)
@@ -2440,13 +2560,24 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
init_mvqs(ndev);
mutex_init(&ndev->reslock);
config = &ndev->config;
- err = query_mtu(mdev, &ndev->mtu);
+ err = query_mtu(mdev, &mtu);
if (err)
goto err_mtu;
- err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
- if (err)
- goto err_mtu;
+ ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu);
+
+ if (get_link_state(mvdev))
+ ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP);
+ else
+ ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP);
+
+ if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) {
+ memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN);
+ } else {
+ err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac);
+ if (err)
+ goto err_mtu;
+ }
if (!is_zero_ether_addr(config->mac)) {
pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev));
@@ -2473,12 +2604,14 @@ static int mlx5_vdpa_dev_add(struct vdpa_mgmt_dev *v_mdev, const char *name)
if (err)
goto err_mr;
- mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_ctrl_wq");
+ mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq");
if (!mvdev->wq) {
err = -ENOMEM;
goto err_res2;
}
+ ndev->nb.notifier_call = event_handler;
+ mlx5_notifier_register(mdev, &ndev->nb);
ndev->cur_num_vqs = 2 * mlx5_vdpa_max_qps(max_vqs);
mvdev->vdev.mdev = &mgtdev->mgtdev;
err = _vdpa_register_device(&mvdev->vdev, ndev->cur_num_vqs + 1);
@@ -2509,7 +2642,9 @@ static void mlx5_vdpa_dev_del(struct vdpa_mgmt_dev *v_mdev, struct vdpa_device *
{
struct mlx5_vdpa_mgmtdev *mgtdev = container_of(v_mdev, struct mlx5_vdpa_mgmtdev, mgtdev);
struct mlx5_vdpa_dev *mvdev = to_mvdev(dev);
+ struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev);
+ mlx5_notifier_unregister(mvdev->mdev, &ndev->nb);
destroy_workqueue(mvdev->wq);
_vdpa_unregister_device(dev);
mgtdev->ndev = NULL;
@@ -2541,6 +2676,7 @@ static int mlx5v_probe(struct auxiliary_device *adev,
mgtdev->mgtdev.ops = &mdev_ops;
mgtdev->mgtdev.device = mdev->device;
mgtdev->mgtdev.id_table = id_table;
+ mgtdev->mgtdev.config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
mgtdev->madev = madev;
err = vdpa_mgmtdev_register(&mgtdev->mgtdev);
diff --git a/drivers/vdpa/vdpa.c b/drivers/vdpa/vdpa.c
index 1dc121a07a93..7332a74a4b00 100644
--- a/drivers/vdpa/vdpa.c
+++ b/drivers/vdpa/vdpa.c
@@ -14,6 +14,7 @@
#include <uapi/linux/vdpa.h>
#include <net/genetlink.h>
#include <linux/mod_devicetable.h>
+#include <linux/virtio_ids.h>
static LIST_HEAD(mdev_head);
/* A global mutex that protects vdpa management device and device level operations. */
@@ -26,8 +27,16 @@ static int vdpa_dev_probe(struct device *d)
{
struct vdpa_device *vdev = dev_to_vdpa(d);
struct vdpa_driver *drv = drv_to_vdpa(vdev->dev.driver);
+ const struct vdpa_config_ops *ops = vdev->config;
+ u32 max_num, min_num = 1;
int ret = 0;
+ max_num = ops->get_vq_num_max(vdev);
+ if (ops->get_vq_num_min)
+ min_num = ops->get_vq_num_min(vdev);
+ if (max_num < min_num)
+ return -EINVAL;
+
if (drv && drv->probe)
ret = drv->probe(vdev);
@@ -58,6 +67,7 @@ static void vdpa_release_dev(struct device *d)
ops->free(vdev);
ida_simple_remove(&vdpa_index_ida, vdev->index);
+ mutex_destroy(&vdev->cf_mutex);
kfree(vdev);
}
@@ -119,6 +129,7 @@ struct vdpa_device *__vdpa_alloc_device(struct device *parent,
if (err)
goto err_name;
+ mutex_init(&vdev->cf_mutex);
device_initialize(&vdev->dev);
return vdev;
@@ -289,6 +300,46 @@ void vdpa_mgmtdev_unregister(struct vdpa_mgmt_dev *mdev)
}
EXPORT_SYMBOL_GPL(vdpa_mgmtdev_unregister);
+/**
+ * vdpa_get_config - Get one or more device configuration fields.
+ * @vdev: vdpa device to operate on
+ * @offset: starting byte offset of the field
+ * @buf: buffer pointer to read to
+ * @len: length of the configuration fields in bytes
+ */
+void vdpa_get_config(struct vdpa_device *vdev, unsigned int offset,
+ void *buf, unsigned int len)
+{
+ const struct vdpa_config_ops *ops = vdev->config;
+
+ mutex_lock(&vdev->cf_mutex);
+ /*
+ * Config accesses aren't supposed to trigger before features are set.
+ * If such an access happens anyway, assume a legacy guest.
+ */
+ if (!vdev->features_valid)
+ vdpa_set_features(vdev, 0);
+ ops->get_config(vdev, offset, buf, len);
+ mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL_GPL(vdpa_get_config);
+
+/**
+ * vdpa_set_config - Set one or more device configuration fields.
+ * @vdev: vdpa device to operate on
+ * @offset: starting byte offset of the field
+ * @buf: buffer pointer to read from
+ * @length: length of the configuration fields in bytes
+ */
+void vdpa_set_config(struct vdpa_device *vdev, unsigned int offset,
+ const void *buf, unsigned int length)
+{
+ mutex_lock(&vdev->cf_mutex);
+ vdev->config->set_config(vdev, offset, buf, length);
+ mutex_unlock(&vdev->cf_mutex);
+}
+EXPORT_SYMBOL_GPL(vdpa_set_config);
+
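
As a small usage illustration of the helpers above (the function name below is
hypothetical), a caller that needs the MAC address of a virtio-net vdpa device can
read just that field through the serialized accessor; the netlink config fill code
added later in this patch follows the same pattern for the whole structure:

/* Hypothetical caller sketch: read the MAC of a virtio-net vdpa device. */
static void example_read_mac(struct vdpa_device *vdev, u8 mac[ETH_ALEN])
{
	vdpa_get_config(vdev, offsetof(struct virtio_net_config, mac),
			mac, ETH_ALEN);
}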
static bool mgmtdev_handle_match(const struct vdpa_mgmt_dev *mdev,
const char *busname, const char *devname)
{
@@ -428,9 +479,15 @@ out:
return msg->len;
}
+#define VDPA_DEV_NET_ATTRS_MASK ((1 << VDPA_ATTR_DEV_NET_CFG_MACADDR) | \
+ (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+
static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *info)
{
+ struct vdpa_dev_set_config config = {};
+ struct nlattr **nl_attrs = info->attrs;
struct vdpa_mgmt_dev *mdev;
+ const u8 *macaddr;
const char *name;
int err = 0;
@@ -439,6 +496,26 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
name = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]) {
+ macaddr = nla_data(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MACADDR]);
+ memcpy(config.net.mac, macaddr, sizeof(config.net.mac));
+ config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR);
+ }
+ if (nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]) {
+ config.net.mtu =
+ nla_get_u16(nl_attrs[VDPA_ATTR_DEV_NET_CFG_MTU]);
+ config.mask |= (1 << VDPA_ATTR_DEV_NET_CFG_MTU);
+ }
+
+ /* Skip the capability check if the user did not ask to configure any
+ * device networking attributes. In that case the user has most likely
+ * configured such attributes through a device-specific method, or is
+ * relying on the device defaults.
+ */
+ if ((config.mask & VDPA_DEV_NET_ATTRS_MASK) &&
+ !netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
mutex_lock(&vdpa_dev_mutex);
mdev = vdpa_mgmtdev_get_from_attr(info->attrs);
if (IS_ERR(mdev)) {
@@ -446,8 +523,14 @@ static int vdpa_nl_cmd_dev_add_set_doit(struct sk_buff *skb, struct genl_info *i
err = PTR_ERR(mdev);
goto err;
}
+ if ((config.mask & mdev->config_attr_mask) != config.mask) {
+ NL_SET_ERR_MSG_MOD(info->extack,
+ "All provided attributes are not supported");
+ err = -EOPNOTSUPP;
+ goto err;
+ }
- err = mdev->ops->dev_add(mdev, name);
+ err = mdev->ops->dev_add(mdev, name, &config);
err:
mutex_unlock(&vdpa_dev_mutex);
return err;
@@ -492,6 +575,7 @@ vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq
int flags, struct netlink_ext_ack *extack)
{
u16 max_vq_size;
+ u16 min_vq_size = 1;
u32 device_id;
u32 vendor_id;
void *hdr;
@@ -508,6 +592,8 @@ vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq
device_id = vdev->config->get_device_id(vdev);
vendor_id = vdev->config->get_vendor_id(vdev);
max_vq_size = vdev->config->get_vq_num_max(vdev);
+ if (vdev->config->get_vq_num_min)
+ min_vq_size = vdev->config->get_vq_num_min(vdev);
err = -EMSGSIZE;
if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev)))
@@ -520,6 +606,8 @@ vdpa_dev_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq
goto msg_err;
if (nla_put_u16(msg, VDPA_ATTR_DEV_MAX_VQ_SIZE, max_vq_size))
goto msg_err;
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_MIN_VQ_SIZE, min_vq_size))
+ goto msg_err;
genlmsg_end(msg, hdr);
return 0;
@@ -612,10 +700,175 @@ static int vdpa_nl_cmd_dev_get_dumpit(struct sk_buff *msg, struct netlink_callba
return msg->len;
}
+static int vdpa_dev_net_mq_config_fill(struct vdpa_device *vdev,
+ struct sk_buff *msg, u64 features,
+ const struct virtio_net_config *config)
+{
+ u16 val_u16;
+
+ if ((features & (1ULL << VIRTIO_NET_F_MQ)) == 0)
+ return 0;
+
+ val_u16 = le16_to_cpu(config->max_virtqueue_pairs);
+ return nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MAX_VQP, val_u16);
+}
+
+static int vdpa_dev_net_config_fill(struct vdpa_device *vdev, struct sk_buff *msg)
+{
+ struct virtio_net_config config = {};
+ u64 features;
+ u16 val_u16;
+
+ vdpa_get_config(vdev, 0, &config, sizeof(config));
+
+ if (nla_put(msg, VDPA_ATTR_DEV_NET_CFG_MACADDR, sizeof(config.mac),
+ config.mac))
+ return -EMSGSIZE;
+
+ val_u16 = le16_to_cpu(config.status);
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_STATUS, val_u16))
+ return -EMSGSIZE;
+
+ val_u16 = le16_to_cpu(config.mtu);
+ if (nla_put_u16(msg, VDPA_ATTR_DEV_NET_CFG_MTU, val_u16))
+ return -EMSGSIZE;
+
+ features = vdev->config->get_features(vdev);
+
+ return vdpa_dev_net_mq_config_fill(vdev, msg, features, &config);
+}
+
+static int
+vdpa_dev_config_fill(struct vdpa_device *vdev, struct sk_buff *msg, u32 portid, u32 seq,
+ int flags, struct netlink_ext_ack *extack)
+{
+ u32 device_id;
+ void *hdr;
+ int err;
+
+ hdr = genlmsg_put(msg, portid, seq, &vdpa_nl_family, flags,
+ VDPA_CMD_DEV_CONFIG_GET);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ if (nla_put_string(msg, VDPA_ATTR_DEV_NAME, dev_name(&vdev->dev))) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+
+ device_id = vdev->config->get_device_id(vdev);
+ if (nla_put_u32(msg, VDPA_ATTR_DEV_ID, device_id)) {
+ err = -EMSGSIZE;
+ goto msg_err;
+ }
+
+ switch (device_id) {
+ case VIRTIO_ID_NET:
+ err = vdpa_dev_net_config_fill(vdev, msg);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ if (err)
+ goto msg_err;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+msg_err:
+ genlmsg_cancel(msg, hdr);
+ return err;
+}
+
+static int vdpa_nl_cmd_dev_config_get_doit(struct sk_buff *skb, struct genl_info *info)
+{
+ struct vdpa_device *vdev;
+ struct sk_buff *msg;
+ const char *devname;
+ struct device *dev;
+ int err;
+
+ if (!info->attrs[VDPA_ATTR_DEV_NAME])
+ return -EINVAL;
+ devname = nla_data(info->attrs[VDPA_ATTR_DEV_NAME]);
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&vdpa_dev_mutex);
+ dev = bus_find_device(&vdpa_bus, NULL, devname, vdpa_name_match);
+ if (!dev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "device not found");
+ err = -ENODEV;
+ goto dev_err;
+ }
+ vdev = container_of(dev, struct vdpa_device, dev);
+ if (!vdev->mdev) {
+ NL_SET_ERR_MSG_MOD(info->extack, "unmanaged vdpa device");
+ err = -EINVAL;
+ goto mdev_err;
+ }
+ err = vdpa_dev_config_fill(vdev, msg, info->snd_portid, info->snd_seq,
+ 0, info->extack);
+ if (!err)
+ err = genlmsg_reply(msg, info);
+
+mdev_err:
+ put_device(dev);
+dev_err:
+ mutex_unlock(&vdpa_dev_mutex);
+ if (err)
+ nlmsg_free(msg);
+ return err;
+}
+
+static int vdpa_dev_config_dump(struct device *dev, void *data)
+{
+ struct vdpa_device *vdev = container_of(dev, struct vdpa_device, dev);
+ struct vdpa_dev_dump_info *info = data;
+ int err;
+
+ if (!vdev->mdev)
+ return 0;
+ if (info->idx < info->start_idx) {
+ info->idx++;
+ return 0;
+ }
+ err = vdpa_dev_config_fill(vdev, info->msg, NETLINK_CB(info->cb->skb).portid,
+ info->cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ info->cb->extack);
+ if (err)
+ return err;
+
+ info->idx++;
+ return 0;
+}
+
+static int
+vdpa_nl_cmd_dev_config_get_dumpit(struct sk_buff *msg, struct netlink_callback *cb)
+{
+ struct vdpa_dev_dump_info info;
+
+ info.msg = msg;
+ info.cb = cb;
+ info.start_idx = cb->args[0];
+ info.idx = 0;
+
+ mutex_lock(&vdpa_dev_mutex);
+ bus_for_each_dev(&vdpa_bus, NULL, &info, vdpa_dev_config_dump);
+ mutex_unlock(&vdpa_dev_mutex);
+ cb->args[0] = info.idx;
+ return msg->len;
+}
+
static const struct nla_policy vdpa_nl_policy[VDPA_ATTR_MAX + 1] = {
[VDPA_ATTR_MGMTDEV_BUS_NAME] = { .type = NLA_NUL_STRING },
[VDPA_ATTR_MGMTDEV_DEV_NAME] = { .type = NLA_STRING },
[VDPA_ATTR_DEV_NAME] = { .type = NLA_STRING },
+ [VDPA_ATTR_DEV_NET_CFG_MACADDR] = NLA_POLICY_ETH_ADDR,
+ /* virtio spec 1.1 section 5.1.4.1 for valid MTU range */
+ [VDPA_ATTR_DEV_NET_CFG_MTU] = NLA_POLICY_MIN(NLA_U16, 68),
};
static const struct genl_ops vdpa_nl_ops[] = {
@@ -643,6 +896,12 @@ static const struct genl_ops vdpa_nl_ops[] = {
.doit = vdpa_nl_cmd_dev_get_doit,
.dumpit = vdpa_nl_cmd_dev_get_dumpit,
},
+ {
+ .cmd = VDPA_CMD_DEV_CONFIG_GET,
+ .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
+ .doit = vdpa_nl_cmd_dev_config_get_doit,
+ .dumpit = vdpa_nl_cmd_dev_config_get_dumpit,
+ },
};
static struct genl_family vdpa_nl_family __ro_after_init = {
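
Taken together, the netlink changes above let management tools provision a MAC
address and MTU at device-add time and query the live configuration afterwards;
with a recent iproute2 this typically surfaces as "vdpa dev add name vdpa0 mgmtdev
vdpasim_net mac 00:11:22:33:44:55 mtu 9000" followed by "vdpa dev config show vdpa0"
(tool syntax shown for illustration only). On the kernel side a parent driver
consumes the provisioning data in its dev_add callback; a minimal sketch, with the
function name assumed:

/* Sketch only: the minimal shape of a dev_add callback that honors the
 * provisioning attributes validated by the core above.
 */
static int example_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
			   const struct vdpa_dev_set_config *config)
{
	u8 mac[ETH_ALEN];

	if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR))
		memcpy(mac, config->net.mac, ETH_ALEN);
	else
		eth_random_addr(mac);	/* fall back to a random MAC */

	/* ... allocate the vdpa device, apply "mac", then register it ... */
	return 0;
}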
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index a790903f243e..42d401d43911 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -248,7 +248,8 @@ static struct device vdpasim_blk_mgmtdev = {
.release = vdpasim_blk_mgmtdev_release,
};
-static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+static int vdpasim_blk_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index a1ab6163f7d1..76dd24abc791 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -16,6 +16,7 @@
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <uapi/linux/virtio_net.h>
+#include <uapi/linux/vdpa.h>
#include "vdpa_sim.h"
@@ -29,12 +30,6 @@
#define VDPASIM_NET_VQ_NUM 2
-static char *macaddr;
-module_param(macaddr, charp, 0);
-MODULE_PARM_DESC(macaddr, "Ethernet MAC address");
-
-static u8 macaddr_buf[ETH_ALEN];
-
static void vdpasim_net_work(struct work_struct *work)
{
struct vdpasim *vdpasim = container_of(work, struct vdpasim, work);
@@ -112,9 +107,21 @@ static void vdpasim_net_get_config(struct vdpasim *vdpasim, void *config)
{
struct virtio_net_config *net_config = config;
- net_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
net_config->status = cpu_to_vdpasim16(vdpasim, VIRTIO_NET_S_LINK_UP);
- memcpy(net_config->mac, macaddr_buf, ETH_ALEN);
+}
+
+static void vdpasim_net_setup_config(struct vdpasim *vdpasim,
+ const struct vdpa_dev_set_config *config)
+{
+ struct virtio_net_config *vio_config = vdpasim->config;
+
+ if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR))
+ memcpy(vio_config->mac, config->net.mac, ETH_ALEN);
+ if (config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MTU))
+ vio_config->mtu = cpu_to_vdpasim16(vdpasim, config->net.mtu);
+ else
+ /* Setup default MTU to be 1500 */
+ vio_config->mtu = cpu_to_vdpasim16(vdpasim, 1500);
}
static void vdpasim_net_mgmtdev_release(struct device *dev)
@@ -126,7 +133,8 @@ static struct device vdpasim_net_mgmtdev = {
.release = vdpasim_net_mgmtdev_release,
};
-static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config)
{
struct vdpasim_dev_attr dev_attr = {};
struct vdpasim *simdev;
@@ -146,6 +154,8 @@ static int vdpasim_net_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
if (IS_ERR(simdev))
return PTR_ERR(simdev);
+ vdpasim_net_setup_config(simdev, config);
+
ret = _vdpa_register_device(&simdev->vdpa, VDPASIM_NET_VQ_NUM);
if (ret)
goto reg_err;
@@ -179,20 +189,14 @@ static struct vdpa_mgmt_dev mgmt_dev = {
.device = &vdpasim_net_mgmtdev,
.id_table = id_table,
.ops = &vdpasim_net_mgmtdev_ops,
+ .config_attr_mask = (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR |
+ 1 << VDPA_ATTR_DEV_NET_CFG_MTU),
};
static int __init vdpasim_net_init(void)
{
int ret;
- if (macaddr) {
- mac_pton(macaddr, macaddr_buf);
- if (!is_valid_ether_addr(macaddr_buf))
- return -EADDRNOTAVAIL;
- } else {
- eth_random_addr(macaddr_buf);
- }
-
ret = device_register(&vdpasim_net_mgmtdev);
if (ret)
return ret;
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 26e3d90d1e7c..c9204c62f339 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -80,6 +80,7 @@ struct vduse_dev {
struct vdpa_callback config_cb;
struct work_struct inject;
spinlock_t irq_lock;
+ struct rw_semaphore rwsem;
int minor;
bool broken;
bool connected;
@@ -410,6 +411,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
if (domain->bounce_map)
vduse_domain_reset_bounce_map(domain);
+ down_write(&dev->rwsem);
+
dev->status = 0;
dev->driver_features = 0;
dev->generation++;
@@ -443,6 +446,8 @@ static void vduse_dev_reset(struct vduse_dev *dev)
flush_work(&vq->inject);
flush_work(&vq->kick);
}
+
+ up_write(&dev->rwsem);
}
static int vduse_vdpa_set_vq_address(struct vdpa_device *vdpa, u16 idx,
@@ -885,6 +890,23 @@ static void vduse_vq_irq_inject(struct work_struct *work)
spin_unlock_irq(&vq->irq_lock);
}
+static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
+ struct work_struct *irq_work)
+{
+ int ret = -EINVAL;
+
+ down_read(&dev->rwsem);
+ if (!(dev->status & VIRTIO_CONFIG_S_DRIVER_OK))
+ goto unlock;
+
+ ret = 0;
+ queue_work(vduse_irq_wq, irq_work);
+unlock:
+ up_read(&dev->rwsem);
+
+ return ret;
+}
+
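
For userspace, the visible change is that the injection ioctls now fail until the
guest driver has set DRIVER_OK instead of being queued unconditionally. A
userspace-side sketch (illustrative only, helper name assumed) that tolerates the
new failure mode:

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/vduse.h>

/* Userspace sketch: inject a config interrupt, treating EINVAL as
 * "driver not ready yet" per the DRIVER_OK gate added above.
 */
static int example_inject_config_irq(int dev_fd)
{
	if (ioctl(dev_fd, VDUSE_DEV_INJECT_CONFIG_IRQ) == 0)
		return 0;

	return (errno == EINVAL) ? -EAGAIN : -errno;
}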
static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
@@ -966,8 +988,7 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
break;
}
case VDUSE_DEV_INJECT_CONFIG_IRQ:
- ret = 0;
- queue_work(vduse_irq_wq, &dev->inject);
+ ret = vduse_dev_queue_irq_work(dev, &dev->inject);
break;
case VDUSE_VQ_SETUP: {
struct vduse_vq_config config;
@@ -1053,9 +1074,8 @@ static long vduse_dev_ioctl(struct file *file, unsigned int cmd,
if (index >= dev->vq_num)
break;
- ret = 0;
index = array_index_nospec(index, dev->vq_num);
- queue_work(vduse_irq_wq, &dev->vqs[index].inject);
+ ret = vduse_dev_queue_irq_work(dev, &dev->vqs[index].inject);
break;
}
default:
@@ -1136,6 +1156,7 @@ static struct vduse_dev *vduse_dev_create(void)
INIT_LIST_HEAD(&dev->send_list);
INIT_LIST_HEAD(&dev->recv_list);
spin_lock_init(&dev->irq_lock);
+ init_rwsem(&dev->rwsem);
INIT_WORK(&dev->inject, vduse_dev_irq_inject);
init_waitqueue_head(&dev->waitq);
@@ -1482,7 +1503,8 @@ static int vduse_dev_init_vdpa(struct vduse_dev *dev, const char *name)
return 0;
}
-static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name)
+static int vdpa_dev_add(struct vdpa_mgmt_dev *mdev, const char *name,
+ const struct vdpa_dev_set_config *config)
{
struct vduse_dev *dev;
int ret;
diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c
index 5bcd00246d2e..e3ff7875e123 100644
--- a/drivers/vdpa/virtio_pci/vp_vdpa.c
+++ b/drivers/vdpa/virtio_pci/vp_vdpa.c
@@ -76,6 +76,17 @@ static u8 vp_vdpa_get_status(struct vdpa_device *vdpa)
return vp_modern_get_status(mdev);
}
+static int vp_vdpa_get_vq_irq(struct vdpa_device *vdpa, u16 idx)
+{
+ struct vp_vdpa *vp_vdpa = vdpa_to_vp(vdpa);
+ int irq = vp_vdpa->vring[idx].irq;
+
+ if (irq == VIRTIO_MSI_NO_VECTOR)
+ return -EINVAL;
+
+ return irq;
+}
+
static void vp_vdpa_free_irq(struct vp_vdpa *vp_vdpa)
{
struct virtio_pci_modern_device *mdev = &vp_vdpa->mdev;
@@ -427,6 +438,7 @@ static const struct vdpa_config_ops vp_vdpa_ops = {
.get_config = vp_vdpa_get_config,
.set_config = vp_vdpa_set_config,
.set_config_cb = vp_vdpa_set_config_cb,
+ .get_vq_irq = vp_vdpa_get_vq_irq,
};
static void vp_vdpa_free_irq_vectors(void *data)
diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
index 0ead91bfa838..6e2e62c6f47a 100644
--- a/drivers/vfio/fsl-mc/vfio_fsl_mc.c
+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c
@@ -65,6 +65,34 @@ static void vfio_fsl_mc_regions_cleanup(struct vfio_fsl_mc_device *vdev)
kfree(vdev->regions);
}
+static int vfio_fsl_mc_reset_device(struct vfio_fsl_mc_device *vdev)
+{
+ struct fsl_mc_device *mc_dev = vdev->mc_dev;
+ int ret = 0;
+
+ if (is_fsl_mc_bus_dprc(vdev->mc_dev)) {
+ return dprc_reset_container(mc_dev->mc_io, 0,
+ mc_dev->mc_handle,
+ mc_dev->obj_desc.id,
+ DPRC_RESET_OPTION_NON_RECURSIVE);
+ } else {
+ u16 token;
+
+ ret = fsl_mc_obj_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ mc_dev->obj_desc.type,
+ &token);
+ if (ret)
+ goto out;
+ ret = fsl_mc_obj_reset(mc_dev->mc_io, 0, token);
+ if (ret) {
+ fsl_mc_obj_close(mc_dev->mc_io, 0, token);
+ goto out;
+ }
+ ret = fsl_mc_obj_close(mc_dev->mc_io, 0, token);
+ }
+out:
+ return ret;
+}
static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
{
@@ -78,9 +106,7 @@ static void vfio_fsl_mc_close_device(struct vfio_device *core_vdev)
vfio_fsl_mc_regions_cleanup(vdev);
/* reset the device before cleaning up the interrupts */
- ret = dprc_reset_container(mc_cont->mc_io, 0, mc_cont->mc_handle,
- mc_cont->obj_desc.id,
- DPRC_RESET_OPTION_NON_RECURSIVE);
+ ret = vfio_fsl_mc_reset_device(vdev);
if (WARN_ON(ret))
dev_warn(&mc_cont->dev,
@@ -203,18 +229,7 @@ static long vfio_fsl_mc_ioctl(struct vfio_device *core_vdev,
}
case VFIO_DEVICE_RESET:
{
- int ret;
- struct fsl_mc_device *mc_dev = vdev->mc_dev;
-
- /* reset is supported only for the DPRC */
- if (!is_fsl_mc_bus_dprc(mc_dev))
- return -ENOTTY;
-
- ret = dprc_reset_container(mc_dev->mc_io, 0,
- mc_dev->mc_handle,
- mc_dev->obj_desc.id,
- DPRC_RESET_OPTION_NON_RECURSIVE);
- return ret;
+ return vfio_fsl_mc_reset_device(vdev);
}
default:
@@ -505,22 +520,13 @@ static void vfio_fsl_uninit_device(struct vfio_fsl_mc_device *vdev)
static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev)
{
- struct iommu_group *group;
struct vfio_fsl_mc_device *vdev;
struct device *dev = &mc_dev->dev;
int ret;
- group = vfio_iommu_group_get(dev);
- if (!group) {
- dev_err(dev, "VFIO_FSL_MC: No IOMMU group\n");
- return -EINVAL;
- }
-
vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
- if (!vdev) {
- ret = -ENOMEM;
- goto out_group_put;
- }
+ if (!vdev)
+ return -ENOMEM;
vfio_init_group_dev(&vdev->vdev, dev, &vfio_fsl_mc_ops);
vdev->mc_dev = mc_dev;
@@ -556,8 +562,6 @@ out_device:
out_uninit:
vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev);
-out_group_put:
- vfio_iommu_group_put(group, dev);
return ret;
}
@@ -574,8 +578,6 @@ static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev)
vfio_uninit_group_dev(&vdev->vdev);
kfree(vdev);
- vfio_iommu_group_put(mc_dev->dev.iommu_group, dev);
-
return 0;
}
diff --git a/drivers/vfio/mdev/mdev_driver.c b/drivers/vfio/mdev/mdev_driver.c
index e2cb1ff56f6c..7927ed4f1711 100644
--- a/drivers/vfio/mdev/mdev_driver.c
+++ b/drivers/vfio/mdev/mdev_driver.c
@@ -13,60 +13,23 @@
#include "mdev_private.h"
-static int mdev_attach_iommu(struct mdev_device *mdev)
-{
- int ret;
- struct iommu_group *group;
-
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return PTR_ERR(group);
-
- ret = iommu_group_add_device(group, &mdev->dev);
- if (!ret)
- dev_info(&mdev->dev, "MDEV: group_id = %d\n",
- iommu_group_id(group));
-
- iommu_group_put(group);
- return ret;
-}
-
-static void mdev_detach_iommu(struct mdev_device *mdev)
-{
- iommu_group_remove_device(&mdev->dev);
- dev_info(&mdev->dev, "MDEV: detaching iommu\n");
-}
-
static int mdev_probe(struct device *dev)
{
struct mdev_driver *drv =
container_of(dev->driver, struct mdev_driver, driver);
- struct mdev_device *mdev = to_mdev_device(dev);
- int ret;
- ret = mdev_attach_iommu(mdev);
- if (ret)
- return ret;
-
- if (drv->probe) {
- ret = drv->probe(mdev);
- if (ret)
- mdev_detach_iommu(mdev);
- }
-
- return ret;
+ if (!drv->probe)
+ return 0;
+ return drv->probe(to_mdev_device(dev));
}
static void mdev_remove(struct device *dev)
{
struct mdev_driver *drv =
container_of(dev->driver, struct mdev_driver, driver);
- struct mdev_device *mdev = to_mdev_device(dev);
if (drv->remove)
- drv->remove(mdev);
-
- mdev_detach_iommu(mdev);
+ drv->remove(to_mdev_device(dev));
}
static int mdev_match(struct device *dev, struct device_driver *drv)
diff --git a/drivers/vfio/mdev/vfio_mdev.c b/drivers/vfio/mdev/vfio_mdev.c
index 7a9883048216..a90e24b0c851 100644
--- a/drivers/vfio/mdev/vfio_mdev.c
+++ b/drivers/vfio/mdev/vfio_mdev.c
@@ -119,7 +119,7 @@ static int vfio_mdev_probe(struct mdev_device *mdev)
return -ENOMEM;
vfio_init_group_dev(vdev, &mdev->dev, &vfio_mdev_dev_ops);
- ret = vfio_register_group_dev(vdev);
+ ret = vfio_register_emulated_iommu_dev(vdev);
if (ret)
goto out_uninit;
diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c
index a03b5a99c2da..f948e6cd2993 100644
--- a/drivers/vfio/pci/vfio_pci_core.c
+++ b/drivers/vfio/pci/vfio_pci_core.c
@@ -1806,7 +1806,6 @@ EXPORT_SYMBOL_GPL(vfio_pci_core_uninit_device);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
{
struct pci_dev *pdev = vdev->pdev;
- struct iommu_group *group;
int ret;
if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
@@ -1825,10 +1824,6 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
return -EBUSY;
}
- group = vfio_iommu_group_get(&pdev->dev);
- if (!group)
- return -EINVAL;
-
if (pci_is_root_bus(pdev->bus)) {
ret = vfio_assign_device_set(&vdev->vdev, vdev);
} else if (!pci_probe_reset_slot(pdev->slot)) {
@@ -1842,10 +1837,10 @@ int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev)
}
if (ret)
- goto out_group_put;
+ return ret;
ret = vfio_pci_vf_init(vdev);
if (ret)
- goto out_group_put;
+ return ret;
ret = vfio_pci_vga_init(vdev);
if (ret)
goto out_vf;
@@ -1876,8 +1871,6 @@ out_power:
vfio_pci_set_power_state(vdev, PCI_D0);
out_vf:
vfio_pci_vf_uninit(vdev);
-out_group_put:
- vfio_iommu_group_put(group, &pdev->dev);
return ret;
}
EXPORT_SYMBOL_GPL(vfio_pci_core_register_device);
@@ -1893,8 +1886,6 @@ void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev)
vfio_pci_vf_uninit(vdev);
vfio_pci_vga_uninit(vdev);
- vfio_iommu_group_put(pdev->dev.iommu_group, &pdev->dev);
-
if (!disable_idle_d3)
vfio_pci_set_power_state(vdev, PCI_D0);
}
diff --git a/drivers/vfio/pci/vfio_pci_igd.c b/drivers/vfio/pci/vfio_pci_igd.c
index 7ca4109bba48..56cd551e0e04 100644
--- a/drivers/vfio/pci/vfio_pci_igd.c
+++ b/drivers/vfio/pci/vfio_pci_igd.c
@@ -25,20 +25,121 @@
#define OPREGION_RVDS 0x3c2
#define OPREGION_VERSION 0x16
+struct igd_opregion_vbt {
+ void *opregion;
+ void *vbt_ex;
+};
+
+/**
+ * igd_opregion_shift_copy() - Copy OpRegion to user buffer and shift position.
+ * @dst: User buffer ptr to copy to.
+ * @off: Offset to user buffer ptr. Increased by bytes on return.
+ * @src: Source buffer to copy from.
+ * @pos: Increased by bytes on return.
+ * @remaining: Decreased by bytes on return.
+ * @bytes: Bytes to copy and adjust off, pos and remaining.
+ *
+ * Copy @bytes of OpRegion data from @src into the user buffer at @dst + @off,
+ * then advance @off and @pos and shrink @remaining by the same amount.
+ *
+ * Return: 0 on success, -EFAULT otherwise.
+ */
+static inline unsigned long igd_opregion_shift_copy(char __user *dst,
+ loff_t *off,
+ void *src,
+ loff_t *pos,
+ size_t *remaining,
+ size_t bytes)
+{
+ if (copy_to_user(dst + (*off), src, bytes))
+ return -EFAULT;
+
+ *off += bytes;
+ *pos += bytes;
+ *remaining -= bytes;
+
+ return 0;
+}
+
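
To make the windowed copy concrete, consider (purely as an illustration) a 4-byte
read that starts 2 bytes before OPREGION_VERSION: the first window copies 2 bytes
from the mapped OpRegion, the second copies 2 bytes from the locally patched
version word. Each window size is computed with the same min_t() pattern used in
vfio_pci_igd_rw() below; the helper name here is assumed:

/* Illustrative only: number of bytes to copy before reaching "boundary". */
static size_t example_window_bytes(loff_t pos, size_t remaining, loff_t boundary)
{
	return min_t(size_t, remaining, boundary - pos);
}

For the read above, example_window_bytes(OPREGION_VERSION - 2, 4, OPREGION_VERSION)
is 2, leaving 2 bytes to be served from the patched version field.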
static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
char __user *buf, size_t count, loff_t *ppos,
bool iswrite)
{
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) - VFIO_PCI_NUM_REGIONS;
- void *base = vdev->region[i].data;
- loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+ struct igd_opregion_vbt *opregionvbt = vdev->region[i].data;
+ loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK, off = 0;
+ size_t remaining;
if (pos >= vdev->region[i].size || iswrite)
return -EINVAL;
- count = min(count, (size_t)(vdev->region[i].size - pos));
+ count = min_t(size_t, count, vdev->region[i].size - pos);
+ remaining = count;
+
+ /* Copy until OpRegion version */
+ if (remaining && pos < OPREGION_VERSION) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_VERSION - pos);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy patched (if necessary) OpRegion version */
+ if (remaining && pos < OPREGION_VERSION + sizeof(__le16)) {
+ size_t bytes = min_t(size_t, remaining,
+ OPREGION_VERSION + sizeof(__le16) - pos);
+ __le16 version = *(__le16 *)(opregionvbt->opregion +
+ OPREGION_VERSION);
+
+ /* Patch to 2.1 if OpRegion 2.0 has extended VBT */
+ if (le16_to_cpu(version) == 0x0200 && opregionvbt->vbt_ex)
+ version = cpu_to_le16(0x0201);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ &version + (pos - OPREGION_VERSION),
+ &pos, &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy until RVDA */
+ if (remaining && pos < OPREGION_RVDA) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_RVDA - pos);
- if (copy_to_user(buf, base + pos, count))
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy modified (if necessary) RVDA */
+ if (remaining && pos < OPREGION_RVDA + sizeof(__le64)) {
+ size_t bytes = min_t(size_t, remaining,
+ OPREGION_RVDA + sizeof(__le64) - pos);
+ __le64 rvda = cpu_to_le64(opregionvbt->vbt_ex ?
+ OPREGION_SIZE : 0);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ &rvda + (pos - OPREGION_RVDA),
+ &pos, &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy the rest of OpRegion */
+ if (remaining && pos < OPREGION_SIZE) {
+ size_t bytes = min_t(size_t, remaining, OPREGION_SIZE - pos);
+
+ if (igd_opregion_shift_copy(buf, &off,
+ opregionvbt->opregion + pos, &pos,
+ &remaining, bytes))
+ return -EFAULT;
+ }
+
+ /* Copy extended VBT if exists */
+ if (remaining &&
+ copy_to_user(buf + off, opregionvbt->vbt_ex + (pos - OPREGION_SIZE),
+ remaining))
return -EFAULT;
*ppos += count;
@@ -49,7 +150,13 @@ static ssize_t vfio_pci_igd_rw(struct vfio_pci_core_device *vdev,
static void vfio_pci_igd_release(struct vfio_pci_core_device *vdev,
struct vfio_pci_region *region)
{
- memunmap(region->data);
+ struct igd_opregion_vbt *opregionvbt = region->data;
+
+ if (opregionvbt->vbt_ex)
+ memunmap(opregionvbt->vbt_ex);
+
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
}
static const struct vfio_pci_regops vfio_pci_igd_regops = {
@@ -61,7 +168,7 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
{
__le32 *dwordp = (__le32 *)(vdev->vconfig + OPREGION_PCI_ADDR);
u32 addr, size;
- void *base;
+ struct igd_opregion_vbt *opregionvbt;
int ret;
u16 version;
@@ -72,84 +179,93 @@ static int vfio_pci_igd_opregion_init(struct vfio_pci_core_device *vdev)
if (!addr || !(~addr))
return -ENODEV;
- base = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
- if (!base)
+ opregionvbt = kzalloc(sizeof(*opregionvbt), GFP_KERNEL);
+ if (!opregionvbt)
+ return -ENOMEM;
+
+ opregionvbt->opregion = memremap(addr, OPREGION_SIZE, MEMREMAP_WB);
+ if (!opregionvbt->opregion) {
+ kfree(opregionvbt);
return -ENOMEM;
+ }
- if (memcmp(base, OPREGION_SIGNATURE, 16)) {
- memunmap(base);
+ if (memcmp(opregionvbt->opregion, OPREGION_SIGNATURE, 16)) {
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
return -EINVAL;
}
- size = le32_to_cpu(*(__le32 *)(base + 16));
+ size = le32_to_cpu(*(__le32 *)(opregionvbt->opregion + 16));
if (!size) {
- memunmap(base);
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
return -EINVAL;
}
size *= 1024; /* In KB */
/*
- * Support opregion v2.1+
- * When VBT data exceeds 6KB size and cannot be within mailbox #4, then
- * the Extended VBT region next to opregion is used to hold the VBT data.
- * RVDA (Relative Address of VBT Data from Opregion Base) and RVDS
- * (Raw VBT Data Size) from opregion structure member are used to hold the
- * address from region base and size of VBT data. RVDA/RVDS are not
- * defined before opregion 2.0.
- *
- * opregion 2.1+: RVDA is unsigned, relative offset from
- * opregion base, and should point to the end of opregion.
- * otherwise, exposing to userspace to allow read access to everything between
- * the OpRegion and VBT is not safe.
- * RVDS is defined as size in bytes.
+ * OpRegion and VBT:
+ * When the VBT data does not exceed 6KB, it is stored in Mailbox #4.
+ * When the VBT data exceeds 6KB, Mailbox #4 is no longer large enough
+ * to hold it, so the Extended VBT region, introduced in OpRegion 2.0,
+ * holds the VBT data instead. Since OpRegion 2.0, RVDA/RVDS define the
+ * extended VBT data location and size.
+ * OpRegion 2.0: RVDA defines the absolute physical address of the
+ * extended VBT data, RVDS defines the VBT data size.
+ * OpRegion 2.1 and above: RVDA defines the relative address of the
+ * extended VBT data to OpRegion base, RVDS defines the VBT data size.
*
- * opregion 2.0: rvda is the physical VBT address.
- * Since rvda is HPA it cannot be directly used in guest.
- * And it should not be practically available for end user,so it is not supported.
+ * Since the RVDA definition is the only difference between OpRegion 2.0
+ * and 2.1, exposing OpRegion and VBT as one contiguous range for
+ * OpRegion 2.0 and above makes it possible to support a non-contiguous
+ * VBT through a single vfio region. From the r/w ops' point of view,
+ * only a contiguous VBT following the OpRegion with version 2.1+ is
+ * exposed, regardless of whether the host OpRegion is 2.0 or a
+ * non-contiguous 2.1+. The r/w ops shift the offset into the VBT on the
+ * fly so that data at the correct position is returned to the requester.
*/
- version = le16_to_cpu(*(__le16 *)(base + OPREGION_VERSION));
+ version = le16_to_cpu(*(__le16 *)(opregionvbt->opregion +
+ OPREGION_VERSION));
if (version >= 0x0200) {
- u64 rvda;
- u32 rvds;
+ u64 rvda = le64_to_cpu(*(__le64 *)(opregionvbt->opregion +
+ OPREGION_RVDA));
+ u32 rvds = le32_to_cpu(*(__le32 *)(opregionvbt->opregion +
+ OPREGION_RVDS));
- rvda = le64_to_cpu(*(__le64 *)(base + OPREGION_RVDA));
- rvds = le32_to_cpu(*(__le32 *)(base + OPREGION_RVDS));
+ /* The extended VBT is valid only when RVDA/RVDS are non-zero */
if (rvda && rvds) {
- /* no support for opregion v2.0 with physical VBT address */
- if (version == 0x0200) {
- memunmap(base);
- pci_err(vdev->pdev,
- "IGD assignment does not support opregion v2.0 with an extended VBT region\n");
- return -EINVAL;
- }
+ size += rvds;
- if (rvda != size) {
- memunmap(base);
- pci_err(vdev->pdev,
- "Extended VBT does not follow opregion on version 0x%04x\n",
- version);
- return -EINVAL;
+ /*
+ * Extended VBT location by RVDA:
+ * Absolute physical addr for 2.0.
+ * Relative addr to OpRegion header for 2.1+.
+ */
+ if (version == 0x0200)
+ addr = rvda;
+ else
+ addr += rvda;
+
+ opregionvbt->vbt_ex = memremap(addr, rvds, MEMREMAP_WB);
+ if (!opregionvbt->vbt_ex) {
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
+ return -ENOMEM;
}
-
- /* region size for opregion v2.0+: opregion and VBT size. */
- size += rvds;
}
}
- if (size != OPREGION_SIZE) {
- memunmap(base);
- base = memremap(addr, size, MEMREMAP_WB);
- if (!base)
- return -ENOMEM;
- }
-
ret = vfio_pci_register_dev_region(vdev,
PCI_VENDOR_ID_INTEL | VFIO_REGION_TYPE_PCI_VENDOR_TYPE,
- VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION,
- &vfio_pci_igd_regops, size, VFIO_REGION_INFO_FLAG_READ, base);
+ VFIO_REGION_SUBTYPE_INTEL_IGD_OPREGION, &vfio_pci_igd_regops,
+ size, VFIO_REGION_INFO_FLAG_READ, opregionvbt);
if (ret) {
- memunmap(base);
+ if (opregionvbt->vbt_ex)
+ memunmap(opregionvbt->vbt_ex);
+
+ memunmap(opregionvbt->opregion);
+ kfree(opregionvbt);
return ret;
}
diff --git a/drivers/vfio/platform/vfio_platform_common.c b/drivers/vfio/platform/vfio_platform_common.c
index 6af7ce7d619c..256f55b84e70 100644
--- a/drivers/vfio/platform/vfio_platform_common.c
+++ b/drivers/vfio/platform/vfio_platform_common.c
@@ -642,7 +642,6 @@ static int vfio_platform_of_probe(struct vfio_platform_device *vdev,
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
struct device *dev)
{
- struct iommu_group *group;
int ret;
vfio_init_group_dev(&vdev->vdev, dev, &vfio_platform_ops);
@@ -663,24 +662,15 @@ int vfio_platform_probe_common(struct vfio_platform_device *vdev,
goto out_uninit;
}
- group = vfio_iommu_group_get(dev);
- if (!group) {
- dev_err(dev, "No IOMMU group for device %s\n", vdev->name);
- ret = -EINVAL;
- goto put_reset;
- }
-
ret = vfio_register_group_dev(&vdev->vdev);
if (ret)
- goto put_iommu;
+ goto put_reset;
mutex_init(&vdev->igate);
pm_runtime_enable(dev);
return 0;
-put_iommu:
- vfio_iommu_group_put(group, dev);
put_reset:
vfio_platform_put_reset(vdev);
out_uninit:
@@ -696,7 +686,6 @@ void vfio_platform_remove_common(struct vfio_platform_device *vdev)
pm_runtime_disable(vdev->device);
vfio_platform_put_reset(vdev);
vfio_uninit_group_dev(&vdev->vdev);
- vfio_iommu_group_put(vdev->vdev.dev->iommu_group, vdev->vdev.dev);
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 3c034fe14ccb..82fb75464f92 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -32,6 +32,7 @@
#include <linux/vfio.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>
+#include "vfio.h"
#define DRIVER_VERSION "0.3"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -42,9 +43,8 @@ static struct vfio {
struct list_head iommu_drivers_list;
struct mutex iommu_drivers_lock;
struct list_head group_list;
- struct idr group_idr;
- struct mutex group_lock;
- struct cdev group_cdev;
+ struct mutex group_lock; /* locks group_list */
+ struct ida group_ida;
dev_t group_devt;
} vfio;
@@ -68,14 +68,14 @@ struct vfio_unbound_dev {
};
struct vfio_group {
- struct kref kref;
- int minor;
+ struct device dev;
+ struct cdev cdev;
+ refcount_t users;
atomic_t container_users;
struct iommu_group *iommu_group;
struct vfio_container *container;
struct list_head device_list;
struct mutex device_lock;
- struct device *dev;
struct notifier_block nb;
struct list_head vfio_next;
struct list_head container_next;
@@ -83,7 +83,7 @@ struct vfio_group {
struct mutex unbound_lock;
atomic_t opened;
wait_queue_head_t container_q;
- bool noiommu;
+ enum vfio_group_type type;
unsigned int dev_counter;
struct kvm *kvm;
struct blocking_notifier_head notifier;
@@ -97,6 +97,7 @@ MODULE_PARM_DESC(enable_unsafe_noiommu_mode, "Enable UNSAFE, no-IOMMU mode. Thi
#endif
static DEFINE_XARRAY(vfio_device_set_xa);
+static const struct file_operations vfio_group_fops;
int vfio_assign_device_set(struct vfio_device *device, void *set_id)
{
@@ -169,70 +170,6 @@ static void vfio_release_device_set(struct vfio_device *device)
xa_unlock(&vfio_device_set_xa);
}
-/*
- * vfio_iommu_group_{get,put} are only intended for VFIO bus driver probe
- * and remove functions, any use cases other than acquiring the first
- * reference for the purpose of calling vfio_register_group_dev() or removing
- * that symmetric reference after vfio_unregister_group_dev() should use the raw
- * iommu_group_{get,put} functions. In particular, vfio_iommu_group_put()
- * removes the device from the dummy group and cannot be nested.
- */
-struct iommu_group *vfio_iommu_group_get(struct device *dev)
-{
- struct iommu_group *group;
- int __maybe_unused ret;
-
- group = iommu_group_get(dev);
-
-#ifdef CONFIG_VFIO_NOIOMMU
- /*
- * With noiommu enabled, an IOMMU group will be created for a device
- * that doesn't already have one and doesn't have an iommu_ops on their
- * bus. We set iommudata simply to be able to identify these groups
- * as special use and for reclamation later.
- */
- if (group || !noiommu || iommu_present(dev->bus))
- return group;
-
- group = iommu_group_alloc();
- if (IS_ERR(group))
- return NULL;
-
- iommu_group_set_name(group, "vfio-noiommu");
- iommu_group_set_iommudata(group, &noiommu, NULL);
- ret = iommu_group_add_device(group, dev);
- if (ret) {
- iommu_group_put(group);
- return NULL;
- }
-
- /*
- * Where to taint? At this point we've added an IOMMU group for a
- * device that is not backed by iommu_ops, therefore any iommu_
- * callback using iommu_ops can legitimately Oops. So, while we may
- * be about to give a DMA capable device to a user without IOMMU
- * protection, which is clearly taint-worthy, let's go ahead and do
- * it here.
- */
- add_taint(TAINT_USER, LOCKDEP_STILL_OK);
- dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
-#endif
-
- return group;
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_get);
-
-void vfio_iommu_group_put(struct iommu_group *group, struct device *dev)
-{
-#ifdef CONFIG_VFIO_NOIOMMU
- if (iommu_group_get_iommudata(group) == &noiommu)
- iommu_group_remove_device(dev);
-#endif
-
- iommu_group_put(group);
-}
-EXPORT_SYMBOL_GPL(vfio_iommu_group_put);
-
#ifdef CONFIG_VFIO_NOIOMMU
static void *vfio_noiommu_open(unsigned long arg)
{
@@ -258,9 +195,9 @@ static long vfio_noiommu_ioctl(void *iommu_data,
}
static int vfio_noiommu_attach_group(void *iommu_data,
- struct iommu_group *iommu_group)
+ struct iommu_group *iommu_group, enum vfio_group_type type)
{
- return iommu_group_get_iommudata(iommu_group) == &noiommu ? 0 : -EINVAL;
+ return 0;
}
static void vfio_noiommu_detach_group(void *iommu_data,
@@ -277,8 +214,23 @@ static const struct vfio_iommu_driver_ops vfio_noiommu_ops = {
.attach_group = vfio_noiommu_attach_group,
.detach_group = vfio_noiommu_detach_group,
};
-#endif
+/*
+ * Only noiommu containers can use vfio-noiommu and noiommu containers can only
+ * use vfio-noiommu.
+ */
+static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
+ const struct vfio_iommu_driver *driver)
+{
+ return container->noiommu == (driver->ops == &vfio_noiommu_ops);
+}
+#else
+static inline bool vfio_iommu_driver_allowed(struct vfio_container *container,
+ const struct vfio_iommu_driver *driver)
+{
+ return true;
+}
+#endif /* CONFIG_VFIO_NOIOMMU */
/**
* IOMMU driver registration
@@ -329,19 +281,6 @@ void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops)
}
EXPORT_SYMBOL_GPL(vfio_unregister_iommu_driver);
-/**
- * Group minor allocation/free - both called with vfio.group_lock held
- */
-static int vfio_alloc_group_minor(struct vfio_group *group)
-{
- return idr_alloc(&vfio.group_idr, group, 0, MINORMASK + 1, GFP_KERNEL);
-}
-
-static void vfio_free_group_minor(int minor)
-{
- idr_remove(&vfio.group_idr, minor);
-}
-
static int vfio_iommu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data);
static void vfio_group_get(struct vfio_group *group);
@@ -370,105 +309,38 @@ static void vfio_container_put(struct vfio_container *container)
kref_put(&container->kref, vfio_container_release);
}
-static void vfio_group_unlock_and_free(struct vfio_group *group)
-{
- mutex_unlock(&vfio.group_lock);
- /*
- * Unregister outside of lock. A spurious callback is harmless now
- * that the group is no longer in vfio.group_list.
- */
- iommu_group_unregister_notifier(group->iommu_group, &group->nb);
- kfree(group);
-}
-
/**
* Group objects - create, release, get, put, search
*/
-static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
+static struct vfio_group *
+__vfio_group_get_from_iommu(struct iommu_group *iommu_group)
{
- struct vfio_group *group, *tmp;
- struct device *dev;
- int ret, minor;
-
- group = kzalloc(sizeof(*group), GFP_KERNEL);
- if (!group)
- return ERR_PTR(-ENOMEM);
-
- kref_init(&group->kref);
- INIT_LIST_HEAD(&group->device_list);
- mutex_init(&group->device_lock);
- INIT_LIST_HEAD(&group->unbound_list);
- mutex_init(&group->unbound_lock);
- atomic_set(&group->container_users, 0);
- atomic_set(&group->opened, 0);
- init_waitqueue_head(&group->container_q);
- group->iommu_group = iommu_group;
-#ifdef CONFIG_VFIO_NOIOMMU
- group->noiommu = (iommu_group_get_iommudata(iommu_group) == &noiommu);
-#endif
- BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
-
- group->nb.notifier_call = vfio_iommu_group_notifier;
-
- /*
- * blocking notifiers acquire a rwsem around registering and hold
- * it around callback. Therefore, need to register outside of
- * vfio.group_lock to avoid A-B/B-A contention. Our callback won't
- * do anything unless it can find the group in vfio.group_list, so
- * no harm in registering early.
- */
- ret = iommu_group_register_notifier(iommu_group, &group->nb);
- if (ret) {
- kfree(group);
- return ERR_PTR(ret);
- }
-
- mutex_lock(&vfio.group_lock);
+ struct vfio_group *group;
- /* Did we race creating this group? */
- list_for_each_entry(tmp, &vfio.group_list, vfio_next) {
- if (tmp->iommu_group == iommu_group) {
- vfio_group_get(tmp);
- vfio_group_unlock_and_free(group);
- return tmp;
+ list_for_each_entry(group, &vfio.group_list, vfio_next) {
+ if (group->iommu_group == iommu_group) {
+ vfio_group_get(group);
+ return group;
}
}
+ return NULL;
+}
- minor = vfio_alloc_group_minor(group);
- if (minor < 0) {
- vfio_group_unlock_and_free(group);
- return ERR_PTR(minor);
- }
-
- dev = device_create(vfio.class, NULL,
- MKDEV(MAJOR(vfio.group_devt), minor),
- group, "%s%d", group->noiommu ? "noiommu-" : "",
- iommu_group_id(iommu_group));
- if (IS_ERR(dev)) {
- vfio_free_group_minor(minor);
- vfio_group_unlock_and_free(group);
- return ERR_CAST(dev);
- }
-
- group->minor = minor;
- group->dev = dev;
-
- list_add(&group->vfio_next, &vfio.group_list);
+static struct vfio_group *
+vfio_group_get_from_iommu(struct iommu_group *iommu_group)
+{
+ struct vfio_group *group;
+ mutex_lock(&vfio.group_lock);
+ group = __vfio_group_get_from_iommu(iommu_group);
mutex_unlock(&vfio.group_lock);
-
return group;
}
-/* called with vfio.group_lock held */
-static void vfio_group_release(struct kref *kref)
+static void vfio_group_release(struct device *dev)
{
- struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+ struct vfio_group *group = container_of(dev, struct vfio_group, dev);
struct vfio_unbound_dev *unbound, *tmp;
- struct iommu_group *iommu_group = group->iommu_group;
-
- WARN_ON(!list_empty(&group->device_list));
- WARN_ON(group->notifier.head);
list_for_each_entry_safe(unbound, tmp,
&group->unbound_list, unbound_next) {
@@ -476,105 +348,129 @@ static void vfio_group_release(struct kref *kref)
kfree(unbound);
}
- device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
- list_del(&group->vfio_next);
- vfio_free_group_minor(group->minor);
- vfio_group_unlock_and_free(group);
- iommu_group_put(iommu_group);
+ mutex_destroy(&group->device_lock);
+ mutex_destroy(&group->unbound_lock);
+ iommu_group_put(group->iommu_group);
+ ida_free(&vfio.group_ida, MINOR(group->dev.devt));
+ kfree(group);
}
-static void vfio_group_put(struct vfio_group *group)
+static struct vfio_group *vfio_group_alloc(struct iommu_group *iommu_group,
+ enum vfio_group_type type)
{
- kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
-}
-
-struct vfio_group_put_work {
- struct work_struct work;
struct vfio_group *group;
-};
+ int minor;
-static void vfio_group_put_bg(struct work_struct *work)
-{
- struct vfio_group_put_work *do_work;
-
- do_work = container_of(work, struct vfio_group_put_work, work);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
+ if (!group)
+ return ERR_PTR(-ENOMEM);
- vfio_group_put(do_work->group);
- kfree(do_work);
-}
+ minor = ida_alloc_max(&vfio.group_ida, MINORMASK, GFP_KERNEL);
+ if (minor < 0) {
+ kfree(group);
+ return ERR_PTR(minor);
+ }
-static void vfio_group_schedule_put(struct vfio_group *group)
-{
- struct vfio_group_put_work *do_work;
+ device_initialize(&group->dev);
+ group->dev.devt = MKDEV(MAJOR(vfio.group_devt), minor);
+ group->dev.class = vfio.class;
+ group->dev.release = vfio_group_release;
+ cdev_init(&group->cdev, &vfio_group_fops);
+ group->cdev.owner = THIS_MODULE;
- do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
- if (WARN_ON(!do_work))
- return;
+ refcount_set(&group->users, 1);
+ INIT_LIST_HEAD(&group->device_list);
+ mutex_init(&group->device_lock);
+ INIT_LIST_HEAD(&group->unbound_list);
+ mutex_init(&group->unbound_lock);
+ init_waitqueue_head(&group->container_q);
+ group->iommu_group = iommu_group;
+ /* put in vfio_group_release() */
+ iommu_group_ref_get(iommu_group);
+ group->type = type;
+ BLOCKING_INIT_NOTIFIER_HEAD(&group->notifier);
- INIT_WORK(&do_work->work, vfio_group_put_bg);
- do_work->group = group;
- schedule_work(&do_work->work);
+ return group;
}
-/* Assume group_lock or group reference is held */
-static void vfio_group_get(struct vfio_group *group)
+static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group,
+ enum vfio_group_type type)
{
- kref_get(&group->kref);
-}
+ struct vfio_group *group;
+ struct vfio_group *ret;
+ int err;
-/*
- * Not really a try as we will sleep for mutex, but we need to make
- * sure the group pointer is valid under lock and get a reference.
- */
-static struct vfio_group *vfio_group_try_get(struct vfio_group *group)
-{
- struct vfio_group *target = group;
+ group = vfio_group_alloc(iommu_group, type);
+ if (IS_ERR(group))
+ return group;
- mutex_lock(&vfio.group_lock);
- list_for_each_entry(group, &vfio.group_list, vfio_next) {
- if (group == target) {
- vfio_group_get(group);
- mutex_unlock(&vfio.group_lock);
- return group;
- }
+ err = dev_set_name(&group->dev, "%s%d",
+ group->type == VFIO_NO_IOMMU ? "noiommu-" : "",
+ iommu_group_id(iommu_group));
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_put;
}
- mutex_unlock(&vfio.group_lock);
- return NULL;
-}
-
-static
-struct vfio_group *vfio_group_get_from_iommu(struct iommu_group *iommu_group)
-{
- struct vfio_group *group;
+ group->nb.notifier_call = vfio_iommu_group_notifier;
+ err = iommu_group_register_notifier(iommu_group, &group->nb);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_put;
+ }
mutex_lock(&vfio.group_lock);
- list_for_each_entry(group, &vfio.group_list, vfio_next) {
- if (group->iommu_group == iommu_group) {
- vfio_group_get(group);
- mutex_unlock(&vfio.group_lock);
- return group;
- }
+
+ /* Did we race creating this group? */
+ ret = __vfio_group_get_from_iommu(iommu_group);
+ if (ret)
+ goto err_unlock;
+
+ err = cdev_device_add(&group->cdev, &group->dev);
+ if (err) {
+ ret = ERR_PTR(err);
+ goto err_unlock;
}
+
+ list_add(&group->vfio_next, &vfio.group_list);
+
mutex_unlock(&vfio.group_lock);
+ return group;
- return NULL;
+err_unlock:
+ mutex_unlock(&vfio.group_lock);
+ iommu_group_unregister_notifier(group->iommu_group, &group->nb);
+err_put:
+ put_device(&group->dev);
+ return ret;
}
-static struct vfio_group *vfio_group_get_from_minor(int minor)
+static void vfio_group_put(struct vfio_group *group)
{
- struct vfio_group *group;
+ if (!refcount_dec_and_mutex_lock(&group->users, &vfio.group_lock))
+ return;
- mutex_lock(&vfio.group_lock);
- group = idr_find(&vfio.group_idr, minor);
- if (!group) {
- mutex_unlock(&vfio.group_lock);
- return NULL;
- }
- vfio_group_get(group);
+ /*
+ * These data structures all have paired operations that can only be
+ * undone when the caller holds a live reference on the group. Since all
+ * pairs must be undone, these WARN_ON's indicate some caller did not
+ * properly hold the group reference.
+ */
+ WARN_ON(!list_empty(&group->device_list));
+ WARN_ON(atomic_read(&group->container_users));
+ WARN_ON(group->notifier.head);
+
+ list_del(&group->vfio_next);
+ cdev_device_del(&group->cdev, &group->dev);
mutex_unlock(&vfio.group_lock);
- return group;
+ iommu_group_unregister_notifier(group->iommu_group, &group->nb);
+ put_device(&group->dev);
+}
+
+static void vfio_group_get(struct vfio_group *group)
+{
+ refcount_inc(&group->users);
}
static struct vfio_group *vfio_group_get_from_dev(struct device *dev)
@@ -740,14 +636,6 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
struct device *dev = data;
struct vfio_unbound_dev *unbound;
- /*
- * Need to go through a group_lock lookup to get a reference or we
- * risk racing a group being removed. Ignore spurious notifies.
- */
- group = vfio_group_try_get(group);
- if (!group)
- return NOTIFY_OK;
-
switch (action) {
case IOMMU_GROUP_NOTIFY_ADD_DEVICE:
vfio_group_nb_add_dev(group, dev);
@@ -798,15 +686,6 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
mutex_unlock(&group->unbound_lock);
break;
}
-
- /*
- * If we're the last reference to the group, the group will be
- * released, which includes unregistering the iommu group notifier.
- * We hold a read-lock on that notifier list, unregistering needs
- * a write-lock... deadlock. Release our reference asynchronously
- * to avoid that situation.
- */
- vfio_group_schedule_put(group);
return NOTIFY_OK;
}
@@ -828,11 +707,78 @@ void vfio_uninit_group_dev(struct vfio_device *device)
}
EXPORT_SYMBOL_GPL(vfio_uninit_group_dev);
-int vfio_register_group_dev(struct vfio_device *device)
+static struct vfio_group *vfio_noiommu_group_alloc(struct device *dev,
+ enum vfio_group_type type)
{
- struct vfio_device *existing_device;
struct iommu_group *iommu_group;
struct vfio_group *group;
+ int ret;
+
+ iommu_group = iommu_group_alloc();
+ if (IS_ERR(iommu_group))
+ return ERR_CAST(iommu_group);
+
+ iommu_group_set_name(iommu_group, "vfio-noiommu");
+ ret = iommu_group_add_device(iommu_group, dev);
+ if (ret)
+ goto out_put_group;
+
+ group = vfio_create_group(iommu_group, type);
+ if (IS_ERR(group)) {
+ ret = PTR_ERR(group);
+ goto out_remove_device;
+ }
+ iommu_group_put(iommu_group);
+ return group;
+
+out_remove_device:
+ iommu_group_remove_device(dev);
+out_put_group:
+ iommu_group_put(iommu_group);
+ return ERR_PTR(ret);
+}
+
+static struct vfio_group *vfio_group_find_or_alloc(struct device *dev)
+{
+ struct iommu_group *iommu_group;
+ struct vfio_group *group;
+
+ iommu_group = iommu_group_get(dev);
+#ifdef CONFIG_VFIO_NOIOMMU
+ if (!iommu_group && noiommu && !iommu_present(dev->bus)) {
+ /*
+ * With noiommu enabled, create an IOMMU group for devices that
+ * don't already have one and don't have an iommu_ops on their
+ * bus. Taint the kernel because we're about to give a DMA
+ * capable device to a user without IOMMU protection.
+ */
+ group = vfio_noiommu_group_alloc(dev, VFIO_NO_IOMMU);
+ if (!IS_ERR(group)) {
+ add_taint(TAINT_USER, LOCKDEP_STILL_OK);
+ dev_warn(dev, "Adding kernel taint for vfio-noiommu group on device\n");
+ }
+ return group;
+ }
+#endif
+ if (!iommu_group)
+ return ERR_PTR(-EINVAL);
+
+ group = vfio_group_get_from_iommu(iommu_group);
+ if (!group)
+ group = vfio_create_group(iommu_group, VFIO_IOMMU);
+
+ /* The vfio_group holds a reference to the iommu_group */
+ iommu_group_put(iommu_group);
+ return group;
+}
+
+static int __vfio_register_dev(struct vfio_device *device,
+ struct vfio_group *group)
+{
+ struct vfio_device *existing_device;
+
+ if (IS_ERR(group))
+ return PTR_ERR(group);
/*
* If the driver doesn't specify a set then the device is added to a
@@ -841,30 +787,14 @@ int vfio_register_group_dev(struct vfio_device *device)
if (!device->dev_set)
vfio_assign_device_set(device, device);
- iommu_group = iommu_group_get(device->dev);
- if (!iommu_group)
- return -EINVAL;
-
- group = vfio_group_get_from_iommu(iommu_group);
- if (!group) {
- group = vfio_create_group(iommu_group);
- if (IS_ERR(group)) {
- iommu_group_put(iommu_group);
- return PTR_ERR(group);
- }
- } else {
- /*
- * A found vfio_group already holds a reference to the
- * iommu_group. A created vfio_group keeps the reference.
- */
- iommu_group_put(iommu_group);
- }
-
existing_device = vfio_group_get_device(group, device->dev);
if (existing_device) {
dev_WARN(device->dev, "Device already exists on group %d\n",
- iommu_group_id(iommu_group));
+ iommu_group_id(group->iommu_group));
vfio_device_put(existing_device);
+ if (group->type == VFIO_NO_IOMMU ||
+ group->type == VFIO_EMULATED_IOMMU)
+ iommu_group_remove_device(device->dev);
vfio_group_put(group);
return -EBUSY;
}
@@ -882,8 +812,25 @@ int vfio_register_group_dev(struct vfio_device *device)
return 0;
}
+
+int vfio_register_group_dev(struct vfio_device *device)
+{
+ return __vfio_register_dev(device,
+ vfio_group_find_or_alloc(device->dev));
+}
EXPORT_SYMBOL_GPL(vfio_register_group_dev);
+/*
+ * Register a virtual device without IOMMU backing. The user of this
+ * device must not be able to directly trigger unmediated DMA.
+ */
+int vfio_register_emulated_iommu_dev(struct vfio_device *device)
+{
+ return __vfio_register_dev(device,
+ vfio_noiommu_group_alloc(device->dev, VFIO_EMULATED_IOMMU));
+}
+EXPORT_SYMBOL_GPL(vfio_register_emulated_iommu_dev);
+
/**
* Get a reference to the vfio_device for a device. Even if the
* caller thinks they own the device, they could be racing with a
@@ -1010,6 +957,9 @@ void vfio_unregister_group_dev(struct vfio_device *device)
if (list_empty(&group->device_list))
wait_event(group->container_q, !group->container);
+ if (group->type == VFIO_NO_IOMMU || group->type == VFIO_EMULATED_IOMMU)
+ iommu_group_remove_device(device->dev);
+
/* Matches the get in vfio_register_group_dev() */
vfio_group_put(group);
}
@@ -1042,13 +992,10 @@ static long vfio_ioctl_check_extension(struct vfio_container *container,
list_for_each_entry(driver, &vfio.iommu_drivers_list,
vfio_next) {
-#ifdef CONFIG_VFIO_NOIOMMU
if (!list_empty(&container->group_list) &&
- (container->noiommu !=
- (driver->ops == &vfio_noiommu_ops)))
+ !vfio_iommu_driver_allowed(container,
+ driver))
continue;
-#endif
-
if (!try_module_get(driver->ops->owner))
continue;
@@ -1079,7 +1026,8 @@ static int __vfio_container_attach_groups(struct vfio_container *container,
int ret = -ENODEV;
list_for_each_entry(group, &container->group_list, container_next) {
- ret = driver->ops->attach_group(data, group->iommu_group);
+ ret = driver->ops->attach_group(data, group->iommu_group,
+ group->type);
if (ret)
goto unwind;
}
@@ -1120,15 +1068,8 @@ static long vfio_ioctl_set_iommu(struct vfio_container *container,
list_for_each_entry(driver, &vfio.iommu_drivers_list, vfio_next) {
void *data;
-#ifdef CONFIG_VFIO_NOIOMMU
- /*
- * Only noiommu containers can use vfio-noiommu and noiommu
- * containers can only use vfio-noiommu.
- */
- if (container->noiommu != (driver->ops == &vfio_noiommu_ops))
+ if (!vfio_iommu_driver_allowed(container, driver))
continue;
-#endif
-
if (!try_module_get(driver->ops->owner))
continue;
@@ -1234,62 +1175,12 @@ static int vfio_fops_release(struct inode *inode, struct file *filep)
return 0;
}
-/*
- * Once an iommu driver is set, we optionally pass read/write/mmap
- * on to the driver, allowing management interfaces beyond ioctl.
- */
-static ssize_t vfio_fops_read(struct file *filep, char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver;
- ssize_t ret = -EINVAL;
-
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->read))
- ret = driver->ops->read(container->iommu_data,
- buf, count, ppos);
-
- return ret;
-}
-
-static ssize_t vfio_fops_write(struct file *filep, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver;
- ssize_t ret = -EINVAL;
-
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->write))
- ret = driver->ops->write(container->iommu_data,
- buf, count, ppos);
-
- return ret;
-}
-
-static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma)
-{
- struct vfio_container *container = filep->private_data;
- struct vfio_iommu_driver *driver;
- int ret = -EINVAL;
-
- driver = container->iommu_driver;
- if (likely(driver && driver->ops->mmap))
- ret = driver->ops->mmap(container->iommu_data, vma);
-
- return ret;
-}
-
static const struct file_operations vfio_fops = {
.owner = THIS_MODULE,
.open = vfio_fops_open,
.release = vfio_fops_release,
- .read = vfio_fops_read,
- .write = vfio_fops_write,
.unlocked_ioctl = vfio_fops_unl_ioctl,
.compat_ioctl = compat_ptr_ioctl,
- .mmap = vfio_fops_mmap,
};
/**
@@ -1366,7 +1257,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
if (atomic_read(&group->container_users))
return -EINVAL;
- if (group->noiommu && !capable(CAP_SYS_RAWIO))
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
return -EPERM;
f = fdget(container_fd);
@@ -1386,7 +1277,7 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
/* Real groups and fake groups cannot mix */
if (!list_empty(&container->group_list) &&
- container->noiommu != group->noiommu) {
+ container->noiommu != (group->type == VFIO_NO_IOMMU)) {
ret = -EPERM;
goto unlock_out;
}
@@ -1394,13 +1285,14 @@ static int vfio_group_set_container(struct vfio_group *group, int container_fd)
driver = container->iommu_driver;
if (driver) {
ret = driver->ops->attach_group(container->iommu_data,
- group->iommu_group);
+ group->iommu_group,
+ group->type);
if (ret)
goto unlock_out;
}
group->container = container;
- container->noiommu = group->noiommu;
+ container->noiommu = (group->type == VFIO_NO_IOMMU);
list_add(&group->container_next, &container->group_list);
/* Get a reference on the container and mark a user within the group */
@@ -1424,7 +1316,7 @@ static int vfio_group_add_container_user(struct vfio_group *group)
if (!atomic_inc_not_zero(&group->container_users))
return -EINVAL;
- if (group->noiommu) {
+ if (group->type == VFIO_NO_IOMMU) {
atomic_dec(&group->container_users);
return -EPERM;
}
@@ -1449,7 +1341,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
!group->container->iommu_driver || !vfio_group_viable(group))
return -EINVAL;
- if (group->noiommu && !capable(CAP_SYS_RAWIO))
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO))
return -EPERM;
device = vfio_device_get_from_name(group, buf);
@@ -1496,7 +1388,7 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
fd_install(fdno, filep);
- if (group->noiommu)
+ if (group->type == VFIO_NO_IOMMU)
dev_warn(device->dev, "vfio-noiommu device opened by user "
"(%s:%d)\n", current->comm, task_pid_nr(current));
return fdno;
@@ -1585,14 +1477,15 @@ static long vfio_group_fops_unl_ioctl(struct file *filep,
static int vfio_group_fops_open(struct inode *inode, struct file *filep)
{
- struct vfio_group *group;
+ struct vfio_group *group =
+ container_of(inode->i_cdev, struct vfio_group, cdev);
int opened;
- group = vfio_group_get_from_minor(iminor(inode));
- if (!group)
+ /* users can be zero if this races with vfio_group_put() */
+ if (!refcount_inc_not_zero(&group->users))
return -ENODEV;
- if (group->noiommu && !capable(CAP_SYS_RAWIO)) {
+ if (group->type == VFIO_NO_IOMMU && !capable(CAP_SYS_RAWIO)) {
vfio_group_put(group);
return -EPERM;
}
@@ -1757,6 +1650,9 @@ struct vfio_group *vfio_group_get_external_user(struct file *filep)
if (ret)
return ERR_PTR(ret);
+ /*
+ * Since the caller holds the fget on the file, group->users must be >= 1
+ */
vfio_group_get(group);
return group;
@@ -2396,7 +2292,7 @@ static int __init vfio_init(void)
{
int ret;
- idr_init(&vfio.group_idr);
+ ida_init(&vfio.group_ida);
mutex_init(&vfio.group_lock);
mutex_init(&vfio.iommu_drivers_lock);
INIT_LIST_HEAD(&vfio.group_list);
@@ -2421,11 +2317,6 @@ static int __init vfio_init(void)
if (ret)
goto err_alloc_chrdev;
- cdev_init(&vfio.group_cdev, &vfio_group_fops);
- ret = cdev_add(&vfio.group_cdev, vfio.group_devt, MINORMASK + 1);
- if (ret)
- goto err_cdev_add;
-
pr_info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
#ifdef CONFIG_VFIO_NOIOMMU
@@ -2433,8 +2324,6 @@ static int __init vfio_init(void)
#endif
return 0;
-err_cdev_add:
- unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
err_alloc_chrdev:
class_destroy(vfio.class);
vfio.class = NULL;
@@ -2450,8 +2339,7 @@ static void __exit vfio_cleanup(void)
#ifdef CONFIG_VFIO_NOIOMMU
vfio_unregister_iommu_driver(&vfio_noiommu_ops);
#endif
- idr_destroy(&vfio.group_idr);
- cdev_del(&vfio.group_cdev);
+ ida_destroy(&vfio.group_ida);
unregister_chrdev_region(vfio.group_devt, MINORMASK + 1);
class_destroy(vfio.class);
vfio.class = NULL;
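As a usage sketch (hypothetical driver, not part of the patch), a driver for a mediated/emulated device would now register through the new helper and let the VFIO core create the VFIO_EMULATED_IOMMU group, instead of allocating a fake IOMMU group itself:

static int my_mdev_probe(struct device *dev)
{
	/* my_mdev_device and my_mdev_dev_ops are illustrative only; the
	 * struct embeds a struct vfio_device named vdev. */
	struct my_mdev_device *mdev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return -ENOMEM;

	vfio_init_group_dev(&mdev->vdev, dev, &my_mdev_dev_ops);

	/* The core allocates a VFIO_EMULATED_IOMMU group behind the scenes. */
	ret = vfio_register_emulated_iommu_dev(&mdev->vdev);
	if (ret) {
		vfio_uninit_group_dev(&mdev->vdev);
		kfree(mdev);
		return ret;
	}

	dev_set_drvdata(dev, mdev);
	return 0;
}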
diff --git a/drivers/vfio/vfio.h b/drivers/vfio/vfio.h
new file mode 100644
index 000000000000..a67130221151
--- /dev/null
+++ b/drivers/vfio/vfio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
+ * Author: Alex Williamson <alex.williamson@redhat.com>
+ */
+
+enum vfio_group_type {
+ /*
+ * Physical device with IOMMU backing.
+ */
+ VFIO_IOMMU,
+
+ /*
+ * Virtual device without IOMMU backing. The VFIO core fakes up an
+ * iommu_group as the iommu_group sysfs interface is part of the
+ * userspace ABI. The user of these devices must not be able to
+ * directly trigger unmediated DMA.
+ */
+ VFIO_EMULATED_IOMMU,
+
+ /*
+ * Physical device without IOMMU backing. The VFIO core fakes up an
+ * iommu_group as the iommu_group sysfs interface is part of the
+ * userspace ABI. Users can trigger unmediated DMA by the device;
+ * usage is highly dangerous, requires an explicit opt-in, and will
+ * taint the kernel.
+ */
+ VFIO_NO_IOMMU,
+};
+
+/* events for the backend driver notify callback */
+enum vfio_iommu_notify_type {
+ VFIO_IOMMU_CONTAINER_CLOSE = 0,
+};
+
+/**
+ * struct vfio_iommu_driver_ops - VFIO IOMMU driver callbacks
+ */
+struct vfio_iommu_driver_ops {
+ char *name;
+ struct module *owner;
+ void *(*open)(unsigned long arg);
+ void (*release)(void *iommu_data);
+ long (*ioctl)(void *iommu_data, unsigned int cmd,
+ unsigned long arg);
+ int (*attach_group)(void *iommu_data,
+ struct iommu_group *group,
+ enum vfio_group_type);
+ void (*detach_group)(void *iommu_data,
+ struct iommu_group *group);
+ int (*pin_pages)(void *iommu_data,
+ struct iommu_group *group,
+ unsigned long *user_pfn,
+ int npage, int prot,
+ unsigned long *phys_pfn);
+ int (*unpin_pages)(void *iommu_data,
+ unsigned long *user_pfn, int npage);
+ int (*register_notifier)(void *iommu_data,
+ unsigned long *events,
+ struct notifier_block *nb);
+ int (*unregister_notifier)(void *iommu_data,
+ struct notifier_block *nb);
+ int (*dma_rw)(void *iommu_data, dma_addr_t user_iova,
+ void *data, size_t count, bool write);
+ struct iommu_domain *(*group_iommu_domain)(void *iommu_data,
+ struct iommu_group *group);
+ void (*notify)(void *iommu_data,
+ enum vfio_iommu_notify_type event);
+};
+
+int vfio_register_iommu_driver(const struct vfio_iommu_driver_ops *ops);
+void vfio_unregister_iommu_driver(const struct vfio_iommu_driver_ops *ops);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index fe888b5dcc00..708a95e61831 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -20,6 +20,7 @@
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
+#include "vfio.h"
#include <asm/iommu.h>
#include <asm/tce.h>
@@ -1238,13 +1239,16 @@ release_exit:
}
static int tce_iommu_attach_group(void *iommu_data,
- struct iommu_group *iommu_group)
+ struct iommu_group *iommu_group, enum vfio_group_type type)
{
int ret = 0;
struct tce_container *container = iommu_data;
struct iommu_table_group *table_group;
struct tce_iommu_group *tcegrp = NULL;
+ if (type == VFIO_EMULATED_IOMMU)
+ return -EINVAL;
+
mutex_lock(&container->lock);
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 0e9217687f5c..f17490ab238f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,10 +36,10 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
-#include <linux/mdev.h>
#include <linux/notifier.h>
#include <linux/dma-iommu.h>
#include <linux/irqdomain.h>
+#include "vfio.h"
#define DRIVER_VERSION "0.2"
#define DRIVER_AUTHOR "Alex Williamson <alex.williamson@redhat.com>"
@@ -65,7 +65,6 @@ MODULE_PARM_DESC(dma_entry_limit,
struct vfio_iommu {
struct list_head domain_list;
struct list_head iova_list;
- struct vfio_domain *external_domain; /* domain for external user */
struct mutex lock;
struct rb_root dma_list;
struct blocking_notifier_head notifier;
@@ -78,6 +77,7 @@ struct vfio_iommu {
bool nesting;
bool dirty_page_tracking;
bool container_open;
+ struct list_head emulated_iommu_groups;
};
struct vfio_domain {
@@ -113,7 +113,6 @@ struct vfio_batch {
struct vfio_iommu_group {
struct iommu_group *iommu_group;
struct list_head next;
- bool mdev_group; /* An mdev group */
bool pinned_page_dirty_scope;
};
@@ -140,9 +139,6 @@ struct vfio_regions {
size_t len;
};
-#define IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu) \
- (!list_empty(&iommu->domain_list))
-
#define DIRTY_BITMAP_BYTES(n) (ALIGN(n, BITS_PER_TYPE(u64)) / BITS_PER_BYTE)
/*
@@ -880,7 +876,7 @@ again:
* already pinned and accounted. Accounting should be done if there is no
* iommu capable domain in the container.
*/
- do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
+ do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
struct vfio_pfn *vpfn;
@@ -969,7 +965,7 @@ static int vfio_iommu_type1_unpin_pages(void *iommu_data,
mutex_lock(&iommu->lock);
- do_accounting = !IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu);
+ do_accounting = list_empty(&iommu->domain_list);
for (i = 0; i < npage; i++) {
struct vfio_dma *dma;
dma_addr_t iova;
@@ -1090,7 +1086,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
if (!dma->size)
return 0;
- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+ if (list_empty(&iommu->domain_list))
return 0;
/*
@@ -1667,7 +1663,7 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
vfio_link_dma(iommu, dma);
/* Don't pin and map if container doesn't contain IOMMU capable domain*/
- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
+ if (list_empty(&iommu->domain_list))
dma->size = size;
else
ret = vfio_pin_map_dma(iommu, dma, size);
@@ -1893,8 +1889,8 @@ static struct vfio_iommu_group*
vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
struct iommu_group *iommu_group)
{
+ struct vfio_iommu_group *group;
struct vfio_domain *domain;
- struct vfio_iommu_group *group = NULL;
list_for_each_entry(domain, &iommu->domain_list, next) {
group = find_iommu_group(domain, iommu_group);
@@ -1902,10 +1898,10 @@ vfio_iommu_find_iommu_group(struct vfio_iommu *iommu,
return group;
}
- if (iommu->external_domain)
- group = find_iommu_group(iommu->external_domain, iommu_group);
-
- return group;
+ list_for_each_entry(group, &iommu->emulated_iommu_groups, next)
+ if (group->iommu_group == iommu_group)
+ return group;
+ return NULL;
}
static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
@@ -1934,89 +1930,6 @@ static bool vfio_iommu_has_sw_msi(struct list_head *group_resv_regions,
return ret;
}
-static int vfio_mdev_attach_domain(struct device *dev, void *data)
-{
- struct mdev_device *mdev = to_mdev_device(dev);
- struct iommu_domain *domain = data;
- struct device *iommu_device;
-
- iommu_device = mdev_get_iommu_device(mdev);
- if (iommu_device) {
- if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
- return iommu_aux_attach_device(domain, iommu_device);
- else
- return iommu_attach_device(domain, iommu_device);
- }
-
- return -EINVAL;
-}
-
-static int vfio_mdev_detach_domain(struct device *dev, void *data)
-{
- struct mdev_device *mdev = to_mdev_device(dev);
- struct iommu_domain *domain = data;
- struct device *iommu_device;
-
- iommu_device = mdev_get_iommu_device(mdev);
- if (iommu_device) {
- if (iommu_dev_feature_enabled(iommu_device, IOMMU_DEV_FEAT_AUX))
- iommu_aux_detach_device(domain, iommu_device);
- else
- iommu_detach_device(domain, iommu_device);
- }
-
- return 0;
-}
-
-static int vfio_iommu_attach_group(struct vfio_domain *domain,
- struct vfio_iommu_group *group)
-{
- if (group->mdev_group)
- return iommu_group_for_each_dev(group->iommu_group,
- domain->domain,
- vfio_mdev_attach_domain);
- else
- return iommu_attach_group(domain->domain, group->iommu_group);
-}
-
-static void vfio_iommu_detach_group(struct vfio_domain *domain,
- struct vfio_iommu_group *group)
-{
- if (group->mdev_group)
- iommu_group_for_each_dev(group->iommu_group, domain->domain,
- vfio_mdev_detach_domain);
- else
- iommu_detach_group(domain->domain, group->iommu_group);
-}
-
-static bool vfio_bus_is_mdev(struct bus_type *bus)
-{
- struct bus_type *mdev_bus;
- bool ret = false;
-
- mdev_bus = symbol_get(mdev_bus_type);
- if (mdev_bus) {
- ret = (bus == mdev_bus);
- symbol_put(mdev_bus_type);
- }
-
- return ret;
-}
-
-static int vfio_mdev_iommu_device(struct device *dev, void *data)
-{
- struct mdev_device *mdev = to_mdev_device(dev);
- struct device **old = data, *new;
-
- new = mdev_get_iommu_device(mdev);
- if (!new || (*old && *old != new))
- return -EINVAL;
-
- *old = new;
-
- return 0;
-}
-
/*
* This is a helper function to insert an address range to iova list.
* The list is initially created with a single entry corresponding to
@@ -2241,81 +2154,58 @@ static void vfio_iommu_iova_insert_copy(struct vfio_iommu *iommu,
}
static int vfio_iommu_type1_attach_group(void *iommu_data,
- struct iommu_group *iommu_group)
+ struct iommu_group *iommu_group, enum vfio_group_type type)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_iommu_group *group;
struct vfio_domain *domain, *d;
struct bus_type *bus = NULL;
- int ret;
bool resv_msi, msi_remap;
phys_addr_t resv_msi_base = 0;
struct iommu_domain_geometry *geo;
LIST_HEAD(iova_copy);
LIST_HEAD(group_resv_regions);
+ int ret = -EINVAL;
mutex_lock(&iommu->lock);
/* Check for duplicates */
- if (vfio_iommu_find_iommu_group(iommu, iommu_group)) {
- mutex_unlock(&iommu->lock);
- return -EINVAL;
- }
+ if (vfio_iommu_find_iommu_group(iommu, iommu_group))
+ goto out_unlock;
+ ret = -ENOMEM;
group = kzalloc(sizeof(*group), GFP_KERNEL);
- domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- if (!group || !domain) {
- ret = -ENOMEM;
- goto out_free;
- }
-
+ if (!group)
+ goto out_unlock;
group->iommu_group = iommu_group;
+ if (type == VFIO_EMULATED_IOMMU) {
+ list_add(&group->next, &iommu->emulated_iommu_groups);
+ /*
+ * An emulated IOMMU group cannot dirty memory directly; it can
+ * only use interfaces that provide dirty tracking.
+ * The iommu scope can only be promoted with the addition of a
+ * dirty tracking group.
+ */
+ group->pinned_page_dirty_scope = true;
+ ret = 0;
+ goto out_unlock;
+ }
+
/* Determine bus_type in order to allocate a domain */
ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
if (ret)
- goto out_free;
-
- if (vfio_bus_is_mdev(bus)) {
- struct device *iommu_device = NULL;
-
- group->mdev_group = true;
-
- /* Determine the isolation type */
- ret = iommu_group_for_each_dev(iommu_group, &iommu_device,
- vfio_mdev_iommu_device);
- if (ret || !iommu_device) {
- if (!iommu->external_domain) {
- INIT_LIST_HEAD(&domain->group_list);
- iommu->external_domain = domain;
- vfio_update_pgsize_bitmap(iommu);
- } else {
- kfree(domain);
- }
-
- list_add(&group->next,
- &iommu->external_domain->group_list);
- /*
- * Non-iommu backed group cannot dirty memory directly,
- * it can only use interfaces that provide dirty
- * tracking.
- * The iommu scope can only be promoted with the
- * addition of a dirty tracking group.
- */
- group->pinned_page_dirty_scope = true;
- mutex_unlock(&iommu->lock);
+ goto out_free_group;
- return 0;
- }
-
- bus = iommu_device->bus;
- }
+ ret = -ENOMEM;
+ domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ goto out_free_group;
+ ret = -EIO;
domain->domain = iommu_domain_alloc(bus);
- if (!domain->domain) {
- ret = -EIO;
- goto out_free;
- }
+ if (!domain->domain)
+ goto out_free_domain;
if (iommu->nesting) {
ret = iommu_enable_nesting(domain->domain);
@@ -2323,7 +2213,7 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
goto out_domain;
}
- ret = vfio_iommu_attach_group(domain, group);
+ ret = iommu_attach_group(domain->domain, group->iommu_group);
if (ret)
goto out_domain;
@@ -2390,15 +2280,17 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
list_for_each_entry(d, &iommu->domain_list, next) {
if (d->domain->ops == domain->domain->ops &&
d->prot == domain->prot) {
- vfio_iommu_detach_group(domain, group);
- if (!vfio_iommu_attach_group(d, group)) {
+ iommu_detach_group(domain->domain, group->iommu_group);
+ if (!iommu_attach_group(d->domain,
+ group->iommu_group)) {
list_add(&group->next, &d->group_list);
iommu_domain_free(domain->domain);
kfree(domain);
goto done;
}
- ret = vfio_iommu_attach_group(domain, group);
+ ret = iommu_attach_group(domain->domain,
+ group->iommu_group);
if (ret)
goto out_domain;
}
@@ -2435,14 +2327,16 @@ done:
return 0;
out_detach:
- vfio_iommu_detach_group(domain, group);
+ iommu_detach_group(domain->domain, group->iommu_group);
out_domain:
iommu_domain_free(domain->domain);
vfio_iommu_iova_free(&iova_copy);
vfio_iommu_resv_free(&group_resv_regions);
-out_free:
+out_free_domain:
kfree(domain);
+out_free_group:
kfree(group);
+out_unlock:
mutex_unlock(&iommu->lock);
return ret;
}
@@ -2567,25 +2461,19 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
LIST_HEAD(iova_copy);
mutex_lock(&iommu->lock);
+ list_for_each_entry(group, &iommu->emulated_iommu_groups, next) {
+ if (group->iommu_group != iommu_group)
+ continue;
+ update_dirty_scope = !group->pinned_page_dirty_scope;
+ list_del(&group->next);
+ kfree(group);
- if (iommu->external_domain) {
- group = find_iommu_group(iommu->external_domain, iommu_group);
- if (group) {
- update_dirty_scope = !group->pinned_page_dirty_scope;
- list_del(&group->next);
- kfree(group);
-
- if (list_empty(&iommu->external_domain->group_list)) {
- if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu)) {
- WARN_ON(iommu->notifier.head);
- vfio_iommu_unmap_unpin_all(iommu);
- }
-
- kfree(iommu->external_domain);
- iommu->external_domain = NULL;
- }
- goto detach_group_done;
+ if (list_empty(&iommu->emulated_iommu_groups) &&
+ list_empty(&iommu->domain_list)) {
+ WARN_ON(iommu->notifier.head);
+ vfio_iommu_unmap_unpin_all(iommu);
}
+ goto detach_group_done;
}
/*
@@ -2600,7 +2488,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
if (!group)
continue;
- vfio_iommu_detach_group(domain, group);
+ iommu_detach_group(domain->domain, group->iommu_group);
update_dirty_scope = !group->pinned_page_dirty_scope;
list_del(&group->next);
kfree(group);
@@ -2613,7 +2501,7 @@ static void vfio_iommu_type1_detach_group(void *iommu_data,
*/
if (list_empty(&domain->group_list)) {
if (list_is_singular(&iommu->domain_list)) {
- if (!iommu->external_domain) {
+ if (list_empty(&iommu->emulated_iommu_groups)) {
WARN_ON(iommu->notifier.head);
vfio_iommu_unmap_unpin_all(iommu);
} else {
@@ -2677,41 +2565,43 @@ static void *vfio_iommu_type1_open(unsigned long arg)
mutex_init(&iommu->lock);
BLOCKING_INIT_NOTIFIER_HEAD(&iommu->notifier);
init_waitqueue_head(&iommu->vaddr_wait);
+ iommu->pgsize_bitmap = PAGE_MASK;
+ INIT_LIST_HEAD(&iommu->emulated_iommu_groups);
return iommu;
}
-static void vfio_release_domain(struct vfio_domain *domain, bool external)
+static void vfio_release_domain(struct vfio_domain *domain)
{
struct vfio_iommu_group *group, *group_tmp;
list_for_each_entry_safe(group, group_tmp,
&domain->group_list, next) {
- if (!external)
- vfio_iommu_detach_group(domain, group);
+ iommu_detach_group(domain->domain, group->iommu_group);
list_del(&group->next);
kfree(group);
}
- if (!external)
- iommu_domain_free(domain->domain);
+ iommu_domain_free(domain->domain);
}
static void vfio_iommu_type1_release(void *iommu_data)
{
struct vfio_iommu *iommu = iommu_data;
struct vfio_domain *domain, *domain_tmp;
+ struct vfio_iommu_group *group, *next_group;
- if (iommu->external_domain) {
- vfio_release_domain(iommu->external_domain, true);
- kfree(iommu->external_domain);
+ list_for_each_entry_safe(group, next_group,
+ &iommu->emulated_iommu_groups, next) {
+ list_del(&group->next);
+ kfree(group);
}
vfio_iommu_unmap_unpin_all(iommu);
list_for_each_entry_safe(domain, domain_tmp,
&iommu->domain_list, next) {
- vfio_release_domain(domain, false);
+ vfio_release_domain(domain);
list_del(&domain->next);
kfree(domain);
}
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 39039e046117..01c59ce7e250 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -237,7 +237,6 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
struct vhost_vdpa_config __user *c)
{
struct vdpa_device *vdpa = v->vdpa;
- const struct vdpa_config_ops *ops = vdpa->config;
struct vhost_vdpa_config config;
unsigned long size = offsetof(struct vhost_vdpa_config, buf);
u8 *buf;
@@ -251,7 +250,7 @@ static long vhost_vdpa_set_config(struct vhost_vdpa *v,
if (IS_ERR(buf))
return PTR_ERR(buf);
- ops->set_config(vdpa, config.off, buf, config.len);
+ vdpa_set_config(vdpa, config.off, buf, config.len);
kvfree(buf);
return 0;
diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
index 8ea8f079cde2..edca3703b964 100644
--- a/drivers/video/fbdev/efifb.c
+++ b/drivers/video/fbdev/efifb.c
@@ -47,6 +47,8 @@ static bool use_bgrt = true;
static bool request_mem_succeeded = false;
static u64 mem_flags = EFI_MEMORY_WC | EFI_MEMORY_UC;
+static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */
+
static struct fb_var_screeninfo efifb_defined = {
.activate = FB_ACTIVATE_NOW,
.height = -1,
@@ -243,6 +245,9 @@ static inline void efifb_show_boot_graphics(struct fb_info *info) {}
static void efifb_destroy(struct fb_info *info)
{
+ if (efifb_pci_dev)
+ pm_runtime_put(&efifb_pci_dev->dev);
+
if (info->screen_base) {
if (mem_flags & (EFI_MEMORY_UC | EFI_MEMORY_WC))
iounmap(info->screen_base);
@@ -333,7 +338,6 @@ ATTRIBUTE_GROUPS(efifb);
static bool pci_dev_disabled; /* FB base matches BAR of a disabled device */
-static struct pci_dev *efifb_pci_dev; /* dev with BAR covering the efifb */
static struct resource *bar_resource;
static u64 bar_offset;
@@ -569,17 +573,22 @@ static int efifb_probe(struct platform_device *dev)
pr_err("efifb: cannot allocate colormap\n");
goto err_groups;
}
+
+ if (efifb_pci_dev)
+ WARN_ON(pm_runtime_get_sync(&efifb_pci_dev->dev) < 0);
+
err = register_framebuffer(info);
if (err < 0) {
pr_err("efifb: cannot register framebuffer\n");
- goto err_fb_dealoc;
+ goto err_put_rpm_ref;
}
fb_info(info, "%s frame buffer device\n", info->fix.id);
- if (efifb_pci_dev)
- pm_runtime_get_sync(&efifb_pci_dev->dev);
return 0;
-err_fb_dealoc:
+err_put_rpm_ref:
+ if (efifb_pci_dev)
+ pm_runtime_put(&efifb_pci_dev->dev);
+
fb_dealloc_cmap(&info->cmap);
err_groups:
sysfs_remove_groups(&dev->dev.kobj, efifb_groups);
@@ -603,8 +612,6 @@ static int efifb_remove(struct platform_device *pdev)
unregister_framebuffer(info);
sysfs_remove_groups(&pdev->dev.kobj, efifb_groups);
framebuffer_release(info);
- if (efifb_pci_dev)
- pm_runtime_put(&efifb_pci_dev->dev);
return 0;
}
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index ce1b3f6ec325..8fcf94cd2c96 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -20,6 +20,15 @@ config VIRTIO_PCI_LIB
PCI device with possible vendor specific extensions. Any
module that selects this module must depend on PCI.
+config VIRTIO_PCI_LIB_LEGACY
+ tristate
+ help
+ Legacy PCI device (Virtio PCI Card 0.9.x Draft and older device)
+ implementation.
+ This module implements the basic probe and control for devices
+ which use the legacy PCI interface. Any module that selects this
+ module must depend on PCI.
+
menuconfig VIRTIO_MENU
bool "Virtio drivers"
default y
@@ -43,6 +52,7 @@ config VIRTIO_PCI_LEGACY
bool "Support for legacy virtio draft 0.9.X and older devices"
default y
depends on VIRTIO_PCI
+ select VIRTIO_PCI_LIB_LEGACY
help
Virtio PCI Card 0.9.X Draft (circa 2014) and older device support.
diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile
index 699bbea0465f..0a82d0873248 100644
--- a/drivers/virtio/Makefile
+++ b/drivers/virtio/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o
obj-$(CONFIG_VIRTIO_PCI_LIB) += virtio_pci_modern_dev.o
+obj-$(CONFIG_VIRTIO_PCI_LIB_LEGACY) += virtio_pci_legacy_dev.o
obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o
obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o
virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index b35bb2d57f62..fdbde1db5ec5 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -24,17 +24,46 @@ MODULE_PARM_DESC(force_legacy,
"Force legacy mode for transitional virtio 1 devices");
#endif
-/* wait for pending irq handlers */
-void vp_synchronize_vectors(struct virtio_device *vdev)
+/* disable irq handlers */
+void vp_disable_cbs(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
int i;
- if (vp_dev->intx_enabled)
+ if (vp_dev->intx_enabled) {
+ /*
+ * The synchronize_irq() below guarantees that any
+ * interrupt for this line arriving after
+ * synchronize_irq() has completed will see
+ * intx_soft_enabled == false.
+ */
+ WRITE_ONCE(vp_dev->intx_soft_enabled, false);
synchronize_irq(vp_dev->pci_dev->irq);
+ }
for (i = 0; i < vp_dev->msix_vectors; ++i)
- synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
+ disable_irq(pci_irq_vector(vp_dev->pci_dev, i));
+}
+
+/* enable irq handlers */
+void vp_enable_cbs(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+ int i;
+
+ if (vp_dev->intx_enabled) {
+ disable_irq(vp_dev->pci_dev->irq);
+ /*
+ * The above disable_irq() provides TSO ordering and
+ * as such promotes the below store to store-release.
+ */
+ WRITE_ONCE(vp_dev->intx_soft_enabled, true);
+ enable_irq(vp_dev->pci_dev->irq);
+ return;
+ }
+
+ for (i = 0; i < vp_dev->msix_vectors; ++i)
+ enable_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
/* the notify function used when creating a virt queue */
@@ -84,6 +113,9 @@ static irqreturn_t vp_interrupt(int irq, void *opaque)
struct virtio_pci_device *vp_dev = opaque;
u8 isr;
+ if (!READ_ONCE(vp_dev->intx_soft_enabled))
+ return IRQ_NONE;
+
/* reading the ISR has the effect of also clearing it so it's very
* important to save off the value. */
isr = ioread8(vp_dev->isr);
@@ -141,7 +173,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-config", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
- vp_config_changed, 0, vp_dev->msix_names[v],
+ vp_config_changed, IRQF_NO_AUTOEN,
+ vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
@@ -160,7 +193,8 @@ static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
"%s-virtqueues", name);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
- vp_vring_interrupt, 0, vp_dev->msix_names[v],
+ vp_vring_interrupt, IRQF_NO_AUTOEN,
+ vp_dev->msix_names[v],
vp_dev);
if (err)
goto error;
@@ -337,7 +371,7 @@ static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
"%s-%s",
dev_name(&vp_dev->vdev.dev), names[i]);
err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
- vring_interrupt, 0,
+ vring_interrupt, IRQF_NO_AUTOEN,
vp_dev->msix_names[msix_vec],
vqs[i]);
if (err)
@@ -549,6 +583,8 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
pci_set_master(pci_dev);
+ vp_dev->is_legacy = vp_dev->ldev.ioaddr ? true : false;
+
rc = register_virtio_device(&vp_dev->vdev);
reg_dev = vp_dev;
if (rc)
@@ -557,10 +593,10 @@ static int virtio_pci_probe(struct pci_dev *pci_dev,
return 0;
err_register:
- if (vp_dev->ioaddr)
- virtio_pci_legacy_remove(vp_dev);
+ if (vp_dev->is_legacy)
+ virtio_pci_legacy_remove(vp_dev);
else
- virtio_pci_modern_remove(vp_dev);
+ virtio_pci_modern_remove(vp_dev);
err_probe:
pci_disable_device(pci_dev);
err_enable_device:
@@ -587,7 +623,7 @@ static void virtio_pci_remove(struct pci_dev *pci_dev)
unregister_virtio_device(&vp_dev->vdev);
- if (vp_dev->ioaddr)
+ if (vp_dev->is_legacy)
virtio_pci_legacy_remove(vp_dev);
else
virtio_pci_modern_remove(vp_dev);
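A generic sketch (not from the patch) of the soft-disable idiom that the new vp_disable_cbs()/vp_interrupt() pair relies on for the shared INTx line; the names here are illustrative only:

#include <linux/interrupt.h>

static bool soft_enabled;

static irqreturn_t sketch_handler(int irq, void *opaque)
{
	/* Handlers that run after sketch_soft_disable() returns see false. */
	if (!READ_ONCE(soft_enabled))
		return IRQ_NONE;
	/* ... dispatch config/virtqueue callbacks here ... */
	return IRQ_HANDLED;
}

static void sketch_soft_disable(int irq)
{
	WRITE_ONCE(soft_enabled, false);	/* clear the flag first */
	synchronize_irq(irq);			/* then wait out in-flight handlers */
}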
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index beec047a8f8d..23f6c5c678d5 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -25,6 +25,7 @@
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
+#include <linux/virtio_pci_legacy.h>
#include <linux/virtio_pci_modern.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
@@ -44,16 +45,14 @@ struct virtio_pci_vq_info {
struct virtio_pci_device {
struct virtio_device vdev;
struct pci_dev *pci_dev;
+ struct virtio_pci_legacy_device ldev;
struct virtio_pci_modern_device mdev;
- /* In legacy mode, these two point to within ->legacy. */
+ bool is_legacy;
+
/* Where to read and clear interrupt */
u8 __iomem *isr;
- /* Legacy only field */
- /* the IO mapping for the PCI config space */
- void __iomem *ioaddr;
-
/* a list of queues so we can dispatch IRQs */
spinlock_t lock;
struct list_head virtqueues;
@@ -64,6 +63,7 @@ struct virtio_pci_device {
/* MSI-X support */
int msix_enabled;
int intx_enabled;
+ bool intx_soft_enabled;
cpumask_var_t *msix_affinity_masks;
/* Name strings for interrupts. This size should be enough,
* and I'm too lazy to allocate each name separately. */
@@ -102,8 +102,10 @@ static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
return container_of(vdev, struct virtio_pci_device, vdev);
}
-/* wait for pending irq handlers */
-void vp_synchronize_vectors(struct virtio_device *vdev);
+/* disable irq handlers */
+void vp_disable_cbs(struct virtio_device *vdev);
+/* enable irq handlers */
+void vp_enable_cbs(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
/* the config->del_vqs() implementation */
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index d62e9835aeec..b3f8128b7983 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -14,6 +14,7 @@
* Michael S. Tsirkin <mst@redhat.com>
*/
+#include "linux/virtio_pci_legacy.h"
#include "virtio_pci_common.h"
/* virtio config->get_features() implementation */
@@ -23,7 +24,7 @@ static u64 vp_get_features(struct virtio_device *vdev)
/* When someone needs more than 32 feature bits, we'll need to
* steal a bit to indicate that the rest are somewhere else. */
- return ioread32(vp_dev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+ return vp_legacy_get_features(&vp_dev->ldev);
}
/* virtio config->finalize_features() implementation */
@@ -38,7 +39,7 @@ static int vp_finalize_features(struct virtio_device *vdev)
BUG_ON((u32)vdev->features != vdev->features);
/* We only support 32 feature bits. */
- iowrite32(vdev->features, vp_dev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+ vp_legacy_set_features(&vp_dev->ldev, vdev->features);
return 0;
}
@@ -48,7 +49,7 @@ static void vp_get(struct virtio_device *vdev, unsigned offset,
void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr +
+ void __iomem *ioaddr = vp_dev->ldev.ioaddr +
VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
offset;
u8 *ptr = buf;
@@ -64,7 +65,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- void __iomem *ioaddr = vp_dev->ioaddr +
+ void __iomem *ioaddr = vp_dev->ldev.ioaddr +
VIRTIO_PCI_CONFIG_OFF(vp_dev->msix_enabled) +
offset;
const u8 *ptr = buf;
@@ -78,7 +79,7 @@ static void vp_set(struct virtio_device *vdev, unsigned offset,
static u8 vp_get_status(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
- return ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ return vp_legacy_get_status(&vp_dev->ldev);
}
static void vp_set_status(struct virtio_device *vdev, u8 status)
@@ -86,28 +87,24 @@ static void vp_set_status(struct virtio_device *vdev, u8 status)
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* We should never be setting status to 0. */
BUG_ON(status == 0);
- iowrite8(status, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ vp_legacy_set_status(&vp_dev->ldev, status);
}
static void vp_reset(struct virtio_device *vdev)
{
struct virtio_pci_device *vp_dev = to_vp_device(vdev);
/* 0 status means a reset. */
- iowrite8(0, vp_dev->ioaddr + VIRTIO_PCI_STATUS);
+ vp_legacy_set_status(&vp_dev->ldev, 0);
/* Flush out the status write, and flush in device writes,
* including MSI-X interrupts, if any. */
- ioread8(vp_dev->ioaddr + VIRTIO_PCI_STATUS);
- /* Flush pending VQ/configuration callbacks. */
- vp_synchronize_vectors(vdev);
+ vp_legacy_get_status(&vp_dev->ldev);
+ /* Disable VQ/configuration callbacks. */
+ vp_disable_cbs(vdev);
}
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
{
- /* Setup the vector used for configuration events */
- iowrite16(vector, vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
- /* Verify we had enough resources to assign the vector */
- /* Will also flush the write out to device */
- return ioread16(vp_dev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ return vp_legacy_config_vector(&vp_dev->ldev, vector);
}
static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
@@ -123,12 +120,9 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
int err;
u64 q_pfn;
- /* Select the queue we're interested in */
- iowrite16(index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
-
/* Check if queue is either not available or already active. */
- num = ioread16(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
- if (!num || ioread32(vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN))
+ num = vp_legacy_get_queue_size(&vp_dev->ldev, index);
+ if (!num || vp_legacy_get_queue_enable(&vp_dev->ldev, index))
return ERR_PTR(-ENOENT);
info->msix_vector = msix_vec;
@@ -151,13 +145,12 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
}
/* activate the queue */
- iowrite32(q_pfn, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ vp_legacy_set_queue_address(&vp_dev->ldev, index, q_pfn);
- vq->priv = (void __force *)vp_dev->ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
+ vq->priv = (void __force *)vp_dev->ldev.ioaddr + VIRTIO_PCI_QUEUE_NOTIFY;
if (msix_vec != VIRTIO_MSI_NO_VECTOR) {
- iowrite16(msix_vec, vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
- msix_vec = ioread16(vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ msix_vec = vp_legacy_queue_vector(&vp_dev->ldev, index, msix_vec);
if (msix_vec == VIRTIO_MSI_NO_VECTOR) {
err = -EBUSY;
goto out_deactivate;
@@ -167,7 +160,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev,
return vq;
out_deactivate:
- iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ vp_legacy_set_queue_address(&vp_dev->ldev, index, 0);
out_del_vq:
vring_del_virtqueue(vq);
return ERR_PTR(err);
@@ -178,22 +171,21 @@ static void del_vq(struct virtio_pci_vq_info *info)
struct virtqueue *vq = info->vq;
struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
- iowrite16(vq->index, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
-
if (vp_dev->msix_enabled) {
- iowrite16(VIRTIO_MSI_NO_VECTOR,
- vp_dev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ vp_legacy_queue_vector(&vp_dev->ldev, vq->index,
+ VIRTIO_MSI_NO_VECTOR);
/* Flush the write out to device */
- ioread8(vp_dev->ioaddr + VIRTIO_PCI_ISR);
+ ioread8(vp_dev->ldev.ioaddr + VIRTIO_PCI_ISR);
}
/* Select and deactivate the queue */
- iowrite32(0, vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+ vp_legacy_set_queue_address(&vp_dev->ldev, vq->index, 0);
vring_del_virtqueue(vq);
}
static const struct virtio_config_ops virtio_pci_config_ops = {
+ .enable_cbs = vp_enable_cbs,
.get = vp_get,
.set = vp_set,
.get_status = vp_get_status,
@@ -211,51 +203,18 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
/* the PCI probing function */
int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
+ struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;
struct pci_dev *pci_dev = vp_dev->pci_dev;
int rc;
- /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
- if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
- return -ENODEV;
-
- if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION) {
- printk(KERN_ERR "virtio_pci: expected ABI version %d, got %d\n",
- VIRTIO_PCI_ABI_VERSION, pci_dev->revision);
- return -ENODEV;
- }
-
- rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
- if (rc) {
- rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
- } else {
- /*
- * The virtio ring base address is expressed as a 32-bit PFN,
- * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
- */
- dma_set_coherent_mask(&pci_dev->dev,
- DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
- }
-
- if (rc)
- dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+ ldev->pci_dev = pci_dev;
- rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
+ rc = vp_legacy_probe(ldev);
if (rc)
return rc;
- rc = -ENOMEM;
- vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0);
- if (!vp_dev->ioaddr)
- goto err_iomap;
-
- vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR;
-
- /* we use the subsystem vendor/device id as the virtio vendor/device
- * id. this allows us to use the same PCI vendor/device id for all
- * virtio devices and to identify the particular virtio driver by
- * the subsystem ids */
- vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor;
- vp_dev->vdev.id.device = pci_dev->subsystem_device;
+ vp_dev->isr = ldev->isr;
+ vp_dev->vdev.id = ldev->id;
vp_dev->vdev.config = &virtio_pci_config_ops;
@@ -264,16 +223,11 @@ int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
vp_dev->del_vq = del_vq;
return 0;
-
-err_iomap:
- pci_release_region(pci_dev, 0);
- return rc;
}
void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
- struct pci_dev *pci_dev = vp_dev->pci_dev;
+ struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;
- pci_iounmap(pci_dev, vp_dev->ioaddr);
- pci_release_region(pci_dev, 0);
+ vp_legacy_remove(ldev);
}
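
With the raw register pokes factored out, legacy queue bring-up reduces to a handful of vp_legacy_*() calls. A condensed, illustrative sketch of the flow setup_vq() above implements (ring allocation and error unwinding trimmed; sketch_activate_vq() is a hypothetical name):

#include "virtio_pci_common.h"

/* Sketch: program one legacy virtqueue through the new helpers. */
static int sketch_activate_vq(struct virtio_pci_device *vp_dev, u16 index,
                              u32 q_pfn, u16 msix_vec)
{
        struct virtio_pci_legacy_device *ldev = &vp_dev->ldev;

        /* The queue must exist and must not already be active. */
        if (!vp_legacy_get_queue_size(ldev, index) ||
            vp_legacy_get_queue_enable(ldev, index))
                return -ENOENT;

        /* Tell the device where the ring lives, as a 32-bit PFN. */
        vp_legacy_set_queue_address(ldev, index, q_pfn);

        /* Route the queue interrupt; the device echoes the accepted vector. */
        if (msix_vec != VIRTIO_MSI_NO_VECTOR &&
            vp_legacy_queue_vector(ldev, index, msix_vec) == VIRTIO_MSI_NO_VECTOR) {
                vp_legacy_set_queue_address(ldev, index, 0);
                return -EBUSY;
        }

        return 0;
}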
diff --git a/drivers/virtio/virtio_pci_legacy_dev.c b/drivers/virtio/virtio_pci_legacy_dev.c
new file mode 100644
index 000000000000..9b97680dd02b
--- /dev/null
+++ b/drivers/virtio/virtio_pci_legacy_dev.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include "linux/virtio_pci.h"
+#include <linux/virtio_pci_legacy.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+
+
+/*
+ * vp_legacy_probe - probe the legacy virtio-pci device; note that the
+ * caller is required to enable the PCI device before calling this function.
+ * @ldev: the legacy virtio-pci device
+ *
+ * Return 0 on success, or a negative error code on failure
+ */
+int vp_legacy_probe(struct virtio_pci_legacy_device *ldev)
+{
+ struct pci_dev *pci_dev = ldev->pci_dev;
+ int rc;
+
+ /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */
+ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f)
+ return -ENODEV;
+
+ if (pci_dev->revision != VIRTIO_PCI_ABI_VERSION)
+ return -ENODEV;
+
+ rc = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64));
+ if (rc) {
+ rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32));
+ } else {
+ /*
+ * The virtio ring base address is expressed as a 32-bit PFN,
+ * with a page size of 1 << VIRTIO_PCI_QUEUE_ADDR_SHIFT.
+ */
+ dma_set_coherent_mask(&pci_dev->dev,
+ DMA_BIT_MASK(32 + VIRTIO_PCI_QUEUE_ADDR_SHIFT));
+ }
+
+ if (rc)
+ dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
+ rc = pci_request_region(pci_dev, 0, "virtio-pci-legacy");
+ if (rc)
+ return rc;
+
+ ldev->ioaddr = pci_iomap(pci_dev, 0, 0);
+ if (!ldev->ioaddr)
+ goto err_iomap;
+
+ ldev->isr = ldev->ioaddr + VIRTIO_PCI_ISR;
+
+ ldev->id.vendor = pci_dev->subsystem_vendor;
+ ldev->id.device = pci_dev->subsystem_device;
+
+ return 0;
+err_iomap:
+ pci_release_region(pci_dev, 0);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(vp_legacy_probe);
+
+/*
+ * vp_legacy_remove - remove and clean up the legacy virtio-pci device
+ * @ldev: the legacy virtio-pci device
+ */
+void vp_legacy_remove(struct virtio_pci_legacy_device *ldev)
+{
+ struct pci_dev *pci_dev = ldev->pci_dev;
+
+ pci_iounmap(pci_dev, ldev->ioaddr);
+ pci_release_region(pci_dev, 0);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_remove);
+
+/*
+ * vp_legacy_get_features - get features from device
+ * @ldev: the legacy virtio-pci device
+ *
+ * Returns the features read from the device
+ */
+u64 vp_legacy_get_features(struct virtio_pci_legacy_device *ldev)
+{
+ return ioread32(ldev->ioaddr + VIRTIO_PCI_HOST_FEATURES);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_get_features);
+
+/*
+ * vp_legacy_get_driver_features - get driver features from device
+ * @ldev: the legacy virtio-pci device
+ *
+ * Returns the driver features read from the device
+ */
+u64 vp_legacy_get_driver_features(struct virtio_pci_legacy_device *ldev)
+{
+ return ioread32(ldev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_get_driver_features);
+
+/*
+ * vp_legacy_set_features - set features to device
+ * @ldev: the legacy virtio-pci device
+ * @features: the features set to device
+ */
+void vp_legacy_set_features(struct virtio_pci_legacy_device *ldev,
+ u32 features)
+{
+ iowrite32(features, ldev->ioaddr + VIRTIO_PCI_GUEST_FEATURES);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_set_features);
+
+/*
+ * vp_legacy_get_status - get the device status
+ * @ldev: the legacy virtio-pci device
+ *
+ * Returns the status read from device
+ */
+u8 vp_legacy_get_status(struct virtio_pci_legacy_device *ldev)
+{
+ return ioread8(ldev->ioaddr + VIRTIO_PCI_STATUS);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_get_status);
+
+/*
+ * vp_legacy_set_status - set status to device
+ * @ldev: the legacy virtio-pci device
+ * @status: the status set to device
+ */
+void vp_legacy_set_status(struct virtio_pci_legacy_device *ldev,
+ u8 status)
+{
+ iowrite8(status, ldev->ioaddr + VIRTIO_PCI_STATUS);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_set_status);
+
+/*
+ * vp_legacy_queue_vector - set the MSIX vector for a specific virtqueue
+ * @ldev: the legacy virtio-pci device
+ * @index: queue index
+ * @vector: the MSI-X vector to use for this virtqueue
+ *
+ * Returns the vector actually read back from the device
+ */
+u16 vp_legacy_queue_vector(struct virtio_pci_legacy_device *ldev,
+ u16 index, u16 vector)
+{
+ iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite16(vector, ldev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+ /* Flush the write out to device */
+ return ioread16(ldev->ioaddr + VIRTIO_MSI_QUEUE_VECTOR);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_queue_vector);
+
+/*
+ * vp_legacy_config_vector - set the vector for config interrupt
+ * @ldev: the legacy virtio-pci device
+ * @vector: the config vector
+ *
+ * Returns the config vector read from the device
+ */
+u16 vp_legacy_config_vector(struct virtio_pci_legacy_device *ldev,
+ u16 vector)
+{
+ /* Setup the vector used for configuration events */
+ iowrite16(vector, ldev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+ /* Verify we had enough resources to assign the vector */
+ /* Will also flush the write out to device */
+ return ioread16(ldev->ioaddr + VIRTIO_MSI_CONFIG_VECTOR);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_config_vector);
+
+/*
+ * vp_legacy_set_queue_address - set the virtqueue address
+ * @ldev: the legacy virtio-pci device
+ * @index: the queue index
+ * @queue_pfn: pfn of the virtqueue
+ */
+void vp_legacy_set_queue_address(struct virtio_pci_legacy_device *ldev,
+ u16 index, u32 queue_pfn)
+{
+ iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ iowrite32(queue_pfn, ldev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_set_queue_address);
+
+/*
+ * vp_legacy_get_queue_enable - check whether a virtqueue is enabled
+ * @ldev: the legacy virtio-pci device
+ * @index: the queue index
+ *
+ * Returns whether a virtqueue is enabled or not
+ */
+bool vp_legacy_get_queue_enable(struct virtio_pci_legacy_device *ldev,
+ u16 index)
+{
+ iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ return ioread32(ldev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_get_queue_enable);
+
+/*
+ * vp_legacy_get_queue_size - get size for a virtqueue
+ * @ldev: the legacy virtio-pci device
+ * @index: the queue index
+ *
+ * Returns the size of the virtqueue
+ */
+u16 vp_legacy_get_queue_size(struct virtio_pci_legacy_device *ldev,
+ u16 index)
+{
+ iowrite16(index, ldev->ioaddr + VIRTIO_PCI_QUEUE_SEL);
+ return ioread16(ldev->ioaddr + VIRTIO_PCI_QUEUE_NUM);
+}
+EXPORT_SYMBOL_GPL(vp_legacy_get_queue_size);
+
+MODULE_VERSION("0.1");
+MODULE_DESCRIPTION("Legacy Virtio PCI Device");
+MODULE_AUTHOR("Wu Zongyong <wuzongyong@linux.alibaba.com>");
+MODULE_LICENSE("GPL");
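
Because the legacy transport is now a standalone module, other drivers can drive a legacy virtio-pci BAR0 layout without depending on virtio-pci itself. A hedged usage sketch; the consumer functions are hypothetical, while the vp_legacy_*() calls and the pci_dev/ioaddr/isr/id fields are the ones defined in the new file above:

#include <linux/pci.h>
#include <linux/virtio_config.h>
#include <linux/virtio_pci_legacy.h>

/* Hypothetical consumer: bring a legacy device up far enough to read features. */
static int sketch_bind_legacy(struct pci_dev *pci_dev,
                              struct virtio_pci_legacy_device *ldev)
{
        u64 features;
        int rc;

        rc = pci_enable_device(pci_dev);        /* vp_legacy_probe() expects this */
        if (rc)
                return rc;

        ldev->pci_dev = pci_dev;
        rc = vp_legacy_probe(ldev);             /* maps BAR0, fills isr and id */
        if (rc) {
                pci_disable_device(pci_dev);
                return rc;
        }

        vp_legacy_set_status(ldev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
        features = vp_legacy_get_features(ldev);
        dev_info(&pci_dev->dev, "legacy features: %#llx\n", features);

        return 0;
}

static void sketch_unbind_legacy(struct virtio_pci_legacy_device *ldev)
{
        vp_legacy_set_status(ldev, 0);          /* 0 resets the device */
        vp_legacy_remove(ldev);
        pci_disable_device(ldev->pci_dev);
}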
diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
index 30654d3a0b41..5455bc041fb6 100644
--- a/drivers/virtio/virtio_pci_modern.c
+++ b/drivers/virtio/virtio_pci_modern.c
@@ -172,8 +172,8 @@ static void vp_reset(struct virtio_device *vdev)
*/
while (vp_modern_get_status(mdev))
msleep(1);
- /* Flush pending VQ/configuration callbacks. */
- vp_synchronize_vectors(vdev);
+ /* Disable VQ/configuration callbacks. */
+ vp_disable_cbs(vdev);
}
static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
@@ -380,6 +380,7 @@ static bool vp_get_shm_region(struct virtio_device *vdev,
}
static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
+ .enable_cbs = vp_enable_cbs,
.get = NULL,
.set = NULL,
.generation = vp_generation,
@@ -397,6 +398,7 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
};
static const struct virtio_config_ops virtio_pci_config_ops = {
+ .enable_cbs = vp_enable_cbs,
.get = vp_get,
.set = vp_set,
.generation = vp_generation,
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index dd95dfd85e98..00f64f2f8b72 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -14,6 +14,9 @@
#include <linux/spinlock.h>
#include <xen/xen.h>
+static bool force_used_validation = false;
+module_param(force_used_validation, bool, 0444);
+
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...) \
@@ -79,8 +82,8 @@ struct vring_desc_state_packed {
};
struct vring_desc_extra {
- dma_addr_t addr; /* Buffer DMA addr. */
- u32 len; /* Buffer length. */
+ dma_addr_t addr; /* Descriptor DMA addr. */
+ u32 len; /* Descriptor length. */
u16 flags; /* Descriptor flags. */
u16 next; /* The next desc state in a list. */
};
@@ -182,6 +185,9 @@ struct vring_virtqueue {
} packed;
};
+ /* Per-descriptor in-buffer (device-writable) length */
+ u32 *buflen;
+
/* How to notify other side. FIXME: commonalize hcalls! */
bool (*notify)(struct virtqueue *vq);
@@ -490,6 +496,7 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
unsigned int i, n, avail, descs_used, prev, err_idx;
int head;
bool indirect;
+ u32 buflen = 0;
START_USE(vq);
@@ -571,12 +578,13 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
VRING_DESC_F_NEXT |
VRING_DESC_F_WRITE,
indirect);
+ buflen += sg->length;
}
}
/* Last one doesn't continue. */
desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);
if (!indirect && vq->use_dma_api)
- vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags =
+ vq->split.desc_extra[prev & (vq->split.vring.num - 1)].flags &=
~VRING_DESC_F_NEXT;
if (indirect) {
@@ -610,6 +618,10 @@ static inline int virtqueue_add_split(struct virtqueue *_vq,
else
vq->split.desc_state[head].indir_desc = ctx;
+ /* Store the in-buffer length if necessary */
+ if (vq->buflen)
+ vq->buflen[head] = buflen;
+
/* Put entry in available array (but don't update avail->idx until they
* do sync). */
avail = vq->split.avail_idx_shadow & (vq->split.vring.num - 1);
@@ -784,6 +796,11 @@ static void *virtqueue_get_buf_ctx_split(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", i);
return NULL;
}
+ if (vq->buflen && unlikely(*len > vq->buflen[i])) {
+ BAD_RING(vq, "used len %d is larger than in buflen %u\n",
+ *len, vq->buflen[i]);
+ return NULL;
+ }
/* detach_buf_split clears data, so grab it now. */
ret = vq->split.desc_state[i].data;
@@ -1050,21 +1067,24 @@ static struct vring_packed_desc *alloc_indirect_packed(unsigned int total_sg,
}
static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
- struct scatterlist *sgs[],
- unsigned int total_sg,
- unsigned int out_sgs,
- unsigned int in_sgs,
- void *data,
- gfp_t gfp)
+ struct scatterlist *sgs[],
+ unsigned int total_sg,
+ unsigned int out_sgs,
+ unsigned int in_sgs,
+ void *data,
+ gfp_t gfp)
{
struct vring_packed_desc *desc;
struct scatterlist *sg;
unsigned int i, n, err_idx;
u16 head, id;
dma_addr_t addr;
+ u32 buflen = 0;
head = vq->packed.next_avail_idx;
desc = alloc_indirect_packed(total_sg, gfp);
+ if (!desc)
+ return -ENOMEM;
if (unlikely(vq->vq.num_free < 1)) {
pr_debug("Can't add buf len 1 - avail = 0\n");
@@ -1089,6 +1109,8 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
desc[i].addr = cpu_to_le64(addr);
desc[i].len = cpu_to_le32(sg->length);
i++;
+ if (n >= out_sgs)
+ buflen += sg->length;
}
}
@@ -1142,6 +1164,10 @@ static int virtqueue_add_indirect_packed(struct vring_virtqueue *vq,
vq->packed.desc_state[id].indir_desc = desc;
vq->packed.desc_state[id].last = id;
+ /* Store the in-buffer length if necessary */
+ if (vq->buflen)
+ vq->buflen[id] = buflen;
+
vq->num_added += 1;
pr_debug("Added buffer head %i to %p\n", head, vq);
@@ -1176,6 +1202,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
unsigned int i, n, c, descs_used, err_idx;
__le16 head_flags, flags;
u16 head, id, prev, curr, avail_used_flags;
+ int err;
+ u32 buflen = 0;
START_USE(vq);
@@ -1191,9 +1219,14 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
BUG_ON(total_sg == 0);
- if (virtqueue_use_indirect(_vq, total_sg))
- return virtqueue_add_indirect_packed(vq, sgs, total_sg,
- out_sgs, in_sgs, data, gfp);
+ if (virtqueue_use_indirect(_vq, total_sg)) {
+ err = virtqueue_add_indirect_packed(vq, sgs, total_sg, out_sgs,
+ in_sgs, data, gfp);
+ if (err != -ENOMEM)
+ return err;
+
+ /* fall back on direct */
+ }
head = vq->packed.next_avail_idx;
avail_used_flags = vq->packed.avail_used_flags;
@@ -1250,6 +1283,8 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
1 << VRING_PACKED_DESC_F_AVAIL |
1 << VRING_PACKED_DESC_F_USED;
}
+ if (n >= out_sgs)
+ buflen += sg->length;
}
}
@@ -1269,6 +1304,10 @@ static inline int virtqueue_add_packed(struct virtqueue *_vq,
vq->packed.desc_state[id].indir_desc = ctx;
vq->packed.desc_state[id].last = prev;
+ /* Store the in-buffer length if necessary */
+ if (vq->buflen)
+ vq->buflen[id] = buflen;
+
/*
* A driver MUST NOT make the first descriptor in the list
* available before all subsequent descriptors comprising
@@ -1455,6 +1494,11 @@ static void *virtqueue_get_buf_ctx_packed(struct virtqueue *_vq,
BAD_RING(vq, "id %u is not a head!\n", id);
return NULL;
}
+ if (vq->buflen && unlikely(*len > vq->buflen[id])) {
+ BAD_RING(vq, "used len %d is larger than in buflen %u\n",
+ *len, vq->buflen[id]);
+ return NULL;
+ }
/* detach_buf_packed clears data, so grab it now. */
ret = vq->packed.desc_state[id].data;
@@ -1660,6 +1704,7 @@ static struct virtqueue *vring_create_virtqueue_packed(
struct vring_virtqueue *vq;
struct vring_packed_desc *ring;
struct vring_packed_desc_event *driver, *device;
+ struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
dma_addr_t ring_dma_addr, driver_event_dma_addr, device_event_dma_addr;
size_t ring_size_in_bytes, event_size_in_bytes;
@@ -1749,6 +1794,15 @@ static struct virtqueue *vring_create_virtqueue_packed(
if (!vq->packed.desc_extra)
goto err_desc_extra;
+ if (!drv->suppress_used_validation || force_used_validation) {
+ vq->buflen = kmalloc_array(num, sizeof(*vq->buflen),
+ GFP_KERNEL);
+ if (!vq->buflen)
+ goto err_buflen;
+ } else {
+ vq->buflen = NULL;
+ }
+
/* No callback? Tell other side not to bother us. */
if (!callback) {
vq->packed.event_flags_shadow = VRING_PACKED_EVENT_FLAG_DISABLE;
@@ -1761,6 +1815,8 @@ static struct virtqueue *vring_create_virtqueue_packed(
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
+err_buflen:
+ kfree(vq->packed.desc_extra);
err_desc_extra:
kfree(vq->packed.desc_state);
err_desc_state:
@@ -2168,6 +2224,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
void (*callback)(struct virtqueue *),
const char *name)
{
+ struct virtio_driver *drv = drv_to_virtio(vdev->dev.driver);
struct vring_virtqueue *vq;
if (virtio_has_feature(vdev, VIRTIO_F_RING_PACKED))
@@ -2227,6 +2284,15 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
if (!vq->split.desc_extra)
goto err_extra;
+ if (!drv->suppress_used_validation || force_used_validation) {
+ vq->buflen = kmalloc_array(vring.num, sizeof(*vq->buflen),
+ GFP_KERNEL);
+ if (!vq->buflen)
+ goto err_buflen;
+ } else {
+ vq->buflen = NULL;
+ }
+
/* Put everything in free lists. */
vq->free_head = 0;
memset(vq->split.desc_state, 0, vring.num *
@@ -2237,6 +2303,8 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
spin_unlock(&vdev->vqs_list_lock);
return &vq->vq;
+err_buflen:
+ kfree(vq->split.desc_extra);
err_extra:
kfree(vq->split.desc_state);
err_state:
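
The buflen arrays added above record, per descriptor head, how many device-writable bytes were actually posted with the buffer, so a buggy or malicious device cannot report a used length larger than it was given. A standalone sketch of the check (hypothetical helper name; the real code compares *len against vq->buflen[head] inline):

#include <linux/types.h>

/* Sketch: validate a device-reported used length against the recorded one. */
static bool sketch_used_len_ok(const u32 *buflen, unsigned int head, u32 used_len)
{
        /* buflen is NULL when the driver suppresses used-length validation. */
        if (!buflen)
                return true;

        return used_len <= buflen[head];
}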
diff --git a/drivers/virtio/virtio_vdpa.c b/drivers/virtio/virtio_vdpa.c
index 72eaef2caeb1..f85f860bc10b 100644
--- a/drivers/virtio/virtio_vdpa.c
+++ b/drivers/virtio/virtio_vdpa.c
@@ -65,9 +65,8 @@ static void virtio_vdpa_set(struct virtio_device *vdev, unsigned offset,
const void *buf, unsigned len)
{
struct vdpa_device *vdpa = vd_get_vdpa(vdev);
- const struct vdpa_config_ops *ops = vdpa->config;
- ops->set_config(vdpa, offset, buf, len);
+ vdpa_set_config(vdpa, offset, buf, len);
}
static u32 virtio_vdpa_generation(struct virtio_device *vdev)
@@ -145,7 +144,8 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
/* Assume split virtqueue, switch to packed if necessary */
struct vdpa_vq_state state = {0};
unsigned long flags;
- u32 align, num;
+ u32 align, max_num, min_num = 1;
+ bool may_reduce_num = true;
int err;
if (!name)
@@ -163,16 +163,21 @@ virtio_vdpa_setup_vq(struct virtio_device *vdev, unsigned int index,
if (!info)
return ERR_PTR(-ENOMEM);
- num = ops->get_vq_num_max(vdpa);
- if (num == 0) {
+ max_num = ops->get_vq_num_max(vdpa);
+ if (max_num == 0) {
err = -ENOENT;
goto error_new_virtqueue;
}
+ if (ops->get_vq_num_min)
+ min_num = ops->get_vq_num_min(vdpa);
+
+ may_reduce_num = (max_num == min_num) ? false : true;
+
/* Create the vring */
align = ops->get_vq_align(vdpa);
- vq = vring_create_virtqueue(index, num, align, vdev,
- true, true, ctx,
+ vq = vring_create_virtqueue(index, max_num, align, vdev,
+ true, may_reduce_num, ctx,
virtio_vdpa_notify, callback, name);
if (!vq) {
err = -ENOMEM;
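
The vDPA hunk above stops assuming the ring can always be shrunk: if the parent reports a minimum queue size equal to its maximum, the ring must be created at exactly that size. A small sketch of that decision; the helper name is illustrative, the ops and types come from the hunk:

#include <linux/vdpa.h>

/* Sketch: decide whether vring_create_virtqueue() may reduce the ring size. */
static bool sketch_may_reduce_num(struct vdpa_device *vdpa, u32 *max_num)
{
        const struct vdpa_config_ops *ops = vdpa->config;
        u32 min_num = 1;

        *max_num = ops->get_vq_num_max(vdpa);
        if (ops->get_vq_num_min)
                min_num = ops->get_vq_num_min(vdpa);

        /* A fixed-size ring (min == max) must not be reduced. */
        return *max_num != min_num;
}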
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 643c6c2d0b72..ced2fc0deb8c 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -71,8 +71,6 @@
#define TCOBASE(p) ((p)->tco_res->start)
/* SMI Control and Enable Register */
#define SMI_EN(p) ((p)->smi_res->start)
-#define TCO_EN (1 << 13)
-#define GBL_SMI_EN (1 << 0)
#define TCO_RLD(p) (TCOBASE(p) + 0x00) /* TCO Timer Reload/Curr. Value */
#define TCOv1_TMR(p) (TCOBASE(p) + 0x01) /* TCOv1 Timer Initial Value*/
@@ -357,12 +355,8 @@ static int iTCO_wdt_set_timeout(struct watchdog_device *wd_dev, unsigned int t)
tmrval = seconds_to_ticks(p, t);
- /*
- * If TCO SMIs are off, the timer counts down twice before rebooting.
- * Otherwise, the BIOS generally reboots when the SMI triggers.
- */
- if (p->smi_res &&
- (inl(SMI_EN(p)) & (TCO_EN | GBL_SMI_EN)) != (TCO_EN | GBL_SMI_EN))
+ /* For TCO v1 the timer counts down twice before rebooting */
+ if (p->iTCO_version == 1)
tmrval /= 2;
/* from the specs: */
@@ -527,7 +521,7 @@ static int iTCO_wdt_probe(struct platform_device *pdev)
* Disables TCO logic generating an SMI#
*/
val32 = inl(SMI_EN(p));
- val32 &= ~TCO_EN; /* Turn off SMI clearing watchdog */
+ val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN(p));
}
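
The probe hunk above goes back to the open-coded mask: 0xffffdfff is all-ones with bit 13 clear, i.e. exactly the TCO_EN bit whose define the same diff removes. A trivial check of that equivalence (plain C, illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t tco_en = UINT32_C(1) << 13;    /* the removed TCO_EN bit */

        assert((uint32_t)~tco_en == UINT32_C(0xffffdfff));
        return 0;
}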
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index 2693ffb24ac7..31b03fa71341 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -119,7 +119,7 @@ static int ixp4xx_wdt_probe(struct platform_device *pdev)
iwdt = devm_kzalloc(dev, sizeof(*iwdt), GFP_KERNEL);
if (!iwdt)
return -ENOMEM;
- iwdt->base = dev->platform_data;
+ iwdt->base = (void __iomem *)dev->platform_data;
/*
* Retrieve rate from a fixed clock from the device tree if
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c
index 796fbb048cbe..3d208d627fb0 100644
--- a/drivers/watchdog/mtk_wdt.c
+++ b/drivers/watchdog/mtk_wdt.c
@@ -9,9 +9,9 @@
* Based on sunxi_wdt.c
*/
-#include <dt-bindings/reset-controller/mt2712-resets.h>
-#include <dt-bindings/reset-controller/mt8183-resets.h>
-#include <dt-bindings/reset-controller/mt8192-resets.h>
+#include <dt-bindings/reset/mt2712-resets.h>
+#include <dt-bindings/reset/mt8183-resets.h>
+#include <dt-bindings/reset/mt8192-resets.h>
#include <dt-bindings/reset/mt8195-resets.h>
#include <linux/delay.h>
#include <linux/err.h>
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 1616f93dfad7..74d785b2b478 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -268,8 +268,12 @@ static int omap_wdt_probe(struct platform_device *pdev)
wdev->wdog.bootstatus = WDIOF_CARDRESET;
}
- if (!early_enable)
+ if (early_enable) {
+ omap_wdt_start(&wdev->wdog);
+ set_bit(WDOG_HW_RUNNING, &wdev->wdog.status);
+ } else {
omap_wdt_disable(wdev);
+ }
ret = watchdog_register_device(&wdev->wdog);
if (ret) {
diff --git a/drivers/watchdog/sbsa_gwdt.c b/drivers/watchdog/sbsa_gwdt.c
index ee9ff38929eb..9791c74aebd4 100644
--- a/drivers/watchdog/sbsa_gwdt.c
+++ b/drivers/watchdog/sbsa_gwdt.c
@@ -130,7 +130,7 @@ static u64 sbsa_gwdt_reg_read(struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
return readl(gwdt->control_base + SBSA_GWDT_WOR);
else
- return readq(gwdt->control_base + SBSA_GWDT_WOR);
+ return lo_hi_readq(gwdt->control_base + SBSA_GWDT_WOR);
}
static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
@@ -138,7 +138,7 @@ static void sbsa_gwdt_reg_write(u64 val, struct sbsa_gwdt *gwdt)
if (gwdt->version == 0)
writel((u32)val, gwdt->control_base + SBSA_GWDT_WOR);
else
- writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
+ lo_hi_writeq(val, gwdt->control_base + SBSA_GWDT_WOR);
}
/*
@@ -411,4 +411,3 @@ MODULE_AUTHOR("Suravee Suthikulpanit <Suravee.Suthikulpanit@amd.com>");
MODULE_AUTHOR("Al Stone <al.stone@linaro.org>");
MODULE_AUTHOR("Timur Tabi <timur@codeaurora.org>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:" DRV_NAME);
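
The SBSA accessors now use lo_hi_readq()/lo_hi_writeq(), which split the 64-bit WOR access into two 32-bit operations, low word first, so the revision-1 register still works where native 64-bit MMIO is unavailable. A hedged sketch of the lo-hi convention these helpers implement (simplified; the real ones live in include/linux/io-64-nonatomic-lo-hi.h):

#include <linux/io.h>
#include <linux/types.h>

/* Sketch: 64-bit MMIO emulated as two 32-bit accesses, low word first. */
static u64 sketch_lo_hi_readq(const volatile void __iomem *addr)
{
        u32 low = readl(addr);
        u32 high = readl(addr + 4);

        return low + ((u64)high << 32);
}

static void sketch_lo_hi_writeq(u64 val, volatile void __iomem *addr)
{
        writel((u32)val, addr);
        writel((u32)(val >> 32), addr + 4);
}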